/*	$NetBSD: if_mcx.c,v 1.12.4.2 2020/04/13 08:04:26 martin Exp $ */
/*	$OpenBSD: if_mcx.c,v 1.33 2019/09/12 04:23:59 jmatthew Exp $ */

/*
 * Copyright (c) 2017 David Gwynne <dlg@openbsd.org>
 * Copyright (c) 2019 Jonathan Matthew <jmatthew@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/callout.h>
#include <sys/workqueue.h>
#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/bus.h>

#include <machine/intr.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <netinet/in.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

/* XXX This driver is not yet MP-safe; don't claim to be! */
/* #ifdef NET_MPSAFE */
/* #define MCX_MPSAFE	1 */
/* #define CALLOUT_FLAGS	CALLOUT_MPSAFE */
/* #else */
#define CALLOUT_FLAGS	0
/* #endif */

#define MCX_MAX_NINTR	1

#define BUS_DMASYNC_PRERW	(BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)
#define BUS_DMASYNC_POSTRW	(BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)
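
/*
 * Shorthand for syncing a DMA map in both directions around device
 * access.  An illustrative use (sc_dmat, map and size stand in for the
 * driver's own DMA state, not names defined here) would be:
 *
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, size, BUS_DMASYNC_PRERW);
 *	... let the device read/write the memory ...
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, size, BUS_DMASYNC_POSTRW);
 */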

#define MCX_HCA_BAR	PCI_MAPREG_START /* BAR 0 */

#define MCX_FW_VER			0x0000
#define MCX_FW_VER_MAJOR(_v)		((_v) & 0xffff)
#define MCX_FW_VER_MINOR(_v)		((_v) >> 16)
#define MCX_CMDIF_FW_SUBVER		0x0004
#define MCX_FW_VER_SUBMINOR(_v)		((_v) & 0xffff)
#define MCX_CMDIF(_v)			((_v) >> 16)
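
/*
 * The firmware version is spread over two 32-bit words at the start of
 * BAR 0; a rough sketch of the decode (sc_memt/sc_memh are illustrative
 * names for the mapped BAR, not fields defined here):
 *
 *	fw  = bus_space_read_4(sc->sc_memt, sc->sc_memh, MCX_FW_VER);
 *	sub = bus_space_read_4(sc->sc_memt, sc->sc_memh, MCX_CMDIF_FW_SUBVER);
 *
 * giving MCX_FW_VER_MAJOR(fw).MCX_FW_VER_MINOR(fw).MCX_FW_VER_SUBMINOR(sub),
 * while MCX_CMDIF(sub) is the command interface revision that gets
 * compared against MCX_CMD_IF_SUPPORTED below.
 */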

#define MCX_ISSI		1 /* as per the PRM */
#define MCX_CMD_IF_SUPPORTED	5

#define MCX_HARDMTU		9500

#define MCX_MAX_CQS		2	/* rq, sq */

/* queue sizes */
#define MCX_LOG_EQ_SIZE		6	/* one page */
#define MCX_LOG_CQ_SIZE		11
#define MCX_LOG_RQ_SIZE		10
#define MCX_LOG_SQ_SIZE		11

/* completion event moderation - about 10khz, or 90% of the cq */
#define MCX_CQ_MOD_PERIOD	50
#define MCX_CQ_MOD_COUNTER	(((1 << (MCX_LOG_CQ_SIZE - 1)) * 9) / 10)
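
/*
 * These sizes are log2 entry counts: 6 gives a 64-entry event queue
 * (64 x 64-byte entries, i.e. one 4k page), 11 gives 2048-entry
 * completion and send queues, and 10 gives a 1024-entry receive queue.
 * With MCX_LOG_CQ_SIZE at 11, MCX_CQ_MOD_COUNTER works out to
 * ((1 << 10) * 9) / 10 = 921 completions per moderation event.
 */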

#define MCX_LOG_SQ_ENTRY_SIZE	6
#define MCX_SQ_ENTRY_MAX_SLOTS	4
#define MCX_SQ_SEGS_PER_SLOT	\
	(sizeof(struct mcx_sq_entry) / sizeof(struct mcx_sq_entry_seg))
#define MCX_SQ_MAX_SEGMENTS	\
	1 + ((MCX_SQ_ENTRY_MAX_SLOTS-1) * MCX_SQ_SEGS_PER_SLOT)
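
/*
 * A send queue entry is one 64-byte slot (1 << MCX_LOG_SQ_ENTRY_SIZE)
 * and a single packet may use up to MCX_SQ_ENTRY_MAX_SLOTS slots.  The
 * first slot carries the control and ethernet segments (including the
 * inline headers), which appears to be why the bound on DMA segments is
 * 1 + (slots - 1) * segments-per-slot rather than slots * segments-per-slot.
 */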

#define MCX_LOG_FLOW_TABLE_SIZE	5
#define MCX_NUM_STATIC_FLOWS	4	/* promisc, allmulti, ucast, bcast */
#define MCX_NUM_MCAST_FLOWS \
	((1 << MCX_LOG_FLOW_TABLE_SIZE) - MCX_NUM_STATIC_FLOWS)
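
/*
 * The flow table has 1 << MCX_LOG_FLOW_TABLE_SIZE = 32 entries; with
 * the four static entries (promisc, allmulti, ucast, bcast) reserved,
 * MCX_NUM_MCAST_FLOWS = 28 entries are left for individual multicast
 * filters.
 */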

#define MCX_SQ_INLINE_SIZE	18

/* doorbell offsets */
#define MCX_CQ_DOORBELL_OFFSET	0
#define MCX_CQ_DOORBELL_SIZE	16
#define MCX_RQ_DOORBELL_OFFSET	64
#define MCX_SQ_DOORBELL_OFFSET	64

#define MCX_WQ_DOORBELL_MASK	0xffff

/* uar registers */
#define MCX_UAR_CQ_DOORBELL	0x20
#define MCX_UAR_EQ_DOORBELL_ARM	0x40
#define MCX_UAR_EQ_DOORBELL	0x48
#define MCX_UAR_BF		0x800

#define MCX_CMDQ_ADDR_HI	0x0010
#define MCX_CMDQ_ADDR_LO	0x0014
#define MCX_CMDQ_ADDR_NMASK	0xfff
#define MCX_CMDQ_LOG_SIZE(_v)	((_v) >> 4 & 0xf)
#define MCX_CMDQ_LOG_STRIDE(_v) ((_v) >> 0 & 0xf)
#define MCX_CMDQ_INTERFACE_MASK	(0x3 << 8)
#define MCX_CMDQ_INTERFACE_FULL_DRIVER	(0x0 << 8)
#define MCX_CMDQ_INTERFACE_DISABLED	(0x1 << 8)
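
/*
 * The low 12 bits of MCX_CMDQ_ADDR_LO (MCX_CMDQ_ADDR_NMASK) are not
 * part of the command queue address; they describe the queue geometry.
 * A sketch of the decode, with sc_memt/sc_memh as illustrative names
 * for the mapped BAR:
 *
 *	lo = bus_space_read_4(sc->sc_memt, sc->sc_memh, MCX_CMDQ_ADDR_LO);
 *	nentries = 1 << MCX_CMDQ_LOG_SIZE(lo);
 *	stride   = 1 << MCX_CMDQ_LOG_STRIDE(lo);
 */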

#define MCX_CMDQ_DOORBELL	0x0018

#define MCX_STATE		0x01fc
#define MCX_STATE_MASK		(1U << 31)
#define MCX_STATE_INITIALIZING	(1 << 31)
#define MCX_STATE_READY		(0 << 31)
#define MCX_STATE_INTERFACE_MASK	(0x3 << 24)
#define MCX_STATE_INTERFACE_FULL_DRIVER	(0x0 << 24)
#define MCX_STATE_INTERFACE_DISABLED	(0x1 << 24)
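
/*
 * After the command queue address has been programmed, MCX_STATE is
 * polled until the initializing bit clears before any command is
 * posted; the interface fields (here and in MCX_CMDQ_INTERFACE_* above)
 * distinguish a full driver from a disabled device.
 */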

#define MCX_INTERNAL_TIMER	0x1000
#define MCX_INTERNAL_TIMER_H	0x1000
#define MCX_INTERNAL_TIMER_L	0x1004

#define MCX_CLEAR_INT		0x100c

#define MCX_REG_OP_WRITE	0
#define MCX_REG_OP_READ		1

#define MCX_REG_PMLP		0x5002
#define MCX_REG_PMTU		0x5003
#define MCX_REG_PTYS		0x5004
#define MCX_REG_PAOS		0x5006
#define MCX_REG_PFCC		0x5007
#define MCX_REG_PPCNT		0x5008
#define MCX_REG_MCIA		0x9014

#define MCX_ETHER_CAP_SGMII	(1 << 0)
#define MCX_ETHER_CAP_1000_KX	(1 << 1)
#define MCX_ETHER_CAP_10G_CX4	(1 << 2)
#define MCX_ETHER_CAP_10G_KX4	(1 << 3)
#define MCX_ETHER_CAP_10G_KR	(1 << 4)
#define MCX_ETHER_CAP_20G_KR2	(1 << 5)
#define MCX_ETHER_CAP_40G_CR4	(1 << 6)
#define MCX_ETHER_CAP_40G_KR4	(1 << 7)
#define MCX_ETHER_CAP_56G_R4	(1 << 8)
#define MCX_ETHER_CAP_10G_CR	(1 << 12)
#define MCX_ETHER_CAP_10G_SR	(1 << 13)
#define MCX_ETHER_CAP_10G_LR	(1 << 14)
#define MCX_ETHER_CAP_40G_SR4	(1 << 15)
#define MCX_ETHER_CAP_40G_LR4	(1 << 16)
#define MCX_ETHER_CAP_50G_SR2	(1 << 18)
#define MCX_ETHER_CAP_100G_CR4	(1 << 20)
#define MCX_ETHER_CAP_100G_SR4	(1 << 21)
#define MCX_ETHER_CAP_100G_KR4	(1 << 22)
#define MCX_ETHER_CAP_100G_LR4	(1 << 23)
#define MCX_ETHER_CAP_100_TX	(1 << 24)
#define MCX_ETHER_CAP_1000_T	(1 << 25)
#define MCX_ETHER_CAP_10G_T	(1 << 26)
#define MCX_ETHER_CAP_25G_CR	(1 << 27)
#define MCX_ETHER_CAP_25G_KR	(1 << 28)
#define MCX_ETHER_CAP_25G_SR	(1 << 29)
#define MCX_ETHER_CAP_50G_CR2	(1 << 30)
#define MCX_ETHER_CAP_50G_KR2	(1 << 31)

#define MCX_PAGE_SHIFT		12
#define MCX_PAGE_SIZE		(1 << MCX_PAGE_SHIFT)
#define MCX_MAX_CQE		32

#define MCX_CMD_QUERY_HCA_CAP	0x100
#define MCX_CMD_QUERY_ADAPTER	0x101
#define MCX_CMD_INIT_HCA	0x102
#define MCX_CMD_TEARDOWN_HCA	0x103
#define MCX_CMD_ENABLE_HCA	0x104
#define MCX_CMD_DISABLE_HCA	0x105
#define MCX_CMD_QUERY_PAGES	0x107
#define MCX_CMD_MANAGE_PAGES	0x108
#define MCX_CMD_SET_HCA_CAP	0x109
#define MCX_CMD_QUERY_ISSI	0x10a
#define MCX_CMD_SET_ISSI	0x10b
#define MCX_CMD_SET_DRIVER_VERSION \
				0x10d
#define MCX_CMD_QUERY_SPECIAL_CONTEXTS \
				0x203
#define MCX_CMD_CREATE_EQ	0x301
#define MCX_CMD_DESTROY_EQ	0x302
#define MCX_CMD_CREATE_CQ	0x400
#define MCX_CMD_DESTROY_CQ	0x401
#define MCX_CMD_QUERY_NIC_VPORT_CONTEXT \
				0x754
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT \
				0x755
#define MCX_CMD_QUERY_VPORT_COUNTERS \
				0x770
#define MCX_CMD_ALLOC_PD	0x800
#define MCX_CMD_ALLOC_UAR	0x802
#define MCX_CMD_ACCESS_REG	0x805
#define MCX_CMD_ALLOC_TRANSPORT_DOMAIN \
				0x816
#define MCX_CMD_CREATE_TIR	0x900
#define MCX_CMD_DESTROY_TIR	0x902
#define MCX_CMD_CREATE_SQ	0x904
#define MCX_CMD_MODIFY_SQ	0x905
#define MCX_CMD_DESTROY_SQ	0x906
#define MCX_CMD_QUERY_SQ	0x907
#define MCX_CMD_CREATE_RQ	0x908
#define MCX_CMD_MODIFY_RQ	0x909
#define MCX_CMD_DESTROY_RQ	0x90a
#define MCX_CMD_QUERY_RQ	0x90b
#define MCX_CMD_CREATE_TIS	0x912
#define MCX_CMD_DESTROY_TIS	0x914
#define MCX_CMD_SET_FLOW_TABLE_ROOT \
				0x92f
#define MCX_CMD_CREATE_FLOW_TABLE \
				0x930
#define MCX_CMD_DESTROY_FLOW_TABLE \
				0x931
#define MCX_CMD_QUERY_FLOW_TABLE \
				0x932
#define MCX_CMD_CREATE_FLOW_GROUP \
				0x933
#define MCX_CMD_DESTROY_FLOW_GROUP \
				0x934
#define MCX_CMD_QUERY_FLOW_GROUP \
				0x935
#define MCX_CMD_SET_FLOW_TABLE_ENTRY \
				0x936
#define MCX_CMD_QUERY_FLOW_TABLE_ENTRY \
				0x937
#define MCX_CMD_DELETE_FLOW_TABLE_ENTRY \
				0x938
#define MCX_CMD_ALLOC_FLOW_COUNTER \
				0x939
#define MCX_CMD_QUERY_FLOW_COUNTER \
				0x93b

#define MCX_QUEUE_STATE_RST	0
#define MCX_QUEUE_STATE_RDY	1
#define MCX_QUEUE_STATE_ERR	3

#define MCX_FLOW_TABLE_TYPE_RX	0
#define MCX_FLOW_TABLE_TYPE_TX	1

#define MCX_CMDQ_INLINE_DATASIZE 16

struct mcx_cmdq_entry {
	uint8_t			cq_type;
#define MCX_CMDQ_TYPE_PCIE		0x7
	uint8_t			cq_reserved0[3];

	uint32_t		cq_input_length;
	uint64_t		cq_input_ptr;
	uint8_t			cq_input_data[MCX_CMDQ_INLINE_DATASIZE];

	uint8_t			cq_output_data[MCX_CMDQ_INLINE_DATASIZE];
	uint64_t		cq_output_ptr;
	uint32_t		cq_output_length;

	uint8_t			cq_token;
	uint8_t			cq_signature;
	uint8_t			cq_reserved1[1];
	uint8_t			cq_status;
#define MCX_CQ_STATUS_SHIFT		1
#define MCX_CQ_STATUS_MASK		(0x7f << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_OK		(0x00 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_INT_ERR		(0x01 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_OPCODE	(0x02 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_PARAM		(0x03 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_SYS_STATE	(0x04 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_RESOURCE	(0x05 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_RESOURCE_BUSY	(0x06 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_EXCEED_LIM	(0x08 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_RES_STATE	(0x09 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_INDEX		(0x0a << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_NO_RESOURCES	(0x0f << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_INPUT_LEN	(0x50 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_OUTPUT_LEN	(0x51 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_RESOURCE_STATE \
					(0x10 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_SIZE		(0x40 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_OWN_MASK		0x1
#define MCX_CQ_STATUS_OWN_SW		0x0
#define MCX_CQ_STATUS_OWN_HW		0x1
} __packed __aligned(8);
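
/*
 * Commands whose input or output fits in MCX_CMDQ_INLINE_DATASIZE bytes
 * are carried directly in cq_input_data/cq_output_data (the CTASSERTs
 * against MCX_CMDQ_INLINE_DATASIZE further down check exactly that);
 * anything larger goes through mailbox chains pointed to by
 * cq_input_ptr/cq_output_ptr.  Ownership is handed to the firmware by
 * setting MCX_CQ_STATUS_OWN_HW in cq_status and ringing
 * MCX_CMDQ_DOORBELL; on completion the firmware flips the bit back to
 * MCX_CQ_STATUS_OWN_SW and fills in the status and output.
 */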

#define MCX_CMDQ_MAILBOX_DATASIZE	512

struct mcx_cmdq_mailbox {
	uint8_t			mb_data[MCX_CMDQ_MAILBOX_DATASIZE];
	uint8_t			mb_reserved0[48];
	uint64_t		mb_next_ptr;
	uint32_t		mb_block_number;
	uint8_t			mb_reserved1[1];
	uint8_t			mb_token;
	uint8_t			mb_ctrl_signature;
	uint8_t			mb_signature;
} __packed __aligned(8);

#define MCX_CMDQ_MAILBOX_ALIGN	(1 << 10)
#define MCX_CMDQ_MAILBOX_SIZE	roundup(sizeof(struct mcx_cmdq_mailbox), \
				    MCX_CMDQ_MAILBOX_ALIGN)
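
/*
 * Each mailbox carries 512 bytes of command data plus a trailer that
 * chains it to the next mailbox (mb_next_ptr/mb_block_number), rounded
 * up to a 1k boundary by MCX_CMDQ_MAILBOX_SIZE.  mb_token is expected
 * to match the token of the command queue entry that owns the chain.
 */
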
/*
 * command mailbox structures
 */

struct mcx_cmd_enable_hca_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_function_id;
	uint8_t			cmd_reserved2[4];
} __packed __aligned(4);

struct mcx_cmd_enable_hca_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_init_hca_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_init_hca_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_teardown_hca_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[2];
#define MCX_CMD_TEARDOWN_HCA_GRACEFUL	0x0
#define MCX_CMD_TEARDOWN_HCA_PANIC	0x1
	uint16_t		cmd_profile;
	uint8_t			cmd_reserved2[4];
} __packed __aligned(4);

struct mcx_cmd_teardown_hca_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_access_reg_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_register_id;
	uint32_t		cmd_argument;
} __packed __aligned(4);

struct mcx_cmd_access_reg_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_reg_pmtu {
	uint8_t			rp_reserved1;
	uint8_t			rp_local_port;
	uint8_t			rp_reserved2[2];
	uint16_t		rp_max_mtu;
	uint8_t			rp_reserved3[2];
	uint16_t		rp_admin_mtu;
	uint8_t			rp_reserved4[2];
	uint16_t		rp_oper_mtu;
	uint8_t			rp_reserved5[2];
} __packed __aligned(4);

struct mcx_reg_ptys {
	uint8_t			rp_reserved1;
	uint8_t			rp_local_port;
	uint8_t			rp_reserved2;
	uint8_t			rp_proto_mask;
#define MCX_REG_PTYS_PROTO_MASK_ETH	(1 << 2)
	uint8_t			rp_reserved3[8];
	uint32_t		rp_eth_proto_cap;
	uint8_t			rp_reserved4[8];
	uint32_t		rp_eth_proto_admin;
	uint8_t			rp_reserved5[8];
	uint32_t		rp_eth_proto_oper;
	uint8_t			rp_reserved6[24];
} __packed __aligned(4);

struct mcx_reg_paos {
	uint8_t			rp_reserved1;
	uint8_t			rp_local_port;
	uint8_t			rp_admin_status;
#define MCX_REG_PAOS_ADMIN_STATUS_UP		1
#define MCX_REG_PAOS_ADMIN_STATUS_DOWN		2
#define MCX_REG_PAOS_ADMIN_STATUS_UP_ONCE	3
#define MCX_REG_PAOS_ADMIN_STATUS_DISABLED	4
	uint8_t			rp_oper_status;
#define MCX_REG_PAOS_OPER_STATUS_UP		1
#define MCX_REG_PAOS_OPER_STATUS_DOWN		2
#define MCX_REG_PAOS_OPER_STATUS_FAILED		4
	uint8_t			rp_admin_state_update;
#define MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN	(1 << 7)
	uint8_t			rp_reserved2[11];
} __packed __aligned(4);

struct mcx_reg_pfcc {
	uint8_t			rp_reserved1;
	uint8_t			rp_local_port;
	uint8_t			rp_reserved2[3];
	uint8_t			rp_prio_mask_tx;
	uint8_t			rp_reserved3;
	uint8_t			rp_prio_mask_rx;
	uint8_t			rp_pptx_aptx;
	uint8_t			rp_pfctx;
	uint8_t			rp_fctx_dis;
	uint8_t			rp_reserved4;
	uint8_t			rp_pprx_aprx;
	uint8_t			rp_pfcrx;
	uint8_t			rp_reserved5[2];
	uint16_t		rp_dev_stall_min;
	uint16_t		rp_dev_stall_crit;
	uint8_t			rp_reserved6[12];
} __packed __aligned(4);

#define MCX_PMLP_MODULE_NUM_MASK	0xff
struct mcx_reg_pmlp {
	uint8_t			rp_rxtx;
	uint8_t			rp_local_port;
	uint8_t			rp_reserved0;
	uint8_t			rp_width;
	uint32_t		rp_lane0_mapping;
	uint32_t		rp_lane1_mapping;
	uint32_t		rp_lane2_mapping;
	uint32_t		rp_lane3_mapping;
	uint8_t			rp_reserved1[44];
} __packed __aligned(4);

#define MCX_MCIA_EEPROM_BYTES	32
struct mcx_reg_mcia {
	uint8_t			rm_l;
	uint8_t			rm_module;
	uint8_t			rm_reserved0;
	uint8_t			rm_status;
	uint8_t			rm_i2c_addr;
	uint8_t			rm_page_num;
	uint16_t		rm_dev_addr;
	uint16_t		rm_reserved1;
	uint16_t		rm_size;
	uint32_t		rm_reserved2;
	uint8_t			rm_data[48];
} __packed __aligned(4);

struct mcx_cmd_query_issi_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_issi_il_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_current_issi;
	uint8_t			cmd_reserved2[4];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_query_issi_il_out) == MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_query_issi_mb_out {
	uint8_t			cmd_reserved2[16];
	uint8_t			cmd_supported_issi[80]; /* very big endian */
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_query_issi_mb_out) <= MCX_CMDQ_MAILBOX_DATASIZE);

struct mcx_cmd_set_issi_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_current_issi;
	uint8_t			cmd_reserved2[4];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_set_issi_in) <= MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_set_issi_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_set_issi_out) <= MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_query_pages_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
#define MCX_CMD_QUERY_PAGES_BOOT	0x01
#define MCX_CMD_QUERY_PAGES_INIT	0x02
#define MCX_CMD_QUERY_PAGES_REGULAR	0x03
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_pages_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_func_id;
	uint32_t		cmd_num_pages;
} __packed __aligned(4);

struct mcx_cmd_manage_pages_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
#define MCX_CMD_MANAGE_PAGES_ALLOC_FAIL \
					0x00
#define MCX_CMD_MANAGE_PAGES_ALLOC_SUCCESS \
					0x01
#define MCX_CMD_MANAGE_PAGES_HCA_RETURN_PAGES \
					0x02
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_func_id;
	uint32_t		cmd_input_num_entries;
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_manage_pages_in) == MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_manage_pages_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_output_num_entries;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_manage_pages_out) == MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_query_hca_cap_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
#define MCX_CMD_QUERY_HCA_CAP_MAX	(0x0 << 0)
#define MCX_CMD_QUERY_HCA_CAP_CURRENT	(0x1 << 0)
#define MCX_CMD_QUERY_HCA_CAP_DEVICE	(0x0 << 1)
#define MCX_CMD_QUERY_HCA_CAP_OFFLOAD	(0x1 << 1)
#define MCX_CMD_QUERY_HCA_CAP_FLOW	(0x7 << 1)
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_hca_cap_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

#define MCX_HCA_CAP_LEN		0x1000
#define MCX_HCA_CAP_NMAILBOXES \
	(MCX_HCA_CAP_LEN / MCX_CMDQ_MAILBOX_DATASIZE)
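
/*
 * The HCA capabilities are returned as a 0x1000-byte blob, which at
 * MCX_CMDQ_MAILBOX_DATASIZE (512) bytes per mailbox takes
 * MCX_HCA_CAP_NMAILBOXES = 8 mailboxes to carry.
 */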

#if __GNUC_PREREQ__(4, 3)
#define __counter__		__COUNTER__
#else
#define __counter__		__LINE__
#endif

#define __token(_tok, _num)	_tok##_num
#define _token(_tok, _num)	__token(_tok, _num)
#define __reserved__		_token(__reserved, __counter__)
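
/*
 * __reserved__ expands to a uniquely named struct field (__reserved0,
 * __reserved1, ...) using __COUNTER__ where the compiler provides it
 * and __LINE__ otherwise, so the padding fields in struct
 * mcx_cap_device below don't need to be numbered by hand.
 */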

struct mcx_cap_device {
	uint8_t			reserved0[16];

	uint8_t			log_max_srq_sz;
	uint8_t			log_max_qp_sz;
	uint8_t			__reserved__[1];
	uint8_t			log_max_qp; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_QP	0x1f

	uint8_t			__reserved__[1];
	uint8_t			log_max_srq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_SRQ	0x1f
	uint8_t			__reserved__[2];

	uint8_t			__reserved__[1];
	uint8_t			log_max_cq_sz;
	uint8_t			__reserved__[1];
	uint8_t			log_max_cq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_CQ	0x1f

	uint8_t			log_max_eq_sz;
	uint8_t			log_max_mkey; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_MKEY	0x3f
	uint8_t			__reserved__[1];
	uint8_t			log_max_eq; /* 4 bits */
#define MCX_CAP_DEVICE_LOG_MAX_EQ	0x0f

	uint8_t			max_indirection;
	uint8_t			log_max_mrw_sz; /* 7 bits */
#define MCX_CAP_DEVICE_LOG_MAX_MRW_SZ	0x7f
	uint8_t			teardown_log_max_msf_list_size;
#define MCX_CAP_DEVICE_FORCE_TEARDOWN	0x80
#define MCX_CAP_DEVICE_LOG_MAX_MSF_LIST_SIZE \
					0x3f
	uint8_t			log_max_klm_list_size; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_KLM_LIST_SIZE \
					0x3f

	uint8_t			__reserved__[1];
	uint8_t			log_max_ra_req_dc; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_REQ_DC	0x3f
	uint8_t			__reserved__[1];
	uint8_t			log_max_ra_res_dc; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RA_RES_DC \
					0x3f

	uint8_t			__reserved__[1];
	uint8_t			log_max_ra_req_qp; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RA_REQ_QP \
					0x3f
	uint8_t			__reserved__[1];
	uint8_t			log_max_ra_res_qp; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RA_RES_QP \
					0x3f

	uint8_t			flags1;
#define MCX_CAP_DEVICE_END_PAD		0x80
#define MCX_CAP_DEVICE_CC_QUERY_ALLOWED	0x40
#define MCX_CAP_DEVICE_CC_MODIFY_ALLOWED \
					0x20
#define MCX_CAP_DEVICE_START_PAD	0x10
#define MCX_CAP_DEVICE_128BYTE_CACHELINE \
					0x08
	uint8_t			__reserved__[1];
	uint16_t		gid_table_size;

	uint16_t		flags2;
#define MCX_CAP_DEVICE_OUT_OF_SEQ_CNT	0x8000
#define MCX_CAP_DEVICE_VPORT_COUNTERS	0x4000
#define MCX_CAP_DEVICE_RETRANSMISSION_Q_COUNTERS \
					0x2000
#define MCX_CAP_DEVICE_DEBUG		0x1000
#define MCX_CAP_DEVICE_MODIFY_RQ_COUNTERS_SET_ID \
					0x8000
#define MCX_CAP_DEVICE_RQ_DELAY_DROP	0x4000
#define MCX_CAP_DEVICe_MAX_QP_CNT_MASK	0x03ff
	uint16_t		pkey_table_size;

	uint8_t			flags3;
#define MCX_CAP_DEVICE_VPORT_GROUP_MANAGER \
					0x80
#define MCX_CAP_DEVICE_VHCA_GROUP_MANAGER \
					0x40
#define MCX_CAP_DEVICE_IB_VIRTUAL	0x20
#define MCX_CAP_DEVICE_ETH_VIRTUAL	0x10
#define MCX_CAP_DEVICE_ETS		0x04
#define MCX_CAP_DEVICE_NIC_FLOW_TABLE	0x02
#define MCX_CAP_DEVICE_ESWITCH_FLOW_TABLE \
					0x01
	uint8_t			local_ca_ack_delay; /* 5 bits */
#define MCX_CAP_DEVICE_LOCAL_CA_ACK_DELAY \
					0x1f
	uint8_t			port_type;
#define MCX_CAP_DEVICE_PORT_MODULE_EVENT \
					0x80
#define MCX_CAP_DEVICE_PORT_TYPE	0x03
	uint8_t			num_ports;

	uint8_t			snapshot_log_max_msg;
#define MCX_CAP_DEVICE_SNAPSHOT		0x80
#define MCX_CAP_DEVICE_LOG_MAX_MSG	0x1f
	uint8_t			max_tc; /* 4 bits */
#define MCX_CAP_DEVICE_MAX_TC		0x0f
	uint8_t			flags4;
#define MCX_CAP_DEVICE_TEMP_WARN_EVENT	0x80
#define MCX_CAP_DEVICE_DCBX		0x40
#define MCX_CAP_DEVICE_ROL_S		0x02
#define MCX_CAP_DEVICE_ROL_G		0x01
	uint8_t			wol;
#define MCX_CAP_DEVICE_WOL_S		0x40
#define MCX_CAP_DEVICE_WOL_G		0x20
#define MCX_CAP_DEVICE_WOL_A		0x10
#define MCX_CAP_DEVICE_WOL_B		0x08
#define MCX_CAP_DEVICE_WOL_M		0x04
#define MCX_CAP_DEVICE_WOL_U		0x02
#define MCX_CAP_DEVICE_WOL_P		0x01

	uint16_t		stat_rate_support;
	uint8_t			__reserved__[1];
	uint8_t			cqe_version; /* 4 bits */
#define MCX_CAP_DEVICE_CQE_VERSION	0x0f

	uint32_t		flags5;
#define MCX_CAP_DEVICE_COMPACT_ADDRESS_VECTOR \
					0x80000000
#define MCX_CAP_DEVICE_STRIDING_RQ	0x40000000
#define MCX_CAP_DEVICE_IPOIP_ENHANCED_OFFLOADS \
					0x10000000
#define MCX_CAP_DEVICE_IPOIP_IPOIP_OFFLOADS \
					0x08000000
#define MCX_CAP_DEVICE_DC_CONNECT_CP	0x00040000
#define MCX_CAP_DEVICE_DC_CNAK_DRACE	0x00020000
#define MCX_CAP_DEVICE_DRAIN_SIGERR	0x00010000
#define MCX_CAP_DEVICE_CMDIF_CHECKSUM	0x0000c000
#define MCX_CAP_DEVICE_SIGERR_QCE	0x00002000
#define MCX_CAP_DEVICE_WQ_SIGNATURE	0x00000800
#define MCX_CAP_DEVICE_SCTR_DATA_CQE	0x00000400
#define MCX_CAP_DEVICE_SHO		0x00000100
#define MCX_CAP_DEVICE_TPH		0x00000080
#define MCX_CAP_DEVICE_RF		0x00000040
#define MCX_CAP_DEVICE_DCT		0x00000020
#define MCX_CAP_DEVICE_QOS		0x00000010
#define MCX_CAP_DEVICe_ETH_NET_OFFLOADS	0x00000008
#define MCX_CAP_DEVICE_ROCE		0x00000004
#define MCX_CAP_DEVICE_ATOMIC		0x00000002

	uint32_t		flags6;
#define MCX_CAP_DEVICE_CQ_OI		0x80000000
#define MCX_CAP_DEVICE_CQ_RESIZE	0x40000000
#define MCX_CAP_DEVICE_CQ_MODERATION	0x20000000
#define MCX_CAP_DEVICE_CQ_PERIOD_MODE_MODIFY \
					0x10000000
#define MCX_CAP_DEVICE_CQ_INVALIDATE	0x08000000
#define MCX_CAP_DEVICE_RESERVED_AT_255	0x04000000
#define MCX_CAP_DEVICE_CQ_EQ_REMAP	0x02000000
#define MCX_CAP_DEVICE_PG		0x01000000
#define MCX_CAP_DEVICE_BLOCK_LB_MC	0x00800000
#define MCX_CAP_DEVICE_EXPONENTIAL_BACKOFF \
					0x00400000
#define MCX_CAP_DEVICE_SCQE_BREAK_MODERATION \
					0x00200000
#define MCX_CAP_DEVICE_CQ_PERIOD_START_FROM_CQE \
					0x00100000
#define MCX_CAP_DEVICE_CD		0x00080000
#define MCX_CAP_DEVICE_ATM		0x00040000
#define MCX_CAP_DEVICE_APM		0x00020000
#define MCX_CAP_DEVICE_IMAICL		0x00010000
#define MCX_CAP_DEVICE_QKV		0x00000200
#define MCX_CAP_DEVICE_PKV		0x00000100
#define MCX_CAP_DEVICE_SET_DETH_SQPN	0x00000080
#define MCX_CAP_DEVICE_XRC		0x00000008
#define MCX_CAP_DEVICE_UD		0x00000004
#define MCX_CAP_DEVICE_UC		0x00000002
#define MCX_CAP_DEVICE_RC		0x00000001

	uint8_t			uar_flags;
#define MCX_CAP_DEVICE_UAR_4K		0x80
	uint8_t			uar_sz; /* 6 bits */
#define MCX_CAP_DEVICE_UAR_SZ		0x3f
	uint8_t			__reserved__[1];
	uint8_t			log_pg_sz;

	uint8_t			flags7;
#define MCX_CAP_DEVICE_BF		0x80
#define MCX_CAP_DEVICE_DRIVER_VERSION	0x40
#define MCX_CAP_DEVICE_PAD_TX_ETH_PACKET \
					0x20
	uint8_t			log_bf_reg_size; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_BF_REG_SIZE	0x1f
	uint8_t			__reserved__[2];

	uint16_t		num_of_diagnostic_counters;
	uint16_t		max_wqe_sz_sq;

	uint8_t			__reserved__[2];
	uint16_t		max_wqe_sz_rq;

	uint8_t			__reserved__[2];
	uint16_t		max_wqe_sz_sq_dc;

	uint32_t		max_qp_mcg; /* 25 bits */
#define MCX_CAP_DEVICE_MAX_QP_MCG	0x1ffffff

	uint8_t			__reserved__[3];
	uint8_t			log_max_mcq;

	uint8_t			log_max_transport_domain; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_TRANSORT_DOMAIN \
					0x1f
	uint8_t			log_max_pd; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_PD	0x1f
	uint8_t			__reserved__[1];
	uint8_t			log_max_xrcd; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_XRCD	0x1f

	uint8_t			__reserved__[2];
	uint16_t		max_flow_counter;

	uint8_t			log_max_rq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RQ	0x1f
	uint8_t			log_max_sq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_SQ	0x1f
	uint8_t			log_max_tir; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_TIR	0x1f
	uint8_t			log_max_tis; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_TIS	0x1f

	uint8_t			flags8;
#define MCX_CAP_DEVICE_BASIC_CYCLIC_RCV_WQE \
					0x80
#define MCX_CAP_DEVICE_LOG_MAX_RMP	0x1f
	uint8_t			log_max_rqt; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RQT	0x1f
	uint8_t			log_max_rqt_size; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RQT_SIZE	0x1f
	uint8_t			log_max_tis_per_sq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_TIS_PER_SQ \
					0x1f
} __packed __aligned(8);

CTASSERT(offsetof(struct mcx_cap_device, max_indirection) == 0x20);
CTASSERT(offsetof(struct mcx_cap_device, flags1) == 0x2c);
CTASSERT(offsetof(struct mcx_cap_device, flags2) == 0x30);
CTASSERT(offsetof(struct mcx_cap_device, snapshot_log_max_msg) == 0x38);
CTASSERT(offsetof(struct mcx_cap_device, flags5) == 0x40);
CTASSERT(offsetof(struct mcx_cap_device, flags7) == 0x4c);
CTASSERT(sizeof(struct mcx_cap_device) <= MCX_CMDQ_MAILBOX_DATASIZE);
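
/*
 * The CTASSERTs above pin selected fields of struct mcx_cap_device to
 * the byte offsets of the corresponding words in the capability layout
 * returned by the firmware, so a padding mistake in the struct shows up
 * at compile time instead of as a misparsed capability.
 */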

struct mcx_cmd_set_driver_version_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_set_driver_version_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_set_driver_version {
	uint8_t			cmd_driver_version[64];
} __packed __aligned(8);

struct mcx_cmd_modify_nic_vport_context_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[4];
	uint32_t		cmd_field_select;
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_ADDR	0x04
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_PROMISC	0x10
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_MTU	0x40
} __packed __aligned(4);

struct mcx_cmd_modify_nic_vport_context_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_nic_vport_context_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[4];
	uint8_t			cmd_allowed_list_type;
	uint8_t			cmd_reserved2[3];
} __packed __aligned(4);

struct mcx_cmd_query_nic_vport_context_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_nic_vport_ctx {
	uint32_t		vp_min_wqe_inline_mode;
	uint8_t			vp_reserved0[32];
	uint32_t		vp_mtu;
	uint8_t			vp_reserved1[200];
	uint16_t		vp_flags;
#define MCX_NIC_VPORT_CTX_LIST_UC_MAC		(0)
#define MCX_NIC_VPORT_CTX_LIST_MC_MAC		(1 << 24)
#define MCX_NIC_VPORT_CTX_LIST_VLAN		(2 << 24)
#define MCX_NIC_VPORT_CTX_PROMISC_ALL		(1 << 13)
#define MCX_NIC_VPORT_CTX_PROMISC_MCAST		(1 << 14)
#define MCX_NIC_VPORT_CTX_PROMISC_UCAST		(1 << 15)
	uint16_t		vp_allowed_list_size;
	uint64_t		vp_perm_addr;
	uint8_t			vp_reserved2[4];
	/* allowed list follows */
} __packed __aligned(4);

struct mcx_counter {
	uint64_t		packets;
	uint64_t		octets;
} __packed __aligned(4);

struct mcx_nic_vport_counters {
	struct mcx_counter	rx_err;
	struct mcx_counter	tx_err;
	uint8_t			reserved0[64]; /* 0x30 */
	struct mcx_counter	rx_bcast;
	struct mcx_counter	tx_bcast;
	struct mcx_counter	rx_ucast;
	struct mcx_counter	tx_ucast;
	struct mcx_counter	rx_mcast;
	struct mcx_counter	tx_mcast;
	uint8_t			reserved1[0x210 - 0xd0];
} __packed __aligned(4);

struct mcx_cmd_query_vport_counters_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_vport_counters_mb_in {
	uint8_t			cmd_reserved0[8];
	uint8_t			cmd_clear;
	uint8_t			cmd_reserved1[7];
} __packed __aligned(4);

struct mcx_cmd_query_vport_counters_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_flow_counter_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_flow_counter_mb_in {
	uint8_t			cmd_reserved0[8];
	uint8_t			cmd_clear;
	uint8_t			cmd_reserved1[5];
	uint16_t		cmd_flow_counter_id;
} __packed __aligned(4);

struct mcx_cmd_query_flow_counter_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_uar_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_uar_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_uar;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_query_special_ctx_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_special_ctx_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[4];
	uint32_t		cmd_resd_lkey;
} __packed __aligned(4);

struct mcx_eq_ctx {
	uint32_t		eq_status;
#define MCX_EQ_CTX_ST_SHIFT		8
#define MCX_EQ_CTX_ST_MASK		(0xf << MCX_EQ_CTX_ST_SHIFT)
#define MCX_EQ_CTX_ST_ARMED		(0x9 << MCX_EQ_CTX_ST_SHIFT)
#define MCX_EQ_CTX_ST_FIRED		(0xa << MCX_EQ_CTX_ST_SHIFT)
#define MCX_EQ_CTX_OI_SHIFT		17
#define MCX_EQ_CTX_OI			(1 << MCX_EQ_CTX_OI_SHIFT)
#define MCX_EQ_CTX_EC_SHIFT		18
#define MCX_EQ_CTX_EC			(1 << MCX_EQ_CTX_EC_SHIFT)
#define MCX_EQ_CTX_STATUS_SHIFT		28
#define MCX_EQ_CTX_STATUS_MASK		(0xf << MCX_EQ_CTX_STATUS_SHIFT)
#define MCX_EQ_CTX_STATUS_OK		(0x0 << MCX_EQ_CTX_STATUS_SHIFT)
#define MCX_EQ_CTX_STATUS_EQ_WRITE_FAILURE \
					(0xa << MCX_EQ_CTX_STATUS_SHIFT)
	uint32_t		eq_reserved1;
	uint32_t		eq_page_offset;
#define MCX_EQ_CTX_PAGE_OFFSET_SHIFT	5
	uint32_t		eq_uar_size;
#define MCX_EQ_CTX_UAR_PAGE_MASK	0xffffff
#define MCX_EQ_CTX_LOG_EQ_SIZE_SHIFT	24
	uint32_t		eq_reserved2;
	uint8_t			eq_reserved3[3];
	uint8_t			eq_intr;
	uint32_t		eq_log_page_size;
#define MCX_EQ_CTX_LOG_PAGE_SIZE_SHIFT	24
	uint32_t		eq_reserved4[3];
	uint32_t		eq_consumer_counter;
	uint32_t		eq_producer_counter;
#define MCX_EQ_CTX_COUNTER_MASK		0xffffff
	uint32_t		eq_reserved5[4];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_eq_ctx) == 64);

struct mcx_cmd_create_eq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_eq_mb_in {
	struct mcx_eq_ctx	cmd_eq_ctx;
	uint8_t			cmd_reserved0[8];
	uint64_t		cmd_event_bitmask;
#define MCX_EVENT_TYPE_COMPLETION	0x00
#define MCX_EVENT_TYPE_CQ_ERROR		0x04
#define MCX_EVENT_TYPE_INTERNAL_ERROR	0x08
#define MCX_EVENT_TYPE_PORT_CHANGE	0x09
#define MCX_EVENT_TYPE_CMD_COMPLETION	0x0a
#define MCX_EVENT_TYPE_PAGE_REQUEST	0x0b
#define MCX_EVENT_TYPE_LAST_WQE		0x13
	uint8_t			cmd_reserved1[176];
} __packed __aligned(4);

struct mcx_cmd_create_eq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_eqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_eq_entry {
	uint8_t			eq_reserved1;
	uint8_t			eq_event_type;
	uint8_t			eq_reserved2;
	uint8_t			eq_event_sub_type;

	uint8_t			eq_reserved3[28];
	uint32_t		eq_event_data[7];
	uint8_t			eq_reserved4[2];
	uint8_t			eq_signature;
	uint8_t			eq_owner;
#define MCX_EQ_ENTRY_OWNER_INIT		1
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_eq_entry) == 64);
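
/*
 * Event queue entries are written by the hardware; the eq_owner bit is
 * toggled on each pass around the ring so software can tell a freshly
 * written entry from a stale one, with the ring initialised to
 * MCX_EQ_ENTRY_OWNER_INIT.
 */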
1092 1.12.4.2 martin
1093 1.12.4.2 martin struct mcx_cmd_alloc_pd_in {
1094 1.12.4.2 martin uint16_t cmd_opcode;
1095 1.12.4.2 martin uint8_t cmd_reserved0[4];
1096 1.12.4.2 martin uint16_t cmd_op_mod;
1097 1.12.4.2 martin uint8_t cmd_reserved1[8];
1098 1.12.4.2 martin } __packed __aligned(4);
1099 1.12.4.2 martin
1100 1.12.4.2 martin struct mcx_cmd_alloc_pd_out {
1101 1.12.4.2 martin uint8_t cmd_status;
1102 1.12.4.2 martin uint8_t cmd_reserved0[3];
1103 1.12.4.2 martin uint32_t cmd_syndrome;
1104 1.12.4.2 martin uint32_t cmd_pd;
1105 1.12.4.2 martin uint8_t cmd_reserved1[4];
1106 1.12.4.2 martin } __packed __aligned(4);
1107 1.12.4.2 martin
1108 1.12.4.2 martin struct mcx_cmd_alloc_td_in {
1109 1.12.4.2 martin uint16_t cmd_opcode;
1110 1.12.4.2 martin uint8_t cmd_reserved0[4];
1111 1.12.4.2 martin uint16_t cmd_op_mod;
1112 1.12.4.2 martin uint8_t cmd_reserved1[8];
1113 1.12.4.2 martin } __packed __aligned(4);
1114 1.12.4.2 martin
1115 1.12.4.2 martin struct mcx_cmd_alloc_td_out {
1116 1.12.4.2 martin uint8_t cmd_status;
1117 1.12.4.2 martin uint8_t cmd_reserved0[3];
1118 1.12.4.2 martin uint32_t cmd_syndrome;
1119 1.12.4.2 martin uint32_t cmd_tdomain;
1120 1.12.4.2 martin uint8_t cmd_reserved1[4];
1121 1.12.4.2 martin } __packed __aligned(4);
1122 1.12.4.2 martin
1123 1.12.4.2 martin struct mcx_cmd_create_tir_in {
1124 1.12.4.2 martin uint16_t cmd_opcode;
1125 1.12.4.2 martin uint8_t cmd_reserved0[4];
1126 1.12.4.2 martin uint16_t cmd_op_mod;
1127 1.12.4.2 martin uint8_t cmd_reserved1[8];
1128 1.12.4.2 martin } __packed __aligned(4);
1129 1.12.4.2 martin
1130 1.12.4.2 martin struct mcx_cmd_create_tir_mb_in {
1131 1.12.4.2 martin uint8_t cmd_reserved0[20];
1132 1.12.4.2 martin uint32_t cmd_disp_type;
1133 1.12.4.2 martin #define MCX_TIR_CTX_DISP_TYPE_SHIFT 28
1134 1.12.4.2 martin uint8_t cmd_reserved1[8];
1135 1.12.4.2 martin uint32_t cmd_lro;
1136 1.12.4.2 martin uint8_t cmd_reserved2[8];
1137 1.12.4.2 martin uint32_t cmd_inline_rqn;
1138 1.12.4.2 martin uint32_t cmd_indir_table;
1139 1.12.4.2 martin uint32_t cmd_tdomain;
1140 1.12.4.2 martin uint8_t cmd_rx_hash_key[40];
1141 1.12.4.2 martin uint32_t cmd_rx_hash_sel_outer;
1142 1.12.4.2 martin uint32_t cmd_rx_hash_sel_inner;
1143 1.12.4.2 martin uint8_t cmd_reserved3[152];
1144 1.12.4.2 martin } __packed __aligned(4);
1145 1.12.4.2 martin
1146 1.12.4.2 martin struct mcx_cmd_create_tir_out {
1147 1.12.4.2 martin uint8_t cmd_status;
1148 1.12.4.2 martin uint8_t cmd_reserved0[3];
1149 1.12.4.2 martin uint32_t cmd_syndrome;
1150 1.12.4.2 martin uint32_t cmd_tirn;
1151 1.12.4.2 martin uint8_t cmd_reserved1[4];
1152 1.12.4.2 martin } __packed __aligned(4);
1153 1.12.4.2 martin
1154 1.12.4.2 martin struct mcx_cmd_destroy_tir_in {
1155 1.12.4.2 martin uint16_t cmd_opcode;
1156 1.12.4.2 martin uint8_t cmd_reserved0[4];
1157 1.12.4.2 martin uint16_t cmd_op_mod;
1158 1.12.4.2 martin uint32_t cmd_tirn;
1159 1.12.4.2 martin uint8_t cmd_reserved1[4];
1160 1.12.4.2 martin } __packed __aligned(4);
1161 1.12.4.2 martin
1162 1.12.4.2 martin struct mcx_cmd_destroy_tir_out {
1163 1.12.4.2 martin uint8_t cmd_status;
1164 1.12.4.2 martin uint8_t cmd_reserved0[3];
1165 1.12.4.2 martin uint32_t cmd_syndrome;
1166 1.12.4.2 martin uint8_t cmd_reserved1[8];
1167 1.12.4.2 martin } __packed __aligned(4);
1168 1.12.4.2 martin
1169 1.12.4.2 martin struct mcx_cmd_create_tis_in {
1170 1.12.4.2 martin uint16_t cmd_opcode;
1171 1.12.4.2 martin uint8_t cmd_reserved0[4];
1172 1.12.4.2 martin uint16_t cmd_op_mod;
1173 1.12.4.2 martin uint8_t cmd_reserved1[8];
1174 1.12.4.2 martin } __packed __aligned(4);
1175 1.12.4.2 martin
1176 1.12.4.2 martin struct mcx_cmd_create_tis_mb_in {
1177 1.12.4.2 martin uint8_t cmd_reserved[16];
1178 1.12.4.2 martin uint32_t cmd_prio;
1179 1.12.4.2 martin uint8_t cmd_reserved1[32];
1180 1.12.4.2 martin uint32_t cmd_tdomain;
1181 1.12.4.2 martin uint8_t cmd_reserved2[120];
1182 1.12.4.2 martin } __packed __aligned(4);
1183 1.12.4.2 martin
1184 1.12.4.2 martin struct mcx_cmd_create_tis_out {
1185 1.12.4.2 martin uint8_t cmd_status;
1186 1.12.4.2 martin uint8_t cmd_reserved0[3];
1187 1.12.4.2 martin uint32_t cmd_syndrome;
1188 1.12.4.2 martin uint32_t cmd_tisn;
1189 1.12.4.2 martin uint8_t cmd_reserved1[4];
1190 1.12.4.2 martin } __packed __aligned(4);
1191 1.12.4.2 martin
1192 1.12.4.2 martin struct mcx_cmd_destroy_tis_in {
1193 1.12.4.2 martin uint16_t cmd_opcode;
1194 1.12.4.2 martin uint8_t cmd_reserved0[4];
1195 1.12.4.2 martin uint16_t cmd_op_mod;
1196 1.12.4.2 martin uint32_t cmd_tisn;
1197 1.12.4.2 martin uint8_t cmd_reserved1[4];
1198 1.12.4.2 martin } __packed __aligned(4);
1199 1.12.4.2 martin
1200 1.12.4.2 martin struct mcx_cmd_destroy_tis_out {
1201 1.12.4.2 martin uint8_t cmd_status;
1202 1.12.4.2 martin uint8_t cmd_reserved0[3];
1203 1.12.4.2 martin uint32_t cmd_syndrome;
1204 1.12.4.2 martin uint8_t cmd_reserved1[8];
1205 1.12.4.2 martin } __packed __aligned(4);
1206 1.12.4.2 martin
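/* completion queue context, carried in the create CQ command mailbox (mcx_cmd_create_cq_mb_in) */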
1207 1.12.4.2 martin struct mcx_cq_ctx {
1208 1.12.4.2 martin uint32_t cq_status;
1209 1.12.4.2 martin uint32_t cq_reserved1;
1210 1.12.4.2 martin uint32_t cq_page_offset;
1211 1.12.4.2 martin uint32_t cq_uar_size;
1212 1.12.4.2 martin #define MCX_CQ_CTX_UAR_PAGE_MASK 0xffffff
1213 1.12.4.2 martin #define MCX_CQ_CTX_LOG_CQ_SIZE_SHIFT 24
1214 1.12.4.2 martin uint32_t cq_period_max_count;
1215 1.12.4.2 martin #define MCX_CQ_CTX_PERIOD_SHIFT 16
1216 1.12.4.2 martin uint32_t cq_eqn;
1217 1.12.4.2 martin uint32_t cq_log_page_size;
1218 1.12.4.2 martin #define MCX_CQ_CTX_LOG_PAGE_SIZE_SHIFT 24
1219 1.12.4.2 martin uint32_t cq_reserved2;
1220 1.12.4.2 martin uint32_t cq_last_notified;
1221 1.12.4.2 martin uint32_t cq_last_solicit;
1222 1.12.4.2 martin uint32_t cq_consumer_counter;
1223 1.12.4.2 martin uint32_t cq_producer_counter;
1224 1.12.4.2 martin uint8_t cq_reserved3[8];
1225 1.12.4.2 martin uint64_t cq_doorbell;
1226 1.12.4.2 martin } __packed __aligned(4);
1227 1.12.4.2 martin
1228 1.12.4.2 martin CTASSERT(sizeof(struct mcx_cq_ctx) == 64);
1229 1.12.4.2 martin
1230 1.12.4.2 martin struct mcx_cmd_create_cq_in {
1231 1.12.4.2 martin uint16_t cmd_opcode;
1232 1.12.4.2 martin uint8_t cmd_reserved0[4];
1233 1.12.4.2 martin uint16_t cmd_op_mod;
1234 1.12.4.2 martin uint8_t cmd_reserved1[8];
1235 1.12.4.2 martin } __packed __aligned(4);
1236 1.12.4.2 martin
1237 1.12.4.2 martin struct mcx_cmd_create_cq_mb_in {
1238 1.12.4.2 martin struct mcx_cq_ctx cmd_cq_ctx;
1239 1.12.4.2 martin uint8_t cmd_reserved1[192];
1240 1.12.4.2 martin } __packed __aligned(4);
1241 1.12.4.2 martin
1242 1.12.4.2 martin struct mcx_cmd_create_cq_out {
1243 1.12.4.2 martin uint8_t cmd_status;
1244 1.12.4.2 martin uint8_t cmd_reserved0[3];
1245 1.12.4.2 martin uint32_t cmd_syndrome;
1246 1.12.4.2 martin uint32_t cmd_cqn;
1247 1.12.4.2 martin uint8_t cmd_reserved1[4];
1248 1.12.4.2 martin } __packed __aligned(4);
1249 1.12.4.2 martin
1250 1.12.4.2 martin struct mcx_cmd_destroy_cq_in {
1251 1.12.4.2 martin uint16_t cmd_opcode;
1252 1.12.4.2 martin uint8_t cmd_reserved0[4];
1253 1.12.4.2 martin uint16_t cmd_op_mod;
1254 1.12.4.2 martin uint32_t cmd_cqn;
1255 1.12.4.2 martin uint8_t cmd_reserved1[4];
1256 1.12.4.2 martin } __packed __aligned(4);
1257 1.12.4.2 martin
1258 1.12.4.2 martin struct mcx_cmd_destroy_cq_out {
1259 1.12.4.2 martin uint8_t cmd_status;
1260 1.12.4.2 martin uint8_t cmd_reserved0[3];
1261 1.12.4.2 martin uint32_t cmd_syndrome;
1262 1.12.4.2 martin uint8_t cmd_reserved1[8];
1263 1.12.4.2 martin } __packed __aligned(4);
1264 1.12.4.2 martin
1265 1.12.4.2 martin struct mcx_cq_entry {
1266 1.12.4.2 martin 	uint32_t		cq_reserved0;
1267 1.12.4.2 martin uint32_t cq_lro;
1268 1.12.4.2 martin uint32_t cq_lro_ack_seq_num;
1269 1.12.4.2 martin uint32_t cq_rx_hash;
1270 1.12.4.2 martin uint8_t cq_rx_hash_type;
1271 1.12.4.2 martin uint8_t cq_ml_path;
1272 1.12.4.2 martin 	uint16_t		cq_reserved1;
1273 1.12.4.2 martin uint32_t cq_checksum;
1274 1.12.4.2 martin 	uint32_t		cq_reserved2;
1275 1.12.4.2 martin uint32_t cq_flags;
1276 1.12.4.2 martin uint32_t cq_lro_srqn;
1277 1.12.4.2 martin 	uint32_t		cq_reserved3[2];
1278 1.12.4.2 martin uint32_t cq_byte_cnt;
1279 1.12.4.2 martin uint64_t cq_timestamp;
1280 1.12.4.2 martin uint8_t cq_rx_drops;
1281 1.12.4.2 martin uint8_t cq_flow_tag[3];
1282 1.12.4.2 martin uint16_t cq_wqe_count;
1283 1.12.4.2 martin uint8_t cq_signature;
1284 1.12.4.2 martin uint8_t cq_opcode_owner;
1285 1.12.4.2 martin #define MCX_CQ_ENTRY_FLAG_OWNER (1 << 0)
1286 1.12.4.2 martin #define MCX_CQ_ENTRY_FLAG_SE (1 << 1)
1287 1.12.4.2 martin #define MCX_CQ_ENTRY_FORMAT_SHIFT 2
1288 1.12.4.2 martin #define MCX_CQ_ENTRY_OPCODE_SHIFT 4
1289 1.12.4.2 martin
1290 1.12.4.2 martin #define MCX_CQ_ENTRY_FORMAT_NO_INLINE 0
1291 1.12.4.2 martin #define MCX_CQ_ENTRY_FORMAT_INLINE_32 1
1292 1.12.4.2 martin #define MCX_CQ_ENTRY_FORMAT_INLINE_64 2
1293 1.12.4.2 martin #define MCX_CQ_ENTRY_FORMAT_COMPRESSED 3
1294 1.12.4.2 martin
1295 1.12.4.2 martin #define MCX_CQ_ENTRY_OPCODE_REQ 0
1296 1.12.4.2 martin #define MCX_CQ_ENTRY_OPCODE_SEND 2
1297 1.12.4.2 martin #define MCX_CQ_ENTRY_OPCODE_REQ_ERR 13
1298 1.12.4.2 martin #define MCX_CQ_ENTRY_OPCODE_SEND_ERR 14
1299 1.12.4.2 martin #define MCX_CQ_ENTRY_OPCODE_INVALID 15
1300 1.12.4.2 martin
1301 1.12.4.2 martin } __packed __aligned(4);
1302 1.12.4.2 martin
1303 1.12.4.2 martin CTASSERT(sizeof(struct mcx_cq_entry) == 64);
1304 1.12.4.2 martin
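/*
 * CQ doorbell record: db_update_ci reports the consumer index,
 * db_arm_ci re-arms the CQ for the next completion event.
 */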
1305 1.12.4.2 martin struct mcx_cq_doorbell {
1306 1.12.4.2 martin uint32_t db_update_ci;
1307 1.12.4.2 martin uint32_t db_arm_ci;
1308 1.12.4.2 martin #define MCX_CQ_DOORBELL_ARM_CMD_SN_SHIFT 28
1309 1.12.4.2 martin #define MCX_CQ_DOORBELL_ARM_CMD (1 << 24)
1310 1.12.4.2 martin #define MCX_CQ_DOORBELL_ARM_CI_MASK (0xffffff)
1311 1.12.4.2 martin } __packed __aligned(8);
1312 1.12.4.2 martin
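/*
 * work queue context, embedded in both the send queue (mcx_sq_ctx)
 * and receive queue (mcx_rq_ctx) contexts
 */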
1313 1.12.4.2 martin struct mcx_wq_ctx {
1314 1.12.4.2 martin uint8_t wq_type;
1315 1.12.4.2 martin #define MCX_WQ_CTX_TYPE_CYCLIC (1 << 4)
1316 1.12.4.2 martin #define MCX_WQ_CTX_TYPE_SIGNATURE (1 << 3)
1317 1.12.4.2 martin uint8_t wq_reserved0[5];
1318 1.12.4.2 martin uint16_t wq_lwm;
1319 1.12.4.2 martin uint32_t wq_pd;
1320 1.12.4.2 martin uint32_t wq_uar_page;
1321 1.12.4.2 martin uint64_t wq_doorbell;
1322 1.12.4.2 martin uint32_t wq_hw_counter;
1323 1.12.4.2 martin uint32_t wq_sw_counter;
1324 1.12.4.2 martin uint16_t wq_log_stride;
1325 1.12.4.2 martin uint8_t wq_log_page_sz;
1326 1.12.4.2 martin uint8_t wq_log_size;
1327 1.12.4.2 martin uint8_t wq_reserved1[156];
1328 1.12.4.2 martin } __packed __aligned(4);
1329 1.12.4.2 martin
1330 1.12.4.2 martin CTASSERT(sizeof(struct mcx_wq_ctx) == 0xC0);
1331 1.12.4.2 martin
1332 1.12.4.2 martin struct mcx_sq_ctx {
1333 1.12.4.2 martin uint32_t sq_flags;
1334 1.12.4.2 martin #define MCX_SQ_CTX_RLKEY (1U << 31)
1335 1.12.4.2 martin #define MCX_SQ_CTX_FRE_SHIFT (1 << 29)
1336 1.12.4.2 martin #define MCX_SQ_CTX_FLUSH_IN_ERROR (1 << 28)
1337 1.12.4.2 martin #define MCX_SQ_CTX_MIN_WQE_INLINE_SHIFT 24
1338 1.12.4.2 martin #define MCX_SQ_CTX_STATE_SHIFT 20
1339 1.12.4.2 martin uint32_t sq_user_index;
1340 1.12.4.2 martin uint32_t sq_cqn;
1341 1.12.4.2 martin uint32_t sq_reserved1[5];
1342 1.12.4.2 martin uint32_t sq_tis_lst_sz;
1343 1.12.4.2 martin #define MCX_SQ_CTX_TIS_LST_SZ_SHIFT 16
1344 1.12.4.2 martin uint32_t sq_reserved2[2];
1345 1.12.4.2 martin uint32_t sq_tis_num;
1346 1.12.4.2 martin struct mcx_wq_ctx sq_wq;
1347 1.12.4.2 martin } __packed __aligned(4);
1348 1.12.4.2 martin
1349 1.12.4.2 martin struct mcx_sq_entry_seg {
1350 1.12.4.2 martin uint32_t sqs_byte_count;
1351 1.12.4.2 martin uint32_t sqs_lkey;
1352 1.12.4.2 martin uint64_t sqs_addr;
1353 1.12.4.2 martin } __packed __aligned(4);
1354 1.12.4.2 martin
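/*
 * send queue entry: a control segment, an ethernet segment with
 * inline headers, and the data segment(s) (mcx_sq_entry_seg)
 */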
1355 1.12.4.2 martin struct mcx_sq_entry {
1356 1.12.4.2 martin /* control segment */
1357 1.12.4.2 martin uint32_t sqe_opcode_index;
1358 1.12.4.2 martin #define MCX_SQE_WQE_INDEX_SHIFT 8
1359 1.12.4.2 martin #define MCX_SQE_WQE_OPCODE_NOP 0x00
1360 1.12.4.2 martin #define MCX_SQE_WQE_OPCODE_SEND 0x0a
1361 1.12.4.2 martin uint32_t sqe_ds_sq_num;
1362 1.12.4.2 martin #define MCX_SQE_SQ_NUM_SHIFT 8
1363 1.12.4.2 martin uint32_t sqe_signature;
1364 1.12.4.2 martin #define MCX_SQE_SIGNATURE_SHIFT 24
1365 1.12.4.2 martin #define MCX_SQE_SOLICITED_EVENT 0x02
1366 1.12.4.2 martin #define MCX_SQE_CE_CQE_ON_ERR 0x00
1367 1.12.4.2 martin #define MCX_SQE_CE_CQE_FIRST_ERR 0x04
1368 1.12.4.2 martin #define MCX_SQE_CE_CQE_ALWAYS 0x08
1369 1.12.4.2 martin #define MCX_SQE_CE_CQE_SOLICIT 0x0C
1370 1.12.4.2 martin #define MCX_SQE_FM_NO_FENCE 0x00
1371 1.12.4.2 martin #define MCX_SQE_FM_SMALL_FENCE 0x40
1372 1.12.4.2 martin uint32_t sqe_mkey;
1373 1.12.4.2 martin
1374 1.12.4.2 martin /* ethernet segment */
1375 1.12.4.2 martin uint32_t sqe_reserved1;
1376 1.12.4.2 martin uint32_t sqe_mss_csum;
1377 1.12.4.2 martin #define MCX_SQE_L4_CSUM			(1U << 31)
1378 1.12.4.2 martin #define MCX_SQE_L3_CSUM (1 << 30)
1379 1.12.4.2 martin uint32_t sqe_reserved2;
1380 1.12.4.2 martin uint16_t sqe_inline_header_size;
1381 1.12.4.2 martin uint16_t sqe_inline_headers[9];
1382 1.12.4.2 martin
1383 1.12.4.2 martin /* data segment */
1384 1.12.4.2 martin struct mcx_sq_entry_seg sqe_segs[1];
1385 1.12.4.2 martin } __packed __aligned(64);
1386 1.12.4.2 martin
1387 1.12.4.2 martin CTASSERT(sizeof(struct mcx_sq_entry) == 64);
1388 1.12.4.2 martin
1389 1.12.4.2 martin struct mcx_cmd_create_sq_in {
1390 1.12.4.2 martin uint16_t cmd_opcode;
1391 1.12.4.2 martin uint8_t cmd_reserved0[4];
1392 1.12.4.2 martin uint16_t cmd_op_mod;
1393 1.12.4.2 martin uint8_t cmd_reserved1[8];
1394 1.12.4.2 martin } __packed __aligned(4);
1395 1.12.4.2 martin
1396 1.12.4.2 martin struct mcx_cmd_create_sq_out {
1397 1.12.4.2 martin uint8_t cmd_status;
1398 1.12.4.2 martin uint8_t cmd_reserved0[3];
1399 1.12.4.2 martin uint32_t cmd_syndrome;
1400 1.12.4.2 martin uint32_t cmd_sqn;
1401 1.12.4.2 martin uint8_t cmd_reserved1[4];
1402 1.12.4.2 martin } __packed __aligned(4);
1403 1.12.4.2 martin
1404 1.12.4.2 martin struct mcx_cmd_modify_sq_in {
1405 1.12.4.2 martin uint16_t cmd_opcode;
1406 1.12.4.2 martin uint8_t cmd_reserved0[4];
1407 1.12.4.2 martin uint16_t cmd_op_mod;
1408 1.12.4.2 martin uint32_t cmd_sq_state;
1409 1.12.4.2 martin uint8_t cmd_reserved1[4];
1410 1.12.4.2 martin } __packed __aligned(4);
1411 1.12.4.2 martin
1412 1.12.4.2 martin struct mcx_cmd_modify_sq_mb_in {
1413 1.12.4.2 martin uint32_t cmd_modify_hi;
1414 1.12.4.2 martin uint32_t cmd_modify_lo;
1415 1.12.4.2 martin uint8_t cmd_reserved0[8];
1416 1.12.4.2 martin struct mcx_sq_ctx cmd_sq_ctx;
1417 1.12.4.2 martin } __packed __aligned(4);
1418 1.12.4.2 martin
1419 1.12.4.2 martin struct mcx_cmd_modify_sq_out {
1420 1.12.4.2 martin uint8_t cmd_status;
1421 1.12.4.2 martin uint8_t cmd_reserved0[3];
1422 1.12.4.2 martin uint32_t cmd_syndrome;
1423 1.12.4.2 martin uint8_t cmd_reserved1[8];
1424 1.12.4.2 martin } __packed __aligned(4);
1425 1.12.4.2 martin
1426 1.12.4.2 martin struct mcx_cmd_destroy_sq_in {
1427 1.12.4.2 martin uint16_t cmd_opcode;
1428 1.12.4.2 martin uint8_t cmd_reserved0[4];
1429 1.12.4.2 martin uint16_t cmd_op_mod;
1430 1.12.4.2 martin uint32_t cmd_sqn;
1431 1.12.4.2 martin uint8_t cmd_reserved1[4];
1432 1.12.4.2 martin } __packed __aligned(4);
1433 1.12.4.2 martin
1434 1.12.4.2 martin struct mcx_cmd_destroy_sq_out {
1435 1.12.4.2 martin uint8_t cmd_status;
1436 1.12.4.2 martin uint8_t cmd_reserved0[3];
1437 1.12.4.2 martin uint32_t cmd_syndrome;
1438 1.12.4.2 martin uint8_t cmd_reserved1[8];
1439 1.12.4.2 martin } __packed __aligned(4);
1440 1.12.4.2 martin
1441 1.12.4.2 martin
1442 1.12.4.2 martin struct mcx_rq_ctx {
1443 1.12.4.2 martin uint32_t rq_flags;
1444 1.12.4.2 martin #define MCX_RQ_CTX_RLKEY (1U << 31)
1445 1.12.4.2 martin #define MCX_RQ_CTX_VLAN_STRIP_DIS (1 << 28)
1446 1.12.4.2 martin #define MCX_RQ_CTX_MEM_RQ_TYPE_SHIFT 24
1447 1.12.4.2 martin #define MCX_RQ_CTX_STATE_SHIFT 20
1448 1.12.4.2 martin #define MCX_RQ_CTX_FLUSH_IN_ERROR (1 << 18)
1449 1.12.4.2 martin uint32_t rq_user_index;
1450 1.12.4.2 martin uint32_t rq_cqn;
1451 1.12.4.2 martin uint32_t rq_reserved1;
1452 1.12.4.2 martin uint32_t rq_rmpn;
1453 1.12.4.2 martin uint32_t rq_reserved2[7];
1454 1.12.4.2 martin struct mcx_wq_ctx rq_wq;
1455 1.12.4.2 martin } __packed __aligned(4);
1456 1.12.4.2 martin
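/* receive queue entry: a single receive buffer descriptor (byte count, lkey, address) */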
1457 1.12.4.2 martin struct mcx_rq_entry {
1458 1.12.4.2 martin uint32_t rqe_byte_count;
1459 1.12.4.2 martin uint32_t rqe_lkey;
1460 1.12.4.2 martin uint64_t rqe_addr;
1461 1.12.4.2 martin } __packed __aligned(16);
1462 1.12.4.2 martin
1463 1.12.4.2 martin struct mcx_cmd_create_rq_in {
1464 1.12.4.2 martin uint16_t cmd_opcode;
1465 1.12.4.2 martin uint8_t cmd_reserved0[4];
1466 1.12.4.2 martin uint16_t cmd_op_mod;
1467 1.12.4.2 martin uint8_t cmd_reserved1[8];
1468 1.12.4.2 martin } __packed __aligned(4);
1469 1.12.4.2 martin
1470 1.12.4.2 martin struct mcx_cmd_create_rq_out {
1471 1.12.4.2 martin uint8_t cmd_status;
1472 1.12.4.2 martin uint8_t cmd_reserved0[3];
1473 1.12.4.2 martin uint32_t cmd_syndrome;
1474 1.12.4.2 martin uint32_t cmd_rqn;
1475 1.12.4.2 martin uint8_t cmd_reserved1[4];
1476 1.12.4.2 martin } __packed __aligned(4);
1477 1.12.4.2 martin
1478 1.12.4.2 martin struct mcx_cmd_modify_rq_in {
1479 1.12.4.2 martin uint16_t cmd_opcode;
1480 1.12.4.2 martin uint8_t cmd_reserved0[4];
1481 1.12.4.2 martin uint16_t cmd_op_mod;
1482 1.12.4.2 martin uint32_t cmd_rq_state;
1483 1.12.4.2 martin uint8_t cmd_reserved1[4];
1484 1.12.4.2 martin } __packed __aligned(4);
1485 1.12.4.2 martin
1486 1.12.4.2 martin struct mcx_cmd_modify_rq_mb_in {
1487 1.12.4.2 martin uint32_t cmd_modify_hi;
1488 1.12.4.2 martin uint32_t cmd_modify_lo;
1489 1.12.4.2 martin uint8_t cmd_reserved0[8];
1490 1.12.4.2 martin struct mcx_rq_ctx cmd_rq_ctx;
1491 1.12.4.2 martin } __packed __aligned(4);
1492 1.12.4.2 martin
1493 1.12.4.2 martin struct mcx_cmd_modify_rq_out {
1494 1.12.4.2 martin uint8_t cmd_status;
1495 1.12.4.2 martin uint8_t cmd_reserved0[3];
1496 1.12.4.2 martin uint32_t cmd_syndrome;
1497 1.12.4.2 martin uint8_t cmd_reserved1[8];
1498 1.12.4.2 martin } __packed __aligned(4);
1499 1.12.4.2 martin
1500 1.12.4.2 martin struct mcx_cmd_destroy_rq_in {
1501 1.12.4.2 martin uint16_t cmd_opcode;
1502 1.12.4.2 martin uint8_t cmd_reserved0[4];
1503 1.12.4.2 martin uint16_t cmd_op_mod;
1504 1.12.4.2 martin uint32_t cmd_rqn;
1505 1.12.4.2 martin uint8_t cmd_reserved1[4];
1506 1.12.4.2 martin } __packed __aligned(4);
1507 1.12.4.2 martin
1508 1.12.4.2 martin struct mcx_cmd_destroy_rq_out {
1509 1.12.4.2 martin uint8_t cmd_status;
1510 1.12.4.2 martin uint8_t cmd_reserved0[3];
1511 1.12.4.2 martin uint32_t cmd_syndrome;
1512 1.12.4.2 martin uint8_t cmd_reserved1[8];
1513 1.12.4.2 martin } __packed __aligned(4);
1514 1.12.4.2 martin
1515 1.12.4.2 martin struct mcx_cmd_create_flow_table_in {
1516 1.12.4.2 martin uint16_t cmd_opcode;
1517 1.12.4.2 martin uint8_t cmd_reserved0[4];
1518 1.12.4.2 martin uint16_t cmd_op_mod;
1519 1.12.4.2 martin uint8_t cmd_reserved1[8];
1520 1.12.4.2 martin } __packed __aligned(4);
1521 1.12.4.2 martin
1522 1.12.4.2 martin struct mcx_flow_table_ctx {
1523 1.12.4.2 martin uint8_t ft_miss_action;
1524 1.12.4.2 martin uint8_t ft_level;
1525 1.12.4.2 martin uint8_t ft_reserved0;
1526 1.12.4.2 martin uint8_t ft_log_size;
1527 1.12.4.2 martin uint32_t ft_table_miss_id;
1528 1.12.4.2 martin uint8_t ft_reserved1[28];
1529 1.12.4.2 martin } __packed __aligned(4);
1530 1.12.4.2 martin
1531 1.12.4.2 martin struct mcx_cmd_create_flow_table_mb_in {
1532 1.12.4.2 martin uint8_t cmd_table_type;
1533 1.12.4.2 martin uint8_t cmd_reserved0[7];
1534 1.12.4.2 martin struct mcx_flow_table_ctx cmd_ctx;
1535 1.12.4.2 martin } __packed __aligned(4);
1536 1.12.4.2 martin
1537 1.12.4.2 martin struct mcx_cmd_create_flow_table_out {
1538 1.12.4.2 martin uint8_t cmd_status;
1539 1.12.4.2 martin uint8_t cmd_reserved0[3];
1540 1.12.4.2 martin uint32_t cmd_syndrome;
1541 1.12.4.2 martin uint32_t cmd_table_id;
1542 1.12.4.2 martin uint8_t cmd_reserved1[4];
1543 1.12.4.2 martin } __packed __aligned(4);
1544 1.12.4.2 martin
1545 1.12.4.2 martin struct mcx_cmd_destroy_flow_table_in {
1546 1.12.4.2 martin uint16_t cmd_opcode;
1547 1.12.4.2 martin uint8_t cmd_reserved0[4];
1548 1.12.4.2 martin uint16_t cmd_op_mod;
1549 1.12.4.2 martin uint8_t cmd_reserved1[8];
1550 1.12.4.2 martin } __packed __aligned(4);
1551 1.12.4.2 martin
1552 1.12.4.2 martin struct mcx_cmd_destroy_flow_table_mb_in {
1553 1.12.4.2 martin uint8_t cmd_table_type;
1554 1.12.4.2 martin uint8_t cmd_reserved0[3];
1555 1.12.4.2 martin uint32_t cmd_table_id;
1556 1.12.4.2 martin uint8_t cmd_reserved1[40];
1557 1.12.4.2 martin } __packed __aligned(4);
1558 1.12.4.2 martin
1559 1.12.4.2 martin struct mcx_cmd_destroy_flow_table_out {
1560 1.12.4.2 martin uint8_t cmd_status;
1561 1.12.4.2 martin uint8_t cmd_reserved0[3];
1562 1.12.4.2 martin uint32_t cmd_syndrome;
1563 1.12.4.2 martin uint8_t cmd_reserved1[8];
1564 1.12.4.2 martin } __packed __aligned(4);
1565 1.12.4.2 martin
1566 1.12.4.2 martin struct mcx_cmd_set_flow_table_root_in {
1567 1.12.4.2 martin uint16_t cmd_opcode;
1568 1.12.4.2 martin uint8_t cmd_reserved0[4];
1569 1.12.4.2 martin uint16_t cmd_op_mod;
1570 1.12.4.2 martin uint8_t cmd_reserved1[8];
1571 1.12.4.2 martin } __packed __aligned(4);
1572 1.12.4.2 martin
1573 1.12.4.2 martin struct mcx_cmd_set_flow_table_root_mb_in {
1574 1.12.4.2 martin uint8_t cmd_table_type;
1575 1.12.4.2 martin uint8_t cmd_reserved0[3];
1576 1.12.4.2 martin uint32_t cmd_table_id;
1577 1.12.4.2 martin uint8_t cmd_reserved1[56];
1578 1.12.4.2 martin } __packed __aligned(4);
1579 1.12.4.2 martin
1580 1.12.4.2 martin struct mcx_cmd_set_flow_table_root_out {
1581 1.12.4.2 martin uint8_t cmd_status;
1582 1.12.4.2 martin uint8_t cmd_reserved0[3];
1583 1.12.4.2 martin uint32_t cmd_syndrome;
1584 1.12.4.2 martin uint8_t cmd_reserved1[8];
1585 1.12.4.2 martin } __packed __aligned(4);
1586 1.12.4.2 martin
1587 1.12.4.2 martin struct mcx_flow_match {
1588 1.12.4.2 martin /* outer headers */
1589 1.12.4.2 martin uint8_t mc_src_mac[6];
1590 1.12.4.2 martin uint16_t mc_ethertype;
1591 1.12.4.2 martin uint8_t mc_dest_mac[6];
1592 1.12.4.2 martin uint16_t mc_first_vlan;
1593 1.12.4.2 martin uint8_t mc_ip_proto;
1594 1.12.4.2 martin uint8_t mc_ip_dscp_ecn;
1595 1.12.4.2 martin uint8_t mc_vlan_flags;
1596 1.12.4.2 martin uint8_t mc_tcp_flags;
1597 1.12.4.2 martin uint16_t mc_tcp_sport;
1598 1.12.4.2 martin uint16_t mc_tcp_dport;
1599 1.12.4.2 martin uint32_t mc_reserved0;
1600 1.12.4.2 martin uint16_t mc_udp_sport;
1601 1.12.4.2 martin uint16_t mc_udp_dport;
1602 1.12.4.2 martin uint8_t mc_src_ip[16];
1603 1.12.4.2 martin uint8_t mc_dest_ip[16];
1604 1.12.4.2 martin
1605 1.12.4.2 martin /* misc parameters */
1606 1.12.4.2 martin uint8_t mc_reserved1[8];
1607 1.12.4.2 martin uint16_t mc_second_vlan;
1608 1.12.4.2 martin uint8_t mc_reserved2[2];
1609 1.12.4.2 martin uint8_t mc_second_vlan_flags;
1610 1.12.4.2 martin uint8_t mc_reserved3[15];
1611 1.12.4.2 martin uint32_t mc_outer_ipv6_flow_label;
1612 1.12.4.2 martin uint8_t mc_reserved4[32];
1613 1.12.4.2 martin
1614 1.12.4.2 martin uint8_t mc_reserved[384];
1615 1.12.4.2 martin } __packed __aligned(4);
1616 1.12.4.2 martin
1617 1.12.4.2 martin CTASSERT(sizeof(struct mcx_flow_match) == 512);
1618 1.12.4.2 martin
1619 1.12.4.2 martin struct mcx_cmd_create_flow_group_in {
1620 1.12.4.2 martin uint16_t cmd_opcode;
1621 1.12.4.2 martin uint8_t cmd_reserved0[4];
1622 1.12.4.2 martin uint16_t cmd_op_mod;
1623 1.12.4.2 martin uint8_t cmd_reserved1[8];
1624 1.12.4.2 martin } __packed __aligned(4);
1625 1.12.4.2 martin
1626 1.12.4.2 martin struct mcx_cmd_create_flow_group_mb_in {
1627 1.12.4.2 martin uint8_t cmd_table_type;
1628 1.12.4.2 martin uint8_t cmd_reserved0[3];
1629 1.12.4.2 martin uint32_t cmd_table_id;
1630 1.12.4.2 martin uint8_t cmd_reserved1[4];
1631 1.12.4.2 martin uint32_t cmd_start_flow_index;
1632 1.12.4.2 martin uint8_t cmd_reserved2[4];
1633 1.12.4.2 martin uint32_t cmd_end_flow_index;
1634 1.12.4.2 martin uint8_t cmd_reserved3[23];
1635 1.12.4.2 martin uint8_t cmd_match_criteria_enable;
1636 1.12.4.2 martin #define MCX_CREATE_FLOW_GROUP_CRIT_OUTER (1 << 0)
1637 1.12.4.2 martin #define MCX_CREATE_FLOW_GROUP_CRIT_MISC (1 << 1)
1638 1.12.4.2 martin #define MCX_CREATE_FLOW_GROUP_CRIT_INNER (1 << 2)
1639 1.12.4.2 martin struct mcx_flow_match cmd_match_criteria;
1640 1.12.4.2 martin uint8_t cmd_reserved4[448];
1641 1.12.4.2 martin } __packed __aligned(4);
1642 1.12.4.2 martin
1643 1.12.4.2 martin struct mcx_cmd_create_flow_group_out {
1644 1.12.4.2 martin uint8_t cmd_status;
1645 1.12.4.2 martin uint8_t cmd_reserved0[3];
1646 1.12.4.2 martin uint32_t cmd_syndrome;
1647 1.12.4.2 martin uint32_t cmd_group_id;
1648 1.12.4.2 martin uint8_t cmd_reserved1[4];
1649 1.12.4.2 martin } __packed __aligned(4);
1650 1.12.4.2 martin
1651 1.12.4.2 martin struct mcx_flow_ctx {
1652 1.12.4.2 martin uint8_t fc_reserved0[4];
1653 1.12.4.2 martin uint32_t fc_group_id;
1654 1.12.4.2 martin uint32_t fc_flow_tag;
1655 1.12.4.2 martin uint32_t fc_action;
1656 1.12.4.2 martin #define MCX_FLOW_CONTEXT_ACTION_ALLOW (1 << 0)
1657 1.12.4.2 martin #define MCX_FLOW_CONTEXT_ACTION_DROP (1 << 1)
1658 1.12.4.2 martin #define MCX_FLOW_CONTEXT_ACTION_FORWARD (1 << 2)
1659 1.12.4.2 martin #define MCX_FLOW_CONTEXT_ACTION_COUNT (1 << 3)
1660 1.12.4.2 martin uint32_t fc_dest_list_size;
1661 1.12.4.2 martin uint32_t fc_counter_list_size;
1662 1.12.4.2 martin uint8_t fc_reserved1[40];
1663 1.12.4.2 martin struct mcx_flow_match fc_match_value;
1664 1.12.4.2 martin uint8_t fc_reserved2[192];
1665 1.12.4.2 martin } __packed __aligned(4);
1666 1.12.4.2 martin
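/* destination types for flow table entries: forward to another flow table or to a TIR */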
1667 1.12.4.2 martin #define MCX_FLOW_CONTEXT_DEST_TYPE_TABLE (1 << 24)
1668 1.12.4.2 martin #define MCX_FLOW_CONTEXT_DEST_TYPE_TIR (2 << 24)
1669 1.12.4.2 martin
1670 1.12.4.2 martin struct mcx_cmd_destroy_flow_group_in {
1671 1.12.4.2 martin uint16_t cmd_opcode;
1672 1.12.4.2 martin uint8_t cmd_reserved0[4];
1673 1.12.4.2 martin uint16_t cmd_op_mod;
1674 1.12.4.2 martin uint8_t cmd_reserved1[8];
1675 1.12.4.2 martin } __packed __aligned(4);
1676 1.12.4.2 martin
1677 1.12.4.2 martin struct mcx_cmd_destroy_flow_group_mb_in {
1678 1.12.4.2 martin uint8_t cmd_table_type;
1679 1.12.4.2 martin uint8_t cmd_reserved0[3];
1680 1.12.4.2 martin uint32_t cmd_table_id;
1681 1.12.4.2 martin uint32_t cmd_group_id;
1682 1.12.4.2 martin uint8_t cmd_reserved1[36];
1683 1.12.4.2 martin } __packed __aligned(4);
1684 1.12.4.2 martin
1685 1.12.4.2 martin struct mcx_cmd_destroy_flow_group_out {
1686 1.12.4.2 martin uint8_t cmd_status;
1687 1.12.4.2 martin uint8_t cmd_reserved0[3];
1688 1.12.4.2 martin uint32_t cmd_syndrome;
1689 1.12.4.2 martin uint8_t cmd_reserved1[8];
1690 1.12.4.2 martin } __packed __aligned(4);
1691 1.12.4.2 martin
1692 1.12.4.2 martin struct mcx_cmd_set_flow_table_entry_in {
1693 1.12.4.2 martin uint16_t cmd_opcode;
1694 1.12.4.2 martin uint8_t cmd_reserved0[4];
1695 1.12.4.2 martin uint16_t cmd_op_mod;
1696 1.12.4.2 martin uint8_t cmd_reserved1[8];
1697 1.12.4.2 martin } __packed __aligned(4);
1698 1.12.4.2 martin
1699 1.12.4.2 martin struct mcx_cmd_set_flow_table_entry_mb_in {
1700 1.12.4.2 martin uint8_t cmd_table_type;
1701 1.12.4.2 martin uint8_t cmd_reserved0[3];
1702 1.12.4.2 martin uint32_t cmd_table_id;
1703 1.12.4.2 martin uint32_t cmd_modify_enable_mask;
1704 1.12.4.2 martin uint8_t cmd_reserved1[4];
1705 1.12.4.2 martin uint32_t cmd_flow_index;
1706 1.12.4.2 martin uint8_t cmd_reserved2[28];
1707 1.12.4.2 martin struct mcx_flow_ctx cmd_flow_ctx;
1708 1.12.4.2 martin } __packed __aligned(4);
1709 1.12.4.2 martin
1710 1.12.4.2 martin struct mcx_cmd_set_flow_table_entry_out {
1711 1.12.4.2 martin uint8_t cmd_status;
1712 1.12.4.2 martin uint8_t cmd_reserved0[3];
1713 1.12.4.2 martin uint32_t cmd_syndrome;
1714 1.12.4.2 martin uint8_t cmd_reserved1[8];
1715 1.12.4.2 martin } __packed __aligned(4);
1716 1.12.4.2 martin
1717 1.12.4.2 martin struct mcx_cmd_query_flow_table_entry_in {
1718 1.12.4.2 martin uint16_t cmd_opcode;
1719 1.12.4.2 martin uint8_t cmd_reserved0[4];
1720 1.12.4.2 martin uint16_t cmd_op_mod;
1721 1.12.4.2 martin uint8_t cmd_reserved1[8];
1722 1.12.4.2 martin } __packed __aligned(4);
1723 1.12.4.2 martin
1724 1.12.4.2 martin struct mcx_cmd_query_flow_table_entry_mb_in {
1725 1.12.4.2 martin uint8_t cmd_table_type;
1726 1.12.4.2 martin uint8_t cmd_reserved0[3];
1727 1.12.4.2 martin uint32_t cmd_table_id;
1728 1.12.4.2 martin uint8_t cmd_reserved1[8];
1729 1.12.4.2 martin uint32_t cmd_flow_index;
1730 1.12.4.2 martin uint8_t cmd_reserved2[28];
1731 1.12.4.2 martin } __packed __aligned(4);
1732 1.12.4.2 martin
1733 1.12.4.2 martin struct mcx_cmd_query_flow_table_entry_out {
1734 1.12.4.2 martin uint8_t cmd_status;
1735 1.12.4.2 martin uint8_t cmd_reserved0[3];
1736 1.12.4.2 martin uint32_t cmd_syndrome;
1737 1.12.4.2 martin uint8_t cmd_reserved1[8];
1738 1.12.4.2 martin } __packed __aligned(4);
1739 1.12.4.2 martin
1740 1.12.4.2 martin struct mcx_cmd_query_flow_table_entry_mb_out {
1741 1.12.4.2 martin uint8_t cmd_reserved0[48];
1742 1.12.4.2 martin struct mcx_flow_ctx cmd_flow_ctx;
1743 1.12.4.2 martin } __packed __aligned(4);
1744 1.12.4.2 martin
1745 1.12.4.2 martin struct mcx_cmd_delete_flow_table_entry_in {
1746 1.12.4.2 martin uint16_t cmd_opcode;
1747 1.12.4.2 martin uint8_t cmd_reserved0[4];
1748 1.12.4.2 martin uint16_t cmd_op_mod;
1749 1.12.4.2 martin uint8_t cmd_reserved1[8];
1750 1.12.4.2 martin } __packed __aligned(4);
1751 1.12.4.2 martin
1752 1.12.4.2 martin struct mcx_cmd_delete_flow_table_entry_mb_in {
1753 1.12.4.2 martin uint8_t cmd_table_type;
1754 1.12.4.2 martin uint8_t cmd_reserved0[3];
1755 1.12.4.2 martin uint32_t cmd_table_id;
1756 1.12.4.2 martin uint8_t cmd_reserved1[8];
1757 1.12.4.2 martin uint32_t cmd_flow_index;
1758 1.12.4.2 martin uint8_t cmd_reserved2[28];
1759 1.12.4.2 martin } __packed __aligned(4);
1760 1.12.4.2 martin
1761 1.12.4.2 martin struct mcx_cmd_delete_flow_table_entry_out {
1762 1.12.4.2 martin uint8_t cmd_status;
1763 1.12.4.2 martin uint8_t cmd_reserved0[3];
1764 1.12.4.2 martin uint32_t cmd_syndrome;
1765 1.12.4.2 martin uint8_t cmd_reserved1[8];
1766 1.12.4.2 martin } __packed __aligned(4);
1767 1.12.4.2 martin
1768 1.12.4.2 martin struct mcx_cmd_query_flow_group_in {
1769 1.12.4.2 martin uint16_t cmd_opcode;
1770 1.12.4.2 martin uint8_t cmd_reserved0[4];
1771 1.12.4.2 martin uint16_t cmd_op_mod;
1772 1.12.4.2 martin uint8_t cmd_reserved1[8];
1773 1.12.4.2 martin } __packed __aligned(4);
1774 1.12.4.2 martin
1775 1.12.4.2 martin struct mcx_cmd_query_flow_group_mb_in {
1776 1.12.4.2 martin uint8_t cmd_table_type;
1777 1.12.4.2 martin uint8_t cmd_reserved0[3];
1778 1.12.4.2 martin uint32_t cmd_table_id;
1779 1.12.4.2 martin uint32_t cmd_group_id;
1780 1.12.4.2 martin uint8_t cmd_reserved1[36];
1781 1.12.4.2 martin } __packed __aligned(4);
1782 1.12.4.2 martin
1783 1.12.4.2 martin struct mcx_cmd_query_flow_group_out {
1784 1.12.4.2 martin uint8_t cmd_status;
1785 1.12.4.2 martin uint8_t cmd_reserved0[3];
1786 1.12.4.2 martin uint32_t cmd_syndrome;
1787 1.12.4.2 martin uint8_t cmd_reserved1[8];
1788 1.12.4.2 martin } __packed __aligned(4);
1789 1.12.4.2 martin
1790 1.12.4.2 martin struct mcx_cmd_query_flow_group_mb_out {
1791 1.12.4.2 martin uint8_t cmd_reserved0[12];
1792 1.12.4.2 martin uint32_t cmd_start_flow_index;
1793 1.12.4.2 martin uint8_t cmd_reserved1[4];
1794 1.12.4.2 martin uint32_t cmd_end_flow_index;
1795 1.12.4.2 martin uint8_t cmd_reserved2[20];
1796 1.12.4.2 martin uint32_t cmd_match_criteria_enable;
1797 1.12.4.2 martin uint8_t cmd_match_criteria[512];
1798 1.12.4.2 martin uint8_t cmd_reserved4[448];
1799 1.12.4.2 martin } __packed __aligned(4);
1800 1.12.4.2 martin
1801 1.12.4.2 martin struct mcx_cmd_query_flow_table_in {
1802 1.12.4.2 martin uint16_t cmd_opcode;
1803 1.12.4.2 martin uint8_t cmd_reserved0[4];
1804 1.12.4.2 martin uint16_t cmd_op_mod;
1805 1.12.4.2 martin uint8_t cmd_reserved1[8];
1806 1.12.4.2 martin } __packed __aligned(4);
1807 1.12.4.2 martin
1808 1.12.4.2 martin struct mcx_cmd_query_flow_table_mb_in {
1809 1.12.4.2 martin uint8_t cmd_table_type;
1810 1.12.4.2 martin uint8_t cmd_reserved0[3];
1811 1.12.4.2 martin uint32_t cmd_table_id;
1812 1.12.4.2 martin uint8_t cmd_reserved1[40];
1813 1.12.4.2 martin } __packed __aligned(4);
1814 1.12.4.2 martin
1815 1.12.4.2 martin struct mcx_cmd_query_flow_table_out {
1816 1.12.4.2 martin uint8_t cmd_status;
1817 1.12.4.2 martin uint8_t cmd_reserved0[3];
1818 1.12.4.2 martin uint32_t cmd_syndrome;
1819 1.12.4.2 martin uint8_t cmd_reserved1[8];
1820 1.12.4.2 martin } __packed __aligned(4);
1821 1.12.4.2 martin
1822 1.12.4.2 martin struct mcx_cmd_query_flow_table_mb_out {
1823 1.12.4.2 martin uint8_t cmd_reserved0[4];
1824 1.12.4.2 martin struct mcx_flow_table_ctx cmd_ctx;
1825 1.12.4.2 martin } __packed __aligned(4);
1826 1.12.4.2 martin
1827 1.12.4.2 martin struct mcx_cmd_alloc_flow_counter_in {
1828 1.12.4.2 martin uint16_t cmd_opcode;
1829 1.12.4.2 martin uint8_t cmd_reserved0[4];
1830 1.12.4.2 martin uint16_t cmd_op_mod;
1831 1.12.4.2 martin uint8_t cmd_reserved1[8];
1832 1.12.4.2 martin } __packed __aligned(4);
1833 1.12.4.2 martin
1834 1.12.4.2 martin struct mcx_cmd_query_rq_in {
1835 1.12.4.2 martin uint16_t cmd_opcode;
1836 1.12.4.2 martin uint8_t cmd_reserved0[4];
1837 1.12.4.2 martin uint16_t cmd_op_mod;
1838 1.12.4.2 martin uint32_t cmd_rqn;
1839 1.12.4.2 martin uint8_t cmd_reserved1[4];
1840 1.12.4.2 martin } __packed __aligned(4);
1841 1.12.4.2 martin
1842 1.12.4.2 martin struct mcx_cmd_query_rq_out {
1843 1.12.4.2 martin uint8_t cmd_status;
1844 1.12.4.2 martin uint8_t cmd_reserved0[3];
1845 1.12.4.2 martin uint32_t cmd_syndrome;
1846 1.12.4.2 martin uint8_t cmd_reserved1[8];
1847 1.12.4.2 martin } __packed __aligned(4);
1848 1.12.4.2 martin
1849 1.12.4.2 martin struct mcx_cmd_query_rq_mb_out {
1850 1.12.4.2 martin uint8_t cmd_reserved0[16];
1851 1.12.4.2 martin struct mcx_rq_ctx cmd_ctx;
1852 1.12.4.2 martin } __packed __aligned(4);
1853 1.12.4.2 martin
1854 1.12.4.2 martin struct mcx_cmd_query_sq_in {
1855 1.12.4.2 martin uint16_t cmd_opcode;
1856 1.12.4.2 martin uint8_t cmd_reserved0[4];
1857 1.12.4.2 martin uint16_t cmd_op_mod;
1858 1.12.4.2 martin uint32_t cmd_sqn;
1859 1.12.4.2 martin uint8_t cmd_reserved1[4];
1860 1.12.4.2 martin } __packed __aligned(4);
1861 1.12.4.2 martin
1862 1.12.4.2 martin struct mcx_cmd_query_sq_out {
1863 1.12.4.2 martin uint8_t cmd_status;
1864 1.12.4.2 martin uint8_t cmd_reserved0[3];
1865 1.12.4.2 martin uint32_t cmd_syndrome;
1866 1.12.4.2 martin uint8_t cmd_reserved1[8];
1867 1.12.4.2 martin } __packed __aligned(4);
1868 1.12.4.2 martin
1869 1.12.4.2 martin struct mcx_cmd_query_sq_mb_out {
1870 1.12.4.2 martin uint8_t cmd_reserved0[16];
1871 1.12.4.2 martin struct mcx_sq_ctx cmd_ctx;
1872 1.12.4.2 martin } __packed __aligned(4);
1873 1.12.4.2 martin
1874 1.12.4.2 martin struct mcx_cmd_alloc_flow_counter_out {
1875 1.12.4.2 martin uint8_t cmd_status;
1876 1.12.4.2 martin uint8_t cmd_reserved0[3];
1877 1.12.4.2 martin uint32_t cmd_syndrome;
1878 1.12.4.2 martin uint8_t cmd_reserved1[2];
1879 1.12.4.2 martin uint16_t cmd_flow_counter_id;
1880 1.12.4.2 martin uint8_t cmd_reserved2[4];
1881 1.12.4.2 martin } __packed __aligned(4);
1882 1.12.4.2 martin
1883 1.12.4.2 martin struct mcx_wq_doorbell {
1884 1.12.4.2 martin uint32_t db_recv_counter;
1885 1.12.4.2 martin uint32_t db_send_counter;
1886 1.12.4.2 martin } __packed __aligned(8);
1887 1.12.4.2 martin
1888 1.12.4.2 martin struct mcx_dmamem {
1889 1.12.4.2 martin bus_dmamap_t mxm_map;
1890 1.12.4.2 martin bus_dma_segment_t mxm_seg;
1891 1.12.4.2 martin int mxm_nsegs;
1892 1.12.4.2 martin size_t mxm_size;
1893 1.12.4.2 martin void *mxm_kva;
1894 1.12.4.2 martin };
1895 1.12.4.2 martin #define MCX_DMA_MAP(_mxm) ((_mxm)->mxm_map)
1896 1.12.4.2 martin #define MCX_DMA_DVA(_mxm) ((_mxm)->mxm_map->dm_segs[0].ds_addr)
1897 1.12.4.2 martin #define MCX_DMA_KVA(_mxm) ((void *)(_mxm)->mxm_kva)
1898 1.12.4.2 martin #define MCX_DMA_LEN(_mxm) ((_mxm)->mxm_size)
1899 1.12.4.2 martin
1900 1.12.4.2 martin struct mcx_hwmem {
1901 1.12.4.2 martin bus_dmamap_t mhm_map;
1902 1.12.4.2 martin bus_dma_segment_t *mhm_segs;
1903 1.12.4.2 martin unsigned int mhm_seg_count;
1904 1.12.4.2 martin unsigned int mhm_npages;
1905 1.12.4.2 martin };
1906 1.12.4.2 martin
1907 1.12.4.2 martin struct mcx_slot {
1908 1.12.4.2 martin bus_dmamap_t ms_map;
1909 1.12.4.2 martin struct mbuf *ms_m;
1910 1.12.4.2 martin };
1911 1.12.4.2 martin
1912 1.12.4.2 martin struct mcx_cq {
1913 1.12.4.2 martin int cq_n;
1914 1.12.4.2 martin struct mcx_dmamem cq_mem;
1915 1.12.4.2 martin uint32_t *cq_doorbell;
1916 1.12.4.2 martin uint32_t cq_cons;
1917 1.12.4.2 martin uint32_t cq_count;
1918 1.12.4.2 martin };
1919 1.12.4.2 martin
1920 1.12.4.2 martin struct mcx_calibration {
1921 1.12.4.2 martin uint64_t c_timestamp; /* previous mcx chip time */
1922 1.12.4.2 martin uint64_t c_uptime; /* previous kernel nanouptime */
1923 1.12.4.2 martin uint64_t c_tbase; /* mcx chip time */
1924 1.12.4.2 martin uint64_t c_ubase; /* kernel nanouptime */
1925 1.12.4.2 martin uint64_t c_tdiff;
1926 1.12.4.2 martin uint64_t c_udiff;
1927 1.12.4.2 martin };
1928 1.12.4.2 martin
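/*
 * intervals for the timestamp calibration callout (sc_calibrate):
 * a quick first pass, then the steady-state period
 */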
1929 1.12.4.2 martin #define MCX_CALIBRATE_FIRST 2
1930 1.12.4.2 martin #define MCX_CALIBRATE_NORMAL 30
1931 1.12.4.2 martin
1932 1.12.4.2 martin struct mcx_rxring {
1933 1.12.4.2 martin u_int rxr_total;
1934 1.12.4.2 martin u_int rxr_inuse;
1935 1.12.4.2 martin };
1936 1.12.4.2 martin
1937 1.12.4.2 martin MBUFQ_HEAD(mcx_mbufq);
1938 1.12.4.2 martin
1939 1.12.4.2 martin struct mcx_softc {
1940 1.12.4.2 martin device_t sc_dev;
1941 1.12.4.2 martin struct ethercom sc_ec;
1942 1.12.4.2 martin struct ifmedia sc_media;
1943 1.12.4.2 martin uint64_t sc_media_status;
1944 1.12.4.2 martin uint64_t sc_media_active;
1945 1.12.4.2 martin kmutex_t sc_media_mutex;
1946 1.12.4.2 martin
1947 1.12.4.2 martin pci_chipset_tag_t sc_pc;
1948 1.12.4.2 martin pci_intr_handle_t *sc_intrs;
1949 1.12.4.2 martin void *sc_ihs[MCX_MAX_NINTR];
1950 1.12.4.2 martin pcitag_t sc_tag;
1951 1.12.4.2 martin
1952 1.12.4.2 martin bus_dma_tag_t sc_dmat;
1953 1.12.4.2 martin bus_space_tag_t sc_memt;
1954 1.12.4.2 martin bus_space_handle_t sc_memh;
1955 1.12.4.2 martin bus_size_t sc_mems;
1956 1.12.4.2 martin
1957 1.12.4.2 martin struct mcx_dmamem sc_cmdq_mem;
1958 1.12.4.2 martin unsigned int sc_cmdq_mask;
1959 1.12.4.2 martin unsigned int sc_cmdq_size;
1960 1.12.4.2 martin
1961 1.12.4.2 martin unsigned int sc_cmdq_token;
1962 1.12.4.2 martin
1963 1.12.4.2 martin struct mcx_hwmem sc_boot_pages;
1964 1.12.4.2 martin struct mcx_hwmem sc_init_pages;
1965 1.12.4.2 martin struct mcx_hwmem sc_regular_pages;
1966 1.12.4.2 martin
1967 1.12.4.2 martin int sc_uar;
1968 1.12.4.2 martin int sc_pd;
1969 1.12.4.2 martin int sc_tdomain;
1970 1.12.4.2 martin uint32_t sc_lkey;
1971 1.12.4.2 martin
1972 1.12.4.2 martin struct mcx_dmamem sc_doorbell_mem;
1973 1.12.4.2 martin
1974 1.12.4.2 martin int sc_eqn;
1975 1.12.4.2 martin int sc_eq_cons;
1976 1.12.4.2 martin struct mcx_dmamem sc_eq_mem;
1977 1.12.4.2 martin int sc_hardmtu;
1978 1.12.4.2 martin
1979 1.12.4.2 martin struct workqueue *sc_workq;
1980 1.12.4.2 martin struct work sc_port_change;
1981 1.12.4.2 martin
1982 1.12.4.2 martin int sc_flow_table_id;
1983 1.12.4.2 martin #define MCX_FLOW_GROUP_PROMISC 0
1984 1.12.4.2 martin #define MCX_FLOW_GROUP_ALLMULTI 1
1985 1.12.4.2 martin #define MCX_FLOW_GROUP_MAC 2
1986 1.12.4.2 martin #define MCX_NUM_FLOW_GROUPS 3
1987 1.12.4.2 martin int sc_flow_group_id[MCX_NUM_FLOW_GROUPS];
1988 1.12.4.2 martin int sc_flow_group_size[MCX_NUM_FLOW_GROUPS];
1989 1.12.4.2 martin int sc_flow_group_start[MCX_NUM_FLOW_GROUPS];
1990 1.12.4.2 martin int sc_promisc_flow_enabled;
1991 1.12.4.2 martin int sc_allmulti_flow_enabled;
1992 1.12.4.2 martin int sc_mcast_flow_base;
1993 1.12.4.2 martin int sc_extra_mcast;
1994 1.12.4.2 martin uint8_t sc_mcast_flows[MCX_NUM_MCAST_FLOWS][ETHER_ADDR_LEN];
1995 1.12.4.2 martin
1996 1.12.4.2 martin struct mcx_calibration sc_calibration[2];
1997 1.12.4.2 martin unsigned int sc_calibration_gen;
1998 1.12.4.2 martin callout_t sc_calibrate;
1999 1.12.4.2 martin
2000 1.12.4.2 martin struct mcx_cq sc_cq[MCX_MAX_CQS];
2001 1.12.4.2 martin int sc_num_cq;
2002 1.12.4.2 martin
2003 1.12.4.2 martin /* rx */
2004 1.12.4.2 martin int sc_tirn;
2005 1.12.4.2 martin int sc_rqn;
2006 1.12.4.2 martin struct mcx_dmamem sc_rq_mem;
2007 1.12.4.2 martin struct mcx_slot *sc_rx_slots;
2008 1.12.4.2 martin uint32_t *sc_rx_doorbell;
2009 1.12.4.2 martin
2010 1.12.4.2 martin uint32_t sc_rx_prod;
2011 1.12.4.2 martin callout_t sc_rx_refill;
2012 1.12.4.2 martin struct mcx_rxring sc_rxr;
2013 1.12.4.2 martin
2014 1.12.4.2 martin /* tx */
2015 1.12.4.2 martin int sc_tisn;
2016 1.12.4.2 martin int sc_sqn;
2017 1.12.4.2 martin struct mcx_dmamem sc_sq_mem;
2018 1.12.4.2 martin struct mcx_slot *sc_tx_slots;
2019 1.12.4.2 martin uint32_t *sc_tx_doorbell;
2020 1.12.4.2 martin int sc_bf_size;
2021 1.12.4.2 martin int sc_bf_offset;
2022 1.12.4.2 martin
2023 1.12.4.2 martin uint32_t sc_tx_cons;
2024 1.12.4.2 martin uint32_t sc_tx_prod;
2025 1.12.4.2 martin
2026 1.12.4.2 martin uint64_t sc_last_cq_db;
2027 1.12.4.2 martin uint64_t sc_last_srq_db;
2028 1.12.4.2 martin };
2029 1.12.4.2 martin #define DEVNAME(_sc) device_xname((_sc)->sc_dev)
2030 1.12.4.2 martin
2031 1.12.4.2 martin static int mcx_match(device_t, cfdata_t, void *);
2032 1.12.4.2 martin static void mcx_attach(device_t, device_t, void *);
2033 1.12.4.2 martin
2034 1.12.4.2 martin static void mcx_rxr_init(struct mcx_rxring *, u_int, u_int);
2035 1.12.4.2 martin static u_int mcx_rxr_get(struct mcx_rxring *, u_int);
2036 1.12.4.2 martin static void mcx_rxr_put(struct mcx_rxring *, u_int);
2037 1.12.4.2 martin static u_int mcx_rxr_inuse(struct mcx_rxring *);
2038 1.12.4.2 martin
2039 1.12.4.2 martin static int mcx_version(struct mcx_softc *);
2040 1.12.4.2 martin static int mcx_init_wait(struct mcx_softc *);
2041 1.12.4.2 martin static int mcx_enable_hca(struct mcx_softc *);
2042 1.12.4.2 martin static int mcx_teardown_hca(struct mcx_softc *, uint16_t);
2043 1.12.4.2 martin static int mcx_access_hca_reg(struct mcx_softc *, uint16_t, int, void *,
2044 1.12.4.2 martin int);
2045 1.12.4.2 martin static int mcx_issi(struct mcx_softc *);
2046 1.12.4.2 martin static int mcx_pages(struct mcx_softc *, struct mcx_hwmem *, uint16_t);
2047 1.12.4.2 martin static int mcx_hca_max_caps(struct mcx_softc *);
2048 1.12.4.2 martin static int mcx_hca_set_caps(struct mcx_softc *);
2049 1.12.4.2 martin static int mcx_init_hca(struct mcx_softc *);
2050 1.12.4.2 martin static int mcx_set_driver_version(struct mcx_softc *);
2051 1.12.4.2 martin static int mcx_iff(struct mcx_softc *);
2052 1.12.4.2 martin static int mcx_alloc_uar(struct mcx_softc *);
2053 1.12.4.2 martin static int mcx_alloc_pd(struct mcx_softc *);
2054 1.12.4.2 martin static int mcx_alloc_tdomain(struct mcx_softc *);
2055 1.12.4.2 martin static int mcx_create_eq(struct mcx_softc *);
2056 1.12.4.2 martin static int mcx_query_nic_vport_context(struct mcx_softc *, uint8_t *);
2057 1.12.4.2 martin static int mcx_query_special_contexts(struct mcx_softc *);
2058 1.12.4.2 martin static int mcx_set_port_mtu(struct mcx_softc *, int);
2059 1.12.4.2 martin static int mcx_create_cq(struct mcx_softc *, int);
2060 1.12.4.2 martin static int mcx_destroy_cq(struct mcx_softc *, int);
2061 1.12.4.2 martin static int mcx_create_sq(struct mcx_softc *, int);
2062 1.12.4.2 martin static int mcx_destroy_sq(struct mcx_softc *);
2063 1.12.4.2 martin static int mcx_ready_sq(struct mcx_softc *);
2064 1.12.4.2 martin static int mcx_create_rq(struct mcx_softc *, int);
2065 1.12.4.2 martin static int mcx_destroy_rq(struct mcx_softc *);
2066 1.12.4.2 martin static int mcx_ready_rq(struct mcx_softc *);
2067 1.12.4.2 martin static int mcx_create_tir(struct mcx_softc *);
2068 1.12.4.2 martin static int mcx_destroy_tir(struct mcx_softc *);
2069 1.12.4.2 martin static int mcx_create_tis(struct mcx_softc *);
2070 1.12.4.2 martin static int mcx_destroy_tis(struct mcx_softc *);
2071 1.12.4.2 martin static int mcx_create_flow_table(struct mcx_softc *, int);
2072 1.12.4.2 martin static int mcx_set_flow_table_root(struct mcx_softc *);
2073 1.12.4.2 martin static int mcx_destroy_flow_table(struct mcx_softc *);
2074 1.12.4.2 martin static int mcx_create_flow_group(struct mcx_softc *, int, int,
2075 1.12.4.2 martin int, int, struct mcx_flow_match *);
2076 1.12.4.2 martin static int mcx_destroy_flow_group(struct mcx_softc *, int);
2077 1.12.4.2 martin static int mcx_set_flow_table_entry(struct mcx_softc *, int, int,
2078 1.12.4.2 martin const uint8_t *);
2079 1.12.4.2 martin static int mcx_delete_flow_table_entry(struct mcx_softc *, int, int);
2080 1.12.4.2 martin
2081 1.12.4.2 martin #if 0
2082 1.12.4.2 martin static int mcx_dump_flow_table(struct mcx_softc *);
2083 1.12.4.2 martin static int mcx_dump_flow_table_entry(struct mcx_softc *, int);
2084 1.12.4.2 martin static int mcx_dump_flow_group(struct mcx_softc *);
2085 1.12.4.2 martin static int mcx_dump_rq(struct mcx_softc *);
2086 1.12.4.2 martin static int mcx_dump_sq(struct mcx_softc *);
2087 1.12.4.2 martin #endif
2088 1.12.4.2 martin
2089 1.12.4.2 martin
2090 1.12.4.2 martin /*
2091 1.12.4.2 martin static void mcx_cmdq_dump(const struct mcx_cmdq_entry *);
2092 1.12.4.2 martin static void mcx_cmdq_mbox_dump(struct mcx_dmamem *, int);
2093 1.12.4.2 martin */
2094 1.12.4.2 martin static void mcx_refill(void *);
2095 1.12.4.2 martin static int mcx_process_rx(struct mcx_softc *, struct mcx_cq_entry *,
2096 1.12.4.2 martin struct mcx_mbufq *, const struct mcx_calibration *);
2097 1.12.4.2 martin static void mcx_process_txeof(struct mcx_softc *, struct mcx_cq_entry *,
2098 1.12.4.2 martin int *);
2099 1.12.4.2 martin static void mcx_process_cq(struct mcx_softc *, struct mcx_cq *);
2100 1.12.4.2 martin
2101 1.12.4.2 martin static void mcx_arm_cq(struct mcx_softc *, struct mcx_cq *);
2102 1.12.4.2 martin static void mcx_arm_eq(struct mcx_softc *);
2103 1.12.4.2 martin static int mcx_intr(void *);
2104 1.12.4.2 martin
2105 1.12.4.2 martin static int mcx_init(struct ifnet *);
2106 1.12.4.2 martin static void mcx_stop(struct ifnet *, int);
2107 1.12.4.2 martin static int mcx_ioctl(struct ifnet *, u_long, void *);
2108 1.12.4.2 martin static void mcx_start(struct ifnet *);
2109 1.12.4.2 martin static void mcx_watchdog(struct ifnet *);
2110 1.12.4.2 martin static void mcx_media_add_types(struct mcx_softc *);
2111 1.12.4.2 martin static void mcx_media_status(struct ifnet *, struct ifmediareq *);
2112 1.12.4.2 martin static int mcx_media_change(struct ifnet *);
2113 1.12.4.2 martin #if 0
2114 1.12.4.2 martin static int mcx_get_sffpage(struct ifnet *, struct if_sffpage *);
2115 1.12.4.2 martin #endif
2116 1.12.4.2 martin static void mcx_port_change(struct work *, void *);
2117 1.12.4.2 martin
2118 1.12.4.2 martin static void mcx_calibrate_first(struct mcx_softc *);
2119 1.12.4.2 martin static void mcx_calibrate(void *);
2120 1.12.4.2 martin
2121 1.12.4.2 martin static inline uint32_t
2122 1.12.4.2 martin mcx_rd(struct mcx_softc *, bus_size_t);
2123 1.12.4.2 martin static inline void
2124 1.12.4.2 martin mcx_wr(struct mcx_softc *, bus_size_t, uint32_t);
2125 1.12.4.2 martin static inline void
2126 1.12.4.2 martin mcx_bar(struct mcx_softc *, bus_size_t, bus_size_t, int);
2127 1.12.4.2 martin
2128 1.12.4.2 martin static uint64_t mcx_timer(struct mcx_softc *);
2129 1.12.4.2 martin
2130 1.12.4.2 martin static int mcx_dmamem_alloc(struct mcx_softc *, struct mcx_dmamem *,
2131 1.12.4.2 martin bus_size_t, u_int align);
2132 1.12.4.2 martin static void mcx_dmamem_zero(struct mcx_dmamem *);
2133 1.12.4.2 martin static void mcx_dmamem_free(struct mcx_softc *, struct mcx_dmamem *);
2134 1.12.4.2 martin
2135 1.12.4.2 martin static int mcx_hwmem_alloc(struct mcx_softc *, struct mcx_hwmem *,
2136 1.12.4.2 martin unsigned int);
2137 1.12.4.2 martin static void mcx_hwmem_free(struct mcx_softc *, struct mcx_hwmem *);
2138 1.12.4.2 martin
2139 1.12.4.2 martin CFATTACH_DECL_NEW(mcx, sizeof(struct mcx_softc), mcx_match, mcx_attach, NULL, NULL);
2140 1.12.4.2 martin
2141 1.12.4.2 martin static const struct {
2142 1.12.4.2 martin pci_vendor_id_t vendor;
2143 1.12.4.2 martin pci_product_id_t product;
2144 1.12.4.2 martin } mcx_devices[] = {
2145 1.12.4.2 martin { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT27700 },
2146 1.12.4.2 martin { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT27710 },
2147 1.12.4.2 martin { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT27800 },
2148 1.12.4.2 martin { PCI_VENDOR_MELLANOX, PCI_PRODUCT_MELLANOX_MT28800 },
2149 1.12.4.2 martin };
2150 1.12.4.2 martin
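/*
 * ethernet protocol capability bits, by bit position, mapped to
 * ifmedia types; zero entries have no ifmedia equivalent
 */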
2151 1.12.4.2 martin static const uint64_t mcx_eth_cap_map[] = {
2152 1.12.4.2 martin IFM_1000_SGMII,
2153 1.12.4.2 martin IFM_1000_KX,
2154 1.12.4.2 martin IFM_10G_CX4,
2155 1.12.4.2 martin IFM_10G_KX4,
2156 1.12.4.2 martin IFM_10G_KR,
2157 1.12.4.2 martin IFM_20G_KR2,
2158 1.12.4.2 martin IFM_40G_CR4,
2159 1.12.4.2 martin IFM_40G_KR4,
2160 1.12.4.2 martin IFM_56G_R4,
2161 1.12.4.2 martin 0,
2162 1.12.4.2 martin 0,
2163 1.12.4.2 martin 0,
2164 1.12.4.2 martin IFM_10G_CR1,
2165 1.12.4.2 martin IFM_10G_SR,
2166 1.12.4.2 martin IFM_10G_LR,
2167 1.12.4.2 martin IFM_40G_SR4,
2168 1.12.4.2 martin IFM_40G_LR4,
2169 1.12.4.2 martin 0,
2170 1.12.4.2 martin IFM_50G_SR2,
2171 1.12.4.2 martin 0,
2172 1.12.4.2 martin IFM_100G_CR4,
2173 1.12.4.2 martin IFM_100G_SR4,
2174 1.12.4.2 martin IFM_100G_KR4,
2175 1.12.4.2 martin IFM_100G_LR4,
2176 1.12.4.2 martin IFM_100_TX,
2177 1.12.4.2 martin IFM_1000_T,
2178 1.12.4.2 martin IFM_10G_T,
2179 1.12.4.2 martin IFM_25G_CR,
2180 1.12.4.2 martin IFM_25G_KR,
2181 1.12.4.2 martin IFM_25G_SR,
2182 1.12.4.2 martin IFM_50G_CR2,
2183 1.12.4.2 martin IFM_50G_KR2
2184 1.12.4.2 martin };
2185 1.12.4.2 martin
2186 1.12.4.2 martin static int
2187 1.12.4.2 martin mcx_match(device_t parent, cfdata_t cf, void *aux)
2188 1.12.4.2 martin {
2189 1.12.4.2 martin struct pci_attach_args *pa = aux;
2190 1.12.4.2 martin int n;
2191 1.12.4.2 martin
2192 1.12.4.2 martin for (n = 0; n < __arraycount(mcx_devices); n++) {
2193 1.12.4.2 martin if (PCI_VENDOR(pa->pa_id) == mcx_devices[n].vendor &&
2194 1.12.4.2 martin PCI_PRODUCT(pa->pa_id) == mcx_devices[n].product)
2195 1.12.4.2 martin return 1;
2196 1.12.4.2 martin }
2197 1.12.4.2 martin
2198 1.12.4.2 martin return 0;
2199 1.12.4.2 martin }
2200 1.12.4.2 martin
2201 1.12.4.2 martin static void
2202 1.12.4.2 martin mcx_attach(device_t parent, device_t self, void *aux)
2203 1.12.4.2 martin {
2204 1.12.4.2 martin struct mcx_softc *sc = device_private(self);
2205 1.12.4.2 martin struct ifnet *ifp = &sc->sc_ec.ec_if;
2206 1.12.4.2 martin struct pci_attach_args *pa = aux;
2207 1.12.4.2 martin uint8_t enaddr[ETHER_ADDR_LEN];
2208 1.12.4.2 martin int counts[PCI_INTR_TYPE_SIZE];
2209 1.12.4.2 martin char intrbuf[PCI_INTRSTR_LEN];
2210 1.12.4.2 martin pcireg_t memtype;
2211 1.12.4.2 martin uint32_t r;
2212 1.12.4.2 martin unsigned int cq_stride;
2213 1.12.4.2 martin unsigned int cq_size;
2214 1.12.4.2 martin const char *intrstr;
2215 1.12.4.2 martin int i;
2216 1.12.4.2 martin
2217 1.12.4.2 martin sc->sc_dev = self;
2218 1.12.4.2 martin sc->sc_pc = pa->pa_pc;
2219 1.12.4.2 martin sc->sc_tag = pa->pa_tag;
2220 1.12.4.2 martin if (pci_dma64_available(pa))
2221 1.12.4.2 martin sc->sc_dmat = pa->pa_dmat64;
2222 1.12.4.2 martin else
2223 1.12.4.2 martin sc->sc_dmat = pa->pa_dmat;
2224 1.12.4.2 martin
2225 1.12.4.2 martin /* Map the PCI memory space */
2226 1.12.4.2 martin memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, MCX_HCA_BAR);
2227 1.12.4.2 martin if (pci_mapreg_map(pa, MCX_HCA_BAR, memtype,
2228 1.12.4.2 martin 0 /*BUS_SPACE_MAP_PREFETCHABLE*/, &sc->sc_memt, &sc->sc_memh,
2229 1.12.4.2 martin NULL, &sc->sc_mems)) {
2230 1.12.4.2 martin aprint_error(": unable to map register memory\n");
2231 1.12.4.2 martin return;
2232 1.12.4.2 martin }
2233 1.12.4.2 martin
2234 1.12.4.2 martin pci_aprint_devinfo(pa, "Ethernet controller");
2235 1.12.4.2 martin
2236 1.12.4.2 martin mutex_init(&sc->sc_media_mutex, MUTEX_DEFAULT, IPL_SOFTNET);
2237 1.12.4.2 martin
2238 1.12.4.2 martin if (mcx_version(sc) != 0) {
2239 1.12.4.2 martin /* error printed by mcx_version */
2240 1.12.4.2 martin goto unmap;
2241 1.12.4.2 martin }
2242 1.12.4.2 martin
2243 1.12.4.2 martin r = mcx_rd(sc, MCX_CMDQ_ADDR_LO);
2244 1.12.4.2 martin cq_stride = 1 << MCX_CMDQ_LOG_STRIDE(r); /* size of the entries */
2245 1.12.4.2 martin cq_size = 1 << MCX_CMDQ_LOG_SIZE(r); /* number of entries */
2246 1.12.4.2 martin if (cq_size > MCX_MAX_CQE) {
2247 1.12.4.2 martin aprint_error_dev(self,
2248 1.12.4.2 martin "command queue size overflow %u\n", cq_size);
2249 1.12.4.2 martin goto unmap;
2250 1.12.4.2 martin }
2251 1.12.4.2 martin if (cq_stride < sizeof(struct mcx_cmdq_entry)) {
2252 1.12.4.2 martin aprint_error_dev(self,
2253 1.12.4.2 martin "command queue entry size underflow %u\n", cq_stride);
2254 1.12.4.2 martin goto unmap;
2255 1.12.4.2 martin }
2256 1.12.4.2 martin if (cq_stride * cq_size > MCX_PAGE_SIZE) {
2257 1.12.4.2 martin aprint_error_dev(self, "command queue page overflow\n");
2258 1.12.4.2 martin goto unmap;
2259 1.12.4.2 martin }
2260 1.12.4.2 martin
2261 1.12.4.2 martin if (mcx_dmamem_alloc(sc, &sc->sc_doorbell_mem, MCX_PAGE_SIZE,
2262 1.12.4.2 martin MCX_PAGE_SIZE) != 0) {
2263 1.12.4.2 martin aprint_error_dev(self, "unable to allocate doorbell memory\n");
2264 1.12.4.2 martin goto unmap;
2265 1.12.4.2 martin }
2266 1.12.4.2 martin
2267 1.12.4.2 martin if (mcx_dmamem_alloc(sc, &sc->sc_cmdq_mem, MCX_PAGE_SIZE,
2268 1.12.4.2 martin MCX_PAGE_SIZE) != 0) {
2269 1.12.4.2 martin aprint_error_dev(self, "unable to allocate command queue\n");
2270 1.12.4.2 martin goto dbfree;
2271 1.12.4.2 martin }
2272 1.12.4.2 martin
2273 1.12.4.2 martin mcx_wr(sc, MCX_CMDQ_ADDR_HI, MCX_DMA_DVA(&sc->sc_cmdq_mem) >> 32);
2274 1.12.4.2 martin mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint32_t), BUS_SPACE_BARRIER_WRITE);
2275 1.12.4.2 martin mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_DMA_DVA(&sc->sc_cmdq_mem));
2276 1.12.4.2 martin mcx_bar(sc, MCX_CMDQ_ADDR_LO, sizeof(uint32_t), BUS_SPACE_BARRIER_WRITE);
2277 1.12.4.2 martin
2278 1.12.4.2 martin if (mcx_init_wait(sc) != 0) {
2279 1.12.4.2 martin aprint_error_dev(self, "timeout waiting for init\n");
2280 1.12.4.2 martin goto cqfree;
2281 1.12.4.2 martin }
2282 1.12.4.2 martin
2283 1.12.4.2 martin sc->sc_cmdq_mask = cq_size - 1;
2284 1.12.4.2 martin sc->sc_cmdq_size = cq_stride;
2285 1.12.4.2 martin
2286 1.12.4.2 martin if (mcx_enable_hca(sc) != 0) {
2287 1.12.4.2 martin /* error printed by mcx_enable_hca */
2288 1.12.4.2 martin goto cqfree;
2289 1.12.4.2 martin }
2290 1.12.4.2 martin
2291 1.12.4.2 martin if (mcx_issi(sc) != 0) {
2292 1.12.4.2 martin /* error printed by mcx_issi */
2293 1.12.4.2 martin goto teardown;
2294 1.12.4.2 martin }
2295 1.12.4.2 martin
2296 1.12.4.2 martin if (mcx_pages(sc, &sc->sc_boot_pages,
2297 1.12.4.2 martin htobe16(MCX_CMD_QUERY_PAGES_BOOT)) != 0) {
2298 1.12.4.2 martin /* error printed by mcx_pages */
2299 1.12.4.2 martin goto teardown;
2300 1.12.4.2 martin }
2301 1.12.4.2 martin
2302 1.12.4.2 martin if (mcx_hca_max_caps(sc) != 0) {
2303 1.12.4.2 martin /* error printed by mcx_hca_max_caps */
2304 1.12.4.2 martin goto teardown;
2305 1.12.4.2 martin }
2306 1.12.4.2 martin
2307 1.12.4.2 martin if (mcx_hca_set_caps(sc) != 0) {
2308 1.12.4.2 martin /* error printed by mcx_hca_set_caps */
2309 1.12.4.2 martin goto teardown;
2310 1.12.4.2 martin }
2311 1.12.4.2 martin
2312 1.12.4.2 martin if (mcx_pages(sc, &sc->sc_init_pages,
2313 1.12.4.2 martin htobe16(MCX_CMD_QUERY_PAGES_INIT)) != 0) {
2314 1.12.4.2 martin /* error printed by mcx_pages */
2315 1.12.4.2 martin goto teardown;
2316 1.12.4.2 martin }
2317 1.12.4.2 martin
2318 1.12.4.2 martin if (mcx_init_hca(sc) != 0) {
2319 1.12.4.2 martin /* error printed by mcx_init_hca */
2320 1.12.4.2 martin goto teardown;
2321 1.12.4.2 martin }
2322 1.12.4.2 martin
2323 1.12.4.2 martin if (mcx_pages(sc, &sc->sc_regular_pages,
2324 1.12.4.2 martin htobe16(MCX_CMD_QUERY_PAGES_REGULAR)) != 0) {
2325 1.12.4.2 martin /* error printed by mcx_pages */
2326 1.12.4.2 martin goto teardown;
2327 1.12.4.2 martin }
2328 1.12.4.2 martin
2329 1.12.4.2 martin /* apparently not necessary? */
2330 1.12.4.2 martin if (mcx_set_driver_version(sc) != 0) {
2331 1.12.4.2 martin /* error printed by mcx_set_driver_version */
2332 1.12.4.2 martin goto teardown;
2333 1.12.4.2 martin }
2334 1.12.4.2 martin
2335 1.12.4.2 martin if (mcx_iff(sc) != 0) { /* modify nic vport context */
2336 1.12.4.2 martin /* error printed by mcx_iff? */
2337 1.12.4.2 martin goto teardown;
2338 1.12.4.2 martin }
2339 1.12.4.2 martin
2340 1.12.4.2 martin if (mcx_alloc_uar(sc) != 0) {
2341 1.12.4.2 martin /* error printed by mcx_alloc_uar */
2342 1.12.4.2 martin goto teardown;
2343 1.12.4.2 martin }
2344 1.12.4.2 martin
2345 1.12.4.2 martin if (mcx_alloc_pd(sc) != 0) {
2346 1.12.4.2 martin /* error printed by mcx_alloc_pd */
2347 1.12.4.2 martin goto teardown;
2348 1.12.4.2 martin }
2349 1.12.4.2 martin
2350 1.12.4.2 martin if (mcx_alloc_tdomain(sc) != 0) {
2351 1.12.4.2 martin /* error printed by mcx_alloc_tdomain */
2352 1.12.4.2 martin goto teardown;
2353 1.12.4.2 martin }
2354 1.12.4.2 martin
2355 1.12.4.2 martin /*
2356 1.12.4.2 martin 	 * PRM makes no mention of MSI interrupts, just legacy and MSI-X.
2357 1.12.4.2 martin 	 * Mellanox support tells me legacy interrupts are not supported,
2358 1.12.4.2 martin 	 * so we're stuck with just MSI-X.
2359 1.12.4.2 martin */
2360 1.12.4.2 martin counts[PCI_INTR_TYPE_MSIX] = 1;
2361 1.12.4.2 martin counts[PCI_INTR_TYPE_MSI] = 0;
2362 1.12.4.2 martin counts[PCI_INTR_TYPE_INTX] = 0;
2363 1.12.4.2 martin if (pci_intr_alloc(pa, &sc->sc_intrs, counts, PCI_INTR_TYPE_MSIX) != 0) {
2364 1.12.4.2 martin aprint_error_dev(self, "unable to allocate interrupt\n");
2365 1.12.4.2 martin goto teardown;
2366 1.12.4.2 martin }
2367 1.12.4.2 martin KASSERT(pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX);
2368 1.12.4.2 martin
2369 1.12.4.2 martin #ifdef MCX_MPSAFE
2370 1.12.4.2 martin pci_intr_setattr(sc->sc_pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
2371 1.12.4.2 martin #endif
2372 1.12.4.2 martin
2373 1.12.4.2 martin intrstr = pci_intr_string(sc->sc_pc, sc->sc_intrs[0], intrbuf,
2374 1.12.4.2 martin sizeof(intrbuf));
2375 1.12.4.2 martin sc->sc_ihs[0] = pci_intr_establish_xname(sc->sc_pc, sc->sc_intrs[0],
2376 1.12.4.2 martin IPL_NET, mcx_intr, sc, DEVNAME(sc));
2377 1.12.4.2 martin if (sc->sc_ihs[0] == NULL) {
2378 1.12.4.2 martin aprint_error_dev(self, "unable to establish interrupt%s%s\n",
2379 1.12.4.2 martin intrstr ? " at " : "",
2380 1.12.4.2 martin intrstr ? intrstr : "");
2381 1.12.4.2 martin goto teardown;
2382 1.12.4.2 martin }
2383 1.12.4.2 martin
2384 1.12.4.2 martin aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
2385 1.12.4.2 martin
2386 1.12.4.2 martin if (mcx_create_eq(sc) != 0) {
2387 1.12.4.2 martin /* error printed by mcx_create_eq */
2388 1.12.4.2 martin goto teardown;
2389 1.12.4.2 martin }
2390 1.12.4.2 martin
2391 1.12.4.2 martin if (mcx_query_nic_vport_context(sc, enaddr) != 0) {
2392 1.12.4.2 martin /* error printed by mcx_query_nic_vport_context */
2393 1.12.4.2 martin goto teardown;
2394 1.12.4.2 martin }
2395 1.12.4.2 martin
2396 1.12.4.2 martin if (mcx_query_special_contexts(sc) != 0) {
2397 1.12.4.2 martin /* error printed by mcx_query_special_contexts */
2398 1.12.4.2 martin goto teardown;
2399 1.12.4.2 martin }
2400 1.12.4.2 martin
2401 1.12.4.2 martin if (mcx_set_port_mtu(sc, MCX_HARDMTU) != 0) {
2402 1.12.4.2 martin /* error printed by mcx_set_port_mtu */
2403 1.12.4.2 martin goto teardown;
2404 1.12.4.2 martin }
2405 1.12.4.2 martin
2406 1.12.4.2 martin aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2407 1.12.4.2 martin ether_sprintf(enaddr));
2408 1.12.4.2 martin
2409 1.12.4.2 martin strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
2410 1.12.4.2 martin ifp->if_softc = sc;
2411 1.12.4.2 martin ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;
2412 1.12.4.2 martin #ifdef MCX_MPSAFE
2413 1.12.4.2 martin ifp->if_extflags = IFEF_MPSAFE;
2414 1.12.4.2 martin #endif
2415 1.12.4.2 martin ifp->if_init = mcx_init;
2416 1.12.4.2 martin ifp->if_stop = mcx_stop;
2417 1.12.4.2 martin ifp->if_ioctl = mcx_ioctl;
2418 1.12.4.2 martin ifp->if_start = mcx_start;
2419 1.12.4.2 martin ifp->if_watchdog = mcx_watchdog;
2420 1.12.4.2 martin ifp->if_mtu = sc->sc_hardmtu;
2421 1.12.4.2 martin IFQ_SET_MAXLEN(&ifp->if_snd, 1024);
2422 1.12.4.2 martin IFQ_SET_READY(&ifp->if_snd);
2423 1.12.4.2 martin
2424 1.12.4.2 martin sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;
2425 1.12.4.2 martin
2426 1.12.4.2 martin sc->sc_ec.ec_ifmedia = &sc->sc_media;
2427 1.12.4.2 martin ifmedia_init_with_lock(&sc->sc_media, IFM_IMASK, mcx_media_change,
2428 1.12.4.2 martin mcx_media_status, &sc->sc_media_mutex);
2429 1.12.4.2 martin mcx_media_add_types(sc);
2430 1.12.4.2 martin ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
2431 1.12.4.2 martin ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
2432 1.12.4.2 martin
2433 1.12.4.2 martin if_attach(ifp);
2434 1.12.4.2 martin if_deferred_start_init(ifp, NULL);
2435 1.12.4.2 martin
2436 1.12.4.2 martin ether_ifattach(ifp, enaddr);
2437 1.12.4.2 martin
2438 1.12.4.2 martin callout_init(&sc->sc_rx_refill, CALLOUT_FLAGS);
2439 1.12.4.2 martin callout_setfunc(&sc->sc_rx_refill, mcx_refill, sc);
2440 1.12.4.2 martin callout_init(&sc->sc_calibrate, CALLOUT_FLAGS);
2441 1.12.4.2 martin callout_setfunc(&sc->sc_calibrate, mcx_calibrate, sc);
2442 1.12.4.2 martin
2443 1.12.4.2 martin if (workqueue_create(&sc->sc_workq, "mcxportchg", mcx_port_change, sc,
2444 1.12.4.2 martin PRI_NONE, IPL_NET, 0) != 0) {
2445 1.12.4.2 martin aprint_error_dev(self, "couldn't create port change workq\n");
2446 1.12.4.2 martin goto teardown;
2447 1.12.4.2 martin }
2448 1.12.4.2 martin
2449 1.12.4.2 martin mcx_port_change(&sc->sc_port_change, sc);
2450 1.12.4.2 martin
2451 1.12.4.2 martin sc->sc_flow_table_id = -1;
2452 1.12.4.2 martin for (i = 0; i < MCX_NUM_FLOW_GROUPS; i++) {
2453 1.12.4.2 martin sc->sc_flow_group_id[i] = -1;
2454 1.12.4.2 martin sc->sc_flow_group_size[i] = 0;
2455 1.12.4.2 martin sc->sc_flow_group_start[i] = 0;
2456 1.12.4.2 martin }
2457 1.12.4.2 martin sc->sc_extra_mcast = 0;
2458 1.12.4.2 martin memset(sc->sc_mcast_flows, 0, sizeof(sc->sc_mcast_flows));
2459 1.12.4.2 martin return;
2460 1.12.4.2 martin
2461 1.12.4.2 martin teardown:
2462 1.12.4.2 martin mcx_teardown_hca(sc, htobe16(MCX_CMD_TEARDOWN_HCA_GRACEFUL));
2463 1.12.4.2 martin /* error printed by mcx_teardown_hca, and we're already unwinding */
2464 1.12.4.2 martin cqfree:
2465 1.12.4.2 martin mcx_wr(sc, MCX_CMDQ_ADDR_HI, MCX_DMA_DVA(&sc->sc_cmdq_mem) >> 32);
2466 1.12.4.2 martin mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint64_t), BUS_SPACE_BARRIER_WRITE);
2467 1.12.4.2 martin mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_DMA_DVA(&sc->sc_cmdq_mem) |
2468 1.12.4.2 martin MCX_CMDQ_INTERFACE_DISABLED);
2469 1.12.4.2 martin mcx_bar(sc, MCX_CMDQ_ADDR_LO, sizeof(uint64_t), BUS_SPACE_BARRIER_WRITE);
2470 1.12.4.2 martin
2471 1.12.4.2 martin mcx_wr(sc, MCX_CMDQ_ADDR_HI, 0);
2472 1.12.4.2 martin mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint64_t), BUS_SPACE_BARRIER_WRITE);
2473 1.12.4.2 martin mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_CMDQ_INTERFACE_DISABLED);
2474 1.12.4.2 martin
2475 1.12.4.2 martin mcx_dmamem_free(sc, &sc->sc_cmdq_mem);
2476 1.12.4.2 martin dbfree:
2477 1.12.4.2 martin mcx_dmamem_free(sc, &sc->sc_doorbell_mem);
2478 1.12.4.2 martin unmap:
2479 1.12.4.2 martin bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
2480 1.12.4.2 martin sc->sc_mems = 0;
2481 1.12.4.2 martin }
2482 1.12.4.2 martin
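/*
 * Simple stand-in for the if_rxr(9)-style receive slot accounting the
 * OpenBSD driver uses: track how many rx slots are in use against a
 * fixed total.  The low water mark argument is currently unused.
 */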
2483 1.12.4.2 martin static void
2484 1.12.4.2 martin mcx_rxr_init(struct mcx_rxring *rxr, u_int lwm __unused, u_int hwm)
2485 1.12.4.2 martin {
2486 1.12.4.2 martin rxr->rxr_total = hwm;
2487 1.12.4.2 martin rxr->rxr_inuse = 0;
2488 1.12.4.2 martin }
2489 1.12.4.2 martin
2490 1.12.4.2 martin static u_int
2491 1.12.4.2 martin mcx_rxr_get(struct mcx_rxring *rxr, u_int max)
2492 1.12.4.2 martin {
2493 1.12.4.2 martin const u_int taken = MIN(max, rxr->rxr_total - rxr->rxr_inuse);
2494 1.12.4.2 martin
2495 1.12.4.2 martin rxr->rxr_inuse += taken;
2496 1.12.4.2 martin
2497 1.12.4.2 martin return taken;
2498 1.12.4.2 martin }
2499 1.12.4.2 martin
2500 1.12.4.2 martin static void
2501 1.12.4.2 martin mcx_rxr_put(struct mcx_rxring *rxr, u_int n)
2502 1.12.4.2 martin {
2503 1.12.4.2 martin rxr->rxr_inuse -= n;
2504 1.12.4.2 martin }
2505 1.12.4.2 martin
2506 1.12.4.2 martin static u_int
2507 1.12.4.2 martin mcx_rxr_inuse(struct mcx_rxring *rxr)
2508 1.12.4.2 martin {
2509 1.12.4.2 martin return rxr->rxr_inuse;
2510 1.12.4.2 martin }
2511 1.12.4.2 martin
2512 1.12.4.2 martin static int
2513 1.12.4.2 martin mcx_version(struct mcx_softc *sc)
2514 1.12.4.2 martin {
2515 1.12.4.2 martin uint32_t fw0, fw1;
2516 1.12.4.2 martin uint16_t cmdif;
2517 1.12.4.2 martin
2518 1.12.4.2 martin fw0 = mcx_rd(sc, MCX_FW_VER);
2519 1.12.4.2 martin fw1 = mcx_rd(sc, MCX_CMDIF_FW_SUBVER);
2520 1.12.4.2 martin
2521 1.12.4.2 martin aprint_normal_dev(sc->sc_dev, "FW %u.%u.%04u\n", MCX_FW_VER_MAJOR(fw0),
2522 1.12.4.2 martin MCX_FW_VER_MINOR(fw0), MCX_FW_VER_SUBMINOR(fw1));
2523 1.12.4.2 martin
2524 1.12.4.2 martin cmdif = MCX_CMDIF(fw1);
2525 1.12.4.2 martin if (cmdif != MCX_CMD_IF_SUPPORTED) {
2526 1.12.4.2 martin aprint_error_dev(sc->sc_dev,
2527 1.12.4.2 martin "unsupported command interface %u\n", cmdif);
2528 1.12.4.2 martin return (-1);
2529 1.12.4.2 martin }
2530 1.12.4.2 martin
2531 1.12.4.2 martin return (0);
2532 1.12.4.2 martin }
2533 1.12.4.2 martin
2534 1.12.4.2 martin static int
2535 1.12.4.2 martin mcx_init_wait(struct mcx_softc *sc)
2536 1.12.4.2 martin {
2537 1.12.4.2 martin unsigned int i;
2538 1.12.4.2 martin uint32_t r;
2539 1.12.4.2 martin
2540 1.12.4.2 martin for (i = 0; i < 2000; i++) {
2541 1.12.4.2 martin r = mcx_rd(sc, MCX_STATE);
2542 1.12.4.2 martin if ((r & MCX_STATE_MASK) == MCX_STATE_READY)
2543 1.12.4.2 martin return (0);
2544 1.12.4.2 martin
2545 1.12.4.2 martin delay(1000);
2546 1.12.4.2 martin mcx_bar(sc, MCX_STATE, sizeof(uint32_t),
2547 1.12.4.2 martin BUS_SPACE_BARRIER_READ);
2548 1.12.4.2 martin }
2549 1.12.4.2 martin
2550 1.12.4.2 martin return (-1);
2551 1.12.4.2 martin }
2552 1.12.4.2 martin
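/*
 * Poll a command queue entry until the firmware hands ownership back
 * to software, syncing the command queue DMA memory on each pass.  If
 * the event queue already exists, also run the interrupt handler while
 * waiting.  Gives up after roughly "msec" milliseconds.
 */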
2553 1.12.4.2 martin static uint8_t
2554 1.12.4.2 martin mcx_cmdq_poll(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
2555 1.12.4.2 martin unsigned int msec)
2556 1.12.4.2 martin {
2557 1.12.4.2 martin unsigned int i;
2558 1.12.4.2 martin
2559 1.12.4.2 martin for (i = 0; i < msec; i++) {
2560 1.12.4.2 martin bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_cmdq_mem),
2561 1.12.4.2 martin 0, MCX_DMA_LEN(&sc->sc_cmdq_mem), BUS_DMASYNC_POSTRW);
2562 1.12.4.2 martin
2563 1.12.4.2 martin if ((cqe->cq_status & MCX_CQ_STATUS_OWN_MASK) ==
2564 1.12.4.2 martin MCX_CQ_STATUS_OWN_SW) {
2565 1.12.4.2 martin if (sc->sc_eqn != 0)
2566 1.12.4.2 martin mcx_intr(sc);
2567 1.12.4.2 martin return (0);
2568 1.12.4.2 martin }
2569 1.12.4.2 martin
2570 1.12.4.2 martin delay(1000);
2571 1.12.4.2 martin }
2572 1.12.4.2 martin
2573 1.12.4.2 martin return (ETIMEDOUT);
2574 1.12.4.2 martin }
2575 1.12.4.2 martin
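/*
 * Command queue entries and mailboxes carry an 8-bit XOR signature.
 * The mcx_mix_* helpers fold 64/32/8-bit fields into a running 32-bit
 * XOR, which mcx_mix_done() and mcx_xor() collapse to a single byte.
 */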
2576 1.12.4.2 martin static uint32_t
2577 1.12.4.2 martin mcx_mix_u64(uint32_t xor, uint64_t u64)
2578 1.12.4.2 martin {
2579 1.12.4.2 martin xor ^= u64 >> 32;
2580 1.12.4.2 martin xor ^= u64;
2581 1.12.4.2 martin
2582 1.12.4.2 martin return (xor);
2583 1.12.4.2 martin }
2584 1.12.4.2 martin
2585 1.12.4.2 martin static uint32_t
2586 1.12.4.2 martin mcx_mix_u32(uint32_t xor, uint32_t u32)
2587 1.12.4.2 martin {
2588 1.12.4.2 martin xor ^= u32;
2589 1.12.4.2 martin
2590 1.12.4.2 martin return (xor);
2591 1.12.4.2 martin }
2592 1.12.4.2 martin
2593 1.12.4.2 martin static uint32_t
2594 1.12.4.2 martin mcx_mix_u8(uint32_t xor, uint8_t u8)
2595 1.12.4.2 martin {
2596 1.12.4.2 martin xor ^= u8;
2597 1.12.4.2 martin
2598 1.12.4.2 martin return (xor);
2599 1.12.4.2 martin }
2600 1.12.4.2 martin
2601 1.12.4.2 martin static uint8_t
2602 1.12.4.2 martin mcx_mix_done(uint32_t xor)
2603 1.12.4.2 martin {
2604 1.12.4.2 martin xor ^= xor >> 16;
2605 1.12.4.2 martin xor ^= xor >> 8;
2606 1.12.4.2 martin
2607 1.12.4.2 martin return (xor);
2608 1.12.4.2 martin }
2609 1.12.4.2 martin
2610 1.12.4.2 martin static uint8_t
2611 1.12.4.2 martin mcx_xor(const void *buf, size_t len)
2612 1.12.4.2 martin {
2613 1.12.4.2 martin const uint32_t *dwords = buf;
2614 1.12.4.2 martin uint32_t xor = 0xff;
2615 1.12.4.2 martin size_t i;
2616 1.12.4.2 martin
2617 1.12.4.2 martin len /= sizeof(*dwords);
2618 1.12.4.2 martin
2619 1.12.4.2 martin for (i = 0; i < len; i++)
2620 1.12.4.2 martin xor ^= dwords[i];
2621 1.12.4.2 martin
2622 1.12.4.2 martin return (mcx_mix_done(xor));
2623 1.12.4.2 martin }
2624 1.12.4.2 martin
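/*
 * Command tokens tag a command queue entry and its mailboxes so
 * completions can be matched up; zero is never handed out.
 */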
2625 1.12.4.2 martin static uint8_t
2626 1.12.4.2 martin mcx_cmdq_token(struct mcx_softc *sc)
2627 1.12.4.2 martin {
2628 1.12.4.2 martin uint8_t token;
2629 1.12.4.2 martin
2630 1.12.4.2 martin do {
2631 1.12.4.2 martin token = ++sc->sc_cmdq_token;
2632 1.12.4.2 martin } while (token == 0);
2633 1.12.4.2 martin
2634 1.12.4.2 martin return (token);
2635 1.12.4.2 martin }
2636 1.12.4.2 martin
2637 1.12.4.2 martin static void
2638 1.12.4.2 martin mcx_cmdq_init(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
2639 1.12.4.2 martin uint32_t ilen, uint32_t olen, uint8_t token)
2640 1.12.4.2 martin {
2641 1.12.4.2 martin memset(cqe, 0, sc->sc_cmdq_size);
2642 1.12.4.2 martin
2643 1.12.4.2 martin cqe->cq_type = MCX_CMDQ_TYPE_PCIE;
2644 1.12.4.2 martin be32enc(&cqe->cq_input_length, ilen);
2645 1.12.4.2 martin be32enc(&cqe->cq_output_length, olen);
2646 1.12.4.2 martin cqe->cq_token = token;
2647 1.12.4.2 martin cqe->cq_status = MCX_CQ_STATUS_OWN_HW;
2648 1.12.4.2 martin }
2649 1.12.4.2 martin
2650 1.12.4.2 martin static void
2651 1.12.4.2 martin mcx_cmdq_sign(struct mcx_cmdq_entry *cqe)
2652 1.12.4.2 martin {
2653 1.12.4.2 martin cqe->cq_signature = ~mcx_xor(cqe, sizeof(*cqe));
2654 1.12.4.2 martin }
2655 1.12.4.2 martin
2656 1.12.4.2 martin static int
2657 1.12.4.2 martin mcx_cmdq_verify(const struct mcx_cmdq_entry *cqe)
2658 1.12.4.2 martin {
2659 1.12.4.2 martin /* return (mcx_xor(cqe, sizeof(*cqe)) ? -1 : 0); */
2660 1.12.4.2 martin return (0);
2661 1.12.4.2 martin }
2662 1.12.4.2 martin
2663 1.12.4.2 martin static void *
2664 1.12.4.2 martin mcx_cmdq_in(struct mcx_cmdq_entry *cqe)
2665 1.12.4.2 martin {
2666 1.12.4.2 martin return (&cqe->cq_input_data);
2667 1.12.4.2 martin }
2668 1.12.4.2 martin
2669 1.12.4.2 martin static void *
2670 1.12.4.2 martin mcx_cmdq_out(struct mcx_cmdq_entry *cqe)
2671 1.12.4.2 martin {
2672 1.12.4.2 martin return (&cqe->cq_output_data);
2673 1.12.4.2 martin }
2674 1.12.4.2 martin
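/*
 * Sign the entry, flush the command queue memory to the device, then
 * ring the doorbell for the given slot to hand the command to the
 * firmware.
 */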
2675 1.12.4.2 martin static void
2676 1.12.4.2 martin mcx_cmdq_post(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
2677 1.12.4.2 martin unsigned int slot)
2678 1.12.4.2 martin {
2679 1.12.4.2 martin mcx_cmdq_sign(cqe);
2680 1.12.4.2 martin
2681 1.12.4.2 martin bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_cmdq_mem),
2682 1.12.4.2 martin 0, MCX_DMA_LEN(&sc->sc_cmdq_mem), BUS_DMASYNC_PRERW);
2683 1.12.4.2 martin
2684 1.12.4.2 martin mcx_wr(sc, MCX_CMDQ_DOORBELL, 1U << slot);
2685 1.12.4.2 martin }
2686 1.12.4.2 martin
2687 1.12.4.2 martin static int
2688 1.12.4.2 martin mcx_enable_hca(struct mcx_softc *sc)
2689 1.12.4.2 martin {
2690 1.12.4.2 martin struct mcx_cmdq_entry *cqe;
2691 1.12.4.2 martin struct mcx_cmd_enable_hca_in *in;
2692 1.12.4.2 martin struct mcx_cmd_enable_hca_out *out;
2693 1.12.4.2 martin int error;
2694 1.12.4.2 martin uint8_t status;
2695 1.12.4.2 martin
2696 1.12.4.2 martin cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
2697 1.12.4.2 martin mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
2698 1.12.4.2 martin
2699 1.12.4.2 martin in = mcx_cmdq_in(cqe);
2700 1.12.4.2 martin in->cmd_opcode = htobe16(MCX_CMD_ENABLE_HCA);
2701 1.12.4.2 martin in->cmd_op_mod = htobe16(0);
2702 1.12.4.2 martin in->cmd_function_id = htobe16(0);
2703 1.12.4.2 martin
2704 1.12.4.2 martin mcx_cmdq_post(sc, cqe, 0);
2705 1.12.4.2 martin
2706 1.12.4.2 martin error = mcx_cmdq_poll(sc, cqe, 1000);
2707 1.12.4.2 martin if (error != 0) {
2708 1.12.4.2 martin printf(", hca enable timeout\n");
2709 1.12.4.2 martin return (-1);
2710 1.12.4.2 martin }
2711 1.12.4.2 martin if (mcx_cmdq_verify(cqe) != 0) {
2712 1.12.4.2 martin printf(", hca enable command corrupt\n");
2713 1.12.4.2 martin return (-1);
2714 1.12.4.2 martin }
2715 1.12.4.2 martin
2716 1.12.4.2 martin status = cqe->cq_output_data[0];
2717 1.12.4.2 martin if (status != MCX_CQ_STATUS_OK) {
2718 1.12.4.2 martin printf(", hca enable failed (%x)\n", status);
2719 1.12.4.2 martin return (-1);
2720 1.12.4.2 martin }
2721 1.12.4.2 martin
2722 1.12.4.2 martin return (0);
2723 1.12.4.2 martin }
2724 1.12.4.2 martin
2725 1.12.4.2 martin static int
2726 1.12.4.2 martin mcx_teardown_hca(struct mcx_softc *sc, uint16_t profile)
2727 1.12.4.2 martin {
2728 1.12.4.2 martin struct mcx_cmdq_entry *cqe;
2729 1.12.4.2 martin struct mcx_cmd_teardown_hca_in *in;
2730 1.12.4.2 martin struct mcx_cmd_teardown_hca_out *out;
2731 1.12.4.2 martin int error;
2732 1.12.4.2 martin uint8_t status;
2733 1.12.4.2 martin
2734 1.12.4.2 martin cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
2735 1.12.4.2 martin mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
2736 1.12.4.2 martin
2737 1.12.4.2 martin in = mcx_cmdq_in(cqe);
2738 1.12.4.2 martin in->cmd_opcode = htobe16(MCX_CMD_TEARDOWN_HCA);
2739 1.12.4.2 martin in->cmd_op_mod = htobe16(0);
2740 1.12.4.2 martin in->cmd_profile = profile;
2741 1.12.4.2 martin
2742 1.12.4.2 martin mcx_cmdq_post(sc, cqe, 0);
2743 1.12.4.2 martin
2744 1.12.4.2 martin error = mcx_cmdq_poll(sc, cqe, 1000);
2745 1.12.4.2 martin if (error != 0) {
2746 1.12.4.2 martin printf(", hca teardown timeout\n");
2747 1.12.4.2 martin return (-1);
2748 1.12.4.2 martin }
2749 1.12.4.2 martin if (mcx_cmdq_verify(cqe) != 0) {
2750 1.12.4.2 martin printf(", hca teardown command corrupt\n");
2751 1.12.4.2 martin return (-1);
2752 1.12.4.2 martin }
2753 1.12.4.2 martin
2754 1.12.4.2 martin status = cqe->cq_output_data[0];
2755 1.12.4.2 martin if (status != MCX_CQ_STATUS_OK) {
2756 1.12.4.2 martin printf(", hca teardown failed (%x)\n", status);
2757 1.12.4.2 martin return (-1);
2758 1.12.4.2 martin }
2759 1.12.4.2 martin
2760 1.12.4.2 martin return (0);
2761 1.12.4.2 martin }
2762 1.12.4.2 martin
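/*
 * Allocate "nmb" mailboxes as a single DMA allocation and chain them:
 * "ptr" (first the cqe input or output pointer, then each mailbox's
 * next pointer) is patched with the bus address of the following
 * mailbox, and each mailbox is stamped with its block number and the
 * command token.
 */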
2763 1.12.4.2 martin static int
2764 1.12.4.2 martin mcx_cmdq_mboxes_alloc(struct mcx_softc *sc, struct mcx_dmamem *mxm,
2765 1.12.4.2 martin unsigned int nmb, uint64_t *ptr, uint8_t token)
2766 1.12.4.2 martin {
2767 1.12.4.2 martin uint8_t *kva;
2768 1.12.4.2 martin uint64_t dva;
2769 1.12.4.2 martin int i;
2770 1.12.4.2 martin int error;
2771 1.12.4.2 martin
2772 1.12.4.2 martin error = mcx_dmamem_alloc(sc, mxm,
2773 1.12.4.2 martin nmb * MCX_CMDQ_MAILBOX_SIZE, MCX_CMDQ_MAILBOX_ALIGN);
2774 1.12.4.2 martin if (error != 0)
2775 1.12.4.2 martin return (error);
2776 1.12.4.2 martin
2777 1.12.4.2 martin mcx_dmamem_zero(mxm);
2778 1.12.4.2 martin
2779 1.12.4.2 martin dva = MCX_DMA_DVA(mxm);
2780 1.12.4.2 martin kva = MCX_DMA_KVA(mxm);
2781 1.12.4.2 martin for (i = 0; i < nmb; i++) {
2782 1.12.4.2 martin struct mcx_cmdq_mailbox *mbox = (struct mcx_cmdq_mailbox *)kva;
2783 1.12.4.2 martin
2784 1.12.4.2 martin /* patch the cqe or mbox pointing at this one */
2785 1.12.4.2 martin be64enc(ptr, dva);
2786 1.12.4.2 martin
2787 1.12.4.2 martin /* fill in this mbox */
2788 1.12.4.2 martin be32enc(&mbox->mb_block_number, i);
2789 1.12.4.2 martin mbox->mb_token = token;
2790 1.12.4.2 martin
2791 1.12.4.2 martin /* move to the next one */
2792 1.12.4.2 martin ptr = &mbox->mb_next_ptr;
2793 1.12.4.2 martin
2794 1.12.4.2 martin dva += MCX_CMDQ_MAILBOX_SIZE;
2795 1.12.4.2 martin kva += MCX_CMDQ_MAILBOX_SIZE;
2796 1.12.4.2 martin }
2797 1.12.4.2 martin
2798 1.12.4.2 martin return (0);
2799 1.12.4.2 martin }
2800 1.12.4.2 martin
2801 1.12.4.2 martin static uint32_t
2802 1.12.4.2 martin mcx_cmdq_mbox_ctrl_sig(const struct mcx_cmdq_mailbox *mb)
2803 1.12.4.2 martin {
2804 1.12.4.2 martin uint32_t xor = 0xff;
2805 1.12.4.2 martin
2806 1.12.4.2 martin /* only 3 fields get set, so mix them directly */
2807 1.12.4.2 martin xor = mcx_mix_u64(xor, mb->mb_next_ptr);
2808 1.12.4.2 martin xor = mcx_mix_u32(xor, mb->mb_block_number);
2809 1.12.4.2 martin xor = mcx_mix_u8(xor, mb->mb_token);
2810 1.12.4.2 martin
2811 1.12.4.2 martin return (mcx_mix_done(xor));
2812 1.12.4.2 martin }
2813 1.12.4.2 martin
2814 1.12.4.2 martin static void
2815 1.12.4.2 martin mcx_cmdq_mboxes_sign(struct mcx_dmamem *mxm, unsigned int nmb)
2816 1.12.4.2 martin {
2817 1.12.4.2 martin uint8_t *kva;
2818 1.12.4.2 martin int i;
2819 1.12.4.2 martin
2820 1.12.4.2 martin kva = MCX_DMA_KVA(mxm);
2821 1.12.4.2 martin
2822 1.12.4.2 martin for (i = 0; i < nmb; i++) {
2823 1.12.4.2 martin struct mcx_cmdq_mailbox *mb = (struct mcx_cmdq_mailbox *)kva;
2824 1.12.4.2 martin uint8_t sig = mcx_cmdq_mbox_ctrl_sig(mb);
2825 1.12.4.2 martin mb->mb_ctrl_signature = sig;
2826 1.12.4.2 martin mb->mb_signature = sig ^
2827 1.12.4.2 martin mcx_xor(mb->mb_data, sizeof(mb->mb_data));
2828 1.12.4.2 martin
2829 1.12.4.2 martin kva += MCX_CMDQ_MAILBOX_SIZE;
2830 1.12.4.2 martin }
2831 1.12.4.2 martin }
2832 1.12.4.2 martin
2833 1.12.4.2 martin static void
2834 1.12.4.2 martin mcx_cmdq_mboxes_sync(struct mcx_softc *sc, struct mcx_dmamem *mxm, int ops)
2835 1.12.4.2 martin {
2836 1.12.4.2 martin bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(mxm),
2837 1.12.4.2 martin 0, MCX_DMA_LEN(mxm), ops);
2838 1.12.4.2 martin }
2839 1.12.4.2 martin
2840 1.12.4.2 martin static struct mcx_cmdq_mailbox *
2841 1.12.4.2 martin mcx_cq_mbox(struct mcx_dmamem *mxm, unsigned int i)
2842 1.12.4.2 martin {
2843 1.12.4.2 martin uint8_t *kva;
2844 1.12.4.2 martin
2845 1.12.4.2 martin kva = MCX_DMA_KVA(mxm);
2846 1.12.4.2 martin kva += i * MCX_CMDQ_MAILBOX_SIZE;
2847 1.12.4.2 martin
2848 1.12.4.2 martin return ((struct mcx_cmdq_mailbox *)kva);
2849 1.12.4.2 martin }
2850 1.12.4.2 martin
2851 1.12.4.2 martin static inline void *
2852 1.12.4.2 martin mcx_cq_mbox_data(struct mcx_cmdq_mailbox *mb)
2853 1.12.4.2 martin {
2854 1.12.4.2 martin return (&mb->mb_data);
2855 1.12.4.2 martin }
2856 1.12.4.2 martin
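/*
 * Scatter (copyin) or gather (copyout) a flat buffer across the
 * chained mailboxes, sizeof(mb->mb_data) bytes per mailbox.
 */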
2857 1.12.4.2 martin static void
2858 1.12.4.2 martin mcx_cmdq_mboxes_copyin(struct mcx_dmamem *mxm, unsigned int nmb,
2859 1.12.4.2 martin void *b, size_t len)
2860 1.12.4.2 martin {
2861 1.12.4.2 martin uint8_t *buf = b;
2862 1.12.4.2 martin struct mcx_cmdq_mailbox *mb;
2863 1.12.4.2 martin int i;
2864 1.12.4.2 martin
2865 1.12.4.2 martin mb = (struct mcx_cmdq_mailbox *)MCX_DMA_KVA(mxm);
2866 1.12.4.2 martin for (i = 0; i < nmb; i++) {
2868 1.12.4.2 martin memcpy(mb->mb_data, buf, uimin(sizeof(mb->mb_data), len));
2869 1.12.4.2 martin
2870 1.12.4.2 martin if (sizeof(mb->mb_data) >= len)
2871 1.12.4.2 martin break;
2872 1.12.4.2 martin
2873 1.12.4.2 martin buf += sizeof(mb->mb_data);
2874 1.12.4.2 martin len -= sizeof(mb->mb_data);
2875 1.12.4.2 martin mb++;
2876 1.12.4.2 martin }
2877 1.12.4.2 martin }
2878 1.12.4.2 martin
2879 1.12.4.2 martin static void
2880 1.12.4.2 martin mcx_cmdq_mboxes_copyout(struct mcx_dmamem *mxm, int nmb, void *b, size_t len)
2881 1.12.4.2 martin {
2882 1.12.4.2 martin uint8_t *buf = b;
2883 1.12.4.2 martin struct mcx_cmdq_mailbox *mb;
2884 1.12.4.2 martin int i;
2885 1.12.4.2 martin
2886 1.12.4.2 martin mb = (struct mcx_cmdq_mailbox *)MCX_DMA_KVA(mxm);
2887 1.12.4.2 martin for (i = 0; i < nmb; i++) {
2888 1.12.4.2 martin memcpy(buf, mb->mb_data, uimin(sizeof(mb->mb_data), len));
2889 1.12.4.2 martin
2890 1.12.4.2 martin if (sizeof(mb->mb_data) >= len)
2891 1.12.4.2 martin break;
2892 1.12.4.2 martin
2893 1.12.4.2 martin buf += sizeof(mb->mb_data);
2894 1.12.4.2 martin len -= sizeof(mb->mb_data);
2895 1.12.4.2 martin mb++;
2896 1.12.4.2 martin }
2897 1.12.4.2 martin }
2898 1.12.4.2 martin
2899 1.12.4.2 martin static void
2900 1.12.4.2 martin mcx_cq_mboxes_free(struct mcx_softc *sc, struct mcx_dmamem *mxm)
2901 1.12.4.2 martin {
2902 1.12.4.2 martin mcx_dmamem_free(sc, mxm);
2903 1.12.4.2 martin }
2904 1.12.4.2 martin
2905 1.12.4.2 martin #if 0
2906 1.12.4.2 martin static void
2907 1.12.4.2 martin mcx_cmdq_dump(const struct mcx_cmdq_entry *cqe)
2908 1.12.4.2 martin {
2909 1.12.4.2 martin unsigned int i;
2910 1.12.4.2 martin
2911 1.12.4.2 martin printf(" type %02x, ilen %u, iptr %016llx", cqe->cq_type,
2912 1.12.4.2 martin be32dec(&cqe->cq_input_length), be64dec(&cqe->cq_input_ptr));
2913 1.12.4.2 martin
2914 1.12.4.2 martin printf(", idata ");
2915 1.12.4.2 martin for (i = 0; i < sizeof(cqe->cq_input_data); i++)
2916 1.12.4.2 martin printf("%02x", cqe->cq_input_data[i]);
2917 1.12.4.2 martin
2918 1.12.4.2 martin printf(", odata ");
2919 1.12.4.2 martin for (i = 0; i < sizeof(cqe->cq_output_data); i++)
2920 1.12.4.2 martin printf("%02x", cqe->cq_output_data[i]);
2921 1.12.4.2 martin
2922 1.12.4.2 martin printf(", optr %016llx, olen %u, token %02x, sig %02x, status %02x",
2923 1.12.4.2 martin be64dec(&cqe->cq_output_ptr), be32dec(&cqe->cq_output_length),
2924 1.12.4.2 martin cqe->cq_token, cqe->cq_signature, cqe->cq_status);
2925 1.12.4.2 martin }
2926 1.12.4.2 martin
2927 1.12.4.2 martin static void
2928 1.12.4.2 martin mcx_cmdq_mbox_dump(struct mcx_dmamem *mboxes, int num)
2929 1.12.4.2 martin {
2930 1.12.4.2 martin int i, j;
2931 1.12.4.2 martin uint8_t *d;
2932 1.12.4.2 martin
2933 1.12.4.2 martin for (i = 0; i < num; i++) {
2934 1.12.4.2 martin struct mcx_cmdq_mailbox *mbox;
2935 1.12.4.2 martin mbox = mcx_cq_mbox(mboxes, i);
2936 1.12.4.2 martin
2937 1.12.4.2 martin d = mcx_cq_mbox_data(mbox);
2938 1.12.4.2 martin for (j = 0; j < MCX_CMDQ_MAILBOX_DATASIZE; j++) {
2939 1.12.4.2 martin if (j != 0 && (j % 16 == 0))
2940 1.12.4.2 martin printf("\n");
2941 1.12.4.2 martin printf("%.2x ", d[j]);
2942 1.12.4.2 martin }
2943 1.12.4.2 martin }
2944 1.12.4.2 martin }
2945 1.12.4.2 martin #endif
2946 1.12.4.2 martin
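/*
 * Read or write a device register via the ACCESS_REG command.  The
 * register payload travels through mailboxes in both directions, so
 * the same mailbox chain is used for command input and output.
 */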
2947 1.12.4.2 martin static int
2948 1.12.4.2 martin mcx_access_hca_reg(struct mcx_softc *sc, uint16_t reg, int op, void *data,
2949 1.12.4.2 martin int len)
2950 1.12.4.2 martin {
2951 1.12.4.2 martin struct mcx_dmamem mxm;
2952 1.12.4.2 martin struct mcx_cmdq_entry *cqe;
2953 1.12.4.2 martin struct mcx_cmd_access_reg_in *in;
2954 1.12.4.2 martin struct mcx_cmd_access_reg_out *out;
2955 1.12.4.2 martin uint8_t token = mcx_cmdq_token(sc);
2956 1.12.4.2 martin int error, nmb;
2957 1.12.4.2 martin
2958 1.12.4.2 martin cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
2959 1.12.4.2 martin mcx_cmdq_init(sc, cqe, sizeof(*in) + len, sizeof(*out) + len,
2960 1.12.4.2 martin token);
2961 1.12.4.2 martin
2962 1.12.4.2 martin in = mcx_cmdq_in(cqe);
2963 1.12.4.2 martin in->cmd_opcode = htobe16(MCX_CMD_ACCESS_REG);
2964 1.12.4.2 martin in->cmd_op_mod = htobe16(op);
2965 1.12.4.2 martin in->cmd_register_id = htobe16(reg);
2966 1.12.4.2 martin
2967 1.12.4.2 martin nmb = howmany(len, MCX_CMDQ_MAILBOX_DATASIZE);
2968 1.12.4.2 martin if (mcx_cmdq_mboxes_alloc(sc, &mxm, nmb, &cqe->cq_output_ptr, token) != 0) {
2969 1.12.4.2 martin printf(", unable to allocate access reg mailboxen\n");
2970 1.12.4.2 martin return (-1);
2971 1.12.4.2 martin }
2972 1.12.4.2 martin cqe->cq_input_ptr = cqe->cq_output_ptr;
2973 1.12.4.2 martin mcx_cmdq_mboxes_copyin(&mxm, nmb, data, len);
2974 1.12.4.2 martin mcx_cmdq_mboxes_sign(&mxm, nmb);
2975 1.12.4.2 martin mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);
2976 1.12.4.2 martin
2977 1.12.4.2 martin mcx_cmdq_post(sc, cqe, 0);
2978 1.12.4.2 martin error = mcx_cmdq_poll(sc, cqe, 1000);
2979 1.12.4.2 martin mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);
2980 1.12.4.2 martin
2981 1.12.4.2 martin if (error != 0) {
2982 1.12.4.2 martin printf("%s: access reg (%s %x) timeout\n", DEVNAME(sc),
2983 1.12.4.2 martin (op == MCX_REG_OP_WRITE ? "write" : "read"), reg);
2984 1.12.4.2 martin goto free;
2985 1.12.4.2 martin }
2986 1.12.4.2 martin error = mcx_cmdq_verify(cqe);
2987 1.12.4.2 martin if (error != 0) {
2988 1.12.4.2 martin printf("%s: access reg (%s %x) reply corrupt\n",
2989 1.12.4.2 martin 		    DEVNAME(sc), (op == MCX_REG_OP_WRITE ? "write" : "read"),
2990 1.12.4.2 martin reg);
2991 1.12.4.2 martin goto free;
2992 1.12.4.2 martin }
2993 1.12.4.2 martin
2994 1.12.4.2 martin out = mcx_cmdq_out(cqe);
2995 1.12.4.2 martin if (out->cmd_status != MCX_CQ_STATUS_OK) {
2996 1.12.4.2 martin printf("%s: access reg (%s %x) failed (%x, %.6x)\n",
2997 1.12.4.2 martin DEVNAME(sc), (op == MCX_REG_OP_WRITE ? "write" : "read"),
2998 1.12.4.2 martin reg, out->cmd_status, out->cmd_syndrome);
2999 1.12.4.2 martin error = -1;
3000 1.12.4.2 martin goto free;
3001 1.12.4.2 martin }
3002 1.12.4.2 martin
3003 1.12.4.2 martin mcx_cmdq_mboxes_copyout(&mxm, nmb, data, len);
3004 1.12.4.2 martin free:
3005 1.12.4.2 martin mcx_dmamem_free(sc, &mxm);
3006 1.12.4.2 martin
3007 1.12.4.2 martin return (error);
3008 1.12.4.2 martin }
3009 1.12.4.2 martin
3010 1.12.4.2 martin static int
3011 1.12.4.2 martin mcx_set_issi(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe, unsigned int slot)
3012 1.12.4.2 martin {
3013 1.12.4.2 martin struct mcx_cmd_set_issi_in *in;
3014 1.12.4.2 martin struct mcx_cmd_set_issi_out *out;
3015 1.12.4.2 martin uint8_t status;
3016 1.12.4.2 martin
3017 1.12.4.2 martin mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3018 1.12.4.2 martin
3019 1.12.4.2 martin in = mcx_cmdq_in(cqe);
3020 1.12.4.2 martin in->cmd_opcode = htobe16(MCX_CMD_SET_ISSI);
3021 1.12.4.2 martin in->cmd_op_mod = htobe16(0);
3022 1.12.4.2 martin in->cmd_current_issi = htobe16(MCX_ISSI);
3023 1.12.4.2 martin
3024 1.12.4.2 martin mcx_cmdq_post(sc, cqe, slot);
3025 1.12.4.2 martin if (mcx_cmdq_poll(sc, cqe, 1000) != 0)
3026 1.12.4.2 martin return (-1);
3027 1.12.4.2 martin if (mcx_cmdq_verify(cqe) != 0)
3028 1.12.4.2 martin return (-1);
3029 1.12.4.2 martin
3030 1.12.4.2 martin status = cqe->cq_output_data[0];
3031 1.12.4.2 martin if (status != MCX_CQ_STATUS_OK)
3032 1.12.4.2 martin return (-1);
3033 1.12.4.2 martin
3034 1.12.4.2 martin return (0);
3035 1.12.4.2 martin }
3036 1.12.4.2 martin
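/*
 * Negotiate the ISSI level with the firmware: query what it supports,
 * stay on ISSI 0 if the query opcode is unknown or ISSI 1 isn't
 * advertised, and otherwise try to switch to ISSI 1 with SET_ISSI.
 * A failed switch is not treated as an error.
 */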
3037 1.12.4.2 martin static int
3038 1.12.4.2 martin mcx_issi(struct mcx_softc *sc)
3039 1.12.4.2 martin {
3040 1.12.4.2 martin struct mcx_dmamem mxm;
3041 1.12.4.2 martin struct mcx_cmdq_entry *cqe;
3042 1.12.4.2 martin struct mcx_cmd_query_issi_in *in;
3043 1.12.4.2 martin struct mcx_cmd_query_issi_il_out *out;
3044 1.12.4.2 martin struct mcx_cmd_query_issi_mb_out *mb;
3045 1.12.4.2 martin uint8_t token = mcx_cmdq_token(sc);
3046 1.12.4.2 martin uint8_t status;
3047 1.12.4.2 martin int error;
3048 1.12.4.2 martin
3049 1.12.4.2 martin cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3050 1.12.4.2 martin mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mb), token);
3051 1.12.4.2 martin
3052 1.12.4.2 martin in = mcx_cmdq_in(cqe);
3053 1.12.4.2 martin in->cmd_opcode = htobe16(MCX_CMD_QUERY_ISSI);
3054 1.12.4.2 martin in->cmd_op_mod = htobe16(0);
3055 1.12.4.2 martin
3056 1.12.4.2 martin CTASSERT(sizeof(*mb) <= MCX_CMDQ_MAILBOX_DATASIZE);
3057 1.12.4.2 martin if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
3058 1.12.4.2 martin &cqe->cq_output_ptr, token) != 0) {
3059 1.12.4.2 martin printf(", unable to allocate query issi mailbox\n");
3060 1.12.4.2 martin return (-1);
3061 1.12.4.2 martin }
3062 1.12.4.2 martin mcx_cmdq_mboxes_sign(&mxm, 1);
3063 1.12.4.2 martin
3064 1.12.4.2 martin mcx_cmdq_post(sc, cqe, 0);
3065 1.12.4.2 martin error = mcx_cmdq_poll(sc, cqe, 1000);
3066 1.12.4.2 martin if (error != 0) {
3067 1.12.4.2 martin printf(", query issi timeout\n");
3068 1.12.4.2 martin goto free;
3069 1.12.4.2 martin }
3070 1.12.4.2 martin error = mcx_cmdq_verify(cqe);
3071 1.12.4.2 martin if (error != 0) {
3072 1.12.4.2 martin printf(", query issi reply corrupt\n");
3073 1.12.4.2 martin goto free;
3074 1.12.4.2 martin }
3075 1.12.4.2 martin
3076 1.12.4.2 martin status = cqe->cq_output_data[0];
3077 1.12.4.2 martin switch (status) {
3078 1.12.4.2 martin case MCX_CQ_STATUS_OK:
3079 1.12.4.2 martin break;
3080 1.12.4.2 martin case MCX_CQ_STATUS_BAD_OPCODE:
3081 1.12.4.2 martin /* use ISSI 0 */
3082 1.12.4.2 martin goto free;
3083 1.12.4.2 martin default:
3084 1.12.4.2 martin printf(", query issi failed (%x)\n", status);
3085 1.12.4.2 martin error = -1;
3086 1.12.4.2 martin goto free;
3087 1.12.4.2 martin }
3088 1.12.4.2 martin
3089 1.12.4.2 martin out = mcx_cmdq_out(cqe);
3090 1.12.4.2 martin if (out->cmd_current_issi == htobe16(MCX_ISSI)) {
3091 1.12.4.2 martin /* use ISSI 1 */
3092 1.12.4.2 martin goto free;
3093 1.12.4.2 martin }
3094 1.12.4.2 martin
3095 1.12.4.2 martin /* don't need to read cqe anymore, can be used for SET ISSI */
3096 1.12.4.2 martin
3097 1.12.4.2 martin mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
3098 1.12.4.2 martin CTASSERT(MCX_ISSI < NBBY);
3099 1.12.4.2 martin /* XXX math is hard */
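	/*
	 * cmd_supported_issi is a big-endian bitmask, so the low-order
	 * bits (and with them ISSI 1, given the CTASSERT above) end up
	 * in the final byte of the field, hence index 79.
	 */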
3100 1.12.4.2 martin if (!ISSET(mb->cmd_supported_issi[79], 1 << MCX_ISSI)) {
3101 1.12.4.2 martin /* use ISSI 0 */
3102 1.12.4.2 martin goto free;
3103 1.12.4.2 martin }
3104 1.12.4.2 martin
3105 1.12.4.2 martin if (mcx_set_issi(sc, cqe, 0) != 0) {
3106 1.12.4.2 martin /* ignore the error, just use ISSI 0 */
3107 1.12.4.2 martin } else {
3108 1.12.4.2 martin /* use ISSI 1 */
3109 1.12.4.2 martin }
3110 1.12.4.2 martin
3111 1.12.4.2 martin free:
3112 1.12.4.2 martin mcx_cq_mboxes_free(sc, &mxm);
3113 1.12.4.2 martin return (error);
3114 1.12.4.2 martin }
3115 1.12.4.2 martin
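/*
 * Ask the firmware how many pages it wants for the phase selected by
 * "type", which is passed through as the QUERY_PAGES op_mod.  The
 * count and function id are used by mcx_pages() to allocate and
 * register the memory.
 */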
3116 1.12.4.2 martin static int
3117 1.12.4.2 martin mcx_query_pages(struct mcx_softc *sc, uint16_t type,
3118 1.12.4.2 martin uint32_t *npages, uint16_t *func_id)
3119 1.12.4.2 martin {
3120 1.12.4.2 martin struct mcx_cmdq_entry *cqe;
3121 1.12.4.2 martin struct mcx_cmd_query_pages_in *in;
3122 1.12.4.2 martin struct mcx_cmd_query_pages_out *out;
3123 1.12.4.2 martin
3124 1.12.4.2 martin cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3125 1.12.4.2 martin mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3126 1.12.4.2 martin
3127 1.12.4.2 martin in = mcx_cmdq_in(cqe);
3128 1.12.4.2 martin in->cmd_opcode = htobe16(MCX_CMD_QUERY_PAGES);
3129 1.12.4.2 martin in->cmd_op_mod = type;
3130 1.12.4.2 martin
3131 1.12.4.2 martin mcx_cmdq_post(sc, cqe, 0);
3132 1.12.4.2 martin if (mcx_cmdq_poll(sc, cqe, 1000) != 0) {
3133 1.12.4.2 martin printf(", query pages timeout\n");
3134 1.12.4.2 martin return (-1);
3135 1.12.4.2 martin }
3136 1.12.4.2 martin if (mcx_cmdq_verify(cqe) != 0) {
3137 1.12.4.2 martin printf(", query pages reply corrupt\n");
3138 1.12.4.2 martin return (-1);
3139 1.12.4.2 martin }
3140 1.12.4.2 martin
3141 1.12.4.2 martin out = mcx_cmdq_out(cqe);
3142 1.12.4.2 martin if (out->cmd_status != MCX_CQ_STATUS_OK) {
3143 1.12.4.2 martin printf(", query pages failed (%x)\n", out->cmd_status);
3144 1.12.4.2 martin return (-1);
3145 1.12.4.2 martin }
3146 1.12.4.2 martin
3147 1.12.4.2 martin *func_id = out->cmd_func_id;
3148 1.12.4.2 martin *npages = be32dec(&out->cmd_num_pages);
3149 1.12.4.2 martin
3150 1.12.4.2 martin return (0);
3151 1.12.4.2 martin }
3152 1.12.4.2 martin
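/*
 * Small iterator over the segments of a loaded bus_dmamap_t, used
 * below to walk hardware memory one page at a time when building the
 * page address list for the firmware.
 */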
3153 1.12.4.2 martin struct bus_dma_iter {
3154 1.12.4.2 martin bus_dmamap_t i_map;
3155 1.12.4.2 martin bus_size_t i_offset;
3156 1.12.4.2 martin unsigned int i_index;
3157 1.12.4.2 martin };
3158 1.12.4.2 martin
3159 1.12.4.2 martin static void
3160 1.12.4.2 martin bus_dma_iter_init(struct bus_dma_iter *i, bus_dmamap_t map)
3161 1.12.4.2 martin {
3162 1.12.4.2 martin i->i_map = map;
3163 1.12.4.2 martin i->i_offset = 0;
3164 1.12.4.2 martin i->i_index = 0;
3165 1.12.4.2 martin }
3166 1.12.4.2 martin
3167 1.12.4.2 martin static bus_addr_t
3168 1.12.4.2 martin bus_dma_iter_addr(struct bus_dma_iter *i)
3169 1.12.4.2 martin {
3170 1.12.4.2 martin return (i->i_map->dm_segs[i->i_index].ds_addr + i->i_offset);
3171 1.12.4.2 martin }
3172 1.12.4.2 martin
3173 1.12.4.2 martin static void
3174 1.12.4.2 martin bus_dma_iter_add(struct bus_dma_iter *i, bus_size_t size)
3175 1.12.4.2 martin {
3176 1.12.4.2 martin bus_dma_segment_t *seg = i->i_map->dm_segs + i->i_index;
3177 1.12.4.2 martin bus_size_t diff;
3178 1.12.4.2 martin
3179 1.12.4.2 martin do {
3180 1.12.4.2 martin diff = seg->ds_len - i->i_offset;
3181 1.12.4.2 martin if (size < diff)
3182 1.12.4.2 martin break;
3183 1.12.4.2 martin
3184 1.12.4.2 martin size -= diff;
3185 1.12.4.2 martin
3186 1.12.4.2 martin seg++;
3187 1.12.4.2 martin
3188 1.12.4.2 martin i->i_offset = 0;
3189 1.12.4.2 martin i->i_index++;
3190 1.12.4.2 martin } while (size > 0);
3191 1.12.4.2 martin
3192 1.12.4.2 martin i->i_offset += size;
3193 1.12.4.2 martin }
3194 1.12.4.2 martin
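/*
 * Register the pages backing "mhm" with the firmware via MANAGE_PAGES.
 * The page bus addresses are packed into the chained mailboxes, as
 * many as fit per mailbox, in the order the DMA map presents them.
 */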
3195 1.12.4.2 martin static int
3196 1.12.4.2 martin mcx_add_pages(struct mcx_softc *sc, struct mcx_hwmem *mhm, uint16_t func_id)
3197 1.12.4.2 martin {
3198 1.12.4.2 martin struct mcx_dmamem mxm;
3199 1.12.4.2 martin struct mcx_cmdq_entry *cqe;
3200 1.12.4.2 martin struct mcx_cmd_manage_pages_in *in;
3201 1.12.4.2 martin struct mcx_cmd_manage_pages_out *out;
3202 1.12.4.2 martin unsigned int paslen, nmb, i, j, npages;
3203 1.12.4.2 martin struct bus_dma_iter iter;
3204 1.12.4.2 martin uint64_t *pas;
3205 1.12.4.2 martin uint8_t status;
3206 1.12.4.2 martin uint8_t token = mcx_cmdq_token(sc);
3207 1.12.4.2 martin int error;
3208 1.12.4.2 martin
3209 1.12.4.2 martin npages = mhm->mhm_npages;
3210 1.12.4.2 martin
3211 1.12.4.2 martin paslen = sizeof(*pas) * npages;
3212 1.12.4.2 martin nmb = howmany(paslen, MCX_CMDQ_MAILBOX_DATASIZE);
3213 1.12.4.2 martin
3214 1.12.4.2 martin cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3215 1.12.4.2 martin mcx_cmdq_init(sc, cqe, sizeof(*in) + paslen, sizeof(*out), token);
3216 1.12.4.2 martin
3217 1.12.4.2 martin in = mcx_cmdq_in(cqe);
3218 1.12.4.2 martin in->cmd_opcode = htobe16(MCX_CMD_MANAGE_PAGES);
3219 1.12.4.2 martin in->cmd_op_mod = htobe16(MCX_CMD_MANAGE_PAGES_ALLOC_SUCCESS);
3220 1.12.4.2 martin in->cmd_func_id = func_id;
3221 1.12.4.2 martin be32enc(&in->cmd_input_num_entries, npages);
3222 1.12.4.2 martin
3223 1.12.4.2 martin if (mcx_cmdq_mboxes_alloc(sc, &mxm, nmb,
3224 1.12.4.2 martin &cqe->cq_input_ptr, token) != 0) {
3225 1.12.4.2 martin printf(", unable to allocate manage pages mailboxen\n");
3226 1.12.4.2 martin return (-1);
3227 1.12.4.2 martin }
3228 1.12.4.2 martin
3229 1.12.4.2 martin bus_dma_iter_init(&iter, mhm->mhm_map);
3230 1.12.4.2 martin for (i = 0; i < nmb; i++) {
3231 1.12.4.2 martin unsigned int lim;
3232 1.12.4.2 martin
3233 1.12.4.2 martin pas = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, i));
3234 1.12.4.2 martin lim = uimin(MCX_CMDQ_MAILBOX_DATASIZE / sizeof(*pas), npages);
3235 1.12.4.2 martin
3236 1.12.4.2 martin for (j = 0; j < lim; j++) {
3237 1.12.4.2 martin be64enc(&pas[j], bus_dma_iter_addr(&iter));
3238 1.12.4.2 martin bus_dma_iter_add(&iter, MCX_PAGE_SIZE);
3239 1.12.4.2 martin }
3240 1.12.4.2 martin
3241 1.12.4.2 martin npages -= lim;
3242 1.12.4.2 martin }
3243 1.12.4.2 martin
3244 1.12.4.2 martin mcx_cmdq_mboxes_sign(&mxm, nmb);
3245 1.12.4.2 martin
3246 1.12.4.2 martin mcx_cmdq_post(sc, cqe, 0);
3247 1.12.4.2 martin error = mcx_cmdq_poll(sc, cqe, 1000);
3248 1.12.4.2 martin if (error != 0) {
3249 1.12.4.2 martin printf(", manage pages timeout\n");
3250 1.12.4.2 martin goto free;
3251 1.12.4.2 martin }
3252 1.12.4.2 martin error = mcx_cmdq_verify(cqe);
3253 1.12.4.2 martin if (error != 0) {
3254 1.12.4.2 martin printf(", manage pages reply corrupt\n");
3255 1.12.4.2 martin goto free;
3256 1.12.4.2 martin }
3257 1.12.4.2 martin
3258 1.12.4.2 martin status = cqe->cq_output_data[0];
3259 1.12.4.2 martin if (status != MCX_CQ_STATUS_OK) {
3260 1.12.4.2 martin printf(", manage pages failed (%x)\n", status);
3261 1.12.4.2 martin error = -1;
3262 1.12.4.2 martin goto free;
3263 1.12.4.2 martin }
3264 1.12.4.2 martin
3265 1.12.4.2 martin free:
3266 1.12.4.2 martin mcx_dmamem_free(sc, &mxm);
3267 1.12.4.2 martin
3268 1.12.4.2 martin return (error);
3269 1.12.4.2 martin }
3270 1.12.4.2 martin
3271 1.12.4.2 martin static int
3272 1.12.4.2 martin mcx_pages(struct mcx_softc *sc, struct mcx_hwmem *mhm, uint16_t type)
3273 1.12.4.2 martin {
3274 1.12.4.2 martin uint32_t npages;
3275 1.12.4.2 martin uint16_t func_id;
3276 1.12.4.2 martin
3277 1.12.4.2 martin if (mcx_query_pages(sc, type, &npages, &func_id) != 0) {
3278 1.12.4.2 martin /* error printed by mcx_query_pages */
3279 1.12.4.2 martin return (-1);
3280 1.12.4.2 martin }
3281 1.12.4.2 martin
3282 1.12.4.2 martin if (npages == 0)
3283 1.12.4.2 martin return (0);
3284 1.12.4.2 martin
3285 1.12.4.2 martin if (mcx_hwmem_alloc(sc, mhm, npages) != 0) {
3286 1.12.4.2 martin printf(", unable to allocate hwmem\n");
3287 1.12.4.2 martin return (-1);
3288 1.12.4.2 martin }
3289 1.12.4.2 martin
3290 1.12.4.2 martin if (mcx_add_pages(sc, mhm, func_id) != 0) {
3291 1.12.4.2 martin printf(", unable to add hwmem\n");
3292 1.12.4.2 martin goto free;
3293 1.12.4.2 martin }
3294 1.12.4.2 martin
3295 1.12.4.2 martin return (0);
3296 1.12.4.2 martin
3297 1.12.4.2 martin free:
3298 1.12.4.2 martin mcx_hwmem_free(sc, mhm);
3299 1.12.4.2 martin
3300 1.12.4.2 martin return (-1);
3301 1.12.4.2 martin }
3302 1.12.4.2 martin
3303 1.12.4.2 martin static int
3304 1.12.4.2 martin mcx_hca_max_caps(struct mcx_softc *sc)
3305 1.12.4.2 martin {
3306 1.12.4.2 martin struct mcx_dmamem mxm;
3307 1.12.4.2 martin struct mcx_cmdq_entry *cqe;
3308 1.12.4.2 martin struct mcx_cmd_query_hca_cap_in *in;
3309 1.12.4.2 martin struct mcx_cmd_query_hca_cap_out *out;
3310 1.12.4.2 martin struct mcx_cmdq_mailbox *mb;
3311 1.12.4.2 martin struct mcx_cap_device *hca;
3312 1.12.4.2 martin uint8_t status;
3313 1.12.4.2 martin uint8_t token = mcx_cmdq_token(sc);
3314 1.12.4.2 martin int error;
3315 1.12.4.2 martin
3316 1.12.4.2 martin cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3317 1.12.4.2 martin mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + MCX_HCA_CAP_LEN,
3318 1.12.4.2 martin token);
3319 1.12.4.2 martin
3320 1.12.4.2 martin in = mcx_cmdq_in(cqe);
3321 1.12.4.2 martin in->cmd_opcode = htobe16(MCX_CMD_QUERY_HCA_CAP);
3322 1.12.4.2 martin in->cmd_op_mod = htobe16(MCX_CMD_QUERY_HCA_CAP_MAX |
3323 1.12.4.2 martin MCX_CMD_QUERY_HCA_CAP_DEVICE);
3324 1.12.4.2 martin
3325 1.12.4.2 martin if (mcx_cmdq_mboxes_alloc(sc, &mxm, MCX_HCA_CAP_NMAILBOXES,
3326 1.12.4.2 martin &cqe->cq_output_ptr, token) != 0) {
3327 1.12.4.2 martin printf(", unable to allocate query hca caps mailboxen\n");
3328 1.12.4.2 martin return (-1);
3329 1.12.4.2 martin }
3330 1.12.4.2 martin mcx_cmdq_mboxes_sign(&mxm, MCX_HCA_CAP_NMAILBOXES);
3331 1.12.4.2 martin mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);
3332 1.12.4.2 martin
3333 1.12.4.2 martin mcx_cmdq_post(sc, cqe, 0);
3334 1.12.4.2 martin error = mcx_cmdq_poll(sc, cqe, 1000);
3335 1.12.4.2 martin mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);
3336 1.12.4.2 martin
3337 1.12.4.2 martin if (error != 0) {
3338 1.12.4.2 martin printf(", query hca caps timeout\n");
3339 1.12.4.2 martin goto free;
3340 1.12.4.2 martin }
3341 1.12.4.2 martin error = mcx_cmdq_verify(cqe);
3342 1.12.4.2 martin if (error != 0) {
3343 1.12.4.2 martin printf(", query hca caps reply corrupt\n");
3344 1.12.4.2 martin goto free;
3345 1.12.4.2 martin }
3346 1.12.4.2 martin
3347 1.12.4.2 martin status = cqe->cq_output_data[0];
3348 1.12.4.2 martin if (status != MCX_CQ_STATUS_OK) {
3349 1.12.4.2 martin printf(", query hca caps failed (%x)\n", status);
3350 1.12.4.2 martin error = -1;
3351 1.12.4.2 martin goto free;
3352 1.12.4.2 martin }
3353 1.12.4.2 martin
3354 1.12.4.2 martin mb = mcx_cq_mbox(&mxm, 0);
3355 1.12.4.2 martin hca = mcx_cq_mbox_data(mb);
3356 1.12.4.2 martin
3357 1.12.4.2 martin if (hca->log_pg_sz > PAGE_SHIFT) {
3358 1.12.4.2 martin 		printf(", device minimum page shift %u is larger than PAGE_SHIFT\n",
3359 1.12.4.2 martin hca->log_pg_sz);
3360 1.12.4.2 martin error = -1;
3361 1.12.4.2 martin goto free;
3362 1.12.4.2 martin }
3363 1.12.4.2 martin /*
3364 1.12.4.2 martin * blueflame register is split into two buffers, and we must alternate
3365 1.12.4.2 martin * between the two of them.
3366 1.12.4.2 martin */
3367 1.12.4.2 martin sc->sc_bf_size = (1 << hca->log_bf_reg_size) / 2;
3368 1.12.4.2 martin
3369 1.12.4.2 martin free:
3370 1.12.4.2 martin mcx_dmamem_free(sc, &mxm);
3371 1.12.4.2 martin
3372 1.12.4.2 martin return (error);
3373 1.12.4.2 martin }
3374 1.12.4.2 martin
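/*
 * Despite its name this currently only queries the current device
 * capabilities and patches log_pg_sz in the local mailbox copy; no
 * SET_HCA_CAP command is posted, so nothing is written back to the
 * device.
 */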
3375 1.12.4.2 martin static int
3376 1.12.4.2 martin mcx_hca_set_caps(struct mcx_softc *sc)
3377 1.12.4.2 martin {
3378 1.12.4.2 martin struct mcx_dmamem mxm;
3379 1.12.4.2 martin struct mcx_cmdq_entry *cqe;
3380 1.12.4.2 martin struct mcx_cmd_query_hca_cap_in *in;
3381 1.12.4.2 martin struct mcx_cmd_query_hca_cap_out *out;
3382 1.12.4.2 martin struct mcx_cmdq_mailbox *mb;
3383 1.12.4.2 martin struct mcx_cap_device *hca;
3384 1.12.4.2 martin uint8_t status;
3385 1.12.4.2 martin uint8_t token = mcx_cmdq_token(sc);
3386 1.12.4.2 martin int error;
3387 1.12.4.2 martin
3388 1.12.4.2 martin cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3389 1.12.4.2 martin mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + MCX_HCA_CAP_LEN,
3390 1.12.4.2 martin token);
3391 1.12.4.2 martin
3392 1.12.4.2 martin in = mcx_cmdq_in(cqe);
3393 1.12.4.2 martin in->cmd_opcode = htobe16(MCX_CMD_QUERY_HCA_CAP);
3394 1.12.4.2 martin in->cmd_op_mod = htobe16(MCX_CMD_QUERY_HCA_CAP_CURRENT |
3395 1.12.4.2 martin MCX_CMD_QUERY_HCA_CAP_DEVICE);
3396 1.12.4.2 martin
3397 1.12.4.2 martin if (mcx_cmdq_mboxes_alloc(sc, &mxm, MCX_HCA_CAP_NMAILBOXES,
3398 1.12.4.2 martin &cqe->cq_output_ptr, token) != 0) {
3399 1.12.4.2 martin 		printf(", unable to allocate query hca caps mailboxen\n");
3400 1.12.4.2 martin return (-1);
3401 1.12.4.2 martin }
3402 1.12.4.2 martin mcx_cmdq_mboxes_sign(&mxm, MCX_HCA_CAP_NMAILBOXES);
3403 1.12.4.2 martin mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);
3404 1.12.4.2 martin
3405 1.12.4.2 martin mcx_cmdq_post(sc, cqe, 0);
3406 1.12.4.2 martin error = mcx_cmdq_poll(sc, cqe, 1000);
3407 1.12.4.2 martin mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);
3408 1.12.4.2 martin
3409 1.12.4.2 martin if (error != 0) {
3410 1.12.4.2 martin printf(", query hca caps timeout\n");
3411 1.12.4.2 martin goto free;
3412 1.12.4.2 martin }
3413 1.12.4.2 martin error = mcx_cmdq_verify(cqe);
3414 1.12.4.2 martin if (error != 0) {
3415 1.12.4.2 martin printf(", query hca caps reply corrupt\n");
3416 1.12.4.2 martin goto free;
3417 1.12.4.2 martin }
3418 1.12.4.2 martin
3419 1.12.4.2 martin status = cqe->cq_output_data[0];
3420 1.12.4.2 martin if (status != MCX_CQ_STATUS_OK) {
3421 1.12.4.2 martin printf(", query hca caps failed (%x)\n", status);
3422 1.12.4.2 martin error = -1;
3423 1.12.4.2 martin goto free;
3424 1.12.4.2 martin }
3425 1.12.4.2 martin
3426 1.12.4.2 martin mb = mcx_cq_mbox(&mxm, 0);
3427 1.12.4.2 martin hca = mcx_cq_mbox_data(mb);
3428 1.12.4.2 martin
3429 1.12.4.2 martin hca->log_pg_sz = PAGE_SHIFT;
3430 1.12.4.2 martin
3431 1.12.4.2 martin free:
3432 1.12.4.2 martin mcx_dmamem_free(sc, &mxm);
3433 1.12.4.2 martin
3434 1.12.4.2 martin return (error);
3435 1.12.4.2 martin }
3436 1.12.4.2 martin
3438 1.12.4.2 martin static int
3439 1.12.4.2 martin mcx_init_hca(struct mcx_softc *sc)
3440 1.12.4.2 martin {
3441 1.12.4.2 martin struct mcx_cmdq_entry *cqe;
3442 1.12.4.2 martin struct mcx_cmd_init_hca_in *in;
3443 1.12.4.2 martin struct mcx_cmd_init_hca_out *out;
3444 1.12.4.2 martin int error;
3445 1.12.4.2 martin uint8_t status;
3446 1.12.4.2 martin
3447 1.12.4.2 martin cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3448 1.12.4.2 martin mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3449 1.12.4.2 martin
3450 1.12.4.2 martin in = mcx_cmdq_in(cqe);
3451 1.12.4.2 martin in->cmd_opcode = htobe16(MCX_CMD_INIT_HCA);
3452 1.12.4.2 martin in->cmd_op_mod = htobe16(0);
3453 1.12.4.2 martin
3454 1.12.4.2 martin mcx_cmdq_post(sc, cqe, 0);
3455 1.12.4.2 martin
3456 1.12.4.2 martin error = mcx_cmdq_poll(sc, cqe, 1000);
3457 1.12.4.2 martin if (error != 0) {
3458 1.12.4.2 martin printf(", hca init timeout\n");
3459 1.12.4.2 martin return (-1);
3460 1.12.4.2 martin }
3461 1.12.4.2 martin if (mcx_cmdq_verify(cqe) != 0) {
3462 1.12.4.2 martin printf(", hca init command corrupt\n");
3463 1.12.4.2 martin return (-1);
3464 1.12.4.2 martin }
3465 1.12.4.2 martin
3466 1.12.4.2 martin status = cqe->cq_output_data[0];
3467 1.12.4.2 martin if (status != MCX_CQ_STATUS_OK) {
3468 1.12.4.2 martin printf(", hca init failed (%x)\n", status);
3469 1.12.4.2 martin return (-1);
3470 1.12.4.2 martin }
3471 1.12.4.2 martin
3472 1.12.4.2 martin return (0);
3473 1.12.4.2 martin }
3474 1.12.4.2 martin
3475 1.12.4.2 martin static int
3476 1.12.4.2 martin mcx_set_driver_version(struct mcx_softc *sc)
3477 1.12.4.2 martin {
3478 1.12.4.2 martin struct mcx_dmamem mxm;
3479 1.12.4.2 martin struct mcx_cmdq_entry *cqe;
3480 1.12.4.2 martin struct mcx_cmd_set_driver_version_in *in;
3481 1.12.4.2 martin struct mcx_cmd_set_driver_version_out *out;
3482 1.12.4.2 martin int error;
3483 1.12.4.2 martin int token;
3484 1.12.4.2 martin uint8_t status;
3485 1.12.4.2 martin
3486 1.12.4.2 martin cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3487 1.12.4.2 martin token = mcx_cmdq_token(sc);
3488 1.12.4.2 martin mcx_cmdq_init(sc, cqe, sizeof(*in) +
3489 1.12.4.2 martin sizeof(struct mcx_cmd_set_driver_version), sizeof(*out), token);
3490 1.12.4.2 martin
3491 1.12.4.2 martin in = mcx_cmdq_in(cqe);
3492 1.12.4.2 martin in->cmd_opcode = htobe16(MCX_CMD_SET_DRIVER_VERSION);
3493 1.12.4.2 martin in->cmd_op_mod = htobe16(0);
3494 1.12.4.2 martin
3495 1.12.4.2 martin if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
3496 1.12.4.2 martin &cqe->cq_input_ptr, token) != 0) {
3497 1.12.4.2 martin printf(", unable to allocate set driver version mailboxen\n");
3498 1.12.4.2 martin return (-1);
3499 1.12.4.2 martin }
3500 1.12.4.2 martin strlcpy(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)),
3501 1.12.4.2 martin "OpenBSD,mcx,1.000.000000", MCX_CMDQ_MAILBOX_DATASIZE);
3502 1.12.4.2 martin
3503 1.12.4.2 martin mcx_cmdq_mboxes_sign(&mxm, 1);
3504 1.12.4.2 martin mcx_cmdq_post(sc, cqe, 0);
3505 1.12.4.2 martin
3506 1.12.4.2 martin error = mcx_cmdq_poll(sc, cqe, 1000);
3507 1.12.4.2 martin if (error != 0) {
3508 1.12.4.2 martin printf(", set driver version timeout\n");
3509 1.12.4.2 martin goto free;
3510 1.12.4.2 martin }
3511 1.12.4.2 martin if (mcx_cmdq_verify(cqe) != 0) {
3512 1.12.4.2 martin printf(", set driver version command corrupt\n");
3513 1.12.4.2 martin goto free;
3514 1.12.4.2 martin }
3515 1.12.4.2 martin
3516 1.12.4.2 martin status = cqe->cq_output_data[0];
3517 1.12.4.2 martin if (status != MCX_CQ_STATUS_OK) {
3518 1.12.4.2 martin printf(", set driver version failed (%x)\n", status);
3519 1.12.4.2 martin error = -1;
3520 1.12.4.2 martin goto free;
3521 1.12.4.2 martin }
3522 1.12.4.2 martin
3523 1.12.4.2 martin free:
3524 1.12.4.2 martin mcx_dmamem_free(sc, &mxm);
3525 1.12.4.2 martin
3526 1.12.4.2 martin return (error);
3527 1.12.4.2 martin }
3528 1.12.4.2 martin
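/*
 * Apply interface flag changes: install or remove the promiscuous and
 * all-multicast catch-all flow table entries, then refresh the NIC
 * vport context with the hard MTU and promisc settings.
 */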
3529 1.12.4.2 martin static int
3530 1.12.4.2 martin mcx_iff(struct mcx_softc *sc)
3531 1.12.4.2 martin {
3532 1.12.4.2 martin struct ifnet *ifp = &sc->sc_ec.ec_if;
3533 1.12.4.2 martin struct mcx_dmamem mxm;
3534 1.12.4.2 martin struct mcx_cmdq_entry *cqe;
3535 1.12.4.2 martin struct mcx_cmd_modify_nic_vport_context_in *in;
3536 1.12.4.2 martin struct mcx_cmd_modify_nic_vport_context_out *out;
3537 1.12.4.2 martin struct mcx_nic_vport_ctx *ctx;
3538 1.12.4.2 martin int error;
3539 1.12.4.2 martin int token;
3540 1.12.4.2 martin int insize;
3541 1.12.4.2 martin
3542 1.12.4.2 martin /* enable or disable the promisc flow */
3543 1.12.4.2 martin if (ISSET(ifp->if_flags, IFF_PROMISC)) {
3544 1.12.4.2 martin if (sc->sc_promisc_flow_enabled == 0) {
3545 1.12.4.2 martin mcx_set_flow_table_entry(sc, MCX_FLOW_GROUP_PROMISC,
3546 1.12.4.2 martin 0, NULL);
3547 1.12.4.2 martin sc->sc_promisc_flow_enabled = 1;
3548 1.12.4.2 martin }
3549 1.12.4.2 martin } else if (sc->sc_promisc_flow_enabled != 0) {
3550 1.12.4.2 martin mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_PROMISC, 0);
3551 1.12.4.2 martin sc->sc_promisc_flow_enabled = 0;
3552 1.12.4.2 martin }
3553 1.12.4.2 martin
3554 1.12.4.2 martin /* enable or disable the all-multicast flow */
3555 1.12.4.2 martin if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
3556 1.12.4.2 martin if (sc->sc_allmulti_flow_enabled == 0) {
3557 1.12.4.2 martin uint8_t mcast[ETHER_ADDR_LEN];
3558 1.12.4.2 martin
3559 1.12.4.2 martin memset(mcast, 0, sizeof(mcast));
3560 1.12.4.2 martin mcast[0] = 0x01;
3561 1.12.4.2 martin mcx_set_flow_table_entry(sc, MCX_FLOW_GROUP_ALLMULTI,
3562 1.12.4.2 martin 0, mcast);
3563 1.12.4.2 martin sc->sc_allmulti_flow_enabled = 1;
3564 1.12.4.2 martin }
3565 1.12.4.2 martin } else if (sc->sc_allmulti_flow_enabled != 0) {
3566 1.12.4.2 martin mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_ALLMULTI, 0);
3567 1.12.4.2 martin sc->sc_allmulti_flow_enabled = 0;
3568 1.12.4.2 martin }
3569 1.12.4.2 martin
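	/*
	 * the nic vport context sits 240 bytes into the mailbox data
	 * for this command, so allow for that both in the input size
	 * and when locating ctx below.
	 */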
3570 1.12.4.2 martin insize = sizeof(struct mcx_nic_vport_ctx) + 240;
3571 1.12.4.2 martin
3572 1.12.4.2 martin cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3573 1.12.4.2 martin token = mcx_cmdq_token(sc);
3574 1.12.4.2 martin mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);
3575 1.12.4.2 martin
3576 1.12.4.2 martin in = mcx_cmdq_in(cqe);
3577 1.12.4.2 martin in->cmd_opcode = htobe16(MCX_CMD_MODIFY_NIC_VPORT_CONTEXT);
3578 1.12.4.2 martin in->cmd_op_mod = htobe16(0);
3579 1.12.4.2 martin in->cmd_field_select = htobe32(
3580 1.12.4.2 martin MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_PROMISC |
3581 1.12.4.2 martin MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_MTU);
3582 1.12.4.2 martin
3583 1.12.4.2 martin if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
3584 1.12.4.2 martin printf(", unable to allocate modify nic vport context mailboxen\n");
3585 1.12.4.2 martin return (-1);
3586 1.12.4.2 martin }
3587 1.12.4.2 martin ctx = (struct mcx_nic_vport_ctx *)
3588 1.12.4.2 martin (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 240);
3589 1.12.4.2 martin ctx->vp_mtu = htobe32(sc->sc_hardmtu);
3590 1.12.4.2 martin /*
3591 1.12.4.2 martin * always leave promisc-all enabled on the vport since we can't give it
3592 1.12.4.2 martin * a vlan list, and we're already doing multicast filtering in the flow
3593 1.12.4.2 martin * table.
3594 1.12.4.2 martin */
3595 1.12.4.2 martin ctx->vp_flags = htobe16(MCX_NIC_VPORT_CTX_PROMISC_ALL);
3596 1.12.4.2 martin
3597 1.12.4.2 martin mcx_cmdq_mboxes_sign(&mxm, 1);
3598 1.12.4.2 martin mcx_cmdq_post(sc, cqe, 0);
3599 1.12.4.2 martin
3600 1.12.4.2 martin error = mcx_cmdq_poll(sc, cqe, 1000);
3601 1.12.4.2 martin if (error != 0) {
3602 1.12.4.2 martin printf(", modify nic vport context timeout\n");
3603 1.12.4.2 martin goto free;
3604 1.12.4.2 martin }
3605 1.12.4.2 martin if (mcx_cmdq_verify(cqe) != 0) {
3606 1.12.4.2 martin printf(", modify nic vport context command corrupt\n");
3607 1.12.4.2 martin goto free;
3608 1.12.4.2 martin }
3609 1.12.4.2 martin
3610 1.12.4.2 martin out = mcx_cmdq_out(cqe);
3611 1.12.4.2 martin if (out->cmd_status != MCX_CQ_STATUS_OK) {
3612 1.12.4.2 martin printf(", modify nic vport context failed (%x, %x)\n",
3613 1.12.4.2 martin out->cmd_status, out->cmd_syndrome);
3614 1.12.4.2 martin error = -1;
3615 1.12.4.2 martin goto free;
3616 1.12.4.2 martin }
3617 1.12.4.2 martin
3618 1.12.4.2 martin free:
3619 1.12.4.2 martin mcx_dmamem_free(sc, &mxm);
3620 1.12.4.2 martin
3621 1.12.4.2 martin return (error);
3622 1.12.4.2 martin }
3623 1.12.4.2 martin
3624 1.12.4.2 martin static int
3625 1.12.4.2 martin mcx_alloc_uar(struct mcx_softc *sc)
3626 1.12.4.2 martin {
3627 1.12.4.2 martin struct mcx_cmdq_entry *cqe;
3628 1.12.4.2 martin struct mcx_cmd_alloc_uar_in *in;
3629 1.12.4.2 martin struct mcx_cmd_alloc_uar_out *out;
3630 1.12.4.2 martin int error;
3631 1.12.4.2 martin
3632 1.12.4.2 martin cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3633 1.12.4.2 martin mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3634 1.12.4.2 martin
3635 1.12.4.2 martin in = mcx_cmdq_in(cqe);
3636 1.12.4.2 martin in->cmd_opcode = htobe16(MCX_CMD_ALLOC_UAR);
3637 1.12.4.2 martin in->cmd_op_mod = htobe16(0);
3638 1.12.4.2 martin
3639 1.12.4.2 martin mcx_cmdq_post(sc, cqe, 0);
3640 1.12.4.2 martin
3641 1.12.4.2 martin error = mcx_cmdq_poll(sc, cqe, 1000);
3642 1.12.4.2 martin if (error != 0) {
3643 1.12.4.2 martin printf(", alloc uar timeout\n");
3644 1.12.4.2 martin return (-1);
3645 1.12.4.2 martin }
3646 1.12.4.2 martin if (mcx_cmdq_verify(cqe) != 0) {
3647 1.12.4.2 martin printf(", alloc uar command corrupt\n");
3648 1.12.4.2 martin return (-1);
3649 1.12.4.2 martin }
3650 1.12.4.2 martin
3651 1.12.4.2 martin out = mcx_cmdq_out(cqe);
3652 1.12.4.2 martin if (out->cmd_status != MCX_CQ_STATUS_OK) {
3653 1.12.4.2 martin printf(", alloc uar failed (%x)\n", out->cmd_status);
3654 1.12.4.2 martin return (-1);
3655 1.12.4.2 martin }
3656 1.12.4.2 martin
3657 1.12.4.2 martin sc->sc_uar = be32toh(out->cmd_uar);
3658 1.12.4.2 martin
3659 1.12.4.2 martin return (0);
3660 1.12.4.2 martin }
3661 1.12.4.2 martin
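/*
 * Create the event queue: allocate and pre-initialise the EQ entries,
 * pass their page addresses after the mailbox header, subscribe to the
 * event types the driver handles, then record the EQ number and arm it.
 */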
3662 1.12.4.2 martin static int
3663 1.12.4.2 martin mcx_create_eq(struct mcx_softc *sc)
3664 1.12.4.2 martin {
3665 1.12.4.2 martin struct mcx_cmdq_entry *cqe;
3666 1.12.4.2 martin struct mcx_dmamem mxm;
3667 1.12.4.2 martin struct mcx_cmd_create_eq_in *in;
3668 1.12.4.2 martin struct mcx_cmd_create_eq_mb_in *mbin;
3669 1.12.4.2 martin struct mcx_cmd_create_eq_out *out;
3670 1.12.4.2 martin struct mcx_eq_entry *eqe;
3671 1.12.4.2 martin int error;
3672 1.12.4.2 martin uint64_t *pas;
3673 1.12.4.2 martin int insize, npages, paslen, i, token;
3674 1.12.4.2 martin
3675 1.12.4.2 martin sc->sc_eq_cons = 0;
3676 1.12.4.2 martin
3677 1.12.4.2 martin npages = howmany((1 << MCX_LOG_EQ_SIZE) * sizeof(struct mcx_eq_entry),
3678 1.12.4.2 martin MCX_PAGE_SIZE);
3679 1.12.4.2 martin paslen = npages * sizeof(*pas);
3680 1.12.4.2 martin insize = sizeof(struct mcx_cmd_create_eq_mb_in) + paslen;
3681 1.12.4.2 martin
3682 1.12.4.2 martin if (mcx_dmamem_alloc(sc, &sc->sc_eq_mem, npages * MCX_PAGE_SIZE,
3683 1.12.4.2 martin MCX_PAGE_SIZE) != 0) {
3684 1.12.4.2 martin printf(", unable to allocate event queue memory\n");
3685 1.12.4.2 martin return (-1);
3686 1.12.4.2 martin }
3687 1.12.4.2 martin
3688 1.12.4.2 martin eqe = (struct mcx_eq_entry *)MCX_DMA_KVA(&sc->sc_eq_mem);
3689 1.12.4.2 martin for (i = 0; i < (1 << MCX_LOG_EQ_SIZE); i++) {
3690 1.12.4.2 martin eqe[i].eq_owner = MCX_EQ_ENTRY_OWNER_INIT;
3691 1.12.4.2 martin }
3692 1.12.4.2 martin
3693 1.12.4.2 martin cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3694 1.12.4.2 martin token = mcx_cmdq_token(sc);
3695 1.12.4.2 martin mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);
3696 1.12.4.2 martin
3697 1.12.4.2 martin in = mcx_cmdq_in(cqe);
3698 1.12.4.2 martin in->cmd_opcode = htobe16(MCX_CMD_CREATE_EQ);
3699 1.12.4.2 martin in->cmd_op_mod = htobe16(0);
3700 1.12.4.2 martin
3701 1.12.4.2 martin if (mcx_cmdq_mboxes_alloc(sc, &mxm, howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
3702 1.12.4.2 martin &cqe->cq_input_ptr, token) != 0) {
3703 1.12.4.2 martin printf(", unable to allocate create eq mailboxen\n");
3704 1.12.4.2 martin return (-1);
3705 1.12.4.2 martin }
3706 1.12.4.2 martin mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
3707 1.12.4.2 martin mbin->cmd_eq_ctx.eq_uar_size = htobe32(
3708 1.12.4.2 martin (MCX_LOG_EQ_SIZE << MCX_EQ_CTX_LOG_EQ_SIZE_SHIFT) | sc->sc_uar);
3709 1.12.4.2 martin mbin->cmd_event_bitmask = htobe64(
3710 1.12.4.2 martin (1ull << MCX_EVENT_TYPE_INTERNAL_ERROR) |
3711 1.12.4.2 martin (1ull << MCX_EVENT_TYPE_PORT_CHANGE) |
3712 1.12.4.2 martin (1ull << MCX_EVENT_TYPE_CMD_COMPLETION) |
3713 1.12.4.2 martin (1ull << MCX_EVENT_TYPE_PAGE_REQUEST));
3714 1.12.4.2 martin
3715 1.12.4.2 martin /* physical addresses follow the mailbox in data */
3716 1.12.4.2 martin pas = (uint64_t *)(mbin + 1);
3717 1.12.4.2 martin for (i = 0; i < npages; i++) {
3718 1.12.4.2 martin pas[i] = htobe64(MCX_DMA_DVA(&sc->sc_eq_mem) +
3719 1.12.4.2 martin (i * MCX_PAGE_SIZE));
3720 1.12.4.2 martin }
3721 1.12.4.2 martin mcx_cmdq_mboxes_sign(&mxm, howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE));
3722 1.12.4.2 martin mcx_cmdq_post(sc, cqe, 0);
3723 1.12.4.2 martin
3724 1.12.4.2 martin error = mcx_cmdq_poll(sc, cqe, 1000);
3725 1.12.4.2 martin if (error != 0) {
3726 1.12.4.2 martin printf(", create eq timeout\n");
3727 1.12.4.2 martin goto free;
3728 1.12.4.2 martin }
3729 1.12.4.2 martin if (mcx_cmdq_verify(cqe) != 0) {
3730 1.12.4.2 martin printf(", create eq command corrupt\n");
3731 1.12.4.2 martin goto free;
3732 1.12.4.2 martin }
3733 1.12.4.2 martin
3734 1.12.4.2 martin out = mcx_cmdq_out(cqe);
3735 1.12.4.2 martin if (out->cmd_status != MCX_CQ_STATUS_OK) {
3736 1.12.4.2 martin printf(", create eq failed (%x, %x)\n", out->cmd_status,
3737 1.12.4.2 martin be32toh(out->cmd_syndrome));
3738 1.12.4.2 martin error = -1;
3739 1.12.4.2 martin goto free;
3740 1.12.4.2 martin }
3741 1.12.4.2 martin
3742 1.12.4.2 martin sc->sc_eqn = be32toh(out->cmd_eqn);
3743 1.12.4.2 martin mcx_arm_eq(sc);
3744 1.12.4.2 martin free:
3745 1.12.4.2 martin mcx_dmamem_free(sc, &mxm);
3746 1.12.4.2 martin return (error);
3747 1.12.4.2 martin }
3748 1.12.4.2 martin
3749 1.12.4.2 martin static int
3750 1.12.4.2 martin mcx_alloc_pd(struct mcx_softc *sc)
3751 1.12.4.2 martin {
3752 1.12.4.2 martin struct mcx_cmdq_entry *cqe;
3753 1.12.4.2 martin struct mcx_cmd_alloc_pd_in *in;
3754 1.12.4.2 martin struct mcx_cmd_alloc_pd_out *out;
3755 1.12.4.2 martin int error;
3756 1.12.4.2 martin
3757 1.12.4.2 martin cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3758 1.12.4.2 martin mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3759 1.12.4.2 martin
3760 1.12.4.2 martin in = mcx_cmdq_in(cqe);
3761 1.12.4.2 martin in->cmd_opcode = htobe16(MCX_CMD_ALLOC_PD);
3762 1.12.4.2 martin in->cmd_op_mod = htobe16(0);
3763 1.12.4.2 martin
3764 1.12.4.2 martin mcx_cmdq_post(sc, cqe, 0);
3765 1.12.4.2 martin
3766 1.12.4.2 martin error = mcx_cmdq_poll(sc, cqe, 1000);
3767 1.12.4.2 martin if (error != 0) {
3768 1.12.4.2 martin printf(", alloc pd timeout\n");
3769 1.12.4.2 martin return (-1);
3770 1.12.4.2 martin }
3771 1.12.4.2 martin if (mcx_cmdq_verify(cqe) != 0) {
3772 1.12.4.2 martin printf(", alloc pd command corrupt\n");
3773 1.12.4.2 martin return (-1);
3774 1.12.4.2 martin }
3775 1.12.4.2 martin
3776 1.12.4.2 martin out = mcx_cmdq_out(cqe);
3777 1.12.4.2 martin if (out->cmd_status != MCX_CQ_STATUS_OK) {
3778 1.12.4.2 martin printf(", alloc pd failed (%x)\n", out->cmd_status);
3779 1.12.4.2 martin return (-1);
3780 1.12.4.2 martin }
3781 1.12.4.2 martin
3782 1.12.4.2 martin sc->sc_pd = be32toh(out->cmd_pd);
3783 1.12.4.2 martin return (0);
3784 1.12.4.2 martin }
3785 1.12.4.2 martin
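/*
 * Allocate a transport domain.  The TIR and TIS are both created
 * inside this domain, referenced via sc_tdomain.
 */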
3786 1.12.4.2 martin static int
3787 1.12.4.2 martin mcx_alloc_tdomain(struct mcx_softc *sc)
3788 1.12.4.2 martin {
3789 1.12.4.2 martin struct mcx_cmdq_entry *cqe;
3790 1.12.4.2 martin struct mcx_cmd_alloc_td_in *in;
3791 1.12.4.2 martin struct mcx_cmd_alloc_td_out *out;
3792 1.12.4.2 martin int error;
3793 1.12.4.2 martin
3794 1.12.4.2 martin cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3795 1.12.4.2 martin mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3796 1.12.4.2 martin
3797 1.12.4.2 martin in = mcx_cmdq_in(cqe);
3798 1.12.4.2 martin in->cmd_opcode = htobe16(MCX_CMD_ALLOC_TRANSPORT_DOMAIN);
3799 1.12.4.2 martin in->cmd_op_mod = htobe16(0);
3800 1.12.4.2 martin
3801 1.12.4.2 martin mcx_cmdq_post(sc, cqe, 0);
3802 1.12.4.2 martin
3803 1.12.4.2 martin error = mcx_cmdq_poll(sc, cqe, 1000);
3804 1.12.4.2 martin if (error != 0) {
3805 1.12.4.2 martin printf(", alloc transport domain timeout\n");
3806 1.12.4.2 martin return (-1);
3807 1.12.4.2 martin }
3808 1.12.4.2 martin if (mcx_cmdq_verify(cqe) != 0) {
3809 1.12.4.2 martin printf(", alloc transport domain command corrupt\n");
3810 1.12.4.2 martin return (-1);
3811 1.12.4.2 martin }
3812 1.12.4.2 martin
3813 1.12.4.2 martin out = mcx_cmdq_out(cqe);
3814 1.12.4.2 martin if (out->cmd_status != MCX_CQ_STATUS_OK) {
3815 1.12.4.2 martin printf(", alloc transport domain failed (%x)\n",
3816 1.12.4.2 martin out->cmd_status);
3817 1.12.4.2 martin return (-1);
3818 1.12.4.2 martin }
3819 1.12.4.2 martin
3820 1.12.4.2 martin sc->sc_tdomain = be32toh(out->cmd_tdomain);
3821 1.12.4.2 martin return (0);
3822 1.12.4.2 martin }
3823 1.12.4.2 martin
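/*
 * Read the permanent MAC address out of the NIC vport context.  The
 * address occupies the low six bytes of the eight-byte vp_perm_addr
 * field, hence the copy starting at offset 2.
 */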
3824 1.12.4.2 martin static int
3825 1.12.4.2 martin mcx_query_nic_vport_context(struct mcx_softc *sc, uint8_t *enaddr)
3826 1.12.4.2 martin {
3827 1.12.4.2 martin struct mcx_dmamem mxm;
3828 1.12.4.2 martin struct mcx_cmdq_entry *cqe;
3829 1.12.4.2 martin struct mcx_cmd_query_nic_vport_context_in *in;
3830 1.12.4.2 martin struct mcx_cmd_query_nic_vport_context_out *out;
3831 1.12.4.2 martin struct mcx_nic_vport_ctx *ctx;
3832 1.12.4.2 martin uint8_t *addr;
3833 1.12.4.2 martin int error, token, i;
3834 1.12.4.2 martin
3835 1.12.4.2 martin cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3836 1.12.4.2 martin token = mcx_cmdq_token(sc);
3837 1.12.4.2 martin mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*ctx), token);
3838 1.12.4.2 martin
3839 1.12.4.2 martin in = mcx_cmdq_in(cqe);
3840 1.12.4.2 martin in->cmd_opcode = htobe16(MCX_CMD_QUERY_NIC_VPORT_CONTEXT);
3841 1.12.4.2 martin in->cmd_op_mod = htobe16(0);
3842 1.12.4.2 martin in->cmd_allowed_list_type = 0;
3843 1.12.4.2 martin
3844 1.12.4.2 martin if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_output_ptr, token) != 0) {
3845 1.12.4.2 martin printf(", unable to allocate query nic vport context mailboxen\n");
3846 1.12.4.2 martin return (-1);
3847 1.12.4.2 martin }
3848 1.12.4.2 martin mcx_cmdq_mboxes_sign(&mxm, 1);
3849 1.12.4.2 martin mcx_cmdq_post(sc, cqe, 0);
3850 1.12.4.2 martin
3851 1.12.4.2 martin error = mcx_cmdq_poll(sc, cqe, 1000);
3852 1.12.4.2 martin if (error != 0) {
3853 1.12.4.2 martin printf(", query nic vport context timeout\n");
3854 1.12.4.2 martin goto free;
3855 1.12.4.2 martin }
3856 1.12.4.2 martin if (mcx_cmdq_verify(cqe) != 0) {
3857 1.12.4.2 martin printf(", query nic vport context command corrupt\n");
3858 1.12.4.2 martin goto free;
3859 1.12.4.2 martin }
3860 1.12.4.2 martin
3861 1.12.4.2 martin out = mcx_cmdq_out(cqe);
3862 1.12.4.2 martin if (out->cmd_status != MCX_CQ_STATUS_OK) {
3863 1.12.4.2 martin printf(", query nic vport context failed (%x, %x)\n",
3864 1.12.4.2 martin out->cmd_status, be32toh(out->cmd_syndrome));
3865 1.12.4.2 martin error = -1;
3866 1.12.4.2 martin goto free;
3867 1.12.4.2 martin }
3868 1.12.4.2 martin
3869 1.12.4.2 martin ctx = (struct mcx_nic_vport_ctx *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
3870 1.12.4.2 martin addr = (uint8_t *)&ctx->vp_perm_addr;
3871 1.12.4.2 martin for (i = 0; i < ETHER_ADDR_LEN; i++) {
3872 1.12.4.2 martin enaddr[i] = addr[i + 2];
3873 1.12.4.2 martin }
3874 1.12.4.2 martin free:
3875 1.12.4.2 martin mcx_dmamem_free(sc, &mxm);
3876 1.12.4.2 martin
3877 1.12.4.2 martin return (error);
3878 1.12.4.2 martin }
3879 1.12.4.2 martin
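/*
 * Query special contexts to retrieve the reserved lkey reported by
 * the firmware, which is stored in sc_lkey.
 */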
3880 1.12.4.2 martin static int
3881 1.12.4.2 martin mcx_query_special_contexts(struct mcx_softc *sc)
3882 1.12.4.2 martin {
3883 1.12.4.2 martin struct mcx_cmdq_entry *cqe;
3884 1.12.4.2 martin struct mcx_cmd_query_special_ctx_in *in;
3885 1.12.4.2 martin struct mcx_cmd_query_special_ctx_out *out;
3886 1.12.4.2 martin int error;
3887 1.12.4.2 martin
3888 1.12.4.2 martin cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3889 1.12.4.2 martin mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3890 1.12.4.2 martin
3891 1.12.4.2 martin in = mcx_cmdq_in(cqe);
3892 1.12.4.2 martin in->cmd_opcode = htobe16(MCX_CMD_QUERY_SPECIAL_CONTEXTS);
3893 1.12.4.2 martin in->cmd_op_mod = htobe16(0);
3894 1.12.4.2 martin
3895 1.12.4.2 martin mcx_cmdq_post(sc, cqe, 0);
3896 1.12.4.2 martin
3897 1.12.4.2 martin error = mcx_cmdq_poll(sc, cqe, 1000);
3898 1.12.4.2 martin if (error != 0) {
3899 1.12.4.2 martin printf(", query special contexts timeout\n");
3900 1.12.4.2 martin return (-1);
3901 1.12.4.2 martin }
3902 1.12.4.2 martin if (mcx_cmdq_verify(cqe) != 0) {
3903 1.12.4.2 martin printf(", query special contexts command corrupt\n");
3904 1.12.4.2 martin return (-1);
3905 1.12.4.2 martin }
3906 1.12.4.2 martin
3907 1.12.4.2 martin out = mcx_cmdq_out(cqe);
3908 1.12.4.2 martin if (out->cmd_status != MCX_CQ_STATUS_OK) {
3909 1.12.4.2 martin printf(", query special contexts failed (%x)\n",
3910 1.12.4.2 martin out->cmd_status);
3911 1.12.4.2 martin return (-1);
3912 1.12.4.2 martin }
3913 1.12.4.2 martin
3914 1.12.4.2 martin sc->sc_lkey = be32toh(out->cmd_resd_lkey);
3915 1.12.4.2 martin return (0);
3916 1.12.4.2 martin }
3917 1.12.4.2 martin
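/*
 * Set the port MTU: read the PMTU register to find the maximum MTU
 * the port supports, clamp the requested value to it, write the
 * result back as the admin MTU and remember it in sc_hardmtu.
 */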
3918 1.12.4.2 martin static int
3919 1.12.4.2 martin mcx_set_port_mtu(struct mcx_softc *sc, int mtu)
3920 1.12.4.2 martin {
3921 1.12.4.2 martin struct mcx_reg_pmtu pmtu;
3922 1.12.4.2 martin int error;
3923 1.12.4.2 martin
3924 1.12.4.2 martin /* read max mtu */
3925 1.12.4.2 martin memset(&pmtu, 0, sizeof(pmtu));
3926 1.12.4.2 martin pmtu.rp_local_port = 1;
3927 1.12.4.2 martin error = mcx_access_hca_reg(sc, MCX_REG_PMTU, MCX_REG_OP_READ, &pmtu,
3928 1.12.4.2 martin sizeof(pmtu));
3929 1.12.4.2 martin if (error != 0) {
3930 1.12.4.2 martin printf(", unable to get port MTU\n");
3931 1.12.4.2 martin return error;
3932 1.12.4.2 martin }
3933 1.12.4.2 martin
3934 1.12.4.2 martin mtu = uimin(mtu, be16toh(pmtu.rp_max_mtu));
3935 1.12.4.2 martin pmtu.rp_admin_mtu = htobe16(mtu);
3936 1.12.4.2 martin error = mcx_access_hca_reg(sc, MCX_REG_PMTU, MCX_REG_OP_WRITE, &pmtu,
3937 1.12.4.2 martin sizeof(pmtu));
3938 1.12.4.2 martin if (error != 0) {
3939 1.12.4.2 martin printf(", unable to set port MTU\n");
3940 1.12.4.2 martin return error;
3941 1.12.4.2 martin }
3942 1.12.4.2 martin
3943 1.12.4.2 martin sc->sc_hardmtu = mtu;
3944 1.12.4.2 martin return 0;
3945 1.12.4.2 martin }
3946 1.12.4.2 martin
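/*
 * Create a completion queue.  The CQ memory is allocated page-aligned
 * with the owner flag set in every entry, and its page addresses
 * follow the CQ context in the command mailboxes.  On success the CQ
 * number and doorbell location are recorded and the queue is armed.
 */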
3947 1.12.4.2 martin static int
3948 1.12.4.2 martin mcx_create_cq(struct mcx_softc *sc, int eqn)
3949 1.12.4.2 martin {
3950 1.12.4.2 martin struct mcx_cmdq_entry *cmde;
3951 1.12.4.2 martin struct mcx_cq_entry *cqe;
3952 1.12.4.2 martin struct mcx_cq *cq;
3953 1.12.4.2 martin struct mcx_dmamem mxm;
3954 1.12.4.2 martin struct mcx_cmd_create_cq_in *in;
3955 1.12.4.2 martin struct mcx_cmd_create_cq_mb_in *mbin;
3956 1.12.4.2 martin struct mcx_cmd_create_cq_out *out;
3957 1.12.4.2 martin int error;
3958 1.12.4.2 martin uint64_t *pas;
3959 1.12.4.2 martin int insize, npages, paslen, i, token;
3960 1.12.4.2 martin
3961 1.12.4.2 martin if (sc->sc_num_cq >= MCX_MAX_CQS) {
3962 1.12.4.2 martin printf("%s: tried to create too many cqs\n", DEVNAME(sc));
3963 1.12.4.2 martin return (-1);
3964 1.12.4.2 martin }
3965 1.12.4.2 martin cq = &sc->sc_cq[sc->sc_num_cq];
3966 1.12.4.2 martin
3967 1.12.4.2 martin npages = howmany((1 << MCX_LOG_CQ_SIZE) * sizeof(struct mcx_cq_entry),
3968 1.12.4.2 martin MCX_PAGE_SIZE);
3969 1.12.4.2 martin paslen = npages * sizeof(*pas);
3970 1.12.4.2 martin insize = sizeof(struct mcx_cmd_create_cq_mb_in) + paslen;
3971 1.12.4.2 martin
3972 1.12.4.2 martin if (mcx_dmamem_alloc(sc, &cq->cq_mem, npages * MCX_PAGE_SIZE,
3973 1.12.4.2 martin MCX_PAGE_SIZE) != 0) {
3974 1.12.4.2 martin printf("%s: unable to allocate completion queue memory\n",
3975 1.12.4.2 martin DEVNAME(sc));
3976 1.12.4.2 martin return (-1);
3977 1.12.4.2 martin }
3978 1.12.4.2 martin cqe = MCX_DMA_KVA(&cq->cq_mem);
3979 1.12.4.2 martin for (i = 0; i < (1 << MCX_LOG_CQ_SIZE); i++) {
3980 1.12.4.2 martin cqe[i].cq_opcode_owner = MCX_CQ_ENTRY_FLAG_OWNER;
3981 1.12.4.2 martin }
3982 1.12.4.2 martin
3983 1.12.4.2 martin cmde = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3984 1.12.4.2 martin token = mcx_cmdq_token(sc);
3985 1.12.4.2 martin mcx_cmdq_init(sc, cmde, sizeof(*in) + insize, sizeof(*out), token);
3986 1.12.4.2 martin
3987 1.12.4.2 martin in = mcx_cmdq_in(cmde);
3988 1.12.4.2 martin in->cmd_opcode = htobe16(MCX_CMD_CREATE_CQ);
3989 1.12.4.2 martin in->cmd_op_mod = htobe16(0);
3990 1.12.4.2 martin
3991 1.12.4.2 martin if (mcx_cmdq_mboxes_alloc(sc, &mxm, howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
3992 1.12.4.2 martin &cmde->cq_input_ptr, token) != 0) {
3993 1.12.4.2 martin printf("%s: unable to allocate create cq mailboxen\n", DEVNAME(sc));
3994 1.12.4.2 martin error = -1;
3995 1.12.4.2 martin goto free;
3996 1.12.4.2 martin }
3997 1.12.4.2 martin mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
3998 1.12.4.2 martin mbin->cmd_cq_ctx.cq_uar_size = htobe32(
3999 1.12.4.2 martin (MCX_LOG_CQ_SIZE << MCX_CQ_CTX_LOG_CQ_SIZE_SHIFT) | sc->sc_uar);
4000 1.12.4.2 martin mbin->cmd_cq_ctx.cq_eqn = htobe32(eqn);
4001 1.12.4.2 martin mbin->cmd_cq_ctx.cq_period_max_count = htobe32(
4002 1.12.4.2 martin (MCX_CQ_MOD_PERIOD << MCX_CQ_CTX_PERIOD_SHIFT) |
4003 1.12.4.2 martin MCX_CQ_MOD_COUNTER);
4004 1.12.4.2 martin mbin->cmd_cq_ctx.cq_doorbell = htobe64(
4005 1.12.4.2 martin MCX_DMA_DVA(&sc->sc_doorbell_mem) +
4006 1.12.4.2 martin MCX_CQ_DOORBELL_OFFSET + (MCX_CQ_DOORBELL_SIZE * sc->sc_num_cq));
4007 1.12.4.2 martin
4008 1.12.4.2 martin /* physical addresses follow the mailbox in data */
4009 1.12.4.2 martin pas = (uint64_t *)(mbin + 1);
4010 1.12.4.2 martin for (i = 0; i < npages; i++) {
4011 1.12.4.2 martin pas[i] = htobe64(MCX_DMA_DVA(&cq->cq_mem) + (i * MCX_PAGE_SIZE));
4012 1.12.4.2 martin }
4013 1.12.4.2 martin mcx_cmdq_post(sc, cmde, 0);
4014 1.12.4.2 martin
4015 1.12.4.2 martin error = mcx_cmdq_poll(sc, cmde, 1000);
4016 1.12.4.2 martin if (error != 0) {
4017 1.12.4.2 martin printf("%s: create cq timeout\n", DEVNAME(sc));
4018 1.12.4.2 martin goto free;
4019 1.12.4.2 martin }
4020 1.12.4.2 martin if (mcx_cmdq_verify(cmde) != 0) {
4021 1.12.4.2 martin printf("%s: create cq command corrupt\n", DEVNAME(sc));
4022 1.12.4.2 martin goto free;
4023 1.12.4.2 martin }
4024 1.12.4.2 martin
4025 1.12.4.2 martin out = mcx_cmdq_out(cmde);
4026 1.12.4.2 martin if (out->cmd_status != MCX_CQ_STATUS_OK) {
4027 1.12.4.2 martin printf("%s: create cq failed (%x, %x)\n", DEVNAME(sc),
4028 1.12.4.2 martin out->cmd_status, be32toh(out->cmd_syndrome));
4029 1.12.4.2 martin error = -1;
4030 1.12.4.2 martin goto free;
4031 1.12.4.2 martin }
4032 1.12.4.2 martin
4033 1.12.4.2 martin cq->cq_n = be32toh(out->cmd_cqn);
4034 1.12.4.2 martin cq->cq_cons = 0;
4035 1.12.4.2 martin cq->cq_count = 0;
4036 1.12.4.2 martin cq->cq_doorbell = (void *)((uint8_t *)MCX_DMA_KVA(&sc->sc_doorbell_mem) +
4037 1.12.4.2 martin MCX_CQ_DOORBELL_OFFSET + (MCX_CQ_DOORBELL_SIZE * sc->sc_num_cq));
4038 1.12.4.2 martin mcx_arm_cq(sc, cq);
4039 1.12.4.2 martin sc->sc_num_cq++;
4040 1.12.4.2 martin
4041 1.12.4.2 martin free:
4042 1.12.4.2 martin mcx_dmamem_free(sc, &mxm);
4043 1.12.4.2 martin return (error);
4044 1.12.4.2 martin }
4045 1.12.4.2 martin
4046 1.12.4.2 martin static int
4047 1.12.4.2 martin mcx_destroy_cq(struct mcx_softc *sc, int index)
4048 1.12.4.2 martin {
4049 1.12.4.2 martin struct mcx_cmdq_entry *cqe;
4050 1.12.4.2 martin struct mcx_cmd_destroy_cq_in *in;
4051 1.12.4.2 martin struct mcx_cmd_destroy_cq_out *out;
4052 1.12.4.2 martin int error;
4053 1.12.4.2 martin int token;
4054 1.12.4.2 martin
4055 1.12.4.2 martin cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4056 1.12.4.2 martin token = mcx_cmdq_token(sc);
4057 1.12.4.2 martin mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4058 1.12.4.2 martin
4059 1.12.4.2 martin in = mcx_cmdq_in(cqe);
4060 1.12.4.2 martin in->cmd_opcode = htobe16(MCX_CMD_DESTROY_CQ);
4061 1.12.4.2 martin in->cmd_op_mod = htobe16(0);
4062 1.12.4.2 martin in->cmd_cqn = htobe32(sc->sc_cq[index].cq_n);
4063 1.12.4.2 martin
4064 1.12.4.2 martin mcx_cmdq_post(sc, cqe, 0);
4065 1.12.4.2 martin error = mcx_cmdq_poll(sc, cqe, 1000);
4066 1.12.4.2 martin if (error != 0) {
4067 1.12.4.2 martin printf("%s: destroy cq timeout\n", DEVNAME(sc));
4068 1.12.4.2 martin return error;
4069 1.12.4.2 martin }
4070 1.12.4.2 martin if (mcx_cmdq_verify(cqe) != 0) {
4071 1.12.4.2 martin printf("%s: destroy cq command corrupt\n", DEVNAME(sc));
4072 1.12.4.2 martin return error;
4073 1.12.4.2 martin }
4074 1.12.4.2 martin
4075 1.12.4.2 martin out = mcx_cmdq_out(cqe);
4076 1.12.4.2 martin if (out->cmd_status != MCX_CQ_STATUS_OK) {
4077 1.12.4.2 martin printf("%s: destroy cq failed (%x, %x)\n", DEVNAME(sc),
4078 1.12.4.2 martin out->cmd_status, be32toh(out->cmd_syndrome));
4079 1.12.4.2 martin return -1;
4080 1.12.4.2 martin }
4081 1.12.4.2 martin
4082 1.12.4.2 martin sc->sc_cq[index].cq_n = 0;
4083 1.12.4.2 martin mcx_dmamem_free(sc, &sc->sc_cq[index].cq_mem);
4084 1.12.4.2 martin sc->sc_cq[index].cq_cons = 0;
4085 1.12.4.2 martin sc->sc_cq[index].cq_count = 0;
4086 1.12.4.2 martin return 0;
4087 1.12.4.2 martin }
4088 1.12.4.2 martin
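/*
 * Create the receive queue.  The RQ context is placed 0x10 bytes into
 * the mailbox data and ties the queue to the completion queue, the
 * protection domain and the RQ doorbell; the physical addresses of
 * the queue memory follow it.  The RQ number and doorbell pointer are
 * saved in sc_rqn and sc_rx_doorbell.
 */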
4089 1.12.4.2 martin static int
4090 1.12.4.2 martin mcx_create_rq(struct mcx_softc *sc, int cqn)
4091 1.12.4.2 martin {
4092 1.12.4.2 martin struct mcx_cmdq_entry *cqe;
4093 1.12.4.2 martin struct mcx_dmamem mxm;
4094 1.12.4.2 martin struct mcx_cmd_create_rq_in *in;
4095 1.12.4.2 martin struct mcx_cmd_create_rq_out *out;
4096 1.12.4.2 martin struct mcx_rq_ctx *mbin;
4097 1.12.4.2 martin int error;
4098 1.12.4.2 martin uint64_t *pas;
4099 1.12.4.2 martin uint8_t *doorbell;
4100 1.12.4.2 martin int insize, npages, paslen, i, token;
4101 1.12.4.2 martin
4102 1.12.4.2 martin npages = howmany((1 << MCX_LOG_RQ_SIZE) * sizeof(struct mcx_rq_entry),
4103 1.12.4.2 martin MCX_PAGE_SIZE);
4104 1.12.4.2 martin paslen = npages * sizeof(*pas);
4105 1.12.4.2 martin insize = 0x10 + sizeof(struct mcx_rq_ctx) + paslen;
4106 1.12.4.2 martin
4107 1.12.4.2 martin if (mcx_dmamem_alloc(sc, &sc->sc_rq_mem, npages * MCX_PAGE_SIZE,
4108 1.12.4.2 martin MCX_PAGE_SIZE) != 0) {
4109 1.12.4.2 martin printf("%s: unable to allocate receive queue memory\n",
4110 1.12.4.2 martin DEVNAME(sc));
4111 1.12.4.2 martin return (-1);
4112 1.12.4.2 martin }
4113 1.12.4.2 martin
4114 1.12.4.2 martin cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4115 1.12.4.2 martin token = mcx_cmdq_token(sc);
4116 1.12.4.2 martin mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);
4117 1.12.4.2 martin
4118 1.12.4.2 martin in = mcx_cmdq_in(cqe);
4119 1.12.4.2 martin in->cmd_opcode = htobe16(MCX_CMD_CREATE_RQ);
4120 1.12.4.2 martin in->cmd_op_mod = htobe16(0);
4121 1.12.4.2 martin
4122 1.12.4.2 martin if (mcx_cmdq_mboxes_alloc(sc, &mxm, howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
4123 1.12.4.2 martin &cqe->cq_input_ptr, token) != 0) {
4124 1.12.4.2 martin printf("%s: unable to allocate create rq mailboxen\n",
4125 1.12.4.2 martin DEVNAME(sc));
4126 1.12.4.2 martin error = -1;
4127 1.12.4.2 martin goto free;
4128 1.12.4.2 martin }
4129 1.12.4.2 martin mbin = (struct mcx_rq_ctx *)(((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 0x10);
4130 1.12.4.2 martin mbin->rq_flags = htobe32(MCX_RQ_CTX_RLKEY | MCX_RQ_CTX_VLAN_STRIP_DIS);
4131 1.12.4.2 martin mbin->rq_cqn = htobe32(cqn);
4132 1.12.4.2 martin mbin->rq_wq.wq_type = MCX_WQ_CTX_TYPE_CYCLIC;
4133 1.12.4.2 martin mbin->rq_wq.wq_pd = htobe32(sc->sc_pd);
4134 1.12.4.2 martin mbin->rq_wq.wq_doorbell = htobe64(MCX_DMA_DVA(&sc->sc_doorbell_mem) +
4135 1.12.4.2 martin MCX_RQ_DOORBELL_OFFSET);
4136 1.12.4.2 martin mbin->rq_wq.wq_log_stride = htobe16(4);
4137 1.12.4.2 martin mbin->rq_wq.wq_log_size = MCX_LOG_RQ_SIZE;
4138 1.12.4.2 martin
4139 1.12.4.2 martin /* physical addresses follow the mailbox in data */
4140 1.12.4.2 martin pas = (uint64_t *)(mbin + 1);
4141 1.12.4.2 martin for (i = 0; i < npages; i++) {
4142 1.12.4.2 martin pas[i] = htobe64(MCX_DMA_DVA(&sc->sc_rq_mem) +
4143 1.12.4.2 martin (i * MCX_PAGE_SIZE));
4144 1.12.4.2 martin }
4145 1.12.4.2 martin mcx_cmdq_post(sc, cqe, 0);
4146 1.12.4.2 martin
4147 1.12.4.2 martin error = mcx_cmdq_poll(sc, cqe, 1000);
4148 1.12.4.2 martin if (error != 0) {
4149 1.12.4.2 martin printf("%s: create rq timeout\n", DEVNAME(sc));
4150 1.12.4.2 martin goto free;
4151 1.12.4.2 martin }
4152 1.12.4.2 martin if (mcx_cmdq_verify(cqe) != 0) {
4153 1.12.4.2 martin printf("%s: create rq command corrupt\n", DEVNAME(sc));
4154 1.12.4.2 martin goto free;
4155 1.12.4.2 martin }
4156 1.12.4.2 martin
4157 1.12.4.2 martin out = mcx_cmdq_out(cqe);
4158 1.12.4.2 martin if (out->cmd_status != MCX_CQ_STATUS_OK) {
4159 1.12.4.2 martin printf("%s: create rq failed (%x, %x)\n", DEVNAME(sc),
4160 1.12.4.2 martin out->cmd_status, be32toh(out->cmd_syndrome));
4161 1.12.4.2 martin error = -1;
4162 1.12.4.2 martin goto free;
4163 1.12.4.2 martin }
4164 1.12.4.2 martin
4165 1.12.4.2 martin sc->sc_rqn = be32toh(out->cmd_rqn);
4166 1.12.4.2 martin
4167 1.12.4.2 martin doorbell = MCX_DMA_KVA(&sc->sc_doorbell_mem);
4168 1.12.4.2 martin sc->sc_rx_doorbell = (uint32_t *)(doorbell + MCX_RQ_DOORBELL_OFFSET);
4169 1.12.4.2 martin
4170 1.12.4.2 martin free:
4171 1.12.4.2 martin mcx_dmamem_free(sc, &mxm);
4172 1.12.4.2 martin return (error);
4173 1.12.4.2 martin }
4174 1.12.4.2 martin
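/*
 * Transition the receive queue from the RST to the RDY state with a
 * MODIFY_RQ command.
 */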
4175 1.12.4.2 martin static int
4176 1.12.4.2 martin mcx_ready_rq(struct mcx_softc *sc)
4177 1.12.4.2 martin {
4178 1.12.4.2 martin struct mcx_cmdq_entry *cqe;
4179 1.12.4.2 martin struct mcx_dmamem mxm;
4180 1.12.4.2 martin struct mcx_cmd_modify_rq_in *in;
4181 1.12.4.2 martin struct mcx_cmd_modify_rq_mb_in *mbin;
4182 1.12.4.2 martin struct mcx_cmd_modify_rq_out *out;
4183 1.12.4.2 martin int error;
4184 1.12.4.2 martin int token;
4185 1.12.4.2 martin
4186 1.12.4.2 martin cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4187 1.12.4.2 martin token = mcx_cmdq_token(sc);
4188 1.12.4.2 martin mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);
4189 1.12.4.2 martin
4190 1.12.4.2 martin in = mcx_cmdq_in(cqe);
4191 1.12.4.2 martin in->cmd_opcode = htobe16(MCX_CMD_MODIFY_RQ);
4192 1.12.4.2 martin in->cmd_op_mod = htobe16(0);
4193 1.12.4.2 martin in->cmd_rq_state = htobe32((MCX_QUEUE_STATE_RST << 28) | sc->sc_rqn);
4194 1.12.4.2 martin
4195 1.12.4.2 martin if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4196 1.12.4.2 martin printf("%s: unable to allocate modify rq mailbox\n", DEVNAME(sc));
4197 1.12.4.2 martin return (-1);
4198 1.12.4.2 martin }
4199 1.12.4.2 martin mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4200 1.12.4.2 martin mbin->cmd_rq_ctx.rq_flags = htobe32(
4201 1.12.4.2 martin MCX_QUEUE_STATE_RDY << MCX_RQ_CTX_STATE_SHIFT);
4202 1.12.4.2 martin
4203 1.12.4.2 martin mcx_cmdq_mboxes_sign(&mxm, 1);
4204 1.12.4.2 martin mcx_cmdq_post(sc, cqe, 0);
4205 1.12.4.2 martin error = mcx_cmdq_poll(sc, cqe, 1000);
4206 1.12.4.2 martin if (error != 0) {
4207 1.12.4.2 martin printf("%s: modify rq timeout\n", DEVNAME(sc));
4208 1.12.4.2 martin goto free;
4209 1.12.4.2 martin }
4210 1.12.4.2 martin if (mcx_cmdq_verify(cqe) != 0) {
4211 1.12.4.2 martin printf("%s: modify rq command corrupt\n", DEVNAME(sc));
4212 1.12.4.2 martin goto free;
4213 1.12.4.2 martin }
4214 1.12.4.2 martin
4215 1.12.4.2 martin out = mcx_cmdq_out(cqe);
4216 1.12.4.2 martin if (out->cmd_status != MCX_CQ_STATUS_OK) {
4217 1.12.4.2 martin printf("%s: modify rq failed (%x, %x)\n", DEVNAME(sc),
4218 1.12.4.2 martin out->cmd_status, be32toh(out->cmd_syndrome));
4219 1.12.4.2 martin error = -1;
4220 1.12.4.2 martin goto free;
4221 1.12.4.2 martin }
4222 1.12.4.2 martin
4223 1.12.4.2 martin free:
4224 1.12.4.2 martin mcx_dmamem_free(sc, &mxm);
4225 1.12.4.2 martin return (error);
4226 1.12.4.2 martin }
4227 1.12.4.2 martin
4228 1.12.4.2 martin static int
4229 1.12.4.2 martin mcx_destroy_rq(struct mcx_softc *sc)
4230 1.12.4.2 martin {
4231 1.12.4.2 martin struct mcx_cmdq_entry *cqe;
4232 1.12.4.2 martin struct mcx_cmd_destroy_rq_in *in;
4233 1.12.4.2 martin struct mcx_cmd_destroy_rq_out *out;
4234 1.12.4.2 martin int error;
4235 1.12.4.2 martin int token;
4236 1.12.4.2 martin
4237 1.12.4.2 martin cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4238 1.12.4.2 martin token = mcx_cmdq_token(sc);
4239 1.12.4.2 martin mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4240 1.12.4.2 martin
4241 1.12.4.2 martin in = mcx_cmdq_in(cqe);
4242 1.12.4.2 martin in->cmd_opcode = htobe16(MCX_CMD_DESTROY_RQ);
4243 1.12.4.2 martin in->cmd_op_mod = htobe16(0);
4244 1.12.4.2 martin in->cmd_rqn = htobe32(sc->sc_rqn);
4245 1.12.4.2 martin
4246 1.12.4.2 martin mcx_cmdq_post(sc, cqe, 0);
4247 1.12.4.2 martin error = mcx_cmdq_poll(sc, cqe, 1000);
4248 1.12.4.2 martin if (error != 0) {
4249 1.12.4.2 martin printf("%s: destroy rq timeout\n", DEVNAME(sc));
4250 1.12.4.2 martin return error;
4251 1.12.4.2 martin }
4252 1.12.4.2 martin if (mcx_cmdq_verify(cqe) != 0) {
4253 1.12.4.2 martin printf("%s: destroy rq command corrupt\n", DEVNAME(sc));
4254 1.12.4.2 martin return error;
4255 1.12.4.2 martin }
4256 1.12.4.2 martin
4257 1.12.4.2 martin out = mcx_cmdq_out(cqe);
4258 1.12.4.2 martin if (out->cmd_status != MCX_CQ_STATUS_OK) {
4259 1.12.4.2 martin printf("%s: destroy rq failed (%x, %x)\n", DEVNAME(sc),
4260 1.12.4.2 martin out->cmd_status, be32toh(out->cmd_syndrome));
4261 1.12.4.2 martin return -1;
4262 1.12.4.2 martin }
4263 1.12.4.2 martin
4264 1.12.4.2 martin sc->sc_rqn = 0;
4265 1.12.4.2 martin return 0;
4266 1.12.4.2 martin }
4267 1.12.4.2 martin
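/*
 * Create a TIR (transport interface receive) in the transport domain.
 * With disp_type left at zero, received packets are delivered
 * straight to the inline RQ (sc_rqn).
 */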
4268 1.12.4.2 martin static int
4269 1.12.4.2 martin mcx_create_tir(struct mcx_softc *sc)
4270 1.12.4.2 martin {
4271 1.12.4.2 martin struct mcx_cmdq_entry *cqe;
4272 1.12.4.2 martin struct mcx_dmamem mxm;
4273 1.12.4.2 martin struct mcx_cmd_create_tir_in *in;
4274 1.12.4.2 martin struct mcx_cmd_create_tir_mb_in *mbin;
4275 1.12.4.2 martin struct mcx_cmd_create_tir_out *out;
4276 1.12.4.2 martin int error;
4277 1.12.4.2 martin int token;
4278 1.12.4.2 martin
4279 1.12.4.2 martin cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4280 1.12.4.2 martin token = mcx_cmdq_token(sc);
4281 1.12.4.2 martin mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);
4282 1.12.4.2 martin
4283 1.12.4.2 martin in = mcx_cmdq_in(cqe);
4284 1.12.4.2 martin in->cmd_opcode = htobe16(MCX_CMD_CREATE_TIR);
4285 1.12.4.2 martin in->cmd_op_mod = htobe16(0);
4286 1.12.4.2 martin
4287 1.12.4.2 martin if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4288 1.12.4.2 martin printf("%s: unable to allocate create tir mailbox\n",
4289 1.12.4.2 martin DEVNAME(sc));
4290 1.12.4.2 martin return (-1);
4291 1.12.4.2 martin }
4292 1.12.4.2 martin mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4293 1.12.4.2 martin /* leave disp_type = 0, so packets get sent to the inline rqn */
4294 1.12.4.2 martin mbin->cmd_inline_rqn = htobe32(sc->sc_rqn);
4295 1.12.4.2 martin mbin->cmd_tdomain = htobe32(sc->sc_tdomain);
4296 1.12.4.2 martin
4297 1.12.4.2 martin mcx_cmdq_post(sc, cqe, 0);
4298 1.12.4.2 martin error = mcx_cmdq_poll(sc, cqe, 1000);
4299 1.12.4.2 martin if (error != 0) {
4300 1.12.4.2 martin printf("%s: create tir timeout\n", DEVNAME(sc));
4301 1.12.4.2 martin goto free;
4302 1.12.4.2 martin }
4303 1.12.4.2 martin if (mcx_cmdq_verify(cqe) != 0) {
4304 1.12.4.2 martin printf("%s: create tir command corrupt\n", DEVNAME(sc));
4305 1.12.4.2 martin goto free;
4306 1.12.4.2 martin }
4307 1.12.4.2 martin
4308 1.12.4.2 martin out = mcx_cmdq_out(cqe);
4309 1.12.4.2 martin if (out->cmd_status != MCX_CQ_STATUS_OK) {
4310 1.12.4.2 martin printf("%s: create tir failed (%x, %x)\n", DEVNAME(sc),
4311 1.12.4.2 martin out->cmd_status, be32toh(out->cmd_syndrome));
4312 1.12.4.2 martin error = -1;
4313 1.12.4.2 martin goto free;
4314 1.12.4.2 martin }
4315 1.12.4.2 martin
4316 1.12.4.2 martin sc->sc_tirn = be32toh(out->cmd_tirn);
4317 1.12.4.2 martin free:
4318 1.12.4.2 martin mcx_dmamem_free(sc, &mxm);
4319 1.12.4.2 martin return (error);
4320 1.12.4.2 martin }
4321 1.12.4.2 martin
4322 1.12.4.2 martin static int
4323 1.12.4.2 martin mcx_destroy_tir(struct mcx_softc *sc)
4324 1.12.4.2 martin {
4325 1.12.4.2 martin struct mcx_cmdq_entry *cqe;
4326 1.12.4.2 martin struct mcx_cmd_destroy_tir_in *in;
4327 1.12.4.2 martin struct mcx_cmd_destroy_tir_out *out;
4328 1.12.4.2 martin int error;
4329 1.12.4.2 martin int token;
4330 1.12.4.2 martin
4331 1.12.4.2 martin cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4332 1.12.4.2 martin token = mcx_cmdq_token(sc);
4333 1.12.4.2 martin mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4334 1.12.4.2 martin
4335 1.12.4.2 martin in = mcx_cmdq_in(cqe);
4336 1.12.4.2 martin in->cmd_opcode = htobe16(MCX_CMD_DESTROY_TIR);
4337 1.12.4.2 martin in->cmd_op_mod = htobe16(0);
4338 1.12.4.2 martin in->cmd_tirn = htobe32(sc->sc_tirn);
4339 1.12.4.2 martin
4340 1.12.4.2 martin mcx_cmdq_post(sc, cqe, 0);
4341 1.12.4.2 martin error = mcx_cmdq_poll(sc, cqe, 1000);
4342 1.12.4.2 martin if (error != 0) {
4343 1.12.4.2 martin printf("%s: destroy tir timeout\n", DEVNAME(sc));
4344 1.12.4.2 martin return error;
4345 1.12.4.2 martin }
4346 1.12.4.2 martin if (mcx_cmdq_verify(cqe) != 0) {
4347 1.12.4.2 martin printf("%s: destroy tir command corrupt\n", DEVNAME(sc));
4348 1.12.4.2 martin return error;
4349 1.12.4.2 martin }
4350 1.12.4.2 martin
4351 1.12.4.2 martin out = mcx_cmdq_out(cqe);
4352 1.12.4.2 martin if (out->cmd_status != MCX_CQ_STATUS_OK) {
4353 1.12.4.2 martin printf("%s: destroy tir failed (%x, %x)\n", DEVNAME(sc),
4354 1.12.4.2 martin out->cmd_status, be32toh(out->cmd_syndrome));
4355 1.12.4.2 martin return -1;
4356 1.12.4.2 martin }
4357 1.12.4.2 martin
4358 1.12.4.2 martin sc->sc_tirn = 0;
4359 1.12.4.2 martin return 0;
4360 1.12.4.2 martin }
4361 1.12.4.2 martin
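/*
 * Create the send queue.  The SQ context, placed 0x10 bytes into the
 * mailbox data, links the queue to the completion queue, the TIS, the
 * UAR and the SQ doorbell, and is followed by the physical addresses
 * of the queue memory.  The SQ number and tx doorbell pointer are
 * saved in sc_sqn and sc_tx_doorbell.
 */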
4362 1.12.4.2 martin static int
4363 1.12.4.2 martin mcx_create_sq(struct mcx_softc *sc, int cqn)
4364 1.12.4.2 martin {
4365 1.12.4.2 martin struct mcx_cmdq_entry *cqe;
4366 1.12.4.2 martin struct mcx_dmamem mxm;
4367 1.12.4.2 martin struct mcx_cmd_create_sq_in *in;
4368 1.12.4.2 martin struct mcx_sq_ctx *mbin;
4369 1.12.4.2 martin struct mcx_cmd_create_sq_out *out;
4370 1.12.4.2 martin int error;
4371 1.12.4.2 martin uint64_t *pas;
4372 1.12.4.2 martin uint8_t *doorbell;
4373 1.12.4.2 martin int insize, npages, paslen, i, token;
4374 1.12.4.2 martin
4375 1.12.4.2 martin npages = howmany((1 << MCX_LOG_SQ_SIZE) * sizeof(struct mcx_sq_entry),
4376 1.12.4.2 martin MCX_PAGE_SIZE);
4377 1.12.4.2 martin paslen = npages * sizeof(*pas);
4378 1.12.4.2 martin insize = sizeof(struct mcx_sq_ctx) + paslen;
4379 1.12.4.2 martin
4380 1.12.4.2 martin if (mcx_dmamem_alloc(sc, &sc->sc_sq_mem, npages * MCX_PAGE_SIZE,
4381 1.12.4.2 martin MCX_PAGE_SIZE) != 0) {
4382 1.12.4.2 martin printf("%s: unable to allocate send queue memory\n", DEVNAME(sc));
4383 1.12.4.2 martin return (-1);
4384 1.12.4.2 martin }
4385 1.12.4.2 martin
4386 1.12.4.2 martin cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4387 1.12.4.2 martin token = mcx_cmdq_token(sc);
4388 1.12.4.2 martin mcx_cmdq_init(sc, cqe, sizeof(*in) + insize + paslen, sizeof(*out),
4389 1.12.4.2 martin token);
4390 1.12.4.2 martin
4391 1.12.4.2 martin in = mcx_cmdq_in(cqe);
4392 1.12.4.2 martin in->cmd_opcode = htobe16(MCX_CMD_CREATE_SQ);
4393 1.12.4.2 martin in->cmd_op_mod = htobe16(0);
4394 1.12.4.2 martin
4395 1.12.4.2 martin if (mcx_cmdq_mboxes_alloc(sc, &mxm, howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
4396 1.12.4.2 martin &cqe->cq_input_ptr, token) != 0) {
4397 1.12.4.2 martin printf("%s: unable to allocate create sq mailboxen\n", DEVNAME(sc));
4398 1.12.4.2 martin error = -1;
4399 1.12.4.2 martin goto free;
4400 1.12.4.2 martin }
4401 1.12.4.2 martin mbin = (struct mcx_sq_ctx *)(((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 0x10);
4402 1.12.4.2 martin mbin->sq_flags = htobe32(MCX_SQ_CTX_RLKEY |
4403 1.12.4.2 martin (1 << MCX_SQ_CTX_MIN_WQE_INLINE_SHIFT));
4404 1.12.4.2 martin mbin->sq_cqn = htobe32(cqn);
4405 1.12.4.2 martin mbin->sq_tis_lst_sz = htobe32(1 << MCX_SQ_CTX_TIS_LST_SZ_SHIFT);
4406 1.12.4.2 martin mbin->sq_tis_num = htobe32(sc->sc_tisn);
4407 1.12.4.2 martin mbin->sq_wq.wq_type = MCX_WQ_CTX_TYPE_CYCLIC;
4408 1.12.4.2 martin mbin->sq_wq.wq_pd = htobe32(sc->sc_pd);
4409 1.12.4.2 martin mbin->sq_wq.wq_uar_page = htobe32(sc->sc_uar);
4410 1.12.4.2 martin mbin->sq_wq.wq_doorbell = htobe64(MCX_DMA_DVA(&sc->sc_doorbell_mem) +
4411 1.12.4.2 martin MCX_SQ_DOORBELL_OFFSET);
4412 1.12.4.2 martin mbin->sq_wq.wq_log_stride = htobe16(MCX_LOG_SQ_ENTRY_SIZE);
4413 1.12.4.2 martin mbin->sq_wq.wq_log_size = MCX_LOG_SQ_SIZE;
4414 1.12.4.2 martin
4415 1.12.4.2 martin /* physical addresses follow the mailbox in data */
4416 1.12.4.2 martin pas = (uint64_t *)(mbin + 1);
4417 1.12.4.2 martin for (i = 0; i < npages; i++) {
4418 1.12.4.2 martin pas[i] = htobe64(MCX_DMA_DVA(&sc->sc_sq_mem) +
4419 1.12.4.2 martin (i * MCX_PAGE_SIZE));
4420 1.12.4.2 martin }
4421 1.12.4.2 martin mcx_cmdq_post(sc, cqe, 0);
4422 1.12.4.2 martin
4423 1.12.4.2 martin error = mcx_cmdq_poll(sc, cqe, 1000);
4424 1.12.4.2 martin if (error != 0) {
4425 1.12.4.2 martin printf("%s: create sq timeout\n", DEVNAME(sc));
4426 1.12.4.2 martin goto free;
4427 1.12.4.2 martin }
4428 1.12.4.2 martin if (mcx_cmdq_verify(cqe) != 0) {
4429 1.12.4.2 martin printf("%s: create sq command corrupt\n", DEVNAME(sc));
4430 1.12.4.2 martin goto free;
4431 1.12.4.2 martin }
4432 1.12.4.2 martin
4433 1.12.4.2 martin out = mcx_cmdq_out(cqe);
4434 1.12.4.2 martin if (out->cmd_status != MCX_CQ_STATUS_OK) {
4435 1.12.4.2 martin printf("%s: create sq failed (%x, %x)\n", DEVNAME(sc),
4436 1.12.4.2 martin out->cmd_status, be32toh(out->cmd_syndrome));
4437 1.12.4.2 martin error = -1;
4438 1.12.4.2 martin goto free;
4439 1.12.4.2 martin }
4440 1.12.4.2 martin
4441 1.12.4.2 martin sc->sc_sqn = be32toh(out->cmd_sqn);
4442 1.12.4.2 martin
4443 1.12.4.2 martin doorbell = MCX_DMA_KVA(&sc->sc_doorbell_mem);
4444 1.12.4.2 martin sc->sc_tx_doorbell = (uint32_t *)(doorbell + MCX_SQ_DOORBELL_OFFSET + 4);
4445 1.12.4.2 martin free:
4446 1.12.4.2 martin mcx_dmamem_free(sc, &mxm);
4447 1.12.4.2 martin return (error);
4448 1.12.4.2 martin }
4449 1.12.4.2 martin
4450 1.12.4.2 martin static int
4451 1.12.4.2 martin mcx_destroy_sq(struct mcx_softc *sc)
4452 1.12.4.2 martin {
4453 1.12.4.2 martin struct mcx_cmdq_entry *cqe;
4454 1.12.4.2 martin struct mcx_cmd_destroy_sq_in *in;
4455 1.12.4.2 martin struct mcx_cmd_destroy_sq_out *out;
4456 1.12.4.2 martin int error;
4457 1.12.4.2 martin int token;
4458 1.12.4.2 martin
4459 1.12.4.2 martin cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4460 1.12.4.2 martin token = mcx_cmdq_token(sc);
4461 1.12.4.2 martin mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4462 1.12.4.2 martin
4463 1.12.4.2 martin in = mcx_cmdq_in(cqe);
4464 1.12.4.2 martin in->cmd_opcode = htobe16(MCX_CMD_DESTROY_SQ);
4465 1.12.4.2 martin in->cmd_op_mod = htobe16(0);
4466 1.12.4.2 martin in->cmd_sqn = htobe32(sc->sc_sqn);
4467 1.12.4.2 martin
4468 1.12.4.2 martin mcx_cmdq_post(sc, cqe, 0);
4469 1.12.4.2 martin error = mcx_cmdq_poll(sc, cqe, 1000);
4470 1.12.4.2 martin if (error != 0) {
4471 1.12.4.2 martin printf("%s: destroy sq timeout\n", DEVNAME(sc));
4472 1.12.4.2 martin return error;
4473 1.12.4.2 martin }
4474 1.12.4.2 martin if (mcx_cmdq_verify(cqe) != 0) {
4475 1.12.4.2 martin printf("%s: destroy sq command corrupt\n", DEVNAME(sc));
4476 1.12.4.2 martin return error;
4477 1.12.4.2 martin }
4478 1.12.4.2 martin
4479 1.12.4.2 martin out = mcx_cmdq_out(cqe);
4480 1.12.4.2 martin if (out->cmd_status != MCX_CQ_STATUS_OK) {
4481 1.12.4.2 martin printf("%s: destroy sq failed (%x, %x)\n", DEVNAME(sc),
4482 1.12.4.2 martin out->cmd_status, be32toh(out->cmd_syndrome));
4483 1.12.4.2 martin return -1;
4484 1.12.4.2 martin }
4485 1.12.4.2 martin
4486 1.12.4.2 martin sc->sc_sqn = 0;
4487 1.12.4.2 martin return 0;
4488 1.12.4.2 martin }
4489 1.12.4.2 martin
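/*
 * Transition the send queue from the RST to the RDY state with a
 * MODIFY_SQ command.
 */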
4490 1.12.4.2 martin static int
4491 1.12.4.2 martin mcx_ready_sq(struct mcx_softc *sc)
4492 1.12.4.2 martin {
4493 1.12.4.2 martin struct mcx_cmdq_entry *cqe;
4494 1.12.4.2 martin struct mcx_dmamem mxm;
4495 1.12.4.2 martin struct mcx_cmd_modify_sq_in *in;
4496 1.12.4.2 martin struct mcx_cmd_modify_sq_mb_in *mbin;
4497 1.12.4.2 martin struct mcx_cmd_modify_sq_out *out;
4498 1.12.4.2 martin int error;
4499 1.12.4.2 martin int token;
4500 1.12.4.2 martin
4501 1.12.4.2 martin cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4502 1.12.4.2 martin token = mcx_cmdq_token(sc);
4503 1.12.4.2 martin mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);
4504 1.12.4.2 martin
4505 1.12.4.2 martin in = mcx_cmdq_in(cqe);
4506 1.12.4.2 martin in->cmd_opcode = htobe16(MCX_CMD_MODIFY_SQ);
4507 1.12.4.2 martin in->cmd_op_mod = htobe16(0);
4508 1.12.4.2 martin in->cmd_sq_state = htobe32((MCX_QUEUE_STATE_RST << 28) | sc->sc_sqn);
4509 1.12.4.2 martin
4510 1.12.4.2 martin if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4511 1.12.4.2 martin printf("%s: unable to allocate modify sq mailbox\n",
4512 1.12.4.2 martin DEVNAME(sc));
4513 1.12.4.2 martin return (-1);
4514 1.12.4.2 martin }
4515 1.12.4.2 martin mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4516 1.12.4.2 martin mbin->cmd_sq_ctx.sq_flags = htobe32(
4517 1.12.4.2 martin MCX_QUEUE_STATE_RDY << MCX_SQ_CTX_STATE_SHIFT);
4518 1.12.4.2 martin
4519 1.12.4.2 martin mcx_cmdq_mboxes_sign(&mxm, 1);
4520 1.12.4.2 martin mcx_cmdq_post(sc, cqe, 0);
4521 1.12.4.2 martin error = mcx_cmdq_poll(sc, cqe, 1000);
4522 1.12.4.2 martin if (error != 0) {
4523 1.12.4.2 martin printf("%s: modify sq timeout\n", DEVNAME(sc));
4524 1.12.4.2 martin goto free;
4525 1.12.4.2 martin }
4526 1.12.4.2 martin if (mcx_cmdq_verify(cqe) != 0) {
4527 1.12.4.2 martin printf("%s: modify sq command corrupt\n", DEVNAME(sc));
4528 1.12.4.2 martin goto free;
4529 1.12.4.2 martin }
4530 1.12.4.2 martin
4531 1.12.4.2 martin out = mcx_cmdq_out(cqe);
4532 1.12.4.2 martin if (out->cmd_status != MCX_CQ_STATUS_OK) {
4533 1.12.4.2 martin printf("%s: modify sq failed (%x, %x)\n", DEVNAME(sc),
4534 1.12.4.2 martin out->cmd_status, be32toh(out->cmd_syndrome));
4535 1.12.4.2 martin error = -1;
4536 1.12.4.2 martin goto free;
4537 1.12.4.2 martin }
4538 1.12.4.2 martin
4539 1.12.4.2 martin free:
4540 1.12.4.2 martin mcx_dmamem_free(sc, &mxm);
4541 1.12.4.2 martin return (error);
4542 1.12.4.2 martin }
4543 1.12.4.2 martin
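/*
 * Create a TIS (transport interface send) in the transport domain;
 * the send queue context references it through sc_tisn.
 */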
4544 1.12.4.2 martin static int
4545 1.12.4.2 martin mcx_create_tis(struct mcx_softc *sc)
4546 1.12.4.2 martin {
4547 1.12.4.2 martin struct mcx_cmdq_entry *cqe;
4548 1.12.4.2 martin struct mcx_dmamem mxm;
4549 1.12.4.2 martin struct mcx_cmd_create_tis_in *in;
4550 1.12.4.2 martin struct mcx_cmd_create_tis_mb_in *mbin;
4551 1.12.4.2 martin struct mcx_cmd_create_tis_out *out;
4552 1.12.4.2 martin int error;
4553 1.12.4.2 martin int token;
4554 1.12.4.2 martin
4555 1.12.4.2 martin cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4556 1.12.4.2 martin token = mcx_cmdq_token(sc);
4557 1.12.4.2 martin mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);
4558 1.12.4.2 martin
4559 1.12.4.2 martin in = mcx_cmdq_in(cqe);
4560 1.12.4.2 martin in->cmd_opcode = htobe16(MCX_CMD_CREATE_TIS);
4561 1.12.4.2 martin in->cmd_op_mod = htobe16(0);
4562 1.12.4.2 martin
4563 1.12.4.2 martin if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4564 1.12.4.2 martin printf("%s: unable to allocate create tis mailbox\n", DEVNAME(sc));
4565 1.12.4.2 martin return (-1);
4566 1.12.4.2 martin }
4567 1.12.4.2 martin mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4568 1.12.4.2 martin mbin->cmd_tdomain = htobe32(sc->sc_tdomain);
4569 1.12.4.2 martin
4570 1.12.4.2 martin mcx_cmdq_mboxes_sign(&mxm, 1);
4571 1.12.4.2 martin mcx_cmdq_post(sc, cqe, 0);
4572 1.12.4.2 martin error = mcx_cmdq_poll(sc, cqe, 1000);
4573 1.12.4.2 martin if (error != 0) {
4574 1.12.4.2 martin printf("%s: create tis timeout\n", DEVNAME(sc));
4575 1.12.4.2 martin goto free;
4576 1.12.4.2 martin }
4577 1.12.4.2 martin if (mcx_cmdq_verify(cqe) != 0) {
4578 1.12.4.2 martin printf("%s: create tis command corrupt\n", DEVNAME(sc));
4579 1.12.4.2 martin goto free;
4580 1.12.4.2 martin }
4581 1.12.4.2 martin
4582 1.12.4.2 martin out = mcx_cmdq_out(cqe);
4583 1.12.4.2 martin if (out->cmd_status != MCX_CQ_STATUS_OK) {
4584 1.12.4.2 martin printf("%s: create tis failed (%x, %x)\n", DEVNAME(sc),
4585 1.12.4.2 martin out->cmd_status, be32toh(out->cmd_syndrome));
4586 1.12.4.2 martin error = -1;
4587 1.12.4.2 martin goto free;
4588 1.12.4.2 martin }
4589 1.12.4.2 martin
4590 1.12.4.2 martin sc->sc_tisn = be32toh(out->cmd_tisn);
4591 1.12.4.2 martin free:
4592 1.12.4.2 martin mcx_dmamem_free(sc, &mxm);
4593 1.12.4.2 martin return (error);
4594 1.12.4.2 martin }
4595 1.12.4.2 martin
4596 1.12.4.2 martin static int
4597 1.12.4.2 martin mcx_destroy_tis(struct mcx_softc *sc)
4598 1.12.4.2 martin {
4599 1.12.4.2 martin struct mcx_cmdq_entry *cqe;
4600 1.12.4.2 martin struct mcx_cmd_destroy_tis_in *in;
4601 1.12.4.2 martin struct mcx_cmd_destroy_tis_out *out;
4602 1.12.4.2 martin int error;
4603 1.12.4.2 martin int token;
4604 1.12.4.2 martin
4605 1.12.4.2 martin cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4606 1.12.4.2 martin token = mcx_cmdq_token(sc);
4607 1.12.4.2 martin mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4608 1.12.4.2 martin
4609 1.12.4.2 martin in = mcx_cmdq_in(cqe);
4610 1.12.4.2 martin in->cmd_opcode = htobe16(MCX_CMD_DESTROY_TIS);
4611 1.12.4.2 martin in->cmd_op_mod = htobe16(0);
4612 1.12.4.2 martin in->cmd_tisn = htobe32(sc->sc_tisn);
4613 1.12.4.2 martin
4614 1.12.4.2 martin mcx_cmdq_post(sc, cqe, 0);
4615 1.12.4.2 martin error = mcx_cmdq_poll(sc, cqe, 1000);
4616 1.12.4.2 martin if (error != 0) {
4617 1.12.4.2 martin printf("%s: destroy tis timeout\n", DEVNAME(sc));
4618 1.12.4.2 martin return error;
4619 1.12.4.2 martin }
4620 1.12.4.2 martin if (mcx_cmdq_verify(cqe) != 0) {
4621 1.12.4.2 martin printf("%s: destroy tis command corrupt\n", DEVNAME(sc));
4622 1.12.4.2 martin return error;
4623 1.12.4.2 martin }
4624 1.12.4.2 martin
4625 1.12.4.2 martin out = mcx_cmdq_out(cqe);
4626 1.12.4.2 martin if (out->cmd_status != MCX_CQ_STATUS_OK) {
4627 1.12.4.2 martin printf("%s: destroy tis failed (%x, %x)\n", DEVNAME(sc),
4628 1.12.4.2 martin out->cmd_status, be32toh(out->cmd_syndrome));
4629 1.12.4.2 martin return -1;
4630 1.12.4.2 martin }
4631 1.12.4.2 martin
4632 1.12.4.2 martin sc->sc_tisn = 0;
4633 1.12.4.2 martin return 0;
4634 1.12.4.2 martin }
4635 1.12.4.2 martin
4636 1.12.4.2 martin #if 0
4637 1.12.4.2 martin static int
4638 1.12.4.2 martin mcx_alloc_flow_counter(struct mcx_softc *sc, int i)
4639 1.12.4.2 martin {
4640 1.12.4.2 martin struct mcx_cmdq_entry *cqe;
4641 1.12.4.2 martin struct mcx_cmd_alloc_flow_counter_in *in;
4642 1.12.4.2 martin struct mcx_cmd_alloc_flow_counter_out *out;
4643 1.12.4.2 martin int error;
4644 1.12.4.2 martin
4645 1.12.4.2 martin cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4646 1.12.4.2 martin mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
4647 1.12.4.2 martin
4648 1.12.4.2 martin in = mcx_cmdq_in(cqe);
4649 1.12.4.2 martin in->cmd_opcode = htobe16(MCX_CMD_ALLOC_FLOW_COUNTER);
4650 1.12.4.2 martin in->cmd_op_mod = htobe16(0);
4651 1.12.4.2 martin
4652 1.12.4.2 martin mcx_cmdq_post(sc, cqe, 0);
4653 1.12.4.2 martin
4654 1.12.4.2 martin error = mcx_cmdq_poll(sc, cqe, 1000);
4655 1.12.4.2 martin if (error != 0) {
4656 1.12.4.2 martin printf("%s: alloc flow counter timeout\n", DEVNAME(sc));
4657 1.12.4.2 martin return (-1);
4658 1.12.4.2 martin }
4659 1.12.4.2 martin if (mcx_cmdq_verify(cqe) != 0) {
4660 1.12.4.2 martin printf("%s: alloc flow counter command corrupt\n", DEVNAME(sc));
4661 1.12.4.2 martin return (-1);
4662 1.12.4.2 martin }
4663 1.12.4.2 martin
4664 1.12.4.2 martin out = (struct mcx_cmd_alloc_flow_counter_out *)cqe->cq_output_data;
4665 1.12.4.2 martin if (out->cmd_status != MCX_CQ_STATUS_OK) {
4666 1.12.4.2 martin printf("%s: alloc flow counter failed (%x)\n", DEVNAME(sc),
4667 1.12.4.2 martin out->cmd_status);
4668 1.12.4.2 martin return (-1);
4669 1.12.4.2 martin }
4670 1.12.4.2 martin
4671 1.12.4.2 martin sc->sc_flow_counter_id[i] = be16toh(out->cmd_flow_counter_id);
4672 1.12.4.2 martin printf("flow counter id %d = %d\n", i, sc->sc_flow_counter_id[i]);
4673 1.12.4.2 martin
4674 1.12.4.2 martin return (0);
4675 1.12.4.2 martin }
4676 1.12.4.2 martin #endif
4677 1.12.4.2 martin
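/*
 * Create the receive flow table with 2^log_size entries.  The table
 * id is kept in sc_flow_table_id and used by the flow group and flow
 * table entry commands below.
 */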
4678 1.12.4.2 martin static int
4679 1.12.4.2 martin mcx_create_flow_table(struct mcx_softc *sc, int log_size)
4680 1.12.4.2 martin {
4681 1.12.4.2 martin struct mcx_cmdq_entry *cqe;
4682 1.12.4.2 martin struct mcx_dmamem mxm;
4683 1.12.4.2 martin struct mcx_cmd_create_flow_table_in *in;
4684 1.12.4.2 martin struct mcx_cmd_create_flow_table_mb_in *mbin;
4685 1.12.4.2 martin struct mcx_cmd_create_flow_table_out *out;
4686 1.12.4.2 martin int error;
4687 1.12.4.2 martin int token;
4688 1.12.4.2 martin
4689 1.12.4.2 martin cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4690 1.12.4.2 martin token = mcx_cmdq_token(sc);
4691 1.12.4.2 martin mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);
4692 1.12.4.2 martin
4693 1.12.4.2 martin in = mcx_cmdq_in(cqe);
4694 1.12.4.2 martin in->cmd_opcode = htobe16(MCX_CMD_CREATE_FLOW_TABLE);
4695 1.12.4.2 martin in->cmd_op_mod = htobe16(0);
4696 1.12.4.2 martin
4697 1.12.4.2 martin if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4698 1.12.4.2 martin printf("%s: unable to allocate create flow table mailbox\n",
4699 1.12.4.2 martin DEVNAME(sc));
4700 1.12.4.2 martin return (-1);
4701 1.12.4.2 martin }
4702 1.12.4.2 martin mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4703 1.12.4.2 martin mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
4704 1.12.4.2 martin mbin->cmd_ctx.ft_log_size = log_size;
4705 1.12.4.2 martin
4706 1.12.4.2 martin mcx_cmdq_mboxes_sign(&mxm, 1);
4707 1.12.4.2 martin mcx_cmdq_post(sc, cqe, 0);
4708 1.12.4.2 martin error = mcx_cmdq_poll(sc, cqe, 1000);
4709 1.12.4.2 martin if (error != 0) {
4710 1.12.4.2 martin printf("%s: create flow table timeout\n", DEVNAME(sc));
4711 1.12.4.2 martin goto free;
4712 1.12.4.2 martin }
4713 1.12.4.2 martin if (mcx_cmdq_verify(cqe) != 0) {
4714 1.12.4.2 martin printf("%s: create flow table command corrupt\n", DEVNAME(sc));
4715 1.12.4.2 martin goto free;
4716 1.12.4.2 martin }
4717 1.12.4.2 martin
4718 1.12.4.2 martin out = mcx_cmdq_out(cqe);
4719 1.12.4.2 martin if (out->cmd_status != MCX_CQ_STATUS_OK) {
4720 1.12.4.2 martin printf("%s: create flow table failed (%x, %x)\n", DEVNAME(sc),
4721 1.12.4.2 martin out->cmd_status, be32toh(out->cmd_syndrome));
4722 1.12.4.2 martin error = -1;
4723 1.12.4.2 martin goto free;
4724 1.12.4.2 martin }
4725 1.12.4.2 martin
4726 1.12.4.2 martin sc->sc_flow_table_id = be32toh(out->cmd_table_id);
4727 1.12.4.2 martin free:
4728 1.12.4.2 martin mcx_dmamem_free(sc, &mxm);
4729 1.12.4.2 martin return (error);
4730 1.12.4.2 martin }
4731 1.12.4.2 martin
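/*
 * Make the flow table in sc_flow_table_id the root of the receive
 * flow tables.
 */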
4732 1.12.4.2 martin static int
4733 1.12.4.2 martin mcx_set_flow_table_root(struct mcx_softc *sc)
4734 1.12.4.2 martin {
4735 1.12.4.2 martin struct mcx_cmdq_entry *cqe;
4736 1.12.4.2 martin struct mcx_dmamem mxm;
4737 1.12.4.2 martin struct mcx_cmd_set_flow_table_root_in *in;
4738 1.12.4.2 martin struct mcx_cmd_set_flow_table_root_mb_in *mbin;
4739 1.12.4.2 martin struct mcx_cmd_set_flow_table_root_out *out;
4740 1.12.4.2 martin int error;
4741 1.12.4.2 martin int token;
4742 1.12.4.2 martin
4743 1.12.4.2 martin cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4744 1.12.4.2 martin token = mcx_cmdq_token(sc);
4745 1.12.4.2 martin mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out), token);
4746 1.12.4.2 martin
4747 1.12.4.2 martin in = mcx_cmdq_in(cqe);
4748 1.12.4.2 martin in->cmd_opcode = htobe16(MCX_CMD_SET_FLOW_TABLE_ROOT);
4749 1.12.4.2 martin in->cmd_op_mod = htobe16(0);
4750 1.12.4.2 martin
4751 1.12.4.2 martin if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4752 1.12.4.2 martin printf("%s: unable to allocate set flow table root mailbox\n",
4753 1.12.4.2 martin DEVNAME(sc));
4754 1.12.4.2 martin return (-1);
4755 1.12.4.2 martin }
4756 1.12.4.2 martin mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4757 1.12.4.2 martin mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
4758 1.12.4.2 martin mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
4759 1.12.4.2 martin
4760 1.12.4.2 martin mcx_cmdq_mboxes_sign(&mxm, 1);
4761 1.12.4.2 martin mcx_cmdq_post(sc, cqe, 0);
4762 1.12.4.2 martin error = mcx_cmdq_poll(sc, cqe, 1000);
4763 1.12.4.2 martin if (error != 0) {
4764 1.12.4.2 martin printf("%s: set flow table root timeout\n", DEVNAME(sc));
4765 1.12.4.2 martin goto free;
4766 1.12.4.2 martin }
4767 1.12.4.2 martin if (mcx_cmdq_verify(cqe) != 0) {
4768 1.12.4.2 martin printf("%s: set flow table root command corrupt\n",
4769 1.12.4.2 martin DEVNAME(sc));
4770 1.12.4.2 martin goto free;
4771 1.12.4.2 martin }
4772 1.12.4.2 martin
4773 1.12.4.2 martin out = mcx_cmdq_out(cqe);
4774 1.12.4.2 martin if (out->cmd_status != MCX_CQ_STATUS_OK) {
4775 1.12.4.2 martin printf("%s: set flow table root failed (%x, %x)\n",
4776 1.12.4.2 martin DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
4777 1.12.4.2 martin error = -1;
4778 1.12.4.2 martin goto free;
4779 1.12.4.2 martin }
4780 1.12.4.2 martin
4781 1.12.4.2 martin free:
4782 1.12.4.2 martin mcx_dmamem_free(sc, &mxm);
4783 1.12.4.2 martin return (error);
4784 1.12.4.2 martin }
4785 1.12.4.2 martin
4786 1.12.4.2 martin static int
4787 1.12.4.2 martin mcx_destroy_flow_table(struct mcx_softc *sc)
4788 1.12.4.2 martin {
4789 1.12.4.2 martin struct mcx_cmdq_entry *cqe;
4790 1.12.4.2 martin struct mcx_dmamem mxm;
4791 1.12.4.2 martin struct mcx_cmd_destroy_flow_table_in *in;
4792 1.12.4.2 martin struct mcx_cmd_destroy_flow_table_mb_in *mb;
4793 1.12.4.2 martin struct mcx_cmd_destroy_flow_table_out *out;
4794 1.12.4.2 martin int error;
4795 1.12.4.2 martin int token;
4796 1.12.4.2 martin
4797 1.12.4.2 martin cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4798 1.12.4.2 martin token = mcx_cmdq_token(sc);
4799 1.12.4.2 martin mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mb), sizeof(*out), token);
4800 1.12.4.2 martin
4801 1.12.4.2 martin in = mcx_cmdq_in(cqe);
4802 1.12.4.2 martin in->cmd_opcode = htobe16(MCX_CMD_DESTROY_FLOW_TABLE);
4803 1.12.4.2 martin in->cmd_op_mod = htobe16(0);
4804 1.12.4.2 martin
4805 1.12.4.2 martin if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4806 1.12.4.2 martin printf("%s: unable to allocate destroy flow table mailbox\n",
4807 1.12.4.2 martin DEVNAME(sc));
4808 1.12.4.2 martin return (-1);
4809 1.12.4.2 martin }
4810 1.12.4.2 martin mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4811 1.12.4.2 martin mb->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
4812 1.12.4.2 martin mb->cmd_table_id = htobe32(sc->sc_flow_table_id);
4813 1.12.4.2 martin
4814 1.12.4.2 martin mcx_cmdq_mboxes_sign(&mxm, 1);
4815 1.12.4.2 martin mcx_cmdq_post(sc, cqe, 0);
4816 1.12.4.2 martin error = mcx_cmdq_poll(sc, cqe, 1000);
4817 1.12.4.2 martin if (error != 0) {
4818 1.12.4.2 martin printf("%s: destroy flow table timeout\n", DEVNAME(sc));
4819 1.12.4.2 martin goto free;
4820 1.12.4.2 martin }
4821 1.12.4.2 martin if (mcx_cmdq_verify(cqe) != 0) {
4822 1.12.4.2 martin printf("%s: destroy flow table command corrupt\n", DEVNAME(sc));
4823 1.12.4.2 martin goto free;
4824 1.12.4.2 martin }
4825 1.12.4.2 martin
4826 1.12.4.2 martin out = mcx_cmdq_out(cqe);
4827 1.12.4.2 martin if (out->cmd_status != MCX_CQ_STATUS_OK) {
4828 1.12.4.2 martin printf("%s: destroy flow table failed (%x, %x)\n", DEVNAME(sc),
4829 1.12.4.2 martin out->cmd_status, be32toh(out->cmd_syndrome));
4830 1.12.4.2 martin error = -1;
4831 1.12.4.2 martin goto free;
4832 1.12.4.2 martin }
4833 1.12.4.2 martin
4834 1.12.4.2 martin sc->sc_flow_table_id = -1;
4835 1.12.4.2 martin free:
4836 1.12.4.2 martin mcx_dmamem_free(sc, &mxm);
4837 1.12.4.2 martin return (error);
4838 1.12.4.2 martin }
4839 1.12.4.2 martin
4840 1.12.4.2 martin
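/*
 * Create a flow group covering a contiguous range of entries in the
 * receive flow table, matching on the supplied criteria; the group
 * id, start index and size are recorded per group in the softc.
 */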
4841 1.12.4.2 martin static int
4842 1.12.4.2 martin mcx_create_flow_group(struct mcx_softc *sc, int group, int start, int size,
4843 1.12.4.2 martin int match_enable, struct mcx_flow_match *match)
4844 1.12.4.2 martin {
4845 1.12.4.2 martin struct mcx_cmdq_entry *cqe;
4846 1.12.4.2 martin struct mcx_dmamem mxm;
4847 1.12.4.2 martin struct mcx_cmd_create_flow_group_in *in;
4848 1.12.4.2 martin struct mcx_cmd_create_flow_group_mb_in *mbin;
4849 1.12.4.2 martin struct mcx_cmd_create_flow_group_out *out;
4850 1.12.4.2 martin int error;
4851 1.12.4.2 martin int token;
4852 1.12.4.2 martin
4853 1.12.4.2 martin cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4854 1.12.4.2 martin token = mcx_cmdq_token(sc);
4855 1.12.4.2 martin mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out),
4856 1.12.4.2 martin token);
4857 1.12.4.2 martin
4858 1.12.4.2 martin in = mcx_cmdq_in(cqe);
4859 1.12.4.2 martin in->cmd_opcode = htobe16(MCX_CMD_CREATE_FLOW_GROUP);
4860 1.12.4.2 martin in->cmd_op_mod = htobe16(0);
4861 1.12.4.2 martin
4862 1.12.4.2 martin if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token)
4863 1.12.4.2 martin != 0) {
4864 1.12.4.2 martin printf("%s: unable to allocate create flow group mailbox\n",
4865 1.12.4.2 martin DEVNAME(sc));
4866 1.12.4.2 martin return (-1);
4867 1.12.4.2 martin }
4868 1.12.4.2 martin mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4869 1.12.4.2 martin mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
4870 1.12.4.2 martin mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
4871 1.12.4.2 martin mbin->cmd_start_flow_index = htobe32(start);
4872 1.12.4.2 martin mbin->cmd_end_flow_index = htobe32(start + (size - 1));
4873 1.12.4.2 martin
4874 1.12.4.2 martin mbin->cmd_match_criteria_enable = match_enable;
4875 1.12.4.2 martin memcpy(&mbin->cmd_match_criteria, match, sizeof(*match));
4876 1.12.4.2 martin
4877 1.12.4.2 martin mcx_cmdq_mboxes_sign(&mxm, 2);
4878 1.12.4.2 martin mcx_cmdq_post(sc, cqe, 0);
4879 1.12.4.2 martin error = mcx_cmdq_poll(sc, cqe, 1000);
4880 1.12.4.2 martin if (error != 0) {
4881 1.12.4.2 martin printf("%s: create flow group timeout\n", DEVNAME(sc));
4882 1.12.4.2 martin goto free;
4883 1.12.4.2 martin }
4884 1.12.4.2 martin if (mcx_cmdq_verify(cqe) != 0) {
4885 1.12.4.2 martin printf("%s: create flow group command corrupt\n", DEVNAME(sc));
4886 1.12.4.2 martin goto free;
4887 1.12.4.2 martin }
4888 1.12.4.2 martin
4889 1.12.4.2 martin out = mcx_cmdq_out(cqe);
4890 1.12.4.2 martin if (out->cmd_status != MCX_CQ_STATUS_OK) {
4891 1.12.4.2 martin printf("%s: create flow group failed (%x, %x)\n", DEVNAME(sc),
4892 1.12.4.2 martin out->cmd_status, be32toh(out->cmd_syndrome));
4893 1.12.4.2 martin error = -1;
4894 1.12.4.2 martin goto free;
4895 1.12.4.2 martin }
4896 1.12.4.2 martin
4897 1.12.4.2 martin sc->sc_flow_group_id[group] = be32toh(out->cmd_group_id);
4898 1.12.4.2 martin sc->sc_flow_group_size[group] = size;
4899 1.12.4.2 martin sc->sc_flow_group_start[group] = start;
4900 1.12.4.2 martin
4901 1.12.4.2 martin free:
4902 1.12.4.2 martin mcx_dmamem_free(sc, &mxm);
4903 1.12.4.2 martin return (error);
4904 1.12.4.2 martin }
4905 1.12.4.2 martin
4906 1.12.4.2 martin static int
4907 1.12.4.2 martin mcx_destroy_flow_group(struct mcx_softc *sc, int group)
4908 1.12.4.2 martin {
4909 1.12.4.2 martin struct mcx_cmdq_entry *cqe;
4910 1.12.4.2 martin struct mcx_dmamem mxm;
4911 1.12.4.2 martin struct mcx_cmd_destroy_flow_group_in *in;
4912 1.12.4.2 martin struct mcx_cmd_destroy_flow_group_mb_in *mb;
4913 1.12.4.2 martin struct mcx_cmd_destroy_flow_group_out *out;
4914 1.12.4.2 martin int error;
4915 1.12.4.2 martin int token;
4916 1.12.4.2 martin
4917 1.12.4.2 martin cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4918 1.12.4.2 martin token = mcx_cmdq_token(sc);
4919 1.12.4.2 martin mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mb), sizeof(*out), token);
4920 1.12.4.2 martin
4921 1.12.4.2 martin in = mcx_cmdq_in(cqe);
4922 1.12.4.2 martin in->cmd_opcode = htobe16(MCX_CMD_DESTROY_FLOW_GROUP);
4923 1.12.4.2 martin in->cmd_op_mod = htobe16(0);
4924 1.12.4.2 martin
4925 1.12.4.2 martin if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token) != 0) {
4926 1.12.4.2 martin printf("%s: unable to allocate destroy flow group mailbox\n",
4927 1.12.4.2 martin DEVNAME(sc));
4928 1.12.4.2 martin return (-1);
4929 1.12.4.2 martin }
4930 1.12.4.2 martin mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4931 1.12.4.2 martin mb->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
4932 1.12.4.2 martin mb->cmd_table_id = htobe32(sc->sc_flow_table_id);
4933 1.12.4.2 martin mb->cmd_group_id = htobe32(sc->sc_flow_group_id[group]);
4934 1.12.4.2 martin
4935 1.12.4.2 martin mcx_cmdq_mboxes_sign(&mxm, 2);
4936 1.12.4.2 martin mcx_cmdq_post(sc, cqe, 0);
4937 1.12.4.2 martin error = mcx_cmdq_poll(sc, cqe, 1000);
4938 1.12.4.2 martin if (error != 0) {
4939 1.12.4.2 martin printf("%s: destroy flow group timeout\n", DEVNAME(sc));
4940 1.12.4.2 martin goto free;
4941 1.12.4.2 martin }
4942 1.12.4.2 martin if (mcx_cmdq_verify(cqe) != 0) {
4943 1.12.4.2 martin printf("%s: destroy flow group command corrupt\n", DEVNAME(sc));
4944 1.12.4.2 martin goto free;
4945 1.12.4.2 martin }
4946 1.12.4.2 martin
4947 1.12.4.2 martin out = mcx_cmdq_out(cqe);
4948 1.12.4.2 martin if (out->cmd_status != MCX_CQ_STATUS_OK) {
4949 1.12.4.2 martin printf("%s: destroy flow group failed (%x, %x)\n", DEVNAME(sc),
4950 1.12.4.2 martin out->cmd_status, be32toh(out->cmd_syndrome));
4951 1.12.4.2 martin error = -1;
4952 1.12.4.2 martin goto free;
4953 1.12.4.2 martin }
4954 1.12.4.2 martin
4955 1.12.4.2 martin sc->sc_flow_group_id[group] = -1;
4956 1.12.4.2 martin sc->sc_flow_group_size[group] = 0;
4957 1.12.4.2 martin free:
4958 1.12.4.2 martin mcx_dmamem_free(sc, &mxm);
4959 1.12.4.2 martin return (error);
4960 1.12.4.2 martin }
4961 1.12.4.2 martin
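/*
 * Program one entry in a flow group.  The flow context forwards
 * matching packets to the TIR, and the destination MAC address (when
 * given) is the only match value filled in.
 */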
4962 1.12.4.2 martin static int
4963 1.12.4.2 martin mcx_set_flow_table_entry(struct mcx_softc *sc, int group, int index,
4964 1.12.4.2 martin const uint8_t *macaddr)
4965 1.12.4.2 martin {
4966 1.12.4.2 martin struct mcx_cmdq_entry *cqe;
4967 1.12.4.2 martin struct mcx_dmamem mxm;
4968 1.12.4.2 martin struct mcx_cmd_set_flow_table_entry_in *in;
4969 1.12.4.2 martin struct mcx_cmd_set_flow_table_entry_mb_in *mbin;
4970 1.12.4.2 martin struct mcx_cmd_set_flow_table_entry_out *out;
4971 1.12.4.2 martin uint32_t *dest;
4972 1.12.4.2 martin int error;
4973 1.12.4.2 martin int token;
4974 1.12.4.2 martin
4975 1.12.4.2 martin cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4976 1.12.4.2 martin token = mcx_cmdq_token(sc);
4977 1.12.4.2 martin mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin) + sizeof(*dest),
4978 1.12.4.2 martin sizeof(*out), token);
4979 1.12.4.2 martin
4980 1.12.4.2 martin in = mcx_cmdq_in(cqe);
4981 1.12.4.2 martin in->cmd_opcode = htobe16(MCX_CMD_SET_FLOW_TABLE_ENTRY);
4982 1.12.4.2 martin in->cmd_op_mod = htobe16(0);
4983 1.12.4.2 martin
4984 1.12.4.2 martin if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token)
4985 1.12.4.2 martin != 0) {
4986 1.12.4.2 martin printf("%s: unable to allocate set flow table entry mailbox\n",
4987 1.12.4.2 martin DEVNAME(sc));
4988 1.12.4.2 martin return (-1);
4989 1.12.4.2 martin }
4990 1.12.4.2 martin mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4991 1.12.4.2 martin mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
4992 1.12.4.2 martin mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
4993 1.12.4.2 martin mbin->cmd_flow_index = htobe32(sc->sc_flow_group_start[group] + index);
4994 1.12.4.2 martin mbin->cmd_flow_ctx.fc_group_id = htobe32(sc->sc_flow_group_id[group]);
4995 1.12.4.2 martin
4996 1.12.4.2 martin /* flow context ends at offset 0x330, 0x130 into the second mbox */
4997 1.12.4.2 martin dest = (uint32_t *)
4998 1.12.4.2 martin (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 1))) + 0x130);
4999 1.12.4.2 martin mbin->cmd_flow_ctx.fc_action = htobe32(MCX_FLOW_CONTEXT_ACTION_FORWARD);
5000 1.12.4.2 martin mbin->cmd_flow_ctx.fc_dest_list_size = htobe32(1);
5001 1.12.4.2 martin *dest = htobe32(sc->sc_tirn | MCX_FLOW_CONTEXT_DEST_TYPE_TIR);
5002 1.12.4.2 martin
5003 1.12.4.2 martin /* the only thing we match on at the moment is the dest mac address */
5004 1.12.4.2 martin if (macaddr != NULL) {
5005 1.12.4.2 martin memcpy(mbin->cmd_flow_ctx.fc_match_value.mc_dest_mac, macaddr,
5006 1.12.4.2 martin ETHER_ADDR_LEN);
5007 1.12.4.2 martin }
5008 1.12.4.2 martin
5009 1.12.4.2 martin mcx_cmdq_mboxes_sign(&mxm, 2);
5010 1.12.4.2 martin mcx_cmdq_post(sc, cqe, 0);
5011 1.12.4.2 martin error = mcx_cmdq_poll(sc, cqe, 1000);
5012 1.12.4.2 martin if (error != 0) {
5013 1.12.4.2 martin printf("%s: set flow table entry timeout\n", DEVNAME(sc));
5014 1.12.4.2 martin goto free;
5015 1.12.4.2 martin }
5016 1.12.4.2 martin if (mcx_cmdq_verify(cqe) != 0) {
5017 1.12.4.2 martin printf("%s: set flow table entry command corrupt\n",
5018 1.12.4.2 martin DEVNAME(sc));
5019 1.12.4.2 martin goto free;
5020 1.12.4.2 martin }
5021 1.12.4.2 martin
5022 1.12.4.2 martin out = mcx_cmdq_out(cqe);
5023 1.12.4.2 martin if (out->cmd_status != MCX_CQ_STATUS_OK) {
5024 1.12.4.2 martin printf("%s: set flow table entry failed (%x, %x)\n",
5025 1.12.4.2 martin DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
5026 1.12.4.2 martin error = -1;
5027 1.12.4.2 martin goto free;
5028 1.12.4.2 martin }
5029 1.12.4.2 martin
5030 1.12.4.2 martin free:
5031 1.12.4.2 martin mcx_dmamem_free(sc, &mxm);
5032 1.12.4.2 martin return (error);
5033 1.12.4.2 martin }
5034 1.12.4.2 martin
5035 1.12.4.2 martin static int
5036 1.12.4.2 martin mcx_delete_flow_table_entry(struct mcx_softc *sc, int group, int index)
5037 1.12.4.2 martin {
5038 1.12.4.2 martin struct mcx_cmdq_entry *cqe;
5039 1.12.4.2 martin struct mcx_dmamem mxm;
5040 1.12.4.2 martin struct mcx_cmd_delete_flow_table_entry_in *in;
5041 1.12.4.2 martin struct mcx_cmd_delete_flow_table_entry_mb_in *mbin;
5042 1.12.4.2 martin struct mcx_cmd_delete_flow_table_entry_out *out;
5043 1.12.4.2 martin int error;
5044 1.12.4.2 martin int token;
5045 1.12.4.2 martin
5046 1.12.4.2 martin cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5047 1.12.4.2 martin token = mcx_cmdq_token(sc);
5048 1.12.4.2 martin mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out),
5049 1.12.4.2 martin token);
5050 1.12.4.2 martin
5051 1.12.4.2 martin in = mcx_cmdq_in(cqe);
5052 1.12.4.2 martin in->cmd_opcode = htobe16(MCX_CMD_DELETE_FLOW_TABLE_ENTRY);
5053 1.12.4.2 martin in->cmd_op_mod = htobe16(0);
5054 1.12.4.2 martin
5055 1.12.4.2 martin if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token) != 0) {
5056 1.12.4.2 martin printf("%s: unable to allocate delete flow table entry mailbox\n",
5057 1.12.4.2 martin DEVNAME(sc));
5058 1.12.4.2 martin return (-1);
5059 1.12.4.2 martin }
5060 1.12.4.2 martin mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5061 1.12.4.2 martin mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5062 1.12.4.2 martin mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
5063 1.12.4.2 martin mbin->cmd_flow_index = htobe32(sc->sc_flow_group_start[group] + index);
5064 1.12.4.2 martin
5065 1.12.4.2 martin mcx_cmdq_mboxes_sign(&mxm, 2);
5066 1.12.4.2 martin mcx_cmdq_post(sc, cqe, 0);
5067 1.12.4.2 martin error = mcx_cmdq_poll(sc, cqe, 1000);
5068 1.12.4.2 martin if (error != 0) {
5069 1.12.4.2 martin printf("%s: delete flow table entry timeout\n", DEVNAME(sc));
5070 1.12.4.2 martin goto free;
5071 1.12.4.2 martin }
5072 1.12.4.2 martin if (mcx_cmdq_verify(cqe) != 0) {
5073 1.12.4.2 martin printf("%s: delete flow table entry command corrupt\n",
5074 1.12.4.2 martin DEVNAME(sc));
5075 1.12.4.2 martin goto free;
5076 1.12.4.2 martin }
5077 1.12.4.2 martin
5078 1.12.4.2 martin out = mcx_cmdq_out(cqe);
5079 1.12.4.2 martin if (out->cmd_status != MCX_CQ_STATUS_OK) {
5080 1.12.4.2 martin printf("%s: delete flow table entry %d:%d failed (%x, %x)\n",
5081 1.12.4.2 martin DEVNAME(sc), group, index, out->cmd_status,
5082 1.12.4.2 martin be32toh(out->cmd_syndrome));
5083 1.12.4.2 martin error = -1;
5084 1.12.4.2 martin goto free;
5085 1.12.4.2 martin }
5086 1.12.4.2 martin
5087 1.12.4.2 martin free:
5088 1.12.4.2 martin mcx_dmamem_free(sc, &mxm);
5089 1.12.4.2 martin return (error);
5090 1.12.4.2 martin }
5091 1.12.4.2 martin
5092 1.12.4.2 martin #if 0
5093 1.12.4.2 martin int
5094 1.12.4.2 martin mcx_dump_flow_table(struct mcx_softc *sc)
5095 1.12.4.2 martin {
5096 1.12.4.2 martin struct mcx_dmamem mxm;
5097 1.12.4.2 martin struct mcx_cmdq_entry *cqe;
5098 1.12.4.2 martin struct mcx_cmd_query_flow_table_in *in;
5099 1.12.4.2 martin struct mcx_cmd_query_flow_table_mb_in *mbin;
5100 1.12.4.2 martin struct mcx_cmd_query_flow_table_out *out;
5101 1.12.4.2 martin struct mcx_cmd_query_flow_table_mb_out *mbout;
5102 1.12.4.2 martin uint8_t token = mcx_cmdq_token(sc);
5103 1.12.4.2 martin int error;
5104 1.12.4.2 martin int i;
5105 1.12.4.2 martin uint8_t *dump;
5106 1.12.4.2 martin
5107 1.12.4.2 martin cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5108 1.12.4.2 martin mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5109 1.12.4.2 martin sizeof(*out) + sizeof(*mbout) + 16, token);
5110 1.12.4.2 martin
5111 1.12.4.2 martin in = mcx_cmdq_in(cqe);
5112 1.12.4.2 martin in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_TABLE);
5113 1.12.4.2 martin in->cmd_op_mod = htobe16(0);
5114 1.12.4.2 martin
5115 1.12.4.2 martin CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
5116 1.12.4.2 martin CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE);
5117 1.12.4.2 martin if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
5118 1.12.4.2 martin &cqe->cq_output_ptr, token) != 0) {
5119 1.12.4.2 martin printf(", unable to allocate query flow table mailboxes\n");
5120 1.12.4.2 martin return (-1);
5121 1.12.4.2 martin }
5122 1.12.4.2 martin cqe->cq_input_ptr = cqe->cq_output_ptr;
5123 1.12.4.2 martin
5124 1.12.4.2 martin mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5125 1.12.4.2 martin mbin->cmd_table_type = 0;
5126 1.12.4.2 martin mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
5127 1.12.4.2 martin
5128 1.12.4.2 martin mcx_cmdq_mboxes_sign(&mxm, 1);
5129 1.12.4.2 martin
5130 1.12.4.2 martin mcx_cmdq_post(sc, cqe, 0);
5131 1.12.4.2 martin error = mcx_cmdq_poll(sc, cqe, 1000);
5132 1.12.4.2 martin if (error != 0) {
5133 1.12.4.2 martin printf("%s: query flow table timeout\n", DEVNAME(sc));
5134 1.12.4.2 martin goto free;
5135 1.12.4.2 martin }
5136 1.12.4.2 martin error = mcx_cmdq_verify(cqe);
5137 1.12.4.2 martin if (error != 0) {
5138 1.12.4.2 martin printf("%s: query flow table reply corrupt\n", DEVNAME(sc));
5139 1.12.4.2 martin goto free;
5140 1.12.4.2 martin }
5141 1.12.4.2 martin
5142 1.12.4.2 martin out = mcx_cmdq_out(cqe);
5143 1.12.4.2 martin switch (out->cmd_status) {
5144 1.12.4.2 martin case MCX_CQ_STATUS_OK:
5145 1.12.4.2 martin break;
5146 1.12.4.2 martin default:
5147 1.12.4.2 martin printf("%s: query flow table failed (%x/%x)\n", DEVNAME(sc),
5148 1.12.4.2 martin out->cmd_status, be32toh(out->cmd_syndrome));
5149 1.12.4.2 martin error = -1;
5150 1.12.4.2 martin goto free;
5151 1.12.4.2 martin }
5152 1.12.4.2 martin
5153 1.12.4.2 martin mbout = (struct mcx_cmd_query_flow_table_mb_out *)
5154 1.12.4.2 martin (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
5155 1.12.4.2 martin dump = (uint8_t *)mbout + 8;
5156 1.12.4.2 martin for (i = 0; i < sizeof(struct mcx_flow_table_ctx); i++) {
5157 1.12.4.2 martin printf("%.2x ", dump[i]);
5158 1.12.4.2 martin if (i % 16 == 15)
5159 1.12.4.2 martin printf("\n");
5160 1.12.4.2 martin }
5161 1.12.4.2 martin free:
5162 1.12.4.2 martin mcx_cq_mboxes_free(sc, &mxm);
5163 1.12.4.2 martin return (error);
5164 1.12.4.2 martin }
5165 1.12.4.2 martin int
5166 1.12.4.2 martin mcx_dump_flow_table_entry(struct mcx_softc *sc, int index)
5167 1.12.4.2 martin {
5168 1.12.4.2 martin struct mcx_dmamem mxm;
5169 1.12.4.2 martin struct mcx_cmdq_entry *cqe;
5170 1.12.4.2 martin struct mcx_cmd_query_flow_table_entry_in *in;
5171 1.12.4.2 martin struct mcx_cmd_query_flow_table_entry_mb_in *mbin;
5172 1.12.4.2 martin struct mcx_cmd_query_flow_table_entry_out *out;
5173 1.12.4.2 martin struct mcx_cmd_query_flow_table_entry_mb_out *mbout;
5174 1.12.4.2 martin uint8_t token = mcx_cmdq_token(sc);
5175 1.12.4.2 martin int error;
5176 1.12.4.2 martin int i;
5177 1.12.4.2 martin uint8_t *dump;
5178 1.12.4.2 martin
5179 1.12.4.2 martin cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5180 1.12.4.2 martin mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5181 1.12.4.2 martin sizeof(*out) + sizeof(*mbout) + 16, token);
5182 1.12.4.2 martin
5183 1.12.4.2 martin in = mcx_cmdq_in(cqe);
5184 1.12.4.2 martin in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_TABLE_ENTRY);
5185 1.12.4.2 martin in->cmd_op_mod = htobe16(0);
5186 1.12.4.2 martin
5187 1.12.4.2 martin CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
5188 1.12.4.2 martin CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
5189 1.12.4.2 martin if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
5190 1.12.4.2 martin &cqe->cq_output_ptr, token) != 0) {
5191 1.12.4.2 martin printf(", unable to allocate query flow table entry mailboxes\n");
5192 1.12.4.2 martin return (-1);
5193 1.12.4.2 martin }
5194 1.12.4.2 martin cqe->cq_input_ptr = cqe->cq_output_ptr;
5195 1.12.4.2 martin
5196 1.12.4.2 martin mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5197 1.12.4.2 martin mbin->cmd_table_type = 0;
5198 1.12.4.2 martin mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
5199 1.12.4.2 martin mbin->cmd_flow_index = htobe32(index);
5200 1.12.4.2 martin
5201 1.12.4.2 martin mcx_cmdq_mboxes_sign(&mxm, 1);
5202 1.12.4.2 martin
5203 1.12.4.2 martin mcx_cmdq_post(sc, cqe, 0);
5204 1.12.4.2 martin error = mcx_cmdq_poll(sc, cqe, 1000);
5205 1.12.4.2 martin if (error != 0) {
5206 1.12.4.2 martin printf("%s: query flow table entry timeout\n", DEVNAME(sc));
5207 1.12.4.2 martin goto free;
5208 1.12.4.2 martin }
5209 1.12.4.2 martin error = mcx_cmdq_verify(cqe);
5210 1.12.4.2 martin if (error != 0) {
5211 1.12.4.2 martin printf("%s: query flow table entry reply corrupt\n",
5212 1.12.4.2 martin DEVNAME(sc));
5213 1.12.4.2 martin goto free;
5214 1.12.4.2 martin }
5215 1.12.4.2 martin
5216 1.12.4.2 martin out = mcx_cmdq_out(cqe);
5217 1.12.4.2 martin switch (out->cmd_status) {
5218 1.12.4.2 martin case MCX_CQ_STATUS_OK:
5219 1.12.4.2 martin break;
5220 1.12.4.2 martin default:
5221 1.12.4.2 martin printf("%s: query flow table entry failed (%x/%x)\n",
5222 1.12.4.2 martin DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
5223 1.12.4.2 martin error = -1;
5224 1.12.4.2 martin goto free;
5225 1.12.4.2 martin }
5226 1.12.4.2 martin
5227 1.12.4.2 martin mbout = (struct mcx_cmd_query_flow_table_entry_mb_out *)
5228 1.12.4.2 martin (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
5229 1.12.4.2 martin dump = (uint8_t *)mbout;
5230 1.12.4.2 martin for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
5231 1.12.4.2 martin printf("%.2x ", dump[i]);
5232 1.12.4.2 martin if (i % 16 == 15)
5233 1.12.4.2 martin printf("\n");
5234 1.12.4.2 martin }
5235 1.12.4.2 martin
5236 1.12.4.2 martin free:
5237 1.12.4.2 martin mcx_cq_mboxes_free(sc, &mxm);
5238 1.12.4.2 martin return (error);
5239 1.12.4.2 martin }
5240 1.12.4.2 martin
5241 1.12.4.2 martin int
5242 1.12.4.2 martin mcx_dump_flow_group(struct mcx_softc *sc)
5243 1.12.4.2 martin {
5244 1.12.4.2 martin struct mcx_dmamem mxm;
5245 1.12.4.2 martin struct mcx_cmdq_entry *cqe;
5246 1.12.4.2 martin struct mcx_cmd_query_flow_group_in *in;
5247 1.12.4.2 martin struct mcx_cmd_query_flow_group_mb_in *mbin;
5248 1.12.4.2 martin struct mcx_cmd_query_flow_group_out *out;
5249 1.12.4.2 martin struct mcx_cmd_query_flow_group_mb_out *mbout;
5250 1.12.4.2 martin uint8_t token = mcx_cmdq_token(sc);
5251 1.12.4.2 martin int error;
5252 1.12.4.2 martin int i;
5253 1.12.4.2 martin uint8_t *dump;
5254 1.12.4.2 martin
5255 1.12.4.2 martin cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5256 1.12.4.2 martin mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5257 1.12.4.2 martin sizeof(*out) + sizeof(*mbout) + 16, token);
5258 1.12.4.2 martin
5259 1.12.4.2 martin in = mcx_cmdq_in(cqe);
5260 1.12.4.2 martin in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_GROUP);
5261 1.12.4.2 martin in->cmd_op_mod = htobe16(0);
5262 1.12.4.2 martin
5263 1.12.4.2 martin CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
5264 1.12.4.2 martin CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
5265 1.12.4.2 martin if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
5266 1.12.4.2 martin &cqe->cq_output_ptr, token) != 0) {
5267 1.12.4.2 martin printf(", unable to allocate query flow group mailboxes\n");
5268 1.12.4.2 martin return (-1);
5269 1.12.4.2 martin }
5270 1.12.4.2 martin cqe->cq_input_ptr = cqe->cq_output_ptr;
5271 1.12.4.2 martin
5272 1.12.4.2 martin mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5273 1.12.4.2 martin mbin->cmd_table_type = 0;
5274 1.12.4.2 martin mbin->cmd_table_id = htobe32(sc->sc_flow_table_id);
5275 1.12.4.2 martin mbin->cmd_group_id = htobe32(sc->sc_flow_group_id);
5276 1.12.4.2 martin
5277 1.12.4.2 martin mcx_cmdq_mboxes_sign(&mxm, 1);
5278 1.12.4.2 martin
5279 1.12.4.2 martin mcx_cmdq_post(sc, cqe, 0);
5280 1.12.4.2 martin error = mcx_cmdq_poll(sc, cqe, 1000);
5281 1.12.4.2 martin if (error != 0) {
5282 1.12.4.2 martin printf("%s: query flow group timeout\n", DEVNAME(sc));
5283 1.12.4.2 martin goto free;
5284 1.12.4.2 martin }
5285 1.12.4.2 martin error = mcx_cmdq_verify(cqe);
5286 1.12.4.2 martin if (error != 0) {
5287 1.12.4.2 martin printf("%s: query flow group reply corrupt\n", DEVNAME(sc));
5288 1.12.4.2 martin goto free;
5289 1.12.4.2 martin }
5290 1.12.4.2 martin
5291 1.12.4.2 martin out = mcx_cmdq_out(cqe);
5292 1.12.4.2 martin switch (out->cmd_status) {
5293 1.12.4.2 martin case MCX_CQ_STATUS_OK:
5294 1.12.4.2 martin break;
5295 1.12.4.2 martin default:
5296 1.12.4.2 martin printf("%s: query flow group failed (%x/%x)\n", DEVNAME(sc),
5297 1.12.4.2 martin out->cmd_status, be32toh(out->cmd_syndrome));
5298 1.12.4.2 martin error = -1;
5299 1.12.4.2 martin goto free;
5300 1.12.4.2 martin }
5301 1.12.4.2 martin
5302 1.12.4.2 martin mbout = (struct mcx_cmd_query_flow_group_mb_out *)
5303 1.12.4.2 martin (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
5304 1.12.4.2 martin dump = (uint8_t *)mbout;
5305 1.12.4.2 martin for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
5306 1.12.4.2 martin printf("%.2x ", dump[i]);
5307 1.12.4.2 martin if (i % 16 == 15)
5308 1.12.4.2 martin printf("\n");
5309 1.12.4.2 martin }
5310 1.12.4.2 martin dump = (uint8_t *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 1)));
5311 1.12.4.2 martin for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
5312 1.12.4.2 martin printf("%.2x ", dump[i]);
5313 1.12.4.2 martin if (i % 16 == 15)
5314 1.12.4.2 martin printf("\n");
5315 1.12.4.2 martin }
5316 1.12.4.2 martin
5317 1.12.4.2 martin free:
5318 1.12.4.2 martin mcx_cq_mboxes_free(sc, &mxm);
5319 1.12.4.2 martin return (error);
5320 1.12.4.2 martin }
5321 1.12.4.2 martin
5322 1.12.4.2 martin int
5323 1.12.4.2 martin mcx_dump_rq(struct mcx_softc *sc)
5324 1.12.4.2 martin {
5325 1.12.4.2 martin struct mcx_dmamem mxm;
5326 1.12.4.2 martin struct mcx_cmdq_entry *cqe;
5327 1.12.4.2 martin struct mcx_cmd_query_rq_in *in;
5328 1.12.4.2 martin struct mcx_cmd_query_rq_out *out;
5329 1.12.4.2 martin struct mcx_cmd_query_rq_mb_out *mbout;
5330 1.12.4.2 martin uint8_t token = mcx_cmdq_token(sc);
5331 1.12.4.2 martin int error;
5332 1.12.4.2 martin
5333 1.12.4.2 martin cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5334 1.12.4.2 martin mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mbout) + 16,
5335 1.12.4.2 martin token);
5336 1.12.4.2 martin
5337 1.12.4.2 martin in = mcx_cmdq_in(cqe);
5338 1.12.4.2 martin in->cmd_opcode = htobe16(MCX_CMD_QUERY_RQ);
5339 1.12.4.2 martin in->cmd_op_mod = htobe16(0);
5340 1.12.4.2 martin in->cmd_rqn = htobe32(sc->sc_rqn);
5341 1.12.4.2 martin
5342 1.12.4.2 martin CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
5343 1.12.4.2 martin if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
5344 1.12.4.2 martin &cqe->cq_output_ptr, token) != 0) {
5345 1.12.4.2 martin 		printf(", unable to allocate query rq mailboxes\n");
5346 1.12.4.2 martin return (-1);
5347 1.12.4.2 martin }
5348 1.12.4.2 martin
5349 1.12.4.2 martin mcx_cmdq_mboxes_sign(&mxm, 1);
5350 1.12.4.2 martin
5351 1.12.4.2 martin mcx_cmdq_post(sc, cqe, 0);
5352 1.12.4.2 martin error = mcx_cmdq_poll(sc, cqe, 1000);
5353 1.12.4.2 martin if (error != 0) {
5354 1.12.4.2 martin printf("%s: query rq timeout\n", DEVNAME(sc));
5355 1.12.4.2 martin goto free;
5356 1.12.4.2 martin }
5357 1.12.4.2 martin error = mcx_cmdq_verify(cqe);
5358 1.12.4.2 martin if (error != 0) {
5359 1.12.4.2 martin printf("%s: query rq reply corrupt\n", DEVNAME(sc));
5360 1.12.4.2 martin goto free;
5361 1.12.4.2 martin }
5362 1.12.4.2 martin
5363 1.12.4.2 martin out = mcx_cmdq_out(cqe);
5364 1.12.4.2 martin switch (out->cmd_status) {
5365 1.12.4.2 martin case MCX_CQ_STATUS_OK:
5366 1.12.4.2 martin break;
5367 1.12.4.2 martin default:
5368 1.12.4.2 martin printf("%s: query rq failed (%x/%x)\n", DEVNAME(sc),
5369 1.12.4.2 martin out->cmd_status, be32toh(out->cmd_syndrome));
5370 1.12.4.2 martin error = -1;
5371 1.12.4.2 martin goto free;
5372 1.12.4.2 martin }
5373 1.12.4.2 martin
5374 1.12.4.2 martin mbout = (struct mcx_cmd_query_rq_mb_out *)
5375 1.12.4.2 martin (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
5376 1.12.4.2 martin printf("%s: rq: state %d, ui %d, cqn %d, s/s %d/%d/%d, hw %d, sw %d\n",
5377 1.12.4.2 martin DEVNAME(sc),
5378 1.12.4.2 martin (be32toh(mbout->cmd_ctx.rq_flags) >> MCX_RQ_CTX_STATE_SHIFT) & 0x0f,
5379 1.12.4.2 martin be32toh(mbout->cmd_ctx.rq_user_index),
5380 1.12.4.2 martin be32toh(mbout->cmd_ctx.rq_cqn),
5381 1.12.4.2 martin be16toh(mbout->cmd_ctx.rq_wq.wq_log_stride),
5382 1.12.4.2 martin mbout->cmd_ctx.rq_wq.wq_log_page_sz,
5383 1.12.4.2 martin mbout->cmd_ctx.rq_wq.wq_log_size,
5384 1.12.4.2 martin be32toh(mbout->cmd_ctx.rq_wq.wq_hw_counter),
5385 1.12.4.2 martin be32toh(mbout->cmd_ctx.rq_wq.wq_sw_counter));
5386 1.12.4.2 martin
5387 1.12.4.2 martin free:
5388 1.12.4.2 martin mcx_cq_mboxes_free(sc, &mxm);
5389 1.12.4.2 martin return (error);
5390 1.12.4.2 martin }
5391 1.12.4.2 martin
5392 1.12.4.2 martin int
5393 1.12.4.2 martin mcx_dump_sq(struct mcx_softc *sc)
5394 1.12.4.2 martin {
5395 1.12.4.2 martin struct mcx_dmamem mxm;
5396 1.12.4.2 martin struct mcx_cmdq_entry *cqe;
5397 1.12.4.2 martin struct mcx_cmd_query_sq_in *in;
5398 1.12.4.2 martin struct mcx_cmd_query_sq_out *out;
5399 1.12.4.2 martin struct mcx_cmd_query_sq_mb_out *mbout;
5400 1.12.4.2 martin uint8_t token = mcx_cmdq_token(sc);
5401 1.12.4.2 martin int error;
5402 1.12.4.2 martin int i;
5403 1.12.4.2 martin uint8_t *dump;
5404 1.12.4.2 martin
5405 1.12.4.2 martin cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5406 1.12.4.2 martin mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mbout) + 16,
5407 1.12.4.2 martin token);
5408 1.12.4.2 martin
5409 1.12.4.2 martin in = mcx_cmdq_in(cqe);
5410 1.12.4.2 martin in->cmd_opcode = htobe16(MCX_CMD_QUERY_SQ);
5411 1.12.4.2 martin in->cmd_op_mod = htobe16(0);
5412 1.12.4.2 martin in->cmd_sqn = htobe32(sc->sc_sqn);
5413 1.12.4.2 martin
5414 1.12.4.2 martin CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
5415 1.12.4.2 martin if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
5416 1.12.4.2 martin &cqe->cq_output_ptr, token) != 0) {
5417 1.12.4.2 martin printf(", unable to allocate query sq mailboxes\n");
5418 1.12.4.2 martin return (-1);
5419 1.12.4.2 martin }
5420 1.12.4.2 martin
5421 1.12.4.2 martin mcx_cmdq_mboxes_sign(&mxm, 1);
5422 1.12.4.2 martin
5423 1.12.4.2 martin mcx_cmdq_post(sc, cqe, 0);
5424 1.12.4.2 martin error = mcx_cmdq_poll(sc, cqe, 1000);
5425 1.12.4.2 martin if (error != 0) {
5426 1.12.4.2 martin printf("%s: query sq timeout\n", DEVNAME(sc));
5427 1.12.4.2 martin goto free;
5428 1.12.4.2 martin }
5429 1.12.4.2 martin error = mcx_cmdq_verify(cqe);
5430 1.12.4.2 martin if (error != 0) {
5431 1.12.4.2 martin printf("%s: query sq reply corrupt\n", DEVNAME(sc));
5432 1.12.4.2 martin goto free;
5433 1.12.4.2 martin }
5434 1.12.4.2 martin
5435 1.12.4.2 martin out = mcx_cmdq_out(cqe);
5436 1.12.4.2 martin switch (out->cmd_status) {
5437 1.12.4.2 martin case MCX_CQ_STATUS_OK:
5438 1.12.4.2 martin break;
5439 1.12.4.2 martin default:
5440 1.12.4.2 martin printf("%s: query sq failed (%x/%x)\n", DEVNAME(sc),
5441 1.12.4.2 martin out->cmd_status, be32toh(out->cmd_syndrome));
5442 1.12.4.2 martin error = -1;
5443 1.12.4.2 martin goto free;
5444 1.12.4.2 martin }
5445 1.12.4.2 martin
5446 1.12.4.2 martin mbout = (struct mcx_cmd_query_sq_mb_out *)
5447 1.12.4.2 martin (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
5448 1.12.4.2 martin /*
5449 1.12.4.2 martin printf("%s: rq: state %d, ui %d, cqn %d, s/s %d/%d/%d, hw %d, sw %d\n",
5450 1.12.4.2 martin DEVNAME(sc),
5451 1.12.4.2 martin (be32toh(mbout->cmd_ctx.rq_flags) >> MCX_RQ_CTX_STATE_SHIFT) & 0x0f,
5452 1.12.4.2 martin be32toh(mbout->cmd_ctx.rq_user_index),
5453 1.12.4.2 martin be32toh(mbout->cmd_ctx.rq_cqn),
5454 1.12.4.2 martin be16toh(mbout->cmd_ctx.rq_wq.wq_log_stride),
5455 1.12.4.2 martin mbout->cmd_ctx.rq_wq.wq_log_page_sz,
5456 1.12.4.2 martin mbout->cmd_ctx.rq_wq.wq_log_size,
5457 1.12.4.2 martin be32toh(mbout->cmd_ctx.rq_wq.wq_hw_counter),
5458 1.12.4.2 martin be32toh(mbout->cmd_ctx.rq_wq.wq_sw_counter));
5459 1.12.4.2 martin */
5460 1.12.4.2 martin dump = (uint8_t *)mbout;
5461 1.12.4.2 martin for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
5462 1.12.4.2 martin printf("%.2x ", dump[i]);
5463 1.12.4.2 martin if (i % 16 == 15)
5464 1.12.4.2 martin printf("\n");
5465 1.12.4.2 martin }
5466 1.12.4.2 martin
5467 1.12.4.2 martin free:
5468 1.12.4.2 martin mcx_cq_mboxes_free(sc, &mxm);
5469 1.12.4.2 martin return (error);
5470 1.12.4.2 martin }
5471 1.12.4.2 martin
5472 1.12.4.2 martin static int
5473 1.12.4.2 martin mcx_dump_counters(struct mcx_softc *sc)
5474 1.12.4.2 martin {
5475 1.12.4.2 martin struct mcx_dmamem mxm;
5476 1.12.4.2 martin struct mcx_cmdq_entry *cqe;
5477 1.12.4.2 martin struct mcx_cmd_query_vport_counters_in *in;
5478 1.12.4.2 martin struct mcx_cmd_query_vport_counters_mb_in *mbin;
5479 1.12.4.2 martin struct mcx_cmd_query_vport_counters_out *out;
5480 1.12.4.2 martin struct mcx_nic_vport_counters *counters;
5481 1.12.4.2 martin int error, token;
5482 1.12.4.2 martin
5483 1.12.4.2 martin cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5484 1.12.4.2 martin token = mcx_cmdq_token(sc);
5485 1.12.4.2 martin mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5486 1.12.4.2 martin sizeof(*out) + sizeof(*counters), token);
5487 1.12.4.2 martin
5488 1.12.4.2 martin in = mcx_cmdq_in(cqe);
5489 1.12.4.2 martin in->cmd_opcode = htobe16(MCX_CMD_QUERY_VPORT_COUNTERS);
5490 1.12.4.2 martin in->cmd_op_mod = htobe16(0);
5491 1.12.4.2 martin
5492 1.12.4.2 martin if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_output_ptr, token) != 0) {
5493 1.12.4.2 martin printf(", unable to allocate query nic vport counters mailboxen\n");
5494 1.12.4.2 martin return (-1);
5495 1.12.4.2 martin }
5496 1.12.4.2 martin cqe->cq_input_ptr = cqe->cq_output_ptr;
5497 1.12.4.2 martin
5498 1.12.4.2 martin mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5499 1.12.4.2 martin mbin->cmd_clear = 0x80;
5500 1.12.4.2 martin
5501 1.12.4.2 martin mcx_cmdq_mboxes_sign(&mxm, 1);
5502 1.12.4.2 martin mcx_cmdq_post(sc, cqe, 0);
5503 1.12.4.2 martin
5504 1.12.4.2 martin error = mcx_cmdq_poll(sc, cqe, 1000);
5505 1.12.4.2 martin if (error != 0) {
5506 1.12.4.2 martin printf("%s: query nic vport counters timeout\n", DEVNAME(sc));
5507 1.12.4.2 martin goto free;
5508 1.12.4.2 martin }
5509 1.12.4.2 martin if (mcx_cmdq_verify(cqe) != 0) {
5510 1.12.4.2 martin printf("%s: query nic vport counters command corrupt\n",
5511 1.12.4.2 martin DEVNAME(sc));
5512 1.12.4.2 martin goto free;
5513 1.12.4.2 martin }
5514 1.12.4.2 martin
5515 1.12.4.2 martin out = mcx_cmdq_out(cqe);
5516 1.12.4.2 martin if (out->cmd_status != MCX_CQ_STATUS_OK) {
5517 1.12.4.2 martin printf("%s: query nic vport counters failed (%x, %x)\n",
5518 1.12.4.2 martin 		    DEVNAME(sc), out->cmd_status, be32toh(out->cmd_syndrome));
5519 1.12.4.2 martin error = -1;
5520 1.12.4.2 martin goto free;
5521 1.12.4.2 martin }
5522 1.12.4.2 martin
5523 1.12.4.2 martin counters = (struct mcx_nic_vport_counters *)
5524 1.12.4.2 martin (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
5525 1.12.4.2 martin if (counters->rx_bcast.packets + counters->tx_bcast.packets +
5526 1.12.4.2 martin counters->rx_ucast.packets + counters->tx_ucast.packets +
5527 1.12.4.2 martin counters->rx_err.packets + counters->tx_err.packets)
5528 1.12.4.2 martin printf("%s: err %llx/%llx uc %llx/%llx bc %llx/%llx\n",
5529 1.12.4.2 martin DEVNAME(sc),
5530 1.12.4.2 martin be64toh(counters->tx_err.packets),
5531 1.12.4.2 martin be64toh(counters->rx_err.packets),
5532 1.12.4.2 martin be64toh(counters->tx_ucast.packets),
5533 1.12.4.2 martin be64toh(counters->rx_ucast.packets),
5534 1.12.4.2 martin be64toh(counters->tx_bcast.packets),
5535 1.12.4.2 martin be64toh(counters->rx_bcast.packets));
5536 1.12.4.2 martin free:
5537 1.12.4.2 martin mcx_dmamem_free(sc, &mxm);
5538 1.12.4.2 martin
5539 1.12.4.2 martin return (error);
5540 1.12.4.2 martin }
5541 1.12.4.2 martin
5542 1.12.4.2 martin static int
5543 1.12.4.2 martin mcx_dump_flow_counter(struct mcx_softc *sc, int index, const char *what)
5544 1.12.4.2 martin {
5545 1.12.4.2 martin struct mcx_dmamem mxm;
5546 1.12.4.2 martin struct mcx_cmdq_entry *cqe;
5547 1.12.4.2 martin struct mcx_cmd_query_flow_counter_in *in;
5548 1.12.4.2 martin struct mcx_cmd_query_flow_counter_mb_in *mbin;
5549 1.12.4.2 martin struct mcx_cmd_query_flow_counter_out *out;
5550 1.12.4.2 martin struct mcx_counter *counters;
5551 1.12.4.2 martin int error, token;
5552 1.12.4.2 martin
5553 1.12.4.2 martin cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5554 1.12.4.2 martin token = mcx_cmdq_token(sc);
5555 1.12.4.2 martin mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out) +
5556 1.12.4.2 martin sizeof(*counters), token);
5557 1.12.4.2 martin
5558 1.12.4.2 martin in = mcx_cmdq_in(cqe);
5559 1.12.4.2 martin in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_COUNTER);
5560 1.12.4.2 martin in->cmd_op_mod = htobe16(0);
5561 1.12.4.2 martin
5562 1.12.4.2 martin if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_output_ptr, token) != 0) {
5563 1.12.4.2 martin printf(", unable to allocate query flow counter mailboxen\n");
5564 1.12.4.2 martin return (-1);
5565 1.12.4.2 martin }
5566 1.12.4.2 martin cqe->cq_input_ptr = cqe->cq_output_ptr;
5567 1.12.4.2 martin mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5568 1.12.4.2 martin mbin->cmd_flow_counter_id = htobe16(sc->sc_flow_counter_id[index]);
5569 1.12.4.2 martin mbin->cmd_clear = 0x80;
5570 1.12.4.2 martin
5571 1.12.4.2 martin mcx_cmdq_mboxes_sign(&mxm, 1);
5572 1.12.4.2 martin mcx_cmdq_post(sc, cqe, 0);
5573 1.12.4.2 martin
5574 1.12.4.2 martin error = mcx_cmdq_poll(sc, cqe, 1000);
5575 1.12.4.2 martin if (error != 0) {
5576 1.12.4.2 martin printf("%s: query flow counter timeout\n", DEVNAME(sc));
5577 1.12.4.2 martin goto free;
5578 1.12.4.2 martin }
5579 1.12.4.2 martin if (mcx_cmdq_verify(cqe) != 0) {
5580 1.12.4.2 martin printf("%s: query flow counter command corrupt\n", DEVNAME(sc));
5581 1.12.4.2 martin goto free;
5582 1.12.4.2 martin }
5583 1.12.4.2 martin
5584 1.12.4.2 martin out = mcx_cmdq_out(cqe);
5585 1.12.4.2 martin if (out->cmd_status != MCX_CQ_STATUS_OK) {
5586 1.12.4.2 martin printf("%s: query flow counter failed (%x, %x)\n", DEVNAME(sc),
5587 1.12.4.2 martin 		    out->cmd_status, be32toh(out->cmd_syndrome));
5588 1.12.4.2 martin error = -1;
5589 1.12.4.2 martin goto free;
5590 1.12.4.2 martin }
5591 1.12.4.2 martin
5592 1.12.4.2 martin counters = (struct mcx_counter *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
5593 1.12.4.2 martin if (counters->packets)
5594 1.12.4.2 martin printf("%s: %s inflow %llx\n", DEVNAME(sc), what,
5595 1.12.4.2 martin be64toh(counters->packets));
5596 1.12.4.2 martin free:
5597 1.12.4.2 martin mcx_dmamem_free(sc, &mxm);
5598 1.12.4.2 martin
5599 1.12.4.2 martin return (error);
5600 1.12.4.2 martin }
5601 1.12.4.2 martin
5602 1.12.4.2 martin #endif
5603 1.12.4.2 martin
5604 1.12.4.2 martin static int
5605 1.12.4.2 martin mcx_rx_fill_slots(struct mcx_softc *sc, void *ring, struct mcx_slot *slots,
5606 1.12.4.2 martin uint *prod, int bufsize, uint nslots)
5607 1.12.4.2 martin {
5608 1.12.4.2 martin struct mcx_rq_entry *rqe;
5609 1.12.4.2 martin struct mcx_slot *ms;
5610 1.12.4.2 martin struct mbuf *m;
5611 1.12.4.2 martin uint slot, p, fills;
5612 1.12.4.2 martin
5613 1.12.4.2 martin p = *prod;
5614 1.12.4.2 martin slot = (p % (1 << MCX_LOG_RQ_SIZE));
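	/*
	 * The producer value is a free-running counter; the low bits
	 * select the ring slot and the (masked) counter itself is what
	 * gets written to the doorbell below.
	 */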
5615 1.12.4.2 martin rqe = ring;
5616 1.12.4.2 martin for (fills = 0; fills < nslots; fills++) {
5617 1.12.4.2 martin ms = &slots[slot];
5618 1.12.4.2 martin #if 0
5619 1.12.4.2 martin m = MCLGETI(NULL, M_DONTWAIT, NULL, bufsize + ETHER_ALIGN);
5620 1.12.4.2 martin if (m == NULL)
5621 1.12.4.2 martin break;
5622 1.12.4.2 martin #else
5623 1.12.4.2 martin m = NULL;
5624 1.12.4.2 martin MGETHDR(m, M_DONTWAIT, MT_DATA);
5625 1.12.4.2 martin if (m == NULL)
5626 1.12.4.2 martin break;
5627 1.12.4.2 martin
5628 1.12.4.2 martin MCLGET(m, M_DONTWAIT);
5629 1.12.4.2 martin if ((m->m_flags & M_EXT) == 0) {
5630 1.12.4.2 martin m_freem(m);
5631 1.12.4.2 martin break;
5632 1.12.4.2 martin }
5633 1.12.4.2 martin #endif
5634 1.12.4.2 martin
5635 1.12.4.2 martin m->m_data += ETHER_ALIGN;
5636 1.12.4.2 martin m->m_len = m->m_pkthdr.len = m->m_ext.ext_size - ETHER_ALIGN;
5637 1.12.4.2 martin if (bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
5638 1.12.4.2 martin BUS_DMA_NOWAIT) != 0) {
5639 1.12.4.2 martin m_freem(m);
5640 1.12.4.2 martin break;
5641 1.12.4.2 martin }
5642 1.12.4.2 martin bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0, ms->ms_map->dm_mapsize, BUS_DMASYNC_PREREAD);
5643 1.12.4.2 martin ms->ms_m = m;
5644 1.12.4.2 martin
5645 1.12.4.2 martin rqe[slot].rqe_byte_count = htobe32(m->m_len);
5646 1.12.4.2 martin rqe[slot].rqe_addr = htobe64(ms->ms_map->dm_segs[0].ds_addr);
5647 1.12.4.2 martin rqe[slot].rqe_lkey = htobe32(sc->sc_lkey);
5648 1.12.4.2 martin
5649 1.12.4.2 martin p++;
5650 1.12.4.2 martin slot++;
5651 1.12.4.2 martin if (slot == (1 << MCX_LOG_RQ_SIZE))
5652 1.12.4.2 martin slot = 0;
5653 1.12.4.2 martin }
5654 1.12.4.2 martin
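	/*
	 * Publish the new producer counter through the receive queue
	 * doorbell so the hardware knows how many fresh buffers have
	 * been posted.
	 */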
5655 1.12.4.2 martin if (fills != 0) {
5656 1.12.4.2 martin *sc->sc_rx_doorbell = htobe32(p & MCX_WQ_DOORBELL_MASK);
5657 1.12.4.2 martin /* barrier? */
5658 1.12.4.2 martin }
5659 1.12.4.2 martin
5660 1.12.4.2 martin *prod = p;
5661 1.12.4.2 martin
5662 1.12.4.2 martin return (nslots - fills);
5663 1.12.4.2 martin }
5664 1.12.4.2 martin
5665 1.12.4.2 martin static int
5666 1.12.4.2 martin mcx_rx_fill(struct mcx_softc *sc)
5667 1.12.4.2 martin {
5668 1.12.4.2 martin u_int slots;
5669 1.12.4.2 martin
5670 1.12.4.2 martin slots = mcx_rxr_get(&sc->sc_rxr, (1 << MCX_LOG_RQ_SIZE));
5671 1.12.4.2 martin if (slots == 0)
5672 1.12.4.2 martin return (1);
5673 1.12.4.2 martin
5674 1.12.4.2 martin slots = mcx_rx_fill_slots(sc, MCX_DMA_KVA(&sc->sc_rq_mem),
5675 1.12.4.2 martin sc->sc_rx_slots, &sc->sc_rx_prod, sc->sc_hardmtu, slots);
5676 1.12.4.2 martin mcx_rxr_put(&sc->sc_rxr, slots);
5677 1.12.4.2 martin return (0);
5678 1.12.4.2 martin }
5679 1.12.4.2 martin
5680 1.12.4.2 martin void
5681 1.12.4.2 martin mcx_refill(void *xsc)
5682 1.12.4.2 martin {
5683 1.12.4.2 martin struct mcx_softc *sc = xsc;
5684 1.12.4.2 martin
5685 1.12.4.2 martin mcx_rx_fill(sc);
5686 1.12.4.2 martin
5687 1.12.4.2 martin if (mcx_rxr_inuse(&sc->sc_rxr) == 0)
5688 1.12.4.2 martin callout_schedule(&sc->sc_rx_refill, 1);
5689 1.12.4.2 martin }
5690 1.12.4.2 martin
5691 1.12.4.2 martin void
5692 1.12.4.2 martin mcx_process_txeof(struct mcx_softc *sc, struct mcx_cq_entry *cqe, int *txfree)
5693 1.12.4.2 martin {
5694 1.12.4.2 martin struct mcx_slot *ms;
5695 1.12.4.2 martin bus_dmamap_t map;
5696 1.12.4.2 martin int slot, slots;
5697 1.12.4.2 martin
5698 1.12.4.2 martin slot = be16toh(cqe->cq_wqe_count) % (1 << MCX_LOG_SQ_SIZE);
5699 1.12.4.2 martin
5700 1.12.4.2 martin ms = &sc->sc_tx_slots[slot];
5701 1.12.4.2 martin map = ms->ms_map;
5702 1.12.4.2 martin bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
5703 1.12.4.2 martin BUS_DMASYNC_POSTWRITE);
5704 1.12.4.2 martin
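	/*
	 * Multi-segment packets consumed extra send queue slots, so
	 * count those too when returning space to the transmit ring.
	 */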
5705 1.12.4.2 martin slots = 1;
5706 1.12.4.2 martin if (map->dm_nsegs > 1)
5707 1.12.4.2 martin slots += (map->dm_nsegs+2) / MCX_SQ_SEGS_PER_SLOT;
5708 1.12.4.2 martin
5709 1.12.4.2 martin (*txfree) += slots;
5710 1.12.4.2 martin bus_dmamap_unload(sc->sc_dmat, map);
5711 1.12.4.2 martin m_freem(ms->ms_m);
5712 1.12.4.2 martin ms->ms_m = NULL;
5713 1.12.4.2 martin }
5714 1.12.4.2 martin
5715 1.12.4.2 martin static uint64_t
5716 1.12.4.2 martin mcx_uptime(void)
5717 1.12.4.2 martin {
5718 1.12.4.2 martin struct timespec ts;
5719 1.12.4.2 martin
5720 1.12.4.2 martin nanouptime(&ts);
5721 1.12.4.2 martin
5722 1.12.4.2 martin return ((uint64_t)ts.tv_sec * 1000000000 + (uint64_t)ts.tv_nsec);
5723 1.12.4.2 martin }
5724 1.12.4.2 martin
5725 1.12.4.2 martin static void
5726 1.12.4.2 martin mcx_calibrate_first(struct mcx_softc *sc)
5727 1.12.4.2 martin {
5728 1.12.4.2 martin struct mcx_calibration *c = &sc->sc_calibration[0];
5729 1.12.4.2 martin
5730 1.12.4.2 martin sc->sc_calibration_gen = 0;
5731 1.12.4.2 martin
5732 1.12.4.2 martin c->c_ubase = mcx_uptime();
5733 1.12.4.2 martin c->c_tbase = mcx_timer(sc);
5734 1.12.4.2 martin c->c_tdiff = 0;
5735 1.12.4.2 martin
5736 1.12.4.2 martin callout_schedule(&sc->sc_calibrate, MCX_CALIBRATE_FIRST * hz);
5737 1.12.4.2 martin }
5738 1.12.4.2 martin
5739 1.12.4.2 martin #define MCX_TIMESTAMP_SHIFT 10
5740 1.12.4.2 martin
5741 1.12.4.2 martin static void
5742 1.12.4.2 martin mcx_calibrate(void *arg)
5743 1.12.4.2 martin {
5744 1.12.4.2 martin struct mcx_softc *sc = arg;
5745 1.12.4.2 martin struct mcx_calibration *nc, *pc;
5746 1.12.4.2 martin unsigned int gen;
5747 1.12.4.2 martin
5748 1.12.4.2 martin if (!ISSET(sc->sc_ec.ec_if.if_flags, IFF_RUNNING))
5749 1.12.4.2 martin return;
5750 1.12.4.2 martin
5751 1.12.4.2 martin callout_schedule(&sc->sc_calibrate, MCX_CALIBRATE_NORMAL * hz);
5752 1.12.4.2 martin
5753 1.12.4.2 martin gen = sc->sc_calibration_gen;
5754 1.12.4.2 martin pc = &sc->sc_calibration[gen % __arraycount(sc->sc_calibration)];
5755 1.12.4.2 martin gen++;
5756 1.12.4.2 martin nc = &sc->sc_calibration[gen % __arraycount(sc->sc_calibration)];
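	/*
	 * Two calibration records are kept: readers use the one selected
	 * by sc_calibration_gen, so the inactive record can be rebuilt
	 * here and then published by bumping the generation below.
	 */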
5757 1.12.4.2 martin
5758 1.12.4.2 martin nc->c_uptime = pc->c_ubase;
5759 1.12.4.2 martin nc->c_timestamp = pc->c_tbase;
5760 1.12.4.2 martin
5761 1.12.4.2 martin nc->c_ubase = mcx_uptime();
5762 1.12.4.2 martin nc->c_tbase = mcx_timer(sc);
5763 1.12.4.2 martin
5764 1.12.4.2 martin nc->c_udiff = (nc->c_ubase - nc->c_uptime) >> MCX_TIMESTAMP_SHIFT;
5765 1.12.4.2 martin nc->c_tdiff = (nc->c_tbase - nc->c_timestamp) >> MCX_TIMESTAMP_SHIFT;
5766 1.12.4.2 martin
5767 1.12.4.2 martin membar_producer();
5768 1.12.4.2 martin sc->sc_calibration_gen = gen;
5769 1.12.4.2 martin }
5770 1.12.4.2 martin
5771 1.12.4.2 martin static int
5772 1.12.4.2 martin mcx_process_rx(struct mcx_softc *sc, struct mcx_cq_entry *cqe,
5773 1.12.4.2 martin struct mcx_mbufq *mq, const struct mcx_calibration *c)
5774 1.12.4.2 martin {
5775 1.12.4.2 martin struct mcx_slot *ms;
5776 1.12.4.2 martin struct mbuf *m;
5777 1.12.4.2 martin int slot;
5778 1.12.4.2 martin
5779 1.12.4.2 martin slot = be16toh(cqe->cq_wqe_count) % (1 << MCX_LOG_RQ_SIZE);
5780 1.12.4.2 martin
5781 1.12.4.2 martin ms = &sc->sc_rx_slots[slot];
5782 1.12.4.2 martin bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0, ms->ms_map->dm_mapsize,
5783 1.12.4.2 martin BUS_DMASYNC_POSTREAD);
5784 1.12.4.2 martin bus_dmamap_unload(sc->sc_dmat, ms->ms_map);
5785 1.12.4.2 martin
5786 1.12.4.2 martin m = ms->ms_m;
5787 1.12.4.2 martin ms->ms_m = NULL;
5788 1.12.4.2 martin
5789 1.12.4.2 martin m_set_rcvif(m, &sc->sc_ec.ec_if);
5790 1.12.4.2 martin m->m_pkthdr.len = m->m_len = be32dec(&cqe->cq_byte_cnt);
5791 1.12.4.2 martin
5792 1.12.4.2 martin #if 0
5793 1.12.4.2 martin if (cqe->cq_rx_hash_type) {
5794 1.12.4.2 martin m->m_pkthdr.ph_flowid = M_FLOWID_VALID |
5795 1.12.4.2 martin be32toh(cqe->cq_rx_hash);
5796 1.12.4.2 martin }
5797 1.12.4.2 martin #endif
5798 1.12.4.2 martin
5799 1.12.4.2 martin #if 0
5800 1.12.4.2 martin if (c->c_tdiff) {
5801 1.12.4.2 martin uint64_t t = be64dec(&cqe->cq_timestamp) - c->c_timestamp;
5802 1.12.4.2 martin t *= c->c_udiff;
5803 1.12.4.2 martin t /= c->c_tdiff;
5804 1.12.4.2 martin
5805 1.12.4.2 martin m->m_pkthdr.ph_timestamp = c->c_uptime + t;
5806 1.12.4.2 martin SET(m->m_pkthdr.csum_flags, M_TIMESTAMP);
5807 1.12.4.2 martin }
5808 1.12.4.2 martin #endif
5809 1.12.4.2 martin
5810 1.12.4.2 martin MBUFQ_ENQUEUE(mq, m);
5811 1.12.4.2 martin
5812 1.12.4.2 martin return (1);
5813 1.12.4.2 martin }
5814 1.12.4.2 martin
5815 1.12.4.2 martin static struct mcx_cq_entry *
5816 1.12.4.2 martin mcx_next_cq_entry(struct mcx_softc *sc, struct mcx_cq *cq)
5817 1.12.4.2 martin {
5818 1.12.4.2 martin struct mcx_cq_entry *cqe;
5819 1.12.4.2 martin int next;
5820 1.12.4.2 martin
5821 1.12.4.2 martin cqe = (struct mcx_cq_entry *)MCX_DMA_KVA(&cq->cq_mem);
5822 1.12.4.2 martin next = cq->cq_cons % (1 << MCX_LOG_CQ_SIZE);
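	/*
	 * The ownership bit in each entry toggles every time the
	 * hardware wraps the ring, so an entry is ready for us when its
	 * owner bit matches the wrap parity of our consumer counter.
	 */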
5823 1.12.4.2 martin
5824 1.12.4.2 martin if ((cqe[next].cq_opcode_owner & MCX_CQ_ENTRY_FLAG_OWNER) ==
5825 1.12.4.2 martin ((cq->cq_cons >> MCX_LOG_CQ_SIZE) & 1)) {
5826 1.12.4.2 martin return (&cqe[next]);
5827 1.12.4.2 martin }
5828 1.12.4.2 martin
5829 1.12.4.2 martin return (NULL);
5830 1.12.4.2 martin }
5831 1.12.4.2 martin
5832 1.12.4.2 martin static void
5833 1.12.4.2 martin mcx_arm_cq(struct mcx_softc *sc, struct mcx_cq *cq)
5834 1.12.4.2 martin {
5835 1.12.4.2 martin bus_size_t offset;
5836 1.12.4.2 martin uint32_t val;
5837 1.12.4.2 martin uint64_t uval;
5838 1.12.4.2 martin
5839 1.12.4.2 martin /* different uar per cq? */
5840 1.12.4.2 martin offset = (MCX_PAGE_SIZE * sc->sc_uar);
5841 1.12.4.2 martin val = ((cq->cq_count) & 3) << MCX_CQ_DOORBELL_ARM_CMD_SN_SHIFT;
5842 1.12.4.2 martin val |= (cq->cq_cons & MCX_CQ_DOORBELL_ARM_CI_MASK);
5843 1.12.4.2 martin
5844 1.12.4.2 martin cq->cq_doorbell[0] = htobe32(cq->cq_cons & MCX_CQ_DOORBELL_ARM_CI_MASK);
5845 1.12.4.2 martin cq->cq_doorbell[1] = htobe32(val);
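	/*
	 * The consumer index and arm command are mirrored into the CQ
	 * doorbell record above and then written, together with the CQ
	 * number, through the UAR doorbell register so the CQ is
	 * re-armed for the next completion event.
	 */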
5846 1.12.4.2 martin
5847 1.12.4.2 martin uval = val;
5848 1.12.4.2 martin uval <<= 32;
5849 1.12.4.2 martin uval |= cq->cq_n;
5850 1.12.4.2 martin bus_space_write_8(sc->sc_memt, sc->sc_memh,
5851 1.12.4.2 martin offset + MCX_UAR_CQ_DOORBELL, htobe64(uval));
5852 1.12.4.2 martin mcx_bar(sc, offset + MCX_UAR_CQ_DOORBELL, sizeof(uint64_t),
5853 1.12.4.2 martin BUS_SPACE_BARRIER_WRITE);
5854 1.12.4.2 martin }
5855 1.12.4.2 martin
5856 1.12.4.2 martin void
5857 1.12.4.2 martin mcx_process_cq(struct mcx_softc *sc, struct mcx_cq *cq)
5858 1.12.4.2 martin {
5859 1.12.4.2 martin struct ifnet *ifp = &sc->sc_ec.ec_if;
5860 1.12.4.2 martin const struct mcx_calibration *c;
5861 1.12.4.2 martin unsigned int gen;
5862 1.12.4.2 martin struct mcx_cq_entry *cqe;
5863 1.12.4.2 martin struct mcx_mbufq mq;
5864 1.12.4.2 martin struct mbuf *m;
5865 1.12.4.2 martin int rxfree, txfree;
5866 1.12.4.2 martin
5867 1.12.4.2 martin MBUFQ_INIT(&mq);
5868 1.12.4.2 martin
5869 1.12.4.2 martin gen = sc->sc_calibration_gen;
5870 1.12.4.2 martin membar_consumer();
5871 1.12.4.2 martin c = &sc->sc_calibration[gen % __arraycount(sc->sc_calibration)];
5872 1.12.4.2 martin
5873 1.12.4.2 martin rxfree = 0;
5874 1.12.4.2 martin txfree = 0;
5875 1.12.4.2 martin while ((cqe = mcx_next_cq_entry(sc, cq))) {
5876 1.12.4.2 martin uint8_t opcode;
5877 1.12.4.2 martin opcode = (cqe->cq_opcode_owner >> MCX_CQ_ENTRY_OPCODE_SHIFT);
5878 1.12.4.2 martin switch (opcode) {
5879 1.12.4.2 martin case MCX_CQ_ENTRY_OPCODE_REQ:
5880 1.12.4.2 martin mcx_process_txeof(sc, cqe, &txfree);
5881 1.12.4.2 martin break;
5882 1.12.4.2 martin case MCX_CQ_ENTRY_OPCODE_SEND:
5883 1.12.4.2 martin rxfree += mcx_process_rx(sc, cqe, &mq, c);
5884 1.12.4.2 martin break;
5885 1.12.4.2 martin case MCX_CQ_ENTRY_OPCODE_REQ_ERR:
5886 1.12.4.2 martin case MCX_CQ_ENTRY_OPCODE_SEND_ERR:
5887 1.12.4.2 martin /* uint8_t *cqp = (uint8_t *)cqe; */
5888 1.12.4.2 martin /* printf("%s: cq completion error: %x\n", DEVNAME(sc), cqp[0x37]); */
5889 1.12.4.2 martin break;
5890 1.12.4.2 martin
5891 1.12.4.2 martin default:
5892 1.12.4.2 martin /* printf("%s: cq completion opcode %x??\n", DEVNAME(sc), opcode); */
5893 1.12.4.2 martin break;
5894 1.12.4.2 martin }
5895 1.12.4.2 martin
5896 1.12.4.2 martin cq->cq_cons++;
5897 1.12.4.2 martin }
5898 1.12.4.2 martin
5899 1.12.4.2 martin cq->cq_count++;
5900 1.12.4.2 martin mcx_arm_cq(sc, cq);
5901 1.12.4.2 martin
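	/*
	 * Completions were batched above: give the freed rx slots back
	 * to the ring accounting, hand the queued mbufs to the stack and
	 * top the receive ring back up.  Freed tx slots just advance the
	 * consumer index so the start routine can reuse them.
	 */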
5902 1.12.4.2 martin if (rxfree > 0) {
5903 1.12.4.2 martin mcx_rxr_put(&sc->sc_rxr, rxfree);
5904 1.12.4.2 martin while (MBUFQ_FIRST(&mq) != NULL) {
5905 1.12.4.2 martin MBUFQ_DEQUEUE(&mq, m);
5906 1.12.4.2 martin if_percpuq_enqueue(ifp->if_percpuq, m);
5907 1.12.4.2 martin }
5908 1.12.4.2 martin
5909 1.12.4.2 martin mcx_rx_fill(sc);
5910 1.12.4.2 martin
5911 1.12.4.2 martin if (mcx_rxr_inuse(&sc->sc_rxr) == 0)
5912 1.12.4.2 martin callout_schedule(&sc->sc_rx_refill, 1);
5913 1.12.4.2 martin }
5914 1.12.4.2 martin if (txfree > 0) {
5915 1.12.4.2 martin sc->sc_tx_cons += txfree;
5916 1.12.4.2 martin if_schedule_deferred_start(ifp);
5917 1.12.4.2 martin }
5918 1.12.4.2 martin }
5919 1.12.4.2 martin
5920 1.12.4.2 martin static void
5921 1.12.4.2 martin mcx_arm_eq(struct mcx_softc *sc)
5922 1.12.4.2 martin {
5923 1.12.4.2 martin bus_size_t offset;
5924 1.12.4.2 martin uint32_t val;
5925 1.12.4.2 martin
5926 1.12.4.2 martin offset = (MCX_PAGE_SIZE * sc->sc_uar) + MCX_UAR_EQ_DOORBELL_ARM;
5927 1.12.4.2 martin val = (sc->sc_eqn << 24) | (sc->sc_eq_cons & 0xffffff);
5928 1.12.4.2 martin
5929 1.12.4.2 martin mcx_wr(sc, offset, val);
5930 1.12.4.2 martin /* barrier? */
5931 1.12.4.2 martin }
5932 1.12.4.2 martin
5933 1.12.4.2 martin static struct mcx_eq_entry *
5934 1.12.4.2 martin mcx_next_eq_entry(struct mcx_softc *sc)
5935 1.12.4.2 martin {
5936 1.12.4.2 martin struct mcx_eq_entry *eqe;
5937 1.12.4.2 martin int next;
5938 1.12.4.2 martin
5939 1.12.4.2 martin eqe = (struct mcx_eq_entry *)MCX_DMA_KVA(&sc->sc_eq_mem);
5940 1.12.4.2 martin next = sc->sc_eq_cons % (1 << MCX_LOG_EQ_SIZE);
5941 1.12.4.2 martin if ((eqe[next].eq_owner & 1) == ((sc->sc_eq_cons >> MCX_LOG_EQ_SIZE) & 1)) {
5942 1.12.4.2 martin sc->sc_eq_cons++;
5943 1.12.4.2 martin return (&eqe[next]);
5944 1.12.4.2 martin }
5945 1.12.4.2 martin return (NULL);
5946 1.12.4.2 martin }
5947 1.12.4.2 martin
5948 1.12.4.2 martin int
5949 1.12.4.2 martin mcx_intr(void *xsc)
5950 1.12.4.2 martin {
5951 1.12.4.2 martin struct mcx_softc *sc = (struct mcx_softc *)xsc;
5952 1.12.4.2 martin struct mcx_eq_entry *eqe;
5953 1.12.4.2 martin int i, cq;
5954 1.12.4.2 martin
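	/*
	 * Drain the event queue.  Completion events identify the CQ by
	 * number, which is matched against the driver's CQs; most other
	 * event types are ignored here, apart from port changes, which
	 * are handed off to the workqueue.
	 */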
5955 1.12.4.2 martin while ((eqe = mcx_next_eq_entry(sc))) {
5956 1.12.4.2 martin switch (eqe->eq_event_type) {
5957 1.12.4.2 martin case MCX_EVENT_TYPE_COMPLETION:
5958 1.12.4.2 martin cq = be32toh(eqe->eq_event_data[6]);
5959 1.12.4.2 martin for (i = 0; i < sc->sc_num_cq; i++) {
5960 1.12.4.2 martin if (sc->sc_cq[i].cq_n == cq) {
5961 1.12.4.2 martin mcx_process_cq(sc, &sc->sc_cq[i]);
5962 1.12.4.2 martin break;
5963 1.12.4.2 martin }
5964 1.12.4.2 martin }
5965 1.12.4.2 martin break;
5966 1.12.4.2 martin
5967 1.12.4.2 martin case MCX_EVENT_TYPE_LAST_WQE:
5968 1.12.4.2 martin /* printf("%s: last wqe reached?\n", DEVNAME(sc)); */
5969 1.12.4.2 martin break;
5970 1.12.4.2 martin
5971 1.12.4.2 martin case MCX_EVENT_TYPE_CQ_ERROR:
5972 1.12.4.2 martin /* printf("%s: cq error\n", DEVNAME(sc)); */
5973 1.12.4.2 martin break;
5974 1.12.4.2 martin
5975 1.12.4.2 martin case MCX_EVENT_TYPE_CMD_COMPLETION:
5976 1.12.4.2 martin /* wakeup probably */
5977 1.12.4.2 martin break;
5978 1.12.4.2 martin
5979 1.12.4.2 martin case MCX_EVENT_TYPE_PORT_CHANGE:
5980 1.12.4.2 martin workqueue_enqueue(sc->sc_workq, &sc->sc_port_change, NULL);
5981 1.12.4.2 martin break;
5982 1.12.4.2 martin
5983 1.12.4.2 martin default:
5984 1.12.4.2 martin /* printf("%s: something happened\n", DEVNAME(sc)); */
5985 1.12.4.2 martin break;
5986 1.12.4.2 martin }
5987 1.12.4.2 martin }
5988 1.12.4.2 martin mcx_arm_eq(sc);
5989 1.12.4.2 martin return (1);
5990 1.12.4.2 martin }
5991 1.12.4.2 martin
5992 1.12.4.2 martin static void
5993 1.12.4.2 martin mcx_free_slots(struct mcx_softc *sc, struct mcx_slot *slots, int allocated,
5994 1.12.4.2 martin int total)
5995 1.12.4.2 martin {
5996 1.12.4.2 martin 	struct mcx_slot *ms;
5997 1.12.4.2 martin 	int i = allocated;
5998 1.12.4.2 martin 
5999 1.12.4.2 martin while (i-- > 0) {
6000 1.12.4.2 martin ms = &slots[i];
6001 1.12.4.2 martin bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
6002 1.12.4.2 martin if (ms->ms_m != NULL)
6003 1.12.4.2 martin m_freem(ms->ms_m);
6004 1.12.4.2 martin }
6005 1.12.4.2 martin kmem_free(slots, total * sizeof(*ms));
6006 1.12.4.2 martin }
6007 1.12.4.2 martin
6008 1.12.4.2 martin static int
6009 1.12.4.2 martin mcx_init(struct ifnet *ifp)
6010 1.12.4.2 martin {
6011 1.12.4.2 martin struct mcx_softc *sc = ifp->if_softc;
6012 1.12.4.2 martin struct mcx_slot *ms;
6013 1.12.4.2 martin int i, start;
6014 1.12.4.2 martin struct mcx_flow_match match_crit;
6015 1.12.4.2 martin
6016 1.12.4.2 martin if (ISSET(ifp->if_flags, IFF_RUNNING))
6017 1.12.4.2 martin mcx_stop(ifp, 0);
6018 1.12.4.2 martin
6019 1.12.4.2 martin sc->sc_rx_slots = kmem_zalloc(sizeof(*ms) * (1 << MCX_LOG_RQ_SIZE),
6020 1.12.4.2 martin KM_SLEEP);
6021 1.12.4.2 martin
6022 1.12.4.2 martin for (i = 0; i < (1 << MCX_LOG_RQ_SIZE); i++) {
6023 1.12.4.2 martin ms = &sc->sc_rx_slots[i];
6024 1.12.4.2 martin if (bus_dmamap_create(sc->sc_dmat, sc->sc_hardmtu, 1,
6025 1.12.4.2 martin sc->sc_hardmtu, 0,
6026 1.12.4.2 martin BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
6027 1.12.4.2 martin &ms->ms_map) != 0) {
6028 1.12.4.2 martin printf("%s: failed to allocate rx dma maps\n",
6029 1.12.4.2 martin DEVNAME(sc));
6030 1.12.4.2 martin goto destroy_rx_slots;
6031 1.12.4.2 martin }
6032 1.12.4.2 martin }
6033 1.12.4.2 martin
6034 1.12.4.2 martin sc->sc_tx_slots = kmem_zalloc(sizeof(*ms) * (1 << MCX_LOG_SQ_SIZE),
6035 1.12.4.2 martin KM_SLEEP);
6036 1.12.4.2 martin
6037 1.12.4.2 martin for (i = 0; i < (1 << MCX_LOG_SQ_SIZE); i++) {
6038 1.12.4.2 martin ms = &sc->sc_tx_slots[i];
6039 1.12.4.2 martin if (bus_dmamap_create(sc->sc_dmat, sc->sc_hardmtu,
6040 1.12.4.2 martin MCX_SQ_MAX_SEGMENTS, sc->sc_hardmtu, 0,
6041 1.12.4.2 martin BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
6042 1.12.4.2 martin &ms->ms_map) != 0) {
6043 1.12.4.2 martin printf("%s: failed to allocate tx dma maps\n",
6044 1.12.4.2 martin DEVNAME(sc));
6045 1.12.4.2 martin goto destroy_tx_slots;
6046 1.12.4.2 martin }
6047 1.12.4.2 martin }
6048 1.12.4.2 martin
6049 1.12.4.2 martin if (mcx_create_cq(sc, sc->sc_eqn) != 0)
6050 1.12.4.2 martin goto down;
6051 1.12.4.2 martin
6052 1.12.4.2 martin /* send queue */
6053 1.12.4.2 martin if (mcx_create_tis(sc) != 0)
6054 1.12.4.2 martin goto down;
6055 1.12.4.2 martin
6056 1.12.4.2 martin if (mcx_create_sq(sc, sc->sc_cq[0].cq_n) != 0)
6057 1.12.4.2 martin goto down;
6058 1.12.4.2 martin
6059 1.12.4.2 martin /* receive queue */
6060 1.12.4.2 martin if (mcx_create_rq(sc, sc->sc_cq[0].cq_n) != 0)
6061 1.12.4.2 martin goto down;
6062 1.12.4.2 martin
6063 1.12.4.2 martin if (mcx_create_tir(sc) != 0)
6064 1.12.4.2 martin goto down;
6065 1.12.4.2 martin
6066 1.12.4.2 martin if (mcx_create_flow_table(sc, MCX_LOG_FLOW_TABLE_SIZE) != 0)
6067 1.12.4.2 martin goto down;
6068 1.12.4.2 martin
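	/*
	 * Flow table layout: entry 0 catches everything (promisc),
	 * entry 1 catches any multicast destination (allmulti), and the
	 * remaining entries match specific destination MAC addresses:
	 * the interface address, broadcast, then joined multicast
	 * groups.
	 */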
6069 1.12.4.2 martin /* promisc flow group */
6070 1.12.4.2 martin start = 0;
6071 1.12.4.2 martin memset(&match_crit, 0, sizeof(match_crit));
6072 1.12.4.2 martin if (mcx_create_flow_group(sc, MCX_FLOW_GROUP_PROMISC, start, 1,
6073 1.12.4.2 martin 0, &match_crit) != 0)
6074 1.12.4.2 martin goto down;
6075 1.12.4.2 martin sc->sc_promisc_flow_enabled = 0;
6076 1.12.4.2 martin start++;
6077 1.12.4.2 martin
6078 1.12.4.2 martin /* all multicast flow group */
6079 1.12.4.2 martin match_crit.mc_dest_mac[0] = 0x01;
6080 1.12.4.2 martin if (mcx_create_flow_group(sc, MCX_FLOW_GROUP_ALLMULTI, start, 1,
6081 1.12.4.2 martin MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
6082 1.12.4.2 martin goto down;
6083 1.12.4.2 martin sc->sc_allmulti_flow_enabled = 0;
6084 1.12.4.2 martin start++;
6085 1.12.4.2 martin
6086 1.12.4.2 martin /* mac address matching flow group */
6087 1.12.4.2 martin memset(&match_crit.mc_dest_mac, 0xff, sizeof(match_crit.mc_dest_mac));
6088 1.12.4.2 martin if (mcx_create_flow_group(sc, MCX_FLOW_GROUP_MAC, start,
6089 1.12.4.2 martin (1 << MCX_LOG_FLOW_TABLE_SIZE) - start,
6090 1.12.4.2 martin MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
6091 1.12.4.2 martin goto down;
6092 1.12.4.2 martin
6093 1.12.4.2 martin /* flow table entries for unicast and broadcast */
6094 1.12.4.2 martin start = 0;
6095 1.12.4.2 martin if (mcx_set_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, start,
6096 1.12.4.2 martin LLADDR(satosdl(ifp->if_dl->ifa_addr))) != 0)
6097 1.12.4.2 martin goto down;
6098 1.12.4.2 martin start++;
6099 1.12.4.2 martin
6100 1.12.4.2 martin if (mcx_set_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, start,
6101 1.12.4.2 martin etherbroadcastaddr) != 0)
6102 1.12.4.2 martin goto down;
6103 1.12.4.2 martin start++;
6104 1.12.4.2 martin
6105 1.12.4.2 martin /* multicast entries go after that */
6106 1.12.4.2 martin sc->sc_mcast_flow_base = start;
6107 1.12.4.2 martin
6108 1.12.4.2 martin /* re-add any existing multicast flows */
6109 1.12.4.2 martin for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
6110 1.12.4.2 martin if (sc->sc_mcast_flows[i][0] != 0) {
6111 1.12.4.2 martin mcx_set_flow_table_entry(sc, MCX_FLOW_GROUP_MAC,
6112 1.12.4.2 martin sc->sc_mcast_flow_base + i,
6113 1.12.4.2 martin sc->sc_mcast_flows[i]);
6114 1.12.4.2 martin }
6115 1.12.4.2 martin }
6116 1.12.4.2 martin
6117 1.12.4.2 martin if (mcx_set_flow_table_root(sc) != 0)
6118 1.12.4.2 martin goto down;
6119 1.12.4.2 martin
6120 1.12.4.2 martin /* start the queues */
6121 1.12.4.2 martin if (mcx_ready_sq(sc) != 0)
6122 1.12.4.2 martin goto down;
6123 1.12.4.2 martin
6124 1.12.4.2 martin if (mcx_ready_rq(sc) != 0)
6125 1.12.4.2 martin goto down;
6126 1.12.4.2 martin
6127 1.12.4.2 martin mcx_rxr_init(&sc->sc_rxr, 1, (1 << MCX_LOG_RQ_SIZE));
6128 1.12.4.2 martin sc->sc_rx_prod = 0;
6129 1.12.4.2 martin mcx_rx_fill(sc);
6130 1.12.4.2 martin
6131 1.12.4.2 martin mcx_calibrate_first(sc);
6132 1.12.4.2 martin
6133 1.12.4.2 martin SET(ifp->if_flags, IFF_RUNNING);
6134 1.12.4.2 martin
6135 1.12.4.2 martin sc->sc_tx_cons = 0;
6136 1.12.4.2 martin sc->sc_tx_prod = 0;
6137 1.12.4.2 martin CLR(ifp->if_flags, IFF_OACTIVE);
6138 1.12.4.2 martin if_schedule_deferred_start(ifp);
6139 1.12.4.2 martin
6140 1.12.4.2 martin return 0;
6141 1.12.4.2 martin destroy_tx_slots:
6142 1.12.4.2 martin mcx_free_slots(sc, sc->sc_tx_slots, i, (1 << MCX_LOG_SQ_SIZE));
6143 1.12.4.2 martin 	sc->sc_tx_slots = NULL;
6144 1.12.4.2 martin
6145 1.12.4.2 martin i = (1 << MCX_LOG_RQ_SIZE);
6146 1.12.4.2 martin destroy_rx_slots:
6147 1.12.4.2 martin mcx_free_slots(sc, sc->sc_rx_slots, i, (1 << MCX_LOG_RQ_SIZE));
6148 1.12.4.2 martin sc->sc_rx_slots = NULL;
6149 1.12.4.2 martin down:
6150 1.12.4.2 martin mcx_stop(ifp, 0);
6151 1.12.4.2 martin return EIO;
6152 1.12.4.2 martin }
6153 1.12.4.2 martin
6154 1.12.4.2 martin static void
6155 1.12.4.2 martin mcx_stop(struct ifnet *ifp, int disable)
6156 1.12.4.2 martin {
6157 1.12.4.2 martin struct mcx_softc *sc = ifp->if_softc;
6158 1.12.4.2 martin int group, i;
6159 1.12.4.2 martin
6160 1.12.4.2 martin CLR(ifp->if_flags, IFF_RUNNING);
6161 1.12.4.2 martin
6162 1.12.4.2 martin /*
6163 1.12.4.2 martin 	 * delete the flow table entries first, so no new packets can be
6164 1.12.4.2 martin 	 * delivered while the rest of the datapath is torn down
6165 1.12.4.2 martin */
6166 1.12.4.2 martin if (sc->sc_promisc_flow_enabled)
6167 1.12.4.2 martin mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_PROMISC, 0);
6168 1.12.4.2 martin if (sc->sc_allmulti_flow_enabled)
6169 1.12.4.2 martin mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_ALLMULTI, 0);
6170 1.12.4.2 martin mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, 0);
6171 1.12.4.2 martin mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, 1);
6172 1.12.4.2 martin for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
6173 1.12.4.2 martin if (sc->sc_mcast_flows[i][0] != 0) {
6174 1.12.4.2 martin mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC,
6175 1.12.4.2 martin sc->sc_mcast_flow_base + i);
6176 1.12.4.2 martin }
6177 1.12.4.2 martin }
6178 1.12.4.2 martin
6179 1.12.4.2 martin callout_halt(&sc->sc_calibrate, NULL);
6180 1.12.4.2 martin
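	/*
	 * Tear the hardware objects down in roughly the reverse order
	 * of mcx_init(): flow groups and the flow table, then the
	 * TIR/RQ and SQ/TIS pairs, and finally the completion queues.
	 */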
6181 1.12.4.2 martin for (group = 0; group < MCX_NUM_FLOW_GROUPS; group++) {
6182 1.12.4.2 martin if (sc->sc_flow_group_id[group] != -1)
6183 1.12.4.2 martin mcx_destroy_flow_group(sc,
6184 1.12.4.2 martin sc->sc_flow_group_id[group]);
6185 1.12.4.2 martin }
6186 1.12.4.2 martin
6187 1.12.4.2 martin if (sc->sc_flow_table_id != -1)
6188 1.12.4.2 martin mcx_destroy_flow_table(sc);
6189 1.12.4.2 martin
6190 1.12.4.2 martin if (sc->sc_tirn != 0)
6191 1.12.4.2 martin mcx_destroy_tir(sc);
6192 1.12.4.2 martin if (sc->sc_rqn != 0)
6193 1.12.4.2 martin mcx_destroy_rq(sc);
6194 1.12.4.2 martin
6195 1.12.4.2 martin if (sc->sc_sqn != 0)
6196 1.12.4.2 martin mcx_destroy_sq(sc);
6197 1.12.4.2 martin if (sc->sc_tisn != 0)
6198 1.12.4.2 martin mcx_destroy_tis(sc);
6199 1.12.4.2 martin
6200 1.12.4.2 martin for (i = 0; i < sc->sc_num_cq; i++)
6201 1.12.4.2 martin mcx_destroy_cq(sc, i);
6202 1.12.4.2 martin sc->sc_num_cq = 0;
6203 1.12.4.2 martin
6204 1.12.4.2 martin if (sc->sc_tx_slots != NULL) {
6205 1.12.4.2 martin mcx_free_slots(sc, sc->sc_tx_slots, (1 << MCX_LOG_SQ_SIZE),
6206 1.12.4.2 martin (1 << MCX_LOG_SQ_SIZE));
6207 1.12.4.2 martin sc->sc_tx_slots = NULL;
6208 1.12.4.2 martin }
6209 1.12.4.2 martin if (sc->sc_rx_slots != NULL) {
6210 1.12.4.2 martin mcx_free_slots(sc, sc->sc_rx_slots, (1 << MCX_LOG_RQ_SIZE),
6211 1.12.4.2 martin (1 << MCX_LOG_RQ_SIZE));
6212 1.12.4.2 martin sc->sc_rx_slots = NULL;
6213 1.12.4.2 martin }
6214 1.12.4.2 martin }
6215 1.12.4.2 martin
6216 1.12.4.2 martin static int
6217 1.12.4.2 martin mcx_ioctl(struct ifnet *ifp, u_long cmd, void *data)
6218 1.12.4.2 martin {
6219 1.12.4.2 martin struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
6220 1.12.4.2 martin struct ifreq *ifr = (struct ifreq *)data;
6221 1.12.4.2 martin struct ethercom *ec = &sc->sc_ec;
6222 1.12.4.2 martin uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
6223 1.12.4.2 martin struct ether_multi *enm;
6224 1.12.4.2 martin struct ether_multistep step;
6225 1.12.4.2 martin int s, i, flags, error = 0;
6226 1.12.4.2 martin
6227 1.12.4.2 martin s = splnet();
6228 1.12.4.2 martin switch (cmd) {
6229 1.12.4.2 martin
6230 1.12.4.2 martin case SIOCADDMULTI:
6231 1.12.4.2 martin if (ether_addmulti(ifreq_getaddr(cmd, ifr), &sc->sc_ec) == ENETRESET) {
6232 1.12.4.2 martin error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
6233 1.12.4.2 martin if (error != 0) {
6234 1.12.4.2 martin splx(s);
6235 1.12.4.2 martin return (error);
6236 1.12.4.2 martin }
6237 1.12.4.2 martin
6238 1.12.4.2 martin for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
6239 1.12.4.2 martin if (sc->sc_mcast_flows[i][0] == 0) {
6240 1.12.4.2 martin memcpy(sc->sc_mcast_flows[i], addrlo,
6241 1.12.4.2 martin ETHER_ADDR_LEN);
6242 1.12.4.2 martin if (ISSET(ifp->if_flags, IFF_RUNNING)) {
6243 1.12.4.2 martin mcx_set_flow_table_entry(sc,
6244 1.12.4.2 martin MCX_FLOW_GROUP_MAC,
6245 1.12.4.2 martin sc->sc_mcast_flow_base + i,
6246 1.12.4.2 martin sc->sc_mcast_flows[i]);
6247 1.12.4.2 martin }
6248 1.12.4.2 martin break;
6249 1.12.4.2 martin }
6250 1.12.4.2 martin }
6251 1.12.4.2 martin
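			/*
			 * If no flow table slot was free, or a range of
			 * addresses was requested, fall back to allmulti
			 * by returning ENETRESET so mcx_iff() reprograms
			 * the filter.
			 */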
6252 1.12.4.2 martin if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
6253 1.12.4.2 martin if (i == MCX_NUM_MCAST_FLOWS) {
6254 1.12.4.2 martin SET(ifp->if_flags, IFF_ALLMULTI);
6255 1.12.4.2 martin sc->sc_extra_mcast++;
6256 1.12.4.2 martin error = ENETRESET;
6257 1.12.4.2 martin }
6258 1.12.4.2 martin
6259 1.12.4.2 martin if (memcmp(addrlo, addrhi, ETHER_ADDR_LEN)) {
6260 1.12.4.2 martin SET(ifp->if_flags, IFF_ALLMULTI);
6261 1.12.4.2 martin error = ENETRESET;
6262 1.12.4.2 martin }
6263 1.12.4.2 martin }
6264 1.12.4.2 martin }
6265 1.12.4.2 martin break;
6266 1.12.4.2 martin
6267 1.12.4.2 martin case SIOCDELMULTI:
6268 1.12.4.2 martin if (ether_delmulti(ifreq_getaddr(cmd, ifr), &sc->sc_ec) == ENETRESET) {
6269 1.12.4.2 martin error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
6270 1.12.4.2 martin if (error != 0) {
6271 1.12.4.2 martin splx(s);
6272 1.12.4.2 martin return (error);
6273 1.12.4.2 martin }
6274 1.12.4.2 martin
6275 1.12.4.2 martin for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
6276 1.12.4.2 martin if (memcmp(sc->sc_mcast_flows[i], addrlo,
6277 1.12.4.2 martin ETHER_ADDR_LEN) == 0) {
6278 1.12.4.2 martin if (ISSET(ifp->if_flags, IFF_RUNNING)) {
6279 1.12.4.2 martin mcx_delete_flow_table_entry(sc,
6280 1.12.4.2 martin MCX_FLOW_GROUP_MAC,
6281 1.12.4.2 martin sc->sc_mcast_flow_base + i);
6282 1.12.4.2 martin }
6283 1.12.4.2 martin sc->sc_mcast_flows[i][0] = 0;
6284 1.12.4.2 martin break;
6285 1.12.4.2 martin }
6286 1.12.4.2 martin }
6287 1.12.4.2 martin
6288 1.12.4.2 martin if (i == MCX_NUM_MCAST_FLOWS)
6289 1.12.4.2 martin sc->sc_extra_mcast--;
6290 1.12.4.2 martin
6291 1.12.4.2 martin if (ISSET(ifp->if_flags, IFF_ALLMULTI) &&
6292 1.12.4.2 martin sc->sc_extra_mcast == 0) {
6293 1.12.4.2 martin flags = 0;
6294 1.12.4.2 martin ETHER_LOCK(ec);
6295 1.12.4.2 martin ETHER_FIRST_MULTI(step, ec, enm);
6296 1.12.4.2 martin while (enm != NULL) {
6297 1.12.4.2 martin if (memcmp(enm->enm_addrlo,
6298 1.12.4.2 martin enm->enm_addrhi, ETHER_ADDR_LEN)) {
6299 1.12.4.2 martin SET(flags, IFF_ALLMULTI);
6300 1.12.4.2 martin break;
6301 1.12.4.2 martin }
6302 1.12.4.2 martin ETHER_NEXT_MULTI(step, enm);
6303 1.12.4.2 martin }
6304 1.12.4.2 martin ETHER_UNLOCK(ec);
6305 1.12.4.2 martin if (!ISSET(flags, IFF_ALLMULTI)) {
6306 1.12.4.2 martin CLR(ifp->if_flags, IFF_ALLMULTI);
6307 1.12.4.2 martin error = ENETRESET;
6308 1.12.4.2 martin }
6309 1.12.4.2 martin }
6310 1.12.4.2 martin }
6311 1.12.4.2 martin break;
6312 1.12.4.2 martin
6313 1.12.4.2 martin default:
6314 1.12.4.2 martin error = ether_ioctl(ifp, cmd, data);
6315 1.12.4.2 martin }
6316 1.12.4.2 martin
6317 1.12.4.2 martin if (error == ENETRESET) {
6318 1.12.4.2 martin if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
6319 1.12.4.2 martin (IFF_UP | IFF_RUNNING))
6320 1.12.4.2 martin mcx_iff(sc);
6321 1.12.4.2 martin error = 0;
6322 1.12.4.2 martin }
6323 1.12.4.2 martin splx(s);
6324 1.12.4.2 martin
6325 1.12.4.2 martin return (error);
6326 1.12.4.2 martin }
6327 1.12.4.2 martin
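/*
 * SFF module EEPROM reader, carried over from OpenBSD but compiled out:
 * nothing in mcx_ioctl() calls it here.
 */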
6328 1.12.4.2 martin #if 0
6329 1.12.4.2 martin static int
6330 1.12.4.2 martin mcx_get_sffpage(struct ifnet *ifp, struct if_sffpage *sff)
6331 1.12.4.2 martin {
6332 1.12.4.2 martin struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
6333 1.12.4.2 martin struct mcx_reg_mcia mcia;
6334 1.12.4.2 martin struct mcx_reg_pmlp pmlp;
6335 1.12.4.2 martin int offset, error;
6336 1.12.4.2 martin
6337 1.12.4.2 martin /* get module number */
6338 1.12.4.2 martin memset(&pmlp, 0, sizeof(pmlp));
6339 1.12.4.2 martin pmlp.rp_local_port = 1;
6340 1.12.4.2 martin error = mcx_access_hca_reg(sc, MCX_REG_PMLP, MCX_REG_OP_READ, &pmlp,
6341 1.12.4.2 martin sizeof(pmlp));
6342 1.12.4.2 martin if (error != 0) {
6343 1.12.4.2 martin printf("%s: unable to get eeprom module number\n",
6344 1.12.4.2 martin DEVNAME(sc));
6345 1.12.4.2 martin return error;
6346 1.12.4.2 martin }
6347 1.12.4.2 martin
6348 1.12.4.2 martin for (offset = 0; offset < 256; offset += MCX_MCIA_EEPROM_BYTES) {
6349 1.12.4.2 martin memset(&mcia, 0, sizeof(mcia));
6350 1.12.4.2 martin mcia.rm_l = 0;
6351 1.12.4.2 martin mcia.rm_module = be32toh(pmlp.rp_lane0_mapping) &
6352 1.12.4.2 martin MCX_PMLP_MODULE_NUM_MASK;
6353 1.12.4.2 martin 		mcia.rm_i2c_addr = sff->sff_addr / 2;	/* 8-bit -> 7-bit i2c address, apparently */
6354 1.12.4.2 martin mcia.rm_page_num = sff->sff_page;
6355 1.12.4.2 martin mcia.rm_dev_addr = htobe16(offset);
6356 1.12.4.2 martin mcia.rm_size = htobe16(MCX_MCIA_EEPROM_BYTES);
6357 1.12.4.2 martin
6358 1.12.4.2 martin error = mcx_access_hca_reg(sc, MCX_REG_MCIA, MCX_REG_OP_READ,
6359 1.12.4.2 martin &mcia, sizeof(mcia));
6360 1.12.4.2 martin if (error != 0) {
6361 1.12.4.2 martin printf("%s: unable to read eeprom at %x\n",
6362 1.12.4.2 martin DEVNAME(sc), offset);
6363 1.12.4.2 martin return error;
6364 1.12.4.2 martin }
6365 1.12.4.2 martin
6366 1.12.4.2 martin memcpy(sff->sff_data + offset, mcia.rm_data,
6367 1.12.4.2 martin MCX_MCIA_EEPROM_BYTES);
6368 1.12.4.2 martin }
6369 1.12.4.2 martin
6370 1.12.4.2 martin return 0;
6371 1.12.4.2 martin }
6372 1.12.4.2 martin #endif
6373 1.12.4.2 martin
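/*
 * Load an mbuf chain into the slot's DMA map, defragmenting it once and
 * retrying if it has too many segments.  Returns non-zero on failure.
 */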
6374 1.12.4.2 martin static int
6375 1.12.4.2 martin mcx_load_mbuf(struct mcx_softc *sc, struct mcx_slot *ms, struct mbuf *m)
6376 1.12.4.2 martin {
6377 1.12.4.2 martin switch (bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
6378 1.12.4.2 martin BUS_DMA_STREAMING | BUS_DMA_NOWAIT)) {
6379 1.12.4.2 martin case 0:
6380 1.12.4.2 martin break;
6381 1.12.4.2 martin
6382 1.12.4.2 martin case EFBIG:
6383 1.12.4.2 martin if (m_defrag(m, M_DONTWAIT) != NULL &&
6384 1.12.4.2 martin bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
6385 1.12.4.2 martin BUS_DMA_STREAMING | BUS_DMA_NOWAIT) == 0)
6386 1.12.4.2 martin break;
6387 1.12.4.2 martin
6388 1.12.4.2 martin /* FALLTHROUGH */
6389 1.12.4.2 martin default:
6390 1.12.4.2 martin return (1);
6391 1.12.4.2 martin }
6392 1.12.4.2 martin
6393 1.12.4.2 martin ms->ms_m = m;
6394 1.12.4.2 martin return (0);
6395 1.12.4.2 martin }
6396 1.12.4.2 martin
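/*
 * Transmit start routine.  Each packet gets a send WQE: the first
 * MCX_SQ_INLINE_SIZE bytes are copied into the WQE as inline headers and
 * the rest is described by data segments, spilling into additional SQ
 * slots for multi-segment packets.  After queueing, ring the doorbell
 * and write the start of the last WQE to the blue flame buffer.
 */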
6397 1.12.4.2 martin static void
6398 1.12.4.2 martin mcx_start(struct ifnet *ifp)
6399 1.12.4.2 martin {
6400 1.12.4.2 martin struct mcx_softc *sc = ifp->if_softc;
6401 1.12.4.2 martin struct mcx_sq_entry *sq, *sqe;
6402 1.12.4.2 martin struct mcx_sq_entry_seg *sqs;
6403 1.12.4.2 martin struct mcx_slot *ms;
6404 1.12.4.2 martin bus_dmamap_t map;
6405 1.12.4.2 martin struct mbuf *m;
6406 1.12.4.2 martin u_int idx, free, used;
6407 1.12.4.2 martin uint64_t *bf;
6408 1.12.4.2 martin size_t bf_base;
6409 1.12.4.2 martin int i, seg, nseg;
6410 1.12.4.2 martin
6411 1.12.4.2 martin bf_base = (sc->sc_uar * MCX_PAGE_SIZE) + MCX_UAR_BF;
6412 1.12.4.2 martin
6413 1.12.4.2 martin idx = sc->sc_tx_prod % (1 << MCX_LOG_SQ_SIZE);
6414 1.12.4.2 martin free = (sc->sc_tx_cons + (1 << MCX_LOG_SQ_SIZE)) - sc->sc_tx_prod;
6415 1.12.4.2 martin
6416 1.12.4.2 martin used = 0;
6417 1.12.4.2 martin bf = NULL;
6418 1.12.4.2 martin sq = (struct mcx_sq_entry *)MCX_DMA_KVA(&sc->sc_sq_mem);
6419 1.12.4.2 martin
6420 1.12.4.2 martin for (;;) {
6421 1.12.4.2 martin if (used + MCX_SQ_ENTRY_MAX_SLOTS >= free) {
6422 1.12.4.2 martin SET(ifp->if_flags, IFF_OACTIVE);
6423 1.12.4.2 martin break;
6424 1.12.4.2 martin }
6425 1.12.4.2 martin
6426 1.12.4.2 martin IFQ_DEQUEUE(&ifp->if_snd, m);
6427 1.12.4.2 martin if (m == NULL) {
6428 1.12.4.2 martin break;
6429 1.12.4.2 martin }
6430 1.12.4.2 martin
6431 1.12.4.2 martin sqe = sq + idx;
6432 1.12.4.2 martin ms = &sc->sc_tx_slots[idx];
6433 1.12.4.2 martin memset(sqe, 0, sizeof(*sqe));
6434 1.12.4.2 martin
6435 1.12.4.2 martin /* ctrl segment */
6436 1.12.4.2 martin sqe->sqe_opcode_index = htobe32(MCX_SQE_WQE_OPCODE_SEND |
6437 1.12.4.2 martin ((sc->sc_tx_prod & 0xffff) << MCX_SQE_WQE_INDEX_SHIFT));
6438 1.12.4.2 martin /* always generate a completion event */
6439 1.12.4.2 martin sqe->sqe_signature = htobe32(MCX_SQE_CE_CQE_ALWAYS);
6440 1.12.4.2 martin
6441 1.12.4.2 martin /* eth segment */
6442 1.12.4.2 martin sqe->sqe_inline_header_size = htobe16(MCX_SQ_INLINE_SIZE);
6443 1.12.4.2 martin m_copydata(m, 0, MCX_SQ_INLINE_SIZE, sqe->sqe_inline_headers);
6444 1.12.4.2 martin m_adj(m, MCX_SQ_INLINE_SIZE);
6445 1.12.4.2 martin
6446 1.12.4.2 martin if (mcx_load_mbuf(sc, ms, m) != 0) {
6447 1.12.4.2 martin m_freem(m);
6448 1.12.4.2 martin if_statinc(ifp, if_oerrors);
6449 1.12.4.2 martin continue;
6450 1.12.4.2 martin }
6451 1.12.4.2 martin bf = (uint64_t *)sqe;
6452 1.12.4.2 martin
6453 1.12.4.2 martin if (ifp->if_bpf != NULL)
6454 1.12.4.2 martin bpf_mtap2(ifp->if_bpf, sqe->sqe_inline_headers,
6455 1.12.4.2 martin MCX_SQ_INLINE_SIZE, m, BPF_D_OUT);
6456 1.12.4.2 martin
6457 1.12.4.2 martin map = ms->ms_map;
6458 1.12.4.2 martin bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
6459 1.12.4.2 martin BUS_DMASYNC_PREWRITE);
6460 1.12.4.2 martin
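		/*
		 * ds counts the 16-byte units making up the WQE; the +3
		 * presumably covers the ctrl and eth segments that precede
		 * the data segments.
		 */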
6461 1.12.4.2 martin sqe->sqe_ds_sq_num =
6462 1.12.4.2 martin htobe32((sc->sc_sqn << MCX_SQE_SQ_NUM_SHIFT) |
6463 1.12.4.2 martin (map->dm_nsegs + 3));
6464 1.12.4.2 martin
6465 1.12.4.2 martin /* data segment - first wqe has one segment */
6466 1.12.4.2 martin sqs = sqe->sqe_segs;
6467 1.12.4.2 martin seg = 0;
6468 1.12.4.2 martin nseg = 1;
6469 1.12.4.2 martin for (i = 0; i < map->dm_nsegs; i++) {
6470 1.12.4.2 martin if (seg == nseg) {
6471 1.12.4.2 martin /* next slot */
6472 1.12.4.2 martin idx++;
6473 1.12.4.2 martin if (idx == (1 << MCX_LOG_SQ_SIZE))
6474 1.12.4.2 martin idx = 0;
6475 1.12.4.2 martin sc->sc_tx_prod++;
6476 1.12.4.2 martin used++;
6477 1.12.4.2 martin
6478 1.12.4.2 martin sqs = (struct mcx_sq_entry_seg *)(sq + idx);
6479 1.12.4.2 martin seg = 0;
6480 1.12.4.2 martin nseg = MCX_SQ_SEGS_PER_SLOT;
6481 1.12.4.2 martin }
6482 1.12.4.2 martin sqs[seg].sqs_byte_count =
6483 1.12.4.2 martin htobe32(map->dm_segs[i].ds_len);
6484 1.12.4.2 martin sqs[seg].sqs_lkey = htobe32(sc->sc_lkey);
6485 1.12.4.2 martin sqs[seg].sqs_addr = htobe64(map->dm_segs[i].ds_addr);
6486 1.12.4.2 martin seg++;
6487 1.12.4.2 martin }
6488 1.12.4.2 martin
6489 1.12.4.2 martin idx++;
6490 1.12.4.2 martin if (idx == (1 << MCX_LOG_SQ_SIZE))
6491 1.12.4.2 martin idx = 0;
6492 1.12.4.2 martin sc->sc_tx_prod++;
6493 1.12.4.2 martin used++;
6494 1.12.4.2 martin }
6495 1.12.4.2 martin
6496 1.12.4.2 martin if (used) {
6497 1.12.4.2 martin *sc->sc_tx_doorbell = htobe32(sc->sc_tx_prod & MCX_WQ_DOORBELL_MASK);
6498 1.12.4.2 martin
6499 1.12.4.2 martin membar_sync();
6500 1.12.4.2 martin
6501 1.12.4.2 martin /*
6502 1.12.4.2 martin * write the first 64 bits of the last sqe we produced
6503 1.12.4.2 martin * to the blue flame buffer
6504 1.12.4.2 martin */
6505 1.12.4.2 martin bus_space_write_8(sc->sc_memt, sc->sc_memh,
6506 1.12.4.2 martin bf_base + sc->sc_bf_offset, *bf);
6507 1.12.4.2 martin /* next write goes to the other buffer */
6508 1.12.4.2 martin sc->sc_bf_offset ^= sc->sc_bf_size;
6509 1.12.4.2 martin
6510 1.12.4.2 martin membar_sync();
6511 1.12.4.2 martin }
6512 1.12.4.2 martin }
6513 1.12.4.2 martin
6514 1.12.4.2 martin static void
6515 1.12.4.2 martin mcx_watchdog(struct ifnet *ifp)
6516 1.12.4.2 martin {
6517 1.12.4.2 martin }
6518 1.12.4.2 martin
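/*
 * Read the PTYS register to find the port's ethernet protocol
 * capabilities and add an ifmedia entry for each supported type in
 * mcx_eth_cap_map.
 */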
6519 1.12.4.2 martin static void
6520 1.12.4.2 martin mcx_media_add_types(struct mcx_softc *sc)
6521 1.12.4.2 martin {
6522 1.12.4.2 martin struct mcx_reg_ptys ptys;
6523 1.12.4.2 martin int i;
6524 1.12.4.2 martin uint32_t proto_cap;
6525 1.12.4.2 martin
6526 1.12.4.2 martin memset(&ptys, 0, sizeof(ptys));
6527 1.12.4.2 martin ptys.rp_local_port = 1;
6528 1.12.4.2 martin ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
6529 1.12.4.2 martin if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ, &ptys,
6530 1.12.4.2 martin sizeof(ptys)) != 0) {
6531 1.12.4.2 martin printf("%s: unable to read port type/speed\n", DEVNAME(sc));
6532 1.12.4.2 martin return;
6533 1.12.4.2 martin }
6534 1.12.4.2 martin
6535 1.12.4.2 martin proto_cap = be32toh(ptys.rp_eth_proto_cap);
6536 1.12.4.2 martin for (i = 0; i < __arraycount(mcx_eth_cap_map); i++) {
6537 1.12.4.2 martin if ((proto_cap & (1U << i)) && (mcx_eth_cap_map[i] != 0))
6538 1.12.4.2 martin ifmedia_add(&sc->sc_media, IFM_ETHER |
6539 1.12.4.2 martin mcx_eth_cap_map[i], 0, NULL);
6540 1.12.4.2 martin }
6541 1.12.4.2 martin }
6542 1.12.4.2 martin
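/*
 * Report media status: map the operational protocol bits from PTYS to an
 * ifmedia subtype and mark the link active if any protocol is operating.
 */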
6543 1.12.4.2 martin static void
6544 1.12.4.2 martin mcx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
6545 1.12.4.2 martin {
6546 1.12.4.2 martin struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
6547 1.12.4.2 martin struct mcx_reg_ptys ptys;
6548 1.12.4.2 martin int i;
6549 1.12.4.2 martin uint32_t /* proto_cap, */ proto_oper;
6550 1.12.4.2 martin uint64_t media_oper;
6551 1.12.4.2 martin
6552 1.12.4.2 martin memset(&ptys, 0, sizeof(ptys));
6553 1.12.4.2 martin ptys.rp_local_port = 1;
6554 1.12.4.2 martin ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
6555 1.12.4.2 martin
6556 1.12.4.2 martin if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ, &ptys,
6557 1.12.4.2 martin sizeof(ptys)) != 0) {
6558 1.12.4.2 martin printf("%s: unable to read port type/speed\n", DEVNAME(sc));
6559 1.12.4.2 martin return;
6560 1.12.4.2 martin }
6561 1.12.4.2 martin
6562 1.12.4.2 martin /* proto_cap = be32toh(ptys.rp_eth_proto_cap); */
6563 1.12.4.2 martin proto_oper = be32toh(ptys.rp_eth_proto_oper);
6564 1.12.4.2 martin
6565 1.12.4.2 martin media_oper = 0;
6566 1.12.4.2 martin for (i = 0; i < __arraycount(mcx_eth_cap_map); i++) {
6567 1.12.4.2 martin if (proto_oper & (1U << i)) {
6568 1.12.4.2 martin media_oper = mcx_eth_cap_map[i];
6569 1.12.4.2 martin }
6570 1.12.4.2 martin }
6571 1.12.4.2 martin
6572 1.12.4.2 martin ifmr->ifm_status = IFM_AVALID;
6573 1.12.4.2 martin /* not sure if this is the right thing to check, maybe paos? */
6574 1.12.4.2 martin if (proto_oper != 0) {
6575 1.12.4.2 martin ifmr->ifm_status |= IFM_ACTIVE;
6576 1.12.4.2 martin ifmr->ifm_active = IFM_ETHER | IFM_AUTO | media_oper;
6577 1.12.4.2 martin /* txpause, rxpause, duplex? */
6578 1.12.4.2 martin }
6579 1.12.4.2 martin }
6580 1.12.4.2 martin
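/*
 * Change media: take the port down via PAOS, write the requested
 * protocol set (all capabilities for autoselect, a single bit otherwise)
 * to PTYS, then bring the port back up to restart negotiation.
 */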
6581 1.12.4.2 martin static int
6582 1.12.4.2 martin mcx_media_change(struct ifnet *ifp)
6583 1.12.4.2 martin {
6584 1.12.4.2 martin struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
6585 1.12.4.2 martin struct mcx_reg_ptys ptys;
6586 1.12.4.2 martin struct mcx_reg_paos paos;
6587 1.12.4.2 martin uint32_t media;
6588 1.12.4.2 martin int i, error;
6589 1.12.4.2 martin
6590 1.12.4.2 martin if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
6591 1.12.4.2 martin return EINVAL;
6592 1.12.4.2 martin
6593 1.12.4.2 martin error = 0;
6594 1.12.4.2 martin
6595 1.12.4.2 martin if (IFM_SUBTYPE(sc->sc_media.ifm_media) == IFM_AUTO) {
6596 1.12.4.2 martin /* read ptys to get supported media */
6597 1.12.4.2 martin memset(&ptys, 0, sizeof(ptys));
6598 1.12.4.2 martin ptys.rp_local_port = 1;
6599 1.12.4.2 martin ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
6600 1.12.4.2 martin if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ,
6601 1.12.4.2 martin &ptys, sizeof(ptys)) != 0) {
6602 1.12.4.2 martin printf("%s: unable to read port type/speed\n",
6603 1.12.4.2 martin DEVNAME(sc));
6604 1.12.4.2 martin return EIO;
6605 1.12.4.2 martin }
6606 1.12.4.2 martin
6607 1.12.4.2 martin media = be32toh(ptys.rp_eth_proto_cap);
6608 1.12.4.2 martin } else {
6609 1.12.4.2 martin /* map media type */
6610 1.12.4.2 martin media = 0;
6611 1.12.4.2 martin for (i = 0; i < __arraycount(mcx_eth_cap_map); i++) {
6612 1.12.4.2 martin if (mcx_eth_cap_map[i] ==
6613 1.12.4.2 martin IFM_SUBTYPE(sc->sc_media.ifm_media)) {
6614 1.12.4.2 martin media = (1 << i);
6615 1.12.4.2 martin break;
6616 1.12.4.2 martin }
6617 1.12.4.2 martin }
6618 1.12.4.2 martin }
6619 1.12.4.2 martin
6620 1.12.4.2 martin /* disable the port */
6621 1.12.4.2 martin memset(&paos, 0, sizeof(paos));
6622 1.12.4.2 martin paos.rp_local_port = 1;
6623 1.12.4.2 martin paos.rp_admin_status = MCX_REG_PAOS_ADMIN_STATUS_DOWN;
6624 1.12.4.2 martin paos.rp_admin_state_update = MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN;
6625 1.12.4.2 martin if (mcx_access_hca_reg(sc, MCX_REG_PAOS, MCX_REG_OP_WRITE, &paos,
6626 1.12.4.2 martin sizeof(paos)) != 0) {
6627 1.12.4.2 martin printf("%s: unable to set port state to down\n", DEVNAME(sc));
6628 1.12.4.2 martin return EIO;
6629 1.12.4.2 martin }
6630 1.12.4.2 martin
6631 1.12.4.2 martin memset(&ptys, 0, sizeof(ptys));
6632 1.12.4.2 martin ptys.rp_local_port = 1;
6633 1.12.4.2 martin ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
6634 1.12.4.2 martin ptys.rp_eth_proto_admin = htobe32(media);
6635 1.12.4.2 martin if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_WRITE, &ptys,
6636 1.12.4.2 martin sizeof(ptys)) != 0) {
6637 1.12.4.2 martin printf("%s: unable to set port media type/speed\n",
6638 1.12.4.2 martin DEVNAME(sc));
6639 1.12.4.2 martin error = EIO;
6640 1.12.4.2 martin }
6641 1.12.4.2 martin
6642 1.12.4.2 martin /* re-enable the port to start negotiation */
6643 1.12.4.2 martin memset(&paos, 0, sizeof(paos));
6644 1.12.4.2 martin paos.rp_local_port = 1;
6645 1.12.4.2 martin paos.rp_admin_status = MCX_REG_PAOS_ADMIN_STATUS_UP;
6646 1.12.4.2 martin paos.rp_admin_state_update = MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN;
6647 1.12.4.2 martin if (mcx_access_hca_reg(sc, MCX_REG_PAOS, MCX_REG_OP_WRITE, &paos,
6648 1.12.4.2 martin sizeof(paos)) != 0) {
6649 1.12.4.2 martin printf("%s: unable to set port state to up\n", DEVNAME(sc));
6650 1.12.4.2 martin error = EIO;
6651 1.12.4.2 martin }
6652 1.12.4.2 martin
6653 1.12.4.2 martin return error;
6654 1.12.4.2 martin }
6655 1.12.4.2 martin
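/*
 * Workqueue task run on port change events: read the operational status
 * from PAOS, refresh the media and baudrate, and report a link state
 * change if needed.
 */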
6656 1.12.4.2 martin static void
6657 1.12.4.2 martin mcx_port_change(struct work *wk, void *xsc)
6658 1.12.4.2 martin {
6659 1.12.4.2 martin struct mcx_softc *sc = xsc;
6660 1.12.4.2 martin struct ifnet *ifp = &sc->sc_ec.ec_if;
6661 1.12.4.2 martin struct mcx_reg_paos paos;
6662 1.12.4.2 martin int link_state = LINK_STATE_DOWN;
6663 1.12.4.2 martin struct ifmediareq ifmr;
6664 1.12.4.2 martin
6665 1.12.4.2 martin memset(&paos, 0, sizeof(paos));
6666 1.12.4.2 martin paos.rp_local_port = 1;
6667 1.12.4.2 martin if (mcx_access_hca_reg(sc, MCX_REG_PAOS, MCX_REG_OP_READ, &paos,
6668 1.12.4.2 martin sizeof(paos)) == 0) {
6669 1.12.4.2 martin if (paos.rp_oper_status == MCX_REG_PAOS_OPER_STATUS_UP)
6670 1.12.4.2 martin link_state = LINK_STATE_UP;
6671 1.12.4.2 martin mcx_media_status(ifp, &ifmr);
6672 1.12.4.2 martin ifp->if_baudrate = ifmedia_baudrate(ifmr.ifm_active);
6673 1.12.4.2 martin }
6674 1.12.4.2 martin
6675 1.12.4.2 martin if (link_state != ifp->if_link_state) {
6676 1.12.4.2 martin if_link_state_change(ifp, link_state);
6677 1.12.4.2 martin }
6678 1.12.4.2 martin }
6680 1.12.4.2 martin
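/*
 * Register access helpers: the device's configuration registers are
 * big-endian, so swap around 32-bit bus_space accesses.
 */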
6681 1.12.4.2 martin static inline uint32_t
6682 1.12.4.2 martin mcx_rd(struct mcx_softc *sc, bus_size_t r)
6683 1.12.4.2 martin {
6684 1.12.4.2 martin uint32_t word;
6685 1.12.4.2 martin
6686 1.12.4.2 martin word = bus_space_read_4(sc->sc_memt, sc->sc_memh, r);
6687 1.12.4.2 martin
6688 1.12.4.2 martin return (be32toh(word));
6689 1.12.4.2 martin }
6690 1.12.4.2 martin
6691 1.12.4.2 martin static inline void
6692 1.12.4.2 martin mcx_wr(struct mcx_softc *sc, bus_size_t r, uint32_t v)
6693 1.12.4.2 martin {
6694 1.12.4.2 martin bus_space_write_4(sc->sc_memt, sc->sc_memh, r, htobe32(v));
6695 1.12.4.2 martin }
6696 1.12.4.2 martin
6697 1.12.4.2 martin static inline void
6698 1.12.4.2 martin mcx_bar(struct mcx_softc *sc, bus_size_t r, bus_size_t l, int f)
6699 1.12.4.2 martin {
6700 1.12.4.2 martin bus_space_barrier(sc->sc_memt, sc->sc_memh, r, l, f);
6701 1.12.4.2 martin }
6702 1.12.4.2 martin
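/*
 * Read the 64-bit internal timer.  The high word is re-read until it is
 * stable so the two 32-bit reads form a consistent snapshot.
 */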
6703 1.12.4.2 martin static uint64_t
6704 1.12.4.2 martin mcx_timer(struct mcx_softc *sc)
6705 1.12.4.2 martin {
6706 1.12.4.2 martin uint32_t hi, lo, ni;
6707 1.12.4.2 martin
6708 1.12.4.2 martin hi = mcx_rd(sc, MCX_INTERNAL_TIMER_H);
6709 1.12.4.2 martin for (;;) {
6710 1.12.4.2 martin lo = mcx_rd(sc, MCX_INTERNAL_TIMER_L);
6711 1.12.4.2 martin mcx_bar(sc, MCX_INTERNAL_TIMER_L, 8, BUS_SPACE_BARRIER_READ);
6712 1.12.4.2 martin ni = mcx_rd(sc, MCX_INTERNAL_TIMER_H);
6713 1.12.4.2 martin
6714 1.12.4.2 martin if (ni == hi)
6715 1.12.4.2 martin break;
6716 1.12.4.2 martin
6717 1.12.4.2 martin hi = ni;
6718 1.12.4.2 martin }
6719 1.12.4.2 martin
6720 1.12.4.2 martin return (((uint64_t)hi << 32) | (uint64_t)lo);
6721 1.12.4.2 martin }
6722 1.12.4.2 martin
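/*
 * Allocate a DMA-safe memory region as a single segment, map it into
 * kernel virtual space, load it into a DMA map and zero it, unwinding
 * through the labels below on failure.
 */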
6723 1.12.4.2 martin static int
6724 1.12.4.2 martin mcx_dmamem_alloc(struct mcx_softc *sc, struct mcx_dmamem *mxm,
6725 1.12.4.2 martin bus_size_t size, u_int align)
6726 1.12.4.2 martin {
6727 1.12.4.2 martin mxm->mxm_size = size;
6728 1.12.4.2 martin
6729 1.12.4.2 martin if (bus_dmamap_create(sc->sc_dmat, mxm->mxm_size, 1,
6730 1.12.4.2 martin mxm->mxm_size, 0,
6731 1.12.4.2 martin BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
6732 1.12.4.2 martin &mxm->mxm_map) != 0)
6733 1.12.4.2 martin return (1);
6734 1.12.4.2 martin if (bus_dmamem_alloc(sc->sc_dmat, mxm->mxm_size,
6735 1.12.4.2 martin align, 0, &mxm->mxm_seg, 1, &mxm->mxm_nsegs,
6736 1.12.4.2 martin BUS_DMA_WAITOK) != 0)
6737 1.12.4.2 martin goto destroy;
6738 1.12.4.2 martin if (bus_dmamem_map(sc->sc_dmat, &mxm->mxm_seg, mxm->mxm_nsegs,
6739 1.12.4.2 martin mxm->mxm_size, &mxm->mxm_kva, BUS_DMA_WAITOK) != 0)
6740 1.12.4.2 martin goto free;
6741 1.12.4.2 martin if (bus_dmamap_load(sc->sc_dmat, mxm->mxm_map, mxm->mxm_kva,
6742 1.12.4.2 martin mxm->mxm_size, NULL, BUS_DMA_WAITOK) != 0)
6743 1.12.4.2 martin goto unmap;
6744 1.12.4.2 martin
6745 1.12.4.2 martin mcx_dmamem_zero(mxm);
6746 1.12.4.2 martin
6747 1.12.4.2 martin return (0);
6748 1.12.4.2 martin unmap:
6749 1.12.4.2 martin bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
6750 1.12.4.2 martin free:
6751 1.12.4.2 martin bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
6752 1.12.4.2 martin destroy:
6753 1.12.4.2 martin bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
6754 1.12.4.2 martin return (1);
6755 1.12.4.2 martin }
6756 1.12.4.2 martin
6757 1.12.4.2 martin static void
6758 1.12.4.2 martin mcx_dmamem_zero(struct mcx_dmamem *mxm)
6759 1.12.4.2 martin {
6760 1.12.4.2 martin memset(MCX_DMA_KVA(mxm), 0, MCX_DMA_LEN(mxm));
6761 1.12.4.2 martin }
6762 1.12.4.2 martin
6763 1.12.4.2 martin static void
6764 1.12.4.2 martin mcx_dmamem_free(struct mcx_softc *sc, struct mcx_dmamem *mxm)
6765 1.12.4.2 martin {
6766 1.12.4.2 martin bus_dmamap_unload(sc->sc_dmat, mxm->mxm_map);
6767 1.12.4.2 martin bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
6768 1.12.4.2 martin bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
6769 1.12.4.2 martin bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
6770 1.12.4.2 martin }
6771 1.12.4.2 martin
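/*
 * Allocate pages of memory to hand to the firmware.  bus_dmamem_alloc()
 * may return fewer segments than pages requested; in that case the
 * oversized segment array is copied into one of the right size before
 * the memory is loaded raw into the DMA map.
 */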
6772 1.12.4.2 martin static int
6773 1.12.4.2 martin mcx_hwmem_alloc(struct mcx_softc *sc, struct mcx_hwmem *mhm, unsigned int pages)
6774 1.12.4.2 martin {
6775 1.12.4.2 martin bus_dma_segment_t *segs;
6776 1.12.4.2 martin bus_size_t len = pages * MCX_PAGE_SIZE;
6777 1.12.4.2 martin size_t seglen;
6778 1.12.4.2 martin
6779 1.12.4.2 martin segs = kmem_alloc(sizeof(*segs) * pages, KM_SLEEP);
6780 1.12.4.2 martin seglen = sizeof(*segs) * pages;
6781 1.12.4.2 martin
6782 1.12.4.2 martin if (bus_dmamem_alloc(sc->sc_dmat, len, MCX_PAGE_SIZE, 0,
6783 1.12.4.2 martin segs, pages, &mhm->mhm_seg_count, BUS_DMA_NOWAIT) != 0)
6784 1.12.4.2 martin goto free_segs;
6785 1.12.4.2 martin
6786 1.12.4.2 martin if (mhm->mhm_seg_count < pages) {
6787 1.12.4.2 martin size_t nseglen;
6788 1.12.4.2 martin
6789 1.12.4.2 martin mhm->mhm_segs = kmem_alloc(
6790 1.12.4.2 martin sizeof(*mhm->mhm_segs) * mhm->mhm_seg_count, KM_SLEEP);
6791 1.12.4.2 martin
6792 1.12.4.2 martin nseglen = sizeof(*mhm->mhm_segs) * mhm->mhm_seg_count;
6793 1.12.4.2 martin
6794 1.12.4.2 martin memcpy(mhm->mhm_segs, segs, nseglen);
6795 1.12.4.2 martin
6796 1.12.4.2 martin kmem_free(segs, seglen);
6797 1.12.4.2 martin
6798 1.12.4.2 martin segs = mhm->mhm_segs;
6799 1.12.4.2 martin seglen = nseglen;
6800 1.12.4.2 martin } else
6801 1.12.4.2 martin mhm->mhm_segs = segs;
6802 1.12.4.2 martin
6803 1.12.4.2 martin if (bus_dmamap_create(sc->sc_dmat, len, pages, MCX_PAGE_SIZE,
6804 1.12.4.2 martin MCX_PAGE_SIZE, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW /*|BUS_DMA_64BIT*/,
6805 1.12.4.2 martin &mhm->mhm_map) != 0)
6806 1.12.4.2 martin goto free_dmamem;
6807 1.12.4.2 martin
6808 1.12.4.2 martin if (bus_dmamap_load_raw(sc->sc_dmat, mhm->mhm_map,
6809 1.12.4.2 martin mhm->mhm_segs, mhm->mhm_seg_count, len, BUS_DMA_NOWAIT) != 0)
6810 1.12.4.2 martin goto destroy;
6811 1.12.4.2 martin
6812 1.12.4.2 martin bus_dmamap_sync(sc->sc_dmat, mhm->mhm_map,
6813 1.12.4.2 martin 0, mhm->mhm_map->dm_mapsize, BUS_DMASYNC_PRERW);
6814 1.12.4.2 martin
6815 1.12.4.2 martin mhm->mhm_npages = pages;
6816 1.12.4.2 martin
6817 1.12.4.2 martin return (0);
6818 1.12.4.2 martin
6819 1.12.4.2 martin destroy:
6820 1.12.4.2 martin bus_dmamap_destroy(sc->sc_dmat, mhm->mhm_map);
6821 1.12.4.2 martin free_dmamem:
6822 1.12.4.2 martin bus_dmamem_free(sc->sc_dmat, mhm->mhm_segs, mhm->mhm_seg_count);
6823 1.12.4.2 martin free_segs:
6824 1.12.4.2 martin kmem_free(segs, seglen);
6825 1.12.4.2 martin mhm->mhm_segs = NULL;
6826 1.12.4.2 martin
6827 1.12.4.2 martin return (-1);
6828 1.12.4.2 martin }
6829 1.12.4.2 martin
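/*
 * Release firmware pages allocated by mcx_hwmem_alloc().
 */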
6830 1.12.4.2 martin static void
6831 1.12.4.2 martin mcx_hwmem_free(struct mcx_softc *sc, struct mcx_hwmem *mhm)
6832 1.12.4.2 martin {
6833 1.12.4.2 martin if (mhm->mhm_npages == 0)
6834 1.12.4.2 martin return;
6835 1.12.4.2 martin
6836 1.12.4.2 martin bus_dmamap_sync(sc->sc_dmat, mhm->mhm_map,
6837 1.12.4.2 martin 0, mhm->mhm_map->dm_mapsize, BUS_DMASYNC_POSTRW);
6838 1.12.4.2 martin
6839 1.12.4.2 martin bus_dmamap_unload(sc->sc_dmat, mhm->mhm_map);
6840 1.12.4.2 martin bus_dmamap_destroy(sc->sc_dmat, mhm->mhm_map);
6841 1.12.4.2 martin bus_dmamem_free(sc->sc_dmat, mhm->mhm_segs, mhm->mhm_seg_count);
6842 1.12.4.2 martin kmem_free(mhm->mhm_segs, sizeof(*mhm->mhm_segs) * mhm->mhm_seg_count);
6843 1.12.4.2 martin
6844 1.12.4.2 martin mhm->mhm_npages = 0;
6845 1.12.4.2 martin }
6846