/*	$NetBSD: if_vmx.c,v 1.1 2020/10/14 10:19:11 ryo Exp $	*/
/*	$OpenBSD: if_vmx.c,v 1.16 2014/01/22 06:04:17 brad Exp $	*/

/*
 * Copyright (c) 2013 Tsubai Masanari
 * Copyright (c) 2013 Bryan Venteicher <bryanv (at) FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_vmx.c,v 1.1 2020/10/14 10:19:11 ryo Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/bitops.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/mbuf.h>
#include <sys/sockio.h>
#include <sys/pcq.h>
#include <sys/workqueue.h>
#include <sys/interrupt.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#include <netinet/if_inarp.h>
#include <netinet/in_systm.h>	/* for <netinet/ip.h> */
#include <netinet/in.h>		/* for <netinet/ip.h> */
#include <netinet/ip.h>		/* for struct ip */
#include <netinet/ip6.h>	/* for struct ip6_hdr */
#include <netinet/tcp.h>	/* for struct tcphdr */
#include <netinet/udp.h>	/* for struct udphdr */

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_vmxreg.h>

#define VMXNET3_DRIVER_VERSION 0x00010000
/*
 * Max descriptors per Tx packet.  We must limit the size of any
 * TSO packet based on the number of segments.
 */
#define VMXNET3_TX_MAXSEGS		32
#define VMXNET3_TX_MAXSIZE		(VMXNET3_TX_MAXSEGS * MCLBYTES)
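/*
 * Note: on ports where MCLBYTES is the usual 2KB, this caps a TSO
 * packet at 32 * 2KB == 64KB of mapped data.
 */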

/*
 * Maximum supported Tx segment size.  The length field in the
 * Tx descriptor is 14 bits, so a single segment may span at most
 * 16KB (1 << 14 bytes).
 */
#define VMXNET3_TX_MAXSEGSIZE		(1 << 14)

/*
 * The maximum number of Rx segments we accept.
 */
#define VMXNET3_MAX_RX_SEGS		0	/* no segments */

/*
 * Predetermined size of the multicast MACs filter table.  If the
 * number of multicast addresses exceeds this size, the ALL_MULTI
 * mode is used instead.
 */
#define VMXNET3_MULTICAST_MAX		32
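/*
 * The table is a flat array of MAC addresses: VMXNET3_MULTICAST_MAX *
 * ETHER_ADDR_LEN == 192 bytes, DMA-allocated in
 * vmxnet3_alloc_mcast_table() below.
 */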

/*
 * Our Tx watchdog timeout.
 */
#define VMXNET3_WATCHDOG_TIMEOUT	5

/*
 * Default values for vmx_intr_{rx,tx}_process_limit: the maximum
 * number of packets to process in the interrupt handler.
 */
#define VMXNET3_RX_INTR_PROCESS_LIMIT	0U
#define VMXNET3_TX_INTR_PROCESS_LIMIT	256

/*
 * Default values for vmx_{rx,tx}_process_limit: the maximum number
 * of packets to process in the deferred (softint/workqueue) path.
 */
#define VMXNET3_RX_PROCESS_LIMIT	256
#define VMXNET3_TX_PROCESS_LIMIT	256
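#if 0
/*
 * Illustrative sketch only (kept out of the build): how the limits
 * above are intended to be consumed.  The interrupt handler polls the
 * rings with the *_INTR_PROCESS_LIMIT budgets and, if work remains,
 * defers to the softint/workqueue path, which polls again with the
 * larger *_PROCESS_LIMIT budgets.  The types and functions referenced
 * here are declared later in this file.
 */
static void
vmxnet3_process_limit_sketch(struct vmxnet3_queue *vmxq)
{
	struct vmxnet3_softc *sc = vmxq->vxq_rxqueue.vxrxq_sc;
	bool more;

	/* Poll within the (small) interrupt-context budgets. */
	more = vmxnet3_rxq_eof(&vmxq->vxq_rxqueue,
	    sc->vmx_rx_intr_process_limit);
	more |= vmxnet3_txq_eof(&vmxq->vxq_txqueue,
	    sc->vmx_tx_intr_process_limit);
	if (more) {
		/* Leftover work continues under the larger budgets. */
		softint_schedule(vmxq->vxq_si);
	}
}
#endif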

#define VMXNET3_WORKQUEUE_PRI PRI_SOFTNET

/*
 * IP protocols for which we can perform Tx checksum offloading.
 */
#define VMXNET3_CSUM_OFFLOAD \
	(M_CSUM_TCPv4 | M_CSUM_UDPv4)
#define VMXNET3_CSUM_OFFLOAD_IPV6 \
	(M_CSUM_TCPv6 | M_CSUM_UDPv6)

#define VMXNET3_CSUM_ALL_OFFLOAD \
	(VMXNET3_CSUM_OFFLOAD | VMXNET3_CSUM_OFFLOAD_IPV6 | M_CSUM_TSOv4 | M_CSUM_TSOv6)

#define VMXNET3_RXRINGS_PERQ 2
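/*
 * Two command rings per Rx queue: ring 0 supplies the initial (head)
 * buffers, ring 1 the additional body buffers for multi-segment
 * packets; see vxrxr_rid below.
 */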

#define VMXNET3_CORE_LOCK(_sc)		mutex_enter((_sc)->vmx_mtx)
#define VMXNET3_CORE_UNLOCK(_sc)	mutex_exit((_sc)->vmx_mtx)
#define VMXNET3_CORE_LOCK_ASSERT(_sc)	mutex_owned((_sc)->vmx_mtx)

#define VMXNET3_RXQ_LOCK(_rxq)		mutex_enter((_rxq)->vxrxq_mtx)
#define VMXNET3_RXQ_UNLOCK(_rxq)	mutex_exit((_rxq)->vxrxq_mtx)
#define VMXNET3_RXQ_LOCK_ASSERT(_rxq) \
	mutex_owned((_rxq)->vxrxq_mtx)

#define VMXNET3_TXQ_LOCK(_txq)		mutex_enter((_txq)->vxtxq_mtx)
#define VMXNET3_TXQ_TRYLOCK(_txq)	mutex_tryenter((_txq)->vxtxq_mtx)
#define VMXNET3_TXQ_UNLOCK(_txq)	mutex_exit((_txq)->vxtxq_mtx)
#define VMXNET3_TXQ_LOCK_ASSERT(_txq) \
	mutex_owned((_txq)->vxtxq_mtx)

struct vmxnet3_dma_alloc {
	bus_addr_t dma_paddr;
	void *dma_vaddr;
	bus_dmamap_t dma_map;
	bus_size_t dma_size;
	bus_dma_segment_t dma_segs[1];
};

struct vmxnet3_txbuf {
	bus_dmamap_t vtxb_dmamap;
	struct mbuf *vtxb_m;
};

struct vmxnet3_txring {
	struct vmxnet3_txbuf *vxtxr_txbuf;
	struct vmxnet3_txdesc *vxtxr_txd;
	u_int vxtxr_head;
	u_int vxtxr_next;
	u_int vxtxr_ndesc;
	int vxtxr_gen;
	struct vmxnet3_dma_alloc vxtxr_dma;
};

struct vmxnet3_rxbuf {
	bus_dmamap_t vrxb_dmamap;
	struct mbuf *vrxb_m;
};

struct vmxnet3_rxring {
	struct vmxnet3_rxbuf *vxrxr_rxbuf;
	struct vmxnet3_rxdesc *vxrxr_rxd;
	u_int vxrxr_fill;
	u_int vxrxr_ndesc;
	int vxrxr_gen;
	int vxrxr_rid;
	struct vmxnet3_dma_alloc vxrxr_dma;
	bus_dmamap_t vxrxr_spare_dmap;
};

struct vmxnet3_comp_ring {
	union {
		struct vmxnet3_txcompdesc *txcd;
		struct vmxnet3_rxcompdesc *rxcd;
	} vxcr_u;
	u_int vxcr_next;
	u_int vxcr_ndesc;
	int vxcr_gen;
	struct vmxnet3_dma_alloc vxcr_dma;
};

struct vmxnet3_txq_stats {
#if 0
	uint64_t vmtxs_opackets;	/* if_opackets */
	uint64_t vmtxs_obytes;		/* if_obytes */
	uint64_t vmtxs_omcasts;		/* if_omcasts */
#endif
	uint64_t vmtxs_csum;
	uint64_t vmtxs_tso;
	uint64_t vmtxs_full;
	uint64_t vmtxs_offload_failed;
};

struct vmxnet3_txqueue {
	kmutex_t *vxtxq_mtx;
	struct vmxnet3_softc *vxtxq_sc;
	int vxtxq_watchdog;
	pcq_t *vxtxq_interq;
	struct vmxnet3_txring vxtxq_cmd_ring;
	struct vmxnet3_comp_ring vxtxq_comp_ring;
	struct vmxnet3_txq_stats vxtxq_stats;
	struct vmxnet3_txq_shared *vxtxq_ts;
	char vxtxq_name[16];

	void *vxtxq_si;

	struct evcnt vxtxq_intr;
	struct evcnt vxtxq_defer;
	struct evcnt vxtxq_deferreq;
	struct evcnt vxtxq_pcqdrop;
	struct evcnt vxtxq_transmitdef;
	struct evcnt vxtxq_watchdogto;
	struct evcnt vxtxq_defragged;
	struct evcnt vxtxq_defrag_failed;
};

#if 0
struct vmxnet3_rxq_stats {
	uint64_t vmrxs_ipackets;	/* if_ipackets */
	uint64_t vmrxs_ibytes;		/* if_ibytes */
	uint64_t vmrxs_iqdrops;		/* if_iqdrops */
	uint64_t vmrxs_ierrors;		/* if_ierrors */
};
#endif

struct vmxnet3_rxqueue {
	kmutex_t *vxrxq_mtx;
	struct vmxnet3_softc *vxrxq_sc;
	struct mbuf *vxrxq_mhead;
	struct mbuf *vxrxq_mtail;
	struct vmxnet3_rxring vxrxq_cmd_ring[VMXNET3_RXRINGS_PERQ];
	struct vmxnet3_comp_ring vxrxq_comp_ring;
#if 0
	struct vmxnet3_rxq_stats vxrxq_stats;
#endif
	struct vmxnet3_rxq_shared *vxrxq_rs;
	char vxrxq_name[16];

	struct evcnt vxrxq_intr;
	struct evcnt vxrxq_defer;
	struct evcnt vxrxq_deferreq;
	struct evcnt vxrxq_mgetcl_failed;
	struct evcnt vxrxq_mbuf_load_failed;
};

struct vmxnet3_queue {
	int vxq_id;
	int vxq_intr_idx;

	struct vmxnet3_txqueue vxq_txqueue;
	struct vmxnet3_rxqueue vxq_rxqueue;

	void *vxq_si;
	bool vxq_workqueue;
	struct work vxq_wq_cookie;
};

struct vmxnet3_softc {
	device_t vmx_dev;
	struct ethercom vmx_ethercom;
	struct ifmedia vmx_media;
	struct vmxnet3_driver_shared *vmx_ds;
	int vmx_flags;
#define VMXNET3_FLAG_NO_MSIX	(1 << 0)
#define VMXNET3_FLAG_RSS	(1 << 1)
#define VMXNET3_FLAG_ATTACHED	(1 << 2)

	struct vmxnet3_queue *vmx_queue;

	struct pci_attach_args *vmx_pa;
	pci_chipset_tag_t vmx_pc;

	bus_space_tag_t vmx_iot0;
	bus_space_tag_t vmx_iot1;
	bus_space_handle_t vmx_ioh0;
	bus_space_handle_t vmx_ioh1;
	bus_size_t vmx_ios0;
	bus_size_t vmx_ios1;
	bus_dma_tag_t vmx_dmat;

	int vmx_link_active;
	int vmx_ntxqueues;
	int vmx_nrxqueues;
	int vmx_ntxdescs;
	int vmx_nrxdescs;
	int vmx_max_rxsegs;

	struct evcnt vmx_event_intr;
	struct evcnt vmx_event_link;
	struct evcnt vmx_event_txqerror;
	struct evcnt vmx_event_rxqerror;
	struct evcnt vmx_event_dic;
	struct evcnt vmx_event_debug;

	int vmx_intr_type;
	int vmx_intr_mask_mode;
	int vmx_event_intr_idx;
	int vmx_nintrs;
	pci_intr_handle_t *vmx_intrs;	/* legacy use vmx_intrs[0] */
	void *vmx_ihs[VMXNET3_MAX_INTRS];

	kmutex_t *vmx_mtx;

	uint8_t *vmx_mcast;
	void *vmx_qs;
	struct vmxnet3_rss_shared *vmx_rss;
	callout_t vmx_tick;
	struct vmxnet3_dma_alloc vmx_ds_dma;
	struct vmxnet3_dma_alloc vmx_qs_dma;
	struct vmxnet3_dma_alloc vmx_mcast_dma;
	struct vmxnet3_dma_alloc vmx_rss_dma;
	int vmx_max_ntxqueues;
	int vmx_max_nrxqueues;
	uint8_t vmx_lladdr[ETHER_ADDR_LEN];

	u_int vmx_rx_intr_process_limit;
	u_int vmx_tx_intr_process_limit;
	u_int vmx_rx_process_limit;
	u_int vmx_tx_process_limit;
	struct sysctllog *vmx_sysctllog;

	bool vmx_txrx_workqueue;
	struct workqueue *vmx_queue_wq;
};

#define VMXNET3_STAT

#ifdef VMXNET3_STAT
struct {
	u_int txhead;
	u_int txdone;
	u_int maxtxlen;
	u_int rxdone;
	u_int rxfill;
	u_int intr;
} vmxstat;
#endif

typedef enum {
	VMXNET3_BARRIER_RD,
	VMXNET3_BARRIER_WR,
	VMXNET3_BARRIER_RDWR,
} vmxnet3_barrier_t;

#define JUMBO_LEN	(MCLBYTES - ETHER_ALIGN)	/* XXX */
#define DMAADDR(map)	((map)->dm_segs[0].ds_addr)

#define vtophys(va)	0	/* XXX ok? */

static int vmxnet3_match(device_t, cfdata_t, void *);
static void vmxnet3_attach(device_t, device_t, void *);
static int vmxnet3_detach(device_t, int);

static int vmxnet3_alloc_pci_resources(struct vmxnet3_softc *);
static void vmxnet3_free_pci_resources(struct vmxnet3_softc *);
static int vmxnet3_check_version(struct vmxnet3_softc *);
static void vmxnet3_check_multiqueue(struct vmxnet3_softc *);

static int vmxnet3_alloc_msix_interrupts(struct vmxnet3_softc *);
static int vmxnet3_alloc_msi_interrupts(struct vmxnet3_softc *);
static int vmxnet3_alloc_legacy_interrupts(struct vmxnet3_softc *);
static int vmxnet3_alloc_interrupts(struct vmxnet3_softc *);
static void vmxnet3_free_interrupts(struct vmxnet3_softc *);

static int vmxnet3_setup_msix_interrupts(struct vmxnet3_softc *);
static int vmxnet3_setup_msi_interrupt(struct vmxnet3_softc *);
static int vmxnet3_setup_legacy_interrupt(struct vmxnet3_softc *);
static void vmxnet3_set_interrupt_idx(struct vmxnet3_softc *);
static int vmxnet3_setup_interrupts(struct vmxnet3_softc *);
static int vmxnet3_setup_sysctl(struct vmxnet3_softc *);

static int vmxnet3_setup_stats(struct vmxnet3_softc *);
static void vmxnet3_teardown_stats(struct vmxnet3_softc *);

static int vmxnet3_init_rxq(struct vmxnet3_softc *, int);
static int vmxnet3_init_txq(struct vmxnet3_softc *, int);
static int vmxnet3_alloc_rxtx_queues(struct vmxnet3_softc *);
static void vmxnet3_destroy_rxq(struct vmxnet3_rxqueue *);
static void vmxnet3_destroy_txq(struct vmxnet3_txqueue *);
static void vmxnet3_free_rxtx_queues(struct vmxnet3_softc *);

static int vmxnet3_alloc_shared_data(struct vmxnet3_softc *);
static void vmxnet3_free_shared_data(struct vmxnet3_softc *);
static int vmxnet3_alloc_txq_data(struct vmxnet3_softc *);
static void vmxnet3_free_txq_data(struct vmxnet3_softc *);
static int vmxnet3_alloc_rxq_data(struct vmxnet3_softc *);
static void vmxnet3_free_rxq_data(struct vmxnet3_softc *);
static int vmxnet3_alloc_queue_data(struct vmxnet3_softc *);
static void vmxnet3_free_queue_data(struct vmxnet3_softc *);
static int vmxnet3_alloc_mcast_table(struct vmxnet3_softc *);
static void vmxnet3_free_mcast_table(struct vmxnet3_softc *);
static void vmxnet3_init_shared_data(struct vmxnet3_softc *);
static void vmxnet3_reinit_rss_shared_data(struct vmxnet3_softc *);
static void vmxnet3_reinit_shared_data(struct vmxnet3_softc *);
static int vmxnet3_alloc_data(struct vmxnet3_softc *);
static void vmxnet3_free_data(struct vmxnet3_softc *);
static int vmxnet3_setup_interface(struct vmxnet3_softc *);

static void vmxnet3_evintr(struct vmxnet3_softc *);
static bool vmxnet3_txq_eof(struct vmxnet3_txqueue *, u_int);
static int vmxnet3_newbuf(struct vmxnet3_softc *, struct vmxnet3_rxqueue *,
    struct vmxnet3_rxring *);
static void vmxnet3_rxq_eof_discard(struct vmxnet3_rxqueue *,
    struct vmxnet3_rxring *, int);
static void vmxnet3_rxq_discard_chain(struct vmxnet3_rxqueue *);
static void vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *, struct mbuf *);
static void vmxnet3_rxq_input(struct vmxnet3_rxqueue *,
    struct vmxnet3_rxcompdesc *, struct mbuf *);
static bool vmxnet3_rxq_eof(struct vmxnet3_rxqueue *, u_int);
static int vmxnet3_legacy_intr(void *);
static int vmxnet3_txrxq_intr(void *);
static void vmxnet3_handle_queue(void *);
static void vmxnet3_handle_queue_work(struct work *, void *);
static int vmxnet3_event_intr(void *);

static void vmxnet3_txstop(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
static void vmxnet3_rxstop(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
static void vmxnet3_stop_locked(struct vmxnet3_softc *);
static void vmxnet3_stop_rendezvous(struct vmxnet3_softc *);
static void vmxnet3_stop(struct ifnet *, int);

static void vmxnet3_txinit(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
static int vmxnet3_rxinit(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
static int vmxnet3_reinit_queues(struct vmxnet3_softc *);
static int vmxnet3_enable_device(struct vmxnet3_softc *);
static void vmxnet3_reinit_rxfilters(struct vmxnet3_softc *);
static int vmxnet3_reinit(struct vmxnet3_softc *);

static int vmxnet3_init_locked(struct vmxnet3_softc *);
static int vmxnet3_init(struct ifnet *);

static int vmxnet3_txq_offload_ctx(struct vmxnet3_txqueue *, struct mbuf *, int *, int *);
static int vmxnet3_txq_load_mbuf(struct vmxnet3_txqueue *, struct mbuf **, bus_dmamap_t);
static void vmxnet3_txq_unload_mbuf(struct vmxnet3_txqueue *, bus_dmamap_t);
static int vmxnet3_txq_encap(struct vmxnet3_txqueue *, struct mbuf **);
static void vmxnet3_start_locked(struct ifnet *);
static void vmxnet3_start(struct ifnet *);
static void vmxnet3_transmit_locked(struct ifnet *, struct vmxnet3_txqueue *);
static int vmxnet3_transmit(struct ifnet *, struct mbuf *);
static void vmxnet3_deferred_transmit(void *);

static void vmxnet3_set_rxfilter(struct vmxnet3_softc *);
static int vmxnet3_ioctl(struct ifnet *, u_long, void *);
static int vmxnet3_ifflags_cb(struct ethercom *);

static int vmxnet3_watchdog(struct vmxnet3_txqueue *);
static void vmxnet3_refresh_host_stats(struct vmxnet3_softc *);
static void vmxnet3_tick(void *);
static void vmxnet3_if_link_status(struct vmxnet3_softc *);
static bool vmxnet3_cmd_link_status(struct ifnet *);
static void vmxnet3_ifmedia_status(struct ifnet *, struct ifmediareq *);
static int vmxnet3_ifmedia_change(struct ifnet *);
static void vmxnet3_set_lladdr(struct vmxnet3_softc *);
static void vmxnet3_get_lladdr(struct vmxnet3_softc *);

static void vmxnet3_enable_all_intrs(struct vmxnet3_softc *);
static void vmxnet3_disable_all_intrs(struct vmxnet3_softc *);

static int vmxnet3_dma_malloc(struct vmxnet3_softc *, bus_size_t, bus_size_t,
    struct vmxnet3_dma_alloc *);
static void vmxnet3_dma_free(struct vmxnet3_softc *, struct vmxnet3_dma_alloc *);

CFATTACH_DECL3_NEW(vmx, sizeof(struct vmxnet3_softc),
    vmxnet3_match, vmxnet3_attach, vmxnet3_detach, NULL, NULL, NULL, 0);

/* round down to the nearest power of 2 */
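/* e.g. fls32(6) == 3, so 6 rounds down to 1 << 2 == 4; exact powers of 2 map to themselves */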
static int
vmxnet3_calc_queue_size(int n)
{

	if (__predict_false(n <= 0))
		return 1;

	return (1U << (fls32(n) - 1));
}

static inline void
vmxnet3_write_bar0(struct vmxnet3_softc *sc, bus_size_t r, uint32_t v)
{

	bus_space_write_4(sc->vmx_iot0, sc->vmx_ioh0, r, v);
}

static inline uint32_t
vmxnet3_read_bar1(struct vmxnet3_softc *sc, bus_size_t r)
{

	return (bus_space_read_4(sc->vmx_iot1, sc->vmx_ioh1, r));
}

static inline void
vmxnet3_write_bar1(struct vmxnet3_softc *sc, bus_size_t r, uint32_t v)
{

	bus_space_write_4(sc->vmx_iot1, sc->vmx_ioh1, r, v);
}

static inline void
vmxnet3_write_cmd(struct vmxnet3_softc *sc, uint32_t cmd)
{

	vmxnet3_write_bar1(sc, VMXNET3_BAR1_CMD, cmd);
}

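/*
 * Commands are issued by writing the command code to the BAR1 command
 * register; the device then makes the result readable from that same
 * register, which is what vmxnet3_read_cmd() fetches.
 */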
static inline uint32_t
vmxnet3_read_cmd(struct vmxnet3_softc *sc, uint32_t cmd)
{

	vmxnet3_write_cmd(sc, cmd);
	return (vmxnet3_read_bar1(sc, VMXNET3_BAR1_CMD));
}

static inline void
vmxnet3_enable_intr(struct vmxnet3_softc *sc, int irq)
{
	vmxnet3_write_bar0(sc, VMXNET3_BAR0_IMASK(irq), 0);
}

static inline void
vmxnet3_disable_intr(struct vmxnet3_softc *sc, int irq)
{
	vmxnet3_write_bar0(sc, VMXNET3_BAR0_IMASK(irq), 1);
}

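/*
 * Advance the Rx fill index, flipping the ring's generation bit on
 * wrap-around.  Refilled descriptors are stamped with the current
 * generation in the refill path (vmxnet3_newbuf()), which lets the
 * device distinguish freshly posted descriptors from stale ones.
 */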
static inline void
vmxnet3_rxr_increment_fill(struct vmxnet3_rxring *rxr)
{

	if (++rxr->vxrxr_fill == rxr->vxrxr_ndesc) {
		rxr->vxrxr_fill = 0;
		rxr->vxrxr_gen ^= 1;
	}
}

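/*
 * Number of free Tx descriptors: vxtxr_head is the producer index,
 * vxtxr_next the reclaim point, and one slot is kept in reserve so
 * the two never alias; e.g. ndesc = 512, head = 511, next = 0 gives
 * avail = 0 - 511 - 1 = -512, i.e. 512 - 512 = 0 slots free.
 */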
static inline int
vmxnet3_txring_avail(struct vmxnet3_txring *txr)
{
	int avail = txr->vxtxr_next - txr->vxtxr_head - 1;
	return (avail < 0 ? txr->vxtxr_ndesc + avail : avail);
}

/*
 * Since this is a purely paravirtualized device, we do not have
 * to worry about DMA coherency. But at times, we must make sure
 * both the compiler and CPU do not reorder memory operations.
 */
static inline void
vmxnet3_barrier(struct vmxnet3_softc *sc, vmxnet3_barrier_t type)
{

	switch (type) {
	case VMXNET3_BARRIER_RD:
		membar_consumer();
		break;
	case VMXNET3_BARRIER_WR:
		membar_producer();
		break;
	case VMXNET3_BARRIER_RDWR:
		membar_sync();
		break;
	default:
		panic("%s: bad barrier type %d", __func__, type);
	}
}

static int
vmxnet3_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_VMWARE &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VMWARE_VMXNET3)
		return 1;

	return 0;
}

static void
vmxnet3_attach(device_t parent, device_t self, void *aux)
{
	struct vmxnet3_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pcireg_t preg;
	int error;
	int candidate;

	sc->vmx_dev = self;
	sc->vmx_pa = pa;
	sc->vmx_pc = pa->pa_pc;
	if (pci_dma64_available(pa))
		sc->vmx_dmat = pa->pa_dmat64;
	else
		sc->vmx_dmat = pa->pa_dmat;

	pci_aprint_devinfo_fancy(pa, "Ethernet controller", "vmxnet3", 1);

	preg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	sc->vmx_mtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
	callout_init(&sc->vmx_tick, CALLOUT_MPSAFE);

	candidate = MIN(MIN(VMXNET3_MAX_TX_QUEUES, VMXNET3_MAX_RX_QUEUES),
	    ncpu);
	sc->vmx_max_ntxqueues = sc->vmx_max_nrxqueues =
	    vmxnet3_calc_queue_size(candidate);
	sc->vmx_ntxdescs = 512;
	sc->vmx_nrxdescs = 256;
	sc->vmx_max_rxsegs = VMXNET3_MAX_RX_SEGS;

	error = vmxnet3_alloc_pci_resources(sc);
	if (error)
		return;

	error = vmxnet3_check_version(sc);
	if (error)
		return;

	error = vmxnet3_alloc_rxtx_queues(sc);
	if (error)
		return;

	error = vmxnet3_alloc_interrupts(sc);
	if (error)
		return;

	vmxnet3_check_multiqueue(sc);

	error = vmxnet3_alloc_data(sc);
	if (error)
		return;

	error = vmxnet3_setup_interface(sc);
	if (error)
		return;

	error = vmxnet3_setup_interrupts(sc);
	if (error)
		return;

	error = vmxnet3_setup_sysctl(sc);
	if (error)
		return;

	error = vmxnet3_setup_stats(sc);
	if (error)
		return;

	sc->vmx_flags |= VMXNET3_FLAG_ATTACHED;
}

static int
vmxnet3_detach(device_t self, int flags)
{
	struct vmxnet3_softc *sc;
	struct ifnet *ifp;

	sc = device_private(self);
	ifp = &sc->vmx_ethercom.ec_if;

	if (sc->vmx_flags & VMXNET3_FLAG_ATTACHED) {
		VMXNET3_CORE_LOCK(sc);
		vmxnet3_stop_locked(sc);
		callout_halt(&sc->vmx_tick, sc->vmx_mtx);
		callout_destroy(&sc->vmx_tick);
		VMXNET3_CORE_UNLOCK(sc);

		ether_ifdetach(ifp);
		if_detach(ifp);
		ifmedia_fini(&sc->vmx_media);
	}

	vmxnet3_teardown_stats(sc);
	sysctl_teardown(&sc->vmx_sysctllog);

	vmxnet3_free_interrupts(sc);

	vmxnet3_free_data(sc);
	vmxnet3_free_pci_resources(sc);
	vmxnet3_free_rxtx_queues(sc);

	if (sc->vmx_mtx)
		mutex_obj_free(sc->vmx_mtx);

	return (0);
}

static int
vmxnet3_alloc_pci_resources(struct vmxnet3_softc *sc)
{
	struct pci_attach_args *pa = sc->vmx_pa;
	pcireg_t memtype;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
	if (pci_mapreg_map(pa, PCI_BAR(0), memtype, 0, &sc->vmx_iot0, &sc->vmx_ioh0,
	    NULL, &sc->vmx_ios0)) {
		aprint_error_dev(sc->vmx_dev, "failed to map BAR0\n");
		return (ENXIO);
	}
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(1));
	if (pci_mapreg_map(pa, PCI_BAR(1), memtype, 0, &sc->vmx_iot1, &sc->vmx_ioh1,
	    NULL, &sc->vmx_ios1)) {
		aprint_error_dev(sc->vmx_dev, "failed to map BAR1\n");
		return (ENXIO);
	}

	if (!pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSIX, NULL, NULL)) {
		sc->vmx_flags |= VMXNET3_FLAG_NO_MSIX;
		return (0);
	}

	return (0);
}

static void
vmxnet3_free_pci_resources(struct vmxnet3_softc *sc)
{

	if (sc->vmx_ios0) {
		bus_space_unmap(sc->vmx_iot0, sc->vmx_ioh0, sc->vmx_ios0);
		sc->vmx_ios0 = 0;
	}

	if (sc->vmx_ios1) {
		bus_space_unmap(sc->vmx_iot1, sc->vmx_ioh1, sc->vmx_ios1);
		sc->vmx_ios1 = 0;
	}
}

static int
vmxnet3_check_version(struct vmxnet3_softc *sc)
{
	u_int ver;

	ver = vmxnet3_read_bar1(sc, VMXNET3_BAR1_VRRS);
	if ((ver & 0x1) == 0) {
		aprint_error_dev(sc->vmx_dev,
		    "unsupported hardware version 0x%x\n", ver);
		return (ENOTSUP);
	}
	vmxnet3_write_bar1(sc, VMXNET3_BAR1_VRRS, 1);

	ver = vmxnet3_read_bar1(sc, VMXNET3_BAR1_UVRS);
	if ((ver & 0x1) == 0) {
		aprint_error_dev(sc->vmx_dev,
		    "incompatible UPT version 0x%x\n", ver);
		return (ENOTSUP);
	}
	vmxnet3_write_bar1(sc, VMXNET3_BAR1_UVRS, 1);

	return (0);
}

static void
vmxnet3_check_multiqueue(struct vmxnet3_softc *sc)
{

	if (sc->vmx_intr_type != VMXNET3_IT_MSIX)
		goto out;

	/* Just use the maximum configured for now. */
	sc->vmx_nrxqueues = sc->vmx_max_nrxqueues;
	sc->vmx_ntxqueues = sc->vmx_max_ntxqueues;

	if (sc->vmx_nrxqueues > 1)
		sc->vmx_flags |= VMXNET3_FLAG_RSS;

	return;

out:
	sc->vmx_ntxqueues = 1;
	sc->vmx_nrxqueues = 1;
}

static int
vmxnet3_alloc_msix_interrupts(struct vmxnet3_softc *sc)
{
	int required;
	struct pci_attach_args *pa = sc->vmx_pa;

	if (sc->vmx_flags & VMXNET3_FLAG_NO_MSIX)
		return (1);

	/* Allocate an additional vector for the events interrupt. */
	required = MIN(sc->vmx_max_ntxqueues, sc->vmx_max_nrxqueues) + 1;

	if (pci_msix_count(pa->pa_pc, pa->pa_tag) < required)
		return (1);

	if (pci_msix_alloc_exact(pa, &sc->vmx_intrs, required) == 0) {
		sc->vmx_nintrs = required;
		return (0);
	}

	return (1);
}

static int
vmxnet3_alloc_msi_interrupts(struct vmxnet3_softc *sc)
{
	int nmsi, required;
	struct pci_attach_args *pa = sc->vmx_pa;

	required = 1;

	nmsi = pci_msi_count(pa->pa_pc, pa->pa_tag);
	if (nmsi < required)
		return (1);

	if (pci_msi_alloc_exact(pa, &sc->vmx_intrs, required) == 0) {
		sc->vmx_nintrs = required;
		return (0);
	}

	return (1);
}

static int
vmxnet3_alloc_legacy_interrupts(struct vmxnet3_softc *sc)
{

	if (pci_intx_alloc(sc->vmx_pa, &sc->vmx_intrs) == 0) {
		sc->vmx_nintrs = 1;
		return (0);
	}

	return (1);
}

static int
vmxnet3_alloc_interrupts(struct vmxnet3_softc *sc)
{
	u_int config;
	int error;

	config = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_INTRCFG);

	sc->vmx_intr_type = config & 0x03;
	sc->vmx_intr_mask_mode = (config >> 2) & 0x03;

	switch (sc->vmx_intr_type) {
	case VMXNET3_IT_AUTO:
		sc->vmx_intr_type = VMXNET3_IT_MSIX;
		/* FALLTHROUGH */
	case VMXNET3_IT_MSIX:
		error = vmxnet3_alloc_msix_interrupts(sc);
		if (error == 0)
			break;
		sc->vmx_intr_type = VMXNET3_IT_MSI;
		/* FALLTHROUGH */
	case VMXNET3_IT_MSI:
		error = vmxnet3_alloc_msi_interrupts(sc);
		if (error == 0)
			break;
		sc->vmx_intr_type = VMXNET3_IT_LEGACY;
		/* FALLTHROUGH */
	case VMXNET3_IT_LEGACY:
		error = vmxnet3_alloc_legacy_interrupts(sc);
		if (error == 0)
			break;
		/* FALLTHROUGH */
	default:
		sc->vmx_intr_type = -1;
		aprint_error_dev(sc->vmx_dev, "cannot allocate any interrupt resources\n");
		return (ENXIO);
	}

	return (error);
}

static void
vmxnet3_free_interrupts(struct vmxnet3_softc *sc)
{
	pci_chipset_tag_t pc = sc->vmx_pc;
	int i;

	workqueue_destroy(sc->vmx_queue_wq);
	for (i = 0; i < sc->vmx_ntxqueues; i++) {
		struct vmxnet3_queue *vmxq = &sc->vmx_queue[i];

		softint_disestablish(vmxq->vxq_si);
		vmxq->vxq_si = NULL;
	}
	for (i = 0; i < sc->vmx_nintrs; i++) {
		pci_intr_disestablish(pc, sc->vmx_ihs[i]);
	}
	pci_intr_release(pc, sc->vmx_intrs, sc->vmx_nintrs);
}

static int
vmxnet3_setup_msix_interrupts(struct vmxnet3_softc *sc)
{
	pci_chipset_tag_t pc = sc->vmx_pa->pa_pc;
	struct vmxnet3_queue *vmxq;
	pci_intr_handle_t *intr;
	void **ihs;
	int intr_idx, i, use_queues, error;
	kcpuset_t *affinity;
	const char *intrstr;
	char intrbuf[PCI_INTRSTR_LEN];
	char xnamebuf[32];

	intr = sc->vmx_intrs;
	intr_idx = 0;
	ihs = sc->vmx_ihs;

	/* See vmxnet3_alloc_msix_interrupts() */
	use_queues = MIN(sc->vmx_max_ntxqueues, sc->vmx_max_nrxqueues);
	for (i = 0; i < use_queues; i++, intr++, ihs++, intr_idx++) {
		snprintf(xnamebuf, 32, "%s: txrx %d", device_xname(sc->vmx_dev), i);

		vmxq = &sc->vmx_queue[i];

		intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf));

		pci_intr_setattr(pc, intr, PCI_INTR_MPSAFE, true);
		*ihs = pci_intr_establish_xname(pc, *intr, IPL_NET,
		    vmxnet3_txrxq_intr, vmxq, xnamebuf);
		if (*ihs == NULL) {
			aprint_error_dev(sc->vmx_dev,
			    "unable to establish txrx interrupt at %s\n", intrstr);
			return (-1);
		}
		aprint_normal_dev(sc->vmx_dev, "txrx interrupting at %s\n", intrstr);

		kcpuset_create(&affinity, true);
		kcpuset_set(affinity, intr_idx % ncpu);
		error = interrupt_distribute(*ihs, affinity, NULL);
		if (error) {
			aprint_normal_dev(sc->vmx_dev,
			    "unable to change affinity of %s, using default CPU\n",
			    intrstr);
		}
		kcpuset_destroy(affinity);

		vmxq->vxq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
		    vmxnet3_handle_queue, vmxq);
		if (vmxq->vxq_si == NULL) {
			aprint_error_dev(sc->vmx_dev,
			    "softint_establish for vxq_si failed\n");
			return (-1);
		}

		vmxq->vxq_intr_idx = intr_idx;
	}
	snprintf(xnamebuf, MAXCOMLEN, "%s_tx_rx", device_xname(sc->vmx_dev));
	error = workqueue_create(&sc->vmx_queue_wq, xnamebuf,
	    vmxnet3_handle_queue_work, sc, VMXNET3_WORKQUEUE_PRI, IPL_NET,
	    WQ_PERCPU | WQ_MPSAFE);
	if (error) {
		aprint_error_dev(sc->vmx_dev, "workqueue_create failed\n");
		return (-1);
	}
	sc->vmx_txrx_workqueue = false;

	intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf));

	snprintf(xnamebuf, 32, "%s: link", device_xname(sc->vmx_dev));
	pci_intr_setattr(pc, intr, PCI_INTR_MPSAFE, true);
	*ihs = pci_intr_establish_xname(pc, *intr, IPL_NET,
	    vmxnet3_event_intr, sc, xnamebuf);
	if (*ihs == NULL) {
		aprint_error_dev(sc->vmx_dev,
		    "unable to establish event interrupt at %s\n", intrstr);
		return (-1);
	}
	aprint_normal_dev(sc->vmx_dev, "event interrupting at %s\n", intrstr);

	sc->vmx_event_intr_idx = intr_idx;

	return (0);
}

static int
vmxnet3_setup_msi_interrupt(struct vmxnet3_softc *sc)
{
	pci_chipset_tag_t pc = sc->vmx_pa->pa_pc;
	pci_intr_handle_t *intr;
	void **ihs;
	struct vmxnet3_queue *vmxq;
	int i;
	const char *intrstr;
	char intrbuf[PCI_INTRSTR_LEN];
	char xnamebuf[32];

	intr = &sc->vmx_intrs[0];
	ihs = sc->vmx_ihs;
	vmxq = &sc->vmx_queue[0];

	intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf));

	snprintf(xnamebuf, 32, "%s: msi", device_xname(sc->vmx_dev));
	pci_intr_setattr(pc, intr, PCI_INTR_MPSAFE, true);
	*ihs = pci_intr_establish_xname(pc, *intr, IPL_NET,
	    vmxnet3_legacy_intr, sc, xnamebuf);
	if (*ihs == NULL) {
		aprint_error_dev(sc->vmx_dev,
		    "unable to establish interrupt at %s\n", intrstr);
		return (-1);
	}
	aprint_normal_dev(sc->vmx_dev, "interrupting at %s\n", intrstr);

	vmxq->vxq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
	    vmxnet3_handle_queue, vmxq);
	if (vmxq->vxq_si == NULL) {
		aprint_error_dev(sc->vmx_dev,
		    "softint_establish for vxq_si failed\n");
		return (-1);
	}

	for (i = 0; i < MIN(sc->vmx_ntxqueues, sc->vmx_nrxqueues); i++)
		sc->vmx_queue[i].vxq_intr_idx = 0;
	sc->vmx_event_intr_idx = 0;

	return (0);
}

static int
vmxnet3_setup_legacy_interrupt(struct vmxnet3_softc *sc)
{
	pci_chipset_tag_t pc = sc->vmx_pa->pa_pc;
	pci_intr_handle_t *intr;
	void **ihs;
	struct vmxnet3_queue *vmxq;
	int i;
	const char *intrstr;
	char intrbuf[PCI_INTRSTR_LEN];
	char xnamebuf[32];

	intr = &sc->vmx_intrs[0];
	ihs = sc->vmx_ihs;
	vmxq = &sc->vmx_queue[0];

	intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf));

	snprintf(xnamebuf, 32, "%s:legacy", device_xname(sc->vmx_dev));
	pci_intr_setattr(pc, intr, PCI_INTR_MPSAFE, true);
	*ihs = pci_intr_establish_xname(pc, *intr, IPL_NET,
	    vmxnet3_legacy_intr, sc, xnamebuf);
	if (*ihs == NULL) {
		aprint_error_dev(sc->vmx_dev,
		    "unable to establish interrupt at %s\n", intrstr);
		return (-1);
	}
	aprint_normal_dev(sc->vmx_dev, "interrupting at %s\n", intrstr);

	vmxq->vxq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
	    vmxnet3_handle_queue, vmxq);
	if (vmxq->vxq_si == NULL) {
		aprint_error_dev(sc->vmx_dev,
		    "softint_establish for vxq_si failed\n");
		return (-1);
	}

	for (i = 0; i < MIN(sc->vmx_ntxqueues, sc->vmx_nrxqueues); i++)
		sc->vmx_queue[i].vxq_intr_idx = 0;
	sc->vmx_event_intr_idx = 0;

	return (0);
}

static void
vmxnet3_set_interrupt_idx(struct vmxnet3_softc *sc)
{
	struct vmxnet3_queue *vmxq;
	struct vmxnet3_txqueue *txq;
	struct vmxnet3_txq_shared *txs;
	struct vmxnet3_rxqueue *rxq;
	struct vmxnet3_rxq_shared *rxs;
	int i;

	sc->vmx_ds->evintr = sc->vmx_event_intr_idx;

	for (i = 0; i < sc->vmx_ntxqueues; i++) {
		vmxq = &sc->vmx_queue[i];
		txq = &vmxq->vxq_txqueue;
		txs = txq->vxtxq_ts;
		txs->intr_idx = vmxq->vxq_intr_idx;
	}

	for (i = 0; i < sc->vmx_nrxqueues; i++) {
		vmxq = &sc->vmx_queue[i];
		rxq = &vmxq->vxq_rxqueue;
		rxs = rxq->vxrxq_rs;
		rxs->intr_idx = vmxq->vxq_intr_idx;
	}
}

static int
vmxnet3_setup_interrupts(struct vmxnet3_softc *sc)
{
	int error;

	switch (sc->vmx_intr_type) {
	case VMXNET3_IT_MSIX:
		error = vmxnet3_setup_msix_interrupts(sc);
		break;
	case VMXNET3_IT_MSI:
		error = vmxnet3_setup_msi_interrupt(sc);
		break;
	case VMXNET3_IT_LEGACY:
		error = vmxnet3_setup_legacy_interrupt(sc);
		break;
	default:
		panic("%s: invalid interrupt type %d", __func__,
		    sc->vmx_intr_type);
	}

	if (error == 0)
		vmxnet3_set_interrupt_idx(sc);

	return (error);
}

static int
vmxnet3_init_rxq(struct vmxnet3_softc *sc, int q)
{
	struct vmxnet3_rxqueue *rxq;
	struct vmxnet3_rxring *rxr;
	int i;

	rxq = &sc->vmx_queue[q].vxq_rxqueue;

	snprintf(rxq->vxrxq_name, sizeof(rxq->vxrxq_name), "%s-rx%d",
	    device_xname(sc->vmx_dev), q);
	rxq->vxrxq_mtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET /* XXX */);

	rxq->vxrxq_sc = sc;

	for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
		rxr = &rxq->vxrxq_cmd_ring[i];
		rxr->vxrxr_rid = i;
		rxr->vxrxr_ndesc = sc->vmx_nrxdescs;
		rxr->vxrxr_rxbuf = kmem_zalloc(rxr->vxrxr_ndesc *
		    sizeof(struct vmxnet3_rxbuf), KM_SLEEP);

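		/*
		 * The queue's single completion ring services both
		 * command rings, so its size accumulates across the
		 * loop.
		 */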
		rxq->vxrxq_comp_ring.vxcr_ndesc += sc->vmx_nrxdescs;
	}

	return (0);
}

static int
vmxnet3_init_txq(struct vmxnet3_softc *sc, int q)
{
	struct vmxnet3_txqueue *txq;
	struct vmxnet3_txring *txr;

	txq = &sc->vmx_queue[q].vxq_txqueue;
	txr = &txq->vxtxq_cmd_ring;

	snprintf(txq->vxtxq_name, sizeof(txq->vxtxq_name), "%s-tx%d",
	    device_xname(sc->vmx_dev), q);
	txq->vxtxq_mtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET /* XXX */);

	txq->vxtxq_sc = sc;

	txq->vxtxq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
	    vmxnet3_deferred_transmit, txq);
	if (txq->vxtxq_si == NULL) {
		mutex_obj_free(txq->vxtxq_mtx);
		aprint_error_dev(sc->vmx_dev,
		    "softint_establish for vxtxq_si failed\n");
		return ENOMEM;
	}

	txr->vxtxr_ndesc = sc->vmx_ntxdescs;
	txr->vxtxr_txbuf = kmem_zalloc(txr->vxtxr_ndesc *
	    sizeof(struct vmxnet3_txbuf), KM_SLEEP);

	txq->vxtxq_comp_ring.vxcr_ndesc = sc->vmx_ntxdescs;

	txq->vxtxq_interq = pcq_create(sc->vmx_ntxdescs, KM_SLEEP);

	return (0);
}

static int
vmxnet3_alloc_rxtx_queues(struct vmxnet3_softc *sc)
{
	int i, error, max_nqueues;

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

	/*
	 * Only attempt to create multiple queues if MSIX is available.
	 * This check prevents us from allocating queue structures that
	 * we will not use.
	 *
	 * FreeBSD:
	 * MSIX is disabled by default because it is apparently broken for
	 * devices passed through by at least ESXi 5.1.  The
	 * hw.pci.honor_msi_blacklist tunable must be set to zero for MSIX.
	 */
	if (sc->vmx_flags & VMXNET3_FLAG_NO_MSIX) {
		sc->vmx_max_nrxqueues = 1;
		sc->vmx_max_ntxqueues = 1;
	}

	max_nqueues = MAX(sc->vmx_max_ntxqueues, sc->vmx_max_nrxqueues);
	sc->vmx_queue = kmem_zalloc(sizeof(struct vmxnet3_queue) * max_nqueues,
	    KM_SLEEP);

	for (i = 0; i < max_nqueues; i++) {
		struct vmxnet3_queue *vmxq = &sc->vmx_queue[i];
		vmxq->vxq_id = i;
	}

	for (i = 0; i < sc->vmx_max_nrxqueues; i++) {
		error = vmxnet3_init_rxq(sc, i);
		if (error)
			return (error);
	}

	for (i = 0; i < sc->vmx_max_ntxqueues; i++) {
		error = vmxnet3_init_txq(sc, i);
		if (error)
			return (error);
	}

	return (0);
}

static void
vmxnet3_destroy_rxq(struct vmxnet3_rxqueue *rxq)
{
	struct vmxnet3_rxring *rxr;
	int i;

	rxq->vxrxq_sc = NULL;

	for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
		rxr = &rxq->vxrxq_cmd_ring[i];

		if (rxr->vxrxr_rxbuf != NULL) {
			kmem_free(rxr->vxrxr_rxbuf,
			    rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxbuf));
			rxr->vxrxr_rxbuf = NULL;
		}
	}

	if (rxq->vxrxq_mtx != NULL)
		mutex_obj_free(rxq->vxrxq_mtx);
}

static void
vmxnet3_destroy_txq(struct vmxnet3_txqueue *txq)
{
	struct vmxnet3_txring *txr;
	struct mbuf *m;

	txr = &txq->vxtxq_cmd_ring;

	txq->vxtxq_sc = NULL;

	softint_disestablish(txq->vxtxq_si);

	while ((m = pcq_get(txq->vxtxq_interq)) != NULL)
		m_freem(m);
	pcq_destroy(txq->vxtxq_interq);

	if (txr->vxtxr_txbuf != NULL) {
		kmem_free(txr->vxtxr_txbuf,
		    txr->vxtxr_ndesc * sizeof(struct vmxnet3_txbuf));
		txr->vxtxr_txbuf = NULL;
	}

	if (txq->vxtxq_mtx != NULL)
		mutex_obj_free(txq->vxtxq_mtx);
}

static void
vmxnet3_free_rxtx_queues(struct vmxnet3_softc *sc)
{
	int i;

	if (sc->vmx_queue != NULL) {
		int max_nqueues;

		for (i = 0; i < sc->vmx_max_nrxqueues; i++)
			vmxnet3_destroy_rxq(&sc->vmx_queue[i].vxq_rxqueue);

		for (i = 0; i < sc->vmx_max_ntxqueues; i++)
			vmxnet3_destroy_txq(&sc->vmx_queue[i].vxq_txqueue);

		max_nqueues = MAX(sc->vmx_max_nrxqueues, sc->vmx_max_ntxqueues);
		kmem_free(sc->vmx_queue,
		    sizeof(struct vmxnet3_queue) * max_nqueues);
	}
}

static int
vmxnet3_alloc_shared_data(struct vmxnet3_softc *sc)
{
	device_t dev;
	uint8_t *kva;
	size_t size;
	int i, error;

	dev = sc->vmx_dev;

	size = sizeof(struct vmxnet3_driver_shared);
	error = vmxnet3_dma_malloc(sc, size, 1, &sc->vmx_ds_dma);
	if (error) {
		device_printf(dev, "cannot alloc shared memory\n");
		return (error);
	}
	sc->vmx_ds = (struct vmxnet3_driver_shared *) sc->vmx_ds_dma.dma_vaddr;

	size = sc->vmx_ntxqueues * sizeof(struct vmxnet3_txq_shared) +
	    sc->vmx_nrxqueues * sizeof(struct vmxnet3_rxq_shared);
	error = vmxnet3_dma_malloc(sc, size, 128, &sc->vmx_qs_dma);
	if (error) {
		device_printf(dev, "cannot alloc queue shared memory\n");
		return (error);
	}
	sc->vmx_qs = (void *) sc->vmx_qs_dma.dma_vaddr;
	kva = sc->vmx_qs;

	for (i = 0; i < sc->vmx_ntxqueues; i++) {
		sc->vmx_queue[i].vxq_txqueue.vxtxq_ts =
		    (struct vmxnet3_txq_shared *) kva;
		kva += sizeof(struct vmxnet3_txq_shared);
	}
	for (i = 0; i < sc->vmx_nrxqueues; i++) {
		sc->vmx_queue[i].vxq_rxqueue.vxrxq_rs =
		    (struct vmxnet3_rxq_shared *) kva;
		kva += sizeof(struct vmxnet3_rxq_shared);
	}

	if (sc->vmx_flags & VMXNET3_FLAG_RSS) {
		size = sizeof(struct vmxnet3_rss_shared);
		error = vmxnet3_dma_malloc(sc, size, 128, &sc->vmx_rss_dma);
		if (error) {
			device_printf(dev, "cannot alloc rss shared memory\n");
			return (error);
		}
		sc->vmx_rss =
		    (struct vmxnet3_rss_shared *) sc->vmx_rss_dma.dma_vaddr;
	}

	return (0);
}

static void
vmxnet3_free_shared_data(struct vmxnet3_softc *sc)
{

	if (sc->vmx_rss != NULL) {
		vmxnet3_dma_free(sc, &sc->vmx_rss_dma);
		sc->vmx_rss = NULL;
	}

	if (sc->vmx_qs != NULL) {
		vmxnet3_dma_free(sc, &sc->vmx_qs_dma);
		sc->vmx_qs = NULL;
	}

	if (sc->vmx_ds != NULL) {
		vmxnet3_dma_free(sc, &sc->vmx_ds_dma);
		sc->vmx_ds = NULL;
	}
}

static int
vmxnet3_alloc_txq_data(struct vmxnet3_softc *sc)
{
	device_t dev;
	struct vmxnet3_txqueue *txq;
	struct vmxnet3_txring *txr;
	struct vmxnet3_comp_ring *txc;
	size_t descsz, compsz;
	int i, q, error;

	dev = sc->vmx_dev;

	for (q = 0; q < sc->vmx_ntxqueues; q++) {
		txq = &sc->vmx_queue[q].vxq_txqueue;
		txr = &txq->vxtxq_cmd_ring;
		txc = &txq->vxtxq_comp_ring;

		descsz = txr->vxtxr_ndesc * sizeof(struct vmxnet3_txdesc);
		compsz = txr->vxtxr_ndesc * sizeof(struct vmxnet3_txcompdesc);

		error = vmxnet3_dma_malloc(sc, descsz, 512, &txr->vxtxr_dma);
		if (error) {
			device_printf(dev, "cannot alloc Tx descriptors for "
			    "queue %d error %d\n", q, error);
			return (error);
		}
		txr->vxtxr_txd =
		    (struct vmxnet3_txdesc *) txr->vxtxr_dma.dma_vaddr;

		error = vmxnet3_dma_malloc(sc, compsz, 512, &txc->vxcr_dma);
		if (error) {
			device_printf(dev, "cannot alloc Tx comp descriptors "
			    "for queue %d error %d\n", q, error);
			return (error);
		}
		txc->vxcr_u.txcd =
		    (struct vmxnet3_txcompdesc *) txc->vxcr_dma.dma_vaddr;

		for (i = 0; i < txr->vxtxr_ndesc; i++) {
			error = bus_dmamap_create(sc->vmx_dmat, VMXNET3_TX_MAXSIZE,
			    VMXNET3_TX_MAXSEGS, VMXNET3_TX_MAXSEGSIZE, 0, BUS_DMA_NOWAIT,
			    &txr->vxtxr_txbuf[i].vtxb_dmamap);
			if (error) {
				device_printf(dev, "unable to create Tx buf "
				    "dmamap for queue %d idx %d\n", q, i);
				return (error);
			}
		}
	}

	return (0);
}

static void
vmxnet3_free_txq_data(struct vmxnet3_softc *sc)
{
	struct vmxnet3_txqueue *txq;
	struct vmxnet3_txring *txr;
	struct vmxnet3_comp_ring *txc;
	struct vmxnet3_txbuf *txb;
	int i, q;

	for (q = 0; q < sc->vmx_ntxqueues; q++) {
		txq = &sc->vmx_queue[q].vxq_txqueue;
		txr = &txq->vxtxq_cmd_ring;
		txc = &txq->vxtxq_comp_ring;

		for (i = 0; i < txr->vxtxr_ndesc; i++) {
			txb = &txr->vxtxr_txbuf[i];
			if (txb->vtxb_dmamap != NULL) {
				bus_dmamap_destroy(sc->vmx_dmat,
				    txb->vtxb_dmamap);
				txb->vtxb_dmamap = NULL;
			}
		}

		if (txc->vxcr_u.txcd != NULL) {
			vmxnet3_dma_free(sc, &txc->vxcr_dma);
			txc->vxcr_u.txcd = NULL;
		}

		if (txr->vxtxr_txd != NULL) {
			vmxnet3_dma_free(sc, &txr->vxtxr_dma);
			txr->vxtxr_txd = NULL;
		}
	}
}

static int
vmxnet3_alloc_rxq_data(struct vmxnet3_softc *sc)
{
	device_t dev;
	struct vmxnet3_rxqueue *rxq;
	struct vmxnet3_rxring *rxr;
	struct vmxnet3_comp_ring *rxc;
	int descsz, compsz;
	int i, j, q, error;

	dev = sc->vmx_dev;

	for (q = 0; q < sc->vmx_nrxqueues; q++) {
		rxq = &sc->vmx_queue[q].vxq_rxqueue;
		rxc = &rxq->vxrxq_comp_ring;
		compsz = 0;

		for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
			rxr = &rxq->vxrxq_cmd_ring[i];

			descsz = rxr->vxrxr_ndesc *
			    sizeof(struct vmxnet3_rxdesc);
			compsz += rxr->vxrxr_ndesc *
			    sizeof(struct vmxnet3_rxcompdesc);

			error = vmxnet3_dma_malloc(sc, descsz, 512,
			    &rxr->vxrxr_dma);
			if (error) {
				device_printf(dev, "cannot allocate Rx "
				    "descriptors for queue %d/%d error %d\n",
				    i, q, error);
				return (error);
			}
			rxr->vxrxr_rxd =
			    (struct vmxnet3_rxdesc *) rxr->vxrxr_dma.dma_vaddr;
		}

		error = vmxnet3_dma_malloc(sc, compsz, 512, &rxc->vxcr_dma);
		if (error) {
			device_printf(dev, "cannot alloc Rx comp descriptors "
			    "for queue %d error %d\n", q, error);
			return (error);
		}
		rxc->vxcr_u.rxcd =
		    (struct vmxnet3_rxcompdesc *) rxc->vxcr_dma.dma_vaddr;

		for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
			rxr = &rxq->vxrxq_cmd_ring[i];

			error = bus_dmamap_create(sc->vmx_dmat, JUMBO_LEN, 1,
			    JUMBO_LEN, 0, BUS_DMA_NOWAIT,
			    &rxr->vxrxr_spare_dmap);
			if (error) {
				device_printf(dev, "unable to create spare "
				    "dmamap for queue %d/%d error %d\n",
				    q, i, error);
				return (error);
			}

			for (j = 0; j < rxr->vxrxr_ndesc; j++) {
				error = bus_dmamap_create(sc->vmx_dmat, JUMBO_LEN, 1,
				    JUMBO_LEN, 0, BUS_DMA_NOWAIT,
				    &rxr->vxrxr_rxbuf[j].vrxb_dmamap);
				if (error) {
					device_printf(dev, "unable to create "
					    "dmamap for queue %d/%d slot %d "
					    "error %d\n",
					    q, i, j, error);
					return (error);
				}
			}
		}
	}

	return (0);
}

static void
vmxnet3_free_rxq_data(struct vmxnet3_softc *sc)
{
	struct vmxnet3_rxqueue *rxq;
	struct vmxnet3_rxring *rxr;
	struct vmxnet3_comp_ring *rxc;
	struct vmxnet3_rxbuf *rxb;
	int i, j, q;

	for (q = 0; q < sc->vmx_nrxqueues; q++) {
		rxq = &sc->vmx_queue[q].vxq_rxqueue;
		rxc = &rxq->vxrxq_comp_ring;

		for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
			rxr = &rxq->vxrxq_cmd_ring[i];

			if (rxr->vxrxr_spare_dmap != NULL) {
				bus_dmamap_destroy(sc->vmx_dmat,
				    rxr->vxrxr_spare_dmap);
				rxr->vxrxr_spare_dmap = NULL;
			}

			for (j = 0; j < rxr->vxrxr_ndesc; j++) {
				rxb = &rxr->vxrxr_rxbuf[j];
				if (rxb->vrxb_dmamap != NULL) {
					bus_dmamap_destroy(sc->vmx_dmat,
					    rxb->vrxb_dmamap);
					rxb->vrxb_dmamap = NULL;
				}
			}
		}

		if (rxc->vxcr_u.rxcd != NULL) {
			vmxnet3_dma_free(sc, &rxc->vxcr_dma);
			rxc->vxcr_u.rxcd = NULL;
		}

		for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
			rxr = &rxq->vxrxq_cmd_ring[i];

			if (rxr->vxrxr_rxd != NULL) {
				vmxnet3_dma_free(sc, &rxr->vxrxr_dma);
				rxr->vxrxr_rxd = NULL;
			}
		}
	}
}

static int
vmxnet3_alloc_queue_data(struct vmxnet3_softc *sc)
{
	int error;

	error = vmxnet3_alloc_txq_data(sc);
	if (error)
		return (error);

	error = vmxnet3_alloc_rxq_data(sc);
	if (error)
		return (error);

	return (0);
}

static void
vmxnet3_free_queue_data(struct vmxnet3_softc *sc)
{

	if (sc->vmx_queue != NULL) {
		vmxnet3_free_rxq_data(sc);
		vmxnet3_free_txq_data(sc);
	}
}

static int
vmxnet3_alloc_mcast_table(struct vmxnet3_softc *sc)
{
	int error;

	error = vmxnet3_dma_malloc(sc, VMXNET3_MULTICAST_MAX * ETHER_ADDR_LEN,
	    32, &sc->vmx_mcast_dma);
	if (error)
		device_printf(sc->vmx_dev, "unable to alloc multicast table\n");
	else
		sc->vmx_mcast = sc->vmx_mcast_dma.dma_vaddr;

	return (error);
}

static void
vmxnet3_free_mcast_table(struct vmxnet3_softc *sc)
{

	if (sc->vmx_mcast != NULL) {
		vmxnet3_dma_free(sc, &sc->vmx_mcast_dma);
		sc->vmx_mcast = NULL;
	}
}

static void
vmxnet3_init_shared_data(struct vmxnet3_softc *sc)
{
	struct vmxnet3_driver_shared *ds;
	struct vmxnet3_txqueue *txq;
	struct vmxnet3_txq_shared *txs;
	struct vmxnet3_rxqueue *rxq;
	struct vmxnet3_rxq_shared *rxs;
	int i;

	ds = sc->vmx_ds;

1643 1.1 ryo /*
1644 1.1 ryo * Initialize fields of the shared data that remain the same across
1645 1.1 ryo * reinits. Note the shared data is zeroed when allocated.
1646 1.1 ryo */
1647 1.1 ryo
1648 1.1 ryo ds->magic = VMXNET3_REV1_MAGIC;
1649 1.1 ryo
1650 1.1 ryo /* DriverInfo */
1651 1.1 ryo ds->version = VMXNET3_DRIVER_VERSION;
1652 1.1 ryo ds->guest = VMXNET3_GOS_FREEBSD |
1653 1.1 ryo #ifdef __LP64__
1654 1.1 ryo VMXNET3_GOS_64BIT;
1655 1.1 ryo #else
1656 1.1 ryo VMXNET3_GOS_32BIT;
1657 1.1 ryo #endif
1658 1.1 ryo ds->vmxnet3_revision = 1;
1659 1.1 ryo ds->upt_version = 1;
1660 1.1 ryo
1661 1.1 ryo /* Misc. conf */
1662 1.1 ryo ds->driver_data = vtophys(sc);
1663 1.1 ryo ds->driver_data_len = sizeof(struct vmxnet3_softc);
1664 1.1 ryo ds->queue_shared = sc->vmx_qs_dma.dma_paddr;
1665 1.1 ryo ds->queue_shared_len = sc->vmx_qs_dma.dma_size;
1666 1.1 ryo ds->nrxsg_max = sc->vmx_max_rxsegs;
1667 1.1 ryo
1668 1.1 ryo /* RSS conf */
1669 1.1 ryo if (sc->vmx_flags & VMXNET3_FLAG_RSS) {
1670 1.1 ryo ds->rss.version = 1;
1671 1.1 ryo ds->rss.paddr = sc->vmx_rss_dma.dma_paddr;
1672 1.1 ryo ds->rss.len = sc->vmx_rss_dma.dma_size;
1673 1.1 ryo }
1674 1.1 ryo
1675 1.1 ryo /* Interrupt control. */
1676 1.1 ryo ds->automask = sc->vmx_intr_mask_mode == VMXNET3_IMM_AUTO;
1677 1.1 ryo ds->nintr = sc->vmx_nintrs;
1678 1.1 ryo ds->evintr = sc->vmx_event_intr_idx;
1679 1.1 ryo ds->ictrl = VMXNET3_ICTRL_DISABLE_ALL;
1680 1.1 ryo
1681 1.1 ryo for (i = 0; i < sc->vmx_nintrs; i++)
1682 1.1 ryo ds->modlevel[i] = UPT1_IMOD_ADAPTIVE;
1683 1.1 ryo
1684 1.1 ryo /* Receive filter. */
1685 1.1 ryo ds->mcast_table = sc->vmx_mcast_dma.dma_paddr;
1686 1.1 ryo ds->mcast_tablelen = sc->vmx_mcast_dma.dma_size;
1687 1.1 ryo
1688 1.1 ryo /* Tx queues */
1689 1.1 ryo for (i = 0; i < sc->vmx_ntxqueues; i++) {
1690 1.1 ryo txq = &sc->vmx_queue[i].vxq_txqueue;
1691 1.1 ryo txs = txq->vxtxq_ts;
1692 1.1 ryo
1693 1.1 ryo txs->cmd_ring = txq->vxtxq_cmd_ring.vxtxr_dma.dma_paddr;
1694 1.1 ryo txs->cmd_ring_len = txq->vxtxq_cmd_ring.vxtxr_ndesc;
1695 1.1 ryo txs->comp_ring = txq->vxtxq_comp_ring.vxcr_dma.dma_paddr;
1696 1.1 ryo txs->comp_ring_len = txq->vxtxq_comp_ring.vxcr_ndesc;
1697 1.1 ryo txs->driver_data = vtophys(txq);
1698 1.1 ryo txs->driver_data_len = sizeof(struct vmxnet3_txqueue);
1699 1.1 ryo }
1700 1.1 ryo
1701 1.1 ryo /* Rx queues */
1702 1.1 ryo for (i = 0; i < sc->vmx_nrxqueues; i++) {
1703 1.1 ryo rxq = &sc->vmx_queue[i].vxq_rxqueue;
1704 1.1 ryo rxs = rxq->vxrxq_rs;
1705 1.1 ryo
1706 1.1 ryo rxs->cmd_ring[0] = rxq->vxrxq_cmd_ring[0].vxrxr_dma.dma_paddr;
1707 1.1 ryo rxs->cmd_ring_len[0] = rxq->vxrxq_cmd_ring[0].vxrxr_ndesc;
1708 1.1 ryo rxs->cmd_ring[1] = rxq->vxrxq_cmd_ring[1].vxrxr_dma.dma_paddr;
1709 1.1 ryo rxs->cmd_ring_len[1] = rxq->vxrxq_cmd_ring[1].vxrxr_ndesc;
1710 1.1 ryo rxs->comp_ring = rxq->vxrxq_comp_ring.vxcr_dma.dma_paddr;
1711 1.1 ryo rxs->comp_ring_len = rxq->vxrxq_comp_ring.vxcr_ndesc;
1712 1.1 ryo rxs->driver_data = vtophys(rxq);
1713 1.1 ryo rxs->driver_data_len = sizeof(struct vmxnet3_rxqueue);
1714 1.1 ryo }
1715 1.1 ryo }
1716 1.1 ryo
1717 1.1 ryo static void
1718 1.1 ryo vmxnet3_reinit_rss_shared_data(struct vmxnet3_softc *sc)
1719 1.1 ryo {
1720 1.1 ryo /*
1721 1.1 ryo * Use the same key as the Linux driver until FreeBSD can do
1722 1.1 ryo * RSS (presumably Toeplitz) in software.
1723 1.1 ryo */
1724 1.1 ryo static const uint8_t rss_key[UPT1_RSS_MAX_KEY_SIZE] = {
1725 1.1 ryo 0x3b, 0x56, 0xd1, 0x56, 0x13, 0x4a, 0xe7, 0xac,
1726 1.1 ryo 0xe8, 0x79, 0x09, 0x75, 0xe8, 0x65, 0x79, 0x28,
1727 1.1 ryo 0x35, 0x12, 0xb9, 0x56, 0x7c, 0x76, 0x4b, 0x70,
1728 1.1 ryo 0xd8, 0x56, 0xa3, 0x18, 0x9b, 0x0a, 0xee, 0xf3,
1729 1.1 ryo 0x96, 0xa6, 0x9f, 0x8f, 0x9e, 0x8c, 0x90, 0xc9,
1730 1.1 ryo };
1731 1.1 ryo
1732 1.1 ryo struct vmxnet3_rss_shared *rss;
1733 1.1 ryo int i;
1734 1.1 ryo
1735 1.1 ryo rss = sc->vmx_rss;
1736 1.1 ryo
1737 1.1 ryo rss->hash_type =
1738 1.1 ryo UPT1_RSS_HASH_TYPE_IPV4 | UPT1_RSS_HASH_TYPE_TCP_IPV4 |
1739 1.1 ryo UPT1_RSS_HASH_TYPE_IPV6 | UPT1_RSS_HASH_TYPE_TCP_IPV6;
1740 1.1 ryo rss->hash_func = UPT1_RSS_HASH_FUNC_TOEPLITZ;
1741 1.1 ryo rss->hash_key_size = UPT1_RSS_MAX_KEY_SIZE;
1742 1.1 ryo rss->ind_table_size = UPT1_RSS_MAX_IND_TABLE_SIZE;
1743 1.1 ryo memcpy(rss->hash_key, rss_key, UPT1_RSS_MAX_KEY_SIZE);
1744 1.1 ryo
1745 1.1 ryo for (i = 0; i < UPT1_RSS_MAX_IND_TABLE_SIZE; i++)
1746 1.1 ryo rss->ind_table[i] = i % sc->vmx_nrxqueues;
1747 1.1 ryo }
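/*
 * A sketch of how the device side presumably consumes this (not driver
 * code): the Toeplitz hash of each flow is reduced by the indirection
 * table size and the table entry selects the Rx queue, roughly
 *
 *	queue = ind_table[toeplitz_hash(flow, hash_key) % ind_table_size];
 *
 * With the modulo fill above, the queues end up assigned round-robin.
 */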
1748 1.1 ryo
1749 1.1 ryo static void
1750 1.1 ryo vmxnet3_reinit_shared_data(struct vmxnet3_softc *sc)
1751 1.1 ryo {
1752 1.1 ryo struct ifnet *ifp;
1753 1.1 ryo struct vmxnet3_driver_shared *ds;
1754 1.1 ryo
1755 1.1 ryo ifp = &sc->vmx_ethercom.ec_if;
1756 1.1 ryo ds = sc->vmx_ds;
1757 1.1 ryo
1758 1.1 ryo ds->mtu = ifp->if_mtu;
1759 1.1 ryo ds->ntxqueue = sc->vmx_ntxqueues;
1760 1.1 ryo ds->nrxqueue = sc->vmx_nrxqueues;
1761 1.1 ryo
1762 1.1 ryo ds->upt_features = 0;
1763 1.1 ryo if (ifp->if_capenable &
1764 1.1 ryo (IFCAP_CSUM_IPv4_Rx | IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
1765 1.1 ryo IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
1766 1.1 ryo ds->upt_features |= UPT1_F_CSUM;
1767 1.1 ryo if (sc->vmx_ethercom.ec_capenable & ETHERCAP_VLAN_HWTAGGING)
1768 1.1 ryo ds->upt_features |= UPT1_F_VLAN;
1769 1.1 ryo
1770 1.1 ryo if (sc->vmx_flags & VMXNET3_FLAG_RSS) {
1771 1.1 ryo ds->upt_features |= UPT1_F_RSS;
1772 1.1 ryo vmxnet3_reinit_rss_shared_data(sc);
1773 1.1 ryo }
1774 1.1 ryo
1775 1.1 ryo vmxnet3_write_bar1(sc, VMXNET3_BAR1_DSL, sc->vmx_ds_dma.dma_paddr);
1776 1.1 ryo vmxnet3_write_bar1(sc, VMXNET3_BAR1_DSH,
1777 1.1 ryo (uint64_t) sc->vmx_ds_dma.dma_paddr >> 32);
1778 1.1 ryo }
1779 1.1 ryo
1780 1.1 ryo static int
1781 1.1 ryo vmxnet3_alloc_data(struct vmxnet3_softc *sc)
1782 1.1 ryo {
1783 1.1 ryo int error;
1784 1.1 ryo
1785 1.1 ryo error = vmxnet3_alloc_shared_data(sc);
1786 1.1 ryo if (error)
1787 1.1 ryo return (error);
1788 1.1 ryo
1789 1.1 ryo error = vmxnet3_alloc_queue_data(sc);
1790 1.1 ryo if (error)
1791 1.1 ryo return (error);
1792 1.1 ryo
1793 1.1 ryo error = vmxnet3_alloc_mcast_table(sc);
1794 1.1 ryo if (error)
1795 1.1 ryo return (error);
1796 1.1 ryo
1797 1.1 ryo vmxnet3_init_shared_data(sc);
1798 1.1 ryo
1799 1.1 ryo return (0);
1800 1.1 ryo }
1801 1.1 ryo
1802 1.1 ryo static void
1803 1.1 ryo vmxnet3_free_data(struct vmxnet3_softc *sc)
1804 1.1 ryo {
1805 1.1 ryo
1806 1.1 ryo vmxnet3_free_mcast_table(sc);
1807 1.1 ryo vmxnet3_free_queue_data(sc);
1808 1.1 ryo vmxnet3_free_shared_data(sc);
1809 1.1 ryo }
1810 1.1 ryo
1811 1.1 ryo static int
1812 1.1 ryo vmxnet3_setup_interface(struct vmxnet3_softc *sc)
1813 1.1 ryo {
1814 1.1 ryo struct ifnet *ifp = &sc->vmx_ethercom.ec_if;
1815 1.1 ryo
1816 1.1 ryo vmxnet3_get_lladdr(sc);
1817 1.1 ryo aprint_normal_dev(sc->vmx_dev, "Ethernet address %s\n",
1818 1.1 ryo ether_sprintf(sc->vmx_lladdr));
1819 1.1 ryo vmxnet3_set_lladdr(sc);
1820 1.1 ryo
1821 1.1 ryo strlcpy(ifp->if_xname, device_xname(sc->vmx_dev), IFNAMSIZ);
1822 1.1 ryo ifp->if_softc = sc;
1823 1.1 ryo ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;
1824 1.1 ryo ifp->if_extflags = IFEF_MPSAFE;
1825 1.1 ryo ifp->if_ioctl = vmxnet3_ioctl;
1826 1.1 ryo ifp->if_start = vmxnet3_start;
1827 1.1 ryo ifp->if_transmit = vmxnet3_transmit;
1828 1.1 ryo ifp->if_watchdog = NULL;
1829 1.1 ryo ifp->if_init = vmxnet3_init;
1830 1.1 ryo ifp->if_stop = vmxnet3_stop;
1831 1.1 ryo sc->vmx_ethercom.ec_if.if_capabilities |= IFCAP_CSUM_IPv4_Rx |
1832 1.1 ryo IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
1833 1.1 ryo IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
1834 1.1 ryo IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_TCPv6_Rx |
1835 1.1 ryo IFCAP_CSUM_UDPv6_Tx | IFCAP_CSUM_UDPv6_Rx;
1836 1.1 ryo
1837 1.1 ryo ifp->if_capenable = ifp->if_capabilities;
1838 1.1 ryo
1839 1.1 ryo sc->vmx_ethercom.ec_if.if_capabilities |= IFCAP_TSOv4 | IFCAP_TSOv6;
1840 1.1 ryo
1841 1.1 ryo sc->vmx_ethercom.ec_capabilities |=
1842 1.1 ryo ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING | ETHERCAP_JUMBO_MTU;
1843 1.1 ryo sc->vmx_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
1844 1.1 ryo
1845 1.1 ryo IFQ_SET_MAXLEN(&ifp->if_snd, sc->vmx_ntxdescs);
1846 1.1 ryo IFQ_SET_READY(&ifp->if_snd);
1847 1.1 ryo
1848 1.1 ryo /* Initialize ifmedia structures. */
1849 1.1 ryo sc->vmx_ethercom.ec_ifmedia = &sc->vmx_media;
1850 1.1 ryo ifmedia_init_with_lock(&sc->vmx_media, IFM_IMASK, vmxnet3_ifmedia_change,
1851 1.1 ryo vmxnet3_ifmedia_status, sc->vmx_mtx);
1852 1.1 ryo ifmedia_add(&sc->vmx_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1853 1.1 ryo ifmedia_add(&sc->vmx_media, IFM_ETHER | IFM_10G_T | IFM_FDX, 0, NULL);
1854 1.1 ryo ifmedia_add(&sc->vmx_media, IFM_ETHER | IFM_10G_T, 0, NULL);
1855 1.1 ryo ifmedia_add(&sc->vmx_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
1856 1.1 ryo ifmedia_add(&sc->vmx_media, IFM_ETHER | IFM_1000_T, 0, NULL);
1857 1.1 ryo ifmedia_set(&sc->vmx_media, IFM_ETHER | IFM_AUTO);
1858 1.1 ryo
1859 1.1 ryo if_attach(ifp);
1860 1.1 ryo if_deferred_start_init(ifp, NULL);
1861 1.1 ryo ether_ifattach(ifp, sc->vmx_lladdr);
1862 1.1 ryo ether_set_ifflags_cb(&sc->vmx_ethercom, vmxnet3_ifflags_cb);
1863 1.1 ryo vmxnet3_cmd_link_status(ifp);
1864 1.1 ryo
1865 1.1 ryo /* These should be set before the interrupt handlers are established. */
1866 1.1 ryo sc->vmx_rx_intr_process_limit = VMXNET3_RX_INTR_PROCESS_LIMIT;
1867 1.1 ryo sc->vmx_rx_process_limit = VMXNET3_RX_PROCESS_LIMIT;
1868 1.1 ryo sc->vmx_tx_intr_process_limit = VMXNET3_TX_INTR_PROCESS_LIMIT;
1869 1.1 ryo sc->vmx_tx_process_limit = VMXNET3_TX_PROCESS_LIMIT;
1870 1.1 ryo
1871 1.1 ryo return (0);
1872 1.1 ryo }
1873 1.1 ryo
1874 1.1 ryo static int
1875 1.1 ryo vmxnet3_setup_sysctl(struct vmxnet3_softc *sc)
1876 1.1 ryo {
1877 1.1 ryo const char *devname;
1878 1.1 ryo struct sysctllog **log;
1879 1.1 ryo const struct sysctlnode *rnode, *rxnode, *txnode;
1880 1.1 ryo int error;
1881 1.1 ryo
1882 1.1 ryo log = &sc->vmx_sysctllog;
1883 1.1 ryo devname = device_xname(sc->vmx_dev);
1884 1.1 ryo
1885 1.1 ryo error = sysctl_createv(log, 0, NULL, &rnode,
1886 1.1 ryo 0, CTLTYPE_NODE, devname,
1887 1.1 ryo SYSCTL_DESCR("vmxnet3 information and settings"),
1888 1.1 ryo NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
1889 1.1 ryo if (error)
1890 1.1 ryo goto out;
1891 1.1 ryo error = sysctl_createv(log, 0, &rnode, NULL,
1892 1.1 ryo CTLFLAG_READWRITE, CTLTYPE_BOOL, "txrx_workqueue",
1893 1.1 ryo SYSCTL_DESCR("Use workqueue for packet processing"),
1894 1.1 ryo NULL, 0, &sc->vmx_txrx_workqueue, 0, CTL_CREATE, CTL_EOL);
1895 1.1 ryo if (error)
1896 1.1 ryo goto out;
1897 1.1 ryo
1898 1.1 ryo error = sysctl_createv(log, 0, &rnode, &rxnode,
1899 1.1 ryo 0, CTLTYPE_NODE, "rx",
1900 1.1 ryo SYSCTL_DESCR("vmxnet3 information and settings for Rx"),
1901 1.1 ryo NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
1902 1.1 ryo if (error)
1903 1.1 ryo goto out;
1904 1.1 ryo error = sysctl_createv(log, 0, &rxnode, NULL,
1905 1.1 ryo CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
1906 1.1 ryo SYSCTL_DESCR("max number of Rx packets to process for interrupt processing"),
1907 1.1 ryo NULL, 0, &sc->vmx_rx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
1908 1.1 ryo if (error)
1909 1.1 ryo goto out;
1910 1.1 ryo error = sysctl_createv(log, 0, &rxnode, NULL,
1911 1.1 ryo CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
1912 1.1 ryo SYSCTL_DESCR("max number of Rx packets to process for deferred processing"),
1913 1.1 ryo NULL, 0, &sc->vmx_rx_process_limit, 0, CTL_CREATE, CTL_EOL);
1914 1.1 ryo if (error)
1915 1.1 ryo goto out;
1916 1.1 ryo
1917 1.1 ryo error = sysctl_createv(log, 0, &rnode, &txnode,
1918 1.1 ryo 0, CTLTYPE_NODE, "tx",
1919 1.1 ryo SYSCTL_DESCR("vmxnet3 information and settings for Tx"),
1920 1.1 ryo NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
1921 1.1 ryo if (error)
1922 1.1 ryo goto out;
1923 1.1 ryo error = sysctl_createv(log, 0, &txnode, NULL,
1924 1.1 ryo CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
1925 1.1 ryo SYSCTL_DESCR("max number of Tx packets to process for interrupt processing"),
1926 1.1 ryo NULL, 0, &sc->vmx_tx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
1927 1.1 ryo if (error)
1928 1.1 ryo goto out;
1929 1.1 ryo error = sysctl_createv(log, 0, &txnode, NULL,
1930 1.1 ryo CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
1931 1.1 ryo SYSCTL_DESCR("max number of Tx packets to process for deferred processing"),
1932 1.1 ryo NULL, 0, &sc->vmx_tx_process_limit, 0, CTL_CREATE, CTL_EOL);
1933 1.1 ryo
1934 1.1 ryo out:
1935 1.1 ryo if (error) {
1936 1.1 ryo aprint_error_dev(sc->vmx_dev,
1937 1.1 ryo "unable to create sysctl node\n");
1938 1.1 ryo sysctl_teardown(log);
1939 1.1 ryo }
1940 1.1 ryo return error;
1941 1.1 ryo }
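/*
 * The nodes above are created under hw.<devname>.  For example, assuming
 * the first device attaches as vmx0:
 *
 *	sysctl -w hw.vmx0.txrx_workqueue=1
 *	sysctl hw.vmx0.rx.intr_process_limit
 */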
1942 1.1 ryo
1943 1.1 ryo static int
1944 1.1 ryo vmxnet3_setup_stats(struct vmxnet3_softc *sc)
1945 1.1 ryo {
1946 1.1 ryo struct vmxnet3_queue *vmxq;
1947 1.1 ryo struct vmxnet3_txqueue *txq;
1948 1.1 ryo struct vmxnet3_rxqueue *rxq;
1949 1.1 ryo int i;
1950 1.1 ryo
1951 1.1 ryo for (i = 0; i < sc->vmx_ntxqueues; i++) {
1952 1.1 ryo vmxq = &sc->vmx_queue[i];
1953 1.1 ryo txq = &vmxq->vxq_txqueue;
1954 1.1 ryo evcnt_attach_dynamic(&txq->vxtxq_intr, EVCNT_TYPE_INTR,
1955 1.1 ryo NULL, txq->vxtxq_name, "Interrupt on queue");
1956 1.1 ryo evcnt_attach_dynamic(&txq->vxtxq_defer, EVCNT_TYPE_MISC,
1957 1.1 ryo NULL, txq->vxtxq_name, "Handled queue in softint/workqueue");
1958 1.1 ryo evcnt_attach_dynamic(&txq->vxtxq_deferreq, EVCNT_TYPE_MISC,
1959 1.1 ryo NULL, txq->vxtxq_name, "Requested in softint/workqueue");
1960 1.1 ryo evcnt_attach_dynamic(&txq->vxtxq_pcqdrop, EVCNT_TYPE_MISC,
1961 1.1 ryo NULL, txq->vxtxq_name, "Dropped in pcq");
1962 1.1 ryo evcnt_attach_dynamic(&txq->vxtxq_transmitdef, EVCNT_TYPE_MISC,
1963 1.1 ryo NULL, txq->vxtxq_name, "Deferred transmit");
1964 1.1 ryo evcnt_attach_dynamic(&txq->vxtxq_watchdogto, EVCNT_TYPE_MISC,
1965 1.1 ryo NULL, txq->vxtxq_name, "Watchdog timeout");
1966 1.1 ryo evcnt_attach_dynamic(&txq->vxtxq_defragged, EVCNT_TYPE_MISC,
1967 1.1 ryo NULL, txq->vxtxq_name, "m_defrag succeeded");
1968 1.1 ryo evcnt_attach_dynamic(&txq->vxtxq_defrag_failed, EVCNT_TYPE_MISC,
1969 1.1 ryo NULL, txq->vxtxq_name, "m_defrag failed");
1970 1.1 ryo }
1971 1.1 ryo
1972 1.1 ryo for (i = 0; i < sc->vmx_nrxqueues; i++) {
1973 1.1 ryo vmxq = &sc->vmx_queue[i];
1974 1.1 ryo rxq = &vmxq->vxq_rxqueue;
1975 1.1 ryo evcnt_attach_dynamic(&rxq->vxrxq_intr, EVCNT_TYPE_INTR,
1976 1.1 ryo NULL, rxq->vxrxq_name, "Interrupt on queue");
1977 1.1 ryo evcnt_attach_dynamic(&rxq->vxrxq_defer, EVCNT_TYPE_MISC,
1978 1.1 ryo NULL, rxq->vxrxq_name, "Handled queue in softint/workqueue");
1979 1.1 ryo evcnt_attach_dynamic(&rxq->vxrxq_deferreq, EVCNT_TYPE_MISC,
1980 1.1 ryo NULL, rxq->vxrxq_name, "Requested in softint/workqueue");
1981 1.1 ryo evcnt_attach_dynamic(&rxq->vxrxq_mgetcl_failed, EVCNT_TYPE_MISC,
1982 1.1 ryo NULL, rxq->vxrxq_name, "MCLGET failed");
1983 1.1 ryo evcnt_attach_dynamic(&rxq->vxrxq_mbuf_load_failed, EVCNT_TYPE_MISC,
1984 1.1 ryo NULL, rxq->vxrxq_name, "bus_dmamap_load_mbuf failed");
1985 1.1 ryo }
1986 1.1 ryo
1987 1.1 ryo evcnt_attach_dynamic(&sc->vmx_event_intr, EVCNT_TYPE_INTR,
1988 1.1 ryo NULL, device_xname(sc->vmx_dev), "Interrupt for other events");
1989 1.1 ryo evcnt_attach_dynamic(&sc->vmx_event_link, EVCNT_TYPE_MISC,
1990 1.1 ryo NULL, device_xname(sc->vmx_dev), "Link status event");
1991 1.1 ryo evcnt_attach_dynamic(&sc->vmx_event_txqerror, EVCNT_TYPE_MISC,
1992 1.1 ryo NULL, device_xname(sc->vmx_dev), "Tx queue error event");
1993 1.1 ryo evcnt_attach_dynamic(&sc->vmx_event_rxqerror, EVCNT_TYPE_MISC,
1994 1.1 ryo NULL, device_xname(sc->vmx_dev), "Rx queue error event");
1995 1.1 ryo evcnt_attach_dynamic(&sc->vmx_event_dic, EVCNT_TYPE_MISC,
1996 1.1 ryo NULL, device_xname(sc->vmx_dev), "Device impl change event");
1997 1.1 ryo evcnt_attach_dynamic(&sc->vmx_event_debug, EVCNT_TYPE_MISC,
1998 1.1 ryo NULL, device_xname(sc->vmx_dev), "Debug event");
1999 1.1 ryo
2000 1.1 ryo return 0;
2001 1.1 ryo }
2002 1.1 ryo
2003 1.1 ryo static void
2004 1.1 ryo vmxnet3_teardown_stats(struct vmxnet3_softc *sc)
2005 1.1 ryo {
2006 1.1 ryo struct vmxnet3_queue *vmxq;
2007 1.1 ryo struct vmxnet3_txqueue *txq;
2008 1.1 ryo struct vmxnet3_rxqueue *rxq;
2009 1.1 ryo int i;
2010 1.1 ryo
2011 1.1 ryo for (i = 0; i < sc->vmx_ntxqueues; i++) {
2012 1.1 ryo vmxq = &sc->vmx_queue[i];
2013 1.1 ryo txq = &vmxq->vxq_txqueue;
2014 1.1 ryo evcnt_detach(&txq->vxtxq_intr);
2015 1.1 ryo evcnt_detach(&txq->vxtxq_defer);
2016 1.1 ryo evcnt_detach(&txq->vxtxq_deferreq);
2017 1.1 ryo evcnt_detach(&txq->vxtxq_pcqdrop);
2018 1.1 ryo evcnt_detach(&txq->vxtxq_transmitdef);
2019 1.1 ryo evcnt_detach(&txq->vxtxq_watchdogto);
2020 1.1 ryo evcnt_detach(&txq->vxtxq_defragged);
2021 1.1 ryo evcnt_detach(&txq->vxtxq_defrag_failed);
2022 1.1 ryo }
2023 1.1 ryo
2024 1.1 ryo for (i = 0; i < sc->vmx_nrxqueues; i++) {
2025 1.1 ryo vmxq = &sc->vmx_queue[i];
2026 1.1 ryo rxq = &vmxq->vxq_rxqueue;
2027 1.1 ryo evcnt_detach(&rxq->vxrxq_intr);
2028 1.1 ryo evcnt_detach(&rxq->vxrxq_defer);
2029 1.1 ryo evcnt_detach(&rxq->vxrxq_deferreq);
2030 1.1 ryo evcnt_detach(&rxq->vxrxq_mgetcl_failed);
2031 1.1 ryo evcnt_detach(&rxq->vxrxq_mbuf_load_failed);
2032 1.1 ryo }
2033 1.1 ryo
2034 1.1 ryo evcnt_detach(&sc->vmx_event_intr);
2035 1.1 ryo evcnt_detach(&sc->vmx_event_link);
2036 1.1 ryo evcnt_detach(&sc->vmx_event_txqerror);
2037 1.1 ryo evcnt_detach(&sc->vmx_event_rxqerror);
2038 1.1 ryo evcnt_detach(&sc->vmx_event_dic);
2039 1.1 ryo evcnt_detach(&sc->vmx_event_debug);
2040 1.1 ryo }
2041 1.1 ryo
2042 1.1 ryo static void
2043 1.1 ryo vmxnet3_evintr(struct vmxnet3_softc *sc)
2044 1.1 ryo {
2045 1.1 ryo device_t dev;
2046 1.1 ryo struct vmxnet3_txq_shared *ts;
2047 1.1 ryo struct vmxnet3_rxq_shared *rs;
2048 1.1 ryo uint32_t event;
2049 1.1 ryo int reset;
2050 1.1 ryo
2051 1.1 ryo dev = sc->vmx_dev;
2052 1.1 ryo reset = 0;
2053 1.1 ryo
2054 1.1 ryo VMXNET3_CORE_LOCK(sc);
2055 1.1 ryo
2056 1.1 ryo /* Clear events. */
2057 1.1 ryo event = sc->vmx_ds->event;
2058 1.1 ryo vmxnet3_write_bar1(sc, VMXNET3_BAR1_EVENT, event);
2059 1.1 ryo
2060 1.1 ryo if (event & VMXNET3_EVENT_LINK) {
2061 1.1 ryo sc->vmx_event_link.ev_count++;
2062 1.1 ryo vmxnet3_if_link_status(sc);
2063 1.1 ryo if (sc->vmx_link_active != 0)
2064 1.1 ryo if_schedule_deferred_start(&sc->vmx_ethercom.ec_if);
2065 1.1 ryo }
2066 1.1 ryo
2067 1.1 ryo if (event & (VMXNET3_EVENT_TQERROR | VMXNET3_EVENT_RQERROR)) {
2068 1.1 ryo if (event & VMXNET3_EVENT_TQERROR)
2069 1.1 ryo sc->vmx_event_txqerror.ev_count++;
2070 1.1 ryo if (event & VMXNET3_EVENT_RQERROR)
2071 1.1 ryo sc->vmx_event_rxqerror.ev_count++;
2072 1.1 ryo
2073 1.1 ryo reset = 1;
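/*
 * GET_STATUS has the device refresh the per-queue status read
 * below; the command's return value itself is unused here.
 */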
2074 1.1 ryo vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_STATUS);
2075 1.1 ryo ts = sc->vmx_queue[0].vxq_txqueue.vxtxq_ts;
2076 1.1 ryo if (ts->stopped != 0)
2077 1.1 ryo device_printf(dev, "Tx queue error %#x\n", ts->error);
2078 1.1 ryo rs = sc->vmx_queue[0].vxq_rxqueue.vxrxq_rs;
2079 1.1 ryo if (rs->stopped != 0)
2080 1.1 ryo device_printf(dev, "Rx queue error %#x\n", rs->error);
2081 1.1 ryo device_printf(dev, "Rx/Tx queue error event ... resetting\n");
2082 1.1 ryo }
2083 1.1 ryo
2084 1.1 ryo if (event & VMXNET3_EVENT_DIC) {
2085 1.1 ryo sc->vmx_event_dic.ev_count++;
2086 1.1 ryo device_printf(dev, "device implementation change event\n");
2087 1.1 ryo }
2088 1.1 ryo if (event & VMXNET3_EVENT_DEBUG) {
2089 1.1 ryo sc->vmx_event_debug.ev_count++;
2090 1.1 ryo device_printf(dev, "debug event\n");
2091 1.1 ryo }
2092 1.1 ryo
2093 1.1 ryo if (reset != 0)
2094 1.1 ryo vmxnet3_init_locked(sc);
2095 1.1 ryo
2096 1.1 ryo VMXNET3_CORE_UNLOCK(sc);
2097 1.1 ryo }
2098 1.1 ryo
2099 1.1 ryo static bool
2100 1.1 ryo vmxnet3_txq_eof(struct vmxnet3_txqueue *txq, u_int limit)
2101 1.1 ryo {
2102 1.1 ryo struct vmxnet3_softc *sc;
2103 1.1 ryo struct vmxnet3_txring *txr;
2104 1.1 ryo struct vmxnet3_comp_ring *txc;
2105 1.1 ryo struct vmxnet3_txcompdesc *txcd;
2106 1.1 ryo struct vmxnet3_txbuf *txb;
2107 1.1 ryo struct ifnet *ifp;
2108 1.1 ryo struct mbuf *m;
2109 1.1 ryo u_int sop;
2110 1.1 ryo bool more = false;
2111 1.1 ryo
2112 1.1 ryo sc = txq->vxtxq_sc;
2113 1.1 ryo txr = &txq->vxtxq_cmd_ring;
2114 1.1 ryo txc = &txq->vxtxq_comp_ring;
2115 1.1 ryo ifp = &sc->vmx_ethercom.ec_if;
2116 1.1 ryo
2117 1.1 ryo VMXNET3_TXQ_LOCK_ASSERT(txq);
2118 1.1 ryo
2119 1.1 ryo net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
2120 1.1 ryo for (;;) {
2121 1.1 ryo if (limit-- == 0) {
2122 1.1 ryo more = true;
2123 1.1 ryo break;
2124 1.1 ryo }
2125 1.1 ryo
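/*
 * Descriptor ownership is tracked with a generation bit: a
 * completion entry belongs to the driver only while its gen field
 * matches vxcr_gen, and vxcr_gen is flipped each time vxcr_next
 * wraps, so entries left over from the previous lap never match.
 */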
2126 1.1 ryo txcd = &txc->vxcr_u.txcd[txc->vxcr_next];
2127 1.1 ryo if (txcd->gen != txc->vxcr_gen)
2128 1.1 ryo break;
2129 1.1 ryo vmxnet3_barrier(sc, VMXNET3_BARRIER_RD);
2130 1.1 ryo
2131 1.1 ryo if (++txc->vxcr_next == txc->vxcr_ndesc) {
2132 1.1 ryo txc->vxcr_next = 0;
2133 1.1 ryo txc->vxcr_gen ^= 1;
2134 1.1 ryo }
2135 1.1 ryo
2136 1.1 ryo sop = txr->vxtxr_next;
2137 1.1 ryo txb = &txr->vxtxr_txbuf[sop];
2138 1.1 ryo
2139 1.1 ryo if ((m = txb->vtxb_m) != NULL) {
2140 1.1 ryo bus_dmamap_sync(sc->vmx_dmat, txb->vtxb_dmamap,
2141 1.1 ryo 0, txb->vtxb_dmamap->dm_mapsize,
2142 1.1 ryo BUS_DMASYNC_POSTWRITE);
2143 1.1 ryo bus_dmamap_unload(sc->vmx_dmat, txb->vtxb_dmamap);
2144 1.1 ryo
2145 1.1 ryo if_statinc_ref(nsr, if_opackets);
2146 1.1 ryo if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
2147 1.1 ryo if (m->m_flags & M_MCAST)
2148 1.1 ryo if_statinc_ref(nsr, if_omcasts);
2149 1.1 ryo
2150 1.1 ryo m_freem(m);
2151 1.1 ryo txb->vtxb_m = NULL;
2152 1.1 ryo }
2153 1.1 ryo
2154 1.1 ryo txr->vxtxr_next = (txcd->eop_idx + 1) % txr->vxtxr_ndesc;
2155 1.1 ryo }
2156 1.1 ryo IF_STAT_PUTREF(ifp);
2157 1.1 ryo
2158 1.1 ryo if (txr->vxtxr_head == txr->vxtxr_next)
2159 1.1 ryo txq->vxtxq_watchdog = 0;
2160 1.1 ryo
2161 1.1 ryo return more;
2162 1.1 ryo }
2163 1.1 ryo
2164 1.1 ryo static int
2165 1.1 ryo vmxnet3_newbuf(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rxq,
2166 1.1 ryo struct vmxnet3_rxring *rxr)
2167 1.1 ryo {
2168 1.1 ryo struct mbuf *m;
2169 1.1 ryo struct vmxnet3_rxdesc *rxd;
2170 1.1 ryo struct vmxnet3_rxbuf *rxb;
2171 1.1 ryo bus_dma_tag_t tag;
2172 1.1 ryo bus_dmamap_t dmap;
2173 1.1 ryo int idx, btype, error;
2174 1.1 ryo
2175 1.1 ryo tag = sc->vmx_dmat;
2176 1.1 ryo dmap = rxr->vxrxr_spare_dmap;
2177 1.1 ryo idx = rxr->vxrxr_fill;
2178 1.1 ryo rxd = &rxr->vxrxr_rxd[idx];
2179 1.1 ryo rxb = &rxr->vxrxr_rxbuf[idx];
2180 1.1 ryo
2181 1.1 ryo /* Don't allocate buffers for ring 2 for now. */
2182 1.1 ryo if (rxr->vxrxr_rid != 0)
2183 1.1 ryo return (-1);
2184 1.1 ryo btype = VMXNET3_BTYPE_HEAD;
2185 1.1 ryo
2186 1.1 ryo MGETHDR(m, M_DONTWAIT, MT_DATA);
2187 1.1 ryo if (m == NULL)
2188 1.1 ryo return (ENOBUFS);
2189 1.1 ryo
2190 1.1 ryo MCLGET(m, M_DONTWAIT);
2191 1.1 ryo if ((m->m_flags & M_EXT) == 0) {
2192 1.1 ryo rxq->vxrxq_mgetcl_failed.ev_count++;
2193 1.1 ryo m_freem(m);
2194 1.1 ryo return (ENOBUFS);
2195 1.1 ryo }
2196 1.1 ryo
2197 1.1 ryo m->m_pkthdr.len = m->m_len = JUMBO_LEN;
2198 1.1 ryo m_adj(m, ETHER_ALIGN);
2199 1.1 ryo
2200 1.1 ryo error = bus_dmamap_load_mbuf(sc->vmx_dmat, dmap, m, BUS_DMA_NOWAIT);
2201 1.1 ryo if (error) {
2202 1.1 ryo m_freem(m);
2203 1.1 ryo rxq->vxrxq_mbuf_load_failed.ev_count++;
2204 1.1 ryo return (error);
2205 1.1 ryo }
2206 1.1 ryo
2207 1.1 ryo if (rxb->vrxb_m != NULL) {
2208 1.1 ryo bus_dmamap_sync(tag, rxb->vrxb_dmamap,
2209 1.1 ryo 0, rxb->vrxb_dmamap->dm_mapsize,
2210 1.1 ryo BUS_DMASYNC_POSTREAD);
2211 1.1 ryo bus_dmamap_unload(tag, rxb->vrxb_dmamap);
2212 1.1 ryo }
2213 1.1 ryo
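/*
 * Swap the freshly loaded spare map into this slot and keep the
 * slot's old map as the next spare, so a pre-created map is always
 * available for the next replacement buffer.
 */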
2214 1.1 ryo rxr->vxrxr_spare_dmap = rxb->vrxb_dmamap;
2215 1.1 ryo rxb->vrxb_dmamap = dmap;
2216 1.1 ryo rxb->vrxb_m = m;
2217 1.1 ryo
2218 1.1 ryo rxd->addr = DMAADDR(dmap);
2219 1.1 ryo rxd->len = m->m_pkthdr.len;
2220 1.1 ryo rxd->btype = btype;
2221 1.1 ryo rxd->gen = rxr->vxrxr_gen;
2222 1.1 ryo
2223 1.1 ryo vmxnet3_rxr_increment_fill(rxr);
2224 1.1 ryo return (0);
2225 1.1 ryo }
2226 1.1 ryo
2227 1.1 ryo static void
2228 1.1 ryo vmxnet3_rxq_eof_discard(struct vmxnet3_rxqueue *rxq,
2229 1.1 ryo struct vmxnet3_rxring *rxr, int idx)
2230 1.1 ryo {
2231 1.1 ryo struct vmxnet3_rxdesc *rxd;
2232 1.1 ryo
2233 1.1 ryo rxd = &rxr->vxrxr_rxd[idx];
2234 1.1 ryo rxd->gen = rxr->vxrxr_gen;
2235 1.1 ryo vmxnet3_rxr_increment_fill(rxr);
2236 1.1 ryo }
2237 1.1 ryo
2238 1.1 ryo static void
2239 1.1 ryo vmxnet3_rxq_discard_chain(struct vmxnet3_rxqueue *rxq)
2240 1.1 ryo {
2241 1.1 ryo struct vmxnet3_softc *sc;
2242 1.1 ryo struct vmxnet3_rxring *rxr;
2243 1.1 ryo struct vmxnet3_comp_ring *rxc;
2244 1.1 ryo struct vmxnet3_rxcompdesc *rxcd;
2245 1.1 ryo int idx, eof;
2246 1.1 ryo
2247 1.1 ryo sc = rxq->vxrxq_sc;
2248 1.1 ryo rxc = &rxq->vxrxq_comp_ring;
2249 1.1 ryo
2250 1.1 ryo do {
2251 1.1 ryo rxcd = &rxc->vxcr_u.rxcd[rxc->vxcr_next];
2252 1.1 ryo if (rxcd->gen != rxc->vxcr_gen)
2253 1.1 ryo break; /* Not expected. */
2254 1.1 ryo vmxnet3_barrier(sc, VMXNET3_BARRIER_RD);
2255 1.1 ryo
2256 1.1 ryo if (++rxc->vxcr_next == rxc->vxcr_ndesc) {
2257 1.1 ryo rxc->vxcr_next = 0;
2258 1.1 ryo rxc->vxcr_gen ^= 1;
2259 1.1 ryo }
2260 1.1 ryo
2261 1.1 ryo idx = rxcd->rxd_idx;
2262 1.1 ryo eof = rxcd->eop;
2263 1.1 ryo if (rxcd->qid < sc->vmx_nrxqueues)
2264 1.1 ryo rxr = &rxq->vxrxq_cmd_ring[0];
2265 1.1 ryo else
2266 1.1 ryo rxr = &rxq->vxrxq_cmd_ring[1];
2267 1.1 ryo vmxnet3_rxq_eof_discard(rxq, rxr, idx);
2268 1.1 ryo } while (!eof);
2269 1.1 ryo }
2270 1.1 ryo
2271 1.1 ryo static void
2272 1.1 ryo vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *rxcd, struct mbuf *m)
2273 1.1 ryo {
2274 1.1 ryo if (rxcd->no_csum)
2275 1.1 ryo return;
2276 1.1 ryo
2277 1.1 ryo if (rxcd->ipv4) {
2278 1.1 ryo m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
2279 1.1 ryo if (rxcd->ipcsum_ok == 0)
2280 1.1 ryo m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
2281 1.1 ryo }
2282 1.1 ryo
2283 1.1 ryo if (rxcd->fragment)
2284 1.1 ryo return;
2285 1.1 ryo
2286 1.1 ryo if (rxcd->tcp) {
2287 1.1 ryo m->m_pkthdr.csum_flags |=
2288 1.1 ryo rxcd->ipv4 ? M_CSUM_TCPv4 : M_CSUM_TCPv6;
2289 1.1 ryo if ((rxcd->csum_ok) == 0)
2290 1.1 ryo m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
2291 1.1 ryo }
2292 1.1 ryo
2293 1.1 ryo if (rxcd->udp) {
2294 1.1 ryo m->m_pkthdr.csum_flags |=
2295 1.1 ryo rxcd->ipv4 ? M_CSUM_UDPv4 : M_CSUM_UDPv6;
2296 1.1 ryo if ((rxcd->csum_ok) == 0)
2297 1.1 ryo m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
2298 1.1 ryo }
2299 1.1 ryo }
2300 1.1 ryo
2301 1.1 ryo static void
2302 1.1 ryo vmxnet3_rxq_input(struct vmxnet3_rxqueue *rxq,
2303 1.1 ryo struct vmxnet3_rxcompdesc *rxcd, struct mbuf *m)
2304 1.1 ryo {
2305 1.1 ryo struct vmxnet3_softc *sc;
2306 1.1 ryo struct ifnet *ifp;
2307 1.1 ryo
2308 1.1 ryo sc = rxq->vxrxq_sc;
2309 1.1 ryo ifp = &sc->vmx_ethercom.ec_if;
2310 1.1 ryo
2311 1.1 ryo if (rxcd->error) {
2312 1.1 ryo if_statinc(ifp, if_ierrors);
2313 1.1 ryo m_freem(m);
2314 1.1 ryo return;
2315 1.1 ryo }
2316 1.1 ryo
2317 1.1 ryo if (!rxcd->no_csum)
2318 1.1 ryo vmxnet3_rx_csum(rxcd, m);
2319 1.1 ryo if (rxcd->vlan)
2320 1.1 ryo vlan_set_tag(m, rxcd->vtag);
2321 1.1 ryo
2322 1.1 ryo net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
2323 1.1 ryo if_statinc_ref(nsr, if_ipackets);
2324 1.1 ryo if_statadd_ref(nsr, if_ibytes, m->m_pkthdr.len);
2325 1.1 ryo IF_STAT_PUTREF(ifp);
2326 1.1 ryo
2327 1.1 ryo if_percpuq_enqueue(ifp->if_percpuq, m);
2328 1.1 ryo }
2329 1.1 ryo
2330 1.1 ryo static bool
2331 1.1 ryo vmxnet3_rxq_eof(struct vmxnet3_rxqueue *rxq, u_int limit)
2332 1.1 ryo {
2333 1.1 ryo struct vmxnet3_softc *sc;
2334 1.1 ryo struct ifnet *ifp;
2335 1.1 ryo struct vmxnet3_rxring *rxr;
2336 1.1 ryo struct vmxnet3_comp_ring *rxc;
2337 1.1 ryo struct vmxnet3_rxdesc *rxd __diagused;
2338 1.1 ryo struct vmxnet3_rxcompdesc *rxcd;
2339 1.1 ryo struct mbuf *m, *m_head, *m_tail;
2340 1.1 ryo int idx, length;
2341 1.1 ryo bool more = false;
2342 1.1 ryo
2343 1.1 ryo sc = rxq->vxrxq_sc;
2344 1.1 ryo ifp = &sc->vmx_ethercom.ec_if;
2345 1.1 ryo rxc = &rxq->vxrxq_comp_ring;
2346 1.1 ryo
2347 1.1 ryo VMXNET3_RXQ_LOCK_ASSERT(rxq);
2348 1.1 ryo
2349 1.1 ryo if ((ifp->if_flags & IFF_RUNNING) == 0)
2350 1.1 ryo return more;
2351 1.1 ryo
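/*
 * Pick up any frame left half-assembled when a previous pass ran
 * out of budget; m_tail lets body buffers be appended without
 * walking the chain.
 */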
2352 1.1 ryo m_head = rxq->vxrxq_mhead;
2353 1.1 ryo rxq->vxrxq_mhead = NULL;
2354 1.1 ryo m_tail = rxq->vxrxq_mtail;
2355 1.1 ryo rxq->vxrxq_mtail = NULL;
2356 1.1 ryo KASSERT(m_head == NULL || m_tail != NULL);
2357 1.1 ryo
2358 1.1 ryo for (;;) {
2359 1.1 ryo if (limit-- == 0) {
2360 1.1 ryo more = true;
2361 1.1 ryo break;
2362 1.1 ryo }
2363 1.1 ryo
2364 1.1 ryo rxcd = &rxc->vxcr_u.rxcd[rxc->vxcr_next];
2365 1.1 ryo if (rxcd->gen != rxc->vxcr_gen) {
2366 1.1 ryo rxq->vxrxq_mhead = m_head;
2367 1.1 ryo rxq->vxrxq_mtail = m_tail;
2368 1.1 ryo break;
2369 1.1 ryo }
2370 1.1 ryo vmxnet3_barrier(sc, VMXNET3_BARRIER_RD);
2371 1.1 ryo
2372 1.1 ryo if (++rxc->vxcr_next == rxc->vxcr_ndesc) {
2373 1.1 ryo rxc->vxcr_next = 0;
2374 1.1 ryo rxc->vxcr_gen ^= 1;
2375 1.1 ryo }
2376 1.1 ryo
2377 1.1 ryo idx = rxcd->rxd_idx;
2378 1.1 ryo length = rxcd->len;
2379 1.1 ryo if (rxcd->qid < sc->vmx_nrxqueues)
2380 1.1 ryo rxr = &rxq->vxrxq_cmd_ring[0];
2381 1.1 ryo else
2382 1.1 ryo rxr = &rxq->vxrxq_cmd_ring[1];
2383 1.1 ryo rxd = &rxr->vxrxr_rxd[idx];
2384 1.1 ryo
2385 1.1 ryo m = rxr->vxrxr_rxbuf[idx].vrxb_m;
2386 1.1 ryo KASSERT(m != NULL);
2387 1.1 ryo
2388 1.1 ryo /*
2389 1.1 ryo * The host may skip descriptors. We detect this when this
2390 1.1 ryo * descriptor does not match the previous fill index. Catch
2391 1.1 ryo * up with the host now.
2392 1.1 ryo */
2393 1.1 ryo if (__predict_false(rxr->vxrxr_fill != idx)) {
2394 1.1 ryo while (rxr->vxrxr_fill != idx) {
2395 1.1 ryo rxr->vxrxr_rxd[rxr->vxrxr_fill].gen =
2396 1.1 ryo rxr->vxrxr_gen;
2397 1.1 ryo vmxnet3_rxr_increment_fill(rxr);
2398 1.1 ryo }
2399 1.1 ryo }
2400 1.1 ryo
2401 1.1 ryo if (rxcd->sop) {
2402 1.1 ryo /* start of frame w/o head buffer */
2403 1.1 ryo KASSERT(rxd->btype == VMXNET3_BTYPE_HEAD);
2404 1.1 ryo /* start of frame not in ring 0 */
2405 1.1 ryo KASSERT(rxr == &rxq->vxrxq_cmd_ring[0]);
2406 1.1 ryo /* duplicate start of frame? */
2407 1.1 ryo KASSERT(m_head == NULL);
2408 1.1 ryo
2409 1.1 ryo if (length == 0) {
2410 1.1 ryo /* Just ignore this descriptor. */
2411 1.1 ryo vmxnet3_rxq_eof_discard(rxq, rxr, idx);
2412 1.1 ryo goto nextp;
2413 1.1 ryo }
2414 1.1 ryo
2415 1.1 ryo if (vmxnet3_newbuf(sc, rxq, rxr) != 0) {
2416 1.1 ryo if_statinc(ifp, if_iqdrops);
2417 1.1 ryo vmxnet3_rxq_eof_discard(rxq, rxr, idx);
2418 1.1 ryo if (!rxcd->eop)
2419 1.1 ryo vmxnet3_rxq_discard_chain(rxq);
2420 1.1 ryo goto nextp;
2421 1.1 ryo }
2422 1.1 ryo
2423 1.1 ryo m_set_rcvif(m, ifp);
2424 1.1 ryo m->m_pkthdr.len = m->m_len = length;
2425 1.1 ryo m->m_pkthdr.csum_flags = 0;
2426 1.1 ryo m_head = m_tail = m;
2427 1.1 ryo
2428 1.1 ryo } else {
2429 1.1 ryo /* non start of frame w/o body buffer */
2430 1.1 ryo KASSERT(rxd->btype == VMXNET3_BTYPE_BODY);
2431 1.1 ryo /* frame not started? */
2432 1.1 ryo KASSERT(m_head != NULL);
2433 1.1 ryo
2434 1.1 ryo if (vmxnet3_newbuf(sc, rxq, rxr) != 0) {
2435 1.1 ryo if_statinc(ifp, if_iqdrops);
2436 1.1 ryo vmxnet3_rxq_eof_discard(rxq, rxr, idx);
2437 1.1 ryo if (!rxcd->eop)
2438 1.1 ryo vmxnet3_rxq_discard_chain(rxq);
2439 1.1 ryo m_freem(m_head);
2440 1.1 ryo m_head = m_tail = NULL;
2441 1.1 ryo goto nextp;
2442 1.1 ryo }
2443 1.1 ryo
2444 1.1 ryo m->m_len = length;
2445 1.1 ryo m_head->m_pkthdr.len += length;
2446 1.1 ryo m_tail->m_next = m;
2447 1.1 ryo m_tail = m;
2448 1.1 ryo }
2449 1.1 ryo
2450 1.1 ryo if (rxcd->eop) {
2451 1.1 ryo vmxnet3_rxq_input(rxq, rxcd, m_head);
2452 1.1 ryo m_head = m_tail = NULL;
2453 1.1 ryo
2454 1.1 ryo /* Must recheck after dropping the Rx lock. */
2455 1.1 ryo if ((ifp->if_flags & IFF_RUNNING) == 0)
2456 1.1 ryo break;
2457 1.1 ryo }
2458 1.1 ryo
2459 1.1 ryo nextp:
2460 1.1 ryo if (__predict_false(rxq->vxrxq_rs->update_rxhead)) {
2461 1.1 ryo int qid = rxcd->qid;
2462 1.1 ryo bus_size_t r;
2463 1.1 ryo
2464 1.1 ryo idx = (idx + 1) % rxr->vxrxr_ndesc;
2465 1.1 ryo if (qid >= sc->vmx_nrxqueues) {
2466 1.1 ryo qid -= sc->vmx_nrxqueues;
2467 1.1 ryo r = VMXNET3_BAR0_RXH2(qid);
2468 1.1 ryo } else
2469 1.1 ryo r = VMXNET3_BAR0_RXH1(qid);
2470 1.1 ryo vmxnet3_write_bar0(sc, r, idx);
2471 1.1 ryo }
2472 1.1 ryo }
2473 1.1 ryo
2474 1.1 ryo return more;
2475 1.1 ryo }
2476 1.1 ryo
2477 1.1 ryo static inline void
2478 1.1 ryo vmxnet3_sched_handle_queue(struct vmxnet3_softc *sc, struct vmxnet3_queue *vmxq)
2479 1.1 ryo {
2480 1.1 ryo
2481 1.1 ryo if (vmxq->vxq_workqueue) {
2482 1.1 ryo workqueue_enqueue(sc->vmx_queue_wq, &vmxq->vxq_wq_cookie,
2483 1.1 ryo curcpu());
2484 1.1 ryo } else {
2485 1.1 ryo softint_schedule(vmxq->vxq_si);
2486 1.1 ryo }
2487 1.1 ryo }
2488 1.1 ryo
2489 1.1 ryo static int
2490 1.1 ryo vmxnet3_legacy_intr(void *xsc)
2491 1.1 ryo {
2492 1.1 ryo struct vmxnet3_softc *sc;
2493 1.1 ryo struct vmxnet3_rxqueue *rxq;
2494 1.1 ryo struct vmxnet3_txqueue *txq;
2495 1.1 ryo u_int txlimit, rxlimit;
2496 1.1 ryo bool txmore, rxmore;
2497 1.1 ryo
2498 1.1 ryo sc = xsc;
2499 1.1 ryo rxq = &sc->vmx_queue[0].vxq_rxqueue;
2500 1.1 ryo txq = &sc->vmx_queue[0].vxq_txqueue;
2501 1.1 ryo txlimit = sc->vmx_tx_intr_process_limit;
2502 1.1 ryo rxlimit = sc->vmx_rx_intr_process_limit;
2503 1.1 ryo
2504 1.1 ryo if (sc->vmx_intr_type == VMXNET3_IT_LEGACY) {
2505 1.1 ryo if (vmxnet3_read_bar1(sc, VMXNET3_BAR1_INTR) == 0)
2506 1.1 ryo return (0);
2507 1.1 ryo }
2508 1.1 ryo if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
2509 1.1 ryo vmxnet3_disable_all_intrs(sc);
2510 1.1 ryo
2511 1.1 ryo if (sc->vmx_ds->event != 0)
2512 1.1 ryo vmxnet3_evintr(sc);
2513 1.1 ryo
2514 1.1 ryo VMXNET3_RXQ_LOCK(rxq);
2515 1.1 ryo rxmore = vmxnet3_rxq_eof(rxq, rxlimit);
2516 1.1 ryo VMXNET3_RXQ_UNLOCK(rxq);
2517 1.1 ryo
2518 1.1 ryo VMXNET3_TXQ_LOCK(txq);
2519 1.1 ryo txmore = vmxnet3_txq_eof(txq, txlimit);
2520 1.1 ryo VMXNET3_TXQ_UNLOCK(txq);
2521 1.1 ryo
2522 1.1 ryo if (txmore || rxmore) {
2523 1.1 ryo vmxnet3_sched_handle_queue(sc, &sc->vmx_queue[0]);
2524 1.1 ryo } else {
2525 1.1 ryo if_schedule_deferred_start(&sc->vmx_ethercom.ec_if);
2526 1.1 ryo vmxnet3_enable_all_intrs(sc);
2527 1.1 ryo }
2528 1.1 ryo return (1);
2529 1.1 ryo }
2530 1.1 ryo
2531 1.1 ryo static int
2532 1.1 ryo vmxnet3_txrxq_intr(void *xvmxq)
2533 1.1 ryo {
2534 1.1 ryo struct vmxnet3_softc *sc;
2535 1.1 ryo struct vmxnet3_queue *vmxq;
2536 1.1 ryo struct vmxnet3_txqueue *txq;
2537 1.1 ryo struct vmxnet3_rxqueue *rxq;
2538 1.1 ryo u_int txlimit, rxlimit;
2539 1.1 ryo bool txmore, rxmore;
2540 1.1 ryo
2541 1.1 ryo vmxq = xvmxq;
2542 1.1 ryo txq = &vmxq->vxq_txqueue;
2543 1.1 ryo rxq = &vmxq->vxq_rxqueue;
2544 1.1 ryo sc = txq->vxtxq_sc;
2545 1.1 ryo txlimit = sc->vmx_tx_intr_process_limit;
2546 1.1 ryo rxlimit = sc->vmx_rx_intr_process_limit;
2547 1.1 ryo vmxq->vxq_workqueue = sc->vmx_txrx_workqueue;
2548 1.1 ryo
2549 1.1 ryo if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
2550 1.1 ryo vmxnet3_disable_intr(sc, vmxq->vxq_intr_idx);
2551 1.1 ryo
2552 1.1 ryo VMXNET3_TXQ_LOCK(txq);
2553 1.1 ryo txq->vxtxq_intr.ev_count++;
2554 1.1 ryo txmore = vmxnet3_txq_eof(txq, txlimit);
2555 1.1 ryo VMXNET3_TXQ_UNLOCK(txq);
2556 1.1 ryo
2557 1.1 ryo VMXNET3_RXQ_LOCK(rxq);
2558 1.1 ryo rxq->vxrxq_intr.ev_count++;
2559 1.1 ryo rxmore = vmxnet3_rxq_eof(rxq, rxlimit);
2560 1.1 ryo VMXNET3_RXQ_UNLOCK(rxq);
2561 1.1 ryo
2562 1.1 ryo if (txmore || rxmore) {
2563 1.1 ryo vmxnet3_sched_handle_queue(sc, vmxq);
2564 1.1 ryo } else {
2565 1.1 ryo /* for ALTQ */
2566 1.1 ryo if (vmxq->vxq_id == 0)
2567 1.1 ryo if_schedule_deferred_start(&sc->vmx_ethercom.ec_if);
2568 1.1 ryo softint_schedule(txq->vxtxq_si);
2569 1.1 ryo
2570 1.1 ryo vmxnet3_enable_intr(sc, vmxq->vxq_intr_idx);
2571 1.1 ryo }
2572 1.1 ryo
2573 1.1 ryo return (1);
2574 1.1 ryo }
2575 1.1 ryo
2576 1.1 ryo static void
2577 1.1 ryo vmxnet3_handle_queue(void *xvmxq)
2578 1.1 ryo {
2579 1.1 ryo struct vmxnet3_softc *sc;
2580 1.1 ryo struct vmxnet3_queue *vmxq;
2581 1.1 ryo struct vmxnet3_txqueue *txq;
2582 1.1 ryo struct vmxnet3_rxqueue *rxq;
2583 1.1 ryo u_int txlimit, rxlimit;
2584 1.1 ryo bool txmore, rxmore;
2585 1.1 ryo
2586 1.1 ryo vmxq = xvmxq;
2587 1.1 ryo txq = &vmxq->vxq_txqueue;
2588 1.1 ryo rxq = &vmxq->vxq_rxqueue;
2589 1.1 ryo sc = txq->vxtxq_sc;
2590 1.1 ryo txlimit = sc->vmx_tx_process_limit;
2591 1.1 ryo rxlimit = sc->vmx_rx_process_limit;
2592 1.1 ryo
2593 1.1 ryo VMXNET3_TXQ_LOCK(txq);
2594 1.1 ryo txq->vxtxq_defer.ev_count++;
2595 1.1 ryo txmore = vmxnet3_txq_eof(txq, txlimit);
2596 1.1 ryo if (txmore)
2597 1.1 ryo txq->vxtxq_deferreq.ev_count++;
2598 1.1 ryo /* for ALTQ */
2599 1.1 ryo if (vmxq->vxq_id == 0)
2600 1.1 ryo if_schedule_deferred_start(&sc->vmx_ethercom.ec_if);
2601 1.1 ryo softint_schedule(txq->vxtxq_si);
2602 1.1 ryo VMXNET3_TXQ_UNLOCK(txq);
2603 1.1 ryo
2604 1.1 ryo VMXNET3_RXQ_LOCK(rxq);
2605 1.1 ryo rxq->vxrxq_defer.ev_count++;
2606 1.1 ryo rxmore = vmxnet3_rxq_eof(rxq, rxlimit);
2607 1.1 ryo if (rxmore)
2608 1.1 ryo rxq->vxrxq_deferreq.ev_count++;
2609 1.1 ryo VMXNET3_RXQ_UNLOCK(rxq);
2610 1.1 ryo
2611 1.1 ryo if (txmore || rxmore)
2612 1.1 ryo vmxnet3_sched_handle_queue(sc, vmxq);
2613 1.1 ryo else
2614 1.1 ryo vmxnet3_enable_intr(sc, vmxq->vxq_intr_idx);
2615 1.1 ryo }
2616 1.1 ryo
2617 1.1 ryo static void
2618 1.1 ryo vmxnet3_handle_queue_work(struct work *wk, void *context)
2619 1.1 ryo {
2620 1.1 ryo struct vmxnet3_queue *vmxq;
2621 1.1 ryo
2622 1.1 ryo vmxq = container_of(wk, struct vmxnet3_queue, vxq_wq_cookie);
2623 1.1 ryo vmxnet3_handle_queue(vmxq);
2624 1.1 ryo }
2625 1.1 ryo
2626 1.1 ryo static int
2627 1.1 ryo vmxnet3_event_intr(void *xsc)
2628 1.1 ryo {
2629 1.1 ryo struct vmxnet3_softc *sc;
2630 1.1 ryo
2631 1.1 ryo sc = xsc;
2632 1.1 ryo
2633 1.1 ryo if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
2634 1.1 ryo vmxnet3_disable_intr(sc, sc->vmx_event_intr_idx);
2635 1.1 ryo
2636 1.1 ryo sc->vmx_event_intr.ev_count++;
2637 1.1 ryo
2638 1.1 ryo if (sc->vmx_ds->event != 0)
2639 1.1 ryo vmxnet3_evintr(sc);
2640 1.1 ryo
2641 1.1 ryo vmxnet3_enable_intr(sc, sc->vmx_event_intr_idx);
2642 1.1 ryo
2643 1.1 ryo return (1);
2644 1.1 ryo }
2645 1.1 ryo
2646 1.1 ryo static void
2647 1.1 ryo vmxnet3_txstop(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *txq)
2648 1.1 ryo {
2649 1.1 ryo struct vmxnet3_txring *txr;
2650 1.1 ryo struct vmxnet3_txbuf *txb;
2651 1.1 ryo int i;
2652 1.1 ryo
2653 1.1 ryo txr = &txq->vxtxq_cmd_ring;
2654 1.1 ryo
2655 1.1 ryo for (i = 0; i < txr->vxtxr_ndesc; i++) {
2656 1.1 ryo txb = &txr->vxtxr_txbuf[i];
2657 1.1 ryo
2658 1.1 ryo if (txb->vtxb_m == NULL)
2659 1.1 ryo continue;
2660 1.1 ryo
2661 1.1 ryo bus_dmamap_sync(sc->vmx_dmat, txb->vtxb_dmamap,
2662 1.1 ryo 0, txb->vtxb_dmamap->dm_mapsize,
2663 1.1 ryo BUS_DMASYNC_POSTWRITE);
2664 1.1 ryo bus_dmamap_unload(sc->vmx_dmat, txb->vtxb_dmamap);
2665 1.1 ryo m_freem(txb->vtxb_m);
2666 1.1 ryo txb->vtxb_m = NULL;
2667 1.1 ryo }
2668 1.1 ryo }
2669 1.1 ryo
2670 1.1 ryo static void
2671 1.1 ryo vmxnet3_rxstop(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rxq)
2672 1.1 ryo {
2673 1.1 ryo struct vmxnet3_rxring *rxr;
2674 1.1 ryo struct vmxnet3_rxbuf *rxb;
2675 1.1 ryo int i, j;
2676 1.1 ryo
2677 1.1 ryo if (rxq->vxrxq_mhead != NULL) {
2678 1.1 ryo m_freem(rxq->vxrxq_mhead);
2679 1.1 ryo rxq->vxrxq_mhead = NULL;
2680 1.1 ryo rxq->vxrxq_mtail = NULL;
2681 1.1 ryo }
2682 1.1 ryo
2683 1.1 ryo for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
2684 1.1 ryo rxr = &rxq->vxrxq_cmd_ring[i];
2685 1.1 ryo
2686 1.1 ryo for (j = 0; j < rxr->vxrxr_ndesc; j++) {
2687 1.1 ryo rxb = &rxr->vxrxr_rxbuf[j];
2688 1.1 ryo
2689 1.1 ryo if (rxb->vrxb_m == NULL)
2690 1.1 ryo continue;
2691 1.1 ryo
2692 1.1 ryo bus_dmamap_sync(sc->vmx_dmat, rxb->vrxb_dmamap,
2693 1.1 ryo 0, rxb->vrxb_dmamap->dm_mapsize,
2694 1.1 ryo BUS_DMASYNC_POSTREAD);
2695 1.1 ryo bus_dmamap_unload(sc->vmx_dmat, rxb->vrxb_dmamap);
2696 1.1 ryo m_freem(rxb->vrxb_m);
2697 1.1 ryo rxb->vrxb_m = NULL;
2698 1.1 ryo }
2699 1.1 ryo }
2700 1.1 ryo }
2701 1.1 ryo
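/*
 * Acquire and release each queue lock in turn so that any queue handler
 * still running on another CPU drains out before the queues are torn
 * down.
 */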
2702 1.1 ryo static void
2703 1.1 ryo vmxnet3_stop_rendezvous(struct vmxnet3_softc *sc)
2704 1.1 ryo {
2705 1.1 ryo struct vmxnet3_rxqueue *rxq;
2706 1.1 ryo struct vmxnet3_txqueue *txq;
2707 1.1 ryo int i;
2708 1.1 ryo
2709 1.1 ryo for (i = 0; i < sc->vmx_nrxqueues; i++) {
2710 1.1 ryo rxq = &sc->vmx_queue[i].vxq_rxqueue;
2711 1.1 ryo VMXNET3_RXQ_LOCK(rxq);
2712 1.1 ryo VMXNET3_RXQ_UNLOCK(rxq);
2713 1.1 ryo }
2714 1.1 ryo for (i = 0; i < sc->vmx_ntxqueues; i++) {
2715 1.1 ryo txq = &sc->vmx_queue[i].vxq_txqueue;
2716 1.1 ryo VMXNET3_TXQ_LOCK(txq);
2717 1.1 ryo VMXNET3_TXQ_UNLOCK(txq);
2718 1.1 ryo }
2719 1.1 ryo }
2720 1.1 ryo
2721 1.1 ryo static void
2722 1.1 ryo vmxnet3_stop_locked(struct vmxnet3_softc *sc)
2723 1.1 ryo {
2724 1.1 ryo struct ifnet *ifp;
2725 1.1 ryo int q;
2726 1.1 ryo
2727 1.1 ryo ifp = &sc->vmx_ethercom.ec_if;
2728 1.1 ryo VMXNET3_CORE_LOCK_ASSERT(sc);
2729 1.1 ryo
2730 1.1 ryo ifp->if_flags &= ~IFF_RUNNING;
2731 1.1 ryo sc->vmx_link_active = 0;
2732 1.1 ryo callout_stop(&sc->vmx_tick);
2733 1.1 ryo
2734 1.1 ryo /* Disable interrupts. */
2735 1.1 ryo vmxnet3_disable_all_intrs(sc);
2736 1.1 ryo vmxnet3_write_cmd(sc, VMXNET3_CMD_DISABLE);
2737 1.1 ryo
2738 1.1 ryo vmxnet3_stop_rendezvous(sc);
2739 1.1 ryo
2740 1.1 ryo for (q = 0; q < sc->vmx_ntxqueues; q++)
2741 1.1 ryo vmxnet3_txstop(sc, &sc->vmx_queue[q].vxq_txqueue);
2742 1.1 ryo for (q = 0; q < sc->vmx_nrxqueues; q++)
2743 1.1 ryo vmxnet3_rxstop(sc, &sc->vmx_queue[q].vxq_rxqueue);
2744 1.1 ryo
2745 1.1 ryo vmxnet3_write_cmd(sc, VMXNET3_CMD_RESET);
2746 1.1 ryo }
2747 1.1 ryo
2748 1.1 ryo static void
2749 1.1 ryo vmxnet3_stop(struct ifnet *ifp, int disable)
2750 1.1 ryo {
2751 1.1 ryo struct vmxnet3_softc *sc = ifp->if_softc;
2752 1.1 ryo
2753 1.1 ryo VMXNET3_CORE_LOCK(sc);
2754 1.1 ryo vmxnet3_stop_locked(sc);
2755 1.1 ryo VMXNET3_CORE_UNLOCK(sc);
2756 1.1 ryo }
2757 1.1 ryo
2758 1.1 ryo static void
2759 1.1 ryo vmxnet3_txinit(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *txq)
2760 1.1 ryo {
2761 1.1 ryo struct vmxnet3_txring *txr;
2762 1.1 ryo struct vmxnet3_comp_ring *txc;
2763 1.1 ryo
2764 1.1 ryo txr = &txq->vxtxq_cmd_ring;
2765 1.1 ryo txr->vxtxr_head = 0;
2766 1.1 ryo txr->vxtxr_next = 0;
2767 1.1 ryo txr->vxtxr_gen = VMXNET3_INIT_GEN;
2768 1.1 ryo memset(txr->vxtxr_txd, 0,
2769 1.1 ryo txr->vxtxr_ndesc * sizeof(struct vmxnet3_txdesc));
2770 1.1 ryo
2771 1.1 ryo txc = &txq->vxtxq_comp_ring;
2772 1.1 ryo txc->vxcr_next = 0;
2773 1.1 ryo txc->vxcr_gen = VMXNET3_INIT_GEN;
2774 1.1 ryo memset(txc->vxcr_u.txcd, 0,
2775 1.1 ryo txc->vxcr_ndesc * sizeof(struct vmxnet3_txcompdesc));
2776 1.1 ryo }
2777 1.1 ryo
2778 1.1 ryo static int
2779 1.1 ryo vmxnet3_rxinit(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rxq)
2780 1.1 ryo {
2781 1.1 ryo struct vmxnet3_rxring *rxr;
2782 1.1 ryo struct vmxnet3_comp_ring *rxc;
2783 1.1 ryo int i, populate, idx, error;
2784 1.1 ryo
2785 1.1 ryo /* LRO and jumbo frames are not supported yet */
2786 1.1 ryo populate = 1;
2787 1.1 ryo
2788 1.1 ryo for (i = 0; i < populate; i++) {
2789 1.1 ryo rxr = &rxq->vxrxq_cmd_ring[i];
2790 1.1 ryo rxr->vxrxr_fill = 0;
2791 1.1 ryo rxr->vxrxr_gen = VMXNET3_INIT_GEN;
2792 1.1 ryo memset(rxr->vxrxr_rxd, 0,
2793 1.1 ryo rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxdesc));
2794 1.1 ryo
2795 1.1 ryo for (idx = 0; idx < rxr->vxrxr_ndesc; idx++) {
2796 1.1 ryo error = vmxnet3_newbuf(sc, rxq, rxr);
2797 1.1 ryo if (error)
2798 1.1 ryo return (error);
2799 1.1 ryo }
2800 1.1 ryo }
2801 1.1 ryo
2802 1.1 ryo for (/**/; i < VMXNET3_RXRINGS_PERQ; i++) {
2803 1.1 ryo rxr = &rxq->vxrxq_cmd_ring[i];
2804 1.1 ryo rxr->vxrxr_fill = 0;
2805 1.1 ryo rxr->vxrxr_gen = 0;
2806 1.1 ryo memset(rxr->vxrxr_rxd, 0,
2807 1.1 ryo rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxdesc));
2808 1.1 ryo }
2809 1.1 ryo
2810 1.1 ryo rxc = &rxq->vxrxq_comp_ring;
2811 1.1 ryo rxc->vxcr_next = 0;
2812 1.1 ryo rxc->vxcr_gen = VMXNET3_INIT_GEN;
2813 1.1 ryo memset(rxc->vxcr_u.rxcd, 0,
2814 1.1 ryo rxc->vxcr_ndesc * sizeof(struct vmxnet3_rxcompdesc));
2815 1.1 ryo
2816 1.1 ryo return (0);
2817 1.1 ryo }
2818 1.1 ryo
2819 1.1 ryo static int
2820 1.1 ryo vmxnet3_reinit_queues(struct vmxnet3_softc *sc)
2821 1.1 ryo {
2822 1.1 ryo device_t dev;
2823 1.1 ryo int q, error;
2824 1.1 ryo dev = sc->vmx_dev;
2825 1.1 ryo
2826 1.1 ryo for (q = 0; q < sc->vmx_ntxqueues; q++)
2827 1.1 ryo vmxnet3_txinit(sc, &sc->vmx_queue[q].vxq_txqueue);
2828 1.1 ryo
2829 1.1 ryo for (q = 0; q < sc->vmx_nrxqueues; q++) {
2830 1.1 ryo error = vmxnet3_rxinit(sc, &sc->vmx_queue[q].vxq_rxqueue);
2831 1.1 ryo if (error) {
2832 1.1 ryo device_printf(dev, "cannot populate Rx queue %d\n", q);
2833 1.1 ryo return (error);
2834 1.1 ryo }
2835 1.1 ryo }
2836 1.1 ryo
2837 1.1 ryo return (0);
2838 1.1 ryo }
2839 1.1 ryo
2840 1.1 ryo static int
2841 1.1 ryo vmxnet3_enable_device(struct vmxnet3_softc *sc)
2842 1.1 ryo {
2843 1.1 ryo int q;
2844 1.1 ryo
2845 1.1 ryo if (vmxnet3_read_cmd(sc, VMXNET3_CMD_ENABLE) != 0) {
2846 1.1 ryo device_printf(sc->vmx_dev, "device enable command failed!\n");
2847 1.1 ryo return (1);
2848 1.1 ryo }
2849 1.1 ryo
2850 1.1 ryo /* Reset the Rx queue heads. */
2851 1.1 ryo for (q = 0; q < sc->vmx_nrxqueues; q++) {
2852 1.1 ryo vmxnet3_write_bar0(sc, VMXNET3_BAR0_RXH1(q), 0);
2853 1.1 ryo vmxnet3_write_bar0(sc, VMXNET3_BAR0_RXH2(q), 0);
2854 1.1 ryo }
2855 1.1 ryo
2856 1.1 ryo return (0);
2857 1.1 ryo }
2858 1.1 ryo
2859 1.1 ryo static void
2860 1.1 ryo vmxnet3_reinit_rxfilters(struct vmxnet3_softc *sc)
2861 1.1 ryo {
2862 1.1 ryo
2863 1.1 ryo vmxnet3_set_rxfilter(sc);
2864 1.1 ryo
2865 1.1 ryo memset(sc->vmx_ds->vlan_filter, 0, sizeof(sc->vmx_ds->vlan_filter));
2866 1.1 ryo vmxnet3_write_cmd(sc, VMXNET3_CMD_VLAN_FILTER);
2867 1.1 ryo }
2868 1.1 ryo
2869 1.1 ryo static int
2870 1.1 ryo vmxnet3_reinit(struct vmxnet3_softc *sc)
2871 1.1 ryo {
2872 1.1 ryo
2873 1.1 ryo vmxnet3_set_lladdr(sc);
2874 1.1 ryo vmxnet3_reinit_shared_data(sc);
2875 1.1 ryo
2876 1.1 ryo if (vmxnet3_reinit_queues(sc) != 0)
2877 1.1 ryo return (ENXIO);
2878 1.1 ryo
2879 1.1 ryo if (vmxnet3_enable_device(sc) != 0)
2880 1.1 ryo return (ENXIO);
2881 1.1 ryo
2882 1.1 ryo vmxnet3_reinit_rxfilters(sc);
2883 1.1 ryo
2884 1.1 ryo return (0);
2885 1.1 ryo }
2886 1.1 ryo
2887 1.1 ryo static int
2888 1.1 ryo vmxnet3_init_locked(struct vmxnet3_softc *sc)
2889 1.1 ryo {
2890 1.1 ryo struct ifnet *ifp = &sc->vmx_ethercom.ec_if;
2891 1.1 ryo int error;
2892 1.1 ryo
2893 1.1 ryo vmxnet3_stop_locked(sc);
2894 1.1 ryo
2895 1.1 ryo error = vmxnet3_reinit(sc);
2896 1.1 ryo if (error) {
2897 1.1 ryo vmxnet3_stop_locked(sc);
2898 1.1 ryo return (error);
2899 1.1 ryo }
2900 1.1 ryo
2901 1.1 ryo ifp->if_flags |= IFF_RUNNING;
2902 1.1 ryo vmxnet3_cmd_link_status(ifp);
2903 1.1 ryo
2904 1.1 ryo vmxnet3_enable_all_intrs(sc);
2905 1.1 ryo callout_reset(&sc->vmx_tick, hz, vmxnet3_tick, sc);
2906 1.1 ryo
2907 1.1 ryo return (0);
2908 1.1 ryo }
2909 1.1 ryo
2910 1.1 ryo static int
2911 1.1 ryo vmxnet3_init(struct ifnet *ifp)
2912 1.1 ryo {
2913 1.1 ryo struct vmxnet3_softc *sc = ifp->if_softc;
2914 1.1 ryo int error;
2915 1.1 ryo
2916 1.1 ryo VMXNET3_CORE_LOCK(sc);
2917 1.1 ryo error = vmxnet3_init_locked(sc);
2918 1.1 ryo VMXNET3_CORE_UNLOCK(sc);
2919 1.1 ryo
2920 1.1 ryo return (error);
2921 1.1 ryo }
2922 1.1 ryo
2923 1.1 ryo static int
2924 1.1 ryo vmxnet3_txq_offload_ctx(struct vmxnet3_txqueue *txq, struct mbuf *m,
2925 1.1 ryo int *start, int *csum_start)
2926 1.1 ryo {
2927 1.1 ryo struct ether_header *eh;
2928 1.1 ryo struct mbuf *mp;
2929 1.1 ryo int offset, csum_off, iphl, offp;
2930 1.1 ryo bool v4;
2931 1.1 ryo
2932 1.1 ryo eh = mtod(m, struct ether_header *);
2933 1.1 ryo switch (ntohs(eh->ether_type)) {
2934 1.1 ryo case ETHERTYPE_IP:
2935 1.1 ryo case ETHERTYPE_IPV6:
2936 1.1 ryo offset = ETHER_HDR_LEN;
2937 1.1 ryo break;
2938 1.1 ryo case ETHERTYPE_VLAN:
2939 1.1 ryo offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2940 1.1 ryo break;
2941 1.1 ryo default:
2942 1.1 ryo m_freem(m);
2943 1.1 ryo return (EINVAL);
2944 1.1 ryo }
2945 1.1 ryo
2946 1.1 ryo if ((m->m_pkthdr.csum_flags &
2947 1.1 ryo (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) {
2948 1.1 ryo iphl = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data);
2949 1.1 ryo v4 = true;
2950 1.1 ryo } else {
2951 1.1 ryo iphl = M_CSUM_DATA_IPv6_IPHL(m->m_pkthdr.csum_data);
2952 1.1 ryo v4 = false;
2953 1.1 ryo }
2954 1.1 ryo *start = offset + iphl;
2955 1.1 ryo
2956 1.1 ryo if (m->m_pkthdr.csum_flags &
2957 1.1 ryo (M_CSUM_TCPv4 | M_CSUM_TCPv6 | M_CSUM_TSOv4 | M_CSUM_TSOv6)) {
2958 1.1 ryo csum_off = offsetof(struct tcphdr, th_sum);
2959 1.1 ryo } else {
2960 1.1 ryo csum_off = offsetof(struct udphdr, uh_sum);
2961 1.1 ryo }
2962 1.1 ryo
2963 1.1 ryo *csum_start = *start + csum_off;
2964 1.1 ryo mp = m_pulldown(m, 0, *csum_start + 2, &offp);
2965 1.1 ryo if (!mp) {
2966 1.1 ryo /* m is already freed */
2967 1.1 ryo return ENOBUFS;
2968 1.1 ryo }
2969 1.1 ryo
2970 1.1 ryo if (m->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) {
2971 1.1 ryo struct tcphdr *tcp;
2972 1.1 ryo
2973 1.1 ryo txq->vxtxq_stats.vmtxs_tso++;
2974 1.1 ryo tcp = (void *)(mtod(mp, char *) + offp + *start);
2975 1.1 ryo
2976 1.1 ryo if (v4) {
2977 1.1 ryo struct ip *ip;
2978 1.1 ryo
2979 1.1 ryo ip = (void *)(mtod(mp, char *) + offp + offset);
2980 1.1 ryo tcp->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
2981 1.1 ryo ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2982 1.1 ryo } else {
2983 1.1 ryo struct ip6_hdr *ip6;
2984 1.1 ryo
2985 1.1 ryo ip6 = (void *)(mtod(mp, char *) + offp + offset);
2986 1.1 ryo tcp->th_sum = in6_cksum_phdr(&ip6->ip6_src,
2987 1.1 ryo &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
2988 1.1 ryo }
2989 1.1 ryo
2990 1.1 ryo /*
2991 1.1 ryo * For TSO, the size of the protocol header is also
2992 1.1 ryo * included in the descriptor header size.
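* For example, ETHER_HDR_LEN (14) plus a 20-byte IPv4 header and a
* no-option TCP header (th_off == 5, i.e. 20 bytes) gives hlen = 54.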
2993 1.1 ryo */
2994 1.1 ryo *start += (tcp->th_off << 2);
2995 1.1 ryo } else
2996 1.1 ryo txq->vxtxq_stats.vmtxs_csum++;
2997 1.1 ryo
2998 1.1 ryo return (0);
2999 1.1 ryo }
3000 1.1 ryo
3001 1.1 ryo static int
3002 1.1 ryo vmxnet3_txq_load_mbuf(struct vmxnet3_txqueue *txq, struct mbuf **m0,
3003 1.1 ryo bus_dmamap_t dmap)
3004 1.1 ryo {
3005 1.1 ryo struct mbuf *m;
3006 1.1 ryo bus_dma_tag_t tag;
3007 1.1 ryo int error;
3008 1.1 ryo
3009 1.1 ryo m = *m0;
3010 1.1 ryo tag = txq->vxtxq_sc->vmx_dmat;
3011 1.1 ryo
3012 1.1 ryo error = bus_dmamap_load_mbuf(tag, dmap, m, BUS_DMA_NOWAIT);
3013 1.1 ryo if (error != EFBIG) /* success, or a failure m_defrag cannot fix */
3014 1.1 ryo return (error);
3015 1.1 ryo
3016 1.1 ryo m = m_defrag(m, M_NOWAIT);
3017 1.1 ryo if (m != NULL) {
3018 1.1 ryo *m0 = m;
3019 1.1 ryo error = bus_dmamap_load_mbuf(tag, dmap, m, BUS_DMA_NOWAIT);
3020 1.1 ryo } else
3021 1.1 ryo error = ENOBUFS;
3022 1.1 ryo
3023 1.1 ryo if (error) {
3024 1.1 ryo m_freem(*m0);
3025 1.1 ryo *m0 = NULL;
3026 1.1 ryo txq->vxtxq_defrag_failed.ev_count++;
3027 1.1 ryo } else
3028 1.1 ryo txq->vxtxq_defragged.ev_count++;
3029 1.1 ryo
3030 1.1 ryo return (error);
3031 1.1 ryo }
3032 1.1 ryo
3033 1.1 ryo static void
3034 1.1 ryo vmxnet3_txq_unload_mbuf(struct vmxnet3_txqueue *txq, bus_dmamap_t dmap)
3035 1.1 ryo {
3036 1.1 ryo
3037 1.1 ryo bus_dmamap_unload(txq->vxtxq_sc->vmx_dmat, dmap);
3038 1.1 ryo }
3039 1.1 ryo
3040 1.1 ryo static int
3041 1.1 ryo vmxnet3_txq_encap(struct vmxnet3_txqueue *txq, struct mbuf **m0)
3042 1.1 ryo {
3043 1.1 ryo struct vmxnet3_softc *sc;
3044 1.1 ryo struct vmxnet3_txring *txr;
3045 1.1 ryo struct vmxnet3_txdesc *txd, *sop;
3046 1.1 ryo struct mbuf *m;
3047 1.1 ryo bus_dmamap_t dmap;
3048 1.1 ryo bus_dma_segment_t *segs;
3049 1.1 ryo int i, gen, start, csum_start, nsegs, error;
3050 1.1 ryo
3051 1.1 ryo sc = txq->vxtxq_sc;
3052 1.1 ryo start = 0;
3053 1.1 ryo txd = NULL;
3054 1.1 ryo txr = &txq->vxtxq_cmd_ring;
3055 1.1 ryo dmap = txr->vxtxr_txbuf[txr->vxtxr_head].vtxb_dmamap;
3056 1.1 ryo csum_start = 0; /* XXX: silence GCC "may be used uninitialized" */
3057 1.1 ryo
3058 1.1 ryo error = vmxnet3_txq_load_mbuf(txq, m0, dmap);
3059 1.1 ryo if (error)
3060 1.1 ryo return (error);
3061 1.1 ryo
3062 1.1 ryo nsegs = dmap->dm_nsegs;
3063 1.1 ryo segs = dmap->dm_segs;
3064 1.1 ryo
3065 1.1 ryo m = *m0;
3066 1.1 ryo KASSERT(m->m_flags & M_PKTHDR);
3067 1.1 ryo KASSERT(nsegs <= VMXNET3_TX_MAXSEGS);
3068 1.1 ryo
3069 1.1 ryo if (vmxnet3_txring_avail(txr) < nsegs) {
3070 1.1 ryo txq->vxtxq_stats.vmtxs_full++;
3071 1.1 ryo vmxnet3_txq_unload_mbuf(txq, dmap);
3072 1.1 ryo return (ENOSPC);
3073 1.1 ryo } else if (m->m_pkthdr.csum_flags & VMXNET3_CSUM_ALL_OFFLOAD) {
3074 1.1 ryo error = vmxnet3_txq_offload_ctx(txq, m, &start, &csum_start);
3075 1.1 ryo if (error) {
3076 1.1 ryo /* m is already freed */
3077 1.1 ryo txq->vxtxq_stats.vmtxs_offload_failed++;
3078 1.1 ryo vmxnet3_txq_unload_mbuf(txq, dmap);
3079 1.1 ryo *m0 = NULL;
3080 1.1 ryo return (error);
3081 1.1 ryo }
3082 1.1 ryo }
3083 1.1 ryo
3084 1.1 ryo txr->vxtxr_txbuf[txr->vxtxr_head].vtxb_m = m;
3085 1.1 ryo sop = &txr->vxtxr_txd[txr->vxtxr_head];
3086 1.1 ryo gen = txr->vxtxr_gen ^ 1; /* Owned by the CPU until flipped below. */
3087 1.1 ryo
3088 1.1 ryo for (i = 0; i < nsegs; i++) {
3089 1.1 ryo txd = &txr->vxtxr_txd[txr->vxtxr_head];
3090 1.1 ryo
3091 1.1 ryo txd->addr = segs[i].ds_addr;
3092 1.1 ryo txd->len = segs[i].ds_len;
3093 1.1 ryo txd->gen = gen;
3094 1.1 ryo txd->dtype = 0;
3095 1.1 ryo txd->offload_mode = VMXNET3_OM_NONE;
3096 1.1 ryo txd->offload_pos = 0;
3097 1.1 ryo txd->hlen = 0;
3098 1.1 ryo txd->eop = 0;
3099 1.1 ryo txd->compreq = 0;
3100 1.1 ryo txd->vtag_mode = 0;
3101 1.1 ryo txd->vtag = 0;
3102 1.1 ryo
3103 1.1 ryo if (++txr->vxtxr_head == txr->vxtxr_ndesc) {
3104 1.1 ryo txr->vxtxr_head = 0;
3105 1.1 ryo txr->vxtxr_gen ^= 1;
3106 1.1 ryo }
3107 1.1 ryo gen = txr->vxtxr_gen;
3108 1.1 ryo }
3109 1.1 ryo txd->eop = 1;
3110 1.1 ryo txd->compreq = 1;
3111 1.1 ryo
3112 1.1 ryo if (vlan_has_tag(m)) {
3113 1.1 ryo sop->vtag_mode = 1;
3114 1.1 ryo sop->vtag = vlan_get_tag(m);
3115 1.1 ryo }
3116 1.1 ryo
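	/*
	 * For TSO, hlen carries the full header length computed by
	 * vmxnet3_txq_offload_ctx() and offload_pos carries the MSS.
	 * For plain checksum offload, offload_pos instead tells the
	 * device where the checksum is to be written.
	 */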
3117 1.1 ryo if (m->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) {
3118 1.1 ryo sop->offload_mode = VMXNET3_OM_TSO;
3119 1.1 ryo sop->hlen = start;
3120 1.1 ryo sop->offload_pos = m->m_pkthdr.segsz;
3121 1.1 ryo } else if (m->m_pkthdr.csum_flags & (VMXNET3_CSUM_OFFLOAD |
3122 1.1 ryo VMXNET3_CSUM_OFFLOAD_IPV6)) {
3123 1.1 ryo sop->offload_mode = VMXNET3_OM_CSUM;
3124 1.1 ryo sop->hlen = start;
3125 1.1 ryo sop->offload_pos = csum_start;
3126 1.1 ryo }
3127 1.1 ryo
3128 1.1 ryo /* Finally, change the ownership. */
3129 1.1 ryo vmxnet3_barrier(sc, VMXNET3_BARRIER_WR);
3130 1.1 ryo sop->gen ^= 1;
3131 1.1 ryo
3132 1.1 ryo txq->vxtxq_ts->npending += nsegs;
3133 1.1 ryo if (txq->vxtxq_ts->npending >= txq->vxtxq_ts->intr_threshold) {
3134 1.1 ryo struct vmxnet3_queue *vmxq;
3135 1.1 ryo vmxq = container_of(txq, struct vmxnet3_queue, vxq_txqueue);
3136 1.1 ryo txq->vxtxq_ts->npending = 0;
3137 1.1 ryo vmxnet3_write_bar0(sc, VMXNET3_BAR0_TXH(vmxq->vxq_id),
3138 1.1 ryo txr->vxtxr_head);
3139 1.1 ryo }
3140 1.1 ryo
3141 1.1 ryo return (0);
3142 1.1 ryo }
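
/*
 * A hypothetical toy ring (all "toy_*" names are invented for
 * illustration, not part of this driver) showing the generation-bit
 * handover that vmxnet3_txq_encap() performs above: every descriptor
 * is written with its gen bit still in the "CPU owned" state, and only
 * after a write barrier is the start-of-packet descriptor's gen bit
 * flipped, so the device can never observe a partially written packet.
 */
#if 0	/* illustrative sketch only */
#define TOY_NDESC	64

struct toy_desc {
	uint64_t addr;
	uint32_t len;
	uint32_t gen;	/* device owns the desc when gen == ring gen */
};

struct toy_ring {
	struct toy_desc desc[TOY_NDESC];
	unsigned head;	/* next free slot */
	uint32_t gen;	/* current generation; flips on ring wrap */
};

static void
toy_post(struct toy_ring *r, const uint64_t *addr, const uint32_t *len,
    unsigned nsegs)	/* assumes nsegs < TOY_NDESC */
{
	unsigned sop = r->head;
	uint32_t gen = r->gen ^ 1;	/* sop stays CPU-owned for now */
	unsigned i;

	for (i = 0; i < nsegs; i++) {
		struct toy_desc *d = &r->desc[r->head];

		d->addr = addr[i];
		d->len = len[i];
		d->gen = gen;
		if (++r->head == TOY_NDESC) {
			r->head = 0;
			r->gen ^= 1;	/* wrapped: flip the generation */
		}
		gen = r->gen;
	}
	/* The real code issues a write barrier here, then hands over: */
	r->desc[sop].gen ^= 1;
}
#endif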
3143 1.1 ryo
3144 1.1 ryo #define VMXNET3_TX_START 1
3145 1.1 ryo #define VMXNET3_TX_TRANSMIT 2
3146 1.1 ryo static inline void
3147 1.1 ryo vmxnet3_tx_common_locked(struct ifnet *ifp, struct vmxnet3_txqueue *txq, int txtype)
3148 1.1 ryo {
3149 1.1 ryo struct vmxnet3_softc *sc;
3150 1.1 ryo struct vmxnet3_txring *txr;
3151 1.1 ryo struct mbuf *m_head;
3152 1.1 ryo int tx;
3153 1.1 ryo
3154 1.1 ryo sc = ifp->if_softc;
3155 1.1 ryo txr = &txq->vxtxq_cmd_ring;
3156 1.1 ryo tx = 0;
3157 1.1 ryo
3158 1.1 ryo VMXNET3_TXQ_LOCK_ASSERT(txq);
3159 1.1 ryo
3160 1.1 ryo if ((ifp->if_flags & IFF_RUNNING) == 0 ||
3161 1.1 ryo sc->vmx_link_active == 0)
3162 1.1 ryo return;
3163 1.1 ryo
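	/*
	 * Peek at the head packet first and dequeue it only once the
	 * ring is known to have room, so that a full ring leaves the
	 * packet queued instead of dropping it.
	 */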
3164 1.1 ryo for (;;) {
3165 1.1 ryo if (txtype == VMXNET3_TX_START)
3166 1.1 ryo IFQ_POLL(&ifp->if_snd, m_head);
3167 1.1 ryo else
3168 1.1 ryo m_head = pcq_peek(txq->vxtxq_interq);
3169 1.1 ryo if (m_head == NULL)
3170 1.1 ryo break;
3171 1.1 ryo
3172 1.1 ryo if (vmxnet3_txring_avail(txr) < VMXNET3_TX_MAXSEGS)
3173 1.1 ryo break;
3174 1.1 ryo
3175 1.1 ryo if (txtype == VMXNET3_TX_START)
3176 1.1 ryo IFQ_DEQUEUE(&ifp->if_snd, m_head);
3177 1.1 ryo else
3178 1.1 ryo m_head = pcq_get(txq->vxtxq_interq);
3179 1.1 ryo if (m_head == NULL)
3180 1.1 ryo break;
3181 1.1 ryo
3182 1.1 ryo if (vmxnet3_txq_encap(txq, &m_head) != 0) {
3183 1.1 ryo if (m_head != NULL)
3184 1.1 ryo m_freem(m_head);
3185 1.1 ryo break;
3186 1.1 ryo }
3187 1.1 ryo
3188 1.1 ryo tx++;
3189 1.1 ryo bpf_mtap(ifp, m_head, BPF_D_OUT);
3190 1.1 ryo }
3191 1.1 ryo
3192 1.1 ryo if (tx > 0)
3193 1.1 ryo txq->vxtxq_watchdog = VMXNET3_WATCHDOG_TIMEOUT;
3194 1.1 ryo }
3195 1.1 ryo
3196 1.1 ryo static void
3197 1.1 ryo vmxnet3_start_locked(struct ifnet *ifp)
3198 1.1 ryo {
3199 1.1 ryo struct vmxnet3_softc *sc;
3200 1.1 ryo struct vmxnet3_txqueue *txq;
3201 1.1 ryo
3202 1.1 ryo sc = ifp->if_softc;
3203 1.1 ryo txq = &sc->vmx_queue[0].vxq_txqueue;
3204 1.1 ryo
3205 1.1 ryo vmxnet3_tx_common_locked(ifp, txq, VMXNET3_TX_START);
3206 1.1 ryo }
3207 1.1 ryo
3208 1.1 ryo void
3209 1.1 ryo vmxnet3_start(struct ifnet *ifp)
3210 1.1 ryo {
3211 1.1 ryo struct vmxnet3_softc *sc;
3212 1.1 ryo struct vmxnet3_txqueue *txq;
3213 1.1 ryo
3214 1.1 ryo sc = ifp->if_softc;
3215 1.1 ryo txq = &sc->vmx_queue[0].vxq_txqueue;
3216 1.1 ryo
3217 1.1 ryo VMXNET3_TXQ_LOCK(txq);
3218 1.1 ryo vmxnet3_start_locked(ifp);
3219 1.1 ryo VMXNET3_TXQ_UNLOCK(txq);
3220 1.1 ryo }
3221 1.1 ryo
3222 1.1 ryo static int
3223 1.1 ryo vmxnet3_select_txqueue(struct ifnet *ifp, struct mbuf *m __unused)
3224 1.1 ryo {
3225 1.1 ryo struct vmxnet3_softc *sc;
3226 1.1 ryo u_int cpuid;
3227 1.1 ryo
3228 1.1 ryo sc = ifp->if_softc;
3229 1.1 ryo cpuid = cpu_index(curcpu());
3230 1.1 ryo 	/*
3231 1.1 ryo 	 * Future work:
3232 1.1 ryo 	 * We should select the txqueue so that the load evens out even
3233 1.1 ryo 	 * when ncpu differs from sc->vmx_ntxqueues.  Currently the load
3234 1.1 ryo 	 * is uneven: with six CPUs and four txqueues, vmx_queue[0] and
3235 1.1 ryo 	 * vmx_queue[1] carry more load than vmx_queue[2] and vmx_queue[3],
3236 1.1 ryo 	 * because CPU#4 always uses vmx_queue[0] and CPU#5 vmx_queue[1].
3237 1.1 ryo 	 * Furthermore, we should not pick the txqueue at random, to avoid
3238 1.1 ryo 	 * packet reordering; we should use the mbuf's flow information
3239 1.1 ryo 	 * instead (see the sketch after this function).
3240 1.1 ryo 	 */
3241 1.1 ryo return cpuid % sc->vmx_ntxqueues;
3242 1.1 ryo }
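
/*
 * Hypothetical sketch (not implemented here) of the flow-based
 * selection suggested in the comment above: hash a flow identifier so
 * that every packet of one stream maps to the same txqueue regardless
 * of the sending CPU, avoiding reordering while still spreading
 * unrelated flows across all queues.  "toy_select_txqueue" and the
 * hash constant are invented for illustration.
 */
#if 0	/* illustrative sketch only */
static u_int
toy_select_txqueue(u_int flowid, u_int nqueues)
{
	/* A cheap integer mix so adjacent flow ids spread out. */
	flowid ^= flowid >> 16;
	flowid *= 0x45d9f3bU;
	flowid ^= flowid >> 16;
	return flowid % nqueues;
}
#endif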
3243 1.1 ryo
3244 1.1 ryo static void
3245 1.1 ryo vmxnet3_transmit_locked(struct ifnet *ifp, struct vmxnet3_txqueue *txq)
3246 1.1 ryo {
3247 1.1 ryo
3248 1.1 ryo vmxnet3_tx_common_locked(ifp, txq, VMXNET3_TX_TRANSMIT);
3249 1.1 ryo }
3250 1.1 ryo
3251 1.1 ryo static int
3252 1.1 ryo vmxnet3_transmit(struct ifnet *ifp, struct mbuf *m)
3253 1.1 ryo {
3254 1.1 ryo struct vmxnet3_softc *sc;
3255 1.1 ryo struct vmxnet3_txqueue *txq;
3256 1.1 ryo int qid;
3257 1.1 ryo
3258 1.1 ryo qid = vmxnet3_select_txqueue(ifp, m);
3259 1.1 ryo sc = ifp->if_softc;
3260 1.1 ryo txq = &sc->vmx_queue[qid].vxq_txqueue;
3261 1.1 ryo
3262 1.1 ryo if (__predict_false(!pcq_put(txq->vxtxq_interq, m))) {
3263 1.1 ryo VMXNET3_TXQ_LOCK(txq);
3264 1.1 ryo txq->vxtxq_pcqdrop.ev_count++;
3265 1.1 ryo VMXNET3_TXQ_UNLOCK(txq);
3266 1.1 ryo m_freem(m);
3267 1.1 ryo return ENOBUFS;
3268 1.1 ryo }
3269 1.1 ryo
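	/*
	 * Drain the pcq now if the queue lock is free; otherwise the
	 * per-queue softint will drain it for us, so just schedule it
	 * rather than blocking here.
	 */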
3270 1.1 ryo if (VMXNET3_TXQ_TRYLOCK(txq)) {
3271 1.1 ryo vmxnet3_transmit_locked(ifp, txq);
3272 1.1 ryo VMXNET3_TXQ_UNLOCK(txq);
3273 1.1 ryo } else {
3274 1.1 ryo kpreempt_disable();
3275 1.1 ryo softint_schedule(txq->vxtxq_si);
3276 1.1 ryo kpreempt_enable();
3277 1.1 ryo }
3278 1.1 ryo
3279 1.1 ryo return 0;
3280 1.1 ryo }
3281 1.1 ryo
3282 1.1 ryo static void
3283 1.1 ryo vmxnet3_deferred_transmit(void *arg)
3284 1.1 ryo {
3285 1.1 ryo struct vmxnet3_txqueue *txq = arg;
3286 1.1 ryo struct vmxnet3_softc *sc = txq->vxtxq_sc;
3287 1.1 ryo struct ifnet *ifp = &sc->vmx_ethercom.ec_if;
3288 1.1 ryo
3289 1.1 ryo VMXNET3_TXQ_LOCK(txq);
3290 1.1 ryo txq->vxtxq_transmitdef.ev_count++;
3291 1.1 ryo if (pcq_peek(txq->vxtxq_interq) != NULL)
3292 1.1 ryo vmxnet3_transmit_locked(ifp, txq);
3293 1.1 ryo VMXNET3_TXQ_UNLOCK(txq);
3294 1.1 ryo }
3295 1.1 ryo
3296 1.1 ryo static void
3297 1.1 ryo vmxnet3_set_rxfilter(struct vmxnet3_softc *sc)
3298 1.1 ryo {
3299 1.1 ryo struct ifnet *ifp = &sc->vmx_ethercom.ec_if;
3300 1.1 ryo struct ethercom *ec = &sc->vmx_ethercom;
3301 1.1 ryo struct vmxnet3_driver_shared *ds = sc->vmx_ds;
3302 1.1 ryo struct ether_multi *enm;
3303 1.1 ryo struct ether_multistep step;
3304 1.1 ryo u_int mode;
3305 1.1 ryo uint8_t *p;
3306 1.1 ryo
3307 1.1 ryo ds->mcast_tablelen = 0;
3308 1.1 ryo ETHER_LOCK(ec);
3309 1.1 ryo CLR(ec->ec_flags, ETHER_F_ALLMULTI);
3310 1.1 ryo ETHER_UNLOCK(ec);
3311 1.1 ryo
3312 1.1 ryo /*
3313 1.1 ryo * Always accept broadcast frames.
3314 1.1 ryo * Always accept frames destined to our station address.
3315 1.1 ryo */
3316 1.1 ryo mode = VMXNET3_RXMODE_BCAST | VMXNET3_RXMODE_UCAST;
3317 1.1 ryo
3318 1.1 ryo ETHER_LOCK(ec);
3319 1.1 ryo if (ISSET(ifp->if_flags, IFF_PROMISC) ||
3320 1.1 ryo ec->ec_multicnt > VMXNET3_MULTICAST_MAX)
3321 1.1 ryo goto allmulti;
3322 1.1 ryo
3323 1.1 ryo p = sc->vmx_mcast;
3324 1.1 ryo ETHER_FIRST_MULTI(step, ec, enm);
3325 1.1 ryo while (enm != NULL) {
3326 1.1 ryo if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3327 1.1 ryo /*
3328 1.1 ryo * We must listen to a range of multicast addresses.
3329 1.1 ryo * For now, just accept all multicasts, rather than
3330 1.1 ryo * trying to set only those filter bits needed to match
3331 1.1 ryo * the range. (At this time, the only use of address
3332 1.1 ryo * ranges is for IP multicast routing, for which the
3333 1.1 ryo * range is big enough to require all bits set.)
3334 1.1 ryo */
3335 1.1 ryo goto allmulti;
3336 1.1 ryo }
3337 1.1 ryo memcpy(p, enm->enm_addrlo, ETHER_ADDR_LEN);
3338 1.1 ryo
3339 1.1 ryo p += ETHER_ADDR_LEN;
3340 1.1 ryo
3341 1.1 ryo ETHER_NEXT_MULTI(step, enm);
3342 1.1 ryo }
3343 1.1 ryo
3344 1.1 ryo if (ec->ec_multicnt > 0) {
3345 1.1 ryo SET(mode, VMXNET3_RXMODE_MCAST);
3346 1.1 ryo ds->mcast_tablelen = p - sc->vmx_mcast;
3347 1.1 ryo }
3348 1.1 ryo ETHER_UNLOCK(ec);
3349 1.1 ryo
3350 1.1 ryo goto setit;
3351 1.1 ryo
3352 1.1 ryo allmulti:
3353 1.1 ryo SET(ec->ec_flags, ETHER_F_ALLMULTI);
3354 1.1 ryo ETHER_UNLOCK(ec);
3355 1.1 ryo SET(mode, (VMXNET3_RXMODE_ALLMULTI | VMXNET3_RXMODE_MCAST));
3356 1.1 ryo if (ifp->if_flags & IFF_PROMISC)
3357 1.1 ryo SET(mode, VMXNET3_RXMODE_PROMISC);
3358 1.1 ryo
3359 1.1 ryo setit:
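	/* Push the updated filter table to the device, then the rx mode. */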
3360 1.1 ryo vmxnet3_write_cmd(sc, VMXNET3_CMD_SET_FILTER);
3361 1.1 ryo ds->rxmode = mode;
3362 1.1 ryo vmxnet3_write_cmd(sc, VMXNET3_CMD_SET_RXMODE);
3363 1.1 ryo }
3364 1.1 ryo
3365 1.1 ryo static int
3366 1.1 ryo vmxnet3_ioctl(struct ifnet *ifp, u_long cmd, void *data)
3367 1.1 ryo {
3368 1.1 ryo struct vmxnet3_softc *sc = ifp->if_softc;
3369 1.1 ryo struct ifreq *ifr = (struct ifreq *)data;
3370 1.1 ryo int s, error = 0;
3371 1.1 ryo
3372 1.1 ryo switch (cmd) {
3373 1.1 ryo case SIOCSIFMTU: {
3374 1.1 ryo int nmtu = ifr->ifr_mtu;
3375 1.1 ryo
3376 1.1 ryo if (nmtu < VMXNET3_MIN_MTU || nmtu > VMXNET3_MAX_MTU) {
3377 1.1 ryo error = EINVAL;
3378 1.1 ryo break;
3379 1.1 ryo }
3380 1.1 ryo if (ifp->if_mtu != nmtu) {
3381 1.1 ryo s = splnet();
3382 1.1 ryo error = ether_ioctl(ifp, cmd, data);
3383 1.1 ryo splx(s);
3384 1.1 ryo if (error == ENETRESET)
3385 1.1 ryo error = vmxnet3_init(ifp);
3386 1.1 ryo }
3387 1.1 ryo break;
3388 1.1 ryo }
3389 1.1 ryo
3390 1.1 ryo default:
3391 1.1 ryo s = splnet();
3392 1.1 ryo error = ether_ioctl(ifp, cmd, data);
3393 1.1 ryo splx(s);
3394 1.1 ryo }
3395 1.1 ryo
3396 1.1 ryo if (error == ENETRESET) {
3397 1.1 ryo VMXNET3_CORE_LOCK(sc);
3398 1.1 ryo if (ifp->if_flags & IFF_RUNNING)
3399 1.1 ryo vmxnet3_set_rxfilter(sc);
3400 1.1 ryo VMXNET3_CORE_UNLOCK(sc);
3401 1.1 ryo error = 0;
3402 1.1 ryo }
3403 1.1 ryo
3404 1.1 ryo return error;
3405 1.1 ryo }
3406 1.1 ryo
3407 1.1 ryo static int
3408 1.1 ryo vmxnet3_ifflags_cb(struct ethercom *ec)
3409 1.1 ryo {
3410 1.1 ryo struct vmxnet3_softc *sc;
3411 1.1 ryo
3412 1.1 ryo sc = ec->ec_if.if_softc;
3413 1.1 ryo
3414 1.1 ryo VMXNET3_CORE_LOCK(sc);
3415 1.1 ryo vmxnet3_set_rxfilter(sc);
3416 1.1 ryo VMXNET3_CORE_UNLOCK(sc);
3417 1.1 ryo
3418 1.1 ryo vmxnet3_if_link_status(sc);
3419 1.1 ryo
3420 1.1 ryo return 0;
3421 1.1 ryo }
3422 1.1 ryo
3423 1.1 ryo static int
3424 1.1 ryo vmxnet3_watchdog(struct vmxnet3_txqueue *txq)
3425 1.1 ryo {
3426 1.1 ryo struct vmxnet3_softc *sc;
3427 1.1 ryo struct vmxnet3_queue *vmxq;
3428 1.1 ryo
3429 1.1 ryo sc = txq->vxtxq_sc;
3430 1.1 ryo vmxq = container_of(txq, struct vmxnet3_queue, vxq_txqueue);
3431 1.1 ryo
3432 1.1 ryo VMXNET3_TXQ_LOCK(txq);
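	/*
	 * vxtxq_watchdog == 0 means the watchdog is disarmed; otherwise
	 * decrement it once per tick and time out only when it reaches
	 * zero here.
	 */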
3433 1.1 ryo if (txq->vxtxq_watchdog == 0 || --txq->vxtxq_watchdog) {
3434 1.1 ryo VMXNET3_TXQ_UNLOCK(txq);
3435 1.1 ryo return (0);
3436 1.1 ryo }
3437 1.1 ryo txq->vxtxq_watchdogto.ev_count++;
3438 1.1 ryo VMXNET3_TXQ_UNLOCK(txq);
3439 1.1 ryo
3440 1.1 ryo device_printf(sc->vmx_dev, "watchdog timeout on queue %d\n",
3441 1.1 ryo vmxq->vxq_id);
3442 1.1 ryo return (1);
3443 1.1 ryo }
3444 1.1 ryo
3445 1.1 ryo static void
3446 1.1 ryo vmxnet3_refresh_host_stats(struct vmxnet3_softc *sc)
3447 1.1 ryo {
3448 1.1 ryo
3449 1.1 ryo vmxnet3_write_cmd(sc, VMXNET3_CMD_GET_STATS);
3450 1.1 ryo }
3451 1.1 ryo
3452 1.1 ryo static void
3453 1.1 ryo vmxnet3_tick(void *xsc)
3454 1.1 ryo {
3455 1.1 ryo struct vmxnet3_softc *sc;
3456 1.1 ryo int i, timedout;
3457 1.1 ryo
3458 1.1 ryo sc = xsc;
3459 1.1 ryo timedout = 0;
3460 1.1 ryo
3461 1.1 ryo VMXNET3_CORE_LOCK(sc);
3462 1.1 ryo
3463 1.1 ryo vmxnet3_refresh_host_stats(sc);
3464 1.1 ryo
3465 1.1 ryo for (i = 0; i < sc->vmx_ntxqueues; i++)
3466 1.1 ryo timedout |= vmxnet3_watchdog(&sc->vmx_queue[i].vxq_txqueue);
3467 1.1 ryo
3468 1.1 ryo if (timedout != 0)
3469 1.1 ryo vmxnet3_init_locked(sc);
3470 1.1 ryo else
3471 1.1 ryo callout_reset(&sc->vmx_tick, hz, vmxnet3_tick, sc);
3472 1.1 ryo
3473 1.1 ryo VMXNET3_CORE_UNLOCK(sc);
3474 1.1 ryo }
3475 1.1 ryo
3476 1.1 ryo /*
3477 1.1 ryo * update link state of ifnet and softc
3478 1.1 ryo */
3479 1.1 ryo static void
3480 1.1 ryo vmxnet3_if_link_status(struct vmxnet3_softc *sc)
3481 1.1 ryo {
3482 1.1 ryo struct ifnet *ifp = &sc->vmx_ethercom.ec_if;
3483 1.1 ryo u_int x, link;
3484 1.1 ryo
3485 1.1 ryo vmxnet3_cmd_link_status(ifp);
3486 1.1 ryo
3487 1.1 ryo x = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_LINK);
3488 1.1 ryo if (x & 1) {
3489 1.1 ryo sc->vmx_link_active = 1;
3490 1.1 ryo link = LINK_STATE_UP;
3491 1.1 ryo } else {
3492 1.1 ryo sc->vmx_link_active = 0;
3493 1.1 ryo link = LINK_STATE_DOWN;
3494 1.1 ryo }
3495 1.1 ryo
3496 1.1 ryo if_link_state_change(ifp, link);
3497 1.1 ryo }
3498 1.1 ryo
3499 1.1 ryo /*
3500 1.1 ryo  * Check the vmx(4) link state via VMXNET3_CMD and update
3501 1.1 ryo  * ifp->if_baudrate.  Returns:
3502 1.1 ryo  * - true: link up
3503 1.1 ryo  * - false: link down
3504 1.1 ryo  */
3505 1.1 ryo static bool
3506 1.1 ryo vmxnet3_cmd_link_status(struct ifnet *ifp)
3507 1.1 ryo {
3508 1.1 ryo struct vmxnet3_softc *sc = ifp->if_softc;
3509 1.1 ryo u_int x, speed;
3510 1.1 ryo
3511 1.1 ryo x = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_LINK);
3512 1.1 ryo if ((x & 1) == 0)
3513 1.1 ryo return false;
3514 1.1 ryo
3515 1.1 ryo speed = x >> 16;
3516 1.1 ryo ifp->if_baudrate = IF_Mbps(speed);
3517 1.1 ryo return true;
3518 1.1 ryo }
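
/*
 * For reference, a hypothetical decoder ("toy_decode_link" is invented)
 * for the VMXNET3_CMD_GET_LINK word used above: bit 0 is the link-up
 * flag and the upper 16 bits carry the speed in Mbps.
 */
#if 0	/* illustrative sketch only */
static void
toy_decode_link(uint32_t x, bool *upp, uint64_t *baudratep)
{
	*upp = (x & 1) != 0;		/* bit 0: link up */
	*baudratep = IF_Mbps(x >> 16);	/* bits 31..16: speed in Mbps */
}
#endif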
3519 1.1 ryo
3520 1.1 ryo static void
3521 1.1 ryo vmxnet3_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
3522 1.1 ryo {
3523 1.1 ryo bool up;
3524 1.1 ryo
3525 1.1 ryo ifmr->ifm_status = IFM_AVALID;
3526 1.1 ryo ifmr->ifm_active = IFM_ETHER;
3527 1.1 ryo
3528 1.1 ryo up = vmxnet3_cmd_link_status(ifp);
3529 1.1 ryo if (!up)
3530 1.1 ryo return;
3531 1.1 ryo
3532 1.1 ryo ifmr->ifm_status |= IFM_ACTIVE;
3533 1.1 ryo
3534 1.1 ryo if (ifp->if_baudrate >= IF_Gbps(10ULL))
3535 1.1 ryo ifmr->ifm_active |= IFM_10G_T;
3536 1.1 ryo }
3537 1.1 ryo
3538 1.1 ryo static int
3539 1.1 ryo vmxnet3_ifmedia_change(struct ifnet *ifp)
3540 1.1 ryo {
3541 1.1 ryo return 0;
3542 1.1 ryo }
3543 1.1 ryo
3544 1.1 ryo static void
3545 1.1 ryo vmxnet3_set_lladdr(struct vmxnet3_softc *sc)
3546 1.1 ryo {
3547 1.1 ryo uint32_t ml, mh;
3548 1.1 ryo
3549 1.1 ryo ml = sc->vmx_lladdr[0];
3550 1.1 ryo ml |= sc->vmx_lladdr[1] << 8;
3551 1.1 ryo ml |= sc->vmx_lladdr[2] << 16;
3552 1.1 ryo ml |= sc->vmx_lladdr[3] << 24;
3553 1.1 ryo vmxnet3_write_bar1(sc, VMXNET3_BAR1_MACL, ml);
3554 1.1 ryo
3555 1.1 ryo mh = sc->vmx_lladdr[4];
3556 1.1 ryo mh |= sc->vmx_lladdr[5] << 8;
3557 1.1 ryo vmxnet3_write_bar1(sc, VMXNET3_BAR1_MACH, mh);
3558 1.1 ryo }
3559 1.1 ryo
3560 1.1 ryo static void
3561 1.1 ryo vmxnet3_get_lladdr(struct vmxnet3_softc *sc)
3562 1.1 ryo {
3563 1.1 ryo uint32_t ml, mh;
3564 1.1 ryo
3565 1.1 ryo ml = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_MACL);
3566 1.1 ryo mh = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_MACH);
3567 1.1 ryo
3568 1.1 ryo sc->vmx_lladdr[0] = ml;
3569 1.1 ryo sc->vmx_lladdr[1] = ml >> 8;
3570 1.1 ryo sc->vmx_lladdr[2] = ml >> 16;
3571 1.1 ryo sc->vmx_lladdr[3] = ml >> 24;
3572 1.1 ryo sc->vmx_lladdr[4] = mh;
3573 1.1 ryo sc->vmx_lladdr[5] = mh >> 8;
3574 1.1 ryo }
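
/*
 * For reference, a hypothetical round-trip ("toy_*" names invented)
 * mirroring vmxnet3_set_lladdr()/vmxnet3_get_lladdr() above: the six
 * MAC bytes are packed little-endian, four into MACL and two into the
 * low half of MACH.
 */
#if 0	/* illustrative sketch only */
static void
toy_pack_mac(const uint8_t mac[6], uint32_t *mlp, uint32_t *mhp)
{
	/* Cast before the 24-bit shift to stay out of int's sign bit. */
	*mlp = mac[0] | (mac[1] << 8) | (mac[2] << 16) |
	    ((uint32_t)mac[3] << 24);
	*mhp = mac[4] | (mac[5] << 8);
}

static void
toy_unpack_mac(uint32_t ml, uint32_t mh, uint8_t mac[6])
{
	mac[0] = ml;
	mac[1] = ml >> 8;
	mac[2] = ml >> 16;
	mac[3] = ml >> 24;
	mac[4] = mh;
	mac[5] = mh >> 8;
}
#endif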
3575 1.1 ryo
3576 1.1 ryo static void
3577 1.1 ryo vmxnet3_enable_all_intrs(struct vmxnet3_softc *sc)
3578 1.1 ryo {
3579 1.1 ryo int i;
3580 1.1 ryo
3581 1.1 ryo sc->vmx_ds->ictrl &= ~VMXNET3_ICTRL_DISABLE_ALL;
3582 1.1 ryo for (i = 0; i < sc->vmx_nintrs; i++)
3583 1.1 ryo vmxnet3_enable_intr(sc, i);
3584 1.1 ryo }
3585 1.1 ryo
3586 1.1 ryo static void
3587 1.1 ryo vmxnet3_disable_all_intrs(struct vmxnet3_softc *sc)
3588 1.1 ryo {
3589 1.1 ryo int i;
3590 1.1 ryo
3591 1.1 ryo sc->vmx_ds->ictrl |= VMXNET3_ICTRL_DISABLE_ALL;
3592 1.1 ryo for (i = 0; i < sc->vmx_nintrs; i++)
3593 1.1 ryo vmxnet3_disable_intr(sc, i);
3594 1.1 ryo }
3595 1.1 ryo
3596 1.1 ryo static int
3597 1.1 ryo vmxnet3_dma_malloc(struct vmxnet3_softc *sc, bus_size_t size, bus_size_t align,
3598 1.1 ryo struct vmxnet3_dma_alloc *dma)
3599 1.1 ryo {
3600 1.1 ryo bus_dma_tag_t t = sc->vmx_dmat;
3601 1.1 ryo bus_dma_segment_t *segs = dma->dma_segs;
3602 1.1 ryo int n, error;
3603 1.1 ryo
3604 1.1 ryo memset(dma, 0, sizeof(*dma));
3605 1.1 ryo
3606 1.1 ryo error = bus_dmamem_alloc(t, size, align, 0, segs, 1, &n, BUS_DMA_NOWAIT);
3607 1.1 ryo if (error) {
3608 1.1 ryo aprint_error_dev(sc->vmx_dev, "bus_dmamem_alloc failed: %d\n", error);
3609 1.1 ryo goto fail1;
3610 1.1 ryo }
3611 1.1 ryo KASSERT(n == 1);
3612 1.1 ryo
3613 1.1 ryo error = bus_dmamem_map(t, segs, 1, size, &dma->dma_vaddr, BUS_DMA_NOWAIT);
3614 1.1 ryo if (error) {
3615 1.1 ryo aprint_error_dev(sc->vmx_dev, "bus_dmamem_map failed: %d\n", error);
3616 1.1 ryo goto fail2;
3617 1.1 ryo }
3618 1.1 ryo
3619 1.1 ryo error = bus_dmamap_create(t, size, 1, size, 0, BUS_DMA_NOWAIT, &dma->dma_map);
3620 1.1 ryo if (error) {
3621 1.1 ryo aprint_error_dev(sc->vmx_dev, "bus_dmamap_create failed: %d\n", error);
3622 1.1 ryo goto fail3;
3623 1.1 ryo }
3624 1.1 ryo
3625 1.1 ryo error = bus_dmamap_load(t, dma->dma_map, dma->dma_vaddr, size, NULL,
3626 1.1 ryo BUS_DMA_NOWAIT);
3627 1.1 ryo if (error) {
3628 1.1 ryo aprint_error_dev(sc->vmx_dev, "bus_dmamap_load failed: %d\n", error);
3629 1.1 ryo goto fail4;
3630 1.1 ryo }
3631 1.1 ryo
3632 1.1 ryo memset(dma->dma_vaddr, 0, size);
3633 1.1 ryo dma->dma_paddr = DMAADDR(dma->dma_map);
3634 1.1 ryo dma->dma_size = size;
3635 1.1 ryo
3636 1.1 ryo return (0);
3637 1.1 ryo fail4:
3638 1.1 ryo bus_dmamap_destroy(t, dma->dma_map);
3639 1.1 ryo fail3:
3640 1.1 ryo bus_dmamem_unmap(t, dma->dma_vaddr, size);
3641 1.1 ryo fail2:
3642 1.1 ryo bus_dmamem_free(t, segs, 1);
3643 1.1 ryo fail1:
3644 1.1 ryo return (error);
3645 1.1 ryo }
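
/*
 * Hypothetical usage sketch for vmxnet3_dma_malloc() above (the size
 * and alignment values are invented examples).  The helper performs
 * the canonical four-step bus_dma(9) setup -- bus_dmamem_alloc, then
 * bus_dmamem_map, bus_dmamap_create and bus_dmamap_load -- and unwinds
 * in reverse order on failure, so callers check a single error value.
 */
#if 0	/* illustrative sketch only */
static int
toy_alloc_shared(struct vmxnet3_softc *sc)
{
	struct vmxnet3_dma_alloc dma;
	int error;

	error = vmxnet3_dma_malloc(sc, PAGE_SIZE, 512, &dma);
	if (error)
		return error;

	/* dma.dma_vaddr is zeroed kva, dma.dma_paddr the bus address. */

	vmxnet3_dma_free(sc, &dma);
	return 0;
}
#endif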
3646 1.1 ryo
3647 1.1 ryo static void
3648 1.1 ryo vmxnet3_dma_free(struct vmxnet3_softc *sc, struct vmxnet3_dma_alloc *dma)
3649 1.1 ryo {
3650 1.1 ryo bus_dma_tag_t t = sc->vmx_dmat;
3651 1.1 ryo
3652 1.1 ryo bus_dmamap_unload(t, dma->dma_map);
3653 1.1 ryo bus_dmamap_destroy(t, dma->dma_map);
3654 1.1 ryo bus_dmamem_unmap(t, dma->dma_vaddr, dma->dma_size);
3655 1.1 ryo bus_dmamem_free(t, dma->dma_segs, 1);
3656 1.1 ryo
3657 1.1 ryo memset(dma, 0, sizeof(*dma));
3658 1.1 ryo }