/*	$NetBSD: if_vmx.c,v 1.10 2022/09/16 03:10:12 knakahara Exp $	*/
/*	$OpenBSD: if_vmx.c,v 1.16 2014/01/22 06:04:17 brad Exp $	*/

/*
 * Copyright (c) 2013 Tsubai Masanari
 * Copyright (c) 2013 Bryan Venteicher <bryanv (at) FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_vmx.c,v 1.10 2022/09/16 03:10:12 knakahara Exp $");

#ifdef _KERNEL_OPT
#include "opt_if_vmx.h"
#endif

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/bitops.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/sockio.h>
#include <sys/pcq.h>
#include <sys/workqueue.h>
#include <sys/interrupt.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#include <netinet/if_inarp.h>
#include <netinet/in_systm.h>	/* for <netinet/ip.h> */
#include <netinet/in.h>		/* for <netinet/ip.h> */
#include <netinet/ip.h>		/* for struct ip */
#include <netinet/ip6.h>	/* for struct ip6_hdr */
#include <netinet/tcp.h>	/* for struct tcphdr */
#include <netinet/udp.h>	/* for struct udphdr */

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_vmxreg.h>

#define VMXNET3_DRIVER_VERSION 0x00010000

/*
 * Max descriptors per Tx packet. We must limit the size of any
 * TSO packet based on the number of segments.
 */
#define VMXNET3_TX_MAXSEGS 32
#define VMXNET3_TX_MAXSIZE (VMXNET3_TX_MAXSEGS * MCLBYTES)

/*
 * Maximum supported Tx segment size. The length field in the
 * Tx descriptor is 14 bits.
 */
#define VMXNET3_TX_MAXSEGSIZE (1 << 14)

/*
 * The maximum number of Rx segments we accept.
 */
#define VMXNET3_MAX_RX_SEGS 0	/* no segments */

/*
 * Predetermined size of the multicast MAC filter table. If the
 * number of multicast addresses exceeds this size, the
 * ALL_MULTI mode is used instead.
 */
#define VMXNET3_MULTICAST_MAX 32

/*
 * Our Tx watchdog timeout.
 */
#define VMXNET3_WATCHDOG_TIMEOUT 5

/*
 * Default values for vmx_intr_{rx,tx}_process_limit: the maximum
 * number of packets to process in the interrupt handler.
 */
#define VMXNET3_RX_INTR_PROCESS_LIMIT 0U
#define VMXNET3_TX_INTR_PROCESS_LIMIT 256

/*
 * Default values for vmx_{rx,tx}_process_limit: the maximum number
 * of packets to process during deferred processing.
 */
#define VMXNET3_RX_PROCESS_LIMIT 256
#define VMXNET3_TX_PROCESS_LIMIT 256

#define VMXNET3_WORKQUEUE_PRI PRI_SOFTNET

/*
 * IP protocols for which we can perform Tx checksum offloading.
 */
#define VMXNET3_CSUM_OFFLOAD \
    (M_CSUM_TCPv4 | M_CSUM_UDPv4)
#define VMXNET3_CSUM_OFFLOAD_IPV6 \
    (M_CSUM_TCPv6 | M_CSUM_UDPv6)

#define VMXNET3_CSUM_ALL_OFFLOAD \
    (VMXNET3_CSUM_OFFLOAD | VMXNET3_CSUM_OFFLOAD_IPV6 | M_CSUM_TSOv4 | M_CSUM_TSOv6)

#define VMXNET3_RXRINGS_PERQ 2

#define VMXNET3_CORE_LOCK(_sc)		mutex_enter((_sc)->vmx_mtx)
#define VMXNET3_CORE_UNLOCK(_sc)	mutex_exit((_sc)->vmx_mtx)
#define VMXNET3_CORE_LOCK_ASSERT(_sc)	mutex_owned((_sc)->vmx_mtx)

#define VMXNET3_RXQ_LOCK(_rxq)		mutex_enter((_rxq)->vxrxq_mtx)
#define VMXNET3_RXQ_UNLOCK(_rxq)	mutex_exit((_rxq)->vxrxq_mtx)
#define VMXNET3_RXQ_LOCK_ASSERT(_rxq) \
    mutex_owned((_rxq)->vxrxq_mtx)

#define VMXNET3_TXQ_LOCK(_txq)		mutex_enter((_txq)->vxtxq_mtx)
#define VMXNET3_TXQ_TRYLOCK(_txq)	mutex_tryenter((_txq)->vxtxq_mtx)
#define VMXNET3_TXQ_UNLOCK(_txq)	mutex_exit((_txq)->vxtxq_mtx)
#define VMXNET3_TXQ_LOCK_ASSERT(_txq) \
    mutex_owned((_txq)->vxtxq_mtx)

struct vmxnet3_dma_alloc {
	bus_addr_t dma_paddr;
	void *dma_vaddr;
	bus_dmamap_t dma_map;
	bus_size_t dma_size;
	bus_dma_segment_t dma_segs[1];
};

struct vmxnet3_txbuf {
	bus_dmamap_t vtxb_dmamap;
	struct mbuf *vtxb_m;
};

struct vmxnet3_txring {
	struct vmxnet3_txbuf *vxtxr_txbuf;
	struct vmxnet3_txdesc *vxtxr_txd;
	u_int vxtxr_head;
	u_int vxtxr_next;
	u_int vxtxr_ndesc;
	int vxtxr_gen;
	struct vmxnet3_dma_alloc vxtxr_dma;
};

struct vmxnet3_rxbuf {
	bus_dmamap_t vrxb_dmamap;
	struct mbuf *vrxb_m;
};

struct vmxnet3_rxring {
	struct vmxnet3_rxbuf *vxrxr_rxbuf;
	struct vmxnet3_rxdesc *vxrxr_rxd;
	u_int vxrxr_fill;
	u_int vxrxr_ndesc;
	int vxrxr_gen;
	int vxrxr_rid;
	struct vmxnet3_dma_alloc vxrxr_dma;
	bus_dmamap_t vxrxr_spare_dmap;
};

struct vmxnet3_comp_ring {
	union {
		struct vmxnet3_txcompdesc *txcd;
		struct vmxnet3_rxcompdesc *rxcd;
	} vxcr_u;
	u_int vxcr_next;
	u_int vxcr_ndesc;
	int vxcr_gen;
	struct vmxnet3_dma_alloc vxcr_dma;
};

struct vmxnet3_txq_stats {
#if 0
	uint64_t vmtxs_opackets;	/* if_opackets */
	uint64_t vmtxs_obytes;		/* if_obytes */
	uint64_t vmtxs_omcasts;		/* if_omcasts */
#endif
	uint64_t vmtxs_csum;
	uint64_t vmtxs_tso;
	uint64_t vmtxs_full;
	uint64_t vmtxs_offload_failed;
};

struct vmxnet3_txqueue {
	kmutex_t *vxtxq_mtx;
	struct vmxnet3_softc *vxtxq_sc;
	int vxtxq_watchdog;
	pcq_t *vxtxq_interq;
	struct vmxnet3_txring vxtxq_cmd_ring;
	struct vmxnet3_comp_ring vxtxq_comp_ring;
	struct vmxnet3_txq_stats vxtxq_stats;
	struct vmxnet3_txq_shared *vxtxq_ts;
	char vxtxq_name[16];

	void *vxtxq_si;

	struct evcnt vxtxq_intr;
	struct evcnt vxtxq_defer;
	struct evcnt vxtxq_deferreq;
	struct evcnt vxtxq_pcqdrop;
	struct evcnt vxtxq_transmitdef;
	struct evcnt vxtxq_watchdogto;
	struct evcnt vxtxq_defragged;
	struct evcnt vxtxq_defrag_failed;
};

#if 0
struct vmxnet3_rxq_stats {
	uint64_t vmrxs_ipackets;	/* if_ipackets */
	uint64_t vmrxs_ibytes;		/* if_ibytes */
	uint64_t vmrxs_iqdrops;		/* if_iqdrops */
	uint64_t vmrxs_ierrors;		/* if_ierrors */
};
#endif

struct vmxnet3_rxqueue {
	kmutex_t *vxrxq_mtx;
	struct vmxnet3_softc *vxrxq_sc;
	struct mbuf *vxrxq_mhead;
	struct mbuf *vxrxq_mtail;
	struct vmxnet3_rxring vxrxq_cmd_ring[VMXNET3_RXRINGS_PERQ];
	struct vmxnet3_comp_ring vxrxq_comp_ring;
#if 0
	struct vmxnet3_rxq_stats vxrxq_stats;
#endif
	struct vmxnet3_rxq_shared *vxrxq_rs;
	char vxrxq_name[16];

	struct evcnt vxrxq_intr;
	struct evcnt vxrxq_defer;
	struct evcnt vxrxq_deferreq;
	struct evcnt vxrxq_mgetcl_failed;
	struct evcnt vxrxq_mbuf_load_failed;
};

struct vmxnet3_queue {
	int vxq_id;
	int vxq_intr_idx;

	struct vmxnet3_txqueue vxq_txqueue;
	struct vmxnet3_rxqueue vxq_rxqueue;

	void *vxq_si;
	bool vxq_workqueue;
	bool vxq_wq_enqueued;
	struct work vxq_wq_cookie;
};

struct vmxnet3_softc {
	device_t vmx_dev;
	struct ethercom vmx_ethercom;
	struct ifmedia vmx_media;
	struct vmxnet3_driver_shared *vmx_ds;
	int vmx_flags;
#define VMXNET3_FLAG_NO_MSIX	(1 << 0)
#define VMXNET3_FLAG_RSS	(1 << 1)
#define VMXNET3_FLAG_ATTACHED	(1 << 2)

	struct vmxnet3_queue *vmx_queue;

	struct pci_attach_args *vmx_pa;
	pci_chipset_tag_t vmx_pc;

	bus_space_tag_t vmx_iot0;
	bus_space_tag_t vmx_iot1;
	bus_space_handle_t vmx_ioh0;
	bus_space_handle_t vmx_ioh1;
	bus_size_t vmx_ios0;
	bus_size_t vmx_ios1;
	bus_dma_tag_t vmx_dmat;

	int vmx_link_active;
	int vmx_ntxqueues;
	int vmx_nrxqueues;
	int vmx_ntxdescs;
	int vmx_nrxdescs;
	int vmx_max_rxsegs;

	struct evcnt vmx_event_intr;
	struct evcnt vmx_event_link;
	struct evcnt vmx_event_txqerror;
	struct evcnt vmx_event_rxqerror;
	struct evcnt vmx_event_dic;
	struct evcnt vmx_event_debug;

	int vmx_intr_type;
	int vmx_intr_mask_mode;
	int vmx_event_intr_idx;
	int vmx_nintrs;
	pci_intr_handle_t *vmx_intrs;	/* legacy uses vmx_intrs[0] */
	void *vmx_ihs[VMXNET3_MAX_INTRS];

	kmutex_t *vmx_mtx;

	uint8_t *vmx_mcast;
	void *vmx_qs;
	struct vmxnet3_rss_shared *vmx_rss;
	callout_t vmx_tick;
	struct vmxnet3_dma_alloc vmx_ds_dma;
	struct vmxnet3_dma_alloc vmx_qs_dma;
	struct vmxnet3_dma_alloc vmx_mcast_dma;
	struct vmxnet3_dma_alloc vmx_rss_dma;
	int vmx_max_ntxqueues;
	int vmx_max_nrxqueues;
	uint8_t vmx_lladdr[ETHER_ADDR_LEN];

	u_int vmx_rx_intr_process_limit;
	u_int vmx_tx_intr_process_limit;
	u_int vmx_rx_process_limit;
	u_int vmx_tx_process_limit;
	struct sysctllog *vmx_sysctllog;

	bool vmx_txrx_workqueue;
	struct workqueue *vmx_queue_wq;
};

#define VMXNET3_STAT

#ifdef VMXNET3_STAT
struct {
	u_int txhead;
	u_int txdone;
	u_int maxtxlen;
	u_int rxdone;
	u_int rxfill;
	u_int intr;
} vmxstat;
#endif

typedef enum {
	VMXNET3_BARRIER_RD,
	VMXNET3_BARRIER_WR,
} vmxnet3_barrier_t;

#define JUMBO_LEN (MCLBYTES - ETHER_ALIGN)	/* XXX */
#define DMAADDR(map) ((map)->dm_segs[0].ds_addr)

#define vtophys(va) 0	/* XXX ok? */

static int vmxnet3_match(device_t, cfdata_t, void *);
static void vmxnet3_attach(device_t, device_t, void *);
static int vmxnet3_detach(device_t, int);

static int vmxnet3_alloc_pci_resources(struct vmxnet3_softc *);
static void vmxnet3_free_pci_resources(struct vmxnet3_softc *);
static int vmxnet3_check_version(struct vmxnet3_softc *);
static void vmxnet3_check_multiqueue(struct vmxnet3_softc *);

static int vmxnet3_alloc_msix_interrupts(struct vmxnet3_softc *);
static int vmxnet3_alloc_msi_interrupts(struct vmxnet3_softc *);
static int vmxnet3_alloc_legacy_interrupts(struct vmxnet3_softc *);
static int vmxnet3_alloc_interrupts(struct vmxnet3_softc *);
static void vmxnet3_free_interrupts(struct vmxnet3_softc *);

static int vmxnet3_setup_msix_interrupts(struct vmxnet3_softc *);
static int vmxnet3_setup_msi_interrupt(struct vmxnet3_softc *);
static int vmxnet3_setup_legacy_interrupt(struct vmxnet3_softc *);
static void vmxnet3_set_interrupt_idx(struct vmxnet3_softc *);
static int vmxnet3_setup_interrupts(struct vmxnet3_softc *);
static int vmxnet3_setup_sysctl(struct vmxnet3_softc *);

static int vmxnet3_setup_stats(struct vmxnet3_softc *);
static void vmxnet3_teardown_stats(struct vmxnet3_softc *);

static int vmxnet3_init_rxq(struct vmxnet3_softc *, int);
static int vmxnet3_init_txq(struct vmxnet3_softc *, int);
static int vmxnet3_alloc_rxtx_queues(struct vmxnet3_softc *);
static void vmxnet3_destroy_rxq(struct vmxnet3_rxqueue *);
static void vmxnet3_destroy_txq(struct vmxnet3_txqueue *);
static void vmxnet3_free_rxtx_queues(struct vmxnet3_softc *);

static int vmxnet3_alloc_shared_data(struct vmxnet3_softc *);
static void vmxnet3_free_shared_data(struct vmxnet3_softc *);
static int vmxnet3_alloc_txq_data(struct vmxnet3_softc *);
static void vmxnet3_free_txq_data(struct vmxnet3_softc *);
static int vmxnet3_alloc_rxq_data(struct vmxnet3_softc *);
static void vmxnet3_free_rxq_data(struct vmxnet3_softc *);
static int vmxnet3_alloc_queue_data(struct vmxnet3_softc *);
static void vmxnet3_free_queue_data(struct vmxnet3_softc *);
static int vmxnet3_alloc_mcast_table(struct vmxnet3_softc *);
static void vmxnet3_free_mcast_table(struct vmxnet3_softc *);
static void vmxnet3_init_shared_data(struct vmxnet3_softc *);
static void vmxnet3_reinit_rss_shared_data(struct vmxnet3_softc *);
static void vmxnet3_reinit_shared_data(struct vmxnet3_softc *);
static int vmxnet3_alloc_data(struct vmxnet3_softc *);
static void vmxnet3_free_data(struct vmxnet3_softc *);
static int vmxnet3_setup_interface(struct vmxnet3_softc *);

static void vmxnet3_evintr(struct vmxnet3_softc *);
static bool vmxnet3_txq_eof(struct vmxnet3_txqueue *, u_int);
static int vmxnet3_newbuf(struct vmxnet3_softc *, struct vmxnet3_rxqueue *,
    struct vmxnet3_rxring *);
static void vmxnet3_rxq_eof_discard(struct vmxnet3_rxqueue *,
    struct vmxnet3_rxring *, int);
static void vmxnet3_rxq_discard_chain(struct vmxnet3_rxqueue *);
static void vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *, struct mbuf *);
static void vmxnet3_rxq_input(struct vmxnet3_rxqueue *,
    struct vmxnet3_rxcompdesc *, struct mbuf *);
static bool vmxnet3_rxq_eof(struct vmxnet3_rxqueue *, u_int);
static int vmxnet3_legacy_intr(void *);
static int vmxnet3_txrxq_intr(void *);
static void vmxnet3_handle_queue(void *);
static void vmxnet3_handle_queue_work(struct work *, void *);
static int vmxnet3_event_intr(void *);

static void vmxnet3_txstop(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
static void vmxnet3_rxstop(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
static void vmxnet3_stop_locked(struct vmxnet3_softc *);
static void vmxnet3_stop_rendezvous(struct vmxnet3_softc *);
static void vmxnet3_stop(struct ifnet *, int);

static void vmxnet3_txinit(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
static int vmxnet3_rxinit(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
static int vmxnet3_reinit_queues(struct vmxnet3_softc *);
static int vmxnet3_enable_device(struct vmxnet3_softc *);
static void vmxnet3_reinit_rxfilters(struct vmxnet3_softc *);
static int vmxnet3_reinit(struct vmxnet3_softc *);

static int vmxnet3_init_locked(struct vmxnet3_softc *);
static int vmxnet3_init(struct ifnet *);

static int vmxnet3_txq_offload_ctx(struct vmxnet3_txqueue *, struct mbuf *,
    int *, int *);
static int vmxnet3_txq_load_mbuf(struct vmxnet3_txqueue *, struct mbuf **,
    bus_dmamap_t);
static void vmxnet3_txq_unload_mbuf(struct vmxnet3_txqueue *, bus_dmamap_t);
static int vmxnet3_txq_encap(struct vmxnet3_txqueue *, struct mbuf **);
static void vmxnet3_start_locked(struct ifnet *);
static void vmxnet3_start(struct ifnet *);
static void vmxnet3_transmit_locked(struct ifnet *, struct vmxnet3_txqueue *);
static int vmxnet3_transmit(struct ifnet *, struct mbuf *);
static void vmxnet3_deferred_transmit(void *);

static void vmxnet3_set_rxfilter(struct vmxnet3_softc *);
static int vmxnet3_ioctl(struct ifnet *, u_long, void *);
static int vmxnet3_ifflags_cb(struct ethercom *);

static int vmxnet3_watchdog(struct vmxnet3_txqueue *);
static void vmxnet3_refresh_host_stats(struct vmxnet3_softc *);
static void vmxnet3_tick(void *);
static void vmxnet3_if_link_status(struct vmxnet3_softc *);
static bool vmxnet3_cmd_link_status(struct ifnet *);
static void vmxnet3_ifmedia_status(struct ifnet *, struct ifmediareq *);
static int vmxnet3_ifmedia_change(struct ifnet *);
static void vmxnet3_set_lladdr(struct vmxnet3_softc *);
static void vmxnet3_get_lladdr(struct vmxnet3_softc *);

static void vmxnet3_enable_all_intrs(struct vmxnet3_softc *);
static void vmxnet3_disable_all_intrs(struct vmxnet3_softc *);

static int vmxnet3_dma_malloc(struct vmxnet3_softc *, bus_size_t, bus_size_t,
    struct vmxnet3_dma_alloc *);
static void vmxnet3_dma_free(struct vmxnet3_softc *,
    struct vmxnet3_dma_alloc *);

CFATTACH_DECL3_NEW(vmx, sizeof(struct vmxnet3_softc),
    vmxnet3_match, vmxnet3_attach, vmxnet3_detach, NULL, NULL, NULL, 0);

/* round down to the nearest power of 2 */
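/* e.g. 6 -> 4, 8 -> 8; n <= 0 yields 1 */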
static int
vmxnet3_calc_queue_size(int n)
{

	if (__predict_false(n <= 0))
		return 1;

	return (1U << (fls32(n) - 1));
}

static inline void
vmxnet3_write_bar0(struct vmxnet3_softc *sc, bus_size_t r, uint32_t v)
{

	bus_space_write_4(sc->vmx_iot0, sc->vmx_ioh0, r, v);
}

static inline uint32_t
vmxnet3_read_bar1(struct vmxnet3_softc *sc, bus_size_t r)
{

	return (bus_space_read_4(sc->vmx_iot1, sc->vmx_ioh1, r));
}

static inline void
vmxnet3_write_bar1(struct vmxnet3_softc *sc, bus_size_t r, uint32_t v)
{

	bus_space_write_4(sc->vmx_iot1, sc->vmx_ioh1, r, v);
}

static inline void
vmxnet3_write_cmd(struct vmxnet3_softc *sc, uint32_t cmd)
{

	vmxnet3_write_bar1(sc, VMXNET3_BAR1_CMD, cmd);
}

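/*
 * Commands are issued by writing the command code to the BAR1 command
 * register; the device then returns the result through a read of the
 * same register.
 */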
static inline uint32_t
vmxnet3_read_cmd(struct vmxnet3_softc *sc, uint32_t cmd)
{

	vmxnet3_write_cmd(sc, cmd);
	return (vmxnet3_read_bar1(sc, VMXNET3_BAR1_CMD));
}

static inline void
vmxnet3_enable_intr(struct vmxnet3_softc *sc, int irq)
{
	vmxnet3_write_bar0(sc, VMXNET3_BAR0_IMASK(irq), 0);
}

static inline void
vmxnet3_disable_intr(struct vmxnet3_softc *sc, int irq)
{
	vmxnet3_write_bar0(sc, VMXNET3_BAR0_IMASK(irq), 1);
}

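/*
 * Advance the Rx ring fill index; when it wraps back to zero, toggle
 * the ring's generation bit so the device can distinguish fresh
 * descriptors from stale ones.
 */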
static inline void
vmxnet3_rxr_increment_fill(struct vmxnet3_rxring *rxr)
{

	if (++rxr->vxrxr_fill == rxr->vxrxr_ndesc) {
		rxr->vxrxr_fill = 0;
		rxr->vxrxr_gen ^= 1;
	}
}

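/*
 * Free descriptors in the circular Tx ring, always leaving one slot
 * unused so that head == next stays unambiguous; e.g. with ndesc = 512,
 * next = 10 and head = 200, 512 + (10 - 200 - 1) = 321 are available.
 */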
static inline int
vmxnet3_txring_avail(struct vmxnet3_txring *txr)
{
	int avail = txr->vxtxr_next - txr->vxtxr_head - 1;
	return (avail < 0 ? (int)txr->vxtxr_ndesc + avail : avail);
}

/*
 * Since this is a purely paravirtualized device, we do not have
 * to worry about DMA coherency. But at times, we must make sure
 * both the compiler and CPU do not reorder memory operations.
 */
static inline void
vmxnet3_barrier(struct vmxnet3_softc *sc, vmxnet3_barrier_t type)
{

	switch (type) {
	case VMXNET3_BARRIER_RD:
		membar_consumer();
		break;
	case VMXNET3_BARRIER_WR:
		membar_producer();
		break;
	default:
		panic("%s: bad barrier type %d", __func__, type);
	}
}

static int
vmxnet3_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_VMWARE &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VMWARE_VMXNET3)
		return 1;

	return 0;
}

static void
vmxnet3_attach(device_t parent, device_t self, void *aux)
{
	struct vmxnet3_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pcireg_t preg;
	int error;
	int candidate;

	sc->vmx_dev = self;
	sc->vmx_pa = pa;
	sc->vmx_pc = pa->pa_pc;
	if (pci_dma64_available(pa))
		sc->vmx_dmat = pa->pa_dmat64;
	else
		sc->vmx_dmat = pa->pa_dmat;

	pci_aprint_devinfo_fancy(pa, "Ethernet controller", "vmxnet3", 1);

	preg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	sc->vmx_mtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
	callout_init(&sc->vmx_tick, CALLOUT_MPSAFE);

	candidate = MIN(MIN(VMXNET3_MAX_TX_QUEUES, VMXNET3_MAX_RX_QUEUES),
	    ncpu);
	sc->vmx_max_ntxqueues = sc->vmx_max_nrxqueues =
	    vmxnet3_calc_queue_size(candidate);
	sc->vmx_ntxdescs = 512;
	sc->vmx_nrxdescs = 256;
	sc->vmx_max_rxsegs = VMXNET3_MAX_RX_SEGS;

	error = vmxnet3_alloc_pci_resources(sc);
	if (error)
		return;

	error = vmxnet3_check_version(sc);
	if (error)
		return;

	error = vmxnet3_alloc_rxtx_queues(sc);
	if (error)
		return;

	error = vmxnet3_alloc_interrupts(sc);
	if (error)
		return;

	vmxnet3_check_multiqueue(sc);

	error = vmxnet3_alloc_data(sc);
	if (error)
		return;

	error = vmxnet3_setup_interface(sc);
	if (error)
		return;

	error = vmxnet3_setup_interrupts(sc);
	if (error)
		return;

	error = vmxnet3_setup_sysctl(sc);
	if (error)
		return;

	error = vmxnet3_setup_stats(sc);
	if (error)
		return;

	sc->vmx_flags |= VMXNET3_FLAG_ATTACHED;
}

static int
vmxnet3_detach(device_t self, int flags)
{
	struct vmxnet3_softc *sc;
	struct ifnet *ifp;

	sc = device_private(self);
	ifp = &sc->vmx_ethercom.ec_if;

	if (sc->vmx_flags & VMXNET3_FLAG_ATTACHED) {
		VMXNET3_CORE_LOCK(sc);
		vmxnet3_stop_locked(sc);
		callout_halt(&sc->vmx_tick, sc->vmx_mtx);
		callout_destroy(&sc->vmx_tick);
		VMXNET3_CORE_UNLOCK(sc);

		ether_ifdetach(ifp);
		if_detach(ifp);
		ifmedia_fini(&sc->vmx_media);
	}

	vmxnet3_teardown_stats(sc);
	sysctl_teardown(&sc->vmx_sysctllog);

	vmxnet3_free_interrupts(sc);

	vmxnet3_free_data(sc);
	vmxnet3_free_pci_resources(sc);
	vmxnet3_free_rxtx_queues(sc);

	if (sc->vmx_mtx)
		mutex_obj_free(sc->vmx_mtx);

	return (0);
}

static int
vmxnet3_alloc_pci_resources(struct vmxnet3_softc *sc)
{
	struct pci_attach_args *pa = sc->vmx_pa;
	pcireg_t memtype;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
	if (pci_mapreg_map(pa, PCI_BAR(0), memtype, 0, &sc->vmx_iot0,
	    &sc->vmx_ioh0, NULL, &sc->vmx_ios0)) {
		aprint_error_dev(sc->vmx_dev, "failed to map BAR0\n");
		return (ENXIO);
	}
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(1));
	if (pci_mapreg_map(pa, PCI_BAR(1), memtype, 0, &sc->vmx_iot1,
	    &sc->vmx_ioh1, NULL, &sc->vmx_ios1)) {
		aprint_error_dev(sc->vmx_dev, "failed to map BAR1\n");
		return (ENXIO);
	}

	if (!pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSIX, NULL,
	    NULL)) {
		sc->vmx_flags |= VMXNET3_FLAG_NO_MSIX;
		return (0);
	}

	return (0);
}

static void
vmxnet3_free_pci_resources(struct vmxnet3_softc *sc)
{

	if (sc->vmx_ios0) {
		bus_space_unmap(sc->vmx_iot0, sc->vmx_ioh0, sc->vmx_ios0);
		sc->vmx_ios0 = 0;
	}

	if (sc->vmx_ios1) {
		bus_space_unmap(sc->vmx_iot1, sc->vmx_ioh1, sc->vmx_ios1);
		sc->vmx_ios1 = 0;
	}
}

static int
vmxnet3_check_version(struct vmxnet3_softc *sc)
{
	u_int ver;

	ver = vmxnet3_read_bar1(sc, VMXNET3_BAR1_VRRS);
	if ((ver & 0x1) == 0) {
		aprint_error_dev(sc->vmx_dev,
		    "unsupported hardware version 0x%x\n", ver);
		return (ENOTSUP);
	}
	vmxnet3_write_bar1(sc, VMXNET3_BAR1_VRRS, 1);

	ver = vmxnet3_read_bar1(sc, VMXNET3_BAR1_UVRS);
	if ((ver & 0x1) == 0) {
		aprint_error_dev(sc->vmx_dev,
		    "incompatible UPT version 0x%x\n", ver);
		return (ENOTSUP);
	}
	vmxnet3_write_bar1(sc, VMXNET3_BAR1_UVRS, 1);

	return (0);
}

static void
vmxnet3_check_multiqueue(struct vmxnet3_softc *sc)
{

	if (sc->vmx_intr_type != VMXNET3_IT_MSIX)
		goto out;

	/* Just use the maximum configured for now. */
	sc->vmx_nrxqueues = sc->vmx_max_nrxqueues;
	sc->vmx_ntxqueues = sc->vmx_max_ntxqueues;

	if (sc->vmx_nrxqueues > 1)
		sc->vmx_flags |= VMXNET3_FLAG_RSS;

	return;

out:
	sc->vmx_ntxqueues = 1;
	sc->vmx_nrxqueues = 1;
}

static int
vmxnet3_alloc_msix_interrupts(struct vmxnet3_softc *sc)
{
	int required;
	struct pci_attach_args *pa = sc->vmx_pa;

	if (sc->vmx_flags & VMXNET3_FLAG_NO_MSIX)
		return (1);

	/* Allocate an additional vector for the events interrupt. */
	required = MIN(sc->vmx_max_ntxqueues, sc->vmx_max_nrxqueues) + 1;

	if (pci_msix_count(pa->pa_pc, pa->pa_tag) < required)
		return (1);

	if (pci_msix_alloc_exact(pa, &sc->vmx_intrs, required) == 0) {
		sc->vmx_nintrs = required;
		return (0);
	}

	return (1);
}

static int
vmxnet3_alloc_msi_interrupts(struct vmxnet3_softc *sc)
{
	int nmsi, required;
	struct pci_attach_args *pa = sc->vmx_pa;

	required = 1;

	nmsi = pci_msi_count(pa->pa_pc, pa->pa_tag);
	if (nmsi < required)
		return (1);

	if (pci_msi_alloc_exact(pa, &sc->vmx_intrs, required) == 0) {
		sc->vmx_nintrs = required;
		return (0);
	}

	return (1);
}

static int
vmxnet3_alloc_legacy_interrupts(struct vmxnet3_softc *sc)
{

	if (pci_intx_alloc(sc->vmx_pa, &sc->vmx_intrs) == 0) {
		sc->vmx_nintrs = 1;
		return (0);
	}

	return (1);
}

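/*
 * Ask the device for its preferred interrupt type, then fall back from
 * MSI-X to MSI to INTx until an allocation succeeds.
 */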
static int
vmxnet3_alloc_interrupts(struct vmxnet3_softc *sc)
{
	u_int config;
	int error;

	config = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_INTRCFG);

	sc->vmx_intr_type = config & 0x03;
	sc->vmx_intr_mask_mode = (config >> 2) & 0x03;

	switch (sc->vmx_intr_type) {
	case VMXNET3_IT_AUTO:
		sc->vmx_intr_type = VMXNET3_IT_MSIX;
		/* FALLTHROUGH */
	case VMXNET3_IT_MSIX:
		error = vmxnet3_alloc_msix_interrupts(sc);
		if (error == 0)
			break;
		sc->vmx_intr_type = VMXNET3_IT_MSI;
		/* FALLTHROUGH */
	case VMXNET3_IT_MSI:
		error = vmxnet3_alloc_msi_interrupts(sc);
		if (error == 0)
			break;
		sc->vmx_intr_type = VMXNET3_IT_LEGACY;
		/* FALLTHROUGH */
	case VMXNET3_IT_LEGACY:
		error = vmxnet3_alloc_legacy_interrupts(sc);
		if (error == 0)
			break;
		/* FALLTHROUGH */
	default:
		sc->vmx_intr_type = -1;
		aprint_error_dev(sc->vmx_dev,
		    "cannot allocate any interrupt resources\n");
		return (ENXIO);
	}

	return (error);
}

static void
vmxnet3_free_interrupts(struct vmxnet3_softc *sc)
{
	pci_chipset_tag_t pc = sc->vmx_pc;
	int i;

	workqueue_destroy(sc->vmx_queue_wq);
	for (i = 0; i < sc->vmx_ntxqueues; i++) {
		struct vmxnet3_queue *vmxq = &sc->vmx_queue[i];

		softint_disestablish(vmxq->vxq_si);
		vmxq->vxq_si = NULL;
	}
	for (i = 0; i < sc->vmx_nintrs; i++) {
		pci_intr_disestablish(pc, sc->vmx_ihs[i]);
	}
	pci_intr_release(pc, sc->vmx_intrs, sc->vmx_nintrs);
}

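/*
 * With MSI-X, each Tx/Rx queue pair gets its own vector and softint
 * (queue work may instead be deferred to the per-CPU workqueue), and
 * one additional vector is reserved for device events.
 */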
static int
vmxnet3_setup_msix_interrupts(struct vmxnet3_softc *sc)
{
	pci_chipset_tag_t pc = sc->vmx_pa->pa_pc;
	struct vmxnet3_queue *vmxq;
	pci_intr_handle_t *intr;
	void **ihs;
	int intr_idx, i, use_queues, error;
	kcpuset_t *affinity;
	const char *intrstr;
	char intrbuf[PCI_INTRSTR_LEN];
	char xnamebuf[32];

	intr = sc->vmx_intrs;
	intr_idx = 0;
	ihs = sc->vmx_ihs;

	/* See vmxnet3_alloc_msix_interrupts() */
	use_queues = MIN(sc->vmx_max_ntxqueues, sc->vmx_max_nrxqueues);
	for (i = 0; i < use_queues; i++, intr++, ihs++, intr_idx++) {
		snprintf(xnamebuf, 32, "%s: txrx %d",
		    device_xname(sc->vmx_dev), i);

		vmxq = &sc->vmx_queue[i];

		intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf));

		pci_intr_setattr(pc, intr, PCI_INTR_MPSAFE, true);
		*ihs = pci_intr_establish_xname(pc, *intr, IPL_NET,
		    vmxnet3_txrxq_intr, vmxq, xnamebuf);
		if (*ihs == NULL) {
			aprint_error_dev(sc->vmx_dev,
			    "unable to establish txrx interrupt at %s\n",
			    intrstr);
			return (-1);
		}
		aprint_normal_dev(sc->vmx_dev, "txrx interrupting at %s\n",
		    intrstr);

		kcpuset_create(&affinity, true);
		kcpuset_set(affinity, intr_idx % ncpu);
		error = interrupt_distribute(*ihs, affinity, NULL);
		if (error) {
			aprint_normal_dev(sc->vmx_dev,
			    "unable to change affinity for %s, "
			    "using default CPU\n", intrstr);
		}
		kcpuset_destroy(affinity);

		vmxq->vxq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
		    vmxnet3_handle_queue, vmxq);
		if (vmxq->vxq_si == NULL) {
			aprint_error_dev(sc->vmx_dev,
			    "softint_establish for vxq_si failed\n");
			return (-1);
		}

		vmxq->vxq_intr_idx = intr_idx;
	}
	snprintf(xnamebuf, MAXCOMLEN, "%s_tx_rx", device_xname(sc->vmx_dev));
	error = workqueue_create(&sc->vmx_queue_wq, xnamebuf,
	    vmxnet3_handle_queue_work, sc, VMXNET3_WORKQUEUE_PRI, IPL_NET,
	    WQ_PERCPU | WQ_MPSAFE);
	if (error) {
		aprint_error_dev(sc->vmx_dev, "workqueue_create failed\n");
		return (-1);
	}
	sc->vmx_txrx_workqueue = false;

	intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf));

	snprintf(xnamebuf, 32, "%s: link", device_xname(sc->vmx_dev));
	pci_intr_setattr(pc, intr, PCI_INTR_MPSAFE, true);
	*ihs = pci_intr_establish_xname(pc, *intr, IPL_NET,
	    vmxnet3_event_intr, sc, xnamebuf);
	if (*ihs == NULL) {
		aprint_error_dev(sc->vmx_dev,
		    "unable to establish event interrupt at %s\n", intrstr);
		return (-1);
	}
	aprint_normal_dev(sc->vmx_dev, "event interrupting at %s\n", intrstr);

	sc->vmx_event_intr_idx = intr_idx;

	return (0);
}

static int
vmxnet3_setup_msi_interrupt(struct vmxnet3_softc *sc)
{
	pci_chipset_tag_t pc = sc->vmx_pa->pa_pc;
	pci_intr_handle_t *intr;
	void **ihs;
	struct vmxnet3_queue *vmxq;
	int i;
	const char *intrstr;
	char intrbuf[PCI_INTRSTR_LEN];
	char xnamebuf[32];

	intr = &sc->vmx_intrs[0];
	ihs = sc->vmx_ihs;
	vmxq = &sc->vmx_queue[0];

	intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf));

	snprintf(xnamebuf, 32, "%s: msi", device_xname(sc->vmx_dev));
	pci_intr_setattr(pc, intr, PCI_INTR_MPSAFE, true);
	*ihs = pci_intr_establish_xname(pc, *intr, IPL_NET,
	    vmxnet3_legacy_intr, sc, xnamebuf);
	if (*ihs == NULL) {
		aprint_error_dev(sc->vmx_dev,
		    "unable to establish interrupt at %s\n", intrstr);
		return (-1);
	}
	aprint_normal_dev(sc->vmx_dev, "interrupting at %s\n", intrstr);

	vmxq->vxq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
	    vmxnet3_handle_queue, vmxq);
	if (vmxq->vxq_si == NULL) {
		aprint_error_dev(sc->vmx_dev,
		    "softint_establish for vxq_si failed\n");
		return (-1);
	}

	for (i = 0; i < MIN(sc->vmx_ntxqueues, sc->vmx_nrxqueues); i++)
		sc->vmx_queue[i].vxq_intr_idx = 0;
	sc->vmx_event_intr_idx = 0;

	return (0);
}

static int
vmxnet3_setup_legacy_interrupt(struct vmxnet3_softc *sc)
{
	pci_chipset_tag_t pc = sc->vmx_pa->pa_pc;
	pci_intr_handle_t *intr;
	void **ihs;
	struct vmxnet3_queue *vmxq;
	int i;
	const char *intrstr;
	char intrbuf[PCI_INTRSTR_LEN];
	char xnamebuf[32];

	intr = &sc->vmx_intrs[0];
	ihs = sc->vmx_ihs;
	vmxq = &sc->vmx_queue[0];

	intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf));

	snprintf(xnamebuf, 32, "%s:legacy", device_xname(sc->vmx_dev));
	pci_intr_setattr(pc, intr, PCI_INTR_MPSAFE, true);
	*ihs = pci_intr_establish_xname(pc, *intr, IPL_NET,
	    vmxnet3_legacy_intr, sc, xnamebuf);
	if (*ihs == NULL) {
		aprint_error_dev(sc->vmx_dev,
		    "unable to establish interrupt at %s\n", intrstr);
		return (-1);
	}
	aprint_normal_dev(sc->vmx_dev, "interrupting at %s\n", intrstr);

	vmxq->vxq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
	    vmxnet3_handle_queue, vmxq);
	if (vmxq->vxq_si == NULL) {
		aprint_error_dev(sc->vmx_dev,
		    "softint_establish for vxq_si failed\n");
		return (-1);
	}

	for (i = 0; i < MIN(sc->vmx_ntxqueues, sc->vmx_nrxqueues); i++)
		sc->vmx_queue[i].vxq_intr_idx = 0;
	sc->vmx_event_intr_idx = 0;

	return (0);
}

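/*
 * Publish each queue's interrupt vector index (and the event vector)
 * in the shared memory structures so the device raises the right
 * vector for each queue.
 */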
static void
vmxnet3_set_interrupt_idx(struct vmxnet3_softc *sc)
{
	struct vmxnet3_queue *vmxq;
	struct vmxnet3_txqueue *txq;
	struct vmxnet3_txq_shared *txs;
	struct vmxnet3_rxqueue *rxq;
	struct vmxnet3_rxq_shared *rxs;
	int i;

	sc->vmx_ds->evintr = sc->vmx_event_intr_idx;

	for (i = 0; i < sc->vmx_ntxqueues; i++) {
		vmxq = &sc->vmx_queue[i];
		txq = &vmxq->vxq_txqueue;
		txs = txq->vxtxq_ts;
		txs->intr_idx = vmxq->vxq_intr_idx;
	}

	for (i = 0; i < sc->vmx_nrxqueues; i++) {
		vmxq = &sc->vmx_queue[i];
		rxq = &vmxq->vxq_rxqueue;
		rxs = rxq->vxrxq_rs;
		rxs->intr_idx = vmxq->vxq_intr_idx;
	}
}

static int
vmxnet3_setup_interrupts(struct vmxnet3_softc *sc)
{
	int error;

	switch (sc->vmx_intr_type) {
	case VMXNET3_IT_MSIX:
		error = vmxnet3_setup_msix_interrupts(sc);
		break;
	case VMXNET3_IT_MSI:
		error = vmxnet3_setup_msi_interrupt(sc);
		break;
	case VMXNET3_IT_LEGACY:
		error = vmxnet3_setup_legacy_interrupt(sc);
		break;
	default:
		panic("%s: invalid interrupt type %d", __func__,
		    sc->vmx_intr_type);
	}

	if (error == 0)
		vmxnet3_set_interrupt_idx(sc);

	return (error);
}

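/*
 * Each Rx queue is backed by VMXNET3_RXRINGS_PERQ command rings; the
 * completion ring is sized to cover all of them.
 */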
static int
vmxnet3_init_rxq(struct vmxnet3_softc *sc, int q)
{
	struct vmxnet3_rxqueue *rxq;
	struct vmxnet3_rxring *rxr;
	int i;

	rxq = &sc->vmx_queue[q].vxq_rxqueue;

	snprintf(rxq->vxrxq_name, sizeof(rxq->vxrxq_name), "%s-rx%d",
	    device_xname(sc->vmx_dev), q);
	rxq->vxrxq_mtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET /* XXX */);

	rxq->vxrxq_sc = sc;

	for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
		rxr = &rxq->vxrxq_cmd_ring[i];
		rxr->vxrxr_rid = i;
		rxr->vxrxr_ndesc = sc->vmx_nrxdescs;
		rxr->vxrxr_rxbuf = kmem_zalloc(rxr->vxrxr_ndesc *
		    sizeof(struct vmxnet3_rxbuf), KM_SLEEP);

		rxq->vxrxq_comp_ring.vxcr_ndesc += sc->vmx_nrxdescs;
	}

	return (0);
}

static int
vmxnet3_init_txq(struct vmxnet3_softc *sc, int q)
{
	struct vmxnet3_txqueue *txq;
	struct vmxnet3_txring *txr;

	txq = &sc->vmx_queue[q].vxq_txqueue;
	txr = &txq->vxtxq_cmd_ring;

	snprintf(txq->vxtxq_name, sizeof(txq->vxtxq_name), "%s-tx%d",
	    device_xname(sc->vmx_dev), q);
	txq->vxtxq_mtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET /* XXX */);

	txq->vxtxq_sc = sc;

	txq->vxtxq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
	    vmxnet3_deferred_transmit, txq);
	if (txq->vxtxq_si == NULL) {
		mutex_obj_free(txq->vxtxq_mtx);
		aprint_error_dev(sc->vmx_dev,
		    "softint_establish for vxtxq_si failed\n");
		return ENOMEM;
	}

	txr->vxtxr_ndesc = sc->vmx_ntxdescs;
	txr->vxtxr_txbuf = kmem_zalloc(txr->vxtxr_ndesc *
	    sizeof(struct vmxnet3_txbuf), KM_SLEEP);

	txq->vxtxq_comp_ring.vxcr_ndesc = sc->vmx_ntxdescs;

	txq->vxtxq_interq = pcq_create(sc->vmx_ntxdescs, KM_SLEEP);

	return (0);
}

static int
vmxnet3_alloc_rxtx_queues(struct vmxnet3_softc *sc)
{
	int i, error, max_nqueues;

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

	/*
	 * Only attempt to create multiple queues if MSIX is available.
	 * This check prevents us from allocating queue structures that
	 * we will not use.
	 *
	 * FreeBSD:
	 * MSIX is disabled by default because it's apparently broken for
	 * devices passed through by at least ESXi 5.1.
	 * The hw.pci.honor_msi_blacklist tunable must be set to zero for MSIX.
	 */
	if (sc->vmx_flags & VMXNET3_FLAG_NO_MSIX) {
		sc->vmx_max_nrxqueues = 1;
		sc->vmx_max_ntxqueues = 1;
	}

	max_nqueues = MAX(sc->vmx_max_ntxqueues, sc->vmx_max_nrxqueues);
	sc->vmx_queue = kmem_zalloc(sizeof(struct vmxnet3_queue) * max_nqueues,
	    KM_SLEEP);

	for (i = 0; i < max_nqueues; i++) {
		struct vmxnet3_queue *vmxq = &sc->vmx_queue[i];
		vmxq->vxq_id = i;
	}

	for (i = 0; i < sc->vmx_max_nrxqueues; i++) {
		error = vmxnet3_init_rxq(sc, i);
		if (error)
			return (error);
	}

	for (i = 0; i < sc->vmx_max_ntxqueues; i++) {
		error = vmxnet3_init_txq(sc, i);
		if (error)
			return (error);
	}

	return (0);
}

static void
vmxnet3_destroy_rxq(struct vmxnet3_rxqueue *rxq)
{
	struct vmxnet3_rxring *rxr;
	int i;

	rxq->vxrxq_sc = NULL;

	for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
		rxr = &rxq->vxrxq_cmd_ring[i];

		if (rxr->vxrxr_rxbuf != NULL) {
			kmem_free(rxr->vxrxr_rxbuf,
			    rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxbuf));
			rxr->vxrxr_rxbuf = NULL;
		}
	}

	if (rxq->vxrxq_mtx != NULL)
		mutex_obj_free(rxq->vxrxq_mtx);
}

static void
vmxnet3_destroy_txq(struct vmxnet3_txqueue *txq)
{
	struct vmxnet3_txring *txr;
	struct mbuf *m;

	txr = &txq->vxtxq_cmd_ring;

	txq->vxtxq_sc = NULL;

	softint_disestablish(txq->vxtxq_si);

	while ((m = pcq_get(txq->vxtxq_interq)) != NULL)
		m_freem(m);
	pcq_destroy(txq->vxtxq_interq);

	if (txr->vxtxr_txbuf != NULL) {
		kmem_free(txr->vxtxr_txbuf,
		    txr->vxtxr_ndesc * sizeof(struct vmxnet3_txbuf));
		txr->vxtxr_txbuf = NULL;
	}

	if (txq->vxtxq_mtx != NULL)
		mutex_obj_free(txq->vxtxq_mtx);
}

static void
vmxnet3_free_rxtx_queues(struct vmxnet3_softc *sc)
{
	int i;

	if (sc->vmx_queue != NULL) {
		int max_nqueues;

		for (i = 0; i < sc->vmx_max_nrxqueues; i++)
			vmxnet3_destroy_rxq(&sc->vmx_queue[i].vxq_rxqueue);

		for (i = 0; i < sc->vmx_max_ntxqueues; i++)
			vmxnet3_destroy_txq(&sc->vmx_queue[i].vxq_txqueue);

		max_nqueues = MAX(sc->vmx_max_nrxqueues, sc->vmx_max_ntxqueues);
		kmem_free(sc->vmx_queue,
		    sizeof(struct vmxnet3_queue) * max_nqueues);
	}
}

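/*
 * Allocate the DMA-shared areas: the driver_shared structure, the
 * per-queue shared structures (all Tx queue structures first, then Rx),
 * and, when RSS is enabled, the RSS configuration block.
 */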
static int
vmxnet3_alloc_shared_data(struct vmxnet3_softc *sc)
{
	device_t dev;
	uint8_t *kva;
	size_t size;
	int i, error;

	dev = sc->vmx_dev;

	size = sizeof(struct vmxnet3_driver_shared);
	error = vmxnet3_dma_malloc(sc, size, 1, &sc->vmx_ds_dma);
	if (error) {
		device_printf(dev, "cannot alloc shared memory\n");
		return (error);
	}
	sc->vmx_ds = (struct vmxnet3_driver_shared *) sc->vmx_ds_dma.dma_vaddr;

	size = sc->vmx_ntxqueues * sizeof(struct vmxnet3_txq_shared) +
	    sc->vmx_nrxqueues * sizeof(struct vmxnet3_rxq_shared);
	error = vmxnet3_dma_malloc(sc, size, 128, &sc->vmx_qs_dma);
	if (error) {
		device_printf(dev, "cannot alloc queue shared memory\n");
		return (error);
	}
	sc->vmx_qs = (void *) sc->vmx_qs_dma.dma_vaddr;
	kva = sc->vmx_qs;

	for (i = 0; i < sc->vmx_ntxqueues; i++) {
		sc->vmx_queue[i].vxq_txqueue.vxtxq_ts =
		    (struct vmxnet3_txq_shared *) kva;
		kva += sizeof(struct vmxnet3_txq_shared);
	}
	for (i = 0; i < sc->vmx_nrxqueues; i++) {
		sc->vmx_queue[i].vxq_rxqueue.vxrxq_rs =
		    (struct vmxnet3_rxq_shared *) kva;
		kva += sizeof(struct vmxnet3_rxq_shared);
	}

	if (sc->vmx_flags & VMXNET3_FLAG_RSS) {
		size = sizeof(struct vmxnet3_rss_shared);
		error = vmxnet3_dma_malloc(sc, size, 128, &sc->vmx_rss_dma);
		if (error) {
			device_printf(dev, "cannot alloc rss shared memory\n");
			return (error);
		}
		sc->vmx_rss =
		    (struct vmxnet3_rss_shared *) sc->vmx_rss_dma.dma_vaddr;
	}

	return (0);
}

static void
vmxnet3_free_shared_data(struct vmxnet3_softc *sc)
{

	if (sc->vmx_rss != NULL) {
		vmxnet3_dma_free(sc, &sc->vmx_rss_dma);
		sc->vmx_rss = NULL;
	}

	if (sc->vmx_qs != NULL) {
		vmxnet3_dma_free(sc, &sc->vmx_qs_dma);
		sc->vmx_qs = NULL;
	}

	if (sc->vmx_ds != NULL) {
		vmxnet3_dma_free(sc, &sc->vmx_ds_dma);
		sc->vmx_ds = NULL;
	}
}

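/*
 * For each Tx queue, allocate the descriptor and completion rings and
 * create one DMA map per descriptor slot for loading mbuf chains.
 */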
1369 1.1 ryo static int
1370 1.1 ryo vmxnet3_alloc_txq_data(struct vmxnet3_softc *sc)
1371 1.1 ryo {
1372 1.1 ryo device_t dev;
1373 1.1 ryo struct vmxnet3_txqueue *txq;
1374 1.1 ryo struct vmxnet3_txring *txr;
1375 1.1 ryo struct vmxnet3_comp_ring *txc;
1376 1.1 ryo size_t descsz, compsz;
1377 1.2 ryo u_int i;
1378 1.2 ryo int q, error;
1379 1.1 ryo
1380 1.1 ryo dev = sc->vmx_dev;
1381 1.1 ryo
1382 1.1 ryo for (q = 0; q < sc->vmx_ntxqueues; q++) {
1383 1.1 ryo txq = &sc->vmx_queue[q].vxq_txqueue;
1384 1.1 ryo txr = &txq->vxtxq_cmd_ring;
1385 1.1 ryo txc = &txq->vxtxq_comp_ring;
1386 1.1 ryo
1387 1.1 ryo descsz = txr->vxtxr_ndesc * sizeof(struct vmxnet3_txdesc);
1388 1.1 ryo compsz = txr->vxtxr_ndesc * sizeof(struct vmxnet3_txcompdesc);
1389 1.1 ryo
1390 1.1 ryo error = vmxnet3_dma_malloc(sc, descsz, 512, &txr->vxtxr_dma);
1391 1.1 ryo if (error) {
1392 1.1 ryo device_printf(dev, "cannot alloc Tx descriptors for "
1393 1.1 ryo "queue %d error %d\n", q, error);
1394 1.1 ryo return (error);
1395 1.1 ryo }
1396 1.1 ryo txr->vxtxr_txd =
1397 1.1 ryo (struct vmxnet3_txdesc *) txr->vxtxr_dma.dma_vaddr;
1398 1.1 ryo
1399 1.1 ryo error = vmxnet3_dma_malloc(sc, compsz, 512, &txc->vxcr_dma);
1400 1.1 ryo if (error) {
1401 1.1 ryo device_printf(dev, "cannot alloc Tx comp descriptors "
1402 1.1 ryo "for queue %d error %d\n", q, error);
1403 1.1 ryo return (error);
1404 1.1 ryo }
1405 1.1 ryo txc->vxcr_u.txcd =
1406 1.1 ryo (struct vmxnet3_txcompdesc *) txc->vxcr_dma.dma_vaddr;
1407 1.1 ryo
1408 1.1 ryo for (i = 0; i < txr->vxtxr_ndesc; i++) {
1409 1.1 ryo error = bus_dmamap_create(sc->vmx_dmat, VMXNET3_TX_MAXSIZE,
1410 1.1 ryo VMXNET3_TX_MAXSEGS, VMXNET3_TX_MAXSEGSIZE, 0, BUS_DMA_NOWAIT,
1411 1.1 ryo &txr->vxtxr_txbuf[i].vtxb_dmamap);
1412 1.1 ryo if (error) {
1413 1.1 ryo device_printf(dev, "unable to create Tx buf "
1414 1.1 ryo "dmamap for queue %d idx %d\n", q, i);
1415 1.1 ryo return (error);
1416 1.1 ryo }
1417 1.1 ryo }
1418 1.1 ryo }
1419 1.1 ryo
1420 1.1 ryo return (0);
1421 1.1 ryo }
1422 1.1 ryo
1423 1.1 ryo static void
1424 1.1 ryo vmxnet3_free_txq_data(struct vmxnet3_softc *sc)
1425 1.1 ryo {
1426 1.1 ryo struct vmxnet3_txqueue *txq;
1427 1.1 ryo struct vmxnet3_txring *txr;
1428 1.1 ryo struct vmxnet3_comp_ring *txc;
1429 1.1 ryo struct vmxnet3_txbuf *txb;
1430 1.2 ryo u_int i;
1431 1.2 ryo int q;
1432 1.1 ryo
1433 1.1 ryo for (q = 0; q < sc->vmx_ntxqueues; q++) {
1434 1.1 ryo txq = &sc->vmx_queue[q].vxq_txqueue;
1435 1.1 ryo txr = &txq->vxtxq_cmd_ring;
1436 1.1 ryo txc = &txq->vxtxq_comp_ring;
1437 1.1 ryo
1438 1.1 ryo for (i = 0; i < txr->vxtxr_ndesc; i++) {
1439 1.1 ryo txb = &txr->vxtxr_txbuf[i];
1440 1.1 ryo if (txb->vtxb_dmamap != NULL) {
1441 1.1 ryo bus_dmamap_destroy(sc->vmx_dmat,
1442 1.1 ryo txb->vtxb_dmamap);
1443 1.1 ryo txb->vtxb_dmamap = NULL;
1444 1.1 ryo }
1445 1.1 ryo }
1446 1.1 ryo
1447 1.1 ryo if (txc->vxcr_u.txcd != NULL) {
1448 1.1 ryo vmxnet3_dma_free(sc, &txc->vxcr_dma);
1449 1.1 ryo txc->vxcr_u.txcd = NULL;
1450 1.1 ryo }
1451 1.1 ryo
1452 1.1 ryo if (txr->vxtxr_txd != NULL) {
1453 1.1 ryo vmxnet3_dma_free(sc, &txr->vxtxr_dma);
1454 1.1 ryo txr->vxtxr_txd = NULL;
1455 1.1 ryo }
1456 1.1 ryo }
1457 1.1 ryo }
1458 1.1 ryo
1459 1.1 ryo static int
1460 1.1 ryo vmxnet3_alloc_rxq_data(struct vmxnet3_softc *sc)
1461 1.1 ryo {
1462 1.1 ryo device_t dev;
1463 1.1 ryo struct vmxnet3_rxqueue *rxq;
1464 1.1 ryo struct vmxnet3_rxring *rxr;
1465 1.1 ryo struct vmxnet3_comp_ring *rxc;
1466 1.1 ryo int descsz, compsz;
1467 1.2 ryo u_int i, j;
1468 1.2 ryo int q, error;
1469 1.1 ryo
1470 1.1 ryo dev = sc->vmx_dev;
1471 1.1 ryo
1472 1.1 ryo for (q = 0; q < sc->vmx_nrxqueues; q++) {
1473 1.1 ryo rxq = &sc->vmx_queue[q].vxq_rxqueue;
1474 1.1 ryo rxc = &rxq->vxrxq_comp_ring;
1475 1.1 ryo compsz = 0;
1476 1.1 ryo
1477 1.1 ryo for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
1478 1.1 ryo rxr = &rxq->vxrxq_cmd_ring[i];
1479 1.1 ryo
1480 1.1 ryo descsz = rxr->vxrxr_ndesc *
1481 1.1 ryo sizeof(struct vmxnet3_rxdesc);
1482 1.1 ryo compsz += rxr->vxrxr_ndesc *
1483 1.1 ryo sizeof(struct vmxnet3_rxcompdesc);
1484 1.1 ryo
1485 1.1 ryo error = vmxnet3_dma_malloc(sc, descsz, 512,
1486 1.1 ryo &rxr->vxrxr_dma);
1487 1.1 ryo if (error) {
1488 1.1 ryo device_printf(dev, "cannot allocate Rx "
1489 1.1 ryo "descriptors for queue %d/%d error %d\n",
1490 1.1 ryo i, q, error);
1491 1.1 ryo return (error);
1492 1.1 ryo }
1493 1.1 ryo rxr->vxrxr_rxd =
1494 1.1 ryo (struct vmxnet3_rxdesc *) rxr->vxrxr_dma.dma_vaddr;
1495 1.1 ryo }
1496 1.1 ryo
1497 1.1 ryo error = vmxnet3_dma_malloc(sc, compsz, 512, &rxc->vxcr_dma);
1498 1.1 ryo if (error) {
1499 1.1 ryo device_printf(dev, "cannot alloc Rx comp descriptors "
1500 1.1 ryo "for queue %d error %d\n", q, error);
1501 1.1 ryo return (error);
1502 1.1 ryo }
1503 1.1 ryo rxc->vxcr_u.rxcd =
1504 1.1 ryo (struct vmxnet3_rxcompdesc *) rxc->vxcr_dma.dma_vaddr;
1505 1.1 ryo
1506 1.1 ryo for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
1507 1.1 ryo rxr = &rxq->vxrxq_cmd_ring[i];
1508 1.1 ryo
1509 1.1 ryo error = bus_dmamap_create(sc->vmx_dmat, JUMBO_LEN, 1,
1510 1.1 ryo JUMBO_LEN, 0, BUS_DMA_NOWAIT,
1511 1.1 ryo &rxr->vxrxr_spare_dmap);
1512 1.1 ryo if (error) {
1513 1.1 ryo device_printf(dev, "unable to create spare "
1514 1.1 ryo "dmamap for queue %d/%d error %d\n",
1515 1.1 ryo q, i, error);
1516 1.1 ryo return (error);
1517 1.1 ryo }
1518 1.1 ryo
1519 1.1 ryo for (j = 0; j < rxr->vxrxr_ndesc; j++) {
1520 1.1 ryo error = bus_dmamap_create(sc->vmx_dmat, JUMBO_LEN, 1,
1521 1.1 ryo JUMBO_LEN, 0, BUS_DMA_NOWAIT,
1522 1.1 ryo &rxr->vxrxr_rxbuf[j].vrxb_dmamap);
1523 1.1 ryo if (error) {
1524 1.1 ryo device_printf(dev, "unable to create "
1525 1.1 ryo "dmamap for queue %d/%d slot %d "
1526 1.1 ryo "error %d\n",
1527 1.1 ryo q, i, j, error);
1528 1.1 ryo return (error);
1529 1.1 ryo }
1530 1.1 ryo }
1531 1.1 ryo }
1532 1.1 ryo }
1533 1.1 ryo
1534 1.1 ryo return (0);
1535 1.1 ryo }
1536 1.1 ryo
1537 1.1 ryo static void
1538 1.1 ryo vmxnet3_free_rxq_data(struct vmxnet3_softc *sc)
1539 1.1 ryo {
1540 1.1 ryo struct vmxnet3_rxqueue *rxq;
1541 1.1 ryo struct vmxnet3_rxring *rxr;
1542 1.1 ryo struct vmxnet3_comp_ring *rxc;
1543 1.1 ryo struct vmxnet3_rxbuf *rxb;
1544 1.2 ryo u_int i, j;
1545 1.2 ryo int q;
1546 1.1 ryo
1547 1.1 ryo for (q = 0; q < sc->vmx_nrxqueues; q++) {
1548 1.1 ryo rxq = &sc->vmx_queue[q].vxq_rxqueue;
1549 1.1 ryo rxc = &rxq->vxrxq_comp_ring;
1550 1.1 ryo
1551 1.1 ryo for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
1552 1.1 ryo rxr = &rxq->vxrxq_cmd_ring[i];
1553 1.1 ryo
1554 1.1 ryo if (rxr->vxrxr_spare_dmap != NULL) {
1555 1.1 ryo bus_dmamap_destroy(sc->vmx_dmat,
1556 1.1 ryo rxr->vxrxr_spare_dmap);
1557 1.1 ryo rxr->vxrxr_spare_dmap = NULL;
1558 1.1 ryo }
1559 1.1 ryo
1560 1.1 ryo for (j = 0; j < rxr->vxrxr_ndesc; j++) {
1561 1.1 ryo rxb = &rxr->vxrxr_rxbuf[j];
1562 1.1 ryo if (rxb->vrxb_dmamap != NULL) {
1563 1.1 ryo bus_dmamap_destroy(sc->vmx_dmat,
1564 1.1 ryo rxb->vrxb_dmamap);
1565 1.1 ryo rxb->vrxb_dmamap = NULL;
1566 1.1 ryo }
1567 1.1 ryo }
1568 1.1 ryo }
1569 1.1 ryo
1570 1.1 ryo if (rxc->vxcr_u.rxcd != NULL) {
1571 1.1 ryo vmxnet3_dma_free(sc, &rxc->vxcr_dma);
1572 1.1 ryo rxc->vxcr_u.rxcd = NULL;
1573 1.1 ryo }
1574 1.1 ryo
1575 1.1 ryo for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
1576 1.1 ryo rxr = &rxq->vxrxq_cmd_ring[i];
1577 1.1 ryo
1578 1.1 ryo if (rxr->vxrxr_rxd != NULL) {
1579 1.1 ryo vmxnet3_dma_free(sc, &rxr->vxrxr_dma);
1580 1.1 ryo rxr->vxrxr_rxd = NULL;
1581 1.1 ryo }
1582 1.1 ryo }
1583 1.1 ryo }
1584 1.1 ryo }
1585 1.1 ryo
1586 1.1 ryo static int
1587 1.1 ryo vmxnet3_alloc_queue_data(struct vmxnet3_softc *sc)
1588 1.1 ryo {
1589 1.1 ryo int error;
1590 1.1 ryo
1591 1.1 ryo error = vmxnet3_alloc_txq_data(sc);
1592 1.1 ryo if (error)
1593 1.1 ryo return (error);
1594 1.1 ryo
1595 1.1 ryo error = vmxnet3_alloc_rxq_data(sc);
1596 1.1 ryo if (error)
1597 1.1 ryo return (error);
1598 1.1 ryo
1599 1.1 ryo return (0);
1600 1.1 ryo }
1601 1.1 ryo
1602 1.1 ryo static void
1603 1.1 ryo vmxnet3_free_queue_data(struct vmxnet3_softc *sc)
1604 1.1 ryo {
1605 1.1 ryo
1606 1.1 ryo if (sc->vmx_queue != NULL) {
1607 1.1 ryo vmxnet3_free_rxq_data(sc);
1608 1.1 ryo vmxnet3_free_txq_data(sc);
1609 1.1 ryo }
1610 1.1 ryo }
1611 1.1 ryo
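/*
 * Allocate the DMA area backing the multicast filter table passed to
 * the device; it holds up to VMXNET3_MULTICAST_MAX MAC addresses.
 */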
1612 1.1 ryo static int
1613 1.1 ryo vmxnet3_alloc_mcast_table(struct vmxnet3_softc *sc)
1614 1.1 ryo {
1615 1.1 ryo int error;
1616 1.1 ryo
1617 1.1 ryo error = vmxnet3_dma_malloc(sc, VMXNET3_MULTICAST_MAX * ETHER_ADDR_LEN,
1618 1.1 ryo 32, &sc->vmx_mcast_dma);
1619 1.1 ryo if (error)
1620 1.1 ryo device_printf(sc->vmx_dev, "unable to alloc multicast table\n");
1621 1.1 ryo else
1622 1.1 ryo sc->vmx_mcast = sc->vmx_mcast_dma.dma_vaddr;
1623 1.1 ryo
1624 1.1 ryo return (error);
1625 1.1 ryo }
1626 1.1 ryo
1627 1.1 ryo static void
1628 1.1 ryo vmxnet3_free_mcast_table(struct vmxnet3_softc *sc)
1629 1.1 ryo {
1630 1.1 ryo
1631 1.1 ryo if (sc->vmx_mcast != NULL) {
1632 1.1 ryo vmxnet3_dma_free(sc, &sc->vmx_mcast_dma);
1633 1.1 ryo sc->vmx_mcast = NULL;
1634 1.1 ryo }
1635 1.1 ryo }
1636 1.1 ryo
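/*
 * Fill in the fields of the driver-shared area read by the device:
 * driver/UPT version information, queue shared memory, RSS and
 * interrupt configuration, the multicast filter table address, and
 * the DMA addresses of every queue's rings.
 */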
1637 1.1 ryo static void
1638 1.1 ryo vmxnet3_init_shared_data(struct vmxnet3_softc *sc)
1639 1.1 ryo {
1640 1.1 ryo struct vmxnet3_driver_shared *ds;
1641 1.1 ryo struct vmxnet3_txqueue *txq;
1642 1.1 ryo struct vmxnet3_txq_shared *txs;
1643 1.1 ryo struct vmxnet3_rxqueue *rxq;
1644 1.1 ryo struct vmxnet3_rxq_shared *rxs;
1645 1.1 ryo int i;
1646 1.1 ryo
1647 1.1 ryo ds = sc->vmx_ds;
1648 1.1 ryo
1649 1.1 ryo /*
1650 1.1 ryo 	 * Initialize fields of the shared data that remain the same across
1651 1.1 ryo 	 * reinits. Note the shared data is zeroed when allocated.
1652 1.1 ryo */
1653 1.1 ryo
1654 1.1 ryo ds->magic = VMXNET3_REV1_MAGIC;
1655 1.1 ryo
1656 1.1 ryo /* DriverInfo */
1657 1.1 ryo ds->version = VMXNET3_DRIVER_VERSION;
1658 1.1 ryo ds->guest = VMXNET3_GOS_FREEBSD |
1659 1.1 ryo #ifdef __LP64__
1660 1.1 ryo VMXNET3_GOS_64BIT;
1661 1.1 ryo #else
1662 1.1 ryo VMXNET3_GOS_32BIT;
1663 1.1 ryo #endif
1664 1.1 ryo ds->vmxnet3_revision = 1;
1665 1.1 ryo ds->upt_version = 1;
1666 1.1 ryo
1667 1.1 ryo /* Misc. conf */
1668 1.1 ryo ds->driver_data = vtophys(sc);
1669 1.1 ryo ds->driver_data_len = sizeof(struct vmxnet3_softc);
1670 1.1 ryo ds->queue_shared = sc->vmx_qs_dma.dma_paddr;
1671 1.1 ryo ds->queue_shared_len = sc->vmx_qs_dma.dma_size;
1672 1.1 ryo ds->nrxsg_max = sc->vmx_max_rxsegs;
1673 1.1 ryo
1674 1.1 ryo /* RSS conf */
1675 1.1 ryo if (sc->vmx_flags & VMXNET3_FLAG_RSS) {
1676 1.1 ryo ds->rss.version = 1;
1677 1.1 ryo ds->rss.paddr = sc->vmx_rss_dma.dma_paddr;
1678 1.1 ryo ds->rss.len = sc->vmx_rss_dma.dma_size;
1679 1.1 ryo }
1680 1.1 ryo
1681 1.1 ryo /* Interrupt control. */
1682 1.1 ryo ds->automask = sc->vmx_intr_mask_mode == VMXNET3_IMM_AUTO;
1683 1.1 ryo ds->nintr = sc->vmx_nintrs;
1684 1.1 ryo ds->evintr = sc->vmx_event_intr_idx;
1685 1.1 ryo ds->ictrl = VMXNET3_ICTRL_DISABLE_ALL;
1686 1.1 ryo
1687 1.1 ryo for (i = 0; i < sc->vmx_nintrs; i++)
1688 1.1 ryo ds->modlevel[i] = UPT1_IMOD_ADAPTIVE;
1689 1.1 ryo
1690 1.1 ryo /* Receive filter. */
1691 1.1 ryo ds->mcast_table = sc->vmx_mcast_dma.dma_paddr;
1692 1.1 ryo ds->mcast_tablelen = sc->vmx_mcast_dma.dma_size;
1693 1.1 ryo
1694 1.1 ryo /* Tx queues */
1695 1.1 ryo for (i = 0; i < sc->vmx_ntxqueues; i++) {
1696 1.1 ryo txq = &sc->vmx_queue[i].vxq_txqueue;
1697 1.1 ryo txs = txq->vxtxq_ts;
1698 1.1 ryo
1699 1.1 ryo txs->cmd_ring = txq->vxtxq_cmd_ring.vxtxr_dma.dma_paddr;
1700 1.1 ryo txs->cmd_ring_len = txq->vxtxq_cmd_ring.vxtxr_ndesc;
1701 1.1 ryo txs->comp_ring = txq->vxtxq_comp_ring.vxcr_dma.dma_paddr;
1702 1.1 ryo txs->comp_ring_len = txq->vxtxq_comp_ring.vxcr_ndesc;
1703 1.1 ryo txs->driver_data = vtophys(txq);
1704 1.1 ryo txs->driver_data_len = sizeof(struct vmxnet3_txqueue);
1705 1.1 ryo }
1706 1.1 ryo
1707 1.1 ryo /* Rx queues */
1708 1.1 ryo for (i = 0; i < sc->vmx_nrxqueues; i++) {
1709 1.1 ryo rxq = &sc->vmx_queue[i].vxq_rxqueue;
1710 1.1 ryo rxs = rxq->vxrxq_rs;
1711 1.1 ryo
1712 1.1 ryo rxs->cmd_ring[0] = rxq->vxrxq_cmd_ring[0].vxrxr_dma.dma_paddr;
1713 1.1 ryo rxs->cmd_ring_len[0] = rxq->vxrxq_cmd_ring[0].vxrxr_ndesc;
1714 1.1 ryo rxs->cmd_ring[1] = rxq->vxrxq_cmd_ring[1].vxrxr_dma.dma_paddr;
1715 1.1 ryo rxs->cmd_ring_len[1] = rxq->vxrxq_cmd_ring[1].vxrxr_ndesc;
1716 1.1 ryo rxs->comp_ring = rxq->vxrxq_comp_ring.vxcr_dma.dma_paddr;
1717 1.1 ryo rxs->comp_ring_len = rxq->vxrxq_comp_ring.vxcr_ndesc;
1718 1.1 ryo rxs->driver_data = vtophys(rxq);
1719 1.1 ryo rxs->driver_data_len = sizeof(struct vmxnet3_rxqueue);
1720 1.1 ryo }
1721 1.1 ryo }
1722 1.1 ryo
1723 1.1 ryo static void
1724 1.1 ryo vmxnet3_reinit_rss_shared_data(struct vmxnet3_softc *sc)
1725 1.1 ryo {
1726 1.1 ryo /*
1727 1.1 ryo * Use the same key as the Linux driver until FreeBSD can do
1728 1.1 ryo * RSS (presumably Toeplitz) in software.
1729 1.1 ryo */
1730 1.1 ryo static const uint8_t rss_key[UPT1_RSS_MAX_KEY_SIZE] = {
1731 1.1 ryo 0x3b, 0x56, 0xd1, 0x56, 0x13, 0x4a, 0xe7, 0xac,
1732 1.1 ryo 0xe8, 0x79, 0x09, 0x75, 0xe8, 0x65, 0x79, 0x28,
1733 1.1 ryo 0x35, 0x12, 0xb9, 0x56, 0x7c, 0x76, 0x4b, 0x70,
1734 1.1 ryo 0xd8, 0x56, 0xa3, 0x18, 0x9b, 0x0a, 0xee, 0xf3,
1735 1.1 ryo 0x96, 0xa6, 0x9f, 0x8f, 0x9e, 0x8c, 0x90, 0xc9,
1736 1.1 ryo };
1737 1.1 ryo
1738 1.1 ryo struct vmxnet3_rss_shared *rss;
1739 1.1 ryo int i;
1740 1.1 ryo
1741 1.1 ryo rss = sc->vmx_rss;
1742 1.1 ryo
1743 1.1 ryo rss->hash_type =
1744 1.1 ryo UPT1_RSS_HASH_TYPE_IPV4 | UPT1_RSS_HASH_TYPE_TCP_IPV4 |
1745 1.1 ryo UPT1_RSS_HASH_TYPE_IPV6 | UPT1_RSS_HASH_TYPE_TCP_IPV6;
1746 1.1 ryo rss->hash_func = UPT1_RSS_HASH_FUNC_TOEPLITZ;
1747 1.1 ryo rss->hash_key_size = UPT1_RSS_MAX_KEY_SIZE;
1748 1.1 ryo rss->ind_table_size = UPT1_RSS_MAX_IND_TABLE_SIZE;
1749 1.1 ryo memcpy(rss->hash_key, rss_key, UPT1_RSS_MAX_KEY_SIZE);
1750 1.1 ryo
1751 1.1 ryo for (i = 0; i < UPT1_RSS_MAX_IND_TABLE_SIZE; i++)
1752 1.1 ryo rss->ind_table[i] = i % sc->vmx_nrxqueues;
1753 1.1 ryo }
1754 1.1 ryo
1755 1.1 ryo static void
1756 1.1 ryo vmxnet3_reinit_shared_data(struct vmxnet3_softc *sc)
1757 1.1 ryo {
1758 1.1 ryo struct ifnet *ifp;
1759 1.1 ryo struct vmxnet3_driver_shared *ds;
1760 1.1 ryo
1761 1.1 ryo ifp = &sc->vmx_ethercom.ec_if;
1762 1.1 ryo ds = sc->vmx_ds;
1763 1.1 ryo
1764 1.1 ryo ds->mtu = ifp->if_mtu;
1765 1.1 ryo ds->ntxqueue = sc->vmx_ntxqueues;
1766 1.1 ryo ds->nrxqueue = sc->vmx_nrxqueues;
1767 1.1 ryo
1768 1.1 ryo ds->upt_features = 0;
1769 1.1 ryo if (ifp->if_capenable &
1770 1.1 ryo (IFCAP_CSUM_IPv4_Rx | IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
1771 1.1 ryo IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
1772 1.1 ryo ds->upt_features |= UPT1_F_CSUM;
1773 1.1 ryo if (sc->vmx_ethercom.ec_capenable & ETHERCAP_VLAN_HWTAGGING)
1774 1.1 ryo ds->upt_features |= UPT1_F_VLAN;
1775 1.1 ryo
1776 1.1 ryo if (sc->vmx_flags & VMXNET3_FLAG_RSS) {
1777 1.1 ryo ds->upt_features |= UPT1_F_RSS;
1778 1.1 ryo vmxnet3_reinit_rss_shared_data(sc);
1779 1.1 ryo }
1780 1.1 ryo
1781 1.1 ryo vmxnet3_write_bar1(sc, VMXNET3_BAR1_DSL, sc->vmx_ds_dma.dma_paddr);
1782 1.1 ryo vmxnet3_write_bar1(sc, VMXNET3_BAR1_DSH,
1783 1.1 ryo (uint64_t) sc->vmx_ds_dma.dma_paddr >> 32);
1784 1.1 ryo }
1785 1.1 ryo
1786 1.1 ryo static int
1787 1.1 ryo vmxnet3_alloc_data(struct vmxnet3_softc *sc)
1788 1.1 ryo {
1789 1.1 ryo int error;
1790 1.1 ryo
1791 1.1 ryo error = vmxnet3_alloc_shared_data(sc);
1792 1.1 ryo if (error)
1793 1.1 ryo return (error);
1794 1.1 ryo
1795 1.1 ryo error = vmxnet3_alloc_queue_data(sc);
1796 1.1 ryo if (error)
1797 1.1 ryo return (error);
1798 1.1 ryo
1799 1.1 ryo error = vmxnet3_alloc_mcast_table(sc);
1800 1.1 ryo if (error)
1801 1.1 ryo return (error);
1802 1.1 ryo
1803 1.1 ryo vmxnet3_init_shared_data(sc);
1804 1.1 ryo
1805 1.1 ryo return (0);
1806 1.1 ryo }
1807 1.1 ryo
1808 1.1 ryo static void
1809 1.1 ryo vmxnet3_free_data(struct vmxnet3_softc *sc)
1810 1.1 ryo {
1811 1.1 ryo
1812 1.1 ryo vmxnet3_free_mcast_table(sc);
1813 1.1 ryo vmxnet3_free_queue_data(sc);
1814 1.1 ryo vmxnet3_free_shared_data(sc);
1815 1.1 ryo }
1816 1.1 ryo
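/*
 * Fetch the MAC address from the device, initialize the ifnet
 * callbacks and capability flags, attach the interface and its
 * ifmedia, and set the default Rx/Tx processing limits.
 */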
1817 1.1 ryo static int
1818 1.1 ryo vmxnet3_setup_interface(struct vmxnet3_softc *sc)
1819 1.1 ryo {
1820 1.1 ryo struct ifnet *ifp = &sc->vmx_ethercom.ec_if;
1821 1.1 ryo
1822 1.1 ryo vmxnet3_get_lladdr(sc);
1823 1.1 ryo aprint_normal_dev(sc->vmx_dev, "Ethernet address %s\n",
1824 1.1 ryo ether_sprintf(sc->vmx_lladdr));
1825 1.1 ryo vmxnet3_set_lladdr(sc);
1826 1.1 ryo
1827 1.1 ryo strlcpy(ifp->if_xname, device_xname(sc->vmx_dev), IFNAMSIZ);
1828 1.1 ryo ifp->if_softc = sc;
1829 1.1 ryo ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;
1830 1.1 ryo ifp->if_extflags = IFEF_MPSAFE;
1831 1.1 ryo ifp->if_ioctl = vmxnet3_ioctl;
1832 1.1 ryo ifp->if_start = vmxnet3_start;
1833 1.1 ryo ifp->if_transmit = vmxnet3_transmit;
1834 1.1 ryo ifp->if_watchdog = NULL;
1835 1.1 ryo ifp->if_init = vmxnet3_init;
1836 1.1 ryo ifp->if_stop = vmxnet3_stop;
1837 1.1 ryo 	sc->vmx_ethercom.ec_if.if_capabilities |= IFCAP_CSUM_IPv4_Rx |
1838 1.1 ryo IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
1839 1.1 ryo IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
1840 1.1 ryo IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_TCPv6_Rx |
1841 1.1 ryo IFCAP_CSUM_UDPv6_Tx | IFCAP_CSUM_UDPv6_Rx;
1842 1.1 ryo
1843 1.1 ryo ifp->if_capenable = ifp->if_capabilities;
1844 1.1 ryo
1845 1.1 ryo sc->vmx_ethercom.ec_if.if_capabilities |= IFCAP_TSOv4 | IFCAP_TSOv6;
1846 1.1 ryo
1847 1.1 ryo sc->vmx_ethercom.ec_capabilities |=
1848 1.1 ryo ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING | ETHERCAP_JUMBO_MTU;
1849 1.1 ryo sc->vmx_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
1850 1.1 ryo
1851 1.1 ryo IFQ_SET_MAXLEN(&ifp->if_snd, sc->vmx_ntxdescs);
1852 1.1 ryo IFQ_SET_READY(&ifp->if_snd);
1853 1.1 ryo
1854 1.1 ryo /* Initialize ifmedia structures. */
1855 1.1 ryo sc->vmx_ethercom.ec_ifmedia = &sc->vmx_media;
1856 1.1 ryo ifmedia_init_with_lock(&sc->vmx_media, IFM_IMASK, vmxnet3_ifmedia_change,
1857 1.1 ryo vmxnet3_ifmedia_status, sc->vmx_mtx);
1858 1.1 ryo ifmedia_add(&sc->vmx_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1859 1.1 ryo ifmedia_add(&sc->vmx_media, IFM_ETHER | IFM_10G_T | IFM_FDX, 0, NULL);
1860 1.1 ryo ifmedia_add(&sc->vmx_media, IFM_ETHER | IFM_10G_T, 0, NULL);
1861 1.1 ryo ifmedia_add(&sc->vmx_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
1862 1.1 ryo ifmedia_add(&sc->vmx_media, IFM_ETHER | IFM_1000_T, 0, NULL);
1863 1.1 ryo ifmedia_set(&sc->vmx_media, IFM_ETHER | IFM_AUTO);
1864 1.1 ryo
1865 1.1 ryo if_attach(ifp);
1866 1.1 ryo if_deferred_start_init(ifp, NULL);
1867 1.1 ryo ether_ifattach(ifp, sc->vmx_lladdr);
1868 1.1 ryo ether_set_ifflags_cb(&sc->vmx_ethercom, vmxnet3_ifflags_cb);
1869 1.1 ryo vmxnet3_cmd_link_status(ifp);
1870 1.1 ryo
1871 1.1 ryo 	/* These should be set before setting up interrupts. */
1872 1.1 ryo sc->vmx_rx_intr_process_limit = VMXNET3_RX_INTR_PROCESS_LIMIT;
1873 1.1 ryo sc->vmx_rx_process_limit = VMXNET3_RX_PROCESS_LIMIT;
1874 1.1 ryo sc->vmx_tx_intr_process_limit = VMXNET3_TX_INTR_PROCESS_LIMIT;
1875 1.1 ryo sc->vmx_tx_process_limit = VMXNET3_TX_PROCESS_LIMIT;
1876 1.1 ryo
1877 1.1 ryo return (0);
1878 1.1 ryo }
1879 1.1 ryo
1880 1.1 ryo static int
1881 1.1 ryo vmxnet3_setup_sysctl(struct vmxnet3_softc *sc)
1882 1.1 ryo {
1883 1.1 ryo const char *devname;
1884 1.1 ryo struct sysctllog **log;
1885 1.1 ryo const struct sysctlnode *rnode, *rxnode, *txnode;
1886 1.1 ryo int error;
1887 1.1 ryo
1888 1.1 ryo log = &sc->vmx_sysctllog;
1889 1.1 ryo devname = device_xname(sc->vmx_dev);
1890 1.1 ryo
1891 1.1 ryo error = sysctl_createv(log, 0, NULL, &rnode,
1892 1.1 ryo 0, CTLTYPE_NODE, devname,
1893 1.1 ryo SYSCTL_DESCR("vmxnet3 information and settings"),
1894 1.1 ryo NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
1895 1.1 ryo if (error)
1896 1.1 ryo goto out;
1897 1.1 ryo error = sysctl_createv(log, 0, &rnode, NULL,
1898 1.1 ryo CTLFLAG_READWRITE, CTLTYPE_BOOL, "txrx_workqueue",
1899 1.1 ryo SYSCTL_DESCR("Use workqueue for packet processing"),
1900 1.1 ryo NULL, 0, &sc->vmx_txrx_workqueue, 0, CTL_CREATE, CTL_EOL);
1901 1.1 ryo if (error)
1902 1.1 ryo goto out;
1903 1.1 ryo
1904 1.1 ryo error = sysctl_createv(log, 0, &rnode, &rxnode,
1905 1.1 ryo 0, CTLTYPE_NODE, "rx",
1906 1.1 ryo SYSCTL_DESCR("vmxnet3 information and settings for Rx"),
1907 1.1 ryo NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
1908 1.1 ryo if (error)
1909 1.1 ryo goto out;
1910 1.1 ryo error = sysctl_createv(log, 0, &rxnode, NULL,
1911 1.1 ryo CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
1912 1.1 ryo SYSCTL_DESCR("max number of Rx packets to process for interrupt processing"),
1913 1.1 ryo NULL, 0, &sc->vmx_rx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
1914 1.1 ryo if (error)
1915 1.1 ryo goto out;
1916 1.1 ryo error = sysctl_createv(log, 0, &rxnode, NULL,
1917 1.1 ryo CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
1918 1.1 ryo SYSCTL_DESCR("max number of Rx packets to process for deferred processing"),
1919 1.1 ryo NULL, 0, &sc->vmx_rx_process_limit, 0, CTL_CREATE, CTL_EOL);
1920 1.1 ryo if (error)
1921 1.1 ryo goto out;
1922 1.1 ryo
1923 1.1 ryo error = sysctl_createv(log, 0, &rnode, &txnode,
1924 1.1 ryo 0, CTLTYPE_NODE, "tx",
1925 1.1 ryo SYSCTL_DESCR("vmxnet3 information and settings for Tx"),
1926 1.1 ryo NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
1927 1.1 ryo if (error)
1928 1.1 ryo goto out;
1929 1.1 ryo error = sysctl_createv(log, 0, &txnode, NULL,
1930 1.1 ryo CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
1931 1.1 ryo SYSCTL_DESCR("max number of Tx packets to process for interrupt processing"),
1932 1.1 ryo NULL, 0, &sc->vmx_tx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
1933 1.1 ryo if (error)
1934 1.1 ryo goto out;
1935 1.1 ryo error = sysctl_createv(log, 0, &txnode, NULL,
1936 1.1 ryo CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
1937 1.1 ryo SYSCTL_DESCR("max number of Tx packets to process for deferred processing"),
1938 1.1 ryo NULL, 0, &sc->vmx_tx_process_limit, 0, CTL_CREATE, CTL_EOL);
1939 1.1 ryo
1940 1.1 ryo out:
1941 1.1 ryo if (error) {
1942 1.1 ryo aprint_error_dev(sc->vmx_dev,
1943 1.1 ryo "unable to create sysctl node\n");
1944 1.1 ryo sysctl_teardown(log);
1945 1.1 ryo }
1946 1.1 ryo return error;
1947 1.1 ryo }
1948 1.1 ryo
1949 1.1 ryo static int
1950 1.1 ryo vmxnet3_setup_stats(struct vmxnet3_softc *sc)
1951 1.1 ryo {
1952 1.1 ryo struct vmxnet3_queue *vmxq;
1953 1.1 ryo struct vmxnet3_txqueue *txq;
1954 1.1 ryo struct vmxnet3_rxqueue *rxq;
1955 1.1 ryo int i;
1956 1.1 ryo
1957 1.1 ryo for (i = 0; i < sc->vmx_ntxqueues; i++) {
1958 1.1 ryo vmxq = &sc->vmx_queue[i];
1959 1.1 ryo txq = &vmxq->vxq_txqueue;
1960 1.1 ryo evcnt_attach_dynamic(&txq->vxtxq_intr, EVCNT_TYPE_INTR,
1961 1.1 ryo NULL, txq->vxtxq_name, "Interrupt on queue");
1962 1.1 ryo evcnt_attach_dynamic(&txq->vxtxq_defer, EVCNT_TYPE_MISC,
1963 1.1 ryo NULL, txq->vxtxq_name, "Handled queue in softint/workqueue");
1964 1.1 ryo evcnt_attach_dynamic(&txq->vxtxq_deferreq, EVCNT_TYPE_MISC,
1965 1.1 ryo NULL, txq->vxtxq_name, "Requested in softint/workqueue");
1966 1.1 ryo evcnt_attach_dynamic(&txq->vxtxq_pcqdrop, EVCNT_TYPE_MISC,
1967 1.1 ryo NULL, txq->vxtxq_name, "Dropped in pcq");
1968 1.1 ryo evcnt_attach_dynamic(&txq->vxtxq_transmitdef, EVCNT_TYPE_MISC,
1969 1.1 ryo NULL, txq->vxtxq_name, "Deferred transmit");
1970 1.1 ryo evcnt_attach_dynamic(&txq->vxtxq_watchdogto, EVCNT_TYPE_MISC,
1971 1.1 ryo NULL, txq->vxtxq_name, "Watchdog timeout");
1972 1.1 ryo evcnt_attach_dynamic(&txq->vxtxq_defragged, EVCNT_TYPE_MISC,
1973 1.1 ryo 		    NULL, txq->vxtxq_name, "m_defrag succeeded");
1974 1.1 ryo evcnt_attach_dynamic(&txq->vxtxq_defrag_failed, EVCNT_TYPE_MISC,
1975 1.1 ryo NULL, txq->vxtxq_name, "m_defrag failed");
1976 1.1 ryo }
1977 1.1 ryo
1978 1.1 ryo for (i = 0; i < sc->vmx_nrxqueues; i++) {
1979 1.1 ryo vmxq = &sc->vmx_queue[i];
1980 1.1 ryo rxq = &vmxq->vxq_rxqueue;
1981 1.1 ryo evcnt_attach_dynamic(&rxq->vxrxq_intr, EVCNT_TYPE_INTR,
1982 1.1 ryo NULL, rxq->vxrxq_name, "Interrupt on queue");
1983 1.1 ryo evcnt_attach_dynamic(&rxq->vxrxq_defer, EVCNT_TYPE_MISC,
1984 1.1 ryo NULL, rxq->vxrxq_name, "Handled queue in softint/workqueue");
1985 1.1 ryo evcnt_attach_dynamic(&rxq->vxrxq_deferreq, EVCNT_TYPE_MISC,
1986 1.1 ryo NULL, rxq->vxrxq_name, "Requested in softint/workqueue");
1987 1.1 ryo evcnt_attach_dynamic(&rxq->vxrxq_mgetcl_failed, EVCNT_TYPE_MISC,
1988 1.1 ryo NULL, rxq->vxrxq_name, "MCLGET failed");
1989 1.1 ryo evcnt_attach_dynamic(&rxq->vxrxq_mbuf_load_failed, EVCNT_TYPE_MISC,
1990 1.1 ryo NULL, rxq->vxrxq_name, "bus_dmamap_load_mbuf failed");
1991 1.1 ryo }
1992 1.1 ryo
1993 1.1 ryo evcnt_attach_dynamic(&sc->vmx_event_intr, EVCNT_TYPE_INTR,
1994 1.1 ryo NULL, device_xname(sc->vmx_dev), "Interrupt for other events");
1995 1.1 ryo evcnt_attach_dynamic(&sc->vmx_event_link, EVCNT_TYPE_MISC,
1996 1.1 ryo NULL, device_xname(sc->vmx_dev), "Link status event");
1997 1.1 ryo evcnt_attach_dynamic(&sc->vmx_event_txqerror, EVCNT_TYPE_MISC,
1998 1.1 ryo NULL, device_xname(sc->vmx_dev), "Tx queue error event");
1999 1.1 ryo evcnt_attach_dynamic(&sc->vmx_event_rxqerror, EVCNT_TYPE_MISC,
2000 1.1 ryo NULL, device_xname(sc->vmx_dev), "Rx queue error event");
2001 1.1 ryo evcnt_attach_dynamic(&sc->vmx_event_dic, EVCNT_TYPE_MISC,
2002 1.1 ryo NULL, device_xname(sc->vmx_dev), "Device impl change event");
2003 1.1 ryo evcnt_attach_dynamic(&sc->vmx_event_debug, EVCNT_TYPE_MISC,
2004 1.1 ryo NULL, device_xname(sc->vmx_dev), "Debug event");
2005 1.1 ryo
2006 1.1 ryo return 0;
2007 1.1 ryo }
2008 1.1 ryo
2009 1.1 ryo static void
2010 1.1 ryo vmxnet3_teardown_stats(struct vmxnet3_softc *sc)
2011 1.1 ryo {
2012 1.1 ryo struct vmxnet3_queue *vmxq;
2013 1.1 ryo struct vmxnet3_txqueue *txq;
2014 1.1 ryo struct vmxnet3_rxqueue *rxq;
2015 1.1 ryo int i;
2016 1.1 ryo
2017 1.1 ryo for (i = 0; i < sc->vmx_ntxqueues; i++) {
2018 1.1 ryo vmxq = &sc->vmx_queue[i];
2019 1.1 ryo txq = &vmxq->vxq_txqueue;
2020 1.1 ryo evcnt_detach(&txq->vxtxq_intr);
2021 1.1 ryo evcnt_detach(&txq->vxtxq_defer);
2022 1.1 ryo evcnt_detach(&txq->vxtxq_deferreq);
2023 1.1 ryo evcnt_detach(&txq->vxtxq_pcqdrop);
2024 1.1 ryo evcnt_detach(&txq->vxtxq_transmitdef);
2025 1.1 ryo evcnt_detach(&txq->vxtxq_watchdogto);
2026 1.1 ryo evcnt_detach(&txq->vxtxq_defragged);
2027 1.1 ryo evcnt_detach(&txq->vxtxq_defrag_failed);
2028 1.1 ryo }
2029 1.1 ryo
2030 1.1 ryo for (i = 0; i < sc->vmx_nrxqueues; i++) {
2031 1.1 ryo vmxq = &sc->vmx_queue[i];
2032 1.1 ryo rxq = &vmxq->vxq_rxqueue;
2033 1.1 ryo evcnt_detach(&rxq->vxrxq_intr);
2034 1.1 ryo evcnt_detach(&rxq->vxrxq_defer);
2035 1.1 ryo evcnt_detach(&rxq->vxrxq_deferreq);
2036 1.1 ryo evcnt_detach(&rxq->vxrxq_mgetcl_failed);
2037 1.1 ryo evcnt_detach(&rxq->vxrxq_mbuf_load_failed);
2038 1.1 ryo }
2039 1.1 ryo
2040 1.1 ryo evcnt_detach(&sc->vmx_event_intr);
2041 1.1 ryo evcnt_detach(&sc->vmx_event_link);
2042 1.1 ryo evcnt_detach(&sc->vmx_event_txqerror);
2043 1.1 ryo evcnt_detach(&sc->vmx_event_rxqerror);
2044 1.1 ryo evcnt_detach(&sc->vmx_event_dic);
2045 1.1 ryo evcnt_detach(&sc->vmx_event_debug);
2046 1.1 ryo }
2047 1.1 ryo
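/*
 * Handle an event interrupt: acknowledge the pending events, update
 * the link state, count and log queue errors, and reinitialize the
 * device if a Tx/Rx queue error was reported.
 */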
2048 1.1 ryo static void
2049 1.1 ryo vmxnet3_evintr(struct vmxnet3_softc *sc)
2050 1.1 ryo {
2051 1.1 ryo device_t dev;
2052 1.1 ryo struct vmxnet3_txq_shared *ts;
2053 1.1 ryo struct vmxnet3_rxq_shared *rs;
2054 1.1 ryo uint32_t event;
2055 1.1 ryo int reset;
2056 1.1 ryo
2057 1.1 ryo dev = sc->vmx_dev;
2058 1.1 ryo reset = 0;
2059 1.1 ryo
2060 1.1 ryo VMXNET3_CORE_LOCK(sc);
2061 1.1 ryo
2062 1.1 ryo /* Clear events. */
2063 1.1 ryo event = sc->vmx_ds->event;
2064 1.1 ryo vmxnet3_write_bar1(sc, VMXNET3_BAR1_EVENT, event);
2065 1.1 ryo
2066 1.1 ryo if (event & VMXNET3_EVENT_LINK) {
2067 1.1 ryo sc->vmx_event_link.ev_count++;
2068 1.1 ryo vmxnet3_if_link_status(sc);
2069 1.1 ryo if (sc->vmx_link_active != 0)
2070 1.1 ryo if_schedule_deferred_start(&sc->vmx_ethercom.ec_if);
2071 1.1 ryo }
2072 1.1 ryo
2073 1.1 ryo if (event & (VMXNET3_EVENT_TQERROR | VMXNET3_EVENT_RQERROR)) {
2074 1.1 ryo if (event & VMXNET3_EVENT_TQERROR)
2075 1.1 ryo sc->vmx_event_txqerror.ev_count++;
2076 1.1 ryo if (event & VMXNET3_EVENT_RQERROR)
2077 1.1 ryo sc->vmx_event_rxqerror.ev_count++;
2078 1.1 ryo
2079 1.1 ryo reset = 1;
2080 1.1 ryo vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_STATUS);
2081 1.1 ryo ts = sc->vmx_queue[0].vxq_txqueue.vxtxq_ts;
2082 1.1 ryo if (ts->stopped != 0)
2083 1.1 ryo device_printf(dev, "Tx queue error %#x\n", ts->error);
2084 1.1 ryo rs = sc->vmx_queue[0].vxq_rxqueue.vxrxq_rs;
2085 1.1 ryo if (rs->stopped != 0)
2086 1.1 ryo device_printf(dev, "Rx queue error %#x\n", rs->error);
2087 1.1 ryo device_printf(dev, "Rx/Tx queue error event ... resetting\n");
2088 1.1 ryo }
2089 1.1 ryo
2090 1.1 ryo if (event & VMXNET3_EVENT_DIC) {
2091 1.1 ryo sc->vmx_event_dic.ev_count++;
2092 1.1 ryo device_printf(dev, "device implementation change event\n");
2093 1.1 ryo }
2094 1.1 ryo if (event & VMXNET3_EVENT_DEBUG) {
2095 1.1 ryo sc->vmx_event_debug.ev_count++;
2096 1.1 ryo device_printf(dev, "debug event\n");
2097 1.1 ryo }
2098 1.1 ryo
2099 1.1 ryo if (reset != 0)
2100 1.1 ryo vmxnet3_init_locked(sc);
2101 1.1 ryo
2102 1.1 ryo VMXNET3_CORE_UNLOCK(sc);
2103 1.1 ryo }
2104 1.1 ryo
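/*
 * Reap at most "limit" completed Tx descriptors.  Walk the completion
 * ring while the generation bit matches, unload and free the
 * transmitted mbufs, update the interface counters, and return true
 * if more completions remain to be processed.
 */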
2105 1.1 ryo static bool
2106 1.1 ryo vmxnet3_txq_eof(struct vmxnet3_txqueue *txq, u_int limit)
2107 1.1 ryo {
2108 1.1 ryo struct vmxnet3_softc *sc;
2109 1.1 ryo struct vmxnet3_txring *txr;
2110 1.1 ryo struct vmxnet3_comp_ring *txc;
2111 1.1 ryo struct vmxnet3_txcompdesc *txcd;
2112 1.1 ryo struct vmxnet3_txbuf *txb;
2113 1.1 ryo struct ifnet *ifp;
2114 1.1 ryo struct mbuf *m;
2115 1.1 ryo u_int sop;
2116 1.1 ryo bool more = false;
2117 1.1 ryo
2118 1.1 ryo sc = txq->vxtxq_sc;
2119 1.1 ryo txr = &txq->vxtxq_cmd_ring;
2120 1.1 ryo txc = &txq->vxtxq_comp_ring;
2121 1.1 ryo ifp = &sc->vmx_ethercom.ec_if;
2122 1.1 ryo
2123 1.1 ryo VMXNET3_TXQ_LOCK_ASSERT(txq);
2124 1.1 ryo
2125 1.1 ryo net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
2126 1.1 ryo for (;;) {
2127 1.1 ryo if (limit-- == 0) {
2128 1.1 ryo more = true;
2129 1.1 ryo break;
2130 1.1 ryo }
2131 1.1 ryo
2132 1.1 ryo txcd = &txc->vxcr_u.txcd[txc->vxcr_next];
2133 1.1 ryo if (txcd->gen != txc->vxcr_gen)
2134 1.1 ryo break;
2135 1.1 ryo vmxnet3_barrier(sc, VMXNET3_BARRIER_RD);
2136 1.1 ryo
2137 1.1 ryo if (++txc->vxcr_next == txc->vxcr_ndesc) {
2138 1.1 ryo txc->vxcr_next = 0;
2139 1.1 ryo txc->vxcr_gen ^= 1;
2140 1.1 ryo }
2141 1.1 ryo
2142 1.1 ryo sop = txr->vxtxr_next;
2143 1.1 ryo txb = &txr->vxtxr_txbuf[sop];
2144 1.1 ryo
2145 1.1 ryo if ((m = txb->vtxb_m) != NULL) {
2146 1.1 ryo bus_dmamap_sync(sc->vmx_dmat, txb->vtxb_dmamap,
2147 1.1 ryo 0, txb->vtxb_dmamap->dm_mapsize,
2148 1.1 ryo BUS_DMASYNC_POSTWRITE);
2149 1.1 ryo bus_dmamap_unload(sc->vmx_dmat, txb->vtxb_dmamap);
2150 1.1 ryo
2151 1.1 ryo if_statinc_ref(nsr, if_opackets);
2152 1.1 ryo if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
2153 1.1 ryo if (m->m_flags & M_MCAST)
2154 1.1 ryo if_statinc_ref(nsr, if_omcasts);
2155 1.1 ryo
2156 1.1 ryo m_freem(m);
2157 1.1 ryo txb->vtxb_m = NULL;
2158 1.1 ryo }
2159 1.1 ryo
2160 1.1 ryo txr->vxtxr_next = (txcd->eop_idx + 1) % txr->vxtxr_ndesc;
2161 1.1 ryo }
2162 1.1 ryo IF_STAT_PUTREF(ifp);
2163 1.1 ryo
2164 1.1 ryo if (txr->vxtxr_head == txr->vxtxr_next)
2165 1.1 ryo txq->vxtxq_watchdog = 0;
2166 1.1 ryo
2167 1.1 ryo return more;
2168 1.1 ryo }
2169 1.1 ryo
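/*
 * Attach a fresh mbuf cluster to the Rx descriptor at the ring's
 * current fill index.  The new mbuf is loaded into the ring's spare
 * DMA map first, so the old buffer is only unloaded once the
 * replacement is ready; the two maps are then swapped.
 */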
2170 1.1 ryo static int
2171 1.1 ryo vmxnet3_newbuf(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rxq,
2172 1.1 ryo struct vmxnet3_rxring *rxr)
2173 1.1 ryo {
2174 1.1 ryo struct mbuf *m;
2175 1.1 ryo struct vmxnet3_rxdesc *rxd;
2176 1.1 ryo struct vmxnet3_rxbuf *rxb;
2177 1.1 ryo bus_dma_tag_t tag;
2178 1.1 ryo bus_dmamap_t dmap;
2179 1.1 ryo int idx, btype, error;
2180 1.1 ryo
2181 1.1 ryo tag = sc->vmx_dmat;
2182 1.1 ryo dmap = rxr->vxrxr_spare_dmap;
2183 1.1 ryo idx = rxr->vxrxr_fill;
2184 1.1 ryo rxd = &rxr->vxrxr_rxd[idx];
2185 1.1 ryo rxb = &rxr->vxrxr_rxbuf[idx];
2186 1.1 ryo
2187 1.1 ryo /* Don't allocate buffers for ring 2 for now. */
2188 1.1 ryo if (rxr->vxrxr_rid != 0)
2189 1.1 ryo return -1;
2190 1.1 ryo btype = VMXNET3_BTYPE_HEAD;
2191 1.1 ryo
2192 1.1 ryo MGETHDR(m, M_DONTWAIT, MT_DATA);
2193 1.1 ryo if (m == NULL)
2194 1.1 ryo return (ENOBUFS);
2195 1.1 ryo
2196 1.1 ryo MCLGET(m, M_DONTWAIT);
2197 1.1 ryo if ((m->m_flags & M_EXT) == 0) {
2198 1.1 ryo rxq->vxrxq_mgetcl_failed.ev_count++;
2199 1.1 ryo m_freem(m);
2200 1.1 ryo return (ENOBUFS);
2201 1.1 ryo }
2202 1.1 ryo
2203 1.1 ryo m->m_pkthdr.len = m->m_len = JUMBO_LEN;
2204 1.1 ryo m_adj(m, ETHER_ALIGN);
2205 1.1 ryo
2206 1.1 ryo error = bus_dmamap_load_mbuf(sc->vmx_dmat, dmap, m, BUS_DMA_NOWAIT);
2207 1.1 ryo if (error) {
2208 1.1 ryo m_freem(m);
2209 1.1 ryo rxq->vxrxq_mbuf_load_failed.ev_count++;
2210 1.1 ryo return (error);
2211 1.1 ryo }
2212 1.1 ryo
2213 1.1 ryo if (rxb->vrxb_m != NULL) {
2214 1.1 ryo bus_dmamap_sync(tag, rxb->vrxb_dmamap,
2215 1.1 ryo 0, rxb->vrxb_dmamap->dm_mapsize,
2216 1.1 ryo BUS_DMASYNC_POSTREAD);
2217 1.1 ryo bus_dmamap_unload(tag, rxb->vrxb_dmamap);
2218 1.1 ryo }
2219 1.1 ryo
2220 1.1 ryo rxr->vxrxr_spare_dmap = rxb->vrxb_dmamap;
2221 1.1 ryo rxb->vrxb_dmamap = dmap;
2222 1.1 ryo rxb->vrxb_m = m;
2223 1.1 ryo
2224 1.1 ryo rxd->addr = DMAADDR(dmap);
2225 1.1 ryo rxd->len = m->m_pkthdr.len;
2226 1.1 ryo rxd->btype = btype;
2227 1.1 ryo rxd->gen = rxr->vxrxr_gen;
2228 1.1 ryo
2229 1.1 ryo vmxnet3_rxr_increment_fill(rxr);
2230 1.1 ryo return (0);
2231 1.1 ryo }
2232 1.1 ryo
2233 1.1 ryo static void
2234 1.1 ryo vmxnet3_rxq_eof_discard(struct vmxnet3_rxqueue *rxq,
2235 1.1 ryo struct vmxnet3_rxring *rxr, int idx)
2236 1.1 ryo {
2237 1.1 ryo struct vmxnet3_rxdesc *rxd;
2238 1.1 ryo
2239 1.1 ryo rxd = &rxr->vxrxr_rxd[idx];
2240 1.1 ryo rxd->gen = rxr->vxrxr_gen;
2241 1.1 ryo vmxnet3_rxr_increment_fill(rxr);
2242 1.1 ryo }
2243 1.1 ryo
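/*
 * Drop the rest of a partially received frame by consuming completion
 * descriptors until the end-of-packet entry is seen.
 */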
2244 1.1 ryo static void
2245 1.1 ryo vmxnet3_rxq_discard_chain(struct vmxnet3_rxqueue *rxq)
2246 1.1 ryo {
2247 1.1 ryo struct vmxnet3_softc *sc;
2248 1.1 ryo struct vmxnet3_rxring *rxr;
2249 1.1 ryo struct vmxnet3_comp_ring *rxc;
2250 1.1 ryo struct vmxnet3_rxcompdesc *rxcd;
2251 1.1 ryo int idx, eof;
2252 1.1 ryo
2253 1.1 ryo sc = rxq->vxrxq_sc;
2254 1.1 ryo rxc = &rxq->vxrxq_comp_ring;
2255 1.1 ryo
2256 1.1 ryo do {
2257 1.1 ryo rxcd = &rxc->vxcr_u.rxcd[rxc->vxcr_next];
2258 1.1 ryo if (rxcd->gen != rxc->vxcr_gen)
2259 1.1 ryo break; /* Not expected. */
2260 1.1 ryo vmxnet3_barrier(sc, VMXNET3_BARRIER_RD);
2261 1.1 ryo
2262 1.1 ryo if (++rxc->vxcr_next == rxc->vxcr_ndesc) {
2263 1.1 ryo rxc->vxcr_next = 0;
2264 1.1 ryo rxc->vxcr_gen ^= 1;
2265 1.1 ryo }
2266 1.1 ryo
2267 1.1 ryo idx = rxcd->rxd_idx;
2268 1.1 ryo eof = rxcd->eop;
2269 1.1 ryo if (rxcd->qid < sc->vmx_nrxqueues)
2270 1.1 ryo rxr = &rxq->vxrxq_cmd_ring[0];
2271 1.1 ryo else
2272 1.1 ryo rxr = &rxq->vxrxq_cmd_ring[1];
2273 1.1 ryo vmxnet3_rxq_eof_discard(rxq, rxr, idx);
2274 1.1 ryo } while (!eof);
2275 1.1 ryo }
2276 1.1 ryo
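/*
 * Translate the checksum bits of an Rx completion descriptor into
 * mbuf M_CSUM flags, marking bad IPv4/TCP/UDP checksums accordingly.
 */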
2277 1.1 ryo static void
2278 1.1 ryo vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *rxcd, struct mbuf *m)
2279 1.1 ryo {
2280 1.1 ryo if (rxcd->no_csum)
2281 1.1 ryo return;
2282 1.1 ryo
2283 1.1 ryo if (rxcd->ipv4) {
2284 1.1 ryo m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
2285 1.1 ryo if (rxcd->ipcsum_ok == 0)
2286 1.1 ryo m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
2287 1.1 ryo }
2288 1.1 ryo
2289 1.1 ryo if (rxcd->fragment)
2290 1.1 ryo return;
2291 1.1 ryo
2292 1.1 ryo if (rxcd->tcp) {
2293 1.1 ryo m->m_pkthdr.csum_flags |=
2294 1.1 ryo rxcd->ipv4 ? M_CSUM_TCPv4 : M_CSUM_TCPv6;
2295 1.1 ryo if ((rxcd->csum_ok) == 0)
2296 1.1 ryo m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
2297 1.1 ryo }
2298 1.1 ryo
2299 1.1 ryo if (rxcd->udp) {
2300 1.1 ryo m->m_pkthdr.csum_flags |=
2301 1.1 ryo 		    rxcd->ipv4 ? M_CSUM_UDPv4 : M_CSUM_UDPv6;
2302 1.1 ryo if ((rxcd->csum_ok) == 0)
2303 1.1 ryo m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
2304 1.1 ryo }
2305 1.1 ryo }
2306 1.1 ryo
2307 1.1 ryo static void
2308 1.1 ryo vmxnet3_rxq_input(struct vmxnet3_rxqueue *rxq,
2309 1.1 ryo struct vmxnet3_rxcompdesc *rxcd, struct mbuf *m)
2310 1.1 ryo {
2311 1.1 ryo struct vmxnet3_softc *sc;
2312 1.1 ryo struct ifnet *ifp;
2313 1.1 ryo
2314 1.1 ryo sc = rxq->vxrxq_sc;
2315 1.1 ryo ifp = &sc->vmx_ethercom.ec_if;
2316 1.1 ryo
2317 1.1 ryo if (rxcd->error) {
2318 1.1 ryo if_statinc(ifp, if_ierrors);
2319 1.1 ryo m_freem(m);
2320 1.1 ryo return;
2321 1.1 ryo }
2322 1.1 ryo
2323 1.1 ryo if (!rxcd->no_csum)
2324 1.1 ryo vmxnet3_rx_csum(rxcd, m);
2325 1.1 ryo if (rxcd->vlan)
2326 1.1 ryo vlan_set_tag(m, rxcd->vtag);
2327 1.1 ryo
2328 1.1 ryo net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
2329 1.1 ryo if_statinc_ref(nsr, if_ipackets);
2330 1.1 ryo if_statadd_ref(nsr, if_ibytes, m->m_pkthdr.len);
2331 1.1 ryo IF_STAT_PUTREF(ifp);
2332 1.1 ryo
2333 1.1 ryo if_percpuq_enqueue(ifp->if_percpuq, m);
2334 1.1 ryo }
2335 1.1 ryo
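/*
 * Receive at most "limit" packets.  A frame may span several
 * descriptors (a head buffer followed by body buffers), so completed
 * buffers are chained onto m_head/m_tail until the end-of-packet
 * descriptor arrives; a partially assembled chain is parked on the
 * queue between calls.
 */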
2336 1.1 ryo static bool
2337 1.1 ryo vmxnet3_rxq_eof(struct vmxnet3_rxqueue *rxq, u_int limit)
2338 1.1 ryo {
2339 1.1 ryo struct vmxnet3_softc *sc;
2340 1.1 ryo struct ifnet *ifp;
2341 1.1 ryo struct vmxnet3_rxring *rxr;
2342 1.1 ryo struct vmxnet3_comp_ring *rxc;
2343 1.1 ryo struct vmxnet3_rxdesc *rxd __diagused;
2344 1.1 ryo struct vmxnet3_rxcompdesc *rxcd;
2345 1.1 ryo struct mbuf *m, *m_head, *m_tail;
2346 1.2 ryo u_int idx, length;
2347 1.1 ryo bool more = false;
2348 1.1 ryo
2349 1.1 ryo sc = rxq->vxrxq_sc;
2350 1.1 ryo ifp = &sc->vmx_ethercom.ec_if;
2351 1.1 ryo rxc = &rxq->vxrxq_comp_ring;
2352 1.1 ryo
2353 1.1 ryo VMXNET3_RXQ_LOCK_ASSERT(rxq);
2354 1.1 ryo
2355 1.1 ryo if ((ifp->if_flags & IFF_RUNNING) == 0)
2356 1.1 ryo return more;
2357 1.1 ryo
2358 1.1 ryo m_head = rxq->vxrxq_mhead;
2359 1.1 ryo rxq->vxrxq_mhead = NULL;
2360 1.1 ryo m_tail = rxq->vxrxq_mtail;
2361 1.1 ryo rxq->vxrxq_mtail = NULL;
2362 1.1 ryo KASSERT(m_head == NULL || m_tail != NULL);
2363 1.1 ryo
2364 1.1 ryo for (;;) {
2365 1.1 ryo if (limit-- == 0) {
2366 1.1 ryo more = true;
2367 1.1 ryo break;
2368 1.1 ryo }
2369 1.1 ryo
2370 1.1 ryo rxcd = &rxc->vxcr_u.rxcd[rxc->vxcr_next];
2371 1.1 ryo if (rxcd->gen != rxc->vxcr_gen) {
2372 1.1 ryo rxq->vxrxq_mhead = m_head;
2373 1.1 ryo rxq->vxrxq_mtail = m_tail;
2374 1.1 ryo break;
2375 1.1 ryo }
2376 1.1 ryo vmxnet3_barrier(sc, VMXNET3_BARRIER_RD);
2377 1.1 ryo
2378 1.1 ryo if (++rxc->vxcr_next == rxc->vxcr_ndesc) {
2379 1.1 ryo rxc->vxcr_next = 0;
2380 1.1 ryo rxc->vxcr_gen ^= 1;
2381 1.1 ryo }
2382 1.1 ryo
2383 1.1 ryo idx = rxcd->rxd_idx;
2384 1.1 ryo length = rxcd->len;
2385 1.1 ryo if (rxcd->qid < sc->vmx_nrxqueues)
2386 1.1 ryo rxr = &rxq->vxrxq_cmd_ring[0];
2387 1.1 ryo else
2388 1.1 ryo rxr = &rxq->vxrxq_cmd_ring[1];
2389 1.1 ryo rxd = &rxr->vxrxr_rxd[idx];
2390 1.1 ryo
2391 1.1 ryo m = rxr->vxrxr_rxbuf[idx].vrxb_m;
2392 1.1 ryo KASSERT(m != NULL);
2393 1.1 ryo
2394 1.1 ryo /*
2395 1.1 ryo * The host may skip descriptors. We detect this when this
2396 1.1 ryo * descriptor does not match the previous fill index. Catch
2397 1.1 ryo * up with the host now.
2398 1.1 ryo */
2399 1.1 ryo if (__predict_false(rxr->vxrxr_fill != idx)) {
2400 1.1 ryo while (rxr->vxrxr_fill != idx) {
2401 1.1 ryo rxr->vxrxr_rxd[rxr->vxrxr_fill].gen =
2402 1.1 ryo rxr->vxrxr_gen;
2403 1.1 ryo vmxnet3_rxr_increment_fill(rxr);
2404 1.1 ryo }
2405 1.1 ryo }
2406 1.1 ryo
2407 1.1 ryo if (rxcd->sop) {
2408 1.1 ryo /* start of frame w/o head buffer */
2409 1.1 ryo KASSERT(rxd->btype == VMXNET3_BTYPE_HEAD);
2410 1.1 ryo /* start of frame not in ring 0 */
2411 1.1 ryo KASSERT(rxr == &rxq->vxrxq_cmd_ring[0]);
2412 1.1 ryo /* duplicate start of frame? */
2413 1.1 ryo KASSERT(m_head == NULL);
2414 1.1 ryo
2415 1.1 ryo if (length == 0) {
2416 1.1 ryo /* Just ignore this descriptor. */
2417 1.1 ryo vmxnet3_rxq_eof_discard(rxq, rxr, idx);
2418 1.1 ryo goto nextp;
2419 1.1 ryo }
2420 1.1 ryo
2421 1.1 ryo if (vmxnet3_newbuf(sc, rxq, rxr) != 0) {
2422 1.1 ryo if_statinc(ifp, if_iqdrops);
2423 1.1 ryo vmxnet3_rxq_eof_discard(rxq, rxr, idx);
2424 1.1 ryo if (!rxcd->eop)
2425 1.1 ryo vmxnet3_rxq_discard_chain(rxq);
2426 1.1 ryo goto nextp;
2427 1.1 ryo }
2428 1.1 ryo
2429 1.1 ryo m_set_rcvif(m, ifp);
2430 1.1 ryo m->m_pkthdr.len = m->m_len = length;
2431 1.1 ryo m->m_pkthdr.csum_flags = 0;
2432 1.1 ryo m_head = m_tail = m;
2433 1.1 ryo
2434 1.1 ryo } else {
2435 1.1 ryo /* non start of frame w/o body buffer */
2436 1.1 ryo KASSERT(rxd->btype == VMXNET3_BTYPE_BODY);
2437 1.1 ryo /* frame not started? */
2438 1.1 ryo KASSERT(m_head != NULL);
2439 1.1 ryo
2440 1.1 ryo if (vmxnet3_newbuf(sc, rxq, rxr) != 0) {
2441 1.1 ryo if_statinc(ifp, if_iqdrops);
2442 1.1 ryo vmxnet3_rxq_eof_discard(rxq, rxr, idx);
2443 1.1 ryo if (!rxcd->eop)
2444 1.1 ryo vmxnet3_rxq_discard_chain(rxq);
2445 1.1 ryo m_freem(m_head);
2446 1.1 ryo m_head = m_tail = NULL;
2447 1.1 ryo goto nextp;
2448 1.1 ryo }
2449 1.1 ryo
2450 1.1 ryo m->m_len = length;
2451 1.1 ryo m_head->m_pkthdr.len += length;
2452 1.1 ryo m_tail->m_next = m;
2453 1.1 ryo m_tail = m;
2454 1.1 ryo }
2455 1.1 ryo
2456 1.1 ryo if (rxcd->eop) {
2457 1.1 ryo vmxnet3_rxq_input(rxq, rxcd, m_head);
2458 1.1 ryo m_head = m_tail = NULL;
2459 1.1 ryo
2460 1.1 ryo /* Must recheck after dropping the Rx lock. */
2461 1.1 ryo if ((ifp->if_flags & IFF_RUNNING) == 0)
2462 1.1 ryo break;
2463 1.1 ryo }
2464 1.1 ryo
2465 1.1 ryo nextp:
2466 1.1 ryo if (__predict_false(rxq->vxrxq_rs->update_rxhead)) {
2467 1.1 ryo int qid = rxcd->qid;
2468 1.1 ryo bus_size_t r;
2469 1.1 ryo
2470 1.1 ryo idx = (idx + 1) % rxr->vxrxr_ndesc;
2471 1.1 ryo if (qid >= sc->vmx_nrxqueues) {
2472 1.1 ryo qid -= sc->vmx_nrxqueues;
2473 1.1 ryo r = VMXNET3_BAR0_RXH2(qid);
2474 1.1 ryo } else
2475 1.1 ryo r = VMXNET3_BAR0_RXH1(qid);
2476 1.1 ryo vmxnet3_write_bar0(sc, r, idx);
2477 1.1 ryo }
2478 1.1 ryo }
2479 1.1 ryo
2480 1.1 ryo return more;
2481 1.1 ryo }
2482 1.1 ryo
2483 1.1 ryo static inline void
2484 1.1 ryo vmxnet3_sched_handle_queue(struct vmxnet3_softc *sc, struct vmxnet3_queue *vmxq)
2485 1.1 ryo {
2486 1.1 ryo
2487 1.1 ryo if (vmxq->vxq_workqueue) {
2488 1.5 knakahar /*
2489 1.5 knakahar 		 * When this function is called, "vmxq" is owned by one CPU,
2490 1.5 knakahar 		 * so no atomic operation is required here.
2491 1.5 knakahar */
2492 1.5 knakahar if (!vmxq->vxq_wq_enqueued) {
2493 1.5 knakahar vmxq->vxq_wq_enqueued = true;
2494 1.5 knakahar workqueue_enqueue(sc->vmx_queue_wq,
2495 1.5 knakahar &vmxq->vxq_wq_cookie, curcpu());
2496 1.5 knakahar }
2497 1.1 ryo } else {
2498 1.1 ryo softint_schedule(vmxq->vxq_si);
2499 1.1 ryo }
2500 1.1 ryo }
2501 1.1 ryo
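/*
 * Interrupt handler for the INTx/MSI (single vector) case: check for
 * pending events, process queue 0's Tx and Rx rings under their
 * locks, and defer the remainder to softint/workqueue when the
 * interrupt processing limits are hit.
 */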
2502 1.1 ryo static int
2503 1.1 ryo vmxnet3_legacy_intr(void *xsc)
2504 1.1 ryo {
2505 1.1 ryo struct vmxnet3_softc *sc;
2506 1.8 msaitoh struct vmxnet3_queue *vmxq;
2507 1.8 msaitoh struct vmxnet3_txqueue *txq;
2508 1.1 ryo struct vmxnet3_rxqueue *rxq;
2509 1.1 ryo u_int txlimit, rxlimit;
2510 1.1 ryo bool txmore, rxmore;
2511 1.1 ryo
2512 1.1 ryo sc = xsc;
2513 1.8 msaitoh vmxq = &sc->vmx_queue[0];
2514 1.8 msaitoh txq = &vmxq->vxq_txqueue;
2515 1.8 msaitoh rxq = &vmxq->vxq_rxqueue;
2516 1.1 ryo txlimit = sc->vmx_tx_intr_process_limit;
2517 1.1 ryo rxlimit = sc->vmx_rx_intr_process_limit;
2518 1.1 ryo
2519 1.1 ryo if (sc->vmx_intr_type == VMXNET3_IT_LEGACY) {
2520 1.1 ryo if (vmxnet3_read_bar1(sc, VMXNET3_BAR1_INTR) == 0)
2521 1.1 ryo return (0);
2522 1.1 ryo }
2523 1.1 ryo if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
2524 1.1 ryo vmxnet3_disable_all_intrs(sc);
2525 1.1 ryo
2526 1.1 ryo if (sc->vmx_ds->event != 0)
2527 1.1 ryo vmxnet3_evintr(sc);
2528 1.1 ryo
2529 1.9 msaitoh VMXNET3_TXQ_LOCK(txq);
2530 1.9 msaitoh txmore = vmxnet3_txq_eof(txq, txlimit);
2531 1.9 msaitoh VMXNET3_TXQ_UNLOCK(txq);
2532 1.9 msaitoh
2533 1.1 ryo VMXNET3_RXQ_LOCK(rxq);
2534 1.1 ryo rxmore = vmxnet3_rxq_eof(rxq, rxlimit);
2535 1.1 ryo VMXNET3_RXQ_UNLOCK(rxq);
2536 1.1 ryo
2537 1.8 msaitoh 	if (txmore || rxmore) {
2538 1.8 msaitoh 		vmxnet3_sched_handle_queue(sc, vmxq);
2539 1.8 msaitoh 	} else {
2540 1.1 ryo if_schedule_deferred_start(&sc->vmx_ethercom.ec_if);
2541 1.1 ryo vmxnet3_enable_all_intrs(sc);
2542 1.1 ryo }
2543 1.8 msaitoh
2544 1.1 ryo return (1);
2545 1.1 ryo }
2546 1.1 ryo
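/*
 * Per-queue-pair MSI-X interrupt handler: process one queue's Tx and
 * Rx rings, then either schedule deferred processing or re-enable the
 * queue's interrupt.
 */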
2547 1.1 ryo static int
2548 1.1 ryo vmxnet3_txrxq_intr(void *xvmxq)
2549 1.1 ryo {
2550 1.1 ryo struct vmxnet3_softc *sc;
2551 1.1 ryo struct vmxnet3_queue *vmxq;
2552 1.1 ryo struct vmxnet3_txqueue *txq;
2553 1.1 ryo struct vmxnet3_rxqueue *rxq;
2554 1.1 ryo u_int txlimit, rxlimit;
2555 1.1 ryo bool txmore, rxmore;
2556 1.1 ryo
2557 1.1 ryo vmxq = xvmxq;
2558 1.1 ryo txq = &vmxq->vxq_txqueue;
2559 1.1 ryo rxq = &vmxq->vxq_rxqueue;
2560 1.1 ryo sc = txq->vxtxq_sc;
2561 1.1 ryo txlimit = sc->vmx_tx_intr_process_limit;
2562 1.1 ryo rxlimit = sc->vmx_rx_intr_process_limit;
2563 1.1 ryo vmxq->vxq_workqueue = sc->vmx_txrx_workqueue;
2564 1.1 ryo
2565 1.1 ryo if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
2566 1.1 ryo vmxnet3_disable_intr(sc, vmxq->vxq_intr_idx);
2567 1.1 ryo
2568 1.1 ryo VMXNET3_TXQ_LOCK(txq);
2569 1.1 ryo txq->vxtxq_intr.ev_count++;
2570 1.1 ryo txmore = vmxnet3_txq_eof(txq, txlimit);
2571 1.1 ryo VMXNET3_TXQ_UNLOCK(txq);
2572 1.1 ryo
2573 1.1 ryo VMXNET3_RXQ_LOCK(rxq);
2574 1.1 ryo rxq->vxrxq_intr.ev_count++;
2575 1.1 ryo rxmore = vmxnet3_rxq_eof(rxq, rxlimit);
2576 1.1 ryo VMXNET3_RXQ_UNLOCK(rxq);
2577 1.1 ryo
2578 1.8 msaitoh 	if (txmore || rxmore) {
2579 1.1 ryo 		vmxnet3_sched_handle_queue(sc, vmxq);
2580 1.8 msaitoh 	} else {
2581 1.1 ryo /* for ALTQ */
2582 1.1 ryo if (vmxq->vxq_id == 0)
2583 1.1 ryo if_schedule_deferred_start(&sc->vmx_ethercom.ec_if);
2584 1.1 ryo softint_schedule(txq->vxtxq_si);
2585 1.1 ryo
2586 1.1 ryo vmxnet3_enable_intr(sc, vmxq->vxq_intr_idx);
2587 1.1 ryo }
2588 1.1 ryo
2589 1.1 ryo return (1);
2590 1.1 ryo }
2591 1.1 ryo
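/*
 * Deferred (softint/workqueue) queue processing using the larger
 * non-interrupt limits; reschedules itself while work remains.
 */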
2592 1.1 ryo static void
2593 1.1 ryo vmxnet3_handle_queue(void *xvmxq)
2594 1.1 ryo {
2595 1.1 ryo struct vmxnet3_softc *sc;
2596 1.1 ryo struct vmxnet3_queue *vmxq;
2597 1.1 ryo struct vmxnet3_txqueue *txq;
2598 1.1 ryo struct vmxnet3_rxqueue *rxq;
2599 1.1 ryo u_int txlimit, rxlimit;
2600 1.1 ryo bool txmore, rxmore;
2601 1.1 ryo
2602 1.1 ryo vmxq = xvmxq;
2603 1.1 ryo txq = &vmxq->vxq_txqueue;
2604 1.1 ryo rxq = &vmxq->vxq_rxqueue;
2605 1.1 ryo sc = txq->vxtxq_sc;
2606 1.1 ryo txlimit = sc->vmx_tx_process_limit;
2607 1.1 ryo rxlimit = sc->vmx_rx_process_limit;
2608 1.1 ryo
2609 1.1 ryo VMXNET3_TXQ_LOCK(txq);
2610 1.1 ryo txq->vxtxq_defer.ev_count++;
2611 1.1 ryo txmore = vmxnet3_txq_eof(txq, txlimit);
2612 1.1 ryo if (txmore)
2613 1.1 ryo txq->vxtxq_deferreq.ev_count++;
2614 1.1 ryo /* for ALTQ */
2615 1.1 ryo if (vmxq->vxq_id == 0)
2616 1.1 ryo if_schedule_deferred_start(&sc->vmx_ethercom.ec_if);
2617 1.1 ryo softint_schedule(txq->vxtxq_si);
2618 1.1 ryo VMXNET3_TXQ_UNLOCK(txq);
2619 1.1 ryo
2620 1.1 ryo VMXNET3_RXQ_LOCK(rxq);
2621 1.1 ryo rxq->vxrxq_defer.ev_count++;
2622 1.1 ryo rxmore = vmxnet3_rxq_eof(rxq, rxlimit);
2623 1.1 ryo if (rxmore)
2624 1.1 ryo rxq->vxrxq_deferreq.ev_count++;
2625 1.1 ryo VMXNET3_RXQ_UNLOCK(rxq);
2626 1.1 ryo
2627 1.1 ryo if (txmore || rxmore)
2628 1.1 ryo vmxnet3_sched_handle_queue(sc, vmxq);
2629 1.1 ryo else
2630 1.1 ryo vmxnet3_enable_intr(sc, vmxq->vxq_intr_idx);
2631 1.1 ryo }
2632 1.1 ryo
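/*
 * Workqueue wrapper around vmxnet3_handle_queue(); clears the
 * enqueued flag first so the queue can be scheduled again.
 */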
2633 1.1 ryo static void
2634 1.1 ryo vmxnet3_handle_queue_work(struct work *wk, void *context)
2635 1.1 ryo {
2636 1.1 ryo struct vmxnet3_queue *vmxq;
2637 1.1 ryo
2638 1.1 ryo vmxq = container_of(wk, struct vmxnet3_queue, vxq_wq_cookie);
2639 1.5 knakahar vmxq->vxq_wq_enqueued = false;
2640 1.1 ryo vmxnet3_handle_queue(vmxq);
2641 1.1 ryo }
2642 1.1 ryo
2643 1.1 ryo static int
2644 1.1 ryo vmxnet3_event_intr(void *xsc)
2645 1.1 ryo {
2646 1.1 ryo struct vmxnet3_softc *sc;
2647 1.1 ryo
2648 1.1 ryo sc = xsc;
2649 1.1 ryo
2650 1.1 ryo if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
2651 1.1 ryo vmxnet3_disable_intr(sc, sc->vmx_event_intr_idx);
2652 1.1 ryo
2653 1.1 ryo sc->vmx_event_intr.ev_count++;
2654 1.1 ryo
2655 1.1 ryo if (sc->vmx_ds->event != 0)
2656 1.1 ryo vmxnet3_evintr(sc);
2657 1.1 ryo
2658 1.1 ryo vmxnet3_enable_intr(sc, sc->vmx_event_intr_idx);
2659 1.1 ryo
2660 1.1 ryo return (1);
2661 1.1 ryo }
2662 1.1 ryo
2663 1.1 ryo static void
2664 1.1 ryo vmxnet3_txstop(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *txq)
2665 1.1 ryo {
2666 1.1 ryo struct vmxnet3_txring *txr;
2667 1.1 ryo struct vmxnet3_txbuf *txb;
2668 1.2 ryo u_int i;
2669 1.1 ryo
2670 1.1 ryo txr = &txq->vxtxq_cmd_ring;
2671 1.1 ryo
2672 1.1 ryo for (i = 0; i < txr->vxtxr_ndesc; i++) {
2673 1.1 ryo txb = &txr->vxtxr_txbuf[i];
2674 1.1 ryo
2675 1.1 ryo if (txb->vtxb_m == NULL)
2676 1.1 ryo continue;
2677 1.1 ryo
2678 1.1 ryo bus_dmamap_sync(sc->vmx_dmat, txb->vtxb_dmamap,
2679 1.1 ryo 0, txb->vtxb_dmamap->dm_mapsize,
2680 1.1 ryo BUS_DMASYNC_POSTWRITE);
2681 1.1 ryo bus_dmamap_unload(sc->vmx_dmat, txb->vtxb_dmamap);
2682 1.1 ryo m_freem(txb->vtxb_m);
2683 1.1 ryo txb->vtxb_m = NULL;
2684 1.1 ryo }
2685 1.1 ryo }
2686 1.1 ryo
2687 1.1 ryo static void
2688 1.1 ryo vmxnet3_rxstop(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rxq)
2689 1.1 ryo {
2690 1.1 ryo struct vmxnet3_rxring *rxr;
2691 1.1 ryo struct vmxnet3_rxbuf *rxb;
2692 1.2 ryo u_int i, j;
2693 1.1 ryo
2694 1.1 ryo if (rxq->vxrxq_mhead != NULL) {
2695 1.1 ryo m_freem(rxq->vxrxq_mhead);
2696 1.1 ryo rxq->vxrxq_mhead = NULL;
2697 1.1 ryo rxq->vxrxq_mtail = NULL;
2698 1.1 ryo }
2699 1.1 ryo
2700 1.1 ryo for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
2701 1.1 ryo rxr = &rxq->vxrxq_cmd_ring[i];
2702 1.1 ryo
2703 1.1 ryo for (j = 0; j < rxr->vxrxr_ndesc; j++) {
2704 1.1 ryo rxb = &rxr->vxrxr_rxbuf[j];
2705 1.1 ryo
2706 1.1 ryo if (rxb->vrxb_m == NULL)
2707 1.1 ryo continue;
2708 1.1 ryo
2709 1.1 ryo bus_dmamap_sync(sc->vmx_dmat, rxb->vrxb_dmamap,
2710 1.1 ryo 0, rxb->vrxb_dmamap->dm_mapsize,
2711 1.1 ryo BUS_DMASYNC_POSTREAD);
2712 1.1 ryo bus_dmamap_unload(sc->vmx_dmat, rxb->vrxb_dmamap);
2713 1.1 ryo m_freem(rxb->vrxb_m);
2714 1.1 ryo rxb->vrxb_m = NULL;
2715 1.1 ryo }
2716 1.1 ryo }
2717 1.1 ryo }
2718 1.1 ryo
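/*
 * Rendezvous with any handlers still running on the queues: taking
 * and releasing each queue lock ensures in-flight Tx/Rx processing
 * has drained, and workqueue_wait() flushes the deferred work.
 */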
2719 1.1 ryo static void
2720 1.1 ryo vmxnet3_stop_rendezvous(struct vmxnet3_softc *sc)
2721 1.1 ryo {
2722 1.1 ryo struct vmxnet3_rxqueue *rxq;
2723 1.1 ryo struct vmxnet3_txqueue *txq;
2724 1.7 knakahar struct vmxnet3_queue *vmxq;
2725 1.1 ryo int i;
2726 1.1 ryo
2727 1.1 ryo for (i = 0; i < sc->vmx_nrxqueues; i++) {
2728 1.1 ryo rxq = &sc->vmx_queue[i].vxq_rxqueue;
2729 1.1 ryo VMXNET3_RXQ_LOCK(rxq);
2730 1.1 ryo VMXNET3_RXQ_UNLOCK(rxq);
2731 1.1 ryo }
2732 1.1 ryo for (i = 0; i < sc->vmx_ntxqueues; i++) {
2733 1.1 ryo txq = &sc->vmx_queue[i].vxq_txqueue;
2734 1.1 ryo VMXNET3_TXQ_LOCK(txq);
2735 1.1 ryo VMXNET3_TXQ_UNLOCK(txq);
2736 1.1 ryo }
2737 1.7 knakahar for (i = 0; i < sc->vmx_nrxqueues; i++) {
2738 1.7 knakahar vmxq = &sc->vmx_queue[i];
2739 1.7 knakahar workqueue_wait(sc->vmx_queue_wq, &vmxq->vxq_wq_cookie);
2740 1.7 knakahar }
2741 1.1 ryo }
2742 1.1 ryo
2743 1.1 ryo static void
2744 1.1 ryo vmxnet3_stop_locked(struct vmxnet3_softc *sc)
2745 1.1 ryo {
2746 1.1 ryo struct ifnet *ifp;
2747 1.1 ryo int q;
2748 1.1 ryo
2749 1.1 ryo ifp = &sc->vmx_ethercom.ec_if;
2750 1.1 ryo VMXNET3_CORE_LOCK_ASSERT(sc);
2751 1.1 ryo
2752 1.1 ryo ifp->if_flags &= ~IFF_RUNNING;
2753 1.1 ryo sc->vmx_link_active = 0;
2754 1.1 ryo callout_stop(&sc->vmx_tick);
2755 1.1 ryo
2756 1.1 ryo /* Disable interrupts. */
2757 1.1 ryo vmxnet3_disable_all_intrs(sc);
2758 1.1 ryo vmxnet3_write_cmd(sc, VMXNET3_CMD_DISABLE);
2759 1.1 ryo
2760 1.1 ryo vmxnet3_stop_rendezvous(sc);
2761 1.1 ryo
2762 1.1 ryo for (q = 0; q < sc->vmx_ntxqueues; q++)
2763 1.1 ryo vmxnet3_txstop(sc, &sc->vmx_queue[q].vxq_txqueue);
2764 1.1 ryo for (q = 0; q < sc->vmx_nrxqueues; q++)
2765 1.1 ryo vmxnet3_rxstop(sc, &sc->vmx_queue[q].vxq_rxqueue);
2766 1.1 ryo
2767 1.1 ryo vmxnet3_write_cmd(sc, VMXNET3_CMD_RESET);
2768 1.1 ryo }
2769 1.1 ryo
2770 1.1 ryo static void
2771 1.1 ryo vmxnet3_stop(struct ifnet *ifp, int disable)
2772 1.1 ryo {
2773 1.1 ryo struct vmxnet3_softc *sc = ifp->if_softc;
2774 1.1 ryo
2775 1.1 ryo VMXNET3_CORE_LOCK(sc);
2776 1.1 ryo vmxnet3_stop_locked(sc);
2777 1.1 ryo VMXNET3_CORE_UNLOCK(sc);
2778 1.1 ryo }
2779 1.1 ryo
2780 1.1 ryo static void
2781 1.1 ryo vmxnet3_txinit(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *txq)
2782 1.1 ryo {
2783 1.1 ryo struct vmxnet3_txring *txr;
2784 1.1 ryo struct vmxnet3_comp_ring *txc;
2785 1.1 ryo
2786 1.1 ryo txr = &txq->vxtxq_cmd_ring;
2787 1.1 ryo txr->vxtxr_head = 0;
2788 1.1 ryo txr->vxtxr_next = 0;
2789 1.1 ryo txr->vxtxr_gen = VMXNET3_INIT_GEN;
2790 1.1 ryo memset(txr->vxtxr_txd, 0,
2791 1.1 ryo txr->vxtxr_ndesc * sizeof(struct vmxnet3_txdesc));
2792 1.1 ryo
2793 1.1 ryo txc = &txq->vxtxq_comp_ring;
2794 1.1 ryo txc->vxcr_next = 0;
2795 1.1 ryo txc->vxcr_gen = VMXNET3_INIT_GEN;
2796 1.1 ryo memset(txc->vxcr_u.txcd, 0,
2797 1.1 ryo txc->vxcr_ndesc * sizeof(struct vmxnet3_txcompdesc));
2798 1.1 ryo }
2799 1.1 ryo
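/*
 * Reset a queue's Rx rings to the initial generation and repopulate
 * the first command ring with fresh buffers; the remaining rings are
 * left empty until LRO/jumbo support is added.
 */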
2800 1.1 ryo static int
2801 1.1 ryo vmxnet3_rxinit(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rxq)
2802 1.1 ryo {
2803 1.1 ryo struct vmxnet3_rxring *rxr;
2804 1.1 ryo struct vmxnet3_comp_ring *rxc;
2805 1.2 ryo u_int i, populate, idx;
2806 1.2 ryo int error;
2807 1.1 ryo
2808 1.1 ryo 	/* LRO and jumbo frames are not supported yet */
2809 1.1 ryo populate = 1;
2810 1.1 ryo
2811 1.1 ryo for (i = 0; i < populate; i++) {
2812 1.1 ryo rxr = &rxq->vxrxq_cmd_ring[i];
2813 1.1 ryo rxr->vxrxr_fill = 0;
2814 1.1 ryo rxr->vxrxr_gen = VMXNET3_INIT_GEN;
2815 1.1 ryo memset(rxr->vxrxr_rxd, 0,
2816 1.1 ryo rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxdesc));
2817 1.1 ryo
2818 1.1 ryo for (idx = 0; idx < rxr->vxrxr_ndesc; idx++) {
2819 1.1 ryo error = vmxnet3_newbuf(sc, rxq, rxr);
2820 1.1 ryo if (error)
2821 1.1 ryo return (error);
2822 1.1 ryo }
2823 1.1 ryo }
2824 1.1 ryo
2825 1.1 ryo for (/**/; i < VMXNET3_RXRINGS_PERQ; i++) {
2826 1.1 ryo rxr = &rxq->vxrxq_cmd_ring[i];
2827 1.1 ryo rxr->vxrxr_fill = 0;
2828 1.1 ryo rxr->vxrxr_gen = 0;
2829 1.1 ryo memset(rxr->vxrxr_rxd, 0,
2830 1.1 ryo rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxdesc));
2831 1.1 ryo }
2832 1.1 ryo
2833 1.1 ryo rxc = &rxq->vxrxq_comp_ring;
2834 1.1 ryo rxc->vxcr_next = 0;
2835 1.1 ryo rxc->vxcr_gen = VMXNET3_INIT_GEN;
2836 1.1 ryo memset(rxc->vxcr_u.rxcd, 0,
2837 1.1 ryo rxc->vxcr_ndesc * sizeof(struct vmxnet3_rxcompdesc));
2838 1.1 ryo
2839 1.1 ryo return (0);
2840 1.1 ryo }
2841 1.1 ryo
2842 1.1 ryo static int
2843 1.1 ryo vmxnet3_reinit_queues(struct vmxnet3_softc *sc)
2844 1.1 ryo {
2845 1.1 ryo device_t dev;
2846 1.1 ryo int q, error;
2847 1.1 ryo dev = sc->vmx_dev;
2848 1.1 ryo
2849 1.1 ryo for (q = 0; q < sc->vmx_ntxqueues; q++)
2850 1.1 ryo vmxnet3_txinit(sc, &sc->vmx_queue[q].vxq_txqueue);
2851 1.1 ryo
2852 1.1 ryo for (q = 0; q < sc->vmx_nrxqueues; q++) {
2853 1.1 ryo error = vmxnet3_rxinit(sc, &sc->vmx_queue[q].vxq_rxqueue);
2854 1.1 ryo if (error) {
2855 1.1 ryo device_printf(dev, "cannot populate Rx queue %d\n", q);
2856 1.1 ryo return (error);
2857 1.1 ryo }
2858 1.1 ryo }
2859 1.1 ryo
2860 1.1 ryo return (0);
2861 1.1 ryo }
2862 1.1 ryo
2863 1.1 ryo static int
2864 1.1 ryo vmxnet3_enable_device(struct vmxnet3_softc *sc)
2865 1.1 ryo {
2866 1.1 ryo int q;
2867 1.1 ryo
2868 1.1 ryo if (vmxnet3_read_cmd(sc, VMXNET3_CMD_ENABLE) != 0) {
2869 1.1 ryo device_printf(sc->vmx_dev, "device enable command failed!\n");
2870 1.1 ryo return (1);
2871 1.1 ryo }
2872 1.1 ryo
2873 1.1 ryo /* Reset the Rx queue heads. */
2874 1.1 ryo for (q = 0; q < sc->vmx_nrxqueues; q++) {
2875 1.1 ryo vmxnet3_write_bar0(sc, VMXNET3_BAR0_RXH1(q), 0);
2876 1.1 ryo vmxnet3_write_bar0(sc, VMXNET3_BAR0_RXH2(q), 0);
2877 1.1 ryo }
2878 1.1 ryo
2879 1.1 ryo return (0);
2880 1.1 ryo }
2881 1.1 ryo
2882 1.1 ryo static void
2883 1.1 ryo vmxnet3_reinit_rxfilters(struct vmxnet3_softc *sc)
2884 1.1 ryo {
2885 1.1 ryo
2886 1.1 ryo vmxnet3_set_rxfilter(sc);
2887 1.1 ryo
2888 1.1 ryo memset(sc->vmx_ds->vlan_filter, 0, sizeof(sc->vmx_ds->vlan_filter));
2889 1.1 ryo vmxnet3_write_cmd(sc, VMXNET3_CMD_VLAN_FILTER);
2890 1.1 ryo }
2891 1.1 ryo
2892 1.1 ryo static int
2893 1.1 ryo vmxnet3_reinit(struct vmxnet3_softc *sc)
2894 1.1 ryo {
2895 1.1 ryo
2896 1.1 ryo vmxnet3_set_lladdr(sc);
2897 1.1 ryo vmxnet3_reinit_shared_data(sc);
2898 1.1 ryo
2899 1.1 ryo if (vmxnet3_reinit_queues(sc) != 0)
2900 1.1 ryo return (ENXIO);
2901 1.1 ryo
2902 1.1 ryo if (vmxnet3_enable_device(sc) != 0)
2903 1.1 ryo return (ENXIO);
2904 1.1 ryo
2905 1.1 ryo vmxnet3_reinit_rxfilters(sc);
2906 1.1 ryo
2907 1.1 ryo return (0);
2908 1.1 ryo }
2909 1.1 ryo
2910 1.1 ryo static int
2911 1.1 ryo vmxnet3_init_locked(struct vmxnet3_softc *sc)
2912 1.1 ryo {
2913 1.1 ryo struct ifnet *ifp = &sc->vmx_ethercom.ec_if;
2914 1.1 ryo int error;
2915 1.1 ryo
2916 1.1 ryo vmxnet3_stop_locked(sc);
2917 1.1 ryo
2918 1.1 ryo error = vmxnet3_reinit(sc);
2919 1.1 ryo if (error) {
2920 1.1 ryo vmxnet3_stop_locked(sc);
2921 1.1 ryo return (error);
2922 1.1 ryo }
2923 1.1 ryo
2924 1.1 ryo ifp->if_flags |= IFF_RUNNING;
2925 1.3 ryo vmxnet3_if_link_status(sc);
2926 1.1 ryo
2927 1.1 ryo vmxnet3_enable_all_intrs(sc);
2928 1.1 ryo callout_reset(&sc->vmx_tick, hz, vmxnet3_tick, sc);
2929 1.1 ryo
2930 1.1 ryo return (0);
2931 1.1 ryo }
2932 1.1 ryo
2933 1.1 ryo static int
2934 1.1 ryo vmxnet3_init(struct ifnet *ifp)
2935 1.1 ryo {
2936 1.1 ryo struct vmxnet3_softc *sc = ifp->if_softc;
2937 1.1 ryo int error;
2938 1.1 ryo
2939 1.1 ryo VMXNET3_CORE_LOCK(sc);
2940 1.1 ryo error = vmxnet3_init_locked(sc);
2941 1.1 ryo VMXNET3_CORE_UNLOCK(sc);
2942 1.1 ryo
2943 1.1 ryo return (error);
2944 1.1 ryo }
2945 1.1 ryo
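/*
 * Compute the L4 header offset ("start") and the checksum field
 * offset for a packet using checksum or TSO offload.  For TSO, the
 * TCP pseudo-header checksum is seeded here and the TCP header length
 * is folded into the reported header size.
 */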
2946 1.1 ryo static int
2947 1.1 ryo vmxnet3_txq_offload_ctx(struct vmxnet3_txqueue *txq, struct mbuf *m,
2948 1.1 ryo int *start, int *csum_start)
2949 1.1 ryo {
2950 1.1 ryo struct ether_header *eh;
2951 1.1 ryo struct mbuf *mp;
2952 1.1 ryo int offset, csum_off, iphl, offp;
2953 1.1 ryo bool v4;
2954 1.1 ryo
2955 1.1 ryo eh = mtod(m, struct ether_header *);
2956 1.1 ryo 	switch (ntohs(eh->ether_type)) {
2957 1.1 ryo case ETHERTYPE_IP:
2958 1.1 ryo case ETHERTYPE_IPV6:
2959 1.1 ryo offset = ETHER_HDR_LEN;
2960 1.1 ryo break;
2961 1.1 ryo case ETHERTYPE_VLAN:
2962 1.1 ryo offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2963 1.1 ryo break;
2964 1.1 ryo default:
2965 1.1 ryo m_freem(m);
2966 1.1 ryo return (EINVAL);
2967 1.1 ryo }
2968 1.1 ryo
2969 1.1 ryo if ((m->m_pkthdr.csum_flags &
2970 1.1 ryo (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) {
2971 1.1 ryo iphl = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data);
2972 1.1 ryo v4 = true;
2973 1.1 ryo } else {
2974 1.1 ryo iphl = M_CSUM_DATA_IPv6_IPHL(m->m_pkthdr.csum_data);
2975 1.1 ryo v4 = false;
2976 1.1 ryo }
2977 1.1 ryo *start = offset + iphl;
2978 1.1 ryo
2979 1.1 ryo if (m->m_pkthdr.csum_flags &
2980 1.1 ryo (M_CSUM_TCPv4 | M_CSUM_TCPv6 | M_CSUM_TSOv4 | M_CSUM_TSOv6)) {
2981 1.1 ryo csum_off = offsetof(struct tcphdr, th_sum);
2982 1.1 ryo } else {
2983 1.1 ryo csum_off = offsetof(struct udphdr, uh_sum);
2984 1.1 ryo }
2985 1.1 ryo
2986 1.1 ryo *csum_start = *start + csum_off;
2987 1.1 ryo mp = m_pulldown(m, 0, *csum_start + 2, &offp);
2988 1.1 ryo if (!mp) {
2989 1.1 ryo /* m is already freed */
2990 1.1 ryo return ENOBUFS;
2991 1.1 ryo }
2992 1.1 ryo
2993 1.1 ryo if (m->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) {
2994 1.1 ryo struct tcphdr *tcp;
2995 1.1 ryo
2996 1.1 ryo txq->vxtxq_stats.vmtxs_tso++;
2997 1.1 ryo tcp = (void *)(mtod(mp, char *) + offp + *start);
2998 1.1 ryo
2999 1.1 ryo if (v4) {
3000 1.1 ryo struct ip *ip;
3001 1.1 ryo
3002 1.1 ryo ip = (void *)(mtod(mp, char *) + offp + offset);
3003 1.1 ryo tcp->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
3004 1.1 ryo ip->ip_dst.s_addr, htons(IPPROTO_TCP));
3005 1.1 ryo } else {
3006 1.1 ryo struct ip6_hdr *ip6;
3007 1.1 ryo
3008 1.1 ryo ip6 = (void *)(mtod(mp, char *) + offp + offset);
3009 1.1 ryo tcp->th_sum = in6_cksum_phdr(&ip6->ip6_src,
3010 1.1 ryo &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
3011 1.1 ryo }
3012 1.1 ryo
3013 1.1 ryo /*
3014 1.1 ryo * For TSO, the size of the protocol header is also
3015 1.1 ryo * included in the descriptor header size.
3016 1.1 ryo */
3017 1.1 ryo *start += (tcp->th_off << 2);
3018 1.1 ryo } else
3019 1.1 ryo txq->vxtxq_stats.vmtxs_csum++;
3020 1.1 ryo
3021 1.1 ryo return (0);
3022 1.1 ryo }
3023 1.1 ryo
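/*
 * Load an mbuf chain for transmit.  If the chain has too many
 * segments (EFBIG), try once to m_defrag() it before giving up.
 */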
3024 1.1 ryo static int
3025 1.1 ryo vmxnet3_txq_load_mbuf(struct vmxnet3_txqueue *txq, struct mbuf **m0,
3026 1.1 ryo bus_dmamap_t dmap)
3027 1.1 ryo {
3028 1.1 ryo struct mbuf *m;
3029 1.1 ryo bus_dma_tag_t tag;
3030 1.1 ryo int error;
3031 1.1 ryo
3032 1.1 ryo m = *m0;
3033 1.1 ryo tag = txq->vxtxq_sc->vmx_dmat;
3034 1.1 ryo
3035 1.1 ryo error = bus_dmamap_load_mbuf(tag, dmap, m, BUS_DMA_NOWAIT);
3036 1.1 ryo if (error == 0 || error != EFBIG)
3037 1.1 ryo return (error);
3038 1.1 ryo
3039 1.1 ryo m = m_defrag(m, M_NOWAIT);
3040 1.1 ryo if (m != NULL) {
3041 1.1 ryo *m0 = m;
3042 1.1 ryo error = bus_dmamap_load_mbuf(tag, dmap, m, BUS_DMA_NOWAIT);
3043 1.1 ryo } else
3044 1.1 ryo error = ENOBUFS;
3045 1.1 ryo
3046 1.1 ryo if (error) {
3047 1.1 ryo m_freem(*m0);
3048 1.1 ryo *m0 = NULL;
3049 1.1 ryo txq->vxtxq_defrag_failed.ev_count++;
3050 1.1 ryo } else
3051 1.1 ryo txq->vxtxq_defragged.ev_count++;
3052 1.1 ryo
3053 1.1 ryo return (error);
3054 1.1 ryo }
3055 1.1 ryo
3056 1.1 ryo static void
3057 1.1 ryo vmxnet3_txq_unload_mbuf(struct vmxnet3_txqueue *txq, bus_dmamap_t dmap)
3058 1.1 ryo {
3059 1.1 ryo
3060 1.1 ryo bus_dmamap_unload(txq->vxtxq_sc->vmx_dmat, dmap);
3061 1.1 ryo }
3062 1.1 ryo
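/*
 * Map a packet into Tx descriptors.  The start-of-packet descriptor
 * is written with an inverted generation bit and only flipped after
 * every segment and the offload fields are filled in, so the whole
 * chain is handed to the device atomically.
 */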
3063 1.1 ryo static int
3064 1.1 ryo vmxnet3_txq_encap(struct vmxnet3_txqueue *txq, struct mbuf **m0)
3065 1.1 ryo {
3066 1.1 ryo struct vmxnet3_softc *sc;
3067 1.1 ryo struct vmxnet3_txring *txr;
3068 1.1 ryo struct vmxnet3_txdesc *txd, *sop;
3069 1.1 ryo struct mbuf *m;
3070 1.1 ryo bus_dmamap_t dmap;
3071 1.1 ryo bus_dma_segment_t *segs;
3072 1.1 ryo int i, gen, start, csum_start, nsegs, error;
3073 1.1 ryo
3074 1.1 ryo sc = txq->vxtxq_sc;
3075 1.1 ryo start = 0;
3076 1.1 ryo txd = NULL;
3077 1.1 ryo txr = &txq->vxtxq_cmd_ring;
3078 1.1 ryo dmap = txr->vxtxr_txbuf[txr->vxtxr_head].vtxb_dmamap;
3079 1.1 ryo 	csum_start = 0;		/* XXX: silence GCC uninitialized warning */
3080 1.1 ryo
3081 1.1 ryo error = vmxnet3_txq_load_mbuf(txq, m0, dmap);
3082 1.1 ryo if (error)
3083 1.1 ryo return (error);
3084 1.1 ryo
3085 1.1 ryo nsegs = dmap->dm_nsegs;
3086 1.1 ryo segs = dmap->dm_segs;
3087 1.1 ryo
3088 1.1 ryo m = *m0;
3089 1.1 ryo KASSERT(m->m_flags & M_PKTHDR);
3090 1.1 ryo KASSERT(nsegs <= VMXNET3_TX_MAXSEGS);
3091 1.1 ryo
3092 1.1 ryo if (vmxnet3_txring_avail(txr) < nsegs) {
3093 1.1 ryo txq->vxtxq_stats.vmtxs_full++;
3094 1.1 ryo vmxnet3_txq_unload_mbuf(txq, dmap);
3095 1.1 ryo return (ENOSPC);
3096 1.1 ryo } else if (m->m_pkthdr.csum_flags & VMXNET3_CSUM_ALL_OFFLOAD) {
3097 1.1 ryo error = vmxnet3_txq_offload_ctx(txq, m, &start, &csum_start);
3098 1.1 ryo if (error) {
3099 1.1 ryo /* m is already freed */
3100 1.1 ryo txq->vxtxq_stats.vmtxs_offload_failed++;
3101 1.1 ryo vmxnet3_txq_unload_mbuf(txq, dmap);
3102 1.1 ryo *m0 = NULL;
3103 1.1 ryo return (error);
3104 1.1 ryo }
3105 1.1 ryo }
3106 1.1 ryo
3107 1.1 ryo txr->vxtxr_txbuf[txr->vxtxr_head].vtxb_m = m;
3108 1.1 ryo sop = &txr->vxtxr_txd[txr->vxtxr_head];
3109 1.1 ryo 	gen = txr->vxtxr_gen ^ 1;	/* Still owned by the CPU */
3110 1.1 ryo
3111 1.1 ryo for (i = 0; i < nsegs; i++) {
3112 1.1 ryo txd = &txr->vxtxr_txd[txr->vxtxr_head];
3113 1.1 ryo
3114 1.1 ryo txd->addr = segs[i].ds_addr;
3115 1.1 ryo txd->len = segs[i].ds_len;
3116 1.1 ryo txd->gen = gen;
3117 1.1 ryo txd->dtype = 0;
3118 1.1 ryo txd->offload_mode = VMXNET3_OM_NONE;
3119 1.1 ryo txd->offload_pos = 0;
3120 1.1 ryo txd->hlen = 0;
3121 1.1 ryo txd->eop = 0;
3122 1.1 ryo txd->compreq = 0;
3123 1.1 ryo txd->vtag_mode = 0;
3124 1.1 ryo txd->vtag = 0;
3125 1.1 ryo
3126 1.1 ryo if (++txr->vxtxr_head == txr->vxtxr_ndesc) {
3127 1.1 ryo txr->vxtxr_head = 0;
3128 1.1 ryo txr->vxtxr_gen ^= 1;
3129 1.1 ryo }
3130 1.1 ryo gen = txr->vxtxr_gen;
3131 1.1 ryo }
3132 1.1 ryo txd->eop = 1;
3133 1.1 ryo txd->compreq = 1;
3134 1.1 ryo
3135 1.1 ryo if (vlan_has_tag(m)) {
3136 1.1 ryo sop->vtag_mode = 1;
3137 1.1 ryo sop->vtag = vlan_get_tag(m);
3138 1.1 ryo }
3139 1.1 ryo
3140 1.1 ryo if (m->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) {
3141 1.1 ryo sop->offload_mode = VMXNET3_OM_TSO;
3142 1.1 ryo sop->hlen = start;
3143 1.1 ryo sop->offload_pos = m->m_pkthdr.segsz;
3144 1.1 ryo } else if (m->m_pkthdr.csum_flags & (VMXNET3_CSUM_OFFLOAD |
3145 1.1 ryo VMXNET3_CSUM_OFFLOAD_IPV6)) {
3146 1.1 ryo sop->offload_mode = VMXNET3_OM_CSUM;
3147 1.1 ryo sop->hlen = start;
3148 1.1 ryo sop->offload_pos = csum_start;
3149 1.1 ryo }
3150 1.1 ryo
3151 1.1 ryo 	/* Finally, pass ownership to the device: flip the SOP gen bit. */
3152 1.1 ryo vmxnet3_barrier(sc, VMXNET3_BARRIER_WR);
3153 1.1 ryo sop->gen ^= 1;
3154 1.1 ryo
3155 1.1 ryo txq->vxtxq_ts->npending += nsegs;
3156 1.1 ryo if (txq->vxtxq_ts->npending >= txq->vxtxq_ts->intr_threshold) {
3157 1.1 ryo struct vmxnet3_queue *vmxq;
3158 1.1 ryo vmxq = container_of(txq, struct vmxnet3_queue, vxq_txqueue);
3159 1.1 ryo txq->vxtxq_ts->npending = 0;
3160 1.1 ryo vmxnet3_write_bar0(sc, VMXNET3_BAR0_TXH(vmxq->vxq_id),
3161 1.1 ryo txr->vxtxr_head);
3162 1.1 ryo }
3163 1.1 ryo
3164 1.1 ryo return (0);
3165 1.1 ryo }
3166 1.1 ryo
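/*
 * Common transmit loop shared by the if_start path (if_snd, queue 0
 * only) and the if_transmit path (per-queue pcq), selected by txtype.
 */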
3167 1.1 ryo #define VMXNET3_TX_START 1
3168 1.1 ryo #define VMXNET3_TX_TRANSMIT 2
3169 1.1 ryo static inline void
3170 1.1 ryo vmxnet3_tx_common_locked(struct ifnet *ifp, struct vmxnet3_txqueue *txq, int txtype)
3171 1.1 ryo {
3172 1.1 ryo struct vmxnet3_softc *sc;
3173 1.1 ryo struct vmxnet3_txring *txr;
3174 1.1 ryo struct mbuf *m_head;
3175 1.1 ryo int tx;
3176 1.1 ryo
3177 1.1 ryo sc = ifp->if_softc;
3178 1.1 ryo txr = &txq->vxtxq_cmd_ring;
3179 1.1 ryo tx = 0;
3180 1.1 ryo
3181 1.1 ryo VMXNET3_TXQ_LOCK_ASSERT(txq);
3182 1.1 ryo
3183 1.1 ryo if ((ifp->if_flags & IFF_RUNNING) == 0 ||
3184 1.1 ryo sc->vmx_link_active == 0)
3185 1.1 ryo return;
3186 1.1 ryo
3187 1.1 ryo for (;;) {
3188 1.1 ryo if (txtype == VMXNET3_TX_START)
3189 1.1 ryo IFQ_POLL(&ifp->if_snd, m_head);
3190 1.1 ryo else
3191 1.1 ryo m_head = pcq_peek(txq->vxtxq_interq);
3192 1.1 ryo if (m_head == NULL)
3193 1.1 ryo break;
3194 1.1 ryo
3195 1.1 ryo if (vmxnet3_txring_avail(txr) < VMXNET3_TX_MAXSEGS)
3196 1.1 ryo break;
3197 1.1 ryo
3198 1.1 ryo if (txtype == VMXNET3_TX_START)
3199 1.1 ryo IFQ_DEQUEUE(&ifp->if_snd, m_head);
3200 1.1 ryo else
3201 1.1 ryo m_head = pcq_get(txq->vxtxq_interq);
3202 1.1 ryo if (m_head == NULL)
3203 1.1 ryo break;
3204 1.1 ryo
3205 1.1 ryo if (vmxnet3_txq_encap(txq, &m_head) != 0) {
3206 1.1 ryo if (m_head != NULL)
3207 1.1 ryo m_freem(m_head);
3208 1.1 ryo break;
3209 1.1 ryo }
3210 1.1 ryo
3211 1.1 ryo tx++;
3212 1.1 ryo bpf_mtap(ifp, m_head, BPF_D_OUT);
3213 1.1 ryo }
3214 1.1 ryo
3215 1.1 ryo if (tx > 0)
3216 1.1 ryo txq->vxtxq_watchdog = VMXNET3_WATCHDOG_TIMEOUT;
3217 1.1 ryo }
3218 1.1 ryo
3219 1.1 ryo static void
3220 1.1 ryo vmxnet3_start_locked(struct ifnet *ifp)
3221 1.1 ryo {
3222 1.1 ryo struct vmxnet3_softc *sc;
3223 1.1 ryo struct vmxnet3_txqueue *txq;
3224 1.1 ryo
3225 1.1 ryo sc = ifp->if_softc;
3226 1.1 ryo txq = &sc->vmx_queue[0].vxq_txqueue;
3227 1.1 ryo
3228 1.1 ryo vmxnet3_tx_common_locked(ifp, txq, VMXNET3_TX_START);
3229 1.1 ryo }
3230 1.1 ryo
3231 1.1 ryo void
3232 1.1 ryo vmxnet3_start(struct ifnet *ifp)
3233 1.1 ryo {
3234 1.1 ryo struct vmxnet3_softc *sc;
3235 1.1 ryo struct vmxnet3_txqueue *txq;
3236 1.1 ryo
3237 1.1 ryo sc = ifp->if_softc;
3238 1.1 ryo txq = &sc->vmx_queue[0].vxq_txqueue;
3239 1.1 ryo
3240 1.1 ryo VMXNET3_TXQ_LOCK(txq);
3241 1.1 ryo vmxnet3_start_locked(ifp);
3242 1.1 ryo VMXNET3_TXQ_UNLOCK(txq);
3243 1.1 ryo }
3244 1.1 ryo
3245 1.1 ryo static int
3246 1.1 ryo vmxnet3_select_txqueue(struct ifnet *ifp, struct mbuf *m __unused)
3247 1.1 ryo {
3248 1.1 ryo struct vmxnet3_softc *sc;
3249 1.1 ryo u_int cpuid;
3250 1.1 ryo
3251 1.1 ryo sc = ifp->if_softc;
3252 1.1 ryo cpuid = cpu_index(curcpu());
3253 1.1 ryo 	/*
3254 1.1 ryo 	 * Future work:
3255 1.1 ryo 	 * We should select a txqueue so as to even out the load even when
3256 1.1 ryo 	 * ncpu differs from sc->vmx_ntxqueues. Currently, when ncpu is six
3257 1.1 ryo 	 * and ntxqueues is four, vmx_queue[0] and vmx_queue[1] carry more
3258 1.1 ryo 	 * load than vmx_queue[2] and vmx_queue[3], because CPU#4 always
3259 1.1 ryo 	 * uses vmx_queue[0] and CPU#5 always uses vmx_queue[1].
3260 1.1 ryo 	 * Furthermore, we should not use a random value to select the
3261 1.1 ryo 	 * txqueue, as that would reorder packets; we should use the mbuf's
3262 1.1 ryo 	 * flow information instead.
3263 1.1 ryo 	 */
3264 1.1 ryo return cpuid % sc->vmx_ntxqueues;
3265 1.1 ryo }
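/*
 * A minimal sketch of the flow-based selection suggested above,
 * assuming a hypothetical helper vmx_mbuf_flow_hash() (not part of
 * this driver) that hashes the packet's address/port tuple:
 *
 *	static int
 *	vmxnet3_select_txqueue(struct ifnet *ifp, struct mbuf *m)
 *	{
 *		struct vmxnet3_softc *sc = ifp->if_softc;
 *		u_int hash = vmx_mbuf_flow_hash(m);
 *
 *		return hash % sc->vmx_ntxqueues;
 *	}
 *
 * Hashing keeps each flow on one queue, so packet order within a flow
 * is preserved while the load is spread across all txqueues.
 */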
3266 1.1 ryo
3267 1.1 ryo static void
3268 1.1 ryo vmxnet3_transmit_locked(struct ifnet *ifp, struct vmxnet3_txqueue *txq)
3269 1.1 ryo {
3270 1.1 ryo
3271 1.1 ryo vmxnet3_tx_common_locked(ifp, txq, VMXNET3_TX_TRANSMIT);
3272 1.1 ryo }
3273 1.1 ryo
3274 1.1 ryo static int
3275 1.1 ryo vmxnet3_transmit(struct ifnet *ifp, struct mbuf *m)
3276 1.1 ryo {
3277 1.1 ryo struct vmxnet3_softc *sc;
3278 1.1 ryo struct vmxnet3_txqueue *txq;
3279 1.1 ryo int qid;
3280 1.1 ryo
3281 1.1 ryo qid = vmxnet3_select_txqueue(ifp, m);
3282 1.1 ryo sc = ifp->if_softc;
3283 1.1 ryo txq = &sc->vmx_queue[qid].vxq_txqueue;
3284 1.1 ryo
3285 1.1 ryo if (__predict_false(!pcq_put(txq->vxtxq_interq, m))) {
3286 1.1 ryo VMXNET3_TXQ_LOCK(txq);
3287 1.1 ryo txq->vxtxq_pcqdrop.ev_count++;
3288 1.1 ryo VMXNET3_TXQ_UNLOCK(txq);
3289 1.1 ryo m_freem(m);
3290 1.1 ryo return ENOBUFS;
3291 1.1 ryo }
3292 1.1 ryo
3293 1.10 knakahar #ifdef VMXNET3_ALWAYS_TXDEFER
3294 1.10 knakahar kpreempt_disable();
3295 1.10 knakahar softint_schedule(txq->vxtxq_si);
3296 1.10 knakahar kpreempt_enable();
3297 1.10 knakahar #else
3298 1.1 ryo if (VMXNET3_TXQ_TRYLOCK(txq)) {
3299 1.1 ryo vmxnet3_transmit_locked(ifp, txq);
3300 1.1 ryo VMXNET3_TXQ_UNLOCK(txq);
3301 1.1 ryo } else {
3302 1.1 ryo kpreempt_disable();
3303 1.1 ryo softint_schedule(txq->vxtxq_si);
3304 1.1 ryo kpreempt_enable();
3305 1.1 ryo }
3306 1.10 knakahar #endif
3307 1.1 ryo
3308 1.1 ryo return 0;
3309 1.1 ryo }
3310 1.1 ryo
3311 1.1 ryo static void
3312 1.1 ryo vmxnet3_deferred_transmit(void *arg)
3313 1.1 ryo {
3314 1.1 ryo struct vmxnet3_txqueue *txq = arg;
3315 1.1 ryo struct vmxnet3_softc *sc = txq->vxtxq_sc;
3316 1.1 ryo struct ifnet *ifp = &sc->vmx_ethercom.ec_if;
3317 1.1 ryo
3318 1.1 ryo VMXNET3_TXQ_LOCK(txq);
3319 1.1 ryo txq->vxtxq_transmitdef.ev_count++;
3320 1.1 ryo if (pcq_peek(txq->vxtxq_interq) != NULL)
3321 1.1 ryo vmxnet3_transmit_locked(ifp, txq);
3322 1.1 ryo VMXNET3_TXQ_UNLOCK(txq);
3323 1.1 ryo }
3324 1.1 ryo
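/*
 * Program the Rx filter: rebuild the multicast table in the
 * driver-shared area and set the Rx mode. Unicast and broadcast are
 * always accepted; we fall back to ALL_MULTI (plus PROMISC when
 * requested) if the table would overflow or a range must be matched.
 */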
3325 1.1 ryo static void
3326 1.1 ryo vmxnet3_set_rxfilter(struct vmxnet3_softc *sc)
3327 1.1 ryo {
3328 1.1 ryo struct ifnet *ifp = &sc->vmx_ethercom.ec_if;
3329 1.1 ryo struct ethercom *ec = &sc->vmx_ethercom;
3330 1.1 ryo struct vmxnet3_driver_shared *ds = sc->vmx_ds;
3331 1.1 ryo struct ether_multi *enm;
3332 1.1 ryo struct ether_multistep step;
3333 1.1 ryo u_int mode;
3334 1.1 ryo uint8_t *p;
3335 1.1 ryo
3336 1.1 ryo ds->mcast_tablelen = 0;
3337 1.1 ryo ETHER_LOCK(ec);
3338 1.1 ryo CLR(ec->ec_flags, ETHER_F_ALLMULTI);
3339 1.1 ryo ETHER_UNLOCK(ec);
3340 1.1 ryo
3341 1.1 ryo /*
3342 1.1 ryo * Always accept broadcast frames.
3343 1.1 ryo 	 * Always accept frames destined for our station address.
3344 1.1 ryo */
3345 1.1 ryo mode = VMXNET3_RXMODE_BCAST | VMXNET3_RXMODE_UCAST;
3346 1.1 ryo
3347 1.1 ryo ETHER_LOCK(ec);
3348 1.1 ryo if (ISSET(ifp->if_flags, IFF_PROMISC) ||
3349 1.1 ryo ec->ec_multicnt > VMXNET3_MULTICAST_MAX)
3350 1.1 ryo goto allmulti;
3351 1.1 ryo
3352 1.1 ryo p = sc->vmx_mcast;
3353 1.1 ryo ETHER_FIRST_MULTI(step, ec, enm);
3354 1.1 ryo while (enm != NULL) {
3355 1.1 ryo if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3356 1.1 ryo /*
3357 1.1 ryo * We must listen to a range of multicast addresses.
3358 1.1 ryo * For now, just accept all multicasts, rather than
3359 1.1 ryo * trying to set only those filter bits needed to match
3360 1.1 ryo * the range. (At this time, the only use of address
3361 1.1 ryo * ranges is for IP multicast routing, for which the
3362 1.1 ryo * range is big enough to require all bits set.)
3363 1.1 ryo */
3364 1.1 ryo goto allmulti;
3365 1.1 ryo }
3366 1.1 ryo memcpy(p, enm->enm_addrlo, ETHER_ADDR_LEN);
3367 1.1 ryo
3368 1.1 ryo p += ETHER_ADDR_LEN;
3369 1.1 ryo
3370 1.1 ryo ETHER_NEXT_MULTI(step, enm);
3371 1.1 ryo }
3372 1.1 ryo
3373 1.1 ryo if (ec->ec_multicnt > 0) {
3374 1.1 ryo SET(mode, VMXNET3_RXMODE_MCAST);
3375 1.1 ryo ds->mcast_tablelen = p - sc->vmx_mcast;
3376 1.1 ryo }
3377 1.1 ryo ETHER_UNLOCK(ec);
3378 1.1 ryo
3379 1.1 ryo goto setit;
3380 1.1 ryo
3381 1.1 ryo allmulti:
3382 1.1 ryo SET(ec->ec_flags, ETHER_F_ALLMULTI);
3383 1.1 ryo ETHER_UNLOCK(ec);
3384 1.1 ryo SET(mode, (VMXNET3_RXMODE_ALLMULTI | VMXNET3_RXMODE_MCAST));
3385 1.1 ryo if (ifp->if_flags & IFF_PROMISC)
3386 1.1 ryo SET(mode, VMXNET3_RXMODE_PROMISC);
3387 1.1 ryo
3388 1.1 ryo setit:
3389 1.1 ryo vmxnet3_write_cmd(sc, VMXNET3_CMD_SET_FILTER);
3390 1.1 ryo ds->rxmode = mode;
3391 1.1 ryo vmxnet3_write_cmd(sc, VMXNET3_CMD_SET_RXMODE);
3392 1.1 ryo }
3393 1.1 ryo
3394 1.1 ryo static int
3395 1.1 ryo vmxnet3_ioctl(struct ifnet *ifp, u_long cmd, void *data)
3396 1.1 ryo {
3397 1.1 ryo struct vmxnet3_softc *sc = ifp->if_softc;
3398 1.1 ryo struct ifreq *ifr = (struct ifreq *)data;
3399 1.1 ryo int s, error = 0;
3400 1.1 ryo
3401 1.1 ryo switch (cmd) {
3402 1.1 ryo case SIOCSIFMTU: {
3403 1.1 ryo int nmtu = ifr->ifr_mtu;
3404 1.1 ryo
3405 1.1 ryo if (nmtu < VMXNET3_MIN_MTU || nmtu > VMXNET3_MAX_MTU) {
3406 1.1 ryo error = EINVAL;
3407 1.1 ryo break;
3408 1.1 ryo }
3409 1.2 ryo if (ifp->if_mtu != (uint64_t)nmtu) {
3410 1.1 ryo s = splnet();
3411 1.1 ryo error = ether_ioctl(ifp, cmd, data);
3412 1.1 ryo splx(s);
3413 1.1 ryo if (error == ENETRESET)
3414 1.1 ryo error = vmxnet3_init(ifp);
3415 1.1 ryo }
3416 1.1 ryo break;
3417 1.1 ryo }
3418 1.1 ryo
3419 1.1 ryo default:
3420 1.1 ryo s = splnet();
3421 1.1 ryo error = ether_ioctl(ifp, cmd, data);
3422 1.1 ryo splx(s);
3423 1.1 ryo }
3424 1.1 ryo
3425 1.1 ryo if (error == ENETRESET) {
3426 1.1 ryo VMXNET3_CORE_LOCK(sc);
3427 1.1 ryo if (ifp->if_flags & IFF_RUNNING)
3428 1.1 ryo vmxnet3_set_rxfilter(sc);
3429 1.1 ryo VMXNET3_CORE_UNLOCK(sc);
3430 1.1 ryo error = 0;
3431 1.1 ryo }
3432 1.1 ryo
3433 1.1 ryo return error;
3434 1.1 ryo }
3435 1.1 ryo
3436 1.1 ryo static int
3437 1.1 ryo vmxnet3_ifflags_cb(struct ethercom *ec)
3438 1.1 ryo {
3439 1.1 ryo struct vmxnet3_softc *sc;
3440 1.1 ryo
3441 1.1 ryo sc = ec->ec_if.if_softc;
3442 1.1 ryo
3443 1.1 ryo VMXNET3_CORE_LOCK(sc);
3444 1.1 ryo vmxnet3_set_rxfilter(sc);
3445 1.1 ryo VMXNET3_CORE_UNLOCK(sc);
3446 1.1 ryo
3447 1.1 ryo vmxnet3_if_link_status(sc);
3448 1.1 ryo
3449 1.1 ryo return 0;
3450 1.1 ryo }
3451 1.1 ryo
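/*
 * Per-queue Tx watchdog: the counter is armed by the transmit path and
 * decremented from vmxnet3_tick(); return 1 once it expires so that
 * the caller reinitializes the interface.
 */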
3452 1.1 ryo static int
3453 1.1 ryo vmxnet3_watchdog(struct vmxnet3_txqueue *txq)
3454 1.1 ryo {
3455 1.1 ryo struct vmxnet3_softc *sc;
3456 1.1 ryo struct vmxnet3_queue *vmxq;
3457 1.1 ryo
3458 1.1 ryo sc = txq->vxtxq_sc;
3459 1.1 ryo vmxq = container_of(txq, struct vmxnet3_queue, vxq_txqueue);
3460 1.1 ryo
3461 1.1 ryo VMXNET3_TXQ_LOCK(txq);
3462 1.1 ryo if (txq->vxtxq_watchdog == 0 || --txq->vxtxq_watchdog) {
3463 1.1 ryo VMXNET3_TXQ_UNLOCK(txq);
3464 1.1 ryo return (0);
3465 1.1 ryo }
3466 1.1 ryo txq->vxtxq_watchdogto.ev_count++;
3467 1.1 ryo VMXNET3_TXQ_UNLOCK(txq);
3468 1.1 ryo
3469 1.1 ryo device_printf(sc->vmx_dev, "watchdog timeout on queue %d\n",
3470 1.1 ryo vmxq->vxq_id);
3471 1.1 ryo return (1);
3472 1.1 ryo }
3473 1.1 ryo
3474 1.1 ryo static void
3475 1.1 ryo vmxnet3_refresh_host_stats(struct vmxnet3_softc *sc)
3476 1.1 ryo {
3477 1.1 ryo
3478 1.1 ryo vmxnet3_write_cmd(sc, VMXNET3_CMD_GET_STATS);
3479 1.1 ryo }
3480 1.1 ryo
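/*
 * Once-per-second callout: refresh the host statistics and run the
 * per-queue Tx watchdogs, reinitializing the interface on a timeout.
 */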
3481 1.1 ryo static void
3482 1.1 ryo vmxnet3_tick(void *xsc)
3483 1.1 ryo {
3484 1.1 ryo struct vmxnet3_softc *sc;
3485 1.1 ryo int i, timedout;
3486 1.1 ryo
3487 1.1 ryo sc = xsc;
3488 1.1 ryo timedout = 0;
3489 1.1 ryo
3490 1.1 ryo VMXNET3_CORE_LOCK(sc);
3491 1.1 ryo
3492 1.1 ryo vmxnet3_refresh_host_stats(sc);
3493 1.1 ryo
3494 1.1 ryo for (i = 0; i < sc->vmx_ntxqueues; i++)
3495 1.1 ryo timedout |= vmxnet3_watchdog(&sc->vmx_queue[i].vxq_txqueue);
3496 1.1 ryo
3497 1.1 ryo if (timedout != 0)
3498 1.1 ryo vmxnet3_init_locked(sc);
3499 1.1 ryo else
3500 1.1 ryo callout_reset(&sc->vmx_tick, hz, vmxnet3_tick, sc);
3501 1.1 ryo
3502 1.1 ryo VMXNET3_CORE_UNLOCK(sc);
3503 1.1 ryo }
3504 1.1 ryo
3505 1.1 ryo /*
3506 1.1 ryo * update link state of ifnet and softc
3507 1.1 ryo */
3508 1.1 ryo static void
3509 1.1 ryo vmxnet3_if_link_status(struct vmxnet3_softc *sc)
3510 1.1 ryo {
3511 1.1 ryo struct ifnet *ifp = &sc->vmx_ethercom.ec_if;
3512 1.4 ryo u_int link;
3513 1.4 ryo bool up;
3514 1.1 ryo
3515 1.4 ryo up = vmxnet3_cmd_link_status(ifp);
3516 1.4 ryo if (up) {
3517 1.1 ryo sc->vmx_link_active = 1;
3518 1.1 ryo link = LINK_STATE_UP;
3519 1.1 ryo } else {
3520 1.1 ryo sc->vmx_link_active = 0;
3521 1.1 ryo link = LINK_STATE_DOWN;
3522 1.1 ryo }
3523 1.1 ryo
3524 1.1 ryo if_link_state_change(ifp, link);
3525 1.1 ryo }
3526 1.1 ryo
3527 1.1 ryo /*
3528 1.1 ryo  * Check the link state via VMXNET3_CMD and update ifp->if_baudrate.
3529 1.1 ryo  * Returns
3530 1.1 ryo  * - true: link up
3531 1.1 ryo  * - false: link down
3532 1.1 ryo */
3533 1.1 ryo static bool
3534 1.1 ryo vmxnet3_cmd_link_status(struct ifnet *ifp)
3535 1.1 ryo {
3536 1.1 ryo struct vmxnet3_softc *sc = ifp->if_softc;
3537 1.1 ryo u_int x, speed;
3538 1.1 ryo
3539 1.1 ryo x = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_LINK);
3540 1.1 ryo if ((x & 1) == 0)
3541 1.1 ryo return false;
3542 1.1 ryo
3543 1.1 ryo speed = x >> 16;
3544 1.1 ryo ifp->if_baudrate = IF_Mbps(speed);
3545 1.1 ryo return true;
3546 1.1 ryo }
3547 1.1 ryo
3548 1.1 ryo static void
3549 1.1 ryo vmxnet3_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
3550 1.1 ryo {
3551 1.1 ryo bool up;
3552 1.1 ryo
3553 1.1 ryo ifmr->ifm_status = IFM_AVALID;
3554 1.1 ryo ifmr->ifm_active = IFM_ETHER;
3555 1.1 ryo
3556 1.1 ryo up = vmxnet3_cmd_link_status(ifp);
3557 1.1 ryo if (!up)
3558 1.1 ryo return;
3559 1.1 ryo
3560 1.1 ryo ifmr->ifm_status |= IFM_ACTIVE;
3561 1.1 ryo
3562 1.1 ryo if (ifp->if_baudrate >= IF_Gbps(10ULL))
3563 1.1 ryo ifmr->ifm_active |= IFM_10G_T;
3564 1.1 ryo }
3565 1.1 ryo
3566 1.1 ryo static int
3567 1.1 ryo vmxnet3_ifmedia_change(struct ifnet *ifp)
3568 1.1 ryo {
3569 1.1 ryo return 0;
3570 1.1 ryo }
3571 1.1 ryo
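/*
 * The station address is exchanged with the device as two 32-bit BAR1
 * registers: MACL carries bytes 0-3 and MACH bytes 4-5, low byte first.
 */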
3572 1.1 ryo static void
3573 1.1 ryo vmxnet3_set_lladdr(struct vmxnet3_softc *sc)
3574 1.1 ryo {
3575 1.1 ryo uint32_t ml, mh;
3576 1.1 ryo
3577 1.1 ryo ml = sc->vmx_lladdr[0];
3578 1.1 ryo ml |= sc->vmx_lladdr[1] << 8;
3579 1.1 ryo ml |= sc->vmx_lladdr[2] << 16;
3580 1.1 ryo ml |= sc->vmx_lladdr[3] << 24;
3581 1.1 ryo vmxnet3_write_bar1(sc, VMXNET3_BAR1_MACL, ml);
3582 1.1 ryo
3583 1.1 ryo mh = sc->vmx_lladdr[4];
3584 1.1 ryo mh |= sc->vmx_lladdr[5] << 8;
3585 1.1 ryo vmxnet3_write_bar1(sc, VMXNET3_BAR1_MACH, mh);
3586 1.1 ryo }
3587 1.1 ryo
3588 1.1 ryo static void
3589 1.1 ryo vmxnet3_get_lladdr(struct vmxnet3_softc *sc)
3590 1.1 ryo {
3591 1.1 ryo uint32_t ml, mh;
3592 1.1 ryo
3593 1.1 ryo ml = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_MACL);
3594 1.1 ryo mh = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_MACH);
3595 1.1 ryo
3596 1.1 ryo sc->vmx_lladdr[0] = ml;
3597 1.1 ryo sc->vmx_lladdr[1] = ml >> 8;
3598 1.1 ryo sc->vmx_lladdr[2] = ml >> 16;
3599 1.1 ryo sc->vmx_lladdr[3] = ml >> 24;
3600 1.1 ryo sc->vmx_lladdr[4] = mh;
3601 1.1 ryo sc->vmx_lladdr[5] = mh >> 8;
3602 1.1 ryo }
3603 1.1 ryo
3604 1.1 ryo static void
3605 1.1 ryo vmxnet3_enable_all_intrs(struct vmxnet3_softc *sc)
3606 1.1 ryo {
3607 1.1 ryo int i;
3608 1.1 ryo
3609 1.1 ryo sc->vmx_ds->ictrl &= ~VMXNET3_ICTRL_DISABLE_ALL;
3610 1.1 ryo for (i = 0; i < sc->vmx_nintrs; i++)
3611 1.1 ryo vmxnet3_enable_intr(sc, i);
3612 1.1 ryo }
3613 1.1 ryo
3614 1.1 ryo static void
3615 1.1 ryo vmxnet3_disable_all_intrs(struct vmxnet3_softc *sc)
3616 1.1 ryo {
3617 1.1 ryo int i;
3618 1.1 ryo
3619 1.1 ryo sc->vmx_ds->ictrl |= VMXNET3_ICTRL_DISABLE_ALL;
3620 1.1 ryo for (i = 0; i < sc->vmx_nintrs; i++)
3621 1.1 ryo vmxnet3_disable_intr(sc, i);
3622 1.1 ryo }
3623 1.1 ryo
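/*
 * Allocate a single physically contiguous, zeroed DMA area:
 * bus_dmamem_alloc -> bus_dmamem_map -> bus_dmamap_create ->
 * bus_dmamap_load, unwinding the completed steps on failure.
 */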
3624 1.1 ryo static int
3625 1.1 ryo vmxnet3_dma_malloc(struct vmxnet3_softc *sc, bus_size_t size, bus_size_t align,
3626 1.1 ryo struct vmxnet3_dma_alloc *dma)
3627 1.1 ryo {
3628 1.1 ryo bus_dma_tag_t t = sc->vmx_dmat;
3629 1.1 ryo bus_dma_segment_t *segs = dma->dma_segs;
3630 1.1 ryo int n, error;
3631 1.1 ryo
3632 1.1 ryo memset(dma, 0, sizeof(*dma));
3633 1.1 ryo
3634 1.1 ryo error = bus_dmamem_alloc(t, size, align, 0, segs, 1, &n, BUS_DMA_NOWAIT);
3635 1.1 ryo if (error) {
3636 1.1 ryo aprint_error_dev(sc->vmx_dev, "bus_dmamem_alloc failed: %d\n", error);
3637 1.1 ryo goto fail1;
3638 1.1 ryo }
3639 1.1 ryo KASSERT(n == 1);
3640 1.1 ryo
3641 1.1 ryo error = bus_dmamem_map(t, segs, 1, size, &dma->dma_vaddr, BUS_DMA_NOWAIT);
3642 1.1 ryo if (error) {
3643 1.1 ryo aprint_error_dev(sc->vmx_dev, "bus_dmamem_map failed: %d\n", error);
3644 1.1 ryo goto fail2;
3645 1.1 ryo }
3646 1.1 ryo
3647 1.1 ryo error = bus_dmamap_create(t, size, 1, size, 0, BUS_DMA_NOWAIT, &dma->dma_map);
3648 1.1 ryo if (error) {
3649 1.1 ryo aprint_error_dev(sc->vmx_dev, "bus_dmamap_create failed: %d\n", error);
3650 1.1 ryo goto fail3;
3651 1.1 ryo }
3652 1.1 ryo
3653 1.1 ryo error = bus_dmamap_load(t, dma->dma_map, dma->dma_vaddr, size, NULL,
3654 1.1 ryo BUS_DMA_NOWAIT);
3655 1.1 ryo if (error) {
3656 1.1 ryo aprint_error_dev(sc->vmx_dev, "bus_dmamap_load failed: %d\n", error);
3657 1.1 ryo goto fail4;
3658 1.1 ryo }
3659 1.1 ryo
3660 1.1 ryo memset(dma->dma_vaddr, 0, size);
3661 1.1 ryo dma->dma_paddr = DMAADDR(dma->dma_map);
3662 1.1 ryo dma->dma_size = size;
3663 1.1 ryo
3664 1.1 ryo return (0);
3665 1.1 ryo fail4:
3666 1.1 ryo bus_dmamap_destroy(t, dma->dma_map);
3667 1.1 ryo fail3:
3668 1.1 ryo bus_dmamem_unmap(t, dma->dma_vaddr, size);
3669 1.1 ryo fail2:
3670 1.1 ryo bus_dmamem_free(t, segs, 1);
3671 1.1 ryo fail1:
3672 1.1 ryo return (error);
3673 1.1 ryo }
3674 1.1 ryo
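/* Tear down a vmxnet3_dma_alloc in the reverse order of vmxnet3_dma_malloc(). */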
3675 1.1 ryo static void
3676 1.1 ryo vmxnet3_dma_free(struct vmxnet3_softc *sc, struct vmxnet3_dma_alloc *dma)
3677 1.1 ryo {
3678 1.1 ryo bus_dma_tag_t t = sc->vmx_dmat;
3679 1.1 ryo
3680 1.1 ryo bus_dmamap_unload(t, dma->dma_map);
3681 1.1 ryo bus_dmamap_destroy(t, dma->dma_map);
3682 1.1 ryo bus_dmamem_unmap(t, dma->dma_vaddr, dma->dma_size);
3683 1.1 ryo bus_dmamem_free(t, dma->dma_segs, 1);
3684 1.1 ryo
3685 1.1 ryo memset(dma, 0, sizeof(*dma));
3686 1.1 ryo }
3687 1.2 ryo
3688 1.2 ryo MODULE(MODULE_CLASS_DRIVER, if_vmx, "pci");
3689 1.2 ryo
3690 1.2 ryo #ifdef _MODULE
3691 1.2 ryo #include "ioconf.c"
3692 1.2 ryo #endif
3693 1.2 ryo
3694 1.2 ryo static int
3695 1.2 ryo if_vmx_modcmd(modcmd_t cmd, void *opaque)
3696 1.2 ryo {
3697 1.2 ryo int error = 0;
3698 1.2 ryo
3699 1.2 ryo switch (cmd) {
3700 1.2 ryo case MODULE_CMD_INIT:
3701 1.2 ryo #ifdef _MODULE
3702 1.2 ryo error = config_init_component(cfdriver_ioconf_if_vmx,
3703 1.2 ryo cfattach_ioconf_if_vmx, cfdata_ioconf_if_vmx);
3704 1.2 ryo #endif
3705 1.2 ryo return error;
3706 1.2 ryo case MODULE_CMD_FINI:
3707 1.2 ryo #ifdef _MODULE
3708 1.2 ryo error = config_fini_component(cfdriver_ioconf_if_vmx,
3709 1.2 ryo cfattach_ioconf_if_vmx, cfdata_ioconf_if_vmx);
3710 1.2 ryo #endif
3711 1.2 ryo return error;
3712 1.2 ryo default:
3713 1.2 ryo return ENOTTY;
3714 1.2 ryo }
3715 1.2 ryo }
3716 1.2 ryo