1 1.17 rin /* $NetBSD: if_vmx.c,v 1.17 2024/07/05 04:31:51 rin Exp $ */ 2 1.1 ryo /* $OpenBSD: if_vmx.c,v 1.16 2014/01/22 06:04:17 brad Exp $ */ 3 1.1 ryo 4 1.1 ryo /* 5 1.1 ryo * Copyright (c) 2013 Tsubai Masanari 6 1.1 ryo * Copyright (c) 2013 Bryan Venteicher <bryanv (at) FreeBSD.org> 7 1.1 ryo * 8 1.1 ryo * Permission to use, copy, modify, and distribute this software for any 9 1.1 ryo * purpose with or without fee is hereby granted, provided that the above 10 1.1 ryo * copyright notice and this permission notice appear in all copies. 11 1.1 ryo * 12 1.1 ryo * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 13 1.1 ryo * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 14 1.1 ryo * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 15 1.1 ryo * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 16 1.1 ryo * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 17 1.1 ryo * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 18 1.1 ryo * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 19 1.1 ryo */ 20 1.1 ryo 21 1.1 ryo #include <sys/cdefs.h> 22 1.17 rin __KERNEL_RCSID(0, "$NetBSD: if_vmx.c,v 1.17 2024/07/05 04:31:51 rin Exp $"); 23 1.10 knakahar 24 1.10 knakahar #ifdef _KERNEL_OPT 25 1.10 knakahar #include "opt_if_vmx.h" 26 1.10 knakahar #endif 27 1.1 ryo 28 1.1 ryo #include <sys/param.h> 29 1.1 ryo #include <sys/cpu.h> 30 1.1 ryo #include <sys/kernel.h> 31 1.1 ryo #include <sys/kmem.h> 32 1.1 ryo #include <sys/bitops.h> 33 1.1 ryo #include <sys/bus.h> 34 1.1 ryo #include <sys/device.h> 35 1.1 ryo #include <sys/mbuf.h> 36 1.2 ryo #include <sys/module.h> 37 1.1 ryo #include <sys/sockio.h> 38 1.1 ryo #include <sys/pcq.h> 39 1.1 ryo #include <sys/workqueue.h> 40 1.1 ryo #include <sys/interrupt.h> 41 1.1 ryo 42 1.1 ryo #include <net/bpf.h> 43 1.1 ryo #include <net/if.h> 44 1.1 ryo #include <net/if_ether.h> 45 1.1 ryo #include <net/if_media.h> 46 1.1 ryo 47 1.1 ryo #include <netinet/if_inarp.h> 48 1.1 ryo #include <netinet/in_systm.h> /* for <netinet/ip.h> */ 49 1.1 ryo #include <netinet/in.h> /* for <netinet/ip.h> */ 50 1.1 ryo #include <netinet/ip.h> /* for struct ip */ 51 1.1 ryo #include <netinet/ip6.h> /* for struct ip6_hdr */ 52 1.1 ryo #include <netinet/tcp.h> /* for struct tcphdr */ 53 1.1 ryo #include <netinet/udp.h> /* for struct udphdr */ 54 1.1 ryo 55 1.1 ryo #include <dev/pci/pcivar.h> 56 1.1 ryo #include <dev/pci/pcireg.h> 57 1.1 ryo #include <dev/pci/pcidevs.h> 58 1.1 ryo 59 1.1 ryo #include <dev/pci/if_vmxreg.h> 60 1.1 ryo 61 1.1 ryo #define VMXNET3_DRIVER_VERSION 0x00010000 62 1.1 ryo 63 1.1 ryo /* 64 1.1 ryo * Max descriptors per Tx packet. We must limit the size of the 65 1.1 ryo * any TSO packets based on the number of segments. 66 1.1 ryo */ 67 1.1 ryo #define VMXNET3_TX_MAXSEGS 32 68 1.1 ryo #define VMXNET3_TX_MAXSIZE (VMXNET3_TX_MAXSEGS * MCLBYTES) 69 1.1 ryo 70 1.1 ryo /* 71 1.1 ryo * Maximum support Tx segments size. The length field in the 72 1.1 ryo * Tx descriptor is 14 bits. 73 1.1 ryo */ 74 1.1 ryo #define VMXNET3_TX_MAXSEGSIZE (1 << 14) 75 1.1 ryo 76 1.1 ryo /* 77 1.1 ryo * The maximum number of Rx segments we accept. 78 1.1 ryo */ 79 1.1 ryo #define VMXNET3_MAX_RX_SEGS 0 /* no segments */ 80 1.1 ryo 81 1.1 ryo /* 82 1.1 ryo * Predetermined size of the multicast MACs filter table. If the 83 1.1 ryo * number of multicast addresses exceeds this size, then the 84 1.1 ryo * ALL_MULTI mode is use instead. 85 1.1 ryo */ 86 1.1 ryo #define VMXNET3_MULTICAST_MAX 32 87 1.1 ryo 88 1.1 ryo /* 89 1.1 ryo * Our Tx watchdog timeout. 90 1.1 ryo */ 91 1.1 ryo #define VMXNET3_WATCHDOG_TIMEOUT 5 92 1.1 ryo 93 1.1 ryo /* 94 1.1 ryo * Default value for vmx_intr_{rx,tx}_process_limit which is used for 95 1.1 ryo * max number of packets to process for interrupt handler 96 1.1 ryo */ 97 1.1 ryo #define VMXNET3_RX_INTR_PROCESS_LIMIT 0U 98 1.1 ryo #define VMXNET3_TX_INTR_PROCESS_LIMIT 256 99 1.1 ryo 100 1.1 ryo /* 101 1.1 ryo * Default value for vmx_{rx,tx}_process_limit which is used for 102 1.1 ryo * max number of packets to process for deferred processing 103 1.1 ryo */ 104 1.1 ryo #define VMXNET3_RX_PROCESS_LIMIT 256 105 1.1 ryo #define VMXNET3_TX_PROCESS_LIMIT 256 106 1.1 ryo 107 1.1 ryo #define VMXNET3_WORKQUEUE_PRI PRI_SOFTNET 108 1.1 ryo 109 1.1 ryo /* 110 1.1 ryo * IP protocols that we can perform Tx checksum offloading of. 111 1.1 ryo */ 112 1.1 ryo #define VMXNET3_CSUM_OFFLOAD \ 113 1.1 ryo (M_CSUM_TCPv4 | M_CSUM_UDPv4) 114 1.1 ryo #define VMXNET3_CSUM_OFFLOAD_IPV6 \ 115 1.1 ryo (M_CSUM_TCPv6 | M_CSUM_UDPv6) 116 1.1 ryo 117 1.1 ryo #define VMXNET3_CSUM_ALL_OFFLOAD \ 118 1.1 ryo (VMXNET3_CSUM_OFFLOAD | VMXNET3_CSUM_OFFLOAD_IPV6 | M_CSUM_TSOv4 | M_CSUM_TSOv6) 119 1.1 ryo 120 1.1 ryo #define VMXNET3_RXRINGS_PERQ 2 121 1.1 ryo 122 1.1 ryo #define VMXNET3_CORE_LOCK(_sc) mutex_enter((_sc)->vmx_mtx) 123 1.1 ryo #define VMXNET3_CORE_UNLOCK(_sc) mutex_exit((_sc)->vmx_mtx) 124 1.1 ryo #define VMXNET3_CORE_LOCK_ASSERT(_sc) mutex_owned((_sc)->vmx_mtx) 125 1.1 ryo 126 1.1 ryo #define VMXNET3_RXQ_LOCK(_rxq) mutex_enter((_rxq)->vxrxq_mtx) 127 1.1 ryo #define VMXNET3_RXQ_UNLOCK(_rxq) mutex_exit((_rxq)->vxrxq_mtx) 128 1.1 ryo #define VMXNET3_RXQ_LOCK_ASSERT(_rxq) \ 129 1.1 ryo mutex_owned((_rxq)->vxrxq_mtx) 130 1.1 ryo 131 1.1 ryo #define VMXNET3_TXQ_LOCK(_txq) mutex_enter((_txq)->vxtxq_mtx) 132 1.1 ryo #define VMXNET3_TXQ_TRYLOCK(_txq) mutex_tryenter((_txq)->vxtxq_mtx) 133 1.1 ryo #define VMXNET3_TXQ_UNLOCK(_txq) mutex_exit((_txq)->vxtxq_mtx) 134 1.1 ryo #define VMXNET3_TXQ_LOCK_ASSERT(_txq) \ 135 1.1 ryo mutex_owned((_txq)->vxtxq_mtx) 136 1.1 ryo 137 1.1 ryo struct vmxnet3_dma_alloc { 138 1.1 ryo bus_addr_t dma_paddr; 139 1.1 ryo void *dma_vaddr; 140 1.1 ryo bus_dmamap_t dma_map; 141 1.1 ryo bus_size_t dma_size; 142 1.1 ryo bus_dma_segment_t dma_segs[1]; 143 1.1 ryo }; 144 1.1 ryo 145 1.1 ryo struct vmxnet3_txbuf { 146 1.1 ryo bus_dmamap_t vtxb_dmamap; 147 1.1 ryo struct mbuf *vtxb_m; 148 1.1 ryo }; 149 1.1 ryo 150 1.1 ryo struct vmxnet3_txring { 151 1.1 ryo struct vmxnet3_txbuf *vxtxr_txbuf; 152 1.1 ryo struct vmxnet3_txdesc *vxtxr_txd; 153 1.1 ryo u_int vxtxr_head; 154 1.1 ryo u_int vxtxr_next; 155 1.1 ryo u_int vxtxr_ndesc; 156 1.1 ryo int vxtxr_gen; 157 1.1 ryo struct vmxnet3_dma_alloc vxtxr_dma; 158 1.1 ryo }; 159 1.1 ryo 160 1.1 ryo struct vmxnet3_rxbuf { 161 1.1 ryo bus_dmamap_t vrxb_dmamap; 162 1.1 ryo struct mbuf *vrxb_m; 163 1.1 ryo }; 164 1.1 ryo 165 1.1 ryo struct vmxnet3_rxring { 166 1.1 ryo struct vmxnet3_rxbuf *vxrxr_rxbuf; 167 1.1 ryo struct vmxnet3_rxdesc *vxrxr_rxd; 168 1.1 ryo u_int vxrxr_fill; 169 1.1 ryo u_int vxrxr_ndesc; 170 1.1 ryo int vxrxr_gen; 171 1.1 ryo int vxrxr_rid; 172 1.1 ryo struct vmxnet3_dma_alloc vxrxr_dma; 173 1.1 ryo bus_dmamap_t vxrxr_spare_dmap; 174 1.1 ryo }; 175 1.1 ryo 176 1.1 ryo struct vmxnet3_comp_ring { 177 1.1 ryo union { 178 1.1 ryo struct vmxnet3_txcompdesc *txcd; 179 1.1 ryo struct vmxnet3_rxcompdesc *rxcd; 180 1.1 ryo } vxcr_u; 181 1.1 ryo u_int vxcr_next; 182 1.1 ryo u_int vxcr_ndesc; 183 1.1 ryo int vxcr_gen; 184 1.1 ryo struct vmxnet3_dma_alloc vxcr_dma; 185 1.1 ryo }; 186 1.1 ryo 187 1.1 ryo struct vmxnet3_txq_stats { 188 1.1 ryo uint64_t vmtxs_csum; 189 1.1 ryo uint64_t vmtxs_tso; 190 1.1 ryo uint64_t vmtxs_full; 191 1.1 ryo uint64_t vmtxs_offload_failed; 192 1.1 ryo }; 193 1.1 ryo 194 1.1 ryo struct vmxnet3_txqueue { 195 1.1 ryo kmutex_t *vxtxq_mtx; 196 1.1 ryo struct vmxnet3_softc *vxtxq_sc; 197 1.1 ryo int vxtxq_watchdog; 198 1.1 ryo pcq_t *vxtxq_interq; 199 1.1 ryo struct vmxnet3_txring vxtxq_cmd_ring; 200 1.1 ryo struct vmxnet3_comp_ring vxtxq_comp_ring; 201 1.1 ryo struct vmxnet3_txq_stats vxtxq_stats; 202 1.1 ryo struct vmxnet3_txq_shared *vxtxq_ts; 203 1.1 ryo char vxtxq_name[16]; 204 1.1 ryo 205 1.1 ryo void *vxtxq_si; 206 1.1 ryo 207 1.1 ryo struct evcnt vxtxq_intr; 208 1.1 ryo struct evcnt vxtxq_defer; 209 1.1 ryo struct evcnt vxtxq_deferreq; 210 1.1 ryo struct evcnt vxtxq_pcqdrop; 211 1.1 ryo struct evcnt vxtxq_transmitdef; 212 1.1 ryo struct evcnt vxtxq_watchdogto; 213 1.1 ryo struct evcnt vxtxq_defragged; 214 1.1 ryo struct evcnt vxtxq_defrag_failed; 215 1.12 riastrad 216 1.12 riastrad bool vxtxq_stopping; 217 1.1 ryo }; 218 1.1 ryo 219 1.1 ryo 220 1.1 ryo struct vmxnet3_rxqueue { 221 1.1 ryo kmutex_t *vxrxq_mtx; 222 1.1 ryo struct vmxnet3_softc *vxrxq_sc; 223 1.1 ryo struct mbuf *vxrxq_mhead; 224 1.1 ryo struct mbuf *vxrxq_mtail; 225 1.1 ryo struct vmxnet3_rxring vxrxq_cmd_ring[VMXNET3_RXRINGS_PERQ]; 226 1.1 ryo struct vmxnet3_comp_ring vxrxq_comp_ring; 227 1.1 ryo struct vmxnet3_rxq_shared *vxrxq_rs; 228 1.1 ryo char vxrxq_name[16]; 229 1.1 ryo 230 1.1 ryo struct evcnt vxrxq_intr; 231 1.1 ryo struct evcnt vxrxq_defer; 232 1.1 ryo struct evcnt vxrxq_deferreq; 233 1.1 ryo struct evcnt vxrxq_mgetcl_failed; 234 1.1 ryo struct evcnt vxrxq_mbuf_load_failed; 235 1.12 riastrad 236 1.12 riastrad bool vxrxq_stopping; 237 1.1 ryo }; 238 1.1 ryo 239 1.1 ryo struct vmxnet3_queue { 240 1.1 ryo int vxq_id; 241 1.1 ryo int vxq_intr_idx; 242 1.1 ryo 243 1.1 ryo struct vmxnet3_txqueue vxq_txqueue; 244 1.1 ryo struct vmxnet3_rxqueue vxq_rxqueue; 245 1.1 ryo 246 1.1 ryo void *vxq_si; 247 1.1 ryo bool vxq_workqueue; 248 1.5 knakahar bool vxq_wq_enqueued; 249 1.1 ryo struct work vxq_wq_cookie; 250 1.1 ryo }; 251 1.1 ryo 252 1.1 ryo struct vmxnet3_softc { 253 1.1 ryo device_t vmx_dev; 254 1.1 ryo struct ethercom vmx_ethercom; 255 1.1 ryo struct ifmedia vmx_media; 256 1.1 ryo struct vmxnet3_driver_shared *vmx_ds; 257 1.1 ryo int vmx_flags; 258 1.1 ryo #define VMXNET3_FLAG_NO_MSIX (1 << 0) 259 1.1 ryo #define VMXNET3_FLAG_RSS (1 << 1) 260 1.1 ryo #define VMXNET3_FLAG_ATTACHED (1 << 2) 261 1.1 ryo 262 1.1 ryo struct vmxnet3_queue *vmx_queue; 263 1.1 ryo 264 1.1 ryo struct pci_attach_args *vmx_pa; 265 1.1 ryo pci_chipset_tag_t vmx_pc; 266 1.1 ryo 267 1.1 ryo bus_space_tag_t vmx_iot0; 268 1.1 ryo bus_space_tag_t vmx_iot1; 269 1.1 ryo bus_space_handle_t vmx_ioh0; 270 1.1 ryo bus_space_handle_t vmx_ioh1; 271 1.1 ryo bus_size_t vmx_ios0; 272 1.1 ryo bus_size_t vmx_ios1; 273 1.1 ryo bus_dma_tag_t vmx_dmat; 274 1.1 ryo 275 1.1 ryo int vmx_link_active; 276 1.1 ryo int vmx_ntxqueues; 277 1.1 ryo int vmx_nrxqueues; 278 1.1 ryo int vmx_ntxdescs; 279 1.1 ryo int vmx_nrxdescs; 280 1.1 ryo int vmx_max_rxsegs; 281 1.1 ryo 282 1.1 ryo struct evcnt vmx_event_intr; 283 1.1 ryo struct evcnt vmx_event_link; 284 1.1 ryo struct evcnt vmx_event_txqerror; 285 1.1 ryo struct evcnt vmx_event_rxqerror; 286 1.1 ryo struct evcnt vmx_event_dic; 287 1.1 ryo struct evcnt vmx_event_debug; 288 1.1 ryo 289 1.1 ryo int vmx_intr_type; 290 1.1 ryo int vmx_intr_mask_mode; 291 1.1 ryo int vmx_event_intr_idx; 292 1.1 ryo int vmx_nintrs; 293 1.1 ryo pci_intr_handle_t *vmx_intrs; /* legacy use vmx_intrs[0] */ 294 1.1 ryo void *vmx_ihs[VMXNET3_MAX_INTRS]; 295 1.1 ryo 296 1.1 ryo kmutex_t *vmx_mtx; 297 1.1 ryo 298 1.12 riastrad int vmx_if_flags; 299 1.12 riastrad bool vmx_promisc; 300 1.12 riastrad bool vmx_mcastactive; 301 1.1 ryo uint8_t *vmx_mcast; 302 1.1 ryo void *vmx_qs; 303 1.1 ryo struct vmxnet3_rss_shared *vmx_rss; 304 1.1 ryo callout_t vmx_tick; 305 1.1 ryo struct vmxnet3_dma_alloc vmx_ds_dma; 306 1.1 ryo struct vmxnet3_dma_alloc vmx_qs_dma; 307 1.1 ryo struct vmxnet3_dma_alloc vmx_mcast_dma; 308 1.1 ryo struct vmxnet3_dma_alloc vmx_rss_dma; 309 1.1 ryo int vmx_max_ntxqueues; 310 1.1 ryo int vmx_max_nrxqueues; 311 1.1 ryo uint8_t vmx_lladdr[ETHER_ADDR_LEN]; 312 1.1 ryo 313 1.1 ryo u_int vmx_rx_intr_process_limit; 314 1.1 ryo u_int vmx_tx_intr_process_limit; 315 1.1 ryo u_int vmx_rx_process_limit; 316 1.1 ryo u_int vmx_tx_process_limit; 317 1.1 ryo struct sysctllog *vmx_sysctllog; 318 1.1 ryo 319 1.1 ryo bool vmx_txrx_workqueue; 320 1.1 ryo struct workqueue *vmx_queue_wq; 321 1.12 riastrad 322 1.12 riastrad struct workqueue *vmx_reset_wq; 323 1.12 riastrad struct work vmx_reset_work; 324 1.12 riastrad bool vmx_reset_pending; 325 1.1 ryo }; 326 1.1 ryo 327 1.1 ryo #define VMXNET3_STAT 328 1.1 ryo 329 1.1 ryo #ifdef VMXNET3_STAT 330 1.1 ryo struct { 331 1.1 ryo u_int txhead; 332 1.1 ryo u_int txdone; 333 1.1 ryo u_int maxtxlen; 334 1.1 ryo u_int rxdone; 335 1.1 ryo u_int rxfill; 336 1.1 ryo u_int intr; 337 1.1 ryo } vmxstat; 338 1.1 ryo #endif 339 1.1 ryo 340 1.1 ryo typedef enum { 341 1.1 ryo VMXNET3_BARRIER_RD, 342 1.1 ryo VMXNET3_BARRIER_WR, 343 1.1 ryo } vmxnet3_barrier_t; 344 1.1 ryo 345 1.1 ryo #define JUMBO_LEN (MCLBYTES - ETHER_ALIGN) /* XXX */ 346 1.1 ryo #define DMAADDR(map) ((map)->dm_segs[0].ds_addr) 347 1.1 ryo 348 1.1 ryo #define vtophys(va) 0 /* XXX ok? */ 349 1.1 ryo 350 1.1 ryo static int vmxnet3_match(device_t, cfdata_t, void *); 351 1.1 ryo static void vmxnet3_attach(device_t, device_t, void *); 352 1.1 ryo static int vmxnet3_detach(device_t, int); 353 1.1 ryo 354 1.1 ryo static int vmxnet3_alloc_pci_resources(struct vmxnet3_softc *); 355 1.1 ryo static void vmxnet3_free_pci_resources(struct vmxnet3_softc *); 356 1.1 ryo static int vmxnet3_check_version(struct vmxnet3_softc *); 357 1.1 ryo static void vmxnet3_check_multiqueue(struct vmxnet3_softc *); 358 1.1 ryo 359 1.1 ryo static int vmxnet3_alloc_msix_interrupts(struct vmxnet3_softc *); 360 1.1 ryo static int vmxnet3_alloc_msi_interrupts(struct vmxnet3_softc *); 361 1.1 ryo static int vmxnet3_alloc_legacy_interrupts(struct vmxnet3_softc *); 362 1.1 ryo static int vmxnet3_alloc_interrupts(struct vmxnet3_softc *); 363 1.1 ryo static void vmxnet3_free_interrupts(struct vmxnet3_softc *); 364 1.1 ryo 365 1.1 ryo static int vmxnet3_setup_msix_interrupts(struct vmxnet3_softc *); 366 1.1 ryo static int vmxnet3_setup_msi_interrupt(struct vmxnet3_softc *); 367 1.1 ryo static int vmxnet3_setup_legacy_interrupt(struct vmxnet3_softc *); 368 1.1 ryo static void vmxnet3_set_interrupt_idx(struct vmxnet3_softc *); 369 1.1 ryo static int vmxnet3_setup_interrupts(struct vmxnet3_softc *); 370 1.1 ryo static int vmxnet3_setup_sysctl(struct vmxnet3_softc *); 371 1.1 ryo 372 1.1 ryo static int vmxnet3_setup_stats(struct vmxnet3_softc *); 373 1.1 ryo static void vmxnet3_teardown_stats(struct vmxnet3_softc *); 374 1.1 ryo 375 1.1 ryo static int vmxnet3_init_rxq(struct vmxnet3_softc *, int); 376 1.1 ryo static int vmxnet3_init_txq(struct vmxnet3_softc *, int); 377 1.1 ryo static int vmxnet3_alloc_rxtx_queues(struct vmxnet3_softc *); 378 1.1 ryo static void vmxnet3_destroy_rxq(struct vmxnet3_rxqueue *); 379 1.1 ryo static void vmxnet3_destroy_txq(struct vmxnet3_txqueue *); 380 1.1 ryo static void vmxnet3_free_rxtx_queues(struct vmxnet3_softc *); 381 1.1 ryo 382 1.1 ryo static int vmxnet3_alloc_shared_data(struct vmxnet3_softc *); 383 1.1 ryo static void vmxnet3_free_shared_data(struct vmxnet3_softc *); 384 1.1 ryo static int vmxnet3_alloc_txq_data(struct vmxnet3_softc *); 385 1.1 ryo static void vmxnet3_free_txq_data(struct vmxnet3_softc *); 386 1.1 ryo static int vmxnet3_alloc_rxq_data(struct vmxnet3_softc *); 387 1.1 ryo static void vmxnet3_free_rxq_data(struct vmxnet3_softc *); 388 1.1 ryo static int vmxnet3_alloc_queue_data(struct vmxnet3_softc *); 389 1.1 ryo static void vmxnet3_free_queue_data(struct vmxnet3_softc *); 390 1.1 ryo static int vmxnet3_alloc_mcast_table(struct vmxnet3_softc *); 391 1.1 ryo static void vmxnet3_free_mcast_table(struct vmxnet3_softc *); 392 1.1 ryo static void vmxnet3_init_shared_data(struct vmxnet3_softc *); 393 1.1 ryo static void vmxnet3_reinit_rss_shared_data(struct vmxnet3_softc *); 394 1.1 ryo static void vmxnet3_reinit_shared_data(struct vmxnet3_softc *); 395 1.1 ryo static int vmxnet3_alloc_data(struct vmxnet3_softc *); 396 1.1 ryo static void vmxnet3_free_data(struct vmxnet3_softc *); 397 1.1 ryo static int vmxnet3_setup_interface(struct vmxnet3_softc *); 398 1.1 ryo 399 1.1 ryo static void vmxnet3_evintr(struct vmxnet3_softc *); 400 1.1 ryo static bool vmxnet3_txq_eof(struct vmxnet3_txqueue *, u_int); 401 1.1 ryo static int vmxnet3_newbuf(struct vmxnet3_softc *, struct vmxnet3_rxqueue *, 402 1.1 ryo struct vmxnet3_rxring *); 403 1.1 ryo static void vmxnet3_rxq_eof_discard(struct vmxnet3_rxqueue *, 404 1.1 ryo struct vmxnet3_rxring *, int); 405 1.1 ryo static void vmxnet3_rxq_discard_chain(struct vmxnet3_rxqueue *); 406 1.1 ryo static void vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *, struct mbuf *); 407 1.1 ryo static void vmxnet3_rxq_input(struct vmxnet3_rxqueue *, 408 1.1 ryo struct vmxnet3_rxcompdesc *, struct mbuf *); 409 1.1 ryo static bool vmxnet3_rxq_eof(struct vmxnet3_rxqueue *, u_int); 410 1.1 ryo static int vmxnet3_legacy_intr(void *); 411 1.1 ryo static int vmxnet3_txrxq_intr(void *); 412 1.1 ryo static void vmxnet3_handle_queue(void *); 413 1.1 ryo static void vmxnet3_handle_queue_work(struct work *, void *); 414 1.1 ryo static int vmxnet3_event_intr(void *); 415 1.1 ryo 416 1.1 ryo static void vmxnet3_txstop(struct vmxnet3_softc *, struct vmxnet3_txqueue *); 417 1.1 ryo static void vmxnet3_rxstop(struct vmxnet3_softc *, struct vmxnet3_rxqueue *); 418 1.1 ryo static void vmxnet3_stop_locked(struct vmxnet3_softc *); 419 1.1 ryo static void vmxnet3_stop_rendezvous(struct vmxnet3_softc *); 420 1.1 ryo static void vmxnet3_stop(struct ifnet *, int); 421 1.1 ryo 422 1.1 ryo static void vmxnet3_txinit(struct vmxnet3_softc *, struct vmxnet3_txqueue *); 423 1.1 ryo static int vmxnet3_rxinit(struct vmxnet3_softc *, struct vmxnet3_rxqueue *); 424 1.1 ryo static int vmxnet3_reinit_queues(struct vmxnet3_softc *); 425 1.1 ryo static int vmxnet3_enable_device(struct vmxnet3_softc *); 426 1.1 ryo static void vmxnet3_reinit_rxfilters(struct vmxnet3_softc *); 427 1.1 ryo static int vmxnet3_reinit(struct vmxnet3_softc *); 428 1.1 ryo 429 1.1 ryo static int vmxnet3_init_locked(struct vmxnet3_softc *); 430 1.1 ryo static int vmxnet3_init(struct ifnet *); 431 1.1 ryo 432 1.1 ryo static int vmxnet3_txq_offload_ctx(struct vmxnet3_txqueue *, struct mbuf *, int *, int *); 433 1.1 ryo static int vmxnet3_txq_load_mbuf(struct vmxnet3_txqueue *, struct mbuf **, bus_dmamap_t); 434 1.1 ryo static void vmxnet3_txq_unload_mbuf(struct vmxnet3_txqueue *, bus_dmamap_t); 435 1.1 ryo static int vmxnet3_txq_encap(struct vmxnet3_txqueue *, struct mbuf **); 436 1.1 ryo static void vmxnet3_start_locked(struct ifnet *); 437 1.1 ryo static void vmxnet3_start(struct ifnet *); 438 1.1 ryo static void vmxnet3_transmit_locked(struct ifnet *, struct vmxnet3_txqueue *); 439 1.1 ryo static int vmxnet3_transmit(struct ifnet *, struct mbuf *); 440 1.1 ryo static void vmxnet3_deferred_transmit(void *); 441 1.1 ryo 442 1.1 ryo static void vmxnet3_set_rxfilter(struct vmxnet3_softc *); 443 1.1 ryo static int vmxnet3_ioctl(struct ifnet *, u_long, void *); 444 1.1 ryo static int vmxnet3_ifflags_cb(struct ethercom *); 445 1.1 ryo 446 1.1 ryo static int vmxnet3_watchdog(struct vmxnet3_txqueue *); 447 1.1 ryo static void vmxnet3_refresh_host_stats(struct vmxnet3_softc *); 448 1.1 ryo static void vmxnet3_tick(void *); 449 1.12 riastrad static void vmxnet3_reset_work(struct work *, void *); 450 1.1 ryo static void vmxnet3_if_link_status(struct vmxnet3_softc *); 451 1.1 ryo static bool vmxnet3_cmd_link_status(struct ifnet *); 452 1.1 ryo static void vmxnet3_ifmedia_status(struct ifnet *, struct ifmediareq *); 453 1.1 ryo static int vmxnet3_ifmedia_change(struct ifnet *); 454 1.1 ryo static void vmxnet3_set_lladdr(struct vmxnet3_softc *); 455 1.1 ryo static void vmxnet3_get_lladdr(struct vmxnet3_softc *); 456 1.1 ryo 457 1.1 ryo static void vmxnet3_enable_all_intrs(struct vmxnet3_softc *); 458 1.1 ryo static void vmxnet3_disable_all_intrs(struct vmxnet3_softc *); 459 1.1 ryo 460 1.1 ryo static int vmxnet3_dma_malloc(struct vmxnet3_softc *, bus_size_t, bus_size_t, 461 1.1 ryo struct vmxnet3_dma_alloc *); 462 1.1 ryo static void vmxnet3_dma_free(struct vmxnet3_softc *, struct vmxnet3_dma_alloc *); 463 1.1 ryo 464 1.1 ryo CFATTACH_DECL3_NEW(vmx, sizeof(struct vmxnet3_softc), 465 1.1 ryo vmxnet3_match, vmxnet3_attach, vmxnet3_detach, NULL, NULL, NULL, 0); 466 1.1 ryo 467 1.1 ryo /* round down to the nearest power of 2 */ 468 1.1 ryo static int 469 1.1 ryo vmxnet3_calc_queue_size(int n) 470 1.1 ryo { 471 1.1 ryo 472 1.1 ryo if (__predict_false(n <= 0)) 473 1.1 ryo return 1; 474 1.1 ryo 475 1.1 ryo return (1U << (fls32(n) - 1)); 476 1.1 ryo } 477 1.1 ryo 478 1.1 ryo static inline void 479 1.1 ryo vmxnet3_write_bar0(struct vmxnet3_softc *sc, bus_size_t r, uint32_t v) 480 1.1 ryo { 481 1.1 ryo 482 1.1 ryo bus_space_write_4(sc->vmx_iot0, sc->vmx_ioh0, r, v); 483 1.1 ryo } 484 1.1 ryo 485 1.1 ryo static inline uint32_t 486 1.1 ryo vmxnet3_read_bar1(struct vmxnet3_softc *sc, bus_size_t r) 487 1.1 ryo { 488 1.1 ryo 489 1.1 ryo return (bus_space_read_4(sc->vmx_iot1, sc->vmx_ioh1, r)); 490 1.1 ryo } 491 1.1 ryo 492 1.1 ryo static inline void 493 1.1 ryo vmxnet3_write_bar1(struct vmxnet3_softc *sc, bus_size_t r, uint32_t v) 494 1.1 ryo { 495 1.1 ryo 496 1.1 ryo bus_space_write_4(sc->vmx_iot1, sc->vmx_ioh1, r, v); 497 1.1 ryo } 498 1.1 ryo 499 1.1 ryo static inline void 500 1.1 ryo vmxnet3_write_cmd(struct vmxnet3_softc *sc, uint32_t cmd) 501 1.1 ryo { 502 1.1 ryo 503 1.1 ryo vmxnet3_write_bar1(sc, VMXNET3_BAR1_CMD, cmd); 504 1.1 ryo } 505 1.1 ryo 506 1.1 ryo static inline uint32_t 507 1.1 ryo vmxnet3_read_cmd(struct vmxnet3_softc *sc, uint32_t cmd) 508 1.1 ryo { 509 1.1 ryo 510 1.1 ryo vmxnet3_write_cmd(sc, cmd); 511 1.1 ryo return (vmxnet3_read_bar1(sc, VMXNET3_BAR1_CMD)); 512 1.1 ryo } 513 1.1 ryo 514 1.1 ryo static inline void 515 1.1 ryo vmxnet3_enable_intr(struct vmxnet3_softc *sc, int irq) 516 1.1 ryo { 517 1.1 ryo vmxnet3_write_bar0(sc, VMXNET3_BAR0_IMASK(irq), 0); 518 1.1 ryo } 519 1.1 ryo 520 1.1 ryo static inline void 521 1.1 ryo vmxnet3_disable_intr(struct vmxnet3_softc *sc, int irq) 522 1.1 ryo { 523 1.1 ryo vmxnet3_write_bar0(sc, VMXNET3_BAR0_IMASK(irq), 1); 524 1.1 ryo } 525 1.1 ryo 526 1.1 ryo static inline void 527 1.1 ryo vmxnet3_rxr_increment_fill(struct vmxnet3_rxring *rxr) 528 1.1 ryo { 529 1.1 ryo 530 1.1 ryo if (++rxr->vxrxr_fill == rxr->vxrxr_ndesc) { 531 1.1 ryo rxr->vxrxr_fill = 0; 532 1.1 ryo rxr->vxrxr_gen ^= 1; 533 1.1 ryo } 534 1.1 ryo } 535 1.1 ryo 536 1.1 ryo static inline int 537 1.1 ryo vmxnet3_txring_avail(struct vmxnet3_txring *txr) 538 1.1 ryo { 539 1.1 ryo int avail = txr->vxtxr_next - txr->vxtxr_head - 1; 540 1.2 ryo return (avail < 0 ? (int)txr->vxtxr_ndesc + avail : avail); 541 1.1 ryo } 542 1.1 ryo 543 1.1 ryo /* 544 1.1 ryo * Since this is a purely paravirtualized device, we do not have 545 1.1 ryo * to worry about DMA coherency. But at times, we must make sure 546 1.1 ryo * both the compiler and CPU do not reorder memory operations. 547 1.1 ryo */ 548 1.1 ryo static inline void 549 1.1 ryo vmxnet3_barrier(struct vmxnet3_softc *sc, vmxnet3_barrier_t type) 550 1.1 ryo { 551 1.1 ryo 552 1.1 ryo switch (type) { 553 1.1 ryo case VMXNET3_BARRIER_RD: 554 1.1 ryo membar_consumer(); 555 1.1 ryo break; 556 1.1 ryo case VMXNET3_BARRIER_WR: 557 1.1 ryo membar_producer(); 558 1.1 ryo break; 559 1.1 ryo default: 560 1.1 ryo panic("%s: bad barrier type %d", __func__, type); 561 1.1 ryo } 562 1.1 ryo } 563 1.1 ryo 564 1.1 ryo static int 565 1.1 ryo vmxnet3_match(device_t parent, cfdata_t match, void *aux) 566 1.1 ryo { 567 1.1 ryo struct pci_attach_args *pa = (struct pci_attach_args *)aux; 568 1.1 ryo 569 1.1 ryo if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_VMWARE && 570 1.1 ryo PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VMWARE_VMXNET3) 571 1.1 ryo return 1; 572 1.1 ryo 573 1.1 ryo return 0; 574 1.1 ryo } 575 1.1 ryo 576 1.1 ryo static void 577 1.1 ryo vmxnet3_attach(device_t parent, device_t self, void *aux) 578 1.1 ryo { 579 1.1 ryo struct vmxnet3_softc *sc = device_private(self); 580 1.1 ryo struct pci_attach_args *pa = aux; 581 1.1 ryo pcireg_t preg; 582 1.1 ryo int error; 583 1.1 ryo int candidate; 584 1.1 ryo 585 1.1 ryo sc->vmx_dev = self; 586 1.1 ryo sc->vmx_pa = pa; 587 1.1 ryo sc->vmx_pc = pa->pa_pc; 588 1.1 ryo if (pci_dma64_available(pa)) 589 1.1 ryo sc->vmx_dmat = pa->pa_dmat64; 590 1.1 ryo else 591 1.1 ryo sc->vmx_dmat = pa->pa_dmat; 592 1.1 ryo 593 1.1 ryo pci_aprint_devinfo_fancy(pa, "Ethernet controller", "vmxnet3", 1); 594 1.1 ryo 595 1.1 ryo preg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 596 1.1 ryo preg |= PCI_COMMAND_MASTER_ENABLE; 597 1.1 ryo pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg); 598 1.1 ryo 599 1.1 ryo sc->vmx_mtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET); 600 1.1 ryo callout_init(&sc->vmx_tick, CALLOUT_MPSAFE); 601 1.1 ryo 602 1.1 ryo candidate = MIN(MIN(VMXNET3_MAX_TX_QUEUES, VMXNET3_MAX_RX_QUEUES), 603 1.1 ryo ncpu); 604 1.1 ryo sc->vmx_max_ntxqueues = sc->vmx_max_nrxqueues = 605 1.1 ryo vmxnet3_calc_queue_size(candidate); 606 1.1 ryo sc->vmx_ntxdescs = 512; 607 1.1 ryo sc->vmx_nrxdescs = 256; 608 1.1 ryo sc->vmx_max_rxsegs = VMXNET3_MAX_RX_SEGS; 609 1.1 ryo 610 1.1 ryo error = vmxnet3_alloc_pci_resources(sc); 611 1.1 ryo if (error) 612 1.1 ryo return; 613 1.1 ryo 614 1.1 ryo error = vmxnet3_check_version(sc); 615 1.1 ryo if (error) 616 1.1 ryo return; 617 1.1 ryo 618 1.1 ryo error = vmxnet3_alloc_rxtx_queues(sc); 619 1.1 ryo if (error) 620 1.1 ryo return; 621 1.1 ryo 622 1.1 ryo error = vmxnet3_alloc_interrupts(sc); 623 1.1 ryo if (error) 624 1.1 ryo return; 625 1.1 ryo 626 1.1 ryo vmxnet3_check_multiqueue(sc); 627 1.1 ryo 628 1.1 ryo error = vmxnet3_alloc_data(sc); 629 1.1 ryo if (error) 630 1.1 ryo return; 631 1.1 ryo 632 1.1 ryo error = vmxnet3_setup_interface(sc); 633 1.1 ryo if (error) 634 1.1 ryo return; 635 1.1 ryo 636 1.1 ryo error = vmxnet3_setup_interrupts(sc); 637 1.1 ryo if (error) 638 1.1 ryo return; 639 1.1 ryo 640 1.1 ryo error = vmxnet3_setup_sysctl(sc); 641 1.1 ryo if (error) 642 1.1 ryo return; 643 1.1 ryo 644 1.1 ryo error = vmxnet3_setup_stats(sc); 645 1.1 ryo if (error) 646 1.1 ryo return; 647 1.1 ryo 648 1.12 riastrad char buf[128]; 649 1.12 riastrad snprintf(buf, sizeof(buf), "%s_reset", device_xname(sc->vmx_dev)); 650 1.12 riastrad error = workqueue_create(&sc->vmx_reset_wq, "%s_reset", 651 1.12 riastrad vmxnet3_reset_work, sc, VMXNET3_WORKQUEUE_PRI, IPL_SOFTCLOCK, 652 1.12 riastrad WQ_MPSAFE); 653 1.12 riastrad if (error) { 654 1.12 riastrad aprint_error_dev(sc->vmx_dev, 655 1.12 riastrad "failed to create reset workqueue: %d\n", 656 1.12 riastrad error); 657 1.12 riastrad return; 658 1.12 riastrad } 659 1.12 riastrad 660 1.1 ryo sc->vmx_flags |= VMXNET3_FLAG_ATTACHED; 661 1.1 ryo } 662 1.1 ryo 663 1.1 ryo static int 664 1.1 ryo vmxnet3_detach(device_t self, int flags) 665 1.1 ryo { 666 1.1 ryo struct vmxnet3_softc *sc; 667 1.1 ryo struct ifnet *ifp; 668 1.1 ryo 669 1.1 ryo sc = device_private(self); 670 1.1 ryo ifp = &sc->vmx_ethercom.ec_if; 671 1.1 ryo 672 1.1 ryo if (sc->vmx_flags & VMXNET3_FLAG_ATTACHED) { 673 1.1 ryo VMXNET3_CORE_LOCK(sc); 674 1.1 ryo vmxnet3_stop_locked(sc); 675 1.1 ryo callout_halt(&sc->vmx_tick, sc->vmx_mtx); 676 1.1 ryo callout_destroy(&sc->vmx_tick); 677 1.1 ryo VMXNET3_CORE_UNLOCK(sc); 678 1.1 ryo 679 1.1 ryo ether_ifdetach(ifp); 680 1.1 ryo if_detach(ifp); 681 1.1 ryo ifmedia_fini(&sc->vmx_media); 682 1.1 ryo } 683 1.1 ryo 684 1.1 ryo vmxnet3_teardown_stats(sc); 685 1.1 ryo sysctl_teardown(&sc->vmx_sysctllog); 686 1.1 ryo 687 1.1 ryo vmxnet3_free_interrupts(sc); 688 1.1 ryo 689 1.1 ryo vmxnet3_free_data(sc); 690 1.1 ryo vmxnet3_free_pci_resources(sc); 691 1.1 ryo vmxnet3_free_rxtx_queues(sc); 692 1.1 ryo 693 1.1 ryo if (sc->vmx_mtx) 694 1.1 ryo mutex_obj_free(sc->vmx_mtx); 695 1.1 ryo 696 1.1 ryo return (0); 697 1.1 ryo } 698 1.1 ryo 699 1.1 ryo static int 700 1.1 ryo vmxnet3_alloc_pci_resources(struct vmxnet3_softc *sc) 701 1.1 ryo { 702 1.1 ryo struct pci_attach_args *pa = sc->vmx_pa; 703 1.1 ryo pcireg_t memtype; 704 1.1 ryo 705 1.1 ryo memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0)); 706 1.1 ryo if (pci_mapreg_map(pa, PCI_BAR(0), memtype, 0, &sc->vmx_iot0, &sc->vmx_ioh0, 707 1.1 ryo NULL, &sc->vmx_ios0)) { 708 1.1 ryo aprint_error_dev(sc->vmx_dev, "failed to map BAR0\n"); 709 1.1 ryo return (ENXIO); 710 1.1 ryo } 711 1.1 ryo memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(1)); 712 1.1 ryo if (pci_mapreg_map(pa, PCI_BAR(1), memtype, 0, &sc->vmx_iot1, &sc->vmx_ioh1, 713 1.1 ryo NULL, &sc->vmx_ios1)) { 714 1.1 ryo aprint_error_dev(sc->vmx_dev, "failed to map BAR1\n"); 715 1.1 ryo return (ENXIO); 716 1.1 ryo } 717 1.1 ryo 718 1.1 ryo if (!pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSIX, NULL, NULL)) { 719 1.1 ryo sc->vmx_flags |= VMXNET3_FLAG_NO_MSIX; 720 1.1 ryo return (0); 721 1.1 ryo } 722 1.1 ryo 723 1.1 ryo return (0); 724 1.1 ryo } 725 1.1 ryo 726 1.1 ryo static void 727 1.1 ryo vmxnet3_free_pci_resources(struct vmxnet3_softc *sc) 728 1.1 ryo { 729 1.1 ryo 730 1.1 ryo if (sc->vmx_ios0) { 731 1.1 ryo bus_space_unmap(sc->vmx_iot0, sc->vmx_ioh0, sc->vmx_ios0); 732 1.1 ryo sc->vmx_ios0 = 0; 733 1.1 ryo } 734 1.1 ryo 735 1.1 ryo if (sc->vmx_ios1) { 736 1.1 ryo bus_space_unmap(sc->vmx_iot1, sc->vmx_ioh1, sc->vmx_ios1); 737 1.1 ryo sc->vmx_ios1 = 0; 738 1.1 ryo } 739 1.1 ryo } 740 1.1 ryo 741 1.1 ryo static int 742 1.1 ryo vmxnet3_check_version(struct vmxnet3_softc *sc) 743 1.1 ryo { 744 1.1 ryo u_int ver; 745 1.1 ryo 746 1.1 ryo ver = vmxnet3_read_bar1(sc, VMXNET3_BAR1_VRRS); 747 1.1 ryo if ((ver & 0x1) == 0) { 748 1.1 ryo aprint_error_dev(sc->vmx_dev, 749 1.1 ryo "unsupported hardware version 0x%x\n", ver); 750 1.1 ryo return (ENOTSUP); 751 1.1 ryo } 752 1.1 ryo vmxnet3_write_bar1(sc, VMXNET3_BAR1_VRRS, 1); 753 1.1 ryo 754 1.1 ryo ver = vmxnet3_read_bar1(sc, VMXNET3_BAR1_UVRS); 755 1.1 ryo if ((ver & 0x1) == 0) { 756 1.1 ryo aprint_error_dev(sc->vmx_dev, 757 1.14 andvar "incompatible UPT version 0x%x\n", ver); 758 1.1 ryo return (ENOTSUP); 759 1.1 ryo } 760 1.1 ryo vmxnet3_write_bar1(sc, VMXNET3_BAR1_UVRS, 1); 761 1.1 ryo 762 1.1 ryo return (0); 763 1.1 ryo } 764 1.1 ryo 765 1.1 ryo static void 766 1.1 ryo vmxnet3_check_multiqueue(struct vmxnet3_softc *sc) 767 1.1 ryo { 768 1.1 ryo 769 1.1 ryo if (sc->vmx_intr_type != VMXNET3_IT_MSIX) 770 1.1 ryo goto out; 771 1.1 ryo 772 1.1 ryo /* Just use the maximum configured for now. */ 773 1.1 ryo sc->vmx_nrxqueues = sc->vmx_max_nrxqueues; 774 1.1 ryo sc->vmx_ntxqueues = sc->vmx_max_ntxqueues; 775 1.1 ryo 776 1.1 ryo if (sc->vmx_nrxqueues > 1) 777 1.1 ryo sc->vmx_flags |= VMXNET3_FLAG_RSS; 778 1.1 ryo 779 1.1 ryo return; 780 1.1 ryo 781 1.1 ryo out: 782 1.1 ryo sc->vmx_ntxqueues = 1; 783 1.1 ryo sc->vmx_nrxqueues = 1; 784 1.1 ryo } 785 1.1 ryo 786 1.1 ryo static int 787 1.1 ryo vmxnet3_alloc_msix_interrupts(struct vmxnet3_softc *sc) 788 1.1 ryo { 789 1.1 ryo int required; 790 1.1 ryo struct pci_attach_args *pa = sc->vmx_pa; 791 1.1 ryo 792 1.1 ryo if (sc->vmx_flags & VMXNET3_FLAG_NO_MSIX) 793 1.1 ryo return (1); 794 1.1 ryo 795 1.1 ryo /* Allocate an additional vector for the events interrupt. */ 796 1.1 ryo required = MIN(sc->vmx_max_ntxqueues, sc->vmx_max_nrxqueues) + 1; 797 1.1 ryo 798 1.1 ryo if (pci_msix_count(pa->pa_pc, pa->pa_tag) < required) 799 1.1 ryo return (1); 800 1.1 ryo 801 1.1 ryo if (pci_msix_alloc_exact(pa, &sc->vmx_intrs, required) == 0) { 802 1.1 ryo sc->vmx_nintrs = required; 803 1.1 ryo return (0); 804 1.1 ryo } 805 1.1 ryo 806 1.1 ryo return (1); 807 1.1 ryo } 808 1.1 ryo 809 1.1 ryo static int 810 1.1 ryo vmxnet3_alloc_msi_interrupts(struct vmxnet3_softc *sc) 811 1.1 ryo { 812 1.1 ryo int nmsi, required; 813 1.1 ryo struct pci_attach_args *pa = sc->vmx_pa; 814 1.1 ryo 815 1.1 ryo required = 1; 816 1.1 ryo 817 1.1 ryo nmsi = pci_msi_count(pa->pa_pc, pa->pa_tag); 818 1.1 ryo if (nmsi < required) 819 1.1 ryo return (1); 820 1.1 ryo 821 1.1 ryo if (pci_msi_alloc_exact(pa, &sc->vmx_intrs, required) == 0) { 822 1.1 ryo sc->vmx_nintrs = required; 823 1.1 ryo return (0); 824 1.1 ryo } 825 1.1 ryo 826 1.1 ryo return (1); 827 1.1 ryo } 828 1.1 ryo 829 1.1 ryo static int 830 1.1 ryo vmxnet3_alloc_legacy_interrupts(struct vmxnet3_softc *sc) 831 1.1 ryo { 832 1.1 ryo 833 1.1 ryo if (pci_intx_alloc(sc->vmx_pa, &sc->vmx_intrs) == 0) { 834 1.1 ryo sc->vmx_nintrs = 1; 835 1.1 ryo return (0); 836 1.1 ryo } 837 1.1 ryo 838 1.1 ryo return (1); 839 1.1 ryo } 840 1.1 ryo 841 1.1 ryo static int 842 1.1 ryo vmxnet3_alloc_interrupts(struct vmxnet3_softc *sc) 843 1.1 ryo { 844 1.1 ryo u_int config; 845 1.1 ryo int error; 846 1.1 ryo 847 1.1 ryo config = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_INTRCFG); 848 1.1 ryo 849 1.1 ryo sc->vmx_intr_type = config & 0x03; 850 1.1 ryo sc->vmx_intr_mask_mode = (config >> 2) & 0x03; 851 1.1 ryo 852 1.1 ryo switch (sc->vmx_intr_type) { 853 1.1 ryo case VMXNET3_IT_AUTO: 854 1.1 ryo sc->vmx_intr_type = VMXNET3_IT_MSIX; 855 1.1 ryo /* FALLTHROUGH */ 856 1.1 ryo case VMXNET3_IT_MSIX: 857 1.1 ryo error = vmxnet3_alloc_msix_interrupts(sc); 858 1.1 ryo if (error == 0) 859 1.1 ryo break; 860 1.1 ryo sc->vmx_intr_type = VMXNET3_IT_MSI; 861 1.1 ryo /* FALLTHROUGH */ 862 1.1 ryo case VMXNET3_IT_MSI: 863 1.1 ryo error = vmxnet3_alloc_msi_interrupts(sc); 864 1.1 ryo if (error == 0) 865 1.1 ryo break; 866 1.1 ryo sc->vmx_intr_type = VMXNET3_IT_LEGACY; 867 1.1 ryo /* FALLTHROUGH */ 868 1.1 ryo case VMXNET3_IT_LEGACY: 869 1.1 ryo error = vmxnet3_alloc_legacy_interrupts(sc); 870 1.1 ryo if (error == 0) 871 1.1 ryo break; 872 1.1 ryo /* FALLTHROUGH */ 873 1.1 ryo default: 874 1.1 ryo sc->vmx_intr_type = -1; 875 1.1 ryo aprint_error_dev(sc->vmx_dev, "cannot allocate any interrupt resources\n"); 876 1.1 ryo return (ENXIO); 877 1.1 ryo } 878 1.1 ryo 879 1.1 ryo return (error); 880 1.1 ryo } 881 1.1 ryo 882 1.1 ryo static void 883 1.1 ryo vmxnet3_free_interrupts(struct vmxnet3_softc *sc) 884 1.1 ryo { 885 1.1 ryo pci_chipset_tag_t pc = sc->vmx_pc; 886 1.1 ryo int i; 887 1.1 ryo 888 1.1 ryo workqueue_destroy(sc->vmx_queue_wq); 889 1.1 ryo for (i = 0; i < sc->vmx_ntxqueues; i++) { 890 1.1 ryo struct vmxnet3_queue *vmxq = &sc->vmx_queue[i]; 891 1.1 ryo 892 1.1 ryo softint_disestablish(vmxq->vxq_si); 893 1.1 ryo vmxq->vxq_si = NULL; 894 1.1 ryo } 895 1.1 ryo for (i = 0; i < sc->vmx_nintrs; i++) { 896 1.1 ryo pci_intr_disestablish(pc, sc->vmx_ihs[i]); 897 1.1 ryo } 898 1.1 ryo pci_intr_release(pc, sc->vmx_intrs, sc->vmx_nintrs); 899 1.1 ryo } 900 1.1 ryo 901 1.1 ryo static int 902 1.1 ryo vmxnet3_setup_msix_interrupts(struct vmxnet3_softc *sc) 903 1.1 ryo { 904 1.1 ryo pci_chipset_tag_t pc = sc->vmx_pa->pa_pc; 905 1.1 ryo struct vmxnet3_queue *vmxq; 906 1.1 ryo pci_intr_handle_t *intr; 907 1.1 ryo void **ihs; 908 1.1 ryo int intr_idx, i, use_queues, error; 909 1.1 ryo kcpuset_t *affinity; 910 1.1 ryo const char *intrstr; 911 1.1 ryo char intrbuf[PCI_INTRSTR_LEN]; 912 1.1 ryo char xnamebuf[32]; 913 1.1 ryo 914 1.1 ryo intr = sc->vmx_intrs; 915 1.1 ryo intr_idx = 0; 916 1.1 ryo ihs = sc->vmx_ihs; 917 1.1 ryo 918 1.1 ryo /* See vmxnet3_alloc_msix_interrupts() */ 919 1.1 ryo use_queues = MIN(sc->vmx_max_ntxqueues, sc->vmx_max_nrxqueues); 920 1.1 ryo for (i = 0; i < use_queues; i++, intr++, ihs++, intr_idx++) { 921 1.1 ryo snprintf(xnamebuf, 32, "%s: txrx %d", device_xname(sc->vmx_dev), i); 922 1.1 ryo 923 1.1 ryo vmxq = &sc->vmx_queue[i]; 924 1.1 ryo 925 1.1 ryo intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf)); 926 1.1 ryo 927 1.1 ryo pci_intr_setattr(pc, intr, PCI_INTR_MPSAFE, true); 928 1.1 ryo *ihs = pci_intr_establish_xname(pc, *intr, IPL_NET, 929 1.1 ryo vmxnet3_txrxq_intr, vmxq, xnamebuf); 930 1.1 ryo if (*ihs == NULL) { 931 1.1 ryo aprint_error_dev(sc->vmx_dev, 932 1.1 ryo "unable to establish txrx interrupt at %s\n", intrstr); 933 1.1 ryo return (-1); 934 1.1 ryo } 935 1.1 ryo aprint_normal_dev(sc->vmx_dev, "txrx interrupting at %s\n", intrstr); 936 1.1 ryo 937 1.1 ryo kcpuset_create(&affinity, true); 938 1.1 ryo kcpuset_set(affinity, intr_idx % ncpu); 939 1.1 ryo error = interrupt_distribute(*ihs, affinity, NULL); 940 1.1 ryo if (error) { 941 1.1 ryo aprint_normal_dev(sc->vmx_dev, 942 1.1 ryo "%s cannot be changed affinity, use default CPU\n", 943 1.1 ryo intrstr); 944 1.1 ryo } 945 1.1 ryo kcpuset_destroy(affinity); 946 1.1 ryo 947 1.1 ryo vmxq->vxq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE, 948 1.1 ryo vmxnet3_handle_queue, vmxq); 949 1.1 ryo if (vmxq->vxq_si == NULL) { 950 1.1 ryo aprint_error_dev(sc->vmx_dev, 951 1.1 ryo "softint_establish for vxq_si failed\n"); 952 1.1 ryo return (-1); 953 1.1 ryo } 954 1.1 ryo 955 1.1 ryo vmxq->vxq_intr_idx = intr_idx; 956 1.1 ryo } 957 1.1 ryo snprintf(xnamebuf, MAXCOMLEN, "%s_tx_rx", device_xname(sc->vmx_dev)); 958 1.1 ryo error = workqueue_create(&sc->vmx_queue_wq, xnamebuf, 959 1.1 ryo vmxnet3_handle_queue_work, sc, VMXNET3_WORKQUEUE_PRI, IPL_NET, 960 1.1 ryo WQ_PERCPU | WQ_MPSAFE); 961 1.1 ryo if (error) { 962 1.1 ryo aprint_error_dev(sc->vmx_dev, "workqueue_create failed\n"); 963 1.1 ryo return (-1); 964 1.1 ryo } 965 1.1 ryo sc->vmx_txrx_workqueue = false; 966 1.1 ryo 967 1.1 ryo intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf)); 968 1.1 ryo 969 1.1 ryo snprintf(xnamebuf, 32, "%s: link", device_xname(sc->vmx_dev)); 970 1.1 ryo pci_intr_setattr(pc, intr, PCI_INTR_MPSAFE, true); 971 1.1 ryo *ihs = pci_intr_establish_xname(pc, *intr, IPL_NET, 972 1.1 ryo vmxnet3_event_intr, sc, xnamebuf); 973 1.1 ryo if (*ihs == NULL) { 974 1.1 ryo aprint_error_dev(sc->vmx_dev, 975 1.1 ryo "unable to establish event interrupt at %s\n", intrstr); 976 1.1 ryo return (-1); 977 1.1 ryo } 978 1.1 ryo aprint_normal_dev(sc->vmx_dev, "event interrupting at %s\n", intrstr); 979 1.1 ryo 980 1.1 ryo sc->vmx_event_intr_idx = intr_idx; 981 1.1 ryo 982 1.1 ryo return (0); 983 1.1 ryo } 984 1.1 ryo 985 1.1 ryo static int 986 1.1 ryo vmxnet3_setup_msi_interrupt(struct vmxnet3_softc *sc) 987 1.1 ryo { 988 1.1 ryo pci_chipset_tag_t pc = sc->vmx_pa->pa_pc; 989 1.1 ryo pci_intr_handle_t *intr; 990 1.1 ryo void **ihs; 991 1.1 ryo struct vmxnet3_queue *vmxq; 992 1.1 ryo int i; 993 1.1 ryo const char *intrstr; 994 1.1 ryo char intrbuf[PCI_INTRSTR_LEN]; 995 1.1 ryo char xnamebuf[32]; 996 1.1 ryo 997 1.1 ryo intr = &sc->vmx_intrs[0]; 998 1.1 ryo ihs = sc->vmx_ihs; 999 1.1 ryo vmxq = &sc->vmx_queue[0]; 1000 1.1 ryo 1001 1.1 ryo intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf)); 1002 1.1 ryo 1003 1.1 ryo snprintf(xnamebuf, 32, "%s: msi", device_xname(sc->vmx_dev)); 1004 1.1 ryo pci_intr_setattr(pc, intr, PCI_INTR_MPSAFE, true); 1005 1.1 ryo *ihs = pci_intr_establish_xname(pc, *intr, IPL_NET, 1006 1.1 ryo vmxnet3_legacy_intr, sc, xnamebuf); 1007 1.1 ryo if (*ihs == NULL) { 1008 1.1 ryo aprint_error_dev(sc->vmx_dev, 1009 1.1 ryo "unable to establish interrupt at %s\n", intrstr); 1010 1.1 ryo return (-1); 1011 1.1 ryo } 1012 1.1 ryo aprint_normal_dev(sc->vmx_dev, "interrupting at %s\n", intrstr); 1013 1.1 ryo 1014 1.1 ryo vmxq->vxq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE, 1015 1.1 ryo vmxnet3_handle_queue, vmxq); 1016 1.1 ryo if (vmxq->vxq_si == NULL) { 1017 1.1 ryo aprint_error_dev(sc->vmx_dev, 1018 1.1 ryo "softint_establish for vxq_si failed\n"); 1019 1.1 ryo return (-1); 1020 1.1 ryo } 1021 1.1 ryo 1022 1.1 ryo for (i = 0; i < MIN(sc->vmx_nrxqueues, sc->vmx_nrxqueues); i++) 1023 1.1 ryo sc->vmx_queue[i].vxq_intr_idx = 0; 1024 1.1 ryo sc->vmx_event_intr_idx = 0; 1025 1.1 ryo 1026 1.1 ryo return (0); 1027 1.1 ryo } 1028 1.1 ryo 1029 1.1 ryo static int 1030 1.1 ryo vmxnet3_setup_legacy_interrupt(struct vmxnet3_softc *sc) 1031 1.1 ryo { 1032 1.1 ryo pci_chipset_tag_t pc = sc->vmx_pa->pa_pc; 1033 1.1 ryo pci_intr_handle_t *intr; 1034 1.1 ryo void **ihs; 1035 1.1 ryo struct vmxnet3_queue *vmxq; 1036 1.1 ryo int i; 1037 1.1 ryo const char *intrstr; 1038 1.1 ryo char intrbuf[PCI_INTRSTR_LEN]; 1039 1.1 ryo char xnamebuf[32]; 1040 1.1 ryo 1041 1.1 ryo intr = &sc->vmx_intrs[0]; 1042 1.1 ryo ihs = sc->vmx_ihs; 1043 1.1 ryo vmxq = &sc->vmx_queue[0]; 1044 1.1 ryo 1045 1.1 ryo intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf)); 1046 1.1 ryo 1047 1.1 ryo snprintf(xnamebuf, 32, "%s:legacy", device_xname(sc->vmx_dev)); 1048 1.1 ryo pci_intr_setattr(pc, intr, PCI_INTR_MPSAFE, true); 1049 1.1 ryo *ihs = pci_intr_establish_xname(pc, *intr, IPL_NET, 1050 1.1 ryo vmxnet3_legacy_intr, sc, xnamebuf); 1051 1.1 ryo if (*ihs == NULL) { 1052 1.1 ryo aprint_error_dev(sc->vmx_dev, 1053 1.1 ryo "unable to establish interrupt at %s\n", intrstr); 1054 1.1 ryo return (-1); 1055 1.1 ryo } 1056 1.1 ryo aprint_normal_dev(sc->vmx_dev, "interrupting at %s\n", intrstr); 1057 1.1 ryo 1058 1.1 ryo vmxq->vxq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE, 1059 1.1 ryo vmxnet3_handle_queue, vmxq); 1060 1.1 ryo if (vmxq->vxq_si == NULL) { 1061 1.1 ryo aprint_error_dev(sc->vmx_dev, 1062 1.1 ryo "softint_establish for vxq_si failed\n"); 1063 1.1 ryo return (-1); 1064 1.1 ryo } 1065 1.1 ryo 1066 1.1 ryo for (i = 0; i < MIN(sc->vmx_nrxqueues, sc->vmx_nrxqueues); i++) 1067 1.1 ryo sc->vmx_queue[i].vxq_intr_idx = 0; 1068 1.1 ryo sc->vmx_event_intr_idx = 0; 1069 1.1 ryo 1070 1.1 ryo return (0); 1071 1.1 ryo } 1072 1.1 ryo 1073 1.1 ryo static void 1074 1.1 ryo vmxnet3_set_interrupt_idx(struct vmxnet3_softc *sc) 1075 1.1 ryo { 1076 1.1 ryo struct vmxnet3_queue *vmxq; 1077 1.1 ryo struct vmxnet3_txqueue *txq; 1078 1.1 ryo struct vmxnet3_txq_shared *txs; 1079 1.1 ryo struct vmxnet3_rxqueue *rxq; 1080 1.1 ryo struct vmxnet3_rxq_shared *rxs; 1081 1.1 ryo int i; 1082 1.1 ryo 1083 1.1 ryo sc->vmx_ds->evintr = sc->vmx_event_intr_idx; 1084 1.1 ryo 1085 1.1 ryo for (i = 0; i < sc->vmx_ntxqueues; i++) { 1086 1.1 ryo vmxq = &sc->vmx_queue[i]; 1087 1.1 ryo txq = &vmxq->vxq_txqueue; 1088 1.1 ryo txs = txq->vxtxq_ts; 1089 1.1 ryo txs->intr_idx = vmxq->vxq_intr_idx; 1090 1.1 ryo } 1091 1.1 ryo 1092 1.1 ryo for (i = 0; i < sc->vmx_nrxqueues; i++) { 1093 1.1 ryo vmxq = &sc->vmx_queue[i]; 1094 1.1 ryo rxq = &vmxq->vxq_rxqueue; 1095 1.1 ryo rxs = rxq->vxrxq_rs; 1096 1.1 ryo rxs->intr_idx = vmxq->vxq_intr_idx; 1097 1.1 ryo } 1098 1.1 ryo } 1099 1.1 ryo 1100 1.1 ryo static int 1101 1.1 ryo vmxnet3_setup_interrupts(struct vmxnet3_softc *sc) 1102 1.1 ryo { 1103 1.1 ryo int error; 1104 1.1 ryo 1105 1.1 ryo switch (sc->vmx_intr_type) { 1106 1.1 ryo case VMXNET3_IT_MSIX: 1107 1.1 ryo error = vmxnet3_setup_msix_interrupts(sc); 1108 1.1 ryo break; 1109 1.1 ryo case VMXNET3_IT_MSI: 1110 1.1 ryo error = vmxnet3_setup_msi_interrupt(sc); 1111 1.1 ryo break; 1112 1.1 ryo case VMXNET3_IT_LEGACY: 1113 1.1 ryo error = vmxnet3_setup_legacy_interrupt(sc); 1114 1.1 ryo break; 1115 1.1 ryo default: 1116 1.1 ryo panic("%s: invalid interrupt type %d", __func__, 1117 1.1 ryo sc->vmx_intr_type); 1118 1.1 ryo } 1119 1.1 ryo 1120 1.1 ryo if (error == 0) 1121 1.1 ryo vmxnet3_set_interrupt_idx(sc); 1122 1.1 ryo 1123 1.1 ryo return (error); 1124 1.1 ryo } 1125 1.1 ryo 1126 1.1 ryo static int 1127 1.1 ryo vmxnet3_init_rxq(struct vmxnet3_softc *sc, int q) 1128 1.1 ryo { 1129 1.1 ryo struct vmxnet3_rxqueue *rxq; 1130 1.1 ryo struct vmxnet3_rxring *rxr; 1131 1.1 ryo int i; 1132 1.1 ryo 1133 1.1 ryo rxq = &sc->vmx_queue[q].vxq_rxqueue; 1134 1.1 ryo 1135 1.1 ryo snprintf(rxq->vxrxq_name, sizeof(rxq->vxrxq_name), "%s-rx%d", 1136 1.1 ryo device_xname(sc->vmx_dev), q); 1137 1.1 ryo rxq->vxrxq_mtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET /* XXX */); 1138 1.1 ryo 1139 1.1 ryo rxq->vxrxq_sc = sc; 1140 1.1 ryo 1141 1.1 ryo for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) { 1142 1.1 ryo rxr = &rxq->vxrxq_cmd_ring[i]; 1143 1.1 ryo rxr->vxrxr_rid = i; 1144 1.1 ryo rxr->vxrxr_ndesc = sc->vmx_nrxdescs; 1145 1.1 ryo rxr->vxrxr_rxbuf = kmem_zalloc(rxr->vxrxr_ndesc * 1146 1.1 ryo sizeof(struct vmxnet3_rxbuf), KM_SLEEP); 1147 1.1 ryo 1148 1.1 ryo rxq->vxrxq_comp_ring.vxcr_ndesc += sc->vmx_nrxdescs; 1149 1.1 ryo } 1150 1.1 ryo 1151 1.12 riastrad rxq->vxrxq_stopping = true; 1152 1.12 riastrad 1153 1.1 ryo return (0); 1154 1.1 ryo } 1155 1.1 ryo 1156 1.1 ryo static int 1157 1.1 ryo vmxnet3_init_txq(struct vmxnet3_softc *sc, int q) 1158 1.1 ryo { 1159 1.1 ryo struct vmxnet3_txqueue *txq; 1160 1.1 ryo struct vmxnet3_txring *txr; 1161 1.1 ryo 1162 1.1 ryo txq = &sc->vmx_queue[q].vxq_txqueue; 1163 1.1 ryo txr = &txq->vxtxq_cmd_ring; 1164 1.1 ryo 1165 1.1 ryo snprintf(txq->vxtxq_name, sizeof(txq->vxtxq_name), "%s-tx%d", 1166 1.1 ryo device_xname(sc->vmx_dev), q); 1167 1.1 ryo txq->vxtxq_mtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET /* XXX */); 1168 1.1 ryo 1169 1.1 ryo txq->vxtxq_sc = sc; 1170 1.1 ryo 1171 1.1 ryo txq->vxtxq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE, 1172 1.1 ryo vmxnet3_deferred_transmit, txq); 1173 1.1 ryo if (txq->vxtxq_si == NULL) { 1174 1.1 ryo mutex_obj_free(txq->vxtxq_mtx); 1175 1.1 ryo aprint_error_dev(sc->vmx_dev, 1176 1.1 ryo "softint_establish for vxtxq_si failed\n"); 1177 1.1 ryo return ENOMEM; 1178 1.1 ryo } 1179 1.1 ryo 1180 1.1 ryo txr->vxtxr_ndesc = sc->vmx_ntxdescs; 1181 1.1 ryo txr->vxtxr_txbuf = kmem_zalloc(txr->vxtxr_ndesc * 1182 1.1 ryo sizeof(struct vmxnet3_txbuf), KM_SLEEP); 1183 1.1 ryo 1184 1.1 ryo txq->vxtxq_comp_ring.vxcr_ndesc = sc->vmx_ntxdescs; 1185 1.1 ryo 1186 1.1 ryo txq->vxtxq_interq = pcq_create(sc->vmx_ntxdescs, KM_SLEEP); 1187 1.1 ryo 1188 1.12 riastrad txq->vxtxq_stopping = true; 1189 1.12 riastrad 1190 1.1 ryo return (0); 1191 1.1 ryo } 1192 1.1 ryo 1193 1.1 ryo static int 1194 1.1 ryo vmxnet3_alloc_rxtx_queues(struct vmxnet3_softc *sc) 1195 1.1 ryo { 1196 1.1 ryo int i, error, max_nqueues; 1197 1.1 ryo 1198 1.1 ryo KASSERT(!cpu_intr_p()); 1199 1.1 ryo KASSERT(!cpu_softintr_p()); 1200 1.1 ryo 1201 1.1 ryo /* 1202 1.1 ryo * Only attempt to create multiple queues if MSIX is available. 1203 1.1 ryo * This check prevents us from allocating queue structures that 1204 1.1 ryo * we will not use. 1205 1.1 ryo * 1206 1.1 ryo * FreeBSD: 1207 1.1 ryo * MSIX is disabled by default because its apparently broken for 1208 1.1 ryo * devices passed through by at least ESXi 5.1. 1209 1.1 ryo * The hw.pci.honor_msi_blacklist tunable must be set to zero for MSIX. 1210 1.1 ryo */ 1211 1.1 ryo if (sc->vmx_flags & VMXNET3_FLAG_NO_MSIX) { 1212 1.1 ryo sc->vmx_max_nrxqueues = 1; 1213 1.1 ryo sc->vmx_max_ntxqueues = 1; 1214 1.1 ryo } 1215 1.1 ryo 1216 1.1 ryo max_nqueues = MAX(sc->vmx_max_ntxqueues, sc->vmx_max_nrxqueues); 1217 1.1 ryo sc->vmx_queue = kmem_zalloc(sizeof(struct vmxnet3_queue) * max_nqueues, 1218 1.1 ryo KM_SLEEP); 1219 1.1 ryo 1220 1.1 ryo for (i = 0; i < max_nqueues; i++) { 1221 1.1 ryo struct vmxnet3_queue *vmxq = &sc->vmx_queue[i]; 1222 1.1 ryo vmxq->vxq_id = i; 1223 1.1 ryo } 1224 1.1 ryo 1225 1.1 ryo for (i = 0; i < sc->vmx_max_nrxqueues; i++) { 1226 1.1 ryo error = vmxnet3_init_rxq(sc, i); 1227 1.1 ryo if (error) 1228 1.1 ryo return (error); 1229 1.1 ryo } 1230 1.1 ryo 1231 1.1 ryo for (i = 0; i < sc->vmx_max_ntxqueues; i++) { 1232 1.1 ryo error = vmxnet3_init_txq(sc, i); 1233 1.1 ryo if (error) 1234 1.1 ryo return (error); 1235 1.1 ryo } 1236 1.1 ryo 1237 1.1 ryo return (0); 1238 1.1 ryo } 1239 1.1 ryo 1240 1.1 ryo static void 1241 1.1 ryo vmxnet3_destroy_rxq(struct vmxnet3_rxqueue *rxq) 1242 1.1 ryo { 1243 1.1 ryo struct vmxnet3_rxring *rxr; 1244 1.1 ryo int i; 1245 1.1 ryo 1246 1.1 ryo rxq->vxrxq_sc = NULL; 1247 1.1 ryo 1248 1.1 ryo for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) { 1249 1.1 ryo rxr = &rxq->vxrxq_cmd_ring[i]; 1250 1.1 ryo 1251 1.1 ryo if (rxr->vxrxr_rxbuf != NULL) { 1252 1.1 ryo kmem_free(rxr->vxrxr_rxbuf, 1253 1.1 ryo rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxbuf)); 1254 1.1 ryo rxr->vxrxr_rxbuf = NULL; 1255 1.1 ryo } 1256 1.1 ryo } 1257 1.1 ryo 1258 1.1 ryo if (rxq->vxrxq_mtx != NULL) 1259 1.1 ryo mutex_obj_free(rxq->vxrxq_mtx); 1260 1.1 ryo } 1261 1.1 ryo 1262 1.1 ryo static void 1263 1.1 ryo vmxnet3_destroy_txq(struct vmxnet3_txqueue *txq) 1264 1.1 ryo { 1265 1.1 ryo struct vmxnet3_txring *txr; 1266 1.1 ryo struct mbuf *m; 1267 1.1 ryo 1268 1.1 ryo txr = &txq->vxtxq_cmd_ring; 1269 1.1 ryo 1270 1.1 ryo txq->vxtxq_sc = NULL; 1271 1.1 ryo 1272 1.1 ryo softint_disestablish(txq->vxtxq_si); 1273 1.1 ryo 1274 1.1 ryo while ((m = pcq_get(txq->vxtxq_interq)) != NULL) 1275 1.1 ryo m_freem(m); 1276 1.1 ryo pcq_destroy(txq->vxtxq_interq); 1277 1.1 ryo 1278 1.1 ryo if (txr->vxtxr_txbuf != NULL) { 1279 1.1 ryo kmem_free(txr->vxtxr_txbuf, 1280 1.1 ryo txr->vxtxr_ndesc * sizeof(struct vmxnet3_txbuf)); 1281 1.1 ryo txr->vxtxr_txbuf = NULL; 1282 1.1 ryo } 1283 1.1 ryo 1284 1.1 ryo if (txq->vxtxq_mtx != NULL) 1285 1.1 ryo mutex_obj_free(txq->vxtxq_mtx); 1286 1.1 ryo } 1287 1.1 ryo 1288 1.1 ryo static void 1289 1.1 ryo vmxnet3_free_rxtx_queues(struct vmxnet3_softc *sc) 1290 1.1 ryo { 1291 1.1 ryo int i; 1292 1.1 ryo 1293 1.1 ryo if (sc->vmx_queue != NULL) { 1294 1.1 ryo int max_nqueues; 1295 1.1 ryo 1296 1.1 ryo for (i = 0; i < sc->vmx_max_nrxqueues; i++) 1297 1.1 ryo vmxnet3_destroy_rxq(&sc->vmx_queue[i].vxq_rxqueue); 1298 1.1 ryo 1299 1.1 ryo for (i = 0; i < sc->vmx_max_ntxqueues; i++) 1300 1.1 ryo vmxnet3_destroy_txq(&sc->vmx_queue[i].vxq_txqueue); 1301 1.1 ryo 1302 1.1 ryo max_nqueues = MAX(sc->vmx_max_nrxqueues, sc->vmx_max_ntxqueues); 1303 1.1 ryo kmem_free(sc->vmx_queue, 1304 1.1 ryo sizeof(struct vmxnet3_queue) * max_nqueues); 1305 1.1 ryo } 1306 1.1 ryo } 1307 1.1 ryo 1308 1.1 ryo static int 1309 1.1 ryo vmxnet3_alloc_shared_data(struct vmxnet3_softc *sc) 1310 1.1 ryo { 1311 1.1 ryo device_t dev; 1312 1.1 ryo uint8_t *kva; 1313 1.1 ryo size_t size; 1314 1.1 ryo int i, error; 1315 1.1 ryo 1316 1.1 ryo dev = sc->vmx_dev; 1317 1.1 ryo 1318 1.1 ryo size = sizeof(struct vmxnet3_driver_shared); 1319 1.1 ryo error = vmxnet3_dma_malloc(sc, size, 1, &sc->vmx_ds_dma); 1320 1.1 ryo if (error) { 1321 1.1 ryo device_printf(dev, "cannot alloc shared memory\n"); 1322 1.1 ryo return (error); 1323 1.1 ryo } 1324 1.1 ryo sc->vmx_ds = (struct vmxnet3_driver_shared *) sc->vmx_ds_dma.dma_vaddr; 1325 1.1 ryo 1326 1.1 ryo size = sc->vmx_ntxqueues * sizeof(struct vmxnet3_txq_shared) + 1327 1.1 ryo sc->vmx_nrxqueues * sizeof(struct vmxnet3_rxq_shared); 1328 1.1 ryo error = vmxnet3_dma_malloc(sc, size, 128, &sc->vmx_qs_dma); 1329 1.1 ryo if (error) { 1330 1.1 ryo device_printf(dev, "cannot alloc queue shared memory\n"); 1331 1.1 ryo return (error); 1332 1.1 ryo } 1333 1.1 ryo sc->vmx_qs = (void *) sc->vmx_qs_dma.dma_vaddr; 1334 1.1 ryo kva = sc->vmx_qs; 1335 1.1 ryo 1336 1.1 ryo for (i = 0; i < sc->vmx_ntxqueues; i++) { 1337 1.1 ryo sc->vmx_queue[i].vxq_txqueue.vxtxq_ts = 1338 1.1 ryo (struct vmxnet3_txq_shared *) kva; 1339 1.1 ryo kva += sizeof(struct vmxnet3_txq_shared); 1340 1.1 ryo } 1341 1.1 ryo for (i = 0; i < sc->vmx_nrxqueues; i++) { 1342 1.1 ryo sc->vmx_queue[i].vxq_rxqueue.vxrxq_rs = 1343 1.1 ryo (struct vmxnet3_rxq_shared *) kva; 1344 1.1 ryo kva += sizeof(struct vmxnet3_rxq_shared); 1345 1.1 ryo } 1346 1.1 ryo 1347 1.1 ryo if (sc->vmx_flags & VMXNET3_FLAG_RSS) { 1348 1.1 ryo size = sizeof(struct vmxnet3_rss_shared); 1349 1.1 ryo error = vmxnet3_dma_malloc(sc, size, 128, &sc->vmx_rss_dma); 1350 1.1 ryo if (error) { 1351 1.1 ryo device_printf(dev, "cannot alloc rss shared memory\n"); 1352 1.1 ryo return (error); 1353 1.1 ryo } 1354 1.1 ryo sc->vmx_rss = 1355 1.1 ryo (struct vmxnet3_rss_shared *) sc->vmx_rss_dma.dma_vaddr; 1356 1.1 ryo } 1357 1.1 ryo 1358 1.1 ryo return (0); 1359 1.1 ryo } 1360 1.1 ryo 1361 1.1 ryo static void 1362 1.1 ryo vmxnet3_free_shared_data(struct vmxnet3_softc *sc) 1363 1.1 ryo { 1364 1.1 ryo 1365 1.1 ryo if (sc->vmx_rss != NULL) { 1366 1.1 ryo vmxnet3_dma_free(sc, &sc->vmx_rss_dma); 1367 1.1 ryo sc->vmx_rss = NULL; 1368 1.1 ryo } 1369 1.1 ryo 1370 1.1 ryo if (sc->vmx_qs != NULL) { 1371 1.1 ryo vmxnet3_dma_free(sc, &sc->vmx_qs_dma); 1372 1.1 ryo sc->vmx_qs = NULL; 1373 1.1 ryo } 1374 1.1 ryo 1375 1.1 ryo if (sc->vmx_ds != NULL) { 1376 1.1 ryo vmxnet3_dma_free(sc, &sc->vmx_ds_dma); 1377 1.1 ryo sc->vmx_ds = NULL; 1378 1.1 ryo } 1379 1.1 ryo } 1380 1.1 ryo 1381 1.1 ryo static int 1382 1.1 ryo vmxnet3_alloc_txq_data(struct vmxnet3_softc *sc) 1383 1.1 ryo { 1384 1.1 ryo device_t dev; 1385 1.1 ryo struct vmxnet3_txqueue *txq; 1386 1.1 ryo struct vmxnet3_txring *txr; 1387 1.1 ryo struct vmxnet3_comp_ring *txc; 1388 1.1 ryo size_t descsz, compsz; 1389 1.2 ryo u_int i; 1390 1.2 ryo int q, error; 1391 1.1 ryo 1392 1.1 ryo dev = sc->vmx_dev; 1393 1.1 ryo 1394 1.1 ryo for (q = 0; q < sc->vmx_ntxqueues; q++) { 1395 1.1 ryo txq = &sc->vmx_queue[q].vxq_txqueue; 1396 1.1 ryo txr = &txq->vxtxq_cmd_ring; 1397 1.1 ryo txc = &txq->vxtxq_comp_ring; 1398 1.1 ryo 1399 1.1 ryo descsz = txr->vxtxr_ndesc * sizeof(struct vmxnet3_txdesc); 1400 1.1 ryo compsz = txr->vxtxr_ndesc * sizeof(struct vmxnet3_txcompdesc); 1401 1.1 ryo 1402 1.1 ryo error = vmxnet3_dma_malloc(sc, descsz, 512, &txr->vxtxr_dma); 1403 1.1 ryo if (error) { 1404 1.1 ryo device_printf(dev, "cannot alloc Tx descriptors for " 1405 1.1 ryo "queue %d error %d\n", q, error); 1406 1.1 ryo return (error); 1407 1.1 ryo } 1408 1.1 ryo txr->vxtxr_txd = 1409 1.1 ryo (struct vmxnet3_txdesc *) txr->vxtxr_dma.dma_vaddr; 1410 1.1 ryo 1411 1.1 ryo error = vmxnet3_dma_malloc(sc, compsz, 512, &txc->vxcr_dma); 1412 1.1 ryo if (error) { 1413 1.1 ryo device_printf(dev, "cannot alloc Tx comp descriptors " 1414 1.1 ryo "for queue %d error %d\n", q, error); 1415 1.1 ryo return (error); 1416 1.1 ryo } 1417 1.1 ryo txc->vxcr_u.txcd = 1418 1.1 ryo (struct vmxnet3_txcompdesc *) txc->vxcr_dma.dma_vaddr; 1419 1.1 ryo 1420 1.1 ryo for (i = 0; i < txr->vxtxr_ndesc; i++) { 1421 1.1 ryo error = bus_dmamap_create(sc->vmx_dmat, VMXNET3_TX_MAXSIZE, 1422 1.1 ryo VMXNET3_TX_MAXSEGS, VMXNET3_TX_MAXSEGSIZE, 0, BUS_DMA_NOWAIT, 1423 1.1 ryo &txr->vxtxr_txbuf[i].vtxb_dmamap); 1424 1.1 ryo if (error) { 1425 1.1 ryo device_printf(dev, "unable to create Tx buf " 1426 1.1 ryo "dmamap for queue %d idx %d\n", q, i); 1427 1.1 ryo return (error); 1428 1.1 ryo } 1429 1.1 ryo } 1430 1.1 ryo } 1431 1.1 ryo 1432 1.1 ryo return (0); 1433 1.1 ryo } 1434 1.1 ryo 1435 1.1 ryo static void 1436 1.1 ryo vmxnet3_free_txq_data(struct vmxnet3_softc *sc) 1437 1.1 ryo { 1438 1.1 ryo struct vmxnet3_txqueue *txq; 1439 1.1 ryo struct vmxnet3_txring *txr; 1440 1.1 ryo struct vmxnet3_comp_ring *txc; 1441 1.1 ryo struct vmxnet3_txbuf *txb; 1442 1.2 ryo u_int i; 1443 1.2 ryo int q; 1444 1.1 ryo 1445 1.1 ryo for (q = 0; q < sc->vmx_ntxqueues; q++) { 1446 1.1 ryo txq = &sc->vmx_queue[q].vxq_txqueue; 1447 1.1 ryo txr = &txq->vxtxq_cmd_ring; 1448 1.1 ryo txc = &txq->vxtxq_comp_ring; 1449 1.1 ryo 1450 1.1 ryo for (i = 0; i < txr->vxtxr_ndesc; i++) { 1451 1.1 ryo txb = &txr->vxtxr_txbuf[i]; 1452 1.1 ryo if (txb->vtxb_dmamap != NULL) { 1453 1.1 ryo bus_dmamap_destroy(sc->vmx_dmat, 1454 1.1 ryo txb->vtxb_dmamap); 1455 1.1 ryo txb->vtxb_dmamap = NULL; 1456 1.1 ryo } 1457 1.1 ryo } 1458 1.1 ryo 1459 1.1 ryo if (txc->vxcr_u.txcd != NULL) { 1460 1.1 ryo vmxnet3_dma_free(sc, &txc->vxcr_dma); 1461 1.1 ryo txc->vxcr_u.txcd = NULL; 1462 1.1 ryo } 1463 1.1 ryo 1464 1.1 ryo if (txr->vxtxr_txd != NULL) { 1465 1.1 ryo vmxnet3_dma_free(sc, &txr->vxtxr_dma); 1466 1.1 ryo txr->vxtxr_txd = NULL; 1467 1.1 ryo } 1468 1.1 ryo } 1469 1.1 ryo } 1470 1.1 ryo 1471 1.1 ryo static int 1472 1.1 ryo vmxnet3_alloc_rxq_data(struct vmxnet3_softc *sc) 1473 1.1 ryo { 1474 1.1 ryo device_t dev; 1475 1.1 ryo struct vmxnet3_rxqueue *rxq; 1476 1.1 ryo struct vmxnet3_rxring *rxr; 1477 1.1 ryo struct vmxnet3_comp_ring *rxc; 1478 1.1 ryo int descsz, compsz; 1479 1.2 ryo u_int i, j; 1480 1.2 ryo int q, error; 1481 1.1 ryo 1482 1.1 ryo dev = sc->vmx_dev; 1483 1.1 ryo 1484 1.1 ryo for (q = 0; q < sc->vmx_nrxqueues; q++) { 1485 1.1 ryo rxq = &sc->vmx_queue[q].vxq_rxqueue; 1486 1.1 ryo rxc = &rxq->vxrxq_comp_ring; 1487 1.1 ryo compsz = 0; 1488 1.1 ryo 1489 1.1 ryo for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) { 1490 1.1 ryo rxr = &rxq->vxrxq_cmd_ring[i]; 1491 1.1 ryo 1492 1.1 ryo descsz = rxr->vxrxr_ndesc * 1493 1.1 ryo sizeof(struct vmxnet3_rxdesc); 1494 1.1 ryo compsz += rxr->vxrxr_ndesc * 1495 1.1 ryo sizeof(struct vmxnet3_rxcompdesc); 1496 1.1 ryo 1497 1.1 ryo error = vmxnet3_dma_malloc(sc, descsz, 512, 1498 1.1 ryo &rxr->vxrxr_dma); 1499 1.1 ryo if (error) { 1500 1.1 ryo device_printf(dev, "cannot allocate Rx " 1501 1.1 ryo "descriptors for queue %d/%d error %d\n", 1502 1.1 ryo i, q, error); 1503 1.1 ryo return (error); 1504 1.1 ryo } 1505 1.1 ryo rxr->vxrxr_rxd = 1506 1.1 ryo (struct vmxnet3_rxdesc *) rxr->vxrxr_dma.dma_vaddr; 1507 1.1 ryo } 1508 1.1 ryo 1509 1.1 ryo error = vmxnet3_dma_malloc(sc, compsz, 512, &rxc->vxcr_dma); 1510 1.1 ryo if (error) { 1511 1.1 ryo device_printf(dev, "cannot alloc Rx comp descriptors " 1512 1.1 ryo "for queue %d error %d\n", q, error); 1513 1.1 ryo return (error); 1514 1.1 ryo } 1515 1.1 ryo rxc->vxcr_u.rxcd = 1516 1.1 ryo (struct vmxnet3_rxcompdesc *) rxc->vxcr_dma.dma_vaddr; 1517 1.1 ryo 1518 1.1 ryo for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) { 1519 1.1 ryo rxr = &rxq->vxrxq_cmd_ring[i]; 1520 1.1 ryo 1521 1.1 ryo error = bus_dmamap_create(sc->vmx_dmat, JUMBO_LEN, 1, 1522 1.1 ryo JUMBO_LEN, 0, BUS_DMA_NOWAIT, 1523 1.1 ryo &rxr->vxrxr_spare_dmap); 1524 1.1 ryo if (error) { 1525 1.1 ryo device_printf(dev, "unable to create spare " 1526 1.1 ryo "dmamap for queue %d/%d error %d\n", 1527 1.1 ryo q, i, error); 1528 1.1 ryo return (error); 1529 1.1 ryo } 1530 1.1 ryo 1531 1.1 ryo for (j = 0; j < rxr->vxrxr_ndesc; j++) { 1532 1.1 ryo error = bus_dmamap_create(sc->vmx_dmat, JUMBO_LEN, 1, 1533 1.1 ryo JUMBO_LEN, 0, BUS_DMA_NOWAIT, 1534 1.1 ryo &rxr->vxrxr_rxbuf[j].vrxb_dmamap); 1535 1.1 ryo if (error) { 1536 1.1 ryo device_printf(dev, "unable to create " 1537 1.1 ryo "dmamap for queue %d/%d slot %d " 1538 1.1 ryo "error %d\n", 1539 1.1 ryo q, i, j, error); 1540 1.1 ryo return (error); 1541 1.1 ryo } 1542 1.1 ryo } 1543 1.1 ryo } 1544 1.1 ryo } 1545 1.1 ryo 1546 1.1 ryo return (0); 1547 1.1 ryo } 1548 1.1 ryo 1549 1.1 ryo static void 1550 1.1 ryo vmxnet3_free_rxq_data(struct vmxnet3_softc *sc) 1551 1.1 ryo { 1552 1.1 ryo struct vmxnet3_rxqueue *rxq; 1553 1.1 ryo struct vmxnet3_rxring *rxr; 1554 1.1 ryo struct vmxnet3_comp_ring *rxc; 1555 1.1 ryo struct vmxnet3_rxbuf *rxb; 1556 1.2 ryo u_int i, j; 1557 1.2 ryo int q; 1558 1.1 ryo 1559 1.1 ryo for (q = 0; q < sc->vmx_nrxqueues; q++) { 1560 1.1 ryo rxq = &sc->vmx_queue[q].vxq_rxqueue; 1561 1.1 ryo rxc = &rxq->vxrxq_comp_ring; 1562 1.1 ryo 1563 1.1 ryo for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) { 1564 1.1 ryo rxr = &rxq->vxrxq_cmd_ring[i]; 1565 1.1 ryo 1566 1.1 ryo if (rxr->vxrxr_spare_dmap != NULL) { 1567 1.1 ryo bus_dmamap_destroy(sc->vmx_dmat, 1568 1.1 ryo rxr->vxrxr_spare_dmap); 1569 1.1 ryo rxr->vxrxr_spare_dmap = NULL; 1570 1.1 ryo } 1571 1.1 ryo 1572 1.1 ryo for (j = 0; j < rxr->vxrxr_ndesc; j++) { 1573 1.1 ryo rxb = &rxr->vxrxr_rxbuf[j]; 1574 1.1 ryo if (rxb->vrxb_dmamap != NULL) { 1575 1.1 ryo bus_dmamap_destroy(sc->vmx_dmat, 1576 1.1 ryo rxb->vrxb_dmamap); 1577 1.1 ryo rxb->vrxb_dmamap = NULL; 1578 1.1 ryo } 1579 1.1 ryo } 1580 1.1 ryo } 1581 1.1 ryo 1582 1.1 ryo if (rxc->vxcr_u.rxcd != NULL) { 1583 1.1 ryo vmxnet3_dma_free(sc, &rxc->vxcr_dma); 1584 1.1 ryo rxc->vxcr_u.rxcd = NULL; 1585 1.1 ryo } 1586 1.1 ryo 1587 1.1 ryo for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) { 1588 1.1 ryo rxr = &rxq->vxrxq_cmd_ring[i]; 1589 1.1 ryo 1590 1.1 ryo if (rxr->vxrxr_rxd != NULL) { 1591 1.1 ryo vmxnet3_dma_free(sc, &rxr->vxrxr_dma); 1592 1.1 ryo rxr->vxrxr_rxd = NULL; 1593 1.1 ryo } 1594 1.1 ryo } 1595 1.1 ryo } 1596 1.1 ryo } 1597 1.1 ryo 1598 1.1 ryo static int 1599 1.1 ryo vmxnet3_alloc_queue_data(struct vmxnet3_softc *sc) 1600 1.1 ryo { 1601 1.1 ryo int error; 1602 1.1 ryo 1603 1.1 ryo error = vmxnet3_alloc_txq_data(sc); 1604 1.1 ryo if (error) 1605 1.1 ryo return (error); 1606 1.1 ryo 1607 1.1 ryo error = vmxnet3_alloc_rxq_data(sc); 1608 1.1 ryo if (error) 1609 1.1 ryo return (error); 1610 1.1 ryo 1611 1.1 ryo return (0); 1612 1.1 ryo } 1613 1.1 ryo 1614 1.1 ryo static void 1615 1.1 ryo vmxnet3_free_queue_data(struct vmxnet3_softc *sc) 1616 1.1 ryo { 1617 1.1 ryo 1618 1.1 ryo if (sc->vmx_queue != NULL) { 1619 1.1 ryo vmxnet3_free_rxq_data(sc); 1620 1.1 ryo vmxnet3_free_txq_data(sc); 1621 1.1 ryo } 1622 1.1 ryo } 1623 1.1 ryo 1624 1.1 ryo static int 1625 1.1 ryo vmxnet3_alloc_mcast_table(struct vmxnet3_softc *sc) 1626 1.1 ryo { 1627 1.1 ryo int error; 1628 1.1 ryo 1629 1.1 ryo error = vmxnet3_dma_malloc(sc, VMXNET3_MULTICAST_MAX * ETHER_ADDR_LEN, 1630 1.1 ryo 32, &sc->vmx_mcast_dma); 1631 1.1 ryo if (error) 1632 1.1 ryo device_printf(sc->vmx_dev, "unable to alloc multicast table\n"); 1633 1.1 ryo else 1634 1.1 ryo sc->vmx_mcast = sc->vmx_mcast_dma.dma_vaddr; 1635 1.1 ryo 1636 1.1 ryo return (error); 1637 1.1 ryo } 1638 1.1 ryo 1639 1.1 ryo static void 1640 1.1 ryo vmxnet3_free_mcast_table(struct vmxnet3_softc *sc) 1641 1.1 ryo { 1642 1.1 ryo 1643 1.1 ryo if (sc->vmx_mcast != NULL) { 1644 1.1 ryo vmxnet3_dma_free(sc, &sc->vmx_mcast_dma); 1645 1.1 ryo sc->vmx_mcast = NULL; 1646 1.1 ryo } 1647 1.1 ryo } 1648 1.1 ryo 1649 1.1 ryo static void 1650 1.1 ryo vmxnet3_init_shared_data(struct vmxnet3_softc *sc) 1651 1.1 ryo { 1652 1.1 ryo struct vmxnet3_driver_shared *ds; 1653 1.1 ryo struct vmxnet3_txqueue *txq; 1654 1.1 ryo struct vmxnet3_txq_shared *txs; 1655 1.1 ryo struct vmxnet3_rxqueue *rxq; 1656 1.1 ryo struct vmxnet3_rxq_shared *rxs; 1657 1.1 ryo int i; 1658 1.1 ryo 1659 1.1 ryo ds = sc->vmx_ds; 1660 1.1 ryo 1661 1.1 ryo /* 1662 1.1 ryo * Initialize fields of the shared data that remains the same across 1663 1.1 ryo * reinits. Note the shared data is zero'd when allocated. 1664 1.1 ryo */ 1665 1.1 ryo 1666 1.1 ryo ds->magic = VMXNET3_REV1_MAGIC; 1667 1.1 ryo 1668 1.1 ryo /* DriverInfo */ 1669 1.1 ryo ds->version = VMXNET3_DRIVER_VERSION; 1670 1.1 ryo ds->guest = VMXNET3_GOS_FREEBSD | 1671 1.1 ryo #ifdef __LP64__ 1672 1.1 ryo VMXNET3_GOS_64BIT; 1673 1.1 ryo #else 1674 1.1 ryo VMXNET3_GOS_32BIT; 1675 1.1 ryo #endif 1676 1.1 ryo ds->vmxnet3_revision = 1; 1677 1.1 ryo ds->upt_version = 1; 1678 1.1 ryo 1679 1.1 ryo /* Misc. conf */ 1680 1.1 ryo ds->driver_data = vtophys(sc); 1681 1.1 ryo ds->driver_data_len = sizeof(struct vmxnet3_softc); 1682 1.1 ryo ds->queue_shared = sc->vmx_qs_dma.dma_paddr; 1683 1.1 ryo ds->queue_shared_len = sc->vmx_qs_dma.dma_size; 1684 1.1 ryo ds->nrxsg_max = sc->vmx_max_rxsegs; 1685 1.1 ryo 1686 1.1 ryo /* RSS conf */ 1687 1.1 ryo if (sc->vmx_flags & VMXNET3_FLAG_RSS) { 1688 1.1 ryo ds->rss.version = 1; 1689 1.1 ryo ds->rss.paddr = sc->vmx_rss_dma.dma_paddr; 1690 1.1 ryo ds->rss.len = sc->vmx_rss_dma.dma_size; 1691 1.1 ryo } 1692 1.1 ryo 1693 1.1 ryo /* Interrupt control. */ 1694 1.1 ryo ds->automask = sc->vmx_intr_mask_mode == VMXNET3_IMM_AUTO; 1695 1.1 ryo ds->nintr = sc->vmx_nintrs; 1696 1.1 ryo ds->evintr = sc->vmx_event_intr_idx; 1697 1.1 ryo ds->ictrl = VMXNET3_ICTRL_DISABLE_ALL; 1698 1.1 ryo 1699 1.1 ryo for (i = 0; i < sc->vmx_nintrs; i++) 1700 1.1 ryo ds->modlevel[i] = UPT1_IMOD_ADAPTIVE; 1701 1.1 ryo 1702 1.1 ryo /* Receive filter. */ 1703 1.1 ryo ds->mcast_table = sc->vmx_mcast_dma.dma_paddr; 1704 1.1 ryo ds->mcast_tablelen = sc->vmx_mcast_dma.dma_size; 1705 1.1 ryo 1706 1.1 ryo /* Tx queues */ 1707 1.1 ryo for (i = 0; i < sc->vmx_ntxqueues; i++) { 1708 1.1 ryo txq = &sc->vmx_queue[i].vxq_txqueue; 1709 1.1 ryo txs = txq->vxtxq_ts; 1710 1.1 ryo 1711 1.1 ryo txs->cmd_ring = txq->vxtxq_cmd_ring.vxtxr_dma.dma_paddr; 1712 1.1 ryo txs->cmd_ring_len = txq->vxtxq_cmd_ring.vxtxr_ndesc; 1713 1.1 ryo txs->comp_ring = txq->vxtxq_comp_ring.vxcr_dma.dma_paddr; 1714 1.1 ryo txs->comp_ring_len = txq->vxtxq_comp_ring.vxcr_ndesc; 1715 1.1 ryo txs->driver_data = vtophys(txq); 1716 1.1 ryo txs->driver_data_len = sizeof(struct vmxnet3_txqueue); 1717 1.1 ryo } 1718 1.1 ryo 1719 1.1 ryo /* Rx queues */ 1720 1.1 ryo for (i = 0; i < sc->vmx_nrxqueues; i++) { 1721 1.1 ryo rxq = &sc->vmx_queue[i].vxq_rxqueue; 1722 1.1 ryo rxs = rxq->vxrxq_rs; 1723 1.1 ryo 1724 1.1 ryo rxs->cmd_ring[0] = rxq->vxrxq_cmd_ring[0].vxrxr_dma.dma_paddr; 1725 1.1 ryo rxs->cmd_ring_len[0] = rxq->vxrxq_cmd_ring[0].vxrxr_ndesc; 1726 1.1 ryo rxs->cmd_ring[1] = rxq->vxrxq_cmd_ring[1].vxrxr_dma.dma_paddr; 1727 1.1 ryo rxs->cmd_ring_len[1] = rxq->vxrxq_cmd_ring[1].vxrxr_ndesc; 1728 1.1 ryo rxs->comp_ring = rxq->vxrxq_comp_ring.vxcr_dma.dma_paddr; 1729 1.1 ryo rxs->comp_ring_len = rxq->vxrxq_comp_ring.vxcr_ndesc; 1730 1.1 ryo rxs->driver_data = vtophys(rxq); 1731 1.1 ryo rxs->driver_data_len = sizeof(struct vmxnet3_rxqueue); 1732 1.1 ryo } 1733 1.1 ryo } 1734 1.1 ryo 1735 1.1 ryo static void 1736 1.1 ryo vmxnet3_reinit_rss_shared_data(struct vmxnet3_softc *sc) 1737 1.1 ryo { 1738 1.1 ryo /* 1739 1.1 ryo * Use the same key as the Linux driver until FreeBSD can do 1740 1.1 ryo * RSS (presumably Toeplitz) in software. 1741 1.1 ryo */ 1742 1.1 ryo static const uint8_t rss_key[UPT1_RSS_MAX_KEY_SIZE] = { 1743 1.1 ryo 0x3b, 0x56, 0xd1, 0x56, 0x13, 0x4a, 0xe7, 0xac, 1744 1.1 ryo 0xe8, 0x79, 0x09, 0x75, 0xe8, 0x65, 0x79, 0x28, 1745 1.1 ryo 0x35, 0x12, 0xb9, 0x56, 0x7c, 0x76, 0x4b, 0x70, 1746 1.1 ryo 0xd8, 0x56, 0xa3, 0x18, 0x9b, 0x0a, 0xee, 0xf3, 1747 1.1 ryo 0x96, 0xa6, 0x9f, 0x8f, 0x9e, 0x8c, 0x90, 0xc9, 1748 1.1 ryo }; 1749 1.1 ryo 1750 1.1 ryo struct vmxnet3_rss_shared *rss; 1751 1.1 ryo int i; 1752 1.1 ryo 1753 1.1 ryo rss = sc->vmx_rss; 1754 1.1 ryo 1755 1.1 ryo rss->hash_type = 1756 1.1 ryo UPT1_RSS_HASH_TYPE_IPV4 | UPT1_RSS_HASH_TYPE_TCP_IPV4 | 1757 1.1 ryo UPT1_RSS_HASH_TYPE_IPV6 | UPT1_RSS_HASH_TYPE_TCP_IPV6; 1758 1.1 ryo rss->hash_func = UPT1_RSS_HASH_FUNC_TOEPLITZ; 1759 1.1 ryo rss->hash_key_size = UPT1_RSS_MAX_KEY_SIZE; 1760 1.1 ryo rss->ind_table_size = UPT1_RSS_MAX_IND_TABLE_SIZE; 1761 1.1 ryo memcpy(rss->hash_key, rss_key, UPT1_RSS_MAX_KEY_SIZE); 1762 1.1 ryo 1763 1.1 ryo for (i = 0; i < UPT1_RSS_MAX_IND_TABLE_SIZE; i++) 1764 1.1 ryo rss->ind_table[i] = i % sc->vmx_nrxqueues; 1765 1.1 ryo } 1766 1.1 ryo 1767 1.1 ryo static void 1768 1.1 ryo vmxnet3_reinit_shared_data(struct vmxnet3_softc *sc) 1769 1.1 ryo { 1770 1.1 ryo struct ifnet *ifp; 1771 1.1 ryo struct vmxnet3_driver_shared *ds; 1772 1.1 ryo 1773 1.1 ryo ifp = &sc->vmx_ethercom.ec_if; 1774 1.1 ryo ds = sc->vmx_ds; 1775 1.1 ryo 1776 1.1 ryo ds->mtu = ifp->if_mtu; 1777 1.1 ryo ds->ntxqueue = sc->vmx_ntxqueues; 1778 1.1 ryo ds->nrxqueue = sc->vmx_nrxqueues; 1779 1.1 ryo 1780 1.1 ryo ds->upt_features = 0; 1781 1.1 ryo if (ifp->if_capenable & 1782 1.1 ryo (IFCAP_CSUM_IPv4_Rx | IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx | 1783 1.1 ryo IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx)) 1784 1.1 ryo ds->upt_features |= UPT1_F_CSUM; 1785 1.1 ryo if (sc->vmx_ethercom.ec_capenable & ETHERCAP_VLAN_HWTAGGING) 1786 1.1 ryo ds->upt_features |= UPT1_F_VLAN; 1787 1.1 ryo 1788 1.1 ryo if (sc->vmx_flags & VMXNET3_FLAG_RSS) { 1789 1.1 ryo ds->upt_features |= UPT1_F_RSS; 1790 1.1 ryo vmxnet3_reinit_rss_shared_data(sc); 1791 1.1 ryo } 1792 1.1 ryo 1793 1.1 ryo vmxnet3_write_bar1(sc, VMXNET3_BAR1_DSL, sc->vmx_ds_dma.dma_paddr); 1794 1.1 ryo vmxnet3_write_bar1(sc, VMXNET3_BAR1_DSH, 1795 1.1 ryo (uint64_t) sc->vmx_ds_dma.dma_paddr >> 32); 1796 1.1 ryo } 1797 1.1 ryo 1798 1.1 ryo static int 1799 1.1 ryo vmxnet3_alloc_data(struct vmxnet3_softc *sc) 1800 1.1 ryo { 1801 1.1 ryo int error; 1802 1.1 ryo 1803 1.1 ryo error = vmxnet3_alloc_shared_data(sc); 1804 1.1 ryo if (error) 1805 1.1 ryo return (error); 1806 1.1 ryo 1807 1.1 ryo error = vmxnet3_alloc_queue_data(sc); 1808 1.1 ryo if (error) 1809 1.1 ryo return (error); 1810 1.1 ryo 1811 1.1 ryo error = vmxnet3_alloc_mcast_table(sc); 1812 1.1 ryo if (error) 1813 1.1 ryo return (error); 1814 1.1 ryo 1815 1.1 ryo vmxnet3_init_shared_data(sc); 1816 1.1 ryo 1817 1.1 ryo return (0); 1818 1.1 ryo } 1819 1.1 ryo 1820 1.1 ryo static void 1821 1.1 ryo vmxnet3_free_data(struct vmxnet3_softc *sc) 1822 1.1 ryo { 1823 1.1 ryo 1824 1.1 ryo vmxnet3_free_mcast_table(sc); 1825 1.1 ryo vmxnet3_free_queue_data(sc); 1826 1.1 ryo vmxnet3_free_shared_data(sc); 1827 1.1 ryo } 1828 1.1 ryo 1829 1.1 ryo static int 1830 1.1 ryo vmxnet3_setup_interface(struct vmxnet3_softc *sc) 1831 1.1 ryo { 1832 1.1 ryo struct ifnet *ifp = &sc->vmx_ethercom.ec_if; 1833 1.1 ryo 1834 1.1 ryo vmxnet3_get_lladdr(sc); 1835 1.1 ryo aprint_normal_dev(sc->vmx_dev, "Ethernet address %s\n", 1836 1.1 ryo ether_sprintf(sc->vmx_lladdr)); 1837 1.1 ryo vmxnet3_set_lladdr(sc); 1838 1.1 ryo 1839 1.1 ryo strlcpy(ifp->if_xname, device_xname(sc->vmx_dev), IFNAMSIZ); 1840 1.1 ryo ifp->if_softc = sc; 1841 1.1 ryo ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX; 1842 1.1 ryo ifp->if_extflags = IFEF_MPSAFE; 1843 1.1 ryo ifp->if_ioctl = vmxnet3_ioctl; 1844 1.1 ryo ifp->if_start = vmxnet3_start; 1845 1.1 ryo ifp->if_transmit = vmxnet3_transmit; 1846 1.1 ryo ifp->if_watchdog = NULL; 1847 1.1 ryo ifp->if_init = vmxnet3_init; 1848 1.1 ryo ifp->if_stop = vmxnet3_stop; 1849 1.1 ryo sc->vmx_ethercom.ec_if.if_capabilities |=IFCAP_CSUM_IPv4_Rx | 1850 1.1 ryo IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | 1851 1.1 ryo IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx | 1852 1.1 ryo IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_TCPv6_Rx | 1853 1.1 ryo IFCAP_CSUM_UDPv6_Tx | IFCAP_CSUM_UDPv6_Rx; 1854 1.1 ryo 1855 1.1 ryo ifp->if_capenable = ifp->if_capabilities; 1856 1.1 ryo 1857 1.1 ryo sc->vmx_ethercom.ec_if.if_capabilities |= IFCAP_TSOv4 | IFCAP_TSOv6; 1858 1.1 ryo 1859 1.1 ryo sc->vmx_ethercom.ec_capabilities |= 1860 1.1 ryo ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING | ETHERCAP_JUMBO_MTU; 1861 1.1 ryo sc->vmx_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING; 1862 1.1 ryo 1863 1.1 ryo IFQ_SET_MAXLEN(&ifp->if_snd, sc->vmx_ntxdescs); 1864 1.1 ryo IFQ_SET_READY(&ifp->if_snd); 1865 1.1 ryo 1866 1.1 ryo /* Initialize ifmedia structures. */ 1867 1.1 ryo sc->vmx_ethercom.ec_ifmedia = &sc->vmx_media; 1868 1.1 ryo ifmedia_init_with_lock(&sc->vmx_media, IFM_IMASK, vmxnet3_ifmedia_change, 1869 1.1 ryo vmxnet3_ifmedia_status, sc->vmx_mtx); 1870 1.1 ryo ifmedia_add(&sc->vmx_media, IFM_ETHER | IFM_AUTO, 0, NULL); 1871 1.1 ryo ifmedia_add(&sc->vmx_media, IFM_ETHER | IFM_10G_T | IFM_FDX, 0, NULL); 1872 1.1 ryo ifmedia_add(&sc->vmx_media, IFM_ETHER | IFM_10G_T, 0, NULL); 1873 1.1 ryo ifmedia_add(&sc->vmx_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); 1874 1.1 ryo ifmedia_add(&sc->vmx_media, IFM_ETHER | IFM_1000_T, 0, NULL); 1875 1.1 ryo ifmedia_set(&sc->vmx_media, IFM_ETHER | IFM_AUTO); 1876 1.1 ryo 1877 1.1 ryo if_attach(ifp); 1878 1.1 ryo if_deferred_start_init(ifp, NULL); 1879 1.1 ryo ether_ifattach(ifp, sc->vmx_lladdr); 1880 1.1 ryo ether_set_ifflags_cb(&sc->vmx_ethercom, vmxnet3_ifflags_cb); 1881 1.1 ryo vmxnet3_cmd_link_status(ifp); 1882 1.1 ryo 1883 1.1 ryo /* should set before setting interrupts */ 1884 1.1 ryo sc->vmx_rx_intr_process_limit = VMXNET3_RX_INTR_PROCESS_LIMIT; 1885 1.1 ryo sc->vmx_rx_process_limit = VMXNET3_RX_PROCESS_LIMIT; 1886 1.1 ryo sc->vmx_tx_intr_process_limit = VMXNET3_TX_INTR_PROCESS_LIMIT; 1887 1.1 ryo sc->vmx_tx_process_limit = VMXNET3_TX_PROCESS_LIMIT; 1888 1.1 ryo 1889 1.1 ryo return (0); 1890 1.1 ryo } 1891 1.1 ryo 1892 1.1 ryo static int 1893 1.1 ryo vmxnet3_setup_sysctl(struct vmxnet3_softc *sc) 1894 1.1 ryo { 1895 1.1 ryo const char *devname; 1896 1.1 ryo struct sysctllog **log; 1897 1.1 ryo const struct sysctlnode *rnode, *rxnode, *txnode; 1898 1.1 ryo int error; 1899 1.1 ryo 1900 1.1 ryo log = &sc->vmx_sysctllog; 1901 1.1 ryo devname = device_xname(sc->vmx_dev); 1902 1.1 ryo 1903 1.1 ryo error = sysctl_createv(log, 0, NULL, &rnode, 1904 1.1 ryo 0, CTLTYPE_NODE, devname, 1905 1.1 ryo SYSCTL_DESCR("vmxnet3 information and settings"), 1906 1.1 ryo NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL); 1907 1.1 ryo if (error) 1908 1.1 ryo goto out; 1909 1.1 ryo error = sysctl_createv(log, 0, &rnode, NULL, 1910 1.1 ryo CTLFLAG_READWRITE, CTLTYPE_BOOL, "txrx_workqueue", 1911 1.1 ryo SYSCTL_DESCR("Use workqueue for packet processing"), 1912 1.1 ryo NULL, 0, &sc->vmx_txrx_workqueue, 0, CTL_CREATE, CTL_EOL); 1913 1.1 ryo if (error) 1914 1.1 ryo goto out; 1915 1.1 ryo 1916 1.1 ryo error = sysctl_createv(log, 0, &rnode, &rxnode, 1917 1.1 ryo 0, CTLTYPE_NODE, "rx", 1918 1.1 ryo SYSCTL_DESCR("vmxnet3 information and settings for Rx"), 1919 1.1 ryo NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL); 1920 1.1 ryo if (error) 1921 1.1 ryo goto out; 1922 1.1 ryo error = sysctl_createv(log, 0, &rxnode, NULL, 1923 1.1 ryo CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit", 1924 1.1 ryo SYSCTL_DESCR("max number of Rx packets to process for interrupt processing"), 1925 1.1 ryo NULL, 0, &sc->vmx_rx_intr_process_limit, 0, CTL_CREATE, CTL_EOL); 1926 1.1 ryo if (error) 1927 1.1 ryo goto out; 1928 1.1 ryo error = sysctl_createv(log, 0, &rxnode, NULL, 1929 1.1 ryo CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit", 1930 1.1 ryo SYSCTL_DESCR("max number of Rx packets to process for deferred processing"), 1931 1.1 ryo NULL, 0, &sc->vmx_rx_process_limit, 0, CTL_CREATE, CTL_EOL); 1932 1.1 ryo if (error) 1933 1.1 ryo goto out; 1934 1.1 ryo 1935 1.1 ryo error = sysctl_createv(log, 0, &rnode, &txnode, 1936 1.1 ryo 0, CTLTYPE_NODE, "tx", 1937 1.1 ryo SYSCTL_DESCR("vmxnet3 information and settings for Tx"), 1938 1.1 ryo NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL); 1939 1.1 ryo if (error) 1940 1.1 ryo goto out; 1941 1.1 ryo error = sysctl_createv(log, 0, &txnode, NULL, 1942 1.1 ryo CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit", 1943 1.1 ryo SYSCTL_DESCR("max number of Tx packets to process for interrupt processing"), 1944 1.1 ryo NULL, 0, &sc->vmx_tx_intr_process_limit, 0, CTL_CREATE, CTL_EOL); 1945 1.1 ryo if (error) 1946 1.1 ryo goto out; 1947 1.1 ryo error = sysctl_createv(log, 0, &txnode, NULL, 1948 1.1 ryo CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit", 1949 1.1 ryo SYSCTL_DESCR("max number of Tx packets to process for deferred processing"), 1950 1.1 ryo NULL, 0, &sc->vmx_tx_process_limit, 0, CTL_CREATE, CTL_EOL); 1951 1.1 ryo 1952 1.1 ryo out: 1953 1.1 ryo if (error) { 1954 1.1 ryo aprint_error_dev(sc->vmx_dev, 1955 1.1 ryo "unable to create sysctl node\n"); 1956 1.1 ryo sysctl_teardown(log); 1957 1.1 ryo } 1958 1.1 ryo return error; 1959 1.1 ryo } 1960 1.1 ryo 1961 1.1 ryo static int 1962 1.1 ryo vmxnet3_setup_stats(struct vmxnet3_softc *sc) 1963 1.1 ryo { 1964 1.1 ryo struct vmxnet3_queue *vmxq; 1965 1.1 ryo struct vmxnet3_txqueue *txq; 1966 1.1 ryo struct vmxnet3_rxqueue *rxq; 1967 1.1 ryo int i; 1968 1.1 ryo 1969 1.1 ryo for (i = 0; i < sc->vmx_ntxqueues; i++) { 1970 1.1 ryo vmxq = &sc->vmx_queue[i]; 1971 1.1 ryo txq = &vmxq->vxq_txqueue; 1972 1.1 ryo evcnt_attach_dynamic(&txq->vxtxq_intr, EVCNT_TYPE_INTR, 1973 1.1 ryo NULL, txq->vxtxq_name, "Interrupt on queue"); 1974 1.1 ryo evcnt_attach_dynamic(&txq->vxtxq_defer, EVCNT_TYPE_MISC, 1975 1.1 ryo NULL, txq->vxtxq_name, "Handled queue in softint/workqueue"); 1976 1.1 ryo evcnt_attach_dynamic(&txq->vxtxq_deferreq, EVCNT_TYPE_MISC, 1977 1.1 ryo NULL, txq->vxtxq_name, "Requested in softint/workqueue"); 1978 1.1 ryo evcnt_attach_dynamic(&txq->vxtxq_pcqdrop, EVCNT_TYPE_MISC, 1979 1.1 ryo NULL, txq->vxtxq_name, "Dropped in pcq"); 1980 1.1 ryo evcnt_attach_dynamic(&txq->vxtxq_transmitdef, EVCNT_TYPE_MISC, 1981 1.1 ryo NULL, txq->vxtxq_name, "Deferred transmit"); 1982 1.1 ryo evcnt_attach_dynamic(&txq->vxtxq_watchdogto, EVCNT_TYPE_MISC, 1983 1.1 ryo NULL, txq->vxtxq_name, "Watchdog timeout"); 1984 1.1 ryo evcnt_attach_dynamic(&txq->vxtxq_defragged, EVCNT_TYPE_MISC, 1985 1.1 ryo NULL, txq->vxtxq_name, "m_defrag successed"); 1986 1.1 ryo evcnt_attach_dynamic(&txq->vxtxq_defrag_failed, EVCNT_TYPE_MISC, 1987 1.1 ryo NULL, txq->vxtxq_name, "m_defrag failed"); 1988 1.1 ryo } 1989 1.1 ryo 1990 1.1 ryo for (i = 0; i < sc->vmx_nrxqueues; i++) { 1991 1.1 ryo vmxq = &sc->vmx_queue[i]; 1992 1.1 ryo rxq = &vmxq->vxq_rxqueue; 1993 1.1 ryo evcnt_attach_dynamic(&rxq->vxrxq_intr, EVCNT_TYPE_INTR, 1994 1.1 ryo NULL, rxq->vxrxq_name, "Interrupt on queue"); 1995 1.1 ryo evcnt_attach_dynamic(&rxq->vxrxq_defer, EVCNT_TYPE_MISC, 1996 1.1 ryo NULL, rxq->vxrxq_name, "Handled queue in softint/workqueue"); 1997 1.1 ryo evcnt_attach_dynamic(&rxq->vxrxq_deferreq, EVCNT_TYPE_MISC, 1998 1.1 ryo NULL, rxq->vxrxq_name, "Requested in softint/workqueue"); 1999 1.1 ryo evcnt_attach_dynamic(&rxq->vxrxq_mgetcl_failed, EVCNT_TYPE_MISC, 2000 1.1 ryo NULL, rxq->vxrxq_name, "MCLGET failed"); 2001 1.1 ryo evcnt_attach_dynamic(&rxq->vxrxq_mbuf_load_failed, EVCNT_TYPE_MISC, 2002 1.1 ryo NULL, rxq->vxrxq_name, "bus_dmamap_load_mbuf failed"); 2003 1.1 ryo } 2004 1.1 ryo 2005 1.1 ryo evcnt_attach_dynamic(&sc->vmx_event_intr, EVCNT_TYPE_INTR, 2006 1.1 ryo NULL, device_xname(sc->vmx_dev), "Interrupt for other events"); 2007 1.1 ryo evcnt_attach_dynamic(&sc->vmx_event_link, EVCNT_TYPE_MISC, 2008 1.1 ryo NULL, device_xname(sc->vmx_dev), "Link status event"); 2009 1.1 ryo evcnt_attach_dynamic(&sc->vmx_event_txqerror, EVCNT_TYPE_MISC, 2010 1.1 ryo NULL, device_xname(sc->vmx_dev), "Tx queue error event"); 2011 1.1 ryo evcnt_attach_dynamic(&sc->vmx_event_rxqerror, EVCNT_TYPE_MISC, 2012 1.1 ryo NULL, device_xname(sc->vmx_dev), "Rx queue error event"); 2013 1.1 ryo evcnt_attach_dynamic(&sc->vmx_event_dic, EVCNT_TYPE_MISC, 2014 1.1 ryo NULL, device_xname(sc->vmx_dev), "Device impl change event"); 2015 1.1 ryo evcnt_attach_dynamic(&sc->vmx_event_debug, EVCNT_TYPE_MISC, 2016 1.1 ryo NULL, device_xname(sc->vmx_dev), "Debug event"); 2017 1.1 ryo 2018 1.1 ryo return 0; 2019 1.1 ryo } 2020 1.1 ryo 2021 1.1 ryo static void 2022 1.1 ryo vmxnet3_teardown_stats(struct vmxnet3_softc *sc) 2023 1.1 ryo { 2024 1.1 ryo struct vmxnet3_queue *vmxq; 2025 1.1 ryo struct vmxnet3_txqueue *txq; 2026 1.1 ryo struct vmxnet3_rxqueue *rxq; 2027 1.1 ryo int i; 2028 1.1 ryo 2029 1.1 ryo for (i = 0; i < sc->vmx_ntxqueues; i++) { 2030 1.1 ryo vmxq = &sc->vmx_queue[i]; 2031 1.1 ryo txq = &vmxq->vxq_txqueue; 2032 1.1 ryo evcnt_detach(&txq->vxtxq_intr); 2033 1.1 ryo evcnt_detach(&txq->vxtxq_defer); 2034 1.1 ryo evcnt_detach(&txq->vxtxq_deferreq); 2035 1.1 ryo evcnt_detach(&txq->vxtxq_pcqdrop); 2036 1.1 ryo evcnt_detach(&txq->vxtxq_transmitdef); 2037 1.1 ryo evcnt_detach(&txq->vxtxq_watchdogto); 2038 1.1 ryo evcnt_detach(&txq->vxtxq_defragged); 2039 1.1 ryo evcnt_detach(&txq->vxtxq_defrag_failed); 2040 1.1 ryo } 2041 1.1 ryo 2042 1.1 ryo for (i = 0; i < sc->vmx_nrxqueues; i++) { 2043 1.1 ryo vmxq = &sc->vmx_queue[i]; 2044 1.1 ryo rxq = &vmxq->vxq_rxqueue; 2045 1.1 ryo evcnt_detach(&rxq->vxrxq_intr); 2046 1.1 ryo evcnt_detach(&rxq->vxrxq_defer); 2047 1.1 ryo evcnt_detach(&rxq->vxrxq_deferreq); 2048 1.1 ryo evcnt_detach(&rxq->vxrxq_mgetcl_failed); 2049 1.1 ryo evcnt_detach(&rxq->vxrxq_mbuf_load_failed); 2050 1.1 ryo } 2051 1.1 ryo 2052 1.1 ryo evcnt_detach(&sc->vmx_event_intr); 2053 1.1 ryo evcnt_detach(&sc->vmx_event_link); 2054 1.1 ryo evcnt_detach(&sc->vmx_event_txqerror); 2055 1.1 ryo evcnt_detach(&sc->vmx_event_rxqerror); 2056 1.1 ryo evcnt_detach(&sc->vmx_event_dic); 2057 1.1 ryo evcnt_detach(&sc->vmx_event_debug); 2058 1.1 ryo } 2059 1.1 ryo 2060 1.1 ryo static void 2061 1.1 ryo vmxnet3_evintr(struct vmxnet3_softc *sc) 2062 1.1 ryo { 2063 1.1 ryo device_t dev; 2064 1.1 ryo struct vmxnet3_txq_shared *ts; 2065 1.1 ryo struct vmxnet3_rxq_shared *rs; 2066 1.1 ryo uint32_t event; 2067 1.1 ryo int reset; 2068 1.1 ryo 2069 1.1 ryo dev = sc->vmx_dev; 2070 1.1 ryo reset = 0; 2071 1.1 ryo 2072 1.1 ryo VMXNET3_CORE_LOCK(sc); 2073 1.1 ryo 2074 1.1 ryo /* Clear events. */ 2075 1.1 ryo event = sc->vmx_ds->event; 2076 1.1 ryo vmxnet3_write_bar1(sc, VMXNET3_BAR1_EVENT, event); 2077 1.1 ryo 2078 1.1 ryo if (event & VMXNET3_EVENT_LINK) { 2079 1.1 ryo sc->vmx_event_link.ev_count++; 2080 1.1 ryo vmxnet3_if_link_status(sc); 2081 1.1 ryo if (sc->vmx_link_active != 0) 2082 1.1 ryo if_schedule_deferred_start(&sc->vmx_ethercom.ec_if); 2083 1.1 ryo } 2084 1.1 ryo 2085 1.1 ryo if (event & (VMXNET3_EVENT_TQERROR | VMXNET3_EVENT_RQERROR)) { 2086 1.1 ryo if (event & VMXNET3_EVENT_TQERROR) 2087 1.1 ryo sc->vmx_event_txqerror.ev_count++; 2088 1.1 ryo if (event & VMXNET3_EVENT_RQERROR) 2089 1.1 ryo sc->vmx_event_rxqerror.ev_count++; 2090 1.1 ryo 2091 1.1 ryo reset = 1; 2092 1.1 ryo vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_STATUS); 2093 1.1 ryo ts = sc->vmx_queue[0].vxq_txqueue.vxtxq_ts; 2094 1.1 ryo if (ts->stopped != 0) 2095 1.1 ryo device_printf(dev, "Tx queue error %#x\n", ts->error); 2096 1.1 ryo rs = sc->vmx_queue[0].vxq_rxqueue.vxrxq_rs; 2097 1.1 ryo if (rs->stopped != 0) 2098 1.1 ryo device_printf(dev, "Rx queue error %#x\n", rs->error); 2099 1.1 ryo device_printf(dev, "Rx/Tx queue error event ... resetting\n"); 2100 1.1 ryo } 2101 1.1 ryo 2102 1.1 ryo if (event & VMXNET3_EVENT_DIC) { 2103 1.1 ryo sc->vmx_event_dic.ev_count++; 2104 1.1 ryo device_printf(dev, "device implementation change event\n"); 2105 1.1 ryo } 2106 1.1 ryo if (event & VMXNET3_EVENT_DEBUG) { 2107 1.1 ryo sc->vmx_event_debug.ev_count++; 2108 1.1 ryo device_printf(dev, "debug event\n"); 2109 1.1 ryo } 2110 1.1 ryo 2111 1.1 ryo if (reset != 0) 2112 1.1 ryo vmxnet3_init_locked(sc); 2113 1.1 ryo 2114 1.1 ryo VMXNET3_CORE_UNLOCK(sc); 2115 1.1 ryo } 2116 1.1 ryo 2117 1.1 ryo static bool 2118 1.1 ryo vmxnet3_txq_eof(struct vmxnet3_txqueue *txq, u_int limit) 2119 1.1 ryo { 2120 1.1 ryo struct vmxnet3_softc *sc; 2121 1.1 ryo struct vmxnet3_txring *txr; 2122 1.1 ryo struct vmxnet3_comp_ring *txc; 2123 1.1 ryo struct vmxnet3_txcompdesc *txcd; 2124 1.1 ryo struct vmxnet3_txbuf *txb; 2125 1.1 ryo struct ifnet *ifp; 2126 1.1 ryo struct mbuf *m; 2127 1.1 ryo u_int sop; 2128 1.1 ryo bool more = false; 2129 1.1 ryo 2130 1.1 ryo sc = txq->vxtxq_sc; 2131 1.1 ryo txr = &txq->vxtxq_cmd_ring; 2132 1.1 ryo txc = &txq->vxtxq_comp_ring; 2133 1.1 ryo ifp = &sc->vmx_ethercom.ec_if; 2134 1.1 ryo 2135 1.1 ryo VMXNET3_TXQ_LOCK_ASSERT(txq); 2136 1.1 ryo 2137 1.1 ryo net_stat_ref_t nsr = IF_STAT_GETREF(ifp); 2138 1.1 ryo for (;;) { 2139 1.1 ryo if (limit-- == 0) { 2140 1.1 ryo more = true; 2141 1.1 ryo break; 2142 1.1 ryo } 2143 1.1 ryo 2144 1.1 ryo txcd = &txc->vxcr_u.txcd[txc->vxcr_next]; 2145 1.1 ryo if (txcd->gen != txc->vxcr_gen) 2146 1.1 ryo break; 2147 1.1 ryo vmxnet3_barrier(sc, VMXNET3_BARRIER_RD); 2148 1.1 ryo 2149 1.1 ryo if (++txc->vxcr_next == txc->vxcr_ndesc) { 2150 1.1 ryo txc->vxcr_next = 0; 2151 1.1 ryo txc->vxcr_gen ^= 1; 2152 1.1 ryo } 2153 1.1 ryo 2154 1.1 ryo sop = txr->vxtxr_next; 2155 1.1 ryo txb = &txr->vxtxr_txbuf[sop]; 2156 1.1 ryo 2157 1.1 ryo if ((m = txb->vtxb_m) != NULL) { 2158 1.1 ryo bus_dmamap_sync(sc->vmx_dmat, txb->vtxb_dmamap, 2159 1.1 ryo 0, txb->vtxb_dmamap->dm_mapsize, 2160 1.1 ryo BUS_DMASYNC_POSTWRITE); 2161 1.1 ryo bus_dmamap_unload(sc->vmx_dmat, txb->vtxb_dmamap); 2162 1.1 ryo 2163 1.16 riastrad if_statinc_ref(ifp, nsr, if_opackets); 2164 1.16 riastrad if_statadd_ref(ifp, nsr, if_obytes, m->m_pkthdr.len); 2165 1.1 ryo if (m->m_flags & M_MCAST) 2166 1.16 riastrad if_statinc_ref(ifp, nsr, if_omcasts); 2167 1.1 ryo 2168 1.1 ryo m_freem(m); 2169 1.1 ryo txb->vtxb_m = NULL; 2170 1.1 ryo } 2171 1.1 ryo 2172 1.1 ryo txr->vxtxr_next = (txcd->eop_idx + 1) % txr->vxtxr_ndesc; 2173 1.1 ryo } 2174 1.1 ryo IF_STAT_PUTREF(ifp); 2175 1.1 ryo 2176 1.1 ryo if (txr->vxtxr_head == txr->vxtxr_next) 2177 1.1 ryo txq->vxtxq_watchdog = 0; 2178 1.1 ryo 2179 1.1 ryo return more; 2180 1.1 ryo } 2181 1.1 ryo 2182 1.1 ryo static int 2183 1.1 ryo vmxnet3_newbuf(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rxq, 2184 1.1 ryo struct vmxnet3_rxring *rxr) 2185 1.1 ryo { 2186 1.1 ryo struct mbuf *m; 2187 1.1 ryo struct vmxnet3_rxdesc *rxd; 2188 1.1 ryo struct vmxnet3_rxbuf *rxb; 2189 1.1 ryo bus_dma_tag_t tag; 2190 1.1 ryo bus_dmamap_t dmap; 2191 1.1 ryo int idx, btype, error; 2192 1.1 ryo 2193 1.1 ryo tag = sc->vmx_dmat; 2194 1.1 ryo dmap = rxr->vxrxr_spare_dmap; 2195 1.1 ryo idx = rxr->vxrxr_fill; 2196 1.1 ryo rxd = &rxr->vxrxr_rxd[idx]; 2197 1.1 ryo rxb = &rxr->vxrxr_rxbuf[idx]; 2198 1.1 ryo 2199 1.1 ryo /* Don't allocate buffers for ring 2 for now. */ 2200 1.1 ryo if (rxr->vxrxr_rid != 0) 2201 1.1 ryo return -1; 2202 1.1 ryo btype = VMXNET3_BTYPE_HEAD; 2203 1.1 ryo 2204 1.1 ryo MGETHDR(m, M_DONTWAIT, MT_DATA); 2205 1.1 ryo if (m == NULL) 2206 1.1 ryo return (ENOBUFS); 2207 1.1 ryo 2208 1.1 ryo MCLGET(m, M_DONTWAIT); 2209 1.1 ryo if ((m->m_flags & M_EXT) == 0) { 2210 1.1 ryo rxq->vxrxq_mgetcl_failed.ev_count++; 2211 1.1 ryo m_freem(m); 2212 1.1 ryo return (ENOBUFS); 2213 1.1 ryo } 2214 1.1 ryo 2215 1.1 ryo m->m_pkthdr.len = m->m_len = JUMBO_LEN; 2216 1.1 ryo m_adj(m, ETHER_ALIGN); 2217 1.1 ryo 2218 1.1 ryo error = bus_dmamap_load_mbuf(sc->vmx_dmat, dmap, m, BUS_DMA_NOWAIT); 2219 1.1 ryo if (error) { 2220 1.1 ryo m_freem(m); 2221 1.1 ryo rxq->vxrxq_mbuf_load_failed.ev_count++; 2222 1.1 ryo return (error); 2223 1.1 ryo } 2224 1.1 ryo 2225 1.1 ryo if (rxb->vrxb_m != NULL) { 2226 1.1 ryo bus_dmamap_sync(tag, rxb->vrxb_dmamap, 2227 1.1 ryo 0, rxb->vrxb_dmamap->dm_mapsize, 2228 1.1 ryo BUS_DMASYNC_POSTREAD); 2229 1.1 ryo bus_dmamap_unload(tag, rxb->vrxb_dmamap); 2230 1.1 ryo } 2231 1.1 ryo 2232 1.1 ryo rxr->vxrxr_spare_dmap = rxb->vrxb_dmamap; 2233 1.1 ryo rxb->vrxb_dmamap = dmap; 2234 1.1 ryo rxb->vrxb_m = m; 2235 1.1 ryo 2236 1.1 ryo rxd->addr = DMAADDR(dmap); 2237 1.1 ryo rxd->len = m->m_pkthdr.len; 2238 1.1 ryo rxd->btype = btype; 2239 1.1 ryo rxd->gen = rxr->vxrxr_gen; 2240 1.1 ryo 2241 1.1 ryo vmxnet3_rxr_increment_fill(rxr); 2242 1.1 ryo return (0); 2243 1.1 ryo } 2244 1.1 ryo 2245 1.1 ryo static void 2246 1.1 ryo vmxnet3_rxq_eof_discard(struct vmxnet3_rxqueue *rxq, 2247 1.1 ryo struct vmxnet3_rxring *rxr, int idx) 2248 1.1 ryo { 2249 1.1 ryo struct vmxnet3_rxdesc *rxd; 2250 1.1 ryo 2251 1.1 ryo rxd = &rxr->vxrxr_rxd[idx]; 2252 1.1 ryo rxd->gen = rxr->vxrxr_gen; 2253 1.1 ryo vmxnet3_rxr_increment_fill(rxr); 2254 1.1 ryo } 2255 1.1 ryo 2256 1.1 ryo static void 2257 1.1 ryo vmxnet3_rxq_discard_chain(struct vmxnet3_rxqueue *rxq) 2258 1.1 ryo { 2259 1.1 ryo struct vmxnet3_softc *sc; 2260 1.1 ryo struct vmxnet3_rxring *rxr; 2261 1.1 ryo struct vmxnet3_comp_ring *rxc; 2262 1.1 ryo struct vmxnet3_rxcompdesc *rxcd; 2263 1.1 ryo int idx, eof; 2264 1.1 ryo 2265 1.1 ryo sc = rxq->vxrxq_sc; 2266 1.1 ryo rxc = &rxq->vxrxq_comp_ring; 2267 1.1 ryo 2268 1.1 ryo do { 2269 1.1 ryo rxcd = &rxc->vxcr_u.rxcd[rxc->vxcr_next]; 2270 1.1 ryo if (rxcd->gen != rxc->vxcr_gen) 2271 1.1 ryo break; /* Not expected. */ 2272 1.1 ryo vmxnet3_barrier(sc, VMXNET3_BARRIER_RD); 2273 1.1 ryo 2274 1.1 ryo if (++rxc->vxcr_next == rxc->vxcr_ndesc) { 2275 1.1 ryo rxc->vxcr_next = 0; 2276 1.1 ryo rxc->vxcr_gen ^= 1; 2277 1.1 ryo } 2278 1.1 ryo 2279 1.1 ryo idx = rxcd->rxd_idx; 2280 1.1 ryo eof = rxcd->eop; 2281 1.1 ryo if (rxcd->qid < sc->vmx_nrxqueues) 2282 1.1 ryo rxr = &rxq->vxrxq_cmd_ring[0]; 2283 1.1 ryo else 2284 1.1 ryo rxr = &rxq->vxrxq_cmd_ring[1]; 2285 1.1 ryo vmxnet3_rxq_eof_discard(rxq, rxr, idx); 2286 1.1 ryo } while (!eof); 2287 1.1 ryo } 2288 1.1 ryo 2289 1.1 ryo static void 2290 1.1 ryo vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *rxcd, struct mbuf *m) 2291 1.1 ryo { 2292 1.1 ryo if (rxcd->no_csum) 2293 1.1 ryo return; 2294 1.1 ryo 2295 1.1 ryo if (rxcd->ipv4) { 2296 1.1 ryo m->m_pkthdr.csum_flags |= M_CSUM_IPv4; 2297 1.1 ryo if (rxcd->ipcsum_ok == 0) 2298 1.1 ryo m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD; 2299 1.1 ryo } 2300 1.1 ryo 2301 1.1 ryo if (rxcd->fragment) 2302 1.1 ryo return; 2303 1.1 ryo 2304 1.1 ryo if (rxcd->tcp) { 2305 1.1 ryo m->m_pkthdr.csum_flags |= 2306 1.1 ryo rxcd->ipv4 ? M_CSUM_TCPv4 : M_CSUM_TCPv6; 2307 1.1 ryo if ((rxcd->csum_ok) == 0) 2308 1.1 ryo m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD; 2309 1.1 ryo } 2310 1.1 ryo 2311 1.1 ryo if (rxcd->udp) { 2312 1.1 ryo m->m_pkthdr.csum_flags |= 2313 1.1 ryo rxcd->ipv4 ? M_CSUM_UDPv4 : M_CSUM_UDPv6 ; 2314 1.1 ryo if ((rxcd->csum_ok) == 0) 2315 1.1 ryo m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD; 2316 1.1 ryo } 2317 1.1 ryo } 2318 1.1 ryo 2319 1.1 ryo static void 2320 1.1 ryo vmxnet3_rxq_input(struct vmxnet3_rxqueue *rxq, 2321 1.1 ryo struct vmxnet3_rxcompdesc *rxcd, struct mbuf *m) 2322 1.1 ryo { 2323 1.1 ryo struct vmxnet3_softc *sc; 2324 1.1 ryo struct ifnet *ifp; 2325 1.1 ryo 2326 1.1 ryo sc = rxq->vxrxq_sc; 2327 1.1 ryo ifp = &sc->vmx_ethercom.ec_if; 2328 1.1 ryo 2329 1.1 ryo if (rxcd->error) { 2330 1.1 ryo if_statinc(ifp, if_ierrors); 2331 1.1 ryo m_freem(m); 2332 1.1 ryo return; 2333 1.1 ryo } 2334 1.1 ryo 2335 1.1 ryo if (!rxcd->no_csum) 2336 1.1 ryo vmxnet3_rx_csum(rxcd, m); 2337 1.1 ryo if (rxcd->vlan) 2338 1.1 ryo vlan_set_tag(m, rxcd->vtag); 2339 1.1 ryo 2340 1.1 ryo net_stat_ref_t nsr = IF_STAT_GETREF(ifp); 2341 1.16 riastrad if_statinc_ref(ifp, nsr, if_ipackets); 2342 1.16 riastrad if_statadd_ref(ifp, nsr, if_ibytes, m->m_pkthdr.len); 2343 1.1 ryo IF_STAT_PUTREF(ifp); 2344 1.1 ryo 2345 1.1 ryo if_percpuq_enqueue(ifp->if_percpuq, m); 2346 1.1 ryo } 2347 1.1 ryo 2348 1.1 ryo static bool 2349 1.1 ryo vmxnet3_rxq_eof(struct vmxnet3_rxqueue *rxq, u_int limit) 2350 1.1 ryo { 2351 1.1 ryo struct vmxnet3_softc *sc; 2352 1.1 ryo struct ifnet *ifp; 2353 1.1 ryo struct vmxnet3_rxring *rxr; 2354 1.1 ryo struct vmxnet3_comp_ring *rxc; 2355 1.1 ryo struct vmxnet3_rxdesc *rxd __diagused; 2356 1.1 ryo struct vmxnet3_rxcompdesc *rxcd; 2357 1.1 ryo struct mbuf *m, *m_head, *m_tail; 2358 1.2 ryo u_int idx, length; 2359 1.1 ryo bool more = false; 2360 1.1 ryo 2361 1.1 ryo sc = rxq->vxrxq_sc; 2362 1.1 ryo ifp = &sc->vmx_ethercom.ec_if; 2363 1.1 ryo rxc = &rxq->vxrxq_comp_ring; 2364 1.1 ryo 2365 1.1 ryo VMXNET3_RXQ_LOCK_ASSERT(rxq); 2366 1.1 ryo 2367 1.12 riastrad if (rxq->vxrxq_stopping) 2368 1.1 ryo return more; 2369 1.1 ryo 2370 1.1 ryo m_head = rxq->vxrxq_mhead; 2371 1.1 ryo rxq->vxrxq_mhead = NULL; 2372 1.1 ryo m_tail = rxq->vxrxq_mtail; 2373 1.1 ryo rxq->vxrxq_mtail = NULL; 2374 1.1 ryo KASSERT(m_head == NULL || m_tail != NULL); 2375 1.1 ryo 2376 1.1 ryo for (;;) { 2377 1.1 ryo if (limit-- == 0) { 2378 1.1 ryo more = true; 2379 1.1 ryo break; 2380 1.1 ryo } 2381 1.1 ryo 2382 1.1 ryo rxcd = &rxc->vxcr_u.rxcd[rxc->vxcr_next]; 2383 1.1 ryo if (rxcd->gen != rxc->vxcr_gen) { 2384 1.1 ryo rxq->vxrxq_mhead = m_head; 2385 1.1 ryo rxq->vxrxq_mtail = m_tail; 2386 1.1 ryo break; 2387 1.1 ryo } 2388 1.1 ryo vmxnet3_barrier(sc, VMXNET3_BARRIER_RD); 2389 1.1 ryo 2390 1.1 ryo if (++rxc->vxcr_next == rxc->vxcr_ndesc) { 2391 1.1 ryo rxc->vxcr_next = 0; 2392 1.1 ryo rxc->vxcr_gen ^= 1; 2393 1.1 ryo } 2394 1.1 ryo 2395 1.1 ryo idx = rxcd->rxd_idx; 2396 1.1 ryo length = rxcd->len; 2397 1.1 ryo if (rxcd->qid < sc->vmx_nrxqueues) 2398 1.1 ryo rxr = &rxq->vxrxq_cmd_ring[0]; 2399 1.1 ryo else 2400 1.1 ryo rxr = &rxq->vxrxq_cmd_ring[1]; 2401 1.1 ryo rxd = &rxr->vxrxr_rxd[idx]; 2402 1.1 ryo 2403 1.1 ryo m = rxr->vxrxr_rxbuf[idx].vrxb_m; 2404 1.1 ryo KASSERT(m != NULL); 2405 1.1 ryo 2406 1.1 ryo /* 2407 1.1 ryo * The host may skip descriptors. We detect this when this 2408 1.1 ryo * descriptor does not match the previous fill index. Catch 2409 1.1 ryo * up with the host now. 2410 1.1 ryo */ 2411 1.1 ryo if (__predict_false(rxr->vxrxr_fill != idx)) { 2412 1.1 ryo while (rxr->vxrxr_fill != idx) { 2413 1.1 ryo rxr->vxrxr_rxd[rxr->vxrxr_fill].gen = 2414 1.1 ryo rxr->vxrxr_gen; 2415 1.1 ryo vmxnet3_rxr_increment_fill(rxr); 2416 1.1 ryo } 2417 1.1 ryo } 2418 1.1 ryo 2419 1.1 ryo if (rxcd->sop) { 2420 1.1 ryo /* start of frame w/o head buffer */ 2421 1.1 ryo KASSERT(rxd->btype == VMXNET3_BTYPE_HEAD); 2422 1.1 ryo /* start of frame not in ring 0 */ 2423 1.1 ryo KASSERT(rxr == &rxq->vxrxq_cmd_ring[0]); 2424 1.1 ryo /* duplicate start of frame? */ 2425 1.1 ryo KASSERT(m_head == NULL); 2426 1.1 ryo 2427 1.1 ryo if (length == 0) { 2428 1.1 ryo /* Just ignore this descriptor. */ 2429 1.1 ryo vmxnet3_rxq_eof_discard(rxq, rxr, idx); 2430 1.1 ryo goto nextp; 2431 1.1 ryo } 2432 1.1 ryo 2433 1.1 ryo if (vmxnet3_newbuf(sc, rxq, rxr) != 0) { 2434 1.1 ryo if_statinc(ifp, if_iqdrops); 2435 1.1 ryo vmxnet3_rxq_eof_discard(rxq, rxr, idx); 2436 1.1 ryo if (!rxcd->eop) 2437 1.1 ryo vmxnet3_rxq_discard_chain(rxq); 2438 1.1 ryo goto nextp; 2439 1.1 ryo } 2440 1.1 ryo 2441 1.1 ryo m_set_rcvif(m, ifp); 2442 1.1 ryo m->m_pkthdr.len = m->m_len = length; 2443 1.1 ryo m->m_pkthdr.csum_flags = 0; 2444 1.1 ryo m_head = m_tail = m; 2445 1.1 ryo 2446 1.1 ryo } else { 2447 1.1 ryo /* non start of frame w/o body buffer */ 2448 1.1 ryo KASSERT(rxd->btype == VMXNET3_BTYPE_BODY); 2449 1.1 ryo /* frame not started? */ 2450 1.1 ryo KASSERT(m_head != NULL); 2451 1.1 ryo 2452 1.1 ryo if (vmxnet3_newbuf(sc, rxq, rxr) != 0) { 2453 1.1 ryo if_statinc(ifp, if_iqdrops); 2454 1.1 ryo vmxnet3_rxq_eof_discard(rxq, rxr, idx); 2455 1.1 ryo if (!rxcd->eop) 2456 1.1 ryo vmxnet3_rxq_discard_chain(rxq); 2457 1.1 ryo m_freem(m_head); 2458 1.1 ryo m_head = m_tail = NULL; 2459 1.1 ryo goto nextp; 2460 1.1 ryo } 2461 1.1 ryo 2462 1.1 ryo m->m_len = length; 2463 1.1 ryo m_head->m_pkthdr.len += length; 2464 1.1 ryo m_tail->m_next = m; 2465 1.1 ryo m_tail = m; 2466 1.1 ryo } 2467 1.1 ryo 2468 1.1 ryo if (rxcd->eop) { 2469 1.1 ryo vmxnet3_rxq_input(rxq, rxcd, m_head); 2470 1.1 ryo m_head = m_tail = NULL; 2471 1.1 ryo 2472 1.1 ryo /* Must recheck after dropping the Rx lock. */ 2473 1.12 riastrad if (rxq->vxrxq_stopping) 2474 1.1 ryo break; 2475 1.1 ryo } 2476 1.1 ryo 2477 1.1 ryo nextp: 2478 1.1 ryo if (__predict_false(rxq->vxrxq_rs->update_rxhead)) { 2479 1.1 ryo int qid = rxcd->qid; 2480 1.1 ryo bus_size_t r; 2481 1.1 ryo 2482 1.1 ryo idx = (idx + 1) % rxr->vxrxr_ndesc; 2483 1.1 ryo if (qid >= sc->vmx_nrxqueues) { 2484 1.1 ryo qid -= sc->vmx_nrxqueues; 2485 1.1 ryo r = VMXNET3_BAR0_RXH2(qid); 2486 1.1 ryo } else 2487 1.1 ryo r = VMXNET3_BAR0_RXH1(qid); 2488 1.1 ryo vmxnet3_write_bar0(sc, r, idx); 2489 1.1 ryo } 2490 1.1 ryo } 2491 1.1 ryo 2492 1.1 ryo return more; 2493 1.1 ryo } 2494 1.1 ryo 2495 1.1 ryo static inline void 2496 1.1 ryo vmxnet3_sched_handle_queue(struct vmxnet3_softc *sc, struct vmxnet3_queue *vmxq) 2497 1.1 ryo { 2498 1.1 ryo 2499 1.1 ryo if (vmxq->vxq_workqueue) { 2500 1.5 knakahar /* 2501 1.5 knakahar * When this function is called, "vmxq" is owned by one CPU. 2502 1.5 knakahar * so, atomic operation is not required here. 2503 1.5 knakahar */ 2504 1.5 knakahar if (!vmxq->vxq_wq_enqueued) { 2505 1.5 knakahar vmxq->vxq_wq_enqueued = true; 2506 1.5 knakahar workqueue_enqueue(sc->vmx_queue_wq, 2507 1.5 knakahar &vmxq->vxq_wq_cookie, curcpu()); 2508 1.5 knakahar } 2509 1.1 ryo } else { 2510 1.1 ryo softint_schedule(vmxq->vxq_si); 2511 1.1 ryo } 2512 1.1 ryo } 2513 1.1 ryo 2514 1.1 ryo static int 2515 1.1 ryo vmxnet3_legacy_intr(void *xsc) 2516 1.1 ryo { 2517 1.1 ryo struct vmxnet3_softc *sc; 2518 1.8 msaitoh struct vmxnet3_queue *vmxq; 2519 1.8 msaitoh struct vmxnet3_txqueue *txq; 2520 1.1 ryo struct vmxnet3_rxqueue *rxq; 2521 1.1 ryo u_int txlimit, rxlimit; 2522 1.1 ryo bool txmore, rxmore; 2523 1.1 ryo 2524 1.1 ryo sc = xsc; 2525 1.8 msaitoh vmxq = &sc->vmx_queue[0]; 2526 1.8 msaitoh txq = &vmxq->vxq_txqueue; 2527 1.8 msaitoh rxq = &vmxq->vxq_rxqueue; 2528 1.1 ryo txlimit = sc->vmx_tx_intr_process_limit; 2529 1.1 ryo rxlimit = sc->vmx_rx_intr_process_limit; 2530 1.1 ryo 2531 1.1 ryo if (sc->vmx_intr_type == VMXNET3_IT_LEGACY) { 2532 1.1 ryo if (vmxnet3_read_bar1(sc, VMXNET3_BAR1_INTR) == 0) 2533 1.1 ryo return (0); 2534 1.1 ryo } 2535 1.1 ryo if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE) 2536 1.1 ryo vmxnet3_disable_all_intrs(sc); 2537 1.1 ryo 2538 1.1 ryo if (sc->vmx_ds->event != 0) 2539 1.1 ryo vmxnet3_evintr(sc); 2540 1.1 ryo 2541 1.9 msaitoh VMXNET3_TXQ_LOCK(txq); 2542 1.9 msaitoh txmore = vmxnet3_txq_eof(txq, txlimit); 2543 1.9 msaitoh VMXNET3_TXQ_UNLOCK(txq); 2544 1.9 msaitoh 2545 1.1 ryo VMXNET3_RXQ_LOCK(rxq); 2546 1.1 ryo rxmore = vmxnet3_rxq_eof(rxq, rxlimit); 2547 1.1 ryo VMXNET3_RXQ_UNLOCK(rxq); 2548 1.1 ryo 2549 1.8 msaitoh if (txmore || rxmore) 2550 1.8 msaitoh vmxnet3_sched_handle_queue(sc, vmxq); 2551 1.8 msaitoh else { 2552 1.1 ryo if_schedule_deferred_start(&sc->vmx_ethercom.ec_if); 2553 1.1 ryo vmxnet3_enable_all_intrs(sc); 2554 1.1 ryo } 2555 1.8 msaitoh 2556 1.1 ryo return (1); 2557 1.1 ryo } 2558 1.1 ryo 2559 1.1 ryo static int 2560 1.1 ryo vmxnet3_txrxq_intr(void *xvmxq) 2561 1.1 ryo { 2562 1.1 ryo struct vmxnet3_softc *sc; 2563 1.1 ryo struct vmxnet3_queue *vmxq; 2564 1.1 ryo struct vmxnet3_txqueue *txq; 2565 1.1 ryo struct vmxnet3_rxqueue *rxq; 2566 1.1 ryo u_int txlimit, rxlimit; 2567 1.1 ryo bool txmore, rxmore; 2568 1.1 ryo 2569 1.1 ryo vmxq = xvmxq; 2570 1.1 ryo txq = &vmxq->vxq_txqueue; 2571 1.1 ryo rxq = &vmxq->vxq_rxqueue; 2572 1.1 ryo sc = txq->vxtxq_sc; 2573 1.1 ryo txlimit = sc->vmx_tx_intr_process_limit; 2574 1.1 ryo rxlimit = sc->vmx_rx_intr_process_limit; 2575 1.1 ryo vmxq->vxq_workqueue = sc->vmx_txrx_workqueue; 2576 1.1 ryo 2577 1.1 ryo if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE) 2578 1.1 ryo vmxnet3_disable_intr(sc, vmxq->vxq_intr_idx); 2579 1.1 ryo 2580 1.1 ryo VMXNET3_TXQ_LOCK(txq); 2581 1.1 ryo txq->vxtxq_intr.ev_count++; 2582 1.1 ryo txmore = vmxnet3_txq_eof(txq, txlimit); 2583 1.1 ryo VMXNET3_TXQ_UNLOCK(txq); 2584 1.1 ryo 2585 1.1 ryo VMXNET3_RXQ_LOCK(rxq); 2586 1.1 ryo rxq->vxrxq_intr.ev_count++; 2587 1.1 ryo rxmore = vmxnet3_rxq_eof(rxq, rxlimit); 2588 1.1 ryo VMXNET3_RXQ_UNLOCK(rxq); 2589 1.1 ryo 2590 1.8 msaitoh if (txmore || rxmore) 2591 1.1 ryo vmxnet3_sched_handle_queue(sc, vmxq); 2592 1.8 msaitoh else { 2593 1.1 ryo /* for ALTQ */ 2594 1.1 ryo if (vmxq->vxq_id == 0) 2595 1.1 ryo if_schedule_deferred_start(&sc->vmx_ethercom.ec_if); 2596 1.1 ryo softint_schedule(txq->vxtxq_si); 2597 1.1 ryo 2598 1.1 ryo vmxnet3_enable_intr(sc, vmxq->vxq_intr_idx); 2599 1.1 ryo } 2600 1.1 ryo 2601 1.1 ryo return (1); 2602 1.1 ryo } 2603 1.1 ryo 2604 1.1 ryo static void 2605 1.1 ryo vmxnet3_handle_queue(void *xvmxq) 2606 1.1 ryo { 2607 1.1 ryo struct vmxnet3_softc *sc; 2608 1.1 ryo struct vmxnet3_queue *vmxq; 2609 1.1 ryo struct vmxnet3_txqueue *txq; 2610 1.1 ryo struct vmxnet3_rxqueue *rxq; 2611 1.1 ryo u_int txlimit, rxlimit; 2612 1.1 ryo bool txmore, rxmore; 2613 1.1 ryo 2614 1.1 ryo vmxq = xvmxq; 2615 1.1 ryo txq = &vmxq->vxq_txqueue; 2616 1.1 ryo rxq = &vmxq->vxq_rxqueue; 2617 1.1 ryo sc = txq->vxtxq_sc; 2618 1.1 ryo txlimit = sc->vmx_tx_process_limit; 2619 1.1 ryo rxlimit = sc->vmx_rx_process_limit; 2620 1.1 ryo 2621 1.1 ryo VMXNET3_TXQ_LOCK(txq); 2622 1.1 ryo txq->vxtxq_defer.ev_count++; 2623 1.1 ryo txmore = vmxnet3_txq_eof(txq, txlimit); 2624 1.1 ryo if (txmore) 2625 1.1 ryo txq->vxtxq_deferreq.ev_count++; 2626 1.1 ryo /* for ALTQ */ 2627 1.1 ryo if (vmxq->vxq_id == 0) 2628 1.1 ryo if_schedule_deferred_start(&sc->vmx_ethercom.ec_if); 2629 1.1 ryo softint_schedule(txq->vxtxq_si); 2630 1.1 ryo VMXNET3_TXQ_UNLOCK(txq); 2631 1.1 ryo 2632 1.1 ryo VMXNET3_RXQ_LOCK(rxq); 2633 1.1 ryo rxq->vxrxq_defer.ev_count++; 2634 1.1 ryo rxmore = vmxnet3_rxq_eof(rxq, rxlimit); 2635 1.1 ryo if (rxmore) 2636 1.1 ryo rxq->vxrxq_deferreq.ev_count++; 2637 1.1 ryo VMXNET3_RXQ_UNLOCK(rxq); 2638 1.1 ryo 2639 1.1 ryo if (txmore || rxmore) 2640 1.1 ryo vmxnet3_sched_handle_queue(sc, vmxq); 2641 1.1 ryo else 2642 1.1 ryo vmxnet3_enable_intr(sc, vmxq->vxq_intr_idx); 2643 1.1 ryo } 2644 1.1 ryo 2645 1.1 ryo static void 2646 1.1 ryo vmxnet3_handle_queue_work(struct work *wk, void *context) 2647 1.1 ryo { 2648 1.1 ryo struct vmxnet3_queue *vmxq; 2649 1.1 ryo 2650 1.1 ryo vmxq = container_of(wk, struct vmxnet3_queue, vxq_wq_cookie); 2651 1.5 knakahar vmxq->vxq_wq_enqueued = false; 2652 1.1 ryo vmxnet3_handle_queue(vmxq); 2653 1.1 ryo } 2654 1.1 ryo 2655 1.1 ryo static int 2656 1.1 ryo vmxnet3_event_intr(void *xsc) 2657 1.1 ryo { 2658 1.1 ryo struct vmxnet3_softc *sc; 2659 1.1 ryo 2660 1.1 ryo sc = xsc; 2661 1.1 ryo 2662 1.1 ryo if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE) 2663 1.1 ryo vmxnet3_disable_intr(sc, sc->vmx_event_intr_idx); 2664 1.1 ryo 2665 1.1 ryo sc->vmx_event_intr.ev_count++; 2666 1.1 ryo 2667 1.1 ryo if (sc->vmx_ds->event != 0) 2668 1.1 ryo vmxnet3_evintr(sc); 2669 1.1 ryo 2670 1.1 ryo vmxnet3_enable_intr(sc, sc->vmx_event_intr_idx); 2671 1.1 ryo 2672 1.1 ryo return (1); 2673 1.1 ryo } 2674 1.1 ryo 2675 1.1 ryo static void 2676 1.1 ryo vmxnet3_txstop(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *txq) 2677 1.1 ryo { 2678 1.1 ryo struct vmxnet3_txring *txr; 2679 1.1 ryo struct vmxnet3_txbuf *txb; 2680 1.2 ryo u_int i; 2681 1.1 ryo 2682 1.1 ryo txr = &txq->vxtxq_cmd_ring; 2683 1.1 ryo 2684 1.1 ryo for (i = 0; i < txr->vxtxr_ndesc; i++) { 2685 1.1 ryo txb = &txr->vxtxr_txbuf[i]; 2686 1.1 ryo 2687 1.1 ryo if (txb->vtxb_m == NULL) 2688 1.1 ryo continue; 2689 1.1 ryo 2690 1.1 ryo bus_dmamap_sync(sc->vmx_dmat, txb->vtxb_dmamap, 2691 1.1 ryo 0, txb->vtxb_dmamap->dm_mapsize, 2692 1.1 ryo BUS_DMASYNC_POSTWRITE); 2693 1.1 ryo bus_dmamap_unload(sc->vmx_dmat, txb->vtxb_dmamap); 2694 1.1 ryo m_freem(txb->vtxb_m); 2695 1.1 ryo txb->vtxb_m = NULL; 2696 1.1 ryo } 2697 1.1 ryo } 2698 1.1 ryo 2699 1.1 ryo static void 2700 1.1 ryo vmxnet3_rxstop(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rxq) 2701 1.1 ryo { 2702 1.1 ryo struct vmxnet3_rxring *rxr; 2703 1.1 ryo struct vmxnet3_rxbuf *rxb; 2704 1.2 ryo u_int i, j; 2705 1.1 ryo 2706 1.1 ryo if (rxq->vxrxq_mhead != NULL) { 2707 1.1 ryo m_freem(rxq->vxrxq_mhead); 2708 1.1 ryo rxq->vxrxq_mhead = NULL; 2709 1.1 ryo rxq->vxrxq_mtail = NULL; 2710 1.1 ryo } 2711 1.1 ryo 2712 1.1 ryo for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) { 2713 1.1 ryo rxr = &rxq->vxrxq_cmd_ring[i]; 2714 1.1 ryo 2715 1.1 ryo for (j = 0; j < rxr->vxrxr_ndesc; j++) { 2716 1.1 ryo rxb = &rxr->vxrxr_rxbuf[j]; 2717 1.1 ryo 2718 1.1 ryo if (rxb->vrxb_m == NULL) 2719 1.1 ryo continue; 2720 1.1 ryo 2721 1.1 ryo bus_dmamap_sync(sc->vmx_dmat, rxb->vrxb_dmamap, 2722 1.1 ryo 0, rxb->vrxb_dmamap->dm_mapsize, 2723 1.1 ryo BUS_DMASYNC_POSTREAD); 2724 1.1 ryo bus_dmamap_unload(sc->vmx_dmat, rxb->vrxb_dmamap); 2725 1.1 ryo m_freem(rxb->vrxb_m); 2726 1.1 ryo rxb->vrxb_m = NULL; 2727 1.1 ryo } 2728 1.1 ryo } 2729 1.1 ryo } 2730 1.1 ryo 2731 1.1 ryo static void 2732 1.1 ryo vmxnet3_stop_rendezvous(struct vmxnet3_softc *sc) 2733 1.1 ryo { 2734 1.1 ryo struct vmxnet3_rxqueue *rxq; 2735 1.1 ryo struct vmxnet3_txqueue *txq; 2736 1.7 knakahar struct vmxnet3_queue *vmxq; 2737 1.1 ryo int i; 2738 1.1 ryo 2739 1.1 ryo for (i = 0; i < sc->vmx_nrxqueues; i++) { 2740 1.1 ryo rxq = &sc->vmx_queue[i].vxq_rxqueue; 2741 1.1 ryo VMXNET3_RXQ_LOCK(rxq); 2742 1.12 riastrad rxq->vxrxq_stopping = true; 2743 1.1 ryo VMXNET3_RXQ_UNLOCK(rxq); 2744 1.1 ryo } 2745 1.1 ryo for (i = 0; i < sc->vmx_ntxqueues; i++) { 2746 1.1 ryo txq = &sc->vmx_queue[i].vxq_txqueue; 2747 1.1 ryo VMXNET3_TXQ_LOCK(txq); 2748 1.12 riastrad txq->vxtxq_stopping = true; 2749 1.1 ryo VMXNET3_TXQ_UNLOCK(txq); 2750 1.1 ryo } 2751 1.7 knakahar for (i = 0; i < sc->vmx_nrxqueues; i++) { 2752 1.7 knakahar vmxq = &sc->vmx_queue[i]; 2753 1.7 knakahar workqueue_wait(sc->vmx_queue_wq, &vmxq->vxq_wq_cookie); 2754 1.7 knakahar } 2755 1.1 ryo } 2756 1.1 ryo 2757 1.1 ryo static void 2758 1.1 ryo vmxnet3_stop_locked(struct vmxnet3_softc *sc) 2759 1.1 ryo { 2760 1.12 riastrad struct ifnet *ifp = &sc->vmx_ethercom.ec_if; 2761 1.1 ryo int q; 2762 1.1 ryo 2763 1.1 ryo VMXNET3_CORE_LOCK_ASSERT(sc); 2764 1.12 riastrad KASSERT(IFNET_LOCKED(ifp)); 2765 1.12 riastrad 2766 1.12 riastrad vmxnet3_stop_rendezvous(sc); 2767 1.12 riastrad 2768 1.12 riastrad sc->vmx_mcastactive = false; 2769 1.12 riastrad sc->vmx_link_active = 0; 2770 1.12 riastrad callout_halt(&sc->vmx_tick, sc->vmx_mtx); 2771 1.1 ryo 2772 1.1 ryo ifp->if_flags &= ~IFF_RUNNING; 2773 1.1 ryo 2774 1.1 ryo /* Disable interrupts. */ 2775 1.1 ryo vmxnet3_disable_all_intrs(sc); 2776 1.1 ryo vmxnet3_write_cmd(sc, VMXNET3_CMD_DISABLE); 2777 1.1 ryo 2778 1.1 ryo for (q = 0; q < sc->vmx_ntxqueues; q++) 2779 1.1 ryo vmxnet3_txstop(sc, &sc->vmx_queue[q].vxq_txqueue); 2780 1.1 ryo for (q = 0; q < sc->vmx_nrxqueues; q++) 2781 1.1 ryo vmxnet3_rxstop(sc, &sc->vmx_queue[q].vxq_rxqueue); 2782 1.1 ryo 2783 1.1 ryo vmxnet3_write_cmd(sc, VMXNET3_CMD_RESET); 2784 1.1 ryo } 2785 1.1 ryo 2786 1.1 ryo static void 2787 1.1 ryo vmxnet3_stop(struct ifnet *ifp, int disable) 2788 1.1 ryo { 2789 1.1 ryo struct vmxnet3_softc *sc = ifp->if_softc; 2790 1.1 ryo 2791 1.12 riastrad KASSERT(IFNET_LOCKED(ifp)); 2792 1.12 riastrad 2793 1.1 ryo VMXNET3_CORE_LOCK(sc); 2794 1.1 ryo vmxnet3_stop_locked(sc); 2795 1.1 ryo VMXNET3_CORE_UNLOCK(sc); 2796 1.1 ryo } 2797 1.1 ryo 2798 1.1 ryo static void 2799 1.1 ryo vmxnet3_txinit(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *txq) 2800 1.1 ryo { 2801 1.1 ryo struct vmxnet3_txring *txr; 2802 1.1 ryo struct vmxnet3_comp_ring *txc; 2803 1.1 ryo 2804 1.1 ryo txr = &txq->vxtxq_cmd_ring; 2805 1.1 ryo txr->vxtxr_head = 0; 2806 1.1 ryo txr->vxtxr_next = 0; 2807 1.1 ryo txr->vxtxr_gen = VMXNET3_INIT_GEN; 2808 1.1 ryo memset(txr->vxtxr_txd, 0, 2809 1.1 ryo txr->vxtxr_ndesc * sizeof(struct vmxnet3_txdesc)); 2810 1.1 ryo 2811 1.1 ryo txc = &txq->vxtxq_comp_ring; 2812 1.1 ryo txc->vxcr_next = 0; 2813 1.1 ryo txc->vxcr_gen = VMXNET3_INIT_GEN; 2814 1.1 ryo memset(txc->vxcr_u.txcd, 0, 2815 1.1 ryo txc->vxcr_ndesc * sizeof(struct vmxnet3_txcompdesc)); 2816 1.1 ryo } 2817 1.1 ryo 2818 1.1 ryo static int 2819 1.1 ryo vmxnet3_rxinit(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rxq) 2820 1.1 ryo { 2821 1.1 ryo struct vmxnet3_rxring *rxr; 2822 1.1 ryo struct vmxnet3_comp_ring *rxc; 2823 1.2 ryo u_int i, populate, idx; 2824 1.2 ryo int error; 2825 1.1 ryo 2826 1.1 ryo /* LRO and jumbo frame is not supported yet */ 2827 1.1 ryo populate = 1; 2828 1.1 ryo 2829 1.1 ryo for (i = 0; i < populate; i++) { 2830 1.1 ryo rxr = &rxq->vxrxq_cmd_ring[i]; 2831 1.1 ryo rxr->vxrxr_fill = 0; 2832 1.1 ryo rxr->vxrxr_gen = VMXNET3_INIT_GEN; 2833 1.1 ryo memset(rxr->vxrxr_rxd, 0, 2834 1.1 ryo rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxdesc)); 2835 1.1 ryo 2836 1.1 ryo for (idx = 0; idx < rxr->vxrxr_ndesc; idx++) { 2837 1.1 ryo error = vmxnet3_newbuf(sc, rxq, rxr); 2838 1.1 ryo if (error) 2839 1.1 ryo return (error); 2840 1.1 ryo } 2841 1.1 ryo } 2842 1.1 ryo 2843 1.1 ryo for (/**/; i < VMXNET3_RXRINGS_PERQ; i++) { 2844 1.1 ryo rxr = &rxq->vxrxq_cmd_ring[i]; 2845 1.1 ryo rxr->vxrxr_fill = 0; 2846 1.1 ryo rxr->vxrxr_gen = 0; 2847 1.1 ryo memset(rxr->vxrxr_rxd, 0, 2848 1.1 ryo rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxdesc)); 2849 1.1 ryo } 2850 1.1 ryo 2851 1.1 ryo rxc = &rxq->vxrxq_comp_ring; 2852 1.1 ryo rxc->vxcr_next = 0; 2853 1.1 ryo rxc->vxcr_gen = VMXNET3_INIT_GEN; 2854 1.1 ryo memset(rxc->vxcr_u.rxcd, 0, 2855 1.1 ryo rxc->vxcr_ndesc * sizeof(struct vmxnet3_rxcompdesc)); 2856 1.1 ryo 2857 1.1 ryo return (0); 2858 1.1 ryo } 2859 1.1 ryo 2860 1.1 ryo static int 2861 1.1 ryo vmxnet3_reinit_queues(struct vmxnet3_softc *sc) 2862 1.1 ryo { 2863 1.1 ryo device_t dev; 2864 1.1 ryo int q, error; 2865 1.1 ryo dev = sc->vmx_dev; 2866 1.1 ryo 2867 1.1 ryo for (q = 0; q < sc->vmx_ntxqueues; q++) 2868 1.1 ryo vmxnet3_txinit(sc, &sc->vmx_queue[q].vxq_txqueue); 2869 1.1 ryo 2870 1.1 ryo for (q = 0; q < sc->vmx_nrxqueues; q++) { 2871 1.1 ryo error = vmxnet3_rxinit(sc, &sc->vmx_queue[q].vxq_rxqueue); 2872 1.1 ryo if (error) { 2873 1.1 ryo device_printf(dev, "cannot populate Rx queue %d\n", q); 2874 1.1 ryo return (error); 2875 1.1 ryo } 2876 1.1 ryo } 2877 1.1 ryo 2878 1.1 ryo return (0); 2879 1.1 ryo } 2880 1.1 ryo 2881 1.1 ryo static int 2882 1.1 ryo vmxnet3_enable_device(struct vmxnet3_softc *sc) 2883 1.1 ryo { 2884 1.1 ryo int q; 2885 1.1 ryo 2886 1.1 ryo if (vmxnet3_read_cmd(sc, VMXNET3_CMD_ENABLE) != 0) { 2887 1.1 ryo device_printf(sc->vmx_dev, "device enable command failed!\n"); 2888 1.1 ryo return (1); 2889 1.1 ryo } 2890 1.1 ryo 2891 1.1 ryo /* Reset the Rx queue heads. */ 2892 1.1 ryo for (q = 0; q < sc->vmx_nrxqueues; q++) { 2893 1.1 ryo vmxnet3_write_bar0(sc, VMXNET3_BAR0_RXH1(q), 0); 2894 1.1 ryo vmxnet3_write_bar0(sc, VMXNET3_BAR0_RXH2(q), 0); 2895 1.1 ryo } 2896 1.1 ryo 2897 1.1 ryo return (0); 2898 1.1 ryo } 2899 1.1 ryo 2900 1.1 ryo static void 2901 1.1 ryo vmxnet3_reinit_rxfilters(struct vmxnet3_softc *sc) 2902 1.1 ryo { 2903 1.1 ryo 2904 1.1 ryo vmxnet3_set_rxfilter(sc); 2905 1.1 ryo 2906 1.1 ryo memset(sc->vmx_ds->vlan_filter, 0, sizeof(sc->vmx_ds->vlan_filter)); 2907 1.1 ryo vmxnet3_write_cmd(sc, VMXNET3_CMD_VLAN_FILTER); 2908 1.1 ryo } 2909 1.1 ryo 2910 1.1 ryo static int 2911 1.1 ryo vmxnet3_reinit(struct vmxnet3_softc *sc) 2912 1.1 ryo { 2913 1.1 ryo 2914 1.12 riastrad VMXNET3_CORE_LOCK_ASSERT(sc); 2915 1.12 riastrad 2916 1.1 ryo vmxnet3_set_lladdr(sc); 2917 1.1 ryo vmxnet3_reinit_shared_data(sc); 2918 1.1 ryo 2919 1.1 ryo if (vmxnet3_reinit_queues(sc) != 0) 2920 1.1 ryo return (ENXIO); 2921 1.1 ryo 2922 1.1 ryo if (vmxnet3_enable_device(sc) != 0) 2923 1.1 ryo return (ENXIO); 2924 1.1 ryo 2925 1.1 ryo vmxnet3_reinit_rxfilters(sc); 2926 1.1 ryo 2927 1.1 ryo return (0); 2928 1.1 ryo } 2929 1.1 ryo 2930 1.1 ryo static int 2931 1.1 ryo vmxnet3_init_locked(struct vmxnet3_softc *sc) 2932 1.1 ryo { 2933 1.1 ryo struct ifnet *ifp = &sc->vmx_ethercom.ec_if; 2934 1.12 riastrad int q; 2935 1.1 ryo int error; 2936 1.1 ryo 2937 1.12 riastrad KASSERT(IFNET_LOCKED(ifp)); 2938 1.12 riastrad VMXNET3_CORE_LOCK_ASSERT(sc); 2939 1.12 riastrad 2940 1.1 ryo vmxnet3_stop_locked(sc); 2941 1.1 ryo 2942 1.1 ryo error = vmxnet3_reinit(sc); 2943 1.1 ryo if (error) { 2944 1.1 ryo vmxnet3_stop_locked(sc); 2945 1.1 ryo return (error); 2946 1.1 ryo } 2947 1.1 ryo 2948 1.1 ryo ifp->if_flags |= IFF_RUNNING; 2949 1.3 ryo vmxnet3_if_link_status(sc); 2950 1.12 riastrad sc->vmx_mcastactive = true; 2951 1.1 ryo 2952 1.1 ryo vmxnet3_enable_all_intrs(sc); 2953 1.1 ryo callout_reset(&sc->vmx_tick, hz, vmxnet3_tick, sc); 2954 1.1 ryo 2955 1.12 riastrad for (q = 0; q < sc->vmx_ntxqueues; q++) { 2956 1.12 riastrad VMXNET3_TXQ_LOCK(&sc->vmx_queue[q].vxq_txqueue); 2957 1.12 riastrad sc->vmx_queue[q].vxq_txqueue.vxtxq_stopping = false; 2958 1.12 riastrad VMXNET3_TXQ_UNLOCK(&sc->vmx_queue[q].vxq_txqueue); 2959 1.12 riastrad } 2960 1.12 riastrad for (q = 0; q < sc->vmx_nrxqueues; q++) { 2961 1.12 riastrad VMXNET3_RXQ_LOCK(&sc->vmx_queue[q].vxq_rxqueue); 2962 1.12 riastrad sc->vmx_queue[q].vxq_rxqueue.vxrxq_stopping = false; 2963 1.12 riastrad VMXNET3_RXQ_UNLOCK(&sc->vmx_queue[q].vxq_rxqueue); 2964 1.12 riastrad } 2965 1.12 riastrad 2966 1.1 ryo return (0); 2967 1.1 ryo } 2968 1.1 ryo 2969 1.1 ryo static int 2970 1.1 ryo vmxnet3_init(struct ifnet *ifp) 2971 1.1 ryo { 2972 1.1 ryo struct vmxnet3_softc *sc = ifp->if_softc; 2973 1.1 ryo int error; 2974 1.1 ryo 2975 1.12 riastrad KASSERT(IFNET_LOCKED(ifp)); 2976 1.12 riastrad 2977 1.1 ryo VMXNET3_CORE_LOCK(sc); 2978 1.1 ryo error = vmxnet3_init_locked(sc); 2979 1.1 ryo VMXNET3_CORE_UNLOCK(sc); 2980 1.1 ryo 2981 1.1 ryo return (error); 2982 1.1 ryo } 2983 1.1 ryo 2984 1.1 ryo static int 2985 1.1 ryo vmxnet3_txq_offload_ctx(struct vmxnet3_txqueue *txq, struct mbuf *m, 2986 1.1 ryo int *start, int *csum_start) 2987 1.1 ryo { 2988 1.1 ryo struct ether_header *eh; 2989 1.1 ryo struct mbuf *mp; 2990 1.1 ryo int offset, csum_off, iphl, offp; 2991 1.1 ryo bool v4; 2992 1.1 ryo 2993 1.1 ryo eh = mtod(m, struct ether_header *); 2994 1.1 ryo switch (htons(eh->ether_type)) { 2995 1.1 ryo case ETHERTYPE_IP: 2996 1.1 ryo case ETHERTYPE_IPV6: 2997 1.1 ryo offset = ETHER_HDR_LEN; 2998 1.1 ryo break; 2999 1.1 ryo case ETHERTYPE_VLAN: 3000 1.1 ryo offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 3001 1.1 ryo break; 3002 1.1 ryo default: 3003 1.1 ryo m_freem(m); 3004 1.1 ryo return (EINVAL); 3005 1.1 ryo } 3006 1.1 ryo 3007 1.1 ryo if ((m->m_pkthdr.csum_flags & 3008 1.1 ryo (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) { 3009 1.1 ryo iphl = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data); 3010 1.1 ryo v4 = true; 3011 1.1 ryo } else { 3012 1.1 ryo iphl = M_CSUM_DATA_IPv6_IPHL(m->m_pkthdr.csum_data); 3013 1.1 ryo v4 = false; 3014 1.1 ryo } 3015 1.1 ryo *start = offset + iphl; 3016 1.1 ryo 3017 1.1 ryo if (m->m_pkthdr.csum_flags & 3018 1.1 ryo (M_CSUM_TCPv4 | M_CSUM_TCPv6 | M_CSUM_TSOv4 | M_CSUM_TSOv6)) { 3019 1.1 ryo csum_off = offsetof(struct tcphdr, th_sum); 3020 1.1 ryo } else { 3021 1.1 ryo csum_off = offsetof(struct udphdr, uh_sum); 3022 1.1 ryo } 3023 1.1 ryo 3024 1.1 ryo *csum_start = *start + csum_off; 3025 1.1 ryo mp = m_pulldown(m, 0, *csum_start + 2, &offp); 3026 1.1 ryo if (!mp) { 3027 1.1 ryo /* m is already freed */ 3028 1.1 ryo return ENOBUFS; 3029 1.1 ryo } 3030 1.1 ryo 3031 1.1 ryo if (m->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) { 3032 1.1 ryo struct tcphdr *tcp; 3033 1.1 ryo 3034 1.1 ryo txq->vxtxq_stats.vmtxs_tso++; 3035 1.1 ryo tcp = (void *)(mtod(mp, char *) + offp + *start); 3036 1.1 ryo 3037 1.1 ryo if (v4) { 3038 1.1 ryo struct ip *ip; 3039 1.1 ryo 3040 1.1 ryo ip = (void *)(mtod(mp, char *) + offp + offset); 3041 1.1 ryo tcp->th_sum = in_cksum_phdr(ip->ip_src.s_addr, 3042 1.1 ryo ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 3043 1.1 ryo } else { 3044 1.1 ryo struct ip6_hdr *ip6; 3045 1.1 ryo 3046 1.1 ryo ip6 = (void *)(mtod(mp, char *) + offp + offset); 3047 1.1 ryo tcp->th_sum = in6_cksum_phdr(&ip6->ip6_src, 3048 1.1 ryo &ip6->ip6_dst, 0, htonl(IPPROTO_TCP)); 3049 1.1 ryo } 3050 1.1 ryo 3051 1.1 ryo /* 3052 1.1 ryo * For TSO, the size of the protocol header is also 3053 1.1 ryo * included in the descriptor header size. 3054 1.1 ryo */ 3055 1.1 ryo *start += (tcp->th_off << 2); 3056 1.1 ryo } else 3057 1.1 ryo txq->vxtxq_stats.vmtxs_csum++; 3058 1.1 ryo 3059 1.1 ryo return (0); 3060 1.1 ryo } 3061 1.1 ryo 3062 1.1 ryo static int 3063 1.1 ryo vmxnet3_txq_load_mbuf(struct vmxnet3_txqueue *txq, struct mbuf **m0, 3064 1.1 ryo bus_dmamap_t dmap) 3065 1.1 ryo { 3066 1.1 ryo struct mbuf *m; 3067 1.1 ryo bus_dma_tag_t tag; 3068 1.1 ryo int error; 3069 1.1 ryo 3070 1.1 ryo m = *m0; 3071 1.1 ryo tag = txq->vxtxq_sc->vmx_dmat; 3072 1.1 ryo 3073 1.1 ryo error = bus_dmamap_load_mbuf(tag, dmap, m, BUS_DMA_NOWAIT); 3074 1.1 ryo if (error == 0 || error != EFBIG) 3075 1.1 ryo return (error); 3076 1.1 ryo 3077 1.1 ryo m = m_defrag(m, M_NOWAIT); 3078 1.1 ryo if (m != NULL) { 3079 1.1 ryo *m0 = m; 3080 1.1 ryo error = bus_dmamap_load_mbuf(tag, dmap, m, BUS_DMA_NOWAIT); 3081 1.1 ryo } else 3082 1.1 ryo error = ENOBUFS; 3083 1.1 ryo 3084 1.1 ryo if (error) { 3085 1.1 ryo m_freem(*m0); 3086 1.1 ryo *m0 = NULL; 3087 1.1 ryo txq->vxtxq_defrag_failed.ev_count++; 3088 1.1 ryo } else 3089 1.1 ryo txq->vxtxq_defragged.ev_count++; 3090 1.1 ryo 3091 1.1 ryo return (error); 3092 1.1 ryo } 3093 1.1 ryo 3094 1.1 ryo static void 3095 1.1 ryo vmxnet3_txq_unload_mbuf(struct vmxnet3_txqueue *txq, bus_dmamap_t dmap) 3096 1.1 ryo { 3097 1.1 ryo 3098 1.1 ryo bus_dmamap_unload(txq->vxtxq_sc->vmx_dmat, dmap); 3099 1.1 ryo } 3100 1.1 ryo 3101 1.1 ryo static int 3102 1.1 ryo vmxnet3_txq_encap(struct vmxnet3_txqueue *txq, struct mbuf **m0) 3103 1.1 ryo { 3104 1.1 ryo struct vmxnet3_softc *sc; 3105 1.1 ryo struct vmxnet3_txring *txr; 3106 1.1 ryo struct vmxnet3_txdesc *txd, *sop; 3107 1.1 ryo struct mbuf *m; 3108 1.1 ryo bus_dmamap_t dmap; 3109 1.1 ryo bus_dma_segment_t *segs; 3110 1.1 ryo int i, gen, start, csum_start, nsegs, error; 3111 1.1 ryo 3112 1.1 ryo sc = txq->vxtxq_sc; 3113 1.1 ryo start = 0; 3114 1.1 ryo txd = NULL; 3115 1.1 ryo txr = &txq->vxtxq_cmd_ring; 3116 1.1 ryo dmap = txr->vxtxr_txbuf[txr->vxtxr_head].vtxb_dmamap; 3117 1.1 ryo csum_start = 0; /* GCC */ 3118 1.1 ryo 3119 1.1 ryo error = vmxnet3_txq_load_mbuf(txq, m0, dmap); 3120 1.1 ryo if (error) 3121 1.1 ryo return (error); 3122 1.1 ryo 3123 1.1 ryo nsegs = dmap->dm_nsegs; 3124 1.1 ryo segs = dmap->dm_segs; 3125 1.1 ryo 3126 1.1 ryo m = *m0; 3127 1.1 ryo KASSERT(m->m_flags & M_PKTHDR); 3128 1.1 ryo KASSERT(nsegs <= VMXNET3_TX_MAXSEGS); 3129 1.1 ryo 3130 1.1 ryo if (vmxnet3_txring_avail(txr) < nsegs) { 3131 1.1 ryo txq->vxtxq_stats.vmtxs_full++; 3132 1.1 ryo vmxnet3_txq_unload_mbuf(txq, dmap); 3133 1.1 ryo return (ENOSPC); 3134 1.1 ryo } else if (m->m_pkthdr.csum_flags & VMXNET3_CSUM_ALL_OFFLOAD) { 3135 1.1 ryo error = vmxnet3_txq_offload_ctx(txq, m, &start, &csum_start); 3136 1.1 ryo if (error) { 3137 1.1 ryo /* m is already freed */ 3138 1.1 ryo txq->vxtxq_stats.vmtxs_offload_failed++; 3139 1.1 ryo vmxnet3_txq_unload_mbuf(txq, dmap); 3140 1.1 ryo *m0 = NULL; 3141 1.1 ryo return (error); 3142 1.1 ryo } 3143 1.1 ryo } 3144 1.1 ryo 3145 1.1 ryo txr->vxtxr_txbuf[txr->vxtxr_head].vtxb_m = m; 3146 1.1 ryo sop = &txr->vxtxr_txd[txr->vxtxr_head]; 3147 1.1 ryo gen = txr->vxtxr_gen ^ 1; /* Owned by cpu (yet) */ 3148 1.1 ryo 3149 1.1 ryo for (i = 0; i < nsegs; i++) { 3150 1.1 ryo txd = &txr->vxtxr_txd[txr->vxtxr_head]; 3151 1.1 ryo 3152 1.1 ryo txd->addr = segs[i].ds_addr; 3153 1.1 ryo txd->len = segs[i].ds_len; 3154 1.1 ryo txd->gen = gen; 3155 1.1 ryo txd->dtype = 0; 3156 1.1 ryo txd->offload_mode = VMXNET3_OM_NONE; 3157 1.1 ryo txd->offload_pos = 0; 3158 1.1 ryo txd->hlen = 0; 3159 1.1 ryo txd->eop = 0; 3160 1.1 ryo txd->compreq = 0; 3161 1.1 ryo txd->vtag_mode = 0; 3162 1.1 ryo txd->vtag = 0; 3163 1.1 ryo 3164 1.1 ryo if (++txr->vxtxr_head == txr->vxtxr_ndesc) { 3165 1.1 ryo txr->vxtxr_head = 0; 3166 1.1 ryo txr->vxtxr_gen ^= 1; 3167 1.1 ryo } 3168 1.1 ryo gen = txr->vxtxr_gen; 3169 1.1 ryo } 3170 1.1 ryo txd->eop = 1; 3171 1.1 ryo txd->compreq = 1; 3172 1.1 ryo 3173 1.1 ryo if (vlan_has_tag(m)) { 3174 1.1 ryo sop->vtag_mode = 1; 3175 1.1 ryo sop->vtag = vlan_get_tag(m); 3176 1.1 ryo } 3177 1.1 ryo 3178 1.1 ryo if (m->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) { 3179 1.1 ryo sop->offload_mode = VMXNET3_OM_TSO; 3180 1.1 ryo sop->hlen = start; 3181 1.1 ryo sop->offload_pos = m->m_pkthdr.segsz; 3182 1.1 ryo } else if (m->m_pkthdr.csum_flags & (VMXNET3_CSUM_OFFLOAD | 3183 1.1 ryo VMXNET3_CSUM_OFFLOAD_IPV6)) { 3184 1.1 ryo sop->offload_mode = VMXNET3_OM_CSUM; 3185 1.1 ryo sop->hlen = start; 3186 1.1 ryo sop->offload_pos = csum_start; 3187 1.1 ryo } 3188 1.1 ryo 3189 1.1 ryo /* Finally, change the ownership. */ 3190 1.1 ryo vmxnet3_barrier(sc, VMXNET3_BARRIER_WR); 3191 1.1 ryo sop->gen ^= 1; 3192 1.1 ryo 3193 1.1 ryo txq->vxtxq_ts->npending += nsegs; 3194 1.1 ryo if (txq->vxtxq_ts->npending >= txq->vxtxq_ts->intr_threshold) { 3195 1.1 ryo struct vmxnet3_queue *vmxq; 3196 1.1 ryo vmxq = container_of(txq, struct vmxnet3_queue, vxq_txqueue); 3197 1.1 ryo txq->vxtxq_ts->npending = 0; 3198 1.1 ryo vmxnet3_write_bar0(sc, VMXNET3_BAR0_TXH(vmxq->vxq_id), 3199 1.1 ryo txr->vxtxr_head); 3200 1.1 ryo } 3201 1.1 ryo 3202 1.1 ryo return (0); 3203 1.1 ryo } 3204 1.1 ryo 3205 1.1 ryo #define VMXNET3_TX_START 1 3206 1.1 ryo #define VMXNET3_TX_TRANSMIT 2 3207 1.1 ryo static inline void 3208 1.1 ryo vmxnet3_tx_common_locked(struct ifnet *ifp, struct vmxnet3_txqueue *txq, int txtype) 3209 1.1 ryo { 3210 1.1 ryo struct vmxnet3_softc *sc; 3211 1.1 ryo struct vmxnet3_txring *txr; 3212 1.1 ryo struct mbuf *m_head; 3213 1.1 ryo int tx; 3214 1.1 ryo 3215 1.1 ryo sc = ifp->if_softc; 3216 1.1 ryo txr = &txq->vxtxq_cmd_ring; 3217 1.1 ryo tx = 0; 3218 1.1 ryo 3219 1.1 ryo VMXNET3_TXQ_LOCK_ASSERT(txq); 3220 1.1 ryo 3221 1.12 riastrad if (txq->vxtxq_stopping || sc->vmx_link_active == 0) 3222 1.1 ryo return; 3223 1.1 ryo 3224 1.1 ryo for (;;) { 3225 1.1 ryo if (txtype == VMXNET3_TX_START) 3226 1.1 ryo IFQ_POLL(&ifp->if_snd, m_head); 3227 1.1 ryo else 3228 1.1 ryo m_head = pcq_peek(txq->vxtxq_interq); 3229 1.1 ryo if (m_head == NULL) 3230 1.1 ryo break; 3231 1.1 ryo 3232 1.1 ryo if (vmxnet3_txring_avail(txr) < VMXNET3_TX_MAXSEGS) 3233 1.1 ryo break; 3234 1.1 ryo 3235 1.1 ryo if (txtype == VMXNET3_TX_START) 3236 1.1 ryo IFQ_DEQUEUE(&ifp->if_snd, m_head); 3237 1.1 ryo else 3238 1.1 ryo m_head = pcq_get(txq->vxtxq_interq); 3239 1.1 ryo if (m_head == NULL) 3240 1.1 ryo break; 3241 1.1 ryo 3242 1.1 ryo if (vmxnet3_txq_encap(txq, &m_head) != 0) { 3243 1.17 rin m_freem(m_head); 3244 1.1 ryo break; 3245 1.1 ryo } 3246 1.1 ryo 3247 1.1 ryo tx++; 3248 1.1 ryo bpf_mtap(ifp, m_head, BPF_D_OUT); 3249 1.1 ryo } 3250 1.1 ryo 3251 1.1 ryo if (tx > 0) 3252 1.1 ryo txq->vxtxq_watchdog = VMXNET3_WATCHDOG_TIMEOUT; 3253 1.1 ryo } 3254 1.1 ryo 3255 1.1 ryo static void 3256 1.1 ryo vmxnet3_start_locked(struct ifnet *ifp) 3257 1.1 ryo { 3258 1.1 ryo struct vmxnet3_softc *sc; 3259 1.1 ryo struct vmxnet3_txqueue *txq; 3260 1.1 ryo 3261 1.1 ryo sc = ifp->if_softc; 3262 1.1 ryo txq = &sc->vmx_queue[0].vxq_txqueue; 3263 1.1 ryo 3264 1.1 ryo vmxnet3_tx_common_locked(ifp, txq, VMXNET3_TX_START); 3265 1.1 ryo } 3266 1.1 ryo 3267 1.1 ryo void 3268 1.1 ryo vmxnet3_start(struct ifnet *ifp) 3269 1.1 ryo { 3270 1.1 ryo struct vmxnet3_softc *sc; 3271 1.1 ryo struct vmxnet3_txqueue *txq; 3272 1.1 ryo 3273 1.1 ryo sc = ifp->if_softc; 3274 1.1 ryo txq = &sc->vmx_queue[0].vxq_txqueue; 3275 1.1 ryo 3276 1.1 ryo VMXNET3_TXQ_LOCK(txq); 3277 1.1 ryo vmxnet3_start_locked(ifp); 3278 1.1 ryo VMXNET3_TXQ_UNLOCK(txq); 3279 1.1 ryo } 3280 1.1 ryo 3281 1.1 ryo static int 3282 1.1 ryo vmxnet3_select_txqueue(struct ifnet *ifp, struct mbuf *m __unused) 3283 1.1 ryo { 3284 1.1 ryo struct vmxnet3_softc *sc; 3285 1.1 ryo u_int cpuid; 3286 1.1 ryo 3287 1.1 ryo sc = ifp->if_softc; 3288 1.1 ryo cpuid = cpu_index(curcpu()); 3289 1.1 ryo /* 3290 1.15 andvar * Future work 3291 1.1 ryo * We should select txqueue to even up the load even if ncpu is 3292 1.1 ryo * different from sc->vmx_ntxqueues. Currently, the load is not 3293 1.1 ryo * even, that is, when ncpu is six and ntxqueues is four, the load 3294 1.1 ryo * of vmx_queue[0] and vmx_queue[1] is higher than vmx_queue[2] and 3295 1.1 ryo * vmx_queue[3] because CPU#4 always uses vmx_queue[0] and CPU#5 always 3296 1.1 ryo * uses vmx_queue[1]. 3297 1.1 ryo * Furthermore, we should not use random value to select txqueue to 3298 1.1 ryo * avoid reordering. We should use flow information of mbuf. 3299 1.1 ryo */ 3300 1.1 ryo return cpuid % sc->vmx_ntxqueues; 3301 1.1 ryo } 3302 1.1 ryo 3303 1.1 ryo static void 3304 1.1 ryo vmxnet3_transmit_locked(struct ifnet *ifp, struct vmxnet3_txqueue *txq) 3305 1.1 ryo { 3306 1.1 ryo 3307 1.1 ryo vmxnet3_tx_common_locked(ifp, txq, VMXNET3_TX_TRANSMIT); 3308 1.1 ryo } 3309 1.1 ryo 3310 1.1 ryo static int 3311 1.1 ryo vmxnet3_transmit(struct ifnet *ifp, struct mbuf *m) 3312 1.1 ryo { 3313 1.1 ryo struct vmxnet3_softc *sc; 3314 1.1 ryo struct vmxnet3_txqueue *txq; 3315 1.1 ryo int qid; 3316 1.1 ryo 3317 1.1 ryo qid = vmxnet3_select_txqueue(ifp, m); 3318 1.1 ryo sc = ifp->if_softc; 3319 1.1 ryo txq = &sc->vmx_queue[qid].vxq_txqueue; 3320 1.1 ryo 3321 1.1 ryo if (__predict_false(!pcq_put(txq->vxtxq_interq, m))) { 3322 1.1 ryo VMXNET3_TXQ_LOCK(txq); 3323 1.1 ryo txq->vxtxq_pcqdrop.ev_count++; 3324 1.1 ryo VMXNET3_TXQ_UNLOCK(txq); 3325 1.1 ryo m_freem(m); 3326 1.1 ryo return ENOBUFS; 3327 1.1 ryo } 3328 1.1 ryo 3329 1.10 knakahar #ifdef VMXNET3_ALWAYS_TXDEFER 3330 1.10 knakahar kpreempt_disable(); 3331 1.10 knakahar softint_schedule(txq->vxtxq_si); 3332 1.10 knakahar kpreempt_enable(); 3333 1.10 knakahar #else 3334 1.1 ryo if (VMXNET3_TXQ_TRYLOCK(txq)) { 3335 1.1 ryo vmxnet3_transmit_locked(ifp, txq); 3336 1.1 ryo VMXNET3_TXQ_UNLOCK(txq); 3337 1.1 ryo } else { 3338 1.1 ryo kpreempt_disable(); 3339 1.1 ryo softint_schedule(txq->vxtxq_si); 3340 1.1 ryo kpreempt_enable(); 3341 1.1 ryo } 3342 1.10 knakahar #endif 3343 1.1 ryo 3344 1.1 ryo return 0; 3345 1.1 ryo } 3346 1.1 ryo 3347 1.1 ryo static void 3348 1.1 ryo vmxnet3_deferred_transmit(void *arg) 3349 1.1 ryo { 3350 1.1 ryo struct vmxnet3_txqueue *txq = arg; 3351 1.1 ryo struct vmxnet3_softc *sc = txq->vxtxq_sc; 3352 1.1 ryo struct ifnet *ifp = &sc->vmx_ethercom.ec_if; 3353 1.1 ryo 3354 1.1 ryo VMXNET3_TXQ_LOCK(txq); 3355 1.1 ryo txq->vxtxq_transmitdef.ev_count++; 3356 1.1 ryo if (pcq_peek(txq->vxtxq_interq) != NULL) 3357 1.1 ryo vmxnet3_transmit_locked(ifp, txq); 3358 1.1 ryo VMXNET3_TXQ_UNLOCK(txq); 3359 1.1 ryo } 3360 1.1 ryo 3361 1.1 ryo static void 3362 1.1 ryo vmxnet3_set_rxfilter(struct vmxnet3_softc *sc) 3363 1.1 ryo { 3364 1.1 ryo struct ethercom *ec = &sc->vmx_ethercom; 3365 1.1 ryo struct vmxnet3_driver_shared *ds = sc->vmx_ds; 3366 1.1 ryo struct ether_multi *enm; 3367 1.1 ryo struct ether_multistep step; 3368 1.1 ryo u_int mode; 3369 1.1 ryo uint8_t *p; 3370 1.1 ryo 3371 1.12 riastrad VMXNET3_CORE_LOCK_ASSERT(sc); 3372 1.12 riastrad 3373 1.1 ryo ds->mcast_tablelen = 0; 3374 1.1 ryo ETHER_LOCK(ec); 3375 1.1 ryo CLR(ec->ec_flags, ETHER_F_ALLMULTI); 3376 1.1 ryo ETHER_UNLOCK(ec); 3377 1.1 ryo 3378 1.1 ryo /* 3379 1.1 ryo * Always accept broadcast frames. 3380 1.1 ryo * Always accept frames destined to our station address. 3381 1.1 ryo */ 3382 1.1 ryo mode = VMXNET3_RXMODE_BCAST | VMXNET3_RXMODE_UCAST; 3383 1.1 ryo 3384 1.1 ryo ETHER_LOCK(ec); 3385 1.12 riastrad if (sc->vmx_promisc || 3386 1.1 ryo ec->ec_multicnt > VMXNET3_MULTICAST_MAX) 3387 1.1 ryo goto allmulti; 3388 1.1 ryo 3389 1.1 ryo p = sc->vmx_mcast; 3390 1.1 ryo ETHER_FIRST_MULTI(step, ec, enm); 3391 1.1 ryo while (enm != NULL) { 3392 1.1 ryo if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 3393 1.1 ryo /* 3394 1.1 ryo * We must listen to a range of multicast addresses. 3395 1.1 ryo * For now, just accept all multicasts, rather than 3396 1.1 ryo * trying to set only those filter bits needed to match 3397 1.1 ryo * the range. (At this time, the only use of address 3398 1.1 ryo * ranges is for IP multicast routing, for which the 3399 1.1 ryo * range is big enough to require all bits set.) 3400 1.1 ryo */ 3401 1.1 ryo goto allmulti; 3402 1.1 ryo } 3403 1.1 ryo memcpy(p, enm->enm_addrlo, ETHER_ADDR_LEN); 3404 1.1 ryo 3405 1.1 ryo p += ETHER_ADDR_LEN; 3406 1.1 ryo 3407 1.1 ryo ETHER_NEXT_MULTI(step, enm); 3408 1.1 ryo } 3409 1.1 ryo 3410 1.1 ryo if (ec->ec_multicnt > 0) { 3411 1.1 ryo SET(mode, VMXNET3_RXMODE_MCAST); 3412 1.1 ryo ds->mcast_tablelen = p - sc->vmx_mcast; 3413 1.1 ryo } 3414 1.1 ryo ETHER_UNLOCK(ec); 3415 1.1 ryo 3416 1.1 ryo goto setit; 3417 1.1 ryo 3418 1.1 ryo allmulti: 3419 1.1 ryo SET(ec->ec_flags, ETHER_F_ALLMULTI); 3420 1.1 ryo ETHER_UNLOCK(ec); 3421 1.1 ryo SET(mode, (VMXNET3_RXMODE_ALLMULTI | VMXNET3_RXMODE_MCAST)); 3422 1.12 riastrad if (sc->vmx_promisc) 3423 1.1 ryo SET(mode, VMXNET3_RXMODE_PROMISC); 3424 1.1 ryo 3425 1.1 ryo setit: 3426 1.1 ryo vmxnet3_write_cmd(sc, VMXNET3_CMD_SET_FILTER); 3427 1.1 ryo ds->rxmode = mode; 3428 1.1 ryo vmxnet3_write_cmd(sc, VMXNET3_CMD_SET_RXMODE); 3429 1.1 ryo } 3430 1.1 ryo 3431 1.1 ryo static int 3432 1.1 ryo vmxnet3_ioctl(struct ifnet *ifp, u_long cmd, void *data) 3433 1.1 ryo { 3434 1.1 ryo struct vmxnet3_softc *sc = ifp->if_softc; 3435 1.1 ryo struct ifreq *ifr = (struct ifreq *)data; 3436 1.1 ryo int s, error = 0; 3437 1.1 ryo 3438 1.1 ryo switch (cmd) { 3439 1.12 riastrad case SIOCADDMULTI: 3440 1.12 riastrad case SIOCDELMULTI: 3441 1.12 riastrad break; 3442 1.12 riastrad default: 3443 1.12 riastrad KASSERT(IFNET_LOCKED(ifp)); 3444 1.12 riastrad } 3445 1.12 riastrad 3446 1.12 riastrad switch (cmd) { 3447 1.1 ryo case SIOCSIFMTU: { 3448 1.1 ryo int nmtu = ifr->ifr_mtu; 3449 1.1 ryo 3450 1.1 ryo if (nmtu < VMXNET3_MIN_MTU || nmtu > VMXNET3_MAX_MTU) { 3451 1.1 ryo error = EINVAL; 3452 1.1 ryo break; 3453 1.1 ryo } 3454 1.2 ryo if (ifp->if_mtu != (uint64_t)nmtu) { 3455 1.1 ryo s = splnet(); 3456 1.1 ryo error = ether_ioctl(ifp, cmd, data); 3457 1.1 ryo splx(s); 3458 1.1 ryo if (error == ENETRESET) 3459 1.1 ryo error = vmxnet3_init(ifp); 3460 1.1 ryo } 3461 1.1 ryo break; 3462 1.1 ryo } 3463 1.1 ryo 3464 1.1 ryo default: 3465 1.1 ryo s = splnet(); 3466 1.1 ryo error = ether_ioctl(ifp, cmd, data); 3467 1.1 ryo splx(s); 3468 1.1 ryo } 3469 1.1 ryo 3470 1.1 ryo if (error == ENETRESET) { 3471 1.1 ryo VMXNET3_CORE_LOCK(sc); 3472 1.12 riastrad if (sc->vmx_mcastactive) 3473 1.1 ryo vmxnet3_set_rxfilter(sc); 3474 1.1 ryo VMXNET3_CORE_UNLOCK(sc); 3475 1.1 ryo error = 0; 3476 1.1 ryo } 3477 1.1 ryo 3478 1.1 ryo return error; 3479 1.1 ryo } 3480 1.1 ryo 3481 1.1 ryo static int 3482 1.1 ryo vmxnet3_ifflags_cb(struct ethercom *ec) 3483 1.1 ryo { 3484 1.12 riastrad struct ifnet *ifp = &ec->ec_if; 3485 1.12 riastrad struct vmxnet3_softc *sc = ifp->if_softc; 3486 1.12 riastrad int error = 0; 3487 1.1 ryo 3488 1.12 riastrad KASSERT(IFNET_LOCKED(ifp)); 3489 1.1 ryo 3490 1.1 ryo VMXNET3_CORE_LOCK(sc); 3491 1.12 riastrad const unsigned short changed = ifp->if_flags ^ sc->vmx_if_flags; 3492 1.12 riastrad if ((changed & ~(IFF_CANTCHANGE | IFF_DEBUG)) == 0) { 3493 1.12 riastrad sc->vmx_if_flags = ifp->if_flags; 3494 1.12 riastrad if (changed & IFF_PROMISC) { 3495 1.12 riastrad sc->vmx_promisc = ifp->if_flags & IFF_PROMISC; 3496 1.12 riastrad error = ENETRESET; 3497 1.12 riastrad } 3498 1.12 riastrad } else { 3499 1.12 riastrad error = ENETRESET; 3500 1.12 riastrad } 3501 1.1 ryo VMXNET3_CORE_UNLOCK(sc); 3502 1.1 ryo 3503 1.1 ryo vmxnet3_if_link_status(sc); 3504 1.1 ryo 3505 1.12 riastrad return error; 3506 1.1 ryo } 3507 1.1 ryo 3508 1.1 ryo static int 3509 1.1 ryo vmxnet3_watchdog(struct vmxnet3_txqueue *txq) 3510 1.1 ryo { 3511 1.1 ryo struct vmxnet3_softc *sc; 3512 1.1 ryo struct vmxnet3_queue *vmxq; 3513 1.1 ryo 3514 1.1 ryo sc = txq->vxtxq_sc; 3515 1.1 ryo vmxq = container_of(txq, struct vmxnet3_queue, vxq_txqueue); 3516 1.1 ryo 3517 1.1 ryo VMXNET3_TXQ_LOCK(txq); 3518 1.1 ryo if (txq->vxtxq_watchdog == 0 || --txq->vxtxq_watchdog) { 3519 1.1 ryo VMXNET3_TXQ_UNLOCK(txq); 3520 1.1 ryo return (0); 3521 1.1 ryo } 3522 1.1 ryo txq->vxtxq_watchdogto.ev_count++; 3523 1.1 ryo VMXNET3_TXQ_UNLOCK(txq); 3524 1.1 ryo 3525 1.1 ryo device_printf(sc->vmx_dev, "watchdog timeout on queue %d\n", 3526 1.1 ryo vmxq->vxq_id); 3527 1.1 ryo return (1); 3528 1.1 ryo } 3529 1.1 ryo 3530 1.1 ryo static void 3531 1.1 ryo vmxnet3_refresh_host_stats(struct vmxnet3_softc *sc) 3532 1.1 ryo { 3533 1.1 ryo 3534 1.1 ryo vmxnet3_write_cmd(sc, VMXNET3_CMD_GET_STATS); 3535 1.1 ryo } 3536 1.1 ryo 3537 1.1 ryo static void 3538 1.1 ryo vmxnet3_tick(void *xsc) 3539 1.1 ryo { 3540 1.1 ryo struct vmxnet3_softc *sc; 3541 1.1 ryo int i, timedout; 3542 1.1 ryo 3543 1.1 ryo sc = xsc; 3544 1.1 ryo timedout = 0; 3545 1.1 ryo 3546 1.1 ryo VMXNET3_CORE_LOCK(sc); 3547 1.1 ryo 3548 1.1 ryo vmxnet3_refresh_host_stats(sc); 3549 1.1 ryo 3550 1.1 ryo for (i = 0; i < sc->vmx_ntxqueues; i++) 3551 1.1 ryo timedout |= vmxnet3_watchdog(&sc->vmx_queue[i].vxq_txqueue); 3552 1.1 ryo 3553 1.12 riastrad if (timedout != 0) { 3554 1.12 riastrad if (!sc->vmx_reset_pending) { 3555 1.12 riastrad sc->vmx_reset_pending = true; 3556 1.12 riastrad workqueue_enqueue(sc->vmx_reset_wq, 3557 1.12 riastrad &sc->vmx_reset_work, NULL); 3558 1.12 riastrad } 3559 1.12 riastrad } else { 3560 1.1 ryo callout_reset(&sc->vmx_tick, hz, vmxnet3_tick, sc); 3561 1.12 riastrad } 3562 1.1 ryo 3563 1.1 ryo VMXNET3_CORE_UNLOCK(sc); 3564 1.1 ryo } 3565 1.1 ryo 3566 1.12 riastrad static void 3567 1.12 riastrad vmxnet3_reset_work(struct work *work, void *arg) 3568 1.12 riastrad { 3569 1.12 riastrad struct vmxnet3_softc *sc = arg; 3570 1.12 riastrad struct ifnet *ifp = &sc->vmx_ethercom.ec_if; 3571 1.12 riastrad 3572 1.12 riastrad VMXNET3_CORE_LOCK(sc); 3573 1.12 riastrad KASSERT(sc->vmx_reset_pending); 3574 1.12 riastrad sc->vmx_reset_pending = false; 3575 1.12 riastrad VMXNET3_CORE_UNLOCK(sc); 3576 1.12 riastrad 3577 1.12 riastrad IFNET_LOCK(ifp); 3578 1.12 riastrad (void)vmxnet3_init(ifp); 3579 1.12 riastrad IFNET_UNLOCK(ifp); 3580 1.12 riastrad } 3581 1.12 riastrad 3582 1.1 ryo /* 3583 1.1 ryo * update link state of ifnet and softc 3584 1.1 ryo */ 3585 1.1 ryo static void 3586 1.1 ryo vmxnet3_if_link_status(struct vmxnet3_softc *sc) 3587 1.1 ryo { 3588 1.1 ryo struct ifnet *ifp = &sc->vmx_ethercom.ec_if; 3589 1.4 ryo u_int link; 3590 1.4 ryo bool up; 3591 1.1 ryo 3592 1.4 ryo up = vmxnet3_cmd_link_status(ifp); 3593 1.4 ryo if (up) { 3594 1.1 ryo sc->vmx_link_active = 1; 3595 1.1 ryo link = LINK_STATE_UP; 3596 1.1 ryo } else { 3597 1.1 ryo sc->vmx_link_active = 0; 3598 1.1 ryo link = LINK_STATE_DOWN; 3599 1.1 ryo } 3600 1.1 ryo 3601 1.1 ryo if_link_state_change(ifp, link); 3602 1.1 ryo } 3603 1.1 ryo 3604 1.1 ryo /* 3605 1.1 ryo * check vmx(4) state by VMXNET3_CMD and update ifp->if_baudrate 3606 1.1 ryo * returns 3607 1.1 ryo * - true: link up 3608 1.15 andvar * - false: link down 3609 1.1 ryo */ 3610 1.1 ryo static bool 3611 1.1 ryo vmxnet3_cmd_link_status(struct ifnet *ifp) 3612 1.1 ryo { 3613 1.1 ryo struct vmxnet3_softc *sc = ifp->if_softc; 3614 1.1 ryo u_int x, speed; 3615 1.1 ryo 3616 1.1 ryo x = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_LINK); 3617 1.1 ryo if ((x & 1) == 0) 3618 1.1 ryo return false; 3619 1.1 ryo 3620 1.1 ryo speed = x >> 16; 3621 1.1 ryo ifp->if_baudrate = IF_Mbps(speed); 3622 1.1 ryo return true; 3623 1.1 ryo } 3624 1.1 ryo 3625 1.1 ryo static void 3626 1.1 ryo vmxnet3_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr) 3627 1.1 ryo { 3628 1.1 ryo bool up; 3629 1.1 ryo 3630 1.1 ryo ifmr->ifm_status = IFM_AVALID; 3631 1.1 ryo ifmr->ifm_active = IFM_ETHER; 3632 1.1 ryo 3633 1.1 ryo up = vmxnet3_cmd_link_status(ifp); 3634 1.1 ryo if (!up) 3635 1.1 ryo return; 3636 1.1 ryo 3637 1.1 ryo ifmr->ifm_status |= IFM_ACTIVE; 3638 1.1 ryo 3639 1.1 ryo if (ifp->if_baudrate >= IF_Gbps(10ULL)) 3640 1.13 msaitoh ifmr->ifm_active |= IFM_10G_T | IFM_FDX; 3641 1.1 ryo } 3642 1.1 ryo 3643 1.1 ryo static int 3644 1.1 ryo vmxnet3_ifmedia_change(struct ifnet *ifp) 3645 1.1 ryo { 3646 1.1 ryo return 0; 3647 1.1 ryo } 3648 1.1 ryo 3649 1.1 ryo static void 3650 1.1 ryo vmxnet3_set_lladdr(struct vmxnet3_softc *sc) 3651 1.1 ryo { 3652 1.1 ryo uint32_t ml, mh; 3653 1.1 ryo 3654 1.1 ryo ml = sc->vmx_lladdr[0]; 3655 1.1 ryo ml |= sc->vmx_lladdr[1] << 8; 3656 1.1 ryo ml |= sc->vmx_lladdr[2] << 16; 3657 1.1 ryo ml |= sc->vmx_lladdr[3] << 24; 3658 1.1 ryo vmxnet3_write_bar1(sc, VMXNET3_BAR1_MACL, ml); 3659 1.1 ryo 3660 1.1 ryo mh = sc->vmx_lladdr[4]; 3661 1.1 ryo mh |= sc->vmx_lladdr[5] << 8; 3662 1.1 ryo vmxnet3_write_bar1(sc, VMXNET3_BAR1_MACH, mh); 3663 1.1 ryo } 3664 1.1 ryo 3665 1.1 ryo static void 3666 1.1 ryo vmxnet3_get_lladdr(struct vmxnet3_softc *sc) 3667 1.1 ryo { 3668 1.1 ryo uint32_t ml, mh; 3669 1.1 ryo 3670 1.1 ryo ml = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_MACL); 3671 1.1 ryo mh = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_MACH); 3672 1.1 ryo 3673 1.1 ryo sc->vmx_lladdr[0] = ml; 3674 1.1 ryo sc->vmx_lladdr[1] = ml >> 8; 3675 1.1 ryo sc->vmx_lladdr[2] = ml >> 16; 3676 1.1 ryo sc->vmx_lladdr[3] = ml >> 24; 3677 1.1 ryo sc->vmx_lladdr[4] = mh; 3678 1.1 ryo sc->vmx_lladdr[5] = mh >> 8; 3679 1.1 ryo } 3680 1.1 ryo 3681 1.1 ryo static void 3682 1.1 ryo vmxnet3_enable_all_intrs(struct vmxnet3_softc *sc) 3683 1.1 ryo { 3684 1.1 ryo int i; 3685 1.1 ryo 3686 1.1 ryo sc->vmx_ds->ictrl &= ~VMXNET3_ICTRL_DISABLE_ALL; 3687 1.1 ryo for (i = 0; i < sc->vmx_nintrs; i++) 3688 1.1 ryo vmxnet3_enable_intr(sc, i); 3689 1.1 ryo } 3690 1.1 ryo 3691 1.1 ryo static void 3692 1.1 ryo vmxnet3_disable_all_intrs(struct vmxnet3_softc *sc) 3693 1.1 ryo { 3694 1.1 ryo int i; 3695 1.1 ryo 3696 1.1 ryo sc->vmx_ds->ictrl |= VMXNET3_ICTRL_DISABLE_ALL; 3697 1.1 ryo for (i = 0; i < sc->vmx_nintrs; i++) 3698 1.1 ryo vmxnet3_disable_intr(sc, i); 3699 1.1 ryo } 3700 1.1 ryo 3701 1.1 ryo static int 3702 1.1 ryo vmxnet3_dma_malloc(struct vmxnet3_softc *sc, bus_size_t size, bus_size_t align, 3703 1.1 ryo struct vmxnet3_dma_alloc *dma) 3704 1.1 ryo { 3705 1.1 ryo bus_dma_tag_t t = sc->vmx_dmat; 3706 1.1 ryo bus_dma_segment_t *segs = dma->dma_segs; 3707 1.1 ryo int n, error; 3708 1.1 ryo 3709 1.1 ryo memset(dma, 0, sizeof(*dma)); 3710 1.1 ryo 3711 1.1 ryo error = bus_dmamem_alloc(t, size, align, 0, segs, 1, &n, BUS_DMA_NOWAIT); 3712 1.1 ryo if (error) { 3713 1.1 ryo aprint_error_dev(sc->vmx_dev, "bus_dmamem_alloc failed: %d\n", error); 3714 1.1 ryo goto fail1; 3715 1.1 ryo } 3716 1.1 ryo KASSERT(n == 1); 3717 1.1 ryo 3718 1.1 ryo error = bus_dmamem_map(t, segs, 1, size, &dma->dma_vaddr, BUS_DMA_NOWAIT); 3719 1.1 ryo if (error) { 3720 1.1 ryo aprint_error_dev(sc->vmx_dev, "bus_dmamem_map failed: %d\n", error); 3721 1.1 ryo goto fail2; 3722 1.1 ryo } 3723 1.1 ryo 3724 1.1 ryo error = bus_dmamap_create(t, size, 1, size, 0, BUS_DMA_NOWAIT, &dma->dma_map); 3725 1.1 ryo if (error) { 3726 1.1 ryo aprint_error_dev(sc->vmx_dev, "bus_dmamap_create failed: %d\n", error); 3727 1.1 ryo goto fail3; 3728 1.1 ryo } 3729 1.1 ryo 3730 1.1 ryo error = bus_dmamap_load(t, dma->dma_map, dma->dma_vaddr, size, NULL, 3731 1.1 ryo BUS_DMA_NOWAIT); 3732 1.1 ryo if (error) { 3733 1.1 ryo aprint_error_dev(sc->vmx_dev, "bus_dmamap_load failed: %d\n", error); 3734 1.1 ryo goto fail4; 3735 1.1 ryo } 3736 1.1 ryo 3737 1.1 ryo memset(dma->dma_vaddr, 0, size); 3738 1.1 ryo dma->dma_paddr = DMAADDR(dma->dma_map); 3739 1.1 ryo dma->dma_size = size; 3740 1.1 ryo 3741 1.1 ryo return (0); 3742 1.1 ryo fail4: 3743 1.1 ryo bus_dmamap_destroy(t, dma->dma_map); 3744 1.1 ryo fail3: 3745 1.1 ryo bus_dmamem_unmap(t, dma->dma_vaddr, size); 3746 1.1 ryo fail2: 3747 1.1 ryo bus_dmamem_free(t, segs, 1); 3748 1.1 ryo fail1: 3749 1.1 ryo return (error); 3750 1.1 ryo } 3751 1.1 ryo 3752 1.1 ryo static void 3753 1.1 ryo vmxnet3_dma_free(struct vmxnet3_softc *sc, struct vmxnet3_dma_alloc *dma) 3754 1.1 ryo { 3755 1.1 ryo bus_dma_tag_t t = sc->vmx_dmat; 3756 1.1 ryo 3757 1.1 ryo bus_dmamap_unload(t, dma->dma_map); 3758 1.1 ryo bus_dmamap_destroy(t, dma->dma_map); 3759 1.1 ryo bus_dmamem_unmap(t, dma->dma_vaddr, dma->dma_size); 3760 1.1 ryo bus_dmamem_free(t, dma->dma_segs, 1); 3761 1.1 ryo 3762 1.1 ryo memset(dma, 0, sizeof(*dma)); 3763 1.1 ryo } 3764 1.2 ryo 3765 1.2 ryo MODULE(MODULE_CLASS_DRIVER, if_vmx, "pci"); 3766 1.2 ryo 3767 1.2 ryo #ifdef _MODULE 3768 1.2 ryo #include "ioconf.c" 3769 1.2 ryo #endif 3770 1.2 ryo 3771 1.2 ryo static int 3772 1.2 ryo if_vmx_modcmd(modcmd_t cmd, void *opaque) 3773 1.2 ryo { 3774 1.2 ryo int error = 0; 3775 1.2 ryo 3776 1.2 ryo switch (cmd) { 3777 1.2 ryo case MODULE_CMD_INIT: 3778 1.2 ryo #ifdef _MODULE 3779 1.2 ryo error = config_init_component(cfdriver_ioconf_if_vmx, 3780 1.2 ryo cfattach_ioconf_if_vmx, cfdata_ioconf_if_vmx); 3781 1.2 ryo #endif 3782 1.2 ryo return error; 3783 1.2 ryo case MODULE_CMD_FINI: 3784 1.2 ryo #ifdef _MODULE 3785 1.2 ryo error = config_fini_component(cfdriver_ioconf_if_vmx, 3786 1.2 ryo cfattach_ioconf_if_vmx, cfdata_ioconf_if_vmx); 3787 1.2 ryo #endif 3788 1.2 ryo return error; 3789 1.2 ryo default: 3790 1.2 ryo return ENOTTY; 3791 1.2 ryo } 3792 1.2 ryo } 3793 1.2 ryo 3794