Home | History | Annotate | Line # | Download | only in igc
if_igc.c revision 1.15.2.1
      1  1.15.2.1  perseant /*	$NetBSD: if_igc.c,v 1.15.2.1 2025/08/02 05:57:01 perseant Exp $	*/
      2       1.1       rin /*	$OpenBSD: if_igc.c,v 1.13 2023/04/28 10:18:57 bluhm Exp $	*/
      3       1.1       rin /*-
      4       1.1       rin  * SPDX-License-Identifier: BSD-2-Clause
      5       1.1       rin  *
      6       1.1       rin  * Copyright (c) 2016 Nicole Graziano <nicole (at) nextbsd.org>
      7       1.1       rin  * All rights reserved.
      8       1.1       rin  * Copyright (c) 2021 Rubicon Communications, LLC (Netgate)
      9       1.1       rin  *
     10       1.1       rin  * Redistribution and use in source and binary forms, with or without
     11       1.1       rin  * modification, are permitted provided that the following conditions
     12       1.1       rin  * are met:
     13       1.1       rin  * 1. Redistributions of source code must retain the above copyright
     14       1.1       rin  *    notice, this list of conditions and the following disclaimer.
     15       1.1       rin  * 2. Redistributions in binary form must reproduce the above copyright
     16       1.1       rin  *    notice, this list of conditions and the following disclaimer in the
     17       1.1       rin  *    documentation and/or other materials provided with the distribution.
     18       1.1       rin  *
     19       1.1       rin  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
     20       1.1       rin  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     21       1.1       rin  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     22       1.1       rin  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
     23       1.1       rin  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     24       1.1       rin  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     25       1.1       rin  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     26       1.1       rin  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     27       1.1       rin  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     28       1.1       rin  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     29       1.1       rin  * SUCH DAMAGE.
     30       1.1       rin  */
     31       1.1       rin 
     32       1.2       rin #include <sys/cdefs.h>
     33  1.15.2.1  perseant __KERNEL_RCSID(0, "$NetBSD: if_igc.c,v 1.15.2.1 2025/08/02 05:57:01 perseant Exp $");
     34       1.2       rin 
     35       1.2       rin #ifdef _KERNEL_OPT
     36       1.2       rin #include "opt_if_igc.h"
     37       1.2       rin #if 0 /* notyet */
     38       1.1       rin #include "vlan.h"
     39       1.2       rin #endif
     40       1.2       rin #endif
     41       1.1       rin 
     42       1.1       rin #include <sys/param.h>
     43       1.1       rin #include <sys/systm.h>
     44       1.2       rin #include <sys/bus.h>
     45       1.2       rin #include <sys/cpu.h>
     46       1.2       rin #include <sys/device.h>
     47       1.2       rin #include <sys/endian.h>
     48       1.2       rin #include <sys/intr.h>
     49       1.2       rin #include <sys/interrupt.h>
     50       1.2       rin #include <sys/kernel.h>
     51       1.2       rin #include <sys/kmem.h>
     52       1.1       rin #include <sys/mbuf.h>
     53       1.2       rin #include <sys/mutex.h>
     54       1.1       rin #include <sys/socket.h>
     55       1.2       rin #include <sys/workqueue.h>
     56       1.2       rin #include <sys/xcall.h>
     57       1.1       rin 
     58       1.2       rin #include <net/bpf.h>
     59       1.1       rin #include <net/if.h>
     60       1.2       rin #include <net/if_ether.h>
     61       1.1       rin #include <net/if_media.h>
     62       1.2       rin #include <net/if_vlanvar.h>
     63       1.2       rin #include <net/rss_config.h>
     64       1.1       rin 
     65       1.1       rin #include <netinet/in.h>
     66       1.1       rin #include <netinet/ip.h>
     67       1.1       rin #include <netinet/ip6.h>
     68       1.2       rin #include <netinet/tcp.h>
     69       1.1       rin 
     70       1.2       rin #include <dev/pci/pcivar.h>
     71       1.2       rin #include <dev/pci/pcireg.h>
     72       1.2       rin #include <dev/pci/pcidevs.h>
     73       1.2       rin 
     74       1.2       rin #include <dev/pci/igc/if_igc.h>
     75       1.2       rin #include <dev/pci/igc/igc_evcnt.h>
     76       1.2       rin #include <dev/pci/igc/igc_hw.h>
     77       1.2       rin #include <dev/mii/miivar.h>
     78       1.2       rin 
     79       1.2       rin #define IGC_WORKQUEUE_PRI	PRI_SOFTNET
     80       1.2       rin 
     81       1.2       rin #ifndef IGC_RX_INTR_PROCESS_LIMIT_DEFAULT
     82       1.2       rin #define IGC_RX_INTR_PROCESS_LIMIT_DEFAULT	0
     83       1.2       rin #endif
     84       1.2       rin #ifndef IGC_TX_INTR_PROCESS_LIMIT_DEFAULT
     85       1.2       rin #define IGC_TX_INTR_PROCESS_LIMIT_DEFAULT	0
     86       1.1       rin #endif
     87       1.1       rin 
     88       1.2       rin #ifndef IGC_RX_PROCESS_LIMIT_DEFAULT
     89       1.2       rin #define IGC_RX_PROCESS_LIMIT_DEFAULT		256
     90       1.2       rin #endif
     91       1.2       rin #ifndef IGC_TX_PROCESS_LIMIT_DEFAULT
     92       1.2       rin #define IGC_TX_PROCESS_LIMIT_DEFAULT		256
     93       1.2       rin #endif
     94       1.1       rin 
     95       1.2       rin #define	htolem32(p, x)	(*((uint32_t *)(p)) = htole32(x))
     96       1.2       rin #define	htolem64(p, x)	(*((uint64_t *)(p)) = htole64(x))
     97       1.1       rin 
     98       1.2       rin static const struct igc_product {
     99       1.2       rin 	pci_vendor_id_t		igcp_vendor;
    100       1.2       rin 	pci_product_id_t	igcp_product;
    101       1.2       rin 	const char		*igcp_name;
    102       1.2       rin } igc_products[] = {
    103       1.2       rin 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_IT,
    104       1.2       rin 	    "Intel(R) Ethernet Controller I225-IT(2)" },
    105       1.2       rin 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I226_LM,
    106       1.2       rin 	    "Intel(R) Ethernet Controller I226-LM" },
    107       1.2       rin 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I226_V,
    108       1.2       rin 	    "Intel(R) Ethernet Controller I226-V" },
    109       1.2       rin 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I226_IT,
    110       1.2       rin 	    "Intel(R) Ethernet Controller I226-IT" },
    111       1.2       rin 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I221_V,
    112       1.2       rin 	    "Intel(R) Ethernet Controller I221-V" },
    113       1.2       rin 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I226_BLANK_NVM,
    114       1.2       rin 	    "Intel(R) Ethernet Controller I226(blankNVM)" },
    115       1.2       rin 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_LM,
    116       1.2       rin 	    "Intel(R) Ethernet Controller I225-LM" },
    117       1.2       rin 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_V,
    118       1.2       rin 	    "Intel(R) Ethernet Controller I225-V" },
    119       1.2       rin 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I220_V,
    120       1.2       rin 	    "Intel(R) Ethernet Controller I220-V" },
    121       1.2       rin 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_I,
    122       1.2       rin 	    "Intel(R) Ethernet Controller I225-I" },
    123       1.2       rin 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_BLANK_NVM,
    124       1.2       rin 	    "Intel(R) Ethernet Controller I225(blankNVM)" },
    125       1.2       rin 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_K,
    126       1.2       rin 	    "Intel(R) Ethernet Controller I225-K" },
    127       1.2       rin 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_K2,
    128       1.2       rin 	    "Intel(R) Ethernet Controller I225-K(2)" },
    129       1.2       rin 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I226_K,
    130       1.2       rin 	    "Intel(R) Ethernet Controller I226-K" },
    131       1.2       rin 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_LMVP,
    132       1.2       rin 	    "Intel(R) Ethernet Controller I225-LMvP(2)" },
    133       1.2       rin 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I226_LMVP,
    134       1.2       rin 	    "Intel(R) Ethernet Controller I226-LMvP" },
    135       1.2       rin 	{ 0, 0, NULL },
    136       1.1       rin };
    137       1.1       rin 
    138       1.2       rin #define	IGC_DF_CFG	0x1
    139       1.2       rin #define	IGC_DF_TX	0x2
    140       1.2       rin #define	IGC_DF_RX	0x4
    141       1.2       rin #define	IGC_DF_MISC	0x8
    142       1.2       rin 
    143       1.2       rin #ifdef IGC_DEBUG_FLAGS
    144       1.2       rin int igc_debug_flags = IGC_DEBUG_FLAGS;
    145       1.2       rin #else
    146       1.2       rin int igc_debug_flags = 0;
    147       1.2       rin #endif
    148       1.2       rin 
    149       1.2       rin #define	DPRINTF(flag, fmt, args...)		do {			\
    150       1.2       rin 	if (igc_debug_flags & (IGC_DF_ ## flag))			\
    151       1.2       rin 		printf("%s: %d: " fmt, __func__, __LINE__, ##args);	\
    152       1.2       rin     } while (0)
    153       1.2       rin 
    154       1.1       rin /*********************************************************************
    155       1.1       rin  *  Function Prototypes
    156       1.1       rin  *********************************************************************/
    157       1.2       rin static int	igc_match(device_t, cfdata_t, void *);
    158       1.2       rin static void	igc_attach(device_t, device_t, void *);
    159       1.2       rin static int	igc_detach(device_t, int);
    160       1.2       rin 
    161       1.2       rin static void	igc_identify_hardware(struct igc_softc *);
    162       1.2       rin static int	igc_adjust_nqueues(struct igc_softc *);
    163       1.2       rin static int	igc_allocate_pci_resources(struct igc_softc *);
    164       1.2       rin static int	igc_allocate_interrupts(struct igc_softc *);
    165       1.2       rin static int	igc_allocate_queues(struct igc_softc *);
    166       1.2       rin static void	igc_free_pci_resources(struct igc_softc *);
    167       1.2       rin static void	igc_free_interrupts(struct igc_softc *);
    168       1.2       rin static void	igc_free_queues(struct igc_softc *);
    169       1.2       rin static void	igc_reset(struct igc_softc *);
    170       1.2       rin static void	igc_init_dmac(struct igc_softc *, uint32_t);
    171       1.2       rin static int	igc_setup_interrupts(struct igc_softc *);
    172       1.2       rin static void	igc_attach_counters(struct igc_softc *sc);
    173       1.2       rin static void	igc_detach_counters(struct igc_softc *sc);
    174       1.2       rin static void	igc_update_counters(struct igc_softc *sc);
    175       1.2       rin static void	igc_clear_counters(struct igc_softc *sc);
    176       1.2       rin static int	igc_setup_msix(struct igc_softc *);
    177       1.2       rin static int	igc_setup_msi(struct igc_softc *);
    178       1.2       rin static int	igc_setup_intx(struct igc_softc *);
    179       1.2       rin static int	igc_dma_malloc(struct igc_softc *, bus_size_t,
    180       1.2       rin 		    struct igc_dma_alloc *);
    181       1.2       rin static void	igc_dma_free(struct igc_softc *, struct igc_dma_alloc *);
    182       1.2       rin static void	igc_setup_interface(struct igc_softc *);
    183       1.2       rin 
    184       1.2       rin static int	igc_init(struct ifnet *);
    185       1.2       rin static int	igc_init_locked(struct igc_softc *);
    186       1.2       rin static void	igc_start(struct ifnet *);
    187       1.2       rin static int	igc_transmit(struct ifnet *, struct mbuf *);
    188       1.2       rin static void	igc_tx_common_locked(struct ifnet *, struct tx_ring *, int);
    189       1.2       rin static bool	igc_txeof(struct tx_ring *, u_int);
    190       1.2       rin static void	igc_intr_barrier(struct igc_softc *);
    191       1.2       rin static void	igc_stop(struct ifnet *, int);
    192       1.2       rin static void	igc_stop_locked(struct igc_softc *);
    193       1.2       rin static int	igc_ioctl(struct ifnet *, u_long, void *);
    194       1.2       rin #ifdef IF_RXR
    195       1.2       rin static int	igc_rxrinfo(struct igc_softc *, struct if_rxrinfo *);
    196       1.2       rin #endif
    197       1.2       rin static void	igc_rxfill(struct rx_ring *);
    198       1.2       rin static void	igc_rxrefill(struct rx_ring *, int);
    199       1.2       rin static bool	igc_rxeof(struct rx_ring *, u_int);
    200       1.2       rin static int	igc_rx_checksum(struct igc_queue *, uint64_t, uint32_t,
    201       1.2       rin 		    uint32_t);
    202       1.2       rin static void	igc_watchdog(struct ifnet *);
    203       1.2       rin static void	igc_tick(void *);
    204       1.2       rin static void	igc_media_status(struct ifnet *, struct ifmediareq *);
    205       1.2       rin static int	igc_media_change(struct ifnet *);
    206       1.2       rin static int	igc_ifflags_cb(struct ethercom *);
    207       1.2       rin static void	igc_set_filter(struct igc_softc *);
    208       1.2       rin static void	igc_update_link_status(struct igc_softc *);
    209       1.2       rin static int	igc_get_buf(struct rx_ring *, int, bool);
    210  1.15.2.1  perseant static bool	igc_tx_ctx_setup(struct tx_ring *, struct mbuf *, int,
    211       1.2       rin 		    uint32_t *, uint32_t *);
    212       1.2       rin 
    213       1.2       rin static void	igc_configure_queues(struct igc_softc *);
    214       1.2       rin static void	igc_set_queues(struct igc_softc *, uint32_t, uint32_t, int);
    215       1.2       rin static void	igc_enable_queue(struct igc_softc *, uint32_t);
    216       1.2       rin static void	igc_enable_intr(struct igc_softc *);
    217       1.2       rin static void	igc_disable_intr(struct igc_softc *);
    218       1.2       rin static int	igc_intr_link(void *);
    219       1.2       rin static int	igc_intr_queue(void *);
    220       1.2       rin static int	igc_intr(void *);
    221       1.2       rin static void	igc_handle_queue(void *);
    222       1.2       rin static void	igc_handle_queue_work(struct work *, void *);
    223       1.2       rin static void	igc_sched_handle_queue(struct igc_softc *, struct igc_queue *);
    224       1.2       rin static void	igc_barrier_handle_queue(struct igc_softc *);
    225       1.2       rin 
    226       1.2       rin static int	igc_allocate_transmit_buffers(struct tx_ring *);
    227       1.2       rin static int	igc_setup_transmit_structures(struct igc_softc *);
    228       1.2       rin static int	igc_setup_transmit_ring(struct tx_ring *);
    229       1.2       rin static void	igc_initialize_transmit_unit(struct igc_softc *);
    230       1.2       rin static void	igc_free_transmit_structures(struct igc_softc *);
    231       1.2       rin static void	igc_free_transmit_buffers(struct tx_ring *);
    232       1.2       rin static void	igc_withdraw_transmit_packets(struct tx_ring *, bool);
    233       1.2       rin static int	igc_allocate_receive_buffers(struct rx_ring *);
    234       1.2       rin static int	igc_setup_receive_structures(struct igc_softc *);
    235       1.2       rin static int	igc_setup_receive_ring(struct rx_ring *);
    236       1.2       rin static void	igc_initialize_receive_unit(struct igc_softc *);
    237       1.2       rin static void	igc_free_receive_structures(struct igc_softc *);
    238       1.2       rin static void	igc_free_receive_buffers(struct rx_ring *);
    239       1.2       rin static void	igc_clear_receive_status(struct rx_ring *);
    240       1.2       rin static void	igc_initialize_rss_mapping(struct igc_softc *);
    241       1.2       rin 
    242       1.2       rin static void	igc_get_hw_control(struct igc_softc *);
    243       1.2       rin static void	igc_release_hw_control(struct igc_softc *);
    244       1.2       rin static int	igc_is_valid_ether_addr(uint8_t *);
    245       1.2       rin static void	igc_print_devinfo(struct igc_softc *);
    246       1.2       rin 
    247       1.2       rin CFATTACH_DECL3_NEW(igc, sizeof(struct igc_softc),
    248       1.2       rin     igc_match, igc_attach, igc_detach, NULL, NULL, NULL, 0);
    249       1.2       rin 
    250       1.2       rin static inline int
    251       1.2       rin igc_txdesc_incr(struct igc_softc *sc, int id)
    252       1.2       rin {
    253       1.2       rin 
    254       1.2       rin 	if (++id == sc->num_tx_desc)
    255       1.2       rin 		id = 0;
    256       1.2       rin 	return id;
    257       1.2       rin }
    258       1.2       rin 
    259       1.2       rin static inline int __unused
    260       1.2       rin igc_txdesc_decr(struct igc_softc *sc, int id)
    261       1.2       rin {
    262       1.2       rin 
    263       1.2       rin 	if (--id < 0)
    264       1.2       rin 		id = sc->num_tx_desc - 1;
    265       1.2       rin 	return id;
    266       1.2       rin }
    267       1.2       rin 
    268       1.2       rin static inline void
    269       1.2       rin igc_txdesc_sync(struct tx_ring *txr, int id, int ops)
    270       1.2       rin {
    271       1.2       rin 
    272       1.2       rin 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
    273       1.2       rin 	    id * sizeof(union igc_adv_tx_desc), sizeof(union igc_adv_tx_desc),
    274       1.2       rin 	    ops);
    275       1.2       rin }
    276       1.1       rin 
    277       1.2       rin static inline int
    278       1.2       rin igc_rxdesc_incr(struct igc_softc *sc, int id)
    279       1.2       rin {
    280       1.2       rin 
    281       1.2       rin 	if (++id == sc->num_rx_desc)
    282       1.2       rin 		id = 0;
    283       1.2       rin 	return id;
    284       1.2       rin }
    285       1.2       rin 
    286       1.2       rin static inline int
    287       1.2       rin igc_rxdesc_decr(struct igc_softc *sc, int id)
    288       1.2       rin {
    289       1.2       rin 
    290       1.2       rin 	if (--id < 0)
    291       1.2       rin 		id = sc->num_rx_desc - 1;
    292       1.2       rin 	return id;
    293       1.2       rin }
    294       1.2       rin 
    295       1.2       rin static inline void
    296       1.2       rin igc_rxdesc_sync(struct rx_ring *rxr, int id, int ops)
    297       1.2       rin {
    298       1.2       rin 
    299       1.2       rin 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
    300       1.2       rin 	    id * sizeof(union igc_adv_rx_desc), sizeof(union igc_adv_rx_desc),
    301       1.2       rin 	    ops);
    302       1.2       rin }
    303       1.1       rin 
    304       1.2       rin static const struct igc_product *
    305       1.2       rin igc_lookup(const struct pci_attach_args *pa)
    306       1.2       rin {
    307       1.2       rin 	const struct igc_product *igcp;
    308       1.1       rin 
    309       1.2       rin 	for (igcp = igc_products; igcp->igcp_name != NULL; igcp++) {
    310       1.2       rin 		if (PCI_VENDOR(pa->pa_id) == igcp->igcp_vendor &&
    311       1.2       rin 		    PCI_PRODUCT(pa->pa_id) == igcp->igcp_product)
    312       1.2       rin 			return igcp;
    313       1.2       rin 	}
    314       1.2       rin 	return NULL;
    315       1.2       rin }
    316       1.1       rin 
    317       1.1       rin /*********************************************************************
    318       1.1       rin  *  Device identification routine
    319       1.1       rin  *
    320       1.1       rin  *  igc_match determines if the driver should be loaded on
    321       1.1       rin  *  adapter based on PCI vendor/device id of the adapter.
    322       1.1       rin  *
    323       1.1       rin  *  return 0 on success, positive on failure
    324       1.1       rin  *********************************************************************/
    325       1.2       rin static int
    326       1.2       rin igc_match(device_t parent, cfdata_t match, void *aux)
    327       1.1       rin {
    328       1.2       rin 	struct pci_attach_args *pa = aux;
    329       1.2       rin 
    330       1.2       rin 	if (igc_lookup(pa) != NULL)
    331       1.2       rin 		return 1;
    332       1.2       rin 
    333       1.2       rin 	return 0;
    334       1.1       rin }
    335       1.1       rin 
    336       1.1       rin /*********************************************************************
    337       1.1       rin  *  Device initialization routine
    338       1.1       rin  *
    339       1.1       rin  *  The attach entry point is called when the driver is being loaded.
    340       1.1       rin  *  This routine identifies the type of hardware, allocates all resources
    341       1.1       rin  *  and initializes the hardware.
    342       1.1       rin  *
    343       1.1       rin  *  return 0 on success, positive on failure
    344       1.1       rin  *********************************************************************/
    345       1.2       rin static void
    346       1.2       rin igc_attach(device_t parent, device_t self, void *aux)
    347       1.1       rin {
    348       1.2       rin 	struct pci_attach_args *pa = aux;
    349       1.2       rin 	struct igc_softc *sc = device_private(self);
    350       1.1       rin 	struct igc_hw *hw = &sc->hw;
    351       1.1       rin 
    352       1.2       rin 	const struct igc_product *igcp = igc_lookup(pa);
    353       1.2       rin 	KASSERT(igcp != NULL);
    354       1.2       rin 
    355       1.2       rin 	sc->sc_dev = self;
    356       1.2       rin 	callout_init(&sc->sc_tick_ch, CALLOUT_MPSAFE);
    357       1.2       rin 	callout_setfunc(&sc->sc_tick_ch, igc_tick, sc);
    358       1.2       rin 	sc->sc_core_stopping = false;
    359       1.2       rin 
    360       1.1       rin 	sc->osdep.os_sc = sc;
    361       1.1       rin 	sc->osdep.os_pa = *pa;
    362       1.8       rin #ifndef __aarch64__
    363       1.3       rin 	/*
    364       1.3       rin 	 * XXX PR port-arm/57643
    365       1.3       rin 	 * 64-bit DMA does not work at least for LX2K with 32/64GB memory.
    366       1.3       rin 	 * smmu(4) support may be required.
    367       1.3       rin 	 */
    368       1.8       rin 	if (pci_dma64_available(pa)) {
    369       1.8       rin 		aprint_verbose(", 64-bit DMA");
    370       1.8       rin 		sc->osdep.os_dmat = pa->pa_dmat64;
    371       1.8       rin 	} else
    372       1.3       rin #endif
    373       1.8       rin 	{
    374       1.8       rin 		aprint_verbose(", 32-bit DMA");
    375       1.8       rin 		sc->osdep.os_dmat = pa->pa_dmat;
    376       1.8       rin 	}
    377       1.8       rin 
    378       1.8       rin 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", igcp->igcp_name, 1);
    379       1.1       rin 
    380       1.1       rin 	/* Determine hardware and mac info */
    381       1.1       rin 	igc_identify_hardware(sc);
    382       1.1       rin 
    383       1.1       rin 	sc->num_tx_desc = IGC_DEFAULT_TXD;
    384       1.1       rin 	sc->num_rx_desc = IGC_DEFAULT_RXD;
    385       1.1       rin 
    386       1.1       rin 	 /* Setup PCI resources */
    387       1.2       rin 	if (igc_allocate_pci_resources(sc)) {
    388       1.2       rin 		aprint_error_dev(sc->sc_dev,
    389       1.2       rin 		    "unable to allocate PCI resources\n");
    390       1.2       rin 		goto err_pci;
    391       1.2       rin 	}
    392       1.2       rin 
    393       1.2       rin 	if (igc_allocate_interrupts(sc)) {
    394       1.2       rin 		aprint_error_dev(sc->sc_dev, "unable to allocate interrupts\n");
    395       1.2       rin 		goto err_pci;
    396       1.2       rin 	}
    397       1.1       rin 
    398       1.1       rin 	/* Allocate TX/RX queues */
    399       1.2       rin 	if (igc_allocate_queues(sc)) {
    400       1.2       rin 		aprint_error_dev(sc->sc_dev, "unable to allocate queues\n");
    401       1.2       rin 		goto err_alloc_intr;
    402       1.2       rin 	}
    403       1.1       rin 
    404       1.1       rin 	/* Do shared code initialization */
    405       1.1       rin 	if (igc_setup_init_funcs(hw, true)) {
    406       1.2       rin 		aprint_error_dev(sc->sc_dev, "unable to initialize\n");
    407       1.2       rin 		goto err_alloc_intr;
    408       1.1       rin 	}
    409       1.1       rin 
    410       1.1       rin 	hw->mac.autoneg = DO_AUTO_NEG;
    411       1.1       rin 	hw->phy.autoneg_wait_to_complete = false;
    412       1.1       rin 	hw->phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
    413       1.1       rin 
    414       1.1       rin 	/* Copper options. */
    415       1.1       rin 	if (hw->phy.media_type == igc_media_type_copper)
    416       1.1       rin 		hw->phy.mdix = AUTO_ALL_MODES;
    417       1.1       rin 
    418       1.1       rin 	/* Set the max frame size. */
    419       1.1       rin 	sc->hw.mac.max_frame_size = 9234;
    420       1.1       rin 
    421       1.1       rin 	/* Allocate multicast array memory. */
    422       1.2       rin 	sc->mta = kmem_alloc(IGC_MTA_LEN, KM_SLEEP);
    423       1.1       rin 
    424       1.1       rin 	/* Check SOL/IDER usage. */
    425       1.2       rin 	if (igc_check_reset_block(hw)) {
    426       1.2       rin 		aprint_error_dev(sc->sc_dev,
    427       1.2       rin 		    "PHY reset is blocked due to SOL/IDER session\n");
    428       1.2       rin 	}
    429       1.1       rin 
    430       1.1       rin 	/* Disable Energy Efficient Ethernet. */
    431       1.1       rin 	sc->hw.dev_spec._i225.eee_disable = true;
    432       1.1       rin 
    433       1.1       rin 	igc_reset_hw(hw);
    434       1.1       rin 
    435       1.1       rin 	/* Make sure we have a good EEPROM before we read from it. */
    436       1.1       rin 	if (igc_validate_nvm_checksum(hw) < 0) {
    437       1.1       rin 		/*
    438       1.1       rin 		 * Some PCI-E parts fail the first check due to
    439       1.1       rin 		 * the link being in sleep state, call it again,
    440       1.1       rin 		 * if it fails a second time its a real issue.
    441       1.1       rin 		 */
    442       1.1       rin 		if (igc_validate_nvm_checksum(hw) < 0) {
    443       1.2       rin 			aprint_error_dev(sc->sc_dev,
    444       1.2       rin 			    "EEPROM checksum invalid\n");
    445       1.1       rin 			goto err_late;
    446       1.1       rin 		}
    447       1.1       rin 	}
    448       1.1       rin 
    449       1.1       rin 	/* Copy the permanent MAC address out of the EEPROM. */
    450       1.1       rin 	if (igc_read_mac_addr(hw) < 0) {
    451       1.2       rin 		aprint_error_dev(sc->sc_dev,
    452       1.2       rin 		    "unable to read MAC address from EEPROM\n");
    453       1.1       rin 		goto err_late;
    454       1.1       rin 	}
    455       1.1       rin 
    456       1.1       rin 	if (!igc_is_valid_ether_addr(hw->mac.addr)) {
    457       1.2       rin 		aprint_error_dev(sc->sc_dev, "invalid MAC address\n");
    458       1.1       rin 		goto err_late;
    459       1.1       rin 	}
    460       1.1       rin 
    461       1.2       rin 	if (igc_setup_interrupts(sc))
    462       1.2       rin 		goto err_late;
    463       1.1       rin 
    464       1.2       rin 	/* Attach counters. */
    465       1.2       rin 	igc_attach_counters(sc);
    466       1.1       rin 
    467       1.1       rin 	/* Setup OS specific network interface. */
    468       1.1       rin 	igc_setup_interface(sc);
    469       1.1       rin 
    470       1.2       rin 	igc_print_devinfo(sc);
    471       1.2       rin 
    472       1.1       rin 	igc_reset(sc);
    473       1.1       rin 	hw->mac.get_link_status = true;
    474       1.1       rin 	igc_update_link_status(sc);
    475       1.1       rin 
    476       1.1       rin 	/* The driver can now take control from firmware. */
    477       1.1       rin 	igc_get_hw_control(sc);
    478       1.1       rin 
    479       1.2       rin 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
    480       1.2       rin 	    ether_sprintf(sc->hw.mac.addr));
    481       1.2       rin 
    482       1.2       rin 	if (pmf_device_register(self, NULL, NULL))
    483       1.2       rin 		pmf_class_network_register(self, &sc->sc_ec.ec_if);
    484       1.2       rin 	else
    485       1.2       rin 		aprint_error_dev(self, "couldn't establish power handler\n");
    486       1.2       rin 
    487       1.1       rin 	return;
    488       1.1       rin 
    489       1.2       rin  err_late:
    490       1.1       rin 	igc_release_hw_control(sc);
    491       1.2       rin  err_alloc_intr:
    492       1.2       rin 	igc_free_interrupts(sc);
    493       1.2       rin  err_pci:
    494       1.1       rin 	igc_free_pci_resources(sc);
    495       1.2       rin 	kmem_free(sc->mta, IGC_MTA_LEN);
    496       1.1       rin }
    497       1.1       rin 
    498       1.1       rin /*********************************************************************
    499       1.1       rin  *  Device removal routine
    500       1.1       rin  *
    501       1.1       rin  *  The detach entry point is called when the driver is being removed.
    502       1.1       rin  *  This routine stops the adapter and deallocates all the resources
    503       1.1       rin  *  that were allocated for driver operation.
    504       1.1       rin  *
    505       1.1       rin  *  return 0 on success, positive on failure
    506       1.1       rin  *********************************************************************/
    507       1.2       rin static int
    508       1.2       rin igc_detach(device_t self, int flags)
    509       1.1       rin {
    510       1.2       rin 	struct igc_softc *sc = device_private(self);
    511       1.2       rin 	struct ifnet *ifp = &sc->sc_ec.ec_if;
    512       1.2       rin 
    513       1.2       rin 	mutex_enter(&sc->sc_core_lock);
    514       1.2       rin 	igc_stop_locked(sc);
    515       1.2       rin 	mutex_exit(&sc->sc_core_lock);
    516       1.2       rin 
    517       1.2       rin 	igc_detach_counters(sc);
    518       1.1       rin 
    519       1.2       rin 	igc_free_queues(sc);
    520       1.1       rin 
    521       1.1       rin 	igc_phy_hw_reset(&sc->hw);
    522       1.1       rin 	igc_release_hw_control(sc);
    523       1.1       rin 
    524       1.1       rin 	ether_ifdetach(ifp);
    525       1.1       rin 	if_detach(ifp);
    526       1.2       rin 	ifmedia_fini(&sc->media);
    527       1.1       rin 
    528       1.2       rin 	igc_free_interrupts(sc);
    529       1.1       rin 	igc_free_pci_resources(sc);
    530       1.2       rin 	kmem_free(sc->mta, IGC_MTA_LEN);
    531       1.1       rin 
    532       1.2       rin 	mutex_destroy(&sc->sc_core_lock);
    533       1.1       rin 
    534       1.1       rin 	return 0;
    535       1.1       rin }
    536       1.1       rin 
    537       1.2       rin static void
    538       1.1       rin igc_identify_hardware(struct igc_softc *sc)
    539       1.1       rin {
    540       1.1       rin 	struct igc_osdep *os = &sc->osdep;
    541       1.1       rin 	struct pci_attach_args *pa = &os->os_pa;
    542       1.1       rin 
    543       1.1       rin 	/* Save off the information about this board. */
    544       1.1       rin 	sc->hw.device_id = PCI_PRODUCT(pa->pa_id);
    545       1.1       rin 
    546       1.1       rin 	/* Do shared code init and setup. */
    547       1.1       rin 	if (igc_set_mac_type(&sc->hw)) {
    548       1.2       rin 		aprint_error_dev(sc->sc_dev, "unable to identify hardware\n");
    549       1.1       rin 		return;
    550       1.2       rin 	}
    551       1.1       rin }
    552       1.1       rin 
    553       1.2       rin static int
    554       1.1       rin igc_allocate_pci_resources(struct igc_softc *sc)
    555       1.1       rin {
    556       1.1       rin 	struct igc_osdep *os = &sc->osdep;
    557       1.1       rin 	struct pci_attach_args *pa = &os->os_pa;
    558       1.1       rin 
    559       1.2       rin 	/*
    560       1.2       rin 	 * Enable bus mastering and memory-mapped I/O for sure.
    561       1.2       rin 	 */
    562       1.2       rin 	pcireg_t csr =
    563       1.2       rin 	    pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
    564       1.2       rin 	csr |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_MEM_ENABLE;
    565       1.2       rin 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, csr);
    566       1.2       rin 
    567       1.2       rin 	const pcireg_t memtype =
    568       1.2       rin 	    pci_mapreg_type(pa->pa_pc, pa->pa_tag, IGC_PCIREG);
    569       1.1       rin 	if (pci_mapreg_map(pa, IGC_PCIREG, memtype, 0, &os->os_memt,
    570       1.2       rin 	    &os->os_memh, &os->os_membase, &os->os_memsize)) {
    571       1.2       rin 		aprint_error_dev(sc->sc_dev, "unable to map registers\n");
    572       1.1       rin 		return ENXIO;
    573       1.1       rin 	}
    574       1.2       rin 
    575       1.2       rin 	sc->hw.hw_addr = os->os_membase;
    576       1.1       rin 	sc->hw.back = os;
    577       1.1       rin 
    578       1.2       rin 	return 0;
    579       1.2       rin }
    580       1.2       rin 
    581       1.2       rin static int __unused
    582       1.2       rin igc_adjust_nqueues(struct igc_softc *sc)
    583       1.2       rin {
    584       1.2       rin 	struct pci_attach_args *pa = &sc->osdep.os_pa;
    585       1.2       rin 	int nqueues = MIN(IGC_MAX_NQUEUES, ncpu);
    586       1.2       rin 
    587       1.2       rin 	const int nmsix = pci_msix_count(pa->pa_pc, pa->pa_tag);
    588       1.2       rin 	if (nmsix <= 1)
    589       1.2       rin 		nqueues = 1;
    590       1.2       rin 	else if (nmsix < nqueues + 1)
    591       1.2       rin 		nqueues = nmsix - 1;
    592       1.2       rin 
    593       1.2       rin 	return nqueues;
    594       1.2       rin }
    595       1.2       rin 
/*
 * Allocate PCI interrupt resources, preferring MSI-X (one vector
 * per queue plus one for link events), then single-vector MSI, then
 * legacy INTx.  On success sc->sc_intr_type, sc->sc_nintrs,
 * sc->sc_nqueues and sc->sc_intrs are set and 0 is returned;
 * otherwise the error from the last allocation attempt is returned.
 */
static int
igc_allocate_interrupts(struct igc_softc *sc)
{
	struct pci_attach_args *pa = &sc->osdep.os_pa;
	int error;

#ifndef IGC_DISABLE_MSIX
	const int nqueues = igc_adjust_nqueues(sc);
	if (nqueues > 1) {
		/* One vector per queue plus one for link/other events. */
		sc->sc_nintrs = nqueues + 1;
		error = pci_msix_alloc_exact(pa, &sc->sc_intrs, sc->sc_nintrs);
		if (!error) {
			sc->sc_nqueues = nqueues;
			sc->sc_intr_type = PCI_INTR_TYPE_MSIX;
			return 0;
		}
	}
#endif

	/* fallback to MSI: single vector, single queue */
	sc->sc_nintrs = sc->sc_nqueues = 1;

#ifndef IGC_DISABLE_MSI
	error = pci_msi_alloc_exact(pa, &sc->sc_intrs, sc->sc_nintrs);
	if (!error) {
		sc->sc_intr_type = PCI_INTR_TYPE_MSI;
		return 0;
	}
#endif

	/* fallback to INTx */

	error = pci_intx_alloc(pa, &sc->sc_intrs);
	if (!error) {
		sc->sc_intr_type = PCI_INTR_TYPE_INTX;
		return 0;
	}

	return error;
}
    636       1.1       rin 
/*
 * Allocate the per-queue data structures: the top-level igc_queue
 * array, the TX/RX ring arrays, and a DMA descriptor area per ring;
 * then set up the software transmit/receive state and link each
 * igc_queue to its rings.  Returns 0 on success, ENOMEM on failure
 * (after freeing any descriptor areas already allocated).
 */
static int
igc_allocate_queues(struct igc_softc *sc)
{
	device_t dev = sc->sc_dev;
	int rxconf = 0, txconf = 0;

	/* Allocate the top level queue structs. */
	sc->queues =
	    kmem_zalloc(sc->sc_nqueues * sizeof(struct igc_queue), KM_SLEEP);

	/* Allocate the TX ring. */
	sc->tx_rings =
	    kmem_zalloc(sc->sc_nqueues * sizeof(struct tx_ring), KM_SLEEP);

	/* Allocate the RX ring. */
	sc->rx_rings =
	    kmem_zalloc(sc->sc_nqueues * sizeof(struct rx_ring), KM_SLEEP);

	/* Set up the TX queues. */
	for (int iq = 0; iq < sc->sc_nqueues; iq++, txconf++) {
		struct tx_ring *txr = &sc->tx_rings[iq];
		/* Descriptor area size, rounded up for DBA alignment. */
		const int tsize = roundup2(
		    sc->num_tx_desc * sizeof(union igc_adv_tx_desc),
		    IGC_DBA_ALIGN);

		txr->sc = sc;
		txr->txr_igcq = &sc->queues[iq];
		txr->me = iq;
		if (igc_dma_malloc(sc, tsize, &txr->txdma)) {
			aprint_error_dev(dev,
			    "unable to allocate TX descriptor\n");
			goto fail;
		}
		txr->tx_base = (union igc_adv_tx_desc *)txr->txdma.dma_vaddr;
		memset(txr->tx_base, 0, tsize);
	}

	/* Prepare transmit descriptors and buffers. */
	if (igc_setup_transmit_structures(sc)) {
		aprint_error_dev(dev, "unable to setup transmit structures\n");
		goto fail;
	}

	/* Set up the RX queues. */
	for (int iq = 0; iq < sc->sc_nqueues; iq++, rxconf++) {
		struct rx_ring *rxr = &sc->rx_rings[iq];
		/* Descriptor area size, rounded up for DBA alignment. */
		const int rsize = roundup2(
		    sc->num_rx_desc * sizeof(union igc_adv_rx_desc),
		    IGC_DBA_ALIGN);

		rxr->sc = sc;
		rxr->rxr_igcq = &sc->queues[iq];
		rxr->me = iq;
#ifdef OPENBSD
		timeout_set(&rxr->rx_refill, igc_rxrefill, rxr);
#endif
		if (igc_dma_malloc(sc, rsize, &rxr->rxdma)) {
			aprint_error_dev(dev,
			    "unable to allocate RX descriptor\n");
			goto fail;
		}
		rxr->rx_base = (union igc_adv_rx_desc *)rxr->rxdma.dma_vaddr;
		memset(rxr->rx_base, 0, rsize);
	}

	sc->rx_mbuf_sz = MCLBYTES;
	/* Prepare receive descriptors and buffers. */
	if (igc_setup_receive_structures(sc)) {
		aprint_error_dev(sc->sc_dev,
		    "unable to setup receive structures\n");
		goto fail;
	}

	/* Set up the queue holding structs. */
	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
		struct igc_queue *q = &sc->queues[iq];

		q->sc = sc;
		q->txr = &sc->tx_rings[iq];
		q->rxr = &sc->rx_rings[iq];
	}

	return 0;

	/*
	 * Unwind: txconf/rxconf count the ring iterations that
	 * completed, so only those descriptor areas are freed.
	 * NOTE(review): software state built by
	 * igc_setup_transmit_structures() is not torn down here when
	 * igc_setup_receive_structures() fails -- confirm whether an
	 * igc_free_transmit_structures() call is needed on this path.
	 */
 fail:
	for (struct rx_ring *rxr = sc->rx_rings; rxconf > 0; rxr++, rxconf--)
		igc_dma_free(sc, &rxr->rxdma);
	for (struct tx_ring *txr = sc->tx_rings; txconf > 0; txr++, txconf--)
		igc_dma_free(sc, &txr->txdma);

	kmem_free(sc->rx_rings, sc->sc_nqueues * sizeof(struct rx_ring));
	sc->rx_rings = NULL;
	kmem_free(sc->tx_rings, sc->sc_nqueues * sizeof(struct tx_ring));
	sc->tx_rings = NULL;
	kmem_free(sc->queues, sc->sc_nqueues * sizeof(struct igc_queue));
	sc->queues = NULL;

	return ENOMEM;
}
    736       1.1       rin 
    737       1.2       rin static void
    738       1.1       rin igc_free_pci_resources(struct igc_softc *sc)
    739       1.1       rin {
    740       1.1       rin 	struct igc_osdep *os = &sc->osdep;
    741       1.1       rin 
    742       1.1       rin 	if (os->os_membase != 0)
    743       1.1       rin 		bus_space_unmap(os->os_memt, os->os_memh, os->os_memsize);
    744       1.1       rin 	os->os_membase = 0;
    745       1.1       rin }
    746       1.1       rin 
    747       1.2       rin static void
    748       1.2       rin igc_free_interrupts(struct igc_softc *sc)
    749       1.2       rin {
    750       1.2       rin 	struct pci_attach_args *pa = &sc->osdep.os_pa;
    751       1.2       rin 	pci_chipset_tag_t pc = pa->pa_pc;
    752       1.2       rin 
    753       1.2       rin 	for (int i = 0; i < sc->sc_nintrs; i++) {
    754       1.2       rin 		if (sc->sc_ihs[i] != NULL) {
    755       1.2       rin 			pci_intr_disestablish(pc, sc->sc_ihs[i]);
    756       1.2       rin 			sc->sc_ihs[i] = NULL;
    757       1.2       rin 		}
    758       1.2       rin 	}
    759       1.2       rin 	pci_intr_release(pc, sc->sc_intrs, sc->sc_nintrs);
    760       1.2       rin }
    761       1.2       rin 
    762       1.2       rin static void
    763       1.2       rin igc_free_queues(struct igc_softc *sc)
    764       1.2       rin {
    765       1.2       rin 
    766       1.2       rin 	igc_free_receive_structures(sc);
    767       1.2       rin 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
    768       1.2       rin 		struct rx_ring *rxr = &sc->rx_rings[iq];
    769       1.2       rin 
    770       1.2       rin 		igc_dma_free(sc, &rxr->rxdma);
    771       1.2       rin 	}
    772       1.2       rin 
    773       1.2       rin 	igc_free_transmit_structures(sc);
    774       1.2       rin 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
    775       1.2       rin 		struct tx_ring *txr = &sc->tx_rings[iq];
    776       1.2       rin 
    777       1.2       rin 		igc_dma_free(sc, &txr->txdma);
    778       1.2       rin 	}
    779       1.2       rin 
    780       1.2       rin 	kmem_free(sc->rx_rings, sc->sc_nqueues * sizeof(struct rx_ring));
    781       1.2       rin 	kmem_free(sc->tx_rings, sc->sc_nqueues * sizeof(struct tx_ring));
    782       1.2       rin 	kmem_free(sc->queues, sc->sc_nqueues * sizeof(struct igc_queue));
    783       1.2       rin }
    784       1.2       rin 
    785       1.1       rin /*********************************************************************
    786       1.1       rin  *
    787       1.1       rin  *  Initialize the hardware to a configuration as specified by the
    788       1.1       rin  *  adapter structure.
    789       1.1       rin  *
    790       1.1       rin  **********************************************************************/
static void
igc_reset(struct igc_softc *sc)
{
	struct igc_hw *hw = &sc->hw;

	/* Let the firmware know the OS is in control */
	igc_get_hw_control(sc);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer
	 * the remainder is used for the transmit buffer.
	 */
	const uint32_t pba = IGC_PBA_34K;

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two frames to be
	 *   received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit. Here we use an arbitrary value of 1500 which will
	 *   restart after one full frame is pulled from the buffer. There
	 *   could be several smaller frames in the buffer and if so they will
	 *   not trigger the XON until their total number reduces the buffer
	 *   by 1500.
	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
	 */
	const uint16_t rx_buffer_size = (pba & 0xffff) << 10;

	/* Leave headroom for one max-sized frame (rounded to 1 KB). */
	hw->fc.high_water = rx_buffer_size -
	    roundup2(sc->hw.mac.max_frame_size, 1024);
	/* 16-byte granularity */
	hw->fc.low_water = hw->fc.high_water - 16;

	if (sc->fc) /* locally set flow control value? */
		hw->fc.requested_mode = sc->fc;
	else
		hw->fc.requested_mode = igc_fc_full;

	hw->fc.pause_time = IGC_FC_PAUSE_TIME;

	hw->fc.send_xon = true;

	/* Issue a global reset */
	igc_reset_hw(hw);
	/* Clear wake-up control so no previous WoL setup lingers. */
	IGC_WRITE_REG(hw, IGC_WUC, 0);

	/* and a re-init */
	if (igc_init_hw(hw) < 0) {
		aprint_error_dev(sc->sc_dev, "unable to reset hardware\n");
		return;
	}

	/* Setup DMA Coalescing */
	igc_init_dmac(sc, pba);

	/* Program the VLAN ethertype, then refresh PHY and link state. */
	IGC_WRITE_REG(hw, IGC_VET, ETHERTYPE_VLAN);
	igc_get_phy_info(hw);
	igc_check_for_link(hw);
}
    853       1.1       rin 
    854       1.1       rin /*********************************************************************
    855       1.1       rin  *
    856       1.1       rin  *  Initialize the DMA Coalescing feature
    857       1.1       rin  *
    858       1.1       rin  **********************************************************************/
static void
igc_init_dmac(struct igc_softc *sc, uint32_t pba)
{
	struct igc_hw *hw = &sc->hw;
	const uint16_t max_frame_size = sc->hw.mac.max_frame_size;
	uint32_t reg, status;

	if (sc->dmac == 0) { /* Disabling it */
		/*
		 * XXXRO NOTE(review): this writes ~IGC_DMACR_DMAC_EN
		 * (i.e. all bits set except the enable bit) rather
		 * than clearing the enable bit in the current
		 * register value -- confirm against the datasheet.
		 */
		reg = ~IGC_DMACR_DMAC_EN;	/* XXXRO */
		IGC_WRITE_REG(hw, IGC_DMACR, reg);
		DPRINTF(MISC, "DMA coalescing disabled\n");
		return;
	} else {
		device_printf(sc->sc_dev, "DMA coalescing enabled\n");
	}

	/* Set starting threshold */
	IGC_WRITE_REG(hw, IGC_DMCTXTH, 0);

	/* Flow-control refresh threshold, derived from the PBA size. */
	uint16_t hwm = 64 * pba - max_frame_size / 16;
	if (hwm < 64 * (pba - 6))
		hwm = 64 * (pba - 6);
	reg = IGC_READ_REG(hw, IGC_FCRTC);
	reg &= ~IGC_FCRTC_RTH_COAL_MASK;
	reg |= (hwm << IGC_FCRTC_RTH_COAL_SHIFT) & IGC_FCRTC_RTH_COAL_MASK;
	IGC_WRITE_REG(hw, IGC_FCRTC, reg);

	/* DMA coalescing threshold, likewise derived from the PBA size. */
	uint32_t dmac = pba - max_frame_size / 512;
	if (dmac < pba - 10)
		dmac = pba - 10;
	reg = IGC_READ_REG(hw, IGC_DMACR);
	reg &= ~IGC_DMACR_DMACTHR_MASK;
	reg |= (dmac << IGC_DMACR_DMACTHR_SHIFT) & IGC_DMACR_DMACTHR_MASK;

	/* transition to L0x or L1 if available..*/
	reg |= IGC_DMACR_DMAC_EN | IGC_DMACR_DMAC_LX_MASK;

	/* Check if status is 2.5Gb backplane connection
	 * before configuration of watchdog timer, which is
	 * in msec values in 12.8usec intervals
	 * watchdog timer= msec values in 32usec intervals
	 * for non 2.5Gb connection
	 */
	status = IGC_READ_REG(hw, IGC_STATUS);
	if ((status & IGC_STATUS_2P5_SKU) &&
	    !(status & IGC_STATUS_2P5_SKU_OVER))
		reg |= (sc->dmac * 5) >> 6;
	else
		reg |= sc->dmac >> 5;

	IGC_WRITE_REG(hw, IGC_DMACR, reg);

	IGC_WRITE_REG(hw, IGC_DMCRTRH, 0);

	/* Set the interval before transition */
	reg = IGC_READ_REG(hw, IGC_DMCTLX);
	reg |= IGC_DMCTLX_DCFLUSH_DIS;

	/*
	 * On 2.5Gb SKUs the TTLX unit is 0.4 usec, so 0xA is used
	 * there to keep the same ~4 usec delay as 0x4 elsewhere.
	 */
	status = IGC_READ_REG(hw, IGC_STATUS);
	if ((status & IGC_STATUS_2P5_SKU) &&
	    !(status & IGC_STATUS_2P5_SKU_OVER))
		reg |= 0xA;
	else
		reg |= 0x4;

	IGC_WRITE_REG(hw, IGC_DMCTLX, reg);

	/* free space in tx packet buffer to wake from DMA coal */
	IGC_WRITE_REG(hw, IGC_DMCTXTH,
	    (IGC_TXPBSIZE - (2 * max_frame_size)) >> 6);

	/* make low power state decision controlled by DMA coal */
	reg = IGC_READ_REG(hw, IGC_PCIEMISC);
	reg &= ~IGC_PCIEMISC_LX_DECISION;
	IGC_WRITE_REG(hw, IGC_PCIEMISC, reg);
}
    939       1.1       rin 
    940       1.2       rin static int
    941       1.2       rin igc_setup_interrupts(struct igc_softc *sc)
    942       1.2       rin {
    943       1.2       rin 	int error;
    944       1.2       rin 
    945       1.2       rin 	switch (sc->sc_intr_type) {
    946       1.2       rin 	case PCI_INTR_TYPE_MSIX:
    947       1.2       rin 		error = igc_setup_msix(sc);
    948       1.2       rin 		break;
    949       1.2       rin 	case PCI_INTR_TYPE_MSI:
    950       1.2       rin 		error = igc_setup_msi(sc);
    951       1.2       rin 		break;
    952       1.2       rin 	case PCI_INTR_TYPE_INTX:
    953       1.2       rin 		error = igc_setup_intx(sc);
    954       1.2       rin 		break;
    955       1.2       rin 	default:
    956       1.2       rin 		panic("%s: invalid interrupt type: %d",
    957       1.2       rin 		    device_xname(sc->sc_dev), sc->sc_intr_type);
    958       1.2       rin 	}
    959       1.2       rin 
    960       1.2       rin 	return error;
    961       1.2       rin }
    962       1.2       rin 
/*
 * Attach evcnt(9) event counters: global, driver (per-device plus a
 * per-queue shadow array), per-queue, and MAC statistics.  Whole
 * body is compiled out unless IGC_EVENT_COUNTERS is defined.
 */
static void
igc_attach_counters(struct igc_softc *sc)
{
#ifdef IGC_EVENT_COUNTERS

	/* Global counters */
	sc->sc_global_evcnts = kmem_zalloc(
	    IGC_GLOBAL_COUNTERS * sizeof(sc->sc_global_evcnts[0]), KM_SLEEP);

	for (int cnt = 0; cnt < IGC_GLOBAL_COUNTERS; cnt++) {
		evcnt_attach_dynamic(&sc->sc_global_evcnts[cnt],
		    igc_global_counters[cnt].type, NULL,
		    device_xname(sc->sc_dev), igc_global_counters[cnt].name);
	}

	/* Driver counters */
	sc->sc_driver_evcnts = kmem_zalloc(
	    IGC_DRIVER_COUNTERS * sizeof(sc->sc_driver_evcnts[0]), KM_SLEEP);

	for (int cnt = 0; cnt < IGC_DRIVER_COUNTERS; cnt++) {
		evcnt_attach_dynamic(&sc->sc_driver_evcnts[cnt],
		    igc_driver_counters[cnt].type, NULL,
		    device_xname(sc->sc_dev), igc_driver_counters[cnt].name);
	}

	/* Per-queue raw driver counters, summed into the above later. */
	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
		struct igc_queue *q = &sc->queues[iq];

		q->igcq_driver_counters = kmem_zalloc(
		    IGC_DRIVER_COUNTERS * sizeof(q->igcq_driver_counters[0]),
		    KM_SLEEP);
	}

	/* Queue counters */
	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
		struct igc_queue *q = &sc->queues[iq];

		/* Group name like "igc0 q0" shown by vmstat -e. */
		snprintf(q->igcq_queue_evname, sizeof(q->igcq_queue_evname),
		    "%s q%d", device_xname(sc->sc_dev), iq);

		q->igcq_queue_evcnts = kmem_zalloc(
		    IGC_QUEUE_COUNTERS * sizeof(q->igcq_queue_evcnts[0]),
		    KM_SLEEP);

		for (int cnt = 0; cnt < IGC_QUEUE_COUNTERS; cnt++) {
			evcnt_attach_dynamic(&q->igcq_queue_evcnts[cnt],
			    igc_queue_counters[cnt].type, NULL,
			    q->igcq_queue_evname, igc_queue_counters[cnt].name);
		}
	}

	/* MAC counters */
	snprintf(sc->sc_mac_evname, sizeof(sc->sc_mac_evname),
	    "%s Mac Statistics", device_xname(sc->sc_dev));

	sc->sc_mac_evcnts = kmem_zalloc(
	    IGC_MAC_COUNTERS * sizeof(sc->sc_mac_evcnts[0]), KM_SLEEP);

	for (int cnt = 0; cnt < IGC_MAC_COUNTERS; cnt++) {
		evcnt_attach_dynamic(&sc->sc_mac_evcnts[cnt], EVCNT_TYPE_MISC,
		    NULL, sc->sc_mac_evname, igc_mac_counters[cnt].name);
	}
#endif
}
   1027       1.2       rin 
/*
 * Detach and free every evcnt(9) counter attached by
 * igc_attach_counters(), in the same grouping order.  Compiled out
 * unless IGC_EVENT_COUNTERS is defined.
 */
static void
igc_detach_counters(struct igc_softc *sc)
{
#ifdef IGC_EVENT_COUNTERS

	/* Global counters */
	for (int cnt = 0; cnt < IGC_GLOBAL_COUNTERS; cnt++)
		evcnt_detach(&sc->sc_global_evcnts[cnt]);

	kmem_free(sc->sc_global_evcnts,
	    IGC_GLOBAL_COUNTERS * sizeof(sc->sc_global_evcnts[0]));

	/* Driver counters: per-queue shadow arrays first. */
	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
		struct igc_queue *q = &sc->queues[iq];

		kmem_free(q->igcq_driver_counters,
		    IGC_DRIVER_COUNTERS * sizeof(q->igcq_driver_counters[0]));
	}

	for (int cnt = 0; cnt < IGC_DRIVER_COUNTERS; cnt++)
		evcnt_detach(&sc->sc_driver_evcnts[cnt]);

	kmem_free(sc->sc_driver_evcnts,
	    IGC_DRIVER_COUNTERS * sizeof(sc->sc_driver_evcnts[0]));

	/* Queue counters */
	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
		struct igc_queue *q = &sc->queues[iq];

		for (int cnt = 0; cnt < IGC_QUEUE_COUNTERS; cnt++)
			evcnt_detach(&q->igcq_queue_evcnts[cnt]);

		kmem_free(q->igcq_queue_evcnts,
		    IGC_QUEUE_COUNTERS * sizeof(q->igcq_queue_evcnts[0]));
	}

	/* MAC statistics */
	for (int cnt = 0; cnt < IGC_MAC_COUNTERS; cnt++)
		evcnt_detach(&sc->sc_mac_evcnts[cnt]);

	kmem_free(sc->sc_mac_evcnts,
	    IGC_MAC_COUNTERS * sizeof(sc->sc_mac_evcnts[0]));
#endif
}
   1073       1.2       rin 
   1074       1.2       rin /*
   1075       1.2       rin  * XXX
   1076       1.2       rin  * FreeBSD uses 4-byte-wise read for 64-bit counters, while Linux just
   1077       1.2       rin  * drops hi words.
   1078       1.2       rin  */
   1079       1.2       rin static inline uint64_t __unused
   1080       1.2       rin igc_read_mac_counter(struct igc_hw *hw, bus_size_t reg, bool is64)
   1081       1.2       rin {
   1082       1.2       rin 	uint64_t val;
   1083       1.2       rin 
   1084       1.2       rin 	val = IGC_READ_REG(hw, reg);
   1085       1.2       rin 	if (is64)
   1086       1.2       rin 		val += ((uint64_t)IGC_READ_REG(hw, reg + 4)) << 32;
   1087       1.2       rin 	return val;
   1088       1.2       rin }
   1089       1.2       rin 
/*
 * Harvest event counters: fold the per-queue driver counters into
 * the per-device evcnt totals, read the hardware MAC statistics
 * registers, and account missed/dropped packets to if_iqdrops.
 * Compiled out unless IGC_EVENT_COUNTERS is defined.
 */
static void
igc_update_counters(struct igc_softc *sc)
{
#ifdef IGC_EVENT_COUNTERS

	/* Global counters: nop */

	/* Driver counters */
	uint64_t sum[IGC_DRIVER_COUNTERS];

	/* Sum per-queue counters into sum[], zeroing them as we go. */
	memset(sum, 0, sizeof(sum));
	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
		struct igc_queue *q = &sc->queues[iq];

		for (int cnt = 0; cnt < IGC_DRIVER_COUNTERS; cnt++) {
			sum[cnt] += IGC_QUEUE_DRIVER_COUNTER_VAL(q, cnt);
			IGC_QUEUE_DRIVER_COUNTER_STORE(q, cnt, 0);
		}
	}

	for (int cnt = 0; cnt < IGC_DRIVER_COUNTERS; cnt++)
		IGC_DRIVER_COUNTER_ADD(sc, cnt, sum[cnt]);

	/* Queue counters: nop */

	/* Mac statistics */
	struct igc_hw *hw = &sc->hw;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint64_t iqdrops = 0;

	for (int cnt = 0; cnt < IGC_MAC_COUNTERS; cnt++) {
		uint64_t val;
		bus_size_t regaddr = igc_mac_counters[cnt].reg;

		val = igc_read_mac_counter(hw, regaddr,
		    igc_mac_counters[cnt].is64);
		IGC_MAC_COUNTER_ADD(sc, cnt, val);
		/* XXX Count MPC (missed packets) towards iqdrops. */
		if (regaddr == IGC_MPC)
			iqdrops += val;
	}

	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
		uint32_t val;

		/* XXX RQDPC should be visible via evcnt(9). */
		val = IGC_READ_REG(hw, IGC_RQDPC(iq));

		/* RQDPC is not cleared on read, so clear it explicitly. */
		if (val != 0)
			IGC_WRITE_REG(hw, IGC_RQDPC(iq), 0);
		iqdrops += val;
	}

	if (iqdrops != 0)
		if_statadd(ifp, if_iqdrops, iqdrops);
#endif
}
   1148       1.2       rin 
/*
 * Reset every event counter maintained by the driver, and read the
 * hardware MAC statistics registers once so their accumulated values
 * are discarded as well.
 */
static void
igc_clear_counters(struct igc_softc *sc)
{
#ifdef IGC_EVENT_COUNTERS

	/* Global counters. */
	for (int i = 0; i < IGC_GLOBAL_COUNTERS; i++)
		IGC_GLOBAL_COUNTER_STORE(sc, i, 0);

	/* Driver counters: per-queue soft counters first. */
	for (int qid = 0; qid < sc->sc_nqueues; qid++) {
		struct igc_queue *q = &sc->queues[qid];

		for (int i = 0; i < IGC_DRIVER_COUNTERS; i++)
			IGC_QUEUE_DRIVER_COUNTER_STORE(q, i, 0);
	}

	/* ... then the per-softc aggregates. */
	for (int i = 0; i < IGC_DRIVER_COUNTERS; i++)
		IGC_DRIVER_COUNTER_STORE(sc, i, 0);

	/* Queue counters. */
	for (int qid = 0; qid < sc->sc_nqueues; qid++) {
		struct igc_queue *q = &sc->queues[qid];

		for (int i = 0; i < IGC_QUEUE_COUNTERS; i++)
			IGC_QUEUE_COUNTER_STORE(q, i, 0);
	}

	/*
	 * MAC statistics: the read result is discarded on purpose --
	 * reading flushes the hardware-side accumulation.
	 */
	struct igc_hw *hw = &sc->hw;

	for (int i = 0; i < IGC_MAC_COUNTERS; i++) {
		(void)igc_read_mac_counter(hw, igc_mac_counters[i].reg,
		    igc_mac_counters[i].is64);
		IGC_MAC_COUNTER_STORE(sc, i, 0);
	}
#endif
}
   1187       1.2       rin 
/*
 * Establish MSI-X interrupts: one txrx vector per queue, each with a
 * per-queue softint and a CPU affinity hint, followed by one final
 * vector for link-state changes.  Also creates the (initially unused)
 * tx/rx workqueue.
 *
 * Returns 0 on success, ENOBUFS on any failure.
 * NOTE(review): error paths do not undo earlier establishes here --
 * presumably the detach path releases them; confirm against caller.
 */
static int
igc_setup_msix(struct igc_softc *sc)
{
	pci_chipset_tag_t pc = sc->osdep.os_pa.pa_pc;
	device_t dev = sc->sc_dev;
	pci_intr_handle_t *intrs;
	void **ihs;
	const char *intrstr;
	char intrbuf[PCI_INTRSTR_LEN];
	char xnamebuf[MAX(32, MAXCOMLEN)];
	int iq, error;

	for (iq = 0, intrs = sc->sc_intrs, ihs = sc->sc_ihs;
	    iq < sc->sc_nqueues; iq++, intrs++, ihs++) {
		struct igc_queue *q = &sc->queues[iq];

		snprintf(xnamebuf, sizeof(xnamebuf), "%s: txrx %d",
		    device_xname(dev), iq);

		intrstr = pci_intr_string(pc, *intrs, intrbuf, sizeof(intrbuf));

		pci_intr_setattr(pc, intrs, PCI_INTR_MPSAFE, true);
		*ihs = pci_intr_establish_xname(pc, *intrs, IPL_NET,
		    igc_intr_queue, q, xnamebuf);
		if (*ihs == NULL) {
			aprint_error_dev(dev,
			    "unable to establish txrx interrupt at %s\n",
			    intrstr);
			return ENOBUFS;
		}
		aprint_normal_dev(dev, "txrx interrupting at %s\n", intrstr);

		/*
		 * Round-robin the queue interrupts over the available
		 * CPUs.  Failure to set affinity is non-fatal; the
		 * default CPU assignment is used instead.
		 */
		kcpuset_t *affinity;
		kcpuset_create(&affinity, true);
		kcpuset_set(affinity, iq % ncpu);
		error = interrupt_distribute(*ihs, affinity, NULL);
		if (error) {
			aprint_normal_dev(dev,
			    "%s: unable to change affinity, use default CPU\n",
			    intrstr);
		}
		kcpuset_destroy(affinity);

		/* Softint for deferred per-queue tx/rx processing. */
		q->igcq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
		    igc_handle_queue, q);
		if (q->igcq_si == NULL) {
			aprint_error_dev(dev,
			    "%s: unable to establish softint\n", intrstr);
			return ENOBUFS;
		}

		q->msix = iq;
		q->eims = 1 << iq;
	}

	snprintf(xnamebuf, MAXCOMLEN, "%s_tx_rx", device_xname(dev));
	error = workqueue_create(&sc->sc_queue_wq, xnamebuf,
	    igc_handle_queue_work, sc, IGC_WORKQUEUE_PRI, IPL_NET,
	    WQ_PERCPU | WQ_MPSAFE);
	if (error) {
		aprint_error_dev(dev, "workqueue_create failed\n");
		return ENOBUFS;
	}
	/* Start on the softint path; workqueue use can be enabled later. */
	sc->sc_txrx_workqueue = false;

	/*
	 * After the loop, intrs/ihs point one past the last queue
	 * vector: that slot is the link-state interrupt.
	 */
	intrstr = pci_intr_string(pc, *intrs, intrbuf, sizeof(intrbuf));
	snprintf(xnamebuf, sizeof(xnamebuf), "%s: link", device_xname(dev));
	pci_intr_setattr(pc, intrs, PCI_INTR_MPSAFE, true);
	*ihs = pci_intr_establish_xname(pc, *intrs, IPL_NET,
	    igc_intr_link, sc, xnamebuf);
	if (*ihs == NULL) {
		aprint_error_dev(dev,
		    "unable to establish link interrupt at %s\n", intrstr);
		return ENOBUFS;
	}
	aprint_normal_dev(dev, "link interrupting at %s\n", intrstr);
	/* use later in igc_configure_queues() */
	sc->linkvec = iq;

	return 0;
}
   1269       1.2       rin 
/*
 * Establish a single MSI interrupt shared by all tx/rx processing and
 * link-state handling, plus the softint and workqueue used for
 * deferred queue processing.
 *
 * Returns 0 on success, ENOBUFS on any failure.
 * NOTE(review): error paths do not release the already-established
 * interrupt/softint -- presumably handled at detach; confirm.
 */
static int
igc_setup_msi(struct igc_softc *sc)
{
	pci_chipset_tag_t pc = sc->osdep.os_pa.pa_pc;
	device_t dev = sc->sc_dev;
	pci_intr_handle_t *intr = sc->sc_intrs;
	void **ihs = sc->sc_ihs;
	const char *intrstr;
	char intrbuf[PCI_INTRSTR_LEN];
	char xnamebuf[MAX(32, MAXCOMLEN)];
	int error;

	intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf));

	snprintf(xnamebuf, sizeof(xnamebuf), "%s: msi", device_xname(dev));
	pci_intr_setattr(pc, intr, PCI_INTR_MPSAFE, true);
	*ihs = pci_intr_establish_xname(pc, *intr, IPL_NET,
	    igc_intr, sc, xnamebuf);
	if (*ihs == NULL) {
		aprint_error_dev(dev,
		    "unable to establish interrupt at %s\n", intrstr);
		return ENOBUFS;
	}
	aprint_normal_dev(dev, "interrupting at %s\n", intrstr);

	/* Single queue: one softint for deferred tx/rx processing. */
	struct igc_queue *iq = sc->queues;
	iq->igcq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
	    igc_handle_queue, iq);
	if (iq->igcq_si == NULL) {
		aprint_error_dev(dev,
		    "%s: unable to establish softint\n", intrstr);
		return ENOBUFS;
	}

	snprintf(xnamebuf, MAXCOMLEN, "%s_tx_rx", device_xname(dev));
	error = workqueue_create(&sc->sc_queue_wq, xnamebuf,
	    igc_handle_queue_work, sc, IGC_WORKQUEUE_PRI, IPL_NET,
	    WQ_PERCPU | WQ_MPSAFE);
	if (error) {
		aprint_error_dev(dev, "workqueue_create failed\n");
		return ENOBUFS;
	}
	/* Start on the softint path; workqueue use can be enabled later. */
	sc->sc_txrx_workqueue = false;

	/* Everything shares vector 0. */
	sc->queues[0].msix = 0;
	sc->linkvec = 0;

	return 0;
}
   1319       1.1       rin 
/*
 * Establish a legacy INTx interrupt shared by all tx/rx processing and
 * link-state handling, plus the softint for deferred queue processing.
 * Unlike the MSI/MSI-X paths, no workqueue is created here (see the
 * comment below), so sc_txrx_workqueue stays false.
 *
 * Returns 0 on success, ENOBUFS on any failure.
 */
static int
igc_setup_intx(struct igc_softc *sc)
{
	pci_chipset_tag_t pc = sc->osdep.os_pa.pa_pc;
	device_t dev = sc->sc_dev;
	pci_intr_handle_t *intr = sc->sc_intrs;
	void **ihs = sc->sc_ihs;
	const char *intrstr;
	char intrbuf[PCI_INTRSTR_LEN];
	char xnamebuf[32];

	intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf));

	snprintf(xnamebuf, sizeof(xnamebuf), "%s:intx", device_xname(dev));
	pci_intr_setattr(pc, intr, PCI_INTR_MPSAFE, true);
	*ihs = pci_intr_establish_xname(pc, *intr, IPL_NET,
	    igc_intr, sc, xnamebuf);
	if (*ihs == NULL) {
		aprint_error_dev(dev,
		    "unable to establish interrupt at %s\n", intrstr);
		return ENOBUFS;
	}
	aprint_normal_dev(dev, "interrupting at %s\n", intrstr);

	/* Single queue: one softint for deferred tx/rx processing. */
	struct igc_queue *iq = sc->queues;
	iq->igcq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
	    igc_handle_queue, iq);
	if (iq->igcq_si == NULL) {
		aprint_error_dev(dev,
		    "%s: unable to establish softint\n", intrstr);
		return ENOBUFS;
	}

	/* create workqueue? */
	sc->sc_txrx_workqueue = false;

	/* Everything shares vector 0. */
	sc->queues[0].msix = 0;
	sc->linkvec = 0;

	return 0;
}
   1361       1.1       rin 
/*
 * Allocate, map, and load a single contiguous DMA-safe memory area of
 * `size' bytes, recording everything needed to free it in `dma'.
 *
 * Returns 0 on success; returns 1 on failure with all partially
 * acquired resources released and dma_map/dma_tag set to NULL so that
 * igc_dma_free() is a no-op on the failed allocation.
 */
static int
igc_dma_malloc(struct igc_softc *sc, bus_size_t size, struct igc_dma_alloc *dma)
{
	struct igc_osdep *os = &sc->osdep;

	dma->dma_tag = os->os_dmat;

	if (bus_dmamap_create(dma->dma_tag, size, 1, size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &dma->dma_map))
		return 1;
	if (bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0, &dma->dma_seg,
	    1, &dma->dma_nseg, BUS_DMA_WAITOK))
		goto destroy;
	/*
	 * XXXRO
	 *
	 * Coherent mapping for descriptors is required for now.
	 *
	 * Both TX and RX descriptors are 16-byte length, which is shorter
	 * than dcache lines on modern CPUs. Therefore, sync for a descriptor
	 * may overwrite DMA read for descriptors in the same cache line.
	 *
	 * Can't we avoid this by use cache-line-aligned descriptors at once?
	 */
	if (bus_dmamem_map(dma->dma_tag, &dma->dma_seg, dma->dma_nseg, size,
	    &dma->dma_vaddr, BUS_DMA_WAITOK | BUS_DMA_COHERENT /* XXXRO */))
		goto free;
	if (bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, size,
	    NULL, BUS_DMA_WAITOK))
		goto unmap;

	dma->dma_size = size;

	return 0;
 unmap:
	bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
 free:
	bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
 destroy:
	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
	dma->dma_map = NULL;
	dma->dma_tag = NULL;
	return 1;
}
   1406       1.1       rin 
   1407       1.2       rin static void
   1408       1.1       rin igc_dma_free(struct igc_softc *sc, struct igc_dma_alloc *dma)
   1409       1.1       rin {
   1410       1.2       rin 
   1411       1.1       rin 	if (dma->dma_tag == NULL)
   1412       1.1       rin 		return;
   1413       1.1       rin 
   1414       1.1       rin 	if (dma->dma_map != NULL) {
   1415       1.1       rin 		bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0,
   1416       1.1       rin 		    dma->dma_map->dm_mapsize,
   1417       1.1       rin 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1418       1.1       rin 		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
   1419       1.1       rin 		bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size);
   1420       1.1       rin 		bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
   1421       1.1       rin 		bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
   1422       1.1       rin 		dma->dma_map = NULL;
   1423       1.1       rin 	}
   1424       1.1       rin }
   1425       1.1       rin 
   1426       1.1       rin /*********************************************************************
   1427       1.1       rin  *
   1428       1.1       rin  *  Setup networking device structure and register an interface.
   1429       1.1       rin  *
   1430       1.1       rin  **********************************************************************/
static void
igc_setup_interface(struct igc_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;

	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), sizeof(ifp->if_xname));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_extflags = IFEF_MPSAFE;
	ifp->if_ioctl = igc_ioctl;
	ifp->if_start = igc_start;
	/* Multiqueue transmit only makes sense with more than one queue. */
	if (sc->sc_nqueues > 1)
		ifp->if_transmit = igc_transmit;
	ifp->if_watchdog = igc_watchdog;
	ifp->if_init = igc_init;
	ifp->if_stop = igc_stop;

	/* TSO plus full IPv4/IPv6 TCP/UDP checksum offload. */
	ifp->if_capabilities = IFCAP_TSOv4 | IFCAP_TSOv6;

	ifp->if_capabilities |=
	    IFCAP_CSUM_IPv4_Tx  | IFCAP_CSUM_IPv4_Rx  |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
	    IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_TCPv6_Rx |
	    IFCAP_CSUM_UDPv6_Tx | IFCAP_CSUM_UDPv6_Rx;

	/* All offloads start disabled; enabled via ioctl. */
	ifp->if_capenable = 0;

	sc->sc_ec.ec_capabilities |=
	    ETHERCAP_JUMBO_MTU | ETHERCAP_VLAN_MTU;

	IFQ_SET_MAXLEN(&ifp->if_snd, sc->num_tx_desc - 1);
	IFQ_SET_READY(&ifp->if_snd);

#if NVLAN > 0
	sc->sc_ec.ec_capabilities |=  ETHERCAP_VLAN_HWTAGGING;
#endif

	mutex_init(&sc->sc_core_lock, MUTEX_DEFAULT, IPL_NET);

	/* Initialize ifmedia structures. */
	sc->sc_ec.ec_ifmedia = &sc->media;
	ifmedia_init_with_lock(&sc->media, IFM_IMASK, igc_media_change,
	    igc_media_status, &sc->sc_core_lock);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_2500_T | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);

	/* Default per-pass processing limits for intr/softint paths. */
	sc->sc_rx_intr_process_limit = IGC_RX_INTR_PROCESS_LIMIT_DEFAULT;
	sc->sc_tx_intr_process_limit = IGC_TX_INTR_PROCESS_LIMIT_DEFAULT;
	sc->sc_rx_process_limit = IGC_RX_PROCESS_LIMIT_DEFAULT;
	sc->sc_tx_process_limit = IGC_TX_PROCESS_LIMIT_DEFAULT;

	/* Attach and register the interface. */
	if_initialize(ifp);
	sc->sc_ipq = if_percpuq_create(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, sc->hw.mac.addr);
	ether_set_ifflags_cb(&sc->sc_ec, igc_ifflags_cb);
	if_register(ifp);
}
   1496       1.1       rin 
   1497       1.2       rin static int
   1498       1.2       rin igc_init(struct ifnet *ifp)
   1499       1.2       rin {
   1500       1.2       rin 	struct igc_softc *sc = ifp->if_softc;
   1501       1.2       rin 	int error;
   1502       1.1       rin 
   1503       1.2       rin 	mutex_enter(&sc->sc_core_lock);
   1504       1.2       rin 	error = igc_init_locked(sc);
   1505       1.2       rin 	mutex_exit(&sc->sc_core_lock);
   1506       1.1       rin 
   1507       1.2       rin 	return error;
   1508       1.1       rin }
   1509       1.1       rin 
/*
 * Bring the interface up: reset and program the hardware, fill the RX
 * rings, and enable interrupts.  Called with sc_core_lock held.
 * Always returns 0.
 */
static int
igc_init_locked(struct igc_softc *sc)
{
	struct ethercom *ec = &sc->sc_ec;
	struct ifnet *ifp = &ec->ec_if;

	DPRINTF(CFG, "called\n");

	KASSERT(mutex_owned(&sc->sc_core_lock));

	/* Re-init: tear the running state down first. */
	if (ISSET(ifp->if_flags, IFF_RUNNING))
		igc_stop_locked(sc);

	/* Put the address into the receive address array. */
	igc_rar_set(&sc->hw, sc->hw.mac.addr, 0);

	/* Initialize the hardware. */
	igc_reset(sc);
	igc_update_link_status(sc);

	/* Setup VLAN support, basic and offload if available. */
	IGC_WRITE_REG(&sc->hw, IGC_VET, ETHERTYPE_VLAN);

	igc_initialize_transmit_unit(sc);
	igc_initialize_receive_unit(sc);

	/* Enable hardware VLAN tagging when configured. */
	if (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) {
		uint32_t ctrl = IGC_READ_REG(&sc->hw, IGC_CTRL);
		ctrl |= IGC_CTRL_VME;
		IGC_WRITE_REG(&sc->hw, IGC_CTRL, ctrl);
	}

	/* Setup multicast table. */
	igc_set_filter(sc);

	igc_clear_hw_cntrs_base_generic(&sc->hw);

	/* MSI-X needs explicit vector-to-queue routing. */
	if (sc->sc_intr_type == PCI_INTR_TYPE_MSIX)
		igc_configure_queues(sc);

	/* This clears any pending interrupts */
	IGC_READ_REG(&sc->hw, IGC_ICR);
	IGC_WRITE_REG(&sc->hw, IGC_ICS, IGC_ICS_LSC);

	/* The driver can now take control from firmware. */
	igc_get_hw_control(sc);

	/* Set Energy Efficient Ethernet. */
	igc_set_eee_i225(&sc->hw, true, true, true);

	/* Pre-fill every RX ring with mbufs. */
	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
		struct rx_ring *rxr = &sc->rx_rings[iq];

		mutex_enter(&rxr->rxr_lock);
		igc_rxfill(rxr);
		mutex_exit(&rxr->rxr_lock);
	}

	sc->sc_core_stopping = false;

	ifp->if_flags |= IFF_RUNNING;

	/* Save last flags for the callback */
	sc->sc_if_flags = ifp->if_flags;

	callout_schedule(&sc->sc_tick_ch, hz);

	igc_enable_intr(sc);

	return 0;
}
   1581       1.1       rin 
   1582       1.1       rin static inline int
   1583       1.2       rin igc_load_mbuf(struct igc_queue *q, bus_dma_tag_t dmat, bus_dmamap_t map,
   1584       1.2       rin     struct mbuf *m)
   1585       1.1       rin {
   1586       1.1       rin 	int error;
   1587       1.1       rin 
   1588       1.1       rin 	error = bus_dmamap_load_mbuf(dmat, map, m,
   1589       1.2       rin 	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   1590       1.2       rin 
   1591       1.2       rin 	if (__predict_false(error == EFBIG)) {
   1592       1.2       rin 		IGC_DRIVER_EVENT(q, txdma_efbig, 1);
   1593       1.2       rin 		m = m_defrag(m, M_NOWAIT);
   1594       1.2       rin 		if (__predict_false(m == NULL)) {
   1595       1.2       rin 			IGC_DRIVER_EVENT(q, txdma_defrag, 1);
   1596       1.2       rin 			return ENOBUFS;
   1597       1.2       rin 		}
   1598       1.2       rin 		error = bus_dmamap_load_mbuf(dmat, map, m,
   1599       1.2       rin 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   1600       1.2       rin 	}
   1601       1.1       rin 
   1602       1.2       rin 	switch (error) {
   1603       1.2       rin 	case 0:
   1604       1.2       rin 		break;
   1605       1.2       rin 	case ENOMEM:
   1606       1.2       rin 		IGC_DRIVER_EVENT(q, txdma_enomem, 1);
   1607       1.2       rin 		break;
   1608       1.2       rin 	case EINVAL:
   1609       1.2       rin 		IGC_DRIVER_EVENT(q, txdma_einval, 1);
   1610       1.2       rin 		break;
   1611       1.2       rin 	case EAGAIN:
   1612       1.2       rin 		IGC_DRIVER_EVENT(q, txdma_eagain, 1);
   1613       1.2       rin 		break;
   1614       1.2       rin 	default:
   1615       1.2       rin 		IGC_DRIVER_EVENT(q, txdma_other, 1);
   1616       1.2       rin 		break;
   1617       1.2       rin 	}
   1618       1.1       rin 
   1619       1.2       rin 	return error;
   1620       1.1       rin }
   1621       1.1       rin 
   1622       1.2       rin #define IGC_TX_START	1
   1623       1.2       rin #define IGC_TX_TRANSMIT	2
   1624       1.2       rin 
   1625       1.2       rin static void
   1626       1.2       rin igc_start(struct ifnet *ifp)
   1627       1.1       rin {
   1628       1.1       rin 	struct igc_softc *sc = ifp->if_softc;
   1629       1.1       rin 
   1630       1.2       rin 	if (__predict_false(!sc->link_active)) {
   1631       1.2       rin 		IFQ_PURGE(&ifp->if_snd);
   1632       1.1       rin 		return;
   1633       1.1       rin 	}
   1634       1.1       rin 
   1635       1.2       rin 	struct tx_ring *txr = &sc->tx_rings[0]; /* queue 0 */
   1636       1.2       rin 	mutex_enter(&txr->txr_lock);
   1637       1.2       rin 	igc_tx_common_locked(ifp, txr, IGC_TX_START);
   1638       1.2       rin 	mutex_exit(&txr->txr_lock);
   1639       1.2       rin }
   1640       1.2       rin 
   1641       1.2       rin static inline u_int
   1642       1.2       rin igc_select_txqueue(struct igc_softc *sc, struct mbuf *m __unused)
   1643       1.2       rin {
   1644       1.2       rin 	const u_int cpuid = cpu_index(curcpu());
   1645       1.2       rin 
   1646       1.2       rin 	return cpuid % sc->sc_nqueues;
   1647       1.2       rin }
   1648       1.2       rin 
   1649       1.2       rin static int
   1650       1.2       rin igc_transmit(struct ifnet *ifp, struct mbuf *m)
   1651       1.2       rin {
   1652       1.2       rin 	struct igc_softc *sc = ifp->if_softc;
   1653       1.2       rin 	const u_int qid = igc_select_txqueue(sc, m);
   1654       1.2       rin 	struct tx_ring *txr = &sc->tx_rings[qid];
   1655       1.2       rin 	struct igc_queue *q = txr->txr_igcq;
   1656       1.2       rin 
   1657       1.2       rin 	if (__predict_false(!pcq_put(txr->txr_interq, m))) {
   1658       1.2       rin 		IGC_QUEUE_EVENT(q, tx_pcq_drop, 1);
   1659       1.2       rin 		m_freem(m);
   1660       1.2       rin 		return ENOBUFS;
   1661       1.2       rin 	}
   1662       1.2       rin 
   1663       1.2       rin 	mutex_enter(&txr->txr_lock);
   1664       1.2       rin 	igc_tx_common_locked(ifp, txr, IGC_TX_TRANSMIT);
   1665       1.2       rin 	mutex_exit(&txr->txr_lock);
   1666       1.2       rin 
   1667       1.2       rin 	return 0;
   1668       1.2       rin }
   1669       1.2       rin 
   1670       1.2       rin static void
   1671       1.2       rin igc_tx_common_locked(struct ifnet *ifp, struct tx_ring *txr, int caller)
   1672       1.2       rin {
   1673       1.2       rin 	struct igc_softc *sc = ifp->if_softc;
   1674       1.2       rin 	struct igc_queue *q = txr->txr_igcq;
   1675       1.2       rin 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   1676       1.2       rin 	int prod, free, last = -1;
   1677       1.2       rin 	bool post = false;
   1678       1.2       rin 
   1679       1.1       rin 	prod = txr->next_avail_desc;
   1680       1.1       rin 	free = txr->next_to_clean;
   1681       1.1       rin 	if (free <= prod)
   1682       1.1       rin 		free += sc->num_tx_desc;
   1683       1.1       rin 	free -= prod;
   1684       1.1       rin 
   1685       1.2       rin 	DPRINTF(TX, "%s: begin: msix %d prod %d n2c %d free %d\n",
   1686       1.2       rin 	    caller == IGC_TX_TRANSMIT ? "transmit" : "start",
   1687       1.2       rin 	    txr->me, prod, txr->next_to_clean, free);
   1688       1.1       rin 
   1689       1.2       rin 	for (;;) {
   1690       1.2       rin 		struct mbuf *m;
   1691       1.1       rin 
   1692       1.2       rin 		if (__predict_false(free <= IGC_MAX_SCATTER)) {
   1693       1.2       rin 			IGC_QUEUE_EVENT(q, tx_no_desc, 1);
   1694       1.1       rin 			break;
   1695       1.1       rin 		}
   1696       1.1       rin 
   1697       1.2       rin 		if (caller == IGC_TX_TRANSMIT)
   1698       1.2       rin 			m = pcq_get(txr->txr_interq);
   1699       1.2       rin 		else
   1700       1.2       rin 			IFQ_DEQUEUE(&ifp->if_snd, m);
   1701       1.2       rin 		if (__predict_false(m == NULL))
   1702       1.1       rin 			break;
   1703       1.1       rin 
   1704       1.2       rin 		struct igc_tx_buf *txbuf = &txr->tx_buffers[prod];
   1705       1.2       rin 		bus_dmamap_t map = txbuf->map;
   1706       1.1       rin 
   1707       1.2       rin 		if (__predict_false(
   1708       1.2       rin 		    igc_load_mbuf(q, txr->txdma.dma_tag, map, m))) {
   1709       1.2       rin 			if (caller == IGC_TX_TRANSMIT)
   1710       1.2       rin 				IGC_QUEUE_EVENT(q, tx_pcq_drop, 1);
   1711       1.1       rin 			m_freem(m);
   1712      1.15  riastrad 			if_statinc_ref(ifp, nsr, if_oerrors);
   1713       1.1       rin 			continue;
   1714       1.1       rin 		}
   1715       1.1       rin 
   1716       1.2       rin 		uint32_t ctx_cmd_type_len = 0, olinfo_status = 0;
   1717       1.2       rin 		if (igc_tx_ctx_setup(txr, m, prod, &ctx_cmd_type_len,
   1718       1.2       rin 		    &olinfo_status)) {
   1719       1.2       rin 			IGC_QUEUE_EVENT(q, tx_ctx, 1);
   1720       1.1       rin 			/* Consume the first descriptor */
   1721       1.2       rin 			prod = igc_txdesc_incr(sc, prod);
   1722       1.1       rin 			free--;
   1723       1.1       rin 		}
   1724  1.15.2.1  perseant 
   1725  1.15.2.1  perseant 		bus_dmamap_sync(txr->txdma.dma_tag, map, 0,
   1726  1.15.2.1  perseant 		    map->dm_mapsize, BUS_DMASYNC_PREWRITE);
   1727  1.15.2.1  perseant 
   1728       1.2       rin 		for (int i = 0; i < map->dm_nsegs; i++) {
   1729       1.2       rin 			union igc_adv_tx_desc *txdesc = &txr->tx_base[prod];
   1730       1.1       rin 
   1731       1.2       rin 			uint32_t cmd_type_len = ctx_cmd_type_len |
   1732       1.2       rin 			    IGC_ADVTXD_DCMD_IFCS | IGC_ADVTXD_DTYP_DATA |
   1733       1.1       rin 			    IGC_ADVTXD_DCMD_DEXT | map->dm_segs[i].ds_len;
   1734       1.2       rin 			if (i == map->dm_nsegs - 1) {
   1735       1.2       rin 				cmd_type_len |=
   1736       1.2       rin 				    IGC_ADVTXD_DCMD_EOP | IGC_ADVTXD_DCMD_RS;
   1737       1.2       rin 			}
   1738       1.1       rin 
   1739       1.2       rin 			igc_txdesc_sync(txr, prod,
   1740       1.2       rin 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1741       1.2       rin 			htolem64(&txdesc->read.buffer_addr,
   1742       1.2       rin 			    map->dm_segs[i].ds_addr);
   1743       1.1       rin 			htolem32(&txdesc->read.cmd_type_len, cmd_type_len);
   1744       1.1       rin 			htolem32(&txdesc->read.olinfo_status, olinfo_status);
   1745       1.2       rin 			igc_txdesc_sync(txr, prod,
   1746       1.2       rin 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1747       1.1       rin 
   1748       1.1       rin 			last = prod;
   1749       1.2       rin 			prod = igc_txdesc_incr(sc, prod);
   1750       1.1       rin 		}
   1751       1.1       rin 
   1752       1.1       rin 		txbuf->m_head = m;
   1753       1.1       rin 		txbuf->eop_index = last;
   1754       1.1       rin 
   1755       1.2       rin 		bpf_mtap(ifp, m, BPF_D_OUT);
   1756       1.2       rin 
   1757      1.15  riastrad 		if_statadd_ref(ifp, nsr, if_obytes, m->m_pkthdr.len);
   1758       1.2       rin 		if (m->m_flags & M_MCAST)
   1759      1.15  riastrad 			if_statinc_ref(ifp, nsr, if_omcasts);
   1760       1.2       rin 		IGC_QUEUE_EVENT(q, tx_packets, 1);
   1761       1.2       rin 		IGC_QUEUE_EVENT(q, tx_bytes, m->m_pkthdr.len);
   1762       1.1       rin 
   1763       1.2       rin 		free -= map->dm_nsegs;
   1764       1.2       rin 		post = true;
   1765       1.1       rin 	}
   1766       1.1       rin 
   1767       1.1       rin 	if (post) {
   1768       1.1       rin 		txr->next_avail_desc = prod;
   1769       1.1       rin 		IGC_WRITE_REG(&sc->hw, IGC_TDT(txr->me), prod);
   1770       1.1       rin 	}
   1771       1.2       rin 
   1772       1.2       rin 	DPRINTF(TX, "%s: done : msix %d prod %d n2c %d free %d\n",
   1773       1.2       rin 	    caller == IGC_TX_TRANSMIT ? "transmit" : "start",
   1774       1.2       rin 	    txr->me, prod, txr->next_to_clean, free);
   1775       1.2       rin 
   1776       1.2       rin 	IF_STAT_PUTREF(ifp);
   1777       1.1       rin }
   1778       1.1       rin 
   1779       1.2       rin static bool
   1780       1.2       rin igc_txeof(struct tx_ring *txr, u_int limit)
   1781       1.1       rin {
   1782       1.1       rin 	struct igc_softc *sc = txr->sc;
   1783       1.2       rin 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   1784       1.2       rin 	int cons, prod;
   1785       1.2       rin 	bool more = false;
   1786       1.1       rin 
   1787       1.1       rin 	prod = txr->next_avail_desc;
   1788       1.1       rin 	cons = txr->next_to_clean;
   1789       1.1       rin 
   1790       1.2       rin 	if (cons == prod) {
   1791       1.2       rin 		DPRINTF(TX, "false: msix %d cons %d prod %d\n",
   1792       1.2       rin 		    txr->me, cons, prod);
   1793       1.2       rin 		return false;
   1794       1.2       rin 	}
   1795       1.2       rin 
   1796       1.2       rin 	do {
   1797       1.2       rin 		struct igc_tx_buf *txbuf = &txr->tx_buffers[cons];
   1798       1.2       rin 		const int last = txbuf->eop_index;
   1799       1.1       rin 
   1800       1.2       rin 		membar_consumer();	/* XXXRO necessary? */
   1801       1.1       rin 
   1802       1.2       rin 		KASSERT(last != -1);
   1803       1.2       rin 		union igc_adv_tx_desc *txdesc = &txr->tx_base[last];
   1804       1.2       rin 		igc_txdesc_sync(txr, last, BUS_DMASYNC_POSTREAD);
   1805       1.2       rin 		const uint32_t status = le32toh(txdesc->wb.status);
   1806       1.2       rin 		igc_txdesc_sync(txr, last, BUS_DMASYNC_PREREAD);
   1807       1.1       rin 
   1808       1.2       rin 		if (!(status & IGC_TXD_STAT_DD))
   1809       1.2       rin 			break;
   1810       1.1       rin 
   1811       1.2       rin 		if (limit-- == 0) {
   1812       1.2       rin 			more = true;
   1813       1.2       rin 			DPRINTF(TX, "pending TX "
   1814       1.2       rin 			    "msix %d cons %d last %d prod %d "
   1815       1.2       rin 			    "status 0x%08x\n",
   1816       1.2       rin 			    txr->me, cons, last, prod, status);
   1817       1.1       rin 			break;
   1818       1.2       rin 		}
   1819       1.2       rin 
   1820       1.2       rin 		DPRINTF(TX, "handled TX "
   1821       1.2       rin 		    "msix %d cons %d last %d prod %d "
   1822       1.2       rin 		    "status 0x%08x\n",
   1823       1.2       rin 		    txr->me, cons, last, prod, status);
   1824       1.1       rin 
   1825       1.2       rin 		if_statinc(ifp, if_opackets);
   1826       1.1       rin 
   1827       1.2       rin 		bus_dmamap_t map = txbuf->map;
   1828       1.1       rin 		bus_dmamap_sync(txr->txdma.dma_tag, map, 0, map->dm_mapsize,
   1829       1.1       rin 		    BUS_DMASYNC_POSTWRITE);
   1830       1.1       rin 		bus_dmamap_unload(txr->txdma.dma_tag, map);
   1831       1.1       rin 		m_freem(txbuf->m_head);
   1832       1.1       rin 
   1833       1.1       rin 		txbuf->m_head = NULL;
   1834       1.1       rin 		txbuf->eop_index = -1;
   1835       1.1       rin 
   1836       1.2       rin 		cons = igc_txdesc_incr(sc, last);
   1837       1.2       rin 	} while (cons != prod);
   1838       1.2       rin 
   1839       1.2       rin 	txr->next_to_clean = cons;
   1840       1.1       rin 
   1841       1.2       rin 	return more;
   1842       1.2       rin }
   1843       1.1       rin 
   1844       1.2       rin static void
   1845       1.2       rin igc_intr_barrier(struct igc_softc *sc __unused)
   1846       1.2       rin {
   1847       1.1       rin 
   1848       1.2       rin 	xc_barrier(0);
   1849       1.2       rin }
   1850       1.1       rin 
   1851       1.2       rin static void
   1852       1.2       rin igc_stop(struct ifnet *ifp, int disable)
   1853       1.2       rin {
   1854       1.2       rin 	struct igc_softc *sc = ifp->if_softc;
   1855       1.1       rin 
   1856       1.2       rin 	mutex_enter(&sc->sc_core_lock);
   1857       1.2       rin 	igc_stop_locked(sc);
   1858       1.2       rin 	mutex_exit(&sc->sc_core_lock);
   1859       1.1       rin }
   1860       1.1       rin 
   1861       1.1       rin /*********************************************************************
   1862       1.1       rin  *
   1863       1.1       rin  *  This routine disables all traffic on the adapter by issuing a
   1864       1.1       rin  *  global reset on the MAC.
   1865       1.1       rin  *
   1866       1.1       rin  **********************************************************************/
   1867       1.2       rin static void
   1868       1.2       rin igc_stop_locked(struct igc_softc *sc)
   1869       1.1       rin {
   1870       1.2       rin 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   1871       1.2       rin 
   1872       1.2       rin 	DPRINTF(CFG, "called\n");
   1873       1.2       rin 
   1874       1.2       rin 	KASSERT(mutex_owned(&sc->sc_core_lock));
   1875       1.2       rin 
   1876       1.2       rin 	/*
   1877       1.2       rin 	 * If stopping processing has already started, do nothing.
   1878       1.2       rin 	 */
   1879       1.2       rin 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   1880       1.2       rin 		return;
   1881       1.1       rin 
   1882       1.1       rin 	/* Tell the stack that the interface is no longer active. */
   1883       1.2       rin 	ifp->if_flags &= ~IFF_RUNNING;
   1884       1.2       rin 
   1885       1.2       rin 	/*
   1886       1.2       rin 	 * igc_handle_queue() can enable interrupts, so wait for completion of
   1887       1.2       rin 	 * last igc_handle_queue() after unset IFF_RUNNING.
   1888       1.2       rin 	 */
   1889       1.2       rin 	mutex_exit(&sc->sc_core_lock);
   1890       1.2       rin 	igc_barrier_handle_queue(sc);
   1891       1.2       rin 	mutex_enter(&sc->sc_core_lock);
   1892       1.2       rin 
   1893       1.2       rin 	sc->sc_core_stopping = true;
   1894       1.1       rin 
   1895       1.1       rin 	igc_disable_intr(sc);
   1896       1.1       rin 
   1897       1.2       rin 	callout_halt(&sc->sc_tick_ch, &sc->sc_core_lock);
   1898       1.2       rin 
   1899       1.1       rin 	igc_reset_hw(&sc->hw);
   1900       1.1       rin 	IGC_WRITE_REG(&sc->hw, IGC_WUC, 0);
   1901       1.1       rin 
   1902       1.2       rin 	/*
   1903       1.2       rin 	 * Wait for completion of interrupt handlers.
   1904       1.2       rin 	 */
   1905       1.2       rin 	mutex_exit(&sc->sc_core_lock);
   1906       1.2       rin 	igc_intr_barrier(sc);
   1907       1.2       rin 	mutex_enter(&sc->sc_core_lock);
   1908       1.2       rin 
   1909       1.2       rin 	igc_update_link_status(sc);
   1910       1.2       rin 
   1911       1.2       rin 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
   1912       1.2       rin 		struct tx_ring *txr = &sc->tx_rings[iq];
   1913       1.2       rin 
   1914       1.2       rin 		igc_withdraw_transmit_packets(txr, false);
   1915       1.2       rin 	}
   1916       1.2       rin 
   1917       1.2       rin 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
   1918       1.2       rin 		struct rx_ring *rxr = &sc->rx_rings[iq];
   1919       1.1       rin 
   1920       1.2       rin 		igc_clear_receive_status(rxr);
   1921       1.2       rin 	}
   1922       1.1       rin 
   1923       1.2       rin 	/* Save last flags for the callback */
   1924       1.2       rin 	sc->sc_if_flags = ifp->if_flags;
   1925       1.1       rin }
   1926       1.1       rin 
   1927       1.1       rin /*********************************************************************
   1928       1.1       rin  *  Ioctl entry point
   1929       1.1       rin  *
   1930       1.1       rin  *  igc_ioctl is called when the user wants to configure the
   1931       1.1       rin  *  interface.
   1932       1.1       rin  *
   1933       1.1       rin  *  return 0 on success, positive on failure
   1934       1.1       rin  **********************************************************************/
   1935       1.2       rin static int
   1936       1.2       rin igc_ioctl(struct ifnet * ifp, u_long cmd, void *data)
   1937       1.1       rin {
   1938       1.2       rin 	struct igc_softc *sc __unused = ifp->if_softc;
   1939       1.2       rin 	int s;
   1940       1.2       rin 	int error;
   1941       1.1       rin 
   1942       1.2       rin 	DPRINTF(CFG, "cmd 0x%016lx\n", cmd);
   1943       1.1       rin 
   1944       1.1       rin 	switch (cmd) {
   1945       1.2       rin 	case SIOCADDMULTI:
   1946       1.2       rin 	case SIOCDELMULTI:
   1947       1.1       rin 		break;
   1948       1.2       rin 	default:
   1949       1.2       rin 		KASSERT(IFNET_LOCKED(ifp));
   1950       1.2       rin 	}
   1951       1.2       rin 
   1952       1.2       rin 	if (cmd == SIOCZIFDATA) {
   1953       1.2       rin 		mutex_enter(&sc->sc_core_lock);
   1954       1.2       rin 		igc_clear_counters(sc);
   1955       1.2       rin 		mutex_exit(&sc->sc_core_lock);
   1956       1.2       rin 	}
   1957       1.2       rin 
   1958       1.2       rin 	switch (cmd) {
   1959       1.2       rin #ifdef IF_RXR
   1960       1.1       rin 	case SIOCGIFRXR:
   1961       1.2       rin 		s = splnet();
   1962       1.1       rin 		error = igc_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
   1963       1.2       rin 		splx(s);
   1964       1.1       rin 		break;
   1965       1.2       rin #endif
   1966       1.1       rin 	default:
   1967       1.2       rin 		s = splnet();
   1968       1.2       rin 		error = ether_ioctl(ifp, cmd, data);
   1969       1.2       rin 		splx(s);
   1970       1.2       rin 		break;
   1971       1.1       rin 	}
   1972       1.1       rin 
   1973       1.2       rin 	if (error != ENETRESET)
   1974       1.2       rin 		return error;
   1975       1.2       rin 
   1976       1.2       rin 	error = 0;
   1977       1.2       rin 
   1978       1.2       rin 	if (cmd == SIOCSIFCAP)
   1979       1.2       rin 		error = if_init(ifp);
   1980       1.2       rin 	else if ((cmd == SIOCADDMULTI) || (cmd == SIOCDELMULTI)) {
   1981       1.2       rin 		mutex_enter(&sc->sc_core_lock);
   1982       1.2       rin 		if (sc->sc_if_flags & IFF_RUNNING) {
   1983       1.2       rin 			/*
   1984       1.2       rin 			 * Multicast list has changed; set the hardware filter
   1985       1.2       rin 			 * accordingly.
   1986       1.2       rin 			 */
   1987       1.1       rin 			igc_disable_intr(sc);
   1988       1.2       rin 			igc_set_filter(sc);
   1989       1.1       rin 			igc_enable_intr(sc);
   1990       1.1       rin 		}
   1991       1.2       rin 		mutex_exit(&sc->sc_core_lock);
   1992       1.1       rin 	}
   1993       1.1       rin 
   1994       1.1       rin 	return error;
   1995       1.1       rin }
   1996       1.1       rin 
   1997       1.2       rin #ifdef IF_RXR
   1998       1.2       rin static int
   1999       1.1       rin igc_rxrinfo(struct igc_softc *sc, struct if_rxrinfo *ifri)
   2000       1.1       rin {
   2001       1.2       rin 	struct if_rxring_info *ifr, ifr1;
   2002       1.2       rin 	int error;
   2003       1.2       rin 
   2004       1.2       rin 	if (sc->sc_nqueues > 1) {
   2005       1.2       rin 		ifr = kmem_zalloc(sc->sc_nqueues * sizeof(*ifr), KM_SLEEP);
   2006       1.2       rin 	} else {
   2007       1.2       rin 		ifr = &ifr1;
   2008       1.2       rin 		memset(ifr, 0, sizeof(*ifr));
   2009       1.2       rin 	}
   2010       1.2       rin 
   2011       1.2       rin 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
   2012       1.2       rin 		struct rx_ring *rxr = &sc->rx_rings[iq];
   2013       1.2       rin 
   2014       1.2       rin 		ifr[iq].ifr_size = MCLBYTES;
   2015       1.2       rin 		snprintf(ifr[iq].ifr_name, sizeof(ifr[iq].ifr_name), "%d", iq);
   2016       1.2       rin 		ifr[iq].ifr_info = rxr->rx_ring;
   2017       1.1       rin 	}
   2018       1.1       rin 
   2019       1.1       rin 	error = if_rxr_info_ioctl(ifri, sc->sc_nqueues, ifr);
   2020       1.2       rin 	if (sc->sc_nqueues > 1)
   2021       1.2       rin 		kmem_free(ifr, sc->sc_nqueues * sizeof(*ifr));
   2022       1.1       rin 
   2023       1.1       rin 	return error;
   2024       1.1       rin }
   2025       1.2       rin #endif
   2026       1.1       rin 
   2027       1.2       rin static void
   2028       1.1       rin igc_rxfill(struct rx_ring *rxr)
   2029       1.1       rin {
   2030       1.1       rin 	struct igc_softc *sc = rxr->sc;
   2031       1.2       rin 	int id;
   2032       1.1       rin 
   2033       1.2       rin 	for (id = 0; id < sc->num_rx_desc; id++) {
   2034       1.2       rin 		if (igc_get_buf(rxr, id, false)) {
   2035       1.2       rin 			panic("%s: msix=%d i=%d\n", __func__, rxr->me, id);
   2036       1.2       rin 		}
   2037       1.1       rin 	}
   2038       1.1       rin 
   2039       1.2       rin 	id = sc->num_rx_desc - 1;
   2040       1.2       rin 	rxr->last_desc_filled = id;
   2041       1.2       rin 	IGC_WRITE_REG(&sc->hw, IGC_RDT(rxr->me), id);
   2042       1.2       rin 	rxr->next_to_check = 0;
   2043       1.1       rin }
   2044       1.1       rin 
   2045       1.2       rin static void
   2046       1.2       rin igc_rxrefill(struct rx_ring *rxr, int end)
   2047       1.1       rin {
   2048       1.1       rin 	struct igc_softc *sc = rxr->sc;
   2049       1.2       rin 	int id;
   2050       1.1       rin 
   2051       1.2       rin 	for (id = rxr->next_to_check; id != end; id = igc_rxdesc_incr(sc, id)) {
   2052       1.2       rin 		if (igc_get_buf(rxr, id, true)) {
   2053       1.2       rin 			/* XXXRO */
   2054       1.2       rin 			panic("%s: msix=%d id=%d\n", __func__, rxr->me, id);
   2055       1.2       rin 		}
   2056       1.1       rin 	}
   2057       1.2       rin 
   2058       1.2       rin 	id = igc_rxdesc_decr(sc, id);
   2059       1.2       rin 	DPRINTF(RX, "%s RDT %d id %d\n",
   2060       1.2       rin 	    rxr->last_desc_filled == id ? "same" : "diff",
   2061       1.2       rin 	    rxr->last_desc_filled, id);
   2062       1.2       rin 	rxr->last_desc_filled = id;
   2063       1.2       rin 	IGC_WRITE_REG(&sc->hw, IGC_RDT(rxr->me), id);
   2064       1.1       rin }
   2065       1.1       rin 
   2066       1.1       rin /*********************************************************************
   2067       1.1       rin  *
   2068       1.1       rin  *  This routine executes in interrupt context. It replenishes
   2069       1.1       rin  *  the mbufs in the descriptor and sends data which has been
   2070       1.1       rin  *  dma'ed into host memory to upper layer.
   2071       1.1       rin  *
   2072       1.1       rin  *********************************************************************/
   2073       1.2       rin static bool
   2074       1.2       rin igc_rxeof(struct rx_ring *rxr, u_int limit)
   2075       1.1       rin {
   2076       1.1       rin 	struct igc_softc *sc = rxr->sc;
   2077       1.2       rin 	struct igc_queue *q = rxr->rxr_igcq;
   2078       1.2       rin 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   2079       1.2       rin 	int id;
   2080       1.2       rin 	bool more = false;
   2081       1.2       rin 
   2082       1.2       rin 	id = rxr->next_to_check;
   2083       1.2       rin 	for (;;) {
   2084       1.2       rin 		union igc_adv_rx_desc *rxdesc = &rxr->rx_base[id];
   2085       1.2       rin 		struct igc_rx_buf *rxbuf, *nxbuf;
   2086       1.2       rin 		struct mbuf *mp, *m;
   2087       1.1       rin 
   2088       1.2       rin 		igc_rxdesc_sync(rxr, id,
   2089       1.2       rin 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   2090       1.1       rin 
   2091       1.2       rin 		const uint32_t staterr = le32toh(rxdesc->wb.upper.status_error);
   2092       1.1       rin 
   2093       1.1       rin 		if (!ISSET(staterr, IGC_RXD_STAT_DD)) {
   2094       1.2       rin 			igc_rxdesc_sync(rxr, id,
   2095       1.2       rin 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   2096       1.2       rin 			break;
   2097       1.2       rin 		}
   2098       1.2       rin 
   2099       1.2       rin 		if (limit-- == 0) {
   2100       1.2       rin 			igc_rxdesc_sync(rxr, id,
   2101       1.2       rin 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   2102       1.2       rin 			DPRINTF(RX, "more=true\n");
   2103       1.2       rin 			more = true;
   2104       1.1       rin 			break;
   2105       1.1       rin 		}
   2106       1.1       rin 
   2107       1.1       rin 		/* Zero out the receive descriptors status. */
   2108       1.1       rin 		rxdesc->wb.upper.status_error = 0;
   2109       1.1       rin 
   2110       1.1       rin 		/* Pull the mbuf off the ring. */
   2111       1.2       rin 		rxbuf = &rxr->rx_buffers[id];
   2112       1.2       rin 		bus_dmamap_t map = rxbuf->map;
   2113       1.2       rin 		bus_dmamap_sync(rxr->rxdma.dma_tag, map,
   2114       1.2       rin 		    0, map->dm_mapsize, BUS_DMASYNC_POSTREAD);
   2115       1.2       rin 		bus_dmamap_unload(rxr->rxdma.dma_tag, map);
   2116       1.1       rin 
   2117       1.1       rin 		mp = rxbuf->buf;
   2118       1.2       rin 		rxbuf->buf = NULL;
   2119       1.2       rin 
   2120       1.2       rin 		const bool eop = staterr & IGC_RXD_STAT_EOP;
   2121       1.2       rin 		const uint16_t len = le16toh(rxdesc->wb.upper.length);
   2122       1.2       rin 
   2123       1.7     oster #if NVLAN > 0
   2124       1.2       rin 		const uint16_t vtag = le16toh(rxdesc->wb.upper.vlan);
   2125       1.7     oster #endif
   2126       1.2       rin 
   2127       1.2       rin 		const uint32_t ptype = le32toh(rxdesc->wb.lower.lo_dword.data) &
   2128       1.1       rin 		    IGC_PKTTYPE_MASK;
   2129       1.1       rin 
   2130       1.2       rin 		const uint32_t hash __unused =
   2131       1.2       rin 		    le32toh(rxdesc->wb.lower.hi_dword.rss);
   2132       1.2       rin 		const uint16_t hashtype __unused =
   2133       1.2       rin 		    le16toh(rxdesc->wb.lower.lo_dword.hs_rss.pkt_info) &
   2134       1.2       rin 		    IGC_RXDADV_RSSTYPE_MASK;
   2135       1.2       rin 
   2136       1.2       rin 		igc_rxdesc_sync(rxr, id,
   2137       1.2       rin 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   2138       1.2       rin 
   2139       1.2       rin 		if (__predict_false(staterr & IGC_RXDEXT_STATERR_RXE)) {
   2140  1.15.2.1  perseant 			m_freem(rxbuf->fmp);
   2141  1.15.2.1  perseant 			rxbuf->fmp = NULL;
   2142       1.1       rin 
   2143       1.1       rin 			m_freem(mp);
   2144       1.2       rin 			m = NULL;
   2145       1.2       rin 
   2146       1.2       rin 			if_statinc(ifp, if_ierrors);
   2147       1.2       rin 			IGC_QUEUE_EVENT(q, rx_discard, 1);
   2148       1.2       rin 
   2149       1.2       rin 			DPRINTF(RX, "ierrors++\n");
   2150       1.2       rin 
   2151       1.1       rin 			goto next_desc;
   2152       1.1       rin 		}
   2153       1.1       rin 
   2154       1.2       rin 		if (__predict_false(mp == NULL)) {
   2155       1.1       rin 			panic("%s: igc_rxeof: NULL mbuf in slot %d "
   2156       1.2       rin 			    "(filled %d)", device_xname(sc->sc_dev),
   2157       1.2       rin 			    id, rxr->last_desc_filled);
   2158       1.1       rin 		}
   2159       1.1       rin 
   2160       1.1       rin 		if (!eop) {
   2161       1.1       rin 			/*
   2162       1.1       rin 			 * Figure out the next descriptor of this frame.
   2163       1.1       rin 			 */
   2164       1.2       rin 			int nextp = igc_rxdesc_incr(sc, id);
   2165       1.2       rin 
   2166       1.1       rin 			nxbuf = &rxr->rx_buffers[nextp];
   2167       1.2       rin 			/*
   2168       1.2       rin 			 * TODO prefetch(nxbuf);
   2169       1.2       rin 			 */
   2170       1.1       rin 		}
   2171       1.1       rin 
   2172       1.1       rin 		mp->m_len = len;
   2173       1.1       rin 
   2174       1.1       rin 		m = rxbuf->fmp;
   2175       1.2       rin 		rxbuf->fmp = NULL;
   2176       1.1       rin 
   2177       1.2       rin 		if (m != NULL) {
   2178       1.1       rin 			m->m_pkthdr.len += mp->m_len;
   2179       1.2       rin 		} else {
   2180       1.1       rin 			m = mp;
   2181       1.1       rin 			m->m_pkthdr.len = mp->m_len;
   2182       1.1       rin #if NVLAN > 0
   2183       1.2       rin 			if (staterr & IGC_RXD_STAT_VP)
   2184       1.2       rin 				vlan_set_tag(m, vtag);
   2185       1.1       rin #endif
   2186       1.1       rin 		}
   2187       1.1       rin 
   2188       1.1       rin 		/* Pass the head pointer on */
   2189       1.2       rin 		if (!eop) {
   2190       1.1       rin 			nxbuf->fmp = m;
   2191       1.1       rin 			m = NULL;
   2192       1.1       rin 			mp->m_next = nxbuf->buf;
   2193       1.1       rin 		} else {
   2194       1.2       rin 			m_set_rcvif(m, ifp);
   2195       1.2       rin 
   2196       1.2       rin 			m->m_pkthdr.csum_flags = igc_rx_checksum(q,
   2197       1.2       rin 			    ifp->if_capenable, staterr, ptype);
   2198       1.1       rin 
   2199       1.2       rin #ifdef notyet
   2200       1.1       rin 			if (hashtype != IGC_RXDADV_RSSTYPE_NONE) {
   2201       1.1       rin 				m->m_pkthdr.ph_flowid = hash;
   2202       1.1       rin 				SET(m->m_pkthdr.csum_flags, M_FLOWID);
   2203       1.1       rin 			}
   2204       1.2       rin 			ml_enqueue(&ml, m);
   2205       1.2       rin #endif
   2206       1.2       rin 
   2207       1.2       rin 			if_percpuq_enqueue(sc->sc_ipq, m);
   2208       1.1       rin 
   2209       1.2       rin 			if_statinc(ifp, if_ipackets);
   2210       1.2       rin 			IGC_QUEUE_EVENT(q, rx_packets, 1);
   2211       1.2       rin 			IGC_QUEUE_EVENT(q, rx_bytes, m->m_pkthdr.len);
   2212       1.1       rin 		}
   2213       1.2       rin  next_desc:
   2214       1.1       rin 		/* Advance our pointers to the next descriptor. */
   2215       1.2       rin 		id = igc_rxdesc_incr(sc, id);
   2216       1.1       rin 	}
   2217       1.1       rin 
   2218       1.2       rin 	DPRINTF(RX, "fill queue[%d]\n", rxr->me);
   2219       1.2       rin 	igc_rxrefill(rxr, id);
   2220       1.1       rin 
   2221       1.2       rin 	DPRINTF(RX, "%s n2c %d id %d\n",
   2222       1.2       rin 	    rxr->next_to_check == id ? "same" : "diff",
   2223       1.2       rin 	    rxr->next_to_check, id);
   2224       1.2       rin 	rxr->next_to_check = id;
   2225       1.2       rin 
   2226       1.2       rin #ifdef OPENBSD
   2227       1.1       rin 	if (!(staterr & IGC_RXD_STAT_DD))
   2228       1.1       rin 		return 0;
   2229       1.2       rin #endif
   2230       1.1       rin 
   2231       1.2       rin 	return more;
   2232       1.1       rin }
   2233       1.1       rin 
   2234       1.1       rin /*********************************************************************
   2235       1.1       rin  *
   2236       1.1       rin  *  Verify that the hardware indicated that the checksum is valid.
   2237       1.1       rin  *  Inform the stack about the status of checksum so that stack
   2238       1.1       rin  *  doesn't spend time verifying the checksum.
   2239       1.1       rin  *
   2240       1.1       rin  *********************************************************************/
   2241       1.2       rin static int
   2242       1.2       rin igc_rx_checksum(struct igc_queue *q, uint64_t capenable, uint32_t staterr,
   2243       1.2       rin     uint32_t ptype)
   2244       1.1       rin {
   2245       1.2       rin 	const uint16_t status = (uint16_t)staterr;
   2246       1.2       rin 	const uint8_t errors = (uint8_t)(staterr >> 24);
   2247       1.2       rin 	int flags = 0;
   2248       1.2       rin 
   2249       1.2       rin 	if ((status & IGC_RXD_STAT_IPCS) != 0 &&
   2250       1.2       rin 	    (capenable & IFCAP_CSUM_IPv4_Rx) != 0) {
   2251       1.2       rin 		IGC_DRIVER_EVENT(q, rx_ipcs, 1);
   2252       1.2       rin 		flags |= M_CSUM_IPv4;
   2253       1.2       rin 		if (__predict_false((errors & IGC_RXD_ERR_IPE) != 0)) {
   2254       1.2       rin 			IGC_DRIVER_EVENT(q, rx_ipcs_bad, 1);
   2255       1.2       rin 			flags |= M_CSUM_IPv4_BAD;
   2256       1.2       rin 		}
   2257       1.2       rin 	}
   2258       1.2       rin 
   2259       1.2       rin 	if ((status & IGC_RXD_STAT_TCPCS) != 0) {
   2260       1.2       rin 		IGC_DRIVER_EVENT(q, rx_tcpcs, 1);
   2261       1.2       rin 		if ((capenable & IFCAP_CSUM_TCPv4_Rx) != 0)
   2262       1.2       rin 			flags |= M_CSUM_TCPv4;
   2263       1.2       rin 		if ((capenable & IFCAP_CSUM_TCPv6_Rx) != 0)
   2264       1.2       rin 			flags |= M_CSUM_TCPv6;
   2265       1.2       rin 	}
   2266       1.1       rin 
   2267       1.2       rin 	if ((status & IGC_RXD_STAT_UDPCS) != 0) {
   2268       1.2       rin 		IGC_DRIVER_EVENT(q, rx_udpcs, 1);
   2269       1.2       rin 		if ((capenable & IFCAP_CSUM_UDPv4_Rx) != 0)
   2270       1.2       rin 			flags |= M_CSUM_UDPv4;
   2271       1.2       rin 		if ((capenable & IFCAP_CSUM_UDPv6_Rx) != 0)
   2272       1.2       rin 			flags |= M_CSUM_UDPv6;
   2273       1.1       rin 	}
   2274       1.1       rin 
   2275       1.2       rin 	if (__predict_false((errors & IGC_RXD_ERR_TCPE) != 0)) {
   2276       1.2       rin 		IGC_DRIVER_EVENT(q, rx_l4cs_bad, 1);
   2277       1.2       rin 		if ((flags & ~M_CSUM_IPv4) != 0)
   2278       1.2       rin 			flags |= M_CSUM_TCP_UDP_BAD;
   2279       1.1       rin 	}
   2280       1.2       rin 
   2281       1.2       rin 	return flags;
   2282       1.1       rin }
   2283       1.1       rin 
   2284       1.2       rin static void
   2285       1.1       rin igc_watchdog(struct ifnet * ifp)
   2286       1.1       rin {
   2287       1.1       rin }
   2288       1.1       rin 
   2289       1.2       rin static void
   2290       1.2       rin igc_tick(void *arg)
   2291       1.2       rin {
   2292       1.2       rin 	struct igc_softc *sc = arg;
   2293       1.2       rin 
   2294       1.2       rin 	mutex_enter(&sc->sc_core_lock);
   2295       1.2       rin 
   2296       1.2       rin 	if (__predict_false(sc->sc_core_stopping)) {
   2297       1.2       rin 		mutex_exit(&sc->sc_core_lock);
   2298       1.2       rin 		return;
   2299       1.2       rin 	}
   2300       1.2       rin 
   2301       1.2       rin 	/* XXX watchdog */
   2302       1.2       rin 	if (0) {
   2303       1.2       rin 		IGC_GLOBAL_EVENT(sc, watchdog, 1);
   2304       1.2       rin 	}
   2305       1.2       rin 
   2306       1.2       rin 	igc_update_counters(sc);
   2307       1.2       rin 
   2308       1.2       rin 	mutex_exit(&sc->sc_core_lock);
   2309       1.2       rin 
   2310       1.2       rin 	callout_schedule(&sc->sc_tick_ch, hz);
   2311       1.2       rin }
   2312       1.2       rin 
   2313       1.1       rin /*********************************************************************
   2314       1.1       rin  *
   2315       1.1       rin  *  Media Ioctl callback
   2316       1.1       rin  *
   2317       1.1       rin  *  This routine is called whenever the user queries the status of
   2318       1.1       rin  *  the interface using ifconfig.
   2319       1.1       rin  *
   2320       1.1       rin  **********************************************************************/
   2321       1.2       rin static void
   2322       1.1       rin igc_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
   2323       1.1       rin {
   2324       1.1       rin 	struct igc_softc *sc = ifp->if_softc;
   2325       1.2       rin 	struct igc_hw *hw = &sc->hw;
   2326       1.1       rin 
   2327       1.1       rin 	igc_update_link_status(sc);
   2328       1.1       rin 
   2329       1.1       rin 	ifmr->ifm_status = IFM_AVALID;
   2330       1.1       rin 	ifmr->ifm_active = IFM_ETHER;
   2331       1.1       rin 
   2332       1.1       rin 	if (!sc->link_active) {
   2333       1.1       rin 		ifmr->ifm_active |= IFM_NONE;
   2334       1.1       rin 		return;
   2335       1.1       rin 	}
   2336       1.1       rin 
   2337       1.1       rin 	ifmr->ifm_status |= IFM_ACTIVE;
   2338       1.1       rin 
   2339       1.1       rin 	switch (sc->link_speed) {
   2340       1.1       rin 	case 10:
   2341       1.1       rin 		ifmr->ifm_active |= IFM_10_T;
   2342       1.1       rin 		break;
   2343       1.1       rin 	case 100:
   2344       1.1       rin 		ifmr->ifm_active |= IFM_100_TX;
   2345       1.2       rin 		break;
   2346       1.1       rin 	case 1000:
   2347       1.1       rin 		ifmr->ifm_active |= IFM_1000_T;
   2348       1.1       rin 		break;
   2349       1.1       rin 	case 2500:
   2350       1.2       rin 		ifmr->ifm_active |= IFM_2500_T;
   2351       1.2       rin 		break;
   2352       1.1       rin 	}
   2353       1.1       rin 
   2354       1.1       rin 	if (sc->link_duplex == FULL_DUPLEX)
   2355       1.1       rin 		ifmr->ifm_active |= IFM_FDX;
   2356       1.1       rin 	else
   2357       1.1       rin 		ifmr->ifm_active |= IFM_HDX;
   2358       1.2       rin 
   2359       1.2       rin 	switch (hw->fc.current_mode) {
   2360       1.2       rin 	case igc_fc_tx_pause:
   2361       1.2       rin 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   2362       1.2       rin 		break;
   2363       1.2       rin 	case igc_fc_rx_pause:
   2364       1.2       rin 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   2365       1.2       rin 		break;
   2366       1.2       rin 	case igc_fc_full:
   2367       1.2       rin 		ifmr->ifm_active |= IFM_FLOW |
   2368       1.2       rin 		    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   2369       1.2       rin 		break;
   2370       1.2       rin 	case igc_fc_none:
   2371       1.2       rin 	default:
   2372       1.2       rin 		break;
   2373       1.2       rin 	}
   2374       1.1       rin }
   2375       1.1       rin 
   2376       1.1       rin /*********************************************************************
   2377       1.1       rin  *
   2378       1.1       rin  *  Media Ioctl callback
   2379       1.1       rin  *
   2380       1.1       rin  *  This routine is called when the user changes speed/duplex using
 *  media/mediaopt option with ifconfig.
   2382       1.1       rin  *
   2383       1.1       rin  **********************************************************************/
   2384       1.2       rin static int
   2385       1.1       rin igc_media_change(struct ifnet *ifp)
   2386       1.1       rin {
   2387       1.1       rin 	struct igc_softc *sc = ifp->if_softc;
   2388       1.1       rin 	struct ifmedia *ifm = &sc->media;
   2389       1.1       rin 
   2390       1.1       rin 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   2391       1.2       rin 		return EINVAL;
   2392       1.1       rin 
   2393       1.1       rin 	sc->hw.mac.autoneg = DO_AUTO_NEG;
   2394       1.1       rin 
   2395       1.1       rin 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
   2396       1.1       rin 	case IFM_AUTO:
   2397       1.1       rin 		sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
   2398       1.1       rin 		break;
   2399       1.2       rin 	case IFM_2500_T:
   2400       1.2       rin 		sc->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL;
   2401       1.2       rin 		break;
   2402       1.1       rin 	case IFM_1000_T:
   2403       1.1       rin 		sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
   2404       1.1       rin 		break;
   2405       1.1       rin 	case IFM_100_TX:
   2406       1.4   msaitoh 		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
   2407       1.4   msaitoh 			sc->hw.phy.autoneg_advertised = ADVERTISE_100_FULL;
   2408       1.4   msaitoh 		else
   2409       1.1       rin 			sc->hw.phy.autoneg_advertised = ADVERTISE_100_HALF;
   2410       1.1       rin 		break;
   2411       1.1       rin 	case IFM_10_T:
   2412       1.4   msaitoh 		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
   2413       1.4   msaitoh 			sc->hw.phy.autoneg_advertised = ADVERTISE_10_FULL;
   2414       1.4   msaitoh 		else
   2415       1.1       rin 			sc->hw.phy.autoneg_advertised = ADVERTISE_10_HALF;
   2416       1.1       rin 		break;
   2417       1.1       rin 	default:
   2418       1.1       rin 		return EINVAL;
   2419       1.1       rin 	}
   2420       1.1       rin 
   2421       1.2       rin 	igc_init_locked(sc);
   2422       1.1       rin 
   2423       1.1       rin 	return 0;
   2424       1.1       rin }
   2425       1.1       rin 
/*
 * igc_ifflags_cb --
 *	Ethernet-layer callback run when if_flags change.  Applies what
 *	can be applied in place (promiscuous filter update) and returns
 *	ENETRESET when the change requires a full interface reset; 0
 *	otherwise.  Runs with the ifnet lock held.
 */
static int
igc_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct igc_softc *sc = ifp->if_softc;
	int rc = 0;
	u_short iffchange;
	bool needreset = false;

	DPRINTF(CFG, "called\n");

	KASSERT(IFNET_LOCKED(ifp));

	mutex_enter(&sc->sc_core_lock);

	/*
	 * Check for if_flags.
	 * Main usage is to prevent linkdown when opening bpf.
	 */
	iffchange = ifp->if_flags ^ sc->sc_if_flags;
	sc->sc_if_flags = ifp->if_flags;
	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
		/* A non-trivial flag changed: demand a reset. */
		needreset = true;
		goto ec;
	}

	/* iff related updates */
	if ((iffchange & IFF_PROMISC) != 0)
		igc_set_filter(sc);

#ifdef notyet
	igc_set_vlan(sc);
#endif

ec:
#ifdef notyet
	/* Check for ec_capenable. */
	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
	sc->sc_ec_capenable = ec->ec_capenable;
	if ((ecchange & ~ETHERCAP_SOMETHING) != 0) {
		needreset = true;
		goto out;
	}
#endif
	if (needreset)
		rc = ENETRESET;

	mutex_exit(&sc->sc_core_lock);

	return rc;
}
   2477       1.2       rin 
/*
 * igc_set_filter --
 *	Program the hardware receive filter: the RCTL broadcast (BAM),
 *	unicast-promiscuous (UPE) and multicast-promiscuous (MPE) bits,
 *	and the multicast address table, from the current interface
 *	flags and multicast list.  Called from igc_ifflags_cb() with
 *	sc->sc_core_lock held (sc_if_flags is read here).
 */
static void
igc_set_filter(struct igc_softc *sc)
{
	struct ethercom *ec = &sc->sc_ec;
	uint32_t rctl;

	/* Start from current RCTL with all filter bits cleared. */
	rctl = IGC_READ_REG(&sc->hw, IGC_RCTL);
	rctl &= ~(IGC_RCTL_BAM |IGC_RCTL_UPE | IGC_RCTL_MPE);

	if ((sc->sc_if_flags & IFF_BROADCAST) != 0)
		rctl |= IGC_RCTL_BAM;	/* accept broadcast */
	if ((sc->sc_if_flags & IFF_PROMISC) != 0) {
		DPRINTF(CFG, "promisc\n");
		rctl |= IGC_RCTL_UPE;	/* accept all unicast */
		ETHER_LOCK(ec);
		/*
		 * NB: "allmulti" is also entered via goto from the else
		 * branch below, with ETHER_LOCK already held there too.
		 */
 allmulti:
		ec->ec_flags |= ETHER_F_ALLMULTI;
		ETHER_UNLOCK(ec);
		rctl |= IGC_RCTL_MPE;	/* accept all multicast */
	} else {
		struct ether_multistep step;
		struct ether_multi *enm;
		int mcnt = 0;

		memset(sc->mta, 0, IGC_MTA_LEN);

		ETHER_LOCK(ec);
		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			if (((memcmp(enm->enm_addrlo, enm->enm_addrhi,
					ETHER_ADDR_LEN)) != 0) ||
			    (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)) {
				/*
				 * We must listen to a range of multicast
				 * addresses. For now, just accept all
				 * multicasts, rather than trying to set only
				 * those filter bits needed to match the range.
				 * (At this time, the only use of address
				 * ranges is for IP multicast routing, for
				 * which the range is big enough to require all
				 * bits set.)
				 */
				goto allmulti;
			}
			DPRINTF(CFG, "%d: %s\n", mcnt,
			    ether_sprintf(enm->enm_addrlo));
			/* Collect the address for the hardware MTA. */
			memcpy(&sc->mta[mcnt * ETHER_ADDR_LEN],
			    enm->enm_addrlo, ETHER_ADDR_LEN);

			mcnt++;
			ETHER_NEXT_MULTI(step, enm);
		}
		ec->ec_flags &= ~ETHER_F_ALLMULTI;
		ETHER_UNLOCK(ec);

		/* Load the collected addresses into the hardware. */
		DPRINTF(CFG, "hw filter\n");
		igc_update_mc_addr_list(&sc->hw, sc->mta, mcnt);
	}

	IGC_WRITE_REG(&sc->hw, IGC_RCTL, rctl);
}
   2539       1.1       rin 
   2540       1.2       rin static void
   2541       1.1       rin igc_update_link_status(struct igc_softc *sc)
   2542       1.1       rin {
   2543       1.2       rin 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   2544       1.1       rin 	struct igc_hw *hw = &sc->hw;
   2545       1.1       rin 
   2546      1.11   msaitoh 	if (hw->mac.get_link_status == true)
   2547      1.11   msaitoh 		igc_check_for_link(hw);
   2548      1.11   msaitoh 
   2549       1.1       rin 	if (IGC_READ_REG(&sc->hw, IGC_STATUS) & IGC_STATUS_LU) {
   2550       1.1       rin 		if (sc->link_active == 0) {
   2551       1.1       rin 			igc_get_speed_and_duplex(hw, &sc->link_speed,
   2552       1.1       rin 			    &sc->link_duplex);
   2553       1.1       rin 			sc->link_active = 1;
   2554       1.1       rin 			ifp->if_baudrate = IF_Mbps(sc->link_speed);
   2555       1.2       rin 			if_link_state_change(ifp, LINK_STATE_UP);
   2556       1.1       rin 		}
   2557       1.1       rin 	} else {
   2558       1.1       rin 		if (sc->link_active == 1) {
   2559       1.1       rin 			ifp->if_baudrate = sc->link_speed = 0;
   2560       1.1       rin 			sc->link_duplex = 0;
   2561       1.1       rin 			sc->link_active = 0;
   2562       1.2       rin 			if_link_state_change(ifp, LINK_STATE_DOWN);
   2563       1.1       rin 		}
   2564       1.1       rin 	}
   2565       1.1       rin }
   2566       1.1       rin 
   2567       1.1       rin /*********************************************************************
   2568       1.1       rin  *
   2569       1.1       rin  *  Get a buffer from system mbuf buffer pool.
   2570       1.1       rin  *
   2571       1.1       rin  **********************************************************************/
/*
 * igc_get_buf --
 *	Attach a fresh mbuf cluster to RX slot 'id' of ring 'rxr', load
 *	it into the slot's DMA map, and write the new buffer address
 *	into the corresponding RX descriptor.
 *
 *	If the slot already has a buffer, return EINVAL when 'strict',
 *	otherwise treat it as success.  Returns ENOBUFS when no mbuf or
 *	cluster is available, or the bus_dmamap_load_mbuf() error code.
 */
static int
igc_get_buf(struct rx_ring *rxr, int id, bool strict)
{
	struct igc_softc *sc = rxr->sc;
	struct igc_queue *q = rxr->rxr_igcq;
	struct igc_rx_buf *rxbuf = &rxr->rx_buffers[id];
	bus_dmamap_t map = rxbuf->map;
	struct mbuf *m;
	int error;

	/* Slot already populated: only an error in strict mode. */
	if (__predict_false(rxbuf->buf)) {
		if (strict) {
			DPRINTF(RX, "slot %d already has an mbuf\n", id);
			return EINVAL;
		}
		return 0;
	}

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (__predict_false(m == NULL)) {
 enobuf:
		IGC_QUEUE_EVENT(q, rx_no_mbuf, 1);
		return ENOBUFS;
	}
	MCLAIM(m, &sc->sc_ec.ec_rx_mowner);

	MCLGET(m, M_DONTWAIT);
	if (__predict_false(!(m->m_flags & M_EXT))) {
		/* Got an mbuf header but no cluster; give up. */
		m_freem(m);
		goto enobuf;
	}

	m->m_len = m->m_pkthdr.len = sc->rx_mbuf_sz;

	error = bus_dmamap_load_mbuf(rxr->rxdma.dma_tag, map, m,
	    BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		return error;
	}

	/* Make the buffer device-visible before handing it to hardware. */
	bus_dmamap_sync(rxr->rxdma.dma_tag, map, 0,
	    map->dm_mapsize, BUS_DMASYNC_PREREAD);
	rxbuf->buf = m;

	/* Publish the new DMA address in the RX descriptor. */
	union igc_adv_rx_desc *rxdesc = &rxr->rx_base[id];
	igc_rxdesc_sync(rxr, id, BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
	rxdesc->read.pkt_addr = htole64(map->dm_segs[0].ds_addr);
	igc_rxdesc_sync(rxr, id, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	return 0;
}
   2624       1.1       rin 
/*
 * igc_configure_queues --
 *	MSI-X setup: map each queue's RX and TX interrupt causes to its
 *	vector via the IVAR registers, program the initial interrupt
 *	throttle rate (EITR) per vector, accumulate the queue EIMS mask,
 *	and route the link interrupt through IVAR_MISC.
 */
static void
igc_configure_queues(struct igc_softc *sc)
{
	struct igc_hw *hw = &sc->hw;
	uint32_t ivar;

	/* First turn on RSS capability */
	IGC_WRITE_REG(hw, IGC_GPIE, IGC_GPIE_MSIX_MODE | IGC_GPIE_EIAME |
	    IGC_GPIE_PBA | IGC_GPIE_NSICR);

	/* Set the starting interrupt rate */
	uint32_t newitr = (4000000 / MAX_INTS_PER_SEC) & 0x7FFC;
	newitr |= IGC_EITR_CNT_IGNR;

	/* Turn on MSI-X */
	uint32_t newmask = 0;
	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
		struct igc_queue *q = &sc->queues[iq];

		/* RX entries */
		igc_set_queues(sc, iq, q->msix, 0);
		/* TX entries */
		igc_set_queues(sc, iq, q->msix, 1);
		newmask |= q->eims;
		IGC_WRITE_REG(hw, IGC_EITR(q->msix), newitr);
	}
	sc->msix_queuesmask = newmask;

#if 1
	/* Debug: dump the first two IVAR registers as programmed. */
	ivar = IGC_READ_REG_ARRAY(hw, IGC_IVAR0, 0);
	DPRINTF(CFG, "ivar(0)=0x%x\n", ivar);
	ivar = IGC_READ_REG_ARRAY(hw, IGC_IVAR0, 1);
	DPRINTF(CFG, "ivar(1)=0x%x\n", ivar);
#endif

	/* And for the link interrupt */
	ivar = (sc->linkvec | IGC_IVAR_VALID) << 8;
	sc->msix_linkmask = 1 << sc->linkvec;
	IGC_WRITE_REG(hw, IGC_IVAR_MISC, ivar);
}
   2665       1.1       rin 
   2666       1.2       rin static void
   2667       1.1       rin igc_set_queues(struct igc_softc *sc, uint32_t entry, uint32_t vector, int type)
   2668       1.1       rin {
   2669       1.1       rin 	struct igc_hw *hw = &sc->hw;
   2670       1.2       rin 	const uint32_t index = entry >> 1;
   2671       1.2       rin 	uint32_t ivar = IGC_READ_REG_ARRAY(hw, IGC_IVAR0, index);
   2672       1.1       rin 
   2673       1.1       rin 	if (type) {
   2674       1.1       rin 		if (entry & 1) {
   2675       1.1       rin 			ivar &= 0x00FFFFFF;
   2676       1.1       rin 			ivar |= (vector | IGC_IVAR_VALID) << 24;
   2677       1.1       rin 		} else {
   2678       1.1       rin 			ivar &= 0xFFFF00FF;
   2679       1.1       rin 			ivar |= (vector | IGC_IVAR_VALID) << 8;
   2680       1.1       rin 		}
   2681       1.1       rin 	} else {
   2682       1.1       rin 		if (entry & 1) {
   2683       1.1       rin 			ivar &= 0xFF00FFFF;
   2684       1.1       rin 			ivar |= (vector | IGC_IVAR_VALID) << 16;
   2685       1.1       rin 		} else {
   2686       1.1       rin 			ivar &= 0xFFFFFF00;
   2687       1.1       rin 			ivar |= vector | IGC_IVAR_VALID;
   2688       1.1       rin 		}
   2689       1.1       rin 	}
   2690       1.1       rin 	IGC_WRITE_REG_ARRAY(hw, IGC_IVAR0, index, ivar);
   2691       1.1       rin }
   2692       1.1       rin 
/*
 * igc_enable_queue --
 *	Unmask (re-enable) the MSI-X queue interrupt(s) in 'eims'.
 */
static void
igc_enable_queue(struct igc_softc *sc, uint32_t eims)
{
	IGC_WRITE_REG(&sc->hw, IGC_EIMS, eims);
}
   2698       1.1       rin 
   2699       1.2       rin static void
   2700       1.1       rin igc_enable_intr(struct igc_softc *sc)
   2701       1.1       rin {
   2702       1.1       rin 	struct igc_hw *hw = &sc->hw;
   2703       1.1       rin 
   2704       1.2       rin 	if (sc->sc_intr_type == PCI_INTR_TYPE_MSIX) {
   2705       1.2       rin 		const uint32_t mask = sc->msix_queuesmask | sc->msix_linkmask;
   2706       1.2       rin 
   2707       1.2       rin 		IGC_WRITE_REG(hw, IGC_EIAC, mask);
   2708       1.2       rin 		IGC_WRITE_REG(hw, IGC_EIAM, mask);
   2709       1.2       rin 		IGC_WRITE_REG(hw, IGC_EIMS, mask);
   2710       1.2       rin 		IGC_WRITE_REG(hw, IGC_IMS, IGC_IMS_LSC);
   2711       1.2       rin 	} else {
   2712       1.2       rin 		IGC_WRITE_REG(hw, IGC_IMS, IMS_ENABLE_MASK);
   2713       1.2       rin 	}
   2714       1.1       rin 	IGC_WRITE_FLUSH(hw);
   2715       1.1       rin }
   2716       1.1       rin 
   2717       1.2       rin static void
   2718       1.1       rin igc_disable_intr(struct igc_softc *sc)
   2719       1.1       rin {
   2720       1.1       rin 	struct igc_hw *hw = &sc->hw;
   2721       1.1       rin 
   2722       1.2       rin 	if (sc->sc_intr_type == PCI_INTR_TYPE_MSIX) {
   2723       1.2       rin 		IGC_WRITE_REG(hw, IGC_EIMC, 0xffffffff);
   2724       1.2       rin 		IGC_WRITE_REG(hw, IGC_EIAC, 0);
   2725       1.2       rin 	}
   2726       1.1       rin 	IGC_WRITE_REG(hw, IGC_IMC, 0xffffffff);
   2727       1.1       rin 	IGC_WRITE_FLUSH(hw);
   2728       1.1       rin }
   2729       1.1       rin 
/*
 * igc_intr_link --
 *	MSI-X link-vector interrupt handler.  On a link status change,
 *	refresh the cached link state under sc_core_lock, then re-arm
 *	the link interrupt.
 */
static int
igc_intr_link(void *arg)
{
	struct igc_softc *sc = (struct igc_softc *)arg;
	const uint32_t reg_icr = IGC_READ_REG(&sc->hw, IGC_ICR);

	IGC_GLOBAL_EVENT(sc, link, 1);

	if (reg_icr & IGC_ICR_LSC) {
		/* Force a PHY poll in igc_update_link_status(). */
		mutex_enter(&sc->sc_core_lock);
		sc->hw.mac.get_link_status = true;
		igc_update_link_status(sc);
		mutex_exit(&sc->sc_core_lock);
	}

	/* Re-enable the link cause and unmask its MSI-X vector. */
	IGC_WRITE_REG(&sc->hw, IGC_IMS, IGC_IMS_LSC);
	IGC_WRITE_REG(&sc->hw, IGC_EIMS, sc->msix_linkmask);

	return 1;
}
   2750       1.1       rin 
/*
 * igc_intr_queue --
 *	MSI-X per-queue interrupt handler.  Completes TX and reaps RX up
 *	to the interrupt-context process limits; if either side still
 *	has work, defer it to softint/workqueue context instead of
 *	re-enabling this queue's vector.
 */
static int
igc_intr_queue(void *arg)
{
	struct igc_queue *iq = arg;
	struct igc_softc *sc = iq->sc;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct rx_ring *rxr = iq->rxr;
	struct tx_ring *txr = iq->txr;
	const u_int txlimit = sc->sc_tx_intr_process_limit,
		    rxlimit = sc->sc_rx_intr_process_limit;
	bool txmore, rxmore;

	IGC_QUEUE_EVENT(iq, irqs, 1);

	/* Ignore interrupts while the interface is down. */
	if (__predict_false(!ISSET(ifp->if_flags, IFF_RUNNING)))
		return 0;

	mutex_enter(&txr->txr_lock);
	txmore = igc_txeof(txr, txlimit);
	mutex_exit(&txr->txr_lock);
	mutex_enter(&rxr->rxr_lock);
	rxmore = igc_rxeof(rxr, rxlimit);
	mutex_exit(&rxr->rxr_lock);

	if (txmore || rxmore) {
		/* More work pending: continue in deferred context. */
		IGC_QUEUE_EVENT(iq, req, 1);
		igc_sched_handle_queue(sc, iq);
	} else {
		igc_enable_queue(sc, iq->eims);
	}

	return 1;
}
   2784       1.2       rin 
   2785       1.2       rin static int
   2786       1.2       rin igc_intr(void *arg)
   2787       1.2       rin {
   2788       1.2       rin 	struct igc_softc *sc = arg;
   2789       1.2       rin 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   2790       1.2       rin 	struct igc_queue *iq = &sc->queues[0];
   2791       1.1       rin 	struct rx_ring *rxr = iq->rxr;
   2792       1.1       rin 	struct tx_ring *txr = iq->txr;
   2793       1.2       rin 	const u_int txlimit = sc->sc_tx_intr_process_limit,
   2794       1.2       rin 		    rxlimit = sc->sc_rx_intr_process_limit;
   2795       1.2       rin 	bool txmore, rxmore;
   2796       1.2       rin 
   2797       1.2       rin 	if (__predict_false(!ISSET(ifp->if_flags, IFF_RUNNING)))
   2798       1.2       rin 		return 0;
   2799       1.2       rin 
   2800       1.2       rin 	const uint32_t reg_icr = IGC_READ_REG(&sc->hw, IGC_ICR);
   2801       1.2       rin 	DPRINTF(MISC, "reg_icr=0x%x\n", reg_icr);
   2802       1.2       rin 
   2803       1.2       rin 	/* Definitely not our interrupt. */
   2804       1.2       rin 	if (reg_icr == 0x0) {
   2805      1.14       rin 		DPRINTF(MISC, "not for me\n");
   2806       1.2       rin 		return 0;
   2807       1.2       rin 	}
   2808       1.2       rin 
   2809       1.2       rin 	IGC_QUEUE_EVENT(iq, irqs, 1);
   2810       1.2       rin 
   2811       1.2       rin 	/* Hot eject? */
   2812       1.2       rin 	if (__predict_false(reg_icr == 0xffffffff)) {
   2813       1.2       rin 		DPRINTF(MISC, "hot eject\n");
   2814       1.2       rin 		return 0;
   2815       1.2       rin 	}
   2816       1.2       rin 
   2817       1.2       rin 	if (__predict_false(!(reg_icr & IGC_ICR_INT_ASSERTED))) {
   2818       1.2       rin 		DPRINTF(MISC, "not set IGC_ICR_INT_ASSERTED");
   2819       1.2       rin 		return 0;
   2820       1.2       rin 	}
   2821       1.2       rin 
   2822       1.2       rin 	/*
   2823       1.2       rin 	 * Only MSI-X interrupts have one-shot behavior by taking advantage
   2824       1.2       rin 	 * of the EIAC register.  Thus, explicitly disable interrupts.  This
   2825       1.2       rin 	 * also works around the MSI message reordering errata on certain
   2826       1.2       rin 	 * systems.
   2827       1.2       rin 	 */
   2828       1.2       rin 	igc_disable_intr(sc);
   2829       1.1       rin 
   2830       1.2       rin 	mutex_enter(&txr->txr_lock);
   2831       1.2       rin 	txmore = igc_txeof(txr, txlimit);
   2832       1.2       rin 	mutex_exit(&txr->txr_lock);
   2833       1.2       rin 	mutex_enter(&rxr->rxr_lock);
   2834       1.2       rin 	rxmore = igc_rxeof(rxr, rxlimit);
   2835       1.2       rin 	mutex_exit(&rxr->rxr_lock);
   2836       1.2       rin 
   2837       1.2       rin 	/* Link status change */
   2838       1.2       rin 	// XXXX FreeBSD checks IGC_ICR_RXSEQ
   2839       1.2       rin 	if (__predict_false(reg_icr & IGC_ICR_LSC)) {
   2840       1.2       rin 		IGC_GLOBAL_EVENT(sc, link, 1);
   2841       1.2       rin 		mutex_enter(&sc->sc_core_lock);
   2842       1.2       rin 		sc->hw.mac.get_link_status = true;
   2843       1.2       rin 		igc_update_link_status(sc);
   2844       1.2       rin 		mutex_exit(&sc->sc_core_lock);
   2845       1.1       rin 	}
   2846       1.1       rin 
   2847       1.2       rin 	if (txmore || rxmore) {
   2848       1.2       rin 		IGC_QUEUE_EVENT(iq, req, 1);
   2849       1.2       rin 		igc_sched_handle_queue(sc, iq);
   2850       1.2       rin 	} else {
   2851       1.2       rin 		igc_enable_intr(sc);
   2852       1.2       rin 	}
   2853       1.1       rin 
   2854       1.1       rin 	return 1;
   2855       1.1       rin }
   2856       1.1       rin 
/*
 * igc_handle_queue --
 *	Deferred (softint or workqueue) TX/RX processing for one queue,
 *	using the non-interrupt process limits.  Also restarts TX from
 *	if_snd on queue 0 (the ALTQ path).  Reschedules itself while
 *	work remains; otherwise re-enables the queue's interrupt (or
 *	all interrupts in legacy/MSI mode).
 */
static void
igc_handle_queue(void *arg)
{
	struct igc_queue *iq = arg;
	struct igc_softc *sc = iq->sc;
	struct tx_ring *txr = iq->txr;
	struct rx_ring *rxr = iq->rxr;
	const u_int txlimit = sc->sc_tx_process_limit,
		    rxlimit = sc->sc_rx_process_limit;
	bool txmore, rxmore;

	IGC_QUEUE_EVENT(iq, handleq, 1);

	mutex_enter(&txr->txr_lock);
	txmore = igc_txeof(txr, txlimit);
	/* for ALTQ, dequeue from if_snd */
	if (txr->me == 0) {
		struct ifnet *ifp = &sc->sc_ec.ec_if;

		igc_tx_common_locked(ifp, txr, IGC_TX_START);
	}
	mutex_exit(&txr->txr_lock);

	mutex_enter(&rxr->rxr_lock);
	rxmore = igc_rxeof(rxr, rxlimit);
	mutex_exit(&rxr->rxr_lock);

	if (txmore || rxmore) {
		/* Still work left: run again before re-enabling. */
		igc_sched_handle_queue(sc, iq);
	} else {
		if (sc->sc_intr_type == PCI_INTR_TYPE_MSIX)
			igc_enable_queue(sc, iq->eims);
		else
			igc_enable_intr(sc);
	}
}
   2893       1.2       rin 
/*
 * igc_handle_queue_work --
 *	Workqueue wrapper: recover the queue from its embedded work
 *	cookie and run igc_handle_queue().
 */
static void
igc_handle_queue_work(struct work *wk, void *context)
{
	struct igc_queue *iq =
	    container_of(wk, struct igc_queue, igcq_wq_cookie);

	igc_handle_queue(iq);
}
   2902       1.2       rin 
   2903       1.2       rin static void
   2904       1.2       rin igc_sched_handle_queue(struct igc_softc *sc, struct igc_queue *iq)
   2905       1.2       rin {
   2906       1.2       rin 
   2907       1.2       rin 	if (iq->igcq_workqueue) {
   2908       1.2       rin 		/* XXXRO notyet */
   2909       1.2       rin 		workqueue_enqueue(sc->sc_queue_wq, &iq->igcq_wq_cookie,
   2910       1.2       rin 		    curcpu());
   2911       1.2       rin 	} else {
   2912       1.2       rin 		softint_schedule(iq->igcq_si);
   2913       1.2       rin 	}
   2914       1.2       rin }
   2915       1.2       rin 
/*
 * igc_barrier_handle_queue --
 *	Wait for any in-flight deferred queue processing to finish:
 *	drain each queue's workqueue item, or use a cross-call barrier
 *	to flush pending softints.
 *
 *	NOTE(review): this tests sc->sc_txrx_workqueue while
 *	igc_sched_handle_queue() tests iq->igcq_workqueue; presumably
 *	the per-queue flag mirrors the global one -- confirm.
 */
static void
igc_barrier_handle_queue(struct igc_softc *sc)
{

	if (sc->sc_txrx_workqueue) {
		for (int iq = 0; iq < sc->sc_nqueues; iq++) {
			struct igc_queue *q = &sc->queues[iq];

			workqueue_wait(sc->sc_queue_wq, &q->igcq_wq_cookie);
		}
	} else {
		xc_barrier(0);
	}
}
   2930       1.2       rin 
   2931       1.1       rin /*********************************************************************
   2932       1.1       rin  *
   2933       1.1       rin  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
   2934       1.1       rin  *  the information needed to transmit a packet on the wire.
   2935       1.1       rin  *
   2936       1.1       rin  **********************************************************************/
/*
 * igc_allocate_transmit_buffers --
 *	Allocate the per-descriptor igc_tx_buf array for 'txr' and
 *	create a DMA map for each slot.  Returns 0 on success or the
 *	bus_dmamap_create() error; on failure, already-created maps are
 *	left for the caller's teardown path (igc_free_transmit_structures
 *	via igc_setup_transmit_structures) to release.
 */
static int
igc_allocate_transmit_buffers(struct tx_ring *txr)
{
	struct igc_softc *sc = txr->sc;
	int error;

	/* One igc_tx_buf per TX descriptor, zero-initialized. */
	txr->tx_buffers =
	    kmem_zalloc(sc->num_tx_desc * sizeof(struct igc_tx_buf), KM_SLEEP);
	txr->txtag = txr->txdma.dma_tag;

	/* Create the descriptor buffer dma maps. */
	for (int id = 0; id < sc->num_tx_desc; id++) {
		struct igc_tx_buf *txbuf = &txr->tx_buffers[id];

		/* Sized for a TSO payload plus a VLAN ethernet header. */
		error = bus_dmamap_create(txr->txdma.dma_tag,
		    round_page(IGC_TSO_SIZE + sizeof(struct ether_vlan_header)),
		    IGC_MAX_SCATTER, PAGE_SIZE, 0, BUS_DMA_NOWAIT, &txbuf->map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create TX DMA map\n");
			goto fail;
		}

		/* No packet attached to this slot yet. */
		txbuf->eop_index = -1;
	}

	return 0;
 fail:
	return error;
}
   2967       1.1       rin 
   2968       1.1       rin 
   2969       1.1       rin /*********************************************************************
   2970       1.1       rin  *
   2971       1.1       rin  *  Allocate and initialize transmit structures.
   2972       1.1       rin  *
   2973       1.1       rin  **********************************************************************/
   2974       1.2       rin static int
   2975       1.1       rin igc_setup_transmit_structures(struct igc_softc *sc)
   2976       1.1       rin {
   2977       1.1       rin 
   2978       1.2       rin 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
   2979       1.2       rin 		struct tx_ring *txr = &sc->tx_rings[iq];
   2980       1.2       rin 
   2981       1.1       rin 		if (igc_setup_transmit_ring(txr))
   2982       1.1       rin 			goto fail;
   2983       1.1       rin 	}
   2984       1.1       rin 
   2985       1.1       rin 	return 0;
   2986       1.2       rin  fail:
   2987       1.1       rin 	igc_free_transmit_structures(sc);
   2988       1.1       rin 	return ENOBUFS;
   2989       1.1       rin }
   2990       1.1       rin 
   2991       1.1       rin /*********************************************************************
   2992       1.1       rin  *
   2993       1.1       rin  *  Initialize a transmit ring.
   2994       1.1       rin  *
   2995       1.1       rin  **********************************************************************/
static int
igc_setup_transmit_ring(struct tx_ring *txr)
{
	struct igc_softc *sc = txr->sc;

	/* Now allocate transmit buffers for the ring. */
	if (igc_allocate_transmit_buffers(txr))
		return ENOMEM;

	/* Clear the old ring contents */
	memset(txr->tx_base, 0,
	    sizeof(union igc_adv_tx_desc) * sc->num_tx_desc);

	/* Reset indices. */
	txr->next_avail_desc = 0;
	txr->next_to_clean = 0;

	/* Flush the zeroed descriptor ring out to the device. */
	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,
	    txr->txdma.dma_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Queue for packets deferred from the if_transmit path. */
	txr->txr_interq = pcq_create(sc->num_tx_desc, KM_SLEEP);

	mutex_init(&txr->txr_lock, MUTEX_DEFAULT, IPL_NET);

	return 0;
}
   3023       1.1       rin 
   3024       1.1       rin /*********************************************************************
   3025       1.1       rin  *
   3026       1.1       rin  *  Enable transmit unit.
   3027       1.1       rin  *
   3028       1.1       rin  **********************************************************************/
static void
igc_initialize_transmit_unit(struct igc_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct igc_hw *hw = &sc->hw;

	/* Setup the Base and Length of the TX descriptor ring. */
	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
		struct tx_ring *txr = &sc->tx_rings[iq];
		const uint64_t bus_addr =
		    txr->txdma.dma_map->dm_segs[0].ds_addr;

		/* Base and len of TX ring */
		IGC_WRITE_REG(hw, IGC_TDLEN(iq),
		    sc->num_tx_desc * sizeof(union igc_adv_tx_desc));
		IGC_WRITE_REG(hw, IGC_TDBAH(iq), (uint32_t)(bus_addr >> 32));
		IGC_WRITE_REG(hw, IGC_TDBAL(iq), (uint32_t)bus_addr);

		/* Init the HEAD/TAIL indices */
		IGC_WRITE_REG(hw, IGC_TDT(iq), 0 /* XXX txr->next_avail_desc */);
		IGC_WRITE_REG(hw, IGC_TDH(iq), 0);

		/* Nothing is in flight yet, so no watchdog is pending. */
		txr->watchdog_timer = 0;

		/*
		 * Program the per-queue descriptor-fetch and
		 * write-back thresholds.
		 */
		uint32_t txdctl = 0;	/* Clear txdctl */
		txdctl |= 0x1f;		/* PTHRESH */
		txdctl |= 1 << 8;	/* HTHRESH */
		txdctl |= 1 << 16;	/* WTHRESH */
		txdctl |= 1 << 22;	/* Reserved bit 22 must always be 1 */
		txdctl |= IGC_TXDCTL_GRAN;
		txdctl |= 1 << 25;	/* LWTHRESH */

		IGC_WRITE_REG(hw, IGC_TXDCTL(iq), txdctl);
	}
	ifp->if_timer = 0;

	/* Program the Transmit Control Register */
	uint32_t tctl = IGC_READ_REG(&sc->hw, IGC_TCTL);
	tctl &= ~IGC_TCTL_CT;
	tctl |= (IGC_TCTL_PSP | IGC_TCTL_RTLC | IGC_TCTL_EN |
	    (IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT));

	/* This write will effectively turn on the transmit unit. */
	IGC_WRITE_REG(&sc->hw, IGC_TCTL, tctl);
}
   3074       1.1       rin 
   3075       1.1       rin /*********************************************************************
   3076       1.1       rin  *
   3077       1.1       rin  *  Free all transmit rings.
   3078       1.1       rin  *
   3079       1.1       rin  **********************************************************************/
   3080       1.2       rin static void
   3081       1.1       rin igc_free_transmit_structures(struct igc_softc *sc)
   3082       1.1       rin {
   3083       1.1       rin 
   3084       1.2       rin 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
   3085       1.2       rin 		struct tx_ring *txr = &sc->tx_rings[iq];
   3086       1.2       rin 
   3087       1.1       rin 		igc_free_transmit_buffers(txr);
   3088       1.2       rin 	}
   3089       1.1       rin }
   3090       1.1       rin 
   3091       1.1       rin /*********************************************************************
   3092       1.1       rin  *
   3093       1.1       rin  *  Free transmit ring related data structures.
   3094       1.1       rin  *
   3095       1.1       rin  **********************************************************************/
static void
igc_free_transmit_buffers(struct tx_ring *txr)
{
	struct igc_softc *sc = txr->sc;

	/* Nothing to do if the ring was never (fully) set up. */
	if (txr->tx_buffers == NULL)
		return;

	/* Free queued mbufs and destroy (destroy=true) the DMA maps. */
	igc_withdraw_transmit_packets(txr, true);

	kmem_free(txr->tx_buffers,
	    sc->num_tx_desc * sizeof(struct igc_tx_buf));
	txr->tx_buffers = NULL;
	txr->txtag = NULL;

	pcq_destroy(txr->txr_interq);
	mutex_destroy(&txr->txr_lock);
}
   3114       1.2       rin 
   3115       1.2       rin /*********************************************************************
   3116       1.2       rin  *
   3117       1.2       rin  *  Withdraw transmit packets.
   3118       1.2       rin  *
   3119       1.2       rin  **********************************************************************/
   3120       1.2       rin static void
   3121       1.2       rin igc_withdraw_transmit_packets(struct tx_ring *txr, bool destroy)
   3122       1.2       rin {
   3123       1.2       rin 	struct igc_softc *sc = txr->sc;
   3124       1.2       rin 	struct igc_queue *q = txr->txr_igcq;
   3125       1.2       rin 
   3126       1.2       rin 	mutex_enter(&txr->txr_lock);
   3127       1.2       rin 
   3128       1.2       rin 	for (int id = 0; id < sc->num_tx_desc; id++) {
   3129       1.2       rin 		union igc_adv_tx_desc *txdesc = &txr->tx_base[id];
   3130       1.2       rin 
   3131       1.2       rin 		igc_txdesc_sync(txr, id,
   3132       1.2       rin 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   3133       1.2       rin 		txdesc->read.buffer_addr = 0;
   3134       1.2       rin 		txdesc->read.cmd_type_len = 0;
   3135       1.2       rin 		txdesc->read.olinfo_status = 0;
   3136       1.2       rin 		igc_txdesc_sync(txr, id,
   3137       1.2       rin 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   3138       1.2       rin 
   3139       1.2       rin 		struct igc_tx_buf *txbuf = &txr->tx_buffers[id];
   3140       1.2       rin 		bus_dmamap_t map = txbuf->map;
   3141       1.2       rin 
   3142       1.2       rin 		if (map != NULL && map->dm_nsegs > 0) {
   3143       1.2       rin 			bus_dmamap_sync(txr->txdma.dma_tag, map,
   3144       1.2       rin 			    0, map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   3145       1.2       rin 			bus_dmamap_unload(txr->txdma.dma_tag, map);
   3146       1.1       rin 		}
   3147  1.15.2.1  perseant 		m_freem(txbuf->m_head);
   3148  1.15.2.1  perseant 		txbuf->m_head = NULL;
   3149       1.2       rin 		if (map != NULL && destroy) {
   3150       1.2       rin 			bus_dmamap_destroy(txr->txdma.dma_tag, map);
   3151       1.1       rin 			txbuf->map = NULL;
   3152       1.1       rin 		}
   3153       1.2       rin 		txbuf->eop_index = -1;
   3154       1.2       rin 
   3155       1.2       rin 		txr->next_avail_desc = 0;
   3156       1.2       rin 		txr->next_to_clean = 0;
   3157       1.2       rin 	}
   3158       1.2       rin 
   3159       1.2       rin 	struct mbuf *m;
   3160       1.2       rin 	while ((m = pcq_get(txr->txr_interq)) != NULL) {
   3161       1.2       rin 		IGC_QUEUE_EVENT(q, tx_pcq_drop, 1);
   3162       1.2       rin 		m_freem(m);
   3163       1.1       rin 	}
   3164       1.1       rin 
   3165       1.2       rin 	mutex_exit(&txr->txr_lock);
   3166       1.1       rin }
   3167       1.1       rin 
   3168       1.1       rin 
   3169       1.1       rin /*********************************************************************
   3170       1.1       rin  *
   3171       1.1       rin  *  Advanced Context Descriptor setup for VLAN, CSUM or TSO
   3172       1.1       rin  *
   3173       1.1       rin  **********************************************************************/
   3174       1.1       rin 
   3175  1.15.2.1  perseant static bool
   3176       1.1       rin igc_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp, int prod,
   3177       1.2       rin     uint32_t *cmd_type_len, uint32_t *olinfo_status)
   3178       1.1       rin {
   3179       1.2       rin 	struct ether_vlan_header *evl;
   3180  1.15.2.1  perseant 	struct tcphdr *th = NULL /* XXXGCC */;
   3181       1.1       rin 	uint32_t vlan_macip_lens = 0;
   3182  1.15.2.1  perseant 	uint32_t type_tucmd_mlhl = 0;
   3183  1.15.2.1  perseant 	uint32_t mss_l4len_idx = 0;
   3184       1.2       rin 	uint32_t ehlen, iphlen;
   3185       1.2       rin 	uint16_t ehtype;
   3186       1.1       rin 
   3187       1.2       rin 	const int csum_flags = mp->m_pkthdr.csum_flags;
   3188       1.2       rin 	const bool v4 = (csum_flags &
   3189  1.15.2.1  perseant 	    (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) != 0;
   3190  1.15.2.1  perseant 	const bool v6 = (csum_flags &
   3191  1.15.2.1  perseant 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) != 0;
   3192  1.15.2.1  perseant 	const bool tso = (csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   3193  1.15.2.1  perseant 	const bool tcp = tso ||
   3194  1.15.2.1  perseant 	    (csum_flags & (M_CSUM_TCPv4 | M_CSUM_TCPv6)) != 0;
   3195  1.15.2.1  perseant 	const bool udp = (csum_flags & (M_CSUM_UDPv4 | M_CSUM_UDPv6)) != 0;
   3196       1.2       rin 
   3197       1.2       rin 	/* Indicate the whole packet as payload when not doing TSO */
   3198  1.15.2.1  perseant 	if (!tso) {
   3199  1.15.2.1  perseant 		*olinfo_status |= mp->m_pkthdr.len << IGC_ADVTXD_PAYLEN_SHIFT;
   3200  1.15.2.1  perseant 	} else {
   3201  1.15.2.1  perseant 		/* Set L4 payload length later... */
   3202  1.15.2.1  perseant 	}
   3203       1.1       rin 
   3204  1.15.2.1  perseant #if NVLAN > 0
   3205       1.1       rin 	/*
   3206       1.1       rin 	 * In advanced descriptors the vlan tag must
   3207       1.1       rin 	 * be placed into the context descriptor. Hence
   3208       1.1       rin 	 * we need to make one even if not doing offloads.
   3209       1.1       rin 	 */
   3210       1.2       rin 	if (vlan_has_tag(mp)) {
   3211       1.2       rin 		vlan_macip_lens |= (uint32_t)vlan_get_tag(mp)
   3212       1.2       rin 		    << IGC_ADVTXD_VLAN_SHIFT;
   3213       1.2       rin 	} else
   3214       1.2       rin #endif
   3215       1.2       rin 	if (!v4 && !v6)
   3216  1.15.2.1  perseant 		return false;
   3217       1.2       rin 
   3218       1.2       rin 	KASSERT(mp->m_len >= sizeof(struct ether_header));
   3219       1.2       rin 	evl = mtod(mp, struct ether_vlan_header *);
   3220       1.2       rin 	if (evl->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
   3221       1.2       rin 		KASSERT(mp->m_len >= sizeof(struct ether_vlan_header));
   3222       1.2       rin 		ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   3223       1.2       rin 		ehtype = evl->evl_proto;
   3224       1.2       rin 	} else {
   3225       1.2       rin 		ehlen = ETHER_HDR_LEN;
   3226       1.2       rin 		ehtype = evl->evl_encap_proto;
   3227       1.1       rin 	}
   3228       1.1       rin 
   3229       1.2       rin 	switch (ntohs(ehtype)) {
   3230       1.2       rin 	case ETHERTYPE_IP:
   3231       1.2       rin 		iphlen = M_CSUM_DATA_IPv4_IPHL(mp->m_pkthdr.csum_data);
   3232       1.1       rin 		type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_IPV4;
   3233       1.2       rin 
   3234  1.15.2.1  perseant 		if ((csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) != 0)
   3235       1.1       rin 			*olinfo_status |= IGC_TXD_POPTS_IXSM << 8;
   3236  1.15.2.1  perseant 
   3237  1.15.2.1  perseant 		if (!tso)
   3238  1.15.2.1  perseant 			break;
   3239  1.15.2.1  perseant 
   3240  1.15.2.1  perseant 		struct ip *ip;
   3241  1.15.2.1  perseant 		KASSERT(mp->m_len >= ehlen + sizeof(*ip));
   3242  1.15.2.1  perseant 		ip = (void *)(mtod(mp, char *) + ehlen);
   3243  1.15.2.1  perseant 		ip->ip_len = 0;
   3244  1.15.2.1  perseant 
   3245  1.15.2.1  perseant 		KASSERT(mp->m_len >= ehlen + iphlen + sizeof(*th));
   3246  1.15.2.1  perseant 		th = (void *)((char *)ip + iphlen);
   3247  1.15.2.1  perseant 		th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   3248  1.15.2.1  perseant 		    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   3249       1.2       rin 		break;
   3250       1.2       rin 	case ETHERTYPE_IPV6:
   3251       1.2       rin 		iphlen = M_CSUM_DATA_IPv6_IPHL(mp->m_pkthdr.csum_data);
   3252       1.1       rin 		type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_IPV6;
   3253  1.15.2.1  perseant 
   3254  1.15.2.1  perseant 		if (!tso)
   3255  1.15.2.1  perseant 			break;
   3256  1.15.2.1  perseant 
   3257  1.15.2.1  perseant 		struct ip6_hdr *ip6;
   3258  1.15.2.1  perseant 		KASSERT(mp->m_len >= ehlen + sizeof(*ip6));
   3259  1.15.2.1  perseant 		ip6 = (void *)(mtod(mp, char *) + ehlen);
   3260  1.15.2.1  perseant 		ip6->ip6_plen = 0;
   3261  1.15.2.1  perseant 
   3262  1.15.2.1  perseant 		KASSERT(mp->m_len >= ehlen + iphlen + sizeof(*th));
   3263  1.15.2.1  perseant 		th = (void *)((char *)ip6 + iphlen);
   3264  1.15.2.1  perseant 		th->th_sum = in6_cksum_phdr(&ip6->ip6_src, &ip6->ip6_dst, 0,
   3265  1.15.2.1  perseant 		    htonl(IPPROTO_TCP));
   3266       1.2       rin 		break;
   3267       1.2       rin 	default:
   3268       1.2       rin 		/*
   3269       1.2       rin 		 * Unknown L3 protocol. Clear L3 header length and proceed for
   3270       1.2       rin 		 * LAN as done by Linux driver.
   3271       1.2       rin 		 */
   3272       1.2       rin 		KASSERT(!v4 && !v6);
   3273  1.15.2.1  perseant 		iphlen = 0;
   3274       1.2       rin 		break;
   3275       1.1       rin 	}
   3276       1.1       rin 
   3277       1.2       rin 	if (tcp) {
   3278       1.1       rin 		type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_TCP;
   3279       1.2       rin 		*olinfo_status |= IGC_TXD_POPTS_TXSM << 8;
   3280       1.2       rin 	} else if (udp) {
   3281       1.1       rin 		type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_UDP;
   3282       1.2       rin 		*olinfo_status |= IGC_TXD_POPTS_TXSM << 8;
   3283       1.2       rin 	}
   3284       1.2       rin 
   3285  1.15.2.1  perseant 	if (tso) {
   3286  1.15.2.1  perseant 		const uint32_t tcphlen = th->th_off << 2;
   3287  1.15.2.1  perseant 		const uint32_t paylen =
   3288  1.15.2.1  perseant 		    mp->m_pkthdr.len - ehlen - iphlen - tcphlen;
   3289       1.2       rin 
   3290  1.15.2.1  perseant 		mss_l4len_idx |= mp->m_pkthdr.segsz << IGC_ADVTXD_MSS_SHIFT;
   3291  1.15.2.1  perseant 		mss_l4len_idx |= tcphlen << IGC_ADVTXD_L4LEN_SHIFT;
   3292       1.2       rin 
   3293  1.15.2.1  perseant 		*cmd_type_len |= IGC_ADVTXD_DCMD_TSE;
   3294       1.2       rin 
   3295  1.15.2.1  perseant 		*olinfo_status |= paylen << IGC_ADVTXD_PAYLEN_SHIFT;
   3296       1.2       rin 	}
   3297       1.2       rin 
   3298       1.2       rin 	vlan_macip_lens |= iphlen;
   3299  1.15.2.1  perseant 	vlan_macip_lens |= ehlen << IGC_ADVTXD_MACLEN_SHIFT;
   3300       1.2       rin 
   3301       1.2       rin 	type_tucmd_mlhl |= IGC_ADVTXD_DCMD_DEXT | IGC_ADVTXD_DTYP_CTXT;
   3302       1.2       rin 
   3303       1.2       rin 	/* Now ready a context descriptor */
   3304       1.2       rin 	struct igc_adv_tx_context_desc *txdesc =
   3305       1.2       rin 	    (struct igc_adv_tx_context_desc *)&txr->tx_base[prod];
   3306       1.2       rin 
   3307       1.2       rin 	igc_txdesc_sync(txr, prod,
   3308       1.2       rin 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   3309  1.15.2.1  perseant 
   3310  1.15.2.1  perseant 	/* Now copy bits into descriptor */
   3311       1.2       rin 	htolem32(&txdesc->vlan_macip_lens, vlan_macip_lens);
   3312       1.2       rin 	htolem32(&txdesc->type_tucmd_mlhl, type_tucmd_mlhl);
   3313       1.2       rin 	htolem32(&txdesc->seqnum_seed, 0);
   3314       1.2       rin 	htolem32(&txdesc->mss_l4len_idx, mss_l4len_idx);
   3315  1.15.2.1  perseant 
   3316       1.2       rin 	igc_txdesc_sync(txr, prod,
   3317       1.2       rin 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   3318       1.2       rin 
   3319       1.1       rin 	return 1;
   3320       1.1       rin }
   3321       1.1       rin 
   3322       1.1       rin /*********************************************************************
   3323       1.1       rin  *
   3324       1.1       rin  *  Allocate memory for rx_buffer structures. Since we use one
   3325       1.1       rin  *  rx_buffer per received packet, the maximum number of rx_buffer's
   3326       1.1       rin  *  that we'll need is equal to the number of receive descriptors
   3327       1.1       rin  *  that we've allocated.
   3328       1.1       rin  *
   3329       1.1       rin  **********************************************************************/
static int
igc_allocate_receive_buffers(struct rx_ring *rxr)
{
	struct igc_softc *sc = rxr->sc;
	int error;

	rxr->rx_buffers =
	    kmem_zalloc(sc->num_rx_desc * sizeof(struct igc_rx_buf), KM_SLEEP);

	/* One single-segment, cluster-sized DMA map per descriptor. */
	for (int id = 0; id < sc->num_rx_desc; id++) {
		struct igc_rx_buf *rxbuf = &rxr->rx_buffers[id];

		error = bus_dmamap_create(rxr->rxdma.dma_tag, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &rxbuf->map);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create RX DMA map\n");
			/* Cleanup is done on the caller's error path. */
			goto fail;
		}
	}
	/* Flush the descriptor ring out to the device. */
	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0,
	    rxr->rxdma.dma_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
 fail:
	return error;
}
   3358       1.1       rin 
   3359       1.1       rin /*********************************************************************
   3360       1.1       rin  *
   3361       1.1       rin  *  Allocate and initialize receive structures.
   3362       1.1       rin  *
   3363       1.1       rin  **********************************************************************/
   3364       1.2       rin static int
   3365       1.1       rin igc_setup_receive_structures(struct igc_softc *sc)
   3366       1.1       rin {
   3367       1.1       rin 
   3368       1.2       rin 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
   3369       1.2       rin 		struct rx_ring *rxr = &sc->rx_rings[iq];
   3370       1.2       rin 
   3371       1.1       rin 		if (igc_setup_receive_ring(rxr))
   3372       1.1       rin 			goto fail;
   3373       1.1       rin 	}
   3374       1.1       rin 
   3375       1.1       rin 	return 0;
   3376       1.2       rin  fail:
   3377       1.1       rin 	igc_free_receive_structures(sc);
   3378       1.1       rin 	return ENOBUFS;
   3379       1.1       rin }
   3380       1.1       rin 
   3381       1.1       rin /*********************************************************************
   3382       1.1       rin  *
   3383       1.1       rin  *  Initialize a receive ring and its buffers.
   3384       1.1       rin  *
   3385       1.1       rin  **********************************************************************/
   3386       1.2       rin static int
   3387       1.1       rin igc_setup_receive_ring(struct rx_ring *rxr)
   3388       1.1       rin {
   3389       1.1       rin 	struct igc_softc *sc = rxr->sc;
   3390       1.2       rin 	const int rsize = roundup2(
   3391       1.2       rin 	    sc->num_rx_desc * sizeof(union igc_adv_rx_desc), IGC_DBA_ALIGN);
   3392       1.1       rin 
   3393       1.1       rin 	/* Clear the ring contents. */
   3394       1.2       rin 	memset(rxr->rx_base, 0, rsize);
   3395       1.1       rin 
   3396       1.1       rin 	if (igc_allocate_receive_buffers(rxr))
   3397       1.1       rin 		return ENOMEM;
   3398       1.1       rin 
   3399       1.1       rin 	/* Setup our descriptor indices. */
   3400       1.1       rin 	rxr->next_to_check = 0;
   3401       1.2       rin 	rxr->last_desc_filled = 0;
   3402       1.1       rin 
   3403       1.2       rin 	mutex_init(&rxr->rxr_lock, MUTEX_DEFAULT, IPL_NET);
   3404       1.1       rin 
   3405       1.1       rin 	return 0;
   3406       1.1       rin }
   3407       1.1       rin 
   3408       1.1       rin /*********************************************************************
   3409       1.1       rin  *
   3410       1.1       rin  *  Enable receive unit.
   3411       1.1       rin  *
   3412       1.1       rin  **********************************************************************/
static void
igc_initialize_receive_unit(struct igc_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct igc_hw *hw = &sc->hw;
	uint32_t rctl, rxcsum, srrctl;

	DPRINTF(RX, "called\n");

	/*
	 * Make sure receives are disabled while setting
	 * up the descriptor ring.
	 */
	rctl = IGC_READ_REG(hw, IGC_RCTL);
	IGC_WRITE_REG(hw, IGC_RCTL, rctl & ~IGC_RCTL_EN);

	/* Setup the Receive Control Register */
	rctl &= ~(3 << IGC_RCTL_MO_SHIFT);
	rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_LBM_NO |
	    IGC_RCTL_RDMTS_HALF | (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT);

#if 1
	/* Do not store bad packets */
	rctl &= ~IGC_RCTL_SBP;
#else
	/* for debug */
	rctl |= IGC_RCTL_SBP;
#endif

	/* Enable Long Packet receive */
	if (sc->hw.mac.max_frame_size > ETHER_MAX_LEN)
		rctl |= IGC_RCTL_LPE;
	else
		rctl &= ~IGC_RCTL_LPE;

	/* Strip the CRC */
	rctl |= IGC_RCTL_SECRC;

	/*
	 * Set the interrupt throttling rate. Value is calculated
	 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
	 *
	 * XXX Sync with Linux, especially for jumbo MTU or TSO.
	 * XXX Shouldn't be here?
	 */
	IGC_WRITE_REG(hw, IGC_ITR, DEFAULT_ITR);

	/* Enable RX checksum offloads per the interface capabilities. */
	rxcsum = IGC_READ_REG(hw, IGC_RXCSUM);
	rxcsum &= ~(IGC_RXCSUM_IPOFL | IGC_RXCSUM_TUOFL | IGC_RXCSUM_PCSD);
	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
		rxcsum |= IGC_RXCSUM_IPOFL;
	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
				 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
		rxcsum |= IGC_RXCSUM_TUOFL;
	/* With multiple queues, let packet-checksum share the RSS field. */
	if (sc->sc_nqueues > 1)
		rxcsum |= IGC_RXCSUM_PCSD;
	IGC_WRITE_REG(hw, IGC_RXCSUM, rxcsum);

	if (sc->sc_nqueues > 1)
		igc_initialize_rss_mapping(sc);

	/* Receive buffer size: 2 KB clusters (4 KB variant disabled). */
	srrctl = 0;
#if 0
	srrctl |= 4096 >> IGC_SRRCTL_BSIZEPKT_SHIFT;
	rctl |= IGC_RCTL_SZ_4096 | IGC_RCTL_BSEX;
#else
	srrctl |= 2048 >> IGC_SRRCTL_BSIZEPKT_SHIFT;
	rctl |= IGC_RCTL_SZ_2048;
#endif

	/*
	 * If TX flow control is disabled and there's > 1 queue defined,
	 * enable DROP.
	 *
	 * This drops frames rather than hanging the RX MAC for all queues.
	 */
	if (sc->sc_nqueues > 1 &&
	    (sc->fc == igc_fc_none || sc->fc == igc_fc_rx_pause))
		srrctl |= IGC_SRRCTL_DROP_EN;

	/* Setup the Base and Length of the RX descriptor rings. */
	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
		struct rx_ring *rxr = &sc->rx_rings[iq];
		const uint64_t bus_addr =
		    rxr->rxdma.dma_map->dm_segs[0].ds_addr;

		/* Disable the queue while it is being programmed. */
		IGC_WRITE_REG(hw, IGC_RXDCTL(iq), 0);

		srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;

		IGC_WRITE_REG(hw, IGC_RDLEN(iq),
		    sc->num_rx_desc * sizeof(union igc_adv_rx_desc));
		IGC_WRITE_REG(hw, IGC_RDBAH(iq), (uint32_t)(bus_addr >> 32));
		IGC_WRITE_REG(hw, IGC_RDBAL(iq), (uint32_t)bus_addr);
		IGC_WRITE_REG(hw, IGC_SRRCTL(iq), srrctl);

		/* Setup the Head and Tail Descriptor Pointers */
		IGC_WRITE_REG(hw, IGC_RDH(iq), 0);
		IGC_WRITE_REG(hw, IGC_RDT(iq), 0 /* XXX rxr->last_desc_filled */);

		/* Enable this Queue */
		uint32_t rxdctl = IGC_READ_REG(hw, IGC_RXDCTL(iq));
		rxdctl |= IGC_RXDCTL_QUEUE_ENABLE;
		rxdctl &= 0xFFF00000;
		rxdctl |= IGC_RX_PTHRESH;
		rxdctl |= IGC_RX_HTHRESH << 8;
		rxdctl |= IGC_RX_WTHRESH << 16;
		IGC_WRITE_REG(hw, IGC_RXDCTL(iq), rxdctl);
	}

	/* Make sure VLAN Filters are off */
	rctl &= ~IGC_RCTL_VFE;

	/* Write out the settings */
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
}
   3529       1.1       rin 
   3530       1.1       rin /*********************************************************************
   3531       1.1       rin  *
   3532       1.1       rin  *  Free all receive rings.
   3533       1.1       rin  *
   3534       1.1       rin  **********************************************************************/
   3535       1.2       rin static void
   3536       1.1       rin igc_free_receive_structures(struct igc_softc *sc)
   3537       1.1       rin {
   3538       1.1       rin 
   3539       1.2       rin 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
   3540       1.2       rin 		struct rx_ring *rxr = &sc->rx_rings[iq];
   3541       1.1       rin 
   3542       1.1       rin 		igc_free_receive_buffers(rxr);
   3543       1.2       rin 	}
   3544       1.1       rin }
   3545       1.1       rin 
   3546       1.1       rin /*********************************************************************
   3547       1.1       rin  *
   3548       1.1       rin  *  Free receive ring data structures
   3549       1.1       rin  *
   3550       1.1       rin  **********************************************************************/
   3551       1.2       rin static void
   3552       1.1       rin igc_free_receive_buffers(struct rx_ring *rxr)
   3553       1.1       rin {
   3554       1.1       rin 	struct igc_softc *sc = rxr->sc;
   3555       1.1       rin 
   3556       1.1       rin 	if (rxr->rx_buffers != NULL) {
   3557       1.2       rin 		for (int id = 0; id < sc->num_rx_desc; id++) {
   3558       1.2       rin 			struct igc_rx_buf *rxbuf = &rxr->rx_buffers[id];
   3559       1.2       rin 			bus_dmamap_t map = rxbuf->map;
   3560       1.2       rin 
   3561       1.1       rin 			if (rxbuf->buf != NULL) {
   3562       1.2       rin 				bus_dmamap_sync(rxr->rxdma.dma_tag, map,
   3563       1.2       rin 				    0, map->dm_mapsize, BUS_DMASYNC_POSTREAD);
   3564       1.2       rin 				bus_dmamap_unload(rxr->rxdma.dma_tag, map);
   3565       1.1       rin 				m_freem(rxbuf->buf);
   3566       1.1       rin 				rxbuf->buf = NULL;
   3567       1.1       rin 			}
   3568       1.2       rin 			bus_dmamap_destroy(rxr->rxdma.dma_tag, map);
   3569       1.1       rin 			rxbuf->map = NULL;
   3570       1.1       rin 		}
   3571       1.2       rin 		kmem_free(rxr->rx_buffers,
   3572       1.1       rin 		    sc->num_rx_desc * sizeof(struct igc_rx_buf));
   3573       1.1       rin 		rxr->rx_buffers = NULL;
   3574       1.1       rin 	}
   3575       1.2       rin 
   3576       1.2       rin 	mutex_destroy(&rxr->rxr_lock);
   3577       1.2       rin }
   3578       1.2       rin 
   3579       1.2       rin /*********************************************************************
   3580       1.2       rin  *
   3581       1.2       rin  * Clear status registers in all RX descriptors.
   3582       1.2       rin  *
   3583       1.2       rin  **********************************************************************/
   3584       1.2       rin static void
   3585       1.2       rin igc_clear_receive_status(struct rx_ring *rxr)
   3586       1.2       rin {
   3587       1.2       rin 	struct igc_softc *sc = rxr->sc;
   3588       1.2       rin 
   3589       1.2       rin 	mutex_enter(&rxr->rxr_lock);
   3590       1.2       rin 
   3591       1.2       rin 	for (int id = 0; id < sc->num_rx_desc; id++) {
   3592       1.2       rin 		union igc_adv_rx_desc *rxdesc = &rxr->rx_base[id];
   3593       1.2       rin 
   3594       1.2       rin 		igc_rxdesc_sync(rxr, id,
   3595       1.2       rin 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   3596       1.2       rin 		rxdesc->wb.upper.status_error = 0;
   3597       1.2       rin 		igc_rxdesc_sync(rxr, id,
   3598       1.2       rin 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   3599       1.2       rin 	}
   3600       1.2       rin 
   3601       1.2       rin 	mutex_exit(&rxr->rxr_lock);
   3602       1.1       rin }
   3603       1.1       rin 
   3604       1.1       rin /*
   3605       1.1       rin  * Initialise the RSS mapping for NICs that support multiple transmit/
   3606       1.1       rin  * receive rings.
   3607       1.1       rin  */
   3608       1.2       rin static void
   3609       1.1       rin igc_initialize_rss_mapping(struct igc_softc *sc)
   3610       1.1       rin {
   3611       1.1       rin 	struct igc_hw *hw = &sc->hw;
   3612       1.1       rin 
   3613       1.1       rin 	/*
   3614       1.1       rin 	 * The redirection table controls which destination
   3615       1.1       rin 	 * queue each bucket redirects traffic to.
   3616       1.1       rin 	 * Each DWORD represents four queues, with the LSB
   3617       1.1       rin 	 * being the first queue in the DWORD.
   3618       1.1       rin 	 *
   3619       1.1       rin 	 * This just allocates buckets to queues using round-robin
   3620       1.1       rin 	 * allocation.
   3621       1.1       rin 	 *
   3622       1.1       rin 	 * NOTE: It Just Happens to line up with the default
   3623       1.1       rin 	 * RSS allocation method.
   3624       1.1       rin 	 */
   3625       1.1       rin 
   3626       1.1       rin 	/* Warning FM follows */
   3627       1.2       rin 	uint32_t reta = 0;
   3628       1.2       rin 	for (int i = 0; i < 128; i++) {
   3629       1.2       rin 		const int shift = 0; /* XXXRO */
   3630       1.2       rin 		int queue_id = i % sc->sc_nqueues;
   3631       1.1       rin 		/* Adjust if required */
   3632       1.2       rin 		queue_id <<= shift;
   3633       1.1       rin 
   3634       1.1       rin 		/*
   3635       1.1       rin 		 * The low 8 bits are for hash value (n+0);
   3636       1.1       rin 		 * The next 8 bits are for hash value (n+1), etc.
   3637       1.1       rin 		 */
   3638       1.2       rin 		reta >>= 8;
   3639       1.2       rin 		reta |= ((uint32_t)queue_id) << 24;
   3640       1.1       rin 		if ((i & 3) == 3) {
   3641       1.1       rin 			IGC_WRITE_REG(hw, IGC_RETA(i >> 2), reta);
   3642       1.1       rin 			reta = 0;
   3643       1.1       rin 		}
   3644       1.1       rin 	}
   3645       1.1       rin 
   3646       1.1       rin 	/*
   3647       1.1       rin 	 * MRQC: Multiple Receive Queues Command
   3648       1.1       rin 	 * Set queuing to RSS control, number depends on the device.
   3649       1.1       rin 	 */
   3650       1.1       rin 
   3651       1.1       rin 	/* Set up random bits */
   3652       1.2       rin 	uint32_t rss_key[RSS_KEYSIZE / sizeof(uint32_t)];
   3653       1.2       rin 	rss_getkey((uint8_t *)rss_key);
   3654       1.1       rin 
   3655       1.1       rin 	/* Now fill our hash function seeds */
   3656       1.2       rin 	for (int i = 0; i < __arraycount(rss_key); i++)
   3657       1.1       rin 		IGC_WRITE_REG_ARRAY(hw, IGC_RSSRK(0), i, rss_key[i]);
   3658       1.1       rin 
   3659       1.1       rin 	/*
   3660       1.1       rin 	 * Configure the RSS fields to hash upon.
   3661       1.1       rin 	 */
   3662       1.2       rin 	uint32_t mrqc = IGC_MRQC_ENABLE_RSS_4Q;
   3663       1.2       rin 	mrqc |= IGC_MRQC_RSS_FIELD_IPV4 | IGC_MRQC_RSS_FIELD_IPV4_TCP;
   3664       1.2       rin 	mrqc |= IGC_MRQC_RSS_FIELD_IPV6 | IGC_MRQC_RSS_FIELD_IPV6_TCP;
   3665       1.1       rin 	mrqc |= IGC_MRQC_RSS_FIELD_IPV6_TCP_EX;
   3666       1.1       rin 
   3667       1.1       rin 	IGC_WRITE_REG(hw, IGC_MRQC, mrqc);
   3668       1.1       rin }
   3669       1.1       rin 
   3670       1.1       rin /*
   3671       1.1       rin  * igc_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit.
   3672       1.1       rin  * For ASF and Pass Through versions of f/w this means
   3673       1.1       rin  * that the driver is loaded. For AMT version type f/w
   3674       1.1       rin  * this means that the network i/f is open.
   3675       1.1       rin  */
   3676       1.2       rin static void
   3677       1.1       rin igc_get_hw_control(struct igc_softc *sc)
   3678       1.1       rin {
   3679       1.2       rin 	const uint32_t ctrl_ext = IGC_READ_REG(&sc->hw, IGC_CTRL_EXT);
   3680       1.1       rin 
   3681       1.1       rin 	IGC_WRITE_REG(&sc->hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
   3682       1.1       rin }
   3683       1.1       rin 
   3684       1.1       rin /*
   3685       1.1       rin  * igc_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
   3686       1.1       rin  * For ASF and Pass Through versions of f/w this means that
   3687       1.1       rin  * the driver is no longer loaded. For AMT versions of the
   3688       1.1       rin  * f/w this means that the network i/f is closed.
   3689       1.1       rin  */
   3690       1.2       rin static void
   3691       1.1       rin igc_release_hw_control(struct igc_softc *sc)
   3692       1.1       rin {
   3693       1.2       rin 	const uint32_t ctrl_ext = IGC_READ_REG(&sc->hw, IGC_CTRL_EXT);
   3694       1.1       rin 
   3695       1.1       rin 	IGC_WRITE_REG(&sc->hw, IGC_CTRL_EXT, ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
   3696       1.1       rin }
   3697       1.1       rin 
   3698       1.2       rin static int
   3699       1.1       rin igc_is_valid_ether_addr(uint8_t *addr)
   3700       1.1       rin {
   3701       1.2       rin 	const char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
   3702       1.1       rin 
   3703       1.2       rin 	if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN))
   3704       1.1       rin 		return 0;
   3705       1.1       rin 
   3706       1.1       rin 	return 1;
   3707       1.1       rin }
   3708       1.2       rin 
   3709       1.2       rin static void
   3710       1.2       rin igc_print_devinfo(struct igc_softc *sc)
   3711       1.2       rin {
   3712       1.2       rin 	device_t dev = sc->sc_dev;
   3713       1.2       rin 	struct igc_hw *hw = &sc->hw;
   3714       1.2       rin 	struct igc_phy_info *phy = &hw->phy;
   3715       1.2       rin 	u_int oui, model, rev;
   3716      1.13   msaitoh 	uint16_t id1, id2, nvm_ver, phy_ver, etk_lo, etk_hi;
   3717       1.2       rin 	char descr[MII_MAX_DESCR_LEN];
   3718       1.2       rin 
   3719       1.2       rin 	/* Print PHY Info */
   3720       1.2       rin 	id1 = phy->id >> 16;
   3721       1.2       rin 	/* The revision field in phy->id is cleard and it's in phy->revision */
   3722       1.2       rin 	id2 = (phy->id & 0xfff0) | phy->revision;
   3723       1.2       rin 	oui = MII_OUI(id1, id2);
   3724       1.2       rin 	model = MII_MODEL(id2);
   3725       1.2       rin 	rev = MII_REV(id2);
   3726       1.2       rin 	mii_get_descr(descr, sizeof(descr), oui, model);
   3727       1.2       rin 	if (descr[0])
   3728      1.12   msaitoh 		aprint_normal_dev(dev, "PHY: %s, rev. %d",
   3729       1.2       rin 		    descr, rev);
   3730       1.2       rin 	else
   3731       1.2       rin 		aprint_normal_dev(dev,
   3732      1.12   msaitoh 		    "PHY OUI 0x%06x, model 0x%04x, rev. %d",
   3733       1.2       rin 		    oui, model, rev);
   3734       1.2       rin 
   3735      1.12   msaitoh 	/* PHY FW version */
   3736      1.12   msaitoh 	phy->ops.read_reg(hw, 0x1e, &phy_ver);
   3737      1.12   msaitoh 	aprint_normal(", PHY FW version 0x%04hx\n", phy_ver);
   3738      1.12   msaitoh 
   3739      1.13   msaitoh 	/* NVM version */
   3740       1.2       rin 	hw->nvm.ops.read(hw, NVM_VERSION, 1, &nvm_ver);
   3741       1.2       rin 
   3742      1.13   msaitoh 	/* EtrackID */
   3743      1.13   msaitoh 	hw->nvm.ops.read(hw, NVM_ETKID_LO, 1, &etk_lo);
   3744      1.13   msaitoh 	hw->nvm.ops.read(hw, NVM_ETKID_HI, 1, &etk_hi);
   3745      1.13   msaitoh 
   3746      1.13   msaitoh 	aprint_normal_dev(dev,
   3747      1.13   msaitoh 	    "NVM image version %x.%02x, EtrackID %04hx%04hx\n",
   3748       1.2       rin 	    (nvm_ver & NVM_VERSION_MAJOR) >> NVM_VERSION_MAJOR_SHIFT,
   3749      1.13   msaitoh 	    nvm_ver & NVM_VERSION_MINOR, etk_hi, etk_lo);
   3750       1.2       rin }
   3751