      1  1.3.2.3  ad /*	$NetBSD: if_rge.c,v 1.3.2.3 2020/02/29 20:19:10 ad Exp $	*/
      2  1.3.2.2  ad /*	$OpenBSD: if_rge.c,v 1.2 2020/01/02 09:00:45 kevlo Exp $	*/
      3  1.3.2.2  ad 
      4  1.3.2.2  ad /*
      5  1.3.2.2  ad  * Copyright (c) 2019 Kevin Lo <kevlo (at) openbsd.org>
      6  1.3.2.2  ad  *
      7  1.3.2.2  ad  * Permission to use, copy, modify, and distribute this software for any
      8  1.3.2.2  ad  * purpose with or without fee is hereby granted, provided that the above
      9  1.3.2.2  ad  * copyright notice and this permission notice appear in all copies.
     10  1.3.2.2  ad  *
     11  1.3.2.2  ad  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
     12  1.3.2.2  ad  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
     13  1.3.2.2  ad  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
     14  1.3.2.2  ad  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
     15  1.3.2.2  ad  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
     16  1.3.2.2  ad  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
     17  1.3.2.2  ad  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
     18  1.3.2.2  ad  */
     19  1.3.2.2  ad 
     20  1.3.2.2  ad #include <sys/cdefs.h>
     21  1.3.2.3  ad __KERNEL_RCSID(0, "$NetBSD: if_rge.c,v 1.3.2.3 2020/02/29 20:19:10 ad Exp $");
     22  1.3.2.2  ad 
     23  1.3.2.2  ad /* #include "vlan.h" Sevan */
     24  1.3.2.2  ad 
     25  1.3.2.2  ad #include <sys/types.h>
     26  1.3.2.2  ad 
     27  1.3.2.2  ad #include <sys/param.h>
     28  1.3.2.2  ad #include <sys/systm.h>
     29  1.3.2.2  ad #include <sys/sockio.h>
     30  1.3.2.2  ad #include <sys/mbuf.h>
     31  1.3.2.2  ad #include <sys/malloc.h>
     32  1.3.2.2  ad #include <sys/kernel.h>
     33  1.3.2.2  ad #include <sys/socket.h>
     34  1.3.2.2  ad #include <sys/device.h>
     35  1.3.2.2  ad #include <sys/endian.h>
     36  1.3.2.2  ad #include <sys/callout.h>
     37  1.3.2.2  ad #include <sys/workqueue.h>
     38  1.3.2.2  ad 
     39  1.3.2.2  ad #include <net/if.h>
     40  1.3.2.2  ad 
     41  1.3.2.2  ad #include <net/if_dl.h>
     42  1.3.2.2  ad #include <net/if_ether.h>
     43  1.3.2.2  ad 
     44  1.3.2.2  ad #include <net/if_media.h>
     45  1.3.2.2  ad 
     46  1.3.2.2  ad #include <netinet/in.h>
     48  1.3.2.2  ad 
     49  1.3.2.2  ad #if NBPFILTER > 0
     50  1.3.2.2  ad #include <net/bpf.h>
     51  1.3.2.2  ad #endif
     52  1.3.2.2  ad 
     53  1.3.2.2  ad #include <sys/bus.h>
     54  1.3.2.2  ad #include <machine/intr.h>
     55  1.3.2.2  ad 
     56  1.3.2.2  ad #include <dev/mii/mii.h>
     57  1.3.2.2  ad 
     58  1.3.2.2  ad #include <dev/pci/pcivar.h>
     59  1.3.2.2  ad #include <dev/pci/pcireg.h>
     60  1.3.2.2  ad #include <dev/pci/pcidevs.h>
     61  1.3.2.2  ad 
     62  1.3.2.2  ad #include <dev/pci/if_rgereg.h>
     63  1.3.2.2  ad 
     64  1.3.2.2  ad #ifdef __NetBSD__
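/*
 * Compatibility glue: this driver is a port of OpenBSD's rge(4) and still
 * relies on a few OpenBSD-only interfaces.  The definitions below emulate
 * the simple ones; note that struct mbuf_list only mirrors the OpenBSD
 * type, and helpers such as ml_enqueue() used in rge_rxeof() are not
 * provided here yet.
 */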
     65  1.3.2.2  ad #define letoh32 	htole32
     66  1.3.2.2  ad #define nitems(x) 	__arraycount(x)
     67  1.3.2.2  ad #define MBUF_LIST_INITIALIZER() 	{ NULL, NULL, 0 }
     68  1.3.2.2  ad struct mbuf_list {
     69  1.3.2.2  ad 	struct mbuf 	*ml_head;
     70  1.3.2.2  ad 	struct mbuf 	*ml_tail;
     71  1.3.2.2  ad 	u_int 	ml_len;
     72  1.3.2.2  ad };
     73  1.3.2.3  ad 
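/*
 * Minimal stand-in for OpenBSD's MCLGETI(): allocate a packet header mbuf
 * and attach an external buffer of the requested size with MEXTMALLOC(),
 * instead of drawing from a per-interface cluster pool as OpenBSD does.
 */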
     74  1.3.2.3  ad static struct mbuf *
     75  1.3.2.3  ad MCLGETI(struct rge_softc *sc __unused, int how,
     76  1.3.2.3  ad     struct ifnet *ifp __unused, u_int size)
     77  1.3.2.3  ad {
     78  1.3.2.3  ad 	struct mbuf *m;
     79  1.3.2.3  ad 
     80  1.3.2.3  ad 	MGETHDR(m, how, MT_DATA);
     81  1.3.2.3  ad 	if (m == NULL)
     82  1.3.2.3  ad 		return NULL;
     83  1.3.2.3  ad 
     84  1.3.2.3  ad 	MEXTMALLOC(m, size, how);
     85  1.3.2.3  ad 	if ((m->m_flags & M_EXT) == 0) {
     86  1.3.2.3  ad 		m_freem(m);
     87  1.3.2.3  ad 		return NULL;
     88  1.3.2.3  ad 	}
     89  1.3.2.3  ad 	return m;
     90  1.3.2.3  ad }
     91  1.3.2.3  ad 
     92  1.3.2.2  ad #ifdef NET_MPSAFE
     93  1.3.2.2  ad #define 	RGE_MPSAFE	1
     94  1.3.2.2  ad #define 	CALLOUT_FLAGS	CALLOUT_MPSAFE
     95  1.3.2.2  ad #else
     96  1.3.2.2  ad #define 	CALLOUT_FLAGS	0
     97  1.3.2.2  ad #endif
     98  1.3.2.2  ad #endif
     99  1.3.2.2  ad 
    100  1.3.2.2  ad static int		rge_match(device_t, cfdata_t, void *);
    101  1.3.2.3  ad static void		rge_attach(device_t, device_t, void *);
    102  1.3.2.2  ad int		rge_intr(void *);
    103  1.3.2.2  ad int		rge_encap(struct rge_softc *, struct mbuf *, int);
    104  1.3.2.2  ad int		rge_ioctl(struct ifnet *, u_long, void *);
    105  1.3.2.2  ad void		rge_start(struct ifnet *);
    106  1.3.2.2  ad void		rge_watchdog(struct ifnet *);
    107  1.3.2.2  ad int		rge_init(struct ifnet *);
    108  1.3.2.2  ad void		rge_stop(struct ifnet *);
    109  1.3.2.2  ad int		rge_ifmedia_upd(struct ifnet *);
    110  1.3.2.2  ad void		rge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
    111  1.3.2.2  ad int		rge_allocmem(struct rge_softc *);
    112  1.3.2.2  ad int		rge_newbuf(struct rge_softc *, int);
    113  1.3.2.2  ad void		rge_discard_rxbuf(struct rge_softc *, int);
    114  1.3.2.2  ad int		rge_rx_list_init(struct rge_softc *);
    115  1.3.2.2  ad void		rge_tx_list_init(struct rge_softc *);
    116  1.3.2.2  ad int		rge_rxeof(struct rge_softc *);
    117  1.3.2.2  ad int		rge_txeof(struct rge_softc *);
    118  1.3.2.2  ad void		rge_reset(struct rge_softc *);
    119  1.3.2.2  ad void		rge_iff(struct rge_softc *);
    120  1.3.2.2  ad void		rge_set_phy_power(struct rge_softc *, int);
    121  1.3.2.2  ad void		rge_phy_config(struct rge_softc *);
    122  1.3.2.2  ad void		rge_set_macaddr(struct rge_softc *, const uint8_t *);
    123  1.3.2.2  ad void		rge_get_macaddr(struct rge_softc *, uint8_t *);
    124  1.3.2.2  ad void		rge_hw_init(struct rge_softc *);
    125  1.3.2.2  ad void		rge_disable_phy_ocp_pwrsave(struct rge_softc *);
    126  1.3.2.2  ad void		rge_patch_phy_mcu(struct rge_softc *, int);
    127  1.3.2.2  ad void		rge_add_media_types(struct rge_softc *);
    128  1.3.2.2  ad void		rge_config_imtype(struct rge_softc *, int);
    129  1.3.2.2  ad void		rge_disable_sim_im(struct rge_softc *);
    130  1.3.2.2  ad void		rge_setup_sim_im(struct rge_softc *);
    131  1.3.2.2  ad void		rge_setup_intr(struct rge_softc *, int);
    132  1.3.2.2  ad void		rge_exit_oob(struct rge_softc *);
    133  1.3.2.2  ad void		rge_write_csi(struct rge_softc *, uint32_t, uint32_t);
    134  1.3.2.2  ad uint32_t	rge_read_csi(struct rge_softc *, uint32_t);
    135  1.3.2.2  ad void		rge_write_mac_ocp(struct rge_softc *, uint16_t, uint16_t);
    136  1.3.2.2  ad uint16_t	rge_read_mac_ocp(struct rge_softc *, uint16_t);
    137  1.3.2.2  ad void		rge_write_ephy(struct rge_softc *, uint16_t, uint16_t);
    138  1.3.2.2  ad void		rge_write_phy(struct rge_softc *, uint16_t, uint16_t, uint16_t);
    139  1.3.2.2  ad void		rge_write_phy_ocp(struct rge_softc *, uint16_t, uint16_t);
    140  1.3.2.2  ad uint16_t	rge_read_phy_ocp(struct rge_softc *, uint16_t);
    141  1.3.2.2  ad int		rge_get_link_status(struct rge_softc *);
    142  1.3.2.2  ad void		rge_txstart(struct work *, void *);
    143  1.3.2.2  ad void		rge_tick(void *);
    144  1.3.2.2  ad void		rge_link_state(struct rge_softc *);
    145  1.3.2.2  ad 
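/*
 * Register/value pairs for the EPHY and PHY MCU initialization sequences;
 * the RTL8125_* macros from if_rgereg.h expand to the table contents.
 */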
    146  1.3.2.2  ad static const struct {
    147  1.3.2.2  ad 	uint16_t reg;
    148  1.3.2.2  ad 	uint16_t val;
    149  1.3.2.2  ad }  rtl8125_def_bps[] = {
    150  1.3.2.2  ad 	RTL8125_DEF_BPS
    151  1.3.2.2  ad }, rtl8125_mac_cfg2_ephy[] = {
    152  1.3.2.2  ad 	RTL8125_MAC_CFG2_EPHY
    153  1.3.2.2  ad }, rtl8125_mac_cfg2_mcu[] = {
    154  1.3.2.2  ad 	RTL8125_MAC_CFG2_MCU
    155  1.3.2.2  ad }, rtl8125_mac_cfg3_ephy[] = {
    156  1.3.2.2  ad 	RTL8125_MAC_CFG3_EPHY
    157  1.3.2.2  ad }, rtl8125_mac_cfg3_mcu[] = {
    158  1.3.2.2  ad 	RTL8125_MAC_CFG3_MCU
    159  1.3.2.2  ad };
    160  1.3.2.2  ad 
    161  1.3.2.2  ad CFATTACH_DECL_NEW(rge, sizeof(struct rge_softc), rge_match, rge_attach,
    162  1.3.2.2  ad 		NULL, NULL); /* Sevan - detach function? */
    163  1.3.2.2  ad 
    164  1.3.2.2  ad extern struct cfdriver rge_cd;
    165  1.3.2.2  ad 
    166  1.3.2.2  ad static const struct {
    167  1.3.2.2  ad 	pci_vendor_id_t 	vendor;
    168  1.3.2.2  ad 	pci_product_id_t 	product;
     169  1.3.2.2  ad } rge_devices[] = {
    170  1.3.2.2  ad 	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_E3000 },
    171  1.3.2.2  ad 	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8125 },
    172  1.3.2.2  ad };
    173  1.3.2.2  ad 
    174  1.3.2.2  ad static int
    175  1.3.2.2  ad rge_match(device_t parent, cfdata_t match, void *aux)
    176  1.3.2.2  ad {
     177  1.3.2.2  ad 	struct pci_attach_args *pa = aux;
    178  1.3.2.2  ad 	int n;
    179  1.3.2.2  ad 
     180  1.3.2.2  ad 	for (n = 0; n < __arraycount(rge_devices); n++) {
    181  1.3.2.2  ad 		if (PCI_VENDOR(pa->pa_id) == rge_devices[n].vendor &&
    182  1.3.2.2  ad 		    PCI_PRODUCT(pa->pa_id) == rge_devices[n].product)
    183  1.3.2.2  ad 			return 1;
    184  1.3.2.2  ad 	}
    185  1.3.2.2  ad 
    186  1.3.2.2  ad 	return 0;
    187  1.3.2.2  ad }
    188  1.3.2.2  ad 
    189  1.3.2.2  ad void
    190  1.3.2.2  ad rge_attach(device_t parent, device_t self, void *aux)
    191  1.3.2.2  ad {
    192  1.3.2.2  ad 	struct rge_softc *sc = (struct rge_softc *)self;
    193  1.3.2.2  ad 	struct pci_attach_args *pa = aux;
    194  1.3.2.2  ad 	pci_chipset_tag_t pc = pa->pa_pc;
    195  1.3.2.2  ad 	pci_intr_handle_t ih;
    196  1.3.2.2  ad 	char intrbuf[PCI_INTRSTR_LEN];
    197  1.3.2.2  ad 	const char *intrstr = NULL;
    198  1.3.2.2  ad 	struct ifnet *ifp;
    199  1.3.2.2  ad 	pcireg_t reg;
    200  1.3.2.2  ad 	uint32_t hwrev;
    201  1.3.2.2  ad 	uint8_t eaddr[ETHER_ADDR_LEN];
    202  1.3.2.2  ad 	int offset;
    203  1.3.2.2  ad 
    204  1.3.2.2  ad 	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
    205  1.3.2.2  ad 
    206  1.3.2.3  ad 	/*
    207  1.3.2.2  ad 	 * Map control/status registers.
    208  1.3.2.2  ad 	 */
    209  1.3.2.2  ad 	if (pci_mapreg_map(pa, RGE_PCI_BAR2, PCI_MAPREG_TYPE_MEM |
    210  1.3.2.2  ad 	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->rge_btag, &sc->rge_bhandle,
    211  1.3.2.2  ad 	    NULL, &sc->rge_bsize)) {
    212  1.3.2.2  ad 		if (pci_mapreg_map(pa, RGE_PCI_BAR1, PCI_MAPREG_TYPE_MEM |
    213  1.3.2.2  ad 		    PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->rge_btag,
    214  1.3.2.2  ad 		    &sc->rge_bhandle, NULL, &sc->rge_bsize)) {
    215  1.3.2.2  ad 			if (pci_mapreg_map(pa, RGE_PCI_BAR0, PCI_MAPREG_TYPE_IO,
    216  1.3.2.2  ad 			    0, &sc->rge_btag, &sc->rge_bhandle, NULL,
    217  1.3.2.2  ad 			    &sc->rge_bsize)) {
    218  1.3.2.2  ad 				printf(": can't map mem or i/o space\n");
    219  1.3.2.2  ad 				return;
    220  1.3.2.2  ad 			}
    221  1.3.2.2  ad 		}
    222  1.3.2.2  ad 	}
    223  1.3.2.2  ad 
    224  1.3.2.3  ad 	/*
    225  1.3.2.2  ad 	 * Allocate interrupt.
    226  1.3.2.2  ad 	 */
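	/*
	 * XXX Both branches below call pci_intr_map(), so RGE_FLAG_MSI ends
	 * up set even when only a plain INTx interrupt was mapped; the
	 * OpenBSD original tries an MSI mapping first.
	 */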
    227  1.3.2.2  ad 	if (pci_intr_map(pa, &ih) == 0)
    228  1.3.2.2  ad 		sc->rge_flags |= RGE_FLAG_MSI;
    229  1.3.2.2  ad 	else if (pci_intr_map(pa, &ih) != 0) {
    230  1.3.2.2  ad 		printf(": couldn't map interrupt\n");
    231  1.3.2.2  ad 		return;
    232  1.3.2.2  ad 	}
    233  1.3.2.2  ad 	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
    234  1.3.2.2  ad 	sc->sc_ih = pci_intr_establish_xname(pc, ih, IPL_NET, rge_intr,
    235  1.3.2.2  ad 	    sc, sc->sc_dev.dv_xname);
    236  1.3.2.2  ad 	if (sc->sc_ih == NULL) {
    237  1.3.2.2  ad 		printf(": couldn't establish interrupt");
    238  1.3.2.2  ad 		if (intrstr != NULL)
    239  1.3.2.2  ad 			printf(" at %s", intrstr);
    240  1.3.2.2  ad 		printf("\n");
    241  1.3.2.2  ad 		return;
    242  1.3.2.2  ad 	}
    243  1.3.2.2  ad 	printf(": %s", intrstr);
    244  1.3.2.2  ad 
    245  1.3.2.2  ad 	sc->sc_dmat = pa->pa_dmat;
    246  1.3.2.2  ad 	sc->sc_pc = pa->pa_pc;
    247  1.3.2.2  ad 	sc->sc_tag = pa->pa_tag;
    248  1.3.2.2  ad 
    249  1.3.2.2  ad 	/* Determine hardware revision */
    250  1.3.2.2  ad 	hwrev = RGE_READ_4(sc, RGE_TXCFG) & RGE_TXCFG_HWREV;
    251  1.3.2.2  ad 	switch (hwrev) {
    252  1.3.2.2  ad 	case 0x60800000:
    253  1.3.2.2  ad 		sc->rge_type = MAC_CFG2;
    254  1.3.2.2  ad 		break;
    255  1.3.2.2  ad 	case 0x60900000:
    256  1.3.2.2  ad 		sc->rge_type = MAC_CFG3;
    257  1.3.2.2  ad 		break;
    258  1.3.2.2  ad 	default:
    259  1.3.2.2  ad 		printf(": unknown version 0x%08x\n", hwrev);
    260  1.3.2.2  ad 		return;
    261  1.3.2.2  ad 	}
    262  1.3.2.2  ad 
    263  1.3.2.2  ad 	rge_config_imtype(sc, RGE_IMTYPE_SIM);
    264  1.3.2.2  ad 
    265  1.3.2.3  ad 	/*
    266  1.3.2.2  ad 	 * PCI Express check.
    267  1.3.2.2  ad 	 */
    268  1.3.2.2  ad 	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
    269  1.3.2.2  ad 	    &offset, NULL)) {
    270  1.3.2.2  ad 		/* Disable PCIe ASPM. */
    271  1.3.2.2  ad 		reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
    272  1.3.2.2  ad 		    offset + PCIE_LCSR);
     273  1.3.2.2  ad 		reg &= ~(PCIE_LCSR_ASPM_L0S | PCIE_LCSR_ASPM_L1);
    274  1.3.2.2  ad 		pci_conf_write(pa->pa_pc, pa->pa_tag, offset + PCIE_LCSR,
    275  1.3.2.2  ad 		    reg);
    276  1.3.2.2  ad 	}
    277  1.3.2.2  ad 
    278  1.3.2.2  ad 	rge_exit_oob(sc);
    279  1.3.2.2  ad 	rge_hw_init(sc);
    280  1.3.2.2  ad 
    281  1.3.2.2  ad 	rge_get_macaddr(sc, eaddr);
    282  1.3.2.2  ad 	printf(", address %s\n", ether_sprintf(eaddr));
    283  1.3.2.2  ad 
    284  1.3.2.2  ad 	memcpy(sc->sc_enaddr, eaddr, ETHER_ADDR_LEN);
    285  1.3.2.2  ad 
    286  1.3.2.2  ad 	rge_set_phy_power(sc, 1);
    287  1.3.2.2  ad 	rge_phy_config(sc);
    288  1.3.2.2  ad 
    289  1.3.2.2  ad 	if (rge_allocmem(sc))
    290  1.3.2.2  ad 		return;
    291  1.3.2.2  ad 
    292  1.3.2.2  ad 	ifp = &sc->sc_ec.ec_if;
    293  1.3.2.2  ad 	ifp->if_softc = sc;
    294  1.3.2.2  ad 	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
    295  1.3.2.2  ad 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    296  1.3.2.2  ad #ifdef RGE_MPSAFE
    297  1.3.2.2  ad 	ifp->if_xflags = IFEF_MPSAFE;
    298  1.3.2.2  ad #endif
    299  1.3.2.2  ad 	ifp->if_ioctl = rge_ioctl;
    300  1.3.2.2  ad 	ifp->if_start = rge_start;
    301  1.3.2.2  ad 	ifp->if_watchdog = rge_watchdog;
    302  1.3.2.2  ad 	IFQ_SET_MAXLEN(&ifp->if_snd, RGE_TX_LIST_CNT);
    303  1.3.2.2  ad 	ifp->if_mtu = RGE_JUMBO_MTU;
    304  1.3.2.2  ad 
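	/*
	 * XXX NetBSD keeps ETHERCAP_* flags (including the VLAN ones) in
	 * sc_ec.ec_capabilities rather than in if_capabilities; the block
	 * below still follows the OpenBSD layout.
	 */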
    305  1.3.2.2  ad 	ifp->if_capabilities = ETHERCAP_VLAN_MTU | IFCAP_CSUM_IPv4_Rx |
     306  1.3.2.2  ad 	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_TCPv4_Tx |
    307  1.3.2.2  ad 	    IFCAP_CSUM_UDPv4_Rx | IFCAP_CSUM_UDPv4_Tx;
    308  1.3.2.2  ad 
    309  1.3.2.2  ad #if NVLAN > 0
    310  1.3.2.2  ad 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
    311  1.3.2.2  ad #endif
    312  1.3.2.2  ad 
    313  1.3.2.2  ad 	callout_init(&sc->sc_timeout, CALLOUT_FLAGS);
    314  1.3.2.2  ad 	callout_setfunc(&sc->sc_timeout, rge_tick, sc);
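	/*
	 * XXX The direct call to rge_txstart() below only stands in for
	 * OpenBSD's task_set(); the handler should eventually be driven
	 * from a workqueue (it is referenced via sc_task in rge_start()).
	 */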
    315  1.3.2.2  ad 	rge_txstart(&sc->sc_task, sc);
    316  1.3.2.2  ad 
    317  1.3.2.2  ad 	/* Initialize ifmedia structures. */
    318  1.3.2.2  ad 	ifmedia_init(&sc->sc_media, IFM_IMASK, rge_ifmedia_upd,
    319  1.3.2.2  ad 	    rge_ifmedia_sts);
    320  1.3.2.2  ad 	rge_add_media_types(sc);
    321  1.3.2.2  ad 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
    322  1.3.2.2  ad 	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
    323  1.3.2.2  ad 	sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media;
    324  1.3.2.2  ad 
    325  1.3.2.2  ad 	if_attach(ifp);
    326  1.3.2.2  ad 	ether_ifattach(ifp, eaddr);
    327  1.3.2.2  ad }
    328  1.3.2.2  ad 
    329  1.3.2.2  ad int
    330  1.3.2.2  ad rge_intr(void *arg)
    331  1.3.2.2  ad {
    332  1.3.2.2  ad 	struct rge_softc *sc = arg;
    333  1.3.2.2  ad 	struct ifnet *ifp = &sc->sc_ec.ec_if;
    334  1.3.2.2  ad 	uint32_t status;
    335  1.3.2.2  ad 	int claimed = 0, rx, tx;
    336  1.3.2.2  ad 
    337  1.3.2.2  ad 	if (!(ifp->if_flags & IFF_RUNNING))
    338  1.3.2.2  ad 		return (0);
    339  1.3.2.2  ad 
    340  1.3.2.2  ad 	/* Disable interrupts. */
    341  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_IMR, 0);
    342  1.3.2.2  ad 
    343  1.3.2.2  ad 	status = RGE_READ_4(sc, RGE_ISR);
    344  1.3.2.2  ad 	if (!(sc->rge_flags & RGE_FLAG_MSI)) {
    345  1.3.2.2  ad 		if ((status & RGE_INTRS) == 0 || status == 0xffffffff)
    346  1.3.2.2  ad 			return (0);
    347  1.3.2.2  ad 	}
    348  1.3.2.2  ad 	if (status)
    349  1.3.2.2  ad 		RGE_WRITE_4(sc, RGE_ISR, status);
    350  1.3.2.2  ad 
    351  1.3.2.2  ad 	if (status & RGE_ISR_PCS_TIMEOUT)
    352  1.3.2.2  ad 		claimed = 1;
    353  1.3.2.2  ad 
    354  1.3.2.2  ad 	rx = tx = 0;
    355  1.3.2.2  ad 	if (status & RGE_INTRS) {
    356  1.3.2.2  ad 		if (status &
    357  1.3.2.2  ad 		    (sc->rge_rx_ack | RGE_ISR_RX_ERR | RGE_ISR_RX_FIFO_OFLOW)) {
    358  1.3.2.2  ad 			rx |= rge_rxeof(sc);
    359  1.3.2.2  ad 			claimed = 1;
    360  1.3.2.2  ad 		}
    361  1.3.2.2  ad 
    362  1.3.2.2  ad 		if (status & (sc->rge_tx_ack | RGE_ISR_TX_ERR)) {
    363  1.3.2.2  ad 			tx |= rge_txeof(sc);
    364  1.3.2.2  ad 			claimed = 1;
    365  1.3.2.2  ad 		}
    366  1.3.2.2  ad 
    367  1.3.2.2  ad 		if (status & RGE_ISR_SYSTEM_ERR) {
    368  1.3.2.2  ad 			KERNEL_LOCK(1, NULL);
    369  1.3.2.2  ad 			rge_init(ifp);
    370  1.3.2.2  ad 			KERNEL_UNLOCK_ONE(NULL);
    371  1.3.2.2  ad 			claimed = 1;
    372  1.3.2.2  ad 		}
    373  1.3.2.2  ad 	}
    374  1.3.2.2  ad 
    375  1.3.2.2  ad 	if (sc->rge_timerintr) {
    376  1.3.2.2  ad 		if ((tx | rx) == 0) {
    377  1.3.2.2  ad 			/*
     378  1.3.2.2  ad 			 * Nothing needs to be processed; fall back
     379  1.3.2.2  ad 			 * to using TX/RX interrupts.
    380  1.3.2.2  ad 			 */
    381  1.3.2.2  ad 			rge_setup_intr(sc, RGE_IMTYPE_NONE);
    382  1.3.2.2  ad 
    383  1.3.2.2  ad 			/*
    384  1.3.2.2  ad 			 * Recollect, mainly to avoid the possible
    385  1.3.2.2  ad 			 * race introduced by changing interrupt
    386  1.3.2.2  ad 			 * masks.
    387  1.3.2.2  ad 			 */
    388  1.3.2.2  ad 			rge_rxeof(sc);
    389  1.3.2.2  ad 			rge_txeof(sc);
    390  1.3.2.2  ad 		} else
    391  1.3.2.2  ad 			RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
    392  1.3.2.2  ad 	} else if (tx | rx) {
    393  1.3.2.2  ad 		/*
    394  1.3.2.2  ad 		 * Assume that using simulated interrupt moderation
    395  1.3.2.2  ad 		 * (hardware timer based) could reduce the interrupt
    396  1.3.2.2  ad 		 * rate.
    397  1.3.2.2  ad 		 */
    398  1.3.2.2  ad 		rge_setup_intr(sc, RGE_IMTYPE_SIM);
    399  1.3.2.2  ad 	}
    400  1.3.2.2  ad 
    401  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
    402  1.3.2.2  ad 
    403  1.3.2.2  ad 	return (claimed);
    404  1.3.2.2  ad }
    405  1.3.2.2  ad 
    406  1.3.2.2  ad int
    407  1.3.2.2  ad rge_encap(struct rge_softc *sc, struct mbuf *m, int idx)
    408  1.3.2.2  ad {
    409  1.3.2.2  ad 	struct rge_tx_desc *d = NULL;
    410  1.3.2.2  ad 	struct rge_txq *txq;
    411  1.3.2.2  ad 	bus_dmamap_t txmap;
    412  1.3.2.2  ad 	uint32_t cmdsts, cflags = 0;
    413  1.3.2.2  ad 	int cur, error, i, last, nsegs;
    414  1.3.2.2  ad 
    415  1.3.2.2  ad 	/*
    416  1.3.2.2  ad 	 * Set RGE_TDEXTSTS_IPCSUM if any checksum offloading is requested.
     417  1.3.2.2  ad 	 * Otherwise, RGE_TDEXTSTS_TCPCSUM / RGE_TDEXTSTS_UDPCSUM do not
     418  1.3.2.2  ad 	 * take effect.
    419  1.3.2.2  ad 	 */
    420  1.3.2.2  ad 	if ((m->m_pkthdr.csum_flags &
    421  1.3.2.2  ad 	    (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) != 0) {
    422  1.3.2.2  ad 		cflags |= RGE_TDEXTSTS_IPCSUM;
    423  1.3.2.2  ad 		if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
    424  1.3.2.2  ad 			cflags |= RGE_TDEXTSTS_TCPCSUM;
    425  1.3.2.2  ad 		if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
    426  1.3.2.2  ad 			cflags |= RGE_TDEXTSTS_UDPCSUM;
    427  1.3.2.2  ad 	}
    428  1.3.2.2  ad 
    429  1.3.2.2  ad 	txq = &sc->rge_ldata.rge_txq[idx];
    430  1.3.2.2  ad 	txmap = txq->txq_dmamap;
    431  1.3.2.2  ad 
    432  1.3.2.2  ad 	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m, BUS_DMA_NOWAIT);
    433  1.3.2.2  ad 	switch (error) {
    434  1.3.2.2  ad 	case 0:
    435  1.3.2.2  ad 		break;
    436  1.3.2.2  ad 	case EFBIG: /* mbuf chain is too fragmented */
    437  1.3.2.2  ad 		if (m_defrag(m, M_DONTWAIT) == 0 &&
    438  1.3.2.2  ad 		    bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m,
    439  1.3.2.2  ad 		    BUS_DMA_NOWAIT) == 0)
    440  1.3.2.2  ad 			break;
    441  1.3.2.2  ad 
    442  1.3.2.2  ad 		/* FALLTHROUGH */
    443  1.3.2.2  ad 	default:
    444  1.3.2.2  ad 		return (0);
    445  1.3.2.2  ad 	}
    446  1.3.2.2  ad 
    447  1.3.2.2  ad 	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
    448  1.3.2.2  ad 	    BUS_DMASYNC_PREWRITE);
    449  1.3.2.2  ad 
    450  1.3.2.2  ad 	nsegs = txmap->dm_nsegs;
    451  1.3.2.2  ad 
    452  1.3.2.2  ad 	/* Set up hardware VLAN tagging. */
    453  1.3.2.2  ad #if NVLAN > 0
    454  1.3.2.2  ad 	if (m->m_flags & M_VLANTAG)
    455  1.3.2.2  ad 		cflags |= swap16(m->m_pkthdr.ether_vtag | RGE_TDEXTSTS_VTAG);
    456  1.3.2.2  ad #endif
    457  1.3.2.2  ad 
    458  1.3.2.2  ad 	cur = idx;
    459  1.3.2.2  ad 	cmdsts = RGE_TDCMDSTS_SOF;
    460  1.3.2.2  ad 
    461  1.3.2.2  ad 	for (i = 0; i < txmap->dm_nsegs; i++) {
    462  1.3.2.2  ad 		d = &sc->rge_ldata.rge_tx_list[cur];
    463  1.3.2.2  ad 
    464  1.3.2.2  ad 		d->rge_extsts = htole32(cflags);
    465  1.3.2.2  ad 		d->rge_addrlo = htole32(RGE_ADDR_LO(txmap->dm_segs[i].ds_addr));
    466  1.3.2.2  ad 		d->rge_addrhi = htole32(RGE_ADDR_HI(txmap->dm_segs[i].ds_addr));
    467  1.3.2.2  ad 
    468  1.3.2.2  ad 		cmdsts |= txmap->dm_segs[i].ds_len;
    469  1.3.2.2  ad 
    470  1.3.2.2  ad 		if (cur == RGE_TX_LIST_CNT - 1)
    471  1.3.2.2  ad 			cmdsts |= RGE_TDCMDSTS_EOR;
    472  1.3.2.2  ad 
    473  1.3.2.2  ad 		d->rge_cmdsts = htole32(cmdsts);
    474  1.3.2.2  ad 
    475  1.3.2.2  ad 		last = cur;
    476  1.3.2.2  ad 		cmdsts = RGE_TDCMDSTS_OWN;
    477  1.3.2.2  ad 		cur = RGE_NEXT_TX_DESC(cur);
    478  1.3.2.2  ad 	}
    479  1.3.2.2  ad 
    480  1.3.2.2  ad 	/* Set EOF on the last descriptor. */
    481  1.3.2.2  ad 	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_EOF);
    482  1.3.2.2  ad 
    483  1.3.2.2  ad 	/* Transfer ownership of packet to the chip. */
    484  1.3.2.2  ad 	d = &sc->rge_ldata.rge_tx_list[idx];
    485  1.3.2.2  ad 
    486  1.3.2.2  ad 	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_OWN);
    487  1.3.2.2  ad 
    488  1.3.2.2  ad 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
    489  1.3.2.2  ad 	    cur * sizeof(struct rge_tx_desc), sizeof(struct rge_tx_desc),
    490  1.3.2.2  ad 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    491  1.3.2.2  ad 
    492  1.3.2.2  ad 	/* Update info of TX queue and descriptors. */
    493  1.3.2.2  ad 	txq->txq_mbuf = m;
    494  1.3.2.2  ad 	txq->txq_descidx = last;
    495  1.3.2.2  ad 
    496  1.3.2.2  ad 	return (nsegs);
    497  1.3.2.2  ad }
    498  1.3.2.2  ad 
    499  1.3.2.2  ad int
    500  1.3.2.2  ad rge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
    501  1.3.2.2  ad {
    502  1.3.2.2  ad 	struct rge_softc *sc = ifp->if_softc;
    503  1.3.2.2  ad 	struct ifreq *ifr = (struct ifreq *)data;
    504  1.3.2.2  ad 	int s, error = 0;
    505  1.3.2.2  ad 
    506  1.3.2.2  ad 	s = splnet();
    507  1.3.2.2  ad 
    508  1.3.2.2  ad 	switch (cmd) {
    509  1.3.2.2  ad 	case SIOCSIFADDR:
    510  1.3.2.2  ad 		ifp->if_flags |= IFF_UP;
    511  1.3.2.2  ad 		if (!(ifp->if_flags & IFF_RUNNING))
    512  1.3.2.2  ad 			rge_init(ifp);
    513  1.3.2.2  ad 		break;
    514  1.3.2.2  ad 	case SIOCSIFFLAGS:
    515  1.3.2.2  ad 		if (ifp->if_flags & IFF_UP) {
    516  1.3.2.2  ad 			if (ifp->if_flags & IFF_RUNNING)
    517  1.3.2.2  ad 				error = ENETRESET;
    518  1.3.2.2  ad 			else
    519  1.3.2.2  ad 				rge_init(ifp);
    520  1.3.2.2  ad 		} else {
    521  1.3.2.2  ad 			if (ifp->if_flags & IFF_RUNNING)
    522  1.3.2.2  ad 				rge_stop(ifp);
    523  1.3.2.2  ad 		}
    524  1.3.2.2  ad 		break;
    525  1.3.2.2  ad 	case SIOCGIFMEDIA:
    526  1.3.2.2  ad 	case SIOCSIFMEDIA:
    527  1.3.2.2  ad 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
    528  1.3.2.2  ad 		break;
    529  1.3.2.2  ad 	case SIOCSIFMTU:
     530  1.3.2.2  ad 		if (ifr->ifr_mtu > RGE_JUMBO_MTU) {
    531  1.3.2.2  ad 			error = EINVAL;
    532  1.3.2.2  ad 			break;
    533  1.3.2.2  ad 		}
    534  1.3.2.2  ad 		ifp->if_mtu = ifr->ifr_mtu;
    535  1.3.2.2  ad 		break;
    536  1.3.2.2  ad 	default:
    537  1.3.2.2  ad 		error = ether_ioctl(ifp, cmd, data);
    538  1.3.2.2  ad 	}
    539  1.3.2.2  ad 
    540  1.3.2.2  ad 	if (error == ENETRESET) {
    541  1.3.2.2  ad 		if (ifp->if_flags & IFF_RUNNING)
    542  1.3.2.2  ad 			rge_iff(sc);
    543  1.3.2.2  ad 		error = 0;
    544  1.3.2.2  ad 	}
    545  1.3.2.2  ad 
    546  1.3.2.2  ad 	splx(s);
    547  1.3.2.2  ad 	return (error);
    548  1.3.2.2  ad }
    549  1.3.2.2  ad 
    550  1.3.2.2  ad void
    551  1.3.2.2  ad rge_start(struct ifnet *ifp)
    552  1.3.2.2  ad {
    553  1.3.2.2  ad 	struct rge_softc *sc = ifp->if_softc;
    554  1.3.2.2  ad 	struct mbuf *m;
    555  1.3.2.2  ad 	int free, idx, used;
    556  1.3.2.2  ad 	int queued = 0;
    557  1.3.2.2  ad 
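/* Local equivalent of OpenBSD's LINK_STATE_IS_UP() macro. */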
    558  1.3.2.2  ad #define LINK_STATE_IS_UP(_s)    \
    559  1.3.2.2  ad 	((_s) >= LINK_STATE_UP || (_s) == LINK_STATE_UNKNOWN)
    560  1.3.2.2  ad 
    561  1.3.2.2  ad 	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
     562  1.3.2.2  ad 		IFQ_PURGE(&ifp->if_snd);
    563  1.3.2.2  ad 		return;
    564  1.3.2.2  ad 	}
    565  1.3.2.2  ad 
    566  1.3.2.2  ad 	/* Calculate free space. */
    567  1.3.2.2  ad 	idx = sc->rge_ldata.rge_txq_prodidx;
    568  1.3.2.2  ad 	free = sc->rge_ldata.rge_txq_considx;
    569  1.3.2.2  ad 	if (free <= idx)
    570  1.3.2.2  ad 		free += RGE_TX_LIST_CNT;
    571  1.3.2.2  ad 	free -= idx;
    572  1.3.2.2  ad 
    573  1.3.2.2  ad 	for (;;) {
    574  1.3.2.2  ad 		if (RGE_TX_NSEGS >= free + 2) {
    575  1.3.2.2  ad 			SET(ifp->if_flags, IFF_OACTIVE);
    576  1.3.2.2  ad 			break;
    577  1.3.2.2  ad 		}
    578  1.3.2.2  ad 
    579  1.3.2.2  ad 		IFQ_DEQUEUE(&ifp->if_snd, m);
    580  1.3.2.2  ad 		if (m == NULL)
    581  1.3.2.2  ad 			break;
    582  1.3.2.2  ad 
    583  1.3.2.2  ad 		used = rge_encap(sc, m, idx);
    584  1.3.2.2  ad 		if (used == 0) {
    585  1.3.2.2  ad 			m_freem(m);
    586  1.3.2.2  ad 			continue;
    587  1.3.2.2  ad 		}
    588  1.3.2.2  ad 
    589  1.3.2.2  ad 		KASSERT(used <= free);
    590  1.3.2.2  ad 		free -= used;
    591  1.3.2.2  ad 
    592  1.3.2.2  ad #if NBPFILTER > 0
    593  1.3.2.2  ad 		if (ifp->if_bpf)
    594  1.3.2.2  ad 			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
    595  1.3.2.2  ad #endif
    596  1.3.2.2  ad 
    597  1.3.2.2  ad 		idx += used;
    598  1.3.2.2  ad 		if (idx >= RGE_TX_LIST_CNT)
    599  1.3.2.2  ad 			idx -= RGE_TX_LIST_CNT;
    600  1.3.2.2  ad 
    601  1.3.2.2  ad 		queued++;
    602  1.3.2.2  ad 	}
    603  1.3.2.2  ad 
    604  1.3.2.2  ad 	if (queued == 0)
    605  1.3.2.2  ad 		return;
    606  1.3.2.2  ad 
    607  1.3.2.2  ad 	/* Set a timeout in case the chip goes out to lunch. */
    608  1.3.2.2  ad 	ifp->if_timer = 5;
    609  1.3.2.2  ad 
    610  1.3.2.2  ad 	sc->rge_ldata.rge_txq_prodidx = idx;
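	/*
	 * XXX ifq_serialize() and the bare "ifq" below are OpenBSD queue
	 * interfaces with no direct NetBSD equivalent; the intent is to
	 * run rge_txstart() here to kick the transmitter.
	 */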
    611  1.3.2.2  ad 	ifq_serialize(ifq, &sc->sc_task);
    612  1.3.2.2  ad }
    613  1.3.2.2  ad 
    614  1.3.2.2  ad void
    615  1.3.2.2  ad rge_watchdog(struct ifnet *ifp)
    616  1.3.2.2  ad {
    617  1.3.2.2  ad 	struct rge_softc *sc = ifp->if_softc;
    618  1.3.2.2  ad 
    619  1.3.2.2  ad 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
    620  1.3.2.3  ad 	if_statinc(ifp, if_oerrors);
    621  1.3.2.2  ad 
    622  1.3.2.2  ad 	rge_init(ifp);
    623  1.3.2.2  ad }
    624  1.3.2.2  ad 
    625  1.3.2.2  ad int
    626  1.3.2.2  ad rge_init(struct ifnet *ifp)
    627  1.3.2.2  ad {
    628  1.3.2.2  ad 	struct rge_softc *sc = ifp->if_softc;
    629  1.3.2.2  ad 	uint32_t val;
    630  1.3.2.2  ad 	uint16_t max_frame_size;
    631  1.3.2.2  ad 	int i;
    632  1.3.2.2  ad 
    633  1.3.2.2  ad 	rge_stop(ifp);
    634  1.3.2.2  ad 
    635  1.3.2.2  ad 	/* Set MAC address. */
    636  1.3.2.2  ad 	rge_set_macaddr(sc, sc->sc_enaddr);
    637  1.3.2.2  ad 
     638  1.3.2.2  ad 	/* Set the maximum frame size, but don't let the MTU be less than ETHERMTU. */
    639  1.3.2.2  ad 	if (ifp->if_mtu < ETHERMTU)
    640  1.3.2.2  ad 		max_frame_size = ETHERMTU;
    641  1.3.2.2  ad 	else
    642  1.3.2.2  ad 		max_frame_size = ifp->if_mtu;
    643  1.3.2.2  ad 
    644  1.3.2.2  ad 	max_frame_size += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
    645  1.3.2.2  ad 	    ETHER_CRC_LEN + 1;
    646  1.3.2.2  ad 
    647  1.3.2.2  ad 	if (max_frame_size > RGE_JUMBO_FRAMELEN)
    648  1.3.2.3  ad 		max_frame_size -= 1;
    649  1.3.2.2  ad 
    650  1.3.2.2  ad 	RGE_WRITE_2(sc, RGE_RXMAXSIZE, max_frame_size);
    651  1.3.2.2  ad 
    652  1.3.2.2  ad 	/* Initialize RX descriptors list. */
    653  1.3.2.2  ad 	if (rge_rx_list_init(sc) == ENOBUFS) {
    654  1.3.2.2  ad 		printf("%s: init failed: no memory for RX buffers\n",
    655  1.3.2.2  ad 		    sc->sc_dev.dv_xname);
    656  1.3.2.2  ad 		rge_stop(ifp);
    657  1.3.2.2  ad 		return (ENOBUFS);
    658  1.3.2.2  ad 	}
    659  1.3.2.2  ad 
    660  1.3.2.2  ad 	/* Initialize TX descriptors. */
    661  1.3.2.2  ad 	rge_tx_list_init(sc);
    662  1.3.2.2  ad 
    663  1.3.2.2  ad 	/* Load the addresses of the RX and TX lists into the chip. */
    664  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_LO,
    665  1.3.2.2  ad 	    RGE_ADDR_LO(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
    666  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_HI,
    667  1.3.2.2  ad 	    RGE_ADDR_HI(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
    668  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_LO,
    669  1.3.2.2  ad 	    RGE_ADDR_LO(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
    670  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_HI,
    671  1.3.2.2  ad 	    RGE_ADDR_HI(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
    672  1.3.2.2  ad 
    673  1.3.2.2  ad 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
    674  1.3.2.2  ad 
    675  1.3.2.2  ad 	RGE_CLRBIT_1(sc, 0xf1, 0x80);
    676  1.3.2.2  ad 	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
    677  1.3.2.2  ad 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
    678  1.3.2.2  ad 	RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_RDY_TO_L23);
    679  1.3.2.2  ad 
    680  1.3.2.2  ad 	/* Clear interrupt moderation timer. */
    681  1.3.2.2  ad 	for (i = 0; i < 64; i++)
    682  1.3.2.2  ad 		RGE_WRITE_4(sc, RGE_IM(i), 0);
    683  1.3.2.2  ad 
    684  1.3.2.2  ad 	/* Set the initial RX and TX configurations. */
    685  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_RXCFG, RGE_RXCFG_CONFIG);
    686  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_TXCFG, RGE_TXCFG_CONFIG);
    687  1.3.2.2  ad 
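	/*
	 * The CSI and MAC OCP writes that follow program undocumented,
	 * chip-specific magic values carried over unchanged from the
	 * OpenBSD driver.
	 */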
    688  1.3.2.2  ad 	val = rge_read_csi(sc, 0x70c) & ~0xff000000;
    689  1.3.2.2  ad 	rge_write_csi(sc, 0x70c, val | 0x27000000);
    690  1.3.2.2  ad 
    691  1.3.2.2  ad 	/* Enable hardware optimization function. */
    692  1.3.2.2  ad 	val = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x78) & ~0x00007000;
    693  1.3.2.2  ad 	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x78, val | 0x00005000);
    694  1.3.2.2  ad 
    695  1.3.2.2  ad 	RGE_WRITE_2(sc, 0x0382, 0x221b);
    696  1.3.2.2  ad 	RGE_WRITE_1(sc, 0x4500, 0);
    697  1.3.2.2  ad 	RGE_WRITE_2(sc, 0x4800, 0);
    698  1.3.2.2  ad 	RGE_CLRBIT_1(sc, RGE_CFG1, RGE_CFG1_SPEED_DOWN);
    699  1.3.2.2  ad 
    700  1.3.2.2  ad 	rge_write_mac_ocp(sc, 0xc140, 0xffff);
    701  1.3.2.2  ad 	rge_write_mac_ocp(sc, 0xc142, 0xffff);
    702  1.3.2.2  ad 
    703  1.3.2.2  ad 	val = rge_read_mac_ocp(sc, 0xd3e2) & ~0x0fff;
    704  1.3.2.2  ad 	rge_write_mac_ocp(sc, 0xd3e2, val | 0x03a9);
    705  1.3.2.2  ad 
    706  1.3.2.2  ad 	RGE_MAC_CLRBIT(sc, 0xd3e4, 0x00ff);
    707  1.3.2.2  ad 	RGE_MAC_SETBIT(sc, 0xe860, 0x0080);
    708  1.3.2.2  ad 	RGE_MAC_SETBIT(sc, 0xeb58, 0x0001);
    709  1.3.2.2  ad 
    710  1.3.2.2  ad 	val = rge_read_mac_ocp(sc, 0xe614) & ~0x0700;
    711  1.3.2.2  ad 	rge_write_mac_ocp(sc, 0xe614, val | 0x0400);
    712  1.3.2.2  ad 
    713  1.3.2.2  ad 	RGE_MAC_CLRBIT(sc, 0xe63e, 0x0c00);
    714  1.3.2.2  ad 
    715  1.3.2.2  ad 	val = rge_read_mac_ocp(sc, 0xe63e) & ~0x0030;
    716  1.3.2.2  ad 	rge_write_mac_ocp(sc, 0xe63e, val | 0x0020);
    717  1.3.2.2  ad 
    718  1.3.2.2  ad 	RGE_MAC_SETBIT(sc, 0xc0b4, 0x000c);
    719  1.3.2.2  ad 
    720  1.3.2.2  ad 	val = rge_read_mac_ocp(sc, 0xeb6a) & ~0x007f;
    721  1.3.2.2  ad 	rge_write_mac_ocp(sc, 0xeb6a, val | 0x0033);
    722  1.3.2.2  ad 
    723  1.3.2.2  ad 	val = rge_read_mac_ocp(sc, 0xeb50) & ~0x03e0;
    724  1.3.2.2  ad 	rge_write_mac_ocp(sc, 0xeb50, val | 0x0040);
    725  1.3.2.2  ad 
    726  1.3.2.2  ad 	val = rge_read_mac_ocp(sc, 0xe056) & ~0x00f0;
    727  1.3.2.2  ad 	rge_write_mac_ocp(sc, 0xe056, val | 0x0030);
    728  1.3.2.2  ad 
    729  1.3.2.2  ad 	RGE_WRITE_1(sc, RGE_TDFNR, 0x10);
    730  1.3.2.2  ad 
    731  1.3.2.2  ad 	RGE_MAC_CLRBIT(sc, 0xe040, 0x1000);
    732  1.3.2.2  ad 
    733  1.3.2.2  ad 	val = rge_read_mac_ocp(sc, 0xe0c0) & ~0x4f0f;
    734  1.3.2.2  ad 	rge_write_mac_ocp(sc, 0xe0c0, val | 0x4403);
    735  1.3.2.2  ad 
    736  1.3.2.2  ad 	RGE_MAC_SETBIT(sc, 0xe052, 0x0068);
    737  1.3.2.2  ad 	RGE_MAC_CLRBIT(sc, 0xe052, 0x0080);
    738  1.3.2.2  ad 
    739  1.3.2.2  ad 	val = rge_read_mac_ocp(sc, 0xc0ac) & ~0x0080;
    740  1.3.2.2  ad 	rge_write_mac_ocp(sc, 0xc0ac, val | 0x1f00);
    741  1.3.2.2  ad 
    742  1.3.2.2  ad 	val = rge_read_mac_ocp(sc, 0xd430) & ~0x0fff;
    743  1.3.2.2  ad 	rge_write_mac_ocp(sc, 0xd430, val | 0x047f);
    744  1.3.2.2  ad 
    745  1.3.2.3  ad 	RGE_MAC_SETBIT(sc, 0xe84c, 0x00c0);
    746  1.3.2.2  ad 
    747  1.3.2.2  ad 	/* Disable EEE plus. */
    748  1.3.2.2  ad 	RGE_MAC_CLRBIT(sc, 0xe080, 0x0002);
    749  1.3.2.2  ad 
    750  1.3.2.2  ad 	RGE_MAC_CLRBIT(sc, 0xea1c, 0x0004);
    751  1.3.2.2  ad 
    752  1.3.2.2  ad 	RGE_MAC_SETBIT(sc, 0xeb54, 0x0001);
    753  1.3.2.2  ad 	DELAY(1);
    754  1.3.2.2  ad 	RGE_MAC_CLRBIT(sc, 0xeb54, 0x0001);
    755  1.3.2.2  ad 
    756  1.3.2.2  ad 	RGE_CLRBIT_4(sc, 0x1880, 0x0030);
    757  1.3.2.2  ad 
    758  1.3.2.2  ad 	rge_write_mac_ocp(sc, 0xe098, 0xc302);
    759  1.3.2.2  ad 
    760  1.3.2.2  ad 	if (ifp->if_capabilities & ETHERCAP_VLAN_HWTAGGING)
    761  1.3.2.2  ad 		RGE_SETBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);
    762  1.3.2.2  ad 
    763  1.3.2.2  ad 	RGE_SETBIT_2(sc, RGE_CPLUSCMD, RGE_CPLUSCMD_RXCSUM);
    764  1.3.2.2  ad 
    765  1.3.2.2  ad 	for (i = 0; i < 10; i++) {
    766  1.3.2.2  ad 		if (!(rge_read_mac_ocp(sc, 0xe00e) & 0x2000))
    767  1.3.2.2  ad 			break;
    768  1.3.2.2  ad 		DELAY(1000);
    769  1.3.2.2  ad 	}
    770  1.3.2.2  ad 
    771  1.3.2.2  ad 	/* Disable RXDV gate. */
    772  1.3.2.2  ad 	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
    773  1.3.2.2  ad 	DELAY(2000);
    774  1.3.2.2  ad 
    775  1.3.2.2  ad 	rge_ifmedia_upd(ifp);
    776  1.3.2.2  ad 
    777  1.3.2.2  ad 	/* Enable transmit and receive. */
    778  1.3.2.2  ad 	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_TXENB | RGE_CMD_RXENB);
    779  1.3.2.2  ad 
    780  1.3.2.2  ad 	/* Program promiscuous mode and multicast filters. */
    781  1.3.2.2  ad 	rge_iff(sc);
    782  1.3.2.2  ad 
    783  1.3.2.2  ad 	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
    784  1.3.2.2  ad 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
    785  1.3.2.2  ad 
    786  1.3.2.2  ad 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
    787  1.3.2.2  ad 
    788  1.3.2.2  ad 	/* Enable interrupts. */
    789  1.3.2.2  ad 	rge_setup_intr(sc, RGE_IMTYPE_SIM);
    790  1.3.2.2  ad 
    791  1.3.2.2  ad 	ifp->if_flags |= IFF_RUNNING;
    792  1.3.2.2  ad 	CLR(ifp->if_flags, IFF_OACTIVE);
    793  1.3.2.2  ad 
    794  1.3.2.2  ad 	callout_schedule(&sc->sc_timeout, 1);
    795  1.3.2.2  ad 
    796  1.3.2.2  ad 	return (0);
    797  1.3.2.2  ad }
    798  1.3.2.2  ad 
    799  1.3.2.2  ad /*
    800  1.3.2.2  ad  * Stop the adapter and free any mbufs allocated to the RX and TX lists.
    801  1.3.2.2  ad  */
    802  1.3.2.2  ad void
    803  1.3.2.2  ad rge_stop(struct ifnet *ifp)
    804  1.3.2.2  ad {
    805  1.3.2.2  ad 	struct rge_softc *sc = ifp->if_softc;
    806  1.3.2.2  ad 	int i;
    807  1.3.2.2  ad 
     808  1.3.2.2  ad 	callout_stop(&sc->sc_timeout);
    809  1.3.2.2  ad 
    810  1.3.2.2  ad 	ifp->if_timer = 0;
    811  1.3.2.2  ad 	ifp->if_flags &= ~IFF_RUNNING;
    812  1.3.2.2  ad 	sc->rge_timerintr = 0;
    813  1.3.2.2  ad 
    814  1.3.2.2  ad 	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
    815  1.3.2.2  ad 	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
    816  1.3.2.2  ad 	    RGE_RXCFG_ERRPKT);
    817  1.3.2.2  ad 
    818  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_IMR, 0);
    819  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_ISR, 0xffffffff);
    820  1.3.2.2  ad 
    821  1.3.2.2  ad 	rge_reset(sc);
    822  1.3.2.2  ad 
    823  1.3.2.2  ad 	intr_barrier(sc->sc_ih);
    824  1.3.2.2  ad 	ifq_barrier(&ifp->if_snd);
    825  1.3.2.2  ad /*	ifq_clr_oactive(&ifp->if_snd); Sevan - OpenBSD queue API */
    826  1.3.2.2  ad 
    827  1.3.2.2  ad 	if (sc->rge_head != NULL) {
    828  1.3.2.2  ad 		m_freem(sc->rge_head);
    829  1.3.2.2  ad 		sc->rge_head = sc->rge_tail = NULL;
    830  1.3.2.2  ad 	}
    831  1.3.2.2  ad 
    832  1.3.2.2  ad 	/* Free the TX list buffers. */
    833  1.3.2.2  ad 	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
    834  1.3.2.2  ad 		if (sc->rge_ldata.rge_txq[i].txq_mbuf != NULL) {
    835  1.3.2.2  ad 			bus_dmamap_unload(sc->sc_dmat,
    836  1.3.2.2  ad 			    sc->rge_ldata.rge_txq[i].txq_dmamap);
    837  1.3.2.2  ad 			m_freem(sc->rge_ldata.rge_txq[i].txq_mbuf);
    838  1.3.2.2  ad 			sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
    839  1.3.2.2  ad 		}
    840  1.3.2.2  ad 	}
    841  1.3.2.2  ad 
    842  1.3.2.2  ad 	/* Free the RX list buffers. */
    843  1.3.2.2  ad 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
    844  1.3.2.2  ad 		if (sc->rge_ldata.rge_rxq[i].rxq_mbuf != NULL) {
    845  1.3.2.2  ad 			bus_dmamap_unload(sc->sc_dmat,
    846  1.3.2.2  ad 			    sc->rge_ldata.rge_rxq[i].rxq_dmamap);
    847  1.3.2.2  ad 			m_freem(sc->rge_ldata.rge_rxq[i].rxq_mbuf);
    848  1.3.2.2  ad 			sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
    849  1.3.2.2  ad 		}
    850  1.3.2.2  ad 	}
    851  1.3.2.2  ad }
    852  1.3.2.2  ad 
    853  1.3.2.2  ad /*
    854  1.3.2.2  ad  * Set media options.
    855  1.3.2.2  ad  */
    856  1.3.2.2  ad int
    857  1.3.2.2  ad rge_ifmedia_upd(struct ifnet *ifp)
    858  1.3.2.2  ad {
    859  1.3.2.2  ad 	struct rge_softc *sc = ifp->if_softc;
    860  1.3.2.2  ad 	struct ifmedia *ifm = &sc->sc_media;
    861  1.3.2.2  ad 	int anar, gig, val;
    862  1.3.2.2  ad 
    863  1.3.2.2  ad 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
    864  1.3.2.2  ad 		return (EINVAL);
    865  1.3.2.2  ad 
    866  1.3.2.2  ad 	/* Disable Gigabit Lite. */
    867  1.3.2.2  ad 	RGE_PHY_CLRBIT(sc, 0xa428, 0x0200);
    868  1.3.2.2  ad 	RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0001);
    869  1.3.2.2  ad 
    870  1.3.2.2  ad 	val = rge_read_phy_ocp(sc, 0xa5d4);
    871  1.3.2.2  ad 	val &= ~RGE_ADV_2500TFDX;
    872  1.3.2.2  ad 
    873  1.3.2.2  ad 	anar = gig = 0;
    874  1.3.2.2  ad 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
    875  1.3.2.2  ad 	case IFM_AUTO:
    876  1.3.2.2  ad 		anar |= ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
    877  1.3.2.2  ad 		gig |= GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
    878  1.3.2.2  ad 		val |= RGE_ADV_2500TFDX;
    879  1.3.2.2  ad 		break;
    880  1.3.2.2  ad 	case IFM_2500_T:
    881  1.3.2.2  ad 		anar |= ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
    882  1.3.2.2  ad 		gig |= GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
    883  1.3.2.2  ad 		val |= RGE_ADV_2500TFDX;
    884  1.3.2.2  ad 		ifp->if_baudrate = IF_Mbps(2500);
    885  1.3.2.2  ad 		break;
    886  1.3.2.2  ad 	case IFM_1000_T:
    887  1.3.2.2  ad 		anar |= ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
    888  1.3.2.2  ad 		gig |= GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
    889  1.3.2.2  ad 		ifp->if_baudrate = IF_Gbps(1);
    890  1.3.2.2  ad 		break;
    891  1.3.2.2  ad 	case IFM_100_TX:
    892  1.3.2.2  ad 		anar |= ANAR_TX | ANAR_TX_FD;
    893  1.3.2.2  ad 		ifp->if_baudrate = IF_Mbps(100);
    894  1.3.2.2  ad 		break;
    895  1.3.2.2  ad 	case IFM_10_T:
    896  1.3.2.2  ad 		anar |= ANAR_10 | ANAR_10_FD;
    897  1.3.2.2  ad 		ifp->if_baudrate = IF_Mbps(10);
    898  1.3.2.2  ad 		break;
    899  1.3.2.2  ad 	default:
    900  1.3.2.2  ad 		printf("%s: unsupported media type\n", sc->sc_dev.dv_xname);
    901  1.3.2.2  ad 		return (EINVAL);
    902  1.3.2.2  ad 	}
    903  1.3.2.2  ad 
    904  1.3.2.2  ad 	rge_write_phy(sc, 0, MII_ANAR, anar | ANAR_PAUSE_ASYM | ANAR_FC);
    905  1.3.2.2  ad 	rge_write_phy(sc, 0, MII_100T2CR, gig);
    906  1.3.2.2  ad 	rge_write_phy_ocp(sc, 0xa5d4, val);
    907  1.3.2.2  ad 	rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_STARTNEG);
    908  1.3.2.2  ad 
    909  1.3.2.2  ad 	return (0);
    910  1.3.2.2  ad }
    911  1.3.2.2  ad 
    912  1.3.2.2  ad /*
    913  1.3.2.2  ad  * Report current media status.
    914  1.3.2.2  ad  */
    915  1.3.2.2  ad void
    916  1.3.2.2  ad rge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
    917  1.3.2.2  ad {
    918  1.3.2.2  ad 	struct rge_softc *sc = ifp->if_softc;
    919  1.3.2.2  ad 	uint16_t status = 0;
    920  1.3.2.2  ad 
    921  1.3.2.2  ad 	ifmr->ifm_status = IFM_AVALID;
    922  1.3.2.2  ad 	ifmr->ifm_active = IFM_ETHER;
    923  1.3.2.2  ad 
    924  1.3.2.2  ad 	if (rge_get_link_status(sc)) {
    925  1.3.2.2  ad 		ifmr->ifm_status |= IFM_ACTIVE;
    926  1.3.2.2  ad 
    927  1.3.2.2  ad 		status = RGE_READ_2(sc, RGE_PHYSTAT);
    928  1.3.2.2  ad 		if ((status & RGE_PHYSTAT_FDX) ||
    929  1.3.2.2  ad 		    (status & RGE_PHYSTAT_2500MBPS))
    930  1.3.2.2  ad 			ifmr->ifm_active |= IFM_FDX;
    931  1.3.2.2  ad 		else
    932  1.3.2.2  ad 			ifmr->ifm_active |= IFM_HDX;
    933  1.3.2.2  ad 
    934  1.3.2.2  ad 		if (status & RGE_PHYSTAT_10MBPS)
    935  1.3.2.2  ad 			ifmr->ifm_active |= IFM_10_T;
    936  1.3.2.2  ad 		else if (status & RGE_PHYSTAT_100MBPS)
    937  1.3.2.2  ad 			ifmr->ifm_active |= IFM_100_TX;
    938  1.3.2.2  ad 		else if (status & RGE_PHYSTAT_1000MBPS)
    939  1.3.2.2  ad 			ifmr->ifm_active |= IFM_1000_T;
    940  1.3.2.2  ad 		else if (status & RGE_PHYSTAT_2500MBPS)
    941  1.3.2.2  ad 			ifmr->ifm_active |= IFM_2500_T;
    942  1.3.2.2  ad 	}
    943  1.3.2.2  ad }
    944  1.3.2.2  ad 
    945  1.3.2.3  ad /*
    946  1.3.2.2  ad  * Allocate memory for RX/TX rings.
    947  1.3.2.2  ad  */
    948  1.3.2.2  ad int
    949  1.3.2.2  ad rge_allocmem(struct rge_softc *sc)
    950  1.3.2.2  ad {
    951  1.3.2.2  ad 	int error, i;
    952  1.3.2.2  ad 
    953  1.3.2.2  ad 	/* Allocate DMA'able memory for the TX ring. */
    954  1.3.2.2  ad 	error = bus_dmamap_create(sc->sc_dmat, RGE_TX_LIST_SZ, 1,
    955  1.3.2.2  ad 	    RGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT, &sc->rge_ldata.rge_tx_list_map);
    956  1.3.2.2  ad 	if (error) {
    957  1.3.2.2  ad 		printf("%s: can't create TX list map\n", sc->sc_dev.dv_xname);
    958  1.3.2.2  ad 		return (error);
    959  1.3.2.2  ad 	}
    960  1.3.2.2  ad 	error = bus_dmamem_alloc(sc->sc_dmat, RGE_TX_LIST_SZ, RGE_ALIGN, 0,
    961  1.3.2.2  ad 	    &sc->rge_ldata.rge_tx_listseg, 1, &sc->rge_ldata.rge_tx_listnseg,
    962  1.3.2.2  ad 	    BUS_DMA_NOWAIT); /* XXX OpenBSD adds BUS_DMA_ZERO */
    963  1.3.2.2  ad 	if (error) {
    964  1.3.2.2  ad 		printf("%s: can't alloc TX list\n", sc->sc_dev.dv_xname);
    965  1.3.2.2  ad 		return (error);
    966  1.3.2.2  ad 	}
    967  1.3.2.2  ad 
    968  1.3.2.2  ad 	/* Load the map for the TX ring. */
    969  1.3.2.2  ad 	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
    970  1.3.2.2  ad 	    sc->rge_ldata.rge_tx_listnseg, RGE_TX_LIST_SZ,
    971  1.3.2.3  ad 	    (void **) &sc->rge_ldata.rge_tx_list,
    972  1.3.2.2  ad 	    BUS_DMA_NOWAIT); /* XXX OpenBSD adds BUS_DMA_COHERENT */
    973  1.3.2.2  ad 	if (error) {
    974  1.3.2.2  ad 		printf("%s: can't map TX dma buffers\n", sc->sc_dev.dv_xname);
    975  1.3.2.2  ad 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
    976  1.3.2.2  ad 		    sc->rge_ldata.rge_tx_listnseg);
    977  1.3.2.2  ad 		return (error);
    978  1.3.2.2  ad 	}
    979  1.3.2.2  ad 	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
    980  1.3.2.2  ad 	    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
    981  1.3.2.2  ad 	if (error) {
    982  1.3.2.2  ad 		printf("%s: can't load TX dma map\n", sc->sc_dev.dv_xname);
    983  1.3.2.2  ad 		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map);
    984  1.3.2.2  ad 		bus_dmamem_unmap(sc->sc_dmat,
    985  1.3.2.2  ad 		    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ);
    986  1.3.2.2  ad 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
    987  1.3.2.2  ad 		    sc->rge_ldata.rge_tx_listnseg);
    988  1.3.2.2  ad 		return (error);
    989  1.3.2.2  ad 	}
    990  1.3.2.2  ad 
    991  1.3.2.2  ad 	/* Create DMA maps for TX buffers. */
    992  1.3.2.2  ad 	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
    993  1.3.2.2  ad 		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN,
    994  1.3.2.2  ad 		    RGE_TX_NSEGS, RGE_JUMBO_FRAMELEN, 0, 0,
    995  1.3.2.2  ad 		    &sc->rge_ldata.rge_txq[i].txq_dmamap);
    996  1.3.2.2  ad 		if (error) {
    997  1.3.2.2  ad 			printf("%s: can't create DMA map for TX\n",
    998  1.3.2.2  ad 			    sc->sc_dev.dv_xname);
    999  1.3.2.2  ad 			return (error);
   1000  1.3.2.2  ad 		}
   1001  1.3.2.2  ad 	}
   1002  1.3.2.2  ad 
   1003  1.3.2.2  ad 	/* Allocate DMA'able memory for the RX ring. */
   1004  1.3.2.2  ad 	error = bus_dmamap_create(sc->sc_dmat, RGE_RX_LIST_SZ, 1,
   1005  1.3.2.2  ad 	    RGE_RX_LIST_SZ, 0, 0, &sc->rge_ldata.rge_rx_list_map);
   1006  1.3.2.2  ad 	if (error) {
   1007  1.3.2.2  ad 		printf("%s: can't create RX list map\n", sc->sc_dev.dv_xname);
   1008  1.3.2.2  ad 		return (error);
   1009  1.3.2.2  ad 	}
   1010  1.3.2.2  ad 	error = bus_dmamem_alloc(sc->sc_dmat, RGE_RX_LIST_SZ, RGE_ALIGN, 0,
   1011  1.3.2.2  ad 	    &sc->rge_ldata.rge_rx_listseg, 1, &sc->rge_ldata.rge_rx_listnseg,
   1012  1.3.2.2  ad 	    BUS_DMA_NOWAIT);  /* XXX OpenBSD adds BUS_DMA_ZERO */
   1013  1.3.2.2  ad 	if (error) {
   1014  1.3.2.2  ad 		printf("%s: can't alloc RX list\n", sc->sc_dev.dv_xname);
   1015  1.3.2.2  ad 		return (error);
   1016  1.3.2.2  ad 	}
   1017  1.3.2.2  ad 
   1018  1.3.2.2  ad 	/* Load the map for the RX ring. */
   1019  1.3.2.2  ad 	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
   1020  1.3.2.2  ad 	    sc->rge_ldata.rge_rx_listnseg, RGE_RX_LIST_SZ,
   1021  1.3.2.3  ad 	    (void **) &sc->rge_ldata.rge_rx_list,
   1022  1.3.2.2  ad 	    BUS_DMA_NOWAIT);  /* XXX OpenBSD adds BUS_DMA_COHERENT */
   1023  1.3.2.2  ad 	if (error) {
   1024  1.3.2.2  ad 		printf("%s: can't map RX dma buffers\n", sc->sc_dev.dv_xname);
   1025  1.3.2.2  ad 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
   1026  1.3.2.2  ad 		    sc->rge_ldata.rge_rx_listnseg);
   1027  1.3.2.2  ad 		return (error);
   1028  1.3.2.2  ad 	}
   1029  1.3.2.2  ad 	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
   1030  1.3.2.2  ad 	    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
   1031  1.3.2.2  ad 	if (error) {
   1032  1.3.2.2  ad 		printf("%s: can't load RX dma map\n", sc->sc_dev.dv_xname);
   1033  1.3.2.2  ad 		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map);
   1034  1.3.2.2  ad 		bus_dmamem_unmap(sc->sc_dmat,
   1035  1.3.2.2  ad 		    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ);
   1036  1.3.2.2  ad 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
   1037  1.3.2.2  ad 		    sc->rge_ldata.rge_rx_listnseg);
   1038  1.3.2.2  ad 		return (error);
   1039  1.3.2.2  ad 	}
   1040  1.3.2.2  ad 
   1041  1.3.2.2  ad 	/* Create DMA maps for RX buffers. */
   1042  1.3.2.2  ad 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
   1043  1.3.2.2  ad 		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN, 1,
   1044  1.3.2.2  ad 		    RGE_JUMBO_FRAMELEN, 0, 0,
   1045  1.3.2.2  ad 		    &sc->rge_ldata.rge_rxq[i].rxq_dmamap);
   1046  1.3.2.2  ad 		if (error) {
   1047  1.3.2.2  ad 			printf("%s: can't create DMA map for RX\n",
   1048  1.3.2.2  ad 			    sc->sc_dev.dv_xname);
   1049  1.3.2.2  ad 			return (error);
   1050  1.3.2.2  ad 		}
   1051  1.3.2.2  ad 	}
   1052  1.3.2.2  ad 
   1053  1.3.2.2  ad 	return (error);
   1054  1.3.2.2  ad }
   1055  1.3.2.2  ad 
   1056  1.3.2.2  ad /*
   1057  1.3.2.2  ad  * Initialize the RX descriptor and attach an mbuf cluster.
   1058  1.3.2.2  ad  */
   1059  1.3.2.2  ad int
   1060  1.3.2.2  ad rge_newbuf(struct rge_softc *sc, int idx)
   1061  1.3.2.2  ad {
   1062  1.3.2.2  ad 	struct mbuf *m;
   1063  1.3.2.2  ad 	struct rge_rx_desc *r;
   1064  1.3.2.2  ad 	struct rge_rxq *rxq;
   1065  1.3.2.2  ad 	bus_dmamap_t rxmap;
   1066  1.3.2.2  ad 
   1067  1.3.2.2  ad 	m = MCLGETI(NULL, M_DONTWAIT, NULL, RGE_JUMBO_FRAMELEN);
   1068  1.3.2.2  ad 	if (m == NULL)
   1069  1.3.2.2  ad 		return (ENOBUFS);
   1070  1.3.2.2  ad 
   1071  1.3.2.2  ad 	m->m_len = m->m_pkthdr.len = RGE_JUMBO_FRAMELEN;
   1072  1.3.2.2  ad 
   1073  1.3.2.2  ad 	rxq = &sc->rge_ldata.rge_rxq[idx];
   1074  1.3.2.2  ad 	rxmap = rxq->rxq_dmamap;
   1075  1.3.2.2  ad 
   1076  1.3.2.2  ad 	if (bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m, BUS_DMA_NOWAIT))
   1077  1.3.2.2  ad 		goto out;
   1078  1.3.2.2  ad 
   1079  1.3.2.2  ad 	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
   1080  1.3.2.2  ad 	    BUS_DMASYNC_PREREAD);
   1081  1.3.2.2  ad 
   1082  1.3.2.2  ad 	/* Map the segments into RX descriptors. */
   1083  1.3.2.2  ad 	r = &sc->rge_ldata.rge_rx_list[idx];
   1084  1.3.2.2  ad 
   1085  1.3.2.2  ad 	if (RGE_OWN(r)) {
   1086  1.3.2.2  ad 		printf("%s: tried to map busy RX descriptor\n",
   1087  1.3.2.2  ad 		    sc->sc_dev.dv_xname);
   1088  1.3.2.2  ad 		goto out;
   1089  1.3.2.2  ad 	}
   1090  1.3.2.2  ad 
   1091  1.3.2.2  ad 	rxq->rxq_mbuf = m;
   1092  1.3.2.2  ad 
   1093  1.3.2.2  ad 	r->rge_extsts = 0;
   1094  1.3.2.2  ad 	r->rge_addrlo = htole32(RGE_ADDR_LO(rxmap->dm_segs[0].ds_addr));
   1095  1.3.2.2  ad 	r->rge_addrhi = htole32(RGE_ADDR_HI(rxmap->dm_segs[0].ds_addr));
   1096  1.3.2.2  ad 
   1097  1.3.2.2  ad 	r->rge_cmdsts = htole32(rxmap->dm_segs[0].ds_len);
   1098  1.3.2.2  ad 	if (idx == RGE_RX_LIST_CNT - 1)
   1099  1.3.2.2  ad 		r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
   1100  1.3.2.2  ad 
   1101  1.3.2.2  ad 	r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);
   1102  1.3.2.2  ad 
   1103  1.3.2.2  ad 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
   1104  1.3.2.2  ad 	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
   1105  1.3.2.2  ad 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1106  1.3.2.2  ad 
   1107  1.3.2.2  ad 	return (0);
   1108  1.3.2.2  ad out:
   1109  1.3.2.2  ad 	if (m != NULL)
   1110  1.3.2.2  ad 		m_freem(m);
   1111  1.3.2.2  ad 	return (ENOMEM);
   1112  1.3.2.2  ad }
   1113  1.3.2.2  ad 
   1114  1.3.2.2  ad void
   1115  1.3.2.2  ad rge_discard_rxbuf(struct rge_softc *sc, int idx)
   1116  1.3.2.2  ad {
   1117  1.3.2.2  ad 	struct rge_rx_desc *r;
   1118  1.3.2.2  ad 
   1119  1.3.2.2  ad 	r = &sc->rge_ldata.rge_rx_list[idx];
   1120  1.3.2.2  ad 
   1121  1.3.2.2  ad 	r->rge_cmdsts = htole32(RGE_JUMBO_FRAMELEN);
   1122  1.3.2.2  ad 	r->rge_extsts = 0;
   1123  1.3.2.2  ad 	if (idx == RGE_RX_LIST_CNT - 1)
   1124  1.3.2.2  ad 		r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
   1125  1.3.2.2  ad 	r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);
   1126  1.3.2.2  ad 
   1127  1.3.2.2  ad 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
   1128  1.3.2.2  ad 	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
   1129  1.3.2.2  ad 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1130  1.3.2.2  ad }
   1131  1.3.2.2  ad 
   1132  1.3.2.2  ad int
   1133  1.3.2.2  ad rge_rx_list_init(struct rge_softc *sc)
   1134  1.3.2.2  ad {
   1135  1.3.2.2  ad 	int i;
   1136  1.3.2.2  ad 
   1137  1.3.2.2  ad 	memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);
   1138  1.3.2.2  ad 
   1139  1.3.2.2  ad 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
   1140  1.3.2.2  ad 		sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
   1141  1.3.2.2  ad 		if (rge_newbuf(sc, i) == ENOBUFS)
   1142  1.3.2.2  ad 			return (ENOBUFS);
   1143  1.3.2.2  ad 	}
   1144  1.3.2.2  ad 
   1145  1.3.2.2  ad 	sc->rge_ldata.rge_rxq_prodidx = 0;
   1146  1.3.2.2  ad 	sc->rge_head = sc->rge_tail = NULL;
   1147  1.3.2.2  ad 
   1148  1.3.2.2  ad 	return (0);
   1149  1.3.2.2  ad }
   1150  1.3.2.2  ad 
   1151  1.3.2.2  ad void
   1152  1.3.2.2  ad rge_tx_list_init(struct rge_softc *sc)
   1153  1.3.2.2  ad {
   1154  1.3.2.2  ad 	int i;
   1155  1.3.2.2  ad 
   1156  1.3.2.2  ad 	memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);
   1157  1.3.2.2  ad 
   1158  1.3.2.2  ad 	for (i = 0; i < RGE_TX_LIST_CNT; i++)
   1159  1.3.2.2  ad 		sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
   1160  1.3.2.2  ad 
   1161  1.3.2.2  ad 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map, 0,
   1162  1.3.2.2  ad 	    sc->rge_ldata.rge_tx_list_map->dm_mapsize,
   1163  1.3.2.2  ad 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1164  1.3.2.2  ad 
   1165  1.3.2.2  ad 	sc->rge_ldata.rge_txq_prodidx = sc->rge_ldata.rge_txq_considx = 0;
   1166  1.3.2.2  ad }
   1167  1.3.2.2  ad 
   1168  1.3.2.2  ad int
   1169  1.3.2.2  ad rge_rxeof(struct rge_softc *sc)
   1170  1.3.2.2  ad {
   1171  1.3.2.2  ad 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
   1172  1.3.2.2  ad 	struct mbuf *m;
   1173  1.3.2.2  ad 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   1174  1.3.2.2  ad 	struct rge_rx_desc *cur_rx;
   1175  1.3.2.2  ad 	struct rge_rxq *rxq;
   1176  1.3.2.2  ad 	uint32_t rxstat, extsts;
   1177  1.3.2.2  ad 	int i, total_len, rx = 0;
   1178  1.3.2.2  ad 
   1179  1.3.2.2  ad 	for (i = sc->rge_ldata.rge_rxq_prodidx; ; i = RGE_NEXT_RX_DESC(i)) {
   1180  1.3.2.2  ad 		/* Invalidate the descriptor memory. */
   1181  1.3.2.2  ad 		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
   1182  1.3.2.2  ad 		    i * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
   1183  1.3.2.2  ad 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1184  1.3.2.2  ad 
   1185  1.3.2.2  ad 		cur_rx = &sc->rge_ldata.rge_rx_list[i];
   1186  1.3.2.2  ad 
   1187  1.3.2.2  ad 		if (RGE_OWN(cur_rx))
   1188  1.3.2.2  ad 			break;
   1189  1.3.2.2  ad 
   1190  1.3.2.2  ad 		rxstat = letoh32(cur_rx->rge_cmdsts);
   1191  1.3.2.2  ad 		extsts = letoh32(cur_rx->rge_extsts);
   1192  1.3.2.3  ad 
   1193  1.3.2.2  ad 		total_len = RGE_RXBYTES(cur_rx);
   1194  1.3.2.2  ad 		rxq = &sc->rge_ldata.rge_rxq[i];
   1195  1.3.2.2  ad 		m = rxq->rxq_mbuf;
   1196  1.3.2.2  ad 		rx = 1;
   1197  1.3.2.2  ad 
   1198  1.3.2.2  ad 		/* Invalidate the RX mbuf and unload its map. */
   1199  1.3.2.2  ad 		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_dmamap, 0,
   1200  1.3.2.2  ad 		    rxq->rxq_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   1201  1.3.2.2  ad 		bus_dmamap_unload(sc->sc_dmat, rxq->rxq_dmamap);
   1202  1.3.2.2  ad 
   1203  1.3.2.2  ad 		if ((rxstat & (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) !=
   1204  1.3.2.2  ad 		    (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) {
   1205  1.3.2.2  ad 			rge_discard_rxbuf(sc, i);
   1206  1.3.2.2  ad 			continue;
   1207  1.3.2.2  ad 		}
   1208  1.3.2.2  ad 
   1209  1.3.2.2  ad 		if (rxstat & RGE_RDCMDSTS_RXERRSUM) {
   1210  1.3.2.3  ad 			if_statinc(ifp, if_ierrors);
   1211  1.3.2.2  ad 			/*
   1212  1.3.2.2  ad 			 * If this is part of a multi-fragment packet,
   1213  1.3.2.2  ad 			 * discard all the pieces.
   1214  1.3.2.2  ad 			 */
   1215  1.3.2.2  ad 			 if (sc->rge_head != NULL) {
   1216  1.3.2.2  ad 				m_freem(sc->rge_head);
   1217  1.3.2.2  ad 				sc->rge_head = sc->rge_tail = NULL;
   1218  1.3.2.2  ad 			}
   1219  1.3.2.2  ad 			rge_discard_rxbuf(sc, i);
   1220  1.3.2.2  ad 			continue;
   1221  1.3.2.2  ad 		}
   1222  1.3.2.2  ad 
   1223  1.3.2.2  ad 		/*
   1224  1.3.2.2  ad 		 * If allocating a replacement mbuf fails,
   1225  1.3.2.2  ad 		 * reload the current one.
   1226  1.3.2.2  ad 		 */
   1227  1.3.2.2  ad 
   1228  1.3.2.2  ad 		if (rge_newbuf(sc, i) == ENOBUFS) {
   1229  1.3.2.2  ad 			if (sc->rge_head != NULL) {
   1230  1.3.2.2  ad 				m_freem(sc->rge_head);
   1231  1.3.2.2  ad 				sc->rge_head = sc->rge_tail = NULL;
   1232  1.3.2.2  ad 			}
   1233  1.3.2.2  ad 			rge_discard_rxbuf(sc, i);
   1234  1.3.2.2  ad 			continue;
   1235  1.3.2.2  ad 		}
   1236  1.3.2.2  ad 
   1237  1.3.2.2  ad 		if (sc->rge_head != NULL) {
   1238  1.3.2.2  ad 			m->m_len = total_len;
   1239  1.3.2.2  ad 			/*
   1240  1.3.2.2  ad 			 * Special case: if there's 4 bytes or less
   1241  1.3.2.2  ad 			 * in this buffer, the mbuf can be discarded:
   1242  1.3.2.2  ad 			 * the last 4 bytes is the CRC, which we don't
   1243  1.3.2.2  ad 			 * care about anyway.
   1244  1.3.2.2  ad 			 */
   1245  1.3.2.2  ad 			if (m->m_len <= ETHER_CRC_LEN) {
   1246  1.3.2.2  ad 				sc->rge_tail->m_len -=
   1247  1.3.2.2  ad 				    (ETHER_CRC_LEN - m->m_len);
   1248  1.3.2.2  ad 				m_freem(m);
   1249  1.3.2.2  ad 			} else {
   1250  1.3.2.2  ad 				m->m_len -= ETHER_CRC_LEN;
   1251  1.3.2.2  ad 				m->m_flags &= ~M_PKTHDR;
   1252  1.3.2.2  ad 				sc->rge_tail->m_next = m;
   1253  1.3.2.2  ad 			}
   1254  1.3.2.2  ad 			m = sc->rge_head;
   1255  1.3.2.2  ad 			sc->rge_head = sc->rge_tail = NULL;
   1256  1.3.2.2  ad 			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
   1257  1.3.2.2  ad 		} else
   1258  1.3.2.2  ad 			m->m_pkthdr.len = m->m_len =
   1259  1.3.2.2  ad 			    (total_len - ETHER_CRC_LEN);
   1260  1.3.2.2  ad 
   1261  1.3.2.2  ad 		/* Check IP header checksum. */
   1262  1.3.2.2  ad 		if (!(rxstat & RGE_RDCMDSTS_IPCSUMERR) &&
   1263  1.3.2.2  ad 		    (extsts & RGE_RDEXTSTS_IPV4))
   1264  1.3.2.2  ad 			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
   1265  1.3.2.2  ad 
   1266  1.3.2.2  ad 		/* Check TCP/UDP checksum. */
   1267  1.3.2.2  ad 		if ((extsts & (RGE_RDEXTSTS_IPV4 | RGE_RDEXTSTS_IPV6)) &&
   1268  1.3.2.2  ad 		    (((rxstat & RGE_RDCMDSTS_TCPPKT) &&
   1269  1.3.2.2  ad 		    !(rxstat & RGE_RDCMDSTS_TCPCSUMERR)) ||
   1270  1.3.2.2  ad 		    ((rxstat & RGE_RDCMDSTS_UDPPKT) &&
   1271  1.3.2.2  ad 		    !(rxstat & RGE_RDCMDSTS_UDPCSUMERR))))
   1272  1.3.2.2  ad 			m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
   1273  1.3.2.2  ad 			    M_UDP_CSUM_IN_OK;
   1274  1.3.2.2  ad 
   1275  1.3.2.2  ad #if NVLAN > 0
   1276  1.3.2.2  ad 		if (extsts & RGE_RDEXTSTS_VTAG) {
   1277  1.3.2.2  ad 			m->m_pkthdr.ether_vtag =
   1278  1.3.2.2  ad 			    ntohs(extsts & RGE_RDEXTSTS_VLAN_MASK);
   1279  1.3.2.2  ad 			m->m_flags |= M_VLANTAG;
   1280  1.3.2.2  ad 		}
   1281  1.3.2.2  ad #endif
   1282  1.3.2.2  ad 
   1283  1.3.2.2  ad 		ml_enqueue(&ml, m);
   1284  1.3.2.2  ad 	}
   1285  1.3.2.2  ad 
   1286  1.3.2.2  ad 	sc->rge_ldata.rge_rxq_prodidx = i;
   1287  1.3.2.2  ad 
   1288  1.3.2.2  ad 	if_input(ifp, &ml);
   1289  1.3.2.2  ad 
   1290  1.3.2.2  ad 	return (rx);
   1291  1.3.2.2  ad }
   1292  1.3.2.2  ad 
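/*
 * Transmit completion handler: reclaim descriptors between the
 * consumer and producer indexes, unload and free their mbufs and
 * update the collision and output error counters.  Returns nonzero
 * if there was any progress or if transmissions are still pending.
 */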
   1293  1.3.2.2  ad int
   1294  1.3.2.2  ad rge_txeof(struct rge_softc *sc)
   1295  1.3.2.2  ad {
   1296  1.3.2.2  ad 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   1297  1.3.2.2  ad 	struct rge_txq *txq;
   1298  1.3.2.2  ad 	uint32_t txstat;
   1299  1.3.2.2  ad 	int cons, idx, prod;
   1300  1.3.2.2  ad 	int free = 0;
   1301  1.3.2.2  ad 
   1302  1.3.2.2  ad 	prod = sc->rge_ldata.rge_txq_prodidx;
   1303  1.3.2.2  ad 	cons = sc->rge_ldata.rge_txq_considx;
   1304  1.3.2.2  ad 
   1305  1.3.2.2  ad 	while (prod != cons) {
   1306  1.3.2.2  ad 		txq = &sc->rge_ldata.rge_txq[cons];
   1307  1.3.2.2  ad 		idx = txq->txq_descidx;
   1308  1.3.2.2  ad 
   1309  1.3.2.2  ad 		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
   1310  1.3.2.2  ad 		    idx * sizeof(struct rge_tx_desc),
   1311  1.3.2.2  ad 		    sizeof(struct rge_tx_desc),
   1312  1.3.2.2  ad 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1313  1.3.2.2  ad 
   1314  1.3.2.2  ad 		txstat = letoh32(sc->rge_ldata.rge_tx_list[idx].rge_cmdsts);
   1315  1.3.2.2  ad 
   1316  1.3.2.2  ad 		if (txstat & RGE_TDCMDSTS_OWN) {
   1317  1.3.2.2  ad 			free = 2;
   1318  1.3.2.2  ad 			break;
   1319  1.3.2.2  ad 		}
   1320  1.3.2.2  ad 
   1321  1.3.2.3  ad 		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap, 0,
   1322  1.3.2.2  ad 		    txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   1323  1.3.2.2  ad 		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
   1324  1.3.2.2  ad 		m_freem(txq->txq_mbuf);
   1325  1.3.2.2  ad 		txq->txq_mbuf = NULL;
   1326  1.3.2.2  ad 
   1327  1.3.2.2  ad 		if (txstat & (RGE_TDCMDSTS_EXCESSCOLL | RGE_TDCMDSTS_COLL))
   1328  1.3.2.3  ad 			if_statinc(ifp, if_collisions);
   1329  1.3.2.2  ad 		if (txstat & RGE_TDCMDSTS_TXERR)
   1330  1.3.2.3  ad 			if_statinc(ifp, if_oerrors);
   1331  1.3.2.2  ad 
   1332  1.3.2.2  ad 		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
   1333  1.3.2.2  ad 		    idx * sizeof(struct rge_tx_desc),
   1334  1.3.2.2  ad 		    sizeof(struct rge_tx_desc),
   1335  1.3.2.2  ad 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1336  1.3.2.2  ad 
   1337  1.3.2.2  ad 		cons = RGE_NEXT_TX_DESC(idx);
   1338  1.3.2.2  ad 		free = 1;
   1339  1.3.2.2  ad 	}
   1340  1.3.2.2  ad 
   1341  1.3.2.2  ad 	if (free == 0)
   1342  1.3.2.2  ad 		return (0);
   1343  1.3.2.2  ad 
   1344  1.3.2.2  ad 	sc->rge_ldata.rge_txq_considx = cons;
   1345  1.3.2.2  ad 
   1346  1.3.2.2  ad 	if (ifq_is_oactive(&ifp->if_snd))
   1347  1.3.2.2  ad 		ifq_restart(&ifp->if_snd);
   1348  1.3.2.2  ad 	else if (free == 2)
   1349  1.3.2.2  ad 		ifq_serialize(&ifp->if_snd, &sc->sc_task);
   1350  1.3.2.2  ad 	else
   1351  1.3.2.2  ad 		ifp->if_timer = 0;
   1352  1.3.2.2  ad 
   1353  1.3.2.2  ad 	return (1);
   1354  1.3.2.2  ad }
   1355  1.3.2.2  ad 
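/*
 * Reset the chip: enable the RXDV gate, wait for the RX and TX FIFOs
 * to drain, then issue a software reset and wait for it to complete.
 */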
   1356  1.3.2.2  ad void
   1357  1.3.2.2  ad rge_reset(struct rge_softc *sc)
   1358  1.3.2.2  ad {
   1359  1.3.2.2  ad 	int i;
   1360  1.3.2.2  ad 
   1361  1.3.2.2  ad 	/* Enable RXDV gate. */
   1362  1.3.2.2  ad 	RGE_SETBIT_1(sc, RGE_PPSW, 0x08);
   1363  1.3.2.2  ad 	DELAY(2000);
   1364  1.3.2.2  ad 
   1365  1.3.2.2  ad 	for (i = 0; i < 10; i++) {
   1366  1.3.2.2  ad 		DELAY(100);
   1367  1.3.2.2  ad 		if ((RGE_READ_1(sc, RGE_MCUCMD) & (RGE_MCUCMD_RXFIFO_EMPTY |
   1368  1.3.2.2  ad 		    RGE_MCUCMD_TXFIFO_EMPTY)) == (RGE_MCUCMD_RXFIFO_EMPTY |
   1369  1.3.2.2  ad 		    RGE_MCUCMD_TXFIFO_EMPTY))
   1370  1.3.2.2  ad 			break;
   1371  1.3.2.2  ad 	}
   1372  1.3.2.2  ad 
   1373  1.3.2.2  ad 	/* Soft reset. */
   1374  1.3.2.2  ad 	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_RESET);
   1375  1.3.2.2  ad 
   1376  1.3.2.2  ad 	for (i = 0; i < RGE_TIMEOUT; i++) {
   1377  1.3.2.2  ad 		DELAY(100);
   1378  1.3.2.2  ad 		if (!(RGE_READ_1(sc, RGE_CMD) & RGE_CMD_RESET))
   1379  1.3.2.2  ad 			break;
   1380  1.3.2.2  ad 	}
   1381  1.3.2.2  ad 	if (i == RGE_TIMEOUT)
   1382  1.3.2.2  ad 		printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);
   1383  1.3.2.2  ad }
   1384  1.3.2.2  ad 
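/*
 * Program the receive filter.  Unicast and broadcast frames are always
 * accepted; multicast frames are accepted through the hash filter, or
 * unconditionally in promiscuous/allmulti mode.
 */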
   1385  1.3.2.2  ad void
   1386  1.3.2.2  ad rge_iff(struct rge_softc *sc)
   1387  1.3.2.2  ad {
   1388  1.3.2.2  ad 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   1389  1.3.2.2  ad 	struct ethercom *ac = &sc->sc_ec;
   1390  1.3.2.2  ad 	struct ether_multi *enm;
   1391  1.3.2.2  ad 	struct ether_multistep step;
   1392  1.3.2.2  ad 	uint32_t hashes[2];
   1393  1.3.2.2  ad 	uint32_t rxfilt;
   1394  1.3.2.2  ad 	int h = 0;
   1395  1.3.2.2  ad 
   1396  1.3.2.2  ad 	rxfilt = RGE_READ_4(sc, RGE_RXCFG);
   1397  1.3.2.2  ad 	rxfilt &= ~(RGE_RXCFG_ALLPHYS | RGE_RXCFG_MULTI);
   1398  1.3.2.2  ad 	ifp->if_flags &= ~IFF_ALLMULTI;
   1399  1.3.2.2  ad 
   1400  1.3.2.2  ad 	/*
   1401  1.3.2.2  ad 	 * Always accept frames destined to our station address.
   1402  1.3.2.2  ad 	 * Always accept broadcast frames.
   1403  1.3.2.2  ad 	 */
   1404  1.3.2.2  ad 	rxfilt |= RGE_RXCFG_INDIV | RGE_RXCFG_BROAD;
   1405  1.3.2.2  ad 
   1406  1.3.2.2  ad 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
   1407  1.3.2.2  ad 		ifp->if_flags |= IFF_ALLMULTI;
   1408  1.3.2.2  ad 		rxfilt |= RGE_RXCFG_MULTI;
   1409  1.3.2.2  ad 		if (ifp->if_flags & IFF_PROMISC)
   1410  1.3.2.2  ad 			rxfilt |= RGE_RXCFG_ALLPHYS;
   1411  1.3.2.2  ad 		hashes[0] = hashes[1] = 0xffffffff;
   1412  1.3.2.2  ad 	} else {
   1413  1.3.2.2  ad 		rxfilt |= RGE_RXCFG_MULTI;
   1414  1.3.2.2  ad 		/* Program new filter. */
   1415  1.3.2.2  ad 		memset(hashes, 0, sizeof(hashes));
   1416  1.3.2.2  ad 
   1417  1.3.2.2  ad 		ETHER_FIRST_MULTI(step, ac, enm);
   1418  1.3.2.2  ad 		while (enm != NULL) {
   1419  1.3.2.2  ad 			h = ether_crc32_be(enm->enm_addrlo,
   1420  1.3.2.2  ad 			    ETHER_ADDR_LEN) >> 26;
   1421  1.3.2.2  ad 
   1422  1.3.2.2  ad 			if (h < 32)
   1423  1.3.2.2  ad 				hashes[0] |= (1 << h);
   1424  1.3.2.2  ad 			else
   1425  1.3.2.2  ad 				hashes[1] |= (1 << (h - 32));
   1426  1.3.2.2  ad 
   1427  1.3.2.2  ad 			ETHER_NEXT_MULTI(step, enm);
   1428  1.3.2.2  ad 		}
   1429  1.3.2.2  ad 	}
   1430  1.3.2.2  ad 
   1431  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_RXCFG, rxfilt);
   1432  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_MAR0, bswap32(hashes[1]));
   1433  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_MAR4, bswap32(hashes[0]));
   1434  1.3.2.2  ad }
   1435  1.3.2.2  ad 
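/*
 * Power the internal PHY up or down.  When powering up, wait for the
 * PHY to report that it is ready before returning.
 */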
   1436  1.3.2.2  ad void
   1437  1.3.2.2  ad rge_set_phy_power(struct rge_softc *sc, int on)
   1438  1.3.2.2  ad {
   1439  1.3.2.2  ad 	int i;
   1440  1.3.2.2  ad 
   1441  1.3.2.2  ad 	if (on) {
   1442  1.3.2.2  ad 		RGE_SETBIT_1(sc, RGE_PMCH, 0xc0);
   1443  1.3.2.2  ad 
   1444  1.3.2.2  ad 		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN);
   1445  1.3.2.2  ad 
   1446  1.3.2.2  ad 		for (i = 0; i < RGE_TIMEOUT; i++) {
    1447  1.3.2.2  ad 			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 3)
   1448  1.3.2.2  ad 				break;
   1449  1.3.2.2  ad 			DELAY(1000);
   1450  1.3.2.2  ad 		}
   1451  1.3.2.2  ad 	} else
   1452  1.3.2.2  ad 		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_PDOWN);
   1453  1.3.2.2  ad }
   1454  1.3.2.2  ad 
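/*
 * Chip-specific PHY setup: program the EPHY registers, reload the PHY
 * microcode if the version on the chip is stale, apply a series of
 * undocumented OCP register fixups and finally disable EEE.
 */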
   1455  1.3.2.2  ad void
   1456  1.3.2.2  ad rge_phy_config(struct rge_softc *sc)
   1457  1.3.2.2  ad {
   1458  1.3.2.2  ad 	uint16_t mcode_ver, val;
   1459  1.3.2.2  ad 	int i;
   1460  1.3.2.2  ad 	static const uint16_t mac_cfg3_a438_value[] =
   1461  1.3.2.2  ad 	    { 0x0043, 0x00a7, 0x00d6, 0x00ec, 0x00f6, 0x00fb, 0x00fd, 0x00ff,
   1462  1.3.2.2  ad 	      0x00bb, 0x0058, 0x0029, 0x0013, 0x0009, 0x0004, 0x0002 };
   1463  1.3.2.2  ad 
   1464  1.3.2.2  ad 	static const uint16_t mac_cfg3_b88e_value[] =
   1465  1.3.2.3  ad 	    { 0xc091, 0x6e12, 0xc092, 0x1214, 0xc094, 0x1516, 0xc096, 0x171b,
   1466  1.3.2.2  ad 	      0xc098, 0x1b1c, 0xc09a, 0x1f1f, 0xc09c, 0x2021, 0xc09e, 0x2224,
   1467  1.3.2.2  ad 	      0xc0a0, 0x2424, 0xc0a2, 0x2424, 0xc0a4, 0x2424, 0xc018, 0x0af2,
   1468  1.3.2.2  ad 	      0xc01a, 0x0d4a, 0xc01c, 0x0f26, 0xc01e, 0x118d, 0xc020, 0x14f3,
   1469  1.3.2.2  ad 	      0xc022, 0x175a, 0xc024, 0x19c0, 0xc026, 0x1c26, 0xc089, 0x6050,
   1470  1.3.2.2  ad 	      0xc08a, 0x5f6e, 0xc08c, 0x6e6e, 0xc08e, 0x6e6e, 0xc090, 0x6e12 };
   1471  1.3.2.2  ad 
   1472  1.3.2.2  ad 	/* Read microcode version. */
   1473  1.3.2.2  ad 	rge_write_phy_ocp(sc, 0xa436, 0x801e);
   1474  1.3.2.2  ad 	mcode_ver = rge_read_phy_ocp(sc, 0xa438);
   1475  1.3.2.2  ad 
   1476  1.3.2.2  ad 	if (sc->rge_type == MAC_CFG2) {
   1477  1.3.2.2  ad 		for (i = 0; i < nitems(rtl8125_mac_cfg2_ephy); i++) {
   1478  1.3.2.2  ad 			rge_write_ephy(sc, rtl8125_mac_cfg2_ephy[i].reg,
   1479  1.3.2.2  ad 			    rtl8125_mac_cfg2_ephy[i].val);
   1480  1.3.2.2  ad 		}
   1481  1.3.2.2  ad 
   1482  1.3.2.2  ad 		if (mcode_ver != RGE_MAC_CFG2_MCODE_VER) {
   1483  1.3.2.2  ad 			/* Disable PHY config. */
   1484  1.3.2.2  ad 			RGE_CLRBIT_1(sc, 0xf2, 0x20);
   1485  1.3.2.2  ad 			DELAY(1000);
   1486  1.3.2.2  ad 
   1487  1.3.2.2  ad 			rge_patch_phy_mcu(sc, 1);
   1488  1.3.2.2  ad 
   1489  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
   1490  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xa438, 0x8600);
   1491  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xa436, 0xb82e);
   1492  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xa438, 0x0001);
   1493  1.3.2.2  ad 
   1494  1.3.2.2  ad 			RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
   1495  1.3.2.2  ad 			for (i = 0; i < nitems(rtl8125_mac_cfg2_mcu); i++) {
   1496  1.3.2.2  ad 				rge_write_phy_ocp(sc,
   1497  1.3.2.2  ad 				    rtl8125_mac_cfg2_mcu[i].reg,
   1498  1.3.2.2  ad 				    rtl8125_mac_cfg2_mcu[i].val);
   1499  1.3.2.2  ad 			}
   1500  1.3.2.2  ad 			RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);
   1501  1.3.2.2  ad 
   1502  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xa436, 0);
   1503  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xa438, 0);
   1504  1.3.2.2  ad 			RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
   1505  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
   1506  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xa438, 0);
   1507  1.3.2.2  ad 
   1508  1.3.2.2  ad 			rge_patch_phy_mcu(sc, 0);
   1509  1.3.2.2  ad 
   1510  1.3.2.2  ad 			/* Enable PHY config. */
   1511  1.3.2.2  ad 			RGE_SETBIT_1(sc, 0xf2, 0x20);
   1512  1.3.2.2  ad 
   1513  1.3.2.2  ad 			/* Write microcode version. */
   1514  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xa436, 0x801e);
   1515  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xa438, RGE_MAC_CFG2_MCODE_VER);
   1516  1.3.2.2  ad 		}
   1517  1.3.2.3  ad 
   1518  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xad40) & ~0x03ff;
   1519  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xad40, val | 0x0084);
   1520  1.3.2.2  ad 		RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
   1521  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
   1522  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xad16, val | 0x0006);
   1523  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xad32) & ~0x03ff;
   1524  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
   1525  1.3.2.2  ad 		RGE_PHY_CLRBIT(sc, 0xac08, 0x1100);
   1526  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xac8a) & ~0xf000;
   1527  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xac8a, val | 0x7000);
   1528  1.3.2.2  ad 		RGE_PHY_SETBIT(sc, 0xad18, 0x0400);
   1529  1.3.2.2  ad 		RGE_PHY_SETBIT(sc, 0xad1a, 0x03ff);
   1530  1.3.2.2  ad 		RGE_PHY_SETBIT(sc, 0xad1c, 0x03ff);
   1531  1.3.2.2  ad 
   1532  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa436, 0x80ea);
   1533  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1534  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa438, val | 0xc400);
   1535  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa436, 0x80eb);
   1536  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xa438) & ~0x0700;
   1537  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa438, val | 0x0300);
   1538  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa436, 0x80f8);
   1539  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1540  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa438, val | 0x1c00);
   1541  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa436, 0x80f1);
   1542  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1543  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa438, val | 0x3000);
   1544  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa436, 0x80fe);
   1545  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1546  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa438, val | 0xa500);
   1547  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa436, 0x8102);
   1548  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1549  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
   1550  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa436, 0x8105);
   1551  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1552  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa438, val | 0x3300);
   1553  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa436, 0x8100);
   1554  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1555  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa438, val | 0x7000);
   1556  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa436, 0x8104);
   1557  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1558  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa438, val | 0xf000);
   1559  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa436, 0x8106);
   1560  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1561  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa438, val | 0x6500);
   1562  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa436, 0x80dc);
   1563  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1564  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa438, val | 0xed00);
   1565  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa436, 0x80df);
   1566  1.3.2.2  ad 		RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
   1567  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa436, 0x80e1);
   1568  1.3.2.2  ad 		RGE_PHY_CLRBIT(sc, 0xa438, 0x0100);
   1569  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xbf06) & ~0x003f;
   1570  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xbf06, val | 0x0038);
   1571  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa436, 0x819f);
   1572  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa438, 0xd0b6);
   1573  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xbc34, 0x5555);
   1574  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xbf0a) & ~0x0e00;
   1575  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xbf0a, val | 0x0a00);
   1576  1.3.2.2  ad 		RGE_PHY_CLRBIT(sc, 0xa5c0, 0x0400);
   1577  1.3.2.2  ad 		RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
   1578  1.3.2.2  ad 	} else {
   1579  1.3.2.2  ad 		for (i = 0; i < nitems(rtl8125_mac_cfg3_ephy); i++)
   1580  1.3.2.2  ad 			rge_write_ephy(sc, rtl8125_mac_cfg3_ephy[i].reg,
   1581  1.3.2.2  ad 			    rtl8125_mac_cfg3_ephy[i].val);
   1582  1.3.2.2  ad 
   1583  1.3.2.2  ad 		if (mcode_ver != RGE_MAC_CFG3_MCODE_VER) {
   1584  1.3.2.2  ad 			/* Disable PHY config. */
   1585  1.3.2.2  ad 			RGE_CLRBIT_1(sc, 0xf2, 0x20);
   1586  1.3.2.2  ad 			DELAY(1000);
   1587  1.3.2.2  ad 
   1588  1.3.2.2  ad 			rge_patch_phy_mcu(sc, 1);
   1589  1.3.2.2  ad 
   1590  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
   1591  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xa438, 0x8601);
   1592  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xa436, 0xb82e);
   1593  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xa438, 0x0001);
   1594  1.3.2.2  ad 
   1595  1.3.2.2  ad 			RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
   1596  1.3.2.2  ad 			for (i = 0; i < nitems(rtl8125_mac_cfg3_mcu); i++) {
   1597  1.3.2.2  ad 				rge_write_phy_ocp(sc,
   1598  1.3.2.2  ad 				    rtl8125_mac_cfg3_mcu[i].reg,
   1599  1.3.2.2  ad 				    rtl8125_mac_cfg3_mcu[i].val);
   1600  1.3.2.2  ad 			}
   1601  1.3.2.2  ad 			RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);
   1602  1.3.2.2  ad 
   1603  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xa436, 0);
   1604  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xa438, 0);
   1605  1.3.2.2  ad 			RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
   1606  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
   1607  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xa438, 0);
   1608  1.3.2.2  ad 
   1609  1.3.2.2  ad 			rge_patch_phy_mcu(sc, 0);
   1610  1.3.2.2  ad 
   1611  1.3.2.2  ad 			/* Enable PHY config. */
   1612  1.3.2.2  ad 			RGE_SETBIT_1(sc, 0xf2, 0x20);
   1613  1.3.2.2  ad 
   1614  1.3.2.2  ad 			/* Write microcode version. */
   1615  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xa436, 0x801e);
   1616  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xa438, RGE_MAC_CFG3_MCODE_VER);
   1617  1.3.2.2  ad 		}
   1618  1.3.2.2  ad 
   1619  1.3.2.2  ad 		RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
   1620  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
   1621  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xad16, val | 0x03ff);
   1622  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xad32) & ~0x003f;
   1623  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
   1624  1.3.2.2  ad 		RGE_PHY_CLRBIT(sc, 0xac08, 0x1000);
   1625  1.3.2.2  ad 		RGE_PHY_CLRBIT(sc, 0xac08, 0x0100);
   1626  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xacc0) & ~0x0003;
   1627  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xacc0, val | 0x0002);
   1628  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xad40) & ~0x00e0;
   1629  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xad40, val | 0x0040);
   1630  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xad40) & ~0x0007;
   1631  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xad40, val | 0x0004);
   1632  1.3.2.2  ad 		RGE_PHY_CLRBIT(sc, 0xac14, 0x0080);
   1633  1.3.2.2  ad 		RGE_PHY_CLRBIT(sc, 0xac80, 0x0300);
   1634  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xac5e) & ~0x0007;
   1635  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xac5e, val | 0x0002);
   1636  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xad4c, 0x00a8);
   1637  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xac5c, 0x01ff);
   1638  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xac8a) & ~0x00f0;
   1639  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xac8a, val | 0x0030);
   1640  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xb87c, 0x80a2);
   1641  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xb87e, 0x0153);
   1642  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xb87c, 0x809c);
   1643  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xb87e, 0x0153);
   1644  1.3.2.2  ad 
   1645  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa436, 0x81b3);
   1646  1.3.2.2  ad 		for (i = 0; i < nitems(mac_cfg3_a438_value); i++)
   1647  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xa438, mac_cfg3_a438_value[i]);
   1648  1.3.2.2  ad 		for (i = 0; i < 26; i++)
   1649  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xa438, 0);
   1650  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa436, 0x8257);
   1651  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa438, 0x020f);
   1652  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa436, 0x80ea);
   1653  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa438, 0x7843);
   1654  1.3.2.2  ad 
   1655  1.3.2.2  ad 		rge_patch_phy_mcu(sc, 1);
   1656  1.3.2.2  ad 		RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
   1657  1.3.2.2  ad 		RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
   1658  1.3.2.2  ad 		for (i = 0; i < nitems(mac_cfg3_b88e_value); i += 2) {
   1659  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xb88e, mac_cfg3_b88e_value[i]);
   1660  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xb890,
   1661  1.3.2.2  ad 			    mac_cfg3_b88e_value[i + 1]);
   1662  1.3.2.2  ad 		}
   1663  1.3.2.2  ad 		RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
   1664  1.3.2.2  ad 		rge_patch_phy_mcu(sc, 0);
   1665  1.3.2.2  ad 
   1666  1.3.2.2  ad 		RGE_PHY_SETBIT(sc, 0xd068, 0x2000);
   1667  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa436, 0x81a2);
   1668  1.3.2.2  ad 		RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
   1669  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xb54c) & ~0xff00;
   1670  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xb54c, val | 0xdb00);
   1671  1.3.2.2  ad 		RGE_PHY_CLRBIT(sc, 0xa454, 0x0001);
   1672  1.3.2.2  ad 		RGE_PHY_SETBIT(sc, 0xa5d4, 0x0020);
   1673  1.3.2.2  ad 		RGE_PHY_CLRBIT(sc, 0xad4e, 0x0010);
   1674  1.3.2.2  ad 		RGE_PHY_CLRBIT(sc, 0xa86a, 0x0001);
   1675  1.3.2.2  ad 		RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
   1676  1.3.2.2  ad 	}
   1677  1.3.2.2  ad 
   1678  1.3.2.2  ad 	/* Disable EEE. */
   1679  1.3.2.2  ad 	RGE_MAC_CLRBIT(sc, 0xe040, 0x0003);
   1680  1.3.2.2  ad 	RGE_MAC_CLRBIT(sc, 0xeb62, 0x0006);
   1681  1.3.2.2  ad 	RGE_PHY_CLRBIT(sc, 0xa432, 0x0010);
   1682  1.3.2.2  ad 	RGE_PHY_CLRBIT(sc, 0xa5d0, 0x0006);
   1683  1.3.2.2  ad 	RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0001);
   1684  1.3.2.2  ad 	RGE_PHY_CLRBIT(sc, 0xa6d8, 0x0010);
   1685  1.3.2.2  ad 	RGE_PHY_CLRBIT(sc, 0xa428, 0x0080);
   1686  1.3.2.2  ad 	RGE_PHY_CLRBIT(sc, 0xa4a2, 0x0200);
   1687  1.3.2.2  ad 
   1688  1.3.2.2  ad 	rge_patch_phy_mcu(sc, 1);
   1689  1.3.2.2  ad 	RGE_MAC_CLRBIT(sc, 0xe052, 0x0001);
   1690  1.3.2.2  ad 	RGE_PHY_CLRBIT(sc, 0xa442, 0x3000);
   1691  1.3.2.2  ad 	RGE_PHY_CLRBIT(sc, 0xa430, 0x8000);
   1692  1.3.2.2  ad 	rge_patch_phy_mcu(sc, 0);
   1693  1.3.2.2  ad }
   1694  1.3.2.2  ad 
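/* Program the station address into the MAC address registers. */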
   1695  1.3.2.2  ad void
   1696  1.3.2.2  ad rge_set_macaddr(struct rge_softc *sc, const uint8_t *addr)
   1697  1.3.2.2  ad {
   1698  1.3.2.2  ad 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
   1699  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_MAC0,
   1700  1.3.2.2  ad 	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
   1701  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_MAC4,
   1702  1.3.2.2  ad 	    addr[5] <<  8 | addr[4]);
   1703  1.3.2.2  ad 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
   1704  1.3.2.2  ad }
   1705  1.3.2.2  ad 
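/* Read the station address currently programmed into the chip. */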
   1706  1.3.2.2  ad void
   1707  1.3.2.2  ad rge_get_macaddr(struct rge_softc *sc, uint8_t *addr)
   1708  1.3.2.2  ad {
   1709  1.3.2.2  ad 	*(uint32_t *)&addr[0] = RGE_READ_4(sc, RGE_ADDR0);
   1710  1.3.2.2  ad 	*(uint16_t *)&addr[4] = RGE_READ_2(sc, RGE_ADDR1);
   1711  1.3.2.2  ad }
   1712  1.3.2.2  ad 
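/*
 * One-time MAC initialization: clear the PME status and clock request
 * bits, disable UPS, clear the MAC MCU patch registers, load the
 * default MCU patches on MAC_CFG3 chips, disable PHY power saving and
 * set the PCIe uncorrectable error status bit via CSI.
 */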
   1713  1.3.2.2  ad void
   1714  1.3.2.2  ad rge_hw_init(struct rge_softc *sc)
   1715  1.3.2.2  ad {
   1716  1.3.2.2  ad 	int i;
   1717  1.3.2.2  ad 
   1718  1.3.2.2  ad 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
   1719  1.3.2.2  ad 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
   1720  1.3.2.2  ad 	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
   1721  1.3.2.2  ad 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
   1722  1.3.2.2  ad 	RGE_CLRBIT_1(sc, 0xf1, 0x80);
   1723  1.3.2.2  ad 
   1724  1.3.2.2  ad 	/* Disable UPS. */
   1725  1.3.2.2  ad 	RGE_MAC_CLRBIT(sc, 0xd40a, 0x0010);
   1726  1.3.2.2  ad 
   1727  1.3.2.2  ad 	/* Configure MAC MCU. */
   1728  1.3.2.2  ad 	rge_write_mac_ocp(sc, 0xfc38, 0);
   1729  1.3.2.2  ad 
   1730  1.3.2.2  ad 	for (i = 0xfc28; i < 0xfc38; i += 2)
   1731  1.3.2.2  ad 		rge_write_mac_ocp(sc, i, 0);
   1732  1.3.2.2  ad 
   1733  1.3.2.2  ad 	DELAY(3000);
   1734  1.3.2.2  ad 	rge_write_mac_ocp(sc, 0xfc26, 0);
   1735  1.3.2.2  ad 
   1736  1.3.2.2  ad 	if (sc->rge_type == MAC_CFG3) {
   1737  1.3.2.2  ad 		for (i = 0; i < nitems(rtl8125_def_bps); i++)
   1738  1.3.2.2  ad 			rge_write_mac_ocp(sc, rtl8125_def_bps[i].reg,
   1739  1.3.2.2  ad 			    rtl8125_def_bps[i].val);
   1740  1.3.2.2  ad 	}
   1741  1.3.2.2  ad 
   1742  1.3.2.2  ad 	/* Disable PHY power saving. */
   1743  1.3.2.2  ad 	rge_disable_phy_ocp_pwrsave(sc);
   1744  1.3.2.2  ad 
   1745  1.3.2.2  ad 	/* Set PCIe uncorrectable error status. */
   1746  1.3.2.2  ad 	rge_write_csi(sc, 0x108,
   1747  1.3.2.2  ad 	    rge_read_csi(sc, 0x108) | 0x00100000);
   1748  1.3.2.2  ad }
   1749  1.3.2.2  ad 
   1750  1.3.2.2  ad void
   1751  1.3.2.2  ad rge_disable_phy_ocp_pwrsave(struct rge_softc *sc)
   1752  1.3.2.2  ad {
   1753  1.3.2.2  ad 	if (rge_read_phy_ocp(sc, 0xc416) != 0x0500) {
   1754  1.3.2.2  ad 		rge_patch_phy_mcu(sc, 1);
   1755  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xc416, 0);
   1756  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xc416, 0x0500);
   1757  1.3.2.2  ad 		rge_patch_phy_mcu(sc, 0);
   1758  1.3.2.2  ad 	}
   1759  1.3.2.2  ad }
   1760  1.3.2.2  ad 
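/*
 * Enter (set != 0) or leave PHY MCU patch mode, then wait for the PHY
 * to signal that the patch request has taken effect.
 */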
   1761  1.3.2.2  ad void
   1762  1.3.2.2  ad rge_patch_phy_mcu(struct rge_softc *sc, int set)
   1763  1.3.2.2  ad {
   1764  1.3.2.2  ad 	uint16_t val;
   1765  1.3.2.2  ad 	int i;
   1766  1.3.2.2  ad 
   1767  1.3.2.2  ad 	if (set)
   1768  1.3.2.2  ad 		RGE_PHY_SETBIT(sc, 0xb820, 0x0010);
   1769  1.3.2.2  ad 	else
   1770  1.3.2.2  ad 		RGE_PHY_CLRBIT(sc, 0xb820, 0x0010);
   1771  1.3.2.2  ad 
   1772  1.3.2.2  ad 	for (i = 0; i < 1000; i++) {
   1773  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xb800) & 0x0040;
   1774  1.3.2.2  ad 		DELAY(100);
   1775  1.3.2.2  ad 		if (val == 0x0040)
   1776  1.3.2.2  ad 			break;
   1777  1.3.2.2  ad 	}
   1778  1.3.2.2  ad 	if (i == 1000)
   1779  1.3.2.2  ad 		printf("%s: timeout waiting to patch phy mcu\n",
   1780  1.3.2.2  ad 		    sc->sc_dev.dv_xname);
   1781  1.3.2.2  ad }
   1782  1.3.2.2  ad 
   1783  1.3.2.2  ad void
   1784  1.3.2.2  ad rge_add_media_types(struct rge_softc *sc)
   1785  1.3.2.2  ad {
   1786  1.3.2.2  ad 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
   1787  1.3.2.2  ad 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
   1788  1.3.2.2  ad 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
   1789  1.3.2.2  ad 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
   1790  1.3.2.2  ad 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T, 0, NULL);
   1791  1.3.2.2  ad 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
   1792  1.3.2.2  ad 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
   1793  1.3.2.2  ad 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX, 0, NULL);
   1794  1.3.2.2  ad }
   1795  1.3.2.2  ad 
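/*
 * Select the interrupt moderation type and derive the interrupt mask
 * and the RX/TX interrupt acknowledge bits.
 */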
   1796  1.3.2.2  ad void
   1797  1.3.2.2  ad rge_config_imtype(struct rge_softc *sc, int imtype)
   1798  1.3.2.2  ad {
   1799  1.3.2.2  ad 	switch (imtype) {
   1800  1.3.2.2  ad 	case RGE_IMTYPE_NONE:
   1801  1.3.2.2  ad 		sc->rge_intrs = RGE_INTRS;
   1802  1.3.2.2  ad 		sc->rge_rx_ack = RGE_ISR_RX_OK | RGE_ISR_RX_DESC_UNAVAIL |
   1803  1.3.2.2  ad 		    RGE_ISR_RX_FIFO_OFLOW;
   1804  1.3.2.2  ad 		sc->rge_tx_ack = RGE_ISR_TX_OK;
   1805  1.3.2.2  ad 		break;
   1806  1.3.2.2  ad 	case RGE_IMTYPE_SIM:
   1807  1.3.2.2  ad 		sc->rge_intrs = RGE_INTRS_TIMER;
   1808  1.3.2.2  ad 		sc->rge_rx_ack = RGE_ISR_PCS_TIMEOUT;
   1809  1.3.2.2  ad 		sc->rge_tx_ack = RGE_ISR_PCS_TIMEOUT;
   1810  1.3.2.2  ad 		break;
   1811  1.3.2.2  ad 	default:
   1812  1.3.2.2  ad 		panic("%s: unknown imtype %d", sc->sc_dev.dv_xname, imtype);
   1813  1.3.2.2  ad 	}
   1814  1.3.2.2  ad }
   1815  1.3.2.2  ad 
   1816  1.3.2.2  ad void
   1817  1.3.2.2  ad rge_disable_sim_im(struct rge_softc *sc)
   1818  1.3.2.2  ad {
   1819  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_TIMERINT, 0);
   1820  1.3.2.2  ad 	sc->rge_timerintr = 0;
   1821  1.3.2.2  ad }
   1822  1.3.2.2  ad 
   1823  1.3.2.2  ad void
   1824  1.3.2.2  ad rge_setup_sim_im(struct rge_softc *sc)
   1825  1.3.2.2  ad {
   1826  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_TIMERINT, 0x2600);
   1827  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
   1828  1.3.2.2  ad 	sc->rge_timerintr = 1;
   1829  1.3.2.2  ad }
   1830  1.3.2.2  ad 
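/*
 * Program the interrupt mask for the selected moderation type and
 * enable or disable the simulated interrupt-moderation timer.
 */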
   1831  1.3.2.2  ad void
   1832  1.3.2.2  ad rge_setup_intr(struct rge_softc *sc, int imtype)
   1833  1.3.2.2  ad {
   1834  1.3.2.2  ad 	rge_config_imtype(sc, imtype);
   1835  1.3.2.2  ad 
   1836  1.3.2.2  ad 	/* Enable interrupts. */
   1837  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
   1838  1.3.2.2  ad 
   1839  1.3.2.2  ad 	switch (imtype) {
   1840  1.3.2.2  ad 	case RGE_IMTYPE_NONE:
   1841  1.3.2.2  ad 		rge_disable_sim_im(sc);
   1842  1.3.2.2  ad 		break;
   1843  1.3.2.2  ad 	case RGE_IMTYPE_SIM:
   1844  1.3.2.2  ad 		rge_setup_sim_im(sc);
   1845  1.3.2.2  ad 		break;
   1846  1.3.2.2  ad 	default:
   1847  1.3.2.2  ad 		panic("%s: unknown imtype %d", sc->sc_dev.dv_xname, imtype);
   1848  1.3.2.2  ad 	}
   1849  1.3.2.2  ad }
   1850  1.3.2.2  ad 
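/*
 * Bring the chip out of out-of-band (management firmware) mode so the
 * host driver can take over: clear the RX filter, disable RealWoW,
 * reset the chip, clear the OOB flag and wait for the handover to
 * complete.
 */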
   1851  1.3.2.2  ad void
   1852  1.3.2.2  ad rge_exit_oob(struct rge_softc *sc)
   1853  1.3.2.2  ad {
   1854  1.3.2.2  ad 	int i;
   1855  1.3.2.2  ad 
   1856  1.3.2.2  ad 	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
   1857  1.3.2.2  ad 	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
   1858  1.3.2.2  ad 	    RGE_RXCFG_ERRPKT);
   1859  1.3.2.2  ad 
   1860  1.3.2.2  ad 	/* Disable RealWoW. */
   1861  1.3.2.2  ad 	rge_write_mac_ocp(sc, 0xc0bc, 0x00ff);
   1862  1.3.2.2  ad 
   1863  1.3.2.2  ad 	rge_reset(sc);
   1864  1.3.2.2  ad 
   1865  1.3.2.2  ad 	/* Disable OOB. */
   1866  1.3.2.2  ad 	RGE_CLRBIT_1(sc, RGE_MCUCMD, RGE_MCUCMD_IS_OOB);
   1867  1.3.2.2  ad 
   1868  1.3.2.2  ad 	RGE_MAC_CLRBIT(sc, 0xe8de, 0x4000);
   1869  1.3.2.2  ad 
   1870  1.3.2.2  ad 	for (i = 0; i < 10; i++) {
   1871  1.3.2.2  ad 		DELAY(100);
   1872  1.3.2.2  ad 		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
   1873  1.3.2.2  ad 			break;
   1874  1.3.2.2  ad 	}
   1875  1.3.2.2  ad 
   1876  1.3.2.2  ad 	rge_write_mac_ocp(sc, 0xc0aa, 0x07d0);
   1877  1.3.2.2  ad 	rge_write_mac_ocp(sc, 0xc0a6, 0x0150);
   1878  1.3.2.2  ad 	rge_write_mac_ocp(sc, 0xc01e, 0x5555);
   1879  1.3.2.2  ad 
   1880  1.3.2.2  ad 	for (i = 0; i < 10; i++) {
   1881  1.3.2.2  ad 		DELAY(100);
   1882  1.3.2.2  ad 		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
   1883  1.3.2.2  ad 			break;
   1884  1.3.2.2  ad 	}
   1885  1.3.2.2  ad 
   1886  1.3.2.2  ad 	if (rge_read_mac_ocp(sc, 0xd42c) & 0x0100) {
   1887  1.3.2.2  ad 		for (i = 0; i < RGE_TIMEOUT; i++) {
    1888  1.3.2.2  ad 			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 2)
   1889  1.3.2.2  ad 				break;
   1890  1.3.2.2  ad 			DELAY(1000);
   1891  1.3.2.2  ad 		}
   1892  1.3.2.2  ad 		RGE_MAC_CLRBIT(sc, 0xd408, 0x0100);
   1893  1.3.2.2  ad 		RGE_PHY_CLRBIT(sc, 0xa468, 0x000a);
   1894  1.3.2.2  ad 	}
   1895  1.3.2.2  ad }
   1896  1.3.2.2  ad 
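/*
 * CSI access routines, used for indirect access to the PCIe
 * configuration space: write the data and address registers and poll
 * until the hardware signals completion.
 */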
   1897  1.3.2.2  ad void
   1898  1.3.2.2  ad rge_write_csi(struct rge_softc *sc, uint32_t reg, uint32_t val)
   1899  1.3.2.2  ad {
   1900  1.3.2.2  ad 	int i;
   1901  1.3.2.2  ad 
   1902  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_CSIDR, val);
   1903  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_CSIAR, (1 << 16) | (reg & RGE_CSIAR_ADDR_MASK) |
   1904  1.3.2.2  ad 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT) | RGE_CSIAR_BUSY);
   1905  1.3.2.2  ad 
   1906  1.3.2.2  ad 	for (i = 0; i < 10; i++) {
    1907  1.3.2.2  ad 		DELAY(100);
    1908  1.3.2.2  ad 		if (!(RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY))
   1909  1.3.2.2  ad 			break;
   1910  1.3.2.2  ad 	}
   1911  1.3.2.2  ad 
   1912  1.3.2.2  ad 	DELAY(20);
   1913  1.3.2.2  ad }
   1914  1.3.2.2  ad 
   1915  1.3.2.2  ad uint32_t
   1916  1.3.2.2  ad rge_read_csi(struct rge_softc *sc, uint32_t reg)
   1917  1.3.2.2  ad {
   1918  1.3.2.2  ad 	int i;
   1919  1.3.2.2  ad 
   1920  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_CSIAR, (1 << 16) | (reg & RGE_CSIAR_ADDR_MASK) |
   1921  1.3.2.2  ad 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT));
   1922  1.3.2.2  ad 
   1923  1.3.2.2  ad 	for (i = 0; i < 10; i++) {
    1924  1.3.2.2  ad 		DELAY(100);
    1925  1.3.2.2  ad 		if (RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY)
   1926  1.3.2.2  ad 			break;
   1927  1.3.2.2  ad 	}
   1928  1.3.2.2  ad 
   1929  1.3.2.2  ad 	DELAY(20);
   1930  1.3.2.2  ad 
   1931  1.3.2.2  ad 	return (RGE_READ_4(sc, RGE_CSIDR));
   1932  1.3.2.2  ad }
   1933  1.3.2.2  ad 
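/*
 * MAC OCP access routines: indirect access to the MAC's on-chip
 * registers through the RGE_MACOCP window.
 */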
   1934  1.3.2.2  ad void
   1935  1.3.2.2  ad rge_write_mac_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
   1936  1.3.2.2  ad {
   1937  1.3.2.2  ad 	uint32_t tmp;
   1938  1.3.2.2  ad 
   1939  1.3.2.2  ad 	tmp = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
   1940  1.3.2.2  ad 	tmp += val;
   1941  1.3.2.2  ad 	tmp |= RGE_MACOCP_BUSY;
   1942  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_MACOCP, tmp);
   1943  1.3.2.2  ad }
   1944  1.3.2.2  ad 
   1945  1.3.2.2  ad uint16_t
   1946  1.3.2.2  ad rge_read_mac_ocp(struct rge_softc *sc, uint16_t reg)
   1947  1.3.2.2  ad {
   1948  1.3.2.2  ad 	uint32_t val;
   1949  1.3.2.2  ad 
   1950  1.3.2.2  ad 	val = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
   1951  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_MACOCP, val);
   1952  1.3.2.2  ad 
   1953  1.3.2.2  ad 	return (RGE_READ_4(sc, RGE_MACOCP) & RGE_MACOCP_DATA_MASK);
   1954  1.3.2.2  ad }
   1955  1.3.2.2  ad 
   1956  1.3.2.2  ad void
   1957  1.3.2.2  ad rge_write_ephy(struct rge_softc *sc, uint16_t reg, uint16_t val)
   1958  1.3.2.2  ad {
   1959  1.3.2.2  ad 	uint32_t tmp;
   1960  1.3.2.2  ad 	int i;
   1961  1.3.2.2  ad 
   1962  1.3.2.2  ad 	tmp = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
   1963  1.3.2.2  ad 	tmp |= RGE_EPHYAR_BUSY | (val & RGE_EPHYAR_DATA_MASK);
   1964  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_EPHYAR, tmp);
   1965  1.3.2.2  ad 
   1966  1.3.2.2  ad 	for (i = 0; i < 10; i++) {
   1967  1.3.2.2  ad 		DELAY(100);
   1968  1.3.2.2  ad 		if (!(RGE_READ_4(sc, RGE_EPHYAR) & RGE_EPHYAR_BUSY))
   1969  1.3.2.2  ad 			break;
   1970  1.3.2.2  ad 	}
   1971  1.3.2.2  ad 
   1972  1.3.2.2  ad 	DELAY(20);
   1973  1.3.2.2  ad }
   1974  1.3.2.2  ad 
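/*
 * Write a MII PHY register by translating the register number into
 * the corresponding PHY OCP address.
 */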
   1975  1.3.2.2  ad void
   1976  1.3.2.2  ad rge_write_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg, uint16_t val)
   1977  1.3.2.2  ad {
   1978  1.3.2.2  ad 	uint16_t off, phyaddr;
   1979  1.3.2.2  ad 
   1980  1.3.2.2  ad 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
   1981  1.3.2.2  ad 	phyaddr <<= 4;
   1982  1.3.2.2  ad 
   1983  1.3.2.2  ad 	off = addr ? reg : 0x10 + (reg % 8);
   1984  1.3.2.2  ad 
   1985  1.3.2.2  ad 	phyaddr += (off - 16) << 1;
   1986  1.3.2.2  ad 
   1987  1.3.2.2  ad 	rge_write_phy_ocp(sc, phyaddr, val);
   1988  1.3.2.2  ad }
   1989  1.3.2.2  ad 
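/*
 * PHY OCP access routines: issue the request through the RGE_PHYOCP
 * window and poll until the hardware signals completion.
 */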
   1990  1.3.2.2  ad void
   1991  1.3.2.2  ad rge_write_phy_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
   1992  1.3.2.2  ad {
   1993  1.3.2.2  ad 	uint32_t tmp;
   1994  1.3.2.2  ad 	int i;
   1995  1.3.2.2  ad 
   1996  1.3.2.2  ad 	tmp = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
   1997  1.3.2.2  ad 	tmp |= RGE_PHYOCP_BUSY | val;
   1998  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_PHYOCP, tmp);
   1999  1.3.2.2  ad 
   2000  1.3.2.2  ad 	for (i = 0; i < RGE_TIMEOUT; i++) {
   2001  1.3.2.2  ad 		DELAY(1);
   2002  1.3.2.2  ad 		if (!(RGE_READ_4(sc, RGE_PHYOCP) & RGE_PHYOCP_BUSY))
   2003  1.3.2.2  ad 			break;
   2004  1.3.2.2  ad 	}
   2005  1.3.2.2  ad }
   2006  1.3.2.2  ad 
   2007  1.3.2.2  ad uint16_t
   2008  1.3.2.2  ad rge_read_phy_ocp(struct rge_softc *sc, uint16_t reg)
   2009  1.3.2.2  ad {
   2010  1.3.2.2  ad 	uint32_t val;
   2011  1.3.2.2  ad 	int i;
   2012  1.3.2.2  ad 
   2013  1.3.2.2  ad 	val = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
   2014  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_PHYOCP, val);
   2015  1.3.2.2  ad 
   2016  1.3.2.2  ad 	for (i = 0; i < RGE_TIMEOUT; i++) {
   2017  1.3.2.2  ad 		DELAY(1);
   2018  1.3.2.2  ad 		val = RGE_READ_4(sc, RGE_PHYOCP);
   2019  1.3.2.2  ad 		if (val & RGE_PHYOCP_BUSY)
   2020  1.3.2.2  ad 			break;
   2021  1.3.2.2  ad 	}
   2022  1.3.2.2  ad 
   2023  1.3.2.2  ad 	return (val & RGE_PHYOCP_DATA_MASK);
   2024  1.3.2.2  ad }
   2025  1.3.2.2  ad 
   2026  1.3.2.2  ad int
   2027  1.3.2.2  ad rge_get_link_status(struct rge_softc *sc)
   2028  1.3.2.2  ad {
   2029  1.3.2.2  ad 	return ((RGE_READ_2(sc, RGE_PHYSTAT) & RGE_PHYSTAT_LINK) ? 1 : 0);
   2030  1.3.2.2  ad }
   2031  1.3.2.2  ad 
   2032  1.3.2.2  ad void
   2033  1.3.2.2  ad rge_txstart(struct work *wk, void *arg)
   2034  1.3.2.2  ad {
   2035  1.3.2.2  ad 	struct rge_softc *sc = arg;
   2036  1.3.2.2  ad 
   2037  1.3.2.2  ad 	RGE_WRITE_2(sc, RGE_TXSTART, RGE_TXSTART_START);
   2038  1.3.2.2  ad }
   2039  1.3.2.2  ad 
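/* One-second periodic timer: poll the link state. */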
   2040  1.3.2.2  ad void
   2041  1.3.2.2  ad rge_tick(void *arg)
   2042  1.3.2.2  ad {
   2043  1.3.2.2  ad 	struct rge_softc *sc = arg;
   2044  1.3.2.2  ad 	int s;
   2045  1.3.2.2  ad 
   2046  1.3.2.2  ad 	s = splnet();
   2047  1.3.2.2  ad 	rge_link_state(sc);
   2048  1.3.2.2  ad 	splx(s);
   2049  1.3.2.2  ad 
   2050  1.3.2.2  ad 	timeout_add_sec(&sc->sc_timeout, 1);
   2051  1.3.2.2  ad }
   2052  1.3.2.2  ad 
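/* Report link state changes to the network stack. */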
   2053  1.3.2.2  ad void
   2054  1.3.2.2  ad rge_link_state(struct rge_softc *sc)
   2055  1.3.2.2  ad {
   2056  1.3.2.2  ad 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   2057  1.3.2.2  ad 	int link = LINK_STATE_DOWN;
   2058  1.3.2.2  ad 
   2059  1.3.2.2  ad 	if (rge_get_link_status(sc))
   2060  1.3.2.2  ad 		link = LINK_STATE_UP;
   2061  1.3.2.2  ad 
    2062  1.3.2.2  ad 	if (ifp->if_link_state != link)
    2063  1.3.2.2  ad 		if_link_state_change(ifp, link);
   2066  1.3.2.2  ad }
   2067