      1  1.3.2.2  ad /*	$NetBSD: if_rge.c,v 1.3.2.2 2020/01/17 21:47:31 ad Exp $	*/
      2  1.3.2.2  ad /*	$OpenBSD: if_rge.c,v 1.2 2020/01/02 09:00:45 kevlo Exp $	*/
      3  1.3.2.2  ad 
      4  1.3.2.2  ad /*
      5  1.3.2.2  ad  * Copyright (c) 2019 Kevin Lo <kevlo (at) openbsd.org>
      6  1.3.2.2  ad  *
      7  1.3.2.2  ad  * Permission to use, copy, modify, and distribute this software for any
      8  1.3.2.2  ad  * purpose with or without fee is hereby granted, provided that the above
      9  1.3.2.2  ad  * copyright notice and this permission notice appear in all copies.
     10  1.3.2.2  ad  *
     11  1.3.2.2  ad  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
     12  1.3.2.2  ad  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
     13  1.3.2.2  ad  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
     14  1.3.2.2  ad  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
     15  1.3.2.2  ad  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
     16  1.3.2.2  ad  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
     17  1.3.2.2  ad  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
     18  1.3.2.2  ad  */
     19  1.3.2.2  ad 
     20  1.3.2.2  ad #include <sys/cdefs.h>
     21  1.3.2.2  ad __KERNEL_RCSID(0, "$NetBSD: if_rge.c,v 1.3.2.2 2020/01/17 21:47:31 ad Exp $");
     22  1.3.2.2  ad 
     23  1.3.2.2  ad /* #include "bpfilter.h" Sevan */
     24  1.3.2.2  ad /* #include "vlan.h" Sevan */
     25  1.3.2.2  ad 
     26  1.3.2.2  ad #include <sys/types.h>
     27  1.3.2.2  ad 
     28  1.3.2.2  ad #include <sys/param.h>
     29  1.3.2.2  ad #include <sys/systm.h>
     30  1.3.2.2  ad #include <sys/sockio.h>
     31  1.3.2.2  ad #include <sys/mbuf.h>
     32  1.3.2.2  ad #include <sys/malloc.h>
     33  1.3.2.2  ad #include <sys/kernel.h>
     34  1.3.2.2  ad #include <sys/socket.h>
     35  1.3.2.2  ad #include <sys/device.h>
     36  1.3.2.2  ad #include <sys/endian.h>
     37  1.3.2.2  ad #include <sys/callout.h>
     38  1.3.2.2  ad #include <sys/workqueue.h>
     39  1.3.2.2  ad 
     40  1.3.2.2  ad #include <net/if.h>
     41  1.3.2.2  ad 
     42  1.3.2.2  ad #include <net/if_dl.h>
     43  1.3.2.2  ad #include <net/if_ether.h>
     44  1.3.2.2  ad 
     45  1.3.2.2  ad #include <net/if_media.h>
     46  1.3.2.2  ad 
     47  1.3.2.2  ad #include <netinet/in.h>
     49  1.3.2.2  ad 
     50  1.3.2.2  ad #if NBPFILTER > 0
     51  1.3.2.2  ad #include <net/bpf.h>
     52  1.3.2.2  ad #endif
     53  1.3.2.2  ad 
     54  1.3.2.2  ad #include <sys/bus.h>
     55  1.3.2.2  ad #include <machine/intr.h>
     56  1.3.2.2  ad 
     57  1.3.2.2  ad #include <dev/mii/mii.h>
     58  1.3.2.2  ad 
     59  1.3.2.2  ad #include <dev/pci/pcivar.h>
     60  1.3.2.2  ad #include <dev/pci/pcireg.h>
     61  1.3.2.2  ad #include <dev/pci/pcidevs.h>
     62  1.3.2.2  ad 
     63  1.3.2.2  ad #include <dev/pci/if_rgereg.h>
     64  1.3.2.2  ad 
     65  1.3.2.2  ad #ifdef __NetBSD__
     66  1.3.2.2  ad #define letoh32 	htole32
     67  1.3.2.2  ad #define nitems(x) 	__arraycount(x)
     68  1.3.2.2  ad #define MBUF_LIST_INITIALIZER() 	{ NULL, NULL, 0 }
     69  1.3.2.2  ad struct mbuf_list {
     70  1.3.2.2  ad 	struct mbuf 	*ml_head;
     71  1.3.2.2  ad 	struct mbuf 	*ml_tail;
     72  1.3.2.2  ad 	u_int 	ml_len;
     73  1.3.2.2  ad };
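
/*
 * XXX Hedged compat sketch: rge_rxeof() below uses OpenBSD's ml_enqueue()
 * and hands the resulting mbuf_list to the input path.  NetBSD has no such
 * helpers, so these minimal static inlines are an assumption about what the
 * glue could look like, not OpenBSD's implementation.
 */
static inline void
ml_enqueue(struct mbuf_list *ml, struct mbuf *m)
{

	/* Append the packet to the tail of the singly-linked packet list. */
	m->m_nextpkt = NULL;
	if (ml->ml_tail == NULL)
		ml->ml_head = m;
	else
		ml->ml_tail->m_nextpkt = m;
	ml->ml_tail = m;
	ml->ml_len++;
}

static inline struct mbuf *
ml_dequeue(struct mbuf_list *ml)
{
	struct mbuf *m = ml->ml_head;

	/* Detach the head packet, if any, and fix up the tail pointer. */
	if (m != NULL) {
		ml->ml_head = m->m_nextpkt;
		if (ml->ml_head == NULL)
			ml->ml_tail = NULL;
		m->m_nextpkt = NULL;
		ml->ml_len--;
	}
	return (m);
}
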
     74  1.3.2.2  ad #ifdef NET_MPSAFE
     75  1.3.2.2  ad #define 	RGE_MPSAFE	1
     76  1.3.2.2  ad #define 	CALLOUT_FLAGS	CALLOUT_MPSAFE
     77  1.3.2.2  ad #else
     78  1.3.2.2  ad #define 	CALLOUT_FLAGS	0
     79  1.3.2.2  ad #endif
     80  1.3.2.2  ad #endif
     81  1.3.2.2  ad 
     82  1.3.2.2  ad static int		rge_match(device_t, cfdata_t, void *);
     83  1.3.2.2  ad static void		rge_attach(device_t, device_t, void *);
     84  1.3.2.2  ad int		rge_intr(void *);
     85  1.3.2.2  ad int		rge_encap(struct rge_softc *, struct mbuf *, int);
     86  1.3.2.2  ad int		rge_ioctl(struct ifnet *, u_long, void *);
     87  1.3.2.2  ad void		rge_start(struct ifnet *);
     88  1.3.2.2  ad void		rge_watchdog(struct ifnet *);
     89  1.3.2.2  ad int		rge_init(struct ifnet *);
     90  1.3.2.2  ad void		rge_stop(struct ifnet *);
     91  1.3.2.2  ad int		rge_ifmedia_upd(struct ifnet *);
     92  1.3.2.2  ad void		rge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
     93  1.3.2.2  ad int		rge_allocmem(struct rge_softc *);
     94  1.3.2.2  ad int		rge_newbuf(struct rge_softc *, int);
     95  1.3.2.2  ad void		rge_discard_rxbuf(struct rge_softc *, int);
     96  1.3.2.2  ad int		rge_rx_list_init(struct rge_softc *);
     97  1.3.2.2  ad void		rge_tx_list_init(struct rge_softc *);
     98  1.3.2.2  ad int		rge_rxeof(struct rge_softc *);
     99  1.3.2.2  ad int		rge_txeof(struct rge_softc *);
    100  1.3.2.2  ad void		rge_reset(struct rge_softc *);
    101  1.3.2.2  ad void		rge_iff(struct rge_softc *);
    102  1.3.2.2  ad void		rge_set_phy_power(struct rge_softc *, int);
    103  1.3.2.2  ad void		rge_phy_config(struct rge_softc *);
    104  1.3.2.2  ad void		rge_set_macaddr(struct rge_softc *, const uint8_t *);
    105  1.3.2.2  ad void		rge_get_macaddr(struct rge_softc *, uint8_t *);
    106  1.3.2.2  ad void		rge_hw_init(struct rge_softc *);
    107  1.3.2.2  ad void		rge_disable_phy_ocp_pwrsave(struct rge_softc *);
    108  1.3.2.2  ad void		rge_patch_phy_mcu(struct rge_softc *, int);
    109  1.3.2.2  ad void		rge_add_media_types(struct rge_softc *);
    110  1.3.2.2  ad void		rge_config_imtype(struct rge_softc *, int);
    111  1.3.2.2  ad void		rge_disable_sim_im(struct rge_softc *);
    112  1.3.2.2  ad void		rge_setup_sim_im(struct rge_softc *);
    113  1.3.2.2  ad void		rge_setup_intr(struct rge_softc *, int);
    114  1.3.2.2  ad void		rge_exit_oob(struct rge_softc *);
    115  1.3.2.2  ad void		rge_write_csi(struct rge_softc *, uint32_t, uint32_t);
    116  1.3.2.2  ad uint32_t	rge_read_csi(struct rge_softc *, uint32_t);
    117  1.3.2.2  ad void		rge_write_mac_ocp(struct rge_softc *, uint16_t, uint16_t);
    118  1.3.2.2  ad uint16_t	rge_read_mac_ocp(struct rge_softc *, uint16_t);
    119  1.3.2.2  ad void		rge_write_ephy(struct rge_softc *, uint16_t, uint16_t);
    120  1.3.2.2  ad void		rge_write_phy(struct rge_softc *, uint16_t, uint16_t, uint16_t);
    121  1.3.2.2  ad void		rge_write_phy_ocp(struct rge_softc *, uint16_t, uint16_t);
    122  1.3.2.2  ad uint16_t	rge_read_phy_ocp(struct rge_softc *, uint16_t);
    123  1.3.2.2  ad int		rge_get_link_status(struct rge_softc *);
    124  1.3.2.2  ad void		rge_txstart(struct work *, void *);
    125  1.3.2.2  ad void		rge_tick(void *);
    126  1.3.2.2  ad void		rge_link_state(struct rge_softc *);
    127  1.3.2.2  ad 
    128  1.3.2.2  ad static const struct {
    129  1.3.2.2  ad 	uint16_t reg;
    130  1.3.2.2  ad 	uint16_t val;
    131  1.3.2.2  ad }  rtl8125_def_bps[] = {
    132  1.3.2.2  ad 	RTL8125_DEF_BPS
    133  1.3.2.2  ad }, rtl8125_mac_cfg2_ephy[] = {
    134  1.3.2.2  ad 	RTL8125_MAC_CFG2_EPHY
    135  1.3.2.2  ad }, rtl8125_mac_cfg2_mcu[] = {
    136  1.3.2.2  ad 	RTL8125_MAC_CFG2_MCU
    137  1.3.2.2  ad }, rtl8125_mac_cfg3_ephy[] = {
    138  1.3.2.2  ad 	RTL8125_MAC_CFG3_EPHY
    139  1.3.2.2  ad }, rtl8125_mac_cfg3_mcu[] = {
    140  1.3.2.2  ad 	RTL8125_MAC_CFG3_MCU
    141  1.3.2.2  ad };
    142  1.3.2.2  ad 
    143  1.3.2.2  ad CFATTACH_DECL_NEW(rge, sizeof(struct rge_softc), rge_match, rge_attach,
    144  1.3.2.2  ad 		NULL, NULL); /* Sevan - detach function? */
    145  1.3.2.2  ad 
    146  1.3.2.2  ad extern struct cfdriver rge_cd;
    147  1.3.2.2  ad 
    148  1.3.2.2  ad static const struct {
    149  1.3.2.2  ad 	pci_vendor_id_t 	vendor;
    150  1.3.2.2  ad 	pci_product_id_t 	product;
} rge_devices[] = {
    152  1.3.2.2  ad 	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_E3000 },
    153  1.3.2.2  ad 	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8125 },
    154  1.3.2.2  ad };
    155  1.3.2.2  ad 
    156  1.3.2.2  ad static int
    157  1.3.2.2  ad rge_match(device_t parent, cfdata_t match, void *aux)
    158  1.3.2.2  ad {
	struct pci_attach_args *pa = aux;
    160  1.3.2.2  ad 	int n;
    161  1.3.2.2  ad 
	for (n = 0; n < __arraycount(rge_devices); n++) {
    163  1.3.2.2  ad 		if (PCI_VENDOR(pa->pa_id) == rge_devices[n].vendor &&
    164  1.3.2.2  ad 		    PCI_PRODUCT(pa->pa_id) == rge_devices[n].product)
    165  1.3.2.2  ad 			return 1;
    166  1.3.2.2  ad 	}
    167  1.3.2.2  ad 
    168  1.3.2.2  ad 	return 0;
    169  1.3.2.2  ad }
    170  1.3.2.2  ad 
static void
    172  1.3.2.2  ad rge_attach(device_t parent, device_t self, void *aux)
    173  1.3.2.2  ad {
    174  1.3.2.2  ad 	struct rge_softc *sc = (struct rge_softc *)self;
    175  1.3.2.2  ad 	struct pci_attach_args *pa = aux;
    176  1.3.2.2  ad 	pci_chipset_tag_t pc = pa->pa_pc;
    177  1.3.2.2  ad 	pci_intr_handle_t ih;
    178  1.3.2.2  ad 	char intrbuf[PCI_INTRSTR_LEN];
    179  1.3.2.2  ad 	const char *intrstr = NULL;
    180  1.3.2.2  ad 	struct ifnet *ifp;
    181  1.3.2.2  ad 	pcireg_t reg;
    182  1.3.2.2  ad 	uint32_t hwrev;
    183  1.3.2.2  ad 	uint8_t eaddr[ETHER_ADDR_LEN];
    184  1.3.2.2  ad 	int offset;
    185  1.3.2.2  ad 
    186  1.3.2.2  ad 	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
    187  1.3.2.2  ad 
    188  1.3.2.2  ad 	/*
    189  1.3.2.2  ad 	 * Map control/status registers.
    190  1.3.2.2  ad 	 */
    191  1.3.2.2  ad 	if (pci_mapreg_map(pa, RGE_PCI_BAR2, PCI_MAPREG_TYPE_MEM |
    192  1.3.2.2  ad 	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->rge_btag, &sc->rge_bhandle,
    193  1.3.2.2  ad 	    NULL, &sc->rge_bsize)) {
    194  1.3.2.2  ad 		if (pci_mapreg_map(pa, RGE_PCI_BAR1, PCI_MAPREG_TYPE_MEM |
    195  1.3.2.2  ad 		    PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->rge_btag,
    196  1.3.2.2  ad 		    &sc->rge_bhandle, NULL, &sc->rge_bsize)) {
    197  1.3.2.2  ad 			if (pci_mapreg_map(pa, RGE_PCI_BAR0, PCI_MAPREG_TYPE_IO,
    198  1.3.2.2  ad 			    0, &sc->rge_btag, &sc->rge_bhandle, NULL,
    199  1.3.2.2  ad 			    &sc->rge_bsize)) {
    200  1.3.2.2  ad 				printf(": can't map mem or i/o space\n");
    201  1.3.2.2  ad 				return;
    202  1.3.2.2  ad 			}
    203  1.3.2.2  ad 		}
    204  1.3.2.2  ad 	}
    205  1.3.2.2  ad 
    206  1.3.2.2  ad 	/*
    207  1.3.2.2  ad 	 * Allocate interrupt.
    208  1.3.2.2  ad 	 */
	/*
	 * XXX The OpenBSD driver tries pci_intr_map_msi() first; the NetBSD
	 * equivalent would be pci_intr_alloc() (see the #if 0 sketch below).
	 * For now map a single vector and keep the original behaviour of
	 * flagging it as MSI.
	 */
	if (pci_intr_map(pa, &ih) == 0)
		sc->rge_flags |= RGE_FLAG_MSI;
	else {
		printf(": couldn't map interrupt\n");
		return;
	}
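#if 0
	/*
	 * XXX Hedged sketch, not compiled: request an MSI vector with
	 * pci_intr_alloc() and fall back to INTx.  The counts handling and
	 * the pci_intr_type() check are assumptions and untested here.
	 */
	pci_intr_handle_t *ihp;
	int counts[PCI_INTR_TYPE_SIZE];

	memset(counts, 0, sizeof(counts));
	counts[PCI_INTR_TYPE_MSI] = 1;
	counts[PCI_INTR_TYPE_INTX] = 1;
	if (pci_intr_alloc(pa, &ihp, counts, PCI_INTR_TYPE_MSI) == 0) {
		if (pci_intr_type(pc, ihp[0]) == PCI_INTR_TYPE_MSI)
			sc->rge_flags |= RGE_FLAG_MSI;
		ih = ihp[0];
	}
#endif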
    215  1.3.2.2  ad 	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
    216  1.3.2.2  ad 	sc->sc_ih = pci_intr_establish_xname(pc, ih, IPL_NET, rge_intr,
    217  1.3.2.2  ad 	    sc, sc->sc_dev.dv_xname);
    218  1.3.2.2  ad 	if (sc->sc_ih == NULL) {
    219  1.3.2.2  ad 		printf(": couldn't establish interrupt");
    220  1.3.2.2  ad 		if (intrstr != NULL)
    221  1.3.2.2  ad 			printf(" at %s", intrstr);
    222  1.3.2.2  ad 		printf("\n");
    223  1.3.2.2  ad 		return;
    224  1.3.2.2  ad 	}
    225  1.3.2.2  ad 	printf(": %s", intrstr);
    226  1.3.2.2  ad 
    227  1.3.2.2  ad 	sc->sc_dmat = pa->pa_dmat;
    228  1.3.2.2  ad 	sc->sc_pc = pa->pa_pc;
    229  1.3.2.2  ad 	sc->sc_tag = pa->pa_tag;
    230  1.3.2.2  ad 
    231  1.3.2.2  ad 	/* Determine hardware revision */
    232  1.3.2.2  ad 	hwrev = RGE_READ_4(sc, RGE_TXCFG) & RGE_TXCFG_HWREV;
    233  1.3.2.2  ad 	switch (hwrev) {
    234  1.3.2.2  ad 	case 0x60800000:
    235  1.3.2.2  ad 		sc->rge_type = MAC_CFG2;
    236  1.3.2.2  ad 		break;
    237  1.3.2.2  ad 	case 0x60900000:
    238  1.3.2.2  ad 		sc->rge_type = MAC_CFG3;
    239  1.3.2.2  ad 		break;
    240  1.3.2.2  ad 	default:
    241  1.3.2.2  ad 		printf(": unknown version 0x%08x\n", hwrev);
    242  1.3.2.2  ad 		return;
    243  1.3.2.2  ad 	}
    244  1.3.2.2  ad 
    245  1.3.2.2  ad 	rge_config_imtype(sc, RGE_IMTYPE_SIM);
    246  1.3.2.2  ad 
    247  1.3.2.2  ad 	/*
    248  1.3.2.2  ad 	 * PCI Express check.
    249  1.3.2.2  ad 	 */
    250  1.3.2.2  ad 	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
    251  1.3.2.2  ad 	    &offset, NULL)) {
    252  1.3.2.2  ad 		/* Disable PCIe ASPM. */
    253  1.3.2.2  ad 		reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
    254  1.3.2.2  ad 		    offset + PCIE_LCSR);
		reg &= ~(PCIE_LCSR_ASPM_L0S | PCIE_LCSR_ASPM_L1);
    256  1.3.2.2  ad 		pci_conf_write(pa->pa_pc, pa->pa_tag, offset + PCIE_LCSR,
    257  1.3.2.2  ad 		    reg);
    258  1.3.2.2  ad 	}
    259  1.3.2.2  ad 
    260  1.3.2.2  ad 	rge_exit_oob(sc);
    261  1.3.2.2  ad 	rge_hw_init(sc);
    262  1.3.2.2  ad 
    263  1.3.2.2  ad 	rge_get_macaddr(sc, eaddr);
    264  1.3.2.2  ad 	printf(", address %s\n", ether_sprintf(eaddr));
    265  1.3.2.2  ad 
    266  1.3.2.2  ad 	memcpy(sc->sc_enaddr, eaddr, ETHER_ADDR_LEN);
    267  1.3.2.2  ad 
    268  1.3.2.2  ad 	rge_set_phy_power(sc, 1);
    269  1.3.2.2  ad 	rge_phy_config(sc);
    270  1.3.2.2  ad 
    271  1.3.2.2  ad 	if (rge_allocmem(sc))
    272  1.3.2.2  ad 		return;
    273  1.3.2.2  ad 
    274  1.3.2.2  ad 	ifp = &sc->sc_ec.ec_if;
    275  1.3.2.2  ad 	ifp->if_softc = sc;
    276  1.3.2.2  ad 	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
    277  1.3.2.2  ad 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    278  1.3.2.2  ad #ifdef RGE_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
    280  1.3.2.2  ad #endif
    281  1.3.2.2  ad 	ifp->if_ioctl = rge_ioctl;
    282  1.3.2.2  ad 	ifp->if_start = rge_start;
    283  1.3.2.2  ad 	ifp->if_watchdog = rge_watchdog;
    284  1.3.2.2  ad 	IFQ_SET_MAXLEN(&ifp->if_snd, RGE_TX_LIST_CNT);
    285  1.3.2.2  ad 	ifp->if_mtu = RGE_JUMBO_MTU;
    286  1.3.2.2  ad 
	ifp->if_capabilities = IFCAP_CSUM_IPv4_Rx | IFCAP_CSUM_IPv4_Tx |
	    IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_TCPv4_Tx |
	    IFCAP_CSUM_UDPv4_Rx | IFCAP_CSUM_UDPv4_Tx;
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
    290  1.3.2.2  ad 
#if NVLAN > 0
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
#endif
    294  1.3.2.2  ad 
    295  1.3.2.2  ad 	callout_init(&sc->sc_timeout, CALLOUT_FLAGS);
    296  1.3.2.2  ad 	callout_setfunc(&sc->sc_timeout, rge_tick, sc);
	/*
	 * XXX The OpenBSD driver registers rge_txstart as a task here via
	 * task_set(); calling it directly at attach time is a placeholder
	 * until the TX kick is deferred to a workqueue (see the sketch
	 * below).
	 */
	rge_txstart(&sc->sc_task, sc);
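#if 0
	/*
	 * XXX Hedged sketch, not compiled: defer the TX kick to a workqueue,
	 * mirroring OpenBSD's task.  "sc_wq" is a hypothetical softc member
	 * and sc_task is assumed to be a struct work.
	 */
	if (workqueue_create(&sc->sc_wq, device_xname(self), rge_txstart, sc,
	    PRI_NONE, IPL_NET, WQ_MPSAFE) != 0) {
		printf(": couldn't create workqueue\n");
		return;
	}
#endif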
    298  1.3.2.2  ad 
    299  1.3.2.2  ad 	/* Initialize ifmedia structures. */
    300  1.3.2.2  ad 	ifmedia_init(&sc->sc_media, IFM_IMASK, rge_ifmedia_upd,
    301  1.3.2.2  ad 	    rge_ifmedia_sts);
    302  1.3.2.2  ad 	rge_add_media_types(sc);
    303  1.3.2.2  ad 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
    304  1.3.2.2  ad 	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
    305  1.3.2.2  ad 	sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media;
    306  1.3.2.2  ad 
    307  1.3.2.2  ad 	if_attach(ifp);
    308  1.3.2.2  ad 	ether_ifattach(ifp, eaddr);
    309  1.3.2.2  ad }
    310  1.3.2.2  ad 
    311  1.3.2.2  ad int
    312  1.3.2.2  ad rge_intr(void *arg)
    313  1.3.2.2  ad {
    314  1.3.2.2  ad 	struct rge_softc *sc = arg;
    315  1.3.2.2  ad 	struct ifnet *ifp = &sc->sc_ec.ec_if;
    316  1.3.2.2  ad 	uint32_t status;
    317  1.3.2.2  ad 	int claimed = 0, rx, tx;
    318  1.3.2.2  ad 
    319  1.3.2.2  ad 	if (!(ifp->if_flags & IFF_RUNNING))
    320  1.3.2.2  ad 		return (0);
    321  1.3.2.2  ad 
    322  1.3.2.2  ad 	/* Disable interrupts. */
    323  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_IMR, 0);
    324  1.3.2.2  ad 
    325  1.3.2.2  ad 	status = RGE_READ_4(sc, RGE_ISR);
    326  1.3.2.2  ad 	if (!(sc->rge_flags & RGE_FLAG_MSI)) {
    327  1.3.2.2  ad 		if ((status & RGE_INTRS) == 0 || status == 0xffffffff)
    328  1.3.2.2  ad 			return (0);
    329  1.3.2.2  ad 	}
    330  1.3.2.2  ad 	if (status)
    331  1.3.2.2  ad 		RGE_WRITE_4(sc, RGE_ISR, status);
    332  1.3.2.2  ad 
    333  1.3.2.2  ad 	if (status & RGE_ISR_PCS_TIMEOUT)
    334  1.3.2.2  ad 		claimed = 1;
    335  1.3.2.2  ad 
    336  1.3.2.2  ad 	rx = tx = 0;
    337  1.3.2.2  ad 	if (status & RGE_INTRS) {
    338  1.3.2.2  ad 		if (status &
    339  1.3.2.2  ad 		    (sc->rge_rx_ack | RGE_ISR_RX_ERR | RGE_ISR_RX_FIFO_OFLOW)) {
    340  1.3.2.2  ad 			rx |= rge_rxeof(sc);
    341  1.3.2.2  ad 			claimed = 1;
    342  1.3.2.2  ad 		}
    343  1.3.2.2  ad 
    344  1.3.2.2  ad 		if (status & (sc->rge_tx_ack | RGE_ISR_TX_ERR)) {
    345  1.3.2.2  ad 			tx |= rge_txeof(sc);
    346  1.3.2.2  ad 			claimed = 1;
    347  1.3.2.2  ad 		}
    348  1.3.2.2  ad 
    349  1.3.2.2  ad 		if (status & RGE_ISR_SYSTEM_ERR) {
    350  1.3.2.2  ad 			KERNEL_LOCK(1, NULL);
    351  1.3.2.2  ad 			rge_init(ifp);
    352  1.3.2.2  ad 			KERNEL_UNLOCK_ONE(NULL);
    353  1.3.2.2  ad 			claimed = 1;
    354  1.3.2.2  ad 		}
    355  1.3.2.2  ad 	}
    356  1.3.2.2  ad 
    357  1.3.2.2  ad 	if (sc->rge_timerintr) {
    358  1.3.2.2  ad 		if ((tx | rx) == 0) {
    359  1.3.2.2  ad 			/*
    360  1.3.2.2  ad 			 * Nothing needs to be processed, fallback
    361  1.3.2.2  ad 			 * to use TX/RX interrupts.
    362  1.3.2.2  ad 			 */
    363  1.3.2.2  ad 			rge_setup_intr(sc, RGE_IMTYPE_NONE);
    364  1.3.2.2  ad 
    365  1.3.2.2  ad 			/*
    366  1.3.2.2  ad 			 * Recollect, mainly to avoid the possible
    367  1.3.2.2  ad 			 * race introduced by changing interrupt
    368  1.3.2.2  ad 			 * masks.
    369  1.3.2.2  ad 			 */
    370  1.3.2.2  ad 			rge_rxeof(sc);
    371  1.3.2.2  ad 			rge_txeof(sc);
    372  1.3.2.2  ad 		} else
    373  1.3.2.2  ad 			RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
    374  1.3.2.2  ad 	} else if (tx | rx) {
    375  1.3.2.2  ad 		/*
    376  1.3.2.2  ad 		 * Assume that using simulated interrupt moderation
    377  1.3.2.2  ad 		 * (hardware timer based) could reduce the interrupt
    378  1.3.2.2  ad 		 * rate.
    379  1.3.2.2  ad 		 */
    380  1.3.2.2  ad 		rge_setup_intr(sc, RGE_IMTYPE_SIM);
    381  1.3.2.2  ad 	}
    382  1.3.2.2  ad 
    383  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
    384  1.3.2.2  ad 
    385  1.3.2.2  ad 	return (claimed);
    386  1.3.2.2  ad }
    387  1.3.2.2  ad 
    388  1.3.2.2  ad int
    389  1.3.2.2  ad rge_encap(struct rge_softc *sc, struct mbuf *m, int idx)
    390  1.3.2.2  ad {
    391  1.3.2.2  ad 	struct rge_tx_desc *d = NULL;
    392  1.3.2.2  ad 	struct rge_txq *txq;
    393  1.3.2.2  ad 	bus_dmamap_t txmap;
    394  1.3.2.2  ad 	uint32_t cmdsts, cflags = 0;
    395  1.3.2.2  ad 	int cur, error, i, last, nsegs;
    396  1.3.2.2  ad 
    397  1.3.2.2  ad 	/*
    398  1.3.2.2  ad 	 * Set RGE_TDEXTSTS_IPCSUM if any checksum offloading is requested.
    399  1.3.2.2  ad 	 * Otherwise, RGE_TDEXTSTS_TCPCSUM / RGE_TDEXTSTS_UDPCSUM does not
	 * take effect.
    401  1.3.2.2  ad 	 */
    402  1.3.2.2  ad 	if ((m->m_pkthdr.csum_flags &
    403  1.3.2.2  ad 	    (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) != 0) {
    404  1.3.2.2  ad 		cflags |= RGE_TDEXTSTS_IPCSUM;
		if (m->m_pkthdr.csum_flags & M_CSUM_TCPv4)
			cflags |= RGE_TDEXTSTS_TCPCSUM;
		if (m->m_pkthdr.csum_flags & M_CSUM_UDPv4)
			cflags |= RGE_TDEXTSTS_UDPCSUM;
    409  1.3.2.2  ad 	}
    410  1.3.2.2  ad 
    411  1.3.2.2  ad 	txq = &sc->rge_ldata.rge_txq[idx];
    412  1.3.2.2  ad 	txmap = txq->txq_dmamap;
    413  1.3.2.2  ad 
    414  1.3.2.2  ad 	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m, BUS_DMA_NOWAIT);
    415  1.3.2.2  ad 	switch (error) {
    416  1.3.2.2  ad 	case 0:
    417  1.3.2.2  ad 		break;
    418  1.3.2.2  ad 	case EFBIG: /* mbuf chain is too fragmented */
		if (m_defrag(m, M_DONTWAIT) != NULL &&
    420  1.3.2.2  ad 		    bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m,
    421  1.3.2.2  ad 		    BUS_DMA_NOWAIT) == 0)
    422  1.3.2.2  ad 			break;
    423  1.3.2.2  ad 
    424  1.3.2.2  ad 		/* FALLTHROUGH */
    425  1.3.2.2  ad 	default:
    426  1.3.2.2  ad 		return (0);
    427  1.3.2.2  ad 	}
    428  1.3.2.2  ad 
    429  1.3.2.2  ad 	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
    430  1.3.2.2  ad 	    BUS_DMASYNC_PREWRITE);
    431  1.3.2.2  ad 
    432  1.3.2.2  ad 	nsegs = txmap->dm_nsegs;
    433  1.3.2.2  ad 
    434  1.3.2.2  ad 	/* Set up hardware VLAN tagging. */
    435  1.3.2.2  ad #if NVLAN > 0
    436  1.3.2.2  ad 	if (m->m_flags & M_VLANTAG)
		cflags |= bswap16(m->m_pkthdr.ether_vtag | RGE_TDEXTSTS_VTAG);
    438  1.3.2.2  ad #endif
    439  1.3.2.2  ad 
    440  1.3.2.2  ad 	cur = idx;
    441  1.3.2.2  ad 	cmdsts = RGE_TDCMDSTS_SOF;
    442  1.3.2.2  ad 
    443  1.3.2.2  ad 	for (i = 0; i < txmap->dm_nsegs; i++) {
    444  1.3.2.2  ad 		d = &sc->rge_ldata.rge_tx_list[cur];
    445  1.3.2.2  ad 
    446  1.3.2.2  ad 		d->rge_extsts = htole32(cflags);
    447  1.3.2.2  ad 		d->rge_addrlo = htole32(RGE_ADDR_LO(txmap->dm_segs[i].ds_addr));
    448  1.3.2.2  ad 		d->rge_addrhi = htole32(RGE_ADDR_HI(txmap->dm_segs[i].ds_addr));
    449  1.3.2.2  ad 
    450  1.3.2.2  ad 		cmdsts |= txmap->dm_segs[i].ds_len;
    451  1.3.2.2  ad 
    452  1.3.2.2  ad 		if (cur == RGE_TX_LIST_CNT - 1)
    453  1.3.2.2  ad 			cmdsts |= RGE_TDCMDSTS_EOR;
    454  1.3.2.2  ad 
    455  1.3.2.2  ad 		d->rge_cmdsts = htole32(cmdsts);
    456  1.3.2.2  ad 
    457  1.3.2.2  ad 		last = cur;
    458  1.3.2.2  ad 		cmdsts = RGE_TDCMDSTS_OWN;
    459  1.3.2.2  ad 		cur = RGE_NEXT_TX_DESC(cur);
    460  1.3.2.2  ad 	}
    461  1.3.2.2  ad 
    462  1.3.2.2  ad 	/* Set EOF on the last descriptor. */
    463  1.3.2.2  ad 	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_EOF);
    464  1.3.2.2  ad 
    465  1.3.2.2  ad 	/* Transfer ownership of packet to the chip. */
    466  1.3.2.2  ad 	d = &sc->rge_ldata.rge_tx_list[idx];
    467  1.3.2.2  ad 
    468  1.3.2.2  ad 	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_OWN);
    469  1.3.2.2  ad 
    470  1.3.2.2  ad 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
    471  1.3.2.2  ad 	    cur * sizeof(struct rge_tx_desc), sizeof(struct rge_tx_desc),
    472  1.3.2.2  ad 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    473  1.3.2.2  ad 
    474  1.3.2.2  ad 	/* Update info of TX queue and descriptors. */
    475  1.3.2.2  ad 	txq->txq_mbuf = m;
    476  1.3.2.2  ad 	txq->txq_descidx = last;
    477  1.3.2.2  ad 
    478  1.3.2.2  ad 	return (nsegs);
    479  1.3.2.2  ad }
    480  1.3.2.2  ad 
    481  1.3.2.2  ad int
    482  1.3.2.2  ad rge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
    483  1.3.2.2  ad {
    484  1.3.2.2  ad 	struct rge_softc *sc = ifp->if_softc;
    485  1.3.2.2  ad 	struct ifreq *ifr = (struct ifreq *)data;
    486  1.3.2.2  ad 	int s, error = 0;
    487  1.3.2.2  ad 
    488  1.3.2.2  ad 	s = splnet();
    489  1.3.2.2  ad 
    490  1.3.2.2  ad 	switch (cmd) {
    491  1.3.2.2  ad 	case SIOCSIFADDR:
    492  1.3.2.2  ad 		ifp->if_flags |= IFF_UP;
    493  1.3.2.2  ad 		if (!(ifp->if_flags & IFF_RUNNING))
    494  1.3.2.2  ad 			rge_init(ifp);
    495  1.3.2.2  ad 		break;
    496  1.3.2.2  ad 	case SIOCSIFFLAGS:
    497  1.3.2.2  ad 		if (ifp->if_flags & IFF_UP) {
    498  1.3.2.2  ad 			if (ifp->if_flags & IFF_RUNNING)
    499  1.3.2.2  ad 				error = ENETRESET;
    500  1.3.2.2  ad 			else
    501  1.3.2.2  ad 				rge_init(ifp);
    502  1.3.2.2  ad 		} else {
    503  1.3.2.2  ad 			if (ifp->if_flags & IFF_RUNNING)
    504  1.3.2.2  ad 				rge_stop(ifp);
    505  1.3.2.2  ad 		}
    506  1.3.2.2  ad 		break;
    507  1.3.2.2  ad 	case SIOCGIFMEDIA:
    508  1.3.2.2  ad 	case SIOCSIFMEDIA:
    509  1.3.2.2  ad 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
    510  1.3.2.2  ad 		break;
    511  1.3.2.2  ad 	case SIOCSIFMTU:
		if (ifr->ifr_mtu > RGE_JUMBO_MTU) {
    513  1.3.2.2  ad 			error = EINVAL;
    514  1.3.2.2  ad 			break;
    515  1.3.2.2  ad 		}
    516  1.3.2.2  ad 		ifp->if_mtu = ifr->ifr_mtu;
    517  1.3.2.2  ad 		break;
    518  1.3.2.2  ad 	default:
    519  1.3.2.2  ad 		error = ether_ioctl(ifp, cmd, data);
    520  1.3.2.2  ad 	}
    521  1.3.2.2  ad 
    522  1.3.2.2  ad 	if (error == ENETRESET) {
    523  1.3.2.2  ad 		if (ifp->if_flags & IFF_RUNNING)
    524  1.3.2.2  ad 			rge_iff(sc);
    525  1.3.2.2  ad 		error = 0;
    526  1.3.2.2  ad 	}
    527  1.3.2.2  ad 
    528  1.3.2.2  ad 	splx(s);
    529  1.3.2.2  ad 	return (error);
    530  1.3.2.2  ad }
    531  1.3.2.2  ad 
    532  1.3.2.2  ad void
    533  1.3.2.2  ad rge_start(struct ifnet *ifp)
    534  1.3.2.2  ad {
    535  1.3.2.2  ad 	struct rge_softc *sc = ifp->if_softc;
    536  1.3.2.2  ad 	struct mbuf *m;
    537  1.3.2.2  ad 	int free, idx, used;
    538  1.3.2.2  ad 	int queued = 0;
    539  1.3.2.2  ad 
    540  1.3.2.2  ad #define LINK_STATE_IS_UP(_s)    \
    541  1.3.2.2  ad 	((_s) >= LINK_STATE_UP || (_s) == LINK_STATE_UNKNOWN)
    542  1.3.2.2  ad 
    543  1.3.2.2  ad 	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
		IFQ_PURGE(&ifp->if_snd);
    545  1.3.2.2  ad 		return;
    546  1.3.2.2  ad 	}
    547  1.3.2.2  ad 
    548  1.3.2.2  ad 	/* Calculate free space. */
    549  1.3.2.2  ad 	idx = sc->rge_ldata.rge_txq_prodidx;
    550  1.3.2.2  ad 	free = sc->rge_ldata.rge_txq_considx;
    551  1.3.2.2  ad 	if (free <= idx)
    552  1.3.2.2  ad 		free += RGE_TX_LIST_CNT;
    553  1.3.2.2  ad 	free -= idx;
    554  1.3.2.2  ad 
    555  1.3.2.2  ad 	for (;;) {
    556  1.3.2.2  ad 		if (RGE_TX_NSEGS >= free + 2) {
    557  1.3.2.2  ad 			SET(ifp->if_flags, IFF_OACTIVE);
    558  1.3.2.2  ad 			break;
    559  1.3.2.2  ad 		}
    560  1.3.2.2  ad 
    561  1.3.2.2  ad 		IFQ_DEQUEUE(&ifp->if_snd, m);
    562  1.3.2.2  ad 		if (m == NULL)
    563  1.3.2.2  ad 			break;
    564  1.3.2.2  ad 
    565  1.3.2.2  ad 		used = rge_encap(sc, m, idx);
    566  1.3.2.2  ad 		if (used == 0) {
    567  1.3.2.2  ad 			m_freem(m);
    568  1.3.2.2  ad 			continue;
    569  1.3.2.2  ad 		}
    570  1.3.2.2  ad 
    571  1.3.2.2  ad 		KASSERT(used <= free);
    572  1.3.2.2  ad 		free -= used;
    573  1.3.2.2  ad 
    574  1.3.2.2  ad #if NBPFILTER > 0
    575  1.3.2.2  ad 		if (ifp->if_bpf)
    576  1.3.2.2  ad 			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
    577  1.3.2.2  ad #endif
    578  1.3.2.2  ad 
    579  1.3.2.2  ad 		idx += used;
    580  1.3.2.2  ad 		if (idx >= RGE_TX_LIST_CNT)
    581  1.3.2.2  ad 			idx -= RGE_TX_LIST_CNT;
    582  1.3.2.2  ad 
    583  1.3.2.2  ad 		queued++;
    584  1.3.2.2  ad 	}
    585  1.3.2.2  ad 
    586  1.3.2.2  ad 	if (queued == 0)
    587  1.3.2.2  ad 		return;
    588  1.3.2.2  ad 
    589  1.3.2.2  ad 	/* Set a timeout in case the chip goes out to lunch. */
    590  1.3.2.2  ad 	ifp->if_timer = 5;
    591  1.3.2.2  ad 
    592  1.3.2.2  ad 	sc->rge_ldata.rge_txq_prodidx = idx;
	/* XXX ifq_serialize() is OpenBSD API; kick the transmit logic directly. */
	rge_txstart(&sc->sc_task, sc);
    594  1.3.2.2  ad }
    595  1.3.2.2  ad 
    596  1.3.2.2  ad void
    597  1.3.2.2  ad rge_watchdog(struct ifnet *ifp)
    598  1.3.2.2  ad {
    599  1.3.2.2  ad 	struct rge_softc *sc = ifp->if_softc;
    600  1.3.2.2  ad 
    601  1.3.2.2  ad 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
    602  1.3.2.2  ad 	ifp->if_oerrors++;
    603  1.3.2.2  ad 
    604  1.3.2.2  ad 	rge_init(ifp);
    605  1.3.2.2  ad }
    606  1.3.2.2  ad 
    607  1.3.2.2  ad int
    608  1.3.2.2  ad rge_init(struct ifnet *ifp)
    609  1.3.2.2  ad {
    610  1.3.2.2  ad 	struct rge_softc *sc = ifp->if_softc;
    611  1.3.2.2  ad 	uint32_t val;
    612  1.3.2.2  ad 	uint16_t max_frame_size;
    613  1.3.2.2  ad 	int i;
    614  1.3.2.2  ad 
    615  1.3.2.2  ad 	rge_stop(ifp);
    616  1.3.2.2  ad 
    617  1.3.2.2  ad 	/* Set MAC address. */
    618  1.3.2.2  ad 	rge_set_macaddr(sc, sc->sc_enaddr);
    619  1.3.2.2  ad 
	/* Set maximum frame size, but don't let the MTU be less than ETHERMTU. */
    621  1.3.2.2  ad 	if (ifp->if_mtu < ETHERMTU)
    622  1.3.2.2  ad 		max_frame_size = ETHERMTU;
    623  1.3.2.2  ad 	else
    624  1.3.2.2  ad 		max_frame_size = ifp->if_mtu;
    625  1.3.2.2  ad 
    626  1.3.2.2  ad 	max_frame_size += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
    627  1.3.2.2  ad 	    ETHER_CRC_LEN + 1;
    628  1.3.2.2  ad 
    629  1.3.2.2  ad 	if (max_frame_size > RGE_JUMBO_FRAMELEN)
    630  1.3.2.2  ad 		max_frame_size -= 1;
    631  1.3.2.2  ad 
    632  1.3.2.2  ad 	RGE_WRITE_2(sc, RGE_RXMAXSIZE, max_frame_size);
    633  1.3.2.2  ad 
    634  1.3.2.2  ad 	/* Initialize RX descriptors list. */
    635  1.3.2.2  ad 	if (rge_rx_list_init(sc) == ENOBUFS) {
    636  1.3.2.2  ad 		printf("%s: init failed: no memory for RX buffers\n",
    637  1.3.2.2  ad 		    sc->sc_dev.dv_xname);
    638  1.3.2.2  ad 		rge_stop(ifp);
    639  1.3.2.2  ad 		return (ENOBUFS);
    640  1.3.2.2  ad 	}
    641  1.3.2.2  ad 
    642  1.3.2.2  ad 	/* Initialize TX descriptors. */
    643  1.3.2.2  ad 	rge_tx_list_init(sc);
    644  1.3.2.2  ad 
    645  1.3.2.2  ad 	/* Load the addresses of the RX and TX lists into the chip. */
    646  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_LO,
    647  1.3.2.2  ad 	    RGE_ADDR_LO(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
    648  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_HI,
    649  1.3.2.2  ad 	    RGE_ADDR_HI(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
    650  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_LO,
    651  1.3.2.2  ad 	    RGE_ADDR_LO(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
    652  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_HI,
    653  1.3.2.2  ad 	    RGE_ADDR_HI(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
    654  1.3.2.2  ad 
    655  1.3.2.2  ad 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
    656  1.3.2.2  ad 
    657  1.3.2.2  ad 	RGE_CLRBIT_1(sc, 0xf1, 0x80);
    658  1.3.2.2  ad 	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
    659  1.3.2.2  ad 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
    660  1.3.2.2  ad 	RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_RDY_TO_L23);
    661  1.3.2.2  ad 
    662  1.3.2.2  ad 	/* Clear interrupt moderation timer. */
    663  1.3.2.2  ad 	for (i = 0; i < 64; i++)
    664  1.3.2.2  ad 		RGE_WRITE_4(sc, RGE_IM(i), 0);
    665  1.3.2.2  ad 
    666  1.3.2.2  ad 	/* Set the initial RX and TX configurations. */
    667  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_RXCFG, RGE_RXCFG_CONFIG);
    668  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_TXCFG, RGE_TXCFG_CONFIG);
    669  1.3.2.2  ad 
    670  1.3.2.2  ad 	val = rge_read_csi(sc, 0x70c) & ~0xff000000;
    671  1.3.2.2  ad 	rge_write_csi(sc, 0x70c, val | 0x27000000);
    672  1.3.2.2  ad 
    673  1.3.2.2  ad 	/* Enable hardware optimization function. */
    674  1.3.2.2  ad 	val = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x78) & ~0x00007000;
    675  1.3.2.2  ad 	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x78, val | 0x00005000);
    676  1.3.2.2  ad 
    677  1.3.2.2  ad 	RGE_WRITE_2(sc, 0x0382, 0x221b);
    678  1.3.2.2  ad 	RGE_WRITE_1(sc, 0x4500, 0);
    679  1.3.2.2  ad 	RGE_WRITE_2(sc, 0x4800, 0);
    680  1.3.2.2  ad 	RGE_CLRBIT_1(sc, RGE_CFG1, RGE_CFG1_SPEED_DOWN);
    681  1.3.2.2  ad 
    682  1.3.2.2  ad 	rge_write_mac_ocp(sc, 0xc140, 0xffff);
    683  1.3.2.2  ad 	rge_write_mac_ocp(sc, 0xc142, 0xffff);
    684  1.3.2.2  ad 
    685  1.3.2.2  ad 	val = rge_read_mac_ocp(sc, 0xd3e2) & ~0x0fff;
    686  1.3.2.2  ad 	rge_write_mac_ocp(sc, 0xd3e2, val | 0x03a9);
    687  1.3.2.2  ad 
    688  1.3.2.2  ad 	RGE_MAC_CLRBIT(sc, 0xd3e4, 0x00ff);
    689  1.3.2.2  ad 	RGE_MAC_SETBIT(sc, 0xe860, 0x0080);
    690  1.3.2.2  ad 	RGE_MAC_SETBIT(sc, 0xeb58, 0x0001);
    691  1.3.2.2  ad 
    692  1.3.2.2  ad 	val = rge_read_mac_ocp(sc, 0xe614) & ~0x0700;
    693  1.3.2.2  ad 	rge_write_mac_ocp(sc, 0xe614, val | 0x0400);
    694  1.3.2.2  ad 
    695  1.3.2.2  ad 	RGE_MAC_CLRBIT(sc, 0xe63e, 0x0c00);
    696  1.3.2.2  ad 
    697  1.3.2.2  ad 	val = rge_read_mac_ocp(sc, 0xe63e) & ~0x0030;
    698  1.3.2.2  ad 	rge_write_mac_ocp(sc, 0xe63e, val | 0x0020);
    699  1.3.2.2  ad 
    700  1.3.2.2  ad 	RGE_MAC_SETBIT(sc, 0xc0b4, 0x000c);
    701  1.3.2.2  ad 
    702  1.3.2.2  ad 	val = rge_read_mac_ocp(sc, 0xeb6a) & ~0x007f;
    703  1.3.2.2  ad 	rge_write_mac_ocp(sc, 0xeb6a, val | 0x0033);
    704  1.3.2.2  ad 
    705  1.3.2.2  ad 	val = rge_read_mac_ocp(sc, 0xeb50) & ~0x03e0;
    706  1.3.2.2  ad 	rge_write_mac_ocp(sc, 0xeb50, val | 0x0040);
    707  1.3.2.2  ad 
    708  1.3.2.2  ad 	val = rge_read_mac_ocp(sc, 0xe056) & ~0x00f0;
    709  1.3.2.2  ad 	rge_write_mac_ocp(sc, 0xe056, val | 0x0030);
    710  1.3.2.2  ad 
    711  1.3.2.2  ad 	RGE_WRITE_1(sc, RGE_TDFNR, 0x10);
    712  1.3.2.2  ad 
    713  1.3.2.2  ad 	RGE_MAC_CLRBIT(sc, 0xe040, 0x1000);
    714  1.3.2.2  ad 
    715  1.3.2.2  ad 	val = rge_read_mac_ocp(sc, 0xe0c0) & ~0x4f0f;
    716  1.3.2.2  ad 	rge_write_mac_ocp(sc, 0xe0c0, val | 0x4403);
    717  1.3.2.2  ad 
    718  1.3.2.2  ad 	RGE_MAC_SETBIT(sc, 0xe052, 0x0068);
    719  1.3.2.2  ad 	RGE_MAC_CLRBIT(sc, 0xe052, 0x0080);
    720  1.3.2.2  ad 
    721  1.3.2.2  ad 	val = rge_read_mac_ocp(sc, 0xc0ac) & ~0x0080;
    722  1.3.2.2  ad 	rge_write_mac_ocp(sc, 0xc0ac, val | 0x1f00);
    723  1.3.2.2  ad 
    724  1.3.2.2  ad 	val = rge_read_mac_ocp(sc, 0xd430) & ~0x0fff;
    725  1.3.2.2  ad 	rge_write_mac_ocp(sc, 0xd430, val | 0x047f);
    726  1.3.2.2  ad 
    727  1.3.2.2  ad 	RGE_MAC_SETBIT(sc, 0xe84c, 0x00c0);
    728  1.3.2.2  ad 
    729  1.3.2.2  ad 	/* Disable EEE plus. */
    730  1.3.2.2  ad 	RGE_MAC_CLRBIT(sc, 0xe080, 0x0002);
    731  1.3.2.2  ad 
    732  1.3.2.2  ad 	RGE_MAC_CLRBIT(sc, 0xea1c, 0x0004);
    733  1.3.2.2  ad 
    734  1.3.2.2  ad 	RGE_MAC_SETBIT(sc, 0xeb54, 0x0001);
    735  1.3.2.2  ad 	DELAY(1);
    736  1.3.2.2  ad 	RGE_MAC_CLRBIT(sc, 0xeb54, 0x0001);
    737  1.3.2.2  ad 
    738  1.3.2.2  ad 	RGE_CLRBIT_4(sc, 0x1880, 0x0030);
    739  1.3.2.2  ad 
    740  1.3.2.2  ad 	rge_write_mac_ocp(sc, 0xe098, 0xc302);
    741  1.3.2.2  ad 
	if (sc->sc_ec.ec_capabilities & ETHERCAP_VLAN_HWTAGGING)
    743  1.3.2.2  ad 		RGE_SETBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);
    744  1.3.2.2  ad 
    745  1.3.2.2  ad 	RGE_SETBIT_2(sc, RGE_CPLUSCMD, RGE_CPLUSCMD_RXCSUM);
    746  1.3.2.2  ad 
    747  1.3.2.2  ad 	for (i = 0; i < 10; i++) {
    748  1.3.2.2  ad 		if (!(rge_read_mac_ocp(sc, 0xe00e) & 0x2000))
    749  1.3.2.2  ad 			break;
    750  1.3.2.2  ad 		DELAY(1000);
    751  1.3.2.2  ad 	}
    752  1.3.2.2  ad 
    753  1.3.2.2  ad 	/* Disable RXDV gate. */
    754  1.3.2.2  ad 	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
    755  1.3.2.2  ad 	DELAY(2000);
    756  1.3.2.2  ad 
    757  1.3.2.2  ad 	rge_ifmedia_upd(ifp);
    758  1.3.2.2  ad 
    759  1.3.2.2  ad 	/* Enable transmit and receive. */
    760  1.3.2.2  ad 	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_TXENB | RGE_CMD_RXENB);
    761  1.3.2.2  ad 
    762  1.3.2.2  ad 	/* Program promiscuous mode and multicast filters. */
    763  1.3.2.2  ad 	rge_iff(sc);
    764  1.3.2.2  ad 
    765  1.3.2.2  ad 	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
    766  1.3.2.2  ad 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
    767  1.3.2.2  ad 
    768  1.3.2.2  ad 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
    769  1.3.2.2  ad 
    770  1.3.2.2  ad 	/* Enable interrupts. */
    771  1.3.2.2  ad 	rge_setup_intr(sc, RGE_IMTYPE_SIM);
    772  1.3.2.2  ad 
    773  1.3.2.2  ad 	ifp->if_flags |= IFF_RUNNING;
    774  1.3.2.2  ad 	CLR(ifp->if_flags, IFF_OACTIVE);
    775  1.3.2.2  ad 
	callout_schedule(&sc->sc_timeout, hz);
    777  1.3.2.2  ad 
    778  1.3.2.2  ad 	return (0);
    779  1.3.2.2  ad }
    780  1.3.2.2  ad 
    781  1.3.2.2  ad /*
    782  1.3.2.2  ad  * Stop the adapter and free any mbufs allocated to the RX and TX lists.
    783  1.3.2.2  ad  */
    784  1.3.2.2  ad void
    785  1.3.2.2  ad rge_stop(struct ifnet *ifp)
    786  1.3.2.2  ad {
    787  1.3.2.2  ad 	struct rge_softc *sc = ifp->if_softc;
    788  1.3.2.2  ad 	int i;
    789  1.3.2.2  ad 
	callout_stop(&sc->sc_timeout);
    791  1.3.2.2  ad 
    792  1.3.2.2  ad 	ifp->if_timer = 0;
    793  1.3.2.2  ad 	ifp->if_flags &= ~IFF_RUNNING;
    794  1.3.2.2  ad 	sc->rge_timerintr = 0;
    795  1.3.2.2  ad 
    796  1.3.2.2  ad 	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
    797  1.3.2.2  ad 	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
    798  1.3.2.2  ad 	    RGE_RXCFG_ERRPKT);
    799  1.3.2.2  ad 
    800  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_IMR, 0);
    801  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_ISR, 0xffffffff);
    802  1.3.2.2  ad 
    803  1.3.2.2  ad 	rge_reset(sc);
    804  1.3.2.2  ad 
    805  1.3.2.2  ad 	intr_barrier(sc->sc_ih);
/*	ifq_barrier(&ifp->if_snd); XXX OpenBSD queue API */
    807  1.3.2.2  ad /*	ifq_clr_oactive(&ifp->if_snd); Sevan - OpenBSD queue API */
    808  1.3.2.2  ad 
    809  1.3.2.2  ad 	if (sc->rge_head != NULL) {
    810  1.3.2.2  ad 		m_freem(sc->rge_head);
    811  1.3.2.2  ad 		sc->rge_head = sc->rge_tail = NULL;
    812  1.3.2.2  ad 	}
    813  1.3.2.2  ad 
    814  1.3.2.2  ad 	/* Free the TX list buffers. */
    815  1.3.2.2  ad 	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
    816  1.3.2.2  ad 		if (sc->rge_ldata.rge_txq[i].txq_mbuf != NULL) {
    817  1.3.2.2  ad 			bus_dmamap_unload(sc->sc_dmat,
    818  1.3.2.2  ad 			    sc->rge_ldata.rge_txq[i].txq_dmamap);
    819  1.3.2.2  ad 			m_freem(sc->rge_ldata.rge_txq[i].txq_mbuf);
    820  1.3.2.2  ad 			sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
    821  1.3.2.2  ad 		}
    822  1.3.2.2  ad 	}
    823  1.3.2.2  ad 
    824  1.3.2.2  ad 	/* Free the RX list buffers. */
    825  1.3.2.2  ad 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
    826  1.3.2.2  ad 		if (sc->rge_ldata.rge_rxq[i].rxq_mbuf != NULL) {
    827  1.3.2.2  ad 			bus_dmamap_unload(sc->sc_dmat,
    828  1.3.2.2  ad 			    sc->rge_ldata.rge_rxq[i].rxq_dmamap);
    829  1.3.2.2  ad 			m_freem(sc->rge_ldata.rge_rxq[i].rxq_mbuf);
    830  1.3.2.2  ad 			sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
    831  1.3.2.2  ad 		}
    832  1.3.2.2  ad 	}
    833  1.3.2.2  ad }
    834  1.3.2.2  ad 
    835  1.3.2.2  ad /*
    836  1.3.2.2  ad  * Set media options.
    837  1.3.2.2  ad  */
    838  1.3.2.2  ad int
    839  1.3.2.2  ad rge_ifmedia_upd(struct ifnet *ifp)
    840  1.3.2.2  ad {
    841  1.3.2.2  ad 	struct rge_softc *sc = ifp->if_softc;
    842  1.3.2.2  ad 	struct ifmedia *ifm = &sc->sc_media;
    843  1.3.2.2  ad 	int anar, gig, val;
    844  1.3.2.2  ad 
    845  1.3.2.2  ad 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
    846  1.3.2.2  ad 		return (EINVAL);
    847  1.3.2.2  ad 
    848  1.3.2.2  ad 	/* Disable Gigabit Lite. */
    849  1.3.2.2  ad 	RGE_PHY_CLRBIT(sc, 0xa428, 0x0200);
    850  1.3.2.2  ad 	RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0001);
    851  1.3.2.2  ad 
    852  1.3.2.2  ad 	val = rge_read_phy_ocp(sc, 0xa5d4);
    853  1.3.2.2  ad 	val &= ~RGE_ADV_2500TFDX;
    854  1.3.2.2  ad 
    855  1.3.2.2  ad 	anar = gig = 0;
    856  1.3.2.2  ad 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
    857  1.3.2.2  ad 	case IFM_AUTO:
    858  1.3.2.2  ad 		anar |= ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
    859  1.3.2.2  ad 		gig |= GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
    860  1.3.2.2  ad 		val |= RGE_ADV_2500TFDX;
    861  1.3.2.2  ad 		break;
    862  1.3.2.2  ad 	case IFM_2500_T:
    863  1.3.2.2  ad 		anar |= ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
    864  1.3.2.2  ad 		gig |= GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
    865  1.3.2.2  ad 		val |= RGE_ADV_2500TFDX;
    866  1.3.2.2  ad 		ifp->if_baudrate = IF_Mbps(2500);
    867  1.3.2.2  ad 		break;
    868  1.3.2.2  ad 	case IFM_1000_T:
    869  1.3.2.2  ad 		anar |= ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
    870  1.3.2.2  ad 		gig |= GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
    871  1.3.2.2  ad 		ifp->if_baudrate = IF_Gbps(1);
    872  1.3.2.2  ad 		break;
    873  1.3.2.2  ad 	case IFM_100_TX:
    874  1.3.2.2  ad 		anar |= ANAR_TX | ANAR_TX_FD;
    875  1.3.2.2  ad 		ifp->if_baudrate = IF_Mbps(100);
    876  1.3.2.2  ad 		break;
    877  1.3.2.2  ad 	case IFM_10_T:
    878  1.3.2.2  ad 		anar |= ANAR_10 | ANAR_10_FD;
    879  1.3.2.2  ad 		ifp->if_baudrate = IF_Mbps(10);
    880  1.3.2.2  ad 		break;
    881  1.3.2.2  ad 	default:
    882  1.3.2.2  ad 		printf("%s: unsupported media type\n", sc->sc_dev.dv_xname);
    883  1.3.2.2  ad 		return (EINVAL);
    884  1.3.2.2  ad 	}
    885  1.3.2.2  ad 
    886  1.3.2.2  ad 	rge_write_phy(sc, 0, MII_ANAR, anar | ANAR_PAUSE_ASYM | ANAR_FC);
    887  1.3.2.2  ad 	rge_write_phy(sc, 0, MII_100T2CR, gig);
    888  1.3.2.2  ad 	rge_write_phy_ocp(sc, 0xa5d4, val);
    889  1.3.2.2  ad 	rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_STARTNEG);
    890  1.3.2.2  ad 
    891  1.3.2.2  ad 	return (0);
    892  1.3.2.2  ad }
    893  1.3.2.2  ad 
    894  1.3.2.2  ad /*
    895  1.3.2.2  ad  * Report current media status.
    896  1.3.2.2  ad  */
    897  1.3.2.2  ad void
    898  1.3.2.2  ad rge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
    899  1.3.2.2  ad {
    900  1.3.2.2  ad 	struct rge_softc *sc = ifp->if_softc;
    901  1.3.2.2  ad 	uint16_t status = 0;
    902  1.3.2.2  ad 
    903  1.3.2.2  ad 	ifmr->ifm_status = IFM_AVALID;
    904  1.3.2.2  ad 	ifmr->ifm_active = IFM_ETHER;
    905  1.3.2.2  ad 
    906  1.3.2.2  ad 	if (rge_get_link_status(sc)) {
    907  1.3.2.2  ad 		ifmr->ifm_status |= IFM_ACTIVE;
    908  1.3.2.2  ad 
    909  1.3.2.2  ad 		status = RGE_READ_2(sc, RGE_PHYSTAT);
    910  1.3.2.2  ad 		if ((status & RGE_PHYSTAT_FDX) ||
    911  1.3.2.2  ad 		    (status & RGE_PHYSTAT_2500MBPS))
    912  1.3.2.2  ad 			ifmr->ifm_active |= IFM_FDX;
    913  1.3.2.2  ad 		else
    914  1.3.2.2  ad 			ifmr->ifm_active |= IFM_HDX;
    915  1.3.2.2  ad 
    916  1.3.2.2  ad 		if (status & RGE_PHYSTAT_10MBPS)
    917  1.3.2.2  ad 			ifmr->ifm_active |= IFM_10_T;
    918  1.3.2.2  ad 		else if (status & RGE_PHYSTAT_100MBPS)
    919  1.3.2.2  ad 			ifmr->ifm_active |= IFM_100_TX;
    920  1.3.2.2  ad 		else if (status & RGE_PHYSTAT_1000MBPS)
    921  1.3.2.2  ad 			ifmr->ifm_active |= IFM_1000_T;
    922  1.3.2.2  ad 		else if (status & RGE_PHYSTAT_2500MBPS)
    923  1.3.2.2  ad 			ifmr->ifm_active |= IFM_2500_T;
    924  1.3.2.2  ad 	}
    925  1.3.2.2  ad }
    926  1.3.2.2  ad 
    927  1.3.2.2  ad /*
    928  1.3.2.2  ad  * Allocate memory for RX/TX rings.
    929  1.3.2.2  ad  */
    930  1.3.2.2  ad int
    931  1.3.2.2  ad rge_allocmem(struct rge_softc *sc)
    932  1.3.2.2  ad {
    933  1.3.2.2  ad 	int error, i;
    934  1.3.2.2  ad 
    935  1.3.2.2  ad 	/* Allocate DMA'able memory for the TX ring. */
    936  1.3.2.2  ad 	error = bus_dmamap_create(sc->sc_dmat, RGE_TX_LIST_SZ, 1,
    937  1.3.2.2  ad 	    RGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT, &sc->rge_ldata.rge_tx_list_map);
    938  1.3.2.2  ad 	if (error) {
    939  1.3.2.2  ad 		printf("%s: can't create TX list map\n", sc->sc_dev.dv_xname);
    940  1.3.2.2  ad 		return (error);
    941  1.3.2.2  ad 	}
    942  1.3.2.2  ad 	error = bus_dmamem_alloc(sc->sc_dmat, RGE_TX_LIST_SZ, RGE_ALIGN, 0,
    943  1.3.2.2  ad 	    &sc->rge_ldata.rge_tx_listseg, 1, &sc->rge_ldata.rge_tx_listnseg,
    944  1.3.2.2  ad 	    BUS_DMA_NOWAIT); /* XXX OpenBSD adds BUS_DMA_ZERO */
    945  1.3.2.2  ad 	if (error) {
    946  1.3.2.2  ad 		printf("%s: can't alloc TX list\n", sc->sc_dev.dv_xname);
    947  1.3.2.2  ad 		return (error);
    948  1.3.2.2  ad 	}
    949  1.3.2.2  ad 
    950  1.3.2.2  ad 	/* Load the map for the TX ring. */
    951  1.3.2.2  ad 	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
    952  1.3.2.2  ad 	    sc->rge_ldata.rge_tx_listnseg, RGE_TX_LIST_SZ,
    953  1.3.2.2  ad 	    &sc->rge_ldata.rge_tx_list,
    954  1.3.2.2  ad 	    BUS_DMA_NOWAIT); /* XXX OpenBSD adds BUS_DMA_COHERENT */
    955  1.3.2.2  ad 	if (error) {
    956  1.3.2.2  ad 		printf("%s: can't map TX dma buffers\n", sc->sc_dev.dv_xname);
    957  1.3.2.2  ad 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
    958  1.3.2.2  ad 		    sc->rge_ldata.rge_tx_listnseg);
    959  1.3.2.2  ad 		return (error);
    960  1.3.2.2  ad 	}
    961  1.3.2.2  ad 	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
    962  1.3.2.2  ad 	    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
    963  1.3.2.2  ad 	if (error) {
    964  1.3.2.2  ad 		printf("%s: can't load TX dma map\n", sc->sc_dev.dv_xname);
    965  1.3.2.2  ad 		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map);
    966  1.3.2.2  ad 		bus_dmamem_unmap(sc->sc_dmat,
    967  1.3.2.2  ad 		    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ);
    968  1.3.2.2  ad 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
    969  1.3.2.2  ad 		    sc->rge_ldata.rge_tx_listnseg);
    970  1.3.2.2  ad 		return (error);
    971  1.3.2.2  ad 	}
    972  1.3.2.2  ad 
    973  1.3.2.2  ad 	/* Create DMA maps for TX buffers. */
    974  1.3.2.2  ad 	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
    975  1.3.2.2  ad 		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN,
    976  1.3.2.2  ad 		    RGE_TX_NSEGS, RGE_JUMBO_FRAMELEN, 0, 0,
    977  1.3.2.2  ad 		    &sc->rge_ldata.rge_txq[i].txq_dmamap);
    978  1.3.2.2  ad 		if (error) {
    979  1.3.2.2  ad 			printf("%s: can't create DMA map for TX\n",
    980  1.3.2.2  ad 			    sc->sc_dev.dv_xname);
    981  1.3.2.2  ad 			return (error);
    982  1.3.2.2  ad 		}
    983  1.3.2.2  ad 	}
    984  1.3.2.2  ad 
    985  1.3.2.2  ad 	/* Allocate DMA'able memory for the RX ring. */
    986  1.3.2.2  ad 	error = bus_dmamap_create(sc->sc_dmat, RGE_RX_LIST_SZ, 1,
    987  1.3.2.2  ad 	    RGE_RX_LIST_SZ, 0, 0, &sc->rge_ldata.rge_rx_list_map);
    988  1.3.2.2  ad 	if (error) {
    989  1.3.2.2  ad 		printf("%s: can't create RX list map\n", sc->sc_dev.dv_xname);
    990  1.3.2.2  ad 		return (error);
    991  1.3.2.2  ad 	}
    992  1.3.2.2  ad 	error = bus_dmamem_alloc(sc->sc_dmat, RGE_RX_LIST_SZ, RGE_ALIGN, 0,
    993  1.3.2.2  ad 	    &sc->rge_ldata.rge_rx_listseg, 1, &sc->rge_ldata.rge_rx_listnseg,
    994  1.3.2.2  ad 	    BUS_DMA_NOWAIT);  /* XXX OpenBSD adds BUS_DMA_ZERO */
    995  1.3.2.2  ad 	if (error) {
    996  1.3.2.2  ad 		printf("%s: can't alloc RX list\n", sc->sc_dev.dv_xname);
    997  1.3.2.2  ad 		return (error);
    998  1.3.2.2  ad 	}
    999  1.3.2.2  ad 
   1000  1.3.2.2  ad 	/* Load the map for the RX ring. */
   1001  1.3.2.2  ad 	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
   1002  1.3.2.2  ad 	    sc->rge_ldata.rge_rx_listnseg, RGE_RX_LIST_SZ,
   1003  1.3.2.2  ad 	    &sc->rge_ldata.rge_rx_list,
   1004  1.3.2.2  ad 	    BUS_DMA_NOWAIT);  /* XXX OpenBSD adds BUS_DMA_COHERENT */
   1005  1.3.2.2  ad 	if (error) {
   1006  1.3.2.2  ad 		printf("%s: can't map RX dma buffers\n", sc->sc_dev.dv_xname);
   1007  1.3.2.2  ad 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
   1008  1.3.2.2  ad 		    sc->rge_ldata.rge_rx_listnseg);
   1009  1.3.2.2  ad 		return (error);
   1010  1.3.2.2  ad 	}
   1011  1.3.2.2  ad 	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
   1012  1.3.2.2  ad 	    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
   1013  1.3.2.2  ad 	if (error) {
   1014  1.3.2.2  ad 		printf("%s: can't load RX dma map\n", sc->sc_dev.dv_xname);
   1015  1.3.2.2  ad 		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map);
   1016  1.3.2.2  ad 		bus_dmamem_unmap(sc->sc_dmat,
   1017  1.3.2.2  ad 		    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ);
   1018  1.3.2.2  ad 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
   1019  1.3.2.2  ad 		    sc->rge_ldata.rge_rx_listnseg);
   1020  1.3.2.2  ad 		return (error);
   1021  1.3.2.2  ad 	}
   1022  1.3.2.2  ad 
   1023  1.3.2.2  ad 	/* Create DMA maps for RX buffers. */
   1024  1.3.2.2  ad 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
   1025  1.3.2.2  ad 		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN, 1,
   1026  1.3.2.2  ad 		    RGE_JUMBO_FRAMELEN, 0, 0,
   1027  1.3.2.2  ad 		    &sc->rge_ldata.rge_rxq[i].rxq_dmamap);
   1028  1.3.2.2  ad 		if (error) {
   1029  1.3.2.2  ad 			printf("%s: can't create DMA map for RX\n",
   1030  1.3.2.2  ad 			    sc->sc_dev.dv_xname);
   1031  1.3.2.2  ad 			return (error);
   1032  1.3.2.2  ad 		}
   1033  1.3.2.2  ad 	}
   1034  1.3.2.2  ad 
   1035  1.3.2.2  ad 	return (error);
   1036  1.3.2.2  ad }
   1037  1.3.2.2  ad 
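/*
 * XXX Hedged compat sketch for OpenBSD's MCLGETI(), which rge_newbuf()
 * below uses to grab a jumbo-sized receive cluster.  MEXTMALLOC() is
 * assumed to be an acceptable stand-in on NetBSD; real code would probably
 * want a dedicated cluster pool.  Not wired in yet.
 */
static inline struct mbuf *
rge_jumbo_mbuf(size_t len)
{
	struct mbuf *m;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);

	/* Attach external storage large enough for a jumbo frame. */
	MEXTMALLOC(m, len, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (NULL);
	}
	return (m);
}
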
   1038  1.3.2.2  ad /*
   1039  1.3.2.2  ad  * Initialize the RX descriptor and attach an mbuf cluster.
   1040  1.3.2.2  ad  */
   1041  1.3.2.2  ad int
   1042  1.3.2.2  ad rge_newbuf(struct rge_softc *sc, int idx)
   1043  1.3.2.2  ad {
   1044  1.3.2.2  ad 	struct mbuf *m;
   1045  1.3.2.2  ad 	struct rge_rx_desc *r;
   1046  1.3.2.2  ad 	struct rge_rxq *rxq;
   1047  1.3.2.2  ad 	bus_dmamap_t rxmap;
   1048  1.3.2.2  ad 
	/* XXX MCLGETI() is OpenBSD API; see the compat sketch above. */
	m = MCLGETI(NULL, M_DONTWAIT, NULL, RGE_JUMBO_FRAMELEN);
   1050  1.3.2.2  ad 	if (m == NULL)
   1051  1.3.2.2  ad 		return (ENOBUFS);
   1052  1.3.2.2  ad 
   1053  1.3.2.2  ad 	m->m_len = m->m_pkthdr.len = RGE_JUMBO_FRAMELEN;
   1054  1.3.2.2  ad 
   1055  1.3.2.2  ad 	rxq = &sc->rge_ldata.rge_rxq[idx];
   1056  1.3.2.2  ad 	rxmap = rxq->rxq_dmamap;
   1057  1.3.2.2  ad 
   1058  1.3.2.2  ad 	if (bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m, BUS_DMA_NOWAIT))
   1059  1.3.2.2  ad 		goto out;
   1060  1.3.2.2  ad 
   1061  1.3.2.2  ad 	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
   1062  1.3.2.2  ad 	    BUS_DMASYNC_PREREAD);
   1063  1.3.2.2  ad 
   1064  1.3.2.2  ad 	/* Map the segments into RX descriptors. */
   1065  1.3.2.2  ad 	r = &sc->rge_ldata.rge_rx_list[idx];
   1066  1.3.2.2  ad 
   1067  1.3.2.2  ad 	if (RGE_OWN(r)) {
   1068  1.3.2.2  ad 		printf("%s: tried to map busy RX descriptor\n",
   1069  1.3.2.2  ad 		    sc->sc_dev.dv_xname);
   1070  1.3.2.2  ad 		goto out;
   1071  1.3.2.2  ad 	}
   1072  1.3.2.2  ad 
   1073  1.3.2.2  ad 	rxq->rxq_mbuf = m;
   1074  1.3.2.2  ad 
   1075  1.3.2.2  ad 	r->rge_extsts = 0;
   1076  1.3.2.2  ad 	r->rge_addrlo = htole32(RGE_ADDR_LO(rxmap->dm_segs[0].ds_addr));
   1077  1.3.2.2  ad 	r->rge_addrhi = htole32(RGE_ADDR_HI(rxmap->dm_segs[0].ds_addr));
   1078  1.3.2.2  ad 
   1079  1.3.2.2  ad 	r->rge_cmdsts = htole32(rxmap->dm_segs[0].ds_len);
   1080  1.3.2.2  ad 	if (idx == RGE_RX_LIST_CNT - 1)
   1081  1.3.2.2  ad 		r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
   1082  1.3.2.2  ad 
   1083  1.3.2.2  ad 	r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);
   1084  1.3.2.2  ad 
   1085  1.3.2.2  ad 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
   1086  1.3.2.2  ad 	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
   1087  1.3.2.2  ad 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1088  1.3.2.2  ad 
   1089  1.3.2.2  ad 	return (0);
   1090  1.3.2.2  ad out:
   1091  1.3.2.2  ad 	if (m != NULL)
   1092  1.3.2.2  ad 		m_freem(m);
   1093  1.3.2.2  ad 	return (ENOMEM);
   1094  1.3.2.2  ad }
   1095  1.3.2.2  ad 
   1096  1.3.2.2  ad void
   1097  1.3.2.2  ad rge_discard_rxbuf(struct rge_softc *sc, int idx)
   1098  1.3.2.2  ad {
   1099  1.3.2.2  ad 	struct rge_rx_desc *r;
   1100  1.3.2.2  ad 
   1101  1.3.2.2  ad 	r = &sc->rge_ldata.rge_rx_list[idx];
   1102  1.3.2.2  ad 
   1103  1.3.2.2  ad 	r->rge_cmdsts = htole32(RGE_JUMBO_FRAMELEN);
   1104  1.3.2.2  ad 	r->rge_extsts = 0;
   1105  1.3.2.2  ad 	if (idx == RGE_RX_LIST_CNT - 1)
   1106  1.3.2.2  ad 		r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
   1107  1.3.2.2  ad 	r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);
   1108  1.3.2.2  ad 
   1109  1.3.2.2  ad 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
   1110  1.3.2.2  ad 	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
   1111  1.3.2.2  ad 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1112  1.3.2.2  ad }
   1113  1.3.2.2  ad 
   1114  1.3.2.2  ad int
   1115  1.3.2.2  ad rge_rx_list_init(struct rge_softc *sc)
   1116  1.3.2.2  ad {
   1117  1.3.2.2  ad 	int i;
   1118  1.3.2.2  ad 
   1119  1.3.2.2  ad 	memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);
   1120  1.3.2.2  ad 
   1121  1.3.2.2  ad 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
   1122  1.3.2.2  ad 		sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
   1123  1.3.2.2  ad 		if (rge_newbuf(sc, i) == ENOBUFS)
   1124  1.3.2.2  ad 			return (ENOBUFS);
   1125  1.3.2.2  ad 	}
   1126  1.3.2.2  ad 
   1127  1.3.2.2  ad 	sc->rge_ldata.rge_rxq_prodidx = 0;
   1128  1.3.2.2  ad 	sc->rge_head = sc->rge_tail = NULL;
   1129  1.3.2.2  ad 
   1130  1.3.2.2  ad 	return (0);
   1131  1.3.2.2  ad }
   1132  1.3.2.2  ad 
   1133  1.3.2.2  ad void
   1134  1.3.2.2  ad rge_tx_list_init(struct rge_softc *sc)
   1135  1.3.2.2  ad {
   1136  1.3.2.2  ad 	int i;
   1137  1.3.2.2  ad 
   1138  1.3.2.2  ad 	memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);
   1139  1.3.2.2  ad 
   1140  1.3.2.2  ad 	for (i = 0; i < RGE_TX_LIST_CNT; i++)
   1141  1.3.2.2  ad 		sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
   1142  1.3.2.2  ad 
   1143  1.3.2.2  ad 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map, 0,
   1144  1.3.2.2  ad 	    sc->rge_ldata.rge_tx_list_map->dm_mapsize,
   1145  1.3.2.2  ad 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1146  1.3.2.2  ad 
   1147  1.3.2.2  ad 	sc->rge_ldata.rge_txq_prodidx = sc->rge_ldata.rge_txq_considx = 0;
   1148  1.3.2.2  ad }
   1149  1.3.2.2  ad 
   1150  1.3.2.2  ad int
   1151  1.3.2.2  ad rge_rxeof(struct rge_softc *sc)
   1152  1.3.2.2  ad {
   1153  1.3.2.2  ad 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
   1154  1.3.2.2  ad 	struct mbuf *m;
   1155  1.3.2.2  ad 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   1156  1.3.2.2  ad 	struct rge_rx_desc *cur_rx;
   1157  1.3.2.2  ad 	struct rge_rxq *rxq;
   1158  1.3.2.2  ad 	uint32_t rxstat, extsts;
   1159  1.3.2.2  ad 	int i, total_len, rx = 0;
   1160  1.3.2.2  ad 
   1161  1.3.2.2  ad 	for (i = sc->rge_ldata.rge_rxq_prodidx; ; i = RGE_NEXT_RX_DESC(i)) {
   1162  1.3.2.2  ad 		/* Invalidate the descriptor memory. */
   1163  1.3.2.2  ad 		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
   1164  1.3.2.2  ad 		    i * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
   1165  1.3.2.2  ad 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1166  1.3.2.2  ad 
   1167  1.3.2.2  ad 		cur_rx = &sc->rge_ldata.rge_rx_list[i];
   1168  1.3.2.2  ad 
   1169  1.3.2.2  ad 		if (RGE_OWN(cur_rx))
   1170  1.3.2.2  ad 			break;
   1171  1.3.2.2  ad 
   1172  1.3.2.2  ad 		rxstat = letoh32(cur_rx->rge_cmdsts);
   1173  1.3.2.2  ad 		extsts = letoh32(cur_rx->rge_extsts);
   1174  1.3.2.2  ad 
   1175  1.3.2.2  ad 		total_len = RGE_RXBYTES(cur_rx);
   1176  1.3.2.2  ad 		rxq = &sc->rge_ldata.rge_rxq[i];
   1177  1.3.2.2  ad 		m = rxq->rxq_mbuf;
   1178  1.3.2.2  ad 		rx = 1;
   1179  1.3.2.2  ad 
   1180  1.3.2.2  ad 		/* Invalidate the RX mbuf and unload its map. */
   1181  1.3.2.2  ad 		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_dmamap, 0,
   1182  1.3.2.2  ad 		    rxq->rxq_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   1183  1.3.2.2  ad 		bus_dmamap_unload(sc->sc_dmat, rxq->rxq_dmamap);
   1184  1.3.2.2  ad 
   1185  1.3.2.2  ad 		if ((rxstat & (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) !=
   1186  1.3.2.2  ad 		    (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) {
   1187  1.3.2.2  ad 			rge_discard_rxbuf(sc, i);
   1188  1.3.2.2  ad 			continue;
   1189  1.3.2.2  ad 		}
   1190  1.3.2.2  ad 
   1191  1.3.2.2  ad 		if (rxstat & RGE_RDCMDSTS_RXERRSUM) {
   1192  1.3.2.2  ad 			ifp->if_ierrors++;
   1193  1.3.2.2  ad 			/*
   1194  1.3.2.2  ad 			 * If this is part of a multi-fragment packet,
   1195  1.3.2.2  ad 			 * discard all the pieces.
   1196  1.3.2.2  ad 			 */
   1197  1.3.2.2  ad 			 if (sc->rge_head != NULL) {
   1198  1.3.2.2  ad 				m_freem(sc->rge_head);
   1199  1.3.2.2  ad 				sc->rge_head = sc->rge_tail = NULL;
   1200  1.3.2.2  ad 			}
   1201  1.3.2.2  ad 			rge_discard_rxbuf(sc, i);
   1202  1.3.2.2  ad 			continue;
   1203  1.3.2.2  ad 		}
   1204  1.3.2.2  ad 
   1205  1.3.2.2  ad 		/*
   1206  1.3.2.2  ad 		 * If allocating a replacement mbuf fails,
   1207  1.3.2.2  ad 		 * reload the current one.
   1208  1.3.2.2  ad 		 */
   1209  1.3.2.2  ad 
   1210  1.3.2.2  ad 		if (rge_newbuf(sc, i) == ENOBUFS) {
   1211  1.3.2.2  ad 			if (sc->rge_head != NULL) {
   1212  1.3.2.2  ad 				m_freem(sc->rge_head);
   1213  1.3.2.2  ad 				sc->rge_head = sc->rge_tail = NULL;
   1214  1.3.2.2  ad 			}
   1215  1.3.2.2  ad 			rge_discard_rxbuf(sc, i);
   1216  1.3.2.2  ad 			continue;
   1217  1.3.2.2  ad 		}
   1218  1.3.2.2  ad 
   1219  1.3.2.2  ad 		if (sc->rge_head != NULL) {
   1220  1.3.2.2  ad 			m->m_len = total_len;
   1221  1.3.2.2  ad 			/*
   1222  1.3.2.2  ad 			 * Special case: if there's 4 bytes or less
   1223  1.3.2.2  ad 			 * in this buffer, the mbuf can be discarded:
    1224  1.3.2.2  ad 			 * the last 4 bytes are the CRC, which we don't
   1225  1.3.2.2  ad 			 * care about anyway.
   1226  1.3.2.2  ad 			 */
   1227  1.3.2.2  ad 			if (m->m_len <= ETHER_CRC_LEN) {
   1228  1.3.2.2  ad 				sc->rge_tail->m_len -=
   1229  1.3.2.2  ad 				    (ETHER_CRC_LEN - m->m_len);
   1230  1.3.2.2  ad 				m_freem(m);
   1231  1.3.2.2  ad 			} else {
   1232  1.3.2.2  ad 				m->m_len -= ETHER_CRC_LEN;
   1233  1.3.2.2  ad 				m->m_flags &= ~M_PKTHDR;
   1234  1.3.2.2  ad 				sc->rge_tail->m_next = m;
   1235  1.3.2.2  ad 			}
   1236  1.3.2.2  ad 			m = sc->rge_head;
   1237  1.3.2.2  ad 			sc->rge_head = sc->rge_tail = NULL;
   1238  1.3.2.2  ad 			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
   1239  1.3.2.2  ad 		} else
   1240  1.3.2.2  ad 			m->m_pkthdr.len = m->m_len =
   1241  1.3.2.2  ad 			    (total_len - ETHER_CRC_LEN);
   1242  1.3.2.2  ad 
   1243  1.3.2.2  ad 		/* Check IP header checksum. */
   1244  1.3.2.2  ad 		if (!(rxstat & RGE_RDCMDSTS_IPCSUMERR) &&
   1245  1.3.2.2  ad 		    (extsts & RGE_RDEXTSTS_IPV4))
   1246  1.3.2.2  ad 			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
   1247  1.3.2.2  ad 
   1248  1.3.2.2  ad 		/* Check TCP/UDP checksum. */
   1249  1.3.2.2  ad 		if ((extsts & (RGE_RDEXTSTS_IPV4 | RGE_RDEXTSTS_IPV6)) &&
   1250  1.3.2.2  ad 		    (((rxstat & RGE_RDCMDSTS_TCPPKT) &&
   1251  1.3.2.2  ad 		    !(rxstat & RGE_RDCMDSTS_TCPCSUMERR)) ||
   1252  1.3.2.2  ad 		    ((rxstat & RGE_RDCMDSTS_UDPPKT) &&
   1253  1.3.2.2  ad 		    !(rxstat & RGE_RDCMDSTS_UDPCSUMERR))))
   1254  1.3.2.2  ad 			m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
   1255  1.3.2.2  ad 			    M_UDP_CSUM_IN_OK;
   1256  1.3.2.2  ad 
   1257  1.3.2.2  ad #if NVLAN > 0
   1258  1.3.2.2  ad 		if (extsts & RGE_RDEXTSTS_VTAG) {
   1259  1.3.2.2  ad 			m->m_pkthdr.ether_vtag =
   1260  1.3.2.2  ad 			    ntohs(extsts & RGE_RDEXTSTS_VLAN_MASK);
   1261  1.3.2.2  ad 			m->m_flags |= M_VLANTAG;
   1262  1.3.2.2  ad 		}
   1263  1.3.2.2  ad #endif
   1264  1.3.2.2  ad 
   1265  1.3.2.2  ad 		ml_enqueue(&ml, m);
   1266  1.3.2.2  ad 	}
   1267  1.3.2.2  ad 
   1268  1.3.2.2  ad 	sc->rge_ldata.rge_rxq_prodidx = i;
   1269  1.3.2.2  ad 
   1270  1.3.2.2  ad 	if_input(ifp, &ml);
   1271  1.3.2.2  ad 
   1272  1.3.2.2  ad 	return (rx);
   1273  1.3.2.2  ad }
   1274  1.3.2.2  ad 
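/*
 * rge_txeof: TX completion.  Reclaim descriptors between the consumer
 * and producer indices, unloading and freeing the transmitted mbufs and
 * updating the collision/error counters.  "free" ends up 1 when at least
 * one descriptor was reclaimed and 2 when the walk stopped at a
 * descriptor the hardware still owns (transmission still in progress).
 */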
   1275  1.3.2.2  ad int
   1276  1.3.2.2  ad rge_txeof(struct rge_softc *sc)
   1277  1.3.2.2  ad {
   1278  1.3.2.2  ad 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   1279  1.3.2.2  ad 	struct rge_txq *txq;
   1280  1.3.2.2  ad 	uint32_t txstat;
   1281  1.3.2.2  ad 	int cons, idx, prod;
   1282  1.3.2.2  ad 	int free = 0;
   1283  1.3.2.2  ad 
   1284  1.3.2.2  ad 	prod = sc->rge_ldata.rge_txq_prodidx;
   1285  1.3.2.2  ad 	cons = sc->rge_ldata.rge_txq_considx;
   1286  1.3.2.2  ad 
   1287  1.3.2.2  ad 	while (prod != cons) {
   1288  1.3.2.2  ad 		txq = &sc->rge_ldata.rge_txq[cons];
   1289  1.3.2.2  ad 		idx = txq->txq_descidx;
   1290  1.3.2.2  ad 
   1291  1.3.2.2  ad 		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
   1292  1.3.2.2  ad 		    idx * sizeof(struct rge_tx_desc),
   1293  1.3.2.2  ad 		    sizeof(struct rge_tx_desc),
   1294  1.3.2.2  ad 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1295  1.3.2.2  ad 
   1296  1.3.2.2  ad 		txstat = letoh32(sc->rge_ldata.rge_tx_list[idx].rge_cmdsts);
   1297  1.3.2.2  ad 
   1298  1.3.2.2  ad 		if (txstat & RGE_TDCMDSTS_OWN) {
   1299  1.3.2.2  ad 			free = 2;
   1300  1.3.2.2  ad 			break;
   1301  1.3.2.2  ad 		}
   1302  1.3.2.2  ad 
   1303  1.3.2.2  ad 		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap, 0,
   1304  1.3.2.2  ad 		    txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   1305  1.3.2.2  ad 		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
   1306  1.3.2.2  ad 		m_freem(txq->txq_mbuf);
   1307  1.3.2.2  ad 		txq->txq_mbuf = NULL;
   1308  1.3.2.2  ad 
   1309  1.3.2.2  ad 		if (txstat & (RGE_TDCMDSTS_EXCESSCOLL | RGE_TDCMDSTS_COLL))
   1310  1.3.2.2  ad 			ifp->if_collisions++;
   1311  1.3.2.2  ad 		if (txstat & RGE_TDCMDSTS_TXERR)
   1312  1.3.2.2  ad 			ifp->if_oerrors++;
   1313  1.3.2.2  ad 
   1314  1.3.2.2  ad 		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
   1315  1.3.2.2  ad 		    idx * sizeof(struct rge_tx_desc),
   1316  1.3.2.2  ad 		    sizeof(struct rge_tx_desc),
   1317  1.3.2.2  ad 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1318  1.3.2.2  ad 
   1319  1.3.2.2  ad 		cons = RGE_NEXT_TX_DESC(idx);
   1320  1.3.2.2  ad 		free = 1;
   1321  1.3.2.2  ad 	}
   1322  1.3.2.2  ad 
   1323  1.3.2.2  ad 	if (free == 0)
   1324  1.3.2.2  ad 		return (0);
   1325  1.3.2.2  ad 
   1326  1.3.2.2  ad 	sc->rge_ldata.rge_txq_considx = cons;
   1327  1.3.2.2  ad 
   1328  1.3.2.2  ad 	if (ifq_is_oactive(&ifp->if_snd))
   1329  1.3.2.2  ad 		ifq_restart(&ifp->if_snd);
   1330  1.3.2.2  ad 	else if (free == 2)
   1331  1.3.2.2  ad 		ifq_serialize(&ifp->if_snd, &sc->sc_task);
   1332  1.3.2.2  ad 	else
   1333  1.3.2.2  ad 		ifp->if_timer = 0;
   1334  1.3.2.2  ad 
   1335  1.3.2.2  ad 	return (1);
   1336  1.3.2.2  ad }
   1337  1.3.2.2  ad 
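/*
 * rge_reset: enable the RXDV gate, give the RX/TX FIFOs a chance to
 * drain, then issue a software reset and wait for the reset bit to clear.
 */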
   1338  1.3.2.2  ad void
   1339  1.3.2.2  ad rge_reset(struct rge_softc *sc)
   1340  1.3.2.2  ad {
   1341  1.3.2.2  ad 	int i;
   1342  1.3.2.2  ad 
   1343  1.3.2.2  ad 	/* Enable RXDV gate. */
   1344  1.3.2.2  ad 	RGE_SETBIT_1(sc, RGE_PPSW, 0x08);
   1345  1.3.2.2  ad 	DELAY(2000);
   1346  1.3.2.2  ad 
   1347  1.3.2.2  ad 	for (i = 0; i < 10; i++) {
   1348  1.3.2.2  ad 		DELAY(100);
   1349  1.3.2.2  ad 		if ((RGE_READ_1(sc, RGE_MCUCMD) & (RGE_MCUCMD_RXFIFO_EMPTY |
   1350  1.3.2.2  ad 		    RGE_MCUCMD_TXFIFO_EMPTY)) == (RGE_MCUCMD_RXFIFO_EMPTY |
   1351  1.3.2.2  ad 		    RGE_MCUCMD_TXFIFO_EMPTY))
   1352  1.3.2.2  ad 			break;
   1353  1.3.2.2  ad 	}
   1354  1.3.2.2  ad 
   1355  1.3.2.2  ad 	/* Soft reset. */
   1356  1.3.2.2  ad 	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_RESET);
   1357  1.3.2.2  ad 
   1358  1.3.2.2  ad 	for (i = 0; i < RGE_TIMEOUT; i++) {
   1359  1.3.2.2  ad 		DELAY(100);
   1360  1.3.2.2  ad 		if (!(RGE_READ_1(sc, RGE_CMD) & RGE_CMD_RESET))
   1361  1.3.2.2  ad 			break;
   1362  1.3.2.2  ad 	}
   1363  1.3.2.2  ad 	if (i == RGE_TIMEOUT)
   1364  1.3.2.2  ad 		printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);
   1365  1.3.2.2  ad }
   1366  1.3.2.2  ad 
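/*
 * rge_iff: program the receive filter.  Unicast and broadcast are always
 * accepted; promiscuous mode or a multicast range request selects
 * all-multicast, otherwise a 64-bit multicast hash is built with
 * ether_crc32_be().  The hash words are written byte-swapped and in
 * reverse register order, the same convention other RealTek drivers use
 * for the MAR registers.
 */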
   1367  1.3.2.2  ad void
   1368  1.3.2.2  ad rge_iff(struct rge_softc *sc)
   1369  1.3.2.2  ad {
   1370  1.3.2.2  ad 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   1371  1.3.2.2  ad 	struct ethercom *ac = &sc->sc_ec;
   1372  1.3.2.2  ad 	struct ether_multi *enm;
   1373  1.3.2.2  ad 	struct ether_multistep step;
   1374  1.3.2.2  ad 	uint32_t hashes[2];
   1375  1.3.2.2  ad 	uint32_t rxfilt;
   1376  1.3.2.2  ad 	int h = 0;
   1377  1.3.2.2  ad 
   1378  1.3.2.2  ad 	rxfilt = RGE_READ_4(sc, RGE_RXCFG);
   1379  1.3.2.2  ad 	rxfilt &= ~(RGE_RXCFG_ALLPHYS | RGE_RXCFG_MULTI);
   1380  1.3.2.2  ad 	ifp->if_flags &= ~IFF_ALLMULTI;
   1381  1.3.2.2  ad 
   1382  1.3.2.2  ad 	/*
   1383  1.3.2.2  ad 	 * Always accept frames destined to our station address.
   1384  1.3.2.2  ad 	 * Always accept broadcast frames.
   1385  1.3.2.2  ad 	 */
   1386  1.3.2.2  ad 	rxfilt |= RGE_RXCFG_INDIV | RGE_RXCFG_BROAD;
   1387  1.3.2.2  ad 
   1388  1.3.2.2  ad 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
   1389  1.3.2.2  ad 		ifp->if_flags |= IFF_ALLMULTI;
   1390  1.3.2.2  ad 		rxfilt |= RGE_RXCFG_MULTI;
   1391  1.3.2.2  ad 		if (ifp->if_flags & IFF_PROMISC)
   1392  1.3.2.2  ad 			rxfilt |= RGE_RXCFG_ALLPHYS;
   1393  1.3.2.2  ad 		hashes[0] = hashes[1] = 0xffffffff;
   1394  1.3.2.2  ad 	} else {
   1395  1.3.2.2  ad 		rxfilt |= RGE_RXCFG_MULTI;
   1396  1.3.2.2  ad 		/* Program new filter. */
   1397  1.3.2.2  ad 		memset(hashes, 0, sizeof(hashes));
   1398  1.3.2.2  ad 
   1399  1.3.2.2  ad 		ETHER_FIRST_MULTI(step, ac, enm);
   1400  1.3.2.2  ad 		while (enm != NULL) {
   1401  1.3.2.2  ad 			h = ether_crc32_be(enm->enm_addrlo,
   1402  1.3.2.2  ad 			    ETHER_ADDR_LEN) >> 26;
   1403  1.3.2.2  ad 
   1404  1.3.2.2  ad 			if (h < 32)
   1405  1.3.2.2  ad 				hashes[0] |= (1 << h);
   1406  1.3.2.2  ad 			else
   1407  1.3.2.2  ad 				hashes[1] |= (1 << (h - 32));
   1408  1.3.2.2  ad 
   1409  1.3.2.2  ad 			ETHER_NEXT_MULTI(step, enm);
   1410  1.3.2.2  ad 		}
   1411  1.3.2.2  ad 	}
   1412  1.3.2.2  ad 
   1413  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_RXCFG, rxfilt);
   1414  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_MAR0, bswap32(hashes[1]));
   1415  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_MAR4, bswap32(hashes[0]));
   1416  1.3.2.2  ad }
   1417  1.3.2.2  ad 
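/*
 * rge_set_phy_power: power the PHY up or down.  On power-up, restart
 * autonegotiation and poll the PHY state field (the low three bits of
 * OCP register 0xa420, going by RealTek's reference code) until it
 * reports ready (3).
 */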
   1418  1.3.2.2  ad void
   1419  1.3.2.2  ad rge_set_phy_power(struct rge_softc *sc, int on)
   1420  1.3.2.2  ad {
   1421  1.3.2.2  ad 	int i;
   1422  1.3.2.2  ad 
   1423  1.3.2.2  ad 	if (on) {
   1424  1.3.2.2  ad 		RGE_SETBIT_1(sc, RGE_PMCH, 0xc0);
   1425  1.3.2.2  ad 
   1426  1.3.2.2  ad 		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN);
   1427  1.3.2.2  ad 
   1428  1.3.2.2  ad 		for (i = 0; i < RGE_TIMEOUT; i++) {
    1429  1.3.2.2  ad 			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 3)
   1430  1.3.2.2  ad 				break;
   1431  1.3.2.2  ad 			DELAY(1000);
   1432  1.3.2.2  ad 		}
   1433  1.3.2.2  ad 	} else
   1434  1.3.2.2  ad 		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_PDOWN);
   1435  1.3.2.2  ad }
   1436  1.3.2.2  ad 
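/*
 * rge_phy_config: per-revision PHY setup.  Load the EPHY fixups for this
 * MAC configuration, upload the PHY MCU patch if the version register
 * (index 0x801e via 0xa436/0xa438) does not already report the expected
 * version, apply a long series of OCP register tweaks that appear to
 * come from RealTek's reference code, and finally disable EEE.
 */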
   1437  1.3.2.2  ad void
   1438  1.3.2.2  ad rge_phy_config(struct rge_softc *sc)
   1439  1.3.2.2  ad {
   1440  1.3.2.2  ad 	uint16_t mcode_ver, val;
   1441  1.3.2.2  ad 	int i;
   1442  1.3.2.2  ad 	static const uint16_t mac_cfg3_a438_value[] =
   1443  1.3.2.2  ad 	    { 0x0043, 0x00a7, 0x00d6, 0x00ec, 0x00f6, 0x00fb, 0x00fd, 0x00ff,
   1444  1.3.2.2  ad 	      0x00bb, 0x0058, 0x0029, 0x0013, 0x0009, 0x0004, 0x0002 };
   1445  1.3.2.2  ad 
   1446  1.3.2.2  ad 	static const uint16_t mac_cfg3_b88e_value[] =
   1447  1.3.2.2  ad 	    { 0xc091, 0x6e12, 0xc092, 0x1214, 0xc094, 0x1516, 0xc096, 0x171b,
   1448  1.3.2.2  ad 	      0xc098, 0x1b1c, 0xc09a, 0x1f1f, 0xc09c, 0x2021, 0xc09e, 0x2224,
   1449  1.3.2.2  ad 	      0xc0a0, 0x2424, 0xc0a2, 0x2424, 0xc0a4, 0x2424, 0xc018, 0x0af2,
   1450  1.3.2.2  ad 	      0xc01a, 0x0d4a, 0xc01c, 0x0f26, 0xc01e, 0x118d, 0xc020, 0x14f3,
   1451  1.3.2.2  ad 	      0xc022, 0x175a, 0xc024, 0x19c0, 0xc026, 0x1c26, 0xc089, 0x6050,
   1452  1.3.2.2  ad 	      0xc08a, 0x5f6e, 0xc08c, 0x6e6e, 0xc08e, 0x6e6e, 0xc090, 0x6e12 };
   1453  1.3.2.2  ad 
   1454  1.3.2.2  ad 	/* Read microcode version. */
   1455  1.3.2.2  ad 	rge_write_phy_ocp(sc, 0xa436, 0x801e);
   1456  1.3.2.2  ad 	mcode_ver = rge_read_phy_ocp(sc, 0xa438);
   1457  1.3.2.2  ad 
   1458  1.3.2.2  ad 	if (sc->rge_type == MAC_CFG2) {
   1459  1.3.2.2  ad 		for (i = 0; i < nitems(rtl8125_mac_cfg2_ephy); i++) {
   1460  1.3.2.2  ad 			rge_write_ephy(sc, rtl8125_mac_cfg2_ephy[i].reg,
   1461  1.3.2.2  ad 			    rtl8125_mac_cfg2_ephy[i].val);
   1462  1.3.2.2  ad 		}
   1463  1.3.2.2  ad 
   1464  1.3.2.2  ad 		if (mcode_ver != RGE_MAC_CFG2_MCODE_VER) {
   1465  1.3.2.2  ad 			/* Disable PHY config. */
   1466  1.3.2.2  ad 			RGE_CLRBIT_1(sc, 0xf2, 0x20);
   1467  1.3.2.2  ad 			DELAY(1000);
   1468  1.3.2.2  ad 
   1469  1.3.2.2  ad 			rge_patch_phy_mcu(sc, 1);
   1470  1.3.2.2  ad 
   1471  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
   1472  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xa438, 0x8600);
   1473  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xa436, 0xb82e);
   1474  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xa438, 0x0001);
   1475  1.3.2.2  ad 
   1476  1.3.2.2  ad 			RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
   1477  1.3.2.2  ad 			for (i = 0; i < nitems(rtl8125_mac_cfg2_mcu); i++) {
   1478  1.3.2.2  ad 				rge_write_phy_ocp(sc,
   1479  1.3.2.2  ad 				    rtl8125_mac_cfg2_mcu[i].reg,
   1480  1.3.2.2  ad 				    rtl8125_mac_cfg2_mcu[i].val);
   1481  1.3.2.2  ad 			}
   1482  1.3.2.2  ad 			RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);
   1483  1.3.2.2  ad 
   1484  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xa436, 0);
   1485  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xa438, 0);
   1486  1.3.2.2  ad 			RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
   1487  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
   1488  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xa438, 0);
   1489  1.3.2.2  ad 
   1490  1.3.2.2  ad 			rge_patch_phy_mcu(sc, 0);
   1491  1.3.2.2  ad 
   1492  1.3.2.2  ad 			/* Enable PHY config. */
   1493  1.3.2.2  ad 			RGE_SETBIT_1(sc, 0xf2, 0x20);
   1494  1.3.2.2  ad 
   1495  1.3.2.2  ad 			/* Write microcode version. */
   1496  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xa436, 0x801e);
   1497  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xa438, RGE_MAC_CFG2_MCODE_VER);
   1498  1.3.2.2  ad 		}
   1499  1.3.2.2  ad 
   1500  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xad40) & ~0x03ff;
   1501  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xad40, val | 0x0084);
   1502  1.3.2.2  ad 		RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
   1503  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
   1504  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xad16, val | 0x0006);
   1505  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xad32) & ~0x03ff;
   1506  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
   1507  1.3.2.2  ad 		RGE_PHY_CLRBIT(sc, 0xac08, 0x1100);
   1508  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xac8a) & ~0xf000;
   1509  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xac8a, val | 0x7000);
   1510  1.3.2.2  ad 		RGE_PHY_SETBIT(sc, 0xad18, 0x0400);
   1511  1.3.2.2  ad 		RGE_PHY_SETBIT(sc, 0xad1a, 0x03ff);
   1512  1.3.2.2  ad 		RGE_PHY_SETBIT(sc, 0xad1c, 0x03ff);
   1513  1.3.2.2  ad 
   1514  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa436, 0x80ea);
   1515  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1516  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa438, val | 0xc400);
   1517  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa436, 0x80eb);
   1518  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xa438) & ~0x0700;
   1519  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa438, val | 0x0300);
   1520  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa436, 0x80f8);
   1521  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1522  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa438, val | 0x1c00);
   1523  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa436, 0x80f1);
   1524  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1525  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa438, val | 0x3000);
   1526  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa436, 0x80fe);
   1527  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1528  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa438, val | 0xa500);
   1529  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa436, 0x8102);
   1530  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1531  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
   1532  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa436, 0x8105);
   1533  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1534  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa438, val | 0x3300);
   1535  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa436, 0x8100);
   1536  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1537  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa438, val | 0x7000);
   1538  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa436, 0x8104);
   1539  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1540  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa438, val | 0xf000);
   1541  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa436, 0x8106);
   1542  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1543  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa438, val | 0x6500);
   1544  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa436, 0x80dc);
   1545  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1546  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa438, val | 0xed00);
   1547  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa436, 0x80df);
   1548  1.3.2.2  ad 		RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
   1549  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa436, 0x80e1);
   1550  1.3.2.2  ad 		RGE_PHY_CLRBIT(sc, 0xa438, 0x0100);
   1551  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xbf06) & ~0x003f;
   1552  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xbf06, val | 0x0038);
   1553  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa436, 0x819f);
   1554  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa438, 0xd0b6);
   1555  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xbc34, 0x5555);
   1556  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xbf0a) & ~0x0e00;
   1557  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xbf0a, val | 0x0a00);
   1558  1.3.2.2  ad 		RGE_PHY_CLRBIT(sc, 0xa5c0, 0x0400);
   1559  1.3.2.2  ad 		RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
   1560  1.3.2.2  ad 	} else {
   1561  1.3.2.2  ad 		for (i = 0; i < nitems(rtl8125_mac_cfg3_ephy); i++)
   1562  1.3.2.2  ad 			rge_write_ephy(sc, rtl8125_mac_cfg3_ephy[i].reg,
   1563  1.3.2.2  ad 			    rtl8125_mac_cfg3_ephy[i].val);
   1564  1.3.2.2  ad 
   1565  1.3.2.2  ad 		if (mcode_ver != RGE_MAC_CFG3_MCODE_VER) {
   1566  1.3.2.2  ad 			/* Disable PHY config. */
   1567  1.3.2.2  ad 			RGE_CLRBIT_1(sc, 0xf2, 0x20);
   1568  1.3.2.2  ad 			DELAY(1000);
   1569  1.3.2.2  ad 
   1570  1.3.2.2  ad 			rge_patch_phy_mcu(sc, 1);
   1571  1.3.2.2  ad 
   1572  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
   1573  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xa438, 0x8601);
   1574  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xa436, 0xb82e);
   1575  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xa438, 0x0001);
   1576  1.3.2.2  ad 
   1577  1.3.2.2  ad 			RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
   1578  1.3.2.2  ad 			for (i = 0; i < nitems(rtl8125_mac_cfg3_mcu); i++) {
   1579  1.3.2.2  ad 				rge_write_phy_ocp(sc,
   1580  1.3.2.2  ad 				    rtl8125_mac_cfg3_mcu[i].reg,
   1581  1.3.2.2  ad 				    rtl8125_mac_cfg3_mcu[i].val);
   1582  1.3.2.2  ad 			}
   1583  1.3.2.2  ad 			RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);
   1584  1.3.2.2  ad 
   1585  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xa436, 0);
   1586  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xa438, 0);
   1587  1.3.2.2  ad 			RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
   1588  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
   1589  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xa438, 0);
   1590  1.3.2.2  ad 
   1591  1.3.2.2  ad 			rge_patch_phy_mcu(sc, 0);
   1592  1.3.2.2  ad 
   1593  1.3.2.2  ad 			/* Enable PHY config. */
   1594  1.3.2.2  ad 			RGE_SETBIT_1(sc, 0xf2, 0x20);
   1595  1.3.2.2  ad 
   1596  1.3.2.2  ad 			/* Write microcode version. */
   1597  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xa436, 0x801e);
   1598  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xa438, RGE_MAC_CFG3_MCODE_VER);
   1599  1.3.2.2  ad 		}
   1600  1.3.2.2  ad 
   1601  1.3.2.2  ad 		RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
   1602  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
   1603  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xad16, val | 0x03ff);
   1604  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xad32) & ~0x003f;
   1605  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
   1606  1.3.2.2  ad 		RGE_PHY_CLRBIT(sc, 0xac08, 0x1000);
   1607  1.3.2.2  ad 		RGE_PHY_CLRBIT(sc, 0xac08, 0x0100);
   1608  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xacc0) & ~0x0003;
   1609  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xacc0, val | 0x0002);
   1610  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xad40) & ~0x00e0;
   1611  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xad40, val | 0x0040);
   1612  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xad40) & ~0x0007;
   1613  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xad40, val | 0x0004);
   1614  1.3.2.2  ad 		RGE_PHY_CLRBIT(sc, 0xac14, 0x0080);
   1615  1.3.2.2  ad 		RGE_PHY_CLRBIT(sc, 0xac80, 0x0300);
   1616  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xac5e) & ~0x0007;
   1617  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xac5e, val | 0x0002);
   1618  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xad4c, 0x00a8);
   1619  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xac5c, 0x01ff);
   1620  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xac8a) & ~0x00f0;
   1621  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xac8a, val | 0x0030);
   1622  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xb87c, 0x80a2);
   1623  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xb87e, 0x0153);
   1624  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xb87c, 0x809c);
   1625  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xb87e, 0x0153);
   1626  1.3.2.2  ad 
   1627  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa436, 0x81b3);
   1628  1.3.2.2  ad 		for (i = 0; i < nitems(mac_cfg3_a438_value); i++)
   1629  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xa438, mac_cfg3_a438_value[i]);
   1630  1.3.2.2  ad 		for (i = 0; i < 26; i++)
   1631  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xa438, 0);
   1632  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa436, 0x8257);
   1633  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa438, 0x020f);
   1634  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa436, 0x80ea);
   1635  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa438, 0x7843);
   1636  1.3.2.2  ad 
   1637  1.3.2.2  ad 		rge_patch_phy_mcu(sc, 1);
   1638  1.3.2.2  ad 		RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
   1639  1.3.2.2  ad 		RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
   1640  1.3.2.2  ad 		for (i = 0; i < nitems(mac_cfg3_b88e_value); i += 2) {
   1641  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xb88e, mac_cfg3_b88e_value[i]);
   1642  1.3.2.2  ad 			rge_write_phy_ocp(sc, 0xb890,
   1643  1.3.2.2  ad 			    mac_cfg3_b88e_value[i + 1]);
   1644  1.3.2.2  ad 		}
   1645  1.3.2.2  ad 		RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
   1646  1.3.2.2  ad 		rge_patch_phy_mcu(sc, 0);
   1647  1.3.2.2  ad 
   1648  1.3.2.2  ad 		RGE_PHY_SETBIT(sc, 0xd068, 0x2000);
   1649  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xa436, 0x81a2);
   1650  1.3.2.2  ad 		RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
   1651  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xb54c) & ~0xff00;
   1652  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xb54c, val | 0xdb00);
   1653  1.3.2.2  ad 		RGE_PHY_CLRBIT(sc, 0xa454, 0x0001);
   1654  1.3.2.2  ad 		RGE_PHY_SETBIT(sc, 0xa5d4, 0x0020);
   1655  1.3.2.2  ad 		RGE_PHY_CLRBIT(sc, 0xad4e, 0x0010);
   1656  1.3.2.2  ad 		RGE_PHY_CLRBIT(sc, 0xa86a, 0x0001);
   1657  1.3.2.2  ad 		RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
   1658  1.3.2.2  ad 	}
   1659  1.3.2.2  ad 
   1660  1.3.2.2  ad 	/* Disable EEE. */
   1661  1.3.2.2  ad 	RGE_MAC_CLRBIT(sc, 0xe040, 0x0003);
   1662  1.3.2.2  ad 	RGE_MAC_CLRBIT(sc, 0xeb62, 0x0006);
   1663  1.3.2.2  ad 	RGE_PHY_CLRBIT(sc, 0xa432, 0x0010);
   1664  1.3.2.2  ad 	RGE_PHY_CLRBIT(sc, 0xa5d0, 0x0006);
   1665  1.3.2.2  ad 	RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0001);
   1666  1.3.2.2  ad 	RGE_PHY_CLRBIT(sc, 0xa6d8, 0x0010);
   1667  1.3.2.2  ad 	RGE_PHY_CLRBIT(sc, 0xa428, 0x0080);
   1668  1.3.2.2  ad 	RGE_PHY_CLRBIT(sc, 0xa4a2, 0x0200);
   1669  1.3.2.2  ad 
   1670  1.3.2.2  ad 	rge_patch_phy_mcu(sc, 1);
   1671  1.3.2.2  ad 	RGE_MAC_CLRBIT(sc, 0xe052, 0x0001);
   1672  1.3.2.2  ad 	RGE_PHY_CLRBIT(sc, 0xa442, 0x3000);
   1673  1.3.2.2  ad 	RGE_PHY_CLRBIT(sc, 0xa430, 0x8000);
   1674  1.3.2.2  ad 	rge_patch_phy_mcu(sc, 0);
   1675  1.3.2.2  ad }
   1676  1.3.2.2  ad 
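/*
 * rge_set_macaddr: program the station address.  The ID registers are
 * only writable while RGE_EECMD_WRITECFG is asserted, so it is toggled
 * around the two writes.
 */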
   1677  1.3.2.2  ad void
   1678  1.3.2.2  ad rge_set_macaddr(struct rge_softc *sc, const uint8_t *addr)
   1679  1.3.2.2  ad {
   1680  1.3.2.2  ad 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
   1681  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_MAC0,
   1682  1.3.2.2  ad 	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
   1683  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_MAC4,
   1684  1.3.2.2  ad 	    addr[5] <<  8 | addr[4]);
   1685  1.3.2.2  ad 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
   1686  1.3.2.2  ad }
   1687  1.3.2.2  ad 
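/*
 * rge_get_macaddr: read the factory MAC address from the chip.  The
 * 32-bit and 16-bit stores assume the destination buffer is suitably
 * aligned for the casts.
 */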
   1688  1.3.2.2  ad void
   1689  1.3.2.2  ad rge_get_macaddr(struct rge_softc *sc, uint8_t *addr)
   1690  1.3.2.2  ad {
   1691  1.3.2.2  ad 	*(uint32_t *)&addr[0] = RGE_READ_4(sc, RGE_ADDR0);
   1692  1.3.2.2  ad 	*(uint16_t *)&addr[4] = RGE_READ_2(sc, RGE_ADDR1);
   1693  1.3.2.2  ad }
   1694  1.3.2.2  ad 
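/*
 * rge_hw_init: one-time MAC setup.  Clear PME status and CLKREQ, disable
 * UPS, zero the MAC MCU patch registers (0xfc26-0xfc38), load the
 * rtl8125_def_bps table on MAC_CFG3 parts, disable PHY OCP power saving
 * and set the PCIe uncorrectable error status bit through CSI space.
 */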
   1695  1.3.2.2  ad void
   1696  1.3.2.2  ad rge_hw_init(struct rge_softc *sc)
   1697  1.3.2.2  ad {
   1698  1.3.2.2  ad 	int i;
   1699  1.3.2.2  ad 
   1700  1.3.2.2  ad 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
   1701  1.3.2.2  ad 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
   1702  1.3.2.2  ad 	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
   1703  1.3.2.2  ad 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
   1704  1.3.2.2  ad 	RGE_CLRBIT_1(sc, 0xf1, 0x80);
   1705  1.3.2.2  ad 
   1706  1.3.2.2  ad 	/* Disable UPS. */
   1707  1.3.2.2  ad 	RGE_MAC_CLRBIT(sc, 0xd40a, 0x0010);
   1708  1.3.2.2  ad 
   1709  1.3.2.2  ad 	/* Configure MAC MCU. */
   1710  1.3.2.2  ad 	rge_write_mac_ocp(sc, 0xfc38, 0);
   1711  1.3.2.2  ad 
   1712  1.3.2.2  ad 	for (i = 0xfc28; i < 0xfc38; i += 2)
   1713  1.3.2.2  ad 		rge_write_mac_ocp(sc, i, 0);
   1714  1.3.2.2  ad 
   1715  1.3.2.2  ad 	DELAY(3000);
   1716  1.3.2.2  ad 	rge_write_mac_ocp(sc, 0xfc26, 0);
   1717  1.3.2.2  ad 
   1718  1.3.2.2  ad 	if (sc->rge_type == MAC_CFG3) {
   1719  1.3.2.2  ad 		for (i = 0; i < nitems(rtl8125_def_bps); i++)
   1720  1.3.2.2  ad 			rge_write_mac_ocp(sc, rtl8125_def_bps[i].reg,
   1721  1.3.2.2  ad 			    rtl8125_def_bps[i].val);
   1722  1.3.2.2  ad 	}
   1723  1.3.2.2  ad 
   1724  1.3.2.2  ad 	/* Disable PHY power saving. */
   1725  1.3.2.2  ad 	rge_disable_phy_ocp_pwrsave(sc);
   1726  1.3.2.2  ad 
   1727  1.3.2.2  ad 	/* Set PCIe uncorrectable error status. */
   1728  1.3.2.2  ad 	rge_write_csi(sc, 0x108,
   1729  1.3.2.2  ad 	    rge_read_csi(sc, 0x108) | 0x00100000);
   1730  1.3.2.2  ad }
   1731  1.3.2.2  ad 
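/*
 * Restore the PHY OCP power-save control (register 0xc416) to its
 * expected value of 0x0500, under the MCU patch guard, if it has been
 * changed.
 */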
   1732  1.3.2.2  ad void
   1733  1.3.2.2  ad rge_disable_phy_ocp_pwrsave(struct rge_softc *sc)
   1734  1.3.2.2  ad {
   1735  1.3.2.2  ad 	if (rge_read_phy_ocp(sc, 0xc416) != 0x0500) {
   1736  1.3.2.2  ad 		rge_patch_phy_mcu(sc, 1);
   1737  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xc416, 0);
   1738  1.3.2.2  ad 		rge_write_phy_ocp(sc, 0xc416, 0x0500);
   1739  1.3.2.2  ad 		rge_patch_phy_mcu(sc, 0);
   1740  1.3.2.2  ad 	}
   1741  1.3.2.2  ad }
   1742  1.3.2.2  ad 
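/*
 * rge_patch_phy_mcu: enter (set != 0) or leave PHY MCU patch mode by
 * toggling bit 0x0010 of OCP register 0xb820, then poll 0xb800 for the
 * 0x0040 handshake bit, warning if it never shows up.
 */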
   1743  1.3.2.2  ad void
   1744  1.3.2.2  ad rge_patch_phy_mcu(struct rge_softc *sc, int set)
   1745  1.3.2.2  ad {
   1746  1.3.2.2  ad 	uint16_t val;
   1747  1.3.2.2  ad 	int i;
   1748  1.3.2.2  ad 
   1749  1.3.2.2  ad 	if (set)
   1750  1.3.2.2  ad 		RGE_PHY_SETBIT(sc, 0xb820, 0x0010);
   1751  1.3.2.2  ad 	else
   1752  1.3.2.2  ad 		RGE_PHY_CLRBIT(sc, 0xb820, 0x0010);
   1753  1.3.2.2  ad 
   1754  1.3.2.2  ad 	for (i = 0; i < 1000; i++) {
   1755  1.3.2.2  ad 		val = rge_read_phy_ocp(sc, 0xb800) & 0x0040;
   1756  1.3.2.2  ad 		DELAY(100);
   1757  1.3.2.2  ad 		if (val == 0x0040)
   1758  1.3.2.2  ad 			break;
   1759  1.3.2.2  ad 	}
   1760  1.3.2.2  ad 	if (i == 1000)
   1761  1.3.2.2  ad 		printf("%s: timeout waiting to patch phy mcu\n",
   1762  1.3.2.2  ad 		    sc->sc_dev.dv_xname);
   1763  1.3.2.2  ad }
   1764  1.3.2.2  ad 
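/* Advertise the media types the RTL8125 supports, up to 2500baseT. */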
   1765  1.3.2.2  ad void
   1766  1.3.2.2  ad rge_add_media_types(struct rge_softc *sc)
   1767  1.3.2.2  ad {
   1768  1.3.2.2  ad 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
   1769  1.3.2.2  ad 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
   1770  1.3.2.2  ad 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
   1771  1.3.2.2  ad 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
   1772  1.3.2.2  ad 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T, 0, NULL);
   1773  1.3.2.2  ad 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
   1774  1.3.2.2  ad 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
   1775  1.3.2.2  ad 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX, 0, NULL);
   1776  1.3.2.2  ad }
   1777  1.3.2.2  ad 
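/*
 * rge_config_imtype: choose the interrupt mitigation strategy.
 * RGE_IMTYPE_NONE acknowledges RX/TX interrupts directly, while
 * RGE_IMTYPE_SIM relies on the chip's timer interrupt (simulated
 * interrupt moderation) and acknowledges PCS_TIMEOUT instead.
 */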
   1778  1.3.2.2  ad void
   1779  1.3.2.2  ad rge_config_imtype(struct rge_softc *sc, int imtype)
   1780  1.3.2.2  ad {
   1781  1.3.2.2  ad 	switch (imtype) {
   1782  1.3.2.2  ad 	case RGE_IMTYPE_NONE:
   1783  1.3.2.2  ad 		sc->rge_intrs = RGE_INTRS;
   1784  1.3.2.2  ad 		sc->rge_rx_ack = RGE_ISR_RX_OK | RGE_ISR_RX_DESC_UNAVAIL |
   1785  1.3.2.2  ad 		    RGE_ISR_RX_FIFO_OFLOW;
   1786  1.3.2.2  ad 		sc->rge_tx_ack = RGE_ISR_TX_OK;
   1787  1.3.2.2  ad 		break;
   1788  1.3.2.2  ad 	case RGE_IMTYPE_SIM:
   1789  1.3.2.2  ad 		sc->rge_intrs = RGE_INTRS_TIMER;
   1790  1.3.2.2  ad 		sc->rge_rx_ack = RGE_ISR_PCS_TIMEOUT;
   1791  1.3.2.2  ad 		sc->rge_tx_ack = RGE_ISR_PCS_TIMEOUT;
   1792  1.3.2.2  ad 		break;
   1793  1.3.2.2  ad 	default:
   1794  1.3.2.2  ad 		panic("%s: unknown imtype %d", sc->sc_dev.dv_xname, imtype);
   1795  1.3.2.2  ad 	}
   1796  1.3.2.2  ad }
   1797  1.3.2.2  ad 
   1798  1.3.2.2  ad void
   1799  1.3.2.2  ad rge_disable_sim_im(struct rge_softc *sc)
   1800  1.3.2.2  ad {
   1801  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_TIMERINT, 0);
   1802  1.3.2.2  ad 	sc->rge_timerintr = 0;
   1803  1.3.2.2  ad }
   1804  1.3.2.2  ad 
   1805  1.3.2.2  ad void
   1806  1.3.2.2  ad rge_setup_sim_im(struct rge_softc *sc)
   1807  1.3.2.2  ad {
   1808  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_TIMERINT, 0x2600);
   1809  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
   1810  1.3.2.2  ad 	sc->rge_timerintr = 1;
   1811  1.3.2.2  ad }
   1812  1.3.2.2  ad 
   1813  1.3.2.2  ad void
   1814  1.3.2.2  ad rge_setup_intr(struct rge_softc *sc, int imtype)
   1815  1.3.2.2  ad {
   1816  1.3.2.2  ad 	rge_config_imtype(sc, imtype);
   1817  1.3.2.2  ad 
   1818  1.3.2.2  ad 	/* Enable interrupts. */
   1819  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
   1820  1.3.2.2  ad 
   1821  1.3.2.2  ad 	switch (imtype) {
   1822  1.3.2.2  ad 	case RGE_IMTYPE_NONE:
   1823  1.3.2.2  ad 		rge_disable_sim_im(sc);
   1824  1.3.2.2  ad 		break;
   1825  1.3.2.2  ad 	case RGE_IMTYPE_SIM:
   1826  1.3.2.2  ad 		rge_setup_sim_im(sc);
   1827  1.3.2.2  ad 		break;
   1828  1.3.2.2  ad 	default:
   1829  1.3.2.2  ad 		panic("%s: unknown imtype %d", sc->sc_dev.dv_xname, imtype);
   1830  1.3.2.2  ad 	}
   1831  1.3.2.2  ad }
   1832  1.3.2.2  ad 
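/*
 * rge_exit_oob: take the chip out of OOB (out-of-band management) mode
 * so the driver owns it: clear the RX filter, disable RealWoW, reset,
 * clear the OOB flag and run the handshake on the TWI command and MAC
 * OCP registers; the magic values follow RealTek's reference code.  When
 * MAC OCP register 0xd42c has bit 0x0100 set, additionally wait for the
 * PHY state field (0xa420 & 7) to reach 2 before clearing the related
 * bits.
 */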
   1833  1.3.2.2  ad void
   1834  1.3.2.2  ad rge_exit_oob(struct rge_softc *sc)
   1835  1.3.2.2  ad {
   1836  1.3.2.2  ad 	int i;
   1837  1.3.2.2  ad 
   1838  1.3.2.2  ad 	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
   1839  1.3.2.2  ad 	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
   1840  1.3.2.2  ad 	    RGE_RXCFG_ERRPKT);
   1841  1.3.2.2  ad 
   1842  1.3.2.2  ad 	/* Disable RealWoW. */
   1843  1.3.2.2  ad 	rge_write_mac_ocp(sc, 0xc0bc, 0x00ff);
   1844  1.3.2.2  ad 
   1845  1.3.2.2  ad 	rge_reset(sc);
   1846  1.3.2.2  ad 
   1847  1.3.2.2  ad 	/* Disable OOB. */
   1848  1.3.2.2  ad 	RGE_CLRBIT_1(sc, RGE_MCUCMD, RGE_MCUCMD_IS_OOB);
   1849  1.3.2.2  ad 
   1850  1.3.2.2  ad 	RGE_MAC_CLRBIT(sc, 0xe8de, 0x4000);
   1851  1.3.2.2  ad 
   1852  1.3.2.2  ad 	for (i = 0; i < 10; i++) {
   1853  1.3.2.2  ad 		DELAY(100);
   1854  1.3.2.2  ad 		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
   1855  1.3.2.2  ad 			break;
   1856  1.3.2.2  ad 	}
   1857  1.3.2.2  ad 
   1858  1.3.2.2  ad 	rge_write_mac_ocp(sc, 0xc0aa, 0x07d0);
   1859  1.3.2.2  ad 	rge_write_mac_ocp(sc, 0xc0a6, 0x0150);
   1860  1.3.2.2  ad 	rge_write_mac_ocp(sc, 0xc01e, 0x5555);
   1861  1.3.2.2  ad 
   1862  1.3.2.2  ad 	for (i = 0; i < 10; i++) {
   1863  1.3.2.2  ad 		DELAY(100);
   1864  1.3.2.2  ad 		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
   1865  1.3.2.2  ad 			break;
   1866  1.3.2.2  ad 	}
   1867  1.3.2.2  ad 
   1868  1.3.2.2  ad 	if (rge_read_mac_ocp(sc, 0xd42c) & 0x0100) {
   1869  1.3.2.2  ad 		for (i = 0; i < RGE_TIMEOUT; i++) {
    1870  1.3.2.2  ad 			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 2)
   1871  1.3.2.2  ad 				break;
   1872  1.3.2.2  ad 			DELAY(1000);
   1873  1.3.2.2  ad 		}
   1874  1.3.2.2  ad 		RGE_MAC_CLRBIT(sc, 0xd408, 0x0100);
   1875  1.3.2.2  ad 		RGE_PHY_CLRBIT(sc, 0xa468, 0x000a);
   1876  1.3.2.2  ad 	}
   1877  1.3.2.2  ad }
   1878  1.3.2.2  ad 
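/*
 * CSI accessors: indirect access to PCIe configuration/extended space
 * through RGE_CSIAR/RGE_CSIDR.  Note the busy-flag polarity: a write
 * polls for RGE_CSIAR_BUSY to clear, while a read polls for it to become
 * set, which (as with RealTek's other indirect access windows) signals
 * that the data register holds the result.
 */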
   1879  1.3.2.2  ad void
   1880  1.3.2.2  ad rge_write_csi(struct rge_softc *sc, uint32_t reg, uint32_t val)
   1881  1.3.2.2  ad {
   1882  1.3.2.2  ad 	int i;
   1883  1.3.2.2  ad 
   1884  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_CSIDR, val);
   1885  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_CSIAR, (1 << 16) | (reg & RGE_CSIAR_ADDR_MASK) |
   1886  1.3.2.2  ad 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT) | RGE_CSIAR_BUSY);
   1887  1.3.2.2  ad 
   1888  1.3.2.2  ad 	for (i = 0; i < 10; i++) {
    1889  1.3.2.2  ad 		DELAY(100);
    1890  1.3.2.2  ad 		if (!(RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY))
   1891  1.3.2.2  ad 			break;
   1892  1.3.2.2  ad 	}
   1893  1.3.2.2  ad 
   1894  1.3.2.2  ad 	DELAY(20);
   1895  1.3.2.2  ad }
   1896  1.3.2.2  ad 
   1897  1.3.2.2  ad uint32_t
   1898  1.3.2.2  ad rge_read_csi(struct rge_softc *sc, uint32_t reg)
   1899  1.3.2.2  ad {
   1900  1.3.2.2  ad 	int i;
   1901  1.3.2.2  ad 
   1902  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_CSIAR, (1 << 16) | (reg & RGE_CSIAR_ADDR_MASK) |
   1903  1.3.2.2  ad 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT));
   1904  1.3.2.2  ad 
   1905  1.3.2.2  ad 	for (i = 0; i < 10; i++) {
    1906  1.3.2.2  ad 		DELAY(100);
    1907  1.3.2.2  ad 		if (RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY)
   1908  1.3.2.2  ad 			break;
   1909  1.3.2.2  ad 	}
   1910  1.3.2.2  ad 
   1911  1.3.2.2  ad 	DELAY(20);
   1912  1.3.2.2  ad 
   1913  1.3.2.2  ad 	return (RGE_READ_4(sc, RGE_CSIDR));
   1914  1.3.2.2  ad }
   1915  1.3.2.2  ad 
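/*
 * MAC OCP accessors: the (halved) register offset goes in the address
 * field of RGE_MACOCP and the data in the low 16 bits; reads simply
 * return the data field after writing the address.
 */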
   1916  1.3.2.2  ad void
   1917  1.3.2.2  ad rge_write_mac_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
   1918  1.3.2.2  ad {
   1919  1.3.2.2  ad 	uint32_t tmp;
   1920  1.3.2.2  ad 
   1921  1.3.2.2  ad 	tmp = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
   1922  1.3.2.2  ad 	tmp += val;
   1923  1.3.2.2  ad 	tmp |= RGE_MACOCP_BUSY;
   1924  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_MACOCP, tmp);
   1925  1.3.2.2  ad }
   1926  1.3.2.2  ad 
   1927  1.3.2.2  ad uint16_t
   1928  1.3.2.2  ad rge_read_mac_ocp(struct rge_softc *sc, uint16_t reg)
   1929  1.3.2.2  ad {
   1930  1.3.2.2  ad 	uint32_t val;
   1931  1.3.2.2  ad 
   1932  1.3.2.2  ad 	val = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
   1933  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_MACOCP, val);
   1934  1.3.2.2  ad 
   1935  1.3.2.2  ad 	return (RGE_READ_4(sc, RGE_MACOCP) & RGE_MACOCP_DATA_MASK);
   1936  1.3.2.2  ad }
   1937  1.3.2.2  ad 
   1938  1.3.2.2  ad void
   1939  1.3.2.2  ad rge_write_ephy(struct rge_softc *sc, uint16_t reg, uint16_t val)
   1940  1.3.2.2  ad {
   1941  1.3.2.2  ad 	uint32_t tmp;
   1942  1.3.2.2  ad 	int i;
   1943  1.3.2.2  ad 
   1944  1.3.2.2  ad 	tmp = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
   1945  1.3.2.2  ad 	tmp |= RGE_EPHYAR_BUSY | (val & RGE_EPHYAR_DATA_MASK);
   1946  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_EPHYAR, tmp);
   1947  1.3.2.2  ad 
   1948  1.3.2.2  ad 	for (i = 0; i < 10; i++) {
   1949  1.3.2.2  ad 		DELAY(100);
   1950  1.3.2.2  ad 		if (!(RGE_READ_4(sc, RGE_EPHYAR) & RGE_EPHYAR_BUSY))
   1951  1.3.2.2  ad 			break;
   1952  1.3.2.2  ad 	}
   1953  1.3.2.2  ad 
   1954  1.3.2.2  ad 	DELAY(20);
   1955  1.3.2.2  ad }
   1956  1.3.2.2  ad 
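/*
 * rge_write_phy: write a PHY register.  When no PHY address is given,
 * a standard MII register is translated to the OCP address
 * ((RGE_PHYBASE + reg / 8) << 4) + (reg % 8) * 2 and written through
 * rge_write_phy_ocp().
 */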
   1957  1.3.2.2  ad void
   1958  1.3.2.2  ad rge_write_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg, uint16_t val)
   1959  1.3.2.2  ad {
   1960  1.3.2.2  ad 	uint16_t off, phyaddr;
   1961  1.3.2.2  ad 
   1962  1.3.2.2  ad 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
   1963  1.3.2.2  ad 	phyaddr <<= 4;
   1964  1.3.2.2  ad 
   1965  1.3.2.2  ad 	off = addr ? reg : 0x10 + (reg % 8);
   1966  1.3.2.2  ad 
   1967  1.3.2.2  ad 	phyaddr += (off - 16) << 1;
   1968  1.3.2.2  ad 
   1969  1.3.2.2  ad 	rge_write_phy_ocp(sc, phyaddr, val);
   1970  1.3.2.2  ad }
   1971  1.3.2.2  ad 
   1972  1.3.2.2  ad void
   1973  1.3.2.2  ad rge_write_phy_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
   1974  1.3.2.2  ad {
   1975  1.3.2.2  ad 	uint32_t tmp;
   1976  1.3.2.2  ad 	int i;
   1977  1.3.2.2  ad 
   1978  1.3.2.2  ad 	tmp = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
   1979  1.3.2.2  ad 	tmp |= RGE_PHYOCP_BUSY | val;
   1980  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_PHYOCP, tmp);
   1981  1.3.2.2  ad 
   1982  1.3.2.2  ad 	for (i = 0; i < RGE_TIMEOUT; i++) {
   1983  1.3.2.2  ad 		DELAY(1);
   1984  1.3.2.2  ad 		if (!(RGE_READ_4(sc, RGE_PHYOCP) & RGE_PHYOCP_BUSY))
   1985  1.3.2.2  ad 			break;
   1986  1.3.2.2  ad 	}
   1987  1.3.2.2  ad }
   1988  1.3.2.2  ad 
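/*
 * Note the asymmetry with rge_write_phy_ocp(): a read polls for
 * RGE_PHYOCP_BUSY to become set, which on RealTek's indirect PHY access
 * windows indicates that the data field is now valid.
 */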
   1989  1.3.2.2  ad uint16_t
   1990  1.3.2.2  ad rge_read_phy_ocp(struct rge_softc *sc, uint16_t reg)
   1991  1.3.2.2  ad {
   1992  1.3.2.2  ad 	uint32_t val;
   1993  1.3.2.2  ad 	int i;
   1994  1.3.2.2  ad 
   1995  1.3.2.2  ad 	val = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
   1996  1.3.2.2  ad 	RGE_WRITE_4(sc, RGE_PHYOCP, val);
   1997  1.3.2.2  ad 
   1998  1.3.2.2  ad 	for (i = 0; i < RGE_TIMEOUT; i++) {
   1999  1.3.2.2  ad 		DELAY(1);
   2000  1.3.2.2  ad 		val = RGE_READ_4(sc, RGE_PHYOCP);
   2001  1.3.2.2  ad 		if (val & RGE_PHYOCP_BUSY)
   2002  1.3.2.2  ad 			break;
   2003  1.3.2.2  ad 	}
   2004  1.3.2.2  ad 
   2005  1.3.2.2  ad 	return (val & RGE_PHYOCP_DATA_MASK);
   2006  1.3.2.2  ad }
   2007  1.3.2.2  ad 
   2008  1.3.2.2  ad int
   2009  1.3.2.2  ad rge_get_link_status(struct rge_softc *sc)
   2010  1.3.2.2  ad {
   2011  1.3.2.2  ad 	return ((RGE_READ_2(sc, RGE_PHYSTAT) & RGE_PHYSTAT_LINK) ? 1 : 0);
   2012  1.3.2.2  ad }
   2013  1.3.2.2  ad 
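/*
 * rge_txstart: deferred start, run from the workqueue.  Ring the TX
 * start doorbell so the chip (re)starts fetching descriptors from the
 * transmit ring.
 */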
   2014  1.3.2.2  ad void
   2015  1.3.2.2  ad rge_txstart(struct work *wk, void *arg)
   2016  1.3.2.2  ad {
   2017  1.3.2.2  ad 	struct rge_softc *sc = arg;
   2018  1.3.2.2  ad 
   2019  1.3.2.2  ad 	RGE_WRITE_2(sc, RGE_TXSTART, RGE_TXSTART_START);
   2020  1.3.2.2  ad }
   2021  1.3.2.2  ad 
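/*
 * rge_tick: once-a-second callout; check the link state at splnet and
 * reschedule.
 */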
   2022  1.3.2.2  ad void
   2023  1.3.2.2  ad rge_tick(void *arg)
   2024  1.3.2.2  ad {
   2025  1.3.2.2  ad 	struct rge_softc *sc = arg;
   2026  1.3.2.2  ad 	int s;
   2027  1.3.2.2  ad 
   2028  1.3.2.2  ad 	s = splnet();
   2029  1.3.2.2  ad 	rge_link_state(sc);
   2030  1.3.2.2  ad 	splx(s);
   2031  1.3.2.2  ad 
   2032  1.3.2.2  ad 	timeout_add_sec(&sc->sc_timeout, 1);
   2033  1.3.2.2  ad }
   2034  1.3.2.2  ad 
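/*
 * rge_link_state: report the current link state to the stack.  Note that
 * if_link_state_change() tracks ifp->if_link_state itself, so it is not
 * written here.
 */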
   2035  1.3.2.2  ad void
   2036  1.3.2.2  ad rge_link_state(struct rge_softc *sc)
   2037  1.3.2.2  ad {
   2038  1.3.2.2  ad 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   2039  1.3.2.2  ad 	int link = LINK_STATE_DOWN;
   2040  1.3.2.2  ad 
   2041  1.3.2.2  ad 	if (rge_get_link_status(sc))
   2042  1.3.2.2  ad 		link = LINK_STATE_UP;
   2043  1.3.2.2  ad 
    2044  1.3.2.2  ad 	if (ifp->if_link_state != link)
    2045  1.3.2.2  ad 		if_link_state_change(ifp, link);
   2048  1.3.2.2  ad }
   2049