      1 /*	$NetBSD: if_rge.c,v 1.26 2023/10/05 21:43:02 mrg Exp $	*/
      2 /*	$OpenBSD: if_rge.c,v 1.9 2020/12/12 11:48:53 jan Exp $	*/
      3 
      4 /*
      5  * Copyright (c) 2019, 2020 Kevin Lo <kevlo (at) openbsd.org>
      6  *
      7  * Permission to use, copy, modify, and distribute this software for any
      8  * purpose with or without fee is hereby granted, provided that the above
      9  * copyright notice and this permission notice appear in all copies.
     10  *
     11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
     12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
     13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
     14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
     15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
     16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
     17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
     18  */
     19 
     20 #include <sys/cdefs.h>
     21 __KERNEL_RCSID(0, "$NetBSD: if_rge.c,v 1.26 2023/10/05 21:43:02 mrg Exp $");
     22 
     23 #include <sys/types.h>
     24 
     25 #include <sys/param.h>
     26 #include <sys/systm.h>
     27 #include <sys/sockio.h>
     28 #include <sys/mbuf.h>
     29 #include <sys/kernel.h>
     30 #include <sys/socket.h>
     31 #include <sys/device.h>
     32 #include <sys/endian.h>
     33 #include <sys/callout.h>
     34 #include <sys/workqueue.h>
     35 
     36 #include <net/if.h>
     37 
     38 #include <net/if_dl.h>
     39 #include <net/if_ether.h>
     40 
     41 #include <net/if_media.h>
     42 
     43 #include <netinet/in.h>
     44 #include <net/if_ether.h>
     45 
     46 #include <net/bpf.h>
     47 
     48 #include <sys/bus.h>
     49 #include <machine/intr.h>
     50 
     51 #include <dev/mii/mii.h>
     52 
     53 #include <dev/pci/pcivar.h>
     54 #include <dev/pci/pcireg.h>
     55 #include <dev/pci/pcidevs.h>
     56 
     57 #include <dev/pci/if_rgereg.h>
     58 
     59 #ifdef __NetBSD__
     60 #define letoh32 	htole32
     61 #define nitems(x) 	__arraycount(x)
     62 
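        /*
         * OpenBSD's MCLGETL(9) returns an mbuf with an external buffer of
         * the requested length in a single call.  NetBSD has no direct
         * equivalent, so a minimal local version is provided here with
         * MGETHDR(9) plus MEXTMALLOC(9); the softc argument is unused and
         * kept only for source compatibility with the OpenBSD code below.
         */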
     63 static struct mbuf *
     64 MCLGETL(struct rge_softc *sc __unused, int how,
     65     u_int size)
     66 {
     67 	struct mbuf *m;
     68 
     69 	MGETHDR(m, how, MT_DATA);
     70 	if (m == NULL)
     71 		return NULL;
     72 
     73 	MEXTMALLOC(m, size, how);
     74 	if ((m->m_flags & M_EXT) == 0) {
     75 		m_freem(m);
     76 		return NULL;
     77 	}
     78 	return m;
     79 }
     80 
     81 #ifdef NET_MPSAFE
     82 #define 	RGE_MPSAFE	1
     83 #define 	CALLOUT_FLAGS	CALLOUT_MPSAFE
     84 #else
     85 #define 	CALLOUT_FLAGS	0
     86 #endif
     87 #endif
     88 
     89 #ifdef RGE_DEBUG
     90 #define DPRINTF(x)	do { if (rge_debug > 0) printf x; } while (0)
     91 int rge_debug = 0;
     92 #else
     93 #define DPRINTF(x)
     94 #endif
     95 
     96 static int		rge_match(device_t, cfdata_t, void *);
     97 static void		rge_attach(device_t, device_t, void *);
     98 int		rge_intr(void *);
     99 int		rge_encap(struct rge_softc *, struct mbuf *, int);
    100 int		rge_ioctl(struct ifnet *, u_long, void *);
    101 void		rge_start(struct ifnet *);
    102 void		rge_watchdog(struct ifnet *);
    103 int		rge_init(struct ifnet *);
    104 void		rge_stop(struct ifnet *, int);
    105 int		rge_ifmedia_upd(struct ifnet *);
    106 void		rge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
    107 int		rge_allocmem(struct rge_softc *);
    108 int		rge_newbuf(struct rge_softc *, int);
    109 void		rge_discard_rxbuf(struct rge_softc *, int);
    110 static int	rge_rx_list_init(struct rge_softc *);
    111 static void	rge_rx_list_fini(struct rge_softc *);
    112 static void	rge_tx_list_init(struct rge_softc *);
    113 static void	rge_tx_list_fini(struct rge_softc *);
    114 int		rge_rxeof(struct rge_softc *);
    115 int		rge_txeof(struct rge_softc *);
    116 void		rge_reset(struct rge_softc *);
    117 void		rge_iff(struct rge_softc *);
    118 void		rge_set_phy_power(struct rge_softc *, int);
    119 void		rge_phy_config(struct rge_softc *);
    120 void		rge_phy_config_mac_cfg2(struct rge_softc *);
    121 void		rge_phy_config_mac_cfg3(struct rge_softc *);
    122 void		rge_phy_config_mac_cfg4(struct rge_softc *);
    123 void		rge_phy_config_mac_cfg5(struct rge_softc *);
    124 void		rge_phy_config_mcu(struct rge_softc *, uint16_t);
    125 void		rge_set_macaddr(struct rge_softc *, const uint8_t *);
    126 void		rge_get_macaddr(struct rge_softc *, uint8_t *);
    127 void		rge_hw_init(struct rge_softc *);
    128 void		rge_disable_phy_ocp_pwrsave(struct rge_softc *);
    129 void		rge_patch_phy_mcu(struct rge_softc *, int);
    130 void		rge_add_media_types(struct rge_softc *);
    131 void		rge_config_imtype(struct rge_softc *, int);
    132 void		rge_disable_hw_im(struct rge_softc *);
    133 void		rge_disable_sim_im(struct rge_softc *);
    134 void		rge_setup_sim_im(struct rge_softc *);
    135 void		rge_setup_intr(struct rge_softc *, int);
    136 void		rge_exit_oob(struct rge_softc *);
    137 void		rge_write_csi(struct rge_softc *, uint32_t, uint32_t);
    138 uint32_t	rge_read_csi(struct rge_softc *, uint32_t);
    139 void		rge_write_mac_ocp(struct rge_softc *, uint16_t, uint16_t);
    140 uint16_t	rge_read_mac_ocp(struct rge_softc *, uint16_t);
    141 void		rge_write_ephy(struct rge_softc *, uint16_t, uint16_t);
    142 uint16_t	rge_read_ephy(struct rge_softc *, uint16_t);
    143 void		rge_write_phy(struct rge_softc *, uint16_t, uint16_t, uint16_t);
    144 uint16_t	rge_read_phy(struct rge_softc *, uint16_t, uint16_t);
    145 void		rge_write_phy_ocp(struct rge_softc *, uint16_t, uint16_t);
    146 uint16_t	rge_read_phy_ocp(struct rge_softc *, uint16_t);
    147 int		rge_get_link_status(struct rge_softc *);
    148 void		rge_txstart(struct work *, void *);
    149 void		rge_tick(void *);
    150 void		rge_link_state(struct rge_softc *);
    151 
    152 static const struct {
    153 	uint16_t reg;
    154 	uint16_t val;
    155 }  rtl8125_mac_cfg2_mcu[] = {
    156 	RTL8125_MAC_CFG2_MCU
    157 }, rtl8125_mac_cfg3_mcu[] = {
    158 	RTL8125_MAC_CFG3_MCU
    159 }, rtl8125_mac_cfg4_mcu[] = {
    160 	RTL8125_MAC_CFG4_MCU
    161 }, rtl8125_mac_cfg5_mcu[] = {
    162 	RTL8125_MAC_CFG5_MCU
    163 };
    164 
    165 CFATTACH_DECL_NEW(rge, sizeof(struct rge_softc), rge_match, rge_attach,
    166 		NULL, NULL); /* XXX detach function not yet implemented */
    167 
    168 static const struct device_compatible_entry compat_data[] = {
    169 	{ .id = PCI_ID_CODE(PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_E3000) },
    170 	{ .id = PCI_ID_CODE(PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8125) },
    171 
    172 	PCI_COMPAT_EOL
    173 };
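        /*
         * Both the plain RTL8125 and the (Killer) E3000 variant are
         * matched; pci_compatible_match() checks the attach arguments
         * against the vendor/product pairs in compat_data above.
         */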
    174 
    175 static int
    176 rge_match(device_t parent, cfdata_t match, void *aux)
    177 {
    178 	struct pci_attach_args *pa = aux;
    179 
    180 	return pci_compatible_match(pa, compat_data);
    181 }
    182 
    183 void
    184 rge_attach(device_t parent, device_t self, void *aux)
    185 {
    186 	struct rge_softc *sc = device_private(self);
    187 	struct pci_attach_args *pa = aux;
    188 	pci_chipset_tag_t pc = pa->pa_pc;
    189 	pci_intr_handle_t *ihp;
    190 	char intrbuf[PCI_INTRSTR_LEN];
    191 	const char *intrstr = NULL;
    192 	struct ifnet *ifp;
    193 	pcireg_t reg;
    194 	uint32_t hwrev;
    195 	uint8_t eaddr[ETHER_ADDR_LEN];
    196 	int offset;
    197 	pcireg_t command;
    198 
    199 	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
    200 
    201 	sc->sc_dev = self;
    202 
    203 	pci_aprint_devinfo(pa, "Ethernet controller");
    204 
    205 	/*
    206 	 * Map control/status registers.
    207 	 */
    208 	if (pci_mapreg_map(pa, RGE_PCI_BAR2, PCI_MAPREG_TYPE_MEM |
    209 	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->rge_btag, &sc->rge_bhandle,
    210 	    NULL, &sc->rge_bsize)) {
    211 		if (pci_mapreg_map(pa, RGE_PCI_BAR1, PCI_MAPREG_TYPE_MEM |
    212 		    PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->rge_btag,
    213 		    &sc->rge_bhandle, NULL, &sc->rge_bsize)) {
    214 			if (pci_mapreg_map(pa, RGE_PCI_BAR0, PCI_MAPREG_TYPE_IO,
    215 			    0, &sc->rge_btag, &sc->rge_bhandle, NULL,
    216 			    &sc->rge_bsize)) {
    217 				aprint_error(": can't map mem or i/o space\n");
    218 				return;
    219 			}
    220 		}
    221 	}
    222 
    223 	int counts[PCI_INTR_TYPE_SIZE] = {
    224 		[PCI_INTR_TYPE_INTX] = 1,
    225 		[PCI_INTR_TYPE_MSI] = 1,
    226 		[PCI_INTR_TYPE_MSIX] = 1,
    227 	};
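        	/*
        	 * One vector of each type is requested; pci_intr_alloc(9)
        	 * tries the types in order of preference up to max_type,
        	 * i.e. MSI-X first, then MSI, then INTx.
        	 */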
    228 	int max_type = PCI_INTR_TYPE_MSIX;
    229 	/*
    230 	 * Allocate interrupt.
    231 	 */
    232 	if (pci_intr_alloc(pa, &ihp, counts, max_type) != 0) {
    233 		aprint_error(": couldn't map interrupt\n");
    234 		return;
    235 	}
    236 	switch (pci_intr_type(pc, ihp[0])) {
    237 	case PCI_INTR_TYPE_MSIX:
    238 	case PCI_INTR_TYPE_MSI:
    239 		sc->rge_flags |= RGE_FLAG_MSI;
    240 		break;
    241 	default:
    242 		break;
    243 	}
    244 	intrstr = pci_intr_string(pc, ihp[0], intrbuf, sizeof(intrbuf));
    245 	sc->sc_ih = pci_intr_establish_xname(pc, ihp[0], IPL_NET, rge_intr,
    246 	    sc, device_xname(sc->sc_dev));
    247 	if (sc->sc_ih == NULL) {
    248 		aprint_error_dev(sc->sc_dev, "couldn't establish interrupt");
    249 		if (intrstr != NULL)
    250 			aprint_error(" at %s", intrstr);
    251 		aprint_error("\n");
    252 		return;
    253 	}
    254 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
    255 
    256 	if (pci_dma64_available(pa))
    257 		sc->sc_dmat = pa->pa_dmat64;
    258 	else
    259 		sc->sc_dmat = pa->pa_dmat;
    260 
    261 	sc->sc_pc = pa->pa_pc;
    262 	sc->sc_tag = pa->pa_tag;
    263 
    264 	/* Determine hardware revision */
    265 	hwrev = RGE_READ_4(sc, RGE_TXCFG) & RGE_TXCFG_HWREV;
    266 	switch (hwrev) {
    267 	case 0x60800000:
    268 		sc->rge_type = MAC_CFG2;
    269 		break;
    270 	case 0x60900000:
    271 		sc->rge_type = MAC_CFG3;
    272 		break;
    273 	case 0x64000000:
    274 		sc->rge_type = MAC_CFG4;
    275 		break;
    276 	case 0x64100000:
    277 		sc->rge_type = MAC_CFG5;
    278 		break;
    279 	default:
    280 		aprint_error(": unknown version 0x%08x\n", hwrev);
    281 		return;
    282 	}
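        	/*
        	 * The revision values above appear to correspond to RTL8125
        	 * silicon cuts: 0x60800000/0x60900000 to the original RTL8125
        	 * and 0x64000000/0x64100000 to the RTL8125B; anything else is
        	 * rejected rather than guessed at.
        	 */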
    283 
    284 	rge_config_imtype(sc, RGE_IMTYPE_SIM);
    285 
    286 	/*
    287 	 * PCI Express check.
    288 	 */
    289 	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
    290 	    &offset, NULL)) {
    291 		/* Disable PCIe ASPM and ECPM. */
    292 		reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
    293 		    offset + PCIE_LCSR);
    294 		reg &= ~(PCIE_LCSR_ASPM_L0S | PCIE_LCSR_ASPM_L1 |
    295 		    PCIE_LCSR_ENCLKPM);
    296 		pci_conf_write(pa->pa_pc, pa->pa_tag, offset + PCIE_LCSR,
    297 		    reg);
    298 	}
    299 
    300 	rge_exit_oob(sc);
    301 	rge_hw_init(sc);
    302 
    303 	rge_get_macaddr(sc, eaddr);
    304 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
    305 	    ether_sprintf(eaddr));
    306 
    307 	memcpy(sc->sc_enaddr, eaddr, ETHER_ADDR_LEN);
    308 
    309 	rge_set_phy_power(sc, 1);
    310 	rge_phy_config(sc);
    311 
    312 	if (rge_allocmem(sc))
    313 		return;
    314 
    315 	ifp = &sc->sc_ec.ec_if;
    316 	ifp->if_softc = sc;
    317 	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
    318 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    319 #ifdef RGE_MPSAFE
    320 	ifp->if_extflags = IFEF_MPSAFE;
    321 #endif
    322 	ifp->if_ioctl = rge_ioctl;
    323 	ifp->if_stop = rge_stop;
    324 	ifp->if_start = rge_start;
    325 	ifp->if_init = rge_init;
    326 	ifp->if_watchdog = rge_watchdog;
    327 	IFQ_SET_MAXLEN(&ifp->if_snd, RGE_TX_LIST_CNT - 1);
    328 
    329 #if notyet
    330 	ifp->if_capabilities = IFCAP_CSUM_IPv4_Rx |
    331 	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_TCPv4_Tx |
    332 	    IFCAP_CSUM_UDPv4_Rx | IFCAP_CSUM_UDPv4_Tx;
    333 #endif
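        	/*
        	 * Checksum offload is stubbed out ("notyet"): enabling it
        	 * also requires translating the RGE_RDEXTSTS_* bits in the
        	 * matching #if notyet block in rge_rxeof() into
        	 * m_pkthdr.csum_flags.
        	 */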
    334 
    335 	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
    336 	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
    337 
    338 	callout_init(&sc->sc_timeout, CALLOUT_FLAGS);
    339 	callout_setfunc(&sc->sc_timeout, rge_tick, sc);
    340 
    341 	command = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
    342 	command |= PCI_COMMAND_MASTER_ENABLE;
    343 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
    344 
    345 	/* Initialize ifmedia structures. */
    346 	sc->sc_ec.ec_ifmedia = &sc->sc_media;
    347 	ifmedia_init(&sc->sc_media, IFM_IMASK, rge_ifmedia_upd,
    348 	    rge_ifmedia_sts);
    349 	rge_add_media_types(sc);
    350 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
    351 	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
    352 	sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media;
    353 
    354 	if_attach(ifp);
    355 	ether_ifattach(ifp, eaddr);
    356 
    357 	if (pmf_device_register(self, NULL, NULL))
    358 		pmf_class_network_register(self, ifp);
    359 	else
    360 		aprint_error_dev(self, "couldn't establish power handler\n");
    361 }
    362 
    363 int
    364 rge_intr(void *arg)
    365 {
    366 	struct rge_softc *sc = arg;
    367 	struct ifnet *ifp = &sc->sc_ec.ec_if;
    368 	uint32_t status;
    369 	int claimed = 0, rx, tx;
    370 
    371 	if (!(ifp->if_flags & IFF_RUNNING))
    372 		return (0);
    373 
    374 	/* Disable interrupts. */
    375 	RGE_WRITE_4(sc, RGE_IMR, 0);
    376 
    377 	if (!(sc->rge_flags & RGE_FLAG_MSI)) {
    378 		if ((RGE_READ_4(sc, RGE_ISR) & sc->rge_intrs) == 0)
    379 			return (0);
    380 	}
    381 
    382 	status = RGE_READ_4(sc, RGE_ISR);
    383 	if (status)
    384 		RGE_WRITE_4(sc, RGE_ISR, status);
    385 
    386 	if (status & RGE_ISR_PCS_TIMEOUT)
    387 		claimed = 1;
    388 
    389 	rx = tx = 0;
    390 	if (status & sc->rge_intrs) {
    391 		if (status &
    392 		    (sc->rge_rx_ack | RGE_ISR_RX_ERR | RGE_ISR_RX_FIFO_OFLOW)) {
    393 			rx |= rge_rxeof(sc);
    394 			claimed = 1;
    395 		}
    396 
    397 		if (status & (sc->rge_tx_ack | RGE_ISR_TX_ERR)) {
    398 			tx |= rge_txeof(sc);
    399 			claimed = 1;
    400 		}
    401 
    402 		if (status & RGE_ISR_SYSTEM_ERR) {
    403 			KERNEL_LOCK(1, NULL);
    404 			rge_init(ifp);
    405 			KERNEL_UNLOCK_ONE(NULL);
    406 			claimed = 1;
    407 		}
    408 	}
    409 
    410 	if (sc->rge_timerintr) {
    411 		if ((tx | rx) == 0) {
    412 			/*
    413 			 * Nothing needs to be processed; fall back
    414 			 * to plain TX/RX interrupts.
    415 			 */
    416 			rge_setup_intr(sc, RGE_IMTYPE_NONE);
    417 
    418 			/*
    419 			 * Recollect, mainly to avoid the possible
    420 			 * race introduced by changing interrupt
    421 			 * masks.
    422 			 */
    423 			rge_rxeof(sc);
    424 			rge_txeof(sc);
    425 		} else
    426 			RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
    427 	} else if (tx | rx) {
    428 		/*
    429 		 * Assume that using simulated interrupt moderation
    430 		 * (hardware timer based) could reduce the interrupt
    431 		 * rate.
    432 		 */
    433 		rge_setup_intr(sc, RGE_IMTYPE_SIM);
    434 	}
    435 
    436 	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
    437 
    438 	return (claimed);
    439 }
    440 
    441 int
    442 rge_encap(struct rge_softc *sc, struct mbuf *m, int idx)
    443 {
    444 	struct rge_tx_desc *d = NULL;
    445 	struct rge_txq *txq;
    446 	bus_dmamap_t txmap;
    447 	uint32_t cmdsts, cflags = 0;
    448 	int cur, error, i, last, nsegs;
    449 
    450 #if notyet
    451 	/*
    452 	 * Set RGE_TDEXTSTS_IPCSUM if any checksum offloading is requested.
    453 	 * Otherwise, RGE_TDEXTSTS_TCPCSUM / RGE_TDEXTSTS_UDPCSUM do not
    454 	 * take effect.
    455 	 */
    456 	if ((m->m_pkthdr.csum_flags &
    457 	    (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) != 0) {
    458 		cflags |= RGE_TDEXTSTS_IPCSUM;
    459 		if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
    460 			cflags |= RGE_TDEXTSTS_TCPCSUM;
    461 		if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
    462 			cflags |= RGE_TDEXTSTS_UDPCSUM;
    463 	}
    464 #endif
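        	/*
        	 * Ownership protocol: the start-of-frame descriptor is built
        	 * without RGE_TDCMDSTS_OWN and only flipped to OWN below,
        	 * after every other segment already carries OWN, so the chip
        	 * never sees a partially constructed chain.
        	 */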
    465 
    466 	txq = &sc->rge_ldata.rge_txq[idx];
    467 	txmap = txq->txq_dmamap;
    468 
    469 	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m, BUS_DMA_NOWAIT);
    470 	switch (error) {
    471 	case 0:
    472 		break;
    473 	case EFBIG: /* mbuf chain is too fragmented */
    474 		if (m_defrag(m, M_DONTWAIT) == 0 &&
    475 		    bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m,
    476 		    BUS_DMA_NOWAIT) == 0)
    477 			break;
    478 
    479 		/* FALLTHROUGH */
    480 	default:
    481 		return (0);
    482 	}
    483 
    484 	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
    485 	    BUS_DMASYNC_PREWRITE);
    486 
    487 	nsegs = txmap->dm_nsegs;
    488 
    489 	/* Set up hardware VLAN tagging. */
    490 	if (vlan_has_tag(m))
    491 		cflags |= bswap16(vlan_get_tag(m)) | RGE_TDEXTSTS_VTAG;
    492 
    493 	last = cur = idx;
    494 	cmdsts = RGE_TDCMDSTS_SOF;
    495 
    496 	for (i = 0; i < txmap->dm_nsegs; i++) {
    497 		d = &sc->rge_ldata.rge_tx_list[cur];
    498 
    499 		d->rge_extsts = htole32(cflags);
    500 		d->rge_addrlo = htole32(RGE_ADDR_LO(txmap->dm_segs[i].ds_addr));
    501 		d->rge_addrhi = htole32(RGE_ADDR_HI(txmap->dm_segs[i].ds_addr));
    502 
    503 		cmdsts |= txmap->dm_segs[i].ds_len;
    504 
    505 		if (cur == RGE_TX_LIST_CNT - 1)
    506 			cmdsts |= RGE_TDCMDSTS_EOR;
    507 
    508 		d->rge_cmdsts = htole32(cmdsts);
    509 
    510 		last = cur;
    511 		cmdsts = RGE_TDCMDSTS_OWN;
    512 		cur = RGE_NEXT_TX_DESC(cur);
    513 	}
    514 
    515 	/* Set EOF on the last descriptor. */
    516 	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_EOF);
    517 
    518 	/* Transfer ownership of packet to the chip. */
    519 	d = &sc->rge_ldata.rge_tx_list[idx];
    520 
    521 	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_OWN);
    522 
    523 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
    524 	    cur * sizeof(struct rge_tx_desc), sizeof(struct rge_tx_desc),
    525 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    526 
    527 	/* Update info of TX queue and descriptors. */
    528 	txq->txq_mbuf = m;
    529 	txq->txq_descidx = last;
    530 
    531 	return (nsegs);
    532 }
    533 
    534 int
    535 rge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
    536 {
    537 	struct rge_softc *sc = ifp->if_softc;
    538 	//struct ifreq *ifr = (struct ifreq *)data;
    539 	int s, error = 0;
    540 
    541 	s = splnet();
    542 
    543 	switch (cmd) {
    544 	case SIOCSIFFLAGS:
    545 		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
    546 			break;
    547 		/* XXX set an ifflags callback and let ether_ioctl
    548 		 * handle all of this.
    549 		 */
    550 		if (ifp->if_flags & IFF_UP) {
    551 			if (ifp->if_flags & IFF_RUNNING)
    552 				error = ENETRESET;
    553 			else
    554 				rge_init(ifp);
    555 		} else {
    556 			if (ifp->if_flags & IFF_RUNNING)
    557 				rge_stop(ifp, 1);
    558 		}
    559 		break;
    560 	default:
    561 		error = ether_ioctl(ifp, cmd, data);
    562 	}
    563 
    564 	if (error == ENETRESET) {
    565 		if (ifp->if_flags & IFF_RUNNING)
    566 			rge_iff(sc);
    567 		error = 0;
    568 	}
    569 
    570 	splx(s);
    571 	return (error);
    572 }
    573 
    574 void
    575 rge_start(struct ifnet *ifp)
    576 {
    577 	struct rge_softc *sc = ifp->if_softc;
    578 	struct mbuf *m;
    579 	int free, idx, used;
    580 	int queued = 0;
    581 
    582 #define LINK_STATE_IS_UP(_s)    \
    583 	((_s) >= LINK_STATE_UP || (_s) == LINK_STATE_UNKNOWN)
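        /*
         * Local equivalent of OpenBSD's LINK_STATE_IS_UP(); an unknown
         * link state is treated as up so that output is not blocked
         * before the first PHY status poll.
         */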
    584 
    585 	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
    586 		IFQ_PURGE(&ifp->if_snd);
    587 		return;
    588 	}
    589 
    590 	/* Calculate free space. */
    591 	idx = sc->rge_ldata.rge_txq_prodidx;
    592 	free = sc->rge_ldata.rge_txq_considx;
    593 	if (free <= idx)
    594 		free += RGE_TX_LIST_CNT;
    595 	free -= idx;
    596 
    597 	for (;;) {
    598 		if (RGE_TX_NSEGS >= free + 2) {
    599 			SET(ifp->if_flags, IFF_OACTIVE);
    600 			break;
    601 		}
    602 
    603 		IFQ_DEQUEUE(&ifp->if_snd, m);
    604 		if (m == NULL)
    605 			break;
    606 
    607 		used = rge_encap(sc, m, idx);
    608 		if (used == 0) {
    609 			m_freem(m);
    610 			continue;
    611 		}
    612 
    613 		KASSERT(used <= free);
    614 		free -= used;
    615 
    616 		bpf_mtap(ifp, m, BPF_D_OUT);
    617 
    618 		idx += used;
    619 		if (idx >= RGE_TX_LIST_CNT)
    620 			idx -= RGE_TX_LIST_CNT;
    621 
    622 		queued++;
    623 	}
    624 
    625 	if (queued == 0)
    626 		return;
    627 
    628 	/* Set a timeout in case the chip goes out to lunch. */
    629 	ifp->if_timer = 5;
    630 
    631 	sc->rge_ldata.rge_txq_prodidx = idx;
    632 #if 0
    633 	ifq_serialize(ifq, &sc->sc_task);
    634 #else
    635 	rge_txstart(&sc->sc_task, sc);
    636 #endif
    637 }
    638 
    639 void
    640 rge_watchdog(struct ifnet *ifp)
    641 {
    642 	struct rge_softc *sc = ifp->if_softc;
    643 
    644 	device_printf(sc->sc_dev, "watchdog timeout\n");
    645 	if_statinc(ifp, if_oerrors);
    646 
    647 	rge_init(ifp);
    648 }
    649 
    650 int
    651 rge_init(struct ifnet *ifp)
    652 {
    653 	struct rge_softc *sc = ifp->if_softc;
    654 	uint32_t val;
    655 	unsigned i;
    656 
    657 	rge_stop(ifp, 0);
    658 
    659 	/* Set MAC address. */
    660 	rge_set_macaddr(sc, CLLADDR(ifp->if_sadl));
    661 
    662 	/* Set Maximum frame size. */
    663 	RGE_WRITE_2(sc, RGE_RXMAXSIZE, RGE_JUMBO_FRAMELEN);
    664 
    665 	/* Initialize RX descriptors list. */
    666 	int error = rge_rx_list_init(sc);
    667 	if (error != 0) {
    668 		device_printf(sc->sc_dev,
    669 		    "init failed: no memory for RX buffers\n");
    670 		rge_stop(ifp, 1);
    671 		return error;
    672 	}
    673 
    674 	/* Initialize TX descriptors. */
    675 	rge_tx_list_init(sc);
    676 
    677 	/* Load the addresses of the RX and TX lists into the chip. */
    678 	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_LO,
    679 	    RGE_ADDR_LO(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
    680 	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_HI,
    681 	    RGE_ADDR_HI(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
    682 	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_LO,
    683 	    RGE_ADDR_LO(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
    684 	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_HI,
    685 	    RGE_ADDR_HI(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
    686 
    687 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
    688 
    689 	RGE_CLRBIT_1(sc, 0xf1, 0x80);
    690 	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
    691 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
    692 	RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_RDY_TO_L23);
    693 
    694 	/* Clear interrupt moderation timer. */
    695 	for (i = 0; i < 64; i++)
    696 		RGE_WRITE_4(sc, RGE_INTMITI(i), 0);
    697 
    698 	/* Set the initial RX and TX configurations. */
    699 	RGE_WRITE_4(sc, RGE_RXCFG, RGE_RXCFG_CONFIG);
    700 	RGE_WRITE_4(sc, RGE_TXCFG, RGE_TXCFG_CONFIG);
    701 
    702 	val = rge_read_csi(sc, 0x70c) & ~0xff000000;
    703 	rge_write_csi(sc, 0x70c, val | 0x27000000);
    704 
    705 	/* Enable hardware optimization function. */
    706 	val = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x78) & ~0x00007000;
    707 	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x78, val | 0x00005000);
    708 
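        	/*
        	 * The writes from here down to the RXDV gate are undocumented
        	 * MAC OCP tweaks, presumably carried over from Realtek's
        	 * reference driver; the values vary per MAC_CFG type and are
        	 * best treated as opaque.
        	 */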
    709 	RGE_WRITE_2(sc, 0x0382, 0x221b);
    710 	RGE_WRITE_1(sc, 0x4500, 0);
    711 	RGE_WRITE_2(sc, 0x4800, 0);
    712 	RGE_CLRBIT_1(sc, RGE_CFG1, RGE_CFG1_SPEED_DOWN);
    713 
    714 	rge_write_mac_ocp(sc, 0xc140, 0xffff);
    715 	rge_write_mac_ocp(sc, 0xc142, 0xffff);
    716 
    717 	val = rge_read_mac_ocp(sc, 0xd3e2) & ~0x0fff;
    718 	rge_write_mac_ocp(sc, 0xd3e2, val | 0x03a9);
    719 
    720 	RGE_MAC_CLRBIT(sc, 0xd3e4, 0x00ff);
    721 	RGE_MAC_SETBIT(sc, 0xe860, 0x0080);
    722 	RGE_MAC_SETBIT(sc, 0xeb58, 0x0001);
    723 
    724 	val = rge_read_mac_ocp(sc, 0xe614) & ~0x0700;
    725 	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
    726 		rge_write_mac_ocp(sc, 0xe614, val | 0x0400);
    727 	else
    728 		rge_write_mac_ocp(sc, 0xe614, val | 0x0200);
    729 
    730 	RGE_MAC_CLRBIT(sc, 0xe63e, 0x0c00);
    731 
    732 	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
    733 		val = rge_read_mac_ocp(sc, 0xe63e) & ~0x0030;
    734 		rge_write_mac_ocp(sc, 0xe63e, val | 0x0020);
    735 	} else
    736 		RGE_MAC_CLRBIT(sc, 0xe63e, 0x0030);
    737 
    738 	RGE_MAC_SETBIT(sc, 0xc0b4, 0x000c);
    739 
    740 	val = rge_read_mac_ocp(sc, 0xeb6a) & ~0x00ff;
    741 	rge_write_mac_ocp(sc, 0xeb6a, val | 0x0033);
    742 
    743 	val = rge_read_mac_ocp(sc, 0xeb50) & ~0x03e0;
    744 	rge_write_mac_ocp(sc, 0xeb50, val | 0x0040);
    745 
    746 	val = rge_read_mac_ocp(sc, 0xe056) & ~0x00f0;
    747 	rge_write_mac_ocp(sc, 0xe056, val | 0x0030);
    748 
    749 	RGE_WRITE_1(sc, RGE_TDFNR, 0x10);
    750 
    751 	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_TX_10M_PS_EN);
    752 
    753 	RGE_MAC_CLRBIT(sc, 0xe040, 0x1000);
    754 
    755 	val = rge_read_mac_ocp(sc, 0xea1c) & ~0x0003;
    756 	rge_write_mac_ocp(sc, 0xea1c, val | 0x0001);
    757 
    758 	val = rge_read_mac_ocp(sc, 0xe0c0) & ~0x4f0f;
    759 	rge_write_mac_ocp(sc, 0xe0c0, val | 0x4403);
    760 
    761 	RGE_MAC_SETBIT(sc, 0xe052, 0x0068);
    762 	RGE_MAC_CLRBIT(sc, 0xe052, 0x0080);
    763 
    764 	val = rge_read_mac_ocp(sc, 0xc0ac) & ~0x0080;
    765 	rge_write_mac_ocp(sc, 0xc0ac, val | 0x1f00);
    766 
    767 	val = rge_read_mac_ocp(sc, 0xd430) & ~0x0fff;
    768 	rge_write_mac_ocp(sc, 0xd430, val | 0x047f);
    769 
    770 	val = rge_read_mac_ocp(sc, 0xe84c) & ~0x0040;
    771 	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
    772 		rge_write_mac_ocp(sc, 0xe84c, 0x00c0);
    773 	else
    774 		rge_write_mac_ocp(sc, 0xe84c, 0x0080);
    775 
    776 	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_PFM_EN);
    777 
    778 	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
    779 		RGE_SETBIT_1(sc, RGE_MCUCMD, 0x01);
    780 
    781 	/* Disable EEE plus. */
    782 	RGE_MAC_CLRBIT(sc, 0xe080, 0x0002);
    783 
    784 	RGE_MAC_CLRBIT(sc, 0xea1c, 0x0004);
    785 
    786 	RGE_MAC_SETBIT(sc, 0xeb54, 0x0001);
    787 	DELAY(1);
    788 	RGE_MAC_CLRBIT(sc, 0xeb54, 0x0001);
    789 
    790 	RGE_CLRBIT_4(sc, 0x1880, 0x0030);
    791 
    792 	rge_write_mac_ocp(sc, 0xe098, 0xc302);
    793 
    794 	if ((sc->sc_ec.ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0)
    795 		RGE_SETBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);
    796 	else
    797 		RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);
    798 
    799 	RGE_SETBIT_2(sc, RGE_CPLUSCMD, RGE_CPLUSCMD_RXCSUM);
    800 
    801 	for (i = 0; i < 10; i++) {
    802 		if (!(rge_read_mac_ocp(sc, 0xe00e) & 0x2000))
    803 			break;
    804 		DELAY(1000);
    805 	}
    806 
    807 	/* Disable RXDV gate. */
    808 	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
    809 	DELAY(2000);
    810 
    811 	rge_ifmedia_upd(ifp);
    812 
    813 	/* Enable transmit and receive. */
    814 	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_TXENB | RGE_CMD_RXENB);
    815 
    816 	/* Program promiscuous mode and multicast filters. */
    817 	rge_iff(sc);
    818 
    819 	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
    820 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
    821 
    822 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
    823 
    824 	/* Enable interrupts. */
    825 	rge_setup_intr(sc, RGE_IMTYPE_SIM);
    826 
    827 	ifp->if_flags |= IFF_RUNNING;
    828 	CLR(ifp->if_flags, IFF_OACTIVE);
    829 
    830 	callout_schedule(&sc->sc_timeout, 1);
    831 
    832 	return (0);
    833 }
    834 
    835 /*
    836  * Stop the adapter and free any mbufs allocated to the RX and TX lists.
    837  */
    838 void
    839 rge_stop(struct ifnet *ifp, int disable)
    840 {
    841 	struct rge_softc *sc = ifp->if_softc;
    842 
    843 	if (disable) {
    844 		callout_halt(&sc->sc_timeout, NULL);
    845 	} else
    846 		callout_stop(&sc->sc_timeout);
    847 
    848 	ifp->if_timer = 0;
    849 	ifp->if_flags &= ~IFF_RUNNING;
    850 	sc->rge_timerintr = 0;
    851 
    852 	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
    853 	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
    854 	    RGE_RXCFG_ERRPKT);
    855 
    856 	RGE_WRITE_4(sc, RGE_IMR, 0);
    857 
    858 	/* Clear timer interrupts. */
    859 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
    860 	RGE_WRITE_4(sc, RGE_TIMERINT1, 0);
    861 	RGE_WRITE_4(sc, RGE_TIMERINT2, 0);
    862 	RGE_WRITE_4(sc, RGE_TIMERINT3, 0);
    863 
    864 	rge_reset(sc);
    865 
    866 //	intr_barrier(sc->sc_ih);
    867 //	ifq_barrier(&ifp->if_snd);
    868 /*	ifq_clr_oactive(&ifp->if_snd); XXX OpenBSD queue API, no equivalent here */
    869 
    870 	if (sc->rge_head != NULL) {
    871 		m_freem(sc->rge_head);
    872 		sc->rge_head = sc->rge_tail = NULL;
    873 	}
    874 
    875 	rge_tx_list_fini(sc);
    876 	rge_rx_list_fini(sc);
    877 }
    878 
    879 /*
    880  * Set media options.
    881  */
    882 int
    883 rge_ifmedia_upd(struct ifnet *ifp)
    884 {
    885 	struct rge_softc *sc = ifp->if_softc;
    886 	struct ifmedia *ifm = &sc->sc_media;
    887 	int anar, gig, val;
    888 
    889 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
    890 		return (EINVAL);
    891 
    892 	/* Disable Gigabit Lite. */
    893 	RGE_PHY_CLRBIT(sc, 0xa428, 0x0200);
    894 	RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0001);
    895 
    896 	val = rge_read_phy_ocp(sc, 0xa5d4);
    897 	val &= ~RGE_ADV_2500TFDX;
    898 
    899 	anar = gig = 0;
    900 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
    901 	case IFM_AUTO:
    902 		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
    903 		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
    904 		val |= RGE_ADV_2500TFDX;
    905 		break;
    906 	case IFM_2500_T:
    907 		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
    908 		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
    909 		val |= RGE_ADV_2500TFDX;
    910 		ifp->if_baudrate = IF_Mbps(2500);
    911 		break;
    912 	case IFM_1000_T:
    913 		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
    914 		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
    915 		ifp->if_baudrate = IF_Gbps(1);
    916 		break;
    917 	case IFM_100_TX:
    918 		gig = rge_read_phy(sc, 0, MII_100T2CR) &
    919 		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
    920 		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
    921 		    ANAR_TX | ANAR_TX_FD | ANAR_10_FD | ANAR_10 :
    922 		    ANAR_TX | ANAR_10_FD | ANAR_10;
    923 		ifp->if_baudrate = IF_Mbps(100);
    924 		break;
    925 	case IFM_10_T:
    926 		gig = rge_read_phy(sc, 0, MII_100T2CR) &
    927 		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
    928 		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
    929 		    ANAR_10_FD | ANAR_10 : ANAR_10;
    930 		ifp->if_baudrate = IF_Mbps(10);
    931 		break;
    932 	default:
    933 		device_printf(sc->sc_dev,
    934 		    "unsupported media type\n");
    935 		return (EINVAL);
    936 	}
    937 
    938 	rge_write_phy(sc, 0, MII_ANAR, anar | ANAR_PAUSE_ASYM | ANAR_FC);
    939 	rge_write_phy(sc, 0, MII_100T2CR, gig);
    940 	rge_write_phy_ocp(sc, 0xa5d4, val);
    941 	rge_write_phy(sc, 0, MII_BMCR, BMCR_RESET | BMCR_AUTOEN |
    942 	    BMCR_STARTNEG);
    943 
    944 	return (0);
    945 }
    946 
    947 /*
    948  * Report current media status.
    949  */
    950 void
    951 rge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
    952 {
    953 	struct rge_softc *sc = ifp->if_softc;
    954 	uint16_t status = 0;
    955 
    956 	ifmr->ifm_status = IFM_AVALID;
    957 	ifmr->ifm_active = IFM_ETHER;
    958 
    959 	if (rge_get_link_status(sc)) {
    960 		ifmr->ifm_status |= IFM_ACTIVE;
    961 
    962 		status = RGE_READ_2(sc, RGE_PHYSTAT);
    963 		if ((status & RGE_PHYSTAT_FDX) ||
    964 		    (status & RGE_PHYSTAT_2500MBPS))
    965 			ifmr->ifm_active |= IFM_FDX;
    966 		else
    967 			ifmr->ifm_active |= IFM_HDX;
    968 
    969 		if (status & RGE_PHYSTAT_10MBPS)
    970 			ifmr->ifm_active |= IFM_10_T;
    971 		else if (status & RGE_PHYSTAT_100MBPS)
    972 			ifmr->ifm_active |= IFM_100_TX;
    973 		else if (status & RGE_PHYSTAT_1000MBPS)
    974 			ifmr->ifm_active |= IFM_1000_T;
    975 		else if (status & RGE_PHYSTAT_2500MBPS)
    976 			ifmr->ifm_active |= IFM_2500_T;
    977 	}
    978 }
    979 
    980 /*
    981  * Allocate memory for RX/TX rings.
    982  */
    983 int
    984 rge_allocmem(struct rge_softc *sc)
    985 {
    986 	int error, i;
    987 
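        	/*
        	 * Note: each error path below unwinds only some of the
        	 * preceding steps; a failure aborts rge_attach(), which has
        	 * no detach, so anything created earlier is left allocated.
        	 */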
    988 	/* Allocate DMA'able memory for the TX ring. */
    989 	error = bus_dmamap_create(sc->sc_dmat, RGE_TX_LIST_SZ, 1,
    990 	    RGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT, &sc->rge_ldata.rge_tx_list_map);
    991 	if (error) {
    992 		aprint_error_dev(sc->sc_dev, "can't create TX list map\n");
    993 		return (error);
    994 	}
    995 	error = bus_dmamem_alloc(sc->sc_dmat, RGE_TX_LIST_SZ, RGE_ALIGN, 0,
    996 	    &sc->rge_ldata.rge_tx_listseg, 1, &sc->rge_ldata.rge_tx_listnseg,
    997 	    BUS_DMA_NOWAIT);
    998 	if (error) {
    999 		aprint_error_dev(sc->sc_dev, "can't alloc TX list\n");
   1000 		return (error);
   1001 	}
   1002 
   1003 	/* Load the map for the TX ring. */
   1004 	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
   1005 	    sc->rge_ldata.rge_tx_listnseg, RGE_TX_LIST_SZ,
   1006 	    (void **) &sc->rge_ldata.rge_tx_list,
   1007 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
   1008 	if (error) {
   1009 		aprint_error_dev(sc->sc_dev, "can't map TX dma buffers\n");
   1010 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
   1011 		    sc->rge_ldata.rge_tx_listnseg);
   1012 		return (error);
   1013 	}
   1014 	memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);
   1015 	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
   1016 	    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
   1017 	if (error) {
   1018 		aprint_error_dev(sc->sc_dev, "can't load TX dma map\n");
   1019 		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map);
   1020 		bus_dmamem_unmap(sc->sc_dmat,
   1021 		    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ);
   1022 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
   1023 		    sc->rge_ldata.rge_tx_listnseg);
   1024 		return (error);
   1025 	}
   1026 
   1027 	/* Create DMA maps for TX buffers. */
   1028 	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
   1029 		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN,
   1030 		    RGE_TX_NSEGS, RGE_JUMBO_FRAMELEN, 0, 0,
   1031 		    &sc->rge_ldata.rge_txq[i].txq_dmamap);
   1032 		if (error) {
   1033 			aprint_error_dev(sc->sc_dev, "can't create DMA map for TX\n");
   1034 			return (error);
   1035 		}
   1036 	}
   1037 
   1038 	/* Allocate DMA'able memory for the RX ring. */
   1039 	error = bus_dmamap_create(sc->sc_dmat, RGE_RX_LIST_SZ, 1,
   1040 	    RGE_RX_LIST_SZ, 0, 0, &sc->rge_ldata.rge_rx_list_map);
   1041 	if (error) {
   1042 		aprint_error_dev(sc->sc_dev, "can't create RX list map\n");
   1043 		return (error);
   1044 	}
   1045 	error = bus_dmamem_alloc(sc->sc_dmat, RGE_RX_LIST_SZ, RGE_ALIGN, 0,
   1046 	    &sc->rge_ldata.rge_rx_listseg, 1, &sc->rge_ldata.rge_rx_listnseg,
   1047 	    BUS_DMA_NOWAIT);
   1048 	if (error) {
   1049 		aprint_error_dev(sc->sc_dev, "can't alloc RX list\n");
   1050 		return (error);
   1051 	}
   1052 
   1053 	/* Load the map for the RX ring. */
   1054 	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
   1055 	    sc->rge_ldata.rge_rx_listnseg, RGE_RX_LIST_SZ,
   1056 	    (void **) &sc->rge_ldata.rge_rx_list,
   1057 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
   1058 	if (error) {
   1059 		aprint_error_dev(sc->sc_dev, "can't map RX dma buffers\n");
   1060 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
   1061 		    sc->rge_ldata.rge_rx_listnseg);
   1062 		return (error);
   1063 	}
   1064 	memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);
   1065 	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
   1066 	    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
   1067 	if (error) {
   1068 		aprint_error_dev(sc->sc_dev, "can't load RX dma map\n");
   1069 		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map);
   1070 		bus_dmamem_unmap(sc->sc_dmat,
   1071 		    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ);
   1072 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
   1073 		    sc->rge_ldata.rge_rx_listnseg);
   1074 		return (error);
   1075 	}
   1076 
   1077 	/* Create DMA maps for RX buffers. */
   1078 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
   1079 		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN, 1,
   1080 		    RGE_JUMBO_FRAMELEN, 0, 0,
   1081 		    &sc->rge_ldata.rge_rxq[i].rxq_dmamap);
   1082 		if (error) {
   1083 			aprint_error_dev(sc->sc_dev, "can't create DMA map for RX\n");
   1084 			return (error);
   1085 		}
   1086 	}
   1087 
   1088 	return (error);
   1089 }
   1090 
   1091 /*
   1092  * Initialize the RX descriptor and attach an mbuf cluster.
   1093  */
   1094 int
   1095 rge_newbuf(struct rge_softc *sc, int idx)
   1096 {
   1097 	struct mbuf *m;
   1098 	struct rge_rx_desc *r;
   1099 	struct rge_rxq *rxq;
   1100 	bus_dmamap_t rxmap;
   1101 
   1102 	m = MCLGETL(NULL, M_DONTWAIT, RGE_JUMBO_FRAMELEN);
   1103 	if (m == NULL)
   1104 		return (ENOBUFS);
   1105 
   1106 	m->m_len = m->m_pkthdr.len = RGE_JUMBO_FRAMELEN;
   1107 
   1108 	rxq = &sc->rge_ldata.rge_rxq[idx];
   1109 	rxmap = rxq->rxq_dmamap;
   1110 
   1111 	if (bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m, BUS_DMA_NOWAIT))
   1112 		goto out;
   1113 
   1114 	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
   1115 	    BUS_DMASYNC_PREREAD);
   1116 
   1117 	/* Map the segments into RX descriptors. */
   1118 	r = &sc->rge_ldata.rge_rx_list[idx];
   1119 
   1120 	rxq->rxq_mbuf = m;
   1121 
   1122 	r->hi_qword1.rx_qword4.rge_extsts = 0;
   1123 	r->hi_qword0.rge_addr = htole64(rxmap->dm_segs[0].ds_addr);
   1124 
   1125 	r->hi_qword1.rx_qword4.rge_cmdsts = htole32(rxmap->dm_segs[0].ds_len);
   1126 	if (idx == RGE_RX_LIST_CNT - 1)
   1127 		r->hi_qword1.rx_qword4.rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
   1128 
   1129 	r->hi_qword1.rx_qword4.rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);
   1130 
   1131 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
   1132 	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
   1133 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1134 
   1135 	return (0);
   1136 out:
   1137 	if (m != NULL)
   1138 		m_freem(m);
   1139 	return (ENOMEM);
   1140 }
   1141 
   1142 void
   1143 rge_discard_rxbuf(struct rge_softc *sc, int idx)
   1144 {
   1145 	struct rge_rx_desc *r;
   1146 
   1147 	r = &sc->rge_ldata.rge_rx_list[idx];
   1148 
   1149 	r->hi_qword1.rx_qword4.rge_cmdsts = htole32(RGE_JUMBO_FRAMELEN);
   1150 	r->hi_qword1.rx_qword4.rge_extsts = 0;
   1151 	if (idx == RGE_RX_LIST_CNT - 1)
   1152 		r->hi_qword1.rx_qword4.rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
   1153 	r->hi_qword1.rx_qword4.rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);
   1154 
   1155 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
   1156 	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
   1157 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1158 }
   1159 
   1160 static int
   1161 rge_rx_list_init(struct rge_softc *sc)
   1162 {
   1163 	unsigned i;
   1164 
   1165 	memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);
   1166 
   1167 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
   1168 		sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
   1169 		if (rge_newbuf(sc, i) != 0) {
   1170 			rge_rx_list_fini(sc);
   1171 			return (ENOBUFS);
   1172 		}
   1173 	}
   1174 
   1175 	sc->rge_ldata.rge_rxq_prodidx = sc->rge_ldata.rge_rxq_considx = 0;
   1176 	sc->rge_head = sc->rge_tail = NULL;
   1177 
   1178 	return (0);
   1179 }
   1180 
   1181 static void
   1182 rge_rx_list_fini(struct rge_softc *sc)
   1183 {
   1184 	unsigned i;
   1185 
   1186 	/* Free the RX list buffers. */
   1187 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
   1188 		if (sc->rge_ldata.rge_rxq[i].rxq_mbuf != NULL) {
   1189 			bus_dmamap_unload(sc->sc_dmat,
   1190 			    sc->rge_ldata.rge_rxq[i].rxq_dmamap);
   1191 			m_freem(sc->rge_ldata.rge_rxq[i].rxq_mbuf);
   1192 			sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
   1193 		}
   1194 	}
   1195 }
   1196 
   1197 static void
   1198 rge_tx_list_init(struct rge_softc *sc)
   1199 {
   1200 	unsigned i;
   1201 
   1202 	memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);
   1203 
   1204 	for (i = 0; i < RGE_TX_LIST_CNT; i++)
   1205 		sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
   1206 
   1207 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map, 0,
   1208 	    sc->rge_ldata.rge_tx_list_map->dm_mapsize,
   1209 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1210 
   1211 	sc->rge_ldata.rge_txq_prodidx = sc->rge_ldata.rge_txq_considx = 0;
   1212 }
   1213 
   1214 static void
   1215 rge_tx_list_fini(struct rge_softc *sc)
   1216 {
   1217 	unsigned i;
   1218 
   1219 	/* Free the TX list buffers. */
   1220 	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
   1221 		if (sc->rge_ldata.rge_txq[i].txq_mbuf != NULL) {
   1222 			bus_dmamap_unload(sc->sc_dmat,
   1223 			    sc->rge_ldata.rge_txq[i].txq_dmamap);
   1224 			m_freem(sc->rge_ldata.rge_txq[i].txq_mbuf);
   1225 			sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
   1226 		}
   1227 	}
   1228 }
   1229 
   1230 int
   1231 rge_rxeof(struct rge_softc *sc)
   1232 {
   1233 	struct mbuf *m;
   1234 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   1235 	struct rge_rx_desc *cur_rx;
   1236 	struct rge_rxq *rxq;
   1237 	uint32_t rxstat, extsts;
   1238 	int i, total_len, rx = 0;
   1239 
   1240 	for (i = sc->rge_ldata.rge_rxq_considx; ; i = RGE_NEXT_RX_DESC(i)) {
   1241 		/* Invalidate the descriptor memory. */
   1242 		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
   1243 		    i * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
   1244 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1245 
   1246 		cur_rx = &sc->rge_ldata.rge_rx_list[i];
   1247 
   1248 		if (RGE_OWN(cur_rx))
   1249 			break;
   1250 
   1251 		rxstat = letoh32(cur_rx->hi_qword1.rx_qword4.rge_cmdsts);
   1252 		extsts = letoh32(cur_rx->hi_qword1.rx_qword4.rge_extsts);
   1253 
   1254 		total_len = RGE_RXBYTES(cur_rx);
   1255 		rxq = &sc->rge_ldata.rge_rxq[i];
   1256 		m = rxq->rxq_mbuf;
   1257 		rxq->rxq_mbuf = NULL;
   1258 		rx = 1;
   1259 
   1260 		/* Invalidate the RX mbuf and unload its map. */
   1261 		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_dmamap, 0,
   1262 		    rxq->rxq_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   1263 		bus_dmamap_unload(sc->sc_dmat, rxq->rxq_dmamap);
   1264 
   1265 		if ((rxstat & (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) !=
   1266 		    (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) {
   1267 			rge_discard_rxbuf(sc, i);
   1268 			continue;
   1269 		}
   1270 
   1271 		if (rxstat & RGE_RDCMDSTS_RXERRSUM) {
   1272 			if_statinc(ifp, if_ierrors);
   1273 			/*
   1274 			 * If this is part of a multi-fragment packet,
   1275 			 * discard all the pieces.
   1276 			 */
   1277 			if (sc->rge_head != NULL) {
   1278 				m_freem(sc->rge_head);
   1279 				sc->rge_head = sc->rge_tail = NULL;
   1280 			}
   1281 			rge_discard_rxbuf(sc, i);
   1282 			continue;
   1283 		}
   1284 
   1285 		/*
   1286 		 * If allocating a replacement mbuf fails,
   1287 		 * reload the current one.
   1288 		 */
   1289 
   1290 		if (rge_newbuf(sc, i) != 0) {
   1291 			if (sc->rge_head != NULL) {
   1292 				m_freem(sc->rge_head);
   1293 				sc->rge_head = sc->rge_tail = NULL;
   1294 			}
   1295 			rge_discard_rxbuf(sc, i);
   1296 			continue;
   1297 		}
   1298 
   1299 		m_set_rcvif(m, ifp);
   1300 		if (sc->rge_head != NULL) {
   1301 			m->m_len = total_len;
   1302 			/*
   1303 			 * Special case: if there are 4 bytes or fewer
   1304 			 * in this buffer, the mbuf can be discarded:
   1305 			 * the last 4 bytes are the CRC, which we don't
   1306 			 * care about anyway.
   1307 			 */
   1308 			if (m->m_len <= ETHER_CRC_LEN) {
   1309 				sc->rge_tail->m_len -=
   1310 				    (ETHER_CRC_LEN - m->m_len);
   1311 				m_freem(m);
   1312 			} else {
   1313 				m->m_len -= ETHER_CRC_LEN;
   1314 				m->m_flags &= ~M_PKTHDR;
   1315 				sc->rge_tail->m_next = m;
   1316 			}
   1317 			m = sc->rge_head;
   1318 			sc->rge_head = sc->rge_tail = NULL;
   1319 			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
   1320 		} else
   1321 	#if 0
   1322 			m->m_pkthdr.len = m->m_len =
   1323 			    (total_len - ETHER_CRC_LEN);
   1324 	#else
   1325 		{
   1326 			m->m_pkthdr.len = m->m_len = total_len;
   1327 			m->m_flags |= M_HASFCS;
   1328 		}
   1329 	#endif
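        		/*
        		 * Single-fragment packets are passed up whole with
        		 * M_HASFCS set so the stack strips the trailing CRC;
        		 * multi-fragment chains have it trimmed by hand above.
        		 */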
   1330 
   1331 #if notyet
   1332 		/* Check IP header checksum. */
   1333 		if (!(extsts & RGE_RDEXTSTS_IPCSUMERR) &&
   1334 		    (extsts & RGE_RDEXTSTS_IPV4))
   1335 			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
   1336 
   1337 		/* Check TCP/UDP checksum. */
   1338 		if ((extsts & (RGE_RDEXTSTS_IPV4 | RGE_RDEXTSTS_IPV6)) &&
   1339 		    (((extsts & RGE_RDEXTSTS_TCPPKT) &&
   1340 		    !(extsts & RGE_RDEXTSTS_TCPCSUMERR)) ||
   1341 		    ((extsts & RGE_RDEXTSTS_UDPPKT) &&
   1342 		    !(extsts & RGE_RDEXTSTS_UDPCSUMERR))))
   1343 			m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
   1344 			    M_UDP_CSUM_IN_OK;
   1345 #endif
   1346 
   1347 		if (extsts & RGE_RDEXTSTS_VTAG) {
   1348 			vlan_set_tag(m,
   1349 			    bswap16(extsts & RGE_RDEXTSTS_VLAN_MASK));
   1350 		}
   1351 
   1352 		if_percpuq_enqueue(ifp->if_percpuq, m);
   1353 	}
   1354 
   1355 	sc->rge_ldata.rge_rxq_considx = i;
   1356 
   1357 	return (rx);
   1358 }
   1359 
   1360 int
   1361 rge_txeof(struct rge_softc *sc)
   1362 {
   1363 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   1364 	struct rge_txq *txq;
   1365 	uint32_t txstat;
   1366 	int cons, idx, prod;
   1367 	int free = 0;
   1368 
   1369 	prod = sc->rge_ldata.rge_txq_prodidx;
   1370 	cons = sc->rge_ldata.rge_txq_considx;
   1371 
   1372 	while (prod != cons) {
   1373 		txq = &sc->rge_ldata.rge_txq[cons];
   1374 		idx = txq->txq_descidx;
   1375 
   1376 		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
   1377 		    idx * sizeof(struct rge_tx_desc),
   1378 		    sizeof(struct rge_tx_desc),
   1379 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1380 
   1381 		txstat = letoh32(sc->rge_ldata.rge_tx_list[idx].rge_cmdsts);
   1382 
   1383 		if (txstat & RGE_TDCMDSTS_OWN) {
   1384 			free = 2;
   1385 			break;
   1386 		}
   1387 
   1388 		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap, 0,
   1389 		    txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   1390 		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
   1391 		m_freem(txq->txq_mbuf);
   1392 		txq->txq_mbuf = NULL;
   1393 
   1394 		if (txstat & (RGE_TDCMDSTS_EXCESSCOLL | RGE_TDCMDSTS_COLL))
   1395 			if_statinc(ifp, if_collisions);
   1396 		if (txstat & RGE_TDCMDSTS_TXERR)
   1397 			if_statinc(ifp, if_oerrors);
   1398 
   1399 		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
   1400 		    idx * sizeof(struct rge_tx_desc),
   1401 		    sizeof(struct rge_tx_desc),
   1402 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1403 
   1404 		cons = RGE_NEXT_TX_DESC(idx);
   1405 		free = 1;
   1406 	}
   1407 
   1408 	if (free == 0)
   1409 		return (0);
   1410 
   1411 	sc->rge_ldata.rge_txq_considx = cons;
   1412 
   1413 #if 0
   1414 	if (ifq_is_oactive(&ifp->if_snd))
   1415 		ifq_restart(&ifp->if_snd);
   1416 	else if (free == 2)
   1417 		ifq_serialize(&ifp->if_snd, &sc->sc_task);
   1418 	else
   1419 		ifp->if_timer = 0;
   1420 #else
   1421 #if 0
   1422 	if (!IF_IS_EMPTY(&ifp->if_snd))
   1423 		rge_start(ifp);
   1424 	else
   1425 	if (free == 2)
   1426 		if (0) { rge_txstart(&sc->sc_task, sc); }
   1427 	else
   1428 #endif
   1429 		ifp->if_timer = 0;
   1430 #endif
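        	/*
        	 * free == 2 means descriptors are still owned by the chip;
        	 * the OpenBSD driver used that to re-serialize its start
        	 * routine (see the #if 0 block above), which has no direct
        	 * equivalent here, so only the watchdog timer is cleared.
        	 */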
   1431 
   1432 	return (1);
   1433 }
   1434 
   1435 void
   1436 rge_reset(struct rge_softc *sc)
   1437 {
   1438 	int i;
   1439 
   1440 	/* Enable RXDV gate. */
   1441 	RGE_SETBIT_1(sc, RGE_PPSW, 0x08);
   1442 	DELAY(2000);
   1443 
   1444 	for (i = 0; i < 3000; i++) {
   1445 		DELAY(50);
   1446 		if ((RGE_READ_1(sc, RGE_MCUCMD) & (RGE_MCUCMD_RXFIFO_EMPTY |
   1447 		    RGE_MCUCMD_TXFIFO_EMPTY)) == (RGE_MCUCMD_RXFIFO_EMPTY |
   1448 		    RGE_MCUCMD_TXFIFO_EMPTY))
   1449 			break;
   1450 	}
   1451 	if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5) {
   1452 		for (i = 0; i < 3000; i++) {
   1453 			DELAY(50);
   1454 			if ((RGE_READ_2(sc, RGE_IM) & 0x0103) == 0x0103)
   1455 				break;
   1456 		}
   1457 	}
   1458 
   1459 	DELAY(2000);
   1460 
   1461 	/* Soft reset. */
   1462 	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_RESET);
   1463 
   1464 	for (i = 0; i < RGE_TIMEOUT; i++) {
   1465 		DELAY(100);
   1466 		if (!(RGE_READ_1(sc, RGE_CMD) & RGE_CMD_RESET))
   1467 			break;
   1468 	}
   1469 	if (i == RGE_TIMEOUT)
   1470 		device_printf(sc->sc_dev, "reset never completed!\n");
   1471 }
   1472 
   1473 void
   1474 rge_iff(struct rge_softc *sc)
   1475 {
   1476 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   1477 	struct ethercom *ec = &sc->sc_ec;
   1478 	struct ether_multi *enm;
   1479 	struct ether_multistep step;
   1480 	uint32_t hashes[2];
   1481 	uint32_t rxfilt;
   1482 	int h = 0;
   1483 
   1484 	rxfilt = RGE_READ_4(sc, RGE_RXCFG);
   1485 	rxfilt &= ~(RGE_RXCFG_ALLPHYS | RGE_RXCFG_MULTI);
   1486 	ifp->if_flags &= ~IFF_ALLMULTI;
   1487 
   1488 	/*
   1489 	 * Always accept frames destined to our station address.
   1490 	 * Always accept broadcast frames.
   1491 	 */
   1492 	rxfilt |= RGE_RXCFG_INDIV | RGE_RXCFG_BROAD;
   1493 
   1494 	if (ifp->if_flags & IFF_PROMISC) {
   1495  allmulti:
   1496 		ifp->if_flags |= IFF_ALLMULTI;
   1497 		rxfilt |= RGE_RXCFG_MULTI;
   1498 		if (ifp->if_flags & IFF_PROMISC)
   1499 			rxfilt |= RGE_RXCFG_ALLPHYS;
   1500 		hashes[0] = hashes[1] = 0xffffffff;
   1501 	} else {
   1502 		rxfilt |= RGE_RXCFG_MULTI;
   1503 		/* Program new filter. */
   1504 		memset(hashes, 0, sizeof(hashes));
   1505 
   1506 		ETHER_LOCK(ec);
   1507 		ETHER_FIRST_MULTI(step, ec, enm);
   1508 		while (enm != NULL) {
   1509 			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
   1510 			    ETHER_ADDR_LEN) != 0) {
   1511 			    	ETHER_UNLOCK(ec);
   1512 				goto allmulti;
   1513 			}
   1514 			h = ether_crc32_be(enm->enm_addrlo,
   1515 			    ETHER_ADDR_LEN) >> 26;
   1516 
   1517 			if (h < 32)
   1518 				hashes[0] |= (1U << h);
   1519 			else
   1520 				hashes[1] |= (1U << (h - 32));
   1521 
   1522 			ETHER_NEXT_MULTI(step, enm);
   1523 		}
   1524 		ETHER_UNLOCK(ec);
   1525 	}
   1526 
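        	/*
        	 * As on re(4), the multicast hash words appear to be laid
        	 * out reversed: hashes[1] goes into MAR0 and hashes[0] into
        	 * MAR4, each byte-swapped.
        	 */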
   1527 	RGE_WRITE_4(sc, RGE_RXCFG, rxfilt);
   1528 	RGE_WRITE_4(sc, RGE_MAR0, bswap32(hashes[1]));
   1529 	RGE_WRITE_4(sc, RGE_MAR4, bswap32(hashes[0]));
   1530 }
   1531 
   1532 void
   1533 rge_set_phy_power(struct rge_softc *sc, int on)
   1534 {
   1535 	int i;
   1536 
   1537 	if (on) {
   1538 		RGE_SETBIT_1(sc, RGE_PMCH, 0xc0);
   1539 
   1540 		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN);
   1541 
   1542 		for (i = 0; i < RGE_TIMEOUT; i++) {
   1543 			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 3)
   1544 				break;
   1545 			DELAY(1000);
   1546 		}
   1547 	} else {
   1548 		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_PDOWN);
   1549 		RGE_CLRBIT_1(sc, RGE_PMCH, 0x80);
   1550 		RGE_CLRBIT_1(sc, RGE_PPSW, 0x40);
   1551 	}
   1552 }
   1553 
   1554 void
   1555 rge_phy_config(struct rge_softc *sc)
   1556 {
   1557 	/* Read microcode version. */
   1558 	rge_write_phy_ocp(sc, 0xa436, 0x801e);
   1559 	sc->rge_mcodever = rge_read_phy_ocp(sc, 0xa438);
   1560 
   1561 	switch (sc->rge_type) {
   1562 	case MAC_CFG2:
   1563 		rge_phy_config_mac_cfg2(sc);
   1564 		break;
   1565 	case MAC_CFG3:
   1566 		rge_phy_config_mac_cfg3(sc);
   1567 		break;
   1568 	case MAC_CFG4:
   1569 		rge_phy_config_mac_cfg4(sc);
   1570 		break;
   1571 	case MAC_CFG5:
   1572 		rge_phy_config_mac_cfg5(sc);
   1573 		break;
   1574 	default:
   1575 		break;	/* Can't happen. */
   1576 	}
   1577 
   1578 	rge_write_phy(sc, 0x0a5b, 0x12,
   1579 	    rge_read_phy(sc, 0x0a5b, 0x12) & ~0x8000);
   1580 
   1581 	/* Disable EEE. */
   1582 	RGE_MAC_CLRBIT(sc, 0xe040, 0x0003);
   1583 	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
   1584 		RGE_MAC_CLRBIT(sc, 0xeb62, 0x0006);
   1585 		RGE_PHY_CLRBIT(sc, 0xa432, 0x0010);
   1586 	}
   1587 	RGE_PHY_CLRBIT(sc, 0xa5d0, 0x0006);
   1588 	RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0001);
   1589 	RGE_PHY_CLRBIT(sc, 0xa6d8, 0x0010);
   1590 	RGE_PHY_CLRBIT(sc, 0xa428, 0x0080);
   1591 	RGE_PHY_CLRBIT(sc, 0xa4a2, 0x0200);
   1592 
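        	/*
        	 * rge_patch_phy_mcu(sc, 1) presumably parks the PHY microcode
        	 * so the registers below can be modified without racing it;
        	 * the matching (sc, 0) call releases it again.
        	 */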
   1593 	rge_patch_phy_mcu(sc, 1);
   1594 	RGE_MAC_CLRBIT(sc, 0xe052, 0x0001);
   1595 	RGE_PHY_CLRBIT(sc, 0xa442, 0x3000);
   1596 	RGE_PHY_CLRBIT(sc, 0xa430, 0x8000);
   1597 	rge_patch_phy_mcu(sc, 0);
   1598 }
   1599 
void
rge_phy_config_mac_cfg2(struct rge_softc *sc)
{
	uint16_t val;
	int i;

	for (i = 0; i < nitems(rtl8125_mac_cfg2_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg2_ephy[i].reg,
		    rtl8125_mac_cfg2_ephy[i].val);

	rge_phy_config_mcu(sc, RGE_MAC_CFG2_MCODE_VER);

	val = rge_read_phy_ocp(sc, 0xad40) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0084);
	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad16, val | 0x0006);
	val = rge_read_phy_ocp(sc, 0xad32) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x1100);
	val = rge_read_phy_ocp(sc, 0xac8a) & ~0xf000;
	rge_write_phy_ocp(sc, 0xac8a, val | 0x7000);
	RGE_PHY_SETBIT(sc, 0xad18, 0x0400);
	RGE_PHY_SETBIT(sc, 0xad1a, 0x03ff);
	RGE_PHY_SETBIT(sc, 0xad1c, 0x03ff);

	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xc400);
	rge_write_phy_ocp(sc, 0xa436, 0x80eb);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0x0700;
	rge_write_phy_ocp(sc, 0xa438, val | 0x0300);
	rge_write_phy_ocp(sc, 0xa436, 0x80f8);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80f1);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x3000);
	rge_write_phy_ocp(sc, 0xa436, 0x80fe);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xa500);
	rge_write_phy_ocp(sc, 0xa436, 0x8102);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x8105);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x3300);
	rge_write_phy_ocp(sc, 0xa436, 0x8100);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x7000);
	rge_write_phy_ocp(sc, 0xa436, 0x8104);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xf000);
	rge_write_phy_ocp(sc, 0xa436, 0x8106);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6500);
	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xed00);
	rge_write_phy_ocp(sc, 0xa436, 0x80df);
	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
	rge_write_phy_ocp(sc, 0xa436, 0x80e1);
	RGE_PHY_CLRBIT(sc, 0xa438, 0x0100);
	val = rge_read_phy_ocp(sc, 0xbf06) & ~0x003f;
	rge_write_phy_ocp(sc, 0xbf06, val | 0x0038);
	rge_write_phy_ocp(sc, 0xa436, 0x819f);
	rge_write_phy_ocp(sc, 0xa438, 0xd0b6);
	rge_write_phy_ocp(sc, 0xbc34, 0x5555);
	val = rge_read_phy_ocp(sc, 0xbf0a) & ~0x0e00;
	rge_write_phy_ocp(sc, 0xbf0a, val | 0x0a00);
	RGE_PHY_CLRBIT(sc, 0xa5c0, 0x0400);
	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
}

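/*
 * PHY initialization for MAC_CFG3 chips.  Same pattern as MAC_CFG2:
 * EPHY table, microcode load, then undocumented tuning values from the
 * vendor driver, including an EEE TX idle timer derived from the MTU.
 */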
void
rge_phy_config_mac_cfg3(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint16_t val;
	int i;
	static const uint16_t mac_cfg3_a438_value[] =
	    { 0x0043, 0x00a7, 0x00d6, 0x00ec, 0x00f6, 0x00fb, 0x00fd, 0x00ff,
	      0x00bb, 0x0058, 0x0029, 0x0013, 0x0009, 0x0004, 0x0002 };

	static const uint16_t mac_cfg3_b88e_value[] =
	    { 0xc091, 0x6e12, 0xc092, 0x1214, 0xc094, 0x1516, 0xc096, 0x171b,
	      0xc098, 0x1b1c, 0xc09a, 0x1f1f, 0xc09c, 0x2021, 0xc09e, 0x2224,
	      0xc0a0, 0x2424, 0xc0a2, 0x2424, 0xc0a4, 0x2424, 0xc018, 0x0af2,
	      0xc01a, 0x0d4a, 0xc01c, 0x0f26, 0xc01e, 0x118d, 0xc020, 0x14f3,
	      0xc022, 0x175a, 0xc024, 0x19c0, 0xc026, 0x1c26, 0xc089, 0x6050,
	      0xc08a, 0x5f6e, 0xc08c, 0x6e6e, 0xc08e, 0x6e6e, 0xc090, 0x6e12 };

	for (i = 0; i < nitems(rtl8125_mac_cfg3_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg3_ephy[i].reg,
		    rtl8125_mac_cfg3_ephy[i].val);

	val = rge_read_ephy(sc, 0x002a) & ~0x7000;
	rge_write_ephy(sc, 0x002a, val | 0x3000);
	RGE_EPHY_CLRBIT(sc, 0x0019, 0x0040);
	RGE_EPHY_SETBIT(sc, 0x001b, 0x0e00);
	RGE_EPHY_CLRBIT(sc, 0x001b, 0x7000);
	rge_write_ephy(sc, 0x0002, 0x6042);
	rge_write_ephy(sc, 0x0006, 0x0014);
	val = rge_read_ephy(sc, 0x006a) & ~0x7000;
	rge_write_ephy(sc, 0x006a, val | 0x3000);
	RGE_EPHY_CLRBIT(sc, 0x0059, 0x0040);
	RGE_EPHY_SETBIT(sc, 0x005b, 0x0e00);
	RGE_EPHY_CLRBIT(sc, 0x005b, 0x7000);
	rge_write_ephy(sc, 0x0042, 0x6042);
	rge_write_ephy(sc, 0x0046, 0x0014);

	rge_phy_config_mcu(sc, RGE_MAC_CFG3_MCODE_VER);

	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad16, val | 0x03ff);
	val = rge_read_phy_ocp(sc, 0xad32) & ~0x003f;
	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x1000);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x0100);
	val = rge_read_phy_ocp(sc, 0xacc0) & ~0x0003;
	rge_write_phy_ocp(sc, 0xacc0, val | 0x0002);
	val = rge_read_phy_ocp(sc, 0xad40) & ~0x00e0;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0040);
	val = rge_read_phy_ocp(sc, 0xad40) & ~0x0007;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0004);
	RGE_PHY_CLRBIT(sc, 0xac14, 0x0080);
	RGE_PHY_CLRBIT(sc, 0xac80, 0x0300);
	val = rge_read_phy_ocp(sc, 0xac5e) & ~0x0007;
	rge_write_phy_ocp(sc, 0xac5e, val | 0x0002);
	rge_write_phy_ocp(sc, 0xad4c, 0x00a8);
	rge_write_phy_ocp(sc, 0xac5c, 0x01ff);
	val = rge_read_phy_ocp(sc, 0xac8a) & ~0x00f0;
	rge_write_phy_ocp(sc, 0xac8a, val | 0x0030);
	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0500);
	rge_write_phy_ocp(sc, 0xb87c, 0x8159);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0700);
	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN +
	    32);
	rge_write_phy_ocp(sc, 0xb87c, 0x80a2);
	rge_write_phy_ocp(sc, 0xb87e, 0x0153);
	rge_write_phy_ocp(sc, 0xb87c, 0x809c);
	rge_write_phy_ocp(sc, 0xb87e, 0x0153);

	rge_write_phy_ocp(sc, 0xa436, 0x81b3);
	for (i = 0; i < nitems(mac_cfg3_a438_value); i++)
		rge_write_phy_ocp(sc, 0xa438, mac_cfg3_a438_value[i]);
	for (i = 0; i < 26; i++)
		rge_write_phy_ocp(sc, 0xa438, 0);
	rge_write_phy_ocp(sc, 0xa436, 0x8257);
	rge_write_phy_ocp(sc, 0xa438, 0x020f);
	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
	rge_write_phy_ocp(sc, 0xa438, 0x7843);

	rge_patch_phy_mcu(sc, 1);
	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
	for (i = 0; i < nitems(mac_cfg3_b88e_value); i += 2) {
		rge_write_phy_ocp(sc, 0xb88e, mac_cfg3_b88e_value[i]);
		rge_write_phy_ocp(sc, 0xb890, mac_cfg3_b88e_value[i + 1]);
	}
	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
	rge_patch_phy_mcu(sc, 0);

	RGE_PHY_SETBIT(sc, 0xd068, 0x2000);
	rge_write_phy_ocp(sc, 0xa436, 0x81a2);
	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
	val = rge_read_phy_ocp(sc, 0xb54c) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb54c, val | 0xdb00);
	RGE_PHY_CLRBIT(sc, 0xa454, 0x0001);
	RGE_PHY_SETBIT(sc, 0xa5d4, 0x0020);
	RGE_PHY_CLRBIT(sc, 0xad4e, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xa86a, 0x0001);
	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
}

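/*
 * PHY initialization for MAC_CFG4 chips; the longest of the per-chip
 * sequences, all undocumented values carried over from the vendor
 * driver.
 */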
void
rge_phy_config_mac_cfg4(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint16_t val;
	int i;
	static const uint16_t mac_cfg4_b87c_value[] =
	    { 0x8013, 0x0700, 0x8fb9, 0x2801, 0x8fba, 0x0100, 0x8fbc, 0x1900,
	      0x8fbe, 0xe100, 0x8fc0, 0x0800, 0x8fc2, 0xe500, 0x8fc4, 0x0f00,
	      0x8fc6, 0xf100, 0x8fc8, 0x0400, 0x8fca, 0xf300, 0x8fcc, 0xfd00,
	      0x8fce, 0xff00, 0x8fd0, 0xfb00, 0x8fd2, 0x0100, 0x8fd4, 0xf400,
	      0x8fd6, 0xff00, 0x8fd8, 0xf600, 0x813d, 0x390e, 0x814f, 0x790e,
	      0x80b0, 0x0f31 };

	for (i = 0; i < nitems(rtl8125_mac_cfg4_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg4_ephy[i].reg,
		    rtl8125_mac_cfg4_ephy[i].val);

	rge_write_phy_ocp(sc, 0xbf86, 0x9000);
	RGE_PHY_SETBIT(sc, 0xc402, 0x0400);
	RGE_PHY_CLRBIT(sc, 0xc402, 0x0400);
	rge_write_phy_ocp(sc, 0xbd86, 0x1010);
	rge_write_phy_ocp(sc, 0xbd88, 0x1010);
	val = rge_read_phy_ocp(sc, 0xbd4e) & ~0x0c00;
	rge_write_phy_ocp(sc, 0xbd4e, val | 0x0800);
	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
	rge_write_phy_ocp(sc, 0xbf46, val | 0x0700);

	rge_phy_config_mcu(sc, RGE_MAC_CFG4_MCODE_VER);

	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
	RGE_PHY_SETBIT(sc, 0xbc08, 0x000c);
	rge_write_phy_ocp(sc, 0xa436, 0x8fff);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x0400);
	for (i = 0; i < 6; i++) {
		rge_write_phy_ocp(sc, 0xb87c, 0x8560 + i * 2);
		if (i < 3)
			rge_write_phy_ocp(sc, 0xb87e, 0x19cc);
		else
			rge_write_phy_ocp(sc, 0xb87e, 0x147d);
	}
	rge_write_phy_ocp(sc, 0xb87c, 0x8ffe);
	rge_write_phy_ocp(sc, 0xb87e, 0x0907);
	val = rge_read_phy_ocp(sc, 0xacda) & ~0xff00;
	rge_write_phy_ocp(sc, 0xacda, val | 0xff00);
	val = rge_read_phy_ocp(sc, 0xacde) & ~0xf000;
	rge_write_phy_ocp(sc, 0xacde, val | 0xf000);
	rge_write_phy_ocp(sc, 0xb87c, 0x80d6);
	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
	rge_write_phy_ocp(sc, 0xb87c, 0x80f2);
	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
	rge_write_phy_ocp(sc, 0xb87c, 0x80f4);
	rge_write_phy_ocp(sc, 0xb87e, 0x6077);
	rge_write_phy_ocp(sc, 0xb506, 0x01e7);
	rge_write_phy_ocp(sc, 0xac8c, 0x0ffc);
	rge_write_phy_ocp(sc, 0xac46, 0xb7b4);
	rge_write_phy_ocp(sc, 0xac50, 0x0fbc);
	rge_write_phy_ocp(sc, 0xac3c, 0x9240);
	rge_write_phy_ocp(sc, 0xac4e, 0x0db4);
	rge_write_phy_ocp(sc, 0xacc6, 0x0707);
	rge_write_phy_ocp(sc, 0xacc8, 0xa0d3);
	rge_write_phy_ocp(sc, 0xad08, 0x0007);
	for (i = 0; i < nitems(mac_cfg4_b87c_value); i += 2) {
		rge_write_phy_ocp(sc, 0xb87c, mac_cfg4_b87c_value[i]);
		rge_write_phy_ocp(sc, 0xb87e, mac_cfg4_b87c_value[i + 1]);
	}
	RGE_PHY_SETBIT(sc, 0xbf4c, 0x0002);
	RGE_PHY_SETBIT(sc, 0xbcca, 0x0300);
	rge_write_phy_ocp(sc, 0xb87c, 0x8141);
	rge_write_phy_ocp(sc, 0xb87e, 0x320e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8153);
	rge_write_phy_ocp(sc, 0xb87e, 0x720e);
	RGE_PHY_CLRBIT(sc, 0xa432, 0x0040);
	rge_write_phy_ocp(sc, 0xb87c, 0x8529);
	rge_write_phy_ocp(sc, 0xb87e, 0x050e);
	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN +
	    32);
	rge_write_phy_ocp(sc, 0xa436, 0x816c);
	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
	rge_write_phy_ocp(sc, 0xa436, 0x8170);
	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
	rge_write_phy_ocp(sc, 0xa436, 0x8174);
	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
	rge_write_phy_ocp(sc, 0xa436, 0x8178);
	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
	rge_write_phy_ocp(sc, 0xa436, 0x817c);
	rge_write_phy_ocp(sc, 0xa438, 0x0719);
	rge_write_phy_ocp(sc, 0xa436, 0x8ff4);
	rge_write_phy_ocp(sc, 0xa438, 0x0400);
	rge_write_phy_ocp(sc, 0xa436, 0x8ff1);
	rge_write_phy_ocp(sc, 0xa438, 0x0404);
	rge_write_phy_ocp(sc, 0xbf4a, 0x001b);
	for (i = 0; i < 6; i++) {
		rge_write_phy_ocp(sc, 0xb87c, 0x8033 + i * 4);
		if (i == 2)
			rge_write_phy_ocp(sc, 0xb87e, 0xfc32);
		else
			rge_write_phy_ocp(sc, 0xb87e, 0x7c13);
	}
	rge_write_phy_ocp(sc, 0xb87c, 0x8145);
	rge_write_phy_ocp(sc, 0xb87e, 0x370e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
	rge_write_phy_ocp(sc, 0xb87e, 0x770e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8169);
	rge_write_phy_ocp(sc, 0xb87e, 0x0d0a);
	rge_write_phy_ocp(sc, 0xb87c, 0x817b);
	rge_write_phy_ocp(sc, 0xb87e, 0x1d0a);
	rge_write_phy_ocp(sc, 0xa436, 0x8217);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x821a);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x80da);
	rge_write_phy_ocp(sc, 0xa438, 0x0403);
	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
	rge_write_phy_ocp(sc, 0xa436, 0x80b3);
	rge_write_phy_ocp(sc, 0xa438, 0x0384);
	rge_write_phy_ocp(sc, 0xa436, 0x80b7);
	rge_write_phy_ocp(sc, 0xa438, 0x2007);
	rge_write_phy_ocp(sc, 0xa436, 0x80ba);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80b5);
	rge_write_phy_ocp(sc, 0xa438, 0xf009);
	rge_write_phy_ocp(sc, 0xa436, 0x80bd);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x9f00);
	rge_write_phy_ocp(sc, 0xa436, 0x80c7);
	rge_write_phy_ocp(sc, 0xa438, 0xf083);
	rge_write_phy_ocp(sc, 0xa436, 0x80dd);
	rge_write_phy_ocp(sc, 0xa438, 0x03f0);
	rge_write_phy_ocp(sc, 0xa436, 0x80df);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
	rge_write_phy_ocp(sc, 0xa436, 0x80cb);
	rge_write_phy_ocp(sc, 0xa438, 0x2007);
	rge_write_phy_ocp(sc, 0xa436, 0x80ce);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80c9);
	rge_write_phy_ocp(sc, 0xa438, 0x8009);
	rge_write_phy_ocp(sc, 0xa436, 0x80d1);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x8000);
	rge_write_phy_ocp(sc, 0xa436, 0x80a3);
	rge_write_phy_ocp(sc, 0xa438, 0x200a);
	rge_write_phy_ocp(sc, 0xa436, 0x80a5);
	rge_write_phy_ocp(sc, 0xa438, 0xf0ad);
	rge_write_phy_ocp(sc, 0xa436, 0x809f);
	rge_write_phy_ocp(sc, 0xa438, 0x6073);
	rge_write_phy_ocp(sc, 0xa436, 0x80a1);
	rge_write_phy_ocp(sc, 0xa438, 0x000b);
	rge_write_phy_ocp(sc, 0xa436, 0x80a9);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xc000);
	rge_patch_phy_mcu(sc, 1);
	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
	rge_write_phy_ocp(sc, 0xb88e, 0xc23e);
	rge_write_phy_ocp(sc, 0xb890, 0x0000);
	rge_write_phy_ocp(sc, 0xb88e, 0xc240);
	rge_write_phy_ocp(sc, 0xb890, 0x0103);
	rge_write_phy_ocp(sc, 0xb88e, 0xc242);
	rge_write_phy_ocp(sc, 0xb890, 0x0507);
	rge_write_phy_ocp(sc, 0xb88e, 0xc244);
	rge_write_phy_ocp(sc, 0xb890, 0x090b);
	rge_write_phy_ocp(sc, 0xb88e, 0xc246);
	rge_write_phy_ocp(sc, 0xb890, 0x0c0e);
	rge_write_phy_ocp(sc, 0xb88e, 0xc248);
	rge_write_phy_ocp(sc, 0xb890, 0x1012);
	rge_write_phy_ocp(sc, 0xb88e, 0xc24a);
	rge_write_phy_ocp(sc, 0xb890, 0x1416);
	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
	rge_patch_phy_mcu(sc, 0);
	RGE_PHY_SETBIT(sc, 0xa86a, 0x0001);
	RGE_PHY_SETBIT(sc, 0xa6f0, 0x0001);
	rge_write_phy_ocp(sc, 0xbfa0, 0xd70d);
	rge_write_phy_ocp(sc, 0xbfa2, 0x4100);
	rge_write_phy_ocp(sc, 0xbfa4, 0xe868);
	rge_write_phy_ocp(sc, 0xbfa6, 0xdc59);
	rge_write_phy_ocp(sc, 0xb54c, 0x3c18);
	RGE_PHY_CLRBIT(sc, 0xbfa4, 0x0020);
	rge_write_phy_ocp(sc, 0xa436, 0x817d);
	RGE_PHY_SETBIT(sc, 0xa438, 0x1000);
}

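/*
 * PHY initialization for MAC_CFG5 chips; a much shorter sequence than
 * the earlier variants, but equally undocumented.
 */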
void
rge_phy_config_mac_cfg5(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint16_t val;
	int i;

	for (i = 0; i < nitems(rtl8125_mac_cfg5_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg5_ephy[i].reg,
		    rtl8125_mac_cfg5_ephy[i].val);

	val = rge_read_ephy(sc, 0x0022) & ~0x0030;
	rge_write_ephy(sc, 0x0022, val | 0x0020);
	val = rge_read_ephy(sc, 0x0062) & ~0x0030;
	rge_write_ephy(sc, 0x0062, val | 0x0020);

	rge_phy_config_mcu(sc, RGE_MAC_CFG5_MCODE_VER);

	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
	val = rge_read_phy_ocp(sc, 0xac46) & ~0x00f0;
	rge_write_phy_ocp(sc, 0xac46, val | 0x0090);
	val = rge_read_phy_ocp(sc, 0xad30) & ~0x0003;
	rge_write_phy_ocp(sc, 0xad30, val | 0x0001);
	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN +
	    32);
	rge_write_phy_ocp(sc, 0xb87c, 0x80f5);
	rge_write_phy_ocp(sc, 0xb87e, 0x760e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8107);
	rge_write_phy_ocp(sc, 0xb87e, 0x360e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8551);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0800);
	val = rge_read_phy_ocp(sc, 0xbf00) & ~0xe000;
	rge_write_phy_ocp(sc, 0xbf00, val | 0xa000);
	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
	rge_write_phy_ocp(sc, 0xbf46, val | 0x0300);
	for (i = 0; i < 10; i++) {
		rge_write_phy_ocp(sc, 0xa436, 0x8044 + i * 6);
		rge_write_phy_ocp(sc, 0xa438, 0x2417);
	}
	RGE_PHY_SETBIT(sc, 0xa4ca, 0x0040);
	val = rge_read_phy_ocp(sc, 0xbf84) & ~0xe000;
	rge_write_phy_ocp(sc, 0xbf84, val | 0xa000);
}

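/*
 * Load the PHY MCU microcode, unless the version cached in
 * sc->rge_mcodever already matches mcode_version.  The download happens
 * in patch mode, and the new version is recorded in the PHY (via the
 * 0xa436/0xa438 window at 0x801e) afterwards.
 */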
void
rge_phy_config_mcu(struct rge_softc *sc, uint16_t mcode_version)
{
	if (sc->rge_mcodever != mcode_version) {
		int i;

		rge_patch_phy_mcu(sc, 1);

		if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
			rge_write_phy_ocp(sc, 0xa436, 0x8024);
			if (sc->rge_type == MAC_CFG2)
				rge_write_phy_ocp(sc, 0xa438, 0x8600);
			else
				rge_write_phy_ocp(sc, 0xa438, 0x8601);
			rge_write_phy_ocp(sc, 0xa436, 0xb82e);
			rge_write_phy_ocp(sc, 0xa438, 0x0001);

			RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
		}

		if (sc->rge_type == MAC_CFG2) {
			for (i = 0; i < nitems(rtl8125_mac_cfg2_mcu); i++) {
				rge_write_phy_ocp(sc,
				    rtl8125_mac_cfg2_mcu[i].reg,
				    rtl8125_mac_cfg2_mcu[i].val);
			}
		} else if (sc->rge_type == MAC_CFG3) {
			for (i = 0; i < nitems(rtl8125_mac_cfg3_mcu); i++) {
				rge_write_phy_ocp(sc,
				    rtl8125_mac_cfg3_mcu[i].reg,
				    rtl8125_mac_cfg3_mcu[i].val);
			}
		} else if (sc->rge_type == MAC_CFG4) {
			for (i = 0; i < nitems(rtl8125_mac_cfg4_mcu); i++) {
				rge_write_phy_ocp(sc,
				    rtl8125_mac_cfg4_mcu[i].reg,
				    rtl8125_mac_cfg4_mcu[i].val);
			}
		} else if (sc->rge_type == MAC_CFG5) {
			for (i = 0; i < nitems(rtl8125_mac_cfg5_mcu); i++) {
				rge_write_phy_ocp(sc,
				    rtl8125_mac_cfg5_mcu[i].reg,
				    rtl8125_mac_cfg5_mcu[i].val);
			}
		}

		if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
			RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);

			rge_write_phy_ocp(sc, 0xa436, 0);
			rge_write_phy_ocp(sc, 0xa438, 0);
			RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
			rge_write_phy_ocp(sc, 0xa436, 0x8024);
			rge_write_phy_ocp(sc, 0xa438, 0);
		}

		rge_patch_phy_mcu(sc, 0);

		/* Write microcode version. */
		rge_write_phy_ocp(sc, 0xa436, 0x801e);
		rge_write_phy_ocp(sc, 0xa438, mcode_version);
	}
}

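/*
 * Program the station address into RGE_MAC0/RGE_MAC4.  The registers
 * are write-protected, so config-write mode is toggled via RGE_EECMD
 * around the update.
 */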
void
rge_set_macaddr(struct rge_softc *sc, const uint8_t *addr)
{
	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
	RGE_WRITE_4(sc, RGE_MAC0,
	    (uint32_t)addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	RGE_WRITE_4(sc, RGE_MAC4,
	    addr[5] <<  8 | addr[4]);
	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
}

void
rge_get_macaddr(struct rge_softc *sc, uint8_t *addr)
{
	int i;

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		addr[i] = RGE_READ_1(sc, RGE_ADDR0 + i);
}

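/*
 * One-time hardware setup: clear PME status, disable clock request,
 * halt the MAC MCU and load the per-chip MAC breakpoint tables, then
 * disable PHY power saving.  Register 0xf1 bit 0x80 is undocumented.
 */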
void
rge_hw_init(struct rge_softc *sc)
{
	int i;

	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
	RGE_CLRBIT_1(sc, 0xf1, 0x80);

	/* Disable UPS. */
	RGE_MAC_CLRBIT(sc, 0xd40a, 0x0010);

	/* Configure MAC MCU. */
	rge_write_mac_ocp(sc, 0xfc38, 0);

	for (i = 0xfc28; i < 0xfc38; i += 2)
		rge_write_mac_ocp(sc, i, 0);

	DELAY(3000);
	rge_write_mac_ocp(sc, 0xfc26, 0);

	if (sc->rge_type == MAC_CFG3) {
		for (i = 0; i < nitems(rtl8125_mac_bps); i++) {
			rge_write_mac_ocp(sc, rtl8125_mac_bps[i].reg,
			    rtl8125_mac_bps[i].val);
		}
	} else if (sc->rge_type == MAC_CFG5) {
		for (i = 0; i < nitems(rtl8125b_mac_bps); i++) {
			rge_write_mac_ocp(sc, rtl8125b_mac_bps[i].reg,
			    rtl8125b_mac_bps[i].val);
		}
	}

	/* Disable PHY power saving. */
	rge_disable_phy_ocp_pwrsave(sc);

	/* Set PCIe uncorrectable error status. */
	rge_write_csi(sc, 0x108,
	    rge_read_csi(sc, 0x108) | 0x00100000);
}

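/*
 * Disable PHY OCP power saving.  0xc416 appears to be a power-save
 * control register; 0x0500 is the value the vendor driver uses for
 * "disabled", and rewriting it requires PHY MCU patch mode.
 */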
void
rge_disable_phy_ocp_pwrsave(struct rge_softc *sc)
{
	if (rge_read_phy_ocp(sc, 0xc416) != 0x0500) {
		rge_patch_phy_mcu(sc, 1);
		rge_write_phy_ocp(sc, 0xc416, 0);
		rge_write_phy_ocp(sc, 0xc416, 0x0500);
		rge_patch_phy_mcu(sc, 0);
	}
}

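/*
 * Enter (set != 0) or leave PHY MCU patch mode by toggling bit 0x0010
 * of OCP register 0xb820, then poll 0xb800 bit 0x0040 until the PHY
 * signals ready.
 */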
void
rge_patch_phy_mcu(struct rge_softc *sc, int set)
{
	int i;

	if (set)
		RGE_PHY_SETBIT(sc, 0xb820, 0x0010);
	else
		RGE_PHY_CLRBIT(sc, 0xb820, 0x0010);

	for (i = 0; i < 1000; i++) {
		if ((rge_read_phy_ocp(sc, 0xb800) & 0x0040) == 0x0040)
			break;
		DELAY(100);
	}
	if (i == 1000) {
		DPRINTF(("timeout waiting to patch phy mcu\n"));
		return;
	}
}

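/*
 * The RTL8125 family is 2.5GBASE-T capable; advertise all supported
 * media from 10BASE-T up.
 */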
void
rge_add_media_types(struct rge_softc *sc)
{
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX, 0, NULL);
}

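/*
 * Select the interrupt moderation scheme: either none (RX/TX events
 * interrupt and are acked directly) or simulated moderation, where
 * completions are gathered behind the PCS timeout interrupt.
 */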
void
rge_config_imtype(struct rge_softc *sc, int imtype)
{
	switch (imtype) {
	case RGE_IMTYPE_NONE:
		sc->rge_intrs = RGE_INTRS;
		sc->rge_rx_ack = RGE_ISR_RX_OK | RGE_ISR_RX_DESC_UNAVAIL |
		    RGE_ISR_RX_FIFO_OFLOW;
		sc->rge_tx_ack = RGE_ISR_TX_OK;
		break;
	case RGE_IMTYPE_SIM:
		sc->rge_intrs = RGE_INTRS_TIMER;
		sc->rge_rx_ack = RGE_ISR_PCS_TIMEOUT;
		sc->rge_tx_ack = RGE_ISR_PCS_TIMEOUT;
		break;
	default:
		panic("%s: unknown imtype %d", device_xname(sc->sc_dev), imtype);
	}
}

void
rge_disable_hw_im(struct rge_softc *sc)
{
	RGE_WRITE_2(sc, RGE_IM, 0);
}

void
rge_disable_sim_im(struct rge_softc *sc)
{
	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
	sc->rge_timerintr = 0;
}

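/*
 * Arm the simulated-moderation timer.  The TIMERINT0 period 0x2600 is
 * a magic value carried over from the vendor driver.
 */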
void
rge_setup_sim_im(struct rge_softc *sc)
{
	RGE_WRITE_4(sc, RGE_TIMERINT0, 0x2600);
	RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
	sc->rge_timerintr = 1;
}

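/*
 * Program the interrupt mask for the chosen moderation type and turn
 * the hardware/simulated moderation machinery on or off to match.
 */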
void
rge_setup_intr(struct rge_softc *sc, int imtype)
{
	rge_config_imtype(sc, imtype);

	/* Enable interrupts. */
	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);

	switch (imtype) {
	case RGE_IMTYPE_NONE:
		rge_disable_sim_im(sc);
		rge_disable_hw_im(sc);
		break;
	case RGE_IMTYPE_SIM:
		rge_disable_hw_im(sc);
		rge_setup_sim_im(sc);
		break;
	default:
		panic("%s: unknown imtype %d", device_xname(sc->sc_dev), imtype);
	}
}

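/*
 * Take the chip out of out-of-band (OOB/RealWoW) mode so the host owns
 * it.  The MAC OCP pokes follow the vendor driver; the 0xd42c check
 * appears to handle resume from UPS (ultra power saving) mode.
 */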
void
rge_exit_oob(struct rge_softc *sc)
{
	int i;

	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
	    RGE_RXCFG_ERRPKT);

	/* Disable RealWoW. */
	rge_write_mac_ocp(sc, 0xc0bc, 0x00ff);

	rge_reset(sc);

	/* Disable OOB. */
	RGE_CLRBIT_1(sc, RGE_MCUCMD, RGE_MCUCMD_IS_OOB);

	RGE_MAC_CLRBIT(sc, 0xe8de, 0x4000);

	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
			break;
	}

	rge_write_mac_ocp(sc, 0xc0aa, 0x07d0);
	rge_write_mac_ocp(sc, 0xc0a6, 0x01b5);
	rge_write_mac_ocp(sc, 0xc01e, 0x5555);

	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
			break;
	}

	if (rge_read_mac_ocp(sc, 0xd42c) & 0x0100) {
		printf("%s: rge_exit_oob(): rtl8125_is_ups_resume!!\n",
		    device_xname(sc->sc_dev));
		for (i = 0; i < RGE_TIMEOUT; i++) {
			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 2)
				break;
			DELAY(1000);
		}
		RGE_MAC_CLRBIT(sc, 0xd408, 0x0100);
		if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5)
			RGE_PHY_CLRBIT(sc, 0xa466, 0x0001);
		RGE_PHY_CLRBIT(sc, 0xa468, 0x000a);
	}
}

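/*
 * CSI (PCIe configuration space indirect) access through the CSIAR/
 * CSIDR window.  Note the completion polarity: writes poll for the
 * busy bit to clear, while reads poll for the flag to become set,
 * matching the vendor driver.
 */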
void
rge_write_csi(struct rge_softc *sc, uint32_t reg, uint32_t val)
{
	int i;

	RGE_WRITE_4(sc, RGE_CSIDR, val);
	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT) | RGE_CSIAR_BUSY);

	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (!(RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY))
			break;
	}

	DELAY(20);
}

uint32_t
rge_read_csi(struct rge_softc *sc, uint32_t reg)
{
	int i;

	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT));

	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY)
			break;
	}

	DELAY(20);

	return (RGE_READ_4(sc, RGE_CSIDR));
}

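/*
 * MAC OCP register access: the word-aligned register address goes in
 * the upper half of the RGE_MACOCP window and the data in the lower 16
 * bits, so an access is a single 32-bit write (and read back).
 */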
void
rge_write_mac_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
{
	uint32_t tmp;

	tmp = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
	tmp += val;
	tmp |= RGE_MACOCP_BUSY;
	RGE_WRITE_4(sc, RGE_MACOCP, tmp);
}

uint16_t
rge_read_mac_ocp(struct rge_softc *sc, uint16_t reg)
{
	uint32_t val;

	val = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
	RGE_WRITE_4(sc, RGE_MACOCP, val);

	return (RGE_READ_4(sc, RGE_MACOCP) & RGE_MACOCP_DATA_MASK);
}

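/*
 * PCIe EPHY access through the RGE_EPHYAR window; as with CSI, write
 * completion clears the busy bit while read completion sets the flag.
 */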
void
rge_write_ephy(struct rge_softc *sc, uint16_t reg, uint16_t val)
{
	uint32_t tmp;
	int i;

	tmp = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
	tmp |= RGE_EPHYAR_BUSY | (val & RGE_EPHYAR_DATA_MASK);
	RGE_WRITE_4(sc, RGE_EPHYAR, tmp);

	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (!(RGE_READ_4(sc, RGE_EPHYAR) & RGE_EPHYAR_BUSY))
			break;
	}

	DELAY(20);
}

uint16_t
rge_read_ephy(struct rge_softc *sc, uint16_t reg)
{
	uint32_t val;
	int i;

	val = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
	RGE_WRITE_4(sc, RGE_EPHYAR, val);

	for (i = 0; i < 10; i++) {
		DELAY(100);
		val = RGE_READ_4(sc, RGE_EPHYAR);
		if (val & RGE_EPHYAR_BUSY)
			break;
	}

	DELAY(20);

	return (val & RGE_EPHYAR_DATA_MASK);
}

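/*
 * GbE PHY access.  Standard MII registers are mapped into the flat PHY
 * OCP space: with no explicit page (addr == 0), register reg lives at
 * OCP address ((RGE_PHYBASE + reg / 8) << 4) + (reg % 8) * 2.
 */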
void
rge_write_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg, uint16_t val)
{
	uint16_t off, phyaddr;

	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
	phyaddr <<= 4;

	off = addr ? reg : 0x10 + (reg % 8);

	phyaddr += (off - 16) << 1;

	rge_write_phy_ocp(sc, phyaddr, val);
}

uint16_t
rge_read_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg)
{
	uint16_t off, phyaddr;

	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
	phyaddr <<= 4;

	off = addr ? reg : 0x10 + (reg % 8);

	phyaddr += (off - 16) << 1;

	return (rge_read_phy_ocp(sc, phyaddr));
}

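/*
 * Raw PHY OCP access through the RGE_PHYOCP window, polling
 * RGE_PHYOCP_BUSY for completion (cleared when a write finishes, set
 * when read data is valid).
 */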
void
rge_write_phy_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
{
	uint32_t tmp;
	int i;

	tmp = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
	tmp |= RGE_PHYOCP_BUSY | val;
	RGE_WRITE_4(sc, RGE_PHYOCP, tmp);

	for (i = 0; i < RGE_TIMEOUT; i++) {
		DELAY(1);
		if (!(RGE_READ_4(sc, RGE_PHYOCP) & RGE_PHYOCP_BUSY))
			break;
	}
}

uint16_t
rge_read_phy_ocp(struct rge_softc *sc, uint16_t reg)
{
	uint32_t val;
	int i;

	val = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
	RGE_WRITE_4(sc, RGE_PHYOCP, val);

	for (i = 0; i < RGE_TIMEOUT; i++) {
		DELAY(1);
		val = RGE_READ_4(sc, RGE_PHYOCP);
		if (val & RGE_PHYOCP_BUSY)
			break;
	}

	return (val & RGE_PHYOCP_DATA_MASK);
}

int
rge_get_link_status(struct rge_softc *sc)
{
	return ((RGE_READ_2(sc, RGE_PHYSTAT) & RGE_PHYSTAT_LINK) ? 1 : 0);
}

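/*
 * Deferred transmit kick, apparently run from the workqueue: poke the
 * TX poll register so the chip re-reads the descriptor ring.
 */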
void
rge_txstart(struct work *wk, void *arg)
{
	struct rge_softc *sc = arg;

	RGE_WRITE_2(sc, RGE_TXSTART, RGE_TXSTART_START);
}

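/*
 * Once-a-second callout: poll the link state at splnet and reschedule.
 */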
void
rge_tick(void *arg)
{
	struct rge_softc *sc = arg;
	int s;

	s = splnet();
	rge_link_state(sc);
	splx(s);

	callout_schedule(&sc->sc_timeout, hz);
}

void
rge_link_state(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	int link = LINK_STATE_DOWN;

	if (rge_get_link_status(sc))
		link = LINK_STATE_UP;

	if (ifp->if_link_state != link) { /* XXX not safe to access */
		if_link_state_change(ifp, link);
	}
}