/*	$NetBSD: if_rge.c,v 1.1 2020/01/11 20:56:51 sevan Exp $	*/
/*	$OpenBSD: if_rge.c,v 1.2 2020/01/02 09:00:45 kevlo Exp $	*/

/*
 * Copyright (c) 2019 Kevin Lo <kevlo (at) openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/endian.h>

#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/mii/mii.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_rgereg.h>

int		rge_match(struct device *, void *, void *);
void		rge_attach(struct device *, struct device *, void *);
int		rge_intr(void *);
int		rge_encap(struct rge_softc *, struct mbuf *, int);
int		rge_ioctl(struct ifnet *, u_long, caddr_t);
void		rge_start(struct ifqueue *);
void		rge_watchdog(struct ifnet *);
int		rge_init(struct ifnet *);
void		rge_stop(struct ifnet *);
int		rge_ifmedia_upd(struct ifnet *);
void		rge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
int		rge_allocmem(struct rge_softc *);
int		rge_newbuf(struct rge_softc *, int);
void		rge_discard_rxbuf(struct rge_softc *, int);
int		rge_rx_list_init(struct rge_softc *);
void		rge_tx_list_init(struct rge_softc *);
int		rge_rxeof(struct rge_softc *);
int		rge_txeof(struct rge_softc *);
void		rge_reset(struct rge_softc *);
void		rge_iff(struct rge_softc *);
void		rge_set_phy_power(struct rge_softc *, int);
void		rge_phy_config(struct rge_softc *);
void		rge_set_macaddr(struct rge_softc *, const uint8_t *);
void		rge_get_macaddr(struct rge_softc *, uint8_t *);
void		rge_hw_init(struct rge_softc *);
void		rge_disable_phy_ocp_pwrsave(struct rge_softc *);
void		rge_patch_phy_mcu(struct rge_softc *, int);
void		rge_add_media_types(struct rge_softc *);
void		rge_config_imtype(struct rge_softc *, int);
void		rge_disable_sim_im(struct rge_softc *);
void		rge_setup_sim_im(struct rge_softc *);
void		rge_setup_intr(struct rge_softc *, int);
void		rge_exit_oob(struct rge_softc *);
void		rge_write_csi(struct rge_softc *, uint32_t, uint32_t);
uint32_t	rge_read_csi(struct rge_softc *, uint32_t);
void		rge_write_mac_ocp(struct rge_softc *, uint16_t, uint16_t);
uint16_t	rge_read_mac_ocp(struct rge_softc *, uint16_t);
void		rge_write_ephy(struct rge_softc *, uint16_t, uint16_t);
void		rge_write_phy(struct rge_softc *, uint16_t, uint16_t, uint16_t);
void		rge_write_phy_ocp(struct rge_softc *, uint16_t, uint16_t);
uint16_t	rge_read_phy_ocp(struct rge_softc *, uint16_t);
int		rge_get_link_status(struct rge_softc *);
void		rge_txstart(void *);
void		rge_tick(void *);
void		rge_link_state(struct rge_softc *);

static const struct {
	uint16_t reg;
	uint16_t val;
} rtl8125_def_bps[] = {
	RTL8125_DEF_BPS
}, rtl8125_mac_cfg2_ephy[] = {
	RTL8125_MAC_CFG2_EPHY
}, rtl8125_mac_cfg2_mcu[] = {
	RTL8125_MAC_CFG2_MCU
}, rtl8125_mac_cfg3_ephy[] = {
	RTL8125_MAC_CFG3_EPHY
}, rtl8125_mac_cfg3_mcu[] = {
	RTL8125_MAC_CFG3_MCU
};

struct cfattach rge_ca = {
	sizeof(struct rge_softc), rge_match, rge_attach
};

struct cfdriver rge_cd = {
	NULL, "rge", DV_IFNET
};

const struct pci_matchid rge_devices[] = {
	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_E3000 },
	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RTL8125 }
};

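/*
 * Probe: match supported Realtek 2.5GbE controllers by PCI vendor
 * and product ID.
 */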
int
rge_match(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid((struct pci_attach_args *)aux, rge_devices,
	    nitems(rge_devices)));
}

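/*
 * Attach: map the control/status registers, establish the interrupt
 * (preferring MSI), identify the MAC revision, read the station address
 * and register the interface with the network stack.
 */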
void
rge_attach(struct device *parent, struct device *self, void *aux)
{
	struct rge_softc *sc = (struct rge_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	struct ifnet *ifp;
	pcireg_t reg;
	uint32_t hwrev;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int offset;

	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);

	/*
	 * Map control/status registers.
	 */
	if (pci_mapreg_map(pa, RGE_PCI_BAR2, PCI_MAPREG_TYPE_MEM |
	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->rge_btag, &sc->rge_bhandle,
	    NULL, &sc->rge_bsize, 0)) {
		if (pci_mapreg_map(pa, RGE_PCI_BAR1, PCI_MAPREG_TYPE_MEM |
		    PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->rge_btag,
		    &sc->rge_bhandle, NULL, &sc->rge_bsize, 0)) {
			if (pci_mapreg_map(pa, RGE_PCI_BAR0, PCI_MAPREG_TYPE_IO,
			    0, &sc->rge_btag, &sc->rge_bhandle, NULL,
			    &sc->rge_bsize, 0)) {
				printf(": can't map mem or i/o space\n");
				return;
			}
		}
	}

	/*
	 * Allocate interrupt.
	 */
	if (pci_intr_map_msi(pa, &ih) == 0)
		sc->rge_flags |= RGE_FLAG_MSI;
	else if (pci_intr_map(pa, &ih) != 0) {
		printf(": couldn't map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET | IPL_MPSAFE, rge_intr,
	    sc, sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;

	/* Determine hardware revision */
	hwrev = RGE_READ_4(sc, RGE_TXCFG) & RGE_TXCFG_HWREV;
	switch (hwrev) {
	case 0x60800000:
		sc->rge_type = MAC_CFG2;
		break;
	case 0x60900000:
		sc->rge_type = MAC_CFG3;
		break;
	default:
		printf(": unknown version 0x%08x\n", hwrev);
		return;
	}

	rge_config_imtype(sc, RGE_IMTYPE_SIM);

	/*
	 * PCI Express check.
	 */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
	    &offset, NULL)) {
		/* Disable PCIe ASPM and ECPM. */
		reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    offset + PCI_PCIE_LCSR);
		reg &= ~(PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1 |
		    PCI_PCIE_LCSR_ECPM);
		pci_conf_write(pa->pa_pc, pa->pa_tag, offset + PCI_PCIE_LCSR,
		    reg);
	}

	rge_exit_oob(sc);
	rge_hw_init(sc);

	rge_get_macaddr(sc, eaddr);
	printf(", address %s\n", ether_sprintf(eaddr));

	memcpy(sc->sc_arpcom.ac_enaddr, eaddr, ETHER_ADDR_LEN);

	rge_set_phy_power(sc, 1);
	rge_phy_config(sc);

	if (rge_allocmem(sc))
		return;

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = rge_ioctl;
	ifp->if_qstart = rge_start;
	ifp->if_watchdog = rge_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, RGE_TX_LIST_CNT);
	ifp->if_hardmtu = RGE_JUMBO_MTU;

	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	timeout_set(&sc->sc_timeout, rge_tick, sc);
	task_set(&sc->sc_task, rge_txstart, sc);

	/* Initialize ifmedia structures. */
	ifmedia_init(&sc->sc_media, IFM_IMASK, rge_ifmedia_upd,
	    rge_ifmedia_sts);
	rge_add_media_types(sc);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
	sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media;

	if_attach(ifp);
	ether_ifattach(ifp);
}

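/*
 * Interrupt handler.  Interrupts are masked while the handler runs and
 * re-enabled on exit.  Depending on whether any RX/TX work was found,
 * the handler switches between plain TX/RX interrupts and timer-based
 * (simulated) interrupt moderation.
 */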
int
rge_intr(void *arg)
{
	struct rge_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t status;
	int claimed = 0, rx, tx;

	if (!(ifp->if_flags & IFF_RUNNING))
		return (0);

	/* Disable interrupts. */
	RGE_WRITE_4(sc, RGE_IMR, 0);

	status = RGE_READ_4(sc, RGE_ISR);
	if (!(sc->rge_flags & RGE_FLAG_MSI)) {
		if ((status & RGE_INTRS) == 0 || status == 0xffffffff)
			return (0);
	}
	if (status)
		RGE_WRITE_4(sc, RGE_ISR, status);

	if (status & RGE_ISR_PCS_TIMEOUT)
		claimed = 1;

	rx = tx = 0;
	if (status & RGE_INTRS) {
		if (status &
		    (sc->rge_rx_ack | RGE_ISR_RX_ERR | RGE_ISR_RX_FIFO_OFLOW)) {
			rx |= rge_rxeof(sc);
			claimed = 1;
		}

		if (status & (sc->rge_tx_ack | RGE_ISR_TX_ERR)) {
			tx |= rge_txeof(sc);
			claimed = 1;
		}

		if (status & RGE_ISR_SYSTEM_ERR) {
			KERNEL_LOCK();
			rge_init(ifp);
			KERNEL_UNLOCK();
			claimed = 1;
		}
	}

	if (sc->rge_timerintr) {
		if ((tx | rx) == 0) {
			/*
			 * Nothing needs to be processed; fall back
			 * to using TX/RX interrupts.
			 */
			rge_setup_intr(sc, RGE_IMTYPE_NONE);

			/*
			 * Recollect, mainly to avoid the possible
			 * race introduced by changing interrupt
			 * masks.
			 */
			rge_rxeof(sc);
			rge_txeof(sc);
		} else
			RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
	} else if (tx | rx) {
		/*
		 * Assume that using simulated interrupt moderation
		 * (hardware timer based) could reduce the interrupt
		 * rate.
		 */
		rge_setup_intr(sc, RGE_IMTYPE_SIM);
	}

	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);

	return (claimed);
}

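/*
 * Encapsulate an mbuf chain in the TX ring starting at index "idx".
 * The first (SOF) descriptor is handed to the chip last, so the chip
 * never starts on a partially built chain.  Returns the number of
 * descriptors used, or 0 if the mbuf could not be mapped.
 */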
int
rge_encap(struct rge_softc *sc, struct mbuf *m, int idx)
{
	struct rge_tx_desc *d = NULL;
	struct rge_txq *txq;
	bus_dmamap_t txmap;
	uint32_t cmdsts, cflags = 0;
	int cur, error, i, last, nsegs;

	/*
	 * Set RGE_TDEXTSTS_IPCSUM if any checksum offloading is requested.
	 * Otherwise, RGE_TDEXTSTS_TCPCSUM and RGE_TDEXTSTS_UDPCSUM do not
	 * take effect.
	 */
	if ((m->m_pkthdr.csum_flags &
	    (M_IPV4_CSUM_OUT | M_TCP_CSUM_OUT | M_UDP_CSUM_OUT)) != 0) {
		cflags |= RGE_TDEXTSTS_IPCSUM;
		if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
			cflags |= RGE_TDEXTSTS_TCPCSUM;
		if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
			cflags |= RGE_TDEXTSTS_UDPCSUM;
	}

	txq = &sc->rge_ldata.rge_txq[idx];
	txmap = txq->txq_dmamap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m, BUS_DMA_NOWAIT);
	switch (error) {
	case 0:
		break;
	case EFBIG: /* mbuf chain is too fragmented */
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m,
		    BUS_DMA_NOWAIT) == 0)
			break;

		/* FALLTHROUGH */
	default:
		return (0);
	}

	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	nsegs = txmap->dm_nsegs;

	/* Set up hardware VLAN tagging. */
#if NVLAN > 0
	if (m->m_flags & M_VLANTAG)
		cflags |= swap16(m->m_pkthdr.ether_vtag | RGE_TDEXTSTS_VTAG);
#endif

	cur = idx;
	cmdsts = RGE_TDCMDSTS_SOF;

	for (i = 0; i < txmap->dm_nsegs; i++) {
		d = &sc->rge_ldata.rge_tx_list[cur];

		d->rge_extsts = htole32(cflags);
		d->rge_addrlo = htole32(RGE_ADDR_LO(txmap->dm_segs[i].ds_addr));
		d->rge_addrhi = htole32(RGE_ADDR_HI(txmap->dm_segs[i].ds_addr));

		cmdsts |= txmap->dm_segs[i].ds_len;

		if (cur == RGE_TX_LIST_CNT - 1)
			cmdsts |= RGE_TDCMDSTS_EOR;

		d->rge_cmdsts = htole32(cmdsts);

		last = cur;
		cmdsts = RGE_TDCMDSTS_OWN;
		cur = RGE_NEXT_TX_DESC(cur);
	}

	/* Set EOF on the last descriptor. */
	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_EOF);

	/* Transfer ownership of packet to the chip. */
	d = &sc->rge_ldata.rge_tx_list[idx];

	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
	    cur * sizeof(struct rge_tx_desc), sizeof(struct rge_tx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Update info of TX queue and descriptors. */
	txq->txq_mbuf = m;
	txq->txq_descidx = last;

	return (nsegs);
}

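/*
 * Handle socket ioctls: bring the interface up or down, set the MTU
 * and media, and reprogram the RX filter on multicast changes.
 */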
int
rge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct rge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			rge_init(ifp);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				rge_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				rge_stop(ifp);
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu > ifp->if_hardmtu) {
			error = EINVAL;
			break;
		}
		ifp->if_mtu = ifr->ifr_mtu;
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			rge_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

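/*
 * Transmit start routine.  The free-descriptor count below treats the
 * ring as circular; for illustration, with considx 10, prodidx 1000
 * and (hypothetically) a 1024-entry ring, free = 10 + 1024 - 1000 = 34.
 */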
void
rge_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct rge_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int free, idx, used;
	int queued = 0;

	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
		ifq_purge(ifq);
		return;
	}

	/* Calculate free space. */
	idx = sc->rge_ldata.rge_txq_prodidx;
	free = sc->rge_ldata.rge_txq_considx;
	if (free <= idx)
		free += RGE_TX_LIST_CNT;
	free -= idx;

	for (;;) {
		if (RGE_TX_NSEGS >= free + 2) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		used = rge_encap(sc, m, idx);
		if (used == 0) {
			m_freem(m);
			continue;
		}

		KASSERT(used <= free);
		free -= used;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		idx += used;
		if (idx >= RGE_TX_LIST_CNT)
			idx -= RGE_TX_LIST_CNT;

		queued++;
	}

	if (queued == 0)
		return;

	/* Set a timeout in case the chip goes out to lunch. */
	ifp->if_timer = 5;

	sc->rge_ldata.rge_txq_prodidx = idx;
	ifq_serialize(ifq, &sc->sc_task);
}

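/*
 * Watchdog: fires when a transmit hangs for too long; count the error
 * and reinitialize the chip.
 */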
void
rge_watchdog(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	rge_init(ifp);
}

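/*
 * Bring the interface up: program the station address, set up the
 * descriptor rings, apply the chip-specific MAC tweaks, then enable
 * the transmitter, receiver and interrupts.
 */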
int
rge_init(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	uint32_t val;
	uint16_t max_frame_size;
	int i;

	rge_stop(ifp);

	/* Set MAC address. */
	rge_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);

	/*
	 * Set the maximum frame size, but don't let the MTU be less
	 * than ETHERMTU.
	 */
	if (ifp->if_mtu < ETHERMTU)
		max_frame_size = ETHERMTU;
	else
		max_frame_size = ifp->if_mtu;

	max_frame_size += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
	    ETHER_CRC_LEN + 1;

	if (max_frame_size > RGE_JUMBO_FRAMELEN)
		max_frame_size -= 1;

	RGE_WRITE_2(sc, RGE_RXMAXSIZE, max_frame_size);

	/* Initialize RX descriptors list. */
	if (rge_rx_list_init(sc) == ENOBUFS) {
		printf("%s: init failed: no memory for RX buffers\n",
		    sc->sc_dev.dv_xname);
		rge_stop(ifp);
		return (ENOBUFS);
	}

	/* Initialize TX descriptors. */
	rge_tx_list_init(sc);

	/* Load the addresses of the RX and TX lists into the chip. */
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_LO,
	    RGE_ADDR_LO(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_HI,
	    RGE_ADDR_HI(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_LO,
	    RGE_ADDR_LO(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_HI,
	    RGE_ADDR_HI(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));

	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);

	RGE_CLRBIT_1(sc, 0xf1, 0x80);
	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
	RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_RDY_TO_L23);

	/* Clear interrupt moderation timer. */
	for (i = 0; i < 64; i++)
		RGE_WRITE_4(sc, RGE_IM(i), 0);

	/* Set the initial RX and TX configurations. */
	RGE_WRITE_4(sc, RGE_RXCFG, RGE_RXCFG_CONFIG);
	RGE_WRITE_4(sc, RGE_TXCFG, RGE_TXCFG_CONFIG);

	val = rge_read_csi(sc, 0x70c) & ~0xff000000;
	rge_write_csi(sc, 0x70c, val | 0x27000000);

	/* Enable hardware optimization function. */
	val = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x78) & ~0x00007000;
	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x78, val | 0x00005000);

	RGE_WRITE_2(sc, 0x0382, 0x221b);
	RGE_WRITE_1(sc, 0x4500, 0);
	RGE_WRITE_2(sc, 0x4800, 0);
	RGE_CLRBIT_1(sc, RGE_CFG1, RGE_CFG1_SPEED_DOWN);

	rge_write_mac_ocp(sc, 0xc140, 0xffff);
	rge_write_mac_ocp(sc, 0xc142, 0xffff);

	val = rge_read_mac_ocp(sc, 0xd3e2) & ~0x0fff;
	rge_write_mac_ocp(sc, 0xd3e2, val | 0x03a9);

	RGE_MAC_CLRBIT(sc, 0xd3e4, 0x00ff);
	RGE_MAC_SETBIT(sc, 0xe860, 0x0080);
	RGE_MAC_SETBIT(sc, 0xeb58, 0x0001);

	val = rge_read_mac_ocp(sc, 0xe614) & ~0x0700;
	rge_write_mac_ocp(sc, 0xe614, val | 0x0400);

	RGE_MAC_CLRBIT(sc, 0xe63e, 0x0c00);

	val = rge_read_mac_ocp(sc, 0xe63e) & ~0x0030;
	rge_write_mac_ocp(sc, 0xe63e, val | 0x0020);

	RGE_MAC_SETBIT(sc, 0xc0b4, 0x000c);

	val = rge_read_mac_ocp(sc, 0xeb6a) & ~0x007f;
	rge_write_mac_ocp(sc, 0xeb6a, val | 0x0033);

	val = rge_read_mac_ocp(sc, 0xeb50) & ~0x03e0;
	rge_write_mac_ocp(sc, 0xeb50, val | 0x0040);

	val = rge_read_mac_ocp(sc, 0xe056) & ~0x00f0;
	rge_write_mac_ocp(sc, 0xe056, val | 0x0030);

	RGE_WRITE_1(sc, RGE_TDFNR, 0x10);

	RGE_MAC_CLRBIT(sc, 0xe040, 0x1000);

	val = rge_read_mac_ocp(sc, 0xe0c0) & ~0x4f0f;
	rge_write_mac_ocp(sc, 0xe0c0, val | 0x4403);

	RGE_MAC_SETBIT(sc, 0xe052, 0x0068);
	RGE_MAC_CLRBIT(sc, 0xe052, 0x0080);

	val = rge_read_mac_ocp(sc, 0xc0ac) & ~0x0080;
	rge_write_mac_ocp(sc, 0xc0ac, val | 0x1f00);

	val = rge_read_mac_ocp(sc, 0xd430) & ~0x0fff;
	rge_write_mac_ocp(sc, 0xd430, val | 0x047f);

	RGE_MAC_SETBIT(sc, 0xe84c, 0x00c0);

	/* Disable EEE plus. */
	RGE_MAC_CLRBIT(sc, 0xe080, 0x0002);

	RGE_MAC_CLRBIT(sc, 0xea1c, 0x0004);

	RGE_MAC_SETBIT(sc, 0xeb54, 0x0001);
	DELAY(1);
	RGE_MAC_CLRBIT(sc, 0xeb54, 0x0001);

	RGE_CLRBIT_4(sc, 0x1880, 0x0030);

	rge_write_mac_ocp(sc, 0xe098, 0xc302);

	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		RGE_SETBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);

	RGE_SETBIT_2(sc, RGE_CPLUSCMD, RGE_CPLUSCMD_RXCSUM);

	for (i = 0; i < 10; i++) {
		if (!(rge_read_mac_ocp(sc, 0xe00e) & 0x2000))
			break;
		DELAY(1000);
	}

	/* Disable RXDV gate. */
	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
	DELAY(2000);

	rge_ifmedia_upd(ifp);

	/* Enable transmit and receive. */
	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_TXENB | RGE_CMD_RXENB);

	/* Program promiscuous mode and multicast filters. */
	rge_iff(sc);

	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);

	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);

	/* Enable interrupts. */
	rge_setup_intr(sc, RGE_IMTYPE_SIM);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	timeout_add_sec(&sc->sc_timeout, 1);

	return (0);
}

/*
 * Stop the adapter and free any mbufs allocated to the RX and TX lists.
 */
void
rge_stop(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	int i;

	timeout_del(&sc->sc_timeout);

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_RUNNING;
	sc->rge_timerintr = 0;

	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
	    RGE_RXCFG_ERRPKT);

	RGE_WRITE_4(sc, RGE_IMR, 0);
	RGE_WRITE_4(sc, RGE_ISR, 0xffffffff);

	rge_reset(sc);

	intr_barrier(sc->sc_ih);
	ifq_barrier(&ifp->if_snd);
	ifq_clr_oactive(&ifp->if_snd);

	if (sc->rge_head != NULL) {
		m_freem(sc->rge_head);
		sc->rge_head = sc->rge_tail = NULL;
	}

	/* Free the TX list buffers. */
	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
		if (sc->rge_ldata.rge_txq[i].txq_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->rge_ldata.rge_txq[i].txq_dmamap);
			m_freem(sc->rge_ldata.rge_txq[i].txq_mbuf);
			sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
		}
	}

	/* Free the RX list buffers. */
	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
		if (sc->rge_ldata.rge_rxq[i].rxq_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->rge_ldata.rge_rxq[i].rxq_dmamap);
			m_freem(sc->rge_ldata.rge_rxq[i].rxq_mbuf);
			sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
		}
	}
}

/*
 * Set media options.
 */
int
rge_ifmedia_upd(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_media;
	int anar, gig, val;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Disable Gigabit Lite. */
	RGE_PHY_CLRBIT(sc, 0xa428, 0x0200);
	RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0001);

	val = rge_read_phy_ocp(sc, 0xa5d4);
	val &= ~RGE_ADV_2500TFDX;

	anar = gig = 0;
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		anar |= ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig |= GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		val |= RGE_ADV_2500TFDX;
		break;
	case IFM_2500_T:
		anar |= ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig |= GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		val |= RGE_ADV_2500TFDX;
		ifp->if_baudrate = IF_Mbps(2500);
		break;
	case IFM_1000_T:
		anar |= ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig |= GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		ifp->if_baudrate = IF_Gbps(1);
		break;
	case IFM_100_TX:
		anar |= ANAR_TX | ANAR_TX_FD;
		ifp->if_baudrate = IF_Mbps(100);
		break;
	case IFM_10_T:
		anar |= ANAR_10 | ANAR_10_FD;
		ifp->if_baudrate = IF_Mbps(10);
		break;
	default:
		printf("%s: unsupported media type\n", sc->sc_dev.dv_xname);
		return (EINVAL);
	}

	rge_write_phy(sc, 0, MII_ANAR, anar | ANAR_PAUSE_ASYM | ANAR_FC);
	rge_write_phy(sc, 0, MII_100T2CR, gig);
	rge_write_phy_ocp(sc, 0xa5d4, val);
	rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_STARTNEG);

	return (0);
}

/*
 * Report current media status.
 */
void
rge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct rge_softc *sc = ifp->if_softc;
	uint16_t status = 0;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (rge_get_link_status(sc)) {
		ifmr->ifm_status |= IFM_ACTIVE;

		status = RGE_READ_2(sc, RGE_PHYSTAT);
		if ((status & RGE_PHYSTAT_FDX) ||
		    (status & RGE_PHYSTAT_2500MBPS))
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;

		if (status & RGE_PHYSTAT_10MBPS)
			ifmr->ifm_active |= IFM_10_T;
		else if (status & RGE_PHYSTAT_100MBPS)
			ifmr->ifm_active |= IFM_100_TX;
		else if (status & RGE_PHYSTAT_1000MBPS)
			ifmr->ifm_active |= IFM_1000_T;
		else if (status & RGE_PHYSTAT_2500MBPS)
			ifmr->ifm_active |= IFM_2500_T;
	}
}

/*
 * Allocate memory for RX/TX rings.
 */
int
rge_allocmem(struct rge_softc *sc)
{
	int error, i;

	/* Allocate DMA'able memory for the TX ring. */
	error = bus_dmamap_create(sc->sc_dmat, RGE_TX_LIST_SZ, 1,
	    RGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT, &sc->rge_ldata.rge_tx_list_map);
	if (error) {
		printf("%s: can't create TX list map\n", sc->sc_dev.dv_xname);
		return (error);
	}
	error = bus_dmamem_alloc(sc->sc_dmat, RGE_TX_LIST_SZ, RGE_ALIGN, 0,
	    &sc->rge_ldata.rge_tx_listseg, 1, &sc->rge_ldata.rge_tx_listnseg,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error) {
		printf("%s: can't alloc TX list\n", sc->sc_dev.dv_xname);
		return (error);
	}

	/* Load the map for the TX ring. */
	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
	    sc->rge_ldata.rge_tx_listnseg, RGE_TX_LIST_SZ,
	    (caddr_t *)&sc->rge_ldata.rge_tx_list,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		printf("%s: can't map TX dma buffers\n", sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
		    sc->rge_ldata.rge_tx_listnseg);
		return (error);
	}
	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
	    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load TX dma map\n", sc->sc_dev.dv_xname);
		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map);
		bus_dmamem_unmap(sc->sc_dmat,
		    (caddr_t)sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ);
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
		    sc->rge_ldata.rge_tx_listnseg);
		return (error);
	}

	/* Create DMA maps for TX buffers. */
	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN,
		    RGE_TX_NSEGS, RGE_JUMBO_FRAMELEN, 0, 0,
		    &sc->rge_ldata.rge_txq[i].txq_dmamap);
		if (error) {
			printf("%s: can't create DMA map for TX\n",
			    sc->sc_dev.dv_xname);
			return (error);
		}
	}

	/* Allocate DMA'able memory for the RX ring. */
	error = bus_dmamap_create(sc->sc_dmat, RGE_RX_LIST_SZ, 1,
	    RGE_RX_LIST_SZ, 0, 0, &sc->rge_ldata.rge_rx_list_map);
	if (error) {
		printf("%s: can't create RX list map\n", sc->sc_dev.dv_xname);
		return (error);
	}
	error = bus_dmamem_alloc(sc->sc_dmat, RGE_RX_LIST_SZ, RGE_ALIGN, 0,
	    &sc->rge_ldata.rge_rx_listseg, 1, &sc->rge_ldata.rge_rx_listnseg,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error) {
		printf("%s: can't alloc RX list\n", sc->sc_dev.dv_xname);
		return (error);
	}

	/* Load the map for the RX ring. */
	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
	    sc->rge_ldata.rge_rx_listnseg, RGE_RX_LIST_SZ,
	    (caddr_t *)&sc->rge_ldata.rge_rx_list,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		printf("%s: can't map RX dma buffers\n", sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
		    sc->rge_ldata.rge_rx_listnseg);
		return (error);
	}
	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
	    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load RX dma map\n", sc->sc_dev.dv_xname);
		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map);
		bus_dmamem_unmap(sc->sc_dmat,
		    (caddr_t)sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ);
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
		    sc->rge_ldata.rge_rx_listnseg);
		return (error);
	}

	/* Create DMA maps for RX buffers. */
	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN, 1,
		    RGE_JUMBO_FRAMELEN, 0, 0,
		    &sc->rge_ldata.rge_rxq[i].rxq_dmamap);
		if (error) {
			printf("%s: can't create DMA map for RX\n",
			    sc->sc_dev.dv_xname);
			return (error);
		}
	}

	return (error);
}

/*
 * Initialize the RX descriptor and attach an mbuf cluster.
 */
int
rge_newbuf(struct rge_softc *sc, int idx)
{
	struct mbuf *m;
	struct rge_rx_desc *r;
	struct rge_rxq *rxq;
	bus_dmamap_t rxmap;

	m = MCLGETI(NULL, M_DONTWAIT, NULL, RGE_JUMBO_FRAMELEN);
	if (m == NULL)
		return (ENOBUFS);

	m->m_len = m->m_pkthdr.len = RGE_JUMBO_FRAMELEN;

	rxq = &sc->rge_ldata.rge_rxq[idx];
	rxmap = rxq->rxq_dmamap;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m, BUS_DMA_NOWAIT))
		goto out;

	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Map the segments into RX descriptors. */
	r = &sc->rge_ldata.rge_rx_list[idx];

	if (RGE_OWN(r)) {
		printf("%s: tried to map busy RX descriptor\n",
		    sc->sc_dev.dv_xname);
		goto out;
	}

	rxq->rxq_mbuf = m;

	r->rge_extsts = 0;
	r->rge_addrlo = htole32(RGE_ADDR_LO(rxmap->dm_segs[0].ds_addr));
	r->rge_addrhi = htole32(RGE_ADDR_HI(rxmap->dm_segs[0].ds_addr));

	r->rge_cmdsts = htole32(rxmap->dm_segs[0].ds_len);
	if (idx == RGE_RX_LIST_CNT - 1)
		r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);

	r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
out:
	if (m != NULL)
		m_freem(m);
	return (ENOMEM);
}

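/*
 * Recycle an RX descriptor in place: reset its length and hand
 * ownership back to the chip without replacing the mbuf.
 */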
void
rge_discard_rxbuf(struct rge_softc *sc, int idx)
{
	struct rge_rx_desc *r;

	r = &sc->rge_ldata.rge_rx_list[idx];

	r->rge_cmdsts = htole32(RGE_JUMBO_FRAMELEN);
	r->rge_extsts = 0;
	if (idx == RGE_RX_LIST_CNT - 1)
		r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
	r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

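/*
 * Initialize the RX ring: attach a fresh mbuf cluster to every
 * descriptor and reset the producer index.
 */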
int
rge_rx_list_init(struct rge_softc *sc)
{
	int i;

	memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);

	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
		sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
		if (rge_newbuf(sc, i) == ENOBUFS)
			return (ENOBUFS);
	}

	sc->rge_ldata.rge_rxq_prodidx = 0;
	sc->rge_head = sc->rge_tail = NULL;

	return (0);
}

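/*
 * Initialize the TX ring: clear all descriptors and reset the
 * producer and consumer indexes.
 */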
void
rge_tx_list_init(struct rge_softc *sc)
{
	int i;

	memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);

	for (i = 0; i < RGE_TX_LIST_CNT; i++)
		sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map, 0,
	    sc->rge_ldata.rge_tx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->rge_ldata.rge_txq_prodidx = sc->rge_ldata.rge_txq_considx = 0;
}

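/*
 * RX completion.  Walk the ring from the current producer index,
 * handing completed frames to the stack until a descriptor still
 * owned by the chip is found.
 */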
int
rge_rxeof(struct rge_softc *sc)
{
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct rge_rx_desc *cur_rx;
	struct rge_rxq *rxq;
	uint32_t rxstat, extsts;
	int i, total_len, rx = 0;

	for (i = sc->rge_ldata.rge_rxq_prodidx; ; i = RGE_NEXT_RX_DESC(i)) {
		/* Invalidate the descriptor memory. */
		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
		    i * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_rx = &sc->rge_ldata.rge_rx_list[i];

		if (RGE_OWN(cur_rx))
			break;

		rxstat = letoh32(cur_rx->rge_cmdsts);
		extsts = letoh32(cur_rx->rge_extsts);

		total_len = RGE_RXBYTES(cur_rx);
		rxq = &sc->rge_ldata.rge_rxq[i];
		m = rxq->rxq_mbuf;
		rx = 1;

		/* Invalidate the RX mbuf and unload its map. */
		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_dmamap, 0,
		    rxq->rxq_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxq->rxq_dmamap);

		if ((rxstat & (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) !=
		    (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) {
			rge_discard_rxbuf(sc, i);
			continue;
		}

		if (rxstat & RGE_RDCMDSTS_RXERRSUM) {
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->rge_head != NULL) {
				m_freem(sc->rge_head);
				sc->rge_head = sc->rge_tail = NULL;
			}
			rge_discard_rxbuf(sc, i);
			continue;
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
		 */

		if (rge_newbuf(sc, i) == ENOBUFS) {
			if (sc->rge_head != NULL) {
				m_freem(sc->rge_head);
				sc->rge_head = sc->rge_tail = NULL;
			}
			rge_discard_rxbuf(sc, i);
			continue;
		}

		if (sc->rge_head != NULL) {
			m->m_len = total_len;
			/*
			 * Special case: if there are 4 bytes or fewer
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes are the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->rge_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->rge_tail->m_next = m;
			}
			m = sc->rge_head;
			sc->rge_head = sc->rge_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);

		/* Check IP header checksum. */
		if (!(rxstat & RGE_RDCMDSTS_IPCSUMERR) &&
		    (extsts & RGE_RDEXTSTS_IPV4))
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

		/* Check TCP/UDP checksum. */
		if ((extsts & (RGE_RDEXTSTS_IPV4 | RGE_RDEXTSTS_IPV6)) &&
		    (((rxstat & RGE_RDCMDSTS_TCPPKT) &&
		    !(rxstat & RGE_RDCMDSTS_TCPCSUMERR)) ||
		    ((rxstat & RGE_RDCMDSTS_UDPPKT) &&
		    !(rxstat & RGE_RDCMDSTS_UDPCSUMERR))))
			m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
			    M_UDP_CSUM_IN_OK;

#if NVLAN > 0
		if (extsts & RGE_RDEXTSTS_VTAG) {
			m->m_pkthdr.ether_vtag =
			    ntohs(extsts & RGE_RDEXTSTS_VLAN_MASK);
			m->m_flags |= M_VLANTAG;
		}
#endif

		ml_enqueue(&ml, m);
	}

	sc->rge_ldata.rge_rxq_prodidx = i;

	if_input(ifp, &ml);

	return (rx);
}

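/*
 * TX completion.  Reclaim descriptors between the consumer and
 * producer indexes.  "free" encodes the outcome: 1 means descriptors
 * were reclaimed, 2 means the chip still owns a descriptor, i.e. a
 * transmission is still in flight.
 */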
int
rge_txeof(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct rge_txq *txq;
	uint32_t txstat;
	int cons, idx, prod;
	int free = 0;

	prod = sc->rge_ldata.rge_txq_prodidx;
	cons = sc->rge_ldata.rge_txq_considx;

	while (prod != cons) {
		txq = &sc->rge_ldata.rge_txq[cons];
		idx = txq->txq_descidx;

		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
		    idx * sizeof(struct rge_tx_desc),
		    sizeof(struct rge_tx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		txstat = letoh32(sc->rge_ldata.rge_tx_list[idx].rge_cmdsts);

		if (txstat & RGE_TDCMDSTS_OWN) {
			free = 2;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap, 0,
		    txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
		m_freem(txq->txq_mbuf);
		txq->txq_mbuf = NULL;

		if (txstat & (RGE_TDCMDSTS_EXCESSCOLL | RGE_TDCMDSTS_COLL))
			ifp->if_collisions++;
		if (txstat & RGE_TDCMDSTS_TXERR)
			ifp->if_oerrors++;

		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
		    idx * sizeof(struct rge_tx_desc),
		    sizeof(struct rge_tx_desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		cons = RGE_NEXT_TX_DESC(idx);
		free = 1;
	}

	if (free == 0)
		return (0);

	sc->rge_ldata.rge_txq_considx = cons;

	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
	else if (free == 2)
		ifq_serialize(&ifp->if_snd, &sc->sc_task);
	else
		ifp->if_timer = 0;

	return (1);
}

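/*
 * Reset the chip: gate RXDV, wait for the FIFOs to drain, then issue
 * a soft reset and wait for it to complete.
 */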
void
rge_reset(struct rge_softc *sc)
{
	int i;

	/* Enable RXDV gate. */
	RGE_SETBIT_1(sc, RGE_PPSW, 0x08);
	DELAY(2000);

	for (i = 0; i < 10; i++) {
		DELAY(100);
		if ((RGE_READ_1(sc, RGE_MCUCMD) & (RGE_MCUCMD_RXFIFO_EMPTY |
		    RGE_MCUCMD_TXFIFO_EMPTY)) == (RGE_MCUCMD_RXFIFO_EMPTY |
		    RGE_MCUCMD_TXFIFO_EMPTY))
			break;
	}

	/* Soft reset. */
	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_RESET);

	for (i = 0; i < RGE_TIMEOUT; i++) {
		DELAY(100);
		if (!(RGE_READ_1(sc, RGE_CMD) & RGE_CMD_RESET))
			break;
	}
	if (i == RGE_TIMEOUT)
		printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);
}

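/*
 * Program the RX filter: unicast, broadcast, promiscuous mode and the
 * 64-bit multicast hash filter, which is indexed by the top six bits
 * of the big-endian CRC32 of each multicast address.
 */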
void
rge_iff(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct arpcom *ac = &sc->sc_arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[2];
	uint32_t rxfilt;
	int h = 0;

	rxfilt = RGE_READ_4(sc, RGE_RXCFG);
	rxfilt &= ~(RGE_RXCFG_ALLPHYS | RGE_RXCFG_MULTI);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept frames destined to our station address.
	 * Always accept broadcast frames.
	 */
	rxfilt |= RGE_RXCFG_INDIV | RGE_RXCFG_BROAD;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		rxfilt |= RGE_RXCFG_MULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= RGE_RXCFG_ALLPHYS;
		hashes[0] = hashes[1] = 0xffffffff;
	} else {
		rxfilt |= RGE_RXCFG_MULTI;
		/* Program new filter. */
		memset(hashes, 0, sizeof(hashes));

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			h = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN) >> 26;

			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	RGE_WRITE_4(sc, RGE_RXCFG, rxfilt);
	RGE_WRITE_4(sc, RGE_MAR0, swap32(hashes[1]));
	RGE_WRITE_4(sc, RGE_MAR4, swap32(hashes[0]));
}

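/*
 * Power the PHY up or down, presumably via the RGE_PMCH power control
 * register and the BMCR power-down bit.
 */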
void
rge_set_phy_power(struct rge_softc *sc, int on)
{
	int i;

	if (on) {
		RGE_SETBIT_1(sc, RGE_PMCH, 0xc0);

		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN);

		for (i = 0; i < RGE_TIMEOUT; i++) {
			/*
			 * The low bits of PHY OCP register 0xa420 hold
			 * the PHY state; wait for state 3 (LAN on).
			 */
			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 3)
				break;
			DELAY(1000);
		}
	} else
		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_PDOWN);
}

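/*
 * Per-revision PHY setup: program the external PHY registers, load the
 * PHY MCU microcode patch if the chip does not already report the
 * expected version, and disable EEE.
 */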
   1395 void
   1396 rge_phy_config(struct rge_softc *sc)
   1397 {
   1398 	uint16_t mcode_ver, val;
   1399 	int i;
   1400 	static const uint16_t mac_cfg3_a438_value[] =
   1401 	    { 0x0043, 0x00a7, 0x00d6, 0x00ec, 0x00f6, 0x00fb, 0x00fd, 0x00ff,
   1402 	      0x00bb, 0x0058, 0x0029, 0x0013, 0x0009, 0x0004, 0x0002 };
   1403 
   1404 	static const uint16_t mac_cfg3_b88e_value[] =
   1405 	    { 0xc091, 0x6e12, 0xc092, 0x1214, 0xc094, 0x1516, 0xc096, 0x171b,
   1406 	      0xc098, 0x1b1c, 0xc09a, 0x1f1f, 0xc09c, 0x2021, 0xc09e, 0x2224,
   1407 	      0xc0a0, 0x2424, 0xc0a2, 0x2424, 0xc0a4, 0x2424, 0xc018, 0x0af2,
   1408 	      0xc01a, 0x0d4a, 0xc01c, 0x0f26, 0xc01e, 0x118d, 0xc020, 0x14f3,
   1409 	      0xc022, 0x175a, 0xc024, 0x19c0, 0xc026, 0x1c26, 0xc089, 0x6050,
   1410 	      0xc08a, 0x5f6e, 0xc08c, 0x6e6e, 0xc08e, 0x6e6e, 0xc090, 0x6e12 };
   1411 
   1412 	/* Read microcode version. */
   1413 	rge_write_phy_ocp(sc, 0xa436, 0x801e);
   1414 	mcode_ver = rge_read_phy_ocp(sc, 0xa438);
   1415 
   1416 	if (sc->rge_type == MAC_CFG2) {
   1417 		for (i = 0; i < nitems(rtl8125_mac_cfg2_ephy); i++) {
   1418 			rge_write_ephy(sc, rtl8125_mac_cfg2_ephy[i].reg,
   1419 			    rtl8125_mac_cfg2_ephy[i].val);
   1420 		}
   1421 
   1422 		if (mcode_ver != RGE_MAC_CFG2_MCODE_VER) {
   1423 			/* Disable PHY config. */
   1424 			RGE_CLRBIT_1(sc, 0xf2, 0x20);
   1425 			DELAY(1000);
   1426 
   1427 			rge_patch_phy_mcu(sc, 1);
   1428 
   1429 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
   1430 			rge_write_phy_ocp(sc, 0xa438, 0x8600);
   1431 			rge_write_phy_ocp(sc, 0xa436, 0xb82e);
   1432 			rge_write_phy_ocp(sc, 0xa438, 0x0001);
   1433 
   1434 			RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
   1435 			for (i = 0; i < nitems(rtl8125_mac_cfg2_mcu); i++) {
   1436 				rge_write_phy_ocp(sc,
   1437 				    rtl8125_mac_cfg2_mcu[i].reg,
   1438 				    rtl8125_mac_cfg2_mcu[i].val);
   1439 			}
   1440 			RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);
   1441 
   1442 			rge_write_phy_ocp(sc, 0xa436, 0);
   1443 			rge_write_phy_ocp(sc, 0xa438, 0);
   1444 			RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
   1445 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
   1446 			rge_write_phy_ocp(sc, 0xa438, 0);
   1447 
   1448 			rge_patch_phy_mcu(sc, 0);
   1449 
   1450 			/* Enable PHY config. */
   1451 			RGE_SETBIT_1(sc, 0xf2, 0x20);
   1452 
   1453 			/* Write microcode version. */
   1454 			rge_write_phy_ocp(sc, 0xa436, 0x801e);
   1455 			rge_write_phy_ocp(sc, 0xa438, RGE_MAC_CFG2_MCODE_VER);
   1456 		}
   1457 
   1458 		val = rge_read_phy_ocp(sc, 0xad40) & ~0x03ff;
   1459 		rge_write_phy_ocp(sc, 0xad40, val | 0x0084);
   1460 		RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
   1461 		val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
   1462 		rge_write_phy_ocp(sc, 0xad16, val | 0x0006);
   1463 		val = rge_read_phy_ocp(sc, 0xad32) & ~0x03ff;
   1464 		rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
   1465 		RGE_PHY_CLRBIT(sc, 0xac08, 0x1100);
   1466 		val = rge_read_phy_ocp(sc, 0xac8a) & ~0xf000;
   1467 		rge_write_phy_ocp(sc, 0xac8a, val | 0x7000);
   1468 		RGE_PHY_SETBIT(sc, 0xad18, 0x0400);
   1469 		RGE_PHY_SETBIT(sc, 0xad1a, 0x03ff);
   1470 		RGE_PHY_SETBIT(sc, 0xad1c, 0x03ff);
   1471 
   1472 		rge_write_phy_ocp(sc, 0xa436, 0x80ea);
   1473 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1474 		rge_write_phy_ocp(sc, 0xa438, val | 0xc400);
   1475 		rge_write_phy_ocp(sc, 0xa436, 0x80eb);
   1476 		val = rge_read_phy_ocp(sc, 0xa438) & ~0x0700;
   1477 		rge_write_phy_ocp(sc, 0xa438, val | 0x0300);
   1478 		rge_write_phy_ocp(sc, 0xa436, 0x80f8);
   1479 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1480 		rge_write_phy_ocp(sc, 0xa438, val | 0x1c00);
   1481 		rge_write_phy_ocp(sc, 0xa436, 0x80f1);
   1482 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1483 		rge_write_phy_ocp(sc, 0xa438, val | 0x3000);
   1484 		rge_write_phy_ocp(sc, 0xa436, 0x80fe);
   1485 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1486 		rge_write_phy_ocp(sc, 0xa438, val | 0xa500);
   1487 		rge_write_phy_ocp(sc, 0xa436, 0x8102);
   1488 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1489 		rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
   1490 		rge_write_phy_ocp(sc, 0xa436, 0x8105);
   1491 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1492 		rge_write_phy_ocp(sc, 0xa438, val | 0x3300);
   1493 		rge_write_phy_ocp(sc, 0xa436, 0x8100);
   1494 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1495 		rge_write_phy_ocp(sc, 0xa438, val | 0x7000);
   1496 		rge_write_phy_ocp(sc, 0xa436, 0x8104);
   1497 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1498 		rge_write_phy_ocp(sc, 0xa438, val | 0xf000);
   1499 		rge_write_phy_ocp(sc, 0xa436, 0x8106);
   1500 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1501 		rge_write_phy_ocp(sc, 0xa438, val | 0x6500);
   1502 		rge_write_phy_ocp(sc, 0xa436, 0x80dc);
   1503 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1504 		rge_write_phy_ocp(sc, 0xa438, val | 0xed00);
   1505 		rge_write_phy_ocp(sc, 0xa436, 0x80df);
   1506 		RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
   1507 		rge_write_phy_ocp(sc, 0xa436, 0x80e1);
   1508 		RGE_PHY_CLRBIT(sc, 0xa438, 0x0100);
   1509 		val = rge_read_phy_ocp(sc, 0xbf06) & ~0x003f;
   1510 		rge_write_phy_ocp(sc, 0xbf06, val | 0x0038);
   1511 		rge_write_phy_ocp(sc, 0xa436, 0x819f);
   1512 		rge_write_phy_ocp(sc, 0xa438, 0xd0b6);
   1513 		rge_write_phy_ocp(sc, 0xbc34, 0x5555);
   1514 		val = rge_read_phy_ocp(sc, 0xbf0a) & ~0x0e00;
   1515 		rge_write_phy_ocp(sc, 0xbf0a, val | 0x0a00);
   1516 		RGE_PHY_CLRBIT(sc, 0xa5c0, 0x0400);
   1517 		RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
   1518 	} else {
   1519 		for (i = 0; i < nitems(rtl8125_mac_cfg3_ephy); i++)
   1520 			rge_write_ephy(sc, rtl8125_mac_cfg3_ephy[i].reg,
   1521 			    rtl8125_mac_cfg3_ephy[i].val);
   1522 
   1523 		if (mcode_ver != RGE_MAC_CFG3_MCODE_VER) {
   1524 			/* Disable PHY config. */
   1525 			RGE_CLRBIT_1(sc, 0xf2, 0x20);
   1526 			DELAY(1000);
   1527 
   1528 			rge_patch_phy_mcu(sc, 1);
   1529 
   1530 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
   1531 			rge_write_phy_ocp(sc, 0xa438, 0x8601);
   1532 			rge_write_phy_ocp(sc, 0xa436, 0xb82e);
   1533 			rge_write_phy_ocp(sc, 0xa438, 0x0001);
   1534 
   1535 			RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
   1536 			for (i = 0; i < nitems(rtl8125_mac_cfg3_mcu); i++) {
   1537 				rge_write_phy_ocp(sc,
   1538 				    rtl8125_mac_cfg3_mcu[i].reg,
   1539 				    rtl8125_mac_cfg3_mcu[i].val);
   1540 			}
   1541 			RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);
   1542 
   1543 			rge_write_phy_ocp(sc, 0xa436, 0);
   1544 			rge_write_phy_ocp(sc, 0xa438, 0);
   1545 			RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
			rge_write_phy_ocp(sc, 0xa436, 0x8024);
			rge_write_phy_ocp(sc, 0xa438, 0);

			rge_patch_phy_mcu(sc, 0);

			/* Enable PHY config. */
			RGE_SETBIT_1(sc, 0xf2, 0x20);

			/* Write microcode version. */
			rge_write_phy_ocp(sc, 0xa436, 0x801e);
			rge_write_phy_ocp(sc, 0xa438, RGE_MAC_CFG3_MCODE_VER);
		}

		RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
		val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
		rge_write_phy_ocp(sc, 0xad16, val | 0x03ff);
		val = rge_read_phy_ocp(sc, 0xad32) & ~0x003f;
		rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
		RGE_PHY_CLRBIT(sc, 0xac08, 0x1000);
		RGE_PHY_CLRBIT(sc, 0xac08, 0x0100);
		val = rge_read_phy_ocp(sc, 0xacc0) & ~0x0003;
		rge_write_phy_ocp(sc, 0xacc0, val | 0x0002);
		val = rge_read_phy_ocp(sc, 0xad40) & ~0x00e0;
		rge_write_phy_ocp(sc, 0xad40, val | 0x0040);
		val = rge_read_phy_ocp(sc, 0xad40) & ~0x0007;
		rge_write_phy_ocp(sc, 0xad40, val | 0x0004);
		RGE_PHY_CLRBIT(sc, 0xac14, 0x0080);
		RGE_PHY_CLRBIT(sc, 0xac80, 0x0300);
		val = rge_read_phy_ocp(sc, 0xac5e) & ~0x0007;
		rge_write_phy_ocp(sc, 0xac5e, val | 0x0002);
		rge_write_phy_ocp(sc, 0xad4c, 0x00a8);
		rge_write_phy_ocp(sc, 0xac5c, 0x01ff);
		val = rge_read_phy_ocp(sc, 0xac8a) & ~0x00f0;
		rge_write_phy_ocp(sc, 0xac8a, val | 0x0030);
		rge_write_phy_ocp(sc, 0xb87c, 0x80a2);
		rge_write_phy_ocp(sc, 0xb87e, 0x0153);
		rge_write_phy_ocp(sc, 0xb87c, 0x809c);
		rge_write_phy_ocp(sc, 0xb87e, 0x0153);

		rge_write_phy_ocp(sc, 0xa436, 0x81b3);
		for (i = 0; i < nitems(mac_cfg3_a438_value); i++)
			rge_write_phy_ocp(sc, 0xa438, mac_cfg3_a438_value[i]);
		for (i = 0; i < 26; i++)
			rge_write_phy_ocp(sc, 0xa438, 0);
		rge_write_phy_ocp(sc, 0xa436, 0x8257);
		rge_write_phy_ocp(sc, 0xa438, 0x020f);
		rge_write_phy_ocp(sc, 0xa436, 0x80ea);
		rge_write_phy_ocp(sc, 0xa438, 0x7843);

		rge_patch_phy_mcu(sc, 1);
		RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
		RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
		for (i = 0; i < nitems(mac_cfg3_b88e_value); i += 2) {
			rge_write_phy_ocp(sc, 0xb88e, mac_cfg3_b88e_value[i]);
			rge_write_phy_ocp(sc, 0xb890,
			    mac_cfg3_b88e_value[i + 1]);
		}
		RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
		rge_patch_phy_mcu(sc, 0);

		RGE_PHY_SETBIT(sc, 0xd068, 0x2000);
		rge_write_phy_ocp(sc, 0xa436, 0x81a2);
		RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
		val = rge_read_phy_ocp(sc, 0xb54c) & ~0xff00;
		rge_write_phy_ocp(sc, 0xb54c, val | 0xdb00);
		RGE_PHY_CLRBIT(sc, 0xa454, 0x0001);
		RGE_PHY_SETBIT(sc, 0xa5d4, 0x0020);
		RGE_PHY_CLRBIT(sc, 0xad4e, 0x0010);
		RGE_PHY_CLRBIT(sc, 0xa86a, 0x0001);
		RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
	}

	/* Disable EEE. */
	RGE_MAC_CLRBIT(sc, 0xe040, 0x0003);
	RGE_MAC_CLRBIT(sc, 0xeb62, 0x0006);
	RGE_PHY_CLRBIT(sc, 0xa432, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xa5d0, 0x0006);
	RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xa6d8, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xa428, 0x0080);
	RGE_PHY_CLRBIT(sc, 0xa4a2, 0x0200);

	rge_patch_phy_mcu(sc, 1);
	RGE_MAC_CLRBIT(sc, 0xe052, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xa442, 0x3000);
	RGE_PHY_CLRBIT(sc, 0xa430, 0x8000);
	rge_patch_phy_mcu(sc, 0);
}
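
/*
 * Much of the configuration above is read-modify-write on PHY OCP
 * registers.  A helper along these lines (hypothetical; not part of this
 * driver) captures the recurring pattern, assuming only rge_read_phy_ocp()
 * and rge_write_phy_ocp() as defined later in this file:
 */
static void __unused
rge_phy_ocp_rmw(struct rge_softc *sc, uint16_t reg, uint16_t mask,
    uint16_t bits)
{
	uint16_t val;

	/* Clear the field, then set the new bits. */
	val = rge_read_phy_ocp(sc, reg) & ~mask;
	rge_write_phy_ocp(sc, reg, val | bits);
}
/*
 * e.g. rge_phy_ocp_rmw(sc, 0xad16, 0x03ff, 0x03ff) would be equivalent to
 * the read/write pair near the top of the MAC_CFG3 block above.
 */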

void
rge_set_macaddr(struct rge_softc *sc, const uint8_t *addr)
{
	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
	RGE_WRITE_4(sc, RGE_MAC0,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	RGE_WRITE_4(sc, RGE_MAC4,
	    addr[5] <<  8 | addr[4]);
	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
}

void
rge_get_macaddr(struct rge_softc *sc, uint8_t *addr)
{
	/*
	 * The hardware holds the station address little-endian, matching
	 * rge_set_macaddr() above; the casts assume addr is suitably
	 * aligned for 32-bit and 16-bit stores.
	 */
	*(uint32_t *)&addr[0] = RGE_READ_4(sc, RGE_ADDR0);
	*(uint16_t *)&addr[4] = RGE_READ_2(sc, RGE_ADDR1);
}
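
/*
 * A byte-at-a-time variant (an illustrative sketch, not the driver's code
 * path) avoids both the endianness and the alignment assumption:
 */
static void __unused
rge_get_macaddr_bytewise(struct rge_softc *sc, uint8_t *addr)
{
	uint32_t lo;
	uint16_t hi;
	int i;

	lo = RGE_READ_4(sc, RGE_ADDR0);
	hi = RGE_READ_2(sc, RGE_ADDR1);

	/* The registers store the address little-endian. */
	for (i = 0; i < 4; i++)
		addr[i] = (lo >> (i * 8)) & 0xff;
	addr[4] = hi & 0xff;
	addr[5] = (hi >> 8) & 0xff;
}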

void
rge_hw_init(struct rge_softc *sc)
{
	int i;

	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
	RGE_CLRBIT_1(sc, 0xf1, 0x80);

	/* Disable UPS. */
	RGE_MAC_CLRBIT(sc, 0xd40a, 0x0010);

	/* Configure MAC MCU. */
	rge_write_mac_ocp(sc, 0xfc38, 0);

	for (i = 0xfc28; i < 0xfc38; i += 2)
		rge_write_mac_ocp(sc, i, 0);

	DELAY(3000);
	rge_write_mac_ocp(sc, 0xfc26, 0);

	if (sc->rge_type == MAC_CFG3) {
		for (i = 0; i < nitems(rtl8125_def_bps); i++)
			rge_write_mac_ocp(sc, rtl8125_def_bps[i].reg,
			    rtl8125_def_bps[i].val);
	}

	/* Disable PHY power saving. */
	rge_disable_phy_ocp_pwrsave(sc);

	/* Set PCIe uncorrectable error status. */
	rge_write_csi(sc, 0x108,
	    rge_read_csi(sc, 0x108) | 0x00100000);
}

void
rge_disable_phy_ocp_pwrsave(struct rge_softc *sc)
{
	if (rge_read_phy_ocp(sc, 0xc416) != 0x0500) {
		rge_patch_phy_mcu(sc, 1);
		rge_write_phy_ocp(sc, 0xc416, 0);
		rge_write_phy_ocp(sc, 0xc416, 0x0500);
		rge_patch_phy_mcu(sc, 0);
	}
}

void
rge_patch_phy_mcu(struct rge_softc *sc, int set)
{
	uint16_t val;
	int i;

	if (set)
		RGE_PHY_SETBIT(sc, 0xb820, 0x0010);
	else
		RGE_PHY_CLRBIT(sc, 0xb820, 0x0010);

	/* Poll the patch-ready flag, checking a fresh read on each pass. */
	for (i = 0; i < 1000; i++) {
		DELAY(100);
		val = rge_read_phy_ocp(sc, 0xb800) & 0x0040;
		if (val == 0x0040)
			break;
	}
	if (i == 1000)
		printf("%s: timeout waiting to patch phy mcu\n",
		    sc->sc_dev.dv_xname);
}
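
/*
 * Callers bracket microcode-style register loads between
 * rge_patch_phy_mcu(sc, 1) and rge_patch_phy_mcu(sc, 0), as
 * rge_phy_config() does above.  A minimal sketch of that usage pattern
 * (hypothetical helper; the address/value pairs in tbl are placeholders,
 * not real microcode):
 */
static void __unused
rge_load_phy_pairs(struct rge_softc *sc, const uint16_t *tbl, int n)
{
	int i;

	rge_patch_phy_mcu(sc, 1);		/* enter patch mode */
	for (i = 0; i < n; i += 2)
		rge_write_phy_ocp(sc, tbl[i], tbl[i + 1]);
	rge_patch_phy_mcu(sc, 0);		/* leave patch mode */
}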

void
rge_add_media_types(struct rge_softc *sc)
{
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX, 0, NULL);
}

void
rge_config_imtype(struct rge_softc *sc, int imtype)
{
	switch (imtype) {
	case RGE_IMTYPE_NONE:
		sc->rge_intrs = RGE_INTRS;
		sc->rge_rx_ack = RGE_ISR_RX_OK | RGE_ISR_RX_DESC_UNAVAIL |
		    RGE_ISR_RX_FIFO_OFLOW;
		sc->rge_tx_ack = RGE_ISR_TX_OK;
		break;
	case RGE_IMTYPE_SIM:
		sc->rge_intrs = RGE_INTRS_TIMER;
		sc->rge_rx_ack = RGE_ISR_PCS_TIMEOUT;
		sc->rge_tx_ack = RGE_ISR_PCS_TIMEOUT;
		break;
	default:
		panic("%s: unknown imtype %d", sc->sc_dev.dv_xname, imtype);
	}
}

void
rge_disable_sim_im(struct rge_softc *sc)
{
	RGE_WRITE_4(sc, RGE_TIMERINT, 0);
	sc->rge_timerintr = 0;
}

void
rge_setup_sim_im(struct rge_softc *sc)
{
	RGE_WRITE_4(sc, RGE_TIMERINT, 0x2600);
	RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
	sc->rge_timerintr = 1;
}

void
rge_setup_intr(struct rge_softc *sc, int imtype)
{
	rge_config_imtype(sc, imtype);

	/* Enable interrupts. */
	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);

	switch (imtype) {
	case RGE_IMTYPE_NONE:
		rge_disable_sim_im(sc);
		break;
	case RGE_IMTYPE_SIM:
		rge_setup_sim_im(sc);
		break;
	default:
		panic("%s: unknown imtype %d", sc->sc_dev.dv_xname, imtype);
	}
}
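
/*
 * A usage sketch (illustrative; the actual call sites live elsewhere in
 * this file): an interface bring-up path picks one moderation type, e.g.
 *
 *	rge_setup_intr(sc, RGE_IMTYPE_SIM);
 *
 * while RGE_IMTYPE_NONE takes one interrupt per event instead.
 */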

void
rge_exit_oob(struct rge_softc *sc)
{
	int i;

	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
	    RGE_RXCFG_ERRPKT);

	/* Disable RealWoW. */
	rge_write_mac_ocp(sc, 0xc0bc, 0x00ff);

	rge_reset(sc);

	/* Disable OOB. */
	RGE_CLRBIT_1(sc, RGE_MCUCMD, RGE_MCUCMD_IS_OOB);

	RGE_MAC_CLRBIT(sc, 0xe8de, 0x4000);

	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
			break;
	}

	rge_write_mac_ocp(sc, 0xc0aa, 0x07d0);
	rge_write_mac_ocp(sc, 0xc0a6, 0x0150);
	rge_write_mac_ocp(sc, 0xc01e, 0x5555);

	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
			break;
	}

	if (rge_read_mac_ocp(sc, 0xd42c) & 0x0100) {
		/* Wait for the PHY status field to reach the LAN-on state. */
		for (i = 0; i < RGE_TIMEOUT; i++) {
			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 2)
				break;
			DELAY(1000);
		}
		RGE_MAC_CLRBIT(sc, 0xd408, 0x0100);
		RGE_PHY_CLRBIT(sc, 0xa468, 0x000a);
	}
}
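
/*
 * The two polling loops above are identical; a helper (hypothetical, not
 * part of this driver) would make the wait explicit and report whether
 * the flag was actually seen:
 */
static int __unused
rge_wait_twicmd(struct rge_softc *sc)
{
	int i;

	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
			return (1);	/* flag observed */
	}
	return (0);			/* timed out */
}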

void
rge_write_csi(struct rge_softc *sc, uint32_t reg, uint32_t val)
{
	int i;

	RGE_WRITE_4(sc, RGE_CSIDR, val);
	RGE_WRITE_4(sc, RGE_CSIAR, (1 << 16) | (reg & RGE_CSIAR_ADDR_MASK) |
	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT) | RGE_CSIAR_BUSY);

	/* The write has completed once the chip clears the busy flag. */
	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (!(RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY))
			break;
	}

	DELAY(20);
}

uint32_t
rge_read_csi(struct rge_softc *sc, uint32_t reg)
{
	int i;

	RGE_WRITE_4(sc, RGE_CSIAR, (1 << 16) | (reg & RGE_CSIAR_ADDR_MASK) |
	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT));

	/* For reads the polarity inverts: busy going high means done. */
	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY)
			break;
	}

	DELAY(20);

	return (RGE_READ_4(sc, RGE_CSIDR));
}
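
/*
 * rge_hw_init() above does a read-modify-write through this pair to set a
 * bit in CSI register 0x108.  A set-bits helper in the same spirit (a
 * sketch, not part of this driver), built only on rge_read_csi() and
 * rge_write_csi():
 */
static void __unused
rge_set_csi_bit(struct rge_softc *sc, uint32_t reg, uint32_t bits)
{
	/* Read the current value, merge in the requested bits, write back. */
	rge_write_csi(sc, reg, rge_read_csi(sc, reg) | bits);
}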

void
rge_write_mac_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
{
	uint32_t tmp;

	tmp = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
	tmp |= val;		/* data occupies the low 16 bits */
	tmp |= RGE_MACOCP_BUSY;
	RGE_WRITE_4(sc, RGE_MACOCP, tmp);
}

uint16_t
rge_read_mac_ocp(struct rge_softc *sc, uint16_t reg)
{
	uint32_t val;

	val = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
	RGE_WRITE_4(sc, RGE_MACOCP, val);

	return (RGE_READ_4(sc, RGE_MACOCP) & RGE_MACOCP_DATA_MASK);
}

void
rge_write_ephy(struct rge_softc *sc, uint16_t reg, uint16_t val)
{
	uint32_t tmp;
	int i;

	tmp = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
	tmp |= RGE_EPHYAR_BUSY | (val & RGE_EPHYAR_DATA_MASK);
	RGE_WRITE_4(sc, RGE_EPHYAR, tmp);

	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (!(RGE_READ_4(sc, RGE_EPHYAR) & RGE_EPHYAR_BUSY))
			break;
	}

	DELAY(20);
}

void
rge_write_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg, uint16_t val)
{
	uint16_t off, phyaddr;

	/*
	 * Map a (page, register) pair onto a flat PHY OCP address.  With
	 * addr == 0, registers are grouped eight to a page starting at
	 * RGE_PHYBASE; e.g. reg 1 maps to OCP address
	 * (RGE_PHYBASE << 4) + 2.
	 */
	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
	phyaddr <<= 4;

	off = addr ? reg : 0x10 + (reg % 8);

	phyaddr += (off - 16) << 1;

	rge_write_phy_ocp(sc, phyaddr, val);
}

void
rge_write_phy_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
{
	uint32_t tmp;
	int i;

	tmp = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
	tmp |= RGE_PHYOCP_BUSY | val;
	RGE_WRITE_4(sc, RGE_PHYOCP, tmp);

	for (i = 0; i < RGE_TIMEOUT; i++) {
		DELAY(1);
		if (!(RGE_READ_4(sc, RGE_PHYOCP) & RGE_PHYOCP_BUSY))
			break;
	}
}

uint16_t
rge_read_phy_ocp(struct rge_softc *sc, uint16_t reg)
{
	uint32_t val;
	int i;

	val = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
	RGE_WRITE_4(sc, RGE_PHYOCP, val);

	/* As with CSI reads, the busy flag going high signals completion. */
	for (i = 0; i < RGE_TIMEOUT; i++) {
		DELAY(1);
		val = RGE_READ_4(sc, RGE_PHYOCP);
		if (val & RGE_PHYOCP_BUSY)
			break;
	}

	return (val & RGE_PHYOCP_DATA_MASK);
}

int
rge_get_link_status(struct rge_softc *sc)
{
	return ((RGE_READ_2(sc, RGE_PHYSTAT) & RGE_PHYSTAT_LINK) ? 1 : 0);
}

void
rge_txstart(void *arg)
{
	struct rge_softc *sc = arg;

	RGE_WRITE_2(sc, RGE_TXSTART, RGE_TXSTART_START);
}

void
rge_tick(void *arg)
{
	struct rge_softc *sc = arg;
	int s;

	s = splnet();
	rge_link_state(sc);
	splx(s);

	timeout_add_sec(&sc->sc_timeout, 1);
}

void
rge_link_state(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int link = LINK_STATE_DOWN;

	if (rge_get_link_status(sc))
		link = LINK_STATE_UP;

	if (ifp->if_link_state != link) {
		ifp->if_link_state = link;
		if_link_state_change(ifp);
	}
}