/*	$NetBSD: if_rge.c,v 1.2 2020/01/11 21:05:45 sevan Exp $	*/
/*	$OpenBSD: if_rge.c,v 1.2 2020/01/02 09:00:45 kevlo Exp $	*/

/*
 * Copyright (c) 2019 Kevin Lo <kevlo@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_rge.c,v 1.2 2020/01/11 21:05:45 sevan Exp $");

/* #include "bpfilter.h" Sevan */
/* #include "vlan.h" Sevan */

#include <sys/types.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/endian.h>

#include <net/if.h>

#include <net/if_dl.h>
#include <net/if_ether.h>

#include <net/if_media.h>

#include <netinet/in.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <sys/bus.h>
#include <machine/intr.h>

#include <dev/mii/mii.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_rgereg.h>

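/*
 * Compatibility shims for code inherited from OpenBSD: letoh32 and
 * nitems() have direct NetBSD equivalents, and mbuf_list is a minimal
 * stand-in for OpenBSD's mbuf list type used by the RX path.
 */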
#ifdef __NetBSD__
#define letoh32		htole32
#define nitems(x)	__arraycount(x)
#define MBUF_LIST_INITIALIZER()	{ NULL, NULL, 0 }
struct mbuf_list {
	struct mbuf	*ml_head;
	struct mbuf	*ml_tail;
	u_int		ml_len;
};
#endif

static int	rge_match(device_t, cfdata_t, void *);
static void	rge_attach(device_t, device_t, void *);
int		rge_intr(void *);
int		rge_encap(struct rge_softc *, struct mbuf *, int);
int		rge_ioctl(struct ifnet *, u_long, void *);
void		rge_start(struct ifnet *);
void		rge_watchdog(struct ifnet *);
int		rge_init(struct ifnet *);
void		rge_stop(struct ifnet *);
int		rge_ifmedia_upd(struct ifnet *);
void		rge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
int		rge_allocmem(struct rge_softc *);
int		rge_newbuf(struct rge_softc *, int);
void		rge_discard_rxbuf(struct rge_softc *, int);
int		rge_rx_list_init(struct rge_softc *);
void		rge_tx_list_init(struct rge_softc *);
int		rge_rxeof(struct rge_softc *);
int		rge_txeof(struct rge_softc *);
void		rge_reset(struct rge_softc *);
void		rge_iff(struct rge_softc *);
void		rge_set_phy_power(struct rge_softc *, int);
void		rge_phy_config(struct rge_softc *);
void		rge_set_macaddr(struct rge_softc *, const uint8_t *);
void		rge_get_macaddr(struct rge_softc *, uint8_t *);
void		rge_hw_init(struct rge_softc *);
void		rge_disable_phy_ocp_pwrsave(struct rge_softc *);
void		rge_patch_phy_mcu(struct rge_softc *, int);
void		rge_add_media_types(struct rge_softc *);
void		rge_config_imtype(struct rge_softc *, int);
void		rge_disable_sim_im(struct rge_softc *);
void		rge_setup_sim_im(struct rge_softc *);
void		rge_setup_intr(struct rge_softc *, int);
void		rge_exit_oob(struct rge_softc *);
void		rge_write_csi(struct rge_softc *, uint32_t, uint32_t);
uint32_t	rge_read_csi(struct rge_softc *, uint32_t);
void		rge_write_mac_ocp(struct rge_softc *, uint16_t, uint16_t);
uint16_t	rge_read_mac_ocp(struct rge_softc *, uint16_t);
void		rge_write_ephy(struct rge_softc *, uint16_t, uint16_t);
void		rge_write_phy(struct rge_softc *, uint16_t, uint16_t, uint16_t);
void		rge_write_phy_ocp(struct rge_softc *, uint16_t, uint16_t);
uint16_t	rge_read_phy_ocp(struct rge_softc *, uint16_t);
int		rge_get_link_status(struct rge_softc *);
void		rge_txstart(void *);
void		rge_tick(void *);
void		rge_link_state(struct rge_softc *);

static const struct {
	uint16_t reg;
	uint16_t val;
} rtl8125_def_bps[] = {
	RTL8125_DEF_BPS
}, rtl8125_mac_cfg2_ephy[] = {
	RTL8125_MAC_CFG2_EPHY
}, rtl8125_mac_cfg2_mcu[] = {
	RTL8125_MAC_CFG2_MCU
}, rtl8125_mac_cfg3_ephy[] = {
	RTL8125_MAC_CFG3_EPHY
}, rtl8125_mac_cfg3_mcu[] = {
	RTL8125_MAC_CFG3_MCU
};

CFATTACH_DECL_NEW(rge, sizeof(struct rge_softc), rge_match, rge_attach,
    NULL, NULL); /* Sevan - detach function? */

extern struct cfdriver rge_cd;

static const struct {
	pci_vendor_id_t		vendor;
	pci_product_id_t	product;
} rge_devices[] = {
	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_E3000 },
	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8125 },
};

static int
rge_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;
	int n;

	for (n = 0; n < __arraycount(rge_devices); n++) {
		if (PCI_VENDOR(pa->pa_id) == rge_devices[n].vendor &&
		    PCI_PRODUCT(pa->pa_id) == rge_devices[n].product)
			return 1;
	}

	return 0;
}

static void
rge_attach(device_t parent, device_t self, void *aux)
{
	struct rge_softc *sc = (struct rge_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	char intrbuf[PCI_INTRSTR_LEN];
	const char *intrstr = NULL;
	struct ifnet *ifp;
	pcireg_t reg;
	uint32_t hwrev;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int offset;

	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);

	/*
	 * Map control/status registers.
	 */
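	/* Try the 64-bit memory BAR first, then the 32-bit one, then I/O. */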
	if (pci_mapreg_map(pa, RGE_PCI_BAR2, PCI_MAPREG_TYPE_MEM |
	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->rge_btag, &sc->rge_bhandle,
	    NULL, &sc->rge_bsize)) {
		if (pci_mapreg_map(pa, RGE_PCI_BAR1, PCI_MAPREG_TYPE_MEM |
		    PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->rge_btag,
		    &sc->rge_bhandle, NULL, &sc->rge_bsize)) {
			if (pci_mapreg_map(pa, RGE_PCI_BAR0, PCI_MAPREG_TYPE_IO,
			    0, &sc->rge_btag, &sc->rge_bhandle, NULL,
			    &sc->rge_bsize)) {
				printf(": can't map mem or i/o space\n");
				return;
			}
		}
	}

	/*
	 * Map and establish our interrupt. The OpenBSD driver tries an
	 * MSI mapping first and sets RGE_FLAG_MSI on success; NetBSD has
	 * no pci_intr_map_msi(), so use plain INTx for now and leave
	 * RGE_FLAG_MSI clear. XXX convert to pci_intr_alloc() for MSI.
	 */
	if (pci_intr_map(pa, &ih) != 0) {
		printf(": couldn't map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
	sc->sc_ih = pci_intr_establish_xname(pc, ih, IPL_NET, rge_intr,
	    sc, sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;

	/* Determine hardware revision */
	hwrev = RGE_READ_4(sc, RGE_TXCFG) & RGE_TXCFG_HWREV;
	switch (hwrev) {
	case 0x60800000:
		sc->rge_type = MAC_CFG2;
		break;
	case 0x60900000:
		sc->rge_type = MAC_CFG3;
		break;
	default:
		printf(": unknown version 0x%08x\n", hwrev);
		return;
	}

	rge_config_imtype(sc, RGE_IMTYPE_SIM);

	/*
	 * PCI Express check.
	 */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
	    &offset, NULL)) {
		/* Disable PCIe ASPM. */
		reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    offset + PCIE_LCSR);
		reg &= ~(PCIE_LCSR_ASPM_L0S | PCIE_LCSR_ASPM_L1);
		pci_conf_write(pa->pa_pc, pa->pa_tag, offset + PCIE_LCSR,
		    reg);
	}

	rge_exit_oob(sc);
	rge_hw_init(sc);

	rge_get_macaddr(sc, eaddr);
	printf(", address %s\n", ether_sprintf(eaddr));

	memcpy(sc->sc_enaddr, eaddr, ETHER_ADDR_LEN);

	rge_set_phy_power(sc, 1);
	rge_phy_config(sc);

	if (rge_allocmem(sc))
		return;

	ifp = &sc->sc_ec.ec_if;
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef RGE_MPSAFE
	ifp->if_xflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = rge_ioctl;
	ifp->if_start = rge_start;
	ifp->if_watchdog = rge_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, RGE_TX_LIST_CNT);
	ifp->if_mtu = RGE_JUMBO_MTU;

	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
	ifp->if_capabilities = IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_TCPv4_Tx |
	    IFCAP_CSUM_UDPv4_Rx | IFCAP_CSUM_UDPv4_Tx;

#if NVLAN > 0
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
#endif

	timeout_set(&sc->sc_timeout, rge_tick, sc);
	task_set(&sc->sc_task, rge_txstart, sc);
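	/* XXX timeout_set()/task_set() are OpenBSD APIs; a native port
	 * would use callout(9) and workqueue(9)/softint(9) here. */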

	/* Initialize ifmedia structures. */
	ifmedia_init(&sc->sc_media, IFM_IMASK, rge_ifmedia_upd,
	    rge_ifmedia_sts);
	rge_add_media_types(sc);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
	sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media;

	if_attach(ifp);
	ether_ifattach(ifp, eaddr);
}

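/*
 * Interrupt handler: ack and dispatch RX/TX events, switching between
 * plain per-packet interrupts and the simulated (hardware timer based)
 * moderation mode. Returns nonzero if the interrupt was ours.
 */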
int
rge_intr(void *arg)
{
	struct rge_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint32_t status;
	int claimed = 0, rx, tx;

	if (!(ifp->if_flags & IFF_RUNNING))
		return (0);

	/* Disable interrupts. */
	RGE_WRITE_4(sc, RGE_IMR, 0);

	status = RGE_READ_4(sc, RGE_ISR);
	if (!(sc->rge_flags & RGE_FLAG_MSI)) {
		if ((status & RGE_INTRS) == 0 || status == 0xffffffff)
			return (0);
	}
	if (status)
		RGE_WRITE_4(sc, RGE_ISR, status);

	if (status & RGE_ISR_PCS_TIMEOUT)
		claimed = 1;

	rx = tx = 0;
	if (status & RGE_INTRS) {
		if (status &
		    (sc->rge_rx_ack | RGE_ISR_RX_ERR | RGE_ISR_RX_FIFO_OFLOW)) {
			rx |= rge_rxeof(sc);
			claimed = 1;
		}

		if (status & (sc->rge_tx_ack | RGE_ISR_TX_ERR)) {
			tx |= rge_txeof(sc);
			claimed = 1;
		}

		if (status & RGE_ISR_SYSTEM_ERR) {
			KERNEL_LOCK(1, NULL);
			rge_init(ifp);
			KERNEL_UNLOCK_ONE(NULL);
			claimed = 1;
		}
	}

	if (sc->rge_timerintr) {
		if ((tx | rx) == 0) {
			/*
			 * Nothing needs to be processed; fall back
			 * to plain TX/RX interrupts.
			 */
			rge_setup_intr(sc, RGE_IMTYPE_NONE);

			/*
			 * Recollect, mainly to avoid the possible
			 * race introduced by changing interrupt
			 * masks.
			 */
			rge_rxeof(sc);
			rge_txeof(sc);
		} else
			RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
	} else if (tx | rx) {
		/*
		 * Assume that using simulated interrupt moderation
		 * (hardware timer based) could reduce the interrupt
		 * rate.
		 */
		rge_setup_intr(sc, RGE_IMTYPE_SIM);
	}

	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);

	return (claimed);
}

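/*
 * Queue a packet for transmission. Returns the number of descriptors
 * consumed, or 0 if the mbuf could not be mapped and should be dropped
 * by the caller.
 */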
int
rge_encap(struct rge_softc *sc, struct mbuf *m, int idx)
{
	struct rge_tx_desc *d = NULL;
	struct rge_txq *txq;
	bus_dmamap_t txmap;
	uint32_t cmdsts, cflags = 0;
	int cur, error, i, last, nsegs;

	/*
	 * Set RGE_TDEXTSTS_IPCSUM if any checksum offloading is requested.
	 * Otherwise, RGE_TDEXTSTS_TCPCSUM / RGE_TDEXTSTS_UDPCSUM does not
	 * take effect.
	 */
	if ((m->m_pkthdr.csum_flags &
	    (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) != 0) {
		cflags |= RGE_TDEXTSTS_IPCSUM;
		if (m->m_pkthdr.csum_flags & M_CSUM_TCPv4)
			cflags |= RGE_TDEXTSTS_TCPCSUM;
		if (m->m_pkthdr.csum_flags & M_CSUM_UDPv4)
			cflags |= RGE_TDEXTSTS_UDPCSUM;
	}

	txq = &sc->rge_ldata.rge_txq[idx];
	txmap = txq->txq_dmamap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m, BUS_DMA_NOWAIT);
	switch (error) {
	case 0:
		break;
	case EFBIG: /* mbuf chain is too fragmented */
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m,
		    BUS_DMA_NOWAIT) == 0)
			break;

		/* FALLTHROUGH */
	default:
		return (0);
	}

	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	nsegs = txmap->dm_nsegs;

	/* Set up hardware VLAN tagging. */
#if NVLAN > 0
	if (m->m_flags & M_VLANTAG)
		cflags |= bswap16(m->m_pkthdr.ether_vtag | RGE_TDEXTSTS_VTAG);
#endif

	cur = idx;
	cmdsts = RGE_TDCMDSTS_SOF;

	for (i = 0; i < txmap->dm_nsegs; i++) {
		d = &sc->rge_ldata.rge_tx_list[cur];

		d->rge_extsts = htole32(cflags);
		d->rge_addrlo = htole32(RGE_ADDR_LO(txmap->dm_segs[i].ds_addr));
		d->rge_addrhi = htole32(RGE_ADDR_HI(txmap->dm_segs[i].ds_addr));

		cmdsts |= txmap->dm_segs[i].ds_len;

		if (cur == RGE_TX_LIST_CNT - 1)
			cmdsts |= RGE_TDCMDSTS_EOR;

		d->rge_cmdsts = htole32(cmdsts);

		last = cur;
		cmdsts = RGE_TDCMDSTS_OWN;
		cur = RGE_NEXT_TX_DESC(cur);
	}

	/* Set EOF on the last descriptor. */
	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_EOF);

	/* Transfer ownership of packet to the chip. */
	d = &sc->rge_ldata.rge_tx_list[idx];

	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
	    cur * sizeof(struct rge_tx_desc), sizeof(struct rge_tx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Update info of TX queue and descriptors. */
	txq->txq_mbuf = m;
	txq->txq_descidx = last;

	return (nsegs);
}

int
rge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct rge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			rge_init(ifp);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				rge_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				rge_stop(ifp);
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu > RGE_JUMBO_MTU) {
			error = EINVAL;
			break;
		}
		ifp->if_mtu = ifr->ifr_mtu;
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			rge_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
rge_start(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	struct ifqueue *ifq = &ifp->if_snd;	/* XXX OpenBSD ifq API */
	struct mbuf *m;
	int free, idx, used;
	int queued = 0;

#ifndef LINK_STATE_IS_UP
#define LINK_STATE_IS_UP(_s)    \
	((_s) >= LINK_STATE_UP || (_s) == LINK_STATE_UNKNOWN)
#endif

	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
		ifq_purge(ifq);
		return;
	}

	/* Calculate free space. */
	idx = sc->rge_ldata.rge_txq_prodidx;
	free = sc->rge_ldata.rge_txq_considx;
	if (free <= idx)
		free += RGE_TX_LIST_CNT;
	free -= idx;
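	/*
	 * e.g. considx 4, prodidx 10, RGE_TX_LIST_CNT 1024:
	 * free = 4 + 1024 - 10 = 1018 descriptors still unused.
	 */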

	for (;;) {
		if (RGE_TX_NSEGS >= free + 2) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		used = rge_encap(sc, m, idx);
		if (used == 0) {
			m_freem(m);
			continue;
		}

		KASSERT(used <= free);
		free -= used;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		idx += used;
		if (idx >= RGE_TX_LIST_CNT)
			idx -= RGE_TX_LIST_CNT;

		queued++;
	}

	if (queued == 0)
		return;

	/* Set a timeout in case the chip goes out to lunch. */
	ifp->if_timer = 5;

	sc->rge_ldata.rge_txq_prodidx = idx;
	ifq_serialize(ifq, &sc->sc_task);
}

void
rge_watchdog(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	rge_init(ifp);
}

int
rge_init(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	uint32_t val;
	uint16_t max_frame_size;
	int i;

	rge_stop(ifp);

	/* Set MAC address. */
	rge_set_macaddr(sc, sc->sc_enaddr);

	/* Set the maximum frame size, but don't let the MTU be less than
	 * ETHERMTU. */
	if (ifp->if_mtu < ETHERMTU)
		max_frame_size = ETHERMTU;
	else
		max_frame_size = ifp->if_mtu;

	max_frame_size += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
	    ETHER_CRC_LEN + 1;

	if (max_frame_size > RGE_JUMBO_FRAMELEN)
		max_frame_size -= 1;
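	/*
	 * e.g. MTU 1500: 1500 + 14 (header) + 4 (VLAN) + 4 (CRC) + 1
	 * = 1523 bytes programmed as the RX size limit.
	 */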

	RGE_WRITE_2(sc, RGE_RXMAXSIZE, max_frame_size);

	/* Initialize the RX descriptor list. */
	if (rge_rx_list_init(sc) == ENOBUFS) {
		printf("%s: init failed: no memory for RX buffers\n",
		    sc->sc_dev.dv_xname);
		rge_stop(ifp);
		return (ENOBUFS);
	}

	/* Initialize TX descriptors. */
	rge_tx_list_init(sc);

	/* Load the addresses of the RX and TX lists into the chip. */
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_LO,
	    RGE_ADDR_LO(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_HI,
	    RGE_ADDR_HI(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_LO,
	    RGE_ADDR_LO(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_HI,
	    RGE_ADDR_HI(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));

	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);

	RGE_CLRBIT_1(sc, 0xf1, 0x80);
	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
	RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_RDY_TO_L23);

	/* Clear interrupt moderation timer. */
	for (i = 0; i < 64; i++)
		RGE_WRITE_4(sc, RGE_IM(i), 0);

	/* Set the initial RX and TX configurations. */
	RGE_WRITE_4(sc, RGE_RXCFG, RGE_RXCFG_CONFIG);
	RGE_WRITE_4(sc, RGE_TXCFG, RGE_TXCFG_CONFIG);

	val = rge_read_csi(sc, 0x70c) & ~0xff000000;
	rge_write_csi(sc, 0x70c, val | 0x27000000);

	/* Enable hardware optimization function. */
	val = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x78) & ~0x00007000;
	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x78, val | 0x00005000);

	RGE_WRITE_2(sc, 0x0382, 0x221b);
	RGE_WRITE_1(sc, 0x4500, 0);
	RGE_WRITE_2(sc, 0x4800, 0);
	RGE_CLRBIT_1(sc, RGE_CFG1, RGE_CFG1_SPEED_DOWN);

	rge_write_mac_ocp(sc, 0xc140, 0xffff);
	rge_write_mac_ocp(sc, 0xc142, 0xffff);

	val = rge_read_mac_ocp(sc, 0xd3e2) & ~0x0fff;
	rge_write_mac_ocp(sc, 0xd3e2, val | 0x03a9);

	RGE_MAC_CLRBIT(sc, 0xd3e4, 0x00ff);
	RGE_MAC_SETBIT(sc, 0xe860, 0x0080);
	RGE_MAC_SETBIT(sc, 0xeb58, 0x0001);

	val = rge_read_mac_ocp(sc, 0xe614) & ~0x0700;
	rge_write_mac_ocp(sc, 0xe614, val | 0x0400);

	RGE_MAC_CLRBIT(sc, 0xe63e, 0x0c00);

	val = rge_read_mac_ocp(sc, 0xe63e) & ~0x0030;
	rge_write_mac_ocp(sc, 0xe63e, val | 0x0020);

	RGE_MAC_SETBIT(sc, 0xc0b4, 0x000c);

	val = rge_read_mac_ocp(sc, 0xeb6a) & ~0x007f;
	rge_write_mac_ocp(sc, 0xeb6a, val | 0x0033);

	val = rge_read_mac_ocp(sc, 0xeb50) & ~0x03e0;
	rge_write_mac_ocp(sc, 0xeb50, val | 0x0040);

	val = rge_read_mac_ocp(sc, 0xe056) & ~0x00f0;
	rge_write_mac_ocp(sc, 0xe056, val | 0x0030);

	RGE_WRITE_1(sc, RGE_TDFNR, 0x10);

	RGE_MAC_CLRBIT(sc, 0xe040, 0x1000);

	val = rge_read_mac_ocp(sc, 0xe0c0) & ~0x4f0f;
	rge_write_mac_ocp(sc, 0xe0c0, val | 0x4403);

	RGE_MAC_SETBIT(sc, 0xe052, 0x0068);
	RGE_MAC_CLRBIT(sc, 0xe052, 0x0080);

	val = rge_read_mac_ocp(sc, 0xc0ac) & ~0x0080;
	rge_write_mac_ocp(sc, 0xc0ac, val | 0x1f00);

	val = rge_read_mac_ocp(sc, 0xd430) & ~0x0fff;
	rge_write_mac_ocp(sc, 0xd430, val | 0x047f);

	RGE_MAC_SETBIT(sc, 0xe84c, 0x00c0);

	/* Disable EEE plus. */
	RGE_MAC_CLRBIT(sc, 0xe080, 0x0002);

	RGE_MAC_CLRBIT(sc, 0xea1c, 0x0004);

	RGE_MAC_SETBIT(sc, 0xeb54, 0x0001);
	DELAY(1);
	RGE_MAC_CLRBIT(sc, 0xeb54, 0x0001);

	RGE_CLRBIT_4(sc, 0x1880, 0x0030);

	rge_write_mac_ocp(sc, 0xe098, 0xc302);

	if (sc->sc_ec.ec_capabilities & ETHERCAP_VLAN_HWTAGGING)
		RGE_SETBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);

	RGE_SETBIT_2(sc, RGE_CPLUSCMD, RGE_CPLUSCMD_RXCSUM);

	for (i = 0; i < 10; i++) {
		if (!(rge_read_mac_ocp(sc, 0xe00e) & 0x2000))
			break;
		DELAY(1000);
	}

	/* Disable RXDV gate. */
	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
	DELAY(2000);

	rge_ifmedia_upd(ifp);

	/* Enable transmit and receive. */
	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_TXENB | RGE_CMD_RXENB);

	/* Program promiscuous mode and multicast filters. */
	rge_iff(sc);

	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);

	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);

	/* Enable interrupts. */
	rge_setup_intr(sc, RGE_IMTYPE_SIM);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	timeout_add_sec(&sc->sc_timeout, 1);

	return (0);
}

/*
 * Stop the adapter and free any mbufs allocated to the RX and TX lists.
 */
void
rge_stop(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	int i;

	timeout_del(&sc->sc_timeout);

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_RUNNING;
	sc->rge_timerintr = 0;

	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
	    RGE_RXCFG_ERRPKT);

	RGE_WRITE_4(sc, RGE_IMR, 0);
	RGE_WRITE_4(sc, RGE_ISR, 0xffffffff);

	rge_reset(sc);

	intr_barrier(sc->sc_ih);
	ifq_barrier(&ifp->if_snd);
/*	ifq_clr_oactive(&ifp->if_snd); Sevan - OpenBSD queue API */

	if (sc->rge_head != NULL) {
		m_freem(sc->rge_head);
		sc->rge_head = sc->rge_tail = NULL;
	}

	/* Free the TX list buffers. */
	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
		if (sc->rge_ldata.rge_txq[i].txq_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->rge_ldata.rge_txq[i].txq_dmamap);
			m_freem(sc->rge_ldata.rge_txq[i].txq_mbuf);
			sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
		}
	}

	/* Free the RX list buffers. */
	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
		if (sc->rge_ldata.rge_rxq[i].rxq_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->rge_ldata.rge_rxq[i].rxq_dmamap);
			m_freem(sc->rge_ldata.rge_rxq[i].rxq_mbuf);
			sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
		}
	}
}

/*
 * Set media options.
 */
int
rge_ifmedia_upd(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_media;
	int anar, gig, val;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Disable Gigabit Lite. */
	RGE_PHY_CLRBIT(sc, 0xa428, 0x0200);
	RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0001);

	val = rge_read_phy_ocp(sc, 0xa5d4);
	val &= ~RGE_ADV_2500TFDX;

	anar = gig = 0;
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		anar |= ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig |= GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		val |= RGE_ADV_2500TFDX;
		break;
	case IFM_2500_T:
		anar |= ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig |= GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		val |= RGE_ADV_2500TFDX;
		ifp->if_baudrate = IF_Mbps(2500);
		break;
	case IFM_1000_T:
		anar |= ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig |= GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		ifp->if_baudrate = IF_Gbps(1);
		break;
	case IFM_100_TX:
		anar |= ANAR_TX | ANAR_TX_FD;
		ifp->if_baudrate = IF_Mbps(100);
		break;
	case IFM_10_T:
		anar |= ANAR_10 | ANAR_10_FD;
		ifp->if_baudrate = IF_Mbps(10);
		break;
	default:
		printf("%s: unsupported media type\n", sc->sc_dev.dv_xname);
		return (EINVAL);
	}

	rge_write_phy(sc, 0, MII_ANAR, anar | ANAR_PAUSE_ASYM | ANAR_FC);
	rge_write_phy(sc, 0, MII_100T2CR, gig);
	rge_write_phy_ocp(sc, 0xa5d4, val);
	rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_STARTNEG);

	return (0);
}

/*
 * Report current media status.
 */
void
rge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct rge_softc *sc = ifp->if_softc;
	uint16_t status = 0;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (rge_get_link_status(sc)) {
		ifmr->ifm_status |= IFM_ACTIVE;

		status = RGE_READ_2(sc, RGE_PHYSTAT);
		if ((status & RGE_PHYSTAT_FDX) ||
		    (status & RGE_PHYSTAT_2500MBPS))
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;

		if (status & RGE_PHYSTAT_10MBPS)
			ifmr->ifm_active |= IFM_10_T;
		else if (status & RGE_PHYSTAT_100MBPS)
			ifmr->ifm_active |= IFM_100_TX;
		else if (status & RGE_PHYSTAT_1000MBPS)
			ifmr->ifm_active |= IFM_1000_T;
		else if (status & RGE_PHYSTAT_2500MBPS)
			ifmr->ifm_active |= IFM_2500_T;
	}
}

/*
 * Allocate memory for RX/TX rings.
 */
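/*
 * Each ring follows the usual bus_dma(9) sequence: dmamap_create,
 * dmamem_alloc, dmamem_map, then dmamap_load, with the same steps
 * unwound in reverse on failure.
 */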
int
rge_allocmem(struct rge_softc *sc)
{
	int error, i;

	/* Allocate DMA'able memory for the TX ring. */
	error = bus_dmamap_create(sc->sc_dmat, RGE_TX_LIST_SZ, 1,
	    RGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT, &sc->rge_ldata.rge_tx_list_map);
	if (error) {
		printf("%s: can't create TX list map\n", sc->sc_dev.dv_xname);
		return (error);
	}
	error = bus_dmamem_alloc(sc->sc_dmat, RGE_TX_LIST_SZ, RGE_ALIGN, 0,
	    &sc->rge_ldata.rge_tx_listseg, 1, &sc->rge_ldata.rge_tx_listnseg,
	    BUS_DMA_NOWAIT); /* XXX OpenBSD adds BUS_DMA_ZERO */
	if (error) {
		printf("%s: can't alloc TX list\n", sc->sc_dev.dv_xname);
		return (error);
	}

	/* Load the map for the TX ring. */
	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
	    sc->rge_ldata.rge_tx_listnseg, RGE_TX_LIST_SZ,
	    &sc->rge_ldata.rge_tx_list,
	    BUS_DMA_NOWAIT); /* XXX OpenBSD adds BUS_DMA_COHERENT */
	if (error) {
		printf("%s: can't map TX dma buffers\n", sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
		    sc->rge_ldata.rge_tx_listnseg);
		return (error);
	}
	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
	    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load TX dma map\n", sc->sc_dev.dv_xname);
		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map);
		bus_dmamem_unmap(sc->sc_dmat,
		    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ);
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
		    sc->rge_ldata.rge_tx_listnseg);
		return (error);
	}

	/* Create DMA maps for TX buffers. */
	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN,
		    RGE_TX_NSEGS, RGE_JUMBO_FRAMELEN, 0, 0,
		    &sc->rge_ldata.rge_txq[i].txq_dmamap);
		if (error) {
			printf("%s: can't create DMA map for TX\n",
			    sc->sc_dev.dv_xname);
			return (error);
		}
	}

	/* Allocate DMA'able memory for the RX ring. */
	error = bus_dmamap_create(sc->sc_dmat, RGE_RX_LIST_SZ, 1,
	    RGE_RX_LIST_SZ, 0, 0, &sc->rge_ldata.rge_rx_list_map);
	if (error) {
		printf("%s: can't create RX list map\n", sc->sc_dev.dv_xname);
		return (error);
	}
	error = bus_dmamem_alloc(sc->sc_dmat, RGE_RX_LIST_SZ, RGE_ALIGN, 0,
	    &sc->rge_ldata.rge_rx_listseg, 1, &sc->rge_ldata.rge_rx_listnseg,
	    BUS_DMA_NOWAIT);  /* XXX OpenBSD adds BUS_DMA_ZERO */
	if (error) {
		printf("%s: can't alloc RX list\n", sc->sc_dev.dv_xname);
		return (error);
	}

	/* Load the map for the RX ring. */
	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
	    sc->rge_ldata.rge_rx_listnseg, RGE_RX_LIST_SZ,
	    &sc->rge_ldata.rge_rx_list,
	    BUS_DMA_NOWAIT);  /* XXX OpenBSD adds BUS_DMA_COHERENT */
	if (error) {
		printf("%s: can't map RX dma buffers\n", sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
		    sc->rge_ldata.rge_rx_listnseg);
		return (error);
	}
	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
	    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load RX dma map\n", sc->sc_dev.dv_xname);
		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map);
		bus_dmamem_unmap(sc->sc_dmat,
		    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ);
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
		    sc->rge_ldata.rge_rx_listnseg);
		return (error);
	}

	/* Create DMA maps for RX buffers. */
	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN, 1,
		    RGE_JUMBO_FRAMELEN, 0, 0,
		    &sc->rge_ldata.rge_rxq[i].rxq_dmamap);
		if (error) {
			printf("%s: can't create DMA map for RX\n",
			    sc->sc_dev.dv_xname);
			return (error);
		}
	}

	return (error);
}

/*
 * Initialize the RX descriptor and attach an mbuf cluster.
 */
int
rge_newbuf(struct rge_softc *sc, int idx)
{
	struct mbuf *m;
	struct rge_rx_desc *r;
	struct rge_rxq *rxq;
	bus_dmamap_t rxmap;
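
	/* XXX MCLGETI is OpenBSD; NetBSD would use MGETHDR + MCLGET. */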
	m = MCLGETI(NULL, M_DONTWAIT, NULL, RGE_JUMBO_FRAMELEN);
	if (m == NULL)
		return (ENOBUFS);

	m->m_len = m->m_pkthdr.len = RGE_JUMBO_FRAMELEN;

	rxq = &sc->rge_ldata.rge_rxq[idx];
	rxmap = rxq->rxq_dmamap;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m, BUS_DMA_NOWAIT))
		goto out;

	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Map the segments into RX descriptors. */
	r = &sc->rge_ldata.rge_rx_list[idx];

	if (RGE_OWN(r)) {
		printf("%s: tried to map busy RX descriptor\n",
		    sc->sc_dev.dv_xname);
		goto out;
	}

	rxq->rxq_mbuf = m;

	r->rge_extsts = 0;
	r->rge_addrlo = htole32(RGE_ADDR_LO(rxmap->dm_segs[0].ds_addr));
	r->rge_addrhi = htole32(RGE_ADDR_HI(rxmap->dm_segs[0].ds_addr));

	r->rge_cmdsts = htole32(rxmap->dm_segs[0].ds_len);
	if (idx == RGE_RX_LIST_CNT - 1)
		r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);

	r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
out:
	if (m != NULL)
		m_freem(m);
	return (ENOMEM);
}

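/*
 * Requeue a descriptor with its existing mbuf after an RX error or a
 * failed buffer replacement, handing ownership straight back to the chip.
 */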
void
rge_discard_rxbuf(struct rge_softc *sc, int idx)
{
	struct rge_rx_desc *r;

	r = &sc->rge_ldata.rge_rx_list[idx];

	r->rge_cmdsts = htole32(RGE_JUMBO_FRAMELEN);
	r->rge_extsts = 0;
	if (idx == RGE_RX_LIST_CNT - 1)
		r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
	r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

int
rge_rx_list_init(struct rge_softc *sc)
{
	int i;

	memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);

	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
		sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
		if (rge_newbuf(sc, i) == ENOBUFS)
			return (ENOBUFS);
	}

	sc->rge_ldata.rge_rxq_prodidx = 0;
	sc->rge_head = sc->rge_tail = NULL;

	return (0);
}

void
rge_tx_list_init(struct rge_softc *sc)
{
	int i;

	memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);

	for (i = 0; i < RGE_TX_LIST_CNT; i++)
		sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map, 0,
	    sc->rge_ldata.rge_tx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->rge_ldata.rge_txq_prodidx = sc->rge_ldata.rge_txq_considx = 0;
}

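/*
 * RX completion: drain finished descriptors, reassemble multi-fragment
 * frames, and pass completed packets to the stack. Returns nonzero if
 * any descriptor was processed.
 */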
int
rge_rxeof(struct rge_softc *sc)
{
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct rge_rx_desc *cur_rx;
	struct rge_rxq *rxq;
	uint32_t rxstat, extsts;
	int i, total_len, rx = 0;

	for (i = sc->rge_ldata.rge_rxq_prodidx; ; i = RGE_NEXT_RX_DESC(i)) {
		/* Invalidate the descriptor memory. */
		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
		    i * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_rx = &sc->rge_ldata.rge_rx_list[i];

		if (RGE_OWN(cur_rx))
			break;

		rxstat = letoh32(cur_rx->rge_cmdsts);
		extsts = letoh32(cur_rx->rge_extsts);

		total_len = RGE_RXBYTES(cur_rx);
		rxq = &sc->rge_ldata.rge_rxq[i];
		m = rxq->rxq_mbuf;
		rx = 1;

		/* Invalidate the RX mbuf and unload its map. */
		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_dmamap, 0,
		    rxq->rxq_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxq->rxq_dmamap);

		if ((rxstat & (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) !=
		    (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) {
			rge_discard_rxbuf(sc, i);
			continue;
		}

		if (rxstat & RGE_RDCMDSTS_RXERRSUM) {
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->rge_head != NULL) {
				m_freem(sc->rge_head);
				sc->rge_head = sc->rge_tail = NULL;
			}
			rge_discard_rxbuf(sc, i);
			continue;
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
		 */
		if (rge_newbuf(sc, i) == ENOBUFS) {
			if (sc->rge_head != NULL) {
				m_freem(sc->rge_head);
				sc->rge_head = sc->rge_tail = NULL;
			}
			rge_discard_rxbuf(sc, i);
			continue;
		}

		if (sc->rge_head != NULL) {
			m->m_len = total_len;
			/*
			 * Special case: if there are 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes are the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->rge_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->rge_tail->m_next = m;
			}
			m = sc->rge_head;
			sc->rge_head = sc->rge_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);

		/* Check IP header checksum. */
		if (!(rxstat & RGE_RDCMDSTS_IPCSUMERR) &&
		    (extsts & RGE_RDEXTSTS_IPV4))
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

		/* Check TCP/UDP checksum. */
		if ((extsts & (RGE_RDEXTSTS_IPV4 | RGE_RDEXTSTS_IPV6)) &&
		    (((rxstat & RGE_RDCMDSTS_TCPPKT) &&
		    !(rxstat & RGE_RDCMDSTS_TCPCSUMERR)) ||
		    ((rxstat & RGE_RDCMDSTS_UDPPKT) &&
		    !(rxstat & RGE_RDCMDSTS_UDPCSUMERR))))
			m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
			    M_UDP_CSUM_IN_OK;

#if NVLAN > 0
		if (extsts & RGE_RDEXTSTS_VTAG) {
			m->m_pkthdr.ether_vtag =
			    ntohs(extsts & RGE_RDEXTSTS_VLAN_MASK);
			m->m_flags |= M_VLANTAG;
		}
#endif

		ml_enqueue(&ml, m);
	}

	sc->rge_ldata.rge_rxq_prodidx = i;

	if_input(ifp, &ml);

	return (rx);
}

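/*
 * TX completion: reclaim descriptors the chip has finished with.
 * "free" records why the loop ended: 1 = descriptors were reclaimed,
 * 2 = stopped at a descriptor the chip still owns.
 */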
int
rge_txeof(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct rge_txq *txq;
	uint32_t txstat;
	int cons, idx, prod;
	int free = 0;

	prod = sc->rge_ldata.rge_txq_prodidx;
	cons = sc->rge_ldata.rge_txq_considx;

	while (prod != cons) {
		txq = &sc->rge_ldata.rge_txq[cons];
		idx = txq->txq_descidx;

		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
		    idx * sizeof(struct rge_tx_desc),
		    sizeof(struct rge_tx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		txstat = letoh32(sc->rge_ldata.rge_tx_list[idx].rge_cmdsts);

		if (txstat & RGE_TDCMDSTS_OWN) {
			free = 2;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap, 0,
		    txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
		m_freem(txq->txq_mbuf);
		txq->txq_mbuf = NULL;

		if (txstat & (RGE_TDCMDSTS_EXCESSCOLL | RGE_TDCMDSTS_COLL))
			ifp->if_collisions++;
		if (txstat & RGE_TDCMDSTS_TXERR)
			ifp->if_oerrors++;

		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
		    idx * sizeof(struct rge_tx_desc),
		    sizeof(struct rge_tx_desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		cons = RGE_NEXT_TX_DESC(idx);
		free = 1;
	}

	if (free == 0)
		return (0);

	sc->rge_ldata.rge_txq_considx = cons;

	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
	else if (free == 2)
		ifq_serialize(&ifp->if_snd, &sc->sc_task);
	else
		ifp->if_timer = 0;

	return (1);
}

void
rge_reset(struct rge_softc *sc)
{
	int i;

	/* Enable RXDV gate. */
	RGE_SETBIT_1(sc, RGE_PPSW, 0x08);
	DELAY(2000);

	for (i = 0; i < 10; i++) {
		DELAY(100);
		if ((RGE_READ_1(sc, RGE_MCUCMD) & (RGE_MCUCMD_RXFIFO_EMPTY |
		    RGE_MCUCMD_TXFIFO_EMPTY)) == (RGE_MCUCMD_RXFIFO_EMPTY |
		    RGE_MCUCMD_TXFIFO_EMPTY))
			break;
	}

	/* Soft reset. */
	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_RESET);

	for (i = 0; i < RGE_TIMEOUT; i++) {
		DELAY(100);
		if (!(RGE_READ_1(sc, RGE_CMD) & RGE_CMD_RESET))
			break;
	}
	if (i == RGE_TIMEOUT)
		printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);
}

void
rge_iff(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct ethercom *ac = &sc->sc_ec;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[2];
	uint32_t rxfilt;
	int h = 0;

	rxfilt = RGE_READ_4(sc, RGE_RXCFG);
	rxfilt &= ~(RGE_RXCFG_ALLPHYS | RGE_RXCFG_MULTI);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept frames destined to our station address.
	 * Always accept broadcast frames.
	 */
	rxfilt |= RGE_RXCFG_INDIV | RGE_RXCFG_BROAD;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		rxfilt |= RGE_RXCFG_MULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= RGE_RXCFG_ALLPHYS;
		hashes[0] = hashes[1] = 0xffffffff;
	} else {
		rxfilt |= RGE_RXCFG_MULTI;
		/* Program new filter. */
		memset(hashes, 0, sizeof(hashes));
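		/*
		 * The top 6 bits of the big-endian CRC of each address
		 * pick one of 64 hash bits, split across the two 32-bit
		 * multicast filter registers.
		 */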

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			h = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN) >> 26;

			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	RGE_WRITE_4(sc, RGE_RXCFG, rxfilt);
	RGE_WRITE_4(sc, RGE_MAR0, bswap32(hashes[1]));
	RGE_WRITE_4(sc, RGE_MAR4, bswap32(hashes[0]));
}

void
rge_set_phy_power(struct rge_softc *sc, int on)
{
	int i;

	if (on) {
		RGE_SETBIT_1(sc, RGE_PMCH, 0xc0);

		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN);

		for (i = 0; i < RGE_TIMEOUT; i++) {
			/* Wait for the PHY status field to report LAN-on. */
			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 3)
				break;
			DELAY(1000);
		}
	} else
		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_PDOWN);
}

void
rge_phy_config(struct rge_softc *sc)
{
	uint16_t mcode_ver, val;
	int i;
	static const uint16_t mac_cfg3_a438_value[] =
	    { 0x0043, 0x00a7, 0x00d6, 0x00ec, 0x00f6, 0x00fb, 0x00fd, 0x00ff,
	      0x00bb, 0x0058, 0x0029, 0x0013, 0x0009, 0x0004, 0x0002 };

	static const uint16_t mac_cfg3_b88e_value[] =
	    { 0xc091, 0x6e12, 0xc092, 0x1214, 0xc094, 0x1516, 0xc096, 0x171b,
	      0xc098, 0x1b1c, 0xc09a, 0x1f1f, 0xc09c, 0x2021, 0xc09e, 0x2224,
	      0xc0a0, 0x2424, 0xc0a2, 0x2424, 0xc0a4, 0x2424, 0xc018, 0x0af2,
	      0xc01a, 0x0d4a, 0xc01c, 0x0f26, 0xc01e, 0x118d, 0xc020, 0x14f3,
	      0xc022, 0x175a, 0xc024, 0x19c0, 0xc026, 0x1c26, 0xc089, 0x6050,
	      0xc08a, 0x5f6e, 0xc08c, 0x6e6e, 0xc08e, 0x6e6e, 0xc090, 0x6e12 };

	/* Read microcode version. */
	rge_write_phy_ocp(sc, 0xa436, 0x801e);
	mcode_ver = rge_read_phy_ocp(sc, 0xa438);

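	/*
	 * The microcode tables below are reloaded only when the version
	 * stored in the PHY differs from the driver's.
	 */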
	if (sc->rge_type == MAC_CFG2) {
		for (i = 0; i < nitems(rtl8125_mac_cfg2_ephy); i++) {
			rge_write_ephy(sc, rtl8125_mac_cfg2_ephy[i].reg,
			    rtl8125_mac_cfg2_ephy[i].val);
		}

		if (mcode_ver != RGE_MAC_CFG2_MCODE_VER) {
			/* Disable PHY config. */
			RGE_CLRBIT_1(sc, 0xf2, 0x20);
			DELAY(1000);

			rge_patch_phy_mcu(sc, 1);

			rge_write_phy_ocp(sc, 0xa436, 0x8024);
			rge_write_phy_ocp(sc, 0xa438, 0x8600);
			rge_write_phy_ocp(sc, 0xa436, 0xb82e);
			rge_write_phy_ocp(sc, 0xa438, 0x0001);

			RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
			for (i = 0; i < nitems(rtl8125_mac_cfg2_mcu); i++) {
				rge_write_phy_ocp(sc,
				    rtl8125_mac_cfg2_mcu[i].reg,
				    rtl8125_mac_cfg2_mcu[i].val);
			}
			RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);

			rge_write_phy_ocp(sc, 0xa436, 0);
			rge_write_phy_ocp(sc, 0xa438, 0);
			RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
			rge_write_phy_ocp(sc, 0xa436, 0x8024);
			rge_write_phy_ocp(sc, 0xa438, 0);

			rge_patch_phy_mcu(sc, 0);

			/* Enable PHY config. */
			RGE_SETBIT_1(sc, 0xf2, 0x20);

			/* Write microcode version. */
			rge_write_phy_ocp(sc, 0xa436, 0x801e);
			rge_write_phy_ocp(sc, 0xa438, RGE_MAC_CFG2_MCODE_VER);
		}

		val = rge_read_phy_ocp(sc, 0xad40) & ~0x03ff;
		rge_write_phy_ocp(sc, 0xad40, val | 0x0084);
		RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
		val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
		rge_write_phy_ocp(sc, 0xad16, val | 0x0006);
		val = rge_read_phy_ocp(sc, 0xad32) & ~0x03ff;
		rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
		RGE_PHY_CLRBIT(sc, 0xac08, 0x1100);
		val = rge_read_phy_ocp(sc, 0xac8a) & ~0xf000;
		rge_write_phy_ocp(sc, 0xac8a, val | 0x7000);
		RGE_PHY_SETBIT(sc, 0xad18, 0x0400);
		RGE_PHY_SETBIT(sc, 0xad1a, 0x03ff);
		RGE_PHY_SETBIT(sc, 0xad1c, 0x03ff);

		rge_write_phy_ocp(sc, 0xa436, 0x80ea);
		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
		rge_write_phy_ocp(sc, 0xa438, val | 0xc400);
		rge_write_phy_ocp(sc, 0xa436, 0x80eb);
		val = rge_read_phy_ocp(sc, 0xa438) & ~0x0700;
		rge_write_phy_ocp(sc, 0xa438, val | 0x0300);
		rge_write_phy_ocp(sc, 0xa436, 0x80f8);
		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
		rge_write_phy_ocp(sc, 0xa438, val | 0x1c00);
		rge_write_phy_ocp(sc, 0xa436, 0x80f1);
		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
		rge_write_phy_ocp(sc, 0xa438, val | 0x3000);
		rge_write_phy_ocp(sc, 0xa436, 0x80fe);
		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
		rge_write_phy_ocp(sc, 0xa438, val | 0xa500);
		rge_write_phy_ocp(sc, 0xa436, 0x8102);
		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
		rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
		rge_write_phy_ocp(sc, 0xa436, 0x8105);
		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
		rge_write_phy_ocp(sc, 0xa438, val | 0x3300);
		rge_write_phy_ocp(sc, 0xa436, 0x8100);
		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
		rge_write_phy_ocp(sc, 0xa438, val | 0x7000);
		rge_write_phy_ocp(sc, 0xa436, 0x8104);
		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
		rge_write_phy_ocp(sc, 0xa438, val | 0xf000);
		rge_write_phy_ocp(sc, 0xa436, 0x8106);
		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
		rge_write_phy_ocp(sc, 0xa438, val | 0x6500);
		rge_write_phy_ocp(sc, 0xa436, 0x80dc);
		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
		rge_write_phy_ocp(sc, 0xa438, val | 0xed00);
		rge_write_phy_ocp(sc, 0xa436, 0x80df);
		RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
		rge_write_phy_ocp(sc, 0xa436, 0x80e1);
		RGE_PHY_CLRBIT(sc, 0xa438, 0x0100);
		val = rge_read_phy_ocp(sc, 0xbf06) & ~0x003f;
		rge_write_phy_ocp(sc, 0xbf06, val | 0x0038);
		rge_write_phy_ocp(sc, 0xa436, 0x819f);
		rge_write_phy_ocp(sc, 0xa438, 0xd0b6);
		rge_write_phy_ocp(sc, 0xbc34, 0x5555);
		val = rge_read_phy_ocp(sc, 0xbf0a) & ~0x0e00;
		rge_write_phy_ocp(sc, 0xbf0a, val | 0x0a00);
		RGE_PHY_CLRBIT(sc, 0xa5c0, 0x0400);
		RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
	} else {
		for (i = 0; i < nitems(rtl8125_mac_cfg3_ephy); i++)
			rge_write_ephy(sc, rtl8125_mac_cfg3_ephy[i].reg,
			    rtl8125_mac_cfg3_ephy[i].val);

		if (mcode_ver != RGE_MAC_CFG3_MCODE_VER) {
			/* Disable PHY config. */
			RGE_CLRBIT_1(sc, 0xf2, 0x20);
			DELAY(1000);

			rge_patch_phy_mcu(sc, 1);

			rge_write_phy_ocp(sc, 0xa436, 0x8024);
			rge_write_phy_ocp(sc, 0xa438, 0x8601);
			rge_write_phy_ocp(sc, 0xa436, 0xb82e);
			rge_write_phy_ocp(sc, 0xa438, 0x0001);

			RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
			for (i = 0; i < nitems(rtl8125_mac_cfg3_mcu); i++) {
				rge_write_phy_ocp(sc,
				    rtl8125_mac_cfg3_mcu[i].reg,
				    rtl8125_mac_cfg3_mcu[i].val);
			}
			RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);

			rge_write_phy_ocp(sc, 0xa436, 0);
			rge_write_phy_ocp(sc, 0xa438, 0);
			RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
			rge_write_phy_ocp(sc, 0xa436, 0x8024);
			rge_write_phy_ocp(sc, 0xa438, 0);

			rge_patch_phy_mcu(sc, 0);

			/* Enable PHY config. */
			RGE_SETBIT_1(sc, 0xf2, 0x20);

			/* Write microcode version. */
			rge_write_phy_ocp(sc, 0xa436, 0x801e);
			rge_write_phy_ocp(sc, 0xa438, RGE_MAC_CFG3_MCODE_VER);
		}

		RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
		val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
		rge_write_phy_ocp(sc, 0xad16, val | 0x03ff);
		val = rge_read_phy_ocp(sc, 0xad32) & ~0x003f;
		rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
		RGE_PHY_CLRBIT(sc, 0xac08, 0x1000);
		RGE_PHY_CLRBIT(sc, 0xac08, 0x0100);
		val = rge_read_phy_ocp(sc, 0xacc0) & ~0x0003;
		rge_write_phy_ocp(sc, 0xacc0, val | 0x0002);
		val = rge_read_phy_ocp(sc, 0xad40) & ~0x00e0;
		rge_write_phy_ocp(sc, 0xad40, val | 0x0040);
		val = rge_read_phy_ocp(sc, 0xad40) & ~0x0007;
		rge_write_phy_ocp(sc, 0xad40, val | 0x0004);
		RGE_PHY_CLRBIT(sc, 0xac14, 0x0080);
		RGE_PHY_CLRBIT(sc, 0xac80, 0x0300);
		val = rge_read_phy_ocp(sc, 0xac5e) & ~0x0007;
		rge_write_phy_ocp(sc, 0xac5e, val | 0x0002);
		rge_write_phy_ocp(sc, 0xad4c, 0x00a8);
		rge_write_phy_ocp(sc, 0xac5c, 0x01ff);
		val = rge_read_phy_ocp(sc, 0xac8a) & ~0x00f0;
		rge_write_phy_ocp(sc, 0xac8a, val | 0x0030);
		rge_write_phy_ocp(sc, 0xb87c, 0x80a2);
		rge_write_phy_ocp(sc, 0xb87e, 0x0153);
		rge_write_phy_ocp(sc, 0xb87c, 0x809c);
		rge_write_phy_ocp(sc, 0xb87e, 0x0153);

		rge_write_phy_ocp(sc, 0xa436, 0x81b3);
		for (i = 0; i < nitems(mac_cfg3_a438_value); i++)
			rge_write_phy_ocp(sc, 0xa438, mac_cfg3_a438_value[i]);
		for (i = 0; i < 26; i++)
			rge_write_phy_ocp(sc, 0xa438, 0);
		rge_write_phy_ocp(sc, 0xa436, 0x8257);
		rge_write_phy_ocp(sc, 0xa438, 0x020f);
		rge_write_phy_ocp(sc, 0xa436, 0x80ea);
		rge_write_phy_ocp(sc, 0xa438, 0x7843);

		rge_patch_phy_mcu(sc, 1);
		RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
		RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
		for (i = 0; i < nitems(mac_cfg3_b88e_value); i += 2) {
			rge_write_phy_ocp(sc, 0xb88e, mac_cfg3_b88e_value[i]);
			rge_write_phy_ocp(sc, 0xb890,
			    mac_cfg3_b88e_value[i + 1]);
		}
		RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
		rge_patch_phy_mcu(sc, 0);

		RGE_PHY_SETBIT(sc, 0xd068, 0x2000);
		rge_write_phy_ocp(sc, 0xa436, 0x81a2);
		RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
		val = rge_read_phy_ocp(sc, 0xb54c) & ~0xff00;
		rge_write_phy_ocp(sc, 0xb54c, val | 0xdb00);
		RGE_PHY_CLRBIT(sc, 0xa454, 0x0001);
		RGE_PHY_SETBIT(sc, 0xa5d4, 0x0020);
		RGE_PHY_CLRBIT(sc, 0xad4e, 0x0010);
		RGE_PHY_CLRBIT(sc, 0xa86a, 0x0001);
		RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
	}

	/* Disable EEE. */
	RGE_MAC_CLRBIT(sc, 0xe040, 0x0003);
	RGE_MAC_CLRBIT(sc, 0xeb62, 0x0006);
	RGE_PHY_CLRBIT(sc, 0xa432, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xa5d0, 0x0006);
	RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xa6d8, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xa428, 0x0080);
	RGE_PHY_CLRBIT(sc, 0xa4a2, 0x0200);

	rge_patch_phy_mcu(sc, 1);
	RGE_MAC_CLRBIT(sc, 0xe052, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xa442, 0x3000);
	RGE_PHY_CLRBIT(sc, 0xa430, 0x8000);
	rge_patch_phy_mcu(sc, 0);
}
   1668 
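/*
 * Program the station address.  The ID registers only accept writes
 * while the WRITECFG bit is set in the EEPROM command register; the
 * six address bytes are packed into two little-endian words.
 */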
void
rge_set_macaddr(struct rge_softc *sc, const uint8_t *addr)
{
	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
	RGE_WRITE_4(sc, RGE_MAC0,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	RGE_WRITE_4(sc, RGE_MAC4,
	    addr[5] <<  8 | addr[4]);
	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
}

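/*
 * Fetch the factory-programmed station address one byte at a time so
 * the result is independent of host byte order and of the alignment
 * of the destination buffer.
 */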
void
rge_get_macaddr(struct rge_softc *sc, uint8_t *addr)
{
	uint32_t mac0 = RGE_READ_4(sc, RGE_ADDR0);
	uint16_t mac4 = RGE_READ_2(sc, RGE_ADDR1);
	int i;

	for (i = 0; i < 4; i++)
		addr[i] = (mac0 >> (i * 8)) & 0xff;
	addr[4] = mac4 & 0xff;
	addr[5] = (mac4 >> 8) & 0xff;
}

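/*
 * One-time hardware setup: clear stale PME status, stop and reset the
 * MAC MCU, apply chip-specific MCU fixes and turn off the PHY's
 * power-save mode.  The unnamed register offsets are undocumented
 * values inherited from the vendor driver.
 */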
void
rge_hw_init(struct rge_softc *sc)
{
	int i;

	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
	RGE_CLRBIT_1(sc, 0xf1, 0x80);

	/* Disable UPS. */
	RGE_MAC_CLRBIT(sc, 0xd40a, 0x0010);

	/* Configure MAC MCU. */
	rge_write_mac_ocp(sc, 0xfc38, 0);

	for (i = 0xfc28; i < 0xfc38; i += 2)
		rge_write_mac_ocp(sc, i, 0);

	DELAY(3000);
	rge_write_mac_ocp(sc, 0xfc26, 0);

	if (sc->rge_type == MAC_CFG3) {
		for (i = 0; i < nitems(rtl8125_def_bps); i++)
			rge_write_mac_ocp(sc, rtl8125_def_bps[i].reg,
			    rtl8125_def_bps[i].val);
	}

	/* Disable PHY power saving. */
	rge_disable_phy_ocp_pwrsave(sc);

	/* Set PCIe uncorrectable error status. */
	rge_write_csi(sc, 0x108,
	    rge_read_csi(sc, 0x108) | 0x00100000);
}

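/*
 * PHY OCP register 0xc416 appears to control the PHY's power-save
 * state; writing 0x0500 while the MCU patch request is held disables
 * power saving.  The initial read-back avoids a redundant patch cycle.
 */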
void
rge_disable_phy_ocp_pwrsave(struct rge_softc *sc)
{
	if (rge_read_phy_ocp(sc, 0xc416) != 0x0500) {
		rge_patch_phy_mcu(sc, 1);
		rge_write_phy_ocp(sc, 0xc416, 0);
		rge_write_phy_ocp(sc, 0xc416, 0x0500);
		rge_patch_phy_mcu(sc, 0);
	}
}

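/*
 * Raise or drop the PHY MCU patch request (bit 0x0010 of OCP register
 * 0xb820) and poll bit 0x0040 of register 0xb800 before proceeding.
 * Both registers are undocumented; the handshake mirrors the vendor
 * driver.
 */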
void
rge_patch_phy_mcu(struct rge_softc *sc, int set)
{
	uint16_t val;
	int i;

	if (set)
		RGE_PHY_SETBIT(sc, 0xb820, 0x0010);
	else
		RGE_PHY_CLRBIT(sc, 0xb820, 0x0010);

	for (i = 0; i < 1000; i++) {
		val = rge_read_phy_ocp(sc, 0xb800) & 0x0040;
		DELAY(100);
		if (val == 0x0040)
			break;
	}
	if (i == 1000)
		printf("%s: timeout waiting to patch phy mcu\n",
		    sc->sc_dev.dv_xname);
}

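/*
 * The RTL8125 is a 2.5GBASE-T part, so 2500BASE-T is advertised in
 * addition to the usual 10/100/1000 media.
 */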
void
rge_add_media_types(struct rge_softc *sc)
{
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX, 0, NULL);
}

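/*
 * Select the interrupt moderation scheme.  RGE_IMTYPE_NONE fields one
 * interrupt per RX/TX event; RGE_IMTYPE_SIM simulates moderation with
 * the chip's timer, so completions are only acknowledged when the
 * RGE_ISR_PCS_TIMEOUT timer interrupt fires.
 */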
void
rge_config_imtype(struct rge_softc *sc, int imtype)
{
	switch (imtype) {
	case RGE_IMTYPE_NONE:
		sc->rge_intrs = RGE_INTRS;
		sc->rge_rx_ack = RGE_ISR_RX_OK | RGE_ISR_RX_DESC_UNAVAIL |
		    RGE_ISR_RX_FIFO_OFLOW;
		sc->rge_tx_ack = RGE_ISR_TX_OK;
		break;
	case RGE_IMTYPE_SIM:
		sc->rge_intrs = RGE_INTRS_TIMER;
		sc->rge_rx_ack = RGE_ISR_PCS_TIMEOUT;
		sc->rge_tx_ack = RGE_ISR_PCS_TIMEOUT;
		break;
	default:
		panic("%s: unknown imtype %d", sc->sc_dev.dv_xname, imtype);
	}
}

void
rge_disable_sim_im(struct rge_softc *sc)
{
	RGE_WRITE_4(sc, RGE_TIMERINT, 0);
	sc->rge_timerintr = 0;
}

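/*
 * Arm the simulated interrupt moderation timer.  The period written
 * to RGE_TIMERINT (0x2600) is in undocumented chip ticks and is
 * carried over as-is; the RGE_TIMERCNT write appears to start the
 * countdown, after which the chip raises RGE_ISR_PCS_TIMEOUT.
 */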
void
rge_setup_sim_im(struct rge_softc *sc)
{
	RGE_WRITE_4(sc, RGE_TIMERINT, 0x2600);
	RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
	sc->rge_timerintr = 1;
}

void
rge_setup_intr(struct rge_softc *sc, int imtype)
{
	rge_config_imtype(sc, imtype);

	/* Enable interrupts. */
	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);

	switch (imtype) {
	case RGE_IMTYPE_NONE:
		rge_disable_sim_im(sc);
		break;
	case RGE_IMTYPE_SIM:
		rge_setup_sim_im(sc);
		break;
	default:
		panic("%s: unknown imtype %d", sc->sc_dev.dv_xname, imtype);
	}
}

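/*
 * Bring the NIC out of out-of-band (OOB) mode, in which the embedded
 * MCU drives the link for wake-on-LAN and management while the host
 * is down.  The MAC OCP pokes below are undocumented sequences
 * inherited from the vendor driver.
 */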
void
rge_exit_oob(struct rge_softc *sc)
{
	int i;

	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
	    RGE_RXCFG_ERRPKT);

	/* Disable RealWoW. */
	rge_write_mac_ocp(sc, 0xc0bc, 0x00ff);

	rge_reset(sc);

	/* Disable OOB. */
	RGE_CLRBIT_1(sc, RGE_MCUCMD, RGE_MCUCMD_IS_OOB);

	RGE_MAC_CLRBIT(sc, 0xe8de, 0x4000);

	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
			break;
	}

	rge_write_mac_ocp(sc, 0xc0aa, 0x07d0);
	rge_write_mac_ocp(sc, 0xc0a6, 0x0150);
	rge_write_mac_ocp(sc, 0xc01e, 0x5555);

	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
			break;
	}

	if (rge_read_mac_ocp(sc, 0xd42c) & 0x0100) {
		/*
		 * Wait for the PHY to leave UPS power-down; the low
		 * three bits of OCP register 0xa420 hold the PHY state
		 * and 2 appears to mean "resumed" (undocumented).
		 */
		for (i = 0; i < RGE_TIMEOUT; i++) {
			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 2)
				break;
			DELAY(1000);
		}
		RGE_MAC_CLRBIT(sc, 0xd408, 0x0100);
		RGE_PHY_CLRBIT(sc, 0xa468, 0x000a);
	}
}

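/*
 * CSI access: indirect read/write of the device's PCIe configuration
 * space via the CSI address/data registers.  Note the flag polarity:
 * a write has completed once RGE_CSIAR_BUSY clears, while a read has
 * completed once it is raised, with the data latched in RGE_CSIDR.
 */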
void
rge_write_csi(struct rge_softc *sc, uint32_t reg, uint32_t val)
{
	int i;

	RGE_WRITE_4(sc, RGE_CSIDR, val);
	RGE_WRITE_4(sc, RGE_CSIAR, (1 << 16) | (reg & RGE_CSIAR_ADDR_MASK) |
	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT) | RGE_CSIAR_BUSY);

	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (!(RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY))
			break;
	}

	DELAY(20);
}

uint32_t
rge_read_csi(struct rge_softc *sc, uint32_t reg)
{
	int i;

	RGE_WRITE_4(sc, RGE_CSIAR, (1 << 16) | (reg & RGE_CSIAR_ADDR_MASK) |
	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT));

	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY)
			break;
	}

	DELAY(20);

	return (RGE_READ_4(sc, RGE_CSIDR));
}

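/*
 * MAC OCP: a register window into the MAC's on-chip processor.  The
 * 16-bit register offset (always even) is halved into the address
 * field and the data travels in the low 16 bits.
 */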
void
rge_write_mac_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
{
	uint32_t tmp;

	tmp = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
	tmp |= val;
	tmp |= RGE_MACOCP_BUSY;
	RGE_WRITE_4(sc, RGE_MACOCP, tmp);
}

uint16_t
rge_read_mac_ocp(struct rge_softc *sc, uint16_t reg)
{
	uint32_t val;

	val = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
	RGE_WRITE_4(sc, RGE_MACOCP, val);

	return (RGE_READ_4(sc, RGE_MACOCP) & RGE_MACOCP_DATA_MASK);
}

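/*
 * Write one of the EPHY (PCIe transceiver) tuning registers through
 * the EPHYAR window; the per-chip values come from tables such as
 * rtl8125_mac_cfg3_ephy.  What these registers do is not publicly
 * documented.
 */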
void
rge_write_ephy(struct rge_softc *sc, uint16_t reg, uint16_t val)
{
	uint32_t tmp;
	int i;

	tmp = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
	tmp |= RGE_EPHYAR_BUSY | (val & RGE_EPHYAR_DATA_MASK);
	RGE_WRITE_4(sc, RGE_EPHYAR, tmp);

	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (!(RGE_READ_4(sc, RGE_EPHYAR) & RGE_EPHYAR_BUSY))
			break;
	}

	DELAY(20);
}

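/*
 * Write a standard (clause 22) MII register.  MII registers are
 * mapped onto the PHY OCP address space in pages of eight starting at
 * RGE_PHYBASE, with reg / 8 selecting the page and reg % 8 the slot
 * within it; a nonzero addr names an OCP page directly.
 */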
void
rge_write_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg, uint16_t val)
{
	uint16_t off, phyaddr;

	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
	phyaddr <<= 4;

	off = addr ? reg : 0x10 + (reg % 8);

	phyaddr += (off - 16) << 1;

	rge_write_phy_ocp(sc, phyaddr, val);
}

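/*
 * PHY OCP accessors, used both for raw OCP registers and for mapped
 * MII registers.  The flag polarity matches CSI: a write polls for
 * RGE_PHYOCP_BUSY to clear, a read polls for it to rise with the data
 * valid.
 */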
void
rge_write_phy_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
{
	uint32_t tmp;
	int i;

	tmp = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
	tmp |= RGE_PHYOCP_BUSY | val;
	RGE_WRITE_4(sc, RGE_PHYOCP, tmp);

	for (i = 0; i < RGE_TIMEOUT; i++) {
		DELAY(1);
		if (!(RGE_READ_4(sc, RGE_PHYOCP) & RGE_PHYOCP_BUSY))
			break;
	}
}

uint16_t
rge_read_phy_ocp(struct rge_softc *sc, uint16_t reg)
{
	uint32_t val;
	int i;

	val = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
	RGE_WRITE_4(sc, RGE_PHYOCP, val);

	for (i = 0; i < RGE_TIMEOUT; i++) {
		DELAY(1);
		val = RGE_READ_4(sc, RGE_PHYOCP);
		if (val & RGE_PHYOCP_BUSY)
			break;
	}

	return (val & RGE_PHYOCP_DATA_MASK);
}

int
rge_get_link_status(struct rge_softc *sc)
{
	return ((RGE_READ_2(sc, RGE_PHYSTAT) & RGE_PHYSTAT_LINK) ? 1 : 0);
}

void
rge_txstart(void *arg)
{
	struct rge_softc *sc = arg;

	RGE_WRITE_2(sc, RGE_TXSTART, RGE_TXSTART_START);
}

void
rge_tick(void *arg)
{
	struct rge_softc *sc = arg;
	int s;

	s = splnet();
	rge_link_state(sc);
	splx(s);

	timeout_add_sec(&sc->sc_timeout, 1);
}

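/*
 * Reflect the PHY's link bit into the interface's link state and
 * notify the stack only on an actual transition.
 */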
void
rge_link_state(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	int link = LINK_STATE_DOWN;

	if (rge_get_link_status(sc))
		link = LINK_STATE_UP;

	if (ifp->if_link_state != link) {
		ifp->if_link_state = link;
		if_link_state_change(ifp, link);
	}
}