      1 /*	$NetBSD: if_rge.c,v 1.8 2020/02/27 23:15:34 sevan Exp $	*/
      2 /*	$OpenBSD: if_rge.c,v 1.2 2020/01/02 09:00:45 kevlo Exp $	*/
      3 
      4 /*
      5  * Copyright (c) 2019 Kevin Lo <kevlo (at) openbsd.org>
      6  *
      7  * Permission to use, copy, modify, and distribute this software for any
      8  * purpose with or without fee is hereby granted, provided that the above
      9  * copyright notice and this permission notice appear in all copies.
     10  *
     11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
     12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
     13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
     14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
     15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
     16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
     17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
     18  */
     19 
     20 #include <sys/cdefs.h>
     21 __KERNEL_RCSID(0, "$NetBSD: if_rge.c,v 1.8 2020/02/27 23:15:34 sevan Exp $");
     22 
     23 /* #include "vlan.h" Sevan */
     24 
     25 #include <sys/types.h>
     26 
     27 #include <sys/param.h>
     28 #include <sys/systm.h>
     29 #include <sys/sockio.h>
     30 #include <sys/mbuf.h>
     31 #include <sys/malloc.h>
     32 #include <sys/kernel.h>
     33 #include <sys/socket.h>
     34 #include <sys/device.h>
     35 #include <sys/endian.h>
     36 #include <sys/callout.h>
     37 #include <sys/workqueue.h>
     38 
     39 #include <net/if.h>
     40 
     41 #include <net/if_dl.h>
     42 #include <net/if_ether.h>
     43 
     44 #include <net/if_media.h>
     45 
     46 #include <netinet/in.h>
     48 
     49 #if NBPFILTER > 0
     50 #include <net/bpf.h>
     51 #endif
     52 
     53 #include <sys/bus.h>
     54 #include <machine/intr.h>
     55 
     56 #include <dev/mii/mii.h>
     57 
     58 #include <dev/pci/pcivar.h>
     59 #include <dev/pci/pcireg.h>
     60 #include <dev/pci/pcidevs.h>
     61 
     62 #include <dev/pci/if_rgereg.h>
     63 
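/*
 * Compatibility shims: this driver was ported from OpenBSD, so a few
 * OpenBSD interfaces (letoh32, nitems, mbuf lists, MCLGETI) are mapped
 * to NetBSD equivalents below.
 */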
     64 #ifdef __NetBSD__
#define letoh32 	le32toh
     66 #define nitems(x) 	__arraycount(x)
     67 #define MBUF_LIST_INITIALIZER() 	{ NULL, NULL, 0 }
     68 struct mbuf_list {
     69 	struct mbuf 	*ml_head;
     70 	struct mbuf 	*ml_tail;
     71 	u_int 	ml_len;
     72 };
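
/*
 * rge_rxeof() uses OpenBSD's ml_enqueue(), which NetBSD lacks; a
 * minimal sketch sufficient for this driver's single-list usage:
 */
static void
ml_enqueue(struct mbuf_list *ml, struct mbuf *m)
{

	m->m_nextpkt = NULL;
	if (ml->ml_tail == NULL)
		ml->ml_head = m;
	else
		ml->ml_tail->m_nextpkt = m;
	ml->ml_tail = m;
	ml->ml_len++;
}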
     73 
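/*
 * Stand-in for OpenBSD's MCLGETI(9): allocate a packet header mbuf with
 * external storage of the requested size via MEXTMALLOC rather than a
 * per-interface cluster pool.
 */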
     74 static struct mbuf *
     75 MCLGETI(struct rge_softc *sc __unused, int how,
     76     struct ifnet *ifp __unused, u_int size)
     77 {
     78 	struct mbuf *m;
     79 
     80 	MGETHDR(m, how, MT_DATA);
     81 	if (m == NULL)
     82 		return NULL;
     83 
     84 	MEXTMALLOC(m, size, how);
     85 	if ((m->m_flags & M_EXT) == 0) {
     86 		m_freem(m);
     87 		return NULL;
     88 	}
     89 	return m;
     90 }
     91 
     92 #ifdef NET_MPSAFE
     93 #define 	RGE_MPSAFE	1
     94 #define 	CALLOUT_FLAGS	CALLOUT_MPSAFE
     95 #else
     96 #define 	CALLOUT_FLAGS	0
     97 #endif
     98 #endif
     99 
    100 static int		rge_match(device_t, cfdata_t, void *);
    101 static void		rge_attach(device_t, device_t, void *);
    102 int		rge_intr(void *);
    103 int		rge_encap(struct rge_softc *, struct mbuf *, int);
    104 int		rge_ioctl(struct ifnet *, u_long, void *);
    105 void		rge_start(struct ifnet *);
    106 void		rge_watchdog(struct ifnet *);
    107 int		rge_init(struct ifnet *);
    108 void		rge_stop(struct ifnet *);
    109 int		rge_ifmedia_upd(struct ifnet *);
    110 void		rge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
    111 int		rge_allocmem(struct rge_softc *);
    112 int		rge_newbuf(struct rge_softc *, int);
    113 void		rge_discard_rxbuf(struct rge_softc *, int);
    114 int		rge_rx_list_init(struct rge_softc *);
    115 void		rge_tx_list_init(struct rge_softc *);
    116 int		rge_rxeof(struct rge_softc *);
    117 int		rge_txeof(struct rge_softc *);
    118 void		rge_reset(struct rge_softc *);
    119 void		rge_iff(struct rge_softc *);
    120 void		rge_set_phy_power(struct rge_softc *, int);
    121 void		rge_phy_config(struct rge_softc *);
    122 void		rge_set_macaddr(struct rge_softc *, const uint8_t *);
    123 void		rge_get_macaddr(struct rge_softc *, uint8_t *);
    124 void		rge_hw_init(struct rge_softc *);
    125 void		rge_disable_phy_ocp_pwrsave(struct rge_softc *);
    126 void		rge_patch_phy_mcu(struct rge_softc *, int);
    127 void		rge_add_media_types(struct rge_softc *);
    128 void		rge_config_imtype(struct rge_softc *, int);
    129 void		rge_disable_sim_im(struct rge_softc *);
    130 void		rge_setup_sim_im(struct rge_softc *);
    131 void		rge_setup_intr(struct rge_softc *, int);
    132 void		rge_exit_oob(struct rge_softc *);
    133 void		rge_write_csi(struct rge_softc *, uint32_t, uint32_t);
    134 uint32_t	rge_read_csi(struct rge_softc *, uint32_t);
    135 void		rge_write_mac_ocp(struct rge_softc *, uint16_t, uint16_t);
    136 uint16_t	rge_read_mac_ocp(struct rge_softc *, uint16_t);
    137 void		rge_write_ephy(struct rge_softc *, uint16_t, uint16_t);
    138 void		rge_write_phy(struct rge_softc *, uint16_t, uint16_t, uint16_t);
    139 void		rge_write_phy_ocp(struct rge_softc *, uint16_t, uint16_t);
    140 uint16_t	rge_read_phy_ocp(struct rge_softc *, uint16_t);
    141 int		rge_get_link_status(struct rge_softc *);
    142 void		rge_txstart(struct work *, void *);
    143 void		rge_tick(void *);
    144 void		rge_link_state(struct rge_softc *);
    145 
    146 static const struct {
    147 	uint16_t reg;
    148 	uint16_t val;
    149 }  rtl8125_def_bps[] = {
    150 	RTL8125_DEF_BPS
    151 }, rtl8125_mac_cfg2_ephy[] = {
    152 	RTL8125_MAC_CFG2_EPHY
    153 }, rtl8125_mac_cfg2_mcu[] = {
    154 	RTL8125_MAC_CFG2_MCU
    155 }, rtl8125_mac_cfg3_ephy[] = {
    156 	RTL8125_MAC_CFG3_EPHY
    157 }, rtl8125_mac_cfg3_mcu[] = {
    158 	RTL8125_MAC_CFG3_MCU
    159 };
    160 
    161 CFATTACH_DECL_NEW(rge, sizeof(struct rge_softc), rge_match, rge_attach,
    162 		NULL, NULL); /* Sevan - detach function? */
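
/*
 * XXX No detach function yet (see note above).  An untested sketch of
 * what one might look like, assuming the usual NetBSD teardown order;
 * the DMA resources from rge_allocmem() would also need to be released:
 */
#if 0
static int
rge_detach(device_t self, int flags)
{
	struct rge_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	int s;

	s = splnet();
	rge_stop(ifp);
	splx(s);

	callout_destroy(&sc->sc_timeout);
	ether_ifdetach(ifp);
	if_detach(ifp);

	if (sc->sc_ih != NULL)
		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	bus_space_unmap(sc->rge_btag, sc->rge_bhandle, sc->rge_bsize);

	return 0;
}
#endif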
    163 
    164 extern struct cfdriver rge_cd;
    165 
    166 static const struct {
    167 	pci_vendor_id_t 	vendor;
    168 	pci_product_id_t 	product;
} rge_devices[] = {
    170 	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_E3000 },
    171 	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8125 },
    172 };
    173 
    174 static int
    175 rge_match(device_t parent, cfdata_t match, void *aux)
    176 {
	struct pci_attach_args *pa = aux;
    178 	int n;
    179 
	for (n = 0; n < __arraycount(rge_devices); n++) {
    181 		if (PCI_VENDOR(pa->pa_id) == rge_devices[n].vendor &&
    182 		    PCI_PRODUCT(pa->pa_id) == rge_devices[n].product)
    183 			return 1;
    184 	}
    185 
    186 	return 0;
    187 }
    188 
static void
rge_attach(device_t parent, device_t self, void *aux)
{
	/*
	 * CFATTACH_DECL_NEW means the softc is not the device itself, so
	 * it must be obtained with device_private(); the remaining uses
	 * of sc->sc_dev.dv_xname below should become device_xname().
	 */
	struct rge_softc *sc = device_private(self);
    193 	struct pci_attach_args *pa = aux;
    194 	pci_chipset_tag_t pc = pa->pa_pc;
    195 	pci_intr_handle_t ih;
    196 	char intrbuf[PCI_INTRSTR_LEN];
    197 	const char *intrstr = NULL;
    198 	struct ifnet *ifp;
    199 	pcireg_t reg;
    200 	uint32_t hwrev;
    201 	uint8_t eaddr[ETHER_ADDR_LEN];
    202 	int offset;
    203 
    204 	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
    205 
    206 	/*
    207 	 * Map control/status registers.
    208 	 */
    209 	if (pci_mapreg_map(pa, RGE_PCI_BAR2, PCI_MAPREG_TYPE_MEM |
    210 	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->rge_btag, &sc->rge_bhandle,
    211 	    NULL, &sc->rge_bsize)) {
    212 		if (pci_mapreg_map(pa, RGE_PCI_BAR1, PCI_MAPREG_TYPE_MEM |
    213 		    PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->rge_btag,
    214 		    &sc->rge_bhandle, NULL, &sc->rge_bsize)) {
    215 			if (pci_mapreg_map(pa, RGE_PCI_BAR0, PCI_MAPREG_TYPE_IO,
    216 			    0, &sc->rge_btag, &sc->rge_bhandle, NULL,
    217 			    &sc->rge_bsize)) {
    218 				printf(": can't map mem or i/o space\n");
    219 				return;
    220 			}
    221 		}
    222 	}
    223 
	/*
	 * Map interrupt.  XXX OpenBSD tries pci_intr_map_msi() first and
	 * only then falls back to pci_intr_map(); plain pci_intr_map()
	 * cannot tell MSI from INTx, so don't set RGE_FLAG_MSI here.
	 */
	if (pci_intr_map(pa, &ih) != 0) {
		printf(": couldn't map interrupt\n");
		return;
	}
    233 	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
	sc->sc_ih = pci_intr_establish_xname(pc, ih, IPL_NET, rge_intr,
	    sc, device_xname(self));
    236 	if (sc->sc_ih == NULL) {
    237 		printf(": couldn't establish interrupt");
    238 		if (intrstr != NULL)
    239 			printf(" at %s", intrstr);
    240 		printf("\n");
    241 		return;
    242 	}
    243 	printf(": %s", intrstr);
    244 
    245 	sc->sc_dmat = pa->pa_dmat;
    246 	sc->sc_pc = pa->pa_pc;
    247 	sc->sc_tag = pa->pa_tag;
    248 
    249 	/* Determine hardware revision */
    250 	hwrev = RGE_READ_4(sc, RGE_TXCFG) & RGE_TXCFG_HWREV;
    251 	switch (hwrev) {
    252 	case 0x60800000:
    253 		sc->rge_type = MAC_CFG2;
    254 		break;
    255 	case 0x60900000:
    256 		sc->rge_type = MAC_CFG3;
    257 		break;
    258 	default:
    259 		printf(": unknown version 0x%08x\n", hwrev);
    260 		return;
    261 	}
    262 
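	/* Default to simulated (hardware-timer) interrupt moderation. */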
    263 	rge_config_imtype(sc, RGE_IMTYPE_SIM);
    264 
    265 	/*
    266 	 * PCI Express check.
    267 	 */
    268 	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
    269 	    &offset, NULL)) {
    270 		/* Disable PCIe ASPM. */
    271 		reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
    272 		    offset + PCIE_LCSR);
		reg &= ~(PCIE_LCSR_ASPM_L0S | PCIE_LCSR_ASPM_L1);
    274 		pci_conf_write(pa->pa_pc, pa->pa_tag, offset + PCIE_LCSR,
    275 		    reg);
    276 	}
    277 
    278 	rge_exit_oob(sc);
    279 	rge_hw_init(sc);
    280 
    281 	rge_get_macaddr(sc, eaddr);
    282 	printf(", address %s\n", ether_sprintf(eaddr));
    283 
    284 	memcpy(sc->sc_enaddr, eaddr, ETHER_ADDR_LEN);
    285 
    286 	rge_set_phy_power(sc, 1);
    287 	rge_phy_config(sc);
    288 
    289 	if (rge_allocmem(sc))
    290 		return;
    291 
    292 	ifp = &sc->sc_ec.ec_if;
    293 	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
    295 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    296 #ifdef RGE_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
    298 #endif
    299 	ifp->if_ioctl = rge_ioctl;
    300 	ifp->if_start = rge_start;
    301 	ifp->if_watchdog = rge_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, RGE_TX_LIST_CNT);
	IFQ_SET_READY(&ifp->if_snd);
    303 	ifp->if_mtu = RGE_JUMBO_MTU;
    304 
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
	ifp->if_capabilities = IFCAP_CSUM_IPv4_Rx | IFCAP_CSUM_IPv4_Tx |
	    IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_TCPv4_Tx |
	    IFCAP_CSUM_UDPv4_Rx | IFCAP_CSUM_UDPv4_Tx;

#if NVLAN > 0
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
#endif
    312 
    313 	callout_init(&sc->sc_timeout, CALLOUT_FLAGS);
    314 	callout_setfunc(&sc->sc_timeout, rge_tick, sc);
	/*
	 * XXX OpenBSD registers the transmit kick here with
	 * task_set(&sc->sc_task, rge_txstart, sc); calling rge_txstart()
	 * directly at attach time would poke the hardware too early, so
	 * a workqueue or softint registration is still needed.
	 */
    316 
    317 	/* Initialize ifmedia structures. */
    318 	ifmedia_init(&sc->sc_media, IFM_IMASK, rge_ifmedia_upd,
    319 	    rge_ifmedia_sts);
    320 	rge_add_media_types(sc);
    321 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
    322 	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
    323 	sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media;
    324 
	if_attach(ifp);
	ifp->if_percpuq = if_percpuq_create(ifp);
	ether_ifattach(ifp, eaddr);
    327 }
    328 
    329 int
    330 rge_intr(void *arg)
    331 {
    332 	struct rge_softc *sc = arg;
    333 	struct ifnet *ifp = &sc->sc_ec.ec_if;
    334 	uint32_t status;
    335 	int claimed = 0, rx, tx;
    336 
    337 	if (!(ifp->if_flags & IFF_RUNNING))
    338 		return (0);
    339 
    340 	/* Disable interrupts. */
    341 	RGE_WRITE_4(sc, RGE_IMR, 0);
    342 
    343 	status = RGE_READ_4(sc, RGE_ISR);
    344 	if (!(sc->rge_flags & RGE_FLAG_MSI)) {
    345 		if ((status & RGE_INTRS) == 0 || status == 0xffffffff)
    346 			return (0);
    347 	}
    348 	if (status)
    349 		RGE_WRITE_4(sc, RGE_ISR, status);
    350 
    351 	if (status & RGE_ISR_PCS_TIMEOUT)
    352 		claimed = 1;
    353 
    354 	rx = tx = 0;
    355 	if (status & RGE_INTRS) {
    356 		if (status &
    357 		    (sc->rge_rx_ack | RGE_ISR_RX_ERR | RGE_ISR_RX_FIFO_OFLOW)) {
    358 			rx |= rge_rxeof(sc);
    359 			claimed = 1;
    360 		}
    361 
    362 		if (status & (sc->rge_tx_ack | RGE_ISR_TX_ERR)) {
    363 			tx |= rge_txeof(sc);
    364 			claimed = 1;
    365 		}
    366 
    367 		if (status & RGE_ISR_SYSTEM_ERR) {
    368 			KERNEL_LOCK(1, NULL);
    369 			rge_init(ifp);
    370 			KERNEL_UNLOCK_ONE(NULL);
    371 			claimed = 1;
    372 		}
    373 	}
    374 
    375 	if (sc->rge_timerintr) {
		if ((tx | rx) == 0) {
			/*
			 * Nothing needs to be processed; fall back to
			 * TX/RX interrupts.
			 */
    381 			rge_setup_intr(sc, RGE_IMTYPE_NONE);
    382 
    383 			/*
    384 			 * Recollect, mainly to avoid the possible
    385 			 * race introduced by changing interrupt
    386 			 * masks.
    387 			 */
    388 			rge_rxeof(sc);
    389 			rge_txeof(sc);
    390 		} else
    391 			RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
    392 	} else if (tx | rx) {
    393 		/*
    394 		 * Assume that using simulated interrupt moderation
    395 		 * (hardware timer based) could reduce the interrupt
    396 		 * rate.
    397 		 */
    398 		rge_setup_intr(sc, RGE_IMTYPE_SIM);
    399 	}
    400 
    401 	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
    402 
    403 	return (claimed);
    404 }
    405 
    406 int
    407 rge_encap(struct rge_softc *sc, struct mbuf *m, int idx)
    408 {
    409 	struct rge_tx_desc *d = NULL;
    410 	struct rge_txq *txq;
    411 	bus_dmamap_t txmap;
    412 	uint32_t cmdsts, cflags = 0;
    413 	int cur, error, i, last, nsegs;
    414 
	/*
	 * Set RGE_TDEXTSTS_IPCSUM if any checksum offloading is requested.
	 * Otherwise, RGE_TDEXTSTS_TCPCSUM / RGE_TDEXTSTS_UDPCSUM does not
	 * take effect.
	 */
	if ((m->m_pkthdr.csum_flags &
	    (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) != 0) {
		cflags |= RGE_TDEXTSTS_IPCSUM;
		if (m->m_pkthdr.csum_flags & M_CSUM_TCPv4)
			cflags |= RGE_TDEXTSTS_TCPCSUM;
		if (m->m_pkthdr.csum_flags & M_CSUM_UDPv4)
			cflags |= RGE_TDEXTSTS_UDPCSUM;
	}
    428 
    429 	txq = &sc->rge_ldata.rge_txq[idx];
    430 	txmap = txq->txq_dmamap;
    431 
    432 	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m, BUS_DMA_NOWAIT);
    433 	switch (error) {
    434 	case 0:
    435 		break;
    436 	case EFBIG: /* mbuf chain is too fragmented */
    437 		if (m_defrag(m, M_DONTWAIT) == 0 &&
    438 		    bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m,
    439 		    BUS_DMA_NOWAIT) == 0)
    440 			break;
    441 
    442 		/* FALLTHROUGH */
    443 	default:
    444 		return (0);
    445 	}
    446 
    447 	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
    448 	    BUS_DMASYNC_PREWRITE);
    449 
    450 	nsegs = txmap->dm_nsegs;
    451 
    452 	/* Set up hardware VLAN tagging. */
    453 #if NVLAN > 0
    454 	if (m->m_flags & M_VLANTAG)
    455 		cflags |= swap16(m->m_pkthdr.ether_vtag | RGE_TDEXTSTS_VTAG);
    456 #endif
    457 
    458 	cur = idx;
    459 	cmdsts = RGE_TDCMDSTS_SOF;
    460 
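	/*
	 * Build the descriptor chain: the first segment carries SOF and
	 * the last EOF; OWN is set on every descriptor except the first,
	 * whose OWN bit is flipped only after the whole chain is built
	 * (below), so the chip never sees a partial packet.
	 */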
    461 	for (i = 0; i < txmap->dm_nsegs; i++) {
    462 		d = &sc->rge_ldata.rge_tx_list[cur];
    463 
    464 		d->rge_extsts = htole32(cflags);
    465 		d->rge_addrlo = htole32(RGE_ADDR_LO(txmap->dm_segs[i].ds_addr));
    466 		d->rge_addrhi = htole32(RGE_ADDR_HI(txmap->dm_segs[i].ds_addr));
    467 
    468 		cmdsts |= txmap->dm_segs[i].ds_len;
    469 
    470 		if (cur == RGE_TX_LIST_CNT - 1)
    471 			cmdsts |= RGE_TDCMDSTS_EOR;
    472 
    473 		d->rge_cmdsts = htole32(cmdsts);
    474 
    475 		last = cur;
    476 		cmdsts = RGE_TDCMDSTS_OWN;
    477 		cur = RGE_NEXT_TX_DESC(cur);
    478 	}
    479 
    480 	/* Set EOF on the last descriptor. */
    481 	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_EOF);
    482 
    483 	/* Transfer ownership of packet to the chip. */
    484 	d = &sc->rge_ldata.rge_tx_list[idx];
    485 
    486 	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_OWN);
    487 
    488 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
    489 	    cur * sizeof(struct rge_tx_desc), sizeof(struct rge_tx_desc),
    490 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    491 
    492 	/* Update info of TX queue and descriptors. */
    493 	txq->txq_mbuf = m;
    494 	txq->txq_descidx = last;
    495 
    496 	return (nsegs);
    497 }
    498 
    499 int
    500 rge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
    501 {
    502 	struct rge_softc *sc = ifp->if_softc;
    503 	struct ifreq *ifr = (struct ifreq *)data;
    504 	int s, error = 0;
    505 
    506 	s = splnet();
    507 
    508 	switch (cmd) {
    509 	case SIOCSIFADDR:
    510 		ifp->if_flags |= IFF_UP;
    511 		if (!(ifp->if_flags & IFF_RUNNING))
    512 			rge_init(ifp);
    513 		break;
    514 	case SIOCSIFFLAGS:
    515 		if (ifp->if_flags & IFF_UP) {
    516 			if (ifp->if_flags & IFF_RUNNING)
    517 				error = ENETRESET;
    518 			else
    519 				rge_init(ifp);
    520 		} else {
    521 			if (ifp->if_flags & IFF_RUNNING)
    522 				rge_stop(ifp);
    523 		}
    524 		break;
    525 	case SIOCGIFMEDIA:
    526 	case SIOCSIFMEDIA:
    527 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
    528 		break;
	case SIOCSIFMTU:
		/* XXX compare against the hardware limit, not the current
		 * MTU, so the MTU can be raised again after lowering it. */
		if (ifr->ifr_mtu > RGE_JUMBO_MTU) {
			error = EINVAL;
			break;
		}
		ifp->if_mtu = ifr->ifr_mtu;
		break;
    536 	default:
    537 		error = ether_ioctl(ifp, cmd, data);
    538 	}
    539 
    540 	if (error == ENETRESET) {
    541 		if (ifp->if_flags & IFF_RUNNING)
    542 			rge_iff(sc);
    543 		error = 0;
    544 	}
    545 
    546 	splx(s);
    547 	return (error);
    548 }
    549 
    550 void
    551 rge_start(struct ifnet *ifp)
    552 {
    553 	struct rge_softc *sc = ifp->if_softc;
    554 	struct mbuf *m;
    555 	int free, idx, used;
    556 	int queued = 0;
    557 
    558 #define LINK_STATE_IS_UP(_s)    \
    559 	((_s) >= LINK_STATE_UP || (_s) == LINK_STATE_UNKNOWN)
    560 
	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
		IFQ_PURGE(&ifp->if_snd);
		return;
	}
    565 
    566 	/* Calculate free space. */
    567 	idx = sc->rge_ldata.rge_txq_prodidx;
    568 	free = sc->rge_ldata.rge_txq_considx;
    569 	if (free <= idx)
    570 		free += RGE_TX_LIST_CNT;
    571 	free -= idx;
    572 
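	/*
	 * Stop queueing when a maximally fragmented packet (RGE_TX_NSEGS
	 * segments) would no longer fit in the ring; the queue is
	 * restarted once rge_txeof() reclaims descriptors.
	 */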
    573 	for (;;) {
    574 		if (RGE_TX_NSEGS >= free + 2) {
    575 			SET(ifp->if_flags, IFF_OACTIVE);
    576 			break;
    577 		}
    578 
    579 		IFQ_DEQUEUE(&ifp->if_snd, m);
    580 		if (m == NULL)
    581 			break;
    582 
    583 		used = rge_encap(sc, m, idx);
    584 		if (used == 0) {
    585 			m_freem(m);
    586 			continue;
    587 		}
    588 
    589 		KASSERT(used <= free);
    590 		free -= used;
    591 
    592 #if NBPFILTER > 0
    593 		if (ifp->if_bpf)
    594 			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
    595 #endif
    596 
    597 		idx += used;
    598 		if (idx >= RGE_TX_LIST_CNT)
    599 			idx -= RGE_TX_LIST_CNT;
    600 
    601 		queued++;
    602 	}
    603 
    604 	if (queued == 0)
    605 		return;
    606 
    607 	/* Set a timeout in case the chip goes out to lunch. */
    608 	ifp->if_timer = 5;
    609 
	sc->rge_ldata.rge_txq_prodidx = idx;
	/*
	 * XXX OpenBSD defers this via ifq_serialize(&ifp->if_snd,
	 * &sc->sc_task); kick the transmitter directly for now.
	 */
	rge_txstart(&sc->sc_task, sc);
    612 }
    613 
    614 void
    615 rge_watchdog(struct ifnet *ifp)
    616 {
    617 	struct rge_softc *sc = ifp->if_softc;
    618 
    619 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
    620 	if_statinc(ifp, if_oerrors);
    621 
    622 	rge_init(ifp);
    623 }
    624 
    625 int
    626 rge_init(struct ifnet *ifp)
    627 {
    628 	struct rge_softc *sc = ifp->if_softc;
    629 	uint32_t val;
    630 	uint16_t max_frame_size;
    631 	int i;
    632 
    633 	rge_stop(ifp);
    634 
    635 	/* Set MAC address. */
    636 	rge_set_macaddr(sc, sc->sc_enaddr);
    637 
	/* Set maximum frame size, but don't let the MTU be less than ETHERMTU. */
	if (ifp->if_mtu < ETHERMTU)
		max_frame_size = ETHERMTU;
	else
		max_frame_size = ifp->if_mtu;
    643 
    644 	max_frame_size += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
    645 	    ETHER_CRC_LEN + 1;
    646 
    647 	if (max_frame_size > RGE_JUMBO_FRAMELEN)
    648 		max_frame_size -= 1;
    649 
    650 	RGE_WRITE_2(sc, RGE_RXMAXSIZE, max_frame_size);
    651 
    652 	/* Initialize RX descriptors list. */
    653 	if (rge_rx_list_init(sc) == ENOBUFS) {
    654 		printf("%s: init failed: no memory for RX buffers\n",
    655 		    sc->sc_dev.dv_xname);
    656 		rge_stop(ifp);
    657 		return (ENOBUFS);
    658 	}
    659 
    660 	/* Initialize TX descriptors. */
    661 	rge_tx_list_init(sc);
    662 
    663 	/* Load the addresses of the RX and TX lists into the chip. */
    664 	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_LO,
    665 	    RGE_ADDR_LO(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
    666 	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_HI,
    667 	    RGE_ADDR_HI(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
    668 	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_LO,
    669 	    RGE_ADDR_LO(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
    670 	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_HI,
    671 	    RGE_ADDR_HI(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
    672 
    673 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
    674 
    675 	RGE_CLRBIT_1(sc, 0xf1, 0x80);
    676 	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
    677 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
    678 	RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_RDY_TO_L23);
    679 
    680 	/* Clear interrupt moderation timer. */
    681 	for (i = 0; i < 64; i++)
    682 		RGE_WRITE_4(sc, RGE_IM(i), 0);
    683 
    684 	/* Set the initial RX and TX configurations. */
    685 	RGE_WRITE_4(sc, RGE_RXCFG, RGE_RXCFG_CONFIG);
    686 	RGE_WRITE_4(sc, RGE_TXCFG, RGE_TXCFG_CONFIG);
    687 
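	/*
	 * The register pokes from here down follow Realtek's reference
	 * driver; most of these CSI and MAC-OCP registers are
	 * undocumented.
	 */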
    688 	val = rge_read_csi(sc, 0x70c) & ~0xff000000;
    689 	rge_write_csi(sc, 0x70c, val | 0x27000000);
    690 
    691 	/* Enable hardware optimization function. */
    692 	val = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x78) & ~0x00007000;
    693 	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x78, val | 0x00005000);
    694 
    695 	RGE_WRITE_2(sc, 0x0382, 0x221b);
    696 	RGE_WRITE_1(sc, 0x4500, 0);
    697 	RGE_WRITE_2(sc, 0x4800, 0);
    698 	RGE_CLRBIT_1(sc, RGE_CFG1, RGE_CFG1_SPEED_DOWN);
    699 
    700 	rge_write_mac_ocp(sc, 0xc140, 0xffff);
    701 	rge_write_mac_ocp(sc, 0xc142, 0xffff);
    702 
    703 	val = rge_read_mac_ocp(sc, 0xd3e2) & ~0x0fff;
    704 	rge_write_mac_ocp(sc, 0xd3e2, val | 0x03a9);
    705 
    706 	RGE_MAC_CLRBIT(sc, 0xd3e4, 0x00ff);
    707 	RGE_MAC_SETBIT(sc, 0xe860, 0x0080);
    708 	RGE_MAC_SETBIT(sc, 0xeb58, 0x0001);
    709 
    710 	val = rge_read_mac_ocp(sc, 0xe614) & ~0x0700;
    711 	rge_write_mac_ocp(sc, 0xe614, val | 0x0400);
    712 
    713 	RGE_MAC_CLRBIT(sc, 0xe63e, 0x0c00);
    714 
    715 	val = rge_read_mac_ocp(sc, 0xe63e) & ~0x0030;
    716 	rge_write_mac_ocp(sc, 0xe63e, val | 0x0020);
    717 
    718 	RGE_MAC_SETBIT(sc, 0xc0b4, 0x000c);
    719 
    720 	val = rge_read_mac_ocp(sc, 0xeb6a) & ~0x007f;
    721 	rge_write_mac_ocp(sc, 0xeb6a, val | 0x0033);
    722 
    723 	val = rge_read_mac_ocp(sc, 0xeb50) & ~0x03e0;
    724 	rge_write_mac_ocp(sc, 0xeb50, val | 0x0040);
    725 
    726 	val = rge_read_mac_ocp(sc, 0xe056) & ~0x00f0;
    727 	rge_write_mac_ocp(sc, 0xe056, val | 0x0030);
    728 
    729 	RGE_WRITE_1(sc, RGE_TDFNR, 0x10);
    730 
    731 	RGE_MAC_CLRBIT(sc, 0xe040, 0x1000);
    732 
    733 	val = rge_read_mac_ocp(sc, 0xe0c0) & ~0x4f0f;
    734 	rge_write_mac_ocp(sc, 0xe0c0, val | 0x4403);
    735 
    736 	RGE_MAC_SETBIT(sc, 0xe052, 0x0068);
    737 	RGE_MAC_CLRBIT(sc, 0xe052, 0x0080);
    738 
    739 	val = rge_read_mac_ocp(sc, 0xc0ac) & ~0x0080;
    740 	rge_write_mac_ocp(sc, 0xc0ac, val | 0x1f00);
    741 
    742 	val = rge_read_mac_ocp(sc, 0xd430) & ~0x0fff;
    743 	rge_write_mac_ocp(sc, 0xd430, val | 0x047f);
    744 
    745 	RGE_MAC_SETBIT(sc, 0xe84c, 0x00c0);
    746 
    747 	/* Disable EEE plus. */
    748 	RGE_MAC_CLRBIT(sc, 0xe080, 0x0002);
    749 
    750 	RGE_MAC_CLRBIT(sc, 0xea1c, 0x0004);
    751 
    752 	RGE_MAC_SETBIT(sc, 0xeb54, 0x0001);
    753 	DELAY(1);
    754 	RGE_MAC_CLRBIT(sc, 0xeb54, 0x0001);
    755 
    756 	RGE_CLRBIT_4(sc, 0x1880, 0x0030);
    757 
    758 	rge_write_mac_ocp(sc, 0xe098, 0xc302);
    759 
	if (sc->sc_ec.ec_capabilities & ETHERCAP_VLAN_HWTAGGING)
		RGE_SETBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);
    762 
    763 	RGE_SETBIT_2(sc, RGE_CPLUSCMD, RGE_CPLUSCMD_RXCSUM);
    764 
    765 	for (i = 0; i < 10; i++) {
    766 		if (!(rge_read_mac_ocp(sc, 0xe00e) & 0x2000))
    767 			break;
    768 		DELAY(1000);
    769 	}
    770 
    771 	/* Disable RXDV gate. */
    772 	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
    773 	DELAY(2000);
    774 
    775 	rge_ifmedia_upd(ifp);
    776 
    777 	/* Enable transmit and receive. */
    778 	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_TXENB | RGE_CMD_RXENB);
    779 
    780 	/* Program promiscuous mode and multicast filters. */
    781 	rge_iff(sc);
    782 
    783 	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
    784 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
    785 
    786 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
    787 
    788 	/* Enable interrupts. */
    789 	rge_setup_intr(sc, RGE_IMTYPE_SIM);
    790 
    791 	ifp->if_flags |= IFF_RUNNING;
    792 	CLR(ifp->if_flags, IFF_OACTIVE);
    793 
    794 	callout_schedule(&sc->sc_timeout, 1);
    795 
    796 	return (0);
    797 }
    798 
    799 /*
    800  * Stop the adapter and free any mbufs allocated to the RX and TX lists.
    801  */
    802 void
    803 rge_stop(struct ifnet *ifp)
    804 {
    805 	struct rge_softc *sc = ifp->if_softc;
    806 	int i;
    807 
	callout_stop(&sc->sc_timeout);
    809 
    810 	ifp->if_timer = 0;
    811 	ifp->if_flags &= ~IFF_RUNNING;
    812 	sc->rge_timerintr = 0;
    813 
    814 	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
    815 	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
    816 	    RGE_RXCFG_ERRPKT);
    817 
    818 	RGE_WRITE_4(sc, RGE_IMR, 0);
    819 	RGE_WRITE_4(sc, RGE_ISR, 0xffffffff);
    820 
    821 	rge_reset(sc);
    822 
	/*
	 * XXX intr_barrier()/ifq_barrier() are OpenBSD interfaces with
	 * no direct NetBSD equivalent yet.
	 */
/*	intr_barrier(sc->sc_ih); */
/*	ifq_barrier(&ifp->if_snd); */
	CLR(ifp->if_flags, IFF_OACTIVE);	/* ifq_clr_oactive() */
    826 
    827 	if (sc->rge_head != NULL) {
    828 		m_freem(sc->rge_head);
    829 		sc->rge_head = sc->rge_tail = NULL;
    830 	}
    831 
    832 	/* Free the TX list buffers. */
    833 	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
    834 		if (sc->rge_ldata.rge_txq[i].txq_mbuf != NULL) {
    835 			bus_dmamap_unload(sc->sc_dmat,
    836 			    sc->rge_ldata.rge_txq[i].txq_dmamap);
    837 			m_freem(sc->rge_ldata.rge_txq[i].txq_mbuf);
    838 			sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
    839 		}
    840 	}
    841 
    842 	/* Free the RX list buffers. */
    843 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
    844 		if (sc->rge_ldata.rge_rxq[i].rxq_mbuf != NULL) {
    845 			bus_dmamap_unload(sc->sc_dmat,
    846 			    sc->rge_ldata.rge_rxq[i].rxq_dmamap);
    847 			m_freem(sc->rge_ldata.rge_rxq[i].rxq_mbuf);
    848 			sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
    849 		}
    850 	}
    851 }
    852 
    853 /*
    854  * Set media options.
    855  */
    856 int
    857 rge_ifmedia_upd(struct ifnet *ifp)
    858 {
    859 	struct rge_softc *sc = ifp->if_softc;
    860 	struct ifmedia *ifm = &sc->sc_media;
    861 	int anar, gig, val;
    862 
    863 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
    864 		return (EINVAL);
    865 
    866 	/* Disable Gigabit Lite. */
    867 	RGE_PHY_CLRBIT(sc, 0xa428, 0x0200);
    868 	RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0001);
    869 
    870 	val = rge_read_phy_ocp(sc, 0xa5d4);
    871 	val &= ~RGE_ADV_2500TFDX;
    872 
    873 	anar = gig = 0;
    874 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
    875 	case IFM_AUTO:
    876 		anar |= ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
    877 		gig |= GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
    878 		val |= RGE_ADV_2500TFDX;
    879 		break;
    880 	case IFM_2500_T:
    881 		anar |= ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
    882 		gig |= GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
    883 		val |= RGE_ADV_2500TFDX;
    884 		ifp->if_baudrate = IF_Mbps(2500);
    885 		break;
    886 	case IFM_1000_T:
    887 		anar |= ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
    888 		gig |= GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
    889 		ifp->if_baudrate = IF_Gbps(1);
    890 		break;
    891 	case IFM_100_TX:
    892 		anar |= ANAR_TX | ANAR_TX_FD;
    893 		ifp->if_baudrate = IF_Mbps(100);
    894 		break;
    895 	case IFM_10_T:
    896 		anar |= ANAR_10 | ANAR_10_FD;
    897 		ifp->if_baudrate = IF_Mbps(10);
    898 		break;
    899 	default:
    900 		printf("%s: unsupported media type\n", sc->sc_dev.dv_xname);
    901 		return (EINVAL);
    902 	}
    903 
    904 	rge_write_phy(sc, 0, MII_ANAR, anar | ANAR_PAUSE_ASYM | ANAR_FC);
    905 	rge_write_phy(sc, 0, MII_100T2CR, gig);
    906 	rge_write_phy_ocp(sc, 0xa5d4, val);
    907 	rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_STARTNEG);
    908 
    909 	return (0);
    910 }
    911 
    912 /*
    913  * Report current media status.
    914  */
    915 void
    916 rge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
    917 {
    918 	struct rge_softc *sc = ifp->if_softc;
    919 	uint16_t status = 0;
    920 
    921 	ifmr->ifm_status = IFM_AVALID;
    922 	ifmr->ifm_active = IFM_ETHER;
    923 
    924 	if (rge_get_link_status(sc)) {
    925 		ifmr->ifm_status |= IFM_ACTIVE;
    926 
    927 		status = RGE_READ_2(sc, RGE_PHYSTAT);
    928 		if ((status & RGE_PHYSTAT_FDX) ||
    929 		    (status & RGE_PHYSTAT_2500MBPS))
    930 			ifmr->ifm_active |= IFM_FDX;
    931 		else
    932 			ifmr->ifm_active |= IFM_HDX;
    933 
    934 		if (status & RGE_PHYSTAT_10MBPS)
    935 			ifmr->ifm_active |= IFM_10_T;
    936 		else if (status & RGE_PHYSTAT_100MBPS)
    937 			ifmr->ifm_active |= IFM_100_TX;
    938 		else if (status & RGE_PHYSTAT_1000MBPS)
    939 			ifmr->ifm_active |= IFM_1000_T;
    940 		else if (status & RGE_PHYSTAT_2500MBPS)
    941 			ifmr->ifm_active |= IFM_2500_T;
    942 	}
    943 }
    944 
    945 /*
    946  * Allocate memory for RX/TX rings.
    947  */
    948 int
    949 rge_allocmem(struct rge_softc *sc)
    950 {
    951 	int error, i;
    952 
    953 	/* Allocate DMA'able memory for the TX ring. */
    954 	error = bus_dmamap_create(sc->sc_dmat, RGE_TX_LIST_SZ, 1,
    955 	    RGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT, &sc->rge_ldata.rge_tx_list_map);
    956 	if (error) {
    957 		printf("%s: can't create TX list map\n", sc->sc_dev.dv_xname);
    958 		return (error);
    959 	}
    960 	error = bus_dmamem_alloc(sc->sc_dmat, RGE_TX_LIST_SZ, RGE_ALIGN, 0,
    961 	    &sc->rge_ldata.rge_tx_listseg, 1, &sc->rge_ldata.rge_tx_listnseg,
    962 	    BUS_DMA_NOWAIT); /* XXX OpenBSD adds BUS_DMA_ZERO */
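	/* (Omitting BUS_DMA_ZERO is harmless here: the lists are zeroed
	 * in rge_tx_list_init()/rge_rx_list_init() before use.) */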
    963 	if (error) {
    964 		printf("%s: can't alloc TX list\n", sc->sc_dev.dv_xname);
    965 		return (error);
    966 	}
    967 
    968 	/* Load the map for the TX ring. */
    969 	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
    970 	    sc->rge_ldata.rge_tx_listnseg, RGE_TX_LIST_SZ,
    971 	    (void **) &sc->rge_ldata.rge_tx_list,
    972 	    BUS_DMA_NOWAIT); /* XXX OpenBSD adds BUS_DMA_COHERENT */
    973 	if (error) {
    974 		printf("%s: can't map TX dma buffers\n", sc->sc_dev.dv_xname);
    975 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
    976 		    sc->rge_ldata.rge_tx_listnseg);
    977 		return (error);
    978 	}
    979 	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
    980 	    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
    981 	if (error) {
    982 		printf("%s: can't load TX dma map\n", sc->sc_dev.dv_xname);
    983 		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map);
    984 		bus_dmamem_unmap(sc->sc_dmat,
    985 		    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ);
    986 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
    987 		    sc->rge_ldata.rge_tx_listnseg);
    988 		return (error);
    989 	}
    990 
    991 	/* Create DMA maps for TX buffers. */
    992 	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
    993 		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN,
    994 		    RGE_TX_NSEGS, RGE_JUMBO_FRAMELEN, 0, 0,
    995 		    &sc->rge_ldata.rge_txq[i].txq_dmamap);
    996 		if (error) {
    997 			printf("%s: can't create DMA map for TX\n",
    998 			    sc->sc_dev.dv_xname);
    999 			return (error);
   1000 		}
   1001 	}
   1002 
   1003 	/* Allocate DMA'able memory for the RX ring. */
   1004 	error = bus_dmamap_create(sc->sc_dmat, RGE_RX_LIST_SZ, 1,
   1005 	    RGE_RX_LIST_SZ, 0, 0, &sc->rge_ldata.rge_rx_list_map);
   1006 	if (error) {
   1007 		printf("%s: can't create RX list map\n", sc->sc_dev.dv_xname);
   1008 		return (error);
   1009 	}
   1010 	error = bus_dmamem_alloc(sc->sc_dmat, RGE_RX_LIST_SZ, RGE_ALIGN, 0,
   1011 	    &sc->rge_ldata.rge_rx_listseg, 1, &sc->rge_ldata.rge_rx_listnseg,
   1012 	    BUS_DMA_NOWAIT);  /* XXX OpenBSD adds BUS_DMA_ZERO */
   1013 	if (error) {
   1014 		printf("%s: can't alloc RX list\n", sc->sc_dev.dv_xname);
   1015 		return (error);
   1016 	}
   1017 
   1018 	/* Load the map for the RX ring. */
   1019 	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
   1020 	    sc->rge_ldata.rge_rx_listnseg, RGE_RX_LIST_SZ,
   1021 	    (void **) &sc->rge_ldata.rge_rx_list,
   1022 	    BUS_DMA_NOWAIT);  /* XXX OpenBSD adds BUS_DMA_COHERENT */
   1023 	if (error) {
   1024 		printf("%s: can't map RX dma buffers\n", sc->sc_dev.dv_xname);
   1025 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
   1026 		    sc->rge_ldata.rge_rx_listnseg);
   1027 		return (error);
   1028 	}
   1029 	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
   1030 	    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
   1031 	if (error) {
   1032 		printf("%s: can't load RX dma map\n", sc->sc_dev.dv_xname);
   1033 		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map);
   1034 		bus_dmamem_unmap(sc->sc_dmat,
   1035 		    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ);
   1036 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
   1037 		    sc->rge_ldata.rge_rx_listnseg);
   1038 		return (error);
   1039 	}
   1040 
   1041 	/* Create DMA maps for RX buffers. */
   1042 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
   1043 		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN, 1,
   1044 		    RGE_JUMBO_FRAMELEN, 0, 0,
   1045 		    &sc->rge_ldata.rge_rxq[i].rxq_dmamap);
   1046 		if (error) {
   1047 			printf("%s: can't create DMA map for RX\n",
   1048 			    sc->sc_dev.dv_xname);
   1049 			return (error);
   1050 		}
   1051 	}
   1052 
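	/*
	 * Note: the error paths above leak maps created in earlier
	 * iterations; tolerable only because attach aborts on failure.
	 */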
   1053 	return (error);
   1054 }
   1055 
   1056 /*
   1057  * Initialize the RX descriptor and attach an mbuf cluster.
   1058  */
   1059 int
   1060 rge_newbuf(struct rge_softc *sc, int idx)
   1061 {
   1062 	struct mbuf *m;
   1063 	struct rge_rx_desc *r;
   1064 	struct rge_rxq *rxq;
   1065 	bus_dmamap_t rxmap;
   1066 
   1067 	m = MCLGETI(NULL, M_DONTWAIT, NULL, RGE_JUMBO_FRAMELEN);
   1068 	if (m == NULL)
   1069 		return (ENOBUFS);
   1070 
   1071 	m->m_len = m->m_pkthdr.len = RGE_JUMBO_FRAMELEN;
   1072 
   1073 	rxq = &sc->rge_ldata.rge_rxq[idx];
   1074 	rxmap = rxq->rxq_dmamap;
   1075 
   1076 	if (bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m, BUS_DMA_NOWAIT))
   1077 		goto out;
   1078 
   1079 	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
   1080 	    BUS_DMASYNC_PREREAD);
   1081 
   1082 	/* Map the segments into RX descriptors. */
   1083 	r = &sc->rge_ldata.rge_rx_list[idx];
   1084 
   1085 	if (RGE_OWN(r)) {
   1086 		printf("%s: tried to map busy RX descriptor\n",
   1087 		    sc->sc_dev.dv_xname);
   1088 		goto out;
   1089 	}
   1090 
   1091 	rxq->rxq_mbuf = m;
   1092 
   1093 	r->rge_extsts = 0;
   1094 	r->rge_addrlo = htole32(RGE_ADDR_LO(rxmap->dm_segs[0].ds_addr));
   1095 	r->rge_addrhi = htole32(RGE_ADDR_HI(rxmap->dm_segs[0].ds_addr));
   1096 
   1097 	r->rge_cmdsts = htole32(rxmap->dm_segs[0].ds_len);
   1098 	if (idx == RGE_RX_LIST_CNT - 1)
   1099 		r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
   1100 
   1101 	r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);
   1102 
   1103 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
   1104 	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
   1105 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1106 
   1107 	return (0);
   1108 out:
   1109 	if (m != NULL)
   1110 		m_freem(m);
   1111 	return (ENOMEM);
   1112 }
   1113 
   1114 void
   1115 rge_discard_rxbuf(struct rge_softc *sc, int idx)
   1116 {
   1117 	struct rge_rx_desc *r;
   1118 
   1119 	r = &sc->rge_ldata.rge_rx_list[idx];
   1120 
   1121 	r->rge_cmdsts = htole32(RGE_JUMBO_FRAMELEN);
   1122 	r->rge_extsts = 0;
   1123 	if (idx == RGE_RX_LIST_CNT - 1)
   1124 		r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
   1125 	r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);
   1126 
   1127 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
   1128 	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
   1129 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1130 }
   1131 
   1132 int
   1133 rge_rx_list_init(struct rge_softc *sc)
   1134 {
   1135 	int i;
   1136 
   1137 	memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);
   1138 
   1139 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
   1140 		sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
   1141 		if (rge_newbuf(sc, i) == ENOBUFS)
   1142 			return (ENOBUFS);
   1143 	}
   1144 
   1145 	sc->rge_ldata.rge_rxq_prodidx = 0;
   1146 	sc->rge_head = sc->rge_tail = NULL;
   1147 
   1148 	return (0);
   1149 }
   1150 
   1151 void
   1152 rge_tx_list_init(struct rge_softc *sc)
   1153 {
   1154 	int i;
   1155 
   1156 	memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);
   1157 
   1158 	for (i = 0; i < RGE_TX_LIST_CNT; i++)
   1159 		sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
   1160 
   1161 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map, 0,
   1162 	    sc->rge_ldata.rge_tx_list_map->dm_mapsize,
   1163 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1164 
   1165 	sc->rge_ldata.rge_txq_prodidx = sc->rge_ldata.rge_txq_considx = 0;
   1166 }
   1167 
   1168 int
   1169 rge_rxeof(struct rge_softc *sc)
   1170 {
   1171 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
   1172 	struct mbuf *m;
   1173 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   1174 	struct rge_rx_desc *cur_rx;
   1175 	struct rge_rxq *rxq;
   1176 	uint32_t rxstat, extsts;
   1177 	int i, total_len, rx = 0;
   1178 
   1179 	for (i = sc->rge_ldata.rge_rxq_prodidx; ; i = RGE_NEXT_RX_DESC(i)) {
   1180 		/* Invalidate the descriptor memory. */
   1181 		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
   1182 		    i * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
   1183 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1184 
   1185 		cur_rx = &sc->rge_ldata.rge_rx_list[i];
   1186 
   1187 		if (RGE_OWN(cur_rx))
   1188 			break;
   1189 
   1190 		rxstat = letoh32(cur_rx->rge_cmdsts);
   1191 		extsts = letoh32(cur_rx->rge_extsts);
   1192 
   1193 		total_len = RGE_RXBYTES(cur_rx);
   1194 		rxq = &sc->rge_ldata.rge_rxq[i];
   1195 		m = rxq->rxq_mbuf;
   1196 		rx = 1;
   1197 
   1198 		/* Invalidate the RX mbuf and unload its map. */
   1199 		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_dmamap, 0,
   1200 		    rxq->rxq_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   1201 		bus_dmamap_unload(sc->sc_dmat, rxq->rxq_dmamap);
   1202 
   1203 		if ((rxstat & (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) !=
   1204 		    (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) {
   1205 			rge_discard_rxbuf(sc, i);
   1206 			continue;
   1207 		}
   1208 
   1209 		if (rxstat & RGE_RDCMDSTS_RXERRSUM) {
   1210 			if_statinc(ifp, if_ierrors);
   1211 			/*
   1212 			 * If this is part of a multi-fragment packet,
   1213 			 * discard all the pieces.
   1214 			 */
			if (sc->rge_head != NULL) {
   1216 				m_freem(sc->rge_head);
   1217 				sc->rge_head = sc->rge_tail = NULL;
   1218 			}
   1219 			rge_discard_rxbuf(sc, i);
   1220 			continue;
   1221 		}
   1222 
		/*
		 * If allocating a replacement mbuf fails, re-arm the
		 * descriptor with the current one (rge_discard_rxbuf())
		 * and drop the packet.
		 */
		if (rge_newbuf(sc, i) == ENOBUFS) {
   1229 			if (sc->rge_head != NULL) {
   1230 				m_freem(sc->rge_head);
   1231 				sc->rge_head = sc->rge_tail = NULL;
   1232 			}
   1233 			rge_discard_rxbuf(sc, i);
   1234 			continue;
   1235 		}
   1236 
   1237 		if (sc->rge_head != NULL) {
   1238 			m->m_len = total_len;
			/*
			 * Special case: if there are 4 bytes or fewer in
			 * this buffer, the mbuf can be discarded: the
			 * last 4 bytes are the CRC, which we don't care
			 * about anyway.
			 */
   1245 			if (m->m_len <= ETHER_CRC_LEN) {
   1246 				sc->rge_tail->m_len -=
   1247 				    (ETHER_CRC_LEN - m->m_len);
   1248 				m_freem(m);
   1249 			} else {
   1250 				m->m_len -= ETHER_CRC_LEN;
   1251 				m->m_flags &= ~M_PKTHDR;
   1252 				sc->rge_tail->m_next = m;
   1253 			}
   1254 			m = sc->rge_head;
   1255 			sc->rge_head = sc->rge_tail = NULL;
   1256 			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
   1257 		} else
   1258 			m->m_pkthdr.len = m->m_len =
   1259 			    (total_len - ETHER_CRC_LEN);
   1260 
		/* Check IP header checksum. */
		if (extsts & RGE_RDEXTSTS_IPV4) {
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (rxstat & RGE_RDCMDSTS_IPCSUMERR)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
		}

		/* Check TCP/UDP checksum.  XXX IPv6 offload not handled. */
		if (extsts & RGE_RDEXTSTS_IPV4) {
			if (rxstat & RGE_RDCMDSTS_TCPPKT) {
				m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
				if (rxstat & RGE_RDCMDSTS_TCPCSUMERR)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_TCP_UDP_BAD;
			} else if (rxstat & RGE_RDCMDSTS_UDPPKT) {
				m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
				if (rxstat & RGE_RDCMDSTS_UDPCSUMERR)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_TCP_UDP_BAD;
			}
		}
   1274 
   1275 #if NVLAN > 0
   1276 		if (extsts & RGE_RDEXTSTS_VTAG) {
   1277 			m->m_pkthdr.ether_vtag =
   1278 			    ntohs(extsts & RGE_RDEXTSTS_VLAN_MASK);
   1279 			m->m_flags |= M_VLANTAG;
   1280 		}
   1281 #endif
   1282 
   1283 		ml_enqueue(&ml, m);
   1284 	}
   1285 
   1286 	sc->rge_ldata.rge_rxq_prodidx = i;
   1287 
	/*
	 * XXX OpenBSD passes the whole mbuf_list to if_input(); NetBSD
	 * has no list-based input, so feed the packets up one at a time.
	 */
	while ((m = ml.ml_head) != NULL) {
		ml.ml_head = m->m_nextpkt;
		m->m_nextpkt = NULL;
		if_percpuq_enqueue(ifp->if_percpuq, m);
	}
   1289 
   1290 	return (rx);
   1291 }
   1292 
   1293 int
   1294 rge_txeof(struct rge_softc *sc)
   1295 {
   1296 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   1297 	struct rge_txq *txq;
   1298 	uint32_t txstat;
   1299 	int cons, idx, prod;
   1300 	int free = 0;
   1301 
   1302 	prod = sc->rge_ldata.rge_txq_prodidx;
   1303 	cons = sc->rge_ldata.rge_txq_considx;
   1304 
   1305 	while (prod != cons) {
   1306 		txq = &sc->rge_ldata.rge_txq[cons];
   1307 		idx = txq->txq_descidx;
   1308 
   1309 		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
   1310 		    idx * sizeof(struct rge_tx_desc),
   1311 		    sizeof(struct rge_tx_desc),
   1312 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1313 
   1314 		txstat = letoh32(sc->rge_ldata.rge_tx_list[idx].rge_cmdsts);
   1315 
   1316 		if (txstat & RGE_TDCMDSTS_OWN) {
   1317 			free = 2;
   1318 			break;
   1319 		}
   1320 
   1321 		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap, 0,
   1322 		    txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   1323 		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
   1324 		m_freem(txq->txq_mbuf);
   1325 		txq->txq_mbuf = NULL;
   1326 
   1327 		if (txstat & (RGE_TDCMDSTS_EXCESSCOLL | RGE_TDCMDSTS_COLL))
   1328 			if_statinc(ifp, if_collisions);
   1329 		if (txstat & RGE_TDCMDSTS_TXERR)
   1330 			if_statinc(ifp, if_oerrors);
   1331 
   1332 		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
   1333 		    idx * sizeof(struct rge_tx_desc),
   1334 		    sizeof(struct rge_tx_desc),
   1335 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1336 
   1337 		cons = RGE_NEXT_TX_DESC(idx);
   1338 		free = 1;
   1339 	}
   1340 
   1341 	if (free == 0)
   1342 		return (0);
   1343 
   1344 	sc->rge_ldata.rge_txq_considx = cons;
   1345 
	/* XXX OpenBSD restarts the queue with ifq_restart()/ifq_serialize(). */
	if (ifp->if_flags & IFF_OACTIVE) {
		CLR(ifp->if_flags, IFF_OACTIVE);
		rge_start(ifp);
	} else if (free == 2)
		rge_txstart(&sc->sc_task, sc);
	else
		ifp->if_timer = 0;
   1352 
   1353 	return (1);
   1354 }
   1355 
   1356 void
   1357 rge_reset(struct rge_softc *sc)
   1358 {
   1359 	int i;
   1360 
   1361 	/* Enable RXDV gate. */
   1362 	RGE_SETBIT_1(sc, RGE_PPSW, 0x08);
   1363 	DELAY(2000);
   1364 
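	/* Wait for both FIFOs to drain before issuing the soft reset. */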
   1365 	for (i = 0; i < 10; i++) {
   1366 		DELAY(100);
   1367 		if ((RGE_READ_1(sc, RGE_MCUCMD) & (RGE_MCUCMD_RXFIFO_EMPTY |
   1368 		    RGE_MCUCMD_TXFIFO_EMPTY)) == (RGE_MCUCMD_RXFIFO_EMPTY |
   1369 		    RGE_MCUCMD_TXFIFO_EMPTY))
   1370 			break;
   1371 	}
   1372 
   1373 	/* Soft reset. */
   1374 	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_RESET);
   1375 
   1376 	for (i = 0; i < RGE_TIMEOUT; i++) {
   1377 		DELAY(100);
   1378 		if (!(RGE_READ_1(sc, RGE_CMD) & RGE_CMD_RESET))
   1379 			break;
   1380 	}
   1381 	if (i == RGE_TIMEOUT)
   1382 		printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);
   1383 }
   1384 
   1385 void
   1386 rge_iff(struct rge_softc *sc)
   1387 {
   1388 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   1389 	struct ethercom *ac = &sc->sc_ec;
   1390 	struct ether_multi *enm;
   1391 	struct ether_multistep step;
   1392 	uint32_t hashes[2];
   1393 	uint32_t rxfilt;
   1394 	int h = 0;
   1395 
   1396 	rxfilt = RGE_READ_4(sc, RGE_RXCFG);
   1397 	rxfilt &= ~(RGE_RXCFG_ALLPHYS | RGE_RXCFG_MULTI);
   1398 	ifp->if_flags &= ~IFF_ALLMULTI;
   1399 
   1400 	/*
   1401 	 * Always accept frames destined to our station address.
   1402 	 * Always accept broadcast frames.
   1403 	 */
   1404 	rxfilt |= RGE_RXCFG_INDIV | RGE_RXCFG_BROAD;
   1405 
	/*
	 * XXX OpenBSD also keys on ac->ac_multirangecnt here; NetBSD has
	 * no such field, so multicast ranges are detected in the loop
	 * below instead.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
allmulti:
		ifp->if_flags |= IFF_ALLMULTI;
		rxfilt |= RGE_RXCFG_MULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= RGE_RXCFG_ALLPHYS;
		hashes[0] = hashes[1] = 0xffffffff;
	} else {
		rxfilt |= RGE_RXCFG_MULTI;
		/* Program new filter. */
		memset(hashes, 0, sizeof(hashes));

		/*
		 * The hardware hashes the top 6 bits of the big-endian
		 * CRC of each address into a 64-bit multicast filter.
		 */
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN) != 0)
				goto allmulti;

			h = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN) >> 26;

			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
	}
   1430 
   1431 	RGE_WRITE_4(sc, RGE_RXCFG, rxfilt);
   1432 	RGE_WRITE_4(sc, RGE_MAR0, bswap32(hashes[1]));
   1433 	RGE_WRITE_4(sc, RGE_MAR4, bswap32(hashes[0]));
   1434 }
   1435 
   1436 void
   1437 rge_set_phy_power(struct rge_softc *sc, int on)
   1438 {
   1439 	int i;
   1440 
   1441 	if (on) {
   1442 		RGE_SETBIT_1(sc, RGE_PMCH, 0xc0);
   1443 
		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN);

		for (i = 0; i < RGE_TIMEOUT; i++) {
			/*
			 * Wait for the PHY state field (bits 2:0 of OCP
			 * 0xa420) to reach the LAN-on state (3); the old
			 * mask of 0x0080 could never compare equal to 3.
			 */
			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 3)
				break;
			DELAY(1000);
		}
   1451 	} else
   1452 		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_PDOWN);
   1453 }
   1454 
   1455 void
   1456 rge_phy_config(struct rge_softc *sc)
   1457 {
   1458 	uint16_t mcode_ver, val;
   1459 	int i;
   1460 	static const uint16_t mac_cfg3_a438_value[] =
   1461 	    { 0x0043, 0x00a7, 0x00d6, 0x00ec, 0x00f6, 0x00fb, 0x00fd, 0x00ff,
   1462 	      0x00bb, 0x0058, 0x0029, 0x0013, 0x0009, 0x0004, 0x0002 };
   1463 
   1464 	static const uint16_t mac_cfg3_b88e_value[] =
   1465 	    { 0xc091, 0x6e12, 0xc092, 0x1214, 0xc094, 0x1516, 0xc096, 0x171b,
   1466 	      0xc098, 0x1b1c, 0xc09a, 0x1f1f, 0xc09c, 0x2021, 0xc09e, 0x2224,
   1467 	      0xc0a0, 0x2424, 0xc0a2, 0x2424, 0xc0a4, 0x2424, 0xc018, 0x0af2,
   1468 	      0xc01a, 0x0d4a, 0xc01c, 0x0f26, 0xc01e, 0x118d, 0xc020, 0x14f3,
   1469 	      0xc022, 0x175a, 0xc024, 0x19c0, 0xc026, 0x1c26, 0xc089, 0x6050,
   1470 	      0xc08a, 0x5f6e, 0xc08c, 0x6e6e, 0xc08e, 0x6e6e, 0xc090, 0x6e12 };
   1471 
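	/*
	 * PHY registers beyond the plain OCP space are reached through an
	 * indirect window: the extended address is written to OCP
	 * register 0xa436 and the data is then read or written through
	 * 0xa438.  This pattern recurs throughout this function.
	 */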
   1472 	/* Read microcode version. */
   1473 	rge_write_phy_ocp(sc, 0xa436, 0x801e);
   1474 	mcode_ver = rge_read_phy_ocp(sc, 0xa438);
   1475 
   1476 	if (sc->rge_type == MAC_CFG2) {
   1477 		for (i = 0; i < nitems(rtl8125_mac_cfg2_ephy); i++) {
   1478 			rge_write_ephy(sc, rtl8125_mac_cfg2_ephy[i].reg,
   1479 			    rtl8125_mac_cfg2_ephy[i].val);
   1480 		}
   1481 
   1482 		if (mcode_ver != RGE_MAC_CFG2_MCODE_VER) {
   1483 			/* Disable PHY config. */
   1484 			RGE_CLRBIT_1(sc, 0xf2, 0x20);
   1485 			DELAY(1000);
   1486 
   1487 			rge_patch_phy_mcu(sc, 1);
   1488 
   1489 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
   1490 			rge_write_phy_ocp(sc, 0xa438, 0x8600);
   1491 			rge_write_phy_ocp(sc, 0xa436, 0xb82e);
   1492 			rge_write_phy_ocp(sc, 0xa438, 0x0001);
   1493 
   1494 			RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
   1495 			for (i = 0; i < nitems(rtl8125_mac_cfg2_mcu); i++) {
   1496 				rge_write_phy_ocp(sc,
   1497 				    rtl8125_mac_cfg2_mcu[i].reg,
   1498 				    rtl8125_mac_cfg2_mcu[i].val);
   1499 			}
   1500 			RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);
   1501 
   1502 			rge_write_phy_ocp(sc, 0xa436, 0);
   1503 			rge_write_phy_ocp(sc, 0xa438, 0);
   1504 			RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
   1505 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
   1506 			rge_write_phy_ocp(sc, 0xa438, 0);
   1507 
   1508 			rge_patch_phy_mcu(sc, 0);
   1509 
   1510 			/* Enable PHY config. */
   1511 			RGE_SETBIT_1(sc, 0xf2, 0x20);
   1512 
   1513 			/* Write microcode version. */
   1514 			rge_write_phy_ocp(sc, 0xa436, 0x801e);
   1515 			rge_write_phy_ocp(sc, 0xa438, RGE_MAC_CFG2_MCODE_VER);
   1516 		}
   1517 
   1518 		val = rge_read_phy_ocp(sc, 0xad40) & ~0x03ff;
   1519 		rge_write_phy_ocp(sc, 0xad40, val | 0x0084);
   1520 		RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
   1521 		val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
   1522 		rge_write_phy_ocp(sc, 0xad16, val | 0x0006);
   1523 		val = rge_read_phy_ocp(sc, 0xad32) & ~0x03ff;
   1524 		rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
   1525 		RGE_PHY_CLRBIT(sc, 0xac08, 0x1100);
   1526 		val = rge_read_phy_ocp(sc, 0xac8a) & ~0xf000;
   1527 		rge_write_phy_ocp(sc, 0xac8a, val | 0x7000);
   1528 		RGE_PHY_SETBIT(sc, 0xad18, 0x0400);
   1529 		RGE_PHY_SETBIT(sc, 0xad1a, 0x03ff);
   1530 		RGE_PHY_SETBIT(sc, 0xad1c, 0x03ff);
   1531 
   1532 		rge_write_phy_ocp(sc, 0xa436, 0x80ea);
   1533 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1534 		rge_write_phy_ocp(sc, 0xa438, val | 0xc400);
   1535 		rge_write_phy_ocp(sc, 0xa436, 0x80eb);
   1536 		val = rge_read_phy_ocp(sc, 0xa438) & ~0x0700;
   1537 		rge_write_phy_ocp(sc, 0xa438, val | 0x0300);
   1538 		rge_write_phy_ocp(sc, 0xa436, 0x80f8);
   1539 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1540 		rge_write_phy_ocp(sc, 0xa438, val | 0x1c00);
   1541 		rge_write_phy_ocp(sc, 0xa436, 0x80f1);
   1542 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1543 		rge_write_phy_ocp(sc, 0xa438, val | 0x3000);
   1544 		rge_write_phy_ocp(sc, 0xa436, 0x80fe);
   1545 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1546 		rge_write_phy_ocp(sc, 0xa438, val | 0xa500);
   1547 		rge_write_phy_ocp(sc, 0xa436, 0x8102);
   1548 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1549 		rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
   1550 		rge_write_phy_ocp(sc, 0xa436, 0x8105);
   1551 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1552 		rge_write_phy_ocp(sc, 0xa438, val | 0x3300);
   1553 		rge_write_phy_ocp(sc, 0xa436, 0x8100);
   1554 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1555 		rge_write_phy_ocp(sc, 0xa438, val | 0x7000);
   1556 		rge_write_phy_ocp(sc, 0xa436, 0x8104);
   1557 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1558 		rge_write_phy_ocp(sc, 0xa438, val | 0xf000);
   1559 		rge_write_phy_ocp(sc, 0xa436, 0x8106);
   1560 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1561 		rge_write_phy_ocp(sc, 0xa438, val | 0x6500);
   1562 		rge_write_phy_ocp(sc, 0xa436, 0x80dc);
   1563 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1564 		rge_write_phy_ocp(sc, 0xa438, val | 0xed00);
   1565 		rge_write_phy_ocp(sc, 0xa436, 0x80df);
   1566 		RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
   1567 		rge_write_phy_ocp(sc, 0xa436, 0x80e1);
   1568 		RGE_PHY_CLRBIT(sc, 0xa438, 0x0100);
   1569 		val = rge_read_phy_ocp(sc, 0xbf06) & ~0x003f;
   1570 		rge_write_phy_ocp(sc, 0xbf06, val | 0x0038);
   1571 		rge_write_phy_ocp(sc, 0xa436, 0x819f);
   1572 		rge_write_phy_ocp(sc, 0xa438, 0xd0b6);
   1573 		rge_write_phy_ocp(sc, 0xbc34, 0x5555);
   1574 		val = rge_read_phy_ocp(sc, 0xbf0a) & ~0x0e00;
   1575 		rge_write_phy_ocp(sc, 0xbf0a, val | 0x0a00);
   1576 		RGE_PHY_CLRBIT(sc, 0xa5c0, 0x0400);
   1577 		RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
   1578 	} else {
		for (i = 0; i < nitems(rtl8125_mac_cfg3_ephy); i++)
			rge_write_ephy(sc, rtl8125_mac_cfg3_ephy[i].reg,
			    rtl8125_mac_cfg3_ephy[i].val);

		if (mcode_ver != RGE_MAC_CFG3_MCODE_VER) {
			/* Disable PHY config. */
			RGE_CLRBIT_1(sc, 0xf2, 0x20);
			DELAY(1000);

			rge_patch_phy_mcu(sc, 1);

			rge_write_phy_ocp(sc, 0xa436, 0x8024);
			rge_write_phy_ocp(sc, 0xa438, 0x8601);
			rge_write_phy_ocp(sc, 0xa436, 0xb82e);
			rge_write_phy_ocp(sc, 0xa438, 0x0001);

			RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
			for (i = 0; i < nitems(rtl8125_mac_cfg3_mcu); i++) {
				rge_write_phy_ocp(sc,
				    rtl8125_mac_cfg3_mcu[i].reg,
				    rtl8125_mac_cfg3_mcu[i].val);
			}
			RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);

			rge_write_phy_ocp(sc, 0xa436, 0);
			rge_write_phy_ocp(sc, 0xa438, 0);
			RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
			rge_write_phy_ocp(sc, 0xa436, 0x8024);
			rge_write_phy_ocp(sc, 0xa438, 0);

			rge_patch_phy_mcu(sc, 0);

			/* Enable PHY config. */
			RGE_SETBIT_1(sc, 0xf2, 0x20);

			/* Write microcode version. */
			rge_write_phy_ocp(sc, 0xa436, 0x801e);
			rge_write_phy_ocp(sc, 0xa438, RGE_MAC_CFG3_MCODE_VER);
		}

		RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
		val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
		rge_write_phy_ocp(sc, 0xad16, val | 0x03ff);
		val = rge_read_phy_ocp(sc, 0xad32) & ~0x003f;
		rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
		RGE_PHY_CLRBIT(sc, 0xac08, 0x1000);
		RGE_PHY_CLRBIT(sc, 0xac08, 0x0100);
		val = rge_read_phy_ocp(sc, 0xacc0) & ~0x0003;
		rge_write_phy_ocp(sc, 0xacc0, val | 0x0002);
		val = rge_read_phy_ocp(sc, 0xad40) & ~0x00e0;
		rge_write_phy_ocp(sc, 0xad40, val | 0x0040);
		val = rge_read_phy_ocp(sc, 0xad40) & ~0x0007;
		rge_write_phy_ocp(sc, 0xad40, val | 0x0004);
		RGE_PHY_CLRBIT(sc, 0xac14, 0x0080);
		RGE_PHY_CLRBIT(sc, 0xac80, 0x0300);
		val = rge_read_phy_ocp(sc, 0xac5e) & ~0x0007;
		rge_write_phy_ocp(sc, 0xac5e, val | 0x0002);
		rge_write_phy_ocp(sc, 0xad4c, 0x00a8);
		rge_write_phy_ocp(sc, 0xac5c, 0x01ff);
		val = rge_read_phy_ocp(sc, 0xac8a) & ~0x00f0;
		rge_write_phy_ocp(sc, 0xac8a, val | 0x0030);
		rge_write_phy_ocp(sc, 0xb87c, 0x80a2);
		rge_write_phy_ocp(sc, 0xb87e, 0x0153);
		rge_write_phy_ocp(sc, 0xb87c, 0x809c);
		rge_write_phy_ocp(sc, 0xb87e, 0x0153);

		rge_write_phy_ocp(sc, 0xa436, 0x81b3);
		for (i = 0; i < nitems(mac_cfg3_a438_value); i++)
			rge_write_phy_ocp(sc, 0xa438, mac_cfg3_a438_value[i]);
		for (i = 0; i < 26; i++)
			rge_write_phy_ocp(sc, 0xa438, 0);
		rge_write_phy_ocp(sc, 0xa436, 0x8257);
		rge_write_phy_ocp(sc, 0xa438, 0x020f);
		rge_write_phy_ocp(sc, 0xa436, 0x80ea);
		rge_write_phy_ocp(sc, 0xa438, 0x7843);

		rge_patch_phy_mcu(sc, 1);
		RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
		RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
		for (i = 0; i < nitems(mac_cfg3_b88e_value); i += 2) {
			rge_write_phy_ocp(sc, 0xb88e, mac_cfg3_b88e_value[i]);
			rge_write_phy_ocp(sc, 0xb890,
			    mac_cfg3_b88e_value[i + 1]);
		}
		RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
		rge_patch_phy_mcu(sc, 0);

		RGE_PHY_SETBIT(sc, 0xd068, 0x2000);
		rge_write_phy_ocp(sc, 0xa436, 0x81a2);
		RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
		val = rge_read_phy_ocp(sc, 0xb54c) & ~0xff00;
		rge_write_phy_ocp(sc, 0xb54c, val | 0xdb00);
		RGE_PHY_CLRBIT(sc, 0xa454, 0x0001);
		RGE_PHY_SETBIT(sc, 0xa5d4, 0x0020);
		RGE_PHY_CLRBIT(sc, 0xad4e, 0x0010);
		RGE_PHY_CLRBIT(sc, 0xa86a, 0x0001);
		RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
	}

	/* Disable EEE. */
	RGE_MAC_CLRBIT(sc, 0xe040, 0x0003);
	RGE_MAC_CLRBIT(sc, 0xeb62, 0x0006);
	RGE_PHY_CLRBIT(sc, 0xa432, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xa5d0, 0x0006);
	RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xa6d8, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xa428, 0x0080);
	RGE_PHY_CLRBIT(sc, 0xa4a2, 0x0200);

	rge_patch_phy_mcu(sc, 1);
	RGE_MAC_CLRBIT(sc, 0xe052, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xa442, 0x3000);
	RGE_PHY_CLRBIT(sc, 0xa430, 0x8000);
	rge_patch_phy_mcu(sc, 0);
}

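/*
 * Program the station address.  MAC0/MAC4 are write-protected during
 * normal operation, so the config registers are unlocked through the
 * EEPROM command register for the duration of the update.
 */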
void
rge_set_macaddr(struct rge_softc *sc, const uint8_t *addr)
{
	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
	RGE_WRITE_4(sc, RGE_MAC0,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	RGE_WRITE_4(sc, RGE_MAC4,
	    addr[5] <<  8 | addr[4]);
	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
}

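/*
 * Read the factory MAC address from the chip.  The caller must supply
 * a buffer of at least ETHER_ADDR_LEN bytes.
 */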
void
rge_get_macaddr(struct rge_softc *sc, uint8_t *addr)
{
	*(uint32_t *)&addr[0] = RGE_READ_4(sc, RGE_ADDR0);
	*(uint16_t *)&addr[4] = RGE_READ_2(sc, RGE_ADDR1);
}

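/*
 * One-time hardware setup: clear the PME status and CLKREQ bits, halt
 * and clear the MAC MCU, disable PHY power saving, and set a bit in
 * the PCIe uncorrectable error status register via the CSI window.
 */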
void
rge_hw_init(struct rge_softc *sc)
{
	int i;

	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
	RGE_CLRBIT_1(sc, 0xf1, 0x80);

	/* Disable UPS. */
	RGE_MAC_CLRBIT(sc, 0xd40a, 0x0010);

	/* Configure MAC MCU. */
	rge_write_mac_ocp(sc, 0xfc38, 0);

	for (i = 0xfc28; i < 0xfc38; i += 2)
		rge_write_mac_ocp(sc, i, 0);

	DELAY(3000);
	rge_write_mac_ocp(sc, 0xfc26, 0);

	if (sc->rge_type == MAC_CFG3) {
		for (i = 0; i < nitems(rtl8125_def_bps); i++)
			rge_write_mac_ocp(sc, rtl8125_def_bps[i].reg,
			    rtl8125_def_bps[i].val);
	}

	/* Disable PHY power saving. */
	rge_disable_phy_ocp_pwrsave(sc);

	/* Set PCIe uncorrectable error status. */
	rge_write_csi(sc, 0x108,
	    rge_read_csi(sc, 0x108) | 0x00100000);
}

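/*
 * PHY OCP register 0xc416 appears to control the PHY's power-save
 * mode; writing 0x0500 under MCU patch mode disables it.  The exact
 * register semantics are not documented, so this simply mirrors the
 * vendor sequence.
 */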
void
rge_disable_phy_ocp_pwrsave(struct rge_softc *sc)
{
	if (rge_read_phy_ocp(sc, 0xc416) != 0x0500) {
		rge_patch_phy_mcu(sc, 1);
		rge_write_phy_ocp(sc, 0xc416, 0);
		rge_write_phy_ocp(sc, 0xc416, 0x0500);
		rge_patch_phy_mcu(sc, 0);
	}
}

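/*
 * Enter (set != 0) or leave PHY MCU patch mode.  Bit 0x0010 of
 * register 0xb820 requests the mode change, and the MCU is assumed to
 * acknowledge it by raising bit 0x0040 of register 0xb800, which is
 * what the polling loop below waits for.
 */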
void
rge_patch_phy_mcu(struct rge_softc *sc, int set)
{
	uint16_t val;
	int i;

	if (set)
		RGE_PHY_SETBIT(sc, 0xb820, 0x0010);
	else
		RGE_PHY_CLRBIT(sc, 0xb820, 0x0010);

	for (i = 0; i < 1000; i++) {
		val = rge_read_phy_ocp(sc, 0xb800) & 0x0040;
		DELAY(100);
		if (val == 0x0040)
			break;
	}
	if (i == 1000)
		printf("%s: timeout waiting to patch phy mcu\n",
		    sc->sc_dev.dv_xname);
}

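/*
 * Register every media type the RTL8125 can negotiate, from 10BASE-T
 * up to 2500BASE-T.
 */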
void
rge_add_media_types(struct rge_softc *sc)
{
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX, 0, NULL);
}

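/*
 * Select the interrupt mitigation scheme.  RGE_IMTYPE_NONE acks RX/TX
 * interrupts individually, while RGE_IMTYPE_SIM batches them behind
 * the PCS timer interrupt (simulated interrupt moderation).
 */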
void
rge_config_imtype(struct rge_softc *sc, int imtype)
{
	switch (imtype) {
	case RGE_IMTYPE_NONE:
		sc->rge_intrs = RGE_INTRS;
		sc->rge_rx_ack = RGE_ISR_RX_OK | RGE_ISR_RX_DESC_UNAVAIL |
		    RGE_ISR_RX_FIFO_OFLOW;
		sc->rge_tx_ack = RGE_ISR_TX_OK;
		break;
	case RGE_IMTYPE_SIM:
		sc->rge_intrs = RGE_INTRS_TIMER;
		sc->rge_rx_ack = RGE_ISR_PCS_TIMEOUT;
		sc->rge_tx_ack = RGE_ISR_PCS_TIMEOUT;
		break;
	default:
		panic("%s: unknown imtype %d", sc->sc_dev.dv_xname, imtype);
	}
}

void
rge_disable_sim_im(struct rge_softc *sc)
{
	RGE_WRITE_4(sc, RGE_TIMERINT, 0);
	sc->rge_timerintr = 0;
}

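/*
 * Arm simulated interrupt moderation.  The 0x2600 interval is
 * inherited from the OpenBSD driver; writing RGE_TIMERCNT appears to
 * reload the countdown.
 */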
void
rge_setup_sim_im(struct rge_softc *sc)
{
	RGE_WRITE_4(sc, RGE_TIMERINT, 0x2600);
	RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
	sc->rge_timerintr = 1;
}

void
rge_setup_intr(struct rge_softc *sc, int imtype)
{
	rge_config_imtype(sc, imtype);

	/* Enable interrupts. */
	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);

	switch (imtype) {
	case RGE_IMTYPE_NONE:
		rge_disable_sim_im(sc);
		break;
	case RGE_IMTYPE_SIM:
		rge_setup_sim_im(sc);
		break;
	default:
		panic("%s: unknown imtype %d", sc->sc_dev.dv_xname, imtype);
	}
}

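/*
 * Take the chip out of out-of-band (firmware/WoL) mode so the driver
 * owns the MAC: clear the RX filters, disable RealWoW, reset, leave
 * OOB and handshake with the firmware through RGE_TWICMD.
 */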
void
rge_exit_oob(struct rge_softc *sc)
{
	int i;

	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
	    RGE_RXCFG_ERRPKT);

	/* Disable RealWoW. */
	rge_write_mac_ocp(sc, 0xc0bc, 0x00ff);

	rge_reset(sc);

	/* Disable OOB. */
	RGE_CLRBIT_1(sc, RGE_MCUCMD, RGE_MCUCMD_IS_OOB);

	RGE_MAC_CLRBIT(sc, 0xe8de, 0x4000);

	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
			break;
	}

	rge_write_mac_ocp(sc, 0xc0aa, 0x07d0);
	rge_write_mac_ocp(sc, 0xc0a6, 0x0150);
	rge_write_mac_ocp(sc, 0xc01e, 0x5555);

	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
			break;
	}

	if (rge_read_mac_ocp(sc, 0xd42c) & 0x0100) {
		/*
		 * Wait for the PHY state field (the low three bits of
		 * 0xa420) to settle at 2.  A mask of 0x0080, as
		 * previously used here, can never compare equal to 2,
		 * so the loop would always run to the timeout.
		 */
		for (i = 0; i < RGE_TIMEOUT; i++) {
			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 2)
				break;
			DELAY(1000);
		}
		RGE_MAC_CLRBIT(sc, 0xd408, 0x0100);
		RGE_PHY_CLRBIT(sc, 0xa468, 0x000a);
	}
}

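/*
 * CSI (PCIe configuration space) access goes through an indirect
 * address/data register pair.  A write sets RGE_CSIAR_BUSY and polls
 * for the hardware to clear it; a read (below) issues the address
 * without BUSY and polls for the hardware to set it once the data is
 * valid.
 */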
void
rge_write_csi(struct rge_softc *sc, uint32_t reg, uint32_t val)
{
	int i;

	RGE_WRITE_4(sc, RGE_CSIDR, val);
	RGE_WRITE_4(sc, RGE_CSIAR, (1 << 16) | (reg & RGE_CSIAR_ADDR_MASK) |
	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT) | RGE_CSIAR_BUSY);

	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (!(RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY))
			break;
	}

	DELAY(20);
}

uint32_t
rge_read_csi(struct rge_softc *sc, uint32_t reg)
{
	int i;

	RGE_WRITE_4(sc, RGE_CSIAR, (1 << 16) | (reg & RGE_CSIAR_ADDR_MASK) |
	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT));

	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY)
			break;
	}

	DELAY(20);

	return (RGE_READ_4(sc, RGE_CSIDR));
}

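/*
 * MAC OCP registers are 16 bits wide and word-aligned, hence reg >> 1
 * in the address field of the RGE_MACOCP window; address, data and
 * the BUSY bit go out in a single 32-bit write cycle.
 */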
void
rge_write_mac_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
{
	uint32_t tmp;

	tmp = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
	tmp += val;
	tmp |= RGE_MACOCP_BUSY;
	RGE_WRITE_4(sc, RGE_MACOCP, tmp);
}

uint16_t
rge_read_mac_ocp(struct rge_softc *sc, uint16_t reg)
{
	uint32_t val;

	val = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
	RGE_WRITE_4(sc, RGE_MACOCP, val);

	return (RGE_READ_4(sc, RGE_MACOCP) & RGE_MACOCP_DATA_MASK);
}

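/*
 * The PCIe EPHY is programmed through the same busy-polled
 * address/data window pattern as the CSI registers above.
 */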
void
rge_write_ephy(struct rge_softc *sc, uint16_t reg, uint16_t val)
{
	uint32_t tmp;
	int i;

	tmp = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
	tmp |= RGE_EPHYAR_BUSY | (val & RGE_EPHYAR_DATA_MASK);
	RGE_WRITE_4(sc, RGE_EPHYAR, tmp);

	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (!(RGE_READ_4(sc, RGE_EPHYAR) & RGE_EPHYAR_BUSY))
			break;
	}

	DELAY(20);
}

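/*
 * Translate a (page, register) GbE PHY access into the flat PHY OCP
 * address space.  With addr == 0, registers are grouped eight to a
 * page starting at RGE_PHYBASE: e.g. reg 10 falls in page
 * RGE_PHYBASE + 1 at in-page offset 0x12, assuming the vendor mapping
 * this code was derived from.
 */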
void
rge_write_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg, uint16_t val)
{
	uint16_t off, phyaddr;

	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
	phyaddr <<= 4;

	off = addr ? reg : 0x10 + (reg % 8);

	phyaddr += (off - 16) << 1;

	rge_write_phy_ocp(sc, phyaddr, val);
}

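/*
 * Raw PHY OCP access.  A write polls RGE_PHYOCP_BUSY until the cycle
 * completes; a read (below) polls for BUSY to become set, which
 * indicates that the data field is valid.
 */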
void
rge_write_phy_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
{
	uint32_t tmp;
	int i;

	tmp = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
	tmp |= RGE_PHYOCP_BUSY | val;
	RGE_WRITE_4(sc, RGE_PHYOCP, tmp);

	for (i = 0; i < RGE_TIMEOUT; i++) {
		DELAY(1);
		if (!(RGE_READ_4(sc, RGE_PHYOCP) & RGE_PHYOCP_BUSY))
			break;
	}
}

uint16_t
rge_read_phy_ocp(struct rge_softc *sc, uint16_t reg)
{
	uint32_t val;
	int i;

	val = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
	RGE_WRITE_4(sc, RGE_PHYOCP, val);

	for (i = 0; i < RGE_TIMEOUT; i++) {
		DELAY(1);
		val = RGE_READ_4(sc, RGE_PHYOCP);
		if (val & RGE_PHYOCP_BUSY)
			break;
	}

	return (val & RGE_PHYOCP_DATA_MASK);
}

int
rge_get_link_status(struct rge_softc *sc)
{
	return ((RGE_READ_2(sc, RGE_PHYSTAT) & RGE_PHYSTAT_LINK) ? 1 : 0);
}

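/*
 * Deferred TX kick, run from the transmit workqueue: ring the TX
 * start doorbell so the chip re-reads the descriptor ring.
 */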
void
rge_txstart(struct work *wk, void *arg)
{
	struct rge_softc *sc = arg;

	RGE_WRITE_2(sc, RGE_TXSTART, RGE_TXSTART_START);
}

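/*
 * Once-a-second housekeeping: sample the link state at splnet and
 * reschedule.  This assumes sc_timeout is a callout(9) handle set up
 * at attach time; the OpenBSD original used timeout(9) here.
 */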
void
rge_tick(void *arg)
{
	struct rge_softc *sc = arg;
	int s;

	s = splnet();
	rge_link_state(sc);
	splx(s);

	callout_schedule(&sc->sc_timeout, hz);
}

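/*
 * Push the PHY link status up to the network stack.  On NetBSD,
 * if_link_state_change() records the new state itself, so ifp is not
 * modified directly here.
 */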
void
rge_link_state(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	int link = LINK_STATE_DOWN;

	if (rge_get_link_status(sc))
		link = LINK_STATE_UP;

	if (ifp->if_link_state != link)
		if_link_state_change(ifp, link);
}