      1 /*	$NetBSD: if_rge.c,v 1.10 2020/04/30 00:32:16 sevan Exp $	*/
      2 /*	$OpenBSD: if_rge.c,v 1.2 2020/01/02 09:00:45 kevlo Exp $	*/
      3 
      4 /*
      5  * Copyright (c) 2019 Kevin Lo <kevlo (at) openbsd.org>
      6  *
      7  * Permission to use, copy, modify, and distribute this software for any
      8  * purpose with or without fee is hereby granted, provided that the above
      9  * copyright notice and this permission notice appear in all copies.
     10  *
     11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
     12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
     13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
     14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
     15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
     16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
     17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
     18  */
     19 
     20 #include <sys/cdefs.h>
     21 __KERNEL_RCSID(0, "$NetBSD: if_rge.c,v 1.10 2020/04/30 00:32:16 sevan Exp $");
     22 
     23 /* #include "vlan.h" Sevan */
     24 
     25 #include <sys/types.h>
     26 
     27 #include <sys/param.h>
     28 #include <sys/systm.h>
     29 #include <sys/sockio.h>
     30 #include <sys/mbuf.h>
     31 #include <sys/malloc.h>
     32 #include <sys/kernel.h>
     33 #include <sys/socket.h>
     34 #include <sys/device.h>
     35 #include <sys/endian.h>
     36 #include <sys/callout.h>
     37 #include <sys/workqueue.h>
     38 
     39 #include <net/if.h>
     40 
     41 #include <net/if_dl.h>
     42 #include <net/if_ether.h>
     43 
     44 #include <net/if_media.h>
     45 
     46 #include <netinet/in.h>
     48 
     49 #include <net/bpf.h>
     52 
     53 #include <sys/bus.h>
     54 #include <machine/intr.h>
     55 
     56 #include <dev/mii/mii.h>
     57 
     58 #include <dev/pci/pcivar.h>
     59 #include <dev/pci/pcireg.h>
     60 #include <dev/pci/pcidevs.h>
     61 
     62 #include <dev/pci/if_rgereg.h>
     63 
     64 #ifdef __NetBSD__
     65 #define letoh32 	htole32
     66 #define nitems(x) 	__arraycount(x)
     67 #define MBUF_LIST_INITIALIZER() 	{ NULL, NULL, 0 }
     68 struct mbuf_list {
     69 	struct mbuf 	*ml_head;
     70 	struct mbuf 	*ml_tail;
     71 	u_int 	ml_len;
     72 };
     73 
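        /*
         * Minimal mbuf_list helpers with OpenBSD semantics (a local shim);
         * they let the receive path below batch packets before handing
         * them up the stack one at a time.
         */
        static void
        ml_enqueue(struct mbuf_list *ml, struct mbuf *m)
        {
        	m->m_nextpkt = NULL;
        	if (ml->ml_tail == NULL)
        		ml->ml_head = m;
        	else
        		ml->ml_tail->m_nextpkt = m;
        	ml->ml_tail = m;
        	ml->ml_len++;
        }
        
        static struct mbuf *
        ml_dequeue(struct mbuf_list *ml)
        {
        	struct mbuf *m = ml->ml_head;
        
        	if (m != NULL) {
        		ml->ml_head = m->m_nextpkt;
        		if (ml->ml_head == NULL)
        			ml->ml_tail = NULL;
        		m->m_nextpkt = NULL;
        		ml->ml_len--;
        	}
        	return (m);
        }
        
        /*
         * MCLGETI emulation: OpenBSD sizes clusters to the interface; this
         * shim simply attaches malloc'ed external storage of the given size.
         */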
     74 static struct mbuf *
     75 MCLGETI(struct rge_softc *sc __unused, int how,
     76     struct ifnet *ifp __unused, u_int size)
     77 {
     78 	struct mbuf *m;
     79 
     80 	MGETHDR(m, how, MT_DATA);
     81 	if (m == NULL)
     82 		return NULL;
     83 
     84 	MEXTMALLOC(m, size, how);
     85 	if ((m->m_flags & M_EXT) == 0) {
     86 		m_freem(m);
     87 		return NULL;
     88 	}
     89 	return m;
     90 }
     91 
     92 #ifdef NET_MPSAFE
     93 #define 	RGE_MPSAFE	1
     94 #define 	CALLOUT_FLAGS	CALLOUT_MPSAFE
     95 #else
     96 #define 	CALLOUT_FLAGS	0
     97 #endif
     98 #endif
     99 
    100 static int		rge_match(device_t, cfdata_t, void *);
    101 static void		rge_attach(device_t, device_t, void *);
    102 int		rge_intr(void *);
    103 int		rge_encap(struct rge_softc *, struct mbuf *, int);
    104 int		rge_ioctl(struct ifnet *, u_long, void *);
    105 void		rge_start(struct ifnet *);
    106 void		rge_watchdog(struct ifnet *);
    107 int		rge_init(struct ifnet *);
    108 void		rge_stop(struct ifnet *);
    109 int		rge_ifmedia_upd(struct ifnet *);
    110 void		rge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
    111 int		rge_allocmem(struct rge_softc *);
    112 int		rge_newbuf(struct rge_softc *, int);
    113 void		rge_discard_rxbuf(struct rge_softc *, int);
    114 int		rge_rx_list_init(struct rge_softc *);
    115 void		rge_tx_list_init(struct rge_softc *);
    116 int		rge_rxeof(struct rge_softc *);
    117 int		rge_txeof(struct rge_softc *);
    118 void		rge_reset(struct rge_softc *);
    119 void		rge_iff(struct rge_softc *);
    120 void		rge_set_phy_power(struct rge_softc *, int);
    121 void		rge_phy_config(struct rge_softc *);
    122 void		rge_set_macaddr(struct rge_softc *, const uint8_t *);
    123 void		rge_get_macaddr(struct rge_softc *, uint8_t *);
    124 void		rge_hw_init(struct rge_softc *);
    125 void		rge_disable_phy_ocp_pwrsave(struct rge_softc *);
    126 void		rge_patch_phy_mcu(struct rge_softc *, int);
    127 void		rge_add_media_types(struct rge_softc *);
    128 void		rge_config_imtype(struct rge_softc *, int);
    129 void		rge_disable_sim_im(struct rge_softc *);
    130 void		rge_setup_sim_im(struct rge_softc *);
    131 void		rge_setup_intr(struct rge_softc *, int);
    132 void		rge_exit_oob(struct rge_softc *);
    133 void		rge_write_csi(struct rge_softc *, uint32_t, uint32_t);
    134 uint32_t	rge_read_csi(struct rge_softc *, uint32_t);
    135 void		rge_write_mac_ocp(struct rge_softc *, uint16_t, uint16_t);
    136 uint16_t	rge_read_mac_ocp(struct rge_softc *, uint16_t);
    137 void		rge_write_ephy(struct rge_softc *, uint16_t, uint16_t);
    138 void		rge_write_phy(struct rge_softc *, uint16_t, uint16_t, uint16_t);
    139 void		rge_write_phy_ocp(struct rge_softc *, uint16_t, uint16_t);
    140 uint16_t	rge_read_phy_ocp(struct rge_softc *, uint16_t);
    141 int		rge_get_link_status(struct rge_softc *);
    142 void		rge_txstart(struct work *, void *);
    143 void		rge_tick(void *);
    144 void		rge_link_state(struct rge_softc *);
    145 
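        /*
         * Register/value tables (defined in if_rgereg.h) used to program
         * the EPHY and to load the MAC/PHY microcode for each MAC revision.
         */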
    146 static const struct {
    147 	uint16_t reg;
    148 	uint16_t val;
    149 }  rtl8125_def_bps[] = {
    150 	RTL8125_DEF_BPS
    151 }, rtl8125_mac_cfg2_ephy[] = {
    152 	RTL8125_MAC_CFG2_EPHY
    153 }, rtl8125_mac_cfg2_mcu[] = {
    154 	RTL8125_MAC_CFG2_MCU
    155 }, rtl8125_mac_cfg3_ephy[] = {
    156 	RTL8125_MAC_CFG3_EPHY
    157 }, rtl8125_mac_cfg3_mcu[] = {
    158 	RTL8125_MAC_CFG3_MCU
    159 };
    160 
    161 CFATTACH_DECL_NEW(rge, sizeof(struct rge_softc), rge_match, rge_attach,
    162 		NULL, NULL); /* Sevan - detach function? */
    163 
    164 extern struct cfdriver rge_cd;
    165 
    166 static const struct {
    167 	pci_vendor_id_t 	vendor;
    168 	pci_product_id_t 	product;
    169 } rge_devices[] = {
    170 	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_E3000 },
    171 	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8125 },
    172 };
    173 
    174 static int
    175 rge_match(device_t parent, cfdata_t match, void *aux)
    176 {
    177 	struct pci_attach_args *pa = aux;
    178 	int n;
    179 
    180 	for (n = 0; n < __arraycount(rge_devices); n++) {
    181 		if (PCI_VENDOR(pa->pa_id) == rge_devices[n].vendor &&
    182 		    PCI_PRODUCT(pa->pa_id) == rge_devices[n].product)
    183 			return 1;
    184 	}
    185 
    186 	return 0;
    187 }
    188 
    189 static void
    190 rge_attach(device_t parent, device_t self, void *aux)
    191 {
    192 	struct rge_softc *sc = device_private(self);
    193 	struct pci_attach_args *pa = aux;
    194 	pci_chipset_tag_t pc = pa->pa_pc;
    195 	pci_intr_handle_t ih;
    196 	char intrbuf[PCI_INTRSTR_LEN];
    197 	const char *intrstr = NULL;
    198 	struct ifnet *ifp;
    199 	pcireg_t reg;
    200 	uint32_t hwrev;
    201 	uint8_t eaddr[ETHER_ADDR_LEN];
    202 	int offset;
    203 
    204 	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
    205 
    206 	/*
    207 	 * Map control/status registers.
    208 	 */
    209 	if (pci_mapreg_map(pa, RGE_PCI_BAR2, PCI_MAPREG_TYPE_MEM |
    210 	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->rge_btag, &sc->rge_bhandle,
    211 	    NULL, &sc->rge_bsize)) {
    212 		if (pci_mapreg_map(pa, RGE_PCI_BAR1, PCI_MAPREG_TYPE_MEM |
    213 		    PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->rge_btag,
    214 		    &sc->rge_bhandle, NULL, &sc->rge_bsize)) {
    215 			if (pci_mapreg_map(pa, RGE_PCI_BAR0, PCI_MAPREG_TYPE_IO,
    216 			    0, &sc->rge_btag, &sc->rge_bhandle, NULL,
    217 			    &sc->rge_bsize)) {
    218 				printf(": can't map mem or i/o space\n");
    219 				return;
    220 			}
    221 		}
    222 	}
    223 
    224 	/*
    225 	 * Allocate interrupt.
    226 	 */
    227 	/* XXX Prefer MSI (OpenBSD uses pci_intr_map_msi()) once wired up. */
    228 	if (pci_intr_map(pa, &ih) != 0) {
    229 		printf(": couldn't map interrupt\n");
    230 		return;
    231 	}
    233 	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
    234 	sc->sc_ih = pci_intr_establish_xname(pc, ih, IPL_NET, rge_intr,
    235 	    sc, sc->sc_dev.dv_xname);
    236 	if (sc->sc_ih == NULL) {
    237 		printf(": couldn't establish interrupt");
    238 		if (intrstr != NULL)
    239 			printf(" at %s", intrstr);
    240 		printf("\n");
    241 		return;
    242 	}
    243 	printf(": %s", intrstr);
    244 
    245 	if (pci_dma64_available(pa))
    246 		sc->sc_dmat = pa->pa_dmat64;
    247 	else
    248 		sc->sc_dmat = pa->pa_dmat;
    249 
    250 	sc->sc_pc = pa->pa_pc;
    251 	sc->sc_tag = pa->pa_tag;
    252 
    253 	/* Determine hardware revision */
    254 	hwrev = RGE_READ_4(sc, RGE_TXCFG) & RGE_TXCFG_HWREV;
    255 	switch (hwrev) {
    256 	case 0x60800000:
    257 		sc->rge_type = MAC_CFG2;
    258 		break;
    259 	case 0x60900000:
    260 		sc->rge_type = MAC_CFG3;
    261 		break;
    262 	default:
    263 		printf(": unknown version 0x%08x\n", hwrev);
    264 		return;
    265 	}
    266 
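        	/* Start out with simulated (timer driven) interrupt moderation. */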
    267 	rge_config_imtype(sc, RGE_IMTYPE_SIM);
    268 
    269 	/*
    270 	 * PCI Express check.
    271 	 */
    272 	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
    273 	    &offset, NULL)) {
    274 		/* Disable PCIe ASPM. */
    275 		reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
    276 		    offset + PCIE_LCSR);
    277 		reg &= ~(PCIE_LCSR_ASPM_L0S | PCIE_LCSR_ASPM_L1 );
    278 		pci_conf_write(pa->pa_pc, pa->pa_tag, offset + PCIE_LCSR,
    279 		    reg);
    280 	}
    281 
    282 	rge_exit_oob(sc);
    283 	rge_hw_init(sc);
    284 
    285 	rge_get_macaddr(sc, eaddr);
    286 	printf(", address %s\n", ether_sprintf(eaddr));
    287 
    288 	memcpy(sc->sc_enaddr, eaddr, ETHER_ADDR_LEN);
    289 
    290 	rge_set_phy_power(sc, 1);
    291 	rge_phy_config(sc);
    292 
    293 	if (rge_allocmem(sc))
    294 		return;
    295 
    296 	ifp = &sc->sc_ec.ec_if;
    297 	ifp->if_softc = sc;
    298 	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
    299 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    300 #ifdef RGE_MPSAFE
    301 	ifp->if_extflags = IFEF_MPSAFE;
    302 #endif
    303 	ifp->if_ioctl = rge_ioctl;
    304 	ifp->if_start = rge_start;
    305 	ifp->if_watchdog = rge_watchdog;
    306 	IFQ_SET_MAXLEN(&ifp->if_snd, RGE_TX_LIST_CNT);
        	IFQ_SET_READY(&ifp->if_snd);
    307 	ifp->if_mtu = RGE_JUMBO_MTU;
    308 
    309 	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
    310 	ifp->if_capabilities = IFCAP_CSUM_IPv4_Rx | IFCAP_CSUM_IPv4_Tx |
    311 	    IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_TCPv4_Tx |
    312 	    IFCAP_CSUM_UDPv4_Rx | IFCAP_CSUM_UDPv4_Tx;
    313 #if NVLAN > 0
    314 	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
    315 #endif
    316 
    317 	callout_init(&sc->sc_timeout, CALLOUT_FLAGS);
    318 	callout_setfunc(&sc->sc_timeout, rge_tick, sc);
    319 /*	task_set(&sc->sc_task, rge_txstart, sc); Sevan - OpenBSD task API */
    320 
    321 	/* Initialize ifmedia structures. */
    322 	ifmedia_init(&sc->sc_media, IFM_IMASK, rge_ifmedia_upd,
    323 	    rge_ifmedia_sts);
    324 	rge_add_media_types(sc);
    325 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
    326 	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
    327 	sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media;
    328 
    329 	if_attach(ifp);
    330 	ether_ifattach(ifp, eaddr);
    331 }
    332 
    333 int
    334 rge_intr(void *arg)
    335 {
    336 	struct rge_softc *sc = arg;
    337 	struct ifnet *ifp = &sc->sc_ec.ec_if;
    338 	uint32_t status;
    339 	int claimed = 0, rx, tx;
    340 
    341 	if (!(ifp->if_flags & IFF_RUNNING))
    342 		return (0);
    343 
    344 	/* Disable interrupts. */
    345 	RGE_WRITE_4(sc, RGE_IMR, 0);
    346 
    347 	status = RGE_READ_4(sc, RGE_ISR);
    348 	if (!(sc->rge_flags & RGE_FLAG_MSI)) {
    349 		if ((status & RGE_INTRS) == 0 || status == 0xffffffff)
    350 			return (0);
    351 	}
    352 	if (status)
    353 		RGE_WRITE_4(sc, RGE_ISR, status);
    354 
    355 	if (status & RGE_ISR_PCS_TIMEOUT)
    356 		claimed = 1;
    357 
    358 	rx = tx = 0;
    359 	if (status & RGE_INTRS) {
    360 		if (status &
    361 		    (sc->rge_rx_ack | RGE_ISR_RX_ERR | RGE_ISR_RX_FIFO_OFLOW)) {
    362 			rx |= rge_rxeof(sc);
    363 			claimed = 1;
    364 		}
    365 
    366 		if (status & (sc->rge_tx_ack | RGE_ISR_TX_ERR)) {
    367 			tx |= rge_txeof(sc);
    368 			claimed = 1;
    369 		}
    370 
    371 		if (status & RGE_ISR_SYSTEM_ERR) {
    372 			KERNEL_LOCK(1, NULL);
    373 			rge_init(ifp);
    374 			KERNEL_UNLOCK_ONE(NULL);
    375 			claimed = 1;
    376 		}
    377 	}
    378 
    379 	if (sc->rge_timerintr) {
    380 		if ((tx | rx) == 0) {
    381 			/*
    382 			 * Nothing needs to be processed, fallback
    383 			 * to use TX/RX interrupts.
    384 			 */
    385 			rge_setup_intr(sc, RGE_IMTYPE_NONE);
    386 
    387 			/*
    388 			 * Recollect, mainly to avoid the possible
    389 			 * race introduced by changing interrupt
    390 			 * masks.
    391 			 */
    392 			rge_rxeof(sc);
    393 			rge_txeof(sc);
    394 		} else
    395 			RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
    396 	} else if (tx | rx) {
    397 		/*
    398 		 * Assume that using simulated interrupt moderation
    399 		 * (hardware timer based) could reduce the interrupt
    400 		 * rate.
    401 		 */
    402 		rge_setup_intr(sc, RGE_IMTYPE_SIM);
    403 	}
    404 
    405 	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
    406 
    407 	return (claimed);
    408 }
    409 
    410 int
    411 rge_encap(struct rge_softc *sc, struct mbuf *m, int idx)
    412 {
    413 	struct rge_tx_desc *d = NULL;
    414 	struct rge_txq *txq;
    415 	bus_dmamap_t txmap;
    416 	uint32_t cmdsts, cflags = 0;
    417 	int cur, error, i, last, nsegs;
    418 
    419 	/*
    420 	 * Set RGE_TDEXTSTS_IPCSUM if any checksum offloading is requested.
    421 	 * Otherwise, RGE_TDEXTSTS_TCPCSUM / RGE_TDEXTSTS_UDPCSUM does not
    422 	 * take effect.
    423 	 */
    424 	if ((m->m_pkthdr.csum_flags &
    425 	    (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) != 0) {
    426 		cflags |= RGE_TDEXTSTS_IPCSUM;
    427 		if (m->m_pkthdr.csum_flags & M_CSUM_TCPv4)
    428 			cflags |= RGE_TDEXTSTS_TCPCSUM;
    429 		if (m->m_pkthdr.csum_flags & M_CSUM_UDPv4)
    430 			cflags |= RGE_TDEXTSTS_UDPCSUM;
    431 	}
    432 
    433 	txq = &sc->rge_ldata.rge_txq[idx];
    434 	txmap = txq->txq_dmamap;
    435 
    436 	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m, BUS_DMA_NOWAIT);
    437 	switch (error) {
    438 	case 0:
    439 		break;
    440 	case EFBIG: /* mbuf chain is too fragmented */
    441 		if (m_defrag(m, M_DONTWAIT) == 0 &&
    442 		    bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m,
    443 		    BUS_DMA_NOWAIT) == 0)
    444 			break;
    445 
    446 		/* FALLTHROUGH */
    447 	default:
    448 		return (0);
    449 	}
    450 
    451 	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
    452 	    BUS_DMASYNC_PREWRITE);
    453 
    454 	nsegs = txmap->dm_nsegs;
    455 
    456 	/* Set up hardware VLAN tagging. */
    457 #if NVLAN > 0
    458 	if (m->m_flags & M_VLANTAG)
    459 		cflags |= bswap16(m->m_pkthdr.ether_vtag | RGE_TDEXTSTS_VTAG);
    460 #endif
    461 
    462 	cur = idx;
    463 	cmdsts = RGE_TDCMDSTS_SOF;
    464 
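        	/*
        	 * Fill one descriptor per DMA segment.  OWN is deliberately
        	 * left clear on the first descriptor and set on it last,
        	 * below, so the chip never sees a half-built chain.
        	 */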
    465 	for (i = 0; i < txmap->dm_nsegs; i++) {
    466 		d = &sc->rge_ldata.rge_tx_list[cur];
    467 
    468 		d->rge_extsts = htole32(cflags);
    469 		d->rge_addrlo = htole32(RGE_ADDR_LO(txmap->dm_segs[i].ds_addr));
    470 		d->rge_addrhi = htole32(RGE_ADDR_HI(txmap->dm_segs[i].ds_addr));
    471 
    472 		cmdsts |= txmap->dm_segs[i].ds_len;
    473 
    474 		if (cur == RGE_TX_LIST_CNT - 1)
    475 			cmdsts |= RGE_TDCMDSTS_EOR;
    476 
    477 		d->rge_cmdsts = htole32(cmdsts);
    478 
    479 		last = cur;
    480 		cmdsts = RGE_TDCMDSTS_OWN;
    481 		cur = RGE_NEXT_TX_DESC(cur);
    482 	}
    483 
    484 	/* Set EOF on the last descriptor. */
    485 	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_EOF);
    486 
    487 	/* Transfer ownership of packet to the chip. */
    488 	d = &sc->rge_ldata.rge_tx_list[idx];
    489 
    490 	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_OWN);
    491 
    492 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
    493 	    cur * sizeof(struct rge_tx_desc), sizeof(struct rge_tx_desc),
    494 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    495 
    496 	/* Update info of TX queue and descriptors. */
    497 	txq->txq_mbuf = m;
    498 	txq->txq_descidx = last;
    499 
    500 	return (nsegs);
    501 }
    502 
    503 int
    504 rge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
    505 {
    506 	struct rge_softc *sc = ifp->if_softc;
    507 	struct ifreq *ifr = (struct ifreq *)data;
    508 	int s, error = 0;
    509 
    510 	s = splnet();
    511 
    512 	switch (cmd) {
    513 	case SIOCSIFADDR:
    514 		ifp->if_flags |= IFF_UP;
    515 		if (!(ifp->if_flags & IFF_RUNNING))
    516 			rge_init(ifp);
    517 		break;
    518 	case SIOCSIFFLAGS:
    519 		if (ifp->if_flags & IFF_UP) {
    520 			if (ifp->if_flags & IFF_RUNNING)
    521 				error = ENETRESET;
    522 			else
    523 				rge_init(ifp);
    524 		} else {
    525 			if (ifp->if_flags & IFF_RUNNING)
    526 				rge_stop(ifp);
    527 		}
    528 		break;
    529 	case SIOCGIFMEDIA:
    530 	case SIOCSIFMEDIA:
    531 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
    532 		break;
    533 	case SIOCSIFMTU:
    534 		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > RGE_JUMBO_MTU) {
    535 			error = EINVAL;
    536 			break;
    537 		}
    538 		ifp->if_mtu = ifr->ifr_mtu;
    539 		break;
    540 	default:
    541 		error = ether_ioctl(ifp, cmd, data);
    542 	}
    543 
    544 	if (error == ENETRESET) {
    545 		if (ifp->if_flags & IFF_RUNNING)
    546 			rge_iff(sc);
    547 		error = 0;
    548 	}
    549 
    550 	splx(s);
    551 	return (error);
    552 }
    553 
    554 void
    555 rge_start(struct ifnet *ifp)
    556 {
    557 	struct rge_softc *sc = ifp->if_softc;
    558 	struct mbuf *m;
    559 	int free, idx, used;
    560 	int queued = 0;
    561 
    562 #define LINK_STATE_IS_UP(_s)    \
    563 	((_s) >= LINK_STATE_UP || (_s) == LINK_STATE_UNKNOWN)
    564 
    565 	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
    566 		IFQ_PURGE(&ifp->if_snd);
    567 		return;
    568 	}
    569 
    570 	/* Calculate free space. */
    571 	idx = sc->rge_ldata.rge_txq_prodidx;
    572 	free = sc->rge_ldata.rge_txq_considx;
    573 	if (free <= idx)
    574 		free += RGE_TX_LIST_CNT;
    575 	free -= idx;
    576 
    577 	for (;;) {
    578 		if (RGE_TX_NSEGS >= free + 2) {
    579 			SET(ifp->if_flags, IFF_OACTIVE);
    580 			break;
    581 		}
    582 
    583 		IFQ_DEQUEUE(&ifp->if_snd, m);
    584 		if (m == NULL)
    585 			break;
    586 
    587 		used = rge_encap(sc, m, idx);
    588 		if (used == 0) {
    589 			m_freem(m);
    590 			continue;
    591 		}
    592 
    593 		KASSERT(used <= free);
    594 		free -= used;
    595 
    596 		bpf_mtap(ifp, m, BPF_D_OUT);
    600 
    601 		idx += used;
    602 		if (idx >= RGE_TX_LIST_CNT)
    603 			idx -= RGE_TX_LIST_CNT;
    604 
    605 		queued++;
    606 	}
    607 
    608 	if (queued == 0)
    609 		return;
    610 
    611 	/* Set a timeout in case the chip goes out to lunch. */
    612 	ifp->if_timer = 5;
    613 
    614 	sc->rge_ldata.rge_txq_prodidx = idx;
    615 	rge_txstart(&sc->sc_task, sc); /* XXX OpenBSD: ifq_serialize() */
    616 }
    617 
    618 void
    619 rge_watchdog(struct ifnet *ifp)
    620 {
    621 	struct rge_softc *sc = ifp->if_softc;
    622 
    623 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
    624 	if_statinc(ifp, if_oerrors);
    625 
    626 	rge_init(ifp);
    627 }
    628 
    629 int
    630 rge_init(struct ifnet *ifp)
    631 {
    632 	struct rge_softc *sc = ifp->if_softc;
    633 	uint32_t val;
    634 	uint16_t max_frame_size;
    635 	int i;
    636 
    637 	rge_stop(ifp);
    638 
    639 	/* Set MAC address. */
    640 	rge_set_macaddr(sc, sc->sc_enaddr);
    641 
    642 	/* Set maximum frame size but don't let the MTU be less than ETHERMTU. */
    643 	if (ifp->if_mtu < ETHERMTU)
    644 		max_frame_size = ETHERMTU;
    645 	else
    646 		max_frame_size = ifp->if_mtu;
    647 
    648 	max_frame_size += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
    649 	    ETHER_CRC_LEN + 1;
    650 
    651 	if (max_frame_size > RGE_JUMBO_FRAMELEN)
    652 		max_frame_size -= 1;
    653 
    654 	RGE_WRITE_2(sc, RGE_RXMAXSIZE, max_frame_size);
    655 
    656 	/* Initialize RX descriptors list. */
    657 	if (rge_rx_list_init(sc) == ENOBUFS) {
    658 		printf("%s: init failed: no memory for RX buffers\n",
    659 		    sc->sc_dev.dv_xname);
    660 		rge_stop(ifp);
    661 		return (ENOBUFS);
    662 	}
    663 
    664 	/* Initialize TX descriptors. */
    665 	rge_tx_list_init(sc);
    666 
    667 	/* Load the addresses of the RX and TX lists into the chip. */
    668 	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_LO,
    669 	    RGE_ADDR_LO(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
    670 	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_HI,
    671 	    RGE_ADDR_HI(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
    672 	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_LO,
    673 	    RGE_ADDR_LO(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
    674 	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_HI,
    675 	    RGE_ADDR_HI(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
    676 
    677 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
    678 
    679 	RGE_CLRBIT_1(sc, 0xf1, 0x80);
    680 	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
    681 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
    682 	RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_RDY_TO_L23);
    683 
    684 	/* Clear interrupt moderation timer. */
    685 	for (i = 0; i < 64; i++)
    686 		RGE_WRITE_4(sc, RGE_IM(i), 0);
    687 
    688 	/* Set the initial RX and TX configurations. */
    689 	RGE_WRITE_4(sc, RGE_RXCFG, RGE_RXCFG_CONFIG);
    690 	RGE_WRITE_4(sc, RGE_TXCFG, RGE_TXCFG_CONFIG);
    691 
    692 	val = rge_read_csi(sc, 0x70c) & ~0xff000000;
    693 	rge_write_csi(sc, 0x70c, val | 0x27000000);
    694 
    695 	/* Enable hardware optimization function. */
    696 	val = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x78) & ~0x00007000;
    697 	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x78, val | 0x00005000);
    698 
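        	/* Undocumented register pokes, as in Realtek's reference driver. */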
    699 	RGE_WRITE_2(sc, 0x0382, 0x221b);
    700 	RGE_WRITE_1(sc, 0x4500, 0);
    701 	RGE_WRITE_2(sc, 0x4800, 0);
    702 	RGE_CLRBIT_1(sc, RGE_CFG1, RGE_CFG1_SPEED_DOWN);
    703 
    704 	rge_write_mac_ocp(sc, 0xc140, 0xffff);
    705 	rge_write_mac_ocp(sc, 0xc142, 0xffff);
    706 
    707 	val = rge_read_mac_ocp(sc, 0xd3e2) & ~0x0fff;
    708 	rge_write_mac_ocp(sc, 0xd3e2, val | 0x03a9);
    709 
    710 	RGE_MAC_CLRBIT(sc, 0xd3e4, 0x00ff);
    711 	RGE_MAC_SETBIT(sc, 0xe860, 0x0080);
    712 	RGE_MAC_SETBIT(sc, 0xeb58, 0x0001);
    713 
    714 	val = rge_read_mac_ocp(sc, 0xe614) & ~0x0700;
    715 	rge_write_mac_ocp(sc, 0xe614, val | 0x0400);
    716 
    717 	RGE_MAC_CLRBIT(sc, 0xe63e, 0x0c00);
    718 
    719 	val = rge_read_mac_ocp(sc, 0xe63e) & ~0x0030;
    720 	rge_write_mac_ocp(sc, 0xe63e, val | 0x0020);
    721 
    722 	RGE_MAC_SETBIT(sc, 0xc0b4, 0x000c);
    723 
    724 	val = rge_read_mac_ocp(sc, 0xeb6a) & ~0x007f;
    725 	rge_write_mac_ocp(sc, 0xeb6a, val | 0x0033);
    726 
    727 	val = rge_read_mac_ocp(sc, 0xeb50) & ~0x03e0;
    728 	rge_write_mac_ocp(sc, 0xeb50, val | 0x0040);
    729 
    730 	val = rge_read_mac_ocp(sc, 0xe056) & ~0x00f0;
    731 	rge_write_mac_ocp(sc, 0xe056, val | 0x0030);
    732 
    733 	RGE_WRITE_1(sc, RGE_TDFNR, 0x10);
    734 
    735 	RGE_MAC_CLRBIT(sc, 0xe040, 0x1000);
    736 
    737 	val = rge_read_mac_ocp(sc, 0xe0c0) & ~0x4f0f;
    738 	rge_write_mac_ocp(sc, 0xe0c0, val | 0x4403);
    739 
    740 	RGE_MAC_SETBIT(sc, 0xe052, 0x0068);
    741 	RGE_MAC_CLRBIT(sc, 0xe052, 0x0080);
    742 
    743 	val = rge_read_mac_ocp(sc, 0xc0ac) & ~0x0080;
    744 	rge_write_mac_ocp(sc, 0xc0ac, val | 0x1f00);
    745 
    746 	val = rge_read_mac_ocp(sc, 0xd430) & ~0x0fff;
    747 	rge_write_mac_ocp(sc, 0xd430, val | 0x047f);
    748 
    749 	RGE_MAC_SETBIT(sc, 0xe84c, 0x00c0);
    750 
    751 	/* Disable EEE plus. */
    752 	RGE_MAC_CLRBIT(sc, 0xe080, 0x0002);
    753 
    754 	RGE_MAC_CLRBIT(sc, 0xea1c, 0x0004);
    755 
    756 	RGE_MAC_SETBIT(sc, 0xeb54, 0x0001);
    757 	DELAY(1);
    758 	RGE_MAC_CLRBIT(sc, 0xeb54, 0x0001);
    759 
    760 	RGE_CLRBIT_4(sc, 0x1880, 0x0030);
    761 
    762 	rge_write_mac_ocp(sc, 0xe098, 0xc302);
    763 
    764 	if (sc->sc_ec.ec_capabilities & ETHERCAP_VLAN_HWTAGGING)
    765 		RGE_SETBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);
    766 
    767 	RGE_SETBIT_2(sc, RGE_CPLUSCMD, RGE_CPLUSCMD_RXCSUM);
    768 
    769 	for (i = 0; i < 10; i++) {
    770 		if (!(rge_read_mac_ocp(sc, 0xe00e) & 0x2000))
    771 			break;
    772 		DELAY(1000);
    773 	}
    774 
    775 	/* Disable RXDV gate. */
    776 	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
    777 	DELAY(2000);
    778 
    779 	rge_ifmedia_upd(ifp);
    780 
    781 	/* Enable transmit and receive. */
    782 	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_TXENB | RGE_CMD_RXENB);
    783 
    784 	/* Program promiscuous mode and multicast filters. */
    785 	rge_iff(sc);
    786 
    787 	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
    788 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
    789 
    790 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
    791 
    792 	/* Enable interrupts. */
    793 	rge_setup_intr(sc, RGE_IMTYPE_SIM);
    794 
    795 	ifp->if_flags |= IFF_RUNNING;
    796 	CLR(ifp->if_flags, IFF_OACTIVE);
    797 
    798 	callout_schedule(&sc->sc_timeout, hz);
    799 
    800 	return (0);
    801 }
    802 
    803 /*
    804  * Stop the adapter and free any mbufs allocated to the RX and TX lists.
    805  */
    806 void
    807 rge_stop(struct ifnet *ifp)
    808 {
    809 	struct rge_softc *sc = ifp->if_softc;
    810 	int i;
    811 
    812 	callout_stop(&sc->sc_timeout);
    813 
    814 	ifp->if_timer = 0;
    815 	ifp->if_flags &= ~IFF_RUNNING;
    816 	sc->rge_timerintr = 0;
    817 
    818 	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
    819 	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
    820 	    RGE_RXCFG_ERRPKT);
    821 
    822 	RGE_WRITE_4(sc, RGE_IMR, 0);
    823 	RGE_WRITE_4(sc, RGE_ISR, 0xffffffff);
    824 
    825 	rge_reset(sc);
    826 
    827 	intr_barrier(sc->sc_ih);
    828 /*	ifq_barrier(&ifp->if_snd); Sevan - OpenBSD queue API */
    829 /*	ifq_clr_oactive(&ifp->if_snd); Sevan - OpenBSD queue API */
    830 
    831 	if (sc->rge_head != NULL) {
    832 		m_freem(sc->rge_head);
    833 		sc->rge_head = sc->rge_tail = NULL;
    834 	}
    835 
    836 	/* Free the TX list buffers. */
    837 	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
    838 		if (sc->rge_ldata.rge_txq[i].txq_mbuf != NULL) {
    839 			bus_dmamap_unload(sc->sc_dmat,
    840 			    sc->rge_ldata.rge_txq[i].txq_dmamap);
    841 			m_freem(sc->rge_ldata.rge_txq[i].txq_mbuf);
    842 			sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
    843 		}
    844 	}
    845 
    846 	/* Free the RX list buffers. */
    847 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
    848 		if (sc->rge_ldata.rge_rxq[i].rxq_mbuf != NULL) {
    849 			bus_dmamap_unload(sc->sc_dmat,
    850 			    sc->rge_ldata.rge_rxq[i].rxq_dmamap);
    851 			m_freem(sc->rge_ldata.rge_rxq[i].rxq_mbuf);
    852 			sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
    853 		}
    854 	}
    855 }
    856 
    857 /*
    858  * Set media options.
    859  */
    860 int
    861 rge_ifmedia_upd(struct ifnet *ifp)
    862 {
    863 	struct rge_softc *sc = ifp->if_softc;
    864 	struct ifmedia *ifm = &sc->sc_media;
    865 	int anar, gig, val;
    866 
    867 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
    868 		return (EINVAL);
    869 
    870 	/* Disable Gigabit Lite. */
    871 	RGE_PHY_CLRBIT(sc, 0xa428, 0x0200);
    872 	RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0001);
    873 
    874 	val = rge_read_phy_ocp(sc, 0xa5d4);
    875 	val &= ~RGE_ADV_2500TFDX;
    876 
    877 	anar = gig = 0;
    878 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
    879 	case IFM_AUTO:
    880 		anar |= ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
    881 		gig |= GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
    882 		val |= RGE_ADV_2500TFDX;
    883 		break;
    884 	case IFM_2500_T:
    885 		anar |= ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
    886 		gig |= GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
    887 		val |= RGE_ADV_2500TFDX;
    888 		ifp->if_baudrate = IF_Mbps(2500);
    889 		break;
    890 	case IFM_1000_T:
    891 		anar |= ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
    892 		gig |= GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
    893 		ifp->if_baudrate = IF_Gbps(1);
    894 		break;
    895 	case IFM_100_TX:
    896 		anar |= ANAR_TX | ANAR_TX_FD;
    897 		ifp->if_baudrate = IF_Mbps(100);
    898 		break;
    899 	case IFM_10_T:
    900 		anar |= ANAR_10 | ANAR_10_FD;
    901 		ifp->if_baudrate = IF_Mbps(10);
    902 		break;
    903 	default:
    904 		printf("%s: unsupported media type\n", sc->sc_dev.dv_xname);
    905 		return (EINVAL);
    906 	}
    907 
    908 	rge_write_phy(sc, 0, MII_ANAR, anar | ANAR_PAUSE_ASYM | ANAR_FC);
    909 	rge_write_phy(sc, 0, MII_100T2CR, gig);
    910 	rge_write_phy_ocp(sc, 0xa5d4, val);
    911 	rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_STARTNEG);
    912 
    913 	return (0);
    914 }
    915 
    916 /*
    917  * Report current media status.
    918  */
    919 void
    920 rge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
    921 {
    922 	struct rge_softc *sc = ifp->if_softc;
    923 	uint16_t status = 0;
    924 
    925 	ifmr->ifm_status = IFM_AVALID;
    926 	ifmr->ifm_active = IFM_ETHER;
    927 
    928 	if (rge_get_link_status(sc)) {
    929 		ifmr->ifm_status |= IFM_ACTIVE;
    930 
    931 		status = RGE_READ_2(sc, RGE_PHYSTAT);
    932 		if ((status & RGE_PHYSTAT_FDX) ||
    933 		    (status & RGE_PHYSTAT_2500MBPS))
    934 			ifmr->ifm_active |= IFM_FDX;
    935 		else
    936 			ifmr->ifm_active |= IFM_HDX;
    937 
    938 		if (status & RGE_PHYSTAT_10MBPS)
    939 			ifmr->ifm_active |= IFM_10_T;
    940 		else if (status & RGE_PHYSTAT_100MBPS)
    941 			ifmr->ifm_active |= IFM_100_TX;
    942 		else if (status & RGE_PHYSTAT_1000MBPS)
    943 			ifmr->ifm_active |= IFM_1000_T;
    944 		else if (status & RGE_PHYSTAT_2500MBPS)
    945 			ifmr->ifm_active |= IFM_2500_T;
    946 	}
    947 }
    948 
    949 /*
    950  * Allocate memory for RX/TX rings.
    951  */
    952 int
    953 rge_allocmem(struct rge_softc *sc)
    954 {
    955 	int error, i;
    956 
    957 	/* Allocate DMA'able memory for the TX ring. */
    958 	error = bus_dmamap_create(sc->sc_dmat, RGE_TX_LIST_SZ, 1,
    959 	    RGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT, &sc->rge_ldata.rge_tx_list_map);
    960 	if (error) {
    961 		printf("%s: can't create TX list map\n", sc->sc_dev.dv_xname);
    962 		return (error);
    963 	}
    964 	error = bus_dmamem_alloc(sc->sc_dmat, RGE_TX_LIST_SZ, RGE_ALIGN, 0,
    965 	    &sc->rge_ldata.rge_tx_listseg, 1, &sc->rge_ldata.rge_tx_listnseg,
    966 	    BUS_DMA_NOWAIT); /* XXX OpenBSD adds BUS_DMA_ZERO */
    967 	if (error) {
    968 		printf("%s: can't alloc TX list\n", sc->sc_dev.dv_xname);
    969 		return (error);
    970 	}
    971 
    972 	/* Load the map for the TX ring. */
    973 	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
    974 	    sc->rge_ldata.rge_tx_listnseg, RGE_TX_LIST_SZ,
    975 	    (void **) &sc->rge_ldata.rge_tx_list,
    976 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
    977 	if (error) {
    978 		printf("%s: can't map TX dma buffers\n", sc->sc_dev.dv_xname);
    979 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
    980 		    sc->rge_ldata.rge_tx_listnseg);
    981 		return (error);
    982 	}
    983 	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
    984 	    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
    985 	if (error) {
    986 		printf("%s: can't load TX dma map\n", sc->sc_dev.dv_xname);
    987 		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map);
    988 		bus_dmamem_unmap(sc->sc_dmat,
    989 		    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ);
    990 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
    991 		    sc->rge_ldata.rge_tx_listnseg);
    992 		return (error);
    993 	}
    994 
    995 	/* Create DMA maps for TX buffers. */
    996 	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
    997 		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN,
    998 		    RGE_TX_NSEGS, RGE_JUMBO_FRAMELEN, 0, 0,
    999 		    &sc->rge_ldata.rge_txq[i].txq_dmamap);
   1000 		if (error) {
   1001 			printf("%s: can't create DMA map for TX\n",
   1002 			    sc->sc_dev.dv_xname);
   1003 			return (error);
   1004 		}
   1005 	}
   1006 
   1007 	/* Allocate DMA'able memory for the RX ring. */
   1008 	error = bus_dmamap_create(sc->sc_dmat, RGE_RX_LIST_SZ, 1,
   1009 	    RGE_RX_LIST_SZ, 0, 0, &sc->rge_ldata.rge_rx_list_map);
   1010 	if (error) {
   1011 		printf("%s: can't create RX list map\n", sc->sc_dev.dv_xname);
   1012 		return (error);
   1013 	}
   1014 	error = bus_dmamem_alloc(sc->sc_dmat, RGE_RX_LIST_SZ, RGE_ALIGN, 0,
   1015 	    &sc->rge_ldata.rge_rx_listseg, 1, &sc->rge_ldata.rge_rx_listnseg,
   1016 	    BUS_DMA_NOWAIT);  /* XXX OpenBSD adds BUS_DMA_ZERO */
   1017 	if (error) {
   1018 		printf("%s: can't alloc RX list\n", sc->sc_dev.dv_xname);
   1019 		return (error);
   1020 	}
   1021 
   1022 	/* Load the map for the RX ring. */
   1023 	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
   1024 	    sc->rge_ldata.rge_rx_listnseg, RGE_RX_LIST_SZ,
   1025 	    (void **) &sc->rge_ldata.rge_rx_list,
   1026 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
   1027 	if (error) {
   1028 		printf("%s: can't map RX dma buffers\n", sc->sc_dev.dv_xname);
   1029 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
   1030 		    sc->rge_ldata.rge_rx_listnseg);
   1031 		return (error);
   1032 	}
   1033 	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
   1034 	    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
   1035 	if (error) {
   1036 		printf("%s: can't load RX dma map\n", sc->sc_dev.dv_xname);
   1037 		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map);
   1038 		bus_dmamem_unmap(sc->sc_dmat,
   1039 		    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ);
   1040 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
   1041 		    sc->rge_ldata.rge_rx_listnseg);
   1042 		return (error);
   1043 	}
   1044 
   1045 	/* Create DMA maps for RX buffers. */
   1046 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
   1047 		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN, 1,
   1048 		    RGE_JUMBO_FRAMELEN, 0, 0,
   1049 		    &sc->rge_ldata.rge_rxq[i].rxq_dmamap);
   1050 		if (error) {
   1051 			printf("%s: can't create DMA map for RX\n",
   1052 			    sc->sc_dev.dv_xname);
   1053 			return (error);
   1054 		}
   1055 	}
   1056 
   1057 	return (error);
   1058 }
   1059 
   1060 /*
   1061  * Initialize the RX descriptor and attach an mbuf cluster.
   1062  */
   1063 int
   1064 rge_newbuf(struct rge_softc *sc, int idx)
   1065 {
   1066 	struct mbuf *m;
   1067 	struct rge_rx_desc *r;
   1068 	struct rge_rxq *rxq;
   1069 	bus_dmamap_t rxmap;
   1070 
   1071 	m = MCLGETI(NULL, M_DONTWAIT, NULL, RGE_JUMBO_FRAMELEN);
   1072 	if (m == NULL)
   1073 		return (ENOBUFS);
   1074 
   1075 	m->m_len = m->m_pkthdr.len = RGE_JUMBO_FRAMELEN;
   1076 
   1077 	rxq = &sc->rge_ldata.rge_rxq[idx];
   1078 	rxmap = rxq->rxq_dmamap;
   1079 
   1080 	if (bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m, BUS_DMA_NOWAIT))
   1081 		goto out;
   1082 
   1083 	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
   1084 	    BUS_DMASYNC_PREREAD);
   1085 
   1086 	/* Map the segments into RX descriptors. */
   1087 	r = &sc->rge_ldata.rge_rx_list[idx];
   1088 
   1089 	if (RGE_OWN(r)) {
   1090 		printf("%s: tried to map busy RX descriptor\n",
   1091 		    sc->sc_dev.dv_xname);
   1092 		goto out;
   1093 	}
   1094 
   1095 	rxq->rxq_mbuf = m;
   1096 
   1097 	r->rge_extsts = 0;
   1098 	r->rge_addrlo = htole32(RGE_ADDR_LO(rxmap->dm_segs[0].ds_addr));
   1099 	r->rge_addrhi = htole32(RGE_ADDR_HI(rxmap->dm_segs[0].ds_addr));
   1100 
   1101 	r->rge_cmdsts = htole32(rxmap->dm_segs[0].ds_len);
   1102 	if (idx == RGE_RX_LIST_CNT - 1)
   1103 		r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
   1104 
   1105 	r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);
   1106 
   1107 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
   1108 	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
   1109 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1110 
   1111 	return (0);
   1112 out:
   1113 	if (m != NULL)
   1114 		m_freem(m);
   1115 	return (ENOMEM);
   1116 }
   1117 
   1118 void
   1119 rge_discard_rxbuf(struct rge_softc *sc, int idx)
   1120 {
   1121 	struct rge_rx_desc *r;
   1122 
   1123 	r = &sc->rge_ldata.rge_rx_list[idx];
   1124 
   1125 	r->rge_cmdsts = htole32(RGE_JUMBO_FRAMELEN);
   1126 	r->rge_extsts = 0;
   1127 	if (idx == RGE_RX_LIST_CNT - 1)
   1128 		r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
   1129 	r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);
   1130 
   1131 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
   1132 	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
   1133 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1134 }
   1135 
   1136 int
   1137 rge_rx_list_init(struct rge_softc *sc)
   1138 {
   1139 	int i;
   1140 
   1141 	memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);
   1142 
   1143 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
   1144 		sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
   1145 		if (rge_newbuf(sc, i) == ENOBUFS)
   1146 			return (ENOBUFS);
   1147 	}
   1148 
   1149 	sc->rge_ldata.rge_rxq_prodidx = 0;
   1150 	sc->rge_head = sc->rge_tail = NULL;
   1151 
   1152 	return (0);
   1153 }
   1154 
   1155 void
   1156 rge_tx_list_init(struct rge_softc *sc)
   1157 {
   1158 	int i;
   1159 
   1160 	memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);
   1161 
   1162 	for (i = 0; i < RGE_TX_LIST_CNT; i++)
   1163 		sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
   1164 
   1165 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map, 0,
   1166 	    sc->rge_ldata.rge_tx_list_map->dm_mapsize,
   1167 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1168 
   1169 	sc->rge_ldata.rge_txq_prodidx = sc->rge_ldata.rge_txq_considx = 0;
   1170 }
   1171 
   1172 int
   1173 rge_rxeof(struct rge_softc *sc)
   1174 {
   1175 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
   1176 	struct mbuf *m;
   1177 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   1178 	struct rge_rx_desc *cur_rx;
   1179 	struct rge_rxq *rxq;
   1180 	uint32_t rxstat, extsts;
   1181 	int i, total_len, rx = 0;
   1182 
   1183 	for (i = sc->rge_ldata.rge_rxq_prodidx; ; i = RGE_NEXT_RX_DESC(i)) {
   1184 		/* Invalidate the descriptor memory. */
   1185 		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
   1186 		    i * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
   1187 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1188 
   1189 		cur_rx = &sc->rge_ldata.rge_rx_list[i];
   1190 
   1191 		if (RGE_OWN(cur_rx))
   1192 			break;
   1193 
   1194 		rxstat = letoh32(cur_rx->rge_cmdsts);
   1195 		extsts = letoh32(cur_rx->rge_extsts);
   1196 
   1197 		total_len = RGE_RXBYTES(cur_rx);
   1198 		rxq = &sc->rge_ldata.rge_rxq[i];
   1199 		m = rxq->rxq_mbuf;
   1200 		rx = 1;
   1201 
   1202 		/* Invalidate the RX mbuf and unload its map. */
   1203 		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_dmamap, 0,
   1204 		    rxq->rxq_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   1205 		bus_dmamap_unload(sc->sc_dmat, rxq->rxq_dmamap);
   1206 
   1207 		if ((rxstat & (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) !=
   1208 		    (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) {
   1209 			rge_discard_rxbuf(sc, i);
   1210 			continue;
   1211 		}
   1212 
   1213 		if (rxstat & RGE_RDCMDSTS_RXERRSUM) {
   1214 			if_statinc(ifp, if_ierrors);
   1215 			/*
   1216 			 * If this is part of a multi-fragment packet,
   1217 			 * discard all the pieces.
   1218 			 */
   1219 			 if (sc->rge_head != NULL) {
   1220 				m_freem(sc->rge_head);
   1221 				sc->rge_head = sc->rge_tail = NULL;
   1222 			}
   1223 			rge_discard_rxbuf(sc, i);
   1224 			continue;
   1225 		}
   1226 
   1227 		/*
   1228 		 * If allocating a replacement mbuf fails,
   1229 		 * reload the current one.
   1230 		 */
   1231 
   1232 		if (rge_newbuf(sc, i) == ENOBUFS) {
   1233 			if (sc->rge_head != NULL) {
   1234 				m_freem(sc->rge_head);
   1235 				sc->rge_head = sc->rge_tail = NULL;
   1236 			}
   1237 			rge_discard_rxbuf(sc, i);
   1238 			continue;
   1239 		}
   1240 
   1241 		if (sc->rge_head != NULL) {
   1242 			m->m_len = total_len;
   1243 			/*
   1244 			 * Special case: if there's 4 bytes or less
   1245 			 * in this buffer, the mbuf can be discarded:
   1246 			 * the last 4 bytes is the CRC, which we don't
   1247 			 * care about anyway.
   1248 			 */
   1249 			if (m->m_len <= ETHER_CRC_LEN) {
   1250 				sc->rge_tail->m_len -=
   1251 				    (ETHER_CRC_LEN - m->m_len);
   1252 				m_freem(m);
   1253 			} else {
   1254 				m->m_len -= ETHER_CRC_LEN;
   1255 				m->m_flags &= ~M_PKTHDR;
   1256 				sc->rge_tail->m_next = m;
   1257 			}
   1258 			m = sc->rge_head;
   1259 			sc->rge_head = sc->rge_tail = NULL;
   1260 			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
   1261 		} else
   1262 			m->m_pkthdr.len = m->m_len =
   1263 			    (total_len - ETHER_CRC_LEN);
   1264 
   1265 		/* Check IP header checksum. */
   1266 		if (!(rxstat & RGE_RDCMDSTS_IPCSUMERR) &&
   1267 		    (extsts & RGE_RDEXTSTS_IPV4))
   1268 			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
   1269 
   1270 		/* Check TCP/UDP checksum. */
   1271 		if ((extsts & (RGE_RDEXTSTS_IPV4 | RGE_RDEXTSTS_IPV6)) &&
   1272 		    (((rxstat & RGE_RDCMDSTS_TCPPKT) &&
   1273 		    !(rxstat & RGE_RDCMDSTS_TCPCSUMERR)) ||
   1274 		    ((rxstat & RGE_RDCMDSTS_UDPPKT) &&
   1275 		    !(rxstat & RGE_RDCMDSTS_UDPCSUMERR))))
   1276 			m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
   1277 			    M_UDP_CSUM_IN_OK;
   1278 
   1279 #if NVLAN > 0
   1280 		if (extsts & RGE_RDEXTSTS_VTAG) {
   1281 			m->m_pkthdr.ether_vtag =
   1282 			    ntohs(extsts & RGE_RDEXTSTS_VLAN_MASK);
   1283 			m->m_flags |= M_VLANTAG;
   1284 		}
   1285 #endif
   1286 
   1287 		ml_enqueue(&ml, m);
   1288 	}
   1289 
   1290 	sc->rge_ldata.rge_rxq_prodidx = i;
   1291 
   1292 	while ((m = ml_dequeue(&ml)) != NULL)
        		if_input(ifp, m);
   1293 
   1294 	return (rx);
   1295 }
   1296 
   1297 int
   1298 rge_txeof(struct rge_softc *sc)
   1299 {
   1300 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   1301 	struct rge_txq *txq;
   1302 	uint32_t txstat;
   1303 	int cons, idx, prod;
   1304 	int free = 0;
   1305 
   1306 	prod = sc->rge_ldata.rge_txq_prodidx;
   1307 	cons = sc->rge_ldata.rge_txq_considx;
   1308 
   1309 	while (prod != cons) {
   1310 		txq = &sc->rge_ldata.rge_txq[cons];
   1311 		idx = txq->txq_descidx;
   1312 
   1313 		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
   1314 		    idx * sizeof(struct rge_tx_desc),
   1315 		    sizeof(struct rge_tx_desc),
   1316 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1317 
   1318 		txstat = letoh32(sc->rge_ldata.rge_tx_list[idx].rge_cmdsts);
   1319 
   1320 		if (txstat & RGE_TDCMDSTS_OWN) {
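        			/* Chip still owns this descriptor; request a TX kick. */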
   1321 			free = 2;
   1322 			break;
   1323 		}
   1324 
   1325 		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap, 0,
   1326 		    txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   1327 		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
   1328 		m_freem(txq->txq_mbuf);
   1329 		txq->txq_mbuf = NULL;
   1330 
   1331 		if (txstat & (RGE_TDCMDSTS_EXCESSCOLL | RGE_TDCMDSTS_COLL))
   1332 			if_statinc(ifp, if_collisions);
   1333 		if (txstat & RGE_TDCMDSTS_TXERR)
   1334 			if_statinc(ifp, if_oerrors);
   1335 
   1336 		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
   1337 		    idx * sizeof(struct rge_tx_desc),
   1338 		    sizeof(struct rge_tx_desc),
   1339 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1340 
   1341 		cons = RGE_NEXT_TX_DESC(idx);
   1342 		free = 1;
   1343 	}
   1344 
   1345 	if (free == 0)
   1346 		return (0);
   1347 
   1348 	sc->rge_ldata.rge_txq_considx = cons;
   1349 
   1350 	if (ifp->if_flags & IFF_OACTIVE) {
   1351 		CLR(ifp->if_flags, IFF_OACTIVE);
   1352 		rge_start(ifp);	/* XXX OpenBSD restarts via ifq_restart() */
   1353 	} else if (free == 2)
        		rge_txstart(&sc->sc_task, sc);
   1354 	else
   1355 		ifp->if_timer = 0;
   1356 
   1357 	return (1);
   1358 }
   1359 
   1360 void
   1361 rge_reset(struct rge_softc *sc)
   1362 {
   1363 	int i;
   1364 
   1365 	/* Enable RXDV gate. */
   1366 	RGE_SETBIT_1(sc, RGE_PPSW, 0x08);
   1367 	DELAY(2000);
   1368 
   1369 	for (i = 0; i < 10; i++) {
   1370 		DELAY(100);
   1371 		if ((RGE_READ_1(sc, RGE_MCUCMD) & (RGE_MCUCMD_RXFIFO_EMPTY |
   1372 		    RGE_MCUCMD_TXFIFO_EMPTY)) == (RGE_MCUCMD_RXFIFO_EMPTY |
   1373 		    RGE_MCUCMD_TXFIFO_EMPTY))
   1374 			break;
   1375 	}
   1376 
   1377 	/* Soft reset. */
   1378 	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_RESET);
   1379 
   1380 	for (i = 0; i < RGE_TIMEOUT; i++) {
   1381 		DELAY(100);
   1382 		if (!(RGE_READ_1(sc, RGE_CMD) & RGE_CMD_RESET))
   1383 			break;
   1384 	}
   1385 	if (i == RGE_TIMEOUT)
   1386 		printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);
   1387 }
   1388 
   1389 void
   1390 rge_iff(struct rge_softc *sc)
   1391 {
   1392 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   1393 	struct ethercom *ac = &sc->sc_ec;
   1394 	struct ether_multi *enm;
   1395 	struct ether_multistep step;
   1396 	uint32_t hashes[2];
   1397 	uint32_t rxfilt;
   1398 	int h = 0;
   1399 
   1400 	rxfilt = RGE_READ_4(sc, RGE_RXCFG);
   1401 	rxfilt &= ~(RGE_RXCFG_ALLPHYS | RGE_RXCFG_MULTI);
   1402 	ifp->if_flags &= ~IFF_ALLMULTI;
   1403 
   1404 	/*
   1405 	 * Always accept frames destined to our station address.
   1406 	 * Always accept broadcast frames.
   1407 	 */
   1408 	rxfilt |= RGE_RXCFG_INDIV | RGE_RXCFG_BROAD;
   1409 
   1410 	if (ifp->if_flags & IFF_PROMISC) {
   1411 		ifp->if_flags |= IFF_ALLMULTI;
   1412 		rxfilt |= RGE_RXCFG_MULTI | RGE_RXCFG_ALLPHYS;
   1413 		hashes[0] = hashes[1] = 0xffffffff;
   1414 	} else {
   1415 		rxfilt |= RGE_RXCFG_MULTI;
   1416 		/* Program new filter. */
   1417 		memset(hashes, 0, sizeof(hashes));
   1418 
   1419 		ETHER_FIRST_MULTI(step, ac, enm);
   1420 		while (enm != NULL) {
   1421 			/*
   1422 			 * NetBSD's ethercom has no ac_multirangecnt;
   1423 			 * address ranges cannot be hashed, so fall back
   1424 			 * to accepting all multicast packets.
   1425 			 */
   1426 			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
   1427 			    ETHER_ADDR_LEN) != 0) {
   1428 				ifp->if_flags |= IFF_ALLMULTI;
   1429 				hashes[0] = hashes[1] = 0xffffffff;
   1430 				break;
   1431 			}
   1432 
   1433 			h = ether_crc32_be(enm->enm_addrlo,
        			    ETHER_ADDR_LEN) >> 26;
        
        			if (h < 32)
        				hashes[0] |= (1 << h);
        			else
        				hashes[1] |= (1 << (h - 32));
        
        			ETHER_NEXT_MULTI(step, enm);
        		}
        	}
   1434 
   1435 	RGE_WRITE_4(sc, RGE_RXCFG, rxfilt);
   1436 	RGE_WRITE_4(sc, RGE_MAR0, bswap32(hashes[1]));
   1437 	RGE_WRITE_4(sc, RGE_MAR4, bswap32(hashes[0]));
   1438 }
   1439 
   1440 void
   1441 rge_set_phy_power(struct rge_softc *sc, int on)
   1442 {
   1443 	int i;
   1444 
   1445 	if (on) {
   1446 		RGE_SETBIT_1(sc, RGE_PMCH, 0xc0);
   1447 
   1448 		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN);
   1449 
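        		/* Poll until the PHY reports the LAN-on state (3). */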
   1450 		for (i = 0; i < RGE_TIMEOUT; i++) {
   1451 			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 3)
   1452 				break;
   1453 			DELAY(1000);
   1454 		}
   1455 	} else
   1456 		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_PDOWN);
   1457 }
   1458 
   1459 void
   1460 rge_phy_config(struct rge_softc *sc)
   1461 {
   1462 	uint16_t mcode_ver, val;
   1463 	int i;
   1464 	static const uint16_t mac_cfg3_a438_value[] =
   1465 	    { 0x0043, 0x00a7, 0x00d6, 0x00ec, 0x00f6, 0x00fb, 0x00fd, 0x00ff,
   1466 	      0x00bb, 0x0058, 0x0029, 0x0013, 0x0009, 0x0004, 0x0002 };
   1467 
   1468 	static const uint16_t mac_cfg3_b88e_value[] =
   1469 	    { 0xc091, 0x6e12, 0xc092, 0x1214, 0xc094, 0x1516, 0xc096, 0x171b,
   1470 	      0xc098, 0x1b1c, 0xc09a, 0x1f1f, 0xc09c, 0x2021, 0xc09e, 0x2224,
   1471 	      0xc0a0, 0x2424, 0xc0a2, 0x2424, 0xc0a4, 0x2424, 0xc018, 0x0af2,
   1472 	      0xc01a, 0x0d4a, 0xc01c, 0x0f26, 0xc01e, 0x118d, 0xc020, 0x14f3,
   1473 	      0xc022, 0x175a, 0xc024, 0x19c0, 0xc026, 0x1c26, 0xc089, 0x6050,
   1474 	      0xc08a, 0x5f6e, 0xc08c, 0x6e6e, 0xc08e, 0x6e6e, 0xc090, 0x6e12 };
   1475 
   1476 	/* Read microcode version. */
   1477 	rge_write_phy_ocp(sc, 0xa436, 0x801e);
   1478 	mcode_ver = rge_read_phy_ocp(sc, 0xa438);
   1479 
   1480 	if (sc->rge_type == MAC_CFG2) {
   1481 		for (i = 0; i < nitems(rtl8125_mac_cfg2_ephy); i++) {
   1482 			rge_write_ephy(sc, rtl8125_mac_cfg2_ephy[i].reg,
   1483 			    rtl8125_mac_cfg2_ephy[i].val);
   1484 		}
   1485 
   1486 		if (mcode_ver != RGE_MAC_CFG2_MCODE_VER) {
   1487 			/* Disable PHY config. */
   1488 			RGE_CLRBIT_1(sc, 0xf2, 0x20);
   1489 			DELAY(1000);
   1490 
   1491 			rge_patch_phy_mcu(sc, 1);
   1492 
   1493 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
   1494 			rge_write_phy_ocp(sc, 0xa438, 0x8600);
   1495 			rge_write_phy_ocp(sc, 0xa436, 0xb82e);
   1496 			rge_write_phy_ocp(sc, 0xa438, 0x0001);
   1497 
   1498 			RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
   1499 			for (i = 0; i < nitems(rtl8125_mac_cfg2_mcu); i++) {
   1500 				rge_write_phy_ocp(sc,
   1501 				    rtl8125_mac_cfg2_mcu[i].reg,
   1502 				    rtl8125_mac_cfg2_mcu[i].val);
   1503 			}
   1504 			RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);
   1505 
   1506 			rge_write_phy_ocp(sc, 0xa436, 0);
   1507 			rge_write_phy_ocp(sc, 0xa438, 0);
   1508 			RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
   1509 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
   1510 			rge_write_phy_ocp(sc, 0xa438, 0);
   1511 
   1512 			rge_patch_phy_mcu(sc, 0);
   1513 
   1514 			/* Enable PHY config. */
   1515 			RGE_SETBIT_1(sc, 0xf2, 0x20);
   1516 
   1517 			/* Write microcode version. */
   1518 			rge_write_phy_ocp(sc, 0xa436, 0x801e);
   1519 			rge_write_phy_ocp(sc, 0xa438, RGE_MAC_CFG2_MCODE_VER);
   1520 		}
   1521 
   1522 		val = rge_read_phy_ocp(sc, 0xad40) & ~0x03ff;
   1523 		rge_write_phy_ocp(sc, 0xad40, val | 0x0084);
   1524 		RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
   1525 		val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
   1526 		rge_write_phy_ocp(sc, 0xad16, val | 0x0006);
   1527 		val = rge_read_phy_ocp(sc, 0xad32) & ~0x03ff;
   1528 		rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
   1529 		RGE_PHY_CLRBIT(sc, 0xac08, 0x1100);
   1530 		val = rge_read_phy_ocp(sc, 0xac8a) & ~0xf000;
   1531 		rge_write_phy_ocp(sc, 0xac8a, val | 0x7000);
   1532 		RGE_PHY_SETBIT(sc, 0xad18, 0x0400);
   1533 		RGE_PHY_SETBIT(sc, 0xad1a, 0x03ff);
   1534 		RGE_PHY_SETBIT(sc, 0xad1c, 0x03ff);
   1535 
   1536 		rge_write_phy_ocp(sc, 0xa436, 0x80ea);
   1537 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1538 		rge_write_phy_ocp(sc, 0xa438, val | 0xc400);
   1539 		rge_write_phy_ocp(sc, 0xa436, 0x80eb);
   1540 		val = rge_read_phy_ocp(sc, 0xa438) & ~0x0700;
   1541 		rge_write_phy_ocp(sc, 0xa438, val | 0x0300);
   1542 		rge_write_phy_ocp(sc, 0xa436, 0x80f8);
   1543 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1544 		rge_write_phy_ocp(sc, 0xa438, val | 0x1c00);
   1545 		rge_write_phy_ocp(sc, 0xa436, 0x80f1);
   1546 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1547 		rge_write_phy_ocp(sc, 0xa438, val | 0x3000);
   1548 		rge_write_phy_ocp(sc, 0xa436, 0x80fe);
   1549 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1550 		rge_write_phy_ocp(sc, 0xa438, val | 0xa500);
   1551 		rge_write_phy_ocp(sc, 0xa436, 0x8102);
   1552 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1553 		rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
   1554 		rge_write_phy_ocp(sc, 0xa436, 0x8105);
   1555 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1556 		rge_write_phy_ocp(sc, 0xa438, val | 0x3300);
   1557 		rge_write_phy_ocp(sc, 0xa436, 0x8100);
   1558 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1559 		rge_write_phy_ocp(sc, 0xa438, val | 0x7000);
   1560 		rge_write_phy_ocp(sc, 0xa436, 0x8104);
   1561 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1562 		rge_write_phy_ocp(sc, 0xa438, val | 0xf000);
   1563 		rge_write_phy_ocp(sc, 0xa436, 0x8106);
   1564 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1565 		rge_write_phy_ocp(sc, 0xa438, val | 0x6500);
   1566 		rge_write_phy_ocp(sc, 0xa436, 0x80dc);
   1567 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1568 		rge_write_phy_ocp(sc, 0xa438, val | 0xed00);
   1569 		rge_write_phy_ocp(sc, 0xa436, 0x80df);
   1570 		RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
   1571 		rge_write_phy_ocp(sc, 0xa436, 0x80e1);
   1572 		RGE_PHY_CLRBIT(sc, 0xa438, 0x0100);
   1573 		val = rge_read_phy_ocp(sc, 0xbf06) & ~0x003f;
   1574 		rge_write_phy_ocp(sc, 0xbf06, val | 0x0038);
   1575 		rge_write_phy_ocp(sc, 0xa436, 0x819f);
   1576 		rge_write_phy_ocp(sc, 0xa438, 0xd0b6);
   1577 		rge_write_phy_ocp(sc, 0xbc34, 0x5555);
   1578 		val = rge_read_phy_ocp(sc, 0xbf0a) & ~0x0e00;
   1579 		rge_write_phy_ocp(sc, 0xbf0a, val | 0x0a00);
   1580 		RGE_PHY_CLRBIT(sc, 0xa5c0, 0x0400);
   1581 		RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
   1582 	} else {
		for (i = 0; i < nitems(rtl8125_mac_cfg3_ephy); i++)
			rge_write_ephy(sc, rtl8125_mac_cfg3_ephy[i].reg,
			    rtl8125_mac_cfg3_ephy[i].val);

		if (mcode_ver != RGE_MAC_CFG3_MCODE_VER) {
			/* Disable PHY config. */
			RGE_CLRBIT_1(sc, 0xf2, 0x20);
			DELAY(1000);

			rge_patch_phy_mcu(sc, 1);

			rge_write_phy_ocp(sc, 0xa436, 0x8024);
			rge_write_phy_ocp(sc, 0xa438, 0x8601);
			rge_write_phy_ocp(sc, 0xa436, 0xb82e);
			rge_write_phy_ocp(sc, 0xa438, 0x0001);

			RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
			for (i = 0; i < nitems(rtl8125_mac_cfg3_mcu); i++) {
				rge_write_phy_ocp(sc,
				    rtl8125_mac_cfg3_mcu[i].reg,
				    rtl8125_mac_cfg3_mcu[i].val);
			}
			RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);

			rge_write_phy_ocp(sc, 0xa436, 0);
			rge_write_phy_ocp(sc, 0xa438, 0);
			RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
			rge_write_phy_ocp(sc, 0xa436, 0x8024);
			rge_write_phy_ocp(sc, 0xa438, 0);

			rge_patch_phy_mcu(sc, 0);

			/* Enable PHY config. */
			RGE_SETBIT_1(sc, 0xf2, 0x20);

			/* Write microcode version. */
			rge_write_phy_ocp(sc, 0xa436, 0x801e);
			rge_write_phy_ocp(sc, 0xa438, RGE_MAC_CFG3_MCODE_VER);
		}

		RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
		val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
		rge_write_phy_ocp(sc, 0xad16, val | 0x03ff);
		val = rge_read_phy_ocp(sc, 0xad32) & ~0x003f;
		rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
		RGE_PHY_CLRBIT(sc, 0xac08, 0x1000);
		RGE_PHY_CLRBIT(sc, 0xac08, 0x0100);
		val = rge_read_phy_ocp(sc, 0xacc0) & ~0x0003;
		rge_write_phy_ocp(sc, 0xacc0, val | 0x0002);
		val = rge_read_phy_ocp(sc, 0xad40) & ~0x00e0;
		rge_write_phy_ocp(sc, 0xad40, val | 0x0040);
		val = rge_read_phy_ocp(sc, 0xad40) & ~0x0007;
		rge_write_phy_ocp(sc, 0xad40, val | 0x0004);
		RGE_PHY_CLRBIT(sc, 0xac14, 0x0080);
		RGE_PHY_CLRBIT(sc, 0xac80, 0x0300);
		val = rge_read_phy_ocp(sc, 0xac5e) & ~0x0007;
		rge_write_phy_ocp(sc, 0xac5e, val | 0x0002);
		rge_write_phy_ocp(sc, 0xad4c, 0x00a8);
		rge_write_phy_ocp(sc, 0xac5c, 0x01ff);
		val = rge_read_phy_ocp(sc, 0xac8a) & ~0x00f0;
		rge_write_phy_ocp(sc, 0xac8a, val | 0x0030);
		rge_write_phy_ocp(sc, 0xb87c, 0x80a2);
		rge_write_phy_ocp(sc, 0xb87e, 0x0153);
		rge_write_phy_ocp(sc, 0xb87c, 0x809c);
		rge_write_phy_ocp(sc, 0xb87e, 0x0153);

		rge_write_phy_ocp(sc, 0xa436, 0x81b3);
		for (i = 0; i < nitems(mac_cfg3_a438_value); i++)
			rge_write_phy_ocp(sc, 0xa438, mac_cfg3_a438_value[i]);
		for (i = 0; i < 26; i++)
			rge_write_phy_ocp(sc, 0xa438, 0);
		rge_write_phy_ocp(sc, 0xa436, 0x8257);
		rge_write_phy_ocp(sc, 0xa438, 0x020f);
		rge_write_phy_ocp(sc, 0xa436, 0x80ea);
		rge_write_phy_ocp(sc, 0xa438, 0x7843);

		rge_patch_phy_mcu(sc, 1);
		RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
		RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
		for (i = 0; i < nitems(mac_cfg3_b88e_value); i += 2) {
			rge_write_phy_ocp(sc, 0xb88e, mac_cfg3_b88e_value[i]);
			rge_write_phy_ocp(sc, 0xb890,
			    mac_cfg3_b88e_value[i + 1]);
		}
		RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
		rge_patch_phy_mcu(sc, 0);

		RGE_PHY_SETBIT(sc, 0xd068, 0x2000);
		rge_write_phy_ocp(sc, 0xa436, 0x81a2);
		RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
		val = rge_read_phy_ocp(sc, 0xb54c) & ~0xff00;
		rge_write_phy_ocp(sc, 0xb54c, val | 0xdb00);
		RGE_PHY_CLRBIT(sc, 0xa454, 0x0001);
		RGE_PHY_SETBIT(sc, 0xa5d4, 0x0020);
		RGE_PHY_CLRBIT(sc, 0xad4e, 0x0010);
		RGE_PHY_CLRBIT(sc, 0xa86a, 0x0001);
		RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
	}

	/* Disable EEE. */
	RGE_MAC_CLRBIT(sc, 0xe040, 0x0003);
	RGE_MAC_CLRBIT(sc, 0xeb62, 0x0006);
	RGE_PHY_CLRBIT(sc, 0xa432, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xa5d0, 0x0006);
	RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xa6d8, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xa428, 0x0080);
	RGE_PHY_CLRBIT(sc, 0xa4a2, 0x0200);

	rge_patch_phy_mcu(sc, 1);
	RGE_MAC_CLRBIT(sc, 0xe052, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xa442, 0x3000);
	RGE_PHY_CLRBIT(sc, 0xa430, 0x8000);
	rge_patch_phy_mcu(sc, 0);
}

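/*
 * rge_set_macaddr: program the station address into MAC0/MAC4.  The
 * registers are write-protected, so config writes are unlocked via
 * EECMD around the update.
 */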
void
rge_set_macaddr(struct rge_softc *sc, const uint8_t *addr)
{
	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
	RGE_WRITE_4(sc, RGE_MAC0,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	RGE_WRITE_4(sc, RGE_MAC4,
	    addr[5] <<  8 | addr[4]);
	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
}

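/*
 * rge_get_macaddr: fetch the station address with one 4-byte and one
 * 2-byte register read.  This assumes the destination buffer is
 * sufficiently aligned for the casts below.
 */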
void
rge_get_macaddr(struct rge_softc *sc, uint8_t *addr)
{
	*(uint32_t *)&addr[0] = RGE_READ_4(sc, RGE_ADDR0);
	*(uint16_t *)&addr[4] = RGE_READ_2(sc, RGE_ADDR1);
}

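/*
 * rge_hw_init: basic MAC bring-up - clear PME status, zero the MAC MCU
 * patch registers, load the default bytecode patches on MAC_CFG3 parts
 * and turn off PHY power saving.
 */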
void
rge_hw_init(struct rge_softc *sc)
{
	int i;

	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
	RGE_CLRBIT_1(sc, 0xf1, 0x80);

	/* Disable UPS. */
	RGE_MAC_CLRBIT(sc, 0xd40a, 0x0010);

	/* Configure MAC MCU. */
	rge_write_mac_ocp(sc, 0xfc38, 0);

	for (i = 0xfc28; i < 0xfc38; i += 2)
		rge_write_mac_ocp(sc, i, 0);

	DELAY(3000);
	rge_write_mac_ocp(sc, 0xfc26, 0);

	if (sc->rge_type == MAC_CFG3) {
		for (i = 0; i < nitems(rtl8125_def_bps); i++)
			rge_write_mac_ocp(sc, rtl8125_def_bps[i].reg,
			    rtl8125_def_bps[i].val);
	}

	/* Disable PHY power saving. */
	rge_disable_phy_ocp_pwrsave(sc);

	/* Set PCIe uncorrectable error status. */
	rge_write_csi(sc, 0x108,
	    rge_read_csi(sc, 0x108) | 0x00100000);
}

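/*
 * rge_disable_phy_ocp_pwrsave: pin PHY OCP register 0xc416 to 0x0500,
 * which appears to be the vendor's "power saving off" value; the
 * write must be bracketed by PHY MCU patch mode.
 */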
void
rge_disable_phy_ocp_pwrsave(struct rge_softc *sc)
{
	if (rge_read_phy_ocp(sc, 0xc416) != 0x0500) {
		rge_patch_phy_mcu(sc, 1);
		rge_write_phy_ocp(sc, 0xc416, 0);
		rge_write_phy_ocp(sc, 0xc416, 0x0500);
		rge_patch_phy_mcu(sc, 0);
	}
}

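/*
 * rge_patch_phy_mcu: enter (set) or leave (clear) PHY MCU patch mode
 * through OCP register 0xb820, then poll 0xb800 for the handshake bit
 * (0x0040), warning on timeout.
 */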
void
rge_patch_phy_mcu(struct rge_softc *sc, int set)
{
	uint16_t val;
	int i;

	if (set)
		RGE_PHY_SETBIT(sc, 0xb820, 0x0010);
	else
		RGE_PHY_CLRBIT(sc, 0xb820, 0x0010);

	for (i = 0; i < 1000; i++) {
		val = rge_read_phy_ocp(sc, 0xb800) & 0x0040;
		DELAY(100);
		if (val == 0x0040)
			break;
	}
	if (i == 1000)
		printf("%s: timeout waiting to patch phy mcu\n",
		    sc->sc_dev.dv_xname);
}

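/*
 * rge_add_media_types: register every media type the RTL8125 supports,
 * from 10BASE-T up to 2.5GBASE-T.
 */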
void
rge_add_media_types(struct rge_softc *sc)
{
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX, 0, NULL);
}

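/*
 * rge_config_imtype: pick the interrupt sources and ack masks for the
 * chosen interrupt-mitigation scheme: per-packet interrupts
 * (RGE_IMTYPE_NONE) or timer-driven simulated moderation
 * (RGE_IMTYPE_SIM).
 */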
void
rge_config_imtype(struct rge_softc *sc, int imtype)
{
	switch (imtype) {
	case RGE_IMTYPE_NONE:
		sc->rge_intrs = RGE_INTRS;
		sc->rge_rx_ack = RGE_ISR_RX_OK | RGE_ISR_RX_DESC_UNAVAIL |
		    RGE_ISR_RX_FIFO_OFLOW;
		sc->rge_tx_ack = RGE_ISR_TX_OK;
		break;
	case RGE_IMTYPE_SIM:
		sc->rge_intrs = RGE_INTRS_TIMER;
		sc->rge_rx_ack = RGE_ISR_PCS_TIMEOUT;
		sc->rge_tx_ack = RGE_ISR_PCS_TIMEOUT;
		break;
	default:
		panic("%s: unknown imtype %d", sc->sc_dev.dv_xname, imtype);
	}
}

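/*
 * Simulated interrupt moderation: RGE_TIMERINT holds the timer period
 * (0x2600 is carried over from the OpenBSD driver) and the write to
 * RGE_TIMERCNT appears to (re)arm the countdown; clearing RGE_TIMERINT
 * disables the timer interrupt again.
 */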
void
rge_disable_sim_im(struct rge_softc *sc)
{
	RGE_WRITE_4(sc, RGE_TIMERINT, 0);
	sc->rge_timerintr = 0;
}

void
rge_setup_sim_im(struct rge_softc *sc)
{
	RGE_WRITE_4(sc, RGE_TIMERINT, 0x2600);
	RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
	sc->rge_timerintr = 1;
}

void
rge_setup_intr(struct rge_softc *sc, int imtype)
{
	rge_config_imtype(sc, imtype);

	/* Enable interrupts. */
	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);

	switch (imtype) {
	case RGE_IMTYPE_NONE:
		rge_disable_sim_im(sc);
		break;
	case RGE_IMTYPE_SIM:
		rge_setup_sim_im(sc);
		break;
	default:
		panic("%s: unknown imtype %d", sc->sc_dev.dv_xname, imtype);
	}
}

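/*
 * rge_exit_oob: take the NIC out of out-of-band (management firmware)
 * mode so the driver owns it: reset the receive filter, disable
 * RealWoW and OOB, and wait for the MAC/PHY handshake to settle.
 */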
void
rge_exit_oob(struct rge_softc *sc)
{
	int i;

	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
	    RGE_RXCFG_ERRPKT);

	/* Disable RealWoW. */
	rge_write_mac_ocp(sc, 0xc0bc, 0x00ff);

	rge_reset(sc);

	/* Disable OOB. */
	RGE_CLRBIT_1(sc, RGE_MCUCMD, RGE_MCUCMD_IS_OOB);

	RGE_MAC_CLRBIT(sc, 0xe8de, 0x4000);

	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
			break;
	}

	rge_write_mac_ocp(sc, 0xc0aa, 0x07d0);
	rge_write_mac_ocp(sc, 0xc0a6, 0x0150);
	rge_write_mac_ocp(sc, 0xc01e, 0x5555);

	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
			break;
	}

	if (rge_read_mac_ocp(sc, 0xd42c) & 0x0100) {
		for (i = 0; i < RGE_TIMEOUT; i++) {
			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 2)
				break;
			DELAY(1000);
		}
		RGE_MAC_CLRBIT(sc, 0xd408, 0x0100);
		RGE_PHY_CLRBIT(sc, 0xa468, 0x000a);
	}
}

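/*
 * CSI access: the CSIAR/CSIDR register pair gives indirect access to
 * PCIe configuration space (e.g. offset 0x108, poked in rge_hw_init()).
 * For writes the busy bit clears on completion; for reads it is set
 * once the data is valid.
 */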
void
rge_write_csi(struct rge_softc *sc, uint32_t reg, uint32_t val)
{
	int i;

	RGE_WRITE_4(sc, RGE_CSIDR, val);
	RGE_WRITE_4(sc, RGE_CSIAR, (1 << 16) | (reg & RGE_CSIAR_ADDR_MASK) |
	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT) | RGE_CSIAR_BUSY);

	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (!(RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY))
			break;
	}

	DELAY(20);
}

uint32_t
rge_read_csi(struct rge_softc *sc, uint32_t reg)
{
	int i;

	RGE_WRITE_4(sc, RGE_CSIAR, (1 << 16) | (reg & RGE_CSIAR_ADDR_MASK) |
	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT));

	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY)
			break;
	}

	DELAY(20);

	return (RGE_READ_4(sc, RGE_CSIDR));
}

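/*
 * MAC OCP accessors: 16-bit accesses through the MACOCP mailbox.  The
 * register offset is halved because OCP registers are presumably
 * word-addressed.
 */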
void
rge_write_mac_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
{
	uint32_t tmp;

	tmp = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
	tmp += val;
	tmp |= RGE_MACOCP_BUSY;
	RGE_WRITE_4(sc, RGE_MACOCP, tmp);
}

uint16_t
rge_read_mac_ocp(struct rge_softc *sc, uint16_t reg)
{
	uint32_t val;

	val = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
	RGE_WRITE_4(sc, RGE_MACOCP, val);

	return (RGE_READ_4(sc, RGE_MACOCP) & RGE_MACOCP_DATA_MASK);
}

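/*
 * rge_write_ephy: write a PCIe EPHY register through the EPHYAR
 * mailbox, polling until the busy bit clears.
 */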
void
rge_write_ephy(struct rge_softc *sc, uint16_t reg, uint16_t val)
{
	uint32_t tmp;
	int i;

	tmp = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
	tmp |= RGE_EPHYAR_BUSY | (val & RGE_EPHYAR_DATA_MASK);
	RGE_WRITE_4(sc, RGE_EPHYAR, tmp);

	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (!(RGE_READ_4(sc, RGE_EPHYAR) & RGE_EPHYAR_BUSY))
			break;
	}

	DELAY(20);
}

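/*
 * rge_write_phy: MII-style PHY register write.  Without an explicit
 * PHY address the register is translated into the flat PHY OCP space,
 * eight registers per page starting at RGE_PHYBASE.
 */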
void
rge_write_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg, uint16_t val)
{
	uint16_t off, phyaddr;

	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
	phyaddr <<= 4;

	off = addr ? reg : 0x10 + (reg % 8);

	phyaddr += (off - 16) << 1;

	rge_write_phy_ocp(sc, phyaddr, val);
}

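/*
 * rge_write_phy_ocp: 16-bit PHY OCP write; the busy bit clears once
 * the write has completed.
 */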
void
rge_write_phy_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
{
	uint32_t tmp;
	int i;

	tmp = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
	tmp |= RGE_PHYOCP_BUSY | val;
	RGE_WRITE_4(sc, RGE_PHYOCP, tmp);

	for (i = 0; i < RGE_TIMEOUT; i++) {
		DELAY(1);
		if (!(RGE_READ_4(sc, RGE_PHYOCP) & RGE_PHYOCP_BUSY))
			break;
	}
}

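/*
 * rge_read_phy_ocp: 16-bit PHY OCP read.  Note the inverted handshake
 * relative to the write path: here the busy bit being set is treated
 * as read-data-valid.
 */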
uint16_t
rge_read_phy_ocp(struct rge_softc *sc, uint16_t reg)
{
	uint32_t val;
	int i;

	val = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
	RGE_WRITE_4(sc, RGE_PHYOCP, val);

	for (i = 0; i < RGE_TIMEOUT; i++) {
		DELAY(1);
		val = RGE_READ_4(sc, RGE_PHYOCP);
		if (val & RGE_PHYOCP_BUSY)
			break;
	}

	return (val & RGE_PHYOCP_DATA_MASK);
}

int
rge_get_link_status(struct rge_softc *sc)
{
	return ((RGE_READ_2(sc, RGE_PHYSTAT) & RGE_PHYSTAT_LINK) ? 1 : 0);
}

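/*
 * rge_txstart: workqueue callback that rings the TX start doorbell.
 */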
void
rge_txstart(struct work *wk, void *arg)
{
	struct rge_softc *sc = arg;

	RGE_WRITE_2(sc, RGE_TXSTART, RGE_TXSTART_START);
}

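/*
 * rge_tick: periodic (1 Hz) link-state poll, re-armed through the
 * callout(9) API; this assumes sc->sc_timeout was initialized as a
 * callout at attach time.
 */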
void
rge_tick(void *arg)
{
	struct rge_softc *sc = arg;
	int s;

	s = splnet();
	rge_link_state(sc);
	splx(s);

	callout_schedule(&sc->sc_timeout, hz);
}

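/*
 * rge_link_state: reflect the PHY link status into the interface and
 * report transitions to the network stack.
 */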
void
rge_link_state(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	int link = LINK_STATE_DOWN;

	if (rge_get_link_status(sc))
		link = LINK_STATE_UP;

	if (ifp->if_link_state != link) {
		ifp->if_link_state = link;
		if_link_state_change(ifp, link);
	}
}