      1 /*	$NetBSD: if_rge.c,v 1.25 2022/12/21 05:19:15 nonaka Exp $	*/
      2 /*	$OpenBSD: if_rge.c,v 1.9 2020/12/12 11:48:53 jan Exp $	*/
      3 
      4 /*
      5  * Copyright (c) 2019, 2020 Kevin Lo <kevlo (at) openbsd.org>
      6  *
      7  * Permission to use, copy, modify, and distribute this software for any
      8  * purpose with or without fee is hereby granted, provided that the above
      9  * copyright notice and this permission notice appear in all copies.
     10  *
     11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
     12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
     13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
     14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
     15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
     16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
     17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
     18  */
     19 
     20 #include <sys/cdefs.h>
     21 __KERNEL_RCSID(0, "$NetBSD: if_rge.c,v 1.25 2022/12/21 05:19:15 nonaka Exp $");
     22 
     23 #include <sys/types.h>
     24 
     25 #include <sys/param.h>
     26 #include <sys/systm.h>
     27 #include <sys/sockio.h>
     28 #include <sys/mbuf.h>
     29 #include <sys/kernel.h>
     30 #include <sys/socket.h>
     31 #include <sys/device.h>
     32 #include <sys/endian.h>
     33 #include <sys/callout.h>
     34 #include <sys/workqueue.h>
     35 
     36 #include <net/if.h>
     37 
     38 #include <net/if_dl.h>
     39 #include <net/if_ether.h>
     40 
     41 #include <net/if_media.h>
     42 
     43 #include <netinet/in.h>
     45 
     46 #include <net/bpf.h>
     47 
     48 #include <sys/bus.h>
     49 #include <machine/intr.h>
     50 
     51 #include <dev/mii/mii.h>
     52 
     53 #include <dev/pci/pcivar.h>
     54 #include <dev/pci/pcireg.h>
     55 #include <dev/pci/pcidevs.h>
     56 
     57 #include <dev/pci/if_rgereg.h>
     58 
     59 #ifdef __NetBSD__
     60 #define letoh32 	htole32
     61 #define nitems(x) 	__arraycount(x)
     62 
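        /*
         * OpenBSD compatibility shims.  letoh32 is OpenBSD's spelling of
         * le32toh(); htole32() performs the identical byte swap, so the
         * alias is safe.  MCLGETL() below emulates OpenBSD's
         * mbuf-with-external-storage allocator using MEXTMALLOC(); the
         * softc argument is unused here.
         */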
     63 static struct mbuf *
     64 MCLGETL(struct rge_softc *sc __unused, int how,
     65     u_int size)
     66 {
     67 	struct mbuf *m;
     68 
     69 	MGETHDR(m, how, MT_DATA);
     70 	if (m == NULL)
     71 		return NULL;
     72 
     73 	MEXTMALLOC(m, size, how);
     74 	if ((m->m_flags & M_EXT) == 0) {
     75 		m_freem(m);
     76 		return NULL;
     77 	}
     78 	return m;
     79 }
     80 
     81 #ifdef NET_MPSAFE
     82 #define 	RGE_MPSAFE	1
     83 #define 	CALLOUT_FLAGS	CALLOUT_MPSAFE
     84 #else
     85 #define 	CALLOUT_FLAGS	0
     86 #endif
     87 #endif
     88 
     89 #ifdef RGE_DEBUG
     90 #define DPRINTF(x)	do { if (rge_debug > 0) printf x; } while (0)
     91 int rge_debug = 0;
     92 #else
     93 #define DPRINTF(x)
     94 #endif
     95 
     96 static int		rge_match(device_t, cfdata_t, void *);
     97 static void		rge_attach(device_t, device_t, void *);
     98 int		rge_intr(void *);
     99 int		rge_encap(struct rge_softc *, struct mbuf *, int);
    100 int		rge_ioctl(struct ifnet *, u_long, void *);
    101 void		rge_start(struct ifnet *);
    102 void		rge_watchdog(struct ifnet *);
    103 int		rge_init(struct ifnet *);
    104 void		rge_stop(struct ifnet *, int);
    105 int		rge_ifmedia_upd(struct ifnet *);
    106 void		rge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
    107 int		rge_allocmem(struct rge_softc *);
    108 int		rge_newbuf(struct rge_softc *, int);
    109 void		rge_discard_rxbuf(struct rge_softc *, int);
    110 int		rge_rx_list_init(struct rge_softc *);
    111 void		rge_tx_list_init(struct rge_softc *);
    112 int		rge_rxeof(struct rge_softc *);
    113 int		rge_txeof(struct rge_softc *);
    114 void		rge_reset(struct rge_softc *);
    115 void		rge_iff(struct rge_softc *);
    116 void		rge_set_phy_power(struct rge_softc *, int);
    117 void		rge_phy_config(struct rge_softc *);
    118 void		rge_phy_config_mac_cfg2(struct rge_softc *);
    119 void		rge_phy_config_mac_cfg3(struct rge_softc *);
    120 void		rge_phy_config_mac_cfg4(struct rge_softc *);
    121 void		rge_phy_config_mac_cfg5(struct rge_softc *);
    122 void		rge_phy_config_mcu(struct rge_softc *, uint16_t);
    123 void		rge_set_macaddr(struct rge_softc *, const uint8_t *);
    124 void		rge_get_macaddr(struct rge_softc *, uint8_t *);
    125 void		rge_hw_init(struct rge_softc *);
    126 void		rge_disable_phy_ocp_pwrsave(struct rge_softc *);
    127 void		rge_patch_phy_mcu(struct rge_softc *, int);
    128 void		rge_add_media_types(struct rge_softc *);
    129 void		rge_config_imtype(struct rge_softc *, int);
    130 void		rge_disable_hw_im(struct rge_softc *);
    131 void		rge_disable_sim_im(struct rge_softc *);
    132 void		rge_setup_sim_im(struct rge_softc *);
    133 void		rge_setup_intr(struct rge_softc *, int);
    134 void		rge_exit_oob(struct rge_softc *);
    135 void		rge_write_csi(struct rge_softc *, uint32_t, uint32_t);
    136 uint32_t	rge_read_csi(struct rge_softc *, uint32_t);
    137 void		rge_write_mac_ocp(struct rge_softc *, uint16_t, uint16_t);
    138 uint16_t	rge_read_mac_ocp(struct rge_softc *, uint16_t);
    139 void		rge_write_ephy(struct rge_softc *, uint16_t, uint16_t);
    140 uint16_t	rge_read_ephy(struct rge_softc *, uint16_t);
    141 void		rge_write_phy(struct rge_softc *, uint16_t, uint16_t, uint16_t);
    142 uint16_t	rge_read_phy(struct rge_softc *, uint16_t, uint16_t);
    143 void		rge_write_phy_ocp(struct rge_softc *, uint16_t, uint16_t);
    144 uint16_t	rge_read_phy_ocp(struct rge_softc *, uint16_t);
    145 int		rge_get_link_status(struct rge_softc *);
    146 void		rge_txstart(struct work *, void *);
    147 void		rge_tick(void *);
    148 void		rge_link_state(struct rge_softc *);
    149 
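        /*
         * Register/value tables for the per-MAC-revision MCU patches.  The
         * RTL8125_MAC_CFG*_MCU macros come from if_rgereg.h and expand to
         * undocumented magic, presumably lifted from Realtek's vendor driver.
         */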
    150 static const struct {
    151 	uint16_t reg;
    152 	uint16_t val;
    153 }  rtl8125_mac_cfg2_mcu[] = {
    154 	RTL8125_MAC_CFG2_MCU
    155 }, rtl8125_mac_cfg3_mcu[] = {
    156 	RTL8125_MAC_CFG3_MCU
    157 }, rtl8125_mac_cfg4_mcu[] = {
    158 	RTL8125_MAC_CFG4_MCU
    159 }, rtl8125_mac_cfg5_mcu[] = {
    160 	RTL8125_MAC_CFG5_MCU
    161 };
    162 
    163 CFATTACH_DECL_NEW(rge, sizeof(struct rge_softc), rge_match, rge_attach,
     164 		NULL, NULL); /* XXX Sevan: no detach function yet */
    165 
    166 static const struct device_compatible_entry compat_data[] = {
    167 	{ .id = PCI_ID_CODE(PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_E3000) },
    168 	{ .id = PCI_ID_CODE(PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8125) },
    169 
    170 	PCI_COMPAT_EOL
    171 };
    172 
    173 static int
    174 rge_match(device_t parent, cfdata_t match, void *aux)
    175 {
     176 	struct pci_attach_args *pa = aux;
    177 
    178 	return pci_compatible_match(pa, compat_data);
    179 }
    180 
     181 static void
    182 rge_attach(device_t parent, device_t self, void *aux)
    183 {
    184 	struct rge_softc *sc = device_private(self);
    185 	struct pci_attach_args *pa = aux;
    186 	pci_chipset_tag_t pc = pa->pa_pc;
    187 	pci_intr_handle_t *ihp;
    188 	char intrbuf[PCI_INTRSTR_LEN];
    189 	const char *intrstr = NULL;
    190 	struct ifnet *ifp;
    191 	pcireg_t reg;
    192 	uint32_t hwrev;
    193 	uint8_t eaddr[ETHER_ADDR_LEN];
    194 	int offset;
    195 	pcireg_t command;
    196 
    197 	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
    198 
    199 	sc->sc_dev = self;
    200 
    201 	pci_aprint_devinfo(pa, "Ethernet controller");
    202 
     203 	/*
     204 	 * Map control/status registers.  Prefer the 64-bit memory BAR,
        	 * then fall back to the 32-bit memory BAR, then to I/O space.
     205 	 */
    206 	if (pci_mapreg_map(pa, RGE_PCI_BAR2, PCI_MAPREG_TYPE_MEM |
    207 	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->rge_btag, &sc->rge_bhandle,
    208 	    NULL, &sc->rge_bsize)) {
    209 		if (pci_mapreg_map(pa, RGE_PCI_BAR1, PCI_MAPREG_TYPE_MEM |
    210 		    PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->rge_btag,
    211 		    &sc->rge_bhandle, NULL, &sc->rge_bsize)) {
    212 			if (pci_mapreg_map(pa, RGE_PCI_BAR0, PCI_MAPREG_TYPE_IO,
    213 			    0, &sc->rge_btag, &sc->rge_bhandle, NULL,
    214 			    &sc->rge_bsize)) {
    215 				aprint_error(": can't map mem or i/o space\n");
    216 				return;
    217 			}
    218 		}
    219 	}
    220 
    221 	int counts[PCI_INTR_TYPE_SIZE] = {
    222  		[PCI_INTR_TYPE_INTX] = 1,
    223  		[PCI_INTR_TYPE_MSI] = 1,
    224  		[PCI_INTR_TYPE_MSIX] = 1,
    225  	};
    226 	int max_type = PCI_INTR_TYPE_MSIX;
    227 	/*
    228 	 * Allocate interrupt.
    229 	 */
    230 	if (pci_intr_alloc(pa, &ihp, counts, max_type) != 0) {
    231 		aprint_error(": couldn't map interrupt\n");
    232 		return;
    233 	}
    234 	switch (pci_intr_type(pc, ihp[0])) {
    235 	case PCI_INTR_TYPE_MSIX:
    236 	case PCI_INTR_TYPE_MSI:
    237 		sc->rge_flags |= RGE_FLAG_MSI;
    238 		break;
    239 	default:
    240 		break;
    241 	}
    242 	intrstr = pci_intr_string(pc, ihp[0], intrbuf, sizeof(intrbuf));
    243 	sc->sc_ih = pci_intr_establish_xname(pc, ihp[0], IPL_NET, rge_intr,
    244 	    sc, device_xname(sc->sc_dev));
    245 	if (sc->sc_ih == NULL) {
     246 		aprint_error_dev(sc->sc_dev, "couldn't establish interrupt");
     247 		if (intrstr != NULL)
     248 			aprint_error(" at %s", intrstr);
    249 		aprint_error("\n");
    250 		return;
    251 	}
    252 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
    253 
    254 	if (pci_dma64_available(pa))
    255 		sc->sc_dmat = pa->pa_dmat64;
    256 	else
    257 		sc->sc_dmat = pa->pa_dmat;
    258 
    259 	sc->sc_pc = pa->pa_pc;
    260 	sc->sc_tag = pa->pa_tag;
    261 
    262 	/* Determine hardware revision */
    263 	hwrev = RGE_READ_4(sc, RGE_TXCFG) & RGE_TXCFG_HWREV;
    264 	switch (hwrev) {
    265 	case 0x60800000:
    266 		sc->rge_type = MAC_CFG2;
    267 		break;
    268 	case 0x60900000:
    269 		sc->rge_type = MAC_CFG3;
    270 		break;
    271 	case 0x64000000:
    272 		sc->rge_type = MAC_CFG4;
    273 		break;
    274 	case 0x64100000:
    275 		sc->rge_type = MAC_CFG5;
    276 		break;
    277 	default:
    278 		aprint_error(": unknown version 0x%08x\n", hwrev);
    279 		return;
    280 	}
    281 
    282 	rge_config_imtype(sc, RGE_IMTYPE_SIM);
    283 
    284 	/*
    285 	 * PCI Express check.
    286 	 */
    287 	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
    288 	    &offset, NULL)) {
    289 		/* Disable PCIe ASPM and ECPM. */
    290 		reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
    291 		    offset + PCIE_LCSR);
    292 		reg &= ~(PCIE_LCSR_ASPM_L0S | PCIE_LCSR_ASPM_L1 |
    293 		    PCIE_LCSR_ENCLKPM);
    294 		pci_conf_write(pa->pa_pc, pa->pa_tag, offset + PCIE_LCSR,
    295 		    reg);
    296 	}
    297 
    298 	rge_exit_oob(sc);
    299 	rge_hw_init(sc);
    300 
    301 	rge_get_macaddr(sc, eaddr);
    302 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
    303 	    ether_sprintf(eaddr));
    304 
    305 	memcpy(sc->sc_enaddr, eaddr, ETHER_ADDR_LEN);
    306 
    307 	rge_set_phy_power(sc, 1);
    308 	rge_phy_config(sc);
    309 
    310 	if (rge_allocmem(sc))
    311 		return;
    312 
    313 	ifp = &sc->sc_ec.ec_if;
    314 	ifp->if_softc = sc;
    315 	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
    316 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    317 #ifdef RGE_MPSAFE
    318 	ifp->if_extflags = IFEF_MPSAFE;
    319 #endif
    320 	ifp->if_ioctl = rge_ioctl;
    321 	ifp->if_stop = rge_stop;
    322 	ifp->if_start = rge_start;
    323 	ifp->if_init = rge_init;
    324 	ifp->if_watchdog = rge_watchdog;
    325 	IFQ_SET_MAXLEN(&ifp->if_snd, RGE_TX_LIST_CNT - 1);
    326 
    327 #if notyet
    328 	ifp->if_capabilities = IFCAP_CSUM_IPv4_Rx |
    329 	    IFCAP_CSUM_IPv4_Tx |IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_TCPv4_Tx|
    330 	    IFCAP_CSUM_UDPv4_Rx | IFCAP_CSUM_UDPv4_Tx;
    331 #endif
    332 
    333 	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
    334 	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
    335 
    336 	callout_init(&sc->sc_timeout, CALLOUT_FLAGS);
    337 	callout_setfunc(&sc->sc_timeout, rge_tick, sc);
    338 
    339 	command = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
    340 	command |= PCI_COMMAND_MASTER_ENABLE;
    341 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
    342 
    343 	/* Initialize ifmedia structures. */
    344 	sc->sc_ec.ec_ifmedia = &sc->sc_media;
    345 	ifmedia_init(&sc->sc_media, IFM_IMASK, rge_ifmedia_upd,
    346 	    rge_ifmedia_sts);
    347 	rge_add_media_types(sc);
    348 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
    349 	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
    350 	sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media;
    351 
    352 	if_attach(ifp);
    353 	ether_ifattach(ifp, eaddr);
    354 
    355 	if (pmf_device_register(self, NULL, NULL))
    356 		pmf_class_network_register(self, ifp);
    357 	else
    358 		aprint_error_dev(self, "couldn't establish power handler\n");
    359 }
    360 
    361 int
    362 rge_intr(void *arg)
    363 {
    364 	struct rge_softc *sc = arg;
    365 	struct ifnet *ifp = &sc->sc_ec.ec_if;
    366 	uint32_t status;
    367 	int claimed = 0, rx, tx;
    368 
    369 	if (!(ifp->if_flags & IFF_RUNNING))
    370 		return (0);
    371 
    372 	/* Disable interrupts. */
    373 	RGE_WRITE_4(sc, RGE_IMR, 0);
    374 
    375 	if (!(sc->rge_flags & RGE_FLAG_MSI)) {
    376 		if ((RGE_READ_4(sc, RGE_ISR) & sc->rge_intrs) == 0)
    377 			return (0);
    378 	}
    379 
    380 	status = RGE_READ_4(sc, RGE_ISR);
    381 	if (status)
    382 		RGE_WRITE_4(sc, RGE_ISR, status);
    383 
    384 	if (status & RGE_ISR_PCS_TIMEOUT)
    385 		claimed = 1;
    386 
    387 	rx = tx = 0;
    388 	if (status & sc->rge_intrs) {
    389 		if (status &
    390 		    (sc->rge_rx_ack | RGE_ISR_RX_ERR | RGE_ISR_RX_FIFO_OFLOW)) {
    391 			rx |= rge_rxeof(sc);
    392 			claimed = 1;
    393 		}
    394 
    395 		if (status & (sc->rge_tx_ack | RGE_ISR_TX_ERR)) {
    396 			tx |= rge_txeof(sc);
    397 			claimed = 1;
    398 		}
    399 
    400 		if (status & RGE_ISR_SYSTEM_ERR) {
    401 			KERNEL_LOCK(1, NULL);
    402 			rge_init(ifp);
    403 			KERNEL_UNLOCK_ONE(NULL);
    404 			claimed = 1;
    405 		}
    406 	}
    407 
    408 	if (sc->rge_timerintr) {
    409 		if ((tx | rx) == 0) {
    410 			/*
     411 			 * Nothing needs to be processed; fall back
     412 			 * to plain TX/RX interrupts.
    413 			 */
    414 			rge_setup_intr(sc, RGE_IMTYPE_NONE);
    415 
    416 			/*
    417 			 * Recollect, mainly to avoid the possible
    418 			 * race introduced by changing interrupt
    419 			 * masks.
    420 			 */
    421 			rge_rxeof(sc);
    422 			rge_txeof(sc);
    423 		} else
    424 			RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
    425 	} else if (tx | rx) {
    426 		/*
    427 		 * Assume that using simulated interrupt moderation
    428 		 * (hardware timer based) could reduce the interrupt
    429 		 * rate.
    430 		 */
    431 		rge_setup_intr(sc, RGE_IMTYPE_SIM);
    432 	}
    433 
    434 	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
    435 
    436 	return (claimed);
    437 }
    438 
    439 int
    440 rge_encap(struct rge_softc *sc, struct mbuf *m, int idx)
    441 {
    442 	struct rge_tx_desc *d = NULL;
    443 	struct rge_txq *txq;
    444 	bus_dmamap_t txmap;
    445 	uint32_t cmdsts, cflags = 0;
    446 	int cur, error, i, last, nsegs;
    447 
    448 #if notyet
    449 	/*
    450 	 * Set RGE_TDEXTSTS_IPCSUM if any checksum offloading is requested.
     451 	 * Otherwise, RGE_TDEXTSTS_TCPCSUM / RGE_TDEXTSTS_UDPCSUM do not
     452 	 * take effect.
    453 	 */
    454 	if ((m->m_pkthdr.csum_flags &
    455 	    (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) != 0) {
    456 		cflags |= RGE_TDEXTSTS_IPCSUM;
    457 		if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
    458 			cflags |= RGE_TDEXTSTS_TCPCSUM;
    459 		if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
    460 			cflags |= RGE_TDEXTSTS_UDPCSUM;
    461 	}
    462 #endif
    463 
    464 	txq = &sc->rge_ldata.rge_txq[idx];
    465 	txmap = txq->txq_dmamap;
    466 
    467 	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m, BUS_DMA_NOWAIT);
    468 	switch (error) {
    469 	case 0:
    470 		break;
    471 	case EFBIG: /* mbuf chain is too fragmented */
    472 		if (m_defrag(m, M_DONTWAIT) == 0 &&
    473 		    bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m,
    474 		    BUS_DMA_NOWAIT) == 0)
    475 			break;
    476 
    477 		/* FALLTHROUGH */
    478 	default:
    479 		return (0);
    480 	}
    481 
    482 	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
    483 	    BUS_DMASYNC_PREWRITE);
    484 
    485 	nsegs = txmap->dm_nsegs;
    486 
    487 	/* Set up hardware VLAN tagging. */
    488 	if (vlan_has_tag(m))
    489 		cflags |= bswap16(vlan_get_tag(m)) | RGE_TDEXTSTS_VTAG;
    490 
    491 	last = cur = idx;
    492 	cmdsts = RGE_TDCMDSTS_SOF;
    493 
    494 	for (i = 0; i < txmap->dm_nsegs; i++) {
    495 		d = &sc->rge_ldata.rge_tx_list[cur];
    496 
    497 		d->rge_extsts = htole32(cflags);
    498 		d->rge_addrlo = htole32(RGE_ADDR_LO(txmap->dm_segs[i].ds_addr));
    499 		d->rge_addrhi = htole32(RGE_ADDR_HI(txmap->dm_segs[i].ds_addr));
    500 
    501 		cmdsts |= txmap->dm_segs[i].ds_len;
    502 
    503 		if (cur == RGE_TX_LIST_CNT - 1)
    504 			cmdsts |= RGE_TDCMDSTS_EOR;
    505 
    506 		d->rge_cmdsts = htole32(cmdsts);
    507 
    508 		last = cur;
    509 		cmdsts = RGE_TDCMDSTS_OWN;
    510 		cur = RGE_NEXT_TX_DESC(cur);
    511 	}
    512 
    513 	/* Set EOF on the last descriptor. */
    514 	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_EOF);
    515 
    516 	/* Transfer ownership of packet to the chip. */
    517 	d = &sc->rge_ldata.rge_tx_list[idx];
    518 
    519 	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_OWN);
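        	/*
        	 * The first (SOF) descriptor was written without OWN while the
        	 * rest of the chain was built, presumably so the chip cannot
        	 * start on a half-written chain; setting OWN here hands the
        	 * whole packet over at once.
        	 */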
    520 
    521 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
    522 	    cur * sizeof(struct rge_tx_desc), sizeof(struct rge_tx_desc),
    523 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    524 
    525 	/* Update info of TX queue and descriptors. */
    526 	txq->txq_mbuf = m;
    527 	txq->txq_descidx = last;
    528 
    529 	return (nsegs);
    530 }
    531 
    532 int
    533 rge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
    534 {
    535 	struct rge_softc *sc = ifp->if_softc;
     536 	/* struct ifreq *ifr = (struct ifreq *)data; */
    537 	int s, error = 0;
    538 
    539 	s = splnet();
    540 
    541 	switch (cmd) {
    542 	case SIOCSIFFLAGS:
    543 		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
    544 			break;
    545 		/* XXX set an ifflags callback and let ether_ioctl
    546 		 * handle all of this.
    547 		 */
    548 		if (ifp->if_flags & IFF_UP) {
    549 			if (ifp->if_flags & IFF_RUNNING)
    550 				error = ENETRESET;
    551 			else
    552 				rge_init(ifp);
    553 		} else {
    554 			if (ifp->if_flags & IFF_RUNNING)
    555 				rge_stop(ifp, 1);
    556 		}
    557 		break;
    558 	default:
    559 		error = ether_ioctl(ifp, cmd, data);
    560 	}
    561 
    562 	if (error == ENETRESET) {
    563 		if (ifp->if_flags & IFF_RUNNING)
    564 			rge_iff(sc);
    565 		error = 0;
    566 	}
    567 
    568 	splx(s);
    569 	return (error);
    570 }
    571 
    572 void
    573 rge_start(struct ifnet *ifp)
    574 {
    575 	struct rge_softc *sc = ifp->if_softc;
    576 	struct mbuf *m;
    577 	int free, idx, used;
    578 	int queued = 0;
    579 
    580 #define LINK_STATE_IS_UP(_s)    \
    581 	((_s) >= LINK_STATE_UP || (_s) == LINK_STATE_UNKNOWN)
    582 
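        	/*
        	 * Local copy of OpenBSD's LINK_STATE_IS_UP(); UNKNOWN is
        	 * treated as up, presumably so output is not blocked before
        	 * the first link-state interrupt arrives.
        	 */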
    583 	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
    584 		IFQ_PURGE(&ifp->if_snd);
    585 		return;
    586 	}
    587 
    588 	/* Calculate free space. */
    589 	idx = sc->rge_ldata.rge_txq_prodidx;
    590 	free = sc->rge_ldata.rge_txq_considx;
    591 	if (free <= idx)
    592 		free += RGE_TX_LIST_CNT;
    593 	free -= idx;
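        	/*
        	 * Example (assuming a 64-descriptor ring): considx 10 and
        	 * prodidx 60 give free = 10 + 64 - 60 = 14 descriptors.
        	 */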
    594 
    595 	for (;;) {
    596 		if (RGE_TX_NSEGS >= free + 2) {
    597 			SET(ifp->if_flags, IFF_OACTIVE);
    598 			break;
    599 		}
    600 
    601 		IFQ_DEQUEUE(&ifp->if_snd, m);
    602 		if (m == NULL)
    603 			break;
    604 
    605 		used = rge_encap(sc, m, idx);
    606 		if (used == 0) {
    607 			m_freem(m);
    608 			continue;
    609 		}
    610 
    611 		KASSERT(used <= free);
    612 		free -= used;
    613 
    614 		bpf_mtap(ifp, m, BPF_D_OUT);
    615 
    616 		idx += used;
    617 		if (idx >= RGE_TX_LIST_CNT)
    618 			idx -= RGE_TX_LIST_CNT;
    619 
    620 		queued++;
    621 	}
    622 
    623 	if (queued == 0)
    624 		return;
    625 
    626 	/* Set a timeout in case the chip goes out to lunch. */
    627 	ifp->if_timer = 5;
    628 
    629 	sc->rge_ldata.rge_txq_prodidx = idx;
    630 #if 0
    631 	ifq_serialize(ifq, &sc->sc_task);
    632 #else
    633 	rge_txstart(&sc->sc_task, sc);
    634 #endif
    635 }
    636 
    637 void
    638 rge_watchdog(struct ifnet *ifp)
    639 {
    640 	struct rge_softc *sc = ifp->if_softc;
    641 
    642 	device_printf(sc->sc_dev, "watchdog timeout\n");
    643 	if_statinc(ifp, if_oerrors);
    644 
    645 	rge_init(ifp);
    646 }
    647 
    648 int
    649 rge_init(struct ifnet *ifp)
    650 {
    651 	struct rge_softc *sc = ifp->if_softc;
    652 	uint32_t val;
    653 	int i;
    654 
    655 	rge_stop(ifp, 0);
    656 
    657 	/* Set MAC address. */
    658 	rge_set_macaddr(sc, CLLADDR(ifp->if_sadl));
    659 
    660 	/* Set Maximum frame size. */
    661 	RGE_WRITE_2(sc, RGE_RXMAXSIZE, RGE_JUMBO_FRAMELEN);
    662 
    663 	/* Initialize RX descriptors list. */
    664 	if (rge_rx_list_init(sc) == ENOBUFS) {
    665 		device_printf(sc->sc_dev,
    666 		    "init failed: no memory for RX buffers\n");
    667 		rge_stop(ifp, 1);
    668 		return (ENOBUFS);
    669 	}
    670 
    671 	/* Initialize TX descriptors. */
    672 	rge_tx_list_init(sc);
    673 
    674 	/* Load the addresses of the RX and TX lists into the chip. */
    675 	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_LO,
    676 	    RGE_ADDR_LO(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
    677 	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_HI,
    678 	    RGE_ADDR_HI(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
    679 	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_LO,
    680 	    RGE_ADDR_LO(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
    681 	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_HI,
    682 	    RGE_ADDR_HI(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
    683 
    684 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
    685 
    686 	RGE_CLRBIT_1(sc, 0xf1, 0x80);
    687 	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
    688 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
    689 	RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_RDY_TO_L23);
    690 
    691 	/* Clear interrupt moderation timer. */
    692 	for (i = 0; i < 64; i++)
    693 		RGE_WRITE_4(sc, RGE_INTMITI(i), 0);
    694 
    695 	/* Set the initial RX and TX configurations. */
    696 	RGE_WRITE_4(sc, RGE_RXCFG, RGE_RXCFG_CONFIG);
    697 	RGE_WRITE_4(sc, RGE_TXCFG, RGE_TXCFG_CONFIG);
    698 
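        	/*
        	 * The CSI, PCI config and MAC OCP pokes that follow appear to
        	 * mirror Realtek's vendor initialization; the offsets and
        	 * values are undocumented magic and are applied as-is.
        	 */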
    699 	val = rge_read_csi(sc, 0x70c) & ~0xff000000;
    700 	rge_write_csi(sc, 0x70c, val | 0x27000000);
    701 
    702 	/* Enable hardware optimization function. */
    703 	val = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x78) & ~0x00007000;
    704 	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x78, val | 0x00005000);
    705 
    706 	RGE_WRITE_2(sc, 0x0382, 0x221b);
    707 	RGE_WRITE_1(sc, 0x4500, 0);
    708 	RGE_WRITE_2(sc, 0x4800, 0);
    709 	RGE_CLRBIT_1(sc, RGE_CFG1, RGE_CFG1_SPEED_DOWN);
    710 
    711 	rge_write_mac_ocp(sc, 0xc140, 0xffff);
    712 	rge_write_mac_ocp(sc, 0xc142, 0xffff);
    713 
    714 	val = rge_read_mac_ocp(sc, 0xd3e2) & ~0x0fff;
    715 	rge_write_mac_ocp(sc, 0xd3e2, val | 0x03a9);
    716 
    717 	RGE_MAC_CLRBIT(sc, 0xd3e4, 0x00ff);
    718 	RGE_MAC_SETBIT(sc, 0xe860, 0x0080);
    719 	RGE_MAC_SETBIT(sc, 0xeb58, 0x0001);
    720 
    721 	val = rge_read_mac_ocp(sc, 0xe614) & ~0x0700;
    722 	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
    723 		rge_write_mac_ocp(sc, 0xe614, val | 0x0400);
    724 	else
    725 		rge_write_mac_ocp(sc, 0xe614, val | 0x0200);
    726 
    727 	RGE_MAC_CLRBIT(sc, 0xe63e, 0x0c00);
    728 
    729 	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
    730 		val = rge_read_mac_ocp(sc, 0xe63e) & ~0x0030;
    731 		rge_write_mac_ocp(sc, 0xe63e, val | 0x0020);
    732 	} else
    733 		RGE_MAC_CLRBIT(sc, 0xe63e, 0x0030);
    734 
    735 	RGE_MAC_SETBIT(sc, 0xc0b4, 0x000c);
    736 
    737 	val = rge_read_mac_ocp(sc, 0xeb6a) & ~0x00ff;
    738 	rge_write_mac_ocp(sc, 0xeb6a, val | 0x0033);
    739 
    740 	val = rge_read_mac_ocp(sc, 0xeb50) & ~0x03e0;
    741 	rge_write_mac_ocp(sc, 0xeb50, val | 0x0040);
    742 
    743 	val = rge_read_mac_ocp(sc, 0xe056) & ~0x00f0;
    744 	rge_write_mac_ocp(sc, 0xe056, val | 0x0030);
    745 
    746 	RGE_WRITE_1(sc, RGE_TDFNR, 0x10);
    747 
    748 	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_TX_10M_PS_EN);
    749 
    750 	RGE_MAC_CLRBIT(sc, 0xe040, 0x1000);
    751 
    752 	val = rge_read_mac_ocp(sc, 0xea1c) & ~0x0003;
    753 	rge_write_mac_ocp(sc, 0xea1c, val | 0x0001);
    754 
    755 	val = rge_read_mac_ocp(sc, 0xe0c0) & ~0x4f0f;
    756 	rge_write_mac_ocp(sc, 0xe0c0, val | 0x4403);
    757 
    758 	RGE_MAC_SETBIT(sc, 0xe052, 0x0068);
    759 	RGE_MAC_CLRBIT(sc, 0xe052, 0x0080);
    760 
    761 	val = rge_read_mac_ocp(sc, 0xc0ac) & ~0x0080;
    762 	rge_write_mac_ocp(sc, 0xc0ac, val | 0x1f00);
    763 
    764 	val = rge_read_mac_ocp(sc, 0xd430) & ~0x0fff;
    765 	rge_write_mac_ocp(sc, 0xd430, val | 0x047f);
    766 
    767 	val = rge_read_mac_ocp(sc, 0xe84c) & ~0x0040;
    768 	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
    769 		rge_write_mac_ocp(sc, 0xe84c, 0x00c0);
    770 	else
    771 		rge_write_mac_ocp(sc, 0xe84c, 0x0080);
    772 
    773 	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_PFM_EN);
    774 
    775 	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
    776 		RGE_SETBIT_1(sc, RGE_MCUCMD, 0x01);
    777 
    778 	/* Disable EEE plus. */
    779 	RGE_MAC_CLRBIT(sc, 0xe080, 0x0002);
    780 
    781 	RGE_MAC_CLRBIT(sc, 0xea1c, 0x0004);
    782 
    783 	RGE_MAC_SETBIT(sc, 0xeb54, 0x0001);
    784 	DELAY(1);
    785 	RGE_MAC_CLRBIT(sc, 0xeb54, 0x0001);
    786 
    787 	RGE_CLRBIT_4(sc, 0x1880, 0x0030);
    788 
    789 	rge_write_mac_ocp(sc, 0xe098, 0xc302);
    790 
    791 	if ((sc->sc_ec.ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0)
    792 		RGE_SETBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);
    793 	else
    794 		RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);
    795 
    796 	RGE_SETBIT_2(sc, RGE_CPLUSCMD, RGE_CPLUSCMD_RXCSUM);
    797 
    798 	for (i = 0; i < 10; i++) {
    799 		if (!(rge_read_mac_ocp(sc, 0xe00e) & 0x2000))
    800 			break;
    801 		DELAY(1000);
    802 	}
    803 
    804 	/* Disable RXDV gate. */
    805 	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
    806 	DELAY(2000);
    807 
    808 	rge_ifmedia_upd(ifp);
    809 
    810 	/* Enable transmit and receive. */
    811 	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_TXENB | RGE_CMD_RXENB);
    812 
    813 	/* Program promiscuous mode and multicast filters. */
    814 	rge_iff(sc);
    815 
    816 	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
    817 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
    818 
    819 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
    820 
    821 	/* Enable interrupts. */
    822 	rge_setup_intr(sc, RGE_IMTYPE_SIM);
    823 
    824 	ifp->if_flags |= IFF_RUNNING;
    825 	CLR(ifp->if_flags, IFF_OACTIVE);
    826 
    827 	callout_schedule(&sc->sc_timeout, 1);
    828 
    829 	return (0);
    830 }
    831 
    832 /*
    833  * Stop the adapter and free any mbufs allocated to the RX and TX lists.
    834  */
    835 void
    836 rge_stop(struct ifnet *ifp, int disable)
    837 {
    838 	struct rge_softc *sc = ifp->if_softc;
    839 	int i;
    840 
    841 	if (disable) {
    842 		callout_halt(&sc->sc_timeout, NULL);
    843 	} else
    844 		callout_stop(&sc->sc_timeout);
    845 
    846 	ifp->if_timer = 0;
    847 	ifp->if_flags &= ~IFF_RUNNING;
    848 	sc->rge_timerintr = 0;
    849 
    850 	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
    851 	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
    852 	    RGE_RXCFG_ERRPKT);
    853 
    854 	RGE_WRITE_4(sc, RGE_IMR, 0);
    855 
    856 	/* Clear timer interrupts. */
    857 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
    858 	RGE_WRITE_4(sc, RGE_TIMERINT1, 0);
    859 	RGE_WRITE_4(sc, RGE_TIMERINT2, 0);
    860 	RGE_WRITE_4(sc, RGE_TIMERINT3, 0);
    861 
    862 	rge_reset(sc);
    863 
     864 	/* XXX intr_barrier(sc->sc_ih); */
     865 	/* XXX ifq_barrier(&ifp->if_snd); */
     866 	/* XXX ifq_clr_oactive(&ifp->if_snd); Sevan - OpenBSD queue API */
    867 
    868 	if (sc->rge_head != NULL) {
    869 		m_freem(sc->rge_head);
    870 		sc->rge_head = sc->rge_tail = NULL;
    871 	}
    872 
    873 	/* Free the TX list buffers. */
    874 	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
    875 		if (sc->rge_ldata.rge_txq[i].txq_mbuf != NULL) {
    876 			bus_dmamap_unload(sc->sc_dmat,
    877 			    sc->rge_ldata.rge_txq[i].txq_dmamap);
    878 			m_freem(sc->rge_ldata.rge_txq[i].txq_mbuf);
    879 			sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
    880 		}
    881 	}
    882 
    883 	/* Free the RX list buffers. */
    884 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
    885 		if (sc->rge_ldata.rge_rxq[i].rxq_mbuf != NULL) {
    886 			bus_dmamap_unload(sc->sc_dmat,
    887 			    sc->rge_ldata.rge_rxq[i].rxq_dmamap);
    888 			m_freem(sc->rge_ldata.rge_rxq[i].rxq_mbuf);
    889 			sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
    890 		}
    891 	}
    892 }
    893 
    894 /*
    895  * Set media options.
    896  */
    897 int
    898 rge_ifmedia_upd(struct ifnet *ifp)
    899 {
    900 	struct rge_softc *sc = ifp->if_softc;
    901 	struct ifmedia *ifm = &sc->sc_media;
    902 	int anar, gig, val;
    903 
    904 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
    905 		return (EINVAL);
    906 
    907 	/* Disable Gigabit Lite. */
    908 	RGE_PHY_CLRBIT(sc, 0xa428, 0x0200);
    909 	RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0001);
    910 
    911 	val = rge_read_phy_ocp(sc, 0xa5d4);
    912 	val &= ~RGE_ADV_2500TFDX;
    913 
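        	/*
        	 * Advertisement is split across three registers: ANAR for
        	 * 10/100, MII_100T2CR (the 1000BASE-T control register) for
        	 * 1000, and PHY OCP register 0xa5d4 for 2500BASE-T.
        	 */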
    914 	anar = gig = 0;
    915 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
    916 	case IFM_AUTO:
    917 		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
    918 		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
    919 		val |= RGE_ADV_2500TFDX;
    920 		break;
    921 	case IFM_2500_T:
    922 		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
    923 		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
    924 		val |= RGE_ADV_2500TFDX;
    925 		ifp->if_baudrate = IF_Mbps(2500);
    926 		break;
    927 	case IFM_1000_T:
    928 		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
    929 		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
    930 		ifp->if_baudrate = IF_Gbps(1);
    931 		break;
    932 	case IFM_100_TX:
    933 		gig = rge_read_phy(sc, 0, MII_100T2CR) &
    934 		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
    935 		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
    936 		    ANAR_TX | ANAR_TX_FD | ANAR_10_FD | ANAR_10 :
    937 		    ANAR_TX | ANAR_10_FD | ANAR_10;
    938 		ifp->if_baudrate = IF_Mbps(100);
    939 		break;
    940 	case IFM_10_T:
    941 		gig = rge_read_phy(sc, 0, MII_100T2CR) &
    942 		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
    943 		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
    944 		    ANAR_10_FD | ANAR_10 : ANAR_10;
    945 		ifp->if_baudrate = IF_Mbps(10);
    946 		break;
    947 	default:
    948 		device_printf(sc->sc_dev,
    949 		    "unsupported media type\n");
    950 		return (EINVAL);
    951 	}
    952 
    953 	rge_write_phy(sc, 0, MII_ANAR, anar | ANAR_PAUSE_ASYM | ANAR_FC);
    954 	rge_write_phy(sc, 0, MII_100T2CR, gig);
    955 	rge_write_phy_ocp(sc, 0xa5d4, val);
    956 	rge_write_phy(sc, 0, MII_BMCR, BMCR_RESET | BMCR_AUTOEN |
    957 	    BMCR_STARTNEG);
    958 
    959 	return (0);
    960 }
    961 
    962 /*
    963  * Report current media status.
    964  */
    965 void
    966 rge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
    967 {
    968 	struct rge_softc *sc = ifp->if_softc;
    969 	uint16_t status = 0;
    970 
    971 	ifmr->ifm_status = IFM_AVALID;
    972 	ifmr->ifm_active = IFM_ETHER;
    973 
    974 	if (rge_get_link_status(sc)) {
    975 		ifmr->ifm_status |= IFM_ACTIVE;
    976 
    977 		status = RGE_READ_2(sc, RGE_PHYSTAT);
    978 		if ((status & RGE_PHYSTAT_FDX) ||
    979 		    (status & RGE_PHYSTAT_2500MBPS))
    980 			ifmr->ifm_active |= IFM_FDX;
    981 		else
    982 			ifmr->ifm_active |= IFM_HDX;
    983 
    984 		if (status & RGE_PHYSTAT_10MBPS)
    985 			ifmr->ifm_active |= IFM_10_T;
    986 		else if (status & RGE_PHYSTAT_100MBPS)
    987 			ifmr->ifm_active |= IFM_100_TX;
    988 		else if (status & RGE_PHYSTAT_1000MBPS)
    989 			ifmr->ifm_active |= IFM_1000_T;
    990 		else if (status & RGE_PHYSTAT_2500MBPS)
    991 			ifmr->ifm_active |= IFM_2500_T;
    992 	}
    993 }
    994 
    995 /*
    996  * Allocate memory for RX/TX rings.
    997  */
    998 int
    999 rge_allocmem(struct rge_softc *sc)
   1000 {
   1001 	int error, i;
   1002 
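        	/*
        	 * Both rings follow the usual bus_dma(9) sequence, e.g. for TX:
        	 *
        	 *	bus_dmamap_create()	ring DMA map
        	 *	bus_dmamem_alloc()	backing pages
        	 *	bus_dmamem_map()	kernel mapping
        	 *	bus_dmamap_load()	program the map
        	 *
        	 * followed by per-packet maps for the mbufs themselves.
        	 */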
   1003 	/* Allocate DMA'able memory for the TX ring. */
   1004 	error = bus_dmamap_create(sc->sc_dmat, RGE_TX_LIST_SZ, 1,
   1005 	    RGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT, &sc->rge_ldata.rge_tx_list_map);
   1006 	if (error) {
   1007 		aprint_error_dev(sc->sc_dev, "can't create TX list map\n");
   1008 		return (error);
   1009 	}
   1010 	error = bus_dmamem_alloc(sc->sc_dmat, RGE_TX_LIST_SZ, RGE_ALIGN, 0,
   1011 	    &sc->rge_ldata.rge_tx_listseg, 1, &sc->rge_ldata.rge_tx_listnseg,
   1012 	    BUS_DMA_NOWAIT);
   1013 	if (error) {
   1014 		aprint_error_dev(sc->sc_dev, "can't alloc TX list\n");
   1015 		return (error);
   1016 	}
   1017 
   1018 	/* Load the map for the TX ring. */
   1019 	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
   1020 	    sc->rge_ldata.rge_tx_listnseg, RGE_TX_LIST_SZ,
   1021 	    (void **) &sc->rge_ldata.rge_tx_list,
   1022 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
   1023 	if (error) {
   1024 		aprint_error_dev(sc->sc_dev, "can't map TX dma buffers\n");
   1025 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
   1026 		    sc->rge_ldata.rge_tx_listnseg);
   1027 		return (error);
   1028 	}
   1029 	memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);
   1030 	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
   1031 	    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
   1032 	if (error) {
   1033 		aprint_error_dev(sc->sc_dev, "can't load TX dma map\n");
   1034 		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map);
   1035 		bus_dmamem_unmap(sc->sc_dmat,
   1036 		    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ);
   1037 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
   1038 		    sc->rge_ldata.rge_tx_listnseg);
   1039 		return (error);
   1040 	}
   1041 
   1042 	/* Create DMA maps for TX buffers. */
   1043 	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
   1044 		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN,
   1045 		    RGE_TX_NSEGS, RGE_JUMBO_FRAMELEN, 0, 0,
   1046 		    &sc->rge_ldata.rge_txq[i].txq_dmamap);
   1047 		if (error) {
   1048 			aprint_error_dev(sc->sc_dev, "can't create DMA map for TX\n");
   1049 			return (error);
   1050 		}
   1051 	}
   1052 
   1053 	/* Allocate DMA'able memory for the RX ring. */
   1054 	error = bus_dmamap_create(sc->sc_dmat, RGE_RX_LIST_SZ, 1,
   1055 	    RGE_RX_LIST_SZ, 0, 0, &sc->rge_ldata.rge_rx_list_map);
   1056 	if (error) {
   1057 		aprint_error_dev(sc->sc_dev, "can't create RX list map\n");
   1058 		return (error);
   1059 	}
   1060 	error = bus_dmamem_alloc(sc->sc_dmat, RGE_RX_LIST_SZ, RGE_ALIGN, 0,
   1061 	    &sc->rge_ldata.rge_rx_listseg, 1, &sc->rge_ldata.rge_rx_listnseg,
   1062 	    BUS_DMA_NOWAIT);
   1063 	if (error) {
   1064 		aprint_error_dev(sc->sc_dev, "can't alloc RX list\n");
   1065 		return (error);
   1066 	}
   1067 
   1068 	/* Load the map for the RX ring. */
   1069 	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
   1070 	    sc->rge_ldata.rge_rx_listnseg, RGE_RX_LIST_SZ,
   1071 	    (void **) &sc->rge_ldata.rge_rx_list,
   1072 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
   1073 	if (error) {
   1074 		aprint_error_dev(sc->sc_dev, "can't map RX dma buffers\n");
   1075 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
   1076 		    sc->rge_ldata.rge_rx_listnseg);
   1077 		return (error);
   1078 	}
   1079 	memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);
   1080 	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
   1081 	    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
   1082 	if (error) {
   1083 		aprint_error_dev(sc->sc_dev, "can't load RX dma map\n");
   1084 		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map);
   1085 		bus_dmamem_unmap(sc->sc_dmat,
   1086 		    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ);
   1087 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
   1088 		    sc->rge_ldata.rge_rx_listnseg);
   1089 		return (error);
   1090 	}
   1091 
   1092 	/* Create DMA maps for RX buffers. */
   1093 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
   1094 		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN, 1,
   1095 		    RGE_JUMBO_FRAMELEN, 0, 0,
   1096 		    &sc->rge_ldata.rge_rxq[i].rxq_dmamap);
   1097 		if (error) {
   1098 			aprint_error_dev(sc->sc_dev, "can't create DMA map for RX\n");
   1099 			return (error);
   1100 		}
   1101 	}
   1102 
   1103 	return (error);
   1104 }
   1105 
   1106 /*
   1107  * Initialize the RX descriptor and attach an mbuf cluster.
   1108  */
   1109 int
   1110 rge_newbuf(struct rge_softc *sc, int idx)
   1111 {
   1112 	struct mbuf *m;
   1113 	struct rge_rx_desc *r;
   1114 	struct rge_rxq *rxq;
   1115 	bus_dmamap_t rxmap;
   1116 
   1117 	m = MCLGETL(NULL, M_DONTWAIT, RGE_JUMBO_FRAMELEN);
   1118 	if (m == NULL)
   1119 		return (ENOBUFS);
   1120 
   1121 	m->m_len = m->m_pkthdr.len = RGE_JUMBO_FRAMELEN;
   1122 
   1123 	rxq = &sc->rge_ldata.rge_rxq[idx];
   1124 	rxmap = rxq->rxq_dmamap;
   1125 
   1126 	if (bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m, BUS_DMA_NOWAIT))
   1127 		goto out;
   1128 
   1129 	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
   1130 	    BUS_DMASYNC_PREREAD);
   1131 
   1132 	/* Map the segments into RX descriptors. */
   1133 	r = &sc->rge_ldata.rge_rx_list[idx];
   1134 
   1135 	rxq->rxq_mbuf = m;
   1136 
   1137 	r->hi_qword1.rx_qword4.rge_extsts = 0;
   1138 	r->hi_qword0.rge_addr = htole64(rxmap->dm_segs[0].ds_addr);
   1139 
   1140 	r->hi_qword1.rx_qword4.rge_cmdsts = htole32(rxmap->dm_segs[0].ds_len);
   1141 	if (idx == RGE_RX_LIST_CNT - 1)
   1142 		r->hi_qword1.rx_qword4.rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
   1143 
   1144 	r->hi_qword1.rx_qword4.rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);
   1145 
   1146 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
   1147 	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
   1148 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1149 
   1150 	return (0);
   1151 out:
   1152 	if (m != NULL)
   1153 		m_freem(m);
   1154 	return (ENOMEM);
   1155 }
   1156 
   1157 void
   1158 rge_discard_rxbuf(struct rge_softc *sc, int idx)
   1159 {
   1160 	struct rge_rx_desc *r;
   1161 
   1162 	r = &sc->rge_ldata.rge_rx_list[idx];
   1163 
   1164 	r->hi_qword1.rx_qword4.rge_cmdsts = htole32(RGE_JUMBO_FRAMELEN);
   1165 	r->hi_qword1.rx_qword4.rge_extsts = 0;
   1166 	if (idx == RGE_RX_LIST_CNT - 1)
   1167 		r->hi_qword1.rx_qword4.rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
   1168 	r->hi_qword1.rx_qword4.rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);
   1169 
   1170 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
   1171 	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
   1172 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1173 }
   1174 
   1175 int
   1176 rge_rx_list_init(struct rge_softc *sc)
   1177 {
   1178 	int i;
   1179 
   1180 	memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);
   1181 
   1182 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
   1183 		sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
   1184 		if (rge_newbuf(sc, i) == ENOBUFS)
   1185 			return (ENOBUFS);
   1186 	}
   1187 
   1188 	sc->rge_ldata.rge_rxq_prodidx = sc->rge_ldata.rge_rxq_considx = 0;
   1189 	sc->rge_head = sc->rge_tail = NULL;
   1190 
   1191 	return (0);
   1192 }
   1193 
   1194 void
   1195 rge_tx_list_init(struct rge_softc *sc)
   1196 {
   1197 	int i;
   1198 
   1199 	memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);
   1200 
   1201 	for (i = 0; i < RGE_TX_LIST_CNT; i++)
   1202 		sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
   1203 
   1204 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map, 0,
   1205 	    sc->rge_ldata.rge_tx_list_map->dm_mapsize,
   1206 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1207 
   1208 	sc->rge_ldata.rge_txq_prodidx = sc->rge_ldata.rge_txq_considx = 0;
   1209 }
   1210 
   1211 int
   1212 rge_rxeof(struct rge_softc *sc)
   1213 {
   1214 	struct mbuf *m;
   1215 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   1216 	struct rge_rx_desc *cur_rx;
   1217 	struct rge_rxq *rxq;
   1218 	uint32_t rxstat, extsts;
   1219 	int i, total_len, rx = 0;
   1220 
   1221 	for (i = sc->rge_ldata.rge_rxq_considx; ; i = RGE_NEXT_RX_DESC(i)) {
   1222 		/* Invalidate the descriptor memory. */
   1223 		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
   1224 		    i * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
   1225 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1226 
   1227 		cur_rx = &sc->rge_ldata.rge_rx_list[i];
   1228 
   1229 		if (RGE_OWN(cur_rx))
   1230 			break;
   1231 
   1232 		rxstat = letoh32(cur_rx->hi_qword1.rx_qword4.rge_cmdsts);
   1233 		extsts = letoh32(cur_rx->hi_qword1.rx_qword4.rge_extsts);
   1234 
   1235 		total_len = RGE_RXBYTES(cur_rx);
   1236 		rxq = &sc->rge_ldata.rge_rxq[i];
   1237 		m = rxq->rxq_mbuf;
   1238 		rxq->rxq_mbuf = NULL;
   1239 		rx = 1;
   1240 
   1241 		/* Invalidate the RX mbuf and unload its map. */
   1242 		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_dmamap, 0,
   1243 		    rxq->rxq_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   1244 		bus_dmamap_unload(sc->sc_dmat, rxq->rxq_dmamap);
   1245 
   1246 		if ((rxstat & (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) !=
   1247 		    (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) {
   1248 			rge_discard_rxbuf(sc, i);
   1249 			continue;
   1250 		}
   1251 
   1252 		if (rxstat & RGE_RDCMDSTS_RXERRSUM) {
   1253 			if_statinc(ifp, if_ierrors);
   1254 			/*
   1255 			 * If this is part of a multi-fragment packet,
   1256 			 * discard all the pieces.
   1257 			 */
   1258 			 if (sc->rge_head != NULL) {
   1259 				m_freem(sc->rge_head);
   1260 				sc->rge_head = sc->rge_tail = NULL;
   1261 			}
   1262 			rge_discard_rxbuf(sc, i);
   1263 			continue;
   1264 		}
   1265 
   1266 		/*
   1267 		 * If allocating a replacement mbuf fails,
   1268 		 * reload the current one.
   1269 		 */
   1270 
   1271 		if (rge_newbuf(sc, i) == ENOBUFS) {
   1272 			if (sc->rge_head != NULL) {
   1273 				m_freem(sc->rge_head);
   1274 				sc->rge_head = sc->rge_tail = NULL;
   1275 			}
   1276 			rge_discard_rxbuf(sc, i);
   1277 			continue;
   1278 		}
   1279 
   1280 		m_set_rcvif(m, ifp);
   1281 		if (sc->rge_head != NULL) {
   1282 			m->m_len = total_len;
   1283 			/*
    1284 			 * Special case: if there are 4 bytes or fewer
    1285 			 * in this buffer, the mbuf can be discarded:
    1286 			 * the last 4 bytes are the CRC, which we don't
   1287 			 * care about anyway.
   1288 			 */
   1289 			if (m->m_len <= ETHER_CRC_LEN) {
   1290 				sc->rge_tail->m_len -=
   1291 				    (ETHER_CRC_LEN - m->m_len);
   1292 				m_freem(m);
   1293 			} else {
   1294 				m->m_len -= ETHER_CRC_LEN;
   1295 				m->m_flags &= ~M_PKTHDR;
   1296 				sc->rge_tail->m_next = m;
   1297 			}
   1298 			m = sc->rge_head;
   1299 			sc->rge_head = sc->rge_tail = NULL;
   1300 			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
   1301 		} else
   1302 	#if 0
   1303 			m->m_pkthdr.len = m->m_len =
   1304 			    (total_len - ETHER_CRC_LEN);
   1305 	#else
   1306 		{
   1307 			m->m_pkthdr.len = m->m_len = total_len;
   1308 			m->m_flags |= M_HASFCS;
   1309 		}
   1310 	#endif
   1311 
   1312 #if notyet
   1313 		/* Check IP header checksum. */
   1314 		if (!(extsts & RGE_RDEXTSTS_IPCSUMERR) &&
   1315 		    (extsts & RGE_RDEXTSTS_IPV4))
   1316 			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
   1317 
   1318 		/* Check TCP/UDP checksum. */
   1319 		if ((extsts & (RGE_RDEXTSTS_IPV4 | RGE_RDEXTSTS_IPV6)) &&
   1320 		    (((extsts & RGE_RDEXTSTS_TCPPKT) &&
   1321 		    !(extsts & RGE_RDEXTSTS_TCPCSUMERR)) ||
   1322 		    ((extsts & RGE_RDEXTSTS_UDPPKT) &&
   1323 		    !(extsts & RGE_RDEXTSTS_UDPCSUMERR))))
   1324 			m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
   1325 			    M_UDP_CSUM_IN_OK;
   1326 #endif
   1327 
   1328 		if (extsts & RGE_RDEXTSTS_VTAG) {
   1329 			vlan_set_tag(m,
   1330 			    bswap16(extsts & RGE_RDEXTSTS_VLAN_MASK));
   1331 		}
   1332 
   1333 		if_percpuq_enqueue(ifp->if_percpuq, m);
   1334 	}
   1335 
   1336 	sc->rge_ldata.rge_rxq_considx = i;
   1337 
   1338 	return (rx);
   1339 }
   1340 
   1341 int
   1342 rge_txeof(struct rge_softc *sc)
   1343 {
   1344 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   1345 	struct rge_txq *txq;
   1346 	uint32_t txstat;
   1347 	int cons, idx, prod;
   1348 	int free = 0;
   1349 
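        	/*
        	 * free records reclaim progress: 0 = nothing reclaimed,
        	 * 1 = at least one descriptor reclaimed, 2 = stopped early
        	 * at a descriptor the chip still owns (frames in flight).
        	 */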
   1350 	prod = sc->rge_ldata.rge_txq_prodidx;
   1351 	cons = sc->rge_ldata.rge_txq_considx;
   1352 
   1353 	while (prod != cons) {
   1354 		txq = &sc->rge_ldata.rge_txq[cons];
   1355 		idx = txq->txq_descidx;
   1356 
   1357 		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
   1358 		    idx * sizeof(struct rge_tx_desc),
   1359 		    sizeof(struct rge_tx_desc),
   1360 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1361 
   1362 		txstat = letoh32(sc->rge_ldata.rge_tx_list[idx].rge_cmdsts);
   1363 
   1364 		if (txstat & RGE_TDCMDSTS_OWN) {
   1365 			free = 2;
   1366 			break;
   1367 		}
   1368 
   1369 		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap, 0,
   1370 		    txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   1371 		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
   1372 		m_freem(txq->txq_mbuf);
   1373 		txq->txq_mbuf = NULL;
   1374 
   1375 		if (txstat & (RGE_TDCMDSTS_EXCESSCOLL | RGE_TDCMDSTS_COLL))
   1376 			if_statinc(ifp, if_collisions);
   1377 		if (txstat & RGE_TDCMDSTS_TXERR)
   1378 			if_statinc(ifp, if_oerrors);
   1379 
   1380 		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
   1381 		    idx * sizeof(struct rge_tx_desc),
   1382 		    sizeof(struct rge_tx_desc),
   1383 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1384 
   1385 		cons = RGE_NEXT_TX_DESC(idx);
   1386 		free = 1;
   1387 	}
   1388 
   1389 	if (free == 0)
   1390 		return (0);
   1391 
   1392 	sc->rge_ldata.rge_txq_considx = cons;
   1393 
   1394 #if 0
   1395 	if (ifq_is_oactive(&ifp->if_snd))
   1396 		ifq_restart(&ifp->if_snd);
   1397 	else if (free == 2)
   1398 		ifq_serialize(&ifp->if_snd, &sc->sc_task);
   1399 	else
   1400 		ifp->if_timer = 0;
   1401 #else
        	/*
        	 * XXX Restarting the transmit queue here (via rge_start() or
        	 * rge_txstart()) once descriptors have been reclaimed is not
        	 * wired up yet; just clear the watchdog timer.
        	 */
    1410 	ifp->if_timer = 0;
   1411 #endif
   1412 
   1413 	return (1);
   1414 }
   1415 
   1416 void
   1417 rge_reset(struct rge_softc *sc)
   1418 {
   1419 	int i;
   1420 
   1421 	/* Enable RXDV gate. */
   1422 	RGE_SETBIT_1(sc, RGE_PPSW, 0x08);
   1423 	DELAY(2000);
   1424 
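        	/*
        	 * Wait for the chip to report both FIFOs empty (up to
        	 * 3000 * 50us = 150ms) before issuing the soft reset.
        	 */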
   1425 	for (i = 0; i < 3000; i++) {
   1426 		DELAY(50);
   1427 		if ((RGE_READ_1(sc, RGE_MCUCMD) & (RGE_MCUCMD_RXFIFO_EMPTY |
   1428 		    RGE_MCUCMD_TXFIFO_EMPTY)) == (RGE_MCUCMD_RXFIFO_EMPTY |
   1429 		    RGE_MCUCMD_TXFIFO_EMPTY))
   1430 			break;
   1431 	}
   1432 	if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5) {
   1433 		for (i = 0; i < 3000; i++) {
   1434 			DELAY(50);
   1435 			if ((RGE_READ_2(sc, RGE_IM) & 0x0103) == 0x0103)
   1436 				break;
   1437 		}
   1438 	}
   1439 
   1440 	DELAY(2000);
   1441 
   1442 	/* Soft reset. */
   1443 	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_RESET);
   1444 
   1445 	for (i = 0; i < RGE_TIMEOUT; i++) {
   1446 		DELAY(100);
   1447 		if (!(RGE_READ_1(sc, RGE_CMD) & RGE_CMD_RESET))
   1448 			break;
   1449 	}
   1450 	if (i == RGE_TIMEOUT)
   1451 		device_printf(sc->sc_dev, "reset never completed!\n");
   1452 }
   1453 
   1454 void
   1455 rge_iff(struct rge_softc *sc)
   1456 {
   1457 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   1458 	struct ethercom *ec = &sc->sc_ec;
   1459 	struct ether_multi *enm;
   1460 	struct ether_multistep step;
   1461 	uint32_t hashes[2];
   1462 	uint32_t rxfilt;
   1463 	int h = 0;
   1464 
   1465 	rxfilt = RGE_READ_4(sc, RGE_RXCFG);
   1466 	rxfilt &= ~(RGE_RXCFG_ALLPHYS | RGE_RXCFG_MULTI);
   1467 	ifp->if_flags &= ~IFF_ALLMULTI;
   1468 
   1469 	/*
   1470 	 * Always accept frames destined to our station address.
   1471 	 * Always accept broadcast frames.
   1472 	 */
   1473 	rxfilt |= RGE_RXCFG_INDIV | RGE_RXCFG_BROAD;
   1474 
   1475 	if (ifp->if_flags & IFF_PROMISC) {
   1476  allmulti:
   1477 		ifp->if_flags |= IFF_ALLMULTI;
   1478 		rxfilt |= RGE_RXCFG_MULTI;
   1479 		if (ifp->if_flags & IFF_PROMISC)
   1480 			rxfilt |= RGE_RXCFG_ALLPHYS;
   1481 		hashes[0] = hashes[1] = 0xffffffff;
   1482 	} else {
   1483 		rxfilt |= RGE_RXCFG_MULTI;
   1484 		/* Program new filter. */
   1485 		memset(hashes, 0, sizeof(hashes));
   1486 
   1487 		ETHER_LOCK(ec);
   1488 		ETHER_FIRST_MULTI(step, ec, enm);
   1489 		while (enm != NULL) {
   1490 			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
   1491 			    ETHER_ADDR_LEN) != 0) {
   1492 			    	ETHER_UNLOCK(ec);
   1493 				goto allmulti;
   1494 			}
   1495 			h = ether_crc32_be(enm->enm_addrlo,
   1496 			    ETHER_ADDR_LEN) >> 26;
   1497 
   1498 			if (h < 32)
   1499 				hashes[0] |= (1U << h);
   1500 			else
   1501 				hashes[1] |= (1U << (h - 32));
   1502 
   1503 			ETHER_NEXT_MULTI(step, enm);
   1504 		}
   1505 		ETHER_UNLOCK(ec);
   1506 	}
   1507 
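        	/*
        	 * Hash layout: the top 6 bits of the big-endian CRC32 of each
        	 * address index a 64-bit filter.  As on other PCIe Realtek
        	 * parts, the filter is apparently written to the MAR registers
        	 * byte-swapped and with the words reversed.
        	 */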
   1508 	RGE_WRITE_4(sc, RGE_RXCFG, rxfilt);
   1509 	RGE_WRITE_4(sc, RGE_MAR0, bswap32(hashes[1]));
   1510 	RGE_WRITE_4(sc, RGE_MAR4, bswap32(hashes[0]));
   1511 }
   1512 
   1513 void
   1514 rge_set_phy_power(struct rge_softc *sc, int on)
   1515 {
   1516 	int i;
   1517 
   1518 	if (on) {
   1519 		RGE_SETBIT_1(sc, RGE_PMCH, 0xc0);
   1520 
   1521 		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN);
   1522 
   1523 		for (i = 0; i < RGE_TIMEOUT; i++) {
   1524 			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 3)
   1525 				break;
   1526 			DELAY(1000);
   1527 		}
   1528 	} else {
   1529 		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_PDOWN);
   1530 		RGE_CLRBIT_1(sc, RGE_PMCH, 0x80);
   1531 		RGE_CLRBIT_1(sc, RGE_PPSW, 0x40);
   1532 	}
   1533 }
   1534 
   1535 void
   1536 rge_phy_config(struct rge_softc *sc)
   1537 {
   1538 	/* Read microcode version. */
   1539 	rge_write_phy_ocp(sc, 0xa436, 0x801e);
   1540 	sc->rge_mcodever = rge_read_phy_ocp(sc, 0xa438);
   1541 
   1542 	switch (sc->rge_type) {
   1543 	case MAC_CFG2:
   1544 		rge_phy_config_mac_cfg2(sc);
   1545 		break;
   1546 	case MAC_CFG3:
   1547 		rge_phy_config_mac_cfg3(sc);
   1548 		break;
   1549 	case MAC_CFG4:
   1550 		rge_phy_config_mac_cfg4(sc);
   1551 		break;
   1552 	case MAC_CFG5:
   1553 		rge_phy_config_mac_cfg5(sc);
   1554 		break;
   1555 	default:
   1556 		break;	/* Can't happen. */
   1557 	}
   1558 
   1559 	rge_write_phy(sc, 0x0a5b, 0x12,
   1560 	    rge_read_phy(sc, 0x0a5b, 0x12) & ~0x8000);
   1561 
   1562 	/* Disable EEE. */
   1563 	RGE_MAC_CLRBIT(sc, 0xe040, 0x0003);
   1564 	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
   1565 		RGE_MAC_CLRBIT(sc, 0xeb62, 0x0006);
   1566 		RGE_PHY_CLRBIT(sc, 0xa432, 0x0010);
   1567 	}
   1568 	RGE_PHY_CLRBIT(sc, 0xa5d0, 0x0006);
   1569 	RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0001);
   1570 	RGE_PHY_CLRBIT(sc, 0xa6d8, 0x0010);
   1571 	RGE_PHY_CLRBIT(sc, 0xa428, 0x0080);
   1572 	RGE_PHY_CLRBIT(sc, 0xa4a2, 0x0200);
   1573 
   1574 	rge_patch_phy_mcu(sc, 1);
   1575 	RGE_MAC_CLRBIT(sc, 0xe052, 0x0001);
   1576 	RGE_PHY_CLRBIT(sc, 0xa442, 0x3000);
   1577 	RGE_PHY_CLRBIT(sc, 0xa430, 0x8000);
   1578 	rge_patch_phy_mcu(sc, 0);
   1579 }
   1580 
   1581 void
   1582 rge_phy_config_mac_cfg2(struct rge_softc *sc)
   1583 {
   1584 	uint16_t val;
   1585 	int i;
   1586 
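        	/*
        	 * The EPHY and PHY OCP writes below reproduce Realtek's vendor
        	 * initialization for this MAC revision; the register offsets
        	 * and values are undocumented magic.
        	 */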
   1587 	for (i = 0; i < nitems(rtl8125_mac_cfg2_ephy); i++)
   1588 		rge_write_ephy(sc, rtl8125_mac_cfg2_ephy[i].reg,
   1589 		    rtl8125_mac_cfg2_ephy[i].val);
   1590 
   1591 	rge_phy_config_mcu(sc, RGE_MAC_CFG2_MCODE_VER);
   1592 
   1593 	val = rge_read_phy_ocp(sc, 0xad40) & ~0x03ff;
   1594 	rge_write_phy_ocp(sc, 0xad40, val | 0x0084);
   1595 	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
   1596 	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
   1597 	rge_write_phy_ocp(sc, 0xad16, val | 0x0006);
   1598 	val = rge_read_phy_ocp(sc, 0xad32) & ~0x03ff;
   1599 	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
   1600 	RGE_PHY_CLRBIT(sc, 0xac08, 0x1100);
   1601 	val = rge_read_phy_ocp(sc, 0xac8a) & ~0xf000;
   1602 	rge_write_phy_ocp(sc, 0xac8a, val | 0x7000);
   1603 	RGE_PHY_SETBIT(sc, 0xad18, 0x0400);
   1604 	RGE_PHY_SETBIT(sc, 0xad1a, 0x03ff);
   1605 	RGE_PHY_SETBIT(sc, 0xad1c, 0x03ff);
   1606 
   1607 	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
   1608 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1609 	rge_write_phy_ocp(sc, 0xa438, val | 0xc400);
   1610 	rge_write_phy_ocp(sc, 0xa436, 0x80eb);
   1611 	val = rge_read_phy_ocp(sc, 0xa438) & ~0x0700;
   1612 	rge_write_phy_ocp(sc, 0xa438, val | 0x0300);
   1613 	rge_write_phy_ocp(sc, 0xa436, 0x80f8);
   1614 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1615 	rge_write_phy_ocp(sc, 0xa438, val | 0x1c00);
   1616 	rge_write_phy_ocp(sc, 0xa436, 0x80f1);
   1617 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1618 	rge_write_phy_ocp(sc, 0xa438, val | 0x3000);
   1619 	rge_write_phy_ocp(sc, 0xa436, 0x80fe);
   1620 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1621 	rge_write_phy_ocp(sc, 0xa438, val | 0xa500);
   1622 	rge_write_phy_ocp(sc, 0xa436, 0x8102);
   1623 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1624 	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
   1625 	rge_write_phy_ocp(sc, 0xa436, 0x8105);
   1626 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1627 	rge_write_phy_ocp(sc, 0xa438, val | 0x3300);
   1628 	rge_write_phy_ocp(sc, 0xa436, 0x8100);
   1629 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1630 	rge_write_phy_ocp(sc, 0xa438, val | 0x7000);
   1631 	rge_write_phy_ocp(sc, 0xa436, 0x8104);
   1632 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1633 	rge_write_phy_ocp(sc, 0xa438, val | 0xf000);
   1634 	rge_write_phy_ocp(sc, 0xa436, 0x8106);
   1635 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1636 	rge_write_phy_ocp(sc, 0xa438, val | 0x6500);
   1637 	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
   1638 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1639 	rge_write_phy_ocp(sc, 0xa438, val | 0xed00);
   1640 	rge_write_phy_ocp(sc, 0xa436, 0x80df);
   1641 	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
   1642 	rge_write_phy_ocp(sc, 0xa436, 0x80e1);
   1643 	RGE_PHY_CLRBIT(sc, 0xa438, 0x0100);
   1644 	val = rge_read_phy_ocp(sc, 0xbf06) & ~0x003f;
   1645 	rge_write_phy_ocp(sc, 0xbf06, val | 0x0038);
   1646 	rge_write_phy_ocp(sc, 0xa436, 0x819f);
   1647 	rge_write_phy_ocp(sc, 0xa438, 0xd0b6);
   1648 	rge_write_phy_ocp(sc, 0xbc34, 0x5555);
   1649 	val = rge_read_phy_ocp(sc, 0xbf0a) & ~0x0e00;
   1650 	rge_write_phy_ocp(sc, 0xbf0a, val | 0x0a00);
   1651 	RGE_PHY_CLRBIT(sc, 0xa5c0, 0x0400);
   1652 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
   1653 }
   1654 
   1655 void
   1656 rge_phy_config_mac_cfg3(struct rge_softc *sc)
   1657 {
   1658 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   1659 	uint16_t val;
   1660 	int i;
   1661 	static const uint16_t mac_cfg3_a438_value[] =
   1662 	    { 0x0043, 0x00a7, 0x00d6, 0x00ec, 0x00f6, 0x00fb, 0x00fd, 0x00ff,
   1663 	      0x00bb, 0x0058, 0x0029, 0x0013, 0x0009, 0x0004, 0x0002 };
   1664 
   1665 	static const uint16_t mac_cfg3_b88e_value[] =
   1666 	    { 0xc091, 0x6e12, 0xc092, 0x1214, 0xc094, 0x1516, 0xc096, 0x171b,
   1667 	      0xc098, 0x1b1c, 0xc09a, 0x1f1f, 0xc09c, 0x2021, 0xc09e, 0x2224,
   1668 	      0xc0a0, 0x2424, 0xc0a2, 0x2424, 0xc0a4, 0x2424, 0xc018, 0x0af2,
   1669 	      0xc01a, 0x0d4a, 0xc01c, 0x0f26, 0xc01e, 0x118d, 0xc020, 0x14f3,
   1670 	      0xc022, 0x175a, 0xc024, 0x19c0, 0xc026, 0x1c26, 0xc089, 0x6050,
   1671 	      0xc08a, 0x5f6e, 0xc08c, 0x6e6e, 0xc08e, 0x6e6e, 0xc090, 0x6e12 };
   1672 
   1673 	for (i = 0; i < nitems(rtl8125_mac_cfg3_ephy); i++)
   1674 		rge_write_ephy(sc, rtl8125_mac_cfg3_ephy[i].reg,
   1675 		    rtl8125_mac_cfg3_ephy[i].val);
   1676 
   1677 	val = rge_read_ephy(sc, 0x002a) & ~0x7000;
   1678 	rge_write_ephy(sc, 0x002a, val | 0x3000);
   1679 	RGE_EPHY_CLRBIT(sc, 0x0019, 0x0040);
   1680 	RGE_EPHY_SETBIT(sc, 0x001b, 0x0e00);
   1681 	RGE_EPHY_CLRBIT(sc, 0x001b, 0x7000);
   1682 	rge_write_ephy(sc, 0x0002, 0x6042);
   1683 	rge_write_ephy(sc, 0x0006, 0x0014);
   1684 	val = rge_read_ephy(sc, 0x006a) & ~0x7000;
   1685 	rge_write_ephy(sc, 0x006a, val | 0x3000);
   1686 	RGE_EPHY_CLRBIT(sc, 0x0059, 0x0040);
   1687 	RGE_EPHY_SETBIT(sc, 0x005b, 0x0e00);
   1688 	RGE_EPHY_CLRBIT(sc, 0x005b, 0x7000);
   1689 	rge_write_ephy(sc, 0x0042, 0x6042);
   1690 	rge_write_ephy(sc, 0x0046, 0x0014);
   1691 
   1692 	rge_phy_config_mcu(sc, RGE_MAC_CFG3_MCODE_VER);
   1693 
   1694 	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
   1695 	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
   1696 	rge_write_phy_ocp(sc, 0xad16, val | 0x03ff);
   1697 	val = rge_read_phy_ocp(sc, 0xad32) & ~0x003f;
   1698 	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
   1699 	RGE_PHY_CLRBIT(sc, 0xac08, 0x1000);
   1700 	RGE_PHY_CLRBIT(sc, 0xac08, 0x0100);
   1701 	val = rge_read_phy_ocp(sc, 0xacc0) & ~0x0003;
   1702 	rge_write_phy_ocp(sc, 0xacc0, val | 0x0002);
   1703 	val = rge_read_phy_ocp(sc, 0xad40) & ~0x00e0;
   1704 	rge_write_phy_ocp(sc, 0xad40, val | 0x0040);
   1705 	val = rge_read_phy_ocp(sc, 0xad40) & ~0x0007;
   1706 	rge_write_phy_ocp(sc, 0xad40, val | 0x0004);
   1707 	RGE_PHY_CLRBIT(sc, 0xac14, 0x0080);
   1708 	RGE_PHY_CLRBIT(sc, 0xac80, 0x0300);
   1709 	val = rge_read_phy_ocp(sc, 0xac5e) & ~0x0007;
   1710 	rge_write_phy_ocp(sc, 0xac5e, val | 0x0002);
   1711 	rge_write_phy_ocp(sc, 0xad4c, 0x00a8);
   1712 	rge_write_phy_ocp(sc, 0xac5c, 0x01ff);
   1713 	val = rge_read_phy_ocp(sc, 0xac8a) & ~0x00f0;
   1714 	rge_write_phy_ocp(sc, 0xac8a, val | 0x0030);
   1715 	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
   1716 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1717 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0500);
   1718 	rge_write_phy_ocp(sc, 0xb87c, 0x8159);
   1719 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1720 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0700);
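        	/*
        	 * The EEE transmit idle timer tracks the maximum frame size
        	 * (MTU + Ethernet header, plus 32 bytes of slack).
        	 */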
   1721 	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN +
   1722 	    32);
   1723 	rge_write_phy_ocp(sc, 0xb87c, 0x80a2);
   1724 	rge_write_phy_ocp(sc, 0xb87e, 0x0153);
   1725 	rge_write_phy_ocp(sc, 0xb87c, 0x809c);
   1726 	rge_write_phy_ocp(sc, 0xb87e, 0x0153);
   1727 
   1728 	rge_write_phy_ocp(sc, 0xa436, 0x81b3);
   1729 	for (i = 0; i < nitems(mac_cfg3_a438_value); i++)
   1730 		rge_write_phy_ocp(sc, 0xa438, mac_cfg3_a438_value[i]);
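        	/* Zero-fill the remainder of the 0xa438 parameter block. */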
   1731 	for (i = 0; i < 26; i++)
   1732 		rge_write_phy_ocp(sc, 0xa438, 0);
   1733 	rge_write_phy_ocp(sc, 0xa436, 0x8257);
   1734 	rge_write_phy_ocp(sc, 0xa438, 0x020f);
   1735 	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
   1736 	rge_write_phy_ocp(sc, 0xa438, 0x7843);
   1737 
   1738 	rge_patch_phy_mcu(sc, 1);
   1739 	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
   1740 	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
   1741 	for (i = 0; i < nitems(mac_cfg3_b88e_value); i += 2) {
   1742 		rge_write_phy_ocp(sc, 0xb88e, mac_cfg3_b88e_value[i]);
   1743 		rge_write_phy_ocp(sc, 0xb890, mac_cfg3_b88e_value[i + 1]);
   1744 	}
   1745 	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
   1746 	rge_patch_phy_mcu(sc, 0);
   1747 
   1748 	RGE_PHY_SETBIT(sc, 0xd068, 0x2000);
   1749 	rge_write_phy_ocp(sc, 0xa436, 0x81a2);
   1750 	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
   1751 	val = rge_read_phy_ocp(sc, 0xb54c) & ~0xff00;
   1752 	rge_write_phy_ocp(sc, 0xb54c, val | 0xdb00);
   1753 	RGE_PHY_CLRBIT(sc, 0xa454, 0x0001);
   1754 	RGE_PHY_SETBIT(sc, 0xa5d4, 0x0020);
   1755 	RGE_PHY_CLRBIT(sc, 0xad4e, 0x0010);
   1756 	RGE_PHY_CLRBIT(sc, 0xa86a, 0x0001);
   1757 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
   1758 }
   1759 
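        /*
         * PHY parameters for the MAC_CFG4 revision (apparently the first
         * RTL8125B spin); the same kind of undocumented vendor sequence
         * as above.
         */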
   1760 void
   1761 rge_phy_config_mac_cfg4(struct rge_softc *sc)
   1762 {
   1763 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   1764 	uint16_t val;
   1765 	int i;
   1766 	static const uint16_t mac_cfg4_b87c_value[] =
   1767 	    { 0x8013, 0x0700, 0x8fb9, 0x2801, 0x8fba, 0x0100, 0x8fbc, 0x1900,
   1768 	      0x8fbe, 0xe100, 0x8fc0, 0x0800, 0x8fc2, 0xe500, 0x8fc4, 0x0f00,
   1769 	      0x8fc6, 0xf100, 0x8fc8, 0x0400, 0x8fca, 0xf300, 0x8fcc, 0xfd00,
   1770 	      0x8fce, 0xff00, 0x8fd0, 0xfb00, 0x8fd2, 0x0100, 0x8fd4, 0xf400,
   1771 	      0x8fd6, 0xff00, 0x8fd8, 0xf600, 0x813d, 0x390e, 0x814f, 0x790e,
   1772 	      0x80b0, 0x0f31 };
   1773 
   1774 	for (i = 0; i < nitems(rtl8125_mac_cfg4_ephy); i++)
   1775 		rge_write_ephy(sc, rtl8125_mac_cfg4_ephy[i].reg,
   1776 		    rtl8125_mac_cfg4_ephy[i].val);
   1777 
   1778 	rge_write_phy_ocp(sc, 0xbf86, 0x9000);
   1779 	RGE_PHY_SETBIT(sc, 0xc402, 0x0400);
   1780 	RGE_PHY_CLRBIT(sc, 0xc402, 0x0400);
   1781 	rge_write_phy_ocp(sc, 0xbd86, 0x1010);
   1782 	rge_write_phy_ocp(sc, 0xbd88, 0x1010);
   1783 	val = rge_read_phy_ocp(sc, 0xbd4e) & ~0x0c00;
   1784 	rge_write_phy_ocp(sc, 0xbd4e, val | 0x0800);
   1785 	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
   1786 	rge_write_phy_ocp(sc, 0xbf46, val | 0x0700);
   1787 
   1788 	rge_phy_config_mcu(sc, RGE_MAC_CFG4_MCODE_VER);
   1789 
   1790 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
   1791 	RGE_PHY_SETBIT(sc, 0xbc08, 0x000c);
   1792 	rge_write_phy_ocp(sc, 0xa436, 0x8fff);
   1793 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1794 	rge_write_phy_ocp(sc, 0xa438, val | 0x0400);
   1795 	for (i = 0; i < 6; i++) {
   1796 		rge_write_phy_ocp(sc, 0xb87c, 0x8560 + i * 2);
   1797 		if (i < 3)
   1798 			rge_write_phy_ocp(sc, 0xb87e, 0x19cc);
   1799 		else
   1800 			rge_write_phy_ocp(sc, 0xb87e, 0x147d);
   1801 	}
   1802 	rge_write_phy_ocp(sc, 0xb87c, 0x8ffe);
   1803 	rge_write_phy_ocp(sc, 0xb87e, 0x0907);
   1804 	val = rge_read_phy_ocp(sc, 0xacda) & ~0xff00;
   1805 	rge_write_phy_ocp(sc, 0xacda, val | 0xff00);
   1806 	val = rge_read_phy_ocp(sc, 0xacde) & ~0xf000;
   1807 	rge_write_phy_ocp(sc, 0xacde, val | 0xf000);
   1808 	rge_write_phy_ocp(sc, 0xb87c, 0x80d6);
   1809 	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
   1810 	rge_write_phy_ocp(sc, 0xb87c, 0x80f2);
   1811 	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
   1812 	rge_write_phy_ocp(sc, 0xb87c, 0x80f4);
   1813 	rge_write_phy_ocp(sc, 0xb87e, 0x6077);
   1814 	rge_write_phy_ocp(sc, 0xb506, 0x01e7);
   1815 	rge_write_phy_ocp(sc, 0xac8c, 0x0ffc);
   1816 	rge_write_phy_ocp(sc, 0xac46, 0xb7b4);
   1817 	rge_write_phy_ocp(sc, 0xac50, 0x0fbc);
   1818 	rge_write_phy_ocp(sc, 0xac3c, 0x9240);
   1819 	rge_write_phy_ocp(sc, 0xac4e, 0x0db4);
   1820 	rge_write_phy_ocp(sc, 0xacc6, 0x0707);
   1821 	rge_write_phy_ocp(sc, 0xacc8, 0xa0d3);
   1822 	rge_write_phy_ocp(sc, 0xad08, 0x0007);
   1823 	for (i = 0; i < nitems(mac_cfg4_b87c_value); i += 2) {
   1824 		rge_write_phy_ocp(sc, 0xb87c, mac_cfg4_b87c_value[i]);
   1825 		rge_write_phy_ocp(sc, 0xb87e, mac_cfg4_b87c_value[i + 1]);
   1826 	}
   1827 	RGE_PHY_SETBIT(sc, 0xbf4c, 0x0002);
   1828 	RGE_PHY_SETBIT(sc, 0xbcca, 0x0300);
   1829 	rge_write_phy_ocp(sc, 0xb87c, 0x8141);
   1830 	rge_write_phy_ocp(sc, 0xb87e, 0x320e);
   1831 	rge_write_phy_ocp(sc, 0xb87c, 0x8153);
   1832 	rge_write_phy_ocp(sc, 0xb87e, 0x720e);
   1833 	RGE_PHY_CLRBIT(sc, 0xa432, 0x0040);
   1834 	rge_write_phy_ocp(sc, 0xb87c, 0x8529);
   1835 	rge_write_phy_ocp(sc, 0xb87e, 0x050e);
   1836 	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN +
   1837 	    32);
   1838 	rge_write_phy_ocp(sc, 0xa436, 0x816c);
   1839 	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
   1840 	rge_write_phy_ocp(sc, 0xa436, 0x8170);
   1841 	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
   1842 	rge_write_phy_ocp(sc, 0xa436, 0x8174);
   1843 	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
   1844 	rge_write_phy_ocp(sc, 0xa436, 0x8178);
   1845 	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
   1846 	rge_write_phy_ocp(sc, 0xa436, 0x817c);
   1847 	rge_write_phy_ocp(sc, 0xa438, 0x0719);
   1848 	rge_write_phy_ocp(sc, 0xa436, 0x8ff4);
   1849 	rge_write_phy_ocp(sc, 0xa438, 0x0400);
   1850 	rge_write_phy_ocp(sc, 0xa436, 0x8ff1);
   1851 	rge_write_phy_ocp(sc, 0xa438, 0x0404);
   1852 	rge_write_phy_ocp(sc, 0xbf4a, 0x001b);
   1853 	for (i = 0; i < 6; i++) {
   1854 		rge_write_phy_ocp(sc, 0xb87c, 0x8033 + i * 4);
   1855 		if (i == 2)
   1856 			rge_write_phy_ocp(sc, 0xb87e, 0xfc32);
   1857 		else
   1858 			rge_write_phy_ocp(sc, 0xb87e, 0x7c13);
   1859 	}
   1860 	rge_write_phy_ocp(sc, 0xb87c, 0x8145);
   1861 	rge_write_phy_ocp(sc, 0xb87e, 0x370e);
   1862 	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
   1863 	rge_write_phy_ocp(sc, 0xb87e, 0x770e);
   1864 	rge_write_phy_ocp(sc, 0xb87c, 0x8169);
   1865 	rge_write_phy_ocp(sc, 0xb87e, 0x0d0a);
   1866 	rge_write_phy_ocp(sc, 0xb87c, 0x817b);
   1867 	rge_write_phy_ocp(sc, 0xb87e, 0x1d0a);
   1868 	rge_write_phy_ocp(sc, 0xa436, 0x8217);
   1869 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1870 	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
   1871 	rge_write_phy_ocp(sc, 0xa436, 0x821a);
   1872 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1873 	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
   1874 	rge_write_phy_ocp(sc, 0xa436, 0x80da);
   1875 	rge_write_phy_ocp(sc, 0xa438, 0x0403);
   1876 	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
   1877 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1878 	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
   1879 	rge_write_phy_ocp(sc, 0xa436, 0x80b3);
   1880 	rge_write_phy_ocp(sc, 0xa438, 0x0384);
   1881 	rge_write_phy_ocp(sc, 0xa436, 0x80b7);
   1882 	rge_write_phy_ocp(sc, 0xa438, 0x2007);
   1883 	rge_write_phy_ocp(sc, 0xa436, 0x80ba);
   1884 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1885 	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
   1886 	rge_write_phy_ocp(sc, 0xa436, 0x80b5);
   1887 	rge_write_phy_ocp(sc, 0xa438, 0xf009);
   1888 	rge_write_phy_ocp(sc, 0xa436, 0x80bd);
   1889 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1890 	rge_write_phy_ocp(sc, 0xa438, val | 0x9f00);
   1891 	rge_write_phy_ocp(sc, 0xa436, 0x80c7);
   1892 	rge_write_phy_ocp(sc, 0xa438, 0xf083);
   1893 	rge_write_phy_ocp(sc, 0xa436, 0x80dd);
   1894 	rge_write_phy_ocp(sc, 0xa438, 0x03f0);
   1895 	rge_write_phy_ocp(sc, 0xa436, 0x80df);
   1896 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1897 	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
   1898 	rge_write_phy_ocp(sc, 0xa436, 0x80cb);
   1899 	rge_write_phy_ocp(sc, 0xa438, 0x2007);
   1900 	rge_write_phy_ocp(sc, 0xa436, 0x80ce);
   1901 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1902 	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
   1903 	rge_write_phy_ocp(sc, 0xa436, 0x80c9);
   1904 	rge_write_phy_ocp(sc, 0xa438, 0x8009);
   1905 	rge_write_phy_ocp(sc, 0xa436, 0x80d1);
   1906 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1907 	rge_write_phy_ocp(sc, 0xa438, val | 0x8000);
   1908 	rge_write_phy_ocp(sc, 0xa436, 0x80a3);
   1909 	rge_write_phy_ocp(sc, 0xa438, 0x200a);
   1910 	rge_write_phy_ocp(sc, 0xa436, 0x80a5);
   1911 	rge_write_phy_ocp(sc, 0xa438, 0xf0ad);
   1912 	rge_write_phy_ocp(sc, 0xa436, 0x809f);
   1913 	rge_write_phy_ocp(sc, 0xa438, 0x6073);
   1914 	rge_write_phy_ocp(sc, 0xa436, 0x80a1);
   1915 	rge_write_phy_ocp(sc, 0xa438, 0x000b);
   1916 	rge_write_phy_ocp(sc, 0xa436, 0x80a9);
   1917 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1918 	rge_write_phy_ocp(sc, 0xa438, val | 0xc000);
   1919 	rge_patch_phy_mcu(sc, 1);
   1920 	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
   1921 	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
   1922 	rge_write_phy_ocp(sc, 0xb88e, 0xc23e);
   1923 	rge_write_phy_ocp(sc, 0xb890, 0x0000);
   1924 	rge_write_phy_ocp(sc, 0xb88e, 0xc240);
   1925 	rge_write_phy_ocp(sc, 0xb890, 0x0103);
   1926 	rge_write_phy_ocp(sc, 0xb88e, 0xc242);
   1927 	rge_write_phy_ocp(sc, 0xb890, 0x0507);
   1928 	rge_write_phy_ocp(sc, 0xb88e, 0xc244);
   1929 	rge_write_phy_ocp(sc, 0xb890, 0x090b);
   1930 	rge_write_phy_ocp(sc, 0xb88e, 0xc246);
   1931 	rge_write_phy_ocp(sc, 0xb890, 0x0c0e);
   1932 	rge_write_phy_ocp(sc, 0xb88e, 0xc248);
   1933 	rge_write_phy_ocp(sc, 0xb890, 0x1012);
   1934 	rge_write_phy_ocp(sc, 0xb88e, 0xc24a);
   1935 	rge_write_phy_ocp(sc, 0xb890, 0x1416);
   1936 	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
   1937 	rge_patch_phy_mcu(sc, 0);
   1938 	RGE_PHY_SETBIT(sc, 0xa86a, 0x0001);
   1939 	RGE_PHY_SETBIT(sc, 0xa6f0, 0x0001);
   1940 	rge_write_phy_ocp(sc, 0xbfa0, 0xd70d);
   1941 	rge_write_phy_ocp(sc, 0xbfa2, 0x4100);
   1942 	rge_write_phy_ocp(sc, 0xbfa4, 0xe868);
   1943 	rge_write_phy_ocp(sc, 0xbfa6, 0xdc59);
   1944 	rge_write_phy_ocp(sc, 0xb54c, 0x3c18);
   1945 	RGE_PHY_CLRBIT(sc, 0xbfa4, 0x0020);
   1946 	rge_write_phy_ocp(sc, 0xa436, 0x817d);
   1947 	RGE_PHY_SETBIT(sc, 0xa438, 0x1000);
   1948 }
   1949 
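        /*
         * PHY parameters for the MAC_CFG5 revision; a shorter vendor
         * sequence than the earlier revisions require.
         */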
   1950 void
   1951 rge_phy_config_mac_cfg5(struct rge_softc *sc)
   1952 {
   1953 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   1954 	uint16_t val;
   1955 	int i;
   1956 
   1957 	for (i = 0; i < nitems(rtl8125_mac_cfg5_ephy); i++)
   1958 		rge_write_ephy(sc, rtl8125_mac_cfg5_ephy[i].reg,
   1959 		    rtl8125_mac_cfg5_ephy[i].val);
   1960 
   1961 	val = rge_read_ephy(sc, 0x0022) & ~0x0030;
   1962 	rge_write_ephy(sc, 0x0022, val | 0x0020);
   1963 	val = rge_read_ephy(sc, 0x0062) & ~0x0030;
   1964 	rge_write_ephy(sc, 0x0062, val | 0x0020);
   1965 
   1966 	rge_phy_config_mcu(sc, RGE_MAC_CFG5_MCODE_VER);
   1967 
   1968 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
   1969 	val = rge_read_phy_ocp(sc, 0xac46) & ~0x00f0;
   1970 	rge_write_phy_ocp(sc, 0xac46, val | 0x0090);
   1971 	val = rge_read_phy_ocp(sc, 0xad30) & ~0x0003;
   1972 	rge_write_phy_ocp(sc, 0xad30, val | 0x0001);
   1973 	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN +
   1974 	    32);
   1975 	rge_write_phy_ocp(sc, 0xb87c, 0x80f5);
   1976 	rge_write_phy_ocp(sc, 0xb87e, 0x760e);
   1977 	rge_write_phy_ocp(sc, 0xb87c, 0x8107);
   1978 	rge_write_phy_ocp(sc, 0xb87e, 0x360e);
   1979 	rge_write_phy_ocp(sc, 0xb87c, 0x8551);
   1980 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1981 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0800);
   1982 	val = rge_read_phy_ocp(sc, 0xbf00) & ~0xe000;
   1983 	rge_write_phy_ocp(sc, 0xbf00, val | 0xa000);
   1984 	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
   1985 	rge_write_phy_ocp(sc, 0xbf46, val | 0x0300);
   1986 	for (i = 0; i < 10; i++) {
   1987 		rge_write_phy_ocp(sc, 0xa436, 0x8044 + i * 6);
   1988 		rge_write_phy_ocp(sc, 0xa438, 0x2417);
   1989 	}
   1990 	RGE_PHY_SETBIT(sc, 0xa4ca, 0x0040);
   1991 	val = rge_read_phy_ocp(sc, 0xbf84) & ~0xe000;
   1992 	rge_write_phy_ocp(sc, 0xbf84, val | 0xa000);
   1993 }
   1994 
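        /*
         * Load the PHY MCU patch if the version already programmed into the
         * PHY differs from the one the driver carries.  The new version is
         * recorded at PHY parameter address 0x801e afterwards, so the patch
         * is not reapplied needlessly on a warm reinit.
         */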
   1995 void
   1996 rge_phy_config_mcu(struct rge_softc *sc, uint16_t mcode_version)
   1997 {
   1998 	if (sc->rge_mcodever != mcode_version) {
   1999 		int i;
   2000 
   2001 		rge_patch_phy_mcu(sc, 1);
   2002 
   2003 		if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
   2004 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
   2005 			if (sc->rge_type == MAC_CFG2)
   2006 				rge_write_phy_ocp(sc, 0xa438, 0x8600);
   2007 			else
   2008 				rge_write_phy_ocp(sc, 0xa438, 0x8601);
   2009 			rge_write_phy_ocp(sc, 0xa436, 0xb82e);
   2010 			rge_write_phy_ocp(sc, 0xa438, 0x0001);
   2011 
   2012 			RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
   2013 		}
   2014 
   2015 		if (sc->rge_type == MAC_CFG2) {
   2016 			for (i = 0; i < nitems(rtl8125_mac_cfg2_mcu); i++) {
   2017 				rge_write_phy_ocp(sc,
   2018 				    rtl8125_mac_cfg2_mcu[i].reg,
   2019 				    rtl8125_mac_cfg2_mcu[i].val);
   2020 			}
   2021 		} else if (sc->rge_type == MAC_CFG3) {
   2022 			for (i = 0; i < nitems(rtl8125_mac_cfg3_mcu); i++) {
   2023 				rge_write_phy_ocp(sc,
   2024 				    rtl8125_mac_cfg3_mcu[i].reg,
   2025 				    rtl8125_mac_cfg3_mcu[i].val);
   2026 			}
   2027 		} else if (sc->rge_type == MAC_CFG4) {
   2028 			for (i = 0; i < nitems(rtl8125_mac_cfg4_mcu); i++) {
   2029 				rge_write_phy_ocp(sc,
   2030 				    rtl8125_mac_cfg4_mcu[i].reg,
   2031 				    rtl8125_mac_cfg4_mcu[i].val);
   2032 			}
   2033 		} else if (sc->rge_type == MAC_CFG5) {
   2034 			for (i = 0; i < nitems(rtl8125_mac_cfg5_mcu); i++) {
   2035 				rge_write_phy_ocp(sc,
   2036 				    rtl8125_mac_cfg5_mcu[i].reg,
   2037 				    rtl8125_mac_cfg5_mcu[i].val);
   2038 			}
   2039 		}
   2040 
   2041 		if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
   2042 			RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);
   2043 
   2044 			rge_write_phy_ocp(sc, 0xa436, 0);
   2045 			rge_write_phy_ocp(sc, 0xa438, 0);
   2046 			RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
   2047 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
   2048 			rge_write_phy_ocp(sc, 0xa438, 0);
   2049 		}
   2050 
   2051 		rge_patch_phy_mcu(sc, 0);
   2052 
   2053 		/* Write microcode version. */
   2054 		rge_write_phy_ocp(sc, 0xa436, 0x801e);
   2055 		rge_write_phy_ocp(sc, 0xa438, mcode_version);
   2056 	}
   2057 }
   2058 
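        /*
         * The station address registers are write-protected; unlock them
         * with RGE_EECMD_WRITECFG around the update.
         */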
   2059 void
   2060 rge_set_macaddr(struct rge_softc *sc, const uint8_t *addr)
   2061 {
   2062 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
   2063 	RGE_WRITE_4(sc, RGE_MAC0,
   2064 	    (uint32_t)addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
   2065 	RGE_WRITE_4(sc, RGE_MAC4,
   2066 	    addr[5] <<  8 | addr[4]);
   2067 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
   2068 }
   2069 
   2070 void
   2071 rge_get_macaddr(struct rge_softc *sc, uint8_t *addr)
   2072 {
   2073 	int i;
   2074 
   2075 	for (i = 0; i < ETHER_ADDR_LEN; i++)
   2076 		addr[i] = RGE_READ_1(sc, RGE_ADDR0 + i);
   2077 }
   2078 
   2079 void
   2080 rge_hw_init(struct rge_softc *sc)
   2081 {
   2082 	int i;
   2083 
   2084 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
   2085 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
   2086 	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
   2087 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
   2088 	RGE_CLRBIT_1(sc, 0xf1, 0x80);
   2089 
   2090 	/* Disable UPS. */
   2091 	RGE_MAC_CLRBIT(sc, 0xd40a, 0x0010);
   2092 
   2093 	/* Configure MAC MCU. */
   2094 	rge_write_mac_ocp(sc, 0xfc38, 0);
   2095 
   2096 	for (i = 0xfc28; i < 0xfc38; i += 2)
   2097 		rge_write_mac_ocp(sc, i, 0);
   2098 
   2099 	DELAY(3000);
   2100 	rge_write_mac_ocp(sc, 0xfc26, 0);
   2101 
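        	/* Load the per-revision MAC MCU patch tables, where required. */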
   2102 	if (sc->rge_type == MAC_CFG3) {
   2103 		for (i = 0; i < nitems(rtl8125_mac_bps); i++) {
   2104 			rge_write_mac_ocp(sc, rtl8125_mac_bps[i].reg,
   2105 			    rtl8125_mac_bps[i].val);
   2106 		}
   2107 	} else if (sc->rge_type == MAC_CFG5) {
   2108 		for (i = 0; i < nitems(rtl8125b_mac_bps); i++) {
   2109 			rge_write_mac_ocp(sc, rtl8125b_mac_bps[i].reg,
   2110 			    rtl8125b_mac_bps[i].val);
   2111 		}
   2112 	}
   2113 
   2114 	/* Disable PHY power saving. */
   2115 	rge_disable_phy_ocp_pwrsave(sc);
   2116 
   2117 	/* Set PCIe uncorrectable error status. */
   2118 	rge_write_csi(sc, 0x108,
   2119 	    rge_read_csi(sc, 0x108) | 0x00100000);
   2120 }
   2121 
   2122 void
   2123 rge_disable_phy_ocp_pwrsave(struct rge_softc *sc)
   2124 {
   2125 	if (rge_read_phy_ocp(sc, 0xc416) != 0x0500) {
   2126 		rge_patch_phy_mcu(sc, 1);
   2127 		rge_write_phy_ocp(sc, 0xc416, 0);
   2128 		rge_write_phy_ocp(sc, 0xc416, 0x0500);
   2129 		rge_patch_phy_mcu(sc, 0);
   2130 	}
   2131 }
   2132 
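        /*
         * Assert (set != 0) or release the PHY MCU patch request
         * (OCP 0xb820 bit 4) and poll the handshake flag in OCP register
         * 0xb800 until the PHY MCU acknowledges the request.
         */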
   2133 void
   2134 rge_patch_phy_mcu(struct rge_softc *sc, int set)
   2135 {
   2136 	int i;
   2137 
   2138 	if (set)
   2139 		RGE_PHY_SETBIT(sc, 0xb820, 0x0010);
   2140 	else
   2141 		RGE_PHY_CLRBIT(sc, 0xb820, 0x0010);
   2142 
   2143 	for (i = 0; i < 1000; i++) {
   2144 		if ((rge_read_phy_ocp(sc, 0xb800) & 0x0040) == 0x0040)
   2145 			break;
   2146 		DELAY(100);
   2147 	}
   2148 	if (i == 1000) {
   2149 		DPRINTF(("timeout waiting to patch phy mcu\n"));
   2150 		return;
   2151 	}
   2152 }
   2153 
   2154 void
   2155 rge_add_media_types(struct rge_softc *sc)
   2156 {
   2157 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
   2158 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
   2159 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
   2160 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
   2161 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T, 0, NULL);
   2162 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
   2163 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
   2164 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX, 0, NULL);
   2165 }
   2166 
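        /*
         * Interrupt moderation setup: RGE_IMTYPE_NONE takes and acks an
         * interrupt per event, while RGE_IMTYPE_SIM batches completions
         * behind the simulated (timer-driven) PCS timeout interrupt.
         */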
   2167 void
   2168 rge_config_imtype(struct rge_softc *sc, int imtype)
   2169 {
   2170 	switch (imtype) {
   2171 	case RGE_IMTYPE_NONE:
   2172 		sc->rge_intrs = RGE_INTRS;
   2173 		sc->rge_rx_ack = RGE_ISR_RX_OK | RGE_ISR_RX_DESC_UNAVAIL |
   2174 		    RGE_ISR_RX_FIFO_OFLOW;
   2175 		sc->rge_tx_ack = RGE_ISR_TX_OK;
   2176 		break;
   2177 	case RGE_IMTYPE_SIM:
   2178 		sc->rge_intrs = RGE_INTRS_TIMER;
   2179 		sc->rge_rx_ack = RGE_ISR_PCS_TIMEOUT;
   2180 		sc->rge_tx_ack = RGE_ISR_PCS_TIMEOUT;
   2181 		break;
   2182 	default:
   2183 		panic("%s: unknown imtype %d", device_xname(sc->sc_dev), imtype);
   2184 	}
   2185 }
   2186 
   2187 void
   2188 rge_disable_hw_im(struct rge_softc *sc)
   2189 {
   2190 	RGE_WRITE_2(sc, RGE_IM, 0);
   2191 }
   2192 
   2193 void
   2194 rge_disable_sim_im(struct rge_softc *sc)
   2195 {
   2196 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
   2197 	sc->rge_timerintr = 0;
   2198 }
   2199 
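        /*
         * Arm the simulated interrupt moderation timer.  The 0x2600 tick
         * count appears to be inherited from the vendor driver.
         */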
   2200 void
   2201 rge_setup_sim_im(struct rge_softc *sc)
   2202 {
   2203 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0x2600);
   2204 	RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
   2205 	sc->rge_timerintr = 1;
   2206 }
   2207 
   2208 void
   2209 rge_setup_intr(struct rge_softc *sc, int imtype)
   2210 {
   2211 	rge_config_imtype(sc, imtype);
   2212 
   2213 	/* Enable interrupts. */
   2214 	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
   2215 
   2216 	switch (imtype) {
   2217 	case RGE_IMTYPE_NONE:
   2218 		rge_disable_sim_im(sc);
   2219 		rge_disable_hw_im(sc);
   2220 		break;
   2221 	case RGE_IMTYPE_SIM:
   2222 		rge_disable_hw_im(sc);
   2223 		rge_setup_sim_im(sc);
   2224 		break;
   2225 	default:
   2226 		panic("%s: unknown imtype %d", device_xname(sc->sc_dev), imtype);
   2227 	}
   2228 }
   2229 
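        /*
         * Take the chip out of its out-of-band (firmware-managed) state so
         * the host driver owns the MAC, and recover from a pending UPS
         * resume if the firmware left one behind.
         */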
   2230 void
   2231 rge_exit_oob(struct rge_softc *sc)
   2232 {
   2233 	int i;
   2234 
   2235 	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
   2236 	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
   2237 	    RGE_RXCFG_ERRPKT);
   2238 
   2239 	/* Disable RealWoW. */
   2240 	rge_write_mac_ocp(sc, 0xc0bc, 0x00ff);
   2241 
   2242 	rge_reset(sc);
   2243 
   2244 	/* Disable OOB. */
   2245 	RGE_CLRBIT_1(sc, RGE_MCUCMD, RGE_MCUCMD_IS_OOB);
   2246 
   2247 	RGE_MAC_CLRBIT(sc, 0xe8de, 0x4000);
   2248 
   2249 	for (i = 0; i < 10; i++) {
   2250 		DELAY(100);
   2251 		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
   2252 			break;
   2253 	}
   2254 
   2255 	rge_write_mac_ocp(sc, 0xc0aa, 0x07d0);
   2256 	rge_write_mac_ocp(sc, 0xc0a6, 0x01b5);
   2257 	rge_write_mac_ocp(sc, 0xc01e, 0x5555);
   2258 
   2259 	for (i = 0; i < 10; i++) {
   2260 		DELAY(100);
   2261 		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
   2262 			break;
   2263 	}
   2264 
   2265 	if (rge_read_mac_ocp(sc, 0xd42c) & 0x0100) {
   2266 		printf("%s: UPS resume detected\n",
   2267 		    device_xname(sc->sc_dev));
   2268 		for (i = 0; i < RGE_TIMEOUT; i++) {
   2269 			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 2)
   2270 				break;
   2271 			DELAY(1000);
   2272 		}
   2273 		RGE_MAC_CLRBIT(sc, 0xd408, 0x0100);
   2274 		if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5)
   2275 			RGE_PHY_CLRBIT(sc, 0xa466, 0x0001);
   2276 		RGE_PHY_CLRBIT(sc, 0xa468, 0x000a);
   2277 	}
   2278 }
   2279 
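        /*
         * CSI access: an indirect window into the device's PCIe
         * configuration space.  Note the flag polarity: writes poll for
         * RGE_CSIAR_BUSY to clear, while reads poll for it to be raised,
         * which signals that the read data is valid.
         */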
   2280 void
   2281 rge_write_csi(struct rge_softc *sc, uint32_t reg, uint32_t val)
   2282 {
   2283 	int i;
   2284 
   2285 	RGE_WRITE_4(sc, RGE_CSIDR, val);
   2286 	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
   2287 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT) | RGE_CSIAR_BUSY);
   2288 
   2289 	for (i = 0; i < 10; i++) {
   2290 		DELAY(100);
   2291 		if (!(RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY))
   2292 			break;
   2293 	}
   2294 
   2295 	DELAY(20);
   2296 }
   2297 
   2298 uint32_t
   2299 rge_read_csi(struct rge_softc *sc, uint32_t reg)
   2300 {
   2301 	int i;
   2302 
   2303 	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
   2304 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT));
   2305 
   2306 	for (i = 0; i < 10; i++) {
   2307 		DELAY(100);
   2308 		if (RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY)
   2309 			break;
   2310 	}
   2311 
   2312 	DELAY(20);
   2313 
   2314 	return (RGE_READ_4(sc, RGE_CSIDR));
   2315 }
   2316 
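        /*
         * MAC OCP access: the word-aligned register address is packed into
         * the upper half of the RGE_MACOCP mailbox together with the data.
         */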
   2317 void
   2318 rge_write_mac_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
   2319 {
   2320 	uint32_t tmp;
   2321 
   2322 	tmp = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
   2323 	tmp += val;
   2324 	tmp |= RGE_MACOCP_BUSY;
   2325 	RGE_WRITE_4(sc, RGE_MACOCP, tmp);
   2326 }
   2327 
   2328 uint16_t
   2329 rge_read_mac_ocp(struct rge_softc *sc, uint16_t reg)
   2330 {
   2331 	uint32_t val;
   2332 
   2333 	val = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
   2334 	RGE_WRITE_4(sc, RGE_MACOCP, val);
   2335 
   2336 	return (RGE_READ_4(sc, RGE_MACOCP) & RGE_MACOCP_DATA_MASK);
   2337 }
   2338 
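        /*
         * EPHY (PCIe PHY) register access, with the same inverted flag
         * polarity on reads as the CSI window above.
         */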
   2339 void
   2340 rge_write_ephy(struct rge_softc *sc, uint16_t reg, uint16_t val)
   2341 {
   2342 	uint32_t tmp;
   2343 	int i;
   2344 
   2345 	tmp = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
   2346 	tmp |= RGE_EPHYAR_BUSY | (val & RGE_EPHYAR_DATA_MASK);
   2347 	RGE_WRITE_4(sc, RGE_EPHYAR, tmp);
   2348 
   2349 	for (i = 0; i < 10; i++) {
   2350 		DELAY(100);
   2351 		if (!(RGE_READ_4(sc, RGE_EPHYAR) & RGE_EPHYAR_BUSY))
   2352 			break;
   2353 	}
   2354 
   2355 	DELAY(20);
   2356 }
   2357 
   2358 uint16_t
   2359 rge_read_ephy(struct rge_softc *sc, uint16_t reg)
   2360 {
   2361 	uint32_t val;
   2362 	int i;
   2363 
   2364 	val = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
   2365 	RGE_WRITE_4(sc, RGE_EPHYAR, val);
   2366 
   2367 	for (i = 0; i < 10; i++) {
   2368 		DELAY(100);
   2369 		val = RGE_READ_4(sc, RGE_EPHYAR);
   2370 		if (val & RGE_EPHYAR_BUSY)
   2371 			break;
   2372 	}
   2373 
   2374 	DELAY(20);
   2375 
   2376 	return (val & RGE_EPHYAR_DATA_MASK);
   2377 }
   2378 
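        /*
         * Map a (page, register) style PHY address onto the flat OCP space.
         * For addr == 0 this reduces to
         *	ocp = (RGE_PHYBASE + reg / 8) * 16 + (reg % 8) * 2,
         * i.e. eight 16-bit registers per OCP page.
         */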
   2379 void
   2380 rge_write_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg, uint16_t val)
   2381 {
   2382 	uint16_t off, phyaddr;
   2383 
   2384 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
   2385 	phyaddr <<= 4;
   2386 
   2387 	off = addr ? reg : 0x10 + (reg % 8);
   2388 
   2389 	phyaddr += (off - 16) << 1;
   2390 
   2391 	rge_write_phy_ocp(sc, phyaddr, val);
   2392 }
   2393 
   2394 uint16_t
   2395 rge_read_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg)
   2396 {
   2397 	uint16_t off, phyaddr;
   2398 
   2399 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
   2400 	phyaddr <<= 4;
   2401 
   2402 	off = addr ? reg : 0x10 + (reg % 8);
   2403 
   2404 	phyaddr += (off - 16) << 1;
   2405 
   2406 	return (rge_read_phy_ocp(sc, phyaddr));
   2407 }
   2408 
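        /*
         * PHY OCP mailbox access; as with CSI and EPHY, reads complete when
         * the hardware raises the busy/valid flag.
         */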
   2409 void
   2410 rge_write_phy_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
   2411 {
   2412 	uint32_t tmp;
   2413 	int i;
   2414 
   2415 	tmp = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
   2416 	tmp |= RGE_PHYOCP_BUSY | val;
   2417 	RGE_WRITE_4(sc, RGE_PHYOCP, tmp);
   2418 
   2419 	for (i = 0; i < RGE_TIMEOUT; i++) {
   2420 		DELAY(1);
   2421 		if (!(RGE_READ_4(sc, RGE_PHYOCP) & RGE_PHYOCP_BUSY))
   2422 			break;
   2423 	}
   2424 }
   2425 
   2426 uint16_t
   2427 rge_read_phy_ocp(struct rge_softc *sc, uint16_t reg)
   2428 {
   2429 	uint32_t val;
   2430 	int i;
   2431 
   2432 	val = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
   2433 	RGE_WRITE_4(sc, RGE_PHYOCP, val);
   2434 
   2435 	for (i = 0; i < RGE_TIMEOUT; i++) {
   2436 		DELAY(1);
   2437 		val = RGE_READ_4(sc, RGE_PHYOCP);
   2438 		if (val & RGE_PHYOCP_BUSY)
   2439 			break;
   2440 	}
   2441 
   2442 	return (val & RGE_PHYOCP_DATA_MASK);
   2443 }
   2444 
   2445 int
   2446 rge_get_link_status(struct rge_softc *sc)
   2447 {
   2448 	return ((RGE_READ_2(sc, RGE_PHYSTAT) & RGE_PHYSTAT_LINK) ? 1 : 0);
   2449 }
   2450 
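        /* Ring the TX doorbell; runs as a deferred workqueue task. */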
   2451 void
   2452 rge_txstart(struct work *wk, void *arg)
   2453 {
   2454 	struct rge_softc *sc = arg;
   2455 
   2456 	RGE_WRITE_2(sc, RGE_TXSTART, RGE_TXSTART_START);
   2457 }
   2458 
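        /* Per-second callout: sample the link state and reschedule. */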
   2459 void
   2460 rge_tick(void *arg)
   2461 {
   2462 	struct rge_softc *sc = arg;
   2463 	int s;
   2464 
   2465 	s = splnet();
   2466 	rge_link_state(sc);
   2467 	splx(s);
   2468 
   2469 	callout_schedule(&sc->sc_timeout, hz);
   2470 }
   2471 
   2472 void
   2473 rge_link_state(struct rge_softc *sc)
   2474 {
   2475 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   2476 	int link = LINK_STATE_DOWN;
   2477 
   2478 	if (rge_get_link_status(sc))
   2479 		link = LINK_STATE_UP;
   2480 
   2481 	if (ifp->if_link_state != link) { /* XXX unlocked access */
   2482 		if_link_state_change(ifp, link);
   2483 	}
   2484 }
   2485