/* Home | History | Annotate | Line # | Download | only in pci */
      1 /*	$NetBSD: if_rge.c,v 1.34 2025/02/04 23:55:23 jmcneill Exp $	*/
      2 /*	$OpenBSD: if_rge.c,v 1.9 2020/12/12 11:48:53 jan Exp $	*/
      3 
      4 /*
      5  * Copyright (c) 2019, 2020 Kevin Lo <kevlo (at) openbsd.org>
      6  *
      7  * Permission to use, copy, modify, and distribute this software for any
      8  * purpose with or without fee is hereby granted, provided that the above
      9  * copyright notice and this permission notice appear in all copies.
     10  *
     11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
     12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
     13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
     14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
     15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
     16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
     17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
     18  */
     19 
     20 #include <sys/cdefs.h>
     21 __KERNEL_RCSID(0, "$NetBSD: if_rge.c,v 1.34 2025/02/04 23:55:23 jmcneill Exp $");
     22 
     23 #if defined(_KERNEL_OPT)
     24 #include "opt_net_mpsafe.h"
     25 #endif
     26 
     27 #include <sys/types.h>
     28 
     29 #include <sys/param.h>
     30 #include <sys/systm.h>
     31 #include <sys/sockio.h>
     32 #include <sys/mbuf.h>
     33 #include <sys/kernel.h>
     34 #include <sys/socket.h>
     35 #include <sys/device.h>
     36 #include <sys/endian.h>
     37 #include <sys/callout.h>
     38 #include <sys/workqueue.h>
     39 
     40 #include <net/if.h>
     41 
     42 #include <net/if_dl.h>
     43 #include <net/if_ether.h>
     44 
     45 #include <net/if_media.h>
     46 
     47 #include <netinet/in.h>
     48 #include <net/if_ether.h>
     49 
     50 #include <net/bpf.h>
     51 
     52 #include <sys/bus.h>
     53 #include <machine/intr.h>
     54 
     55 #include <dev/mii/mii.h>
     56 
     57 #include <dev/pci/pcivar.h>
     58 #include <dev/pci/pcireg.h>
     59 #include <dev/pci/pcidevs.h>
     60 
     61 #include <dev/pci/if_rgereg.h>
     62 
     63 #ifdef __NetBSD__
     64 #define letoh32 	htole32
     65 #define nitems(x) 	__arraycount(x)
     66 
     67 static struct mbuf *
     68 MCLGETL(struct rge_softc *sc __unused, int how,
     69     u_int size)
     70 {
     71 	struct mbuf *m;
     72 
     73 	MGETHDR(m, how, MT_DATA);
     74 	if (m == NULL)
     75 		return NULL;
     76 
     77 	MEXTMALLOC(m, size, how);
     78 	if ((m->m_flags & M_EXT) == 0) {
     79 		m_freem(m);
     80 		return NULL;
     81 	}
     82 	return m;
     83 }
     84 
     85 #ifdef NET_MPSAFE
     86 #define 	RGE_MPSAFE	1
     87 #define 	CALLOUT_FLAGS	CALLOUT_MPSAFE
     88 #else
     89 #define 	CALLOUT_FLAGS	0
     90 #endif
     91 #endif
     92 
     93 #ifdef RGE_DEBUG
     94 #define DPRINTF(x)	do { if (rge_debug > 0) printf x; } while (0)
     95 int rge_debug = 0;
     96 #else
     97 #define DPRINTF(x)
     98 #endif
     99 
    100 static int		rge_match(device_t, cfdata_t, void *);
    101 static void		rge_attach(device_t, device_t, void *);
    102 int		rge_intr(void *);
    103 int		rge_encap(struct rge_softc *, struct mbuf *, int);
    104 int		rge_ioctl(struct ifnet *, u_long, void *);
    105 void		rge_start(struct ifnet *);
    106 void		rge_watchdog(struct ifnet *);
    107 int		rge_init(struct ifnet *);
    108 void		rge_stop(struct ifnet *, int);
    109 int		rge_ifmedia_upd(struct ifnet *);
    110 void		rge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
    111 int		rge_allocmem(struct rge_softc *);
    112 int		rge_newbuf(struct rge_softc *, int);
    113 static int	rge_rx_list_init(struct rge_softc *);
    114 static void	rge_rx_list_fini(struct rge_softc *);
    115 static void	rge_tx_list_init(struct rge_softc *);
    116 static void	rge_tx_list_fini(struct rge_softc *);
    117 int		rge_rxeof(struct rge_softc *);
    118 int		rge_txeof(struct rge_softc *);
    119 void		rge_reset(struct rge_softc *);
    120 void		rge_iff(struct rge_softc *);
    121 void		rge_set_phy_power(struct rge_softc *, int);
    122 void		rge_phy_config(struct rge_softc *);
    123 void		rge_phy_config_mac_cfg2_8126(struct rge_softc *);
    124 void		rge_phy_config_mac_cfg2(struct rge_softc *);
    125 void		rge_phy_config_mac_cfg3(struct rge_softc *);
    126 void		rge_phy_config_mac_cfg4(struct rge_softc *);
    127 void		rge_phy_config_mac_cfg5(struct rge_softc *);
    128 void		rge_phy_config_mcu(struct rge_softc *, uint16_t);
    129 void		rge_set_macaddr(struct rge_softc *, const uint8_t *);
    130 void		rge_get_macaddr(struct rge_softc *, uint8_t *);
    131 void		rge_hw_init(struct rge_softc *);
    132 void		rge_disable_phy_ocp_pwrsave(struct rge_softc *);
    133 void		rge_patch_phy_mcu(struct rge_softc *, int);
    134 void		rge_add_media_types(struct rge_softc *);
    135 void		rge_config_imtype(struct rge_softc *, int);
    136 void		rge_disable_hw_im(struct rge_softc *);
    137 void		rge_disable_sim_im(struct rge_softc *);
    138 void		rge_setup_sim_im(struct rge_softc *);
    139 void		rge_setup_intr(struct rge_softc *, int);
    140 void		rge_exit_oob(struct rge_softc *);
    141 void		rge_write_csi(struct rge_softc *, uint32_t, uint32_t);
    142 uint32_t	rge_read_csi(struct rge_softc *, uint32_t);
    143 void		rge_write_mac_ocp(struct rge_softc *, uint16_t, uint16_t);
    144 uint16_t	rge_read_mac_ocp(struct rge_softc *, uint16_t);
    145 void		rge_write_ephy(struct rge_softc *, uint16_t, uint16_t);
    146 uint16_t	rge_read_ephy(struct rge_softc *, uint16_t);
    147 void		rge_write_phy(struct rge_softc *, uint16_t, uint16_t, uint16_t);
    148 uint16_t	rge_read_phy(struct rge_softc *, uint16_t, uint16_t);
    149 void		rge_write_phy_ocp(struct rge_softc *, uint16_t, uint16_t);
    150 uint16_t	rge_read_phy_ocp(struct rge_softc *, uint16_t);
    151 int		rge_get_link_status(struct rge_softc *);
    152 void		rge_txstart(void *);
    153 void		rge_tick(void *);
    154 void		rge_link_state(struct rge_softc *);
    155 
/*
 * Per-revision MCU patch tables: register/value pairs expanded from
 * macros in if_rgereg.h.  Presumably these are applied verbatim by
 * rge_phy_config_mcu() for the matching MAC_CFG* revision — confirm
 * against that function.
 */
static const struct {
	uint16_t reg;
	uint16_t val;
}  rtl8125_mac_cfg2_mcu[] = {
	RTL8125_MAC_CFG2_MCU
}, rtl8125_mac_cfg3_mcu[] = {
	RTL8125_MAC_CFG3_MCU
}, rtl8125_mac_cfg4_mcu[] = {
	RTL8125_MAC_CFG4_MCU
}, rtl8125_mac_cfg5_mcu[] = {
	RTL8125_MAC_CFG5_MCU
}, rtl8126_mac_cfg2_mcu[] = {
	RTL8126_MAC_CFG2_MCU
};
    170 
    171 CFATTACH_DECL_NEW(rge, sizeof(struct rge_softc), rge_match, rge_attach,
    172 		NULL, NULL); /* Sevan - detach function? */
    173 
/* PCI vendor/device IDs this driver attaches to (see rge_match()). */
static const struct device_compatible_entry compat_data[] = {
	{ .id = PCI_ID_CODE(PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_E3000) },
	{ .id = PCI_ID_CODE(PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8125) },
	{ .id = PCI_ID_CODE(PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8126) },

	PCI_COMPAT_EOL
};
    181 
    182 static int
    183 rge_match(device_t parent, cfdata_t match, void *aux)
    184 {
    185 	struct pci_attach_args *pa =aux;
    186 
    187 	return pci_compatible_match(pa, compat_data);
    188 }
    189 
    190 void
    191 rge_attach(device_t parent, device_t self, void *aux)
    192 {
    193 	struct rge_softc *sc = device_private(self);
    194 	struct pci_attach_args *pa = aux;
    195 	pci_chipset_tag_t pc = pa->pa_pc;
    196 	pci_intr_handle_t *ihp;
    197 	char intrbuf[PCI_INTRSTR_LEN];
    198 	const char *intrstr = NULL;
    199 	struct ifnet *ifp;
    200 	pcireg_t reg;
    201 	uint32_t hwrev;
    202 	uint8_t eaddr[ETHER_ADDR_LEN];
    203 	int offset;
    204 	pcireg_t command;
    205 	const char *revstr;
    206 
    207 	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
    208 
    209 	sc->sc_dev = self;
    210 
    211 	pci_aprint_devinfo(pa, "Ethernet controller");
    212 
    213 	/*
    214 	 * Map control/status registers.
    215 	 */
    216 	if (pci_mapreg_map(pa, RGE_PCI_BAR2, PCI_MAPREG_TYPE_MEM |
    217 	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->rge_btag, &sc->rge_bhandle,
    218 	    NULL, &sc->rge_bsize)) {
    219 		if (pci_mapreg_map(pa, RGE_PCI_BAR1, PCI_MAPREG_TYPE_MEM |
    220 		    PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->rge_btag,
    221 		    &sc->rge_bhandle, NULL, &sc->rge_bsize)) {
    222 			if (pci_mapreg_map(pa, RGE_PCI_BAR0, PCI_MAPREG_TYPE_IO,
    223 			    0, &sc->rge_btag, &sc->rge_bhandle, NULL,
    224 			    &sc->rge_bsize)) {
    225 				aprint_error(": can't map mem or i/o space\n");
    226 				return;
    227 			}
    228 		}
    229 	}
    230 
    231 	int counts[PCI_INTR_TYPE_SIZE] = {
    232  		[PCI_INTR_TYPE_INTX] = 1,
    233  		[PCI_INTR_TYPE_MSI] = 1,
    234  		[PCI_INTR_TYPE_MSIX] = 1,
    235  	};
    236 	int max_type = PCI_INTR_TYPE_MSIX;
    237 	/*
    238 	 * Allocate interrupt.
    239 	 */
    240 	if (pci_intr_alloc(pa, &ihp, counts, max_type) != 0) {
    241 		aprint_error(": couldn't map interrupt\n");
    242 		return;
    243 	}
    244 	switch (pci_intr_type(pc, ihp[0])) {
    245 	case PCI_INTR_TYPE_MSIX:
    246 	case PCI_INTR_TYPE_MSI:
    247 		sc->rge_flags |= RGE_FLAG_MSI;
    248 		break;
    249 	default:
    250 		break;
    251 	}
    252 	intrstr = pci_intr_string(pc, ihp[0], intrbuf, sizeof(intrbuf));
    253 	sc->sc_ih = pci_intr_establish_xname(pc, ihp[0], IPL_NET, rge_intr,
    254 	    sc, device_xname(sc->sc_dev));
    255 	if (sc->sc_ih == NULL) {
    256 		aprint_error_dev(sc->sc_dev, ": couldn't establish interrupt");
    257 		if (intrstr != NULL)
    258 			aprint_error(" at %s\n", intrstr);
    259 		aprint_error("\n");
    260 		return;
    261 	}
    262 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
    263 
    264 	if (pci_dma64_available(pa))
    265 		sc->sc_dmat = pa->pa_dmat64;
    266 	else
    267 		sc->sc_dmat = pa->pa_dmat;
    268 
    269 	sc->sc_pc = pa->pa_pc;
    270 	sc->sc_tag = pa->pa_tag;
    271 
    272 	/* Determine hardware revision */
    273 	hwrev = RGE_READ_4(sc, RGE_TXCFG) & RGE_TXCFG_HWREV;
    274 	switch (hwrev) {
    275 	case 0x60800000:
    276 		sc->rge_type = MAC_CFG2;
    277 		revstr = "Z1";
    278 		break;
    279 	case 0x60900000:
    280 		sc->rge_type = MAC_CFG3;
    281 		revstr = "Z2";
    282 		break;
    283 	case 0x64000000:
    284 		sc->rge_type = MAC_CFG4;
    285 		revstr = "A";
    286 		break;
    287 	case 0x64100000:
    288 		sc->rge_type = MAC_CFG5;
    289 		revstr = "B";
    290 		break;
    291 	case 0x64900000:
    292 		sc->rge_type = MAC_CFG2_8126;
    293 		revstr = "A";
    294 		break;
    295 	default:
    296 		aprint_error(": unknown version 0x%08x\n", hwrev);
    297 		return;
    298 	}
    299 
    300 	aprint_normal_dev(sc->sc_dev, "HW rev. %s\n", revstr);
    301 	rge_config_imtype(sc, RGE_IMTYPE_SIM);
    302 
    303 	/*
    304 	 * PCI Express check.
    305 	 */
    306 	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
    307 	    &offset, NULL)) {
    308 		/* Disable PCIe ASPM and ECPM. */
    309 		reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
    310 		    offset + PCIE_LCSR);
    311 		reg &= ~(PCIE_LCSR_ASPM_L0S | PCIE_LCSR_ASPM_L1 |
    312 		    PCIE_LCSR_ENCLKPM);
    313 		pci_conf_write(pa->pa_pc, pa->pa_tag, offset + PCIE_LCSR,
    314 		    reg);
    315 	}
    316 
    317 	rge_exit_oob(sc);
    318 	rge_hw_init(sc);
    319 
    320 	rge_get_macaddr(sc, eaddr);
    321 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
    322 	    ether_sprintf(eaddr));
    323 
    324 	memcpy(sc->sc_enaddr, eaddr, ETHER_ADDR_LEN);
    325 
    326 	rge_set_phy_power(sc, 1);
    327 	rge_phy_config(sc);
    328 
    329 	if (rge_allocmem(sc))
    330 		return;
    331 
    332 	ifp = &sc->sc_ec.ec_if;
    333 	ifp->if_softc = sc;
    334 	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
    335 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    336 #ifdef RGE_MPSAFE
    337 	ifp->if_extflags = IFEF_MPSAFE;
    338 #endif
    339 	ifp->if_ioctl = rge_ioctl;
    340 	ifp->if_stop = rge_stop;
    341 	ifp->if_start = rge_start;
    342 	ifp->if_init = rge_init;
    343 	ifp->if_watchdog = rge_watchdog;
    344 	IFQ_SET_MAXLEN(&ifp->if_snd, RGE_TX_LIST_CNT - 1);
    345 
    346 #if notyet
    347 	ifp->if_capabilities = IFCAP_CSUM_IPv4_Rx |
    348 	    IFCAP_CSUM_IPv4_Tx |IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_TCPv4_Tx|
    349 	    IFCAP_CSUM_UDPv4_Rx | IFCAP_CSUM_UDPv4_Tx;
    350 #endif
    351 
    352 	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
    353 	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
    354 
    355 	callout_init(&sc->sc_timeout, CALLOUT_FLAGS);
    356 	callout_setfunc(&sc->sc_timeout, rge_tick, sc);
    357 
    358 	command = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
    359 	command |= PCI_COMMAND_MASTER_ENABLE;
    360 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
    361 
    362 	/* Initialize ifmedia structures. */
    363 	sc->sc_ec.ec_ifmedia = &sc->sc_media;
    364 	ifmedia_init(&sc->sc_media, IFM_IMASK, rge_ifmedia_upd,
    365 	    rge_ifmedia_sts);
    366 	rge_add_media_types(sc);
    367 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
    368 	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
    369 	sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media;
    370 
    371 	if_attach(ifp);
    372 	if_deferred_start_init(ifp, NULL);
    373 	ether_ifattach(ifp, eaddr);
    374 
    375 	if (pmf_device_register(self, NULL, NULL))
    376 		pmf_class_network_register(self, ifp);
    377 	else
    378 		aprint_error_dev(self, "couldn't establish power handler\n");
    379 }
    380 
/*
 * Interrupt handler.  Masks the chip's interrupts, acknowledges and
 * dispatches RX/TX/error events, then re-enables the configured mask.
 * Returns nonzero when the interrupt was ours (for shared INTx lines).
 */
int
rge_intr(void *arg)
{
	struct rge_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint32_t status;
	int claimed = 0, rx, tx;

	if (!(ifp->if_flags & IFF_RUNNING))
		return (0);

	/* Disable interrupts. */
	RGE_WRITE_4(sc, RGE_IMR, 0);

	/*
	 * For shared (non-MSI) interrupts, bail out if none of our
	 * status bits are set.
	 *
	 * NOTE(review): this early return leaves IMR cleared; matches
	 * the upstream OpenBSD code, but verify the mask is restored by
	 * a later interrupt/tick before relying on it.
	 */
	if (!(sc->rge_flags & RGE_FLAG_MSI)) {
		if ((RGE_READ_4(sc, RGE_ISR) & sc->rge_intrs) == 0)
			return (0);
	}

	/* Read and acknowledge (write-1-to-clear) the pending events. */
	status = RGE_READ_4(sc, RGE_ISR);
	if (status)
		RGE_WRITE_4(sc, RGE_ISR, status);

	if (status & RGE_ISR_PCS_TIMEOUT)
		claimed = 1;

	rx = tx = 0;
	if (status & sc->rge_intrs) {
		if (status &
		    (sc->rge_rx_ack | RGE_ISR_RX_ERR | RGE_ISR_RX_FIFO_OFLOW)) {
			rx |= rge_rxeof(sc);
			claimed = 1;
		}

		if (status & (sc->rge_tx_ack | RGE_ISR_TX_ERR)) {
			tx |= rge_txeof(sc);
			claimed = 1;
		}

		/* Fatal chip error: reinitialize the whole interface. */
		if (status & RGE_ISR_SYSTEM_ERR) {
			KERNEL_LOCK(1, NULL);
			rge_init(ifp);
			KERNEL_UNLOCK_ONE(NULL);
			claimed = 1;
		}
	}

	if (sc->rge_timerintr) {
		if ((tx | rx) == 0) {
			/*
			 * Nothing needs to be processed, fallback
			 * to use TX/RX interrupts.
			 */
			rge_setup_intr(sc, RGE_IMTYPE_NONE);

			/*
			 * Recollect, mainly to avoid the possible
			 * race introduced by changing interrupt
			 * masks.
			 */
			rge_rxeof(sc);
			rge_txeof(sc);
		} else
			RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
	} else if (tx | rx) {
		/*
		 * Assume that using simulated interrupt moderation
		 * (hardware timer based) could reduce the interrupt
		 * rate.
		 */
		rge_setup_intr(sc, RGE_IMTYPE_SIM);
	}

	/* Re-enable the interrupts we care about. */
	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);

	return (claimed);
}
    458 
/*
 * Encapsulate an mbuf chain into TX descriptors starting at ring slot
 * 'idx'.  Returns the number of descriptors consumed, or 0 on failure
 * (the caller frees the mbuf in that case).  Ownership of the first
 * descriptor is transferred to the chip last so the hardware never
 * sees a partially built chain.
 */
int
rge_encap(struct rge_softc *sc, struct mbuf *m, int idx)
{
	struct rge_tx_desc *d = NULL;
	struct rge_txq *txq;
	bus_dmamap_t txmap;
	uint32_t cmdsts, cflags = 0;
	int cur, error, i, last, nsegs;

#if notyet
	/*
	 * Set RGE_TDEXTSTS_IPCSUM if any checksum offloading is requested.
	 * Otherwise, RGE_TDEXTSTS_TCPCSUM / RGE_TDEXTSTS_UDPCSUM does not
	 * take affect.
	 */
	if ((m->m_pkthdr.csum_flags &
	    (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) != 0) {
		cflags |= RGE_TDEXTSTS_IPCSUM;
		if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
			cflags |= RGE_TDEXTSTS_TCPCSUM;
		if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
			cflags |= RGE_TDEXTSTS_UDPCSUM;
	}
#endif

	txq = &sc->rge_ldata.rge_txq[idx];
	txmap = txq->txq_dmamap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m, BUS_DMA_NOWAIT);
	switch (error) {
	case 0:
		break;
	case EFBIG: /* mbuf chain is too fragmented */
		/* Defragment once and retry the load before giving up. */
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m,
		    BUS_DMA_NOWAIT) == 0)
			break;

		/* FALLTHROUGH */
	default:
		return (0);
	}

	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	nsegs = txmap->dm_nsegs;

	/* Set up hardware VLAN tagging. */
	/* Tag is stored byte-swapped in the descriptor's extsts field. */
	if (vlan_has_tag(m))
		cflags |= bswap16(vlan_get_tag(m)) | RGE_TDEXTSTS_VTAG;

	last = cur = idx;
	/* First descriptor carries start-of-frame; the rest carry OWN. */
	cmdsts = RGE_TDCMDSTS_SOF;

	for (i = 0; i < txmap->dm_nsegs; i++) {
		d = &sc->rge_ldata.rge_tx_list[cur];

		d->rge_extsts = htole32(cflags);
		d->rge_addrlo = htole32(RGE_ADDR_LO(txmap->dm_segs[i].ds_addr));
		d->rge_addrhi = htole32(RGE_ADDR_HI(txmap->dm_segs[i].ds_addr));

		cmdsts |= txmap->dm_segs[i].ds_len;

		/* Last ring slot must carry end-of-ring so the chip wraps. */
		if (cur == RGE_TX_LIST_CNT - 1)
			cmdsts |= RGE_TDCMDSTS_EOR;

		d->rge_cmdsts = htole32(cmdsts);

		last = cur;
		cmdsts = RGE_TDCMDSTS_OWN;
		cur = RGE_NEXT_TX_DESC(cur);
	}

	/* Set EOF on the last descriptor. */
	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_EOF);

	/* Transfer ownership of packet to the chip. */
	d = &sc->rge_ldata.rge_tx_list[idx];

	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_OWN);

	/*
	 * NOTE(review): this sync covers one descriptor at offset 'cur'
	 * (the slot after the chain), not the idx..last span just
	 * written; matches upstream, but verify against bus_dma(9)
	 * coherency requirements on non-coherent platforms.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
	    cur * sizeof(struct rge_tx_desc), sizeof(struct rge_tx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Update info of TX queue and descriptors. */
	txq->txq_mbuf = m;
	txq->txq_descidx = last;

	return (nsegs);
}
    551 
    552 int
    553 rge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
    554 {
    555 	struct rge_softc *sc = ifp->if_softc;
    556 	//struct ifreq *ifr = (struct ifreq *)data;
    557 	int s, error = 0;
    558 
    559 	s = splnet();
    560 
    561 	switch (cmd) {
    562 	case SIOCSIFFLAGS:
    563 		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
    564 			break;
    565 		/* XXX set an ifflags callback and let ether_ioctl
    566 		 * handle all of this.
    567 		 */
    568 		if (ifp->if_flags & IFF_UP) {
    569 			if (ifp->if_flags & IFF_RUNNING)
    570 				error = ENETRESET;
    571 			else
    572 				rge_init(ifp);
    573 		} else {
    574 			if (ifp->if_flags & IFF_RUNNING)
    575 				rge_stop(ifp, 1);
    576 		}
    577 		break;
    578 	default:
    579 		error = ether_ioctl(ifp, cmd, data);
    580 	}
    581 
    582 	if (error == ENETRESET) {
    583 		if (ifp->if_flags & IFF_RUNNING)
    584 			rge_iff(sc);
    585 		error = 0;
    586 	}
    587 
    588 	splx(s);
    589 	return (error);
    590 }
    591 
/*
 * if_start handler: drain the send queue into the TX ring while
 * descriptors are available.  Purges the queue when there is no link.
 */
void
rge_start(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int free, idx, used;
	int queued = 0;

/*
 * NOTE(review): macro defined inside the function body but never
 * #undef'd, so it leaks to the rest of the file.
 */
#define LINK_STATE_IS_UP(_s)    \
	((_s) >= LINK_STATE_UP || (_s) == LINK_STATE_UNKNOWN)

	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
		IFQ_PURGE(&ifp->if_snd);
		return;
	}

	/* Calculate free space. */
	idx = sc->rge_ldata.rge_txq_prodidx;
	free = sc->rge_ldata.rge_txq_considx;
	if (free <= idx)
		free += RGE_TX_LIST_CNT;
	free -= idx;

	for (;;) {
		/* Keep slack so a max-fragmented packet always fits. */
		if (RGE_TX_NSEGS >= free + 2) {
			SET(ifp->if_flags, IFF_OACTIVE);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		used = rge_encap(sc, m, idx);
		if (used == 0) {
			/* Encapsulation failed; drop the packet. */
			m_freem(m);
			continue;
		}

		KASSERT(used <= free);
		free -= used;

		bpf_mtap(ifp, m, BPF_D_OUT);

		idx += used;
		if (idx >= RGE_TX_LIST_CNT)
			idx -= RGE_TX_LIST_CNT;

		queued++;
	}

	if (queued == 0)
		return;

	/* Set a timeout in case the chip goes out to lunch. */
	ifp->if_timer = 5;

	sc->rge_ldata.rge_txq_prodidx = idx;
	rge_txstart(sc);
}
    652 
    653 void
    654 rge_watchdog(struct ifnet *ifp)
    655 {
    656 	struct rge_softc *sc = ifp->if_softc;
    657 
    658 	device_printf(sc->sc_dev, "watchdog timeout\n");
    659 	if_statinc(ifp, if_oerrors);
    660 
    661 	rge_init(ifp);
    662 }
    663 
/*
 * if_init handler: bring the interface up from a stopped state.
 * Programs the MAC address, descriptor rings, and a long sequence of
 * vendor-specific register writes, then enables RX/TX and interrupts.
 * Returns 0 on success or an errno if RX buffer allocation fails.
 *
 * Many of the register offsets/values below are undocumented magic
 * taken from the vendor driver; their order is significant, so do not
 * reorder them.
 */
int
rge_init(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	uint32_t val;
	unsigned i;

	rge_stop(ifp, 0);

	/* Set MAC address. */
	rge_set_macaddr(sc, CLLADDR(ifp->if_sadl));

	/* Set Maximum frame size. */
	RGE_WRITE_2(sc, RGE_RXMAXSIZE, RGE_JUMBO_FRAMELEN);

	/* Initialize RX descriptors list. */
	int error = rge_rx_list_init(sc);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "init failed: no memory for RX buffers\n");
		rge_stop(ifp, 1);
		return error;
	}

	/* Initialize TX descriptors. */
	rge_tx_list_init(sc);

	/* Load the addresses of the RX and TX lists into the chip. */
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_LO,
	    RGE_ADDR_LO(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_HI,
	    RGE_ADDR_HI(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_LO,
	    RGE_ADDR_LO(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_HI,
	    RGE_ADDR_HI(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));

	/* Unlock config registers for writing (re-locked near the end). */
	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);

	RGE_CLRBIT_1(sc, 0xf1, 0x80);
	if (sc->rge_type == MAC_CFG2_8126)
		RGE_CLRBIT_1(sc, RGE_INT_CFG0, 0x08);
	else
		RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
	if (sc->rge_type != MAC_CFG2_8126)
		RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_RDY_TO_L23);

	/* Clear interrupt moderation timer. */
	for (i = 0; i < 64; i++)
		RGE_WRITE_4(sc, RGE_INTMITI(i), 0);

	/* Set the initial RX and TX configurations. */
	if (sc->rge_type == MAC_CFG2_8126)
		RGE_WRITE_4(sc, RGE_RXCFG, RGE_RXCFG_CONFIG_8126);
	else
		RGE_WRITE_4(sc, RGE_RXCFG, RGE_RXCFG_CONFIG);
	RGE_WRITE_4(sc, RGE_TXCFG, RGE_TXCFG_CONFIG);

	val = rge_read_csi(sc, 0x70c) & ~0xff000000;
	rge_write_csi(sc, 0x70c, val | 0x27000000);

	/* Enable hardware optimization function. */
	val = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x78) & ~0x00007000;
	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x78, val | 0x00005000);

	if (sc->rge_type == MAC_CFG2_8126) {
		/* Disable L1 timeout. */
		val = rge_read_csi(sc, 0x890) & ~0x00000001;
		rge_write_csi(sc, 0x890, val);
	} else
		RGE_WRITE_2(sc, 0x0382, 0x221b);
	RGE_WRITE_1(sc, 0x4500, 0);
	RGE_WRITE_2(sc, 0x4800, 0);
	RGE_CLRBIT_1(sc, RGE_CFG1, RGE_CFG1_SPEED_DOWN);

	/* Vendor magic: undocumented MAC OCP register sequence follows. */
	rge_write_mac_ocp(sc, 0xc140, 0xffff);
	rge_write_mac_ocp(sc, 0xc142, 0xffff);

	val = rge_read_mac_ocp(sc, 0xd3e2) & ~0x0fff;
	rge_write_mac_ocp(sc, 0xd3e2, val | 0x03a9);

	RGE_MAC_CLRBIT(sc, 0xd3e4, 0x00ff);
	RGE_MAC_SETBIT(sc, 0xe860, 0x0080);
	RGE_MAC_SETBIT(sc, 0xeb58, 0x0001);

	if (sc->rge_type == MAC_CFG2_8126)
		RGE_CLRBIT_1(sc, 0xd8, 0x02);

	val = rge_read_mac_ocp(sc, 0xe614) & ~0x0700;
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3 ||
	    sc->rge_type == MAC_CFG2_8126)
		rge_write_mac_ocp(sc, 0xe614, val | 0x0400);
	else
		rge_write_mac_ocp(sc, 0xe614, val | 0x0200);

	RGE_MAC_CLRBIT(sc, 0xe63e, 0x0c00);

	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3 ||
	    sc->rge_type == MAC_CFG2_8126) {
		val = rge_read_mac_ocp(sc, 0xe63e) & ~0x0030;
		rge_write_mac_ocp(sc, 0xe63e, val | 0x0020);
	} else
		RGE_MAC_CLRBIT(sc, 0xe63e, 0x0030);

	RGE_MAC_SETBIT(sc, 0xc0b4, 0x000c);

	val = rge_read_mac_ocp(sc, 0xeb6a) & ~0x00ff;
	rge_write_mac_ocp(sc, 0xeb6a, val | 0x0033);

	val = rge_read_mac_ocp(sc, 0xeb50) & ~0x03e0;
	rge_write_mac_ocp(sc, 0xeb50, val | 0x0040);

	val = rge_read_mac_ocp(sc, 0xe056) & ~0x00f0;
	rge_write_mac_ocp(sc, 0xe056, val | 0x0030);

	RGE_WRITE_1(sc, RGE_TDFNR, 0x10);

	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_TX_10M_PS_EN);

	RGE_MAC_CLRBIT(sc, 0xe040, 0x1000);

	val = rge_read_mac_ocp(sc, 0xea1c) & ~0x0003;
	rge_write_mac_ocp(sc, 0xea1c, val | 0x0001);

	val = rge_read_mac_ocp(sc, 0xe0c0) & ~0x4f0f;
	rge_write_mac_ocp(sc, 0xe0c0, val | 0x4403);

	RGE_MAC_SETBIT(sc, 0xe052, 0x0068);
	RGE_MAC_CLRBIT(sc, 0xe052, 0x0080);

	val = rge_read_mac_ocp(sc, 0xc0ac) & ~0x0080;
	rge_write_mac_ocp(sc, 0xc0ac, val | 0x1f00);

	val = rge_read_mac_ocp(sc, 0xd430) & ~0x0fff;
	rge_write_mac_ocp(sc, 0xd430, val | 0x047f);

	val = rge_read_mac_ocp(sc, 0xe84c) & ~0x0040;
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		rge_write_mac_ocp(sc, 0xe84c, 0x00c0);
	else
		rge_write_mac_ocp(sc, 0xe84c, 0x0080);

	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_PFM_EN);

	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		RGE_SETBIT_1(sc, RGE_MCUCMD, 0x01);

	/* Disable EEE plus. */
	RGE_MAC_CLRBIT(sc, 0xe080, 0x0002);

	if (sc->rge_type == MAC_CFG2_8126)
		RGE_MAC_CLRBIT(sc, 0xea1c, 0x0304);
	else
		RGE_MAC_CLRBIT(sc, 0xea1c, 0x0004);

	RGE_MAC_SETBIT(sc, 0xeb54, 0x0001);
	DELAY(1);
	RGE_MAC_CLRBIT(sc, 0xeb54, 0x0001);

	RGE_CLRBIT_4(sc, 0x1880, 0x0030);

	rge_write_mac_ocp(sc, 0xe098, 0xc302);

	/* Enable/disable hardware VLAN tag stripping to match capenable. */
	if ((sc->sc_ec.ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0)
		RGE_SETBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);
	else
		RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);

	RGE_SETBIT_2(sc, RGE_CPLUSCMD, RGE_CPLUSCMD_RXCSUM);

	/* Wait (up to ~10ms) for the MCU to become ready. */
	for (i = 0; i < 10; i++) {
		if (!(rge_read_mac_ocp(sc, 0xe00e) & 0x2000))
			break;
		DELAY(1000);
	}

	/* Disable RXDV gate. */
	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
	DELAY(2000);

	rge_ifmedia_upd(ifp);

	/* Enable transmit and receive. */
	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_TXENB | RGE_CMD_RXENB);

	/* Program promiscuous mode and multicast filters. */
	rge_iff(sc);

	if (sc->rge_type == MAC_CFG2_8126)
		RGE_CLRBIT_1(sc, RGE_INT_CFG0, 0x08);
	else
		RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);

	/* Re-lock the config registers. */
	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);

	/* Enable interrupts. */
	rge_setup_intr(sc, RGE_IMTYPE_SIM);

	ifp->if_flags |= IFF_RUNNING;
	CLR(ifp->if_flags, IFF_OACTIVE);

	callout_schedule(&sc->sc_timeout, 1);

	return (0);
}
    871 
    872 /*
    873  * Stop the adapter and free any mbufs allocated to the RX and TX lists.
    874  */
/*
 * Stop the adapter and free any mbufs allocated to the RX and TX lists.
 */
void
rge_stop(struct ifnet *ifp, int disable)
{
	struct rge_softc *sc = ifp->if_softc;

	/* Stop the periodic tick before touching the hardware. */
	callout_halt(&sc->sc_timeout, NULL);

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_RUNNING;
	sc->rge_timerintr = 0;

	/* Stop accepting any frames. */
	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
	    RGE_RXCFG_ERRPKT);

	/* Mask all interrupts. */
	RGE_WRITE_4(sc, RGE_IMR, 0);

	/* Config interrupt type for RTL8126. */
	if (sc->rge_type == MAC_CFG2_8126)
		RGE_CLRBIT_1(sc, RGE_INT_CFG0, RGE_INT_CFG0_EN);

	/* Clear timer interrupts. */
	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT1, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT2, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT3, 0);

	rge_reset(sc);

	/* NOTE(review): OpenBSD barriers not yet ported to NetBSD. */
//	intr_barrier(sc->sc_ih);
//	ifq_barrier(&ifp->if_snd);
/*	ifq_clr_oactive(&ifp->if_snd); Sevan - OpenBSD queue API */

	/* Drop any partially reassembled jumbo RX chain. */
	if (sc->rge_head != NULL) {
		m_freem(sc->rge_head);
		sc->rge_head = sc->rge_tail = NULL;
	}

	rge_tx_list_fini(sc);
	rge_rx_list_fini(sc);
}
    916 
    917 /*
    918  * Set media options.
    919  */
/*
 * Set media options: program the PHY's autonegotiation advertisement
 * (10/100/1000 via standard MII registers, 2.5G/5G via OCP register
 * 0xa5d4) and restart autonegotiation.  Returns EINVAL for non-Ethernet
 * or unsupported media.
 */
int
rge_ifmedia_upd(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_media;
	int anar, gig, val;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Disable Gigabit Lite. */
	RGE_PHY_CLRBIT(sc, 0xa428, 0x0200);
	RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0001);
	if (sc->rge_type == MAC_CFG2_8126)
		RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0002);

	/* Start from the current advertisement, minus the NBASE-T bits. */
	val = rge_read_phy_ocp(sc, 0xa5d4);
	val &= ~RGE_ADV_2500TFDX;
	if (sc->rge_type == MAC_CFG2_8126)
		val &= ~RGE_ADV_5000TFDX;

	anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
	gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		/* 8126 also advertises 5G; 8125 tops out at 2.5G. */
		val |= (sc->rge_type != MAC_CFG2_8126) ?
		    RGE_ADV_2500TFDX : (RGE_ADV_2500TFDX | RGE_ADV_5000TFDX);
		break;
	case IFM_5000_T:
		val |= RGE_ADV_5000TFDX;
		ifp->if_baudrate = IF_Gbps(5);
		break;
	case IFM_2500_T:
		val |= RGE_ADV_2500TFDX;
		ifp->if_baudrate = IF_Mbps(2500);
		break;
	case IFM_1000_T:
		ifp->if_baudrate = IF_Gbps(1);
		break;
	case IFM_100_TX:
		/* Drop 1000BASE-T from the advertisement. */
		gig = rge_read_phy(sc, 0, MII_100T2CR) &
		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
		    ANAR_TX | ANAR_TX_FD | ANAR_10_FD | ANAR_10 :
		    ANAR_TX | ANAR_10_FD | ANAR_10;
		ifp->if_baudrate = IF_Mbps(100);
		break;
	case IFM_10_T:
		gig = rge_read_phy(sc, 0, MII_100T2CR) &
		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
		    ANAR_10_FD | ANAR_10 : ANAR_10;
		ifp->if_baudrate = IF_Mbps(10);
		break;
	default:
		device_printf(sc->sc_dev,
		    "unsupported media type\n");
		return (EINVAL);
	}

	/* Write the advertisement and kick off autonegotiation. */
	rge_write_phy(sc, 0, MII_ANAR, anar | ANAR_PAUSE_ASYM | ANAR_FC);
	rge_write_phy(sc, 0, MII_100T2CR, gig);
	rge_write_phy_ocp(sc, 0xa5d4, val);
	rge_write_phy(sc, 0, MII_BMCR, BMCR_RESET | BMCR_AUTOEN |
	    BMCR_STARTNEG);

	return (0);
}
    989 
    990 /*
    991  * Report current media status.
    992  */
    993 void
    994 rge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
    995 {
    996 	struct rge_softc *sc = ifp->if_softc;
    997 	uint16_t status = 0;
    998 
    999 	ifmr->ifm_status = IFM_AVALID;
   1000 	ifmr->ifm_active = IFM_ETHER;
   1001 
   1002 	if (rge_get_link_status(sc)) {
   1003 		ifmr->ifm_status |= IFM_ACTIVE;
   1004 
   1005 		status = RGE_READ_2(sc, RGE_PHYSTAT);
   1006 		if ((status & RGE_PHYSTAT_FDX) ||
   1007 		    (status & (RGE_PHYSTAT_2500MBPS | RGE_PHYSTAT_5000MBPS)))
   1008 			ifmr->ifm_active |= IFM_FDX;
   1009 		else
   1010 			ifmr->ifm_active |= IFM_HDX;
   1011 
   1012 		if (status & RGE_PHYSTAT_10MBPS)
   1013 			ifmr->ifm_active |= IFM_10_T;
   1014 		else if (status & RGE_PHYSTAT_100MBPS)
   1015 			ifmr->ifm_active |= IFM_100_TX;
   1016 		else if (status & RGE_PHYSTAT_1000MBPS)
   1017 			ifmr->ifm_active |= IFM_1000_T;
   1018 		else if (status & RGE_PHYSTAT_2500MBPS)
   1019 			ifmr->ifm_active |= IFM_2500_T;
   1020 		else if (status & RGE_PHYSTAT_5000MBPS)
   1021 			ifmr->ifm_active |= IFM_5000_T;
   1022 	}
   1023 }
   1024 
   1025 /*
   1026  * Allocate memory for RX/TX rings.
   1027  *
   1028  * XXX There is no tear-down for this if it any part fails, so everything
   1029  * remains allocated.
   1030  */
   1031 int
   1032 rge_allocmem(struct rge_softc *sc)
   1033 {
   1034 	int error, i;
   1035 
   1036 	/* Allocate DMA'able memory for the TX ring. */
   1037 	error = bus_dmamap_create(sc->sc_dmat, RGE_TX_LIST_SZ, 1,
   1038 	    RGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT, &sc->rge_ldata.rge_tx_list_map);
   1039 	if (error) {
   1040 		aprint_error_dev(sc->sc_dev, "can't create TX list map\n");
   1041 		return (error);
   1042 	}
   1043 	error = bus_dmamem_alloc(sc->sc_dmat, RGE_TX_LIST_SZ, RGE_ALIGN, 0,
   1044 	    &sc->rge_ldata.rge_tx_listseg, 1, &sc->rge_ldata.rge_tx_listnseg,
   1045 	    BUS_DMA_NOWAIT);
   1046 	if (error) {
   1047 		aprint_error_dev(sc->sc_dev, "can't alloc TX list\n");
   1048 		return (error);
   1049 	}
   1050 
   1051 	/* Load the map for the TX ring. */
   1052 	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
   1053 	    sc->rge_ldata.rge_tx_listnseg, RGE_TX_LIST_SZ,
   1054 	    (void **) &sc->rge_ldata.rge_tx_list,
   1055 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
   1056 	if (error) {
   1057 		aprint_error_dev(sc->sc_dev, "can't map TX dma buffers\n");
   1058 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
   1059 		    sc->rge_ldata.rge_tx_listnseg);
   1060 		return (error);
   1061 	}
   1062 	memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);
   1063 	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
   1064 	    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
   1065 	if (error) {
   1066 		aprint_error_dev(sc->sc_dev, "can't load TX dma map\n");
   1067 		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map);
   1068 		bus_dmamem_unmap(sc->sc_dmat,
   1069 		    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ);
   1070 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
   1071 		    sc->rge_ldata.rge_tx_listnseg);
   1072 		return (error);
   1073 	}
   1074 
   1075 	/* Create DMA maps for TX buffers. */
   1076 	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
   1077 		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN,
   1078 		    RGE_TX_NSEGS, RGE_JUMBO_FRAMELEN, 0, 0,
   1079 		    &sc->rge_ldata.rge_txq[i].txq_dmamap);
   1080 		if (error) {
   1081 			aprint_error_dev(sc->sc_dev, "can't create DMA map for TX\n");
   1082 			return (error);
   1083 		}
   1084 	}
   1085 
   1086 	/* Allocate DMA'able memory for the RX ring. */
   1087 	error = bus_dmamap_create(sc->sc_dmat, RGE_RX_LIST_SZ, 1,
   1088 	    RGE_RX_LIST_SZ, 0, 0, &sc->rge_ldata.rge_rx_list_map);
   1089 	if (error) {
   1090 		aprint_error_dev(sc->sc_dev, "can't create RX list map\n");
   1091 		return (error);
   1092 	}
   1093 	error = bus_dmamem_alloc(sc->sc_dmat, RGE_RX_LIST_SZ, RGE_ALIGN, 0,
   1094 	    &sc->rge_ldata.rge_rx_listseg, 1, &sc->rge_ldata.rge_rx_listnseg,
   1095 	    BUS_DMA_NOWAIT);
   1096 	if (error) {
   1097 		aprint_error_dev(sc->sc_dev, "can't alloc RX list\n");
   1098 		return (error);
   1099 	}
   1100 
   1101 	/* Load the map for the RX ring. */
   1102 	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
   1103 	    sc->rge_ldata.rge_rx_listnseg, RGE_RX_LIST_SZ,
   1104 	    (void **) &sc->rge_ldata.rge_rx_list,
   1105 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
   1106 	if (error) {
   1107 		aprint_error_dev(sc->sc_dev, "can't map RX dma buffers\n");
   1108 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
   1109 		    sc->rge_ldata.rge_rx_listnseg);
   1110 		return (error);
   1111 	}
   1112 	memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);
   1113 	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
   1114 	    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
   1115 	if (error) {
   1116 		aprint_error_dev(sc->sc_dev, "can't load RX dma map\n");
   1117 		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map);
   1118 		bus_dmamem_unmap(sc->sc_dmat,
   1119 		    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ);
   1120 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
   1121 		    sc->rge_ldata.rge_rx_listnseg);
   1122 		return (error);
   1123 	}
   1124 
   1125 	/*
   1126 	 * Create DMA maps for RX buffers.  Use BUS_DMA_ALLOCNOW to avoid any
   1127 	 * potential failure in bus_dmamap_load_mbuf() in the RX path.
   1128 	 */
   1129 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
   1130 		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN, 1,
   1131 		    RGE_JUMBO_FRAMELEN, 0, BUS_DMA_ALLOCNOW,
   1132 		    &sc->rge_ldata.rge_rxq[i].rxq_dmamap);
   1133 		if (error) {
   1134 			aprint_error_dev(sc->sc_dev, "can't create DMA map for RX\n");
   1135 			return (error);
   1136 		}
   1137 	}
   1138 
   1139 	return (error);
   1140 }
   1141 
   1142 /*
   1143  * Set an RX descriptor and sync it.
   1144  */
   1145 static void
   1146 rge_load_rxbuf(struct rge_softc *sc, int idx)
   1147 {
   1148 	struct rge_rx_desc *r = &sc->rge_ldata.rge_rx_list[idx];
   1149 	struct rge_rxq *rxq = &sc->rge_ldata.rge_rxq[idx];
   1150 	bus_dmamap_t rxmap = rxq->rxq_dmamap;
   1151 	uint32_t cmdsts;
   1152 
   1153 	cmdsts = rxmap->dm_segs[0].ds_len | RGE_RDCMDSTS_OWN;
   1154 	if (idx == RGE_RX_LIST_CNT - 1)
   1155 		cmdsts |= RGE_RDCMDSTS_EOR;
   1156 
   1157 	r->hi_qword0.rge_addr = htole64(rxmap->dm_segs[0].ds_addr);
   1158 	r->hi_qword1.rx_qword4.rge_extsts = 0;
   1159 	r->hi_qword1.rx_qword4.rge_cmdsts = htole32(cmdsts);
   1160 
   1161 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
   1162 	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
   1163 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1164 }
   1165 
   1166 /*
   1167  * Initialize the RX descriptor and attach an mbuf cluster.
   1168  */
   1169 int
   1170 rge_newbuf(struct rge_softc *sc, int idx)
   1171 {
   1172 	struct mbuf *m;
   1173 	struct rge_rxq *rxq;
   1174 	bus_dmamap_t rxmap;
   1175 	int error __diagused;
   1176 
   1177 	m = MCLGETL(NULL, M_DONTWAIT, RGE_JUMBO_FRAMELEN);
   1178 	if (m == NULL)
   1179 		return (ENOBUFS);
   1180 	MCLAIM(m, &sc->sc_ec.ec_rx_mowner);
   1181 
   1182 	m->m_len = m->m_pkthdr.len = RGE_JUMBO_FRAMELEN;
   1183 
   1184 	rxq = &sc->rge_ldata.rge_rxq[idx];
   1185 	rxmap = rxq->rxq_dmamap;
   1186 
   1187 	if (rxq->rxq_mbuf != NULL)
   1188 		bus_dmamap_unload(sc->sc_dmat, rxq->rxq_dmamap);
   1189 
   1190 	/* This map was created with BUS_DMA_ALLOCNOW so should never fail. */
   1191 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m, BUS_DMA_NOWAIT);
   1192 	KASSERTMSG(error == 0, "error=%d", error);
   1193 
   1194 	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
   1195 	    BUS_DMASYNC_PREREAD);
   1196 
   1197 	/* Map the segments into RX descriptors. */
   1198 
   1199 	rxq->rxq_mbuf = m;
   1200 	rge_load_rxbuf(sc, idx);
   1201 
   1202 	return 0;
   1203 }
   1204 
   1205 static int
   1206 rge_rx_list_init(struct rge_softc *sc)
   1207 {
   1208 	unsigned i;
   1209 
   1210 	memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);
   1211 
   1212 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
   1213 		sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
   1214 		if (rge_newbuf(sc, i) != 0) {
   1215 			rge_rx_list_fini(sc);
   1216 			return (ENOBUFS);
   1217 		}
   1218 	}
   1219 
   1220 	sc->rge_ldata.rge_rxq_prodidx = sc->rge_ldata.rge_rxq_considx = 0;
   1221 	sc->rge_head = sc->rge_tail = NULL;
   1222 
   1223 	return (0);
   1224 }
   1225 
   1226 static void
   1227 rge_rx_list_fini(struct rge_softc *sc)
   1228 {
   1229 	unsigned i;
   1230 
   1231 	/* Free the RX list buffers. */
   1232 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
   1233 		if (sc->rge_ldata.rge_rxq[i].rxq_mbuf != NULL) {
   1234 			bus_dmamap_unload(sc->sc_dmat,
   1235 			    sc->rge_ldata.rge_rxq[i].rxq_dmamap);
   1236 			m_freem(sc->rge_ldata.rge_rxq[i].rxq_mbuf);
   1237 			sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
   1238 		}
   1239 	}
   1240 }
   1241 
   1242 static void
   1243 rge_tx_list_init(struct rge_softc *sc)
   1244 {
   1245 	unsigned i;
   1246 
   1247 	memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);
   1248 
   1249 	for (i = 0; i < RGE_TX_LIST_CNT; i++)
   1250 		sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
   1251 
   1252 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map, 0,
   1253 	    sc->rge_ldata.rge_tx_list_map->dm_mapsize,
   1254 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1255 
   1256 	sc->rge_ldata.rge_txq_prodidx = sc->rge_ldata.rge_txq_considx = 0;
   1257 }
   1258 
   1259 static void
   1260 rge_tx_list_fini(struct rge_softc *sc)
   1261 {
   1262 	unsigned i;
   1263 
   1264 	/* Free the TX list buffers. */
   1265 	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
   1266 		if (sc->rge_ldata.rge_txq[i].txq_mbuf != NULL) {
   1267 			bus_dmamap_unload(sc->sc_dmat,
   1268 			    sc->rge_ldata.rge_txq[i].txq_dmamap);
   1269 			m_freem(sc->rge_ldata.rge_txq[i].txq_mbuf);
   1270 			sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
   1271 		}
   1272 	}
   1273 }
   1274 
/*
 * RX completion: harvest received frames from the descriptor ring.
 *
 * Walks the ring from the consumer index until a descriptor that is
 * still owned by the chip is found.  Frames spanning several
 * descriptors are chained through sc->rge_head/sc->rge_tail until the
 * EOF fragment arrives.  Returns nonzero if at least one descriptor
 * was processed.
 */
int
rge_rxeof(struct rge_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct rge_rx_desc *cur_rx;
	struct rge_rxq *rxq;
	uint32_t rxstat, extsts;
	int i, total_len, rx = 0;

	for (i = sc->rge_ldata.rge_rxq_considx; ; i = RGE_NEXT_RX_DESC(i)) {
		/* Invalidate the descriptor memory. */
		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
		    i * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_rx = &sc->rge_ldata.rge_rx_list[i];

		/* Still owned by the chip: nothing more to harvest. */
		if (RGE_OWN(cur_rx))
			break;

		rxstat = letoh32(cur_rx->hi_qword1.rx_qword4.rge_cmdsts);
		extsts = letoh32(cur_rx->hi_qword1.rx_qword4.rge_extsts);

		total_len = RGE_RXBYTES(cur_rx);
		rxq = &sc->rge_ldata.rge_rxq[i];
		m = rxq->rxq_mbuf;
		rx = 1;

		/* Invalidate the RX mbuf. */
		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_dmamap, 0,
		    rxq->rxq_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * The buffers are sized so a frame always fits in one
		 * descriptor; anything without both SOF and EOF set is
		 * treated as an error and the buffer is recycled.
		 */
		if ((rxstat & (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) !=
		    (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) {
			if_statinc(ifp, if_ierrors);
			rge_load_rxbuf(sc, i);
			continue;
		}

		if (rxstat & RGE_RDCMDSTS_RXERRSUM) {
			if_statinc(ifp, if_ierrors);
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->rge_head != NULL) {
				m_freem(sc->rge_head);
				sc->rge_head = sc->rge_tail = NULL;
			}
			rge_load_rxbuf(sc, i);
			continue;
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
		 */
		if (rge_newbuf(sc, i) != 0) {
			if_statinc(ifp, if_iqdrops);
			if (sc->rge_head != NULL) {
				m_freem(sc->rge_head);
				sc->rge_head = sc->rge_tail = NULL;
			}
			rge_load_rxbuf(sc, i);
			continue;
		}

		m_set_rcvif(m, ifp);
		if (sc->rge_head != NULL) {
			/* Continuation fragment: append to the chain. */
			m->m_len = total_len;
			/*
			 * Special case: if there's 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes is the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->rge_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->rge_tail->m_next = m;
			}
			m = sc->rge_head;
			sc->rge_head = sc->rge_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
	#if 0
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);
	#else
		{
			/*
			 * Single-fragment frame: keep the trailing CRC and
			 * flag it with M_HASFCS so the stack strips it.
			 */
			m->m_pkthdr.len = m->m_len = total_len;
			m->m_flags |= M_HASFCS;
		}
	#endif

#if notyet
		/* Check IP header checksum. */
		if (!(extsts & RGE_RDEXTSTS_IPCSUMERR) &&
		    (extsts & RGE_RDEXTSTS_IPV4))
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

		/* Check TCP/UDP checksum. */
		if ((extsts & (RGE_RDEXTSTS_IPV4 | RGE_RDEXTSTS_IPV6)) &&
		    (((extsts & RGE_RDEXTSTS_TCPPKT) &&
		    !(extsts & RGE_RDEXTSTS_TCPCSUMERR)) ||
		    ((extsts & RGE_RDEXTSTS_UDPPKT) &&
		    !(extsts & RGE_RDEXTSTS_UDPCSUMERR))))
			m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
			    M_UDP_CSUM_IN_OK;
#endif

		/* Pass any hardware-extracted VLAN tag up with the mbuf. */
		if (extsts & RGE_RDEXTSTS_VTAG) {
			vlan_set_tag(m,
			    bswap16(extsts & RGE_RDEXTSTS_VLAN_MASK));
		}

		if_percpuq_enqueue(ifp->if_percpuq, m);
	}

	sc->rge_ldata.rge_rxq_considx = i;

	return (rx);
}
   1403 
/*
 * TX completion: reclaim descriptors the chip has finished with.
 *
 * Walks from the consumer toward the producer index, freeing mbufs
 * and updating statistics for each completed descriptor.
 *
 * `free' encodes the outcome: 0 = no progress, 1 = reclaimed at least
 * one descriptor, 2 = stopped at a descriptor the chip still owns
 * (in which case the transmitter is poked again via rge_txstart()).
 * Returns nonzero when any progress was made.
 */
int
rge_txeof(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct rge_txq *txq;
	uint32_t txstat;
	int cons, idx, prod;
	int free = 0;

	prod = sc->rge_ldata.rge_txq_prodidx;
	cons = sc->rge_ldata.rge_txq_considx;

	while (prod != cons) {
		txq = &sc->rge_ldata.rge_txq[cons];
		idx = txq->txq_descidx;

		/* Pull the descriptor status in from the device. */
		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
		    idx * sizeof(struct rge_tx_desc),
		    sizeof(struct rge_tx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		txstat = letoh32(sc->rge_ldata.rge_tx_list[idx].rge_cmdsts);

		/* Chip still owns this one; re-kick TX and stop here. */
		if (txstat & RGE_TDCMDSTS_OWN) {
			free = 2;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap, 0,
		    txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
		m_freem(txq->txq_mbuf);
		txq->txq_mbuf = NULL;

		net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
		if (txstat & (RGE_TDCMDSTS_EXCESSCOLL | RGE_TDCMDSTS_COLL))
			if_statinc_ref(ifp, nsr, if_collisions);
		if (txstat & RGE_TDCMDSTS_TXERR)
			if_statinc_ref(ifp, nsr, if_oerrors);
		else
			if_statinc_ref(ifp, nsr, if_opackets);
		IF_STAT_PUTREF(ifp);

		/* Hand the descriptor slot back for reuse. */
		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
		    idx * sizeof(struct rge_tx_desc),
		    sizeof(struct rge_tx_desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		cons = RGE_NEXT_TX_DESC(idx);
		free = 1;
	}

	if (free == 0)
		return (0);

	sc->rge_ldata.rge_txq_considx = cons;

	/* A still-owned descriptor was seen: restart the transmitter. */
	if (free == 2)
		rge_txstart(sc);

	CLR(ifp->if_flags, IFF_OACTIVE);
	ifp->if_timer = 0;
	if_schedule_deferred_start(ifp);

	return (1);
}
   1470 
/*
 * Reset the chip.
 *
 * Gates RXDV to stop incoming traffic, waits for the TX/RX FIFOs to
 * drain (plus an extra MAC-revision-specific poll on MAC_CFG4/5),
 * then issues a software reset and waits for it to self-clear.
 */
void
rge_reset(struct rge_softc *sc)
{
	int i;

	/* Enable RXDV gate. */
	RGE_SETBIT_1(sc, RGE_PPSW, 0x08);
	DELAY(2000);

	/* Wait up to ~150ms for both FIFOs to report empty. */
	for (i = 0; i < 3000; i++) {
		DELAY(50);
		if ((RGE_READ_1(sc, RGE_MCUCMD) & (RGE_MCUCMD_RXFIFO_EMPTY |
		    RGE_MCUCMD_TXFIFO_EMPTY)) == (RGE_MCUCMD_RXFIFO_EMPTY |
		    RGE_MCUCMD_TXFIFO_EMPTY))
			break;
	}
	if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5) {
		/* Additional quiesce poll required on these revisions. */
		for (i = 0; i < 3000; i++) {
			DELAY(50);
			if ((RGE_READ_2(sc, RGE_IM) & 0x0103) == 0x0103)
				break;
		}
	}

	DELAY(2000);

	/* Soft reset. */
	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_RESET);

	/* The reset bit self-clears when the reset has completed. */
	for (i = 0; i < RGE_TIMEOUT; i++) {
		DELAY(100);
		if (!(RGE_READ_1(sc, RGE_CMD) & RGE_CMD_RESET))
			break;
	}
	if (i == RGE_TIMEOUT)
		device_printf(sc->sc_dev, "reset never completed!\n");
}
   1508 
/*
 * Program the hardware receive filter.
 *
 * Unicast (own station address) and broadcast are always accepted.
 * Promiscuous mode accepts everything; otherwise a 64-bit multicast
 * hash filter is built from the interface's multicast list.  A
 * multicast address range entry forces all-multicast mode.
 */
void
rge_iff(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct ethercom *ec = &sc->sc_ec;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[2];
	uint32_t rxfilt;
	int h = 0;

	rxfilt = RGE_READ_4(sc, RGE_RXCFG);
	rxfilt &= ~(RGE_RXCFG_ALLPHYS | RGE_RXCFG_MULTI);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept frames destined to our station address.
	 * Always accept broadcast frames.
	 */
	rxfilt |= RGE_RXCFG_INDIV | RGE_RXCFG_BROAD;

	if (ifp->if_flags & IFF_PROMISC) {
 allmulti:
		/*
		 * Accept all multicast; additionally accept all unicast
		 * (ALLPHYS) when promiscuous.  Entered via goto when a
		 * multicast range entry is found below.
		 */
		ifp->if_flags |= IFF_ALLMULTI;
		rxfilt |= RGE_RXCFG_MULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= RGE_RXCFG_ALLPHYS;
		hashes[0] = hashes[1] = 0xffffffff;
	} else {
		rxfilt |= RGE_RXCFG_MULTI;
		/* Program new filter. */
		memset(hashes, 0, sizeof(hashes));

		ETHER_LOCK(ec);
		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			/* Address ranges can't be hashed; fall back. */
			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN) != 0) {
			    	ETHER_UNLOCK(ec);
				goto allmulti;
			}
			/* Top 6 bits of the big-endian CRC select the bit. */
			h = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN) >> 26;

			if (h < 32)
				hashes[0] |= (1U << h);
			else
				hashes[1] |= (1U << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
		ETHER_UNLOCK(ec);
	}

	/* The MAR registers take the hash words byte-swapped and swapped. */
	RGE_WRITE_4(sc, RGE_RXCFG, rxfilt);
	RGE_WRITE_4(sc, RGE_MAR0, bswap32(hashes[1]));
	RGE_WRITE_4(sc, RGE_MAR4, bswap32(hashes[0]));
}
   1567 
   1568 void
   1569 rge_set_phy_power(struct rge_softc *sc, int on)
   1570 {
   1571 	int i;
   1572 
   1573 	if (on) {
   1574 		RGE_SETBIT_1(sc, RGE_PMCH, 0xc0);
   1575 
   1576 		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN);
   1577 
   1578 		for (i = 0; i < RGE_TIMEOUT; i++) {
   1579 			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 3)
   1580 				break;
   1581 			DELAY(1000);
   1582 		}
   1583 	} else {
   1584 		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_PDOWN);
   1585 		RGE_CLRBIT_1(sc, RGE_PMCH, 0x80);
   1586 		RGE_CLRBIT_1(sc, RGE_PPSW, 0x40);
   1587 	}
   1588 }
   1589 
/*
 * PHY initialization.
 *
 * Reads the loaded PHY microcode version, dispatches to the
 * per-MAC-revision configuration routine, then disables EEE and
 * advanced EEE (the 8126 variant skips the MCU patch bracketing).
 */
void
rge_phy_config(struct rge_softc *sc)
{
	/* Read microcode version. */
	rge_write_phy_ocp(sc, 0xa436, 0x801e);
	sc->rge_mcodever = rge_read_phy_ocp(sc, 0xa438);

	/* Apply the fixups matching this chip revision. */
	switch (sc->rge_type) {
	case MAC_CFG2_8126:
		rge_phy_config_mac_cfg2_8126(sc);
		break;
	case MAC_CFG2:
		rge_phy_config_mac_cfg2(sc);
		break;
	case MAC_CFG3:
		rge_phy_config_mac_cfg3(sc);
		break;
	case MAC_CFG4:
		rge_phy_config_mac_cfg4(sc);
		break;
	case MAC_CFG5:
		rge_phy_config_mac_cfg5(sc);
		break;
	default:
		break;	/* Can't happen. */
	}

	rge_write_phy(sc, 0x0a5b, 0x12,
	    rge_read_phy(sc, 0x0a5b, 0x12) & ~0x8000);

	/* Disable EEE. */
	RGE_MAC_CLRBIT(sc, 0xe040, 0x0003);
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
		RGE_MAC_CLRBIT(sc, 0xeb62, 0x0006);
		RGE_PHY_CLRBIT(sc, 0xa432, 0x0010);
	}
	RGE_PHY_CLRBIT(sc, 0xa5d0, 0x0006);
	RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0001);
	if (sc->rge_type == MAC_CFG2_8126)
		RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0002);
	RGE_PHY_CLRBIT(sc, 0xa6d8, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xa428, 0x0080);
	RGE_PHY_CLRBIT(sc, 0xa4a2, 0x0200);

	/* Disable advanced EEE. */
	if (sc->rge_type != MAC_CFG2_8126)
		rge_patch_phy_mcu(sc, 1);
	RGE_MAC_CLRBIT(sc, 0xe052, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xa442, 0x3000);
	RGE_PHY_CLRBIT(sc, 0xa430, 0x8000);
	if (sc->rge_type != MAC_CFG2_8126)
		rge_patch_phy_mcu(sc, 0);
}
   1643 
   1644 void
   1645 rge_phy_config_mac_cfg2_8126(struct rge_softc *sc)
   1646 {
   1647 	uint16_t val;
   1648 	int i;
   1649 	static const uint16_t mac_cfg2_a438_value[] =
   1650 	    { 0x0044, 0x00a8, 0x00d6, 0x00ec, 0x00f6, 0x00fc, 0x00fe,
   1651 	      0x00fe, 0x00bc, 0x0058, 0x002a, 0x003f, 0x3f02, 0x023c,
   1652 	      0x3b0a, 0x1c00, 0x0000, 0x0000, 0x0000, 0x0000 };
   1653 
   1654 	static const uint16_t mac_cfg2_b87e_value[] =
   1655 	    { 0x03ed, 0x03ff, 0x0009, 0x03fe, 0x000b, 0x0021, 0x03f7,
   1656 	      0x03b8, 0x03e0, 0x0049, 0x0049, 0x03e0, 0x03b8, 0x03f7,
   1657 	      0x0021, 0x000b, 0x03fe, 0x0009, 0x03ff, 0x03ed, 0x000e,
   1658 	      0x03fe, 0x03ed, 0x0006, 0x001a, 0x03f1, 0x03d8, 0x0023,
   1659 	      0x0054, 0x0322, 0x00dd, 0x03ab, 0x03dc, 0x0027, 0x000e,
   1660 	      0x03e5, 0x03f9, 0x0012, 0x0001, 0x03f1 };
   1661 
   1662 	rge_phy_config_mcu(sc, RGE_MAC_CFG2_8126_MCODE_VER);
   1663 
   1664 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
   1665 	rge_write_phy_ocp(sc, 0xa436, 0x80bf);
   1666 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1667 	rge_write_phy_ocp(sc, 0xa438, val | 0xed00);
   1668 	rge_write_phy_ocp(sc, 0xa436, 0x80cd);
   1669 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1670 	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
   1671 	rge_write_phy_ocp(sc, 0xa436, 0x80d1);
   1672 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1673 	rge_write_phy_ocp(sc, 0xa438, val | 0xc800);
   1674 	rge_write_phy_ocp(sc, 0xa436, 0x80d4);
   1675 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1676 	rge_write_phy_ocp(sc, 0xa438, val | 0xc800);
   1677 	rge_write_phy_ocp(sc, 0xa436, 0x80e1);
   1678 	rge_write_phy_ocp(sc, 0xa438, 0x10cc);
   1679 	rge_write_phy_ocp(sc, 0xa436, 0x80e5);
   1680 	rge_write_phy_ocp(sc, 0xa438, 0x4f0c);
   1681 	rge_write_phy_ocp(sc, 0xa436, 0x8387);
   1682 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1683 	rge_write_phy_ocp(sc, 0xa438, val | 0x4700);
   1684 	val = rge_read_phy_ocp(sc, 0xa80c) & ~0x00c0;
   1685 	rge_write_phy_ocp(sc, 0xa80c, val | 0x0080);
   1686 	RGE_PHY_CLRBIT(sc, 0xac90, 0x0010);
   1687 	RGE_PHY_CLRBIT(sc, 0xad2c, 0x8000);
   1688 	rge_write_phy_ocp(sc, 0xb87c, 0x8321);
   1689 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1690 	rge_write_phy_ocp(sc, 0xb87e, val | 0x1100);
   1691 	RGE_PHY_SETBIT(sc, 0xacf8, 0x000c);
   1692 	rge_write_phy_ocp(sc, 0xa436, 0x8183);
   1693 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1694 	rge_write_phy_ocp(sc, 0xa438, val | 0x5900);
   1695 	RGE_PHY_SETBIT(sc, 0xad94, 0x0020);
   1696 	RGE_PHY_CLRBIT(sc, 0xa654, 0x0800);
   1697 	RGE_PHY_SETBIT(sc, 0xb648, 0x4000);
   1698 	rge_write_phy_ocp(sc, 0xb87c, 0x839e);
   1699 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1700 	rge_write_phy_ocp(sc, 0xb87e, val | 0x2f00);
   1701 	rge_write_phy_ocp(sc, 0xb87c, 0x83f2);
   1702 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1703 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0800);
   1704 	RGE_PHY_SETBIT(sc, 0xada0, 0x0002);
   1705 	rge_write_phy_ocp(sc, 0xb87c, 0x80f3);
   1706 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1707 	rge_write_phy_ocp(sc, 0xb87e, val | 0x9900);
   1708 	rge_write_phy_ocp(sc, 0xb87c, 0x8126);
   1709 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1710 	rge_write_phy_ocp(sc, 0xb87e, val | 0xc100);
   1711 	rge_write_phy_ocp(sc, 0xb87c, 0x893a);
   1712 	rge_write_phy_ocp(sc, 0xb87e, 0x8080);
   1713 	rge_write_phy_ocp(sc, 0xb87c, 0x8647);
   1714 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1715 	rge_write_phy_ocp(sc, 0xb87e, val | 0xe600);
   1716 	rge_write_phy_ocp(sc, 0xb87c, 0x862c);
   1717 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1718 	rge_write_phy_ocp(sc, 0xb87e, val | 0x1200);
   1719 	rge_write_phy_ocp(sc, 0xb87c, 0x864a);
   1720 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1721 	rge_write_phy_ocp(sc, 0xb87e, val | 0xe600);
   1722 	rge_write_phy_ocp(sc, 0xb87c, 0x80a0);
   1723 	rge_write_phy_ocp(sc, 0xb87e, 0xbcbc);
   1724 	rge_write_phy_ocp(sc, 0xb87c, 0x805e);
   1725 	rge_write_phy_ocp(sc, 0xb87e, 0xbcbc);
   1726 	rge_write_phy_ocp(sc, 0xb87c, 0x8056);
   1727 	rge_write_phy_ocp(sc, 0xb87e, 0x3077);
   1728 	rge_write_phy_ocp(sc, 0xb87c, 0x8058);
   1729 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1730 	rge_write_phy_ocp(sc, 0xb87e, val | 0x5a00);
   1731 	rge_write_phy_ocp(sc, 0xb87c, 0x8098);
   1732 	rge_write_phy_ocp(sc, 0xb87e, 0x3077);
   1733 	rge_write_phy_ocp(sc, 0xb87c, 0x809a);
   1734 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1735 	rge_write_phy_ocp(sc, 0xb87e, val | 0x5a00);
   1736 	rge_write_phy_ocp(sc, 0xb87c, 0x8052);
   1737 	rge_write_phy_ocp(sc, 0xb87e, 0x3733);
   1738 	rge_write_phy_ocp(sc, 0xb87c, 0x8094);
   1739 	rge_write_phy_ocp(sc, 0xb87e, 0x3733);
   1740 	rge_write_phy_ocp(sc, 0xb87c, 0x807f);
   1741 	rge_write_phy_ocp(sc, 0xb87e, 0x7c75);
   1742 	rge_write_phy_ocp(sc, 0xb87c, 0x803d);
   1743 	rge_write_phy_ocp(sc, 0xb87e, 0x7c75);
   1744 	rge_write_phy_ocp(sc, 0xb87c, 0x8036);
   1745 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1746 	rge_write_phy_ocp(sc, 0xb87e, val | 0x3000);
   1747 	rge_write_phy_ocp(sc, 0xb87c, 0x8078);
   1748 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1749 	rge_write_phy_ocp(sc, 0xb87e, val | 0x3000);
   1750 	rge_write_phy_ocp(sc, 0xb87c, 0x8031);
   1751 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1752 	rge_write_phy_ocp(sc, 0xb87e, val | 0x3300);
   1753 	rge_write_phy_ocp(sc, 0xb87c, 0x8073);
   1754 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1755 	rge_write_phy_ocp(sc, 0xb87e, val | 0x3300);
   1756 	val = rge_read_phy_ocp(sc, 0xae06) & ~0xfc00;
   1757 	rge_write_phy_ocp(sc, 0xae06, val | 0x7c00);
   1758 	rge_write_phy_ocp(sc, 0xb87c, 0x89D1);
   1759 	rge_write_phy_ocp(sc, 0xb87e, 0x0004);
   1760 	rge_write_phy_ocp(sc, 0xa436, 0x8fbd);
   1761 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1762 	rge_write_phy_ocp(sc, 0xa438, val | 0x0a00);
   1763 	rge_write_phy_ocp(sc, 0xa436, 0x8fbe);
   1764 	rge_write_phy_ocp(sc, 0xa438, 0x0d09);
   1765 	rge_write_phy_ocp(sc, 0xb87c, 0x89cd);
   1766 	rge_write_phy_ocp(sc, 0xb87e, 0x0f0f);
   1767 	rge_write_phy_ocp(sc, 0xb87c, 0x89cf);
   1768 	rge_write_phy_ocp(sc, 0xb87e, 0x0f0f);
   1769 	rge_write_phy_ocp(sc, 0xb87c, 0x83a4);
   1770 	rge_write_phy_ocp(sc, 0xb87e, 0x6600);
   1771 	rge_write_phy_ocp(sc, 0xb87c, 0x83a6);
   1772 	rge_write_phy_ocp(sc, 0xb87e, 0x6601);
   1773 	rge_write_phy_ocp(sc, 0xb87c, 0x83c0);
   1774 	rge_write_phy_ocp(sc, 0xb87e, 0x6600);
   1775 	rge_write_phy_ocp(sc, 0xb87c, 0x83c2);
   1776 	rge_write_phy_ocp(sc, 0xb87e, 0x6601);
   1777 	rge_write_phy_ocp(sc, 0xb87c, 0x8414);
   1778 	rge_write_phy_ocp(sc, 0xb87e, 0x6600);
   1779 	rge_write_phy_ocp(sc, 0xb87c, 0x8416);
   1780 	rge_write_phy_ocp(sc, 0xb87e, 0x6601);
   1781 	rge_write_phy_ocp(sc, 0xb87c, 0x83f8);
   1782 	rge_write_phy_ocp(sc, 0xb87e, 0x6600);
   1783 	rge_write_phy_ocp(sc, 0xb87c, 0x83fa);
   1784 	rge_write_phy_ocp(sc, 0xb87e, 0x6601);
   1785 
   1786 	rge_patch_phy_mcu(sc, 1);
   1787 	val = rge_read_phy_ocp(sc, 0xbd96) & ~0x1f00;
   1788 	rge_write_phy_ocp(sc, 0xbd96, val | 0x1000);
   1789 	val = rge_read_phy_ocp(sc, 0xbf1c) & ~0x0007;
   1790 	rge_write_phy_ocp(sc, 0xbf1c, val | 0x0007);
   1791 	RGE_PHY_CLRBIT(sc, 0xbfbe, 0x8000);
   1792 	val = rge_read_phy_ocp(sc, 0xbf40) & ~0x0380;
   1793 	rge_write_phy_ocp(sc, 0xbf40, val | 0x0280);
   1794 	val = rge_read_phy_ocp(sc, 0xbf90) & ~0x0080;
   1795 	rge_write_phy_ocp(sc, 0xbf90, val | 0x0060);
   1796 	val = rge_read_phy_ocp(sc, 0xbf90) & ~0x0010;
   1797 	rge_write_phy_ocp(sc, 0xbf90, val | 0x000c);
   1798 	rge_patch_phy_mcu(sc, 0);
   1799 
   1800 	rge_write_phy_ocp(sc, 0xa436, 0x843b);
   1801 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1802 	rge_write_phy_ocp(sc, 0xa438, val | 0x2000);
   1803 	rge_write_phy_ocp(sc, 0xa436, 0x843d);
   1804 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1805 	rge_write_phy_ocp(sc, 0xa438, val | 0x2000);
   1806 	RGE_PHY_CLRBIT(sc, 0xb516, 0x007f);
   1807 	RGE_PHY_CLRBIT(sc, 0xbf80, 0x0030);
   1808 
   1809 	rge_write_phy_ocp(sc, 0xa436, 0x8188);
   1810 	for (i = 0; i < 11; i++)
   1811 		rge_write_phy_ocp(sc, 0xa438, mac_cfg2_a438_value[i]);
   1812 
   1813 	rge_write_phy_ocp(sc, 0xb87c, 0x8015);
   1814 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1815 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0800);
   1816 	rge_write_phy_ocp(sc, 0xb87c, 0x8ffd);
   1817 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1818 	rge_write_phy_ocp(sc, 0xb87e, val | 0);
   1819 	rge_write_phy_ocp(sc, 0xb87c, 0x8fff);
   1820 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1821 	rge_write_phy_ocp(sc, 0xb87e, val | 0x7f00);
   1822 	rge_write_phy_ocp(sc, 0xb87c, 0x8ffb);
   1823 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1824 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0100);
   1825 	rge_write_phy_ocp(sc, 0xb87c, 0x8fe9);
   1826 	rge_write_phy_ocp(sc, 0xb87e, 0x0002);
   1827 	rge_write_phy_ocp(sc, 0xb87c, 0x8fef);
   1828 	rge_write_phy_ocp(sc, 0xb87e, 0x00a5);
   1829 	rge_write_phy_ocp(sc, 0xb87c, 0x8ff1);
   1830 	rge_write_phy_ocp(sc, 0xb87e, 0x0106);
   1831 	rge_write_phy_ocp(sc, 0xb87c, 0x8fe1);
   1832 	rge_write_phy_ocp(sc, 0xb87e, 0x0102);
   1833 	rge_write_phy_ocp(sc, 0xb87c, 0x8fe3);
   1834 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1835 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0400);
   1836 	RGE_PHY_SETBIT(sc, 0xa654, 0x0800);
   1837 	RGE_PHY_CLRBIT(sc, 0xa654, 0x0003);
   1838 	rge_write_phy_ocp(sc, 0xac3a, 0x5851);
   1839 	val = rge_read_phy_ocp(sc, 0xac3c) & ~0xd000;
   1840 	rge_write_phy_ocp(sc, 0xac3c, val | 0x2000);
   1841 	val = rge_read_phy_ocp(sc, 0xac42) & ~0x0200;
   1842 	rge_write_phy_ocp(sc, 0xac42, val | 0x01c0);
   1843 	RGE_PHY_CLRBIT(sc, 0xac3e, 0xe000);
   1844 	RGE_PHY_CLRBIT(sc, 0xac42, 0x0038);
   1845 	val = rge_read_phy_ocp(sc, 0xac42) & ~0x0002;
   1846 	rge_write_phy_ocp(sc, 0xac42, val | 0x0005);
   1847 	rge_write_phy_ocp(sc, 0xac1a, 0x00db);
   1848 	rge_write_phy_ocp(sc, 0xade4, 0x01b5);
   1849 	RGE_PHY_CLRBIT(sc, 0xad9c, 0x0c00);
   1850 	rge_write_phy_ocp(sc, 0xb87c, 0x814b);
   1851 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1852 	rge_write_phy_ocp(sc, 0xb87e, val | 0x1100);
   1853 	rge_write_phy_ocp(sc, 0xb87c, 0x814d);
   1854 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1855 	rge_write_phy_ocp(sc, 0xb87e, val | 0x1100);
   1856 	rge_write_phy_ocp(sc, 0xb87c, 0x814f);
   1857 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1858 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0b00);
   1859 	rge_write_phy_ocp(sc, 0xb87c, 0x8142);
   1860 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1861 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0100);
   1862 	rge_write_phy_ocp(sc, 0xb87c, 0x8144);
   1863 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1864 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0100);
   1865 	rge_write_phy_ocp(sc, 0xb87c, 0x8150);
   1866 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1867 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0100);
   1868 	rge_write_phy_ocp(sc, 0xb87c, 0x8118);
   1869 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1870 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0700);
   1871 	rge_write_phy_ocp(sc, 0xb87c, 0x811a);
   1872 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1873 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0700);
   1874 	rge_write_phy_ocp(sc, 0xb87c, 0x811c);
   1875 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1876 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0500);
   1877 	rge_write_phy_ocp(sc, 0xb87c, 0x810f);
   1878 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1879 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0100);
   1880 	rge_write_phy_ocp(sc, 0xb87c, 0x8111);
   1881 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1882 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0100);
   1883 	rge_write_phy_ocp(sc, 0xb87c, 0x811d);
   1884 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1885 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0100);
   1886 	RGE_PHY_SETBIT(sc, 0xac36, 0x1000);
   1887 	RGE_PHY_CLRBIT(sc, 0xad1c, 0x0100);
   1888 	val = rge_read_phy_ocp(sc, 0xade8) & ~0xffc0;
   1889 	rge_write_phy_ocp(sc, 0xade8, val | 0x1400);
   1890 	rge_write_phy_ocp(sc, 0xb87c, 0x864b);
   1891 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1892 	rge_write_phy_ocp(sc, 0xb87e, val | 0x9d00);
   1893 
   1894 	rge_write_phy_ocp(sc, 0xa436, 0x8f97);
   1895 	for (; i < nitems(mac_cfg2_a438_value); i++)
   1896 		rge_write_phy_ocp(sc, 0xa438, mac_cfg2_a438_value[i]);
   1897 
   1898 	RGE_PHY_SETBIT(sc, 0xad9c, 0x0020);
   1899 	rge_write_phy_ocp(sc, 0xb87c, 0x8122);
   1900 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1901 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0c00);
   1902 
   1903 	rge_write_phy_ocp(sc, 0xb87c, 0x82c8);
   1904 	for (i = 0; i < 20; i++)
   1905 		rge_write_phy_ocp(sc, 0xb87e, mac_cfg2_b87e_value[i]);
   1906 
   1907 	rge_write_phy_ocp(sc, 0xb87c, 0x80ef);
   1908 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1909 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0c00);
   1910 
   1911 	rge_write_phy_ocp(sc, 0xb87c, 0x82a0);
   1912 	for (; i < nitems(mac_cfg2_b87e_value); i++)
   1913 		rge_write_phy_ocp(sc, 0xb87e, mac_cfg2_b87e_value[i]);
   1914 
   1915 	rge_write_phy_ocp(sc, 0xa436, 0x8018);
   1916 	RGE_PHY_SETBIT(sc, 0xa438, 0x2000);
   1917 	rge_write_phy_ocp(sc, 0xb87c, 0x8fe4);
   1918 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1919 	rge_write_phy_ocp(sc, 0xb87e, val | 0);
   1920 	val = rge_read_phy_ocp(sc, 0xb54c) & ~0xffc0;
   1921 	rge_write_phy_ocp(sc, 0xb54c, val | 0x3700);
   1922 }
   1923 
/*
 * PHY configuration for MAC_CFG2 (early RTL8125 silicon): program the
 * external PHY (EPHY) tunables, load the matching PHY MCU microcode,
 * then apply a sequence of vendor-supplied PHY OCP register settings.
 * The register addresses and values are undocumented magic taken from
 * the Realtek reference driver; do not reorder or merge the writes.
 */
void
rge_phy_config_mac_cfg2(struct rge_softc *sc)
{
	uint16_t val;
	int i;

	/* Program EPHY registers with the vendor-supplied table. */
	for (i = 0; i < nitems(rtl8125_mac_cfg2_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg2_ephy[i].reg,
		    rtl8125_mac_cfg2_ephy[i].val);

	/* Load PHY MCU microcode if not already at this version. */
	rge_phy_config_mcu(sc, RGE_MAC_CFG2_MCODE_VER);

	/* Vendor magic: read-modify-write of undocumented PHY OCP regs. */
	val = rge_read_phy_ocp(sc, 0xad40) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0084);
	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad16, val | 0x0006);
	val = rge_read_phy_ocp(sc, 0xad32) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x1100);
	val = rge_read_phy_ocp(sc, 0xac8a) & ~0xf000;
	rge_write_phy_ocp(sc, 0xac8a, val | 0x7000);
	RGE_PHY_SETBIT(sc, 0xad18, 0x0400);
	RGE_PHY_SETBIT(sc, 0xad1a, 0x03ff);
	RGE_PHY_SETBIT(sc, 0xad1c, 0x03ff);

	/*
	 * Indirect PHY parameter writes: 0xa436 selects the parameter
	 * address, 0xa438 carries the data.
	 */
	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xc400);
	rge_write_phy_ocp(sc, 0xa436, 0x80eb);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0x0700;
	rge_write_phy_ocp(sc, 0xa438, val | 0x0300);
	rge_write_phy_ocp(sc, 0xa436, 0x80f8);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80f1);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x3000);
	rge_write_phy_ocp(sc, 0xa436, 0x80fe);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xa500);
	rge_write_phy_ocp(sc, 0xa436, 0x8102);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x8105);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x3300);
	rge_write_phy_ocp(sc, 0xa436, 0x8100);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x7000);
	rge_write_phy_ocp(sc, 0xa436, 0x8104);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xf000);
	rge_write_phy_ocp(sc, 0xa436, 0x8106);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6500);
	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xed00);
	rge_write_phy_ocp(sc, 0xa436, 0x80df);
	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
	rge_write_phy_ocp(sc, 0xa436, 0x80e1);
	RGE_PHY_CLRBIT(sc, 0xa438, 0x0100);
	val = rge_read_phy_ocp(sc, 0xbf06) & ~0x003f;
	rge_write_phy_ocp(sc, 0xbf06, val | 0x0038);
	rge_write_phy_ocp(sc, 0xa436, 0x819f);
	rge_write_phy_ocp(sc, 0xa438, 0xd0b6);
	rge_write_phy_ocp(sc, 0xbc34, 0x5555);
	val = rge_read_phy_ocp(sc, 0xbf0a) & ~0x0e00;
	rge_write_phy_ocp(sc, 0xbf0a, val | 0x0a00);
	RGE_PHY_CLRBIT(sc, 0xa5c0, 0x0400);
	/* Final step common to all MAC_CFG variants. */
	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
}
   1997 
/*
 * PHY configuration for MAC_CFG3 (RTL8125 rev. B): program EPHY
 * tunables, load the PHY MCU microcode, then apply vendor-supplied
 * magic PHY OCP settings.  All register addresses and values come
 * from the Realtek reference driver and are undocumented; preserve
 * the exact write order.
 */
void
rge_phy_config_mac_cfg3(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint16_t val;
	int i;
	/* Vendor-supplied data table written via the 0xa436/0xa438 pair. */
	static const uint16_t mac_cfg3_a438_value[] =
	    { 0x0043, 0x00a7, 0x00d6, 0x00ec, 0x00f6, 0x00fb, 0x00fd, 0x00ff,
	      0x00bb, 0x0058, 0x0029, 0x0013, 0x0009, 0x0004, 0x0002 };

	/* Address/data pairs written via the 0xb88e/0xb890 pair. */
	static const uint16_t mac_cfg3_b88e_value[] =
	    { 0xc091, 0x6e12, 0xc092, 0x1214, 0xc094, 0x1516, 0xc096, 0x171b,
	      0xc098, 0x1b1c, 0xc09a, 0x1f1f, 0xc09c, 0x2021, 0xc09e, 0x2224,
	      0xc0a0, 0x2424, 0xc0a2, 0x2424, 0xc0a4, 0x2424, 0xc018, 0x0af2,
	      0xc01a, 0x0d4a, 0xc01c, 0x0f26, 0xc01e, 0x118d, 0xc020, 0x14f3,
	      0xc022, 0x175a, 0xc024, 0x19c0, 0xc026, 0x1c26, 0xc089, 0x6050,
	      0xc08a, 0x5f6e, 0xc08c, 0x6e6e, 0xc08e, 0x6e6e, 0xc090, 0x6e12 };

	/* Program EPHY registers with the vendor-supplied table. */
	for (i = 0; i < nitems(rtl8125_mac_cfg3_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg3_ephy[i].reg,
		    rtl8125_mac_cfg3_ephy[i].val);

	val = rge_read_ephy(sc, 0x002a) & ~0x7000;
	rge_write_ephy(sc, 0x002a, val | 0x3000);
	RGE_EPHY_CLRBIT(sc, 0x0019, 0x0040);
	RGE_EPHY_SETBIT(sc, 0x001b, 0x0e00);
	RGE_EPHY_CLRBIT(sc, 0x001b, 0x7000);
	rge_write_ephy(sc, 0x0002, 0x6042);
	rge_write_ephy(sc, 0x0006, 0x0014);
	val = rge_read_ephy(sc, 0x006a) & ~0x7000;
	rge_write_ephy(sc, 0x006a, val | 0x3000);
	RGE_EPHY_CLRBIT(sc, 0x0059, 0x0040);
	RGE_EPHY_SETBIT(sc, 0x005b, 0x0e00);
	RGE_EPHY_CLRBIT(sc, 0x005b, 0x7000);
	rge_write_ephy(sc, 0x0042, 0x6042);
	rge_write_ephy(sc, 0x0046, 0x0014);

	/* Load PHY MCU microcode if not already at this version. */
	rge_phy_config_mcu(sc, RGE_MAC_CFG3_MCODE_VER);

	/* Vendor magic: undocumented PHY OCP register tuning. */
	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad16, val | 0x03ff);
	val = rge_read_phy_ocp(sc, 0xad32) & ~0x003f;
	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x1000);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x0100);
	val = rge_read_phy_ocp(sc, 0xacc0) & ~0x0003;
	rge_write_phy_ocp(sc, 0xacc0, val | 0x0002);
	val = rge_read_phy_ocp(sc, 0xad40) & ~0x00e0;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0040);
	val = rge_read_phy_ocp(sc, 0xad40) & ~0x0007;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0004);
	RGE_PHY_CLRBIT(sc, 0xac14, 0x0080);
	RGE_PHY_CLRBIT(sc, 0xac80, 0x0300);
	val = rge_read_phy_ocp(sc, 0xac5e) & ~0x0007;
	rge_write_phy_ocp(sc, 0xac5e, val | 0x0002);
	rge_write_phy_ocp(sc, 0xad4c, 0x00a8);
	rge_write_phy_ocp(sc, 0xac5c, 0x01ff);
	val = rge_read_phy_ocp(sc, 0xac8a) & ~0x00f0;
	rge_write_phy_ocp(sc, 0xac8a, val | 0x0030);
	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0500);
	rge_write_phy_ocp(sc, 0xb87c, 0x8159);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0700);
	/* EEE TX idle timer tracks the current MTU plus header overhead. */
	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN +
	    32);
	rge_write_phy_ocp(sc, 0xb87c, 0x80a2);
	rge_write_phy_ocp(sc, 0xb87e, 0x0153);
	rge_write_phy_ocp(sc, 0xb87c, 0x809c);
	rge_write_phy_ocp(sc, 0xb87e, 0x0153);

	/* Write the a438 table, then pad with 26 zero entries. */
	rge_write_phy_ocp(sc, 0xa436, 0x81b3);
	for (i = 0; i < nitems(mac_cfg3_a438_value); i++)
		rge_write_phy_ocp(sc, 0xa438, mac_cfg3_a438_value[i]);
	for (i = 0; i < 26; i++)
		rge_write_phy_ocp(sc, 0xa438, 0);
	rge_write_phy_ocp(sc, 0xa436, 0x8257);
	rge_write_phy_ocp(sc, 0xa438, 0x020f);
	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
	rge_write_phy_ocp(sc, 0xa438, 0x7843);

	/* The b88e/b890 pairs must be written under the MCU patch request. */
	rge_patch_phy_mcu(sc, 1);
	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
	for (i = 0; i < nitems(mac_cfg3_b88e_value); i += 2) {
		rge_write_phy_ocp(sc, 0xb88e, mac_cfg3_b88e_value[i]);
		rge_write_phy_ocp(sc, 0xb890, mac_cfg3_b88e_value[i + 1]);
	}
	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
	rge_patch_phy_mcu(sc, 0);

	RGE_PHY_SETBIT(sc, 0xd068, 0x2000);
	rge_write_phy_ocp(sc, 0xa436, 0x81a2);
	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
	val = rge_read_phy_ocp(sc, 0xb54c) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb54c, val | 0xdb00);
	RGE_PHY_CLRBIT(sc, 0xa454, 0x0001);
	RGE_PHY_SETBIT(sc, 0xa5d4, 0x0020);
	RGE_PHY_CLRBIT(sc, 0xad4e, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xa86a, 0x0001);
	/* Final step common to all MAC_CFG variants. */
	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
}
   2102 
/*
 * PHY configuration for MAC_CFG4 (RTL8125B rev. A): program EPHY
 * tunables, load the PHY MCU microcode, then apply vendor-supplied
 * magic PHY OCP settings.  All register addresses and values come
 * from the Realtek reference driver and are undocumented; preserve
 * the exact write order.
 */
void
rge_phy_config_mac_cfg4(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint16_t val;
	int i;
	/* Address/data pairs written via the 0xb87c/0xb87e pair. */
	static const uint16_t mac_cfg4_b87c_value[] =
	    { 0x8013, 0x0700, 0x8fb9, 0x2801, 0x8fba, 0x0100, 0x8fbc, 0x1900,
	      0x8fbe, 0xe100, 0x8fc0, 0x0800, 0x8fc2, 0xe500, 0x8fc4, 0x0f00,
	      0x8fc6, 0xf100, 0x8fc8, 0x0400, 0x8fca, 0xf300, 0x8fcc, 0xfd00,
	      0x8fce, 0xff00, 0x8fd0, 0xfb00, 0x8fd2, 0x0100, 0x8fd4, 0xf400,
	      0x8fd6, 0xff00, 0x8fd8, 0xf600, 0x813d, 0x390e, 0x814f, 0x790e,
	      0x80b0, 0x0f31 };

	/* Program EPHY registers with the vendor-supplied table. */
	for (i = 0; i < nitems(rtl8125_mac_cfg4_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg4_ephy[i].reg,
		    rtl8125_mac_cfg4_ephy[i].val);

	rge_write_phy_ocp(sc, 0xbf86, 0x9000);
	/* Pulse 0xc402 bit 10. */
	RGE_PHY_SETBIT(sc, 0xc402, 0x0400);
	RGE_PHY_CLRBIT(sc, 0xc402, 0x0400);
	rge_write_phy_ocp(sc, 0xbd86, 0x1010);
	rge_write_phy_ocp(sc, 0xbd88, 0x1010);
	val = rge_read_phy_ocp(sc, 0xbd4e) & ~0x0c00;
	rge_write_phy_ocp(sc, 0xbd4e, val | 0x0800);
	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
	rge_write_phy_ocp(sc, 0xbf46, val | 0x0700);

	/* Load PHY MCU microcode if not already at this version. */
	rge_phy_config_mcu(sc, RGE_MAC_CFG4_MCODE_VER);

	/* Vendor magic: undocumented PHY OCP register tuning. */
	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
	RGE_PHY_SETBIT(sc, 0xbc08, 0x000c);
	rge_write_phy_ocp(sc, 0xa436, 0x8fff);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x0400);
	/* Six consecutive b87c params at 0x8560: first three 0x19cc. */
	for (i = 0; i < 6; i++) {
		rge_write_phy_ocp(sc, 0xb87c, 0x8560 + i * 2);
		if (i < 3)
			rge_write_phy_ocp(sc, 0xb87e, 0x19cc);
		else
			rge_write_phy_ocp(sc, 0xb87e, 0x147d);
	}
	rge_write_phy_ocp(sc, 0xb87c, 0x8ffe);
	rge_write_phy_ocp(sc, 0xb87e, 0x0907);
	val = rge_read_phy_ocp(sc, 0xacda) & ~0xff00;
	rge_write_phy_ocp(sc, 0xacda, val | 0xff00);
	val = rge_read_phy_ocp(sc, 0xacde) & ~0xf000;
	rge_write_phy_ocp(sc, 0xacde, val | 0xf000);
	rge_write_phy_ocp(sc, 0xb87c, 0x80d6);
	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
	rge_write_phy_ocp(sc, 0xb87c, 0x80F2);
	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
	rge_write_phy_ocp(sc, 0xb87c, 0x80f4);
	rge_write_phy_ocp(sc, 0xb87e, 0x6077);
	rge_write_phy_ocp(sc, 0xb506, 0x01e7);
	rge_write_phy_ocp(sc, 0xac8c, 0x0ffc);
	rge_write_phy_ocp(sc, 0xac46, 0xb7b4);
	rge_write_phy_ocp(sc, 0xac50, 0x0fbc);
	rge_write_phy_ocp(sc, 0xac3c, 0x9240);
	rge_write_phy_ocp(sc, 0xac4E, 0x0db4);
	rge_write_phy_ocp(sc, 0xacc6, 0x0707);
	rge_write_phy_ocp(sc, 0xacc8, 0xa0d3);
	rge_write_phy_ocp(sc, 0xad08, 0x0007);
	/* Write the b87c address/data table. */
	for (i = 0; i < nitems(mac_cfg4_b87c_value); i += 2) {
		rge_write_phy_ocp(sc, 0xb87c, mac_cfg4_b87c_value[i]);
		rge_write_phy_ocp(sc, 0xb87e, mac_cfg4_b87c_value[i + 1]);
	}
	RGE_PHY_SETBIT(sc, 0xbf4c, 0x0002);
	RGE_PHY_SETBIT(sc, 0xbcca, 0x0300);
	rge_write_phy_ocp(sc, 0xb87c, 0x8141);
	rge_write_phy_ocp(sc, 0xb87e, 0x320e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8153);
	rge_write_phy_ocp(sc, 0xb87e, 0x720e);
	RGE_PHY_CLRBIT(sc, 0xa432, 0x0040);
	rge_write_phy_ocp(sc, 0xb87c, 0x8529);
	rge_write_phy_ocp(sc, 0xb87e, 0x050e);
	/* EEE TX idle timer tracks the current MTU plus header overhead. */
	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN +
	    32);
	rge_write_phy_ocp(sc, 0xa436, 0x816c);
	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
	rge_write_phy_ocp(sc, 0xa436, 0x8170);
	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
	rge_write_phy_ocp(sc, 0xa436, 0x8174);
	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
	rge_write_phy_ocp(sc, 0xa436, 0x8178);
	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
	rge_write_phy_ocp(sc, 0xa436, 0x817c);
	rge_write_phy_ocp(sc, 0xa438, 0x0719);
	rge_write_phy_ocp(sc, 0xa436, 0x8ff4);
	rge_write_phy_ocp(sc, 0xa438, 0x0400);
	rge_write_phy_ocp(sc, 0xa436, 0x8ff1);
	rge_write_phy_ocp(sc, 0xa438, 0x0404);
	rge_write_phy_ocp(sc, 0xbf4a, 0x001b);
	/* Six b87c params spaced 4 apart from 0x8033: third is 0xfc32. */
	for (i = 0; i < 6; i++) {
		rge_write_phy_ocp(sc, 0xb87c, 0x8033 + i * 4);
		if (i == 2)
			rge_write_phy_ocp(sc, 0xb87e, 0xfc32);
		else
			rge_write_phy_ocp(sc, 0xb87e, 0x7c13);
	}
	rge_write_phy_ocp(sc, 0xb87c, 0x8145);
	rge_write_phy_ocp(sc, 0xb87e, 0x370e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
	rge_write_phy_ocp(sc, 0xb87e, 0x770e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8169);
	rge_write_phy_ocp(sc, 0xb87e, 0x0d0a);
	rge_write_phy_ocp(sc, 0xb87c, 0x817b);
	rge_write_phy_ocp(sc, 0xb87e, 0x1d0a);
	rge_write_phy_ocp(sc, 0xa436, 0x8217);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x821a);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x80da);
	rge_write_phy_ocp(sc, 0xa438, 0x0403);
	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
	rge_write_phy_ocp(sc, 0xa436, 0x80b3);
	rge_write_phy_ocp(sc, 0xa438, 0x0384);
	rge_write_phy_ocp(sc, 0xa436, 0x80b7);
	rge_write_phy_ocp(sc, 0xa438, 0x2007);
	rge_write_phy_ocp(sc, 0xa436, 0x80ba);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80b5);
	rge_write_phy_ocp(sc, 0xa438, 0xf009);
	rge_write_phy_ocp(sc, 0xa436, 0x80bd);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x9f00);
	rge_write_phy_ocp(sc, 0xa436, 0x80c7);
	rge_write_phy_ocp(sc, 0xa438, 0xf083);
	rge_write_phy_ocp(sc, 0xa436, 0x80dd);
	rge_write_phy_ocp(sc, 0xa438, 0x03f0);
	rge_write_phy_ocp(sc, 0xa436, 0x80df);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
	rge_write_phy_ocp(sc, 0xa436, 0x80cb);
	rge_write_phy_ocp(sc, 0xa438, 0x2007);
	rge_write_phy_ocp(sc, 0xa436, 0x80ce);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80c9);
	rge_write_phy_ocp(sc, 0xa438, 0x8009);
	rge_write_phy_ocp(sc, 0xa436, 0x80d1);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x8000);
	rge_write_phy_ocp(sc, 0xa436, 0x80a3);
	rge_write_phy_ocp(sc, 0xa438, 0x200a);
	rge_write_phy_ocp(sc, 0xa436, 0x80a5);
	rge_write_phy_ocp(sc, 0xa438, 0xf0ad);
	rge_write_phy_ocp(sc, 0xa436, 0x809f);
	rge_write_phy_ocp(sc, 0xa438, 0x6073);
	rge_write_phy_ocp(sc, 0xa436, 0x80a1);
	rge_write_phy_ocp(sc, 0xa438, 0x000b);
	rge_write_phy_ocp(sc, 0xa436, 0x80a9);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xc000);
	/* The b88e/b890 pairs must be written under the MCU patch request. */
	rge_patch_phy_mcu(sc, 1);
	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
	rge_write_phy_ocp(sc, 0xb88e, 0xc23e);
	rge_write_phy_ocp(sc, 0xb890, 0x0000);
	rge_write_phy_ocp(sc, 0xb88e, 0xc240);
	rge_write_phy_ocp(sc, 0xb890, 0x0103);
	rge_write_phy_ocp(sc, 0xb88e, 0xc242);
	rge_write_phy_ocp(sc, 0xb890, 0x0507);
	rge_write_phy_ocp(sc, 0xb88e, 0xc244);
	rge_write_phy_ocp(sc, 0xb890, 0x090b);
	rge_write_phy_ocp(sc, 0xb88e, 0xc246);
	rge_write_phy_ocp(sc, 0xb890, 0x0c0e);
	rge_write_phy_ocp(sc, 0xb88e, 0xc248);
	rge_write_phy_ocp(sc, 0xb890, 0x1012);
	rge_write_phy_ocp(sc, 0xb88e, 0xc24a);
	rge_write_phy_ocp(sc, 0xb890, 0x1416);
	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
	rge_patch_phy_mcu(sc, 0);
	RGE_PHY_SETBIT(sc, 0xa86a, 0x0001);
	RGE_PHY_SETBIT(sc, 0xa6f0, 0x0001);
	rge_write_phy_ocp(sc, 0xbfa0, 0xd70d);
	rge_write_phy_ocp(sc, 0xbfa2, 0x4100);
	rge_write_phy_ocp(sc, 0xbfa4, 0xe868);
	rge_write_phy_ocp(sc, 0xbfa6, 0xdc59);
	rge_write_phy_ocp(sc, 0xb54c, 0x3c18);
	RGE_PHY_CLRBIT(sc, 0xbfa4, 0x0020);
	rge_write_phy_ocp(sc, 0xa436, 0x817d);
	RGE_PHY_SETBIT(sc, 0xa438, 0x1000);
}
   2292 
/*
 * PHY configuration for MAC_CFG5 (RTL8125B rev. B): program EPHY
 * tunables, load the PHY MCU microcode, then apply vendor-supplied
 * magic PHY OCP settings.  Register addresses and values come from
 * the Realtek reference driver and are undocumented; preserve order.
 */
void
rge_phy_config_mac_cfg5(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint16_t val;
	int i;

	/* Program EPHY registers with the vendor-supplied table. */
	for (i = 0; i < nitems(rtl8125_mac_cfg5_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg5_ephy[i].reg,
		    rtl8125_mac_cfg5_ephy[i].val);

	val = rge_read_ephy(sc, 0x0022) & ~0x0030;
	rge_write_ephy(sc, 0x0022, val | 0x0020);
	val = rge_read_ephy(sc, 0x0062) & ~0x0030;
	rge_write_ephy(sc, 0x0062, val | 0x0020);

	/* Load PHY MCU microcode if not already at this version. */
	rge_phy_config_mcu(sc, RGE_MAC_CFG5_MCODE_VER);

	/* Vendor magic: undocumented PHY OCP register tuning. */
	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
	val = rge_read_phy_ocp(sc, 0xac46) & ~0x00f0;
	rge_write_phy_ocp(sc, 0xac46, val | 0x0090);
	val = rge_read_phy_ocp(sc, 0xad30) & ~0x0003;
	rge_write_phy_ocp(sc, 0xad30, val | 0x0001);
	/* EEE TX idle timer tracks the current MTU plus header overhead. */
	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN +
	    32);
	rge_write_phy_ocp(sc, 0xb87c, 0x80f5);
	rge_write_phy_ocp(sc, 0xb87e, 0x760e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8107);
	rge_write_phy_ocp(sc, 0xb87e, 0x360e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8551);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0800);
	val = rge_read_phy_ocp(sc, 0xbf00) & ~0xe000;
	rge_write_phy_ocp(sc, 0xbf00, val | 0xa000);
	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
	rge_write_phy_ocp(sc, 0xbf46, val | 0x0300);
	/* Ten indirect params spaced 6 apart starting at 0x8044. */
	for (i = 0; i < 10; i++) {
		rge_write_phy_ocp(sc, 0xa436, 0x8044 + i * 6);
		rge_write_phy_ocp(sc, 0xa438, 0x2417);
	}
	RGE_PHY_SETBIT(sc, 0xa4ca, 0x0040);
	val = rge_read_phy_ocp(sc, 0xbf84) & ~0xe000;
	rge_write_phy_ocp(sc, 0xbf84, val | 0xa000);
}
   2337 
   2338 void
   2339 rge_phy_config_mcu(struct rge_softc *sc, uint16_t mcode_version)
   2340 {
   2341 	if (sc->rge_mcodever != mcode_version) {
   2342 		int i;
   2343 
   2344 		rge_patch_phy_mcu(sc, 1);
   2345 
   2346 		if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
   2347 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
   2348 			if (sc->rge_type == MAC_CFG2)
   2349 				rge_write_phy_ocp(sc, 0xa438, 0x8600);
   2350 			else
   2351 				rge_write_phy_ocp(sc, 0xa438, 0x8601);
   2352 			rge_write_phy_ocp(sc, 0xa436, 0xb82e);
   2353 			rge_write_phy_ocp(sc, 0xa438, 0x0001);
   2354 
   2355 			RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
   2356 		}
   2357 
   2358 		if (sc->rge_type == MAC_CFG2) {
   2359 			for (i = 0; i < nitems(rtl8125_mac_cfg2_mcu); i++) {
   2360 				rge_write_phy_ocp(sc,
   2361 				    rtl8125_mac_cfg2_mcu[i].reg,
   2362 				    rtl8125_mac_cfg2_mcu[i].val);
   2363 			}
   2364 		} else if (sc->rge_type == MAC_CFG3) {
   2365 			for (i = 0; i < nitems(rtl8125_mac_cfg3_mcu); i++) {
   2366 				rge_write_phy_ocp(sc,
   2367 				    rtl8125_mac_cfg3_mcu[i].reg,
   2368 				    rtl8125_mac_cfg3_mcu[i].val);
   2369 			}
   2370 		} else if (sc->rge_type == MAC_CFG4) {
   2371 			for (i = 0; i < nitems(rtl8125_mac_cfg4_mcu); i++) {
   2372 				rge_write_phy_ocp(sc,
   2373 				    rtl8125_mac_cfg4_mcu[i].reg,
   2374 				    rtl8125_mac_cfg4_mcu[i].val);
   2375 			}
   2376 		} else if (sc->rge_type == MAC_CFG5) {
   2377 			for (i = 0; i < nitems(rtl8125_mac_cfg5_mcu); i++) {
   2378 				rge_write_phy_ocp(sc,
   2379 				    rtl8125_mac_cfg5_mcu[i].reg,
   2380 				    rtl8125_mac_cfg5_mcu[i].val);
   2381 			}
   2382 		} else if (sc->rge_type == MAC_CFG2_8126) {
   2383 			for (i = 0; i < nitems(rtl8126_mac_cfg2_mcu); i++) {
   2384 				rge_write_phy_ocp(sc,
   2385 				    rtl8126_mac_cfg2_mcu[i].reg,
   2386 				    rtl8126_mac_cfg2_mcu[i].val);
   2387 			}
   2388 		}
   2389 
   2390 		if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
   2391 			RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);
   2392 
   2393 			rge_write_phy_ocp(sc, 0xa436, 0);
   2394 			rge_write_phy_ocp(sc, 0xa438, 0);
   2395 			RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
   2396 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
   2397 			rge_write_phy_ocp(sc, 0xa438, 0);
   2398 		}
   2399 
   2400 		rge_patch_phy_mcu(sc, 0);
   2401 
   2402 		/* Write microcode version. */
   2403 		rge_write_phy_ocp(sc, 0xa436, 0x801e);
   2404 		rge_write_phy_ocp(sc, 0xa438, mcode_version);
   2405 	}
   2406 }
   2407 
   2408 void
   2409 rge_set_macaddr(struct rge_softc *sc, const uint8_t *addr)
   2410 {
   2411 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
   2412 	RGE_WRITE_4(sc, RGE_MAC0,
   2413 	    (uint32_t)addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
   2414 	RGE_WRITE_4(sc, RGE_MAC4,
   2415 	    addr[5] <<  8 | addr[4]);
   2416 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
   2417 }
   2418 
   2419 void
   2420 rge_get_macaddr(struct rge_softc *sc, uint8_t *addr)
   2421 {
   2422 	int i;
   2423 
   2424 	for (i = 0; i < ETHER_ADDR_LEN; i++)
   2425 		addr[i] = RGE_READ_1(sc, RGE_ADDR0 + i);
   2426 }
   2427 
/*
 * One-time MAC initialization: clear wake/clock-request config bits,
 * disable UPS, reset the MAC MCU break points, load per-revision MAC
 * MCU fixups, disable PHY power saving, and set the PCIe uncorrectable
 * error status bit.  Order follows the vendor driver.
 */
void
rge_hw_init(struct rge_softc *sc)
{
	int i;

	/* Config registers require EECMD write access to modify. */
	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
	if (sc->rge_type == MAC_CFG2_8126)
		RGE_CLRBIT_1(sc, RGE_INT_CFG0, 0x08);
	else
		RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
	RGE_CLRBIT_1(sc, 0xf1, 0x80);

	/* Disable UPS. */
	RGE_MAC_CLRBIT(sc, 0xd40a, 0x0010);

	/* Configure MAC MCU. */
	rge_write_mac_ocp(sc, 0xfc38, 0);

	/* Clear the MAC MCU break-point registers. */
	for (i = 0xfc28; i < 0xfc38; i += 2)
		rge_write_mac_ocp(sc, i, 0);

	DELAY(3000);
	rge_write_mac_ocp(sc, 0xfc26, 0);

	/* Apply per-revision MAC MCU break-point fixups. */
	if (sc->rge_type == MAC_CFG3) {
		for (i = 0; i < nitems(rtl8125_mac_bps); i++) {
			rge_write_mac_ocp(sc, rtl8125_mac_bps[i].reg,
			    rtl8125_mac_bps[i].val);
		}
	} else if (sc->rge_type == MAC_CFG5) {
		for (i = 0; i < nitems(rtl8125b_mac_bps); i++) {
			rge_write_mac_ocp(sc, rtl8125b_mac_bps[i].reg,
			    rtl8125b_mac_bps[i].val);
		}
	}

	/* Disable PHY power saving. */
	if (sc->rge_type != MAC_CFG2_8126)
		rge_disable_phy_ocp_pwrsave(sc);

	/* Set PCIe uncorrectable error status. */
	rge_write_csi(sc, 0x108,
	    rge_read_csi(sc, 0x108) | 0x00100000);
}
   2474 
/*
 * Disable PHY OCP power saving by forcing register 0xc416 to 0x0500
 * (written under the PHY MCU patch request, with an intermediate
 * write of zero as the vendor driver does).
 */
void
rge_disable_phy_ocp_pwrsave(struct rge_softc *sc)
{
	/* Already holds the desired value? */
	if (rge_read_phy_ocp(sc, 0xc416) == 0x0500)
		return;

	rge_patch_phy_mcu(sc, 1);
	rge_write_phy_ocp(sc, 0xc416, 0);
	rge_write_phy_ocp(sc, 0xc416, 0x0500);
	rge_patch_phy_mcu(sc, 0);
}
   2485 
   2486 void
   2487 rge_patch_phy_mcu(struct rge_softc *sc, int set)
   2488 {
   2489 	int i;
   2490 
   2491 	if (set)
   2492 		RGE_PHY_SETBIT(sc, 0xb820, 0x0010);
   2493 	else
   2494 		RGE_PHY_CLRBIT(sc, 0xb820, 0x0010);
   2495 
   2496 	for (i = 0; i < 1000; i++) {
   2497 		if ((rge_read_phy_ocp(sc, 0xb800) & 0x0040) == 0x0040)
   2498 			break;
   2499 		DELAY(100);
   2500 	}
   2501 	if (i == 1000) {
   2502 		DPRINTF(("timeout waiting to patch phy mcu\n"));
   2503 		return;
   2504 	}
   2505 }
   2506 
   2507 void
   2508 rge_add_media_types(struct rge_softc *sc)
   2509 {
   2510 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
   2511 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
   2512 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
   2513 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
   2514 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T, 0, NULL);
   2515 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
   2516 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
   2517 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX, 0, NULL);
   2518 
   2519 	if (sc->rge_type == MAC_CFG2_8126) {
   2520 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_5000_T, 0, NULL);
   2521 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_5000_T | IFM_FDX,
   2522 		    0, NULL);
   2523 	}
   2524 }
   2525 
   2526 void
   2527 rge_config_imtype(struct rge_softc *sc, int imtype)
   2528 {
   2529 	switch (imtype) {
   2530 	case RGE_IMTYPE_NONE:
   2531 		sc->rge_intrs = RGE_INTRS;
   2532 		sc->rge_rx_ack = RGE_ISR_RX_OK | RGE_ISR_RX_DESC_UNAVAIL |
   2533 		    RGE_ISR_RX_FIFO_OFLOW;
   2534 		sc->rge_tx_ack = RGE_ISR_TX_OK;
   2535 		break;
   2536 	case RGE_IMTYPE_SIM:
   2537 		sc->rge_intrs = RGE_INTRS_TIMER;
   2538 		sc->rge_rx_ack = RGE_ISR_PCS_TIMEOUT;
   2539 		sc->rge_tx_ack = RGE_ISR_PCS_TIMEOUT;
   2540 		break;
   2541 	default:
   2542 		panic("%s: unknown imtype %d", device_xname(sc->sc_dev), imtype);
   2543 	}
   2544 }
   2545 
/* Disable hardware interrupt moderation. */
void
rge_disable_hw_im(struct rge_softc *sc)
{
	RGE_WRITE_2(sc, RGE_IM, 0);
}
   2551 
/* Disable simulated (timer-based) interrupt moderation. */
void
rge_disable_sim_im(struct rge_softc *sc)
{
	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
	sc->rge_timerintr = 0;
}
   2558 
/* Enable simulated (timer-based) interrupt moderation. */
void
rge_setup_sim_im(struct rge_softc *sc)
{
	RGE_WRITE_4(sc, RGE_TIMERINT0, 0x2600);
	/* Writing TIMERCNT reloads the timer. */
	RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
	sc->rge_timerintr = 1;
}
   2566 
   2567 void
   2568 rge_setup_intr(struct rge_softc *sc, int imtype)
   2569 {
   2570 	rge_config_imtype(sc, imtype);
   2571 
   2572 	/* Enable interrupts. */
   2573 	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
   2574 
   2575 	switch (imtype) {
   2576 	case RGE_IMTYPE_NONE:
   2577 		rge_disable_sim_im(sc);
   2578 		rge_disable_hw_im(sc);
   2579 		break;
   2580 	case RGE_IMTYPE_SIM:
   2581 		rge_disable_hw_im(sc);
   2582 		rge_setup_sim_im(sc);
   2583 		break;
   2584 	default:
   2585 		panic("%s: unknown imtype %d", device_xname(sc->sc_dev), imtype);
   2586 	}
   2587 }
   2588 
/*
 * Take the chip out of OOB (out-of-band / management firmware) mode so
 * the host driver owns the MAC.  The register sequence follows the
 * vendor driver; several OCP registers are undocumented magic.
 */
void
rge_exit_oob(struct rge_softc *sc)
{
	int i;

	/* Clear all RX filter accept bits while reconfiguring. */
	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
	    RGE_RXCFG_ERRPKT);

	/* Disable RealWoW. */
	rge_write_mac_ocp(sc, 0xc0bc, 0x00ff);

	rge_reset(sc);

	/* Disable OOB. */
	RGE_CLRBIT_1(sc, RGE_MCUCMD, RGE_MCUCMD_IS_OOB);

	RGE_MAC_CLRBIT(sc, 0xe8de, 0x4000);

	/* Wait (up to ~1ms) for bit 9 of TWICMD to signal readiness. */
	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
			break;
	}

	/* Undocumented MAC OCP writes taken from the vendor driver. */
	rge_write_mac_ocp(sc, 0xc0aa, 0x07d0);
	rge_write_mac_ocp(sc, 0xc0a6, 0x01b5);
	rge_write_mac_ocp(sc, 0xc01e, 0x5555);

	/* Wait again for the same readiness indication. */
	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
			break;
	}

	/* Handle resume from UPS (ultra power saving) state. */
	if (rge_read_mac_ocp(sc, 0xd42c) & 0x0100) {
		printf("%s: rge_exit_oob(): rtl8125_is_ups_resume!!\n",
		    device_xname(sc->sc_dev));
		/* Wait for the PHY to report state 2 (up to ~4s).
		 * NOTE(review): 0xa420 bits 0-2 look like a PHY status
		 * field; exact encoding is from the vendor driver. */
		for (i = 0; i < RGE_TIMEOUT; i++) {
			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 2)
				break;
			DELAY(1000);
		}
		RGE_MAC_CLRBIT(sc, 0xd408, 0x0100);
		if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5)
			RGE_PHY_CLRBIT(sc, 0xa466, 0x0001);
		RGE_PHY_CLRBIT(sc, 0xa468, 0x000a);
	}
}
   2638 
   2639 void
   2640 rge_write_csi(struct rge_softc *sc, uint32_t reg, uint32_t val)
   2641 {
   2642 	int i;
   2643 
   2644 	RGE_WRITE_4(sc, RGE_CSIDR, val);
   2645 	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
   2646 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT) | RGE_CSIAR_BUSY);
   2647 
   2648 	for (i = 0; i < 10; i++) {
   2649 		 DELAY(100);
   2650 		 if (!(RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY))
   2651 			break;
   2652 	}
   2653 
   2654 	DELAY(20);
   2655 }
   2656 
/*
 * Read a 32-bit value from a CSI (PCIe configuration space indirect)
 * register.  The cycle is started with the BUSY bit clear; note the
 * poll condition is the opposite of rge_write_csi() — for reads the
 * flag being set appears to indicate data-valid rather than busy.
 */
uint32_t
rge_read_csi(struct rge_softc *sc, uint32_t reg)
{
	int i;

	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT));

	/* Poll (up to ~1ms) for completion; falls through on timeout. */
	for (i = 0; i < 10; i++) {
		 DELAY(100);
		 if (RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY)
			break;
	}

	DELAY(20);

	return (RGE_READ_4(sc, RGE_CSIDR));
}
   2675 
   2676 void
   2677 rge_write_mac_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
   2678 {
   2679 	uint32_t tmp;
   2680 
   2681 	tmp = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
   2682 	tmp += val;
   2683 	tmp |= RGE_MACOCP_BUSY;
   2684 	RGE_WRITE_4(sc, RGE_MACOCP, tmp);
   2685 }
   2686 
   2687 uint16_t
   2688 rge_read_mac_ocp(struct rge_softc *sc, uint16_t reg)
   2689 {
   2690 	uint32_t val;
   2691 
   2692 	val = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
   2693 	RGE_WRITE_4(sc, RGE_MACOCP, val);
   2694 
   2695 	return (RGE_READ_4(sc, RGE_MACOCP) & RGE_MACOCP_DATA_MASK);
   2696 }
   2697 
   2698 void
   2699 rge_write_ephy(struct rge_softc *sc, uint16_t reg, uint16_t val)
   2700 {
   2701 	uint32_t tmp;
   2702 	int i;
   2703 
   2704 	tmp = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
   2705 	tmp |= RGE_EPHYAR_BUSY | (val & RGE_EPHYAR_DATA_MASK);
   2706 	RGE_WRITE_4(sc, RGE_EPHYAR, tmp);
   2707 
   2708 	for (i = 0; i < 10; i++) {
   2709 		DELAY(100);
   2710 		if (!(RGE_READ_4(sc, RGE_EPHYAR) & RGE_EPHYAR_BUSY))
   2711 			break;
   2712 	}
   2713 
   2714 	DELAY(20);
   2715 }
   2716 
/*
 * Read a 16-bit value from a PCIe PHY (EPHY) register.  The cycle is
 * started with the busy bit clear; note the poll condition is the
 * opposite of rge_write_ephy() — for reads the flag being set appears
 * to indicate the data is ready.  On timeout the last value read is
 * used as-is.
 */
uint16_t
rge_read_ephy(struct rge_softc *sc, uint16_t reg)
{
	uint32_t val;
	int i;

	val = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
	RGE_WRITE_4(sc, RGE_EPHYAR, val);

	for (i = 0; i < 10; i++) {
		DELAY(100);
		val = RGE_READ_4(sc, RGE_EPHYAR);
		if (val & RGE_EPHYAR_BUSY)
			break;
	}

	DELAY(20);

	return (val & RGE_EPHYAR_DATA_MASK);
}
   2737 
   2738 void
   2739 rge_write_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg, uint16_t val)
   2740 {
   2741 	uint16_t off, phyaddr;
   2742 
   2743 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
   2744 	phyaddr <<= 4;
   2745 
   2746 	off = addr ? reg : 0x10 + (reg % 8);
   2747 
   2748 	phyaddr += (off - 16) << 1;
   2749 
   2750 	rge_write_phy_ocp(sc, phyaddr, val);
   2751 }
   2752 
   2753 uint16_t
   2754 rge_read_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg)
   2755 {
   2756 	uint16_t off, phyaddr;
   2757 
   2758 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
   2759 	phyaddr <<= 4;
   2760 
   2761 	off = addr ? reg : 0x10 + (reg % 8);
   2762 
   2763 	phyaddr += (off - 16) << 1;
   2764 
   2765 	return (rge_read_phy_ocp(sc, phyaddr));
   2766 }
   2767 
   2768 void
   2769 rge_write_phy_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
   2770 {
   2771 	uint32_t tmp;
   2772 	int i;
   2773 
   2774 	tmp = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
   2775 	tmp |= RGE_PHYOCP_BUSY | val;
   2776 	RGE_WRITE_4(sc, RGE_PHYOCP, tmp);
   2777 
   2778 	for (i = 0; i < RGE_TIMEOUT; i++) {
   2779 		DELAY(1);
   2780 		if (!(RGE_READ_4(sc, RGE_PHYOCP) & RGE_PHYOCP_BUSY))
   2781 			break;
   2782 	}
   2783 }
   2784 
/*
 * Read a 16-bit value from a PHY OCP register.  The cycle is started
 * with the busy bit clear; note the poll condition is the opposite of
 * rge_write_phy_ocp() — for reads the flag being set appears to
 * indicate the data is ready.  On timeout the last value read is used
 * as-is.
 */
uint16_t
rge_read_phy_ocp(struct rge_softc *sc, uint16_t reg)
{
	uint32_t val;
	int i;

	val = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
	RGE_WRITE_4(sc, RGE_PHYOCP, val);

	for (i = 0; i < RGE_TIMEOUT; i++) {
		DELAY(1);
		val = RGE_READ_4(sc, RGE_PHYOCP);
		if (val & RGE_PHYOCP_BUSY)
			break;
	}

	return (val & RGE_PHYOCP_DATA_MASK);
}
   2803 
   2804 int
   2805 rge_get_link_status(struct rge_softc *sc)
   2806 {
   2807 	return ((RGE_READ_2(sc, RGE_PHYSTAT) & RGE_PHYSTAT_LINK) ? 1 : 0);
   2808 }
   2809 
/*
 * Kick the transmitter: tell the chip to start fetching TX
 * descriptors.  Suitable as a deferred-start callback (void * arg is
 * the softc).
 */
void
rge_txstart(void *arg)
{
	struct rge_softc *sc = arg;

	RGE_WRITE_2(sc, RGE_TXSTART, RGE_TXSTART_START);
}
   2817 
/*
 * Periodic (once per second) callout: refresh the link state at
 * splnet and rearm the timer.
 */
void
rge_tick(void *arg)
{
	struct rge_softc *sc = arg;
	int s;

	s = splnet();
	rge_link_state(sc);
	splx(s);

	/* Rearm for one second from now. */
	callout_schedule(&sc->sc_timeout, hz);
}
   2830 
   2831 void
   2832 rge_link_state(struct rge_softc *sc)
   2833 {
   2834 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   2835 	int link = LINK_STATE_DOWN;
   2836 
   2837 	if (rge_get_link_status(sc))
   2838 		link = LINK_STATE_UP;
   2839 
   2840 	if (ifp->if_link_state != link) { /* XXX not safe to access */
   2841 		if_link_state_change(ifp, link);
   2842 	}
   2843 }
   2844