Home | History | Annotate | Line # | Download | only in pci
      1 /*	$NetBSD: if_rge.c,v 1.35 2025/10/21 04:24:00 pgoyette Exp $	*/
      2 /*	$OpenBSD: if_rge.c,v 1.9 2020/12/12 11:48:53 jan Exp $	*/
      3 
      4 /*
      5  * Copyright (c) 2019, 2020 Kevin Lo <kevlo (at) openbsd.org>
      6  *
      7  * Permission to use, copy, modify, and distribute this software for any
      8  * purpose with or without fee is hereby granted, provided that the above
      9  * copyright notice and this permission notice appear in all copies.
     10  *
     11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
     12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
     13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
     14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
     15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
     16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
     17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
     18  */
     19 
     20 #include <sys/cdefs.h>
     21 __KERNEL_RCSID(0, "$NetBSD: if_rge.c,v 1.35 2025/10/21 04:24:00 pgoyette Exp $");
     22 
     23 #if defined(_KERNEL_OPT)
     24 #include "opt_net_mpsafe.h"
     25 #endif
     26 
     27 #include <sys/types.h>
     28 
     29 #include <sys/param.h>
     30 #include <sys/systm.h>
     31 #include <sys/sockio.h>
     32 #include <sys/mbuf.h>
     33 #include <sys/kernel.h>
     34 #include <sys/socket.h>
     35 #include <sys/device.h>
     36 #include <sys/endian.h>
     37 #include <sys/callout.h>
     38 #include <sys/workqueue.h>
     39 #include <sys/module.h>
     40 
     41 #include <net/if.h>
     42 
     43 #include <net/if_dl.h>
     44 #include <net/if_ether.h>
     45 
     46 #include <net/if_media.h>
     47 
     48 #include <netinet/in.h>
     49 #include <net/if_ether.h>
     50 
     51 #include <net/bpf.h>
     52 
     53 #include <sys/bus.h>
     54 #include <machine/intr.h>
     55 
     56 #include <dev/mii/mii.h>
     57 
     58 #include <dev/pci/pcivar.h>
     59 #include <dev/pci/pcireg.h>
     60 #include <dev/pci/pcidevs.h>
     61 
     62 #include <dev/pci/if_rgereg.h>
     63 
     64 #ifdef __NetBSD__
     65 #define letoh32 	htole32
     66 #define nitems(x) 	__arraycount(x)
     67 
/*
 * Compatibility shim for OpenBSD's MCLGETL(): allocate an mbuf header
 * with at least "size" bytes of external storage attached.  Returns
 * NULL on failure.  The softc argument is unused on NetBSD.
 */
static struct mbuf *
MCLGETL(struct rge_softc *sc __unused, int how,
    u_int size)
{
	struct mbuf *m;

	MGETHDR(m, how, MT_DATA);
	if (m == NULL)
		return NULL;

	MEXTMALLOC(m, size, how);
	if ((m->m_flags & M_EXT) == 0) {
		/* External storage attach failed; release the header. */
		m_freem(m);
		return NULL;
	}
	return m;
}
     85 
     86 #ifdef NET_MPSAFE
     87 #define 	RGE_MPSAFE	1
     88 #define 	CALLOUT_FLAGS	CALLOUT_MPSAFE
     89 #else
     90 #define 	CALLOUT_FLAGS	0
     91 #endif
     92 #endif
     93 
     94 #ifdef RGE_DEBUG
     95 #define DPRINTF(x)	do { if (rge_debug > 0) printf x; } while (0)
     96 int rge_debug = 0;
     97 #else
     98 #define DPRINTF(x)
     99 #endif
    100 
    101 static int		rge_match(device_t, cfdata_t, void *);
    102 static void		rge_attach(device_t, device_t, void *);
    103 int		rge_intr(void *);
    104 int		rge_encap(struct rge_softc *, struct mbuf *, int);
    105 int		rge_ioctl(struct ifnet *, u_long, void *);
    106 void		rge_start(struct ifnet *);
    107 void		rge_watchdog(struct ifnet *);
    108 int		rge_init(struct ifnet *);
    109 void		rge_stop(struct ifnet *, int);
    110 int		rge_ifmedia_upd(struct ifnet *);
    111 void		rge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
    112 int		rge_allocmem(struct rge_softc *);
    113 int		rge_newbuf(struct rge_softc *, int);
    114 static int	rge_rx_list_init(struct rge_softc *);
    115 static void	rge_rx_list_fini(struct rge_softc *);
    116 static void	rge_tx_list_init(struct rge_softc *);
    117 static void	rge_tx_list_fini(struct rge_softc *);
    118 int		rge_rxeof(struct rge_softc *);
    119 int		rge_txeof(struct rge_softc *);
    120 void		rge_reset(struct rge_softc *);
    121 void		rge_iff(struct rge_softc *);
    122 void		rge_set_phy_power(struct rge_softc *, int);
    123 void		rge_phy_config(struct rge_softc *);
    124 void		rge_phy_config_mac_cfg2_8126(struct rge_softc *);
    125 void		rge_phy_config_mac_cfg2(struct rge_softc *);
    126 void		rge_phy_config_mac_cfg3(struct rge_softc *);
    127 void		rge_phy_config_mac_cfg4(struct rge_softc *);
    128 void		rge_phy_config_mac_cfg5(struct rge_softc *);
    129 void		rge_phy_config_mcu(struct rge_softc *, uint16_t);
    130 void		rge_set_macaddr(struct rge_softc *, const uint8_t *);
    131 void		rge_get_macaddr(struct rge_softc *, uint8_t *);
    132 void		rge_hw_init(struct rge_softc *);
    133 void		rge_disable_phy_ocp_pwrsave(struct rge_softc *);
    134 void		rge_patch_phy_mcu(struct rge_softc *, int);
    135 void		rge_add_media_types(struct rge_softc *);
    136 void		rge_config_imtype(struct rge_softc *, int);
    137 void		rge_disable_hw_im(struct rge_softc *);
    138 void		rge_disable_sim_im(struct rge_softc *);
    139 void		rge_setup_sim_im(struct rge_softc *);
    140 void		rge_setup_intr(struct rge_softc *, int);
    141 void		rge_exit_oob(struct rge_softc *);
    142 void		rge_write_csi(struct rge_softc *, uint32_t, uint32_t);
    143 uint32_t	rge_read_csi(struct rge_softc *, uint32_t);
    144 void		rge_write_mac_ocp(struct rge_softc *, uint16_t, uint16_t);
    145 uint16_t	rge_read_mac_ocp(struct rge_softc *, uint16_t);
    146 void		rge_write_ephy(struct rge_softc *, uint16_t, uint16_t);
    147 uint16_t	rge_read_ephy(struct rge_softc *, uint16_t);
    148 void		rge_write_phy(struct rge_softc *, uint16_t, uint16_t, uint16_t);
    149 uint16_t	rge_read_phy(struct rge_softc *, uint16_t, uint16_t);
    150 void		rge_write_phy_ocp(struct rge_softc *, uint16_t, uint16_t);
    151 uint16_t	rge_read_phy_ocp(struct rge_softc *, uint16_t);
    152 int		rge_get_link_status(struct rge_softc *);
    153 void		rge_txstart(void *);
    154 void		rge_tick(void *);
    155 void		rge_link_state(struct rge_softc *);
    156 
    157 static const struct {
    158 	uint16_t reg;
    159 	uint16_t val;
    160 }  rtl8125_mac_cfg2_mcu[] = {
    161 	RTL8125_MAC_CFG2_MCU
    162 }, rtl8125_mac_cfg3_mcu[] = {
    163 	RTL8125_MAC_CFG3_MCU
    164 }, rtl8125_mac_cfg4_mcu[] = {
    165 	RTL8125_MAC_CFG4_MCU
    166 }, rtl8125_mac_cfg5_mcu[] = {
    167 	RTL8125_MAC_CFG5_MCU
    168 }, rtl8126_mac_cfg2_mcu[] = {
    169 	RTL8126_MAC_CFG2_MCU
    170 };
    171 
    172 CFATTACH_DECL_NEW(rge, sizeof(struct rge_softc), rge_match, rge_attach,
    173 		NULL, NULL); /* Sevan - detach function? */
    174 
    175 static const struct device_compatible_entry compat_data[] = {
    176 	{ .id = PCI_ID_CODE(PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_E3000) },
    177 	{ .id = PCI_ID_CODE(PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8125) },
    178 	{ .id = PCI_ID_CODE(PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8126) },
    179 
    180 	PCI_COMPAT_EOL
    181 };
    182 
    183 static int
    184 rge_match(device_t parent, cfdata_t match, void *aux)
    185 {
    186 	struct pci_attach_args *pa =aux;
    187 
    188 	return pci_compatible_match(pa, compat_data);
    189 }
    190 
    191 void
    192 rge_attach(device_t parent, device_t self, void *aux)
    193 {
    194 	struct rge_softc *sc = device_private(self);
    195 	struct pci_attach_args *pa = aux;
    196 	pci_chipset_tag_t pc = pa->pa_pc;
    197 	pci_intr_handle_t *ihp;
    198 	char intrbuf[PCI_INTRSTR_LEN];
    199 	const char *intrstr = NULL;
    200 	struct ifnet *ifp;
    201 	pcireg_t reg;
    202 	uint32_t hwrev;
    203 	uint8_t eaddr[ETHER_ADDR_LEN];
    204 	int offset;
    205 	pcireg_t command;
    206 	const char *revstr;
    207 
    208 	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
    209 
    210 	sc->sc_dev = self;
    211 
    212 	pci_aprint_devinfo(pa, "Ethernet controller");
    213 
    214 	/*
    215 	 * Map control/status registers.
    216 	 */
    217 	if (pci_mapreg_map(pa, RGE_PCI_BAR2, PCI_MAPREG_TYPE_MEM |
    218 	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->rge_btag, &sc->rge_bhandle,
    219 	    NULL, &sc->rge_bsize)) {
    220 		if (pci_mapreg_map(pa, RGE_PCI_BAR1, PCI_MAPREG_TYPE_MEM |
    221 		    PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->rge_btag,
    222 		    &sc->rge_bhandle, NULL, &sc->rge_bsize)) {
    223 			if (pci_mapreg_map(pa, RGE_PCI_BAR0, PCI_MAPREG_TYPE_IO,
    224 			    0, &sc->rge_btag, &sc->rge_bhandle, NULL,
    225 			    &sc->rge_bsize)) {
    226 				aprint_error(": can't map mem or i/o space\n");
    227 				return;
    228 			}
    229 		}
    230 	}
    231 
    232 	int counts[PCI_INTR_TYPE_SIZE] = {
    233  		[PCI_INTR_TYPE_INTX] = 1,
    234  		[PCI_INTR_TYPE_MSI] = 1,
    235  		[PCI_INTR_TYPE_MSIX] = 1,
    236  	};
    237 	int max_type = PCI_INTR_TYPE_MSIX;
    238 	/*
    239 	 * Allocate interrupt.
    240 	 */
    241 	if (pci_intr_alloc(pa, &ihp, counts, max_type) != 0) {
    242 		aprint_error(": couldn't map interrupt\n");
    243 		return;
    244 	}
    245 	switch (pci_intr_type(pc, ihp[0])) {
    246 	case PCI_INTR_TYPE_MSIX:
    247 	case PCI_INTR_TYPE_MSI:
    248 		sc->rge_flags |= RGE_FLAG_MSI;
    249 		break;
    250 	default:
    251 		break;
    252 	}
    253 	intrstr = pci_intr_string(pc, ihp[0], intrbuf, sizeof(intrbuf));
    254 	sc->sc_ih = pci_intr_establish_xname(pc, ihp[0], IPL_NET, rge_intr,
    255 	    sc, device_xname(sc->sc_dev));
    256 	if (sc->sc_ih == NULL) {
    257 		aprint_error_dev(sc->sc_dev, ": couldn't establish interrupt");
    258 		if (intrstr != NULL)
    259 			aprint_error(" at %s\n", intrstr);
    260 		aprint_error("\n");
    261 		return;
    262 	}
    263 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
    264 
    265 	if (pci_dma64_available(pa))
    266 		sc->sc_dmat = pa->pa_dmat64;
    267 	else
    268 		sc->sc_dmat = pa->pa_dmat;
    269 
    270 	sc->sc_pc = pa->pa_pc;
    271 	sc->sc_tag = pa->pa_tag;
    272 
    273 	/* Determine hardware revision */
    274 	hwrev = RGE_READ_4(sc, RGE_TXCFG) & RGE_TXCFG_HWREV;
    275 	switch (hwrev) {
    276 	case 0x60800000:
    277 		sc->rge_type = MAC_CFG2;
    278 		revstr = "Z1";
    279 		break;
    280 	case 0x60900000:
    281 		sc->rge_type = MAC_CFG3;
    282 		revstr = "Z2";
    283 		break;
    284 	case 0x64000000:
    285 		sc->rge_type = MAC_CFG4;
    286 		revstr = "A";
    287 		break;
    288 	case 0x64100000:
    289 		sc->rge_type = MAC_CFG5;
    290 		revstr = "B";
    291 		break;
    292 	case 0x64900000:
    293 		sc->rge_type = MAC_CFG2_8126;
    294 		revstr = "A";
    295 		break;
    296 	default:
    297 		aprint_error(": unknown version 0x%08x\n", hwrev);
    298 		return;
    299 	}
    300 
    301 	aprint_normal_dev(sc->sc_dev, "HW rev. %s\n", revstr);
    302 	rge_config_imtype(sc, RGE_IMTYPE_SIM);
    303 
    304 	/*
    305 	 * PCI Express check.
    306 	 */
    307 	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
    308 	    &offset, NULL)) {
    309 		/* Disable PCIe ASPM and ECPM. */
    310 		reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
    311 		    offset + PCIE_LCSR);
    312 		reg &= ~(PCIE_LCSR_ASPM_L0S | PCIE_LCSR_ASPM_L1 |
    313 		    PCIE_LCSR_ENCLKPM);
    314 		pci_conf_write(pa->pa_pc, pa->pa_tag, offset + PCIE_LCSR,
    315 		    reg);
    316 	}
    317 
    318 	rge_exit_oob(sc);
    319 	rge_hw_init(sc);
    320 
    321 	rge_get_macaddr(sc, eaddr);
    322 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
    323 	    ether_sprintf(eaddr));
    324 
    325 	memcpy(sc->sc_enaddr, eaddr, ETHER_ADDR_LEN);
    326 
    327 	rge_set_phy_power(sc, 1);
    328 	rge_phy_config(sc);
    329 
    330 	if (rge_allocmem(sc))
    331 		return;
    332 
    333 	ifp = &sc->sc_ec.ec_if;
    334 	ifp->if_softc = sc;
    335 	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
    336 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    337 #ifdef RGE_MPSAFE
    338 	ifp->if_extflags = IFEF_MPSAFE;
    339 #endif
    340 	ifp->if_ioctl = rge_ioctl;
    341 	ifp->if_stop = rge_stop;
    342 	ifp->if_start = rge_start;
    343 	ifp->if_init = rge_init;
    344 	ifp->if_watchdog = rge_watchdog;
    345 	IFQ_SET_MAXLEN(&ifp->if_snd, RGE_TX_LIST_CNT - 1);
    346 
    347 #if notyet
    348 	ifp->if_capabilities = IFCAP_CSUM_IPv4_Rx |
    349 	    IFCAP_CSUM_IPv4_Tx |IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_TCPv4_Tx|
    350 	    IFCAP_CSUM_UDPv4_Rx | IFCAP_CSUM_UDPv4_Tx;
    351 #endif
    352 
    353 	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
    354 	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
    355 
    356 	callout_init(&sc->sc_timeout, CALLOUT_FLAGS);
    357 	callout_setfunc(&sc->sc_timeout, rge_tick, sc);
    358 
    359 	command = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
    360 	command |= PCI_COMMAND_MASTER_ENABLE;
    361 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
    362 
    363 	/* Initialize ifmedia structures. */
    364 	sc->sc_ec.ec_ifmedia = &sc->sc_media;
    365 	ifmedia_init(&sc->sc_media, IFM_IMASK, rge_ifmedia_upd,
    366 	    rge_ifmedia_sts);
    367 	rge_add_media_types(sc);
    368 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
    369 	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
    370 	sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media;
    371 
    372 	if_attach(ifp);
    373 	if_deferred_start_init(ifp, NULL);
    374 	ether_ifattach(ifp, eaddr);
    375 
    376 	if (pmf_device_register(self, NULL, NULL))
    377 		pmf_class_network_register(self, ifp);
    378 	else
    379 		aprint_error_dev(self, "couldn't establish power handler\n");
    380 }
    381 
/*
 * Interrupt handler: acknowledge pending interrupt status and run the
 * RX/TX completion paths.  Also switches between plain RX/TX interrupts
 * and simulated (hardware-timer based) interrupt moderation depending
 * on observed traffic.  Returns nonzero if the interrupt was ours.
 */
int
rge_intr(void *arg)
{
	struct rge_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint32_t status;
	int claimed = 0, rx, tx;

	if (!(ifp->if_flags & IFF_RUNNING))
		return (0);

	/* Disable interrupts. */
	RGE_WRITE_4(sc, RGE_IMR, 0);

	/*
	 * With INTx the line may be shared; bail out if none of our
	 * sources are pending.
	 * NOTE(review): this early return leaves RGE_IMR cleared --
	 * confirm our interrupts cannot remain masked after a spurious
	 * shared interrupt.
	 */
	if (!(sc->rge_flags & RGE_FLAG_MSI)) {
		if ((RGE_READ_4(sc, RGE_ISR) & sc->rge_intrs) == 0)
			return (0);
	}

	/* Read and acknowledge all pending status bits at once. */
	status = RGE_READ_4(sc, RGE_ISR);
	if (status)
		RGE_WRITE_4(sc, RGE_ISR, status);

	if (status & RGE_ISR_PCS_TIMEOUT)
		claimed = 1;

	rx = tx = 0;
	if (status & sc->rge_intrs) {
		/* Receive completions, RX errors or RX FIFO overflow. */
		if (status &
		    (sc->rge_rx_ack | RGE_ISR_RX_ERR | RGE_ISR_RX_FIFO_OFLOW)) {
			rx |= rge_rxeof(sc);
			claimed = 1;
		}

		/* Transmit completions or TX errors. */
		if (status & (sc->rge_tx_ack | RGE_ISR_TX_ERR)) {
			tx |= rge_txeof(sc);
			claimed = 1;
		}

		/* Fatal system error: reinitialize the whole chip. */
		if (status & RGE_ISR_SYSTEM_ERR) {
			KERNEL_LOCK(1, NULL);
			rge_init(ifp);
			KERNEL_UNLOCK_ONE(NULL);
			claimed = 1;
		}
	}

	if (sc->rge_timerintr) {
		if ((tx | rx) == 0) {
			/*
			 * Nothing needs to be processed, fallback
			 * to use TX/RX interrupts.
			 */
			rge_setup_intr(sc, RGE_IMTYPE_NONE);

			/*
			 * Recollect, mainly to avoid the possible
			 * race introduced by changing interrupt
			 * masks.
			 */
			rge_rxeof(sc);
			rge_txeof(sc);
		} else
			RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
	} else if (tx | rx) {
		/*
		 * Assume that using simulated interrupt moderation
		 * (hardware timer based) could reduce the interrupt
		 * rate.
		 */
		rge_setup_intr(sc, RGE_IMTYPE_SIM);
	}

	/* Re-enable interrupts. */
	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);

	return (claimed);
}
    459 
/*
 * Load mbuf "m" into the TX DMA map for ring slot "idx" and fill one
 * descriptor per DMA segment.  Ownership (OWN) of the first descriptor
 * is granted to the chip last, so the device never sees a half-built
 * chain.  Returns the number of descriptors consumed, or 0 on failure
 * (the caller still owns and must free the mbuf in that case).
 */
int
rge_encap(struct rge_softc *sc, struct mbuf *m, int idx)
{
	struct rge_tx_desc *d = NULL;
	struct rge_txq *txq;
	bus_dmamap_t txmap;
	uint32_t cmdsts, cflags = 0;
	int cur, error, i, last, nsegs;

#if notyet
	/*
	 * Set RGE_TDEXTSTS_IPCSUM if any checksum offloading is requested.
	 * Otherwise, RGE_TDEXTSTS_TCPCSUM / RGE_TDEXTSTS_UDPCSUM does not
	 * take affect.
	 */
	if ((m->m_pkthdr.csum_flags &
	    (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) != 0) {
		cflags |= RGE_TDEXTSTS_IPCSUM;
		if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
			cflags |= RGE_TDEXTSTS_TCPCSUM;
		if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
			cflags |= RGE_TDEXTSTS_UDPCSUM;
	}
#endif

	txq = &sc->rge_ldata.rge_txq[idx];
	txmap = txq->txq_dmamap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m, BUS_DMA_NOWAIT);
	switch (error) {
	case 0:
		break;
	case EFBIG: /* mbuf chain is too fragmented */
		/* Compact the chain and retry the load once. */
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m,
		    BUS_DMA_NOWAIT) == 0)
			break;

		/* FALLTHROUGH */
	default:
		return (0);
	}

	/* Flush the payload buffers before the chip reads them. */
	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	nsegs = txmap->dm_nsegs;

	/* Set up hardware VLAN tagging. */
	if (vlan_has_tag(m))
		cflags |= bswap16(vlan_get_tag(m)) | RGE_TDEXTSTS_VTAG;

	last = cur = idx;
	cmdsts = RGE_TDCMDSTS_SOF;	/* start-of-frame on first desc */

	for (i = 0; i < txmap->dm_nsegs; i++) {
		d = &sc->rge_ldata.rge_tx_list[cur];

		d->rge_extsts = htole32(cflags);
		d->rge_addrlo = htole32(RGE_ADDR_LO(txmap->dm_segs[i].ds_addr));
		d->rge_addrhi = htole32(RGE_ADDR_HI(txmap->dm_segs[i].ds_addr));

		/* Low bits of cmdsts carry the segment length. */
		cmdsts |= txmap->dm_segs[i].ds_len;

		/* Mark end-of-ring on the physically last slot. */
		if (cur == RGE_TX_LIST_CNT - 1)
			cmdsts |= RGE_TDCMDSTS_EOR;

		d->rge_cmdsts = htole32(cmdsts);

		/* Descriptors after the first are written pre-OWNed. */
		last = cur;
		cmdsts = RGE_TDCMDSTS_OWN;
		cur = RGE_NEXT_TX_DESC(cur);
	}

	/* Set EOF on the last descriptor. */
	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_EOF);

	/* Transfer ownership of packet to the chip. */
	d = &sc->rge_ldata.rge_tx_list[idx];

	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_OWN);

	/*
	 * NOTE(review): this syncs only the single descriptor at "cur"
	 * (one past the last slot written), not the idx..last range --
	 * verify the descriptor stores above are made visible to the
	 * device on all platforms.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
	    cur * sizeof(struct rge_tx_desc), sizeof(struct rge_tx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Update info of TX queue and descriptors. */
	txq->txq_mbuf = m;
	txq->txq_descidx = last;

	return (nsegs);
}
    552 
    553 int
    554 rge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
    555 {
    556 	struct rge_softc *sc = ifp->if_softc;
    557 	//struct ifreq *ifr = (struct ifreq *)data;
    558 	int s, error = 0;
    559 
    560 	s = splnet();
    561 
    562 	switch (cmd) {
    563 	case SIOCSIFFLAGS:
    564 		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
    565 			break;
    566 		/* XXX set an ifflags callback and let ether_ioctl
    567 		 * handle all of this.
    568 		 */
    569 		if (ifp->if_flags & IFF_UP) {
    570 			if (ifp->if_flags & IFF_RUNNING)
    571 				error = ENETRESET;
    572 			else
    573 				rge_init(ifp);
    574 		} else {
    575 			if (ifp->if_flags & IFF_RUNNING)
    576 				rge_stop(ifp, 1);
    577 		}
    578 		break;
    579 	default:
    580 		error = ether_ioctl(ifp, cmd, data);
    581 	}
    582 
    583 	if (error == ENETRESET) {
    584 		if (ifp->if_flags & IFF_RUNNING)
    585 			rge_iff(sc);
    586 		error = 0;
    587 	}
    588 
    589 	splx(s);
    590 	return (error);
    591 }
    592 
/*
 * Start transmission: dequeue packets from the interface send queue
 * and encapsulate them into the TX ring until the queue empties or
 * the ring is nearly full.
 */
void
rge_start(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int free, idx, used;
	int queued = 0;

#define LINK_STATE_IS_UP(_s)    \
	((_s) >= LINK_STATE_UP || (_s) == LINK_STATE_UNKNOWN)

	/* No link: drop everything queued for output. */
	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
		IFQ_PURGE(&ifp->if_snd);
		return;
	}

	/* Calculate free space. */
	idx = sc->rge_ldata.rge_txq_prodidx;
	free = sc->rge_ldata.rge_txq_considx;
	if (free <= idx)
		free += RGE_TX_LIST_CNT;
	free -= idx;

	for (;;) {
		/*
		 * Stop when a maximally fragmented packet (plus slack)
		 * might not fit, and mark the queue active-full.
		 */
		if (RGE_TX_NSEGS >= free + 2) {
			SET(ifp->if_flags, IFF_OACTIVE);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		used = rge_encap(sc, m, idx);
		if (used == 0) {
			/* DMA load failed; drop the packet. */
			m_freem(m);
			continue;
		}

		KASSERT(used <= free);
		free -= used;

		/* Tap outgoing packets for BPF listeners. */
		bpf_mtap(ifp, m, BPF_D_OUT);

		/* Advance the producer index, wrapping at ring end. */
		idx += used;
		if (idx >= RGE_TX_LIST_CNT)
			idx -= RGE_TX_LIST_CNT;

		queued++;
	}

	if (queued == 0)
		return;

	/* Set a timeout in case the chip goes out to lunch. */
	ifp->if_timer = 5;

	sc->rge_ldata.rge_txq_prodidx = idx;
	rge_txstart(sc);
}
    653 
    654 void
    655 rge_watchdog(struct ifnet *ifp)
    656 {
    657 	struct rge_softc *sc = ifp->if_softc;
    658 
    659 	device_printf(sc->sc_dev, "watchdog timeout\n");
    660 	if_statinc(ifp, if_oerrors);
    661 
    662 	rge_init(ifp);
    663 }
    664 
/*
 * Initialize the chip and bring the interface up: program the MAC
 * address and descriptor rings, run the vendor-specific register
 * initialization sequence, then enable RX/TX and interrupts.
 * Returns 0 on success or an errno if RX buffers cannot be allocated.
 */
int
rge_init(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	uint32_t val;
	unsigned i;

	/* Quiesce the hardware first (does not free the rings' memory). */
	rge_stop(ifp, 0);

	/* Set MAC address. */
	rge_set_macaddr(sc, CLLADDR(ifp->if_sadl));

	/* Set Maximum frame size. */
	RGE_WRITE_2(sc, RGE_RXMAXSIZE, RGE_JUMBO_FRAMELEN);

	/* Initialize RX descriptors list. */
	int error = rge_rx_list_init(sc);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "init failed: no memory for RX buffers\n");
		rge_stop(ifp, 1);
		return error;
	}

	/* Initialize TX descriptors. */
	rge_tx_list_init(sc);

	/* Load the addresses of the RX and TX lists into the chip. */
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_LO,
	    RGE_ADDR_LO(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_HI,
	    RGE_ADDR_HI(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_LO,
	    RGE_ADDR_LO(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_HI,
	    RGE_ADDR_HI(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));

	/* Unlock config registers for writing (re-locked below). */
	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);

	RGE_CLRBIT_1(sc, 0xf1, 0x80);
	if (sc->rge_type == MAC_CFG2_8126)
		RGE_CLRBIT_1(sc, RGE_INT_CFG0, 0x08);
	else
		RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
	if (sc->rge_type != MAC_CFG2_8126)
		RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_RDY_TO_L23);

	/* Clear interrupt moderation timer. */
	for (i = 0; i < 64; i++)
		RGE_WRITE_4(sc, RGE_INTMITI(i), 0);

	/* Set the initial RX and TX configurations. */
	if (sc->rge_type == MAC_CFG2_8126)
		RGE_WRITE_4(sc, RGE_RXCFG, RGE_RXCFG_CONFIG_8126);
	else
		RGE_WRITE_4(sc, RGE_RXCFG, RGE_RXCFG_CONFIG);
	RGE_WRITE_4(sc, RGE_TXCFG, RGE_TXCFG_CONFIG);

	/*
	 * The following CSI/MAC-OCP accesses are undocumented
	 * vendor-magic tuning; presumably taken from Realtek's
	 * reference driver.  Order is significant -- do not reorder.
	 */
	val = rge_read_csi(sc, 0x70c) & ~0xff000000;
	rge_write_csi(sc, 0x70c, val | 0x27000000);

	/* Enable hardware optimization function. */
	val = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x78) & ~0x00007000;
	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x78, val | 0x00005000);

	if (sc->rge_type == MAC_CFG2_8126) {
		/* Disable L1 timeout. */
		val = rge_read_csi(sc, 0x890) & ~0x00000001;
		rge_write_csi(sc, 0x890, val);
	} else
		RGE_WRITE_2(sc, 0x0382, 0x221b);
	RGE_WRITE_1(sc, 0x4500, 0);
	RGE_WRITE_2(sc, 0x4800, 0);
	RGE_CLRBIT_1(sc, RGE_CFG1, RGE_CFG1_SPEED_DOWN);

	rge_write_mac_ocp(sc, 0xc140, 0xffff);
	rge_write_mac_ocp(sc, 0xc142, 0xffff);

	val = rge_read_mac_ocp(sc, 0xd3e2) & ~0x0fff;
	rge_write_mac_ocp(sc, 0xd3e2, val | 0x03a9);

	RGE_MAC_CLRBIT(sc, 0xd3e4, 0x00ff);
	RGE_MAC_SETBIT(sc, 0xe860, 0x0080);
	RGE_MAC_SETBIT(sc, 0xeb58, 0x0001);

	if (sc->rge_type == MAC_CFG2_8126)
		RGE_CLRBIT_1(sc, 0xd8, 0x02);

	val = rge_read_mac_ocp(sc, 0xe614) & ~0x0700;
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3 ||
	    sc->rge_type == MAC_CFG2_8126)
		rge_write_mac_ocp(sc, 0xe614, val | 0x0400);
	else
		rge_write_mac_ocp(sc, 0xe614, val | 0x0200);

	RGE_MAC_CLRBIT(sc, 0xe63e, 0x0c00);

	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3 ||
	    sc->rge_type == MAC_CFG2_8126) {
		val = rge_read_mac_ocp(sc, 0xe63e) & ~0x0030;
		rge_write_mac_ocp(sc, 0xe63e, val | 0x0020);
	} else
		RGE_MAC_CLRBIT(sc, 0xe63e, 0x0030);

	RGE_MAC_SETBIT(sc, 0xc0b4, 0x000c);

	val = rge_read_mac_ocp(sc, 0xeb6a) & ~0x00ff;
	rge_write_mac_ocp(sc, 0xeb6a, val | 0x0033);

	val = rge_read_mac_ocp(sc, 0xeb50) & ~0x03e0;
	rge_write_mac_ocp(sc, 0xeb50, val | 0x0040);

	val = rge_read_mac_ocp(sc, 0xe056) & ~0x00f0;
	rge_write_mac_ocp(sc, 0xe056, val | 0x0030);

	RGE_WRITE_1(sc, RGE_TDFNR, 0x10);

	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_TX_10M_PS_EN);

	RGE_MAC_CLRBIT(sc, 0xe040, 0x1000);

	val = rge_read_mac_ocp(sc, 0xea1c) & ~0x0003;
	rge_write_mac_ocp(sc, 0xea1c, val | 0x0001);

	val = rge_read_mac_ocp(sc, 0xe0c0) & ~0x4f0f;
	rge_write_mac_ocp(sc, 0xe0c0, val | 0x4403);

	RGE_MAC_SETBIT(sc, 0xe052, 0x0068);
	RGE_MAC_CLRBIT(sc, 0xe052, 0x0080);

	val = rge_read_mac_ocp(sc, 0xc0ac) & ~0x0080;
	rge_write_mac_ocp(sc, 0xc0ac, val | 0x1f00);

	val = rge_read_mac_ocp(sc, 0xd430) & ~0x0fff;
	rge_write_mac_ocp(sc, 0xd430, val | 0x047f);

	val = rge_read_mac_ocp(sc, 0xe84c) & ~0x0040;
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		rge_write_mac_ocp(sc, 0xe84c, 0x00c0);
	else
		rge_write_mac_ocp(sc, 0xe84c, 0x0080);

	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_PFM_EN);

	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		RGE_SETBIT_1(sc, RGE_MCUCMD, 0x01);

	/* Disable EEE plus. */
	RGE_MAC_CLRBIT(sc, 0xe080, 0x0002);

	if (sc->rge_type == MAC_CFG2_8126)
		RGE_MAC_CLRBIT(sc, 0xea1c, 0x0304);
	else
		RGE_MAC_CLRBIT(sc, 0xea1c, 0x0004);

	/* Pulse bit 0 of 0xeb54 (purpose undocumented). */
	RGE_MAC_SETBIT(sc, 0xeb54, 0x0001);
	DELAY(1);
	RGE_MAC_CLRBIT(sc, 0xeb54, 0x0001);

	RGE_CLRBIT_4(sc, 0x1880, 0x0030);

	rge_write_mac_ocp(sc, 0xe098, 0xc302);

	/* Enable hardware VLAN tag stripping only when negotiated. */
	if ((sc->sc_ec.ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0)
		RGE_SETBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);
	else
		RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);

	RGE_SETBIT_2(sc, RGE_CPLUSCMD, RGE_CPLUSCMD_RXCSUM);

	/* Wait (up to ~10ms) for bit 0x2000 of OCP 0xe00e to clear. */
	for (i = 0; i < 10; i++) {
		if (!(rge_read_mac_ocp(sc, 0xe00e) & 0x2000))
			break;
		DELAY(1000);
	}

	/* Disable RXDV gate. */
	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
	DELAY(2000);

	/* Program media/advertisement and restart autonegotiation. */
	rge_ifmedia_upd(ifp);

	/* Enable transmit and receive. */
	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_TXENB | RGE_CMD_RXENB);

	/* Program promiscuous mode and multicast filters. */
	rge_iff(sc);

	if (sc->rge_type == MAC_CFG2_8126)
		RGE_CLRBIT_1(sc, RGE_INT_CFG0, 0x08);
	else
		RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);

	/* Re-lock config registers. */
	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);

	/* Enable interrupts. */
	rge_setup_intr(sc, RGE_IMTYPE_SIM);

	ifp->if_flags |= IFF_RUNNING;
	CLR(ifp->if_flags, IFF_OACTIVE);

	/* Kick off the once-per-tick housekeeping callout. */
	callout_schedule(&sc->sc_timeout, 1);

	return (0);
}
    872 
/*
 * Stop the adapter and free any mbufs allocated to the RX and TX lists.
 */
void
rge_stop(struct ifnet *ifp, int disable)
{
	struct rge_softc *sc = ifp->if_softc;

	/* Stop the periodic tick callout before touching the chip. */
	callout_halt(&sc->sc_timeout, NULL);

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_RUNNING;
	sc->rge_timerintr = 0;

	/* Stop accepting any packets. */
	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
	    RGE_RXCFG_ERRPKT);

	/* Mask all interrupts. */
	RGE_WRITE_4(sc, RGE_IMR, 0);

	/* Config interrupt type for RTL8126. */
	if (sc->rge_type == MAC_CFG2_8126)
		RGE_CLRBIT_1(sc, RGE_INT_CFG0, RGE_INT_CFG0_EN);

	/* Clear timer interrupts. */
	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT1, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT2, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT3, 0);

	rge_reset(sc);

//	intr_barrier(sc->sc_ih);
//	ifq_barrier(&ifp->if_snd);
/*	ifq_clr_oactive(&ifp->if_snd); Sevan - OpenBSD queue API */

	/* Drop any partially reassembled jumbo RX packet. */
	if (sc->rge_head != NULL) {
		m_freem(sc->rge_head);
		sc->rge_head = sc->rge_tail = NULL;
	}

	/* Release the mbufs attached to both descriptor rings. */
	rge_tx_list_fini(sc);
	rge_rx_list_fini(sc);
}
    917 
/*
 * Set media options.
 *
 * Programs the advertisement registers (2.5G/5G via PHY OCP 0xa5d4,
 * 1G via MII_100T2CR, 10/100 via MII_ANAR) for the requested media
 * and then restarts autonegotiation -- note that even the fixed
 * 10/100 selections are reached by autonegotiating with a restricted
 * advertisement rather than by forcing BMCR speed bits.
 */
int
rge_ifmedia_upd(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_media;
	int anar, gig, val;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Disable Gigabit Lite. */
	RGE_PHY_CLRBIT(sc, 0xa428, 0x0200);
	RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0001);
	if (sc->rge_type == MAC_CFG2_8126)
		RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0002);

	/* Start from the current >1G advertisement with 2.5G/5G cleared. */
	val = rge_read_phy_ocp(sc, 0xa5d4);
	val &= ~RGE_ADV_2500TFDX;
	if (sc->rge_type == MAC_CFG2_8126)
		val &= ~RGE_ADV_5000TFDX;

	anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
	gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		/* 8126 also advertises 5G; other chips top out at 2.5G. */
		val |= (sc->rge_type != MAC_CFG2_8126) ?
		    RGE_ADV_2500TFDX : (RGE_ADV_2500TFDX | RGE_ADV_5000TFDX);
		break;
	case IFM_5000_T:
		val |= RGE_ADV_5000TFDX;
		ifp->if_baudrate = IF_Gbps(5);
		break;
	case IFM_2500_T:
		val |= RGE_ADV_2500TFDX;
		ifp->if_baudrate = IF_Mbps(2500);
		break;
	case IFM_1000_T:
		ifp->if_baudrate = IF_Gbps(1);
		break;
	case IFM_100_TX:
		/* Withdraw the 1000BASE-T advertisement. */
		gig = rge_read_phy(sc, 0, MII_100T2CR) &
		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
		    ANAR_TX | ANAR_TX_FD | ANAR_10_FD | ANAR_10 :
		    ANAR_TX | ANAR_10_FD | ANAR_10;
		ifp->if_baudrate = IF_Mbps(100);
		break;
	case IFM_10_T:
		/* Withdraw the 1000BASE-T advertisement. */
		gig = rge_read_phy(sc, 0, MII_100T2CR) &
		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
		    ANAR_10_FD | ANAR_10 : ANAR_10;
		ifp->if_baudrate = IF_Mbps(10);
		break;
	default:
		device_printf(sc->sc_dev,
		    "unsupported media type\n");
		return (EINVAL);
	}

	/* Commit advertisements and kick off a fresh negotiation. */
	rge_write_phy(sc, 0, MII_ANAR, anar | ANAR_PAUSE_ASYM | ANAR_FC);
	rge_write_phy(sc, 0, MII_100T2CR, gig);
	rge_write_phy_ocp(sc, 0xa5d4, val);
	rge_write_phy(sc, 0, MII_BMCR, BMCR_RESET | BMCR_AUTOEN |
	    BMCR_STARTNEG);

	return (0);
}
    990 
    991 /*
    992  * Report current media status.
    993  */
    994 void
    995 rge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
    996 {
    997 	struct rge_softc *sc = ifp->if_softc;
    998 	uint16_t status = 0;
    999 
   1000 	ifmr->ifm_status = IFM_AVALID;
   1001 	ifmr->ifm_active = IFM_ETHER;
   1002 
   1003 	if (rge_get_link_status(sc)) {
   1004 		ifmr->ifm_status |= IFM_ACTIVE;
   1005 
   1006 		status = RGE_READ_2(sc, RGE_PHYSTAT);
   1007 		if ((status & RGE_PHYSTAT_FDX) ||
   1008 		    (status & (RGE_PHYSTAT_2500MBPS | RGE_PHYSTAT_5000MBPS)))
   1009 			ifmr->ifm_active |= IFM_FDX;
   1010 		else
   1011 			ifmr->ifm_active |= IFM_HDX;
   1012 
   1013 		if (status & RGE_PHYSTAT_10MBPS)
   1014 			ifmr->ifm_active |= IFM_10_T;
   1015 		else if (status & RGE_PHYSTAT_100MBPS)
   1016 			ifmr->ifm_active |= IFM_100_TX;
   1017 		else if (status & RGE_PHYSTAT_1000MBPS)
   1018 			ifmr->ifm_active |= IFM_1000_T;
   1019 		else if (status & RGE_PHYSTAT_2500MBPS)
   1020 			ifmr->ifm_active |= IFM_2500_T;
   1021 		else if (status & RGE_PHYSTAT_5000MBPS)
   1022 			ifmr->ifm_active |= IFM_5000_T;
   1023 	}
   1024 }
   1025 
   1026 /*
   1027  * Allocate memory for RX/TX rings.
   1028  *
   1029  * XXX There is no tear-down for this if it any part fails, so everything
   1030  * remains allocated.
   1031  */
   1032 int
   1033 rge_allocmem(struct rge_softc *sc)
   1034 {
   1035 	int error, i;
   1036 
   1037 	/* Allocate DMA'able memory for the TX ring. */
   1038 	error = bus_dmamap_create(sc->sc_dmat, RGE_TX_LIST_SZ, 1,
   1039 	    RGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT, &sc->rge_ldata.rge_tx_list_map);
   1040 	if (error) {
   1041 		aprint_error_dev(sc->sc_dev, "can't create TX list map\n");
   1042 		return (error);
   1043 	}
   1044 	error = bus_dmamem_alloc(sc->sc_dmat, RGE_TX_LIST_SZ, RGE_ALIGN, 0,
   1045 	    &sc->rge_ldata.rge_tx_listseg, 1, &sc->rge_ldata.rge_tx_listnseg,
   1046 	    BUS_DMA_NOWAIT);
   1047 	if (error) {
   1048 		aprint_error_dev(sc->sc_dev, "can't alloc TX list\n");
   1049 		return (error);
   1050 	}
   1051 
   1052 	/* Load the map for the TX ring. */
   1053 	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
   1054 	    sc->rge_ldata.rge_tx_listnseg, RGE_TX_LIST_SZ,
   1055 	    (void **) &sc->rge_ldata.rge_tx_list,
   1056 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
   1057 	if (error) {
   1058 		aprint_error_dev(sc->sc_dev, "can't map TX dma buffers\n");
   1059 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
   1060 		    sc->rge_ldata.rge_tx_listnseg);
   1061 		return (error);
   1062 	}
   1063 	memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);
   1064 	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
   1065 	    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
   1066 	if (error) {
   1067 		aprint_error_dev(sc->sc_dev, "can't load TX dma map\n");
   1068 		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map);
   1069 		bus_dmamem_unmap(sc->sc_dmat,
   1070 		    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ);
   1071 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
   1072 		    sc->rge_ldata.rge_tx_listnseg);
   1073 		return (error);
   1074 	}
   1075 
   1076 	/* Create DMA maps for TX buffers. */
   1077 	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
   1078 		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN,
   1079 		    RGE_TX_NSEGS, RGE_JUMBO_FRAMELEN, 0, 0,
   1080 		    &sc->rge_ldata.rge_txq[i].txq_dmamap);
   1081 		if (error) {
   1082 			aprint_error_dev(sc->sc_dev, "can't create DMA map for TX\n");
   1083 			return (error);
   1084 		}
   1085 	}
   1086 
   1087 	/* Allocate DMA'able memory for the RX ring. */
   1088 	error = bus_dmamap_create(sc->sc_dmat, RGE_RX_LIST_SZ, 1,
   1089 	    RGE_RX_LIST_SZ, 0, 0, &sc->rge_ldata.rge_rx_list_map);
   1090 	if (error) {
   1091 		aprint_error_dev(sc->sc_dev, "can't create RX list map\n");
   1092 		return (error);
   1093 	}
   1094 	error = bus_dmamem_alloc(sc->sc_dmat, RGE_RX_LIST_SZ, RGE_ALIGN, 0,
   1095 	    &sc->rge_ldata.rge_rx_listseg, 1, &sc->rge_ldata.rge_rx_listnseg,
   1096 	    BUS_DMA_NOWAIT);
   1097 	if (error) {
   1098 		aprint_error_dev(sc->sc_dev, "can't alloc RX list\n");
   1099 		return (error);
   1100 	}
   1101 
   1102 	/* Load the map for the RX ring. */
   1103 	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
   1104 	    sc->rge_ldata.rge_rx_listnseg, RGE_RX_LIST_SZ,
   1105 	    (void **) &sc->rge_ldata.rge_rx_list,
   1106 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
   1107 	if (error) {
   1108 		aprint_error_dev(sc->sc_dev, "can't map RX dma buffers\n");
   1109 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
   1110 		    sc->rge_ldata.rge_rx_listnseg);
   1111 		return (error);
   1112 	}
   1113 	memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);
   1114 	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
   1115 	    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
   1116 	if (error) {
   1117 		aprint_error_dev(sc->sc_dev, "can't load RX dma map\n");
   1118 		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map);
   1119 		bus_dmamem_unmap(sc->sc_dmat,
   1120 		    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ);
   1121 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
   1122 		    sc->rge_ldata.rge_rx_listnseg);
   1123 		return (error);
   1124 	}
   1125 
   1126 	/*
   1127 	 * Create DMA maps for RX buffers.  Use BUS_DMA_ALLOCNOW to avoid any
   1128 	 * potential failure in bus_dmamap_load_mbuf() in the RX path.
   1129 	 */
   1130 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
   1131 		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN, 1,
   1132 		    RGE_JUMBO_FRAMELEN, 0, BUS_DMA_ALLOCNOW,
   1133 		    &sc->rge_ldata.rge_rxq[i].rxq_dmamap);
   1134 		if (error) {
   1135 			aprint_error_dev(sc->sc_dev, "can't create DMA map for RX\n");
   1136 			return (error);
   1137 		}
   1138 	}
   1139 
   1140 	return (error);
   1141 }
   1142 
   1143 /*
   1144  * Set an RX descriptor and sync it.
   1145  */
   1146 static void
   1147 rge_load_rxbuf(struct rge_softc *sc, int idx)
   1148 {
   1149 	struct rge_rx_desc *r = &sc->rge_ldata.rge_rx_list[idx];
   1150 	struct rge_rxq *rxq = &sc->rge_ldata.rge_rxq[idx];
   1151 	bus_dmamap_t rxmap = rxq->rxq_dmamap;
   1152 	uint32_t cmdsts;
   1153 
   1154 	cmdsts = rxmap->dm_segs[0].ds_len | RGE_RDCMDSTS_OWN;
   1155 	if (idx == RGE_RX_LIST_CNT - 1)
   1156 		cmdsts |= RGE_RDCMDSTS_EOR;
   1157 
   1158 	r->hi_qword0.rge_addr = htole64(rxmap->dm_segs[0].ds_addr);
   1159 	r->hi_qword1.rx_qword4.rge_extsts = 0;
   1160 	r->hi_qword1.rx_qword4.rge_cmdsts = htole32(cmdsts);
   1161 
   1162 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
   1163 	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
   1164 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1165 }
   1166 
   1167 /*
   1168  * Initialize the RX descriptor and attach an mbuf cluster.
   1169  */
   1170 int
   1171 rge_newbuf(struct rge_softc *sc, int idx)
   1172 {
   1173 	struct mbuf *m;
   1174 	struct rge_rxq *rxq;
   1175 	bus_dmamap_t rxmap;
   1176 	int error __diagused;
   1177 
   1178 	m = MCLGETL(NULL, M_DONTWAIT, RGE_JUMBO_FRAMELEN);
   1179 	if (m == NULL)
   1180 		return (ENOBUFS);
   1181 	MCLAIM(m, &sc->sc_ec.ec_rx_mowner);
   1182 
   1183 	m->m_len = m->m_pkthdr.len = RGE_JUMBO_FRAMELEN;
   1184 
   1185 	rxq = &sc->rge_ldata.rge_rxq[idx];
   1186 	rxmap = rxq->rxq_dmamap;
   1187 
   1188 	if (rxq->rxq_mbuf != NULL)
   1189 		bus_dmamap_unload(sc->sc_dmat, rxq->rxq_dmamap);
   1190 
   1191 	/* This map was created with BUS_DMA_ALLOCNOW so should never fail. */
   1192 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m, BUS_DMA_NOWAIT);
   1193 	KASSERTMSG(error == 0, "error=%d", error);
   1194 
   1195 	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
   1196 	    BUS_DMASYNC_PREREAD);
   1197 
   1198 	/* Map the segments into RX descriptors. */
   1199 
   1200 	rxq->rxq_mbuf = m;
   1201 	rge_load_rxbuf(sc, idx);
   1202 
   1203 	return 0;
   1204 }
   1205 
   1206 static int
   1207 rge_rx_list_init(struct rge_softc *sc)
   1208 {
   1209 	unsigned i;
   1210 
   1211 	memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);
   1212 
   1213 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
   1214 		sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
   1215 		if (rge_newbuf(sc, i) != 0) {
   1216 			rge_rx_list_fini(sc);
   1217 			return (ENOBUFS);
   1218 		}
   1219 	}
   1220 
   1221 	sc->rge_ldata.rge_rxq_prodidx = sc->rge_ldata.rge_rxq_considx = 0;
   1222 	sc->rge_head = sc->rge_tail = NULL;
   1223 
   1224 	return (0);
   1225 }
   1226 
   1227 static void
   1228 rge_rx_list_fini(struct rge_softc *sc)
   1229 {
   1230 	unsigned i;
   1231 
   1232 	/* Free the RX list buffers. */
   1233 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
   1234 		if (sc->rge_ldata.rge_rxq[i].rxq_mbuf != NULL) {
   1235 			bus_dmamap_unload(sc->sc_dmat,
   1236 			    sc->rge_ldata.rge_rxq[i].rxq_dmamap);
   1237 			m_freem(sc->rge_ldata.rge_rxq[i].rxq_mbuf);
   1238 			sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
   1239 		}
   1240 	}
   1241 }
   1242 
   1243 static void
   1244 rge_tx_list_init(struct rge_softc *sc)
   1245 {
   1246 	unsigned i;
   1247 
   1248 	memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);
   1249 
   1250 	for (i = 0; i < RGE_TX_LIST_CNT; i++)
   1251 		sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
   1252 
   1253 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map, 0,
   1254 	    sc->rge_ldata.rge_tx_list_map->dm_mapsize,
   1255 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1256 
   1257 	sc->rge_ldata.rge_txq_prodidx = sc->rge_ldata.rge_txq_considx = 0;
   1258 }
   1259 
   1260 static void
   1261 rge_tx_list_fini(struct rge_softc *sc)
   1262 {
   1263 	unsigned i;
   1264 
   1265 	/* Free the TX list buffers. */
   1266 	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
   1267 		if (sc->rge_ldata.rge_txq[i].txq_mbuf != NULL) {
   1268 			bus_dmamap_unload(sc->sc_dmat,
   1269 			    sc->rge_ldata.rge_txq[i].txq_dmamap);
   1270 			m_freem(sc->rge_ldata.rge_txq[i].txq_mbuf);
   1271 			sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
   1272 		}
   1273 	}
   1274 }
   1275 
/*
 * Harvest completed RX descriptors and pass received frames up the
 * stack.  Returns nonzero if at least one descriptor was processed.
 */
int
rge_rxeof(struct rge_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct rge_rx_desc *cur_rx;
	struct rge_rxq *rxq;
	uint32_t rxstat, extsts;
	int i, total_len, rx = 0;

	for (i = sc->rge_ldata.rge_rxq_considx; ; i = RGE_NEXT_RX_DESC(i)) {
		/* Invalidate the descriptor memory. */
		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
		    i * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_rx = &sc->rge_ldata.rge_rx_list[i];

		/* Stop at the first descriptor still owned by the chip. */
		if (RGE_OWN(cur_rx))
			break;

		rxstat = letoh32(cur_rx->hi_qword1.rx_qword4.rge_cmdsts);
		extsts = letoh32(cur_rx->hi_qword1.rx_qword4.rge_extsts);

		total_len = RGE_RXBYTES(cur_rx);
		rxq = &sc->rge_ldata.rge_rxq[i];
		m = rxq->rxq_mbuf;
		rx = 1;

		/* Invalidate the RX mbuf. */
		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_dmamap, 0,
		    rxq->rxq_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * Require a complete frame in a single buffer (SOF and
		 * EOF both set); otherwise count an input error and
		 * recycle the buffer.
		 */
		if ((rxstat & (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) !=
		    (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) {
			if_statinc(ifp, if_ierrors);
			rge_load_rxbuf(sc, i);
			continue;
		}

		if (rxstat & RGE_RDCMDSTS_RXERRSUM) {
			if_statinc(ifp, if_ierrors);
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->rge_head != NULL) {
				m_freem(sc->rge_head);
				sc->rge_head = sc->rge_tail = NULL;
			}
			rge_load_rxbuf(sc, i);
			continue;
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
		 */
		if (rge_newbuf(sc, i) != 0) {
			if_statinc(ifp, if_iqdrops);
			if (sc->rge_head != NULL) {
				m_freem(sc->rge_head);
				sc->rge_head = sc->rge_tail = NULL;
			}
			rge_load_rxbuf(sc, i);
			continue;
		}

		m_set_rcvif(m, ifp);
		if (sc->rge_head != NULL) {
			/* Append this buffer to a pending fragment chain. */
			m->m_len = total_len;
			/*
			 * Special case: if there's 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes is the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->rge_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->rge_tail->m_next = m;
			}
			m = sc->rge_head;
			sc->rge_head = sc->rge_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
	#if 0
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);
	#else
		{
			/* Keep the CRC and let the stack strip it. */
			m->m_pkthdr.len = m->m_len = total_len;
			m->m_flags |= M_HASFCS;
		}
	#endif

#if notyet
		/* Check IP header checksum. */
		if (!(extsts & RGE_RDEXTSTS_IPCSUMERR) &&
		    (extsts & RGE_RDEXTSTS_IPV4))
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

		/* Check TCP/UDP checksum. */
		if ((extsts & (RGE_RDEXTSTS_IPV4 | RGE_RDEXTSTS_IPV6)) &&
		    (((extsts & RGE_RDEXTSTS_TCPPKT) &&
		    !(extsts & RGE_RDEXTSTS_TCPCSUMERR)) ||
		    ((extsts & RGE_RDEXTSTS_UDPPKT) &&
		    !(extsts & RGE_RDEXTSTS_UDPCSUMERR))))
			m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
			    M_UDP_CSUM_IN_OK;
#endif

		/* Hardware-stripped VLAN tag, if any. */
		if (extsts & RGE_RDEXTSTS_VTAG) {
			vlan_set_tag(m,
			    bswap16(extsts & RGE_RDEXTSTS_VLAN_MASK));
		}

		if_percpuq_enqueue(ifp->if_percpuq, m);
	}

	sc->rge_ldata.rge_rxq_considx = i;

	return (rx);
}
   1404 
/*
 * Reclaim completed TX descriptors, free their mbufs, and update
 * statistics.  Returns nonzero if any progress was made (descriptors
 * reclaimed, or a stalled transmitter restarted).
 */
int
rge_txeof(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct rge_txq *txq;
	uint32_t txstat;
	int cons, idx, prod;
	int free = 0;

	prod = sc->rge_ldata.rge_txq_prodidx;
	cons = sc->rge_ldata.rge_txq_considx;

	while (prod != cons) {
		txq = &sc->rge_ldata.rge_txq[cons];
		idx = txq->txq_descidx;

		/* Re-read the descriptor status from memory. */
		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
		    idx * sizeof(struct rge_tx_desc),
		    sizeof(struct rge_tx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		txstat = letoh32(sc->rge_ldata.rge_tx_list[idx].rge_cmdsts);

		/*
		 * Chip still owns this descriptor: stop scanning, and
		 * remember (free == 2) to kick the transmitter below.
		 */
		if (txstat & RGE_TDCMDSTS_OWN) {
			free = 2;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap, 0,
		    txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
		m_freem(txq->txq_mbuf);
		txq->txq_mbuf = NULL;

		/* Account for this packet's outcome. */
		net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
		if (txstat & (RGE_TDCMDSTS_EXCESSCOLL | RGE_TDCMDSTS_COLL))
			if_statinc_ref(ifp, nsr, if_collisions);
		if (txstat & RGE_TDCMDSTS_TXERR)
			if_statinc_ref(ifp, nsr, if_oerrors);
		else
			if_statinc_ref(ifp, nsr, if_opackets);
		IF_STAT_PUTREF(ifp);

		/* Hand the descriptor slot back for reuse. */
		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
		    idx * sizeof(struct rge_tx_desc),
		    sizeof(struct rge_tx_desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		cons = RGE_NEXT_TX_DESC(idx);
		free = 1;
	}

	if (free == 0)
		return (0);

	sc->rge_ldata.rge_txq_considx = cons;

	/* A descriptor was still pending; poke the chip to resume TX. */
	if (free == 2)
		rge_txstart(sc);

	CLR(ifp->if_flags, IFF_OACTIVE);
	ifp->if_timer = 0;
	if_schedule_deferred_start(ifp);

	return (1);
}
   1471 
/*
 * Reset the chip: gate RXDV, wait for the RX/TX FIFOs to drain, then
 * issue a soft reset and wait for it to complete.
 */
void
rge_reset(struct rge_softc *sc)
{
	int i;

	/* Enable RXDV gate. */
	RGE_SETBIT_1(sc, RGE_PPSW, 0x08);
	DELAY(2000);

	/* Wait up to 150ms (3000 * 50us) for both FIFOs to empty. */
	for (i = 0; i < 3000; i++) {
		DELAY(50);
		if ((RGE_READ_1(sc, RGE_MCUCMD) & (RGE_MCUCMD_RXFIFO_EMPTY |
		    RGE_MCUCMD_TXFIFO_EMPTY)) == (RGE_MCUCMD_RXFIFO_EMPTY |
		    RGE_MCUCMD_TXFIFO_EMPTY))
			break;
	}
	if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5) {
		/* Extra wait for these variants; 0x0103 bits are magic. */
		for (i = 0; i < 3000; i++) {
			DELAY(50);
			if ((RGE_READ_2(sc, RGE_IM) & 0x0103) == 0x0103)
				break;
		}
	}

	DELAY(2000);

	/* Soft reset. */
	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_RESET);

	/* The reset bit self-clears when the reset is done. */
	for (i = 0; i < RGE_TIMEOUT; i++) {
		DELAY(100);
		if (!(RGE_READ_1(sc, RGE_CMD) & RGE_CMD_RESET))
			break;
	}
	if (i == RGE_TIMEOUT)
		device_printf(sc->sc_dev, "reset never completed!\n");
}
   1509 
/*
 * Program the hardware receive filter: always accept unicast to our
 * station address and broadcast; accept multicast via a 64-bit hash
 * filter, or everything when promiscuous / all-multicast is needed.
 */
void
rge_iff(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct ethercom *ec = &sc->sc_ec;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[2];
	uint32_t rxfilt;
	int h = 0;

	rxfilt = RGE_READ_4(sc, RGE_RXCFG);
	rxfilt &= ~(RGE_RXCFG_ALLPHYS | RGE_RXCFG_MULTI);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept frames destined to our station address.
	 * Always accept broadcast frames.
	 */
	rxfilt |= RGE_RXCFG_INDIV | RGE_RXCFG_BROAD;

	if (ifp->if_flags & IFF_PROMISC) {
 allmulti:
		ifp->if_flags |= IFF_ALLMULTI;
		rxfilt |= RGE_RXCFG_MULTI;
		/* ALLPHYS only in true promiscuous mode, not plain allmulti. */
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= RGE_RXCFG_ALLPHYS;
		hashes[0] = hashes[1] = 0xffffffff;
	} else {
		rxfilt |= RGE_RXCFG_MULTI;
		/* Program new filter. */
		memset(hashes, 0, sizeof(hashes));

		ETHER_LOCK(ec);
		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			/* Address ranges cannot be hashed; go all-multi. */
			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN) != 0) {
			    	ETHER_UNLOCK(ec);
				goto allmulti;
			}
			/* Top 6 bits of the big-endian CRC index the hash. */
			h = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN) >> 26;

			if (h < 32)
				hashes[0] |= (1U << h);
			else
				hashes[1] |= (1U << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
		ETHER_UNLOCK(ec);
	}

	RGE_WRITE_4(sc, RGE_RXCFG, rxfilt);
	/* Hash words are written byte-swapped, high word first. */
	RGE_WRITE_4(sc, RGE_MAR0, bswap32(hashes[1]));
	RGE_WRITE_4(sc, RGE_MAR4, bswap32(hashes[0]));
}
   1568 
/*
 * Power the PHY up or down.  When powering on, poll until the PHY
 * reports state 3 in OCP register 0xa420 (presumably "ready" —
 * undocumented vendor register) or RGE_TIMEOUT iterations elapse.
 */
void
rge_set_phy_power(struct rge_softc *sc, int on)
{
	int i;

	if (on) {
		RGE_SETBIT_1(sc, RGE_PMCH, 0xc0);

		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN);

		for (i = 0; i < RGE_TIMEOUT; i++) {
			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 3)
				break;
			DELAY(1000);
		}
	} else {
		/* Power down via BMCR, then drop the MAC-side power bits. */
		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_PDOWN);
		RGE_CLRBIT_1(sc, RGE_PMCH, 0x80);
		RGE_CLRBIT_1(sc, RGE_PPSW, 0x40);
	}
}
   1590 
/*
 * Configure the PHY: dispatch to the per-variant microcode/tuning
 * routine, then disable EEE and advanced EEE for all variants.
 */
void
rge_phy_config(struct rge_softc *sc)
{
	/* Read microcode version. */
	rge_write_phy_ocp(sc, 0xa436, 0x801e);
	sc->rge_mcodever = rge_read_phy_ocp(sc, 0xa438);

	switch (sc->rge_type) {
	case MAC_CFG2_8126:
		rge_phy_config_mac_cfg2_8126(sc);
		break;
	case MAC_CFG2:
		rge_phy_config_mac_cfg2(sc);
		break;
	case MAC_CFG3:
		rge_phy_config_mac_cfg3(sc);
		break;
	case MAC_CFG4:
		rge_phy_config_mac_cfg4(sc);
		break;
	case MAC_CFG5:
		rge_phy_config_mac_cfg5(sc);
		break;
	default:
		break;	/* Can't happen. */
	}

	/* Clear bit 15 of PHY register 0x12, page 0x0a5b. */
	rge_write_phy(sc, 0x0a5b, 0x12,
	    rge_read_phy(sc, 0x0a5b, 0x12) & ~0x8000);

	/* Disable EEE. */
	RGE_MAC_CLRBIT(sc, 0xe040, 0x0003);
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
		RGE_MAC_CLRBIT(sc, 0xeb62, 0x0006);
		RGE_PHY_CLRBIT(sc, 0xa432, 0x0010);
	}
	RGE_PHY_CLRBIT(sc, 0xa5d0, 0x0006);
	RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0001);
	if (sc->rge_type == MAC_CFG2_8126)
		RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0002);
	RGE_PHY_CLRBIT(sc, 0xa6d8, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xa428, 0x0080);
	RGE_PHY_CLRBIT(sc, 0xa4a2, 0x0200);

	/* Disable advanced EEE (under MCU patch mode except on 8126). */
	if (sc->rge_type != MAC_CFG2_8126)
		rge_patch_phy_mcu(sc, 1);
	RGE_MAC_CLRBIT(sc, 0xe052, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xa442, 0x3000);
	RGE_PHY_CLRBIT(sc, 0xa430, 0x8000);
	if (sc->rge_type != MAC_CFG2_8126)
		rge_patch_phy_mcu(sc, 0);
}
   1644 
   1645 void
   1646 rge_phy_config_mac_cfg2_8126(struct rge_softc *sc)
   1647 {
   1648 	uint16_t val;
   1649 	int i;
   1650 	static const uint16_t mac_cfg2_a438_value[] =
   1651 	    { 0x0044, 0x00a8, 0x00d6, 0x00ec, 0x00f6, 0x00fc, 0x00fe,
   1652 	      0x00fe, 0x00bc, 0x0058, 0x002a, 0x003f, 0x3f02, 0x023c,
   1653 	      0x3b0a, 0x1c00, 0x0000, 0x0000, 0x0000, 0x0000 };
   1654 
   1655 	static const uint16_t mac_cfg2_b87e_value[] =
   1656 	    { 0x03ed, 0x03ff, 0x0009, 0x03fe, 0x000b, 0x0021, 0x03f7,
   1657 	      0x03b8, 0x03e0, 0x0049, 0x0049, 0x03e0, 0x03b8, 0x03f7,
   1658 	      0x0021, 0x000b, 0x03fe, 0x0009, 0x03ff, 0x03ed, 0x000e,
   1659 	      0x03fe, 0x03ed, 0x0006, 0x001a, 0x03f1, 0x03d8, 0x0023,
   1660 	      0x0054, 0x0322, 0x00dd, 0x03ab, 0x03dc, 0x0027, 0x000e,
   1661 	      0x03e5, 0x03f9, 0x0012, 0x0001, 0x03f1 };
   1662 
   1663 	rge_phy_config_mcu(sc, RGE_MAC_CFG2_8126_MCODE_VER);
   1664 
   1665 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
   1666 	rge_write_phy_ocp(sc, 0xa436, 0x80bf);
   1667 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1668 	rge_write_phy_ocp(sc, 0xa438, val | 0xed00);
   1669 	rge_write_phy_ocp(sc, 0xa436, 0x80cd);
   1670 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1671 	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
   1672 	rge_write_phy_ocp(sc, 0xa436, 0x80d1);
   1673 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1674 	rge_write_phy_ocp(sc, 0xa438, val | 0xc800);
   1675 	rge_write_phy_ocp(sc, 0xa436, 0x80d4);
   1676 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1677 	rge_write_phy_ocp(sc, 0xa438, val | 0xc800);
   1678 	rge_write_phy_ocp(sc, 0xa436, 0x80e1);
   1679 	rge_write_phy_ocp(sc, 0xa438, 0x10cc);
   1680 	rge_write_phy_ocp(sc, 0xa436, 0x80e5);
   1681 	rge_write_phy_ocp(sc, 0xa438, 0x4f0c);
   1682 	rge_write_phy_ocp(sc, 0xa436, 0x8387);
   1683 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1684 	rge_write_phy_ocp(sc, 0xa438, val | 0x4700);
   1685 	val = rge_read_phy_ocp(sc, 0xa80c) & ~0x00c0;
   1686 	rge_write_phy_ocp(sc, 0xa80c, val | 0x0080);
   1687 	RGE_PHY_CLRBIT(sc, 0xac90, 0x0010);
   1688 	RGE_PHY_CLRBIT(sc, 0xad2c, 0x8000);
   1689 	rge_write_phy_ocp(sc, 0xb87c, 0x8321);
   1690 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1691 	rge_write_phy_ocp(sc, 0xb87e, val | 0x1100);
   1692 	RGE_PHY_SETBIT(sc, 0xacf8, 0x000c);
   1693 	rge_write_phy_ocp(sc, 0xa436, 0x8183);
   1694 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1695 	rge_write_phy_ocp(sc, 0xa438, val | 0x5900);
   1696 	RGE_PHY_SETBIT(sc, 0xad94, 0x0020);
   1697 	RGE_PHY_CLRBIT(sc, 0xa654, 0x0800);
   1698 	RGE_PHY_SETBIT(sc, 0xb648, 0x4000);
   1699 	rge_write_phy_ocp(sc, 0xb87c, 0x839e);
   1700 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1701 	rge_write_phy_ocp(sc, 0xb87e, val | 0x2f00);
   1702 	rge_write_phy_ocp(sc, 0xb87c, 0x83f2);
   1703 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1704 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0800);
   1705 	RGE_PHY_SETBIT(sc, 0xada0, 0x0002);
   1706 	rge_write_phy_ocp(sc, 0xb87c, 0x80f3);
   1707 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1708 	rge_write_phy_ocp(sc, 0xb87e, val | 0x9900);
   1709 	rge_write_phy_ocp(sc, 0xb87c, 0x8126);
   1710 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1711 	rge_write_phy_ocp(sc, 0xb87e, val | 0xc100);
   1712 	rge_write_phy_ocp(sc, 0xb87c, 0x893a);
   1713 	rge_write_phy_ocp(sc, 0xb87e, 0x8080);
   1714 	rge_write_phy_ocp(sc, 0xb87c, 0x8647);
   1715 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1716 	rge_write_phy_ocp(sc, 0xb87e, val | 0xe600);
   1717 	rge_write_phy_ocp(sc, 0xb87c, 0x862c);
   1718 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1719 	rge_write_phy_ocp(sc, 0xb87e, val | 0x1200);
   1720 	rge_write_phy_ocp(sc, 0xb87c, 0x864a);
   1721 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1722 	rge_write_phy_ocp(sc, 0xb87e, val | 0xe600);
   1723 	rge_write_phy_ocp(sc, 0xb87c, 0x80a0);
   1724 	rge_write_phy_ocp(sc, 0xb87e, 0xbcbc);
   1725 	rge_write_phy_ocp(sc, 0xb87c, 0x805e);
   1726 	rge_write_phy_ocp(sc, 0xb87e, 0xbcbc);
   1727 	rge_write_phy_ocp(sc, 0xb87c, 0x8056);
   1728 	rge_write_phy_ocp(sc, 0xb87e, 0x3077);
   1729 	rge_write_phy_ocp(sc, 0xb87c, 0x8058);
   1730 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1731 	rge_write_phy_ocp(sc, 0xb87e, val | 0x5a00);
   1732 	rge_write_phy_ocp(sc, 0xb87c, 0x8098);
   1733 	rge_write_phy_ocp(sc, 0xb87e, 0x3077);
   1734 	rge_write_phy_ocp(sc, 0xb87c, 0x809a);
   1735 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1736 	rge_write_phy_ocp(sc, 0xb87e, val | 0x5a00);
   1737 	rge_write_phy_ocp(sc, 0xb87c, 0x8052);
   1738 	rge_write_phy_ocp(sc, 0xb87e, 0x3733);
   1739 	rge_write_phy_ocp(sc, 0xb87c, 0x8094);
   1740 	rge_write_phy_ocp(sc, 0xb87e, 0x3733);
   1741 	rge_write_phy_ocp(sc, 0xb87c, 0x807f);
   1742 	rge_write_phy_ocp(sc, 0xb87e, 0x7c75);
   1743 	rge_write_phy_ocp(sc, 0xb87c, 0x803d);
   1744 	rge_write_phy_ocp(sc, 0xb87e, 0x7c75);
   1745 	rge_write_phy_ocp(sc, 0xb87c, 0x8036);
   1746 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1747 	rge_write_phy_ocp(sc, 0xb87e, val | 0x3000);
   1748 	rge_write_phy_ocp(sc, 0xb87c, 0x8078);
   1749 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1750 	rge_write_phy_ocp(sc, 0xb87e, val | 0x3000);
   1751 	rge_write_phy_ocp(sc, 0xb87c, 0x8031);
   1752 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1753 	rge_write_phy_ocp(sc, 0xb87e, val | 0x3300);
   1754 	rge_write_phy_ocp(sc, 0xb87c, 0x8073);
   1755 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1756 	rge_write_phy_ocp(sc, 0xb87e, val | 0x3300);
   1757 	val = rge_read_phy_ocp(sc, 0xae06) & ~0xfc00;
   1758 	rge_write_phy_ocp(sc, 0xae06, val | 0x7c00);
   1759 	rge_write_phy_ocp(sc, 0xb87c, 0x89D1);
   1760 	rge_write_phy_ocp(sc, 0xb87e, 0x0004);
   1761 	rge_write_phy_ocp(sc, 0xa436, 0x8fbd);
   1762 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1763 	rge_write_phy_ocp(sc, 0xa438, val | 0x0a00);
   1764 	rge_write_phy_ocp(sc, 0xa436, 0x8fbe);
   1765 	rge_write_phy_ocp(sc, 0xa438, 0x0d09);
   1766 	rge_write_phy_ocp(sc, 0xb87c, 0x89cd);
   1767 	rge_write_phy_ocp(sc, 0xb87e, 0x0f0f);
   1768 	rge_write_phy_ocp(sc, 0xb87c, 0x89cf);
   1769 	rge_write_phy_ocp(sc, 0xb87e, 0x0f0f);
   1770 	rge_write_phy_ocp(sc, 0xb87c, 0x83a4);
   1771 	rge_write_phy_ocp(sc, 0xb87e, 0x6600);
   1772 	rge_write_phy_ocp(sc, 0xb87c, 0x83a6);
   1773 	rge_write_phy_ocp(sc, 0xb87e, 0x6601);
   1774 	rge_write_phy_ocp(sc, 0xb87c, 0x83c0);
   1775 	rge_write_phy_ocp(sc, 0xb87e, 0x6600);
   1776 	rge_write_phy_ocp(sc, 0xb87c, 0x83c2);
   1777 	rge_write_phy_ocp(sc, 0xb87e, 0x6601);
   1778 	rge_write_phy_ocp(sc, 0xb87c, 0x8414);
   1779 	rge_write_phy_ocp(sc, 0xb87e, 0x6600);
   1780 	rge_write_phy_ocp(sc, 0xb87c, 0x8416);
   1781 	rge_write_phy_ocp(sc, 0xb87e, 0x6601);
   1782 	rge_write_phy_ocp(sc, 0xb87c, 0x83f8);
   1783 	rge_write_phy_ocp(sc, 0xb87e, 0x6600);
   1784 	rge_write_phy_ocp(sc, 0xb87c, 0x83fa);
   1785 	rge_write_phy_ocp(sc, 0xb87e, 0x6601);
   1786 
   1787 	rge_patch_phy_mcu(sc, 1);
   1788 	val = rge_read_phy_ocp(sc, 0xbd96) & ~0x1f00;
   1789 	rge_write_phy_ocp(sc, 0xbd96, val | 0x1000);
   1790 	val = rge_read_phy_ocp(sc, 0xbf1c) & ~0x0007;
   1791 	rge_write_phy_ocp(sc, 0xbf1c, val | 0x0007);
   1792 	RGE_PHY_CLRBIT(sc, 0xbfbe, 0x8000);
   1793 	val = rge_read_phy_ocp(sc, 0xbf40) & ~0x0380;
   1794 	rge_write_phy_ocp(sc, 0xbf40, val | 0x0280);
   1795 	val = rge_read_phy_ocp(sc, 0xbf90) & ~0x0080;
   1796 	rge_write_phy_ocp(sc, 0xbf90, val | 0x0060);
   1797 	val = rge_read_phy_ocp(sc, 0xbf90) & ~0x0010;
   1798 	rge_write_phy_ocp(sc, 0xbf90, val | 0x000c);
   1799 	rge_patch_phy_mcu(sc, 0);
   1800 
   1801 	rge_write_phy_ocp(sc, 0xa436, 0x843b);
   1802 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1803 	rge_write_phy_ocp(sc, 0xa438, val | 0x2000);
   1804 	rge_write_phy_ocp(sc, 0xa436, 0x843d);
   1805 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1806 	rge_write_phy_ocp(sc, 0xa438, val | 0x2000);
   1807 	RGE_PHY_CLRBIT(sc, 0xb516, 0x007f);
   1808 	RGE_PHY_CLRBIT(sc, 0xbf80, 0x0030);
   1809 
   1810 	rge_write_phy_ocp(sc, 0xa436, 0x8188);
   1811 	for (i = 0; i < 11; i++)
   1812 		rge_write_phy_ocp(sc, 0xa438, mac_cfg2_a438_value[i]);
   1813 
   1814 	rge_write_phy_ocp(sc, 0xb87c, 0x8015);
   1815 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1816 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0800);
   1817 	rge_write_phy_ocp(sc, 0xb87c, 0x8ffd);
   1818 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1819 	rge_write_phy_ocp(sc, 0xb87e, val | 0);
   1820 	rge_write_phy_ocp(sc, 0xb87c, 0x8fff);
   1821 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1822 	rge_write_phy_ocp(sc, 0xb87e, val | 0x7f00);
   1823 	rge_write_phy_ocp(sc, 0xb87c, 0x8ffb);
   1824 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1825 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0100);
   1826 	rge_write_phy_ocp(sc, 0xb87c, 0x8fe9);
   1827 	rge_write_phy_ocp(sc, 0xb87e, 0x0002);
   1828 	rge_write_phy_ocp(sc, 0xb87c, 0x8fef);
   1829 	rge_write_phy_ocp(sc, 0xb87e, 0x00a5);
   1830 	rge_write_phy_ocp(sc, 0xb87c, 0x8ff1);
   1831 	rge_write_phy_ocp(sc, 0xb87e, 0x0106);
   1832 	rge_write_phy_ocp(sc, 0xb87c, 0x8fe1);
   1833 	rge_write_phy_ocp(sc, 0xb87e, 0x0102);
   1834 	rge_write_phy_ocp(sc, 0xb87c, 0x8fe3);
   1835 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1836 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0400);
   1837 	RGE_PHY_SETBIT(sc, 0xa654, 0x0800);
   1838 	RGE_PHY_CLRBIT(sc, 0xa654, 0x0003);
   1839 	rge_write_phy_ocp(sc, 0xac3a, 0x5851);
   1840 	val = rge_read_phy_ocp(sc, 0xac3c) & ~0xd000;
   1841 	rge_write_phy_ocp(sc, 0xac3c, val | 0x2000);
   1842 	val = rge_read_phy_ocp(sc, 0xac42) & ~0x0200;
   1843 	rge_write_phy_ocp(sc, 0xac42, val | 0x01c0);
   1844 	RGE_PHY_CLRBIT(sc, 0xac3e, 0xe000);
   1845 	RGE_PHY_CLRBIT(sc, 0xac42, 0x0038);
   1846 	val = rge_read_phy_ocp(sc, 0xac42) & ~0x0002;
   1847 	rge_write_phy_ocp(sc, 0xac42, val | 0x0005);
   1848 	rge_write_phy_ocp(sc, 0xac1a, 0x00db);
   1849 	rge_write_phy_ocp(sc, 0xade4, 0x01b5);
   1850 	RGE_PHY_CLRBIT(sc, 0xad9c, 0x0c00);
   1851 	rge_write_phy_ocp(sc, 0xb87c, 0x814b);
   1852 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1853 	rge_write_phy_ocp(sc, 0xb87e, val | 0x1100);
   1854 	rge_write_phy_ocp(sc, 0xb87c, 0x814d);
   1855 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1856 	rge_write_phy_ocp(sc, 0xb87e, val | 0x1100);
   1857 	rge_write_phy_ocp(sc, 0xb87c, 0x814f);
   1858 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1859 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0b00);
   1860 	rge_write_phy_ocp(sc, 0xb87c, 0x8142);
   1861 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1862 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0100);
   1863 	rge_write_phy_ocp(sc, 0xb87c, 0x8144);
   1864 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1865 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0100);
   1866 	rge_write_phy_ocp(sc, 0xb87c, 0x8150);
   1867 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1868 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0100);
   1869 	rge_write_phy_ocp(sc, 0xb87c, 0x8118);
   1870 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1871 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0700);
   1872 	rge_write_phy_ocp(sc, 0xb87c, 0x811a);
   1873 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1874 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0700);
   1875 	rge_write_phy_ocp(sc, 0xb87c, 0x811c);
   1876 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1877 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0500);
   1878 	rge_write_phy_ocp(sc, 0xb87c, 0x810f);
   1879 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1880 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0100);
   1881 	rge_write_phy_ocp(sc, 0xb87c, 0x8111);
   1882 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1883 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0100);
   1884 	rge_write_phy_ocp(sc, 0xb87c, 0x811d);
   1885 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1886 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0100);
   1887 	RGE_PHY_SETBIT(sc, 0xac36, 0x1000);
   1888 	RGE_PHY_CLRBIT(sc, 0xad1c, 0x0100);
   1889 	val = rge_read_phy_ocp(sc, 0xade8) & ~0xffc0;
   1890 	rge_write_phy_ocp(sc, 0xade8, val | 0x1400);
   1891 	rge_write_phy_ocp(sc, 0xb87c, 0x864b);
   1892 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1893 	rge_write_phy_ocp(sc, 0xb87e, val | 0x9d00);
   1894 
   1895 	rge_write_phy_ocp(sc, 0xa436, 0x8f97);
   1896 	for (; i < nitems(mac_cfg2_a438_value); i++)
   1897 		rge_write_phy_ocp(sc, 0xa438, mac_cfg2_a438_value[i]);
   1898 
   1899 	RGE_PHY_SETBIT(sc, 0xad9c, 0x0020);
   1900 	rge_write_phy_ocp(sc, 0xb87c, 0x8122);
   1901 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1902 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0c00);
   1903 
   1904 	rge_write_phy_ocp(sc, 0xb87c, 0x82c8);
   1905 	for (i = 0; i < 20; i++)
   1906 		rge_write_phy_ocp(sc, 0xb87e, mac_cfg2_b87e_value[i]);
   1907 
   1908 	rge_write_phy_ocp(sc, 0xb87c, 0x80ef);
   1909 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1910 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0c00);
   1911 
   1912 	rge_write_phy_ocp(sc, 0xb87c, 0x82a0);
   1913 	for (; i < nitems(mac_cfg2_b87e_value); i++)
   1914 		rge_write_phy_ocp(sc, 0xb87e, mac_cfg2_b87e_value[i]);
   1915 
   1916 	rge_write_phy_ocp(sc, 0xa436, 0x8018);
   1917 	RGE_PHY_SETBIT(sc, 0xa438, 0x2000);
   1918 	rge_write_phy_ocp(sc, 0xb87c, 0x8fe4);
   1919 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1920 	rge_write_phy_ocp(sc, 0xb87e, val | 0);
   1921 	val = rge_read_phy_ocp(sc, 0xb54c) & ~0xffc0;
   1922 	rge_write_phy_ocp(sc, 0xb54c, val | 0x3700);
   1923 }
   1924 
/*
 * PHY configuration for MAC_CFG2 chips (RTL8125 rev. A).
 *
 * Loads the chip-specific EPHY parameter table, brings the PHY MCU
 * microcode up to date, then applies a long sequence of PHY OCP
 * register tweaks.  The register numbers and values are vendor magic
 * taken from Realtek reference code; they have no public documentation,
 * so the exact order of operations must be preserved.
 */
void
rge_phy_config_mac_cfg2(struct rge_softc *sc)
{
	uint16_t val;
	int i;

	/* Load the EPHY initialization table for this chip revision. */
	for (i = 0; i < nitems(rtl8125_mac_cfg2_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg2_ephy[i].reg,
		    rtl8125_mac_cfg2_ephy[i].val);

	/* Install the PHY MCU microcode patch if not already current. */
	rge_phy_config_mcu(sc, RGE_MAC_CFG2_MCODE_VER);

	/* Read-modify-write tweaks of assorted PHY OCP registers. */
	val = rge_read_phy_ocp(sc, 0xad40) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0084);
	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad16, val | 0x0006);
	val = rge_read_phy_ocp(sc, 0xad32) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x1100);
	val = rge_read_phy_ocp(sc, 0xac8a) & ~0xf000;
	rge_write_phy_ocp(sc, 0xac8a, val | 0x7000);
	RGE_PHY_SETBIT(sc, 0xad18, 0x0400);
	RGE_PHY_SETBIT(sc, 0xad1a, 0x03ff);
	RGE_PHY_SETBIT(sc, 0xad1c, 0x03ff);

	/*
	 * 0xa436/0xa438 appear to form an indirect address/data pair:
	 * a parameter address is written to 0xa436, then its data is
	 * read/written through 0xa438.  TODO confirm against the vendor
	 * driver; all addresses below are vendor magic.
	 */
	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xc400);
	rge_write_phy_ocp(sc, 0xa436, 0x80eb);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0x0700;
	rge_write_phy_ocp(sc, 0xa438, val | 0x0300);
	rge_write_phy_ocp(sc, 0xa436, 0x80f8);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80f1);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x3000);
	rge_write_phy_ocp(sc, 0xa436, 0x80fe);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xa500);
	rge_write_phy_ocp(sc, 0xa436, 0x8102);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x8105);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x3300);
	rge_write_phy_ocp(sc, 0xa436, 0x8100);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x7000);
	rge_write_phy_ocp(sc, 0xa436, 0x8104);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xf000);
	rge_write_phy_ocp(sc, 0xa436, 0x8106);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6500);
	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xed00);
	rge_write_phy_ocp(sc, 0xa436, 0x80df);
	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
	rge_write_phy_ocp(sc, 0xa436, 0x80e1);
	RGE_PHY_CLRBIT(sc, 0xa438, 0x0100);
	val = rge_read_phy_ocp(sc, 0xbf06) & ~0x003f;
	rge_write_phy_ocp(sc, 0xbf06, val | 0x0038);
	rge_write_phy_ocp(sc, 0xa436, 0x819f);
	rge_write_phy_ocp(sc, 0xa438, 0xd0b6);
	rge_write_phy_ocp(sc, 0xbc34, 0x5555);
	val = rge_read_phy_ocp(sc, 0xbf0a) & ~0x0e00;
	rge_write_phy_ocp(sc, 0xbf0a, val | 0x0a00);
	RGE_PHY_CLRBIT(sc, 0xa5c0, 0x0400);
	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
}
   1998 
/*
 * PHY configuration for MAC_CFG3 chips (RTL8125 variant).
 *
 * Loads the EPHY parameter table plus extra per-register EPHY tweaks,
 * updates the PHY MCU microcode, then applies vendor-magic PHY OCP
 * register settings.  Register numbers and values come from Realtek
 * reference code and are not publicly documented, so the statement
 * order must be preserved.
 */
void
rge_phy_config_mac_cfg3(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint16_t val;
	int i;
	/* Vendor-magic data written through the 0xa436/0xa438 pair. */
	static const uint16_t mac_cfg3_a438_value[] =
	    { 0x0043, 0x00a7, 0x00d6, 0x00ec, 0x00f6, 0x00fb, 0x00fd, 0x00ff,
	      0x00bb, 0x0058, 0x0029, 0x0013, 0x0009, 0x0004, 0x0002 };

	/* Vendor-magic (address, data) pairs for the 0xb88e/0xb890 pair. */
	static const uint16_t mac_cfg3_b88e_value[] =
	    { 0xc091, 0x6e12, 0xc092, 0x1214, 0xc094, 0x1516, 0xc096, 0x171b,
	      0xc098, 0x1b1c, 0xc09a, 0x1f1f, 0xc09c, 0x2021, 0xc09e, 0x2224,
	      0xc0a0, 0x2424, 0xc0a2, 0x2424, 0xc0a4, 0x2424, 0xc018, 0x0af2,
	      0xc01a, 0x0d4a, 0xc01c, 0x0f26, 0xc01e, 0x118d, 0xc020, 0x14f3,
	      0xc022, 0x175a, 0xc024, 0x19c0, 0xc026, 0x1c26, 0xc089, 0x6050,
	      0xc08a, 0x5f6e, 0xc08c, 0x6e6e, 0xc08e, 0x6e6e, 0xc090, 0x6e12 };

	/* Load the EPHY initialization table for this chip revision. */
	for (i = 0; i < nitems(rtl8125_mac_cfg3_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg3_ephy[i].reg,
		    rtl8125_mac_cfg3_ephy[i].val);

	/* Additional per-register EPHY tweaks not covered by the table. */
	val = rge_read_ephy(sc, 0x002a) & ~0x7000;
	rge_write_ephy(sc, 0x002a, val | 0x3000);
	RGE_EPHY_CLRBIT(sc, 0x0019, 0x0040);
	RGE_EPHY_SETBIT(sc, 0x001b, 0x0e00);
	RGE_EPHY_CLRBIT(sc, 0x001b, 0x7000);
	rge_write_ephy(sc, 0x0002, 0x6042);
	rge_write_ephy(sc, 0x0006, 0x0014);
	val = rge_read_ephy(sc, 0x006a) & ~0x7000;
	rge_write_ephy(sc, 0x006a, val | 0x3000);
	RGE_EPHY_CLRBIT(sc, 0x0059, 0x0040);
	RGE_EPHY_SETBIT(sc, 0x005b, 0x0e00);
	RGE_EPHY_CLRBIT(sc, 0x005b, 0x7000);
	rge_write_ephy(sc, 0x0042, 0x6042);
	rge_write_ephy(sc, 0x0046, 0x0014);

	/* Install the PHY MCU microcode patch if not already current. */
	rge_phy_config_mcu(sc, RGE_MAC_CFG3_MCODE_VER);

	/* Read-modify-write tweaks of assorted PHY OCP registers. */
	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad16, val | 0x03ff);
	val = rge_read_phy_ocp(sc, 0xad32) & ~0x003f;
	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x1000);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x0100);
	val = rge_read_phy_ocp(sc, 0xacc0) & ~0x0003;
	rge_write_phy_ocp(sc, 0xacc0, val | 0x0002);
	val = rge_read_phy_ocp(sc, 0xad40) & ~0x00e0;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0040);
	val = rge_read_phy_ocp(sc, 0xad40) & ~0x0007;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0004);
	RGE_PHY_CLRBIT(sc, 0xac14, 0x0080);
	RGE_PHY_CLRBIT(sc, 0xac80, 0x0300);
	val = rge_read_phy_ocp(sc, 0xac5e) & ~0x0007;
	rge_write_phy_ocp(sc, 0xac5e, val | 0x0002);
	rge_write_phy_ocp(sc, 0xad4c, 0x00a8);
	rge_write_phy_ocp(sc, 0xac5c, 0x01ff);
	val = rge_read_phy_ocp(sc, 0xac8a) & ~0x00f0;
	rge_write_phy_ocp(sc, 0xac8a, val | 0x0030);
	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0500);
	rge_write_phy_ocp(sc, 0xb87c, 0x8159);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0700);
	/* EEE transmit idle timer scales with the configured MTU. */
	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN +
	    32);
	rge_write_phy_ocp(sc, 0xb87c, 0x80a2);
	rge_write_phy_ocp(sc, 0xb87e, 0x0153);
	rge_write_phy_ocp(sc, 0xb87c, 0x809c);
	rge_write_phy_ocp(sc, 0xb87e, 0x0153);

	/* Stream the magic table, then 26 zero words, through 0xa438. */
	rge_write_phy_ocp(sc, 0xa436, 0x81b3);
	for (i = 0; i < nitems(mac_cfg3_a438_value); i++)
		rge_write_phy_ocp(sc, 0xa438, mac_cfg3_a438_value[i]);
	for (i = 0; i < 26; i++)
		rge_write_phy_ocp(sc, 0xa438, 0);
	rge_write_phy_ocp(sc, 0xa436, 0x8257);
	rge_write_phy_ocp(sc, 0xa438, 0x020f);
	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
	rge_write_phy_ocp(sc, 0xa438, 0x7843);

	/* The 0xb88e/0xb890 pair writes must happen in MCU patch mode. */
	rge_patch_phy_mcu(sc, 1);
	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
	for (i = 0; i < nitems(mac_cfg3_b88e_value); i += 2) {
		rge_write_phy_ocp(sc, 0xb88e, mac_cfg3_b88e_value[i]);
		rge_write_phy_ocp(sc, 0xb890, mac_cfg3_b88e_value[i + 1]);
	}
	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
	rge_patch_phy_mcu(sc, 0);

	/* Final vendor-magic settings. */
	RGE_PHY_SETBIT(sc, 0xd068, 0x2000);
	rge_write_phy_ocp(sc, 0xa436, 0x81a2);
	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
	val = rge_read_phy_ocp(sc, 0xb54c) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb54c, val | 0xdb00);
	RGE_PHY_CLRBIT(sc, 0xa454, 0x0001);
	RGE_PHY_SETBIT(sc, 0xa5d4, 0x0020);
	RGE_PHY_CLRBIT(sc, 0xad4e, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xa86a, 0x0001);
	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
}
   2103 
/*
 * PHY configuration for MAC_CFG4 chips (RTL8125 variant).
 *
 * Loads the EPHY parameter table, updates the PHY MCU microcode, then
 * applies a long sequence of vendor-magic PHY OCP register settings.
 * Register numbers and values come from Realtek reference code and are
 * not publicly documented; the statement order must be preserved.
 */
void
rge_phy_config_mac_cfg4(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint16_t val;
	int i;
	/* Vendor-magic (address, data) pairs for the 0xb87c/0xb87e pair. */
	static const uint16_t mac_cfg4_b87c_value[] =
	    { 0x8013, 0x0700, 0x8fb9, 0x2801, 0x8fba, 0x0100, 0x8fbc, 0x1900,
	      0x8fbe, 0xe100, 0x8fc0, 0x0800, 0x8fc2, 0xe500, 0x8fc4, 0x0f00,
	      0x8fc6, 0xf100, 0x8fc8, 0x0400, 0x8fca, 0xf300, 0x8fcc, 0xfd00,
	      0x8fce, 0xff00, 0x8fd0, 0xfb00, 0x8fd2, 0x0100, 0x8fd4, 0xf400,
	      0x8fd6, 0xff00, 0x8fd8, 0xf600, 0x813d, 0x390e, 0x814f, 0x790e,
	      0x80b0, 0x0f31 };

	/* Load the EPHY initialization table for this chip revision. */
	for (i = 0; i < nitems(rtl8125_mac_cfg4_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg4_ephy[i].reg,
		    rtl8125_mac_cfg4_ephy[i].val);

	/* Pre-microcode PHY OCP setup (0xc402 bit is pulsed set/clear). */
	rge_write_phy_ocp(sc, 0xbf86, 0x9000);
	RGE_PHY_SETBIT(sc, 0xc402, 0x0400);
	RGE_PHY_CLRBIT(sc, 0xc402, 0x0400);
	rge_write_phy_ocp(sc, 0xbd86, 0x1010);
	rge_write_phy_ocp(sc, 0xbd88, 0x1010);
	val = rge_read_phy_ocp(sc, 0xbd4e) & ~0x0c00;
	rge_write_phy_ocp(sc, 0xbd4e, val | 0x0800);
	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
	rge_write_phy_ocp(sc, 0xbf46, val | 0x0700);

	/* Install the PHY MCU microcode patch if not already current. */
	rge_phy_config_mcu(sc, RGE_MAC_CFG4_MCODE_VER);

	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
	RGE_PHY_SETBIT(sc, 0xbc08, 0x000c);
	rge_write_phy_ocp(sc, 0xa436, 0x8fff);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x0400);
	/* Six consecutive parameters at 0x8560: first three get 0x19cc. */
	for (i = 0; i < 6; i++) {
		rge_write_phy_ocp(sc, 0xb87c, 0x8560 + i * 2);
		if (i < 3)
			rge_write_phy_ocp(sc, 0xb87e, 0x19cc);
		else
			rge_write_phy_ocp(sc, 0xb87e, 0x147d);
	}
	rge_write_phy_ocp(sc, 0xb87c, 0x8ffe);
	rge_write_phy_ocp(sc, 0xb87e, 0x0907);
	val = rge_read_phy_ocp(sc, 0xacda) & ~0xff00;
	rge_write_phy_ocp(sc, 0xacda, val | 0xff00);
	val = rge_read_phy_ocp(sc, 0xacde) & ~0xf000;
	rge_write_phy_ocp(sc, 0xacde, val | 0xf000);
	rge_write_phy_ocp(sc, 0xb87c, 0x80d6);
	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
	rge_write_phy_ocp(sc, 0xb87c, 0x80F2);
	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
	rge_write_phy_ocp(sc, 0xb87c, 0x80f4);
	rge_write_phy_ocp(sc, 0xb87e, 0x6077);
	rge_write_phy_ocp(sc, 0xb506, 0x01e7);
	rge_write_phy_ocp(sc, 0xac8c, 0x0ffc);
	rge_write_phy_ocp(sc, 0xac46, 0xb7b4);
	rge_write_phy_ocp(sc, 0xac50, 0x0fbc);
	rge_write_phy_ocp(sc, 0xac3c, 0x9240);
	rge_write_phy_ocp(sc, 0xac4E, 0x0db4);
	rge_write_phy_ocp(sc, 0xacc6, 0x0707);
	rge_write_phy_ocp(sc, 0xacc8, 0xa0d3);
	rge_write_phy_ocp(sc, 0xad08, 0x0007);
	/* Stream the magic (address, data) table through 0xb87c/0xb87e. */
	for (i = 0; i < nitems(mac_cfg4_b87c_value); i += 2) {
		rge_write_phy_ocp(sc, 0xb87c, mac_cfg4_b87c_value[i]);
		rge_write_phy_ocp(sc, 0xb87e, mac_cfg4_b87c_value[i + 1]);
	}
	RGE_PHY_SETBIT(sc, 0xbf4c, 0x0002);
	RGE_PHY_SETBIT(sc, 0xbcca, 0x0300);
	rge_write_phy_ocp(sc, 0xb87c, 0x8141);
	rge_write_phy_ocp(sc, 0xb87e, 0x320e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8153);
	rge_write_phy_ocp(sc, 0xb87e, 0x720e);
	RGE_PHY_CLRBIT(sc, 0xa432, 0x0040);
	rge_write_phy_ocp(sc, 0xb87c, 0x8529);
	rge_write_phy_ocp(sc, 0xb87e, 0x050e);
	/* EEE transmit idle timer scales with the configured MTU. */
	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN +
	    32);
	rge_write_phy_ocp(sc, 0xa436, 0x816c);
	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
	rge_write_phy_ocp(sc, 0xa436, 0x8170);
	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
	rge_write_phy_ocp(sc, 0xa436, 0x8174);
	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
	rge_write_phy_ocp(sc, 0xa436, 0x8178);
	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
	rge_write_phy_ocp(sc, 0xa436, 0x817c);
	rge_write_phy_ocp(sc, 0xa438, 0x0719);
	rge_write_phy_ocp(sc, 0xa436, 0x8ff4);
	rge_write_phy_ocp(sc, 0xa438, 0x0400);
	rge_write_phy_ocp(sc, 0xa436, 0x8ff1);
	rge_write_phy_ocp(sc, 0xa438, 0x0404);
	rge_write_phy_ocp(sc, 0xbf4a, 0x001b);
	/* Six parameters spaced 4 apart from 0x8033; only i==2 differs. */
	for (i = 0; i < 6; i++) {
		rge_write_phy_ocp(sc, 0xb87c, 0x8033 + i * 4);
		if (i == 2)
			rge_write_phy_ocp(sc, 0xb87e, 0xfc32);
		else
			rge_write_phy_ocp(sc, 0xb87e, 0x7c13);
	}
	rge_write_phy_ocp(sc, 0xb87c, 0x8145);
	rge_write_phy_ocp(sc, 0xb87e, 0x370e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
	rge_write_phy_ocp(sc, 0xb87e, 0x770e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8169);
	rge_write_phy_ocp(sc, 0xb87e, 0x0d0a);
	rge_write_phy_ocp(sc, 0xb87c, 0x817b);
	rge_write_phy_ocp(sc, 0xb87e, 0x1d0a);
	rge_write_phy_ocp(sc, 0xa436, 0x8217);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x821a);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x80da);
	rge_write_phy_ocp(sc, 0xa438, 0x0403);
	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
	rge_write_phy_ocp(sc, 0xa436, 0x80b3);
	rge_write_phy_ocp(sc, 0xa438, 0x0384);
	rge_write_phy_ocp(sc, 0xa436, 0x80b7);
	rge_write_phy_ocp(sc, 0xa438, 0x2007);
	rge_write_phy_ocp(sc, 0xa436, 0x80ba);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80b5);
	rge_write_phy_ocp(sc, 0xa438, 0xf009);
	rge_write_phy_ocp(sc, 0xa436, 0x80bd);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x9f00);
	rge_write_phy_ocp(sc, 0xa436, 0x80c7);
	rge_write_phy_ocp(sc, 0xa438, 0xf083);
	rge_write_phy_ocp(sc, 0xa436, 0x80dd);
	rge_write_phy_ocp(sc, 0xa438, 0x03f0);
	rge_write_phy_ocp(sc, 0xa436, 0x80df);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
	rge_write_phy_ocp(sc, 0xa436, 0x80cb);
	rge_write_phy_ocp(sc, 0xa438, 0x2007);
	rge_write_phy_ocp(sc, 0xa436, 0x80ce);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80c9);
	rge_write_phy_ocp(sc, 0xa438, 0x8009);
	rge_write_phy_ocp(sc, 0xa436, 0x80d1);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x8000);
	rge_write_phy_ocp(sc, 0xa436, 0x80a3);
	rge_write_phy_ocp(sc, 0xa438, 0x200a);
	rge_write_phy_ocp(sc, 0xa436, 0x80a5);
	rge_write_phy_ocp(sc, 0xa438, 0xf0ad);
	rge_write_phy_ocp(sc, 0xa436, 0x809f);
	rge_write_phy_ocp(sc, 0xa438, 0x6073);
	rge_write_phy_ocp(sc, 0xa436, 0x80a1);
	rge_write_phy_ocp(sc, 0xa438, 0x000b);
	rge_write_phy_ocp(sc, 0xa436, 0x80a9);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xc000);
	/* The 0xb88e/0xb890 pair writes must happen in MCU patch mode. */
	rge_patch_phy_mcu(sc, 1);
	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
	rge_write_phy_ocp(sc, 0xb88e, 0xc23e);
	rge_write_phy_ocp(sc, 0xb890, 0x0000);
	rge_write_phy_ocp(sc, 0xb88e, 0xc240);
	rge_write_phy_ocp(sc, 0xb890, 0x0103);
	rge_write_phy_ocp(sc, 0xb88e, 0xc242);
	rge_write_phy_ocp(sc, 0xb890, 0x0507);
	rge_write_phy_ocp(sc, 0xb88e, 0xc244);
	rge_write_phy_ocp(sc, 0xb890, 0x090b);
	rge_write_phy_ocp(sc, 0xb88e, 0xc246);
	rge_write_phy_ocp(sc, 0xb890, 0x0c0e);
	rge_write_phy_ocp(sc, 0xb88e, 0xc248);
	rge_write_phy_ocp(sc, 0xb890, 0x1012);
	rge_write_phy_ocp(sc, 0xb88e, 0xc24a);
	rge_write_phy_ocp(sc, 0xb890, 0x1416);
	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
	rge_patch_phy_mcu(sc, 0);
	/* Final vendor-magic settings. */
	RGE_PHY_SETBIT(sc, 0xa86a, 0x0001);
	RGE_PHY_SETBIT(sc, 0xa6f0, 0x0001);
	rge_write_phy_ocp(sc, 0xbfa0, 0xd70d);
	rge_write_phy_ocp(sc, 0xbfa2, 0x4100);
	rge_write_phy_ocp(sc, 0xbfa4, 0xe868);
	rge_write_phy_ocp(sc, 0xbfa6, 0xdc59);
	rge_write_phy_ocp(sc, 0xb54c, 0x3c18);
	RGE_PHY_CLRBIT(sc, 0xbfa4, 0x0020);
	rge_write_phy_ocp(sc, 0xa436, 0x817d);
	RGE_PHY_SETBIT(sc, 0xa438, 0x1000);
}
   2293 
/*
 * PHY configuration for MAC_CFG5 chips (RTL8125B).
 *
 * Loads the EPHY parameter table plus two extra EPHY tweaks, updates
 * the PHY MCU microcode, then applies vendor-magic PHY OCP register
 * settings.  Register numbers and values come from Realtek reference
 * code and are not publicly documented.
 */
void
rge_phy_config_mac_cfg5(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint16_t val;
	int i;

	/* Load the EPHY initialization table for this chip revision. */
	for (i = 0; i < nitems(rtl8125_mac_cfg5_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg5_ephy[i].reg,
		    rtl8125_mac_cfg5_ephy[i].val);

	/* Additional per-register EPHY tweaks not covered by the table. */
	val = rge_read_ephy(sc, 0x0022) & ~0x0030;
	rge_write_ephy(sc, 0x0022, val | 0x0020);
	val = rge_read_ephy(sc, 0x0062) & ~0x0030;
	rge_write_ephy(sc, 0x0062, val | 0x0020);

	/* Install the PHY MCU microcode patch if not already current. */
	rge_phy_config_mcu(sc, RGE_MAC_CFG5_MCODE_VER);

	/* Read-modify-write tweaks of assorted PHY OCP registers. */
	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
	val = rge_read_phy_ocp(sc, 0xac46) & ~0x00f0;
	rge_write_phy_ocp(sc, 0xac46, val | 0x0090);
	val = rge_read_phy_ocp(sc, 0xad30) & ~0x0003;
	rge_write_phy_ocp(sc, 0xad30, val | 0x0001);
	/* EEE transmit idle timer scales with the configured MTU. */
	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN +
	    32);
	rge_write_phy_ocp(sc, 0xb87c, 0x80f5);
	rge_write_phy_ocp(sc, 0xb87e, 0x760e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8107);
	rge_write_phy_ocp(sc, 0xb87e, 0x360e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8551);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0800);
	val = rge_read_phy_ocp(sc, 0xbf00) & ~0xe000;
	rge_write_phy_ocp(sc, 0xbf00, val | 0xa000);
	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
	rge_write_phy_ocp(sc, 0xbf46, val | 0x0300);
	/* Ten parameters spaced 6 apart from 0x8044, all set to 0x2417. */
	for (i = 0; i < 10; i++) {
		rge_write_phy_ocp(sc, 0xa436, 0x8044 + i * 6);
		rge_write_phy_ocp(sc, 0xa438, 0x2417);
	}
	RGE_PHY_SETBIT(sc, 0xa4ca, 0x0040);
	val = rge_read_phy_ocp(sc, 0xbf84) & ~0xe000;
	rge_write_phy_ocp(sc, 0xbf84, val | 0xa000);
}
   2338 
   2339 void
   2340 rge_phy_config_mcu(struct rge_softc *sc, uint16_t mcode_version)
   2341 {
   2342 	if (sc->rge_mcodever != mcode_version) {
   2343 		int i;
   2344 
   2345 		rge_patch_phy_mcu(sc, 1);
   2346 
   2347 		if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
   2348 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
   2349 			if (sc->rge_type == MAC_CFG2)
   2350 				rge_write_phy_ocp(sc, 0xa438, 0x8600);
   2351 			else
   2352 				rge_write_phy_ocp(sc, 0xa438, 0x8601);
   2353 			rge_write_phy_ocp(sc, 0xa436, 0xb82e);
   2354 			rge_write_phy_ocp(sc, 0xa438, 0x0001);
   2355 
   2356 			RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
   2357 		}
   2358 
   2359 		if (sc->rge_type == MAC_CFG2) {
   2360 			for (i = 0; i < nitems(rtl8125_mac_cfg2_mcu); i++) {
   2361 				rge_write_phy_ocp(sc,
   2362 				    rtl8125_mac_cfg2_mcu[i].reg,
   2363 				    rtl8125_mac_cfg2_mcu[i].val);
   2364 			}
   2365 		} else if (sc->rge_type == MAC_CFG3) {
   2366 			for (i = 0; i < nitems(rtl8125_mac_cfg3_mcu); i++) {
   2367 				rge_write_phy_ocp(sc,
   2368 				    rtl8125_mac_cfg3_mcu[i].reg,
   2369 				    rtl8125_mac_cfg3_mcu[i].val);
   2370 			}
   2371 		} else if (sc->rge_type == MAC_CFG4) {
   2372 			for (i = 0; i < nitems(rtl8125_mac_cfg4_mcu); i++) {
   2373 				rge_write_phy_ocp(sc,
   2374 				    rtl8125_mac_cfg4_mcu[i].reg,
   2375 				    rtl8125_mac_cfg4_mcu[i].val);
   2376 			}
   2377 		} else if (sc->rge_type == MAC_CFG5) {
   2378 			for (i = 0; i < nitems(rtl8125_mac_cfg5_mcu); i++) {
   2379 				rge_write_phy_ocp(sc,
   2380 				    rtl8125_mac_cfg5_mcu[i].reg,
   2381 				    rtl8125_mac_cfg5_mcu[i].val);
   2382 			}
   2383 		} else if (sc->rge_type == MAC_CFG2_8126) {
   2384 			for (i = 0; i < nitems(rtl8126_mac_cfg2_mcu); i++) {
   2385 				rge_write_phy_ocp(sc,
   2386 				    rtl8126_mac_cfg2_mcu[i].reg,
   2387 				    rtl8126_mac_cfg2_mcu[i].val);
   2388 			}
   2389 		}
   2390 
   2391 		if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
   2392 			RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);
   2393 
   2394 			rge_write_phy_ocp(sc, 0xa436, 0);
   2395 			rge_write_phy_ocp(sc, 0xa438, 0);
   2396 			RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
   2397 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
   2398 			rge_write_phy_ocp(sc, 0xa438, 0);
   2399 		}
   2400 
   2401 		rge_patch_phy_mcu(sc, 0);
   2402 
   2403 		/* Write microcode version. */
   2404 		rge_write_phy_ocp(sc, 0xa436, 0x801e);
   2405 		rge_write_phy_ocp(sc, 0xa438, mcode_version);
   2406 	}
   2407 }
   2408 
   2409 void
   2410 rge_set_macaddr(struct rge_softc *sc, const uint8_t *addr)
   2411 {
   2412 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
   2413 	RGE_WRITE_4(sc, RGE_MAC0,
   2414 	    (uint32_t)addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
   2415 	RGE_WRITE_4(sc, RGE_MAC4,
   2416 	    addr[5] <<  8 | addr[4]);
   2417 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
   2418 }
   2419 
   2420 void
   2421 rge_get_macaddr(struct rge_softc *sc, uint8_t *addr)
   2422 {
   2423 	int i;
   2424 
   2425 	for (i = 0; i < ETHER_ADDR_LEN; i++)
   2426 		addr[i] = RGE_READ_1(sc, RGE_ADDR0 + i);
   2427 }
   2428 
/*
 * One-time MAC/hardware initialization.
 *
 * Clears PME status and the clock-request bit (or an INT_CFG0 bit on
 * the RTL8126) under the RGE_EECMD write-config unlock, disables UPS,
 * resets the MAC MCU patch registers, loads the per-chip MAC
 * breakpoint table, disables PHY OCP power saving and sets a PCIe CSI
 * bit.  Ordering follows the vendor reference code.
 */
void
rge_hw_init(struct rge_softc *sc)
{
	int i;

	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
	if (sc->rge_type == MAC_CFG2_8126)
		RGE_CLRBIT_1(sc, RGE_INT_CFG0, 0x08);
	else
		RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
	/* Undocumented register 0xf1, bit 0x80: vendor magic. */
	RGE_CLRBIT_1(sc, 0xf1, 0x80);

	/* Disable UPS. */
	RGE_MAC_CLRBIT(sc, 0xd40a, 0x0010);

	/* Configure MAC MCU. */
	rge_write_mac_ocp(sc, 0xfc38, 0);

	/* Zero the MAC MCU patch registers 0xfc28-0xfc36. */
	for (i = 0xfc28; i < 0xfc38; i += 2)
		rge_write_mac_ocp(sc, i, 0);

	DELAY(3000);
	rge_write_mac_ocp(sc, 0xfc26, 0);

	/* Load the per-chip MAC breakpoint table, where one exists. */
	if (sc->rge_type == MAC_CFG3) {
		for (i = 0; i < nitems(rtl8125_mac_bps); i++) {
			rge_write_mac_ocp(sc, rtl8125_mac_bps[i].reg,
			    rtl8125_mac_bps[i].val);
		}
	} else if (sc->rge_type == MAC_CFG5) {
		for (i = 0; i < nitems(rtl8125b_mac_bps); i++) {
			rge_write_mac_ocp(sc, rtl8125b_mac_bps[i].reg,
			    rtl8125b_mac_bps[i].val);
		}
	}

	/* Disable PHY power saving. */
	if (sc->rge_type != MAC_CFG2_8126)
		rge_disable_phy_ocp_pwrsave(sc);

	/* Set PCIe uncorrectable error status. */
	rge_write_csi(sc, 0x108,
	    rge_read_csi(sc, 0x108) | 0x00100000);
}
   2475 
/*
 * Ensure PHY OCP power saving is disabled: if register 0xc416 does not
 * already read back 0x0500, rewrite it (cleared first, then 0x0500)
 * while the PHY MCU is in patch mode.
 */
void
rge_disable_phy_ocp_pwrsave(struct rge_softc *sc)
{
	if (rge_read_phy_ocp(sc, 0xc416) == 0x0500)
		return;

	rge_patch_phy_mcu(sc, 1);
	rge_write_phy_ocp(sc, 0xc416, 0);
	rge_write_phy_ocp(sc, 0xc416, 0x0500);
	rge_patch_phy_mcu(sc, 0);
}
   2486 
/*
 * Enter (set != 0) or leave (set == 0) PHY MCU patch mode by toggling
 * bit 0x0010 in PHY OCP register 0xb820, then poll register 0xb800
 * until bit 0x0040 is observed set, giving up after ~100ms.
 *
 * NOTE(review): the same "bit set" condition is polled for both enter
 * and leave; verify against the vendor driver whether leaving patch
 * mode should instead wait for the bit to clear.
 */
void
rge_patch_phy_mcu(struct rge_softc *sc, int set)
{
	int tries;

	if (set)
		RGE_PHY_SETBIT(sc, 0xb820, 0x0010);
	else
		RGE_PHY_CLRBIT(sc, 0xb820, 0x0010);

	for (tries = 0; tries < 1000; tries++) {
		if (rge_read_phy_ocp(sc, 0xb800) & 0x0040)
			break;
		DELAY(100);
	}
	if (tries == 1000)
		DPRINTF(("timeout waiting to patch phy mcu\n"));
}
   2507 
   2508 void
   2509 rge_add_media_types(struct rge_softc *sc)
   2510 {
   2511 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
   2512 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
   2513 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
   2514 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
   2515 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T, 0, NULL);
   2516 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
   2517 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
   2518 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX, 0, NULL);
   2519 
   2520 	if (sc->rge_type == MAC_CFG2_8126) {
   2521 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_5000_T, 0, NULL);
   2522 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_5000_T | IFM_FDX,
   2523 		    0, NULL);
   2524 	}
   2525 }
   2526 
   2527 void
   2528 rge_config_imtype(struct rge_softc *sc, int imtype)
   2529 {
   2530 	switch (imtype) {
   2531 	case RGE_IMTYPE_NONE:
   2532 		sc->rge_intrs = RGE_INTRS;
   2533 		sc->rge_rx_ack = RGE_ISR_RX_OK | RGE_ISR_RX_DESC_UNAVAIL |
   2534 		    RGE_ISR_RX_FIFO_OFLOW;
   2535 		sc->rge_tx_ack = RGE_ISR_TX_OK;
   2536 		break;
   2537 	case RGE_IMTYPE_SIM:
   2538 		sc->rge_intrs = RGE_INTRS_TIMER;
   2539 		sc->rge_rx_ack = RGE_ISR_PCS_TIMEOUT;
   2540 		sc->rge_tx_ack = RGE_ISR_PCS_TIMEOUT;
   2541 		break;
   2542 	default:
   2543 		panic("%s: unknown imtype %d", device_xname(sc->sc_dev), imtype);
   2544 	}
   2545 }
   2546 
/*
 * Disable hardware interrupt moderation by zeroing the RGE_IM register.
 */
void
rge_disable_hw_im(struct rge_softc *sc)
{
	RGE_WRITE_2(sc, RGE_IM, 0);
}
   2552 
/*
 * Disable simulated (timer-driven) interrupt moderation: stop the
 * TIMERINT0 timer and record in the softc that it is no longer in use.
 */
void
rge_disable_sim_im(struct rge_softc *sc)
{
	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
	sc->rge_timerintr = 0;
}
   2559 
/*
 * Enable simulated (timer-driven) interrupt moderation: program
 * TIMERINT0 with a fixed interval (0x2600, vendor magic) and write
 * RGE_TIMERCNT -- presumably to (re)start the countdown; confirm
 * against the chip documentation.
 */
void
rge_setup_sim_im(struct rge_softc *sc)
{
	RGE_WRITE_4(sc, RGE_TIMERINT0, 0x2600);
	RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
	sc->rge_timerintr = 1;
}
   2567 
   2568 void
   2569 rge_setup_intr(struct rge_softc *sc, int imtype)
   2570 {
   2571 	rge_config_imtype(sc, imtype);
   2572 
   2573 	/* Enable interrupts. */
   2574 	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
   2575 
   2576 	switch (imtype) {
   2577 	case RGE_IMTYPE_NONE:
   2578 		rge_disable_sim_im(sc);
   2579 		rge_disable_hw_im(sc);
   2580 		break;
   2581 	case RGE_IMTYPE_SIM:
   2582 		rge_disable_hw_im(sc);
   2583 		rge_setup_sim_im(sc);
   2584 		break;
   2585 	default:
   2586 		panic("%s: unknown imtype %d", device_xname(sc->sc_dev), imtype);
   2587 	}
   2588 }
   2589 
/*
 * Take the chip out of out-of-band (OOB/management) mode so that the
 * driver owns the hardware.  This mirrors the vendor bring-up
 * sequence; the raw OCP register numbers below are undocumented.
 */
void
rge_exit_oob(struct rge_softc *sc)
{
	int i;

	/* Stop accepting any packet type while reconfiguring. */
	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
	    RGE_RXCFG_ERRPKT);

	/* Disable RealWoW. */
	rge_write_mac_ocp(sc, 0xc0bc, 0x00ff);

	rge_reset(sc);

	/* Disable OOB. */
	RGE_CLRBIT_1(sc, RGE_MCUCMD, RGE_MCUCMD_IS_OOB);

	RGE_MAC_CLRBIT(sc, 0xe8de, 0x4000);

	/* Poll TWICMD bit 0x0200 for up to 1ms (meaning undocumented). */
	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
			break;
	}

	rge_write_mac_ocp(sc, 0xc0aa, 0x07d0);
	rge_write_mac_ocp(sc, 0xc0a6, 0x01b5);
	rge_write_mac_ocp(sc, 0xc01e, 0x5555);

	/* Same poll again after the magic OCP writes above. */
	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
			break;
	}

	/* Resuming from UPS: wait for the PHY state machine to settle. */
	if (rge_read_mac_ocp(sc, 0xd42c) & 0x0100) {
		printf("%s: rge_exit_oob(): rtl8125_is_ups_resume!!\n",
		    device_xname(sc->sc_dev));
		/* Wait until PHY OCP 0xa420 low bits report state 2. */
		for (i = 0; i < RGE_TIMEOUT; i++) {
			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 2)
				break;
			DELAY(1000);
		}
		RGE_MAC_CLRBIT(sc, 0xd408, 0x0100);
		if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5)
			RGE_PHY_CLRBIT(sc, 0xa466, 0x0001);
		RGE_PHY_CLRBIT(sc, 0xa468, 0x000a);
	}
}
   2639 
/*
 * Write a 32-bit value to extended PCIe configuration space through
 * the CSI (configuration space indirect) interface.  The busy bit
 * clears when the write has completed; poll for at most 1ms.
 */
void
rge_write_csi(struct rge_softc *sc, uint32_t reg, uint32_t val)
{
	int i;

	RGE_WRITE_4(sc, RGE_CSIDR, val);
	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT) | RGE_CSIAR_BUSY);

	for (i = 0; i < 10; i++) {
		 DELAY(100);
		 if (!(RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY))
			break;
	}

	DELAY(20);
}
   2657 
/*
 * Read a 32-bit value from extended PCIe configuration space via CSI.
 *
 * Note the inverted busy polarity relative to writes: the address is
 * written with RGE_CSIAR_BUSY clear, and the bit becoming *set*
 * signals that the read data is valid in RGE_CSIDR.  On timeout the
 * data register is returned as-is.
 */
uint32_t
rge_read_csi(struct rge_softc *sc, uint32_t reg)
{
	int i;

	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT));

	for (i = 0; i < 10; i++) {
		 DELAY(100);
		 if (RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY)
			break;
	}

	DELAY(20);

	return (RGE_READ_4(sc, RGE_CSIDR));
}
   2676 
   2677 void
   2678 rge_write_mac_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
   2679 {
   2680 	uint32_t tmp;
   2681 
   2682 	tmp = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
   2683 	tmp += val;
   2684 	tmp |= RGE_MACOCP_BUSY;
   2685 	RGE_WRITE_4(sc, RGE_MACOCP, tmp);
   2686 }
   2687 
   2688 uint16_t
   2689 rge_read_mac_ocp(struct rge_softc *sc, uint16_t reg)
   2690 {
   2691 	uint32_t val;
   2692 
   2693 	val = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
   2694 	RGE_WRITE_4(sc, RGE_MACOCP, val);
   2695 
   2696 	return (RGE_READ_4(sc, RGE_MACOCP) & RGE_MACOCP_DATA_MASK);
   2697 }
   2698 
   2699 void
   2700 rge_write_ephy(struct rge_softc *sc, uint16_t reg, uint16_t val)
   2701 {
   2702 	uint32_t tmp;
   2703 	int i;
   2704 
   2705 	tmp = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
   2706 	tmp |= RGE_EPHYAR_BUSY | (val & RGE_EPHYAR_DATA_MASK);
   2707 	RGE_WRITE_4(sc, RGE_EPHYAR, tmp);
   2708 
   2709 	for (i = 0; i < 10; i++) {
   2710 		DELAY(100);
   2711 		if (!(RGE_READ_4(sc, RGE_EPHYAR) & RGE_EPHYAR_BUSY))
   2712 			break;
   2713 	}
   2714 
   2715 	DELAY(20);
   2716 }
   2717 
/*
 * Read a 16-bit EPHY (PCIe PHY) register.
 *
 * Busy-bit polarity is inverted for reads: the address is written
 * with RGE_EPHYAR_BUSY clear, and the bit becoming *set* means the
 * read data is valid.  On timeout the last value read is returned.
 */
uint16_t
rge_read_ephy(struct rge_softc *sc, uint16_t reg)
{
	uint32_t val;
	int i;

	val = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
	RGE_WRITE_4(sc, RGE_EPHYAR, val);

	for (i = 0; i < 10; i++) {
		DELAY(100);
		val = RGE_READ_4(sc, RGE_EPHYAR);
		if (val & RGE_EPHYAR_BUSY)
			break;
	}

	DELAY(20);

	return (val & RGE_EPHYAR_DATA_MASK);
}
   2738 
   2739 void
   2740 rge_write_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg, uint16_t val)
   2741 {
   2742 	uint16_t off, phyaddr;
   2743 
   2744 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
   2745 	phyaddr <<= 4;
   2746 
   2747 	off = addr ? reg : 0x10 + (reg % 8);
   2748 
   2749 	phyaddr += (off - 16) << 1;
   2750 
   2751 	rge_write_phy_ocp(sc, phyaddr, val);
   2752 }
   2753 
   2754 uint16_t
   2755 rge_read_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg)
   2756 {
   2757 	uint16_t off, phyaddr;
   2758 
   2759 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
   2760 	phyaddr <<= 4;
   2761 
   2762 	off = addr ? reg : 0x10 + (reg % 8);
   2763 
   2764 	phyaddr += (off - 16) << 1;
   2765 
   2766 	return (rge_read_phy_ocp(sc, phyaddr));
   2767 }
   2768 
   2769 void
   2770 rge_write_phy_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
   2771 {
   2772 	uint32_t tmp;
   2773 	int i;
   2774 
   2775 	tmp = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
   2776 	tmp |= RGE_PHYOCP_BUSY | val;
   2777 	RGE_WRITE_4(sc, RGE_PHYOCP, tmp);
   2778 
   2779 	for (i = 0; i < RGE_TIMEOUT; i++) {
   2780 		DELAY(1);
   2781 		if (!(RGE_READ_4(sc, RGE_PHYOCP) & RGE_PHYOCP_BUSY))
   2782 			break;
   2783 	}
   2784 }
   2785 
/*
 * Read a 16-bit PHY OCP register.
 *
 * Busy-bit polarity is inverted for reads: the address is written
 * with RGE_PHYOCP_BUSY clear, and the bit becoming *set* means the
 * read data is valid in the low 16 bits.  On timeout the last value
 * read is returned.
 */
uint16_t
rge_read_phy_ocp(struct rge_softc *sc, uint16_t reg)
{
	uint32_t val;
	int i;

	val = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
	RGE_WRITE_4(sc, RGE_PHYOCP, val);

	for (i = 0; i < RGE_TIMEOUT; i++) {
		DELAY(1);
		val = RGE_READ_4(sc, RGE_PHYOCP);
		if (val & RGE_PHYOCP_BUSY)
			break;
	}

	return (val & RGE_PHYOCP_DATA_MASK);
}
   2804 
   2805 int
   2806 rge_get_link_status(struct rge_softc *sc)
   2807 {
   2808 	return ((RGE_READ_2(sc, RGE_PHYSTAT) & RGE_PHYSTAT_LINK) ? 1 : 0);
   2809 }
   2810 
/*
 * Kick the transmitter: poke the TX start register so the chip
 * (re)starts fetching TX descriptors.
 */
void
rge_txstart(void *arg)
{
	struct rge_softc *sc = arg;

	RGE_WRITE_2(sc, RGE_TXSTART, RGE_TXSTART_START);
}
   2818 
/*
 * Once-a-second callout: refresh the link state at splnet and
 * reschedule ourselves for the next tick.
 */
void
rge_tick(void *arg)
{
	struct rge_softc *sc = arg;
	int s;

	s = splnet();
	rge_link_state(sc);
	splx(s);

	callout_schedule(&sc->sc_timeout, hz);
}
   2831 
/*
 * Poll the current PHY link state and notify the network stack on
 * a transition.
 */
void
rge_link_state(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	int link = LINK_STATE_DOWN;

	if (rge_get_link_status(sc))
		link = LINK_STATE_UP;

	if (ifp->if_link_state != link) { /* XXX not safe to access */
		if_link_state_change(ifp, link);
	}
}
   2845 
/* Module interface */

/* Register the driver as a loadable kernel module in the "pci" class. */
MODULE(MODULE_CLASS_DRIVER, if_rge, "pci");

#ifdef _MODULE
/* Autoconfiguration glue generated from the module's ioconf file. */
#include "ioconf.c"
#endif
   2853 
   2854 static int
   2855 if_rge_modcmd(modcmd_t cmd, void *opaque)
   2856 {
   2857 	int error = 0;
   2858 
   2859 	switch (cmd) {
   2860 	case MODULE_CMD_INIT:
   2861 #ifdef _MODULE
   2862 		error = config_init_component(cfdriver_ioconf_rge,
   2863 		    cfattach_ioconf_rge, cfdata_ioconf_rge);
   2864 #endif
   2865 		return error;
   2866 	case MODULE_CMD_FINI:
   2867 #ifdef _MODULE
   2868 		error = config_fini_component(cfdriver_ioconf_rge,
   2869 		    cfattach_ioconf_rge, cfdata_ioconf_rge);
   2870 #endif
   2871 		return error;
   2872 	default:
   2873 		return ENOTTY;
   2874 	}
   2875 }
   2876 
   2877