      1 /*	$NetBSD: if_rge.c,v 1.40 2025/10/28 01:36:59 pgoyette Exp $	*/
      2 /*	$OpenBSD: if_rge.c,v 1.23 2023/12/22 05:28:14 kevlo Exp $	*/
      3 
      4 /*
      5  * Copyright (c) 2019, 2020, 2023 Kevin Lo <kevlo (at) openbsd.org>
      6  *
      7  * Permission to use, copy, modify, and distribute this software for any
      8  * purpose with or without fee is hereby granted, provided that the above
      9  * copyright notice and this permission notice appear in all copies.
     10  *
     11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
     12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
     13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
     14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
     15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
     16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
     17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
     18  */
     19 
     20 #include <sys/cdefs.h>
     21 __KERNEL_RCSID(0, "$NetBSD: if_rge.c,v 1.40 2025/10/28 01:36:59 pgoyette Exp $");
     22 
     23 #if defined(_KERNEL_OPT)
     24 #include "opt_net_mpsafe.h"
     25 #endif
     26 
     27 #include <sys/types.h>
     28 
     29 #include <sys/param.h>
     30 #include <sys/systm.h>
     31 #include <sys/sockio.h>
     32 #include <sys/mbuf.h>
     33 #include <sys/kernel.h>
     34 #include <sys/socket.h>
     35 #include <sys/device.h>
     36 #include <sys/endian.h>
     37 #include <sys/callout.h>
     38 #include <sys/workqueue.h>
     39 #include <sys/module.h>
     40 
     41 #include <net/if.h>
     42 
     43 #include <net/if_dl.h>
     44 #include <net/if_ether.h>
     45 
     46 #include <net/if_media.h>
     47 
     48 #include <netinet/in.h>
     49 #include <net/if_ether.h>
     50 
     51 #include <net/bpf.h>
     52 
     53 #include <sys/bus.h>
     54 #include <machine/intr.h>
     55 
     56 #include <dev/mii/mii.h>
     57 
     58 #include <dev/pci/pcivar.h>
     59 #include <dev/pci/pcireg.h>
     60 #include <dev/pci/pcidevs.h>
     61 
     62 #include <dev/pci/if_rgereg.h>
     63 
     64 /* interrupt type counts */
     65 
     66 int counts[PCI_INTR_TYPE_SIZE] = {
     67 	[PCI_INTR_TYPE_INTX] = 1,
     68 	[PCI_INTR_TYPE_MSI] = 1,
     69 	[PCI_INTR_TYPE_MSIX] = 1,
     70  };
     71 	int max_type = PCI_INTR_TYPE_MSIX;
     72 
     73 #ifdef __NetBSD__
     74 #define letoh32 	htole32
     75 #define nitems(x) 	__arraycount(x)
     76 
     77 static struct mbuf *
     78 MCLGETL(struct rge_softc *sc __unused, int how,
     79     u_int size)
     80 {
     81 	struct mbuf *m;
     82 
     83 	MGETHDR(m, how, MT_DATA);
     84 	if (m == NULL)
     85 		return NULL;
     86 
     87 	MEXTMALLOC(m, size, how);
     88 	if ((m->m_flags & M_EXT) == 0) {
     89 		m_freem(m);
     90 		return NULL;
     91 	}
     92 	return m;
     93 }
     94 
     95 #ifdef NET_MPSAFE
     96 #define 	RGE_MPSAFE	1
     97 #define 	CALLOUT_FLAGS	CALLOUT_MPSAFE
     98 #else
     99 #define 	CALLOUT_FLAGS	0
    100 #endif
    101 #endif
    102 
    103 #ifdef RGE_DEBUG
    104 #define DPRINTF(x)	do { if (rge_debug > 0) printf x; } while (0)
    105 int rge_debug = 0;
    106 #else
    107 #define DPRINTF(x)
    108 #endif
    109 
    110 static int		rge_match(device_t, cfdata_t, void *);
    111 static void		rge_attach(device_t, device_t, void *);
    112 static int		rge_detach(device_t, int);
    113 
    114 int		rge_intr(void *);
    115 int		rge_encap(struct rge_softc *, struct mbuf *, int);
    116 int		rge_ioctl(struct ifnet *, u_long, void *);
    117 void		rge_start(struct ifnet *);
    118 void		rge_watchdog(struct ifnet *);
    119 int		rge_init(struct ifnet *);
    120 void		rge_stop(struct ifnet *, int);
    121 int		rge_ifmedia_upd(struct ifnet *);
    122 void		rge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
    123 static int	rge_allocmem(struct rge_softc *);
    124 static int	rge_deallocmem(struct rge_softc *);
    125 int		rge_newbuf(struct rge_softc *, int);
    126 static int	rge_rx_list_init(struct rge_softc *);
    127 static void	rge_rx_list_fini(struct rge_softc *);
    128 static void	rge_tx_list_init(struct rge_softc *);
    129 static void	rge_tx_list_fini(struct rge_softc *);
    130 int		rge_rxeof(struct rge_softc *);
    131 int		rge_txeof(struct rge_softc *);
    132 void		rge_reset(struct rge_softc *);
    133 void		rge_iff(struct rge_softc *);
    134 void		rge_chipinit(struct rge_softc *);
    135 void		rge_set_phy_power(struct rge_softc *, int);
    136 void		rge_ephy_config(struct rge_softc *);
    137 void		rge_ephy_config_mac_cfg3(struct rge_softc *);
    138 void		rge_ephy_config_mac_cfg5(struct rge_softc *);
    139 int		rge_phy_config(struct rge_softc *);
    140 void		rge_phy_config_mac_cfg3(struct rge_softc *);
    141 void		rge_phy_config_mac_cfg5(struct rge_softc *);
    142 void		rge_phy_config_mac_cfg2_8126(struct rge_softc *);
    143 void		rge_phy_config_mac_cfg2(struct rge_softc *);
    144 void		rge_phy_config_mac_cfg3(struct rge_softc *);
    145 void		rge_phy_config_mac_cfg4(struct rge_softc *);
    146 void		rge_phy_config_mac_cfg5(struct rge_softc *);
    147 void		rge_phy_config_mcu(struct rge_softc *, uint16_t);
    148 void		rge_set_macaddr(struct rge_softc *, const uint8_t *);
    149 void		rge_get_macaddr(struct rge_softc *, uint8_t *);
    150 void		rge_hw_init(struct rge_softc *);
    151 void		rge_hw_reset(struct rge_softc *);
    152 void		rge_disable_phy_ocp_pwrsave(struct rge_softc *);
    153 void		rge_patch_phy_mcu(struct rge_softc *, int);
    154 void		rge_add_media_types(struct rge_softc *);
    155 void		rge_config_imtype(struct rge_softc *, int);
    156 void		rge_disable_aspm_clkreq(struct rge_softc *);
    157 void		rge_disable_hw_im(struct rge_softc *);
    158 void		rge_disable_sim_im(struct rge_softc *);
    159 void		rge_setup_sim_im(struct rge_softc *);
    160 void		rge_setup_intr(struct rge_softc *, int);
    161 void		rge_switch_mcu_ram_page(struct rge_softc *, int);
    162 void		rge_exit_oob(struct rge_softc *);
    163 void		rge_write_csi(struct rge_softc *, uint32_t, uint32_t);
    164 uint32_t	rge_read_csi(struct rge_softc *, uint32_t);
    165 void		rge_write_mac_ocp(struct rge_softc *, uint16_t, uint16_t);
    166 uint16_t	rge_read_mac_ocp(struct rge_softc *, uint16_t);
    167 void		rge_write_ephy(struct rge_softc *, uint16_t, uint16_t);
    168 uint16_t	rge_read_ephy(struct rge_softc *, uint16_t);
    169 void		rge_write_phy(struct rge_softc *, uint16_t, uint16_t, uint16_t);
    170 uint16_t	rge_read_phy(struct rge_softc *, uint16_t, uint16_t);
    171 void		rge_write_phy_ocp(struct rge_softc *, uint16_t, uint16_t);
    172 uint16_t	rge_read_phy_ocp(struct rge_softc *, uint16_t);
    173 int		rge_get_link_status(struct rge_softc *);
    174 void		rge_txstart(void *);
    175 void		rge_tick(void *);
    176 void		rge_link_state(struct rge_softc *);
    177 
/*
 * MCU (on-chip microcontroller) patch tables: register/value pairs
 * written to the chip by rge_phy_config_mcu().  The contents are
 * expanded from the RTL81xx_MAC_CFGx_MCU macros in if_rgereg.h,
 * one table per supported MAC revision.
 */
static const struct {
	uint16_t reg;
	uint16_t val;
}  rtl8125_mac_cfg2_mcu[] = {
	RTL8125_MAC_CFG2_MCU
}, rtl8125_mac_cfg3_mcu[] = {
	RTL8125_MAC_CFG3_MCU
}, rtl8125_mac_cfg4_mcu[] = {
	RTL8125_MAC_CFG4_MCU
}, rtl8125_mac_cfg5_mcu[] = {
	RTL8125_MAC_CFG5_MCU
}, rtl8126_mac_cfg2_mcu[] = {
	RTL8126_MAC_CFG2_MCU
};
    192 
/* Autoconfiguration glue: register driver "rge" with its match/attach/detach. */
CFATTACH_DECL_NEW(rge, sizeof(struct rge_softc), rge_match, rge_attach,
		rge_detach, NULL);
    195 
/* PCI vendor/product IDs accepted by rge_match(). */
static const struct device_compatible_entry compat_data[] = {
	{ .id = PCI_ID_CODE(PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_E3000) },
	{ .id = PCI_ID_CODE(PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8125) },
	{ .id = PCI_ID_CODE(PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8126) },

	PCI_COMPAT_EOL
};
    203 
    204 static int
    205 rge_match(device_t parent, cfdata_t match, void *aux)
    206 {
    207 	struct pci_attach_args *pa =aux;
    208 
    209 	return pci_compatible_match(pa, compat_data);
    210 }
    211 
    212 void
    213 rge_attach(device_t parent, device_t self, void *aux)
    214 {
    215 	struct rge_softc *sc = device_private(self);
    216 	struct pci_attach_args *pa = aux;
    217 	pci_chipset_tag_t pc = pa->pa_pc;
    218 	char intrbuf[PCI_INTRSTR_LEN];
    219 	const char *intrstr = NULL;
    220 	struct ifnet *ifp;
    221 	pcireg_t reg;
    222 	uint32_t hwrev;
    223 	uint8_t eaddr[ETHER_ADDR_LEN];
    224 	int offset;
    225 	pcireg_t command;
    226 	const char *revstr;
    227 
    228 	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
    229 
    230 	sc->sc_dev = self;
    231 
    232 	pci_aprint_devinfo(pa, "Ethernet controller");
    233 
    234 	/*
    235 	 * Map control/status registers.
    236 	 */
    237 	if (pci_mapreg_map(pa, RGE_PCI_BAR2, PCI_MAPREG_TYPE_MEM |
    238 	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->rge_btag, &sc->rge_bhandle,
    239 	    NULL, &sc->rge_bsize)) {
    240 		if (pci_mapreg_map(pa, RGE_PCI_BAR1, PCI_MAPREG_TYPE_MEM |
    241 		    PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->rge_btag,
    242 		    &sc->rge_bhandle, NULL, &sc->rge_bsize)) {
    243 			if (pci_mapreg_map(pa, RGE_PCI_BAR0, PCI_MAPREG_TYPE_IO,
    244 			    0, &sc->rge_btag, &sc->rge_bhandle, NULL,
    245 			    &sc->rge_bsize)) {
    246 				aprint_error(": can't map mem or i/o space\n");
    247 				return;
    248 			}
    249 		}
    250 	}
    251 
    252 	/*
    253 	 * Allocate interrupt.
    254 	 */
    255 	if (pci_intr_alloc(pa, &sc->sc_intrs, NULL, 0) != 0) {
    256 		aprint_error(": couldn't map interrupt\n");
    257 		return;
    258 	}
    259 	switch (pci_intr_type(pc, sc->sc_intrs[0])) {
    260 	case PCI_INTR_TYPE_MSIX:
    261 	case PCI_INTR_TYPE_MSI:
    262 		sc->rge_flags |= RGE_FLAG_MSI;
    263 		break;
    264 	default:
    265 		break;
    266 	}
    267 	intrstr = pci_intr_string(pc, sc->sc_intrs[0],
    268 	    intrbuf, sizeof(intrbuf));
    269 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
    270 	    IPL_NET, rge_intr, sc, device_xname(sc->sc_dev));
    271 
    272 	if (sc->sc_ihs[0] == NULL) {
    273 		aprint_error_dev(sc->sc_dev, ": couldn't establish interrupt");
    274 		if (intrstr != NULL)
    275 			aprint_error(" at %s\n", intrstr);
    276 		aprint_error("\n");
    277 		return;
    278 	}
    279 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
    280 
    281 	if (pci_dma64_available(pa))
    282 		sc->sc_dmat = pa->pa_dmat64;
    283 	else
    284 		sc->sc_dmat = pa->pa_dmat;
    285 
    286 	sc->sc_pc = pa->pa_pc;
    287 	sc->sc_tag = pa->pa_tag;
    288 
    289 	/* Determine hardware revision */
    290 	hwrev = RGE_READ_4(sc, RGE_TXCFG) & RGE_TXCFG_HWREV;
    291 	switch (hwrev) {
    292 	case 0x60800000:
    293 		sc->rge_type = MAC_CFG2;
    294 		revstr = "Z1";
    295 		break;
    296 	case 0x60900000:
    297 		sc->rge_type = MAC_CFG3;
    298 		revstr = "Z2";
    299 		break;
    300 	case 0x64000000:
    301 		sc->rge_type = MAC_CFG4;
    302 		revstr = "A";
    303 		break;
    304 	case 0x64100000:
    305 		sc->rge_type = MAC_CFG5;
    306 		revstr = "B";
    307 		break;
    308 #if 0
    309 	case 0x64800000:
    310 		sc->rge_type = MAC_CFG1_8126;
    311 		revstr = "A";
    312 		break;
    313 #endif
    314 	case 0x64900000:
    315 		sc->rge_type = MAC_CFG2_8126;
    316 		revstr = "A";
    317 		break;
    318 #if 0
    319 	case 0x64a00000:
    320 		sc->rge_type = MAC_CFG3_8126;
    321 		revstr = "A";
    322 		break;
    323 #endif
    324 	default:
    325 		aprint_error(": unknown version 0x%08x\n", hwrev);
    326 		return;
    327 	}
    328 
    329 	aprint_normal_dev(sc->sc_dev, "HW rev. %s\n", revstr);
    330 	rge_config_imtype(sc, RGE_IMTYPE_SIM);
    331 
    332 	/*
    333 	 * PCI Express check.
    334 	 */
    335 	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
    336 	    &offset, NULL)) {
    337 		/* Disable PCIe ASPM and ECPM. */
    338 		reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
    339 		    offset + PCIE_LCSR);
    340 		reg &= ~(PCIE_LCSR_ASPM_L0S | PCIE_LCSR_ASPM_L1 |
    341 		    PCIE_LCSR_ENCLKPM);
    342 		pci_conf_write(pa->pa_pc, pa->pa_tag, offset + PCIE_LCSR,
    343 		    reg);
    344 	}
    345 
    346 	rge_chipinit(sc);
    347 
    348 	rge_get_macaddr(sc, eaddr);
    349 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
    350 	    ether_sprintf(eaddr));
    351 
    352 	memcpy(sc->sc_enaddr, eaddr, ETHER_ADDR_LEN);
    353 
    354 	if (rge_allocmem(sc))
    355 		return;
    356 
    357 	ifp = &sc->sc_ec.ec_if;
    358 	ifp->if_softc = sc;
    359 	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
    360 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    361 #ifdef RGE_MPSAFE
    362 	ifp->if_extflags = IFEF_MPSAFE;
    363 #endif
    364 	ifp->if_ioctl = rge_ioctl;
    365 	ifp->if_stop = rge_stop;
    366 	ifp->if_start = rge_start;
    367 	ifp->if_init = rge_init;
    368 	ifp->if_watchdog = rge_watchdog;
    369 	IFQ_SET_MAXLEN(&ifp->if_snd, RGE_TX_LIST_CNT - 1);
    370 
    371 #if notyet
    372 	ifp->if_capabilities = IFCAP_CSUM_IPv4_Rx |
    373 	    IFCAP_CSUM_IPv4_Tx |IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_TCPv4_Tx|
    374 	    IFCAP_CSUM_UDPv4_Rx | IFCAP_CSUM_UDPv4_Tx;
    375 #endif
    376 
    377 	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
    378 	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
    379 
    380 	callout_init(&sc->sc_timeout, CALLOUT_FLAGS);
    381 	callout_setfunc(&sc->sc_timeout, rge_tick, sc);
    382 
    383 	command = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
    384 	command |= PCI_COMMAND_MASTER_ENABLE;
    385 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
    386 
    387 	/* Initialize ifmedia structures. */
    388 	sc->sc_ec.ec_ifmedia = &sc->sc_media;
    389 	ifmedia_init(&sc->sc_media, IFM_IMASK, rge_ifmedia_upd,
    390 	    rge_ifmedia_sts);
    391 	rge_add_media_types(sc);
    392 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
    393 	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
    394 	sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media;
    395 
    396 	if_attach(ifp);
    397 	if_deferred_start_init(ifp, NULL);
    398 	ether_ifattach(ifp, eaddr);
    399 
    400 	if (pmf_device_register(self, NULL, NULL))
    401 		pmf_class_network_register(self, ifp);
    402 	else
    403 		aprint_error_dev(self, "couldn't establish power handler\n");
    404 }
    405 
    406 static int
    407 rge_detach(device_t self, int flags)
    408 {
    409 	struct rge_softc *sc = device_private(self);
    410 	struct ifnet *ifp = &sc->sc_ec.ec_if;
    411 	pci_chipset_tag_t pc = sc->sc_pc;
    412 
    413 	rge_stop(ifp, 1);
    414 
    415 	pmf_device_deregister(self);
    416 
    417 	ether_ifdetach(ifp);
    418 
    419 	if_detach(ifp);
    420 
    421 	ifmedia_fini(&sc->sc_media);
    422 
    423 	if (sc->sc_ihs[0] != NULL) {
    424 		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[0]);
    425 		sc->sc_ihs[0] = NULL;
    426 	}
    427 
    428 	pci_intr_release(pc, sc->sc_intrs, 1);
    429 
    430 	rge_deallocmem(sc);
    431 
    432         return 0;
    433 }
    434 
/*
 * Interrupt handler.  Acknowledges pending sources, dispatches RX/TX
 * completions, and switches between timer-moderated ("simulated")
 * interrupts and plain RX/TX interrupts based on observed activity.
 * Returns nonzero if the interrupt was ours.
 */
int
rge_intr(void *arg)
{
	struct rge_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint32_t status;
	int claimed = 0, rx, tx;

	if (!(ifp->if_flags & IFF_RUNNING))
		return (0);

	/* Disable interrupts. */
	RGE_WRITE_4(sc, RGE_IMR, 0);

	if (!(sc->rge_flags & RGE_FLAG_MSI)) {
		/*
		 * INTx may be shared: bail out if none of our sources
		 * are pending.  NOTE(review): this path returns with
		 * RGE_IMR still zeroed -- confirm against the OpenBSD
		 * original that interrupts cannot be left masked here.
		 */
		if ((RGE_READ_4(sc, RGE_ISR) & sc->rge_intrs) == 0)
			return (0);
	}

	/* Read and acknowledge all pending sources at once. */
	status = RGE_READ_4(sc, RGE_ISR);
	if (status)
		RGE_WRITE_4(sc, RGE_ISR, status);

	if (status & RGE_ISR_PCS_TIMEOUT)
		claimed = 1;

	rx = tx = 0;
	if (status & sc->rge_intrs) {
		if (status &
		    (sc->rge_rx_ack | RGE_ISR_RX_ERR | RGE_ISR_RX_FIFO_OFLOW)) {
			rx |= rge_rxeof(sc);
			claimed = 1;
		}

		if (status & (sc->rge_tx_ack | RGE_ISR_TX_ERR)) {
			tx |= rge_txeof(sc);
			claimed = 1;
		}

		/* Fatal chip error: reinitialize the interface. */
		if (status & RGE_ISR_SYSTEM_ERR) {
			KERNEL_LOCK(1, NULL);
			rge_init(ifp);
			KERNEL_UNLOCK_ONE(NULL);
			claimed = 1;
		}
	}

	if (sc->rge_timerintr) {
		if ((tx | rx) == 0) {
			/*
			 * Nothing needs to be processed, fallback
			 * to use TX/RX interrupts.
			 */
			rge_setup_intr(sc, RGE_IMTYPE_NONE);

			/*
			 * Recollect, mainly to avoid the possible
			 * race introduced by changing interrupt
			 * masks.
			 */
			rge_rxeof(sc);
			rge_txeof(sc);
		} else
			RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
	} else if (tx | rx) {
		/*
		 * Assume that using simulated interrupt moderation
		 * (hardware timer based) could reduce the interrupt
		 * rate.
		 */
		rge_setup_intr(sc, RGE_IMTYPE_SIM);
	}

	/* Re-enable our interrupt sources. */
	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);

	return (claimed);
}
    512 
/*
 * Build TX descriptors for mbuf chain 'm' starting at ring slot 'idx'.
 * Returns the number of descriptors consumed, or 0 on failure (the
 * caller is responsible for freeing the mbuf in that case).
 */
int
rge_encap(struct rge_softc *sc, struct mbuf *m, int idx)
{
	struct rge_tx_desc *d = NULL;
	struct rge_txq *txq;
	bus_dmamap_t txmap;
	uint32_t cmdsts, cflags = 0;
	int cur, error, i, last, nsegs;

#if notyet
	/*
	 * Set RGE_TDEXTSTS_IPCSUM if any checksum offloading is requested.
	 * Otherwise, RGE_TDEXTSTS_TCPCSUM / RGE_TDEXTSTS_UDPCSUM does not
	 * take affect.
	 */
	if ((m->m_pkthdr.csum_flags &
	    (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) != 0) {
		cflags |= RGE_TDEXTSTS_IPCSUM;
		if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
			cflags |= RGE_TDEXTSTS_TCPCSUM;
		if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
			cflags |= RGE_TDEXTSTS_UDPCSUM;
	}
#endif

	txq = &sc->rge_ldata.rge_txq[idx];
	txmap = txq->txq_dmamap;

	/* Load the chain for DMA; if too fragmented, defragment and retry. */
	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m, BUS_DMA_NOWAIT);
	switch (error) {
	case 0:
		break;
	case EFBIG: /* mbuf chain is too fragmented */
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m,
		    BUS_DMA_NOWAIT) == 0)
			break;

		/* FALLTHROUGH */
	default:
		return (0);
	}

	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	nsegs = txmap->dm_nsegs;

	/* Set up hardware VLAN tagging. */
	if (vlan_has_tag(m))
		cflags |= bswap16(vlan_get_tag(m)) | RGE_TDEXTSTS_VTAG;

	/* Write one descriptor per DMA segment; SOF only on the first. */
	last = cur = idx;
	cmdsts = RGE_TDCMDSTS_SOF;

	for (i = 0; i < txmap->dm_nsegs; i++) {
		d = &sc->rge_ldata.rge_tx_list[cur];

		d->rge_extsts = htole32(cflags);
		d->rge_addrlo = htole32(RGE_ADDR_LO(txmap->dm_segs[i].ds_addr));
		d->rge_addrhi = htole32(RGE_ADDR_HI(txmap->dm_segs[i].ds_addr));

		cmdsts |= txmap->dm_segs[i].ds_len;

		/* Mark the physical end of the descriptor ring. */
		if (cur == RGE_TX_LIST_CNT - 1)
			cmdsts |= RGE_TDCMDSTS_EOR;

		d->rge_cmdsts = htole32(cmdsts);

		last = cur;
		/* Descriptors after the first start out owned by the chip. */
		cmdsts = RGE_TDCMDSTS_OWN;
		cur = RGE_NEXT_TX_DESC(cur);
	}

	/* Set EOF on the last descriptor. */
	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_EOF);

	/* Transfer ownership of packet to the chip. */
	d = &sc->rge_ldata.rge_tx_list[idx];

	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_OWN);

	/*
	 * NOTE(review): this syncs a single descriptor at offset 'cur'
	 * (one slot past the last one written) rather than the
	 * idx..last range actually modified -- verify against the
	 * bus_dma(9) requirements and the OpenBSD original.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
	    cur * sizeof(struct rge_tx_desc), sizeof(struct rge_tx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Update info of TX queue and descriptors. */
	txq->txq_mbuf = m;
	txq->txq_descidx = last;

	return (nsegs);
}
    605 
    606 int
    607 rge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
    608 {
    609 	struct rge_softc *sc = ifp->if_softc;
    610 	//struct ifreq *ifr = (struct ifreq *)data;
    611 	int s, error = 0;
    612 
    613 	s = splnet();
    614 
    615 	switch (cmd) {
    616 	case SIOCSIFFLAGS:
    617 		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
    618 			break;
    619 		/* XXX set an ifflags callback and let ether_ioctl
    620 		 * handle all of this.
    621 		 */
    622 		if (ifp->if_flags & IFF_UP) {
    623 			if (ifp->if_flags & IFF_RUNNING)
    624 				error = ENETRESET;
    625 			else
    626 				rge_init(ifp);
    627 		} else {
    628 			if (ifp->if_flags & IFF_RUNNING)
    629 				rge_stop(ifp, 1);
    630 		}
    631 		break;
    632 	default:
    633 		error = ether_ioctl(ifp, cmd, data);
    634 	}
    635 
    636 	if (error == ENETRESET) {
    637 		if (ifp->if_flags & IFF_RUNNING)
    638 			rge_iff(sc);
    639 		error = 0;
    640 	}
    641 
    642 	splx(s);
    643 	return (error);
    644 }
    645 
/*
 * Dequeue packets from the interface send queue and encapsulate them
 * onto the TX ring until the ring is nearly full or the queue empties.
 */
void
rge_start(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int free, idx, used;
	int queued = 0;

#define LINK_STATE_IS_UP(_s)    \
	((_s) >= LINK_STATE_UP || (_s) == LINK_STATE_UNKNOWN)

	/* Without a link there is nowhere to send; drop the backlog. */
	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
		IFQ_PURGE(&ifp->if_snd);
		return;
	}

	/* Calculate free space. */
	idx = sc->rge_ldata.rge_txq_prodidx;
	free = sc->rge_ldata.rge_txq_considx;
	if (free <= idx)
		free += RGE_TX_LIST_CNT;
	free -= idx;

	for (;;) {
		/* Stop if a maximally fragmented packet may not fit. */
		if (RGE_TX_NSEGS >= free + 2) {
			SET(ifp->if_flags, IFF_OACTIVE);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		used = rge_encap(sc, m, idx);
		if (used == 0) {
			/* Encapsulation failed; drop this packet. */
			m_freem(m);
			continue;
		}

		KASSERT(used <= free);
		free -= used;

		/* Tap the outgoing packet for BPF listeners. */
		bpf_mtap(ifp, m, BPF_D_OUT);

		/* Advance the producer index, wrapping at ring end. */
		idx += used;
		if (idx >= RGE_TX_LIST_CNT)
			idx -= RGE_TX_LIST_CNT;

		queued++;
	}

	if (queued == 0)
		return;

	/* Set a timeout in case the chip goes out to lunch. */
	ifp->if_timer = 5;

	sc->rge_ldata.rge_txq_prodidx = idx;
	rge_txstart(sc);
}
    706 
    707 void
    708 rge_watchdog(struct ifnet *ifp)
    709 {
    710 	struct rge_softc *sc = ifp->if_softc;
    711 
    712 	device_printf(sc->sc_dev, "watchdog timeout\n");
    713 	if_statinc(ifp, if_oerrors);
    714 
    715 	rge_init(ifp);
    716 }
    717 
/*
 * (Re)initialize the interface: program the station address, set up
 * the RX/TX DMA rings, run chip and PHY configuration, then enable
 * the receiver, transmitter, and interrupts.  Returns 0 on success
 * or an errno on failure.
 */
int
rge_init(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	uint32_t val;
	unsigned i;
	int num_miti;

	rge_stop(ifp, 0);

	/* Set MAC address. */
	rge_set_macaddr(sc, CLLADDR(ifp->if_sadl));

	/* Initialize RX descriptors list. */
	int error = rge_rx_list_init(sc);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "init failed: no memory for RX buffers\n");
		rge_stop(ifp, 1);
		return error;
	}

	/* Initialize TX descriptors. */
	rge_tx_list_init(sc);

	rge_chipinit(sc);

	error = rge_phy_config(sc);
	if (error)
		return error;

	/* Unlock the configuration registers for writing. */
	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);

	RGE_CLRBIT_1(sc, 0xf1, 0x80);
	rge_disable_aspm_clkreq(sc);
	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER,
	    RGE_JUMBO_MTU + ETHER_HDR_LEN + 32);
	RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_RDY_TO_L23);

	/* Load the addresses of the RX and TX lists into the chip. */
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_LO,
	    RGE_ADDR_LO(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_HI,
	    RGE_ADDR_HI(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_LO,
	    RGE_ADDR_LO(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_HI,
	    RGE_ADDR_HI(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));

	/* Set the initial RX and TX configurations. */
	if (sc->rge_type == MAC_CFG2_8126)
		RGE_WRITE_4(sc, RGE_RXCFG, RGE_RXCFG_CONFIG_8126);
	else
		RGE_WRITE_4(sc, RGE_RXCFG,
		    (sc->rge_type == MAC_CFG3) ? RGE_RXCFG_CONFIG :
		    RGE_RXCFG_CONFIG_8125B);
	RGE_WRITE_4(sc, RGE_TXCFG, RGE_TXCFG_CONFIG);

	/*
	 * The writes below poke undocumented CSI and MAC-OCP registers;
	 * the addresses and values follow Realtek's reference driver
	 * and should not be changed independently.
	 */
	val = rge_read_csi(sc, 0x70c) & ~0xff000000;
	rge_write_csi(sc, 0x70c, val | 0x27000000);

	if (sc->rge_type == MAC_CFG2_8126) {
		/* Disable L1 timeout. */
		val = rge_read_csi(sc, 0x890) & ~0x00000001;
		rge_write_csi(sc, 0x890, val);
	} else
		RGE_WRITE_2(sc, 0x0382, 0x221b);
	RGE_WRITE_1(sc, 0x4500, 0);
	RGE_WRITE_2(sc, 0x4800, 0);
	RGE_CLRBIT_1(sc, RGE_CFG1, RGE_CFG1_SPEED_DOWN);

	rge_write_mac_ocp(sc, 0xc140, 0xffff);
	rge_write_mac_ocp(sc, 0xc142, 0xffff);

	val = rge_read_mac_ocp(sc, 0xd3e2) & ~0x0fff;
	rge_write_mac_ocp(sc, 0xd3e2, val | 0x03a9);

	RGE_MAC_CLRBIT(sc, 0xd3e4, 0x00ff);
	RGE_MAC_SETBIT(sc, 0xe860, 0x0080);
	RGE_MAC_SETBIT(sc, 0xeb58, 0x0001);

	if (sc->rge_type == MAC_CFG2_8126)
		RGE_CLRBIT_1(sc, 0xd8, 0x02);

	/* Per-revision MAC-OCP tuning. */
	val = rge_read_mac_ocp(sc, 0xe614) & ~0x0700;
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3 ||
	    sc->rge_type == MAC_CFG2_8126)
		rge_write_mac_ocp(sc, 0xe614, val | 0x0400);
	else
		rge_write_mac_ocp(sc, 0xe614, val | 0x0200);

	RGE_MAC_CLRBIT(sc, 0xe63e, 0x0c00);

	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3 ||
	    sc->rge_type == MAC_CFG2_8126) {
		val = rge_read_mac_ocp(sc, 0xe63e) & ~0x0030;
		rge_write_mac_ocp(sc, 0xe63e, val | 0x0020);
	} else
		RGE_MAC_CLRBIT(sc, 0xe63e, 0x0030);

	RGE_MAC_SETBIT(sc, 0xc0b4, 0x000c);

	val = rge_read_mac_ocp(sc, 0xeb6a) & ~0x00ff;
	rge_write_mac_ocp(sc, 0xeb6a, val | 0x0033);

	val = rge_read_mac_ocp(sc, 0xeb50) & ~0x03e0;
	rge_write_mac_ocp(sc, 0xeb50, val | 0x0040);

	RGE_MAC_CLRBIT(sc, 0xe056, 0x00f0);

	RGE_WRITE_1(sc, RGE_TDFNR, 0x10);

	RGE_MAC_CLRBIT(sc, 0xe040, 0x1000);

	val = rge_read_mac_ocp(sc, 0xea1c) & ~0x0003;
	rge_write_mac_ocp(sc, 0xea1c, val | 0x0001);

	rge_write_mac_ocp(sc, 0xe0c0, 0x4000);

	RGE_MAC_SETBIT(sc, 0xe052, 0x0060);
	RGE_MAC_CLRBIT(sc, 0xe052, 0x0088);

	val = rge_read_mac_ocp(sc, 0xc0ac) & ~0x0080;
	rge_write_mac_ocp(sc, 0xc0ac, val | 0x1f00);

	val = rge_read_mac_ocp(sc, 0xd430) & ~0x0fff;
	rge_write_mac_ocp(sc, 0xd430, val | 0x045f);

	val = rge_read_mac_ocp(sc, 0xe84c) & ~0x0040;
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		rge_write_mac_ocp(sc, 0xe84c, 0x00c0);
	else
		rge_write_mac_ocp(sc, 0xe84c, 0x0080);

	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_PFM_EN);

	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		RGE_SETBIT_1(sc, RGE_MCUCMD, 0x01);

	/* Disable EEE plus. */
	RGE_MAC_CLRBIT(sc, 0xe080, 0x0002);

	if (sc->rge_type == MAC_CFG2_8126)
		RGE_MAC_CLRBIT(sc, 0xea1c, 0x0304);
	else
		RGE_MAC_CLRBIT(sc, 0xea1c, 0x0004);

	/* Pulse bit 0 of 0xeb54 (purpose undocumented). */
	RGE_MAC_SETBIT(sc, 0xeb54, 0x0001);
	DELAY(1);
	RGE_MAC_CLRBIT(sc, 0xeb54, 0x0001);

	RGE_CLRBIT_2(sc, 0x1880, 0x0030);

	/* Config interrupt type for RTL8125B. */
	if (sc->rge_type == MAC_CFG5)
		RGE_CLRBIT_1(sc, RGE_INT_CFG0, RGE_INT_CFG0_EN);

	/* Clear timer interrupts. */
	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT1, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT2, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT3, 0);

	num_miti = (sc->rge_type == MAC_CFG3) ? 64 : 32;
	/* Clear interrupt moderation timer. */
	for (i = 0; i < num_miti; i++)
		RGE_WRITE_4(sc, RGE_INTMITI(i), 0);

	if (sc->rge_type == MAC_CFG5) {
		RGE_CLRBIT_1(sc, RGE_INT_CFG0,
		    RGE_INT_CFG0_TIMEOUT_BYPASS |
		    RGE_INT_CFG0_MITIGATION_BYPASS);
		RGE_WRITE_2(sc, RGE_INT_CFG1, 0);
	}

	RGE_MAC_SETBIT(sc, 0xc0ac, 0x1f80);

	rge_write_mac_ocp(sc, 0xe098, 0xc302);

	RGE_MAC_CLRBIT(sc, 0xe032, 0x0003);
	val = rge_read_csi(sc, 0x98) & ~0x0000ff00;
	rge_write_csi(sc, 0x98, val);

	val = rge_read_mac_ocp(sc, 0xe092) & ~0x00ff;
	rge_write_mac_ocp(sc, 0xe092, val);

	/* Match hardware VLAN tag stripping to the enabled capability. */
	if ((sc->sc_ec.ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0)
		RGE_SETBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);
	else
		RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);

	RGE_SETBIT_2(sc, RGE_CPLUSCMD, RGE_CPLUSCMD_RXCSUM);

	/* Set Maximum frame size. */
	RGE_WRITE_2(sc, RGE_RXMAXSIZE, RGE_JUMBO_FRAMELEN);

	/* Disable RXDV gate. */
	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
	DELAY(2000);

	/* Program promiscuous mode and multicast filters. */
	rge_iff(sc);

/* XXX-PRG Do we still need this? */
	if (sc->rge_type == MAC_CFG2_8126)
		RGE_CLRBIT_1(sc, RGE_INT_CFG0, 0x08);

	/* Relock the configuration registers. */
	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
	DELAY(10);

	rge_ifmedia_upd(ifp);

	/* Enable transmit and receive. */
	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_TXENB | RGE_CMD_RXENB);

	/* Enable interrupts. */
	rge_setup_intr(sc, RGE_IMTYPE_SIM);

	ifp->if_flags |= IFF_RUNNING;
	CLR(ifp->if_flags, IFF_OACTIVE);

	/* Kick off the one-second tick. */
	callout_schedule(&sc->sc_timeout, 1);

	return 0;
}
    943 
/*
 * Stop the adapter and free any mbufs allocated to the RX and TX lists.
 */
void
rge_stop(struct ifnet *ifp, int disable)
{
	struct rge_softc *sc = ifp->if_softc;

	/* Stop the periodic tick before touching the chip. */
	callout_halt(&sc->sc_timeout, NULL);

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_RUNNING;
	sc->rge_timerintr = 0;

	/* Stop accepting frames of any kind. */
	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
	    RGE_RXCFG_ERRPKT);

	/* Mask all interrupts. */
	RGE_WRITE_4(sc, RGE_IMR, 0);

	/* Config interrupt type for RTL8126. */
	if (sc->rge_type == MAC_CFG2_8126)
		RGE_CLRBIT_1(sc, RGE_INT_CFG0, RGE_INT_CFG0_EN);

	rge_hw_reset(sc);

	/* Clear timer interrupts. */
	RGE_MAC_CLRBIT(sc, 0xc0ac, 0x1f80);

	rge_reset(sc);

//	intr_barrier(sc->sc_ih);
//	ifq_barrier(&ifp->if_snd);
/*	ifq_clr_oactive(&ifp->if_snd); Sevan - OpenBSD queue API */

	/* Drop any partially reassembled RX mbuf chain. */
	if (sc->rge_head != NULL) {
		m_freem(sc->rge_head);
		sc->rge_head = sc->rge_tail = NULL;
	}

	rge_tx_list_fini(sc);
	rge_rx_list_fini(sc);
}
    987 
    988 /*
    989  * Set media options.
    990  */
int
rge_ifmedia_upd(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_media;
	int anar, gig, val;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Disable Gigabit Lite. */
	RGE_PHY_CLRBIT(sc, 0xa428, 0x0200);
	RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0001);
	if (sc->rge_type == MAC_CFG2_8126)
		RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0002);

	/*
	 * PHY OCP register 0xa5d4 carries the NBASE-T (2.5G/5G)
	 * advertisement bits; start with them all cleared.
	 */
	val = rge_read_phy_ocp(sc, 0xa5d4);
	val &= ~RGE_ADV_2500TFDX;
	if (sc->rge_type == MAC_CFG2_8126)
		val &= ~RGE_ADV_5000TFDX;

	/* Default advertisement: everything up to 1000BASE-T. */
	anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
	gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		/* Only the RTL8126 (MAC_CFG2_8126) can advertise 5G. */
		val |= (sc->rge_type != MAC_CFG2_8126) ?
		    RGE_ADV_2500TFDX : (RGE_ADV_2500TFDX | RGE_ADV_5000TFDX);
		break;
	case IFM_5000_T:
		val |= RGE_ADV_5000TFDX;
		ifp->if_baudrate = IF_Gbps(5);
		break;
	case IFM_2500_T:
		val |= RGE_ADV_2500TFDX;
		ifp->if_baudrate = IF_Mbps(2500);
		break;
	case IFM_1000_T:
		ifp->if_baudrate = IF_Gbps(1);
		break;
	case IFM_100_TX:
		/* Withdraw the 1000BASE-T advertisement. */
		gig = rge_read_phy(sc, 0, MII_100T2CR) &
		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
		    ANAR_TX | ANAR_TX_FD | ANAR_10_FD | ANAR_10 :
		    ANAR_TX | ANAR_10_FD | ANAR_10;
		ifp->if_baudrate = IF_Mbps(100);
		break;
	case IFM_10_T:
		/* Withdraw the 1000BASE-T advertisement. */
		gig = rge_read_phy(sc, 0, MII_100T2CR) &
		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
		    ANAR_10_FD | ANAR_10 : ANAR_10;
		ifp->if_baudrate = IF_Mbps(10);
		break;
	default:
		device_printf(sc->sc_dev,
		    "unsupported media type\n");
		return (EINVAL);
	}

	/*
	 * Program the advertisements and restart autonegotiation;
	 * speed selection is always done via autoneg, never forced.
	 */
	rge_write_phy(sc, 0, MII_ANAR, anar | ANAR_PAUSE_ASYM | ANAR_FC);
	rge_write_phy(sc, 0, MII_100T2CR, gig);
	rge_write_phy_ocp(sc, 0xa5d4, val);
	rge_write_phy(sc, 0, MII_BMCR, BMCR_RESET | BMCR_AUTOEN |
	    BMCR_STARTNEG);

	return (0);
}
   1060 
   1061 /*
   1062  * Report current media status.
   1063  */
   1064 void
   1065 rge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
   1066 {
   1067 	struct rge_softc *sc = ifp->if_softc;
   1068 	uint16_t status = 0;
   1069 
   1070 	ifmr->ifm_status = IFM_AVALID;
   1071 	ifmr->ifm_active = IFM_ETHER;
   1072 
   1073 	if (rge_get_link_status(sc)) {
   1074 		ifmr->ifm_status |= IFM_ACTIVE;
   1075 
   1076 		status = RGE_READ_2(sc, RGE_PHYSTAT);
   1077 		if ((status & RGE_PHYSTAT_FDX) ||
   1078 		    (status & (RGE_PHYSTAT_2500MBPS | RGE_PHYSTAT_5000MBPS)))
   1079 			ifmr->ifm_active |= IFM_FDX;
   1080 		else
   1081 			ifmr->ifm_active |= IFM_HDX;
   1082 
   1083 		if (status & RGE_PHYSTAT_10MBPS)
   1084 			ifmr->ifm_active |= IFM_10_T;
   1085 		else if (status & RGE_PHYSTAT_100MBPS)
   1086 			ifmr->ifm_active |= IFM_100_TX;
   1087 		else if (status & RGE_PHYSTAT_1000MBPS)
   1088 			ifmr->ifm_active |= IFM_1000_T;
   1089 		else if (status & RGE_PHYSTAT_2500MBPS)
   1090 			ifmr->ifm_active |= IFM_2500_T;
   1091 		else if (status & RGE_PHYSTAT_5000MBPS)
   1092 			ifmr->ifm_active |= IFM_5000_T;
   1093 	}
   1094 }
   1095 
   1096 /*
   1097  * Allocate memory for RX/TX rings.
   1098  *
 * XXX There is no tear-down for this if any part fails, so everything
 * remains allocated.
   1101  */
static int
rge_allocmem(struct rge_softc *sc)
{
	int error, i;

	/* Allocate DMA'able memory for the TX ring. */
	error = bus_dmamap_create(sc->sc_dmat, RGE_TX_LIST_SZ, 1,
	    RGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT |BUS_DMA_ALLOCNOW,
	    &sc->rge_ldata.rge_tx_list_map);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't create TX list map\n");
		return (error);
	}
	/* One physically contiguous segment, RGE_ALIGN-aligned. */
	error = bus_dmamem_alloc(sc->sc_dmat, RGE_TX_LIST_SZ, RGE_ALIGN, 0,
	    &sc->rge_ldata.rge_tx_listseg, 1, &sc->rge_ldata.rge_tx_listnseg,
	    BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't alloc TX list\n");
		return (error);
	}

	/* Load the map for the TX ring. */
	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
	    sc->rge_ldata.rge_tx_listnseg, RGE_TX_LIST_SZ,
	    (void **) &sc->rge_ldata.rge_tx_list,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't map TX dma buffers\n");
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
		    sc->rge_ldata.rge_tx_listnseg);
		return (error);
	}
	/* Start with a zeroed ring: the chip owns no descriptors yet. */
	memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);
	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
	    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't load TX dma map\n");
		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map);
		bus_dmamem_unmap(sc->sc_dmat,
		    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ);
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
		    sc->rge_ldata.rge_tx_listnseg);
		return (error);
	}

	/* Create DMA maps for TX buffers. */
	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN,
		    RGE_TX_NSEGS, RGE_JUMBO_FRAMELEN, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &sc->rge_ldata.rge_txq[i].txq_dmamap);
		if (error) {
			aprint_error_dev(sc->sc_dev, "can't create DMA map for TX\n");
			return (error);
		}
	}

	/* Allocate DMA'able memory for the RX ring. */
	error = bus_dmamap_create(sc->sc_dmat, RGE_RX_LIST_SZ, 1,
	    RGE_RX_LIST_SZ, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &sc->rge_ldata.rge_rx_list_map);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't create RX list map\n");
		return (error);
	}
	/* One physically contiguous segment, RGE_ALIGN-aligned. */
	error = bus_dmamem_alloc(sc->sc_dmat, RGE_RX_LIST_SZ, RGE_ALIGN, 0,
	    &sc->rge_ldata.rge_rx_listseg, 1, &sc->rge_ldata.rge_rx_listnseg,
	    BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't alloc RX list\n");
		return (error);
	}

	/* Load the map for the RX ring. */
	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
	    sc->rge_ldata.rge_rx_listnseg, RGE_RX_LIST_SZ,
	    (void **) &sc->rge_ldata.rge_rx_list,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't map RX dma buffers\n");
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
		    sc->rge_ldata.rge_rx_listnseg);
		return (error);
	}
	/* Start with a zeroed ring: the chip owns no descriptors yet. */
	memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);
	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
	    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't load RX dma map\n");
		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map);
		bus_dmamem_unmap(sc->sc_dmat,
		    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ);
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
		    sc->rge_ldata.rge_rx_listnseg);
		return (error);
	}

	/*
	 * Create DMA maps for RX buffers.  Use BUS_DMA_ALLOCNOW to avoid any
	 * potential failure in bus_dmamap_load_mbuf() in the RX path.
	 */
	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN, 1,
		    RGE_JUMBO_FRAMELEN, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &sc->rge_ldata.rge_rxq[i].rxq_dmamap);
		if (error) {
			aprint_error_dev(sc->sc_dev, "can't create DMA map for RX\n");
			return (error);
		}
	}

	/* error is 0 here if every step above succeeded. */
	return (error);
}
   1215 
   1216 /*
   1217  * release memory allocated to RX/TX rings
   1218  */
   1219 static int
   1220 rge_deallocmem(struct rge_softc *sc)
   1221 {
   1222 	int i;
   1223 
   1224 	/* Destroy DMA maps for RX buffers */
   1225 	for (i = 0; i < RGE_RX_LIST_CNT; i++)
   1226 		bus_dmamap_destroy(sc->sc_dmat,
   1227 		    sc->rge_ldata.rge_rxq[i].rxq_dmamap);
   1228 
   1229 	/* Unload the map for the RX ring */
   1230 	bus_dmamap_unload(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map);
   1231 	bus_dmamem_unmap(sc->sc_dmat,
   1232 	    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ);
   1233 
   1234 	/* Deallocate DMA'able memory for the RX ring. */
   1235 	bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
   1236 	    sc->rge_ldata.rge_rx_listnseg);
   1237 	bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map);
   1238 
   1239 	/* Destroy DMA maps for TX buffers. */
   1240 	for (i = 0; i < RGE_TX_LIST_CNT; i++)
   1241 		bus_dmamap_destroy(sc->sc_dmat,
   1242 		    sc->rge_ldata.rge_txq[i].txq_dmamap);
   1243 
   1244 	/* Unload the map for the TX ring */
   1245 	bus_dmamap_unload(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map);
   1246 	bus_dmamem_unmap(sc->sc_dmat,
   1247 	    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ);
   1248 
   1249 	/* Deallocate DMA'able memory for the TX ring. */
   1250 	bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
   1251 	    sc->rge_ldata.rge_tx_listnseg);
   1252 	bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map);
   1253 
   1254 	return 0;
   1255 }
   1256 
   1257 /*
   1258  * Set an RX descriptor and sync it.
   1259  */
static void
rge_load_rxbuf(struct rge_softc *sc, int idx)
{
	struct rge_rx_desc *r = &sc->rge_ldata.rge_rx_list[idx];
	struct rge_rxq *rxq = &sc->rge_ldata.rge_rxq[idx];
	bus_dmamap_t rxmap = rxq->rxq_dmamap;
	uint32_t cmdsts;

	/* Hand the descriptor to the chip; mark end-of-ring on the last. */
	cmdsts = rxmap->dm_segs[0].ds_len | RGE_RDCMDSTS_OWN;
	if (idx == RGE_RX_LIST_CNT - 1)
		cmdsts |= RGE_RDCMDSTS_EOR;

	/* Fill the buffer address before writing cmdsts with OWN set. */
	r->hi_qword0.rge_addr = htole64(rxmap->dm_segs[0].ds_addr);
	r->hi_qword1.rx_qword4.rge_extsts = 0;
	r->hi_qword1.rx_qword4.rge_cmdsts = htole32(cmdsts);

	/* Flush the updated descriptor out to the device. */
	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
   1280 
   1281 /*
   1282  * Initialize the RX descriptor and attach an mbuf cluster.
   1283  */
int
rge_newbuf(struct rge_softc *sc, int idx)
{
	struct mbuf *m;
	struct rge_rxq *rxq;
	bus_dmamap_t rxmap;
	int error __diagused;

	/* Jumbo-sized cluster: the chip may DMA a full-size frame into it. */
	m = MCLGETL(NULL, M_DONTWAIT, RGE_JUMBO_FRAMELEN);
	if (m == NULL)
		return (ENOBUFS);
	MCLAIM(m, &sc->sc_ec.ec_rx_mowner);

	m->m_len = m->m_pkthdr.len = RGE_JUMBO_FRAMELEN;

	rxq = &sc->rge_ldata.rge_rxq[idx];
	rxmap = rxq->rxq_dmamap;

	/* Unload the mapping of any mbuf previously in this slot. */
	if (rxq->rxq_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxq->rxq_dmamap);

	/* This map was created with BUS_DMA_ALLOCNOW so should never fail. */
	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m, BUS_DMA_NOWAIT);
	KASSERTMSG(error == 0, "error=%d", error);

	/* Prepare the buffer for device writes. */
	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Map the segments into RX descriptors. */

	rxq->rxq_mbuf = m;
	rge_load_rxbuf(sc, idx);

	return 0;
}
   1319 
   1320 static int
   1321 rge_rx_list_init(struct rge_softc *sc)
   1322 {
   1323 	unsigned i;
   1324 
   1325 	memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);
   1326 
   1327 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
   1328 		sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
   1329 		if (rge_newbuf(sc, i) != 0) {
   1330 			rge_rx_list_fini(sc);
   1331 			return (ENOBUFS);
   1332 		}
   1333 	}
   1334 
   1335 	sc->rge_ldata.rge_rxq_prodidx = sc->rge_ldata.rge_rxq_considx = 0;
   1336 	sc->rge_head = sc->rge_tail = NULL;
   1337 
   1338 	return (0);
   1339 }
   1340 
   1341 static void
   1342 rge_rx_list_fini(struct rge_softc *sc)
   1343 {
   1344 	unsigned i;
   1345 
   1346 	/* Free the RX list buffers. */
   1347 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
   1348 		if (sc->rge_ldata.rge_rxq[i].rxq_mbuf != NULL) {
   1349 			bus_dmamap_unload(sc->sc_dmat,
   1350 			    sc->rge_ldata.rge_rxq[i].rxq_dmamap);
   1351 			m_freem(sc->rge_ldata.rge_rxq[i].rxq_mbuf);
   1352 			sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
   1353 		}
   1354 	}
   1355 }
   1356 
   1357 static void
   1358 rge_tx_list_init(struct rge_softc *sc)
   1359 {
   1360 	unsigned i;
   1361 
   1362 	memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);
   1363 
   1364 	for (i = 0; i < RGE_TX_LIST_CNT; i++)
   1365 		sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
   1366 
   1367 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map, 0,
   1368 	    sc->rge_ldata.rge_tx_list_map->dm_mapsize,
   1369 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1370 
   1371 	sc->rge_ldata.rge_txq_prodidx = sc->rge_ldata.rge_txq_considx = 0;
   1372 }
   1373 
   1374 static void
   1375 rge_tx_list_fini(struct rge_softc *sc)
   1376 {
   1377 	unsigned i;
   1378 
   1379 	/* Free the TX list buffers. */
   1380 	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
   1381 		if (sc->rge_ldata.rge_txq[i].txq_mbuf != NULL) {
   1382 			bus_dmamap_unload(sc->sc_dmat,
   1383 			    sc->rge_ldata.rge_txq[i].txq_dmamap);
   1384 			m_freem(sc->rge_ldata.rge_txq[i].txq_mbuf);
   1385 			sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
   1386 		}
   1387 	}
   1388 }
   1389 
int
rge_rxeof(struct rge_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct rge_rx_desc *cur_rx;
	struct rge_rxq *rxq;
	uint32_t rxstat, extsts;
	int i, total_len, rx = 0;

	/*
	 * Reap received frames: walk the RX ring from the consumer
	 * index until we hit a descriptor the chip still owns.
	 * Returns 1 if at least one descriptor was processed.
	 */
	for (i = sc->rge_ldata.rge_rxq_considx; ; i = RGE_NEXT_RX_DESC(i)) {
		/* Invalidate the descriptor memory. */
		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
		    i * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_rx = &sc->rge_ldata.rge_rx_list[i];
		rxstat = letoh32(cur_rx->hi_qword1.rx_qword4.rge_cmdsts);
		extsts = letoh32(cur_rx->hi_qword1.rx_qword4.rge_extsts);

		/* Still owned by the hardware: nothing more to reap. */
		if (rxstat & RGE_RDCMDSTS_OWN)
			break;

		total_len = rxstat & RGE_RDCMDSTS_FRAGLEN;
		rxq = &sc->rge_ldata.rge_rxq[i];
		m = rxq->rxq_mbuf;
		rx = 1;

		/* Invalidate the RX mbuf. */
		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_dmamap, 0,
		    rxq->rxq_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * Require a complete frame within a single descriptor
		 * (both SOF and EOF set); anything else is counted as
		 * an input error and the buffer is recycled.
		 */
		if ((rxstat & (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) !=
		    (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) {
			if_statinc(ifp, if_ierrors);
			rge_load_rxbuf(sc, i);
			continue;
		}

		if (rxstat & RGE_RDCMDSTS_RXERRSUM) {
			if_statinc(ifp, if_ierrors);
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->rge_head != NULL) {
				m_freem(sc->rge_head);
				sc->rge_head = sc->rge_tail = NULL;
			}
			rge_load_rxbuf(sc, i);
			continue;
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
		 */
		if (rge_newbuf(sc, i) != 0) {
			if_statinc(ifp, if_iqdrops);
			if (sc->rge_head != NULL) {
				m_freem(sc->rge_head);
				sc->rge_head = sc->rge_tail = NULL;
			}
			rge_load_rxbuf(sc, i);
			continue;
		}

		m_set_rcvif(m, ifp);
		if (sc->rge_head != NULL) {
			/* Append this fragment to the pending chain. */
			m->m_len = total_len;
			/*
			 * Special case: if there's 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes is the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->rge_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->rge_tail->m_next = m;
			}
			m = sc->rge_head;
			sc->rge_head = sc->rge_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
	#if 0
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);
	#else
		{
			/* Keep the trailing FCS and flag it for the stack. */
			m->m_pkthdr.len = m->m_len = total_len;
			m->m_flags |= M_HASFCS;
		}
	#endif

#if notyet
		/* Check IP header checksum. */
		if (!(extsts & RGE_RDEXTSTS_IPCSUMERR) &&
		    (extsts & RGE_RDEXTSTS_IPV4))
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

		/* Check TCP/UDP checksum. */
		if ((extsts & (RGE_RDEXTSTS_IPV4 | RGE_RDEXTSTS_IPV6)) &&
		    (((extsts & RGE_RDEXTSTS_TCPPKT) &&
		    !(extsts & RGE_RDEXTSTS_TCPCSUMERR)) ||
		    ((extsts & RGE_RDEXTSTS_UDPPKT) &&
		    !(extsts & RGE_RDEXTSTS_UDPCSUMERR))))
			m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
			    M_UDP_CSUM_IN_OK;
#endif

		/* Pass a hardware-extracted VLAN tag up the stack. */
		if (extsts & RGE_RDEXTSTS_VTAG) {
			vlan_set_tag(m,
			    bswap16(extsts & RGE_RDEXTSTS_VLAN_MASK));
		}

		if_percpuq_enqueue(ifp->if_percpuq, m);
	}

	/* Remember where to resume on the next call. */
	sc->rge_ldata.rge_rxq_considx = i;

	return (rx);
}
   1517 
int
rge_txeof(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct rge_txq *txq;
	uint32_t txstat;
	int cons, idx, prod;
	int free = 0;

	/*
	 * Reap completed TX descriptors between the consumer and
	 * producer indexes.  'free' encodes the outcome:
	 *   0 - nothing reaped,
	 *   1 - one or more packets completed,
	 *   2 - stopped at a descriptor the chip still owns.
	 */
	prod = sc->rge_ldata.rge_txq_prodidx;
	cons = sc->rge_ldata.rge_txq_considx;

	while (prod != cons) {
		txq = &sc->rge_ldata.rge_txq[cons];
		idx = txq->txq_descidx;

		/* Invalidate the descriptor before reading its status. */
		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
		    idx * sizeof(struct rge_tx_desc),
		    sizeof(struct rge_tx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		txstat = letoh32(sc->rge_ldata.rge_tx_list[idx].rge_cmdsts);

		if (txstat & RGE_TDCMDSTS_OWN) {
			free = 2;
			break;
		}

		/* Packet sent: unload its map and free the mbuf. */
		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap, 0,
		    txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
		m_freem(txq->txq_mbuf);
		txq->txq_mbuf = NULL;

		/* Account for the completion in the interface stats. */
		net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
		if (txstat & (RGE_TDCMDSTS_EXCESSCOLL | RGE_TDCMDSTS_COLL))
			if_statinc_ref(ifp, nsr, if_collisions);
		if (txstat & RGE_TDCMDSTS_TXERR)
			if_statinc_ref(ifp, nsr, if_oerrors);
		else
			if_statinc_ref(ifp, nsr, if_opackets);
		IF_STAT_PUTREF(ifp);

		/* Return the descriptor for reuse. */
		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
		    idx * sizeof(struct rge_tx_desc),
		    sizeof(struct rge_tx_desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		cons = RGE_NEXT_TX_DESC(idx);
		free = 1;
	}

	if (free == 0)
		return (0);

	sc->rge_ldata.rge_txq_considx = cons;

	/* The chip still holds descriptors: prod it to keep going. */
	if (free == 2)
		rge_txstart(sc);

	CLR(ifp->if_flags, IFF_OACTIVE);
	ifp->if_timer = 0;
	if_schedule_deferred_start(ifp);

	return (1);
}
   1584 
void
rge_reset(struct rge_softc *sc)
{
	int i;

	/* Stop accepting frames before resetting. */
	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
	    RGE_RXCFG_ERRPKT);

	/* Enable RXDV gate. */
	RGE_SETBIT_1(sc, RGE_PPSW, 0x08);
	DELAY(2000);

	/* Request a stop and wait (up to 200us) for it to take. */
	RGE_SETBIT_1(sc, RGE_CMD, RGE_CMD_STOPREQ);
	for (i = 0; i < 20; i++) {
		DELAY(10);
		if (!(RGE_READ_1(sc, RGE_CMD) & RGE_CMD_STOPREQ))
			break;
	}

	/* Wait (up to 150ms) for both FIFOs to report empty. */
	for (i = 0; i < 3000; i++) {
		DELAY(50);
		if ((RGE_READ_1(sc, RGE_MCUCMD) & (RGE_MCUCMD_RXFIFO_EMPTY |
		    RGE_MCUCMD_TXFIFO_EMPTY)) == (RGE_MCUCMD_RXFIFO_EMPTY |
		    RGE_MCUCMD_TXFIFO_EMPTY))
			break;
	}
	if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5) {
		/*
		 * Extra wait on MAC_CFG4/MAC_CFG5 parts; the 0x0103
		 * bits in RGE_IM are undocumented here — presumably
		 * another "idle" indication.
		 */
		for (i = 0; i < 3000; i++) {
			DELAY(50);
			if ((RGE_READ_2(sc, RGE_IM) & 0x0103) == 0x0103)
				break;
		}
	}

	DELAY(2000);

	/* Soft reset. */
	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_RESET);

	/* The reset bit self-clears once the reset has completed. */
	for (i = 0; i < RGE_TIMEOUT; i++) {
		DELAY(100);
		if (!(RGE_READ_1(sc, RGE_CMD) & RGE_CMD_RESET))
			break;
	}
	if (i == RGE_TIMEOUT)
		device_printf(sc->sc_dev, "reset never completed!\n");
}
   1633 
void
rge_iff(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct ethercom *ec = &sc->sc_ec;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[2];
	uint32_t rxfilt;
	int h = 0;

	/* Rebuild the RX filter from the current interface flags. */
	rxfilt = RGE_READ_4(sc, RGE_RXCFG);
	rxfilt &= ~(RGE_RXCFG_ALLPHYS | RGE_RXCFG_MULTI);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept frames destined to our station address.
	 * Always accept broadcast frames.
	 */
	rxfilt |= RGE_RXCFG_INDIV | RGE_RXCFG_BROAD;

	if (ifp->if_flags & IFF_PROMISC) {
 allmulti:
		/*
		 * Accept all multicast.  Also reached via goto when a
		 * multicast address range forces ALLMULTI; in that case
		 * the inner PROMISC test keeps ALLPHYS off.
		 */
		ifp->if_flags |= IFF_ALLMULTI;
		rxfilt |= RGE_RXCFG_MULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= RGE_RXCFG_ALLPHYS;
		hashes[0] = hashes[1] = 0xffffffff;
	} else {
		rxfilt |= RGE_RXCFG_MULTI;
		/* Program new filter. */
		memset(hashes, 0, sizeof(hashes));

		ETHER_LOCK(ec);
		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			/* Address ranges cannot be hash-filtered. */
			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN) != 0) {
			    	ETHER_UNLOCK(ec);
				goto allmulti;
			}
			/* Hash is the top 6 bits of the BE CRC32. */
			h = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN) >> 26;

			if (h < 32)
				hashes[0] |= (1U << h);
			else
				hashes[1] |= (1U << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
		ETHER_UNLOCK(ec);
	}

	/* The MAR registers take the hash words byte-swapped. */
	RGE_WRITE_4(sc, RGE_RXCFG, rxfilt);
	RGE_WRITE_4(sc, RGE_MAR0, bswap32(hashes[1]));
	RGE_WRITE_4(sc, RGE_MAR4, bswap32(hashes[0]));
}
   1692 
void
rge_chipinit(struct rge_softc *sc)
{
	/*
	 * Bring the chip to a known state: exit OOB mode, power up
	 * the PHY, run the hardware init sequence and reset.
	 */
	rge_exit_oob(sc);
	rge_set_phy_power(sc, 1);
	rge_hw_init(sc);
	rge_hw_reset(sc);
}
   1701 
void
rge_set_phy_power(struct rge_softc *sc, int on)
{
	int i;

	if (on) {
		RGE_SETBIT_1(sc, RGE_PMCH, 0xc0);

		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN);

		/*
		 * Poll (up to RGE_TIMEOUT ms) for the PHY power state
		 * in bits 2:0 of OCP 0xa420 to reach 3 — presumably
		 * the "ready" state; confirm against Realtek docs.
		 */
		for (i = 0; i < RGE_TIMEOUT; i++) {
			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 3)
				break;
			DELAY(1000);
		}
	} else {
		/* Power the PHY down; no wait needed on the way out. */
		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_PDOWN);
		RGE_CLRBIT_1(sc, RGE_PMCH, 0x80);
		RGE_CLRBIT_1(sc, RGE_PPSW, 0x40);
	}
}
   1723 
   1724 void
   1725 rge_ephy_config(struct rge_softc *sc)
   1726 {
   1727 	switch (sc->rge_type) {
   1728 	case MAC_CFG3:
   1729 		rge_ephy_config_mac_cfg3(sc);
   1730 		break;
   1731 	case MAC_CFG5:
   1732 		rge_ephy_config_mac_cfg5(sc);
   1733 		break;
   1734 	default:
   1735 		break;  /* Can't happen. */
   1736 	}
   1737 }
   1738 
   1739 void
   1740 rge_ephy_config_mac_cfg3(struct rge_softc *sc)
   1741 {
   1742 	uint16_t val;
   1743 	int i;
   1744 
   1745 	for (i = 0; i < nitems(rtl8125_mac_cfg3_ephy); i++)
   1746 	rge_write_ephy(sc, rtl8125_mac_cfg3_ephy[i].reg,
   1747 	    rtl8125_mac_cfg3_ephy[i].val);
   1748 
   1749 	val = rge_read_ephy(sc, 0x002a) & ~0x7000;
   1750 	rge_write_ephy(sc, 0x002a, val | 0x3000);
   1751 	RGE_EPHY_CLRBIT(sc, 0x0019, 0x0040);
   1752 	RGE_EPHY_SETBIT(sc, 0x001b, 0x0e00);
   1753 	RGE_EPHY_CLRBIT(sc, 0x001b, 0x7000);
   1754 	rge_write_ephy(sc, 0x0002, 0x6042);
   1755 	rge_write_ephy(sc, 0x0006, 0x0014);
   1756 	val = rge_read_ephy(sc, 0x006a) & ~0x7000;
   1757 	rge_write_ephy(sc, 0x006a, val | 0x3000);
   1758 	RGE_EPHY_CLRBIT(sc, 0x0059, 0x0040);
   1759 	RGE_EPHY_SETBIT(sc, 0x005b, 0x0e00);
   1760 	RGE_EPHY_CLRBIT(sc, 0x005b, 0x7000);
   1761 	rge_write_ephy(sc, 0x0042, 0x6042);
   1762 	rge_write_ephy(sc, 0x0046, 0x0014);
   1763 }
   1764 
   1765 void
   1766 rge_ephy_config_mac_cfg5(struct rge_softc *sc)
   1767 {
   1768 	int i;
   1769 
   1770 	for (i = 0; i < nitems(rtl8125_mac_cfg5_ephy); i++)
   1771 		rge_write_ephy(sc, rtl8125_mac_cfg5_ephy[i].reg,
   1772 		    rtl8125_mac_cfg5_ephy[i].val);
   1773 }
   1774 
int
rge_phy_config(struct rge_softc *sc)
{
	int i;

	rge_ephy_config(sc);

	/* PHY reset. */
	rge_write_phy(sc, 0, MII_ANAR,
	    rge_read_phy(sc, 0, MII_ANAR) &
	    ~(ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10));
	rge_write_phy(sc, 0, MII_100T2CR,
	    rge_read_phy(sc, 0, MII_100T2CR) &
	    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX));
	RGE_PHY_CLRBIT(sc, 0xa5d4, RGE_ADV_2500TFDX);
	rge_write_phy(sc, 0, MII_BMCR, BMCR_RESET | BMCR_AUTOEN |
	    BMCR_STARTNEG);
	/* Wait up to 2.5s for the reset bit to self-clear. */
	for (i = 0; i < 2500; i++) {
		if (!(rge_read_phy(sc, 0, MII_BMCR) & BMCR_RESET))
			break;
		DELAY(1000);
	}
	if (i == 2500) {
		aprint_error_dev(sc->sc_dev, "%s: PHY reset failed\n",
		    "rge_phy_config");
		return (ETIMEDOUT);
	}

	/* Read microcode version. */
	rge_write_phy_ocp(sc, 0xa436, 0x801e);
	sc->rge_mcodever = rge_read_phy_ocp(sc, 0xa438);

	/* Chip-specific PHY initialization. */
	switch (sc->rge_type) {
	case MAC_CFG2_8126:
		rge_phy_config_mac_cfg2_8126(sc);
		break;
	case MAC_CFG2:
		rge_phy_config_mac_cfg2(sc);
		break;
	case MAC_CFG3:
		rge_phy_config_mac_cfg3(sc);
		break;
	case MAC_CFG4:
		rge_phy_config_mac_cfg4(sc);
		break;
	case MAC_CFG5:
		rge_phy_config_mac_cfg5(sc);
		break;
	default:
		break;	/* Can't happen. */
	}

	RGE_PHY_CLRBIT(sc, 0xa5b4, 0x8000);

	/* Disable EEE. */
	RGE_MAC_CLRBIT(sc, 0xe040, 0x0003);
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
		RGE_MAC_CLRBIT(sc, 0xeb62, 0x0006);
		RGE_PHY_CLRBIT(sc, 0xa432, 0x0010);
	}
	RGE_PHY_CLRBIT(sc, 0xa5d0, 0x0006);
	RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0001);
	if (sc->rge_type == MAC_CFG2_8126)
		RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0002);
	RGE_PHY_CLRBIT(sc, 0xa6d8, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xa428, 0x0080);
	RGE_PHY_CLRBIT(sc, 0xa4a2, 0x0200);

	/*
	 * Advanced EEE.  On non-8126 parts these writes must be
	 * bracketed by the PHY MCU patch request.
	 */
	if (sc->rge_type != MAC_CFG2_8126)
		rge_patch_phy_mcu(sc, 1);
	RGE_MAC_CLRBIT(sc, 0xe052, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xa442, 0x3000);
	RGE_PHY_CLRBIT(sc, 0xa430, 0x8000);
	if (sc->rge_type != MAC_CFG2_8126)
		rge_patch_phy_mcu(sc, 0);
	return 0;
}
   1853 
/*
 * rge_phy_config_mac_cfg2_8126 -- PHY parameter setup for the RTL8126
 * (MAC_CFG2_8126) variant, applied after loading its PHY MCU microcode.
 *
 * This is a scripted sequence of OCP PHY register accesses taken from the
 * vendor configuration; the exact write order matters, so do not reorder.
 * Registers 0xa436/0xa438 and 0xb87c/0xb87e are used throughout as
 * address/data pairs for indirect PHY parameter access; the read-
 * modify-write pattern "val = read & ~mask; write(val | bits)" updates
 * only a field of the addressed parameter.
 */
void
rge_phy_config_mac_cfg2_8126(struct rge_softc *sc)
{
	uint16_t val;
	int i;
	/* Parameter tables; each is written out in two chunks below. */
	static const uint16_t mac_cfg2_a438_value[] =
	    { 0x0044, 0x00a8, 0x00d6, 0x00ec, 0x00f6, 0x00fc, 0x00fe,
	      0x00fe, 0x00bc, 0x0058, 0x002a, 0x003f, 0x3f02, 0x023c,
	      0x3b0a, 0x1c00, 0x0000, 0x0000, 0x0000, 0x0000 };

	static const uint16_t mac_cfg2_b87e_value[] =
	    { 0x03ed, 0x03ff, 0x0009, 0x03fe, 0x000b, 0x0021, 0x03f7,
	      0x03b8, 0x03e0, 0x0049, 0x0049, 0x03e0, 0x03b8, 0x03f7,
	      0x0021, 0x000b, 0x03fe, 0x0009, 0x03ff, 0x03ed, 0x000e,
	      0x03fe, 0x03ed, 0x0006, 0x001a, 0x03f1, 0x03d8, 0x0023,
	      0x0054, 0x0322, 0x00dd, 0x03ab, 0x03dc, 0x0027, 0x000e,
	      0x03e5, 0x03f9, 0x0012, 0x0001, 0x03f1 };

	/* Make sure the expected microcode version is loaded first. */
	rge_phy_config_mcu(sc, RGE_MAC_CFG2_8126_MCODE_VER);

	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
	rge_write_phy_ocp(sc, 0xa436, 0x80bf);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xed00);
	rge_write_phy_ocp(sc, 0xa436, 0x80cd);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
	rge_write_phy_ocp(sc, 0xa436, 0x80d1);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xc800);
	rge_write_phy_ocp(sc, 0xa436, 0x80d4);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xc800);
	rge_write_phy_ocp(sc, 0xa436, 0x80e1);
	rge_write_phy_ocp(sc, 0xa438, 0x10cc);
	rge_write_phy_ocp(sc, 0xa436, 0x80e5);
	rge_write_phy_ocp(sc, 0xa438, 0x4f0c);
	rge_write_phy_ocp(sc, 0xa436, 0x8387);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x4700);
	val = rge_read_phy_ocp(sc, 0xa80c) & ~0x00c0;
	rge_write_phy_ocp(sc, 0xa80c, val | 0x0080);
	RGE_PHY_CLRBIT(sc, 0xac90, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xad2c, 0x8000);
	rge_write_phy_ocp(sc, 0xb87c, 0x8321);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x1100);
	RGE_PHY_SETBIT(sc, 0xacf8, 0x000c);
	rge_write_phy_ocp(sc, 0xa436, 0x8183);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5900);
	RGE_PHY_SETBIT(sc, 0xad94, 0x0020);
	RGE_PHY_CLRBIT(sc, 0xa654, 0x0800);
	RGE_PHY_SETBIT(sc, 0xb648, 0x4000);
	rge_write_phy_ocp(sc, 0xb87c, 0x839e);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x2f00);
	rge_write_phy_ocp(sc, 0xb87c, 0x83f2);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0800);
	RGE_PHY_SETBIT(sc, 0xada0, 0x0002);
	rge_write_phy_ocp(sc, 0xb87c, 0x80f3);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x9900);
	rge_write_phy_ocp(sc, 0xb87c, 0x8126);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0xc100);
	rge_write_phy_ocp(sc, 0xb87c, 0x893a);
	rge_write_phy_ocp(sc, 0xb87e, 0x8080);
	rge_write_phy_ocp(sc, 0xb87c, 0x8647);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0xe600);
	rge_write_phy_ocp(sc, 0xb87c, 0x862c);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x1200);
	rge_write_phy_ocp(sc, 0xb87c, 0x864a);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0xe600);
	rge_write_phy_ocp(sc, 0xb87c, 0x80a0);
	rge_write_phy_ocp(sc, 0xb87e, 0xbcbc);
	rge_write_phy_ocp(sc, 0xb87c, 0x805e);
	rge_write_phy_ocp(sc, 0xb87e, 0xbcbc);
	rge_write_phy_ocp(sc, 0xb87c, 0x8056);
	rge_write_phy_ocp(sc, 0xb87e, 0x3077);
	rge_write_phy_ocp(sc, 0xb87c, 0x8058);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x5a00);
	rge_write_phy_ocp(sc, 0xb87c, 0x8098);
	rge_write_phy_ocp(sc, 0xb87e, 0x3077);
	rge_write_phy_ocp(sc, 0xb87c, 0x809a);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x5a00);
	rge_write_phy_ocp(sc, 0xb87c, 0x8052);
	rge_write_phy_ocp(sc, 0xb87e, 0x3733);
	rge_write_phy_ocp(sc, 0xb87c, 0x8094);
	rge_write_phy_ocp(sc, 0xb87e, 0x3733);
	rge_write_phy_ocp(sc, 0xb87c, 0x807f);
	rge_write_phy_ocp(sc, 0xb87e, 0x7c75);
	rge_write_phy_ocp(sc, 0xb87c, 0x803d);
	rge_write_phy_ocp(sc, 0xb87e, 0x7c75);
	rge_write_phy_ocp(sc, 0xb87c, 0x8036);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x3000);
	rge_write_phy_ocp(sc, 0xb87c, 0x8078);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x3000);
	rge_write_phy_ocp(sc, 0xb87c, 0x8031);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x3300);
	rge_write_phy_ocp(sc, 0xb87c, 0x8073);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x3300);
	val = rge_read_phy_ocp(sc, 0xae06) & ~0xfc00;
	rge_write_phy_ocp(sc, 0xae06, val | 0x7c00);
	rge_write_phy_ocp(sc, 0xb87c, 0x89D1);
	rge_write_phy_ocp(sc, 0xb87e, 0x0004);
	rge_write_phy_ocp(sc, 0xa436, 0x8fbd);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x0a00);
	rge_write_phy_ocp(sc, 0xa436, 0x8fbe);
	rge_write_phy_ocp(sc, 0xa438, 0x0d09);
	rge_write_phy_ocp(sc, 0xb87c, 0x89cd);
	rge_write_phy_ocp(sc, 0xb87e, 0x0f0f);
	rge_write_phy_ocp(sc, 0xb87c, 0x89cf);
	rge_write_phy_ocp(sc, 0xb87e, 0x0f0f);
	rge_write_phy_ocp(sc, 0xb87c, 0x83a4);
	rge_write_phy_ocp(sc, 0xb87e, 0x6600);
	rge_write_phy_ocp(sc, 0xb87c, 0x83a6);
	rge_write_phy_ocp(sc, 0xb87e, 0x6601);
	rge_write_phy_ocp(sc, 0xb87c, 0x83c0);
	rge_write_phy_ocp(sc, 0xb87e, 0x6600);
	rge_write_phy_ocp(sc, 0xb87c, 0x83c2);
	rge_write_phy_ocp(sc, 0xb87e, 0x6601);
	rge_write_phy_ocp(sc, 0xb87c, 0x8414);
	rge_write_phy_ocp(sc, 0xb87e, 0x6600);
	rge_write_phy_ocp(sc, 0xb87c, 0x8416);
	rge_write_phy_ocp(sc, 0xb87e, 0x6601);
	rge_write_phy_ocp(sc, 0xb87c, 0x83f8);
	rge_write_phy_ocp(sc, 0xb87e, 0x6600);
	rge_write_phy_ocp(sc, 0xb87c, 0x83fa);
	rge_write_phy_ocp(sc, 0xb87e, 0x6601);

	/* These registers are only touched while the PHY MCU is patched. */
	rge_patch_phy_mcu(sc, 1);
	val = rge_read_phy_ocp(sc, 0xbd96) & ~0x1f00;
	rge_write_phy_ocp(sc, 0xbd96, val | 0x1000);
	val = rge_read_phy_ocp(sc, 0xbf1c) & ~0x0007;
	rge_write_phy_ocp(sc, 0xbf1c, val | 0x0007);
	RGE_PHY_CLRBIT(sc, 0xbfbe, 0x8000);
	val = rge_read_phy_ocp(sc, 0xbf40) & ~0x0380;
	rge_write_phy_ocp(sc, 0xbf40, val | 0x0280);
	val = rge_read_phy_ocp(sc, 0xbf90) & ~0x0080;
	rge_write_phy_ocp(sc, 0xbf90, val | 0x0060);
	val = rge_read_phy_ocp(sc, 0xbf90) & ~0x0010;
	rge_write_phy_ocp(sc, 0xbf90, val | 0x000c);
	rge_patch_phy_mcu(sc, 0);

	rge_write_phy_ocp(sc, 0xa436, 0x843b);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x2000);
	rge_write_phy_ocp(sc, 0xa436, 0x843d);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x2000);
	RGE_PHY_CLRBIT(sc, 0xb516, 0x007f);
	RGE_PHY_CLRBIT(sc, 0xbf80, 0x0030);

	/* First 11 entries of mac_cfg2_a438_value, starting at 0x8188. */
	rge_write_phy_ocp(sc, 0xa436, 0x8188);
	for (i = 0; i < 11; i++)
		rge_write_phy_ocp(sc, 0xa438, mac_cfg2_a438_value[i]);

	rge_write_phy_ocp(sc, 0xb87c, 0x8015);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0800);
	/* "val | 0" below: the high byte is deliberately cleared to zero. */
	rge_write_phy_ocp(sc, 0xb87c, 0x8ffd);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0);
	rge_write_phy_ocp(sc, 0xb87c, 0x8fff);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x7f00);
	rge_write_phy_ocp(sc, 0xb87c, 0x8ffb);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0100);
	rge_write_phy_ocp(sc, 0xb87c, 0x8fe9);
	rge_write_phy_ocp(sc, 0xb87e, 0x0002);
	rge_write_phy_ocp(sc, 0xb87c, 0x8fef);
	rge_write_phy_ocp(sc, 0xb87e, 0x00a5);
	rge_write_phy_ocp(sc, 0xb87c, 0x8ff1);
	rge_write_phy_ocp(sc, 0xb87e, 0x0106);
	rge_write_phy_ocp(sc, 0xb87c, 0x8fe1);
	rge_write_phy_ocp(sc, 0xb87e, 0x0102);
	rge_write_phy_ocp(sc, 0xb87c, 0x8fe3);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0400);
	RGE_PHY_SETBIT(sc, 0xa654, 0x0800);
	RGE_PHY_CLRBIT(sc, 0xa654, 0x0003);
	rge_write_phy_ocp(sc, 0xac3a, 0x5851);
	val = rge_read_phy_ocp(sc, 0xac3c) & ~0xd000;
	rge_write_phy_ocp(sc, 0xac3c, val | 0x2000);
	val = rge_read_phy_ocp(sc, 0xac42) & ~0x0200;
	rge_write_phy_ocp(sc, 0xac42, val | 0x01c0);
	RGE_PHY_CLRBIT(sc, 0xac3e, 0xe000);
	RGE_PHY_CLRBIT(sc, 0xac42, 0x0038);
	val = rge_read_phy_ocp(sc, 0xac42) & ~0x0002;
	rge_write_phy_ocp(sc, 0xac42, val | 0x0005);
	rge_write_phy_ocp(sc, 0xac1a, 0x00db);
	rge_write_phy_ocp(sc, 0xade4, 0x01b5);
	RGE_PHY_CLRBIT(sc, 0xad9c, 0x0c00);
	rge_write_phy_ocp(sc, 0xb87c, 0x814b);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x1100);
	rge_write_phy_ocp(sc, 0xb87c, 0x814d);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x1100);
	rge_write_phy_ocp(sc, 0xb87c, 0x814f);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0b00);
	rge_write_phy_ocp(sc, 0xb87c, 0x8142);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0100);
	rge_write_phy_ocp(sc, 0xb87c, 0x8144);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0100);
	rge_write_phy_ocp(sc, 0xb87c, 0x8150);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0100);
	rge_write_phy_ocp(sc, 0xb87c, 0x8118);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0700);
	rge_write_phy_ocp(sc, 0xb87c, 0x811a);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0700);
	rge_write_phy_ocp(sc, 0xb87c, 0x811c);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0500);
	rge_write_phy_ocp(sc, 0xb87c, 0x810f);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0100);
	rge_write_phy_ocp(sc, 0xb87c, 0x8111);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0100);
	rge_write_phy_ocp(sc, 0xb87c, 0x811d);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0100);
	RGE_PHY_SETBIT(sc, 0xac36, 0x1000);
	RGE_PHY_CLRBIT(sc, 0xad1c, 0x0100);
	val = rge_read_phy_ocp(sc, 0xade8) & ~0xffc0;
	rge_write_phy_ocp(sc, 0xade8, val | 0x1400);
	rge_write_phy_ocp(sc, 0xb87c, 0x864b);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x9d00);

	/* Remaining a438 table entries ('i' carries over from above). */
	rge_write_phy_ocp(sc, 0xa436, 0x8f97);
	for (; i < nitems(mac_cfg2_a438_value); i++)
		rge_write_phy_ocp(sc, 0xa438, mac_cfg2_a438_value[i]);

	RGE_PHY_SETBIT(sc, 0xad9c, 0x0020);
	rge_write_phy_ocp(sc, 0xb87c, 0x8122);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0c00);

	/* First 20 entries of mac_cfg2_b87e_value, starting at 0x82c8. */
	rge_write_phy_ocp(sc, 0xb87c, 0x82c8);
	for (i = 0; i < 20; i++)
		rge_write_phy_ocp(sc, 0xb87e, mac_cfg2_b87e_value[i]);

	rge_write_phy_ocp(sc, 0xb87c, 0x80ef);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0c00);

	/* Remaining b87e table entries ('i' carries over from above). */
	rge_write_phy_ocp(sc, 0xb87c, 0x82a0);
	for (; i < nitems(mac_cfg2_b87e_value); i++)
		rge_write_phy_ocp(sc, 0xb87e, mac_cfg2_b87e_value[i]);

	rge_write_phy_ocp(sc, 0xa436, 0x8018);
	RGE_PHY_SETBIT(sc, 0xa438, 0x2000);
	rge_write_phy_ocp(sc, 0xb87c, 0x8fe4);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0);
	val = rge_read_phy_ocp(sc, 0xb54c) & ~0xffc0;
	rge_write_phy_ocp(sc, 0xb54c, val | 0x3700);
}
   2133 
/*
 * rge_phy_config_mac_cfg2 -- PHY parameter setup for the RTL8125
 * MAC_CFG2 variant.
 *
 * Writes the per-variant EPHY table, loads the PHY MCU microcode if
 * needed, then applies a fixed vendor-provided sequence of OCP PHY
 * register tweaks.  0xa436/0xa438 act as an address/data pair for
 * indirect parameter access; write order matters, do not reorder.
 */
void
rge_phy_config_mac_cfg2(struct rge_softc *sc)
{
	uint16_t val;
	int i;

	for (i = 0; i < nitems(rtl8125_mac_cfg2_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg2_ephy[i].reg,
		    rtl8125_mac_cfg2_ephy[i].val);

	rge_phy_config_mcu(sc, RGE_MAC_CFG2_MCODE_VER);

	val = rge_read_phy_ocp(sc, 0xad40) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0084);
	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad16, val | 0x0006);
	val = rge_read_phy_ocp(sc, 0xad32) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x1100);
	val = rge_read_phy_ocp(sc, 0xac8a) & ~0xf000;
	rge_write_phy_ocp(sc, 0xac8a, val | 0x7000);
	RGE_PHY_SETBIT(sc, 0xad18, 0x0400);
	RGE_PHY_SETBIT(sc, 0xad1a, 0x03ff);
	RGE_PHY_SETBIT(sc, 0xad1c, 0x03ff);

	/* Indirect parameter writes via the 0xa436/0xa438 pair. */
	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xc400);
	rge_write_phy_ocp(sc, 0xa436, 0x80eb);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0x0700;
	rge_write_phy_ocp(sc, 0xa438, val | 0x0300);
	rge_write_phy_ocp(sc, 0xa436, 0x80f8);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80f1);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x3000);
	rge_write_phy_ocp(sc, 0xa436, 0x80fe);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xa500);
	rge_write_phy_ocp(sc, 0xa436, 0x8102);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x8105);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x3300);
	rge_write_phy_ocp(sc, 0xa436, 0x8100);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x7000);
	rge_write_phy_ocp(sc, 0xa436, 0x8104);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xf000);
	rge_write_phy_ocp(sc, 0xa436, 0x8106);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6500);
	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xed00);
	rge_write_phy_ocp(sc, 0xa436, 0x80df);
	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
	rge_write_phy_ocp(sc, 0xa436, 0x80e1);
	RGE_PHY_CLRBIT(sc, 0xa438, 0x0100);
	val = rge_read_phy_ocp(sc, 0xbf06) & ~0x003f;
	rge_write_phy_ocp(sc, 0xbf06, val | 0x0038);
	rge_write_phy_ocp(sc, 0xa436, 0x819f);
	rge_write_phy_ocp(sc, 0xa438, 0xd0b6);
	rge_write_phy_ocp(sc, 0xbc34, 0x5555);
	val = rge_read_phy_ocp(sc, 0xbf0a) & ~0x0e00;
	rge_write_phy_ocp(sc, 0xbf0a, val | 0x0a00);
	RGE_PHY_CLRBIT(sc, 0xa5c0, 0x0400);
	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
}
   2207 
/*
 * rge_phy_config_mac_cfg3 -- PHY parameter setup for the RTL8125
 * MAC_CFG3 variant.
 *
 * Loads the PHY MCU microcode if needed, then applies a fixed
 * vendor-provided sequence of OCP PHY register tweaks.  Write order
 * matters; do not reorder.  0xa436/0xa438 and 0xb88e/0xb890 are
 * address/data pairs for indirect parameter access.
 */
void
rge_phy_config_mac_cfg3(struct rge_softc *sc)
{
	uint16_t val;
	int i;
	static const uint16_t mac_cfg3_a438_value[] =
	    { 0x0043, 0x00a7, 0x00d6, 0x00ec, 0x00f6, 0x00fb, 0x00fd, 0x00ff,
	      0x00bb, 0x0058, 0x0029, 0x0013, 0x0009, 0x0004, 0x0002 };

	/* Interleaved (address, value) pairs, consumed two at a time. */
	static const uint16_t mac_cfg3_b88e_value[] =
	    { 0xc091, 0x6e12, 0xc092, 0x1214, 0xc094, 0x1516, 0xc096, 0x171b,
	      0xc098, 0x1b1c, 0xc09a, 0x1f1f, 0xc09c, 0x2021, 0xc09e, 0x2224,
	      0xc0a0, 0x2424, 0xc0a2, 0x2424, 0xc0a4, 0x2424, 0xc018, 0x0af2,
	      0xc01a, 0x0d4a, 0xc01c, 0x0f26, 0xc01e, 0x118d, 0xc020, 0x14f3,
	      0xc022, 0x175a, 0xc024, 0x19c0, 0xc026, 0x1c26, 0xc089, 0x6050,
	      0xc08a, 0x5f6e, 0xc08c, 0x6e6e, 0xc08e, 0x6e6e, 0xc090, 0x6e12 };

	rge_phy_config_mcu(sc, RGE_MAC_CFG3_MCODE_VER);

	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad16, val | 0x03ff);
	val = rge_read_phy_ocp(sc, 0xad32) & ~0x003f;
	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x1000);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x0100);
	val = rge_read_phy_ocp(sc, 0xacc0) & ~0x0003;
	rge_write_phy_ocp(sc, 0xacc0, val | 0x0002);
	val = rge_read_phy_ocp(sc, 0xad40) & ~0x00e0;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0040);
	val = rge_read_phy_ocp(sc, 0xad40) & ~0x0007;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0004);
	RGE_PHY_CLRBIT(sc, 0xac14, 0x0080);
	RGE_PHY_CLRBIT(sc, 0xac80, 0x0300);
	val = rge_read_phy_ocp(sc, 0xac5e) & ~0x0007;
	rge_write_phy_ocp(sc, 0xac5e, val | 0x0002);
	rge_write_phy_ocp(sc, 0xad4c, 0x00a8);
	rge_write_phy_ocp(sc, 0xac5c, 0x01ff);
	val = rge_read_phy_ocp(sc, 0xac8a) & ~0x00f0;
	rge_write_phy_ocp(sc, 0xac8a, val | 0x0030);
	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0500);
	rge_write_phy_ocp(sc, 0xb87c, 0x8159);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0700);
	rge_write_phy_ocp(sc, 0xb87c, 0x80a2);
	rge_write_phy_ocp(sc, 0xb87e, 0x0153);
	rge_write_phy_ocp(sc, 0xb87c, 0x809c);
	rge_write_phy_ocp(sc, 0xb87e, 0x0153);

	/* Table write at 0x81b3, then a 26-entry zero fill. */
	rge_write_phy_ocp(sc, 0xa436, 0x81b3);
	for (i = 0; i < nitems(mac_cfg3_a438_value); i++)
		rge_write_phy_ocp(sc, 0xa438, mac_cfg3_a438_value[i]);
	for (i = 0; i < 26; i++)
		rge_write_phy_ocp(sc, 0xa438, 0);
	rge_write_phy_ocp(sc, 0xa436, 0x8257);
	rge_write_phy_ocp(sc, 0xa438, 0x020f);
	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
	rge_write_phy_ocp(sc, 0xa438, 0x7843);

	/* The b88e/b890 pairs are only written with the PHY MCU patched. */
	rge_patch_phy_mcu(sc, 1);
	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
	for (i = 0; i < nitems(mac_cfg3_b88e_value); i += 2) {
		rge_write_phy_ocp(sc, 0xb88e, mac_cfg3_b88e_value[i]);
		rge_write_phy_ocp(sc, 0xb890, mac_cfg3_b88e_value[i + 1]);
	}
	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
	rge_patch_phy_mcu(sc, 0);

	RGE_PHY_SETBIT(sc, 0xd068, 0x2000);
	rge_write_phy_ocp(sc, 0xa436, 0x81a2);
	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
	val = rge_read_phy_ocp(sc, 0xb54c) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb54c, val | 0xdb00);
	RGE_PHY_CLRBIT(sc, 0xa454, 0x0001);
	RGE_PHY_SETBIT(sc, 0xa5d4, 0x0020);
	RGE_PHY_CLRBIT(sc, 0xad4e, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xa86a, 0x0001);
	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
	RGE_PHY_SETBIT(sc, 0xa424, 0x0008);
}
   2291 
/*
 * rge_phy_config_mac_cfg4 -- PHY parameter setup for the RTL8125
 * MAC_CFG4 variant.
 *
 * Writes the per-variant EPHY table, loads the PHY MCU microcode if
 * needed, then applies a fixed vendor-provided sequence of OCP PHY
 * register tweaks.  Also programs the EEE TX idle timer from the
 * interface MTU.  Write order matters; do not reorder.
 */
void
rge_phy_config_mac_cfg4(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint16_t val;
	int i;
	/* Interleaved (address, value) pairs, consumed two at a time. */
	static const uint16_t mac_cfg4_b87c_value[] =
	    { 0x8013, 0x0700, 0x8fb9, 0x2801, 0x8fba, 0x0100, 0x8fbc, 0x1900,
	      0x8fbe, 0xe100, 0x8fc0, 0x0800, 0x8fc2, 0xe500, 0x8fc4, 0x0f00,
	      0x8fc6, 0xf100, 0x8fc8, 0x0400, 0x8fca, 0xf300, 0x8fcc, 0xfd00,
	      0x8fce, 0xff00, 0x8fd0, 0xfb00, 0x8fd2, 0x0100, 0x8fd4, 0xf400,
	      0x8fd6, 0xff00, 0x8fd8, 0xf600, 0x813d, 0x390e, 0x814f, 0x790e,
	      0x80b0, 0x0f31 };

	for (i = 0; i < nitems(rtl8125_mac_cfg4_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg4_ephy[i].reg,
		    rtl8125_mac_cfg4_ephy[i].val);

	rge_write_phy_ocp(sc, 0xbf86, 0x9000);
	/* Pulse bit 10 of 0xc402 (set then clear). */
	RGE_PHY_SETBIT(sc, 0xc402, 0x0400);
	RGE_PHY_CLRBIT(sc, 0xc402, 0x0400);
	rge_write_phy_ocp(sc, 0xbd86, 0x1010);
	rge_write_phy_ocp(sc, 0xbd88, 0x1010);
	val = rge_read_phy_ocp(sc, 0xbd4e) & ~0x0c00;
	rge_write_phy_ocp(sc, 0xbd4e, val | 0x0800);
	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
	rge_write_phy_ocp(sc, 0xbf46, val | 0x0700);

	rge_phy_config_mcu(sc, RGE_MAC_CFG4_MCODE_VER);

	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
	RGE_PHY_SETBIT(sc, 0xbc08, 0x000c);
	rge_write_phy_ocp(sc, 0xa436, 0x8fff);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x0400);
	/* 0x8560..0x856a: first three get 0x19cc, the rest 0x147d. */
	for (i = 0; i < 6; i++) {
		rge_write_phy_ocp(sc, 0xb87c, 0x8560 + i * 2);
		if (i < 3)
			rge_write_phy_ocp(sc, 0xb87e, 0x19cc);
		else
			rge_write_phy_ocp(sc, 0xb87e, 0x147d);
	}
	rge_write_phy_ocp(sc, 0xb87c, 0x8ffe);
	rge_write_phy_ocp(sc, 0xb87e, 0x0907);
	val = rge_read_phy_ocp(sc, 0xacda) & ~0xff00;
	rge_write_phy_ocp(sc, 0xacda, val | 0xff00);
	val = rge_read_phy_ocp(sc, 0xacde) & ~0xf000;
	rge_write_phy_ocp(sc, 0xacde, val | 0xf000);
	rge_write_phy_ocp(sc, 0xb87c, 0x80d6);
	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
	rge_write_phy_ocp(sc, 0xb87c, 0x80F2);
	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
	rge_write_phy_ocp(sc, 0xb87c, 0x80f4);
	rge_write_phy_ocp(sc, 0xb87e, 0x6077);
	rge_write_phy_ocp(sc, 0xb506, 0x01e7);
	rge_write_phy_ocp(sc, 0xac8c, 0x0ffc);
	rge_write_phy_ocp(sc, 0xac46, 0xb7b4);
	rge_write_phy_ocp(sc, 0xac50, 0x0fbc);
	rge_write_phy_ocp(sc, 0xac3c, 0x9240);
	rge_write_phy_ocp(sc, 0xac4E, 0x0db4);
	rge_write_phy_ocp(sc, 0xacc6, 0x0707);
	rge_write_phy_ocp(sc, 0xacc8, 0xa0d3);
	rge_write_phy_ocp(sc, 0xad08, 0x0007);
	for (i = 0; i < nitems(mac_cfg4_b87c_value); i += 2) {
		rge_write_phy_ocp(sc, 0xb87c, mac_cfg4_b87c_value[i]);
		rge_write_phy_ocp(sc, 0xb87e, mac_cfg4_b87c_value[i + 1]);
	}
	RGE_PHY_SETBIT(sc, 0xbf4c, 0x0002);
	RGE_PHY_SETBIT(sc, 0xbcca, 0x0300);
	rge_write_phy_ocp(sc, 0xb87c, 0x8141);
	rge_write_phy_ocp(sc, 0xb87e, 0x320e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8153);
	rge_write_phy_ocp(sc, 0xb87e, 0x720e);
	RGE_PHY_CLRBIT(sc, 0xa432, 0x0040);
	rge_write_phy_ocp(sc, 0xb87c, 0x8529);
	rge_write_phy_ocp(sc, 0xb87e, 0x050e);
	/* EEE TX idle timer scales with the current MTU. */
	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN +
	    32);
	rge_write_phy_ocp(sc, 0xa436, 0x816c);
	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
	rge_write_phy_ocp(sc, 0xa436, 0x8170);
	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
	rge_write_phy_ocp(sc, 0xa436, 0x8174);
	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
	rge_write_phy_ocp(sc, 0xa436, 0x8178);
	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
	rge_write_phy_ocp(sc, 0xa436, 0x817c);
	rge_write_phy_ocp(sc, 0xa438, 0x0719);
	rge_write_phy_ocp(sc, 0xa436, 0x8ff4);
	rge_write_phy_ocp(sc, 0xa438, 0x0400);
	rge_write_phy_ocp(sc, 0xa436, 0x8ff1);
	rge_write_phy_ocp(sc, 0xa438, 0x0404);
	rge_write_phy_ocp(sc, 0xbf4a, 0x001b);
	/* 0x8033, 0x8037, ..., 0x8047: third entry differs from the rest. */
	for (i = 0; i < 6; i++) {
		rge_write_phy_ocp(sc, 0xb87c, 0x8033 + i * 4);
		if (i == 2)
			rge_write_phy_ocp(sc, 0xb87e, 0xfc32);
		else
			rge_write_phy_ocp(sc, 0xb87e, 0x7c13);
	}
	rge_write_phy_ocp(sc, 0xb87c, 0x8145);
	rge_write_phy_ocp(sc, 0xb87e, 0x370e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
	rge_write_phy_ocp(sc, 0xb87e, 0x770e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8169);
	rge_write_phy_ocp(sc, 0xb87e, 0x0d0a);
	rge_write_phy_ocp(sc, 0xb87c, 0x817b);
	rge_write_phy_ocp(sc, 0xb87e, 0x1d0a);
	rge_write_phy_ocp(sc, 0xa436, 0x8217);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x821a);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x80da);
	rge_write_phy_ocp(sc, 0xa438, 0x0403);
	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
	rge_write_phy_ocp(sc, 0xa436, 0x80b3);
	rge_write_phy_ocp(sc, 0xa438, 0x0384);
	rge_write_phy_ocp(sc, 0xa436, 0x80b7);
	rge_write_phy_ocp(sc, 0xa438, 0x2007);
	rge_write_phy_ocp(sc, 0xa436, 0x80ba);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80b5);
	rge_write_phy_ocp(sc, 0xa438, 0xf009);
	rge_write_phy_ocp(sc, 0xa436, 0x80bd);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x9f00);
	rge_write_phy_ocp(sc, 0xa436, 0x80c7);
	rge_write_phy_ocp(sc, 0xa438, 0xf083);
	rge_write_phy_ocp(sc, 0xa436, 0x80dd);
	rge_write_phy_ocp(sc, 0xa438, 0x03f0);
	rge_write_phy_ocp(sc, 0xa436, 0x80df);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
	rge_write_phy_ocp(sc, 0xa436, 0x80cb);
	rge_write_phy_ocp(sc, 0xa438, 0x2007);
	rge_write_phy_ocp(sc, 0xa436, 0x80ce);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80c9);
	rge_write_phy_ocp(sc, 0xa438, 0x8009);
	rge_write_phy_ocp(sc, 0xa436, 0x80d1);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x8000);
	rge_write_phy_ocp(sc, 0xa436, 0x80a3);
	rge_write_phy_ocp(sc, 0xa438, 0x200a);
	rge_write_phy_ocp(sc, 0xa436, 0x80a5);
	rge_write_phy_ocp(sc, 0xa438, 0xf0ad);
	rge_write_phy_ocp(sc, 0xa436, 0x809f);
	rge_write_phy_ocp(sc, 0xa438, 0x6073);
	rge_write_phy_ocp(sc, 0xa436, 0x80a1);
	rge_write_phy_ocp(sc, 0xa438, 0x000b);
	rge_write_phy_ocp(sc, 0xa436, 0x80a9);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xc000);
	/* The b88e/b890 pairs are only written with the PHY MCU patched. */
	rge_patch_phy_mcu(sc, 1);
	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
	rge_write_phy_ocp(sc, 0xb88e, 0xc23e);
	rge_write_phy_ocp(sc, 0xb890, 0x0000);
	rge_write_phy_ocp(sc, 0xb88e, 0xc240);
	rge_write_phy_ocp(sc, 0xb890, 0x0103);
	rge_write_phy_ocp(sc, 0xb88e, 0xc242);
	rge_write_phy_ocp(sc, 0xb890, 0x0507);
	rge_write_phy_ocp(sc, 0xb88e, 0xc244);
	rge_write_phy_ocp(sc, 0xb890, 0x090b);
	rge_write_phy_ocp(sc, 0xb88e, 0xc246);
	rge_write_phy_ocp(sc, 0xb890, 0x0c0e);
	rge_write_phy_ocp(sc, 0xb88e, 0xc248);
	rge_write_phy_ocp(sc, 0xb890, 0x1012);
	rge_write_phy_ocp(sc, 0xb88e, 0xc24a);
	rge_write_phy_ocp(sc, 0xb890, 0x1416);
	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
	rge_patch_phy_mcu(sc, 0);
	RGE_PHY_SETBIT(sc, 0xa86a, 0x0001);
	RGE_PHY_SETBIT(sc, 0xa6f0, 0x0001);
	rge_write_phy_ocp(sc, 0xbfa0, 0xd70d);
	rge_write_phy_ocp(sc, 0xbfa2, 0x4100);
	rge_write_phy_ocp(sc, 0xbfa4, 0xe868);
	rge_write_phy_ocp(sc, 0xbfa6, 0xdc59);
	rge_write_phy_ocp(sc, 0xb54c, 0x3c18);
	RGE_PHY_CLRBIT(sc, 0xbfa4, 0x0020);
	rge_write_phy_ocp(sc, 0xa436, 0x817d);
	RGE_PHY_SETBIT(sc, 0xa438, 0x1000);
}
   2481 
/*
 * rge_phy_config_mac_cfg5 -- PHY parameter setup for the RTL8125
 * MAC_CFG5 variant.
 *
 * Loads the PHY MCU microcode if needed, then applies a short
 * vendor-provided sequence of OCP PHY register tweaks.  Write order
 * matters; do not reorder.
 */
void
rge_phy_config_mac_cfg5(struct rge_softc *sc)
{
	uint16_t val;
	int i;

	rge_phy_config_mcu(sc, RGE_MAC_CFG5_MCODE_VER);

	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
	val = rge_read_phy_ocp(sc, 0xac46) & ~0x00f0;
	rge_write_phy_ocp(sc, 0xac46, val | 0x0090);
	val = rge_read_phy_ocp(sc, 0xad30) & ~0x0003;
	rge_write_phy_ocp(sc, 0xad30, val | 0x0001);
	rge_write_phy_ocp(sc, 0xb87c, 0x80f5);
	rge_write_phy_ocp(sc, 0xb87e, 0x760e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8107);
	rge_write_phy_ocp(sc, 0xb87e, 0x360e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8551);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0800);
	val = rge_read_phy_ocp(sc, 0xbf00) & ~0xe000;
	rge_write_phy_ocp(sc, 0xbf00, val | 0xa000);
	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
	rge_write_phy_ocp(sc, 0xbf46, val | 0x0300);
	/* Write 0x2417 to parameters 0x8044, 0x804a, ..., 0x807a. */
	for (i = 0; i < 10; i++) {
		rge_write_phy_ocp(sc, 0xa436, 0x8044 + i * 6);
		rge_write_phy_ocp(sc, 0xa438, 0x2417);
	}
	RGE_PHY_SETBIT(sc, 0xa4ca, 0x0040);
	val = rge_read_phy_ocp(sc, 0xbf84) & ~0xe000;
	rge_write_phy_ocp(sc, 0xbf84, val | 0xa000);
	rge_write_phy_ocp(sc, 0xa436, 0x8170);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0x2700;
	rge_write_phy_ocp(sc, 0xa438, val | 0xd800);
	RGE_PHY_SETBIT(sc, 0xa424, 0x0008);
}
   2518 
   2519 void
   2520 rge_phy_config_mcu(struct rge_softc *sc, uint16_t mcode_version)
   2521 {
   2522 	if (sc->rge_mcodever != mcode_version) {
   2523 		int i;
   2524 
   2525 		rge_patch_phy_mcu(sc, 1);
   2526 
   2527 		if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
   2528 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
   2529 			if (sc->rge_type == MAC_CFG2)
   2530 				rge_write_phy_ocp(sc, 0xa438, 0x8600);
   2531 			else
   2532 				rge_write_phy_ocp(sc, 0xa438, 0x8601);
   2533 			rge_write_phy_ocp(sc, 0xa436, 0xb82e);
   2534 			rge_write_phy_ocp(sc, 0xa438, 0x0001);
   2535 
   2536 			RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
   2537 		}
   2538 
   2539 		if (sc->rge_type == MAC_CFG2) {
   2540 			for (i = 0; i < nitems(rtl8125_mac_cfg2_mcu); i++) {
   2541 				rge_write_phy_ocp(sc,
   2542 				    rtl8125_mac_cfg2_mcu[i].reg,
   2543 				    rtl8125_mac_cfg2_mcu[i].val);
   2544 			}
   2545 		} else if (sc->rge_type == MAC_CFG3) {
   2546 			for (i = 0; i < nitems(rtl8125_mac_cfg3_mcu); i++) {
   2547 				rge_write_phy_ocp(sc,
   2548 				    rtl8125_mac_cfg3_mcu[i].reg,
   2549 				    rtl8125_mac_cfg3_mcu[i].val);
   2550 			}
   2551 		} else if (sc->rge_type == MAC_CFG4) {
   2552 			for (i = 0; i < nitems(rtl8125_mac_cfg4_mcu); i++) {
   2553 				rge_write_phy_ocp(sc,
   2554 				    rtl8125_mac_cfg4_mcu[i].reg,
   2555 				    rtl8125_mac_cfg4_mcu[i].val);
   2556 			}
   2557 		} else if (sc->rge_type == MAC_CFG5) {
   2558 			for (i = 0; i < nitems(rtl8125_mac_cfg5_mcu); i++) {
   2559 				rge_write_phy_ocp(sc,
   2560 				    rtl8125_mac_cfg5_mcu[i].reg,
   2561 				    rtl8125_mac_cfg5_mcu[i].val);
   2562 			}
   2563 		} else if (sc->rge_type == MAC_CFG2_8126) {
   2564 			for (i = 0; i < nitems(rtl8126_mac_cfg2_mcu); i++) {
   2565 				rge_write_phy_ocp(sc,
   2566 				    rtl8126_mac_cfg2_mcu[i].reg,
   2567 				    rtl8126_mac_cfg2_mcu[i].val);
   2568 			}
   2569 		}
   2570 
   2571 		if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
   2572 			RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);
   2573 
   2574 			rge_write_phy_ocp(sc, 0xa436, 0);
   2575 			rge_write_phy_ocp(sc, 0xa438, 0);
   2576 			RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
   2577 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
   2578 			rge_write_phy_ocp(sc, 0xa438, 0);
   2579 		}
   2580 
   2581 		rge_patch_phy_mcu(sc, 0);
   2582 
   2583 		/* Write microcode version. */
   2584 		rge_write_phy_ocp(sc, 0xa436, 0x801e);
   2585 		rge_write_phy_ocp(sc, 0xa438, mcode_version);
   2586 	}
   2587 }
   2588 
void
rge_set_macaddr(struct rge_softc *sc, const uint8_t *addr)
{
	/*
	 * Program the station address into the MAC0/MAC4 ID registers.
	 * The registers are only writable while the EEPROM command
	 * register is in write-config mode, so bracket the update.
	 * The bytes are packed little-endian, matching the register
	 * layout used by rge_get_macaddr().
	 */
	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
	RGE_WRITE_4(sc, RGE_MAC0,
	    (uint32_t)addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	RGE_WRITE_4(sc, RGE_MAC4,
	    addr[5] <<  8 | addr[4]);
	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
}
   2599 
   2600 void
   2601 rge_get_macaddr(struct rge_softc *sc, uint8_t *addr)
   2602 {
   2603 	int i;
   2604 
   2605 	for (i = 0; i < ETHER_ADDR_LEN; i++)
   2606 		addr[i] = RGE_READ_1(sc, RGE_MAC0 + i);
   2607 
   2608 	*(uint32_t *)&addr[0] = RGE_READ_4(sc, RGE_ADDR0);
   2609 	*(uint16_t *)&addr[4] = RGE_READ_2(sc, RGE_ADDR1);
   2610 
   2611 	rge_set_macaddr(sc, addr);
   2612 }
   2613 
void
rge_hw_init(struct rge_softc *sc)
{
	uint16_t reg;
	int i, npages;

	/* RTL8126 only: clear bit 3 of the interrupt config register. */
	if (sc->rge_type == MAC_CFG2_8126)
		RGE_CLRBIT_1(sc, RGE_INT_CFG0, 0x08);
	rge_disable_aspm_clkreq(sc);
	RGE_CLRBIT_1(sc, 0xf1, 0x80);

	/* Disable UPS. */
	RGE_MAC_CLRBIT(sc, 0xd40a, 0x0010);

	/* Disable MAC MCU. */
	/*
	 * NOTE(review): rge_disable_aspm_clkreq() was already called a
	 * few lines above; the repeat mirrors the vendor sequence --
	 * confirm before removing.
	 */
	rge_disable_aspm_clkreq(sc);
	rge_write_mac_ocp(sc, 0xfc48, 0);
	/* Zero the MCU breakpoint registers 0xfc28-0xfc46. */
	for (reg = 0xfc28; reg < 0xfc48; reg += 2)
		rge_write_mac_ocp(sc, reg, 0);
	DELAY(3000);
	rge_write_mac_ocp(sc, 0xfc26, 0);

	if (sc->rge_type == MAC_CFG3) {
		/*
		 * Load the RTL8125 MAC MCU patch.  Page 0 gets the real
		 * patch values; pages 1 and 2 are cleared, except that on
		 * page 2 registers at/above 0xf9f8 are skipped and then
		 * set explicitly below.
		 */
		for (npages = 0; npages < 3; npages++) {
			rge_switch_mcu_ram_page(sc, npages);
			for (i = 0; i < nitems(rtl8125_mac_bps); i++) {
				if (npages == 0)
					rge_write_mac_ocp(sc,
					    rtl8125_mac_bps[i].reg,
					    rtl8125_mac_bps[i].val);
				else if (npages == 1)
					rge_write_mac_ocp(sc,
					    rtl8125_mac_bps[i].reg, 0);
				else {
					if (rtl8125_mac_bps[i].reg < 0xf9f8)
						rge_write_mac_ocp(sc,
						    rtl8125_mac_bps[i].reg, 0);
				}
			}
			if (npages == 2) {
				rge_write_mac_ocp(sc, 0xf9f8, 0x6486);
				rge_write_mac_ocp(sc, 0xf9fa, 0x0b15);
				rge_write_mac_ocp(sc, 0xf9fc, 0x090e);
				rge_write_mac_ocp(sc, 0xf9fe, 0x1139);
			}
		}
		/* Re-enable the MAC MCU and set the patch checkpoints. */
		rge_write_mac_ocp(sc, 0xfc26, 0x8000);
		rge_write_mac_ocp(sc, 0xfc2a, 0x0540);
		rge_write_mac_ocp(sc, 0xfc2e, 0x0a06);
		rge_write_mac_ocp(sc, 0xfc30, 0x0eb8);
		rge_write_mac_ocp(sc, 0xfc32, 0x3a5c);
		rge_write_mac_ocp(sc, 0xfc34, 0x10a8);
		rge_write_mac_ocp(sc, 0xfc40, 0x0d54);
		rge_write_mac_ocp(sc, 0xfc42, 0x0e24);
		rge_write_mac_ocp(sc, 0xfc48, 0x307a);
	} else if (sc->rge_type == MAC_CFG5) {
		/* RTL8125B MAC MCU patch: single page, values as given. */
		rge_switch_mcu_ram_page(sc, 0);
		for (i = 0; i < nitems(rtl8125b_mac_bps); i++) {
			rge_write_mac_ocp(sc, rtl8125b_mac_bps[i].reg,
			    rtl8125b_mac_bps[i].val);
		}
	}

	/* Disable PHY power saving. */
	if (sc->rge_type != MAC_CFG2_8126)
		rge_disable_phy_ocp_pwrsave(sc);

	/* Set PCIe uncorrectable error status. */
	rge_write_csi(sc, 0x108,
	    rge_read_csi(sc, 0x108) | 0x00100000);
}
   2685 
void
rge_hw_reset(struct rge_softc *sc)
{
	/* Disable interrupts */
	RGE_WRITE_4(sc, RGE_IMR, 0);
	/* Ack any still-pending interrupts (write-1-to-clear). */
	RGE_WRITE_4(sc, RGE_ISR, RGE_READ_4(sc, RGE_ISR));

	/* Clear timer interrupts. */
	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT1, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT2, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT3, 0);

	/* Finally, issue the chip-level software reset. */
	rge_reset(sc);
}
   2701 
void
rge_disable_phy_ocp_pwrsave(struct rge_softc *sc)
{
	/*
	 * Disable PHY power saving by forcing PHY OCP register 0xc416
	 * to 0x0500.  The write must happen in PHY MCU patch mode; the
	 * update is skipped when the register already holds that value.
	 * The double write (0 then 0x0500) follows the vendor sequence.
	 */
	if (rge_read_phy_ocp(sc, 0xc416) != 0x0500) {
		rge_patch_phy_mcu(sc, 1);
		rge_write_phy_ocp(sc, 0xc416, 0);
		rge_write_phy_ocp(sc, 0xc416, 0x0500);
		rge_patch_phy_mcu(sc, 0);
	}
}
   2712 
   2713 void
   2714 rge_patch_phy_mcu(struct rge_softc *sc, int set)
   2715 {
   2716 	int i;
   2717 
   2718 	if (set)
   2719 		RGE_PHY_SETBIT(sc, 0xb820, 0x0010);
   2720 	else
   2721 		RGE_PHY_CLRBIT(sc, 0xb820, 0x0010);
   2722 
   2723 	for (i = 0; i < 1000; i++) {
   2724 		if (set) {
   2725 			if ((rge_read_phy_ocp(sc, 0xb800) & 0x0040) != 0)
   2726 				break;
   2727 		} else {
   2728 			if (!(rge_read_phy_ocp(sc, 0xb800) & 0x0040))
   2729 				break;
   2730 		}
   2731 		DELAY(100);
   2732 	}
   2733 	if (i == 1000)
   2734 		aprint_error_dev(sc->sc_dev,
   2735 		    "timeout waiting to patch phy mcu\n");
   2736 }
   2737 
   2738 void
   2739 rge_add_media_types(struct rge_softc *sc)
   2740 {
   2741 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
   2742 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
   2743 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
   2744 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
   2745 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T, 0, NULL);
   2746 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
   2747 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
   2748 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX, 0, NULL);
   2749 
   2750 	if (sc->rge_type == MAC_CFG2_8126) {
   2751 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_5000_T, 0, NULL);
   2752 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_5000_T | IFM_FDX,
   2753 		    0, NULL);
   2754 	}
   2755 }
   2756 
   2757 void
   2758 rge_config_imtype(struct rge_softc *sc, int imtype)
   2759 {
   2760 	switch (imtype) {
   2761 	case RGE_IMTYPE_NONE:
   2762 		sc->rge_intrs = RGE_INTRS;
   2763 		break;
   2764 	case RGE_IMTYPE_SIM:
   2765 		sc->rge_intrs = RGE_INTRS_TIMER;
   2766 		break;
   2767 	default:
   2768 		panic("%s: unknown imtype %d", device_xname(sc->sc_dev), imtype);
   2769 	}
   2770 }
   2771 
void
rge_disable_aspm_clkreq(struct rge_softc *sc)
{
	/*
	 * Disable PCIe CLKREQ and clear the PME status bit.  The config
	 * registers are only writable while EECMD is in write-config mode.
	 */
	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
}
   2780 
void
rge_disable_hw_im(struct rge_softc *sc)
{
	/* Turn off hardware interrupt moderation by zeroing the IM register. */
	RGE_WRITE_2(sc, RGE_IM, 0);
}
   2786 
void
rge_disable_sim_im(struct rge_softc *sc)
{
	/* Stop simulated (timer-driven) interrupt moderation. */
	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
	sc->rge_timerintr = 0;
}
   2793 
void
rge_setup_sim_im(struct rge_softc *sc)
{
	/*
	 * Enable simulated interrupt moderation: arm timer 0 with the
	 * moderation interval (0x2600 is the value used by the vendor
	 * driver; exact units unconfirmed) and kick the timer by writing
	 * the count register.
	 */
	RGE_WRITE_4(sc, RGE_TIMERINT0, 0x2600);
	RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
	sc->rge_timerintr = 1;
}
   2801 
   2802 void
   2803 rge_setup_intr(struct rge_softc *sc, int imtype)
   2804 {
   2805 	rge_config_imtype(sc, imtype);
   2806 
   2807 	/* Enable interrupts. */
   2808 	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
   2809 
   2810 	switch (imtype) {
   2811 	case RGE_IMTYPE_NONE:
   2812 		rge_disable_sim_im(sc);
   2813 		rge_disable_hw_im(sc);
   2814 		break;
   2815 	case RGE_IMTYPE_SIM:
   2816 		rge_disable_hw_im(sc);
   2817 		rge_setup_sim_im(sc);
   2818 		break;
   2819 	default:
   2820 		panic("%s: unknown imtype %d", device_xname(sc->sc_dev), imtype);
   2821 	}
   2822 }
   2823 
   2824 void
   2825 rge_switch_mcu_ram_page(struct rge_softc *sc, int page)
   2826 {
   2827 	uint16_t val;
   2828 
   2829 	val = rge_read_mac_ocp(sc, 0xe446) & ~0x0003;
   2830 	val |= page;
   2831 	rge_write_mac_ocp(sc, 0xe446, val);
   2832 }
   2833 
void
rge_exit_oob(struct rge_softc *sc)
{
	int i;

	/* Stop accepting any packet classes while we reconfigure. */
	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
	    RGE_RXCFG_ERRPKT);

	/* Disable RealWoW. */
	rge_write_mac_ocp(sc, 0xc0bc, 0x00ff);

	rge_reset(sc);

	/* Disable OOB. */
	RGE_CLRBIT_1(sc, RGE_MCUCMD, RGE_MCUCMD_IS_OOB);

	RGE_MAC_CLRBIT(sc, 0xe8de, 0x4000);

	/*
	 * Poll (up to 1ms) for TWICMD bit 0x0200 -- presumably a
	 * firmware-ready handshake; verify against the vendor driver.
	 */
	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
			break;
	}

	/* Magic MAC OCP values taken from the vendor init sequence. */
	rge_write_mac_ocp(sc, 0xc0aa, 0x07d0);
	rge_write_mac_ocp(sc, 0xc0a6, 0x01b5);
	rge_write_mac_ocp(sc, 0xc01e, 0x5555);

	/* Wait again for the same handshake bit. */
	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
			break;
	}

	/*
	 * If resuming from UPS (ultra power saving), wait for the PHY
	 * state field to reach 2, then clear the UPS-resume flags.
	 */
	if (rge_read_mac_ocp(sc, 0xd42c) & 0x0100) {
		aprint_error_dev(sc->sc_dev,
		    "%s: rtl8125_is_ups_resume!!\n", "rge_exit_oob");
		for (i = 0; i < RGE_TIMEOUT; i++) {
			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 2)
				break;
			DELAY(1000);
		}
		RGE_MAC_CLRBIT(sc, 0xd42c, 0x0100);
		if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5)
			RGE_PHY_CLRBIT(sc, 0xa466, 0x0001);
		RGE_PHY_CLRBIT(sc, 0xa468, 0x000a);
	}
}
   2883 
   2884 void
   2885 rge_write_csi(struct rge_softc *sc, uint32_t reg, uint32_t val)
   2886 {
   2887 	int i;
   2888 
   2889 	RGE_WRITE_4(sc, RGE_CSIDR, val);
   2890 	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
   2891 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT) | RGE_CSIAR_BUSY);
   2892 
   2893 	for (i = 0; i < 20000; i++) {
   2894 		 DELAY(1);
   2895 		 if (!(RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY))
   2896 			break;
   2897 	}
   2898 
   2899 	DELAY(20);
   2900 }
   2901 
   2902 uint32_t
   2903 rge_read_csi(struct rge_softc *sc, uint32_t reg)
   2904 {
   2905 	int i;
   2906 
   2907 	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
   2908 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT));
   2909 
   2910 	for (i = 0; i < 20000; i++) {
   2911 		 DELAY(1);
   2912 		 if (RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY)
   2913 			break;
   2914 	}
   2915 
   2916 	DELAY(20);
   2917 
   2918 	return (RGE_READ_4(sc, RGE_CSIDR));
   2919 }
   2920 
   2921 void
   2922 rge_write_mac_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
   2923 {
   2924 	uint32_t tmp;
   2925 
   2926 	tmp = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
   2927 	tmp += val;
   2928 	tmp |= RGE_MACOCP_BUSY;
   2929 	RGE_WRITE_4(sc, RGE_MACOCP, tmp);
   2930 }
   2931 
   2932 uint16_t
   2933 rge_read_mac_ocp(struct rge_softc *sc, uint16_t reg)
   2934 {
   2935 	uint32_t val;
   2936 
   2937 	val = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
   2938 	RGE_WRITE_4(sc, RGE_MACOCP, val);
   2939 
   2940 	return (RGE_READ_4(sc, RGE_MACOCP) & RGE_MACOCP_DATA_MASK);
   2941 }
   2942 
   2943 void
   2944 rge_write_ephy(struct rge_softc *sc, uint16_t reg, uint16_t val)
   2945 {
   2946 	uint32_t tmp;
   2947 	int i;
   2948 
   2949 	tmp = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
   2950 	tmp |= RGE_EPHYAR_BUSY | (val & RGE_EPHYAR_DATA_MASK);
   2951 	RGE_WRITE_4(sc, RGE_EPHYAR, tmp);
   2952 
   2953 	for (i = 0; i < 10; i++) {
   2954 		DELAY(100);
   2955 		if (!(RGE_READ_4(sc, RGE_EPHYAR) & RGE_EPHYAR_BUSY))
   2956 			break;
   2957 	}
   2958 
   2959 	DELAY(20);
   2960 }
   2961 
   2962 uint16_t
   2963 rge_read_ephy(struct rge_softc *sc, uint16_t reg)
   2964 {
   2965 	uint32_t val;
   2966 	int i;
   2967 
   2968 	val = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
   2969 	RGE_WRITE_4(sc, RGE_EPHYAR, val);
   2970 
   2971 	for (i = 0; i < 10; i++) {
   2972 		DELAY(100);
   2973 		val = RGE_READ_4(sc, RGE_EPHYAR);
   2974 		if (val & RGE_EPHYAR_BUSY)
   2975 			break;
   2976 	}
   2977 
   2978 	DELAY(20);
   2979 
   2980 	return (val & RGE_EPHYAR_DATA_MASK);
   2981 }
   2982 
   2983 void
   2984 rge_write_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg, uint16_t val)
   2985 {
   2986 	uint16_t off, phyaddr;
   2987 
   2988 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
   2989 	phyaddr <<= 4;
   2990 
   2991 	off = addr ? reg : 0x10 + (reg % 8);
   2992 
   2993 	phyaddr += (off - 16) << 1;
   2994 
   2995 	rge_write_phy_ocp(sc, phyaddr, val);
   2996 }
   2997 
   2998 uint16_t
   2999 rge_read_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg)
   3000 {
   3001 	uint16_t off, phyaddr;
   3002 
   3003 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
   3004 	phyaddr <<= 4;
   3005 
   3006 	off = addr ? reg : 0x10 + (reg % 8);
   3007 
   3008 	phyaddr += (off - 16) << 1;
   3009 
   3010 	return (rge_read_phy_ocp(sc, phyaddr));
   3011 }
   3012 
   3013 void
   3014 rge_write_phy_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
   3015 {
   3016 	uint32_t tmp;
   3017 	int i;
   3018 
   3019 	tmp = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
   3020 	tmp |= RGE_PHYOCP_BUSY | val;
   3021 	RGE_WRITE_4(sc, RGE_PHYOCP, tmp);
   3022 
   3023 	for (i = 0; i < RGE_TIMEOUT; i++) {
   3024 		DELAY(1);
   3025 		if (!(RGE_READ_4(sc, RGE_PHYOCP) & RGE_PHYOCP_BUSY))
   3026 			break;
   3027 	}
   3028 }
   3029 
   3030 uint16_t
   3031 rge_read_phy_ocp(struct rge_softc *sc, uint16_t reg)
   3032 {
   3033 	uint32_t val;
   3034 	int i;
   3035 
   3036 	val = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
   3037 	RGE_WRITE_4(sc, RGE_PHYOCP, val);
   3038 
   3039 	for (i = 0; i < RGE_TIMEOUT; i++) {
   3040 		DELAY(1);
   3041 		val = RGE_READ_4(sc, RGE_PHYOCP);
   3042 		if (val & RGE_PHYOCP_BUSY)
   3043 			break;
   3044 	}
   3045 
   3046 	return (val & RGE_PHYOCP_DATA_MASK);
   3047 }
   3048 
   3049 int
   3050 rge_get_link_status(struct rge_softc *sc)
   3051 {
   3052 	return ((RGE_READ_2(sc, RGE_PHYSTAT) & RGE_PHYSTAT_LINK) ? 1 : 0);
   3053 }
   3054 
void
rge_txstart(void *arg)
{
	struct rge_softc *sc = arg;

	/* Notify the chip that new descriptors were posted to the TX ring. */
	RGE_WRITE_2(sc, RGE_TXSTART, RGE_TXSTART_START);
}
   3062 
void
rge_tick(void *arg)
{
	struct rge_softc *sc = arg;
	int s;

	/* Periodic (once per second) link-state poll, run from the callout. */
	s = splnet();
	rge_link_state(sc);
	splx(s);

	callout_schedule(&sc->sc_timeout, hz);
}
   3075 
void
rge_link_state(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	int link = LINK_STATE_DOWN;

	/* Translate the PHY link bit into an ifnet link state. */
	if (rge_get_link_status(sc))
		link = LINK_STATE_UP;

	/* Only report transitions, to avoid redundant notifications. */
	if (ifp->if_link_state != link) { /* XXX not safe to access */
		if_link_state_change(ifp, link);
	}
}
   3089 
/* Module interface */

/* Declare the loadable-module entry point; depends on the pci module. */
MODULE(MODULE_CLASS_DRIVER, if_rge, "pci");

#ifdef _MODULE
/* Autoconfiguration glue generated from the module's ioconf file. */
#include "ioconf.c"
#endif
   3097 
   3098 static int
   3099 if_rge_modcmd(modcmd_t cmd, void *opaque)
   3100 {
   3101 	int error = 0;
   3102 
   3103 	switch (cmd) {
   3104 	case MODULE_CMD_INIT:
   3105 #ifdef _MODULE
   3106 		error = config_init_component(cfdriver_ioconf_rge,
   3107 		    cfattach_ioconf_rge, cfdata_ioconf_rge);
   3108 #endif
   3109 		return error;
   3110 	case MODULE_CMD_FINI:
   3111 #ifdef _MODULE
   3112 		error = config_fini_component(cfdriver_ioconf_rge,
   3113 		    cfattach_ioconf_rge, cfdata_ioconf_rge);
   3114 #endif
   3115 		return error;
   3116 	default:
   3117 		return ENOTTY;
   3118 	}
   3119 }
   3120