Home | History | Annotate | Line # | Download | only in pci
      1 /*	$NetBSD: if_rge.c,v 1.39 2025/10/27 15:21:32 pgoyette Exp $	*/
      2 /*	$OpenBSD: if_rge.c,v 1.9 2020/12/12 11:48:53 jan Exp $	*/
      3 
      4 /*
      5  * Copyright (c) 2019, 2020 Kevin Lo <kevlo (at) openbsd.org>
      6  *
      7  * Permission to use, copy, modify, and distribute this software for any
      8  * purpose with or without fee is hereby granted, provided that the above
      9  * copyright notice and this permission notice appear in all copies.
     10  *
     11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
     12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
     13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
     14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
     15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
     16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
     17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
     18  */
     19 
     20 #include <sys/cdefs.h>
     21 __KERNEL_RCSID(0, "$NetBSD: if_rge.c,v 1.39 2025/10/27 15:21:32 pgoyette Exp $");
     22 
     23 #if defined(_KERNEL_OPT)
     24 #include "opt_net_mpsafe.h"
     25 #endif
     26 
     27 #include <sys/types.h>
     28 
     29 #include <sys/param.h>
     30 #include <sys/systm.h>
     31 #include <sys/sockio.h>
     32 #include <sys/mbuf.h>
     33 #include <sys/kernel.h>
     34 #include <sys/socket.h>
     35 #include <sys/device.h>
     36 #include <sys/endian.h>
     37 #include <sys/callout.h>
     38 #include <sys/workqueue.h>
     39 #include <sys/module.h>
     40 
     41 #include <net/if.h>
     42 
     43 #include <net/if_dl.h>
     44 #include <net/if_ether.h>
     45 
     46 #include <net/if_media.h>
     47 
     48 #include <netinet/in.h>
     49 #include <net/if_ether.h>
     50 
     51 #include <net/bpf.h>
     52 
     53 #include <sys/bus.h>
     54 #include <machine/intr.h>
     55 
     56 #include <dev/mii/mii.h>
     57 
     58 #include <dev/pci/pcivar.h>
     59 #include <dev/pci/pcireg.h>
     60 #include <dev/pci/pcidevs.h>
     61 
     62 #include <dev/pci/if_rgereg.h>
     63 
     64 /* interrupt type counts */
     65 
     66 int counts[PCI_INTR_TYPE_SIZE] = {
     67 	[PCI_INTR_TYPE_INTX] = 1,
     68 	[PCI_INTR_TYPE_MSI] = 1,
     69 	[PCI_INTR_TYPE_MSIX] = 1,
     70  };
     71 	int max_type = PCI_INTR_TYPE_MSIX;
     72 
     73 #ifdef __NetBSD__
     74 #define letoh32 	htole32
     75 #define nitems(x) 	__arraycount(x)
     76 
     77 static struct mbuf *
     78 MCLGETL(struct rge_softc *sc __unused, int how,
     79     u_int size)
     80 {
     81 	struct mbuf *m;
     82 
     83 	MGETHDR(m, how, MT_DATA);
     84 	if (m == NULL)
     85 		return NULL;
     86 
     87 	MEXTMALLOC(m, size, how);
     88 	if ((m->m_flags & M_EXT) == 0) {
     89 		m_freem(m);
     90 		return NULL;
     91 	}
     92 	return m;
     93 }
     94 
     95 #ifdef NET_MPSAFE
     96 #define 	RGE_MPSAFE	1
     97 #define 	CALLOUT_FLAGS	CALLOUT_MPSAFE
     98 #else
     99 #define 	CALLOUT_FLAGS	0
    100 #endif
    101 #endif
    102 
    103 #ifdef RGE_DEBUG
    104 #define DPRINTF(x)	do { if (rge_debug > 0) printf x; } while (0)
    105 int rge_debug = 0;
    106 #else
    107 #define DPRINTF(x)
    108 #endif
    109 
    110 static int		rge_match(device_t, cfdata_t, void *);
    111 static void		rge_attach(device_t, device_t, void *);
    112 static int		rge_detach(device_t, int);
    113 
    114 int		rge_intr(void *);
    115 int		rge_encap(struct rge_softc *, struct mbuf *, int);
    116 int		rge_ioctl(struct ifnet *, u_long, void *);
    117 void		rge_start(struct ifnet *);
    118 void		rge_watchdog(struct ifnet *);
    119 int		rge_init(struct ifnet *);
    120 void		rge_stop(struct ifnet *, int);
    121 int		rge_ifmedia_upd(struct ifnet *);
    122 void		rge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
    123 static int	rge_allocmem(struct rge_softc *);
    124 static int	rge_deallocmem(struct rge_softc *);
    125 int		rge_newbuf(struct rge_softc *, int);
    126 static int	rge_rx_list_init(struct rge_softc *);
    127 static void	rge_rx_list_fini(struct rge_softc *);
    128 static void	rge_tx_list_init(struct rge_softc *);
    129 static void	rge_tx_list_fini(struct rge_softc *);
    130 int		rge_rxeof(struct rge_softc *);
    131 int		rge_txeof(struct rge_softc *);
    132 void		rge_reset(struct rge_softc *);
    133 void		rge_iff(struct rge_softc *);
    134 void		rge_set_phy_power(struct rge_softc *, int);
    135 void		rge_phy_config(struct rge_softc *);
    136 void		rge_phy_config_mac_cfg2_8126(struct rge_softc *);
    137 void		rge_phy_config_mac_cfg2(struct rge_softc *);
    138 void		rge_phy_config_mac_cfg3(struct rge_softc *);
    139 void		rge_phy_config_mac_cfg4(struct rge_softc *);
    140 void		rge_phy_config_mac_cfg5(struct rge_softc *);
    141 void		rge_phy_config_mcu(struct rge_softc *, uint16_t);
    142 void		rge_set_macaddr(struct rge_softc *, const uint8_t *);
    143 void		rge_get_macaddr(struct rge_softc *, uint8_t *);
    144 void		rge_hw_init(struct rge_softc *);
    145 void		rge_disable_phy_ocp_pwrsave(struct rge_softc *);
    146 void		rge_patch_phy_mcu(struct rge_softc *, int);
    147 void		rge_add_media_types(struct rge_softc *);
    148 void		rge_config_imtype(struct rge_softc *, int);
    149 void		rge_disable_hw_im(struct rge_softc *);
    150 void		rge_disable_sim_im(struct rge_softc *);
    151 void		rge_setup_sim_im(struct rge_softc *);
    152 void		rge_setup_intr(struct rge_softc *, int);
    153 void		rge_exit_oob(struct rge_softc *);
    154 void		rge_write_csi(struct rge_softc *, uint32_t, uint32_t);
    155 uint32_t	rge_read_csi(struct rge_softc *, uint32_t);
    156 void		rge_write_mac_ocp(struct rge_softc *, uint16_t, uint16_t);
    157 uint16_t	rge_read_mac_ocp(struct rge_softc *, uint16_t);
    158 void		rge_write_ephy(struct rge_softc *, uint16_t, uint16_t);
    159 uint16_t	rge_read_ephy(struct rge_softc *, uint16_t);
    160 void		rge_write_phy(struct rge_softc *, uint16_t, uint16_t, uint16_t);
    161 uint16_t	rge_read_phy(struct rge_softc *, uint16_t, uint16_t);
    162 void		rge_write_phy_ocp(struct rge_softc *, uint16_t, uint16_t);
    163 uint16_t	rge_read_phy_ocp(struct rge_softc *, uint16_t);
    164 int		rge_get_link_status(struct rge_softc *);
    165 void		rge_txstart(void *);
    166 void		rge_tick(void *);
    167 void		rge_link_state(struct rge_softc *);
    168 
/*
 * MAC/MCU configuration tables: register/value pairs expanded from
 * macros in if_rgereg.h, one table per supported MAC revision.
 * Consumed by rge_phy_config_mcu().
 */
static const struct {
	uint16_t reg;
	uint16_t val;
}  rtl8125_mac_cfg2_mcu[] = {
	RTL8125_MAC_CFG2_MCU
}, rtl8125_mac_cfg3_mcu[] = {
	RTL8125_MAC_CFG3_MCU
}, rtl8125_mac_cfg4_mcu[] = {
	RTL8125_MAC_CFG4_MCU
}, rtl8125_mac_cfg5_mcu[] = {
	RTL8125_MAC_CFG5_MCU
}, rtl8126_mac_cfg2_mcu[] = {
	RTL8126_MAC_CFG2_MCU
};
    183 
    184 CFATTACH_DECL_NEW(rge, sizeof(struct rge_softc), rge_match, rge_attach,
    185 		rge_detach, NULL);
    186 
/* PCI vendor/product IDs accepted by rge_match(). */
static const struct device_compatible_entry compat_data[] = {
	{ .id = PCI_ID_CODE(PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_E3000) },
	{ .id = PCI_ID_CODE(PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8125) },
	{ .id = PCI_ID_CODE(PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8126) },

	PCI_COMPAT_EOL
};
    194 
    195 static int
    196 rge_match(device_t parent, cfdata_t match, void *aux)
    197 {
    198 	struct pci_attach_args *pa =aux;
    199 
    200 	return pci_compatible_match(pa, compat_data);
    201 }
    202 
    203 void
    204 rge_attach(device_t parent, device_t self, void *aux)
    205 {
    206 	struct rge_softc *sc = device_private(self);
    207 	struct pci_attach_args *pa = aux;
    208 	pci_chipset_tag_t pc = pa->pa_pc;
    209 	char intrbuf[PCI_INTRSTR_LEN];
    210 	const char *intrstr = NULL;
    211 	struct ifnet *ifp;
    212 	pcireg_t reg;
    213 	uint32_t hwrev;
    214 	uint8_t eaddr[ETHER_ADDR_LEN];
    215 	int offset;
    216 	pcireg_t command;
    217 	const char *revstr;
    218 
    219 	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
    220 
    221 	sc->sc_dev = self;
    222 
    223 	pci_aprint_devinfo(pa, "Ethernet controller");
    224 
    225 	/*
    226 	 * Map control/status registers.
    227 	 */
    228 	if (pci_mapreg_map(pa, RGE_PCI_BAR2, PCI_MAPREG_TYPE_MEM |
    229 	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->rge_btag, &sc->rge_bhandle,
    230 	    NULL, &sc->rge_bsize)) {
    231 		if (pci_mapreg_map(pa, RGE_PCI_BAR1, PCI_MAPREG_TYPE_MEM |
    232 		    PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->rge_btag,
    233 		    &sc->rge_bhandle, NULL, &sc->rge_bsize)) {
    234 			if (pci_mapreg_map(pa, RGE_PCI_BAR0, PCI_MAPREG_TYPE_IO,
    235 			    0, &sc->rge_btag, &sc->rge_bhandle, NULL,
    236 			    &sc->rge_bsize)) {
    237 				aprint_error(": can't map mem or i/o space\n");
    238 				return;
    239 			}
    240 		}
    241 	}
    242 
    243 	/*
    244 	 * Allocate interrupt.
    245 	 */
    246 	if (pci_intr_alloc(pa, &sc->sc_intrs, NULL, 0) != 0) {
    247 		aprint_error(": couldn't map interrupt\n");
    248 		return;
    249 	}
    250 	switch (pci_intr_type(pc, sc->sc_intrs[0])) {
    251 	case PCI_INTR_TYPE_MSIX:
    252 	case PCI_INTR_TYPE_MSI:
    253 		sc->rge_flags |= RGE_FLAG_MSI;
    254 		break;
    255 	default:
    256 		break;
    257 	}
    258 	intrstr = pci_intr_string(pc, sc->sc_intrs[0],
    259 	    intrbuf, sizeof(intrbuf));
    260 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
    261 	    IPL_NET, rge_intr, sc, device_xname(sc->sc_dev));
    262 
    263 	if (sc->sc_ihs[0] == NULL) {
    264 		aprint_error_dev(sc->sc_dev, ": couldn't establish interrupt");
    265 		if (intrstr != NULL)
    266 			aprint_error(" at %s\n", intrstr);
    267 		aprint_error("\n");
    268 		return;
    269 	}
    270 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
    271 
    272 	if (pci_dma64_available(pa))
    273 		sc->sc_dmat = pa->pa_dmat64;
    274 	else
    275 		sc->sc_dmat = pa->pa_dmat;
    276 
    277 	sc->sc_pc = pa->pa_pc;
    278 	sc->sc_tag = pa->pa_tag;
    279 
    280 	/* Determine hardware revision */
    281 	hwrev = RGE_READ_4(sc, RGE_TXCFG) & RGE_TXCFG_HWREV;
    282 	switch (hwrev) {
    283 	case 0x60800000:
    284 		sc->rge_type = MAC_CFG2;
    285 		revstr = "Z1";
    286 		break;
    287 	case 0x60900000:
    288 		sc->rge_type = MAC_CFG3;
    289 		revstr = "Z2";
    290 		break;
    291 	case 0x64000000:
    292 		sc->rge_type = MAC_CFG4;
    293 		revstr = "A";
    294 		break;
    295 	case 0x64100000:
    296 		sc->rge_type = MAC_CFG5;
    297 		revstr = "B";
    298 		break;
    299 #if 0
    300 	case 0x64800000:
    301 		sc->rge_type = MAC_CFG1_8126;
    302 		revstr = "A";
    303 		break;
    304 #endif
    305 	case 0x64900000:
    306 		sc->rge_type = MAC_CFG2_8126;
    307 		revstr = "A";
    308 		break;
    309 #if 0
    310 	case 0x64a00000:
    311 		sc->rge_type = MAC_CFG3_8126;
    312 		revstr = "A";
    313 		break;
    314 #endif
    315 	default:
    316 		aprint_error(": unknown version 0x%08x\n", hwrev);
    317 		return;
    318 	}
    319 
    320 	aprint_normal_dev(sc->sc_dev, "HW rev. %s\n", revstr);
    321 	rge_config_imtype(sc, RGE_IMTYPE_SIM);
    322 
    323 	/*
    324 	 * PCI Express check.
    325 	 */
    326 	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
    327 	    &offset, NULL)) {
    328 		/* Disable PCIe ASPM and ECPM. */
    329 		reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
    330 		    offset + PCIE_LCSR);
    331 		reg &= ~(PCIE_LCSR_ASPM_L0S | PCIE_LCSR_ASPM_L1 |
    332 		    PCIE_LCSR_ENCLKPM);
    333 		pci_conf_write(pa->pa_pc, pa->pa_tag, offset + PCIE_LCSR,
    334 		    reg);
    335 	}
    336 
    337 	rge_exit_oob(sc);
    338 	rge_hw_init(sc);
    339 
    340 	rge_get_macaddr(sc, eaddr);
    341 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
    342 	    ether_sprintf(eaddr));
    343 
    344 	memcpy(sc->sc_enaddr, eaddr, ETHER_ADDR_LEN);
    345 
    346 	rge_set_phy_power(sc, 1);
    347 	rge_phy_config(sc);
    348 
    349 	if (rge_allocmem(sc))
    350 		return;
    351 
    352 	ifp = &sc->sc_ec.ec_if;
    353 	ifp->if_softc = sc;
    354 	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
    355 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    356 #ifdef RGE_MPSAFE
    357 	ifp->if_extflags = IFEF_MPSAFE;
    358 #endif
    359 	ifp->if_ioctl = rge_ioctl;
    360 	ifp->if_stop = rge_stop;
    361 	ifp->if_start = rge_start;
    362 	ifp->if_init = rge_init;
    363 	ifp->if_watchdog = rge_watchdog;
    364 	IFQ_SET_MAXLEN(&ifp->if_snd, RGE_TX_LIST_CNT - 1);
    365 
    366 #if notyet
    367 	ifp->if_capabilities = IFCAP_CSUM_IPv4_Rx |
    368 	    IFCAP_CSUM_IPv4_Tx |IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_TCPv4_Tx|
    369 	    IFCAP_CSUM_UDPv4_Rx | IFCAP_CSUM_UDPv4_Tx;
    370 #endif
    371 
    372 	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
    373 	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
    374 
    375 	callout_init(&sc->sc_timeout, CALLOUT_FLAGS);
    376 	callout_setfunc(&sc->sc_timeout, rge_tick, sc);
    377 
    378 	command = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
    379 	command |= PCI_COMMAND_MASTER_ENABLE;
    380 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
    381 
    382 	/* Initialize ifmedia structures. */
    383 	sc->sc_ec.ec_ifmedia = &sc->sc_media;
    384 	ifmedia_init(&sc->sc_media, IFM_IMASK, rge_ifmedia_upd,
    385 	    rge_ifmedia_sts);
    386 	rge_add_media_types(sc);
    387 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
    388 	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
    389 	sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media;
    390 
    391 	if_attach(ifp);
    392 	if_deferred_start_init(ifp, NULL);
    393 	ether_ifattach(ifp, eaddr);
    394 
    395 	if (pmf_device_register(self, NULL, NULL))
    396 		pmf_class_network_register(self, ifp);
    397 	else
    398 		aprint_error_dev(self, "couldn't establish power handler\n");
    399 }
    400 
    401 static int
    402 rge_detach(device_t self, int flags)
    403 {
    404 	struct rge_softc *sc = device_private(self);
    405 	struct ifnet *ifp = &sc->sc_ec.ec_if;
    406 	pci_chipset_tag_t pc = sc->sc_pc;
    407 
    408 	rge_stop(ifp, 1);
    409 
    410 	pmf_device_deregister(self);
    411 
    412 	ether_ifdetach(ifp);
    413 
    414 	if_detach(ifp);
    415 
    416 	ifmedia_fini(&sc->sc_media);
    417 
    418 	if (sc->sc_ihs[0] != NULL) {
    419 		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[0]);
    420 		sc->sc_ihs[0] = NULL;
    421 	}
    422 
    423 	pci_intr_release(pc, sc->sc_intrs, 1);
    424 
    425 	rge_deallocmem(sc);
    426 
    427         return 0;
    428 }
    429 
/*
 * Interrupt handler.  Masks the chip's interrupts, acknowledges and
 * dispatches RX/TX/error events, manages the simulated interrupt
 * moderation timer, then unmasks.  Returns nonzero if the interrupt
 * was ours.
 */
int
rge_intr(void *arg)
{
	struct rge_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint32_t status;
	int claimed = 0, rx, tx;

	if (!(ifp->if_flags & IFF_RUNNING))
		return (0);

	/* Disable interrupts. */
	RGE_WRITE_4(sc, RGE_IMR, 0);

	/*
	 * For shared INTx, bail out early if none of our interrupt
	 * sources are pending.
	 * NOTE(review): this path returns with IMR still cleared --
	 * confirm this matches the chip's intended INTx protocol.
	 */
	if (!(sc->rge_flags & RGE_FLAG_MSI)) {
		if ((RGE_READ_4(sc, RGE_ISR) & sc->rge_intrs) == 0)
			return (0);
	}

	/* Read and acknowledge all pending events. */
	status = RGE_READ_4(sc, RGE_ISR);
	if (status)
		RGE_WRITE_4(sc, RGE_ISR, status);

	if (status & RGE_ISR_PCS_TIMEOUT)
		claimed = 1;

	rx = tx = 0;
	if (status & sc->rge_intrs) {
		if (status &
		    (sc->rge_rx_ack | RGE_ISR_RX_ERR | RGE_ISR_RX_FIFO_OFLOW)) {
			rx |= rge_rxeof(sc);
			claimed = 1;
		}

		if (status & (sc->rge_tx_ack | RGE_ISR_TX_ERR)) {
			tx |= rge_txeof(sc);
			claimed = 1;
		}

		/* Fatal error: reinitialize the whole interface. */
		if (status & RGE_ISR_SYSTEM_ERR) {
			KERNEL_LOCK(1, NULL);
			rge_init(ifp);
			KERNEL_UNLOCK_ONE(NULL);
			claimed = 1;
		}
	}

	if (sc->rge_timerintr) {
		if ((tx | rx) == 0) {
			/*
			 * Nothing needs to be processed, fallback
			 * to use TX/RX interrupts.
			 */
			rge_setup_intr(sc, RGE_IMTYPE_NONE);

			/*
			 * Recollect, mainly to avoid the possible
			 * race introduced by changing interrupt
			 * masks.
			 */
			rge_rxeof(sc);
			rge_txeof(sc);
		} else
			RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
	} else if (tx | rx) {
		/*
		 * Assume that using simulated interrupt moderation
		 * (hardware timer based) could reduce the interrupt
		 * rate.
		 */
		rge_setup_intr(sc, RGE_IMTYPE_SIM);
	}

	/* Re-enable our interrupt sources. */
	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);

	return (claimed);
}
    507 
/*
 * Map mbuf (chain) m into the TX ring starting at descriptor idx.
 * Returns the number of descriptors consumed, or 0 on failure (the
 * caller then owns and frees the mbuf).  Ownership (OWN) is set on
 * the first descriptor last so the chip never sees a partial chain.
 */
int
rge_encap(struct rge_softc *sc, struct mbuf *m, int idx)
{
	struct rge_tx_desc *d = NULL;
	struct rge_txq *txq;
	bus_dmamap_t txmap;
	uint32_t cmdsts, cflags = 0;
	int cur, error, i, last, nsegs;

#if notyet
	/*
	 * Set RGE_TDEXTSTS_IPCSUM if any checksum offloading is requested.
	 * Otherwise, RGE_TDEXTSTS_TCPCSUM / RGE_TDEXTSTS_UDPCSUM does not
	 * take affect.
	 */
	if ((m->m_pkthdr.csum_flags &
	    (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) != 0) {
		cflags |= RGE_TDEXTSTS_IPCSUM;
		if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
			cflags |= RGE_TDEXTSTS_TCPCSUM;
		if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
			cflags |= RGE_TDEXTSTS_UDPCSUM;
	}
#endif

	txq = &sc->rge_ldata.rge_txq[idx];
	txmap = txq->txq_dmamap;

	/* Load the mbuf; on EFBIG, defragment once and retry. */
	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m, BUS_DMA_NOWAIT);
	switch (error) {
	case 0:
		break;
	case EFBIG: /* mbuf chain is too fragmented */
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m,
		    BUS_DMA_NOWAIT) == 0)
			break;

		/* FALLTHROUGH */
	default:
		return (0);
	}

	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	nsegs = txmap->dm_nsegs;

	/* Set up hardware VLAN tagging. */
	if (vlan_has_tag(m))
		cflags |= bswap16(vlan_get_tag(m)) | RGE_TDEXTSTS_VTAG;

	/* Fill one descriptor per DMA segment; SOF only on the first. */
	last = cur = idx;
	cmdsts = RGE_TDCMDSTS_SOF;

	for (i = 0; i < txmap->dm_nsegs; i++) {
		d = &sc->rge_ldata.rge_tx_list[cur];

		d->rge_extsts = htole32(cflags);
		d->rge_addrlo = htole32(RGE_ADDR_LO(txmap->dm_segs[i].ds_addr));
		d->rge_addrhi = htole32(RGE_ADDR_HI(txmap->dm_segs[i].ds_addr));

		cmdsts |= txmap->dm_segs[i].ds_len;

		/* Mark end-of-ring on the physically last descriptor. */
		if (cur == RGE_TX_LIST_CNT - 1)
			cmdsts |= RGE_TDCMDSTS_EOR;

		d->rge_cmdsts = htole32(cmdsts);

		last = cur;
		cmdsts = RGE_TDCMDSTS_OWN;
		cur = RGE_NEXT_TX_DESC(cur);
	}

	/* Set EOF on the last descriptor. */
	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_EOF);

	/* Transfer ownership of packet to the chip. */
	d = &sc->rge_ldata.rge_tx_list[idx];

	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_OWN);

	/*
	 * NOTE(review): this syncs the single descriptor at 'cur', which
	 * is one past the last one written -- confirm the intended range
	 * (idx..last) is made visible to the device.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
	    cur * sizeof(struct rge_tx_desc), sizeof(struct rge_tx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Update info of TX queue and descriptors. */
	txq->txq_mbuf = m;
	txq->txq_descidx = last;

	return (nsegs);
}
    600 
    601 int
    602 rge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
    603 {
    604 	struct rge_softc *sc = ifp->if_softc;
    605 	//struct ifreq *ifr = (struct ifreq *)data;
    606 	int s, error = 0;
    607 
    608 	s = splnet();
    609 
    610 	switch (cmd) {
    611 	case SIOCSIFFLAGS:
    612 		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
    613 			break;
    614 		/* XXX set an ifflags callback and let ether_ioctl
    615 		 * handle all of this.
    616 		 */
    617 		if (ifp->if_flags & IFF_UP) {
    618 			if (ifp->if_flags & IFF_RUNNING)
    619 				error = ENETRESET;
    620 			else
    621 				rge_init(ifp);
    622 		} else {
    623 			if (ifp->if_flags & IFF_RUNNING)
    624 				rge_stop(ifp, 1);
    625 		}
    626 		break;
    627 	default:
    628 		error = ether_ioctl(ifp, cmd, data);
    629 	}
    630 
    631 	if (error == ENETRESET) {
    632 		if (ifp->if_flags & IFF_RUNNING)
    633 			rge_iff(sc);
    634 		error = 0;
    635 	}
    636 
    637 	splx(s);
    638 	return (error);
    639 }
    640 
/*
 * Transmit start routine: drain the send queue into the TX ring while
 * descriptors are available, then kick the chip once at the end.
 */
void
rge_start(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int free, idx, used;
	int queued = 0;

/* Local helper; treats UNKNOWN link state as usable. */
#define LINK_STATE_IS_UP(_s)    \
	((_s) >= LINK_STATE_UP || (_s) == LINK_STATE_UNKNOWN)

	/* No link: drop everything queued for output. */
	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
		IFQ_PURGE(&ifp->if_snd);
		return;
	}

	/* Calculate free space. */
	idx = sc->rge_ldata.rge_txq_prodidx;
	free = sc->rge_ldata.rge_txq_considx;
	if (free <= idx)
		free += RGE_TX_LIST_CNT;
	free -= idx;

	for (;;) {
		/*
		 * Stop when a maximally-fragmented packet might not fit
		 * (plus slack so producer never catches consumer).
		 */
		if (RGE_TX_NSEGS >= free + 2) {
			SET(ifp->if_flags, IFF_OACTIVE);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		used = rge_encap(sc, m, idx);
		if (used == 0) {
			/* Encap failed; mbuf ownership stays with us. */
			m_freem(m);
			continue;
		}

		KASSERT(used <= free);
		free -= used;

		bpf_mtap(ifp, m, BPF_D_OUT);

		idx += used;
		if (idx >= RGE_TX_LIST_CNT)
			idx -= RGE_TX_LIST_CNT;

		queued++;
	}

	if (queued == 0)
		return;

	/* Set a timeout in case the chip goes out to lunch. */
	ifp->if_timer = 5;

	sc->rge_ldata.rge_txq_prodidx = idx;
	rge_txstart(sc);
}
    701 
    702 void
    703 rge_watchdog(struct ifnet *ifp)
    704 {
    705 	struct rge_softc *sc = ifp->if_softc;
    706 
    707 	device_printf(sc->sc_dev, "watchdog timeout\n");
    708 	if_statinc(ifp, if_oerrors);
    709 
    710 	rge_init(ifp);
    711 }
    712 
/*
 * (Re)initialize the interface: program descriptor rings, apply the
 * vendor-prescribed MAC/PHY register setup, enable RX/TX and
 * interrupts.  Returns 0 on success or an errno on failure.
 *
 * Most of the raw register/OCP writes below are undocumented values
 * taken from the vendor driver; their ORDER matters -- do not reorder.
 */
int
rge_init(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	uint32_t val;
	unsigned i;

	rge_stop(ifp, 0);

	/* Set MAC address. */
	rge_set_macaddr(sc, CLLADDR(ifp->if_sadl));

	/* Set Maximum frame size. */
	RGE_WRITE_2(sc, RGE_RXMAXSIZE, RGE_JUMBO_FRAMELEN);

	/* Initialize RX descriptors list. */
	int error = rge_rx_list_init(sc);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "init failed: no memory for RX buffers\n");
		rge_stop(ifp, 1);
		return error;
	}

	/* Initialize TX descriptors. */
	rge_tx_list_init(sc);

	/* Load the addresses of the RX and TX lists into the chip. */
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_LO,
	    RGE_ADDR_LO(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_HI,
	    RGE_ADDR_HI(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_LO,
	    RGE_ADDR_LO(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_HI,
	    RGE_ADDR_HI(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));

	/* Unlock config registers for writing. */
	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);

	RGE_CLRBIT_1(sc, 0xf1, 0x80);
	if (sc->rge_type == MAC_CFG2_8126)
		RGE_CLRBIT_1(sc, RGE_INT_CFG0, 0x08);
	else
		RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
	if (sc->rge_type != MAC_CFG2_8126)
		RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_RDY_TO_L23);

	/* Clear interrupt moderation timer. */
	for (i = 0; i < 64; i++)
		RGE_WRITE_4(sc, RGE_INTMITI(i), 0);

	/* Set the initial RX and TX configurations. */
	if (sc->rge_type == MAC_CFG2_8126)
		RGE_WRITE_4(sc, RGE_RXCFG, RGE_RXCFG_CONFIG_8126);
	else
		RGE_WRITE_4(sc, RGE_RXCFG, RGE_RXCFG_CONFIG);
	RGE_WRITE_4(sc, RGE_TXCFG, RGE_TXCFG_CONFIG);

	/* Vendor-specified CSI tweak (meaning not documented). */
	val = rge_read_csi(sc, 0x70c) & ~0xff000000;
	rge_write_csi(sc, 0x70c, val | 0x27000000);

	/* Enable hardware optimization function. */
	val = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x78) & ~0x00007000;
	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x78, val | 0x00005000);

	if (sc->rge_type == MAC_CFG2_8126) {
		/* Disable L1 timeout. */
		val = rge_read_csi(sc, 0x890) & ~0x00000001;
		rge_write_csi(sc, 0x890, val);
	} else
		RGE_WRITE_2(sc, 0x0382, 0x221b);
	RGE_WRITE_1(sc, 0x4500, 0);
	RGE_WRITE_2(sc, 0x4800, 0);
	RGE_CLRBIT_1(sc, RGE_CFG1, RGE_CFG1_SPEED_DOWN);

	/*
	 * The following MAC OCP writes are undocumented values from the
	 * vendor driver, varied per MAC revision.
	 */
	rge_write_mac_ocp(sc, 0xc140, 0xffff);
	rge_write_mac_ocp(sc, 0xc142, 0xffff);

	val = rge_read_mac_ocp(sc, 0xd3e2) & ~0x0fff;
	rge_write_mac_ocp(sc, 0xd3e2, val | 0x03a9);

	RGE_MAC_CLRBIT(sc, 0xd3e4, 0x00ff);
	RGE_MAC_SETBIT(sc, 0xe860, 0x0080);
	RGE_MAC_SETBIT(sc, 0xeb58, 0x0001);

	if (sc->rge_type == MAC_CFG2_8126)
		RGE_CLRBIT_1(sc, 0xd8, 0x02);

	val = rge_read_mac_ocp(sc, 0xe614) & ~0x0700;
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3 ||
	    sc->rge_type == MAC_CFG2_8126)
		rge_write_mac_ocp(sc, 0xe614, val | 0x0400);
	else
		rge_write_mac_ocp(sc, 0xe614, val | 0x0200);

	RGE_MAC_CLRBIT(sc, 0xe63e, 0x0c00);

	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3 ||
	    sc->rge_type == MAC_CFG2_8126) {
		val = rge_read_mac_ocp(sc, 0xe63e) & ~0x0030;
		rge_write_mac_ocp(sc, 0xe63e, val | 0x0020);
	} else
		RGE_MAC_CLRBIT(sc, 0xe63e, 0x0030);

	RGE_MAC_SETBIT(sc, 0xc0b4, 0x000c);

	val = rge_read_mac_ocp(sc, 0xeb6a) & ~0x00ff;
	rge_write_mac_ocp(sc, 0xeb6a, val | 0x0033);

	val = rge_read_mac_ocp(sc, 0xeb50) & ~0x03e0;
	rge_write_mac_ocp(sc, 0xeb50, val | 0x0040);

	val = rge_read_mac_ocp(sc, 0xe056) & ~0x00f0;
	rge_write_mac_ocp(sc, 0xe056, val | 0x0030);

	RGE_WRITE_1(sc, RGE_TDFNR, 0x10);

	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_TX_10M_PS_EN);

	RGE_MAC_CLRBIT(sc, 0xe040, 0x1000);

	val = rge_read_mac_ocp(sc, 0xea1c) & ~0x0003;
	rge_write_mac_ocp(sc, 0xea1c, val | 0x0001);

	val = rge_read_mac_ocp(sc, 0xe0c0) & ~0x4f0f;
	rge_write_mac_ocp(sc, 0xe0c0, val | 0x4403);

	RGE_MAC_SETBIT(sc, 0xe052, 0x0068);
	RGE_MAC_CLRBIT(sc, 0xe052, 0x0080);

	val = rge_read_mac_ocp(sc, 0xc0ac) & ~0x0080;
	rge_write_mac_ocp(sc, 0xc0ac, val | 0x1f00);

	val = rge_read_mac_ocp(sc, 0xd430) & ~0x0fff;
	rge_write_mac_ocp(sc, 0xd430, val | 0x047f);

	val = rge_read_mac_ocp(sc, 0xe84c) & ~0x0040;
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		rge_write_mac_ocp(sc, 0xe84c, 0x00c0);
	else
		rge_write_mac_ocp(sc, 0xe84c, 0x0080);

	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_PFM_EN);

	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		RGE_SETBIT_1(sc, RGE_MCUCMD, 0x01);

	/* Disable EEE plus. */
	RGE_MAC_CLRBIT(sc, 0xe080, 0x0002);

	if (sc->rge_type == MAC_CFG2_8126)
		RGE_MAC_CLRBIT(sc, 0xea1c, 0x0304);
	else
		RGE_MAC_CLRBIT(sc, 0xea1c, 0x0004);

	/* Pulse bit 0 of 0xeb54 (vendor-specified). */
	RGE_MAC_SETBIT(sc, 0xeb54, 0x0001);
	DELAY(1);
	RGE_MAC_CLRBIT(sc, 0xeb54, 0x0001);

	RGE_CLRBIT_4(sc, 0x1880, 0x0030);

	rge_write_mac_ocp(sc, 0xe098, 0xc302);

	/* Program hardware VLAN stripping to match capenable. */
	if ((sc->sc_ec.ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0)
		RGE_SETBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);
	else
		RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);

	RGE_SETBIT_2(sc, RGE_CPLUSCMD, RGE_CPLUSCMD_RXCSUM);

	/* Wait (up to ~10ms) for bit 0x2000 of OCP 0xe00e to clear. */
	for (i = 0; i < 10; i++) {
		if (!(rge_read_mac_ocp(sc, 0xe00e) & 0x2000))
			break;
		DELAY(1000);
	}

	/* Disable RXDV gate. */
	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
	DELAY(2000);

	rge_ifmedia_upd(ifp);

	/* Enable transmit and receive. */
	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_TXENB | RGE_CMD_RXENB);

	/* Program promiscuous mode and multicast filters. */
	rge_iff(sc);

	if (sc->rge_type == MAC_CFG2_8126)
		RGE_CLRBIT_1(sc, RGE_INT_CFG0, 0x08);
	else
		RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);

	/* Re-lock config registers. */
	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);

	/* Enable interrupts. */
	rge_setup_intr(sc, RGE_IMTYPE_SIM);

	ifp->if_flags |= IFF_RUNNING;
	CLR(ifp->if_flags, IFF_OACTIVE);

	callout_schedule(&sc->sc_timeout, 1);

	return (0);
}
    920 
    921 /*
    922  * Stop the adapter and free any mbufs allocated to the RX and TX lists.
    923  */
    924 void
    925 rge_stop(struct ifnet *ifp, int disable)
    926 {
    927 	struct rge_softc *sc = ifp->if_softc;
    928 
    929 	callout_halt(&sc->sc_timeout, NULL);
    930 
    931 	ifp->if_timer = 0;
    932 	ifp->if_flags &= ~IFF_RUNNING;
    933 	sc->rge_timerintr = 0;
    934 
    935 	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
    936 	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
    937 	    RGE_RXCFG_ERRPKT);
    938 
    939 	RGE_WRITE_4(sc, RGE_IMR, 0);
    940 
    941 	/* Config interrupt type for RTL8126. */
    942 	if (sc->rge_type == MAC_CFG2_8126)
    943 		RGE_CLRBIT_1(sc, RGE_INT_CFG0, RGE_INT_CFG0_EN);
    944 
    945 	/* Clear timer interrupts. */
    946 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
    947 	RGE_WRITE_4(sc, RGE_TIMERINT1, 0);
    948 	RGE_WRITE_4(sc, RGE_TIMERINT2, 0);
    949 	RGE_WRITE_4(sc, RGE_TIMERINT3, 0);
    950 
    951 	rge_reset(sc);
    952 
    953 //	intr_barrier(sc->sc_ih);
    954 //	ifq_barrier(&ifp->if_snd);
    955 /*	ifq_clr_oactive(&ifp->if_snd); Sevan - OpenBSD queue API */
    956 
    957 	if (sc->rge_head != NULL) {
    958 		m_freem(sc->rge_head);
    959 		sc->rge_head = sc->rge_tail = NULL;
    960 	}
    961 
    962 	rge_tx_list_fini(sc);
    963 	rge_rx_list_fini(sc);
    964 }
    965 
    966 /*
    967  * Set media options.
    968  */
int
rge_ifmedia_upd(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_media;
	int anar, gig, val;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Disable Gigabit Lite. */
	RGE_PHY_CLRBIT(sc, 0xa428, 0x0200);
	RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0001);
	if (sc->rge_type == MAC_CFG2_8126)
		RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0002);

	/*
	 * Start from the current multi-gig advertisement register with
	 * the 2.5G (and on RTL8126, 5G) bits cleared; the switch below
	 * re-adds the ones the selected media allows.
	 */
	val = rge_read_phy_ocp(sc, 0xa5d4);
	val &= ~RGE_ADV_2500TFDX;
	if (sc->rge_type == MAC_CFG2_8126)
		val &= ~RGE_ADV_5000TFDX;

	/* Default advertisement: everything up to 1000baseT. */
	anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
	gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		/* Advertise 2.5G, plus 5G on RTL8126-class parts. */
		val |= (sc->rge_type != MAC_CFG2_8126) ?
		    RGE_ADV_2500TFDX : (RGE_ADV_2500TFDX | RGE_ADV_5000TFDX);
		break;
	case IFM_5000_T:
		/*
		 * NOTE(review): not gated on MAC_CFG2_8126 — confirm that
		 * the media list only offers 5000baseT on 8126 hardware.
		 */
		val |= RGE_ADV_5000TFDX;
		ifp->if_baudrate = IF_Gbps(5);
		break;
	case IFM_2500_T:
		val |= RGE_ADV_2500TFDX;
		ifp->if_baudrate = IF_Mbps(2500);
		break;
	case IFM_1000_T:
		ifp->if_baudrate = IF_Gbps(1);
		break;
	case IFM_100_TX:
		/* Keep 1000baseT bits out of GTCR; tailor ANAR for duplex. */
		gig = rge_read_phy(sc, 0, MII_100T2CR) &
		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
		    ANAR_TX | ANAR_TX_FD | ANAR_10_FD | ANAR_10 :
		    ANAR_TX | ANAR_10_FD | ANAR_10;
		ifp->if_baudrate = IF_Mbps(100);
		break;
	case IFM_10_T:
		gig = rge_read_phy(sc, 0, MII_100T2CR) &
		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
		    ANAR_10_FD | ANAR_10 : ANAR_10;
		ifp->if_baudrate = IF_Mbps(10);
		break;
	default:
		device_printf(sc->sc_dev,
		    "unsupported media type\n");
		return (EINVAL);
	}

	/* Program the advertisement registers and restart autonegotiation. */
	rge_write_phy(sc, 0, MII_ANAR, anar | ANAR_PAUSE_ASYM | ANAR_FC);
	rge_write_phy(sc, 0, MII_100T2CR, gig);
	rge_write_phy_ocp(sc, 0xa5d4, val);
	rge_write_phy(sc, 0, MII_BMCR, BMCR_RESET | BMCR_AUTOEN |
	    BMCR_STARTNEG);

	return (0);
}
   1038 
   1039 /*
   1040  * Report current media status.
   1041  */
   1042 void
   1043 rge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
   1044 {
   1045 	struct rge_softc *sc = ifp->if_softc;
   1046 	uint16_t status = 0;
   1047 
   1048 	ifmr->ifm_status = IFM_AVALID;
   1049 	ifmr->ifm_active = IFM_ETHER;
   1050 
   1051 	if (rge_get_link_status(sc)) {
   1052 		ifmr->ifm_status |= IFM_ACTIVE;
   1053 
   1054 		status = RGE_READ_2(sc, RGE_PHYSTAT);
   1055 		if ((status & RGE_PHYSTAT_FDX) ||
   1056 		    (status & (RGE_PHYSTAT_2500MBPS | RGE_PHYSTAT_5000MBPS)))
   1057 			ifmr->ifm_active |= IFM_FDX;
   1058 		else
   1059 			ifmr->ifm_active |= IFM_HDX;
   1060 
   1061 		if (status & RGE_PHYSTAT_10MBPS)
   1062 			ifmr->ifm_active |= IFM_10_T;
   1063 		else if (status & RGE_PHYSTAT_100MBPS)
   1064 			ifmr->ifm_active |= IFM_100_TX;
   1065 		else if (status & RGE_PHYSTAT_1000MBPS)
   1066 			ifmr->ifm_active |= IFM_1000_T;
   1067 		else if (status & RGE_PHYSTAT_2500MBPS)
   1068 			ifmr->ifm_active |= IFM_2500_T;
   1069 		else if (status & RGE_PHYSTAT_5000MBPS)
   1070 			ifmr->ifm_active |= IFM_5000_T;
   1071 	}
   1072 }
   1073 
   1074 /*
   1075  * Allocate memory for RX/TX rings.
   1076  *
   1077  * XXX There is no tear-down for this if it any part fails, so everything
   1078  * remains allocated.
   1079  */
   1080 static int
   1081 rge_allocmem(struct rge_softc *sc)
   1082 {
   1083 	int error, i;
   1084 
   1085 	/* Allocate DMA'able memory for the TX ring. */
   1086 	error = bus_dmamap_create(sc->sc_dmat, RGE_TX_LIST_SZ, 1,
   1087 	    RGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT, &sc->rge_ldata.rge_tx_list_map);
   1088 	if (error) {
   1089 		aprint_error_dev(sc->sc_dev, "can't create TX list map\n");
   1090 		return (error);
   1091 	}
   1092 	error = bus_dmamem_alloc(sc->sc_dmat, RGE_TX_LIST_SZ, RGE_ALIGN, 0,
   1093 	    &sc->rge_ldata.rge_tx_listseg, 1, &sc->rge_ldata.rge_tx_listnseg,
   1094 	    BUS_DMA_NOWAIT);
   1095 	if (error) {
   1096 		aprint_error_dev(sc->sc_dev, "can't alloc TX list\n");
   1097 		return (error);
   1098 	}
   1099 
   1100 	/* Load the map for the TX ring. */
   1101 	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
   1102 	    sc->rge_ldata.rge_tx_listnseg, RGE_TX_LIST_SZ,
   1103 	    (void **) &sc->rge_ldata.rge_tx_list,
   1104 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
   1105 	if (error) {
   1106 		aprint_error_dev(sc->sc_dev, "can't map TX dma buffers\n");
   1107 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
   1108 		    sc->rge_ldata.rge_tx_listnseg);
   1109 		return (error);
   1110 	}
   1111 	memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);
   1112 	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
   1113 	    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
   1114 	if (error) {
   1115 		aprint_error_dev(sc->sc_dev, "can't load TX dma map\n");
   1116 		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map);
   1117 		bus_dmamem_unmap(sc->sc_dmat,
   1118 		    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ);
   1119 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
   1120 		    sc->rge_ldata.rge_tx_listnseg);
   1121 		return (error);
   1122 	}
   1123 
   1124 	/* Create DMA maps for TX buffers. */
   1125 	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
   1126 		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN,
   1127 		    RGE_TX_NSEGS, RGE_JUMBO_FRAMELEN, 0, 0,
   1128 		    &sc->rge_ldata.rge_txq[i].txq_dmamap);
   1129 		if (error) {
   1130 			aprint_error_dev(sc->sc_dev, "can't create DMA map for TX\n");
   1131 			return (error);
   1132 		}
   1133 	}
   1134 
   1135 	/* Allocate DMA'able memory for the RX ring. */
   1136 	error = bus_dmamap_create(sc->sc_dmat, RGE_RX_LIST_SZ, 1,
   1137 	    RGE_RX_LIST_SZ, 0, 0, &sc->rge_ldata.rge_rx_list_map);
   1138 	if (error) {
   1139 		aprint_error_dev(sc->sc_dev, "can't create RX list map\n");
   1140 		return (error);
   1141 	}
   1142 	error = bus_dmamem_alloc(sc->sc_dmat, RGE_RX_LIST_SZ, RGE_ALIGN, 0,
   1143 	    &sc->rge_ldata.rge_rx_listseg, 1, &sc->rge_ldata.rge_rx_listnseg,
   1144 	    BUS_DMA_NOWAIT);
   1145 	if (error) {
   1146 		aprint_error_dev(sc->sc_dev, "can't alloc RX list\n");
   1147 		return (error);
   1148 	}
   1149 
   1150 	/* Load the map for the RX ring. */
   1151 	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
   1152 	    sc->rge_ldata.rge_rx_listnseg, RGE_RX_LIST_SZ,
   1153 	    (void **) &sc->rge_ldata.rge_rx_list,
   1154 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
   1155 	if (error) {
   1156 		aprint_error_dev(sc->sc_dev, "can't map RX dma buffers\n");
   1157 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
   1158 		    sc->rge_ldata.rge_rx_listnseg);
   1159 		return (error);
   1160 	}
   1161 	memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);
   1162 	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
   1163 	    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
   1164 	if (error) {
   1165 		aprint_error_dev(sc->sc_dev, "can't load RX dma map\n");
   1166 		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map);
   1167 		bus_dmamem_unmap(sc->sc_dmat,
   1168 		    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ);
   1169 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
   1170 		    sc->rge_ldata.rge_rx_listnseg);
   1171 		return (error);
   1172 	}
   1173 
   1174 	/*
   1175 	 * Create DMA maps for RX buffers.  Use BUS_DMA_ALLOCNOW to avoid any
   1176 	 * potential failure in bus_dmamap_load_mbuf() in the RX path.
   1177 	 */
   1178 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
   1179 		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN, 1,
   1180 		    RGE_JUMBO_FRAMELEN, 0, BUS_DMA_ALLOCNOW,
   1181 		    &sc->rge_ldata.rge_rxq[i].rxq_dmamap);
   1182 		if (error) {
   1183 			aprint_error_dev(sc->sc_dev, "can't create DMA map for RX\n");
   1184 			return (error);
   1185 		}
   1186 	}
   1187 
   1188 	return (error);
   1189 }
   1190 
   1191 /*
   1192  * release memory allocated to RX/TX rings
   1193  */
   1194 static int
   1195 rge_deallocmem(struct rge_softc *sc)
   1196 {
   1197 	int i;
   1198 
   1199 	/* Destroy DMA maps for RX buffers */
   1200 	for (i = 0; i < RGE_RX_LIST_CNT; i++)
   1201 		bus_dmamap_destroy(sc->sc_dmat,
   1202 		    sc->rge_ldata.rge_rxq[i].rxq_dmamap);
   1203 
   1204 	/* Unload the map for the RX ring */
   1205 	bus_dmamap_unload(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map);
   1206 	bus_dmamem_unmap(sc->sc_dmat,
   1207 	    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ);
   1208 
   1209 	/* Deallocate DMA'able memory for the RX ring. */
   1210 	bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
   1211 	    sc->rge_ldata.rge_rx_listnseg);
   1212 	bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map);
   1213 
   1214 	/* Destroy DMA maps for TX buffers. */
   1215 	for (i = 0; i < RGE_TX_LIST_CNT; i++)
   1216 		bus_dmamap_destroy(sc->sc_dmat,
   1217 		    sc->rge_ldata.rge_txq[i].txq_dmamap);
   1218 
   1219 	/* Unload the map for the TX ring */
   1220 	bus_dmamap_unload(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map);
   1221 	bus_dmamem_unmap(sc->sc_dmat,
   1222 	    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ);
   1223 
   1224 	/* Deallocate DMA'able memory for the TX ring. */
   1225 	bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
   1226 	    sc->rge_ldata.rge_tx_listnseg);
   1227 	bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map);
   1228 
   1229 	return 0;
   1230 }
   1231 
   1232 /*
   1233  * Set an RX descriptor and sync it.
   1234  */
   1235 static void
   1236 rge_load_rxbuf(struct rge_softc *sc, int idx)
   1237 {
   1238 	struct rge_rx_desc *r = &sc->rge_ldata.rge_rx_list[idx];
   1239 	struct rge_rxq *rxq = &sc->rge_ldata.rge_rxq[idx];
   1240 	bus_dmamap_t rxmap = rxq->rxq_dmamap;
   1241 	uint32_t cmdsts;
   1242 
   1243 	cmdsts = rxmap->dm_segs[0].ds_len | RGE_RDCMDSTS_OWN;
   1244 	if (idx == RGE_RX_LIST_CNT - 1)
   1245 		cmdsts |= RGE_RDCMDSTS_EOR;
   1246 
   1247 	r->hi_qword0.rge_addr = htole64(rxmap->dm_segs[0].ds_addr);
   1248 	r->hi_qword1.rx_qword4.rge_extsts = 0;
   1249 	r->hi_qword1.rx_qword4.rge_cmdsts = htole32(cmdsts);
   1250 
   1251 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
   1252 	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
   1253 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1254 }
   1255 
   1256 /*
   1257  * Initialize the RX descriptor and attach an mbuf cluster.
   1258  */
   1259 int
   1260 rge_newbuf(struct rge_softc *sc, int idx)
   1261 {
   1262 	struct mbuf *m;
   1263 	struct rge_rxq *rxq;
   1264 	bus_dmamap_t rxmap;
   1265 	int error __diagused;
   1266 
   1267 	m = MCLGETL(NULL, M_DONTWAIT, RGE_JUMBO_FRAMELEN);
   1268 	if (m == NULL)
   1269 		return (ENOBUFS);
   1270 	MCLAIM(m, &sc->sc_ec.ec_rx_mowner);
   1271 
   1272 	m->m_len = m->m_pkthdr.len = RGE_JUMBO_FRAMELEN;
   1273 
   1274 	rxq = &sc->rge_ldata.rge_rxq[idx];
   1275 	rxmap = rxq->rxq_dmamap;
   1276 
   1277 	if (rxq->rxq_mbuf != NULL)
   1278 		bus_dmamap_unload(sc->sc_dmat, rxq->rxq_dmamap);
   1279 
   1280 	/* This map was created with BUS_DMA_ALLOCNOW so should never fail. */
   1281 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m, BUS_DMA_NOWAIT);
   1282 	KASSERTMSG(error == 0, "error=%d", error);
   1283 
   1284 	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
   1285 	    BUS_DMASYNC_PREREAD);
   1286 
   1287 	/* Map the segments into RX descriptors. */
   1288 
   1289 	rxq->rxq_mbuf = m;
   1290 	rge_load_rxbuf(sc, idx);
   1291 
   1292 	return 0;
   1293 }
   1294 
   1295 static int
   1296 rge_rx_list_init(struct rge_softc *sc)
   1297 {
   1298 	unsigned i;
   1299 
   1300 	memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);
   1301 
   1302 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
   1303 		sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
   1304 		if (rge_newbuf(sc, i) != 0) {
   1305 			rge_rx_list_fini(sc);
   1306 			return (ENOBUFS);
   1307 		}
   1308 	}
   1309 
   1310 	sc->rge_ldata.rge_rxq_prodidx = sc->rge_ldata.rge_rxq_considx = 0;
   1311 	sc->rge_head = sc->rge_tail = NULL;
   1312 
   1313 	return (0);
   1314 }
   1315 
   1316 static void
   1317 rge_rx_list_fini(struct rge_softc *sc)
   1318 {
   1319 	unsigned i;
   1320 
   1321 	/* Free the RX list buffers. */
   1322 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
   1323 		if (sc->rge_ldata.rge_rxq[i].rxq_mbuf != NULL) {
   1324 			bus_dmamap_unload(sc->sc_dmat,
   1325 			    sc->rge_ldata.rge_rxq[i].rxq_dmamap);
   1326 			m_freem(sc->rge_ldata.rge_rxq[i].rxq_mbuf);
   1327 			sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
   1328 		}
   1329 	}
   1330 }
   1331 
   1332 static void
   1333 rge_tx_list_init(struct rge_softc *sc)
   1334 {
   1335 	unsigned i;
   1336 
   1337 	memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);
   1338 
   1339 	for (i = 0; i < RGE_TX_LIST_CNT; i++)
   1340 		sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
   1341 
   1342 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map, 0,
   1343 	    sc->rge_ldata.rge_tx_list_map->dm_mapsize,
   1344 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1345 
   1346 	sc->rge_ldata.rge_txq_prodidx = sc->rge_ldata.rge_txq_considx = 0;
   1347 }
   1348 
   1349 static void
   1350 rge_tx_list_fini(struct rge_softc *sc)
   1351 {
   1352 	unsigned i;
   1353 
   1354 	/* Free the TX list buffers. */
   1355 	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
   1356 		if (sc->rge_ldata.rge_txq[i].txq_mbuf != NULL) {
   1357 			bus_dmamap_unload(sc->sc_dmat,
   1358 			    sc->rge_ldata.rge_txq[i].txq_dmamap);
   1359 			m_freem(sc->rge_ldata.rge_txq[i].txq_mbuf);
   1360 			sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
   1361 		}
   1362 	}
   1363 }
   1364 
int
rge_rxeof(struct rge_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct rge_rx_desc *cur_rx;
	struct rge_rxq *rxq;
	uint32_t rxstat, extsts;
	int i, total_len, rx = 0;

	/*
	 * Walk the RX ring from the consumer index until we hit a
	 * descriptor the hardware still owns.  Returns nonzero if at
	 * least one descriptor was processed.
	 */
	for (i = sc->rge_ldata.rge_rxq_considx; ; i = RGE_NEXT_RX_DESC(i)) {
		/* Invalidate the descriptor memory. */
		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
		    i * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_rx = &sc->rge_ldata.rge_rx_list[i];

		/* Hardware still owns this descriptor: nothing more to do. */
		if (RGE_OWN(cur_rx))
			break;

		rxstat = letoh32(cur_rx->hi_qword1.rx_qword4.rge_cmdsts);
		extsts = letoh32(cur_rx->hi_qword1.rx_qword4.rge_extsts);

		total_len = RGE_RXBYTES(cur_rx);
		rxq = &sc->rge_ldata.rge_rxq[i];
		m = rxq->rxq_mbuf;
		rx = 1;

		/* Invalidate the RX mbuf. */
		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_dmamap, 0,
		    rxq->rxq_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * Frames that are not a complete start-to-end packet in
		 * this descriptor set are counted as errors; requeue the
		 * buffer and move on.
		 */
		if ((rxstat & (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) !=
		    (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) {
			if_statinc(ifp, if_ierrors);
			rge_load_rxbuf(sc, i);
			continue;
		}

		if (rxstat & RGE_RDCMDSTS_RXERRSUM) {
			if_statinc(ifp, if_ierrors);
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->rge_head != NULL) {
				m_freem(sc->rge_head);
				sc->rge_head = sc->rge_tail = NULL;
			}
			rge_load_rxbuf(sc, i);
			continue;
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
		 */
		if (rge_newbuf(sc, i) != 0) {
			if_statinc(ifp, if_iqdrops);
			if (sc->rge_head != NULL) {
				m_freem(sc->rge_head);
				sc->rge_head = sc->rge_tail = NULL;
			}
			rge_load_rxbuf(sc, i);
			continue;
		}

		m_set_rcvif(m, ifp);
		/*
		 * Append this buffer to a pending fragment chain (started
		 * elsewhere via rge_head/rge_tail) and trim the trailing
		 * Ethernet CRC from the reassembled packet.
		 */
		if (sc->rge_head != NULL) {
			m->m_len = total_len;
			/*
			 * Special case: if there's 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes is the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->rge_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->rge_tail->m_next = m;
			}
			m = sc->rge_head;
			sc->rge_head = sc->rge_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
	#if 0
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);
	#else
		{
			/* Keep the CRC in place and flag it via M_HASFCS. */
			m->m_pkthdr.len = m->m_len = total_len;
			m->m_flags |= M_HASFCS;
		}
	#endif

#if notyet
		/* Check IP header checksum. */
		if (!(extsts & RGE_RDEXTSTS_IPCSUMERR) &&
		    (extsts & RGE_RDEXTSTS_IPV4))
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

		/* Check TCP/UDP checksum. */
		if ((extsts & (RGE_RDEXTSTS_IPV4 | RGE_RDEXTSTS_IPV6)) &&
		    (((extsts & RGE_RDEXTSTS_TCPPKT) &&
		    !(extsts & RGE_RDEXTSTS_TCPCSUMERR)) ||
		    ((extsts & RGE_RDEXTSTS_UDPPKT) &&
		    !(extsts & RGE_RDEXTSTS_UDPCSUMERR))))
			m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
			    M_UDP_CSUM_IN_OK;
#endif

		/* Pass the hardware-stripped VLAN tag up with the packet. */
		if (extsts & RGE_RDEXTSTS_VTAG) {
			vlan_set_tag(m,
			    bswap16(extsts & RGE_RDEXTSTS_VLAN_MASK));
		}

		if_percpuq_enqueue(ifp->if_percpuq, m);
	}

	sc->rge_ldata.rge_rxq_considx = i;

	return (rx);
}
   1493 
int
rge_txeof(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct rge_txq *txq;
	uint32_t txstat;
	int cons, idx, prod;
	/*
	 * free tracks progress: 0 = nothing reclaimed, 1 = reclaimed
	 * some descriptors, 2 = stopped at a descriptor the chip still
	 * owns (TX gets kicked again below in that case).
	 */
	int free = 0;

	prod = sc->rge_ldata.rge_txq_prodidx;
	cons = sc->rge_ldata.rge_txq_considx;

	/* Reclaim completed descriptors between consumer and producer. */
	while (prod != cons) {
		txq = &sc->rge_ldata.rge_txq[cons];
		idx = txq->txq_descidx;

		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
		    idx * sizeof(struct rge_tx_desc),
		    sizeof(struct rge_tx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		txstat = letoh32(sc->rge_ldata.rge_tx_list[idx].rge_cmdsts);

		/* Chip still owns this descriptor; stop and re-kick TX. */
		if (txstat & RGE_TDCMDSTS_OWN) {
			free = 2;
			break;
		}

		/* Release the DMA map and the transmitted mbuf. */
		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap, 0,
		    txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
		m_freem(txq->txq_mbuf);
		txq->txq_mbuf = NULL;

		/* Update interface statistics from the completion status. */
		net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
		if (txstat & (RGE_TDCMDSTS_EXCESSCOLL | RGE_TDCMDSTS_COLL))
			if_statinc_ref(ifp, nsr, if_collisions);
		if (txstat & RGE_TDCMDSTS_TXERR)
			if_statinc_ref(ifp, nsr, if_oerrors);
		else
			if_statinc_ref(ifp, nsr, if_opackets);
		IF_STAT_PUTREF(ifp);

		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
		    idx * sizeof(struct rge_tx_desc),
		    sizeof(struct rge_tx_desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		cons = RGE_NEXT_TX_DESC(idx);
		free = 1;
	}

	if (free == 0)
		return (0);

	sc->rge_ldata.rge_txq_considx = cons;

	/* Pending work remains: prod the chip to keep transmitting. */
	if (free == 2)
		rge_txstart(sc);

	/* Ring has room again: clear OACTIVE and restart the send queue. */
	CLR(ifp->if_flags, IFF_OACTIVE);
	ifp->if_timer = 0;
	if_schedule_deferred_start(ifp);

	return (1);
}
   1560 
void
rge_reset(struct rge_softc *sc)
{
	int i;

	/* Enable RXDV gate. */
	RGE_SETBIT_1(sc, RGE_PPSW, 0x08);
	DELAY(2000);

	/* Wait (up to ~150ms) for both FIFOs to drain before resetting. */
	for (i = 0; i < 3000; i++) {
		DELAY(50);
		if ((RGE_READ_1(sc, RGE_MCUCMD) & (RGE_MCUCMD_RXFIFO_EMPTY |
		    RGE_MCUCMD_TXFIFO_EMPTY)) == (RGE_MCUCMD_RXFIFO_EMPTY |
		    RGE_MCUCMD_TXFIFO_EMPTY))
			break;
	}
	/* MAC_CFG4/5 parts need an additional readiness poll. */
	if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5) {
		for (i = 0; i < 3000; i++) {
			DELAY(50);
			if ((RGE_READ_2(sc, RGE_IM) & 0x0103) == 0x0103)
				break;
		}
	}

	DELAY(2000);

	/* Soft reset. */
	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_RESET);

	/* The RESET bit self-clears when the chip is done. */
	for (i = 0; i < RGE_TIMEOUT; i++) {
		DELAY(100);
		if (!(RGE_READ_1(sc, RGE_CMD) & RGE_CMD_RESET))
			break;
	}
	if (i == RGE_TIMEOUT)
		device_printf(sc->sc_dev, "reset never completed!\n");
}
   1598 
void
rge_iff(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct ethercom *ec = &sc->sc_ec;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[2];
	uint32_t rxfilt;
	int h = 0;

	/* Program the RX filter and multicast hash from interface state. */
	rxfilt = RGE_READ_4(sc, RGE_RXCFG);
	rxfilt &= ~(RGE_RXCFG_ALLPHYS | RGE_RXCFG_MULTI);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept frames destined to our station address.
	 * Always accept broadcast frames.
	 */
	rxfilt |= RGE_RXCFG_INDIV | RGE_RXCFG_BROAD;

	if (ifp->if_flags & IFF_PROMISC) {
 allmulti:
		/*
		 * Accept-all path, reached either for promiscuous mode or
		 * via goto when a multicast range entry forces ALLMULTI.
		 * The inner IFF_PROMISC test distinguishes the two: only
		 * true promiscuous mode also sets ALLPHYS.
		 */
		ifp->if_flags |= IFF_ALLMULTI;
		rxfilt |= RGE_RXCFG_MULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= RGE_RXCFG_ALLPHYS;
		hashes[0] = hashes[1] = 0xffffffff;
	} else {
		rxfilt |= RGE_RXCFG_MULTI;
		/* Program new filter. */
		memset(hashes, 0, sizeof(hashes));

		ETHER_LOCK(ec);
		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			/* An address range can't be hashed: fall back. */
			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN) != 0) {
			    	ETHER_UNLOCK(ec);
				goto allmulti;
			}
			/* Top 6 bits of the big-endian CRC select the bit. */
			h = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN) >> 26;

			if (h < 32)
				hashes[0] |= (1U << h);
			else
				hashes[1] |= (1U << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
		ETHER_UNLOCK(ec);
	}

	/* Hash words are written swapped, per the chip's register layout. */
	RGE_WRITE_4(sc, RGE_RXCFG, rxfilt);
	RGE_WRITE_4(sc, RGE_MAR0, bswap32(hashes[1]));
	RGE_WRITE_4(sc, RGE_MAR4, bswap32(hashes[0]));
}
   1657 
void
rge_set_phy_power(struct rge_softc *sc, int on)
{
	int i;

	/* Power the PHY up (on != 0) or down via PMCH and BMCR. */
	if (on) {
		RGE_SETBIT_1(sc, RGE_PMCH, 0xc0);

		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN);

		/* Poll (up to RGE_TIMEOUT ms) until the PHY reports ready. */
		for (i = 0; i < RGE_TIMEOUT; i++) {
			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 3)
				break;
			DELAY(1000);
		}
	} else {
		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_PDOWN);
		RGE_CLRBIT_1(sc, RGE_PMCH, 0x80);
		RGE_CLRBIT_1(sc, RGE_PPSW, 0x40);
	}
}
   1679 
void
rge_phy_config(struct rge_softc *sc)
{
	/* Read microcode version. */
	rge_write_phy_ocp(sc, 0xa436, 0x801e);
	sc->rge_mcodever = rge_read_phy_ocp(sc, 0xa438);

	/* Dispatch to the per-chip-revision PHY setup routine. */
	switch (sc->rge_type) {
	case MAC_CFG2_8126:
		rge_phy_config_mac_cfg2_8126(sc);
		break;
	case MAC_CFG2:
		rge_phy_config_mac_cfg2(sc);
		break;
	case MAC_CFG3:
		rge_phy_config_mac_cfg3(sc);
		break;
	case MAC_CFG4:
		rge_phy_config_mac_cfg4(sc);
		break;
	case MAC_CFG5:
		rge_phy_config_mac_cfg5(sc);
		break;
	default:
		break;	/* Can't happen. */
	}

	rge_write_phy(sc, 0x0a5b, 0x12,
	    rge_read_phy(sc, 0x0a5b, 0x12) & ~0x8000);

	/* Disable EEE. */
	RGE_MAC_CLRBIT(sc, 0xe040, 0x0003);
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
		RGE_MAC_CLRBIT(sc, 0xeb62, 0x0006);
		RGE_PHY_CLRBIT(sc, 0xa432, 0x0010);
	}
	RGE_PHY_CLRBIT(sc, 0xa5d0, 0x0006);
	RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0001);
	if (sc->rge_type == MAC_CFG2_8126)
		RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0002);
	RGE_PHY_CLRBIT(sc, 0xa6d8, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xa428, 0x0080);
	RGE_PHY_CLRBIT(sc, 0xa4a2, 0x0200);

	/* Disable advanced EEE. */
	/* Non-8126 parts require the PHY MCU patched around these writes. */
	if (sc->rge_type != MAC_CFG2_8126)
		rge_patch_phy_mcu(sc, 1);
	RGE_MAC_CLRBIT(sc, 0xe052, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xa442, 0x3000);
	RGE_PHY_CLRBIT(sc, 0xa430, 0x8000);
	if (sc->rge_type != MAC_CFG2_8126)
		rge_patch_phy_mcu(sc, 0);
}
   1733 
   1734 void
   1735 rge_phy_config_mac_cfg2_8126(struct rge_softc *sc)
   1736 {
   1737 	uint16_t val;
   1738 	int i;
   1739 	static const uint16_t mac_cfg2_a438_value[] =
   1740 	    { 0x0044, 0x00a8, 0x00d6, 0x00ec, 0x00f6, 0x00fc, 0x00fe,
   1741 	      0x00fe, 0x00bc, 0x0058, 0x002a, 0x003f, 0x3f02, 0x023c,
   1742 	      0x3b0a, 0x1c00, 0x0000, 0x0000, 0x0000, 0x0000 };
   1743 
   1744 	static const uint16_t mac_cfg2_b87e_value[] =
   1745 	    { 0x03ed, 0x03ff, 0x0009, 0x03fe, 0x000b, 0x0021, 0x03f7,
   1746 	      0x03b8, 0x03e0, 0x0049, 0x0049, 0x03e0, 0x03b8, 0x03f7,
   1747 	      0x0021, 0x000b, 0x03fe, 0x0009, 0x03ff, 0x03ed, 0x000e,
   1748 	      0x03fe, 0x03ed, 0x0006, 0x001a, 0x03f1, 0x03d8, 0x0023,
   1749 	      0x0054, 0x0322, 0x00dd, 0x03ab, 0x03dc, 0x0027, 0x000e,
   1750 	      0x03e5, 0x03f9, 0x0012, 0x0001, 0x03f1 };
   1751 
   1752 	rge_phy_config_mcu(sc, RGE_MAC_CFG2_8126_MCODE_VER);
   1753 
   1754 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
   1755 	rge_write_phy_ocp(sc, 0xa436, 0x80bf);
   1756 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1757 	rge_write_phy_ocp(sc, 0xa438, val | 0xed00);
   1758 	rge_write_phy_ocp(sc, 0xa436, 0x80cd);
   1759 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1760 	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
   1761 	rge_write_phy_ocp(sc, 0xa436, 0x80d1);
   1762 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1763 	rge_write_phy_ocp(sc, 0xa438, val | 0xc800);
   1764 	rge_write_phy_ocp(sc, 0xa436, 0x80d4);
   1765 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1766 	rge_write_phy_ocp(sc, 0xa438, val | 0xc800);
   1767 	rge_write_phy_ocp(sc, 0xa436, 0x80e1);
   1768 	rge_write_phy_ocp(sc, 0xa438, 0x10cc);
   1769 	rge_write_phy_ocp(sc, 0xa436, 0x80e5);
   1770 	rge_write_phy_ocp(sc, 0xa438, 0x4f0c);
   1771 	rge_write_phy_ocp(sc, 0xa436, 0x8387);
   1772 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1773 	rge_write_phy_ocp(sc, 0xa438, val | 0x4700);
   1774 	val = rge_read_phy_ocp(sc, 0xa80c) & ~0x00c0;
   1775 	rge_write_phy_ocp(sc, 0xa80c, val | 0x0080);
   1776 	RGE_PHY_CLRBIT(sc, 0xac90, 0x0010);
   1777 	RGE_PHY_CLRBIT(sc, 0xad2c, 0x8000);
   1778 	rge_write_phy_ocp(sc, 0xb87c, 0x8321);
   1779 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1780 	rge_write_phy_ocp(sc, 0xb87e, val | 0x1100);
   1781 	RGE_PHY_SETBIT(sc, 0xacf8, 0x000c);
   1782 	rge_write_phy_ocp(sc, 0xa436, 0x8183);
   1783 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1784 	rge_write_phy_ocp(sc, 0xa438, val | 0x5900);
   1785 	RGE_PHY_SETBIT(sc, 0xad94, 0x0020);
   1786 	RGE_PHY_CLRBIT(sc, 0xa654, 0x0800);
   1787 	RGE_PHY_SETBIT(sc, 0xb648, 0x4000);
   1788 	rge_write_phy_ocp(sc, 0xb87c, 0x839e);
   1789 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1790 	rge_write_phy_ocp(sc, 0xb87e, val | 0x2f00);
   1791 	rge_write_phy_ocp(sc, 0xb87c, 0x83f2);
   1792 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1793 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0800);
   1794 	RGE_PHY_SETBIT(sc, 0xada0, 0x0002);
   1795 	rge_write_phy_ocp(sc, 0xb87c, 0x80f3);
   1796 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1797 	rge_write_phy_ocp(sc, 0xb87e, val | 0x9900);
   1798 	rge_write_phy_ocp(sc, 0xb87c, 0x8126);
   1799 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1800 	rge_write_phy_ocp(sc, 0xb87e, val | 0xc100);
   1801 	rge_write_phy_ocp(sc, 0xb87c, 0x893a);
   1802 	rge_write_phy_ocp(sc, 0xb87e, 0x8080);
   1803 	rge_write_phy_ocp(sc, 0xb87c, 0x8647);
   1804 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1805 	rge_write_phy_ocp(sc, 0xb87e, val | 0xe600);
   1806 	rge_write_phy_ocp(sc, 0xb87c, 0x862c);
   1807 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1808 	rge_write_phy_ocp(sc, 0xb87e, val | 0x1200);
   1809 	rge_write_phy_ocp(sc, 0xb87c, 0x864a);
   1810 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1811 	rge_write_phy_ocp(sc, 0xb87e, val | 0xe600);
   1812 	rge_write_phy_ocp(sc, 0xb87c, 0x80a0);
   1813 	rge_write_phy_ocp(sc, 0xb87e, 0xbcbc);
   1814 	rge_write_phy_ocp(sc, 0xb87c, 0x805e);
   1815 	rge_write_phy_ocp(sc, 0xb87e, 0xbcbc);
   1816 	rge_write_phy_ocp(sc, 0xb87c, 0x8056);
   1817 	rge_write_phy_ocp(sc, 0xb87e, 0x3077);
   1818 	rge_write_phy_ocp(sc, 0xb87c, 0x8058);
   1819 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1820 	rge_write_phy_ocp(sc, 0xb87e, val | 0x5a00);
   1821 	rge_write_phy_ocp(sc, 0xb87c, 0x8098);
   1822 	rge_write_phy_ocp(sc, 0xb87e, 0x3077);
   1823 	rge_write_phy_ocp(sc, 0xb87c, 0x809a);
   1824 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1825 	rge_write_phy_ocp(sc, 0xb87e, val | 0x5a00);
   1826 	rge_write_phy_ocp(sc, 0xb87c, 0x8052);
   1827 	rge_write_phy_ocp(sc, 0xb87e, 0x3733);
   1828 	rge_write_phy_ocp(sc, 0xb87c, 0x8094);
   1829 	rge_write_phy_ocp(sc, 0xb87e, 0x3733);
   1830 	rge_write_phy_ocp(sc, 0xb87c, 0x807f);
   1831 	rge_write_phy_ocp(sc, 0xb87e, 0x7c75);
   1832 	rge_write_phy_ocp(sc, 0xb87c, 0x803d);
   1833 	rge_write_phy_ocp(sc, 0xb87e, 0x7c75);
   1834 	rge_write_phy_ocp(sc, 0xb87c, 0x8036);
   1835 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1836 	rge_write_phy_ocp(sc, 0xb87e, val | 0x3000);
   1837 	rge_write_phy_ocp(sc, 0xb87c, 0x8078);
   1838 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1839 	rge_write_phy_ocp(sc, 0xb87e, val | 0x3000);
   1840 	rge_write_phy_ocp(sc, 0xb87c, 0x8031);
   1841 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1842 	rge_write_phy_ocp(sc, 0xb87e, val | 0x3300);
   1843 	rge_write_phy_ocp(sc, 0xb87c, 0x8073);
   1844 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1845 	rge_write_phy_ocp(sc, 0xb87e, val | 0x3300);
   1846 	val = rge_read_phy_ocp(sc, 0xae06) & ~0xfc00;
   1847 	rge_write_phy_ocp(sc, 0xae06, val | 0x7c00);
   1848 	rge_write_phy_ocp(sc, 0xb87c, 0x89D1);
   1849 	rge_write_phy_ocp(sc, 0xb87e, 0x0004);
   1850 	rge_write_phy_ocp(sc, 0xa436, 0x8fbd);
   1851 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1852 	rge_write_phy_ocp(sc, 0xa438, val | 0x0a00);
   1853 	rge_write_phy_ocp(sc, 0xa436, 0x8fbe);
   1854 	rge_write_phy_ocp(sc, 0xa438, 0x0d09);
   1855 	rge_write_phy_ocp(sc, 0xb87c, 0x89cd);
   1856 	rge_write_phy_ocp(sc, 0xb87e, 0x0f0f);
   1857 	rge_write_phy_ocp(sc, 0xb87c, 0x89cf);
   1858 	rge_write_phy_ocp(sc, 0xb87e, 0x0f0f);
   1859 	rge_write_phy_ocp(sc, 0xb87c, 0x83a4);
   1860 	rge_write_phy_ocp(sc, 0xb87e, 0x6600);
   1861 	rge_write_phy_ocp(sc, 0xb87c, 0x83a6);
   1862 	rge_write_phy_ocp(sc, 0xb87e, 0x6601);
   1863 	rge_write_phy_ocp(sc, 0xb87c, 0x83c0);
   1864 	rge_write_phy_ocp(sc, 0xb87e, 0x6600);
   1865 	rge_write_phy_ocp(sc, 0xb87c, 0x83c2);
   1866 	rge_write_phy_ocp(sc, 0xb87e, 0x6601);
   1867 	rge_write_phy_ocp(sc, 0xb87c, 0x8414);
   1868 	rge_write_phy_ocp(sc, 0xb87e, 0x6600);
   1869 	rge_write_phy_ocp(sc, 0xb87c, 0x8416);
   1870 	rge_write_phy_ocp(sc, 0xb87e, 0x6601);
   1871 	rge_write_phy_ocp(sc, 0xb87c, 0x83f8);
   1872 	rge_write_phy_ocp(sc, 0xb87e, 0x6600);
   1873 	rge_write_phy_ocp(sc, 0xb87c, 0x83fa);
   1874 	rge_write_phy_ocp(sc, 0xb87e, 0x6601);
   1875 
   1876 	rge_patch_phy_mcu(sc, 1);
   1877 	val = rge_read_phy_ocp(sc, 0xbd96) & ~0x1f00;
   1878 	rge_write_phy_ocp(sc, 0xbd96, val | 0x1000);
   1879 	val = rge_read_phy_ocp(sc, 0xbf1c) & ~0x0007;
   1880 	rge_write_phy_ocp(sc, 0xbf1c, val | 0x0007);
   1881 	RGE_PHY_CLRBIT(sc, 0xbfbe, 0x8000);
   1882 	val = rge_read_phy_ocp(sc, 0xbf40) & ~0x0380;
   1883 	rge_write_phy_ocp(sc, 0xbf40, val | 0x0280);
   1884 	val = rge_read_phy_ocp(sc, 0xbf90) & ~0x0080;
   1885 	rge_write_phy_ocp(sc, 0xbf90, val | 0x0060);
   1886 	val = rge_read_phy_ocp(sc, 0xbf90) & ~0x0010;
   1887 	rge_write_phy_ocp(sc, 0xbf90, val | 0x000c);
   1888 	rge_patch_phy_mcu(sc, 0);
   1889 
   1890 	rge_write_phy_ocp(sc, 0xa436, 0x843b);
   1891 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1892 	rge_write_phy_ocp(sc, 0xa438, val | 0x2000);
   1893 	rge_write_phy_ocp(sc, 0xa436, 0x843d);
   1894 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1895 	rge_write_phy_ocp(sc, 0xa438, val | 0x2000);
   1896 	RGE_PHY_CLRBIT(sc, 0xb516, 0x007f);
   1897 	RGE_PHY_CLRBIT(sc, 0xbf80, 0x0030);
   1898 
   1899 	rge_write_phy_ocp(sc, 0xa436, 0x8188);
   1900 	for (i = 0; i < 11; i++)
   1901 		rge_write_phy_ocp(sc, 0xa438, mac_cfg2_a438_value[i]);
   1902 
   1903 	rge_write_phy_ocp(sc, 0xb87c, 0x8015);
   1904 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1905 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0800);
   1906 	rge_write_phy_ocp(sc, 0xb87c, 0x8ffd);
   1907 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1908 	rge_write_phy_ocp(sc, 0xb87e, val | 0);
   1909 	rge_write_phy_ocp(sc, 0xb87c, 0x8fff);
   1910 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1911 	rge_write_phy_ocp(sc, 0xb87e, val | 0x7f00);
   1912 	rge_write_phy_ocp(sc, 0xb87c, 0x8ffb);
   1913 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1914 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0100);
   1915 	rge_write_phy_ocp(sc, 0xb87c, 0x8fe9);
   1916 	rge_write_phy_ocp(sc, 0xb87e, 0x0002);
   1917 	rge_write_phy_ocp(sc, 0xb87c, 0x8fef);
   1918 	rge_write_phy_ocp(sc, 0xb87e, 0x00a5);
   1919 	rge_write_phy_ocp(sc, 0xb87c, 0x8ff1);
   1920 	rge_write_phy_ocp(sc, 0xb87e, 0x0106);
   1921 	rge_write_phy_ocp(sc, 0xb87c, 0x8fe1);
   1922 	rge_write_phy_ocp(sc, 0xb87e, 0x0102);
   1923 	rge_write_phy_ocp(sc, 0xb87c, 0x8fe3);
   1924 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1925 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0400);
   1926 	RGE_PHY_SETBIT(sc, 0xa654, 0x0800);
   1927 	RGE_PHY_CLRBIT(sc, 0xa654, 0x0003);
   1928 	rge_write_phy_ocp(sc, 0xac3a, 0x5851);
   1929 	val = rge_read_phy_ocp(sc, 0xac3c) & ~0xd000;
   1930 	rge_write_phy_ocp(sc, 0xac3c, val | 0x2000);
   1931 	val = rge_read_phy_ocp(sc, 0xac42) & ~0x0200;
   1932 	rge_write_phy_ocp(sc, 0xac42, val | 0x01c0);
   1933 	RGE_PHY_CLRBIT(sc, 0xac3e, 0xe000);
   1934 	RGE_PHY_CLRBIT(sc, 0xac42, 0x0038);
   1935 	val = rge_read_phy_ocp(sc, 0xac42) & ~0x0002;
   1936 	rge_write_phy_ocp(sc, 0xac42, val | 0x0005);
   1937 	rge_write_phy_ocp(sc, 0xac1a, 0x00db);
   1938 	rge_write_phy_ocp(sc, 0xade4, 0x01b5);
   1939 	RGE_PHY_CLRBIT(sc, 0xad9c, 0x0c00);
   1940 	rge_write_phy_ocp(sc, 0xb87c, 0x814b);
   1941 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1942 	rge_write_phy_ocp(sc, 0xb87e, val | 0x1100);
   1943 	rge_write_phy_ocp(sc, 0xb87c, 0x814d);
   1944 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1945 	rge_write_phy_ocp(sc, 0xb87e, val | 0x1100);
   1946 	rge_write_phy_ocp(sc, 0xb87c, 0x814f);
   1947 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1948 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0b00);
   1949 	rge_write_phy_ocp(sc, 0xb87c, 0x8142);
   1950 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1951 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0100);
   1952 	rge_write_phy_ocp(sc, 0xb87c, 0x8144);
   1953 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1954 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0100);
   1955 	rge_write_phy_ocp(sc, 0xb87c, 0x8150);
   1956 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1957 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0100);
   1958 	rge_write_phy_ocp(sc, 0xb87c, 0x8118);
   1959 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1960 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0700);
   1961 	rge_write_phy_ocp(sc, 0xb87c, 0x811a);
   1962 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1963 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0700);
   1964 	rge_write_phy_ocp(sc, 0xb87c, 0x811c);
   1965 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1966 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0500);
   1967 	rge_write_phy_ocp(sc, 0xb87c, 0x810f);
   1968 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1969 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0100);
   1970 	rge_write_phy_ocp(sc, 0xb87c, 0x8111);
   1971 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1972 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0100);
   1973 	rge_write_phy_ocp(sc, 0xb87c, 0x811d);
   1974 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1975 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0100);
   1976 	RGE_PHY_SETBIT(sc, 0xac36, 0x1000);
   1977 	RGE_PHY_CLRBIT(sc, 0xad1c, 0x0100);
   1978 	val = rge_read_phy_ocp(sc, 0xade8) & ~0xffc0;
   1979 	rge_write_phy_ocp(sc, 0xade8, val | 0x1400);
   1980 	rge_write_phy_ocp(sc, 0xb87c, 0x864b);
   1981 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1982 	rge_write_phy_ocp(sc, 0xb87e, val | 0x9d00);
   1983 
   1984 	rge_write_phy_ocp(sc, 0xa436, 0x8f97);
   1985 	for (; i < nitems(mac_cfg2_a438_value); i++)
   1986 		rge_write_phy_ocp(sc, 0xa438, mac_cfg2_a438_value[i]);
   1987 
   1988 	RGE_PHY_SETBIT(sc, 0xad9c, 0x0020);
   1989 	rge_write_phy_ocp(sc, 0xb87c, 0x8122);
   1990 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1991 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0c00);
   1992 
   1993 	rge_write_phy_ocp(sc, 0xb87c, 0x82c8);
   1994 	for (i = 0; i < 20; i++)
   1995 		rge_write_phy_ocp(sc, 0xb87e, mac_cfg2_b87e_value[i]);
   1996 
   1997 	rge_write_phy_ocp(sc, 0xb87c, 0x80ef);
   1998 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1999 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0c00);
   2000 
   2001 	rge_write_phy_ocp(sc, 0xb87c, 0x82a0);
   2002 	for (; i < nitems(mac_cfg2_b87e_value); i++)
   2003 		rge_write_phy_ocp(sc, 0xb87e, mac_cfg2_b87e_value[i]);
   2004 
   2005 	rge_write_phy_ocp(sc, 0xa436, 0x8018);
   2006 	RGE_PHY_SETBIT(sc, 0xa438, 0x2000);
   2007 	rge_write_phy_ocp(sc, 0xb87c, 0x8fe4);
   2008 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   2009 	rge_write_phy_ocp(sc, 0xb87e, val | 0);
   2010 	val = rge_read_phy_ocp(sc, 0xb54c) & ~0xffc0;
   2011 	rge_write_phy_ocp(sc, 0xb54c, val | 0x3700);
   2012 }
   2013 
/*
 * PHY configuration for the RTL8125 MAC_CFG2 revision.
 *
 * Applies the vendor EPHY fixup table, loads the PHY MCU microcode
 * patch for this revision, then performs a long sequence of
 * vendor-supplied PHY OCP register writes.  The register addresses
 * and values come from Realtek reference code and are not publicly
 * documented; preserve their order.
 */
void
rge_phy_config_mac_cfg2(struct rge_softc *sc)
{
	uint16_t val;
	int i;

	/* Program the per-revision EPHY initialization table. */
	for (i = 0; i < nitems(rtl8125_mac_cfg2_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg2_ephy[i].reg,
		    rtl8125_mac_cfg2_ephy[i].val);

	/* Load the PHY MCU patch if the stored version differs. */
	rge_phy_config_mcu(sc, RGE_MAC_CFG2_MCODE_VER);

	/*
	 * Vendor-magic PHY OCP tweaks.  Each read-modify-write pair
	 * clears a bit field and installs a new value in it.
	 */
	val = rge_read_phy_ocp(sc, 0xad40) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0084);
	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad16, val | 0x0006);
	val = rge_read_phy_ocp(sc, 0xad32) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x1100);
	val = rge_read_phy_ocp(sc, 0xac8a) & ~0xf000;
	rge_write_phy_ocp(sc, 0xac8a, val | 0x7000);
	RGE_PHY_SETBIT(sc, 0xad18, 0x0400);
	RGE_PHY_SETBIT(sc, 0xad1a, 0x03ff);
	RGE_PHY_SETBIT(sc, 0xad1c, 0x03ff);

	/*
	 * NOTE(review): 0xa436/0xa438 look like an indirect
	 * address/data register pair -- confirm against the Realtek
	 * reference driver.
	 */
	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xc400);
	rge_write_phy_ocp(sc, 0xa436, 0x80eb);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0x0700;
	rge_write_phy_ocp(sc, 0xa438, val | 0x0300);
	rge_write_phy_ocp(sc, 0xa436, 0x80f8);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80f1);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x3000);
	rge_write_phy_ocp(sc, 0xa436, 0x80fe);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xa500);
	rge_write_phy_ocp(sc, 0xa436, 0x8102);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x8105);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x3300);
	rge_write_phy_ocp(sc, 0xa436, 0x8100);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x7000);
	rge_write_phy_ocp(sc, 0xa436, 0x8104);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xf000);
	rge_write_phy_ocp(sc, 0xa436, 0x8106);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6500);
	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xed00);
	rge_write_phy_ocp(sc, 0xa436, 0x80df);
	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
	rge_write_phy_ocp(sc, 0xa436, 0x80e1);
	RGE_PHY_CLRBIT(sc, 0xa438, 0x0100);
	val = rge_read_phy_ocp(sc, 0xbf06) & ~0x003f;
	rge_write_phy_ocp(sc, 0xbf06, val | 0x0038);
	rge_write_phy_ocp(sc, 0xa436, 0x819f);
	rge_write_phy_ocp(sc, 0xa438, 0xd0b6);
	rge_write_phy_ocp(sc, 0xbc34, 0x5555);
	val = rge_read_phy_ocp(sc, 0xbf0a) & ~0x0e00;
	rge_write_phy_ocp(sc, 0xbf0a, val | 0x0a00);
	RGE_PHY_CLRBIT(sc, 0xa5c0, 0x0400);
	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
}
   2087 
/*
 * PHY configuration for the RTL8125 MAC_CFG3 revision.
 *
 * Applies the vendor EPHY fixup table plus additional EPHY
 * adjustments, loads the PHY MCU patch for this revision, then
 * performs vendor-supplied PHY OCP writes.  Values and ordering are
 * taken from Realtek reference code; register semantics are not
 * publicly documented.
 */
void
rge_phy_config_mac_cfg3(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint16_t val;
	int i;
	/* Values streamed to 0xa438 after selecting address 0x81b3. */
	static const uint16_t mac_cfg3_a438_value[] =
	    { 0x0043, 0x00a7, 0x00d6, 0x00ec, 0x00f6, 0x00fb, 0x00fd, 0x00ff,
	      0x00bb, 0x0058, 0x0029, 0x0013, 0x0009, 0x0004, 0x0002 };

	/* (register, value) pairs written via 0xb88e/0xb890 below. */
	static const uint16_t mac_cfg3_b88e_value[] =
	    { 0xc091, 0x6e12, 0xc092, 0x1214, 0xc094, 0x1516, 0xc096, 0x171b,
	      0xc098, 0x1b1c, 0xc09a, 0x1f1f, 0xc09c, 0x2021, 0xc09e, 0x2224,
	      0xc0a0, 0x2424, 0xc0a2, 0x2424, 0xc0a4, 0x2424, 0xc018, 0x0af2,
	      0xc01a, 0x0d4a, 0xc01c, 0x0f26, 0xc01e, 0x118d, 0xc020, 0x14f3,
	      0xc022, 0x175a, 0xc024, 0x19c0, 0xc026, 0x1c26, 0xc089, 0x6050,
	      0xc08a, 0x5f6e, 0xc08c, 0x6e6e, 0xc08e, 0x6e6e, 0xc090, 0x6e12 };

	/* Program the per-revision EPHY initialization table. */
	for (i = 0; i < nitems(rtl8125_mac_cfg3_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg3_ephy[i].reg,
		    rtl8125_mac_cfg3_ephy[i].val);

	/* Additional EPHY fixups not covered by the table. */
	val = rge_read_ephy(sc, 0x002a) & ~0x7000;
	rge_write_ephy(sc, 0x002a, val | 0x3000);
	RGE_EPHY_CLRBIT(sc, 0x0019, 0x0040);
	RGE_EPHY_SETBIT(sc, 0x001b, 0x0e00);
	RGE_EPHY_CLRBIT(sc, 0x001b, 0x7000);
	rge_write_ephy(sc, 0x0002, 0x6042);
	rge_write_ephy(sc, 0x0006, 0x0014);
	val = rge_read_ephy(sc, 0x006a) & ~0x7000;
	rge_write_ephy(sc, 0x006a, val | 0x3000);
	RGE_EPHY_CLRBIT(sc, 0x0059, 0x0040);
	RGE_EPHY_SETBIT(sc, 0x005b, 0x0e00);
	RGE_EPHY_CLRBIT(sc, 0x005b, 0x7000);
	rge_write_ephy(sc, 0x0042, 0x6042);
	rge_write_ephy(sc, 0x0046, 0x0014);

	/* Load the PHY MCU patch if the stored version differs. */
	rge_phy_config_mcu(sc, RGE_MAC_CFG3_MCODE_VER);

	/* Vendor-magic PHY OCP tweaks (order matters). */
	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad16, val | 0x03ff);
	val = rge_read_phy_ocp(sc, 0xad32) & ~0x003f;
	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x1000);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x0100);
	val = rge_read_phy_ocp(sc, 0xacc0) & ~0x0003;
	rge_write_phy_ocp(sc, 0xacc0, val | 0x0002);
	val = rge_read_phy_ocp(sc, 0xad40) & ~0x00e0;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0040);
	val = rge_read_phy_ocp(sc, 0xad40) & ~0x0007;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0004);
	RGE_PHY_CLRBIT(sc, 0xac14, 0x0080);
	RGE_PHY_CLRBIT(sc, 0xac80, 0x0300);
	val = rge_read_phy_ocp(sc, 0xac5e) & ~0x0007;
	rge_write_phy_ocp(sc, 0xac5e, val | 0x0002);
	rge_write_phy_ocp(sc, 0xad4c, 0x00a8);
	rge_write_phy_ocp(sc, 0xac5c, 0x01ff);
	val = rge_read_phy_ocp(sc, 0xac8a) & ~0x00f0;
	rge_write_phy_ocp(sc, 0xac8a, val | 0x0030);
	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0500);
	rge_write_phy_ocp(sc, 0xb87c, 0x8159);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0700);
	/* EEE TX idle timer: current MTU + Ethernet header + 32. */
	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN +
	    32);
	rge_write_phy_ocp(sc, 0xb87c, 0x80a2);
	rge_write_phy_ocp(sc, 0xb87e, 0x0153);
	rge_write_phy_ocp(sc, 0xb87c, 0x809c);
	rge_write_phy_ocp(sc, 0xb87e, 0x0153);

	/* Stream the table above, then 26 zero words, via 0xa438. */
	rge_write_phy_ocp(sc, 0xa436, 0x81b3);
	for (i = 0; i < nitems(mac_cfg3_a438_value); i++)
		rge_write_phy_ocp(sc, 0xa438, mac_cfg3_a438_value[i]);
	for (i = 0; i < 26; i++)
		rge_write_phy_ocp(sc, 0xa438, 0);
	rge_write_phy_ocp(sc, 0xa436, 0x8257);
	rge_write_phy_ocp(sc, 0xa438, 0x020f);
	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
	rge_write_phy_ocp(sc, 0xa438, 0x7843);

	/* Write (reg, val) pairs under the PHY MCU patch request. */
	rge_patch_phy_mcu(sc, 1);
	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
	for (i = 0; i < nitems(mac_cfg3_b88e_value); i += 2) {
		rge_write_phy_ocp(sc, 0xb88e, mac_cfg3_b88e_value[i]);
		rge_write_phy_ocp(sc, 0xb890, mac_cfg3_b88e_value[i + 1]);
	}
	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
	rge_patch_phy_mcu(sc, 0);

	/* Final vendor-magic tweaks. */
	RGE_PHY_SETBIT(sc, 0xd068, 0x2000);
	rge_write_phy_ocp(sc, 0xa436, 0x81a2);
	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
	val = rge_read_phy_ocp(sc, 0xb54c) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb54c, val | 0xdb00);
	RGE_PHY_CLRBIT(sc, 0xa454, 0x0001);
	RGE_PHY_SETBIT(sc, 0xa5d4, 0x0020);
	RGE_PHY_CLRBIT(sc, 0xad4e, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xa86a, 0x0001);
	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
}
   2192 
/*
 * PHY configuration for the RTL8125 MAC_CFG4 revision.
 *
 * Applies the vendor EPHY fixup table, loads the PHY MCU patch for
 * this revision, then performs vendor-supplied PHY OCP writes.
 * Values and ordering are taken from Realtek reference code;
 * register semantics are not publicly documented.
 */
void
rge_phy_config_mac_cfg4(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint16_t val;
	int i;
	/* (register, value) pairs written via 0xb87c/0xb87e below. */
	static const uint16_t mac_cfg4_b87c_value[] =
	    { 0x8013, 0x0700, 0x8fb9, 0x2801, 0x8fba, 0x0100, 0x8fbc, 0x1900,
	      0x8fbe, 0xe100, 0x8fc0, 0x0800, 0x8fc2, 0xe500, 0x8fc4, 0x0f00,
	      0x8fc6, 0xf100, 0x8fc8, 0x0400, 0x8fca, 0xf300, 0x8fcc, 0xfd00,
	      0x8fce, 0xff00, 0x8fd0, 0xfb00, 0x8fd2, 0x0100, 0x8fd4, 0xf400,
	      0x8fd6, 0xff00, 0x8fd8, 0xf600, 0x813d, 0x390e, 0x814f, 0x790e,
	      0x80b0, 0x0f31 };

	/* Program the per-revision EPHY initialization table. */
	for (i = 0; i < nitems(rtl8125_mac_cfg4_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg4_ephy[i].reg,
		    rtl8125_mac_cfg4_ephy[i].val);

	/* Pre-patch vendor-magic setup (0xc402 bit pulsed set/clear). */
	rge_write_phy_ocp(sc, 0xbf86, 0x9000);
	RGE_PHY_SETBIT(sc, 0xc402, 0x0400);
	RGE_PHY_CLRBIT(sc, 0xc402, 0x0400);
	rge_write_phy_ocp(sc, 0xbd86, 0x1010);
	rge_write_phy_ocp(sc, 0xbd88, 0x1010);
	val = rge_read_phy_ocp(sc, 0xbd4e) & ~0x0c00;
	rge_write_phy_ocp(sc, 0xbd4e, val | 0x0800);
	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
	rge_write_phy_ocp(sc, 0xbf46, val | 0x0700);

	/* Load the PHY MCU patch if the stored version differs. */
	rge_phy_config_mcu(sc, RGE_MAC_CFG4_MCODE_VER);

	/* Vendor-magic PHY OCP tweaks (order matters). */
	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
	RGE_PHY_SETBIT(sc, 0xbc08, 0x000c);
	rge_write_phy_ocp(sc, 0xa436, 0x8fff);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x0400);
	/* Registers 0x8560..0x856a: 0x19cc for the first 3, then 0x147d. */
	for (i = 0; i < 6; i++) {
		rge_write_phy_ocp(sc, 0xb87c, 0x8560 + i * 2);
		if (i < 3)
			rge_write_phy_ocp(sc, 0xb87e, 0x19cc);
		else
			rge_write_phy_ocp(sc, 0xb87e, 0x147d);
	}
	rge_write_phy_ocp(sc, 0xb87c, 0x8ffe);
	rge_write_phy_ocp(sc, 0xb87e, 0x0907);
	val = rge_read_phy_ocp(sc, 0xacda) & ~0xff00;
	rge_write_phy_ocp(sc, 0xacda, val | 0xff00);
	val = rge_read_phy_ocp(sc, 0xacde) & ~0xf000;
	rge_write_phy_ocp(sc, 0xacde, val | 0xf000);
	rge_write_phy_ocp(sc, 0xb87c, 0x80d6);
	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
	rge_write_phy_ocp(sc, 0xb87c, 0x80F2);
	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
	rge_write_phy_ocp(sc, 0xb87c, 0x80f4);
	rge_write_phy_ocp(sc, 0xb87e, 0x6077);
	rge_write_phy_ocp(sc, 0xb506, 0x01e7);
	rge_write_phy_ocp(sc, 0xac8c, 0x0ffc);
	rge_write_phy_ocp(sc, 0xac46, 0xb7b4);
	rge_write_phy_ocp(sc, 0xac50, 0x0fbc);
	rge_write_phy_ocp(sc, 0xac3c, 0x9240);
	rge_write_phy_ocp(sc, 0xac4E, 0x0db4);
	rge_write_phy_ocp(sc, 0xacc6, 0x0707);
	rge_write_phy_ocp(sc, 0xacc8, 0xa0d3);
	rge_write_phy_ocp(sc, 0xad08, 0x0007);
	/* Write the (reg, val) pair table declared above. */
	for (i = 0; i < nitems(mac_cfg4_b87c_value); i += 2) {
		rge_write_phy_ocp(sc, 0xb87c, mac_cfg4_b87c_value[i]);
		rge_write_phy_ocp(sc, 0xb87e, mac_cfg4_b87c_value[i + 1]);
	}
	RGE_PHY_SETBIT(sc, 0xbf4c, 0x0002);
	RGE_PHY_SETBIT(sc, 0xbcca, 0x0300);
	rge_write_phy_ocp(sc, 0xb87c, 0x8141);
	rge_write_phy_ocp(sc, 0xb87e, 0x320e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8153);
	rge_write_phy_ocp(sc, 0xb87e, 0x720e);
	RGE_PHY_CLRBIT(sc, 0xa432, 0x0040);
	rge_write_phy_ocp(sc, 0xb87c, 0x8529);
	rge_write_phy_ocp(sc, 0xb87e, 0x050e);
	/* EEE TX idle timer: current MTU + Ethernet header + 32. */
	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN +
	    32);
	rge_write_phy_ocp(sc, 0xa436, 0x816c);
	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
	rge_write_phy_ocp(sc, 0xa436, 0x8170);
	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
	rge_write_phy_ocp(sc, 0xa436, 0x8174);
	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
	rge_write_phy_ocp(sc, 0xa436, 0x8178);
	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
	rge_write_phy_ocp(sc, 0xa436, 0x817c);
	rge_write_phy_ocp(sc, 0xa438, 0x0719);
	rge_write_phy_ocp(sc, 0xa436, 0x8ff4);
	rge_write_phy_ocp(sc, 0xa438, 0x0400);
	rge_write_phy_ocp(sc, 0xa436, 0x8ff1);
	rge_write_phy_ocp(sc, 0xa438, 0x0404);
	rge_write_phy_ocp(sc, 0xbf4a, 0x001b);
	/* Registers 0x8033,0x8037,...: 0xfc32 for the third, else 0x7c13. */
	for (i = 0; i < 6; i++) {
		rge_write_phy_ocp(sc, 0xb87c, 0x8033 + i * 4);
		if (i == 2)
			rge_write_phy_ocp(sc, 0xb87e, 0xfc32);
		else
			rge_write_phy_ocp(sc, 0xb87e, 0x7c13);
	}
	rge_write_phy_ocp(sc, 0xb87c, 0x8145);
	rge_write_phy_ocp(sc, 0xb87e, 0x370e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
	rge_write_phy_ocp(sc, 0xb87e, 0x770e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8169);
	rge_write_phy_ocp(sc, 0xb87e, 0x0d0a);
	rge_write_phy_ocp(sc, 0xb87c, 0x817b);
	rge_write_phy_ocp(sc, 0xb87e, 0x1d0a);
	rge_write_phy_ocp(sc, 0xa436, 0x8217);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x821a);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x80da);
	rge_write_phy_ocp(sc, 0xa438, 0x0403);
	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
	rge_write_phy_ocp(sc, 0xa436, 0x80b3);
	rge_write_phy_ocp(sc, 0xa438, 0x0384);
	rge_write_phy_ocp(sc, 0xa436, 0x80b7);
	rge_write_phy_ocp(sc, 0xa438, 0x2007);
	rge_write_phy_ocp(sc, 0xa436, 0x80ba);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80b5);
	rge_write_phy_ocp(sc, 0xa438, 0xf009);
	rge_write_phy_ocp(sc, 0xa436, 0x80bd);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x9f00);
	rge_write_phy_ocp(sc, 0xa436, 0x80c7);
	rge_write_phy_ocp(sc, 0xa438, 0xf083);
	rge_write_phy_ocp(sc, 0xa436, 0x80dd);
	rge_write_phy_ocp(sc, 0xa438, 0x03f0);
	rge_write_phy_ocp(sc, 0xa436, 0x80df);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
	rge_write_phy_ocp(sc, 0xa436, 0x80cb);
	rge_write_phy_ocp(sc, 0xa438, 0x2007);
	rge_write_phy_ocp(sc, 0xa436, 0x80ce);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80c9);
	rge_write_phy_ocp(sc, 0xa438, 0x8009);
	rge_write_phy_ocp(sc, 0xa436, 0x80d1);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x8000);
	rge_write_phy_ocp(sc, 0xa436, 0x80a3);
	rge_write_phy_ocp(sc, 0xa438, 0x200a);
	rge_write_phy_ocp(sc, 0xa436, 0x80a5);
	rge_write_phy_ocp(sc, 0xa438, 0xf0ad);
	rge_write_phy_ocp(sc, 0xa436, 0x809f);
	rge_write_phy_ocp(sc, 0xa438, 0x6073);
	rge_write_phy_ocp(sc, 0xa436, 0x80a1);
	rge_write_phy_ocp(sc, 0xa438, 0x000b);
	rge_write_phy_ocp(sc, 0xa436, 0x80a9);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xc000);
	/* (reg, val) pairs via 0xb88e/0xb890 under the MCU patch request. */
	rge_patch_phy_mcu(sc, 1);
	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
	rge_write_phy_ocp(sc, 0xb88e, 0xc23e);
	rge_write_phy_ocp(sc, 0xb890, 0x0000);
	rge_write_phy_ocp(sc, 0xb88e, 0xc240);
	rge_write_phy_ocp(sc, 0xb890, 0x0103);
	rge_write_phy_ocp(sc, 0xb88e, 0xc242);
	rge_write_phy_ocp(sc, 0xb890, 0x0507);
	rge_write_phy_ocp(sc, 0xb88e, 0xc244);
	rge_write_phy_ocp(sc, 0xb890, 0x090b);
	rge_write_phy_ocp(sc, 0xb88e, 0xc246);
	rge_write_phy_ocp(sc, 0xb890, 0x0c0e);
	rge_write_phy_ocp(sc, 0xb88e, 0xc248);
	rge_write_phy_ocp(sc, 0xb890, 0x1012);
	rge_write_phy_ocp(sc, 0xb88e, 0xc24a);
	rge_write_phy_ocp(sc, 0xb890, 0x1416);
	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
	rge_patch_phy_mcu(sc, 0);
	/* Final vendor-magic tweaks. */
	RGE_PHY_SETBIT(sc, 0xa86a, 0x0001);
	RGE_PHY_SETBIT(sc, 0xa6f0, 0x0001);
	rge_write_phy_ocp(sc, 0xbfa0, 0xd70d);
	rge_write_phy_ocp(sc, 0xbfa2, 0x4100);
	rge_write_phy_ocp(sc, 0xbfa4, 0xe868);
	rge_write_phy_ocp(sc, 0xbfa6, 0xdc59);
	rge_write_phy_ocp(sc, 0xb54c, 0x3c18);
	RGE_PHY_CLRBIT(sc, 0xbfa4, 0x0020);
	rge_write_phy_ocp(sc, 0xa436, 0x817d);
	RGE_PHY_SETBIT(sc, 0xa438, 0x1000);
}
   2382 
/*
 * PHY configuration for the RTL8125 MAC_CFG5 revision.
 *
 * Applies the vendor EPHY fixup table, loads the PHY MCU patch for
 * this revision, then performs vendor-supplied PHY OCP writes.
 * Values and ordering are taken from Realtek reference code;
 * register semantics are not publicly documented.
 */
void
rge_phy_config_mac_cfg5(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint16_t val;
	int i;

	/* Program the per-revision EPHY initialization table. */
	for (i = 0; i < nitems(rtl8125_mac_cfg5_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg5_ephy[i].reg,
		    rtl8125_mac_cfg5_ephy[i].val);

	/* Load the PHY MCU patch if the stored version differs. */
	rge_phy_config_mcu(sc, RGE_MAC_CFG5_MCODE_VER);

	/* Vendor-magic PHY OCP tweaks (order matters). */
	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
	val = rge_read_phy_ocp(sc, 0xac46) & ~0x00f0;
	rge_write_phy_ocp(sc, 0xac46, val | 0x0090);
	val = rge_read_phy_ocp(sc, 0xad30) & ~0x0003;
	rge_write_phy_ocp(sc, 0xad30, val | 0x0001);
	/* EEE TX idle timer: current MTU + Ethernet header + 32. */
	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN +
	    32);
	rge_write_phy_ocp(sc, 0xb87c, 0x80f5);
	rge_write_phy_ocp(sc, 0xb87e, 0x760e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8107);
	rge_write_phy_ocp(sc, 0xb87e, 0x360e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8551);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0800);
	val = rge_read_phy_ocp(sc, 0xbf00) & ~0xe000;
	rge_write_phy_ocp(sc, 0xbf00, val | 0xa000);
	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
	rge_write_phy_ocp(sc, 0xbf46, val | 0x0300);
	/* Write 0x2417 to registers 0x8044, 0x804a, ..., 0x809a. */
	for (i = 0; i < 10; i++) {
		rge_write_phy_ocp(sc, 0xa436, 0x8044 + i * 6);
		rge_write_phy_ocp(sc, 0xa438, 0x2417);
	}
	RGE_PHY_SETBIT(sc, 0xa4ca, 0x0040);
	val = rge_read_phy_ocp(sc, 0xbf84) & ~0xe000;
	rge_write_phy_ocp(sc, 0xbf84, val | 0xa000);
	rge_write_phy_ocp(sc, 0xa436, 0x8170);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0x2700;
	rge_write_phy_ocp(sc, 0xa438, val | 0xd800);
}
   2425 
   2426 void
   2427 rge_phy_config_mcu(struct rge_softc *sc, uint16_t mcode_version)
   2428 {
   2429 	if (sc->rge_mcodever != mcode_version) {
   2430 		int i;
   2431 
   2432 		rge_patch_phy_mcu(sc, 1);
   2433 
   2434 		if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
   2435 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
   2436 			if (sc->rge_type == MAC_CFG2)
   2437 				rge_write_phy_ocp(sc, 0xa438, 0x8600);
   2438 			else
   2439 				rge_write_phy_ocp(sc, 0xa438, 0x8601);
   2440 			rge_write_phy_ocp(sc, 0xa436, 0xb82e);
   2441 			rge_write_phy_ocp(sc, 0xa438, 0x0001);
   2442 
   2443 			RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
   2444 		}
   2445 
   2446 		if (sc->rge_type == MAC_CFG2) {
   2447 			for (i = 0; i < nitems(rtl8125_mac_cfg2_mcu); i++) {
   2448 				rge_write_phy_ocp(sc,
   2449 				    rtl8125_mac_cfg2_mcu[i].reg,
   2450 				    rtl8125_mac_cfg2_mcu[i].val);
   2451 			}
   2452 		} else if (sc->rge_type == MAC_CFG3) {
   2453 			for (i = 0; i < nitems(rtl8125_mac_cfg3_mcu); i++) {
   2454 				rge_write_phy_ocp(sc,
   2455 				    rtl8125_mac_cfg3_mcu[i].reg,
   2456 				    rtl8125_mac_cfg3_mcu[i].val);
   2457 			}
   2458 		} else if (sc->rge_type == MAC_CFG4) {
   2459 			for (i = 0; i < nitems(rtl8125_mac_cfg4_mcu); i++) {
   2460 				rge_write_phy_ocp(sc,
   2461 				    rtl8125_mac_cfg4_mcu[i].reg,
   2462 				    rtl8125_mac_cfg4_mcu[i].val);
   2463 			}
   2464 		} else if (sc->rge_type == MAC_CFG5) {
   2465 			for (i = 0; i < nitems(rtl8125_mac_cfg5_mcu); i++) {
   2466 				rge_write_phy_ocp(sc,
   2467 				    rtl8125_mac_cfg5_mcu[i].reg,
   2468 				    rtl8125_mac_cfg5_mcu[i].val);
   2469 			}
   2470 		} else if (sc->rge_type == MAC_CFG2_8126) {
   2471 			for (i = 0; i < nitems(rtl8126_mac_cfg2_mcu); i++) {
   2472 				rge_write_phy_ocp(sc,
   2473 				    rtl8126_mac_cfg2_mcu[i].reg,
   2474 				    rtl8126_mac_cfg2_mcu[i].val);
   2475 			}
   2476 		}
   2477 
   2478 		if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
   2479 			RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);
   2480 
   2481 			rge_write_phy_ocp(sc, 0xa436, 0);
   2482 			rge_write_phy_ocp(sc, 0xa438, 0);
   2483 			RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
   2484 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
   2485 			rge_write_phy_ocp(sc, 0xa438, 0);
   2486 		}
   2487 
   2488 		rge_patch_phy_mcu(sc, 0);
   2489 
   2490 		/* Write microcode version. */
   2491 		rge_write_phy_ocp(sc, 0xa436, 0x801e);
   2492 		rge_write_phy_ocp(sc, 0xa438, mcode_version);
   2493 	}
   2494 }
   2495 
   2496 void
   2497 rge_set_macaddr(struct rge_softc *sc, const uint8_t *addr)
   2498 {
   2499 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
   2500 	RGE_WRITE_4(sc, RGE_MAC0,
   2501 	    (uint32_t)addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
   2502 	RGE_WRITE_4(sc, RGE_MAC4,
   2503 	    addr[5] <<  8 | addr[4]);
   2504 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
   2505 }
   2506 
   2507 void
   2508 rge_get_macaddr(struct rge_softc *sc, uint8_t *addr)
   2509 {
   2510 	int i;
   2511 
   2512 	for (i = 0; i < ETHER_ADDR_LEN; i++)
   2513 		addr[i] = RGE_READ_1(sc, RGE_ADDR0 + i);
   2514 }
   2515 
/*
 * One-time controller initialization: clear PME status, clock-request
 * handling, UPS, and the MAC MCU state, then apply per-revision MAC
 * OCP fixup tables and disable PHY power saving.
 */
void
rge_hw_init(struct rge_softc *sc)
{
	int i;

	/* Unlock config registers for the CFG2/CFG5/INT_CFG0 updates. */
	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
	if (sc->rge_type == MAC_CFG2_8126)
		RGE_CLRBIT_1(sc, RGE_INT_CFG0, 0x08);
	else
		RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
	RGE_CLRBIT_1(sc, 0xf1, 0x80);

	/* Disable UPS. */
	RGE_MAC_CLRBIT(sc, 0xd40a, 0x0010);

	/* Configure MAC MCU. */
	rge_write_mac_ocp(sc, 0xfc38, 0);

	/* Zero the 0xfc28-0xfc36 MAC OCP range before reprogramming. */
	for (i = 0xfc28; i < 0xfc38; i += 2)
		rge_write_mac_ocp(sc, i, 0);

	DELAY(3000);
	rge_write_mac_ocp(sc, 0xfc26, 0);

	/*
	 * Apply per-revision MAC OCP fixup tables (presumably MAC MCU
	 * breakpoints -- confirm against the Realtek reference driver).
	 */
	if (sc->rge_type == MAC_CFG3) {
		for (i = 0; i < nitems(rtl8125_mac_bps); i++) {
			rge_write_mac_ocp(sc, rtl8125_mac_bps[i].reg,
			    rtl8125_mac_bps[i].val);
		}
	} else if (sc->rge_type == MAC_CFG5) {
		for (i = 0; i < nitems(rtl8125b_mac_bps); i++) {
			rge_write_mac_ocp(sc, rtl8125b_mac_bps[i].reg,
			    rtl8125b_mac_bps[i].val);
		}
	}

	/* Disable PHY power saving. */
	if (sc->rge_type != MAC_CFG2_8126)
		rge_disable_phy_ocp_pwrsave(sc);

	/* Set PCIe uncorrectable error status. */
	rge_write_csi(sc, 0x108,
	    rge_read_csi(sc, 0x108) | 0x00100000);
}
   2562 
/*
 * Disable PHY OCP power saving by forcing register 0xc416 to 0x0500
 * under the PHY MCU patch request.  Already-configured parts are
 * left untouched.
 */
void
rge_disable_phy_ocp_pwrsave(struct rge_softc *sc)
{
	if (rge_read_phy_ocp(sc, 0xc416) == 0x0500)
		return;

	rge_patch_phy_mcu(sc, 1);
	rge_write_phy_ocp(sc, 0xc416, 0);
	rge_write_phy_ocp(sc, 0xc416, 0x0500);
	rge_patch_phy_mcu(sc, 0);
}
   2573 
/*
 * Assert (set != 0) or release (set == 0) the PHY MCU patch request
 * bit in OCP register 0xb820, then poll 0xb800 bit 0x0040 for up to
 * 100 ms (1000 * 100 us) for the PHY to acknowledge.
 *
 * NOTE(review): the poll condition waits for the bit to become SET in
 * both the assert and the release case -- verify against the Realtek
 * reference driver whether releasing should instead wait for the bit
 * to clear.
 */
void
rge_patch_phy_mcu(struct rge_softc *sc, int set)
{
	int i;

	if (set)
		RGE_PHY_SETBIT(sc, 0xb820, 0x0010);
	else
		RGE_PHY_CLRBIT(sc, 0xb820, 0x0010);

	/* Poll for acknowledgement; break out as soon as it appears. */
	for (i = 0; i < 1000; i++) {
		if ((rge_read_phy_ocp(sc, 0xb800) & 0x0040) == 0x0040)
			break;
		DELAY(100);
	}
	/* Timeout is only logged; callers proceed regardless. */
	if (i == 1000) {
		DPRINTF(("timeout waiting to patch phy mcu\n"));
		return;
	}
}
   2594 
   2595 void
   2596 rge_add_media_types(struct rge_softc *sc)
   2597 {
   2598 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
   2599 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
   2600 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
   2601 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
   2602 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T, 0, NULL);
   2603 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
   2604 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
   2605 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX, 0, NULL);
   2606 
   2607 	if (sc->rge_type == MAC_CFG2_8126) {
   2608 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_5000_T, 0, NULL);
   2609 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_5000_T | IFM_FDX,
   2610 		    0, NULL);
   2611 	}
   2612 }
   2613 
   2614 void
   2615 rge_config_imtype(struct rge_softc *sc, int imtype)
   2616 {
   2617 	switch (imtype) {
   2618 	case RGE_IMTYPE_NONE:
   2619 		sc->rge_intrs = RGE_INTRS;
   2620 		break;
   2621 	case RGE_IMTYPE_SIM:
   2622 		sc->rge_intrs = RGE_INTRS_TIMER;
   2623 		break;
   2624 	default:
   2625 		panic("%s: unknown imtype %d", device_xname(sc->sc_dev), imtype);
   2626 	}
   2627 }
   2628 
void
rge_disable_hw_im(struct rge_softc *sc)
{
	/* Turn off hardware interrupt moderation. */
	RGE_WRITE_2(sc, RGE_IM, 0);
}
   2634 
void
rge_disable_sim_im(struct rge_softc *sc)
{
	/* Turn off simulated (timer-based) interrupt moderation. */
	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
	sc->rge_timerintr = 0;
}
   2641 
void
rge_setup_sim_im(struct rge_softc *sc)
{
	/*
	 * Enable simulated (timer-based) interrupt moderation.
	 * NOTE(review): 0x2600 is the timer interval in device clock
	 * ticks — exact period depends on the chip clock; confirm.
	 */
	RGE_WRITE_4(sc, RGE_TIMERINT0, 0x2600);
	/* Writing the count register arms/reloads the timer. */
	RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
	sc->rge_timerintr = 1;
}
   2649 
   2650 void
   2651 rge_setup_intr(struct rge_softc *sc, int imtype)
   2652 {
   2653 	rge_config_imtype(sc, imtype);
   2654 
   2655 	/* Enable interrupts. */
   2656 	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
   2657 
   2658 	switch (imtype) {
   2659 	case RGE_IMTYPE_NONE:
   2660 		rge_disable_sim_im(sc);
   2661 		rge_disable_hw_im(sc);
   2662 		break;
   2663 	case RGE_IMTYPE_SIM:
   2664 		rge_disable_hw_im(sc);
   2665 		rge_setup_sim_im(sc);
   2666 		break;
   2667 	default:
   2668 		panic("%s: unknown imtype %d", device_xname(sc->sc_dev), imtype);
   2669 	}
   2670 }
   2671 
void
rge_exit_oob(struct rge_softc *sc)
{
	int i;

	/*
	 * Take the chip out of out-of-band (management firmware) mode
	 * so the host driver owns the MAC.
	 */

	/* Stop accepting any packet type while we reconfigure. */
	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
	    RGE_RXCFG_ERRPKT);

	/* Disable RealWoW. */
	rge_write_mac_ocp(sc, 0xc0bc, 0x00ff);

	rge_reset(sc);

	/* Disable OOB. */
	RGE_CLRBIT_1(sc, RGE_MCUCMD, RGE_MCUCMD_IS_OOB);

	RGE_MAC_CLRBIT(sc, 0xe8de, 0x4000);

	/* Wait (up to 1ms) for the firmware handshake bit. */
	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
			break;
	}

	/* Undocumented MAC OCP writes; values taken from the vendor driver. */
	rge_write_mac_ocp(sc, 0xc0aa, 0x07d0);
	rge_write_mac_ocp(sc, 0xc0a6, 0x01b5);
	rge_write_mac_ocp(sc, 0xc01e, 0x5555);

	/* Second handshake wait after the writes above. */
	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
			break;
	}

	/* Handle resume from UPS (ultra power saving) state. */
	if (rge_read_mac_ocp(sc, 0xd42c) & 0x0100) {
		printf("%s: rge_exit_oob(): rtl8125_is_ups_resume!!\n",
		    device_xname(sc->sc_dev));
		/* Wait for the PHY to reach state 2 (up to RGE_TIMEOUT ms). */
		for (i = 0; i < RGE_TIMEOUT; i++) {
			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 2)
				break;
			DELAY(1000);
		}
		RGE_MAC_CLRBIT(sc, 0xd408, 0x0100);
		if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5)
			RGE_PHY_CLRBIT(sc, 0xa466, 0x0001);
		RGE_PHY_CLRBIT(sc, 0xa468, 0x000a);
	}
}
   2721 
   2722 void
   2723 rge_write_csi(struct rge_softc *sc, uint32_t reg, uint32_t val)
   2724 {
   2725 	int i;
   2726 
   2727 	RGE_WRITE_4(sc, RGE_CSIDR, val);
   2728 	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
   2729 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT) | RGE_CSIAR_BUSY);
   2730 
   2731 	for (i = 0; i < 10; i++) {
   2732 		 DELAY(100);
   2733 		 if (!(RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY))
   2734 			break;
   2735 	}
   2736 
   2737 	DELAY(20);
   2738 }
   2739 
   2740 uint32_t
   2741 rge_read_csi(struct rge_softc *sc, uint32_t reg)
   2742 {
   2743 	int i;
   2744 
   2745 	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
   2746 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT));
   2747 
   2748 	for (i = 0; i < 10; i++) {
   2749 		 DELAY(100);
   2750 		 if (RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY)
   2751 			break;
   2752 	}
   2753 
   2754 	DELAY(20);
   2755 
   2756 	return (RGE_READ_4(sc, RGE_CSIDR));
   2757 }
   2758 
   2759 void
   2760 rge_write_mac_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
   2761 {
   2762 	uint32_t tmp;
   2763 
   2764 	tmp = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
   2765 	tmp += val;
   2766 	tmp |= RGE_MACOCP_BUSY;
   2767 	RGE_WRITE_4(sc, RGE_MACOCP, tmp);
   2768 }
   2769 
   2770 uint16_t
   2771 rge_read_mac_ocp(struct rge_softc *sc, uint16_t reg)
   2772 {
   2773 	uint32_t val;
   2774 
   2775 	val = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
   2776 	RGE_WRITE_4(sc, RGE_MACOCP, val);
   2777 
   2778 	return (RGE_READ_4(sc, RGE_MACOCP) & RGE_MACOCP_DATA_MASK);
   2779 }
   2780 
   2781 void
   2782 rge_write_ephy(struct rge_softc *sc, uint16_t reg, uint16_t val)
   2783 {
   2784 	uint32_t tmp;
   2785 	int i;
   2786 
   2787 	tmp = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
   2788 	tmp |= RGE_EPHYAR_BUSY | (val & RGE_EPHYAR_DATA_MASK);
   2789 	RGE_WRITE_4(sc, RGE_EPHYAR, tmp);
   2790 
   2791 	for (i = 0; i < 10; i++) {
   2792 		DELAY(100);
   2793 		if (!(RGE_READ_4(sc, RGE_EPHYAR) & RGE_EPHYAR_BUSY))
   2794 			break;
   2795 	}
   2796 
   2797 	DELAY(20);
   2798 }
   2799 
   2800 uint16_t
   2801 rge_read_ephy(struct rge_softc *sc, uint16_t reg)
   2802 {
   2803 	uint32_t val;
   2804 	int i;
   2805 
   2806 	val = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
   2807 	RGE_WRITE_4(sc, RGE_EPHYAR, val);
   2808 
   2809 	for (i = 0; i < 10; i++) {
   2810 		DELAY(100);
   2811 		val = RGE_READ_4(sc, RGE_EPHYAR);
   2812 		if (val & RGE_EPHYAR_BUSY)
   2813 			break;
   2814 	}
   2815 
   2816 	DELAY(20);
   2817 
   2818 	return (val & RGE_EPHYAR_DATA_MASK);
   2819 }
   2820 
   2821 void
   2822 rge_write_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg, uint16_t val)
   2823 {
   2824 	uint16_t off, phyaddr;
   2825 
   2826 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
   2827 	phyaddr <<= 4;
   2828 
   2829 	off = addr ? reg : 0x10 + (reg % 8);
   2830 
   2831 	phyaddr += (off - 16) << 1;
   2832 
   2833 	rge_write_phy_ocp(sc, phyaddr, val);
   2834 }
   2835 
   2836 uint16_t
   2837 rge_read_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg)
   2838 {
   2839 	uint16_t off, phyaddr;
   2840 
   2841 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
   2842 	phyaddr <<= 4;
   2843 
   2844 	off = addr ? reg : 0x10 + (reg % 8);
   2845 
   2846 	phyaddr += (off - 16) << 1;
   2847 
   2848 	return (rge_read_phy_ocp(sc, phyaddr));
   2849 }
   2850 
   2851 void
   2852 rge_write_phy_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
   2853 {
   2854 	uint32_t tmp;
   2855 	int i;
   2856 
   2857 	tmp = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
   2858 	tmp |= RGE_PHYOCP_BUSY | val;
   2859 	RGE_WRITE_4(sc, RGE_PHYOCP, tmp);
   2860 
   2861 	for (i = 0; i < RGE_TIMEOUT; i++) {
   2862 		DELAY(1);
   2863 		if (!(RGE_READ_4(sc, RGE_PHYOCP) & RGE_PHYOCP_BUSY))
   2864 			break;
   2865 	}
   2866 }
   2867 
   2868 uint16_t
   2869 rge_read_phy_ocp(struct rge_softc *sc, uint16_t reg)
   2870 {
   2871 	uint32_t val;
   2872 	int i;
   2873 
   2874 	val = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
   2875 	RGE_WRITE_4(sc, RGE_PHYOCP, val);
   2876 
   2877 	for (i = 0; i < RGE_TIMEOUT; i++) {
   2878 		DELAY(1);
   2879 		val = RGE_READ_4(sc, RGE_PHYOCP);
   2880 		if (val & RGE_PHYOCP_BUSY)
   2881 			break;
   2882 	}
   2883 
   2884 	return (val & RGE_PHYOCP_DATA_MASK);
   2885 }
   2886 
   2887 int
   2888 rge_get_link_status(struct rge_softc *sc)
   2889 {
   2890 	return ((RGE_READ_2(sc, RGE_PHYSTAT) & RGE_PHYSTAT_LINK) ? 1 : 0);
   2891 }
   2892 
void
rge_txstart(void *arg)
{
	struct rge_softc *sc = arg;

	/* Tell the chip to start fetching descriptors from the TX ring. */
	RGE_WRITE_2(sc, RGE_TXSTART, RGE_TXSTART_START);
}
   2900 
void
rge_tick(void *arg)
{
	struct rge_softc *sc = arg;
	int s;

	/* Periodic (1Hz) link-state poll, run from callout context. */
	s = splnet();
	rge_link_state(sc);
	splx(s);

	/* Re-arm ourselves for one second from now. */
	callout_schedule(&sc->sc_timeout, hz);
}
   2913 
   2914 void
   2915 rge_link_state(struct rge_softc *sc)
   2916 {
   2917 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   2918 	int link = LINK_STATE_DOWN;
   2919 
   2920 	if (rge_get_link_status(sc))
   2921 		link = LINK_STATE_UP;
   2922 
   2923 	if (ifp->if_link_state != link) { /* XXX not safe to access */
   2924 		if_link_state_change(ifp, link);
   2925 	}
   2926 }
   2927 
/* Module interface */

/* Loadable-module declaration; "pci" names the required parent module. */
MODULE(MODULE_CLASS_DRIVER, if_rge, "pci");

#ifdef _MODULE
/* Generated autoconf tables, only needed for the standalone module. */
#include "ioconf.c"
#endif
   2935 
   2936 static int
   2937 if_rge_modcmd(modcmd_t cmd, void *opaque)
   2938 {
   2939 	int error = 0;
   2940 
   2941 	switch (cmd) {
   2942 	case MODULE_CMD_INIT:
   2943 #ifdef _MODULE
   2944 		error = config_init_component(cfdriver_ioconf_rge,
   2945 		    cfattach_ioconf_rge, cfdata_ioconf_rge);
   2946 #endif
   2947 		return error;
   2948 	case MODULE_CMD_FINI:
   2949 #ifdef _MODULE
   2950 		error = config_fini_component(cfdriver_ioconf_rge,
   2951 		    cfattach_ioconf_rge, cfdata_ioconf_rge);
   2952 #endif
   2953 		return error;
   2954 	default:
   2955 		return ENOTTY;
   2956 	}
   2957 }
   2958