/* Home | History | Annotate | Line # | Download | only in pci */
      1 /*	$NetBSD: if_rge.c,v 1.36 2025/10/23 17:09:37 pgoyette Exp $	*/
      2 /*	$OpenBSD: if_rge.c,v 1.9 2020/12/12 11:48:53 jan Exp $	*/
      3 
      4 /*
      5  * Copyright (c) 2019, 2020 Kevin Lo <kevlo (at) openbsd.org>
      6  *
      7  * Permission to use, copy, modify, and distribute this software for any
      8  * purpose with or without fee is hereby granted, provided that the above
      9  * copyright notice and this permission notice appear in all copies.
     10  *
     11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
     12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
     13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
     14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
     15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
     16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
     17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
     18  */
     19 
     20 #include <sys/cdefs.h>
     21 __KERNEL_RCSID(0, "$NetBSD: if_rge.c,v 1.36 2025/10/23 17:09:37 pgoyette Exp $");
     22 
     23 #if defined(_KERNEL_OPT)
     24 #include "opt_net_mpsafe.h"
     25 #endif
     26 
     27 #include <sys/types.h>
     28 
     29 #include <sys/param.h>
     30 #include <sys/systm.h>
     31 #include <sys/sockio.h>
     32 #include <sys/mbuf.h>
     33 #include <sys/kernel.h>
     34 #include <sys/socket.h>
     35 #include <sys/device.h>
     36 #include <sys/endian.h>
     37 #include <sys/callout.h>
     38 #include <sys/workqueue.h>
     39 #include <sys/module.h>
     40 
     41 #include <net/if.h>
     42 
     43 #include <net/if_dl.h>
     44 #include <net/if_ether.h>
     45 
     46 #include <net/if_media.h>
     47 
     48 #include <netinet/in.h>
     49 #include <net/if_ether.h>
     50 
     51 #include <net/bpf.h>
     52 
     53 #include <sys/bus.h>
     54 #include <machine/intr.h>
     55 
     56 #include <dev/mii/mii.h>
     57 
     58 #include <dev/pci/pcivar.h>
     59 #include <dev/pci/pcireg.h>
     60 #include <dev/pci/pcidevs.h>
     61 
     62 #include <dev/pci/if_rgereg.h>
     63 
     64 #ifdef __NetBSD__
     65 #define letoh32 	htole32
     66 #define nitems(x) 	__arraycount(x)
     67 
     68 static struct mbuf *
     69 MCLGETL(struct rge_softc *sc __unused, int how,
     70     u_int size)
     71 {
     72 	struct mbuf *m;
     73 
     74 	MGETHDR(m, how, MT_DATA);
     75 	if (m == NULL)
     76 		return NULL;
     77 
     78 	MEXTMALLOC(m, size, how);
     79 	if ((m->m_flags & M_EXT) == 0) {
     80 		m_freem(m);
     81 		return NULL;
     82 	}
     83 	return m;
     84 }
     85 
     86 #ifdef NET_MPSAFE
     87 #define 	RGE_MPSAFE	1
     88 #define 	CALLOUT_FLAGS	CALLOUT_MPSAFE
     89 #else
     90 #define 	CALLOUT_FLAGS	0
     91 #endif
     92 #endif
     93 
     94 #ifdef RGE_DEBUG
     95 #define DPRINTF(x)	do { if (rge_debug > 0) printf x; } while (0)
     96 int rge_debug = 0;
     97 #else
     98 #define DPRINTF(x)
     99 #endif
    100 
    101 static int		rge_match(device_t, cfdata_t, void *);
    102 static void		rge_attach(device_t, device_t, void *);
    103 int		rge_intr(void *);
    104 int		rge_encap(struct rge_softc *, struct mbuf *, int);
    105 int		rge_ioctl(struct ifnet *, u_long, void *);
    106 void		rge_start(struct ifnet *);
    107 void		rge_watchdog(struct ifnet *);
    108 int		rge_init(struct ifnet *);
    109 void		rge_stop(struct ifnet *, int);
    110 int		rge_ifmedia_upd(struct ifnet *);
    111 void		rge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
    112 int		rge_allocmem(struct rge_softc *);
    113 int		rge_newbuf(struct rge_softc *, int);
    114 static int	rge_rx_list_init(struct rge_softc *);
    115 static void	rge_rx_list_fini(struct rge_softc *);
    116 static void	rge_tx_list_init(struct rge_softc *);
    117 static void	rge_tx_list_fini(struct rge_softc *);
    118 int		rge_rxeof(struct rge_softc *);
    119 int		rge_txeof(struct rge_softc *);
    120 void		rge_reset(struct rge_softc *);
    121 void		rge_iff(struct rge_softc *);
    122 void		rge_set_phy_power(struct rge_softc *, int);
    123 void		rge_phy_config(struct rge_softc *);
    124 void		rge_phy_config_mac_cfg2_8126(struct rge_softc *);
    125 void		rge_phy_config_mac_cfg2(struct rge_softc *);
    126 void		rge_phy_config_mac_cfg3(struct rge_softc *);
    127 void		rge_phy_config_mac_cfg4(struct rge_softc *);
    128 void		rge_phy_config_mac_cfg5(struct rge_softc *);
    129 void		rge_phy_config_mcu(struct rge_softc *, uint16_t);
    130 void		rge_set_macaddr(struct rge_softc *, const uint8_t *);
    131 void		rge_get_macaddr(struct rge_softc *, uint8_t *);
    132 void		rge_hw_init(struct rge_softc *);
    133 void		rge_disable_phy_ocp_pwrsave(struct rge_softc *);
    134 void		rge_patch_phy_mcu(struct rge_softc *, int);
    135 void		rge_add_media_types(struct rge_softc *);
    136 void		rge_config_imtype(struct rge_softc *, int);
    137 void		rge_disable_hw_im(struct rge_softc *);
    138 void		rge_disable_sim_im(struct rge_softc *);
    139 void		rge_setup_sim_im(struct rge_softc *);
    140 void		rge_setup_intr(struct rge_softc *, int);
    141 void		rge_exit_oob(struct rge_softc *);
    142 void		rge_write_csi(struct rge_softc *, uint32_t, uint32_t);
    143 uint32_t	rge_read_csi(struct rge_softc *, uint32_t);
    144 void		rge_write_mac_ocp(struct rge_softc *, uint16_t, uint16_t);
    145 uint16_t	rge_read_mac_ocp(struct rge_softc *, uint16_t);
    146 void		rge_write_ephy(struct rge_softc *, uint16_t, uint16_t);
    147 uint16_t	rge_read_ephy(struct rge_softc *, uint16_t);
    148 void		rge_write_phy(struct rge_softc *, uint16_t, uint16_t, uint16_t);
    149 uint16_t	rge_read_phy(struct rge_softc *, uint16_t, uint16_t);
    150 void		rge_write_phy_ocp(struct rge_softc *, uint16_t, uint16_t);
    151 uint16_t	rge_read_phy_ocp(struct rge_softc *, uint16_t);
    152 int		rge_get_link_status(struct rge_softc *);
    153 void		rge_txstart(void *);
    154 void		rge_tick(void *);
    155 void		rge_link_state(struct rge_softc *);
    156 
/*
 * MAC MCU patch tables, one per supported chip revision.  Each entry
 * is a (register, value) pair; the data itself comes from the
 * RTL8125_*/RTL8126_* macros in if_rgereg.h.  Presumably consumed by
 * rge_phy_config_mcu() during PHY setup — confirm against that code.
 */
static const struct {
	uint16_t reg;
	uint16_t val;
}  rtl8125_mac_cfg2_mcu[] = {
	RTL8125_MAC_CFG2_MCU
}, rtl8125_mac_cfg3_mcu[] = {
	RTL8125_MAC_CFG3_MCU
}, rtl8125_mac_cfg4_mcu[] = {
	RTL8125_MAC_CFG4_MCU
}, rtl8125_mac_cfg5_mcu[] = {
	RTL8125_MAC_CFG5_MCU
}, rtl8126_mac_cfg2_mcu[] = {
	RTL8126_MAC_CFG2_MCU
};
    171 
    172 CFATTACH_DECL_NEW(rge, sizeof(struct rge_softc), rge_match, rge_attach,
    173 		NULL, NULL); /* Sevan - detach function? */
    174 
    175 static const struct device_compatible_entry compat_data[] = {
    176 	{ .id = PCI_ID_CODE(PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_E3000) },
    177 	{ .id = PCI_ID_CODE(PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8125) },
    178 	{ .id = PCI_ID_CODE(PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8126) },
    179 
    180 	PCI_COMPAT_EOL
    181 };
    182 
    183 static int
    184 rge_match(device_t parent, cfdata_t match, void *aux)
    185 {
    186 	struct pci_attach_args *pa =aux;
    187 
    188 	return pci_compatible_match(pa, compat_data);
    189 }
    190 
    191 void
    192 rge_attach(device_t parent, device_t self, void *aux)
    193 {
    194 	struct rge_softc *sc = device_private(self);
    195 	struct pci_attach_args *pa = aux;
    196 	pci_chipset_tag_t pc = pa->pa_pc;
    197 	pci_intr_handle_t *ihp;
    198 	char intrbuf[PCI_INTRSTR_LEN];
    199 	const char *intrstr = NULL;
    200 	struct ifnet *ifp;
    201 	pcireg_t reg;
    202 	uint32_t hwrev;
    203 	uint8_t eaddr[ETHER_ADDR_LEN];
    204 	int offset;
    205 	pcireg_t command;
    206 	const char *revstr;
    207 
    208 	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
    209 
    210 	sc->sc_dev = self;
    211 
    212 	pci_aprint_devinfo(pa, "Ethernet controller");
    213 
    214 	/*
    215 	 * Map control/status registers.
    216 	 */
    217 	if (pci_mapreg_map(pa, RGE_PCI_BAR2, PCI_MAPREG_TYPE_MEM |
    218 	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->rge_btag, &sc->rge_bhandle,
    219 	    NULL, &sc->rge_bsize)) {
    220 		if (pci_mapreg_map(pa, RGE_PCI_BAR1, PCI_MAPREG_TYPE_MEM |
    221 		    PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->rge_btag,
    222 		    &sc->rge_bhandle, NULL, &sc->rge_bsize)) {
    223 			if (pci_mapreg_map(pa, RGE_PCI_BAR0, PCI_MAPREG_TYPE_IO,
    224 			    0, &sc->rge_btag, &sc->rge_bhandle, NULL,
    225 			    &sc->rge_bsize)) {
    226 				aprint_error(": can't map mem or i/o space\n");
    227 				return;
    228 			}
    229 		}
    230 	}
    231 
    232 	int counts[PCI_INTR_TYPE_SIZE] = {
    233  		[PCI_INTR_TYPE_INTX] = 1,
    234  		[PCI_INTR_TYPE_MSI] = 1,
    235  		[PCI_INTR_TYPE_MSIX] = 1,
    236  	};
    237 	int max_type = PCI_INTR_TYPE_MSIX;
    238 	/*
    239 	 * Allocate interrupt.
    240 	 */
    241 	if (pci_intr_alloc(pa, &ihp, counts, max_type) != 0) {
    242 		aprint_error(": couldn't map interrupt\n");
    243 		return;
    244 	}
    245 	switch (pci_intr_type(pc, ihp[0])) {
    246 	case PCI_INTR_TYPE_MSIX:
    247 	case PCI_INTR_TYPE_MSI:
    248 		sc->rge_flags |= RGE_FLAG_MSI;
    249 		break;
    250 	default:
    251 		break;
    252 	}
    253 	intrstr = pci_intr_string(pc, ihp[0], intrbuf, sizeof(intrbuf));
    254 	sc->sc_ih = pci_intr_establish_xname(pc, ihp[0], IPL_NET, rge_intr,
    255 	    sc, device_xname(sc->sc_dev));
    256 	if (sc->sc_ih == NULL) {
    257 		aprint_error_dev(sc->sc_dev, ": couldn't establish interrupt");
    258 		if (intrstr != NULL)
    259 			aprint_error(" at %s\n", intrstr);
    260 		aprint_error("\n");
    261 		return;
    262 	}
    263 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
    264 
    265 	if (pci_dma64_available(pa))
    266 		sc->sc_dmat = pa->pa_dmat64;
    267 	else
    268 		sc->sc_dmat = pa->pa_dmat;
    269 
    270 	sc->sc_pc = pa->pa_pc;
    271 	sc->sc_tag = pa->pa_tag;
    272 
    273 	/* Determine hardware revision */
    274 	hwrev = RGE_READ_4(sc, RGE_TXCFG) & RGE_TXCFG_HWREV;
    275 	switch (hwrev) {
    276 	case 0x60800000:
    277 		sc->rge_type = MAC_CFG2;
    278 		revstr = "Z1";
    279 		break;
    280 	case 0x60900000:
    281 		sc->rge_type = MAC_CFG3;
    282 		revstr = "Z2";
    283 		break;
    284 	case 0x64000000:
    285 		sc->rge_type = MAC_CFG4;
    286 		revstr = "A";
    287 		break;
    288 	case 0x64100000:
    289 		sc->rge_type = MAC_CFG5;
    290 		revstr = "B";
    291 		break;
    292 	case 0x64900000:
    293 		sc->rge_type = MAC_CFG2_8126;
    294 		revstr = "A";
    295 		break;
    296 	case 0x64a00000:
    297 		sc->rge_type = MAC_CFG2_8126;
    298 		revstr = "B";
    299 		break;
    300 	default:
    301 		aprint_error(": unknown version 0x%08x\n", hwrev);
    302 		return;
    303 	}
    304 
    305 	aprint_normal_dev(sc->sc_dev, "HW rev. %s\n", revstr);
    306 	rge_config_imtype(sc, RGE_IMTYPE_SIM);
    307 
    308 	/*
    309 	 * PCI Express check.
    310 	 */
    311 	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
    312 	    &offset, NULL)) {
    313 		/* Disable PCIe ASPM and ECPM. */
    314 		reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
    315 		    offset + PCIE_LCSR);
    316 		reg &= ~(PCIE_LCSR_ASPM_L0S | PCIE_LCSR_ASPM_L1 |
    317 		    PCIE_LCSR_ENCLKPM);
    318 		pci_conf_write(pa->pa_pc, pa->pa_tag, offset + PCIE_LCSR,
    319 		    reg);
    320 	}
    321 
    322 	rge_exit_oob(sc);
    323 	rge_hw_init(sc);
    324 
    325 	rge_get_macaddr(sc, eaddr);
    326 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
    327 	    ether_sprintf(eaddr));
    328 
    329 	memcpy(sc->sc_enaddr, eaddr, ETHER_ADDR_LEN);
    330 
    331 	rge_set_phy_power(sc, 1);
    332 	rge_phy_config(sc);
    333 
    334 	if (rge_allocmem(sc))
    335 		return;
    336 
    337 	ifp = &sc->sc_ec.ec_if;
    338 	ifp->if_softc = sc;
    339 	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
    340 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    341 #ifdef RGE_MPSAFE
    342 	ifp->if_extflags = IFEF_MPSAFE;
    343 #endif
    344 	ifp->if_ioctl = rge_ioctl;
    345 	ifp->if_stop = rge_stop;
    346 	ifp->if_start = rge_start;
    347 	ifp->if_init = rge_init;
    348 	ifp->if_watchdog = rge_watchdog;
    349 	IFQ_SET_MAXLEN(&ifp->if_snd, RGE_TX_LIST_CNT - 1);
    350 
    351 #if notyet
    352 	ifp->if_capabilities = IFCAP_CSUM_IPv4_Rx |
    353 	    IFCAP_CSUM_IPv4_Tx |IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_TCPv4_Tx|
    354 	    IFCAP_CSUM_UDPv4_Rx | IFCAP_CSUM_UDPv4_Tx;
    355 #endif
    356 
    357 	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
    358 	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
    359 
    360 	callout_init(&sc->sc_timeout, CALLOUT_FLAGS);
    361 	callout_setfunc(&sc->sc_timeout, rge_tick, sc);
    362 
    363 	command = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
    364 	command |= PCI_COMMAND_MASTER_ENABLE;
    365 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
    366 
    367 	/* Initialize ifmedia structures. */
    368 	sc->sc_ec.ec_ifmedia = &sc->sc_media;
    369 	ifmedia_init(&sc->sc_media, IFM_IMASK, rge_ifmedia_upd,
    370 	    rge_ifmedia_sts);
    371 	rge_add_media_types(sc);
    372 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
    373 	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
    374 	sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media;
    375 
    376 	if_attach(ifp);
    377 	if_deferred_start_init(ifp, NULL);
    378 	ether_ifattach(ifp, eaddr);
    379 
    380 	if (pmf_device_register(self, NULL, NULL))
    381 		pmf_class_network_register(self, ifp);
    382 	else
    383 		aprint_error_dev(self, "couldn't establish power handler\n");
    384 }
    385 
/*
 * Interrupt handler.  Acknowledges pending events, reaps completed RX
 * and TX work, and switches between direct TX/RX interrupts and the
 * simulated (timer-based) interrupt moderation scheme depending on
 * load.  Returns nonzero if the interrupt was ours.
 */
int
rge_intr(void *arg)
{
	struct rge_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint32_t status;
	int claimed = 0, rx, tx;

	/* Ignore interrupts while the interface is down. */
	if (!(ifp->if_flags & IFF_RUNNING))
		return (0);

	/* Disable interrupts. */
	RGE_WRITE_4(sc, RGE_IMR, 0);

	/*
	 * With INTx the line may be shared: bail out early if none of
	 * our interrupt sources is pending.  NOTE(review): this path
	 * returns with IMR still cleared — confirm against the OpenBSD
	 * original whether that is intentional.
	 */
	if (!(sc->rge_flags & RGE_FLAG_MSI)) {
		if ((RGE_READ_4(sc, RGE_ISR) & sc->rge_intrs) == 0)
			return (0);
	}

	/* Read and acknowledge all pending events. */
	status = RGE_READ_4(sc, RGE_ISR);
	if (status)
		RGE_WRITE_4(sc, RGE_ISR, status);

	if (status & RGE_ISR_PCS_TIMEOUT)
		claimed = 1;

	rx = tx = 0;
	if (status & sc->rge_intrs) {
		if (status &
		    (sc->rge_rx_ack | RGE_ISR_RX_ERR | RGE_ISR_RX_FIFO_OFLOW)) {
			rx |= rge_rxeof(sc);
			claimed = 1;
		}

		if (status & (sc->rge_tx_ack | RGE_ISR_TX_ERR)) {
			tx |= rge_txeof(sc);
			claimed = 1;
		}

		/* Fatal chip error: reinitialize the whole interface. */
		if (status & RGE_ISR_SYSTEM_ERR) {
			KERNEL_LOCK(1, NULL);
			rge_init(ifp);
			KERNEL_UNLOCK_ONE(NULL);
			claimed = 1;
		}
	}

	if (sc->rge_timerintr) {
		if ((tx | rx) == 0) {
			/*
			 * Nothing needs to be processed, fallback
			 * to use TX/RX interrupts.
			 */
			rge_setup_intr(sc, RGE_IMTYPE_NONE);

			/*
			 * Recollect, mainly to avoid the possible
			 * race introduced by changing interrupt
			 * masks.
			 */
			rge_rxeof(sc);
			rge_txeof(sc);
		} else
			RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
	} else if (tx | rx) {
		/*
		 * Assume that using simulated interrupt moderation
		 * (hardware timer based) could reduce the interrupt
		 * rate.
		 */
		rge_setup_intr(sc, RGE_IMTYPE_SIM);
	}

	/* Re-enable our interrupt sources. */
	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);

	return (claimed);
}
    463 
/*
 * Build the TX descriptor chain for mbuf (m) starting at ring index
 * (idx).  Returns the number of descriptors consumed, or 0 on failure,
 * in which case the caller (rge_start) frees the mbuf.
 */
int
rge_encap(struct rge_softc *sc, struct mbuf *m, int idx)
{
	struct rge_tx_desc *d = NULL;
	struct rge_txq *txq;
	bus_dmamap_t txmap;
	uint32_t cmdsts, cflags = 0;
	int cur, error, i, last, nsegs;

#if notyet
	/*
	 * Set RGE_TDEXTSTS_IPCSUM if any checksum offloading is requested.
	 * Otherwise, RGE_TDEXTSTS_TCPCSUM / RGE_TDEXTSTS_UDPCSUM does not
	 * take affect.
	 */
	if ((m->m_pkthdr.csum_flags &
	    (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) != 0) {
		cflags |= RGE_TDEXTSTS_IPCSUM;
		if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
			cflags |= RGE_TDEXTSTS_TCPCSUM;
		if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
			cflags |= RGE_TDEXTSTS_UDPCSUM;
	}
#endif

	txq = &sc->rge_ldata.rge_txq[idx];
	txmap = txq->txq_dmamap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m, BUS_DMA_NOWAIT);
	switch (error) {
	case 0:
		break;
	case EFBIG: /* mbuf chain is too fragmented */
		/* Compact the chain and retry the load once. */
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m,
		    BUS_DMA_NOWAIT) == 0)
			break;

		/* FALLTHROUGH */
	default:
		return (0);
	}

	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	nsegs = txmap->dm_nsegs;

	/* Set up hardware VLAN tagging. */
	/* NOTE(review): tag is byte-swapped for the descriptor — presumably
	 * the chip wants it in network byte order; confirm with datasheet. */
	if (vlan_has_tag(m))
		cflags |= bswap16(vlan_get_tag(m)) | RGE_TDEXTSTS_VTAG;

	last = cur = idx;
	cmdsts = RGE_TDCMDSTS_SOF;

	/*
	 * Fill one descriptor per DMA segment.  Every descriptor after
	 * the first is handed to the chip (OWN) immediately; the first
	 * descriptor's OWN bit is set last, below, so the chip cannot
	 * start on a half-built chain.
	 */
	for (i = 0; i < txmap->dm_nsegs; i++) {
		d = &sc->rge_ldata.rge_tx_list[cur];

		d->rge_extsts = htole32(cflags);
		d->rge_addrlo = htole32(RGE_ADDR_LO(txmap->dm_segs[i].ds_addr));
		d->rge_addrhi = htole32(RGE_ADDR_HI(txmap->dm_segs[i].ds_addr));

		cmdsts |= txmap->dm_segs[i].ds_len;

		/* Wrap bit on the physically last ring entry. */
		if (cur == RGE_TX_LIST_CNT - 1)
			cmdsts |= RGE_TDCMDSTS_EOR;

		d->rge_cmdsts = htole32(cmdsts);

		last = cur;
		cmdsts = RGE_TDCMDSTS_OWN;
		cur = RGE_NEXT_TX_DESC(cur);
	}

	/* Set EOF on the last descriptor. */
	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_EOF);

	/* Transfer ownership of packet to the chip. */
	d = &sc->rge_ldata.rge_tx_list[idx];

	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_OWN);

	/* NOTE(review): this syncs a single descriptor at index `cur`
	 * (one past the last written) — verify it covers the whole
	 * idx..last span on all architectures. */
	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
	    cur * sizeof(struct rge_tx_desc), sizeof(struct rge_tx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Update info of TX queue and descriptors. */
	txq->txq_mbuf = m;
	txq->txq_descidx = last;

	return (nsegs);
}
    556 
    557 int
    558 rge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
    559 {
    560 	struct rge_softc *sc = ifp->if_softc;
    561 	//struct ifreq *ifr = (struct ifreq *)data;
    562 	int s, error = 0;
    563 
    564 	s = splnet();
    565 
    566 	switch (cmd) {
    567 	case SIOCSIFFLAGS:
    568 		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
    569 			break;
    570 		/* XXX set an ifflags callback and let ether_ioctl
    571 		 * handle all of this.
    572 		 */
    573 		if (ifp->if_flags & IFF_UP) {
    574 			if (ifp->if_flags & IFF_RUNNING)
    575 				error = ENETRESET;
    576 			else
    577 				rge_init(ifp);
    578 		} else {
    579 			if (ifp->if_flags & IFF_RUNNING)
    580 				rge_stop(ifp, 1);
    581 		}
    582 		break;
    583 	default:
    584 		error = ether_ioctl(ifp, cmd, data);
    585 	}
    586 
    587 	if (error == ENETRESET) {
    588 		if (ifp->if_flags & IFF_RUNNING)
    589 			rge_iff(sc);
    590 		error = 0;
    591 	}
    592 
    593 	splx(s);
    594 	return (error);
    595 }
    596 
/*
 * Transmit start routine: drain the send queue into the TX descriptor
 * ring until the queue is empty or the ring is (nearly) full, then
 * kick the chip once.
 */
void
rge_start(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int free, idx, used;
	int queued = 0;

#define LINK_STATE_IS_UP(_s)    \
	((_s) >= LINK_STATE_UP || (_s) == LINK_STATE_UNKNOWN)

	/* No link: nothing we queue can be sent, so drop it all. */
	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
		IFQ_PURGE(&ifp->if_snd);
		return;
	}

	/* Calculate free space. */
	/* Ring occupancy: consumer index minus producer index, mod ring size. */
	idx = sc->rge_ldata.rge_txq_prodidx;
	free = sc->rge_ldata.rge_txq_considx;
	if (free <= idx)
		free += RGE_TX_LIST_CNT;
	free -= idx;

	for (;;) {
		/*
		 * Stop when a worst-case fragmented packet (RGE_TX_NSEGS
		 * descriptors, plus slack) might not fit.
		 */
		if (RGE_TX_NSEGS >= free + 2) {
			SET(ifp->if_flags, IFF_OACTIVE);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		/* rge_encap() returns 0 on failure; the mbuf is ours to free. */
		used = rge_encap(sc, m, idx);
		if (used == 0) {
			m_freem(m);
			continue;
		}

		KASSERT(used <= free);
		free -= used;

		bpf_mtap(ifp, m, BPF_D_OUT);

		idx += used;
		if (idx >= RGE_TX_LIST_CNT)
			idx -= RGE_TX_LIST_CNT;

		queued++;
	}

	if (queued == 0)
		return;

	/* Set a timeout in case the chip goes out to lunch. */
	ifp->if_timer = 5;

	/* Publish the new producer index and poke the chip once. */
	sc->rge_ldata.rge_txq_prodidx = idx;
	rge_txstart(sc);
}
    657 
    658 void
    659 rge_watchdog(struct ifnet *ifp)
    660 {
    661 	struct rge_softc *sc = ifp->if_softc;
    662 
    663 	device_printf(sc->sc_dev, "watchdog timeout\n");
    664 	if_statinc(ifp, if_oerrors);
    665 
    666 	rge_init(ifp);
    667 }
    668 
/*
 * Bring the interface up: program the MAC address and descriptor ring
 * addresses, run the chip-specific tuning sequence, and enable TX/RX.
 * Returns 0 on success or an errno if RX buffers cannot be allocated.
 *
 * Most of the raw register/OCP writes below have no public
 * documentation; the addresses and values presumably come from the
 * Realtek vendor driver via OpenBSD — do not reorder them.
 */
int
rge_init(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	uint32_t val;
	unsigned i;

	/* Quiesce the chip first; keep buffers for reuse (disable=0). */
	rge_stop(ifp, 0);

	/* Set MAC address. */
	rge_set_macaddr(sc, CLLADDR(ifp->if_sadl));

	/* Set Maximum frame size. */
	RGE_WRITE_2(sc, RGE_RXMAXSIZE, RGE_JUMBO_FRAMELEN);

	/* Initialize RX descriptors list. */
	int error = rge_rx_list_init(sc);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "init failed: no memory for RX buffers\n");
		rge_stop(ifp, 1);
		return error;
	}

	/* Initialize TX descriptors. */
	rge_tx_list_init(sc);

	/* Load the addresses of the RX and TX lists into the chip. */
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_LO,
	    RGE_ADDR_LO(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_HI,
	    RGE_ADDR_HI(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_LO,
	    RGE_ADDR_LO(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_HI,
	    RGE_ADDR_HI(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));

	/* Unlock config registers for the duration of the setup. */
	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);

	RGE_CLRBIT_1(sc, 0xf1, 0x80);
	if (sc->rge_type == MAC_CFG2_8126)
		RGE_CLRBIT_1(sc, RGE_INT_CFG0, 0x08);
	else
		RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
	if (sc->rge_type != MAC_CFG2_8126)
		RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_RDY_TO_L23);

	/* Clear interrupt moderation timer. */
	for (i = 0; i < 64; i++)
		RGE_WRITE_4(sc, RGE_INTMITI(i), 0);

	/* Set the initial RX and TX configurations. */
	if (sc->rge_type == MAC_CFG2_8126)
		RGE_WRITE_4(sc, RGE_RXCFG, RGE_RXCFG_CONFIG_8126);
	else
		RGE_WRITE_4(sc, RGE_RXCFG, RGE_RXCFG_CONFIG);
	RGE_WRITE_4(sc, RGE_TXCFG, RGE_TXCFG_CONFIG);

	val = rge_read_csi(sc, 0x70c) & ~0xff000000;
	rge_write_csi(sc, 0x70c, val | 0x27000000);

	/* Enable hardware optimization function. */
	val = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x78) & ~0x00007000;
	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x78, val | 0x00005000);

	if (sc->rge_type == MAC_CFG2_8126) {
		/* Disable L1 timeout. */
		val = rge_read_csi(sc, 0x890) & ~0x00000001;
		rge_write_csi(sc, 0x890, val);
	} else
		RGE_WRITE_2(sc, 0x0382, 0x221b);
	RGE_WRITE_1(sc, 0x4500, 0);
	RGE_WRITE_2(sc, 0x4800, 0);
	RGE_CLRBIT_1(sc, RGE_CFG1, RGE_CFG1_SPEED_DOWN);

	/* Undocumented MAC OCP tuning writes (vendor-derived values). */
	rge_write_mac_ocp(sc, 0xc140, 0xffff);
	rge_write_mac_ocp(sc, 0xc142, 0xffff);

	val = rge_read_mac_ocp(sc, 0xd3e2) & ~0x0fff;
	rge_write_mac_ocp(sc, 0xd3e2, val | 0x03a9);

	RGE_MAC_CLRBIT(sc, 0xd3e4, 0x00ff);
	RGE_MAC_SETBIT(sc, 0xe860, 0x0080);
	RGE_MAC_SETBIT(sc, 0xeb58, 0x0001);

	if (sc->rge_type == MAC_CFG2_8126)
		RGE_CLRBIT_1(sc, 0xd8, 0x02);

	val = rge_read_mac_ocp(sc, 0xe614) & ~0x0700;
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3 ||
	    sc->rge_type == MAC_CFG2_8126)
		rge_write_mac_ocp(sc, 0xe614, val | 0x0400);
	else
		rge_write_mac_ocp(sc, 0xe614, val | 0x0200);

	RGE_MAC_CLRBIT(sc, 0xe63e, 0x0c00);

	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3 ||
	    sc->rge_type == MAC_CFG2_8126) {
		val = rge_read_mac_ocp(sc, 0xe63e) & ~0x0030;
		rge_write_mac_ocp(sc, 0xe63e, val | 0x0020);
	} else
		RGE_MAC_CLRBIT(sc, 0xe63e, 0x0030);

	RGE_MAC_SETBIT(sc, 0xc0b4, 0x000c);

	val = rge_read_mac_ocp(sc, 0xeb6a) & ~0x00ff;
	rge_write_mac_ocp(sc, 0xeb6a, val | 0x0033);

	val = rge_read_mac_ocp(sc, 0xeb50) & ~0x03e0;
	rge_write_mac_ocp(sc, 0xeb50, val | 0x0040);

	val = rge_read_mac_ocp(sc, 0xe056) & ~0x00f0;
	rge_write_mac_ocp(sc, 0xe056, val | 0x0030);

	RGE_WRITE_1(sc, RGE_TDFNR, 0x10);

	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_TX_10M_PS_EN);

	RGE_MAC_CLRBIT(sc, 0xe040, 0x1000);

	val = rge_read_mac_ocp(sc, 0xea1c) & ~0x0003;
	rge_write_mac_ocp(sc, 0xea1c, val | 0x0001);

	val = rge_read_mac_ocp(sc, 0xe0c0) & ~0x4f0f;
	rge_write_mac_ocp(sc, 0xe0c0, val | 0x4403);

	RGE_MAC_SETBIT(sc, 0xe052, 0x0068);
	RGE_MAC_CLRBIT(sc, 0xe052, 0x0080);

	val = rge_read_mac_ocp(sc, 0xc0ac) & ~0x0080;
	rge_write_mac_ocp(sc, 0xc0ac, val | 0x1f00);

	val = rge_read_mac_ocp(sc, 0xd430) & ~0x0fff;
	rge_write_mac_ocp(sc, 0xd430, val | 0x047f);

	/* NOTE(review): `val` computed here is unused; the writes below
	 * store fixed values instead — matches upstream, but verify. */
	val = rge_read_mac_ocp(sc, 0xe84c) & ~0x0040;
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		rge_write_mac_ocp(sc, 0xe84c, 0x00c0);
	else
		rge_write_mac_ocp(sc, 0xe84c, 0x0080);

	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_PFM_EN);

	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		RGE_SETBIT_1(sc, RGE_MCUCMD, 0x01);

	/* Disable EEE plus. */
	RGE_MAC_CLRBIT(sc, 0xe080, 0x0002);

	if (sc->rge_type == MAC_CFG2_8126)
		RGE_MAC_CLRBIT(sc, 0xea1c, 0x0304);
	else
		RGE_MAC_CLRBIT(sc, 0xea1c, 0x0004);

	/* Pulse bit 0 of 0xeb54 (purpose undocumented). */
	RGE_MAC_SETBIT(sc, 0xeb54, 0x0001);
	DELAY(1);
	RGE_MAC_CLRBIT(sc, 0xeb54, 0x0001);

	RGE_CLRBIT_4(sc, 0x1880, 0x0030);

	rge_write_mac_ocp(sc, 0xe098, 0xc302);

	/* Hardware VLAN tag stripping tracks the capability setting. */
	if ((sc->sc_ec.ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0)
		RGE_SETBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);
	else
		RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);

	RGE_SETBIT_2(sc, RGE_CPLUSCMD, RGE_CPLUSCMD_RXCSUM);

	/* Wait (up to 10ms) for bit 13 of OCP 0xe00e to clear. */
	for (i = 0; i < 10; i++) {
		if (!(rge_read_mac_ocp(sc, 0xe00e) & 0x2000))
			break;
		DELAY(1000);
	}

	/* Disable RXDV gate. */
	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
	DELAY(2000);

	/* Restart autonegotiation with the configured media. */
	rge_ifmedia_upd(ifp);

	/* Enable transmit and receive. */
	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_TXENB | RGE_CMD_RXENB);

	/* Program promiscuous mode and multicast filters. */
	rge_iff(sc);

	if (sc->rge_type == MAC_CFG2_8126)
		RGE_CLRBIT_1(sc, RGE_INT_CFG0, 0x08);
	else
		RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);

	/* Relock the config registers. */
	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);

	/* Enable interrupts. */
	rge_setup_intr(sc, RGE_IMTYPE_SIM);

	ifp->if_flags |= IFF_RUNNING;
	CLR(ifp->if_flags, IFF_OACTIVE);

	/* Start the one-tick periodic timer (rge_tick). */
	callout_schedule(&sc->sc_timeout, 1);

	return (0);
}
    876 
    877 /*
    878  * Stop the adapter and free any mbufs allocated to the RX and TX lists.
    879  */
    880 void
    881 rge_stop(struct ifnet *ifp, int disable)
    882 {
    883 	struct rge_softc *sc = ifp->if_softc;
    884 
    885 	callout_halt(&sc->sc_timeout, NULL);
    886 
    887 	ifp->if_timer = 0;
    888 	ifp->if_flags &= ~IFF_RUNNING;
    889 	sc->rge_timerintr = 0;
    890 
    891 	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
    892 	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
    893 	    RGE_RXCFG_ERRPKT);
    894 
    895 	RGE_WRITE_4(sc, RGE_IMR, 0);
    896 
    897 	/* Config interrupt type for RTL8126. */
    898 	if (sc->rge_type == MAC_CFG2_8126)
    899 		RGE_CLRBIT_1(sc, RGE_INT_CFG0, RGE_INT_CFG0_EN);
    900 
    901 	/* Clear timer interrupts. */
    902 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
    903 	RGE_WRITE_4(sc, RGE_TIMERINT1, 0);
    904 	RGE_WRITE_4(sc, RGE_TIMERINT2, 0);
    905 	RGE_WRITE_4(sc, RGE_TIMERINT3, 0);
    906 
    907 	rge_reset(sc);
    908 
    909 //	intr_barrier(sc->sc_ih);
    910 //	ifq_barrier(&ifp->if_snd);
    911 /*	ifq_clr_oactive(&ifp->if_snd); Sevan - OpenBSD queue API */
    912 
    913 	if (sc->rge_head != NULL) {
    914 		m_freem(sc->rge_head);
    915 		sc->rge_head = sc->rge_tail = NULL;
    916 	}
    917 
    918 	rge_tx_list_fini(sc);
    919 	rge_rx_list_fini(sc);
    920 }
    921 
    922 /*
    923  * Set media options.
    924  */
    925 int
    926 rge_ifmedia_upd(struct ifnet *ifp)
    927 {
    928 	struct rge_softc *sc = ifp->if_softc;
    929 	struct ifmedia *ifm = &sc->sc_media;
    930 	int anar, gig, val;
    931 
    932 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
    933 		return (EINVAL);
    934 
    935 	/* Disable Gigabit Lite. */
    936 	RGE_PHY_CLRBIT(sc, 0xa428, 0x0200);
    937 	RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0001);
    938 	if (sc->rge_type == MAC_CFG2_8126)
    939 		RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0002);
    940 
    941 	val = rge_read_phy_ocp(sc, 0xa5d4);
    942 	val &= ~RGE_ADV_2500TFDX;
    943 	if (sc->rge_type == MAC_CFG2_8126)
    944 		val &= ~RGE_ADV_5000TFDX;
    945 
    946 	anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
    947 	gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
    948 
    949 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
    950 	case IFM_AUTO:
    951 		val |= (sc->rge_type != MAC_CFG2_8126) ?
    952 		    RGE_ADV_2500TFDX : (RGE_ADV_2500TFDX | RGE_ADV_5000TFDX);
    953 		break;
    954 	case IFM_5000_T:
    955 		val |= RGE_ADV_5000TFDX;
    956 		ifp->if_baudrate = IF_Gbps(5);
    957 		break;
    958 	case IFM_2500_T:
    959 		val |= RGE_ADV_2500TFDX;
    960 		ifp->if_baudrate = IF_Mbps(2500);
    961 		break;
    962 	case IFM_1000_T:
    963 		ifp->if_baudrate = IF_Gbps(1);
    964 		break;
    965 	case IFM_100_TX:
    966 		gig = rge_read_phy(sc, 0, MII_100T2CR) &
    967 		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
    968 		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
    969 		    ANAR_TX | ANAR_TX_FD | ANAR_10_FD | ANAR_10 :
    970 		    ANAR_TX | ANAR_10_FD | ANAR_10;
    971 		ifp->if_baudrate = IF_Mbps(100);
    972 		break;
    973 	case IFM_10_T:
    974 		gig = rge_read_phy(sc, 0, MII_100T2CR) &
    975 		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
    976 		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
    977 		    ANAR_10_FD | ANAR_10 : ANAR_10;
    978 		ifp->if_baudrate = IF_Mbps(10);
    979 		break;
    980 	default:
    981 		device_printf(sc->sc_dev,
    982 		    "unsupported media type\n");
    983 		return (EINVAL);
    984 	}
    985 
    986 	rge_write_phy(sc, 0, MII_ANAR, anar | ANAR_PAUSE_ASYM | ANAR_FC);
    987 	rge_write_phy(sc, 0, MII_100T2CR, gig);
    988 	rge_write_phy_ocp(sc, 0xa5d4, val);
    989 	rge_write_phy(sc, 0, MII_BMCR, BMCR_RESET | BMCR_AUTOEN |
    990 	    BMCR_STARTNEG);
    991 
    992 	return (0);
    993 }
    994 
    995 /*
    996  * Report current media status.
    997  */
    998 void
    999 rge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
   1000 {
   1001 	struct rge_softc *sc = ifp->if_softc;
   1002 	uint16_t status = 0;
   1003 
   1004 	ifmr->ifm_status = IFM_AVALID;
   1005 	ifmr->ifm_active = IFM_ETHER;
   1006 
   1007 	if (rge_get_link_status(sc)) {
   1008 		ifmr->ifm_status |= IFM_ACTIVE;
   1009 
   1010 		status = RGE_READ_2(sc, RGE_PHYSTAT);
   1011 		if ((status & RGE_PHYSTAT_FDX) ||
   1012 		    (status & (RGE_PHYSTAT_2500MBPS | RGE_PHYSTAT_5000MBPS)))
   1013 			ifmr->ifm_active |= IFM_FDX;
   1014 		else
   1015 			ifmr->ifm_active |= IFM_HDX;
   1016 
   1017 		if (status & RGE_PHYSTAT_10MBPS)
   1018 			ifmr->ifm_active |= IFM_10_T;
   1019 		else if (status & RGE_PHYSTAT_100MBPS)
   1020 			ifmr->ifm_active |= IFM_100_TX;
   1021 		else if (status & RGE_PHYSTAT_1000MBPS)
   1022 			ifmr->ifm_active |= IFM_1000_T;
   1023 		else if (status & RGE_PHYSTAT_2500MBPS)
   1024 			ifmr->ifm_active |= IFM_2500_T;
   1025 		else if (status & RGE_PHYSTAT_5000MBPS)
   1026 			ifmr->ifm_active |= IFM_5000_T;
   1027 	}
   1028 }
   1029 
   1030 /*
   1031  * Allocate memory for RX/TX rings.
   1032  *
   1033  * XXX There is no tear-down for this if it any part fails, so everything
   1034  * remains allocated.
   1035  */
   1036 int
   1037 rge_allocmem(struct rge_softc *sc)
   1038 {
   1039 	int error, i;
   1040 
   1041 	/* Allocate DMA'able memory for the TX ring. */
   1042 	error = bus_dmamap_create(sc->sc_dmat, RGE_TX_LIST_SZ, 1,
   1043 	    RGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT, &sc->rge_ldata.rge_tx_list_map);
   1044 	if (error) {
   1045 		aprint_error_dev(sc->sc_dev, "can't create TX list map\n");
   1046 		return (error);
   1047 	}
   1048 	error = bus_dmamem_alloc(sc->sc_dmat, RGE_TX_LIST_SZ, RGE_ALIGN, 0,
   1049 	    &sc->rge_ldata.rge_tx_listseg, 1, &sc->rge_ldata.rge_tx_listnseg,
   1050 	    BUS_DMA_NOWAIT);
   1051 	if (error) {
   1052 		aprint_error_dev(sc->sc_dev, "can't alloc TX list\n");
   1053 		return (error);
   1054 	}
   1055 
   1056 	/* Load the map for the TX ring. */
   1057 	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
   1058 	    sc->rge_ldata.rge_tx_listnseg, RGE_TX_LIST_SZ,
   1059 	    (void **) &sc->rge_ldata.rge_tx_list,
   1060 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
   1061 	if (error) {
   1062 		aprint_error_dev(sc->sc_dev, "can't map TX dma buffers\n");
   1063 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
   1064 		    sc->rge_ldata.rge_tx_listnseg);
   1065 		return (error);
   1066 	}
   1067 	memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);
   1068 	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
   1069 	    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
   1070 	if (error) {
   1071 		aprint_error_dev(sc->sc_dev, "can't load TX dma map\n");
   1072 		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map);
   1073 		bus_dmamem_unmap(sc->sc_dmat,
   1074 		    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ);
   1075 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
   1076 		    sc->rge_ldata.rge_tx_listnseg);
   1077 		return (error);
   1078 	}
   1079 
   1080 	/* Create DMA maps for TX buffers. */
   1081 	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
   1082 		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN,
   1083 		    RGE_TX_NSEGS, RGE_JUMBO_FRAMELEN, 0, 0,
   1084 		    &sc->rge_ldata.rge_txq[i].txq_dmamap);
   1085 		if (error) {
   1086 			aprint_error_dev(sc->sc_dev, "can't create DMA map for TX\n");
   1087 			return (error);
   1088 		}
   1089 	}
   1090 
   1091 	/* Allocate DMA'able memory for the RX ring. */
   1092 	error = bus_dmamap_create(sc->sc_dmat, RGE_RX_LIST_SZ, 1,
   1093 	    RGE_RX_LIST_SZ, 0, 0, &sc->rge_ldata.rge_rx_list_map);
   1094 	if (error) {
   1095 		aprint_error_dev(sc->sc_dev, "can't create RX list map\n");
   1096 		return (error);
   1097 	}
   1098 	error = bus_dmamem_alloc(sc->sc_dmat, RGE_RX_LIST_SZ, RGE_ALIGN, 0,
   1099 	    &sc->rge_ldata.rge_rx_listseg, 1, &sc->rge_ldata.rge_rx_listnseg,
   1100 	    BUS_DMA_NOWAIT);
   1101 	if (error) {
   1102 		aprint_error_dev(sc->sc_dev, "can't alloc RX list\n");
   1103 		return (error);
   1104 	}
   1105 
   1106 	/* Load the map for the RX ring. */
   1107 	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
   1108 	    sc->rge_ldata.rge_rx_listnseg, RGE_RX_LIST_SZ,
   1109 	    (void **) &sc->rge_ldata.rge_rx_list,
   1110 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
   1111 	if (error) {
   1112 		aprint_error_dev(sc->sc_dev, "can't map RX dma buffers\n");
   1113 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
   1114 		    sc->rge_ldata.rge_rx_listnseg);
   1115 		return (error);
   1116 	}
   1117 	memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);
   1118 	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
   1119 	    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
   1120 	if (error) {
   1121 		aprint_error_dev(sc->sc_dev, "can't load RX dma map\n");
   1122 		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map);
   1123 		bus_dmamem_unmap(sc->sc_dmat,
   1124 		    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ);
   1125 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
   1126 		    sc->rge_ldata.rge_rx_listnseg);
   1127 		return (error);
   1128 	}
   1129 
   1130 	/*
   1131 	 * Create DMA maps for RX buffers.  Use BUS_DMA_ALLOCNOW to avoid any
   1132 	 * potential failure in bus_dmamap_load_mbuf() in the RX path.
   1133 	 */
   1134 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
   1135 		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN, 1,
   1136 		    RGE_JUMBO_FRAMELEN, 0, BUS_DMA_ALLOCNOW,
   1137 		    &sc->rge_ldata.rge_rxq[i].rxq_dmamap);
   1138 		if (error) {
   1139 			aprint_error_dev(sc->sc_dev, "can't create DMA map for RX\n");
   1140 			return (error);
   1141 		}
   1142 	}
   1143 
   1144 	return (error);
   1145 }
   1146 
   1147 /*
   1148  * Set an RX descriptor and sync it.
   1149  */
   1150 static void
   1151 rge_load_rxbuf(struct rge_softc *sc, int idx)
   1152 {
   1153 	struct rge_rx_desc *r = &sc->rge_ldata.rge_rx_list[idx];
   1154 	struct rge_rxq *rxq = &sc->rge_ldata.rge_rxq[idx];
   1155 	bus_dmamap_t rxmap = rxq->rxq_dmamap;
   1156 	uint32_t cmdsts;
   1157 
   1158 	cmdsts = rxmap->dm_segs[0].ds_len | RGE_RDCMDSTS_OWN;
   1159 	if (idx == RGE_RX_LIST_CNT - 1)
   1160 		cmdsts |= RGE_RDCMDSTS_EOR;
   1161 
   1162 	r->hi_qword0.rge_addr = htole64(rxmap->dm_segs[0].ds_addr);
   1163 	r->hi_qword1.rx_qword4.rge_extsts = 0;
   1164 	r->hi_qword1.rx_qword4.rge_cmdsts = htole32(cmdsts);
   1165 
   1166 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
   1167 	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
   1168 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1169 }
   1170 
   1171 /*
   1172  * Initialize the RX descriptor and attach an mbuf cluster.
   1173  */
   1174 int
   1175 rge_newbuf(struct rge_softc *sc, int idx)
   1176 {
   1177 	struct mbuf *m;
   1178 	struct rge_rxq *rxq;
   1179 	bus_dmamap_t rxmap;
   1180 	int error __diagused;
   1181 
   1182 	m = MCLGETL(NULL, M_DONTWAIT, RGE_JUMBO_FRAMELEN);
   1183 	if (m == NULL)
   1184 		return (ENOBUFS);
   1185 	MCLAIM(m, &sc->sc_ec.ec_rx_mowner);
   1186 
   1187 	m->m_len = m->m_pkthdr.len = RGE_JUMBO_FRAMELEN;
   1188 
   1189 	rxq = &sc->rge_ldata.rge_rxq[idx];
   1190 	rxmap = rxq->rxq_dmamap;
   1191 
   1192 	if (rxq->rxq_mbuf != NULL)
   1193 		bus_dmamap_unload(sc->sc_dmat, rxq->rxq_dmamap);
   1194 
   1195 	/* This map was created with BUS_DMA_ALLOCNOW so should never fail. */
   1196 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m, BUS_DMA_NOWAIT);
   1197 	KASSERTMSG(error == 0, "error=%d", error);
   1198 
   1199 	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
   1200 	    BUS_DMASYNC_PREREAD);
   1201 
   1202 	/* Map the segments into RX descriptors. */
   1203 
   1204 	rxq->rxq_mbuf = m;
   1205 	rge_load_rxbuf(sc, idx);
   1206 
   1207 	return 0;
   1208 }
   1209 
   1210 static int
   1211 rge_rx_list_init(struct rge_softc *sc)
   1212 {
   1213 	unsigned i;
   1214 
   1215 	memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);
   1216 
   1217 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
   1218 		sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
   1219 		if (rge_newbuf(sc, i) != 0) {
   1220 			rge_rx_list_fini(sc);
   1221 			return (ENOBUFS);
   1222 		}
   1223 	}
   1224 
   1225 	sc->rge_ldata.rge_rxq_prodidx = sc->rge_ldata.rge_rxq_considx = 0;
   1226 	sc->rge_head = sc->rge_tail = NULL;
   1227 
   1228 	return (0);
   1229 }
   1230 
   1231 static void
   1232 rge_rx_list_fini(struct rge_softc *sc)
   1233 {
   1234 	unsigned i;
   1235 
   1236 	/* Free the RX list buffers. */
   1237 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
   1238 		if (sc->rge_ldata.rge_rxq[i].rxq_mbuf != NULL) {
   1239 			bus_dmamap_unload(sc->sc_dmat,
   1240 			    sc->rge_ldata.rge_rxq[i].rxq_dmamap);
   1241 			m_freem(sc->rge_ldata.rge_rxq[i].rxq_mbuf);
   1242 			sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
   1243 		}
   1244 	}
   1245 }
   1246 
   1247 static void
   1248 rge_tx_list_init(struct rge_softc *sc)
   1249 {
   1250 	unsigned i;
   1251 
   1252 	memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);
   1253 
   1254 	for (i = 0; i < RGE_TX_LIST_CNT; i++)
   1255 		sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
   1256 
   1257 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map, 0,
   1258 	    sc->rge_ldata.rge_tx_list_map->dm_mapsize,
   1259 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1260 
   1261 	sc->rge_ldata.rge_txq_prodidx = sc->rge_ldata.rge_txq_considx = 0;
   1262 }
   1263 
   1264 static void
   1265 rge_tx_list_fini(struct rge_softc *sc)
   1266 {
   1267 	unsigned i;
   1268 
   1269 	/* Free the TX list buffers. */
   1270 	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
   1271 		if (sc->rge_ldata.rge_txq[i].txq_mbuf != NULL) {
   1272 			bus_dmamap_unload(sc->sc_dmat,
   1273 			    sc->rge_ldata.rge_txq[i].txq_dmamap);
   1274 			m_freem(sc->rge_ldata.rge_txq[i].txq_mbuf);
   1275 			sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
   1276 		}
   1277 	}
   1278 }
   1279 
/*
 * RX completion: walk the ring from the consumer index, handing
 * completed frames to the network stack.  Returns nonzero if at least
 * one descriptor was processed.
 */
int
rge_rxeof(struct rge_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct rge_rx_desc *cur_rx;
	struct rge_rxq *rxq;
	uint32_t rxstat, extsts;
	int i, total_len, rx = 0;

	for (i = sc->rge_ldata.rge_rxq_considx; ; i = RGE_NEXT_RX_DESC(i)) {
		/* Invalidate the descriptor memory. */
		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
		    i * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_rx = &sc->rge_ldata.rge_rx_list[i];

		/* Stop at the first descriptor still owned by the chip. */
		if (RGE_OWN(cur_rx))
			break;

		rxstat = letoh32(cur_rx->hi_qword1.rx_qword4.rge_cmdsts);
		extsts = letoh32(cur_rx->hi_qword1.rx_qword4.rge_extsts);

		total_len = RGE_RXBYTES(cur_rx);
		rxq = &sc->rge_ldata.rge_rxq[i];
		m = rxq->rxq_mbuf;
		rx = 1;

		/* Invalidate the RX mbuf. */
		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_dmamap, 0,
		    rxq->rxq_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * Frames that do not fit a single buffer (SOF and EOF not
		 * both set) are dropped; the descriptor is recycled as-is.
		 */
		if ((rxstat & (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) !=
		    (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) {
			if_statinc(ifp, if_ierrors);
			rge_load_rxbuf(sc, i);
			continue;
		}

		if (rxstat & RGE_RDCMDSTS_RXERRSUM) {
			if_statinc(ifp, if_ierrors);
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->rge_head != NULL) {
				m_freem(sc->rge_head);
				sc->rge_head = sc->rge_tail = NULL;
			}
			rge_load_rxbuf(sc, i);
			continue;
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
		 */
		if (rge_newbuf(sc, i) != 0) {
			if_statinc(ifp, if_iqdrops);
			if (sc->rge_head != NULL) {
				m_freem(sc->rge_head);
				sc->rge_head = sc->rge_tail = NULL;
			}
			rge_load_rxbuf(sc, i);
			continue;
		}

		m_set_rcvif(m, ifp);
		/* Append to a pending fragment chain, if one is in flight. */
		if (sc->rge_head != NULL) {
			m->m_len = total_len;
			/*
			 * Special case: if there's 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes is the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->rge_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->rge_tail->m_next = m;
			}
			m = sc->rge_head;
			sc->rge_head = sc->rge_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
	#if 0
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);
	#else
		{
			/* Keep the FCS and tell the stack it is present. */
			m->m_pkthdr.len = m->m_len = total_len;
			m->m_flags |= M_HASFCS;
		}
	#endif

#if notyet
		/* Check IP header checksum. */
		if (!(extsts & RGE_RDEXTSTS_IPCSUMERR) &&
		    (extsts & RGE_RDEXTSTS_IPV4))
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

		/* Check TCP/UDP checksum. */
		if ((extsts & (RGE_RDEXTSTS_IPV4 | RGE_RDEXTSTS_IPV6)) &&
		    (((extsts & RGE_RDEXTSTS_TCPPKT) &&
		    !(extsts & RGE_RDEXTSTS_TCPCSUMERR)) ||
		    ((extsts & RGE_RDEXTSTS_UDPPKT) &&
		    !(extsts & RGE_RDEXTSTS_UDPCSUMERR))))
			m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
			    M_UDP_CSUM_IN_OK;
#endif

		/* Propagate a hardware-stripped VLAN tag, if any. */
		if (extsts & RGE_RDEXTSTS_VTAG) {
			vlan_set_tag(m,
			    bswap16(extsts & RGE_RDEXTSTS_VLAN_MASK));
		}

		if_percpuq_enqueue(ifp->if_percpuq, m);
	}

	sc->rge_ldata.rge_rxq_considx = i;

	return (rx);
}
   1408 
/*
 * TX completion: reclaim descriptors the chip has finished with.
 * `free' is a tri-state: 0 = nothing reclaimed, 1 = some reclaimed,
 * 2 = stopped at a descriptor still owned by the chip (so TX is
 * kicked again).  Returns nonzero if any progress was made.
 */
int
rge_txeof(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct rge_txq *txq;
	uint32_t txstat;
	int cons, idx, prod;
	int free = 0;

	prod = sc->rge_ldata.rge_txq_prodidx;
	cons = sc->rge_ldata.rge_txq_considx;

	while (prod != cons) {
		txq = &sc->rge_ldata.rge_txq[cons];
		/* Last descriptor of the packet queued at this slot. */
		idx = txq->txq_descidx;

		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
		    idx * sizeof(struct rge_tx_desc),
		    sizeof(struct rge_tx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		txstat = letoh32(sc->rge_ldata.rge_tx_list[idx].rge_cmdsts);

		/* Chip still owns this packet: remember to restart TX. */
		if (txstat & RGE_TDCMDSTS_OWN) {
			free = 2;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap, 0,
		    txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
		m_freem(txq->txq_mbuf);
		txq->txq_mbuf = NULL;

		/* Account collisions, errors and successful transmits. */
		net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
		if (txstat & (RGE_TDCMDSTS_EXCESSCOLL | RGE_TDCMDSTS_COLL))
			if_statinc_ref(ifp, nsr, if_collisions);
		if (txstat & RGE_TDCMDSTS_TXERR)
			if_statinc_ref(ifp, nsr, if_oerrors);
		else
			if_statinc_ref(ifp, nsr, if_opackets);
		IF_STAT_PUTREF(ifp);

		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
		    idx * sizeof(struct rge_tx_desc),
		    sizeof(struct rge_tx_desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Advance past the final descriptor of this packet. */
		cons = RGE_NEXT_TX_DESC(idx);
		free = 1;
	}

	if (free == 0)
		return (0);

	sc->rge_ldata.rge_txq_considx = cons;

	/* Descriptors still pending in hardware: poke the transmitter. */
	if (free == 2)
		rge_txstart(sc);

	CLR(ifp->if_flags, IFF_OACTIVE);
	ifp->if_timer = 0;
	if_schedule_deferred_start(ifp);

	return (1);
}
   1475 
/*
 * Reset the chip: quiesce the FIFOs, then issue and wait for a
 * software reset.
 */
void
rge_reset(struct rge_softc *sc)
{
	int i;

	/* Enable RXDV gate. */
	RGE_SETBIT_1(sc, RGE_PPSW, 0x08);
	DELAY(2000);

	/* Wait (up to 3000 * 50us = 150ms) for both FIFOs to drain. */
	for (i = 0; i < 3000; i++) {
		DELAY(50);
		if ((RGE_READ_1(sc, RGE_MCUCMD) & (RGE_MCUCMD_RXFIFO_EMPTY |
		    RGE_MCUCMD_TXFIFO_EMPTY)) == (RGE_MCUCMD_RXFIFO_EMPTY |
		    RGE_MCUCMD_TXFIFO_EMPTY))
			break;
	}
	/* Additional wait needed on these chip variants. */
	if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5) {
		for (i = 0; i < 3000; i++) {
			DELAY(50);
			if ((RGE_READ_2(sc, RGE_IM) & 0x0103) == 0x0103)
				break;
		}
	}

	DELAY(2000);

	/* Soft reset. */
	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_RESET);

	/* The reset bit self-clears when the chip is done. */
	for (i = 0; i < RGE_TIMEOUT; i++) {
		DELAY(100);
		if (!(RGE_READ_1(sc, RGE_CMD) & RGE_CMD_RESET))
			break;
	}
	if (i == RGE_TIMEOUT)
		device_printf(sc->sc_dev, "reset never completed!\n");
}
   1513 
/*
 * Program the hardware receive filter: our station address and
 * broadcast are always accepted; multicast goes through the 64-bit
 * hash table unless promiscuous/all-multicast is requested.
 */
void
rge_iff(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct ethercom *ec = &sc->sc_ec;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[2];
	uint32_t rxfilt;
	int h = 0;

	rxfilt = RGE_READ_4(sc, RGE_RXCFG);
	rxfilt &= ~(RGE_RXCFG_ALLPHYS | RGE_RXCFG_MULTI);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept frames destined to our station address.
	 * Always accept broadcast frames.
	 */
	rxfilt |= RGE_RXCFG_INDIV | RGE_RXCFG_BROAD;

	if (ifp->if_flags & IFF_PROMISC) {
 allmulti:
		/*
		 * Also reached via goto when a multicast range entry
		 * forces all-multicast; ALLPHYS is added only in the
		 * promiscuous case.
		 */
		ifp->if_flags |= IFF_ALLMULTI;
		rxfilt |= RGE_RXCFG_MULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= RGE_RXCFG_ALLPHYS;
		hashes[0] = hashes[1] = 0xffffffff;
	} else {
		rxfilt |= RGE_RXCFG_MULTI;
		/* Program new filter. */
		memset(hashes, 0, sizeof(hashes));

		ETHER_LOCK(ec);
		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			/* A range entry cannot be hashed: fall back. */
			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN) != 0) {
			    	ETHER_UNLOCK(ec);
				goto allmulti;
			}
			/* Top 6 bits of the big-endian CRC index the table. */
			h = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN) >> 26;

			if (h < 32)
				hashes[0] |= (1U << h);
			else
				hashes[1] |= (1U << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
		ETHER_UNLOCK(ec);
	}

	RGE_WRITE_4(sc, RGE_RXCFG, rxfilt);
	/* The hash registers take the words byte-swapped, high word first. */
	RGE_WRITE_4(sc, RGE_MAR0, bswap32(hashes[1]));
	RGE_WRITE_4(sc, RGE_MAR4, bswap32(hashes[0]));
}
   1572 
   1573 void
   1574 rge_set_phy_power(struct rge_softc *sc, int on)
   1575 {
   1576 	int i;
   1577 
   1578 	if (on) {
   1579 		RGE_SETBIT_1(sc, RGE_PMCH, 0xc0);
   1580 
   1581 		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN);
   1582 
   1583 		for (i = 0; i < RGE_TIMEOUT; i++) {
   1584 			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 3)
   1585 				break;
   1586 			DELAY(1000);
   1587 		}
   1588 	} else {
   1589 		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_PDOWN);
   1590 		RGE_CLRBIT_1(sc, RGE_PMCH, 0x80);
   1591 		RGE_CLRBIT_1(sc, RGE_PPSW, 0x40);
   1592 	}
   1593 }
   1594 
/*
 * Run the chip-specific PHY microcode/configuration, then apply the
 * common EEE and advanced-EEE disable sequence.
 */
void
rge_phy_config(struct rge_softc *sc)
{
	/* Read microcode version. */
	rge_write_phy_ocp(sc, 0xa436, 0x801e);
	sc->rge_mcodever = rge_read_phy_ocp(sc, 0xa438);

	/* Dispatch to the per-variant configuration routine. */
	switch (sc->rge_type) {
	case MAC_CFG2_8126:
		rge_phy_config_mac_cfg2_8126(sc);
		break;
	case MAC_CFG2:
		rge_phy_config_mac_cfg2(sc);
		break;
	case MAC_CFG3:
		rge_phy_config_mac_cfg3(sc);
		break;
	case MAC_CFG4:
		rge_phy_config_mac_cfg4(sc);
		break;
	case MAC_CFG5:
		rge_phy_config_mac_cfg5(sc);
		break;
	default:
		break;	/* Can't happen. */
	}

	/* NOTE(review): vendor magic — clears bit 15 of reg 0x12 on
	 * PHY page 0x0a5b; meaning undocumented here. */
	rge_write_phy(sc, 0x0a5b, 0x12,
	    rge_read_phy(sc, 0x0a5b, 0x12) & ~0x8000);

	/* Disable EEE. */
	RGE_MAC_CLRBIT(sc, 0xe040, 0x0003);
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
		RGE_MAC_CLRBIT(sc, 0xeb62, 0x0006);
		RGE_PHY_CLRBIT(sc, 0xa432, 0x0010);
	}
	RGE_PHY_CLRBIT(sc, 0xa5d0, 0x0006);
	RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0001);
	if (sc->rge_type == MAC_CFG2_8126)
		RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0002);
	RGE_PHY_CLRBIT(sc, 0xa6d8, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xa428, 0x0080);
	RGE_PHY_CLRBIT(sc, 0xa4a2, 0x0200);

	/* Disable advanced EEE.  Non-8126 parts need the MCU patch
	 * window opened around these writes. */
	if (sc->rge_type != MAC_CFG2_8126)
		rge_patch_phy_mcu(sc, 1);
	RGE_MAC_CLRBIT(sc, 0xe052, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xa442, 0x3000);
	RGE_PHY_CLRBIT(sc, 0xa430, 0x8000);
	if (sc->rge_type != MAC_CFG2_8126)
		rge_patch_phy_mcu(sc, 0);
}
   1648 
   1649 void
   1650 rge_phy_config_mac_cfg2_8126(struct rge_softc *sc)
   1651 {
   1652 	uint16_t val;
   1653 	int i;
   1654 	static const uint16_t mac_cfg2_a438_value[] =
   1655 	    { 0x0044, 0x00a8, 0x00d6, 0x00ec, 0x00f6, 0x00fc, 0x00fe,
   1656 	      0x00fe, 0x00bc, 0x0058, 0x002a, 0x003f, 0x3f02, 0x023c,
   1657 	      0x3b0a, 0x1c00, 0x0000, 0x0000, 0x0000, 0x0000 };
   1658 
   1659 	static const uint16_t mac_cfg2_b87e_value[] =
   1660 	    { 0x03ed, 0x03ff, 0x0009, 0x03fe, 0x000b, 0x0021, 0x03f7,
   1661 	      0x03b8, 0x03e0, 0x0049, 0x0049, 0x03e0, 0x03b8, 0x03f7,
   1662 	      0x0021, 0x000b, 0x03fe, 0x0009, 0x03ff, 0x03ed, 0x000e,
   1663 	      0x03fe, 0x03ed, 0x0006, 0x001a, 0x03f1, 0x03d8, 0x0023,
   1664 	      0x0054, 0x0322, 0x00dd, 0x03ab, 0x03dc, 0x0027, 0x000e,
   1665 	      0x03e5, 0x03f9, 0x0012, 0x0001, 0x03f1 };
   1666 
   1667 	rge_phy_config_mcu(sc, RGE_MAC_CFG2_8126_MCODE_VER);
   1668 
   1669 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
   1670 	rge_write_phy_ocp(sc, 0xa436, 0x80bf);
   1671 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1672 	rge_write_phy_ocp(sc, 0xa438, val | 0xed00);
   1673 	rge_write_phy_ocp(sc, 0xa436, 0x80cd);
   1674 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1675 	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
   1676 	rge_write_phy_ocp(sc, 0xa436, 0x80d1);
   1677 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1678 	rge_write_phy_ocp(sc, 0xa438, val | 0xc800);
   1679 	rge_write_phy_ocp(sc, 0xa436, 0x80d4);
   1680 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1681 	rge_write_phy_ocp(sc, 0xa438, val | 0xc800);
   1682 	rge_write_phy_ocp(sc, 0xa436, 0x80e1);
   1683 	rge_write_phy_ocp(sc, 0xa438, 0x10cc);
   1684 	rge_write_phy_ocp(sc, 0xa436, 0x80e5);
   1685 	rge_write_phy_ocp(sc, 0xa438, 0x4f0c);
   1686 	rge_write_phy_ocp(sc, 0xa436, 0x8387);
   1687 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1688 	rge_write_phy_ocp(sc, 0xa438, val | 0x4700);
   1689 	val = rge_read_phy_ocp(sc, 0xa80c) & ~0x00c0;
   1690 	rge_write_phy_ocp(sc, 0xa80c, val | 0x0080);
   1691 	RGE_PHY_CLRBIT(sc, 0xac90, 0x0010);
   1692 	RGE_PHY_CLRBIT(sc, 0xad2c, 0x8000);
   1693 	rge_write_phy_ocp(sc, 0xb87c, 0x8321);
   1694 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1695 	rge_write_phy_ocp(sc, 0xb87e, val | 0x1100);
   1696 	RGE_PHY_SETBIT(sc, 0xacf8, 0x000c);
   1697 	rge_write_phy_ocp(sc, 0xa436, 0x8183);
   1698 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1699 	rge_write_phy_ocp(sc, 0xa438, val | 0x5900);
   1700 	RGE_PHY_SETBIT(sc, 0xad94, 0x0020);
   1701 	RGE_PHY_CLRBIT(sc, 0xa654, 0x0800);
   1702 	RGE_PHY_SETBIT(sc, 0xb648, 0x4000);
   1703 	rge_write_phy_ocp(sc, 0xb87c, 0x839e);
   1704 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1705 	rge_write_phy_ocp(sc, 0xb87e, val | 0x2f00);
   1706 	rge_write_phy_ocp(sc, 0xb87c, 0x83f2);
   1707 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1708 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0800);
   1709 	RGE_PHY_SETBIT(sc, 0xada0, 0x0002);
   1710 	rge_write_phy_ocp(sc, 0xb87c, 0x80f3);
   1711 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1712 	rge_write_phy_ocp(sc, 0xb87e, val | 0x9900);
   1713 	rge_write_phy_ocp(sc, 0xb87c, 0x8126);
   1714 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1715 	rge_write_phy_ocp(sc, 0xb87e, val | 0xc100);
   1716 	rge_write_phy_ocp(sc, 0xb87c, 0x893a);
   1717 	rge_write_phy_ocp(sc, 0xb87e, 0x8080);
   1718 	rge_write_phy_ocp(sc, 0xb87c, 0x8647);
   1719 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1720 	rge_write_phy_ocp(sc, 0xb87e, val | 0xe600);
   1721 	rge_write_phy_ocp(sc, 0xb87c, 0x862c);
   1722 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1723 	rge_write_phy_ocp(sc, 0xb87e, val | 0x1200);
   1724 	rge_write_phy_ocp(sc, 0xb87c, 0x864a);
   1725 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1726 	rge_write_phy_ocp(sc, 0xb87e, val | 0xe600);
   1727 	rge_write_phy_ocp(sc, 0xb87c, 0x80a0);
   1728 	rge_write_phy_ocp(sc, 0xb87e, 0xbcbc);
   1729 	rge_write_phy_ocp(sc, 0xb87c, 0x805e);
   1730 	rge_write_phy_ocp(sc, 0xb87e, 0xbcbc);
   1731 	rge_write_phy_ocp(sc, 0xb87c, 0x8056);
   1732 	rge_write_phy_ocp(sc, 0xb87e, 0x3077);
   1733 	rge_write_phy_ocp(sc, 0xb87c, 0x8058);
   1734 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1735 	rge_write_phy_ocp(sc, 0xb87e, val | 0x5a00);
   1736 	rge_write_phy_ocp(sc, 0xb87c, 0x8098);
   1737 	rge_write_phy_ocp(sc, 0xb87e, 0x3077);
   1738 	rge_write_phy_ocp(sc, 0xb87c, 0x809a);
   1739 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1740 	rge_write_phy_ocp(sc, 0xb87e, val | 0x5a00);
   1741 	rge_write_phy_ocp(sc, 0xb87c, 0x8052);
   1742 	rge_write_phy_ocp(sc, 0xb87e, 0x3733);
   1743 	rge_write_phy_ocp(sc, 0xb87c, 0x8094);
   1744 	rge_write_phy_ocp(sc, 0xb87e, 0x3733);
   1745 	rge_write_phy_ocp(sc, 0xb87c, 0x807f);
   1746 	rge_write_phy_ocp(sc, 0xb87e, 0x7c75);
   1747 	rge_write_phy_ocp(sc, 0xb87c, 0x803d);
   1748 	rge_write_phy_ocp(sc, 0xb87e, 0x7c75);
   1749 	rge_write_phy_ocp(sc, 0xb87c, 0x8036);
   1750 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1751 	rge_write_phy_ocp(sc, 0xb87e, val | 0x3000);
   1752 	rge_write_phy_ocp(sc, 0xb87c, 0x8078);
   1753 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1754 	rge_write_phy_ocp(sc, 0xb87e, val | 0x3000);
   1755 	rge_write_phy_ocp(sc, 0xb87c, 0x8031);
   1756 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1757 	rge_write_phy_ocp(sc, 0xb87e, val | 0x3300);
   1758 	rge_write_phy_ocp(sc, 0xb87c, 0x8073);
   1759 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1760 	rge_write_phy_ocp(sc, 0xb87e, val | 0x3300);
   1761 	val = rge_read_phy_ocp(sc, 0xae06) & ~0xfc00;
   1762 	rge_write_phy_ocp(sc, 0xae06, val | 0x7c00);
   1763 	rge_write_phy_ocp(sc, 0xb87c, 0x89D1);
   1764 	rge_write_phy_ocp(sc, 0xb87e, 0x0004);
   1765 	rge_write_phy_ocp(sc, 0xa436, 0x8fbd);
   1766 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1767 	rge_write_phy_ocp(sc, 0xa438, val | 0x0a00);
   1768 	rge_write_phy_ocp(sc, 0xa436, 0x8fbe);
   1769 	rge_write_phy_ocp(sc, 0xa438, 0x0d09);
   1770 	rge_write_phy_ocp(sc, 0xb87c, 0x89cd);
   1771 	rge_write_phy_ocp(sc, 0xb87e, 0x0f0f);
   1772 	rge_write_phy_ocp(sc, 0xb87c, 0x89cf);
   1773 	rge_write_phy_ocp(sc, 0xb87e, 0x0f0f);
   1774 	rge_write_phy_ocp(sc, 0xb87c, 0x83a4);
   1775 	rge_write_phy_ocp(sc, 0xb87e, 0x6600);
   1776 	rge_write_phy_ocp(sc, 0xb87c, 0x83a6);
   1777 	rge_write_phy_ocp(sc, 0xb87e, 0x6601);
   1778 	rge_write_phy_ocp(sc, 0xb87c, 0x83c0);
   1779 	rge_write_phy_ocp(sc, 0xb87e, 0x6600);
   1780 	rge_write_phy_ocp(sc, 0xb87c, 0x83c2);
   1781 	rge_write_phy_ocp(sc, 0xb87e, 0x6601);
   1782 	rge_write_phy_ocp(sc, 0xb87c, 0x8414);
   1783 	rge_write_phy_ocp(sc, 0xb87e, 0x6600);
   1784 	rge_write_phy_ocp(sc, 0xb87c, 0x8416);
   1785 	rge_write_phy_ocp(sc, 0xb87e, 0x6601);
   1786 	rge_write_phy_ocp(sc, 0xb87c, 0x83f8);
   1787 	rge_write_phy_ocp(sc, 0xb87e, 0x6600);
   1788 	rge_write_phy_ocp(sc, 0xb87c, 0x83fa);
   1789 	rge_write_phy_ocp(sc, 0xb87e, 0x6601);
   1790 
   1791 	rge_patch_phy_mcu(sc, 1);
   1792 	val = rge_read_phy_ocp(sc, 0xbd96) & ~0x1f00;
   1793 	rge_write_phy_ocp(sc, 0xbd96, val | 0x1000);
   1794 	val = rge_read_phy_ocp(sc, 0xbf1c) & ~0x0007;
   1795 	rge_write_phy_ocp(sc, 0xbf1c, val | 0x0007);
   1796 	RGE_PHY_CLRBIT(sc, 0xbfbe, 0x8000);
   1797 	val = rge_read_phy_ocp(sc, 0xbf40) & ~0x0380;
   1798 	rge_write_phy_ocp(sc, 0xbf40, val | 0x0280);
   1799 	val = rge_read_phy_ocp(sc, 0xbf90) & ~0x0080;
   1800 	rge_write_phy_ocp(sc, 0xbf90, val | 0x0060);
   1801 	val = rge_read_phy_ocp(sc, 0xbf90) & ~0x0010;
   1802 	rge_write_phy_ocp(sc, 0xbf90, val | 0x000c);
   1803 	rge_patch_phy_mcu(sc, 0);
   1804 
   1805 	rge_write_phy_ocp(sc, 0xa436, 0x843b);
   1806 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1807 	rge_write_phy_ocp(sc, 0xa438, val | 0x2000);
   1808 	rge_write_phy_ocp(sc, 0xa436, 0x843d);
   1809 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1810 	rge_write_phy_ocp(sc, 0xa438, val | 0x2000);
   1811 	RGE_PHY_CLRBIT(sc, 0xb516, 0x007f);
   1812 	RGE_PHY_CLRBIT(sc, 0xbf80, 0x0030);
   1813 
   1814 	rge_write_phy_ocp(sc, 0xa436, 0x8188);
   1815 	for (i = 0; i < 11; i++)
   1816 		rge_write_phy_ocp(sc, 0xa438, mac_cfg2_a438_value[i]);
   1817 
   1818 	rge_write_phy_ocp(sc, 0xb87c, 0x8015);
   1819 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1820 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0800);
   1821 	rge_write_phy_ocp(sc, 0xb87c, 0x8ffd);
   1822 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1823 	rge_write_phy_ocp(sc, 0xb87e, val | 0);
   1824 	rge_write_phy_ocp(sc, 0xb87c, 0x8fff);
   1825 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1826 	rge_write_phy_ocp(sc, 0xb87e, val | 0x7f00);
   1827 	rge_write_phy_ocp(sc, 0xb87c, 0x8ffb);
   1828 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1829 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0100);
   1830 	rge_write_phy_ocp(sc, 0xb87c, 0x8fe9);
   1831 	rge_write_phy_ocp(sc, 0xb87e, 0x0002);
   1832 	rge_write_phy_ocp(sc, 0xb87c, 0x8fef);
   1833 	rge_write_phy_ocp(sc, 0xb87e, 0x00a5);
   1834 	rge_write_phy_ocp(sc, 0xb87c, 0x8ff1);
   1835 	rge_write_phy_ocp(sc, 0xb87e, 0x0106);
   1836 	rge_write_phy_ocp(sc, 0xb87c, 0x8fe1);
   1837 	rge_write_phy_ocp(sc, 0xb87e, 0x0102);
   1838 	rge_write_phy_ocp(sc, 0xb87c, 0x8fe3);
   1839 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1840 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0400);
   1841 	RGE_PHY_SETBIT(sc, 0xa654, 0x0800);
   1842 	RGE_PHY_CLRBIT(sc, 0xa654, 0x0003);
   1843 	rge_write_phy_ocp(sc, 0xac3a, 0x5851);
   1844 	val = rge_read_phy_ocp(sc, 0xac3c) & ~0xd000;
   1845 	rge_write_phy_ocp(sc, 0xac3c, val | 0x2000);
   1846 	val = rge_read_phy_ocp(sc, 0xac42) & ~0x0200;
   1847 	rge_write_phy_ocp(sc, 0xac42, val | 0x01c0);
   1848 	RGE_PHY_CLRBIT(sc, 0xac3e, 0xe000);
   1849 	RGE_PHY_CLRBIT(sc, 0xac42, 0x0038);
   1850 	val = rge_read_phy_ocp(sc, 0xac42) & ~0x0002;
   1851 	rge_write_phy_ocp(sc, 0xac42, val | 0x0005);
   1852 	rge_write_phy_ocp(sc, 0xac1a, 0x00db);
   1853 	rge_write_phy_ocp(sc, 0xade4, 0x01b5);
   1854 	RGE_PHY_CLRBIT(sc, 0xad9c, 0x0c00);
   1855 	rge_write_phy_ocp(sc, 0xb87c, 0x814b);
   1856 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1857 	rge_write_phy_ocp(sc, 0xb87e, val | 0x1100);
   1858 	rge_write_phy_ocp(sc, 0xb87c, 0x814d);
   1859 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1860 	rge_write_phy_ocp(sc, 0xb87e, val | 0x1100);
   1861 	rge_write_phy_ocp(sc, 0xb87c, 0x814f);
   1862 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1863 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0b00);
   1864 	rge_write_phy_ocp(sc, 0xb87c, 0x8142);
   1865 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1866 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0100);
   1867 	rge_write_phy_ocp(sc, 0xb87c, 0x8144);
   1868 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1869 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0100);
   1870 	rge_write_phy_ocp(sc, 0xb87c, 0x8150);
   1871 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1872 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0100);
   1873 	rge_write_phy_ocp(sc, 0xb87c, 0x8118);
   1874 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1875 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0700);
   1876 	rge_write_phy_ocp(sc, 0xb87c, 0x811a);
   1877 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1878 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0700);
   1879 	rge_write_phy_ocp(sc, 0xb87c, 0x811c);
   1880 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1881 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0500);
   1882 	rge_write_phy_ocp(sc, 0xb87c, 0x810f);
   1883 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1884 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0100);
   1885 	rge_write_phy_ocp(sc, 0xb87c, 0x8111);
   1886 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1887 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0100);
   1888 	rge_write_phy_ocp(sc, 0xb87c, 0x811d);
   1889 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1890 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0100);
   1891 	RGE_PHY_SETBIT(sc, 0xac36, 0x1000);
   1892 	RGE_PHY_CLRBIT(sc, 0xad1c, 0x0100);
   1893 	val = rge_read_phy_ocp(sc, 0xade8) & ~0xffc0;
   1894 	rge_write_phy_ocp(sc, 0xade8, val | 0x1400);
   1895 	rge_write_phy_ocp(sc, 0xb87c, 0x864b);
   1896 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1897 	rge_write_phy_ocp(sc, 0xb87e, val | 0x9d00);
   1898 
   1899 	rge_write_phy_ocp(sc, 0xa436, 0x8f97);
   1900 	for (; i < nitems(mac_cfg2_a438_value); i++)
   1901 		rge_write_phy_ocp(sc, 0xa438, mac_cfg2_a438_value[i]);
   1902 
   1903 	RGE_PHY_SETBIT(sc, 0xad9c, 0x0020);
   1904 	rge_write_phy_ocp(sc, 0xb87c, 0x8122);
   1905 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1906 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0c00);
   1907 
   1908 	rge_write_phy_ocp(sc, 0xb87c, 0x82c8);
   1909 	for (i = 0; i < 20; i++)
   1910 		rge_write_phy_ocp(sc, 0xb87e, mac_cfg2_b87e_value[i]);
   1911 
   1912 	rge_write_phy_ocp(sc, 0xb87c, 0x80ef);
   1913 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1914 	rge_write_phy_ocp(sc, 0xb87e, val | 0x0c00);
   1915 
   1916 	rge_write_phy_ocp(sc, 0xb87c, 0x82a0);
   1917 	for (; i < nitems(mac_cfg2_b87e_value); i++)
   1918 		rge_write_phy_ocp(sc, 0xb87e, mac_cfg2_b87e_value[i]);
   1919 
   1920 	rge_write_phy_ocp(sc, 0xa436, 0x8018);
   1921 	RGE_PHY_SETBIT(sc, 0xa438, 0x2000);
   1922 	rge_write_phy_ocp(sc, 0xb87c, 0x8fe4);
   1923 	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
   1924 	rge_write_phy_ocp(sc, 0xb87e, val | 0);
   1925 	val = rge_read_phy_ocp(sc, 0xb54c) & ~0xffc0;
   1926 	rge_write_phy_ocp(sc, 0xb54c, val | 0x3700);
   1927 }
   1928 
/*
 * PHY configuration for MAC_CFG2 (early RTL8125) parts: load the
 * vendor-supplied ephy table, download the matching PHY MCU microcode,
 * then apply a vendor-prescribed sequence of PHY OCP register writes.
 * The individual register/bit meanings are undocumented by Realtek;
 * the values and their order come from the vendor driver and must not
 * be reordered.
 */
void
rge_phy_config_mac_cfg2(struct rge_softc *sc)
{
	uint16_t val;
	int i;

	/* Load the chip-specific ephy initialization table. */
	for (i = 0; i < nitems(rtl8125_mac_cfg2_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg2_ephy[i].reg,
		    rtl8125_mac_cfg2_ephy[i].val);

	/* Download PHY microcode if not already at this version. */
	rge_phy_config_mcu(sc, RGE_MAC_CFG2_MCODE_VER);

	/*
	 * Vendor PHY tuning.  The recurring pattern below is a
	 * read-modify-write: clear a field, then OR in the new value.
	 */
	val = rge_read_phy_ocp(sc, 0xad40) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0084);
	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad16, val | 0x0006);
	val = rge_read_phy_ocp(sc, 0xad32) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x1100);
	val = rge_read_phy_ocp(sc, 0xac8a) & ~0xf000;
	rge_write_phy_ocp(sc, 0xac8a, val | 0x7000);
	RGE_PHY_SETBIT(sc, 0xad18, 0x0400);
	RGE_PHY_SETBIT(sc, 0xad1a, 0x03ff);
	RGE_PHY_SETBIT(sc, 0xad1c, 0x03ff);

	/*
	 * Indirect PHY accesses: 0xa436 selects an address, 0xa438 is
	 * the data port for that address.
	 */
	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xc400);
	rge_write_phy_ocp(sc, 0xa436, 0x80eb);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0x0700;
	rge_write_phy_ocp(sc, 0xa438, val | 0x0300);
	rge_write_phy_ocp(sc, 0xa436, 0x80f8);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80f1);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x3000);
	rge_write_phy_ocp(sc, 0xa436, 0x80fe);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xa500);
	rge_write_phy_ocp(sc, 0xa436, 0x8102);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x8105);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x3300);
	rge_write_phy_ocp(sc, 0xa436, 0x8100);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x7000);
	rge_write_phy_ocp(sc, 0xa436, 0x8104);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xf000);
	rge_write_phy_ocp(sc, 0xa436, 0x8106);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6500);
	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xed00);
	rge_write_phy_ocp(sc, 0xa436, 0x80df);
	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
	rge_write_phy_ocp(sc, 0xa436, 0x80e1);
	RGE_PHY_CLRBIT(sc, 0xa438, 0x0100);
	val = rge_read_phy_ocp(sc, 0xbf06) & ~0x003f;
	rge_write_phy_ocp(sc, 0xbf06, val | 0x0038);
	rge_write_phy_ocp(sc, 0xa436, 0x819f);
	rge_write_phy_ocp(sc, 0xa438, 0xd0b6);
	rge_write_phy_ocp(sc, 0xbc34, 0x5555);
	val = rge_read_phy_ocp(sc, 0xbf0a) & ~0x0e00;
	rge_write_phy_ocp(sc, 0xbf0a, val | 0x0a00);
	RGE_PHY_CLRBIT(sc, 0xa5c0, 0x0400);
	/* 0xa442 bit 0x0800 is set at the end of all cfg variants. */
	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
}
   2002 
/*
 * PHY configuration for MAC_CFG3 (RTL8125) parts.  Loads the ephy
 * table plus extra ephy tweaks, downloads the PHY MCU microcode, then
 * applies the vendor-prescribed PHY OCP register sequence.  Register
 * and bit semantics are undocumented; do not reorder the writes.
 */
void
rge_phy_config_mac_cfg3(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint16_t val;
	int i;
	/* Data blasted through the 0xa436/0xa438 indirect port. */
	static const uint16_t mac_cfg3_a438_value[] =
	    { 0x0043, 0x00a7, 0x00d6, 0x00ec, 0x00f6, 0x00fb, 0x00fd, 0x00ff,
	      0x00bb, 0x0058, 0x0029, 0x0013, 0x0009, 0x0004, 0x0002 };

	/* (address, data) pairs for the 0xb88e/0xb890 indirect port. */
	static const uint16_t mac_cfg3_b88e_value[] =
	    { 0xc091, 0x6e12, 0xc092, 0x1214, 0xc094, 0x1516, 0xc096, 0x171b,
	      0xc098, 0x1b1c, 0xc09a, 0x1f1f, 0xc09c, 0x2021, 0xc09e, 0x2224,
	      0xc0a0, 0x2424, 0xc0a2, 0x2424, 0xc0a4, 0x2424, 0xc018, 0x0af2,
	      0xc01a, 0x0d4a, 0xc01c, 0x0f26, 0xc01e, 0x118d, 0xc020, 0x14f3,
	      0xc022, 0x175a, 0xc024, 0x19c0, 0xc026, 0x1c26, 0xc089, 0x6050,
	      0xc08a, 0x5f6e, 0xc08c, 0x6e6e, 0xc08e, 0x6e6e, 0xc090, 0x6e12 };

	/* Load the chip-specific ephy initialization table. */
	for (i = 0; i < nitems(rtl8125_mac_cfg3_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg3_ephy[i].reg,
		    rtl8125_mac_cfg3_ephy[i].val);

	/* Additional ephy tweaks (the 0x004x block mirrors 0x000x). */
	val = rge_read_ephy(sc, 0x002a) & ~0x7000;
	rge_write_ephy(sc, 0x002a, val | 0x3000);
	RGE_EPHY_CLRBIT(sc, 0x0019, 0x0040);
	RGE_EPHY_SETBIT(sc, 0x001b, 0x0e00);
	RGE_EPHY_CLRBIT(sc, 0x001b, 0x7000);
	rge_write_ephy(sc, 0x0002, 0x6042);
	rge_write_ephy(sc, 0x0006, 0x0014);
	val = rge_read_ephy(sc, 0x006a) & ~0x7000;
	rge_write_ephy(sc, 0x006a, val | 0x3000);
	RGE_EPHY_CLRBIT(sc, 0x0059, 0x0040);
	RGE_EPHY_SETBIT(sc, 0x005b, 0x0e00);
	RGE_EPHY_CLRBIT(sc, 0x005b, 0x7000);
	rge_write_ephy(sc, 0x0042, 0x6042);
	rge_write_ephy(sc, 0x0046, 0x0014);

	/* Download PHY microcode if not already at this version. */
	rge_phy_config_mcu(sc, RGE_MAC_CFG3_MCODE_VER);

	/* Vendor PHY tuning (read-modify-write on undocumented fields). */
	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad16, val | 0x03ff);
	val = rge_read_phy_ocp(sc, 0xad32) & ~0x003f;
	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x1000);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x0100);
	val = rge_read_phy_ocp(sc, 0xacc0) & ~0x0003;
	rge_write_phy_ocp(sc, 0xacc0, val | 0x0002);
	val = rge_read_phy_ocp(sc, 0xad40) & ~0x00e0;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0040);
	val = rge_read_phy_ocp(sc, 0xad40) & ~0x0007;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0004);
	RGE_PHY_CLRBIT(sc, 0xac14, 0x0080);
	RGE_PHY_CLRBIT(sc, 0xac80, 0x0300);
	val = rge_read_phy_ocp(sc, 0xac5e) & ~0x0007;
	rge_write_phy_ocp(sc, 0xac5e, val | 0x0002);
	rge_write_phy_ocp(sc, 0xad4c, 0x00a8);
	rge_write_phy_ocp(sc, 0xac5c, 0x01ff);
	val = rge_read_phy_ocp(sc, 0xac8a) & ~0x00f0;
	rge_write_phy_ocp(sc, 0xac8a, val | 0x0030);
	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0500);
	rge_write_phy_ocp(sc, 0xb87c, 0x8159);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0700);
	/* EEE TX idle timer scales with the current MTU. */
	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN +
	    32);
	rge_write_phy_ocp(sc, 0xb87c, 0x80a2);
	rge_write_phy_ocp(sc, 0xb87e, 0x0153);
	rge_write_phy_ocp(sc, 0xb87c, 0x809c);
	rge_write_phy_ocp(sc, 0xb87e, 0x0153);

	/* Blast the a438 table starting at indirect address 0x81b3,
	 * then zero the following 26 locations. */
	rge_write_phy_ocp(sc, 0xa436, 0x81b3);
	for (i = 0; i < nitems(mac_cfg3_a438_value); i++)
		rge_write_phy_ocp(sc, 0xa438, mac_cfg3_a438_value[i]);
	for (i = 0; i < 26; i++)
		rge_write_phy_ocp(sc, 0xa438, 0);
	rge_write_phy_ocp(sc, 0xa436, 0x8257);
	rge_write_phy_ocp(sc, 0xa438, 0x020f);
	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
	rge_write_phy_ocp(sc, 0xa438, 0x7843);

	/* The b88e table must be written while the PHY MCU is in
	 * patch mode; bracket it with rge_patch_phy_mcu(). */
	rge_patch_phy_mcu(sc, 1);
	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
	for (i = 0; i < nitems(mac_cfg3_b88e_value); i += 2) {
		rge_write_phy_ocp(sc, 0xb88e, mac_cfg3_b88e_value[i]);
		rge_write_phy_ocp(sc, 0xb890, mac_cfg3_b88e_value[i + 1]);
	}
	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
	rge_patch_phy_mcu(sc, 0);

	RGE_PHY_SETBIT(sc, 0xd068, 0x2000);
	rge_write_phy_ocp(sc, 0xa436, 0x81a2);
	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
	val = rge_read_phy_ocp(sc, 0xb54c) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb54c, val | 0xdb00);
	RGE_PHY_CLRBIT(sc, 0xa454, 0x0001);
	RGE_PHY_SETBIT(sc, 0xa5d4, 0x0020);
	RGE_PHY_CLRBIT(sc, 0xad4e, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xa86a, 0x0001);
	/* 0xa442 bit 0x0800 is set at the end of all cfg variants. */
	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
}
   2107 
/*
 * PHY configuration for MAC_CFG4 (RTL8125A-class) parts: ephy table,
 * PHY MCU microcode download, then the vendor-prescribed PHY OCP
 * register sequence.  Register/bit semantics are undocumented by
 * Realtek; the values and ordering come from the vendor driver and
 * must not be changed (including the odd-looking mixed-case constants
 * 0x80F2 and 0xac4E, kept as imported).
 */
void
rge_phy_config_mac_cfg4(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint16_t val;
	int i;
	/* (address, data) pairs for the 0xb87c/0xb87e indirect port. */
	static const uint16_t mac_cfg4_b87c_value[] =
	    { 0x8013, 0x0700, 0x8fb9, 0x2801, 0x8fba, 0x0100, 0x8fbc, 0x1900,
	      0x8fbe, 0xe100, 0x8fc0, 0x0800, 0x8fc2, 0xe500, 0x8fc4, 0x0f00,
	      0x8fc6, 0xf100, 0x8fc8, 0x0400, 0x8fca, 0xf300, 0x8fcc, 0xfd00,
	      0x8fce, 0xff00, 0x8fd0, 0xfb00, 0x8fd2, 0x0100, 0x8fd4, 0xf400,
	      0x8fd6, 0xff00, 0x8fd8, 0xf600, 0x813d, 0x390e, 0x814f, 0x790e,
	      0x80b0, 0x0f31 };

	/* Load the chip-specific ephy initialization table. */
	for (i = 0; i < nitems(rtl8125_mac_cfg4_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg4_ephy[i].reg,
		    rtl8125_mac_cfg4_ephy[i].val);

	rge_write_phy_ocp(sc, 0xbf86, 0x9000);
	/* Pulse bit 0x0400 of 0xc402 (set, then immediately clear). */
	RGE_PHY_SETBIT(sc, 0xc402, 0x0400);
	RGE_PHY_CLRBIT(sc, 0xc402, 0x0400);
	rge_write_phy_ocp(sc, 0xbd86, 0x1010);
	rge_write_phy_ocp(sc, 0xbd88, 0x1010);
	val = rge_read_phy_ocp(sc, 0xbd4e) & ~0x0c00;
	rge_write_phy_ocp(sc, 0xbd4e, val | 0x0800);
	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
	rge_write_phy_ocp(sc, 0xbf46, val | 0x0700);

	/* Download PHY microcode if not already at this version. */
	rge_phy_config_mcu(sc, RGE_MAC_CFG4_MCODE_VER);

	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
	RGE_PHY_SETBIT(sc, 0xbc08, 0x000c);
	rge_write_phy_ocp(sc, 0xa436, 0x8fff);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x0400);
	/* Indirect addresses 0x8560..0x856a: first three get 0x19cc,
	 * the rest 0x147d. */
	for (i = 0; i < 6; i++) {
		rge_write_phy_ocp(sc, 0xb87c, 0x8560 + i * 2);
		if (i < 3)
			rge_write_phy_ocp(sc, 0xb87e, 0x19cc);
		else
			rge_write_phy_ocp(sc, 0xb87e, 0x147d);
	}
	rge_write_phy_ocp(sc, 0xb87c, 0x8ffe);
	rge_write_phy_ocp(sc, 0xb87e, 0x0907);
	val = rge_read_phy_ocp(sc, 0xacda) & ~0xff00;
	rge_write_phy_ocp(sc, 0xacda, val | 0xff00);
	val = rge_read_phy_ocp(sc, 0xacde) & ~0xf000;
	rge_write_phy_ocp(sc, 0xacde, val | 0xf000);
	rge_write_phy_ocp(sc, 0xb87c, 0x80d6);
	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
	rge_write_phy_ocp(sc, 0xb87c, 0x80F2);
	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
	rge_write_phy_ocp(sc, 0xb87c, 0x80f4);
	rge_write_phy_ocp(sc, 0xb87e, 0x6077);
	rge_write_phy_ocp(sc, 0xb506, 0x01e7);
	rge_write_phy_ocp(sc, 0xac8c, 0x0ffc);
	rge_write_phy_ocp(sc, 0xac46, 0xb7b4);
	rge_write_phy_ocp(sc, 0xac50, 0x0fbc);
	rge_write_phy_ocp(sc, 0xac3c, 0x9240);
	rge_write_phy_ocp(sc, 0xac4E, 0x0db4);
	rge_write_phy_ocp(sc, 0xacc6, 0x0707);
	rge_write_phy_ocp(sc, 0xacc8, 0xa0d3);
	rge_write_phy_ocp(sc, 0xad08, 0x0007);
	/* Blast the (address, data) table through 0xb87c/0xb87e. */
	for (i = 0; i < nitems(mac_cfg4_b87c_value); i += 2) {
		rge_write_phy_ocp(sc, 0xb87c, mac_cfg4_b87c_value[i]);
		rge_write_phy_ocp(sc, 0xb87e, mac_cfg4_b87c_value[i + 1]);
	}
	RGE_PHY_SETBIT(sc, 0xbf4c, 0x0002);
	RGE_PHY_SETBIT(sc, 0xbcca, 0x0300);
	rge_write_phy_ocp(sc, 0xb87c, 0x8141);
	rge_write_phy_ocp(sc, 0xb87e, 0x320e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8153);
	rge_write_phy_ocp(sc, 0xb87e, 0x720e);
	RGE_PHY_CLRBIT(sc, 0xa432, 0x0040);
	rge_write_phy_ocp(sc, 0xb87c, 0x8529);
	rge_write_phy_ocp(sc, 0xb87e, 0x050e);
	/* EEE TX idle timer scales with the current MTU. */
	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN +
	    32);
	rge_write_phy_ocp(sc, 0xa436, 0x816c);
	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
	rge_write_phy_ocp(sc, 0xa436, 0x8170);
	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
	rge_write_phy_ocp(sc, 0xa436, 0x8174);
	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
	rge_write_phy_ocp(sc, 0xa436, 0x8178);
	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
	rge_write_phy_ocp(sc, 0xa436, 0x817c);
	rge_write_phy_ocp(sc, 0xa438, 0x0719);
	rge_write_phy_ocp(sc, 0xa436, 0x8ff4);
	rge_write_phy_ocp(sc, 0xa438, 0x0400);
	rge_write_phy_ocp(sc, 0xa436, 0x8ff1);
	rge_write_phy_ocp(sc, 0xa438, 0x0404);
	rge_write_phy_ocp(sc, 0xbf4a, 0x001b);
	/* Indirect addresses 0x8033, 0x8037, ... 0x8047 (stride 4):
	 * the third entry gets 0xfc32, the others 0x7c13. */
	for (i = 0; i < 6; i++) {
		rge_write_phy_ocp(sc, 0xb87c, 0x8033 + i * 4);
		if (i == 2)
			rge_write_phy_ocp(sc, 0xb87e, 0xfc32);
		else
			rge_write_phy_ocp(sc, 0xb87e, 0x7c13);
	}
	rge_write_phy_ocp(sc, 0xb87c, 0x8145);
	rge_write_phy_ocp(sc, 0xb87e, 0x370e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
	rge_write_phy_ocp(sc, 0xb87e, 0x770e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8169);
	rge_write_phy_ocp(sc, 0xb87e, 0x0d0a);
	rge_write_phy_ocp(sc, 0xb87c, 0x817b);
	rge_write_phy_ocp(sc, 0xb87e, 0x1d0a);
	rge_write_phy_ocp(sc, 0xa436, 0x8217);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x821a);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x80da);
	rge_write_phy_ocp(sc, 0xa438, 0x0403);
	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
	rge_write_phy_ocp(sc, 0xa436, 0x80b3);
	rge_write_phy_ocp(sc, 0xa438, 0x0384);
	rge_write_phy_ocp(sc, 0xa436, 0x80b7);
	rge_write_phy_ocp(sc, 0xa438, 0x2007);
	rge_write_phy_ocp(sc, 0xa436, 0x80ba);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80b5);
	rge_write_phy_ocp(sc, 0xa438, 0xf009);
	rge_write_phy_ocp(sc, 0xa436, 0x80bd);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x9f00);
	rge_write_phy_ocp(sc, 0xa436, 0x80c7);
	rge_write_phy_ocp(sc, 0xa438, 0xf083);
	rge_write_phy_ocp(sc, 0xa436, 0x80dd);
	rge_write_phy_ocp(sc, 0xa438, 0x03f0);
	rge_write_phy_ocp(sc, 0xa436, 0x80df);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
	rge_write_phy_ocp(sc, 0xa436, 0x80cb);
	rge_write_phy_ocp(sc, 0xa438, 0x2007);
	rge_write_phy_ocp(sc, 0xa436, 0x80ce);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80c9);
	rge_write_phy_ocp(sc, 0xa438, 0x8009);
	rge_write_phy_ocp(sc, 0xa436, 0x80d1);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x8000);
	rge_write_phy_ocp(sc, 0xa436, 0x80a3);
	rge_write_phy_ocp(sc, 0xa438, 0x200a);
	rge_write_phy_ocp(sc, 0xa436, 0x80a5);
	rge_write_phy_ocp(sc, 0xa438, 0xf0ad);
	rge_write_phy_ocp(sc, 0xa436, 0x809f);
	rge_write_phy_ocp(sc, 0xa438, 0x6073);
	rge_write_phy_ocp(sc, 0xa436, 0x80a1);
	rge_write_phy_ocp(sc, 0xa438, 0x000b);
	rge_write_phy_ocp(sc, 0xa436, 0x80a9);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xc000);
	/* The 0xb88e/0xb890 writes below require PHY MCU patch mode. */
	rge_patch_phy_mcu(sc, 1);
	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
	rge_write_phy_ocp(sc, 0xb88e, 0xc23e);
	rge_write_phy_ocp(sc, 0xb890, 0x0000);
	rge_write_phy_ocp(sc, 0xb88e, 0xc240);
	rge_write_phy_ocp(sc, 0xb890, 0x0103);
	rge_write_phy_ocp(sc, 0xb88e, 0xc242);
	rge_write_phy_ocp(sc, 0xb890, 0x0507);
	rge_write_phy_ocp(sc, 0xb88e, 0xc244);
	rge_write_phy_ocp(sc, 0xb890, 0x090b);
	rge_write_phy_ocp(sc, 0xb88e, 0xc246);
	rge_write_phy_ocp(sc, 0xb890, 0x0c0e);
	rge_write_phy_ocp(sc, 0xb88e, 0xc248);
	rge_write_phy_ocp(sc, 0xb890, 0x1012);
	rge_write_phy_ocp(sc, 0xb88e, 0xc24a);
	rge_write_phy_ocp(sc, 0xb890, 0x1416);
	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
	rge_patch_phy_mcu(sc, 0);
	RGE_PHY_SETBIT(sc, 0xa86a, 0x0001);
	RGE_PHY_SETBIT(sc, 0xa6f0, 0x0001);
	rge_write_phy_ocp(sc, 0xbfa0, 0xd70d);
	rge_write_phy_ocp(sc, 0xbfa2, 0x4100);
	rge_write_phy_ocp(sc, 0xbfa4, 0xe868);
	rge_write_phy_ocp(sc, 0xbfa6, 0xdc59);
	rge_write_phy_ocp(sc, 0xb54c, 0x3c18);
	RGE_PHY_CLRBIT(sc, 0xbfa4, 0x0020);
	rge_write_phy_ocp(sc, 0xa436, 0x817d);
	RGE_PHY_SETBIT(sc, 0xa438, 0x1000);
}
   2297 
/*
 * PHY configuration for MAC_CFG5 (RTL8125B-class) parts: ephy table,
 * PHY MCU microcode download, then vendor-prescribed PHY OCP tuning.
 * Register/bit semantics are undocumented; do not reorder the writes.
 */
void
rge_phy_config_mac_cfg5(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint16_t val;
	int i;

	/* Load the chip-specific ephy initialization table. */
	for (i = 0; i < nitems(rtl8125_mac_cfg5_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg5_ephy[i].reg,
		    rtl8125_mac_cfg5_ephy[i].val);

	/* Same field tweak in the 0x00xx and mirrored 0x00xx+0x40 bank. */
	val = rge_read_ephy(sc, 0x0022) & ~0x0030;
	rge_write_ephy(sc, 0x0022, val | 0x0020);
	val = rge_read_ephy(sc, 0x0062) & ~0x0030;
	rge_write_ephy(sc, 0x0062, val | 0x0020);

	/* Download PHY microcode if not already at this version. */
	rge_phy_config_mcu(sc, RGE_MAC_CFG5_MCODE_VER);

	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
	val = rge_read_phy_ocp(sc, 0xac46) & ~0x00f0;
	rge_write_phy_ocp(sc, 0xac46, val | 0x0090);
	val = rge_read_phy_ocp(sc, 0xad30) & ~0x0003;
	rge_write_phy_ocp(sc, 0xad30, val | 0x0001);
	/* EEE TX idle timer scales with the current MTU. */
	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN +
	    32);
	rge_write_phy_ocp(sc, 0xb87c, 0x80f5);
	rge_write_phy_ocp(sc, 0xb87e, 0x760e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8107);
	rge_write_phy_ocp(sc, 0xb87e, 0x360e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8551);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0800);
	val = rge_read_phy_ocp(sc, 0xbf00) & ~0xe000;
	rge_write_phy_ocp(sc, 0xbf00, val | 0xa000);
	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
	rge_write_phy_ocp(sc, 0xbf46, val | 0x0300);
	/* Write 0x2417 to indirect addresses 0x8044, 0x804a, ... 0x807a. */
	for (i = 0; i < 10; i++) {
		rge_write_phy_ocp(sc, 0xa436, 0x8044 + i * 6);
		rge_write_phy_ocp(sc, 0xa438, 0x2417);
	}
	RGE_PHY_SETBIT(sc, 0xa4ca, 0x0040);
	val = rge_read_phy_ocp(sc, 0xbf84) & ~0xe000;
	rge_write_phy_ocp(sc, 0xbf84, val | 0xa000);
}
   2342 
/*
 * Download the PHY MCU microcode patch table matching the chip type,
 * but only if the currently loaded version (sc->rge_mcodever, read at
 * attach) differs from mcode_version.  The whole download is bracketed
 * by rge_patch_phy_mcu(), and MAC_CFG2/MAC_CFG3 need extra lock/unlock
 * steps around it.  On success, the new version number is recorded in
 * the PHY at indirect address 0x801e.
 */
void
rge_phy_config_mcu(struct rge_softc *sc, uint16_t mcode_version)
{
	if (sc->rge_mcodever != mcode_version) {
		int i;

		/* Put the PHY MCU into patch mode. */
		rge_patch_phy_mcu(sc, 1);

		/* Pre-download setup specific to MAC_CFG2/MAC_CFG3. */
		if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
			rge_write_phy_ocp(sc, 0xa436, 0x8024);
			if (sc->rge_type == MAC_CFG2)
				rge_write_phy_ocp(sc, 0xa438, 0x8600);
			else
				rge_write_phy_ocp(sc, 0xa438, 0x8601);
			rge_write_phy_ocp(sc, 0xa436, 0xb82e);
			rge_write_phy_ocp(sc, 0xa438, 0x0001);

			RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
		}

		/* Blast the per-chip (reg, val) microcode table. */
		if (sc->rge_type == MAC_CFG2) {
			for (i = 0; i < nitems(rtl8125_mac_cfg2_mcu); i++) {
				rge_write_phy_ocp(sc,
				    rtl8125_mac_cfg2_mcu[i].reg,
				    rtl8125_mac_cfg2_mcu[i].val);
			}
		} else if (sc->rge_type == MAC_CFG3) {
			for (i = 0; i < nitems(rtl8125_mac_cfg3_mcu); i++) {
				rge_write_phy_ocp(sc,
				    rtl8125_mac_cfg3_mcu[i].reg,
				    rtl8125_mac_cfg3_mcu[i].val);
			}
		} else if (sc->rge_type == MAC_CFG4) {
			for (i = 0; i < nitems(rtl8125_mac_cfg4_mcu); i++) {
				rge_write_phy_ocp(sc,
				    rtl8125_mac_cfg4_mcu[i].reg,
				    rtl8125_mac_cfg4_mcu[i].val);
			}
		} else if (sc->rge_type == MAC_CFG5) {
			for (i = 0; i < nitems(rtl8125_mac_cfg5_mcu); i++) {
				rge_write_phy_ocp(sc,
				    rtl8125_mac_cfg5_mcu[i].reg,
				    rtl8125_mac_cfg5_mcu[i].val);
			}
		} else if (sc->rge_type == MAC_CFG2_8126) {
			for (i = 0; i < nitems(rtl8126_mac_cfg2_mcu); i++) {
				rge_write_phy_ocp(sc,
				    rtl8126_mac_cfg2_mcu[i].reg,
				    rtl8126_mac_cfg2_mcu[i].val);
			}
		}

		/* Post-download teardown, mirroring the setup above. */
		if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
			RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);

			rge_write_phy_ocp(sc, 0xa436, 0);
			rge_write_phy_ocp(sc, 0xa438, 0);
			RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
			rge_write_phy_ocp(sc, 0xa436, 0x8024);
			rge_write_phy_ocp(sc, 0xa438, 0);
		}

		/* Leave patch mode. */
		rge_patch_phy_mcu(sc, 0);

		/* Write microcode version. */
		rge_write_phy_ocp(sc, 0xa436, 0x801e);
		rge_write_phy_ocp(sc, 0xa438, mcode_version);
	}
}
   2412 
   2413 void
   2414 rge_set_macaddr(struct rge_softc *sc, const uint8_t *addr)
   2415 {
   2416 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
   2417 	RGE_WRITE_4(sc, RGE_MAC0,
   2418 	    (uint32_t)addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
   2419 	RGE_WRITE_4(sc, RGE_MAC4,
   2420 	    addr[5] <<  8 | addr[4]);
   2421 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
   2422 }
   2423 
   2424 void
   2425 rge_get_macaddr(struct rge_softc *sc, uint8_t *addr)
   2426 {
   2427 	int i;
   2428 
   2429 	for (i = 0; i < ETHER_ADDR_LEN; i++)
   2430 		addr[i] = RGE_READ_1(sc, RGE_ADDR0 + i);
   2431 }
   2432 
/*
 * One-time hardware initialization: basic config-register setup,
 * disabling of power-saving features, and clearing/loading of the MAC
 * MCU patch registers.  The sequence and delays follow the vendor
 * driver and are order-sensitive.
 */
void
rge_hw_init(struct rge_softc *sc)
{
	int i;

	/* Config writes below need the EEPROM "write config" unlock. */
	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
	if (sc->rge_type == MAC_CFG2_8126)
		RGE_CLRBIT_1(sc, RGE_INT_CFG0, 0x08);
	else
		RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
	/* Undocumented register 0xf1; bit meaning unknown (vendor magic). */
	RGE_CLRBIT_1(sc, 0xf1, 0x80);

	/* Disable UPS. */
	RGE_MAC_CLRBIT(sc, 0xd40a, 0x0010);

	/* Configure MAC MCU. */
	rge_write_mac_ocp(sc, 0xfc38, 0);

	/* Zero MAC OCP 0xfc28..0xfc36 — presumably the MAC MCU
	 * breakpoint slots; vendor-undocumented. */
	for (i = 0xfc28; i < 0xfc38; i += 2)
		rge_write_mac_ocp(sc, i, 0);

	DELAY(3000);
	rge_write_mac_ocp(sc, 0xfc26, 0);

	/* Load the per-chip MAC MCU patch table, where one exists. */
	if (sc->rge_type == MAC_CFG3) {
		for (i = 0; i < nitems(rtl8125_mac_bps); i++) {
			rge_write_mac_ocp(sc, rtl8125_mac_bps[i].reg,
			    rtl8125_mac_bps[i].val);
		}
	} else if (sc->rge_type == MAC_CFG5) {
		for (i = 0; i < nitems(rtl8125b_mac_bps); i++) {
			rge_write_mac_ocp(sc, rtl8125b_mac_bps[i].reg,
			    rtl8125b_mac_bps[i].val);
		}
	}

	/* Disable PHY power saving. */
	if (sc->rge_type != MAC_CFG2_8126)
		rge_disable_phy_ocp_pwrsave(sc);

	/* Set PCIe uncorrectable error status. */
	rge_write_csi(sc, 0x108,
	    rge_read_csi(sc, 0x108) | 0x00100000);
}
   2479 
/*
 * Disable PHY OCP power saving by forcing register 0xc416 to 0x0500
 * (written as 0 then 0x0500, inside a PHY MCU patch-mode bracket).
 * Nothing to do when the register already holds that value.
 */
void
rge_disable_phy_ocp_pwrsave(struct rge_softc *sc)
{
	if (rge_read_phy_ocp(sc, 0xc416) == 0x0500)
		return;

	rge_patch_phy_mcu(sc, 1);
	rge_write_phy_ocp(sc, 0xc416, 0);
	rge_write_phy_ocp(sc, 0xc416, 0x0500);
	rge_patch_phy_mcu(sc, 0);
}
   2490 
/*
 * Enter (set != 0) or leave (set == 0) PHY MCU patch mode by toggling
 * bit 0x0010 of register 0xb820, then poll (up to 1000 * 100us) for
 * the MCU to acknowledge via bit 0x0040 of register 0xb800.
 *
 * NOTE(review): the completion poll tests for bit 0x0040 becoming SET
 * in both the enter and leave paths; this matches the imported
 * upstream code, but confirm the leave path really waits on the same
 * condition rather than the bit clearing.
 */
void
rge_patch_phy_mcu(struct rge_softc *sc, int set)
{
	int i;

	if (set)
		RGE_PHY_SETBIT(sc, 0xb820, 0x0010);
	else
		RGE_PHY_CLRBIT(sc, 0xb820, 0x0010);

	/* Poll for the MCU patch-ready flag. */
	for (i = 0; i < 1000; i++) {
		if ((rge_read_phy_ocp(sc, 0xb800) & 0x0040) == 0x0040)
			break;
		DELAY(100);
	}
	/* Timeout is only logged; callers proceed regardless. */
	if (i == 1000) {
		DPRINTF(("timeout waiting to patch phy mcu\n"));
		return;
	}
}
   2511 
   2512 void
   2513 rge_add_media_types(struct rge_softc *sc)
   2514 {
   2515 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
   2516 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
   2517 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
   2518 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
   2519 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T, 0, NULL);
   2520 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
   2521 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
   2522 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX, 0, NULL);
   2523 
   2524 	if (sc->rge_type == MAC_CFG2_8126) {
   2525 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_5000_T, 0, NULL);
   2526 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_5000_T | IFM_FDX,
   2527 		    0, NULL);
   2528 	}
   2529 }
   2530 
   2531 void
   2532 rge_config_imtype(struct rge_softc *sc, int imtype)
   2533 {
   2534 	switch (imtype) {
   2535 	case RGE_IMTYPE_NONE:
   2536 		sc->rge_intrs = RGE_INTRS;
   2537 		sc->rge_rx_ack = RGE_ISR_RX_OK | RGE_ISR_RX_DESC_UNAVAIL |
   2538 		    RGE_ISR_RX_FIFO_OFLOW;
   2539 		sc->rge_tx_ack = RGE_ISR_TX_OK;
   2540 		break;
   2541 	case RGE_IMTYPE_SIM:
   2542 		sc->rge_intrs = RGE_INTRS_TIMER;
   2543 		sc->rge_rx_ack = RGE_ISR_PCS_TIMEOUT;
   2544 		sc->rge_tx_ack = RGE_ISR_PCS_TIMEOUT;
   2545 		break;
   2546 	default:
   2547 		panic("%s: unknown imtype %d", device_xname(sc->sc_dev), imtype);
   2548 	}
   2549 }
   2550 
/* Disable hardware interrupt moderation (zero the IM register). */
void
rge_disable_hw_im(struct rge_softc *sc)
{
	RGE_WRITE_2(sc, RGE_IM, 0);
}
   2556 
/*
 * Disable simulated (timer-based) interrupt moderation: stop the
 * hardware timer and note in the softc that it is off.
 */
void
rge_disable_sim_im(struct rge_softc *sc)
{
	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
	sc->rge_timerintr = 0;
}
   2563 
/*
 * Enable simulated (timer-based) interrupt moderation: arm the
 * interrupt timer (0x2600 ticks; vendor-chosen interval), kick the
 * timer counter, and note in the softc that it is on.
 */
void
rge_setup_sim_im(struct rge_softc *sc)
{
	RGE_WRITE_4(sc, RGE_TIMERINT0, 0x2600);
	RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
	sc->rge_timerintr = 1;
}
   2571 
   2572 void
   2573 rge_setup_intr(struct rge_softc *sc, int imtype)
   2574 {
   2575 	rge_config_imtype(sc, imtype);
   2576 
   2577 	/* Enable interrupts. */
   2578 	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
   2579 
   2580 	switch (imtype) {
   2581 	case RGE_IMTYPE_NONE:
   2582 		rge_disable_sim_im(sc);
   2583 		rge_disable_hw_im(sc);
   2584 		break;
   2585 	case RGE_IMTYPE_SIM:
   2586 		rge_disable_hw_im(sc);
   2587 		rge_setup_sim_im(sc);
   2588 		break;
   2589 	default:
   2590 		panic("%s: unknown imtype %d", device_xname(sc->sc_dev), imtype);
   2591 	}
   2592 }
   2593 
/*
 * Take the chip out of OOB (out-of-band management) mode so the host
 * driver owns the MAC.  The sequence and the magic MAC/PHY OCP register
 * values follow the vendor driver; the order of operations matters.
 */
void
rge_exit_oob(struct rge_softc *sc)
{
	int i;

	/* Stop accepting any packets while we reconfigure. */
	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
	    RGE_RXCFG_ERRPKT);

	/* Disable RealWoW. */
	rge_write_mac_ocp(sc, 0xc0bc, 0x00ff);

	rge_reset(sc);

	/* Disable OOB. */
	RGE_CLRBIT_1(sc, RGE_MCUCMD, RGE_MCUCMD_IS_OOB);

	/* Undocumented MAC OCP bit; cleared per the vendor driver. */
	RGE_MAC_CLRBIT(sc, 0xe8de, 0x4000);

	/* Wait (up to ~1ms) for the firmware to signal readiness. */
	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
			break;
	}

	/* Magic MAC OCP writes from the vendor driver — purpose unknown. */
	rge_write_mac_ocp(sc, 0xc0aa, 0x07d0);
	rge_write_mac_ocp(sc, 0xc0a6, 0x01b5);
	rge_write_mac_ocp(sc, 0xc01e, 0x5555);

	/* Wait again for the ready bit after the writes above. */
	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
			break;
	}

	/* Handle resume from UPS (ultra power save) state. */
	if (rge_read_mac_ocp(sc, 0xd42c) & 0x0100) {
		printf("%s: rge_exit_oob(): rtl8125_is_ups_resume!!\n",
		    device_xname(sc->sc_dev));
		/* Poll (up to RGE_TIMEOUT ms) until the PHY state
		 * field reads 2 — presumably "ready"; value from the
		 * vendor driver. */
		for (i = 0; i < RGE_TIMEOUT; i++) {
			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 2)
				break;
			DELAY(1000);
		}
		RGE_MAC_CLRBIT(sc, 0xd408, 0x0100);
		/* Extra PHY fixup only needed on these MAC revisions. */
		if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5)
			RGE_PHY_CLRBIT(sc, 0xa466, 0x0001);
		RGE_PHY_CLRBIT(sc, 0xa468, 0x000a);
	}
}
   2643 
   2644 void
   2645 rge_write_csi(struct rge_softc *sc, uint32_t reg, uint32_t val)
   2646 {
   2647 	int i;
   2648 
   2649 	RGE_WRITE_4(sc, RGE_CSIDR, val);
   2650 	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
   2651 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT) | RGE_CSIAR_BUSY);
   2652 
   2653 	for (i = 0; i < 10; i++) {
   2654 		 DELAY(100);
   2655 		 if (!(RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY))
   2656 			break;
   2657 	}
   2658 
   2659 	DELAY(20);
   2660 }
   2661 
   2662 uint32_t
   2663 rge_read_csi(struct rge_softc *sc, uint32_t reg)
   2664 {
   2665 	int i;
   2666 
   2667 	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
   2668 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT));
   2669 
   2670 	for (i = 0; i < 10; i++) {
   2671 		 DELAY(100);
   2672 		 if (RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY)
   2673 			break;
   2674 	}
   2675 
   2676 	DELAY(20);
   2677 
   2678 	return (RGE_READ_4(sc, RGE_CSIDR));
   2679 }
   2680 
   2681 void
   2682 rge_write_mac_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
   2683 {
   2684 	uint32_t tmp;
   2685 
   2686 	tmp = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
   2687 	tmp += val;
   2688 	tmp |= RGE_MACOCP_BUSY;
   2689 	RGE_WRITE_4(sc, RGE_MACOCP, tmp);
   2690 }
   2691 
   2692 uint16_t
   2693 rge_read_mac_ocp(struct rge_softc *sc, uint16_t reg)
   2694 {
   2695 	uint32_t val;
   2696 
   2697 	val = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
   2698 	RGE_WRITE_4(sc, RGE_MACOCP, val);
   2699 
   2700 	return (RGE_READ_4(sc, RGE_MACOCP) & RGE_MACOCP_DATA_MASK);
   2701 }
   2702 
   2703 void
   2704 rge_write_ephy(struct rge_softc *sc, uint16_t reg, uint16_t val)
   2705 {
   2706 	uint32_t tmp;
   2707 	int i;
   2708 
   2709 	tmp = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
   2710 	tmp |= RGE_EPHYAR_BUSY | (val & RGE_EPHYAR_DATA_MASK);
   2711 	RGE_WRITE_4(sc, RGE_EPHYAR, tmp);
   2712 
   2713 	for (i = 0; i < 10; i++) {
   2714 		DELAY(100);
   2715 		if (!(RGE_READ_4(sc, RGE_EPHYAR) & RGE_EPHYAR_BUSY))
   2716 			break;
   2717 	}
   2718 
   2719 	DELAY(20);
   2720 }
   2721 
   2722 uint16_t
   2723 rge_read_ephy(struct rge_softc *sc, uint16_t reg)
   2724 {
   2725 	uint32_t val;
   2726 	int i;
   2727 
   2728 	val = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
   2729 	RGE_WRITE_4(sc, RGE_EPHYAR, val);
   2730 
   2731 	for (i = 0; i < 10; i++) {
   2732 		DELAY(100);
   2733 		val = RGE_READ_4(sc, RGE_EPHYAR);
   2734 		if (val & RGE_EPHYAR_BUSY)
   2735 			break;
   2736 	}
   2737 
   2738 	DELAY(20);
   2739 
   2740 	return (val & RGE_EPHYAR_DATA_MASK);
   2741 }
   2742 
   2743 void
   2744 rge_write_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg, uint16_t val)
   2745 {
   2746 	uint16_t off, phyaddr;
   2747 
   2748 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
   2749 	phyaddr <<= 4;
   2750 
   2751 	off = addr ? reg : 0x10 + (reg % 8);
   2752 
   2753 	phyaddr += (off - 16) << 1;
   2754 
   2755 	rge_write_phy_ocp(sc, phyaddr, val);
   2756 }
   2757 
   2758 uint16_t
   2759 rge_read_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg)
   2760 {
   2761 	uint16_t off, phyaddr;
   2762 
   2763 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
   2764 	phyaddr <<= 4;
   2765 
   2766 	off = addr ? reg : 0x10 + (reg % 8);
   2767 
   2768 	phyaddr += (off - 16) << 1;
   2769 
   2770 	return (rge_read_phy_ocp(sc, phyaddr));
   2771 }
   2772 
   2773 void
   2774 rge_write_phy_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
   2775 {
   2776 	uint32_t tmp;
   2777 	int i;
   2778 
   2779 	tmp = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
   2780 	tmp |= RGE_PHYOCP_BUSY | val;
   2781 	RGE_WRITE_4(sc, RGE_PHYOCP, tmp);
   2782 
   2783 	for (i = 0; i < RGE_TIMEOUT; i++) {
   2784 		DELAY(1);
   2785 		if (!(RGE_READ_4(sc, RGE_PHYOCP) & RGE_PHYOCP_BUSY))
   2786 			break;
   2787 	}
   2788 }
   2789 
   2790 uint16_t
   2791 rge_read_phy_ocp(struct rge_softc *sc, uint16_t reg)
   2792 {
   2793 	uint32_t val;
   2794 	int i;
   2795 
   2796 	val = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
   2797 	RGE_WRITE_4(sc, RGE_PHYOCP, val);
   2798 
   2799 	for (i = 0; i < RGE_TIMEOUT; i++) {
   2800 		DELAY(1);
   2801 		val = RGE_READ_4(sc, RGE_PHYOCP);
   2802 		if (val & RGE_PHYOCP_BUSY)
   2803 			break;
   2804 	}
   2805 
   2806 	return (val & RGE_PHYOCP_DATA_MASK);
   2807 }
   2808 
   2809 int
   2810 rge_get_link_status(struct rge_softc *sc)
   2811 {
   2812 	return ((RGE_READ_2(sc, RGE_PHYSTAT) & RGE_PHYSTAT_LINK) ? 1 : 0);
   2813 }
   2814 
/*
 * Deferred transmit kick: poke the TX start register so the chip
 * begins fetching descriptors from the transmit ring.
 */
void
rge_txstart(void *arg)
{
	struct rge_softc *sc = arg;

	RGE_WRITE_2(sc, RGE_TXSTART, RGE_TXSTART_START);
}
   2822 
/*
 * Once-per-second callout: refresh the link state, then re-arm
 * ourselves for the next tick.
 */
void
rge_tick(void *arg)
{
	struct rge_softc *sc = arg;
	int s;

	/* Block network interrupts around the link-state update. */
	s = splnet();
	rge_link_state(sc);
	splx(s);

	/* Run again in one second. */
	callout_schedule(&sc->sc_timeout, hz);
}
   2835 
   2836 void
   2837 rge_link_state(struct rge_softc *sc)
   2838 {
   2839 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   2840 	int link = LINK_STATE_DOWN;
   2841 
   2842 	if (rge_get_link_status(sc))
   2843 		link = LINK_STATE_UP;
   2844 
   2845 	if (ifp->if_link_state != link) { /* XXX not safe to access */
   2846 		if_link_state_change(ifp, link);
   2847 	}
   2848 }
   2849 
   2850 /* Module interface */
   2851 
   2852 MODULE(MODULE_CLASS_DRIVER, if_rge, "pci");
   2853 
   2854 #ifdef _MODULE
   2855 #include "ioconf.c"
   2856 #endif
   2857 
   2858 static int
   2859 if_rge_modcmd(modcmd_t cmd, void *opaque)
   2860 {
   2861 	int error = 0;
   2862 
   2863 	switch (cmd) {
   2864 	case MODULE_CMD_INIT:
   2865 #ifdef _MODULE
   2866 		error = config_init_component(cfdriver_ioconf_rge,
   2867 		    cfattach_ioconf_rge, cfdata_ioconf_rge);
   2868 #endif
   2869 		return error;
   2870 	case MODULE_CMD_FINI:
   2871 #ifdef _MODULE
   2872 		error = config_fini_component(cfdriver_ioconf_rge,
   2873 		    cfattach_ioconf_rge, cfdata_ioconf_rge);
   2874 #endif
   2875 		return error;
   2876 	default:
   2877 		return ENOTTY;
   2878 	}
   2879 }
   2880 
   2881