      1 /*	$NetBSD: if_rge.c,v 1.31 2024/01/18 03:47:26 msaitoh Exp $	*/
      2 /*	$OpenBSD: if_rge.c,v 1.9 2020/12/12 11:48:53 jan Exp $	*/
      3 
      4 /*
      5  * Copyright (c) 2019, 2020 Kevin Lo <kevlo (at) openbsd.org>
      6  *
      7  * Permission to use, copy, modify, and distribute this software for any
      8  * purpose with or without fee is hereby granted, provided that the above
      9  * copyright notice and this permission notice appear in all copies.
     10  *
     11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
     12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
     13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
     14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
     15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
     16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
     17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
     18  */
     19 
     20 #include <sys/cdefs.h>
     21 __KERNEL_RCSID(0, "$NetBSD: if_rge.c,v 1.31 2024/01/18 03:47:26 msaitoh Exp $");
     22 
     23 #if defined(_KERNEL_OPT)
     24 #include "opt_net_mpsafe.h"
     25 #endif
     26 
     27 #include <sys/types.h>
     28 
     29 #include <sys/param.h>
     30 #include <sys/systm.h>
     31 #include <sys/sockio.h>
     32 #include <sys/mbuf.h>
     33 #include <sys/kernel.h>
     34 #include <sys/socket.h>
     35 #include <sys/device.h>
     36 #include <sys/endian.h>
     37 #include <sys/callout.h>
     38 #include <sys/workqueue.h>
     39 
     40 #include <net/if.h>
     41 
     42 #include <net/if_dl.h>
     43 #include <net/if_ether.h>
     44 
     45 #include <net/if_media.h>
     46 
     47 #include <netinet/in.h>
     48 #include <net/if_ether.h>
     49 
     50 #include <net/bpf.h>
     51 
     52 #include <sys/bus.h>
     53 #include <machine/intr.h>
     54 
     55 #include <dev/mii/mii.h>
     56 
     57 #include <dev/pci/pcivar.h>
     58 #include <dev/pci/pcireg.h>
     59 #include <dev/pci/pcidevs.h>
     60 
     61 #include <dev/pci/if_rgereg.h>
     62 
     63 #ifdef __NetBSD__
     64 #define letoh32 	htole32
     65 #define nitems(x) 	__arraycount(x)
     66 
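         /*
          * OpenBSD compatibility: allocate a packet header mbuf backed by
          * an external buffer of the requested size, mirroring OpenBSD's
          * MCLGETL().
          */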
     67 static struct mbuf *
     68 MCLGETL(struct rge_softc *sc __unused, int how,
     69     u_int size)
     70 {
     71 	struct mbuf *m;
     72 
     73 	MGETHDR(m, how, MT_DATA);
     74 	if (m == NULL)
     75 		return NULL;
     76 
     77 	MEXTMALLOC(m, size, how);
     78 	if ((m->m_flags & M_EXT) == 0) {
     79 		m_freem(m);
     80 		return NULL;
     81 	}
     82 	return m;
     83 }
     84 
     85 #ifdef NET_MPSAFE
     86 #define 	RGE_MPSAFE	1
     87 #define 	CALLOUT_FLAGS	CALLOUT_MPSAFE
     88 #else
     89 #define 	CALLOUT_FLAGS	0
     90 #endif
     91 #endif
     92 
     93 #ifdef RGE_DEBUG
     94 #define DPRINTF(x)	do { if (rge_debug > 0) printf x; } while (0)
     95 int rge_debug = 0;
     96 #else
     97 #define DPRINTF(x)
     98 #endif
     99 
    100 static int		rge_match(device_t, cfdata_t, void *);
    101 static void		rge_attach(device_t, device_t, void *);
    102 int		rge_intr(void *);
    103 int		rge_encap(struct rge_softc *, struct mbuf *, int);
    104 int		rge_ioctl(struct ifnet *, u_long, void *);
    105 void		rge_start(struct ifnet *);
    106 void		rge_watchdog(struct ifnet *);
    107 int		rge_init(struct ifnet *);
    108 void		rge_stop(struct ifnet *, int);
    109 int		rge_ifmedia_upd(struct ifnet *);
    110 void		rge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
    111 int		rge_allocmem(struct rge_softc *);
    112 int		rge_newbuf(struct rge_softc *, int);
    113 static int	rge_rx_list_init(struct rge_softc *);
    114 static void	rge_rx_list_fini(struct rge_softc *);
    115 static void	rge_tx_list_init(struct rge_softc *);
    116 static void	rge_tx_list_fini(struct rge_softc *);
    117 int		rge_rxeof(struct rge_softc *);
    118 int		rge_txeof(struct rge_softc *);
    119 void		rge_reset(struct rge_softc *);
    120 void		rge_iff(struct rge_softc *);
    121 void		rge_set_phy_power(struct rge_softc *, int);
    122 void		rge_phy_config(struct rge_softc *);
    123 void		rge_phy_config_mac_cfg2(struct rge_softc *);
    124 void		rge_phy_config_mac_cfg3(struct rge_softc *);
    125 void		rge_phy_config_mac_cfg4(struct rge_softc *);
    126 void		rge_phy_config_mac_cfg5(struct rge_softc *);
    127 void		rge_phy_config_mcu(struct rge_softc *, uint16_t);
    128 void		rge_set_macaddr(struct rge_softc *, const uint8_t *);
    129 void		rge_get_macaddr(struct rge_softc *, uint8_t *);
    130 void		rge_hw_init(struct rge_softc *);
    131 void		rge_disable_phy_ocp_pwrsave(struct rge_softc *);
    132 void		rge_patch_phy_mcu(struct rge_softc *, int);
    133 void		rge_add_media_types(struct rge_softc *);
    134 void		rge_config_imtype(struct rge_softc *, int);
    135 void		rge_disable_hw_im(struct rge_softc *);
    136 void		rge_disable_sim_im(struct rge_softc *);
    137 void		rge_setup_sim_im(struct rge_softc *);
    138 void		rge_setup_intr(struct rge_softc *, int);
    139 void		rge_exit_oob(struct rge_softc *);
    140 void		rge_write_csi(struct rge_softc *, uint32_t, uint32_t);
    141 uint32_t	rge_read_csi(struct rge_softc *, uint32_t);
    142 void		rge_write_mac_ocp(struct rge_softc *, uint16_t, uint16_t);
    143 uint16_t	rge_read_mac_ocp(struct rge_softc *, uint16_t);
    144 void		rge_write_ephy(struct rge_softc *, uint16_t, uint16_t);
    145 uint16_t	rge_read_ephy(struct rge_softc *, uint16_t);
    146 void		rge_write_phy(struct rge_softc *, uint16_t, uint16_t, uint16_t);
    147 uint16_t	rge_read_phy(struct rge_softc *, uint16_t, uint16_t);
    148 void		rge_write_phy_ocp(struct rge_softc *, uint16_t, uint16_t);
    149 uint16_t	rge_read_phy_ocp(struct rge_softc *, uint16_t);
    150 int		rge_get_link_status(struct rge_softc *);
    151 void		rge_txstart(void *);
    152 void		rge_tick(void *);
    153 void		rge_link_state(struct rge_softc *);
    154 
    155 static const struct {
    156 	uint16_t reg;
    157 	uint16_t val;
    158 }  rtl8125_mac_cfg2_mcu[] = {
    159 	RTL8125_MAC_CFG2_MCU
    160 }, rtl8125_mac_cfg3_mcu[] = {
    161 	RTL8125_MAC_CFG3_MCU
    162 }, rtl8125_mac_cfg4_mcu[] = {
    163 	RTL8125_MAC_CFG4_MCU
    164 }, rtl8125_mac_cfg5_mcu[] = {
    165 	RTL8125_MAC_CFG5_MCU
    166 };
    167 
    168 CFATTACH_DECL_NEW(rge, sizeof(struct rge_softc), rge_match, rge_attach,
    169 		NULL, NULL); /* Sevan - detach function? */
    170 
    171 static const struct device_compatible_entry compat_data[] = {
    172 	{ .id = PCI_ID_CODE(PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_E3000) },
    173 	{ .id = PCI_ID_CODE(PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8125) },
    174 
    175 	PCI_COMPAT_EOL
    176 };
    177 
    178 static int
    179 rge_match(device_t parent, cfdata_t match, void *aux)
    180 {
     181 	struct pci_attach_args *pa = aux;
    182 
    183 	return pci_compatible_match(pa, compat_data);
    184 }
    185 
     186 static void
    187 rge_attach(device_t parent, device_t self, void *aux)
    188 {
    189 	struct rge_softc *sc = device_private(self);
    190 	struct pci_attach_args *pa = aux;
    191 	pci_chipset_tag_t pc = pa->pa_pc;
    192 	pci_intr_handle_t *ihp;
    193 	char intrbuf[PCI_INTRSTR_LEN];
    194 	const char *intrstr = NULL;
    195 	struct ifnet *ifp;
    196 	pcireg_t reg;
    197 	uint32_t hwrev;
    198 	uint8_t eaddr[ETHER_ADDR_LEN];
    199 	int offset;
    200 	pcireg_t command;
    201 	const char *revstr;
    202 
    203 	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
    204 
    205 	sc->sc_dev = self;
    206 
    207 	pci_aprint_devinfo(pa, "Ethernet controller");
    208 
     209 	/*
     210 	 * Map control/status registers (64-bit mem, 32-bit mem, then I/O).
     211 	 */
    212 	if (pci_mapreg_map(pa, RGE_PCI_BAR2, PCI_MAPREG_TYPE_MEM |
    213 	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->rge_btag, &sc->rge_bhandle,
    214 	    NULL, &sc->rge_bsize)) {
    215 		if (pci_mapreg_map(pa, RGE_PCI_BAR1, PCI_MAPREG_TYPE_MEM |
    216 		    PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->rge_btag,
    217 		    &sc->rge_bhandle, NULL, &sc->rge_bsize)) {
    218 			if (pci_mapreg_map(pa, RGE_PCI_BAR0, PCI_MAPREG_TYPE_IO,
    219 			    0, &sc->rge_btag, &sc->rge_bhandle, NULL,
    220 			    &sc->rge_bsize)) {
    221 				aprint_error(": can't map mem or i/o space\n");
    222 				return;
    223 			}
    224 		}
    225 	}
    226 
    227 	int counts[PCI_INTR_TYPE_SIZE] = {
    228  		[PCI_INTR_TYPE_INTX] = 1,
    229  		[PCI_INTR_TYPE_MSI] = 1,
    230  		[PCI_INTR_TYPE_MSIX] = 1,
    231  	};
    232 	int max_type = PCI_INTR_TYPE_MSIX;
    233 	/*
    234 	 * Allocate interrupt.
    235 	 */
    236 	if (pci_intr_alloc(pa, &ihp, counts, max_type) != 0) {
    237 		aprint_error(": couldn't map interrupt\n");
    238 		return;
    239 	}
    240 	switch (pci_intr_type(pc, ihp[0])) {
    241 	case PCI_INTR_TYPE_MSIX:
    242 	case PCI_INTR_TYPE_MSI:
    243 		sc->rge_flags |= RGE_FLAG_MSI;
    244 		break;
    245 	default:
    246 		break;
    247 	}
    248 	intrstr = pci_intr_string(pc, ihp[0], intrbuf, sizeof(intrbuf));
    249 	sc->sc_ih = pci_intr_establish_xname(pc, ihp[0], IPL_NET, rge_intr,
    250 	    sc, device_xname(sc->sc_dev));
    251 	if (sc->sc_ih == NULL) {
     252 		aprint_error_dev(sc->sc_dev, "couldn't establish interrupt");
     253 		if (intrstr != NULL)
     254 			aprint_error(" at %s", intrstr);
    255 		aprint_error("\n");
    256 		return;
    257 	}
    258 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
    259 
    260 	if (pci_dma64_available(pa))
    261 		sc->sc_dmat = pa->pa_dmat64;
    262 	else
    263 		sc->sc_dmat = pa->pa_dmat;
    264 
    265 	sc->sc_pc = pa->pa_pc;
    266 	sc->sc_tag = pa->pa_tag;
    267 
    268 	/* Determine hardware revision */
    269 	hwrev = RGE_READ_4(sc, RGE_TXCFG) & RGE_TXCFG_HWREV;
    270 	switch (hwrev) {
    271 	case 0x60800000:
    272 		sc->rge_type = MAC_CFG2;
    273 		revstr = "Z1";
    274 		break;
    275 	case 0x60900000:
    276 		sc->rge_type = MAC_CFG3;
    277 		revstr = "Z2";
    278 		break;
    279 	case 0x64000000:
    280 		sc->rge_type = MAC_CFG4;
    281 		revstr = "A";
    282 		break;
    283 	case 0x64100000:
    284 		sc->rge_type = MAC_CFG5;
    285 		revstr = "B";
    286 		break;
    287 	default:
    288 		aprint_error(": unknown version 0x%08x\n", hwrev);
    289 		return;
    290 	}
    291 
    292 	aprint_normal_dev(sc->sc_dev, "HW rev. %s\n", revstr);
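         	/* Default to simulated (timer based) interrupt moderation. */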
    293 	rge_config_imtype(sc, RGE_IMTYPE_SIM);
    294 
    295 	/*
    296 	 * PCI Express check.
    297 	 */
    298 	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
    299 	    &offset, NULL)) {
    300 		/* Disable PCIe ASPM and ECPM. */
    301 		reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
    302 		    offset + PCIE_LCSR);
    303 		reg &= ~(PCIE_LCSR_ASPM_L0S | PCIE_LCSR_ASPM_L1 |
    304 		    PCIE_LCSR_ENCLKPM);
    305 		pci_conf_write(pa->pa_pc, pa->pa_tag, offset + PCIE_LCSR,
    306 		    reg);
    307 	}
    308 
    309 	rge_exit_oob(sc);
    310 	rge_hw_init(sc);
    311 
    312 	rge_get_macaddr(sc, eaddr);
    313 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
    314 	    ether_sprintf(eaddr));
    315 
    316 	memcpy(sc->sc_enaddr, eaddr, ETHER_ADDR_LEN);
    317 
    318 	rge_set_phy_power(sc, 1);
    319 	rge_phy_config(sc);
    320 
    321 	if (rge_allocmem(sc))
    322 		return;
    323 
    324 	ifp = &sc->sc_ec.ec_if;
    325 	ifp->if_softc = sc;
    326 	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
    327 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    328 #ifdef RGE_MPSAFE
    329 	ifp->if_extflags = IFEF_MPSAFE;
    330 #endif
    331 	ifp->if_ioctl = rge_ioctl;
    332 	ifp->if_stop = rge_stop;
    333 	ifp->if_start = rge_start;
    334 	ifp->if_init = rge_init;
    335 	ifp->if_watchdog = rge_watchdog;
    336 	IFQ_SET_MAXLEN(&ifp->if_snd, RGE_TX_LIST_CNT - 1);
    337 
    338 #if notyet
    339 	ifp->if_capabilities = IFCAP_CSUM_IPv4_Rx |
     340 	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_TCPv4_Tx |
    341 	    IFCAP_CSUM_UDPv4_Rx | IFCAP_CSUM_UDPv4_Tx;
    342 #endif
    343 
    344 	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
    345 	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
    346 
    347 	callout_init(&sc->sc_timeout, CALLOUT_FLAGS);
    348 	callout_setfunc(&sc->sc_timeout, rge_tick, sc);
    349 
    350 	command = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
    351 	command |= PCI_COMMAND_MASTER_ENABLE;
    352 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
    353 
    354 	/* Initialize ifmedia structures. */
    355 	sc->sc_ec.ec_ifmedia = &sc->sc_media;
    356 	ifmedia_init(&sc->sc_media, IFM_IMASK, rge_ifmedia_upd,
    357 	    rge_ifmedia_sts);
    358 	rge_add_media_types(sc);
    359 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
    360 	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
    361 	sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media;
    362 
    363 	if_attach(ifp);
    364 	if_deferred_start_init(ifp, NULL);
    365 	ether_ifattach(ifp, eaddr);
    366 
    367 	if (pmf_device_register(self, NULL, NULL))
    368 		pmf_class_network_register(self, ifp);
    369 	else
    370 		aprint_error_dev(self, "couldn't establish power handler\n");
    371 }
    372 
    373 int
    374 rge_intr(void *arg)
    375 {
    376 	struct rge_softc *sc = arg;
    377 	struct ifnet *ifp = &sc->sc_ec.ec_if;
    378 	uint32_t status;
    379 	int claimed = 0, rx, tx;
    380 
    381 	if (!(ifp->if_flags & IFF_RUNNING))
    382 		return (0);
    383 
    384 	/* Disable interrupts. */
    385 	RGE_WRITE_4(sc, RGE_IMR, 0);
    386 
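         	/*
         	 * With INTx the interrupt line may be shared, so check whether
         	 * any of our interrupt sources are asserted before claiming it.
         	 */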
    387 	if (!(sc->rge_flags & RGE_FLAG_MSI)) {
    388 		if ((RGE_READ_4(sc, RGE_ISR) & sc->rge_intrs) == 0)
    389 			return (0);
    390 	}
    391 
    392 	status = RGE_READ_4(sc, RGE_ISR);
    393 	if (status)
    394 		RGE_WRITE_4(sc, RGE_ISR, status);
    395 
    396 	if (status & RGE_ISR_PCS_TIMEOUT)
    397 		claimed = 1;
    398 
    399 	rx = tx = 0;
    400 	if (status & sc->rge_intrs) {
    401 		if (status &
    402 		    (sc->rge_rx_ack | RGE_ISR_RX_ERR | RGE_ISR_RX_FIFO_OFLOW)) {
    403 			rx |= rge_rxeof(sc);
    404 			claimed = 1;
    405 		}
    406 
    407 		if (status & (sc->rge_tx_ack | RGE_ISR_TX_ERR)) {
    408 			tx |= rge_txeof(sc);
    409 			claimed = 1;
    410 		}
    411 
    412 		if (status & RGE_ISR_SYSTEM_ERR) {
    413 			KERNEL_LOCK(1, NULL);
    414 			rge_init(ifp);
    415 			KERNEL_UNLOCK_ONE(NULL);
    416 			claimed = 1;
    417 		}
    418 	}
    419 
    420 	if (sc->rge_timerintr) {
    421 		if ((tx | rx) == 0) {
    422 			/*
     423 			 * Nothing needs to be processed; fall back
     424 			 * to using TX/RX interrupts.
    425 			 */
    426 			rge_setup_intr(sc, RGE_IMTYPE_NONE);
    427 
    428 			/*
     429 			 * Collect once more, mainly to avoid a
     430 			 * possible race introduced by changing the
     431 			 * interrupt masks.
    432 			 */
    433 			rge_rxeof(sc);
    434 			rge_txeof(sc);
    435 		} else
    436 			RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
    437 	} else if (tx | rx) {
    438 		/*
    439 		 * Assume that using simulated interrupt moderation
    440 		 * (hardware timer based) could reduce the interrupt
    441 		 * rate.
    442 		 */
    443 		rge_setup_intr(sc, RGE_IMTYPE_SIM);
    444 	}
    445 
    446 	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
    447 
    448 	return (claimed);
    449 }
    450 
    451 int
    452 rge_encap(struct rge_softc *sc, struct mbuf *m, int idx)
    453 {
    454 	struct rge_tx_desc *d = NULL;
    455 	struct rge_txq *txq;
    456 	bus_dmamap_t txmap;
    457 	uint32_t cmdsts, cflags = 0;
    458 	int cur, error, i, last, nsegs;
    459 
    460 #if notyet
    461 	/*
    462 	 * Set RGE_TDEXTSTS_IPCSUM if any checksum offloading is requested.
    463 	 * Otherwise, RGE_TDEXTSTS_TCPCSUM / RGE_TDEXTSTS_UDPCSUM does not
     464 	 * take effect.
    465 	 */
    466 	if ((m->m_pkthdr.csum_flags &
    467 	    (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) != 0) {
    468 		cflags |= RGE_TDEXTSTS_IPCSUM;
    469 		if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
    470 			cflags |= RGE_TDEXTSTS_TCPCSUM;
    471 		if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
    472 			cflags |= RGE_TDEXTSTS_UDPCSUM;
    473 	}
    474 #endif
    475 
    476 	txq = &sc->rge_ldata.rge_txq[idx];
    477 	txmap = txq->txq_dmamap;
    478 
    479 	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m, BUS_DMA_NOWAIT);
    480 	switch (error) {
    481 	case 0:
    482 		break;
    483 	case EFBIG: /* mbuf chain is too fragmented */
    484 		if (m_defrag(m, M_DONTWAIT) == 0 &&
    485 		    bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m,
    486 		    BUS_DMA_NOWAIT) == 0)
    487 			break;
    488 
    489 		/* FALLTHROUGH */
    490 	default:
    491 		return (0);
    492 	}
    493 
    494 	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
    495 	    BUS_DMASYNC_PREWRITE);
    496 
    497 	nsegs = txmap->dm_nsegs;
    498 
    499 	/* Set up hardware VLAN tagging. */
    500 	if (vlan_has_tag(m))
    501 		cflags |= bswap16(vlan_get_tag(m)) | RGE_TDEXTSTS_VTAG;
    502 
    503 	last = cur = idx;
    504 	cmdsts = RGE_TDCMDSTS_SOF;
    505 
    506 	for (i = 0; i < txmap->dm_nsegs; i++) {
    507 		d = &sc->rge_ldata.rge_tx_list[cur];
    508 
    509 		d->rge_extsts = htole32(cflags);
    510 		d->rge_addrlo = htole32(RGE_ADDR_LO(txmap->dm_segs[i].ds_addr));
    511 		d->rge_addrhi = htole32(RGE_ADDR_HI(txmap->dm_segs[i].ds_addr));
    512 
    513 		cmdsts |= txmap->dm_segs[i].ds_len;
    514 
    515 		if (cur == RGE_TX_LIST_CNT - 1)
    516 			cmdsts |= RGE_TDCMDSTS_EOR;
    517 
    518 		d->rge_cmdsts = htole32(cmdsts);
    519 
    520 		last = cur;
    521 		cmdsts = RGE_TDCMDSTS_OWN;
    522 		cur = RGE_NEXT_TX_DESC(cur);
    523 	}
    524 
    525 	/* Set EOF on the last descriptor. */
    526 	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_EOF);
    527 
    528 	/* Transfer ownership of packet to the chip. */
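         	/*
         	 * The first descriptor is handed to the chip (OWN) only after
         	 * the rest of the chain has been built, so the chip never sees
         	 * a half-constructed packet.
         	 */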
    529 	d = &sc->rge_ldata.rge_tx_list[idx];
    530 
    531 	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_OWN);
    532 
    533 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
    534 	    cur * sizeof(struct rge_tx_desc), sizeof(struct rge_tx_desc),
    535 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    536 
    537 	/* Update info of TX queue and descriptors. */
    538 	txq->txq_mbuf = m;
    539 	txq->txq_descidx = last;
    540 
    541 	return (nsegs);
    542 }
    543 
    544 int
    545 rge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
    546 {
    547 	struct rge_softc *sc = ifp->if_softc;
    548 	//struct ifreq *ifr = (struct ifreq *)data;
    549 	int s, error = 0;
    550 
    551 	s = splnet();
    552 
    553 	switch (cmd) {
    554 	case SIOCSIFFLAGS:
    555 		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
    556 			break;
    557 		/* XXX set an ifflags callback and let ether_ioctl
    558 		 * handle all of this.
    559 		 */
    560 		if (ifp->if_flags & IFF_UP) {
    561 			if (ifp->if_flags & IFF_RUNNING)
    562 				error = ENETRESET;
    563 			else
    564 				rge_init(ifp);
    565 		} else {
    566 			if (ifp->if_flags & IFF_RUNNING)
    567 				rge_stop(ifp, 1);
    568 		}
    569 		break;
    570 	default:
    571 		error = ether_ioctl(ifp, cmd, data);
    572 	}
    573 
    574 	if (error == ENETRESET) {
    575 		if (ifp->if_flags & IFF_RUNNING)
    576 			rge_iff(sc);
    577 		error = 0;
    578 	}
    579 
    580 	splx(s);
    581 	return (error);
    582 }
    583 
    584 void
    585 rge_start(struct ifnet *ifp)
    586 {
    587 	struct rge_softc *sc = ifp->if_softc;
    588 	struct mbuf *m;
    589 	int free, idx, used;
    590 	int queued = 0;
    591 
    592 #define LINK_STATE_IS_UP(_s)    \
    593 	((_s) >= LINK_STATE_UP || (_s) == LINK_STATE_UNKNOWN)
    594 
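         	/* No link: drop queued packets rather than let them pile up. */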
    595 	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
    596 		IFQ_PURGE(&ifp->if_snd);
    597 		return;
    598 	}
    599 
     600 	/* Calculate free slots: consumer minus producer, modulo ring size. */
    601 	idx = sc->rge_ldata.rge_txq_prodidx;
    602 	free = sc->rge_ldata.rge_txq_considx;
    603 	if (free <= idx)
    604 		free += RGE_TX_LIST_CNT;
    605 	free -= idx;
    606 
    607 	for (;;) {
    608 		if (RGE_TX_NSEGS >= free + 2) {
    609 			SET(ifp->if_flags, IFF_OACTIVE);
    610 			break;
    611 		}
    612 
    613 		IFQ_DEQUEUE(&ifp->if_snd, m);
    614 		if (m == NULL)
    615 			break;
    616 
    617 		used = rge_encap(sc, m, idx);
    618 		if (used == 0) {
    619 			m_freem(m);
    620 			continue;
    621 		}
    622 
    623 		KASSERT(used <= free);
    624 		free -= used;
    625 
    626 		bpf_mtap(ifp, m, BPF_D_OUT);
    627 
    628 		idx += used;
    629 		if (idx >= RGE_TX_LIST_CNT)
    630 			idx -= RGE_TX_LIST_CNT;
    631 
    632 		queued++;
    633 	}
    634 
    635 	if (queued == 0)
    636 		return;
    637 
    638 	/* Set a timeout in case the chip goes out to lunch. */
    639 	ifp->if_timer = 5;
    640 
    641 	sc->rge_ldata.rge_txq_prodidx = idx;
    642 	rge_txstart(sc);
    643 }
    644 
    645 void
    646 rge_watchdog(struct ifnet *ifp)
    647 {
    648 	struct rge_softc *sc = ifp->if_softc;
    649 
    650 	device_printf(sc->sc_dev, "watchdog timeout\n");
    651 	if_statinc(ifp, if_oerrors);
    652 
    653 	rge_init(ifp);
    654 }
    655 
    656 int
    657 rge_init(struct ifnet *ifp)
    658 {
    659 	struct rge_softc *sc = ifp->if_softc;
    660 	uint32_t val;
    661 	unsigned i;
    662 
    663 	rge_stop(ifp, 0);
    664 
    665 	/* Set MAC address. */
    666 	rge_set_macaddr(sc, CLLADDR(ifp->if_sadl));
    667 
    668 	/* Set Maximum frame size. */
    669 	RGE_WRITE_2(sc, RGE_RXMAXSIZE, RGE_JUMBO_FRAMELEN);
    670 
    671 	/* Initialize RX descriptors list. */
    672 	int error = rge_rx_list_init(sc);
    673 	if (error != 0) {
    674 		device_printf(sc->sc_dev,
    675 		    "init failed: no memory for RX buffers\n");
    676 		rge_stop(ifp, 1);
    677 		return error;
    678 	}
    679 
    680 	/* Initialize TX descriptors. */
    681 	rge_tx_list_init(sc);
    682 
    683 	/* Load the addresses of the RX and TX lists into the chip. */
    684 	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_LO,
    685 	    RGE_ADDR_LO(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
    686 	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_HI,
    687 	    RGE_ADDR_HI(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
    688 	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_LO,
    689 	    RGE_ADDR_LO(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
    690 	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_HI,
    691 	    RGE_ADDR_HI(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
    692 
    693 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
    694 
    695 	RGE_CLRBIT_1(sc, 0xf1, 0x80);
    696 	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
    697 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
    698 	RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_RDY_TO_L23);
    699 
    700 	/* Clear interrupt moderation timer. */
    701 	for (i = 0; i < 64; i++)
    702 		RGE_WRITE_4(sc, RGE_INTMITI(i), 0);
    703 
    704 	/* Set the initial RX and TX configurations. */
    705 	RGE_WRITE_4(sc, RGE_RXCFG, RGE_RXCFG_CONFIG);
    706 	RGE_WRITE_4(sc, RGE_TXCFG, RGE_TXCFG_CONFIG);
    707 
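         	/*
         	 * The CSI and MAC OCP values below are undocumented magic,
         	 * apparently taken from Realtek's reference driver.
         	 */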
    708 	val = rge_read_csi(sc, 0x70c) & ~0xff000000;
    709 	rge_write_csi(sc, 0x70c, val | 0x27000000);
    710 
    711 	/* Enable hardware optimization function. */
    712 	val = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x78) & ~0x00007000;
    713 	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x78, val | 0x00005000);
    714 
    715 	RGE_WRITE_2(sc, 0x0382, 0x221b);
    716 	RGE_WRITE_1(sc, 0x4500, 0);
    717 	RGE_WRITE_2(sc, 0x4800, 0);
    718 	RGE_CLRBIT_1(sc, RGE_CFG1, RGE_CFG1_SPEED_DOWN);
    719 
    720 	rge_write_mac_ocp(sc, 0xc140, 0xffff);
    721 	rge_write_mac_ocp(sc, 0xc142, 0xffff);
    722 
    723 	val = rge_read_mac_ocp(sc, 0xd3e2) & ~0x0fff;
    724 	rge_write_mac_ocp(sc, 0xd3e2, val | 0x03a9);
    725 
    726 	RGE_MAC_CLRBIT(sc, 0xd3e4, 0x00ff);
    727 	RGE_MAC_SETBIT(sc, 0xe860, 0x0080);
    728 	RGE_MAC_SETBIT(sc, 0xeb58, 0x0001);
    729 
    730 	val = rge_read_mac_ocp(sc, 0xe614) & ~0x0700;
    731 	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
    732 		rge_write_mac_ocp(sc, 0xe614, val | 0x0400);
    733 	else
    734 		rge_write_mac_ocp(sc, 0xe614, val | 0x0200);
    735 
    736 	RGE_MAC_CLRBIT(sc, 0xe63e, 0x0c00);
    737 
    738 	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
    739 		val = rge_read_mac_ocp(sc, 0xe63e) & ~0x0030;
    740 		rge_write_mac_ocp(sc, 0xe63e, val | 0x0020);
    741 	} else
    742 		RGE_MAC_CLRBIT(sc, 0xe63e, 0x0030);
    743 
    744 	RGE_MAC_SETBIT(sc, 0xc0b4, 0x000c);
    745 
    746 	val = rge_read_mac_ocp(sc, 0xeb6a) & ~0x00ff;
    747 	rge_write_mac_ocp(sc, 0xeb6a, val | 0x0033);
    748 
    749 	val = rge_read_mac_ocp(sc, 0xeb50) & ~0x03e0;
    750 	rge_write_mac_ocp(sc, 0xeb50, val | 0x0040);
    751 
    752 	val = rge_read_mac_ocp(sc, 0xe056) & ~0x00f0;
    753 	rge_write_mac_ocp(sc, 0xe056, val | 0x0030);
    754 
    755 	RGE_WRITE_1(sc, RGE_TDFNR, 0x10);
    756 
    757 	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_TX_10M_PS_EN);
    758 
    759 	RGE_MAC_CLRBIT(sc, 0xe040, 0x1000);
    760 
    761 	val = rge_read_mac_ocp(sc, 0xea1c) & ~0x0003;
    762 	rge_write_mac_ocp(sc, 0xea1c, val | 0x0001);
    763 
    764 	val = rge_read_mac_ocp(sc, 0xe0c0) & ~0x4f0f;
    765 	rge_write_mac_ocp(sc, 0xe0c0, val | 0x4403);
    766 
    767 	RGE_MAC_SETBIT(sc, 0xe052, 0x0068);
    768 	RGE_MAC_CLRBIT(sc, 0xe052, 0x0080);
    769 
    770 	val = rge_read_mac_ocp(sc, 0xc0ac) & ~0x0080;
    771 	rge_write_mac_ocp(sc, 0xc0ac, val | 0x1f00);
    772 
    773 	val = rge_read_mac_ocp(sc, 0xd430) & ~0x0fff;
    774 	rge_write_mac_ocp(sc, 0xd430, val | 0x047f);
    775 
    776 	val = rge_read_mac_ocp(sc, 0xe84c) & ~0x0040;
    777 	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
    778 		rge_write_mac_ocp(sc, 0xe84c, 0x00c0);
    779 	else
    780 		rge_write_mac_ocp(sc, 0xe84c, 0x0080);
    781 
    782 	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_PFM_EN);
    783 
    784 	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
    785 		RGE_SETBIT_1(sc, RGE_MCUCMD, 0x01);
    786 
    787 	/* Disable EEE plus. */
    788 	RGE_MAC_CLRBIT(sc, 0xe080, 0x0002);
    789 
    790 	RGE_MAC_CLRBIT(sc, 0xea1c, 0x0004);
    791 
    792 	RGE_MAC_SETBIT(sc, 0xeb54, 0x0001);
    793 	DELAY(1);
    794 	RGE_MAC_CLRBIT(sc, 0xeb54, 0x0001);
    795 
    796 	RGE_CLRBIT_4(sc, 0x1880, 0x0030);
    797 
    798 	rge_write_mac_ocp(sc, 0xe098, 0xc302);
    799 
    800 	if ((sc->sc_ec.ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0)
    801 		RGE_SETBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);
    802 	else
    803 		RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);
    804 
    805 	RGE_SETBIT_2(sc, RGE_CPLUSCMD, RGE_CPLUSCMD_RXCSUM);
    806 
    807 	for (i = 0; i < 10; i++) {
    808 		if (!(rge_read_mac_ocp(sc, 0xe00e) & 0x2000))
    809 			break;
    810 		DELAY(1000);
    811 	}
    812 
    813 	/* Disable RXDV gate. */
    814 	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
    815 	DELAY(2000);
    816 
    817 	rge_ifmedia_upd(ifp);
    818 
    819 	/* Enable transmit and receive. */
    820 	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_TXENB | RGE_CMD_RXENB);
    821 
    822 	/* Program promiscuous mode and multicast filters. */
    823 	rge_iff(sc);
    824 
    825 	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
    826 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
    827 
    828 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
    829 
    830 	/* Enable interrupts. */
    831 	rge_setup_intr(sc, RGE_IMTYPE_SIM);
    832 
    833 	ifp->if_flags |= IFF_RUNNING;
    834 	CLR(ifp->if_flags, IFF_OACTIVE);
    835 
    836 	callout_schedule(&sc->sc_timeout, 1);
    837 
    838 	return (0);
    839 }
    840 
    841 /*
    842  * Stop the adapter and free any mbufs allocated to the RX and TX lists.
    843  */
    844 void
    845 rge_stop(struct ifnet *ifp, int disable)
    846 {
    847 	struct rge_softc *sc = ifp->if_softc;
    848 
    849 	callout_halt(&sc->sc_timeout, NULL);
    850 
    851 	ifp->if_timer = 0;
    852 	ifp->if_flags &= ~IFF_RUNNING;
    853 	sc->rge_timerintr = 0;
    854 
    855 	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
    856 	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
    857 	    RGE_RXCFG_ERRPKT);
    858 
    859 	RGE_WRITE_4(sc, RGE_IMR, 0);
    860 
    861 	/* Clear timer interrupts. */
    862 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
    863 	RGE_WRITE_4(sc, RGE_TIMERINT1, 0);
    864 	RGE_WRITE_4(sc, RGE_TIMERINT2, 0);
    865 	RGE_WRITE_4(sc, RGE_TIMERINT3, 0);
    866 
    867 	rge_reset(sc);
    868 
    869 //	intr_barrier(sc->sc_ih);
    870 //	ifq_barrier(&ifp->if_snd);
    871 /*	ifq_clr_oactive(&ifp->if_snd); Sevan - OpenBSD queue API */
    872 
    873 	if (sc->rge_head != NULL) {
    874 		m_freem(sc->rge_head);
    875 		sc->rge_head = sc->rge_tail = NULL;
    876 	}
    877 
    878 	rge_tx_list_fini(sc);
    879 	rge_rx_list_fini(sc);
    880 }
    881 
    882 /*
    883  * Set media options.
    884  */
    885 int
    886 rge_ifmedia_upd(struct ifnet *ifp)
    887 {
    888 	struct rge_softc *sc = ifp->if_softc;
    889 	struct ifmedia *ifm = &sc->sc_media;
    890 	int anar, gig, val;
    891 
    892 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
    893 		return (EINVAL);
    894 
    895 	/* Disable Gigabit Lite. */
    896 	RGE_PHY_CLRBIT(sc, 0xa428, 0x0200);
    897 	RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0001);
    898 
    899 	val = rge_read_phy_ocp(sc, 0xa5d4);
    900 	val &= ~RGE_ADV_2500TFDX;
    901 
    902 	anar = gig = 0;
    903 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
    904 	case IFM_AUTO:
    905 		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
    906 		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
    907 		val |= RGE_ADV_2500TFDX;
    908 		break;
    909 	case IFM_2500_T:
    910 		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
    911 		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
    912 		val |= RGE_ADV_2500TFDX;
    913 		ifp->if_baudrate = IF_Mbps(2500);
    914 		break;
    915 	case IFM_1000_T:
    916 		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
    917 		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
    918 		ifp->if_baudrate = IF_Gbps(1);
    919 		break;
    920 	case IFM_100_TX:
    921 		gig = rge_read_phy(sc, 0, MII_100T2CR) &
    922 		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
    923 		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
    924 		    ANAR_TX | ANAR_TX_FD | ANAR_10_FD | ANAR_10 :
    925 		    ANAR_TX | ANAR_10_FD | ANAR_10;
    926 		ifp->if_baudrate = IF_Mbps(100);
    927 		break;
    928 	case IFM_10_T:
    929 		gig = rge_read_phy(sc, 0, MII_100T2CR) &
    930 		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
    931 		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
    932 		    ANAR_10_FD | ANAR_10 : ANAR_10;
    933 		ifp->if_baudrate = IF_Mbps(10);
    934 		break;
    935 	default:
    936 		device_printf(sc->sc_dev,
    937 		    "unsupported media type\n");
    938 		return (EINVAL);
    939 	}
    940 
    941 	rge_write_phy(sc, 0, MII_ANAR, anar | ANAR_PAUSE_ASYM | ANAR_FC);
    942 	rge_write_phy(sc, 0, MII_100T2CR, gig);
    943 	rge_write_phy_ocp(sc, 0xa5d4, val);
    944 	rge_write_phy(sc, 0, MII_BMCR, BMCR_RESET | BMCR_AUTOEN |
    945 	    BMCR_STARTNEG);
    946 
    947 	return (0);
    948 }
    949 
    950 /*
    951  * Report current media status.
    952  */
    953 void
    954 rge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
    955 {
    956 	struct rge_softc *sc = ifp->if_softc;
    957 	uint16_t status = 0;
    958 
    959 	ifmr->ifm_status = IFM_AVALID;
    960 	ifmr->ifm_active = IFM_ETHER;
    961 
    962 	if (rge_get_link_status(sc)) {
    963 		ifmr->ifm_status |= IFM_ACTIVE;
    964 
    965 		status = RGE_READ_2(sc, RGE_PHYSTAT);
    966 		if ((status & RGE_PHYSTAT_FDX) ||
    967 		    (status & RGE_PHYSTAT_2500MBPS))
    968 			ifmr->ifm_active |= IFM_FDX;
    969 		else
    970 			ifmr->ifm_active |= IFM_HDX;
    971 
    972 		if (status & RGE_PHYSTAT_10MBPS)
    973 			ifmr->ifm_active |= IFM_10_T;
    974 		else if (status & RGE_PHYSTAT_100MBPS)
    975 			ifmr->ifm_active |= IFM_100_TX;
    976 		else if (status & RGE_PHYSTAT_1000MBPS)
    977 			ifmr->ifm_active |= IFM_1000_T;
    978 		else if (status & RGE_PHYSTAT_2500MBPS)
    979 			ifmr->ifm_active |= IFM_2500_T;
    980 	}
    981 }
    982 
    983 /*
    984  * Allocate memory for RX/TX rings.
    985  *
     986  * XXX There is no tear-down for this if any part fails, so everything
    987  * remains allocated.
    988  */
    989 int
    990 rge_allocmem(struct rge_softc *sc)
    991 {
    992 	int error, i;
    993 
    994 	/* Allocate DMA'able memory for the TX ring. */
    995 	error = bus_dmamap_create(sc->sc_dmat, RGE_TX_LIST_SZ, 1,
    996 	    RGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT, &sc->rge_ldata.rge_tx_list_map);
    997 	if (error) {
    998 		aprint_error_dev(sc->sc_dev, "can't create TX list map\n");
    999 		return (error);
   1000 	}
   1001 	error = bus_dmamem_alloc(sc->sc_dmat, RGE_TX_LIST_SZ, RGE_ALIGN, 0,
   1002 	    &sc->rge_ldata.rge_tx_listseg, 1, &sc->rge_ldata.rge_tx_listnseg,
   1003 	    BUS_DMA_NOWAIT);
   1004 	if (error) {
   1005 		aprint_error_dev(sc->sc_dev, "can't alloc TX list\n");
   1006 		return (error);
   1007 	}
   1008 
   1009 	/* Load the map for the TX ring. */
   1010 	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
   1011 	    sc->rge_ldata.rge_tx_listnseg, RGE_TX_LIST_SZ,
   1012 	    (void **) &sc->rge_ldata.rge_tx_list,
   1013 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
   1014 	if (error) {
   1015 		aprint_error_dev(sc->sc_dev, "can't map TX dma buffers\n");
   1016 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
   1017 		    sc->rge_ldata.rge_tx_listnseg);
   1018 		return (error);
   1019 	}
   1020 	memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);
   1021 	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
   1022 	    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
   1023 	if (error) {
   1024 		aprint_error_dev(sc->sc_dev, "can't load TX dma map\n");
   1025 		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map);
   1026 		bus_dmamem_unmap(sc->sc_dmat,
   1027 		    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ);
   1028 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
   1029 		    sc->rge_ldata.rge_tx_listnseg);
   1030 		return (error);
   1031 	}
   1032 
   1033 	/* Create DMA maps for TX buffers. */
   1034 	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
   1035 		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN,
   1036 		    RGE_TX_NSEGS, RGE_JUMBO_FRAMELEN, 0, 0,
   1037 		    &sc->rge_ldata.rge_txq[i].txq_dmamap);
   1038 		if (error) {
   1039 			aprint_error_dev(sc->sc_dev, "can't create DMA map for TX\n");
   1040 			return (error);
   1041 		}
   1042 	}
   1043 
   1044 	/* Allocate DMA'able memory for the RX ring. */
   1045 	error = bus_dmamap_create(sc->sc_dmat, RGE_RX_LIST_SZ, 1,
   1046 	    RGE_RX_LIST_SZ, 0, 0, &sc->rge_ldata.rge_rx_list_map);
   1047 	if (error) {
   1048 		aprint_error_dev(sc->sc_dev, "can't create RX list map\n");
   1049 		return (error);
   1050 	}
   1051 	error = bus_dmamem_alloc(sc->sc_dmat, RGE_RX_LIST_SZ, RGE_ALIGN, 0,
   1052 	    &sc->rge_ldata.rge_rx_listseg, 1, &sc->rge_ldata.rge_rx_listnseg,
   1053 	    BUS_DMA_NOWAIT);
   1054 	if (error) {
   1055 		aprint_error_dev(sc->sc_dev, "can't alloc RX list\n");
   1056 		return (error);
   1057 	}
   1058 
   1059 	/* Load the map for the RX ring. */
   1060 	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
   1061 	    sc->rge_ldata.rge_rx_listnseg, RGE_RX_LIST_SZ,
   1062 	    (void **) &sc->rge_ldata.rge_rx_list,
   1063 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
   1064 	if (error) {
   1065 		aprint_error_dev(sc->sc_dev, "can't map RX dma buffers\n");
   1066 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
   1067 		    sc->rge_ldata.rge_rx_listnseg);
   1068 		return (error);
   1069 	}
   1070 	memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);
   1071 	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
   1072 	    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
   1073 	if (error) {
   1074 		aprint_error_dev(sc->sc_dev, "can't load RX dma map\n");
   1075 		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map);
   1076 		bus_dmamem_unmap(sc->sc_dmat,
   1077 		    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ);
   1078 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
   1079 		    sc->rge_ldata.rge_rx_listnseg);
   1080 		return (error);
   1081 	}
   1082 
   1083 	/*
   1084 	 * Create DMA maps for RX buffers.  Use BUS_DMA_ALLOCNOW to avoid any
   1085 	 * potential failure in bus_dmamap_load_mbuf() in the RX path.
   1086 	 */
   1087 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
   1088 		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN, 1,
   1089 		    RGE_JUMBO_FRAMELEN, 0, BUS_DMA_ALLOCNOW,
   1090 		    &sc->rge_ldata.rge_rxq[i].rxq_dmamap);
   1091 		if (error) {
   1092 			aprint_error_dev(sc->sc_dev, "can't create DMA map for RX\n");
   1093 			return (error);
   1094 		}
   1095 	}
   1096 
   1097 	return (error);
   1098 }
   1099 
   1100 /*
   1101  * Set an RX descriptor and sync it.
   1102  */
   1103 static void
   1104 rge_load_rxbuf(struct rge_softc *sc, int idx)
   1105 {
   1106 	struct rge_rx_desc *r = &sc->rge_ldata.rge_rx_list[idx];
   1107 	struct rge_rxq *rxq = &sc->rge_ldata.rge_rxq[idx];
   1108 	bus_dmamap_t rxmap = rxq->rxq_dmamap;
   1109 	uint32_t cmdsts;
   1110 
   1111 	cmdsts = rxmap->dm_segs[0].ds_len | RGE_RDCMDSTS_OWN;
   1112 	if (idx == RGE_RX_LIST_CNT - 1)
   1113 		cmdsts |= RGE_RDCMDSTS_EOR;
   1114 
   1115 	r->hi_qword0.rge_addr = htole64(rxmap->dm_segs[0].ds_addr);
   1116 	r->hi_qword1.rx_qword4.rge_extsts = 0;
   1117 	r->hi_qword1.rx_qword4.rge_cmdsts = htole32(cmdsts);
   1118 
   1119 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
   1120 	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
   1121 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1122 }
   1123 
   1124 /*
   1125  * Initialize the RX descriptor and attach an mbuf cluster.
   1126  */
   1127 int
   1128 rge_newbuf(struct rge_softc *sc, int idx)
   1129 {
   1130 	struct mbuf *m;
   1131 	struct rge_rxq *rxq;
   1132 	bus_dmamap_t rxmap;
   1133 	int error __diagused;
   1134 
   1135 	m = MCLGETL(NULL, M_DONTWAIT, RGE_JUMBO_FRAMELEN);
   1136 	if (m == NULL)
   1137 		return (ENOBUFS);
   1138 
   1139 	m->m_len = m->m_pkthdr.len = RGE_JUMBO_FRAMELEN;
   1140 
   1141 	rxq = &sc->rge_ldata.rge_rxq[idx];
   1142 	rxmap = rxq->rxq_dmamap;
   1143 
   1144 	if (rxq->rxq_mbuf != NULL)
   1145 		bus_dmamap_unload(sc->sc_dmat, rxq->rxq_dmamap);
   1146 
    1147 	/* Created with BUS_DMA_ALLOCNOW, so the load should never fail. */
   1148 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m, BUS_DMA_NOWAIT);
   1149 	KASSERTMSG(error == 0, "error=%d", error);
   1150 
   1151 	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
   1152 	    BUS_DMASYNC_PREREAD);
   1153 
   1154 	/* Map the segments into RX descriptors. */
   1155 
   1156 	rxq->rxq_mbuf = m;
   1157 	rge_load_rxbuf(sc, idx);
   1158 
   1159 	return 0;
   1160 }
   1161 
   1162 static int
   1163 rge_rx_list_init(struct rge_softc *sc)
   1164 {
   1165 	unsigned i;
   1166 
   1167 	memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);
   1168 
   1169 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
   1170 		sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
   1171 		if (rge_newbuf(sc, i) != 0) {
   1172 			rge_rx_list_fini(sc);
   1173 			return (ENOBUFS);
   1174 		}
   1175 	}
   1176 
   1177 	sc->rge_ldata.rge_rxq_prodidx = sc->rge_ldata.rge_rxq_considx = 0;
   1178 	sc->rge_head = sc->rge_tail = NULL;
   1179 
   1180 	return (0);
   1181 }
   1182 
   1183 static void
   1184 rge_rx_list_fini(struct rge_softc *sc)
   1185 {
   1186 	unsigned i;
   1187 
   1188 	/* Free the RX list buffers. */
   1189 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
   1190 		if (sc->rge_ldata.rge_rxq[i].rxq_mbuf != NULL) {
   1191 			bus_dmamap_unload(sc->sc_dmat,
   1192 			    sc->rge_ldata.rge_rxq[i].rxq_dmamap);
   1193 			m_freem(sc->rge_ldata.rge_rxq[i].rxq_mbuf);
   1194 			sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
   1195 		}
   1196 	}
   1197 }
   1198 
   1199 static void
   1200 rge_tx_list_init(struct rge_softc *sc)
   1201 {
   1202 	unsigned i;
   1203 
   1204 	memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);
   1205 
   1206 	for (i = 0; i < RGE_TX_LIST_CNT; i++)
   1207 		sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
   1208 
   1209 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map, 0,
   1210 	    sc->rge_ldata.rge_tx_list_map->dm_mapsize,
   1211 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1212 
   1213 	sc->rge_ldata.rge_txq_prodidx = sc->rge_ldata.rge_txq_considx = 0;
   1214 }
   1215 
   1216 static void
   1217 rge_tx_list_fini(struct rge_softc *sc)
   1218 {
   1219 	unsigned i;
   1220 
   1221 	/* Free the TX list buffers. */
   1222 	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
   1223 		if (sc->rge_ldata.rge_txq[i].txq_mbuf != NULL) {
   1224 			bus_dmamap_unload(sc->sc_dmat,
   1225 			    sc->rge_ldata.rge_txq[i].txq_dmamap);
   1226 			m_freem(sc->rge_ldata.rge_txq[i].txq_mbuf);
   1227 			sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
   1228 		}
   1229 	}
   1230 }
   1231 
   1232 int
   1233 rge_rxeof(struct rge_softc *sc)
   1234 {
   1235 	struct mbuf *m;
   1236 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   1237 	struct rge_rx_desc *cur_rx;
   1238 	struct rge_rxq *rxq;
   1239 	uint32_t rxstat, extsts;
   1240 	int i, total_len, rx = 0;
   1241 
   1242 	for (i = sc->rge_ldata.rge_rxq_considx; ; i = RGE_NEXT_RX_DESC(i)) {
   1243 		/* Invalidate the descriptor memory. */
   1244 		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
   1245 		    i * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
   1246 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1247 
   1248 		cur_rx = &sc->rge_ldata.rge_rx_list[i];
   1249 
   1250 		if (RGE_OWN(cur_rx))
   1251 			break;
   1252 
   1253 		rxstat = letoh32(cur_rx->hi_qword1.rx_qword4.rge_cmdsts);
   1254 		extsts = letoh32(cur_rx->hi_qword1.rx_qword4.rge_extsts);
   1255 
   1256 		total_len = RGE_RXBYTES(cur_rx);
   1257 		rxq = &sc->rge_ldata.rge_rxq[i];
   1258 		m = rxq->rxq_mbuf;
   1259 		rx = 1;
   1260 
   1261 		/* Invalidate the RX mbuf. */
   1262 		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_dmamap, 0,
   1263 		    rxq->rxq_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   1264 
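         		/*
         		 * A frame is expected to fit in a single buffer, with
         		 * SOF and EOF set on the same descriptor; anything else
         		 * is counted as a receive error.
         		 */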
   1265 		if ((rxstat & (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) !=
   1266 		    (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) {
   1267 			if_statinc(ifp, if_ierrors);
   1268 			rge_load_rxbuf(sc, i);
   1269 			continue;
   1270 		}
   1271 
   1272 		if (rxstat & RGE_RDCMDSTS_RXERRSUM) {
   1273 			if_statinc(ifp, if_ierrors);
   1274 			/*
   1275 			 * If this is part of a multi-fragment packet,
   1276 			 * discard all the pieces.
   1277 			 */
   1278 			if (sc->rge_head != NULL) {
   1279 				m_freem(sc->rge_head);
   1280 				sc->rge_head = sc->rge_tail = NULL;
   1281 			}
   1282 			rge_load_rxbuf(sc, i);
   1283 			continue;
   1284 		}
   1285 
   1286 		/*
   1287 		 * If allocating a replacement mbuf fails,
   1288 		 * reload the current one.
   1289 		 */
   1290 		if (rge_newbuf(sc, i) != 0) {
   1291 			if_statinc(ifp, if_iqdrops);
   1292 			if (sc->rge_head != NULL) {
   1293 				m_freem(sc->rge_head);
   1294 				sc->rge_head = sc->rge_tail = NULL;
   1295 			}
   1296 			rge_load_rxbuf(sc, i);
   1297 			continue;
   1298 		}
   1299 
   1300 		m_set_rcvif(m, ifp);
   1301 		if (sc->rge_head != NULL) {
   1302 			m->m_len = total_len;
   1303 			/*
    1304 			 * Special case: if there are 4 bytes or less
    1305 			 * in this buffer, the mbuf can be discarded:
    1306 			 * the last 4 bytes are the CRC, which we don't
   1307 			 * care about anyway.
   1308 			 */
   1309 			if (m->m_len <= ETHER_CRC_LEN) {
   1310 				sc->rge_tail->m_len -=
   1311 				    (ETHER_CRC_LEN - m->m_len);
   1312 				m_freem(m);
   1313 			} else {
   1314 				m->m_len -= ETHER_CRC_LEN;
   1315 				m->m_flags &= ~M_PKTHDR;
   1316 				sc->rge_tail->m_next = m;
   1317 			}
   1318 			m = sc->rge_head;
   1319 			sc->rge_head = sc->rge_tail = NULL;
   1320 			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
   1321 		} else
   1322 	#if 0
   1323 			m->m_pkthdr.len = m->m_len =
   1324 			    (total_len - ETHER_CRC_LEN);
   1325 	#else
   1326 		{
   1327 			m->m_pkthdr.len = m->m_len = total_len;
   1328 			m->m_flags |= M_HASFCS;
   1329 		}
   1330 	#endif
   1331 
   1332 #if notyet
   1333 		/* Check IP header checksum. */
   1334 		if (!(extsts & RGE_RDEXTSTS_IPCSUMERR) &&
   1335 		    (extsts & RGE_RDEXTSTS_IPV4))
   1336 			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
   1337 
   1338 		/* Check TCP/UDP checksum. */
   1339 		if ((extsts & (RGE_RDEXTSTS_IPV4 | RGE_RDEXTSTS_IPV6)) &&
   1340 		    (((extsts & RGE_RDEXTSTS_TCPPKT) &&
   1341 		    !(extsts & RGE_RDEXTSTS_TCPCSUMERR)) ||
   1342 		    ((extsts & RGE_RDEXTSTS_UDPPKT) &&
   1343 		    !(extsts & RGE_RDEXTSTS_UDPCSUMERR))))
   1344 			m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
   1345 			    M_UDP_CSUM_IN_OK;
   1346 #endif
   1347 
   1348 		if (extsts & RGE_RDEXTSTS_VTAG) {
   1349 			vlan_set_tag(m,
   1350 			    bswap16(extsts & RGE_RDEXTSTS_VLAN_MASK));
   1351 		}
   1352 
   1353 		if_percpuq_enqueue(ifp->if_percpuq, m);
   1354 	}
   1355 
   1356 	sc->rge_ldata.rge_rxq_considx = i;
   1357 
   1358 	return (rx);
   1359 }
   1360 
   1361 int
   1362 rge_txeof(struct rge_softc *sc)
   1363 {
   1364 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   1365 	struct rge_txq *txq;
   1366 	uint32_t txstat;
   1367 	int cons, idx, prod;
   1368 	int free = 0;
   1369 
   1370 	prod = sc->rge_ldata.rge_txq_prodidx;
   1371 	cons = sc->rge_ldata.rge_txq_considx;
   1372 
   1373 	while (prod != cons) {
   1374 		txq = &sc->rge_ldata.rge_txq[cons];
   1375 		idx = txq->txq_descidx;
   1376 
   1377 		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
   1378 		    idx * sizeof(struct rge_tx_desc),
   1379 		    sizeof(struct rge_tx_desc),
   1380 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1381 
   1382 		txstat = letoh32(sc->rge_ldata.rge_tx_list[idx].rge_cmdsts);
   1383 
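         		/*
         		 * free == 2 records that the chip still owns this
         		 * descriptor; transmission is kicked again once the
         		 * consumer index has been updated below.
         		 */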
   1384 		if (txstat & RGE_TDCMDSTS_OWN) {
   1385 			free = 2;
   1386 			break;
   1387 		}
   1388 
   1389 		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap, 0,
   1390 		    txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   1391 		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
   1392 		m_freem(txq->txq_mbuf);
   1393 		txq->txq_mbuf = NULL;
   1394 
   1395 		net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   1396 		if (txstat & (RGE_TDCMDSTS_EXCESSCOLL | RGE_TDCMDSTS_COLL))
   1397 			if_statinc_ref(nsr, if_collisions);
   1398 		if (txstat & RGE_TDCMDSTS_TXERR)
   1399 			if_statinc_ref(nsr, if_oerrors);
   1400 		else
   1401 			if_statinc_ref(nsr, if_opackets);
   1402 		IF_STAT_PUTREF(ifp);
   1403 
   1404 		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
   1405 		    idx * sizeof(struct rge_tx_desc),
   1406 		    sizeof(struct rge_tx_desc),
   1407 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1408 
   1409 		cons = RGE_NEXT_TX_DESC(idx);
   1410 		free = 1;
   1411 	}
   1412 
   1413 	if (free == 0)
   1414 		return (0);
   1415 
   1416 	sc->rge_ldata.rge_txq_considx = cons;
   1417 
   1418 	if (free == 2)
   1419 		rge_txstart(sc);
   1420 
   1421 	CLR(ifp->if_flags, IFF_OACTIVE);
   1422 	ifp->if_timer = 0;
   1423 	if_schedule_deferred_start(ifp);
   1424 
   1425 	return (1);
   1426 }
   1427 
   1428 void
   1429 rge_reset(struct rge_softc *sc)
   1430 {
   1431 	int i;
   1432 
   1433 	/* Enable RXDV gate. */
   1434 	RGE_SETBIT_1(sc, RGE_PPSW, 0x08);
   1435 	DELAY(2000);
   1436 
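         	/* Wait for the RX and TX FIFOs to drain before the soft reset. */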
   1437 	for (i = 0; i < 3000; i++) {
   1438 		DELAY(50);
   1439 		if ((RGE_READ_1(sc, RGE_MCUCMD) & (RGE_MCUCMD_RXFIFO_EMPTY |
   1440 		    RGE_MCUCMD_TXFIFO_EMPTY)) == (RGE_MCUCMD_RXFIFO_EMPTY |
   1441 		    RGE_MCUCMD_TXFIFO_EMPTY))
   1442 			break;
   1443 	}
   1444 	if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5) {
   1445 		for (i = 0; i < 3000; i++) {
   1446 			DELAY(50);
   1447 			if ((RGE_READ_2(sc, RGE_IM) & 0x0103) == 0x0103)
   1448 				break;
   1449 		}
   1450 	}
   1451 
   1452 	DELAY(2000);
   1453 
   1454 	/* Soft reset. */
   1455 	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_RESET);
   1456 
   1457 	for (i = 0; i < RGE_TIMEOUT; i++) {
   1458 		DELAY(100);
   1459 		if (!(RGE_READ_1(sc, RGE_CMD) & RGE_CMD_RESET))
   1460 			break;
   1461 	}
   1462 	if (i == RGE_TIMEOUT)
   1463 		device_printf(sc->sc_dev, "reset never completed!\n");
   1464 }
   1465 
   1466 void
   1467 rge_iff(struct rge_softc *sc)
   1468 {
   1469 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   1470 	struct ethercom *ec = &sc->sc_ec;
   1471 	struct ether_multi *enm;
   1472 	struct ether_multistep step;
   1473 	uint32_t hashes[2];
   1474 	uint32_t rxfilt;
   1475 	int h = 0;
   1476 
   1477 	rxfilt = RGE_READ_4(sc, RGE_RXCFG);
   1478 	rxfilt &= ~(RGE_RXCFG_ALLPHYS | RGE_RXCFG_MULTI);
   1479 	ifp->if_flags &= ~IFF_ALLMULTI;
   1480 
   1481 	/*
   1482 	 * Always accept frames destined to our station address.
   1483 	 * Always accept broadcast frames.
   1484 	 */
   1485 	rxfilt |= RGE_RXCFG_INDIV | RGE_RXCFG_BROAD;
   1486 
   1487 	if (ifp->if_flags & IFF_PROMISC) {
   1488  allmulti:
   1489 		ifp->if_flags |= IFF_ALLMULTI;
   1490 		rxfilt |= RGE_RXCFG_MULTI;
   1491 		if (ifp->if_flags & IFF_PROMISC)
   1492 			rxfilt |= RGE_RXCFG_ALLPHYS;
   1493 		hashes[0] = hashes[1] = 0xffffffff;
   1494 	} else {
   1495 		rxfilt |= RGE_RXCFG_MULTI;
   1496 		/* Program new filter. */
   1497 		memset(hashes, 0, sizeof(hashes));
   1498 
   1499 		ETHER_LOCK(ec);
   1500 		ETHER_FIRST_MULTI(step, ec, enm);
   1501 		while (enm != NULL) {
   1502 			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
   1503 			    ETHER_ADDR_LEN) != 0) {
   1504 			    	ETHER_UNLOCK(ec);
   1505 				goto allmulti;
   1506 			}
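         			/*
         			 * The top 6 bits of the big-endian CRC of the
         			 * address select one of 64 hash filter bits.
         			 */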
   1507 			h = ether_crc32_be(enm->enm_addrlo,
   1508 			    ETHER_ADDR_LEN) >> 26;
   1509 
   1510 			if (h < 32)
   1511 				hashes[0] |= (1U << h);
   1512 			else
   1513 				hashes[1] |= (1U << (h - 32));
   1514 
   1515 			ETHER_NEXT_MULTI(step, enm);
   1516 		}
   1517 		ETHER_UNLOCK(ec);
   1518 	}
   1519 
   1520 	RGE_WRITE_4(sc, RGE_RXCFG, rxfilt);
   1521 	RGE_WRITE_4(sc, RGE_MAR0, bswap32(hashes[1]));
   1522 	RGE_WRITE_4(sc, RGE_MAR4, bswap32(hashes[0]));
   1523 }
   1524 
   1525 void
   1526 rge_set_phy_power(struct rge_softc *sc, int on)
   1527 {
   1528 	int i;
   1529 
   1530 	if (on) {
   1531 		RGE_SETBIT_1(sc, RGE_PMCH, 0xc0);
   1532 
   1533 		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN);
   1534 
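         		/* Poll until the PHY state field (0xa420 & 7) reaches 3. */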
   1535 		for (i = 0; i < RGE_TIMEOUT; i++) {
   1536 			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 3)
   1537 				break;
   1538 			DELAY(1000);
   1539 		}
   1540 	} else {
   1541 		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_PDOWN);
   1542 		RGE_CLRBIT_1(sc, RGE_PMCH, 0x80);
   1543 		RGE_CLRBIT_1(sc, RGE_PPSW, 0x40);
   1544 	}
   1545 }
   1546 
   1547 void
   1548 rge_phy_config(struct rge_softc *sc)
   1549 {
   1550 	/* Read microcode version. */
   1551 	rge_write_phy_ocp(sc, 0xa436, 0x801e);
   1552 	sc->rge_mcodever = rge_read_phy_ocp(sc, 0xa438);
   1553 
   1554 	switch (sc->rge_type) {
   1555 	case MAC_CFG2:
   1556 		rge_phy_config_mac_cfg2(sc);
   1557 		break;
   1558 	case MAC_CFG3:
   1559 		rge_phy_config_mac_cfg3(sc);
   1560 		break;
   1561 	case MAC_CFG4:
   1562 		rge_phy_config_mac_cfg4(sc);
   1563 		break;
   1564 	case MAC_CFG5:
   1565 		rge_phy_config_mac_cfg5(sc);
   1566 		break;
   1567 	default:
   1568 		break;	/* Can't happen. */
   1569 	}
   1570 
   1571 	rge_write_phy(sc, 0x0a5b, 0x12,
   1572 	    rge_read_phy(sc, 0x0a5b, 0x12) & ~0x8000);
   1573 
   1574 	/* Disable EEE. */
   1575 	RGE_MAC_CLRBIT(sc, 0xe040, 0x0003);
   1576 	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
   1577 		RGE_MAC_CLRBIT(sc, 0xeb62, 0x0006);
   1578 		RGE_PHY_CLRBIT(sc, 0xa432, 0x0010);
   1579 	}
   1580 	RGE_PHY_CLRBIT(sc, 0xa5d0, 0x0006);
   1581 	RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0001);
   1582 	RGE_PHY_CLRBIT(sc, 0xa6d8, 0x0010);
   1583 	RGE_PHY_CLRBIT(sc, 0xa428, 0x0080);
   1584 	RGE_PHY_CLRBIT(sc, 0xa4a2, 0x0200);
   1585 
   1586 	rge_patch_phy_mcu(sc, 1);
   1587 	RGE_MAC_CLRBIT(sc, 0xe052, 0x0001);
   1588 	RGE_PHY_CLRBIT(sc, 0xa442, 0x3000);
   1589 	RGE_PHY_CLRBIT(sc, 0xa430, 0x8000);
   1590 	rge_patch_phy_mcu(sc, 0);
   1591 }
   1592 
   1593 void
   1594 rge_phy_config_mac_cfg2(struct rge_softc *sc)
   1595 {
   1596 	uint16_t val;
   1597 	int i;
   1598 
   1599 	for (i = 0; i < nitems(rtl8125_mac_cfg2_ephy); i++)
   1600 		rge_write_ephy(sc, rtl8125_mac_cfg2_ephy[i].reg,
   1601 		    rtl8125_mac_cfg2_ephy[i].val);
   1602 
   1603 	rge_phy_config_mcu(sc, RGE_MAC_CFG2_MCODE_VER);
   1604 
   1605 	val = rge_read_phy_ocp(sc, 0xad40) & ~0x03ff;
   1606 	rge_write_phy_ocp(sc, 0xad40, val | 0x0084);
   1607 	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
   1608 	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
   1609 	rge_write_phy_ocp(sc, 0xad16, val | 0x0006);
   1610 	val = rge_read_phy_ocp(sc, 0xad32) & ~0x03ff;
   1611 	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
   1612 	RGE_PHY_CLRBIT(sc, 0xac08, 0x1100);
   1613 	val = rge_read_phy_ocp(sc, 0xac8a) & ~0xf000;
   1614 	rge_write_phy_ocp(sc, 0xac8a, val | 0x7000);
   1615 	RGE_PHY_SETBIT(sc, 0xad18, 0x0400);
   1616 	RGE_PHY_SETBIT(sc, 0xad1a, 0x03ff);
   1617 	RGE_PHY_SETBIT(sc, 0xad1c, 0x03ff);
   1618 
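         	/*
         	 * PHY OCP registers 0xa436/0xa438 appear to form an indirect
         	 * address/data pair for patching PHY parameter locations.
         	 */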
   1619 	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
   1620 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1621 	rge_write_phy_ocp(sc, 0xa438, val | 0xc400);
   1622 	rge_write_phy_ocp(sc, 0xa436, 0x80eb);
   1623 	val = rge_read_phy_ocp(sc, 0xa438) & ~0x0700;
   1624 	rge_write_phy_ocp(sc, 0xa438, val | 0x0300);
   1625 	rge_write_phy_ocp(sc, 0xa436, 0x80f8);
   1626 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1627 	rge_write_phy_ocp(sc, 0xa438, val | 0x1c00);
   1628 	rge_write_phy_ocp(sc, 0xa436, 0x80f1);
   1629 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1630 	rge_write_phy_ocp(sc, 0xa438, val | 0x3000);
   1631 	rge_write_phy_ocp(sc, 0xa436, 0x80fe);
   1632 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1633 	rge_write_phy_ocp(sc, 0xa438, val | 0xa500);
   1634 	rge_write_phy_ocp(sc, 0xa436, 0x8102);
   1635 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1636 	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
   1637 	rge_write_phy_ocp(sc, 0xa436, 0x8105);
   1638 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1639 	rge_write_phy_ocp(sc, 0xa438, val | 0x3300);
   1640 	rge_write_phy_ocp(sc, 0xa436, 0x8100);
   1641 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1642 	rge_write_phy_ocp(sc, 0xa438, val | 0x7000);
   1643 	rge_write_phy_ocp(sc, 0xa436, 0x8104);
   1644 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1645 	rge_write_phy_ocp(sc, 0xa438, val | 0xf000);
   1646 	rge_write_phy_ocp(sc, 0xa436, 0x8106);
   1647 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1648 	rge_write_phy_ocp(sc, 0xa438, val | 0x6500);
   1649 	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
   1650 	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1651 	rge_write_phy_ocp(sc, 0xa438, val | 0xed00);
   1652 	rge_write_phy_ocp(sc, 0xa436, 0x80df);
   1653 	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
   1654 	rge_write_phy_ocp(sc, 0xa436, 0x80e1);
   1655 	RGE_PHY_CLRBIT(sc, 0xa438, 0x0100);
   1656 	val = rge_read_phy_ocp(sc, 0xbf06) & ~0x003f;
   1657 	rge_write_phy_ocp(sc, 0xbf06, val | 0x0038);
   1658 	rge_write_phy_ocp(sc, 0xa436, 0x819f);
   1659 	rge_write_phy_ocp(sc, 0xa438, 0xd0b6);
   1660 	rge_write_phy_ocp(sc, 0xbc34, 0x5555);
   1661 	val = rge_read_phy_ocp(sc, 0xbf0a) & ~0x0e00;
   1662 	rge_write_phy_ocp(sc, 0xbf0a, val | 0x0a00);
   1663 	RGE_PHY_CLRBIT(sc, 0xa5c0, 0x0400);
   1664 	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
   1665 }
   1666 
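/*
 * PHY initialization for MAC_CFG3 devices.  Same pattern as MAC_CFG2;
 * the b88e/b890 parameter table is written with the PHY MCU patch
 * gate held open, and the EEE TX idle timer is derived from the
 * current MTU.
 */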
void
rge_phy_config_mac_cfg3(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint16_t val;
	int i;
	static const uint16_t mac_cfg3_a438_value[] =
	    { 0x0043, 0x00a7, 0x00d6, 0x00ec, 0x00f6, 0x00fb, 0x00fd, 0x00ff,
	      0x00bb, 0x0058, 0x0029, 0x0013, 0x0009, 0x0004, 0x0002 };

	static const uint16_t mac_cfg3_b88e_value[] =
	    { 0xc091, 0x6e12, 0xc092, 0x1214, 0xc094, 0x1516, 0xc096, 0x171b,
	      0xc098, 0x1b1c, 0xc09a, 0x1f1f, 0xc09c, 0x2021, 0xc09e, 0x2224,
	      0xc0a0, 0x2424, 0xc0a2, 0x2424, 0xc0a4, 0x2424, 0xc018, 0x0af2,
	      0xc01a, 0x0d4a, 0xc01c, 0x0f26, 0xc01e, 0x118d, 0xc020, 0x14f3,
	      0xc022, 0x175a, 0xc024, 0x19c0, 0xc026, 0x1c26, 0xc089, 0x6050,
	      0xc08a, 0x5f6e, 0xc08c, 0x6e6e, 0xc08e, 0x6e6e, 0xc090, 0x6e12 };

	for (i = 0; i < nitems(rtl8125_mac_cfg3_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg3_ephy[i].reg,
		    rtl8125_mac_cfg3_ephy[i].val);

	val = rge_read_ephy(sc, 0x002a) & ~0x7000;
	rge_write_ephy(sc, 0x002a, val | 0x3000);
	RGE_EPHY_CLRBIT(sc, 0x0019, 0x0040);
	RGE_EPHY_SETBIT(sc, 0x001b, 0x0e00);
	RGE_EPHY_CLRBIT(sc, 0x001b, 0x7000);
	rge_write_ephy(sc, 0x0002, 0x6042);
	rge_write_ephy(sc, 0x0006, 0x0014);
	val = rge_read_ephy(sc, 0x006a) & ~0x7000;
	rge_write_ephy(sc, 0x006a, val | 0x3000);
	RGE_EPHY_CLRBIT(sc, 0x0059, 0x0040);
	RGE_EPHY_SETBIT(sc, 0x005b, 0x0e00);
	RGE_EPHY_CLRBIT(sc, 0x005b, 0x7000);
	rge_write_ephy(sc, 0x0042, 0x6042);
	rge_write_ephy(sc, 0x0046, 0x0014);

	rge_phy_config_mcu(sc, RGE_MAC_CFG3_MCODE_VER);

	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad16, val | 0x03ff);
	val = rge_read_phy_ocp(sc, 0xad32) & ~0x003f;
	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x1000);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x0100);
	val = rge_read_phy_ocp(sc, 0xacc0) & ~0x0003;
	rge_write_phy_ocp(sc, 0xacc0, val | 0x0002);
	val = rge_read_phy_ocp(sc, 0xad40) & ~0x00e0;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0040);
	val = rge_read_phy_ocp(sc, 0xad40) & ~0x0007;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0004);
	RGE_PHY_CLRBIT(sc, 0xac14, 0x0080);
	RGE_PHY_CLRBIT(sc, 0xac80, 0x0300);
	val = rge_read_phy_ocp(sc, 0xac5e) & ~0x0007;
	rge_write_phy_ocp(sc, 0xac5e, val | 0x0002);
	rge_write_phy_ocp(sc, 0xad4c, 0x00a8);
	rge_write_phy_ocp(sc, 0xac5c, 0x01ff);
	val = rge_read_phy_ocp(sc, 0xac8a) & ~0x00f0;
	rge_write_phy_ocp(sc, 0xac8a, val | 0x0030);
	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0500);
	rge_write_phy_ocp(sc, 0xb87c, 0x8159);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0700);
	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN + 32);
	rge_write_phy_ocp(sc, 0xb87c, 0x80a2);
	rge_write_phy_ocp(sc, 0xb87e, 0x0153);
	rge_write_phy_ocp(sc, 0xb87c, 0x809c);
	rge_write_phy_ocp(sc, 0xb87e, 0x0153);

	rge_write_phy_ocp(sc, 0xa436, 0x81b3);
	for (i = 0; i < nitems(mac_cfg3_a438_value); i++)
		rge_write_phy_ocp(sc, 0xa438, mac_cfg3_a438_value[i]);
	for (i = 0; i < 26; i++)
		rge_write_phy_ocp(sc, 0xa438, 0);
	rge_write_phy_ocp(sc, 0xa436, 0x8257);
	rge_write_phy_ocp(sc, 0xa438, 0x020f);
	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
	rge_write_phy_ocp(sc, 0xa438, 0x7843);

	rge_patch_phy_mcu(sc, 1);
	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
	for (i = 0; i < nitems(mac_cfg3_b88e_value); i += 2) {
		rge_write_phy_ocp(sc, 0xb88e, mac_cfg3_b88e_value[i]);
		rge_write_phy_ocp(sc, 0xb890, mac_cfg3_b88e_value[i + 1]);
	}
	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
	rge_patch_phy_mcu(sc, 0);

	RGE_PHY_SETBIT(sc, 0xd068, 0x2000);
	rge_write_phy_ocp(sc, 0xa436, 0x81a2);
	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
	val = rge_read_phy_ocp(sc, 0xb54c) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb54c, val | 0xdb00);
	RGE_PHY_CLRBIT(sc, 0xa454, 0x0001);
	RGE_PHY_SETBIT(sc, 0xa5d4, 0x0020);
	RGE_PHY_CLRBIT(sc, 0xad4e, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xa86a, 0x0001);
	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
}

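/*
 * PHY initialization for MAC_CFG4 devices: EPHY table, microcode,
 * then straight-line register loads, including a b87c/b87e parameter
 * table and a final block applied in PHY MCU patch mode.
 */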
void
rge_phy_config_mac_cfg4(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint16_t val;
	int i;
	static const uint16_t mac_cfg4_b87c_value[] =
	    { 0x8013, 0x0700, 0x8fb9, 0x2801, 0x8fba, 0x0100, 0x8fbc, 0x1900,
	      0x8fbe, 0xe100, 0x8fc0, 0x0800, 0x8fc2, 0xe500, 0x8fc4, 0x0f00,
	      0x8fc6, 0xf100, 0x8fc8, 0x0400, 0x8fca, 0xf300, 0x8fcc, 0xfd00,
	      0x8fce, 0xff00, 0x8fd0, 0xfb00, 0x8fd2, 0x0100, 0x8fd4, 0xf400,
	      0x8fd6, 0xff00, 0x8fd8, 0xf600, 0x813d, 0x390e, 0x814f, 0x790e,
	      0x80b0, 0x0f31 };

	for (i = 0; i < nitems(rtl8125_mac_cfg4_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg4_ephy[i].reg,
		    rtl8125_mac_cfg4_ephy[i].val);

	rge_write_phy_ocp(sc, 0xbf86, 0x9000);
	RGE_PHY_SETBIT(sc, 0xc402, 0x0400);
	RGE_PHY_CLRBIT(sc, 0xc402, 0x0400);
	rge_write_phy_ocp(sc, 0xbd86, 0x1010);
	rge_write_phy_ocp(sc, 0xbd88, 0x1010);
	val = rge_read_phy_ocp(sc, 0xbd4e) & ~0x0c00;
	rge_write_phy_ocp(sc, 0xbd4e, val | 0x0800);
	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
	rge_write_phy_ocp(sc, 0xbf46, val | 0x0700);

	rge_phy_config_mcu(sc, RGE_MAC_CFG4_MCODE_VER);

	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
	RGE_PHY_SETBIT(sc, 0xbc08, 0x000c);
	rge_write_phy_ocp(sc, 0xa436, 0x8fff);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x0400);
	for (i = 0; i < 6; i++) {
		rge_write_phy_ocp(sc, 0xb87c, 0x8560 + i * 2);
		if (i < 3)
			rge_write_phy_ocp(sc, 0xb87e, 0x19cc);
		else
			rge_write_phy_ocp(sc, 0xb87e, 0x147d);
	}
	rge_write_phy_ocp(sc, 0xb87c, 0x8ffe);
	rge_write_phy_ocp(sc, 0xb87e, 0x0907);
	val = rge_read_phy_ocp(sc, 0xacda) & ~0xff00;
	rge_write_phy_ocp(sc, 0xacda, val | 0xff00);
	val = rge_read_phy_ocp(sc, 0xacde) & ~0xf000;
	rge_write_phy_ocp(sc, 0xacde, val | 0xf000);
	rge_write_phy_ocp(sc, 0xb87c, 0x80d6);
	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
	rge_write_phy_ocp(sc, 0xb87c, 0x80f2);
	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
	rge_write_phy_ocp(sc, 0xb87c, 0x80f4);
	rge_write_phy_ocp(sc, 0xb87e, 0x6077);
	rge_write_phy_ocp(sc, 0xb506, 0x01e7);
	rge_write_phy_ocp(sc, 0xac8c, 0x0ffc);
	rge_write_phy_ocp(sc, 0xac46, 0xb7b4);
	rge_write_phy_ocp(sc, 0xac50, 0x0fbc);
	rge_write_phy_ocp(sc, 0xac3c, 0x9240);
	rge_write_phy_ocp(sc, 0xac4e, 0x0db4);
	rge_write_phy_ocp(sc, 0xacc6, 0x0707);
	rge_write_phy_ocp(sc, 0xacc8, 0xa0d3);
	rge_write_phy_ocp(sc, 0xad08, 0x0007);
	for (i = 0; i < nitems(mac_cfg4_b87c_value); i += 2) {
		rge_write_phy_ocp(sc, 0xb87c, mac_cfg4_b87c_value[i]);
		rge_write_phy_ocp(sc, 0xb87e, mac_cfg4_b87c_value[i + 1]);
	}
	RGE_PHY_SETBIT(sc, 0xbf4c, 0x0002);
	RGE_PHY_SETBIT(sc, 0xbcca, 0x0300);
	rge_write_phy_ocp(sc, 0xb87c, 0x8141);
	rge_write_phy_ocp(sc, 0xb87e, 0x320e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8153);
	rge_write_phy_ocp(sc, 0xb87e, 0x720e);
	RGE_PHY_CLRBIT(sc, 0xa432, 0x0040);
	rge_write_phy_ocp(sc, 0xb87c, 0x8529);
	rge_write_phy_ocp(sc, 0xb87e, 0x050e);
	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN + 32);
	rge_write_phy_ocp(sc, 0xa436, 0x816c);
	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
	rge_write_phy_ocp(sc, 0xa436, 0x8170);
	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
	rge_write_phy_ocp(sc, 0xa436, 0x8174);
	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
	rge_write_phy_ocp(sc, 0xa436, 0x8178);
	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
	rge_write_phy_ocp(sc, 0xa436, 0x817c);
	rge_write_phy_ocp(sc, 0xa438, 0x0719);
	rge_write_phy_ocp(sc, 0xa436, 0x8ff4);
	rge_write_phy_ocp(sc, 0xa438, 0x0400);
	rge_write_phy_ocp(sc, 0xa436, 0x8ff1);
	rge_write_phy_ocp(sc, 0xa438, 0x0404);
	rge_write_phy_ocp(sc, 0xbf4a, 0x001b);
	for (i = 0; i < 6; i++) {
		rge_write_phy_ocp(sc, 0xb87c, 0x8033 + i * 4);
		if (i == 2)
			rge_write_phy_ocp(sc, 0xb87e, 0xfc32);
		else
			rge_write_phy_ocp(sc, 0xb87e, 0x7c13);
	}
	rge_write_phy_ocp(sc, 0xb87c, 0x8145);
	rge_write_phy_ocp(sc, 0xb87e, 0x370e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
	rge_write_phy_ocp(sc, 0xb87e, 0x770e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8169);
	rge_write_phy_ocp(sc, 0xb87e, 0x0d0a);
	rge_write_phy_ocp(sc, 0xb87c, 0x817b);
	rge_write_phy_ocp(sc, 0xb87e, 0x1d0a);
	rge_write_phy_ocp(sc, 0xa436, 0x8217);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x821a);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x80da);
	rge_write_phy_ocp(sc, 0xa438, 0x0403);
	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
	rge_write_phy_ocp(sc, 0xa436, 0x80b3);
	rge_write_phy_ocp(sc, 0xa438, 0x0384);
	rge_write_phy_ocp(sc, 0xa436, 0x80b7);
	rge_write_phy_ocp(sc, 0xa438, 0x2007);
	rge_write_phy_ocp(sc, 0xa436, 0x80ba);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80b5);
	rge_write_phy_ocp(sc, 0xa438, 0xf009);
	rge_write_phy_ocp(sc, 0xa436, 0x80bd);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x9f00);
	rge_write_phy_ocp(sc, 0xa436, 0x80c7);
	rge_write_phy_ocp(sc, 0xa438, 0xf083);
	rge_write_phy_ocp(sc, 0xa436, 0x80dd);
	rge_write_phy_ocp(sc, 0xa438, 0x03f0);
	rge_write_phy_ocp(sc, 0xa436, 0x80df);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
	rge_write_phy_ocp(sc, 0xa436, 0x80cb);
	rge_write_phy_ocp(sc, 0xa438, 0x2007);
	rge_write_phy_ocp(sc, 0xa436, 0x80ce);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80c9);
	rge_write_phy_ocp(sc, 0xa438, 0x8009);
	rge_write_phy_ocp(sc, 0xa436, 0x80d1);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x8000);
	rge_write_phy_ocp(sc, 0xa436, 0x80a3);
	rge_write_phy_ocp(sc, 0xa438, 0x200a);
	rge_write_phy_ocp(sc, 0xa436, 0x80a5);
	rge_write_phy_ocp(sc, 0xa438, 0xf0ad);
	rge_write_phy_ocp(sc, 0xa436, 0x809f);
	rge_write_phy_ocp(sc, 0xa438, 0x6073);
	rge_write_phy_ocp(sc, 0xa436, 0x80a1);
	rge_write_phy_ocp(sc, 0xa438, 0x000b);
	rge_write_phy_ocp(sc, 0xa436, 0x80a9);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xc000);
	rge_patch_phy_mcu(sc, 1);
	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
	rge_write_phy_ocp(sc, 0xb88e, 0xc23e);
	rge_write_phy_ocp(sc, 0xb890, 0x0000);
	rge_write_phy_ocp(sc, 0xb88e, 0xc240);
	rge_write_phy_ocp(sc, 0xb890, 0x0103);
	rge_write_phy_ocp(sc, 0xb88e, 0xc242);
	rge_write_phy_ocp(sc, 0xb890, 0x0507);
	rge_write_phy_ocp(sc, 0xb88e, 0xc244);
	rge_write_phy_ocp(sc, 0xb890, 0x090b);
	rge_write_phy_ocp(sc, 0xb88e, 0xc246);
	rge_write_phy_ocp(sc, 0xb890, 0x0c0e);
	rge_write_phy_ocp(sc, 0xb88e, 0xc248);
	rge_write_phy_ocp(sc, 0xb890, 0x1012);
	rge_write_phy_ocp(sc, 0xb88e, 0xc24a);
	rge_write_phy_ocp(sc, 0xb890, 0x1416);
	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
	rge_patch_phy_mcu(sc, 0);
	RGE_PHY_SETBIT(sc, 0xa86a, 0x0001);
	RGE_PHY_SETBIT(sc, 0xa6f0, 0x0001);
	rge_write_phy_ocp(sc, 0xbfa0, 0xd70d);
	rge_write_phy_ocp(sc, 0xbfa2, 0x4100);
	rge_write_phy_ocp(sc, 0xbfa4, 0xe868);
	rge_write_phy_ocp(sc, 0xbfa6, 0xdc59);
	rge_write_phy_ocp(sc, 0xb54c, 0x3c18);
	RGE_PHY_CLRBIT(sc, 0xbfa4, 0x0020);
	rge_write_phy_ocp(sc, 0xa436, 0x817d);
	RGE_PHY_SETBIT(sc, 0xa438, 0x1000);
}

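/*
 * PHY initialization for MAC_CFG5 devices; much shorter than the
 * earlier variants but follows the same pattern.
 */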
void
rge_phy_config_mac_cfg5(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint16_t val;
	int i;

	for (i = 0; i < nitems(rtl8125_mac_cfg5_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg5_ephy[i].reg,
		    rtl8125_mac_cfg5_ephy[i].val);

	val = rge_read_ephy(sc, 0x0022) & ~0x0030;
	rge_write_ephy(sc, 0x0022, val | 0x0020);
	val = rge_read_ephy(sc, 0x0062) & ~0x0030;
	rge_write_ephy(sc, 0x0062, val | 0x0020);

	rge_phy_config_mcu(sc, RGE_MAC_CFG5_MCODE_VER);

	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
	val = rge_read_phy_ocp(sc, 0xac46) & ~0x00f0;
	rge_write_phy_ocp(sc, 0xac46, val | 0x0090);
	val = rge_read_phy_ocp(sc, 0xad30) & ~0x0003;
	rge_write_phy_ocp(sc, 0xad30, val | 0x0001);
	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN + 32);
	rge_write_phy_ocp(sc, 0xb87c, 0x80f5);
	rge_write_phy_ocp(sc, 0xb87e, 0x760e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8107);
	rge_write_phy_ocp(sc, 0xb87e, 0x360e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8551);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0800);
	val = rge_read_phy_ocp(sc, 0xbf00) & ~0xe000;
	rge_write_phy_ocp(sc, 0xbf00, val | 0xa000);
	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
	rge_write_phy_ocp(sc, 0xbf46, val | 0x0300);
	for (i = 0; i < 10; i++) {
		rge_write_phy_ocp(sc, 0xa436, 0x8044 + i * 6);
		rge_write_phy_ocp(sc, 0xa438, 0x2417);
	}
	RGE_PHY_SETBIT(sc, 0xa4ca, 0x0040);
	val = rge_read_phy_ocp(sc, 0xbf84) & ~0xe000;
	rge_write_phy_ocp(sc, 0xbf84, val | 0xa000);
}

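/*
 * Load the PHY MCU microcode if the version cached in
 * sc->rge_mcodever does not match the one this driver carries.  The
 * tables are written bracketed by rge_patch_phy_mcu(), and the new
 * version number is stored in the PHY (parameter 0x801e) when done.
 */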
void
rge_phy_config_mcu(struct rge_softc *sc, uint16_t mcode_version)
{
	if (sc->rge_mcodever != mcode_version) {
		int i;

		rge_patch_phy_mcu(sc, 1);

		if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
			rge_write_phy_ocp(sc, 0xa436, 0x8024);
			if (sc->rge_type == MAC_CFG2)
				rge_write_phy_ocp(sc, 0xa438, 0x8600);
			else
				rge_write_phy_ocp(sc, 0xa438, 0x8601);
			rge_write_phy_ocp(sc, 0xa436, 0xb82e);
			rge_write_phy_ocp(sc, 0xa438, 0x0001);

			RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
		}

		if (sc->rge_type == MAC_CFG2) {
			for (i = 0; i < nitems(rtl8125_mac_cfg2_mcu); i++) {
				rge_write_phy_ocp(sc,
				    rtl8125_mac_cfg2_mcu[i].reg,
				    rtl8125_mac_cfg2_mcu[i].val);
			}
		} else if (sc->rge_type == MAC_CFG3) {
			for (i = 0; i < nitems(rtl8125_mac_cfg3_mcu); i++) {
				rge_write_phy_ocp(sc,
				    rtl8125_mac_cfg3_mcu[i].reg,
				    rtl8125_mac_cfg3_mcu[i].val);
			}
		} else if (sc->rge_type == MAC_CFG4) {
			for (i = 0; i < nitems(rtl8125_mac_cfg4_mcu); i++) {
				rge_write_phy_ocp(sc,
				    rtl8125_mac_cfg4_mcu[i].reg,
				    rtl8125_mac_cfg4_mcu[i].val);
			}
		} else if (sc->rge_type == MAC_CFG5) {
			for (i = 0; i < nitems(rtl8125_mac_cfg5_mcu); i++) {
				rge_write_phy_ocp(sc,
				    rtl8125_mac_cfg5_mcu[i].reg,
				    rtl8125_mac_cfg5_mcu[i].val);
			}
		}

		if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
			RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);

			rge_write_phy_ocp(sc, 0xa436, 0);
			rge_write_phy_ocp(sc, 0xa438, 0);
			RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
			rge_write_phy_ocp(sc, 0xa436, 0x8024);
			rge_write_phy_ocp(sc, 0xa438, 0);
		}

		rge_patch_phy_mcu(sc, 0);

		/* Write microcode version. */
		rge_write_phy_ocp(sc, 0xa436, 0x801e);
		rge_write_phy_ocp(sc, 0xa438, mcode_version);
	}
}

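/*
 * Program the station address.  The MAC0/MAC4 registers are guarded
 * by the EECMD write-config bit.
 */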
void
rge_set_macaddr(struct rge_softc *sc, const uint8_t *addr)
{
	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
	RGE_WRITE_4(sc, RGE_MAC0,
	    (uint32_t)addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	RGE_WRITE_4(sc, RGE_MAC4, addr[5] << 8 | addr[4]);
	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
}

void
rge_get_macaddr(struct rge_softc *sc, uint8_t *addr)
{
	int i;

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		addr[i] = RGE_READ_1(sc, RGE_ADDR0 + i);
}

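/*
 * One-time hardware setup: clear the PME status, disable UPS, clear
 * the MAC MCU registers (0xfc26-0xfc38), load the per-chip MAC MCU
 * fixup table and disable PHY power saving.
 */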
void
rge_hw_init(struct rge_softc *sc)
{
	int i;

	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
	RGE_CLRBIT_1(sc, 0xf1, 0x80);

	/* Disable UPS. */
	RGE_MAC_CLRBIT(sc, 0xd40a, 0x0010);

	/* Configure MAC MCU. */
	rge_write_mac_ocp(sc, 0xfc38, 0);

	for (i = 0xfc28; i < 0xfc38; i += 2)
		rge_write_mac_ocp(sc, i, 0);

	DELAY(3000);
	rge_write_mac_ocp(sc, 0xfc26, 0);

	if (sc->rge_type == MAC_CFG3) {
		for (i = 0; i < nitems(rtl8125_mac_bps); i++) {
			rge_write_mac_ocp(sc, rtl8125_mac_bps[i].reg,
			    rtl8125_mac_bps[i].val);
		}
	} else if (sc->rge_type == MAC_CFG5) {
		for (i = 0; i < nitems(rtl8125b_mac_bps); i++) {
			rge_write_mac_ocp(sc, rtl8125b_mac_bps[i].reg,
			    rtl8125b_mac_bps[i].val);
		}
	}

	/* Disable PHY power saving. */
	rge_disable_phy_ocp_pwrsave(sc);

	/* Set PCIe uncorrectable error status. */
	rge_write_csi(sc, 0x108,
	    rge_read_csi(sc, 0x108) | 0x00100000);
}

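/*
 * Disable PHY OCP power saving by forcing register 0xc416 to 0x0500.
 * The write is bracketed by rge_patch_phy_mcu().
 */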
void
rge_disable_phy_ocp_pwrsave(struct rge_softc *sc)
{
	if (rge_read_phy_ocp(sc, 0xc416) != 0x0500) {
		rge_patch_phy_mcu(sc, 1);
		rge_write_phy_ocp(sc, 0xc416, 0);
		rge_write_phy_ocp(sc, 0xc416, 0x0500);
		rge_patch_phy_mcu(sc, 0);
	}
}

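/*
 * Request (set) or release (clear) PHY MCU patch mode via bit 4 of
 * OCP register 0xb820, then poll bit 6 of OCP register 0xb800 for
 * the patch-ready indication.
 */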
void
rge_patch_phy_mcu(struct rge_softc *sc, int set)
{
	int i;

	if (set)
		RGE_PHY_SETBIT(sc, 0xb820, 0x0010);
	else
		RGE_PHY_CLRBIT(sc, 0xb820, 0x0010);

	for (i = 0; i < 1000; i++) {
		if ((rge_read_phy_ocp(sc, 0xb800) & 0x0040) == 0x0040)
			break;
		DELAY(100);
	}
	if (i == 1000) {
		DPRINTF(("timeout waiting to patch phy mcu\n"));
		return;
	}
}

void
rge_add_media_types(struct rge_softc *sc)
{
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX, 0, NULL);
}

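/*
 * Select the interrupt moderation scheme.  With RGE_IMTYPE_NONE, RX
 * and TX interrupts are acknowledged directly; with RGE_IMTYPE_SIM,
 * a periodic chip timer raises RGE_ISR_PCS_TIMEOUT, which stands in
 * for both the RX and TX acknowledgements.
 */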
void
rge_config_imtype(struct rge_softc *sc, int imtype)
{
	switch (imtype) {
	case RGE_IMTYPE_NONE:
		sc->rge_intrs = RGE_INTRS;
		sc->rge_rx_ack = RGE_ISR_RX_OK | RGE_ISR_RX_DESC_UNAVAIL |
		    RGE_ISR_RX_FIFO_OFLOW;
		sc->rge_tx_ack = RGE_ISR_TX_OK;
		break;
	case RGE_IMTYPE_SIM:
		sc->rge_intrs = RGE_INTRS_TIMER;
		sc->rge_rx_ack = RGE_ISR_PCS_TIMEOUT;
		sc->rge_tx_ack = RGE_ISR_PCS_TIMEOUT;
		break;
	default:
		panic("%s: unknown imtype %d", device_xname(sc->sc_dev), imtype);
	}
}

void
rge_disable_hw_im(struct rge_softc *sc)
{
	RGE_WRITE_2(sc, RGE_IM, 0);
}

void
rge_disable_sim_im(struct rge_softc *sc)
{
	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
	sc->rge_timerintr = 0;
}

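/*
 * Arm the chip timer used for simulated interrupt moderation.
 */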
void
rge_setup_sim_im(struct rge_softc *sc)
{
	RGE_WRITE_4(sc, RGE_TIMERINT0, 0x2600);
	RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
	sc->rge_timerintr = 1;
}

void
rge_setup_intr(struct rge_softc *sc, int imtype)
{
	rge_config_imtype(sc, imtype);

	/* Enable interrupts. */
	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);

	switch (imtype) {
	case RGE_IMTYPE_NONE:
		rge_disable_sim_im(sc);
		rge_disable_hw_im(sc);
		break;
	case RGE_IMTYPE_SIM:
		rge_disable_hw_im(sc);
		rge_setup_sim_im(sc);
		break;
	default:
		panic("%s: unknown imtype %d", device_xname(sc->sc_dev), imtype);
	}
}

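/*
 * Take the chip out of out-of-band (OOB) management mode so the
 * driver owns the MAC, and clean up if it is resuming from the UPS
 * state.
 */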
void
rge_exit_oob(struct rge_softc *sc)
{
	int i;

	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
	    RGE_RXCFG_ERRPKT);

	/* Disable RealWoW. */
	rge_write_mac_ocp(sc, 0xc0bc, 0x00ff);

	rge_reset(sc);

	/* Disable OOB. */
	RGE_CLRBIT_1(sc, RGE_MCUCMD, RGE_MCUCMD_IS_OOB);

	RGE_MAC_CLRBIT(sc, 0xe8de, 0x4000);

	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
			break;
	}

	rge_write_mac_ocp(sc, 0xc0aa, 0x07d0);
	rge_write_mac_ocp(sc, 0xc0a6, 0x01b5);
	rge_write_mac_ocp(sc, 0xc01e, 0x5555);

	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
			break;
	}

	if (rge_read_mac_ocp(sc, 0xd42c) & 0x0100) {
		printf("%s: detected UPS resume\n",
		    device_xname(sc->sc_dev));
		for (i = 0; i < RGE_TIMEOUT; i++) {
			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 2)
				break;
			DELAY(1000);
		}
		RGE_MAC_CLRBIT(sc, 0xd408, 0x0100);
		if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5)
			RGE_PHY_CLRBIT(sc, 0xa466, 0x0001);
		RGE_PHY_CLRBIT(sc, 0xa468, 0x000a);
	}
}

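/*
 * CSI register access: an indirect window into the device's PCIe
 * configuration space.  Completion is signalled by RGE_CSIAR_BUSY,
 * which clears when a write finishes and is set once read data is
 * available.
 */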
void
rge_write_csi(struct rge_softc *sc, uint32_t reg, uint32_t val)
{
	int i;

	RGE_WRITE_4(sc, RGE_CSIDR, val);
	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT) | RGE_CSIAR_BUSY);

	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (!(RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY))
			break;
	}

	DELAY(20);
}

uint32_t
rge_read_csi(struct rge_softc *sc, uint32_t reg)
{
	int i;

	RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT));

	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY)
			break;
	}

	DELAY(20);

	return (RGE_READ_4(sc, RGE_CSIDR));
}

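/*
 * MAC OCP register access.  The word-sized register address is
 * halved and placed in the upper half of RGE_MACOCP, with the data
 * in the low 16 bits; no busy polling appears to be required.
 */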
void
rge_write_mac_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
{
	uint32_t tmp;

	tmp = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
	tmp += val;
	tmp |= RGE_MACOCP_BUSY;
	RGE_WRITE_4(sc, RGE_MACOCP, tmp);
}

uint16_t
rge_read_mac_ocp(struct rge_softc *sc, uint16_t reg)
{
	uint32_t val;

	val = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
	RGE_WRITE_4(sc, RGE_MACOCP, val);

	return (RGE_READ_4(sc, RGE_MACOCP) & RGE_MACOCP_DATA_MASK);
}

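/*
 * EPHY register access through RGE_EPHYAR.  Note the busy-flag
 * polarity: writes complete with RGE_EPHYAR_BUSY clear, reads with
 * it set.
 */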
void
rge_write_ephy(struct rge_softc *sc, uint16_t reg, uint16_t val)
{
	uint32_t tmp;
	int i;

	tmp = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
	tmp |= RGE_EPHYAR_BUSY | (val & RGE_EPHYAR_DATA_MASK);
	RGE_WRITE_4(sc, RGE_EPHYAR, tmp);

	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (!(RGE_READ_4(sc, RGE_EPHYAR) & RGE_EPHYAR_BUSY))
			break;
	}

	DELAY(20);
}

uint16_t
rge_read_ephy(struct rge_softc *sc, uint16_t reg)
{
	uint32_t val;
	int i;

	val = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
	RGE_WRITE_4(sc, RGE_EPHYAR, val);

	for (i = 0; i < 10; i++) {
		DELAY(100);
		val = RGE_READ_4(sc, RGE_EPHYAR);
		if (val & RGE_EPHYAR_BUSY)
			break;
	}

	DELAY(20);

	return (val & RGE_EPHYAR_DATA_MASK);
}

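/*
 * MII-style PHY register access.  A zero addr selects the internal
 * PHY, whose standard registers are mapped onto the OCP address
 * space in banks of eight starting at RGE_PHYBASE; the resulting
 * address is handed to the raw OCP accessors below.
 */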
void
rge_write_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg, uint16_t val)
{
	uint16_t off, phyaddr;

	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
	phyaddr <<= 4;

	off = addr ? reg : 0x10 + (reg % 8);

	phyaddr += (off - 16) << 1;

	rge_write_phy_ocp(sc, phyaddr, val);
}

uint16_t
rge_read_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg)
{
	uint16_t off, phyaddr;

	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
	phyaddr <<= 4;

	off = addr ? reg : 0x10 + (reg % 8);

	phyaddr += (off - 16) << 1;

	return (rge_read_phy_ocp(sc, phyaddr));
}

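/*
 * Raw PHY OCP register access; as with the MAC OCP space, the halved
 * register address goes into the upper bits of RGE_PHYOCP, and
 * RGE_PHYOCP_BUSY signals completion (clear for writes, set for
 * reads).
 */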
void
rge_write_phy_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
{
	uint32_t tmp;
	int i;

	tmp = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
	tmp |= RGE_PHYOCP_BUSY | val;
	RGE_WRITE_4(sc, RGE_PHYOCP, tmp);

	for (i = 0; i < RGE_TIMEOUT; i++) {
		DELAY(1);
		if (!(RGE_READ_4(sc, RGE_PHYOCP) & RGE_PHYOCP_BUSY))
			break;
	}
}

uint16_t
rge_read_phy_ocp(struct rge_softc *sc, uint16_t reg)
{
	uint32_t val;
	int i;

	val = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
	RGE_WRITE_4(sc, RGE_PHYOCP, val);

	for (i = 0; i < RGE_TIMEOUT; i++) {
		DELAY(1);
		val = RGE_READ_4(sc, RGE_PHYOCP);
		if (val & RGE_PHYOCP_BUSY)
			break;
	}

	return (val & RGE_PHYOCP_DATA_MASK);
}

int
rge_get_link_status(struct rge_softc *sc)
{
	return ((RGE_READ_2(sc, RGE_PHYSTAT) & RGE_PHYSTAT_LINK) ? 1 : 0);
}

void
rge_txstart(void *arg)
{
	struct rge_softc *sc = arg;

	RGE_WRITE_2(sc, RGE_TXSTART, RGE_TXSTART_START);
}

void
rge_tick(void *arg)
{
	struct rge_softc *sc = arg;
	int s;

	s = splnet();
	rge_link_state(sc);
	splx(s);

	callout_schedule(&sc->sc_timeout, hz);
}

void
rge_link_state(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	int link = LINK_STATE_DOWN;

	if (rge_get_link_status(sc))
		link = LINK_STATE_UP;

	if (ifp->if_link_state != link) { /* XXX not safe to access */
		if_link_state_change(ifp, link);
	}
}