      1 /*	$NetBSD: if_rge.c,v 1.4 2020/02/04 07:37:00 skrll Exp $	*/
      2 /*	$OpenBSD: if_rge.c,v 1.2 2020/01/02 09:00:45 kevlo Exp $	*/
      3 
      4 /*
      5  * Copyright (c) 2019 Kevin Lo <kevlo (at) openbsd.org>
      6  *
      7  * Permission to use, copy, modify, and distribute this software for any
      8  * purpose with or without fee is hereby granted, provided that the above
      9  * copyright notice and this permission notice appear in all copies.
     10  *
     11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
     12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
     13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
     14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
     15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
     16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
     17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
     18  */
     19 
     20 #include <sys/cdefs.h>
     21 __KERNEL_RCSID(0, "$NetBSD: if_rge.c,v 1.4 2020/02/04 07:37:00 skrll Exp $");
     22 
     23 /* #include "bpfilter.h" Sevan */
     24 /* #include "vlan.h" Sevan */
     25 
     26 #include <sys/types.h>
     27 
     28 #include <sys/param.h>
     29 #include <sys/systm.h>
     30 #include <sys/sockio.h>
     31 #include <sys/mbuf.h>
     32 #include <sys/malloc.h>
     33 #include <sys/kernel.h>
     34 #include <sys/socket.h>
     35 #include <sys/device.h>
     36 #include <sys/endian.h>
     37 #include <sys/callout.h>
     38 #include <sys/workqueue.h>
     39 
     40 #include <net/if.h>
     41 
     42 #include <net/if_dl.h>
     43 #include <net/if_ether.h>
     44 
     45 #include <net/if_media.h>
     46 
     47 #include <netinet/in.h>
     49 
     50 #if NBPFILTER > 0
     51 #include <net/bpf.h>
     52 #endif
     53 
     54 #include <sys/bus.h>
     55 #include <machine/intr.h>
     56 
     57 #include <dev/mii/mii.h>
     58 
     59 #include <dev/pci/pcivar.h>
     60 #include <dev/pci/pcireg.h>
     61 #include <dev/pci/pcidevs.h>
     62 
     63 #include <dev/pci/if_rgereg.h>
     64 
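/*
 * This driver is a port of OpenBSD's rge(4); the block below maps a
 * few OpenBSD-only names onto their NetBSD equivalents so the bulk of
 * the code can stay close to the original.
 */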
     65 #ifdef __NetBSD__
#define letoh32 	le32toh
     67 #define nitems(x) 	__arraycount(x)
     68 #define MBUF_LIST_INITIALIZER() 	{ NULL, NULL, 0 }
     69 struct mbuf_list {
     70 	struct mbuf 	*ml_head;
     71 	struct mbuf 	*ml_tail;
     72 	u_int 	ml_len;
     73 };
     74 #ifdef NET_MPSAFE
     75 #define 	RGE_MPSAFE	1
     76 #define 	CALLOUT_FLAGS	CALLOUT_MPSAFE
     77 #else
     78 #define 	CALLOUT_FLAGS	0
     79 #endif
     80 #endif
     81 
     82 static int		rge_match(device_t, cfdata_t, void *);
     83 static void		rge_attach(device_t, device_t, void *);
     84 int		rge_intr(void *);
     85 int		rge_encap(struct rge_softc *, struct mbuf *, int);
     86 int		rge_ioctl(struct ifnet *, u_long, void *);
     87 void		rge_start(struct ifnet *);
     88 void		rge_watchdog(struct ifnet *);
     89 int		rge_init(struct ifnet *);
     90 void		rge_stop(struct ifnet *);
     91 int		rge_ifmedia_upd(struct ifnet *);
     92 void		rge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
     93 int		rge_allocmem(struct rge_softc *);
     94 int		rge_newbuf(struct rge_softc *, int);
     95 void		rge_discard_rxbuf(struct rge_softc *, int);
     96 int		rge_rx_list_init(struct rge_softc *);
     97 void		rge_tx_list_init(struct rge_softc *);
     98 int		rge_rxeof(struct rge_softc *);
     99 int		rge_txeof(struct rge_softc *);
    100 void		rge_reset(struct rge_softc *);
    101 void		rge_iff(struct rge_softc *);
    102 void		rge_set_phy_power(struct rge_softc *, int);
    103 void		rge_phy_config(struct rge_softc *);
    104 void		rge_set_macaddr(struct rge_softc *, const uint8_t *);
    105 void		rge_get_macaddr(struct rge_softc *, uint8_t *);
    106 void		rge_hw_init(struct rge_softc *);
    107 void		rge_disable_phy_ocp_pwrsave(struct rge_softc *);
    108 void		rge_patch_phy_mcu(struct rge_softc *, int);
    109 void		rge_add_media_types(struct rge_softc *);
    110 void		rge_config_imtype(struct rge_softc *, int);
    111 void		rge_disable_sim_im(struct rge_softc *);
    112 void		rge_setup_sim_im(struct rge_softc *);
    113 void		rge_setup_intr(struct rge_softc *, int);
    114 void		rge_exit_oob(struct rge_softc *);
    115 void		rge_write_csi(struct rge_softc *, uint32_t, uint32_t);
    116 uint32_t	rge_read_csi(struct rge_softc *, uint32_t);
    117 void		rge_write_mac_ocp(struct rge_softc *, uint16_t, uint16_t);
    118 uint16_t	rge_read_mac_ocp(struct rge_softc *, uint16_t);
    119 void		rge_write_ephy(struct rge_softc *, uint16_t, uint16_t);
    120 void		rge_write_phy(struct rge_softc *, uint16_t, uint16_t, uint16_t);
    121 void		rge_write_phy_ocp(struct rge_softc *, uint16_t, uint16_t);
    122 uint16_t	rge_read_phy_ocp(struct rge_softc *, uint16_t);
    123 int		rge_get_link_status(struct rge_softc *);
    124 void		rge_txstart(struct work *, void *);
    125 void		rge_tick(void *);
    126 void		rge_link_state(struct rge_softc *);
    127 
    128 static const struct {
    129 	uint16_t reg;
    130 	uint16_t val;
    131 }  rtl8125_def_bps[] = {
    132 	RTL8125_DEF_BPS
    133 }, rtl8125_mac_cfg2_ephy[] = {
    134 	RTL8125_MAC_CFG2_EPHY
    135 }, rtl8125_mac_cfg2_mcu[] = {
    136 	RTL8125_MAC_CFG2_MCU
    137 }, rtl8125_mac_cfg3_ephy[] = {
    138 	RTL8125_MAC_CFG3_EPHY
    139 }, rtl8125_mac_cfg3_mcu[] = {
    140 	RTL8125_MAC_CFG3_MCU
    141 };
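
/*
 * Register/value scripts expanded from the RTL8125_* macro tables in
 * if_rgereg.h: "def_bps" seeds the MAC MCU in rge_hw_init(), the
 * "_ephy" tables tune the PCIe EPHY, and the "_mcu" tables carry PHY
 * microcode patches written through the OCP window in rge_phy_config().
 */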
    142 
    143 CFATTACH_DECL_NEW(rge, sizeof(struct rge_softc), rge_match, rge_attach,
    144 		NULL, NULL); /* Sevan - detach function? */
    145 
    146 extern struct cfdriver rge_cd;
    147 
    148 static const struct {
    149 	pci_vendor_id_t 	vendor;
    150 	pci_product_id_t 	product;
} rge_devices[] = {
    152 	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_E3000 },
    153 	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8125 },
    154 };
    155 
    156 static int
    157 rge_match(device_t parent, cfdata_t match, void *aux)
    158 {
	struct pci_attach_args *pa = aux;
	int n;

	for (n = 0; n < __arraycount(rge_devices); n++) {
    163 		if (PCI_VENDOR(pa->pa_id) == rge_devices[n].vendor &&
    164 		    PCI_PRODUCT(pa->pa_id) == rge_devices[n].product)
    165 			return 1;
    166 	}
    167 
    168 	return 0;
    169 }
    170 
static void
rge_attach(device_t parent, device_t self, void *aux)
{
	/*
	 * XXX With CFATTACH_DECL_NEW the softc must come from
	 * device_private(self), not from casting the device_t itself.
	 * (The sc_dev.dv_xname uses below still want conversion to
	 * device_xname().)
	 */
	struct rge_softc *sc = device_private(self);
    175 	struct pci_attach_args *pa = aux;
    176 	pci_chipset_tag_t pc = pa->pa_pc;
    177 	pci_intr_handle_t ih;
    178 	char intrbuf[PCI_INTRSTR_LEN];
    179 	const char *intrstr = NULL;
    180 	struct ifnet *ifp;
    181 	pcireg_t reg;
    182 	uint32_t hwrev;
    183 	uint8_t eaddr[ETHER_ADDR_LEN];
    184 	int offset;
    185 
    186 	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
    187 
    188 	/*
    189 	 * Map control/status registers.
    190 	 */
    191 	if (pci_mapreg_map(pa, RGE_PCI_BAR2, PCI_MAPREG_TYPE_MEM |
    192 	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->rge_btag, &sc->rge_bhandle,
    193 	    NULL, &sc->rge_bsize)) {
    194 		if (pci_mapreg_map(pa, RGE_PCI_BAR1, PCI_MAPREG_TYPE_MEM |
    195 		    PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->rge_btag,
    196 		    &sc->rge_bhandle, NULL, &sc->rge_bsize)) {
    197 			if (pci_mapreg_map(pa, RGE_PCI_BAR0, PCI_MAPREG_TYPE_IO,
    198 			    0, &sc->rge_btag, &sc->rge_bhandle, NULL,
    199 			    &sc->rge_bsize)) {
    200 				printf(": can't map mem or i/o space\n");
    201 				return;
    202 			}
    203 		}
    204 	}
    205 
	/*
	 * Map the interrupt.  XXX The OpenBSD driver tries MSI first via
	 * pci_intr_map_msi(); a single pci_intr_map() is used here, and a
	 * successful mapping keeps setting RGE_FLAG_MSI to preserve the
	 * behaviour of the original port.
	 */
	if (pci_intr_map(pa, &ih) != 0) {
		printf(": couldn't map interrupt\n");
		return;
	}
	sc->rge_flags |= RGE_FLAG_MSI;
    215 	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
    216 	sc->sc_ih = pci_intr_establish_xname(pc, ih, IPL_NET, rge_intr,
    217 	    sc, sc->sc_dev.dv_xname);
    218 	if (sc->sc_ih == NULL) {
    219 		printf(": couldn't establish interrupt");
    220 		if (intrstr != NULL)
    221 			printf(" at %s", intrstr);
    222 		printf("\n");
    223 		return;
    224 	}
    225 	printf(": %s", intrstr);
    226 
    227 	sc->sc_dmat = pa->pa_dmat;
    228 	sc->sc_pc = pa->pa_pc;
    229 	sc->sc_tag = pa->pa_tag;
    230 
    231 	/* Determine hardware revision */
    232 	hwrev = RGE_READ_4(sc, RGE_TXCFG) & RGE_TXCFG_HWREV;
    233 	switch (hwrev) {
    234 	case 0x60800000:
    235 		sc->rge_type = MAC_CFG2;
    236 		break;
    237 	case 0x60900000:
    238 		sc->rge_type = MAC_CFG3;
    239 		break;
    240 	default:
    241 		printf(": unknown version 0x%08x\n", hwrev);
    242 		return;
    243 	}
    244 
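	/*
	 * Default to simulated interrupt moderation: rge_intr() lets a
	 * hardware timer batch RX/TX completions instead of taking one
	 * interrupt per packet (see rge_setup_intr() below).
	 */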
    245 	rge_config_imtype(sc, RGE_IMTYPE_SIM);
    246 
    247 	/*
    248 	 * PCI Express check.
    249 	 */
    250 	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
    251 	    &offset, NULL)) {
    252 		/* Disable PCIe ASPM. */
    253 		reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
    254 		    offset + PCIE_LCSR);
		reg &= ~(PCIE_LCSR_ASPM_L0S | PCIE_LCSR_ASPM_L1);
    256 		pci_conf_write(pa->pa_pc, pa->pa_tag, offset + PCIE_LCSR,
    257 		    reg);
    258 	}
    259 
    260 	rge_exit_oob(sc);
    261 	rge_hw_init(sc);
    262 
    263 	rge_get_macaddr(sc, eaddr);
    264 	printf(", address %s\n", ether_sprintf(eaddr));
    265 
    266 	memcpy(sc->sc_enaddr, eaddr, ETHER_ADDR_LEN);
    267 
    268 	rge_set_phy_power(sc, 1);
    269 	rge_phy_config(sc);
    270 
    271 	if (rge_allocmem(sc))
    272 		return;
    273 
    274 	ifp = &sc->sc_ec.ec_if;
    275 	ifp->if_softc = sc;
    276 	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
    277 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    278 #ifdef RGE_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
    280 #endif
    281 	ifp->if_ioctl = rge_ioctl;
    282 	ifp->if_start = rge_start;
    283 	ifp->if_watchdog = rge_watchdog;
    284 	IFQ_SET_MAXLEN(&ifp->if_snd, RGE_TX_LIST_CNT);
    285 	ifp->if_mtu = RGE_JUMBO_MTU;
    286 
	/*
	 * ETHERCAP_* flags describe the ethercom, not the ifnet; keep
	 * if_capabilities to the IFCAP_* checksum offloads only.
	 */
	ifp->if_capabilities = IFCAP_CSUM_IPv4_Rx | IFCAP_CSUM_IPv4_Tx |
	    IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_TCPv4_Tx |
	    IFCAP_CSUM_UDPv4_Rx | IFCAP_CSUM_UDPv4_Tx;

	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
#if NVLAN > 0
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
#endif
    294 
    295 	callout_init(&sc->sc_timeout, CALLOUT_FLAGS);
    296 	callout_setfunc(&sc->sc_timeout, rge_tick, sc);
	/*
	 * XXX OpenBSD records the deferred-start callback here with
	 * task_set(&sc->sc_task, rge_txstart, sc).  Invoking rge_txstart()
	 * at attach time would poke the TX-start doorbell before the chip
	 * is initialized, so it is not called here.
	 */
    298 
    299 	/* Initialize ifmedia structures. */
    300 	ifmedia_init(&sc->sc_media, IFM_IMASK, rge_ifmedia_upd,
    301 	    rge_ifmedia_sts);
    302 	rge_add_media_types(sc);
    303 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
    304 	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
    305 	sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media;
    306 
	/*
	 * XXX Create the per-CPU input queue used by rge_rxeof(); this
	 * needs the split if_initialize()/if_register() attach sequence.
	 */
	if_initialize(ifp);
	ifp->if_percpuq = if_percpuq_create(ifp);
	ether_ifattach(ifp, eaddr);
	if_register(ifp);
    309 }
    310 
    311 int
    312 rge_intr(void *arg)
    313 {
    314 	struct rge_softc *sc = arg;
    315 	struct ifnet *ifp = &sc->sc_ec.ec_if;
    316 	uint32_t status;
    317 	int claimed = 0, rx, tx;
    318 
    319 	if (!(ifp->if_flags & IFF_RUNNING))
    320 		return (0);
    321 
    322 	/* Disable interrupts. */
    323 	RGE_WRITE_4(sc, RGE_IMR, 0);
    324 
    325 	status = RGE_READ_4(sc, RGE_ISR);
    326 	if (!(sc->rge_flags & RGE_FLAG_MSI)) {
    327 		if ((status & RGE_INTRS) == 0 || status == 0xffffffff)
    328 			return (0);
    329 	}
    330 	if (status)
    331 		RGE_WRITE_4(sc, RGE_ISR, status);
    332 
    333 	if (status & RGE_ISR_PCS_TIMEOUT)
    334 		claimed = 1;
    335 
    336 	rx = tx = 0;
    337 	if (status & RGE_INTRS) {
    338 		if (status &
    339 		    (sc->rge_rx_ack | RGE_ISR_RX_ERR | RGE_ISR_RX_FIFO_OFLOW)) {
    340 			rx |= rge_rxeof(sc);
    341 			claimed = 1;
    342 		}
    343 
    344 		if (status & (sc->rge_tx_ack | RGE_ISR_TX_ERR)) {
    345 			tx |= rge_txeof(sc);
    346 			claimed = 1;
    347 		}
    348 
    349 		if (status & RGE_ISR_SYSTEM_ERR) {
    350 			KERNEL_LOCK(1, NULL);
    351 			rge_init(ifp);
    352 			KERNEL_UNLOCK_ONE(NULL);
    353 			claimed = 1;
    354 		}
    355 	}
    356 
    357 	if (sc->rge_timerintr) {
    358 		if ((tx | rx) == 0) {
    359 			/*
    360 			 * Nothing needs to be processed, fallback
    361 			 * to use TX/RX interrupts.
    362 			 */
    363 			rge_setup_intr(sc, RGE_IMTYPE_NONE);
    364 
    365 			/*
    366 			 * Recollect, mainly to avoid the possible
    367 			 * race introduced by changing interrupt
    368 			 * masks.
    369 			 */
    370 			rge_rxeof(sc);
    371 			rge_txeof(sc);
    372 		} else
    373 			RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
    374 	} else if (tx | rx) {
    375 		/*
    376 		 * Assume that using simulated interrupt moderation
    377 		 * (hardware timer based) could reduce the interrupt
    378 		 * rate.
    379 		 */
    380 		rge_setup_intr(sc, RGE_IMTYPE_SIM);
    381 	}
    382 
    383 	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
    384 
    385 	return (claimed);
    386 }
    387 
    388 int
    389 rge_encap(struct rge_softc *sc, struct mbuf *m, int idx)
    390 {
    391 	struct rge_tx_desc *d = NULL;
    392 	struct rge_txq *txq;
    393 	bus_dmamap_t txmap;
    394 	uint32_t cmdsts, cflags = 0;
    395 	int cur, error, i, last, nsegs;
    396 
    397 	/*
    398 	 * Set RGE_TDEXTSTS_IPCSUM if any checksum offloading is requested.
    399 	 * Otherwise, RGE_TDEXTSTS_TCPCSUM / RGE_TDEXTSTS_UDPCSUM does not
	 * take effect.
    401 	 */
    402 	if ((m->m_pkthdr.csum_flags &
    403 	    (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) != 0) {
    404 		cflags |= RGE_TDEXTSTS_IPCSUM;
		if (m->m_pkthdr.csum_flags & M_CSUM_TCPv4)
			cflags |= RGE_TDEXTSTS_TCPCSUM;
		if (m->m_pkthdr.csum_flags & M_CSUM_UDPv4)
			cflags |= RGE_TDEXTSTS_UDPCSUM;
    409 	}
    410 
    411 	txq = &sc->rge_ldata.rge_txq[idx];
    412 	txmap = txq->txq_dmamap;
    413 
    414 	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m, BUS_DMA_NOWAIT);
    415 	switch (error) {
    416 	case 0:
    417 		break;
    418 	case EFBIG: /* mbuf chain is too fragmented */
    419 		if (m_defrag(m, M_DONTWAIT) == 0 &&
    420 		    bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m,
    421 		    BUS_DMA_NOWAIT) == 0)
    422 			break;
    423 
    424 		/* FALLTHROUGH */
    425 	default:
    426 		return (0);
    427 	}
    428 
    429 	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
    430 	    BUS_DMASYNC_PREWRITE);
    431 
    432 	nsegs = txmap->dm_nsegs;
    433 
	/* Set up hardware VLAN tagging. */
#if NVLAN > 0
	/* XXX NetBSD carries the tag via the vlan(4) accessors. */
	if (vlan_has_tag(m))
		cflags |= bswap16(vlan_get_tag(m) | RGE_TDEXTSTS_VTAG);
#endif
    439 
    440 	cur = idx;
    441 	cmdsts = RGE_TDCMDSTS_SOF;
    442 
    443 	for (i = 0; i < txmap->dm_nsegs; i++) {
    444 		d = &sc->rge_ldata.rge_tx_list[cur];
    445 
    446 		d->rge_extsts = htole32(cflags);
    447 		d->rge_addrlo = htole32(RGE_ADDR_LO(txmap->dm_segs[i].ds_addr));
    448 		d->rge_addrhi = htole32(RGE_ADDR_HI(txmap->dm_segs[i].ds_addr));
    449 
    450 		cmdsts |= txmap->dm_segs[i].ds_len;
    451 
    452 		if (cur == RGE_TX_LIST_CNT - 1)
    453 			cmdsts |= RGE_TDCMDSTS_EOR;
    454 
    455 		d->rge_cmdsts = htole32(cmdsts);
    456 
    457 		last = cur;
    458 		cmdsts = RGE_TDCMDSTS_OWN;
    459 		cur = RGE_NEXT_TX_DESC(cur);
    460 	}
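
	/*
	 * Ownership protocol: every descriptor after the first is handed
	 * to the chip (OWN set) as the chain is built, but the first
	 * descriptor's OWN bit is only set below, once EOF is in place,
	 * so the chip never sees a half-constructed packet.
	 */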
    461 
    462 	/* Set EOF on the last descriptor. */
    463 	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_EOF);
    464 
    465 	/* Transfer ownership of packet to the chip. */
    466 	d = &sc->rge_ldata.rge_tx_list[idx];
    467 
    468 	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_OWN);
    469 
    470 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
    471 	    cur * sizeof(struct rge_tx_desc), sizeof(struct rge_tx_desc),
    472 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    473 
    474 	/* Update info of TX queue and descriptors. */
    475 	txq->txq_mbuf = m;
    476 	txq->txq_descidx = last;
    477 
    478 	return (nsegs);
    479 }
    480 
    481 int
    482 rge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
    483 {
    484 	struct rge_softc *sc = ifp->if_softc;
    485 	struct ifreq *ifr = (struct ifreq *)data;
    486 	int s, error = 0;
    487 
    488 	s = splnet();
    489 
    490 	switch (cmd) {
    491 	case SIOCSIFADDR:
    492 		ifp->if_flags |= IFF_UP;
    493 		if (!(ifp->if_flags & IFF_RUNNING))
    494 			rge_init(ifp);
    495 		break;
    496 	case SIOCSIFFLAGS:
    497 		if (ifp->if_flags & IFF_UP) {
    498 			if (ifp->if_flags & IFF_RUNNING)
    499 				error = ENETRESET;
    500 			else
    501 				rge_init(ifp);
    502 		} else {
    503 			if (ifp->if_flags & IFF_RUNNING)
    504 				rge_stop(ifp);
    505 		}
    506 		break;
    507 	case SIOCGIFMEDIA:
    508 	case SIOCSIFMEDIA:
    509 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
    510 		break;
    511 	case SIOCSIFMTU:
		/* XXX OpenBSD checks if_hardmtu here; bound by the jumbo MTU. */
		if (ifr->ifr_mtu > RGE_JUMBO_MTU) {
    513 			error = EINVAL;
    514 			break;
    515 		}
    516 		ifp->if_mtu = ifr->ifr_mtu;
    517 		break;
    518 	default:
    519 		error = ether_ioctl(ifp, cmd, data);
    520 	}
    521 
    522 	if (error == ENETRESET) {
    523 		if (ifp->if_flags & IFF_RUNNING)
    524 			rge_iff(sc);
    525 		error = 0;
    526 	}
    527 
    528 	splx(s);
    529 	return (error);
    530 }
    531 
    532 void
    533 rge_start(struct ifnet *ifp)
    534 {
    535 	struct rge_softc *sc = ifp->if_softc;
    536 	struct mbuf *m;
    537 	int free, idx, used;
    538 	int queued = 0;
    539 
    540 #define LINK_STATE_IS_UP(_s)    \
    541 	((_s) >= LINK_STATE_UP || (_s) == LINK_STATE_UNKNOWN)
    542 
    543 	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
		IFQ_PURGE(&ifp->if_snd);
    545 		return;
    546 	}
    547 
    548 	/* Calculate free space. */
    549 	idx = sc->rge_ldata.rge_txq_prodidx;
    550 	free = sc->rge_ldata.rge_txq_considx;
    551 	if (free <= idx)
    552 		free += RGE_TX_LIST_CNT;
    553 	free -= idx;
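
	/*
	 * "free" is now (considx - prodidx) mod RGE_TX_LIST_CNT, i.e. the
	 * number of descriptors available to the producer; an empty ring
	 * counts as RGE_TX_LIST_CNT free descriptors.
	 */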
    554 
    555 	for (;;) {
    556 		if (RGE_TX_NSEGS >= free + 2) {
    557 			SET(ifp->if_flags, IFF_OACTIVE);
    558 			break;
    559 		}
    560 
    561 		IFQ_DEQUEUE(&ifp->if_snd, m);
    562 		if (m == NULL)
    563 			break;
    564 
    565 		used = rge_encap(sc, m, idx);
    566 		if (used == 0) {
    567 			m_freem(m);
    568 			continue;
    569 		}
    570 
    571 		KASSERT(used <= free);
    572 		free -= used;
    573 
#if NBPFILTER > 0
		/* XXX NetBSD's tap is bpf_mtap(ifp, m, dir), not bpf_mtap_ether(). */
		if (ifp->if_bpf)
			bpf_mtap(ifp, m, BPF_D_OUT);
#endif
    578 
    579 		idx += used;
    580 		if (idx >= RGE_TX_LIST_CNT)
    581 			idx -= RGE_TX_LIST_CNT;
    582 
    583 		queued++;
    584 	}
    585 
    586 	if (queued == 0)
    587 		return;
    588 
    589 	/* Set a timeout in case the chip goes out to lunch. */
    590 	ifp->if_timer = 5;
    591 
    592 	sc->rge_ldata.rge_txq_prodidx = idx;
	/* XXX OpenBSD defers this through ifq_serialize(); kick TX directly. */
	rge_txstart(&sc->sc_task, sc);
    594 }
    595 
    596 void
    597 rge_watchdog(struct ifnet *ifp)
    598 {
    599 	struct rge_softc *sc = ifp->if_softc;
    600 
    601 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
    602 	if_statinc(ifp, if_oerrors);
    603 
    604 	rge_init(ifp);
    605 }
    606 
    607 int
    608 rge_init(struct ifnet *ifp)
    609 {
    610 	struct rge_softc *sc = ifp->if_softc;
    611 	uint32_t val;
    612 	uint16_t max_frame_size;
    613 	int i;
    614 
    615 	rge_stop(ifp);
    616 
    617 	/* Set MAC address. */
    618 	rge_set_macaddr(sc, sc->sc_enaddr);
    619 
	/* Set maximum frame size, but don't let the MTU drop below ETHERMTU. */
    621 	if (ifp->if_mtu < ETHERMTU)
    622 		max_frame_size = ETHERMTU;
    623 	else
    624 		max_frame_size = ifp->if_mtu;
    625 
    626 	max_frame_size += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
    627 	    ETHER_CRC_LEN + 1;
    628 
    629 	if (max_frame_size > RGE_JUMBO_FRAMELEN)
    630 		max_frame_size -= 1;
    631 
    632 	RGE_WRITE_2(sc, RGE_RXMAXSIZE, max_frame_size);
    633 
    634 	/* Initialize RX descriptors list. */
    635 	if (rge_rx_list_init(sc) == ENOBUFS) {
    636 		printf("%s: init failed: no memory for RX buffers\n",
    637 		    sc->sc_dev.dv_xname);
    638 		rge_stop(ifp);
    639 		return (ENOBUFS);
    640 	}
    641 
    642 	/* Initialize TX descriptors. */
    643 	rge_tx_list_init(sc);
    644 
    645 	/* Load the addresses of the RX and TX lists into the chip. */
    646 	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_LO,
    647 	    RGE_ADDR_LO(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
    648 	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_HI,
    649 	    RGE_ADDR_HI(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
    650 	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_LO,
    651 	    RGE_ADDR_LO(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
    652 	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_HI,
    653 	    RGE_ADDR_HI(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
    654 
    655 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
    656 
    657 	RGE_CLRBIT_1(sc, 0xf1, 0x80);
    658 	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
    659 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
    660 	RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_RDY_TO_L23);
    661 
    662 	/* Clear interrupt moderation timer. */
    663 	for (i = 0; i < 64; i++)
    664 		RGE_WRITE_4(sc, RGE_IM(i), 0);
    665 
    666 	/* Set the initial RX and TX configurations. */
    667 	RGE_WRITE_4(sc, RGE_RXCFG, RGE_RXCFG_CONFIG);
    668 	RGE_WRITE_4(sc, RGE_TXCFG, RGE_TXCFG_CONFIG);
    669 
    670 	val = rge_read_csi(sc, 0x70c) & ~0xff000000;
    671 	rge_write_csi(sc, 0x70c, val | 0x27000000);
    672 
    673 	/* Enable hardware optimization function. */
    674 	val = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x78) & ~0x00007000;
    675 	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x78, val | 0x00005000);
    676 
    677 	RGE_WRITE_2(sc, 0x0382, 0x221b);
    678 	RGE_WRITE_1(sc, 0x4500, 0);
    679 	RGE_WRITE_2(sc, 0x4800, 0);
    680 	RGE_CLRBIT_1(sc, RGE_CFG1, RGE_CFG1_SPEED_DOWN);
    681 
    682 	rge_write_mac_ocp(sc, 0xc140, 0xffff);
    683 	rge_write_mac_ocp(sc, 0xc142, 0xffff);
    684 
    685 	val = rge_read_mac_ocp(sc, 0xd3e2) & ~0x0fff;
    686 	rge_write_mac_ocp(sc, 0xd3e2, val | 0x03a9);
    687 
    688 	RGE_MAC_CLRBIT(sc, 0xd3e4, 0x00ff);
    689 	RGE_MAC_SETBIT(sc, 0xe860, 0x0080);
    690 	RGE_MAC_SETBIT(sc, 0xeb58, 0x0001);
    691 
    692 	val = rge_read_mac_ocp(sc, 0xe614) & ~0x0700;
    693 	rge_write_mac_ocp(sc, 0xe614, val | 0x0400);
    694 
    695 	RGE_MAC_CLRBIT(sc, 0xe63e, 0x0c00);
    696 
    697 	val = rge_read_mac_ocp(sc, 0xe63e) & ~0x0030;
    698 	rge_write_mac_ocp(sc, 0xe63e, val | 0x0020);
    699 
    700 	RGE_MAC_SETBIT(sc, 0xc0b4, 0x000c);
    701 
    702 	val = rge_read_mac_ocp(sc, 0xeb6a) & ~0x007f;
    703 	rge_write_mac_ocp(sc, 0xeb6a, val | 0x0033);
    704 
    705 	val = rge_read_mac_ocp(sc, 0xeb50) & ~0x03e0;
    706 	rge_write_mac_ocp(sc, 0xeb50, val | 0x0040);
    707 
    708 	val = rge_read_mac_ocp(sc, 0xe056) & ~0x00f0;
    709 	rge_write_mac_ocp(sc, 0xe056, val | 0x0030);
    710 
    711 	RGE_WRITE_1(sc, RGE_TDFNR, 0x10);
    712 
    713 	RGE_MAC_CLRBIT(sc, 0xe040, 0x1000);
    714 
    715 	val = rge_read_mac_ocp(sc, 0xe0c0) & ~0x4f0f;
    716 	rge_write_mac_ocp(sc, 0xe0c0, val | 0x4403);
    717 
    718 	RGE_MAC_SETBIT(sc, 0xe052, 0x0068);
    719 	RGE_MAC_CLRBIT(sc, 0xe052, 0x0080);
    720 
    721 	val = rge_read_mac_ocp(sc, 0xc0ac) & ~0x0080;
    722 	rge_write_mac_ocp(sc, 0xc0ac, val | 0x1f00);
    723 
    724 	val = rge_read_mac_ocp(sc, 0xd430) & ~0x0fff;
    725 	rge_write_mac_ocp(sc, 0xd430, val | 0x047f);
    726 
    727 	RGE_MAC_SETBIT(sc, 0xe84c, 0x00c0);
    728 
    729 	/* Disable EEE plus. */
    730 	RGE_MAC_CLRBIT(sc, 0xe080, 0x0002);
    731 
    732 	RGE_MAC_CLRBIT(sc, 0xea1c, 0x0004);
    733 
    734 	RGE_MAC_SETBIT(sc, 0xeb54, 0x0001);
    735 	DELAY(1);
    736 	RGE_MAC_CLRBIT(sc, 0xeb54, 0x0001);
    737 
    738 	RGE_CLRBIT_4(sc, 0x1880, 0x0030);
    739 
    740 	rge_write_mac_ocp(sc, 0xe098, 0xc302);
    741 
	if (sc->sc_ec.ec_capabilities & ETHERCAP_VLAN_HWTAGGING)
    743 		RGE_SETBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);
    744 
    745 	RGE_SETBIT_2(sc, RGE_CPLUSCMD, RGE_CPLUSCMD_RXCSUM);
    746 
    747 	for (i = 0; i < 10; i++) {
    748 		if (!(rge_read_mac_ocp(sc, 0xe00e) & 0x2000))
    749 			break;
    750 		DELAY(1000);
    751 	}
    752 
    753 	/* Disable RXDV gate. */
    754 	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
    755 	DELAY(2000);
    756 
    757 	rge_ifmedia_upd(ifp);
    758 
    759 	/* Enable transmit and receive. */
    760 	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_TXENB | RGE_CMD_RXENB);
    761 
    762 	/* Program promiscuous mode and multicast filters. */
    763 	rge_iff(sc);
    764 
    765 	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
    766 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
    767 
    768 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
    769 
    770 	/* Enable interrupts. */
    771 	rge_setup_intr(sc, RGE_IMTYPE_SIM);
    772 
    773 	ifp->if_flags |= IFF_RUNNING;
    774 	CLR(ifp->if_flags, IFF_OACTIVE);
    775 
    776 	callout_schedule(&sc->sc_timeout, 1);
    777 
    778 	return (0);
    779 }
    780 
    781 /*
    782  * Stop the adapter and free any mbufs allocated to the RX and TX lists.
    783  */
    784 void
    785 rge_stop(struct ifnet *ifp)
    786 {
    787 	struct rge_softc *sc = ifp->if_softc;
    788 	int i;
    789 
	callout_stop(&sc->sc_timeout);
    791 
    792 	ifp->if_timer = 0;
    793 	ifp->if_flags &= ~IFF_RUNNING;
    794 	sc->rge_timerintr = 0;
    795 
    796 	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
    797 	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
    798 	    RGE_RXCFG_ERRPKT);
    799 
    800 	RGE_WRITE_4(sc, RGE_IMR, 0);
    801 	RGE_WRITE_4(sc, RGE_ISR, 0xffffffff);
    802 
    803 	rge_reset(sc);
    804 
    805 	intr_barrier(sc->sc_ih);
    806 	ifq_barrier(&ifp->if_snd);
    807 /*	ifq_clr_oactive(&ifp->if_snd); Sevan - OpenBSD queue API */
    808 
    809 	if (sc->rge_head != NULL) {
    810 		m_freem(sc->rge_head);
    811 		sc->rge_head = sc->rge_tail = NULL;
    812 	}
    813 
    814 	/* Free the TX list buffers. */
    815 	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
    816 		if (sc->rge_ldata.rge_txq[i].txq_mbuf != NULL) {
    817 			bus_dmamap_unload(sc->sc_dmat,
    818 			    sc->rge_ldata.rge_txq[i].txq_dmamap);
    819 			m_freem(sc->rge_ldata.rge_txq[i].txq_mbuf);
    820 			sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
    821 		}
    822 	}
    823 
    824 	/* Free the RX list buffers. */
    825 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
    826 		if (sc->rge_ldata.rge_rxq[i].rxq_mbuf != NULL) {
    827 			bus_dmamap_unload(sc->sc_dmat,
    828 			    sc->rge_ldata.rge_rxq[i].rxq_dmamap);
    829 			m_freem(sc->rge_ldata.rge_rxq[i].rxq_mbuf);
    830 			sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
    831 		}
    832 	}
    833 }
    834 
    835 /*
    836  * Set media options.
    837  */
    838 int
    839 rge_ifmedia_upd(struct ifnet *ifp)
    840 {
    841 	struct rge_softc *sc = ifp->if_softc;
    842 	struct ifmedia *ifm = &sc->sc_media;
    843 	int anar, gig, val;
    844 
    845 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
    846 		return (EINVAL);
    847 
    848 	/* Disable Gigabit Lite. */
    849 	RGE_PHY_CLRBIT(sc, 0xa428, 0x0200);
    850 	RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0001);
    851 
    852 	val = rge_read_phy_ocp(sc, 0xa5d4);
    853 	val &= ~RGE_ADV_2500TFDX;
    854 
    855 	anar = gig = 0;
    856 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
    857 	case IFM_AUTO:
    858 		anar |= ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
    859 		gig |= GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
    860 		val |= RGE_ADV_2500TFDX;
    861 		break;
    862 	case IFM_2500_T:
    863 		anar |= ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
    864 		gig |= GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
    865 		val |= RGE_ADV_2500TFDX;
    866 		ifp->if_baudrate = IF_Mbps(2500);
    867 		break;
    868 	case IFM_1000_T:
    869 		anar |= ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
    870 		gig |= GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
    871 		ifp->if_baudrate = IF_Gbps(1);
    872 		break;
    873 	case IFM_100_TX:
    874 		anar |= ANAR_TX | ANAR_TX_FD;
    875 		ifp->if_baudrate = IF_Mbps(100);
    876 		break;
    877 	case IFM_10_T:
    878 		anar |= ANAR_10 | ANAR_10_FD;
    879 		ifp->if_baudrate = IF_Mbps(10);
    880 		break;
    881 	default:
    882 		printf("%s: unsupported media type\n", sc->sc_dev.dv_xname);
    883 		return (EINVAL);
    884 	}
    885 
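	/*
	 * Advertisement is spread across three registers: ANAR covers
	 * 10/100, MII_100T2CR (gig) covers 1000BASE-T, and the vendor
	 * OCP register 0xa5d4 carries the 2500BASE-T bit.
	 */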
    886 	rge_write_phy(sc, 0, MII_ANAR, anar | ANAR_PAUSE_ASYM | ANAR_FC);
    887 	rge_write_phy(sc, 0, MII_100T2CR, gig);
    888 	rge_write_phy_ocp(sc, 0xa5d4, val);
    889 	rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_STARTNEG);
    890 
    891 	return (0);
    892 }
    893 
    894 /*
    895  * Report current media status.
    896  */
    897 void
    898 rge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
    899 {
    900 	struct rge_softc *sc = ifp->if_softc;
    901 	uint16_t status = 0;
    902 
    903 	ifmr->ifm_status = IFM_AVALID;
    904 	ifmr->ifm_active = IFM_ETHER;
    905 
    906 	if (rge_get_link_status(sc)) {
    907 		ifmr->ifm_status |= IFM_ACTIVE;
    908 
    909 		status = RGE_READ_2(sc, RGE_PHYSTAT);
    910 		if ((status & RGE_PHYSTAT_FDX) ||
    911 		    (status & RGE_PHYSTAT_2500MBPS))
    912 			ifmr->ifm_active |= IFM_FDX;
    913 		else
    914 			ifmr->ifm_active |= IFM_HDX;
    915 
    916 		if (status & RGE_PHYSTAT_10MBPS)
    917 			ifmr->ifm_active |= IFM_10_T;
    918 		else if (status & RGE_PHYSTAT_100MBPS)
    919 			ifmr->ifm_active |= IFM_100_TX;
    920 		else if (status & RGE_PHYSTAT_1000MBPS)
    921 			ifmr->ifm_active |= IFM_1000_T;
    922 		else if (status & RGE_PHYSTAT_2500MBPS)
    923 			ifmr->ifm_active |= IFM_2500_T;
    924 	}
    925 }
    926 
    927 /*
    928  * Allocate memory for RX/TX rings.
    929  */
    930 int
    931 rge_allocmem(struct rge_softc *sc)
    932 {
    933 	int error, i;
    934 
    935 	/* Allocate DMA'able memory for the TX ring. */
    936 	error = bus_dmamap_create(sc->sc_dmat, RGE_TX_LIST_SZ, 1,
    937 	    RGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT, &sc->rge_ldata.rge_tx_list_map);
    938 	if (error) {
    939 		printf("%s: can't create TX list map\n", sc->sc_dev.dv_xname);
    940 		return (error);
    941 	}
    942 	error = bus_dmamem_alloc(sc->sc_dmat, RGE_TX_LIST_SZ, RGE_ALIGN, 0,
    943 	    &sc->rge_ldata.rge_tx_listseg, 1, &sc->rge_ldata.rge_tx_listnseg,
    944 	    BUS_DMA_NOWAIT); /* XXX OpenBSD adds BUS_DMA_ZERO */
    945 	if (error) {
    946 		printf("%s: can't alloc TX list\n", sc->sc_dev.dv_xname);
    947 		return (error);
    948 	}
    949 
    950 	/* Load the map for the TX ring. */
    951 	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
    952 	    sc->rge_ldata.rge_tx_listnseg, RGE_TX_LIST_SZ,
    953 	    &sc->rge_ldata.rge_tx_list,
    954 	    BUS_DMA_NOWAIT); /* XXX OpenBSD adds BUS_DMA_COHERENT */
    955 	if (error) {
    956 		printf("%s: can't map TX dma buffers\n", sc->sc_dev.dv_xname);
    957 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
    958 		    sc->rge_ldata.rge_tx_listnseg);
    959 		return (error);
    960 	}
    961 	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
    962 	    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
    963 	if (error) {
    964 		printf("%s: can't load TX dma map\n", sc->sc_dev.dv_xname);
    965 		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map);
    966 		bus_dmamem_unmap(sc->sc_dmat,
    967 		    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ);
    968 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
    969 		    sc->rge_ldata.rge_tx_listnseg);
    970 		return (error);
    971 	}
    972 
    973 	/* Create DMA maps for TX buffers. */
    974 	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
    975 		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN,
    976 		    RGE_TX_NSEGS, RGE_JUMBO_FRAMELEN, 0, 0,
    977 		    &sc->rge_ldata.rge_txq[i].txq_dmamap);
    978 		if (error) {
    979 			printf("%s: can't create DMA map for TX\n",
    980 			    sc->sc_dev.dv_xname);
    981 			return (error);
    982 		}
    983 	}
    984 
    985 	/* Allocate DMA'able memory for the RX ring. */
    986 	error = bus_dmamap_create(sc->sc_dmat, RGE_RX_LIST_SZ, 1,
    987 	    RGE_RX_LIST_SZ, 0, 0, &sc->rge_ldata.rge_rx_list_map);
    988 	if (error) {
    989 		printf("%s: can't create RX list map\n", sc->sc_dev.dv_xname);
    990 		return (error);
    991 	}
    992 	error = bus_dmamem_alloc(sc->sc_dmat, RGE_RX_LIST_SZ, RGE_ALIGN, 0,
    993 	    &sc->rge_ldata.rge_rx_listseg, 1, &sc->rge_ldata.rge_rx_listnseg,
    994 	    BUS_DMA_NOWAIT);  /* XXX OpenBSD adds BUS_DMA_ZERO */
    995 	if (error) {
    996 		printf("%s: can't alloc RX list\n", sc->sc_dev.dv_xname);
    997 		return (error);
    998 	}
    999 
   1000 	/* Load the map for the RX ring. */
   1001 	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
   1002 	    sc->rge_ldata.rge_rx_listnseg, RGE_RX_LIST_SZ,
   1003 	    &sc->rge_ldata.rge_rx_list,
   1004 	    BUS_DMA_NOWAIT);  /* XXX OpenBSD adds BUS_DMA_COHERENT */
   1005 	if (error) {
   1006 		printf("%s: can't map RX dma buffers\n", sc->sc_dev.dv_xname);
   1007 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
   1008 		    sc->rge_ldata.rge_rx_listnseg);
   1009 		return (error);
   1010 	}
   1011 	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
   1012 	    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
   1013 	if (error) {
   1014 		printf("%s: can't load RX dma map\n", sc->sc_dev.dv_xname);
   1015 		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map);
   1016 		bus_dmamem_unmap(sc->sc_dmat,
   1017 		    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ);
   1018 		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
   1019 		    sc->rge_ldata.rge_rx_listnseg);
   1020 		return (error);
   1021 	}
   1022 
   1023 	/* Create DMA maps for RX buffers. */
   1024 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
   1025 		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN, 1,
   1026 		    RGE_JUMBO_FRAMELEN, 0, 0,
   1027 		    &sc->rge_ldata.rge_rxq[i].rxq_dmamap);
   1028 		if (error) {
   1029 			printf("%s: can't create DMA map for RX\n",
   1030 			    sc->sc_dev.dv_xname);
   1031 			return (error);
   1032 		}
   1033 	}
   1034 
   1035 	return (error);
   1036 }
   1037 
   1038 /*
   1039  * Initialize the RX descriptor and attach an mbuf cluster.
   1040  */
   1041 int
   1042 rge_newbuf(struct rge_softc *sc, int idx)
   1043 {
   1044 	struct mbuf *m;
   1045 	struct rge_rx_desc *r;
   1046 	struct rge_rxq *rxq;
   1047 	bus_dmamap_t rxmap;
   1048 
	/*
	 * XXX MCLGETI() is OpenBSD-only; use an mbuf with a malloc'ed
	 * external buffer big enough for a jumbo frame instead.
	 */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	MEXTMALLOC(m, RGE_JUMBO_FRAMELEN, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}
   1052 
   1053 	m->m_len = m->m_pkthdr.len = RGE_JUMBO_FRAMELEN;
   1054 
   1055 	rxq = &sc->rge_ldata.rge_rxq[idx];
   1056 	rxmap = rxq->rxq_dmamap;
   1057 
   1058 	if (bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m, BUS_DMA_NOWAIT))
   1059 		goto out;
   1060 
   1061 	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
   1062 	    BUS_DMASYNC_PREREAD);
   1063 
   1064 	/* Map the segments into RX descriptors. */
   1065 	r = &sc->rge_ldata.rge_rx_list[idx];
   1066 
   1067 	if (RGE_OWN(r)) {
   1068 		printf("%s: tried to map busy RX descriptor\n",
   1069 		    sc->sc_dev.dv_xname);
   1070 		goto out;
   1071 	}
   1072 
   1073 	rxq->rxq_mbuf = m;
   1074 
   1075 	r->rge_extsts = 0;
   1076 	r->rge_addrlo = htole32(RGE_ADDR_LO(rxmap->dm_segs[0].ds_addr));
   1077 	r->rge_addrhi = htole32(RGE_ADDR_HI(rxmap->dm_segs[0].ds_addr));
   1078 
   1079 	r->rge_cmdsts = htole32(rxmap->dm_segs[0].ds_len);
   1080 	if (idx == RGE_RX_LIST_CNT - 1)
   1081 		r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
   1082 
   1083 	r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);
   1084 
   1085 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
   1086 	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
   1087 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1088 
   1089 	return (0);
   1090 out:
   1091 	if (m != NULL)
   1092 		m_freem(m);
   1093 	return (ENOMEM);
   1094 }
   1095 
   1096 void
   1097 rge_discard_rxbuf(struct rge_softc *sc, int idx)
   1098 {
   1099 	struct rge_rx_desc *r;
   1100 
   1101 	r = &sc->rge_ldata.rge_rx_list[idx];
   1102 
   1103 	r->rge_cmdsts = htole32(RGE_JUMBO_FRAMELEN);
   1104 	r->rge_extsts = 0;
   1105 	if (idx == RGE_RX_LIST_CNT - 1)
   1106 		r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
   1107 	r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);
   1108 
   1109 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
   1110 	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
   1111 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1112 }
   1113 
   1114 int
   1115 rge_rx_list_init(struct rge_softc *sc)
   1116 {
   1117 	int i;
   1118 
   1119 	memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);
   1120 
   1121 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
   1122 		sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
   1123 		if (rge_newbuf(sc, i) == ENOBUFS)
   1124 			return (ENOBUFS);
   1125 	}
   1126 
   1127 	sc->rge_ldata.rge_rxq_prodidx = 0;
   1128 	sc->rge_head = sc->rge_tail = NULL;
   1129 
   1130 	return (0);
   1131 }
   1132 
   1133 void
   1134 rge_tx_list_init(struct rge_softc *sc)
   1135 {
   1136 	int i;
   1137 
   1138 	memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);
   1139 
   1140 	for (i = 0; i < RGE_TX_LIST_CNT; i++)
   1141 		sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
   1142 
   1143 	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map, 0,
   1144 	    sc->rge_ldata.rge_tx_list_map->dm_mapsize,
   1145 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1146 
   1147 	sc->rge_ldata.rge_txq_prodidx = sc->rge_ldata.rge_txq_considx = 0;
   1148 }
   1149 
   1150 int
   1151 rge_rxeof(struct rge_softc *sc)
   1152 {
   1154 	struct mbuf *m;
   1155 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   1156 	struct rge_rx_desc *cur_rx;
   1157 	struct rge_rxq *rxq;
   1158 	uint32_t rxstat, extsts;
   1159 	int i, total_len, rx = 0;
   1160 
   1161 	for (i = sc->rge_ldata.rge_rxq_prodidx; ; i = RGE_NEXT_RX_DESC(i)) {
   1162 		/* Invalidate the descriptor memory. */
   1163 		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
   1164 		    i * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
   1165 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1166 
   1167 		cur_rx = &sc->rge_ldata.rge_rx_list[i];
   1168 
   1169 		if (RGE_OWN(cur_rx))
   1170 			break;
   1171 
   1172 		rxstat = letoh32(cur_rx->rge_cmdsts);
   1173 		extsts = letoh32(cur_rx->rge_extsts);
   1174 
   1175 		total_len = RGE_RXBYTES(cur_rx);
   1176 		rxq = &sc->rge_ldata.rge_rxq[i];
   1177 		m = rxq->rxq_mbuf;
   1178 		rx = 1;
   1179 
   1180 		/* Invalidate the RX mbuf and unload its map. */
   1181 		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_dmamap, 0,
   1182 		    rxq->rxq_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   1183 		bus_dmamap_unload(sc->sc_dmat, rxq->rxq_dmamap);
   1184 
   1185 		if ((rxstat & (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) !=
   1186 		    (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) {
   1187 			rge_discard_rxbuf(sc, i);
   1188 			continue;
   1189 		}
   1190 
   1191 		if (rxstat & RGE_RDCMDSTS_RXERRSUM) {
   1192 			if_statinc(ifp, if_ierrors);
   1193 			/*
   1194 			 * If this is part of a multi-fragment packet,
   1195 			 * discard all the pieces.
   1196 			 */
			if (sc->rge_head != NULL) {
   1198 				m_freem(sc->rge_head);
   1199 				sc->rge_head = sc->rge_tail = NULL;
   1200 			}
   1201 			rge_discard_rxbuf(sc, i);
   1202 			continue;
   1203 		}
   1204 
   1205 		/*
   1206 		 * If allocating a replacement mbuf fails,
   1207 		 * reload the current one.
   1208 		 */
   1209 
   1210 		if (rge_newbuf(sc, i) == ENOBUFS) {
   1211 			if (sc->rge_head != NULL) {
   1212 				m_freem(sc->rge_head);
   1213 				sc->rge_head = sc->rge_tail = NULL;
   1214 			}
   1215 			rge_discard_rxbuf(sc, i);
   1216 			continue;
   1217 		}
   1218 
   1219 		if (sc->rge_head != NULL) {
   1220 			m->m_len = total_len;
   1221 			/*
   1222 			 * Special case: if there's 4 bytes or less
   1223 			 * in this buffer, the mbuf can be discarded:
   1224 			 * the last 4 bytes is the CRC, which we don't
   1225 			 * care about anyway.
   1226 			 */
   1227 			if (m->m_len <= ETHER_CRC_LEN) {
   1228 				sc->rge_tail->m_len -=
   1229 				    (ETHER_CRC_LEN - m->m_len);
   1230 				m_freem(m);
   1231 			} else {
   1232 				m->m_len -= ETHER_CRC_LEN;
   1233 				m->m_flags &= ~M_PKTHDR;
   1234 				sc->rge_tail->m_next = m;
   1235 			}
   1236 			m = sc->rge_head;
   1237 			sc->rge_head = sc->rge_tail = NULL;
   1238 			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
   1239 		} else
   1240 			m->m_pkthdr.len = m->m_len =
   1241 			    (total_len - ETHER_CRC_LEN);
   1242 
		/*
		 * RX checksum offload: NetBSD's M_CSUM_* flags mark a
		 * packet as "checked by hardware" and the *_BAD flags
		 * mark a failure, unlike OpenBSD's *_IN_OK flags.
		 */
		if (extsts & RGE_RDEXTSTS_IPV4) {
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (rxstat & RGE_RDCMDSTS_IPCSUMERR)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
		}
		if (extsts & (RGE_RDEXTSTS_IPV4 | RGE_RDEXTSTS_IPV6)) {
			if (rxstat & RGE_RDCMDSTS_TCPPKT) {
				m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
				if (rxstat & RGE_RDCMDSTS_TCPCSUMERR)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_TCP_UDP_BAD;
			} else if (rxstat & RGE_RDCMDSTS_UDPPKT) {
				m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
				if (rxstat & RGE_RDCMDSTS_UDPCSUMERR)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_TCP_UDP_BAD;
			}
		}
   1256 
#if NVLAN > 0
		if (extsts & RGE_RDEXTSTS_VTAG) {
			/* XXX NetBSD stores the tag with vlan_set_tag(). */
			vlan_set_tag(m,
			    ntohs(extsts & RGE_RDEXTSTS_VLAN_MASK));
		}
#endif
   1264 
		/*
		 * XXX OpenBSD batches packets on an mbuf_list and hands
		 * the list up once per interrupt; NetBSD has no
		 * ml_enqueue(), so feed the per-CPU input queue directly.
		 */
		if_percpuq_enqueue(ifp->if_percpuq, m);
   1266 	}
   1267 
   1268 	sc->rge_ldata.rge_rxq_prodidx = i;
   1271 
   1272 	return (rx);
   1273 }
   1274 
   1275 int
   1276 rge_txeof(struct rge_softc *sc)
   1277 {
   1278 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   1279 	struct rge_txq *txq;
   1280 	uint32_t txstat;
   1281 	int cons, idx, prod;
   1282 	int free = 0;
   1283 
   1284 	prod = sc->rge_ldata.rge_txq_prodidx;
   1285 	cons = sc->rge_ldata.rge_txq_considx;
   1286 
   1287 	while (prod != cons) {
   1288 		txq = &sc->rge_ldata.rge_txq[cons];
   1289 		idx = txq->txq_descidx;
   1290 
   1291 		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
   1292 		    idx * sizeof(struct rge_tx_desc),
   1293 		    sizeof(struct rge_tx_desc),
   1294 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1295 
   1296 		txstat = letoh32(sc->rge_ldata.rge_tx_list[idx].rge_cmdsts);
   1297 
   1298 		if (txstat & RGE_TDCMDSTS_OWN) {
   1299 			free = 2;
   1300 			break;
   1301 		}
   1302 
   1303 		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap, 0,
   1304 		    txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   1305 		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
   1306 		m_freem(txq->txq_mbuf);
   1307 		txq->txq_mbuf = NULL;
   1308 
   1309 		if (txstat & (RGE_TDCMDSTS_EXCESSCOLL | RGE_TDCMDSTS_COLL))
   1310 			if_statinc(ifp, if_collisions);
   1311 		if (txstat & RGE_TDCMDSTS_TXERR)
   1312 			if_statinc(ifp, if_oerrors);
   1313 
   1314 		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
   1315 		    idx * sizeof(struct rge_tx_desc),
   1316 		    sizeof(struct rge_tx_desc),
   1317 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1318 
   1319 		cons = RGE_NEXT_TX_DESC(idx);
   1320 		free = 1;
   1321 	}
   1322 
   1323 	if (free == 0)
   1324 		return (0);
   1325 
   1326 	sc->rge_ldata.rge_txq_considx = cons;
   1327 
	/*
	 * XXX OpenBSD restarts the ifqueue here; approximate that by
	 * clearing IFF_OACTIVE and restarting transmission ourselves.
	 */
	if (ifp->if_flags & IFF_OACTIVE) {
		CLR(ifp->if_flags, IFF_OACTIVE);
		rge_start(ifp);
	} else if (free == 2)
		rge_txstart(&sc->sc_task, sc);
	else
		ifp->if_timer = 0;
   1334 
   1335 	return (1);
   1336 }
   1337 
   1338 void
   1339 rge_reset(struct rge_softc *sc)
   1340 {
   1341 	int i;
   1342 
   1343 	/* Enable RXDV gate. */
   1344 	RGE_SETBIT_1(sc, RGE_PPSW, 0x08);
   1345 	DELAY(2000);
   1346 
   1347 	for (i = 0; i < 10; i++) {
   1348 		DELAY(100);
   1349 		if ((RGE_READ_1(sc, RGE_MCUCMD) & (RGE_MCUCMD_RXFIFO_EMPTY |
   1350 		    RGE_MCUCMD_TXFIFO_EMPTY)) == (RGE_MCUCMD_RXFIFO_EMPTY |
   1351 		    RGE_MCUCMD_TXFIFO_EMPTY))
   1352 			break;
   1353 	}
   1354 
   1355 	/* Soft reset. */
   1356 	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_RESET);
   1357 
   1358 	for (i = 0; i < RGE_TIMEOUT; i++) {
   1359 		DELAY(100);
   1360 		if (!(RGE_READ_1(sc, RGE_CMD) & RGE_CMD_RESET))
   1361 			break;
   1362 	}
   1363 	if (i == RGE_TIMEOUT)
   1364 		printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);
   1365 }
   1366 
   1367 void
   1368 rge_iff(struct rge_softc *sc)
   1369 {
   1370 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   1371 	struct ethercom *ac = &sc->sc_ec;
   1372 	struct ether_multi *enm;
   1373 	struct ether_multistep step;
   1374 	uint32_t hashes[2];
   1375 	uint32_t rxfilt;
   1376 	int h = 0;
   1377 
   1378 	rxfilt = RGE_READ_4(sc, RGE_RXCFG);
   1379 	rxfilt &= ~(RGE_RXCFG_ALLPHYS | RGE_RXCFG_MULTI);
   1380 	ifp->if_flags &= ~IFF_ALLMULTI;
   1381 
   1382 	/*
   1383 	 * Always accept frames destined to our station address.
   1384 	 * Always accept broadcast frames.
   1385 	 */
   1386 	rxfilt |= RGE_RXCFG_INDIV | RGE_RXCFG_BROAD;
   1387 
	/*
	 * XXX NetBSD's ethercom has no ac_multirangecnt; multicast
	 * address ranges are detected in the loop below instead.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
allmulti:
   1389 		ifp->if_flags |= IFF_ALLMULTI;
   1390 		rxfilt |= RGE_RXCFG_MULTI;
   1391 		if (ifp->if_flags & IFF_PROMISC)
   1392 			rxfilt |= RGE_RXCFG_ALLPHYS;
   1393 		hashes[0] = hashes[1] = 0xffffffff;
   1394 	} else {
   1395 		rxfilt |= RGE_RXCFG_MULTI;
   1396 		/* Program new filter. */
   1397 		memset(hashes, 0, sizeof(hashes));
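
		/*
		 * Hash filter: the top 6 bits of the big-endian CRC-32
		 * of each address select one of the 64 bits in the
		 * RGE_MAR0/RGE_MAR4 filter registers.
		 */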
   1398 
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			/* Ranges can't be hashed; accept all multicast. */
			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN) != 0)
				goto allmulti;

			h = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN) >> 26;
   1403 
   1404 			if (h < 32)
   1405 				hashes[0] |= (1 << h);
   1406 			else
   1407 				hashes[1] |= (1 << (h - 32));
   1408 
   1409 			ETHER_NEXT_MULTI(step, enm);
   1410 		}
   1411 	}
   1412 
   1413 	RGE_WRITE_4(sc, RGE_RXCFG, rxfilt);
   1414 	RGE_WRITE_4(sc, RGE_MAR0, bswap32(hashes[1]));
   1415 	RGE_WRITE_4(sc, RGE_MAR4, bswap32(hashes[0]));
   1416 }
   1417 
   1418 void
   1419 rge_set_phy_power(struct rge_softc *sc, int on)
   1420 {
   1421 	int i;
   1422 
   1423 	if (on) {
   1424 		RGE_SETBIT_1(sc, RGE_PMCH, 0xc0);
   1425 
   1426 		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN);
   1427 
   1428 		for (i = 0; i < RGE_TIMEOUT; i++) {
			/* Wait for the PHY to report the LAN-on state. */
			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 3)
   1430 				break;
   1431 			DELAY(1000);
   1432 		}
   1433 	} else
   1434 		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_PDOWN);
   1435 }
   1436 
   1437 void
   1438 rge_phy_config(struct rge_softc *sc)
   1439 {
   1440 	uint16_t mcode_ver, val;
   1441 	int i;
   1442 	static const uint16_t mac_cfg3_a438_value[] =
   1443 	    { 0x0043, 0x00a7, 0x00d6, 0x00ec, 0x00f6, 0x00fb, 0x00fd, 0x00ff,
   1444 	      0x00bb, 0x0058, 0x0029, 0x0013, 0x0009, 0x0004, 0x0002 };
   1445 
   1446 	static const uint16_t mac_cfg3_b88e_value[] =
   1447 	    { 0xc091, 0x6e12, 0xc092, 0x1214, 0xc094, 0x1516, 0xc096, 0x171b,
   1448 	      0xc098, 0x1b1c, 0xc09a, 0x1f1f, 0xc09c, 0x2021, 0xc09e, 0x2224,
   1449 	      0xc0a0, 0x2424, 0xc0a2, 0x2424, 0xc0a4, 0x2424, 0xc018, 0x0af2,
   1450 	      0xc01a, 0x0d4a, 0xc01c, 0x0f26, 0xc01e, 0x118d, 0xc020, 0x14f3,
   1451 	      0xc022, 0x175a, 0xc024, 0x19c0, 0xc026, 0x1c26, 0xc089, 0x6050,
   1452 	      0xc08a, 0x5f6e, 0xc08c, 0x6e6e, 0xc08e, 0x6e6e, 0xc090, 0x6e12 };
   1453 
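	/*
	 * PHY parameters are written through an indirect window: OCP
	 * register 0xa436 selects an address and 0xa438 carries the
	 * data (0xb87c/0xb87e and 0xb88e/0xb890 form similar pairs).
	 */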
   1454 	/* Read microcode version. */
   1455 	rge_write_phy_ocp(sc, 0xa436, 0x801e);
   1456 	mcode_ver = rge_read_phy_ocp(sc, 0xa438);
   1457 
   1458 	if (sc->rge_type == MAC_CFG2) {
   1459 		for (i = 0; i < nitems(rtl8125_mac_cfg2_ephy); i++) {
   1460 			rge_write_ephy(sc, rtl8125_mac_cfg2_ephy[i].reg,
   1461 			    rtl8125_mac_cfg2_ephy[i].val);
   1462 		}
   1463 
   1464 		if (mcode_ver != RGE_MAC_CFG2_MCODE_VER) {
   1465 			/* Disable PHY config. */
   1466 			RGE_CLRBIT_1(sc, 0xf2, 0x20);
   1467 			DELAY(1000);
   1468 
   1469 			rge_patch_phy_mcu(sc, 1);
   1470 
   1471 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
   1472 			rge_write_phy_ocp(sc, 0xa438, 0x8600);
   1473 			rge_write_phy_ocp(sc, 0xa436, 0xb82e);
   1474 			rge_write_phy_ocp(sc, 0xa438, 0x0001);
   1475 
   1476 			RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
   1477 			for (i = 0; i < nitems(rtl8125_mac_cfg2_mcu); i++) {
   1478 				rge_write_phy_ocp(sc,
   1479 				    rtl8125_mac_cfg2_mcu[i].reg,
   1480 				    rtl8125_mac_cfg2_mcu[i].val);
   1481 			}
   1482 			RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);
   1483 
   1484 			rge_write_phy_ocp(sc, 0xa436, 0);
   1485 			rge_write_phy_ocp(sc, 0xa438, 0);
   1486 			RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
   1487 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
   1488 			rge_write_phy_ocp(sc, 0xa438, 0);
   1489 
   1490 			rge_patch_phy_mcu(sc, 0);
   1491 
   1492 			/* Enable PHY config. */
   1493 			RGE_SETBIT_1(sc, 0xf2, 0x20);
   1494 
   1495 			/* Write microcode version. */
   1496 			rge_write_phy_ocp(sc, 0xa436, 0x801e);
   1497 			rge_write_phy_ocp(sc, 0xa438, RGE_MAC_CFG2_MCODE_VER);
   1498 		}
   1499 
   1500 		val = rge_read_phy_ocp(sc, 0xad40) & ~0x03ff;
   1501 		rge_write_phy_ocp(sc, 0xad40, val | 0x0084);
   1502 		RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
   1503 		val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
   1504 		rge_write_phy_ocp(sc, 0xad16, val | 0x0006);
   1505 		val = rge_read_phy_ocp(sc, 0xad32) & ~0x03ff;
   1506 		rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
   1507 		RGE_PHY_CLRBIT(sc, 0xac08, 0x1100);
   1508 		val = rge_read_phy_ocp(sc, 0xac8a) & ~0xf000;
   1509 		rge_write_phy_ocp(sc, 0xac8a, val | 0x7000);
   1510 		RGE_PHY_SETBIT(sc, 0xad18, 0x0400);
   1511 		RGE_PHY_SETBIT(sc, 0xad1a, 0x03ff);
   1512 		RGE_PHY_SETBIT(sc, 0xad1c, 0x03ff);
   1513 
   1514 		rge_write_phy_ocp(sc, 0xa436, 0x80ea);
   1515 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1516 		rge_write_phy_ocp(sc, 0xa438, val | 0xc400);
   1517 		rge_write_phy_ocp(sc, 0xa436, 0x80eb);
   1518 		val = rge_read_phy_ocp(sc, 0xa438) & ~0x0700;
   1519 		rge_write_phy_ocp(sc, 0xa438, val | 0x0300);
   1520 		rge_write_phy_ocp(sc, 0xa436, 0x80f8);
   1521 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1522 		rge_write_phy_ocp(sc, 0xa438, val | 0x1c00);
   1523 		rge_write_phy_ocp(sc, 0xa436, 0x80f1);
   1524 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1525 		rge_write_phy_ocp(sc, 0xa438, val | 0x3000);
   1526 		rge_write_phy_ocp(sc, 0xa436, 0x80fe);
   1527 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1528 		rge_write_phy_ocp(sc, 0xa438, val | 0xa500);
   1529 		rge_write_phy_ocp(sc, 0xa436, 0x8102);
   1530 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1531 		rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
   1532 		rge_write_phy_ocp(sc, 0xa436, 0x8105);
   1533 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1534 		rge_write_phy_ocp(sc, 0xa438, val | 0x3300);
   1535 		rge_write_phy_ocp(sc, 0xa436, 0x8100);
   1536 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1537 		rge_write_phy_ocp(sc, 0xa438, val | 0x7000);
   1538 		rge_write_phy_ocp(sc, 0xa436, 0x8104);
   1539 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1540 		rge_write_phy_ocp(sc, 0xa438, val | 0xf000);
   1541 		rge_write_phy_ocp(sc, 0xa436, 0x8106);
   1542 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1543 		rge_write_phy_ocp(sc, 0xa438, val | 0x6500);
   1544 		rge_write_phy_ocp(sc, 0xa436, 0x80dc);
   1545 		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
   1546 		rge_write_phy_ocp(sc, 0xa438, val | 0xed00);
   1547 		rge_write_phy_ocp(sc, 0xa436, 0x80df);
   1548 		RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
   1549 		rge_write_phy_ocp(sc, 0xa436, 0x80e1);
   1550 		RGE_PHY_CLRBIT(sc, 0xa438, 0x0100);
   1551 		val = rge_read_phy_ocp(sc, 0xbf06) & ~0x003f;
   1552 		rge_write_phy_ocp(sc, 0xbf06, val | 0x0038);
   1553 		rge_write_phy_ocp(sc, 0xa436, 0x819f);
   1554 		rge_write_phy_ocp(sc, 0xa438, 0xd0b6);
   1555 		rge_write_phy_ocp(sc, 0xbc34, 0x5555);
   1556 		val = rge_read_phy_ocp(sc, 0xbf0a) & ~0x0e00;
   1557 		rge_write_phy_ocp(sc, 0xbf0a, val | 0x0a00);
   1558 		RGE_PHY_CLRBIT(sc, 0xa5c0, 0x0400);
   1559 		RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
   1560 	} else {
   1561 		for (i = 0; i < nitems(rtl8125_mac_cfg3_ephy); i++)
   1562 			rge_write_ephy(sc, rtl8125_mac_cfg3_ephy[i].reg,
   1563 			    rtl8125_mac_cfg3_ephy[i].val);
   1564 
   1565 		if (mcode_ver != RGE_MAC_CFG3_MCODE_VER) {
   1566 			/* Disable PHY config. */
   1567 			RGE_CLRBIT_1(sc, 0xf2, 0x20);
   1568 			DELAY(1000);
   1569 
   1570 			rge_patch_phy_mcu(sc, 1);
   1571 
   1572 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
   1573 			rge_write_phy_ocp(sc, 0xa438, 0x8601);
   1574 			rge_write_phy_ocp(sc, 0xa436, 0xb82e);
   1575 			rge_write_phy_ocp(sc, 0xa438, 0x0001);
   1576 
   1577 			RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
   1578 			for (i = 0; i < nitems(rtl8125_mac_cfg3_mcu); i++) {
   1579 				rge_write_phy_ocp(sc,
   1580 				    rtl8125_mac_cfg3_mcu[i].reg,
   1581 				    rtl8125_mac_cfg3_mcu[i].val);
   1582 			}
   1583 			RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);
   1584 
   1585 			rge_write_phy_ocp(sc, 0xa436, 0);
   1586 			rge_write_phy_ocp(sc, 0xa438, 0);
   1587 			RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
   1588 			rge_write_phy_ocp(sc, 0xa436, 0x8024);
   1589 			rge_write_phy_ocp(sc, 0xa438, 0);
   1590 
   1591 			rge_patch_phy_mcu(sc, 0);
   1592 
   1593 			/* Enable PHY config. */
   1594 			RGE_SETBIT_1(sc, 0xf2, 0x20);
   1595 
   1596 			/* Write microcode version. */
   1597 			rge_write_phy_ocp(sc, 0xa436, 0x801e);
   1598 			rge_write_phy_ocp(sc, 0xa438, RGE_MAC_CFG3_MCODE_VER);
   1599 		}
   1600 
   1601 		RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
   1602 		val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
   1603 		rge_write_phy_ocp(sc, 0xad16, val | 0x03ff);
   1604 		val = rge_read_phy_ocp(sc, 0xad32) & ~0x003f;
   1605 		rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
   1606 		RGE_PHY_CLRBIT(sc, 0xac08, 0x1000);
   1607 		RGE_PHY_CLRBIT(sc, 0xac08, 0x0100);
   1608 		val = rge_read_phy_ocp(sc, 0xacc0) & ~0x0003;
   1609 		rge_write_phy_ocp(sc, 0xacc0, val | 0x0002);
   1610 		val = rge_read_phy_ocp(sc, 0xad40) & ~0x00e0;
   1611 		rge_write_phy_ocp(sc, 0xad40, val | 0x0040);
   1612 		val = rge_read_phy_ocp(sc, 0xad40) & ~0x0007;
   1613 		rge_write_phy_ocp(sc, 0xad40, val | 0x0004);
   1614 		RGE_PHY_CLRBIT(sc, 0xac14, 0x0080);
   1615 		RGE_PHY_CLRBIT(sc, 0xac80, 0x0300);
   1616 		val = rge_read_phy_ocp(sc, 0xac5e) & ~0x0007;
   1617 		rge_write_phy_ocp(sc, 0xac5e, val | 0x0002);
   1618 		rge_write_phy_ocp(sc, 0xad4c, 0x00a8);
   1619 		rge_write_phy_ocp(sc, 0xac5c, 0x01ff);
   1620 		val = rge_read_phy_ocp(sc, 0xac8a) & ~0x00f0;
   1621 		rge_write_phy_ocp(sc, 0xac8a, val | 0x0030);
   1622 		rge_write_phy_ocp(sc, 0xb87c, 0x80a2);
   1623 		rge_write_phy_ocp(sc, 0xb87e, 0x0153);
   1624 		rge_write_phy_ocp(sc, 0xb87c, 0x809c);
   1625 		rge_write_phy_ocp(sc, 0xb87e, 0x0153);
   1626 
   1627 		rge_write_phy_ocp(sc, 0xa436, 0x81b3);
   1628 		for (i = 0; i < nitems(mac_cfg3_a438_value); i++)
   1629 			rge_write_phy_ocp(sc, 0xa438, mac_cfg3_a438_value[i]);
   1630 		for (i = 0; i < 26; i++)
   1631 			rge_write_phy_ocp(sc, 0xa438, 0);
   1632 		rge_write_phy_ocp(sc, 0xa436, 0x8257);
   1633 		rge_write_phy_ocp(sc, 0xa438, 0x020f);
   1634 		rge_write_phy_ocp(sc, 0xa436, 0x80ea);
   1635 		rge_write_phy_ocp(sc, 0xa438, 0x7843);
   1636 
   1637 		rge_patch_phy_mcu(sc, 1);
   1638 		RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
   1639 		RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
   1640 		for (i = 0; i < nitems(mac_cfg3_b88e_value); i += 2) {
   1641 			rge_write_phy_ocp(sc, 0xb88e, mac_cfg3_b88e_value[i]);
   1642 			rge_write_phy_ocp(sc, 0xb890,
   1643 			    mac_cfg3_b88e_value[i + 1]);
   1644 		}
   1645 		RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
   1646 		rge_patch_phy_mcu(sc, 0);
   1647 
   1648 		RGE_PHY_SETBIT(sc, 0xd068, 0x2000);
   1649 		rge_write_phy_ocp(sc, 0xa436, 0x81a2);
   1650 		RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
   1651 		val = rge_read_phy_ocp(sc, 0xb54c) & ~0xff00;
   1652 		rge_write_phy_ocp(sc, 0xb54c, val | 0xdb00);
   1653 		RGE_PHY_CLRBIT(sc, 0xa454, 0x0001);
   1654 		RGE_PHY_SETBIT(sc, 0xa5d4, 0x0020);
   1655 		RGE_PHY_CLRBIT(sc, 0xad4e, 0x0010);
   1656 		RGE_PHY_CLRBIT(sc, 0xa86a, 0x0001);
   1657 		RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
   1658 	}
   1659 
	/* Disable EEE (IEEE 802.3az Energy-Efficient Ethernet). */
   1661 	RGE_MAC_CLRBIT(sc, 0xe040, 0x0003);
   1662 	RGE_MAC_CLRBIT(sc, 0xeb62, 0x0006);
   1663 	RGE_PHY_CLRBIT(sc, 0xa432, 0x0010);
   1664 	RGE_PHY_CLRBIT(sc, 0xa5d0, 0x0006);
   1665 	RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0001);
   1666 	RGE_PHY_CLRBIT(sc, 0xa6d8, 0x0010);
   1667 	RGE_PHY_CLRBIT(sc, 0xa428, 0x0080);
   1668 	RGE_PHY_CLRBIT(sc, 0xa4a2, 0x0200);
   1669 
   1670 	rge_patch_phy_mcu(sc, 1);
   1671 	RGE_MAC_CLRBIT(sc, 0xe052, 0x0001);
   1672 	RGE_PHY_CLRBIT(sc, 0xa442, 0x3000);
   1673 	RGE_PHY_CLRBIT(sc, 0xa430, 0x8000);
   1674 	rge_patch_phy_mcu(sc, 0);
   1675 }
   1676 
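/*
 * Program the station address.  The ID registers are write-protected,
 * so config writes are unlocked around the update; RGE_MAC0 takes
 * address bytes 0-3 (LSB first) and RGE_MAC4 bytes 4-5.
 */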
   1677 void
   1678 rge_set_macaddr(struct rge_softc *sc, const uint8_t *addr)
   1679 {
   1680 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
   1681 	RGE_WRITE_4(sc, RGE_MAC0,
   1682 	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
   1683 	RGE_WRITE_4(sc, RGE_MAC4,
   1684 	    addr[5] <<  8 | addr[4]);
   1685 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
   1686 }
   1687 
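/* Fetch the factory-programmed station address from the ID registers. */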
   1688 void
   1689 rge_get_macaddr(struct rge_softc *sc, uint8_t *addr)
   1690 {
	uint32_t maclo = RGE_READ_4(sc, RGE_ADDR0);
	uint16_t machi = RGE_READ_2(sc, RGE_ADDR1);

	/* Extract the bytes explicitly: endian-safe and no unaligned stores. */
	addr[0] = maclo & 0xff;
	addr[1] = (maclo >> 8) & 0xff;
	addr[2] = (maclo >> 16) & 0xff;
	addr[3] = (maclo >> 24) & 0xff;
	addr[4] = machi & 0xff;
	addr[5] = machi >> 8;
   1693 }
   1694 
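/*
 * One-time MAC setup: clear the PME status and CLKREQ bits, disable
 * UPS, wipe the MAC MCU patch registers (0xfc26-0xfc38) and, on
 * MAC_CFG3 chips, reload the default break points.
 */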
   1695 void
   1696 rge_hw_init(struct rge_softc *sc)
   1697 {
   1698 	int i;
   1699 
   1700 	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
   1701 	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
   1702 	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
   1703 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
   1704 	RGE_CLRBIT_1(sc, 0xf1, 0x80);
   1705 
   1706 	/* Disable UPS. */
   1707 	RGE_MAC_CLRBIT(sc, 0xd40a, 0x0010);
   1708 
   1709 	/* Configure MAC MCU. */
   1710 	rge_write_mac_ocp(sc, 0xfc38, 0);
   1711 
   1712 	for (i = 0xfc28; i < 0xfc38; i += 2)
   1713 		rge_write_mac_ocp(sc, i, 0);
   1714 
   1715 	DELAY(3000);
   1716 	rge_write_mac_ocp(sc, 0xfc26, 0);
   1717 
   1718 	if (sc->rge_type == MAC_CFG3) {
   1719 		for (i = 0; i < nitems(rtl8125_def_bps); i++)
   1720 			rge_write_mac_ocp(sc, rtl8125_def_bps[i].reg,
   1721 			    rtl8125_def_bps[i].val);
   1722 	}
   1723 
   1724 	/* Disable PHY power saving. */
   1725 	rge_disable_phy_ocp_pwrsave(sc);
   1726 
   1727 	/* Set PCIe uncorrectable error status. */
   1728 	rge_write_csi(sc, 0x108,
   1729 	    rge_read_csi(sc, 0x108) | 0x00100000);
   1730 }
   1731 
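/*
 * Force the PHY power-save selector at OCP 0xc416 to 0x0500 (power
 * saving off, per the vendor r8125 driver) if it is not already set;
 * the write must be done with the PHY MCU patch gate open.
 */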
   1732 void
   1733 rge_disable_phy_ocp_pwrsave(struct rge_softc *sc)
   1734 {
   1735 	if (rge_read_phy_ocp(sc, 0xc416) != 0x0500) {
   1736 		rge_patch_phy_mcu(sc, 1);
   1737 		rge_write_phy_ocp(sc, 0xc416, 0);
   1738 		rge_write_phy_ocp(sc, 0xc416, 0x0500);
   1739 		rge_patch_phy_mcu(sc, 0);
   1740 	}
   1741 }
   1742 
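/*
 * Open (set) or close (clear) the PHY MCU patch gate at OCP 0xb820
 * bit 4, then poll the ready flag at 0xb800 bit 6 for up to 100ms.
 */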
   1743 void
   1744 rge_patch_phy_mcu(struct rge_softc *sc, int set)
   1745 {
   1746 	uint16_t val;
   1747 	int i;
   1748 
   1749 	if (set)
   1750 		RGE_PHY_SETBIT(sc, 0xb820, 0x0010);
   1751 	else
   1752 		RGE_PHY_CLRBIT(sc, 0xb820, 0x0010);
   1753 
   1754 	for (i = 0; i < 1000; i++) {
		val = rge_read_phy_ocp(sc, 0xb800) & 0x0040;
		if (val == 0x0040)
			break;
		DELAY(100);
   1759 	}
   1760 	if (i == 1000)
   1761 		printf("%s: timeout waiting to patch phy mcu\n",
   1762 		    sc->sc_dev.dv_xname);
   1763 }
   1764 
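/* Advertise every speed/duplex the RTL8125 supports, 10Mbps to 2.5Gbps. */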
   1765 void
   1766 rge_add_media_types(struct rge_softc *sc)
   1767 {
   1768 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
   1769 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
   1770 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
   1771 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
   1772 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T, 0, NULL);
   1773 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
   1774 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
   1775 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX, 0, NULL);
   1776 }
   1777 
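/*
 * Select the interrupt mitigation scheme: with RGE_IMTYPE_NONE the RX
 * and TX interrupts are taken and acked individually, while
 * RGE_IMTYPE_SIM folds them into the simulated PCS timer interrupt.
 */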
   1778 void
   1779 rge_config_imtype(struct rge_softc *sc, int imtype)
   1780 {
   1781 	switch (imtype) {
   1782 	case RGE_IMTYPE_NONE:
   1783 		sc->rge_intrs = RGE_INTRS;
   1784 		sc->rge_rx_ack = RGE_ISR_RX_OK | RGE_ISR_RX_DESC_UNAVAIL |
   1785 		    RGE_ISR_RX_FIFO_OFLOW;
   1786 		sc->rge_tx_ack = RGE_ISR_TX_OK;
   1787 		break;
   1788 	case RGE_IMTYPE_SIM:
   1789 		sc->rge_intrs = RGE_INTRS_TIMER;
   1790 		sc->rge_rx_ack = RGE_ISR_PCS_TIMEOUT;
   1791 		sc->rge_tx_ack = RGE_ISR_PCS_TIMEOUT;
   1792 		break;
   1793 	default:
   1794 		panic("%s: unknown imtype %d", sc->sc_dev.dv_xname, imtype);
   1795 	}
   1796 }
   1797 
   1798 void
   1799 rge_disable_sim_im(struct rge_softc *sc)
   1800 {
   1801 	RGE_WRITE_4(sc, RGE_TIMERINT, 0);
   1802 	sc->rge_timerintr = 0;
   1803 }
   1804 
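/* Arm the hardware timer that drives simulated interrupt moderation. */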
   1805 void
   1806 rge_setup_sim_im(struct rge_softc *sc)
   1807 {
   1808 	RGE_WRITE_4(sc, RGE_TIMERINT, 0x2600);
   1809 	RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
   1810 	sc->rge_timerintr = 1;
   1811 }
   1812 
   1813 void
   1814 rge_setup_intr(struct rge_softc *sc, int imtype)
   1815 {
   1816 	rge_config_imtype(sc, imtype);
   1817 
   1818 	/* Enable interrupts. */
   1819 	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
   1820 
   1821 	switch (imtype) {
   1822 	case RGE_IMTYPE_NONE:
   1823 		rge_disable_sim_im(sc);
   1824 		break;
   1825 	case RGE_IMTYPE_SIM:
   1826 		rge_setup_sim_im(sc);
   1827 		break;
   1828 	default:
   1829 		panic("%s: unknown imtype %d", sc->sc_dev.dv_xname, imtype);
   1830 	}
   1831 }
   1832 
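/*
 * Take the chip out of out-of-band (OOB/wake-on-LAN) mode so the host
 * driver owns it: clear the RX filter, disable RealWoW, reset, and
 * hand control from the OOB MCU back to the host.
 */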
   1833 void
   1834 rge_exit_oob(struct rge_softc *sc)
   1835 {
   1836 	int i;
   1837 
   1838 	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
   1839 	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
   1840 	    RGE_RXCFG_ERRPKT);
   1841 
   1842 	/* Disable RealWoW. */
   1843 	rge_write_mac_ocp(sc, 0xc0bc, 0x00ff);
   1844 
   1845 	rge_reset(sc);
   1846 
   1847 	/* Disable OOB. */
   1848 	RGE_CLRBIT_1(sc, RGE_MCUCMD, RGE_MCUCMD_IS_OOB);
   1849 
   1850 	RGE_MAC_CLRBIT(sc, 0xe8de, 0x4000);
   1851 
   1852 	for (i = 0; i < 10; i++) {
   1853 		DELAY(100);
   1854 		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
   1855 			break;
   1856 	}
   1857 
   1858 	rge_write_mac_ocp(sc, 0xc0aa, 0x07d0);
   1859 	rge_write_mac_ocp(sc, 0xc0a6, 0x0150);
   1860 	rge_write_mac_ocp(sc, 0xc01e, 0x5555);
   1861 
   1862 	for (i = 0; i < 10; i++) {
   1863 		DELAY(100);
   1864 		if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
   1865 			break;
   1866 	}
   1867 
   1868 	if (rge_read_mac_ocp(sc, 0xd42c) & 0x0100) {
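		/* PHY is in UPS power saving; wait for it to resume (state 2). */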
   1869 		for (i = 0; i < RGE_TIMEOUT; i++) {
			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 2)
   1871 				break;
   1872 			DELAY(1000);
   1873 		}
   1874 		RGE_MAC_CLRBIT(sc, 0xd408, 0x0100);
   1875 		RGE_PHY_CLRBIT(sc, 0xa468, 0x000a);
   1876 	}
   1877 }
   1878 
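/*
 * CSI (indirect PCIe configuration space) write: load the data
 * register, then start the cycle by writing the address with
 * RGE_CSIAR_BUSY set; the chip clears the flag on completion.
 */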
   1879 void
   1880 rge_write_csi(struct rge_softc *sc, uint32_t reg, uint32_t val)
   1881 {
   1882 	int i;
   1883 
   1884 	RGE_WRITE_4(sc, RGE_CSIDR, val);
   1885 	RGE_WRITE_4(sc, RGE_CSIAR, (1 << 16) | (reg & RGE_CSIAR_ADDR_MASK) |
   1886 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT) | RGE_CSIAR_BUSY);
   1887 
   1888 	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (!(RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY))
			break;
   1892 	}
   1893 
   1894 	DELAY(20);
   1895 }
   1896 
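/*
 * CSI read: the BUSY flag has the opposite polarity here; the cycle
 * starts with the flag clear and the chip sets it once RGE_CSIDR
 * holds valid data.
 */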
   1897 uint32_t
   1898 rge_read_csi(struct rge_softc *sc, uint32_t reg)
   1899 {
   1900 	int i;
   1901 
   1902 	RGE_WRITE_4(sc, RGE_CSIAR, (1 << 16) | (reg & RGE_CSIAR_ADDR_MASK) |
   1903 	    (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT));
   1904 
   1905 	for (i = 0; i < 10; i++) {
		DELAY(100);
		if (RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY)
			break;
   1909 	}
   1910 
   1911 	DELAY(20);
   1912 
   1913 	return (RGE_READ_4(sc, RGE_CSIDR));
   1914 }
   1915 
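/*
 * MAC OCP registers are 16 bits wide and word-addressed: the register
 * offset is halved into the RGE_MACOCP address field, with the data in
 * the low 16 bits; e.g. register 0xe040 becomes address field 0x7020.
 */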
   1916 void
   1917 rge_write_mac_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
   1918 {
   1919 	uint32_t tmp;
   1920 
   1921 	tmp = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
   1922 	tmp += val;
   1923 	tmp |= RGE_MACOCP_BUSY;
   1924 	RGE_WRITE_4(sc, RGE_MACOCP, tmp);
   1925 }
   1926 
   1927 uint16_t
   1928 rge_read_mac_ocp(struct rge_softc *sc, uint16_t reg)
   1929 {
   1930 	uint32_t val;
   1931 
   1932 	val = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
   1933 	RGE_WRITE_4(sc, RGE_MACOCP, val);
   1934 
   1935 	return (RGE_READ_4(sc, RGE_MACOCP) & RGE_MACOCP_DATA_MASK);
   1936 }
   1937 
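/* Write a PCIe EPHY register through the RGE_EPHYAR access window. */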
   1938 void
   1939 rge_write_ephy(struct rge_softc *sc, uint16_t reg, uint16_t val)
   1940 {
   1941 	uint32_t tmp;
   1942 	int i;
   1943 
   1944 	tmp = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
   1945 	tmp |= RGE_EPHYAR_BUSY | (val & RGE_EPHYAR_DATA_MASK);
   1946 	RGE_WRITE_4(sc, RGE_EPHYAR, tmp);
   1947 
   1948 	for (i = 0; i < 10; i++) {
   1949 		DELAY(100);
   1950 		if (!(RGE_READ_4(sc, RGE_EPHYAR) & RGE_EPHYAR_BUSY))
   1951 			break;
   1952 	}
   1953 
   1954 	DELAY(20);
   1955 }
   1956 
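/*
 * Translate a conventional (page, register) GPHY address into the flat
 * OCP space: with addr == 0, registers map 8 per page starting at
 * RGE_PHYBASE, i.e. OCP address = (RGE_PHYBASE + reg / 8) * 16 +
 * (reg % 8) * 2.  For example, reg 16 lands at (RGE_PHYBASE + 2) << 4.
 */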
   1957 void
   1958 rge_write_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg, uint16_t val)
   1959 {
   1960 	uint16_t off, phyaddr;
   1961 
   1962 	phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
   1963 	phyaddr <<= 4;
   1964 
   1965 	off = addr ? reg : 0x10 + (reg % 8);
   1966 
   1967 	phyaddr += (off - 16) << 1;
   1968 
   1969 	rge_write_phy_ocp(sc, phyaddr, val);
   1970 }
   1971 
   1972 void
   1973 rge_write_phy_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
   1974 {
   1975 	uint32_t tmp;
   1976 	int i;
   1977 
   1978 	tmp = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
   1979 	tmp |= RGE_PHYOCP_BUSY | val;
   1980 	RGE_WRITE_4(sc, RGE_PHYOCP, tmp);
   1981 
   1982 	for (i = 0; i < RGE_TIMEOUT; i++) {
   1983 		DELAY(1);
   1984 		if (!(RGE_READ_4(sc, RGE_PHYOCP) & RGE_PHYOCP_BUSY))
   1985 			break;
   1986 	}
   1987 }
   1988 
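/*
 * PHY OCP read: as with CSI, the polarity is reversed relative to a
 * write; the access completes when the chip raises the flag bit.
 */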
   1989 uint16_t
   1990 rge_read_phy_ocp(struct rge_softc *sc, uint16_t reg)
   1991 {
   1992 	uint32_t val;
   1993 	int i;
   1994 
   1995 	val = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
   1996 	RGE_WRITE_4(sc, RGE_PHYOCP, val);
   1997 
   1998 	for (i = 0; i < RGE_TIMEOUT; i++) {
   1999 		DELAY(1);
   2000 		val = RGE_READ_4(sc, RGE_PHYOCP);
   2001 		if (val & RGE_PHYOCP_BUSY)
   2002 			break;
   2003 	}
   2004 
   2005 	return (val & RGE_PHYOCP_DATA_MASK);
   2006 }
   2007 
   2008 int
   2009 rge_get_link_status(struct rge_softc *sc)
   2010 {
   2011 	return ((RGE_READ_2(sc, RGE_PHYSTAT) & RGE_PHYSTAT_LINK) ? 1 : 0);
   2012 }
   2013 
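/* Deferred start, run from the workqueue: ring the TX poll doorbell. */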
   2014 void
   2015 rge_txstart(struct work *wk, void *arg)
   2016 {
   2017 	struct rge_softc *sc = arg;
   2018 
   2019 	RGE_WRITE_2(sc, RGE_TXSTART, RGE_TXSTART_START);
   2020 }
   2021 
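/* Once-a-second housekeeping callout: refresh the link state. */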
   2022 void
   2023 rge_tick(void *arg)
   2024 {
   2025 	struct rge_softc *sc = arg;
   2026 	int s;
   2027 
   2028 	s = splnet();
   2029 	rge_link_state(sc);
   2030 	splx(s);
   2031 
	/*
	 * NetBSD has no timeout_add_sec(); assuming sc_timeout was set up
	 * with callout_setfunc() at attach time, reschedule it directly.
	 */
	callout_schedule(&sc->sc_timeout, hz);
   2033 }
   2034 
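/* Sync the interface link state with the PHY status register. */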
   2035 void
   2036 rge_link_state(struct rge_softc *sc)
   2037 {
   2038 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   2039 	int link = LINK_STATE_DOWN;
   2040 
   2041 	if (rge_get_link_status(sc))
   2042 		link = LINK_STATE_UP;
   2043 
	if (ifp->if_link_state != link) {
		/*
		 * Report the new state; if_link_state_change() updates
		 * if_link_state itself, so don't pre-set it here.
		 */
		if_link_state_change(ifp, link);
	}
   2048 }
   2049