/*	$NetBSD: if_rge.c,v 1.1 2020/01/11 20:56:51 sevan Exp $	*/
/*	$OpenBSD: if_rge.c,v 1.2 2020/01/02 09:00:45 kevlo Exp $	*/

/*
 * Copyright (c) 2019 Kevin Lo <kevlo@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/endian.h>

#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/mii/mii.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_rgereg.h>

int		rge_match(struct device *, void *, void *);
void		rge_attach(struct device *, struct device *, void *);
int		rge_intr(void *);
int		rge_encap(struct rge_softc *, struct mbuf *, int);
int		rge_ioctl(struct ifnet *, u_long, caddr_t);
void		rge_start(struct ifqueue *);
void		rge_watchdog(struct ifnet *);
int		rge_init(struct ifnet *);
void		rge_stop(struct ifnet *);
int		rge_ifmedia_upd(struct ifnet *);
void		rge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
int		rge_allocmem(struct rge_softc *);
int		rge_newbuf(struct rge_softc *, int);
void		rge_discard_rxbuf(struct rge_softc *, int);
int		rge_rx_list_init(struct rge_softc *);
void		rge_tx_list_init(struct rge_softc *);
int		rge_rxeof(struct rge_softc *);
int		rge_txeof(struct rge_softc *);
void		rge_reset(struct rge_softc *);
void		rge_iff(struct rge_softc *);
void		rge_set_phy_power(struct rge_softc *, int);
void		rge_phy_config(struct rge_softc *);
void		rge_set_macaddr(struct rge_softc *, const uint8_t *);
void		rge_get_macaddr(struct rge_softc *, uint8_t *);
void		rge_hw_init(struct rge_softc *);
void		rge_disable_phy_ocp_pwrsave(struct rge_softc *);
void		rge_patch_phy_mcu(struct rge_softc *, int);
void		rge_add_media_types(struct rge_softc *);
void		rge_config_imtype(struct rge_softc *, int);
void		rge_disable_sim_im(struct rge_softc *);
void		rge_setup_sim_im(struct rge_softc *);
void		rge_setup_intr(struct rge_softc *, int);
void		rge_exit_oob(struct rge_softc *);
void		rge_write_csi(struct rge_softc *, uint32_t, uint32_t);
uint32_t	rge_read_csi(struct rge_softc *, uint32_t);
void		rge_write_mac_ocp(struct rge_softc *, uint16_t, uint16_t);
uint16_t	rge_read_mac_ocp(struct rge_softc *, uint16_t);
void		rge_write_ephy(struct rge_softc *, uint16_t, uint16_t);
void		rge_write_phy(struct rge_softc *, uint16_t, uint16_t, uint16_t);
void		rge_write_phy_ocp(struct rge_softc *, uint16_t, uint16_t);
uint16_t	rge_read_phy_ocp(struct rge_softc *, uint16_t);
int		rge_get_link_status(struct rge_softc *);
void		rge_txstart(void *);
void		rge_tick(void *);
void		rge_link_state(struct rge_softc *);

static const struct {
	uint16_t reg;
	uint16_t val;
} rtl8125_def_bps[] = {
	RTL8125_DEF_BPS
}, rtl8125_mac_cfg2_ephy[] = {
	RTL8125_MAC_CFG2_EPHY
}, rtl8125_mac_cfg2_mcu[] = {
	RTL8125_MAC_CFG2_MCU
}, rtl8125_mac_cfg3_ephy[] = {
	RTL8125_MAC_CFG3_EPHY
}, rtl8125_mac_cfg3_mcu[] = {
	RTL8125_MAC_CFG3_MCU
};
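/*
 * The RTL8125_* macros above expand to lists of { register, value } pairs
 * (EPHY settings and PHY MCU patch data); they are expected to be defined
 * in <dev/pci/if_rgereg.h>, included above.
 */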

struct cfattach rge_ca = {
	sizeof(struct rge_softc), rge_match, rge_attach
};

struct cfdriver rge_cd = {
	NULL, "rge", DV_IFNET
};

const struct pci_matchid rge_devices[] = {
	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_E3000 },
	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RTL8125 }
};

int
rge_match(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid((struct pci_attach_args *)aux, rge_devices,
	    nitems(rge_devices)));
}

void
rge_attach(struct device *parent, struct device *self, void *aux)
{
	struct rge_softc *sc = (struct rge_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	struct ifnet *ifp;
	pcireg_t reg;
	uint32_t hwrev;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int offset;

	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);

	/*
	 * Map control/status registers.
	 */
	if (pci_mapreg_map(pa, RGE_PCI_BAR2, PCI_MAPREG_TYPE_MEM |
	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->rge_btag, &sc->rge_bhandle,
	    NULL, &sc->rge_bsize, 0)) {
		if (pci_mapreg_map(pa, RGE_PCI_BAR1, PCI_MAPREG_TYPE_MEM |
		    PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->rge_btag,
		    &sc->rge_bhandle, NULL, &sc->rge_bsize, 0)) {
			if (pci_mapreg_map(pa, RGE_PCI_BAR0, PCI_MAPREG_TYPE_IO,
			    0, &sc->rge_btag, &sc->rge_bhandle, NULL,
			    &sc->rge_bsize, 0)) {
				printf(": can't map mem or i/o space\n");
				return;
			}
		}
	}

	/*
	 * Allocate interrupt.
	 */
	if (pci_intr_map_msi(pa, &ih) == 0)
		sc->rge_flags |= RGE_FLAG_MSI;
	else if (pci_intr_map(pa, &ih) != 0) {
		printf(": couldn't map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET | IPL_MPSAFE, rge_intr,
	    sc, sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;

	/* Determine hardware revision */
	hwrev = RGE_READ_4(sc, RGE_TXCFG) & RGE_TXCFG_HWREV;
	switch (hwrev) {
	case 0x60800000:
		sc->rge_type = MAC_CFG2;
		break;
	case 0x60900000:
		sc->rge_type = MAC_CFG3;
		break;
	default:
		printf(": unknown version 0x%08x\n", hwrev);
		return;
	}

	rge_config_imtype(sc, RGE_IMTYPE_SIM);

	/*
	 * PCI Express check.
	 */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
	    &offset, NULL)) {
		/* Disable PCIe ASPM and ECPM. */
		reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    offset + PCI_PCIE_LCSR);
		reg &= ~(PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1 |
		    PCI_PCIE_LCSR_ECPM);
		pci_conf_write(pa->pa_pc, pa->pa_tag, offset + PCI_PCIE_LCSR,
		    reg);
	}

	rge_exit_oob(sc);
	rge_hw_init(sc);

	rge_get_macaddr(sc, eaddr);
	printf(", address %s\n", ether_sprintf(eaddr));

	memcpy(sc->sc_arpcom.ac_enaddr, eaddr, ETHER_ADDR_LEN);

	rge_set_phy_power(sc, 1);
	rge_phy_config(sc);

	if (rge_allocmem(sc))
		return;

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = rge_ioctl;
	ifp->if_qstart = rge_start;
	ifp->if_watchdog = rge_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, RGE_TX_LIST_CNT);
	ifp->if_hardmtu = RGE_JUMBO_MTU;

	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	timeout_set(&sc->sc_timeout, rge_tick, sc);
	task_set(&sc->sc_task, rge_txstart, sc);

	/* Initialize ifmedia structures. */
	ifmedia_init(&sc->sc_media, IFM_IMASK, rge_ifmedia_upd,
	    rge_ifmedia_sts);
	rge_add_media_types(sc);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
	sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media;

	if_attach(ifp);
	ether_ifattach(ifp);
}

int
rge_intr(void *arg)
{
	struct rge_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t status;
	int claimed = 0, rx, tx;

	if (!(ifp->if_flags & IFF_RUNNING))
		return (0);

	/* Disable interrupts. */
	RGE_WRITE_4(sc, RGE_IMR, 0);

	status = RGE_READ_4(sc, RGE_ISR);
	if (!(sc->rge_flags & RGE_FLAG_MSI)) {
		if ((status & RGE_INTRS) == 0 || status == 0xffffffff)
			return (0);
	}
	if (status)
		RGE_WRITE_4(sc, RGE_ISR, status);

	if (status & RGE_ISR_PCS_TIMEOUT)
		claimed = 1;

	rx = tx = 0;
	if (status & RGE_INTRS) {
		if (status &
		    (sc->rge_rx_ack | RGE_ISR_RX_ERR | RGE_ISR_RX_FIFO_OFLOW)) {
			rx |= rge_rxeof(sc);
			claimed = 1;
		}

		if (status & (sc->rge_tx_ack | RGE_ISR_TX_ERR)) {
			tx |= rge_txeof(sc);
			claimed = 1;
		}

		if (status & RGE_ISR_SYSTEM_ERR) {
			KERNEL_LOCK();
			rge_init(ifp);
			KERNEL_UNLOCK();
			claimed = 1;
		}
	}

	if (sc->rge_timerintr) {
		if ((tx | rx) == 0) {
			/*
			 * Nothing needs to be processed; fall back
			 * to using TX/RX interrupts.
			 */
			rge_setup_intr(sc, RGE_IMTYPE_NONE);

			/*
			 * Recollect, mainly to avoid the possible
			 * race introduced by changing interrupt
			 * masks.
			 */
			rge_rxeof(sc);
			rge_txeof(sc);
		} else
			RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
	} else if (tx | rx) {
		/*
		 * Assume that using simulated interrupt moderation
		 * (hardware timer based) could reduce the interrupt
		 * rate.
		 */
		rge_setup_intr(sc, RGE_IMTYPE_SIM);
	}

	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);

	return (claimed);
}

int
rge_encap(struct rge_softc *sc, struct mbuf *m, int idx)
{
	struct rge_tx_desc *d = NULL;
	struct rge_txq *txq;
	bus_dmamap_t txmap;
	uint32_t cmdsts, cflags = 0;
	int cur, error, i, last, nsegs;

	/*
	 * Set RGE_TDEXTSTS_IPCSUM if any checksum offloading is requested.
	 * Otherwise, RGE_TDEXTSTS_TCPCSUM / RGE_TDEXTSTS_UDPCSUM do not
	 * take effect.
	 */
	if ((m->m_pkthdr.csum_flags &
	    (M_IPV4_CSUM_OUT | M_TCP_CSUM_OUT | M_UDP_CSUM_OUT)) != 0) {
		cflags |= RGE_TDEXTSTS_IPCSUM;
		if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
			cflags |= RGE_TDEXTSTS_TCPCSUM;
		if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
			cflags |= RGE_TDEXTSTS_UDPCSUM;
	}

	txq = &sc->rge_ldata.rge_txq[idx];
	txmap = txq->txq_dmamap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m, BUS_DMA_NOWAIT);
	switch (error) {
	case 0:
		break;
	case EFBIG: /* mbuf chain is too fragmented */
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m,
		    BUS_DMA_NOWAIT) == 0)
			break;

		/* FALLTHROUGH */
	default:
		return (0);
	}

	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	nsegs = txmap->dm_nsegs;

	/* Set up hardware VLAN tagging. */
#if NVLAN > 0
	if (m->m_flags & M_VLANTAG)
		cflags |= swap16(m->m_pkthdr.ether_vtag | RGE_TDEXTSTS_VTAG);
#endif

	cur = idx;
	cmdsts = RGE_TDCMDSTS_SOF;

	for (i = 0; i < txmap->dm_nsegs; i++) {
		d = &sc->rge_ldata.rge_tx_list[cur];

		d->rge_extsts = htole32(cflags);
		d->rge_addrlo = htole32(RGE_ADDR_LO(txmap->dm_segs[i].ds_addr));
		d->rge_addrhi = htole32(RGE_ADDR_HI(txmap->dm_segs[i].ds_addr));

		cmdsts |= txmap->dm_segs[i].ds_len;

		if (cur == RGE_TX_LIST_CNT - 1)
			cmdsts |= RGE_TDCMDSTS_EOR;

		d->rge_cmdsts = htole32(cmdsts);

		last = cur;
		cmdsts = RGE_TDCMDSTS_OWN;
		cur = RGE_NEXT_TX_DESC(cur);
	}

	/* Set EOF on the last descriptor. */
	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_EOF);

	/* Transfer ownership of packet to the chip. */
	d = &sc->rge_ldata.rge_tx_list[idx];

	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_OWN);
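	/*
	 * Note that the OWN bit of the first descriptor is set only after
	 * the rest of the chain has been filled in above, so the chip never
	 * sees a partially built chain.
	 */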

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
	    cur * sizeof(struct rge_tx_desc), sizeof(struct rge_tx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Update info of TX queue and descriptors. */
	txq->txq_mbuf = m;
	txq->txq_descidx = last;

	return (nsegs);
}

int
rge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct rge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			rge_init(ifp);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				rge_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				rge_stop(ifp);
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu > ifp->if_hardmtu) {
			error = EINVAL;
			break;
		}
		ifp->if_mtu = ifr->ifr_mtu;
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			rge_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
rge_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct rge_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int free, idx, used;
	int queued = 0;

	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
		ifq_purge(ifq);
		return;
	}

	/* Calculate free space. */
	idx = sc->rge_ldata.rge_txq_prodidx;
	free = sc->rge_ldata.rge_txq_considx;
	if (free <= idx)
		free += RGE_TX_LIST_CNT;
	free -= idx;
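	/*
	 * (free now holds the number of unused TX descriptors; when the
	 * producer and consumer indices are equal, the whole ring of
	 * RGE_TX_LIST_CNT slots is treated as free.)
	 */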

	for (;;) {
		if (RGE_TX_NSEGS >= free + 2) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		used = rge_encap(sc, m, idx);
		if (used == 0) {
			m_freem(m);
			continue;
		}

		KASSERT(used <= free);
		free -= used;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		idx += used;
		if (idx >= RGE_TX_LIST_CNT)
			idx -= RGE_TX_LIST_CNT;

		queued++;
	}

	if (queued == 0)
		return;

	/* Set a timeout in case the chip goes out to lunch. */
	ifp->if_timer = 5;

	sc->rge_ldata.rge_txq_prodidx = idx;
	ifq_serialize(ifq, &sc->sc_task);
}

void
rge_watchdog(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	rge_init(ifp);
}

int
rge_init(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	uint32_t val;
	uint16_t max_frame_size;
	int i;

	rge_stop(ifp);

	/* Set MAC address. */
	rge_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);

	/* Set maximum frame size but don't let the MTU be less than ETHERMTU. */
	if (ifp->if_mtu < ETHERMTU)
		max_frame_size = ETHERMTU;
	else
		max_frame_size = ifp->if_mtu;

	max_frame_size += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
	    ETHER_CRC_LEN + 1;

	if (max_frame_size > RGE_JUMBO_FRAMELEN)
		max_frame_size -= 1;
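	/*
	 * (The extra byte added above is dropped again when it would push
	 * the programmed limit past RGE_JUMBO_FRAMELEN.)
	 */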

	RGE_WRITE_2(sc, RGE_RXMAXSIZE, max_frame_size);

	/* Initialize RX descriptors list. */
	if (rge_rx_list_init(sc) == ENOBUFS) {
		printf("%s: init failed: no memory for RX buffers\n",
		    sc->sc_dev.dv_xname);
		rge_stop(ifp);
		return (ENOBUFS);
	}

	/* Initialize TX descriptors. */
	rge_tx_list_init(sc);

	/* Load the addresses of the RX and TX lists into the chip. */
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_LO,
	    RGE_ADDR_LO(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_HI,
	    RGE_ADDR_HI(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_LO,
	    RGE_ADDR_LO(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_HI,
	    RGE_ADDR_HI(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));

	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);

	RGE_CLRBIT_1(sc, 0xf1, 0x80);
	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
	RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_RDY_TO_L23);

	/* Clear interrupt moderation timer. */
	for (i = 0; i < 64; i++)
		RGE_WRITE_4(sc, RGE_IM(i), 0);

	/* Set the initial RX and TX configurations. */
	RGE_WRITE_4(sc, RGE_RXCFG, RGE_RXCFG_CONFIG);
	RGE_WRITE_4(sc, RGE_TXCFG, RGE_TXCFG_CONFIG);

	val = rge_read_csi(sc, 0x70c) & ~0xff000000;
	rge_write_csi(sc, 0x70c, val | 0x27000000);

	/* Enable hardware optimization function. */
	val = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x78) & ~0x00007000;
	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x78, val | 0x00005000);

	RGE_WRITE_2(sc, 0x0382, 0x221b);
	RGE_WRITE_1(sc, 0x4500, 0);
	RGE_WRITE_2(sc, 0x4800, 0);
	RGE_CLRBIT_1(sc, RGE_CFG1, RGE_CFG1_SPEED_DOWN);

	rge_write_mac_ocp(sc, 0xc140, 0xffff);
	rge_write_mac_ocp(sc, 0xc142, 0xffff);

	val = rge_read_mac_ocp(sc, 0xd3e2) & ~0x0fff;
	rge_write_mac_ocp(sc, 0xd3e2, val | 0x03a9);

	RGE_MAC_CLRBIT(sc, 0xd3e4, 0x00ff);
	RGE_MAC_SETBIT(sc, 0xe860, 0x0080);
	RGE_MAC_SETBIT(sc, 0xeb58, 0x0001);

	val = rge_read_mac_ocp(sc, 0xe614) & ~0x0700;
	rge_write_mac_ocp(sc, 0xe614, val | 0x0400);

	RGE_MAC_CLRBIT(sc, 0xe63e, 0x0c00);

	val = rge_read_mac_ocp(sc, 0xe63e) & ~0x0030;
	rge_write_mac_ocp(sc, 0xe63e, val | 0x0020);

	RGE_MAC_SETBIT(sc, 0xc0b4, 0x000c);

	val = rge_read_mac_ocp(sc, 0xeb6a) & ~0x007f;
	rge_write_mac_ocp(sc, 0xeb6a, val | 0x0033);

	val = rge_read_mac_ocp(sc, 0xeb50) & ~0x03e0;
	rge_write_mac_ocp(sc, 0xeb50, val | 0x0040);

	val = rge_read_mac_ocp(sc, 0xe056) & ~0x00f0;
	rge_write_mac_ocp(sc, 0xe056, val | 0x0030);

	RGE_WRITE_1(sc, RGE_TDFNR, 0x10);

	RGE_MAC_CLRBIT(sc, 0xe040, 0x1000);

	val = rge_read_mac_ocp(sc, 0xe0c0) & ~0x4f0f;
	rge_write_mac_ocp(sc, 0xe0c0, val | 0x4403);

	RGE_MAC_SETBIT(sc, 0xe052, 0x0068);
	RGE_MAC_CLRBIT(sc, 0xe052, 0x0080);

	val = rge_read_mac_ocp(sc, 0xc0ac) & ~0x0080;
	rge_write_mac_ocp(sc, 0xc0ac, val | 0x1f00);

	val = rge_read_mac_ocp(sc, 0xd430) & ~0x0fff;
	rge_write_mac_ocp(sc, 0xd430, val | 0x047f);

	RGE_MAC_SETBIT(sc, 0xe84c, 0x00c0);

	/* Disable EEE plus. */
	RGE_MAC_CLRBIT(sc, 0xe080, 0x0002);

	RGE_MAC_CLRBIT(sc, 0xea1c, 0x0004);

	RGE_MAC_SETBIT(sc, 0xeb54, 0x0001);
	DELAY(1);
	RGE_MAC_CLRBIT(sc, 0xeb54, 0x0001);

	RGE_CLRBIT_4(sc, 0x1880, 0x0030);

	rge_write_mac_ocp(sc, 0xe098, 0xc302);

	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		RGE_SETBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);

	RGE_SETBIT_2(sc, RGE_CPLUSCMD, RGE_CPLUSCMD_RXCSUM);

	for (i = 0; i < 10; i++) {
		if (!(rge_read_mac_ocp(sc, 0xe00e) & 0x2000))
			break;
		DELAY(1000);
	}

	/* Disable RXDV gate. */
	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
	DELAY(2000);

	rge_ifmedia_upd(ifp);

	/* Enable transmit and receive. */
	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_TXENB | RGE_CMD_RXENB);

	/* Program promiscuous mode and multicast filters. */
	rge_iff(sc);

	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);

	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);

	/* Enable interrupts. */
	rge_setup_intr(sc, RGE_IMTYPE_SIM);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	timeout_add_sec(&sc->sc_timeout, 1);

	return (0);
}

/*
 * Stop the adapter and free any mbufs allocated to the RX and TX lists.
 */
void
rge_stop(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	int i;

	timeout_del(&sc->sc_timeout);

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_RUNNING;
	sc->rge_timerintr = 0;

	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
	    RGE_RXCFG_ERRPKT);

	RGE_WRITE_4(sc, RGE_IMR, 0);
	RGE_WRITE_4(sc, RGE_ISR, 0xffffffff);

	rge_reset(sc);

	intr_barrier(sc->sc_ih);
	ifq_barrier(&ifp->if_snd);
	ifq_clr_oactive(&ifp->if_snd);

	if (sc->rge_head != NULL) {
		m_freem(sc->rge_head);
		sc->rge_head = sc->rge_tail = NULL;
	}

	/* Free the TX list buffers. */
	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
		if (sc->rge_ldata.rge_txq[i].txq_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->rge_ldata.rge_txq[i].txq_dmamap);
			m_freem(sc->rge_ldata.rge_txq[i].txq_mbuf);
			sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
		}
	}

	/* Free the RX list buffers. */
	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
		if (sc->rge_ldata.rge_rxq[i].rxq_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->rge_ldata.rge_rxq[i].rxq_dmamap);
			m_freem(sc->rge_ldata.rge_rxq[i].rxq_mbuf);
			sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
		}
	}
}

/*
 * Set media options.
 */
int
rge_ifmedia_upd(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_media;
	int anar, gig, val;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Disable Gigabit Lite. */
	RGE_PHY_CLRBIT(sc, 0xa428, 0x0200);
	RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0001);

	val = rge_read_phy_ocp(sc, 0xa5d4);
	val &= ~RGE_ADV_2500TFDX;

	anar = gig = 0;
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		anar |= ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig |= GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		val |= RGE_ADV_2500TFDX;
		break;
	case IFM_2500_T:
		anar |= ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig |= GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		val |= RGE_ADV_2500TFDX;
		ifp->if_baudrate = IF_Mbps(2500);
		break;
	case IFM_1000_T:
		anar |= ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig |= GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		ifp->if_baudrate = IF_Gbps(1);
		break;
	case IFM_100_TX:
		anar |= ANAR_TX | ANAR_TX_FD;
		ifp->if_baudrate = IF_Mbps(100);
		break;
	case IFM_10_T:
		anar |= ANAR_10 | ANAR_10_FD;
		ifp->if_baudrate = IF_Mbps(10);
		break;
	default:
		printf("%s: unsupported media type\n", sc->sc_dev.dv_xname);
		return (EINVAL);
	}

	rge_write_phy(sc, 0, MII_ANAR, anar | ANAR_PAUSE_ASYM | ANAR_FC);
	rge_write_phy(sc, 0, MII_100T2CR, gig);
	rge_write_phy_ocp(sc, 0xa5d4, val);
	rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_STARTNEG);

	return (0);
}

/*
 * Report current media status.
 */
void
rge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct rge_softc *sc = ifp->if_softc;
	uint16_t status = 0;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (rge_get_link_status(sc)) {
		ifmr->ifm_status |= IFM_ACTIVE;

		status = RGE_READ_2(sc, RGE_PHYSTAT);
		if ((status & RGE_PHYSTAT_FDX) ||
		    (status & RGE_PHYSTAT_2500MBPS))
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;

		if (status & RGE_PHYSTAT_10MBPS)
			ifmr->ifm_active |= IFM_10_T;
		else if (status & RGE_PHYSTAT_100MBPS)
			ifmr->ifm_active |= IFM_100_TX;
		else if (status & RGE_PHYSTAT_1000MBPS)
			ifmr->ifm_active |= IFM_1000_T;
		else if (status & RGE_PHYSTAT_2500MBPS)
			ifmr->ifm_active |= IFM_2500_T;
	}
}

/*
 * Allocate memory for RX/TX rings.
 */
int
rge_allocmem(struct rge_softc *sc)
{
	int error, i;

	/* Allocate DMA'able memory for the TX ring. */
	error = bus_dmamap_create(sc->sc_dmat, RGE_TX_LIST_SZ, 1,
	    RGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT, &sc->rge_ldata.rge_tx_list_map);
	if (error) {
		printf("%s: can't create TX list map\n", sc->sc_dev.dv_xname);
		return (error);
	}
	error = bus_dmamem_alloc(sc->sc_dmat, RGE_TX_LIST_SZ, RGE_ALIGN, 0,
	    &sc->rge_ldata.rge_tx_listseg, 1, &sc->rge_ldata.rge_tx_listnseg,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error) {
		printf("%s: can't alloc TX list\n", sc->sc_dev.dv_xname);
		return (error);
	}

	/* Load the map for the TX ring. */
	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
	    sc->rge_ldata.rge_tx_listnseg, RGE_TX_LIST_SZ,
	    (caddr_t *)&sc->rge_ldata.rge_tx_list,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		printf("%s: can't map TX dma buffers\n", sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
		    sc->rge_ldata.rge_tx_listnseg);
		return (error);
	}
	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
	    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load TX dma map\n", sc->sc_dev.dv_xname);
		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map);
		bus_dmamem_unmap(sc->sc_dmat,
		    (caddr_t)sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ);
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
		    sc->rge_ldata.rge_tx_listnseg);
		return (error);
	}

	/* Create DMA maps for TX buffers. */
	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN,
		    RGE_TX_NSEGS, RGE_JUMBO_FRAMELEN, 0, 0,
		    &sc->rge_ldata.rge_txq[i].txq_dmamap);
		if (error) {
			printf("%s: can't create DMA map for TX\n",
			    sc->sc_dev.dv_xname);
			return (error);
		}
	}

	/* Allocate DMA'able memory for the RX ring. */
	error = bus_dmamap_create(sc->sc_dmat, RGE_RX_LIST_SZ, 1,
	    RGE_RX_LIST_SZ, 0, 0, &sc->rge_ldata.rge_rx_list_map);
	if (error) {
		printf("%s: can't create RX list map\n", sc->sc_dev.dv_xname);
		return (error);
	}
	error = bus_dmamem_alloc(sc->sc_dmat, RGE_RX_LIST_SZ, RGE_ALIGN, 0,
	    &sc->rge_ldata.rge_rx_listseg, 1, &sc->rge_ldata.rge_rx_listnseg,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error) {
		printf("%s: can't alloc RX list\n", sc->sc_dev.dv_xname);
		return (error);
	}

	/* Load the map for the RX ring. */
	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
	    sc->rge_ldata.rge_rx_listnseg, RGE_RX_LIST_SZ,
	    (caddr_t *)&sc->rge_ldata.rge_rx_list,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		printf("%s: can't map RX dma buffers\n", sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
		    sc->rge_ldata.rge_rx_listnseg);
		return (error);
	}
	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
	    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load RX dma map\n", sc->sc_dev.dv_xname);
		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map);
		bus_dmamem_unmap(sc->sc_dmat,
		    (caddr_t)sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ);
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
		    sc->rge_ldata.rge_rx_listnseg);
		return (error);
	}

	/* Create DMA maps for RX buffers. */
	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN, 1,
		    RGE_JUMBO_FRAMELEN, 0, 0,
		    &sc->rge_ldata.rge_rxq[i].rxq_dmamap);
		if (error) {
			printf("%s: can't create DMA map for RX\n",
			    sc->sc_dev.dv_xname);
			return (error);
		}
	}

	return (error);
}

/*
 * Initialize the RX descriptor and attach an mbuf cluster.
 */
int
rge_newbuf(struct rge_softc *sc, int idx)
{
	struct mbuf *m;
	struct rge_rx_desc *r;
	struct rge_rxq *rxq;
	bus_dmamap_t rxmap;

	m = MCLGETI(NULL, M_DONTWAIT, NULL, RGE_JUMBO_FRAMELEN);
	if (m == NULL)
		return (ENOBUFS);

	m->m_len = m->m_pkthdr.len = RGE_JUMBO_FRAMELEN;

	rxq = &sc->rge_ldata.rge_rxq[idx];
	rxmap = rxq->rxq_dmamap;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m, BUS_DMA_NOWAIT))
		goto out;

	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Map the segments into RX descriptors. */
	r = &sc->rge_ldata.rge_rx_list[idx];

	if (RGE_OWN(r)) {
		printf("%s: tried to map busy RX descriptor\n",
		    sc->sc_dev.dv_xname);
		goto out;
	}

	rxq->rxq_mbuf = m;

	r->rge_extsts = 0;
	r->rge_addrlo = htole32(RGE_ADDR_LO(rxmap->dm_segs[0].ds_addr));
	r->rge_addrhi = htole32(RGE_ADDR_HI(rxmap->dm_segs[0].ds_addr));

	r->rge_cmdsts = htole32(rxmap->dm_segs[0].ds_len);
	if (idx == RGE_RX_LIST_CNT - 1)
		r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);

	r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
out:
	if (m != NULL)
		m_freem(m);
	return (ENOMEM);
}

void
rge_discard_rxbuf(struct rge_softc *sc, int idx)
{
	struct rge_rx_desc *r;

	r = &sc->rge_ldata.rge_rx_list[idx];

	r->rge_cmdsts = htole32(RGE_JUMBO_FRAMELEN);
	r->rge_extsts = 0;
	if (idx == RGE_RX_LIST_CNT - 1)
		r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
	r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

int
rge_rx_list_init(struct rge_softc *sc)
{
	int i;

	memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);

	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
		sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
		if (rge_newbuf(sc, i) == ENOBUFS)
			return (ENOBUFS);
	}

	sc->rge_ldata.rge_rxq_prodidx = 0;
	sc->rge_head = sc->rge_tail = NULL;

	return (0);
}

void
rge_tx_list_init(struct rge_softc *sc)
{
	int i;

	memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);

	for (i = 0; i < RGE_TX_LIST_CNT; i++)
		sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map, 0,
	    sc->rge_ldata.rge_tx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->rge_ldata.rge_txq_prodidx = sc->rge_ldata.rge_txq_considx = 0;
}

int
rge_rxeof(struct rge_softc *sc)
{
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct rge_rx_desc *cur_rx;
	struct rge_rxq *rxq;
	uint32_t rxstat, extsts;
	int i, total_len, rx = 0;

	for (i = sc->rge_ldata.rge_rxq_prodidx; ; i = RGE_NEXT_RX_DESC(i)) {
		/* Invalidate the descriptor memory. */
		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
		    i * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_rx = &sc->rge_ldata.rge_rx_list[i];

		if (RGE_OWN(cur_rx))
			break;

		rxstat = letoh32(cur_rx->rge_cmdsts);
		extsts = letoh32(cur_rx->rge_extsts);

		total_len = RGE_RXBYTES(cur_rx);
		rxq = &sc->rge_ldata.rge_rxq[i];
		m = rxq->rxq_mbuf;
		rx = 1;

		/* Invalidate the RX mbuf and unload its map. */
		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_dmamap, 0,
		    rxq->rxq_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxq->rxq_dmamap);

		if ((rxstat & (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) !=
		    (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) {
			rge_discard_rxbuf(sc, i);
			continue;
		}

		if (rxstat & RGE_RDCMDSTS_RXERRSUM) {
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->rge_head != NULL) {
				m_freem(sc->rge_head);
				sc->rge_head = sc->rge_tail = NULL;
			}
			rge_discard_rxbuf(sc, i);
			continue;
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
		 */

		if (rge_newbuf(sc, i) == ENOBUFS) {
			if (sc->rge_head != NULL) {
				m_freem(sc->rge_head);
				sc->rge_head = sc->rge_tail = NULL;
			}
			rge_discard_rxbuf(sc, i);
			continue;
		}

		if (sc->rge_head != NULL) {
			m->m_len = total_len;
			/*
			 * Special case: if there are 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes are the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->rge_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->rge_tail->m_next = m;
			}
			m = sc->rge_head;
			sc->rge_head = sc->rge_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);

		/* Check IP header checksum. */
		if (!(rxstat & RGE_RDCMDSTS_IPCSUMERR) &&
		    (extsts & RGE_RDEXTSTS_IPV4))
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

		/* Check TCP/UDP checksum. */
		if ((extsts & (RGE_RDEXTSTS_IPV4 | RGE_RDEXTSTS_IPV6)) &&
		    (((rxstat & RGE_RDCMDSTS_TCPPKT) &&
		    !(rxstat & RGE_RDCMDSTS_TCPCSUMERR)) ||
		    ((rxstat & RGE_RDCMDSTS_UDPPKT) &&
		    !(rxstat & RGE_RDCMDSTS_UDPCSUMERR))))
			m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
			    M_UDP_CSUM_IN_OK;

#if NVLAN > 0
		if (extsts & RGE_RDEXTSTS_VTAG) {
			m->m_pkthdr.ether_vtag =
			    ntohs(extsts & RGE_RDEXTSTS_VLAN_MASK);
			m->m_flags |= M_VLANTAG;
		}
#endif

		ml_enqueue(&ml, m);
	}

	sc->rge_ldata.rge_rxq_prodidx = i;

	if_input(ifp, &ml);

	return (rx);
}

int
rge_txeof(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct rge_txq *txq;
	uint32_t txstat;
	int cons, idx, prod;
	int free = 0;

	prod = sc->rge_ldata.rge_txq_prodidx;
	cons = sc->rge_ldata.rge_txq_considx;

	while (prod != cons) {
		txq = &sc->rge_ldata.rge_txq[cons];
		idx = txq->txq_descidx;

		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
		    idx * sizeof(struct rge_tx_desc),
		    sizeof(struct rge_tx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		txstat = letoh32(sc->rge_ldata.rge_tx_list[idx].rge_cmdsts);

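		/*
		 * If the chip still owns the last descriptor of this packet,
		 * transmission has not finished; free == 2 records that so
		 * the transmit start task can be kicked again below.
		 */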
		if (txstat & RGE_TDCMDSTS_OWN) {
			free = 2;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap, 0,
		    txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
		m_freem(txq->txq_mbuf);
		txq->txq_mbuf = NULL;

		if (txstat & (RGE_TDCMDSTS_EXCESSCOLL | RGE_TDCMDSTS_COLL))
			ifp->if_collisions++;
		if (txstat & RGE_TDCMDSTS_TXERR)
			ifp->if_oerrors++;

		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
		    idx * sizeof(struct rge_tx_desc),
		    sizeof(struct rge_tx_desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		cons = RGE_NEXT_TX_DESC(idx);
		free = 1;
	}

	if (free == 0)
		return (0);

	sc->rge_ldata.rge_txq_considx = cons;

	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
	else if (free == 2)
		ifq_serialize(&ifp->if_snd, &sc->sc_task);
	else
		ifp->if_timer = 0;

	return (1);
}

void
rge_reset(struct rge_softc *sc)
{
	int i;

	/* Enable RXDV gate. */
	RGE_SETBIT_1(sc, RGE_PPSW, 0x08);
	DELAY(2000);

	for (i = 0; i < 10; i++) {
		DELAY(100);
		if ((RGE_READ_1(sc, RGE_MCUCMD) & (RGE_MCUCMD_RXFIFO_EMPTY |
		    RGE_MCUCMD_TXFIFO_EMPTY)) == (RGE_MCUCMD_RXFIFO_EMPTY |
		    RGE_MCUCMD_TXFIFO_EMPTY))
			break;
	}
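	/*
	 * (The loop above polls 10 times at 100 us intervals, so it waits
	 * roughly 1 ms for both FIFOs to drain before the soft reset below.)
	 */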

	/* Soft reset. */
	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_RESET);

	for (i = 0; i < RGE_TIMEOUT; i++) {
		DELAY(100);
		if (!(RGE_READ_1(sc, RGE_CMD) & RGE_CMD_RESET))
			break;
	}
	if (i == RGE_TIMEOUT)
		printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);
}

void
rge_iff(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct arpcom *ac = &sc->sc_arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[2];
	uint32_t rxfilt;
	int h = 0;

	rxfilt = RGE_READ_4(sc, RGE_RXCFG);
	rxfilt &= ~(RGE_RXCFG_ALLPHYS | RGE_RXCFG_MULTI);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept frames destined to our station address.
	 * Always accept broadcast frames.
	 */
	rxfilt |= RGE_RXCFG_INDIV | RGE_RXCFG_BROAD;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		rxfilt |= RGE_RXCFG_MULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= RGE_RXCFG_ALLPHYS;
		hashes[0] = hashes[1] = 0xffffffff;
	} else {
		rxfilt |= RGE_RXCFG_MULTI;
		/* Program new filter. */
		memset(hashes, 0, sizeof(hashes));

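		/*
		 * Hash each multicast address with a big-endian CRC32 and
		 * use the top 6 bits as the bit index into the 64-bit
		 * multicast hash filter.
		 */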
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			h = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN) >> 26;

			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	RGE_WRITE_4(sc, RGE_RXCFG, rxfilt);
	RGE_WRITE_4(sc, RGE_MAR0, swap32(hashes[1]));
	RGE_WRITE_4(sc, RGE_MAR4, swap32(hashes[0]));
}

void
rge_set_phy_power(struct rge_softc *sc, int on)
{
	int i;

	if (on) {
		RGE_SETBIT_1(sc, RGE_PMCH, 0xc0);

		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN);

		for (i = 0; i < RGE_TIMEOUT; i++) {
			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0080) == 3)
				break;
			DELAY(1000);
		}
	} else
		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_PDOWN);
}

void
rge_phy_config(struct rge_softc *sc)
{
	uint16_t mcode_ver, val;
	int i;
	static const uint16_t mac_cfg3_a438_value[] =
	    { 0x0043, 0x00a7, 0x00d6, 0x00ec, 0x00f6, 0x00fb, 0x00fd, 0x00ff,
	      0x00bb, 0x0058, 0x0029, 0x0013, 0x0009, 0x0004, 0x0002 };

	static const uint16_t mac_cfg3_b88e_value[] =
	    { 0xc091, 0x6e12, 0xc092, 0x1214, 0xc094, 0x1516, 0xc096, 0x171b,
	      0xc098, 0x1b1c, 0xc09a, 0x1f1f, 0xc09c, 0x2021, 0xc09e, 0x2224,
	      0xc0a0, 0x2424, 0xc0a2, 0x2424, 0xc0a4, 0x2424, 0xc018, 0x0af2,
	      0xc01a, 0x0d4a, 0xc01c, 0x0f26, 0xc01e, 0x118d, 0xc020, 0x14f3,
	      0xc022, 0x175a, 0xc024, 0x19c0, 0xc026, 0x1c26, 0xc089, 0x6050,
	      0xc08a, 0x5f6e, 0xc08c, 0x6e6e, 0xc08e, 0x6e6e, 0xc090, 0x6e12 };

	/* Read microcode version. */
	rge_write_phy_ocp(sc, 0xa436, 0x801e);
	mcode_ver = rge_read_phy_ocp(sc, 0xa438);

	if (sc->rge_type == MAC_CFG2) {
		for (i = 0; i < nitems(rtl8125_mac_cfg2_ephy); i++) {
			rge_write_ephy(sc, rtl8125_mac_cfg2_ephy[i].reg,
			    rtl8125_mac_cfg2_ephy[i].val);
		}

		if (mcode_ver != RGE_MAC_CFG2_MCODE_VER) {
			/* Disable PHY config. */
			RGE_CLRBIT_1(sc, 0xf2, 0x20);
			DELAY(1000);

			rge_patch_phy_mcu(sc, 1);

			rge_write_phy_ocp(sc, 0xa436, 0x8024);
			rge_write_phy_ocp(sc, 0xa438, 0x8600);
			rge_write_phy_ocp(sc, 0xa436, 0xb82e);
			rge_write_phy_ocp(sc, 0xa438, 0x0001);

			RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
			for (i = 0; i < nitems(rtl8125_mac_cfg2_mcu); i++) {
				rge_write_phy_ocp(sc,
				    rtl8125_mac_cfg2_mcu[i].reg,
				    rtl8125_mac_cfg2_mcu[i].val);
			}
			RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);

			rge_write_phy_ocp(sc, 0xa436, 0);
			rge_write_phy_ocp(sc, 0xa438, 0);
			RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
			rge_write_phy_ocp(sc, 0xa436, 0x8024);
			rge_write_phy_ocp(sc, 0xa438, 0);

			rge_patch_phy_mcu(sc, 0);

			/* Enable PHY config. */
			RGE_SETBIT_1(sc, 0xf2, 0x20);

			/* Write microcode version. */
			rge_write_phy_ocp(sc, 0xa436, 0x801e);
			rge_write_phy_ocp(sc, 0xa438, RGE_MAC_CFG2_MCODE_VER);
		}

		val = rge_read_phy_ocp(sc, 0xad40) & ~0x03ff;
		rge_write_phy_ocp(sc, 0xad40, val | 0x0084);
		RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
		val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
		rge_write_phy_ocp(sc, 0xad16, val | 0x0006);
		val = rge_read_phy_ocp(sc, 0xad32) & ~0x03ff;
		rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
		RGE_PHY_CLRBIT(sc, 0xac08, 0x1100);
		val = rge_read_phy_ocp(sc, 0xac8a) & ~0xf000;
		rge_write_phy_ocp(sc, 0xac8a, val | 0x7000);
		RGE_PHY_SETBIT(sc, 0xad18, 0x0400);
		RGE_PHY_SETBIT(sc, 0xad1a, 0x03ff);
		RGE_PHY_SETBIT(sc, 0xad1c, 0x03ff);

		rge_write_phy_ocp(sc, 0xa436, 0x80ea);
		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
		rge_write_phy_ocp(sc, 0xa438, val | 0xc400);
		rge_write_phy_ocp(sc, 0xa436, 0x80eb);
		val = rge_read_phy_ocp(sc, 0xa438) & ~0x0700;
		rge_write_phy_ocp(sc, 0xa438, val | 0x0300);
		rge_write_phy_ocp(sc, 0xa436, 0x80f8);
		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
		rge_write_phy_ocp(sc, 0xa438, val | 0x1c00);
		rge_write_phy_ocp(sc, 0xa436, 0x80f1);
		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
		rge_write_phy_ocp(sc, 0xa438, val | 0x3000);
		rge_write_phy_ocp(sc, 0xa436, 0x80fe);
		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
		rge_write_phy_ocp(sc, 0xa438, val | 0xa500);
		rge_write_phy_ocp(sc, 0xa436, 0x8102);
		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
		rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
		rge_write_phy_ocp(sc, 0xa436, 0x8105);
		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
		rge_write_phy_ocp(sc, 0xa438, val | 0x3300);
		rge_write_phy_ocp(sc, 0xa436, 0x8100);
		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
		rge_write_phy_ocp(sc, 0xa438, val | 0x7000);
		rge_write_phy_ocp(sc, 0xa436, 0x8104);
		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
		rge_write_phy_ocp(sc, 0xa438, val | 0xf000);
		rge_write_phy_ocp(sc, 0xa436, 0x8106);
		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
		rge_write_phy_ocp(sc, 0xa438, val | 0x6500);
		rge_write_phy_ocp(sc, 0xa436, 0x80dc);
		val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
		rge_write_phy_ocp(sc, 0xa438, val | 0xed00);
		rge_write_phy_ocp(sc, 0xa436, 0x80df);
		RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
		rge_write_phy_ocp(sc, 0xa436, 0x80e1);
		RGE_PHY_CLRBIT(sc, 0xa438, 0x0100);
		val = rge_read_phy_ocp(sc, 0xbf06) & ~0x003f;
		rge_write_phy_ocp(sc, 0xbf06, val | 0x0038);
		rge_write_phy_ocp(sc, 0xa436, 0x819f);
		rge_write_phy_ocp(sc, 0xa438, 0xd0b6);
		rge_write_phy_ocp(sc, 0xbc34, 0x5555);
		val = rge_read_phy_ocp(sc, 0xbf0a) & ~0x0e00;
		rge_write_phy_ocp(sc, 0xbf0a, val | 0x0a00);
		RGE_PHY_CLRBIT(sc, 0xa5c0, 0x0400);
		RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
	} else {
		for (i = 0; i < nitems(rtl8125_mac_cfg3_ephy); i++)
			rge_write_ephy(sc, rtl8125_mac_cfg3_ephy[i].reg,
			    rtl8125_mac_cfg3_ephy[i].val);

		if (mcode_ver != RGE_MAC_CFG3_MCODE_VER) {
			/* Disable PHY config. */
			RGE_CLRBIT_1(sc, 0xf2, 0x20);
			DELAY(1000);

			rge_patch_phy_mcu(sc, 1);

			rge_write_phy_ocp(sc, 0xa436, 0x8024);
			rge_write_phy_ocp(sc, 0xa438, 0x8601);
			rge_write_phy_ocp(sc, 0xa436, 0xb82e);
			rge_write_phy_ocp(sc, 0xa438, 0x0001);

			RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
			for (i = 0; i < nitems(rtl8125_mac_cfg3_mcu); i++) {
				rge_write_phy_ocp(sc,
				    rtl8125_mac_cfg3_mcu[i].reg,
				    rtl8125_mac_cfg3_mcu[i].val);
			}
			RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);

			rge_write_phy_ocp(sc, 0xa436, 0);
			rge_write_phy_ocp(sc, 0xa438, 0);
			RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
			rge_write_phy_ocp(sc, 0xa436, 0x8024);
			rge_write_phy_ocp(sc, 0xa438, 0);
1548 1.1 sevan
1549 1.1 sevan rge_patch_phy_mcu(sc, 0);
1550 1.1 sevan
1551 1.1 sevan /* Enable PHY config. */
1552 1.1 sevan RGE_SETBIT_1(sc, 0xf2, 0x20);
1553 1.1 sevan
1554 1.1 sevan /* Write microcode version. */
1555 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x801e);
1556 1.1 sevan rge_write_phy_ocp(sc, 0xa438, RGE_MAC_CFG3_MCODE_VER);
1557 1.1 sevan }
1558 1.1 sevan
1559 1.1 sevan RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
1560 1.1 sevan val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
1561 1.1 sevan rge_write_phy_ocp(sc, 0xad16, val | 0x03ff);
1562 1.1 sevan val = rge_read_phy_ocp(sc, 0xad32) & ~0x003f;
1563 1.1 sevan rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
1564 1.1 sevan RGE_PHY_CLRBIT(sc, 0xac08, 0x1000);
1565 1.1 sevan RGE_PHY_CLRBIT(sc, 0xac08, 0x0100);
1566 1.1 sevan val = rge_read_phy_ocp(sc, 0xacc0) & ~0x0003;
1567 1.1 sevan rge_write_phy_ocp(sc, 0xacc0, val | 0x0002);
1568 1.1 sevan val = rge_read_phy_ocp(sc, 0xad40) & ~0x00e0;
1569 1.1 sevan rge_write_phy_ocp(sc, 0xad40, val | 0x0040);
1570 1.1 sevan val = rge_read_phy_ocp(sc, 0xad40) & ~0x0007;
1571 1.1 sevan rge_write_phy_ocp(sc, 0xad40, val | 0x0004);
1572 1.1 sevan RGE_PHY_CLRBIT(sc, 0xac14, 0x0080);
1573 1.1 sevan RGE_PHY_CLRBIT(sc, 0xac80, 0x0300);
1574 1.1 sevan val = rge_read_phy_ocp(sc, 0xac5e) & ~0x0007;
1575 1.1 sevan rge_write_phy_ocp(sc, 0xac5e, val | 0x0002);
1576 1.1 sevan rge_write_phy_ocp(sc, 0xad4c, 0x00a8);
1577 1.1 sevan rge_write_phy_ocp(sc, 0xac5c, 0x01ff);
1578 1.1 sevan val = rge_read_phy_ocp(sc, 0xac8a) & ~0x00f0;
1579 1.1 sevan rge_write_phy_ocp(sc, 0xac8a, val | 0x0030);
1580 1.1 sevan rge_write_phy_ocp(sc, 0xb87c, 0x80a2);
1581 1.1 sevan rge_write_phy_ocp(sc, 0xb87e, 0x0153);
1582 1.1 sevan rge_write_phy_ocp(sc, 0xb87c, 0x809c);
1583 1.1 sevan rge_write_phy_ocp(sc, 0xb87e, 0x0153);
1584 1.1 sevan
1585 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x81b3);
1586 1.1 sevan for (i = 0; i < nitems(mac_cfg3_a438_value); i++)
1587 1.1 sevan rge_write_phy_ocp(sc, 0xa438, mac_cfg3_a438_value[i]);
1588 1.1 sevan for (i = 0; i < 26; i++)
1589 1.1 sevan rge_write_phy_ocp(sc, 0xa438, 0);
1590 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x8257);
1591 1.1 sevan rge_write_phy_ocp(sc, 0xa438, 0x020f);
1592 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x80ea);
1593 1.1 sevan rge_write_phy_ocp(sc, 0xa438, 0x7843);
1594 1.1 sevan
1595 1.1 sevan rge_patch_phy_mcu(sc, 1);
1596 1.1 sevan RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
1597 1.1 sevan RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
1598 1.1 sevan for (i = 0; i < nitems(mac_cfg3_b88e_value); i += 2) {
1599 1.1 sevan rge_write_phy_ocp(sc, 0xb88e, mac_cfg3_b88e_value[i]);
1600 1.1 sevan rge_write_phy_ocp(sc, 0xb890,
1601 1.1 sevan mac_cfg3_b88e_value[i + 1]);
1602 1.1 sevan }
1603 1.1 sevan RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
1604 1.1 sevan rge_patch_phy_mcu(sc, 0);
1605 1.1 sevan
1606 1.1 sevan RGE_PHY_SETBIT(sc, 0xd068, 0x2000);
1607 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x81a2);
1608 1.1 sevan RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
1609 1.1 sevan val = rge_read_phy_ocp(sc, 0xb54c) & ~0xff00;
1610 1.1 sevan rge_write_phy_ocp(sc, 0xb54c, val | 0xdb00);
1611 1.1 sevan RGE_PHY_CLRBIT(sc, 0xa454, 0x0001);
1612 1.1 sevan RGE_PHY_SETBIT(sc, 0xa5d4, 0x0020);
1613 1.1 sevan RGE_PHY_CLRBIT(sc, 0xad4e, 0x0010);
1614 1.1 sevan RGE_PHY_CLRBIT(sc, 0xa86a, 0x0001);
1615 1.1 sevan RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1616 1.1 sevan }
1617 1.1 sevan
1618 1.1 sevan /* Disable EEE. */
1619 1.1 sevan RGE_MAC_CLRBIT(sc, 0xe040, 0x0003);
1620 1.1 sevan RGE_MAC_CLRBIT(sc, 0xeb62, 0x0006);
1621 1.1 sevan RGE_PHY_CLRBIT(sc, 0xa432, 0x0010);
1622 1.1 sevan RGE_PHY_CLRBIT(sc, 0xa5d0, 0x0006);
1623 1.1 sevan RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0001);
1624 1.1 sevan RGE_PHY_CLRBIT(sc, 0xa6d8, 0x0010);
1625 1.1 sevan RGE_PHY_CLRBIT(sc, 0xa428, 0x0080);
1626 1.1 sevan RGE_PHY_CLRBIT(sc, 0xa4a2, 0x0200);
1627 1.1 sevan
1628 1.1 sevan rge_patch_phy_mcu(sc, 1);
1629 1.1 sevan RGE_MAC_CLRBIT(sc, 0xe052, 0x0001);
1630 1.1 sevan RGE_PHY_CLRBIT(sc, 0xa442, 0x3000);
1631 1.1 sevan RGE_PHY_CLRBIT(sc, 0xa430, 0x8000);
1632 1.1 sevan rge_patch_phy_mcu(sc, 0);
1633 1.1 sevan }
1634 1.1 sevan
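/*
 * Program the station address.  The EECMD write-config bit is toggled
 * around the access so that the MAC0/MAC4 ID registers accept the update.
 */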
1635 1.1 sevan void
1636 1.1 sevan rge_set_macaddr(struct rge_softc *sc, const uint8_t *addr)
1637 1.1 sevan {
1638 1.1 sevan RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
1639 1.1 sevan RGE_WRITE_4(sc, RGE_MAC0,
1640 1.1 sevan addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
1641 1.1 sevan RGE_WRITE_4(sc, RGE_MAC4,
1642 1.1 sevan addr[5] << 8 | addr[4]);
1643 1.1 sevan RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
1644 1.1 sevan }
1645 1.1 sevan
1646 1.1 sevan void
1647 1.1 sevan rge_get_macaddr(struct rge_softc *sc, uint8_t *addr)
1648 1.1 sevan {
1649 1.1 sevan *(uint32_t *)&addr[0] = RGE_READ_4(sc, RGE_ADDR0);
1650 1.1 sevan *(uint16_t *)&addr[4] = RGE_READ_2(sc, RGE_ADDR1);
1651 1.1 sevan }
1652 1.1 sevan
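/*
 * One-time MAC initialization: clear PME status, drop CLKREQ, disable UPS,
 * zero the MAC MCU registers 0xfc26-0xfc38, load the bootpatch table on
 * MAC_CFG3 chips, disable PHY OCP power saving and set a PCIe
 * uncorrectable error status bit through the CSI window.
 */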
1653 1.1 sevan void
1654 1.1 sevan rge_hw_init(struct rge_softc *sc)
1655 1.1 sevan {
1656 1.1 sevan int i;
1657 1.1 sevan
1658 1.1 sevan RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
1659 1.1 sevan RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
1660 1.1 sevan RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
1661 1.1 sevan RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
1662 1.1 sevan RGE_CLRBIT_1(sc, 0xf1, 0x80);
1663 1.1 sevan
1664 1.1 sevan /* Disable UPS. */
1665 1.1 sevan RGE_MAC_CLRBIT(sc, 0xd40a, 0x0010);
1666 1.1 sevan
1667 1.1 sevan /* Configure MAC MCU. */
1668 1.1 sevan rge_write_mac_ocp(sc, 0xfc38, 0);
1669 1.1 sevan
1670 1.1 sevan for (i = 0xfc28; i < 0xfc38; i += 2)
1671 1.1 sevan rge_write_mac_ocp(sc, i, 0);
1672 1.1 sevan
1673 1.1 sevan DELAY(3000);
1674 1.1 sevan rge_write_mac_ocp(sc, 0xfc26, 0);
1675 1.1 sevan
1676 1.1 sevan if (sc->rge_type == MAC_CFG3) {
1677 1.1 sevan for (i = 0; i < nitems(rtl8125_def_bps); i++)
1678 1.1 sevan rge_write_mac_ocp(sc, rtl8125_def_bps[i].reg,
1679 1.1 sevan rtl8125_def_bps[i].val);
1680 1.1 sevan }
1681 1.1 sevan
1682 1.1 sevan /* Disable PHY power saving. */
1683 1.1 sevan rge_disable_phy_ocp_pwrsave(sc);
1684 1.1 sevan
1685 1.1 sevan /* Set PCIe uncorrectable error status. */
1686 1.1 sevan rge_write_csi(sc, 0x108,
1687 1.1 sevan rge_read_csi(sc, 0x108) | 0x00100000);
1688 1.1 sevan }
1689 1.1 sevan
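/*
 * Writing 0x0500 to PHY OCP 0xc416 (under an MCU patch request) apparently
 * turns off a PHY power-saving feature; it is only done when the register
 * does not already hold that value.
 */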
1690 1.1 sevan void
1691 1.1 sevan rge_disable_phy_ocp_pwrsave(struct rge_softc *sc)
1692 1.1 sevan {
1693 1.1 sevan if (rge_read_phy_ocp(sc, 0xc416) != 0x0500) {
1694 1.1 sevan rge_patch_phy_mcu(sc, 1);
1695 1.1 sevan rge_write_phy_ocp(sc, 0xc416, 0);
1696 1.1 sevan rge_write_phy_ocp(sc, 0xc416, 0x0500);
1697 1.1 sevan rge_patch_phy_mcu(sc, 0);
1698 1.1 sevan }
1699 1.1 sevan }
1700 1.1 sevan
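/*
 * Request (set != 0) or release (set == 0) PHY MCU patch mode by toggling
 * bit 0x0010 of 0xb820, then poll bit 0x0040 of 0xb800 until it is set
 * (presumably a patch-ready indication) or report a timeout.
 */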
1701 1.1 sevan void
1702 1.1 sevan rge_patch_phy_mcu(struct rge_softc *sc, int set)
1703 1.1 sevan {
1704 1.1 sevan uint16_t val;
1705 1.1 sevan int i;
1706 1.1 sevan
1707 1.1 sevan if (set)
1708 1.1 sevan RGE_PHY_SETBIT(sc, 0xb820, 0x0010);
1709 1.1 sevan else
1710 1.1 sevan RGE_PHY_CLRBIT(sc, 0xb820, 0x0010);
1711 1.1 sevan
1712 1.1 sevan for (i = 0; i < 1000; i++) {
1713 1.1 sevan val = rge_read_phy_ocp(sc, 0xb800) & 0x0040;
1714 1.1 sevan DELAY(100);
1715 1.1 sevan if (val == 0x0040)
1716 1.1 sevan break;
1717 1.1 sevan }
1718 1.1 sevan if (i == 1000)
1719 1.1 sevan printf("%s: timeout waiting to patch phy mcu\n",
1720 1.1 sevan sc->sc_dev.dv_xname);
1721 1.1 sevan }
1722 1.1 sevan
1723 1.1 sevan void
1724 1.1 sevan rge_add_media_types(struct rge_softc *sc)
1725 1.1 sevan {
1726 1.1 sevan ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
1727 1.1 sevan ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
1728 1.1 sevan ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
1729 1.1 sevan ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
1730 1.1 sevan ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T, 0, NULL);
1731 1.1 sevan ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
1732 1.1 sevan ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
1733 1.1 sevan ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX, 0, NULL);
1734 1.1 sevan }
1735 1.1 sevan
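/*
 * Select the interrupt acknowledgement strategy: with RGE_IMTYPE_NONE every
 * RX/TX event interrupts directly, while RGE_IMTYPE_SIM relies on the PCS
 * timeout interrupt driven by the chip timer (see rge_setup_sim_im()) to
 * moderate the interrupt rate.
 */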
1736 1.1 sevan void
1737 1.1 sevan rge_config_imtype(struct rge_softc *sc, int imtype)
1738 1.1 sevan {
1739 1.1 sevan switch (imtype) {
1740 1.1 sevan case RGE_IMTYPE_NONE:
1741 1.1 sevan sc->rge_intrs = RGE_INTRS;
1742 1.1 sevan sc->rge_rx_ack = RGE_ISR_RX_OK | RGE_ISR_RX_DESC_UNAVAIL |
1743 1.1 sevan RGE_ISR_RX_FIFO_OFLOW;
1744 1.1 sevan sc->rge_tx_ack = RGE_ISR_TX_OK;
1745 1.1 sevan break;
1746 1.1 sevan case RGE_IMTYPE_SIM:
1747 1.1 sevan sc->rge_intrs = RGE_INTRS_TIMER;
1748 1.1 sevan sc->rge_rx_ack = RGE_ISR_PCS_TIMEOUT;
1749 1.1 sevan sc->rge_tx_ack = RGE_ISR_PCS_TIMEOUT;
1750 1.1 sevan break;
1751 1.1 sevan default:
1752 1.1 sevan panic("%s: unknown imtype %d", sc->sc_dev.dv_xname, imtype);
1753 1.1 sevan }
1754 1.1 sevan }
1755 1.1 sevan
1756 1.1 sevan void
1757 1.1 sevan rge_disable_sim_im(struct rge_softc *sc)
1758 1.1 sevan {
1759 1.1 sevan RGE_WRITE_4(sc, RGE_TIMERINT, 0);
1760 1.1 sevan sc->rge_timerintr = 0;
1761 1.1 sevan }
1762 1.1 sevan
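/*
 * Arm the simulated interrupt moderation timer: TIMERINT holds the reload
 * value (0x2600; units are not documented here) and writing TIMERCNT
 * presumably restarts the countdown.
 */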
1763 1.1 sevan void
1764 1.1 sevan rge_setup_sim_im(struct rge_softc *sc)
1765 1.1 sevan {
1766 1.1 sevan RGE_WRITE_4(sc, RGE_TIMERINT, 0x2600);
1767 1.1 sevan RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
1768 1.1 sevan sc->rge_timerintr = 1;
1769 1.1 sevan }
1770 1.1 sevan
1771 1.1 sevan void
1772 1.1 sevan rge_setup_intr(struct rge_softc *sc, int imtype)
1773 1.1 sevan {
1774 1.1 sevan rge_config_imtype(sc, imtype);
1775 1.1 sevan
1776 1.1 sevan /* Enable interrupts. */
1777 1.1 sevan RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
1778 1.1 sevan
1779 1.1 sevan switch (imtype) {
1780 1.1 sevan case RGE_IMTYPE_NONE:
1781 1.1 sevan rge_disable_sim_im(sc);
1782 1.1 sevan break;
1783 1.1 sevan case RGE_IMTYPE_SIM:
1784 1.1 sevan rge_setup_sim_im(sc);
1785 1.1 sevan break;
1786 1.1 sevan default:
1787 1.1 sevan panic("%s: unknown imtype %d", sc->sc_dev.dv_xname, imtype);
1788 1.1 sevan }
1789 1.1 sevan }
1790 1.1 sevan
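/*
 * Bring the chip out of OOB (out-of-band management) mode: mask the RX
 * filter, disable RealWoW, reset, clear the OOB flag in MCUCMD and poll
 * TWICMD bit 0x0200 between the MAC OCP writes.  When bit 0x0100 of MAC OCP
 * 0xd42c is set, also wait for the PHY state field (0xa420 & 7) to reach 2
 * before clearing the related bits.  Register roles beyond the named macros
 * are inferred, not documented.
 */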
1791 1.1 sevan void
1792 1.1 sevan rge_exit_oob(struct rge_softc *sc)
1793 1.1 sevan {
1794 1.1 sevan int i;
1795 1.1 sevan
1796 1.1 sevan RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
1797 1.1 sevan RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
1798 1.1 sevan RGE_RXCFG_ERRPKT);
1799 1.1 sevan
1800 1.1 sevan /* Disable RealWoW. */
1801 1.1 sevan rge_write_mac_ocp(sc, 0xc0bc, 0x00ff);
1802 1.1 sevan
1803 1.1 sevan rge_reset(sc);
1804 1.1 sevan
1805 1.1 sevan /* Disable OOB. */
1806 1.1 sevan RGE_CLRBIT_1(sc, RGE_MCUCMD, RGE_MCUCMD_IS_OOB);
1807 1.1 sevan
1808 1.1 sevan RGE_MAC_CLRBIT(sc, 0xe8de, 0x4000);
1809 1.1 sevan
1810 1.1 sevan for (i = 0; i < 10; i++) {
1811 1.1 sevan DELAY(100);
1812 1.1 sevan if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
1813 1.1 sevan break;
1814 1.1 sevan }
1815 1.1 sevan
1816 1.1 sevan rge_write_mac_ocp(sc, 0xc0aa, 0x07d0);
1817 1.1 sevan rge_write_mac_ocp(sc, 0xc0a6, 0x0150);
1818 1.1 sevan rge_write_mac_ocp(sc, 0xc01e, 0x5555);
1819 1.1 sevan
1820 1.1 sevan for (i = 0; i < 10; i++) {
1821 1.1 sevan DELAY(100);
1822 1.1 sevan if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
1823 1.1 sevan break;
1824 1.1 sevan }
1825 1.1 sevan
1826 1.1 sevan if (rge_read_mac_ocp(sc, 0xd42c) & 0x0100) {
1827 1.1 sevan for (i = 0; i < RGE_TIMEOUT; i++) {
1828 1.1 sevan 			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 2)
1829 1.1 sevan break;
1830 1.1 sevan DELAY(1000);
1831 1.1 sevan }
1832 1.1 sevan RGE_MAC_CLRBIT(sc, 0xd408, 0x0100);
1833 1.1 sevan RGE_PHY_CLRBIT(sc, 0xa468, 0x000a);
1834 1.1 sevan }
1835 1.1 sevan }
1836 1.1 sevan
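/*
 * The CSIAR/CSIDR pair gives indirect access to the PCIe configuration
 * space.  On writes the driver sets the BUSY bit and polls until the chip
 * clears it; on reads the loop waits for the chip to raise BUSY, which
 * appears to indicate that CSIDR holds valid data.
 */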
1837 1.1 sevan void
1838 1.1 sevan rge_write_csi(struct rge_softc *sc, uint32_t reg, uint32_t val)
1839 1.1 sevan {
1840 1.1 sevan int i;
1841 1.1 sevan
1842 1.1 sevan RGE_WRITE_4(sc, RGE_CSIDR, val);
1843 1.1 sevan RGE_WRITE_4(sc, RGE_CSIAR, (1 << 16) | (reg & RGE_CSIAR_ADDR_MASK) |
1844 1.1 sevan (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT) | RGE_CSIAR_BUSY);
1845 1.1 sevan
1846 1.1 sevan for (i = 0; i < 10; i++) {
1847 1.1 sevan DELAY(100);
1848 1.1 sevan if (!(RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY))
1849 1.1 sevan break;
1850 1.1 sevan }
1851 1.1 sevan
1852 1.1 sevan DELAY(20);
1853 1.1 sevan }
1854 1.1 sevan
1855 1.1 sevan uint32_t
1856 1.1 sevan rge_read_csi(struct rge_softc *sc, uint32_t reg)
1857 1.1 sevan {
1858 1.1 sevan int i;
1859 1.1 sevan
1860 1.1 sevan RGE_WRITE_4(sc, RGE_CSIAR, (1 << 16) | (reg & RGE_CSIAR_ADDR_MASK) |
1861 1.1 sevan (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT));
1862 1.1 sevan
1863 1.1 sevan for (i = 0; i < 10; i++) {
1864 1.1 sevan DELAY(100);
1865 1.1 sevan if (RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY)
1866 1.1 sevan break;
1867 1.1 sevan }
1868 1.1 sevan
1869 1.1 sevan DELAY(20);
1870 1.1 sevan
1871 1.1 sevan return (RGE_READ_4(sc, RGE_CSIDR));
1872 1.1 sevan }
1873 1.1 sevan
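/*
 * MAC OCP registers are reached through the 32-bit MACOCP window: the
 * 16-bit register address is halved into the address field, the data
 * occupies the low 16 bits, and the BUSY flag distinguishes a write from
 * a read.
 */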
1874 1.1 sevan void
1875 1.1 sevan rge_write_mac_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
1876 1.1 sevan {
1877 1.1 sevan uint32_t tmp;
1878 1.1 sevan
1879 1.1 sevan tmp = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
1880 1.1 sevan tmp += val;
1881 1.1 sevan tmp |= RGE_MACOCP_BUSY;
1882 1.1 sevan RGE_WRITE_4(sc, RGE_MACOCP, tmp);
1883 1.1 sevan }
1884 1.1 sevan
1885 1.1 sevan uint16_t
1886 1.1 sevan rge_read_mac_ocp(struct rge_softc *sc, uint16_t reg)
1887 1.1 sevan {
1888 1.1 sevan uint32_t val;
1889 1.1 sevan
1890 1.1 sevan val = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
1891 1.1 sevan RGE_WRITE_4(sc, RGE_MACOCP, val);
1892 1.1 sevan
1893 1.1 sevan return (RGE_READ_4(sc, RGE_MACOCP) & RGE_MACOCP_DATA_MASK);
1894 1.1 sevan }
1895 1.1 sevan
1896 1.1 sevan void
1897 1.1 sevan rge_write_ephy(struct rge_softc *sc, uint16_t reg, uint16_t val)
1898 1.1 sevan {
1899 1.1 sevan uint32_t tmp;
1900 1.1 sevan int i;
1901 1.1 sevan
1902 1.1 sevan tmp = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
1903 1.1 sevan tmp |= RGE_EPHYAR_BUSY | (val & RGE_EPHYAR_DATA_MASK);
1904 1.1 sevan RGE_WRITE_4(sc, RGE_EPHYAR, tmp);
1905 1.1 sevan
1906 1.1 sevan for (i = 0; i < 10; i++) {
1907 1.1 sevan DELAY(100);
1908 1.1 sevan if (!(RGE_READ_4(sc, RGE_EPHYAR) & RGE_EPHYAR_BUSY))
1909 1.1 sevan break;
1910 1.1 sevan }
1911 1.1 sevan
1912 1.1 sevan DELAY(20);
1913 1.1 sevan }
1914 1.1 sevan
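/*
 * Translate a (page, register) style MII access into a flat PHY OCP
 * address: with addr == 0, registers are grouped eight per page starting
 * at RGE_PHYBASE and live at offsets 0x10-0x17 within the page.  Example
 * with made-up numbers: reg 10 maps to page RGE_PHYBASE + 1, offset 0x12,
 * i.e. OCP address ((RGE_PHYBASE + 1) << 4) + ((0x12 - 16) << 1).
 */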
1915 1.1 sevan void
1916 1.1 sevan rge_write_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg, uint16_t val)
1917 1.1 sevan {
1918 1.1 sevan uint16_t off, phyaddr;
1919 1.1 sevan
1920 1.1 sevan phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
1921 1.1 sevan phyaddr <<= 4;
1922 1.1 sevan
1923 1.1 sevan off = addr ? reg : 0x10 + (reg % 8);
1924 1.1 sevan
1925 1.1 sevan phyaddr += (off - 16) << 1;
1926 1.1 sevan
1927 1.1 sevan rge_write_phy_ocp(sc, phyaddr, val);
1928 1.1 sevan }
1929 1.1 sevan
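/*
 * PHY OCP accesses mirror the MAC OCP window: writes set the BUSY flag and
 * poll until the chip clears it, reads poll until the chip raises BUSY, at
 * which point the data field holds the result.
 */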
1930 1.1 sevan void
1931 1.1 sevan rge_write_phy_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
1932 1.1 sevan {
1933 1.1 sevan uint32_t tmp;
1934 1.1 sevan int i;
1935 1.1 sevan
1936 1.1 sevan tmp = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
1937 1.1 sevan tmp |= RGE_PHYOCP_BUSY | val;
1938 1.1 sevan RGE_WRITE_4(sc, RGE_PHYOCP, tmp);
1939 1.1 sevan
1940 1.1 sevan for (i = 0; i < RGE_TIMEOUT; i++) {
1941 1.1 sevan DELAY(1);
1942 1.1 sevan if (!(RGE_READ_4(sc, RGE_PHYOCP) & RGE_PHYOCP_BUSY))
1943 1.1 sevan break;
1944 1.1 sevan }
1945 1.1 sevan }
1946 1.1 sevan
1947 1.1 sevan uint16_t
1948 1.1 sevan rge_read_phy_ocp(struct rge_softc *sc, uint16_t reg)
1949 1.1 sevan {
1950 1.1 sevan uint32_t val;
1951 1.1 sevan int i;
1952 1.1 sevan
1953 1.1 sevan val = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
1954 1.1 sevan RGE_WRITE_4(sc, RGE_PHYOCP, val);
1955 1.1 sevan
1956 1.1 sevan for (i = 0; i < RGE_TIMEOUT; i++) {
1957 1.1 sevan DELAY(1);
1958 1.1 sevan val = RGE_READ_4(sc, RGE_PHYOCP);
1959 1.1 sevan if (val & RGE_PHYOCP_BUSY)
1960 1.1 sevan break;
1961 1.1 sevan }
1962 1.1 sevan
1963 1.1 sevan return (val & RGE_PHYOCP_DATA_MASK);
1964 1.1 sevan }
1965 1.1 sevan
1966 1.1 sevan int
1967 1.1 sevan rge_get_link_status(struct rge_softc *sc)
1968 1.1 sevan {
1969 1.1 sevan return ((RGE_READ_2(sc, RGE_PHYSTAT) & RGE_PHYSTAT_LINK) ? 1 : 0);
1970 1.1 sevan }
1971 1.1 sevan
1972 1.1 sevan void
1973 1.1 sevan rge_txstart(void *arg)
1974 1.1 sevan {
1975 1.1 sevan struct rge_softc *sc = arg;
1976 1.1 sevan
1977 1.1 sevan RGE_WRITE_2(sc, RGE_TXSTART, RGE_TXSTART_START);
1978 1.1 sevan }
1979 1.1 sevan
1980 1.1 sevan void
1981 1.1 sevan rge_tick(void *arg)
1982 1.1 sevan {
1983 1.1 sevan struct rge_softc *sc = arg;
1984 1.1 sevan int s;
1985 1.1 sevan
1986 1.1 sevan s = splnet();
1987 1.1 sevan rge_link_state(sc);
1988 1.1 sevan splx(s);
1989 1.1 sevan
1990 1.1 sevan timeout_add_sec(&sc->sc_timeout, 1);
1991 1.1 sevan }
1992 1.1 sevan
1993 1.1 sevan void
1994 1.1 sevan rge_link_state(struct rge_softc *sc)
1995 1.1 sevan {
1996 1.1 sevan struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1997 1.1 sevan int link = LINK_STATE_DOWN;
1998 1.1 sevan
1999 1.1 sevan if (rge_get_link_status(sc))
2000 1.1 sevan link = LINK_STATE_UP;
2001 1.1 sevan
2002 1.1 sevan if (ifp->if_link_state != link) {
2003 1.1 sevan ifp->if_link_state = link;
2004 1.1 sevan if_link_state_change(ifp);
2005 1.1 sevan }
2006 1.1 sevan }