1 1.4 skrll /* $NetBSD: if_rge.c,v 1.4 2020/02/04 07:37:00 skrll Exp $ */
2 1.1 sevan /* $OpenBSD: if_rge.c,v 1.2 2020/01/02 09:00:45 kevlo Exp $ */
3 1.1 sevan
4 1.1 sevan /*
5 1.1 sevan * Copyright (c) 2019 Kevin Lo <kevlo (at) openbsd.org>
6 1.1 sevan *
7 1.1 sevan * Permission to use, copy, modify, and distribute this software for any
8 1.1 sevan * purpose with or without fee is hereby granted, provided that the above
9 1.1 sevan * copyright notice and this permission notice appear in all copies.
10 1.1 sevan *
11 1.1 sevan * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 1.1 sevan * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 1.1 sevan * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 1.1 sevan * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 1.1 sevan * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 1.1 sevan * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 1.1 sevan * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 1.1 sevan */
19 1.1 sevan
20 1.2 sevan #include <sys/cdefs.h>
21 1.4 skrll __KERNEL_RCSID(0, "$NetBSD: if_rge.c,v 1.4 2020/02/04 07:37:00 skrll Exp $");
22 1.2 sevan
23 1.2 sevan /* #include "bpfilter.h" Sevan */
24 1.2 sevan /* #include "vlan.h" Sevan */
25 1.2 sevan
26 1.2 sevan #include <sys/types.h>
27 1.1 sevan
28 1.1 sevan #include <sys/param.h>
29 1.1 sevan #include <sys/systm.h>
30 1.1 sevan #include <sys/sockio.h>
31 1.1 sevan #include <sys/mbuf.h>
32 1.1 sevan #include <sys/malloc.h>
33 1.1 sevan #include <sys/kernel.h>
34 1.1 sevan #include <sys/socket.h>
35 1.1 sevan #include <sys/device.h>
36 1.1 sevan #include <sys/endian.h>
37 1.3 sevan #include <sys/callout.h>
38 1.3 sevan #include <sys/workqueue.h>
39 1.1 sevan
40 1.1 sevan #include <net/if.h>
41 1.2 sevan
42 1.2 sevan #include <net/if_dl.h>
43 1.2 sevan #include <net/if_ether.h>
44 1.2 sevan
45 1.1 sevan #include <net/if_media.h>
46 1.1 sevan
47 1.1 sevan #include <netinet/in.h>
48 1.2 sevan #include <net/if_ether.h>
49 1.1 sevan
50 1.1 sevan #if NBPFILTER > 0
51 1.1 sevan #include <net/bpf.h>
52 1.1 sevan #endif
53 1.1 sevan
54 1.2 sevan #include <sys/bus.h>
55 1.1 sevan #include <machine/intr.h>
56 1.1 sevan
57 1.1 sevan #include <dev/mii/mii.h>
58 1.1 sevan
59 1.1 sevan #include <dev/pci/pcivar.h>
60 1.1 sevan #include <dev/pci/pcireg.h>
61 1.1 sevan #include <dev/pci/pcidevs.h>
62 1.1 sevan
63 1.1 sevan #include <dev/pci/if_rgereg.h>
64 1.1 sevan
65 1.2 sevan #ifdef __NetBSD__
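/* Minimal compatibility shims for OpenBSD interfaces used by this port. */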
66 1.2 sevan #define letoh32 le32toh
67 1.2 sevan #define nitems(x) __arraycount(x)
68 1.2 sevan #define MBUF_LIST_INITIALIZER() { NULL, NULL, 0 }
69 1.2 sevan struct mbuf_list {
70 1.2 sevan struct mbuf *ml_head;
71 1.2 sevan struct mbuf *ml_tail;
72 1.2 sevan u_int ml_len;
73 1.2 sevan };
74 1.3 sevan #ifdef NET_MPSAFE
75 1.3 sevan #define RGE_MPSAFE 1
76 1.3 sevan #define CALLOUT_FLAGS CALLOUT_MPSAFE
77 1.3 sevan #else
78 1.3 sevan #define CALLOUT_FLAGS 0
79 1.3 sevan #endif
80 1.2 sevan #endif
81 1.2 sevan
82 1.2 sevan static int rge_match(device_t, cfdata_t, void *);
83 1.2 sevan static void rge_attach(device_t, device_t, void *);
84 1.1 sevan int rge_intr(void *);
85 1.1 sevan int rge_encap(struct rge_softc *, struct mbuf *, int);
86 1.2 sevan int rge_ioctl(struct ifnet *, u_long, void *);
87 1.2 sevan void rge_start(struct ifnet *);
88 1.1 sevan void rge_watchdog(struct ifnet *);
89 1.1 sevan int rge_init(struct ifnet *);
90 1.1 sevan void rge_stop(struct ifnet *);
91 1.1 sevan int rge_ifmedia_upd(struct ifnet *);
92 1.1 sevan void rge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
93 1.1 sevan int rge_allocmem(struct rge_softc *);
94 1.1 sevan int rge_newbuf(struct rge_softc *, int);
95 1.1 sevan void rge_discard_rxbuf(struct rge_softc *, int);
96 1.1 sevan int rge_rx_list_init(struct rge_softc *);
97 1.1 sevan void rge_tx_list_init(struct rge_softc *);
98 1.1 sevan int rge_rxeof(struct rge_softc *);
99 1.1 sevan int rge_txeof(struct rge_softc *);
100 1.1 sevan void rge_reset(struct rge_softc *);
101 1.1 sevan void rge_iff(struct rge_softc *);
102 1.1 sevan void rge_set_phy_power(struct rge_softc *, int);
103 1.1 sevan void rge_phy_config(struct rge_softc *);
104 1.1 sevan void rge_set_macaddr(struct rge_softc *, const uint8_t *);
105 1.1 sevan void rge_get_macaddr(struct rge_softc *, uint8_t *);
106 1.1 sevan void rge_hw_init(struct rge_softc *);
107 1.1 sevan void rge_disable_phy_ocp_pwrsave(struct rge_softc *);
108 1.1 sevan void rge_patch_phy_mcu(struct rge_softc *, int);
109 1.1 sevan void rge_add_media_types(struct rge_softc *);
110 1.1 sevan void rge_config_imtype(struct rge_softc *, int);
111 1.1 sevan void rge_disable_sim_im(struct rge_softc *);
112 1.1 sevan void rge_setup_sim_im(struct rge_softc *);
113 1.1 sevan void rge_setup_intr(struct rge_softc *, int);
114 1.1 sevan void rge_exit_oob(struct rge_softc *);
115 1.1 sevan void rge_write_csi(struct rge_softc *, uint32_t, uint32_t);
116 1.1 sevan uint32_t rge_read_csi(struct rge_softc *, uint32_t);
117 1.1 sevan void rge_write_mac_ocp(struct rge_softc *, uint16_t, uint16_t);
118 1.1 sevan uint16_t rge_read_mac_ocp(struct rge_softc *, uint16_t);
119 1.1 sevan void rge_write_ephy(struct rge_softc *, uint16_t, uint16_t);
120 1.1 sevan void rge_write_phy(struct rge_softc *, uint16_t, uint16_t, uint16_t);
121 1.1 sevan void rge_write_phy_ocp(struct rge_softc *, uint16_t, uint16_t);
122 1.1 sevan uint16_t rge_read_phy_ocp(struct rge_softc *, uint16_t);
123 1.1 sevan int rge_get_link_status(struct rge_softc *);
124 1.3 sevan void rge_txstart(struct work *, void *);
125 1.1 sevan void rge_tick(void *);
126 1.1 sevan void rge_link_state(struct rge_softc *);
127 1.1 sevan
128 1.1 sevan static const struct {
129 1.1 sevan uint16_t reg;
130 1.1 sevan uint16_t val;
131 1.1 sevan } rtl8125_def_bps[] = {
132 1.1 sevan RTL8125_DEF_BPS
133 1.1 sevan }, rtl8125_mac_cfg2_ephy[] = {
134 1.1 sevan RTL8125_MAC_CFG2_EPHY
135 1.1 sevan }, rtl8125_mac_cfg2_mcu[] = {
136 1.1 sevan RTL8125_MAC_CFG2_MCU
137 1.1 sevan }, rtl8125_mac_cfg3_ephy[] = {
138 1.1 sevan RTL8125_MAC_CFG3_EPHY
139 1.1 sevan }, rtl8125_mac_cfg3_mcu[] = {
140 1.1 sevan RTL8125_MAC_CFG3_MCU
141 1.1 sevan };
142 1.1 sevan
143 1.2 sevan CFATTACH_DECL_NEW(rge, sizeof(struct rge_softc), rge_match, rge_attach,
144 1.2 sevan NULL, NULL); /* Sevan - detach function? */
145 1.1 sevan
146 1.2 sevan extern struct cfdriver rge_cd;
147 1.1 sevan
148 1.2 sevan static const struct {
149 1.2 sevan pci_vendor_id_t vendor;
150 1.2 sevan pci_product_id_t product;
151 1.2 sevan } rge_devices[] = {
152 1.1 sevan { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_E3000 },
153 1.2 sevan { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8125 },
154 1.1 sevan };
155 1.1 sevan
156 1.2 sevan static int
157 1.2 sevan rge_match(device_t parent, cfdata_t match, void *aux)
158 1.1 sevan {
159 1.2 sevan struct pci_attach_args *pa = aux;
160 1.2 sevan int n;
161 1.2 sevan
162 1.2 sevan for (n = 0; n < __arraycount(rge_devices); n++) {
163 1.2 sevan if (PCI_VENDOR(pa->pa_id) == rge_devices[n].vendor &&
164 1.2 sevan PCI_PRODUCT(pa->pa_id) == rge_devices[n].product)
165 1.2 sevan return 1;
166 1.2 sevan }
167 1.2 sevan
168 1.2 sevan return 0;
169 1.1 sevan }
170 1.1 sevan
171 1.1 sevan static void
172 1.2 sevan rge_attach(device_t parent, device_t self, void *aux)
173 1.1 sevan {
174 1.1 sevan struct rge_softc *sc = (struct rge_softc *)self;
175 1.1 sevan struct pci_attach_args *pa = aux;
176 1.1 sevan pci_chipset_tag_t pc = pa->pa_pc;
177 1.1 sevan pci_intr_handle_t ih;
178 1.2 sevan char intrbuf[PCI_INTRSTR_LEN];
179 1.1 sevan const char *intrstr = NULL;
180 1.1 sevan struct ifnet *ifp;
181 1.1 sevan pcireg_t reg;
182 1.1 sevan uint32_t hwrev;
183 1.1 sevan uint8_t eaddr[ETHER_ADDR_LEN];
184 1.1 sevan int offset;
185 1.1 sevan
186 1.1 sevan pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
187 1.1 sevan
188 1.1 sevan /*
189 1.1 sevan * Map control/status registers.
190 1.1 sevan */
191 1.1 sevan if (pci_mapreg_map(pa, RGE_PCI_BAR2, PCI_MAPREG_TYPE_MEM |
192 1.1 sevan PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->rge_btag, &sc->rge_bhandle,
193 1.2 sevan NULL, &sc->rge_bsize)) {
194 1.1 sevan if (pci_mapreg_map(pa, RGE_PCI_BAR1, PCI_MAPREG_TYPE_MEM |
195 1.1 sevan PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->rge_btag,
196 1.2 sevan &sc->rge_bhandle, NULL, &sc->rge_bsize)) {
197 1.1 sevan if (pci_mapreg_map(pa, RGE_PCI_BAR0, PCI_MAPREG_TYPE_IO,
198 1.1 sevan 0, &sc->rge_btag, &sc->rge_bhandle, NULL,
199 1.2 sevan &sc->rge_bsize)) {
200 1.1 sevan printf(": can't map mem or i/o space\n");
201 1.1 sevan return;
202 1.1 sevan }
203 1.1 sevan }
204 1.1 sevan }
205 1.1 sevan
206 1.1 sevan /*
207 1.1 sevan * Allocate interrupt.
208 1.1 sevan */
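/* XXX The OpenBSD original tries pci_intr_map_msi() first to detect MSI; here both branches call pci_intr_map(). */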
209 1.2 sevan if (pci_intr_map(pa, &ih) == 0)
210 1.1 sevan sc->rge_flags |= RGE_FLAG_MSI;
211 1.1 sevan else if (pci_intr_map(pa, &ih) != 0) {
212 1.1 sevan printf(": couldn't map interrupt\n");
213 1.1 sevan return;
214 1.1 sevan }
215 1.2 sevan intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
216 1.2 sevan sc->sc_ih = pci_intr_establish_xname(pc, ih, IPL_NET, rge_intr,
217 1.1 sevan sc, sc->sc_dev.dv_xname);
218 1.1 sevan if (sc->sc_ih == NULL) {
219 1.1 sevan printf(": couldn't establish interrupt");
220 1.1 sevan if (intrstr != NULL)
221 1.1 sevan printf(" at %s", intrstr);
222 1.1 sevan printf("\n");
223 1.1 sevan return;
224 1.1 sevan }
225 1.1 sevan printf(": %s", intrstr);
226 1.1 sevan
227 1.1 sevan sc->sc_dmat = pa->pa_dmat;
228 1.1 sevan sc->sc_pc = pa->pa_pc;
229 1.1 sevan sc->sc_tag = pa->pa_tag;
230 1.1 sevan
231 1.1 sevan /* Determine hardware revision */
232 1.1 sevan hwrev = RGE_READ_4(sc, RGE_TXCFG) & RGE_TXCFG_HWREV;
233 1.1 sevan switch (hwrev) {
234 1.1 sevan case 0x60800000:
235 1.1 sevan sc->rge_type = MAC_CFG2;
236 1.1 sevan break;
237 1.1 sevan case 0x60900000:
238 1.1 sevan sc->rge_type = MAC_CFG3;
239 1.1 sevan break;
240 1.1 sevan default:
241 1.1 sevan printf(": unknown version 0x%08x\n", hwrev);
242 1.1 sevan return;
243 1.1 sevan }
244 1.1 sevan
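/* Default to simulated (hardware timer based) interrupt moderation. */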
245 1.1 sevan rge_config_imtype(sc, RGE_IMTYPE_SIM);
246 1.1 sevan
247 1.1 sevan /*
248 1.1 sevan * PCI Express check.
249 1.1 sevan */
250 1.1 sevan if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
251 1.1 sevan &offset, NULL)) {
252 1.2 sevan /* Disable PCIe ASPM. */
253 1.1 sevan reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
254 1.2 sevan offset + PCIE_LCSR);
255 1.2 sevan reg &= ~(PCIE_LCSR_ASPM_L0S | PCIE_LCSR_ASPM_L1);
256 1.2 sevan pci_conf_write(pa->pa_pc, pa->pa_tag, offset + PCIE_LCSR,
257 1.1 sevan reg);
258 1.1 sevan }
259 1.1 sevan
260 1.1 sevan rge_exit_oob(sc);
261 1.1 sevan rge_hw_init(sc);
262 1.1 sevan
263 1.1 sevan rge_get_macaddr(sc, eaddr);
264 1.1 sevan printf(", address %s\n", ether_sprintf(eaddr));
265 1.1 sevan
266 1.2 sevan memcpy(sc->sc_enaddr, eaddr, ETHER_ADDR_LEN);
267 1.1 sevan
268 1.1 sevan rge_set_phy_power(sc, 1);
269 1.1 sevan rge_phy_config(sc);
270 1.1 sevan
271 1.1 sevan if (rge_allocmem(sc))
272 1.1 sevan return;
273 1.1 sevan
274 1.2 sevan ifp = &sc->sc_ec.ec_if;
275 1.1 sevan ifp->if_softc = sc;
276 1.1 sevan strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
277 1.1 sevan ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
278 1.2 sevan #ifdef RGE_MPSAFE
279 1.2 sevan ifp->if_xflags = IFEF_MPSAFE;
280 1.2 sevan #endif
281 1.1 sevan ifp->if_ioctl = rge_ioctl;
282 1.2 sevan ifp->if_start = rge_start;
283 1.1 sevan ifp->if_watchdog = rge_watchdog;
284 1.1 sevan IFQ_SET_MAXLEN(&ifp->if_snd, RGE_TX_LIST_CNT);
285 1.2 sevan ifp->if_mtu = RGE_JUMBO_MTU;
286 1.1 sevan
287 1.2 sevan ifp->if_capabilities = ETHERCAP_VLAN_MTU | IFCAP_CSUM_IPv4_Rx |
288 1.2 sevan IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_TCPv4_Tx |
289 1.2 sevan IFCAP_CSUM_UDPv4_Rx | IFCAP_CSUM_UDPv4_Tx;
290 1.1 sevan
291 1.1 sevan #if NVLAN > 0
292 1.1 sevan ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
293 1.1 sevan #endif
294 1.1 sevan
295 1.3 sevan callout_init(&sc->sc_timeout, CALLOUT_FLAGS);
296 1.3 sevan callout_setfunc(&sc->sc_timeout, rge_tick, sc);
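/* XXX The OpenBSD original does task_set(&sc->sc_task, rge_txstart, sc) here instead of calling the handler directly. */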
297 1.3 sevan rge_txstart(&sc->sc_task, sc);
298 1.1 sevan
299 1.1 sevan /* Initialize ifmedia structures. */
300 1.1 sevan ifmedia_init(&sc->sc_media, IFM_IMASK, rge_ifmedia_upd,
301 1.1 sevan rge_ifmedia_sts);
302 1.1 sevan rge_add_media_types(sc);
303 1.1 sevan ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
304 1.1 sevan ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
305 1.1 sevan sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media;
306 1.1 sevan
307 1.1 sevan if_attach(ifp);
308 1.2 sevan ether_ifattach(ifp, eaddr);
309 1.1 sevan }
310 1.1 sevan
311 1.1 sevan int
312 1.1 sevan rge_intr(void *arg)
313 1.1 sevan {
314 1.1 sevan struct rge_softc *sc = arg;
315 1.2 sevan struct ifnet *ifp = &sc->sc_ec.ec_if;
316 1.1 sevan uint32_t status;
317 1.1 sevan int claimed = 0, rx, tx;
318 1.1 sevan
319 1.1 sevan if (!(ifp->if_flags & IFF_RUNNING))
320 1.1 sevan return (0);
321 1.1 sevan
322 1.1 sevan /* Disable interrupts. */
323 1.1 sevan RGE_WRITE_4(sc, RGE_IMR, 0);
324 1.1 sevan
325 1.1 sevan status = RGE_READ_4(sc, RGE_ISR);
326 1.1 sevan if (!(sc->rge_flags & RGE_FLAG_MSI)) {
327 1.1 sevan if ((status & RGE_INTRS) == 0 || status == 0xffffffff)
328 1.1 sevan return (0);
329 1.1 sevan }
330 1.1 sevan if (status)
331 1.1 sevan RGE_WRITE_4(sc, RGE_ISR, status);
332 1.1 sevan
333 1.1 sevan if (status & RGE_ISR_PCS_TIMEOUT)
334 1.1 sevan claimed = 1;
335 1.1 sevan
336 1.1 sevan rx = tx = 0;
337 1.1 sevan if (status & RGE_INTRS) {
338 1.1 sevan if (status &
339 1.1 sevan (sc->rge_rx_ack | RGE_ISR_RX_ERR | RGE_ISR_RX_FIFO_OFLOW)) {
340 1.1 sevan rx |= rge_rxeof(sc);
341 1.1 sevan claimed = 1;
342 1.1 sevan }
343 1.1 sevan
344 1.1 sevan if (status & (sc->rge_tx_ack | RGE_ISR_TX_ERR)) {
345 1.1 sevan tx |= rge_txeof(sc);
346 1.1 sevan claimed = 1;
347 1.1 sevan }
348 1.1 sevan
349 1.1 sevan if (status & RGE_ISR_SYSTEM_ERR) {
350 1.2 sevan KERNEL_LOCK(1, NULL);
351 1.1 sevan rge_init(ifp);
352 1.2 sevan KERNEL_UNLOCK_ONE(NULL);
353 1.1 sevan claimed = 1;
354 1.1 sevan }
355 1.1 sevan }
356 1.1 sevan
357 1.1 sevan if (sc->rge_timerintr) {
358 1.1 sevan if ((tx | rx) == 0) {
359 1.1 sevan /*
360 1.1 sevan * Nothing needs to be processed; fall back
361 1.1 sevan * to using TX/RX interrupts.
362 1.1 sevan */
363 1.1 sevan rge_setup_intr(sc, RGE_IMTYPE_NONE);
364 1.1 sevan
365 1.1 sevan /*
366 1.1 sevan * Recollect, mainly to avoid the possible
367 1.1 sevan * race introduced by changing interrupt
368 1.1 sevan * masks.
369 1.1 sevan */
370 1.1 sevan rge_rxeof(sc);
371 1.1 sevan rge_txeof(sc);
372 1.1 sevan } else
373 1.1 sevan RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
374 1.1 sevan } else if (tx | rx) {
375 1.1 sevan /*
376 1.1 sevan * Assume that using simulated interrupt moderation
377 1.1 sevan * (hardware timer based) could reduce the interrupt
378 1.1 sevan * rate.
379 1.1 sevan */
380 1.1 sevan rge_setup_intr(sc, RGE_IMTYPE_SIM);
381 1.1 sevan }
382 1.1 sevan
383 1.1 sevan RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
384 1.1 sevan
385 1.1 sevan return (claimed);
386 1.1 sevan }
387 1.1 sevan
388 1.1 sevan int
389 1.1 sevan rge_encap(struct rge_softc *sc, struct mbuf *m, int idx)
390 1.1 sevan {
391 1.1 sevan struct rge_tx_desc *d = NULL;
392 1.1 sevan struct rge_txq *txq;
393 1.1 sevan bus_dmamap_t txmap;
394 1.1 sevan uint32_t cmdsts, cflags = 0;
395 1.1 sevan int cur, error, i, last, nsegs;
396 1.1 sevan
397 1.1 sevan /*
398 1.1 sevan * Set RGE_TDEXTSTS_IPCSUM if any checksum offloading is requested.
399 1.1 sevan * Otherwise, RGE_TDEXTSTS_TCPCSUM / RGE_TDEXTSTS_UDPCSUM does not
400 1.1 sevan * take effect.
401 1.1 sevan */
402 1.1 sevan if ((m->m_pkthdr.csum_flags &
403 1.2 sevan (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) != 0) {
404 1.1 sevan cflags |= RGE_TDEXTSTS_IPCSUM;
405 1.1 sevan if (m->m_pkthdr.csum_flags & M_CSUM_TCPv4)
406 1.1 sevan cflags |= RGE_TDEXTSTS_TCPCSUM;
407 1.1 sevan if (m->m_pkthdr.csum_flags & M_CSUM_UDPv4)
408 1.1 sevan cflags |= RGE_TDEXTSTS_UDPCSUM;
409 1.1 sevan }
410 1.1 sevan
411 1.1 sevan txq = &sc->rge_ldata.rge_txq[idx];
412 1.1 sevan txmap = txq->txq_dmamap;
413 1.1 sevan
414 1.1 sevan error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m, BUS_DMA_NOWAIT);
415 1.1 sevan switch (error) {
416 1.1 sevan case 0:
417 1.1 sevan break;
418 1.1 sevan case EFBIG: /* mbuf chain is too fragmented */
419 1.1 sevan if (m_defrag(m, M_DONTWAIT) == 0 &&
420 1.1 sevan bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m,
421 1.1 sevan BUS_DMA_NOWAIT) == 0)
422 1.1 sevan break;
423 1.1 sevan
424 1.1 sevan /* FALLTHROUGH */
425 1.1 sevan default:
426 1.1 sevan return (0);
427 1.1 sevan }
428 1.1 sevan
429 1.1 sevan bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
430 1.1 sevan BUS_DMASYNC_PREWRITE);
431 1.1 sevan
432 1.1 sevan nsegs = txmap->dm_nsegs;
433 1.1 sevan
434 1.1 sevan /* Set up hardware VLAN tagging. */
435 1.1 sevan #if NVLAN > 0
436 1.1 sevan if (m->m_flags & M_VLANTAG)
437 1.1 sevan cflags |= swap16(m->m_pkthdr.ether_vtag | RGE_TDEXTSTS_VTAG);
438 1.1 sevan #endif
439 1.1 sevan
440 1.1 sevan cur = idx;
441 1.1 sevan cmdsts = RGE_TDCMDSTS_SOF;
442 1.1 sevan
443 1.1 sevan for (i = 0; i < txmap->dm_nsegs; i++) {
444 1.1 sevan d = &sc->rge_ldata.rge_tx_list[cur];
445 1.1 sevan
446 1.1 sevan d->rge_extsts = htole32(cflags);
447 1.1 sevan d->rge_addrlo = htole32(RGE_ADDR_LO(txmap->dm_segs[i].ds_addr));
448 1.1 sevan d->rge_addrhi = htole32(RGE_ADDR_HI(txmap->dm_segs[i].ds_addr));
449 1.1 sevan
450 1.1 sevan cmdsts |= txmap->dm_segs[i].ds_len;
451 1.1 sevan
452 1.1 sevan if (cur == RGE_TX_LIST_CNT - 1)
453 1.1 sevan cmdsts |= RGE_TDCMDSTS_EOR;
454 1.1 sevan
455 1.1 sevan d->rge_cmdsts = htole32(cmdsts);
456 1.1 sevan
457 1.1 sevan last = cur;
458 1.1 sevan cmdsts = RGE_TDCMDSTS_OWN;
459 1.1 sevan cur = RGE_NEXT_TX_DESC(cur);
460 1.1 sevan }
461 1.1 sevan
462 1.1 sevan /* Set EOF on the last descriptor. */
463 1.1 sevan d->rge_cmdsts |= htole32(RGE_TDCMDSTS_EOF);
464 1.1 sevan
465 1.1 sevan /* Transfer ownership of packet to the chip. */
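/* The first descriptor's OWN bit is set last, so the chip never sees a partially built chain. */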
466 1.1 sevan d = &sc->rge_ldata.rge_tx_list[idx];
467 1.1 sevan
468 1.1 sevan d->rge_cmdsts |= htole32(RGE_TDCMDSTS_OWN);
469 1.1 sevan
470 1.1 sevan bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
471 1.1 sevan cur * sizeof(struct rge_tx_desc), sizeof(struct rge_tx_desc),
472 1.1 sevan BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
473 1.1 sevan
474 1.1 sevan /* Update info of TX queue and descriptors. */
475 1.1 sevan txq->txq_mbuf = m;
476 1.1 sevan txq->txq_descidx = last;
477 1.1 sevan
478 1.1 sevan return (nsegs);
479 1.1 sevan }
480 1.1 sevan
481 1.1 sevan int
482 1.2 sevan rge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
483 1.1 sevan {
484 1.1 sevan struct rge_softc *sc = ifp->if_softc;
485 1.1 sevan struct ifreq *ifr = (struct ifreq *)data;
486 1.1 sevan int s, error = 0;
487 1.1 sevan
488 1.1 sevan s = splnet();
489 1.1 sevan
490 1.1 sevan switch (cmd) {
491 1.1 sevan case SIOCSIFADDR:
492 1.1 sevan ifp->if_flags |= IFF_UP;
493 1.1 sevan if (!(ifp->if_flags & IFF_RUNNING))
494 1.1 sevan rge_init(ifp);
495 1.1 sevan break;
496 1.1 sevan case SIOCSIFFLAGS:
497 1.1 sevan if (ifp->if_flags & IFF_UP) {
498 1.1 sevan if (ifp->if_flags & IFF_RUNNING)
499 1.1 sevan error = ENETRESET;
500 1.1 sevan else
501 1.1 sevan rge_init(ifp);
502 1.1 sevan } else {
503 1.1 sevan if (ifp->if_flags & IFF_RUNNING)
504 1.1 sevan rge_stop(ifp);
505 1.1 sevan }
506 1.1 sevan break;
507 1.1 sevan case SIOCGIFMEDIA:
508 1.1 sevan case SIOCSIFMEDIA:
509 1.1 sevan error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
510 1.1 sevan break;
511 1.1 sevan case SIOCSIFMTU:
512 1.2 sevan if (ifr->ifr_mtu > RGE_JUMBO_MTU) {
513 1.1 sevan error = EINVAL;
514 1.1 sevan break;
515 1.1 sevan }
516 1.1 sevan ifp->if_mtu = ifr->ifr_mtu;
517 1.1 sevan break;
518 1.1 sevan default:
519 1.2 sevan error = ether_ioctl(ifp, cmd, data);
520 1.1 sevan }
521 1.1 sevan
522 1.1 sevan if (error == ENETRESET) {
523 1.1 sevan if (ifp->if_flags & IFF_RUNNING)
524 1.1 sevan rge_iff(sc);
525 1.1 sevan error = 0;
526 1.1 sevan }
527 1.1 sevan
528 1.1 sevan splx(s);
529 1.1 sevan return (error);
530 1.1 sevan }
531 1.1 sevan
532 1.1 sevan void
533 1.3 sevan rge_start(struct ifnet *ifp)
534 1.1 sevan {
535 1.1 sevan struct rge_softc *sc = ifp->if_softc;
536 1.1 sevan struct mbuf *m;
537 1.1 sevan int free, idx, used;
538 1.1 sevan int queued = 0;
539 1.1 sevan
540 1.2 sevan #define LINK_STATE_IS_UP(_s) \
541 1.2 sevan ((_s) >= LINK_STATE_UP || (_s) == LINK_STATE_UNKNOWN)
542 1.2 sevan
543 1.1 sevan if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
544 1.1 sevan IFQ_PURGE(&ifp->if_snd);
545 1.1 sevan return;
546 1.1 sevan }
547 1.1 sevan
548 1.1 sevan /* Calculate free space. */
549 1.1 sevan idx = sc->rge_ldata.rge_txq_prodidx;
550 1.1 sevan free = sc->rge_ldata.rge_txq_considx;
551 1.1 sevan if (free <= idx)
552 1.1 sevan free += RGE_TX_LIST_CNT;
553 1.1 sevan free -= idx;
554 1.1 sevan
555 1.1 sevan for (;;) {
556 1.1 sevan if (RGE_TX_NSEGS >= free + 2) {
557 1.3 sevan SET(ifp->if_flags, IFF_OACTIVE);
558 1.1 sevan break;
559 1.1 sevan }
560 1.1 sevan
561 1.3 sevan IFQ_DEQUEUE(&ifp->if_snd, m);
562 1.1 sevan if (m == NULL)
563 1.1 sevan break;
564 1.1 sevan
565 1.1 sevan used = rge_encap(sc, m, idx);
566 1.1 sevan if (used == 0) {
567 1.1 sevan m_freem(m);
568 1.1 sevan continue;
569 1.1 sevan }
570 1.1 sevan
571 1.1 sevan KASSERT(used <= free);
572 1.1 sevan free -= used;
573 1.1 sevan
574 1.1 sevan #if NBPFILTER > 0
575 1.1 sevan if (ifp->if_bpf)
576 1.1 sevan bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
577 1.1 sevan #endif
578 1.1 sevan
579 1.1 sevan idx += used;
580 1.1 sevan if (idx >= RGE_TX_LIST_CNT)
581 1.1 sevan idx -= RGE_TX_LIST_CNT;
582 1.1 sevan
583 1.1 sevan queued++;
584 1.1 sevan }
585 1.1 sevan
586 1.1 sevan if (queued == 0)
587 1.1 sevan return;
588 1.1 sevan
589 1.1 sevan /* Set a timeout in case the chip goes out to lunch. */
590 1.1 sevan ifp->if_timer = 5;
591 1.1 sevan
592 1.1 sevan sc->rge_ldata.rge_txq_prodidx = idx;
593 1.1 sevan rge_txstart(&sc->sc_task, sc); /* XXX OpenBSD: ifq_serialize(ifq, &sc->sc_task) */
594 1.1 sevan }
595 1.1 sevan
596 1.1 sevan void
597 1.1 sevan rge_watchdog(struct ifnet *ifp)
598 1.1 sevan {
599 1.1 sevan struct rge_softc *sc = ifp->if_softc;
600 1.1 sevan
601 1.1 sevan printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
602 1.4 skrll if_statinc(ifp, if_oerrors);
603 1.1 sevan
604 1.1 sevan rge_init(ifp);
605 1.1 sevan }
606 1.1 sevan
607 1.1 sevan int
608 1.1 sevan rge_init(struct ifnet *ifp)
609 1.1 sevan {
610 1.1 sevan struct rge_softc *sc = ifp->if_softc;
611 1.1 sevan uint32_t val;
612 1.1 sevan uint16_t max_frame_size;
613 1.1 sevan int i;
614 1.1 sevan
615 1.1 sevan rge_stop(ifp);
616 1.1 sevan
617 1.1 sevan /* Set MAC address. */
618 1.2 sevan rge_set_macaddr(sc, sc->sc_enaddr);
619 1.1 sevan
620 1.1 sevan /* Set the maximum frame size, but don't let the MTU be less than ETHERMTU. */
621 1.1 sevan if (ifp->if_mtu < ETHERMTU)
622 1.1 sevan max_frame_size = ETHERMTU;
623 1.1 sevan else
624 1.1 sevan max_frame_size = ifp->if_mtu;
625 1.1 sevan
626 1.1 sevan max_frame_size += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
627 1.1 sevan ETHER_CRC_LEN + 1;
628 1.1 sevan
629 1.1 sevan if (max_frame_size > RGE_JUMBO_FRAMELEN)
630 1.1 sevan max_frame_size -= 1;
631 1.1 sevan
632 1.1 sevan RGE_WRITE_2(sc, RGE_RXMAXSIZE, max_frame_size);
633 1.1 sevan
634 1.1 sevan /* Initialize RX descriptors list. */
635 1.1 sevan if (rge_rx_list_init(sc) == ENOBUFS) {
636 1.1 sevan printf("%s: init failed: no memory for RX buffers\n",
637 1.1 sevan sc->sc_dev.dv_xname);
638 1.1 sevan rge_stop(ifp);
639 1.1 sevan return (ENOBUFS);
640 1.1 sevan }
641 1.1 sevan
642 1.1 sevan /* Initialize TX descriptors. */
643 1.1 sevan rge_tx_list_init(sc);
644 1.1 sevan
645 1.1 sevan /* Load the addresses of the RX and TX lists into the chip. */
646 1.1 sevan RGE_WRITE_4(sc, RGE_RXDESC_ADDR_LO,
647 1.1 sevan RGE_ADDR_LO(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
648 1.1 sevan RGE_WRITE_4(sc, RGE_RXDESC_ADDR_HI,
649 1.1 sevan RGE_ADDR_HI(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
650 1.1 sevan RGE_WRITE_4(sc, RGE_TXDESC_ADDR_LO,
651 1.1 sevan RGE_ADDR_LO(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
652 1.1 sevan RGE_WRITE_4(sc, RGE_TXDESC_ADDR_HI,
653 1.1 sevan RGE_ADDR_HI(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
654 1.1 sevan
655 1.1 sevan RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
656 1.1 sevan
657 1.1 sevan RGE_CLRBIT_1(sc, 0xf1, 0x80);
658 1.1 sevan RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
659 1.1 sevan RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
660 1.1 sevan RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_RDY_TO_L23);
661 1.1 sevan
662 1.1 sevan /* Clear interrupt moderation timer. */
663 1.1 sevan for (i = 0; i < 64; i++)
664 1.1 sevan RGE_WRITE_4(sc, RGE_IM(i), 0);
665 1.1 sevan
666 1.1 sevan /* Set the initial RX and TX configurations. */
667 1.1 sevan RGE_WRITE_4(sc, RGE_RXCFG, RGE_RXCFG_CONFIG);
668 1.1 sevan RGE_WRITE_4(sc, RGE_TXCFG, RGE_TXCFG_CONFIG);
669 1.1 sevan
670 1.1 sevan val = rge_read_csi(sc, 0x70c) & ~0xff000000;
671 1.1 sevan rge_write_csi(sc, 0x70c, val | 0x27000000);
672 1.1 sevan
673 1.1 sevan /* Enable hardware optimization function. */
674 1.1 sevan val = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x78) & ~0x00007000;
675 1.1 sevan pci_conf_write(sc->sc_pc, sc->sc_tag, 0x78, val | 0x00005000);
676 1.1 sevan
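/* Undocumented chip tuning writes, carried over from the OpenBSD driver. */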
677 1.1 sevan RGE_WRITE_2(sc, 0x0382, 0x221b);
678 1.1 sevan RGE_WRITE_1(sc, 0x4500, 0);
679 1.1 sevan RGE_WRITE_2(sc, 0x4800, 0);
680 1.1 sevan RGE_CLRBIT_1(sc, RGE_CFG1, RGE_CFG1_SPEED_DOWN);
681 1.1 sevan
682 1.1 sevan rge_write_mac_ocp(sc, 0xc140, 0xffff);
683 1.1 sevan rge_write_mac_ocp(sc, 0xc142, 0xffff);
684 1.1 sevan
685 1.1 sevan val = rge_read_mac_ocp(sc, 0xd3e2) & ~0x0fff;
686 1.1 sevan rge_write_mac_ocp(sc, 0xd3e2, val | 0x03a9);
687 1.1 sevan
688 1.1 sevan RGE_MAC_CLRBIT(sc, 0xd3e4, 0x00ff);
689 1.1 sevan RGE_MAC_SETBIT(sc, 0xe860, 0x0080);
690 1.1 sevan RGE_MAC_SETBIT(sc, 0xeb58, 0x0001);
691 1.1 sevan
692 1.1 sevan val = rge_read_mac_ocp(sc, 0xe614) & ~0x0700;
693 1.1 sevan rge_write_mac_ocp(sc, 0xe614, val | 0x0400);
694 1.1 sevan
695 1.1 sevan RGE_MAC_CLRBIT(sc, 0xe63e, 0x0c00);
696 1.1 sevan
697 1.1 sevan val = rge_read_mac_ocp(sc, 0xe63e) & ~0x0030;
698 1.1 sevan rge_write_mac_ocp(sc, 0xe63e, val | 0x0020);
699 1.1 sevan
700 1.1 sevan RGE_MAC_SETBIT(sc, 0xc0b4, 0x000c);
701 1.1 sevan
702 1.1 sevan val = rge_read_mac_ocp(sc, 0xeb6a) & ~0x007f;
703 1.1 sevan rge_write_mac_ocp(sc, 0xeb6a, val | 0x0033);
704 1.1 sevan
705 1.1 sevan val = rge_read_mac_ocp(sc, 0xeb50) & ~0x03e0;
706 1.1 sevan rge_write_mac_ocp(sc, 0xeb50, val | 0x0040);
707 1.1 sevan
708 1.1 sevan val = rge_read_mac_ocp(sc, 0xe056) & ~0x00f0;
709 1.1 sevan rge_write_mac_ocp(sc, 0xe056, val | 0x0030);
710 1.1 sevan
711 1.1 sevan RGE_WRITE_1(sc, RGE_TDFNR, 0x10);
712 1.1 sevan
713 1.1 sevan RGE_MAC_CLRBIT(sc, 0xe040, 0x1000);
714 1.1 sevan
715 1.1 sevan val = rge_read_mac_ocp(sc, 0xe0c0) & ~0x4f0f;
716 1.1 sevan rge_write_mac_ocp(sc, 0xe0c0, val | 0x4403);
717 1.1 sevan
718 1.1 sevan RGE_MAC_SETBIT(sc, 0xe052, 0x0068);
719 1.1 sevan RGE_MAC_CLRBIT(sc, 0xe052, 0x0080);
720 1.1 sevan
721 1.1 sevan val = rge_read_mac_ocp(sc, 0xc0ac) & ~0x0080;
722 1.1 sevan rge_write_mac_ocp(sc, 0xc0ac, val | 0x1f00);
723 1.1 sevan
724 1.1 sevan val = rge_read_mac_ocp(sc, 0xd430) & ~0x0fff;
725 1.1 sevan rge_write_mac_ocp(sc, 0xd430, val | 0x047f);
726 1.1 sevan
727 1.1 sevan RGE_MAC_SETBIT(sc, 0xe84c, 0x00c0);
728 1.1 sevan
729 1.1 sevan /* Disable EEE plus. */
730 1.1 sevan RGE_MAC_CLRBIT(sc, 0xe080, 0x0002);
731 1.1 sevan
732 1.1 sevan RGE_MAC_CLRBIT(sc, 0xea1c, 0x0004);
733 1.1 sevan
734 1.1 sevan RGE_MAC_SETBIT(sc, 0xeb54, 0x0001);
735 1.1 sevan DELAY(1);
736 1.1 sevan RGE_MAC_CLRBIT(sc, 0xeb54, 0x0001);
737 1.1 sevan
738 1.1 sevan RGE_CLRBIT_4(sc, 0x1880, 0x0030);
739 1.1 sevan
740 1.1 sevan rge_write_mac_ocp(sc, 0xe098, 0xc302);
741 1.1 sevan
742 1.2 sevan if (ifp->if_capabilities & ETHERCAP_VLAN_HWTAGGING)
743 1.1 sevan RGE_SETBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);
744 1.1 sevan
745 1.1 sevan RGE_SETBIT_2(sc, RGE_CPLUSCMD, RGE_CPLUSCMD_RXCSUM);
746 1.1 sevan
747 1.1 sevan for (i = 0; i < 10; i++) {
748 1.1 sevan if (!(rge_read_mac_ocp(sc, 0xe00e) & 0x2000))
749 1.1 sevan break;
750 1.1 sevan DELAY(1000);
751 1.1 sevan }
752 1.1 sevan
753 1.1 sevan /* Disable RXDV gate. */
754 1.1 sevan RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
755 1.1 sevan DELAY(2000);
756 1.1 sevan
757 1.1 sevan rge_ifmedia_upd(ifp);
758 1.1 sevan
759 1.1 sevan /* Enable transmit and receive. */
760 1.1 sevan RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_TXENB | RGE_CMD_RXENB);
761 1.1 sevan
762 1.1 sevan /* Program promiscuous mode and multicast filters. */
763 1.1 sevan rge_iff(sc);
764 1.1 sevan
765 1.1 sevan RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
766 1.1 sevan RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
767 1.1 sevan
768 1.1 sevan RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
769 1.1 sevan
770 1.1 sevan /* Enable interrupts. */
771 1.1 sevan rge_setup_intr(sc, RGE_IMTYPE_SIM);
772 1.1 sevan
773 1.1 sevan ifp->if_flags |= IFF_RUNNING;
774 1.3 sevan CLR(ifp->if_flags, IFF_OACTIVE);
775 1.1 sevan
776 1.3 sevan callout_schedule(&sc->sc_timeout, 1);
777 1.1 sevan
778 1.1 sevan return (0);
779 1.1 sevan }
780 1.1 sevan
781 1.1 sevan /*
782 1.1 sevan * Stop the adapter and free any mbufs allocated to the RX and TX lists.
783 1.1 sevan */
784 1.1 sevan void
785 1.1 sevan rge_stop(struct ifnet *ifp)
786 1.1 sevan {
787 1.1 sevan struct rge_softc *sc = ifp->if_softc;
788 1.1 sevan int i;
789 1.1 sevan
790 1.1 sevan callout_stop(&sc->sc_timeout);
791 1.1 sevan
792 1.1 sevan ifp->if_timer = 0;
793 1.1 sevan ifp->if_flags &= ~IFF_RUNNING;
794 1.1 sevan sc->rge_timerintr = 0;
795 1.1 sevan
796 1.1 sevan RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
797 1.1 sevan RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
798 1.1 sevan RGE_RXCFG_ERRPKT);
799 1.1 sevan
800 1.1 sevan RGE_WRITE_4(sc, RGE_IMR, 0);
801 1.1 sevan RGE_WRITE_4(sc, RGE_ISR, 0xffffffff);
802 1.1 sevan
803 1.1 sevan rge_reset(sc);
804 1.1 sevan
805 1.1 sevan intr_barrier(sc->sc_ih);
806 1.1 sevan /* ifq_barrier(&ifp->if_snd); XXX OpenBSD queue API */
807 1.2 sevan /* ifq_clr_oactive(&ifp->if_snd); Sevan - OpenBSD queue API */
808 1.1 sevan
809 1.1 sevan if (sc->rge_head != NULL) {
810 1.1 sevan m_freem(sc->rge_head);
811 1.1 sevan sc->rge_head = sc->rge_tail = NULL;
812 1.1 sevan }
813 1.1 sevan
814 1.1 sevan /* Free the TX list buffers. */
815 1.1 sevan for (i = 0; i < RGE_TX_LIST_CNT; i++) {
816 1.1 sevan if (sc->rge_ldata.rge_txq[i].txq_mbuf != NULL) {
817 1.1 sevan bus_dmamap_unload(sc->sc_dmat,
818 1.1 sevan sc->rge_ldata.rge_txq[i].txq_dmamap);
819 1.1 sevan m_freem(sc->rge_ldata.rge_txq[i].txq_mbuf);
820 1.1 sevan sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
821 1.1 sevan }
822 1.1 sevan }
823 1.1 sevan
824 1.1 sevan /* Free the RX list buffers. */
825 1.1 sevan for (i = 0; i < RGE_RX_LIST_CNT; i++) {
826 1.1 sevan if (sc->rge_ldata.rge_rxq[i].rxq_mbuf != NULL) {
827 1.1 sevan bus_dmamap_unload(sc->sc_dmat,
828 1.1 sevan sc->rge_ldata.rge_rxq[i].rxq_dmamap);
829 1.1 sevan m_freem(sc->rge_ldata.rge_rxq[i].rxq_mbuf);
830 1.1 sevan sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
831 1.1 sevan }
832 1.1 sevan }
833 1.1 sevan }
834 1.1 sevan
835 1.1 sevan /*
836 1.1 sevan * Set media options.
837 1.1 sevan */
838 1.1 sevan int
839 1.1 sevan rge_ifmedia_upd(struct ifnet *ifp)
840 1.1 sevan {
841 1.1 sevan struct rge_softc *sc = ifp->if_softc;
842 1.1 sevan struct ifmedia *ifm = &sc->sc_media;
843 1.1 sevan int anar, gig, val;
844 1.1 sevan
845 1.1 sevan if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
846 1.1 sevan return (EINVAL);
847 1.1 sevan
848 1.1 sevan /* Disable Gigabit Lite. */
849 1.1 sevan RGE_PHY_CLRBIT(sc, 0xa428, 0x0200);
850 1.1 sevan RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0001);
851 1.1 sevan
852 1.1 sevan val = rge_read_phy_ocp(sc, 0xa5d4);
853 1.1 sevan val &= ~RGE_ADV_2500TFDX;
854 1.1 sevan
855 1.1 sevan anar = gig = 0;
856 1.1 sevan switch (IFM_SUBTYPE(ifm->ifm_media)) {
857 1.1 sevan case IFM_AUTO:
858 1.1 sevan anar |= ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
859 1.1 sevan gig |= GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
860 1.1 sevan val |= RGE_ADV_2500TFDX;
861 1.1 sevan break;
862 1.1 sevan case IFM_2500_T:
863 1.1 sevan anar |= ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
864 1.1 sevan gig |= GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
865 1.1 sevan val |= RGE_ADV_2500TFDX;
866 1.1 sevan ifp->if_baudrate = IF_Mbps(2500);
867 1.1 sevan break;
868 1.1 sevan case IFM_1000_T:
869 1.1 sevan anar |= ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
870 1.1 sevan gig |= GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
871 1.1 sevan ifp->if_baudrate = IF_Gbps(1);
872 1.1 sevan break;
873 1.1 sevan case IFM_100_TX:
874 1.1 sevan anar |= ANAR_TX | ANAR_TX_FD;
875 1.1 sevan ifp->if_baudrate = IF_Mbps(100);
876 1.1 sevan break;
877 1.1 sevan case IFM_10_T:
878 1.1 sevan anar |= ANAR_10 | ANAR_10_FD;
879 1.1 sevan ifp->if_baudrate = IF_Mbps(10);
880 1.1 sevan break;
881 1.1 sevan default:
882 1.1 sevan printf("%s: unsupported media type\n", sc->sc_dev.dv_xname);
883 1.1 sevan return (EINVAL);
884 1.1 sevan }
885 1.1 sevan
886 1.1 sevan rge_write_phy(sc, 0, MII_ANAR, anar | ANAR_PAUSE_ASYM | ANAR_FC);
887 1.1 sevan rge_write_phy(sc, 0, MII_100T2CR, gig);
888 1.1 sevan rge_write_phy_ocp(sc, 0xa5d4, val);
889 1.1 sevan rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_STARTNEG);
890 1.1 sevan
891 1.1 sevan return (0);
892 1.1 sevan }
893 1.1 sevan
894 1.1 sevan /*
895 1.1 sevan * Report current media status.
896 1.1 sevan */
897 1.1 sevan void
898 1.1 sevan rge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
899 1.1 sevan {
900 1.1 sevan struct rge_softc *sc = ifp->if_softc;
901 1.1 sevan uint16_t status = 0;
902 1.1 sevan
903 1.1 sevan ifmr->ifm_status = IFM_AVALID;
904 1.1 sevan ifmr->ifm_active = IFM_ETHER;
905 1.1 sevan
906 1.1 sevan if (rge_get_link_status(sc)) {
907 1.1 sevan ifmr->ifm_status |= IFM_ACTIVE;
908 1.1 sevan
909 1.1 sevan status = RGE_READ_2(sc, RGE_PHYSTAT);
910 1.1 sevan if ((status & RGE_PHYSTAT_FDX) ||
911 1.1 sevan (status & RGE_PHYSTAT_2500MBPS))
912 1.1 sevan ifmr->ifm_active |= IFM_FDX;
913 1.1 sevan else
914 1.1 sevan ifmr->ifm_active |= IFM_HDX;
915 1.1 sevan
916 1.1 sevan if (status & RGE_PHYSTAT_10MBPS)
917 1.1 sevan ifmr->ifm_active |= IFM_10_T;
918 1.1 sevan else if (status & RGE_PHYSTAT_100MBPS)
919 1.1 sevan ifmr->ifm_active |= IFM_100_TX;
920 1.1 sevan else if (status & RGE_PHYSTAT_1000MBPS)
921 1.1 sevan ifmr->ifm_active |= IFM_1000_T;
922 1.1 sevan else if (status & RGE_PHYSTAT_2500MBPS)
923 1.1 sevan ifmr->ifm_active |= IFM_2500_T;
924 1.1 sevan }
925 1.1 sevan }
926 1.1 sevan
927 1.1 sevan /*
928 1.1 sevan * Allocate memory for RX/TX rings.
929 1.1 sevan */
930 1.1 sevan int
931 1.1 sevan rge_allocmem(struct rge_softc *sc)
932 1.1 sevan {
933 1.1 sevan int error, i;
934 1.1 sevan
935 1.1 sevan /* Allocate DMA'able memory for the TX ring. */
936 1.1 sevan error = bus_dmamap_create(sc->sc_dmat, RGE_TX_LIST_SZ, 1,
937 1.1 sevan RGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT, &sc->rge_ldata.rge_tx_list_map);
938 1.1 sevan if (error) {
939 1.1 sevan printf("%s: can't create TX list map\n", sc->sc_dev.dv_xname);
940 1.1 sevan return (error);
941 1.1 sevan }
942 1.1 sevan error = bus_dmamem_alloc(sc->sc_dmat, RGE_TX_LIST_SZ, RGE_ALIGN, 0,
943 1.1 sevan &sc->rge_ldata.rge_tx_listseg, 1, &sc->rge_ldata.rge_tx_listnseg,
944 1.2 sevan BUS_DMA_NOWAIT); /* XXX OpenBSD adds BUS_DMA_ZERO */
945 1.1 sevan if (error) {
946 1.1 sevan printf("%s: can't alloc TX list\n", sc->sc_dev.dv_xname);
947 1.1 sevan return (error);
948 1.1 sevan }
949 1.1 sevan
950 1.1 sevan /* Load the map for the TX ring. */
951 1.1 sevan error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
952 1.1 sevan sc->rge_ldata.rge_tx_listnseg, RGE_TX_LIST_SZ,
953 1.2 sevan &sc->rge_ldata.rge_tx_list,
954 1.2 sevan BUS_DMA_NOWAIT); /* XXX OpenBSD adds BUS_DMA_COHERENT */
955 1.1 sevan if (error) {
956 1.1 sevan printf("%s: can't map TX dma buffers\n", sc->sc_dev.dv_xname);
957 1.1 sevan bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
958 1.1 sevan sc->rge_ldata.rge_tx_listnseg);
959 1.1 sevan return (error);
960 1.1 sevan }
961 1.1 sevan error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
962 1.1 sevan sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
963 1.1 sevan if (error) {
964 1.1 sevan printf("%s: can't load TX dma map\n", sc->sc_dev.dv_xname);
965 1.1 sevan bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map);
966 1.1 sevan bus_dmamem_unmap(sc->sc_dmat,
967 1.2 sevan sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ);
968 1.1 sevan bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
969 1.1 sevan sc->rge_ldata.rge_tx_listnseg);
970 1.1 sevan return (error);
971 1.1 sevan }
972 1.1 sevan
973 1.1 sevan /* Create DMA maps for TX buffers. */
974 1.1 sevan for (i = 0; i < RGE_TX_LIST_CNT; i++) {
975 1.1 sevan error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN,
976 1.1 sevan RGE_TX_NSEGS, RGE_JUMBO_FRAMELEN, 0, 0,
977 1.1 sevan &sc->rge_ldata.rge_txq[i].txq_dmamap);
978 1.1 sevan if (error) {
979 1.1 sevan printf("%s: can't create DMA map for TX\n",
980 1.1 sevan sc->sc_dev.dv_xname);
981 1.1 sevan return (error);
982 1.1 sevan }
983 1.1 sevan }
984 1.1 sevan
985 1.1 sevan /* Allocate DMA'able memory for the RX ring. */
986 1.1 sevan error = bus_dmamap_create(sc->sc_dmat, RGE_RX_LIST_SZ, 1,
987 1.1 sevan RGE_RX_LIST_SZ, 0, 0, &sc->rge_ldata.rge_rx_list_map);
988 1.1 sevan if (error) {
989 1.1 sevan printf("%s: can't create RX list map\n", sc->sc_dev.dv_xname);
990 1.1 sevan return (error);
991 1.1 sevan }
992 1.1 sevan error = bus_dmamem_alloc(sc->sc_dmat, RGE_RX_LIST_SZ, RGE_ALIGN, 0,
993 1.1 sevan &sc->rge_ldata.rge_rx_listseg, 1, &sc->rge_ldata.rge_rx_listnseg,
994 1.2 sevan BUS_DMA_NOWAIT); /* XXX OpenBSD adds BUS_DMA_ZERO */
995 1.1 sevan if (error) {
996 1.1 sevan printf("%s: can't alloc RX list\n", sc->sc_dev.dv_xname);
997 1.1 sevan return (error);
998 1.1 sevan }
999 1.1 sevan
1000 1.1 sevan /* Load the map for the RX ring. */
1001 1.1 sevan error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
1002 1.1 sevan sc->rge_ldata.rge_rx_listnseg, RGE_RX_LIST_SZ,
1003 1.2 sevan &sc->rge_ldata.rge_rx_list,
1004 1.2 sevan BUS_DMA_NOWAIT); /* XXX OpenBSD adds BUS_DMA_COHERENT */
1005 1.1 sevan if (error) {
1006 1.1 sevan printf("%s: can't map RX dma buffers\n", sc->sc_dev.dv_xname);
1007 1.1 sevan bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
1008 1.1 sevan sc->rge_ldata.rge_rx_listnseg);
1009 1.1 sevan return (error);
1010 1.1 sevan }
1011 1.1 sevan error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
1012 1.1 sevan sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
1013 1.1 sevan if (error) {
1014 1.1 sevan printf("%s: can't load RX dma map\n", sc->sc_dev.dv_xname);
1015 1.1 sevan bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map);
1016 1.1 sevan bus_dmamem_unmap(sc->sc_dmat,
1017 1.2 sevan sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ);
1018 1.1 sevan bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
1019 1.1 sevan sc->rge_ldata.rge_rx_listnseg);
1020 1.1 sevan return (error);
1021 1.1 sevan }
1022 1.1 sevan
1023 1.1 sevan /* Create DMA maps for RX buffers. */
1024 1.1 sevan for (i = 0; i < RGE_RX_LIST_CNT; i++) {
1025 1.1 sevan error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN, 1,
1026 1.1 sevan RGE_JUMBO_FRAMELEN, 0, 0,
1027 1.1 sevan &sc->rge_ldata.rge_rxq[i].rxq_dmamap);
1028 1.1 sevan if (error) {
1029 1.1 sevan printf("%s: can't create DMA map for RX\n",
1030 1.1 sevan sc->sc_dev.dv_xname);
1031 1.1 sevan return (error);
1032 1.1 sevan }
1033 1.1 sevan }
1034 1.1 sevan
1035 1.1 sevan return (error);
1036 1.1 sevan }
1037 1.1 sevan
1038 1.1 sevan /*
1039 1.1 sevan * Initialize the RX descriptor and attach an mbuf cluster.
1040 1.1 sevan */
1041 1.1 sevan int
1042 1.1 sevan rge_newbuf(struct rge_softc *sc, int idx)
1043 1.1 sevan {
1044 1.1 sevan struct mbuf *m;
1045 1.1 sevan struct rge_rx_desc *r;
1046 1.1 sevan struct rge_rxq *rxq;
1047 1.1 sevan bus_dmamap_t rxmap;
1048 1.1 sevan
1049 1.1 sevan m = MCLGETI(NULL, M_DONTWAIT, NULL, RGE_JUMBO_FRAMELEN);
1050 1.1 sevan if (m == NULL)
1051 1.1 sevan return (ENOBUFS);
1052 1.1 sevan
1053 1.1 sevan m->m_len = m->m_pkthdr.len = RGE_JUMBO_FRAMELEN;
1054 1.1 sevan
1055 1.1 sevan rxq = &sc->rge_ldata.rge_rxq[idx];
1056 1.1 sevan rxmap = rxq->rxq_dmamap;
1057 1.1 sevan
1058 1.1 sevan if (bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m, BUS_DMA_NOWAIT))
1059 1.1 sevan goto out;
1060 1.1 sevan
1061 1.1 sevan bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
1062 1.1 sevan BUS_DMASYNC_PREREAD);
1063 1.1 sevan
1064 1.1 sevan /* Map the segments into RX descriptors. */
1065 1.1 sevan r = &sc->rge_ldata.rge_rx_list[idx];
1066 1.1 sevan
1067 1.1 sevan if (RGE_OWN(r)) {
1068 1.1 sevan printf("%s: tried to map busy RX descriptor\n",
1069 1.1 sevan sc->sc_dev.dv_xname);
1070 1.1 sevan goto out;
1071 1.1 sevan }
1072 1.1 sevan
1073 1.1 sevan rxq->rxq_mbuf = m;
1074 1.1 sevan
1075 1.1 sevan r->rge_extsts = 0;
1076 1.1 sevan r->rge_addrlo = htole32(RGE_ADDR_LO(rxmap->dm_segs[0].ds_addr));
1077 1.1 sevan r->rge_addrhi = htole32(RGE_ADDR_HI(rxmap->dm_segs[0].ds_addr));
1078 1.1 sevan
1079 1.1 sevan r->rge_cmdsts = htole32(rxmap->dm_segs[0].ds_len);
1080 1.1 sevan if (idx == RGE_RX_LIST_CNT - 1)
1081 1.1 sevan r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
1082 1.1 sevan
1083 1.1 sevan r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);
1084 1.1 sevan
1085 1.1 sevan bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
1086 1.1 sevan idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
1087 1.1 sevan BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1088 1.1 sevan
1089 1.1 sevan return (0);
1090 1.1 sevan out:
1091 1.1 sevan if (m != NULL)
1092 1.1 sevan m_freem(m);
1093 1.1 sevan return (ENOMEM);
1094 1.1 sevan }
1095 1.1 sevan
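/*
* Return an RX descriptor to the chip, reusing the buffer address already
* programmed into it (used when an incoming packet is dropped).
*/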
1096 1.1 sevan void
1097 1.1 sevan rge_discard_rxbuf(struct rge_softc *sc, int idx)
1098 1.1 sevan {
1099 1.1 sevan struct rge_rx_desc *r;
1100 1.1 sevan
1101 1.1 sevan r = &sc->rge_ldata.rge_rx_list[idx];
1102 1.1 sevan
1103 1.1 sevan r->rge_cmdsts = htole32(RGE_JUMBO_FRAMELEN);
1104 1.1 sevan r->rge_extsts = 0;
1105 1.1 sevan if (idx == RGE_RX_LIST_CNT - 1)
1106 1.1 sevan r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
1107 1.1 sevan r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);
1108 1.1 sevan
1109 1.1 sevan bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
1110 1.1 sevan idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
1111 1.1 sevan BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1112 1.1 sevan }
1113 1.1 sevan
1114 1.1 sevan int
1115 1.1 sevan rge_rx_list_init(struct rge_softc *sc)
1116 1.1 sevan {
1117 1.1 sevan int i;
1118 1.1 sevan
1119 1.1 sevan memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);
1120 1.1 sevan
1121 1.1 sevan for (i = 0; i < RGE_RX_LIST_CNT; i++) {
1122 1.1 sevan sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
1123 1.1 sevan if (rge_newbuf(sc, i) == ENOBUFS)
1124 1.1 sevan return (ENOBUFS);
1125 1.1 sevan }
1126 1.1 sevan
1127 1.1 sevan sc->rge_ldata.rge_rxq_prodidx = 0;
1128 1.1 sevan sc->rge_head = sc->rge_tail = NULL;
1129 1.1 sevan
1130 1.1 sevan return (0);
1131 1.1 sevan }
1132 1.1 sevan
1133 1.1 sevan void
1134 1.1 sevan rge_tx_list_init(struct rge_softc *sc)
1135 1.1 sevan {
1136 1.1 sevan int i;
1137 1.1 sevan
1138 1.1 sevan memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);
1139 1.1 sevan
1140 1.1 sevan for (i = 0; i < RGE_TX_LIST_CNT; i++)
1141 1.1 sevan sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
1142 1.1 sevan
1143 1.1 sevan bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map, 0,
1144 1.1 sevan sc->rge_ldata.rge_tx_list_map->dm_mapsize,
1145 1.1 sevan BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1146 1.1 sevan
1147 1.1 sevan sc->rge_ldata.rge_txq_prodidx = sc->rge_ldata.rge_txq_considx = 0;
1148 1.1 sevan }
1149 1.1 sevan
1150 1.1 sevan int
1151 1.1 sevan rge_rxeof(struct rge_softc *sc)
1152 1.1 sevan {
1153 1.1 sevan struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1154 1.1 sevan struct mbuf *m;
1155 1.2 sevan struct ifnet *ifp = &sc->sc_ec.ec_if;
1156 1.1 sevan struct rge_rx_desc *cur_rx;
1157 1.1 sevan struct rge_rxq *rxq;
1158 1.1 sevan uint32_t rxstat, extsts;
1159 1.1 sevan int i, total_len, rx = 0;
1160 1.1 sevan
1161 1.1 sevan for (i = sc->rge_ldata.rge_rxq_prodidx; ; i = RGE_NEXT_RX_DESC(i)) {
1162 1.1 sevan /* Invalidate the descriptor memory. */
1163 1.1 sevan bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
1164 1.1 sevan i * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
1165 1.1 sevan BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1166 1.1 sevan
1167 1.1 sevan cur_rx = &sc->rge_ldata.rge_rx_list[i];
1168 1.1 sevan
1169 1.1 sevan if (RGE_OWN(cur_rx))
1170 1.1 sevan break;
1171 1.1 sevan
1172 1.1 sevan rxstat = letoh32(cur_rx->rge_cmdsts);
1173 1.1 sevan extsts = letoh32(cur_rx->rge_extsts);
1174 1.1 sevan
1175 1.1 sevan total_len = RGE_RXBYTES(cur_rx);
1176 1.1 sevan rxq = &sc->rge_ldata.rge_rxq[i];
1177 1.1 sevan m = rxq->rxq_mbuf;
1178 1.1 sevan rx = 1;
1179 1.1 sevan
1180 1.1 sevan /* Invalidate the RX mbuf and unload its map. */
1181 1.1 sevan bus_dmamap_sync(sc->sc_dmat, rxq->rxq_dmamap, 0,
1182 1.1 sevan rxq->rxq_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1183 1.1 sevan bus_dmamap_unload(sc->sc_dmat, rxq->rxq_dmamap);
1184 1.1 sevan
1185 1.1 sevan if ((rxstat & (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) !=
1186 1.1 sevan (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) {
1187 1.1 sevan rge_discard_rxbuf(sc, i);
1188 1.1 sevan continue;
1189 1.1 sevan }
1190 1.1 sevan
1191 1.1 sevan if (rxstat & RGE_RDCMDSTS_RXERRSUM) {
1192 1.4 skrll if_statinc(ifp, if_ierrors);
1193 1.1 sevan /*
1194 1.1 sevan * If this is part of a multi-fragment packet,
1195 1.1 sevan * discard all the pieces.
1196 1.1 sevan */
1197 1.1 sevan if (sc->rge_head != NULL) {
1198 1.1 sevan m_freem(sc->rge_head);
1199 1.1 sevan sc->rge_head = sc->rge_tail = NULL;
1200 1.1 sevan }
1201 1.1 sevan rge_discard_rxbuf(sc, i);
1202 1.1 sevan continue;
1203 1.1 sevan }
1204 1.1 sevan
1205 1.1 sevan /*
1206 1.1 sevan * If allocating a replacement mbuf fails,
1207 1.1 sevan * reload the current one.
1208 1.1 sevan */
1209 1.1 sevan
1210 1.1 sevan if (rge_newbuf(sc, i) == ENOBUFS) {
1211 1.1 sevan if (sc->rge_head != NULL) {
1212 1.1 sevan m_freem(sc->rge_head);
1213 1.1 sevan sc->rge_head = sc->rge_tail = NULL;
1214 1.1 sevan }
1215 1.1 sevan rge_discard_rxbuf(sc, i);
1216 1.1 sevan continue;
1217 1.1 sevan }
1218 1.1 sevan
1219 1.1 sevan if (sc->rge_head != NULL) {
1220 1.1 sevan m->m_len = total_len;
1221 1.1 sevan /*
1222 1.1 sevan * Special case: if there's 4 bytes or less
1223 1.1 sevan * in this buffer, the mbuf can be discarded:
1224 1.1 sevan * the last 4 bytes is the CRC, which we don't
1225 1.1 sevan * care about anyway.
1226 1.1 sevan */
1227 1.1 sevan if (m->m_len <= ETHER_CRC_LEN) {
1228 1.1 sevan sc->rge_tail->m_len -=
1229 1.1 sevan (ETHER_CRC_LEN - m->m_len);
1230 1.1 sevan m_freem(m);
1231 1.1 sevan } else {
1232 1.1 sevan m->m_len -= ETHER_CRC_LEN;
1233 1.1 sevan m->m_flags &= ~M_PKTHDR;
1234 1.1 sevan sc->rge_tail->m_next = m;
1235 1.1 sevan }
1236 1.1 sevan m = sc->rge_head;
1237 1.1 sevan sc->rge_head = sc->rge_tail = NULL;
1238 1.1 sevan m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
1239 1.1 sevan } else
1240 1.1 sevan m->m_pkthdr.len = m->m_len =
1241 1.1 sevan (total_len - ETHER_CRC_LEN);
1242 1.1 sevan
1243 1.1 sevan /* Check IP header checksum. */
1244 1.1 sevan if (!(rxstat & RGE_RDCMDSTS_IPCSUMERR) &&
1245 1.1 sevan (extsts & RGE_RDEXTSTS_IPV4))
1246 1.1 sevan m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
1247 1.1 sevan
1248 1.1 sevan /* Check TCP/UDP checksum. */
1249 1.1 sevan if ((extsts & (RGE_RDEXTSTS_IPV4 | RGE_RDEXTSTS_IPV6)) &&
1250 1.1 sevan (((rxstat & RGE_RDCMDSTS_TCPPKT) &&
1251 1.1 sevan !(rxstat & RGE_RDCMDSTS_TCPCSUMERR)) ||
1252 1.1 sevan ((rxstat & RGE_RDCMDSTS_UDPPKT) &&
1253 1.1 sevan !(rxstat & RGE_RDCMDSTS_UDPCSUMERR))))
1254 1.1 sevan m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
1255 1.1 sevan M_UDP_CSUM_IN_OK;
1256 1.1 sevan
1257 1.1 sevan #if NVLAN > 0
1258 1.1 sevan if (extsts & RGE_RDEXTSTS_VTAG) {
1259 1.1 sevan m->m_pkthdr.ether_vtag =
1260 1.1 sevan ntohs(extsts & RGE_RDEXTSTS_VLAN_MASK);
1261 1.1 sevan m->m_flags |= M_VLANTAG;
1262 1.1 sevan }
1263 1.1 sevan #endif
1264 1.1 sevan
1265 1.1 sevan ml_enqueue(&ml, m);
1266 1.1 sevan }
1267 1.1 sevan
1268 1.1 sevan sc->rge_ldata.rge_rxq_prodidx = i;
1269 1.1 sevan
1270 1.1 sevan if_input(ifp, &ml);
1271 1.1 sevan
1272 1.1 sevan return (rx);
1273 1.1 sevan }
1274 1.1 sevan
1275 1.1 sevan int
1276 1.1 sevan rge_txeof(struct rge_softc *sc)
1277 1.1 sevan {
1278 1.2 sevan struct ifnet *ifp = &sc->sc_ec.ec_if;
1279 1.1 sevan struct rge_txq *txq;
1280 1.1 sevan uint32_t txstat;
1281 1.1 sevan int cons, idx, prod;
1282 1.1 sevan int free = 0;
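/*
* free: 0 = nothing reclaimed, 1 = all pending descriptors reclaimed,
* 2 = stopped at a descriptor the chip still owns.
*/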
1283 1.1 sevan
1284 1.1 sevan prod = sc->rge_ldata.rge_txq_prodidx;
1285 1.1 sevan cons = sc->rge_ldata.rge_txq_considx;
1286 1.1 sevan
1287 1.1 sevan while (prod != cons) {
1288 1.1 sevan txq = &sc->rge_ldata.rge_txq[cons];
1289 1.1 sevan idx = txq->txq_descidx;
1290 1.1 sevan
1291 1.1 sevan bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
1292 1.1 sevan idx * sizeof(struct rge_tx_desc),
1293 1.1 sevan sizeof(struct rge_tx_desc),
1294 1.1 sevan BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1295 1.1 sevan
1296 1.1 sevan txstat = letoh32(sc->rge_ldata.rge_tx_list[idx].rge_cmdsts);
1297 1.1 sevan
1298 1.1 sevan if (txstat & RGE_TDCMDSTS_OWN) {
1299 1.1 sevan free = 2;
1300 1.1 sevan break;
1301 1.1 sevan }
1302 1.1 sevan
1303 1.1 sevan bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap, 0,
1304 1.1 sevan txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1305 1.1 sevan bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
1306 1.1 sevan m_freem(txq->txq_mbuf);
1307 1.1 sevan txq->txq_mbuf = NULL;
1308 1.1 sevan
1309 1.1 sevan if (txstat & (RGE_TDCMDSTS_EXCESSCOLL | RGE_TDCMDSTS_COLL))
1310 1.4 skrll if_statinc(ifp, if_collisions);
1311 1.1 sevan if (txstat & RGE_TDCMDSTS_TXERR)
1312 1.4 skrll if_statinc(ifp, if_oerrors);
1313 1.1 sevan
1314 1.1 sevan bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
1315 1.1 sevan idx * sizeof(struct rge_tx_desc),
1316 1.1 sevan sizeof(struct rge_tx_desc),
1317 1.1 sevan BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1318 1.1 sevan
1319 1.1 sevan cons = RGE_NEXT_TX_DESC(idx);
1320 1.1 sevan free = 1;
1321 1.1 sevan }
1322 1.1 sevan
1323 1.1 sevan if (free == 0)
1324 1.1 sevan return (0);
1325 1.1 sevan
1326 1.1 sevan sc->rge_ldata.rge_txq_considx = cons;
1327 1.1 sevan
1328 1.1 sevan if (ifq_is_oactive(&ifp->if_snd))
1329 1.1 sevan ifq_restart(&ifp->if_snd);
1330 1.1 sevan else if (free == 2)
1331 1.1 sevan ifq_serialize(&ifp->if_snd, &sc->sc_task);
1332 1.1 sevan else
1333 1.1 sevan ifp->if_timer = 0;
1334 1.1 sevan
1335 1.1 sevan return (1);
1336 1.1 sevan }
1337 1.1 sevan
1338 1.1 sevan void
1339 1.1 sevan rge_reset(struct rge_softc *sc)
1340 1.1 sevan {
1341 1.1 sevan int i;
1342 1.1 sevan
1343 1.1 sevan /* Enable RXDV gate. */
1344 1.1 sevan RGE_SETBIT_1(sc, RGE_PPSW, 0x08);
1345 1.1 sevan DELAY(2000);
1346 1.1 sevan
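/* Wait for the TX and RX FIFOs to drain. */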
1347 1.1 sevan for (i = 0; i < 10; i++) {
1348 1.1 sevan DELAY(100);
1349 1.1 sevan if ((RGE_READ_1(sc, RGE_MCUCMD) & (RGE_MCUCMD_RXFIFO_EMPTY |
1350 1.1 sevan RGE_MCUCMD_TXFIFO_EMPTY)) == (RGE_MCUCMD_RXFIFO_EMPTY |
1351 1.1 sevan RGE_MCUCMD_TXFIFO_EMPTY))
1352 1.1 sevan break;
1353 1.1 sevan }
1354 1.1 sevan
1355 1.1 sevan /* Soft reset. */
1356 1.1 sevan RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_RESET);
1357 1.1 sevan
1358 1.1 sevan for (i = 0; i < RGE_TIMEOUT; i++) {
1359 1.1 sevan DELAY(100);
1360 1.1 sevan if (!(RGE_READ_1(sc, RGE_CMD) & RGE_CMD_RESET))
1361 1.1 sevan break;
1362 1.1 sevan }
1363 1.1 sevan if (i == RGE_TIMEOUT)
1364 1.1 sevan printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);
1365 1.1 sevan }
1366 1.1 sevan
1367 1.1 sevan void
1368 1.1 sevan rge_iff(struct rge_softc *sc)
1369 1.1 sevan {
1370 1.2 sevan struct ifnet *ifp = &sc->sc_ec.ec_if;
1371 1.2 sevan struct ethercom *ac = &sc->sc_ec;
1372 1.1 sevan struct ether_multi *enm;
1373 1.1 sevan struct ether_multistep step;
1374 1.1 sevan uint32_t hashes[2];
1375 1.1 sevan uint32_t rxfilt;
1376 1.1 sevan int h = 0;
1377 1.1 sevan
1378 1.1 sevan rxfilt = RGE_READ_4(sc, RGE_RXCFG);
1379 1.1 sevan rxfilt &= ~(RGE_RXCFG_ALLPHYS | RGE_RXCFG_MULTI);
1380 1.1 sevan ifp->if_flags &= ~IFF_ALLMULTI;
1381 1.1 sevan
1382 1.1 sevan /*
1383 1.1 sevan * Always accept frames destined to our station address.
1384 1.1 sevan * Always accept broadcast frames.
1385 1.1 sevan */
1386 1.1 sevan rxfilt |= RGE_RXCFG_INDIV | RGE_RXCFG_BROAD;
1387 1.1 sevan
1388 1.1 sevan if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
1389 1.1 sevan ifp->if_flags |= IFF_ALLMULTI;
1390 1.1 sevan rxfilt |= RGE_RXCFG_MULTI;
1391 1.1 sevan if (ifp->if_flags & IFF_PROMISC)
1392 1.1 sevan rxfilt |= RGE_RXCFG_ALLPHYS;
1393 1.1 sevan hashes[0] = hashes[1] = 0xffffffff;
1394 1.1 sevan } else {
1395 1.1 sevan rxfilt |= RGE_RXCFG_MULTI;
1396 1.1 sevan /* Program new filter. */
1397 1.1 sevan memset(hashes, 0, sizeof(hashes));
1398 1.1 sevan
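/* The top six bits of the big-endian CRC of each address select one of 64 hash filter bits. */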
1399 1.1 sevan ETHER_FIRST_MULTI(step, ac, enm);
1400 1.1 sevan while (enm != NULL) {
1401 1.1 sevan h = ether_crc32_be(enm->enm_addrlo,
1402 1.1 sevan ETHER_ADDR_LEN) >> 26;
1403 1.1 sevan
1404 1.1 sevan if (h < 32)
1405 1.1 sevan hashes[0] |= (1 << h);
1406 1.1 sevan else
1407 1.1 sevan hashes[1] |= (1 << (h - 32));
1408 1.1 sevan
1409 1.1 sevan ETHER_NEXT_MULTI(step, enm);
1410 1.1 sevan }
1411 1.1 sevan }
1412 1.1 sevan
1413 1.1 sevan RGE_WRITE_4(sc, RGE_RXCFG, rxfilt);
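/* The hash words are written byte-swapped and in reverse order (MAR0 takes hashes[1]). */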
1414 1.2 sevan RGE_WRITE_4(sc, RGE_MAR0, bswap32(hashes[1]));
1415 1.2 sevan RGE_WRITE_4(sc, RGE_MAR4, bswap32(hashes[0]));
1416 1.1 sevan }
1417 1.1 sevan
1418 1.1 sevan void
1419 1.1 sevan rge_set_phy_power(struct rge_softc *sc, int on)
1420 1.1 sevan {
1421 1.1 sevan int i;
1422 1.1 sevan
1423 1.1 sevan if (on) {
1424 1.1 sevan RGE_SETBIT_1(sc, RGE_PMCH, 0xc0);
1425 1.1 sevan
1426 1.1 sevan rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN);
1427 1.1 sevan
1428 1.1 sevan for (i = 0; i < RGE_TIMEOUT; i++) {
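/* XXX The masked value can only be 0 or 0x80, never 3, so this test never succeeds. */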
1429 1.1 sevan if ((rge_read_phy_ocp(sc, 0xa420) & 0x0080) == 3)
1430 1.1 sevan break;
1431 1.1 sevan DELAY(1000);
1432 1.1 sevan }
1433 1.1 sevan } else
1434 1.1 sevan rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_PDOWN);
1435 1.1 sevan }
1436 1.1 sevan
1437 1.1 sevan void
1438 1.1 sevan rge_phy_config(struct rge_softc *sc)
1439 1.1 sevan {
1440 1.1 sevan uint16_t mcode_ver, val;
1441 1.1 sevan int i;
1442 1.1 sevan static const uint16_t mac_cfg3_a438_value[] =
1443 1.1 sevan { 0x0043, 0x00a7, 0x00d6, 0x00ec, 0x00f6, 0x00fb, 0x00fd, 0x00ff,
1444 1.1 sevan 0x00bb, 0x0058, 0x0029, 0x0013, 0x0009, 0x0004, 0x0002 };
1445 1.1 sevan
1446 1.1 sevan static const uint16_t mac_cfg3_b88e_value[] =
1447 1.1 sevan { 0xc091, 0x6e12, 0xc092, 0x1214, 0xc094, 0x1516, 0xc096, 0x171b,
1448 1.1 sevan 0xc098, 0x1b1c, 0xc09a, 0x1f1f, 0xc09c, 0x2021, 0xc09e, 0x2224,
1449 1.1 sevan 0xc0a0, 0x2424, 0xc0a2, 0x2424, 0xc0a4, 0x2424, 0xc018, 0x0af2,
1450 1.1 sevan 0xc01a, 0x0d4a, 0xc01c, 0x0f26, 0xc01e, 0x118d, 0xc020, 0x14f3,
1451 1.1 sevan 0xc022, 0x175a, 0xc024, 0x19c0, 0xc026, 0x1c26, 0xc089, 0x6050,
1452 1.1 sevan 0xc08a, 0x5f6e, 0xc08c, 0x6e6e, 0xc08e, 0x6e6e, 0xc090, 0x6e12 };
1453 1.1 sevan
1454 1.1 sevan /* Read microcode version. */
1455 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x801e);
1456 1.1 sevan mcode_ver = rge_read_phy_ocp(sc, 0xa438);
1457 1.1 sevan
1458 1.1 sevan if (sc->rge_type == MAC_CFG2) {
1459 1.1 sevan for (i = 0; i < nitems(rtl8125_mac_cfg2_ephy); i++) {
1460 1.1 sevan rge_write_ephy(sc, rtl8125_mac_cfg2_ephy[i].reg,
1461 1.1 sevan rtl8125_mac_cfg2_ephy[i].val);
1462 1.1 sevan }
1463 1.1 sevan
1464 1.1 sevan if (mcode_ver != RGE_MAC_CFG2_MCODE_VER) {
1465 1.1 sevan /* Disable PHY config. */
1466 1.1 sevan RGE_CLRBIT_1(sc, 0xf2, 0x20);
1467 1.1 sevan DELAY(1000);
1468 1.1 sevan
1469 1.1 sevan rge_patch_phy_mcu(sc, 1);
1470 1.1 sevan
1471 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x8024);
1472 1.1 sevan rge_write_phy_ocp(sc, 0xa438, 0x8600);
1473 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0xb82e);
1474 1.1 sevan rge_write_phy_ocp(sc, 0xa438, 0x0001);
1475 1.1 sevan
1476 1.1 sevan RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
1477 1.1 sevan for (i = 0; i < nitems(rtl8125_mac_cfg2_mcu); i++) {
1478 1.1 sevan rge_write_phy_ocp(sc,
1479 1.1 sevan rtl8125_mac_cfg2_mcu[i].reg,
1480 1.1 sevan rtl8125_mac_cfg2_mcu[i].val);
1481 1.1 sevan }
1482 1.1 sevan RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);
1483 1.1 sevan
1484 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0);
1485 1.1 sevan rge_write_phy_ocp(sc, 0xa438, 0);
1486 1.1 sevan RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
1487 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x8024);
1488 1.1 sevan rge_write_phy_ocp(sc, 0xa438, 0);
1489 1.1 sevan
1490 1.1 sevan rge_patch_phy_mcu(sc, 0);
1491 1.1 sevan
1492 1.1 sevan /* Enable PHY config. */
1493 1.1 sevan RGE_SETBIT_1(sc, 0xf2, 0x20);
1494 1.1 sevan
1495 1.1 sevan /* Write microcode version. */
1496 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x801e);
1497 1.1 sevan rge_write_phy_ocp(sc, 0xa438, RGE_MAC_CFG2_MCODE_VER);
1498 1.1 sevan }
1499 1.1 sevan
1500 1.1 sevan val = rge_read_phy_ocp(sc, 0xad40) & ~0x03ff;
1501 1.1 sevan rge_write_phy_ocp(sc, 0xad40, val | 0x0084);
1502 1.1 sevan RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
1503 1.1 sevan val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
1504 1.1 sevan rge_write_phy_ocp(sc, 0xad16, val | 0x0006);
1505 1.1 sevan val = rge_read_phy_ocp(sc, 0xad32) & ~0x03ff;
1506 1.1 sevan rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
1507 1.1 sevan RGE_PHY_CLRBIT(sc, 0xac08, 0x1100);
1508 1.1 sevan val = rge_read_phy_ocp(sc, 0xac8a) & ~0xf000;
1509 1.1 sevan rge_write_phy_ocp(sc, 0xac8a, val | 0x7000);
1510 1.1 sevan RGE_PHY_SETBIT(sc, 0xad18, 0x0400);
1511 1.1 sevan RGE_PHY_SETBIT(sc, 0xad1a, 0x03ff);
1512 1.1 sevan RGE_PHY_SETBIT(sc, 0xad1c, 0x03ff);
1513 1.1 sevan
1514 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x80ea);
1515 1.1 sevan val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1516 1.1 sevan rge_write_phy_ocp(sc, 0xa438, val | 0xc400);
1517 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x80eb);
1518 1.1 sevan val = rge_read_phy_ocp(sc, 0xa438) & ~0x0700;
1519 1.1 sevan rge_write_phy_ocp(sc, 0xa438, val | 0x0300);
1520 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x80f8);
1521 1.1 sevan val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1522 1.1 sevan rge_write_phy_ocp(sc, 0xa438, val | 0x1c00);
1523 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x80f1);
1524 1.1 sevan val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1525 1.1 sevan rge_write_phy_ocp(sc, 0xa438, val | 0x3000);
1526 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x80fe);
1527 1.1 sevan val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1528 1.1 sevan rge_write_phy_ocp(sc, 0xa438, val | 0xa500);
1529 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x8102);
1530 1.1 sevan val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1531 1.1 sevan rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
1532 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x8105);
1533 1.1 sevan val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1534 1.1 sevan rge_write_phy_ocp(sc, 0xa438, val | 0x3300);
1535 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x8100);
1536 1.1 sevan val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1537 1.1 sevan rge_write_phy_ocp(sc, 0xa438, val | 0x7000);
1538 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x8104);
1539 1.1 sevan val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1540 1.1 sevan rge_write_phy_ocp(sc, 0xa438, val | 0xf000);
1541 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x8106);
1542 1.1 sevan val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1543 1.1 sevan rge_write_phy_ocp(sc, 0xa438, val | 0x6500);
1544 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x80dc);
1545 1.1 sevan val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1546 1.1 sevan rge_write_phy_ocp(sc, 0xa438, val | 0xed00);
1547 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x80df);
1548 1.1 sevan RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
1549 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x80e1);
1550 1.1 sevan RGE_PHY_CLRBIT(sc, 0xa438, 0x0100);
1551 1.1 sevan val = rge_read_phy_ocp(sc, 0xbf06) & ~0x003f;
1552 1.1 sevan rge_write_phy_ocp(sc, 0xbf06, val | 0x0038);
1553 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x819f);
1554 1.1 sevan rge_write_phy_ocp(sc, 0xa438, 0xd0b6);
1555 1.1 sevan rge_write_phy_ocp(sc, 0xbc34, 0x5555);
1556 1.1 sevan val = rge_read_phy_ocp(sc, 0xbf0a) & ~0x0e00;
1557 1.1 sevan rge_write_phy_ocp(sc, 0xbf0a, val | 0x0a00);
1558 1.1 sevan RGE_PHY_CLRBIT(sc, 0xa5c0, 0x0400);
1559 1.1 sevan RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1560 1.1 sevan } else {
1561 1.1 sevan for (i = 0; i < nitems(rtl8125_mac_cfg3_ephy); i++)
1562 1.1 sevan rge_write_ephy(sc, rtl8125_mac_cfg3_ephy[i].reg,
1563 1.1 sevan rtl8125_mac_cfg3_ephy[i].val);
1564 1.1 sevan
1565 1.1 sevan if (mcode_ver != RGE_MAC_CFG3_MCODE_VER) {
1566 1.1 sevan /* Disable PHY config. */
1567 1.1 sevan RGE_CLRBIT_1(sc, 0xf2, 0x20);
1568 1.1 sevan DELAY(1000);
1569 1.1 sevan
1570 1.1 sevan rge_patch_phy_mcu(sc, 1);
1571 1.1 sevan
1572 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x8024);
1573 1.1 sevan rge_write_phy_ocp(sc, 0xa438, 0x8601);
1574 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0xb82e);
1575 1.1 sevan rge_write_phy_ocp(sc, 0xa438, 0x0001);
1576 1.1 sevan
1577 1.1 sevan RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
1578 1.1 sevan for (i = 0; i < nitems(rtl8125_mac_cfg3_mcu); i++) {
1579 1.1 sevan rge_write_phy_ocp(sc,
1580 1.1 sevan rtl8125_mac_cfg3_mcu[i].reg,
1581 1.1 sevan rtl8125_mac_cfg3_mcu[i].val);
1582 1.1 sevan }
1583 1.1 sevan RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);
1584 1.1 sevan
1585 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0);
1586 1.1 sevan rge_write_phy_ocp(sc, 0xa438, 0);
1587 1.1 sevan RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
1588 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x8024);
1589 1.1 sevan rge_write_phy_ocp(sc, 0xa438, 0);
1590 1.1 sevan
1591 1.1 sevan rge_patch_phy_mcu(sc, 0);
1592 1.1 sevan
1593 1.1 sevan /* Enable PHY config. */
1594 1.1 sevan RGE_SETBIT_1(sc, 0xf2, 0x20);
1595 1.1 sevan
1596 1.1 sevan /* Write microcode version. */
1597 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x801e);
1598 1.1 sevan rge_write_phy_ocp(sc, 0xa438, RGE_MAC_CFG3_MCODE_VER);
1599 1.1 sevan }
1600 1.1 sevan
1601 1.1 sevan RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
1602 1.1 sevan val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
1603 1.1 sevan rge_write_phy_ocp(sc, 0xad16, val | 0x03ff);
1604 1.1 sevan val = rge_read_phy_ocp(sc, 0xad32) & ~0x003f;
1605 1.1 sevan rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
1606 1.1 sevan RGE_PHY_CLRBIT(sc, 0xac08, 0x1000);
1607 1.1 sevan RGE_PHY_CLRBIT(sc, 0xac08, 0x0100);
1608 1.1 sevan val = rge_read_phy_ocp(sc, 0xacc0) & ~0x0003;
1609 1.1 sevan rge_write_phy_ocp(sc, 0xacc0, val | 0x0002);
1610 1.1 sevan val = rge_read_phy_ocp(sc, 0xad40) & ~0x00e0;
1611 1.1 sevan rge_write_phy_ocp(sc, 0xad40, val | 0x0040);
1612 1.1 sevan val = rge_read_phy_ocp(sc, 0xad40) & ~0x0007;
1613 1.1 sevan rge_write_phy_ocp(sc, 0xad40, val | 0x0004);
1614 1.1 sevan RGE_PHY_CLRBIT(sc, 0xac14, 0x0080);
1615 1.1 sevan RGE_PHY_CLRBIT(sc, 0xac80, 0x0300);
1616 1.1 sevan val = rge_read_phy_ocp(sc, 0xac5e) & ~0x0007;
1617 1.1 sevan rge_write_phy_ocp(sc, 0xac5e, val | 0x0002);
1618 1.1 sevan rge_write_phy_ocp(sc, 0xad4c, 0x00a8);
1619 1.1 sevan rge_write_phy_ocp(sc, 0xac5c, 0x01ff);
1620 1.1 sevan val = rge_read_phy_ocp(sc, 0xac8a) & ~0x00f0;
1621 1.1 sevan rge_write_phy_ocp(sc, 0xac8a, val | 0x0030);
1622 1.1 sevan rge_write_phy_ocp(sc, 0xb87c, 0x80a2);
1623 1.1 sevan rge_write_phy_ocp(sc, 0xb87e, 0x0153);
1624 1.1 sevan rge_write_phy_ocp(sc, 0xb87c, 0x809c);
1625 1.1 sevan rge_write_phy_ocp(sc, 0xb87e, 0x0153);
1626 1.1 sevan
1627 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x81b3);
1628 1.1 sevan for (i = 0; i < nitems(mac_cfg3_a438_value); i++)
1629 1.1 sevan rge_write_phy_ocp(sc, 0xa438, mac_cfg3_a438_value[i]);
1630 1.1 sevan for (i = 0; i < 26; i++)
1631 1.1 sevan rge_write_phy_ocp(sc, 0xa438, 0);
1632 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x8257);
1633 1.1 sevan rge_write_phy_ocp(sc, 0xa438, 0x020f);
1634 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x80ea);
1635 1.1 sevan rge_write_phy_ocp(sc, 0xa438, 0x7843);
1636 1.1 sevan
1637 1.1 sevan rge_patch_phy_mcu(sc, 1);
1638 1.1 sevan RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
1639 1.1 sevan RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
1640 1.1 sevan for (i = 0; i < nitems(mac_cfg3_b88e_value); i += 2) {
1641 1.1 sevan rge_write_phy_ocp(sc, 0xb88e, mac_cfg3_b88e_value[i]);
1642 1.1 sevan rge_write_phy_ocp(sc, 0xb890,
1643 1.1 sevan mac_cfg3_b88e_value[i + 1]);
1644 1.1 sevan }
1645 1.1 sevan RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
1646 1.1 sevan rge_patch_phy_mcu(sc, 0);
1647 1.1 sevan
1648 1.1 sevan RGE_PHY_SETBIT(sc, 0xd068, 0x2000);
1649 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x81a2);
1650 1.1 sevan RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
1651 1.1 sevan val = rge_read_phy_ocp(sc, 0xb54c) & ~0xff00;
1652 1.1 sevan rge_write_phy_ocp(sc, 0xb54c, val | 0xdb00);
1653 1.1 sevan RGE_PHY_CLRBIT(sc, 0xa454, 0x0001);
1654 1.1 sevan RGE_PHY_SETBIT(sc, 0xa5d4, 0x0020);
1655 1.1 sevan RGE_PHY_CLRBIT(sc, 0xad4e, 0x0010);
1656 1.1 sevan RGE_PHY_CLRBIT(sc, 0xa86a, 0x0001);
1657 1.1 sevan RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1658 1.1 sevan }
1659 1.1 sevan
1660 1.1 sevan /* Disable EEE. */
1661 1.1 sevan RGE_MAC_CLRBIT(sc, 0xe040, 0x0003);
1662 1.1 sevan RGE_MAC_CLRBIT(sc, 0xeb62, 0x0006);
1663 1.1 sevan RGE_PHY_CLRBIT(sc, 0xa432, 0x0010);
1664 1.1 sevan RGE_PHY_CLRBIT(sc, 0xa5d0, 0x0006);
1665 1.1 sevan RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0001);
1666 1.1 sevan RGE_PHY_CLRBIT(sc, 0xa6d8, 0x0010);
1667 1.1 sevan RGE_PHY_CLRBIT(sc, 0xa428, 0x0080);
1668 1.1 sevan RGE_PHY_CLRBIT(sc, 0xa4a2, 0x0200);
1669 1.1 sevan
1670 1.1 sevan rge_patch_phy_mcu(sc, 1);
1671 1.1 sevan RGE_MAC_CLRBIT(sc, 0xe052, 0x0001);
1672 1.1 sevan RGE_PHY_CLRBIT(sc, 0xa442, 0x3000);
1673 1.1 sevan RGE_PHY_CLRBIT(sc, 0xa430, 0x8000);
1674 1.1 sevan rge_patch_phy_mcu(sc, 0);
1675 1.1 sevan }
1676 1.1 sevan
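/*
 * Program the station address.  The writes are bracketed by setting and
 * clearing the WRITECFG bit in the EEPROM command register to unlock the
 * MAC address registers.
 */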
1677 1.1 sevan void
1678 1.1 sevan rge_set_macaddr(struct rge_softc *sc, const uint8_t *addr)
1679 1.1 sevan {
1680 1.1 sevan RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
1681 1.1 sevan RGE_WRITE_4(sc, RGE_MAC0,
1682 1.1 sevan addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
1683 1.1 sevan RGE_WRITE_4(sc, RGE_MAC4,
1684 1.1 sevan addr[5] << 8 | addr[4]);
1685 1.1 sevan RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
1686 1.1 sevan }
1687 1.1 sevan
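/*
 * Read the current station address from the ID registers.
 */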
1688 1.1 sevan void
1689 1.1 sevan rge_get_macaddr(struct rge_softc *sc, uint8_t *addr)
1690 1.1 sevan {
1691 1.1 sevan *(uint32_t *)&addr[0] = RGE_READ_4(sc, RGE_ADDR0);
1692 1.1 sevan *(uint16_t *)&addr[4] = RGE_READ_2(sc, RGE_ADDR1);
1693 1.1 sevan }
1694 1.1 sevan
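/*
 * Basic MAC initialization: clear the PME status and PCIe clock-request
 * bits, disable UPS, reset the MAC MCU patch registers (loading the bug-fix
 * patch code on MAC_CFG3 chips), disable PHY power saving and set the PCIe
 * uncorrectable error status bit.
 */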
1695 1.1 sevan void
1696 1.1 sevan rge_hw_init(struct rge_softc *sc)
1697 1.1 sevan {
1698 1.1 sevan int i;
1699 1.1 sevan
1700 1.1 sevan RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
1701 1.1 sevan RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
1702 1.1 sevan RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
1703 1.1 sevan RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
1704 1.1 sevan RGE_CLRBIT_1(sc, 0xf1, 0x80);
1705 1.1 sevan
1706 1.1 sevan /* Disable UPS. */
1707 1.1 sevan RGE_MAC_CLRBIT(sc, 0xd40a, 0x0010);
1708 1.1 sevan
1709 1.1 sevan /* Configure MAC MCU. */
1710 1.1 sevan rge_write_mac_ocp(sc, 0xfc38, 0);
1711 1.1 sevan
1712 1.1 sevan for (i = 0xfc28; i < 0xfc38; i += 2)
1713 1.1 sevan rge_write_mac_ocp(sc, i, 0);
1714 1.1 sevan
1715 1.1 sevan DELAY(3000);
1716 1.1 sevan rge_write_mac_ocp(sc, 0xfc26, 0);
1717 1.1 sevan
1718 1.1 sevan if (sc->rge_type == MAC_CFG3) {
1719 1.1 sevan for (i = 0; i < nitems(rtl8125_def_bps); i++)
1720 1.1 sevan rge_write_mac_ocp(sc, rtl8125_def_bps[i].reg,
1721 1.1 sevan rtl8125_def_bps[i].val);
1722 1.1 sevan }
1723 1.1 sevan
1724 1.1 sevan /* Disable PHY power saving. */
1725 1.1 sevan rge_disable_phy_ocp_pwrsave(sc);
1726 1.1 sevan
1727 1.1 sevan /* Set PCIe uncorrectable error status. */
1728 1.1 sevan rge_write_csi(sc, 0x108,
1729 1.1 sevan rge_read_csi(sc, 0x108) | 0x00100000);
1730 1.1 sevan }
1731 1.1 sevan
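/*
 * Disable PHY power saving by forcing OCP register 0xc416 to 0x0500, with
 * the PHY MCU held in patch mode while the register is rewritten.
 */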
1732 1.1 sevan void
1733 1.1 sevan rge_disable_phy_ocp_pwrsave(struct rge_softc *sc)
1734 1.1 sevan {
1735 1.1 sevan if (rge_read_phy_ocp(sc, 0xc416) != 0x0500) {
1736 1.1 sevan rge_patch_phy_mcu(sc, 1);
1737 1.1 sevan rge_write_phy_ocp(sc, 0xc416, 0);
1738 1.1 sevan rge_write_phy_ocp(sc, 0xc416, 0x0500);
1739 1.1 sevan rge_patch_phy_mcu(sc, 0);
1740 1.1 sevan }
1741 1.1 sevan }
1742 1.1 sevan
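/*
 * Request or release PHY MCU patch mode and wait for the PHY to acknowledge
 * by setting the patch-ready bit in OCP register 0xb800.
 */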
1743 1.1 sevan void
1744 1.1 sevan rge_patch_phy_mcu(struct rge_softc *sc, int set)
1745 1.1 sevan {
1746 1.1 sevan uint16_t val;
1747 1.1 sevan int i;
1748 1.1 sevan
1749 1.1 sevan if (set)
1750 1.1 sevan RGE_PHY_SETBIT(sc, 0xb820, 0x0010);
1751 1.1 sevan else
1752 1.1 sevan RGE_PHY_CLRBIT(sc, 0xb820, 0x0010);
1753 1.1 sevan
1754 1.1 sevan for (i = 0; i < 1000; i++) {
1755 1.1 sevan val = rge_read_phy_ocp(sc, 0xb800) & 0x0040;
1756 1.1 sevan DELAY(100);
1757 1.1 sevan if (val == 0x0040)
1758 1.1 sevan break;
1759 1.1 sevan }
1760 1.1 sevan if (i == 1000)
1761 1.1 sevan printf("%s: timeout waiting to patch phy mcu\n",
1762 1.1 sevan sc->sc_dev.dv_xname);
1763 1.1 sevan }
1764 1.1 sevan
1765 1.1 sevan void
1766 1.1 sevan rge_add_media_types(struct rge_softc *sc)
1767 1.1 sevan {
1768 1.1 sevan ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
1769 1.1 sevan ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
1770 1.1 sevan ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
1771 1.1 sevan ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
1772 1.1 sevan ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T, 0, NULL);
1773 1.1 sevan ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
1774 1.1 sevan ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
1775 1.1 sevan ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX, 0, NULL);
1776 1.1 sevan }
1777 1.1 sevan
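/*
 * Select the interrupt mitigation scheme: plain per-packet interrupts
 * (RGE_IMTYPE_NONE) or simulated moderation driven by the chip's timer
 * interrupt (RGE_IMTYPE_SIM).
 */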
1778 1.1 sevan void
1779 1.1 sevan rge_config_imtype(struct rge_softc *sc, int imtype)
1780 1.1 sevan {
1781 1.1 sevan switch (imtype) {
1782 1.1 sevan case RGE_IMTYPE_NONE:
1783 1.1 sevan sc->rge_intrs = RGE_INTRS;
1784 1.1 sevan sc->rge_rx_ack = RGE_ISR_RX_OK | RGE_ISR_RX_DESC_UNAVAIL |
1785 1.1 sevan RGE_ISR_RX_FIFO_OFLOW;
1786 1.1 sevan sc->rge_tx_ack = RGE_ISR_TX_OK;
1787 1.1 sevan break;
1788 1.1 sevan case RGE_IMTYPE_SIM:
1789 1.1 sevan sc->rge_intrs = RGE_INTRS_TIMER;
1790 1.1 sevan sc->rge_rx_ack = RGE_ISR_PCS_TIMEOUT;
1791 1.1 sevan sc->rge_tx_ack = RGE_ISR_PCS_TIMEOUT;
1792 1.1 sevan break;
1793 1.1 sevan default:
1794 1.1 sevan panic("%s: unknown imtype %d", sc->sc_dev.dv_xname, imtype);
1795 1.1 sevan }
1796 1.1 sevan }
1797 1.1 sevan
1798 1.1 sevan void
1799 1.1 sevan rge_disable_sim_im(struct rge_softc *sc)
1800 1.1 sevan {
1801 1.1 sevan RGE_WRITE_4(sc, RGE_TIMERINT, 0);
1802 1.1 sevan sc->rge_timerintr = 0;
1803 1.1 sevan }
1804 1.1 sevan
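/*
 * Arm the timer interrupt used for simulated interrupt moderation.
 */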
1805 1.1 sevan void
1806 1.1 sevan rge_setup_sim_im(struct rge_softc *sc)
1807 1.1 sevan {
1808 1.1 sevan RGE_WRITE_4(sc, RGE_TIMERINT, 0x2600);
1809 1.1 sevan RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
1810 1.1 sevan sc->rge_timerintr = 1;
1811 1.1 sevan }
1812 1.1 sevan
1813 1.1 sevan void
1814 1.1 sevan rge_setup_intr(struct rge_softc *sc, int imtype)
1815 1.1 sevan {
1816 1.1 sevan rge_config_imtype(sc, imtype);
1817 1.1 sevan
1818 1.1 sevan /* Enable interrupts. */
1819 1.1 sevan RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
1820 1.1 sevan
1821 1.1 sevan switch (imtype) {
1822 1.1 sevan case RGE_IMTYPE_NONE:
1823 1.1 sevan rge_disable_sim_im(sc);
1824 1.1 sevan break;
1825 1.1 sevan case RGE_IMTYPE_SIM:
1826 1.1 sevan rge_setup_sim_im(sc);
1827 1.1 sevan break;
1828 1.1 sevan default:
1829 1.1 sevan panic("%s: unknown imtype %d", sc->sc_dev.dv_xname, imtype);
1830 1.1 sevan }
1831 1.1 sevan }
1832 1.1 sevan
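/*
 * Take the chip out of out-of-band (management firmware) mode so the driver
 * owns the hardware: clear the receive filter, disable RealWoW and OOB, and
 * wait for the firmware handshake to complete.
 */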
1833 1.1 sevan void
1834 1.1 sevan rge_exit_oob(struct rge_softc *sc)
1835 1.1 sevan {
1836 1.1 sevan int i;
1837 1.1 sevan
1838 1.1 sevan RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
1839 1.1 sevan RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
1840 1.1 sevan RGE_RXCFG_ERRPKT);
1841 1.1 sevan
1842 1.1 sevan /* Disable RealWoW. */
1843 1.1 sevan rge_write_mac_ocp(sc, 0xc0bc, 0x00ff);
1844 1.1 sevan
1845 1.1 sevan rge_reset(sc);
1846 1.1 sevan
1847 1.1 sevan /* Disable OOB. */
1848 1.1 sevan RGE_CLRBIT_1(sc, RGE_MCUCMD, RGE_MCUCMD_IS_OOB);
1849 1.1 sevan
1850 1.1 sevan RGE_MAC_CLRBIT(sc, 0xe8de, 0x4000);
1851 1.1 sevan
1852 1.1 sevan for (i = 0; i < 10; i++) {
1853 1.1 sevan DELAY(100);
1854 1.1 sevan if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
1855 1.1 sevan break;
1856 1.1 sevan }
1857 1.1 sevan
1858 1.1 sevan rge_write_mac_ocp(sc, 0xc0aa, 0x07d0);
1859 1.1 sevan rge_write_mac_ocp(sc, 0xc0a6, 0x0150);
1860 1.1 sevan rge_write_mac_ocp(sc, 0xc01e, 0x5555);
1861 1.1 sevan
1862 1.1 sevan for (i = 0; i < 10; i++) {
1863 1.1 sevan DELAY(100);
1864 1.1 sevan if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
1865 1.1 sevan break;
1866 1.1 sevan }
1867 1.1 sevan
1868 1.1 sevan if (rge_read_mac_ocp(sc, 0xd42c) & 0x0100) {
1869 1.1 sevan for (i = 0; i < RGE_TIMEOUT; i++) {
1870 1.1 sevan 			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 2)
1871 1.1 sevan break;
1872 1.1 sevan DELAY(1000);
1873 1.1 sevan }
1874 1.1 sevan RGE_MAC_CLRBIT(sc, 0xd408, 0x0100);
1875 1.1 sevan RGE_PHY_CLRBIT(sc, 0xa468, 0x000a);
1876 1.1 sevan }
1877 1.1 sevan }
1878 1.1 sevan
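/*
 * CSI register access (indirect access to the PCIe configuration space):
 * post the address and data, then poll the busy flag.
 */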
1879 1.1 sevan void
1880 1.1 sevan rge_write_csi(struct rge_softc *sc, uint32_t reg, uint32_t val)
1881 1.1 sevan {
1882 1.1 sevan int i;
1883 1.1 sevan
1884 1.1 sevan RGE_WRITE_4(sc, RGE_CSIDR, val);
1885 1.1 sevan RGE_WRITE_4(sc, RGE_CSIAR, (1 << 16) | (reg & RGE_CSIAR_ADDR_MASK) |
1886 1.1 sevan (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT) | RGE_CSIAR_BUSY);
1887 1.1 sevan
1888 1.1 sevan for (i = 0; i < 10; i++) {
1889 1.1 sevan DELAY(100);
1890 1.1 sevan if (!(RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY))
1891 1.1 sevan break;
1892 1.1 sevan }
1893 1.1 sevan
1894 1.1 sevan DELAY(20);
1895 1.1 sevan }
1896 1.1 sevan
1897 1.1 sevan uint32_t
1898 1.1 sevan rge_read_csi(struct rge_softc *sc, uint32_t reg)
1899 1.1 sevan {
1900 1.1 sevan int i;
1901 1.1 sevan
1902 1.1 sevan RGE_WRITE_4(sc, RGE_CSIAR, (1 << 16) | (reg & RGE_CSIAR_ADDR_MASK) |
1903 1.1 sevan (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT));
1904 1.1 sevan
1905 1.1 sevan for (i = 0; i < 10; i++) {
1906 1.1 sevan DELAY(100);
1907 1.1 sevan if (RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY)
1908 1.1 sevan break;
1909 1.1 sevan }
1910 1.1 sevan
1911 1.1 sevan DELAY(20);
1912 1.1 sevan
1913 1.1 sevan return (RGE_READ_4(sc, RGE_CSIDR));
1914 1.1 sevan }
1915 1.1 sevan
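/*
 * MAC OCP register access.  The halved 16-bit register address and the data
 * are packed into a single write to the MACOCP mailbox register.
 */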
1916 1.1 sevan void
1917 1.1 sevan rge_write_mac_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
1918 1.1 sevan {
1919 1.1 sevan uint32_t tmp;
1920 1.1 sevan
1921 1.1 sevan tmp = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
1922 1.1 sevan tmp += val;
1923 1.1 sevan tmp |= RGE_MACOCP_BUSY;
1924 1.1 sevan RGE_WRITE_4(sc, RGE_MACOCP, tmp);
1925 1.1 sevan }
1926 1.1 sevan
1927 1.1 sevan uint16_t
1928 1.1 sevan rge_read_mac_ocp(struct rge_softc *sc, uint16_t reg)
1929 1.1 sevan {
1930 1.1 sevan uint32_t val;
1931 1.1 sevan
1932 1.1 sevan val = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
1933 1.1 sevan RGE_WRITE_4(sc, RGE_MACOCP, val);
1934 1.1 sevan
1935 1.1 sevan return (RGE_READ_4(sc, RGE_MACOCP) & RGE_MACOCP_DATA_MASK);
1936 1.1 sevan }
1937 1.1 sevan
1938 1.1 sevan void
1939 1.1 sevan rge_write_ephy(struct rge_softc *sc, uint16_t reg, uint16_t val)
1940 1.1 sevan {
1941 1.1 sevan uint32_t tmp;
1942 1.1 sevan int i;
1943 1.1 sevan
1944 1.1 sevan tmp = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
1945 1.1 sevan tmp |= RGE_EPHYAR_BUSY | (val & RGE_EPHYAR_DATA_MASK);
1946 1.1 sevan RGE_WRITE_4(sc, RGE_EPHYAR, tmp);
1947 1.1 sevan
1948 1.1 sevan for (i = 0; i < 10; i++) {
1949 1.1 sevan DELAY(100);
1950 1.1 sevan if (!(RGE_READ_4(sc, RGE_EPHYAR) & RGE_EPHYAR_BUSY))
1951 1.1 sevan break;
1952 1.1 sevan }
1953 1.1 sevan
1954 1.1 sevan DELAY(20);
1955 1.1 sevan }
1956 1.1 sevan
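/*
 * Write a MII PHY register by mapping the (address, register) pair onto the
 * corresponding PHY OCP address.
 */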
1957 1.1 sevan void
1958 1.1 sevan rge_write_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg, uint16_t val)
1959 1.1 sevan {
1960 1.1 sevan uint16_t off, phyaddr;
1961 1.1 sevan
1962 1.1 sevan phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
1963 1.1 sevan phyaddr <<= 4;
1964 1.1 sevan
1965 1.1 sevan off = addr ? reg : 0x10 + (reg % 8);
1966 1.1 sevan
1967 1.1 sevan phyaddr += (off - 16) << 1;
1968 1.1 sevan
1969 1.1 sevan rge_write_phy_ocp(sc, phyaddr, val);
1970 1.1 sevan }
1971 1.1 sevan
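/*
 * PHY OCP register access: post the address and data to the PHYOCP mailbox
 * register and poll the busy flag.
 */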
1972 1.1 sevan void
1973 1.1 sevan rge_write_phy_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
1974 1.1 sevan {
1975 1.1 sevan uint32_t tmp;
1976 1.1 sevan int i;
1977 1.1 sevan
1978 1.1 sevan tmp = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
1979 1.1 sevan tmp |= RGE_PHYOCP_BUSY | val;
1980 1.1 sevan RGE_WRITE_4(sc, RGE_PHYOCP, tmp);
1981 1.1 sevan
1982 1.1 sevan for (i = 0; i < RGE_TIMEOUT; i++) {
1983 1.1 sevan DELAY(1);
1984 1.1 sevan if (!(RGE_READ_4(sc, RGE_PHYOCP) & RGE_PHYOCP_BUSY))
1985 1.1 sevan break;
1986 1.1 sevan }
1987 1.1 sevan }
1988 1.1 sevan
1989 1.1 sevan uint16_t
1990 1.1 sevan rge_read_phy_ocp(struct rge_softc *sc, uint16_t reg)
1991 1.1 sevan {
1992 1.1 sevan uint32_t val;
1993 1.1 sevan int i;
1994 1.1 sevan
1995 1.1 sevan val = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
1996 1.1 sevan RGE_WRITE_4(sc, RGE_PHYOCP, val);
1997 1.1 sevan
1998 1.1 sevan for (i = 0; i < RGE_TIMEOUT; i++) {
1999 1.1 sevan DELAY(1);
2000 1.1 sevan val = RGE_READ_4(sc, RGE_PHYOCP);
2001 1.1 sevan if (val & RGE_PHYOCP_BUSY)
2002 1.1 sevan break;
2003 1.1 sevan }
2004 1.1 sevan
2005 1.1 sevan return (val & RGE_PHYOCP_DATA_MASK);
2006 1.1 sevan }
2007 1.1 sevan
2008 1.1 sevan int
2009 1.1 sevan rge_get_link_status(struct rge_softc *sc)
2010 1.1 sevan {
2011 1.1 sevan return ((RGE_READ_2(sc, RGE_PHYSTAT) & RGE_PHYSTAT_LINK) ? 1 : 0);
2012 1.1 sevan }
2013 1.1 sevan
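/*
 * Deferred transmit start, run from the workqueue: poke the chip to begin
 * fetching transmit descriptors.
 */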
2014 1.1 sevan void
2015 1.3 sevan rge_txstart(struct work *wk, void *arg)
2016 1.1 sevan {
2017 1.1 sevan struct rge_softc *sc = arg;
2018 1.1 sevan
2019 1.1 sevan RGE_WRITE_2(sc, RGE_TXSTART, RGE_TXSTART_START);
2020 1.1 sevan }
2021 1.1 sevan
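/*
 * One-second periodic tick: update the link state and reschedule
 * (sc_timeout is assumed to be the driver's callout).
 */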
2022 1.1 sevan void
2023 1.1 sevan rge_tick(void *arg)
2024 1.1 sevan {
2025 1.1 sevan struct rge_softc *sc = arg;
2026 1.1 sevan int s;
2027 1.1 sevan
2028 1.1 sevan s = splnet();
2029 1.1 sevan rge_link_state(sc);
2030 1.1 sevan splx(s);
2031 1.1 sevan
2032 1.1 sevan 	callout_schedule(&sc->sc_timeout, hz);
2033 1.1 sevan }
2034 1.1 sevan
2035 1.1 sevan void
2036 1.1 sevan rge_link_state(struct rge_softc *sc)
2037 1.1 sevan {
2038 1.2 sevan struct ifnet *ifp = &sc->sc_ec.ec_if;
2039 1.1 sevan int link = LINK_STATE_DOWN;
2040 1.1 sevan
2041 1.1 sevan if (rge_get_link_status(sc))
2042 1.1 sevan link = LINK_STATE_UP;
2043 1.1 sevan
2044 1.1 sevan if (ifp->if_link_state != link) {
2045 1.1 sevan ifp->if_link_state = link;
2046 1.2 sevan 		if_link_state_change(ifp, link);
2047 1.1 sevan }
2048 1.1 sevan }
2049