     1 1.16 jakllsch /*	$NetBSD: if_rge.c,v 1.16 2021/01/27 14:25:22 jakllsch Exp $	*/
2 1.15 jakllsch /* $OpenBSD: if_rge.c,v 1.4 2020/07/10 13:26:38 patrick Exp $ */
3 1.1 sevan
4 1.1 sevan /*
5 1.1 sevan * Copyright (c) 2019 Kevin Lo <kevlo (at) openbsd.org>
6 1.1 sevan *
7 1.1 sevan * Permission to use, copy, modify, and distribute this software for any
8 1.1 sevan * purpose with or without fee is hereby granted, provided that the above
9 1.1 sevan * copyright notice and this permission notice appear in all copies.
10 1.1 sevan *
11 1.1 sevan * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 1.1 sevan * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 1.1 sevan * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 1.1 sevan * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 1.1 sevan * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 1.1 sevan * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 1.1 sevan * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 1.1 sevan */
19 1.1 sevan
20 1.2 sevan #include <sys/cdefs.h>
21 1.16 jakllsch __KERNEL_RCSID(0, "$NetBSD: if_rge.c,v 1.16 2021/01/27 14:25:22 jakllsch Exp $");
22 1.2 sevan
23 1.2 sevan /* #include "vlan.h" Sevan */
24 1.2 sevan
25 1.2 sevan #include <sys/types.h>
26 1.1 sevan
27 1.1 sevan #include <sys/param.h>
28 1.1 sevan #include <sys/systm.h>
29 1.1 sevan #include <sys/sockio.h>
30 1.1 sevan #include <sys/mbuf.h>
31 1.1 sevan #include <sys/malloc.h>
32 1.1 sevan #include <sys/kernel.h>
33 1.1 sevan #include <sys/socket.h>
34 1.1 sevan #include <sys/device.h>
35 1.1 sevan #include <sys/endian.h>
36 1.3 sevan #include <sys/callout.h>
37 1.3 sevan #include <sys/workqueue.h>
38 1.1 sevan
39 1.1 sevan #include <net/if.h>
40 1.2 sevan
41 1.2 sevan #include <net/if_dl.h>
42 1.2 sevan #include <net/if_ether.h>
43 1.2 sevan
44 1.1 sevan #include <net/if_media.h>
45 1.1 sevan
46 1.1 sevan #include <netinet/in.h>
48 1.1 sevan
49 1.1 sevan #if NBPFILTER > 0
50 1.1 sevan #include <net/bpf.h>
51 1.1 sevan #endif
52 1.1 sevan
53 1.2 sevan #include <sys/bus.h>
54 1.1 sevan #include <machine/intr.h>
55 1.1 sevan
56 1.1 sevan #include <dev/mii/mii.h>
57 1.1 sevan
58 1.1 sevan #include <dev/pci/pcivar.h>
59 1.1 sevan #include <dev/pci/pcireg.h>
60 1.1 sevan #include <dev/pci/pcidevs.h>
61 1.1 sevan
62 1.1 sevan #include <dev/pci/if_rgereg.h>
63 1.1 sevan
64 1.2 sevan #ifdef __NetBSD__
65 1.2 sevan #define letoh32 htole32
66 1.2 sevan #define nitems(x) __arraycount(x)
67 1.2 sevan #define MBUF_LIST_INITIALIZER() { NULL, NULL, 0 }
68 1.2 sevan struct mbuf_list {
69 1.2 sevan struct mbuf *ml_head;
70 1.2 sevan struct mbuf *ml_tail;
71 1.2 sevan u_int ml_len;
72 1.2 sevan };
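
/*
 * Minimal mbuf_list helper used by rge_rxeof() below.  This is only a
 * sketch of the OpenBSD ml_enqueue() semantics (append to a singly linked
 * packet list); handing the finished list to if_input() still needs a
 * NetBSD-side equivalent.
 */
static void
ml_enqueue(struct mbuf_list *ml, struct mbuf *m)
{

	m->m_nextpkt = NULL;
	if (ml->ml_tail == NULL)
		ml->ml_head = m;
	else
		ml->ml_tail->m_nextpkt = m;
	ml->ml_tail = m;
	ml->ml_len++;
}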
73 1.7 sevan
74 1.7 sevan static struct mbuf *
75 1.7 sevan MCLGETI(struct rge_softc *sc __unused, int how,
76 1.7 sevan struct ifnet *ifp __unused, u_int size)
77 1.7 sevan {
78 1.7 sevan struct mbuf *m;
79 1.7 sevan
80 1.7 sevan MGETHDR(m, how, MT_DATA);
81 1.7 sevan if (m == NULL)
82 1.7 sevan return NULL;
83 1.7 sevan
84 1.7 sevan MEXTMALLOC(m, size, how);
85 1.7 sevan if ((m->m_flags & M_EXT) == 0) {
86 1.7 sevan m_freem(m);
87 1.7 sevan return NULL;
88 1.7 sevan }
89 1.7 sevan return m;
90 1.7 sevan }
91 1.7 sevan
92 1.3 sevan #ifdef NET_MPSAFE
93 1.3 sevan #define RGE_MPSAFE 1
94 1.3 sevan #define CALLOUT_FLAGS CALLOUT_MPSAFE
95 1.3 sevan #else
96 1.3 sevan #define CALLOUT_FLAGS 0
97 1.3 sevan #endif
98 1.2 sevan #endif
99 1.2 sevan
100 1.2 sevan static int rge_match(device_t, cfdata_t, void *);
101 1.5 skrll static void rge_attach(device_t, device_t, void *);
102 1.1 sevan int rge_intr(void *);
103 1.1 sevan int rge_encap(struct rge_softc *, struct mbuf *, int);
104 1.2 sevan int rge_ioctl(struct ifnet *, u_long, void *);
105 1.2 sevan void rge_start(struct ifnet *);
106 1.1 sevan void rge_watchdog(struct ifnet *);
107 1.1 sevan int rge_init(struct ifnet *);
108 1.1 sevan void rge_stop(struct ifnet *);
109 1.1 sevan int rge_ifmedia_upd(struct ifnet *);
110 1.1 sevan void rge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
111 1.1 sevan int rge_allocmem(struct rge_softc *);
112 1.1 sevan int rge_newbuf(struct rge_softc *, int);
113 1.1 sevan void rge_discard_rxbuf(struct rge_softc *, int);
114 1.1 sevan int rge_rx_list_init(struct rge_softc *);
115 1.1 sevan void rge_tx_list_init(struct rge_softc *);
116 1.1 sevan int rge_rxeof(struct rge_softc *);
117 1.1 sevan int rge_txeof(struct rge_softc *);
118 1.1 sevan void rge_reset(struct rge_softc *);
119 1.1 sevan void rge_iff(struct rge_softc *);
120 1.1 sevan void rge_set_phy_power(struct rge_softc *, int);
121 1.1 sevan void rge_phy_config(struct rge_softc *);
122 1.1 sevan void rge_set_macaddr(struct rge_softc *, const uint8_t *);
123 1.1 sevan void rge_get_macaddr(struct rge_softc *, uint8_t *);
124 1.1 sevan void rge_hw_init(struct rge_softc *);
125 1.1 sevan void rge_disable_phy_ocp_pwrsave(struct rge_softc *);
126 1.1 sevan void rge_patch_phy_mcu(struct rge_softc *, int);
127 1.1 sevan void rge_add_media_types(struct rge_softc *);
128 1.1 sevan void rge_config_imtype(struct rge_softc *, int);
129 1.1 sevan void rge_disable_sim_im(struct rge_softc *);
130 1.1 sevan void rge_setup_sim_im(struct rge_softc *);
131 1.1 sevan void rge_setup_intr(struct rge_softc *, int);
132 1.1 sevan void rge_exit_oob(struct rge_softc *);
133 1.1 sevan void rge_write_csi(struct rge_softc *, uint32_t, uint32_t);
134 1.1 sevan uint32_t rge_read_csi(struct rge_softc *, uint32_t);
135 1.1 sevan void rge_write_mac_ocp(struct rge_softc *, uint16_t, uint16_t);
136 1.1 sevan uint16_t rge_read_mac_ocp(struct rge_softc *, uint16_t);
137 1.1 sevan void rge_write_ephy(struct rge_softc *, uint16_t, uint16_t);
138 1.1 sevan void rge_write_phy(struct rge_softc *, uint16_t, uint16_t, uint16_t);
139 1.1 sevan void rge_write_phy_ocp(struct rge_softc *, uint16_t, uint16_t);
140 1.1 sevan uint16_t rge_read_phy_ocp(struct rge_softc *, uint16_t);
141 1.1 sevan int rge_get_link_status(struct rge_softc *);
142 1.3 sevan void rge_txstart(struct work *, void *);
143 1.1 sevan void rge_tick(void *);
144 1.1 sevan void rge_link_state(struct rge_softc *);
145 1.1 sevan
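/*
 * Register/value tables expanded from the RTL8125_* macros in if_rgereg.h;
 * they are written out to the MAC/EPHY/PHY microcode during hardware and
 * PHY initialization.
 */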
146 1.1 sevan static const struct {
147 1.1 sevan uint16_t reg;
148 1.1 sevan uint16_t val;
149 1.1 sevan } rtl8125_def_bps[] = {
150 1.1 sevan RTL8125_DEF_BPS
151 1.1 sevan }, rtl8125_mac_cfg2_ephy[] = {
152 1.1 sevan RTL8125_MAC_CFG2_EPHY
153 1.1 sevan }, rtl8125_mac_cfg2_mcu[] = {
154 1.1 sevan RTL8125_MAC_CFG2_MCU
155 1.1 sevan }, rtl8125_mac_cfg3_ephy[] = {
156 1.1 sevan RTL8125_MAC_CFG3_EPHY
157 1.1 sevan }, rtl8125_mac_cfg3_mcu[] = {
158 1.1 sevan RTL8125_MAC_CFG3_MCU
159 1.1 sevan };
160 1.1 sevan
161 1.2 sevan CFATTACH_DECL_NEW(rge, sizeof(struct rge_softc), rge_match, rge_attach,
162 1.2 sevan NULL, NULL); /* Sevan - detach function? */
163 1.1 sevan
164 1.2 sevan extern struct cfdriver rge_cd;
165 1.1 sevan
166 1.2 sevan static const struct {
167 1.2 sevan pci_vendor_id_t vendor;
168 1.2 sevan pci_product_id_t product;
   169 1.2 sevan } rge_devices[] = {
170 1.1 sevan { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_E3000 },
171 1.2 sevan { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8125 },
172 1.1 sevan };
173 1.1 sevan
174 1.2 sevan static int
175 1.2 sevan rge_match(device_t parent, cfdata_t match, void *aux)
176 1.1 sevan {
   177 1.2 sevan 	struct pci_attach_args *pa = aux;
178 1.2 sevan int n;
179 1.2 sevan
   180 1.2 sevan 	for (n = 0; n < __arraycount(rge_devices); n++) {
181 1.2 sevan if (PCI_VENDOR(pa->pa_id) == rge_devices[n].vendor &&
182 1.2 sevan PCI_PRODUCT(pa->pa_id) == rge_devices[n].product)
183 1.2 sevan return 1;
184 1.2 sevan }
185 1.2 sevan
186 1.2 sevan return 0;
187 1.1 sevan }
188 1.1 sevan
   189 1.1 sevan static void
190 1.2 sevan rge_attach(device_t parent, device_t self, void *aux)
191 1.1 sevan {
192 1.11 sevan struct rge_softc *sc = device_private(self);
193 1.1 sevan struct pci_attach_args *pa = aux;
194 1.1 sevan pci_chipset_tag_t pc = pa->pa_pc;
195 1.1 sevan pci_intr_handle_t ih;
196 1.2 sevan char intrbuf[PCI_INTRSTR_LEN];
197 1.1 sevan const char *intrstr = NULL;
198 1.1 sevan struct ifnet *ifp;
199 1.1 sevan pcireg_t reg;
200 1.1 sevan uint32_t hwrev;
201 1.1 sevan uint8_t eaddr[ETHER_ADDR_LEN];
202 1.1 sevan int offset;
203 1.1 sevan
204 1.1 sevan pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
205 1.1 sevan
206 1.12 sevan sc->sc_dev = self;
207 1.12 sevan
208 1.5 skrll /*
209 1.1 sevan * Map control/status registers.
210 1.1 sevan */
211 1.1 sevan if (pci_mapreg_map(pa, RGE_PCI_BAR2, PCI_MAPREG_TYPE_MEM |
212 1.1 sevan PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->rge_btag, &sc->rge_bhandle,
213 1.2 sevan NULL, &sc->rge_bsize)) {
214 1.1 sevan if (pci_mapreg_map(pa, RGE_PCI_BAR1, PCI_MAPREG_TYPE_MEM |
215 1.1 sevan PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->rge_btag,
216 1.2 sevan &sc->rge_bhandle, NULL, &sc->rge_bsize)) {
217 1.1 sevan if (pci_mapreg_map(pa, RGE_PCI_BAR0, PCI_MAPREG_TYPE_IO,
218 1.1 sevan 0, &sc->rge_btag, &sc->rge_bhandle, NULL,
219 1.2 sevan &sc->rge_bsize)) {
220 1.13 sevan aprint_error(": can't map mem or i/o space\n");
221 1.1 sevan return;
222 1.1 sevan }
223 1.1 sevan }
224 1.1 sevan }
225 1.1 sevan
226 1.5 skrll /*
227 1.1 sevan * Allocate interrupt.
228 1.1 sevan */
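	/*
	 * XXX Both branches below call pci_intr_map(); the OpenBSD driver
	 * maps an MSI vector first (pci_intr_map_msi()), so RGE_FLAG_MSI is
	 * currently set whenever plain interrupt mapping succeeds.
	 */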
229 1.2 sevan if (pci_intr_map(pa, &ih) == 0)
230 1.1 sevan sc->rge_flags |= RGE_FLAG_MSI;
231 1.1 sevan else if (pci_intr_map(pa, &ih) != 0) {
232 1.13 sevan aprint_error(": couldn't map interrupt\n");
233 1.1 sevan return;
234 1.1 sevan }
235 1.2 sevan intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
236 1.2 sevan sc->sc_ih = pci_intr_establish_xname(pc, ih, IPL_NET, rge_intr,
237 1.14 sevan sc, device_xname(sc->sc_dev));
238 1.1 sevan if (sc->sc_ih == NULL) {
   239 1.13 sevan 		aprint_error_dev(sc->sc_dev, "couldn't establish interrupt");
   240 1.1 sevan 		if (intrstr != NULL)
   241 1.13 sevan 			aprint_error(" at %s", intrstr);
   242 1.13 sevan 		aprint_error("\n");
243 1.1 sevan return;
244 1.1 sevan }
245 1.13 sevan aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
246 1.1 sevan
247 1.9 thorpej if (pci_dma64_available(pa))
248 1.9 thorpej sc->sc_dmat = pa->pa_dmat64;
249 1.9 thorpej else
250 1.9 thorpej sc->sc_dmat = pa->pa_dmat;
251 1.9 thorpej
252 1.1 sevan sc->sc_pc = pa->pa_pc;
253 1.1 sevan sc->sc_tag = pa->pa_tag;
254 1.1 sevan
255 1.1 sevan /* Determine hardware revision */
256 1.1 sevan hwrev = RGE_READ_4(sc, RGE_TXCFG) & RGE_TXCFG_HWREV;
257 1.1 sevan switch (hwrev) {
258 1.1 sevan case 0x60800000:
259 1.1 sevan sc->rge_type = MAC_CFG2;
260 1.1 sevan break;
261 1.1 sevan case 0x60900000:
262 1.1 sevan sc->rge_type = MAC_CFG3;
263 1.1 sevan break;
264 1.1 sevan default:
265 1.13 sevan aprint_error(": unknown version 0x%08x\n", hwrev);
266 1.1 sevan return;
267 1.1 sevan }
268 1.1 sevan
269 1.1 sevan rge_config_imtype(sc, RGE_IMTYPE_SIM);
270 1.1 sevan
271 1.5 skrll /*
272 1.1 sevan * PCI Express check.
273 1.1 sevan */
274 1.1 sevan if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
275 1.1 sevan &offset, NULL)) {
276 1.2 sevan /* Disable PCIe ASPM. */
277 1.1 sevan reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
278 1.2 sevan offset + PCIE_LCSR);
   279 1.2 sevan 		reg &= ~(PCIE_LCSR_ASPM_L0S | PCIE_LCSR_ASPM_L1);
280 1.2 sevan pci_conf_write(pa->pa_pc, pa->pa_tag, offset + PCIE_LCSR,
281 1.1 sevan reg);
282 1.1 sevan }
283 1.1 sevan
284 1.1 sevan rge_exit_oob(sc);
285 1.1 sevan rge_hw_init(sc);
286 1.1 sevan
287 1.1 sevan rge_get_macaddr(sc, eaddr);
288 1.13 sevan aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
289 1.13 sevan ether_sprintf(eaddr));
290 1.1 sevan
291 1.2 sevan memcpy(sc->sc_enaddr, eaddr, ETHER_ADDR_LEN);
292 1.1 sevan
293 1.1 sevan rge_set_phy_power(sc, 1);
294 1.1 sevan rge_phy_config(sc);
295 1.1 sevan
296 1.1 sevan if (rge_allocmem(sc))
297 1.1 sevan return;
298 1.1 sevan
299 1.2 sevan ifp = &sc->sc_ec.ec_if;
300 1.1 sevan ifp->if_softc = sc;
301 1.14 sevan strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
302 1.1 sevan ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
303 1.2 sevan #ifdef RGE_MPSAFE
304 1.2 sevan ifp->if_xflags = IFEF_MPSAFE;
305 1.2 sevan #endif
306 1.1 sevan ifp->if_ioctl = rge_ioctl;
307 1.2 sevan ifp->if_start = rge_start;
308 1.1 sevan ifp->if_watchdog = rge_watchdog;
309 1.15 jakllsch ifq_set_maxlen(&ifp->if_snd, RGE_TX_LIST_CNT);
310 1.2 sevan ifp->if_mtu = RGE_JUMBO_MTU;
311 1.1 sevan
   312 1.2 sevan 	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
   313 1.2 sevan 	ifp->if_capabilities = IFCAP_CSUM_IPv4_Rx | IFCAP_CSUM_IPv4_Tx |
   314 1.2 sevan 	    IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_TCPv4_Tx |
   314 1.2 sevan 	    IFCAP_CSUM_UDPv4_Rx | IFCAP_CSUM_UDPv4_Tx;
315 1.1 sevan
316 1.1 sevan #if NVLAN > 0
   317 1.1 sevan 	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
318 1.1 sevan #endif
319 1.1 sevan
320 1.3 sevan callout_init(&sc->sc_timeout, CALLOUT_FLAGS);
321 1.3 sevan callout_setfunc(&sc->sc_timeout, rge_tick, sc);
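	/*
	 * XXX OpenBSD registers rge_txstart() as a task here; calling it
	 * directly at attach time is a placeholder until the deferred
	 * start hook is wired up.
	 */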
322 1.3 sevan rge_txstart(&sc->sc_task, sc);
323 1.1 sevan
324 1.1 sevan /* Initialize ifmedia structures. */
325 1.1 sevan ifmedia_init(&sc->sc_media, IFM_IMASK, rge_ifmedia_upd,
326 1.1 sevan rge_ifmedia_sts);
327 1.1 sevan rge_add_media_types(sc);
328 1.1 sevan ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
329 1.1 sevan ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
330 1.1 sevan sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media;
331 1.1 sevan
332 1.1 sevan if_attach(ifp);
333 1.2 sevan ether_ifattach(ifp, eaddr);
334 1.1 sevan }
335 1.1 sevan
336 1.1 sevan int
337 1.1 sevan rge_intr(void *arg)
338 1.1 sevan {
339 1.1 sevan struct rge_softc *sc = arg;
340 1.2 sevan struct ifnet *ifp = &sc->sc_ec.ec_if;
341 1.1 sevan uint32_t status;
342 1.1 sevan int claimed = 0, rx, tx;
343 1.1 sevan
344 1.1 sevan if (!(ifp->if_flags & IFF_RUNNING))
345 1.1 sevan return (0);
346 1.1 sevan
347 1.1 sevan /* Disable interrupts. */
348 1.1 sevan RGE_WRITE_4(sc, RGE_IMR, 0);
349 1.1 sevan
350 1.1 sevan status = RGE_READ_4(sc, RGE_ISR);
351 1.1 sevan if (!(sc->rge_flags & RGE_FLAG_MSI)) {
352 1.1 sevan if ((status & RGE_INTRS) == 0 || status == 0xffffffff)
353 1.1 sevan return (0);
354 1.1 sevan }
355 1.1 sevan if (status)
356 1.1 sevan RGE_WRITE_4(sc, RGE_ISR, status);
357 1.1 sevan
358 1.1 sevan if (status & RGE_ISR_PCS_TIMEOUT)
359 1.1 sevan claimed = 1;
360 1.1 sevan
361 1.1 sevan rx = tx = 0;
362 1.1 sevan if (status & RGE_INTRS) {
363 1.1 sevan if (status &
364 1.1 sevan (sc->rge_rx_ack | RGE_ISR_RX_ERR | RGE_ISR_RX_FIFO_OFLOW)) {
365 1.1 sevan rx |= rge_rxeof(sc);
366 1.1 sevan claimed = 1;
367 1.1 sevan }
368 1.1 sevan
369 1.1 sevan if (status & (sc->rge_tx_ack | RGE_ISR_TX_ERR)) {
370 1.1 sevan tx |= rge_txeof(sc);
371 1.1 sevan claimed = 1;
372 1.1 sevan }
373 1.1 sevan
374 1.1 sevan if (status & RGE_ISR_SYSTEM_ERR) {
375 1.2 sevan KERNEL_LOCK(1, NULL);
376 1.1 sevan rge_init(ifp);
377 1.2 sevan KERNEL_UNLOCK_ONE(NULL);
378 1.1 sevan claimed = 1;
379 1.1 sevan }
380 1.1 sevan }
381 1.1 sevan
382 1.1 sevan if (sc->rge_timerintr) {
383 1.1 sevan if ((tx | rx) == 0) {
384 1.1 sevan /*
385 1.1 sevan * Nothing needs to be processed, fallback
386 1.1 sevan * to use TX/RX interrupts.
387 1.1 sevan */
388 1.1 sevan rge_setup_intr(sc, RGE_IMTYPE_NONE);
389 1.1 sevan
390 1.1 sevan /*
391 1.1 sevan * Recollect, mainly to avoid the possible
392 1.1 sevan * race introduced by changing interrupt
393 1.1 sevan * masks.
394 1.1 sevan */
395 1.1 sevan rge_rxeof(sc);
396 1.1 sevan rge_txeof(sc);
397 1.1 sevan } else
398 1.1 sevan RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
399 1.1 sevan } else if (tx | rx) {
400 1.1 sevan /*
401 1.1 sevan * Assume that using simulated interrupt moderation
402 1.1 sevan * (hardware timer based) could reduce the interrupt
403 1.1 sevan * rate.
404 1.1 sevan */
405 1.1 sevan rge_setup_intr(sc, RGE_IMTYPE_SIM);
406 1.1 sevan }
407 1.1 sevan
408 1.1 sevan RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
409 1.1 sevan
410 1.1 sevan return (claimed);
411 1.1 sevan }
412 1.1 sevan
413 1.1 sevan int
414 1.1 sevan rge_encap(struct rge_softc *sc, struct mbuf *m, int idx)
415 1.1 sevan {
416 1.1 sevan struct rge_tx_desc *d = NULL;
417 1.1 sevan struct rge_txq *txq;
418 1.1 sevan bus_dmamap_t txmap;
419 1.1 sevan uint32_t cmdsts, cflags = 0;
420 1.1 sevan int cur, error, i, last, nsegs;
421 1.1 sevan
422 1.1 sevan /*
423 1.1 sevan * Set RGE_TDEXTSTS_IPCSUM if any checksum offloading is requested.
   424 1.1 sevan 	 * Otherwise, RGE_TDEXTSTS_TCPCSUM / RGE_TDEXTSTS_UDPCSUM do not
   425 1.1 sevan 	 * take effect.
426 1.1 sevan */
427 1.1 sevan if ((m->m_pkthdr.csum_flags &
428 1.2 sevan (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) != 0) {
429 1.1 sevan cflags |= RGE_TDEXTSTS_IPCSUM;
   430 1.1 sevan 		if (m->m_pkthdr.csum_flags & M_CSUM_TCPv4)
   431 1.1 sevan 			cflags |= RGE_TDEXTSTS_TCPCSUM;
   432 1.1 sevan 		if (m->m_pkthdr.csum_flags & M_CSUM_UDPv4)
   433 1.1 sevan 			cflags |= RGE_TDEXTSTS_UDPCSUM;
434 1.1 sevan }
435 1.1 sevan
436 1.1 sevan txq = &sc->rge_ldata.rge_txq[idx];
437 1.1 sevan txmap = txq->txq_dmamap;
438 1.1 sevan
439 1.1 sevan error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m, BUS_DMA_NOWAIT);
440 1.1 sevan switch (error) {
441 1.1 sevan case 0:
442 1.1 sevan break;
443 1.1 sevan case EFBIG: /* mbuf chain is too fragmented */
444 1.1 sevan if (m_defrag(m, M_DONTWAIT) == 0 &&
445 1.1 sevan bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m,
446 1.1 sevan BUS_DMA_NOWAIT) == 0)
447 1.1 sevan break;
448 1.1 sevan
449 1.1 sevan /* FALLTHROUGH */
450 1.1 sevan default:
451 1.1 sevan return (0);
452 1.1 sevan }
453 1.1 sevan
454 1.1 sevan bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
455 1.1 sevan BUS_DMASYNC_PREWRITE);
456 1.1 sevan
457 1.1 sevan nsegs = txmap->dm_nsegs;
458 1.1 sevan
459 1.1 sevan /* Set up hardware VLAN tagging. */
460 1.1 sevan #if NVLAN > 0
461 1.1 sevan if (m->m_flags & M_VLANTAG)
462 1.1 sevan cflags |= swap16(m->m_pkthdr.ether_vtag | RGE_TDEXTSTS_VTAG);
463 1.1 sevan #endif
464 1.1 sevan
465 1.1 sevan cur = idx;
466 1.1 sevan cmdsts = RGE_TDCMDSTS_SOF;
467 1.1 sevan
468 1.1 sevan for (i = 0; i < txmap->dm_nsegs; i++) {
469 1.1 sevan d = &sc->rge_ldata.rge_tx_list[cur];
470 1.1 sevan
471 1.1 sevan d->rge_extsts = htole32(cflags);
472 1.1 sevan d->rge_addrlo = htole32(RGE_ADDR_LO(txmap->dm_segs[i].ds_addr));
473 1.1 sevan d->rge_addrhi = htole32(RGE_ADDR_HI(txmap->dm_segs[i].ds_addr));
474 1.1 sevan
475 1.1 sevan cmdsts |= txmap->dm_segs[i].ds_len;
476 1.1 sevan
477 1.1 sevan if (cur == RGE_TX_LIST_CNT - 1)
478 1.1 sevan cmdsts |= RGE_TDCMDSTS_EOR;
479 1.1 sevan
480 1.1 sevan d->rge_cmdsts = htole32(cmdsts);
481 1.1 sevan
482 1.1 sevan last = cur;
483 1.1 sevan cmdsts = RGE_TDCMDSTS_OWN;
484 1.1 sevan cur = RGE_NEXT_TX_DESC(cur);
485 1.1 sevan }
486 1.1 sevan
487 1.1 sevan /* Set EOF on the last descriptor. */
488 1.1 sevan d->rge_cmdsts |= htole32(RGE_TDCMDSTS_EOF);
489 1.1 sevan
490 1.1 sevan /* Transfer ownership of packet to the chip. */
491 1.1 sevan d = &sc->rge_ldata.rge_tx_list[idx];
492 1.1 sevan
493 1.1 sevan d->rge_cmdsts |= htole32(RGE_TDCMDSTS_OWN);
494 1.1 sevan
495 1.1 sevan bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
496 1.1 sevan cur * sizeof(struct rge_tx_desc), sizeof(struct rge_tx_desc),
497 1.1 sevan BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
498 1.1 sevan
499 1.1 sevan /* Update info of TX queue and descriptors. */
500 1.1 sevan txq->txq_mbuf = m;
501 1.1 sevan txq->txq_descidx = last;
502 1.1 sevan
503 1.1 sevan return (nsegs);
504 1.1 sevan }
505 1.1 sevan
506 1.1 sevan int
507 1.2 sevan rge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
508 1.1 sevan {
509 1.1 sevan struct rge_softc *sc = ifp->if_softc;
510 1.1 sevan struct ifreq *ifr = (struct ifreq *)data;
511 1.1 sevan int s, error = 0;
512 1.1 sevan
513 1.1 sevan s = splnet();
514 1.1 sevan
515 1.1 sevan switch (cmd) {
516 1.1 sevan case SIOCSIFADDR:
517 1.1 sevan ifp->if_flags |= IFF_UP;
518 1.1 sevan if (!(ifp->if_flags & IFF_RUNNING))
519 1.1 sevan rge_init(ifp);
520 1.1 sevan break;
521 1.1 sevan case SIOCSIFFLAGS:
522 1.1 sevan if (ifp->if_flags & IFF_UP) {
523 1.1 sevan if (ifp->if_flags & IFF_RUNNING)
524 1.1 sevan error = ENETRESET;
525 1.1 sevan else
526 1.1 sevan rge_init(ifp);
527 1.1 sevan } else {
528 1.1 sevan if (ifp->if_flags & IFF_RUNNING)
529 1.1 sevan rge_stop(ifp);
530 1.1 sevan }
531 1.1 sevan break;
532 1.1 sevan case SIOCGIFMEDIA:
533 1.1 sevan case SIOCSIFMEDIA:
534 1.1 sevan error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
535 1.1 sevan break;
536 1.1 sevan case SIOCSIFMTU:
   537 1.2 sevan 		if (ifr->ifr_mtu > RGE_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN) {
538 1.1 sevan error = EINVAL;
539 1.1 sevan break;
540 1.1 sevan }
541 1.1 sevan ifp->if_mtu = ifr->ifr_mtu;
542 1.1 sevan break;
543 1.1 sevan default:
544 1.2 sevan error = ether_ioctl(ifp, cmd, data);
545 1.1 sevan }
546 1.1 sevan
547 1.1 sevan if (error == ENETRESET) {
548 1.1 sevan if (ifp->if_flags & IFF_RUNNING)
549 1.1 sevan rge_iff(sc);
550 1.1 sevan error = 0;
551 1.1 sevan }
552 1.1 sevan
553 1.1 sevan splx(s);
554 1.1 sevan return (error);
555 1.1 sevan }
556 1.1 sevan
557 1.1 sevan void
558 1.3 sevan rge_start(struct ifnet *ifp)
559 1.1 sevan {
560 1.1 sevan struct rge_softc *sc = ifp->if_softc;
561 1.1 sevan struct mbuf *m;
562 1.1 sevan int free, idx, used;
563 1.1 sevan int queued = 0;
564 1.1 sevan
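	/* Local stand-in for OpenBSD's LINK_STATE_IS_UP() from <net/if.h>. */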
565 1.2 sevan #define LINK_STATE_IS_UP(_s) \
566 1.2 sevan ((_s) >= LINK_STATE_UP || (_s) == LINK_STATE_UNKNOWN)
567 1.2 sevan
568 1.1 sevan if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
   569 1.1 sevan 		ifq_purge(&ifp->if_snd);
570 1.1 sevan return;
571 1.1 sevan }
572 1.1 sevan
573 1.1 sevan /* Calculate free space. */
574 1.1 sevan idx = sc->rge_ldata.rge_txq_prodidx;
575 1.1 sevan free = sc->rge_ldata.rge_txq_considx;
576 1.1 sevan if (free <= idx)
577 1.1 sevan free += RGE_TX_LIST_CNT;
578 1.1 sevan free -= idx;
579 1.1 sevan
580 1.1 sevan for (;;) {
581 1.1 sevan if (RGE_TX_NSEGS >= free + 2) {
582 1.3 sevan SET(ifp->if_flags, IFF_OACTIVE);
583 1.1 sevan break;
584 1.1 sevan }
585 1.1 sevan
586 1.3 sevan IFQ_DEQUEUE(&ifp->if_snd, m);
587 1.1 sevan if (m == NULL)
588 1.1 sevan break;
589 1.1 sevan
590 1.1 sevan used = rge_encap(sc, m, idx);
591 1.1 sevan if (used == 0) {
592 1.1 sevan m_freem(m);
593 1.1 sevan continue;
594 1.1 sevan }
595 1.1 sevan
596 1.1 sevan KASSERT(used <= free);
597 1.1 sevan free -= used;
598 1.1 sevan
599 1.1 sevan #if NBPFILTER > 0
600 1.1 sevan if (ifp->if_bpf)
601 1.1 sevan bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
602 1.1 sevan #endif
603 1.1 sevan
604 1.1 sevan idx += used;
605 1.1 sevan if (idx >= RGE_TX_LIST_CNT)
606 1.1 sevan idx -= RGE_TX_LIST_CNT;
607 1.1 sevan
608 1.1 sevan queued++;
609 1.1 sevan }
610 1.1 sevan
611 1.1 sevan if (queued == 0)
612 1.1 sevan return;
613 1.1 sevan
614 1.1 sevan /* Set a timeout in case the chip goes out to lunch. */
615 1.1 sevan ifp->if_timer = 5;
616 1.1 sevan
617 1.1 sevan sc->rge_ldata.rge_txq_prodidx = idx;
   618 1.1 sevan 	ifq_serialize(&ifp->if_snd, &sc->sc_task);
619 1.1 sevan }
620 1.1 sevan
621 1.1 sevan void
622 1.1 sevan rge_watchdog(struct ifnet *ifp)
623 1.1 sevan {
624 1.1 sevan struct rge_softc *sc = ifp->if_softc;
625 1.1 sevan
626 1.16 jakllsch device_printf(sc->sc_dev, "watchdog timeout\n");
627 1.4 skrll if_statinc(ifp, if_oerrors);
628 1.1 sevan
629 1.1 sevan rge_init(ifp);
630 1.1 sevan }
631 1.1 sevan
632 1.1 sevan int
633 1.1 sevan rge_init(struct ifnet *ifp)
634 1.1 sevan {
635 1.1 sevan struct rge_softc *sc = ifp->if_softc;
636 1.1 sevan uint32_t val;
637 1.1 sevan uint16_t max_frame_size;
638 1.1 sevan int i;
639 1.1 sevan
640 1.1 sevan rge_stop(ifp);
641 1.1 sevan
642 1.1 sevan /* Set MAC address. */
643 1.2 sevan rge_set_macaddr(sc, sc->sc_enaddr);
644 1.1 sevan
   645 1.1 sevan 	/* Set maximum frame size but don't let MTU be less than ETHERMTU. */
646 1.1 sevan if (ifp->if_mtu < ETHERMTU)
647 1.1 sevan max_frame_size = ETHERMTU;
648 1.1 sevan else
649 1.1 sevan max_frame_size = ifp->if_mtu;
650 1.1 sevan
651 1.1 sevan max_frame_size += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
652 1.1 sevan ETHER_CRC_LEN + 1;
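	/* e.g. the default 1500 byte MTU gives 1500 + 14 + 4 + 4 + 1 = 1523. */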
653 1.1 sevan
654 1.1 sevan if (max_frame_size > RGE_JUMBO_FRAMELEN)
655 1.5 skrll max_frame_size -= 1;
656 1.1 sevan
657 1.1 sevan RGE_WRITE_2(sc, RGE_RXMAXSIZE, max_frame_size);
658 1.1 sevan
659 1.1 sevan /* Initialize RX descriptors list. */
660 1.1 sevan if (rge_rx_list_init(sc) == ENOBUFS) {
661 1.16 jakllsch device_printf(sc->sc_dev,
662 1.13 sevan "init failed: no memory for RX buffers\n");
663 1.1 sevan rge_stop(ifp);
664 1.1 sevan return (ENOBUFS);
665 1.1 sevan }
666 1.1 sevan
667 1.1 sevan /* Initialize TX descriptors. */
668 1.1 sevan rge_tx_list_init(sc);
669 1.1 sevan
670 1.1 sevan /* Load the addresses of the RX and TX lists into the chip. */
671 1.1 sevan RGE_WRITE_4(sc, RGE_RXDESC_ADDR_LO,
672 1.1 sevan RGE_ADDR_LO(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
673 1.1 sevan RGE_WRITE_4(sc, RGE_RXDESC_ADDR_HI,
674 1.1 sevan RGE_ADDR_HI(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
675 1.1 sevan RGE_WRITE_4(sc, RGE_TXDESC_ADDR_LO,
676 1.1 sevan RGE_ADDR_LO(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
677 1.1 sevan RGE_WRITE_4(sc, RGE_TXDESC_ADDR_HI,
678 1.1 sevan RGE_ADDR_HI(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
679 1.1 sevan
680 1.1 sevan RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
681 1.1 sevan
682 1.1 sevan RGE_CLRBIT_1(sc, 0xf1, 0x80);
683 1.1 sevan RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
684 1.1 sevan RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
685 1.1 sevan RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_RDY_TO_L23);
686 1.1 sevan
687 1.1 sevan /* Clear interrupt moderation timer. */
688 1.1 sevan for (i = 0; i < 64; i++)
689 1.1 sevan RGE_WRITE_4(sc, RGE_IM(i), 0);
690 1.1 sevan
691 1.1 sevan /* Set the initial RX and TX configurations. */
692 1.1 sevan RGE_WRITE_4(sc, RGE_RXCFG, RGE_RXCFG_CONFIG);
693 1.1 sevan RGE_WRITE_4(sc, RGE_TXCFG, RGE_TXCFG_CONFIG);
694 1.1 sevan
695 1.1 sevan val = rge_read_csi(sc, 0x70c) & ~0xff000000;
696 1.1 sevan rge_write_csi(sc, 0x70c, val | 0x27000000);
697 1.1 sevan
698 1.1 sevan /* Enable hardware optimization function. */
699 1.1 sevan val = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x78) & ~0x00007000;
700 1.1 sevan pci_conf_write(sc->sc_pc, sc->sc_tag, 0x78, val | 0x00005000);
701 1.1 sevan
702 1.1 sevan RGE_WRITE_2(sc, 0x0382, 0x221b);
703 1.1 sevan RGE_WRITE_1(sc, 0x4500, 0);
704 1.1 sevan RGE_WRITE_2(sc, 0x4800, 0);
705 1.1 sevan RGE_CLRBIT_1(sc, RGE_CFG1, RGE_CFG1_SPEED_DOWN);
706 1.1 sevan
707 1.1 sevan rge_write_mac_ocp(sc, 0xc140, 0xffff);
708 1.1 sevan rge_write_mac_ocp(sc, 0xc142, 0xffff);
709 1.1 sevan
710 1.1 sevan val = rge_read_mac_ocp(sc, 0xd3e2) & ~0x0fff;
711 1.1 sevan rge_write_mac_ocp(sc, 0xd3e2, val | 0x03a9);
712 1.1 sevan
713 1.1 sevan RGE_MAC_CLRBIT(sc, 0xd3e4, 0x00ff);
714 1.1 sevan RGE_MAC_SETBIT(sc, 0xe860, 0x0080);
715 1.1 sevan RGE_MAC_SETBIT(sc, 0xeb58, 0x0001);
716 1.1 sevan
717 1.1 sevan val = rge_read_mac_ocp(sc, 0xe614) & ~0x0700;
718 1.1 sevan rge_write_mac_ocp(sc, 0xe614, val | 0x0400);
719 1.1 sevan
720 1.1 sevan RGE_MAC_CLRBIT(sc, 0xe63e, 0x0c00);
721 1.1 sevan
722 1.1 sevan val = rge_read_mac_ocp(sc, 0xe63e) & ~0x0030;
723 1.1 sevan rge_write_mac_ocp(sc, 0xe63e, val | 0x0020);
724 1.1 sevan
725 1.1 sevan RGE_MAC_SETBIT(sc, 0xc0b4, 0x000c);
726 1.1 sevan
727 1.1 sevan val = rge_read_mac_ocp(sc, 0xeb6a) & ~0x007f;
728 1.1 sevan rge_write_mac_ocp(sc, 0xeb6a, val | 0x0033);
729 1.1 sevan
730 1.1 sevan val = rge_read_mac_ocp(sc, 0xeb50) & ~0x03e0;
731 1.1 sevan rge_write_mac_ocp(sc, 0xeb50, val | 0x0040);
732 1.1 sevan
733 1.1 sevan val = rge_read_mac_ocp(sc, 0xe056) & ~0x00f0;
734 1.1 sevan rge_write_mac_ocp(sc, 0xe056, val | 0x0030);
735 1.1 sevan
736 1.1 sevan RGE_WRITE_1(sc, RGE_TDFNR, 0x10);
737 1.1 sevan
738 1.1 sevan RGE_MAC_CLRBIT(sc, 0xe040, 0x1000);
739 1.1 sevan
740 1.1 sevan val = rge_read_mac_ocp(sc, 0xe0c0) & ~0x4f0f;
741 1.1 sevan rge_write_mac_ocp(sc, 0xe0c0, val | 0x4403);
742 1.1 sevan
743 1.1 sevan RGE_MAC_SETBIT(sc, 0xe052, 0x0068);
744 1.1 sevan RGE_MAC_CLRBIT(sc, 0xe052, 0x0080);
745 1.1 sevan
746 1.1 sevan val = rge_read_mac_ocp(sc, 0xc0ac) & ~0x0080;
747 1.1 sevan rge_write_mac_ocp(sc, 0xc0ac, val | 0x1f00);
748 1.1 sevan
749 1.1 sevan val = rge_read_mac_ocp(sc, 0xd430) & ~0x0fff;
750 1.1 sevan rge_write_mac_ocp(sc, 0xd430, val | 0x047f);
751 1.1 sevan
752 1.5 skrll RGE_MAC_SETBIT(sc, 0xe84c, 0x00c0);
753 1.1 sevan
754 1.1 sevan /* Disable EEE plus. */
755 1.1 sevan RGE_MAC_CLRBIT(sc, 0xe080, 0x0002);
756 1.1 sevan
757 1.1 sevan RGE_MAC_CLRBIT(sc, 0xea1c, 0x0004);
758 1.1 sevan
759 1.1 sevan RGE_MAC_SETBIT(sc, 0xeb54, 0x0001);
760 1.1 sevan DELAY(1);
761 1.1 sevan RGE_MAC_CLRBIT(sc, 0xeb54, 0x0001);
762 1.1 sevan
763 1.1 sevan RGE_CLRBIT_4(sc, 0x1880, 0x0030);
764 1.1 sevan
765 1.1 sevan rge_write_mac_ocp(sc, 0xe098, 0xc302);
766 1.1 sevan
   767 1.2 sevan 	if (sc->sc_ec.ec_capabilities & ETHERCAP_VLAN_HWTAGGING)
768 1.1 sevan RGE_SETBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);
769 1.1 sevan
770 1.1 sevan RGE_SETBIT_2(sc, RGE_CPLUSCMD, RGE_CPLUSCMD_RXCSUM);
771 1.1 sevan
772 1.1 sevan for (i = 0; i < 10; i++) {
773 1.1 sevan if (!(rge_read_mac_ocp(sc, 0xe00e) & 0x2000))
774 1.1 sevan break;
775 1.1 sevan DELAY(1000);
776 1.1 sevan }
777 1.1 sevan
778 1.1 sevan /* Disable RXDV gate. */
779 1.1 sevan RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
780 1.1 sevan DELAY(2000);
781 1.1 sevan
782 1.1 sevan rge_ifmedia_upd(ifp);
783 1.1 sevan
784 1.1 sevan /* Enable transmit and receive. */
785 1.1 sevan RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_TXENB | RGE_CMD_RXENB);
786 1.1 sevan
787 1.1 sevan /* Program promiscuous mode and multicast filters. */
788 1.1 sevan rge_iff(sc);
789 1.1 sevan
790 1.1 sevan RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
791 1.1 sevan RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
792 1.1 sevan
793 1.1 sevan RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
794 1.1 sevan
795 1.1 sevan /* Enable interrupts. */
796 1.1 sevan rge_setup_intr(sc, RGE_IMTYPE_SIM);
797 1.1 sevan
798 1.1 sevan ifp->if_flags |= IFF_RUNNING;
799 1.3 sevan CLR(ifp->if_flags, IFF_OACTIVE);
800 1.1 sevan
801 1.3 sevan callout_schedule(&sc->sc_timeout, 1);
802 1.1 sevan
803 1.1 sevan return (0);
804 1.1 sevan }
805 1.1 sevan
806 1.1 sevan /*
807 1.1 sevan * Stop the adapter and free any mbufs allocated to the RX and TX lists.
808 1.1 sevan */
809 1.1 sevan void
810 1.1 sevan rge_stop(struct ifnet *ifp)
811 1.1 sevan {
812 1.1 sevan struct rge_softc *sc = ifp->if_softc;
813 1.1 sevan int i;
814 1.1 sevan
   815 1.1 sevan 	callout_stop(&sc->sc_timeout);
816 1.1 sevan
817 1.1 sevan ifp->if_timer = 0;
818 1.1 sevan ifp->if_flags &= ~IFF_RUNNING;
819 1.1 sevan sc->rge_timerintr = 0;
820 1.1 sevan
821 1.1 sevan RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
822 1.1 sevan RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
823 1.1 sevan RGE_RXCFG_ERRPKT);
824 1.1 sevan
825 1.1 sevan RGE_WRITE_4(sc, RGE_IMR, 0);
826 1.1 sevan RGE_WRITE_4(sc, RGE_ISR, 0xffffffff);
827 1.1 sevan
828 1.1 sevan rge_reset(sc);
829 1.1 sevan
830 1.1 sevan intr_barrier(sc->sc_ih);
831 1.1 sevan ifq_barrier(&ifp->if_snd);
832 1.2 sevan /* ifq_clr_oactive(&ifp->if_snd); Sevan - OpenBSD queue API */
833 1.1 sevan
834 1.1 sevan if (sc->rge_head != NULL) {
835 1.1 sevan m_freem(sc->rge_head);
836 1.1 sevan sc->rge_head = sc->rge_tail = NULL;
837 1.1 sevan }
838 1.1 sevan
839 1.1 sevan /* Free the TX list buffers. */
840 1.1 sevan for (i = 0; i < RGE_TX_LIST_CNT; i++) {
841 1.1 sevan if (sc->rge_ldata.rge_txq[i].txq_mbuf != NULL) {
842 1.1 sevan bus_dmamap_unload(sc->sc_dmat,
843 1.1 sevan sc->rge_ldata.rge_txq[i].txq_dmamap);
844 1.1 sevan m_freem(sc->rge_ldata.rge_txq[i].txq_mbuf);
845 1.1 sevan sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
846 1.1 sevan }
847 1.1 sevan }
848 1.1 sevan
849 1.1 sevan /* Free the RX list buffers. */
850 1.1 sevan for (i = 0; i < RGE_RX_LIST_CNT; i++) {
851 1.1 sevan if (sc->rge_ldata.rge_rxq[i].rxq_mbuf != NULL) {
852 1.1 sevan bus_dmamap_unload(sc->sc_dmat,
853 1.1 sevan sc->rge_ldata.rge_rxq[i].rxq_dmamap);
854 1.1 sevan m_freem(sc->rge_ldata.rge_rxq[i].rxq_mbuf);
855 1.1 sevan sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
856 1.1 sevan }
857 1.1 sevan }
858 1.1 sevan }
859 1.1 sevan
860 1.1 sevan /*
861 1.1 sevan * Set media options.
862 1.1 sevan */
863 1.1 sevan int
864 1.1 sevan rge_ifmedia_upd(struct ifnet *ifp)
865 1.1 sevan {
866 1.1 sevan struct rge_softc *sc = ifp->if_softc;
867 1.1 sevan struct ifmedia *ifm = &sc->sc_media;
868 1.1 sevan int anar, gig, val;
869 1.1 sevan
870 1.1 sevan if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
871 1.1 sevan return (EINVAL);
872 1.1 sevan
873 1.1 sevan /* Disable Gigabit Lite. */
874 1.1 sevan RGE_PHY_CLRBIT(sc, 0xa428, 0x0200);
875 1.1 sevan RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0001);
876 1.1 sevan
877 1.1 sevan val = rge_read_phy_ocp(sc, 0xa5d4);
878 1.1 sevan val &= ~RGE_ADV_2500TFDX;
879 1.1 sevan
880 1.1 sevan anar = gig = 0;
881 1.1 sevan switch (IFM_SUBTYPE(ifm->ifm_media)) {
882 1.1 sevan case IFM_AUTO:
883 1.1 sevan anar |= ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
884 1.1 sevan gig |= GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
885 1.1 sevan val |= RGE_ADV_2500TFDX;
886 1.1 sevan break;
887 1.1 sevan case IFM_2500_T:
888 1.1 sevan anar |= ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
889 1.1 sevan gig |= GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
890 1.1 sevan val |= RGE_ADV_2500TFDX;
891 1.1 sevan ifp->if_baudrate = IF_Mbps(2500);
892 1.1 sevan break;
893 1.1 sevan case IFM_1000_T:
894 1.1 sevan anar |= ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
895 1.1 sevan gig |= GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
896 1.1 sevan ifp->if_baudrate = IF_Gbps(1);
897 1.1 sevan break;
898 1.1 sevan case IFM_100_TX:
899 1.1 sevan anar |= ANAR_TX | ANAR_TX_FD;
900 1.1 sevan ifp->if_baudrate = IF_Mbps(100);
901 1.1 sevan break;
902 1.1 sevan case IFM_10_T:
903 1.1 sevan anar |= ANAR_10 | ANAR_10_FD;
904 1.1 sevan ifp->if_baudrate = IF_Mbps(10);
905 1.1 sevan break;
906 1.1 sevan default:
907 1.16 jakllsch device_printf(sc->sc_dev,
908 1.13 sevan "unsupported media type\n");
909 1.1 sevan return (EINVAL);
910 1.1 sevan }
911 1.1 sevan
912 1.1 sevan rge_write_phy(sc, 0, MII_ANAR, anar | ANAR_PAUSE_ASYM | ANAR_FC);
913 1.1 sevan rge_write_phy(sc, 0, MII_100T2CR, gig);
914 1.1 sevan rge_write_phy_ocp(sc, 0xa5d4, val);
915 1.1 sevan rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_STARTNEG);
916 1.1 sevan
917 1.1 sevan return (0);
918 1.1 sevan }
919 1.1 sevan
920 1.1 sevan /*
921 1.1 sevan * Report current media status.
922 1.1 sevan */
923 1.1 sevan void
924 1.1 sevan rge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
925 1.1 sevan {
926 1.1 sevan struct rge_softc *sc = ifp->if_softc;
927 1.1 sevan uint16_t status = 0;
928 1.1 sevan
929 1.1 sevan ifmr->ifm_status = IFM_AVALID;
930 1.1 sevan ifmr->ifm_active = IFM_ETHER;
931 1.1 sevan
932 1.1 sevan if (rge_get_link_status(sc)) {
933 1.1 sevan ifmr->ifm_status |= IFM_ACTIVE;
934 1.1 sevan
935 1.1 sevan status = RGE_READ_2(sc, RGE_PHYSTAT);
936 1.1 sevan if ((status & RGE_PHYSTAT_FDX) ||
937 1.1 sevan (status & RGE_PHYSTAT_2500MBPS))
938 1.1 sevan ifmr->ifm_active |= IFM_FDX;
939 1.1 sevan else
940 1.1 sevan ifmr->ifm_active |= IFM_HDX;
941 1.1 sevan
942 1.1 sevan if (status & RGE_PHYSTAT_10MBPS)
943 1.1 sevan ifmr->ifm_active |= IFM_10_T;
944 1.1 sevan else if (status & RGE_PHYSTAT_100MBPS)
945 1.1 sevan ifmr->ifm_active |= IFM_100_TX;
946 1.1 sevan else if (status & RGE_PHYSTAT_1000MBPS)
947 1.1 sevan ifmr->ifm_active |= IFM_1000_T;
948 1.1 sevan else if (status & RGE_PHYSTAT_2500MBPS)
949 1.1 sevan ifmr->ifm_active |= IFM_2500_T;
950 1.1 sevan }
951 1.1 sevan }
952 1.1 sevan
953 1.5 skrll /*
954 1.1 sevan * Allocate memory for RX/TX rings.
955 1.1 sevan */
956 1.1 sevan int
957 1.1 sevan rge_allocmem(struct rge_softc *sc)
958 1.1 sevan {
959 1.1 sevan int error, i;
960 1.1 sevan
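	/*
	 * Each ring goes through the usual bus_dma(9) sequence:
	 * dmamap_create, dmamem_alloc, dmamem_map, then dmamap_load.
	 */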
961 1.1 sevan /* Allocate DMA'able memory for the TX ring. */
962 1.1 sevan error = bus_dmamap_create(sc->sc_dmat, RGE_TX_LIST_SZ, 1,
963 1.1 sevan RGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT, &sc->rge_ldata.rge_tx_list_map);
964 1.1 sevan if (error) {
965 1.13 sevan aprint_error_dev(sc->sc_dev, "can't create TX list map\n");
966 1.1 sevan return (error);
967 1.1 sevan }
968 1.1 sevan error = bus_dmamem_alloc(sc->sc_dmat, RGE_TX_LIST_SZ, RGE_ALIGN, 0,
969 1.1 sevan &sc->rge_ldata.rge_tx_listseg, 1, &sc->rge_ldata.rge_tx_listnseg,
970 1.2 sevan BUS_DMA_NOWAIT); /* XXX OpenBSD adds BUS_DMA_ZERO */
971 1.1 sevan if (error) {
972 1.13 sevan aprint_error_dev(sc->sc_dev, "can't alloc TX list\n");
973 1.1 sevan return (error);
974 1.1 sevan }
975 1.1 sevan
976 1.1 sevan /* Load the map for the TX ring. */
977 1.1 sevan error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
978 1.1 sevan sc->rge_ldata.rge_tx_listnseg, RGE_TX_LIST_SZ,
979 1.8 sevan (void **) &sc->rge_ldata.rge_tx_list,
980 1.2 sevan BUS_DMA_NOWAIT); /* XXX OpenBSD adds BUS_DMA_COHERENT */
981 1.1 sevan if (error) {
982 1.13 sevan aprint_error_dev(sc->sc_dev, "can't map TX dma buffers\n");
983 1.1 sevan bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
984 1.1 sevan sc->rge_ldata.rge_tx_listnseg);
985 1.1 sevan return (error);
986 1.1 sevan }
987 1.1 sevan error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
988 1.1 sevan sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
989 1.1 sevan if (error) {
990 1.13 sevan aprint_error_dev(sc->sc_dev, "can't load TX dma map\n");
991 1.1 sevan bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map);
992 1.1 sevan bus_dmamem_unmap(sc->sc_dmat,
993 1.2 sevan sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ);
994 1.1 sevan bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
995 1.1 sevan sc->rge_ldata.rge_tx_listnseg);
996 1.1 sevan return (error);
997 1.1 sevan }
998 1.1 sevan
999 1.1 sevan /* Create DMA maps for TX buffers. */
1000 1.1 sevan for (i = 0; i < RGE_TX_LIST_CNT; i++) {
1001 1.1 sevan error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN,
1002 1.1 sevan RGE_TX_NSEGS, RGE_JUMBO_FRAMELEN, 0, 0,
1003 1.1 sevan &sc->rge_ldata.rge_txq[i].txq_dmamap);
1004 1.1 sevan if (error) {
1005 1.13 sevan aprint_error_dev(sc->sc_dev, "can't create DMA map for TX\n");
1006 1.1 sevan return (error);
1007 1.1 sevan }
1008 1.1 sevan }
1009 1.1 sevan
1010 1.1 sevan /* Allocate DMA'able memory for the RX ring. */
1011 1.1 sevan error = bus_dmamap_create(sc->sc_dmat, RGE_RX_LIST_SZ, 1,
1012 1.1 sevan RGE_RX_LIST_SZ, 0, 0, &sc->rge_ldata.rge_rx_list_map);
1013 1.1 sevan if (error) {
1014 1.13 sevan aprint_error_dev(sc->sc_dev, "can't create RX list map\n");
1015 1.1 sevan return (error);
1016 1.1 sevan }
1017 1.1 sevan error = bus_dmamem_alloc(sc->sc_dmat, RGE_RX_LIST_SZ, RGE_ALIGN, 0,
1018 1.1 sevan &sc->rge_ldata.rge_rx_listseg, 1, &sc->rge_ldata.rge_rx_listnseg,
1019 1.2 sevan BUS_DMA_NOWAIT); /* XXX OpenBSD adds BUS_DMA_ZERO */
1020 1.1 sevan if (error) {
1021 1.13 sevan aprint_error_dev(sc->sc_dev, "can't alloc RX list\n");
1022 1.1 sevan return (error);
1023 1.1 sevan }
1024 1.1 sevan
1025 1.1 sevan /* Load the map for the RX ring. */
1026 1.1 sevan error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
1027 1.1 sevan sc->rge_ldata.rge_rx_listnseg, RGE_RX_LIST_SZ,
1028 1.8 sevan (void **) &sc->rge_ldata.rge_rx_list,
1029 1.2 sevan BUS_DMA_NOWAIT); /* XXX OpenBSD adds BUS_DMA_COHERENT */
1030 1.1 sevan if (error) {
1031 1.13 sevan aprint_error_dev(sc->sc_dev, "can't map RX dma buffers\n");
1032 1.1 sevan bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
1033 1.1 sevan sc->rge_ldata.rge_rx_listnseg);
1034 1.1 sevan return (error);
1035 1.1 sevan }
1036 1.1 sevan error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
1037 1.1 sevan sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
1038 1.1 sevan if (error) {
1039 1.13 sevan aprint_error_dev(sc->sc_dev, "can't load RX dma map\n");
1040 1.1 sevan bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map);
1041 1.1 sevan bus_dmamem_unmap(sc->sc_dmat,
1042 1.2 sevan sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ);
1043 1.1 sevan bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
1044 1.1 sevan sc->rge_ldata.rge_rx_listnseg);
1045 1.1 sevan return (error);
1046 1.1 sevan }
1047 1.1 sevan
1048 1.1 sevan /* Create DMA maps for RX buffers. */
1049 1.1 sevan for (i = 0; i < RGE_RX_LIST_CNT; i++) {
1050 1.1 sevan error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN, 1,
1051 1.1 sevan RGE_JUMBO_FRAMELEN, 0, 0,
1052 1.1 sevan &sc->rge_ldata.rge_rxq[i].rxq_dmamap);
1053 1.1 sevan if (error) {
1054 1.13 sevan aprint_error_dev(sc->sc_dev, "can't create DMA map for RX\n");
1055 1.1 sevan return (error);
1056 1.1 sevan }
1057 1.1 sevan }
1058 1.1 sevan
1059 1.1 sevan return (error);
1060 1.1 sevan }
1061 1.1 sevan
1062 1.1 sevan /*
1063 1.1 sevan * Initialize the RX descriptor and attach an mbuf cluster.
1064 1.1 sevan */
1065 1.1 sevan int
1066 1.1 sevan rge_newbuf(struct rge_softc *sc, int idx)
1067 1.1 sevan {
1068 1.1 sevan struct mbuf *m;
1069 1.1 sevan struct rge_rx_desc *r;
1070 1.1 sevan struct rge_rxq *rxq;
1071 1.1 sevan bus_dmamap_t rxmap;
1072 1.1 sevan
1073 1.1 sevan m = MCLGETI(NULL, M_DONTWAIT, NULL, RGE_JUMBO_FRAMELEN);
1074 1.1 sevan if (m == NULL)
1075 1.1 sevan return (ENOBUFS);
1076 1.1 sevan
1077 1.1 sevan m->m_len = m->m_pkthdr.len = RGE_JUMBO_FRAMELEN;
1078 1.1 sevan
1079 1.1 sevan rxq = &sc->rge_ldata.rge_rxq[idx];
1080 1.1 sevan rxmap = rxq->rxq_dmamap;
1081 1.1 sevan
1082 1.1 sevan if (bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m, BUS_DMA_NOWAIT))
1083 1.1 sevan goto out;
1084 1.1 sevan
1085 1.1 sevan bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
1086 1.1 sevan BUS_DMASYNC_PREREAD);
1087 1.1 sevan
1088 1.1 sevan /* Map the segments into RX descriptors. */
1089 1.1 sevan r = &sc->rge_ldata.rge_rx_list[idx];
1090 1.1 sevan
1091 1.1 sevan if (RGE_OWN(r)) {
1092 1.16 jakllsch device_printf(sc->sc_dev, "tried to map busy RX descriptor\n");
1093 1.1 sevan goto out;
1094 1.1 sevan }
1095 1.1 sevan
1096 1.1 sevan rxq->rxq_mbuf = m;
1097 1.1 sevan
1098 1.1 sevan r->rge_extsts = 0;
1099 1.1 sevan r->rge_addrlo = htole32(RGE_ADDR_LO(rxmap->dm_segs[0].ds_addr));
1100 1.1 sevan r->rge_addrhi = htole32(RGE_ADDR_HI(rxmap->dm_segs[0].ds_addr));
1101 1.1 sevan
1102 1.1 sevan r->rge_cmdsts = htole32(rxmap->dm_segs[0].ds_len);
1103 1.1 sevan if (idx == RGE_RX_LIST_CNT - 1)
1104 1.1 sevan r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
1105 1.1 sevan
1106 1.1 sevan r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);
1107 1.1 sevan
1108 1.1 sevan bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
1109 1.1 sevan idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
1110 1.1 sevan BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1111 1.1 sevan
1112 1.1 sevan return (0);
1113 1.1 sevan out:
1114 1.1 sevan if (m != NULL)
1115 1.1 sevan m_freem(m);
1116 1.1 sevan return (ENOMEM);
1117 1.1 sevan }
1118 1.1 sevan
1119 1.1 sevan void
1120 1.1 sevan rge_discard_rxbuf(struct rge_softc *sc, int idx)
1121 1.1 sevan {
1122 1.1 sevan struct rge_rx_desc *r;
1123 1.1 sevan
1124 1.1 sevan r = &sc->rge_ldata.rge_rx_list[idx];
1125 1.1 sevan
1126 1.1 sevan r->rge_cmdsts = htole32(RGE_JUMBO_FRAMELEN);
1127 1.1 sevan r->rge_extsts = 0;
1128 1.1 sevan if (idx == RGE_RX_LIST_CNT - 1)
1129 1.1 sevan r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
1130 1.1 sevan r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);
1131 1.1 sevan
1132 1.1 sevan bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
1133 1.1 sevan idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
1134 1.1 sevan BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1135 1.1 sevan }
1136 1.1 sevan
1137 1.1 sevan int
1138 1.1 sevan rge_rx_list_init(struct rge_softc *sc)
1139 1.1 sevan {
1140 1.1 sevan int i;
1141 1.1 sevan
1142 1.1 sevan memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);
1143 1.1 sevan
1144 1.1 sevan for (i = 0; i < RGE_RX_LIST_CNT; i++) {
1145 1.1 sevan sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
1146 1.1 sevan if (rge_newbuf(sc, i) == ENOBUFS)
1147 1.1 sevan return (ENOBUFS);
1148 1.1 sevan }
1149 1.1 sevan
1150 1.1 sevan sc->rge_ldata.rge_rxq_prodidx = 0;
1151 1.1 sevan sc->rge_head = sc->rge_tail = NULL;
1152 1.1 sevan
1153 1.1 sevan return (0);
1154 1.1 sevan }
1155 1.1 sevan
1156 1.1 sevan void
1157 1.1 sevan rge_tx_list_init(struct rge_softc *sc)
1158 1.1 sevan {
1159 1.1 sevan int i;
1160 1.1 sevan
1161 1.1 sevan memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);
1162 1.1 sevan
1163 1.1 sevan for (i = 0; i < RGE_TX_LIST_CNT; i++)
1164 1.1 sevan sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
1165 1.1 sevan
1166 1.1 sevan bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map, 0,
1167 1.1 sevan sc->rge_ldata.rge_tx_list_map->dm_mapsize,
1168 1.1 sevan BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1169 1.1 sevan
1170 1.1 sevan sc->rge_ldata.rge_txq_prodidx = sc->rge_ldata.rge_txq_considx = 0;
1171 1.1 sevan }
1172 1.1 sevan
1173 1.1 sevan int
1174 1.1 sevan rge_rxeof(struct rge_softc *sc)
1175 1.1 sevan {
1176 1.1 sevan struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1177 1.1 sevan struct mbuf *m;
1178 1.2 sevan struct ifnet *ifp = &sc->sc_ec.ec_if;
1179 1.1 sevan struct rge_rx_desc *cur_rx;
1180 1.1 sevan struct rge_rxq *rxq;
1181 1.1 sevan uint32_t rxstat, extsts;
1182 1.1 sevan int i, total_len, rx = 0;
1183 1.1 sevan
1184 1.1 sevan for (i = sc->rge_ldata.rge_rxq_prodidx; ; i = RGE_NEXT_RX_DESC(i)) {
1185 1.1 sevan /* Invalidate the descriptor memory. */
1186 1.1 sevan bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
1187 1.1 sevan i * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
1188 1.1 sevan BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1189 1.1 sevan
1190 1.1 sevan cur_rx = &sc->rge_ldata.rge_rx_list[i];
1191 1.1 sevan
1192 1.1 sevan if (RGE_OWN(cur_rx))
1193 1.1 sevan break;
1194 1.1 sevan
1195 1.1 sevan rxstat = letoh32(cur_rx->rge_cmdsts);
1196 1.1 sevan extsts = letoh32(cur_rx->rge_extsts);
1197 1.5 skrll
1198 1.1 sevan total_len = RGE_RXBYTES(cur_rx);
1199 1.1 sevan rxq = &sc->rge_ldata.rge_rxq[i];
1200 1.1 sevan m = rxq->rxq_mbuf;
1201 1.1 sevan rx = 1;
1202 1.1 sevan
1203 1.1 sevan /* Invalidate the RX mbuf and unload its map. */
1204 1.1 sevan bus_dmamap_sync(sc->sc_dmat, rxq->rxq_dmamap, 0,
1205 1.1 sevan rxq->rxq_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1206 1.1 sevan bus_dmamap_unload(sc->sc_dmat, rxq->rxq_dmamap);
1207 1.1 sevan
1208 1.1 sevan if ((rxstat & (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) !=
1209 1.1 sevan (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) {
1210 1.1 sevan rge_discard_rxbuf(sc, i);
1211 1.1 sevan continue;
1212 1.1 sevan }
1213 1.1 sevan
1214 1.1 sevan if (rxstat & RGE_RDCMDSTS_RXERRSUM) {
1215 1.4 skrll if_statinc(ifp, if_ierrors);
1216 1.1 sevan /*
1217 1.1 sevan * If this is part of a multi-fragment packet,
1218 1.1 sevan * discard all the pieces.
1219 1.1 sevan */
1220 1.1 sevan if (sc->rge_head != NULL) {
1221 1.1 sevan m_freem(sc->rge_head);
1222 1.1 sevan sc->rge_head = sc->rge_tail = NULL;
1223 1.1 sevan }
1224 1.1 sevan rge_discard_rxbuf(sc, i);
1225 1.1 sevan continue;
1226 1.1 sevan }
1227 1.1 sevan
1228 1.1 sevan /*
1229 1.1 sevan * If allocating a replacement mbuf fails,
1230 1.1 sevan * reload the current one.
1231 1.1 sevan */
1232 1.1 sevan
1233 1.1 sevan if (rge_newbuf(sc, i) == ENOBUFS) {
1234 1.1 sevan if (sc->rge_head != NULL) {
1235 1.1 sevan m_freem(sc->rge_head);
1236 1.1 sevan sc->rge_head = sc->rge_tail = NULL;
1237 1.1 sevan }
1238 1.1 sevan rge_discard_rxbuf(sc, i);
1239 1.1 sevan continue;
1240 1.1 sevan }
1241 1.1 sevan
1242 1.1 sevan if (sc->rge_head != NULL) {
1243 1.1 sevan m->m_len = total_len;
1244 1.1 sevan /*
1245 1.1 sevan * Special case: if there's 4 bytes or less
1246 1.1 sevan * in this buffer, the mbuf can be discarded:
1247 1.1 sevan * the last 4 bytes is the CRC, which we don't
1248 1.1 sevan * care about anyway.
1249 1.1 sevan */
1250 1.1 sevan if (m->m_len <= ETHER_CRC_LEN) {
1251 1.1 sevan sc->rge_tail->m_len -=
1252 1.1 sevan (ETHER_CRC_LEN - m->m_len);
1253 1.1 sevan m_freem(m);
1254 1.1 sevan } else {
1255 1.1 sevan m->m_len -= ETHER_CRC_LEN;
1256 1.1 sevan m->m_flags &= ~M_PKTHDR;
1257 1.1 sevan sc->rge_tail->m_next = m;
1258 1.1 sevan }
1259 1.1 sevan m = sc->rge_head;
1260 1.1 sevan sc->rge_head = sc->rge_tail = NULL;
1261 1.1 sevan m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
1262 1.1 sevan } else
1263 1.1 sevan m->m_pkthdr.len = m->m_len =
1264 1.1 sevan (total_len - ETHER_CRC_LEN);
1265 1.1 sevan
1266 1.1 sevan /* Check IP header checksum. */
  1267 1.1 sevan 		if (!(rxstat & RGE_RDCMDSTS_IPCSUMERR) &&
  1268 1.1 sevan 		    (extsts & RGE_RDEXTSTS_IPV4))
  1269 1.1 sevan 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
  1270 1.1 sevan 
  1271 1.1 sevan 		/* Check TCP/UDP checksum. */
  1272 1.1 sevan 		if (extsts & (RGE_RDEXTSTS_IPV4 | RGE_RDEXTSTS_IPV6)) {
  1273 1.1 sevan 			if ((rxstat & RGE_RDCMDSTS_TCPPKT) &&
  1274 1.1 sevan 			    !(rxstat & RGE_RDCMDSTS_TCPCSUMERR))
  1275 1.1 sevan 				m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
  1276 1.1 sevan 			if ((rxstat & RGE_RDCMDSTS_UDPPKT) &&
  1277 1.1 sevan 			    !(rxstat & RGE_RDCMDSTS_UDPCSUMERR))
  1278 1.1 sevan 				m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
  1278 1.1 sevan 		}
1279 1.1 sevan
1280 1.1 sevan #if NVLAN > 0
1281 1.1 sevan if (extsts & RGE_RDEXTSTS_VTAG) {
1282 1.1 sevan m->m_pkthdr.ether_vtag =
1283 1.1 sevan ntohs(extsts & RGE_RDEXTSTS_VLAN_MASK);
1284 1.1 sevan m->m_flags |= M_VLANTAG;
1285 1.1 sevan }
1286 1.1 sevan #endif
1287 1.1 sevan
1288 1.1 sevan ml_enqueue(&ml, m);
1289 1.1 sevan }
1290 1.1 sevan
1291 1.1 sevan sc->rge_ldata.rge_rxq_prodidx = i;
1292 1.1 sevan
1293 1.1 sevan if_input(ifp, &ml);
1294 1.1 sevan
1295 1.1 sevan return (rx);
1296 1.1 sevan }
1297 1.1 sevan
1298 1.1 sevan int
1299 1.1 sevan rge_txeof(struct rge_softc *sc)
1300 1.1 sevan {
1301 1.2 sevan struct ifnet *ifp = &sc->sc_ec.ec_if;
1302 1.1 sevan struct rge_txq *txq;
1303 1.1 sevan uint32_t txstat;
1304 1.1 sevan int cons, idx, prod;
1305 1.1 sevan int free = 0;
1306 1.1 sevan
1307 1.1 sevan prod = sc->rge_ldata.rge_txq_prodidx;
1308 1.1 sevan cons = sc->rge_ldata.rge_txq_considx;
1309 1.1 sevan
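	/*
	 * "free" doubles as state: 0 = nothing reclaimed, 1 = descriptors
	 * reclaimed, 2 = stopped early on a descriptor the chip still owns.
	 */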
1310 1.1 sevan while (prod != cons) {
1311 1.1 sevan txq = &sc->rge_ldata.rge_txq[cons];
1312 1.1 sevan idx = txq->txq_descidx;
1313 1.1 sevan
1314 1.1 sevan bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
1315 1.1 sevan idx * sizeof(struct rge_tx_desc),
1316 1.1 sevan sizeof(struct rge_tx_desc),
1317 1.1 sevan BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1318 1.1 sevan
1319 1.1 sevan txstat = letoh32(sc->rge_ldata.rge_tx_list[idx].rge_cmdsts);
1320 1.1 sevan
1321 1.1 sevan if (txstat & RGE_TDCMDSTS_OWN) {
1322 1.1 sevan free = 2;
1323 1.1 sevan break;
1324 1.1 sevan }
1325 1.1 sevan
1326 1.5 skrll bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap, 0,
1327 1.1 sevan txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1328 1.1 sevan bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
1329 1.1 sevan m_freem(txq->txq_mbuf);
1330 1.1 sevan txq->txq_mbuf = NULL;
1331 1.1 sevan
1332 1.1 sevan if (txstat & (RGE_TDCMDSTS_EXCESSCOLL | RGE_TDCMDSTS_COLL))
1333 1.4 skrll if_statinc(ifp, if_collisions);
1334 1.1 sevan if (txstat & RGE_TDCMDSTS_TXERR)
1335 1.4 skrll if_statinc(ifp, if_oerrors);
1336 1.1 sevan
1337 1.1 sevan bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
1338 1.1 sevan idx * sizeof(struct rge_tx_desc),
1339 1.1 sevan sizeof(struct rge_tx_desc),
1340 1.1 sevan BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1341 1.1 sevan
1342 1.1 sevan cons = RGE_NEXT_TX_DESC(idx);
1343 1.1 sevan free = 1;
1344 1.1 sevan }
1345 1.1 sevan
1346 1.1 sevan if (free == 0)
1347 1.1 sevan return (0);
1348 1.1 sevan
1349 1.1 sevan sc->rge_ldata.rge_txq_considx = cons;
1350 1.1 sevan
1351 1.1 sevan if (ifq_is_oactive(&ifp->if_snd))
1352 1.1 sevan ifq_restart(&ifp->if_snd);
1353 1.1 sevan else if (free == 2)
1354 1.1 sevan ifq_serialize(&ifp->if_snd, &sc->sc_task);
1355 1.1 sevan else
1356 1.1 sevan ifp->if_timer = 0;
1357 1.1 sevan
1358 1.1 sevan return (1);
1359 1.1 sevan }
1360 1.1 sevan
1361 1.1 sevan void
1362 1.1 sevan rge_reset(struct rge_softc *sc)
1363 1.1 sevan {
1364 1.1 sevan int i;
1365 1.1 sevan
1366 1.1 sevan /* Enable RXDV gate. */
1367 1.1 sevan RGE_SETBIT_1(sc, RGE_PPSW, 0x08);
1368 1.1 sevan DELAY(2000);
1369 1.1 sevan
1370 1.1 sevan for (i = 0; i < 10; i++) {
1371 1.1 sevan DELAY(100);
1372 1.1 sevan if ((RGE_READ_1(sc, RGE_MCUCMD) & (RGE_MCUCMD_RXFIFO_EMPTY |
1373 1.1 sevan RGE_MCUCMD_TXFIFO_EMPTY)) == (RGE_MCUCMD_RXFIFO_EMPTY |
1374 1.1 sevan RGE_MCUCMD_TXFIFO_EMPTY))
1375 1.1 sevan break;
1376 1.1 sevan }
1377 1.1 sevan
1378 1.1 sevan /* Soft reset. */
1379 1.1 sevan RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_RESET);
1380 1.1 sevan
1381 1.1 sevan for (i = 0; i < RGE_TIMEOUT; i++) {
1382 1.1 sevan DELAY(100);
1383 1.1 sevan if (!(RGE_READ_1(sc, RGE_CMD) & RGE_CMD_RESET))
1384 1.1 sevan break;
1385 1.1 sevan }
1386 1.1 sevan if (i == RGE_TIMEOUT)
1387 1.16 jakllsch device_printf(sc->sc_dev, "reset never completed!\n");
1388 1.1 sevan }
1389 1.1 sevan
1390 1.1 sevan void
1391 1.1 sevan rge_iff(struct rge_softc *sc)
1392 1.1 sevan {
1393 1.2 sevan struct ifnet *ifp = &sc->sc_ec.ec_if;
1394 1.2 sevan struct ethercom *ac = &sc->sc_ec;
1395 1.1 sevan struct ether_multi *enm;
1396 1.1 sevan struct ether_multistep step;
1397 1.1 sevan uint32_t hashes[2];
1398 1.1 sevan uint32_t rxfilt;
1399 1.1 sevan int h = 0;
1400 1.1 sevan
1401 1.1 sevan rxfilt = RGE_READ_4(sc, RGE_RXCFG);
1402 1.1 sevan rxfilt &= ~(RGE_RXCFG_ALLPHYS | RGE_RXCFG_MULTI);
1403 1.1 sevan ifp->if_flags &= ~IFF_ALLMULTI;
1404 1.1 sevan
1405 1.1 sevan /*
1406 1.1 sevan * Always accept frames destined to our station address.
1407 1.1 sevan * Always accept broadcast frames.
1408 1.1 sevan */
1409 1.1 sevan rxfilt |= RGE_RXCFG_INDIV | RGE_RXCFG_BROAD;
1410 1.1 sevan
1411 1.1 sevan if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
1412 1.1 sevan ifp->if_flags |= IFF_ALLMULTI;
1413 1.1 sevan rxfilt |= RGE_RXCFG_MULTI;
1414 1.1 sevan if (ifp->if_flags & IFF_PROMISC)
1415 1.1 sevan rxfilt |= RGE_RXCFG_ALLPHYS;
1416 1.1 sevan hashes[0] = hashes[1] = 0xffffffff;
1417 1.1 sevan } else {
1418 1.1 sevan rxfilt |= RGE_RXCFG_MULTI;
1419 1.1 sevan /* Program new filter. */
1420 1.1 sevan memset(hashes, 0, sizeof(hashes));
1421 1.1 sevan
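		/*
		 * Hash each multicast address with big-endian CRC32 and use
		 * the top 6 bits as the bit index into the 64-bit filter.
		 */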
1422 1.1 sevan ETHER_FIRST_MULTI(step, ac, enm);
1423 1.1 sevan while (enm != NULL) {
1424 1.1 sevan h = ether_crc32_be(enm->enm_addrlo,
1425 1.1 sevan ETHER_ADDR_LEN) >> 26;
1426 1.1 sevan
1427 1.1 sevan if (h < 32)
1428 1.1 sevan hashes[0] |= (1 << h);
1429 1.1 sevan else
1430 1.1 sevan hashes[1] |= (1 << (h - 32));
1431 1.1 sevan
1432 1.1 sevan ETHER_NEXT_MULTI(step, enm);
1433 1.1 sevan }
1434 1.1 sevan }
1435 1.1 sevan
1436 1.1 sevan RGE_WRITE_4(sc, RGE_RXCFG, rxfilt);
1437 1.2 sevan RGE_WRITE_4(sc, RGE_MAR0, bswap32(hashes[1]));
1438 1.2 sevan RGE_WRITE_4(sc, RGE_MAR4, bswap32(hashes[0]));
1439 1.1 sevan }
1440 1.1 sevan
1441 1.1 sevan void
1442 1.1 sevan rge_set_phy_power(struct rge_softc *sc, int on)
1443 1.1 sevan {
1444 1.1 sevan int i;
1445 1.1 sevan
1446 1.1 sevan if (on) {
1447 1.1 sevan RGE_SETBIT_1(sc, RGE_PMCH, 0xc0);
1448 1.1 sevan
1449 1.1 sevan rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN);
1450 1.1 sevan
1451 1.1 sevan for (i = 0; i < RGE_TIMEOUT; i++) {
1452 1.10 sevan if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 3)
1453 1.1 sevan break;
1454 1.1 sevan DELAY(1000);
1455 1.1 sevan }
1456 1.1 sevan } else
1457 1.1 sevan rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_PDOWN);
1458 1.1 sevan }
1459 1.1 sevan
1460 1.1 sevan void
1461 1.1 sevan rge_phy_config(struct rge_softc *sc)
1462 1.1 sevan {
1463 1.1 sevan uint16_t mcode_ver, val;
1464 1.1 sevan int i;
1465 1.1 sevan static const uint16_t mac_cfg3_a438_value[] =
1466 1.1 sevan { 0x0043, 0x00a7, 0x00d6, 0x00ec, 0x00f6, 0x00fb, 0x00fd, 0x00ff,
1467 1.1 sevan 0x00bb, 0x0058, 0x0029, 0x0013, 0x0009, 0x0004, 0x0002 };
1468 1.1 sevan
1469 1.1 sevan static const uint16_t mac_cfg3_b88e_value[] =
1470 1.5 skrll { 0xc091, 0x6e12, 0xc092, 0x1214, 0xc094, 0x1516, 0xc096, 0x171b,
1471 1.1 sevan 0xc098, 0x1b1c, 0xc09a, 0x1f1f, 0xc09c, 0x2021, 0xc09e, 0x2224,
1472 1.1 sevan 0xc0a0, 0x2424, 0xc0a2, 0x2424, 0xc0a4, 0x2424, 0xc018, 0x0af2,
1473 1.1 sevan 0xc01a, 0x0d4a, 0xc01c, 0x0f26, 0xc01e, 0x118d, 0xc020, 0x14f3,
1474 1.1 sevan 0xc022, 0x175a, 0xc024, 0x19c0, 0xc026, 0x1c26, 0xc089, 0x6050,
1475 1.1 sevan 0xc08a, 0x5f6e, 0xc08c, 0x6e6e, 0xc08e, 0x6e6e, 0xc090, 0x6e12 };
1476 1.1 sevan
1477 1.1 sevan /* Read microcode version. */
1478 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x801e);
1479 1.1 sevan mcode_ver = rge_read_phy_ocp(sc, 0xa438);
1480 1.1 sevan
1481 1.1 sevan if (sc->rge_type == MAC_CFG2) {
1482 1.1 sevan for (i = 0; i < nitems(rtl8125_mac_cfg2_ephy); i++) {
1483 1.1 sevan rge_write_ephy(sc, rtl8125_mac_cfg2_ephy[i].reg,
1484 1.1 sevan rtl8125_mac_cfg2_ephy[i].val);
1485 1.1 sevan }
1486 1.1 sevan
1487 1.1 sevan if (mcode_ver != RGE_MAC_CFG2_MCODE_VER) {
1488 1.1 sevan /* Disable PHY config. */
1489 1.1 sevan RGE_CLRBIT_1(sc, 0xf2, 0x20);
1490 1.1 sevan DELAY(1000);
1491 1.1 sevan
1492 1.1 sevan rge_patch_phy_mcu(sc, 1);
1493 1.1 sevan
1494 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x8024);
1495 1.1 sevan rge_write_phy_ocp(sc, 0xa438, 0x8600);
1496 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0xb82e);
1497 1.1 sevan rge_write_phy_ocp(sc, 0xa438, 0x0001);
1498 1.1 sevan
1499 1.1 sevan RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
1500 1.1 sevan for (i = 0; i < nitems(rtl8125_mac_cfg2_mcu); i++) {
1501 1.1 sevan rge_write_phy_ocp(sc,
1502 1.1 sevan rtl8125_mac_cfg2_mcu[i].reg,
1503 1.1 sevan rtl8125_mac_cfg2_mcu[i].val);
1504 1.1 sevan }
1505 1.1 sevan RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);
1506 1.1 sevan
1507 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0);
1508 1.1 sevan rge_write_phy_ocp(sc, 0xa438, 0);
1509 1.1 sevan RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
1510 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x8024);
1511 1.1 sevan rge_write_phy_ocp(sc, 0xa438, 0);
1512 1.1 sevan
1513 1.1 sevan rge_patch_phy_mcu(sc, 0);
1514 1.1 sevan
1515 1.1 sevan /* Enable PHY config. */
1516 1.1 sevan RGE_SETBIT_1(sc, 0xf2, 0x20);
1517 1.1 sevan
1518 1.1 sevan /* Write microcode version. */
1519 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x801e);
1520 1.1 sevan rge_write_phy_ocp(sc, 0xa438, RGE_MAC_CFG2_MCODE_VER);
1521 1.1 sevan }
1522 1.5 skrll
1523 1.1 sevan val = rge_read_phy_ocp(sc, 0xad40) & ~0x03ff;
1524 1.1 sevan rge_write_phy_ocp(sc, 0xad40, val | 0x0084);
1525 1.1 sevan RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
1526 1.1 sevan val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
1527 1.1 sevan rge_write_phy_ocp(sc, 0xad16, val | 0x0006);
1528 1.1 sevan val = rge_read_phy_ocp(sc, 0xad32) & ~0x03ff;
1529 1.1 sevan rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
1530 1.1 sevan RGE_PHY_CLRBIT(sc, 0xac08, 0x1100);
1531 1.1 sevan val = rge_read_phy_ocp(sc, 0xac8a) & ~0xf000;
1532 1.1 sevan rge_write_phy_ocp(sc, 0xac8a, val | 0x7000);
1533 1.1 sevan RGE_PHY_SETBIT(sc, 0xad18, 0x0400);
1534 1.1 sevan RGE_PHY_SETBIT(sc, 0xad1a, 0x03ff);
1535 1.1 sevan RGE_PHY_SETBIT(sc, 0xad1c, 0x03ff);
1536 1.1 sevan
1537 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x80ea);
1538 1.1 sevan val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1539 1.1 sevan rge_write_phy_ocp(sc, 0xa438, val | 0xc400);
1540 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x80eb);
1541 1.1 sevan val = rge_read_phy_ocp(sc, 0xa438) & ~0x0700;
1542 1.1 sevan rge_write_phy_ocp(sc, 0xa438, val | 0x0300);
1543 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x80f8);
1544 1.1 sevan val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1545 1.1 sevan rge_write_phy_ocp(sc, 0xa438, val | 0x1c00);
1546 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x80f1);
1547 1.1 sevan val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1548 1.1 sevan rge_write_phy_ocp(sc, 0xa438, val | 0x3000);
1549 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x80fe);
1550 1.1 sevan val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1551 1.1 sevan rge_write_phy_ocp(sc, 0xa438, val | 0xa500);
1552 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x8102);
1553 1.1 sevan val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1554 1.1 sevan rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
1555 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x8105);
1556 1.1 sevan val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1557 1.1 sevan rge_write_phy_ocp(sc, 0xa438, val | 0x3300);
1558 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x8100);
1559 1.1 sevan val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1560 1.1 sevan rge_write_phy_ocp(sc, 0xa438, val | 0x7000);
1561 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x8104);
1562 1.1 sevan val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1563 1.1 sevan rge_write_phy_ocp(sc, 0xa438, val | 0xf000);
1564 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x8106);
1565 1.1 sevan val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1566 1.1 sevan rge_write_phy_ocp(sc, 0xa438, val | 0x6500);
1567 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x80dc);
1568 1.1 sevan val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1569 1.1 sevan rge_write_phy_ocp(sc, 0xa438, val | 0xed00);
1570 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x80df);
1571 1.1 sevan RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
1572 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x80e1);
1573 1.1 sevan RGE_PHY_CLRBIT(sc, 0xa438, 0x0100);
1574 1.1 sevan val = rge_read_phy_ocp(sc, 0xbf06) & ~0x003f;
1575 1.1 sevan rge_write_phy_ocp(sc, 0xbf06, val | 0x0038);
1576 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x819f);
1577 1.1 sevan rge_write_phy_ocp(sc, 0xa438, 0xd0b6);
1578 1.1 sevan rge_write_phy_ocp(sc, 0xbc34, 0x5555);
1579 1.1 sevan val = rge_read_phy_ocp(sc, 0xbf0a) & ~0x0e00;
1580 1.1 sevan rge_write_phy_ocp(sc, 0xbf0a, val | 0x0a00);
1581 1.1 sevan RGE_PHY_CLRBIT(sc, 0xa5c0, 0x0400);
1582 1.1 sevan RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1583 1.1 sevan } else {
1584 1.1 sevan for (i = 0; i < nitems(rtl8125_mac_cfg3_ephy); i++)
1585 1.1 sevan rge_write_ephy(sc, rtl8125_mac_cfg3_ephy[i].reg,
1586 1.1 sevan rtl8125_mac_cfg3_ephy[i].val);
1587 1.1 sevan
1588 1.1 sevan if (mcode_ver != RGE_MAC_CFG3_MCODE_VER) {
1589 1.1 sevan /* Disable PHY config. */
1590 1.1 sevan RGE_CLRBIT_1(sc, 0xf2, 0x20);
1591 1.1 sevan DELAY(1000);
1592 1.1 sevan
1593 1.1 sevan rge_patch_phy_mcu(sc, 1);
1594 1.1 sevan
1595 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x8024);
1596 1.1 sevan rge_write_phy_ocp(sc, 0xa438, 0x8601);
1597 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0xb82e);
1598 1.1 sevan rge_write_phy_ocp(sc, 0xa438, 0x0001);
1599 1.1 sevan
1600 1.1 sevan RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
1601 1.1 sevan for (i = 0; i < nitems(rtl8125_mac_cfg3_mcu); i++) {
1602 1.1 sevan rge_write_phy_ocp(sc,
1603 1.1 sevan rtl8125_mac_cfg3_mcu[i].reg,
1604 1.1 sevan rtl8125_mac_cfg3_mcu[i].val);
1605 1.1 sevan }
1606 1.1 sevan RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);
1607 1.1 sevan
1608 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0);
1609 1.1 sevan rge_write_phy_ocp(sc, 0xa438, 0);
1610 1.1 sevan RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
1611 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x8024);
1612 1.1 sevan rge_write_phy_ocp(sc, 0xa438, 0);
1613 1.1 sevan
1614 1.1 sevan rge_patch_phy_mcu(sc, 0);
1615 1.1 sevan
1616 1.1 sevan /* Enable PHY config. */
1617 1.1 sevan RGE_SETBIT_1(sc, 0xf2, 0x20);
1618 1.1 sevan
1619 1.1 sevan /* Write microcode version. */
1620 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x801e);
1621 1.1 sevan rge_write_phy_ocp(sc, 0xa438, RGE_MAC_CFG3_MCODE_VER);
1622 1.1 sevan }
1623 1.1 sevan
1624 1.1 sevan RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
1625 1.1 sevan val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
1626 1.1 sevan rge_write_phy_ocp(sc, 0xad16, val | 0x03ff);
1627 1.1 sevan val = rge_read_phy_ocp(sc, 0xad32) & ~0x003f;
1628 1.1 sevan rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
1629 1.1 sevan RGE_PHY_CLRBIT(sc, 0xac08, 0x1000);
1630 1.1 sevan RGE_PHY_CLRBIT(sc, 0xac08, 0x0100);
1631 1.1 sevan val = rge_read_phy_ocp(sc, 0xacc0) & ~0x0003;
1632 1.1 sevan rge_write_phy_ocp(sc, 0xacc0, val | 0x0002);
1633 1.1 sevan val = rge_read_phy_ocp(sc, 0xad40) & ~0x00e0;
1634 1.1 sevan rge_write_phy_ocp(sc, 0xad40, val | 0x0040);
1635 1.1 sevan val = rge_read_phy_ocp(sc, 0xad40) & ~0x0007;
1636 1.1 sevan rge_write_phy_ocp(sc, 0xad40, val | 0x0004);
1637 1.1 sevan RGE_PHY_CLRBIT(sc, 0xac14, 0x0080);
1638 1.1 sevan RGE_PHY_CLRBIT(sc, 0xac80, 0x0300);
1639 1.1 sevan val = rge_read_phy_ocp(sc, 0xac5e) & ~0x0007;
1640 1.1 sevan rge_write_phy_ocp(sc, 0xac5e, val | 0x0002);
1641 1.1 sevan rge_write_phy_ocp(sc, 0xad4c, 0x00a8);
1642 1.1 sevan rge_write_phy_ocp(sc, 0xac5c, 0x01ff);
1643 1.1 sevan val = rge_read_phy_ocp(sc, 0xac8a) & ~0x00f0;
1644 1.1 sevan rge_write_phy_ocp(sc, 0xac8a, val | 0x0030);
1645 1.1 sevan rge_write_phy_ocp(sc, 0xb87c, 0x80a2);
1646 1.1 sevan rge_write_phy_ocp(sc, 0xb87e, 0x0153);
1647 1.1 sevan rge_write_phy_ocp(sc, 0xb87c, 0x809c);
1648 1.1 sevan rge_write_phy_ocp(sc, 0xb87e, 0x0153);
1649 1.1 sevan
1650 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x81b3);
1651 1.1 sevan for (i = 0; i < nitems(mac_cfg3_a438_value); i++)
1652 1.1 sevan rge_write_phy_ocp(sc, 0xa438, mac_cfg3_a438_value[i]);
1653 1.1 sevan for (i = 0; i < 26; i++)
1654 1.1 sevan rge_write_phy_ocp(sc, 0xa438, 0);
1655 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x8257);
1656 1.1 sevan rge_write_phy_ocp(sc, 0xa438, 0x020f);
1657 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x80ea);
1658 1.1 sevan rge_write_phy_ocp(sc, 0xa438, 0x7843);
1659 1.1 sevan
1660 1.1 sevan rge_patch_phy_mcu(sc, 1);
1661 1.1 sevan RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
1662 1.1 sevan RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
1663 1.1 sevan for (i = 0; i < nitems(mac_cfg3_b88e_value); i += 2) {
1664 1.1 sevan rge_write_phy_ocp(sc, 0xb88e, mac_cfg3_b88e_value[i]);
1665 1.1 sevan rge_write_phy_ocp(sc, 0xb890,
1666 1.1 sevan mac_cfg3_b88e_value[i + 1]);
1667 1.1 sevan }
1668 1.1 sevan RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
1669 1.1 sevan rge_patch_phy_mcu(sc, 0);
1670 1.1 sevan
1671 1.1 sevan RGE_PHY_SETBIT(sc, 0xd068, 0x2000);
1672 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x81a2);
1673 1.1 sevan RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
1674 1.1 sevan val = rge_read_phy_ocp(sc, 0xb54c) & ~0xff00;
1675 1.1 sevan rge_write_phy_ocp(sc, 0xb54c, val | 0xdb00);
1676 1.1 sevan RGE_PHY_CLRBIT(sc, 0xa454, 0x0001);
1677 1.1 sevan RGE_PHY_SETBIT(sc, 0xa5d4, 0x0020);
1678 1.1 sevan RGE_PHY_CLRBIT(sc, 0xad4e, 0x0010);
1679 1.1 sevan RGE_PHY_CLRBIT(sc, 0xa86a, 0x0001);
1680 1.1 sevan RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1681 1.1 sevan }
1682 1.1 sevan
1683 1.1 sevan /* Disable EEE. */
1684 1.1 sevan RGE_MAC_CLRBIT(sc, 0xe040, 0x0003);
1685 1.1 sevan RGE_MAC_CLRBIT(sc, 0xeb62, 0x0006);
1686 1.1 sevan RGE_PHY_CLRBIT(sc, 0xa432, 0x0010);
1687 1.1 sevan RGE_PHY_CLRBIT(sc, 0xa5d0, 0x0006);
1688 1.1 sevan RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0001);
1689 1.1 sevan RGE_PHY_CLRBIT(sc, 0xa6d8, 0x0010);
1690 1.1 sevan RGE_PHY_CLRBIT(sc, 0xa428, 0x0080);
1691 1.1 sevan RGE_PHY_CLRBIT(sc, 0xa4a2, 0x0200);
1692 1.1 sevan
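	/*
	 * The remaining bits are cleared while the PHY MCU is held in
	 * patch mode, mirroring the sequence used by the vendor driver.
	 */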
1693 1.1 sevan rge_patch_phy_mcu(sc, 1);
1694 1.1 sevan RGE_MAC_CLRBIT(sc, 0xe052, 0x0001);
1695 1.1 sevan RGE_PHY_CLRBIT(sc, 0xa442, 0x3000);
1696 1.1 sevan RGE_PHY_CLRBIT(sc, 0xa430, 0x8000);
1697 1.1 sevan rge_patch_phy_mcu(sc, 0);
1698 1.1 sevan }
1699 1.1 sevan
1700 1.1 sevan void
1701 1.1 sevan rge_set_macaddr(struct rge_softc *sc, const uint8_t *addr)
1702 1.1 sevan {
1703 1.1 sevan RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
1704 1.1 sevan RGE_WRITE_4(sc, RGE_MAC0,
1705 1.1 sevan addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
1706 1.1 sevan RGE_WRITE_4(sc, RGE_MAC4,
1707 1.1 sevan addr[5] << 8 | addr[4]);
1708 1.1 sevan RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
1709 1.1 sevan }
1710 1.1 sevan
1711 1.1 sevan void
1712 1.1 sevan rge_get_macaddr(struct rge_softc *sc, uint8_t *addr)
1713 1.1 sevan {
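	/*
	 * Note: these word-sized accesses assume addr is suitably
	 * aligned for uint32_t/uint16_t stores.
	 */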
1714 1.1 sevan *(uint32_t *)&addr[0] = RGE_READ_4(sc, RGE_ADDR0);
1715 1.1 sevan *(uint16_t *)&addr[4] = RGE_READ_2(sc, RGE_ADDR1);
1716 1.1 sevan }
1717 1.1 sevan
1718 1.1 sevan void
1719 1.1 sevan rge_hw_init(struct rge_softc *sc)
1720 1.1 sevan {
1721 1.1 sevan int i;
1722 1.1 sevan
1723 1.1 sevan RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
1724 1.1 sevan RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
1725 1.1 sevan RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
1726 1.1 sevan RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
1727 1.1 sevan RGE_CLRBIT_1(sc, 0xf1, 0x80);
1728 1.1 sevan
1729 1.1 sevan /* Disable UPS. */
1730 1.1 sevan RGE_MAC_CLRBIT(sc, 0xd40a, 0x0010);
1731 1.1 sevan
1732 1.1 sevan /* Configure MAC MCU. */
1733 1.1 sevan rge_write_mac_ocp(sc, 0xfc38, 0);
1734 1.1 sevan
1735 1.1 sevan for (i = 0xfc28; i < 0xfc38; i += 2)
1736 1.1 sevan rge_write_mac_ocp(sc, i, 0);
1737 1.1 sevan
1738 1.1 sevan DELAY(3000);
1739 1.1 sevan rge_write_mac_ocp(sc, 0xfc26, 0);
1740 1.1 sevan
1741 1.1 sevan if (sc->rge_type == MAC_CFG3) {
1742 1.1 sevan for (i = 0; i < nitems(rtl8125_def_bps); i++)
1743 1.1 sevan rge_write_mac_ocp(sc, rtl8125_def_bps[i].reg,
1744 1.1 sevan rtl8125_def_bps[i].val);
1745 1.1 sevan }
1746 1.1 sevan
1747 1.1 sevan /* Disable PHY power saving. */
1748 1.1 sevan rge_disable_phy_ocp_pwrsave(sc);
1749 1.1 sevan
1750 1.1 sevan /* Set PCIe uncorrectable error status. */
1751 1.1 sevan rge_write_csi(sc, 0x108,
1752 1.1 sevan rge_read_csi(sc, 0x108) | 0x00100000);
1753 1.1 sevan }
1754 1.1 sevan
1755 1.1 sevan void
1756 1.1 sevan rge_disable_phy_ocp_pwrsave(struct rge_softc *sc)
1757 1.1 sevan {
1758 1.1 sevan if (rge_read_phy_ocp(sc, 0xc416) != 0x0500) {
1759 1.1 sevan rge_patch_phy_mcu(sc, 1);
1760 1.1 sevan rge_write_phy_ocp(sc, 0xc416, 0);
1761 1.1 sevan rge_write_phy_ocp(sc, 0xc416, 0x0500);
1762 1.1 sevan rge_patch_phy_mcu(sc, 0);
1763 1.1 sevan }
1764 1.1 sevan }
1765 1.1 sevan
1766 1.1 sevan void
1767 1.1 sevan rge_patch_phy_mcu(struct rge_softc *sc, int set)
1768 1.1 sevan {
1769 1.1 sevan uint16_t val;
1770 1.1 sevan int i;
1771 1.1 sevan
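	/*
	 * Bit 4 of OCP register 0xb820 appears to request PHY MCU patch
	 * mode; the loop below polls bit 6 of 0xb800 for the MCU to
	 * acknowledge the request.
	 */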
1772 1.1 sevan if (set)
1773 1.1 sevan RGE_PHY_SETBIT(sc, 0xb820, 0x0010);
1774 1.1 sevan else
1775 1.1 sevan RGE_PHY_CLRBIT(sc, 0xb820, 0x0010);
1776 1.1 sevan
1777 1.1 sevan for (i = 0; i < 1000; i++) {
1778 1.1 sevan val = rge_read_phy_ocp(sc, 0xb800) & 0x0040;
1779 1.1 sevan DELAY(100);
1780 1.1 sevan if (val == 0x0040)
1781 1.1 sevan break;
1782 1.1 sevan }
1783 1.1 sevan if (i == 1000)
1784 1.13 sevan 		aprint_error_dev(sc->sc_dev, "timeout waiting to patch PHY MCU\n");
1785 1.1 sevan }
1786 1.1 sevan
1787 1.1 sevan void
1788 1.1 sevan rge_add_media_types(struct rge_softc *sc)
1789 1.1 sevan {
1790 1.1 sevan ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
1791 1.1 sevan ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
1792 1.1 sevan ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
1793 1.1 sevan ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
1794 1.1 sevan ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T, 0, NULL);
1795 1.1 sevan ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
1796 1.1 sevan ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
1797 1.1 sevan ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX, 0, NULL);
1798 1.1 sevan }
1799 1.1 sevan
1800 1.1 sevan void
1801 1.1 sevan rge_config_imtype(struct rge_softc *sc, int imtype)
1802 1.1 sevan {
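	/*
	 * RGE_IMTYPE_NONE takes an interrupt per RX/TX event, while
	 * RGE_IMTYPE_SIM simulates interrupt moderation: RX/TX work is
	 * acknowledged from the PCS timeout interrupt raised by the
	 * timer programmed in rge_setup_sim_im().
	 */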
1803 1.1 sevan switch (imtype) {
1804 1.1 sevan case RGE_IMTYPE_NONE:
1805 1.1 sevan sc->rge_intrs = RGE_INTRS;
1806 1.1 sevan sc->rge_rx_ack = RGE_ISR_RX_OK | RGE_ISR_RX_DESC_UNAVAIL |
1807 1.1 sevan RGE_ISR_RX_FIFO_OFLOW;
1808 1.1 sevan sc->rge_tx_ack = RGE_ISR_TX_OK;
1809 1.1 sevan break;
1810 1.1 sevan case RGE_IMTYPE_SIM:
1811 1.1 sevan sc->rge_intrs = RGE_INTRS_TIMER;
1812 1.1 sevan sc->rge_rx_ack = RGE_ISR_PCS_TIMEOUT;
1813 1.1 sevan sc->rge_tx_ack = RGE_ISR_PCS_TIMEOUT;
1814 1.1 sevan break;
1815 1.1 sevan default:
1816 1.14 sevan panic("%s: unknown imtype %d", device_xname(sc->sc_dev), imtype);
1817 1.1 sevan }
1818 1.1 sevan }
1819 1.1 sevan
1820 1.1 sevan void
1821 1.1 sevan rge_disable_sim_im(struct rge_softc *sc)
1822 1.1 sevan {
1823 1.1 sevan RGE_WRITE_4(sc, RGE_TIMERINT, 0);
1824 1.1 sevan sc->rge_timerintr = 0;
1825 1.1 sevan }
1826 1.1 sevan
1827 1.1 sevan void
1828 1.1 sevan rge_setup_sim_im(struct rge_softc *sc)
1829 1.1 sevan {
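	/*
	 * 0x2600 is the moderation interval inherited from the reference
	 * driver; writing RGE_TIMERCNT is assumed to (re)start the timer.
	 */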
1830 1.1 sevan RGE_WRITE_4(sc, RGE_TIMERINT, 0x2600);
1831 1.1 sevan RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
1832 1.1 sevan sc->rge_timerintr = 1;
1833 1.1 sevan }
1834 1.1 sevan
1835 1.1 sevan void
1836 1.1 sevan rge_setup_intr(struct rge_softc *sc, int imtype)
1837 1.1 sevan {
1838 1.1 sevan rge_config_imtype(sc, imtype);
1839 1.1 sevan
1840 1.1 sevan /* Enable interrupts. */
1841 1.1 sevan RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
1842 1.1 sevan
1843 1.1 sevan switch (imtype) {
1844 1.1 sevan case RGE_IMTYPE_NONE:
1845 1.1 sevan rge_disable_sim_im(sc);
1846 1.1 sevan break;
1847 1.1 sevan case RGE_IMTYPE_SIM:
1848 1.1 sevan rge_setup_sim_im(sc);
1849 1.1 sevan break;
1850 1.1 sevan default:
1851 1.14 sevan panic("%s: unknown imtype %d", device_xname(sc->sc_dev), imtype);
1852 1.1 sevan }
1853 1.1 sevan }
1854 1.1 sevan
1855 1.1 sevan void
1856 1.1 sevan rge_exit_oob(struct rge_softc *sc)
1857 1.1 sevan {
1858 1.1 sevan int i;
1859 1.1 sevan
1860 1.1 sevan RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
1861 1.1 sevan RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
1862 1.1 sevan RGE_RXCFG_ERRPKT);
1863 1.1 sevan
1864 1.1 sevan /* Disable RealWoW. */
1865 1.1 sevan rge_write_mac_ocp(sc, 0xc0bc, 0x00ff);
1866 1.1 sevan
1867 1.1 sevan rge_reset(sc);
1868 1.1 sevan
1869 1.1 sevan /* Disable OOB. */
1870 1.1 sevan RGE_CLRBIT_1(sc, RGE_MCUCMD, RGE_MCUCMD_IS_OOB);
1871 1.1 sevan
1872 1.1 sevan RGE_MAC_CLRBIT(sc, 0xe8de, 0x4000);
1873 1.1 sevan
1874 1.1 sevan for (i = 0; i < 10; i++) {
1875 1.1 sevan DELAY(100);
1876 1.1 sevan if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
1877 1.1 sevan break;
1878 1.1 sevan }
1879 1.1 sevan
1880 1.1 sevan rge_write_mac_ocp(sc, 0xc0aa, 0x07d0);
1881 1.1 sevan rge_write_mac_ocp(sc, 0xc0a6, 0x0150);
1882 1.1 sevan rge_write_mac_ocp(sc, 0xc01e, 0x5555);
1883 1.1 sevan
1884 1.1 sevan for (i = 0; i < 10; i++) {
1885 1.1 sevan DELAY(100);
1886 1.1 sevan if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
1887 1.1 sevan break;
1888 1.1 sevan }
1889 1.1 sevan
1890 1.1 sevan if (rge_read_mac_ocp(sc, 0xd42c) & 0x0100) {
1891 1.1 sevan for (i = 0; i < RGE_TIMEOUT; i++) {
1892 1.10 sevan if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 2)
1893 1.1 sevan break;
1894 1.1 sevan DELAY(1000);
1895 1.1 sevan }
1896 1.1 sevan RGE_MAC_CLRBIT(sc, 0xd408, 0x0100);
1897 1.1 sevan RGE_PHY_CLRBIT(sc, 0xa468, 0x000a);
1898 1.1 sevan }
1899 1.1 sevan }
1900 1.1 sevan
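/*
 * CSI access: an indirect window used here to read and modify PCIe
 * configuration space (e.g. the 0x108 register touched in rge_hw_init()).
 */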
1901 1.1 sevan void
1902 1.1 sevan rge_write_csi(struct rge_softc *sc, uint32_t reg, uint32_t val)
1903 1.1 sevan {
1904 1.1 sevan int i;
1905 1.1 sevan
1906 1.1 sevan RGE_WRITE_4(sc, RGE_CSIDR, val);
1907 1.1 sevan RGE_WRITE_4(sc, RGE_CSIAR, (1 << 16) | (reg & RGE_CSIAR_ADDR_MASK) |
1908 1.1 sevan (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT) | RGE_CSIAR_BUSY);
1909 1.1 sevan
1910 1.1 sevan for (i = 0; i < 10; i++) {
1911 1.1 sevan DELAY(100);
1912 1.1 sevan if (!(RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY))
1913 1.1 sevan break;
1914 1.1 sevan }
1915 1.1 sevan
1916 1.1 sevan DELAY(20);
1917 1.1 sevan }
1918 1.1 sevan
1919 1.1 sevan uint32_t
1920 1.1 sevan rge_read_csi(struct rge_softc *sc, uint32_t reg)
1921 1.1 sevan {
1922 1.1 sevan int i;
1923 1.1 sevan
1924 1.1 sevan RGE_WRITE_4(sc, RGE_CSIAR, (1 << 16) | (reg & RGE_CSIAR_ADDR_MASK) |
1925 1.1 sevan (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT));
1926 1.1 sevan
1927 1.1 sevan for (i = 0; i < 10; i++) {
1928 1.1 sevan DELAY(100);
1929 1.1 sevan if (RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY)
1930 1.1 sevan break;
1931 1.1 sevan }
1932 1.1 sevan
1933 1.1 sevan DELAY(20);
1934 1.1 sevan
1935 1.1 sevan return (RGE_READ_4(sc, RGE_CSIDR));
1936 1.1 sevan }
1937 1.1 sevan
1938 1.1 sevan void
1939 1.1 sevan rge_write_mac_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
1940 1.1 sevan {
1941 1.1 sevan uint32_t tmp;
1942 1.1 sevan
1943 1.1 sevan tmp = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
1944 1.1 sevan tmp += val;
1945 1.1 sevan tmp |= RGE_MACOCP_BUSY;
1946 1.1 sevan RGE_WRITE_4(sc, RGE_MACOCP, tmp);
1947 1.1 sevan }
1948 1.1 sevan
1949 1.1 sevan uint16_t
1950 1.1 sevan rge_read_mac_ocp(struct rge_softc *sc, uint16_t reg)
1951 1.1 sevan {
1952 1.1 sevan uint32_t val;
1953 1.1 sevan
1954 1.1 sevan val = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
1955 1.1 sevan RGE_WRITE_4(sc, RGE_MACOCP, val);
1956 1.1 sevan
1957 1.1 sevan return (RGE_READ_4(sc, RGE_MACOCP) & RGE_MACOCP_DATA_MASK);
1958 1.1 sevan }
1959 1.1 sevan
1960 1.1 sevan void
1961 1.1 sevan rge_write_ephy(struct rge_softc *sc, uint16_t reg, uint16_t val)
1962 1.1 sevan {
1963 1.1 sevan uint32_t tmp;
1964 1.1 sevan int i;
1965 1.1 sevan
1966 1.1 sevan tmp = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
1967 1.1 sevan tmp |= RGE_EPHYAR_BUSY | (val & RGE_EPHYAR_DATA_MASK);
1968 1.1 sevan RGE_WRITE_4(sc, RGE_EPHYAR, tmp);
1969 1.1 sevan
1970 1.1 sevan for (i = 0; i < 10; i++) {
1971 1.1 sevan DELAY(100);
1972 1.1 sevan if (!(RGE_READ_4(sc, RGE_EPHYAR) & RGE_EPHYAR_BUSY))
1973 1.1 sevan break;
1974 1.1 sevan }
1975 1.1 sevan
1976 1.1 sevan DELAY(20);
1977 1.1 sevan }
1978 1.1 sevan
1979 1.1 sevan void
1980 1.1 sevan rge_write_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg, uint16_t val)
1981 1.1 sevan {
1982 1.1 sevan uint16_t off, phyaddr;
1983 1.1 sevan
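	/*
	 * Fold a conventional (addr, reg) MII pair into the flat OCP
	 * address space: the standard registers live in pages of eight
	 * starting at RGE_PHYBASE, two bytes per register.
	 */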
1984 1.1 sevan phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
1985 1.1 sevan phyaddr <<= 4;
1986 1.1 sevan
1987 1.1 sevan off = addr ? reg : 0x10 + (reg % 8);
1988 1.1 sevan
1989 1.1 sevan phyaddr += (off - 16) << 1;
1990 1.1 sevan
1991 1.1 sevan rge_write_phy_ocp(sc, phyaddr, val);
1992 1.1 sevan }
1993 1.1 sevan
1994 1.1 sevan void
1995 1.1 sevan rge_write_phy_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
1996 1.1 sevan {
1997 1.1 sevan uint32_t tmp;
1998 1.1 sevan int i;
1999 1.1 sevan
2000 1.1 sevan tmp = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
2001 1.1 sevan tmp |= RGE_PHYOCP_BUSY | val;
2002 1.1 sevan RGE_WRITE_4(sc, RGE_PHYOCP, tmp);
2003 1.1 sevan
2004 1.1 sevan for (i = 0; i < RGE_TIMEOUT; i++) {
2005 1.1 sevan DELAY(1);
2006 1.1 sevan if (!(RGE_READ_4(sc, RGE_PHYOCP) & RGE_PHYOCP_BUSY))
2007 1.1 sevan break;
2008 1.1 sevan }
2009 1.1 sevan }
2010 1.1 sevan
2011 1.1 sevan uint16_t
2012 1.1 sevan rge_read_phy_ocp(struct rge_softc *sc, uint16_t reg)
2013 1.1 sevan {
2014 1.1 sevan uint32_t val;
2015 1.1 sevan int i;
2016 1.1 sevan
2017 1.1 sevan val = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
2018 1.1 sevan RGE_WRITE_4(sc, RGE_PHYOCP, val);
2019 1.1 sevan
2020 1.1 sevan for (i = 0; i < RGE_TIMEOUT; i++) {
2021 1.1 sevan DELAY(1);
2022 1.1 sevan val = RGE_READ_4(sc, RGE_PHYOCP);
2023 1.1 sevan if (val & RGE_PHYOCP_BUSY)
2024 1.1 sevan break;
2025 1.1 sevan }
2026 1.1 sevan
2027 1.1 sevan return (val & RGE_PHYOCP_DATA_MASK);
2028 1.1 sevan }
2029 1.1 sevan
2030 1.1 sevan int
2031 1.1 sevan rge_get_link_status(struct rge_softc *sc)
2032 1.1 sevan {
2033 1.1 sevan return ((RGE_READ_2(sc, RGE_PHYSTAT) & RGE_PHYSTAT_LINK) ? 1 : 0);
2034 1.1 sevan }
2035 1.1 sevan
2036 1.1 sevan void
2037 1.3 sevan rge_txstart(struct work *wk, void *arg)
2038 1.1 sevan {
2039 1.1 sevan struct rge_softc *sc = arg;
2040 1.1 sevan
2041 1.1 sevan RGE_WRITE_2(sc, RGE_TXSTART, RGE_TXSTART_START);
2042 1.1 sevan }
2043 1.1 sevan
2044 1.1 sevan void
2045 1.1 sevan rge_tick(void *arg)
2046 1.1 sevan {
2047 1.1 sevan struct rge_softc *sc = arg;
2048 1.1 sevan int s;
2049 1.1 sevan
2050 1.1 sevan s = splnet();
2051 1.1 sevan rge_link_state(sc);
2052 1.1 sevan splx(s);
2053 1.1 sevan
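	/* Poll the link state again in one second. */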
2054 1.1 sevan timeout_add_sec(&sc->sc_timeout, 1);
2055 1.1 sevan }
2056 1.1 sevan
2057 1.1 sevan void
2058 1.1 sevan rge_link_state(struct rge_softc *sc)
2059 1.1 sevan {
2060 1.2 sevan struct ifnet *ifp = &sc->sc_ec.ec_if;
2061 1.1 sevan int link = LINK_STATE_DOWN;
2062 1.1 sevan
2063 1.1 sevan if (rge_get_link_status(sc))
2064 1.1 sevan link = LINK_STATE_UP;
2065 1.1 sevan
2066 1.1 sevan if (ifp->if_link_state != link) {
2068 1.2 sevan 		if_link_state_change(ifp, link);
2069 1.1 sevan }
2070 1.1 sevan }
2071