if_rge.c revision 1.18
1 1.18 knakahar /* $NetBSD: if_rge.c,v 1.18 2021/03/02 07:55:16 knakahara Exp $ */
2 1.17 jakllsch /* $OpenBSD: if_rge.c,v 1.9 2020/12/12 11:48:53 jan Exp $ */
3 1.1 sevan
4 1.1 sevan /*
5 1.17 jakllsch * Copyright (c) 2019, 2020 Kevin Lo <kevlo (at) openbsd.org>
6 1.1 sevan *
7 1.1 sevan * Permission to use, copy, modify, and distribute this software for any
8 1.1 sevan * purpose with or without fee is hereby granted, provided that the above
9 1.1 sevan * copyright notice and this permission notice appear in all copies.
10 1.1 sevan *
11 1.1 sevan * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 1.1 sevan * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 1.1 sevan * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 1.1 sevan * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 1.1 sevan * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 1.1 sevan * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 1.1 sevan * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 1.1 sevan */
19 1.1 sevan
20 1.2 sevan #include <sys/cdefs.h>
21 1.18 knakahar __KERNEL_RCSID(0, "$NetBSD: if_rge.c,v 1.18 2021/03/02 07:55:16 knakahara Exp $");
22 1.2 sevan
23 1.2 sevan #include <sys/types.h>
24 1.1 sevan
25 1.1 sevan #include <sys/param.h>
26 1.1 sevan #include <sys/systm.h>
27 1.1 sevan #include <sys/sockio.h>
28 1.1 sevan #include <sys/mbuf.h>
29 1.1 sevan #include <sys/malloc.h>
30 1.1 sevan #include <sys/kernel.h>
31 1.1 sevan #include <sys/socket.h>
32 1.1 sevan #include <sys/device.h>
33 1.1 sevan #include <sys/endian.h>
34 1.3 sevan #include <sys/callout.h>
35 1.3 sevan #include <sys/workqueue.h>
36 1.1 sevan
37 1.1 sevan #include <net/if.h>
38 1.2 sevan
39 1.2 sevan #include <net/if_dl.h>
40 1.2 sevan #include <net/if_ether.h>
41 1.2 sevan
42 1.1 sevan #include <net/if_media.h>
43 1.1 sevan
44 1.1 sevan #include <netinet/in.h>
45 1.2 sevan #include <net/if_ether.h>
46 1.1 sevan
47 1.1 sevan #include <net/bpf.h>
48 1.1 sevan
49 1.2 sevan #include <sys/bus.h>
50 1.1 sevan #include <machine/intr.h>
51 1.1 sevan
52 1.1 sevan #include <dev/mii/mii.h>
53 1.1 sevan
54 1.1 sevan #include <dev/pci/pcivar.h>
55 1.1 sevan #include <dev/pci/pcireg.h>
56 1.1 sevan #include <dev/pci/pcidevs.h>
57 1.1 sevan
58 1.1 sevan #include <dev/pci/if_rgereg.h>
59 1.1 sevan
60 1.2 sevan #ifdef __NetBSD__
61 1.2 sevan #define letoh32 htole32
62 1.2 sevan #define nitems(x) __arraycount(x)
63 1.7 sevan
64 1.7 sevan static struct mbuf *
65 1.17 jakllsch MCLGETL(struct rge_softc *sc __unused, int how,
66 1.17 jakllsch u_int size)
67 1.7 sevan {
68 1.7 sevan struct mbuf *m;
69 1.7 sevan
70 1.7 sevan MGETHDR(m, how, MT_DATA);
71 1.7 sevan if (m == NULL)
72 1.7 sevan return NULL;
73 1.7 sevan
74 1.7 sevan MEXTMALLOC(m, size, how);
75 1.7 sevan if ((m->m_flags & M_EXT) == 0) {
76 1.7 sevan m_freem(m);
77 1.7 sevan return NULL;
78 1.7 sevan }
79 1.7 sevan return m;
80 1.7 sevan }
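/*
 * Note: this MCLGETL() is a local NetBSD compatibility shim, apparently
 * mirroring OpenBSD's MCLGETL(): it allocates a packet-header mbuf and
 * attaches an external buffer of at least "size" bytes, returning NULL
 * on failure.
 */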
81 1.7 sevan
82 1.3 sevan #ifdef NET_MPSAFE
83 1.3 sevan #define RGE_MPSAFE 1
84 1.3 sevan #define CALLOUT_FLAGS CALLOUT_MPSAFE
85 1.3 sevan #else
86 1.3 sevan #define CALLOUT_FLAGS 0
87 1.3 sevan #endif
88 1.2 sevan #endif
89 1.2 sevan
90 1.17 jakllsch #ifdef RGE_DEBUG
91 1.17 jakllsch #define DPRINTF(x) do { if (rge_debug > 0) printf x; } while (0)
92 1.17 jakllsch int rge_debug = 0;
93 1.17 jakllsch #else
94 1.17 jakllsch #define DPRINTF(x)
95 1.17 jakllsch #endif
96 1.17 jakllsch
97 1.2 sevan static int rge_match(device_t, cfdata_t, void *);
98 1.5 skrll static void rge_attach(device_t, device_t, void *);
99 1.1 sevan int rge_intr(void *);
100 1.1 sevan int rge_encap(struct rge_softc *, struct mbuf *, int);
101 1.2 sevan int rge_ioctl(struct ifnet *, u_long, void *);
102 1.2 sevan void rge_start(struct ifnet *);
103 1.1 sevan void rge_watchdog(struct ifnet *);
104 1.1 sevan int rge_init(struct ifnet *);
105 1.17 jakllsch void rge_stop(struct ifnet *, int);
106 1.1 sevan int rge_ifmedia_upd(struct ifnet *);
107 1.1 sevan void rge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
108 1.1 sevan int rge_allocmem(struct rge_softc *);
109 1.1 sevan int rge_newbuf(struct rge_softc *, int);
110 1.1 sevan void rge_discard_rxbuf(struct rge_softc *, int);
111 1.1 sevan int rge_rx_list_init(struct rge_softc *);
112 1.1 sevan void rge_tx_list_init(struct rge_softc *);
113 1.1 sevan int rge_rxeof(struct rge_softc *);
114 1.1 sevan int rge_txeof(struct rge_softc *);
115 1.1 sevan void rge_reset(struct rge_softc *);
116 1.1 sevan void rge_iff(struct rge_softc *);
117 1.1 sevan void rge_set_phy_power(struct rge_softc *, int);
118 1.1 sevan void rge_phy_config(struct rge_softc *);
119 1.17 jakllsch void rge_phy_config_mac_cfg2(struct rge_softc *);
120 1.17 jakllsch void rge_phy_config_mac_cfg3(struct rge_softc *);
121 1.17 jakllsch void rge_phy_config_mac_cfg4(struct rge_softc *);
122 1.17 jakllsch void rge_phy_config_mac_cfg5(struct rge_softc *);
123 1.17 jakllsch void rge_phy_config_mcu(struct rge_softc *, uint16_t);
124 1.1 sevan void rge_set_macaddr(struct rge_softc *, const uint8_t *);
125 1.1 sevan void rge_get_macaddr(struct rge_softc *, uint8_t *);
126 1.1 sevan void rge_hw_init(struct rge_softc *);
127 1.1 sevan void rge_disable_phy_ocp_pwrsave(struct rge_softc *);
128 1.1 sevan void rge_patch_phy_mcu(struct rge_softc *, int);
129 1.1 sevan void rge_add_media_types(struct rge_softc *);
130 1.1 sevan void rge_config_imtype(struct rge_softc *, int);
131 1.17 jakllsch void rge_disable_hw_im(struct rge_softc *);
132 1.1 sevan void rge_disable_sim_im(struct rge_softc *);
133 1.1 sevan void rge_setup_sim_im(struct rge_softc *);
134 1.1 sevan void rge_setup_intr(struct rge_softc *, int);
135 1.1 sevan void rge_exit_oob(struct rge_softc *);
136 1.1 sevan void rge_write_csi(struct rge_softc *, uint32_t, uint32_t);
137 1.1 sevan uint32_t rge_read_csi(struct rge_softc *, uint32_t);
138 1.1 sevan void rge_write_mac_ocp(struct rge_softc *, uint16_t, uint16_t);
139 1.1 sevan uint16_t rge_read_mac_ocp(struct rge_softc *, uint16_t);
140 1.1 sevan void rge_write_ephy(struct rge_softc *, uint16_t, uint16_t);
141 1.17 jakllsch uint16_t rge_read_ephy(struct rge_softc *, uint16_t);
142 1.1 sevan void rge_write_phy(struct rge_softc *, uint16_t, uint16_t, uint16_t);
143 1.17 jakllsch uint16_t rge_read_phy(struct rge_softc *, uint16_t, uint16_t);
144 1.1 sevan void rge_write_phy_ocp(struct rge_softc *, uint16_t, uint16_t);
145 1.1 sevan uint16_t rge_read_phy_ocp(struct rge_softc *, uint16_t);
146 1.1 sevan int rge_get_link_status(struct rge_softc *);
147 1.3 sevan void rge_txstart(struct work *, void *);
148 1.1 sevan void rge_tick(void *);
149 1.1 sevan void rge_link_state(struct rge_softc *);
150 1.1 sevan
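/*
 * Register/value pairs, presumably consumed by rge_phy_config_mcu(), holding
 * the MCU patch data for each supported MAC revision.
 */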
151 1.1 sevan static const struct {
152 1.1 sevan uint16_t reg;
153 1.1 sevan uint16_t val;
154 1.17 jakllsch } rtl8125_mac_cfg2_mcu[] = {
155 1.1 sevan RTL8125_MAC_CFG2_MCU
156 1.1 sevan }, rtl8125_mac_cfg3_mcu[] = {
157 1.1 sevan RTL8125_MAC_CFG3_MCU
158 1.17 jakllsch }, rtl8125_mac_cfg4_mcu[] = {
159 1.17 jakllsch RTL8125_MAC_CFG4_MCU
160 1.17 jakllsch }, rtl8125_mac_cfg5_mcu[] = {
161 1.17 jakllsch RTL8125_MAC_CFG5_MCU
162 1.1 sevan };
163 1.1 sevan
164 1.2 sevan CFATTACH_DECL_NEW(rge, sizeof(struct rge_softc), rge_match, rge_attach,
165 1.2 sevan NULL, NULL); /* Sevan - detach function? */
166 1.1 sevan
167 1.2 sevan static const struct {
168 1.2 sevan pci_vendor_id_t vendor;
169 1.2 sevan pci_product_id_t product;
170 1.2 sevan } rge_devices[] = {
171 1.1 sevan { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_E3000 },
172 1.2 sevan { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8125 },
173 1.1 sevan };
174 1.1 sevan
175 1.2 sevan static int
176 1.2 sevan rge_match(device_t parent, cfdata_t match, void *aux)
177 1.1 sevan {
178 1.2 sevan 	struct pci_attach_args *pa = aux;
179 1.2 sevan int n;
180 1.2 sevan
181 1.2 sevan 	for (n = 0; n < __arraycount(rge_devices); n++) {
182 1.2 sevan if (PCI_VENDOR(pa->pa_id) == rge_devices[n].vendor &&
183 1.2 sevan PCI_PRODUCT(pa->pa_id) == rge_devices[n].product)
184 1.17 jakllsch return 3;
185 1.2 sevan }
186 1.2 sevan
187 1.2 sevan return 0;
188 1.1 sevan }
189 1.1 sevan
190 1.1 sevan void
191 1.2 sevan rge_attach(device_t parent, device_t self, void *aux)
192 1.1 sevan {
193 1.11 sevan struct rge_softc *sc = device_private(self);
194 1.1 sevan struct pci_attach_args *pa = aux;
195 1.1 sevan pci_chipset_tag_t pc = pa->pa_pc;
196 1.17 jakllsch pci_intr_handle_t *ihp;
197 1.2 sevan char intrbuf[PCI_INTRSTR_LEN];
198 1.1 sevan const char *intrstr = NULL;
199 1.1 sevan struct ifnet *ifp;
200 1.1 sevan pcireg_t reg;
201 1.1 sevan uint32_t hwrev;
202 1.1 sevan uint8_t eaddr[ETHER_ADDR_LEN];
203 1.1 sevan int offset;
204 1.17 jakllsch pcireg_t command;
205 1.1 sevan
206 1.1 sevan pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
207 1.1 sevan
208 1.12 sevan sc->sc_dev = self;
209 1.12 sevan
210 1.17 jakllsch pci_aprint_devinfo(pa, "Ethernet controller");
211 1.17 jakllsch
212 1.5 skrll /*
213 1.1 sevan * Map control/status registers.
214 1.1 sevan */
215 1.1 sevan if (pci_mapreg_map(pa, RGE_PCI_BAR2, PCI_MAPREG_TYPE_MEM |
216 1.1 sevan PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->rge_btag, &sc->rge_bhandle,
217 1.2 sevan NULL, &sc->rge_bsize)) {
218 1.1 sevan if (pci_mapreg_map(pa, RGE_PCI_BAR1, PCI_MAPREG_TYPE_MEM |
219 1.1 sevan PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->rge_btag,
220 1.2 sevan &sc->rge_bhandle, NULL, &sc->rge_bsize)) {
221 1.1 sevan if (pci_mapreg_map(pa, RGE_PCI_BAR0, PCI_MAPREG_TYPE_IO,
222 1.1 sevan 0, &sc->rge_btag, &sc->rge_bhandle, NULL,
223 1.2 sevan &sc->rge_bsize)) {
224 1.13 sevan aprint_error(": can't map mem or i/o space\n");
225 1.1 sevan return;
226 1.1 sevan }
227 1.1 sevan }
228 1.1 sevan }
229 1.1 sevan
230 1.17 jakllsch int counts[PCI_INTR_TYPE_SIZE] = {
231 1.17 jakllsch [PCI_INTR_TYPE_INTX] = 1,
232 1.17 jakllsch [PCI_INTR_TYPE_MSI] = 1,
233 1.17 jakllsch [PCI_INTR_TYPE_MSIX] = 1,
234 1.17 jakllsch };
235 1.17 jakllsch int max_type = PCI_INTR_TYPE_MSIX;
236 1.5 skrll /*
237 1.1 sevan * Allocate interrupt.
238 1.1 sevan */
239 1.17 jakllsch if (pci_intr_alloc(pa, &ihp, counts, max_type) != 0) {
240 1.13 sevan aprint_error(": couldn't map interrupt\n");
241 1.1 sevan return;
242 1.1 sevan }
243 1.17 jakllsch switch (pci_intr_type(pc, ihp[0])) {
244 1.17 jakllsch case PCI_INTR_TYPE_MSIX:
245 1.17 jakllsch case PCI_INTR_TYPE_MSI:
246 1.17 jakllsch sc->rge_flags |= RGE_FLAG_MSI;
247 1.17 jakllsch break;
248 1.17 jakllsch default:
249 1.17 jakllsch break;
250 1.17 jakllsch }
251 1.17 jakllsch intrstr = pci_intr_string(pc, ihp[0], intrbuf, sizeof(intrbuf));
252 1.17 jakllsch sc->sc_ih = pci_intr_establish_xname(pc, ihp[0], IPL_NET, rge_intr,
253 1.14 sevan sc, device_xname(sc->sc_dev));
254 1.1 sevan if (sc->sc_ih == NULL) {
255 1.13 sevan 		aprint_error_dev(sc->sc_dev, "couldn't establish interrupt");
256 1.1 sevan 		if (intrstr != NULL)
257 1.13 sevan 			aprint_error(" at %s", intrstr);
258 1.13 sevan 		aprint_error("\n");
259 1.1 sevan return;
260 1.1 sevan }
261 1.13 sevan aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
262 1.1 sevan
263 1.9 thorpej if (pci_dma64_available(pa))
264 1.9 thorpej sc->sc_dmat = pa->pa_dmat64;
265 1.9 thorpej else
266 1.9 thorpej sc->sc_dmat = pa->pa_dmat;
267 1.9 thorpej
268 1.1 sevan sc->sc_pc = pa->pa_pc;
269 1.1 sevan sc->sc_tag = pa->pa_tag;
270 1.1 sevan
271 1.1 sevan /* Determine hardware revision */
272 1.1 sevan hwrev = RGE_READ_4(sc, RGE_TXCFG) & RGE_TXCFG_HWREV;
273 1.1 sevan switch (hwrev) {
274 1.1 sevan case 0x60800000:
275 1.1 sevan sc->rge_type = MAC_CFG2;
276 1.1 sevan break;
277 1.1 sevan case 0x60900000:
278 1.1 sevan sc->rge_type = MAC_CFG3;
279 1.1 sevan break;
280 1.17 jakllsch case 0x64000000:
281 1.17 jakllsch sc->rge_type = MAC_CFG4;
282 1.17 jakllsch break;
283 1.17 jakllsch case 0x64100000:
284 1.17 jakllsch sc->rge_type = MAC_CFG5;
285 1.17 jakllsch break;
286 1.1 sevan default:
287 1.13 sevan aprint_error(": unknown version 0x%08x\n", hwrev);
288 1.1 sevan return;
289 1.1 sevan }
290 1.1 sevan
291 1.1 sevan rge_config_imtype(sc, RGE_IMTYPE_SIM);
292 1.1 sevan
293 1.5 skrll /*
294 1.1 sevan * PCI Express check.
295 1.1 sevan */
296 1.1 sevan if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
297 1.1 sevan &offset, NULL)) {
298 1.17 jakllsch /* Disable PCIe ASPM and ECPM. */
299 1.1 sevan reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
300 1.2 sevan offset + PCIE_LCSR);
301 1.17 jakllsch reg &= ~(PCIE_LCSR_ASPM_L0S | PCIE_LCSR_ASPM_L1 |
302 1.17 jakllsch PCIE_LCSR_ENCLKPM);
303 1.2 sevan pci_conf_write(pa->pa_pc, pa->pa_tag, offset + PCIE_LCSR,
304 1.1 sevan reg);
305 1.1 sevan }
306 1.1 sevan
307 1.1 sevan rge_exit_oob(sc);
308 1.1 sevan rge_hw_init(sc);
309 1.1 sevan
310 1.1 sevan rge_get_macaddr(sc, eaddr);
311 1.13 sevan aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
312 1.13 sevan ether_sprintf(eaddr));
313 1.1 sevan
314 1.2 sevan memcpy(sc->sc_enaddr, eaddr, ETHER_ADDR_LEN);
315 1.1 sevan
316 1.1 sevan rge_set_phy_power(sc, 1);
317 1.1 sevan rge_phy_config(sc);
318 1.1 sevan
319 1.1 sevan if (rge_allocmem(sc))
320 1.1 sevan return;
321 1.1 sevan
322 1.2 sevan ifp = &sc->sc_ec.ec_if;
323 1.1 sevan ifp->if_softc = sc;
324 1.14 sevan strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
325 1.1 sevan ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
326 1.2 sevan #ifdef RGE_MPSAFE
327 1.18 knakahar ifp->if_extflags = IFEF_MPSAFE;
328 1.2 sevan #endif
329 1.1 sevan ifp->if_ioctl = rge_ioctl;
330 1.17 jakllsch ifp->if_stop = rge_stop;
331 1.2 sevan ifp->if_start = rge_start;
332 1.17 jakllsch ifp->if_init = rge_init;
333 1.1 sevan ifp->if_watchdog = rge_watchdog;
334 1.17 jakllsch IFQ_SET_MAXLEN(&ifp->if_snd, RGE_TX_LIST_CNT - 1);
335 1.1 sevan
336 1.17 jakllsch #if notyet
337 1.17 jakllsch ifp->if_capabilities = IFCAP_CSUM_IPv4_Rx |
338 1.2 sevan 	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_TCPv4_Tx |
339 1.2 sevan IFCAP_CSUM_UDPv4_Rx | IFCAP_CSUM_UDPv4_Tx;
340 1.17 jakllsch #endif
341 1.1 sevan
342 1.17 jakllsch sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
343 1.17 jakllsch sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
344 1.1 sevan
345 1.3 sevan callout_init(&sc->sc_timeout, CALLOUT_FLAGS);
346 1.3 sevan callout_setfunc(&sc->sc_timeout, rge_tick, sc);
347 1.17 jakllsch
348 1.17 jakllsch command = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
349 1.17 jakllsch command |= PCI_COMMAND_MASTER_ENABLE;
350 1.17 jakllsch pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
351 1.1 sevan
352 1.1 sevan /* Initialize ifmedia structures. */
353 1.17 jakllsch sc->sc_ec.ec_ifmedia = &sc->sc_media;
354 1.1 sevan ifmedia_init(&sc->sc_media, IFM_IMASK, rge_ifmedia_upd,
355 1.1 sevan rge_ifmedia_sts);
356 1.1 sevan rge_add_media_types(sc);
357 1.1 sevan ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
358 1.1 sevan ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
359 1.1 sevan sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media;
360 1.1 sevan
361 1.1 sevan if_attach(ifp);
362 1.2 sevan ether_ifattach(ifp, eaddr);
363 1.1 sevan }
364 1.1 sevan
365 1.1 sevan int
366 1.1 sevan rge_intr(void *arg)
367 1.1 sevan {
368 1.1 sevan struct rge_softc *sc = arg;
369 1.2 sevan struct ifnet *ifp = &sc->sc_ec.ec_if;
370 1.1 sevan uint32_t status;
371 1.1 sevan int claimed = 0, rx, tx;
372 1.1 sevan
373 1.1 sevan if (!(ifp->if_flags & IFF_RUNNING))
374 1.1 sevan return (0);
375 1.1 sevan
376 1.1 sevan /* Disable interrupts. */
377 1.1 sevan RGE_WRITE_4(sc, RGE_IMR, 0);
378 1.1 sevan
379 1.1 sevan if (!(sc->rge_flags & RGE_FLAG_MSI)) {
380 1.17 jakllsch if ((RGE_READ_4(sc, RGE_ISR) & sc->rge_intrs) == 0)
381 1.1 sevan return (0);
382 1.1 sevan }
383 1.17 jakllsch
384 1.17 jakllsch status = RGE_READ_4(sc, RGE_ISR);
385 1.1 sevan if (status)
386 1.1 sevan RGE_WRITE_4(sc, RGE_ISR, status);
387 1.1 sevan
388 1.1 sevan if (status & RGE_ISR_PCS_TIMEOUT)
389 1.1 sevan claimed = 1;
390 1.1 sevan
391 1.1 sevan rx = tx = 0;
392 1.17 jakllsch if (status & sc->rge_intrs) {
393 1.1 sevan if (status &
394 1.1 sevan (sc->rge_rx_ack | RGE_ISR_RX_ERR | RGE_ISR_RX_FIFO_OFLOW)) {
395 1.1 sevan rx |= rge_rxeof(sc);
396 1.1 sevan claimed = 1;
397 1.1 sevan }
398 1.1 sevan
399 1.1 sevan if (status & (sc->rge_tx_ack | RGE_ISR_TX_ERR)) {
400 1.1 sevan tx |= rge_txeof(sc);
401 1.1 sevan claimed = 1;
402 1.1 sevan }
403 1.1 sevan
404 1.1 sevan if (status & RGE_ISR_SYSTEM_ERR) {
405 1.2 sevan KERNEL_LOCK(1, NULL);
406 1.1 sevan rge_init(ifp);
407 1.2 sevan KERNEL_UNLOCK_ONE(NULL);
408 1.1 sevan claimed = 1;
409 1.1 sevan }
410 1.1 sevan }
411 1.1 sevan
412 1.1 sevan if (sc->rge_timerintr) {
413 1.1 sevan if ((tx | rx) == 0) {
414 1.1 sevan /*
415 1.1 sevan 			 * Nothing needs to be processed; fall back
416 1.1 sevan 			 * to using TX/RX interrupts.
417 1.1 sevan */
418 1.1 sevan rge_setup_intr(sc, RGE_IMTYPE_NONE);
419 1.1 sevan
420 1.1 sevan /*
421 1.1 sevan * Recollect, mainly to avoid the possible
422 1.1 sevan * race introduced by changing interrupt
423 1.1 sevan * masks.
424 1.1 sevan */
425 1.1 sevan rge_rxeof(sc);
426 1.1 sevan rge_txeof(sc);
427 1.1 sevan } else
428 1.1 sevan RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
429 1.1 sevan } else if (tx | rx) {
430 1.1 sevan /*
431 1.1 sevan * Assume that using simulated interrupt moderation
432 1.1 sevan * (hardware timer based) could reduce the interrupt
433 1.1 sevan * rate.
434 1.1 sevan */
435 1.1 sevan rge_setup_intr(sc, RGE_IMTYPE_SIM);
436 1.1 sevan }
437 1.1 sevan
438 1.1 sevan RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
439 1.1 sevan
440 1.1 sevan return (claimed);
441 1.1 sevan }
442 1.1 sevan
443 1.1 sevan int
444 1.1 sevan rge_encap(struct rge_softc *sc, struct mbuf *m, int idx)
445 1.1 sevan {
446 1.1 sevan struct rge_tx_desc *d = NULL;
447 1.1 sevan struct rge_txq *txq;
448 1.1 sevan bus_dmamap_t txmap;
449 1.1 sevan uint32_t cmdsts, cflags = 0;
450 1.1 sevan int cur, error, i, last, nsegs;
451 1.1 sevan
452 1.17 jakllsch #if notyet
453 1.1 sevan /*
454 1.1 sevan * Set RGE_TDEXTSTS_IPCSUM if any checksum offloading is requested.
455 1.1 sevan * Otherwise, RGE_TDEXTSTS_TCPCSUM / RGE_TDEXTSTS_UDPCSUM does not
456 1.1 sevan 	 * take effect.
457 1.1 sevan */
458 1.1 sevan if ((m->m_pkthdr.csum_flags &
459 1.2 sevan (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) != 0) {
460 1.1 sevan cflags |= RGE_TDEXTSTS_IPCSUM;
461 1.1 sevan if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
462 1.1 sevan cflags |= RGE_TDEXTSTS_TCPCSUM;
463 1.1 sevan if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
464 1.1 sevan cflags |= RGE_TDEXTSTS_UDPCSUM;
465 1.1 sevan }
466 1.17 jakllsch #endif
467 1.1 sevan
468 1.1 sevan txq = &sc->rge_ldata.rge_txq[idx];
469 1.1 sevan txmap = txq->txq_dmamap;
470 1.1 sevan
471 1.1 sevan error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m, BUS_DMA_NOWAIT);
472 1.1 sevan switch (error) {
473 1.1 sevan case 0:
474 1.1 sevan break;
475 1.1 sevan case EFBIG: /* mbuf chain is too fragmented */
476 1.1 sevan if (m_defrag(m, M_DONTWAIT) == 0 &&
477 1.1 sevan bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m,
478 1.1 sevan BUS_DMA_NOWAIT) == 0)
479 1.1 sevan break;
480 1.1 sevan
481 1.1 sevan /* FALLTHROUGH */
482 1.1 sevan default:
483 1.1 sevan return (0);
484 1.1 sevan }
485 1.1 sevan
486 1.1 sevan bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
487 1.1 sevan BUS_DMASYNC_PREWRITE);
488 1.1 sevan
489 1.1 sevan nsegs = txmap->dm_nsegs;
490 1.1 sevan
491 1.1 sevan /* Set up hardware VLAN tagging. */
492 1.17 jakllsch if (vlan_has_tag(m))
493 1.17 jakllsch cflags |= bswap16(vlan_get_tag(m)) | RGE_TDEXTSTS_VTAG;
494 1.1 sevan
495 1.17 jakllsch last = cur = idx;
496 1.1 sevan cmdsts = RGE_TDCMDSTS_SOF;
497 1.1 sevan
498 1.1 sevan for (i = 0; i < txmap->dm_nsegs; i++) {
499 1.1 sevan d = &sc->rge_ldata.rge_tx_list[cur];
500 1.1 sevan
501 1.1 sevan d->rge_extsts = htole32(cflags);
502 1.1 sevan d->rge_addrlo = htole32(RGE_ADDR_LO(txmap->dm_segs[i].ds_addr));
503 1.1 sevan d->rge_addrhi = htole32(RGE_ADDR_HI(txmap->dm_segs[i].ds_addr));
504 1.1 sevan
505 1.1 sevan cmdsts |= txmap->dm_segs[i].ds_len;
506 1.1 sevan
507 1.1 sevan if (cur == RGE_TX_LIST_CNT - 1)
508 1.1 sevan cmdsts |= RGE_TDCMDSTS_EOR;
509 1.1 sevan
510 1.1 sevan d->rge_cmdsts = htole32(cmdsts);
511 1.1 sevan
512 1.1 sevan last = cur;
513 1.1 sevan cmdsts = RGE_TDCMDSTS_OWN;
514 1.1 sevan cur = RGE_NEXT_TX_DESC(cur);
515 1.1 sevan }
516 1.1 sevan
517 1.1 sevan /* Set EOF on the last descriptor. */
518 1.1 sevan d->rge_cmdsts |= htole32(RGE_TDCMDSTS_EOF);
519 1.1 sevan
520 1.1 sevan /* Transfer ownership of packet to the chip. */
521 1.1 sevan d = &sc->rge_ldata.rge_tx_list[idx];
522 1.1 sevan
523 1.1 sevan d->rge_cmdsts |= htole32(RGE_TDCMDSTS_OWN);
524 1.1 sevan
525 1.1 sevan bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
526 1.1 sevan cur * sizeof(struct rge_tx_desc), sizeof(struct rge_tx_desc),
527 1.1 sevan BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
528 1.1 sevan
529 1.1 sevan /* Update info of TX queue and descriptors. */
530 1.1 sevan txq->txq_mbuf = m;
531 1.1 sevan txq->txq_descidx = last;
532 1.1 sevan
533 1.1 sevan return (nsegs);
534 1.1 sevan }
535 1.1 sevan
536 1.1 sevan int
537 1.2 sevan rge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
538 1.1 sevan {
539 1.1 sevan struct rge_softc *sc = ifp->if_softc;
540 1.17 jakllsch //struct ifreq *ifr = (struct ifreq *)data;
541 1.1 sevan int s, error = 0;
542 1.1 sevan
543 1.1 sevan s = splnet();
544 1.1 sevan
545 1.1 sevan switch (cmd) {
546 1.1 sevan case SIOCSIFFLAGS:
547 1.17 jakllsch if ((error = ifioctl_common(ifp, cmd, data)) != 0)
548 1.17 jakllsch break;
549 1.17 jakllsch /* XXX set an ifflags callback and let ether_ioctl
550 1.17 jakllsch * handle all of this.
551 1.17 jakllsch */
552 1.1 sevan if (ifp->if_flags & IFF_UP) {
553 1.1 sevan if (ifp->if_flags & IFF_RUNNING)
554 1.1 sevan error = ENETRESET;
555 1.1 sevan else
556 1.1 sevan rge_init(ifp);
557 1.1 sevan } else {
558 1.1 sevan if (ifp->if_flags & IFF_RUNNING)
559 1.17 jakllsch rge_stop(ifp, 1);
560 1.1 sevan }
561 1.1 sevan break;
562 1.1 sevan default:
563 1.2 sevan error = ether_ioctl(ifp, cmd, data);
564 1.1 sevan }
565 1.1 sevan
566 1.1 sevan if (error == ENETRESET) {
567 1.1 sevan if (ifp->if_flags & IFF_RUNNING)
568 1.1 sevan rge_iff(sc);
569 1.1 sevan error = 0;
570 1.1 sevan }
571 1.1 sevan
572 1.1 sevan splx(s);
573 1.1 sevan return (error);
574 1.1 sevan }
575 1.1 sevan
576 1.1 sevan void
577 1.3 sevan rge_start(struct ifnet *ifp)
578 1.1 sevan {
579 1.1 sevan struct rge_softc *sc = ifp->if_softc;
580 1.1 sevan struct mbuf *m;
581 1.1 sevan int free, idx, used;
582 1.1 sevan int queued = 0;
583 1.1 sevan
584 1.2 sevan #define LINK_STATE_IS_UP(_s) \
585 1.2 sevan ((_s) >= LINK_STATE_UP || (_s) == LINK_STATE_UNKNOWN)
586 1.2 sevan
587 1.1 sevan if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
588 1.17 jakllsch IFQ_PURGE(&ifp->if_snd);
589 1.1 sevan return;
590 1.1 sevan }
591 1.1 sevan
592 1.1 sevan /* Calculate free space. */
593 1.1 sevan idx = sc->rge_ldata.rge_txq_prodidx;
594 1.1 sevan free = sc->rge_ldata.rge_txq_considx;
595 1.1 sevan if (free <= idx)
596 1.1 sevan free += RGE_TX_LIST_CNT;
597 1.1 sevan free -= idx;
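	/*
	 * The TX ring is circular: free space is the number of slots from
	 * the producer index forward to the consumer index, wrapping around
	 * the ring; equal indices mean the ring is entirely free
	 * (RGE_TX_LIST_CNT slots).
	 */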
598 1.1 sevan
599 1.1 sevan for (;;) {
600 1.1 sevan if (RGE_TX_NSEGS >= free + 2) {
601 1.3 sevan SET(ifp->if_flags, IFF_OACTIVE);
602 1.1 sevan break;
603 1.1 sevan }
604 1.1 sevan
605 1.3 sevan IFQ_DEQUEUE(&ifp->if_snd, m);
606 1.1 sevan if (m == NULL)
607 1.1 sevan break;
608 1.1 sevan
609 1.1 sevan used = rge_encap(sc, m, idx);
610 1.1 sevan if (used == 0) {
611 1.1 sevan m_freem(m);
612 1.1 sevan continue;
613 1.1 sevan }
614 1.1 sevan
615 1.1 sevan KASSERT(used <= free);
616 1.1 sevan free -= used;
617 1.1 sevan
618 1.17 jakllsch bpf_mtap(ifp, m, BPF_D_OUT);
619 1.1 sevan
620 1.1 sevan idx += used;
621 1.1 sevan if (idx >= RGE_TX_LIST_CNT)
622 1.1 sevan idx -= RGE_TX_LIST_CNT;
623 1.1 sevan
624 1.1 sevan queued++;
625 1.1 sevan }
626 1.1 sevan
627 1.1 sevan if (queued == 0)
628 1.1 sevan return;
629 1.1 sevan
630 1.1 sevan /* Set a timeout in case the chip goes out to lunch. */
631 1.1 sevan ifp->if_timer = 5;
632 1.1 sevan
633 1.1 sevan sc->rge_ldata.rge_txq_prodidx = idx;
634 1.17 jakllsch #if 0
635 1.1 sevan ifq_serialize(ifq, &sc->sc_task);
636 1.17 jakllsch #else
637 1.17 jakllsch rge_txstart(&sc->sc_task, sc);
638 1.17 jakllsch #endif
639 1.1 sevan }
640 1.1 sevan
641 1.1 sevan void
642 1.1 sevan rge_watchdog(struct ifnet *ifp)
643 1.1 sevan {
644 1.1 sevan struct rge_softc *sc = ifp->if_softc;
645 1.1 sevan
646 1.16 jakllsch device_printf(sc->sc_dev, "watchdog timeout\n");
647 1.4 skrll if_statinc(ifp, if_oerrors);
648 1.1 sevan
649 1.1 sevan rge_init(ifp);
650 1.1 sevan }
651 1.1 sevan
652 1.1 sevan int
653 1.1 sevan rge_init(struct ifnet *ifp)
654 1.1 sevan {
655 1.1 sevan struct rge_softc *sc = ifp->if_softc;
656 1.1 sevan uint32_t val;
657 1.1 sevan int i;
658 1.1 sevan
659 1.17 jakllsch rge_stop(ifp, 0);
660 1.1 sevan
661 1.1 sevan /* Set MAC address. */
662 1.17 jakllsch rge_set_macaddr(sc, CLLADDR(ifp->if_sadl));
663 1.1 sevan
664 1.17 jakllsch 	/* Set maximum frame size. */
665 1.17 jakllsch RGE_WRITE_2(sc, RGE_RXMAXSIZE, RGE_JUMBO_FRAMELEN);
666 1.1 sevan
667 1.1 sevan /* Initialize RX descriptors list. */
668 1.1 sevan if (rge_rx_list_init(sc) == ENOBUFS) {
669 1.16 jakllsch device_printf(sc->sc_dev,
670 1.13 sevan "init failed: no memory for RX buffers\n");
671 1.17 jakllsch rge_stop(ifp, 1);
672 1.1 sevan return (ENOBUFS);
673 1.1 sevan }
674 1.1 sevan
675 1.1 sevan /* Initialize TX descriptors. */
676 1.1 sevan rge_tx_list_init(sc);
677 1.1 sevan
678 1.1 sevan /* Load the addresses of the RX and TX lists into the chip. */
679 1.1 sevan RGE_WRITE_4(sc, RGE_RXDESC_ADDR_LO,
680 1.1 sevan RGE_ADDR_LO(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
681 1.1 sevan RGE_WRITE_4(sc, RGE_RXDESC_ADDR_HI,
682 1.1 sevan RGE_ADDR_HI(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
683 1.1 sevan RGE_WRITE_4(sc, RGE_TXDESC_ADDR_LO,
684 1.1 sevan RGE_ADDR_LO(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
685 1.1 sevan RGE_WRITE_4(sc, RGE_TXDESC_ADDR_HI,
686 1.1 sevan RGE_ADDR_HI(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
687 1.1 sevan
688 1.1 sevan RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
689 1.1 sevan
690 1.1 sevan RGE_CLRBIT_1(sc, 0xf1, 0x80);
691 1.1 sevan RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
692 1.1 sevan RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
693 1.1 sevan RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_RDY_TO_L23);
694 1.1 sevan
695 1.1 sevan /* Clear interrupt moderation timer. */
696 1.1 sevan for (i = 0; i < 64; i++)
697 1.17 jakllsch RGE_WRITE_4(sc, RGE_INTMITI(i), 0);
698 1.1 sevan
699 1.1 sevan /* Set the initial RX and TX configurations. */
700 1.1 sevan RGE_WRITE_4(sc, RGE_RXCFG, RGE_RXCFG_CONFIG);
701 1.1 sevan RGE_WRITE_4(sc, RGE_TXCFG, RGE_TXCFG_CONFIG);
702 1.1 sevan
703 1.1 sevan val = rge_read_csi(sc, 0x70c) & ~0xff000000;
704 1.1 sevan rge_write_csi(sc, 0x70c, val | 0x27000000);
705 1.1 sevan
706 1.1 sevan /* Enable hardware optimization function. */
707 1.1 sevan val = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x78) & ~0x00007000;
708 1.1 sevan pci_conf_write(sc->sc_pc, sc->sc_tag, 0x78, val | 0x00005000);
709 1.1 sevan
710 1.1 sevan RGE_WRITE_2(sc, 0x0382, 0x221b);
711 1.1 sevan RGE_WRITE_1(sc, 0x4500, 0);
712 1.1 sevan RGE_WRITE_2(sc, 0x4800, 0);
713 1.1 sevan RGE_CLRBIT_1(sc, RGE_CFG1, RGE_CFG1_SPEED_DOWN);
714 1.1 sevan
715 1.1 sevan rge_write_mac_ocp(sc, 0xc140, 0xffff);
716 1.1 sevan rge_write_mac_ocp(sc, 0xc142, 0xffff);
717 1.1 sevan
718 1.1 sevan val = rge_read_mac_ocp(sc, 0xd3e2) & ~0x0fff;
719 1.1 sevan rge_write_mac_ocp(sc, 0xd3e2, val | 0x03a9);
720 1.1 sevan
721 1.1 sevan RGE_MAC_CLRBIT(sc, 0xd3e4, 0x00ff);
722 1.1 sevan RGE_MAC_SETBIT(sc, 0xe860, 0x0080);
723 1.1 sevan RGE_MAC_SETBIT(sc, 0xeb58, 0x0001);
724 1.1 sevan
725 1.1 sevan val = rge_read_mac_ocp(sc, 0xe614) & ~0x0700;
726 1.17 jakllsch if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
727 1.17 jakllsch rge_write_mac_ocp(sc, 0xe614, val | 0x0400);
728 1.17 jakllsch else
729 1.17 jakllsch rge_write_mac_ocp(sc, 0xe614, val | 0x0200);
730 1.1 sevan
731 1.1 sevan RGE_MAC_CLRBIT(sc, 0xe63e, 0x0c00);
732 1.1 sevan
733 1.17 jakllsch if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
734 1.17 jakllsch val = rge_read_mac_ocp(sc, 0xe63e) & ~0x0030;
735 1.17 jakllsch rge_write_mac_ocp(sc, 0xe63e, val | 0x0020);
736 1.17 jakllsch } else
737 1.17 jakllsch RGE_MAC_CLRBIT(sc, 0xe63e, 0x0030);
738 1.1 sevan
739 1.1 sevan RGE_MAC_SETBIT(sc, 0xc0b4, 0x000c);
740 1.1 sevan
741 1.17 jakllsch val = rge_read_mac_ocp(sc, 0xeb6a) & ~0x00ff;
742 1.1 sevan rge_write_mac_ocp(sc, 0xeb6a, val | 0x0033);
743 1.1 sevan
744 1.1 sevan val = rge_read_mac_ocp(sc, 0xeb50) & ~0x03e0;
745 1.1 sevan rge_write_mac_ocp(sc, 0xeb50, val | 0x0040);
746 1.1 sevan
747 1.1 sevan val = rge_read_mac_ocp(sc, 0xe056) & ~0x00f0;
748 1.1 sevan rge_write_mac_ocp(sc, 0xe056, val | 0x0030);
749 1.1 sevan
750 1.1 sevan RGE_WRITE_1(sc, RGE_TDFNR, 0x10);
751 1.1 sevan
752 1.17 jakllsch RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_TX_10M_PS_EN);
753 1.17 jakllsch
754 1.1 sevan RGE_MAC_CLRBIT(sc, 0xe040, 0x1000);
755 1.1 sevan
756 1.17 jakllsch val = rge_read_mac_ocp(sc, 0xea1c) & ~0x0003;
757 1.17 jakllsch rge_write_mac_ocp(sc, 0xea1c, val | 0x0001);
758 1.17 jakllsch
759 1.1 sevan val = rge_read_mac_ocp(sc, 0xe0c0) & ~0x4f0f;
760 1.1 sevan rge_write_mac_ocp(sc, 0xe0c0, val | 0x4403);
761 1.1 sevan
762 1.1 sevan RGE_MAC_SETBIT(sc, 0xe052, 0x0068);
763 1.1 sevan RGE_MAC_CLRBIT(sc, 0xe052, 0x0080);
764 1.1 sevan
765 1.1 sevan val = rge_read_mac_ocp(sc, 0xc0ac) & ~0x0080;
766 1.1 sevan rge_write_mac_ocp(sc, 0xc0ac, val | 0x1f00);
767 1.1 sevan
768 1.1 sevan val = rge_read_mac_ocp(sc, 0xd430) & ~0x0fff;
769 1.1 sevan rge_write_mac_ocp(sc, 0xd430, val | 0x047f);
770 1.1 sevan
771 1.17 jakllsch val = rge_read_mac_ocp(sc, 0xe84c) & ~0x0040;
772 1.17 jakllsch if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
773 1.17 jakllsch rge_write_mac_ocp(sc, 0xe84c, 0x00c0);
774 1.17 jakllsch else
775 1.17 jakllsch rge_write_mac_ocp(sc, 0xe84c, 0x0080);
776 1.17 jakllsch
777 1.17 jakllsch RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_PFM_EN);
778 1.17 jakllsch
779 1.17 jakllsch if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
780 1.17 jakllsch RGE_SETBIT_1(sc, RGE_MCUCMD, 0x01);
781 1.1 sevan
782 1.1 sevan /* Disable EEE plus. */
783 1.1 sevan RGE_MAC_CLRBIT(sc, 0xe080, 0x0002);
784 1.1 sevan
785 1.1 sevan RGE_MAC_CLRBIT(sc, 0xea1c, 0x0004);
786 1.1 sevan
787 1.1 sevan RGE_MAC_SETBIT(sc, 0xeb54, 0x0001);
788 1.1 sevan DELAY(1);
789 1.1 sevan RGE_MAC_CLRBIT(sc, 0xeb54, 0x0001);
790 1.1 sevan
791 1.1 sevan RGE_CLRBIT_4(sc, 0x1880, 0x0030);
792 1.1 sevan
793 1.1 sevan rge_write_mac_ocp(sc, 0xe098, 0xc302);
794 1.1 sevan
795 1.17 jakllsch if ((sc->sc_ec.ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0)
796 1.1 sevan RGE_SETBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);
797 1.17 jakllsch else
798 1.17 jakllsch RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);
799 1.1 sevan
800 1.1 sevan RGE_SETBIT_2(sc, RGE_CPLUSCMD, RGE_CPLUSCMD_RXCSUM);
801 1.1 sevan
802 1.1 sevan for (i = 0; i < 10; i++) {
803 1.1 sevan if (!(rge_read_mac_ocp(sc, 0xe00e) & 0x2000))
804 1.1 sevan break;
805 1.1 sevan DELAY(1000);
806 1.1 sevan }
807 1.1 sevan
808 1.1 sevan /* Disable RXDV gate. */
809 1.1 sevan RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
810 1.1 sevan DELAY(2000);
811 1.1 sevan
812 1.1 sevan rge_ifmedia_upd(ifp);
813 1.1 sevan
814 1.1 sevan /* Enable transmit and receive. */
815 1.1 sevan RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_TXENB | RGE_CMD_RXENB);
816 1.1 sevan
817 1.1 sevan /* Program promiscuous mode and multicast filters. */
818 1.1 sevan rge_iff(sc);
819 1.1 sevan
820 1.1 sevan RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
821 1.1 sevan RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
822 1.1 sevan
823 1.1 sevan RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
824 1.1 sevan
825 1.1 sevan /* Enable interrupts. */
826 1.1 sevan rge_setup_intr(sc, RGE_IMTYPE_SIM);
827 1.1 sevan
828 1.1 sevan ifp->if_flags |= IFF_RUNNING;
829 1.3 sevan CLR(ifp->if_flags, IFF_OACTIVE);
830 1.1 sevan
831 1.3 sevan callout_schedule(&sc->sc_timeout, 1);
832 1.1 sevan
833 1.1 sevan return (0);
834 1.1 sevan }
835 1.1 sevan
836 1.1 sevan /*
837 1.1 sevan * Stop the adapter and free any mbufs allocated to the RX and TX lists.
838 1.1 sevan */
839 1.1 sevan void
840 1.17 jakllsch rge_stop(struct ifnet *ifp, int disable)
841 1.1 sevan {
842 1.1 sevan struct rge_softc *sc = ifp->if_softc;
843 1.1 sevan int i;
844 1.1 sevan
845 1.17 jakllsch if (disable) {
846 1.17 jakllsch callout_halt(&sc->sc_timeout, NULL);
847 1.17 jakllsch } else
848 1.17 jakllsch callout_stop(&sc->sc_timeout);
849 1.1 sevan
850 1.1 sevan ifp->if_timer = 0;
851 1.1 sevan ifp->if_flags &= ~IFF_RUNNING;
852 1.1 sevan sc->rge_timerintr = 0;
853 1.1 sevan
854 1.1 sevan RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
855 1.1 sevan RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
856 1.1 sevan RGE_RXCFG_ERRPKT);
857 1.1 sevan
858 1.1 sevan RGE_WRITE_4(sc, RGE_IMR, 0);
859 1.17 jakllsch
860 1.17 jakllsch /* Clear timer interrupts. */
861 1.17 jakllsch RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
862 1.17 jakllsch RGE_WRITE_4(sc, RGE_TIMERINT1, 0);
863 1.17 jakllsch RGE_WRITE_4(sc, RGE_TIMERINT2, 0);
864 1.17 jakllsch RGE_WRITE_4(sc, RGE_TIMERINT3, 0);
865 1.1 sevan
866 1.1 sevan rge_reset(sc);
867 1.1 sevan
868 1.17 jakllsch // intr_barrier(sc->sc_ih);
869 1.17 jakllsch // ifq_barrier(&ifp->if_snd);
870 1.2 sevan /* ifq_clr_oactive(&ifp->if_snd); Sevan - OpenBSD queue API */
871 1.1 sevan
872 1.1 sevan if (sc->rge_head != NULL) {
873 1.1 sevan m_freem(sc->rge_head);
874 1.1 sevan sc->rge_head = sc->rge_tail = NULL;
875 1.1 sevan }
876 1.1 sevan
877 1.1 sevan /* Free the TX list buffers. */
878 1.1 sevan for (i = 0; i < RGE_TX_LIST_CNT; i++) {
879 1.1 sevan if (sc->rge_ldata.rge_txq[i].txq_mbuf != NULL) {
880 1.1 sevan bus_dmamap_unload(sc->sc_dmat,
881 1.1 sevan sc->rge_ldata.rge_txq[i].txq_dmamap);
882 1.1 sevan m_freem(sc->rge_ldata.rge_txq[i].txq_mbuf);
883 1.1 sevan sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
884 1.1 sevan }
885 1.1 sevan }
886 1.1 sevan
887 1.1 sevan /* Free the RX list buffers. */
888 1.1 sevan for (i = 0; i < RGE_RX_LIST_CNT; i++) {
889 1.1 sevan if (sc->rge_ldata.rge_rxq[i].rxq_mbuf != NULL) {
890 1.1 sevan bus_dmamap_unload(sc->sc_dmat,
891 1.1 sevan sc->rge_ldata.rge_rxq[i].rxq_dmamap);
892 1.1 sevan m_freem(sc->rge_ldata.rge_rxq[i].rxq_mbuf);
893 1.1 sevan sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
894 1.1 sevan }
895 1.1 sevan }
896 1.1 sevan }
897 1.1 sevan
898 1.1 sevan /*
899 1.1 sevan * Set media options.
900 1.1 sevan */
901 1.1 sevan int
902 1.1 sevan rge_ifmedia_upd(struct ifnet *ifp)
903 1.1 sevan {
904 1.1 sevan struct rge_softc *sc = ifp->if_softc;
905 1.1 sevan struct ifmedia *ifm = &sc->sc_media;
906 1.1 sevan int anar, gig, val;
907 1.1 sevan
908 1.1 sevan if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
909 1.1 sevan return (EINVAL);
910 1.1 sevan
911 1.1 sevan /* Disable Gigabit Lite. */
912 1.1 sevan RGE_PHY_CLRBIT(sc, 0xa428, 0x0200);
913 1.1 sevan RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0001);
914 1.1 sevan
915 1.1 sevan val = rge_read_phy_ocp(sc, 0xa5d4);
916 1.1 sevan val &= ~RGE_ADV_2500TFDX;
917 1.1 sevan
918 1.1 sevan anar = gig = 0;
919 1.1 sevan switch (IFM_SUBTYPE(ifm->ifm_media)) {
920 1.1 sevan case IFM_AUTO:
921 1.17 jakllsch anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
922 1.17 jakllsch gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
923 1.1 sevan val |= RGE_ADV_2500TFDX;
924 1.1 sevan break;
925 1.1 sevan case IFM_2500_T:
926 1.17 jakllsch anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
927 1.17 jakllsch gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
928 1.1 sevan val |= RGE_ADV_2500TFDX;
929 1.1 sevan ifp->if_baudrate = IF_Mbps(2500);
930 1.1 sevan break;
931 1.1 sevan case IFM_1000_T:
932 1.17 jakllsch anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
933 1.17 jakllsch gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
934 1.1 sevan ifp->if_baudrate = IF_Gbps(1);
935 1.1 sevan break;
936 1.1 sevan case IFM_100_TX:
937 1.17 jakllsch gig = rge_read_phy(sc, 0, MII_100T2CR) &
938 1.17 jakllsch ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
939 1.17 jakllsch anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
940 1.17 jakllsch ANAR_TX | ANAR_TX_FD | ANAR_10_FD | ANAR_10 :
941 1.17 jakllsch ANAR_TX | ANAR_10_FD | ANAR_10;
942 1.1 sevan ifp->if_baudrate = IF_Mbps(100);
943 1.1 sevan break;
944 1.1 sevan case IFM_10_T:
945 1.17 jakllsch gig = rge_read_phy(sc, 0, MII_100T2CR) &
946 1.17 jakllsch ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
947 1.17 jakllsch anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
948 1.17 jakllsch ANAR_10_FD | ANAR_10 : ANAR_10;
949 1.1 sevan ifp->if_baudrate = IF_Mbps(10);
950 1.1 sevan break;
951 1.1 sevan default:
952 1.16 jakllsch device_printf(sc->sc_dev,
953 1.13 sevan "unsupported media type\n");
954 1.1 sevan return (EINVAL);
955 1.1 sevan }
956 1.1 sevan
957 1.1 sevan rge_write_phy(sc, 0, MII_ANAR, anar | ANAR_PAUSE_ASYM | ANAR_FC);
958 1.1 sevan rge_write_phy(sc, 0, MII_100T2CR, gig);
959 1.1 sevan rge_write_phy_ocp(sc, 0xa5d4, val);
960 1.17 jakllsch rge_write_phy(sc, 0, MII_BMCR, BMCR_RESET | BMCR_AUTOEN |
961 1.17 jakllsch BMCR_STARTNEG);
962 1.1 sevan
963 1.1 sevan return (0);
964 1.1 sevan }
965 1.1 sevan
966 1.1 sevan /*
967 1.1 sevan * Report current media status.
968 1.1 sevan */
969 1.1 sevan void
970 1.1 sevan rge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
971 1.1 sevan {
972 1.1 sevan struct rge_softc *sc = ifp->if_softc;
973 1.1 sevan uint16_t status = 0;
974 1.1 sevan
975 1.1 sevan ifmr->ifm_status = IFM_AVALID;
976 1.1 sevan ifmr->ifm_active = IFM_ETHER;
977 1.1 sevan
978 1.1 sevan if (rge_get_link_status(sc)) {
979 1.1 sevan ifmr->ifm_status |= IFM_ACTIVE;
980 1.1 sevan
981 1.1 sevan status = RGE_READ_2(sc, RGE_PHYSTAT);
982 1.1 sevan if ((status & RGE_PHYSTAT_FDX) ||
983 1.1 sevan (status & RGE_PHYSTAT_2500MBPS))
984 1.1 sevan ifmr->ifm_active |= IFM_FDX;
985 1.1 sevan else
986 1.1 sevan ifmr->ifm_active |= IFM_HDX;
987 1.1 sevan
988 1.1 sevan if (status & RGE_PHYSTAT_10MBPS)
989 1.1 sevan ifmr->ifm_active |= IFM_10_T;
990 1.1 sevan else if (status & RGE_PHYSTAT_100MBPS)
991 1.1 sevan ifmr->ifm_active |= IFM_100_TX;
992 1.1 sevan else if (status & RGE_PHYSTAT_1000MBPS)
993 1.1 sevan ifmr->ifm_active |= IFM_1000_T;
994 1.1 sevan else if (status & RGE_PHYSTAT_2500MBPS)
995 1.1 sevan ifmr->ifm_active |= IFM_2500_T;
996 1.1 sevan }
997 1.1 sevan }
998 1.1 sevan
999 1.5 skrll /*
1000 1.1 sevan * Allocate memory for RX/TX rings.
1001 1.1 sevan */
1002 1.1 sevan int
1003 1.1 sevan rge_allocmem(struct rge_softc *sc)
1004 1.1 sevan {
1005 1.1 sevan int error, i;
1006 1.1 sevan
1007 1.1 sevan /* Allocate DMA'able memory for the TX ring. */
1008 1.1 sevan error = bus_dmamap_create(sc->sc_dmat, RGE_TX_LIST_SZ, 1,
1009 1.1 sevan RGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT, &sc->rge_ldata.rge_tx_list_map);
1010 1.1 sevan if (error) {
1011 1.13 sevan aprint_error_dev(sc->sc_dev, "can't create TX list map\n");
1012 1.1 sevan return (error);
1013 1.1 sevan }
1014 1.1 sevan error = bus_dmamem_alloc(sc->sc_dmat, RGE_TX_LIST_SZ, RGE_ALIGN, 0,
1015 1.1 sevan &sc->rge_ldata.rge_tx_listseg, 1, &sc->rge_ldata.rge_tx_listnseg,
1016 1.17 jakllsch BUS_DMA_NOWAIT);
1017 1.1 sevan if (error) {
1018 1.13 sevan aprint_error_dev(sc->sc_dev, "can't alloc TX list\n");
1019 1.1 sevan return (error);
1020 1.1 sevan }
1021 1.1 sevan
1022 1.1 sevan /* Load the map for the TX ring. */
1023 1.1 sevan error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
1024 1.1 sevan sc->rge_ldata.rge_tx_listnseg, RGE_TX_LIST_SZ,
1025 1.8 sevan (void **) &sc->rge_ldata.rge_tx_list,
1026 1.17 jakllsch BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
1027 1.1 sevan if (error) {
1028 1.13 sevan aprint_error_dev(sc->sc_dev, "can't map TX dma buffers\n");
1029 1.1 sevan bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
1030 1.1 sevan sc->rge_ldata.rge_tx_listnseg);
1031 1.1 sevan return (error);
1032 1.1 sevan }
1033 1.17 jakllsch memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);
1034 1.1 sevan error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
1035 1.1 sevan sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
1036 1.1 sevan if (error) {
1037 1.13 sevan aprint_error_dev(sc->sc_dev, "can't load TX dma map\n");
1038 1.1 sevan bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map);
1039 1.1 sevan bus_dmamem_unmap(sc->sc_dmat,
1040 1.2 sevan sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ);
1041 1.1 sevan bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
1042 1.1 sevan sc->rge_ldata.rge_tx_listnseg);
1043 1.1 sevan return (error);
1044 1.1 sevan }
1045 1.1 sevan
1046 1.1 sevan /* Create DMA maps for TX buffers. */
1047 1.1 sevan for (i = 0; i < RGE_TX_LIST_CNT; i++) {
1048 1.1 sevan error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN,
1049 1.1 sevan RGE_TX_NSEGS, RGE_JUMBO_FRAMELEN, 0, 0,
1050 1.1 sevan &sc->rge_ldata.rge_txq[i].txq_dmamap);
1051 1.1 sevan if (error) {
1052 1.13 sevan aprint_error_dev(sc->sc_dev, "can't create DMA map for TX\n");
1053 1.1 sevan return (error);
1054 1.1 sevan }
1055 1.1 sevan }
1056 1.1 sevan
1057 1.1 sevan /* Allocate DMA'able memory for the RX ring. */
1058 1.1 sevan error = bus_dmamap_create(sc->sc_dmat, RGE_RX_LIST_SZ, 1,
1059 1.1 sevan RGE_RX_LIST_SZ, 0, 0, &sc->rge_ldata.rge_rx_list_map);
1060 1.1 sevan if (error) {
1061 1.13 sevan aprint_error_dev(sc->sc_dev, "can't create RX list map\n");
1062 1.1 sevan return (error);
1063 1.1 sevan }
1064 1.1 sevan error = bus_dmamem_alloc(sc->sc_dmat, RGE_RX_LIST_SZ, RGE_ALIGN, 0,
1065 1.1 sevan &sc->rge_ldata.rge_rx_listseg, 1, &sc->rge_ldata.rge_rx_listnseg,
1066 1.17 jakllsch BUS_DMA_NOWAIT);
1067 1.1 sevan if (error) {
1068 1.13 sevan aprint_error_dev(sc->sc_dev, "can't alloc RX list\n");
1069 1.1 sevan return (error);
1070 1.1 sevan }
1071 1.1 sevan
1072 1.1 sevan /* Load the map for the RX ring. */
1073 1.1 sevan error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
1074 1.1 sevan sc->rge_ldata.rge_rx_listnseg, RGE_RX_LIST_SZ,
1075 1.8 sevan (void **) &sc->rge_ldata.rge_rx_list,
1076 1.17 jakllsch BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
1077 1.1 sevan if (error) {
1078 1.13 sevan aprint_error_dev(sc->sc_dev, "can't map RX dma buffers\n");
1079 1.1 sevan bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
1080 1.1 sevan sc->rge_ldata.rge_rx_listnseg);
1081 1.1 sevan return (error);
1082 1.1 sevan }
1083 1.17 jakllsch memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);
1084 1.1 sevan error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
1085 1.1 sevan sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
1086 1.1 sevan if (error) {
1087 1.13 sevan aprint_error_dev(sc->sc_dev, "can't load RX dma map\n");
1088 1.1 sevan bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map);
1089 1.1 sevan bus_dmamem_unmap(sc->sc_dmat,
1090 1.2 sevan sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ);
1091 1.1 sevan bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
1092 1.1 sevan sc->rge_ldata.rge_rx_listnseg);
1093 1.1 sevan return (error);
1094 1.1 sevan }
1095 1.1 sevan
1096 1.1 sevan /* Create DMA maps for RX buffers. */
1097 1.1 sevan for (i = 0; i < RGE_RX_LIST_CNT; i++) {
1098 1.1 sevan error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN, 1,
1099 1.1 sevan RGE_JUMBO_FRAMELEN, 0, 0,
1100 1.1 sevan &sc->rge_ldata.rge_rxq[i].rxq_dmamap);
1101 1.1 sevan if (error) {
1102 1.13 sevan aprint_error_dev(sc->sc_dev, "can't create DMA map for RX\n");
1103 1.1 sevan return (error);
1104 1.1 sevan }
1105 1.1 sevan }
1106 1.1 sevan
1107 1.1 sevan return (error);
1108 1.1 sevan }
1109 1.1 sevan
1110 1.1 sevan /*
1111 1.1 sevan * Initialize the RX descriptor and attach an mbuf cluster.
1112 1.1 sevan */
1113 1.1 sevan int
1114 1.1 sevan rge_newbuf(struct rge_softc *sc, int idx)
1115 1.1 sevan {
1116 1.1 sevan struct mbuf *m;
1117 1.1 sevan struct rge_rx_desc *r;
1118 1.1 sevan struct rge_rxq *rxq;
1119 1.1 sevan bus_dmamap_t rxmap;
1120 1.1 sevan
1121 1.17 jakllsch m = MCLGETL(NULL, M_DONTWAIT, RGE_JUMBO_FRAMELEN);
1122 1.1 sevan if (m == NULL)
1123 1.1 sevan return (ENOBUFS);
1124 1.1 sevan
1125 1.1 sevan m->m_len = m->m_pkthdr.len = RGE_JUMBO_FRAMELEN;
1126 1.1 sevan
1127 1.1 sevan rxq = &sc->rge_ldata.rge_rxq[idx];
1128 1.1 sevan rxmap = rxq->rxq_dmamap;
1129 1.1 sevan
1130 1.1 sevan if (bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m, BUS_DMA_NOWAIT))
1131 1.1 sevan goto out;
1132 1.1 sevan
1133 1.1 sevan bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
1134 1.1 sevan BUS_DMASYNC_PREREAD);
1135 1.1 sevan
1136 1.1 sevan /* Map the segments into RX descriptors. */
1137 1.1 sevan r = &sc->rge_ldata.rge_rx_list[idx];
1138 1.1 sevan
1139 1.1 sevan if (RGE_OWN(r)) {
1140 1.16 jakllsch device_printf(sc->sc_dev, "tried to map busy RX descriptor\n");
1141 1.1 sevan goto out;
1142 1.1 sevan }
1143 1.1 sevan
1144 1.1 sevan rxq->rxq_mbuf = m;
1145 1.1 sevan
1146 1.1 sevan r->rge_extsts = 0;
1147 1.1 sevan r->rge_addrlo = htole32(RGE_ADDR_LO(rxmap->dm_segs[0].ds_addr));
1148 1.1 sevan r->rge_addrhi = htole32(RGE_ADDR_HI(rxmap->dm_segs[0].ds_addr));
1149 1.1 sevan
1150 1.1 sevan r->rge_cmdsts = htole32(rxmap->dm_segs[0].ds_len);
1151 1.1 sevan if (idx == RGE_RX_LIST_CNT - 1)
1152 1.1 sevan r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
1153 1.1 sevan
1154 1.1 sevan r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);
1155 1.1 sevan
1156 1.1 sevan bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
1157 1.1 sevan idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
1158 1.1 sevan BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1159 1.1 sevan
1160 1.1 sevan return (0);
1161 1.1 sevan out:
1162 1.1 sevan if (m != NULL)
1163 1.1 sevan m_freem(m);
1164 1.1 sevan return (ENOMEM);
1165 1.1 sevan }
1166 1.1 sevan
1167 1.1 sevan void
1168 1.1 sevan rge_discard_rxbuf(struct rge_softc *sc, int idx)
1169 1.1 sevan {
1170 1.1 sevan struct rge_rx_desc *r;
1171 1.1 sevan
1172 1.1 sevan r = &sc->rge_ldata.rge_rx_list[idx];
1173 1.1 sevan
1174 1.1 sevan r->rge_cmdsts = htole32(RGE_JUMBO_FRAMELEN);
1175 1.1 sevan r->rge_extsts = 0;
1176 1.1 sevan if (idx == RGE_RX_LIST_CNT - 1)
1177 1.1 sevan r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
1178 1.1 sevan r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);
1179 1.1 sevan
1180 1.1 sevan bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
1181 1.1 sevan idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
1182 1.1 sevan BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1183 1.1 sevan }
1184 1.1 sevan
1185 1.1 sevan int
1186 1.1 sevan rge_rx_list_init(struct rge_softc *sc)
1187 1.1 sevan {
1188 1.1 sevan int i;
1189 1.1 sevan
1190 1.1 sevan memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);
1191 1.1 sevan
1192 1.1 sevan for (i = 0; i < RGE_RX_LIST_CNT; i++) {
1193 1.1 sevan sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
1194 1.1 sevan if (rge_newbuf(sc, i) == ENOBUFS)
1195 1.1 sevan return (ENOBUFS);
1196 1.1 sevan }
1197 1.1 sevan
1198 1.17 jakllsch sc->rge_ldata.rge_rxq_prodidx = sc->rge_ldata.rge_rxq_considx = 0;
1199 1.1 sevan sc->rge_head = sc->rge_tail = NULL;
1200 1.1 sevan
1201 1.1 sevan return (0);
1202 1.1 sevan }
1203 1.1 sevan
1204 1.1 sevan void
1205 1.1 sevan rge_tx_list_init(struct rge_softc *sc)
1206 1.1 sevan {
1207 1.1 sevan int i;
1208 1.1 sevan
1209 1.1 sevan memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);
1210 1.1 sevan
1211 1.1 sevan for (i = 0; i < RGE_TX_LIST_CNT; i++)
1212 1.1 sevan sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
1213 1.1 sevan
1214 1.1 sevan bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map, 0,
1215 1.1 sevan sc->rge_ldata.rge_tx_list_map->dm_mapsize,
1216 1.1 sevan BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1217 1.1 sevan
1218 1.1 sevan sc->rge_ldata.rge_txq_prodidx = sc->rge_ldata.rge_txq_considx = 0;
1219 1.1 sevan }
1220 1.1 sevan
1221 1.1 sevan int
1222 1.1 sevan rge_rxeof(struct rge_softc *sc)
1223 1.1 sevan {
1224 1.1 sevan struct mbuf *m;
1225 1.2 sevan struct ifnet *ifp = &sc->sc_ec.ec_if;
1226 1.1 sevan struct rge_rx_desc *cur_rx;
1227 1.1 sevan struct rge_rxq *rxq;
1228 1.1 sevan uint32_t rxstat, extsts;
1229 1.1 sevan int i, total_len, rx = 0;
1230 1.1 sevan
1231 1.17 jakllsch for (i = sc->rge_ldata.rge_rxq_considx; ; i = RGE_NEXT_RX_DESC(i)) {
1232 1.1 sevan /* Invalidate the descriptor memory. */
1233 1.1 sevan bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
1234 1.1 sevan i * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
1235 1.1 sevan BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1236 1.1 sevan
1237 1.1 sevan cur_rx = &sc->rge_ldata.rge_rx_list[i];
1238 1.1 sevan
1239 1.1 sevan if (RGE_OWN(cur_rx))
1240 1.1 sevan break;
1241 1.1 sevan
1242 1.1 sevan rxstat = letoh32(cur_rx->rge_cmdsts);
1243 1.1 sevan extsts = letoh32(cur_rx->rge_extsts);
1244 1.5 skrll
1245 1.1 sevan total_len = RGE_RXBYTES(cur_rx);
1246 1.1 sevan rxq = &sc->rge_ldata.rge_rxq[i];
1247 1.1 sevan m = rxq->rxq_mbuf;
1248 1.17 jakllsch rxq->rxq_mbuf = NULL;
1249 1.1 sevan rx = 1;
1250 1.1 sevan
1251 1.1 sevan /* Invalidate the RX mbuf and unload its map. */
1252 1.1 sevan bus_dmamap_sync(sc->sc_dmat, rxq->rxq_dmamap, 0,
1253 1.1 sevan rxq->rxq_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1254 1.1 sevan bus_dmamap_unload(sc->sc_dmat, rxq->rxq_dmamap);
1255 1.1 sevan
1256 1.1 sevan if ((rxstat & (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) !=
1257 1.1 sevan (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) {
1258 1.1 sevan rge_discard_rxbuf(sc, i);
1259 1.1 sevan continue;
1260 1.1 sevan }
1261 1.1 sevan
1262 1.1 sevan if (rxstat & RGE_RDCMDSTS_RXERRSUM) {
1263 1.4 skrll if_statinc(ifp, if_ierrors);
1264 1.1 sevan /*
1265 1.1 sevan * If this is part of a multi-fragment packet,
1266 1.1 sevan * discard all the pieces.
1267 1.1 sevan */
1268 1.1 sevan if (sc->rge_head != NULL) {
1269 1.1 sevan m_freem(sc->rge_head);
1270 1.1 sevan sc->rge_head = sc->rge_tail = NULL;
1271 1.1 sevan }
1272 1.1 sevan rge_discard_rxbuf(sc, i);
1273 1.1 sevan continue;
1274 1.1 sevan }
1275 1.1 sevan
1276 1.1 sevan /*
1277 1.1 sevan * If allocating a replacement mbuf fails,
1278 1.1 sevan * reload the current one.
1279 1.1 sevan */
1280 1.1 sevan
1281 1.1 sevan if (rge_newbuf(sc, i) == ENOBUFS) {
1282 1.1 sevan if (sc->rge_head != NULL) {
1283 1.1 sevan m_freem(sc->rge_head);
1284 1.1 sevan sc->rge_head = sc->rge_tail = NULL;
1285 1.1 sevan }
1286 1.1 sevan rge_discard_rxbuf(sc, i);
1287 1.1 sevan continue;
1288 1.1 sevan }
1289 1.1 sevan
1290 1.17 jakllsch m_set_rcvif(m, ifp);
1291 1.1 sevan if (sc->rge_head != NULL) {
1292 1.1 sevan m->m_len = total_len;
1293 1.1 sevan /*
1294 1.1 sevan 			 * Special case: if there are 4 bytes or fewer
1295 1.1 sevan 			 * in this buffer, the mbuf can be discarded:
1296 1.1 sevan 			 * the last 4 bytes are the CRC, which we don't
1297 1.1 sevan * care about anyway.
1298 1.1 sevan */
1299 1.1 sevan if (m->m_len <= ETHER_CRC_LEN) {
1300 1.1 sevan sc->rge_tail->m_len -=
1301 1.1 sevan (ETHER_CRC_LEN - m->m_len);
1302 1.1 sevan m_freem(m);
1303 1.1 sevan } else {
1304 1.1 sevan m->m_len -= ETHER_CRC_LEN;
1305 1.1 sevan m->m_flags &= ~M_PKTHDR;
1306 1.1 sevan sc->rge_tail->m_next = m;
1307 1.1 sevan }
1308 1.1 sevan m = sc->rge_head;
1309 1.1 sevan sc->rge_head = sc->rge_tail = NULL;
1310 1.1 sevan m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
1311 1.1 sevan } else
1312 1.17 jakllsch #if 0
1313 1.1 sevan m->m_pkthdr.len = m->m_len =
1314 1.1 sevan (total_len - ETHER_CRC_LEN);
1315 1.17 jakllsch #else
1316 1.17 jakllsch {
1317 1.17 jakllsch m->m_pkthdr.len = m->m_len = total_len;
1318 1.17 jakllsch m->m_flags |= M_HASFCS;
1319 1.17 jakllsch }
1320 1.17 jakllsch #endif
1321 1.1 sevan
1322 1.17 jakllsch #if notyet
1323 1.1 sevan /* Check IP header checksum. */
1324 1.1 sevan if (!(rxstat & RGE_RDCMDSTS_IPCSUMERR) &&
1325 1.1 sevan (extsts & RGE_RDEXTSTS_IPV4))
1326 1.1 sevan m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
1327 1.1 sevan
1328 1.1 sevan /* Check TCP/UDP checksum. */
1329 1.1 sevan if ((extsts & (RGE_RDEXTSTS_IPV4 | RGE_RDEXTSTS_IPV6)) &&
1330 1.1 sevan (((rxstat & RGE_RDCMDSTS_TCPPKT) &&
1331 1.1 sevan !(rxstat & RGE_RDCMDSTS_TCPCSUMERR)) ||
1332 1.1 sevan ((rxstat & RGE_RDCMDSTS_UDPPKT) &&
1333 1.1 sevan !(rxstat & RGE_RDCMDSTS_UDPCSUMERR))))
1334 1.1 sevan m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
1335 1.1 sevan M_UDP_CSUM_IN_OK;
1336 1.17 jakllsch #endif
1337 1.1 sevan
1338 1.1 sevan if (extsts & RGE_RDEXTSTS_VTAG) {
1339 1.17 jakllsch vlan_set_tag(m,
1340 1.17 jakllsch bswap16(extsts & RGE_RDEXTSTS_VLAN_MASK));
1341 1.1 sevan }
1342 1.1 sevan
1343 1.17 jakllsch if_percpuq_enqueue(ifp->if_percpuq, m);
1344 1.1 sevan }
1345 1.1 sevan
1346 1.17 jakllsch sc->rge_ldata.rge_rxq_considx = i;
1347 1.1 sevan
1348 1.1 sevan return (rx);
1349 1.1 sevan }
1350 1.1 sevan
1351 1.1 sevan int
1352 1.1 sevan rge_txeof(struct rge_softc *sc)
1353 1.1 sevan {
1354 1.2 sevan struct ifnet *ifp = &sc->sc_ec.ec_if;
1355 1.1 sevan struct rge_txq *txq;
1356 1.1 sevan uint32_t txstat;
1357 1.1 sevan int cons, idx, prod;
1358 1.1 sevan int free = 0;
1359 1.1 sevan
1360 1.1 sevan prod = sc->rge_ldata.rge_txq_prodidx;
1361 1.1 sevan cons = sc->rge_ldata.rge_txq_considx;
1362 1.1 sevan
1363 1.1 sevan while (prod != cons) {
1364 1.1 sevan txq = &sc->rge_ldata.rge_txq[cons];
1365 1.1 sevan idx = txq->txq_descidx;
1366 1.1 sevan
1367 1.1 sevan bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
1368 1.1 sevan idx * sizeof(struct rge_tx_desc),
1369 1.1 sevan sizeof(struct rge_tx_desc),
1370 1.1 sevan BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1371 1.1 sevan
1372 1.1 sevan txstat = letoh32(sc->rge_ldata.rge_tx_list[idx].rge_cmdsts);
1373 1.1 sevan
1374 1.1 sevan if (txstat & RGE_TDCMDSTS_OWN) {
1375 1.1 sevan free = 2;
1376 1.1 sevan break;
1377 1.1 sevan }
1378 1.1 sevan
1379 1.5 skrll bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap, 0,
1380 1.1 sevan txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1381 1.1 sevan bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
1382 1.1 sevan m_freem(txq->txq_mbuf);
1383 1.1 sevan txq->txq_mbuf = NULL;
1384 1.1 sevan
1385 1.1 sevan if (txstat & (RGE_TDCMDSTS_EXCESSCOLL | RGE_TDCMDSTS_COLL))
1386 1.4 skrll if_statinc(ifp, if_collisions);
1387 1.1 sevan if (txstat & RGE_TDCMDSTS_TXERR)
1388 1.4 skrll if_statinc(ifp, if_oerrors);
1389 1.1 sevan
1390 1.1 sevan bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
1391 1.1 sevan idx * sizeof(struct rge_tx_desc),
1392 1.1 sevan sizeof(struct rge_tx_desc),
1393 1.1 sevan BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1394 1.1 sevan
1395 1.1 sevan cons = RGE_NEXT_TX_DESC(idx);
1396 1.1 sevan free = 1;
1397 1.1 sevan }
1398 1.1 sevan
1399 1.1 sevan if (free == 0)
1400 1.1 sevan return (0);
1401 1.1 sevan
1402 1.1 sevan sc->rge_ldata.rge_txq_considx = cons;
1403 1.1 sevan
1404 1.17 jakllsch #if 0
1405 1.1 sevan if (ifq_is_oactive(&ifp->if_snd))
1406 1.1 sevan ifq_restart(&ifp->if_snd);
1407 1.1 sevan else if (free == 2)
1408 1.1 sevan ifq_serialize(&ifp->if_snd, &sc->sc_task);
1409 1.1 sevan else
1410 1.1 sevan ifp->if_timer = 0;
1411 1.17 jakllsch #else
1412 1.17 jakllsch #if 0
1413 1.17 jakllsch if (!IF_IS_EMPTY(&ifp->if_snd))
1414 1.17 jakllsch rge_start(ifp);
1415 1.17 jakllsch else
1416 1.17 jakllsch if (free == 2)
1417 1.17 jakllsch if (0) { rge_txstart(&sc->sc_task, sc); }
1418 1.17 jakllsch else
1419 1.17 jakllsch #endif
1420 1.17 jakllsch ifp->if_timer = 0;
1421 1.17 jakllsch #endif
1422 1.1 sevan
1423 1.1 sevan return (1);
1424 1.1 sevan }
1425 1.1 sevan
1426 1.1 sevan void
1427 1.1 sevan rge_reset(struct rge_softc *sc)
1428 1.1 sevan {
1429 1.1 sevan int i;
1430 1.1 sevan
1431 1.1 sevan /* Enable RXDV gate. */
1432 1.1 sevan RGE_SETBIT_1(sc, RGE_PPSW, 0x08);
1433 1.1 sevan DELAY(2000);
1434 1.1 sevan
1435 1.17 jakllsch for (i = 0; i < 3000; i++) {
1436 1.17 jakllsch DELAY(50);
1437 1.1 sevan if ((RGE_READ_1(sc, RGE_MCUCMD) & (RGE_MCUCMD_RXFIFO_EMPTY |
1438 1.1 sevan RGE_MCUCMD_TXFIFO_EMPTY)) == (RGE_MCUCMD_RXFIFO_EMPTY |
1439 1.1 sevan RGE_MCUCMD_TXFIFO_EMPTY))
1440 1.1 sevan break;
1441 1.1 sevan }
1442 1.17 jakllsch if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5) {
1443 1.17 jakllsch for (i = 0; i < 3000; i++) {
1444 1.17 jakllsch DELAY(50);
1445 1.17 jakllsch if ((RGE_READ_2(sc, RGE_IM) & 0x0103) == 0x0103)
1446 1.17 jakllsch break;
1447 1.17 jakllsch }
1448 1.17 jakllsch }
1449 1.17 jakllsch
1450 1.17 jakllsch DELAY(2000);
1451 1.1 sevan
1452 1.1 sevan /* Soft reset. */
1453 1.1 sevan RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_RESET);
1454 1.1 sevan
1455 1.1 sevan for (i = 0; i < RGE_TIMEOUT; i++) {
1456 1.1 sevan DELAY(100);
1457 1.1 sevan if (!(RGE_READ_1(sc, RGE_CMD) & RGE_CMD_RESET))
1458 1.1 sevan break;
1459 1.1 sevan }
1460 1.1 sevan if (i == RGE_TIMEOUT)
1461 1.16 jakllsch device_printf(sc->sc_dev, "reset never completed!\n");
1462 1.1 sevan }
1463 1.1 sevan
1464 1.1 sevan void
1465 1.1 sevan rge_iff(struct rge_softc *sc)
1466 1.1 sevan {
1467 1.2 sevan struct ifnet *ifp = &sc->sc_ec.ec_if;
1468 1.17 jakllsch struct ethercom *ec = &sc->sc_ec;
1469 1.1 sevan struct ether_multi *enm;
1470 1.1 sevan struct ether_multistep step;
1471 1.1 sevan uint32_t hashes[2];
1472 1.1 sevan uint32_t rxfilt;
1473 1.1 sevan int h = 0;
1474 1.1 sevan
1475 1.1 sevan rxfilt = RGE_READ_4(sc, RGE_RXCFG);
1476 1.1 sevan rxfilt &= ~(RGE_RXCFG_ALLPHYS | RGE_RXCFG_MULTI);
1477 1.1 sevan ifp->if_flags &= ~IFF_ALLMULTI;
1478 1.1 sevan
1479 1.1 sevan /*
1480 1.1 sevan * Always accept frames destined to our station address.
1481 1.1 sevan * Always accept broadcast frames.
1482 1.1 sevan */
1483 1.1 sevan rxfilt |= RGE_RXCFG_INDIV | RGE_RXCFG_BROAD;
1484 1.1 sevan
1485 1.17 jakllsch if (ifp->if_flags & IFF_PROMISC) {
1486 1.17 jakllsch allmulti:
1487 1.1 sevan ifp->if_flags |= IFF_ALLMULTI;
1488 1.1 sevan rxfilt |= RGE_RXCFG_MULTI;
1489 1.1 sevan if (ifp->if_flags & IFF_PROMISC)
1490 1.1 sevan rxfilt |= RGE_RXCFG_ALLPHYS;
1491 1.1 sevan hashes[0] = hashes[1] = 0xffffffff;
1492 1.1 sevan } else {
1493 1.1 sevan rxfilt |= RGE_RXCFG_MULTI;
1494 1.1 sevan /* Program new filter. */
1495 1.1 sevan memset(hashes, 0, sizeof(hashes));
1496 1.1 sevan
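		/* Hash each address into the filter; ranges fall back to allmulti. */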
1497 1.17 jakllsch ETHER_LOCK(ec);
1498 1.17 jakllsch ETHER_FIRST_MULTI(step, ec, enm);
1499 1.1 sevan while (enm != NULL) {
1500 1.17 jakllsch if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
1501 1.17 jakllsch ETHER_ADDR_LEN) != 0) {
1502 1.17 jakllsch ETHER_UNLOCK(ec);
1503 1.17 jakllsch goto allmulti;
1504 1.17 jakllsch }
1505 1.1 sevan h = ether_crc32_be(enm->enm_addrlo,
1506 1.1 sevan ETHER_ADDR_LEN) >> 26;
1507 1.1 sevan
1508 1.1 sevan if (h < 32)
1509 1.1 sevan hashes[0] |= (1 << h);
1510 1.1 sevan else
1511 1.1 sevan hashes[1] |= (1 << (h - 32));
1512 1.1 sevan
1513 1.1 sevan ETHER_NEXT_MULTI(step, enm);
1514 1.1 sevan }
1515 1.17 jakllsch ETHER_UNLOCK(ec);
1516 1.1 sevan }
1517 1.1 sevan
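	/* MAR0/MAR4 take the hash words byte-swapped, high word first. */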
1518 1.1 sevan RGE_WRITE_4(sc, RGE_RXCFG, rxfilt);
1519 1.2 sevan RGE_WRITE_4(sc, RGE_MAR0, bswap32(hashes[1]));
1520 1.2 sevan RGE_WRITE_4(sc, RGE_MAR4, bswap32(hashes[0]));
1521 1.1 sevan }
1522 1.1 sevan
1523 1.1 sevan void
1524 1.1 sevan rge_set_phy_power(struct rge_softc *sc, int on)
1525 1.1 sevan {
1526 1.1 sevan int i;
1527 1.1 sevan
1528 1.1 sevan if (on) {
1529 1.1 sevan RGE_SETBIT_1(sc, RGE_PMCH, 0xc0);
1530 1.1 sevan
1531 1.1 sevan rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN);
1532 1.1 sevan
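		/* Wait for the PHY state (OCP 0xa420) to reach 3. */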
1533 1.1 sevan for (i = 0; i < RGE_TIMEOUT; i++) {
1534 1.10 sevan if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 3)
1535 1.1 sevan break;
1536 1.1 sevan DELAY(1000);
1537 1.1 sevan }
1538 1.17 jakllsch } else {
1539 1.1 sevan rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_PDOWN);
1540 1.17 jakllsch RGE_CLRBIT_1(sc, RGE_PMCH, 0x80);
1541 1.17 jakllsch RGE_CLRBIT_1(sc, RGE_PPSW, 0x40);
1542 1.17 jakllsch }
1543 1.1 sevan }
1544 1.1 sevan
1545 1.1 sevan void
1546 1.1 sevan rge_phy_config(struct rge_softc *sc)
1547 1.1 sevan {
1548 1.17 jakllsch /* Read microcode version. */
1549 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x801e);
1550 1.17 jakllsch sc->rge_mcodever = rge_read_phy_ocp(sc, 0xa438);
1551 1.17 jakllsch
1552 1.17 jakllsch switch (sc->rge_type) {
1553 1.17 jakllsch case MAC_CFG2:
1554 1.17 jakllsch rge_phy_config_mac_cfg2(sc);
1555 1.17 jakllsch break;
1556 1.17 jakllsch case MAC_CFG3:
1557 1.17 jakllsch rge_phy_config_mac_cfg3(sc);
1558 1.17 jakllsch break;
1559 1.17 jakllsch case MAC_CFG4:
1560 1.17 jakllsch rge_phy_config_mac_cfg4(sc);
1561 1.17 jakllsch break;
1562 1.17 jakllsch case MAC_CFG5:
1563 1.17 jakllsch rge_phy_config_mac_cfg5(sc);
1564 1.17 jakllsch break;
1565 1.17 jakllsch default:
1566 1.17 jakllsch break; /* Can't happen. */
1567 1.17 jakllsch }
1568 1.17 jakllsch
1569 1.17 jakllsch rge_write_phy(sc, 0x0a5b, 0x12,
1570 1.17 jakllsch rge_read_phy(sc, 0x0a5b, 0x12) & ~0x8000);
1571 1.17 jakllsch
1572 1.17 jakllsch /* Disable EEE. */
1573 1.17 jakllsch RGE_MAC_CLRBIT(sc, 0xe040, 0x0003);
1574 1.17 jakllsch if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
1575 1.17 jakllsch RGE_MAC_CLRBIT(sc, 0xeb62, 0x0006);
1576 1.17 jakllsch RGE_PHY_CLRBIT(sc, 0xa432, 0x0010);
1577 1.17 jakllsch }
1578 1.17 jakllsch RGE_PHY_CLRBIT(sc, 0xa5d0, 0x0006);
1579 1.17 jakllsch RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0001);
1580 1.17 jakllsch RGE_PHY_CLRBIT(sc, 0xa6d8, 0x0010);
1581 1.17 jakllsch RGE_PHY_CLRBIT(sc, 0xa428, 0x0080);
1582 1.17 jakllsch RGE_PHY_CLRBIT(sc, 0xa4a2, 0x0200);
1583 1.17 jakllsch
1584 1.17 jakllsch rge_patch_phy_mcu(sc, 1);
1585 1.17 jakllsch RGE_MAC_CLRBIT(sc, 0xe052, 0x0001);
1586 1.17 jakllsch RGE_PHY_CLRBIT(sc, 0xa442, 0x3000);
1587 1.17 jakllsch RGE_PHY_CLRBIT(sc, 0xa430, 0x8000);
1588 1.17 jakllsch rge_patch_phy_mcu(sc, 0);
1589 1.17 jakllsch }
1590 1.17 jakllsch
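/*
 * Per-revision PHY setup.  Each rge_phy_config_mac_cfgN() loads the EPHY
 * fix-up table and the PHY MCU patch for that MAC revision, then applies
 * a series of undocumented magic register values.
 */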
1591 1.17 jakllsch void
1592 1.17 jakllsch rge_phy_config_mac_cfg2(struct rge_softc *sc)
1593 1.17 jakllsch {
1594 1.17 jakllsch uint16_t val;
1595 1.17 jakllsch int i;
1596 1.17 jakllsch
1597 1.17 jakllsch for (i = 0; i < nitems(rtl8125_mac_cfg2_ephy); i++)
1598 1.17 jakllsch rge_write_ephy(sc, rtl8125_mac_cfg2_ephy[i].reg,
1599 1.17 jakllsch rtl8125_mac_cfg2_ephy[i].val);
1600 1.17 jakllsch
1601 1.17 jakllsch rge_phy_config_mcu(sc, RGE_MAC_CFG2_MCODE_VER);
1602 1.17 jakllsch
1603 1.17 jakllsch val = rge_read_phy_ocp(sc, 0xad40) & ~0x03ff;
1604 1.17 jakllsch rge_write_phy_ocp(sc, 0xad40, val | 0x0084);
1605 1.17 jakllsch RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
1606 1.17 jakllsch val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
1607 1.17 jakllsch rge_write_phy_ocp(sc, 0xad16, val | 0x0006);
1608 1.17 jakllsch val = rge_read_phy_ocp(sc, 0xad32) & ~0x03ff;
1609 1.17 jakllsch rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
1610 1.17 jakllsch RGE_PHY_CLRBIT(sc, 0xac08, 0x1100);
1611 1.17 jakllsch val = rge_read_phy_ocp(sc, 0xac8a) & ~0xf000;
1612 1.17 jakllsch rge_write_phy_ocp(sc, 0xac8a, val | 0x7000);
1613 1.17 jakllsch RGE_PHY_SETBIT(sc, 0xad18, 0x0400);
1614 1.17 jakllsch RGE_PHY_SETBIT(sc, 0xad1a, 0x03ff);
1615 1.17 jakllsch RGE_PHY_SETBIT(sc, 0xad1c, 0x03ff);
1616 1.17 jakllsch
1617 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x80ea);
1618 1.17 jakllsch val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1619 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, val | 0xc400);
1620 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x80eb);
1621 1.17 jakllsch val = rge_read_phy_ocp(sc, 0xa438) & ~0x0700;
1622 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, val | 0x0300);
1623 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x80f8);
1624 1.17 jakllsch val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1625 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, val | 0x1c00);
1626 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x80f1);
1627 1.17 jakllsch val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1628 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, val | 0x3000);
1629 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x80fe);
1630 1.17 jakllsch val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1631 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, val | 0xa500);
1632 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x8102);
1633 1.17 jakllsch val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1634 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
1635 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x8105);
1636 1.17 jakllsch val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1637 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, val | 0x3300);
1638 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x8100);
1639 1.17 jakllsch val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1640 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, val | 0x7000);
1641 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x8104);
1642 1.17 jakllsch val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1643 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, val | 0xf000);
1644 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x8106);
1645 1.17 jakllsch val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1646 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, val | 0x6500);
1647 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x80dc);
1648 1.17 jakllsch val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1649 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, val | 0xed00);
1650 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x80df);
1651 1.17 jakllsch RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
1652 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x80e1);
1653 1.17 jakllsch RGE_PHY_CLRBIT(sc, 0xa438, 0x0100);
1654 1.17 jakllsch val = rge_read_phy_ocp(sc, 0xbf06) & ~0x003f;
1655 1.17 jakllsch rge_write_phy_ocp(sc, 0xbf06, val | 0x0038);
1656 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x819f);
1657 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, 0xd0b6);
1658 1.17 jakllsch rge_write_phy_ocp(sc, 0xbc34, 0x5555);
1659 1.17 jakllsch val = rge_read_phy_ocp(sc, 0xbf0a) & ~0x0e00;
1660 1.17 jakllsch rge_write_phy_ocp(sc, 0xbf0a, val | 0x0a00);
1661 1.17 jakllsch RGE_PHY_CLRBIT(sc, 0xa5c0, 0x0400);
1662 1.17 jakllsch RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1663 1.17 jakllsch }
1664 1.17 jakllsch
1665 1.17 jakllsch void
1666 1.17 jakllsch rge_phy_config_mac_cfg3(struct rge_softc *sc)
1667 1.17 jakllsch {
1668 1.17 jakllsch struct ifnet *ifp = &sc->sc_ec.ec_if;
1669 1.17 jakllsch uint16_t val;
1670 1.1 sevan int i;
1671 1.1 sevan static const uint16_t mac_cfg3_a438_value[] =
1672 1.1 sevan { 0x0043, 0x00a7, 0x00d6, 0x00ec, 0x00f6, 0x00fb, 0x00fd, 0x00ff,
1673 1.1 sevan 0x00bb, 0x0058, 0x0029, 0x0013, 0x0009, 0x0004, 0x0002 };
1674 1.1 sevan
1675 1.1 sevan static const uint16_t mac_cfg3_b88e_value[] =
1676 1.5 skrll { 0xc091, 0x6e12, 0xc092, 0x1214, 0xc094, 0x1516, 0xc096, 0x171b,
1677 1.1 sevan 0xc098, 0x1b1c, 0xc09a, 0x1f1f, 0xc09c, 0x2021, 0xc09e, 0x2224,
1678 1.1 sevan 0xc0a0, 0x2424, 0xc0a2, 0x2424, 0xc0a4, 0x2424, 0xc018, 0x0af2,
1679 1.1 sevan 0xc01a, 0x0d4a, 0xc01c, 0x0f26, 0xc01e, 0x118d, 0xc020, 0x14f3,
1680 1.1 sevan 0xc022, 0x175a, 0xc024, 0x19c0, 0xc026, 0x1c26, 0xc089, 0x6050,
1681 1.1 sevan 0xc08a, 0x5f6e, 0xc08c, 0x6e6e, 0xc08e, 0x6e6e, 0xc090, 0x6e12 };
1682 1.1 sevan
1683 1.17 jakllsch for (i = 0; i < nitems(rtl8125_mac_cfg3_ephy); i++)
1684 1.17 jakllsch rge_write_ephy(sc, rtl8125_mac_cfg3_ephy[i].reg,
1685 1.17 jakllsch rtl8125_mac_cfg3_ephy[i].val);
1686 1.17 jakllsch
1687 1.17 jakllsch val = rge_read_ephy(sc, 0x002a) & ~0x7000;
1688 1.17 jakllsch rge_write_ephy(sc, 0x002a, val | 0x3000);
1689 1.17 jakllsch RGE_EPHY_CLRBIT(sc, 0x0019, 0x0040);
1690 1.17 jakllsch RGE_EPHY_SETBIT(sc, 0x001b, 0x0e00);
1691 1.17 jakllsch RGE_EPHY_CLRBIT(sc, 0x001b, 0x7000);
1692 1.17 jakllsch rge_write_ephy(sc, 0x0002, 0x6042);
1693 1.17 jakllsch rge_write_ephy(sc, 0x0006, 0x0014);
1694 1.17 jakllsch val = rge_read_ephy(sc, 0x006a) & ~0x7000;
1695 1.17 jakllsch rge_write_ephy(sc, 0x006a, val | 0x3000);
1696 1.17 jakllsch RGE_EPHY_CLRBIT(sc, 0x0059, 0x0040);
1697 1.17 jakllsch RGE_EPHY_SETBIT(sc, 0x005b, 0x0e00);
1698 1.17 jakllsch RGE_EPHY_CLRBIT(sc, 0x005b, 0x7000);
1699 1.17 jakllsch rge_write_ephy(sc, 0x0042, 0x6042);
1700 1.17 jakllsch rge_write_ephy(sc, 0x0046, 0x0014);
1701 1.17 jakllsch
1702 1.17 jakllsch rge_phy_config_mcu(sc, RGE_MAC_CFG3_MCODE_VER);
1703 1.17 jakllsch
1704 1.17 jakllsch RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
1705 1.17 jakllsch val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
1706 1.17 jakllsch rge_write_phy_ocp(sc, 0xad16, val | 0x03ff);
1707 1.17 jakllsch val = rge_read_phy_ocp(sc, 0xad32) & ~0x003f;
1708 1.17 jakllsch rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
1709 1.17 jakllsch RGE_PHY_CLRBIT(sc, 0xac08, 0x1000);
1710 1.17 jakllsch RGE_PHY_CLRBIT(sc, 0xac08, 0x0100);
1711 1.17 jakllsch val = rge_read_phy_ocp(sc, 0xacc0) & ~0x0003;
1712 1.17 jakllsch rge_write_phy_ocp(sc, 0xacc0, val | 0x0002);
1713 1.17 jakllsch val = rge_read_phy_ocp(sc, 0xad40) & ~0x00e0;
1714 1.17 jakllsch rge_write_phy_ocp(sc, 0xad40, val | 0x0040);
1715 1.17 jakllsch val = rge_read_phy_ocp(sc, 0xad40) & ~0x0007;
1716 1.17 jakllsch rge_write_phy_ocp(sc, 0xad40, val | 0x0004);
1717 1.17 jakllsch RGE_PHY_CLRBIT(sc, 0xac14, 0x0080);
1718 1.17 jakllsch RGE_PHY_CLRBIT(sc, 0xac80, 0x0300);
1719 1.17 jakllsch val = rge_read_phy_ocp(sc, 0xac5e) & ~0x0007;
1720 1.17 jakllsch rge_write_phy_ocp(sc, 0xac5e, val | 0x0002);
1721 1.17 jakllsch rge_write_phy_ocp(sc, 0xad4c, 0x00a8);
1722 1.17 jakllsch rge_write_phy_ocp(sc, 0xac5c, 0x01ff);
1723 1.17 jakllsch val = rge_read_phy_ocp(sc, 0xac8a) & ~0x00f0;
1724 1.17 jakllsch rge_write_phy_ocp(sc, 0xac8a, val | 0x0030);
1725 1.17 jakllsch rge_write_phy_ocp(sc, 0xb87c, 0x8157);
1726 1.17 jakllsch val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1727 1.17 jakllsch rge_write_phy_ocp(sc, 0xb87e, val | 0x0500);
1728 1.17 jakllsch rge_write_phy_ocp(sc, 0xb87c, 0x8159);
1729 1.17 jakllsch val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1730 1.17 jakllsch rge_write_phy_ocp(sc, 0xb87e, val | 0x0700);
1731 1.17 jakllsch RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN +
1732 1.17 jakllsch 32);
1733 1.17 jakllsch rge_write_phy_ocp(sc, 0xb87c, 0x80a2);
1734 1.17 jakllsch rge_write_phy_ocp(sc, 0xb87e, 0x0153);
1735 1.17 jakllsch rge_write_phy_ocp(sc, 0xb87c, 0x809c);
1736 1.17 jakllsch rge_write_phy_ocp(sc, 0xb87e, 0x0153);
1737 1.17 jakllsch
1738 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x81b3);
1739 1.17 jakllsch for (i = 0; i < nitems(mac_cfg3_a438_value); i++)
1740 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, mac_cfg3_a438_value[i]);
1741 1.17 jakllsch for (i = 0; i < 26; i++)
1742 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, 0);
1743 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x8257);
1744 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, 0x020f);
1745 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x80ea);
1746 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, 0x7843);
1747 1.17 jakllsch
1748 1.17 jakllsch rge_patch_phy_mcu(sc, 1);
1749 1.17 jakllsch RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
1750 1.17 jakllsch RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
1751 1.17 jakllsch for (i = 0; i < nitems(mac_cfg3_b88e_value); i += 2) {
1752 1.17 jakllsch rge_write_phy_ocp(sc, 0xb88e, mac_cfg3_b88e_value[i]);
1753 1.17 jakllsch rge_write_phy_ocp(sc, 0xb890, mac_cfg3_b88e_value[i + 1]);
1754 1.17 jakllsch }
1755 1.17 jakllsch RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
1756 1.17 jakllsch rge_patch_phy_mcu(sc, 0);
1757 1.17 jakllsch
1758 1.17 jakllsch RGE_PHY_SETBIT(sc, 0xd068, 0x2000);
1759 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x81a2);
1760 1.17 jakllsch RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
1761 1.17 jakllsch val = rge_read_phy_ocp(sc, 0xb54c) & ~0xff00;
1762 1.17 jakllsch rge_write_phy_ocp(sc, 0xb54c, val | 0xdb00);
1763 1.17 jakllsch RGE_PHY_CLRBIT(sc, 0xa454, 0x0001);
1764 1.17 jakllsch RGE_PHY_SETBIT(sc, 0xa5d4, 0x0020);
1765 1.17 jakllsch RGE_PHY_CLRBIT(sc, 0xad4e, 0x0010);
1766 1.17 jakllsch RGE_PHY_CLRBIT(sc, 0xa86a, 0x0001);
1767 1.17 jakllsch RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1768 1.17 jakllsch }
1769 1.17 jakllsch
1770 1.17 jakllsch void
1771 1.17 jakllsch rge_phy_config_mac_cfg4(struct rge_softc *sc)
1772 1.17 jakllsch {
1773 1.17 jakllsch struct ifnet *ifp = &sc->sc_ec.ec_if;
1774 1.17 jakllsch uint16_t val;
1775 1.17 jakllsch int i;
1776 1.17 jakllsch static const uint16_t mac_cfg4_b87c_value[] =
1777 1.17 jakllsch { 0x8013, 0x0700, 0x8fb9, 0x2801, 0x8fba, 0x0100, 0x8fbc, 0x1900,
1778 1.17 jakllsch 0x8fbe, 0xe100, 0x8fc0, 0x0800, 0x8fc2, 0xe500, 0x8fc4, 0x0f00,
1779 1.17 jakllsch 0x8fc6, 0xf100, 0x8fc8, 0x0400, 0x8fca, 0xf300, 0x8fcc, 0xfd00,
1780 1.17 jakllsch 0x8fce, 0xff00, 0x8fd0, 0xfb00, 0x8fd2, 0x0100, 0x8fd4, 0xf400,
1781 1.17 jakllsch 0x8fd6, 0xff00, 0x8fd8, 0xf600, 0x813d, 0x390e, 0x814f, 0x790e,
1782 1.17 jakllsch 0x80b0, 0x0f31 };
1783 1.17 jakllsch
1784 1.17 jakllsch for (i = 0; i < nitems(rtl8125_mac_cfg4_ephy); i++)
1785 1.17 jakllsch rge_write_ephy(sc, rtl8125_mac_cfg4_ephy[i].reg,
1786 1.17 jakllsch rtl8125_mac_cfg4_ephy[i].val);
1787 1.17 jakllsch
1788 1.17 jakllsch rge_write_phy_ocp(sc, 0xbf86, 0x9000);
1789 1.17 jakllsch RGE_PHY_SETBIT(sc, 0xc402, 0x0400);
1790 1.17 jakllsch RGE_PHY_CLRBIT(sc, 0xc402, 0x0400);
1791 1.17 jakllsch rge_write_phy_ocp(sc, 0xbd86, 0x1010);
1792 1.17 jakllsch rge_write_phy_ocp(sc, 0xbd88, 0x1010);
1793 1.17 jakllsch val = rge_read_phy_ocp(sc, 0xbd4e) & ~0x0c00;
1794 1.17 jakllsch rge_write_phy_ocp(sc, 0xbd4e, val | 0x0800);
1795 1.17 jakllsch val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
1796 1.17 jakllsch rge_write_phy_ocp(sc, 0xbf46, val | 0x0700);
1797 1.17 jakllsch
1798 1.17 jakllsch rge_phy_config_mcu(sc, RGE_MAC_CFG4_MCODE_VER);
1799 1.17 jakllsch
1800 1.17 jakllsch RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1801 1.17 jakllsch RGE_PHY_SETBIT(sc, 0xbc08, 0x000c);
1802 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x8fff);
1803 1.17 jakllsch val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1804 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, val | 0x0400);
1805 1.17 jakllsch for (i = 0; i < 6; i++) {
1806 1.17 jakllsch rge_write_phy_ocp(sc, 0xb87c, 0x8560 + i * 2);
1807 1.17 jakllsch if (i < 3)
1808 1.17 jakllsch rge_write_phy_ocp(sc, 0xb87e, 0x19cc);
1809 1.17 jakllsch else
1810 1.17 jakllsch rge_write_phy_ocp(sc, 0xb87e, 0x147d);
1811 1.17 jakllsch }
1812 1.17 jakllsch rge_write_phy_ocp(sc, 0xb87c, 0x8ffe);
1813 1.17 jakllsch rge_write_phy_ocp(sc, 0xb87e, 0x0907);
1814 1.17 jakllsch val = rge_read_phy_ocp(sc, 0xacda) & ~0xff00;
1815 1.17 jakllsch rge_write_phy_ocp(sc, 0xacda, val | 0xff00);
1816 1.17 jakllsch val = rge_read_phy_ocp(sc, 0xacde) & ~0xf000;
1817 1.17 jakllsch rge_write_phy_ocp(sc, 0xacde, val | 0xf000);
1818 1.17 jakllsch rge_write_phy_ocp(sc, 0xb87c, 0x80d6);
1819 1.17 jakllsch rge_write_phy_ocp(sc, 0xb87e, 0x2801);
1820 1.17 jakllsch 	rge_write_phy_ocp(sc, 0xb87c, 0x80f2);
1821 1.17 jakllsch rge_write_phy_ocp(sc, 0xb87e, 0x2801);
1822 1.17 jakllsch rge_write_phy_ocp(sc, 0xb87c, 0x80f4);
1823 1.17 jakllsch rge_write_phy_ocp(sc, 0xb87e, 0x6077);
1824 1.17 jakllsch rge_write_phy_ocp(sc, 0xb506, 0x01e7);
1825 1.17 jakllsch rge_write_phy_ocp(sc, 0xac8c, 0x0ffc);
1826 1.17 jakllsch rge_write_phy_ocp(sc, 0xac46, 0xb7b4);
1827 1.17 jakllsch rge_write_phy_ocp(sc, 0xac50, 0x0fbc);
1828 1.17 jakllsch rge_write_phy_ocp(sc, 0xac3c, 0x9240);
1829 1.17 jakllsch 	rge_write_phy_ocp(sc, 0xac4e, 0x0db4);
1830 1.17 jakllsch rge_write_phy_ocp(sc, 0xacc6, 0x0707);
1831 1.17 jakllsch rge_write_phy_ocp(sc, 0xacc8, 0xa0d3);
1832 1.17 jakllsch rge_write_phy_ocp(sc, 0xad08, 0x0007);
1833 1.17 jakllsch for (i = 0; i < nitems(mac_cfg4_b87c_value); i += 2) {
1834 1.17 jakllsch rge_write_phy_ocp(sc, 0xb87c, mac_cfg4_b87c_value[i]);
1835 1.17 jakllsch rge_write_phy_ocp(sc, 0xb87e, mac_cfg4_b87c_value[i + 1]);
1836 1.17 jakllsch }
1837 1.17 jakllsch RGE_PHY_SETBIT(sc, 0xbf4c, 0x0002);
1838 1.17 jakllsch RGE_PHY_SETBIT(sc, 0xbcca, 0x0300);
1839 1.17 jakllsch rge_write_phy_ocp(sc, 0xb87c, 0x8141);
1840 1.17 jakllsch rge_write_phy_ocp(sc, 0xb87e, 0x320e);
1841 1.17 jakllsch rge_write_phy_ocp(sc, 0xb87c, 0x8153);
1842 1.17 jakllsch rge_write_phy_ocp(sc, 0xb87e, 0x720e);
1843 1.17 jakllsch RGE_PHY_CLRBIT(sc, 0xa432, 0x0040);
1844 1.17 jakllsch rge_write_phy_ocp(sc, 0xb87c, 0x8529);
1845 1.17 jakllsch rge_write_phy_ocp(sc, 0xb87e, 0x050e);
1846 1.17 jakllsch RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN +
1847 1.17 jakllsch 32);
1848 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x816c);
1849 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
1850 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x8170);
1851 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
1852 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x8174);
1853 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, 0x04a0);
1854 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x8178);
1855 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, 0x04a0);
1856 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x817c);
1857 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, 0x0719);
1858 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x8ff4);
1859 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, 0x0400);
1860 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x8ff1);
1861 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, 0x0404);
1862 1.17 jakllsch rge_write_phy_ocp(sc, 0xbf4a, 0x001b);
1863 1.17 jakllsch for (i = 0; i < 6; i++) {
1864 1.17 jakllsch rge_write_phy_ocp(sc, 0xb87c, 0x8033 + i * 4);
1865 1.17 jakllsch if (i == 2)
1866 1.17 jakllsch rge_write_phy_ocp(sc, 0xb87e, 0xfc32);
1867 1.17 jakllsch else
1868 1.17 jakllsch rge_write_phy_ocp(sc, 0xb87e, 0x7c13);
1869 1.17 jakllsch }
1870 1.17 jakllsch rge_write_phy_ocp(sc, 0xb87c, 0x8145);
1871 1.17 jakllsch rge_write_phy_ocp(sc, 0xb87e, 0x370e);
1872 1.17 jakllsch rge_write_phy_ocp(sc, 0xb87c, 0x8157);
1873 1.17 jakllsch rge_write_phy_ocp(sc, 0xb87e, 0x770e);
1874 1.17 jakllsch rge_write_phy_ocp(sc, 0xb87c, 0x8169);
1875 1.17 jakllsch rge_write_phy_ocp(sc, 0xb87e, 0x0d0a);
1876 1.17 jakllsch rge_write_phy_ocp(sc, 0xb87c, 0x817b);
1877 1.17 jakllsch rge_write_phy_ocp(sc, 0xb87e, 0x1d0a);
1878 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x8217);
1879 1.17 jakllsch val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1880 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
1881 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x821a);
1882 1.17 jakllsch val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1883 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
1884 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x80da);
1885 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, 0x0403);
1886 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x80dc);
1887 1.17 jakllsch val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1888 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
1889 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x80b3);
1890 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, 0x0384);
1891 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x80b7);
1892 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, 0x2007);
1893 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x80ba);
1894 1.17 jakllsch val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1895 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
1896 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x80b5);
1897 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, 0xf009);
1898 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x80bd);
1899 1.17 jakllsch val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1900 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, val | 0x9f00);
1901 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x80c7);
1902 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, 0xf083);
1903 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x80dd);
1904 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, 0x03f0);
1905 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x80df);
1906 1.17 jakllsch val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1907 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
1908 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x80cb);
1909 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, 0x2007);
1910 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x80ce);
1911 1.17 jakllsch val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1912 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
1913 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x80c9);
1914 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, 0x8009);
1915 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x80d1);
1916 1.17 jakllsch val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1917 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, val | 0x8000);
1918 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x80a3);
1919 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, 0x200a);
1920 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x80a5);
1921 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, 0xf0ad);
1922 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x809f);
1923 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, 0x6073);
1924 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x80a1);
1925 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, 0x000b);
1926 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x80a9);
1927 1.17 jakllsch val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1928 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, val | 0xc000);
1929 1.17 jakllsch rge_patch_phy_mcu(sc, 1);
1930 1.17 jakllsch RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
1931 1.17 jakllsch RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
1932 1.17 jakllsch rge_write_phy_ocp(sc, 0xb88e, 0xc23e);
1933 1.17 jakllsch rge_write_phy_ocp(sc, 0xb890, 0x0000);
1934 1.17 jakllsch rge_write_phy_ocp(sc, 0xb88e, 0xc240);
1935 1.17 jakllsch rge_write_phy_ocp(sc, 0xb890, 0x0103);
1936 1.17 jakllsch rge_write_phy_ocp(sc, 0xb88e, 0xc242);
1937 1.17 jakllsch rge_write_phy_ocp(sc, 0xb890, 0x0507);
1938 1.17 jakllsch rge_write_phy_ocp(sc, 0xb88e, 0xc244);
1939 1.17 jakllsch rge_write_phy_ocp(sc, 0xb890, 0x090b);
1940 1.17 jakllsch rge_write_phy_ocp(sc, 0xb88e, 0xc246);
1941 1.17 jakllsch rge_write_phy_ocp(sc, 0xb890, 0x0c0e);
1942 1.17 jakllsch rge_write_phy_ocp(sc, 0xb88e, 0xc248);
1943 1.17 jakllsch rge_write_phy_ocp(sc, 0xb890, 0x1012);
1944 1.17 jakllsch rge_write_phy_ocp(sc, 0xb88e, 0xc24a);
1945 1.17 jakllsch rge_write_phy_ocp(sc, 0xb890, 0x1416);
1946 1.17 jakllsch RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
1947 1.17 jakllsch rge_patch_phy_mcu(sc, 0);
1948 1.17 jakllsch RGE_PHY_SETBIT(sc, 0xa86a, 0x0001);
1949 1.17 jakllsch RGE_PHY_SETBIT(sc, 0xa6f0, 0x0001);
1950 1.17 jakllsch rge_write_phy_ocp(sc, 0xbfa0, 0xd70d);
1951 1.17 jakllsch rge_write_phy_ocp(sc, 0xbfa2, 0x4100);
1952 1.17 jakllsch rge_write_phy_ocp(sc, 0xbfa4, 0xe868);
1953 1.17 jakllsch rge_write_phy_ocp(sc, 0xbfa6, 0xdc59);
1954 1.17 jakllsch rge_write_phy_ocp(sc, 0xb54c, 0x3c18);
1955 1.17 jakllsch RGE_PHY_CLRBIT(sc, 0xbfa4, 0x0020);
1956 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x817d);
1957 1.17 jakllsch RGE_PHY_SETBIT(sc, 0xa438, 0x1000);
1958 1.17 jakllsch }
1959 1.17 jakllsch
1960 1.17 jakllsch void
1961 1.17 jakllsch rge_phy_config_mac_cfg5(struct rge_softc *sc)
1962 1.17 jakllsch {
1963 1.17 jakllsch struct ifnet *ifp = &sc->sc_ec.ec_if;
1964 1.17 jakllsch uint16_t val;
1965 1.17 jakllsch int i;
1966 1.1 sevan
1967 1.17 jakllsch for (i = 0; i < nitems(rtl8125_mac_cfg5_ephy); i++)
1968 1.17 jakllsch rge_write_ephy(sc, rtl8125_mac_cfg5_ephy[i].reg,
1969 1.17 jakllsch rtl8125_mac_cfg5_ephy[i].val);
1970 1.17 jakllsch
1971 1.17 jakllsch val = rge_read_ephy(sc, 0x0022) & ~0x0030;
1972 1.17 jakllsch rge_write_ephy(sc, 0x0022, val | 0x0020);
1973 1.17 jakllsch val = rge_read_ephy(sc, 0x0062) & ~0x0030;
1974 1.17 jakllsch rge_write_ephy(sc, 0x0062, val | 0x0020);
1975 1.17 jakllsch
1976 1.17 jakllsch rge_phy_config_mcu(sc, RGE_MAC_CFG5_MCODE_VER);
1977 1.17 jakllsch
1978 1.17 jakllsch RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1979 1.17 jakllsch val = rge_read_phy_ocp(sc, 0xac46) & ~0x00f0;
1980 1.17 jakllsch rge_write_phy_ocp(sc, 0xac46, val | 0x0090);
1981 1.17 jakllsch val = rge_read_phy_ocp(sc, 0xad30) & ~0x0003;
1982 1.17 jakllsch rge_write_phy_ocp(sc, 0xad30, val | 0x0001);
1983 1.17 jakllsch RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN +
1984 1.17 jakllsch 32);
1985 1.17 jakllsch rge_write_phy_ocp(sc, 0xb87c, 0x80f5);
1986 1.17 jakllsch rge_write_phy_ocp(sc, 0xb87e, 0x760e);
1987 1.17 jakllsch rge_write_phy_ocp(sc, 0xb87c, 0x8107);
1988 1.17 jakllsch rge_write_phy_ocp(sc, 0xb87e, 0x360e);
1989 1.17 jakllsch rge_write_phy_ocp(sc, 0xb87c, 0x8551);
1990 1.17 jakllsch val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1991 1.17 jakllsch rge_write_phy_ocp(sc, 0xb87e, val | 0x0800);
1992 1.17 jakllsch val = rge_read_phy_ocp(sc, 0xbf00) & ~0xe000;
1993 1.17 jakllsch rge_write_phy_ocp(sc, 0xbf00, val | 0xa000);
1994 1.17 jakllsch val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
1995 1.17 jakllsch rge_write_phy_ocp(sc, 0xbf46, val | 0x0300);
1996 1.17 jakllsch for (i = 0; i < 10; i++) {
1997 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x8044 + i * 6);
1998 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, 0x2417);
1999 1.17 jakllsch }
2000 1.17 jakllsch RGE_PHY_SETBIT(sc, 0xa4ca, 0x0040);
2001 1.17 jakllsch val = rge_read_phy_ocp(sc, 0xbf84) & ~0xe000;
2002 1.17 jakllsch rge_write_phy_ocp(sc, 0xbf84, val | 0xa000);
2003 1.17 jakllsch }
2004 1.1 sevan
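/*
 * Load the PHY MCU patch unless the PHY already reports the expected
 * microcode version.
 */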
2005 1.17 jakllsch void
2006 1.17 jakllsch rge_phy_config_mcu(struct rge_softc *sc, uint16_t mcode_version)
2007 1.17 jakllsch {
2008 1.17 jakllsch if (sc->rge_mcodever != mcode_version) {
2009 1.17 jakllsch int i;
2010 1.1 sevan
2011 1.17 jakllsch rge_patch_phy_mcu(sc, 1);
2012 1.1 sevan
2013 1.17 jakllsch if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
2014 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x8024);
2015 1.17 jakllsch if (sc->rge_type == MAC_CFG2)
2016 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, 0x8600);
2017 1.17 jakllsch else
2018 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, 0x8601);
2019 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0xb82e);
2020 1.1 sevan rge_write_phy_ocp(sc, 0xa438, 0x0001);
2021 1.1 sevan
2022 1.1 sevan RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
2023 1.17 jakllsch }
2024 1.17 jakllsch
2025 1.17 jakllsch if (sc->rge_type == MAC_CFG2) {
2026 1.1 sevan for (i = 0; i < nitems(rtl8125_mac_cfg2_mcu); i++) {
2027 1.1 sevan rge_write_phy_ocp(sc,
2028 1.1 sevan rtl8125_mac_cfg2_mcu[i].reg,
2029 1.1 sevan rtl8125_mac_cfg2_mcu[i].val);
2030 1.1 sevan }
2031 1.17 jakllsch } else if (sc->rge_type == MAC_CFG3) {
2032 1.1 sevan for (i = 0; i < nitems(rtl8125_mac_cfg3_mcu); i++) {
2033 1.1 sevan rge_write_phy_ocp(sc,
2034 1.1 sevan rtl8125_mac_cfg3_mcu[i].reg,
2035 1.1 sevan rtl8125_mac_cfg3_mcu[i].val);
2036 1.1 sevan }
2037 1.17 jakllsch } else if (sc->rge_type == MAC_CFG4) {
2038 1.17 jakllsch for (i = 0; i < nitems(rtl8125_mac_cfg4_mcu); i++) {
2039 1.17 jakllsch rge_write_phy_ocp(sc,
2040 1.17 jakllsch rtl8125_mac_cfg4_mcu[i].reg,
2041 1.17 jakllsch rtl8125_mac_cfg4_mcu[i].val);
2042 1.17 jakllsch }
2043 1.17 jakllsch } else if (sc->rge_type == MAC_CFG5) {
2044 1.17 jakllsch for (i = 0; i < nitems(rtl8125_mac_cfg5_mcu); i++) {
2045 1.17 jakllsch rge_write_phy_ocp(sc,
2046 1.17 jakllsch rtl8125_mac_cfg5_mcu[i].reg,
2047 1.17 jakllsch rtl8125_mac_cfg5_mcu[i].val);
2048 1.17 jakllsch }
2049 1.17 jakllsch }
2050 1.17 jakllsch
2051 1.17 jakllsch if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
2052 1.1 sevan RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);
2053 1.1 sevan
2054 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0);
2055 1.1 sevan rge_write_phy_ocp(sc, 0xa438, 0);
2056 1.1 sevan RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
2057 1.1 sevan rge_write_phy_ocp(sc, 0xa436, 0x8024);
2058 1.1 sevan rge_write_phy_ocp(sc, 0xa438, 0);
2059 1.17 jakllsch }
2060 1.1 sevan
2061 1.1 sevan rge_patch_phy_mcu(sc, 0);
2062 1.1 sevan
2063 1.17 jakllsch /* Write microcode version. */
2064 1.17 jakllsch rge_write_phy_ocp(sc, 0xa436, 0x801e);
2065 1.17 jakllsch rge_write_phy_ocp(sc, 0xa438, mcode_version);
2066 1.1 sevan }
2067 1.1 sevan }
2068 1.1 sevan
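/* Program the station address, temporarily enabling config register writes. */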
2069 1.1 sevan void
2070 1.1 sevan rge_set_macaddr(struct rge_softc *sc, const uint8_t *addr)
2071 1.1 sevan {
2072 1.1 sevan RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2073 1.1 sevan RGE_WRITE_4(sc, RGE_MAC0,
2074 1.1 sevan addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
2075 1.1 sevan RGE_WRITE_4(sc, RGE_MAC4,
2076 1.1 sevan addr[5] << 8 | addr[4]);
2077 1.1 sevan RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2078 1.1 sevan }
2079 1.1 sevan
2080 1.1 sevan void
2081 1.1 sevan rge_get_macaddr(struct rge_softc *sc, uint8_t *addr)
2082 1.1 sevan {
2083 1.1 sevan *(uint32_t *)&addr[0] = RGE_READ_4(sc, RGE_ADDR0);
2084 1.1 sevan *(uint16_t *)&addr[4] = RGE_READ_2(sc, RGE_ADDR1);
2085 1.1 sevan }
2086 1.1 sevan
2087 1.1 sevan void
2088 1.1 sevan rge_hw_init(struct rge_softc *sc)
2089 1.1 sevan {
2090 1.1 sevan int i;
2091 1.1 sevan
2092 1.1 sevan RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2093 1.1 sevan RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
2094 1.1 sevan RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
2095 1.1 sevan RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2096 1.1 sevan RGE_CLRBIT_1(sc, 0xf1, 0x80);
2097 1.1 sevan
2098 1.1 sevan /* Disable UPS. */
2099 1.1 sevan RGE_MAC_CLRBIT(sc, 0xd40a, 0x0010);
2100 1.1 sevan
2101 1.1 sevan /* Configure MAC MCU. */
2102 1.1 sevan rge_write_mac_ocp(sc, 0xfc38, 0);
2103 1.1 sevan
2104 1.1 sevan for (i = 0xfc28; i < 0xfc38; i += 2)
2105 1.1 sevan rge_write_mac_ocp(sc, i, 0);
2106 1.1 sevan
2107 1.1 sevan DELAY(3000);
2108 1.1 sevan rge_write_mac_ocp(sc, 0xfc26, 0);
2109 1.1 sevan
2110 1.1 sevan if (sc->rge_type == MAC_CFG3) {
2111 1.17 jakllsch for (i = 0; i < nitems(rtl8125_mac_bps); i++) {
2112 1.17 jakllsch rge_write_mac_ocp(sc, rtl8125_mac_bps[i].reg,
2113 1.17 jakllsch rtl8125_mac_bps[i].val);
2114 1.17 jakllsch }
2115 1.17 jakllsch } else if (sc->rge_type == MAC_CFG5) {
2116 1.17 jakllsch for (i = 0; i < nitems(rtl8125b_mac_bps); i++) {
2117 1.17 jakllsch rge_write_mac_ocp(sc, rtl8125b_mac_bps[i].reg,
2118 1.17 jakllsch rtl8125b_mac_bps[i].val);
2119 1.17 jakllsch }
2120 1.1 sevan }
2121 1.1 sevan
2122 1.1 sevan /* Disable PHY power saving. */
2123 1.1 sevan rge_disable_phy_ocp_pwrsave(sc);
2124 1.1 sevan
2125 1.1 sevan /* Set PCIe uncorrectable error status. */
2126 1.1 sevan rge_write_csi(sc, 0x108,
2127 1.1 sevan rge_read_csi(sc, 0x108) | 0x00100000);
2128 1.1 sevan }
2129 1.1 sevan
2130 1.1 sevan void
2131 1.1 sevan rge_disable_phy_ocp_pwrsave(struct rge_softc *sc)
2132 1.1 sevan {
2133 1.1 sevan if (rge_read_phy_ocp(sc, 0xc416) != 0x0500) {
2134 1.1 sevan rge_patch_phy_mcu(sc, 1);
2135 1.1 sevan rge_write_phy_ocp(sc, 0xc416, 0);
2136 1.1 sevan rge_write_phy_ocp(sc, 0xc416, 0x0500);
2137 1.1 sevan rge_patch_phy_mcu(sc, 0);
2138 1.1 sevan }
2139 1.1 sevan }
2140 1.1 sevan
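/* Enter or leave PHY MCU patch mode and wait for the PHY to acknowledge. */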
2141 1.1 sevan void
2142 1.1 sevan rge_patch_phy_mcu(struct rge_softc *sc, int set)
2143 1.1 sevan {
2144 1.1 sevan int i;
2145 1.1 sevan
2146 1.1 sevan if (set)
2147 1.1 sevan RGE_PHY_SETBIT(sc, 0xb820, 0x0010);
2148 1.1 sevan else
2149 1.1 sevan RGE_PHY_CLRBIT(sc, 0xb820, 0x0010);
2150 1.1 sevan
2151 1.1 sevan for (i = 0; i < 1000; i++) {
2152 1.17 jakllsch if ((rge_read_phy_ocp(sc, 0xb800) & 0x0040) == 0x0040)
2153 1.17 jakllsch break;
2154 1.1 sevan DELAY(100);
2155 1.1 sevan }
2156 1.17 jakllsch if (i == 1000) {
2157 1.17 jakllsch DPRINTF(("timeout waiting to patch phy mcu\n"));
2158 1.17 jakllsch return;
2159 1.17 jakllsch }
2160 1.1 sevan }
2161 1.1 sevan
2162 1.1 sevan void
2163 1.1 sevan rge_add_media_types(struct rge_softc *sc)
2164 1.1 sevan {
2165 1.1 sevan ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
2166 1.1 sevan ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
2167 1.1 sevan ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
2168 1.1 sevan ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
2169 1.1 sevan ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T, 0, NULL);
2170 1.1 sevan ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2171 1.1 sevan ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
2172 1.1 sevan ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX, 0, NULL);
2173 1.1 sevan }
2174 1.1 sevan
2175 1.1 sevan void
2176 1.1 sevan rge_config_imtype(struct rge_softc *sc, int imtype)
2177 1.1 sevan {
2178 1.1 sevan switch (imtype) {
2179 1.1 sevan case RGE_IMTYPE_NONE:
2180 1.1 sevan sc->rge_intrs = RGE_INTRS;
2181 1.1 sevan sc->rge_rx_ack = RGE_ISR_RX_OK | RGE_ISR_RX_DESC_UNAVAIL |
2182 1.1 sevan RGE_ISR_RX_FIFO_OFLOW;
2183 1.1 sevan sc->rge_tx_ack = RGE_ISR_TX_OK;
2184 1.1 sevan break;
2185 1.1 sevan case RGE_IMTYPE_SIM:
2186 1.1 sevan sc->rge_intrs = RGE_INTRS_TIMER;
2187 1.1 sevan sc->rge_rx_ack = RGE_ISR_PCS_TIMEOUT;
2188 1.1 sevan sc->rge_tx_ack = RGE_ISR_PCS_TIMEOUT;
2189 1.1 sevan break;
2190 1.1 sevan default:
2191 1.14 sevan panic("%s: unknown imtype %d", device_xname(sc->sc_dev), imtype);
2192 1.1 sevan }
2193 1.1 sevan }
2194 1.1 sevan
2195 1.1 sevan void
2196 1.17 jakllsch rge_disable_hw_im(struct rge_softc *sc)
2197 1.17 jakllsch {
2198 1.17 jakllsch RGE_WRITE_2(sc, RGE_IM, 0);
2199 1.17 jakllsch }
2200 1.17 jakllsch
2201 1.17 jakllsch void
2202 1.1 sevan rge_disable_sim_im(struct rge_softc *sc)
2203 1.1 sevan {
2204 1.17 jakllsch RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
2205 1.1 sevan sc->rge_timerintr = 0;
2206 1.1 sevan }
2207 1.1 sevan
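/*
 * Simulated interrupt moderation: arm the chip timer so RGE_ISR_PCS_TIMEOUT
 * fires periodically and is acked in place of the RX/TX interrupts
 * (see rge_config_imtype()).
 */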
2208 1.1 sevan void
2209 1.1 sevan rge_setup_sim_im(struct rge_softc *sc)
2210 1.1 sevan {
2211 1.17 jakllsch RGE_WRITE_4(sc, RGE_TIMERINT0, 0x2600);
2212 1.1 sevan RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
2213 1.1 sevan sc->rge_timerintr = 1;
2214 1.1 sevan }
2215 1.1 sevan
2216 1.1 sevan void
2217 1.1 sevan rge_setup_intr(struct rge_softc *sc, int imtype)
2218 1.1 sevan {
2219 1.1 sevan rge_config_imtype(sc, imtype);
2220 1.1 sevan
2221 1.1 sevan /* Enable interrupts. */
2222 1.1 sevan RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
2223 1.1 sevan
2224 1.1 sevan switch (imtype) {
2225 1.1 sevan case RGE_IMTYPE_NONE:
2226 1.1 sevan rge_disable_sim_im(sc);
2227 1.17 jakllsch rge_disable_hw_im(sc);
2228 1.1 sevan break;
2229 1.1 sevan case RGE_IMTYPE_SIM:
2230 1.17 jakllsch rge_disable_hw_im(sc);
2231 1.1 sevan rge_setup_sim_im(sc);
2232 1.1 sevan break;
2233 1.1 sevan default:
2234 1.14 sevan panic("%s: unknown imtype %d", device_xname(sc->sc_dev), imtype);
2235 1.1 sevan }
2236 1.1 sevan }
2237 1.1 sevan
2238 1.1 sevan void
2239 1.1 sevan rge_exit_oob(struct rge_softc *sc)
2240 1.1 sevan {
2241 1.1 sevan int i;
2242 1.1 sevan
2243 1.1 sevan RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
2244 1.1 sevan RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
2245 1.1 sevan RGE_RXCFG_ERRPKT);
2246 1.1 sevan
2247 1.1 sevan /* Disable RealWoW. */
2248 1.1 sevan rge_write_mac_ocp(sc, 0xc0bc, 0x00ff);
2249 1.1 sevan
2250 1.1 sevan rge_reset(sc);
2251 1.1 sevan
2252 1.1 sevan /* Disable OOB. */
2253 1.1 sevan RGE_CLRBIT_1(sc, RGE_MCUCMD, RGE_MCUCMD_IS_OOB);
2254 1.1 sevan
2255 1.1 sevan RGE_MAC_CLRBIT(sc, 0xe8de, 0x4000);
2256 1.1 sevan
2257 1.1 sevan for (i = 0; i < 10; i++) {
2258 1.1 sevan DELAY(100);
2259 1.1 sevan if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
2260 1.1 sevan break;
2261 1.1 sevan }
2262 1.1 sevan
2263 1.1 sevan rge_write_mac_ocp(sc, 0xc0aa, 0x07d0);
2264 1.17 jakllsch rge_write_mac_ocp(sc, 0xc0a6, 0x01b5);
2265 1.1 sevan rge_write_mac_ocp(sc, 0xc01e, 0x5555);
2266 1.1 sevan
2267 1.1 sevan for (i = 0; i < 10; i++) {
2268 1.1 sevan DELAY(100);
2269 1.1 sevan if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
2270 1.1 sevan break;
2271 1.1 sevan }
2272 1.1 sevan
2273 1.1 sevan if (rge_read_mac_ocp(sc, 0xd42c) & 0x0100) {
2274 1.17 jakllsch printf("%s: rge_exit_oob(): rtl8125_is_ups_resume!!\n",
2275 1.17 jakllsch device_xname(sc->sc_dev));
2276 1.1 sevan for (i = 0; i < RGE_TIMEOUT; i++) {
2277 1.10 sevan if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 2)
2278 1.1 sevan break;
2279 1.1 sevan DELAY(1000);
2280 1.1 sevan }
2281 1.1 sevan RGE_MAC_CLRBIT(sc, 0xd408, 0x0100);
2282 1.17 jakllsch if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5)
2283 1.17 jakllsch RGE_PHY_CLRBIT(sc, 0xa466, 0x0001);
2284 1.1 sevan RGE_PHY_CLRBIT(sc, 0xa468, 0x000a);
2285 1.1 sevan }
2286 1.1 sevan }
2287 1.1 sevan
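/*
 * Indirect register access through the CSI window (RGE_CSIAR/RGE_CSIDR),
 * used for PCIe configuration registers.
 */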
2288 1.1 sevan void
2289 1.1 sevan rge_write_csi(struct rge_softc *sc, uint32_t reg, uint32_t val)
2290 1.1 sevan {
2291 1.1 sevan int i;
2292 1.1 sevan
2293 1.1 sevan RGE_WRITE_4(sc, RGE_CSIDR, val);
2294 1.17 jakllsch RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
2295 1.1 sevan (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT) | RGE_CSIAR_BUSY);
2296 1.1 sevan
2297 1.1 sevan for (i = 0; i < 10; i++) {
2298 1.1 sevan DELAY(100);
2299 1.1 sevan if (!(RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY))
2300 1.1 sevan break;
2301 1.1 sevan }
2302 1.1 sevan
2303 1.1 sevan DELAY(20);
2304 1.1 sevan }
2305 1.1 sevan
2306 1.1 sevan uint32_t
2307 1.1 sevan rge_read_csi(struct rge_softc *sc, uint32_t reg)
2308 1.1 sevan {
2309 1.1 sevan int i;
2310 1.1 sevan
2311 1.17 jakllsch RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
2312 1.1 sevan (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT));
2313 1.1 sevan
2314 1.1 sevan for (i = 0; i < 10; i++) {
2315 1.1 sevan DELAY(100);
2316 1.1 sevan if (RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY)
2317 1.1 sevan break;
2318 1.1 sevan }
2319 1.1 sevan
2320 1.1 sevan DELAY(20);
2321 1.1 sevan
2322 1.1 sevan return (RGE_READ_4(sc, RGE_CSIDR));
2323 1.1 sevan }
2324 1.1 sevan
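/*
 * MAC OCP registers: the half-word register index goes in the upper bits
 * of RGE_MACOCP, the 16-bit data in the lower half.
 */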
2325 1.1 sevan void
2326 1.1 sevan rge_write_mac_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
2327 1.1 sevan {
2328 1.1 sevan uint32_t tmp;
2329 1.1 sevan
2330 1.1 sevan tmp = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
2331 1.1 sevan tmp += val;
2332 1.1 sevan tmp |= RGE_MACOCP_BUSY;
2333 1.1 sevan RGE_WRITE_4(sc, RGE_MACOCP, tmp);
2334 1.1 sevan }
2335 1.1 sevan
2336 1.1 sevan uint16_t
2337 1.1 sevan rge_read_mac_ocp(struct rge_softc *sc, uint16_t reg)
2338 1.1 sevan {
2339 1.1 sevan uint32_t val;
2340 1.1 sevan
2341 1.1 sevan val = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
2342 1.1 sevan RGE_WRITE_4(sc, RGE_MACOCP, val);
2343 1.1 sevan
2344 1.1 sevan return (RGE_READ_4(sc, RGE_MACOCP) & RGE_MACOCP_DATA_MASK);
2345 1.1 sevan }
2346 1.1 sevan
2347 1.1 sevan void
2348 1.1 sevan rge_write_ephy(struct rge_softc *sc, uint16_t reg, uint16_t val)
2349 1.1 sevan {
2350 1.1 sevan uint32_t tmp;
2351 1.1 sevan int i;
2352 1.1 sevan
2353 1.1 sevan tmp = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
2354 1.1 sevan tmp |= RGE_EPHYAR_BUSY | (val & RGE_EPHYAR_DATA_MASK);
2355 1.1 sevan RGE_WRITE_4(sc, RGE_EPHYAR, tmp);
2356 1.1 sevan
2357 1.1 sevan for (i = 0; i < 10; i++) {
2358 1.1 sevan DELAY(100);
2359 1.1 sevan if (!(RGE_READ_4(sc, RGE_EPHYAR) & RGE_EPHYAR_BUSY))
2360 1.1 sevan break;
2361 1.1 sevan }
2362 1.1 sevan
2363 1.1 sevan DELAY(20);
2364 1.1 sevan }
2365 1.1 sevan
2366 1.17 jakllsch uint16_t
2367 1.17 jakllsch rge_read_ephy(struct rge_softc *sc, uint16_t reg)
2368 1.17 jakllsch {
2369 1.17 jakllsch uint32_t val;
2370 1.17 jakllsch int i;
2371 1.17 jakllsch
2372 1.17 jakllsch val = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
2373 1.17 jakllsch RGE_WRITE_4(sc, RGE_EPHYAR, val);
2374 1.17 jakllsch
2375 1.17 jakllsch for (i = 0; i < 10; i++) {
2376 1.17 jakllsch DELAY(100);
2377 1.17 jakllsch val = RGE_READ_4(sc, RGE_EPHYAR);
2378 1.17 jakllsch if (val & RGE_EPHYAR_BUSY)
2379 1.17 jakllsch break;
2380 1.17 jakllsch }
2381 1.17 jakllsch
2382 1.17 jakllsch DELAY(20);
2383 1.17 jakllsch
2384 1.17 jakllsch return (val & RGE_EPHYAR_DATA_MASK);
2385 1.17 jakllsch }
2386 1.17 jakllsch
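/* Write a PHY register through its mapping into the OCP register space. */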
2387 1.1 sevan void
2388 1.1 sevan rge_write_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg, uint16_t val)
2389 1.1 sevan {
2390 1.1 sevan uint16_t off, phyaddr;
2391 1.1 sevan
2392 1.1 sevan phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
2393 1.1 sevan phyaddr <<= 4;
2394 1.1 sevan
2395 1.1 sevan off = addr ? reg : 0x10 + (reg % 8);
2396 1.1 sevan
2397 1.1 sevan phyaddr += (off - 16) << 1;
2398 1.1 sevan
2399 1.1 sevan rge_write_phy_ocp(sc, phyaddr, val);
2400 1.1 sevan }
2401 1.1 sevan
2402 1.17 jakllsch uint16_t
2403 1.17 jakllsch rge_read_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg)
2404 1.17 jakllsch {
2405 1.17 jakllsch uint16_t off, phyaddr;
2406 1.17 jakllsch
2407 1.17 jakllsch phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
2408 1.17 jakllsch phyaddr <<= 4;
2409 1.17 jakllsch
2410 1.17 jakllsch off = addr ? reg : 0x10 + (reg % 8);
2411 1.17 jakllsch
2412 1.17 jakllsch phyaddr += (off - 16) << 1;
2413 1.17 jakllsch
2414 1.17 jakllsch return (rge_read_phy_ocp(sc, phyaddr));
2415 1.17 jakllsch }
2416 1.17 jakllsch
2417 1.1 sevan void
2418 1.1 sevan rge_write_phy_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
2419 1.1 sevan {
2420 1.1 sevan uint32_t tmp;
2421 1.1 sevan int i;
2422 1.1 sevan
2423 1.1 sevan tmp = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
2424 1.1 sevan tmp |= RGE_PHYOCP_BUSY | val;
2425 1.1 sevan RGE_WRITE_4(sc, RGE_PHYOCP, tmp);
2426 1.1 sevan
2427 1.1 sevan for (i = 0; i < RGE_TIMEOUT; i++) {
2428 1.1 sevan DELAY(1);
2429 1.1 sevan if (!(RGE_READ_4(sc, RGE_PHYOCP) & RGE_PHYOCP_BUSY))
2430 1.1 sevan break;
2431 1.1 sevan }
2432 1.1 sevan }
2433 1.1 sevan
2434 1.1 sevan uint16_t
2435 1.1 sevan rge_read_phy_ocp(struct rge_softc *sc, uint16_t reg)
2436 1.1 sevan {
2437 1.1 sevan uint32_t val;
2438 1.1 sevan int i;
2439 1.1 sevan
2440 1.1 sevan val = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
2441 1.1 sevan RGE_WRITE_4(sc, RGE_PHYOCP, val);
2442 1.1 sevan
2443 1.1 sevan for (i = 0; i < RGE_TIMEOUT; i++) {
2444 1.1 sevan DELAY(1);
2445 1.1 sevan val = RGE_READ_4(sc, RGE_PHYOCP);
2446 1.1 sevan if (val & RGE_PHYOCP_BUSY)
2447 1.1 sevan break;
2448 1.1 sevan }
2449 1.1 sevan
2450 1.1 sevan return (val & RGE_PHYOCP_DATA_MASK);
2451 1.1 sevan }
2452 1.1 sevan
2453 1.1 sevan int
2454 1.1 sevan rge_get_link_status(struct rge_softc *sc)
2455 1.1 sevan {
2456 1.1 sevan return ((RGE_READ_2(sc, RGE_PHYSTAT) & RGE_PHYSTAT_LINK) ? 1 : 0);
2457 1.1 sevan }
2458 1.1 sevan
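/* Workqueue callback: kick the transmitter. */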
2459 1.1 sevan void
2460 1.3 sevan rge_txstart(struct work *wk, void *arg)
2461 1.1 sevan {
2462 1.1 sevan struct rge_softc *sc = arg;
2463 1.1 sevan
2464 1.1 sevan RGE_WRITE_2(sc, RGE_TXSTART, RGE_TXSTART_START);
2465 1.1 sevan }
2466 1.1 sevan
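/* Once-a-second callout: update the link state. */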
2467 1.1 sevan void
2468 1.1 sevan rge_tick(void *arg)
2469 1.1 sevan {
2470 1.1 sevan struct rge_softc *sc = arg;
2471 1.1 sevan int s;
2472 1.1 sevan
2473 1.1 sevan s = splnet();
2474 1.1 sevan rge_link_state(sc);
2475 1.1 sevan splx(s);
2476 1.1 sevan
2477 1.17 jakllsch callout_schedule(&sc->sc_timeout, hz);
2478 1.1 sevan }
2479 1.1 sevan
2480 1.1 sevan void
2481 1.1 sevan rge_link_state(struct rge_softc *sc)
2482 1.1 sevan {
2483 1.2 sevan struct ifnet *ifp = &sc->sc_ec.ec_if;
2484 1.1 sevan int link = LINK_STATE_DOWN;
2485 1.1 sevan
2486 1.1 sevan if (rge_get_link_status(sc))
2487 1.1 sevan link = LINK_STATE_UP;
2488 1.1 sevan
2489 1.17 jakllsch if (ifp->if_link_state != link) { /* XXX not safe to access */
2490 1.17 jakllsch if_link_state_change(ifp, link);
2491 1.1 sevan }
2492 1.1 sevan }
2493