/*	$NetBSD: if_rge.c,v 1.30 2023/12/21 08:50:22 skrll Exp $	*/
/*	$OpenBSD: if_rge.c,v 1.9 2020/12/12 11:48:53 jan Exp $	*/

/*
 * Copyright (c) 2019, 2020 Kevin Lo <kevlo@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_rge.c,v 1.30 2023/12/21 08:50:22 skrll Exp $");

#if defined(_KERNEL_OPT)
#include "opt_net_mpsafe.h"
#endif

#include <sys/types.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/endian.h>
#include <sys/callout.h>
#include <sys/workqueue.h>

#include <net/if.h>

#include <net/if_dl.h>
#include <net/if_ether.h>

#include <net/if_media.h>

#include <netinet/in.h>

#include <net/bpf.h>

#include <sys/bus.h>
#include <machine/intr.h>

#include <dev/mii/mii.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_rgereg.h>

#ifdef __NetBSD__
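/* Provide the OpenBSD spellings used by the shared code below. */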
#define letoh32		htole32
#define nitems(x)	__arraycount(x)

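/*
 * OpenBSD compat: allocate a packet header mbuf backed by an external
 * buffer of the given size; returns NULL on failure.
 */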
static struct mbuf *
MCLGETL(struct rge_softc *sc __unused, int how,
    u_int size)
{
	struct mbuf *m;

	MGETHDR(m, how, MT_DATA);
	if (m == NULL)
		return NULL;

	MEXTMALLOC(m, size, how);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return NULL;
	}
	return m;
}

#ifdef NET_MPSAFE
#define RGE_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif
#endif

#ifdef RGE_DEBUG
#define DPRINTF(x)	do { if (rge_debug > 0) printf x; } while (0)
int rge_debug = 0;
#else
#define DPRINTF(x)
#endif

static int	rge_match(device_t, cfdata_t, void *);
static void	rge_attach(device_t, device_t, void *);
int		rge_intr(void *);
int		rge_encap(struct rge_softc *, struct mbuf *, int);
int		rge_ioctl(struct ifnet *, u_long, void *);
void		rge_start(struct ifnet *);
void		rge_watchdog(struct ifnet *);
int		rge_init(struct ifnet *);
void		rge_stop(struct ifnet *, int);
int		rge_ifmedia_upd(struct ifnet *);
void		rge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
int		rge_allocmem(struct rge_softc *);
int		rge_newbuf(struct rge_softc *, int);
static int	rge_rx_list_init(struct rge_softc *);
static void	rge_rx_list_fini(struct rge_softc *);
static void	rge_tx_list_init(struct rge_softc *);
static void	rge_tx_list_fini(struct rge_softc *);
int		rge_rxeof(struct rge_softc *);
int		rge_txeof(struct rge_softc *);
void		rge_reset(struct rge_softc *);
void		rge_iff(struct rge_softc *);
void		rge_set_phy_power(struct rge_softc *, int);
void		rge_phy_config(struct rge_softc *);
void		rge_phy_config_mac_cfg2(struct rge_softc *);
void		rge_phy_config_mac_cfg3(struct rge_softc *);
void		rge_phy_config_mac_cfg4(struct rge_softc *);
void		rge_phy_config_mac_cfg5(struct rge_softc *);
void		rge_phy_config_mcu(struct rge_softc *, uint16_t);
void		rge_set_macaddr(struct rge_softc *, const uint8_t *);
void		rge_get_macaddr(struct rge_softc *, uint8_t *);
void		rge_hw_init(struct rge_softc *);
void		rge_disable_phy_ocp_pwrsave(struct rge_softc *);
void		rge_patch_phy_mcu(struct rge_softc *, int);
void		rge_add_media_types(struct rge_softc *);
void		rge_config_imtype(struct rge_softc *, int);
void		rge_disable_hw_im(struct rge_softc *);
void		rge_disable_sim_im(struct rge_softc *);
void		rge_setup_sim_im(struct rge_softc *);
void		rge_setup_intr(struct rge_softc *, int);
void		rge_exit_oob(struct rge_softc *);
void		rge_write_csi(struct rge_softc *, uint32_t, uint32_t);
uint32_t	rge_read_csi(struct rge_softc *, uint32_t);
void		rge_write_mac_ocp(struct rge_softc *, uint16_t, uint16_t);
uint16_t	rge_read_mac_ocp(struct rge_softc *, uint16_t);
void		rge_write_ephy(struct rge_softc *, uint16_t, uint16_t);
uint16_t	rge_read_ephy(struct rge_softc *, uint16_t);
void		rge_write_phy(struct rge_softc *, uint16_t, uint16_t, uint16_t);
uint16_t	rge_read_phy(struct rge_softc *, uint16_t, uint16_t);
void		rge_write_phy_ocp(struct rge_softc *, uint16_t, uint16_t);
uint16_t	rge_read_phy_ocp(struct rge_softc *, uint16_t);
int		rge_get_link_status(struct rge_softc *);
void		rge_txstart(void *);
void		rge_tick(void *);
void		rge_link_state(struct rge_softc *);

static const struct {
	uint16_t reg;
	uint16_t val;
} rtl8125_mac_cfg2_mcu[] = {
	RTL8125_MAC_CFG2_MCU
}, rtl8125_mac_cfg3_mcu[] = {
	RTL8125_MAC_CFG3_MCU
}, rtl8125_mac_cfg4_mcu[] = {
	RTL8125_MAC_CFG4_MCU
}, rtl8125_mac_cfg5_mcu[] = {
	RTL8125_MAC_CFG5_MCU
};

CFATTACH_DECL_NEW(rge, sizeof(struct rge_softc), rge_match, rge_attach,
    NULL, NULL); /* Sevan - detach function? */

static const struct device_compatible_entry compat_data[] = {
	{ .id = PCI_ID_CODE(PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_E3000) },
	{ .id = PCI_ID_CODE(PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8125) },

	PCI_COMPAT_EOL
};

static int
rge_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;

	return pci_compatible_match(pa, compat_data);
}

void
rge_attach(device_t parent, device_t self, void *aux)
{
	struct rge_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t *ihp;
	char intrbuf[PCI_INTRSTR_LEN];
	const char *intrstr = NULL;
	struct ifnet *ifp;
	pcireg_t reg;
	uint32_t hwrev;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int offset;
	pcireg_t command;

	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);

	sc->sc_dev = self;

	pci_aprint_devinfo(pa, "Ethernet controller");

	/*
	 * Map control/status registers.
	 */
	if (pci_mapreg_map(pa, RGE_PCI_BAR2, PCI_MAPREG_TYPE_MEM |
	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->rge_btag, &sc->rge_bhandle,
	    NULL, &sc->rge_bsize)) {
		if (pci_mapreg_map(pa, RGE_PCI_BAR1, PCI_MAPREG_TYPE_MEM |
		    PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->rge_btag,
		    &sc->rge_bhandle, NULL, &sc->rge_bsize)) {
			if (pci_mapreg_map(pa, RGE_PCI_BAR0, PCI_MAPREG_TYPE_IO,
			    0, &sc->rge_btag, &sc->rge_bhandle, NULL,
			    &sc->rge_bsize)) {
				aprint_error(": can't map mem or i/o space\n");
				return;
			}
		}
	}

	int counts[PCI_INTR_TYPE_SIZE] = {
		[PCI_INTR_TYPE_INTX] = 1,
		[PCI_INTR_TYPE_MSI] = 1,
		[PCI_INTR_TYPE_MSIX] = 1,
	};
	int max_type = PCI_INTR_TYPE_MSIX;
	/*
	 * Allocate interrupt.
	 */
	if (pci_intr_alloc(pa, &ihp, counts, max_type) != 0) {
		aprint_error(": couldn't map interrupt\n");
		return;
	}
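	/*
	 * Remember whether we got a message-based interrupt: MSI/MSI-X
	 * vectors are not shared, so rge_intr() can skip the shared
	 * interrupt status pre-check that INTx needs.
	 */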
	switch (pci_intr_type(pc, ihp[0])) {
	case PCI_INTR_TYPE_MSIX:
	case PCI_INTR_TYPE_MSI:
		sc->rge_flags |= RGE_FLAG_MSI;
		break;
	default:
		break;
	}
	intrstr = pci_intr_string(pc, ihp[0], intrbuf, sizeof(intrbuf));
	sc->sc_ih = pci_intr_establish_xname(pc, ihp[0], IPL_NET, rge_intr,
	    sc, device_xname(sc->sc_dev));
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "couldn't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;

	/* Determine hardware revision */
	hwrev = RGE_READ_4(sc, RGE_TXCFG) & RGE_TXCFG_HWREV;
	switch (hwrev) {
	case 0x60800000:
		sc->rge_type = MAC_CFG2;
		break;
	case 0x60900000:
		sc->rge_type = MAC_CFG3;
		break;
	case 0x64000000:
		sc->rge_type = MAC_CFG4;
		break;
	case 0x64100000:
		sc->rge_type = MAC_CFG5;
		break;
	default:
		aprint_error(": unknown version 0x%08x\n", hwrev);
		return;
	}

	rge_config_imtype(sc, RGE_IMTYPE_SIM);

	/*
	 * PCI Express check.
	 */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
	    &offset, NULL)) {
		/* Disable PCIe ASPM and ECPM. */
		reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    offset + PCIE_LCSR);
		reg &= ~(PCIE_LCSR_ASPM_L0S | PCIE_LCSR_ASPM_L1 |
		    PCIE_LCSR_ENCLKPM);
		pci_conf_write(pa->pa_pc, pa->pa_tag, offset + PCIE_LCSR,
		    reg);
	}

	rge_exit_oob(sc);
	rge_hw_init(sc);

	rge_get_macaddr(sc, eaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(eaddr));

	memcpy(sc->sc_enaddr, eaddr, ETHER_ADDR_LEN);

	rge_set_phy_power(sc, 1);
	rge_phy_config(sc);

	if (rge_allocmem(sc))
		return;

	ifp = &sc->sc_ec.ec_if;
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef RGE_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = rge_ioctl;
	ifp->if_stop = rge_stop;
	ifp->if_start = rge_start;
	ifp->if_init = rge_init;
	ifp->if_watchdog = rge_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, RGE_TX_LIST_CNT - 1);

#if notyet
	ifp->if_capabilities = IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_TCPv4_Tx |
	    IFCAP_CSUM_UDPv4_Rx | IFCAP_CSUM_UDPv4_Tx;
#endif

	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;

	callout_init(&sc->sc_timeout, CALLOUT_FLAGS);
	callout_setfunc(&sc->sc_timeout, rge_tick, sc);

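	/* Enable PCI bus mastering so the chip can DMA descriptors and packets. */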
	command = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	command |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);

	/* Initialize ifmedia structures. */
	sc->sc_ec.ec_ifmedia = &sc->sc_media;
	ifmedia_init(&sc->sc_media, IFM_IMASK, rge_ifmedia_upd,
	    rge_ifmedia_sts);
	rge_add_media_types(sc);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
	sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media;

	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, eaddr);

	if (pmf_device_register(self, NULL, NULL))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");
}

int
rge_intr(void *arg)
{
	struct rge_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint32_t status;
	int claimed = 0, rx, tx;

	if (!(ifp->if_flags & IFF_RUNNING))
		return (0);

	/* Disable interrupts. */
	RGE_WRITE_4(sc, RGE_IMR, 0);

	if (!(sc->rge_flags & RGE_FLAG_MSI)) {
		if ((RGE_READ_4(sc, RGE_ISR) & sc->rge_intrs) == 0)
			return (0);
	}

	status = RGE_READ_4(sc, RGE_ISR);
	if (status)
		RGE_WRITE_4(sc, RGE_ISR, status);

	if (status & RGE_ISR_PCS_TIMEOUT)
		claimed = 1;

	rx = tx = 0;
	if (status & sc->rge_intrs) {
		if (status &
		    (sc->rge_rx_ack | RGE_ISR_RX_ERR | RGE_ISR_RX_FIFO_OFLOW)) {
			rx |= rge_rxeof(sc);
			claimed = 1;
		}

		if (status & (sc->rge_tx_ack | RGE_ISR_TX_ERR)) {
			tx |= rge_txeof(sc);
			claimed = 1;
		}

		if (status & RGE_ISR_SYSTEM_ERR) {
			KERNEL_LOCK(1, NULL);
			rge_init(ifp);
			KERNEL_UNLOCK_ONE(NULL);
			claimed = 1;
		}
	}

	if (sc->rge_timerintr) {
		if ((tx | rx) == 0) {
			/*
			 * Nothing needed processing; fall back
			 * to using plain TX/RX interrupts.
			 */
			rge_setup_intr(sc, RGE_IMTYPE_NONE);

			/*
			 * Recollect, mainly to avoid the possible
			 * race introduced by changing interrupt
			 * masks.
			 */
			rge_rxeof(sc);
			rge_txeof(sc);
		} else
			RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
	} else if (tx | rx) {
		/*
		 * Assume that using simulated interrupt moderation
		 * (hardware timer based) could reduce the interrupt
		 * rate.
		 */
		rge_setup_intr(sc, RGE_IMTYPE_SIM);
	}

	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);

	return (claimed);
}

int
rge_encap(struct rge_softc *sc, struct mbuf *m, int idx)
{
	struct rge_tx_desc *d = NULL;
	struct rge_txq *txq;
	bus_dmamap_t txmap;
	uint32_t cmdsts, cflags = 0;
	int cur, error, i, last, nsegs;

#if notyet
	/*
	 * Set RGE_TDEXTSTS_IPCSUM if any checksum offloading is requested.
	 * Otherwise, RGE_TDEXTSTS_TCPCSUM / RGE_TDEXTSTS_UDPCSUM does not
	 * take effect.
	 */
	if ((m->m_pkthdr.csum_flags &
	    (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) != 0) {
		cflags |= RGE_TDEXTSTS_IPCSUM;
		if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
			cflags |= RGE_TDEXTSTS_TCPCSUM;
		if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
			cflags |= RGE_TDEXTSTS_UDPCSUM;
	}
#endif

	txq = &sc->rge_ldata.rge_txq[idx];
	txmap = txq->txq_dmamap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m, BUS_DMA_NOWAIT);
	switch (error) {
	case 0:
		break;
	case EFBIG:	/* mbuf chain is too fragmented */
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m,
		    BUS_DMA_NOWAIT) == 0)
			break;

		/* FALLTHROUGH */
	default:
		return (0);
	}

	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	nsegs = txmap->dm_nsegs;

	/* Set up hardware VLAN tagging. */
	if (vlan_has_tag(m))
		cflags |= bswap16(vlan_get_tag(m)) | RGE_TDEXTSTS_VTAG;

	last = cur = idx;
	cmdsts = RGE_TDCMDSTS_SOF;

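	/*
	 * Build the descriptor chain.  OWN is deliberately left off the
	 * first descriptor and set only after the rest of the chain has
	 * been written, so the chip never sees a half-built packet.
	 */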
	for (i = 0; i < txmap->dm_nsegs; i++) {
		d = &sc->rge_ldata.rge_tx_list[cur];

		d->rge_extsts = htole32(cflags);
		d->rge_addrlo = htole32(RGE_ADDR_LO(txmap->dm_segs[i].ds_addr));
		d->rge_addrhi = htole32(RGE_ADDR_HI(txmap->dm_segs[i].ds_addr));

		cmdsts |= txmap->dm_segs[i].ds_len;

		if (cur == RGE_TX_LIST_CNT - 1)
			cmdsts |= RGE_TDCMDSTS_EOR;

		d->rge_cmdsts = htole32(cmdsts);

		last = cur;
		cmdsts = RGE_TDCMDSTS_OWN;
		cur = RGE_NEXT_TX_DESC(cur);
	}

	/* Set EOF on the last descriptor. */
	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_EOF);

	/* Transfer ownership of packet to the chip. */
	d = &sc->rge_ldata.rge_tx_list[idx];

	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
	    cur * sizeof(struct rge_tx_desc), sizeof(struct rge_tx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Update info of TX queue and descriptors. */
	txq->txq_mbuf = m;
	txq->txq_descidx = last;

	return (nsegs);
}

int
rge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct rge_softc *sc = ifp->if_softc;
	//struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		/* XXX set an ifflags callback and let ether_ioctl
		 * handle all of this.
		 */
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				rge_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				rge_stop(ifp, 1);
		}
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			rge_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
rge_start(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int free, idx, used;
	int queued = 0;

#define LINK_STATE_IS_UP(_s)	\
	((_s) >= LINK_STATE_UP || (_s) == LINK_STATE_UNKNOWN)

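	/*
	 * LINK_STATE_UNKNOWN counts as up so packets can still be queued
	 * before the first link-state update arrives.
	 */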
	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
		IFQ_PURGE(&ifp->if_snd);
		return;
	}

	/* Calculate free space. */
	idx = sc->rge_ldata.rge_txq_prodidx;
	free = sc->rge_ldata.rge_txq_considx;
	if (free <= idx)
		free += RGE_TX_LIST_CNT;
	free -= idx;

	for (;;) {
		if (RGE_TX_NSEGS >= free + 2) {
			SET(ifp->if_flags, IFF_OACTIVE);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		used = rge_encap(sc, m, idx);
		if (used == 0) {
			m_freem(m);
			continue;
		}

		KASSERT(used <= free);
		free -= used;

		bpf_mtap(ifp, m, BPF_D_OUT);

		idx += used;
		if (idx >= RGE_TX_LIST_CNT)
			idx -= RGE_TX_LIST_CNT;

		queued++;
	}

	if (queued == 0)
		return;

	/* Set a timeout in case the chip goes out to lunch. */
	ifp->if_timer = 5;

	sc->rge_ldata.rge_txq_prodidx = idx;
	rge_txstart(sc);
}

void
rge_watchdog(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;

	device_printf(sc->sc_dev, "watchdog timeout\n");
	if_statinc(ifp, if_oerrors);

	rge_init(ifp);
}

int
rge_init(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	uint32_t val;
	unsigned i;

	rge_stop(ifp, 0);

	/* Set MAC address. */
	rge_set_macaddr(sc, CLLADDR(ifp->if_sadl));

	/* Set Maximum frame size. */
	RGE_WRITE_2(sc, RGE_RXMAXSIZE, RGE_JUMBO_FRAMELEN);

	/* Initialize RX descriptors list. */
	int error = rge_rx_list_init(sc);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "init failed: no memory for RX buffers\n");
		rge_stop(ifp, 1);
		return error;
	}

	/* Initialize TX descriptors. */
	rge_tx_list_init(sc);

	/* Load the addresses of the RX and TX lists into the chip. */
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_LO,
	    RGE_ADDR_LO(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_HI,
	    RGE_ADDR_HI(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_LO,
	    RGE_ADDR_LO(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_HI,
	    RGE_ADDR_HI(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));

	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);

	RGE_CLRBIT_1(sc, 0xf1, 0x80);
	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
	RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_RDY_TO_L23);

	/* Clear interrupt moderation timer. */
	for (i = 0; i < 64; i++)
		RGE_WRITE_4(sc, RGE_INTMITI(i), 0);

	/* Set the initial RX and TX configurations. */
	RGE_WRITE_4(sc, RGE_RXCFG, RGE_RXCFG_CONFIG);
	RGE_WRITE_4(sc, RGE_TXCFG, RGE_TXCFG_CONFIG);

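	/*
	 * The CSI and MAC OCP register pokes below are undocumented tuning
	 * values carried over from Realtek's vendor driver.
	 */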
	val = rge_read_csi(sc, 0x70c) & ~0xff000000;
	rge_write_csi(sc, 0x70c, val | 0x27000000);

	/* Enable hardware optimization function. */
	val = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x78) & ~0x00007000;
	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x78, val | 0x00005000);

	RGE_WRITE_2(sc, 0x0382, 0x221b);
	RGE_WRITE_1(sc, 0x4500, 0);
	RGE_WRITE_2(sc, 0x4800, 0);
	RGE_CLRBIT_1(sc, RGE_CFG1, RGE_CFG1_SPEED_DOWN);

	rge_write_mac_ocp(sc, 0xc140, 0xffff);
	rge_write_mac_ocp(sc, 0xc142, 0xffff);

	val = rge_read_mac_ocp(sc, 0xd3e2) & ~0x0fff;
	rge_write_mac_ocp(sc, 0xd3e2, val | 0x03a9);

	RGE_MAC_CLRBIT(sc, 0xd3e4, 0x00ff);
	RGE_MAC_SETBIT(sc, 0xe860, 0x0080);
	RGE_MAC_SETBIT(sc, 0xeb58, 0x0001);

	val = rge_read_mac_ocp(sc, 0xe614) & ~0x0700;
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		rge_write_mac_ocp(sc, 0xe614, val | 0x0400);
	else
		rge_write_mac_ocp(sc, 0xe614, val | 0x0200);

	RGE_MAC_CLRBIT(sc, 0xe63e, 0x0c00);

	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
		val = rge_read_mac_ocp(sc, 0xe63e) & ~0x0030;
		rge_write_mac_ocp(sc, 0xe63e, val | 0x0020);
	} else
		RGE_MAC_CLRBIT(sc, 0xe63e, 0x0030);

	RGE_MAC_SETBIT(sc, 0xc0b4, 0x000c);

	val = rge_read_mac_ocp(sc, 0xeb6a) & ~0x00ff;
	rge_write_mac_ocp(sc, 0xeb6a, val | 0x0033);

	val = rge_read_mac_ocp(sc, 0xeb50) & ~0x03e0;
	rge_write_mac_ocp(sc, 0xeb50, val | 0x0040);

	val = rge_read_mac_ocp(sc, 0xe056) & ~0x00f0;
	rge_write_mac_ocp(sc, 0xe056, val | 0x0030);

	RGE_WRITE_1(sc, RGE_TDFNR, 0x10);

	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_TX_10M_PS_EN);

	RGE_MAC_CLRBIT(sc, 0xe040, 0x1000);

	val = rge_read_mac_ocp(sc, 0xea1c) & ~0x0003;
	rge_write_mac_ocp(sc, 0xea1c, val | 0x0001);

	val = rge_read_mac_ocp(sc, 0xe0c0) & ~0x4f0f;
	rge_write_mac_ocp(sc, 0xe0c0, val | 0x4403);

	RGE_MAC_SETBIT(sc, 0xe052, 0x0068);
	RGE_MAC_CLRBIT(sc, 0xe052, 0x0080);

	val = rge_read_mac_ocp(sc, 0xc0ac) & ~0x0080;
	rge_write_mac_ocp(sc, 0xc0ac, val | 0x1f00);

	val = rge_read_mac_ocp(sc, 0xd430) & ~0x0fff;
	rge_write_mac_ocp(sc, 0xd430, val | 0x047f);

	val = rge_read_mac_ocp(sc, 0xe84c) & ~0x0040;
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		rge_write_mac_ocp(sc, 0xe84c, 0x00c0);
	else
		rge_write_mac_ocp(sc, 0xe84c, 0x0080);

	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_PFM_EN);

	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		RGE_SETBIT_1(sc, RGE_MCUCMD, 0x01);

	/* Disable EEE plus. */
	RGE_MAC_CLRBIT(sc, 0xe080, 0x0002);

	RGE_MAC_CLRBIT(sc, 0xea1c, 0x0004);

	RGE_MAC_SETBIT(sc, 0xeb54, 0x0001);
	DELAY(1);
	RGE_MAC_CLRBIT(sc, 0xeb54, 0x0001);

	RGE_CLRBIT_4(sc, 0x1880, 0x0030);

	rge_write_mac_ocp(sc, 0xe098, 0xc302);

	if ((sc->sc_ec.ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0)
		RGE_SETBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);
	else
		RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);

	RGE_SETBIT_2(sc, RGE_CPLUSCMD, RGE_CPLUSCMD_RXCSUM);

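	/* Poll an undocumented MCU busy flag; give up after ~10 ms. */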
	for (i = 0; i < 10; i++) {
		if (!(rge_read_mac_ocp(sc, 0xe00e) & 0x2000))
			break;
		DELAY(1000);
	}

	/* Disable RXDV gate. */
	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
	DELAY(2000);

	rge_ifmedia_upd(ifp);

	/* Enable transmit and receive. */
	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_TXENB | RGE_CMD_RXENB);

	/* Program promiscuous mode and multicast filters. */
	rge_iff(sc);

	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);

	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);

	/* Enable interrupts. */
	rge_setup_intr(sc, RGE_IMTYPE_SIM);

	ifp->if_flags |= IFF_RUNNING;
	CLR(ifp->if_flags, IFF_OACTIVE);

	callout_schedule(&sc->sc_timeout, 1);

	return (0);
}

/*
 * Stop the adapter and free any mbufs allocated to the RX and TX lists.
 */
void
rge_stop(struct ifnet *ifp, int disable)
{
	struct rge_softc *sc = ifp->if_softc;

	callout_halt(&sc->sc_timeout, NULL);

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_RUNNING;
	sc->rge_timerintr = 0;

	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
	    RGE_RXCFG_ERRPKT);

	RGE_WRITE_4(sc, RGE_IMR, 0);

	/* Clear timer interrupts. */
	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT1, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT2, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT3, 0);

	rge_reset(sc);

//	intr_barrier(sc->sc_ih);
//	ifq_barrier(&ifp->if_snd);
/*	ifq_clr_oactive(&ifp->if_snd); Sevan - OpenBSD queue API */

	if (sc->rge_head != NULL) {
		m_freem(sc->rge_head);
		sc->rge_head = sc->rge_tail = NULL;
	}

	rge_tx_list_fini(sc);
	rge_rx_list_fini(sc);
}

/*
 * Set media options.
 */
int
rge_ifmedia_upd(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_media;
	int anar, gig, val;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Disable Gigabit Lite. */
	RGE_PHY_CLRBIT(sc, 0xa428, 0x0200);
	RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0001);

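	/*
	 * The 2.5G advertisement bit lives in a PHY OCP register rather
	 * than in the standard MII autonegotiation registers set below.
	 */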
	val = rge_read_phy_ocp(sc, 0xa5d4);
	val &= ~RGE_ADV_2500TFDX;

	anar = gig = 0;
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		val |= RGE_ADV_2500TFDX;
		break;
	case IFM_2500_T:
		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		val |= RGE_ADV_2500TFDX;
		ifp->if_baudrate = IF_Mbps(2500);
		break;
	case IFM_1000_T:
		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		ifp->if_baudrate = IF_Gbps(1);
		break;
	case IFM_100_TX:
		gig = rge_read_phy(sc, 0, MII_100T2CR) &
		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
		    ANAR_TX | ANAR_TX_FD | ANAR_10_FD | ANAR_10 :
		    ANAR_TX | ANAR_10_FD | ANAR_10;
		ifp->if_baudrate = IF_Mbps(100);
		break;
	case IFM_10_T:
		gig = rge_read_phy(sc, 0, MII_100T2CR) &
		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
		    ANAR_10_FD | ANAR_10 : ANAR_10;
		ifp->if_baudrate = IF_Mbps(10);
		break;
	default:
		device_printf(sc->sc_dev,
		    "unsupported media type\n");
		return (EINVAL);
	}

	rge_write_phy(sc, 0, MII_ANAR, anar | ANAR_PAUSE_ASYM | ANAR_FC);
	rge_write_phy(sc, 0, MII_100T2CR, gig);
	rge_write_phy_ocp(sc, 0xa5d4, val);
	rge_write_phy(sc, 0, MII_BMCR, BMCR_RESET | BMCR_AUTOEN |
	    BMCR_STARTNEG);

	return (0);
}

/*
 * Report current media status.
 */
void
rge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct rge_softc *sc = ifp->if_softc;
	uint16_t status = 0;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (rge_get_link_status(sc)) {
		ifmr->ifm_status |= IFM_ACTIVE;

		status = RGE_READ_2(sc, RGE_PHYSTAT);
		if ((status & RGE_PHYSTAT_FDX) ||
		    (status & RGE_PHYSTAT_2500MBPS))
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;

		if (status & RGE_PHYSTAT_10MBPS)
			ifmr->ifm_active |= IFM_10_T;
		else if (status & RGE_PHYSTAT_100MBPS)
			ifmr->ifm_active |= IFM_100_TX;
		else if (status & RGE_PHYSTAT_1000MBPS)
			ifmr->ifm_active |= IFM_1000_T;
		else if (status & RGE_PHYSTAT_2500MBPS)
			ifmr->ifm_active |= IFM_2500_T;
	}
}

/*
 * Allocate memory for RX/TX rings.
 *
 * XXX There is no tear-down for this if any part fails, so everything
 * remains allocated.
 */
int
rge_allocmem(struct rge_softc *sc)
{
	int error, i;

	/* Allocate DMA'able memory for the TX ring. */
	error = bus_dmamap_create(sc->sc_dmat, RGE_TX_LIST_SZ, 1,
	    RGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT, &sc->rge_ldata.rge_tx_list_map);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't create TX list map\n");
		return (error);
	}
	error = bus_dmamem_alloc(sc->sc_dmat, RGE_TX_LIST_SZ, RGE_ALIGN, 0,
	    &sc->rge_ldata.rge_tx_listseg, 1, &sc->rge_ldata.rge_tx_listnseg,
	    BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't alloc TX list\n");
		return (error);
	}

	/* Load the map for the TX ring. */
	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
	    sc->rge_ldata.rge_tx_listnseg, RGE_TX_LIST_SZ,
	    (void **) &sc->rge_ldata.rge_tx_list,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't map TX dma buffers\n");
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
		    sc->rge_ldata.rge_tx_listnseg);
		return (error);
	}
	memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);
	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
	    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't load TX dma map\n");
		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map);
		bus_dmamem_unmap(sc->sc_dmat,
		    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ);
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
		    sc->rge_ldata.rge_tx_listnseg);
		return (error);
	}

	/* Create DMA maps for TX buffers. */
	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN,
		    RGE_TX_NSEGS, RGE_JUMBO_FRAMELEN, 0, 0,
		    &sc->rge_ldata.rge_txq[i].txq_dmamap);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't create DMA map for TX\n");
			return (error);
		}
	}

	/* Allocate DMA'able memory for the RX ring. */
	error = bus_dmamap_create(sc->sc_dmat, RGE_RX_LIST_SZ, 1,
	    RGE_RX_LIST_SZ, 0, 0, &sc->rge_ldata.rge_rx_list_map);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't create RX list map\n");
		return (error);
	}
	error = bus_dmamem_alloc(sc->sc_dmat, RGE_RX_LIST_SZ, RGE_ALIGN, 0,
	    &sc->rge_ldata.rge_rx_listseg, 1, &sc->rge_ldata.rge_rx_listnseg,
	    BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't alloc RX list\n");
		return (error);
	}

	/* Load the map for the RX ring. */
	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
	    sc->rge_ldata.rge_rx_listnseg, RGE_RX_LIST_SZ,
	    (void **) &sc->rge_ldata.rge_rx_list,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't map RX dma buffers\n");
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
		    sc->rge_ldata.rge_rx_listnseg);
		return (error);
	}
	memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);
	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
	    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't load RX dma map\n");
		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map);
		bus_dmamem_unmap(sc->sc_dmat,
		    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ);
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
		    sc->rge_ldata.rge_rx_listnseg);
		return (error);
	}

	/*
	 * Create DMA maps for RX buffers. Use BUS_DMA_ALLOCNOW to avoid any
	 * potential failure in bus_dmamap_load_mbuf() in the RX path.
	 */
	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN, 1,
		    RGE_JUMBO_FRAMELEN, 0, BUS_DMA_ALLOCNOW,
		    &sc->rge_ldata.rge_rxq[i].rxq_dmamap);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't create DMA map for RX\n");
			return (error);
		}
	}

	return (error);
}

/*
 * Set an RX descriptor and sync it.
 */
static void
rge_load_rxbuf(struct rge_softc *sc, int idx)
{
	struct rge_rx_desc *r = &sc->rge_ldata.rge_rx_list[idx];
	struct rge_rxq *rxq = &sc->rge_ldata.rge_rxq[idx];
	bus_dmamap_t rxmap = rxq->rxq_dmamap;
	uint32_t cmdsts;

	cmdsts = rxmap->dm_segs[0].ds_len | RGE_RDCMDSTS_OWN;
	if (idx == RGE_RX_LIST_CNT - 1)
		cmdsts |= RGE_RDCMDSTS_EOR;

	r->hi_qword0.rge_addr = htole64(rxmap->dm_segs[0].ds_addr);
	r->hi_qword1.rx_qword4.rge_extsts = 0;
	r->hi_qword1.rx_qword4.rge_cmdsts = htole32(cmdsts);

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

/*
 * Initialize the RX descriptor and attach an mbuf cluster.
 */
int
rge_newbuf(struct rge_softc *sc, int idx)
{
	struct mbuf *m;
	struct rge_rxq *rxq;
	bus_dmamap_t rxmap;
	int error __diagused;

	m = MCLGETL(NULL, M_DONTWAIT, RGE_JUMBO_FRAMELEN);
	if (m == NULL)
		return (ENOBUFS);

	m->m_len = m->m_pkthdr.len = RGE_JUMBO_FRAMELEN;

	rxq = &sc->rge_ldata.rge_rxq[idx];
	rxmap = rxq->rxq_dmamap;

	if (rxq->rxq_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxq->rxq_dmamap);

	/* This map was created with BUS_DMA_ALLOCNOW so should never fail. */
	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m, BUS_DMA_NOWAIT);
	KASSERTMSG(error == 0, "error=%d", error);

	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Map the segments into RX descriptors. */

	rxq->rxq_mbuf = m;
	rge_load_rxbuf(sc, idx);

	return 0;
}

static int
rge_rx_list_init(struct rge_softc *sc)
{
	unsigned i;

	memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);

	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
		sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
		if (rge_newbuf(sc, i) != 0) {
			rge_rx_list_fini(sc);
			return (ENOBUFS);
		}
	}

	sc->rge_ldata.rge_rxq_prodidx = sc->rge_ldata.rge_rxq_considx = 0;
	sc->rge_head = sc->rge_tail = NULL;

	return (0);
}

static void
rge_rx_list_fini(struct rge_softc *sc)
{
	unsigned i;

	/* Free the RX list buffers. */
	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
		if (sc->rge_ldata.rge_rxq[i].rxq_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->rge_ldata.rge_rxq[i].rxq_dmamap);
			m_freem(sc->rge_ldata.rge_rxq[i].rxq_mbuf);
			sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
		}
	}
}

static void
rge_tx_list_init(struct rge_softc *sc)
{
	unsigned i;

	memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);

	for (i = 0; i < RGE_TX_LIST_CNT; i++)
		sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map, 0,
	    sc->rge_ldata.rge_tx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->rge_ldata.rge_txq_prodidx = sc->rge_ldata.rge_txq_considx = 0;
}

static void
rge_tx_list_fini(struct rge_softc *sc)
{
	unsigned i;

	/* Free the TX list buffers. */
	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
		if (sc->rge_ldata.rge_txq[i].txq_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->rge_ldata.rge_txq[i].txq_dmamap);
			m_freem(sc->rge_ldata.rge_txq[i].txq_mbuf);
			sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
		}
	}
}

int
rge_rxeof(struct rge_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct rge_rx_desc *cur_rx;
	struct rge_rxq *rxq;
	uint32_t rxstat, extsts;
	int i, total_len, rx = 0;

	for (i = sc->rge_ldata.rge_rxq_considx; ; i = RGE_NEXT_RX_DESC(i)) {
		/* Invalidate the descriptor memory. */
		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
		    i * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_rx = &sc->rge_ldata.rge_rx_list[i];

		if (RGE_OWN(cur_rx))
			break;

		rxstat = letoh32(cur_rx->hi_qword1.rx_qword4.rge_cmdsts);
		extsts = letoh32(cur_rx->hi_qword1.rx_qword4.rge_extsts);

		total_len = RGE_RXBYTES(cur_rx);
		rxq = &sc->rge_ldata.rge_rxq[i];
		m = rxq->rxq_mbuf;
		rx = 1;

		/* Invalidate the RX mbuf. */
		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_dmamap, 0,
		    rxq->rxq_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		if ((rxstat & (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) !=
		    (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) {
			if_statinc(ifp, if_ierrors);
			rge_load_rxbuf(sc, i);
			continue;
		}

		if (rxstat & RGE_RDCMDSTS_RXERRSUM) {
			if_statinc(ifp, if_ierrors);
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->rge_head != NULL) {
				m_freem(sc->rge_head);
				sc->rge_head = sc->rge_tail = NULL;
			}
			rge_load_rxbuf(sc, i);
			continue;
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
		 */
		if (rge_newbuf(sc, i) != 0) {
			if_statinc(ifp, if_iqdrops);
			if (sc->rge_head != NULL) {
				m_freem(sc->rge_head);
				sc->rge_head = sc->rge_tail = NULL;
			}
			rge_load_rxbuf(sc, i);
			continue;
		}

		m_set_rcvif(m, ifp);
		if (sc->rge_head != NULL) {
			m->m_len = total_len;
			/*
			 * Special case: if there are 4 bytes or fewer
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes are the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->rge_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->rge_tail->m_next = m;
			}
			m = sc->rge_head;
			sc->rge_head = sc->rge_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
#if 0
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);
#else
		{
			m->m_pkthdr.len = m->m_len = total_len;
			m->m_flags |= M_HASFCS;
		}
#endif

#if notyet
		/* Check IP header checksum. */
		if (!(extsts & RGE_RDEXTSTS_IPCSUMERR) &&
		    (extsts & RGE_RDEXTSTS_IPV4))
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

		/* Check TCP/UDP checksum. */
		if ((extsts & (RGE_RDEXTSTS_IPV4 | RGE_RDEXTSTS_IPV6)) &&
		    (((extsts & RGE_RDEXTSTS_TCPPKT) &&
		    !(extsts & RGE_RDEXTSTS_TCPCSUMERR)) ||
		    ((extsts & RGE_RDEXTSTS_UDPPKT) &&
		    !(extsts & RGE_RDEXTSTS_UDPCSUMERR))))
			m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
			    M_UDP_CSUM_IN_OK;
#endif

		if (extsts & RGE_RDEXTSTS_VTAG) {
			vlan_set_tag(m,
			    bswap16(extsts & RGE_RDEXTSTS_VLAN_MASK));
		}

		if_percpuq_enqueue(ifp->if_percpuq, m);
	}

	sc->rge_ldata.rge_rxq_considx = i;

	return (rx);
}

int
rge_txeof(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct rge_txq *txq;
	uint32_t txstat;
	int cons, idx, prod;
	int free = 0;

	prod = sc->rge_ldata.rge_txq_prodidx;
	cons = sc->rge_ldata.rge_txq_considx;

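	/*
	 * free == 1: at least one descriptor was reclaimed.
	 * free == 2: the chip still owns the next descriptor, so the
	 * transmitter is poked again below to make sure the ring drains.
	 */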
	while (prod != cons) {
		txq = &sc->rge_ldata.rge_txq[cons];
		idx = txq->txq_descidx;

		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
		    idx * sizeof(struct rge_tx_desc),
		    sizeof(struct rge_tx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		txstat = letoh32(sc->rge_ldata.rge_tx_list[idx].rge_cmdsts);

		if (txstat & RGE_TDCMDSTS_OWN) {
			free = 2;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap, 0,
		    txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
		m_freem(txq->txq_mbuf);
		txq->txq_mbuf = NULL;

		net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
		if (txstat & (RGE_TDCMDSTS_EXCESSCOLL | RGE_TDCMDSTS_COLL))
			if_statinc_ref(nsr, if_collisions);
		if (txstat & RGE_TDCMDSTS_TXERR)
			if_statinc_ref(nsr, if_oerrors);
		else
			if_statinc_ref(nsr, if_opackets);
		IF_STAT_PUTREF(ifp);

		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
		    idx * sizeof(struct rge_tx_desc),
		    sizeof(struct rge_tx_desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		cons = RGE_NEXT_TX_DESC(idx);
		free = 1;
	}

	if (free == 0)
		return (0);

	sc->rge_ldata.rge_txq_considx = cons;

	if (free == 2)
		rge_txstart(sc);

	CLR(ifp->if_flags, IFF_OACTIVE);
	ifp->if_timer = 0;
	if_schedule_deferred_start(ifp);

	return (1);
}

void
rge_reset(struct rge_softc *sc)
{
	int i;

	/* Enable RXDV gate. */
	RGE_SETBIT_1(sc, RGE_PPSW, 0x08);
	DELAY(2000);

	for (i = 0; i < 3000; i++) {
		DELAY(50);
		if ((RGE_READ_1(sc, RGE_MCUCMD) & (RGE_MCUCMD_RXFIFO_EMPTY |
		    RGE_MCUCMD_TXFIFO_EMPTY)) == (RGE_MCUCMD_RXFIFO_EMPTY |
		    RGE_MCUCMD_TXFIFO_EMPTY))
			break;
	}
	if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5) {
		for (i = 0; i < 3000; i++) {
			DELAY(50);
			if ((RGE_READ_2(sc, RGE_IM) & 0x0103) == 0x0103)
				break;
		}
	}

	DELAY(2000);

	/* Soft reset. */
	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_RESET);

	for (i = 0; i < RGE_TIMEOUT; i++) {
		DELAY(100);
		if (!(RGE_READ_1(sc, RGE_CMD) & RGE_CMD_RESET))
			break;
	}
	if (i == RGE_TIMEOUT)
		device_printf(sc->sc_dev, "reset never completed!\n");
}

void
rge_iff(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct ethercom *ec = &sc->sc_ec;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[2];
	uint32_t rxfilt;
	int h = 0;

	rxfilt = RGE_READ_4(sc, RGE_RXCFG);
	rxfilt &= ~(RGE_RXCFG_ALLPHYS | RGE_RXCFG_MULTI);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept frames destined to our station address.
	 * Always accept broadcast frames.
	 */
	rxfilt |= RGE_RXCFG_INDIV | RGE_RXCFG_BROAD;

	if (ifp->if_flags & IFF_PROMISC) {
allmulti:
		ifp->if_flags |= IFF_ALLMULTI;
		rxfilt |= RGE_RXCFG_MULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= RGE_RXCFG_ALLPHYS;
		hashes[0] = hashes[1] = 0xffffffff;
	} else {
		rxfilt |= RGE_RXCFG_MULTI;
		/* Program new filter. */
		memset(hashes, 0, sizeof(hashes));

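		/*
		 * A range of multicast addresses (addrlo != addrhi) cannot
		 * be represented in the hash filter, so punt to allmulti.
		 */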
		ETHER_LOCK(ec);
		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN) != 0) {
				ETHER_UNLOCK(ec);
				goto allmulti;
			}
			h = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN) >> 26;

			if (h < 32)
				hashes[0] |= (1U << h);
			else
				hashes[1] |= (1U << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
		ETHER_UNLOCK(ec);
	}

	RGE_WRITE_4(sc, RGE_RXCFG, rxfilt);
	RGE_WRITE_4(sc, RGE_MAR0, bswap32(hashes[1]));
	RGE_WRITE_4(sc, RGE_MAR4, bswap32(hashes[0]));
}

void
rge_set_phy_power(struct rge_softc *sc, int on)
{
	int i;

	if (on) {
		RGE_SETBIT_1(sc, RGE_PMCH, 0xc0);

		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN);

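		/* Poll until the PHY reports power state 3 (ready). */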
		for (i = 0; i < RGE_TIMEOUT; i++) {
			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 3)
				break;
			DELAY(1000);
		}
	} else {
		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_PDOWN);
		RGE_CLRBIT_1(sc, RGE_PMCH, 0x80);
		RGE_CLRBIT_1(sc, RGE_PPSW, 0x40);
	}
}

void
rge_phy_config(struct rge_softc *sc)
{
	/* Read microcode version. */
	rge_write_phy_ocp(sc, 0xa436, 0x801e);
	sc->rge_mcodever = rge_read_phy_ocp(sc, 0xa438);

	switch (sc->rge_type) {
	case MAC_CFG2:
		rge_phy_config_mac_cfg2(sc);
		break;
	case MAC_CFG3:
		rge_phy_config_mac_cfg3(sc);
		break;
	case MAC_CFG4:
		rge_phy_config_mac_cfg4(sc);
		break;
	case MAC_CFG5:
		rge_phy_config_mac_cfg5(sc);
		break;
	default:
		break;	/* Can't happen. */
	}

	rge_write_phy(sc, 0x0a5b, 0x12,
	    rge_read_phy(sc, 0x0a5b, 0x12) & ~0x8000);

	/* Disable EEE. */
	RGE_MAC_CLRBIT(sc, 0xe040, 0x0003);
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
		RGE_MAC_CLRBIT(sc, 0xeb62, 0x0006);
		RGE_PHY_CLRBIT(sc, 0xa432, 0x0010);
	}
	RGE_PHY_CLRBIT(sc, 0xa5d0, 0x0006);
	RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xa6d8, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xa428, 0x0080);
	RGE_PHY_CLRBIT(sc, 0xa4a2, 0x0200);

	rge_patch_phy_mcu(sc, 1);
	RGE_MAC_CLRBIT(sc, 0xe052, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xa442, 0x3000);
	RGE_PHY_CLRBIT(sc, 0xa430, 0x8000);
	rge_patch_phy_mcu(sc, 0);
}

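/*
 * The per-revision PHY fixup routines below program EPHY parameters and
 * undocumented PHY OCP registers with values taken from the vendor driver.
 */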
void
rge_phy_config_mac_cfg2(struct rge_softc *sc)
{
	uint16_t val;
	int i;

	for (i = 0; i < nitems(rtl8125_mac_cfg2_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg2_ephy[i].reg,
		    rtl8125_mac_cfg2_ephy[i].val);

	rge_phy_config_mcu(sc, RGE_MAC_CFG2_MCODE_VER);

	val = rge_read_phy_ocp(sc, 0xad40) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0084);
	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad16, val | 0x0006);
	val = rge_read_phy_ocp(sc, 0xad32) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x1100);
	val = rge_read_phy_ocp(sc, 0xac8a) & ~0xf000;
	rge_write_phy_ocp(sc, 0xac8a, val | 0x7000);
	RGE_PHY_SETBIT(sc, 0xad18, 0x0400);
	RGE_PHY_SETBIT(sc, 0xad1a, 0x03ff);
	RGE_PHY_SETBIT(sc, 0xad1c, 0x03ff);

	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xc400);
	rge_write_phy_ocp(sc, 0xa436, 0x80eb);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0x0700;
	rge_write_phy_ocp(sc, 0xa438, val | 0x0300);
	rge_write_phy_ocp(sc, 0xa436, 0x80f8);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80f1);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x3000);
	rge_write_phy_ocp(sc, 0xa436, 0x80fe);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xa500);
	rge_write_phy_ocp(sc, 0xa436, 0x8102);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x8105);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x3300);
	rge_write_phy_ocp(sc, 0xa436, 0x8100);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x7000);
	rge_write_phy_ocp(sc, 0xa436, 0x8104);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xf000);
	rge_write_phy_ocp(sc, 0xa436, 0x8106);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6500);
	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xed00);
	rge_write_phy_ocp(sc, 0xa436, 0x80df);
	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
	rge_write_phy_ocp(sc, 0xa436, 0x80e1);
	RGE_PHY_CLRBIT(sc, 0xa438, 0x0100);
	val = rge_read_phy_ocp(sc, 0xbf06) & ~0x003f;
	rge_write_phy_ocp(sc, 0xbf06, val | 0x0038);
	rge_write_phy_ocp(sc, 0xa436, 0x819f);
	rge_write_phy_ocp(sc, 0xa438, 0xd0b6);
	rge_write_phy_ocp(sc, 0xbc34, 0x5555);
	val = rge_read_phy_ocp(sc, 0xbf0a) & ~0x0e00;
	rge_write_phy_ocp(sc, 0xbf0a, val | 0x0a00);
	RGE_PHY_CLRBIT(sc, 0xa5c0, 0x0400);
	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
}

void
rge_phy_config_mac_cfg3(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint16_t val;
	int i;
	static const uint16_t mac_cfg3_a438_value[] =
	    { 0x0043, 0x00a7, 0x00d6, 0x00ec, 0x00f6, 0x00fb, 0x00fd, 0x00ff,
	      0x00bb, 0x0058, 0x0029, 0x0013, 0x0009, 0x0004, 0x0002 };

	static const uint16_t mac_cfg3_b88e_value[] =
	    { 0xc091, 0x6e12, 0xc092, 0x1214, 0xc094, 0x1516, 0xc096, 0x171b,
	      0xc098, 0x1b1c, 0xc09a, 0x1f1f, 0xc09c, 0x2021, 0xc09e, 0x2224,
	      0xc0a0, 0x2424, 0xc0a2, 0x2424, 0xc0a4, 0x2424, 0xc018, 0x0af2,
	      0xc01a, 0x0d4a, 0xc01c, 0x0f26, 0xc01e, 0x118d, 0xc020, 0x14f3,
	      0xc022, 0x175a, 0xc024, 0x19c0, 0xc026, 0x1c26, 0xc089, 0x6050,
	      0xc08a, 0x5f6e, 0xc08c, 0x6e6e, 0xc08e, 0x6e6e, 0xc090, 0x6e12 };

	for (i = 0; i < nitems(rtl8125_mac_cfg3_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg3_ephy[i].reg,
		    rtl8125_mac_cfg3_ephy[i].val);

	val = rge_read_ephy(sc, 0x002a) & ~0x7000;
	rge_write_ephy(sc, 0x002a, val | 0x3000);
	RGE_EPHY_CLRBIT(sc, 0x0019, 0x0040);
	RGE_EPHY_SETBIT(sc, 0x001b, 0x0e00);
	RGE_EPHY_CLRBIT(sc, 0x001b, 0x7000);
	rge_write_ephy(sc, 0x0002, 0x6042);
	rge_write_ephy(sc, 0x0006, 0x0014);
	val = rge_read_ephy(sc, 0x006a) & ~0x7000;
	rge_write_ephy(sc, 0x006a, val | 0x3000);
	RGE_EPHY_CLRBIT(sc, 0x0059, 0x0040);
	RGE_EPHY_SETBIT(sc, 0x005b, 0x0e00);
	RGE_EPHY_CLRBIT(sc, 0x005b, 0x7000);
	rge_write_ephy(sc, 0x0042, 0x6042);
	rge_write_ephy(sc, 0x0046, 0x0014);

	rge_phy_config_mcu(sc, RGE_MAC_CFG3_MCODE_VER);

	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad16, val | 0x03ff);
	val = rge_read_phy_ocp(sc, 0xad32) & ~0x003f;
	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x1000);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x0100);
	val = rge_read_phy_ocp(sc, 0xacc0) & ~0x0003;
	rge_write_phy_ocp(sc, 0xacc0, val | 0x0002);
	val = rge_read_phy_ocp(sc, 0xad40) & ~0x00e0;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0040);
	val = rge_read_phy_ocp(sc, 0xad40) & ~0x0007;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0004);
	RGE_PHY_CLRBIT(sc, 0xac14, 0x0080);
	RGE_PHY_CLRBIT(sc, 0xac80, 0x0300);
	val = rge_read_phy_ocp(sc, 0xac5e) & ~0x0007;
	rge_write_phy_ocp(sc, 0xac5e, val | 0x0002);
	rge_write_phy_ocp(sc, 0xad4c, 0x00a8);
	rge_write_phy_ocp(sc, 0xac5c, 0x01ff);
	val = rge_read_phy_ocp(sc, 0xac8a) & ~0x00f0;
	rge_write_phy_ocp(sc, 0xac8a, val | 0x0030);
	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0500);
	rge_write_phy_ocp(sc, 0xb87c, 0x8159);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0700);
	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN +
	    32);
	rge_write_phy_ocp(sc, 0xb87c, 0x80a2);
	rge_write_phy_ocp(sc, 0xb87e, 0x0153);
	rge_write_phy_ocp(sc, 0xb87c, 0x809c);
	rge_write_phy_ocp(sc, 0xb87e, 0x0153);

	rge_write_phy_ocp(sc, 0xa436, 0x81b3);
	for (i = 0; i < nitems(mac_cfg3_a438_value); i++)
		rge_write_phy_ocp(sc, 0xa438, mac_cfg3_a438_value[i]);
	for (i = 0; i < 26; i++)
		rge_write_phy_ocp(sc, 0xa438, 0);
	rge_write_phy_ocp(sc, 0xa436, 0x8257);
	rge_write_phy_ocp(sc, 0xa438, 0x020f);
	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
	rge_write_phy_ocp(sc, 0xa438, 0x7843);

	rge_patch_phy_mcu(sc, 1);
	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
	for (i = 0; i < nitems(mac_cfg3_b88e_value); i += 2) {
		rge_write_phy_ocp(sc, 0xb88e, mac_cfg3_b88e_value[i]);
		rge_write_phy_ocp(sc, 0xb890, mac_cfg3_b88e_value[i + 1]);
	}
	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
	rge_patch_phy_mcu(sc, 0);

	RGE_PHY_SETBIT(sc, 0xd068, 0x2000);
	rge_write_phy_ocp(sc, 0xa436, 0x81a2);
	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
	val = rge_read_phy_ocp(sc, 0xb54c) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb54c, val | 0xdb00);
	RGE_PHY_CLRBIT(sc, 0xa454, 0x0001);
	RGE_PHY_SETBIT(sc, 0xa5d4, 0x0020);
	RGE_PHY_CLRBIT(sc, 0xad4e, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xa86a, 0x0001);
	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
}

void
rge_phy_config_mac_cfg4(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint16_t val;
	int i;
	static const uint16_t mac_cfg4_b87c_value[] =
	    { 0x8013, 0x0700, 0x8fb9, 0x2801, 0x8fba, 0x0100, 0x8fbc, 0x1900,
	      0x8fbe, 0xe100, 0x8fc0, 0x0800, 0x8fc2, 0xe500, 0x8fc4, 0x0f00,
	      0x8fc6, 0xf100, 0x8fc8, 0x0400, 0x8fca, 0xf300, 0x8fcc, 0xfd00,
	      0x8fce, 0xff00, 0x8fd0, 0xfb00, 0x8fd2, 0x0100, 0x8fd4, 0xf400,
	      0x8fd6, 0xff00, 0x8fd8, 0xf600, 0x813d, 0x390e, 0x814f, 0x790e,
	      0x80b0, 0x0f31 };

	for (i = 0; i < nitems(rtl8125_mac_cfg4_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg4_ephy[i].reg,
		    rtl8125_mac_cfg4_ephy[i].val);

	rge_write_phy_ocp(sc, 0xbf86, 0x9000);
	RGE_PHY_SETBIT(sc, 0xc402, 0x0400);
	RGE_PHY_CLRBIT(sc, 0xc402, 0x0400);
	rge_write_phy_ocp(sc, 0xbd86, 0x1010);
	rge_write_phy_ocp(sc, 0xbd88, 0x1010);
	val = rge_read_phy_ocp(sc, 0xbd4e) & ~0x0c00;
	rge_write_phy_ocp(sc, 0xbd4e, val | 0x0800);
	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
	rge_write_phy_ocp(sc, 0xbf46, val | 0x0700);

	rge_phy_config_mcu(sc, RGE_MAC_CFG4_MCODE_VER);

	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
	RGE_PHY_SETBIT(sc, 0xbc08, 0x000c);
	rge_write_phy_ocp(sc, 0xa436, 0x8fff);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x0400);
	for (i = 0; i < 6; i++) {
		rge_write_phy_ocp(sc, 0xb87c, 0x8560 + i * 2);
		if (i < 3)
			rge_write_phy_ocp(sc, 0xb87e, 0x19cc);
		else
			rge_write_phy_ocp(sc, 0xb87e, 0x147d);
	}
	rge_write_phy_ocp(sc, 0xb87c, 0x8ffe);
	rge_write_phy_ocp(sc, 0xb87e, 0x0907);
	val = rge_read_phy_ocp(sc, 0xacda) & ~0xff00;
	rge_write_phy_ocp(sc, 0xacda, val | 0xff00);
	val = rge_read_phy_ocp(sc, 0xacde) & ~0xf000;
	rge_write_phy_ocp(sc, 0xacde, val | 0xf000);
	rge_write_phy_ocp(sc, 0xb87c, 0x80d6);
	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
	rge_write_phy_ocp(sc, 0xb87c, 0x80F2);
	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
	rge_write_phy_ocp(sc, 0xb87c, 0x80f4);
	rge_write_phy_ocp(sc, 0xb87e, 0x6077);
	rge_write_phy_ocp(sc, 0xb506, 0x01e7);
	rge_write_phy_ocp(sc, 0xac8c, 0x0ffc);
	rge_write_phy_ocp(sc, 0xac46, 0xb7b4);
	rge_write_phy_ocp(sc, 0xac50, 0x0fbc);
	rge_write_phy_ocp(sc, 0xac3c, 0x9240);
	rge_write_phy_ocp(sc, 0xac4E, 0x0db4);
	rge_write_phy_ocp(sc, 0xacc6, 0x0707);
	rge_write_phy_ocp(sc, 0xacc8, 0xa0d3);
	rge_write_phy_ocp(sc, 0xad08, 0x0007);
	for (i = 0; i < nitems(mac_cfg4_b87c_value); i += 2) {
		rge_write_phy_ocp(sc, 0xb87c, mac_cfg4_b87c_value[i]);
		rge_write_phy_ocp(sc, 0xb87e, mac_cfg4_b87c_value[i + 1]);
	}
	RGE_PHY_SETBIT(sc, 0xbf4c, 0x0002);
	RGE_PHY_SETBIT(sc, 0xbcca, 0x0300);
	rge_write_phy_ocp(sc, 0xb87c, 0x8141);
	rge_write_phy_ocp(sc, 0xb87e, 0x320e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8153);
	rge_write_phy_ocp(sc, 0xb87e, 0x720e);
	RGE_PHY_CLRBIT(sc, 0xa432, 0x0040);
	rge_write_phy_ocp(sc, 0xb87c, 0x8529);
	rge_write_phy_ocp(sc, 0xb87e, 0x050e);
	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN +
	    32);
	rge_write_phy_ocp(sc, 0xa436, 0x816c);
	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
	rge_write_phy_ocp(sc, 0xa436, 0x8170);
	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
	rge_write_phy_ocp(sc, 0xa436, 0x8174);
	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
	rge_write_phy_ocp(sc, 0xa436, 0x8178);
	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
	rge_write_phy_ocp(sc, 0xa436, 0x817c);
	rge_write_phy_ocp(sc, 0xa438, 0x0719);
	rge_write_phy_ocp(sc, 0xa436, 0x8ff4);
	rge_write_phy_ocp(sc, 0xa438, 0x0400);
	rge_write_phy_ocp(sc, 0xa436, 0x8ff1);
	rge_write_phy_ocp(sc, 0xa438, 0x0404);
	rge_write_phy_ocp(sc, 0xbf4a, 0x001b);
	for (i = 0; i < 6; i++) {
		rge_write_phy_ocp(sc, 0xb87c, 0x8033 + i * 4);
		if (i == 2)
			rge_write_phy_ocp(sc, 0xb87e, 0xfc32);
		else
			rge_write_phy_ocp(sc, 0xb87e, 0x7c13);
	}
	rge_write_phy_ocp(sc, 0xb87c, 0x8145);
	rge_write_phy_ocp(sc, 0xb87e, 0x370e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
	rge_write_phy_ocp(sc, 0xb87e, 0x770e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8169);
	rge_write_phy_ocp(sc, 0xb87e, 0x0d0a);
	rge_write_phy_ocp(sc, 0xb87c, 0x817b);
	rge_write_phy_ocp(sc, 0xb87e, 0x1d0a);
	rge_write_phy_ocp(sc, 0xa436, 0x8217);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x821a);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x80da);
	rge_write_phy_ocp(sc, 0xa438, 0x0403);
	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
	rge_write_phy_ocp(sc, 0xa436, 0x80b3);
	rge_write_phy_ocp(sc, 0xa438, 0x0384);
	rge_write_phy_ocp(sc, 0xa436, 0x80b7);
	rge_write_phy_ocp(sc, 0xa438, 0x2007);
	rge_write_phy_ocp(sc, 0xa436, 0x80ba);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80b5);
	rge_write_phy_ocp(sc, 0xa438, 0xf009);
	rge_write_phy_ocp(sc, 0xa436, 0x80bd);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x9f00);
	rge_write_phy_ocp(sc, 0xa436, 0x80c7);
	rge_write_phy_ocp(sc, 0xa438, 0xf083);
	rge_write_phy_ocp(sc, 0xa436, 0x80dd);
	rge_write_phy_ocp(sc, 0xa438, 0x03f0);
	rge_write_phy_ocp(sc, 0xa436, 0x80df);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
	rge_write_phy_ocp(sc, 0xa436, 0x80cb);
	rge_write_phy_ocp(sc, 0xa438, 0x2007);
	rge_write_phy_ocp(sc, 0xa436, 0x80ce);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80c9);
	rge_write_phy_ocp(sc, 0xa438, 0x8009);
	rge_write_phy_ocp(sc, 0xa436, 0x80d1);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x8000);
	rge_write_phy_ocp(sc, 0xa436, 0x80a3);
	rge_write_phy_ocp(sc, 0xa438, 0x200a);
	rge_write_phy_ocp(sc, 0xa436, 0x80a5);
	rge_write_phy_ocp(sc, 0xa438, 0xf0ad);
	rge_write_phy_ocp(sc, 0xa436, 0x809f);
	rge_write_phy_ocp(sc, 0xa438, 0x6073);
	rge_write_phy_ocp(sc, 0xa436, 0x80a1);
	rge_write_phy_ocp(sc, 0xa438, 0x000b);
	rge_write_phy_ocp(sc, 0xa436, 0x80a9);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xc000);
	rge_patch_phy_mcu(sc, 1);
	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
	rge_write_phy_ocp(sc, 0xb88e, 0xc23e);
	rge_write_phy_ocp(sc, 0xb890, 0x0000);
	rge_write_phy_ocp(sc, 0xb88e, 0xc240);
	rge_write_phy_ocp(sc, 0xb890, 0x0103);
	rge_write_phy_ocp(sc, 0xb88e, 0xc242);
	rge_write_phy_ocp(sc, 0xb890, 0x0507);
	rge_write_phy_ocp(sc, 0xb88e, 0xc244);
	rge_write_phy_ocp(sc, 0xb890, 0x090b);
	rge_write_phy_ocp(sc, 0xb88e, 0xc246);
	rge_write_phy_ocp(sc, 0xb890, 0x0c0e);
	rge_write_phy_ocp(sc, 0xb88e, 0xc248);
	rge_write_phy_ocp(sc, 0xb890, 0x1012);
	rge_write_phy_ocp(sc, 0xb88e, 0xc24a);
	rge_write_phy_ocp(sc, 0xb890, 0x1416);
	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
	rge_patch_phy_mcu(sc, 0);
	RGE_PHY_SETBIT(sc, 0xa86a, 0x0001);
	RGE_PHY_SETBIT(sc, 0xa6f0, 0x0001);
	rge_write_phy_ocp(sc, 0xbfa0, 0xd70d);
	rge_write_phy_ocp(sc, 0xbfa2, 0x4100);
	rge_write_phy_ocp(sc, 0xbfa4, 0xe868);
	rge_write_phy_ocp(sc, 0xbfa6, 0xdc59);
	rge_write_phy_ocp(sc, 0xb54c, 0x3c18);
1951 RGE_PHY_CLRBIT(sc, 0xbfa4, 0x0020);
1952 rge_write_phy_ocp(sc, 0xa436, 0x817d);
1953 RGE_PHY_SETBIT(sc, 0xa438, 0x1000);
1954 }
1955
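/*
 * The rge_phy_config_mac_cfg*() routines above rely on an indexed idiom
 * throughout: PHY OCP register 0xa436 selects a PHY "parameter" address
 * and 0xa438 carries that parameter's data.  A minimal helper capturing
 * the idiom might look like the sketch below (hypothetical; the driver
 * open-codes the two writes instead):
 */
static void __unused
rge_write_phy_param(struct rge_softc *sc, uint16_t param, uint16_t val)
{

	rge_write_phy_ocp(sc, 0xa436, param);	/* select parameter address */
	rge_write_phy_ocp(sc, 0xa438, val);	/* store parameter data */
}
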
1956 void
1957 rge_phy_config_mac_cfg5(struct rge_softc *sc)
1958 {
1959 struct ifnet *ifp = &sc->sc_ec.ec_if;
1960 uint16_t val;
1961 int i;
1962
1963 for (i = 0; i < nitems(rtl8125_mac_cfg5_ephy); i++)
1964 rge_write_ephy(sc, rtl8125_mac_cfg5_ephy[i].reg,
1965 rtl8125_mac_cfg5_ephy[i].val);
1966
1967 val = rge_read_ephy(sc, 0x0022) & ~0x0030;
1968 rge_write_ephy(sc, 0x0022, val | 0x0020);
1969 val = rge_read_ephy(sc, 0x0062) & ~0x0030;
1970 rge_write_ephy(sc, 0x0062, val | 0x0020);
1971
1972 rge_phy_config_mcu(sc, RGE_MAC_CFG5_MCODE_VER);
1973
1974 RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1975 val = rge_read_phy_ocp(sc, 0xac46) & ~0x00f0;
1976 rge_write_phy_ocp(sc, 0xac46, val | 0x0090);
1977 val = rge_read_phy_ocp(sc, 0xad30) & ~0x0003;
1978 rge_write_phy_ocp(sc, 0xad30, val | 0x0001);
1979 RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN +
1980 32);
1981 rge_write_phy_ocp(sc, 0xb87c, 0x80f5);
1982 rge_write_phy_ocp(sc, 0xb87e, 0x760e);
1983 rge_write_phy_ocp(sc, 0xb87c, 0x8107);
1984 rge_write_phy_ocp(sc, 0xb87e, 0x360e);
1985 rge_write_phy_ocp(sc, 0xb87c, 0x8551);
1986 val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1987 rge_write_phy_ocp(sc, 0xb87e, val | 0x0800);
1988 val = rge_read_phy_ocp(sc, 0xbf00) & ~0xe000;
1989 rge_write_phy_ocp(sc, 0xbf00, val | 0xa000);
1990 val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
1991 rge_write_phy_ocp(sc, 0xbf46, val | 0x0300);
1992 for (i = 0; i < 10; i++) {
1993 rge_write_phy_ocp(sc, 0xa436, 0x8044 + i * 6);
1994 rge_write_phy_ocp(sc, 0xa438, 0x2417);
1995 }
1996 RGE_PHY_SETBIT(sc, 0xa4ca, 0x0040);
1997 val = rge_read_phy_ocp(sc, 0xbf84) & ~0xe000;
1998 rge_write_phy_ocp(sc, 0xbf84, val | 0xa000);
1999 }
2000
2001 void
2002 rge_phy_config_mcu(struct rge_softc *sc, uint16_t mcode_version)
2003 {
2004 if (sc->rge_mcodever != mcode_version) {
2005 int i;
2006
2007 rge_patch_phy_mcu(sc, 1);
2008
2009 if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
2010 rge_write_phy_ocp(sc, 0xa436, 0x8024);
2011 if (sc->rge_type == MAC_CFG2)
2012 rge_write_phy_ocp(sc, 0xa438, 0x8600);
2013 else
2014 rge_write_phy_ocp(sc, 0xa438, 0x8601);
2015 rge_write_phy_ocp(sc, 0xa436, 0xb82e);
2016 rge_write_phy_ocp(sc, 0xa438, 0x0001);
2017
2018 RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
2019 }
2020
2021 if (sc->rge_type == MAC_CFG2) {
2022 for (i = 0; i < nitems(rtl8125_mac_cfg2_mcu); i++) {
2023 rge_write_phy_ocp(sc,
2024 rtl8125_mac_cfg2_mcu[i].reg,
2025 rtl8125_mac_cfg2_mcu[i].val);
2026 }
2027 } else if (sc->rge_type == MAC_CFG3) {
2028 for (i = 0; i < nitems(rtl8125_mac_cfg3_mcu); i++) {
2029 rge_write_phy_ocp(sc,
2030 rtl8125_mac_cfg3_mcu[i].reg,
2031 rtl8125_mac_cfg3_mcu[i].val);
2032 }
2033 } else if (sc->rge_type == MAC_CFG4) {
2034 for (i = 0; i < nitems(rtl8125_mac_cfg4_mcu); i++) {
2035 rge_write_phy_ocp(sc,
2036 rtl8125_mac_cfg4_mcu[i].reg,
2037 rtl8125_mac_cfg4_mcu[i].val);
2038 }
2039 } else if (sc->rge_type == MAC_CFG5) {
2040 for (i = 0; i < nitems(rtl8125_mac_cfg5_mcu); i++) {
2041 rge_write_phy_ocp(sc,
2042 rtl8125_mac_cfg5_mcu[i].reg,
2043 rtl8125_mac_cfg5_mcu[i].val);
2044 }
2045 }
2046
2047 if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
2048 RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);
2049
2050 rge_write_phy_ocp(sc, 0xa436, 0);
2051 rge_write_phy_ocp(sc, 0xa438, 0);
2052 RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
2053 rge_write_phy_ocp(sc, 0xa436, 0x8024);
2054 rge_write_phy_ocp(sc, 0xa438, 0);
2055 }
2056
2057 rge_patch_phy_mcu(sc, 0);
2058
2059 /* Write microcode version. */
2060 rge_write_phy_ocp(sc, 0xa436, 0x801e);
2061 rge_write_phy_ocp(sc, 0xa438, mcode_version);
2062 }
2063 }
2064
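/*
 * The rtl8125_mac_cfg*_mcu[] tables consumed above are defined alongside
 * the register definitions in if_rgereg.h as flat register/value pairs of
 * vendor-provided PHY microcode.  Their shape is essentially the
 * following (the two entries here are illustrative placeholders, not
 * real microcode):
 */
static const struct {
	uint16_t reg;
	uint16_t val;
} rge_example_mcu[] __unused = {
	{ 0xa436, 0x8024 },	/* select a parameter address */
	{ 0xa438, 0x0000 },	/* data for that parameter */
};
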
2065 void
2066 rge_set_macaddr(struct rge_softc *sc, const uint8_t *addr)
2067 {
2068 RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2069 RGE_WRITE_4(sc, RGE_MAC0,
2070 (uint32_t)addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
2071 RGE_WRITE_4(sc, RGE_MAC4,
2072 addr[5] << 8 | addr[4]);
2073 RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2074 }
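
/*
 * Example of the packing above: for the address 01:23:45:67:89:ab,
 * addr[] is { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab }, so RGE_MAC0 is
 * written with 0x67452301 and RGE_MAC4 with 0x0000ab89, i.e. the
 * address is laid out little-endian across the two registers.
 */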
2075
2076 void
2077 rge_get_macaddr(struct rge_softc *sc, uint8_t *addr)
2078 {
2079 int i;
2080
2081 for (i = 0; i < ETHER_ADDR_LEN; i++)
2082 addr[i] = RGE_READ_1(sc, RGE_ADDR0 + i);
2083 }
2084
2085 void
2086 rge_hw_init(struct rge_softc *sc)
2087 {
2088 int i;
2089
2090 RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2091 RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
2092 RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
2093 RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2094 RGE_CLRBIT_1(sc, 0xf1, 0x80);
2095
2096 /* Disable UPS (ultra power saving). */
2097 RGE_MAC_CLRBIT(sc, 0xd40a, 0x0010);
2098
2099 /* Configure MAC MCU. */
2100 rge_write_mac_ocp(sc, 0xfc38, 0);
2101
2102 for (i = 0xfc28; i < 0xfc38; i += 2)
2103 rge_write_mac_ocp(sc, i, 0);
2104
2105 DELAY(3000);
2106 rge_write_mac_ocp(sc, 0xfc26, 0);
2107
2108 if (sc->rge_type == MAC_CFG3) {
2109 for (i = 0; i < nitems(rtl8125_mac_bps); i++) {
2110 rge_write_mac_ocp(sc, rtl8125_mac_bps[i].reg,
2111 rtl8125_mac_bps[i].val);
2112 }
2113 } else if (sc->rge_type == MAC_CFG5) {
2114 for (i = 0; i < nitems(rtl8125b_mac_bps); i++) {
2115 rge_write_mac_ocp(sc, rtl8125b_mac_bps[i].reg,
2116 rtl8125b_mac_bps[i].val);
2117 }
2118 }
2119
2120 /* Disable PHY power saving. */
2121 rge_disable_phy_ocp_pwrsave(sc);
2122
2123 /* Set PCIe uncorrectable error status. */
2124 rge_write_csi(sc, 0x108,
2125 rge_read_csi(sc, 0x108) | 0x00100000);
2126 }
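
/*
 * The last step above is a read-modify-write through the CSI window
 * (see rge_read_csi()/rge_write_csi() below).  As a convenience it
 * could be wrapped like the following sketch (hypothetical helper,
 * not part of the driver):
 */
static void __unused
rge_csi_setbit(struct rge_softc *sc, uint32_t reg, uint32_t bits)
{

	rge_write_csi(sc, reg, rge_read_csi(sc, reg) | bits);
}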
2127
2128 void
2129 rge_disable_phy_ocp_pwrsave(struct rge_softc *sc)
2130 {
2131 if (rge_read_phy_ocp(sc, 0xc416) != 0x0500) {
2132 rge_patch_phy_mcu(sc, 1);
2133 rge_write_phy_ocp(sc, 0xc416, 0);
2134 rge_write_phy_ocp(sc, 0xc416, 0x0500);
2135 rge_patch_phy_mcu(sc, 0);
2136 }
2137 }
2138
2139 void
2140 rge_patch_phy_mcu(struct rge_softc *sc, int set)
2141 {
2142 int i;
2143
2144 if (set)
2145 RGE_PHY_SETBIT(sc, 0xb820, 0x0010);
2146 else
2147 RGE_PHY_CLRBIT(sc, 0xb820, 0x0010);
2148
2149 for (i = 0; i < 1000; i++) {
2150 if ((rge_read_phy_ocp(sc, 0xb800) & 0x0040) == 0x0040)
2151 break;
2152 DELAY(100);
2153 }
2154 if (i == 1000) {
2155 DPRINTF(("timeout waiting to patch phy mcu\n"));
2156 return;
2157 }
2158 }
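
/*
 * Callers bracket raw PHY microcode accesses with this function, e.g.
 * the pattern used by rge_disable_phy_ocp_pwrsave() above:
 *
 *	rge_patch_phy_mcu(sc, 1);		(enter patch mode)
 *	rge_write_phy_ocp(sc, 0xc416, 0x0500);	(raw OCP writes)
 *	rge_patch_phy_mcu(sc, 0);		(leave patch mode)
 */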
2159
2160 void
2161 rge_add_media_types(struct rge_softc *sc)
2162 {
2163 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
2164 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
2165 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
2166 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
2167 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T, 0, NULL);
2168 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2169 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
2170 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX, 0, NULL);
2171 }
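
/*
 * Only the fixed media types are added here; the attach path is
 * expected to complete the set along these lines (sketch):
 *
 *	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
 *	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
 */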
2172
2173 void
2174 rge_config_imtype(struct rge_softc *sc, int imtype)
2175 {
2176 switch (imtype) {
2177 case RGE_IMTYPE_NONE:
2178 sc->rge_intrs = RGE_INTRS;
2179 sc->rge_rx_ack = RGE_ISR_RX_OK | RGE_ISR_RX_DESC_UNAVAIL |
2180 RGE_ISR_RX_FIFO_OFLOW;
2181 sc->rge_tx_ack = RGE_ISR_TX_OK;
2182 break;
2183 case RGE_IMTYPE_SIM:
2184 sc->rge_intrs = RGE_INTRS_TIMER;
2185 sc->rge_rx_ack = RGE_ISR_PCS_TIMEOUT;
2186 sc->rge_tx_ack = RGE_ISR_PCS_TIMEOUT;
2187 break;
2188 default:
2189 panic("%s: unknown imtype %d", device_xname(sc->sc_dev), imtype);
2190 }
2191 }
2192
2193 void
2194 rge_disable_hw_im(struct rge_softc *sc)
2195 {
2196 RGE_WRITE_2(sc, RGE_IM, 0);
2197 }
2198
2199 void
2200 rge_disable_sim_im(struct rge_softc *sc)
2201 {
2202 RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
2203 sc->rge_timerintr = 0;
2204 }
2205
2206 void
2207 rge_setup_sim_im(struct rge_softc *sc)
2208 {
2209 RGE_WRITE_4(sc, RGE_TIMERINT0, 0x2600);
2210 RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
2211 sc->rge_timerintr = 1;
2212 }
2213
2214 void
2215 rge_setup_intr(struct rge_softc *sc, int imtype)
2216 {
2217 rge_config_imtype(sc, imtype);
2218
2219 /* Enable interrupts. */
2220 RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
2221
2222 switch (imtype) {
2223 case RGE_IMTYPE_NONE:
2224 rge_disable_sim_im(sc);
2225 rge_disable_hw_im(sc);
2226 break;
2227 case RGE_IMTYPE_SIM:
2228 rge_disable_hw_im(sc);
2229 rge_setup_sim_im(sc);
2230 break;
2231 default:
2232 panic("%s: unknown imtype %d", device_xname(sc->sc_dev), imtype);
2233 }
2234 }
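
/*
 * A caller selects the moderation scheme with a single call, e.g.:
 *
 *	rge_setup_intr(sc, RGE_IMTYPE_SIM);
 *
 * With RGE_IMTYPE_SIM, RX/TX completions are acknowledged from the
 * PCS timer interrupt rather than per-packet, trading a little
 * latency for a lower interrupt rate.
 */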
2235
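/*
 * Take the chip out of its out-of-band (management firmware) state
 * so that the host driver owns the MAC and PHY.
 */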
2236 void
2237 rge_exit_oob(struct rge_softc *sc)
2238 {
2239 int i;
2240
2241 RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
2242 RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
2243 RGE_RXCFG_ERRPKT);
2244
2245 /* Disable RealWoW. */
2246 rge_write_mac_ocp(sc, 0xc0bc, 0x00ff);
2247
2248 rge_reset(sc);
2249
2250 /* Disable OOB. */
2251 RGE_CLRBIT_1(sc, RGE_MCUCMD, RGE_MCUCMD_IS_OOB);
2252
2253 RGE_MAC_CLRBIT(sc, 0xe8de, 0x4000);
2254
2255 for (i = 0; i < 10; i++) {
2256 DELAY(100);
2257 if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
2258 break;
2259 }
2260
2261 rge_write_mac_ocp(sc, 0xc0aa, 0x07d0);
2262 rge_write_mac_ocp(sc, 0xc0a6, 0x01b5);
2263 rge_write_mac_ocp(sc, 0xc01e, 0x5555);
2264
2265 for (i = 0; i < 10; i++) {
2266 DELAY(100);
2267 if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
2268 break;
2269 }
2270
2271 if (rge_read_mac_ocp(sc, 0xd42c) & 0x0100) {
2272 printf("%s: rge_exit_oob(): UPS resume detected\n",
2273 device_xname(sc->sc_dev));
2274 for (i = 0; i < RGE_TIMEOUT; i++) {
2275 if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 2)
2276 break;
2277 DELAY(1000);
2278 }
2279 RGE_MAC_CLRBIT(sc, 0xd408, 0x0100);
2280 if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5)
2281 RGE_PHY_CLRBIT(sc, 0xa466, 0x0001);
2282 RGE_PHY_CLRBIT(sc, 0xa468, 0x000a);
2283 }
2284 }
2285
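/*
 * CSI accessors: an indirect window (RGE_CSIAR/RGE_CSIDR) into the
 * chip's PCIe configuration registers.
 */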
2286 void
2287 rge_write_csi(struct rge_softc *sc, uint32_t reg, uint32_t val)
2288 {
2289 int i;
2290
2291 RGE_WRITE_4(sc, RGE_CSIDR, val);
2292 RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
2293 (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT) | RGE_CSIAR_BUSY);
2294
2295 for (i = 0; i < 10; i++) {
2296 DELAY(100);
2297 if (!(RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY))
2298 break;
2299 }
2300
2301 DELAY(20);
2302 }
2303
2304 uint32_t
2305 rge_read_csi(struct rge_softc *sc, uint32_t reg)
2306 {
2307 int i;
2308
2309 RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
2310 (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT));
2311
2312 for (i = 0; i < 10; i++) {
2313 DELAY(100);
2314 if (RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY)
2315 break;
2316 }
2317
2318 DELAY(20);
2319
2320 return (RGE_READ_4(sc, RGE_CSIDR));
2321 }
2322
2323 void
2324 rge_write_mac_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
2325 {
2326 uint32_t tmp;
2327
2328 tmp = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
2329 tmp |= val;
2330 tmp |= RGE_MACOCP_BUSY;
2331 RGE_WRITE_4(sc, RGE_MACOCP, tmp);
2332 }
2333
2334 uint16_t
2335 rge_read_mac_ocp(struct rge_softc *sc, uint16_t reg)
2336 {
2337 uint32_t val;
2338
2339 val = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
2340 RGE_WRITE_4(sc, RGE_MACOCP, val);
2341
2342 return (RGE_READ_4(sc, RGE_MACOCP) & RGE_MACOCP_DATA_MASK);
2343 }
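
/*
 * MAC OCP registers are word addressed, hence the reg >> 1 in both
 * accessors above.  The RGE_MAC_SETBIT()/RGE_MAC_CLRBIT() macros used
 * elsewhere in this file are read-modify-write wrappers over these
 * accessors, in essence:
 *
 *	#define RGE_MAC_SETBIT(sc, reg, x) \
 *		rge_write_mac_ocp((sc), (reg), \
 *		    rge_read_mac_ocp((sc), (reg)) | (x))
 */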
2344
2345 void
2346 rge_write_ephy(struct rge_softc *sc, uint16_t reg, uint16_t val)
2347 {
2348 uint32_t tmp;
2349 int i;
2350
2351 tmp = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
2352 tmp |= RGE_EPHYAR_BUSY | (val & RGE_EPHYAR_DATA_MASK);
2353 RGE_WRITE_4(sc, RGE_EPHYAR, tmp);
2354
2355 for (i = 0; i < 10; i++) {
2356 DELAY(100);
2357 if (!(RGE_READ_4(sc, RGE_EPHYAR) & RGE_EPHYAR_BUSY))
2358 break;
2359 }
2360
2361 DELAY(20);
2362 }
2363
2364 uint16_t
2365 rge_read_ephy(struct rge_softc *sc, uint16_t reg)
2366 {
2367 uint32_t val;
2368 int i;
2369
2370 val = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
2371 RGE_WRITE_4(sc, RGE_EPHYAR, val);
2372
2373 for (i = 0; i < 10; i++) {
2374 DELAY(100);
2375 val = RGE_READ_4(sc, RGE_EPHYAR);
2376 if (val & RGE_EPHYAR_BUSY)
2377 break;
2378 }
2379
2380 DELAY(20);
2381
2382 return (val & RGE_EPHYAR_DATA_MASK);
2383 }
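
/*
 * Note the asymmetric polling above: a write sets RGE_EPHYAR_BUSY and
 * waits for the chip to clear it, while a read issues the address with
 * the flag clear and waits for the chip to set it, signalling that the
 * data field is valid.  The CSI and PHY OCP accessors follow the same
 * convention.
 */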
2384
2385 void
2386 rge_write_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg, uint16_t val)
2387 {
2388 uint16_t off, phyaddr;
2389
2390 phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
2391 phyaddr <<= 4;
2392
2393 off = addr ? reg : 0x10 + (reg % 8);
2394
2395 phyaddr += (off - 16) << 1;
2396
2397 rge_write_phy_ocp(sc, phyaddr, val);
2398 }
2399
2400 uint16_t
2401 rge_read_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg)
2402 {
2403 uint16_t off, phyaddr;
2404
2405 phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
2406 phyaddr <<= 4;
2407
2408 off = addr ? reg : 0x10 + (reg % 8);
2409
2410 phyaddr += (off - 16) << 1;
2411
2412 return (rge_read_phy_ocp(sc, phyaddr));
2413 }
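
/*
 * Worked example of the translation above, for addr == 0 (the default
 * PHY mapping) and reg == 5 (MII_ANLPAR): phyaddr starts as
 * (RGE_PHYBASE + 5 / 8) << 4 == RGE_PHYBASE << 4, off is
 * 0x10 + 5 % 8 == 0x15, and the final OCP address becomes
 * (RGE_PHYBASE << 4) + ((0x15 - 16) << 1), i.e. the sixth 16-bit
 * word of the page-mapped MII register window.
 */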
2414
2415 void
2416 rge_write_phy_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
2417 {
2418 uint32_t tmp;
2419 int i;
2420
2421 tmp = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
2422 tmp |= RGE_PHYOCP_BUSY | val;
2423 RGE_WRITE_4(sc, RGE_PHYOCP, tmp);
2424
2425 for (i = 0; i < RGE_TIMEOUT; i++) {
2426 DELAY(1);
2427 if (!(RGE_READ_4(sc, RGE_PHYOCP) & RGE_PHYOCP_BUSY))
2428 break;
2429 }
2430 }
2431
2432 uint16_t
2433 rge_read_phy_ocp(struct rge_softc *sc, uint16_t reg)
2434 {
2435 uint32_t val;
2436 int i;
2437
2438 val = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
2439 RGE_WRITE_4(sc, RGE_PHYOCP, val);
2440
2441 for (i = 0; i < RGE_TIMEOUT; i++) {
2442 DELAY(1);
2443 val = RGE_READ_4(sc, RGE_PHYOCP);
2444 if (val & RGE_PHYOCP_BUSY)
2445 break;
2446 }
2447
2448 return (val & RGE_PHYOCP_DATA_MASK);
2449 }
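
/*
 * RGE_PHY_SETBIT()/RGE_PHY_CLRBIT(), used heavily by the PHY
 * configuration routines above, are read-modify-write wrappers over
 * these two accessors, in essence:
 *
 *	#define RGE_PHY_SETBIT(sc, reg, x) \
 *		rge_write_phy_ocp((sc), (reg), \
 *		    rge_read_phy_ocp((sc), (reg)) | (x))
 */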
2450
2451 int
2452 rge_get_link_status(struct rge_softc *sc)
2453 {
2454 return ((RGE_READ_2(sc, RGE_PHYSTAT) & RGE_PHYSTAT_LINK) ? 1 : 0);
2455 }
2456
2457 void
2458 rge_txstart(void *arg)
2459 {
2460 struct rge_softc *sc = arg;
2461
2462 RGE_WRITE_2(sc, RGE_TXSTART, RGE_TXSTART_START);
2463 }
2464
2465 void
2466 rge_tick(void *arg)
2467 {
2468 struct rge_softc *sc = arg;
2469 int s;
2470
2471 s = splnet();
2472 rge_link_state(sc);
2473 splx(s);
2474
2475 callout_schedule(&sc->sc_timeout, hz);
2476 }
2477
2478 void
2479 rge_link_state(struct rge_softc *sc)
2480 {
2481 struct ifnet *ifp = &sc->sc_ec.ec_if;
2482 int link = LINK_STATE_DOWN;
2483
2484 if (rge_get_link_status(sc))
2485 link = LINK_STATE_UP;
2486
2487 if (ifp->if_link_state != link) { /* XXX unlocked access to if_link_state */
2488 if_link_state_change(ifp, link);
2489 }
2490 }
2491