/*	$NetBSD: if_rge.c,v 1.27 2023/10/09 11:55:22 riastradh Exp $	*/
/*	$OpenBSD: if_rge.c,v 1.9 2020/12/12 11:48:53 jan Exp $	*/

/*
 * Copyright (c) 2019, 2020 Kevin Lo <kevlo (at) openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_rge.c,v 1.27 2023/10/09 11:55:22 riastradh Exp $");

#include <sys/types.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/endian.h>
#include <sys/callout.h>
#include <sys/workqueue.h>

#include <net/if.h>

#include <net/if_dl.h>
#include <net/if_ether.h>

#include <net/if_media.h>

#include <netinet/in.h>

#include <net/bpf.h>

#include <sys/bus.h>
#include <machine/intr.h>

#include <dev/mii/mii.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_rgereg.h>

#ifdef __NetBSD__
#define letoh32		htole32
#define nitems(x)	__arraycount(x)

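/*
 * MCLGETL() is an OpenBSD mbuf API with no direct NetBSD equivalent;
 * emulate it by allocating a packet header mbuf and attaching external
 * storage of the requested size.
 */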
static struct mbuf *
MCLGETL(struct rge_softc *sc __unused, int how, u_int size)
{
	struct mbuf *m;

	MGETHDR(m, how, MT_DATA);
	if (m == NULL)
		return NULL;

	MEXTMALLOC(m, size, how);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return NULL;
	}
	return m;
}

#ifdef NET_MPSAFE
#define RGE_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif
#endif

#ifdef RGE_DEBUG
#define DPRINTF(x)	do { if (rge_debug > 0) printf x; } while (0)
int rge_debug = 0;
#else
#define DPRINTF(x)
#endif

static int	rge_match(device_t, cfdata_t, void *);
static void	rge_attach(device_t, device_t, void *);
int		rge_intr(void *);
int		rge_encap(struct rge_softc *, struct mbuf *, int);
int		rge_ioctl(struct ifnet *, u_long, void *);
void		rge_start(struct ifnet *);
void		rge_watchdog(struct ifnet *);
int		rge_init(struct ifnet *);
void		rge_stop(struct ifnet *, int);
int		rge_ifmedia_upd(struct ifnet *);
void		rge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
int		rge_allocmem(struct rge_softc *);
int		rge_newbuf(struct rge_softc *, int);
void		rge_discard_rxbuf(struct rge_softc *, int);
static int	rge_rx_list_init(struct rge_softc *);
static void	rge_rx_list_fini(struct rge_softc *);
static void	rge_tx_list_init(struct rge_softc *);
static void	rge_tx_list_fini(struct rge_softc *);
int		rge_rxeof(struct rge_softc *);
int		rge_txeof(struct rge_softc *);
void		rge_reset(struct rge_softc *);
void		rge_iff(struct rge_softc *);
void		rge_set_phy_power(struct rge_softc *, int);
void		rge_phy_config(struct rge_softc *);
void		rge_phy_config_mac_cfg2(struct rge_softc *);
void		rge_phy_config_mac_cfg3(struct rge_softc *);
void		rge_phy_config_mac_cfg4(struct rge_softc *);
void		rge_phy_config_mac_cfg5(struct rge_softc *);
void		rge_phy_config_mcu(struct rge_softc *, uint16_t);
void		rge_set_macaddr(struct rge_softc *, const uint8_t *);
void		rge_get_macaddr(struct rge_softc *, uint8_t *);
void		rge_hw_init(struct rge_softc *);
void		rge_disable_phy_ocp_pwrsave(struct rge_softc *);
void		rge_patch_phy_mcu(struct rge_softc *, int);
void		rge_add_media_types(struct rge_softc *);
void		rge_config_imtype(struct rge_softc *, int);
void		rge_disable_hw_im(struct rge_softc *);
void		rge_disable_sim_im(struct rge_softc *);
void		rge_setup_sim_im(struct rge_softc *);
void		rge_setup_intr(struct rge_softc *, int);
void		rge_exit_oob(struct rge_softc *);
void		rge_write_csi(struct rge_softc *, uint32_t, uint32_t);
uint32_t	rge_read_csi(struct rge_softc *, uint32_t);
void		rge_write_mac_ocp(struct rge_softc *, uint16_t, uint16_t);
uint16_t	rge_read_mac_ocp(struct rge_softc *, uint16_t);
void		rge_write_ephy(struct rge_softc *, uint16_t, uint16_t);
uint16_t	rge_read_ephy(struct rge_softc *, uint16_t);
void		rge_write_phy(struct rge_softc *, uint16_t, uint16_t, uint16_t);
uint16_t	rge_read_phy(struct rge_softc *, uint16_t, uint16_t);
void		rge_write_phy_ocp(struct rge_softc *, uint16_t, uint16_t);
uint16_t	rge_read_phy_ocp(struct rge_softc *, uint16_t);
int		rge_get_link_status(struct rge_softc *);
void		rge_txstart(struct work *, void *);
void		rge_tick(void *);
void		rge_link_state(struct rge_softc *);

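/*
 * Register/value pairs for the per-revision MAC microcode patches
 * (presumably consumed by rge_phy_config_mcu()).
 */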
static const struct {
	uint16_t reg;
	uint16_t val;
} rtl8125_mac_cfg2_mcu[] = {
	RTL8125_MAC_CFG2_MCU
}, rtl8125_mac_cfg3_mcu[] = {
	RTL8125_MAC_CFG3_MCU
}, rtl8125_mac_cfg4_mcu[] = {
	RTL8125_MAC_CFG4_MCU
}, rtl8125_mac_cfg5_mcu[] = {
	RTL8125_MAC_CFG5_MCU
};

CFATTACH_DECL_NEW(rge, sizeof(struct rge_softc), rge_match, rge_attach,
    NULL, NULL);	/* Sevan - detach function? */

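/*
 * PCI IDs this driver attaches to; the Killer E3000 appears to be an
 * RTL8125 variant.
 */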
static const struct device_compatible_entry compat_data[] = {
	{ .id = PCI_ID_CODE(PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_E3000) },
	{ .id = PCI_ID_CODE(PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8125) },

	PCI_COMPAT_EOL
};

static int
rge_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;

	return pci_compatible_match(pa, compat_data);
}

void
rge_attach(device_t parent, device_t self, void *aux)
{
	struct rge_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t *ihp;
	char intrbuf[PCI_INTRSTR_LEN];
	const char *intrstr = NULL;
	struct ifnet *ifp;
	pcireg_t reg;
	uint32_t hwrev;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int offset;
	pcireg_t command;

	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);

	sc->sc_dev = self;

	pci_aprint_devinfo(pa, "Ethernet controller");

	/*
	 * Map control/status registers.
	 */
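	/* Prefer the 64-bit memory BAR, then 32-bit memory, then I/O space. */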
	if (pci_mapreg_map(pa, RGE_PCI_BAR2, PCI_MAPREG_TYPE_MEM |
	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->rge_btag, &sc->rge_bhandle,
	    NULL, &sc->rge_bsize)) {
		if (pci_mapreg_map(pa, RGE_PCI_BAR1, PCI_MAPREG_TYPE_MEM |
		    PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->rge_btag,
		    &sc->rge_bhandle, NULL, &sc->rge_bsize)) {
			if (pci_mapreg_map(pa, RGE_PCI_BAR0,
			    PCI_MAPREG_TYPE_IO, 0, &sc->rge_btag,
			    &sc->rge_bhandle, NULL, &sc->rge_bsize)) {
				aprint_error(": can't map mem or i/o space\n");
				return;
			}
		}
	}

	int counts[PCI_INTR_TYPE_SIZE] = {
		[PCI_INTR_TYPE_INTX] = 1,
		[PCI_INTR_TYPE_MSI] = 1,
		[PCI_INTR_TYPE_MSIX] = 1,
	};
	int max_type = PCI_INTR_TYPE_MSIX;
	/*
	 * Allocate interrupt.
	 */
	if (pci_intr_alloc(pa, &ihp, counts, max_type) != 0) {
		aprint_error(": couldn't map interrupt\n");
		return;
	}
	switch (pci_intr_type(pc, ihp[0])) {
	case PCI_INTR_TYPE_MSIX:
	case PCI_INTR_TYPE_MSI:
		sc->rge_flags |= RGE_FLAG_MSI;
		break;
	default:
		break;
	}
	intrstr = pci_intr_string(pc, ihp[0], intrbuf, sizeof(intrbuf));
	sc->sc_ih = pci_intr_establish_xname(pc, ihp[0], IPL_NET, rge_intr,
	    sc, device_xname(sc->sc_dev));
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "couldn't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;

	/* Determine hardware revision. */
	hwrev = RGE_READ_4(sc, RGE_TXCFG) & RGE_TXCFG_HWREV;
	switch (hwrev) {
	case 0x60800000:
		sc->rge_type = MAC_CFG2;
		break;
	case 0x60900000:
		sc->rge_type = MAC_CFG3;
		break;
	case 0x64000000:
		sc->rge_type = MAC_CFG4;
		break;
	case 0x64100000:
		sc->rge_type = MAC_CFG5;
		break;
	default:
		aprint_error(": unknown version 0x%08x\n", hwrev);
		return;
	}

	rge_config_imtype(sc, RGE_IMTYPE_SIM);

	/*
	 * PCI Express check.
	 */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
	    &offset, NULL)) {
		/* Disable PCIe ASPM and ECPM. */
		reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    offset + PCIE_LCSR);
		reg &= ~(PCIE_LCSR_ASPM_L0S | PCIE_LCSR_ASPM_L1 |
		    PCIE_LCSR_ENCLKPM);
		pci_conf_write(pa->pa_pc, pa->pa_tag, offset + PCIE_LCSR,
		    reg);
	}

	rge_exit_oob(sc);
	rge_hw_init(sc);

	rge_get_macaddr(sc, eaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(eaddr));

	memcpy(sc->sc_enaddr, eaddr, ETHER_ADDR_LEN);

	rge_set_phy_power(sc, 1);
	rge_phy_config(sc);

	if (rge_allocmem(sc))
		return;

	ifp = &sc->sc_ec.ec_if;
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef RGE_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = rge_ioctl;
	ifp->if_stop = rge_stop;
	ifp->if_start = rge_start;
	ifp->if_init = rge_init;
	ifp->if_watchdog = rge_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, RGE_TX_LIST_CNT - 1);

#if notyet
	ifp->if_capabilities = IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_TCPv4_Tx |
	    IFCAP_CSUM_UDPv4_Rx | IFCAP_CSUM_UDPv4_Tx;
#endif

	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;

	callout_init(&sc->sc_timeout, CALLOUT_FLAGS);
	callout_setfunc(&sc->sc_timeout, rge_tick, sc);

	command = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	command |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);

	/* Initialize ifmedia structures. */
	sc->sc_ec.ec_ifmedia = &sc->sc_media;
	ifmedia_init(&sc->sc_media, IFM_IMASK, rge_ifmedia_upd,
	    rge_ifmedia_sts);
	rge_add_media_types(sc);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
	sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media;

	if_attach(ifp);
	ether_ifattach(ifp, eaddr);

	if (pmf_device_register(self, NULL, NULL))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");
}

int
rge_intr(void *arg)
{
	struct rge_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint32_t status;
	int claimed = 0, rx, tx;

	if (!(ifp->if_flags & IFF_RUNNING))
		return (0);

	/* Disable interrupts. */
	RGE_WRITE_4(sc, RGE_IMR, 0);

	if (!(sc->rge_flags & RGE_FLAG_MSI)) {
		if ((RGE_READ_4(sc, RGE_ISR) & sc->rge_intrs) == 0)
			return (0);
	}

	status = RGE_READ_4(sc, RGE_ISR);
	if (status)
		RGE_WRITE_4(sc, RGE_ISR, status);

	if (status & RGE_ISR_PCS_TIMEOUT)
		claimed = 1;

	rx = tx = 0;
	if (status & sc->rge_intrs) {
		if (status &
		    (sc->rge_rx_ack | RGE_ISR_RX_ERR | RGE_ISR_RX_FIFO_OFLOW)) {
			rx |= rge_rxeof(sc);
			claimed = 1;
		}

		if (status & (sc->rge_tx_ack | RGE_ISR_TX_ERR)) {
			tx |= rge_txeof(sc);
			claimed = 1;
		}

		if (status & RGE_ISR_SYSTEM_ERR) {
			KERNEL_LOCK(1, NULL);
			rge_init(ifp);
			KERNEL_UNLOCK_ONE(NULL);
			claimed = 1;
		}
	}

	if (sc->rge_timerintr) {
		if ((tx | rx) == 0) {
			/*
			 * Nothing needs to be processed; fall back
			 * to using TX/RX interrupts.
			 */
			rge_setup_intr(sc, RGE_IMTYPE_NONE);

			/*
			 * Recollect, mainly to avoid the possible
			 * race introduced by changing interrupt
			 * masks.
			 */
			rge_rxeof(sc);
			rge_txeof(sc);
		} else
			RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
	} else if (tx | rx) {
		/*
		 * Assume that using simulated interrupt moderation
		 * (hardware timer based) could reduce the interrupt
		 * rate.
		 */
		rge_setup_intr(sc, RGE_IMTYPE_SIM);
	}

	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);

	return (claimed);
}

int
rge_encap(struct rge_softc *sc, struct mbuf *m, int idx)
{
	struct rge_tx_desc *d = NULL;
	struct rge_txq *txq;
	bus_dmamap_t txmap;
	uint32_t cmdsts, cflags = 0;
	int cur, error, i, last, nsegs;

#if notyet
	/*
	 * Set RGE_TDEXTSTS_IPCSUM if any checksum offloading is requested.
	 * Otherwise, RGE_TDEXTSTS_TCPCSUM / RGE_TDEXTSTS_UDPCSUM does not
	 * take effect.
	 */
	if ((m->m_pkthdr.csum_flags &
	    (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) != 0) {
		cflags |= RGE_TDEXTSTS_IPCSUM;
		if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
			cflags |= RGE_TDEXTSTS_TCPCSUM;
		if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
			cflags |= RGE_TDEXTSTS_UDPCSUM;
	}
#endif

	txq = &sc->rge_ldata.rge_txq[idx];
	txmap = txq->txq_dmamap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m, BUS_DMA_NOWAIT);
	switch (error) {
	case 0:
		break;
	case EFBIG:	/* mbuf chain is too fragmented */
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m,
		    BUS_DMA_NOWAIT) == 0)
			break;

		/* FALLTHROUGH */
	default:
		return (0);
	}

	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	nsegs = txmap->dm_nsegs;

	/* Set up hardware VLAN tagging. */
	if (vlan_has_tag(m))
		cflags |= bswap16(vlan_get_tag(m)) | RGE_TDEXTSTS_VTAG;

	last = cur = idx;
	cmdsts = RGE_TDCMDSTS_SOF;

	for (i = 0; i < txmap->dm_nsegs; i++) {
		d = &sc->rge_ldata.rge_tx_list[cur];

		d->rge_extsts = htole32(cflags);
		d->rge_addrlo = htole32(RGE_ADDR_LO(txmap->dm_segs[i].ds_addr));
		d->rge_addrhi = htole32(RGE_ADDR_HI(txmap->dm_segs[i].ds_addr));

		cmdsts |= txmap->dm_segs[i].ds_len;

		if (cur == RGE_TX_LIST_CNT - 1)
			cmdsts |= RGE_TDCMDSTS_EOR;

		d->rge_cmdsts = htole32(cmdsts);

		last = cur;
		cmdsts = RGE_TDCMDSTS_OWN;
		cur = RGE_NEXT_TX_DESC(cur);
	}

	/* Set EOF on the last descriptor. */
	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_EOF);

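	/*
	 * The loop above set OWN on every descriptor except the first;
	 * set it on the first descriptor only now, so the chip never
	 * sees a partially built chain.
	 */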
	/* Transfer ownership of packet to the chip. */
	d = &sc->rge_ldata.rge_tx_list[idx];

	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
	    cur * sizeof(struct rge_tx_desc), sizeof(struct rge_tx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Update info of TX queue and descriptors. */
	txq->txq_mbuf = m;
	txq->txq_descidx = last;

	return (nsegs);
}

int
rge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct rge_softc *sc = ifp->if_softc;
	//struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		/*
		 * XXX Set an ifflags callback and let ether_ioctl
		 * handle all of this.
		 */
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				rge_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				rge_stop(ifp, 1);
		}
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			rge_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
rge_start(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int free, idx, used;
	int queued = 0;

#define LINK_STATE_IS_UP(_s)	\
	((_s) >= LINK_STATE_UP || (_s) == LINK_STATE_UNKNOWN)

	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
		IFQ_PURGE(&ifp->if_snd);
		return;
	}

	/* Calculate free space. */
	idx = sc->rge_ldata.rge_txq_prodidx;
	free = sc->rge_ldata.rge_txq_considx;
	if (free <= idx)
		free += RGE_TX_LIST_CNT;
	free -= idx;

	for (;;) {
		if (RGE_TX_NSEGS >= free + 2) {
			SET(ifp->if_flags, IFF_OACTIVE);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		used = rge_encap(sc, m, idx);
		if (used == 0) {
			m_freem(m);
			continue;
		}

		KASSERT(used <= free);
		free -= used;

		bpf_mtap(ifp, m, BPF_D_OUT);

		idx += used;
		if (idx >= RGE_TX_LIST_CNT)
			idx -= RGE_TX_LIST_CNT;

		queued++;
	}

	if (queued == 0)
		return;

	/* Set a timeout in case the chip goes out to lunch. */
	ifp->if_timer = 5;

	sc->rge_ldata.rge_txq_prodidx = idx;
#if 0
	ifq_serialize(ifq, &sc->sc_task);
#else
	rge_txstart(&sc->sc_task, sc);
#endif
}

void
rge_watchdog(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;

	device_printf(sc->sc_dev, "watchdog timeout\n");
	if_statinc(ifp, if_oerrors);

	rge_init(ifp);
}

int
rge_init(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	uint32_t val;
	unsigned i;

	rge_stop(ifp, 0);

	/* Set MAC address. */
	rge_set_macaddr(sc, CLLADDR(ifp->if_sadl));

	/* Set maximum frame size. */
	RGE_WRITE_2(sc, RGE_RXMAXSIZE, RGE_JUMBO_FRAMELEN);

	/* Initialize the RX descriptor list. */
	int error = rge_rx_list_init(sc);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "init failed: no memory for RX buffers\n");
		rge_stop(ifp, 1);
		return error;
	}

	/* Initialize TX descriptors. */
	rge_tx_list_init(sc);

	/* Load the addresses of the RX and TX lists into the chip. */
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_LO,
	    RGE_ADDR_LO(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_HI,
	    RGE_ADDR_HI(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_LO,
	    RGE_ADDR_LO(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_HI,
	    RGE_ADDR_HI(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));

	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);

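	/*
	 * What follows is a long sequence of undocumented register
	 * initializations; the magic values are taken on faith from the
	 * vendor reference driver.
	 */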
	RGE_CLRBIT_1(sc, 0xf1, 0x80);
	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
	RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_RDY_TO_L23);

	/* Clear interrupt moderation timer. */
	for (i = 0; i < 64; i++)
		RGE_WRITE_4(sc, RGE_INTMITI(i), 0);

	/* Set the initial RX and TX configurations. */
	RGE_WRITE_4(sc, RGE_RXCFG, RGE_RXCFG_CONFIG);
	RGE_WRITE_4(sc, RGE_TXCFG, RGE_TXCFG_CONFIG);

	val = rge_read_csi(sc, 0x70c) & ~0xff000000;
	rge_write_csi(sc, 0x70c, val | 0x27000000);

	/* Enable hardware optimization function. */
	val = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x78) & ~0x00007000;
	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x78, val | 0x00005000);

	RGE_WRITE_2(sc, 0x0382, 0x221b);
	RGE_WRITE_1(sc, 0x4500, 0);
	RGE_WRITE_2(sc, 0x4800, 0);
	RGE_CLRBIT_1(sc, RGE_CFG1, RGE_CFG1_SPEED_DOWN);

	rge_write_mac_ocp(sc, 0xc140, 0xffff);
	rge_write_mac_ocp(sc, 0xc142, 0xffff);

	val = rge_read_mac_ocp(sc, 0xd3e2) & ~0x0fff;
	rge_write_mac_ocp(sc, 0xd3e2, val | 0x03a9);

	RGE_MAC_CLRBIT(sc, 0xd3e4, 0x00ff);
	RGE_MAC_SETBIT(sc, 0xe860, 0x0080);
	RGE_MAC_SETBIT(sc, 0xeb58, 0x0001);

	val = rge_read_mac_ocp(sc, 0xe614) & ~0x0700;
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		rge_write_mac_ocp(sc, 0xe614, val | 0x0400);
	else
		rge_write_mac_ocp(sc, 0xe614, val | 0x0200);

	RGE_MAC_CLRBIT(sc, 0xe63e, 0x0c00);

	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
		val = rge_read_mac_ocp(sc, 0xe63e) & ~0x0030;
		rge_write_mac_ocp(sc, 0xe63e, val | 0x0020);
	} else
		RGE_MAC_CLRBIT(sc, 0xe63e, 0x0030);

	RGE_MAC_SETBIT(sc, 0xc0b4, 0x000c);

	val = rge_read_mac_ocp(sc, 0xeb6a) & ~0x00ff;
	rge_write_mac_ocp(sc, 0xeb6a, val | 0x0033);

	val = rge_read_mac_ocp(sc, 0xeb50) & ~0x03e0;
	rge_write_mac_ocp(sc, 0xeb50, val | 0x0040);

	val = rge_read_mac_ocp(sc, 0xe056) & ~0x00f0;
	rge_write_mac_ocp(sc, 0xe056, val | 0x0030);

	RGE_WRITE_1(sc, RGE_TDFNR, 0x10);

	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_TX_10M_PS_EN);

	RGE_MAC_CLRBIT(sc, 0xe040, 0x1000);

	val = rge_read_mac_ocp(sc, 0xea1c) & ~0x0003;
	rge_write_mac_ocp(sc, 0xea1c, val | 0x0001);

	val = rge_read_mac_ocp(sc, 0xe0c0) & ~0x4f0f;
	rge_write_mac_ocp(sc, 0xe0c0, val | 0x4403);

	RGE_MAC_SETBIT(sc, 0xe052, 0x0068);
	RGE_MAC_CLRBIT(sc, 0xe052, 0x0080);

	val = rge_read_mac_ocp(sc, 0xc0ac) & ~0x0080;
	rge_write_mac_ocp(sc, 0xc0ac, val | 0x1f00);

	val = rge_read_mac_ocp(sc, 0xd430) & ~0x0fff;
	rge_write_mac_ocp(sc, 0xd430, val | 0x047f);

	val = rge_read_mac_ocp(sc, 0xe84c) & ~0x0040;
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		rge_write_mac_ocp(sc, 0xe84c, 0x00c0);
	else
		rge_write_mac_ocp(sc, 0xe84c, 0x0080);

	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_PFM_EN);

	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		RGE_SETBIT_1(sc, RGE_MCUCMD, 0x01);

	/* Disable EEE plus. */
	RGE_MAC_CLRBIT(sc, 0xe080, 0x0002);

	RGE_MAC_CLRBIT(sc, 0xea1c, 0x0004);

	RGE_MAC_SETBIT(sc, 0xeb54, 0x0001);
	DELAY(1);
	RGE_MAC_CLRBIT(sc, 0xeb54, 0x0001);

	RGE_CLRBIT_4(sc, 0x1880, 0x0030);

	rge_write_mac_ocp(sc, 0xe098, 0xc302);

	if ((sc->sc_ec.ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0)
		RGE_SETBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);
	else
		RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);

	RGE_SETBIT_2(sc, RGE_CPLUSCMD, RGE_CPLUSCMD_RXCSUM);

	for (i = 0; i < 10; i++) {
		if (!(rge_read_mac_ocp(sc, 0xe00e) & 0x2000))
			break;
		DELAY(1000);
	}

	/* Disable RXDV gate. */
	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
	DELAY(2000);

	rge_ifmedia_upd(ifp);

	/* Enable transmit and receive. */
	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_TXENB | RGE_CMD_RXENB);

	/* Program promiscuous mode and multicast filters. */
	rge_iff(sc);

	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);

	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);

	/* Enable interrupts. */
	rge_setup_intr(sc, RGE_IMTYPE_SIM);

	ifp->if_flags |= IFF_RUNNING;
	CLR(ifp->if_flags, IFF_OACTIVE);

	callout_schedule(&sc->sc_timeout, 1);

	return (0);
}

/*
 * Stop the adapter and free any mbufs allocated to the RX and TX lists.
 */
void
rge_stop(struct ifnet *ifp, int disable)
{
	struct rge_softc *sc = ifp->if_softc;

	callout_halt(&sc->sc_timeout, NULL);

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_RUNNING;
	sc->rge_timerintr = 0;

	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
	    RGE_RXCFG_ERRPKT);

	RGE_WRITE_4(sc, RGE_IMR, 0);

	/* Clear timer interrupts. */
	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT1, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT2, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT3, 0);

	rge_reset(sc);

	// intr_barrier(sc->sc_ih);
	// ifq_barrier(&ifp->if_snd);
	/* ifq_clr_oactive(&ifp->if_snd); Sevan - OpenBSD queue API */

	if (sc->rge_head != NULL) {
		m_freem(sc->rge_head);
		sc->rge_head = sc->rge_tail = NULL;
	}

	rge_tx_list_fini(sc);
	rge_rx_list_fini(sc);
}

/*
 * Set media options.
 */
int
rge_ifmedia_upd(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_media;
	int anar, gig, val;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Disable Gigabit Lite. */
	RGE_PHY_CLRBIT(sc, 0xa428, 0x0200);
	RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0001);

	val = rge_read_phy_ocp(sc, 0xa5d4);
	val &= ~RGE_ADV_2500TFDX;

	anar = gig = 0;
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		val |= RGE_ADV_2500TFDX;
		break;
	case IFM_2500_T:
		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		val |= RGE_ADV_2500TFDX;
		ifp->if_baudrate = IF_Mbps(2500);
		break;
	case IFM_1000_T:
		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		ifp->if_baudrate = IF_Gbps(1);
		break;
	case IFM_100_TX:
		gig = rge_read_phy(sc, 0, MII_100T2CR) &
		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
		    ANAR_TX | ANAR_TX_FD | ANAR_10_FD | ANAR_10 :
		    ANAR_TX | ANAR_10_FD | ANAR_10;
		ifp->if_baudrate = IF_Mbps(100);
		break;
	case IFM_10_T:
		gig = rge_read_phy(sc, 0, MII_100T2CR) &
		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
		    ANAR_10_FD | ANAR_10 : ANAR_10;
		ifp->if_baudrate = IF_Mbps(10);
		break;
	default:
		device_printf(sc->sc_dev, "unsupported media type\n");
		return (EINVAL);
	}

	rge_write_phy(sc, 0, MII_ANAR, anar | ANAR_PAUSE_ASYM | ANAR_FC);
	rge_write_phy(sc, 0, MII_100T2CR, gig);
	rge_write_phy_ocp(sc, 0xa5d4, val);
	rge_write_phy(sc, 0, MII_BMCR, BMCR_RESET | BMCR_AUTOEN |
	    BMCR_STARTNEG);

	return (0);
}

/*
 * Report current media status.
 */
void
rge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct rge_softc *sc = ifp->if_softc;
	uint16_t status = 0;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (rge_get_link_status(sc)) {
		ifmr->ifm_status |= IFM_ACTIVE;

		status = RGE_READ_2(sc, RGE_PHYSTAT);
		if ((status & RGE_PHYSTAT_FDX) ||
		    (status & RGE_PHYSTAT_2500MBPS))
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;

		if (status & RGE_PHYSTAT_10MBPS)
			ifmr->ifm_active |= IFM_10_T;
		else if (status & RGE_PHYSTAT_100MBPS)
			ifmr->ifm_active |= IFM_100_TX;
		else if (status & RGE_PHYSTAT_1000MBPS)
			ifmr->ifm_active |= IFM_1000_T;
		else if (status & RGE_PHYSTAT_2500MBPS)
			ifmr->ifm_active |= IFM_2500_T;
	}
}

/*
 * Allocate memory for RX/TX rings.
 */
int
rge_allocmem(struct rge_softc *sc)
{
	int error, i;

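	/*
	 * Each ring follows the usual bus_dma(9) sequence: create a DMA
	 * map, allocate DMA-safe memory, map it into kernel virtual
	 * address space, then load the map to learn the ring's bus address.
	 */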
	/* Allocate DMA'able memory for the TX ring. */
	error = bus_dmamap_create(sc->sc_dmat, RGE_TX_LIST_SZ, 1,
	    RGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT, &sc->rge_ldata.rge_tx_list_map);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't create TX list map\n");
		return (error);
	}
	error = bus_dmamem_alloc(sc->sc_dmat, RGE_TX_LIST_SZ, RGE_ALIGN, 0,
	    &sc->rge_ldata.rge_tx_listseg, 1, &sc->rge_ldata.rge_tx_listnseg,
	    BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't alloc TX list\n");
		return (error);
	}

	/* Load the map for the TX ring. */
	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
	    sc->rge_ldata.rge_tx_listnseg, RGE_TX_LIST_SZ,
	    (void **)&sc->rge_ldata.rge_tx_list,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't map TX dma buffers\n");
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
		    sc->rge_ldata.rge_tx_listnseg);
		return (error);
	}
	memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);
	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
	    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't load TX dma map\n");
		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map);
		bus_dmamem_unmap(sc->sc_dmat,
		    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ);
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
		    sc->rge_ldata.rge_tx_listnseg);
		return (error);
	}

	/* Create DMA maps for TX buffers. */
	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN,
		    RGE_TX_NSEGS, RGE_JUMBO_FRAMELEN, 0, 0,
		    &sc->rge_ldata.rge_txq[i].txq_dmamap);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't create DMA map for TX\n");
			return (error);
		}
	}

	/* Allocate DMA'able memory for the RX ring. */
	error = bus_dmamap_create(sc->sc_dmat, RGE_RX_LIST_SZ, 1,
	    RGE_RX_LIST_SZ, 0, 0, &sc->rge_ldata.rge_rx_list_map);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't create RX list map\n");
		return (error);
	}
	error = bus_dmamem_alloc(sc->sc_dmat, RGE_RX_LIST_SZ, RGE_ALIGN, 0,
	    &sc->rge_ldata.rge_rx_listseg, 1, &sc->rge_ldata.rge_rx_listnseg,
	    BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't alloc RX list\n");
		return (error);
	}

	/* Load the map for the RX ring. */
	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
	    sc->rge_ldata.rge_rx_listnseg, RGE_RX_LIST_SZ,
	    (void **)&sc->rge_ldata.rge_rx_list,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't map RX dma buffers\n");
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
		    sc->rge_ldata.rge_rx_listnseg);
		return (error);
	}
	memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);
	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
	    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't load RX dma map\n");
		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map);
		bus_dmamem_unmap(sc->sc_dmat,
		    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ);
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
		    sc->rge_ldata.rge_rx_listnseg);
		return (error);
	}

	/* Create DMA maps for RX buffers. */
	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN, 1,
		    RGE_JUMBO_FRAMELEN, 0, 0,
		    &sc->rge_ldata.rge_rxq[i].rxq_dmamap);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't create DMA map for RX\n");
			return (error);
		}
	}

	return (error);
}

/*
 * Initialize the RX descriptor and attach an mbuf cluster.
 */
int
rge_newbuf(struct rge_softc *sc, int idx)
{
	struct mbuf *m;
	struct rge_rx_desc *r;
	struct rge_rxq *rxq;
	bus_dmamap_t rxmap;

	m = MCLGETL(NULL, M_DONTWAIT, RGE_JUMBO_FRAMELEN);
	if (m == NULL)
		return (ENOBUFS);

	m->m_len = m->m_pkthdr.len = RGE_JUMBO_FRAMELEN;

	rxq = &sc->rge_ldata.rge_rxq[idx];
	rxmap = rxq->rxq_dmamap;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m, BUS_DMA_NOWAIT))
		goto out;

	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Map the segments into RX descriptors. */
	r = &sc->rge_ldata.rge_rx_list[idx];

	rxq->rxq_mbuf = m;

	r->hi_qword1.rx_qword4.rge_extsts = 0;
	r->hi_qword0.rge_addr = htole64(rxmap->dm_segs[0].ds_addr);

	r->hi_qword1.rx_qword4.rge_cmdsts = htole32(rxmap->dm_segs[0].ds_len);
	if (idx == RGE_RX_LIST_CNT - 1)
		r->hi_qword1.rx_qword4.rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);

	r->hi_qword1.rx_qword4.rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
out:
	if (m != NULL)
		m_freem(m);
	return (ENOMEM);
}

void
rge_discard_rxbuf(struct rge_softc *sc, int idx)
{
	struct rge_rx_desc *r;

	r = &sc->rge_ldata.rge_rx_list[idx];

	r->hi_qword1.rx_qword4.rge_cmdsts = htole32(RGE_JUMBO_FRAMELEN);
	r->hi_qword1.rx_qword4.rge_extsts = 0;
	if (idx == RGE_RX_LIST_CNT - 1)
		r->hi_qword1.rx_qword4.rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
	r->hi_qword1.rx_qword4.rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static int
rge_rx_list_init(struct rge_softc *sc)
{
	unsigned i;

	memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);

	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
		sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
		if (rge_newbuf(sc, i) != 0) {
			rge_rx_list_fini(sc);
			return (ENOBUFS);
		}
	}

	sc->rge_ldata.rge_rxq_prodidx = sc->rge_ldata.rge_rxq_considx = 0;
	sc->rge_head = sc->rge_tail = NULL;

	return (0);
}

static void
rge_rx_list_fini(struct rge_softc *sc)
{
	unsigned i;

	/* Free the RX list buffers. */
	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
		if (sc->rge_ldata.rge_rxq[i].rxq_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->rge_ldata.rge_rxq[i].rxq_dmamap);
			m_freem(sc->rge_ldata.rge_rxq[i].rxq_mbuf);
			sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
		}
	}
}

static void
rge_tx_list_init(struct rge_softc *sc)
{
	unsigned i;

	memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);

	for (i = 0; i < RGE_TX_LIST_CNT; i++)
		sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map, 0,
	    sc->rge_ldata.rge_tx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->rge_ldata.rge_txq_prodidx = sc->rge_ldata.rge_txq_considx = 0;
}

static void
rge_tx_list_fini(struct rge_softc *sc)
{
	unsigned i;

	/* Free the TX list buffers. */
	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
		if (sc->rge_ldata.rge_txq[i].txq_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->rge_ldata.rge_txq[i].txq_dmamap);
			m_freem(sc->rge_ldata.rge_txq[i].txq_mbuf);
			sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
		}
	}
}

int
rge_rxeof(struct rge_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct rge_rx_desc *cur_rx;
	struct rge_rxq *rxq;
	uint32_t rxstat, extsts;
	int i, total_len, rx = 0;

	for (i = sc->rge_ldata.rge_rxq_considx; ; i = RGE_NEXT_RX_DESC(i)) {
		/* Invalidate the descriptor memory. */
		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
		    i * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_rx = &sc->rge_ldata.rge_rx_list[i];

		if (RGE_OWN(cur_rx))
			break;

		rxstat = letoh32(cur_rx->hi_qword1.rx_qword4.rge_cmdsts);
		extsts = letoh32(cur_rx->hi_qword1.rx_qword4.rge_extsts);

		total_len = RGE_RXBYTES(cur_rx);
		rxq = &sc->rge_ldata.rge_rxq[i];
		m = rxq->rxq_mbuf;
		rxq->rxq_mbuf = NULL;
		rx = 1;

		/* Invalidate the RX mbuf and unload its map. */
		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_dmamap, 0,
		    rxq->rxq_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxq->rxq_dmamap);

		if ((rxstat & (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) !=
		    (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) {
			rge_discard_rxbuf(sc, i);
			continue;
		}

		if (rxstat & RGE_RDCMDSTS_RXERRSUM) {
			if_statinc(ifp, if_ierrors);
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->rge_head != NULL) {
				m_freem(sc->rge_head);
				sc->rge_head = sc->rge_tail = NULL;
			}
			rge_discard_rxbuf(sc, i);
			continue;
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
		 */

		if (rge_newbuf(sc, i) != 0) {
			if (sc->rge_head != NULL) {
				m_freem(sc->rge_head);
				sc->rge_head = sc->rge_tail = NULL;
			}
			rge_discard_rxbuf(sc, i);
			continue;
		}

		m_set_rcvif(m, ifp);
		if (sc->rge_head != NULL) {
			m->m_len = total_len;
			/*
			 * Special case: if there are 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes are the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->rge_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->rge_tail->m_next = m;
			}
			m = sc->rge_head;
			sc->rge_head = sc->rge_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
#if 0
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);
#else
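		/*
		 * Single-fragment packet: keep the FCS in the mbuf
		 * and set M_HASFCS so the stack strips it for us.
		 */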
		{
			m->m_pkthdr.len = m->m_len = total_len;
			m->m_flags |= M_HASFCS;
		}
#endif

#if notyet
		/* Check IP header checksum. */
		if (!(extsts & RGE_RDEXTSTS_IPCSUMERR) &&
		    (extsts & RGE_RDEXTSTS_IPV4))
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

		/* Check TCP/UDP checksum. */
		if ((extsts & (RGE_RDEXTSTS_IPV4 | RGE_RDEXTSTS_IPV6)) &&
		    (((extsts & RGE_RDEXTSTS_TCPPKT) &&
		    !(extsts & RGE_RDEXTSTS_TCPCSUMERR)) ||
		    ((extsts & RGE_RDEXTSTS_UDPPKT) &&
		    !(extsts & RGE_RDEXTSTS_UDPCSUMERR))))
			m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
			    M_UDP_CSUM_IN_OK;
#endif

		if (extsts & RGE_RDEXTSTS_VTAG) {
			vlan_set_tag(m,
			    bswap16(extsts & RGE_RDEXTSTS_VLAN_MASK));
		}

		if_percpuq_enqueue(ifp->if_percpuq, m);
	}

	sc->rge_ldata.rge_rxq_considx = i;

	return (rx);
}

int
rge_txeof(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct rge_txq *txq;
	uint32_t txstat;
	int cons, idx, prod;
	int free = 0;
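	/*
	 * free: 0 = nothing reclaimed, 1 = reclaimed at least one
	 * descriptor, 2 = stopped on a descriptor the chip still owns.
	 */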

	prod = sc->rge_ldata.rge_txq_prodidx;
	cons = sc->rge_ldata.rge_txq_considx;

	while (prod != cons) {
		txq = &sc->rge_ldata.rge_txq[cons];
		idx = txq->txq_descidx;

		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
		    idx * sizeof(struct rge_tx_desc),
		    sizeof(struct rge_tx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		txstat = letoh32(sc->rge_ldata.rge_tx_list[idx].rge_cmdsts);

		if (txstat & RGE_TDCMDSTS_OWN) {
			free = 2;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap, 0,
		    txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
		m_freem(txq->txq_mbuf);
		txq->txq_mbuf = NULL;

		if (txstat & (RGE_TDCMDSTS_EXCESSCOLL | RGE_TDCMDSTS_COLL))
			if_statinc(ifp, if_collisions);
		if (txstat & RGE_TDCMDSTS_TXERR)
			if_statinc(ifp, if_oerrors);

		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
		    idx * sizeof(struct rge_tx_desc),
		    sizeof(struct rge_tx_desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		cons = RGE_NEXT_TX_DESC(idx);
		free = 1;
	}

	if (free == 0)
		return (0);

	sc->rge_ldata.rge_txq_considx = cons;

#if 0
	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
	else if (free == 2)
		ifq_serialize(&ifp->if_snd, &sc->sc_task);
	else
		ifp->if_timer = 0;
#else
#if 0
	if (!IF_IS_EMPTY(&ifp->if_snd))
		rge_start(ifp);
	else
	if (free == 2)
		if (0) { rge_txstart(&sc->sc_task, sc); }
	else
#endif
	ifp->if_timer = 0;
#endif

	return (1);
}

void
rge_reset(struct rge_softc *sc)
{
	int i;

	/* Enable RXDV gate. */
	RGE_SETBIT_1(sc, RGE_PPSW, 0x08);
	DELAY(2000);

	for (i = 0; i < 3000; i++) {
		DELAY(50);
		if ((RGE_READ_1(sc, RGE_MCUCMD) & (RGE_MCUCMD_RXFIFO_EMPTY |
		    RGE_MCUCMD_TXFIFO_EMPTY)) == (RGE_MCUCMD_RXFIFO_EMPTY |
		    RGE_MCUCMD_TXFIFO_EMPTY))
			break;
	}
	if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5) {
		for (i = 0; i < 3000; i++) {
			DELAY(50);
			if ((RGE_READ_2(sc, RGE_IM) & 0x0103) == 0x0103)
				break;
		}
	}

	DELAY(2000);

	/* Soft reset. */
	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_RESET);

	for (i = 0; i < RGE_TIMEOUT; i++) {
		DELAY(100);
		if (!(RGE_READ_1(sc, RGE_CMD) & RGE_CMD_RESET))
			break;
	}
	if (i == RGE_TIMEOUT)
		device_printf(sc->sc_dev, "reset never completed!\n");
}

void
rge_iff(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct ethercom *ec = &sc->sc_ec;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[2];
	uint32_t rxfilt;
	int h = 0;

	rxfilt = RGE_READ_4(sc, RGE_RXCFG);
	rxfilt &= ~(RGE_RXCFG_ALLPHYS | RGE_RXCFG_MULTI);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept frames destined to our station address.
	 * Always accept broadcast frames.
	 */
	rxfilt |= RGE_RXCFG_INDIV | RGE_RXCFG_BROAD;

	if (ifp->if_flags & IFF_PROMISC) {
allmulti:
		ifp->if_flags |= IFF_ALLMULTI;
		rxfilt |= RGE_RXCFG_MULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= RGE_RXCFG_ALLPHYS;
		hashes[0] = hashes[1] = 0xffffffff;
	} else {
		rxfilt |= RGE_RXCFG_MULTI;
		/* Program new filter. */
		memset(hashes, 0, sizeof(hashes));

		ETHER_LOCK(ec);
		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN) != 0) {
				ETHER_UNLOCK(ec);
				goto allmulti;
			}
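			/*
			 * The top 6 bits of the big-endian CRC of the
			 * address select one of the 64 bits in the two
			 * multicast hash registers.
			 */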
			h = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN) >> 26;

			if (h < 32)
				hashes[0] |= (1U << h);
			else
				hashes[1] |= (1U << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
		ETHER_UNLOCK(ec);
	}

	RGE_WRITE_4(sc, RGE_RXCFG, rxfilt);
	RGE_WRITE_4(sc, RGE_MAR0, bswap32(hashes[1]));
	RGE_WRITE_4(sc, RGE_MAR4, bswap32(hashes[0]));
}

void
rge_set_phy_power(struct rge_softc *sc, int on)
{
	int i;

	if (on) {
		RGE_SETBIT_1(sc, RGE_PMCH, 0xc0);

		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN);

		for (i = 0; i < RGE_TIMEOUT; i++) {
			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 3)
				break;
			DELAY(1000);
		}
	} else {
		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_PDOWN);
		RGE_CLRBIT_1(sc, RGE_PMCH, 0x80);
		RGE_CLRBIT_1(sc, RGE_PPSW, 0x40);
	}
}

void
rge_phy_config(struct rge_softc *sc)
{
	/* Read microcode version. */
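	/*
	 * Registers 0xa436/0xa438 form an indirect address/data window
	 * into the PHY's extended registers.
	 */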
	rge_write_phy_ocp(sc, 0xa436, 0x801e);
	sc->rge_mcodever = rge_read_phy_ocp(sc, 0xa438);

	switch (sc->rge_type) {
	case MAC_CFG2:
		rge_phy_config_mac_cfg2(sc);
		break;
	case MAC_CFG3:
		rge_phy_config_mac_cfg3(sc);
		break;
	case MAC_CFG4:
		rge_phy_config_mac_cfg4(sc);
		break;
	case MAC_CFG5:
		rge_phy_config_mac_cfg5(sc);
		break;
	default:
		break;	/* Can't happen. */
	}

	rge_write_phy(sc, 0x0a5b, 0x12,
	    rge_read_phy(sc, 0x0a5b, 0x12) & ~0x8000);

	/* Disable EEE. */
	RGE_MAC_CLRBIT(sc, 0xe040, 0x0003);
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
		RGE_MAC_CLRBIT(sc, 0xeb62, 0x0006);
		RGE_PHY_CLRBIT(sc, 0xa432, 0x0010);
	}
	RGE_PHY_CLRBIT(sc, 0xa5d0, 0x0006);
	RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xa6d8, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xa428, 0x0080);
	RGE_PHY_CLRBIT(sc, 0xa4a2, 0x0200);

	rge_patch_phy_mcu(sc, 1);
	RGE_MAC_CLRBIT(sc, 0xe052, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xa442, 0x3000);
	RGE_PHY_CLRBIT(sc, 0xa430, 0x8000);
	rge_patch_phy_mcu(sc, 0);
}

void
rge_phy_config_mac_cfg2(struct rge_softc *sc)
{
	uint16_t val;
	int i;

	for (i = 0; i < nitems(rtl8125_mac_cfg2_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg2_ephy[i].reg,
		    rtl8125_mac_cfg2_ephy[i].val);

	rge_phy_config_mcu(sc, RGE_MAC_CFG2_MCODE_VER);

	val = rge_read_phy_ocp(sc, 0xad40) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0084);
	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad16, val | 0x0006);
	val = rge_read_phy_ocp(sc, 0xad32) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x1100);
	val = rge_read_phy_ocp(sc, 0xac8a) & ~0xf000;
	rge_write_phy_ocp(sc, 0xac8a, val | 0x7000);
	RGE_PHY_SETBIT(sc, 0xad18, 0x0400);
	RGE_PHY_SETBIT(sc, 0xad1a, 0x03ff);
	RGE_PHY_SETBIT(sc, 0xad1c, 0x03ff);

	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xc400);
	rge_write_phy_ocp(sc, 0xa436, 0x80eb);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0x0700;
	rge_write_phy_ocp(sc, 0xa438, val | 0x0300);
	rge_write_phy_ocp(sc, 0xa436, 0x80f8);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80f1);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x3000);
	rge_write_phy_ocp(sc, 0xa436, 0x80fe);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xa500);
	rge_write_phy_ocp(sc, 0xa436, 0x8102);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x8105);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x3300);
	rge_write_phy_ocp(sc, 0xa436, 0x8100);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x7000);
	rge_write_phy_ocp(sc, 0xa436, 0x8104);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xf000);
	rge_write_phy_ocp(sc, 0xa436, 0x8106);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6500);
	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xed00);
	rge_write_phy_ocp(sc, 0xa436, 0x80df);
	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
	rge_write_phy_ocp(sc, 0xa436, 0x80e1);
	RGE_PHY_CLRBIT(sc, 0xa438, 0x0100);
	val = rge_read_phy_ocp(sc, 0xbf06) & ~0x003f;
	rge_write_phy_ocp(sc, 0xbf06, val | 0x0038);
	rge_write_phy_ocp(sc, 0xa436, 0x819f);
	rge_write_phy_ocp(sc, 0xa438, 0xd0b6);
	rge_write_phy_ocp(sc, 0xbc34, 0x5555);
	val = rge_read_phy_ocp(sc, 0xbf0a) & ~0x0e00;
	rge_write_phy_ocp(sc, 0xbf0a, val | 0x0a00);
	RGE_PHY_CLRBIT(sc, 0xa5c0, 0x0400);
	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
}

void
rge_phy_config_mac_cfg3(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint16_t val;
	int i;
	static const uint16_t mac_cfg3_a438_value[] =
	    { 0x0043, 0x00a7, 0x00d6, 0x00ec, 0x00f6, 0x00fb, 0x00fd, 0x00ff,
	      0x00bb, 0x0058, 0x0029, 0x0013, 0x0009, 0x0004, 0x0002 };

	static const uint16_t mac_cfg3_b88e_value[] =
	    { 0xc091, 0x6e12, 0xc092, 0x1214, 0xc094, 0x1516, 0xc096, 0x171b,
	      0xc098, 0x1b1c, 0xc09a, 0x1f1f, 0xc09c, 0x2021, 0xc09e, 0x2224,
	      0xc0a0, 0x2424, 0xc0a2, 0x2424, 0xc0a4, 0x2424, 0xc018, 0x0af2,
	      0xc01a, 0x0d4a, 0xc01c, 0x0f26, 0xc01e, 0x118d, 0xc020, 0x14f3,
	      0xc022, 0x175a, 0xc024, 0x19c0, 0xc026, 0x1c26, 0xc089, 0x6050,
	      0xc08a, 0x5f6e, 0xc08c, 0x6e6e, 0xc08e, 0x6e6e, 0xc090, 0x6e12 };

	for (i = 0; i < nitems(rtl8125_mac_cfg3_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg3_ephy[i].reg,
		    rtl8125_mac_cfg3_ephy[i].val);

	val = rge_read_ephy(sc, 0x002a) & ~0x7000;
	rge_write_ephy(sc, 0x002a, val | 0x3000);
	RGE_EPHY_CLRBIT(sc, 0x0019, 0x0040);
	RGE_EPHY_SETBIT(sc, 0x001b, 0x0e00);
	RGE_EPHY_CLRBIT(sc, 0x001b, 0x7000);
	rge_write_ephy(sc, 0x0002, 0x6042);
	rge_write_ephy(sc, 0x0006, 0x0014);
	val = rge_read_ephy(sc, 0x006a) & ~0x7000;
	rge_write_ephy(sc, 0x006a, val | 0x3000);
	RGE_EPHY_CLRBIT(sc, 0x0059, 0x0040);
	RGE_EPHY_SETBIT(sc, 0x005b, 0x0e00);
	RGE_EPHY_CLRBIT(sc, 0x005b, 0x7000);
	rge_write_ephy(sc, 0x0042, 0x6042);
	rge_write_ephy(sc, 0x0046, 0x0014);

	rge_phy_config_mcu(sc, RGE_MAC_CFG3_MCODE_VER);

	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad16, val | 0x03ff);
	val = rge_read_phy_ocp(sc, 0xad32) & ~0x003f;
	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x1000);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x0100);
	val = rge_read_phy_ocp(sc, 0xacc0) & ~0x0003;
	rge_write_phy_ocp(sc, 0xacc0, val | 0x0002);
	val = rge_read_phy_ocp(sc, 0xad40) & ~0x00e0;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0040);
	val = rge_read_phy_ocp(sc, 0xad40) & ~0x0007;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0004);
	RGE_PHY_CLRBIT(sc, 0xac14, 0x0080);
	RGE_PHY_CLRBIT(sc, 0xac80, 0x0300);
	val = rge_read_phy_ocp(sc, 0xac5e) & ~0x0007;
	rge_write_phy_ocp(sc, 0xac5e, val | 0x0002);
	rge_write_phy_ocp(sc, 0xad4c, 0x00a8);
	rge_write_phy_ocp(sc, 0xac5c, 0x01ff);
	val = rge_read_phy_ocp(sc, 0xac8a) & ~0x00f0;
	rge_write_phy_ocp(sc, 0xac8a, val | 0x0030);
	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0500);
	rge_write_phy_ocp(sc, 0xb87c, 0x8159);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0700);
	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN +
	    32);
	rge_write_phy_ocp(sc, 0xb87c, 0x80a2);
	rge_write_phy_ocp(sc, 0xb87e, 0x0153);
	rge_write_phy_ocp(sc, 0xb87c, 0x809c);
	rge_write_phy_ocp(sc, 0xb87e, 0x0153);

	rge_write_phy_ocp(sc, 0xa436, 0x81b3);
	for (i = 0; i < nitems(mac_cfg3_a438_value); i++)
		rge_write_phy_ocp(sc, 0xa438, mac_cfg3_a438_value[i]);
	for (i = 0; i < 26; i++)
		rge_write_phy_ocp(sc, 0xa438, 0);
	rge_write_phy_ocp(sc, 0xa436, 0x8257);
	rge_write_phy_ocp(sc, 0xa438, 0x020f);
	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
	rge_write_phy_ocp(sc, 0xa438, 0x7843);

	rge_patch_phy_mcu(sc, 1);
	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
	for (i = 0; i < nitems(mac_cfg3_b88e_value); i += 2) {
		rge_write_phy_ocp(sc, 0xb88e, mac_cfg3_b88e_value[i]);
		rge_write_phy_ocp(sc, 0xb890, mac_cfg3_b88e_value[i + 1]);
	}
	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
	rge_patch_phy_mcu(sc, 0);

	RGE_PHY_SETBIT(sc, 0xd068, 0x2000);
	rge_write_phy_ocp(sc, 0xa436, 0x81a2);
	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
	val = rge_read_phy_ocp(sc, 0xb54c) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb54c, val | 0xdb00);
	RGE_PHY_CLRBIT(sc, 0xa454, 0x0001);
	RGE_PHY_SETBIT(sc, 0xa5d4, 0x0020);
	RGE_PHY_CLRBIT(sc, 0xad4e, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xa86a, 0x0001);
	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
}

void
rge_phy_config_mac_cfg4(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint16_t val;
	int i;
	static const uint16_t mac_cfg4_b87c_value[] =
	    { 0x8013, 0x0700, 0x8fb9, 0x2801, 0x8fba, 0x0100, 0x8fbc, 0x1900,
	      0x8fbe, 0xe100, 0x8fc0, 0x0800, 0x8fc2, 0xe500, 0x8fc4, 0x0f00,
	      0x8fc6, 0xf100, 0x8fc8, 0x0400, 0x8fca, 0xf300, 0x8fcc, 0xfd00,
	      0x8fce, 0xff00, 0x8fd0, 0xfb00, 0x8fd2, 0x0100, 0x8fd4, 0xf400,
	      0x8fd6, 0xff00, 0x8fd8, 0xf600, 0x813d, 0x390e, 0x814f, 0x790e,
	      0x80b0, 0x0f31 };

	for (i = 0; i < nitems(rtl8125_mac_cfg4_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg4_ephy[i].reg,
		    rtl8125_mac_cfg4_ephy[i].val);

	rge_write_phy_ocp(sc, 0xbf86, 0x9000);
	RGE_PHY_SETBIT(sc, 0xc402, 0x0400);
	RGE_PHY_CLRBIT(sc, 0xc402, 0x0400);
	rge_write_phy_ocp(sc, 0xbd86, 0x1010);
	rge_write_phy_ocp(sc, 0xbd88, 0x1010);
	val = rge_read_phy_ocp(sc, 0xbd4e) & ~0x0c00;
	rge_write_phy_ocp(sc, 0xbd4e, val | 0x0800);
	val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
	rge_write_phy_ocp(sc, 0xbf46, val | 0x0700);

	rge_phy_config_mcu(sc, RGE_MAC_CFG4_MCODE_VER);

	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
	RGE_PHY_SETBIT(sc, 0xbc08, 0x000c);
	rge_write_phy_ocp(sc, 0xa436, 0x8fff);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x0400);
	for (i = 0; i < 6; i++) {
		rge_write_phy_ocp(sc, 0xb87c, 0x8560 + i * 2);
		if (i < 3)
			rge_write_phy_ocp(sc, 0xb87e, 0x19cc);
		else
			rge_write_phy_ocp(sc, 0xb87e, 0x147d);
	}
	rge_write_phy_ocp(sc, 0xb87c, 0x8ffe);
	rge_write_phy_ocp(sc, 0xb87e, 0x0907);
	val = rge_read_phy_ocp(sc, 0xacda) & ~0xff00;
	rge_write_phy_ocp(sc, 0xacda, val | 0xff00);
	val = rge_read_phy_ocp(sc, 0xacde) & ~0xf000;
	rge_write_phy_ocp(sc, 0xacde, val | 0xf000);
	rge_write_phy_ocp(sc, 0xb87c, 0x80d6);
	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
	rge_write_phy_ocp(sc, 0xb87c, 0x80f2);
	rge_write_phy_ocp(sc, 0xb87e, 0x2801);
	rge_write_phy_ocp(sc, 0xb87c, 0x80f4);
	rge_write_phy_ocp(sc, 0xb87e, 0x6077);
	rge_write_phy_ocp(sc, 0xb506, 0x01e7);
	rge_write_phy_ocp(sc, 0xac8c, 0x0ffc);
	rge_write_phy_ocp(sc, 0xac46, 0xb7b4);
	rge_write_phy_ocp(sc, 0xac50, 0x0fbc);
	rge_write_phy_ocp(sc, 0xac3c, 0x9240);
	rge_write_phy_ocp(sc, 0xac4e, 0x0db4);
	rge_write_phy_ocp(sc, 0xacc6, 0x0707);
	rge_write_phy_ocp(sc, 0xacc8, 0xa0d3);
	rge_write_phy_ocp(sc, 0xad08, 0x0007);
	for (i = 0; i < nitems(mac_cfg4_b87c_value); i += 2) {
		rge_write_phy_ocp(sc, 0xb87c, mac_cfg4_b87c_value[i]);
		rge_write_phy_ocp(sc, 0xb87e, mac_cfg4_b87c_value[i + 1]);
	}
	RGE_PHY_SETBIT(sc, 0xbf4c, 0x0002);
	RGE_PHY_SETBIT(sc, 0xbcca, 0x0300);
	rge_write_phy_ocp(sc, 0xb87c, 0x8141);
	rge_write_phy_ocp(sc, 0xb87e, 0x320e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8153);
	rge_write_phy_ocp(sc, 0xb87e, 0x720e);
	RGE_PHY_CLRBIT(sc, 0xa432, 0x0040);
	rge_write_phy_ocp(sc, 0xb87c, 0x8529);
	rge_write_phy_ocp(sc, 0xb87e, 0x050e);
	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN +
	    32);
	rge_write_phy_ocp(sc, 0xa436, 0x816c);
	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
	rge_write_phy_ocp(sc, 0xa436, 0x8170);
	rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
	rge_write_phy_ocp(sc, 0xa436, 0x8174);
	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
	rge_write_phy_ocp(sc, 0xa436, 0x8178);
	rge_write_phy_ocp(sc, 0xa438, 0x04a0);
	rge_write_phy_ocp(sc, 0xa436, 0x817c);
	rge_write_phy_ocp(sc, 0xa438, 0x0719);
	rge_write_phy_ocp(sc, 0xa436, 0x8ff4);
	rge_write_phy_ocp(sc, 0xa438, 0x0400);
	rge_write_phy_ocp(sc, 0xa436, 0x8ff1);
	rge_write_phy_ocp(sc, 0xa438, 0x0404);
	rge_write_phy_ocp(sc, 0xbf4a, 0x001b);
	for (i = 0; i < 6; i++) {
		rge_write_phy_ocp(sc, 0xb87c, 0x8033 + i * 4);
		if (i == 2)
			rge_write_phy_ocp(sc, 0xb87e, 0xfc32);
		else
			rge_write_phy_ocp(sc, 0xb87e, 0x7c13);
	}
	rge_write_phy_ocp(sc, 0xb87c, 0x8145);
	rge_write_phy_ocp(sc, 0xb87e, 0x370e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
	rge_write_phy_ocp(sc, 0xb87e, 0x770e);
	rge_write_phy_ocp(sc, 0xb87c, 0x8169);
	rge_write_phy_ocp(sc, 0xb87e, 0x0d0a);
	rge_write_phy_ocp(sc, 0xb87c, 0x817b);
	rge_write_phy_ocp(sc, 0xb87e, 0x1d0a);
	rge_write_phy_ocp(sc, 0xa436, 0x8217);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x821a);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x80da);
	rge_write_phy_ocp(sc, 0xa438, 0x0403);
	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
	rge_write_phy_ocp(sc, 0xa436, 0x80b3);
	rge_write_phy_ocp(sc, 0xa438, 0x0384);
	rge_write_phy_ocp(sc, 0xa436, 0x80b7);
	rge_write_phy_ocp(sc, 0xa438, 0x2007);
	rge_write_phy_ocp(sc, 0xa436, 0x80ba);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80b5);
	rge_write_phy_ocp(sc, 0xa438, 0xf009);
	rge_write_phy_ocp(sc, 0xa436, 0x80bd);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x9f00);
	rge_write_phy_ocp(sc, 0xa436, 0x80c7);
	rge_write_phy_ocp(sc, 0xa438, 0xf083);
	rge_write_phy_ocp(sc, 0xa436, 0x80dd);
	rge_write_phy_ocp(sc, 0xa438, 0x03f0);
	rge_write_phy_ocp(sc, 0xa436, 0x80df);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
	rge_write_phy_ocp(sc, 0xa436, 0x80cb);
	rge_write_phy_ocp(sc, 0xa438, 0x2007);
	rge_write_phy_ocp(sc, 0xa436, 0x80ce);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80c9);
	rge_write_phy_ocp(sc, 0xa438, 0x8009);
	rge_write_phy_ocp(sc, 0xa436, 0x80d1);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x8000);
	rge_write_phy_ocp(sc, 0xa436, 0x80a3);
	rge_write_phy_ocp(sc, 0xa438, 0x200a);
	rge_write_phy_ocp(sc, 0xa436, 0x80a5);
	rge_write_phy_ocp(sc, 0xa438, 0xf0ad);
	rge_write_phy_ocp(sc, 0xa436, 0x809f);
	rge_write_phy_ocp(sc, 0xa438, 0x6073);
	rge_write_phy_ocp(sc, 0xa436, 0x80a1);
	rge_write_phy_ocp(sc, 0xa438, 0x000b);
	rge_write_phy_ocp(sc, 0xa436, 0x80a9);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xc000);
	rge_patch_phy_mcu(sc, 1);
	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
	rge_write_phy_ocp(sc, 0xb88e, 0xc23e);
	rge_write_phy_ocp(sc, 0xb890, 0x0000);
	rge_write_phy_ocp(sc, 0xb88e, 0xc240);
	rge_write_phy_ocp(sc, 0xb890, 0x0103);
	rge_write_phy_ocp(sc, 0xb88e, 0xc242);
	rge_write_phy_ocp(sc, 0xb890, 0x0507);
	rge_write_phy_ocp(sc, 0xb88e, 0xc244);
	rge_write_phy_ocp(sc, 0xb890, 0x090b);
	rge_write_phy_ocp(sc, 0xb88e, 0xc246);
	rge_write_phy_ocp(sc, 0xb890, 0x0c0e);
	rge_write_phy_ocp(sc, 0xb88e, 0xc248);
	rge_write_phy_ocp(sc, 0xb890, 0x1012);
	rge_write_phy_ocp(sc, 0xb88e, 0xc24a);
	rge_write_phy_ocp(sc, 0xb890, 0x1416);
	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
	rge_patch_phy_mcu(sc, 0);
	RGE_PHY_SETBIT(sc, 0xa86a, 0x0001);
1955 RGE_PHY_SETBIT(sc, 0xa6f0, 0x0001);
1956 rge_write_phy_ocp(sc, 0xbfa0, 0xd70d);
1957 rge_write_phy_ocp(sc, 0xbfa2, 0x4100);
1958 rge_write_phy_ocp(sc, 0xbfa4, 0xe868);
1959 rge_write_phy_ocp(sc, 0xbfa6, 0xdc59);
1960 rge_write_phy_ocp(sc, 0xb54c, 0x3c18);
1961 RGE_PHY_CLRBIT(sc, 0xbfa4, 0x0020);
1962 rge_write_phy_ocp(sc, 0xa436, 0x817d);
1963 RGE_PHY_SETBIT(sc, 0xa438, 0x1000);
1964 }
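
/*
 * The configuration above leans on two indirect-write idioms: an
 * extended PHY register is selected by writing its address to OCP
 * register 0xa436 and its data to 0xa438, or likewise through the
 * 0xb87c/0xb87e pair.  Hypothetical helpers (a sketch only, not used
 * by this driver) make the pattern explicit:
 */
static inline void
rge_write_phy_ext_a43x(struct rge_softc *sc, uint16_t ereg, uint16_t edat)
{
	rge_write_phy_ocp(sc, 0xa436, ereg);	/* select extended register */
	rge_write_phy_ocp(sc, 0xa438, edat);	/* write its data */
}

static inline void
rge_write_phy_ext_b87x(struct rge_softc *sc, uint16_t ereg, uint16_t edat)
{
	rge_write_phy_ocp(sc, 0xb87c, ereg);
	rge_write_phy_ocp(sc, 0xb87e, edat);
}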
1965
1966 void
1967 rge_phy_config_mac_cfg5(struct rge_softc *sc)
1968 {
1969 struct ifnet *ifp = &sc->sc_ec.ec_if;
1970 uint16_t val;
1971 int i;
1972
1973 for (i = 0; i < nitems(rtl8125_mac_cfg5_ephy); i++)
1974 rge_write_ephy(sc, rtl8125_mac_cfg5_ephy[i].reg,
1975 rtl8125_mac_cfg5_ephy[i].val);
1976
1977 val = rge_read_ephy(sc, 0x0022) & ~0x0030;
1978 rge_write_ephy(sc, 0x0022, val | 0x0020);
1979 val = rge_read_ephy(sc, 0x0062) & ~0x0030;
1980 rge_write_ephy(sc, 0x0062, val | 0x0020);
1981
1982 rge_phy_config_mcu(sc, RGE_MAC_CFG5_MCODE_VER);
1983
1984 RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1985 val = rge_read_phy_ocp(sc, 0xac46) & ~0x00f0;
1986 rge_write_phy_ocp(sc, 0xac46, val | 0x0090);
1987 val = rge_read_phy_ocp(sc, 0xad30) & ~0x0003;
1988 rge_write_phy_ocp(sc, 0xad30, val | 0x0001);
1989 RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN +
1990 32);
1991 rge_write_phy_ocp(sc, 0xb87c, 0x80f5);
1992 rge_write_phy_ocp(sc, 0xb87e, 0x760e);
1993 rge_write_phy_ocp(sc, 0xb87c, 0x8107);
1994 rge_write_phy_ocp(sc, 0xb87e, 0x360e);
1995 rge_write_phy_ocp(sc, 0xb87c, 0x8551);
1996 val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1997 rge_write_phy_ocp(sc, 0xb87e, val | 0x0800);
1998 val = rge_read_phy_ocp(sc, 0xbf00) & ~0xe000;
1999 rge_write_phy_ocp(sc, 0xbf00, val | 0xa000);
2000 val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
2001 rge_write_phy_ocp(sc, 0xbf46, val | 0x0300);
2002 for (i = 0; i < 10; i++) {
2003 rge_write_phy_ocp(sc, 0xa436, 0x8044 + i * 6);
2004 rge_write_phy_ocp(sc, 0xa438, 0x2417);
2005 }
2006 RGE_PHY_SETBIT(sc, 0xa4ca, 0x0040);
2007 val = rge_read_phy_ocp(sc, 0xbf84) & ~0xe000;
2008 rge_write_phy_ocp(sc, 0xbf84, val | 0xa000);
2009 }
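
/*
 * Both the cfg4 and cfg5 paths program RGE_EEE_TXIDLE_TIMER from the
 * MTU: with the default MTU of 1500 the value is 1500 + ETHER_HDR_LEN
 * (14) + 32 = 1546.  The extra 32 bytes presumably leave headroom for
 * link-layer framing such as a VLAN tag and FCS; the chip
 * documentation does not spell this out.
 */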
2010
2011 void
2012 rge_phy_config_mcu(struct rge_softc *sc, uint16_t mcode_version)
2013 {
2014 if (sc->rge_mcodever != mcode_version) {
2015 int i;
2016
2017 rge_patch_phy_mcu(sc, 1);
2018
2019 if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
2020 rge_write_phy_ocp(sc, 0xa436, 0x8024);
2021 if (sc->rge_type == MAC_CFG2)
2022 rge_write_phy_ocp(sc, 0xa438, 0x8600);
2023 else
2024 rge_write_phy_ocp(sc, 0xa438, 0x8601);
2025 rge_write_phy_ocp(sc, 0xa436, 0xb82e);
2026 rge_write_phy_ocp(sc, 0xa438, 0x0001);
2027
2028 RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
2029 }
2030
2031 if (sc->rge_type == MAC_CFG2) {
2032 for (i = 0; i < nitems(rtl8125_mac_cfg2_mcu); i++) {
2033 rge_write_phy_ocp(sc,
2034 rtl8125_mac_cfg2_mcu[i].reg,
2035 rtl8125_mac_cfg2_mcu[i].val);
2036 }
2037 } else if (sc->rge_type == MAC_CFG3) {
2038 for (i = 0; i < nitems(rtl8125_mac_cfg3_mcu); i++) {
2039 rge_write_phy_ocp(sc,
2040 rtl8125_mac_cfg3_mcu[i].reg,
2041 rtl8125_mac_cfg3_mcu[i].val);
2042 }
2043 } else if (sc->rge_type == MAC_CFG4) {
2044 for (i = 0; i < nitems(rtl8125_mac_cfg4_mcu); i++) {
2045 rge_write_phy_ocp(sc,
2046 rtl8125_mac_cfg4_mcu[i].reg,
2047 rtl8125_mac_cfg4_mcu[i].val);
2048 }
2049 } else if (sc->rge_type == MAC_CFG5) {
2050 for (i = 0; i < nitems(rtl8125_mac_cfg5_mcu); i++) {
2051 rge_write_phy_ocp(sc,
2052 rtl8125_mac_cfg5_mcu[i].reg,
2053 rtl8125_mac_cfg5_mcu[i].val);
2054 }
2055 }
2056
2057 if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
2058 RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);
2059
2060 rge_write_phy_ocp(sc, 0xa436, 0);
2061 rge_write_phy_ocp(sc, 0xa438, 0);
2062 RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
2063 rge_write_phy_ocp(sc, 0xa436, 0x8024);
2064 rge_write_phy_ocp(sc, 0xa438, 0);
2065 }
2066
2067 rge_patch_phy_mcu(sc, 0);
2068
2069 /* Write microcode version. */
2070 rge_write_phy_ocp(sc, 0xa436, 0x801e);
2071 rge_write_phy_ocp(sc, 0xa438, mcode_version);
2072 }
2073 }
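
/*
 * sc->rge_mcodever is seeded at attach time by reading extended
 * register 0x801e back through the same 0xa436/0xa438 window, so the
 * version stamp written above keeps an already-resident patch from
 * being reloaded on every reinitialization.
 */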
2074
2075 void
2076 rge_set_macaddr(struct rge_softc *sc, const uint8_t *addr)
2077 {
2078 RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2079 RGE_WRITE_4(sc, RGE_MAC0,
2080 (uint32_t)addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
2081 RGE_WRITE_4(sc, RGE_MAC4,
2082 addr[5] << 8 | addr[4]);
2083 RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2084 }
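
/*
 * Example: for the address 00:11:22:33:44:55 the writes above yield
 * RGE_MAC0 = 0x33221100 and RGE_MAC4 = 0x00005544, i.e. the six bytes
 * laid out in little-endian register order, with the update bracketed
 * by RGE_EECMD_WRITECFG to unlock the registers.
 */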
2085
2086 void
2087 rge_get_macaddr(struct rge_softc *sc, uint8_t *addr)
2088 {
2089 int i;
2090
2091 for (i = 0; i < ETHER_ADDR_LEN; i++)
2092 addr[i] = RGE_READ_1(sc, RGE_ADDR0 + i);
2093 }
2094
2095 void
2096 rge_hw_init(struct rge_softc *sc)
2097 {
2098 int i;
2099
2100 RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2101 RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
2102 RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
2103 RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2104 RGE_CLRBIT_1(sc, 0xf1, 0x80);
2105
2106 /* Disable UPS. */
2107 RGE_MAC_CLRBIT(sc, 0xd40a, 0x0010);
2108
2109 /* Configure MAC MCU. */
2110 rge_write_mac_ocp(sc, 0xfc38, 0);
2111
2112 for (i = 0xfc28; i < 0xfc38; i += 2)
2113 rge_write_mac_ocp(sc, i, 0);
2114
2115 DELAY(3000);
2116 rge_write_mac_ocp(sc, 0xfc26, 0);
2117
2118 if (sc->rge_type == MAC_CFG3) {
2119 for (i = 0; i < nitems(rtl8125_mac_bps); i++) {
2120 rge_write_mac_ocp(sc, rtl8125_mac_bps[i].reg,
2121 rtl8125_mac_bps[i].val);
2122 }
2123 } else if (sc->rge_type == MAC_CFG5) {
2124 for (i = 0; i < nitems(rtl8125b_mac_bps); i++) {
2125 rge_write_mac_ocp(sc, rtl8125b_mac_bps[i].reg,
2126 rtl8125b_mac_bps[i].val);
2127 }
2128 }
2129
2130 /* Disable PHY power saving. */
2131 rge_disable_phy_ocp_pwrsave(sc);
2132
2133 /* Set PCIe uncorrectable error status. */
2134 rge_write_csi(sc, 0x108,
2135 rge_read_csi(sc, 0x108) | 0x00100000);
2136 }
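
/*
 * The 0xfc28-0xfc36 loop in rge_hw_init() zeroes what appear to be
 * the MAC MCU break-point slots (with 0xfc38 and, after a settling
 * delay, 0xfc26 cleared around them) before the per-chip
 * rtl8125{,b}_mac_bps tables are written, so stale patch state cannot
 * survive a reinit.
 */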
2137
2138 void
2139 rge_disable_phy_ocp_pwrsave(struct rge_softc *sc)
2140 {
2141 if (rge_read_phy_ocp(sc, 0xc416) != 0x0500) {
2142 rge_patch_phy_mcu(sc, 1);
2143 rge_write_phy_ocp(sc, 0xc416, 0);
2144 rge_write_phy_ocp(sc, 0xc416, 0x0500);
2145 rge_patch_phy_mcu(sc, 0);
2146 }
2147 }
2148
2149 void
2150 rge_patch_phy_mcu(struct rge_softc *sc, int set)
2151 {
2152 int i;
2153
2154 if (set)
2155 RGE_PHY_SETBIT(sc, 0xb820, 0x0010);
2156 else
2157 RGE_PHY_CLRBIT(sc, 0xb820, 0x0010);
2158
2159 for (i = 0; i < 1000; i++) {
2160 if ((rge_read_phy_ocp(sc, 0xb800) & 0x0040) == 0x0040)
2161 break;
2162 DELAY(100);
2163 }
2164 if (i == 1000) {
2165 		DPRINTF(("%s: timeout waiting to patch phy mcu\n",
		    device_xname(sc->sc_dev)));
2166 return;
2167 }
2168 }
2169
2170 void
2171 rge_add_media_types(struct rge_softc *sc)
2172 {
2173 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
2174 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
2175 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
2176 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
2177 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T, 0, NULL);
2178 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2179 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
2180 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX, 0, NULL);
2181 }
2182
2183 void
2184 rge_config_imtype(struct rge_softc *sc, int imtype)
2185 {
2186 switch (imtype) {
2187 case RGE_IMTYPE_NONE:
2188 sc->rge_intrs = RGE_INTRS;
2189 sc->rge_rx_ack = RGE_ISR_RX_OK | RGE_ISR_RX_DESC_UNAVAIL |
2190 RGE_ISR_RX_FIFO_OFLOW;
2191 sc->rge_tx_ack = RGE_ISR_TX_OK;
2192 break;
2193 case RGE_IMTYPE_SIM:
2194 sc->rge_intrs = RGE_INTRS_TIMER;
2195 sc->rge_rx_ack = RGE_ISR_PCS_TIMEOUT;
2196 sc->rge_tx_ack = RGE_ISR_PCS_TIMEOUT;
2197 break;
2198 default:
2199 panic("%s: unknown imtype %d", device_xname(sc->sc_dev), imtype);
2200 }
2201 }
2202
2203 void
2204 rge_disable_hw_im(struct rge_softc *sc)
2205 {
2206 RGE_WRITE_2(sc, RGE_IM, 0);
2207 }
2208
2209 void
2210 rge_disable_sim_im(struct rge_softc *sc)
2211 {
2212 RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
2213 sc->rge_timerintr = 0;
2214 }
2215
2216 void
2217 rge_setup_sim_im(struct rge_softc *sc)
2218 {
2219 RGE_WRITE_4(sc, RGE_TIMERINT0, 0x2600);
2220 RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
2221 sc->rge_timerintr = 1;
2222 }
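
/*
 * Simulated interrupt moderation: rather than interrupting per packet,
 * the chip raises RGE_ISR_PCS_TIMEOUT when the RGE_TIMERCNT countdown
 * reaches the RGE_TIMERINT0 period (0x2600 ticks here), and the
 * interrupt handler rearms it by writing RGE_TIMERCNT again, batching
 * rx/tx completions between timer ticks.
 */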
2223
2224 void
2225 rge_setup_intr(struct rge_softc *sc, int imtype)
2226 {
2227 rge_config_imtype(sc, imtype);
2228
2229 /* Enable interrupts. */
2230 RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
2231
2232 switch (imtype) {
2233 case RGE_IMTYPE_NONE:
2234 rge_disable_sim_im(sc);
2235 rge_disable_hw_im(sc);
2236 break;
2237 case RGE_IMTYPE_SIM:
2238 rge_disable_hw_im(sc);
2239 rge_setup_sim_im(sc);
2240 break;
2241 default:
2242 panic("%s: unknown imtype %d", device_xname(sc->sc_dev), imtype);
2243 }
2244 }
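
/*
 * Typical use from the init path (sketch): choose the moderation mode
 * once and let rge_config_imtype() fill in the rge_intrs/rge_*_ack
 * masks consumed by the interrupt handler, e.g.
 *
 *	rge_setup_intr(sc, RGE_IMTYPE_SIM);
 */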
2245
2246 void
2247 rge_exit_oob(struct rge_softc *sc)
2248 {
2249 int i;
2250
2251 RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
2252 RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
2253 RGE_RXCFG_ERRPKT);
2254
2255 /* Disable RealWoW. */
2256 rge_write_mac_ocp(sc, 0xc0bc, 0x00ff);
2257
2258 rge_reset(sc);
2259
2260 /* Disable OOB. */
2261 RGE_CLRBIT_1(sc, RGE_MCUCMD, RGE_MCUCMD_IS_OOB);
2262
2263 RGE_MAC_CLRBIT(sc, 0xe8de, 0x4000);
2264
2265 for (i = 0; i < 10; i++) {
2266 DELAY(100);
2267 if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
2268 break;
2269 }
2270
2271 rge_write_mac_ocp(sc, 0xc0aa, 0x07d0);
2272 rge_write_mac_ocp(sc, 0xc0a6, 0x01b5);
2273 rge_write_mac_ocp(sc, 0xc01e, 0x5555);
2274
2275 for (i = 0; i < 10; i++) {
2276 DELAY(100);
2277 if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
2278 break;
2279 }
2280
2281 if (rge_read_mac_ocp(sc, 0xd42c) & 0x0100) {
2282 			printf("%s: waiting for UPS resume\n",
2283 			    device_xname(sc->sc_dev));
2284 for (i = 0; i < RGE_TIMEOUT; i++) {
2285 if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 2)
2286 break;
2287 DELAY(1000);
2288 }
2289 RGE_MAC_CLRBIT(sc, 0xd408, 0x0100);
2290 if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5)
2291 RGE_PHY_CLRBIT(sc, 0xa466, 0x0001);
2292 RGE_PHY_CLRBIT(sc, 0xa468, 0x000a);
2293 }
2294 }
2295
2296 void
2297 rge_write_csi(struct rge_softc *sc, uint32_t reg, uint32_t val)
2298 {
2299 int i;
2300
2301 RGE_WRITE_4(sc, RGE_CSIDR, val);
2302 RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
2303 (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT) | RGE_CSIAR_BUSY);
2304
2305 for (i = 0; i < 10; i++) {
2306 DELAY(100);
2307 if (!(RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY))
2308 break;
2309 }
2310
2311 DELAY(20);
2312 }
2313
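/*
 * Note the polarity convention shared by the CSI, EPHY and PHY OCP
 * windows: a write completes when the BUSY bit clears, whereas a read
 * completes when the flag becomes set, indicating that the data
 * register holds a valid value.
 */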
2314 uint32_t
2315 rge_read_csi(struct rge_softc *sc, uint32_t reg)
2316 {
2317 int i;
2318
2319 RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
2320 (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT));
2321
2322 for (i = 0; i < 10; i++) {
2323 DELAY(100);
2324 if (RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY)
2325 break;
2326 }
2327
2328 DELAY(20);
2329
2330 return (RGE_READ_4(sc, RGE_CSIDR));
2331 }
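
/*
 * rge_hw_init() performs a CSI read-modify-write by hand; a
 * hypothetical helper (sketch only, not used by this driver) for the
 * same pattern:
 */
static inline void
rge_setbit_csi(struct rge_softc *sc, uint32_t reg, uint32_t bits)
{
	rge_write_csi(sc, reg, rge_read_csi(sc, reg) | bits);
}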
2332
2333 void
2334 rge_write_mac_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
2335 {
2336 uint32_t tmp;
2337
2338 	tmp = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;	/* word-indexed register */
2339 	tmp += val;				/* 16-bit data in the low half */
2340 	tmp |= RGE_MACOCP_BUSY;			/* trigger the write */
2341 	RGE_WRITE_4(sc, RGE_MACOCP, tmp);
2342 }
2343
2344 uint16_t
2345 rge_read_mac_ocp(struct rge_softc *sc, uint16_t reg)
2346 {
2347 uint32_t val;
2348
2349 val = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
2350 RGE_WRITE_4(sc, RGE_MACOCP, val);
2351
2352 return (RGE_READ_4(sc, RGE_MACOCP) & RGE_MACOCP_DATA_MASK);
2353 }
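
/*
 * The RGE_MAC_SETBIT/RGE_MAC_CLRBIT macros used throughout are, in
 * effect, this read-modify-write (an illustrative sketch, not the
 * macro definitions themselves):
 */
static inline void
rge_mac_ocp_rmw(struct rge_softc *sc, uint16_t reg, uint16_t clrbits,
    uint16_t setbits)
{
	rge_write_mac_ocp(sc, reg,
	    (rge_read_mac_ocp(sc, reg) & ~clrbits) | setbits);
}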
2354
2355 void
2356 rge_write_ephy(struct rge_softc *sc, uint16_t reg, uint16_t val)
2357 {
2358 uint32_t tmp;
2359 int i;
2360
2361 tmp = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
2362 tmp |= RGE_EPHYAR_BUSY | (val & RGE_EPHYAR_DATA_MASK);
2363 RGE_WRITE_4(sc, RGE_EPHYAR, tmp);
2364
2365 for (i = 0; i < 10; i++) {
2366 DELAY(100);
2367 if (!(RGE_READ_4(sc, RGE_EPHYAR) & RGE_EPHYAR_BUSY))
2368 break;
2369 }
2370
2371 DELAY(20);
2372 }
2373
2374 uint16_t
2375 rge_read_ephy(struct rge_softc *sc, uint16_t reg)
2376 {
2377 uint32_t val;
2378 int i;
2379
2380 val = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
2381 RGE_WRITE_4(sc, RGE_EPHYAR, val);
2382
2383 for (i = 0; i < 10; i++) {
2384 DELAY(100);
2385 val = RGE_READ_4(sc, RGE_EPHYAR);
2386 if (val & RGE_EPHYAR_BUSY)
2387 break;
2388 }
2389
2390 DELAY(20);
2391
2392 return (val & RGE_EPHYAR_DATA_MASK);
2393 }
2394
2395 void
2396 rge_write_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg, uint16_t val)
2397 {
2398 uint16_t off, phyaddr;
2399
2400 phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
2401 phyaddr <<= 4;
2402
2403 off = addr ? reg : 0x10 + (reg % 8);
2404
2405 phyaddr += (off - 16) << 1;
2406
2407 rge_write_phy_ocp(sc, phyaddr, val);
2408 }
2409
2410 uint16_t
2411 rge_read_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg)
2412 {
2413 uint16_t off, phyaddr;
2414
2415 phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
2416 phyaddr <<= 4;
2417
2418 off = addr ? reg : 0x10 + (reg % 8);
2419
2420 phyaddr += (off - 16) << 1;
2421
2422 return (rge_read_phy_ocp(sc, phyaddr));
2423 }
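
/*
 * Worked example of the mapping above, for addr == 0 (the standard
 * GbE register file): reg 1 (BMSR) gives phyaddr = RGE_PHYBASE << 4
 * and off = 0x11, so the OCP address is (RGE_PHYBASE << 4) + 2.  Each
 * group of eight MII registers shares one 16-byte OCP page, two bytes
 * per register.
 */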
2424
2425 void
2426 rge_write_phy_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
2427 {
2428 uint32_t tmp;
2429 int i;
2430
2431 tmp = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
2432 tmp |= RGE_PHYOCP_BUSY | val;
2433 RGE_WRITE_4(sc, RGE_PHYOCP, tmp);
2434
2435 for (i = 0; i < RGE_TIMEOUT; i++) {
2436 DELAY(1);
2437 if (!(RGE_READ_4(sc, RGE_PHYOCP) & RGE_PHYOCP_BUSY))
2438 break;
2439 }
2440 }
2441
2442 uint16_t
2443 rge_read_phy_ocp(struct rge_softc *sc, uint16_t reg)
2444 {
2445 uint32_t val;
2446 int i;
2447
2448 val = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
2449 RGE_WRITE_4(sc, RGE_PHYOCP, val);
2450
2451 for (i = 0; i < RGE_TIMEOUT; i++) {
2452 DELAY(1);
2453 val = RGE_READ_4(sc, RGE_PHYOCP);
2454 if (val & RGE_PHYOCP_BUSY)
2455 break;
2456 }
2457
2458 return (val & RGE_PHYOCP_DATA_MASK);
2459 }
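
/*
 * Unlike the CSI and EPHY windows (10 polls of 100us each), the PHY
 * OCP window polls up to RGE_TIMEOUT times at 1us intervals.  Neither
 * path reports a timeout to its caller, so a wedged PHY surfaces only
 * as stale or zero data.
 */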
2460
2461 int
2462 rge_get_link_status(struct rge_softc *sc)
2463 {
2464 return ((RGE_READ_2(sc, RGE_PHYSTAT) & RGE_PHYSTAT_LINK) ? 1 : 0);
2465 }
2466
2467 void
2468 rge_txstart(struct work *wk, void *arg)
2469 {
2470 struct rge_softc *sc = arg;
2471
2472 RGE_WRITE_2(sc, RGE_TXSTART, RGE_TXSTART_START);
2473 }
2474
2475 void
2476 rge_tick(void *arg)
2477 {
2478 struct rge_softc *sc = arg;
2479 int s;
2480
2481 s = splnet();
2482 rge_link_state(sc);
2483 splx(s);
2484
2485 callout_schedule(&sc->sc_timeout, hz);
2486 }
2487
2488 void
2489 rge_link_state(struct rge_softc *sc)
2490 {
2491 struct ifnet *ifp = &sc->sc_ec.ec_if;
2492 int link = LINK_STATE_DOWN;
2493
2494 if (rge_get_link_status(sc))
2495 link = LINK_STATE_UP;
2496
2497 	if (ifp->if_link_state != link) {	/* XXX unlocked if_link_state read */
2498 if_link_state_change(ifp, link);
2499 }
2500 }
2501