/* $NetBSD: if_rge.c,v 1.24.4.2 2023/10/14 06:59:43 martin Exp $ */
/* $OpenBSD: if_rge.c,v 1.9 2020/12/12 11:48:53 jan Exp $ */

/*
 * Copyright (c) 2019, 2020 Kevin Lo <kevlo (at) openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_rge.c,v 1.24.4.2 2023/10/14 06:59:43 martin Exp $");

#include <sys/types.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/endian.h>
#include <sys/callout.h>
#include <sys/workqueue.h>

#include <net/if.h>

#include <net/if_dl.h>
#include <net/if_ether.h>

#include <net/if_media.h>

#include <netinet/in.h>

#include <net/bpf.h>

#include <sys/bus.h>
#include <machine/intr.h>

#include <dev/mii/mii.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_rgereg.h>

#ifdef __NetBSD__
#define letoh32		htole32
#define nitems(x)	__arraycount(x)

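/*
 * Compat shim: OpenBSD's MCLGETL() returns an mbuf with an external
 * buffer of the requested size.  Emulate it with MGETHDR plus
 * MEXTMALLOC so the OpenBSD-derived code below can stay unchanged.
 */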
static struct mbuf *
MCLGETL(struct rge_softc *sc __unused, int how, u_int size)
{
	struct mbuf *m;

	MGETHDR(m, how, MT_DATA);
	if (m == NULL)
		return NULL;

	MEXTMALLOC(m, size, how);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return NULL;
	}
	return m;
}

#ifdef NET_MPSAFE
#define RGE_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif
#endif	/* __NetBSD__ */

#ifdef RGE_DEBUG
#define DPRINTF(x)	do { if (rge_debug > 0) printf x; } while (0)
int rge_debug = 0;
#else
#define DPRINTF(x)
#endif

static int	rge_match(device_t, cfdata_t, void *);
static void	rge_attach(device_t, device_t, void *);
int		rge_intr(void *);
int		rge_encap(struct rge_softc *, struct mbuf *, int);
int		rge_ioctl(struct ifnet *, u_long, void *);
void		rge_start(struct ifnet *);
void		rge_watchdog(struct ifnet *);
int		rge_init(struct ifnet *);
void		rge_stop(struct ifnet *, int);
int		rge_ifmedia_upd(struct ifnet *);
void		rge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
int		rge_allocmem(struct rge_softc *);
int		rge_newbuf(struct rge_softc *, int);
void		rge_discard_rxbuf(struct rge_softc *, int);
int		rge_rx_list_init(struct rge_softc *);
void		rge_tx_list_init(struct rge_softc *);
int		rge_rxeof(struct rge_softc *);
int		rge_txeof(struct rge_softc *);
void		rge_reset(struct rge_softc *);
void		rge_iff(struct rge_softc *);
void		rge_set_phy_power(struct rge_softc *, int);
void		rge_phy_config(struct rge_softc *);
void		rge_phy_config_mac_cfg2(struct rge_softc *);
void		rge_phy_config_mac_cfg3(struct rge_softc *);
void		rge_phy_config_mac_cfg4(struct rge_softc *);
void		rge_phy_config_mac_cfg5(struct rge_softc *);
void		rge_phy_config_mcu(struct rge_softc *, uint16_t);
void		rge_set_macaddr(struct rge_softc *, const uint8_t *);
void		rge_get_macaddr(struct rge_softc *, uint8_t *);
void		rge_hw_init(struct rge_softc *);
void		rge_disable_phy_ocp_pwrsave(struct rge_softc *);
void		rge_patch_phy_mcu(struct rge_softc *, int);
void		rge_add_media_types(struct rge_softc *);
void		rge_config_imtype(struct rge_softc *, int);
void		rge_disable_hw_im(struct rge_softc *);
void		rge_disable_sim_im(struct rge_softc *);
void		rge_setup_sim_im(struct rge_softc *);
void		rge_setup_intr(struct rge_softc *, int);
void		rge_exit_oob(struct rge_softc *);
void		rge_write_csi(struct rge_softc *, uint32_t, uint32_t);
uint32_t	rge_read_csi(struct rge_softc *, uint32_t);
void		rge_write_mac_ocp(struct rge_softc *, uint16_t, uint16_t);
uint16_t	rge_read_mac_ocp(struct rge_softc *, uint16_t);
void		rge_write_ephy(struct rge_softc *, uint16_t, uint16_t);
uint16_t	rge_read_ephy(struct rge_softc *, uint16_t);
void		rge_write_phy(struct rge_softc *, uint16_t, uint16_t, uint16_t);
uint16_t	rge_read_phy(struct rge_softc *, uint16_t, uint16_t);
void		rge_write_phy_ocp(struct rge_softc *, uint16_t, uint16_t);
uint16_t	rge_read_phy_ocp(struct rge_softc *, uint16_t);
int		rge_get_link_status(struct rge_softc *);
void		rge_txstart(struct work *, void *);
void		rge_tick(void *);
void		rge_link_state(struct rge_softc *);

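/*
 * Per-revision register/value tables for the PHY MCU patches; the
 * RTL8125_MAC_CFG*_MCU contents come from if_rgereg.h and are
 * presumably consumed by rge_phy_config_mcu().
 */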
static const struct {
	uint16_t reg;
	uint16_t val;
} rtl8125_mac_cfg2_mcu[] = {
	RTL8125_MAC_CFG2_MCU
}, rtl8125_mac_cfg3_mcu[] = {
	RTL8125_MAC_CFG3_MCU
}, rtl8125_mac_cfg4_mcu[] = {
	RTL8125_MAC_CFG4_MCU
}, rtl8125_mac_cfg5_mcu[] = {
	RTL8125_MAC_CFG5_MCU
};

CFATTACH_DECL_NEW(rge, sizeof(struct rge_softc), rge_match, rge_attach,
    NULL, NULL);	/* Sevan - detach function? */

static const struct device_compatible_entry compat_data[] = {
	{ .id = PCI_ID_CODE(PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_E3000) },
	{ .id = PCI_ID_CODE(PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8125) },

	PCI_COMPAT_EOL
};

static int
rge_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;

	return pci_compatible_match(pa, compat_data);
}

void
rge_attach(device_t parent, device_t self, void *aux)
{
	struct rge_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t *ihp;
	char intrbuf[PCI_INTRSTR_LEN];
	const char *intrstr = NULL;
	struct ifnet *ifp;
	pcireg_t reg;
	uint32_t hwrev;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int offset;
	pcireg_t command;

	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);

	sc->sc_dev = self;

	pci_aprint_devinfo(pa, "Ethernet controller");

	/*
	 * Map control/status registers.
	 */
	if (pci_mapreg_map(pa, RGE_PCI_BAR2, PCI_MAPREG_TYPE_MEM |
	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->rge_btag, &sc->rge_bhandle,
	    NULL, &sc->rge_bsize)) {
		if (pci_mapreg_map(pa, RGE_PCI_BAR1, PCI_MAPREG_TYPE_MEM |
		    PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->rge_btag,
		    &sc->rge_bhandle, NULL, &sc->rge_bsize)) {
			if (pci_mapreg_map(pa, RGE_PCI_BAR0,
			    PCI_MAPREG_TYPE_IO, 0, &sc->rge_btag,
			    &sc->rge_bhandle, NULL, &sc->rge_bsize)) {
				aprint_error(": can't map mem or i/o space\n");
				return;
			}
		}
	}

	int counts[PCI_INTR_TYPE_SIZE] = {
		[PCI_INTR_TYPE_INTX] = 1,
		[PCI_INTR_TYPE_MSI] = 1,
		[PCI_INTR_TYPE_MSIX] = 1,
	};
	int max_type = PCI_INTR_TYPE_MSIX;

	/*
	 * Allocate interrupt.
	 */
	if (pci_intr_alloc(pa, &ihp, counts, max_type) != 0) {
		aprint_error(": couldn't map interrupt\n");
		return;
	}
	switch (pci_intr_type(pc, ihp[0])) {
	case PCI_INTR_TYPE_MSIX:
	case PCI_INTR_TYPE_MSI:
		sc->rge_flags |= RGE_FLAG_MSI;
		break;
	default:
		break;
	}
	intrstr = pci_intr_string(pc, ihp[0], intrbuf, sizeof(intrbuf));
	sc->sc_ih = pci_intr_establish_xname(pc, ihp[0], IPL_NET, rge_intr,
	    sc, device_xname(sc->sc_dev));
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "couldn't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;

	/* Determine hardware revision. */
	hwrev = RGE_READ_4(sc, RGE_TXCFG) & RGE_TXCFG_HWREV;
	switch (hwrev) {
	case 0x60800000:
		sc->rge_type = MAC_CFG2;
		break;
	case 0x60900000:
		sc->rge_type = MAC_CFG3;
		break;
	case 0x64000000:
		sc->rge_type = MAC_CFG4;
		break;
	case 0x64100000:
		sc->rge_type = MAC_CFG5;
		break;
	default:
		aprint_error(": unknown version 0x%08x\n", hwrev);
		return;
	}

	rge_config_imtype(sc, RGE_IMTYPE_SIM);

	/*
	 * PCI Express check.
	 */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
	    &offset, NULL)) {
		/* Disable PCIe ASPM and ECPM. */
		reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    offset + PCIE_LCSR);
		reg &= ~(PCIE_LCSR_ASPM_L0S | PCIE_LCSR_ASPM_L1 |
		    PCIE_LCSR_ENCLKPM);
		pci_conf_write(pa->pa_pc, pa->pa_tag, offset + PCIE_LCSR,
		    reg);
	}

	rge_exit_oob(sc);
	rge_hw_init(sc);

	rge_get_macaddr(sc, eaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(eaddr));

	memcpy(sc->sc_enaddr, eaddr, ETHER_ADDR_LEN);

	rge_set_phy_power(sc, 1);
	rge_phy_config(sc);

	if (rge_allocmem(sc))
		return;

	ifp = &sc->sc_ec.ec_if;
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef RGE_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = rge_ioctl;
	ifp->if_stop = rge_stop;
	ifp->if_start = rge_start;
	ifp->if_init = rge_init;
	ifp->if_watchdog = rge_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, RGE_TX_LIST_CNT - 1);

#if notyet
	ifp->if_capabilities = IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_TCPv4_Tx |
	    IFCAP_CSUM_UDPv4_Rx | IFCAP_CSUM_UDPv4_Tx;
#endif

	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;

	callout_init(&sc->sc_timeout, CALLOUT_FLAGS);
	callout_setfunc(&sc->sc_timeout, rge_tick, sc);

	command = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	command |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);

	/* Initialize ifmedia structures. */
	sc->sc_ec.ec_ifmedia = &sc->sc_media;
	ifmedia_init(&sc->sc_media, IFM_IMASK, rge_ifmedia_upd,
	    rge_ifmedia_sts);
	rge_add_media_types(sc);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
	sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media;

	if_attach(ifp);
	ether_ifattach(ifp, eaddr);

	if (pmf_device_register(self, NULL, NULL))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");
}

int
rge_intr(void *arg)
{
	struct rge_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint32_t status;
	int claimed = 0, rx, tx;

	if (!(ifp->if_flags & IFF_RUNNING))
		return (0);

	/* Disable interrupts. */
	RGE_WRITE_4(sc, RGE_IMR, 0);

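	/*
	 * With INTx the interrupt line may be shared, so bail out
	 * unless the status register shows one of our interrupt
	 * sources pending.
	 */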
	if (!(sc->rge_flags & RGE_FLAG_MSI)) {
		if ((RGE_READ_4(sc, RGE_ISR) & sc->rge_intrs) == 0)
			return (0);
	}

	status = RGE_READ_4(sc, RGE_ISR);
	if (status)
		RGE_WRITE_4(sc, RGE_ISR, status);

	if (status & RGE_ISR_PCS_TIMEOUT)
		claimed = 1;

	rx = tx = 0;
	if (status & sc->rge_intrs) {
		if (status &
		    (sc->rge_rx_ack | RGE_ISR_RX_ERR | RGE_ISR_RX_FIFO_OFLOW)) {
			rx |= rge_rxeof(sc);
			claimed = 1;
		}

		if (status & (sc->rge_tx_ack | RGE_ISR_TX_ERR)) {
			tx |= rge_txeof(sc);
			claimed = 1;
		}

		if (status & RGE_ISR_SYSTEM_ERR) {
			KERNEL_LOCK(1, NULL);
			rge_init(ifp);
			KERNEL_UNLOCK_ONE(NULL);
			claimed = 1;
		}
	}

	if (sc->rge_timerintr) {
		if ((tx | rx) == 0) {
			/*
			 * Nothing needs to be processed; fall back
			 * to using TX/RX interrupts.
			 */
			rge_setup_intr(sc, RGE_IMTYPE_NONE);

			/*
			 * Recollect, mainly to avoid the possible
			 * race introduced by changing interrupt
			 * masks.
			 */
			rge_rxeof(sc);
			rge_txeof(sc);
		} else
			RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
	} else if (tx | rx) {
		/*
		 * Assume that using simulated interrupt moderation
		 * (hardware timer based) could reduce the interrupt
		 * rate.
		 */
		rge_setup_intr(sc, RGE_IMTYPE_SIM);
	}

	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);

	return (claimed);
}

int
rge_encap(struct rge_softc *sc, struct mbuf *m, int idx)
{
	struct rge_tx_desc *d = NULL;
	struct rge_txq *txq;
	bus_dmamap_t txmap;
	uint32_t cmdsts, cflags = 0;
	int cur, error, i, last, nsegs;

#if notyet
	/*
	 * Set RGE_TDEXTSTS_IPCSUM if any checksum offloading is requested.
	 * Otherwise, RGE_TDEXTSTS_TCPCSUM / RGE_TDEXTSTS_UDPCSUM does not
	 * take effect.
	 */
	if ((m->m_pkthdr.csum_flags &
	    (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) != 0) {
		cflags |= RGE_TDEXTSTS_IPCSUM;
		if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
			cflags |= RGE_TDEXTSTS_TCPCSUM;
		if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
			cflags |= RGE_TDEXTSTS_UDPCSUM;
	}
#endif

	txq = &sc->rge_ldata.rge_txq[idx];
	txmap = txq->txq_dmamap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m, BUS_DMA_NOWAIT);
	switch (error) {
	case 0:
		break;
	case EFBIG:		/* mbuf chain is too fragmented */
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m,
		    BUS_DMA_NOWAIT) == 0)
			break;

		/* FALLTHROUGH */
	default:
		return (0);
	}

	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	nsegs = txmap->dm_nsegs;

	/* Set up hardware VLAN tagging. */
	if (vlan_has_tag(m))
		cflags |= bswap16(vlan_get_tag(m)) | RGE_TDEXTSTS_VTAG;

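	/*
	 * Build the descriptor chain: SOF is set on the first
	 * descriptor, but its OWN bit is deliberately withheld until
	 * the whole chain has been written (see below), so the chip
	 * never sees a partially built packet.
	 */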
	last = cur = idx;
	cmdsts = RGE_TDCMDSTS_SOF;

	for (i = 0; i < txmap->dm_nsegs; i++) {
		d = &sc->rge_ldata.rge_tx_list[cur];

		d->rge_extsts = htole32(cflags);
		d->rge_addrlo = htole32(RGE_ADDR_LO(txmap->dm_segs[i].ds_addr));
		d->rge_addrhi = htole32(RGE_ADDR_HI(txmap->dm_segs[i].ds_addr));

		cmdsts |= txmap->dm_segs[i].ds_len;

		if (cur == RGE_TX_LIST_CNT - 1)
			cmdsts |= RGE_TDCMDSTS_EOR;

		d->rge_cmdsts = htole32(cmdsts);

		last = cur;
		cmdsts = RGE_TDCMDSTS_OWN;
		cur = RGE_NEXT_TX_DESC(cur);
	}

	/* Set EOF on the last descriptor. */
	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_EOF);

	/* Transfer ownership of packet to the chip. */
	d = &sc->rge_ldata.rge_tx_list[idx];

	d->rge_cmdsts |= htole32(RGE_TDCMDSTS_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
	    cur * sizeof(struct rge_tx_desc), sizeof(struct rge_tx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Update info of TX queue and descriptors. */
	txq->txq_mbuf = m;
	txq->txq_descidx = last;

	return (nsegs);
}

int
rge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct rge_softc *sc = ifp->if_softc;
	//struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		/*
		 * XXX set an ifflags callback and let ether_ioctl
		 * handle all of this.
		 */
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				rge_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				rge_stop(ifp, 1);
		}
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			rge_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
rge_start(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int free, idx, used;
	int queued = 0;

#define LINK_STATE_IS_UP(_s)	\
	((_s) >= LINK_STATE_UP || (_s) == LINK_STATE_UNKNOWN)

	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
		IFQ_PURGE(&ifp->if_snd);
		return;
	}

	/* Calculate free space. */
	idx = sc->rge_ldata.rge_txq_prodidx;
	free = sc->rge_ldata.rge_txq_considx;
	if (free <= idx)
		free += RGE_TX_LIST_CNT;
	free -= idx;

	for (;;) {
		if (RGE_TX_NSEGS >= free + 2) {
			SET(ifp->if_flags, IFF_OACTIVE);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		used = rge_encap(sc, m, idx);
		if (used == 0) {
			m_freem(m);
			continue;
		}

		KASSERT(used <= free);
		free -= used;

		bpf_mtap(ifp, m, BPF_D_OUT);

		idx += used;
		if (idx >= RGE_TX_LIST_CNT)
			idx -= RGE_TX_LIST_CNT;

		queued++;
	}

	if (queued == 0)
		return;

	/* Set a timeout in case the chip goes out to lunch. */
	ifp->if_timer = 5;

	sc->rge_ldata.rge_txq_prodidx = idx;
#if 0
	ifq_serialize(ifq, &sc->sc_task);
#else
	rge_txstart(&sc->sc_task, sc);
#endif
}

void
rge_watchdog(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;

	device_printf(sc->sc_dev, "watchdog timeout\n");
	if_statinc(ifp, if_oerrors);

	rge_init(ifp);
}

int
rge_init(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	uint32_t val;
	int i;

	rge_stop(ifp, 0);

	/* Set MAC address. */
	rge_set_macaddr(sc, CLLADDR(ifp->if_sadl));

	/* Set maximum frame size. */
	RGE_WRITE_2(sc, RGE_RXMAXSIZE, RGE_JUMBO_FRAMELEN);

	/* Initialize RX descriptors list. */
	if (rge_rx_list_init(sc) == ENOBUFS) {
		device_printf(sc->sc_dev,
		    "init failed: no memory for RX buffers\n");
		rge_stop(ifp, 1);
		return (ENOBUFS);
	}

	/* Initialize TX descriptors. */
	rge_tx_list_init(sc);

	/* Load the addresses of the RX and TX lists into the chip. */
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_LO,
	    RGE_ADDR_LO(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_HI,
	    RGE_ADDR_HI(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_LO,
	    RGE_ADDR_LO(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_HI,
	    RGE_ADDR_HI(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));

	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);

	RGE_CLRBIT_1(sc, 0xf1, 0x80);
	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
	RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_RDY_TO_L23);

	/* Clear interrupt moderation timer. */
	for (i = 0; i < 64; i++)
		RGE_WRITE_4(sc, RGE_INTMITI(i), 0);

	/* Set the initial RX and TX configurations. */
	RGE_WRITE_4(sc, RGE_RXCFG, RGE_RXCFG_CONFIG);
	RGE_WRITE_4(sc, RGE_TXCFG, RGE_TXCFG_CONFIG);

	val = rge_read_csi(sc, 0x70c) & ~0xff000000;
	rge_write_csi(sc, 0x70c, val | 0x27000000);

	/* Enable hardware optimization function. */
	val = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x78) & ~0x00007000;
	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x78, val | 0x00005000);

	RGE_WRITE_2(sc, 0x0382, 0x221b);
	RGE_WRITE_1(sc, 0x4500, 0);
	RGE_WRITE_2(sc, 0x4800, 0);
	RGE_CLRBIT_1(sc, RGE_CFG1, RGE_CFG1_SPEED_DOWN);

	rge_write_mac_ocp(sc, 0xc140, 0xffff);
	rge_write_mac_ocp(sc, 0xc142, 0xffff);

	val = rge_read_mac_ocp(sc, 0xd3e2) & ~0x0fff;
	rge_write_mac_ocp(sc, 0xd3e2, val | 0x03a9);

	RGE_MAC_CLRBIT(sc, 0xd3e4, 0x00ff);
	RGE_MAC_SETBIT(sc, 0xe860, 0x0080);
	RGE_MAC_SETBIT(sc, 0xeb58, 0x0001);

	val = rge_read_mac_ocp(sc, 0xe614) & ~0x0700;
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		rge_write_mac_ocp(sc, 0xe614, val | 0x0400);
	else
		rge_write_mac_ocp(sc, 0xe614, val | 0x0200);

	RGE_MAC_CLRBIT(sc, 0xe63e, 0x0c00);

	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
		val = rge_read_mac_ocp(sc, 0xe63e) & ~0x0030;
		rge_write_mac_ocp(sc, 0xe63e, val | 0x0020);
	} else
		RGE_MAC_CLRBIT(sc, 0xe63e, 0x0030);

	RGE_MAC_SETBIT(sc, 0xc0b4, 0x000c);

	val = rge_read_mac_ocp(sc, 0xeb6a) & ~0x00ff;
	rge_write_mac_ocp(sc, 0xeb6a, val | 0x0033);

	val = rge_read_mac_ocp(sc, 0xeb50) & ~0x03e0;
	rge_write_mac_ocp(sc, 0xeb50, val | 0x0040);

	val = rge_read_mac_ocp(sc, 0xe056) & ~0x00f0;
	rge_write_mac_ocp(sc, 0xe056, val | 0x0030);

	RGE_WRITE_1(sc, RGE_TDFNR, 0x10);

	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_TX_10M_PS_EN);

	RGE_MAC_CLRBIT(sc, 0xe040, 0x1000);

	val = rge_read_mac_ocp(sc, 0xea1c) & ~0x0003;
	rge_write_mac_ocp(sc, 0xea1c, val | 0x0001);

	val = rge_read_mac_ocp(sc, 0xe0c0) & ~0x4f0f;
	rge_write_mac_ocp(sc, 0xe0c0, val | 0x4403);

	RGE_MAC_SETBIT(sc, 0xe052, 0x0068);
	RGE_MAC_CLRBIT(sc, 0xe052, 0x0080);

	val = rge_read_mac_ocp(sc, 0xc0ac) & ~0x0080;
	rge_write_mac_ocp(sc, 0xc0ac, val | 0x1f00);

	val = rge_read_mac_ocp(sc, 0xd430) & ~0x0fff;
	rge_write_mac_ocp(sc, 0xd430, val | 0x047f);

	val = rge_read_mac_ocp(sc, 0xe84c) & ~0x0040;
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		rge_write_mac_ocp(sc, 0xe84c, 0x00c0);
	else
		rge_write_mac_ocp(sc, 0xe84c, 0x0080);

	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_PFM_EN);

	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3)
		RGE_SETBIT_1(sc, RGE_MCUCMD, 0x01);

	/* Disable EEE plus. */
	RGE_MAC_CLRBIT(sc, 0xe080, 0x0002);

	RGE_MAC_CLRBIT(sc, 0xea1c, 0x0004);

	RGE_MAC_SETBIT(sc, 0xeb54, 0x0001);
	DELAY(1);
	RGE_MAC_CLRBIT(sc, 0xeb54, 0x0001);

	RGE_CLRBIT_4(sc, 0x1880, 0x0030);

	rge_write_mac_ocp(sc, 0xe098, 0xc302);

	if ((sc->sc_ec.ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0)
		RGE_SETBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);
	else
		RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);

	RGE_SETBIT_2(sc, RGE_CPLUSCMD, RGE_CPLUSCMD_RXCSUM);

	for (i = 0; i < 10; i++) {
		if (!(rge_read_mac_ocp(sc, 0xe00e) & 0x2000))
			break;
		DELAY(1000);
	}

	/* Disable RXDV gate. */
	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
	DELAY(2000);

	rge_ifmedia_upd(ifp);

	/* Enable transmit and receive. */
	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_TXENB | RGE_CMD_RXENB);

	/* Program promiscuous mode and multicast filters. */
	rge_iff(sc);

	RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
	RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);

	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);

	/* Enable interrupts. */
	rge_setup_intr(sc, RGE_IMTYPE_SIM);

	ifp->if_flags |= IFF_RUNNING;
	CLR(ifp->if_flags, IFF_OACTIVE);

	callout_schedule(&sc->sc_timeout, 1);

	return (0);
}

/*
 * Stop the adapter and free any mbufs allocated to the RX and TX lists.
 */
void
rge_stop(struct ifnet *ifp, int disable)
{
	struct rge_softc *sc = ifp->if_softc;
	int i;

	callout_halt(&sc->sc_timeout, NULL);

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_RUNNING;
	sc->rge_timerintr = 0;

	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
	    RGE_RXCFG_ERRPKT);

	RGE_WRITE_4(sc, RGE_IMR, 0);

	/* Clear timer interrupts. */
	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT1, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT2, 0);
	RGE_WRITE_4(sc, RGE_TIMERINT3, 0);

	rge_reset(sc);

	// intr_barrier(sc->sc_ih);
	// ifq_barrier(&ifp->if_snd);
	/* ifq_clr_oactive(&ifp->if_snd); Sevan - OpenBSD queue API */

	if (sc->rge_head != NULL) {
		m_freem(sc->rge_head);
		sc->rge_head = sc->rge_tail = NULL;
	}

	/* Free the TX list buffers. */
	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
		if (sc->rge_ldata.rge_txq[i].txq_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->rge_ldata.rge_txq[i].txq_dmamap);
			m_freem(sc->rge_ldata.rge_txq[i].txq_mbuf);
			sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
		}
	}

	/* Free the RX list buffers. */
	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
		if (sc->rge_ldata.rge_rxq[i].rxq_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->rge_ldata.rge_rxq[i].rxq_dmamap);
			m_freem(sc->rge_ldata.rge_rxq[i].rxq_mbuf);
			sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
		}
	}
}

/*
 * Set media options.
 */
int
rge_ifmedia_upd(struct ifnet *ifp)
{
	struct rge_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_media;
	int anar, gig, val;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Disable Gigabit Lite. */
	RGE_PHY_CLRBIT(sc, 0xa428, 0x0200);
	RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0001);

	val = rge_read_phy_ocp(sc, 0xa5d4);
	val &= ~RGE_ADV_2500TFDX;

	anar = gig = 0;
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		val |= RGE_ADV_2500TFDX;
		break;
	case IFM_2500_T:
		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		val |= RGE_ADV_2500TFDX;
		ifp->if_baudrate = IF_Mbps(2500);
		break;
	case IFM_1000_T:
		anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
		gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
		ifp->if_baudrate = IF_Gbps(1);
		break;
	case IFM_100_TX:
		gig = rge_read_phy(sc, 0, MII_100T2CR) &
		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
		    ANAR_TX | ANAR_TX_FD | ANAR_10_FD | ANAR_10 :
		    ANAR_TX | ANAR_10_FD | ANAR_10;
		ifp->if_baudrate = IF_Mbps(100);
		break;
	case IFM_10_T:
		gig = rge_read_phy(sc, 0, MII_100T2CR) &
		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
		    ANAR_10_FD | ANAR_10 : ANAR_10;
		ifp->if_baudrate = IF_Mbps(10);
		break;
	default:
		device_printf(sc->sc_dev, "unsupported media type\n");
		return (EINVAL);
	}

	rge_write_phy(sc, 0, MII_ANAR, anar | ANAR_PAUSE_ASYM | ANAR_FC);
	rge_write_phy(sc, 0, MII_100T2CR, gig);
	rge_write_phy_ocp(sc, 0xa5d4, val);
	rge_write_phy(sc, 0, MII_BMCR, BMCR_RESET | BMCR_AUTOEN |
	    BMCR_STARTNEG);

	return (0);
}

/*
 * Report current media status.
 */
void
rge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct rge_softc *sc = ifp->if_softc;
	uint16_t status = 0;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (rge_get_link_status(sc)) {
		ifmr->ifm_status |= IFM_ACTIVE;

		status = RGE_READ_2(sc, RGE_PHYSTAT);
		if ((status & RGE_PHYSTAT_FDX) ||
		    (status & RGE_PHYSTAT_2500MBPS))
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;

		if (status & RGE_PHYSTAT_10MBPS)
			ifmr->ifm_active |= IFM_10_T;
		else if (status & RGE_PHYSTAT_100MBPS)
			ifmr->ifm_active |= IFM_100_TX;
		else if (status & RGE_PHYSTAT_1000MBPS)
			ifmr->ifm_active |= IFM_1000_T;
		else if (status & RGE_PHYSTAT_2500MBPS)
			ifmr->ifm_active |= IFM_2500_T;
	}
}

/*
 * Allocate memory for RX/TX rings.
 */
int
rge_allocmem(struct rge_softc *sc)
{
	int error, i;

	/* Allocate DMA'able memory for the TX ring. */
	error = bus_dmamap_create(sc->sc_dmat, RGE_TX_LIST_SZ, 1,
	    RGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT, &sc->rge_ldata.rge_tx_list_map);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't create TX list map\n");
		return (error);
	}
	error = bus_dmamem_alloc(sc->sc_dmat, RGE_TX_LIST_SZ, RGE_ALIGN, 0,
	    &sc->rge_ldata.rge_tx_listseg, 1, &sc->rge_ldata.rge_tx_listnseg,
	    BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't alloc TX list\n");
		return (error);
	}

	/* Load the map for the TX ring. */
	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
	    sc->rge_ldata.rge_tx_listnseg, RGE_TX_LIST_SZ,
	    (void **)&sc->rge_ldata.rge_tx_list,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't map TX dma buffers\n");
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
		    sc->rge_ldata.rge_tx_listnseg);
		return (error);
	}
	memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);
	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
	    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't load TX dma map\n");
		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map);
		bus_dmamem_unmap(sc->sc_dmat,
		    sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ);
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
		    sc->rge_ldata.rge_tx_listnseg);
		return (error);
	}

	/* Create DMA maps for TX buffers. */
	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN,
		    RGE_TX_NSEGS, RGE_JUMBO_FRAMELEN, 0, 0,
		    &sc->rge_ldata.rge_txq[i].txq_dmamap);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't create DMA map for TX\n");
			return (error);
		}
	}

	/* Allocate DMA'able memory for the RX ring. */
	error = bus_dmamap_create(sc->sc_dmat, RGE_RX_LIST_SZ, 1,
	    RGE_RX_LIST_SZ, 0, 0, &sc->rge_ldata.rge_rx_list_map);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't create RX list map\n");
		return (error);
	}
	error = bus_dmamem_alloc(sc->sc_dmat, RGE_RX_LIST_SZ, RGE_ALIGN, 0,
	    &sc->rge_ldata.rge_rx_listseg, 1, &sc->rge_ldata.rge_rx_listnseg,
	    BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't alloc RX list\n");
		return (error);
	}

	/* Load the map for the RX ring. */
	error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
	    sc->rge_ldata.rge_rx_listnseg, RGE_RX_LIST_SZ,
	    (void **)&sc->rge_ldata.rge_rx_list,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't map RX dma buffers\n");
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
		    sc->rge_ldata.rge_rx_listnseg);
		return (error);
	}
	memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);
	error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
	    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't load RX dma map\n");
		bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map);
		bus_dmamem_unmap(sc->sc_dmat,
		    sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ);
		bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
		    sc->rge_ldata.rge_rx_listnseg);
		return (error);
	}

	/* Create DMA maps for RX buffers. */
	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN, 1,
		    RGE_JUMBO_FRAMELEN, 0, 0,
		    &sc->rge_ldata.rge_rxq[i].rxq_dmamap);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't create DMA map for RX\n");
			return (error);
		}
	}

	return (error);
}

/*
 * Initialize the RX descriptor and attach an mbuf cluster.
 */
int
rge_newbuf(struct rge_softc *sc, int idx)
{
	struct mbuf *m;
	struct rge_rx_desc *r;
	struct rge_rxq *rxq;
	bus_dmamap_t rxmap;

	m = MCLGETL(NULL, M_DONTWAIT, RGE_JUMBO_FRAMELEN);
	if (m == NULL)
		return (ENOBUFS);

	m->m_len = m->m_pkthdr.len = RGE_JUMBO_FRAMELEN;

	rxq = &sc->rge_ldata.rge_rxq[idx];
	rxmap = rxq->rxq_dmamap;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m, BUS_DMA_NOWAIT))
		goto out;

	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Map the segments into RX descriptors. */
	r = &sc->rge_ldata.rge_rx_list[idx];

	rxq->rxq_mbuf = m;

	r->hi_qword1.rx_qword4.rge_extsts = 0;
	r->hi_qword0.rge_addr = htole64(rxmap->dm_segs[0].ds_addr);

	r->hi_qword1.rx_qword4.rge_cmdsts = htole32(rxmap->dm_segs[0].ds_len);
	if (idx == RGE_RX_LIST_CNT - 1)
		r->hi_qword1.rx_qword4.rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);

	r->hi_qword1.rx_qword4.rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
out:
	if (m != NULL)
		m_freem(m);
	return (ENOMEM);
}

void
rge_discard_rxbuf(struct rge_softc *sc, int idx)
{
	struct rge_rx_desc *r;

	r = &sc->rge_ldata.rge_rx_list[idx];

	r->hi_qword1.rx_qword4.rge_cmdsts = htole32(RGE_JUMBO_FRAMELEN);
	r->hi_qword1.rx_qword4.rge_extsts = 0;
	if (idx == RGE_RX_LIST_CNT - 1)
		r->hi_qword1.rx_qword4.rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
	r->hi_qword1.rx_qword4.rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
	    idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

int
rge_rx_list_init(struct rge_softc *sc)
{
	int i;

	memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);

	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
		sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
		if (rge_newbuf(sc, i) == ENOBUFS)
			return (ENOBUFS);
	}

	sc->rge_ldata.rge_rxq_prodidx = sc->rge_ldata.rge_rxq_considx = 0;
	sc->rge_head = sc->rge_tail = NULL;

	return (0);
}

void
rge_tx_list_init(struct rge_softc *sc)
{
	int i;

	memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);

	for (i = 0; i < RGE_TX_LIST_CNT; i++)
		sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;

	bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map, 0,
	    sc->rge_ldata.rge_tx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->rge_ldata.rge_txq_prodidx = sc->rge_ldata.rge_txq_considx = 0;
}

int
rge_rxeof(struct rge_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct rge_rx_desc *cur_rx;
	struct rge_rxq *rxq;
	uint32_t rxstat, extsts;
	int i, total_len, rx = 0;

	for (i = sc->rge_ldata.rge_rxq_considx; ; i = RGE_NEXT_RX_DESC(i)) {
		/* Invalidate the descriptor memory. */
		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
		    i * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_rx = &sc->rge_ldata.rge_rx_list[i];

		if (RGE_OWN(cur_rx))
			break;

		rxstat = letoh32(cur_rx->hi_qword1.rx_qword4.rge_cmdsts);
		extsts = letoh32(cur_rx->hi_qword1.rx_qword4.rge_extsts);

		total_len = RGE_RXBYTES(cur_rx);
		rxq = &sc->rge_ldata.rge_rxq[i];
		m = rxq->rxq_mbuf;
		rxq->rxq_mbuf = NULL;
		rx = 1;

		/* Invalidate the RX mbuf and unload its map. */
		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_dmamap, 0,
		    rxq->rxq_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxq->rxq_dmamap);

		if ((rxstat & (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) !=
		    (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) {
			rge_discard_rxbuf(sc, i);
			continue;
		}

		if (rxstat & RGE_RDCMDSTS_RXERRSUM) {
			if_statinc(ifp, if_ierrors);
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->rge_head != NULL) {
				m_freem(sc->rge_head);
				sc->rge_head = sc->rge_tail = NULL;
			}
			rge_discard_rxbuf(sc, i);
			continue;
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
		 */
		if (rge_newbuf(sc, i) == ENOBUFS) {
			if (sc->rge_head != NULL) {
				m_freem(sc->rge_head);
				sc->rge_head = sc->rge_tail = NULL;
			}
			rge_discard_rxbuf(sc, i);
			continue;
		}

		m_set_rcvif(m, ifp);
		if (sc->rge_head != NULL) {
			m->m_len = total_len;
			/*
			 * Special case: if there are 4 or fewer bytes
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes are the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->rge_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->rge_tail->m_next = m;
			}
			m = sc->rge_head;
			sc->rge_head = sc->rge_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
#if 0
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);
#else
		{
			m->m_pkthdr.len = m->m_len = total_len;
			m->m_flags |= M_HASFCS;
		}
#endif
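		/*
		 * In the enabled branch above, NetBSD keeps the FCS in
		 * the mbuf and sets M_HASFCS so the upper layer trims
		 * it, instead of shortening the mbuf here as the
		 * disabled OpenBSD-style branch does.
		 */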

#if notyet
		/* Check IP header checksum. */
		if (!(extsts & RGE_RDEXTSTS_IPCSUMERR) &&
		    (extsts & RGE_RDEXTSTS_IPV4))
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

		/* Check TCP/UDP checksum. */
		if ((extsts & (RGE_RDEXTSTS_IPV4 | RGE_RDEXTSTS_IPV6)) &&
		    (((extsts & RGE_RDEXTSTS_TCPPKT) &&
		    !(extsts & RGE_RDEXTSTS_TCPCSUMERR)) ||
		    ((extsts & RGE_RDEXTSTS_UDPPKT) &&
		    !(extsts & RGE_RDEXTSTS_UDPCSUMERR))))
			m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
			    M_UDP_CSUM_IN_OK;
#endif

		if (extsts & RGE_RDEXTSTS_VTAG) {
			vlan_set_tag(m,
			    bswap16(extsts & RGE_RDEXTSTS_VLAN_MASK));
		}

		if_percpuq_enqueue(ifp->if_percpuq, m);
	}

	sc->rge_ldata.rge_rxq_considx = i;

	return (rx);
}

int
rge_txeof(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct rge_txq *txq;
	uint32_t txstat;
	int cons, idx, prod;
	int free = 0;

	prod = sc->rge_ldata.rge_txq_prodidx;
	cons = sc->rge_ldata.rge_txq_considx;

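	/*
	 * free: 0 = nothing reclaimed, 1 = reclaimed at least one
	 * descriptor, 2 = stopped early because the chip still owns
	 * the next descriptor.
	 */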
	while (prod != cons) {
		txq = &sc->rge_ldata.rge_txq[cons];
		idx = txq->txq_descidx;

		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
		    idx * sizeof(struct rge_tx_desc),
		    sizeof(struct rge_tx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		txstat = letoh32(sc->rge_ldata.rge_tx_list[idx].rge_cmdsts);

		if (txstat & RGE_TDCMDSTS_OWN) {
			free = 2;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap, 0,
		    txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
		m_freem(txq->txq_mbuf);
		txq->txq_mbuf = NULL;

		if (txstat & (RGE_TDCMDSTS_EXCESSCOLL | RGE_TDCMDSTS_COLL))
			if_statinc(ifp, if_collisions);
		if (txstat & RGE_TDCMDSTS_TXERR)
			if_statinc(ifp, if_oerrors);

		bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
		    idx * sizeof(struct rge_tx_desc),
		    sizeof(struct rge_tx_desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		cons = RGE_NEXT_TX_DESC(idx);
		free = 1;
	}

	if (free == 0)
		return (0);

	sc->rge_ldata.rge_txq_considx = cons;

#if 0
	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
	else if (free == 2)
		ifq_serialize(&ifp->if_snd, &sc->sc_task);
	else
		ifp->if_timer = 0;
#else
#if 0
	if (!IF_IS_EMPTY(&ifp->if_snd))
		rge_start(ifp);
	else
		if (free == 2)
			if (0) { rge_txstart(&sc->sc_task, sc); }
		else
#endif
	ifp->if_timer = 0;
#endif

	return (1);
}

void
rge_reset(struct rge_softc *sc)
{
	int i;

	/* Enable RXDV gate. */
	RGE_SETBIT_1(sc, RGE_PPSW, 0x08);
	DELAY(2000);

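	/* Wait up to 150ms (3000 * 50us) for the RX/TX FIFOs to drain. */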
	for (i = 0; i < 3000; i++) {
		DELAY(50);
		if ((RGE_READ_1(sc, RGE_MCUCMD) & (RGE_MCUCMD_RXFIFO_EMPTY |
		    RGE_MCUCMD_TXFIFO_EMPTY)) == (RGE_MCUCMD_RXFIFO_EMPTY |
		    RGE_MCUCMD_TXFIFO_EMPTY))
			break;
	}
	if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5) {
		for (i = 0; i < 3000; i++) {
			DELAY(50);
			if ((RGE_READ_2(sc, RGE_IM) & 0x0103) == 0x0103)
				break;
		}
	}

	DELAY(2000);

	/* Soft reset. */
	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_RESET);

	for (i = 0; i < RGE_TIMEOUT; i++) {
		DELAY(100);
		if (!(RGE_READ_1(sc, RGE_CMD) & RGE_CMD_RESET))
			break;
	}
	if (i == RGE_TIMEOUT)
		device_printf(sc->sc_dev, "reset never completed!\n");
}

void
rge_iff(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct ethercom *ec = &sc->sc_ec;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[2];
	uint32_t rxfilt;
	int h = 0;

	rxfilt = RGE_READ_4(sc, RGE_RXCFG);
	rxfilt &= ~(RGE_RXCFG_ALLPHYS | RGE_RXCFG_MULTI);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept frames destined to our station address.
	 * Always accept broadcast frames.
	 */
	rxfilt |= RGE_RXCFG_INDIV | RGE_RXCFG_BROAD;

	if (ifp->if_flags & IFF_PROMISC) {
allmulti:
		ifp->if_flags |= IFF_ALLMULTI;
		rxfilt |= RGE_RXCFG_MULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= RGE_RXCFG_ALLPHYS;
		hashes[0] = hashes[1] = 0xffffffff;
	} else {
		rxfilt |= RGE_RXCFG_MULTI;
		/* Program new filter. */
		memset(hashes, 0, sizeof(hashes));

		ETHER_LOCK(ec);
		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN) != 0) {
				ETHER_UNLOCK(ec);
				goto allmulti;
			}
			h = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN) >> 26;

			if (h < 32)
				hashes[0] |= (1U << h);
			else
				hashes[1] |= (1U << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
		ETHER_UNLOCK(ec);
	}

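	/*
	 * The hash registers take the two 32-bit words swapped and
	 * byte-reversed relative to the computed CRC hash.
	 */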
	RGE_WRITE_4(sc, RGE_RXCFG, rxfilt);
	RGE_WRITE_4(sc, RGE_MAR0, bswap32(hashes[1]));
	RGE_WRITE_4(sc, RGE_MAR4, bswap32(hashes[0]));
}

void
rge_set_phy_power(struct rge_softc *sc, int on)
{
	int i;

	if (on) {
		RGE_SETBIT_1(sc, RGE_PMCH, 0xc0);

		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN);

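		/*
		 * Wait for the PHY to report its powered-up state
		 * (0xa420 bits 2:0 == 3).
		 */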
		for (i = 0; i < RGE_TIMEOUT; i++) {
			if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 3)
				break;
			DELAY(1000);
		}
	} else {
		rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_PDOWN);
		RGE_CLRBIT_1(sc, RGE_PMCH, 0x80);
		RGE_CLRBIT_1(sc, RGE_PPSW, 0x40);
	}
}

void
rge_phy_config(struct rge_softc *sc)
{
	/* Read microcode version. */
	rge_write_phy_ocp(sc, 0xa436, 0x801e);
	sc->rge_mcodever = rge_read_phy_ocp(sc, 0xa438);

	switch (sc->rge_type) {
	case MAC_CFG2:
		rge_phy_config_mac_cfg2(sc);
		break;
	case MAC_CFG3:
		rge_phy_config_mac_cfg3(sc);
		break;
	case MAC_CFG4:
		rge_phy_config_mac_cfg4(sc);
		break;
	case MAC_CFG5:
		rge_phy_config_mac_cfg5(sc);
		break;
	default:
		break;	/* Can't happen. */
	}

	rge_write_phy(sc, 0x0a5b, 0x12,
	    rge_read_phy(sc, 0x0a5b, 0x12) & ~0x8000);

	/* Disable EEE. */
	RGE_MAC_CLRBIT(sc, 0xe040, 0x0003);
	if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
		RGE_MAC_CLRBIT(sc, 0xeb62, 0x0006);
		RGE_PHY_CLRBIT(sc, 0xa432, 0x0010);
	}
	RGE_PHY_CLRBIT(sc, 0xa5d0, 0x0006);
	RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xa6d8, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xa428, 0x0080);
	RGE_PHY_CLRBIT(sc, 0xa4a2, 0x0200);

	rge_patch_phy_mcu(sc, 1);
	RGE_MAC_CLRBIT(sc, 0xe052, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xa442, 0x3000);
	RGE_PHY_CLRBIT(sc, 0xa430, 0x8000);
	rge_patch_phy_mcu(sc, 0);
}

void
rge_phy_config_mac_cfg2(struct rge_softc *sc)
{
	uint16_t val;
	int i;

	for (i = 0; i < nitems(rtl8125_mac_cfg2_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg2_ephy[i].reg,
		    rtl8125_mac_cfg2_ephy[i].val);

	rge_phy_config_mcu(sc, RGE_MAC_CFG2_MCODE_VER);

	val = rge_read_phy_ocp(sc, 0xad40) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0084);
	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad16, val | 0x0006);
	val = rge_read_phy_ocp(sc, 0xad32) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x1100);
	val = rge_read_phy_ocp(sc, 0xac8a) & ~0xf000;
	rge_write_phy_ocp(sc, 0xac8a, val | 0x7000);
	RGE_PHY_SETBIT(sc, 0xad18, 0x0400);
	RGE_PHY_SETBIT(sc, 0xad1a, 0x03ff);
	RGE_PHY_SETBIT(sc, 0xad1c, 0x03ff);

	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xc400);
	rge_write_phy_ocp(sc, 0xa436, 0x80eb);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0x0700;
	rge_write_phy_ocp(sc, 0xa438, val | 0x0300);
	rge_write_phy_ocp(sc, 0xa436, 0x80f8);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x1c00);
	rge_write_phy_ocp(sc, 0xa436, 0x80f1);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x3000);
	rge_write_phy_ocp(sc, 0xa436, 0x80fe);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xa500);
	rge_write_phy_ocp(sc, 0xa436, 0x8102);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
	rge_write_phy_ocp(sc, 0xa436, 0x8105);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x3300);
	rge_write_phy_ocp(sc, 0xa436, 0x8100);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x7000);
	rge_write_phy_ocp(sc, 0xa436, 0x8104);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xf000);
	rge_write_phy_ocp(sc, 0xa436, 0x8106);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0x6500);
	rge_write_phy_ocp(sc, 0xa436, 0x80dc);
	val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
	rge_write_phy_ocp(sc, 0xa438, val | 0xed00);
	rge_write_phy_ocp(sc, 0xa436, 0x80df);
	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
	rge_write_phy_ocp(sc, 0xa436, 0x80e1);
	RGE_PHY_CLRBIT(sc, 0xa438, 0x0100);
	val = rge_read_phy_ocp(sc, 0xbf06) & ~0x003f;
	rge_write_phy_ocp(sc, 0xbf06, val | 0x0038);
	rge_write_phy_ocp(sc, 0xa436, 0x819f);
	rge_write_phy_ocp(sc, 0xa438, 0xd0b6);
	rge_write_phy_ocp(sc, 0xbc34, 0x5555);
	val = rge_read_phy_ocp(sc, 0xbf0a) & ~0x0e00;
	rge_write_phy_ocp(sc, 0xbf0a, val | 0x0a00);
	RGE_PHY_CLRBIT(sc, 0xa5c0, 0x0400);
	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
}

void
rge_phy_config_mac_cfg3(struct rge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint16_t val;
	int i;
	static const uint16_t mac_cfg3_a438_value[] =
	    { 0x0043, 0x00a7, 0x00d6, 0x00ec, 0x00f6, 0x00fb, 0x00fd, 0x00ff,
	      0x00bb, 0x0058, 0x0029, 0x0013, 0x0009, 0x0004, 0x0002 };

	static const uint16_t mac_cfg3_b88e_value[] =
	    { 0xc091, 0x6e12, 0xc092, 0x1214, 0xc094, 0x1516, 0xc096, 0x171b,
	      0xc098, 0x1b1c, 0xc09a, 0x1f1f, 0xc09c, 0x2021, 0xc09e, 0x2224,
	      0xc0a0, 0x2424, 0xc0a2, 0x2424, 0xc0a4, 0x2424, 0xc018, 0x0af2,
	      0xc01a, 0x0d4a, 0xc01c, 0x0f26, 0xc01e, 0x118d, 0xc020, 0x14f3,
	      0xc022, 0x175a, 0xc024, 0x19c0, 0xc026, 0x1c26, 0xc089, 0x6050,
	      0xc08a, 0x5f6e, 0xc08c, 0x6e6e, 0xc08e, 0x6e6e, 0xc090, 0x6e12 };

	for (i = 0; i < nitems(rtl8125_mac_cfg3_ephy); i++)
		rge_write_ephy(sc, rtl8125_mac_cfg3_ephy[i].reg,
		    rtl8125_mac_cfg3_ephy[i].val);

	val = rge_read_ephy(sc, 0x002a) & ~0x7000;
	rge_write_ephy(sc, 0x002a, val | 0x3000);
	RGE_EPHY_CLRBIT(sc, 0x0019, 0x0040);
	RGE_EPHY_SETBIT(sc, 0x001b, 0x0e00);
	RGE_EPHY_CLRBIT(sc, 0x001b, 0x7000);
	rge_write_ephy(sc, 0x0002, 0x6042);
	rge_write_ephy(sc, 0x0006, 0x0014);
	val = rge_read_ephy(sc, 0x006a) & ~0x7000;
	rge_write_ephy(sc, 0x006a, val | 0x3000);
	RGE_EPHY_CLRBIT(sc, 0x0059, 0x0040);
	RGE_EPHY_SETBIT(sc, 0x005b, 0x0e00);
	RGE_EPHY_CLRBIT(sc, 0x005b, 0x7000);
	rge_write_ephy(sc, 0x0042, 0x6042);
	rge_write_ephy(sc, 0x0046, 0x0014);

	rge_phy_config_mcu(sc, RGE_MAC_CFG3_MCODE_VER);

	RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
	val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
	rge_write_phy_ocp(sc, 0xad16, val | 0x03ff);
	val = rge_read_phy_ocp(sc, 0xad32) & ~0x003f;
	rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x1000);
	RGE_PHY_CLRBIT(sc, 0xac08, 0x0100);
	val = rge_read_phy_ocp(sc, 0xacc0) & ~0x0003;
	rge_write_phy_ocp(sc, 0xacc0, val | 0x0002);
	val = rge_read_phy_ocp(sc, 0xad40) & ~0x00e0;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0040);
	val = rge_read_phy_ocp(sc, 0xad40) & ~0x0007;
	rge_write_phy_ocp(sc, 0xad40, val | 0x0004);
	RGE_PHY_CLRBIT(sc, 0xac14, 0x0080);
	RGE_PHY_CLRBIT(sc, 0xac80, 0x0300);
	val = rge_read_phy_ocp(sc, 0xac5e) & ~0x0007;
	rge_write_phy_ocp(sc, 0xac5e, val | 0x0002);
	rge_write_phy_ocp(sc, 0xad4c, 0x00a8);
	rge_write_phy_ocp(sc, 0xac5c, 0x01ff);
	val = rge_read_phy_ocp(sc, 0xac8a) & ~0x00f0;
	rge_write_phy_ocp(sc, 0xac8a, val | 0x0030);
	rge_write_phy_ocp(sc, 0xb87c, 0x8157);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0500);
	rge_write_phy_ocp(sc, 0xb87c, 0x8159);
	val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb87e, val | 0x0700);
	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN +
	    32);
	rge_write_phy_ocp(sc, 0xb87c, 0x80a2);
	rge_write_phy_ocp(sc, 0xb87e, 0x0153);
	rge_write_phy_ocp(sc, 0xb87c, 0x809c);
	rge_write_phy_ocp(sc, 0xb87e, 0x0153);

	rge_write_phy_ocp(sc, 0xa436, 0x81b3);
	for (i = 0; i < nitems(mac_cfg3_a438_value); i++)
		rge_write_phy_ocp(sc, 0xa438, mac_cfg3_a438_value[i]);
	for (i = 0; i < 26; i++)
		rge_write_phy_ocp(sc, 0xa438, 0);
	rge_write_phy_ocp(sc, 0xa436, 0x8257);
	rge_write_phy_ocp(sc, 0xa438, 0x020f);
	rge_write_phy_ocp(sc, 0xa436, 0x80ea);
	rge_write_phy_ocp(sc, 0xa438, 0x7843);

	rge_patch_phy_mcu(sc, 1);
	RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
	RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
	for (i = 0; i < nitems(mac_cfg3_b88e_value); i += 2) {
		rge_write_phy_ocp(sc, 0xb88e, mac_cfg3_b88e_value[i]);
		rge_write_phy_ocp(sc, 0xb890, mac_cfg3_b88e_value[i + 1]);
	}
	RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
	rge_patch_phy_mcu(sc, 0);

	RGE_PHY_SETBIT(sc, 0xd068, 0x2000);
	rge_write_phy_ocp(sc, 0xa436, 0x81a2);
	RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
	val = rge_read_phy_ocp(sc, 0xb54c) & ~0xff00;
	rge_write_phy_ocp(sc, 0xb54c, val | 0xdb00);
	RGE_PHY_CLRBIT(sc, 0xa454, 0x0001);
	RGE_PHY_SETBIT(sc, 0xa5d4, 0x0020);
	RGE_PHY_CLRBIT(sc, 0xad4e, 0x0010);
	RGE_PHY_CLRBIT(sc, 0xa86a, 0x0001);
	RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
}
1756
1757 void
1758 rge_phy_config_mac_cfg4(struct rge_softc *sc)
1759 {
1760 struct ifnet *ifp = &sc->sc_ec.ec_if;
1761 uint16_t val;
1762 int i;
1763 static const uint16_t mac_cfg4_b87c_value[] =
1764 { 0x8013, 0x0700, 0x8fb9, 0x2801, 0x8fba, 0x0100, 0x8fbc, 0x1900,
1765 0x8fbe, 0xe100, 0x8fc0, 0x0800, 0x8fc2, 0xe500, 0x8fc4, 0x0f00,
1766 0x8fc6, 0xf100, 0x8fc8, 0x0400, 0x8fca, 0xf300, 0x8fcc, 0xfd00,
1767 0x8fce, 0xff00, 0x8fd0, 0xfb00, 0x8fd2, 0x0100, 0x8fd4, 0xf400,
1768 0x8fd6, 0xff00, 0x8fd8, 0xf600, 0x813d, 0x390e, 0x814f, 0x790e,
1769 0x80b0, 0x0f31 };
1770
1771 for (i = 0; i < nitems(rtl8125_mac_cfg4_ephy); i++)
1772 rge_write_ephy(sc, rtl8125_mac_cfg4_ephy[i].reg,
1773 rtl8125_mac_cfg4_ephy[i].val);
1774
1775 rge_write_phy_ocp(sc, 0xbf86, 0x9000);
1776 RGE_PHY_SETBIT(sc, 0xc402, 0x0400);
1777 RGE_PHY_CLRBIT(sc, 0xc402, 0x0400);
1778 rge_write_phy_ocp(sc, 0xbd86, 0x1010);
1779 rge_write_phy_ocp(sc, 0xbd88, 0x1010);
1780 val = rge_read_phy_ocp(sc, 0xbd4e) & ~0x0c00;
1781 rge_write_phy_ocp(sc, 0xbd4e, val | 0x0800);
1782 val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
1783 rge_write_phy_ocp(sc, 0xbf46, val | 0x0700);
1784
1785 rge_phy_config_mcu(sc, RGE_MAC_CFG4_MCODE_VER);
1786
1787 RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1788 RGE_PHY_SETBIT(sc, 0xbc08, 0x000c);
1789 rge_write_phy_ocp(sc, 0xa436, 0x8fff);
1790 val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1791 rge_write_phy_ocp(sc, 0xa438, val | 0x0400);
1792 for (i = 0; i < 6; i++) {
1793 rge_write_phy_ocp(sc, 0xb87c, 0x8560 + i * 2);
1794 if (i < 3)
1795 rge_write_phy_ocp(sc, 0xb87e, 0x19cc);
1796 else
1797 rge_write_phy_ocp(sc, 0xb87e, 0x147d);
1798 }
1799 rge_write_phy_ocp(sc, 0xb87c, 0x8ffe);
1800 rge_write_phy_ocp(sc, 0xb87e, 0x0907);
1801 val = rge_read_phy_ocp(sc, 0xacda) & ~0xff00;
1802 rge_write_phy_ocp(sc, 0xacda, val | 0xff00);
1803 val = rge_read_phy_ocp(sc, 0xacde) & ~0xf000;
1804 rge_write_phy_ocp(sc, 0xacde, val | 0xf000);
1805 rge_write_phy_ocp(sc, 0xb87c, 0x80d6);
1806 rge_write_phy_ocp(sc, 0xb87e, 0x2801);
1807 rge_write_phy_ocp(sc, 0xb87c, 0x80F2);
1808 rge_write_phy_ocp(sc, 0xb87e, 0x2801);
1809 rge_write_phy_ocp(sc, 0xb87c, 0x80f4);
1810 rge_write_phy_ocp(sc, 0xb87e, 0x6077);
1811 rge_write_phy_ocp(sc, 0xb506, 0x01e7);
1812 rge_write_phy_ocp(sc, 0xac8c, 0x0ffc);
1813 rge_write_phy_ocp(sc, 0xac46, 0xb7b4);
1814 rge_write_phy_ocp(sc, 0xac50, 0x0fbc);
1815 rge_write_phy_ocp(sc, 0xac3c, 0x9240);
1816 rge_write_phy_ocp(sc, 0xac4E, 0x0db4);
1817 rge_write_phy_ocp(sc, 0xacc6, 0x0707);
1818 rge_write_phy_ocp(sc, 0xacc8, 0xa0d3);
1819 rge_write_phy_ocp(sc, 0xad08, 0x0007);
1820 for (i = 0; i < nitems(mac_cfg4_b87c_value); i += 2) {
1821 rge_write_phy_ocp(sc, 0xb87c, mac_cfg4_b87c_value[i]);
1822 rge_write_phy_ocp(sc, 0xb87e, mac_cfg4_b87c_value[i + 1]);
1823 }
1824 RGE_PHY_SETBIT(sc, 0xbf4c, 0x0002);
1825 RGE_PHY_SETBIT(sc, 0xbcca, 0x0300);
1826 rge_write_phy_ocp(sc, 0xb87c, 0x8141);
1827 rge_write_phy_ocp(sc, 0xb87e, 0x320e);
1828 rge_write_phy_ocp(sc, 0xb87c, 0x8153);
1829 rge_write_phy_ocp(sc, 0xb87e, 0x720e);
1830 RGE_PHY_CLRBIT(sc, 0xa432, 0x0040);
1831 rge_write_phy_ocp(sc, 0xb87c, 0x8529);
1832 rge_write_phy_ocp(sc, 0xb87e, 0x050e);
1833 RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN +
1834 32);
1835 rge_write_phy_ocp(sc, 0xa436, 0x816c);
1836 rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
1837 rge_write_phy_ocp(sc, 0xa436, 0x8170);
1838 rge_write_phy_ocp(sc, 0xa438, 0xc4a0);
1839 rge_write_phy_ocp(sc, 0xa436, 0x8174);
1840 rge_write_phy_ocp(sc, 0xa438, 0x04a0);
1841 rge_write_phy_ocp(sc, 0xa436, 0x8178);
1842 rge_write_phy_ocp(sc, 0xa438, 0x04a0);
1843 rge_write_phy_ocp(sc, 0xa436, 0x817c);
1844 rge_write_phy_ocp(sc, 0xa438, 0x0719);
1845 rge_write_phy_ocp(sc, 0xa436, 0x8ff4);
1846 rge_write_phy_ocp(sc, 0xa438, 0x0400);
1847 rge_write_phy_ocp(sc, 0xa436, 0x8ff1);
1848 rge_write_phy_ocp(sc, 0xa438, 0x0404);
1849 rge_write_phy_ocp(sc, 0xbf4a, 0x001b);
1850 for (i = 0; i < 6; i++) {
1851 rge_write_phy_ocp(sc, 0xb87c, 0x8033 + i * 4);
1852 if (i == 2)
1853 rge_write_phy_ocp(sc, 0xb87e, 0xfc32);
1854 else
1855 rge_write_phy_ocp(sc, 0xb87e, 0x7c13);
1856 }
1857 rge_write_phy_ocp(sc, 0xb87c, 0x8145);
1858 rge_write_phy_ocp(sc, 0xb87e, 0x370e);
1859 rge_write_phy_ocp(sc, 0xb87c, 0x8157);
1860 rge_write_phy_ocp(sc, 0xb87e, 0x770e);
1861 rge_write_phy_ocp(sc, 0xb87c, 0x8169);
1862 rge_write_phy_ocp(sc, 0xb87e, 0x0d0a);
1863 rge_write_phy_ocp(sc, 0xb87c, 0x817b);
1864 rge_write_phy_ocp(sc, 0xb87e, 0x1d0a);
1865 rge_write_phy_ocp(sc, 0xa436, 0x8217);
1866 val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1867 rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
1868 rge_write_phy_ocp(sc, 0xa436, 0x821a);
1869 val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1870 rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
1871 rge_write_phy_ocp(sc, 0xa436, 0x80da);
1872 rge_write_phy_ocp(sc, 0xa438, 0x0403);
1873 rge_write_phy_ocp(sc, 0xa436, 0x80dc);
1874 val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1875 rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
1876 rge_write_phy_ocp(sc, 0xa436, 0x80b3);
1877 rge_write_phy_ocp(sc, 0xa438, 0x0384);
1878 rge_write_phy_ocp(sc, 0xa436, 0x80b7);
1879 rge_write_phy_ocp(sc, 0xa438, 0x2007);
1880 rge_write_phy_ocp(sc, 0xa436, 0x80ba);
1881 val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1882 rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
1883 rge_write_phy_ocp(sc, 0xa436, 0x80b5);
1884 rge_write_phy_ocp(sc, 0xa438, 0xf009);
1885 rge_write_phy_ocp(sc, 0xa436, 0x80bd);
1886 val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1887 rge_write_phy_ocp(sc, 0xa438, val | 0x9f00);
1888 rge_write_phy_ocp(sc, 0xa436, 0x80c7);
1889 rge_write_phy_ocp(sc, 0xa438, 0xf083);
1890 rge_write_phy_ocp(sc, 0xa436, 0x80dd);
1891 rge_write_phy_ocp(sc, 0xa438, 0x03f0);
1892 rge_write_phy_ocp(sc, 0xa436, 0x80df);
1893 val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1894 rge_write_phy_ocp(sc, 0xa438, val | 0x1000);
1895 rge_write_phy_ocp(sc, 0xa436, 0x80cb);
1896 rge_write_phy_ocp(sc, 0xa438, 0x2007);
1897 rge_write_phy_ocp(sc, 0xa436, 0x80ce);
1898 val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1899 rge_write_phy_ocp(sc, 0xa438, val | 0x6c00);
1900 rge_write_phy_ocp(sc, 0xa436, 0x80c9);
1901 rge_write_phy_ocp(sc, 0xa438, 0x8009);
1902 rge_write_phy_ocp(sc, 0xa436, 0x80d1);
1903 val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1904 rge_write_phy_ocp(sc, 0xa438, val | 0x8000);
1905 rge_write_phy_ocp(sc, 0xa436, 0x80a3);
1906 rge_write_phy_ocp(sc, 0xa438, 0x200a);
1907 rge_write_phy_ocp(sc, 0xa436, 0x80a5);
1908 rge_write_phy_ocp(sc, 0xa438, 0xf0ad);
1909 rge_write_phy_ocp(sc, 0xa436, 0x809f);
1910 rge_write_phy_ocp(sc, 0xa438, 0x6073);
1911 rge_write_phy_ocp(sc, 0xa436, 0x80a1);
1912 rge_write_phy_ocp(sc, 0xa438, 0x000b);
1913 rge_write_phy_ocp(sc, 0xa436, 0x80a9);
1914 val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1915 rge_write_phy_ocp(sc, 0xa438, val | 0xc000);
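/*
 * Load a data table through the 0xb88e (address)/0xb890 (data)
 * window; the chip apparently requires PHY MCU patch mode for this.
 */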
1916 rge_patch_phy_mcu(sc, 1);
1917 RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
1918 RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
1919 rge_write_phy_ocp(sc, 0xb88e, 0xc23e);
1920 rge_write_phy_ocp(sc, 0xb890, 0x0000);
1921 rge_write_phy_ocp(sc, 0xb88e, 0xc240);
1922 rge_write_phy_ocp(sc, 0xb890, 0x0103);
1923 rge_write_phy_ocp(sc, 0xb88e, 0xc242);
1924 rge_write_phy_ocp(sc, 0xb890, 0x0507);
1925 rge_write_phy_ocp(sc, 0xb88e, 0xc244);
1926 rge_write_phy_ocp(sc, 0xb890, 0x090b);
1927 rge_write_phy_ocp(sc, 0xb88e, 0xc246);
1928 rge_write_phy_ocp(sc, 0xb890, 0x0c0e);
1929 rge_write_phy_ocp(sc, 0xb88e, 0xc248);
1930 rge_write_phy_ocp(sc, 0xb890, 0x1012);
1931 rge_write_phy_ocp(sc, 0xb88e, 0xc24a);
1932 rge_write_phy_ocp(sc, 0xb890, 0x1416);
1933 RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
1934 rge_patch_phy_mcu(sc, 0);
1935 RGE_PHY_SETBIT(sc, 0xa86a, 0x0001);
1936 RGE_PHY_SETBIT(sc, 0xa6f0, 0x0001);
1937 rge_write_phy_ocp(sc, 0xbfa0, 0xd70d);
1938 rge_write_phy_ocp(sc, 0xbfa2, 0x4100);
1939 rge_write_phy_ocp(sc, 0xbfa4, 0xe868);
1940 rge_write_phy_ocp(sc, 0xbfa6, 0xdc59);
1941 rge_write_phy_ocp(sc, 0xb54c, 0x3c18);
1942 RGE_PHY_CLRBIT(sc, 0xbfa4, 0x0020);
1943 rge_write_phy_ocp(sc, 0xa436, 0x817d);
1944 RGE_PHY_SETBIT(sc, 0xa438, 0x1000);
1945 }
1946
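/*
 * PHY initialization for MAC_CFG5 parts: load the EPHY table and the
 * PHY MCU patch, then apply a series of undocumented OCP register
 * writes.
 */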
1947 void
1948 rge_phy_config_mac_cfg5(struct rge_softc *sc)
1949 {
1950 struct ifnet *ifp = &sc->sc_ec.ec_if;
1951 uint16_t val;
1952 int i;
1953
1954 for (i = 0; i < nitems(rtl8125_mac_cfg5_ephy); i++)
1955 rge_write_ephy(sc, rtl8125_mac_cfg5_ephy[i].reg,
1956 rtl8125_mac_cfg5_ephy[i].val);
1957
1958 val = rge_read_ephy(sc, 0x0022) & ~0x0030;
1959 rge_write_ephy(sc, 0x0022, val | 0x0020);
1960 val = rge_read_ephy(sc, 0x0062) & ~0x0030;
1961 rge_write_ephy(sc, 0x0062, val | 0x0020);
1962
1963 rge_phy_config_mcu(sc, RGE_MAC_CFG5_MCODE_VER);
1964
1965 RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1966 val = rge_read_phy_ocp(sc, 0xac46) & ~0x00f0;
1967 rge_write_phy_ocp(sc, 0xac46, val | 0x0090);
1968 val = rge_read_phy_ocp(sc, 0xad30) & ~0x0003;
1969 rge_write_phy_ocp(sc, 0xad30, val | 0x0001);
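/* As in the MAC_CFG4 path: scale the EEE transmit idle timer to the MTU. */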
1970 RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, ifp->if_mtu + ETHER_HDR_LEN +
1971 32);
1972 rge_write_phy_ocp(sc, 0xb87c, 0x80f5);
1973 rge_write_phy_ocp(sc, 0xb87e, 0x760e);
1974 rge_write_phy_ocp(sc, 0xb87c, 0x8107);
1975 rge_write_phy_ocp(sc, 0xb87e, 0x360e);
1976 rge_write_phy_ocp(sc, 0xb87c, 0x8551);
1977 val = rge_read_phy_ocp(sc, 0xb87e) & ~0xff00;
1978 rge_write_phy_ocp(sc, 0xb87e, val | 0x0800);
1979 val = rge_read_phy_ocp(sc, 0xbf00) & ~0xe000;
1980 rge_write_phy_ocp(sc, 0xbf00, val | 0xa000);
1981 val = rge_read_phy_ocp(sc, 0xbf46) & ~0x0f00;
1982 rge_write_phy_ocp(sc, 0xbf46, val | 0x0300);
1983 for (i = 0; i < 10; i++) {
1984 rge_write_phy_ocp(sc, 0xa436, 0x8044 + i * 6);
1985 rge_write_phy_ocp(sc, 0xa438, 0x2417);
1986 }
1987 RGE_PHY_SETBIT(sc, 0xa4ca, 0x0040);
1988 val = rge_read_phy_ocp(sc, 0xbf84) & ~0xe000;
1989 rge_write_phy_ocp(sc, 0xbf84, val | 0xa000);
1990 }
1991
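/*
 * Load the PHY MCU firmware patch for this chip variant, but only if
 * the recorded microcode version differs from mcode_version.  The new
 * version is written back to the PHY once the patch is in place.
 */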
1992 void
1993 rge_phy_config_mcu(struct rge_softc *sc, uint16_t mcode_version)
1994 {
1995 if (sc->rge_mcodever != mcode_version) {
1996 int i;
1997
1998 rge_patch_phy_mcu(sc, 1);
1999
2000 if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
2001 rge_write_phy_ocp(sc, 0xa436, 0x8024);
2002 if (sc->rge_type == MAC_CFG2)
2003 rge_write_phy_ocp(sc, 0xa438, 0x8600);
2004 else
2005 rge_write_phy_ocp(sc, 0xa438, 0x8601);
2006 rge_write_phy_ocp(sc, 0xa436, 0xb82e);
2007 rge_write_phy_ocp(sc, 0xa438, 0x0001);
2008
2009 RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
2010 }
2011
2012 if (sc->rge_type == MAC_CFG2) {
2013 for (i = 0; i < nitems(rtl8125_mac_cfg2_mcu); i++) {
2014 rge_write_phy_ocp(sc,
2015 rtl8125_mac_cfg2_mcu[i].reg,
2016 rtl8125_mac_cfg2_mcu[i].val);
2017 }
2018 } else if (sc->rge_type == MAC_CFG3) {
2019 for (i = 0; i < nitems(rtl8125_mac_cfg3_mcu); i++) {
2020 rge_write_phy_ocp(sc,
2021 rtl8125_mac_cfg3_mcu[i].reg,
2022 rtl8125_mac_cfg3_mcu[i].val);
2023 }
2024 } else if (sc->rge_type == MAC_CFG4) {
2025 for (i = 0; i < nitems(rtl8125_mac_cfg4_mcu); i++) {
2026 rge_write_phy_ocp(sc,
2027 rtl8125_mac_cfg4_mcu[i].reg,
2028 rtl8125_mac_cfg4_mcu[i].val);
2029 }
2030 } else if (sc->rge_type == MAC_CFG5) {
2031 for (i = 0; i < nitems(rtl8125_mac_cfg5_mcu); i++) {
2032 rge_write_phy_ocp(sc,
2033 rtl8125_mac_cfg5_mcu[i].reg,
2034 rtl8125_mac_cfg5_mcu[i].val);
2035 }
2036 }
2037
2038 if (sc->rge_type == MAC_CFG2 || sc->rge_type == MAC_CFG3) {
2039 RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);
2040
2041 rge_write_phy_ocp(sc, 0xa436, 0);
2042 rge_write_phy_ocp(sc, 0xa438, 0);
2043 RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
2044 rge_write_phy_ocp(sc, 0xa436, 0x8024);
2045 rge_write_phy_ocp(sc, 0xa438, 0);
2046 }
2047
2048 rge_patch_phy_mcu(sc, 0);
2049
2050 /* Write microcode version. */
2051 rge_write_phy_ocp(sc, 0xa436, 0x801e);
2052 rge_write_phy_ocp(sc, 0xa438, mcode_version);
2053 }
2054 }
2055
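/*
 * Program the station address.  The MAC0/MAC4 registers are written
 * with the EE command register in WRITECFG mode, which appears to
 * unlock them.
 */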
2056 void
2057 rge_set_macaddr(struct rge_softc *sc, const uint8_t *addr)
2058 {
2059 RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2060 RGE_WRITE_4(sc, RGE_MAC0,
2061 (uint32_t)addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
2062 RGE_WRITE_4(sc, RGE_MAC4,
2063 addr[5] << 8 | addr[4]);
2064 RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2065 }
2066
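/* Read the station address currently programmed into the chip. */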
2067 void
2068 rge_get_macaddr(struct rge_softc *sc, uint8_t *addr)
2069 {
2070 int i;
2071
2072 for (i = 0; i < ETHER_ADDR_LEN; i++)
2073 addr[i] = RGE_READ_1(sc, RGE_ADDR0 + i);
2074 }
2075
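/*
 * One-time MAC setup: clear PME status, disable clock request and
 * UPS, clear the MAC MCU registers, load the per-variant break-point
 * tables and turn off PHY OCP power saving.
 */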
2076 void
2077 rge_hw_init(struct rge_softc *sc)
2078 {
2079 int i;
2080
2081 RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2082 RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
2083 RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
2084 RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
2085 RGE_CLRBIT_1(sc, 0xf1, 0x80);
2086
2087 /* Disable UPS. */
2088 RGE_MAC_CLRBIT(sc, 0xd40a, 0x0010);
2089
2090 /* Configure MAC MCU. */
2091 rge_write_mac_ocp(sc, 0xfc38, 0);
2092
2093 for (i = 0xfc28; i < 0xfc38; i += 2)
2094 rge_write_mac_ocp(sc, i, 0);
2095
2096 DELAY(3000);
2097 rge_write_mac_ocp(sc, 0xfc26, 0);
2098
2099 if (sc->rge_type == MAC_CFG3) {
2100 for (i = 0; i < nitems(rtl8125_mac_bps); i++) {
2101 rge_write_mac_ocp(sc, rtl8125_mac_bps[i].reg,
2102 rtl8125_mac_bps[i].val);
2103 }
2104 } else if (sc->rge_type == MAC_CFG5) {
2105 for (i = 0; i < nitems(rtl8125b_mac_bps); i++) {
2106 rge_write_mac_ocp(sc, rtl8125b_mac_bps[i].reg,
2107 rtl8125b_mac_bps[i].val);
2108 }
2109 }
2110
2111 /* Disable PHY power saving. */
2112 rge_disable_phy_ocp_pwrsave(sc);
2113
2114 /* Set PCIe uncorrectable error status. */
2115 rge_write_csi(sc, 0x108,
2116 rge_read_csi(sc, 0x108) | 0x00100000);
2117 }
2118
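/*
 * Disable PHY power saving through the undocumented OCP register
 * 0xc416; the write is done with the PHY MCU in patch mode.
 */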
2119 void
2120 rge_disable_phy_ocp_pwrsave(struct rge_softc *sc)
2121 {
2122 if (rge_read_phy_ocp(sc, 0xc416) != 0x0500) {
2123 rge_patch_phy_mcu(sc, 1);
2124 rge_write_phy_ocp(sc, 0xc416, 0);
2125 rge_write_phy_ocp(sc, 0xc416, 0x0500);
2126 rge_patch_phy_mcu(sc, 0);
2127 }
2128 }
2129
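/*
 * Enter (set != 0) or leave PHY MCU patch mode and wait for the PHY
 * to signal readiness in OCP register 0xb800.
 */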
2130 void
2131 rge_patch_phy_mcu(struct rge_softc *sc, int set)
2132 {
2133 int i;
2134
2135 if (set)
2136 RGE_PHY_SETBIT(sc, 0xb820, 0x0010);
2137 else
2138 RGE_PHY_CLRBIT(sc, 0xb820, 0x0010);
2139
2140 for (i = 0; i < 1000; i++) {
2141 if ((rge_read_phy_ocp(sc, 0xb800) & 0x0040) == 0x0040)
2142 break;
2143 DELAY(100);
2144 }
2145 if (i == 1000) {
2146 DPRINTF(("timeout waiting for PHY MCU patch mode change\n"));
2147 return;
2148 }
2149 }
2150
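/* Register every media type the chip supports, up to 2500baseT. */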
2151 void
2152 rge_add_media_types(struct rge_softc *sc)
2153 {
2154 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
2155 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
2156 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
2157 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
2158 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T, 0, NULL);
2159 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2160 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
2161 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX, 0, NULL);
2162 }
2163
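/*
 * Select the interrupt moderation scheme.  With simulated (timer
 * based) moderation, RX and TX completions are acknowledged on the
 * PCS timeout interrupt instead of their own interrupt bits.
 */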
2164 void
2165 rge_config_imtype(struct rge_softc *sc, int imtype)
2166 {
2167 switch (imtype) {
2168 case RGE_IMTYPE_NONE:
2169 sc->rge_intrs = RGE_INTRS;
2170 sc->rge_rx_ack = RGE_ISR_RX_OK | RGE_ISR_RX_DESC_UNAVAIL |
2171 RGE_ISR_RX_FIFO_OFLOW;
2172 sc->rge_tx_ack = RGE_ISR_TX_OK;
2173 break;
2174 case RGE_IMTYPE_SIM:
2175 sc->rge_intrs = RGE_INTRS_TIMER;
2176 sc->rge_rx_ack = RGE_ISR_PCS_TIMEOUT;
2177 sc->rge_tx_ack = RGE_ISR_PCS_TIMEOUT;
2178 break;
2179 default:
2180 panic("%s: unknown imtype %d", device_xname(sc->sc_dev), imtype);
2181 }
2182 }
2183
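/* Disable hardware interrupt moderation. */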
2184 void
2185 rge_disable_hw_im(struct rge_softc *sc)
2186 {
2187 RGE_WRITE_2(sc, RGE_IM, 0);
2188 }
2189
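/* Disable simulated (timer based) interrupt moderation. */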
2190 void
2191 rge_disable_sim_im(struct rge_softc *sc)
2192 {
2193 RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
2194 sc->rge_timerintr = 0;
2195 }
2196
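/* Arm the interrupt timer for simulated interrupt moderation. */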
2197 void
2198 rge_setup_sim_im(struct rge_softc *sc)
2199 {
2200 RGE_WRITE_4(sc, RGE_TIMERINT0, 0x2600);
2201 RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
2202 sc->rge_timerintr = 1;
2203 }
2204
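/* Program the interrupt mask for the chosen moderation scheme. */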
2205 void
2206 rge_setup_intr(struct rge_softc *sc, int imtype)
2207 {
2208 rge_config_imtype(sc, imtype);
2209
2210 /* Enable interrupts. */
2211 RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
2212
2213 switch (imtype) {
2214 case RGE_IMTYPE_NONE:
2215 rge_disable_sim_im(sc);
2216 rge_disable_hw_im(sc);
2217 break;
2218 case RGE_IMTYPE_SIM:
2219 rge_disable_hw_im(sc);
2220 rge_setup_sim_im(sc);
2221 break;
2222 default:
2223 panic("%s: unknown imtype %d", device_xname(sc->sc_dev), imtype);
2224 }
2225 }
2226
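/*
 * Take the MAC out of out-of-band (OOB) mode, i.e. reclaim the
 * hardware from the firmware after power-on or wake-up.  Also handles
 * the chip coming back from UPS (ultra power saving) mode.
 */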
2227 void
2228 rge_exit_oob(struct rge_softc *sc)
2229 {
2230 int i;
2231
2232 RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
2233 RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
2234 RGE_RXCFG_ERRPKT);
2235
2236 /* Disable RealWoW. */
2237 rge_write_mac_ocp(sc, 0xc0bc, 0x00ff);
2238
2239 rge_reset(sc);
2240
2241 /* Disable OOB. */
2242 RGE_CLRBIT_1(sc, RGE_MCUCMD, RGE_MCUCMD_IS_OOB);
2243
2244 RGE_MAC_CLRBIT(sc, 0xe8de, 0x4000);
2245
2246 for (i = 0; i < 10; i++) {
2247 DELAY(100);
2248 if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
2249 break;
2250 }
2251
2252 rge_write_mac_ocp(sc, 0xc0aa, 0x07d0);
2253 rge_write_mac_ocp(sc, 0xc0a6, 0x01b5);
2254 rge_write_mac_ocp(sc, 0xc01e, 0x5555);
2255
2256 for (i = 0; i < 10; i++) {
2257 DELAY(100);
2258 if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
2259 break;
2260 }
2261
2262 if (rge_read_mac_ocp(sc, 0xd42c) & 0x0100) {
2263 printf("%s: rge_exit_oob(): detected UPS resume\n",
2264 device_xname(sc->sc_dev));
2265 for (i = 0; i < RGE_TIMEOUT; i++) {
2266 if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 2)
2267 break;
2268 DELAY(1000);
2269 }
2270 RGE_MAC_CLRBIT(sc, 0xd408, 0x0100);
2271 if (sc->rge_type == MAC_CFG4 || sc->rge_type == MAC_CFG5)
2272 RGE_PHY_CLRBIT(sc, 0xa466, 0x0001);
2273 RGE_PHY_CLRBIT(sc, 0xa468, 0x000a);
2274 }
2275 }
2276
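/*
 * Write a dword into PCIe configuration space through the CSI window
 * and poll until the chip clears the BUSY bit.
 */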
2277 void
2278 rge_write_csi(struct rge_softc *sc, uint32_t reg, uint32_t val)
2279 {
2280 int i;
2281
2282 RGE_WRITE_4(sc, RGE_CSIDR, val);
2283 RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
2284 (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT) | RGE_CSIAR_BUSY);
2285
2286 for (i = 0; i < 10; i++) {
2287 DELAY(100);
2288 if (!(RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY))
2289 break;
2290 }
2291
2292 DELAY(20);
2293 }
2294
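/*
 * Read a dword from PCIe configuration space through the CSI window.
 * For reads the polarity is inverted: the chip appears to set BUSY
 * once the data is available in CSIDR.
 */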
2295 uint32_t
2296 rge_read_csi(struct rge_softc *sc, uint32_t reg)
2297 {
2298 int i;
2299
2300 RGE_WRITE_4(sc, RGE_CSIAR, (reg & RGE_CSIAR_ADDR_MASK) |
2301 (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT));
2302
2303 for (i = 0; i < 10; i++) {
2304 DELAY(100);
2305 if (RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY)
2306 break;
2307 }
2308
2309 DELAY(20);
2310
2311 return (RGE_READ_4(sc, RGE_CSIDR));
2312 }
2313
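/* Write a MAC OCP register; MAC-side writes are not polled for completion. */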
2314 void
2315 rge_write_mac_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
2316 {
2317 uint32_t tmp;
2318
2319 tmp = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
2320 tmp |= val;
2321 tmp |= RGE_MACOCP_BUSY;
2322 RGE_WRITE_4(sc, RGE_MACOCP, tmp);
2323 }
2324
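/* Read a MAC OCP register. */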
2325 uint16_t
2326 rge_read_mac_ocp(struct rge_softc *sc, uint16_t reg)
2327 {
2328 uint32_t val;
2329
2330 val = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
2331 RGE_WRITE_4(sc, RGE_MACOCP, val);
2332
2333 return (RGE_READ_4(sc, RGE_MACOCP) & RGE_MACOCP_DATA_MASK);
2334 }
2335
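/* Write a PCIe EPHY register and wait for the BUSY bit to clear. */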
2336 void
2337 rge_write_ephy(struct rge_softc *sc, uint16_t reg, uint16_t val)
2338 {
2339 uint32_t tmp;
2340 int i;
2341
2342 tmp = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
2343 tmp |= RGE_EPHYAR_BUSY | (val & RGE_EPHYAR_DATA_MASK);
2344 RGE_WRITE_4(sc, RGE_EPHYAR, tmp);
2345
2346 for (i = 0; i < 10; i++) {
2347 DELAY(100);
2348 if (!(RGE_READ_4(sc, RGE_EPHYAR) & RGE_EPHYAR_BUSY))
2349 break;
2350 }
2351
2352 DELAY(20);
2353 }
2354
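/*
 * Read a PCIe EPHY register; as with CSI reads, a set BUSY bit
 * appears to signal that the data is ready.
 */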
2355 uint16_t
2356 rge_read_ephy(struct rge_softc *sc, uint16_t reg)
2357 {
2358 uint32_t val;
2359 int i;
2360
2361 val = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
2362 RGE_WRITE_4(sc, RGE_EPHYAR, val);
2363
2364 for (i = 0; i < 10; i++) {
2365 DELAY(100);
2366 val = RGE_READ_4(sc, RGE_EPHYAR);
2367 if (val & RGE_EPHYAR_BUSY)
2368 break;
2369 }
2370
2371 DELAY(20);
2372
2373 return (val & RGE_EPHYAR_DATA_MASK);
2374 }
2375
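/*
 * Write a PHY register given as an (address, register) pair.  With
 * addr == 0, standard MII registers map eight per page starting at
 * RGE_PHYBASE, so e.g. reg 1 (BMSR) lands at OCP address
 * RGE_PHYBASE * 16 + 2.
 */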
2376 void
2377 rge_write_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg, uint16_t val)
2378 {
2379 uint16_t off, phyaddr;
2380
2381 phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
2382 phyaddr <<= 4;
2383
2384 off = addr ? reg : 0x10 + (reg % 8);
2385
2386 phyaddr += (off - 16) << 1;
2387
2388 rge_write_phy_ocp(sc, phyaddr, val);
2389 }
2390
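/* Counterpart of rge_write_phy(); same (address, register) translation. */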
2391 uint16_t
2392 rge_read_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg)
2393 {
2394 uint16_t off, phyaddr;
2395
2396 phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
2397 phyaddr <<= 4;
2398
2399 off = addr ? reg : 0x10 + (reg % 8);
2400
2401 phyaddr += (off - 16) << 1;
2402
2403 return (rge_read_phy_ocp(sc, phyaddr));
2404 }
2405
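/* Write a PHY OCP register and poll until the BUSY bit clears. */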
2406 void
2407 rge_write_phy_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
2408 {
2409 uint32_t tmp;
2410 int i;
2411
2412 tmp = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
2413 tmp |= RGE_PHYOCP_BUSY | val;
2414 RGE_WRITE_4(sc, RGE_PHYOCP, tmp);
2415
2416 for (i = 0; i < RGE_TIMEOUT; i++) {
2417 DELAY(1);
2418 if (!(RGE_READ_4(sc, RGE_PHYOCP) & RGE_PHYOCP_BUSY))
2419 break;
2420 }
2421 }
2422
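/*
 * Read a PHY OCP register; the BUSY bit is set once the read data is
 * valid.
 */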
2423 uint16_t
2424 rge_read_phy_ocp(struct rge_softc *sc, uint16_t reg)
2425 {
2426 uint32_t val;
2427 int i;
2428
2429 val = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
2430 RGE_WRITE_4(sc, RGE_PHYOCP, val);
2431
2432 for (i = 0; i < RGE_TIMEOUT; i++) {
2433 DELAY(1);
2434 val = RGE_READ_4(sc, RGE_PHYOCP);
2435 if (val & RGE_PHYOCP_BUSY)
2436 break;
2437 }
2438
2439 return (val & RGE_PHYOCP_DATA_MASK);
2440 }
2441
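/* Return 1 if the PHY reports link up, 0 otherwise. */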
2442 int
2443 rge_get_link_status(struct rge_softc *sc)
2444 {
2445 return ((RGE_READ_2(sc, RGE_PHYSTAT) & RGE_PHYSTAT_LINK) ? 1 : 0);
2446 }
2447
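/* Deferred transmit kick, scheduled on the driver's workqueue. */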
2448 void
2449 rge_txstart(struct work *wk __unused, void *arg)
2450 {
2451 struct rge_softc *sc = arg;
2452
2453 RGE_WRITE_2(sc, RGE_TXSTART, RGE_TXSTART_START);
2454 }
2455
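/* Once-a-second callout: check link state and reschedule. */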
2456 void
2457 rge_tick(void *arg)
2458 {
2459 struct rge_softc *sc = arg;
2460 int s;
2461
2462 s = splnet();
2463 rge_link_state(sc);
2464 splx(s);
2465
2466 callout_schedule(&sc->sc_timeout, hz);
2467 }
2468
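/* Push the current PHY link state up to the network stack. */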
2469 void
2470 rge_link_state(struct rge_softc *sc)
2471 {
2472 struct ifnet *ifp = &sc->sc_ec.ec_if;
2473 int link = LINK_STATE_DOWN;
2474
2475 if (rge_get_link_status(sc))
2476 link = LINK_STATE_UP;
2477
2478 if (ifp->if_link_state != link) { /* XXX if_link_state is read unlocked */
2479 if_link_state_change(ifp, link);
2480 }
2481 }
2482