1 /* $NetBSD: if_rge.c,v 1.15 2021/01/20 18:26:11 jakllsch Exp $ */
2 /* $OpenBSD: if_rge.c,v 1.4 2020/07/10 13:26:38 patrick Exp $ */
3
4 /*
5 * Copyright (c) 2019 Kevin Lo <kevlo@openbsd.org>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 #include <sys/cdefs.h>
21 __KERNEL_RCSID(0, "$NetBSD: if_rge.c,v 1.15 2021/01/20 18:26:11 jakllsch Exp $");
22
23 /* #include "vlan.h" Sevan */
24
25 #include <sys/types.h>
26
27 #include <sys/param.h>
28 #include <sys/systm.h>
29 #include <sys/sockio.h>
30 #include <sys/mbuf.h>
31 #include <sys/malloc.h>
32 #include <sys/kernel.h>
33 #include <sys/socket.h>
34 #include <sys/device.h>
35 #include <sys/endian.h>
36 #include <sys/callout.h>
37 #include <sys/workqueue.h>
38
39 #include <net/if.h>
40
41 #include <net/if_dl.h>
42 #include <net/if_ether.h>
43
44 #include <net/if_media.h>
45
46 #include <netinet/in.h>
47 #include <net/if_ether.h>
48
49 #if NBPFILTER > 0
50 #include <net/bpf.h>
51 #endif
52
53 #include <sys/bus.h>
54 #include <machine/intr.h>
55
56 #include <dev/mii/mii.h>
57
58 #include <dev/pci/pcivar.h>
59 #include <dev/pci/pcireg.h>
60 #include <dev/pci/pcidevs.h>
61
62 #include <dev/pci/if_rgereg.h>
63
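/*
 * Compatibility shims for code imported from OpenBSD: letoh32, nitems()
 * and a minimal mbuf_list/MCLGETI replacement so the shared code below
 * builds on NetBSD.
 */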
64 #ifdef __NetBSD__
65 #define letoh32 htole32
66 #define nitems(x) __arraycount(x)
67 #define MBUF_LIST_INITIALIZER() { NULL, NULL, 0 }
68 struct mbuf_list {
69 struct mbuf *ml_head;
70 struct mbuf *ml_tail;
71 u_int ml_len;
72 };
73
74 static struct mbuf *
75 MCLGETI(struct rge_softc *sc __unused, int how,
76 struct ifnet *ifp __unused, u_int size)
77 {
78 struct mbuf *m;
79
80 MGETHDR(m, how, MT_DATA);
81 if (m == NULL)
82 return NULL;
83
84 MEXTMALLOC(m, size, how);
85 if ((m->m_flags & M_EXT) == 0) {
86 m_freem(m);
87 return NULL;
88 }
89 return m;
90 }
91
92 #ifdef NET_MPSAFE
93 #define RGE_MPSAFE 1
94 #define CALLOUT_FLAGS CALLOUT_MPSAFE
95 #else
96 #define CALLOUT_FLAGS 0
97 #endif
98 #endif
99
100 static int rge_match(device_t, cfdata_t, void *);
101 static void rge_attach(device_t, device_t, void *);
102 int rge_intr(void *);
103 int rge_encap(struct rge_softc *, struct mbuf *, int);
104 int rge_ioctl(struct ifnet *, u_long, void *);
105 void rge_start(struct ifnet *);
106 void rge_watchdog(struct ifnet *);
107 int rge_init(struct ifnet *);
108 void rge_stop(struct ifnet *);
109 int rge_ifmedia_upd(struct ifnet *);
110 void rge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
111 int rge_allocmem(struct rge_softc *);
112 int rge_newbuf(struct rge_softc *, int);
113 void rge_discard_rxbuf(struct rge_softc *, int);
114 int rge_rx_list_init(struct rge_softc *);
115 void rge_tx_list_init(struct rge_softc *);
116 int rge_rxeof(struct rge_softc *);
117 int rge_txeof(struct rge_softc *);
118 void rge_reset(struct rge_softc *);
119 void rge_iff(struct rge_softc *);
120 void rge_set_phy_power(struct rge_softc *, int);
121 void rge_phy_config(struct rge_softc *);
122 void rge_set_macaddr(struct rge_softc *, const uint8_t *);
123 void rge_get_macaddr(struct rge_softc *, uint8_t *);
124 void rge_hw_init(struct rge_softc *);
125 void rge_disable_phy_ocp_pwrsave(struct rge_softc *);
126 void rge_patch_phy_mcu(struct rge_softc *, int);
127 void rge_add_media_types(struct rge_softc *);
128 void rge_config_imtype(struct rge_softc *, int);
129 void rge_disable_sim_im(struct rge_softc *);
130 void rge_setup_sim_im(struct rge_softc *);
131 void rge_setup_intr(struct rge_softc *, int);
132 void rge_exit_oob(struct rge_softc *);
133 void rge_write_csi(struct rge_softc *, uint32_t, uint32_t);
134 uint32_t rge_read_csi(struct rge_softc *, uint32_t);
135 void rge_write_mac_ocp(struct rge_softc *, uint16_t, uint16_t);
136 uint16_t rge_read_mac_ocp(struct rge_softc *, uint16_t);
137 void rge_write_ephy(struct rge_softc *, uint16_t, uint16_t);
138 void rge_write_phy(struct rge_softc *, uint16_t, uint16_t, uint16_t);
139 void rge_write_phy_ocp(struct rge_softc *, uint16_t, uint16_t);
140 uint16_t rge_read_phy_ocp(struct rge_softc *, uint16_t);
141 int rge_get_link_status(struct rge_softc *);
142 void rge_txstart(struct work *, void *);
143 void rge_tick(void *);
144 void rge_link_state(struct rge_softc *);
145
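/*
 * Register/value pairs (from if_rgereg.h) used to program the EPHY and
 * to load the PHY MCU microcode for the two supported RTL8125 MAC
 * revisions.
 */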
146 static const struct {
147 uint16_t reg;
148 uint16_t val;
149 } rtl8125_def_bps[] = {
150 RTL8125_DEF_BPS
151 }, rtl8125_mac_cfg2_ephy[] = {
152 RTL8125_MAC_CFG2_EPHY
153 }, rtl8125_mac_cfg2_mcu[] = {
154 RTL8125_MAC_CFG2_MCU
155 }, rtl8125_mac_cfg3_ephy[] = {
156 RTL8125_MAC_CFG3_EPHY
157 }, rtl8125_mac_cfg3_mcu[] = {
158 RTL8125_MAC_CFG3_MCU
159 };
160
161 CFATTACH_DECL_NEW(rge, sizeof(struct rge_softc), rge_match, rge_attach,
162 NULL, NULL); /* Sevan - detach function? */
163
164 extern struct cfdriver rge_cd;
165
166 static const struct {
167 pci_vendor_id_t vendor;
168 pci_product_id_t product;
169 } rge_devices[] = {
170 { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_E3000 },
171 { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8125 },
172 };
173
174 static int
175 rge_match(device_t parent, cfdata_t match, void *aux)
176 {
177 struct pci_attach_args *pa = aux;
178 int n;
179
180 for (n = 0; n < __arraycount(rge_devices); n++) {
181 if (PCI_VENDOR(pa->pa_id) == rge_devices[n].vendor &&
182 PCI_PRODUCT(pa->pa_id) == rge_devices[n].product)
183 return 1;
184 }
185
186 return 0;
187 }
188
189 static void
190 rge_attach(device_t parent, device_t self, void *aux)
191 {
192 struct rge_softc *sc = device_private(self);
193 struct pci_attach_args *pa = aux;
194 pci_chipset_tag_t pc = pa->pa_pc;
195 pci_intr_handle_t ih;
196 char intrbuf[PCI_INTRSTR_LEN];
197 const char *intrstr = NULL;
198 struct ifnet *ifp;
199 pcireg_t reg;
200 uint32_t hwrev;
201 uint8_t eaddr[ETHER_ADDR_LEN];
202 int offset;
203
204 pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
205
206 sc->sc_dev = self;
207
208 /*
209 * Map control/status registers.
210 */
211 if (pci_mapreg_map(pa, RGE_PCI_BAR2, PCI_MAPREG_TYPE_MEM |
212 PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->rge_btag, &sc->rge_bhandle,
213 NULL, &sc->rge_bsize)) {
214 if (pci_mapreg_map(pa, RGE_PCI_BAR1, PCI_MAPREG_TYPE_MEM |
215 PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->rge_btag,
216 &sc->rge_bhandle, NULL, &sc->rge_bsize)) {
217 if (pci_mapreg_map(pa, RGE_PCI_BAR0, PCI_MAPREG_TYPE_IO,
218 0, &sc->rge_btag, &sc->rge_bhandle, NULL,
219 &sc->rge_bsize)) {
220 aprint_error(": can't map mem or i/o space\n");
221 return;
222 }
223 }
224 }
225
226 /*
227 * Allocate interrupt.
228 */
229 if (pci_intr_map(pa, &ih) == 0)
230 sc->rge_flags |= RGE_FLAG_MSI;	/* XXX not really MSI; NetBSD lacks pci_intr_map_msi() */
231 else {
232 aprint_error(": couldn't map interrupt\n");
233 return;
234 }
235 intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
236 sc->sc_ih = pci_intr_establish_xname(pc, ih, IPL_NET, rge_intr,
237 sc, device_xname(sc->sc_dev));
238 if (sc->sc_ih == NULL) {
239 aprint_error_dev(sc->sc_dev, "couldn't establish interrupt");
240 if (intrstr != NULL)
241 aprint_error(" at %s", intrstr);
242 aprint_error("\n");
243 return;
244 }
245 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
246
247 if (pci_dma64_available(pa))
248 sc->sc_dmat = pa->pa_dmat64;
249 else
250 sc->sc_dmat = pa->pa_dmat;
251
252 sc->sc_pc = pa->pa_pc;
253 sc->sc_tag = pa->pa_tag;
254
255 /* Determine hardware revision */
256 hwrev = RGE_READ_4(sc, RGE_TXCFG) & RGE_TXCFG_HWREV;
257 switch (hwrev) {
258 case 0x60800000:
259 sc->rge_type = MAC_CFG2;
260 break;
261 case 0x60900000:
262 sc->rge_type = MAC_CFG3;
263 break;
264 default:
265 aprint_error(": unknown version 0x%08x\n", hwrev);
266 return;
267 }
268
269 rge_config_imtype(sc, RGE_IMTYPE_SIM);
270
271 /*
272 * PCI Express check.
273 */
274 if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
275 &offset, NULL)) {
276 /* Disable PCIe ASPM. */
277 reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
278 offset + PCIE_LCSR);
279 reg &= ~(PCIE_LCSR_ASPM_L0S | PCIE_LCSR_ASPM_L1);
280 pci_conf_write(pa->pa_pc, pa->pa_tag, offset + PCIE_LCSR,
281 reg);
282 }
283
284 rge_exit_oob(sc);
285 rge_hw_init(sc);
286
287 rge_get_macaddr(sc, eaddr);
288 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
289 ether_sprintf(eaddr));
290
291 memcpy(sc->sc_enaddr, eaddr, ETHER_ADDR_LEN);
292
293 rge_set_phy_power(sc, 1);
294 rge_phy_config(sc);
295
296 if (rge_allocmem(sc))
297 return;
298
299 ifp = &sc->sc_ec.ec_if;
300 ifp->if_softc = sc;
301 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
302 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
303 #ifdef RGE_MPSAFE
304 ifp->if_extflags = IFEF_MPSAFE;
305 #endif
306 ifp->if_ioctl = rge_ioctl;
307 ifp->if_start = rge_start;
308 ifp->if_watchdog = rge_watchdog;
309 IFQ_SET_MAXLEN(&ifp->if_snd, RGE_TX_LIST_CNT);
310 ifp->if_mtu = RGE_JUMBO_MTU;
311
312 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU; /* ethercom, not ifp, capability */
313 ifp->if_capabilities = IFCAP_CSUM_IPv4_Rx | IFCAP_CSUM_IPv4_Tx |
314 IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_UDPv4_Rx | IFCAP_CSUM_UDPv4_Tx;
315
316 #if NVLAN > 0
317 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
318 #endif
319
320 callout_init(&sc->sc_timeout, CALLOUT_FLAGS);
321 callout_setfunc(&sc->sc_timeout, rge_tick, sc);
322 rge_txstart(&sc->sc_task, sc);	/* XXX OpenBSD does task_set(&sc->sc_task, rge_txstart, sc) here */
323
324 /* Initialize ifmedia structures. */
325 ifmedia_init(&sc->sc_media, IFM_IMASK, rge_ifmedia_upd,
326 rge_ifmedia_sts);
327 rge_add_media_types(sc);
328 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
329 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
330 sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media;
331
332 if_attach(ifp);
333 ether_ifattach(ifp, eaddr);
334 }
335
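/*
 * Interrupt handler: acknowledge the interrupt status, process RX/TX
 * completions and switch between plain per-packet interrupts and the
 * timer-based simulated interrupt moderation depending on load.
 */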
336 int
337 rge_intr(void *arg)
338 {
339 struct rge_softc *sc = arg;
340 struct ifnet *ifp = &sc->sc_ec.ec_if;
341 uint32_t status;
342 int claimed = 0, rx, tx;
343
344 if (!(ifp->if_flags & IFF_RUNNING))
345 return (0);
346
347 /* Disable interrupts. */
348 RGE_WRITE_4(sc, RGE_IMR, 0);
349
350 status = RGE_READ_4(sc, RGE_ISR);
351 if (!(sc->rge_flags & RGE_FLAG_MSI)) {
352 if ((status & RGE_INTRS) == 0 || status == 0xffffffff)
353 return (0);
354 }
355 if (status)
356 RGE_WRITE_4(sc, RGE_ISR, status);
357
358 if (status & RGE_ISR_PCS_TIMEOUT)
359 claimed = 1;
360
361 rx = tx = 0;
362 if (status & RGE_INTRS) {
363 if (status &
364 (sc->rge_rx_ack | RGE_ISR_RX_ERR | RGE_ISR_RX_FIFO_OFLOW)) {
365 rx |= rge_rxeof(sc);
366 claimed = 1;
367 }
368
369 if (status & (sc->rge_tx_ack | RGE_ISR_TX_ERR)) {
370 tx |= rge_txeof(sc);
371 claimed = 1;
372 }
373
374 if (status & RGE_ISR_SYSTEM_ERR) {
375 KERNEL_LOCK(1, NULL);
376 rge_init(ifp);
377 KERNEL_UNLOCK_ONE(NULL);
378 claimed = 1;
379 }
380 }
381
382 if (sc->rge_timerintr) {
383 if ((tx | rx) == 0) {
384 /*
385 * Nothing needs to be processed; fall back
386 * to plain TX/RX interrupts.
387 */
388 rge_setup_intr(sc, RGE_IMTYPE_NONE);
389
390 /*
391 * Recollect, mainly to avoid the possible
392 * race introduced by changing interrupt
393 * masks.
394 */
395 rge_rxeof(sc);
396 rge_txeof(sc);
397 } else
398 RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
399 } else if (tx | rx) {
400 /*
401 * Assume that using simulated interrupt moderation
402 * (hardware timer based) could reduce the interrupt
403 * rate.
404 */
405 rge_setup_intr(sc, RGE_IMTYPE_SIM);
406 }
407
408 RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
409
410 return (claimed);
411 }
412
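/*
 * Map an outgoing mbuf chain and fill in one TX descriptor per DMA
 * segment.  Ownership of the first descriptor is handed to the chip
 * last so the whole chain becomes visible at once.  Returns the number
 * of descriptors used, or 0 on failure.
 */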
413 int
414 rge_encap(struct rge_softc *sc, struct mbuf *m, int idx)
415 {
416 struct rge_tx_desc *d = NULL;
417 struct rge_txq *txq;
418 bus_dmamap_t txmap;
419 uint32_t cmdsts, cflags = 0;
420 int cur, error, i, last, nsegs;
421
422 /*
423 * Set RGE_TDEXTSTS_IPCSUM if any checksum offloading is requested.
424 * Otherwise, RGE_TDEXTSTS_TCPCSUM / RGE_TDEXTSTS_UDPCSUM does not
425 * take effect.
426 */
427 if ((m->m_pkthdr.csum_flags &
428 (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) != 0) {
429 cflags |= RGE_TDEXTSTS_IPCSUM;
430 if (m->m_pkthdr.csum_flags & M_CSUM_TCPv4)
431 cflags |= RGE_TDEXTSTS_TCPCSUM;
432 if (m->m_pkthdr.csum_flags & M_CSUM_UDPv4)
433 cflags |= RGE_TDEXTSTS_UDPCSUM;
434 }
435
436 txq = &sc->rge_ldata.rge_txq[idx];
437 txmap = txq->txq_dmamap;
438
439 error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m, BUS_DMA_NOWAIT);
440 switch (error) {
441 case 0:
442 break;
443 case EFBIG: /* mbuf chain is too fragmented */
444 if (m_defrag(m, M_DONTWAIT) == 0 &&
445 bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m,
446 BUS_DMA_NOWAIT) == 0)
447 break;
448
449 /* FALLTHROUGH */
450 default:
451 return (0);
452 }
453
454 bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
455 BUS_DMASYNC_PREWRITE);
456
457 nsegs = txmap->dm_nsegs;
458
459 /* Set up hardware VLAN tagging. */
460 #if NVLAN > 0
461 if (m->m_flags & M_VLANTAG)	/* XXX OpenBSD mbuf VLAN API; NetBSD uses vlan_has_tag()/vlan_get_tag() */
462 cflags |= bswap16(m->m_pkthdr.ether_vtag | RGE_TDEXTSTS_VTAG);
463 #endif
464
465 cur = idx;
466 cmdsts = RGE_TDCMDSTS_SOF;
467
468 for (i = 0; i < txmap->dm_nsegs; i++) {
469 d = &sc->rge_ldata.rge_tx_list[cur];
470
471 d->rge_extsts = htole32(cflags);
472 d->rge_addrlo = htole32(RGE_ADDR_LO(txmap->dm_segs[i].ds_addr));
473 d->rge_addrhi = htole32(RGE_ADDR_HI(txmap->dm_segs[i].ds_addr));
474
475 cmdsts |= txmap->dm_segs[i].ds_len;
476
477 if (cur == RGE_TX_LIST_CNT - 1)
478 cmdsts |= RGE_TDCMDSTS_EOR;
479
480 d->rge_cmdsts = htole32(cmdsts);
481
482 last = cur;
483 cmdsts = RGE_TDCMDSTS_OWN;
484 cur = RGE_NEXT_TX_DESC(cur);
485 }
486
487 /* Set EOF on the last descriptor. */
488 d->rge_cmdsts |= htole32(RGE_TDCMDSTS_EOF);
489
490 /* Transfer ownership of packet to the chip. */
491 d = &sc->rge_ldata.rge_tx_list[idx];
492
493 d->rge_cmdsts |= htole32(RGE_TDCMDSTS_OWN);
494
495 bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
496 cur * sizeof(struct rge_tx_desc), sizeof(struct rge_tx_desc),
497 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
498
499 /* Update info of TX queue and descriptors. */
500 txq->txq_mbuf = m;
501 txq->txq_descidx = last;
502
503 return (nsegs);
504 }
505
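/* Handle ioctl requests; anything not handled here goes to ether_ioctl(). */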
506 int
507 rge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
508 {
509 struct rge_softc *sc = ifp->if_softc;
510 struct ifreq *ifr = (struct ifreq *)data;
511 int s, error = 0;
512
513 s = splnet();
514
515 switch (cmd) {
516 case SIOCSIFADDR:
517 ifp->if_flags |= IFF_UP;
518 if (!(ifp->if_flags & IFF_RUNNING))
519 rge_init(ifp);
520 break;
521 case SIOCSIFFLAGS:
522 if (ifp->if_flags & IFF_UP) {
523 if (ifp->if_flags & IFF_RUNNING)
524 error = ENETRESET;
525 else
526 rge_init(ifp);
527 } else {
528 if (ifp->if_flags & IFF_RUNNING)
529 rge_stop(ifp);
530 }
531 break;
532 case SIOCGIFMEDIA:
533 case SIOCSIFMEDIA:
534 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
535 break;
536 case SIOCSIFMTU:
537 if (ifr->ifr_mtu > ifp->if_mtu) {
538 error = EINVAL;
539 break;
540 }
541 ifp->if_mtu = ifr->ifr_mtu;
542 break;
543 default:
544 error = ether_ioctl(ifp, cmd, data);
545 }
546
547 if (error == ENETRESET) {
548 if (ifp->if_flags & IFF_RUNNING)
549 rge_iff(sc);
550 error = 0;
551 }
552
553 splx(s);
554 return (error);
555 }
556
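/*
 * Transmit start routine: drain the send queue into free TX descriptors,
 * mark the interface OACTIVE when the ring fills, then poke the
 * transmit start register.
 */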
557 void
558 rge_start(struct ifnet *ifp)
559 {
560 struct rge_softc *sc = ifp->if_softc;
561 struct mbuf *m;
562 int free, idx, used;
563 int queued = 0;
564
565 #define LINK_STATE_IS_UP(_s) \
566 ((_s) >= LINK_STATE_UP || (_s) == LINK_STATE_UNKNOWN)
567
568 if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
569 IFQ_PURGE(&ifp->if_snd);
570 return;
571 }
572
573 /* Calculate free space. */
574 idx = sc->rge_ldata.rge_txq_prodidx;
575 free = sc->rge_ldata.rge_txq_considx;
576 if (free <= idx)
577 free += RGE_TX_LIST_CNT;
578 free -= idx;
579
580 for (;;) {
581 if (RGE_TX_NSEGS >= free + 2) {
582 SET(ifp->if_flags, IFF_OACTIVE);
583 break;
584 }
585
586 IFQ_DEQUEUE(&ifp->if_snd, m);
587 if (m == NULL)
588 break;
589
590 used = rge_encap(sc, m, idx);
591 if (used == 0) {
592 m_freem(m);
593 continue;
594 }
595
596 KASSERT(used <= free);
597 free -= used;
598
599 #if NBPFILTER > 0
600 if (ifp->if_bpf)
601 bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
602 #endif
603
604 idx += used;
605 if (idx >= RGE_TX_LIST_CNT)
606 idx -= RGE_TX_LIST_CNT;
607
608 queued++;
609 }
610
611 if (queued == 0)
612 return;
613
614 /* Set a timeout in case the chip goes out to lunch. */
615 ifp->if_timer = 5;
616
617 sc->rge_ldata.rge_txq_prodidx = idx;
618 rge_txstart(&sc->sc_task, sc);	/* XXX OpenBSD: ifq_serialize(ifq, &sc->sc_task) */
619 }
620
621 void
622 rge_watchdog(struct ifnet *ifp)
623 {
624 struct rge_softc *sc = ifp->if_softc;
625
626 aprint_error_dev(sc->sc_dev, "watchdog timeout\n");
627 if_statinc(ifp, if_oerrors);
628
629 rge_init(ifp);
630 }
631
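/*
 * Initialize the hardware: program the MAC address and maximum frame
 * size, set up the RX/TX descriptor rings, apply the chip-specific
 * register fixups and enable interrupts.
 */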
632 int
633 rge_init(struct ifnet *ifp)
634 {
635 struct rge_softc *sc = ifp->if_softc;
636 uint32_t val;
637 uint16_t max_frame_size;
638 int i;
639
640 rge_stop(ifp);
641
642 /* Set MAC address. */
643 rge_set_macaddr(sc, sc->sc_enaddr);
644
645 /* Set the maximum frame size, but don't let the MTU drop below ETHERMTU. */
646 if (ifp->if_mtu < ETHERMTU)
647 max_frame_size = ETHERMTU;
648 else
649 max_frame_size = ifp->if_mtu;
650
651 max_frame_size += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
652 ETHER_CRC_LEN + 1;
653
654 if (max_frame_size > RGE_JUMBO_FRAMELEN)
655 max_frame_size -= 1;
656
657 RGE_WRITE_2(sc, RGE_RXMAXSIZE, max_frame_size);
658
659 /* Initialize RX descriptors list. */
660 if (rge_rx_list_init(sc) == ENOBUFS) {
661 aprint_error_dev(sc->sc_dev,
662 "init failed: no memory for RX buffers\n");
663 rge_stop(ifp);
664 return (ENOBUFS);
665 }
666
667 /* Initialize TX descriptors. */
668 rge_tx_list_init(sc);
669
670 /* Load the addresses of the RX and TX lists into the chip. */
671 RGE_WRITE_4(sc, RGE_RXDESC_ADDR_LO,
672 RGE_ADDR_LO(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
673 RGE_WRITE_4(sc, RGE_RXDESC_ADDR_HI,
674 RGE_ADDR_HI(sc->rge_ldata.rge_rx_list_map->dm_segs[0].ds_addr));
675 RGE_WRITE_4(sc, RGE_TXDESC_ADDR_LO,
676 RGE_ADDR_LO(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
677 RGE_WRITE_4(sc, RGE_TXDESC_ADDR_HI,
678 RGE_ADDR_HI(sc->rge_ldata.rge_tx_list_map->dm_segs[0].ds_addr));
679
680 RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
681
682 RGE_CLRBIT_1(sc, 0xf1, 0x80);
683 RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
684 RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
685 RGE_CLRBIT_1(sc, RGE_CFG3, RGE_CFG3_RDY_TO_L23);
686
687 /* Clear interrupt moderation timer. */
688 for (i = 0; i < 64; i++)
689 RGE_WRITE_4(sc, RGE_IM(i), 0);
690
691 /* Set the initial RX and TX configurations. */
692 RGE_WRITE_4(sc, RGE_RXCFG, RGE_RXCFG_CONFIG);
693 RGE_WRITE_4(sc, RGE_TXCFG, RGE_TXCFG_CONFIG);
694
695 val = rge_read_csi(sc, 0x70c) & ~0xff000000;
696 rge_write_csi(sc, 0x70c, val | 0x27000000);
697
698 /* Enable hardware optimization function. */
699 val = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x78) & ~0x00007000;
700 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x78, val | 0x00005000);
701
702 RGE_WRITE_2(sc, 0x0382, 0x221b);
703 RGE_WRITE_1(sc, 0x4500, 0);
704 RGE_WRITE_2(sc, 0x4800, 0);
705 RGE_CLRBIT_1(sc, RGE_CFG1, RGE_CFG1_SPEED_DOWN);
706
707 rge_write_mac_ocp(sc, 0xc140, 0xffff);
708 rge_write_mac_ocp(sc, 0xc142, 0xffff);
709
710 val = rge_read_mac_ocp(sc, 0xd3e2) & ~0x0fff;
711 rge_write_mac_ocp(sc, 0xd3e2, val | 0x03a9);
712
713 RGE_MAC_CLRBIT(sc, 0xd3e4, 0x00ff);
714 RGE_MAC_SETBIT(sc, 0xe860, 0x0080);
715 RGE_MAC_SETBIT(sc, 0xeb58, 0x0001);
716
717 val = rge_read_mac_ocp(sc, 0xe614) & ~0x0700;
718 rge_write_mac_ocp(sc, 0xe614, val | 0x0400);
719
720 RGE_MAC_CLRBIT(sc, 0xe63e, 0x0c00);
721
722 val = rge_read_mac_ocp(sc, 0xe63e) & ~0x0030;
723 rge_write_mac_ocp(sc, 0xe63e, val | 0x0020);
724
725 RGE_MAC_SETBIT(sc, 0xc0b4, 0x000c);
726
727 val = rge_read_mac_ocp(sc, 0xeb6a) & ~0x007f;
728 rge_write_mac_ocp(sc, 0xeb6a, val | 0x0033);
729
730 val = rge_read_mac_ocp(sc, 0xeb50) & ~0x03e0;
731 rge_write_mac_ocp(sc, 0xeb50, val | 0x0040);
732
733 val = rge_read_mac_ocp(sc, 0xe056) & ~0x00f0;
734 rge_write_mac_ocp(sc, 0xe056, val | 0x0030);
735
736 RGE_WRITE_1(sc, RGE_TDFNR, 0x10);
737
738 RGE_MAC_CLRBIT(sc, 0xe040, 0x1000);
739
740 val = rge_read_mac_ocp(sc, 0xe0c0) & ~0x4f0f;
741 rge_write_mac_ocp(sc, 0xe0c0, val | 0x4403);
742
743 RGE_MAC_SETBIT(sc, 0xe052, 0x0068);
744 RGE_MAC_CLRBIT(sc, 0xe052, 0x0080);
745
746 val = rge_read_mac_ocp(sc, 0xc0ac) & ~0x0080;
747 rge_write_mac_ocp(sc, 0xc0ac, val | 0x1f00);
748
749 val = rge_read_mac_ocp(sc, 0xd430) & ~0x0fff;
750 rge_write_mac_ocp(sc, 0xd430, val | 0x047f);
751
752 RGE_MAC_SETBIT(sc, 0xe84c, 0x00c0);
753
754 /* Disable EEE plus. */
755 RGE_MAC_CLRBIT(sc, 0xe080, 0x0002);
756
757 RGE_MAC_CLRBIT(sc, 0xea1c, 0x0004);
758
759 RGE_MAC_SETBIT(sc, 0xeb54, 0x0001);
760 DELAY(1);
761 RGE_MAC_CLRBIT(sc, 0xeb54, 0x0001);
762
763 RGE_CLRBIT_4(sc, 0x1880, 0x0030);
764
765 rge_write_mac_ocp(sc, 0xe098, 0xc302);
766
767 if (sc->sc_ec.ec_capabilities & ETHERCAP_VLAN_HWTAGGING)
768 RGE_SETBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);
769
770 RGE_SETBIT_2(sc, RGE_CPLUSCMD, RGE_CPLUSCMD_RXCSUM);
771
772 for (i = 0; i < 10; i++) {
773 if (!(rge_read_mac_ocp(sc, 0xe00e) & 0x2000))
774 break;
775 DELAY(1000);
776 }
777
778 /* Disable RXDV gate. */
779 RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
780 DELAY(2000);
781
782 rge_ifmedia_upd(ifp);
783
784 /* Enable transmit and receive. */
785 RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_TXENB | RGE_CMD_RXENB);
786
787 /* Program promiscuous mode and multicast filters. */
788 rge_iff(sc);
789
790 RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
791 RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
792
793 RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
794
795 /* Enable interrupts. */
796 rge_setup_intr(sc, RGE_IMTYPE_SIM);
797
798 ifp->if_flags |= IFF_RUNNING;
799 CLR(ifp->if_flags, IFF_OACTIVE);
800
801 callout_schedule(&sc->sc_timeout, 1);
802
803 return (0);
804 }
805
806 /*
807 * Stop the adapter and free any mbufs allocated to the RX and TX lists.
808 */
809 void
810 rge_stop(struct ifnet *ifp)
811 {
812 struct rge_softc *sc = ifp->if_softc;
813 int i;
814
815 callout_stop(&sc->sc_timeout);
816
817 ifp->if_timer = 0;
818 ifp->if_flags &= ~IFF_RUNNING;
819 sc->rge_timerintr = 0;
820
821 RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
822 RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
823 RGE_RXCFG_ERRPKT);
824
825 RGE_WRITE_4(sc, RGE_IMR, 0);
826 RGE_WRITE_4(sc, RGE_ISR, 0xffffffff);
827
828 rge_reset(sc);
829
830 intr_barrier(sc->sc_ih);
831 /* ifq_barrier(&ifp->if_snd); XXX OpenBSD queue API */
832 /* ifq_clr_oactive(&ifp->if_snd); Sevan - OpenBSD queue API */
833
834 if (sc->rge_head != NULL) {
835 m_freem(sc->rge_head);
836 sc->rge_head = sc->rge_tail = NULL;
837 }
838
839 /* Free the TX list buffers. */
840 for (i = 0; i < RGE_TX_LIST_CNT; i++) {
841 if (sc->rge_ldata.rge_txq[i].txq_mbuf != NULL) {
842 bus_dmamap_unload(sc->sc_dmat,
843 sc->rge_ldata.rge_txq[i].txq_dmamap);
844 m_freem(sc->rge_ldata.rge_txq[i].txq_mbuf);
845 sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
846 }
847 }
848
849 /* Free the RX list buffers. */
850 for (i = 0; i < RGE_RX_LIST_CNT; i++) {
851 if (sc->rge_ldata.rge_rxq[i].rxq_mbuf != NULL) {
852 bus_dmamap_unload(sc->sc_dmat,
853 sc->rge_ldata.rge_rxq[i].rxq_dmamap);
854 m_freem(sc->rge_ldata.rge_rxq[i].rxq_mbuf);
855 sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
856 }
857 }
858 }
859
860 /*
861 * Set media options.
862 */
863 int
864 rge_ifmedia_upd(struct ifnet *ifp)
865 {
866 struct rge_softc *sc = ifp->if_softc;
867 struct ifmedia *ifm = &sc->sc_media;
868 int anar, gig, val;
869
870 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
871 return (EINVAL);
872
873 /* Disable Gigabit Lite. */
874 RGE_PHY_CLRBIT(sc, 0xa428, 0x0200);
875 RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0001);
876
877 val = rge_read_phy_ocp(sc, 0xa5d4);
878 val &= ~RGE_ADV_2500TFDX;
879
880 anar = gig = 0;
881 switch (IFM_SUBTYPE(ifm->ifm_media)) {
882 case IFM_AUTO:
883 anar |= ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
884 gig |= GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
885 val |= RGE_ADV_2500TFDX;
886 break;
887 case IFM_2500_T:
888 anar |= ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
889 gig |= GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
890 val |= RGE_ADV_2500TFDX;
891 ifp->if_baudrate = IF_Mbps(2500);
892 break;
893 case IFM_1000_T:
894 anar |= ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
895 gig |= GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
896 ifp->if_baudrate = IF_Gbps(1);
897 break;
898 case IFM_100_TX:
899 anar |= ANAR_TX | ANAR_TX_FD;
900 ifp->if_baudrate = IF_Mbps(100);
901 break;
902 case IFM_10_T:
903 anar |= ANAR_10 | ANAR_10_FD;
904 ifp->if_baudrate = IF_Mbps(10);
905 break;
906 default:
907 aprint_error_dev(sc->sc_dev,
908 "unsupported media type\n");
909 return (EINVAL);
910 }
911
912 rge_write_phy(sc, 0, MII_ANAR, anar | ANAR_PAUSE_ASYM | ANAR_FC);
913 rge_write_phy(sc, 0, MII_100T2CR, gig);
914 rge_write_phy_ocp(sc, 0xa5d4, val);
915 rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_STARTNEG);
916
917 return (0);
918 }
919
920 /*
921 * Report current media status.
922 */
923 void
924 rge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
925 {
926 struct rge_softc *sc = ifp->if_softc;
927 uint16_t status = 0;
928
929 ifmr->ifm_status = IFM_AVALID;
930 ifmr->ifm_active = IFM_ETHER;
931
932 if (rge_get_link_status(sc)) {
933 ifmr->ifm_status |= IFM_ACTIVE;
934
935 status = RGE_READ_2(sc, RGE_PHYSTAT);
936 if ((status & RGE_PHYSTAT_FDX) ||
937 (status & RGE_PHYSTAT_2500MBPS))
938 ifmr->ifm_active |= IFM_FDX;
939 else
940 ifmr->ifm_active |= IFM_HDX;
941
942 if (status & RGE_PHYSTAT_10MBPS)
943 ifmr->ifm_active |= IFM_10_T;
944 else if (status & RGE_PHYSTAT_100MBPS)
945 ifmr->ifm_active |= IFM_100_TX;
946 else if (status & RGE_PHYSTAT_1000MBPS)
947 ifmr->ifm_active |= IFM_1000_T;
948 else if (status & RGE_PHYSTAT_2500MBPS)
949 ifmr->ifm_active |= IFM_2500_T;
950 }
951 }
952
953 /*
954 * Allocate memory for RX/TX rings.
955 */
956 int
957 rge_allocmem(struct rge_softc *sc)
958 {
959 int error, i;
960
961 /* Allocate DMA'able memory for the TX ring. */
962 error = bus_dmamap_create(sc->sc_dmat, RGE_TX_LIST_SZ, 1,
963 RGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT, &sc->rge_ldata.rge_tx_list_map);
964 if (error) {
965 aprint_error_dev(sc->sc_dev, "can't create TX list map\n");
966 return (error);
967 }
968 error = bus_dmamem_alloc(sc->sc_dmat, RGE_TX_LIST_SZ, RGE_ALIGN, 0,
969 &sc->rge_ldata.rge_tx_listseg, 1, &sc->rge_ldata.rge_tx_listnseg,
970 BUS_DMA_NOWAIT); /* XXX OpenBSD adds BUS_DMA_ZERO */
971 if (error) {
972 aprint_error_dev(sc->sc_dev, "can't alloc TX list\n");
973 return (error);
974 }
975
976 /* Load the map for the TX ring. */
977 error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
978 sc->rge_ldata.rge_tx_listnseg, RGE_TX_LIST_SZ,
979 (void **) &sc->rge_ldata.rge_tx_list,
980 BUS_DMA_NOWAIT); /* XXX OpenBSD adds BUS_DMA_COHERENT */
981 if (error) {
982 aprint_error_dev(sc->sc_dev, "can't map TX dma buffers\n");
983 bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
984 sc->rge_ldata.rge_tx_listnseg);
985 return (error);
986 }
987 error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
988 sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
989 if (error) {
990 aprint_error_dev(sc->sc_dev, "can't load TX dma map\n");
991 bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map);
992 bus_dmamem_unmap(sc->sc_dmat,
993 sc->rge_ldata.rge_tx_list, RGE_TX_LIST_SZ);
994 bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_tx_listseg,
995 sc->rge_ldata.rge_tx_listnseg);
996 return (error);
997 }
998
999 /* Create DMA maps for TX buffers. */
1000 for (i = 0; i < RGE_TX_LIST_CNT; i++) {
1001 error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN,
1002 RGE_TX_NSEGS, RGE_JUMBO_FRAMELEN, 0, 0,
1003 &sc->rge_ldata.rge_txq[i].txq_dmamap);
1004 if (error) {
1005 aprint_error_dev(sc->sc_dev, "can't create DMA map for TX\n");
1006 return (error);
1007 }
1008 }
1009
1010 /* Allocate DMA'able memory for the RX ring. */
1011 error = bus_dmamap_create(sc->sc_dmat, RGE_RX_LIST_SZ, 1,
1012 RGE_RX_LIST_SZ, 0, 0, &sc->rge_ldata.rge_rx_list_map);
1013 if (error) {
1014 aprint_error_dev(sc->sc_dev, "can't create RX list map\n");
1015 return (error);
1016 }
1017 error = bus_dmamem_alloc(sc->sc_dmat, RGE_RX_LIST_SZ, RGE_ALIGN, 0,
1018 &sc->rge_ldata.rge_rx_listseg, 1, &sc->rge_ldata.rge_rx_listnseg,
1019 BUS_DMA_NOWAIT); /* XXX OpenBSD adds BUS_DMA_ZERO */
1020 if (error) {
1021 aprint_error_dev(sc->sc_dev, "can't alloc RX list\n");
1022 return (error);
1023 }
1024
1025 /* Load the map for the RX ring. */
1026 error = bus_dmamem_map(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
1027 sc->rge_ldata.rge_rx_listnseg, RGE_RX_LIST_SZ,
1028 (void **) &sc->rge_ldata.rge_rx_list,
1029 BUS_DMA_NOWAIT); /* XXX OpenBSD adds BUS_DMA_COHERENT */
1030 if (error) {
1031 aprint_error_dev(sc->sc_dev, "can't map RX dma buffers\n");
1032 bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
1033 sc->rge_ldata.rge_rx_listnseg);
1034 return (error);
1035 }
1036 error = bus_dmamap_load(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
1037 sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
1038 if (error) {
1039 aprint_error_dev(sc->sc_dev, "can't load RX dma map\n");
1040 bus_dmamap_destroy(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map);
1041 bus_dmamem_unmap(sc->sc_dmat,
1042 sc->rge_ldata.rge_rx_list, RGE_RX_LIST_SZ);
1043 bus_dmamem_free(sc->sc_dmat, &sc->rge_ldata.rge_rx_listseg,
1044 sc->rge_ldata.rge_rx_listnseg);
1045 return (error);
1046 }
1047
1048 /* Create DMA maps for RX buffers. */
1049 for (i = 0; i < RGE_RX_LIST_CNT; i++) {
1050 error = bus_dmamap_create(sc->sc_dmat, RGE_JUMBO_FRAMELEN, 1,
1051 RGE_JUMBO_FRAMELEN, 0, 0,
1052 &sc->rge_ldata.rge_rxq[i].rxq_dmamap);
1053 if (error) {
1054 aprint_error_dev(sc->sc_dev, "can't create DMA map for RX\n");
1055 return (error);
1056 }
1057 }
1058
1059 return (error);
1060 }
1061
1062 /*
1063 * Initialize the RX descriptor and attach an mbuf cluster.
1064 */
1065 int
1066 rge_newbuf(struct rge_softc *sc, int idx)
1067 {
1068 struct mbuf *m;
1069 struct rge_rx_desc *r;
1070 struct rge_rxq *rxq;
1071 bus_dmamap_t rxmap;
1072
1073 m = MCLGETI(NULL, M_DONTWAIT, NULL, RGE_JUMBO_FRAMELEN);
1074 if (m == NULL)
1075 return (ENOBUFS);
1076
1077 m->m_len = m->m_pkthdr.len = RGE_JUMBO_FRAMELEN;
1078
1079 rxq = &sc->rge_ldata.rge_rxq[idx];
1080 rxmap = rxq->rxq_dmamap;
1081
1082 if (bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m, BUS_DMA_NOWAIT))
1083 goto out;
1084
1085 bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
1086 BUS_DMASYNC_PREREAD);
1087
1088 /* Map the segments into RX descriptors. */
1089 r = &sc->rge_ldata.rge_rx_list[idx];
1090
1091 if (RGE_OWN(r)) {
1092 aprint_error_dev(sc->sc_dev, "tried to map busy RX descriptor\n");
1093 goto out;
1094 }
1095
1096 rxq->rxq_mbuf = m;
1097
1098 r->rge_extsts = 0;
1099 r->rge_addrlo = htole32(RGE_ADDR_LO(rxmap->dm_segs[0].ds_addr));
1100 r->rge_addrhi = htole32(RGE_ADDR_HI(rxmap->dm_segs[0].ds_addr));
1101
1102 r->rge_cmdsts = htole32(rxmap->dm_segs[0].ds_len);
1103 if (idx == RGE_RX_LIST_CNT - 1)
1104 r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
1105
1106 r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);
1107
1108 bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
1109 idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
1110 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1111
1112 return (0);
1113 out:
1114 if (m != NULL)
1115 m_freem(m);
1116 return (ENOMEM);
1117 }
1118
1119 void
1120 rge_discard_rxbuf(struct rge_softc *sc, int idx)
1121 {
1122 struct rge_rx_desc *r;
1123
1124 r = &sc->rge_ldata.rge_rx_list[idx];
1125
1126 r->rge_cmdsts = htole32(RGE_JUMBO_FRAMELEN);
1127 r->rge_extsts = 0;
1128 if (idx == RGE_RX_LIST_CNT - 1)
1129 r->rge_cmdsts |= htole32(RGE_RDCMDSTS_EOR);
1130 r->rge_cmdsts |= htole32(RGE_RDCMDSTS_OWN);
1131
1132 bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
1133 idx * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
1134 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1135 }
1136
1137 int
1138 rge_rx_list_init(struct rge_softc *sc)
1139 {
1140 int i;
1141
1142 memset(sc->rge_ldata.rge_rx_list, 0, RGE_RX_LIST_SZ);
1143
1144 for (i = 0; i < RGE_RX_LIST_CNT; i++) {
1145 sc->rge_ldata.rge_rxq[i].rxq_mbuf = NULL;
1146 if (rge_newbuf(sc, i) == ENOBUFS)
1147 return (ENOBUFS);
1148 }
1149
1150 sc->rge_ldata.rge_rxq_prodidx = 0;
1151 sc->rge_head = sc->rge_tail = NULL;
1152
1153 return (0);
1154 }
1155
1156 void
1157 rge_tx_list_init(struct rge_softc *sc)
1158 {
1159 int i;
1160
1161 memset(sc->rge_ldata.rge_tx_list, 0, RGE_TX_LIST_SZ);
1162
1163 for (i = 0; i < RGE_TX_LIST_CNT; i++)
1164 sc->rge_ldata.rge_txq[i].txq_mbuf = NULL;
1165
1166 bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map, 0,
1167 sc->rge_ldata.rge_tx_list_map->dm_mapsize,
1168 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1169
1170 sc->rge_ldata.rge_txq_prodidx = sc->rge_ldata.rge_txq_considx = 0;
1171 }
1172
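/*
 * RX completion: walk the ring from the producer index until a
 * descriptor still owned by the chip is found, replace each full buffer
 * with a fresh mbuf and pass the packets up the stack.
 * XXX ml_enqueue()/if_input(&ml) below follow OpenBSD's list-based input
 * API; the shim at the top of this file only declares struct mbuf_list,
 * so a NetBSD build presumably has to hand packets up individually
 * instead (e.g. m_set_rcvif() followed by if_percpuq_enqueue()).
 */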
1173 int
1174 rge_rxeof(struct rge_softc *sc)
1175 {
1176 struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1177 struct mbuf *m;
1178 struct ifnet *ifp = &sc->sc_ec.ec_if;
1179 struct rge_rx_desc *cur_rx;
1180 struct rge_rxq *rxq;
1181 uint32_t rxstat, extsts;
1182 int i, total_len, rx = 0;
1183
1184 for (i = sc->rge_ldata.rge_rxq_prodidx; ; i = RGE_NEXT_RX_DESC(i)) {
1185 /* Invalidate the descriptor memory. */
1186 bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_rx_list_map,
1187 i * sizeof(struct rge_rx_desc), sizeof(struct rge_rx_desc),
1188 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1189
1190 cur_rx = &sc->rge_ldata.rge_rx_list[i];
1191
1192 if (RGE_OWN(cur_rx))
1193 break;
1194
1195 rxstat = letoh32(cur_rx->rge_cmdsts);
1196 extsts = letoh32(cur_rx->rge_extsts);
1197
1198 total_len = RGE_RXBYTES(cur_rx);
1199 rxq = &sc->rge_ldata.rge_rxq[i];
1200 m = rxq->rxq_mbuf;
1201 rx = 1;
1202
1203 /* Invalidate the RX mbuf and unload its map. */
1204 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_dmamap, 0,
1205 rxq->rxq_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1206 bus_dmamap_unload(sc->sc_dmat, rxq->rxq_dmamap);
1207
1208 if ((rxstat & (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) !=
1209 (RGE_RDCMDSTS_SOF | RGE_RDCMDSTS_EOF)) {
1210 rge_discard_rxbuf(sc, i);
1211 continue;
1212 }
1213
1214 if (rxstat & RGE_RDCMDSTS_RXERRSUM) {
1215 if_statinc(ifp, if_ierrors);
1216 /*
1217 * If this is part of a multi-fragment packet,
1218 * discard all the pieces.
1219 */
1220 if (sc->rge_head != NULL) {
1221 m_freem(sc->rge_head);
1222 sc->rge_head = sc->rge_tail = NULL;
1223 }
1224 rge_discard_rxbuf(sc, i);
1225 continue;
1226 }
1227
1228 /*
1229 * If allocating a replacement mbuf fails,
1230 * reload the current one.
1231 */
1232
1233 if (rge_newbuf(sc, i) == ENOBUFS) {
1234 if (sc->rge_head != NULL) {
1235 m_freem(sc->rge_head);
1236 sc->rge_head = sc->rge_tail = NULL;
1237 }
1238 rge_discard_rxbuf(sc, i);
1239 continue;
1240 }
1241
1242 if (sc->rge_head != NULL) {
1243 m->m_len = total_len;
1244 /*
1245 * Special case: if there's 4 bytes or less
1246 * in this buffer, the mbuf can be discarded:
1247 * the last 4 bytes are the CRC, which we don't
1248 * care about anyway.
1249 */
1250 if (m->m_len <= ETHER_CRC_LEN) {
1251 sc->rge_tail->m_len -=
1252 (ETHER_CRC_LEN - m->m_len);
1253 m_freem(m);
1254 } else {
1255 m->m_len -= ETHER_CRC_LEN;
1256 m->m_flags &= ~M_PKTHDR;
1257 sc->rge_tail->m_next = m;
1258 }
1259 m = sc->rge_head;
1260 sc->rge_head = sc->rge_tail = NULL;
1261 m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
1262 } else
1263 m->m_pkthdr.len = m->m_len =
1264 (total_len - ETHER_CRC_LEN);
1265
1266 /* Check IP header checksum. */
1267 if (!(rxstat & RGE_RDCMDSTS_IPCSUMERR) &&
1268 (extsts & RGE_RDEXTSTS_IPV4))
1269 m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
1270
1271 /* Check TCP/UDP checksum. */
1272 if ((extsts & (RGE_RDEXTSTS_IPV4 | RGE_RDEXTSTS_IPV6)) &&
1273 (((rxstat & RGE_RDCMDSTS_TCPPKT) &&
1274 !(rxstat & RGE_RDCMDSTS_TCPCSUMERR)) ||
1275 ((rxstat & RGE_RDCMDSTS_UDPPKT) &&
1276 !(rxstat & RGE_RDCMDSTS_UDPCSUMERR))))
1277 m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
1278 M_UDP_CSUM_IN_OK;
1279
1280 #if NVLAN > 0
1281 if (extsts & RGE_RDEXTSTS_VTAG) {
1282 m->m_pkthdr.ether_vtag =
1283 ntohs(extsts & RGE_RDEXTSTS_VLAN_MASK);
1284 m->m_flags |= M_VLANTAG;
1285 }
1286 #endif
1287
1288 ml_enqueue(&ml, m);
1289 }
1290
1291 sc->rge_ldata.rge_rxq_prodidx = i;
1292
1293 if_input(ifp, &ml);
1294
1295 return (rx);
1296 }
1297
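/*
 * TX completion: reclaim descriptors the chip has finished with, free
 * the associated mbufs and update the error/collision counters.
 * XXX ifq_is_oactive()/ifq_restart()/ifq_serialize() at the bottom are
 * OpenBSD ifq primitives; the NetBSD equivalent would be to clear
 * IFF_OACTIVE and call the start routine again.
 */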
1298 int
1299 rge_txeof(struct rge_softc *sc)
1300 {
1301 struct ifnet *ifp = &sc->sc_ec.ec_if;
1302 struct rge_txq *txq;
1303 uint32_t txstat;
1304 int cons, idx, prod;
1305 int free = 0;
1306
1307 prod = sc->rge_ldata.rge_txq_prodidx;
1308 cons = sc->rge_ldata.rge_txq_considx;
1309
1310 while (prod != cons) {
1311 txq = &sc->rge_ldata.rge_txq[cons];
1312 idx = txq->txq_descidx;
1313
1314 bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
1315 idx * sizeof(struct rge_tx_desc),
1316 sizeof(struct rge_tx_desc),
1317 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1318
1319 txstat = letoh32(sc->rge_ldata.rge_tx_list[idx].rge_cmdsts);
1320
1321 if (txstat & RGE_TDCMDSTS_OWN) {
1322 free = 2;
1323 break;
1324 }
1325
1326 bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap, 0,
1327 txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1328 bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
1329 m_freem(txq->txq_mbuf);
1330 txq->txq_mbuf = NULL;
1331
1332 if (txstat & (RGE_TDCMDSTS_EXCESSCOLL | RGE_TDCMDSTS_COLL))
1333 if_statinc(ifp, if_collisions);
1334 if (txstat & RGE_TDCMDSTS_TXERR)
1335 if_statinc(ifp, if_oerrors);
1336
1337 bus_dmamap_sync(sc->sc_dmat, sc->rge_ldata.rge_tx_list_map,
1338 idx * sizeof(struct rge_tx_desc),
1339 sizeof(struct rge_tx_desc),
1340 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1341
1342 cons = RGE_NEXT_TX_DESC(idx);
1343 free = 1;
1344 }
1345
1346 if (free == 0)
1347 return (0);
1348
1349 sc->rge_ldata.rge_txq_considx = cons;
1350
1351 if (ifq_is_oactive(&ifp->if_snd))
1352 ifq_restart(&ifp->if_snd);
1353 else if (free == 2)
1354 ifq_serialize(&ifp->if_snd, &sc->sc_task);
1355 else
1356 ifp->if_timer = 0;
1357
1358 return (1);
1359 }
1360
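/*
 * Reset the chip: raise the RXDV gate, wait for the FIFOs to drain,
 * then issue a software reset and poll until it completes.
 */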
1361 void
1362 rge_reset(struct rge_softc *sc)
1363 {
1364 int i;
1365
1366 /* Enable RXDV gate. */
1367 RGE_SETBIT_1(sc, RGE_PPSW, 0x08);
1368 DELAY(2000);
1369
1370 for (i = 0; i < 10; i++) {
1371 DELAY(100);
1372 if ((RGE_READ_1(sc, RGE_MCUCMD) & (RGE_MCUCMD_RXFIFO_EMPTY |
1373 RGE_MCUCMD_TXFIFO_EMPTY)) == (RGE_MCUCMD_RXFIFO_EMPTY |
1374 RGE_MCUCMD_TXFIFO_EMPTY))
1375 break;
1376 }
1377
1378 /* Soft reset. */
1379 RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_RESET);
1380
1381 for (i = 0; i < RGE_TIMEOUT; i++) {
1382 DELAY(100);
1383 if (!(RGE_READ_1(sc, RGE_CMD) & RGE_CMD_RESET))
1384 break;
1385 }
1386 if (i == RGE_TIMEOUT)
1387 aprint_error_dev(sc->sc_dev, "reset never completed!\n");
1388 }
1389
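/*
 * Program the receive filter.  Unicast and broadcast frames are always
 * accepted; multicast is either accepted wholesale (promiscuous or
 * allmulti) or via a 64-bit hash indexed by the top six bits of the
 * big-endian CRC-32 of each multicast address.
 */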
1390 void
1391 rge_iff(struct rge_softc *sc)
1392 {
1393 struct ifnet *ifp = &sc->sc_ec.ec_if;
1394 struct ethercom *ac = &sc->sc_ec;
1395 struct ether_multi *enm;
1396 struct ether_multistep step;
1397 uint32_t hashes[2];
1398 uint32_t rxfilt;
1399 int h = 0;
1400
1401 rxfilt = RGE_READ_4(sc, RGE_RXCFG);
1402 rxfilt &= ~(RGE_RXCFG_ALLPHYS | RGE_RXCFG_MULTI);
1403 ifp->if_flags &= ~IFF_ALLMULTI;
1404
1405 /*
1406 * Always accept frames destined to our station address.
1407 * Always accept broadcast frames.
1408 */
1409 rxfilt |= RGE_RXCFG_INDIV | RGE_RXCFG_BROAD;
1410
1411 if (ifp->if_flags & IFF_PROMISC || ac->ec_multirangecnt > 0) {
1412 ifp->if_flags |= IFF_ALLMULTI;
1413 rxfilt |= RGE_RXCFG_MULTI;
1414 if (ifp->if_flags & IFF_PROMISC)
1415 rxfilt |= RGE_RXCFG_ALLPHYS;
1416 hashes[0] = hashes[1] = 0xffffffff;
1417 } else {
1418 rxfilt |= RGE_RXCFG_MULTI;
1419 /* Program new filter. */
1420 memset(hashes, 0, sizeof(hashes));
1421
1422 ETHER_FIRST_MULTI(step, ac, enm);
1423 while (enm != NULL) {
1424 h = ether_crc32_be(enm->enm_addrlo,
1425 ETHER_ADDR_LEN) >> 26;
1426
1427 if (h < 32)
1428 hashes[0] |= (1 << h);
1429 else
1430 hashes[1] |= (1 << (h - 32));
1431
1432 ETHER_NEXT_MULTI(step, enm);
1433 }
1434 }
1435
1436 RGE_WRITE_4(sc, RGE_RXCFG, rxfilt);
1437 RGE_WRITE_4(sc, RGE_MAR0, bswap32(hashes[1]));
1438 RGE_WRITE_4(sc, RGE_MAR4, bswap32(hashes[0]));
1439 }
1440
1441 void
1442 rge_set_phy_power(struct rge_softc *sc, int on)
1443 {
1444 int i;
1445
1446 if (on) {
1447 RGE_SETBIT_1(sc, RGE_PMCH, 0xc0);
1448
1449 rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN);
1450
1451 for (i = 0; i < RGE_TIMEOUT; i++) {
1452 if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 3)
1453 break;
1454 DELAY(1000);
1455 }
1456 } else
1457 rge_write_phy(sc, 0, MII_BMCR, BMCR_AUTOEN | BMCR_PDOWN);
1458 }
1459
1460 void
1461 rge_phy_config(struct rge_softc *sc)
1462 {
1463 uint16_t mcode_ver, val;
1464 int i;
1465 static const uint16_t mac_cfg3_a438_value[] =
1466 { 0x0043, 0x00a7, 0x00d6, 0x00ec, 0x00f6, 0x00fb, 0x00fd, 0x00ff,
1467 0x00bb, 0x0058, 0x0029, 0x0013, 0x0009, 0x0004, 0x0002 };
1468
1469 static const uint16_t mac_cfg3_b88e_value[] =
1470 { 0xc091, 0x6e12, 0xc092, 0x1214, 0xc094, 0x1516, 0xc096, 0x171b,
1471 0xc098, 0x1b1c, 0xc09a, 0x1f1f, 0xc09c, 0x2021, 0xc09e, 0x2224,
1472 0xc0a0, 0x2424, 0xc0a2, 0x2424, 0xc0a4, 0x2424, 0xc018, 0x0af2,
1473 0xc01a, 0x0d4a, 0xc01c, 0x0f26, 0xc01e, 0x118d, 0xc020, 0x14f3,
1474 0xc022, 0x175a, 0xc024, 0x19c0, 0xc026, 0x1c26, 0xc089, 0x6050,
1475 0xc08a, 0x5f6e, 0xc08c, 0x6e6e, 0xc08e, 0x6e6e, 0xc090, 0x6e12 };
1476
1477 /* Read microcode version. */
1478 rge_write_phy_ocp(sc, 0xa436, 0x801e);
1479 mcode_ver = rge_read_phy_ocp(sc, 0xa438);
1480
1481 if (sc->rge_type == MAC_CFG2) {
1482 for (i = 0; i < nitems(rtl8125_mac_cfg2_ephy); i++) {
1483 rge_write_ephy(sc, rtl8125_mac_cfg2_ephy[i].reg,
1484 rtl8125_mac_cfg2_ephy[i].val);
1485 }
1486
1487 if (mcode_ver != RGE_MAC_CFG2_MCODE_VER) {
1488 /* Disable PHY config. */
1489 RGE_CLRBIT_1(sc, 0xf2, 0x20);
1490 DELAY(1000);
1491
1492 rge_patch_phy_mcu(sc, 1);
1493
1494 rge_write_phy_ocp(sc, 0xa436, 0x8024);
1495 rge_write_phy_ocp(sc, 0xa438, 0x8600);
1496 rge_write_phy_ocp(sc, 0xa436, 0xb82e);
1497 rge_write_phy_ocp(sc, 0xa438, 0x0001);
1498
1499 RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
1500 for (i = 0; i < nitems(rtl8125_mac_cfg2_mcu); i++) {
1501 rge_write_phy_ocp(sc,
1502 rtl8125_mac_cfg2_mcu[i].reg,
1503 rtl8125_mac_cfg2_mcu[i].val);
1504 }
1505 RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);
1506
1507 rge_write_phy_ocp(sc, 0xa436, 0);
1508 rge_write_phy_ocp(sc, 0xa438, 0);
1509 RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
1510 rge_write_phy_ocp(sc, 0xa436, 0x8024);
1511 rge_write_phy_ocp(sc, 0xa438, 0);
1512
1513 rge_patch_phy_mcu(sc, 0);
1514
1515 /* Enable PHY config. */
1516 RGE_SETBIT_1(sc, 0xf2, 0x20);
1517
1518 /* Write microcode version. */
1519 rge_write_phy_ocp(sc, 0xa436, 0x801e);
1520 rge_write_phy_ocp(sc, 0xa438, RGE_MAC_CFG2_MCODE_VER);
1521 }
1522
1523 val = rge_read_phy_ocp(sc, 0xad40) & ~0x03ff;
1524 rge_write_phy_ocp(sc, 0xad40, val | 0x0084);
1525 RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
1526 val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
1527 rge_write_phy_ocp(sc, 0xad16, val | 0x0006);
1528 val = rge_read_phy_ocp(sc, 0xad32) & ~0x03ff;
1529 rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
1530 RGE_PHY_CLRBIT(sc, 0xac08, 0x1100);
1531 val = rge_read_phy_ocp(sc, 0xac8a) & ~0xf000;
1532 rge_write_phy_ocp(sc, 0xac8a, val | 0x7000);
1533 RGE_PHY_SETBIT(sc, 0xad18, 0x0400);
1534 RGE_PHY_SETBIT(sc, 0xad1a, 0x03ff);
1535 RGE_PHY_SETBIT(sc, 0xad1c, 0x03ff);
1536
1537 rge_write_phy_ocp(sc, 0xa436, 0x80ea);
1538 val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1539 rge_write_phy_ocp(sc, 0xa438, val | 0xc400);
1540 rge_write_phy_ocp(sc, 0xa436, 0x80eb);
1541 val = rge_read_phy_ocp(sc, 0xa438) & ~0x0700;
1542 rge_write_phy_ocp(sc, 0xa438, val | 0x0300);
1543 rge_write_phy_ocp(sc, 0xa436, 0x80f8);
1544 val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1545 rge_write_phy_ocp(sc, 0xa438, val | 0x1c00);
1546 rge_write_phy_ocp(sc, 0xa436, 0x80f1);
1547 val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1548 rge_write_phy_ocp(sc, 0xa438, val | 0x3000);
1549 rge_write_phy_ocp(sc, 0xa436, 0x80fe);
1550 val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1551 rge_write_phy_ocp(sc, 0xa438, val | 0xa500);
1552 rge_write_phy_ocp(sc, 0xa436, 0x8102);
1553 val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1554 rge_write_phy_ocp(sc, 0xa438, val | 0x5000);
1555 rge_write_phy_ocp(sc, 0xa436, 0x8105);
1556 val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1557 rge_write_phy_ocp(sc, 0xa438, val | 0x3300);
1558 rge_write_phy_ocp(sc, 0xa436, 0x8100);
1559 val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1560 rge_write_phy_ocp(sc, 0xa438, val | 0x7000);
1561 rge_write_phy_ocp(sc, 0xa436, 0x8104);
1562 val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1563 rge_write_phy_ocp(sc, 0xa438, val | 0xf000);
1564 rge_write_phy_ocp(sc, 0xa436, 0x8106);
1565 val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1566 rge_write_phy_ocp(sc, 0xa438, val | 0x6500);
1567 rge_write_phy_ocp(sc, 0xa436, 0x80dc);
1568 val = rge_read_phy_ocp(sc, 0xa438) & ~0xff00;
1569 rge_write_phy_ocp(sc, 0xa438, val | 0xed00);
1570 rge_write_phy_ocp(sc, 0xa436, 0x80df);
1571 RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
1572 rge_write_phy_ocp(sc, 0xa436, 0x80e1);
1573 RGE_PHY_CLRBIT(sc, 0xa438, 0x0100);
1574 val = rge_read_phy_ocp(sc, 0xbf06) & ~0x003f;
1575 rge_write_phy_ocp(sc, 0xbf06, val | 0x0038);
1576 rge_write_phy_ocp(sc, 0xa436, 0x819f);
1577 rge_write_phy_ocp(sc, 0xa438, 0xd0b6);
1578 rge_write_phy_ocp(sc, 0xbc34, 0x5555);
1579 val = rge_read_phy_ocp(sc, 0xbf0a) & ~0x0e00;
1580 rge_write_phy_ocp(sc, 0xbf0a, val | 0x0a00);
1581 RGE_PHY_CLRBIT(sc, 0xa5c0, 0x0400);
1582 RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1583 } else {
1584 for (i = 0; i < nitems(rtl8125_mac_cfg3_ephy); i++)
1585 rge_write_ephy(sc, rtl8125_mac_cfg3_ephy[i].reg,
1586 rtl8125_mac_cfg3_ephy[i].val);
1587
1588 if (mcode_ver != RGE_MAC_CFG3_MCODE_VER) {
1589 /* Disable PHY config. */
1590 RGE_CLRBIT_1(sc, 0xf2, 0x20);
1591 DELAY(1000);
1592
1593 rge_patch_phy_mcu(sc, 1);
1594
1595 rge_write_phy_ocp(sc, 0xa436, 0x8024);
1596 rge_write_phy_ocp(sc, 0xa438, 0x8601);
1597 rge_write_phy_ocp(sc, 0xa436, 0xb82e);
1598 rge_write_phy_ocp(sc, 0xa438, 0x0001);
1599
1600 RGE_PHY_SETBIT(sc, 0xb820, 0x0080);
1601 for (i = 0; i < nitems(rtl8125_mac_cfg3_mcu); i++) {
1602 rge_write_phy_ocp(sc,
1603 rtl8125_mac_cfg3_mcu[i].reg,
1604 rtl8125_mac_cfg3_mcu[i].val);
1605 }
1606 RGE_PHY_CLRBIT(sc, 0xb820, 0x0080);
1607
1608 rge_write_phy_ocp(sc, 0xa436, 0);
1609 rge_write_phy_ocp(sc, 0xa438, 0);
1610 RGE_PHY_CLRBIT(sc, 0xb82e, 0x0001);
1611 rge_write_phy_ocp(sc, 0xa436, 0x8024);
1612 rge_write_phy_ocp(sc, 0xa438, 0);
1613
1614 rge_patch_phy_mcu(sc, 0);
1615
1616 /* Enable PHY config. */
1617 RGE_SETBIT_1(sc, 0xf2, 0x20);
1618
1619 /* Write microcode version. */
1620 rge_write_phy_ocp(sc, 0xa436, 0x801e);
1621 rge_write_phy_ocp(sc, 0xa438, RGE_MAC_CFG3_MCODE_VER);
1622 }
1623
1624 RGE_PHY_SETBIT(sc, 0xad4e, 0x0010);
1625 val = rge_read_phy_ocp(sc, 0xad16) & ~0x03ff;
1626 rge_write_phy_ocp(sc, 0xad16, val | 0x03ff);
1627 val = rge_read_phy_ocp(sc, 0xad32) & ~0x003f;
1628 rge_write_phy_ocp(sc, 0xad32, val | 0x0006);
1629 RGE_PHY_CLRBIT(sc, 0xac08, 0x1000);
1630 RGE_PHY_CLRBIT(sc, 0xac08, 0x0100);
1631 val = rge_read_phy_ocp(sc, 0xacc0) & ~0x0003;
1632 rge_write_phy_ocp(sc, 0xacc0, val | 0x0002);
1633 val = rge_read_phy_ocp(sc, 0xad40) & ~0x00e0;
1634 rge_write_phy_ocp(sc, 0xad40, val | 0x0040);
1635 val = rge_read_phy_ocp(sc, 0xad40) & ~0x0007;
1636 rge_write_phy_ocp(sc, 0xad40, val | 0x0004);
1637 RGE_PHY_CLRBIT(sc, 0xac14, 0x0080);
1638 RGE_PHY_CLRBIT(sc, 0xac80, 0x0300);
1639 val = rge_read_phy_ocp(sc, 0xac5e) & ~0x0007;
1640 rge_write_phy_ocp(sc, 0xac5e, val | 0x0002);
1641 rge_write_phy_ocp(sc, 0xad4c, 0x00a8);
1642 rge_write_phy_ocp(sc, 0xac5c, 0x01ff);
1643 val = rge_read_phy_ocp(sc, 0xac8a) & ~0x00f0;
1644 rge_write_phy_ocp(sc, 0xac8a, val | 0x0030);
1645 rge_write_phy_ocp(sc, 0xb87c, 0x80a2);
1646 rge_write_phy_ocp(sc, 0xb87e, 0x0153);
1647 rge_write_phy_ocp(sc, 0xb87c, 0x809c);
1648 rge_write_phy_ocp(sc, 0xb87e, 0x0153);
1649
1650 rge_write_phy_ocp(sc, 0xa436, 0x81b3);
1651 for (i = 0; i < nitems(mac_cfg3_a438_value); i++)
1652 rge_write_phy_ocp(sc, 0xa438, mac_cfg3_a438_value[i]);
1653 for (i = 0; i < 26; i++)
1654 rge_write_phy_ocp(sc, 0xa438, 0);
1655 rge_write_phy_ocp(sc, 0xa436, 0x8257);
1656 rge_write_phy_ocp(sc, 0xa438, 0x020f);
1657 rge_write_phy_ocp(sc, 0xa436, 0x80ea);
1658 rge_write_phy_ocp(sc, 0xa438, 0x7843);
1659
1660 rge_patch_phy_mcu(sc, 1);
1661 RGE_PHY_CLRBIT(sc, 0xb896, 0x0001);
1662 RGE_PHY_CLRBIT(sc, 0xb892, 0xff00);
1663 for (i = 0; i < nitems(mac_cfg3_b88e_value); i += 2) {
1664 rge_write_phy_ocp(sc, 0xb88e, mac_cfg3_b88e_value[i]);
1665 rge_write_phy_ocp(sc, 0xb890,
1666 mac_cfg3_b88e_value[i + 1]);
1667 }
1668 RGE_PHY_SETBIT(sc, 0xb896, 0x0001);
1669 rge_patch_phy_mcu(sc, 0);
1670
1671 RGE_PHY_SETBIT(sc, 0xd068, 0x2000);
1672 rge_write_phy_ocp(sc, 0xa436, 0x81a2);
1673 RGE_PHY_SETBIT(sc, 0xa438, 0x0100);
1674 val = rge_read_phy_ocp(sc, 0xb54c) & ~0xff00;
1675 rge_write_phy_ocp(sc, 0xb54c, val | 0xdb00);
1676 RGE_PHY_CLRBIT(sc, 0xa454, 0x0001);
1677 RGE_PHY_SETBIT(sc, 0xa5d4, 0x0020);
1678 RGE_PHY_CLRBIT(sc, 0xad4e, 0x0010);
1679 RGE_PHY_CLRBIT(sc, 0xa86a, 0x0001);
1680 RGE_PHY_SETBIT(sc, 0xa442, 0x0800);
1681 }
1682
1683 /* Disable EEE. */
1684 RGE_MAC_CLRBIT(sc, 0xe040, 0x0003);
1685 RGE_MAC_CLRBIT(sc, 0xeb62, 0x0006);
1686 RGE_PHY_CLRBIT(sc, 0xa432, 0x0010);
1687 RGE_PHY_CLRBIT(sc, 0xa5d0, 0x0006);
1688 RGE_PHY_CLRBIT(sc, 0xa6d4, 0x0001);
1689 RGE_PHY_CLRBIT(sc, 0xa6d8, 0x0010);
1690 RGE_PHY_CLRBIT(sc, 0xa428, 0x0080);
1691 RGE_PHY_CLRBIT(sc, 0xa4a2, 0x0200);
1692
1693 rge_patch_phy_mcu(sc, 1);
1694 RGE_MAC_CLRBIT(sc, 0xe052, 0x0001);
1695 RGE_PHY_CLRBIT(sc, 0xa442, 0x3000);
1696 RGE_PHY_CLRBIT(sc, 0xa430, 0x8000);
1697 rge_patch_phy_mcu(sc, 0);
1698 }
1699
1700 void
1701 rge_set_macaddr(struct rge_softc *sc, const uint8_t *addr)
1702 {
1703 RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
1704 RGE_WRITE_4(sc, RGE_MAC0,
1705 addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
1706 RGE_WRITE_4(sc, RGE_MAC4,
1707 addr[5] << 8 | addr[4]);
1708 RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
1709 }
1710
1711 void
1712 rge_get_macaddr(struct rge_softc *sc, uint8_t *addr)
1713 {
1714 *(uint32_t *)&addr[0] = RGE_READ_4(sc, RGE_ADDR0);
1715 *(uint16_t *)&addr[4] = RGE_READ_2(sc, RGE_ADDR1);
1716 }
1717
1718 void
1719 rge_hw_init(struct rge_softc *sc)
1720 {
1721 int i;
1722
1723 RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
1724 RGE_CLRBIT_1(sc, RGE_CFG5, RGE_CFG5_PME_STS);
1725 RGE_CLRBIT_1(sc, RGE_CFG2, RGE_CFG2_CLKREQ_EN);
1726 RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
1727 RGE_CLRBIT_1(sc, 0xf1, 0x80);
1728
1729 /* Disable UPS. */
1730 RGE_MAC_CLRBIT(sc, 0xd40a, 0x0010);
1731
1732 /* Configure MAC MCU. */
1733 rge_write_mac_ocp(sc, 0xfc38, 0);
1734
1735 for (i = 0xfc28; i < 0xfc38; i += 2)
1736 rge_write_mac_ocp(sc, i, 0);
1737
1738 DELAY(3000);
1739 rge_write_mac_ocp(sc, 0xfc26, 0);
1740
1741 if (sc->rge_type == MAC_CFG3) {
1742 for (i = 0; i < nitems(rtl8125_def_bps); i++)
1743 rge_write_mac_ocp(sc, rtl8125_def_bps[i].reg,
1744 rtl8125_def_bps[i].val);
1745 }
1746
1747 /* Disable PHY power saving. */
1748 rge_disable_phy_ocp_pwrsave(sc);
1749
1750 /* Set PCIe uncorrectable error status. */
1751 rge_write_csi(sc, 0x108,
1752 rge_read_csi(sc, 0x108) | 0x00100000);
1753 }
1754
1755 void
1756 rge_disable_phy_ocp_pwrsave(struct rge_softc *sc)
1757 {
1758 if (rge_read_phy_ocp(sc, 0xc416) != 0x0500) {
1759 rge_patch_phy_mcu(sc, 1);
1760 rge_write_phy_ocp(sc, 0xc416, 0);
1761 rge_write_phy_ocp(sc, 0xc416, 0x0500);
1762 rge_patch_phy_mcu(sc, 0);
1763 }
1764 }
1765
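/*
 * Enter or leave PHY MCU patch mode by toggling a bit in OCP register
 * 0xb820, then poll the patch-ready flag (0x0040) in OCP register 0xb800.
 */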
1766 void
1767 rge_patch_phy_mcu(struct rge_softc *sc, int set)
1768 {
1769 uint16_t val;
1770 int i;
1771
1772 if (set)
1773 RGE_PHY_SETBIT(sc, 0xb820, 0x0010);
1774 else
1775 RGE_PHY_CLRBIT(sc, 0xb820, 0x0010);
1776
1777 for (i = 0; i < 1000; i++) {
1778 val = rge_read_phy_ocp(sc, 0xb800) & 0x0040;
1779 DELAY(100);
1780 if (val == 0x0040)
1781 break;
1782 }
1783 if (i == 1000)
1784 aprint_error_dev(sc->sc_dev, "timeout waiting to patch phy mcu\n");
1785 }
1786
1787 void
1788 rge_add_media_types(struct rge_softc *sc)
1789 {
1790 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
1791 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
1792 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
1793 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
1794 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T, 0, NULL);
1795 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
1796 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
1797 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX, 0, NULL);
1798 }
1799
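/*
 * Select the interrupt scheme: RGE_IMTYPE_NONE acknowledges RX/TX
 * interrupts directly, while RGE_IMTYPE_SIM moderates them with the
 * chip's timer interrupt (reported as a PCS timeout).
 */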
1800 void
1801 rge_config_imtype(struct rge_softc *sc, int imtype)
1802 {
1803 switch (imtype) {
1804 case RGE_IMTYPE_NONE:
1805 sc->rge_intrs = RGE_INTRS;
1806 sc->rge_rx_ack = RGE_ISR_RX_OK | RGE_ISR_RX_DESC_UNAVAIL |
1807 RGE_ISR_RX_FIFO_OFLOW;
1808 sc->rge_tx_ack = RGE_ISR_TX_OK;
1809 break;
1810 case RGE_IMTYPE_SIM:
1811 sc->rge_intrs = RGE_INTRS_TIMER;
1812 sc->rge_rx_ack = RGE_ISR_PCS_TIMEOUT;
1813 sc->rge_tx_ack = RGE_ISR_PCS_TIMEOUT;
1814 break;
1815 default:
1816 panic("%s: unknown imtype %d", device_xname(sc->sc_dev), imtype);
1817 }
1818 }
1819
1820 void
1821 rge_disable_sim_im(struct rge_softc *sc)
1822 {
1823 RGE_WRITE_4(sc, RGE_TIMERINT, 0);
1824 sc->rge_timerintr = 0;
1825 }
1826
1827 void
1828 rge_setup_sim_im(struct rge_softc *sc)
1829 {
1830 RGE_WRITE_4(sc, RGE_TIMERINT, 0x2600);
1831 RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
1832 sc->rge_timerintr = 1;
1833 }
1834
1835 void
1836 rge_setup_intr(struct rge_softc *sc, int imtype)
1837 {
1838 rge_config_imtype(sc, imtype);
1839
1840 /* Enable interrupts. */
1841 RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);
1842
1843 switch (imtype) {
1844 case RGE_IMTYPE_NONE:
1845 rge_disable_sim_im(sc);
1846 break;
1847 case RGE_IMTYPE_SIM:
1848 rge_setup_sim_im(sc);
1849 break;
1850 default:
1851 panic("%s: unknown imtype %d", device_xname(sc->sc_dev), imtype);
1852 }
1853 }
1854
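/*
 * Bring the chip out of OOB (out-of-band management) mode so the driver
 * owns the MAC, and undo any power-saving state the firmware may have
 * left the PHY in.
 */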
1855 void
1856 rge_exit_oob(struct rge_softc *sc)
1857 {
1858 int i;
1859
1860 RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
1861 RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
1862 RGE_RXCFG_ERRPKT);
1863
1864 /* Disable RealWoW. */
1865 rge_write_mac_ocp(sc, 0xc0bc, 0x00ff);
1866
1867 rge_reset(sc);
1868
1869 /* Disable OOB. */
1870 RGE_CLRBIT_1(sc, RGE_MCUCMD, RGE_MCUCMD_IS_OOB);
1871
1872 RGE_MAC_CLRBIT(sc, 0xe8de, 0x4000);
1873
1874 for (i = 0; i < 10; i++) {
1875 DELAY(100);
1876 if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
1877 break;
1878 }
1879
1880 rge_write_mac_ocp(sc, 0xc0aa, 0x07d0);
1881 rge_write_mac_ocp(sc, 0xc0a6, 0x0150);
1882 rge_write_mac_ocp(sc, 0xc01e, 0x5555);
1883
1884 for (i = 0; i < 10; i++) {
1885 DELAY(100);
1886 if (RGE_READ_2(sc, RGE_TWICMD) & 0x0200)
1887 break;
1888 }
1889
1890 if (rge_read_mac_ocp(sc, 0xd42c) & 0x0100) {
1891 for (i = 0; i < RGE_TIMEOUT; i++) {
1892 if ((rge_read_phy_ocp(sc, 0xa420) & 0x0007) == 2)
1893 break;
1894 DELAY(1000);
1895 }
1896 RGE_MAC_CLRBIT(sc, 0xd408, 0x0100);
1897 RGE_PHY_CLRBIT(sc, 0xa468, 0x000a);
1898 }
1899 }
1900
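/*
 * CSI accessors: an indirect window, apparently into the chip's PCIe
 * configuration registers, used by rge_hw_init() to set the
 * uncorrectable-error bit at 0x108 and by rge_init() to tune 0x70c.
 */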
1901 void
1902 rge_write_csi(struct rge_softc *sc, uint32_t reg, uint32_t val)
1903 {
1904 int i;
1905
1906 RGE_WRITE_4(sc, RGE_CSIDR, val);
1907 RGE_WRITE_4(sc, RGE_CSIAR, (1 << 16) | (reg & RGE_CSIAR_ADDR_MASK) |
1908 (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT) | RGE_CSIAR_BUSY);
1909
1910 for (i = 0; i < 10; i++) {
1911 DELAY(100);
1912 if (!(RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY))
1913 break;
1914 }
1915
1916 DELAY(20);
1917 }
1918
1919 uint32_t
1920 rge_read_csi(struct rge_softc *sc, uint32_t reg)
1921 {
1922 int i;
1923
1924 RGE_WRITE_4(sc, RGE_CSIAR, (1 << 16) | (reg & RGE_CSIAR_ADDR_MASK) |
1925 (RGE_CSIAR_BYTE_EN << RGE_CSIAR_BYTE_EN_SHIFT));
1926
1927 for (i = 0; i < 10; i++) {
1928 DELAY(100);
1929 if (RGE_READ_4(sc, RGE_CSIAR) & RGE_CSIAR_BUSY)
1930 break;
1931 }
1932
1933 DELAY(20);
1934
1935 return (RGE_READ_4(sc, RGE_CSIDR));
1936 }
1937
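/*
 * MAC OCP accessors: the halved 16-bit register address goes into the
 * upper bits of RGE_MACOCP, the data into the low half; the busy flag
 * distinguishes writes from reads.
 */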
1938 void
1939 rge_write_mac_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
1940 {
1941 uint32_t tmp;
1942
1943 tmp = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
1944 tmp += val;
1945 tmp |= RGE_MACOCP_BUSY;
1946 RGE_WRITE_4(sc, RGE_MACOCP, tmp);
1947 }
1948
1949 uint16_t
1950 rge_read_mac_ocp(struct rge_softc *sc, uint16_t reg)
1951 {
1952 uint32_t val;
1953
1954 val = (reg >> 1) << RGE_MACOCP_ADDR_SHIFT;
1955 RGE_WRITE_4(sc, RGE_MACOCP, val);
1956
1957 return (RGE_READ_4(sc, RGE_MACOCP) & RGE_MACOCP_DATA_MASK);
1958 }
1959
1960 void
1961 rge_write_ephy(struct rge_softc *sc, uint16_t reg, uint16_t val)
1962 {
1963 uint32_t tmp;
1964 int i;
1965
1966 tmp = (reg & RGE_EPHYAR_ADDR_MASK) << RGE_EPHYAR_ADDR_SHIFT;
1967 tmp |= RGE_EPHYAR_BUSY | (val & RGE_EPHYAR_DATA_MASK);
1968 RGE_WRITE_4(sc, RGE_EPHYAR, tmp);
1969
1970 for (i = 0; i < 10; i++) {
1971 DELAY(100);
1972 if (!(RGE_READ_4(sc, RGE_EPHYAR) & RGE_EPHYAR_BUSY))
1973 break;
1974 }
1975
1976 DELAY(20);
1977 }
1978
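/*
 * Write a PHY register through the OCP window.  With addr == 0 the
 * standard MII registers are folded into the page at RGE_PHYBASE, e.g.
 * for MII_ANAR (reg 4):
 *   phyaddr = (RGE_PHYBASE + 4 / 8) << 4;  off = 0x10 + 4 % 8 = 0x14;
 *   phyaddr += (0x14 - 16) << 1;
 * i.e. an OCP address of (RGE_PHYBASE << 4) + 8.
 */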
1979 void
1980 rge_write_phy(struct rge_softc *sc, uint16_t addr, uint16_t reg, uint16_t val)
1981 {
1982 uint16_t off, phyaddr;
1983
1984 phyaddr = addr ? addr : RGE_PHYBASE + (reg / 8);
1985 phyaddr <<= 4;
1986
1987 off = addr ? reg : 0x10 + (reg % 8);
1988
1989 phyaddr += (off - 16) << 1;
1990
1991 rge_write_phy_ocp(sc, phyaddr, val);
1992 }
1993
1994 void
1995 rge_write_phy_ocp(struct rge_softc *sc, uint16_t reg, uint16_t val)
1996 {
1997 uint32_t tmp;
1998 int i;
1999
2000 tmp = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
2001 tmp |= RGE_PHYOCP_BUSY | val;
2002 RGE_WRITE_4(sc, RGE_PHYOCP, tmp);
2003
2004 for (i = 0; i < RGE_TIMEOUT; i++) {
2005 DELAY(1);
2006 if (!(RGE_READ_4(sc, RGE_PHYOCP) & RGE_PHYOCP_BUSY))
2007 break;
2008 }
2009 }
2010
2011 uint16_t
2012 rge_read_phy_ocp(struct rge_softc *sc, uint16_t reg)
2013 {
2014 uint32_t val;
2015 int i;
2016
2017 val = (reg >> 1) << RGE_PHYOCP_ADDR_SHIFT;
2018 RGE_WRITE_4(sc, RGE_PHYOCP, val);
2019
2020 for (i = 0; i < RGE_TIMEOUT; i++) {
2021 DELAY(1);
2022 val = RGE_READ_4(sc, RGE_PHYOCP);
2023 if (val & RGE_PHYOCP_BUSY)
2024 break;
2025 }
2026
2027 return (val & RGE_PHYOCP_DATA_MASK);
2028 }
2029
2030 int
2031 rge_get_link_status(struct rge_softc *sc)
2032 {
2033 return ((RGE_READ_2(sc, RGE_PHYSTAT) & RGE_PHYSTAT_LINK) ? 1 : 0);
2034 }
2035
2036 void
2037 rge_txstart(struct work *wk, void *arg)
2038 {
2039 struct rge_softc *sc = arg;
2040
2041 RGE_WRITE_2(sc, RGE_TXSTART, RGE_TXSTART_START);
2042 }
2043
2044 void
2045 rge_tick(void *arg)
2046 {
2047 struct rge_softc *sc = arg;
2048 int s;
2049
2050 s = splnet();
2051 rge_link_state(sc);
2052 splx(s);
2053
2054 callout_schedule(&sc->sc_timeout, hz);
2055 }
2056
2057 void
2058 rge_link_state(struct rge_softc *sc)
2059 {
2060 struct ifnet *ifp = &sc->sc_ec.ec_if;
2061 int link = LINK_STATE_DOWN;
2062
2063 if (rge_get_link_status(sc))
2064 link = LINK_STATE_UP;
2065
2066 if (ifp->if_link_state != link) {
2067 ifp->if_link_state = link;
2068 if_link_state_change(ifp, link);
2069 }
2070 }
2071