/*	$NetBSD: rtl8169.c,v 1.90.2.1 2007/12/13 21:55:36 bouyer Exp $	*/

/*
 * Copyright (c) 1997, 1998-2003
 *	Bill Paul <wpaul (at) windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rtl8169.c,v 1.90.2.1 2007/12/13 21:55:36 bouyer Exp $");
/* $FreeBSD: /repoman/r/ncvs/src/sys/dev/re/if_re.c,v 1.20 2004/04/11 20:34:08 ru Exp $ */

/*
 * RealTek 8139C+/8169/8169S/8110S PCI NIC driver
 *
 * Written by Bill Paul <wpaul (at) windriver.com>
 * Senior Networking Software Engineer
 * Wind River Systems
 */

/*
 * This driver is designed to support RealTek's next generation of
 * 10/100 and 10/100/1000 PCI ethernet controllers.  There are currently
 * four devices in this family: the RTL8139C+, the RTL8169, the RTL8169S
 * and the RTL8110S.
 *
 * The 8139C+ is a 10/100 ethernet chip.  It is backwards compatible
 * with the older 8139 family; however, it also supports a special
 * C+ mode of operation that provides several new performance-enhancing
 * features.  These include:
 *
 *	o Descriptor based DMA mechanism.  Each descriptor represents
 *	  a single packet fragment.  Data buffers may be aligned on
 *	  any byte boundary.
 *
 *	o 64-bit DMA
 *
 *	o TCP/IP checksum offload for both RX and TX
 *
 *	o High and normal priority transmit DMA rings
 *
 *	o VLAN tag insertion and extraction
 *
 *	o TCP large send (segmentation offload)
 *
 * Like the 8139, the 8139C+ also has a built-in 10/100 PHY.  The C+
 * programming API is fairly straightforward.  The RX filtering, EEPROM
 * access and PHY access is the same as it is on the older 8139 series
 * chips.
 *
 * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC.  It has almost the
 * same programming API and feature set as the 8139C+ with the following
 * differences and additions:
 *
 *	o 1000Mbps mode
 *
 *	o Jumbo frames
 *
 *	o GMII and TBI ports/registers for interfacing with copper
 *	  or fiber PHYs
 *
 *	o RX and TX DMA rings can have up to 1024 descriptors
 *	  (the 8139C+ allows a maximum of 64)
 *
 *	o Slight differences in register layout from the 8139C+
 *
 * The TX start and timer interrupt registers are at different locations
 * on the 8169 than they are on the 8139C+.  Also, the status word in the
 * RX descriptor has a slightly different bit layout.  The 8169 does not
 * have a built-in PHY.  Most reference boards use a Marvell 88E1000 'Alaska'
 * copper gigE PHY.
 *
 * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs
 * (the 'S' stands for 'single-chip').  These devices have the same
 * programming API as the older 8169, but also have some vendor-specific
 * registers for the on-board PHY.  The 8110S is a LAN-on-motherboard
 * part designed to be pin-compatible with the RealTek 8100 10/100 chip.
 *
 * This driver takes advantage of the RX and TX checksum offload and
 * VLAN tag insertion/extraction features.  It also implements TX
 * interrupt moderation using the timer interrupt registers, which
 * significantly reduces TX interrupt load.  There is also support
 * for jumbo frames; however, the 8169/8169S/8110S cannot transmit
 * jumbo frames larger than 7.5K, so the maximum MTU possible with this
 * driver is 7500 bytes.
 */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/if_vlanvar.h>

#include <netinet/in_systm.h>	/* XXX for IP_MAXPACKET */
#include <netinet/in.h>		/* XXX for IP_MAXPACKET */
#include <netinet/ip.h>		/* XXX for IP_MAXPACKET */

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <sys/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/ic/rtl81x9reg.h>
#include <dev/ic/rtl81x9var.h>

#include <dev/ic/rtl8169var.h>

static inline void re_set_bufaddr(struct re_desc *, bus_addr_t);

static int re_newbuf(struct rtk_softc *, int, struct mbuf *);
static int re_rx_list_init(struct rtk_softc *);
static int re_tx_list_init(struct rtk_softc *);
static void re_rxeof(struct rtk_softc *);
static void re_txeof(struct rtk_softc *);
static void re_tick(void *);
static void re_start(struct ifnet *);
static int re_ioctl(struct ifnet *, u_long, void *);
static int re_init(struct ifnet *);
static void re_stop(struct ifnet *, int);
static void re_watchdog(struct ifnet *);

static int re_enable(struct rtk_softc *);
static void re_disable(struct rtk_softc *);

static int re_ifmedia_upd(struct ifnet *);
static void re_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static int re_gmii_readreg(struct device *, int, int);
static void re_gmii_writereg(struct device *, int, int, int);

static int re_miibus_readreg(struct device *, int, int);
static void re_miibus_writereg(struct device *, int, int, int);
static void re_miibus_statchg(struct device *);

static void re_reset(struct rtk_softc *);

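/*
 * re_set_bufaddr:
 *	Store a DMA buffer address into a descriptor, splitting it into
 *	the 32-bit low and high words used by the hardware.  The high
 *	word is only meaningful when bus_addr_t is 64 bits wide.
 */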
static inline void
re_set_bufaddr(struct re_desc *d, bus_addr_t addr)
{

	d->re_bufaddr_lo = htole32((uint32_t)addr);
	if (sizeof(bus_addr_t) == sizeof(uint64_t))
		d->re_bufaddr_hi = htole32((uint64_t)addr >> 32);
	else
		d->re_bufaddr_hi = 0;
}

static int
re_gmii_readreg(struct device *self, int phy, int reg)
{
	struct rtk_softc *sc = (void *)self;
	uint32_t rval;
	int i;

	if (phy != 7)
		return 0;

	/* Let the rgephy driver read the GMEDIASTAT register */

	if (reg == RTK_GMEDIASTAT) {
		rval = CSR_READ_1(sc, RTK_GMEDIASTAT);
		return rval;
	}

	CSR_WRITE_4(sc, RTK_PHYAR, reg << 16);
	DELAY(1000);

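	/* Poll until the chip sets RTK_PHYAR_BUSY, indicating the read data is valid. */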
	for (i = 0; i < RTK_TIMEOUT; i++) {
		rval = CSR_READ_4(sc, RTK_PHYAR);
		if (rval & RTK_PHYAR_BUSY)
			break;
		DELAY(100);
	}

	if (i == RTK_TIMEOUT) {
		aprint_error("%s: PHY read failed\n", sc->sc_dev.dv_xname);
		return 0;
	}

	return rval & RTK_PHYAR_PHYDATA;
}

static void
re_gmii_writereg(struct device *dev, int phy, int reg, int data)
{
	struct rtk_softc *sc = (void *)dev;
	uint32_t rval;
	int i;

	CSR_WRITE_4(sc, RTK_PHYAR, (reg << 16) |
	    (data & RTK_PHYAR_PHYDATA) | RTK_PHYAR_BUSY);
	DELAY(1000);

	for (i = 0; i < RTK_TIMEOUT; i++) {
		rval = CSR_READ_4(sc, RTK_PHYAR);
		if (!(rval & RTK_PHYAR_BUSY))
			break;
		DELAY(100);
	}

	if (i == RTK_TIMEOUT) {
		aprint_error("%s: PHY write reg %x <- %x failed\n",
		    sc->sc_dev.dv_xname, reg, data);
	}
}

static int
re_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct rtk_softc *sc = (void *)dev;
	uint16_t rval = 0;
	uint16_t re8139_reg = 0;
	int s;

	s = splnet();

	if ((sc->sc_quirk & RTKQ_8139CPLUS) == 0) {
		rval = re_gmii_readreg(dev, phy, reg);
		splx(s);
		return rval;
	}

	/* Pretend the internal PHY is only at address 0 */
	if (phy) {
		splx(s);
		return 0;
	}
	switch (reg) {
	case MII_BMCR:
		re8139_reg = RTK_BMCR;
		break;
	case MII_BMSR:
		re8139_reg = RTK_BMSR;
		break;
	case MII_ANAR:
		re8139_reg = RTK_ANAR;
		break;
	case MII_ANER:
		re8139_reg = RTK_ANER;
		break;
	case MII_ANLPAR:
		re8139_reg = RTK_LPAR;
		break;
	case MII_PHYIDR1:
	case MII_PHYIDR2:
		splx(s);
		return 0;
	/*
	 * Allow the rlphy driver to read the media status
	 * register.  If we have a link partner which does not
	 * support NWAY, this is the register which will tell
	 * us the results of parallel detection.
	 */
	case RTK_MEDIASTAT:
		rval = CSR_READ_1(sc, RTK_MEDIASTAT);
		splx(s);
		return rval;
	default:
		aprint_error("%s: bad phy register\n", sc->sc_dev.dv_xname);
		splx(s);
		return 0;
	}
	rval = CSR_READ_2(sc, re8139_reg);
	if ((sc->sc_quirk & RTKQ_8139CPLUS) != 0 && re8139_reg == RTK_BMCR) {
		/* 8139C+ has different bit layout. */
		rval &= ~(BMCR_LOOP | BMCR_ISO);
	}
	splx(s);
	return rval;
}

static void
re_miibus_writereg(struct device *dev, int phy, int reg, int data)
{
	struct rtk_softc *sc = (void *)dev;
	uint16_t re8139_reg = 0;
	int s;

	s = splnet();

	if ((sc->sc_quirk & RTKQ_8139CPLUS) == 0) {
		re_gmii_writereg(dev, phy, reg, data);
		splx(s);
		return;
	}

	/* Pretend the internal PHY is only at address 0 */
	if (phy) {
		splx(s);
		return;
	}
	switch (reg) {
	case MII_BMCR:
		re8139_reg = RTK_BMCR;
		if ((sc->sc_quirk & RTKQ_8139CPLUS) != 0) {
			/* 8139C+ has different bit layout. */
			data &= ~(BMCR_LOOP | BMCR_ISO);
		}
		break;
	case MII_BMSR:
		re8139_reg = RTK_BMSR;
		break;
	case MII_ANAR:
		re8139_reg = RTK_ANAR;
		break;
	case MII_ANER:
		re8139_reg = RTK_ANER;
		break;
	case MII_ANLPAR:
		re8139_reg = RTK_LPAR;
		break;
	case MII_PHYIDR1:
	case MII_PHYIDR2:
		splx(s);
		return;
	default:
		aprint_error("%s: bad phy register\n", sc->sc_dev.dv_xname);
		splx(s);
		return;
	}
	CSR_WRITE_2(sc, re8139_reg, data);
	splx(s);
}

static void
re_miibus_statchg(struct device *dev)
{
}

static void
re_reset(struct rtk_softc *sc)
{
	int i;

	CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_RESET);

	for (i = 0; i < RTK_TIMEOUT; i++) {
		DELAY(10);
		if ((CSR_READ_1(sc, RTK_COMMAND) & RTK_CMD_RESET) == 0)
			break;
	}
	if (i == RTK_TIMEOUT)
		aprint_error("%s: reset never completed!\n",
		    sc->sc_dev.dv_xname);

	/*
	 * NB: Realtek-supplied Linux driver does this only for
	 * MCFG_METHOD_2, which corresponds to sc->sc_rev == 2.
	 */
	if (1) /* XXX check softc flag for 8169s version */
		CSR_WRITE_1(sc, RTK_LDPS, 1);
}

/*
 * The following routine is designed to test for a defect on some
 * 32-bit 8169 cards.  Some of these NICs have the REQ64# and ACK64#
 * lines connected to the bus, however for a 32-bit only card, they
 * should be pulled high.  The result of this defect is that the
 * NIC will not work right if you plug it into a 64-bit slot: DMA
 * operations will be done with 64-bit transfers, which will fail
 * because the 64-bit data lines aren't connected.
 *
 * There's no way to work around this (short of taking a soldering
 * iron to the board); however, we can detect it.  The method we use
 * here is to put the NIC into digital loopback mode, set the receiver
 * to promiscuous mode, and then try to send a frame.  We then compare
 * the frame data we sent to what was received.  If the data matches,
 * then the NIC is working correctly, otherwise we know the user has
 * a defective NIC which has been mistakenly plugged into a 64-bit PCI
 * slot.  In the latter case, there's no way the NIC can work correctly,
 * so we print out a message on the console and abort the device attach.
 */

int
re_diag(struct rtk_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	struct mbuf *m0;
	struct ether_header *eh;
	struct re_rxsoft *rxs;
	struct re_desc *cur_rx;
	bus_dmamap_t dmamap;
	uint16_t status;
	uint32_t rxstat;
	int total_len, i, s, error = 0;
	static const uint8_t dst[] = { 0x00, 'h', 'e', 'l', 'l', 'o' };
	static const uint8_t src[] = { 0x00, 'w', 'o', 'r', 'l', 'd' };

	/* Allocate a single mbuf */

	MGETHDR(m0, M_DONTWAIT, MT_DATA);
	if (m0 == NULL)
		return ENOBUFS;

	/*
	 * Initialize the NIC in test mode.  This sets the chip up
	 * so that it can send and receive frames, but performs the
	 * following special functions:
	 * - Puts receiver in promiscuous mode
	 * - Enables digital loopback mode
	 * - Leaves interrupts turned off
	 */

	ifp->if_flags |= IFF_PROMISC;
	sc->re_testmode = 1;
	re_init(ifp);
	re_stop(ifp, 0);
	DELAY(100000);
	re_init(ifp);

	/* Put some data in the mbuf */

	eh = mtod(m0, struct ether_header *);
	memcpy(eh->ether_dhost, (char *)&dst, ETHER_ADDR_LEN);
	memcpy(eh->ether_shost, (char *)&src, ETHER_ADDR_LEN);
	eh->ether_type = htons(ETHERTYPE_IP);
	m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN;

	/*
	 * Queue the packet, start transmission.
	 */

	CSR_WRITE_2(sc, RTK_ISR, 0xFFFF);
	s = splnet();
	IF_ENQUEUE(&ifp->if_snd, m0);
	re_start(ifp);
	splx(s);
	m0 = NULL;

	/* Wait for it to propagate through the chip */

	DELAY(100000);
	for (i = 0; i < RTK_TIMEOUT; i++) {
		status = CSR_READ_2(sc, RTK_ISR);
		if ((status & (RTK_ISR_TIMEOUT_EXPIRED | RTK_ISR_RX_OK)) ==
		    (RTK_ISR_TIMEOUT_EXPIRED | RTK_ISR_RX_OK))
			break;
		DELAY(10);
	}
	if (i == RTK_TIMEOUT) {
		aprint_error("%s: diagnostic failed, failed to receive packet "
		    "in loopback mode\n", sc->sc_dev.dv_xname);
		error = EIO;
		goto done;
	}

	/*
	 * The packet should have been dumped into the first
	 * entry in the RX DMA ring.  Grab it from there.
	 */

	rxs = &sc->re_ldata.re_rxsoft[0];
	dmamap = rxs->rxs_dmamap;
	bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->sc_dmat, dmamap);

	m0 = rxs->rxs_mbuf;
	rxs->rxs_mbuf = NULL;
	eh = mtod(m0, struct ether_header *);

	RE_RXDESCSYNC(sc, 0, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	cur_rx = &sc->re_ldata.re_rx_list[0];
	rxstat = le32toh(cur_rx->re_cmdstat);
	total_len = rxstat & sc->re_rxlenmask;

	if (total_len != ETHER_MIN_LEN) {
		aprint_error("%s: diagnostic failed, received short packet\n",
		    sc->sc_dev.dv_xname);
		error = EIO;
		goto done;
	}

	/* Test that the received packet data matches what we sent. */

	if (memcmp((char *)&eh->ether_dhost, (char *)&dst, ETHER_ADDR_LEN) ||
	    memcmp((char *)&eh->ether_shost, (char *)&src, ETHER_ADDR_LEN) ||
	    ntohs(eh->ether_type) != ETHERTYPE_IP) {
		aprint_error("%s: WARNING, DMA FAILURE!\n",
		    sc->sc_dev.dv_xname);
		aprint_error("%s: expected TX data: %s",
		    sc->sc_dev.dv_xname, ether_sprintf(dst));
		aprint_error("/%s/0x%x\n", ether_sprintf(src), ETHERTYPE_IP);
		aprint_error("%s: received RX data: %s",
		    sc->sc_dev.dv_xname,
		    ether_sprintf(eh->ether_dhost));
		aprint_error("/%s/0x%x\n", ether_sprintf(eh->ether_shost),
		    ntohs(eh->ether_type));
		aprint_error("%s: You may have a defective 32-bit NIC plugged "
		    "into a 64-bit PCI slot.\n", sc->sc_dev.dv_xname);
		aprint_error("%s: Please re-install the NIC in a 32-bit slot "
		    "for proper operation.\n", sc->sc_dev.dv_xname);
		aprint_error("%s: Read the re(4) man page for more details.\n",
		    sc->sc_dev.dv_xname);
		error = EIO;
	}

 done:
	/* Turn interface off, release resources */

	sc->re_testmode = 0;
	ifp->if_flags &= ~IFF_PROMISC;
	re_stop(ifp, 0);
	if (m0 != NULL)
		m_freem(m0);

	return error;
}

/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
void
re_attach(struct rtk_softc *sc)
{
	u_char eaddr[ETHER_ADDR_LEN];
	uint16_t val;
	struct ifnet *ifp;
	int error = 0, i, addr_len;

	/* Reset the adapter. */
	re_reset(sc);

	if (rtk_read_eeprom(sc, RTK_EE_ID, RTK_EEADDR_LEN1) == 0x8129)
		addr_len = RTK_EEADDR_LEN1;
	else
		addr_len = RTK_EEADDR_LEN0;

	/*
	 * Get station address from the EEPROM.
	 */
	for (i = 0; i < 3; i++) {
		val = rtk_read_eeprom(sc, RTK_EE_EADDR0 + i, addr_len);
		eaddr[(i * 2) + 0] = val & 0xff;
		eaddr[(i * 2) + 1] = val >> 8;
	}

	if ((sc->sc_quirk & RTKQ_8139CPLUS) == 0) {
		uint32_t hwrev;

		/* Revision of 8169/8169S/8110S in bits 30..26, 23 */
		hwrev = CSR_READ_4(sc, RTK_TXCFG) & RTK_TXCFG_HWREV;
		/* These rev numbers are taken from Realtek's driver */
		if (hwrev == RTK_HWREV_8100E_SPIN2) {
			sc->sc_rev = 15;
		} else if (hwrev == RTK_HWREV_8100E) {
			sc->sc_rev = 14;
		} else if (hwrev == RTK_HWREV_8101E) {
			sc->sc_rev = 13;
		} else if (hwrev == RTK_HWREV_8168_SPIN2 ||
		    hwrev == RTK_HWREV_8168_SPIN3) {
			sc->sc_rev = 12;
		} else if (hwrev == RTK_HWREV_8168_SPIN1) {
			sc->sc_rev = 11;
		} else if (hwrev == RTK_HWREV_8169_8110SC) {
			sc->sc_rev = 5;
		} else if (hwrev == RTK_HWREV_8169_8110SB) {
			sc->sc_rev = 4;
		} else if (hwrev == RTK_HWREV_8169S) {
			sc->sc_rev = 3;
		} else if (hwrev == RTK_HWREV_8110S) {
			sc->sc_rev = 2;
		} else if (hwrev == RTK_HWREV_8169) {
			sc->sc_rev = 1;
			sc->sc_quirk |= RTKQ_8169NONS;
		} else {
			aprint_normal("%s: Unknown revision (0x%08x)\n",
			    sc->sc_dev.dv_xname, hwrev);
			/* assume the latest one */
			sc->sc_rev = 15;
		}

		/* Set RX length mask */
		sc->re_rxlenmask = RE_RDESC_STAT_GFRAGLEN;
		sc->re_ldata.re_tx_desc_cnt = RE_TX_DESC_CNT_8169;
	} else {
		/* Set RX length mask */
		sc->re_rxlenmask = RE_RDESC_STAT_FRAGLEN;
		sc->re_ldata.re_tx_desc_cnt = RE_TX_DESC_CNT_8139;
	}

	aprint_normal("%s: Ethernet address %s\n",
	    sc->sc_dev.dv_xname, ether_sprintf(eaddr));

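	/*
	 * Clamp the TX descriptor count so the whole ring fits in one
	 * page; the ring is allocated below as a single DMA segment.
	 */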
	if (sc->re_ldata.re_tx_desc_cnt >
	    PAGE_SIZE / sizeof(struct re_desc)) {
		sc->re_ldata.re_tx_desc_cnt =
		    PAGE_SIZE / sizeof(struct re_desc);
	}

	aprint_verbose("%s: using %d tx descriptors\n",
	    sc->sc_dev.dv_xname, sc->re_ldata.re_tx_desc_cnt);
	KASSERT(RE_NEXT_TX_DESC(sc, RE_TX_DESC_CNT(sc) - 1) == 0);

	/* Allocate DMA'able memory for the TX ring */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, RE_TX_LIST_SZ(sc),
	    RE_RING_ALIGN, 0, &sc->re_ldata.re_tx_listseg, 1,
	    &sc->re_ldata.re_tx_listnseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error("%s: can't allocate tx listseg, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	/* Load the map for the TX ring. */
	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->re_ldata.re_tx_listseg,
	    sc->re_ldata.re_tx_listnseg, RE_TX_LIST_SZ(sc),
	    (void **)&sc->re_ldata.re_tx_list,
	    BUS_DMA_COHERENT | BUS_DMA_NOWAIT)) != 0) {
		aprint_error("%s: can't map tx list, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}
	memset(sc->re_ldata.re_tx_list, 0, RE_TX_LIST_SZ(sc));

	if ((error = bus_dmamap_create(sc->sc_dmat, RE_TX_LIST_SZ(sc), 1,
	    RE_TX_LIST_SZ(sc), 0, 0,
	    &sc->re_ldata.re_tx_list_map)) != 0) {
		aprint_error("%s: can't create tx list map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat,
	    sc->re_ldata.re_tx_list_map, sc->re_ldata.re_tx_list,
	    RE_TX_LIST_SZ(sc), NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error("%s: can't load tx list, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/* Create DMA maps for TX buffers */
	for (i = 0; i < RE_TX_QLEN; i++) {
		error = bus_dmamap_create(sc->sc_dmat,
		    round_page(IP_MAXPACKET),
		    RE_TX_DESC_CNT(sc) - RE_NTXDESC_RSVD, RE_TDESC_CMD_FRAGLEN,
		    0, 0, &sc->re_ldata.re_txq[i].txq_dmamap);
		if (error) {
			aprint_error("%s: can't create DMA map for TX\n",
			    sc->sc_dev.dv_xname);
			goto fail_4;
		}
	}

	/* Allocate DMA'able memory for the RX ring */
	/* XXX see also a comment about RE_RX_DMAMEM_SZ in rtl81x9var.h */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    RE_RX_DMAMEM_SZ, RE_RING_ALIGN, 0, &sc->re_ldata.re_rx_listseg, 1,
	    &sc->re_ldata.re_rx_listnseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error("%s: can't allocate rx listseg, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_4;
	}

	/* Load the map for the RX ring. */
	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->re_ldata.re_rx_listseg,
	    sc->re_ldata.re_rx_listnseg, RE_RX_DMAMEM_SZ,
	    (void **)&sc->re_ldata.re_rx_list,
	    BUS_DMA_COHERENT | BUS_DMA_NOWAIT)) != 0) {
		aprint_error("%s: can't map rx list, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_5;
	}
	memset(sc->re_ldata.re_rx_list, 0, RE_RX_DMAMEM_SZ);

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    RE_RX_DMAMEM_SZ, 1, RE_RX_DMAMEM_SZ, 0, 0,
	    &sc->re_ldata.re_rx_list_map)) != 0) {
		aprint_error("%s: can't create rx list map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_6;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat,
	    sc->re_ldata.re_rx_list_map, sc->re_ldata.re_rx_list,
	    RE_RX_DMAMEM_SZ, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error("%s: can't load rx list, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_7;
	}

	/* Create DMA maps for RX buffers */
	for (i = 0; i < RE_RX_DESC_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, 0, &sc->re_ldata.re_rxsoft[i].rxs_dmamap);
		if (error) {
			aprint_error("%s: can't create DMA map for RX\n",
			    sc->sc_dev.dv_xname);
			goto fail_8;
		}
	}

	/*
	 * Record interface as attached.  From here, we should not fail.
	 */
	sc->sc_flags |= RTK_ATTACHED;

	ifp = &sc->ethercom.ec_if;
	ifp->if_softc = sc;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = re_ioctl;
	sc->ethercom.ec_capabilities |=
	    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
	ifp->if_start = re_start;
	ifp->if_stop = re_stop;

	/*
	 * IFCAP_CSUM_IPv4_Tx on re(4) is broken for small packets,
	 * so we have a workaround to handle the bug by padding
	 * such packets manually.
	 */
	ifp->if_capabilities |=
	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
	    IFCAP_TSOv4;
	ifp->if_watchdog = re_watchdog;
	ifp->if_init = re_init;
	ifp->if_snd.ifq_maxlen = RE_IFQ_MAXLEN;
	ifp->if_capenable = ifp->if_capabilities;
	IFQ_SET_READY(&ifp->if_snd);

	callout_init(&sc->rtk_tick_ch, 0);

	/* Do MII setup */
	sc->mii.mii_ifp = ifp;
	sc->mii.mii_readreg = re_miibus_readreg;
	sc->mii.mii_writereg = re_miibus_writereg;
	sc->mii.mii_statchg = re_miibus_statchg;
	ifmedia_init(&sc->mii.mii_media, IFM_IMASK, re_ifmedia_upd,
	    re_ifmedia_sts);
	mii_attach(&sc->sc_dev, &sc->mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	ifmedia_set(&sc->mii.mii_media, IFM_ETHER | IFM_AUTO);

	/*
	 * Call MI attach routine.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, eaddr);

	return;

 fail_8:
	/* Destroy DMA maps for RX buffers. */
	for (i = 0; i < RE_RX_DESC_CNT; i++)
		if (sc->re_ldata.re_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->re_ldata.re_rxsoft[i].rxs_dmamap);

	/* Free DMA'able memory for the RX ring. */
	bus_dmamap_unload(sc->sc_dmat, sc->re_ldata.re_rx_list_map);
 fail_7:
	bus_dmamap_destroy(sc->sc_dmat, sc->re_ldata.re_rx_list_map);
 fail_6:
	bus_dmamem_unmap(sc->sc_dmat,
	    (void *)sc->re_ldata.re_rx_list, RE_RX_DMAMEM_SZ);
 fail_5:
	bus_dmamem_free(sc->sc_dmat,
	    &sc->re_ldata.re_rx_listseg, sc->re_ldata.re_rx_listnseg);

 fail_4:
	/* Destroy DMA maps for TX buffers. */
	for (i = 0; i < RE_TX_QLEN; i++)
		if (sc->re_ldata.re_txq[i].txq_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->re_ldata.re_txq[i].txq_dmamap);

	/* Free DMA'able memory for the TX ring. */
	bus_dmamap_unload(sc->sc_dmat, sc->re_ldata.re_tx_list_map);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->re_ldata.re_tx_list_map);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat,
	    (void *)sc->re_ldata.re_tx_list, RE_TX_LIST_SZ(sc));
 fail_1:
	bus_dmamem_free(sc->sc_dmat,
	    &sc->re_ldata.re_tx_listseg, sc->re_ldata.re_tx_listnseg);
 fail_0:
	return;
}

/*
 * re_activate:
 *	Handle device activation/deactivation requests.
 */
int
re_activate(struct device *self, enum devact act)
{
	struct rtk_softc *sc = (void *)self;
	int s, error = 0;

	s = splnet();
	switch (act) {
	case DVACT_ACTIVATE:
		error = EOPNOTSUPP;
		break;
	case DVACT_DEACTIVATE:
		mii_activate(&sc->mii, act, MII_PHY_ANY, MII_OFFSET_ANY);
		if_deactivate(&sc->ethercom.ec_if);
		break;
	}
	splx(s);

	return error;
}

/*
 * re_detach:
 *	Detach a rtk interface.
 */
int
re_detach(struct rtk_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	int i;

	/*
	 * Succeed now if there isn't any work to do.
	 */
	if ((sc->sc_flags & RTK_ATTACHED) == 0)
		return 0;

	/* Unhook our tick handler. */
	callout_stop(&sc->rtk_tick_ch);

	/* Detach all PHYs. */
	mii_detach(&sc->mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->mii.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);

	/* Destroy DMA maps for RX buffers. */
	for (i = 0; i < RE_RX_DESC_CNT; i++)
		if (sc->re_ldata.re_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->re_ldata.re_rxsoft[i].rxs_dmamap);

	/* Free DMA'able memory for the RX ring. */
	bus_dmamap_unload(sc->sc_dmat, sc->re_ldata.re_rx_list_map);
	bus_dmamap_destroy(sc->sc_dmat, sc->re_ldata.re_rx_list_map);
	bus_dmamem_unmap(sc->sc_dmat,
	    (void *)sc->re_ldata.re_rx_list, RE_RX_DMAMEM_SZ);
	bus_dmamem_free(sc->sc_dmat,
	    &sc->re_ldata.re_rx_listseg, sc->re_ldata.re_rx_listnseg);

	/* Destroy DMA maps for TX buffers. */
	for (i = 0; i < RE_TX_QLEN; i++)
		if (sc->re_ldata.re_txq[i].txq_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->re_ldata.re_txq[i].txq_dmamap);

	/* Free DMA'able memory for the TX ring. */
	bus_dmamap_unload(sc->sc_dmat, sc->re_ldata.re_tx_list_map);
	bus_dmamap_destroy(sc->sc_dmat, sc->re_ldata.re_tx_list_map);
	bus_dmamem_unmap(sc->sc_dmat,
	    (void *)sc->re_ldata.re_tx_list, RE_TX_LIST_SZ(sc));
	bus_dmamem_free(sc->sc_dmat,
	    &sc->re_ldata.re_tx_listseg, sc->re_ldata.re_tx_listnseg);

	return 0;
}

/*
 * re_enable:
 *	Enable the RTL81X9 chip.
 */
static int
re_enable(struct rtk_softc *sc)
{

	if (RTK_IS_ENABLED(sc) == 0 && sc->sc_enable != NULL) {
		if ((*sc->sc_enable)(sc) != 0) {
			aprint_error("%s: device enable failed\n",
			    sc->sc_dev.dv_xname);
			return EIO;
		}
		sc->sc_flags |= RTK_ENABLED;
	}
	return 0;
}

/*
 * re_disable:
 *	Disable the RTL81X9 chip.
 */
static void
re_disable(struct rtk_softc *sc)
{

	if (RTK_IS_ENABLED(sc) && sc->sc_disable != NULL) {
		(*sc->sc_disable)(sc);
		sc->sc_flags &= ~RTK_ENABLED;
	}
}

static int
re_newbuf(struct rtk_softc *sc, int idx, struct mbuf *m)
{
	struct mbuf *n = NULL;
	bus_dmamap_t map;
	struct re_desc *d;
	struct re_rxsoft *rxs;
	uint32_t cmdstat;
	int error;

	if (m == NULL) {
		MGETHDR(n, M_DONTWAIT, MT_DATA);
		if (n == NULL)
			return ENOBUFS;

		MCLGET(n, M_DONTWAIT);
		if ((n->m_flags & M_EXT) == 0) {
			m_freem(n);
			return ENOBUFS;
		}
		m = n;
	} else
		m->m_data = m->m_ext.ext_buf;

	/*
	 * Initialize mbuf length fields and fixup
	 * alignment so that the frame payload is
	 * longword aligned.
	 */
	m->m_len = m->m_pkthdr.len = MCLBYTES - RE_ETHER_ALIGN;
	m->m_data += RE_ETHER_ALIGN;

	rxs = &sc->re_ldata.re_rxsoft[idx];
	map = rxs->rxs_dmamap;
	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);

	if (error)
		goto out;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	d = &sc->re_ldata.re_rx_list[idx];
#ifdef DIAGNOSTIC
	RE_RXDESCSYNC(sc, idx, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	cmdstat = le32toh(d->re_cmdstat);
	RE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
	if (cmdstat & RE_RDESC_STAT_OWN) {
		panic("%s: tried to map busy RX descriptor",
		    sc->sc_dev.dv_xname);
	}
#endif

	rxs->rxs_mbuf = m;

	d->re_vlanctl = 0;
	cmdstat = map->dm_segs[0].ds_len;
	if (idx == (RE_RX_DESC_CNT - 1))
		cmdstat |= RE_RDESC_CMD_EOR;
	re_set_bufaddr(d, map->dm_segs[0].ds_addr);
	d->re_cmdstat = htole32(cmdstat);
	RE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
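	/*
	 * Set the OWN bit in a second write, after the buffer address and
	 * length above have been synced, so the chip never sees a
	 * half-initialized descriptor that it already owns.
	 */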
	cmdstat |= RE_RDESC_CMD_OWN;
	d->re_cmdstat = htole32(cmdstat);
	RE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	return 0;
 out:
	if (n != NULL)
		m_freem(n);
	return ENOMEM;
}

static int
re_tx_list_init(struct rtk_softc *sc)
{
	int i;

	memset(sc->re_ldata.re_tx_list, 0, RE_TX_LIST_SZ(sc));
	for (i = 0; i < RE_TX_QLEN; i++) {
		sc->re_ldata.re_txq[i].txq_mbuf = NULL;
	}

	bus_dmamap_sync(sc->sc_dmat,
	    sc->re_ldata.re_tx_list_map, 0,
	    sc->re_ldata.re_tx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->re_ldata.re_txq_prodidx = 0;
	sc->re_ldata.re_txq_considx = 0;
	sc->re_ldata.re_txq_free = RE_TX_QLEN;
	sc->re_ldata.re_tx_free = RE_TX_DESC_CNT(sc);
	sc->re_ldata.re_tx_nextfree = 0;

	return 0;
}

static int
re_rx_list_init(struct rtk_softc *sc)
{
	int i;

	memset(sc->re_ldata.re_rx_list, 0, RE_RX_LIST_SZ);

	for (i = 0; i < RE_RX_DESC_CNT; i++) {
		if (re_newbuf(sc, i, NULL) == ENOBUFS)
			return ENOBUFS;
	}

	sc->re_ldata.re_rx_prodidx = 0;
	sc->re_head = sc->re_tail = NULL;

	return 0;
}

/*
 * RX handler for C+ and 8169.  For the gigE chips, we support
 * the reception of jumbo frames that have been fragmented
 * across multiple 2K mbuf cluster buffers.
 */
static void
re_rxeof(struct rtk_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp;
	int i, total_len;
	struct re_desc *cur_rx;
	struct re_rxsoft *rxs;
	uint32_t rxstat, rxvlan;

	ifp = &sc->ethercom.ec_if;

	for (i = sc->re_ldata.re_rx_prodidx;; i = RE_NEXT_RX_DESC(sc, i)) {
		cur_rx = &sc->re_ldata.re_rx_list[i];
		RE_RXDESCSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		rxstat = le32toh(cur_rx->re_cmdstat);
		RE_RXDESCSYNC(sc, i, BUS_DMASYNC_PREREAD);
		if ((rxstat & RE_RDESC_STAT_OWN) != 0) {
			break;
		}
		total_len = rxstat & sc->re_rxlenmask;
		rxvlan = le32toh(cur_rx->re_vlanctl);
		rxs = &sc->re_ldata.re_rxsoft[i];
		m = rxs->rxs_mbuf;

		/* Invalidate the RX mbuf and unload its map */

		bus_dmamap_sync(sc->sc_dmat,
		    rxs->rxs_dmamap, 0, rxs->rxs_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

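		/*
		 * If this is not the last descriptor of the frame (EOF
		 * clear), the packet spans multiple 2K clusters: stash
		 * this buffer on the fragment chain and wait for more.
		 */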
		if ((rxstat & RE_RDESC_STAT_EOF) == 0) {
			m->m_len = MCLBYTES - RE_ETHER_ALIGN;
			if (sc->re_head == NULL)
				sc->re_head = sc->re_tail = m;
			else {
				m->m_flags &= ~M_PKTHDR;
				sc->re_tail->m_next = m;
				sc->re_tail = m;
			}
			re_newbuf(sc, i, NULL);
			continue;
		}

		/*
		 * NOTE: for the 8139C+, the frame length field
		 * is always 12 bits in size, but for the gigE chips,
		 * it is 13 bits (since the max RX frame length is 16K).
		 * Unfortunately, all 32 bits in the status word
		 * were already used, so to make room for the extra
		 * length bit, RealTek took out the 'frame alignment
		 * error' bit and shifted the other status bits
		 * over one slot.  The OWN, EOR, FS and LS bits are
		 * still in the same places.  We have already extracted
		 * the frame length and checked the OWN bit, so rather
		 * than using an alternate bit mapping, we shift the
		 * status bits one space to the right so we can evaluate
		 * them using the 8169 status as though it was in the
		 * same format as that of the 8139C+.
		 */
		if ((sc->sc_quirk & RTKQ_8139CPLUS) == 0)
			rxstat >>= 1;

		if (__predict_false((rxstat & RE_RDESC_STAT_RXERRSUM) != 0)) {
#ifdef RE_DEBUG
			aprint_error("%s: RX error (rxstat = 0x%08x)",
			    sc->sc_dev.dv_xname, rxstat);
			if (rxstat & RE_RDESC_STAT_FRALIGN)
				aprint_error(", frame alignment error");
			if (rxstat & RE_RDESC_STAT_BUFOFLOW)
				aprint_error(", out of buffer space");
			if (rxstat & RE_RDESC_STAT_FIFOOFLOW)
				aprint_error(", FIFO overrun");
			if (rxstat & RE_RDESC_STAT_GIANT)
				aprint_error(", giant packet");
			if (rxstat & RE_RDESC_STAT_RUNT)
				aprint_error(", runt packet");
			if (rxstat & RE_RDESC_STAT_CRCERR)
				aprint_error(", CRC error");
			aprint_error("\n");
#endif
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->re_head != NULL) {
				m_freem(sc->re_head);
				sc->re_head = sc->re_tail = NULL;
			}
			re_newbuf(sc, i, m);
			continue;
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
		 */

		if (__predict_false(re_newbuf(sc, i, NULL) != 0)) {
			ifp->if_ierrors++;
			if (sc->re_head != NULL) {
				m_freem(sc->re_head);
				sc->re_head = sc->re_tail = NULL;
			}
			re_newbuf(sc, i, m);
			continue;
		}

		if (sc->re_head != NULL) {
			m->m_len = total_len % (MCLBYTES - RE_ETHER_ALIGN);
			/*
			 * Special case: if there's 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes is the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->re_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->re_tail->m_next = m;
			}
			m = sc->re_head;
			sc->re_head = sc->re_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;

		/* Do RX checksumming */

		/* Check IP header checksum */
		if (rxstat & RE_RDESC_STAT_PROTOID) {
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (rxstat & RE_RDESC_STAT_IPSUMBAD)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
		}

		/* Check TCP/UDP checksum */
		if (RE_TCPPKT(rxstat)) {
			m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
			if (rxstat & RE_RDESC_STAT_TCPSUMBAD)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		} else if (RE_UDPPKT(rxstat)) {
			m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
			if (rxstat & RE_RDESC_STAT_UDPSUMBAD)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}

		if (rxvlan & RE_RDESC_VLANCTL_TAG) {
			VLAN_INPUT_TAG(ifp, m,
			    bswap16(rxvlan & RE_RDESC_VLANCTL_DATA),
			    continue);
		}
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		(*ifp->if_input)(ifp, m);
	}

	sc->re_ldata.re_rx_prodidx = i;
}

static void
re_txeof(struct rtk_softc *sc)
{
	struct ifnet *ifp;
	struct re_txq *txq;
	uint32_t txstat;
	int idx, descidx;

	ifp = &sc->ethercom.ec_if;

	for (idx = sc->re_ldata.re_txq_considx;
	    sc->re_ldata.re_txq_free < RE_TX_QLEN;
	    idx = RE_NEXT_TXQ(sc, idx), sc->re_ldata.re_txq_free++) {
		txq = &sc->re_ldata.re_txq[idx];
		KASSERT(txq->txq_mbuf != NULL);

		descidx = txq->txq_descidx;
		RE_TXDESCSYNC(sc, descidx,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		txstat =
		    le32toh(sc->re_ldata.re_tx_list[descidx].re_cmdstat);
		RE_TXDESCSYNC(sc, descidx, BUS_DMASYNC_PREREAD);
		KASSERT((txstat & RE_TDESC_CMD_EOF) != 0);
		if (txstat & RE_TDESC_CMD_OWN) {
			break;
		}

		sc->re_ldata.re_tx_free += txq->txq_nsegs;
		KASSERT(sc->re_ldata.re_tx_free <= RE_TX_DESC_CNT(sc));
		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap,
		    0, txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
		m_freem(txq->txq_mbuf);
		txq->txq_mbuf = NULL;

		if (txstat & (RE_TDESC_STAT_EXCESSCOL | RE_TDESC_STAT_COLCNT))
			ifp->if_collisions++;
		if (txstat & RE_TDESC_STAT_TXERRSUM)
			ifp->if_oerrors++;
		else
			ifp->if_opackets++;
	}

	sc->re_ldata.re_txq_considx = idx;

	if (sc->re_ldata.re_txq_free > RE_NTXDESC_RSVD)
		ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * If not all descriptors have been reaped yet, reload
	 * the timer so that we will eventually get another
	 * interrupt that will cause us to re-enter this routine.
	 * This is done in case the transmitter has gone idle.
	 */
	if (sc->re_ldata.re_txq_free < RE_TX_QLEN) {
		CSR_WRITE_4(sc, RTK_TIMERCNT, 1);
		if ((sc->sc_quirk & RTKQ_PCIE) != 0) {
			/*
			 * Some chips will ignore a second TX request
			 * issued while an existing transmission is in
			 * progress.  If the transmitter goes idle but
			 * there are still packets waiting to be sent,
			 * we need to restart the channel here to flush
			 * them out.  This only seems to be required with
			 * the PCIe devices.
			 */
			CSR_WRITE_2(sc, RTK_GTXSTART, RTK_TXSTART_START);
		}
	} else
		ifp->if_timer = 0;
}

static void
re_tick(void *xsc)
{
	struct rtk_softc *sc = xsc;
	int s;

	/* XXX: just return for 8169S/8110S with rev 2 or newer phy */
	s = splnet();

	mii_tick(&sc->mii);
	splx(s);

	callout_reset(&sc->rtk_tick_ch, hz, re_tick, sc);
}

#ifdef DEVICE_POLLING
static void
re_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct rtk_softc *sc = ifp->if_softc;

	RTK_LOCK(sc);
	if ((ifp->if_capenable & IFCAP_POLLING) == 0) {
		ether_poll_deregister(ifp);
		cmd = POLL_DEREGISTER;
	}
	if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */
		CSR_WRITE_2(sc, RTK_IMR, RTK_INTRS_CPLUS);
		goto done;
	}

	sc->rxcycles = count;
	re_rxeof(sc);
	re_txeof(sc);

	if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
		(*ifp->if_start)(ifp);

	if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
		uint16_t status;

		status = CSR_READ_2(sc, RTK_ISR);
		if (status == 0xffff)
			goto done;
		if (status)
			CSR_WRITE_2(sc, RTK_ISR, status);

		/*
		 * XXX check behaviour on receiver stalls.
		 */

		if (status & RTK_ISR_SYSTEM_ERR) {
			re_init(ifp);
		}
	}
 done:
	RTK_UNLOCK(sc);
}
#endif /* DEVICE_POLLING */

int
re_intr(void *arg)
{
	struct rtk_softc *sc = arg;
	struct ifnet *ifp;
	uint16_t status;
	int handled = 0;

	ifp = &sc->ethercom.ec_if;

	if ((ifp->if_flags & IFF_UP) == 0)
		return 0;

#ifdef DEVICE_POLLING
	if (ifp->if_flags & IFF_POLLING)
		goto done;
	if ((ifp->if_capenable & IFCAP_POLLING) &&
	    ether_poll_register(re_poll, ifp)) { /* ok, disable interrupts */
		CSR_WRITE_2(sc, RTK_IMR, 0x0000);
		re_poll(ifp, 0, 1);
		goto done;
	}
#endif /* DEVICE_POLLING */

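	/*
	 * Read and acknowledge the interrupt status in a loop until
	 * no more interesting bits are pending.
	 */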
	for (;;) {
		status = CSR_READ_2(sc, RTK_ISR);
		/* If the card has gone away the read returns 0xffff. */
		if (status == 0xffff)
			break;
		if (status) {
			handled = 1;
			CSR_WRITE_2(sc, RTK_ISR, status);
		}

		if ((status & RTK_INTRS_CPLUS) == 0)
			break;

		if (status & (RTK_ISR_RX_OK | RTK_ISR_RX_ERR))
			re_rxeof(sc);

		if (status & (RTK_ISR_TIMEOUT_EXPIRED | RTK_ISR_TX_ERR |
		    RTK_ISR_TX_DESC_UNAVAIL))
			re_txeof(sc);

		if (status & RTK_ISR_SYSTEM_ERR) {
			re_init(ifp);
		}

		if (status & RTK_ISR_LINKCHG) {
			callout_stop(&sc->rtk_tick_ch);
			re_tick(sc);
		}
	}

	if (handled && !IFQ_IS_EMPTY(&ifp->if_snd))
		re_start(ifp);

#ifdef DEVICE_POLLING
 done:
#endif

	return handled;
}

/*
 * Main transmit routine for C+ and gigE NICs.
 */
static void
re_start(struct ifnet *ifp)
{
	struct rtk_softc *sc;
	struct mbuf *m;
	bus_dmamap_t map;
	struct re_txq *txq;
	struct re_desc *d;
	struct m_tag *mtag;
	uint32_t cmdstat, re_flags;
	int ofree, idx, error, nsegs, seg;
	int startdesc, curdesc, lastdesc;
	bool pad;

	sc = ifp->if_softc;
	ofree = sc->re_ldata.re_txq_free;

	for (idx = sc->re_ldata.re_txq_prodidx;; idx = RE_NEXT_TXQ(sc, idx)) {

		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		if (sc->re_ldata.re_txq_free == 0 ||
		    sc->re_ldata.re_tx_free <= RE_NTXDESC_RSVD) {
			/* no more free slots left */
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/*
		 * Set up checksum offload.  Note: checksum offload bits must
		 * appear in all descriptors of a multi-descriptor transmit
		 * attempt.  (This is according to testing done with an 8169
		 * chip.  I'm not sure if this is a requirement or a bug.)
		 */

		if ((m->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0) {
			uint32_t segsz = m->m_pkthdr.segsz;

			re_flags = RE_TDESC_CMD_LGSEND |
			    (segsz << RE_TDESC_CMD_MSSVAL_SHIFT);
		} else {
			/*
			 * Set RE_TDESC_CMD_IPCSUM if any checksum offloading
			 * is requested.  Otherwise, RE_TDESC_CMD_TCPCSUM and
			 * RE_TDESC_CMD_UDPCSUM have no effect.
			 */
			re_flags = 0;
			if ((m->m_pkthdr.csum_flags &
			    (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4))
			    != 0) {
				re_flags |= RE_TDESC_CMD_IPCSUM;
				if (m->m_pkthdr.csum_flags & M_CSUM_TCPv4) {
					re_flags |= RE_TDESC_CMD_TCPCSUM;
				} else if (m->m_pkthdr.csum_flags &
				    M_CSUM_UDPv4) {
					re_flags |= RE_TDESC_CMD_UDPCSUM;
				}
			}
		}

		txq = &sc->re_ldata.re_txq[idx];
		map = txq->txq_dmamap;
		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);

		if (__predict_false(error)) {
			/* XXX try to defrag if EFBIG? */
			aprint_error("%s: can't map mbuf (error %d)\n",
			    sc->sc_dev.dv_xname, error);

			IFQ_DEQUEUE(&ifp->if_snd, m);
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}

		nsegs = map->dm_nsegs;
		pad = false;
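		/*
		 * The chip generates a bad IPv4 header checksum for very
		 * short frames (see the IFCAP_CSUM_IPv4_Tx note in
		 * re_attach()), so pad such frames out with an extra
		 * descriptor pointing at a dummy pad buffer.
		 */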
		if (__predict_false(m->m_pkthdr.len <= RE_IP4CSUMTX_PADLEN &&
		    (re_flags & RE_TDESC_CMD_IPCSUM) != 0)) {
			pad = true;
			nsegs++;
		}

		if (nsegs > sc->re_ldata.re_tx_free - RE_NTXDESC_RSVD) {
			/*
			 * Not enough free descriptors to transmit this packet.
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, map);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);

		/*
		 * Make sure that the caches are synchronized before we
		 * ask the chip to start DMA for the packet data.
		 */
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Map the segment array into descriptors.
		 * Note that we set the start-of-frame and
		 * end-of-frame markers for either TX or RX,
		 * but they really only have meaning in the TX case.
		 * (In the RX case, it's the chip that tells us
		 *  where packets begin and end.)
		 * We also keep track of the end of the ring
		 * and set the end-of-ring bits as needed,
		 * and we set the ownership bits in all except
		 * the very first descriptor.  (The caller will
		 * set this descriptor later when it starts
		 * transmission or reception.)
		 */
		curdesc = startdesc = sc->re_ldata.re_tx_nextfree;
		lastdesc = -1;
		for (seg = 0; seg < map->dm_nsegs;
		    seg++, curdesc = RE_NEXT_TX_DESC(sc, curdesc)) {
			d = &sc->re_ldata.re_tx_list[curdesc];
#ifdef DIAGNOSTIC
			RE_TXDESCSYNC(sc, curdesc,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			cmdstat = le32toh(d->re_cmdstat);
			RE_TXDESCSYNC(sc, curdesc, BUS_DMASYNC_PREREAD);
			if (cmdstat & RE_TDESC_STAT_OWN) {
				panic("%s: tried to map busy TX descriptor",
				    sc->sc_dev.dv_xname);
			}
#endif

			d->re_vlanctl = 0;
			re_set_bufaddr(d, map->dm_segs[seg].ds_addr);
			cmdstat = re_flags | map->dm_segs[seg].ds_len;
			if (seg == 0)
				cmdstat |= RE_TDESC_CMD_SOF;
			else
				cmdstat |= RE_TDESC_CMD_OWN;
			if (curdesc == (RE_TX_DESC_CNT(sc) - 1))
				cmdstat |= RE_TDESC_CMD_EOR;
			if (seg == nsegs - 1) {
				cmdstat |= RE_TDESC_CMD_EOF;
				lastdesc = curdesc;
			}
			d->re_cmdstat = htole32(cmdstat);
			RE_TXDESCSYNC(sc, curdesc,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		}
		if (__predict_false(pad)) {
			bus_addr_t paddaddr;

			d = &sc->re_ldata.re_tx_list[curdesc];
			d->re_vlanctl = 0;
			paddaddr = RE_TXPADDADDR(sc);
			re_set_bufaddr(d, paddaddr);
			cmdstat = re_flags |
			    RE_TDESC_CMD_OWN | RE_TDESC_CMD_EOF |
			    (RE_IP4CSUMTX_PADLEN + 1 - m->m_pkthdr.len);
			if (curdesc == (RE_TX_DESC_CNT(sc) - 1))
				cmdstat |= RE_TDESC_CMD_EOR;
			d->re_cmdstat = htole32(cmdstat);
			RE_TXDESCSYNC(sc, curdesc,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			lastdesc = curdesc;
			curdesc = RE_NEXT_TX_DESC(sc, curdesc);
		}
		KASSERT(lastdesc != -1);

		/*
		 * Set up hardware VLAN tagging.  Note: vlan tag info must
		 * appear in the first descriptor of a multi-descriptor
		 * transmission attempt.
		 */
		if ((mtag = VLAN_OUTPUT_TAG(&sc->ethercom, m)) != NULL) {
			sc->re_ldata.re_tx_list[startdesc].re_vlanctl =
			    htole32(bswap16(VLAN_TAG_VALUE(mtag)) |
			    RE_TDESC_VLANCTL_TAG);
		}

		/* Transfer ownership of packet to the chip. */

		sc->re_ldata.re_tx_list[startdesc].re_cmdstat |=
		    htole32(RE_TDESC_CMD_OWN);
		RE_TXDESCSYNC(sc, startdesc,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* update info of TX queue and descriptors */
		txq->txq_mbuf = m;
		txq->txq_descidx = lastdesc;
		txq->txq_nsegs = nsegs;

		sc->re_ldata.re_txq_free--;
		sc->re_ldata.re_tx_free -= nsegs;
		sc->re_ldata.re_tx_nextfree = curdesc;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
	}

	if (sc->re_ldata.re_txq_free < ofree) {
		/*
		 * TX packets are enqueued.
		 */
		sc->re_ldata.re_txq_prodidx = idx;

		/*
		 * Start the transmitter to poll.
		 *
		 * RealTek put the TX poll request register in a different
		 * location on the 8169 gigE chip.  I don't know why.
		 */
		if ((sc->sc_quirk & RTKQ_8139CPLUS) != 0)
			CSR_WRITE_1(sc, RTK_TXSTART, RTK_TXSTART_START);
		else
			CSR_WRITE_2(sc, RTK_GTXSTART, RTK_TXSTART_START);

		/*
		 * Use the countdown timer for interrupt moderation.
		 * 'TX done' interrupts are disabled.  Instead, we reset the
		 * countdown timer, which will begin counting until it hits
		 * the value in the TIMERINT register, and then trigger an
		 * interrupt.  Each time we write to the TIMERCNT register,
		 * the timer count is reset to 0.
		 */
		CSR_WRITE_4(sc, RTK_TIMERCNT, 1);

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		ifp->if_timer = 5;
	}
}

static int
re_init(struct ifnet *ifp)
{
	struct rtk_softc *sc = ifp->if_softc;
	const uint8_t *enaddr;
	uint32_t rxcfg = 0;
	uint32_t reg;
	int error;

	if ((error = re_enable(sc)) != 0)
		goto out;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	re_stop(ifp, 0);

	re_reset(sc);

	/*
	 * Enable C+ RX and TX mode, as well as VLAN stripping and
	 * RX checksum offload.  We must configure the C+ register
	 * before all others.
	 */
	reg = 0;

	/*
	 * XXX: Realtek docs say bits 0 and 1 are reserved, for 8169S/8110S.
	 * FreeBSD drivers set these bits anyway (for 8139C+?).
	 * So far, it works.
	 */

	/*
	 * XXX: For old 8169 set bit 14.
	 * For 8169S/8110S and above, do not set bit 14.
	 */
	if ((sc->sc_quirk & RTKQ_8169NONS) != 0)
		reg |= (0x1 << 14) | RTK_CPLUSCMD_PCI_MRW;

	if (1) { /* not for 8169S? */
		reg |=
		    RTK_CPLUSCMD_VLANSTRIP |
		    (ifp->if_capenable &
		    (IFCAP_CSUM_IPv4_Rx | IFCAP_CSUM_TCPv4_Rx |
		    IFCAP_CSUM_UDPv4_Rx) ?
		    RTK_CPLUSCMD_RXCSUM_ENB : 0);
	}

	CSR_WRITE_2(sc, RTK_CPLUS_CMD,
	    reg | RTK_CPLUSCMD_RXENB | RTK_CPLUSCMD_TXENB);

	/* XXX: from Realtek-supplied Linux driver.  Wholly undocumented. */
	if ((sc->sc_quirk & RTKQ_8139CPLUS) == 0)
		CSR_WRITE_2(sc, RTK_IM, 0x0000);

	DELAY(10000);

	/*
	 * Init our MAC address.  Even though the chipset
	 * documentation doesn't mention it, we need to enter "Config
	 * register write enable" mode to modify the ID registers.
	 */
	CSR_WRITE_1(sc, RTK_EECMD, RTK_EEMODE_WRITECFG);
	enaddr = CLLADDR(ifp->if_sadl);
	reg = enaddr[0] | (enaddr[1] << 8) |
	    (enaddr[2] << 16) | (enaddr[3] << 24);
	CSR_WRITE_4(sc, RTK_IDR0, reg);
	reg = enaddr[4] | (enaddr[5] << 8);
	CSR_WRITE_4(sc, RTK_IDR4, reg);
	CSR_WRITE_1(sc, RTK_EECMD, RTK_EEMODE_OFF);

	/*
	 * For C+ mode, initialize the RX descriptors and mbufs.
	 */
	re_rx_list_init(sc);
	re_tx_list_init(sc);

	/*
	 * Load the addresses of the RX and TX lists into the chip.
	 */
	CSR_WRITE_4(sc, RTK_RXLIST_ADDR_HI,
	    RE_ADDR_HI(sc->re_ldata.re_rx_list_map->dm_segs[0].ds_addr));
	CSR_WRITE_4(sc, RTK_RXLIST_ADDR_LO,
	    RE_ADDR_LO(sc->re_ldata.re_rx_list_map->dm_segs[0].ds_addr));

	CSR_WRITE_4(sc, RTK_TXLIST_ADDR_HI,
	    RE_ADDR_HI(sc->re_ldata.re_tx_list_map->dm_segs[0].ds_addr));
	CSR_WRITE_4(sc, RTK_TXLIST_ADDR_LO,
	    RE_ADDR_LO(sc->re_ldata.re_tx_list_map->dm_segs[0].ds_addr));

	/*
	 * Enable transmit and receive.
	 */
	CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_TX_ENB | RTK_CMD_RX_ENB);

	/*
	 * Set the initial TX and RX configuration.
	 */
	if (sc->re_testmode && (sc->sc_quirk & RTKQ_8169NONS) != 0) {
		/* test mode is needed only for old 8169 */
		CSR_WRITE_4(sc, RTK_TXCFG,
		    RE_TXCFG_CONFIG | RTK_LOOPTEST_ON);
	} else
		CSR_WRITE_4(sc, RTK_TXCFG, RE_TXCFG_CONFIG);

	CSR_WRITE_1(sc, RTK_EARLY_TX_THRESH, 16);

	CSR_WRITE_4(sc, RTK_RXCFG, RE_RXCFG_CONFIG);

	/* Set the individual bit to receive frames for this host only. */
	rxcfg = CSR_READ_4(sc, RTK_RXCFG);
	rxcfg |= RTK_RXCFG_RX_INDIV;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		rxcfg |= RTK_RXCFG_RX_ALLPHYS;
	else
		rxcfg &= ~RTK_RXCFG_RX_ALLPHYS;
	CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);

	/*
	 * Set capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST)
		rxcfg |= RTK_RXCFG_RX_BROAD;
	else
		rxcfg &= ~RTK_RXCFG_RX_BROAD;
	CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);

	/*
	 * Program the multicast filter, if necessary.
	 */
	rtk_setmulti(sc);

#ifdef DEVICE_POLLING
	/*
	 * Disable interrupts if we are polling.
	 */
	if (ifp->if_flags & IFF_POLLING)
		CSR_WRITE_2(sc, RTK_IMR, 0);
	else	/* otherwise ... */
#endif /* DEVICE_POLLING */
	/*
	 * Enable interrupts.
	 */
	if (sc->re_testmode)
		CSR_WRITE_2(sc, RTK_IMR, 0);
	else
		CSR_WRITE_2(sc, RTK_IMR, RTK_INTRS_CPLUS);

	/* Start RX/TX process. */
	CSR_WRITE_4(sc, RTK_MISSEDPKT, 0);
#ifdef notdef
	/* Enable receiver and transmitter. */
	CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_TX_ENB | RTK_CMD_RX_ENB);
#endif

	/*
	 * Initialize the timer interrupt register so that
	 * a timer interrupt will be generated once the timer
	 * reaches a certain number of ticks.  The timer is
	 * reloaded on each transmit.  This gives us TX interrupt
	 * moderation, which dramatically improves TX frame rate.
	 */

	if ((sc->sc_quirk & RTKQ_8139CPLUS) != 0)
		CSR_WRITE_4(sc, RTK_TIMERINT, 0x400);
	else {
		CSR_WRITE_4(sc, RTK_TIMERINT_8169, 0x800);

		/*
		 * For 8169 gigE NICs, set the max allowed RX packet
		 * size so we can receive jumbo frames.
		 */
		CSR_WRITE_2(sc, RTK_MAXRXPKTLEN, 16383);
	}

	if (sc->re_testmode)
		return 0;

	CSR_WRITE_1(sc, RTK_CFG1, RTK_CFG1_DRVLOAD);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	callout_reset(&sc->rtk_tick_ch, hz, re_tick, sc);

 out:
	if (error) {
		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
		ifp->if_timer = 0;
		aprint_error("%s: interface not running\n",
		    sc->sc_dev.dv_xname);
	}

	return error;
}

/*
 * Set media options.
 */
static int
re_ifmedia_upd(struct ifnet *ifp)
{
	struct rtk_softc *sc;

	sc = ifp->if_softc;

	return mii_mediachg(&sc->mii);
}

/*
 * Report current media status.
 */
static void
re_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct rtk_softc *sc;

	sc = ifp->if_softc;

	mii_pollstat(&sc->mii);
	ifmr->ifm_active = sc->mii.mii_media_active;
	ifmr->ifm_status = sc->mii.mii_media_status;
}

static int
re_ioctl(struct ifnet *ifp, u_long command, void *data)
{
	struct rtk_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu > RE_JUMBO_MTU) {
			error = EINVAL;
			break;
		}
		ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->mii.mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				rtk_setmulti(sc);
			error = 0;
		}
		break;
	}

	splx(s);

	return error;
}

static void
re_watchdog(struct ifnet *ifp)
{
	struct rtk_softc *sc;
	int s;

	sc = ifp->if_softc;
	s = splnet();
	aprint_error("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	re_txeof(sc);
	re_rxeof(sc);

	re_init(ifp);

	splx(s);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
re_stop(struct ifnet *ifp, int disable)
{
	int i;
	struct rtk_softc *sc = ifp->if_softc;

	callout_stop(&sc->rtk_tick_ch);

#ifdef DEVICE_POLLING
	ether_poll_deregister(ifp);
#endif /* DEVICE_POLLING */

	mii_down(&sc->mii);

	CSR_WRITE_1(sc, RTK_COMMAND, 0x00);
	CSR_WRITE_2(sc, RTK_IMR, 0x0000);

	if (sc->re_head != NULL) {
		m_freem(sc->re_head);
		sc->re_head = sc->re_tail = NULL;
	}

	/* Free the TX list buffers. */
	for (i = 0; i < RE_TX_QLEN; i++) {
		if (sc->re_ldata.re_txq[i].txq_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->re_ldata.re_txq[i].txq_dmamap);
			m_freem(sc->re_ldata.re_txq[i].txq_mbuf);
			sc->re_ldata.re_txq[i].txq_mbuf = NULL;
		}
	}

	/* Free the RX list buffers. */
	for (i = 0; i < RE_RX_DESC_CNT; i++) {
		if (sc->re_ldata.re_rxsoft[i].rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat,
			    sc->re_ldata.re_rxsoft[i].rxs_dmamap);
			m_freem(sc->re_ldata.re_rxsoft[i].rxs_mbuf);
			sc->re_ldata.re_rxsoft[i].rxs_mbuf = NULL;
		}
	}

	if (disable)
		re_disable(sc);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}