1 /* $NetBSD: rtl81x9.c,v 1.112 2022/06/25 02:46:15 tsutsui Exp $ */
2
3 /*
4 * Copyright (c) 1997, 1998
5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 * FreeBSD Id: if_rl.c,v 1.17 1999/06/19 20:17:37 wpaul Exp
35 */
36
37 /*
38 * RealTek 8129/8139 PCI NIC driver
39 *
40 * Supports several extremely cheap PCI 10/100 adapters based on
41 * the RealTek chipset. Datasheets can be obtained from
42 * www.realtek.com.tw.
43 *
44 * Written by Bill Paul <wpaul@ctr.columbia.edu>
45 * Electrical Engineering Department
46 * Columbia University, New York City
47 */
48
49 /*
50 * The RealTek 8139 PCI NIC redefines the meaning of 'low end.' This is
51 * probably the worst PCI ethernet controller ever made, with the possible
52 * exception of the FEAST chip made by SMC. The 8139 supports bus-master
53 * DMA, but it has a terrible interface that nullifies any performance
54 * gains that bus-master DMA usually offers.
55 *
56 * For transmission, the chip offers a series of four TX descriptor
57 * registers. Each transmit frame must be in a contiguous buffer, aligned
58 * on a longword (32-bit) boundary. This means we almost always have to
59 * do mbuf copies in order to transmit a frame, except in the unlikely
60 * case where a) the packet fits into a single mbuf, and b) the packet
61 * is 32-bit aligned within the mbuf's data area. The presence of only
62 * four descriptor registers means that we can never have more than four
63 * packets queued for transmission at any one time.
64 *
65 * Reception is not much better. The driver has to allocate a single large
66 * buffer area (up to 64K in size) into which the chip will DMA received
67 * frames. Because we don't know where within this region received packets
68 * will begin or end, we have no choice but to copy data from the buffer
69 * area into mbufs in order to pass the packets up to the higher protocol
70 * levels.
71 *
72 * It's impossible given this rotten design to really achieve decent
73 * performance at 100Mbps, unless you happen to have a 400MHz PII or
74 * some equally overmuscled CPU to drive it.
75 *
76 * On the bright side, the 8139 does have a built-in PHY, although
77 * rather than using an MDIO serial interface like most other NICs, the
78 * PHY registers are directly accessible through the 8139's register
79 * space. The 8139 supports autonegotiation, as well as a 64-bit multicast
80 * filter.
81 *
82 * The 8129 chip is an older version of the 8139 that uses an external PHY
83 * chip. The 8129 has a serial MDIO interface for accessing the MII where
84 * the 8139 lets you directly access the on-board PHY registers. We need
85 * to select which interface to use depending on the chip type.
86 */
87
88 #include <sys/cdefs.h>
89 __KERNEL_RCSID(0, "$NetBSD: rtl81x9.c,v 1.112 2022/06/25 02:46:15 tsutsui Exp $");
90
91
92 #include <sys/param.h>
93 #include <sys/systm.h>
94 #include <sys/callout.h>
95 #include <sys/device.h>
96 #include <sys/sockio.h>
97 #include <sys/mbuf.h>
98 #include <sys/malloc.h>
99 #include <sys/kernel.h>
100 #include <sys/socket.h>
101
102 #include <net/if.h>
103 #include <net/if_arp.h>
104 #include <net/if_ether.h>
105 #include <net/if_dl.h>
106 #include <net/if_media.h>
107
108 #include <net/bpf.h>
109 #include <sys/rndsource.h>
110
111 #include <sys/bus.h>
112 #include <machine/endian.h>
113
114 #include <dev/mii/mii.h>
115 #include <dev/mii/miivar.h>
116
117 #include <dev/ic/rtl81x9reg.h>
118 #include <dev/ic/rtl81x9var.h>
119
120 static void rtk_reset(struct rtk_softc *);
121 static void rtk_rxeof(struct rtk_softc *);
122 static void rtk_txeof(struct rtk_softc *);
123 static void rtk_start(struct ifnet *);
124 static int rtk_ioctl(struct ifnet *, u_long, void *);
125 static int rtk_init(struct ifnet *);
126 static void rtk_stop(struct ifnet *, int);
127
128 static void rtk_watchdog(struct ifnet *);
129
130 static void rtk_eeprom_putbyte(struct rtk_softc *, int, int);
131 static void rtk_mii_sync(struct rtk_softc *);
132 static void rtk_mii_send(struct rtk_softc *, uint32_t, int);
133 static int rtk_mii_readreg(struct rtk_softc *, struct rtk_mii_frame *);
134 static int rtk_mii_writereg(struct rtk_softc *, struct rtk_mii_frame *);
135
136 static int rtk_phy_readreg(device_t, int, int, uint16_t *);
137 static int rtk_phy_writereg(device_t, int, int, uint16_t);
138 static void rtk_phy_statchg(struct ifnet *);
139 static void rtk_tick(void *);
140
141 static int rtk_enable(struct rtk_softc *);
142 static void rtk_disable(struct rtk_softc *);
143
144 static void rtk_list_tx_init(struct rtk_softc *);
145
146 #define EE_SET(x) \
147 CSR_WRITE_1(sc, RTK_EECMD, \
148 CSR_READ_1(sc, RTK_EECMD) | (x))
149
150 #define EE_CLR(x) \
151 CSR_WRITE_1(sc, RTK_EECMD, \
152 CSR_READ_1(sc, RTK_EECMD) & ~(x))
153
154 #define EE_DELAY() DELAY(100)
155
156 #define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
157
158 /*
159 * Send a read command and address to the EEPROM, check for ACK.
160 */
161 static void
162 rtk_eeprom_putbyte(struct rtk_softc *sc, int addr, int addr_len)
163 {
164 int d, i;
165
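	/*
	 * Build the serial word: the read opcode followed by 'addr_len'
	 * address bits.  The loop below shifts it out MSB first, raising
	 * or lowering the data line and strobing the clock for each bit.
	 */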
166 d = (RTK_EECMD_READ << addr_len) | addr;
167
168 /*
169 * Feed in each bit and strobe the clock.
170 */
171 for (i = RTK_EECMD_LEN + addr_len; i > 0; i--) {
172 if (d & (1 << (i - 1))) {
173 EE_SET(RTK_EE_DATAIN);
174 } else {
175 EE_CLR(RTK_EE_DATAIN);
176 }
177 EE_DELAY();
178 EE_SET(RTK_EE_CLK);
179 EE_DELAY();
180 EE_CLR(RTK_EE_CLK);
181 EE_DELAY();
182 }
183 }
184
185 /*
186 * Read a word of data stored in the EEPROM at address 'addr.'
187 */
188 uint16_t
189 rtk_read_eeprom(struct rtk_softc *sc, int addr, int addr_len)
190 {
191 uint16_t word;
192 int i;
193
194 /* Enter EEPROM access mode. */
195 CSR_WRITE_1(sc, RTK_EECMD, RTK_EEMODE_PROGRAM);
196 EE_DELAY();
197 EE_SET(RTK_EE_SEL);
198
199 /*
200 * Send address of word we want to read.
201 */
202 rtk_eeprom_putbyte(sc, addr, addr_len);
203
204 /*
205 * Start reading bits from EEPROM.
206 */
207 word = 0;
208 for (i = 16; i > 0; i--) {
209 EE_SET(RTK_EE_CLK);
210 EE_DELAY();
211 if (CSR_READ_1(sc, RTK_EECMD) & RTK_EE_DATAOUT)
212 word |= 1 << (i - 1);
213 EE_CLR(RTK_EE_CLK);
214 EE_DELAY();
215 }
216
217 /* Turn off EEPROM access mode. */
218 CSR_WRITE_1(sc, RTK_EECMD, RTK_EEMODE_OFF);
219
220 return word;
221 }
222
223 /*
224 * MII access routines are provided for the 8129, which
225 * doesn't have a built-in PHY. For the 8139, we fake things
226 * up by diverting rtk_phy_readreg()/rtk_phy_writereg() to the
227 * direct access PHY registers.
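 *
 * The bit-banged access below clocks out a standard IEEE 802.3
 * clause-22 management frame: start and opcode bits, a 5-bit PHY
 * address and a 5-bit register address, followed by a turnaround
 * and 16 data bits.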
228 */
229 #define MII_SET(x) \
230 CSR_WRITE_1(sc, RTK_MII, \
231 CSR_READ_1(sc, RTK_MII) | (x))
232
233 #define MII_CLR(x) \
234 CSR_WRITE_1(sc, RTK_MII, \
235 CSR_READ_1(sc, RTK_MII) & ~(x))
236
237 /*
238 * Sync the PHYs by setting the data bit and strobing the clock 32 times.
239 */
240 static void
241 rtk_mii_sync(struct rtk_softc *sc)
242 {
243 int i;
244
245 MII_SET(RTK_MII_DIR | RTK_MII_DATAOUT);
246
247 for (i = 0; i < 32; i++) {
248 MII_SET(RTK_MII_CLK);
249 DELAY(1);
250 MII_CLR(RTK_MII_CLK);
251 DELAY(1);
252 }
253 }
254
255 /*
256 * Clock a series of bits through the MII.
257 */
258 static void
259 rtk_mii_send(struct rtk_softc *sc, uint32_t bits, int cnt)
260 {
261 int i;
262
263 MII_CLR(RTK_MII_CLK);
264
265 for (i = cnt; i > 0; i--) {
266 if (bits & (1 << (i - 1))) {
267 MII_SET(RTK_MII_DATAOUT);
268 } else {
269 MII_CLR(RTK_MII_DATAOUT);
270 }
271 DELAY(1);
272 MII_CLR(RTK_MII_CLK);
273 DELAY(1);
274 MII_SET(RTK_MII_CLK);
275 }
276 }
277
278 /*
279 * Read a PHY register through the MII.
280 */
281 static int
282 rtk_mii_readreg(struct rtk_softc *sc, struct rtk_mii_frame *frame)
283 {
284 int i, ack, s, rv = 0;
285
286 s = splnet();
287
288 /*
289 * Set up frame for RX.
290 */
291 frame->mii_stdelim = RTK_MII_STARTDELIM;
292 frame->mii_opcode = RTK_MII_READOP;
293 frame->mii_turnaround = 0;
294 frame->mii_data = 0;
295
296 CSR_WRITE_2(sc, RTK_MII, 0);
297
298 /*
299 * Turn on data xmit.
300 */
301 MII_SET(RTK_MII_DIR);
302
303 rtk_mii_sync(sc);
304
305 /*
306 * Send command/address info.
307 */
308 rtk_mii_send(sc, frame->mii_stdelim, 2);
309 rtk_mii_send(sc, frame->mii_opcode, 2);
310 rtk_mii_send(sc, frame->mii_phyaddr, 5);
311 rtk_mii_send(sc, frame->mii_regaddr, 5);
312
313 /* Idle bit */
314 MII_CLR((RTK_MII_CLK | RTK_MII_DATAOUT));
315 DELAY(1);
316 MII_SET(RTK_MII_CLK);
317 DELAY(1);
318
319 /* Turn off xmit. */
320 MII_CLR(RTK_MII_DIR);
321
322 /* Check for ack */
323 MII_CLR(RTK_MII_CLK);
324 DELAY(1);
325 ack = CSR_READ_2(sc, RTK_MII) & RTK_MII_DATAIN;
326 MII_SET(RTK_MII_CLK);
327 DELAY(1);
328
329 /*
330 * Now try reading data bits. If the ack failed, we still
331 * need to clock through 16 cycles to keep the PHY(s) in sync.
332 */
333 if (ack) {
334 for (i = 0; i < 16; i++) {
335 MII_CLR(RTK_MII_CLK);
336 DELAY(1);
337 MII_SET(RTK_MII_CLK);
338 DELAY(1);
339 }
340 rv = -1;
341 goto fail;
342 }
343
344 for (i = 16; i > 0; i--) {
345 MII_CLR(RTK_MII_CLK);
346 DELAY(1);
347 if (!ack) {
348 if (CSR_READ_2(sc, RTK_MII) & RTK_MII_DATAIN)
349 frame->mii_data |= 1 << (i - 1);
350 DELAY(1);
351 }
352 MII_SET(RTK_MII_CLK);
353 DELAY(1);
354 }
355
356 fail:
357 MII_CLR(RTK_MII_CLK);
358 DELAY(1);
359 MII_SET(RTK_MII_CLK);
360 DELAY(1);
361
362 splx(s);
363
364 return rv;
365 }
366
367 /*
368 * Write to a PHY register through the MII.
369 */
370 static int
371 rtk_mii_writereg(struct rtk_softc *sc, struct rtk_mii_frame *frame)
372 {
373 int s;
374
375 s = splnet();
376 /*
377 * Set up frame for TX.
378 */
379 frame->mii_stdelim = RTK_MII_STARTDELIM;
380 frame->mii_opcode = RTK_MII_WRITEOP;
381 frame->mii_turnaround = RTK_MII_TURNAROUND;
382
383 /*
384 * Turn on data output.
385 */
386 MII_SET(RTK_MII_DIR);
387
388 rtk_mii_sync(sc);
389
390 rtk_mii_send(sc, frame->mii_stdelim, 2);
391 rtk_mii_send(sc, frame->mii_opcode, 2);
392 rtk_mii_send(sc, frame->mii_phyaddr, 5);
393 rtk_mii_send(sc, frame->mii_regaddr, 5);
394 rtk_mii_send(sc, frame->mii_turnaround, 2);
395 rtk_mii_send(sc, frame->mii_data, 16);
396
397 /* Idle bit. */
398 MII_SET(RTK_MII_CLK);
399 DELAY(1);
400 MII_CLR(RTK_MII_CLK);
401 DELAY(1);
402
403 /*
404 * Turn off xmit.
405 */
406 MII_CLR(RTK_MII_DIR);
407
408 splx(s);
409
410 return 0;
411 }
412
413 static int
414 rtk_phy_readreg(device_t self, int phy, int reg, uint16_t *val)
415 {
416 struct rtk_softc *sc = device_private(self);
417 struct rtk_mii_frame frame;
418 int rv;
419 int rtk8139_reg;
420
421 if ((sc->sc_quirk & RTKQ_8129) == 0) {
422 if (phy != 7)
423 return -1;
424
425 switch (reg) {
426 case MII_BMCR:
427 rtk8139_reg = RTK_BMCR;
428 break;
429 case MII_BMSR:
430 rtk8139_reg = RTK_BMSR;
431 break;
432 case MII_ANAR:
433 rtk8139_reg = RTK_ANAR;
434 break;
435 case MII_ANER:
436 rtk8139_reg = RTK_ANER;
437 break;
438 case MII_ANLPAR:
439 rtk8139_reg = RTK_LPAR;
440 break;
441 case MII_PHYIDR1:
442 case MII_PHYIDR2:
443 *val = 0;
444 return 0;
445 default:
446 #if 0
447 printf("%s: bad phy register\n", device_xname(self));
448 #endif
449 return -1;
450 }
451 *val = CSR_READ_2(sc, rtk8139_reg);
452 return 0;
453 }
454
455 memset(&frame, 0, sizeof(frame));
456
457 frame.mii_phyaddr = phy;
458 frame.mii_regaddr = reg;
459 rv = rtk_mii_readreg(sc, &frame);
460 *val = frame.mii_data;
461
462 return rv;
463 }
464
465 static int
466 rtk_phy_writereg(device_t self, int phy, int reg, uint16_t val)
467 {
468 struct rtk_softc *sc = device_private(self);
469 struct rtk_mii_frame frame;
470 int rtk8139_reg;
471
472 if ((sc->sc_quirk & RTKQ_8129) == 0) {
473 if (phy != 7)
474 return -1;
475
476 switch (reg) {
477 case MII_BMCR:
478 rtk8139_reg = RTK_BMCR;
479 break;
480 case MII_BMSR:
481 rtk8139_reg = RTK_BMSR;
482 break;
483 case MII_ANAR:
484 rtk8139_reg = RTK_ANAR;
485 break;
486 case MII_ANER:
487 rtk8139_reg = RTK_ANER;
488 break;
489 case MII_ANLPAR:
490 rtk8139_reg = RTK_LPAR;
491 break;
492 default:
493 #if 0
494 printf("%s: bad phy register\n", device_xname(self));
495 #endif
496 return -1;
497 }
498 CSR_WRITE_2(sc, rtk8139_reg, val);
499 return 0;
500 }
501
502 memset(&frame, 0, sizeof(frame));
503
504 frame.mii_phyaddr = phy;
505 frame.mii_regaddr = reg;
506 frame.mii_data = val;
507
508 return rtk_mii_writereg(sc, &frame);
509 }
510
511 static void
512 rtk_phy_statchg(struct ifnet *ifp)
513 {
514
515 /* Nothing to do. */
516 }
517
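/*
 * The multicast hash index is the top 6 bits of the big-endian CRC-32
 * of the station address, i.e. a value 0-63 selecting one bit of the
 * 64-bit MAR0/MAR4 filter programmed in rtk_setmulti() below.
 */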
518 #define rtk_calchash(addr) \
519 (ether_crc32_be((addr), ETHER_ADDR_LEN) >> 26)
520
521 /*
522 * Program the 64-bit multicast hash filter.
523 */
524 void
525 rtk_setmulti(struct rtk_softc *sc)
526 {
527 struct ethercom *ec = &sc->ethercom;
528 struct ifnet *ifp = &ec->ec_if;
529 uint32_t hashes[2] = { 0, 0 };
530 uint32_t rxfilt;
531 struct ether_multi *enm;
532 struct ether_multistep step;
533 int h, mcnt;
534
535 rxfilt = CSR_READ_4(sc, RTK_RXCFG);
536
537 if (ifp->if_flags & IFF_PROMISC) {
538 allmulti:
539 ifp->if_flags |= IFF_ALLMULTI;
540 rxfilt |= RTK_RXCFG_RX_MULTI;
541 CSR_WRITE_4(sc, RTK_RXCFG, rxfilt);
542 CSR_WRITE_4(sc, RTK_MAR0, 0xFFFFFFFF);
543 CSR_WRITE_4(sc, RTK_MAR4, 0xFFFFFFFF);
544 return;
545 }
546
547 /* first, zot all the existing hash bits */
548 CSR_WRITE_4(sc, RTK_MAR0, 0);
549 CSR_WRITE_4(sc, RTK_MAR4, 0);
550
551 /* now program new ones */
552 ETHER_LOCK(ec);
553 ETHER_FIRST_MULTI(step, ec, enm);
554 mcnt = 0;
555 while (enm != NULL) {
556 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
557 ETHER_ADDR_LEN) != 0) {
558 ETHER_UNLOCK(ec);
559 goto allmulti;
560 }
561
562 h = rtk_calchash(enm->enm_addrlo);
563 if (h < 32)
564 hashes[0] |= __BIT(h);
565 else
566 hashes[1] |= __BIT(h - 32);
567 mcnt++;
568 ETHER_NEXT_MULTI(step, enm);
569 }
570 ETHER_UNLOCK(ec);
571
572 ifp->if_flags &= ~IFF_ALLMULTI;
573
574 if (mcnt)
575 rxfilt |= RTK_RXCFG_RX_MULTI;
576 else
577 rxfilt &= ~RTK_RXCFG_RX_MULTI;
578
579 CSR_WRITE_4(sc, RTK_RXCFG, rxfilt);
580
581 /*
582 * For some unfathomable reason, RealTek decided to reverse
583 * the order of the multicast hash registers in the PCI Express
584 * parts. This means we have to write the hash pattern in reverse
585 * order for those devices.
586 */
587 if ((sc->sc_quirk & RTKQ_PCIE) != 0) {
588 CSR_WRITE_4(sc, RTK_MAR0, bswap32(hashes[1]));
589 CSR_WRITE_4(sc, RTK_MAR4, bswap32(hashes[0]));
590 } else {
591 CSR_WRITE_4(sc, RTK_MAR0, hashes[0]);
592 CSR_WRITE_4(sc, RTK_MAR4, hashes[1]);
593 }
594 }
595
596 void
597 rtk_reset(struct rtk_softc *sc)
598 {
599 int i;
600
601 CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_RESET);
602
603 for (i = 0; i < RTK_TIMEOUT; i++) {
604 DELAY(10);
605 if ((CSR_READ_1(sc, RTK_COMMAND) & RTK_CMD_RESET) == 0)
606 break;
607 }
608 if (i == RTK_TIMEOUT)
609 printf("%s: reset never completed!\n",
610 device_xname(sc->sc_dev));
611 }
612
613 /*
614 * Attach the interface. Allocate softc structures, do ifmedia
615 * setup and ethernet/BPF attach.
616 */
617 void
618 rtk_attach(struct rtk_softc *sc)
619 {
620 device_t self = sc->sc_dev;
621 struct ifnet *ifp;
622 struct mii_data * const mii = &sc->mii;
623 struct rtk_tx_desc *txd;
624 uint16_t val;
625 uint8_t eaddr[ETHER_ADDR_LEN];
626 int error;
627 int i, addr_len;
628
629 callout_init(&sc->rtk_tick_ch, 0);
630 callout_setfunc(&sc->rtk_tick_ch, rtk_tick, sc);
631
632 /*
633 * Check EEPROM type: 93C46 or 93C56 (this determines the address width).
634 */
635 if (rtk_read_eeprom(sc, RTK_EE_ID, RTK_EEADDR_LEN1) == 0x8129)
636 addr_len = RTK_EEADDR_LEN1;
637 else
638 addr_len = RTK_EEADDR_LEN0;
639
640 /*
641 * Get station address.
642 */
643 val = rtk_read_eeprom(sc, RTK_EE_EADDR0, addr_len);
644 eaddr[0] = val & 0xff;
645 eaddr[1] = val >> 8;
646 val = rtk_read_eeprom(sc, RTK_EE_EADDR1, addr_len);
647 eaddr[2] = val & 0xff;
648 eaddr[3] = val >> 8;
649 val = rtk_read_eeprom(sc, RTK_EE_EADDR2, addr_len);
650 eaddr[4] = val & 0xff;
651 eaddr[5] = val >> 8;
652
653 if ((error = bus_dmamem_alloc(sc->sc_dmat,
654 RTK_RXBUFLEN + 16, PAGE_SIZE, 0, &sc->sc_dmaseg, 1, &sc->sc_dmanseg,
655 BUS_DMA_NOWAIT)) != 0) {
656 aprint_error_dev(self,
657 "can't allocate recv buffer, error = %d\n", error);
658 goto fail_0;
659 }
660
661 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dmaseg, sc->sc_dmanseg,
662 RTK_RXBUFLEN + 16, (void **)&sc->rtk_rx_buf,
663 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
664 aprint_error_dev(self,
665 "can't map recv buffer, error = %d\n", error);
666 goto fail_1;
667 }
668
669 if ((error = bus_dmamap_create(sc->sc_dmat,
670 RTK_RXBUFLEN + 16, 1, RTK_RXBUFLEN + 16, 0, BUS_DMA_NOWAIT,
671 &sc->recv_dmamap)) != 0) {
672 aprint_error_dev(self,
673 "can't create recv buffer DMA map, error = %d\n", error);
674 goto fail_2;
675 }
676
677 if ((error = bus_dmamap_load(sc->sc_dmat, sc->recv_dmamap,
678 sc->rtk_rx_buf, RTK_RXBUFLEN + 16,
679 NULL, BUS_DMA_READ | BUS_DMA_NOWAIT)) != 0) {
680 aprint_error_dev(self,
681 "can't load recv buffer DMA map, error = %d\n", error);
682 goto fail_3;
683 }
684
685 for (i = 0; i < RTK_TX_LIST_CNT; i++) {
686 txd = &sc->rtk_tx_descs[i];
687 if ((error = bus_dmamap_create(sc->sc_dmat,
688 MCLBYTES, 1, MCLBYTES, 0, BUS_DMA_NOWAIT,
689 &txd->txd_dmamap)) != 0) {
690 aprint_error_dev(self,
691 "can't create snd buffer DMA map, error = %d\n",
692 error);
693 goto fail_4;
694 }
695 txd->txd_txaddr = RTK_TXADDR0 + (i * 4);
696 txd->txd_txstat = RTK_TXSTAT0 + (i * 4);
697 }
698 SIMPLEQ_INIT(&sc->rtk_tx_free);
699 SIMPLEQ_INIT(&sc->rtk_tx_dirty);
700
701 /*
702 * From this point forward, the attachment cannot fail. A failure
703 * before this point releases all resources that may have been
704 * allocated.
705 */
706 sc->sc_flags |= RTK_ATTACHED;
707
708 /* Reset the adapter. */
709 rtk_reset(sc);
710
711 aprint_normal_dev(self, "Ethernet address %s\n", ether_sprintf(eaddr));
712
713 ifp = &sc->ethercom.ec_if;
714 ifp->if_softc = sc;
715 strcpy(ifp->if_xname, device_xname(self));
716 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
717 ifp->if_ioctl = rtk_ioctl;
718 ifp->if_start = rtk_start;
719 ifp->if_watchdog = rtk_watchdog;
720 ifp->if_init = rtk_init;
721 ifp->if_stop = rtk_stop;
722 IFQ_SET_READY(&ifp->if_snd);
723
724 /*
725 * Do ifmedia setup.
726 */
727 mii->mii_ifp = ifp;
728 mii->mii_readreg = rtk_phy_readreg;
729 mii->mii_writereg = rtk_phy_writereg;
730 mii->mii_statchg = rtk_phy_statchg;
731 sc->ethercom.ec_mii = mii;
732 ifmedia_init(&mii->mii_media, IFM_IMASK, ether_mediachange,
733 ether_mediastatus);
734 mii_attach(self, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0);
735
736 /* Choose a default media. */
737 if (LIST_FIRST(&mii->mii_phys) == NULL) {
738 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
739 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
740 } else
741 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
742
743 /*
744 * Call MI attach routines.
745 */
746 if_attach(ifp);
747 if_deferred_start_init(ifp, NULL);
748 ether_ifattach(ifp, eaddr);
749
750 rnd_attach_source(&sc->rnd_source, device_xname(self),
751 RND_TYPE_NET, RND_FLAG_DEFAULT);
752
753 return;
754 fail_4:
755 for (i = 0; i < RTK_TX_LIST_CNT; i++) {
756 txd = &sc->rtk_tx_descs[i];
757 if (txd->txd_dmamap != NULL)
758 bus_dmamap_destroy(sc->sc_dmat, txd->txd_dmamap);
759 }
760 fail_3:
761 bus_dmamap_destroy(sc->sc_dmat, sc->recv_dmamap);
762 fail_2:
763 bus_dmamem_unmap(sc->sc_dmat, sc->rtk_rx_buf,
764 RTK_RXBUFLEN + 16);
765 fail_1:
766 bus_dmamem_free(sc->sc_dmat, &sc->sc_dmaseg, sc->sc_dmanseg);
767 fail_0:
768 return;
769 }
770
771 /*
772 * Initialize the transmit descriptors.
773 */
774 static void
775 rtk_list_tx_init(struct rtk_softc *sc)
776 {
777 struct rtk_tx_desc *txd;
778 int i;
779
780 while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_dirty)) != NULL)
781 SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_dirty, txd_q);
782 while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_free)) != NULL)
783 SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_free, txd_q);
784
785 for (i = 0; i < RTK_TX_LIST_CNT; i++) {
786 txd = &sc->rtk_tx_descs[i];
787 CSR_WRITE_4(sc, txd->txd_txaddr, 0);
788 SIMPLEQ_INSERT_TAIL(&sc->rtk_tx_free, txd, txd_q);
789 }
790 }
791
792 /*
793 * rtk_activate:
794 * Handle device activation/deactivation requests.
795 */
796 int
797 rtk_activate(device_t self, enum devact act)
798 {
799 struct rtk_softc *sc = device_private(self);
800
801 switch (act) {
802 case DVACT_DEACTIVATE:
803 if_deactivate(&sc->ethercom.ec_if);
804 return 0;
805 default:
806 return EOPNOTSUPP;
807 }
808 }
809
810 /*
811 * rtk_detach:
812 * Detach a rtk interface.
813 */
814 int
815 rtk_detach(struct rtk_softc *sc)
816 {
817 struct ifnet *ifp = &sc->ethercom.ec_if;
818 struct rtk_tx_desc *txd;
819 int i;
820
821 /*
822 * Succeed now if there isn't any work to do.
823 */
824 if ((sc->sc_flags & RTK_ATTACHED) == 0)
825 return 0;
826
827 /* Unhook our tick handler. */
828 callout_stop(&sc->rtk_tick_ch);
829
830 /* Detach all PHYs. */
831 mii_detach(&sc->mii, MII_PHY_ANY, MII_OFFSET_ANY);
832
833 rnd_detach_source(&sc->rnd_source);
834
835 ether_ifdetach(ifp);
836 if_detach(ifp);
837
838 /* Delete all remaining media. */
839 ifmedia_fini(&sc->mii.mii_media);
840
841 for (i = 0; i < RTK_TX_LIST_CNT; i++) {
842 txd = &sc->rtk_tx_descs[i];
843 if (txd->txd_dmamap != NULL)
844 bus_dmamap_destroy(sc->sc_dmat, txd->txd_dmamap);
845 }
846 bus_dmamap_destroy(sc->sc_dmat, sc->recv_dmamap);
847 bus_dmamem_unmap(sc->sc_dmat, sc->rtk_rx_buf,
848 RTK_RXBUFLEN + 16);
849 bus_dmamem_free(sc->sc_dmat, &sc->sc_dmaseg, sc->sc_dmanseg);
850
851 /* we don't want to run again */
852 sc->sc_flags &= ~RTK_ATTACHED;
853
854 return 0;
855 }
856
857 /*
858 * rtk_enable:
859 * Enable the RTL81X9 chip.
860 */
861 int
862 rtk_enable(struct rtk_softc *sc)
863 {
864
865 if (RTK_IS_ENABLED(sc) == 0 && sc->sc_enable != NULL) {
866 if ((*sc->sc_enable)(sc) != 0) {
867 printf("%s: device enable failed\n",
868 device_xname(sc->sc_dev));
869 return EIO;
870 }
871 sc->sc_flags |= RTK_ENABLED;
872 }
873 return 0;
874 }
875
876 /*
877 * rtk_disable:
878 * Disable the RTL81X9 chip.
879 */
880 void
881 rtk_disable(struct rtk_softc *sc)
882 {
883
884 if (RTK_IS_ENABLED(sc) && sc->sc_disable != NULL) {
885 (*sc->sc_disable)(sc);
886 sc->sc_flags &= ~RTK_ENABLED;
887 }
888 }
889
890 /*
891 * A frame has been uploaded: pass the resulting mbuf chain up to
892 * the higher level protocols.
893 *
894 * You know there's something wrong with a PCI bus-master chip design.
895 *
896 * The receive operation is badly documented in the datasheet, so I'll
897 * attempt to document it here. The driver provides a buffer area and
898 * places its base address in the RX buffer start address register.
899 * The chip then begins copying frames into the RX buffer. Each frame
900 * is preceded by a 32-bit RX status word which specifies the length
901 * of the frame and certain other status bits. Each frame (starting with
902 * the status word) is also 32-bit aligned. The frame length is in the
903 * upper 16 bits of the status word; the lower 15 bits correspond with
904 * the 'rx status register' mentioned in the datasheet.
905 *
906 * Note: to make the Alpha happy, the frame payload needs to be aligned
907 * on a 32-bit boundary. To achieve this, we copy the data into the
908 * mbuf shifted forward 2 bytes.
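 *
 * The receive area is therefore used as a simple ring, roughly
 * (a sketch, not to scale):
 *
 *	+--------+------------------+-----+--------+------------------+--
 *	| status | frame data       | pad | status | frame data       |...
 *	| 4 bytes| (len from status)|     | 4 bytes|                  |
 *	+--------+------------------+-----+--------+------------------+--
 *
 * with each entry starting on a 32-bit boundary and the whole area
 * wrapping back to offset 0 at RTK_RXBUFLEN.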
909 */
910 static void
911 rtk_rxeof(struct rtk_softc *sc)
912 {
913 struct mbuf *m;
914 struct ifnet *ifp;
915 uint8_t *rxbufpos, *dst;
916 u_int total_len, wrap;
917 uint32_t rxstat;
918 uint16_t cur_rx, new_rx;
919 uint16_t limit;
920 uint16_t rx_bytes, max_bytes;
921
922 ifp = &sc->ethercom.ec_if;
923
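	/*
	 * The chip's current-read-address register is maintained 16 bytes
	 * behind the actual read pointer (note the matching '- 16' when it
	 * is written back at next_packet below), so add the offset here.
	 */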
924 cur_rx = (CSR_READ_2(sc, RTK_CURRXADDR) + 16) % RTK_RXBUFLEN;
925
926 /* Do not try to read past this point. */
927 limit = CSR_READ_2(sc, RTK_CURRXBUF) % RTK_RXBUFLEN;
928
929 if (limit < cur_rx)
930 max_bytes = (RTK_RXBUFLEN - cur_rx) + limit;
931 else
932 max_bytes = limit - cur_rx;
933 rx_bytes = 0;
934
935 while ((CSR_READ_1(sc, RTK_COMMAND) & RTK_CMD_EMPTY_RXBUF) == 0) {
936 rxbufpos = sc->rtk_rx_buf + cur_rx;
937 bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap, cur_rx,
938 RTK_RXSTAT_LEN, BUS_DMASYNC_POSTREAD);
939 rxstat = le32toh(*(uint32_t *)rxbufpos);
940 bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap, cur_rx,
941 RTK_RXSTAT_LEN, BUS_DMASYNC_PREREAD);
942
943 /*
944 * Here's a totally undocumented fact for you. When the
945 * RealTek chip is in the process of copying a packet into
946 * RAM for you, the length will be 0xfff0. If you spot a
947 * packet header with this value, you need to stop. The
948 * datasheet makes absolutely no mention of this and
949 * RealTek should be shot for this.
950 */
951 total_len = rxstat >> 16;
952 if (total_len == RTK_RXSTAT_UNFINISHED)
953 break;
954
955 if ((rxstat & RTK_RXSTAT_RXOK) == 0 ||
956 total_len < ETHER_MIN_LEN ||
957 total_len > (MCLBYTES - RTK_ETHER_ALIGN)) {
958 if_statinc(ifp, if_ierrors);
959
960 /*
961 * submitted by:[netbsd-pcmcia:00484]
962 * Takahiro Kambe <taca@sky.yamashina.kyoto.jp>
963 * obtain from:
964 * FreeBSD if_rl.c rev 1.24->1.25
965 *
966 */
967 #if 0
968 if (rxstat & (RTK_RXSTAT_BADSYM | RTK_RXSTAT_RUNT |
969 RTK_RXSTAT_GIANT | RTK_RXSTAT_CRCERR |
970 RTK_RXSTAT_ALIGNERR)) {
971 CSR_WRITE_2(sc, RTK_COMMAND, RTK_CMD_TX_ENB);
972 CSR_WRITE_2(sc, RTK_COMMAND,
973 RTK_CMD_TX_ENB | RTK_CMD_RX_ENB);
974 CSR_WRITE_4(sc, RTK_RXCFG, RTK_RXCFG_CONFIG);
975 CSR_WRITE_4(sc, RTK_RXADDR,
976 sc->recv_dmamap->dm_segs[0].ds_addr);
977 cur_rx = 0;
978 }
979 break;
980 #else
981 rtk_init(ifp);
982 return;
983 #endif
984 }
985
986 /* No errors; receive the packet. */
987 rx_bytes += total_len + RTK_RXSTAT_LEN;
988
989 /*
990 * Avoid trying to read more bytes than we know
991 * the chip has prepared for us.
992 */
993 if (rx_bytes > max_bytes)
994 break;
995
996 /*
997 * Skip the status word, wrapping around to the beginning
998 * of the Rx area, if necessary.
999 */
1000 cur_rx = (cur_rx + RTK_RXSTAT_LEN) % RTK_RXBUFLEN;
1001 rxbufpos = sc->rtk_rx_buf + cur_rx;
1002
1003 /*
1004 * Compute the number of bytes at which the packet
1005 * will wrap to the beginning of the ring buffer.
1006 */
1007 wrap = RTK_RXBUFLEN - cur_rx;
1008
1009 /*
1010 * Compute where the next pending packet is.
1011 */
1012 if (total_len > wrap)
1013 new_rx = total_len - wrap;
1014 else
1015 new_rx = cur_rx + total_len;
1016 /* Round up to 32-bit boundary. */
1017 new_rx = roundup2(new_rx, sizeof(uint32_t)) % RTK_RXBUFLEN;
1018
1019 /*
1020 * The RealTek chip includes the CRC with every
1021 * incoming packet; trim it off here.
1022 */
1023 total_len -= ETHER_CRC_LEN;
1024
1025 /*
1026 * Now allocate an mbuf (and possibly a cluster) to hold
1027 * the packet. Note we offset the packet 2 bytes so that
1028 * data after the Ethernet header will be 4-byte aligned.
1029 */
1030 MGETHDR(m, M_DONTWAIT, MT_DATA);
1031 if (m == NULL) {
1032 printf("%s: unable to allocate Rx mbuf\n",
1033 device_xname(sc->sc_dev));
1034 if_statinc(ifp, if_ierrors);
1035 goto next_packet;
1036 }
1037 MCLAIM(m, &sc->ethercom.ec_rx_mowner);
1038 if (total_len > (MHLEN - RTK_ETHER_ALIGN)) {
1039 MCLGET(m, M_DONTWAIT);
1040 if ((m->m_flags & M_EXT) == 0) {
1041 printf("%s: unable to allocate Rx cluster\n",
1042 device_xname(sc->sc_dev));
1043 if_statinc(ifp, if_ierrors);
1044 m_freem(m);
1045 m = NULL;
1046 goto next_packet;
1047 }
1048 }
1049 m->m_data += RTK_ETHER_ALIGN; /* for alignment */
1050 m_set_rcvif(m, ifp);
1051 m->m_pkthdr.len = m->m_len = total_len;
1052 dst = mtod(m, void *);
1053
1054 /*
1055 * If the packet wraps, copy up to the wrapping point.
1056 */
1057 if (total_len > wrap) {
1058 bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap,
1059 cur_rx, wrap, BUS_DMASYNC_POSTREAD);
1060 memcpy(dst, rxbufpos, wrap);
1061 bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap,
1062 cur_rx, wrap, BUS_DMASYNC_PREREAD);
1063 cur_rx = 0;
1064 rxbufpos = sc->rtk_rx_buf;
1065 total_len -= wrap;
1066 dst += wrap;
1067 }
1068
1069 /*
1070 * ...and now the rest.
1071 */
1072 bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap,
1073 cur_rx, total_len, BUS_DMASYNC_POSTREAD);
1074 memcpy(dst, rxbufpos, total_len);
1075 bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap,
1076 cur_rx, total_len, BUS_DMASYNC_PREREAD);
1077
1078 next_packet:
1079 CSR_WRITE_2(sc, RTK_CURRXADDR, (new_rx - 16) % RTK_RXBUFLEN);
1080 cur_rx = new_rx;
1081
1082 if (m == NULL)
1083 continue;
1084
1085 /* pass it on. */
1086 if_percpuq_enqueue(ifp->if_percpuq, m);
1087 }
1088 }
1089
1090 /*
1091 * A frame was downloaded to the chip. It's safe for us to clean up
1092 * the list buffers.
1093 */
1094 static void
1095 rtk_txeof(struct rtk_softc *sc)
1096 {
1097 struct ifnet *ifp;
1098 struct rtk_tx_desc *txd;
1099 uint32_t txstat;
1100
1101 ifp = &sc->ethercom.ec_if;
1102
1103 /*
1104 * Go through our tx list and free mbufs for those
1105 * frames that have been transmitted.
1106 */
1107 while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_dirty)) != NULL) {
1108 txstat = CSR_READ_4(sc, txd->txd_txstat);
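		/*
		 * If none of the completion bits (OK, underrun, abort) are
		 * set yet, the chip is still working on this descriptor;
		 * stop and wait for the next TX interrupt.
		 */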
1109 if ((txstat & (RTK_TXSTAT_TX_OK |
1110 RTK_TXSTAT_TX_UNDERRUN | RTK_TXSTAT_TXABRT)) == 0)
1111 break;
1112
1113 SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_dirty, txd_q);
1114
1115 bus_dmamap_sync(sc->sc_dmat, txd->txd_dmamap, 0,
1116 txd->txd_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1117 bus_dmamap_unload(sc->sc_dmat, txd->txd_dmamap);
1118 m_freem(txd->txd_mbuf);
1119 txd->txd_mbuf = NULL;
1120
1121 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
1122 if_statadd_ref(nsr, if_collisions,
1123 (txstat & RTK_TXSTAT_COLLCNT) >> 24);
1124
1125 if (txstat & RTK_TXSTAT_TX_OK)
1126 if_statinc_ref(nsr, if_opackets);
1127 else {
1128 if_statinc_ref(nsr, if_oerrors);
1129
1130 /*
1131 * Increase the early TX threshold if an underrun occurred.
1132 * sc_txthresh is in units of 32 bytes, so each step adds 64 bytes.
1133 */
1134 if (txstat & RTK_TXSTAT_TX_UNDERRUN) {
1135 #ifdef DEBUG
1136 printf("%s: transmit underrun;",
1137 device_xname(sc->sc_dev));
1138 #endif
1139 if (sc->sc_txthresh < RTK_TXTH_MAX) {
1140 sc->sc_txthresh += 2;
1141 #ifdef DEBUG
1142 printf(" new threshold: %d bytes",
1143 sc->sc_txthresh * 32);
1144 #endif
1145 }
1146 #ifdef DEBUG
1147 printf("\n");
1148 #endif
1149 }
1150 if (txstat & (RTK_TXSTAT_TXABRT | RTK_TXSTAT_OUTOFWIN))
1151 CSR_WRITE_4(sc, RTK_TXCFG, RTK_TXCFG_CONFIG);
1152 }
1153 IF_STAT_PUTREF(ifp);
1154 SIMPLEQ_INSERT_TAIL(&sc->rtk_tx_free, txd, txd_q);
1155 ifp->if_flags &= ~IFF_OACTIVE;
1156 }
1157
1158 /* Clear the timeout timer if there is no pending packet. */
1159 if (SIMPLEQ_EMPTY(&sc->rtk_tx_dirty))
1160 ifp->if_timer = 0;
1161
1162 }
1163
1164 int
1165 rtk_intr(void *arg)
1166 {
1167 struct rtk_softc *sc;
1168 struct ifnet *ifp;
1169 uint16_t status, rndstatus = 0;
1170 int handled;
1171
1172 sc = arg;
1173 ifp = &sc->ethercom.ec_if;
1174
1175 if (!device_has_power(sc->sc_dev))
1176 return 0;
1177
1178 /* Disable interrupts. */
1179 CSR_WRITE_2(sc, RTK_IMR, 0x0000);
1180
1181 handled = 0;
1182 for (;;) {
1183
1184 status = CSR_READ_2(sc, RTK_ISR);
1185
1186 if (status == 0xffff)
1187 break; /* Card is gone... */
1188
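		/*
		 * Writing the pending bits back to the ISR acknowledges
		 * (clears) them; keep a copy in rndstatus to feed the
		 * random source on the way out.
		 */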
1189 if (status) {
1190 CSR_WRITE_2(sc, RTK_ISR, status);
1191 rndstatus = status;
1192 }
1193
1194 if ((status & RTK_INTRS) == 0)
1195 break;
1196
1197 handled = 1;
1198
1199 if (status & RTK_ISR_RX_OK)
1200 rtk_rxeof(sc);
1201
1202 if (status & RTK_ISR_RX_ERR)
1203 rtk_rxeof(sc);
1204
1205 if (status & (RTK_ISR_TX_OK | RTK_ISR_TX_ERR))
1206 rtk_txeof(sc);
1207
1208 if (status & RTK_ISR_SYSTEM_ERR) {
1209 rtk_reset(sc);
1210 rtk_init(ifp);
1211 }
1212 }
1213
1214 /* Re-enable interrupts. */
1215 CSR_WRITE_2(sc, RTK_IMR, RTK_INTRS);
1216
1217 if_schedule_deferred_start(ifp);
1218
1219 rnd_add_uint32(&sc->rnd_source, rndstatus);
1220
1221 return handled;
1222 }
1223
1224 /*
1225 * Main transmit routine.
1226 */
1227
1228 static void
1229 rtk_start(struct ifnet *ifp)
1230 {
1231 struct rtk_softc *sc;
1232 struct rtk_tx_desc *txd;
1233 struct mbuf *m_head, *m_new;
1234 int error, len;
1235
1236 sc = ifp->if_softc;
1237
1238 while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_free)) != NULL) {
1239 IFQ_POLL(&ifp->if_snd, m_head);
1240 if (m_head == NULL)
1241 break;
1242 m_new = NULL;
1243
1244 /*
1245 * Load the DMA map. If this fails, the packet didn't
1246 * fit in one DMA segment, and we need to copy. Note,
1247 * the packet must also be aligned.
1248 * If the packet is too small, copy it too, so we're sure
1249 * to have enough room for the pad buffer.
1250 */
1251 if ((mtod(m_head, uintptr_t) & 3) != 0 ||
1252 m_head->m_pkthdr.len < ETHER_PAD_LEN ||
1253 bus_dmamap_load_mbuf(sc->sc_dmat, txd->txd_dmamap,
1254 m_head, BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
1255 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1256 if (m_new == NULL) {
1257 printf("%s: unable to allocate Tx mbuf\n",
1258 device_xname(sc->sc_dev));
1259 break;
1260 }
1261 MCLAIM(m_new, &sc->ethercom.ec_tx_mowner);
1262 if (m_head->m_pkthdr.len > MHLEN) {
1263 MCLGET(m_new, M_DONTWAIT);
1264 if ((m_new->m_flags & M_EXT) == 0) {
1265 printf("%s: unable to allocate Tx "
1266 "cluster\n",
1267 device_xname(sc->sc_dev));
1268 m_freem(m_new);
1269 break;
1270 }
1271 }
1272 m_copydata(m_head, 0, m_head->m_pkthdr.len,
1273 mtod(m_new, void *));
1274 m_new->m_pkthdr.len = m_new->m_len =
1275 m_head->m_pkthdr.len;
1276 if (m_head->m_pkthdr.len < ETHER_PAD_LEN) {
1277 memset(
1278 mtod(m_new, char *) + m_head->m_pkthdr.len,
1279 0, ETHER_PAD_LEN - m_head->m_pkthdr.len);
1280 m_new->m_pkthdr.len = m_new->m_len =
1281 ETHER_PAD_LEN;
1282 }
1283 error = bus_dmamap_load_mbuf(sc->sc_dmat,
1284 txd->txd_dmamap, m_new,
1285 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1286 if (error) {
1287 printf("%s: unable to load Tx buffer, "
1288 "error = %d\n",
1289 device_xname(sc->sc_dev), error);
1290 break;
1291 }
1292 }
1293 IFQ_DEQUEUE(&ifp->if_snd, m_head);
1294 /*
1295 * If there's a BPF listener, bounce a copy of this frame
1296 * to him.
1297 */
1298 bpf_mtap(ifp, m_head, BPF_D_OUT);
1299 if (m_new != NULL) {
1300 m_freem(m_head);
1301 m_head = m_new;
1302 }
1303 txd->txd_mbuf = m_head;
1304
1305 SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_free, txd_q);
1306 SIMPLEQ_INSERT_TAIL(&sc->rtk_tx_dirty, txd, txd_q);
1307
1308 /*
1309 * Transmit the frame.
1310 */
1311 bus_dmamap_sync(sc->sc_dmat,
1312 txd->txd_dmamap, 0, txd->txd_dmamap->dm_mapsize,
1313 BUS_DMASYNC_PREWRITE);
1314
1315 len = txd->txd_dmamap->dm_segs[0].ds_len;
1316
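		/*
		 * Hand the chip the buffer's physical address, then write
		 * the per-slot status register with the early-TX threshold
		 * and the frame length; that second write is what actually
		 * kicks off transmission of this slot.
		 */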
1317 CSR_WRITE_4(sc, txd->txd_txaddr,
1318 txd->txd_dmamap->dm_segs[0].ds_addr);
1319 CSR_WRITE_4(sc, txd->txd_txstat,
1320 RTK_TXSTAT_THRESH(sc->sc_txthresh) | len);
1321
1322 /*
1323 * Set a timeout in case the chip goes out to lunch.
1324 */
1325 ifp->if_timer = 5;
1326 }
1327
1328 /*
1329 * We broke out of the loop because all our TX slots are
1330 * full. Mark the NIC as busy until it drains some of the
1331 * packets from the queue.
1332 */
1333 if (SIMPLEQ_EMPTY(&sc->rtk_tx_free))
1334 ifp->if_flags |= IFF_OACTIVE;
1335 }
1336
1337 static int
1338 rtk_init(struct ifnet *ifp)
1339 {
1340 struct rtk_softc *sc = ifp->if_softc;
1341 int error, i;
1342 uint32_t rxcfg;
1343
1344 if ((error = rtk_enable(sc)) != 0)
1345 goto out;
1346
1347 /*
1348 * Cancel pending I/O.
1349 */
1350 rtk_stop(ifp, 0);
1351
1352 /* Init our MAC address */
1353 for (i = 0; i < ETHER_ADDR_LEN; i++) {
1354 CSR_WRITE_1(sc, RTK_IDR0 + i, CLLADDR(ifp->if_sadl)[i]);
1355 }
1356
1357 /* Init the RX buffer pointer register. */
1358 bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap, 0,
1359 sc->recv_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1360 CSR_WRITE_4(sc, RTK_RXADDR, sc->recv_dmamap->dm_segs[0].ds_addr);
1361
1362 /* Init TX descriptors. */
1363 rtk_list_tx_init(sc);
1364
1365 /* Init Early TX threshold. */
1366 sc->sc_txthresh = RTK_TXTH_256;
1367 /*
1368 * Enable transmit and receive.
1369 */
1370 CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_TX_ENB | RTK_CMD_RX_ENB);
1371
1372 /*
1373 * Set the initial TX and RX configuration.
1374 */
1375 CSR_WRITE_4(sc, RTK_TXCFG, RTK_TXCFG_CONFIG);
1376 CSR_WRITE_4(sc, RTK_RXCFG, RTK_RXCFG_CONFIG);
1377
1378 /* Set the individual bit to receive frames for this host only. */
1379 rxcfg = CSR_READ_4(sc, RTK_RXCFG);
1380 rxcfg |= RTK_RXCFG_RX_INDIV;
1381
1382 /* If we want promiscuous mode, set the allframes bit. */
1383 if (ifp->if_flags & IFF_PROMISC) {
1384 rxcfg |= RTK_RXCFG_RX_ALLPHYS;
1385 CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
1386 } else {
1387 rxcfg &= ~RTK_RXCFG_RX_ALLPHYS;
1388 CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
1389 }
1390
1391 /*
1392 * Set capture broadcast bit to capture broadcast frames.
1393 */
1394 if (ifp->if_flags & IFF_BROADCAST) {
1395 rxcfg |= RTK_RXCFG_RX_BROAD;
1396 CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
1397 } else {
1398 rxcfg &= ~RTK_RXCFG_RX_BROAD;
1399 CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
1400 }
1401
1402 /*
1403 * Program the multicast filter, if necessary.
1404 */
1405 rtk_setmulti(sc);
1406
1407 /*
1408 * Enable interrupts.
1409 */
1410 CSR_WRITE_2(sc, RTK_IMR, RTK_INTRS);
1411
1412 /* Start RX/TX process. */
1413 CSR_WRITE_4(sc, RTK_MISSEDPKT, 0);
1414
1415 /* Enable receiver and transmitter. */
1416 CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_TX_ENB | RTK_CMD_RX_ENB);
1417
1418 CSR_WRITE_1(sc, RTK_CFG1, RTK_CFG1_DRVLOAD | RTK_CFG1_FULLDUPLEX);
1419
1420 /*
1421 * Set current media.
1422 */
1423 if ((error = ether_mediachange(ifp)) != 0)
1424 goto out;
1425
1426 ifp->if_flags |= IFF_RUNNING;
1427 ifp->if_flags &= ~IFF_OACTIVE;
1428
1429 callout_schedule(&sc->rtk_tick_ch, hz);
1430
1431 out:
1432 if (error) {
1433 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1434 ifp->if_timer = 0;
1435 printf("%s: interface not running\n", device_xname(sc->sc_dev));
1436 }
1437 return error;
1438 }
1439
1440 static int
1441 rtk_ioctl(struct ifnet *ifp, u_long command, void *data)
1442 {
1443 struct rtk_softc *sc = ifp->if_softc;
1444 int s, error;
1445
1446 s = splnet();
1447 error = ether_ioctl(ifp, command, data);
1448 if (error == ENETRESET) {
1449 if (ifp->if_flags & IFF_RUNNING) {
1450 /*
1451 * Multicast list has changed. Set the
1452 * hardware filter accordingly.
1453 */
1454 rtk_setmulti(sc);
1455 }
1456 error = 0;
1457 }
1458 splx(s);
1459
1460 return error;
1461 }
1462
1463 static void
1464 rtk_watchdog(struct ifnet *ifp)
1465 {
1466 struct rtk_softc *sc;
1467
1468 sc = ifp->if_softc;
1469
1470 printf("%s: watchdog timeout\n", device_xname(sc->sc_dev));
1471 if_statinc(ifp, if_oerrors);
1472 rtk_txeof(sc);
1473 rtk_rxeof(sc);
1474 rtk_init(ifp);
1475 }
1476
1477 /*
1478 * Stop the adapter and free any mbufs allocated to the
1479 * RX and TX lists.
1480 */
1481 static void
1482 rtk_stop(struct ifnet *ifp, int disable)
1483 {
1484 struct rtk_softc *sc = ifp->if_softc;
1485 struct rtk_tx_desc *txd;
1486
1487 callout_stop(&sc->rtk_tick_ch);
1488
1489 mii_down(&sc->mii);
1490
1491 CSR_WRITE_1(sc, RTK_COMMAND, 0x00);
1492 CSR_WRITE_2(sc, RTK_IMR, 0x0000);
1493
1494 /*
1495 * Free the TX list buffers.
1496 */
1497 while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_dirty)) != NULL) {
1498 SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_dirty, txd_q);
1499 bus_dmamap_unload(sc->sc_dmat, txd->txd_dmamap);
1500 m_freem(txd->txd_mbuf);
1501 txd->txd_mbuf = NULL;
1502 CSR_WRITE_4(sc, txd->txd_txaddr, 0);
1503 }
1504
1505 if (disable)
1506 rtk_disable(sc);
1507
1508 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1509 ifp->if_timer = 0;
1510 }
1511
1512 static void
1513 rtk_tick(void *arg)
1514 {
1515 struct rtk_softc *sc = arg;
1516 int s;
1517
1518 s = splnet();
1519 mii_tick(&sc->mii);
1520 splx(s);
1521
1522 callout_schedule(&sc->rtk_tick_ch, hz);
1523 }
1524