1 /*	$NetBSD: rtl81x9.c,v 1.81 2008/01/19 22:10:17 dyoung Exp $	*/
2
3 /*
4 * Copyright (c) 1997, 1998
5 * Bill Paul <wpaul (at) ctr.columbia.edu>. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 * FreeBSD Id: if_rl.c,v 1.17 1999/06/19 20:17:37 wpaul Exp
35 */
36
37 /*
38 * RealTek 8129/8139 PCI NIC driver
39 *
40 * Supports several extremely cheap PCI 10/100 adapters based on
41 * the RealTek chipset. Datasheets can be obtained from
42 * www.realtek.com.tw.
43 *
44 * Written by Bill Paul <wpaul (at) ctr.columbia.edu>
45 * Electrical Engineering Department
46 * Columbia University, New York City
47 */
48
49 /*
50 * The RealTek 8139 PCI NIC redefines the meaning of 'low end.' This is
51 * probably the worst PCI ethernet controller ever made, with the possible
52 * exception of the FEAST chip made by SMC. The 8139 supports bus-master
53 * DMA, but it has a terrible interface that nullifies any performance
54 * gains that bus-master DMA usually offers.
55 *
56 * For transmission, the chip offers a series of four TX descriptor
57 * registers. Each transmit frame must be in a contiguous buffer, aligned
58 * on a longword (32-bit) boundary. This means we almost always have to
59 * do mbuf copies in order to transmit a frame, except in the unlikely
60 * case where a) the packet fits into a single mbuf, and b) the packet
61 * is 32-bit aligned within the mbuf's data area. The presence of only
62 * four descriptor registers means that we can never have more than four
63 * packets queued for transmission at any one time.
64 *
65 * Reception is not much better. The driver has to allocate a single large
66 * buffer area (up to 64K in size) into which the chip will DMA received
67 * frames. Because we don't know where within this region received packets
68 * will begin or end, we have no choice but to copy data from the buffer
69 * area into mbufs in order to pass the packets up to the higher protocol
70 * levels.
71 *
72 * It's impossible given this rotten design to really achieve decent
73 * performance at 100Mbps, unless you happen to have a 400MHz PII or
74 * some equally overmuscled CPU to drive it.
75 *
76 * On the bright side, the 8139 does have a built-in PHY, although
77 * rather than using an MDIO serial interface like most other NICs, the
78 * PHY registers are directly accessible through the 8139's register
79 * space. The 8139 supports autonegotiation, as well as a 64-bit multicast
80 * filter.
81 *
82 * The 8129 chip is an older version of the 8139 that uses an external PHY
83 * chip. The 8129 has a serial MDIO interface for accessing the MII, whereas
84 * the 8139 lets you directly access the on-board PHY registers. We need
85 * to select which interface to use depending on the chip type.
86 */
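/*
 * Within this driver, the four-register TX scheme described above is
 * handled by rtk_start() and rtk_txeof(), and the single RX ring
 * buffer by rtk_rxeof().
 */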
87
88 #include <sys/cdefs.h>
89 __KERNEL_RCSID(0, "$NetBSD: rtl81x9.c,v 1.81 2008/01/19 22:10:17 dyoung Exp $");
90
91 #include "bpfilter.h"
92 #include "rnd.h"
93
94 #include <sys/param.h>
95 #include <sys/systm.h>
96 #include <sys/callout.h>
97 #include <sys/device.h>
98 #include <sys/sockio.h>
99 #include <sys/mbuf.h>
100 #include <sys/malloc.h>
101 #include <sys/kernel.h>
102 #include <sys/socket.h>
103
104 #include <uvm/uvm_extern.h>
105
106 #include <net/if.h>
107 #include <net/if_arp.h>
108 #include <net/if_ether.h>
109 #include <net/if_dl.h>
110 #include <net/if_media.h>
111
112 #if NBPFILTER > 0
113 #include <net/bpf.h>
114 #endif
115 #if NRND > 0
116 #include <sys/rnd.h>
117 #endif
118
119 #include <sys/bus.h>
120 #include <machine/endian.h>
121
122 #include <dev/mii/mii.h>
123 #include <dev/mii/miivar.h>
124
125 #include <dev/ic/rtl81x9reg.h>
126 #include <dev/ic/rtl81x9var.h>
127
128 #if defined(DEBUG)
129 #define STATIC
130 #else
131 #define STATIC static
132 #endif
133
134 STATIC void rtk_reset(struct rtk_softc *);
135 STATIC void rtk_rxeof(struct rtk_softc *);
136 STATIC void rtk_txeof(struct rtk_softc *);
137 STATIC void rtk_start(struct ifnet *);
138 STATIC int rtk_ioctl(struct ifnet *, u_long, void *);
139 STATIC int rtk_init(struct ifnet *);
140 STATIC void rtk_stop(struct ifnet *, int);
141
142 STATIC void rtk_watchdog(struct ifnet *);
143
144 STATIC void rtk_eeprom_putbyte(struct rtk_softc *, int, int);
145 STATIC void rtk_mii_sync(struct rtk_softc *);
146 STATIC void rtk_mii_send(struct rtk_softc *, uint32_t, int);
147 STATIC int rtk_mii_readreg(struct rtk_softc *, struct rtk_mii_frame *);
148 STATIC int rtk_mii_writereg(struct rtk_softc *, struct rtk_mii_frame *);
149
150 STATIC int rtk_phy_readreg(device_t, int, int);
151 STATIC void rtk_phy_writereg(device_t, int, int, int);
152 STATIC void rtk_phy_statchg(device_t);
153 STATIC void rtk_tick(void *);
154
155 STATIC int rtk_enable(struct rtk_softc *);
156 STATIC void rtk_disable(struct rtk_softc *);
157
158 STATIC void rtk_list_tx_init(struct rtk_softc *);
159
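/*
 * Helpers for bit-banging the serial EEPROM through the RTK_EECMD
 * register: set or clear individual control/data bits, with a short
 * delay to let each transition settle.
 */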
160 #define EE_SET(x) \
161 CSR_WRITE_1(sc, RTK_EECMD, \
162 CSR_READ_1(sc, RTK_EECMD) | (x))
163
164 #define EE_CLR(x) \
165 CSR_WRITE_1(sc, RTK_EECMD, \
166 CSR_READ_1(sc, RTK_EECMD) & ~(x))
167
168 #define EE_DELAY() DELAY(100)
169
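/*
 * Frames shorter than this are padded in software before transmission;
 * the chip appends the 4-byte CRC itself, so the pad target is the
 * minimum Ethernet frame length less the CRC.
 */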
170 #define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
171
172 /*
173  * Send a read command and address to the EEPROM.
174 */
175 STATIC void
176 rtk_eeprom_putbyte(struct rtk_softc *sc, int addr, int addr_len)
177 {
178 int d, i;
179
180 d = (RTK_EECMD_READ << addr_len) | addr;
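/*
 * 'd' is now the EEPROM READ opcode followed by addr_len address bits;
 * it is shifted out MSB first below.
 */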
181
182 /*
183 	 * Feed in each bit and strobe the clock.
184 */
185 for (i = RTK_EECMD_LEN + addr_len; i > 0; i--) {
186 if (d & (1 << (i - 1))) {
187 EE_SET(RTK_EE_DATAIN);
188 } else {
189 EE_CLR(RTK_EE_DATAIN);
190 }
191 EE_DELAY();
192 EE_SET(RTK_EE_CLK);
193 EE_DELAY();
194 EE_CLR(RTK_EE_CLK);
195 EE_DELAY();
196 }
197 }
198
199 /*
200 * Read a word of data stored in the EEPROM at address 'addr.'
201 */
202 uint16_t
203 rtk_read_eeprom(struct rtk_softc *sc, int addr, int addr_len)
204 {
205 uint16_t word;
206 int i;
207
208 /* Enter EEPROM access mode. */
209 CSR_WRITE_1(sc, RTK_EECMD, RTK_EEMODE_PROGRAM);
210 EE_DELAY();
211 EE_SET(RTK_EE_SEL);
212
213 /*
214 * Send address of word we want to read.
215 */
216 rtk_eeprom_putbyte(sc, addr, addr_len);
217
218 /*
219 * Start reading bits from EEPROM.
220 */
221 word = 0;
222 for (i = 16; i > 0; i--) {
223 EE_SET(RTK_EE_CLK);
224 EE_DELAY();
225 if (CSR_READ_1(sc, RTK_EECMD) & RTK_EE_DATAOUT)
226 word |= 1 << (i - 1);
227 EE_CLR(RTK_EE_CLK);
228 EE_DELAY();
229 }
230
231 /* Turn off EEPROM access mode. */
232 CSR_WRITE_1(sc, RTK_EECMD, RTK_EEMODE_OFF);
233
234 return word;
235 }
236
237 /*
238 * MII access routines are provided for the 8129, which
239 * doesn't have a built-in PHY. For the 8139, we fake things
240 * up by diverting rtk_phy_readreg()/rtk_phy_writereg() to the
241 * direct access PHY registers.
242 */
243 #define MII_SET(x) \
244 CSR_WRITE_1(sc, RTK_MII, \
245 CSR_READ_1(sc, RTK_MII) | (x))
246
247 #define MII_CLR(x) \
248 CSR_WRITE_1(sc, RTK_MII, \
249 CSR_READ_1(sc, RTK_MII) & ~(x))
250
251 /*
252  * Sync the PHYs by setting the data bit and strobing the clock 32 times.
253 */
254 STATIC void
255 rtk_mii_sync(struct rtk_softc *sc)
256 {
257 int i;
258
259 MII_SET(RTK_MII_DIR|RTK_MII_DATAOUT);
260
261 for (i = 0; i < 32; i++) {
262 MII_SET(RTK_MII_CLK);
263 DELAY(1);
264 MII_CLR(RTK_MII_CLK);
265 DELAY(1);
266 }
267 }
268
269 /*
270 * Clock a series of bits through the MII.
271 */
272 STATIC void
273 rtk_mii_send(struct rtk_softc *sc, uint32_t bits, int cnt)
274 {
275 int i;
276
277 MII_CLR(RTK_MII_CLK);
278
279 for (i = cnt; i > 0; i--) {
280 if (bits & (1 << (i - 1))) {
281 MII_SET(RTK_MII_DATAOUT);
282 } else {
283 MII_CLR(RTK_MII_DATAOUT);
284 }
285 DELAY(1);
286 MII_CLR(RTK_MII_CLK);
287 DELAY(1);
288 MII_SET(RTK_MII_CLK);
289 }
290 }
291
292 /*
293  * Read a PHY register through the MII.
294 */
295 STATIC int
296 rtk_mii_readreg(struct rtk_softc *sc, struct rtk_mii_frame *frame)
297 {
298 int i, ack, s;
299
300 s = splnet();
301
302 /*
303 * Set up frame for RX.
304 */
305 frame->mii_stdelim = RTK_MII_STARTDELIM;
306 frame->mii_opcode = RTK_MII_READOP;
307 frame->mii_turnaround = 0;
308 frame->mii_data = 0;
309
310 CSR_WRITE_2(sc, RTK_MII, 0);
311
312 /*
313 * Turn on data xmit.
314 */
315 MII_SET(RTK_MII_DIR);
316
317 rtk_mii_sync(sc);
318
319 /*
320 * Send command/address info.
321 */
322 rtk_mii_send(sc, frame->mii_stdelim, 2);
323 rtk_mii_send(sc, frame->mii_opcode, 2);
324 rtk_mii_send(sc, frame->mii_phyaddr, 5);
325 rtk_mii_send(sc, frame->mii_regaddr, 5);
326
327 /* Idle bit */
328 MII_CLR((RTK_MII_CLK|RTK_MII_DATAOUT));
329 DELAY(1);
330 MII_SET(RTK_MII_CLK);
331 DELAY(1);
332
333 /* Turn off xmit. */
334 MII_CLR(RTK_MII_DIR);
335
336 /* Check for ack */
337 MII_CLR(RTK_MII_CLK);
338 DELAY(1);
339 ack = CSR_READ_2(sc, RTK_MII) & RTK_MII_DATAIN;
340 MII_SET(RTK_MII_CLK);
341 DELAY(1);
342
343 /*
344 * Now try reading data bits. If the ack failed, we still
345 * need to clock through 16 cycles to keep the PHY(s) in sync.
346 */
347 if (ack) {
348 for (i = 0; i < 16; i++) {
349 MII_CLR(RTK_MII_CLK);
350 DELAY(1);
351 MII_SET(RTK_MII_CLK);
352 DELAY(1);
353 }
354 goto fail;
355 }
356
357 for (i = 16; i > 0; i--) {
358 MII_CLR(RTK_MII_CLK);
359 DELAY(1);
360 if (!ack) {
361 if (CSR_READ_2(sc, RTK_MII) & RTK_MII_DATAIN)
362 frame->mii_data |= 1 << (i - 1);
363 DELAY(1);
364 }
365 MII_SET(RTK_MII_CLK);
366 DELAY(1);
367 }
368
369 fail:
370 MII_CLR(RTK_MII_CLK);
371 DELAY(1);
372 MII_SET(RTK_MII_CLK);
373 DELAY(1);
374
375 splx(s);
376
377 if (ack)
378 return 1;
379 return 0;
380 }
381
382 /*
383 * Write to a PHY register through the MII.
384 */
385 STATIC int
386 rtk_mii_writereg(struct rtk_softc *sc, struct rtk_mii_frame *frame)
387 {
388 int s;
389
390 s = splnet();
391 /*
392 * Set up frame for TX.
393 */
394 frame->mii_stdelim = RTK_MII_STARTDELIM;
395 frame->mii_opcode = RTK_MII_WRITEOP;
396 frame->mii_turnaround = RTK_MII_TURNAROUND;
397
398 /*
399 * Turn on data output.
400 */
401 MII_SET(RTK_MII_DIR);
402
403 rtk_mii_sync(sc);
404
405 rtk_mii_send(sc, frame->mii_stdelim, 2);
406 rtk_mii_send(sc, frame->mii_opcode, 2);
407 rtk_mii_send(sc, frame->mii_phyaddr, 5);
408 rtk_mii_send(sc, frame->mii_regaddr, 5);
409 rtk_mii_send(sc, frame->mii_turnaround, 2);
410 rtk_mii_send(sc, frame->mii_data, 16);
411
412 /* Idle bit. */
413 MII_SET(RTK_MII_CLK);
414 DELAY(1);
415 MII_CLR(RTK_MII_CLK);
416 DELAY(1);
417
418 /*
419 * Turn off xmit.
420 */
421 MII_CLR(RTK_MII_DIR);
422
423 splx(s);
424
425 return 0;
426 }
427
428 STATIC int
429 rtk_phy_readreg(device_t self, int phy, int reg)
430 {
431 struct rtk_softc *sc = device_private(self);
432 struct rtk_mii_frame frame;
433 int rval;
434 int rtk8139_reg;
435
436 if ((sc->sc_quirk & RTKQ_8129) == 0) {
437 if (phy != 7)
438 return 0;
439
440 switch (reg) {
441 case MII_BMCR:
442 rtk8139_reg = RTK_BMCR;
443 break;
444 case MII_BMSR:
445 rtk8139_reg = RTK_BMSR;
446 break;
447 case MII_ANAR:
448 rtk8139_reg = RTK_ANAR;
449 break;
450 case MII_ANER:
451 rtk8139_reg = RTK_ANER;
452 break;
453 case MII_ANLPAR:
454 rtk8139_reg = RTK_LPAR;
455 break;
456 default:
457 #if 0
458 printf("%s: bad phy register\n", device_xname(self));
459 #endif
460 return 0;
461 }
462 rval = CSR_READ_2(sc, rtk8139_reg);
463 return rval;
464 }
465
466 memset((char *)&frame, 0, sizeof(frame));
467
468 frame.mii_phyaddr = phy;
469 frame.mii_regaddr = reg;
470 rtk_mii_readreg(sc, &frame);
471
472 return frame.mii_data;
473 }
474
475 STATIC void
476 rtk_phy_writereg(device_t self, int phy, int reg, int data)
477 {
478 struct rtk_softc *sc = device_private(self);
479 struct rtk_mii_frame frame;
480 int rtk8139_reg;
481
482 if ((sc->sc_quirk & RTKQ_8129) == 0) {
483 if (phy != 7)
484 return;
485
486 switch (reg) {
487 case MII_BMCR:
488 rtk8139_reg = RTK_BMCR;
489 break;
490 case MII_BMSR:
491 rtk8139_reg = RTK_BMSR;
492 break;
493 case MII_ANAR:
494 rtk8139_reg = RTK_ANAR;
495 break;
496 case MII_ANER:
497 rtk8139_reg = RTK_ANER;
498 break;
499 case MII_ANLPAR:
500 rtk8139_reg = RTK_LPAR;
501 break;
502 default:
503 #if 0
504 printf("%s: bad phy register\n", device_xname(self));
505 #endif
506 return;
507 }
508 CSR_WRITE_2(sc, rtk8139_reg, data);
509 return;
510 }
511
512 memset((char *)&frame, 0, sizeof(frame));
513
514 frame.mii_phyaddr = phy;
515 frame.mii_regaddr = reg;
516 frame.mii_data = data;
517
518 rtk_mii_writereg(sc, &frame);
519 }
520
521 STATIC void
522 rtk_phy_statchg(device_t v)
523 {
524
525 /* Nothing to do. */
526 }
527
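/*
 * The multicast hash is the upper 6 bits of the big-endian CRC32 of
 * the address, selecting one of the 64 filter bits spread across the
 * MAR0/MAR4 registers.
 */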
528 #define rtk_calchash(addr) \
529 (ether_crc32_be((addr), ETHER_ADDR_LEN) >> 26)
530
531 /*
532 * Program the 64-bit multicast hash filter.
533 */
534 void
535 rtk_setmulti(struct rtk_softc *sc)
536 {
537 struct ifnet *ifp;
538 uint32_t hashes[2] = { 0, 0 };
539 uint32_t rxfilt;
540 struct ether_multi *enm;
541 struct ether_multistep step;
542 int h, mcnt;
543
544 ifp = &sc->ethercom.ec_if;
545
546 rxfilt = CSR_READ_4(sc, RTK_RXCFG);
547
548 if (ifp->if_flags & IFF_PROMISC) {
549 allmulti:
550 ifp->if_flags |= IFF_ALLMULTI;
551 rxfilt |= RTK_RXCFG_RX_MULTI;
552 CSR_WRITE_4(sc, RTK_RXCFG, rxfilt);
553 CSR_WRITE_4(sc, RTK_MAR0, 0xFFFFFFFF);
554 CSR_WRITE_4(sc, RTK_MAR4, 0xFFFFFFFF);
555 return;
556 }
557
558 /* first, zot all the existing hash bits */
559 CSR_WRITE_4(sc, RTK_MAR0, 0);
560 CSR_WRITE_4(sc, RTK_MAR4, 0);
561
562 /* now program new ones */
563 ETHER_FIRST_MULTI(step, &sc->ethercom, enm);
564 mcnt = 0;
565 while (enm != NULL) {
566 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
567 ETHER_ADDR_LEN) != 0)
568 goto allmulti;
569
570 h = rtk_calchash(enm->enm_addrlo);
571 if (h < 32)
572 hashes[0] |= (1 << h);
573 else
574 hashes[1] |= (1 << (h - 32));
575 mcnt++;
576 ETHER_NEXT_MULTI(step, enm);
577 }
578
579 ifp->if_flags &= ~IFF_ALLMULTI;
580
581 if (mcnt)
582 rxfilt |= RTK_RXCFG_RX_MULTI;
583 else
584 rxfilt &= ~RTK_RXCFG_RX_MULTI;
585
586 CSR_WRITE_4(sc, RTK_RXCFG, rxfilt);
587
588 /*
589 * For some unfathomable reason, RealTek decided to reverse
590 * the order of the multicast hash registers in the PCI Express
591 * parts. This means we have to write the hash pattern in reverse
592 * order for those devices.
593 */
594 if ((sc->sc_quirk & RTKQ_PCIE) != 0) {
595 CSR_WRITE_4(sc, RTK_MAR0, bswap32(hashes[1]));
596 CSR_WRITE_4(sc, RTK_MAR4, bswap32(hashes[0]));
597 } else {
598 CSR_WRITE_4(sc, RTK_MAR0, hashes[0]);
599 CSR_WRITE_4(sc, RTK_MAR4, hashes[1]);
600 }
601 }
602
603 void
604 rtk_reset(struct rtk_softc *sc)
605 {
606 int i;
607
608 CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_RESET);
609
610 for (i = 0; i < RTK_TIMEOUT; i++) {
611 DELAY(10);
612 if ((CSR_READ_1(sc, RTK_COMMAND) & RTK_CMD_RESET) == 0)
613 break;
614 }
615 if (i == RTK_TIMEOUT)
616 printf("%s: reset never completed!\n", device_xname(&sc->sc_dev));
617 }
618
619 /*
620 * Attach the interface. Allocate softc structures, do ifmedia
621 * setup and ethernet/BPF attach.
622 */
623 void
624 rtk_attach(struct rtk_softc *sc)
625 {
626 device_t self = &sc->sc_dev;
627 struct ifnet *ifp;
628 struct rtk_tx_desc *txd;
629 uint16_t val;
630 uint8_t eaddr[ETHER_ADDR_LEN];
631 int error;
632 int i, addr_len;
633
634 callout_init(&sc->rtk_tick_ch, 0);
635
636 /*
637 * Check EEPROM type 9346 or 9356.
638 */
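/*
 * Probe with the longer address format first; if the RealTek ID word
 * (0x8129) reads back correctly, the part presumably uses that
 * (93C56-style) address width, otherwise fall back to the 93C46
 * format for the reads below.
 */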
639 if (rtk_read_eeprom(sc, RTK_EE_ID, RTK_EEADDR_LEN1) == 0x8129)
640 addr_len = RTK_EEADDR_LEN1;
641 else
642 addr_len = RTK_EEADDR_LEN0;
643
644 /*
645 * Get station address.
646 */
647 val = rtk_read_eeprom(sc, RTK_EE_EADDR0, addr_len);
648 eaddr[0] = val & 0xff;
649 eaddr[1] = val >> 8;
650 val = rtk_read_eeprom(sc, RTK_EE_EADDR1, addr_len);
651 eaddr[2] = val & 0xff;
652 eaddr[3] = val >> 8;
653 val = rtk_read_eeprom(sc, RTK_EE_EADDR2, addr_len);
654 eaddr[4] = val & 0xff;
655 eaddr[5] = val >> 8;
656
657 if ((error = bus_dmamem_alloc(sc->sc_dmat,
658 RTK_RXBUFLEN + 16, PAGE_SIZE, 0, &sc->sc_dmaseg, 1, &sc->sc_dmanseg,
659 BUS_DMA_NOWAIT)) != 0) {
660 aprint_error_dev(self,
661 "can't allocate recv buffer, error = %d\n", error);
662 goto fail_0;
663 }
664
665 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dmaseg, sc->sc_dmanseg,
666 RTK_RXBUFLEN + 16, (void **)&sc->rtk_rx_buf,
667 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
668 aprint_error_dev(self,
669 "can't map recv buffer, error = %d\n", error);
670 goto fail_1;
671 }
672
673 if ((error = bus_dmamap_create(sc->sc_dmat,
674 RTK_RXBUFLEN + 16, 1, RTK_RXBUFLEN + 16, 0, BUS_DMA_NOWAIT,
675 &sc->recv_dmamap)) != 0) {
676 aprint_error_dev(self,
677 "can't create recv buffer DMA map, error = %d\n", error);
678 goto fail_2;
679 }
680
681 if ((error = bus_dmamap_load(sc->sc_dmat, sc->recv_dmamap,
682 sc->rtk_rx_buf, RTK_RXBUFLEN + 16,
683 NULL, BUS_DMA_READ|BUS_DMA_NOWAIT)) != 0) {
684 aprint_error_dev(self,
685 "can't load recv buffer DMA map, error = %d\n", error);
686 goto fail_3;
687 }
688
689 for (i = 0; i < RTK_TX_LIST_CNT; i++) {
690 txd = &sc->rtk_tx_descs[i];
691 if ((error = bus_dmamap_create(sc->sc_dmat,
692 MCLBYTES, 1, MCLBYTES, 0, BUS_DMA_NOWAIT,
693 &txd->txd_dmamap)) != 0) {
694 aprint_error_dev(self,
695 "can't create snd buffer DMA map,"
696 " error = %d\n", error);
697 goto fail_4;
698 }
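/*
 * The four TX descriptor address/status register pairs live at
 * consecutive 32-bit offsets from RTK_TXADDR0 and RTK_TXSTAT0.
 */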
699 txd->txd_txaddr = RTK_TXADDR0 + (i * 4);
700 txd->txd_txstat = RTK_TXSTAT0 + (i * 4);
701 }
702 SIMPLEQ_INIT(&sc->rtk_tx_free);
703 SIMPLEQ_INIT(&sc->rtk_tx_dirty);
704
705 /*
706 * From this point forward, the attachment cannot fail. A failure
707 	 * before this releases all resources that may have been
708 * allocated.
709 */
710 sc->sc_flags |= RTK_ATTACHED;
711
712 /* Reset the adapter. */
713 rtk_reset(sc);
714
715 aprint_normal_dev(self, "Ethernet address %s\n", ether_sprintf(eaddr));
716
717 ifp = &sc->ethercom.ec_if;
718 ifp->if_softc = sc;
719 strcpy(ifp->if_xname, device_xname(self));
720 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
721 ifp->if_ioctl = rtk_ioctl;
722 ifp->if_start = rtk_start;
723 ifp->if_watchdog = rtk_watchdog;
724 ifp->if_init = rtk_init;
725 ifp->if_stop = rtk_stop;
726 IFQ_SET_READY(&ifp->if_snd);
727
728 /*
729 * Do ifmedia setup.
730 */
731 sc->mii.mii_ifp = ifp;
732 sc->mii.mii_readreg = rtk_phy_readreg;
733 sc->mii.mii_writereg = rtk_phy_writereg;
734 sc->mii.mii_statchg = rtk_phy_statchg;
735 sc->ethercom.ec_mii = &sc->mii;
736 ifmedia_init(&sc->mii.mii_media, IFM_IMASK, ether_mediachange,
737 ether_mediastatus);
738 mii_attach(self, &sc->mii, 0xffffffff,
739 MII_PHY_ANY, MII_OFFSET_ANY, 0);
740
741 /* Choose a default media. */
742 if (LIST_FIRST(&sc->mii.mii_phys) == NULL) {
743 ifmedia_add(&sc->mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
744 ifmedia_set(&sc->mii.mii_media, IFM_ETHER|IFM_NONE);
745 } else {
746 ifmedia_set(&sc->mii.mii_media, IFM_ETHER|IFM_AUTO);
747 }
748
749 /*
750 * Call MI attach routines.
751 */
752 if_attach(ifp);
753 ether_ifattach(ifp, eaddr);
754
755 #if NRND > 0
756 rnd_attach_source(&sc->rnd_source, device_xname(self),
757 RND_TYPE_NET, 0);
758 #endif
759
760 return;
761 fail_4:
762 for (i = 0; i < RTK_TX_LIST_CNT; i++) {
763 txd = &sc->rtk_tx_descs[i];
764 if (txd->txd_dmamap != NULL)
765 bus_dmamap_destroy(sc->sc_dmat, txd->txd_dmamap);
766 }
767 fail_3:
768 bus_dmamap_destroy(sc->sc_dmat, sc->recv_dmamap);
769 fail_2:
770 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->rtk_rx_buf,
771 RTK_RXBUFLEN + 16);
772 fail_1:
773 bus_dmamem_free(sc->sc_dmat, &sc->sc_dmaseg, sc->sc_dmanseg);
774 fail_0:
775 return;
776 }
777
778 /*
779 * Initialize the transmit descriptors.
780 */
781 STATIC void
782 rtk_list_tx_init(struct rtk_softc *sc)
783 {
784 struct rtk_tx_desc *txd;
785 int i;
786
787 while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_dirty)) != NULL)
788 SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_dirty, txd_q);
789 while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_free)) != NULL)
790 SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_free, txd_q);
791
792 for (i = 0; i < RTK_TX_LIST_CNT; i++) {
793 txd = &sc->rtk_tx_descs[i];
794 CSR_WRITE_4(sc, txd->txd_txaddr, 0);
795 SIMPLEQ_INSERT_TAIL(&sc->rtk_tx_free, txd, txd_q);
796 }
797 }
798
799 /*
800 * rtk_activate:
801 * Handle device activation/deactivation requests.
802 */
803 int
804 rtk_activate(device_t self, enum devact act)
805 {
806 struct rtk_softc *sc = device_private(self);
807 int s, error;
808
809 error = 0;
810 s = splnet();
811 switch (act) {
812 case DVACT_ACTIVATE:
813 error = EOPNOTSUPP;
814 break;
815 case DVACT_DEACTIVATE:
816 mii_activate(&sc->mii, act, MII_PHY_ANY, MII_OFFSET_ANY);
817 if_deactivate(&sc->ethercom.ec_if);
818 break;
819 }
820 splx(s);
821
822 return error;
823 }
824
825 /*
826 * rtk_detach:
827 * Detach a rtk interface.
828 */
829 int
830 rtk_detach(struct rtk_softc *sc)
831 {
832 struct ifnet *ifp = &sc->ethercom.ec_if;
833 struct rtk_tx_desc *txd;
834 int i;
835
836 /*
837 * Succeed now if there isn't any work to do.
838 */
839 if ((sc->sc_flags & RTK_ATTACHED) == 0)
840 return 0;
841
842 /* Unhook our tick handler. */
843 callout_stop(&sc->rtk_tick_ch);
844
845 /* Detach all PHYs. */
846 mii_detach(&sc->mii, MII_PHY_ANY, MII_OFFSET_ANY);
847
848 /* Delete all remaining media. */
849 ifmedia_delete_instance(&sc->mii.mii_media, IFM_INST_ANY);
850
851 #if NRND > 0
852 rnd_detach_source(&sc->rnd_source);
853 #endif
854
855 ether_ifdetach(ifp);
856 if_detach(ifp);
857
858 for (i = 0; i < RTK_TX_LIST_CNT; i++) {
859 txd = &sc->rtk_tx_descs[i];
860 if (txd->txd_dmamap != NULL)
861 bus_dmamap_destroy(sc->sc_dmat, txd->txd_dmamap);
862 }
863 bus_dmamap_destroy(sc->sc_dmat, sc->recv_dmamap);
864 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->rtk_rx_buf,
865 RTK_RXBUFLEN + 16);
866 bus_dmamem_free(sc->sc_dmat, &sc->sc_dmaseg, sc->sc_dmanseg);
867
868 return 0;
869 }
870
871 /*
872 * rtk_enable:
873 * Enable the RTL81X9 chip.
874 */
875 int
876 rtk_enable(struct rtk_softc *sc)
877 {
878
879 if (RTK_IS_ENABLED(sc) == 0 && sc->sc_enable != NULL) {
880 if ((*sc->sc_enable)(sc) != 0) {
881 printf("%s: device enable failed\n",
882 device_xname(&sc->sc_dev));
883 return EIO;
884 }
885 sc->sc_flags |= RTK_ENABLED;
886 }
887 return 0;
888 }
889
890 /*
891 * rtk_disable:
892 * Disable the RTL81X9 chip.
893 */
894 void
895 rtk_disable(struct rtk_softc *sc)
896 {
897
898 if (RTK_IS_ENABLED(sc) && sc->sc_disable != NULL) {
899 (*sc->sc_disable)(sc);
900 sc->sc_flags &= ~RTK_ENABLED;
901 }
902 }
903
904 /*
905 * A frame has been uploaded: pass the resulting mbuf chain up to
906 * the higher level protocols.
907 *
908 * You know there's something wrong with a PCI bus-master chip design.
909 *
910 * The receive operation is badly documented in the datasheet, so I'll
911 * attempt to document it here. The driver provides a buffer area and
912 * places its base address in the RX buffer start address register.
913 * The chip then begins copying frames into the RX buffer. Each frame
914 * is preceded by a 32-bit RX status word which specifies the length
915 * of the frame and certain other status bits. Each frame (starting with
916 * the status word) is also 32-bit aligned. The frame length is in the
917  * upper 16 bits of the status word; the lower 15 bits correspond with
918 * the 'rx status register' mentioned in the datasheet.
919 *
920 * Note: to make the Alpha happy, the frame payload needs to be aligned
921  * on a 32-bit boundary. To achieve this, we copy the data into the mbuf
922  * shifted forward 2 bytes.
923 */
924 STATIC void
925 rtk_rxeof(struct rtk_softc *sc)
926 {
927 struct mbuf *m;
928 struct ifnet *ifp;
929 char *rxbufpos, *dst;
930 u_int total_len, wrap;
931 uint32_t rxstat;
932 uint16_t cur_rx, new_rx;
933 uint16_t limit;
934 uint16_t rx_bytes, max_bytes;
935
936 ifp = &sc->ethercom.ec_if;
937
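/*
 * RTK_CURRXADDR is kept 16 bytes behind the actual read offset (note
 * the matching '- 16' when it is updated at the bottom of the loop),
 * so add 16 here to recover the true position.
 */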
938 cur_rx = (CSR_READ_2(sc, RTK_CURRXADDR) + 16) % RTK_RXBUFLEN;
939
940 /* Do not try to read past this point. */
941 limit = CSR_READ_2(sc, RTK_CURRXBUF) % RTK_RXBUFLEN;
942
943 if (limit < cur_rx)
944 max_bytes = (RTK_RXBUFLEN - cur_rx) + limit;
945 else
946 max_bytes = limit - cur_rx;
947 rx_bytes = 0;
948
949 while ((CSR_READ_1(sc, RTK_COMMAND) & RTK_CMD_EMPTY_RXBUF) == 0) {
950 rxbufpos = (char *)sc->rtk_rx_buf + cur_rx;
951 bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap, cur_rx,
952 RTK_RXSTAT_LEN, BUS_DMASYNC_POSTREAD);
953 rxstat = le32toh(*(uint32_t *)rxbufpos);
954 bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap, cur_rx,
955 RTK_RXSTAT_LEN, BUS_DMASYNC_PREREAD);
956
957 /*
958 * Here's a totally undocumented fact for you. When the
959 * RealTek chip is in the process of copying a packet into
960 * RAM for you, the length will be 0xfff0. If you spot a
961 * packet header with this value, you need to stop. The
962 * datasheet makes absolutely no mention of this and
963 * RealTek should be shot for this.
964 */
965 total_len = rxstat >> 16;
966 if (total_len == RTK_RXSTAT_UNFINISHED)
967 break;
968
969 if ((rxstat & RTK_RXSTAT_RXOK) == 0 ||
970 total_len < ETHER_MIN_LEN ||
971 total_len > (MCLBYTES - RTK_ETHER_ALIGN)) {
972 ifp->if_ierrors++;
973
974 /*
975 * submitted by:[netbsd-pcmcia:00484]
976 * Takahiro Kambe <taca (at) sky.yamashina.kyoto.jp>
977 * obtain from:
978 * FreeBSD if_rl.c rev 1.24->1.25
979 *
980 */
981 #if 0
982 if (rxstat & (RTK_RXSTAT_BADSYM|RTK_RXSTAT_RUNT|
983 RTK_RXSTAT_GIANT|RTK_RXSTAT_CRCERR|
984 RTK_RXSTAT_ALIGNERR)) {
985 CSR_WRITE_2(sc, RTK_COMMAND, RTK_CMD_TX_ENB);
986 CSR_WRITE_2(sc, RTK_COMMAND,
987 RTK_CMD_TX_ENB|RTK_CMD_RX_ENB);
988 CSR_WRITE_4(sc, RTK_RXCFG, RTK_RXCFG_CONFIG);
989 CSR_WRITE_4(sc, RTK_RXADDR,
990 sc->recv_dmamap->dm_segs[0].ds_addr);
991 cur_rx = 0;
992 }
993 break;
994 #else
995 rtk_init(ifp);
996 return;
997 #endif
998 }
999
1000 /* No errors; receive the packet. */
1001 rx_bytes += total_len + RTK_RXSTAT_LEN;
1002
1003 /*
1004 * Avoid trying to read more bytes than we know
1005 * the chip has prepared for us.
1006 */
1007 if (rx_bytes > max_bytes)
1008 break;
1009
1010 /*
1011 * Skip the status word, wrapping around to the beginning
1012 * of the Rx area, if necessary.
1013 */
1014 cur_rx = (cur_rx + RTK_RXSTAT_LEN) % RTK_RXBUFLEN;
1015 rxbufpos = (char *)sc->rtk_rx_buf + cur_rx;
1016
1017 /*
1018 * Compute the number of bytes at which the packet
1019 * will wrap to the beginning of the ring buffer.
1020 */
1021 wrap = RTK_RXBUFLEN - cur_rx;
1022
1023 /*
1024 * Compute where the next pending packet is.
1025 */
1026 if (total_len > wrap)
1027 new_rx = total_len - wrap;
1028 else
1029 new_rx = cur_rx + total_len;
1030 /* Round up to 32-bit boundary. */
1031 new_rx = ((new_rx + 3) & ~3) % RTK_RXBUFLEN;
1032
1033 /*
1034 * The RealTek chip includes the CRC with every
1035 * incoming packet; trim it off here.
1036 */
1037 total_len -= ETHER_CRC_LEN;
1038
1039 /*
1040 * Now allocate an mbuf (and possibly a cluster) to hold
1041 * the packet. Note we offset the packet 2 bytes so that
1042 * data after the Ethernet header will be 4-byte aligned.
1043 */
1044 MGETHDR(m, M_DONTWAIT, MT_DATA);
1045 if (m == NULL) {
1046 printf("%s: unable to allocate Rx mbuf\n",
1047 device_xname(&sc->sc_dev));
1048 ifp->if_ierrors++;
1049 goto next_packet;
1050 }
1051 if (total_len > (MHLEN - RTK_ETHER_ALIGN)) {
1052 MCLGET(m, M_DONTWAIT);
1053 if ((m->m_flags & M_EXT) == 0) {
1054 printf("%s: unable to allocate Rx cluster\n",
1055 device_xname(&sc->sc_dev));
1056 ifp->if_ierrors++;
1057 m_freem(m);
1058 m = NULL;
1059 goto next_packet;
1060 }
1061 }
1062 m->m_data += RTK_ETHER_ALIGN; /* for alignment */
1063 m->m_pkthdr.rcvif = ifp;
1064 m->m_pkthdr.len = m->m_len = total_len;
1065 dst = mtod(m, void *);
1066
1067 /*
1068 * If the packet wraps, copy up to the wrapping point.
1069 */
1070 if (total_len > wrap) {
1071 bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap,
1072 cur_rx, wrap, BUS_DMASYNC_POSTREAD);
1073 memcpy(dst, rxbufpos, wrap);
1074 bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap,
1075 cur_rx, wrap, BUS_DMASYNC_PREREAD);
1076 cur_rx = 0;
1077 rxbufpos = sc->rtk_rx_buf;
1078 total_len -= wrap;
1079 dst += wrap;
1080 }
1081
1082 /*
1083 * ...and now the rest.
1084 */
1085 bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap,
1086 cur_rx, total_len, BUS_DMASYNC_POSTREAD);
1087 memcpy(dst, rxbufpos, total_len);
1088 bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap,
1089 cur_rx, total_len, BUS_DMASYNC_PREREAD);
1090
1091 next_packet:
1092 CSR_WRITE_2(sc, RTK_CURRXADDR, (new_rx - 16) % RTK_RXBUFLEN);
1093 cur_rx = new_rx;
1094
1095 if (m == NULL)
1096 continue;
1097
1098 ifp->if_ipackets++;
1099
1100 #if NBPFILTER > 0
1101 if (ifp->if_bpf)
1102 bpf_mtap(ifp->if_bpf, m);
1103 #endif
1104 /* pass it on. */
1105 (*ifp->if_input)(ifp, m);
1106 }
1107 }
1108
1109 /*
1110 * A frame was downloaded to the chip. It's safe for us to clean up
1111 * the list buffers.
1112 */
1113 STATIC void
1114 rtk_txeof(struct rtk_softc *sc)
1115 {
1116 struct ifnet *ifp;
1117 struct rtk_tx_desc *txd;
1118 uint32_t txstat;
1119
1120 ifp = &sc->ethercom.ec_if;
1121
1122 /*
1123 * Go through our tx list and free mbufs for those
1124 	 * frames that have been transmitted.
1125 */
1126 while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_dirty)) != NULL) {
1127 txstat = CSR_READ_4(sc, txd->txd_txstat);
1128 if ((txstat & (RTK_TXSTAT_TX_OK|
1129 RTK_TXSTAT_TX_UNDERRUN|RTK_TXSTAT_TXABRT)) == 0)
1130 break;
1131
1132 SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_dirty, txd_q);
1133
1134 bus_dmamap_sync(sc->sc_dmat, txd->txd_dmamap, 0,
1135 txd->txd_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1136 bus_dmamap_unload(sc->sc_dmat, txd->txd_dmamap);
1137 m_freem(txd->txd_mbuf);
1138 txd->txd_mbuf = NULL;
1139
1140 ifp->if_collisions += (txstat & RTK_TXSTAT_COLLCNT) >> 24;
1141
1142 if (txstat & RTK_TXSTAT_TX_OK)
1143 ifp->if_opackets++;
1144 else {
1145 ifp->if_oerrors++;
1146
1147 /*
1148 			 * Increase the early TX threshold if an underrun occurred,
1149 			 * in steps of 64 bytes.
1150 */
1151 if (txstat & RTK_TXSTAT_TX_UNDERRUN) {
1152 #ifdef DEBUG
1153 printf("%s: transmit underrun;",
1154 device_xname(&sc->sc_dev));
1155 #endif
1156 if (sc->sc_txthresh < RTK_TXTH_MAX) {
1157 sc->sc_txthresh += 2;
1158 #ifdef DEBUG
1159 printf(" new threshold: %d bytes",
1160 sc->sc_txthresh * 32);
1161 #endif
1162 }
1163 printf("\n");
1164 }
1165 if (txstat & (RTK_TXSTAT_TXABRT|RTK_TXSTAT_OUTOFWIN))
1166 CSR_WRITE_4(sc, RTK_TXCFG, RTK_TXCFG_CONFIG);
1167 }
1168 SIMPLEQ_INSERT_TAIL(&sc->rtk_tx_free, txd, txd_q);
1169 ifp->if_flags &= ~IFF_OACTIVE;
1170 }
1171
1172 /* Clear the timeout timer if there is no pending packet. */
1173 if (SIMPLEQ_EMPTY(&sc->rtk_tx_dirty))
1174 ifp->if_timer = 0;
1175
1176 }
1177
1178 int
1179 rtk_intr(void *arg)
1180 {
1181 struct rtk_softc *sc;
1182 struct ifnet *ifp;
1183 uint16_t status;
1184 int handled;
1185
1186 sc = arg;
1187 ifp = &sc->ethercom.ec_if;
1188
1189 if (!device_has_power(&sc->sc_dev))
1190 return 0;
1191
1192 /* Disable interrupts. */
1193 CSR_WRITE_2(sc, RTK_IMR, 0x0000);
1194
1195 handled = 0;
1196 for (;;) {
1197
1198 status = CSR_READ_2(sc, RTK_ISR);
1199
1200 if (status == 0xffff)
1201 break; /* Card is gone... */
1202
1203 if (status)
1204 CSR_WRITE_2(sc, RTK_ISR, status);
1205
1206 if ((status & RTK_INTRS) == 0)
1207 break;
1208
1209 handled = 1;
1210
1211 if (status & RTK_ISR_RX_OK)
1212 rtk_rxeof(sc);
1213
1214 if (status & RTK_ISR_RX_ERR)
1215 rtk_rxeof(sc);
1216
1217 if (status & (RTK_ISR_TX_OK|RTK_ISR_TX_ERR))
1218 rtk_txeof(sc);
1219
1220 if (status & RTK_ISR_SYSTEM_ERR) {
1221 rtk_reset(sc);
1222 rtk_init(ifp);
1223 }
1224 }
1225
1226 /* Re-enable interrupts. */
1227 CSR_WRITE_2(sc, RTK_IMR, RTK_INTRS);
1228
1229 if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
1230 rtk_start(ifp);
1231
1232 #if NRND > 0
1233 if (RND_ENABLED(&sc->rnd_source))
1234 rnd_add_uint32(&sc->rnd_source, status);
1235 #endif
1236
1237 return handled;
1238 }
1239
1240 /*
1241 * Main transmit routine.
1242 */
1243
1244 STATIC void
1245 rtk_start(struct ifnet *ifp)
1246 {
1247 struct rtk_softc *sc;
1248 struct rtk_tx_desc *txd;
1249 struct mbuf *m_head, *m_new;
1250 int error, len;
1251
1252 sc = ifp->if_softc;
1253
1254 while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_free)) != NULL) {
1255 IFQ_POLL(&ifp->if_snd, m_head);
1256 if (m_head == NULL)
1257 break;
1258 m_new = NULL;
1259
1260 /*
1261 * Load the DMA map. If this fails, the packet didn't
1262 * fit in one DMA segment, and we need to copy. Note,
1263 * the packet must also be aligned.
1264 		 * If the packet is too small, copy it as well, so we are sure
1265 		 * to have enough room for the pad bytes.
1266 */
1267 if ((mtod(m_head, uintptr_t) & 3) != 0 ||
1268 m_head->m_pkthdr.len < ETHER_PAD_LEN ||
1269 bus_dmamap_load_mbuf(sc->sc_dmat, txd->txd_dmamap,
1270 m_head, BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
1271 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1272 if (m_new == NULL) {
1273 printf("%s: unable to allocate Tx mbuf\n",
1274 device_xname(&sc->sc_dev));
1275 break;
1276 }
1277 if (m_head->m_pkthdr.len > MHLEN) {
1278 MCLGET(m_new, M_DONTWAIT);
1279 if ((m_new->m_flags & M_EXT) == 0) {
1280 printf("%s: unable to allocate Tx "
1281 "cluster\n", device_xname(&sc->sc_dev));
1282 m_freem(m_new);
1283 break;
1284 }
1285 }
1286 m_copydata(m_head, 0, m_head->m_pkthdr.len,
1287 mtod(m_new, void *));
1288 m_new->m_pkthdr.len = m_new->m_len =
1289 m_head->m_pkthdr.len;
1290 if (m_head->m_pkthdr.len < ETHER_PAD_LEN) {
1291 memset(
1292 mtod(m_new, char *) + m_head->m_pkthdr.len,
1293 0, ETHER_PAD_LEN - m_head->m_pkthdr.len);
1294 m_new->m_pkthdr.len = m_new->m_len =
1295 ETHER_PAD_LEN;
1296 }
1297 error = bus_dmamap_load_mbuf(sc->sc_dmat,
1298 txd->txd_dmamap, m_new,
1299 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
1300 if (error) {
1301 printf("%s: unable to load Tx buffer, "
1302 "error = %d\n", device_xname(&sc->sc_dev), error);
1303 break;
1304 }
1305 }
1306 IFQ_DEQUEUE(&ifp->if_snd, m_head);
1307 #if NBPFILTER > 0
1308 /*
1309 * If there's a BPF listener, bounce a copy of this frame
1310 * to him.
1311 */
1312 if (ifp->if_bpf)
1313 bpf_mtap(ifp->if_bpf, m_head);
1314 #endif
1315 if (m_new != NULL) {
1316 m_freem(m_head);
1317 m_head = m_new;
1318 }
1319 txd->txd_mbuf = m_head;
1320
1321 SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_free, txd_q);
1322 SIMPLEQ_INSERT_TAIL(&sc->rtk_tx_dirty, txd, txd_q);
1323
1324 /*
1325 * Transmit the frame.
1326 */
1327 bus_dmamap_sync(sc->sc_dmat,
1328 txd->txd_dmamap, 0, txd->txd_dmamap->dm_mapsize,
1329 BUS_DMASYNC_PREWRITE);
1330
1331 len = txd->txd_dmamap->dm_segs[0].ds_len;
1332
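/*
 * Writing the frame length and early-TX threshold into the
 * descriptor's status register hands the buffer to the chip and
 * starts the transfer, so set the address register first.
 */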
1333 CSR_WRITE_4(sc, txd->txd_txaddr,
1334 txd->txd_dmamap->dm_segs[0].ds_addr);
1335 CSR_WRITE_4(sc, txd->txd_txstat,
1336 RTK_TXSTAT_THRESH(sc->sc_txthresh) | len);
1337
1338 /*
1339 * Set a timeout in case the chip goes out to lunch.
1340 */
1341 ifp->if_timer = 5;
1342 }
1343
1344 /*
1345 * We broke out of the loop because all our TX slots are
1346 * full. Mark the NIC as busy until it drains some of the
1347 * packets from the queue.
1348 */
1349 if (SIMPLEQ_EMPTY(&sc->rtk_tx_free))
1350 ifp->if_flags |= IFF_OACTIVE;
1351 }
1352
1353 STATIC int
1354 rtk_init(struct ifnet *ifp)
1355 {
1356 struct rtk_softc *sc = ifp->if_softc;
1357 int error, i;
1358 uint32_t rxcfg;
1359
1360 if ((error = rtk_enable(sc)) != 0)
1361 goto out;
1362
1363 /*
1364 * Cancel pending I/O.
1365 */
1366 rtk_stop(ifp, 0);
1367
1368 /* Init our MAC address */
1369 for (i = 0; i < ETHER_ADDR_LEN; i++) {
1370 CSR_WRITE_1(sc, RTK_IDR0 + i, CLLADDR(ifp->if_sadl)[i]);
1371 }
1372
1373 /* Init the RX buffer pointer register. */
1374 bus_dmamap_sync(sc->sc_dmat, sc->recv_dmamap, 0,
1375 sc->recv_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1376 CSR_WRITE_4(sc, RTK_RXADDR, sc->recv_dmamap->dm_segs[0].ds_addr);
1377
1378 /* Init TX descriptors. */
1379 rtk_list_tx_init(sc);
1380
1381 /* Init Early TX threshold. */
1382 sc->sc_txthresh = RTK_TXTH_256;
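/*
 * rtk_txeof() raises this threshold in 64-byte steps if the chip
 * reports transmit underruns.
 */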
1383 /*
1384 * Enable transmit and receive.
1385 */
1386 CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_TX_ENB|RTK_CMD_RX_ENB);
1387
1388 /*
1389 * Set the initial TX and RX configuration.
1390 */
1391 CSR_WRITE_4(sc, RTK_TXCFG, RTK_TXCFG_CONFIG);
1392 CSR_WRITE_4(sc, RTK_RXCFG, RTK_RXCFG_CONFIG);
1393
1394 /* Set the individual bit to receive frames for this host only. */
1395 rxcfg = CSR_READ_4(sc, RTK_RXCFG);
1396 rxcfg |= RTK_RXCFG_RX_INDIV;
1397
1398 /* If we want promiscuous mode, set the allframes bit. */
1399 if (ifp->if_flags & IFF_PROMISC) {
1400 rxcfg |= RTK_RXCFG_RX_ALLPHYS;
1401 CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
1402 } else {
1403 rxcfg &= ~RTK_RXCFG_RX_ALLPHYS;
1404 CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
1405 }
1406
1407 /*
1408 * Set capture broadcast bit to capture broadcast frames.
1409 */
1410 if (ifp->if_flags & IFF_BROADCAST) {
1411 rxcfg |= RTK_RXCFG_RX_BROAD;
1412 CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
1413 } else {
1414 rxcfg &= ~RTK_RXCFG_RX_BROAD;
1415 CSR_WRITE_4(sc, RTK_RXCFG, rxcfg);
1416 }
1417
1418 /*
1419 * Program the multicast filter, if necessary.
1420 */
1421 rtk_setmulti(sc);
1422
1423 /*
1424 * Enable interrupts.
1425 */
1426 CSR_WRITE_2(sc, RTK_IMR, RTK_INTRS);
1427
1428 /* Start RX/TX process. */
1429 CSR_WRITE_4(sc, RTK_MISSEDPKT, 0);
1430
1431 /* Enable receiver and transmitter. */
1432 CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_TX_ENB|RTK_CMD_RX_ENB);
1433
1434 CSR_WRITE_1(sc, RTK_CFG1, RTK_CFG1_DRVLOAD|RTK_CFG1_FULLDUPLEX);
1435
1436 /*
1437 * Set current media.
1438 */
1439 if ((error = ether_mediachange(ifp)) != 0)
1440 goto out;
1441
1442 ifp->if_flags |= IFF_RUNNING;
1443 ifp->if_flags &= ~IFF_OACTIVE;
1444
1445 callout_reset(&sc->rtk_tick_ch, hz, rtk_tick, sc);
1446
1447 out:
1448 if (error) {
1449 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1450 ifp->if_timer = 0;
1451 printf("%s: interface not running\n", device_xname(&sc->sc_dev));
1452 }
1453 return error;
1454 }
1455
1456 STATIC int
1457 rtk_ioctl(struct ifnet *ifp, u_long command, void *data)
1458 {
1459 struct rtk_softc *sc = ifp->if_softc;
1460 int s, error;
1461
1462 s = splnet();
1463 error = ether_ioctl(ifp, command, data);
1464 if (error == ENETRESET) {
1465 if (ifp->if_flags & IFF_RUNNING) {
1466 /*
1467 * Multicast list has changed. Set the
1468 * hardware filter accordingly.
1469 */
1470 rtk_setmulti(sc);
1471 }
1472 error = 0;
1473 }
1474 splx(s);
1475
1476 return error;
1477 }
1478
1479 STATIC void
1480 rtk_watchdog(struct ifnet *ifp)
1481 {
1482 struct rtk_softc *sc;
1483
1484 sc = ifp->if_softc;
1485
1486 printf("%s: watchdog timeout\n", device_xname(&sc->sc_dev));
1487 ifp->if_oerrors++;
1488 rtk_txeof(sc);
1489 rtk_rxeof(sc);
1490 rtk_init(ifp);
1491 }
1492
1493 /*
1494 * Stop the adapter and free any mbufs allocated to the
1495 * RX and TX lists.
1496 */
1497 STATIC void
1498 rtk_stop(struct ifnet *ifp, int disable)
1499 {
1500 struct rtk_softc *sc = ifp->if_softc;
1501 struct rtk_tx_desc *txd;
1502
1503 callout_stop(&sc->rtk_tick_ch);
1504
1505 mii_down(&sc->mii);
1506
1507 CSR_WRITE_1(sc, RTK_COMMAND, 0x00);
1508 CSR_WRITE_2(sc, RTK_IMR, 0x0000);
1509
1510 /*
1511 * Free the TX list buffers.
1512 */
1513 while ((txd = SIMPLEQ_FIRST(&sc->rtk_tx_dirty)) != NULL) {
1514 SIMPLEQ_REMOVE_HEAD(&sc->rtk_tx_dirty, txd_q);
1515 bus_dmamap_unload(sc->sc_dmat, txd->txd_dmamap);
1516 m_freem(txd->txd_mbuf);
1517 txd->txd_mbuf = NULL;
1518 CSR_WRITE_4(sc, txd->txd_txaddr, 0);
1519 }
1520
1521 if (disable)
1522 rtk_disable(sc);
1523
1524 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1525 ifp->if_timer = 0;
1526 }
1527
1528 STATIC void
1529 rtk_tick(void *arg)
1530 {
1531 struct rtk_softc *sc = arg;
1532 int s;
1533
1534 s = splnet();
1535 mii_tick(&sc->mii);
1536 splx(s);
1537
1538 callout_reset(&sc->rtk_tick_ch, hz, rtk_tick, sc);
1539 }
1540