/*	$NetBSD: if_tl.c,v 1.44 2001/08/06 19:20:26 bouyer Exp $	*/
2
3 /* XXX ALTQ XXX */
4
5 /*
6 * Copyright (c) 1997 Manuel Bouyer. All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by Manuel Bouyer.
19 * 4. The name of the author may not be used to endorse or promote products
20 * derived from this software without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 /*
35 * Texas Instruments ThunderLAN ethernet controller
36 * ThunderLAN Programmer's Guide (TI Literature Number SPWU013A)
37 * available from www.ti.com
38 */
39
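/*
 * Compile-time debug knobs.  TLDEBUG enables general attach/interrupt
 * diagnostics, TLDEBUG_RX/TLDEBUG_TX trace the receive and transmit paths,
 * TLDEBUG_ADDR dumps the address/hash registers, and TL_PRIV_STATS (on by
 * default) keeps private error counters which are printed, and the flag
 * cleared, when link0 is set on the running interface
 * (e.g. "ifconfig tl0 link0").
 */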
40 #undef TLDEBUG
41 #define TL_PRIV_STATS
42 #undef TLDEBUG_RX
43 #undef TLDEBUG_TX
44 #undef TLDEBUG_ADDR
45
46 #include "opt_inet.h"
47 #include "opt_ns.h"
48
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/mbuf.h>
52 #include <sys/protosw.h>
53 #include <sys/socket.h>
54 #include <sys/ioctl.h>
55 #include <sys/errno.h>
56 #include <sys/malloc.h>
57 #include <sys/kernel.h>
58 #include <sys/proc.h> /* only for declaration of wakeup() used by vm.h */
59 #include <sys/device.h>
60
61 #include <net/if.h>
62 #if defined(SIOCSIFMEDIA)
63 #include <net/if_media.h>
64 #endif
65 #include <net/if_types.h>
66 #include <net/if_dl.h>
67 #include <net/route.h>
68 #include <net/netisr.h>
69
70 #include "bpfilter.h"
71 #if NBPFILTER > 0
72 #include <net/bpf.h>
73 #include <net/bpfdesc.h>
74 #endif
75
76 #ifdef INET
77 #include <netinet/in.h>
78 #include <netinet/in_systm.h>
79 #include <netinet/in_var.h>
80 #include <netinet/ip.h>
81 #endif
82
83 #ifdef NS
84 #include <netns/ns.h>
85 #include <netns/ns_if.h>
86 #endif
87
88 #if defined(__NetBSD__)
89 #include <net/if_ether.h>
90 #include <uvm/uvm_extern.h>
91 #if defined(INET)
92 #include <netinet/if_inarp.h>
93 #endif
94
95 #include <machine/bus.h>
96 #include <machine/intr.h>
97
98 #include <dev/pci/pcireg.h>
99 #include <dev/pci/pcivar.h>
100 #include <dev/pci/pcidevs.h>
101
102 #include <dev/i2c/i2c_bus.h>
103 #include <dev/i2c/i2c_eeprom.h>
104
105 #include <dev/mii/mii.h>
106 #include <dev/mii/miivar.h>
107
108 #include <dev/mii/tlphyvar.h>
109
110 #include <dev/pci/if_tlregs.h>
111 #include <dev/pci/if_tlvar.h>
112 #endif /* __NetBSD__ */
113
114 /* number of transmit/receive buffers */
115 #ifndef TL_NBUF
116 #define TL_NBUF 10
117 #endif
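/*
 * TL_NBUF can be raised at build time with a compile-time define (for
 * example CFLAGS+=-DTL_NBUF=32, assuming it is not already provided as a
 * kernel option); a larger value uses more DMA memory for the receive and
 * transmit lists.
 */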
118
119 static int tl_pci_match __P((struct device *, struct cfdata *, void *));
120 static void tl_pci_attach __P((struct device *, struct device *, void *));
121 static int tl_intr __P((void *));
122
123 static int tl_ifioctl __P((struct ifnet *, ioctl_cmd_t, caddr_t));
124 static int tl_mediachange __P((struct ifnet *));
125 static void tl_mediastatus __P((struct ifnet *, struct ifmediareq *));
126 static void tl_ifwatchdog __P((struct ifnet *));
127 static void tl_shutdown __P((void*));
128
129 static void tl_ifstart __P((struct ifnet *));
130 static void tl_reset __P((tl_softc_t*));
131 static int tl_init __P((tl_softc_t*));
132 static void tl_restart __P((void *));
133 static int tl_add_RxBuff __P((tl_softc_t*, struct Rx_list*, struct mbuf*));
134 static void tl_read_stats __P((tl_softc_t*));
135 static void tl_ticks __P((void*));
136 static int tl_multicast_hash __P((u_int8_t*));
137 static void tl_addr_filter __P((tl_softc_t*));
138
139 static u_int32_t tl_intreg_read __P((tl_softc_t*, u_int32_t));
140 static void tl_intreg_write __P((tl_softc_t*, u_int32_t, u_int32_t));
141 static u_int8_t tl_intreg_read_byte __P((tl_softc_t*, u_int32_t));
142 static void tl_intreg_write_byte __P((tl_softc_t*, u_int32_t, u_int8_t));
143
144 void tl_mii_sync __P((struct tl_softc *));
145 void tl_mii_sendbits __P((struct tl_softc *, u_int32_t, int));
146
147
148 #if defined(TLDEBUG_RX)
149 static void ether_printheader __P((struct ether_header*));
150 #endif
151
152 int tl_mii_read __P((struct device *, int, int));
153 void tl_mii_write __P((struct device *, int, int, int));
154
155 void tl_statchg __P((struct device *));
156
157 void tl_i2c_set __P((void*, u_int8_t));
158 void tl_i2c_clr __P((void*, u_int8_t));
159 int tl_i2c_read __P((void*, u_int8_t));
160
161 static __inline void netsio_clr __P((tl_softc_t*, u_int8_t));
162 static __inline void netsio_set __P((tl_softc_t*, u_int8_t));
163 static __inline u_int8_t netsio_read __P((tl_softc_t*, u_int8_t));
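/*
 * Helpers for the NetSio internal register, reached through the DIO window
 * below.  NetSio carries the serial lines used to bit-bang both the MII
 * management interface (TL_NETSIO_MCLK/MDATA/MTXEN) and the EEPROM i2c
 * interface (TL_NETSIO_ECLOCK/EDATA/ETXEN); for example,
 * netsio_set(sc, TL_NETSIO_MCLK) raises the MII management clock line.
 */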
164 static __inline void netsio_clr(sc, bits)
165 tl_softc_t* sc;
166 u_int8_t bits;
167 {
168 tl_intreg_write_byte(sc, TL_INT_NET + TL_INT_NetSio,
169 tl_intreg_read_byte(sc, TL_INT_NET + TL_INT_NetSio) & (~bits));
170 }
171 static __inline void netsio_set(sc, bits)
172 tl_softc_t* sc;
173 u_int8_t bits;
174 {
175 tl_intreg_write_byte(sc, TL_INT_NET + TL_INT_NetSio,
176 tl_intreg_read_byte(sc, TL_INT_NET + TL_INT_NetSio) | bits);
177 }
178 static __inline u_int8_t netsio_read(sc, bits)
179 tl_softc_t* sc;
180 u_int8_t bits;
181 {
182 return (tl_intreg_read_byte(sc, TL_INT_NET + TL_INT_NetSio) & bits);
183 }
184
185 struct cfattach tl_ca = {
186 sizeof(tl_softc_t), tl_pci_match, tl_pci_attach
187 };
188
189 const struct tl_product_desc tl_compaq_products[] = {
190 { PCI_PRODUCT_COMPAQ_N100TX, TLPHY_MEDIA_NO_10_T,
191 "Compaq Netelligent 10/100 TX" },
192 { PCI_PRODUCT_COMPAQ_N10T, TLPHY_MEDIA_10_5,
193 "Compaq Netelligent 10 T" },
194 { PCI_PRODUCT_COMPAQ_IntNF3P, TLPHY_MEDIA_10_2,
195 "Compaq Integrated NetFlex 3/P" },
196 { PCI_PRODUCT_COMPAQ_IntPL100TX, TLPHY_MEDIA_10_2|TLPHY_MEDIA_NO_10_T,
197 "Compaq ProLiant Integrated Netelligent 10/100 TX" },
198 { PCI_PRODUCT_COMPAQ_DPNet100TX, TLPHY_MEDIA_10_5|TLPHY_MEDIA_NO_10_T,
199 "Compaq Dual Port Netelligent 10/100 TX" },
200 { PCI_PRODUCT_COMPAQ_DP4000, TLPHY_MEDIA_10_5|TLPHY_MEDIA_NO_10_T,
201 "Compaq Deskpro 4000 5233MMX" },
202 { PCI_PRODUCT_COMPAQ_NF3P_BNC, TLPHY_MEDIA_10_2,
203 "Compaq NetFlex 3/P w/ BNC" },
204 { PCI_PRODUCT_COMPAQ_NF3P, TLPHY_MEDIA_10_5,
205 "Compaq NetFlex 3/P" },
206 { 0, 0, NULL },
207 };
208
209 const struct tl_product_desc tl_ti_products[] = {
210 /*
211 * Built-in Ethernet on the TI TravelMate 5000
212 * docking station; better product description?
213 */
214 { PCI_PRODUCT_TI_TLAN, 0,
215 "Texas Instruments ThunderLAN" },
216 { 0, 0, NULL },
217 };
218
219 struct tl_vendor_desc {
220 u_int32_t tv_vendor;
221 const struct tl_product_desc *tv_products;
222 };
223
224 const struct tl_vendor_desc tl_vendors[] = {
225 { PCI_VENDOR_COMPAQ, tl_compaq_products },
226 { PCI_VENDOR_TI, tl_ti_products },
227 { 0, NULL },
228 };
229
230 const struct tl_product_desc *tl_lookup_product __P((u_int32_t));
231
232 const struct tl_product_desc *
233 tl_lookup_product(id)
234 u_int32_t id;
235 {
236 const struct tl_product_desc *tp;
237 const struct tl_vendor_desc *tv;
238
239 for (tv = tl_vendors; tv->tv_products != NULL; tv++)
240 if (PCI_VENDOR(id) == tv->tv_vendor)
241 break;
242
243 if ((tp = tv->tv_products) == NULL)
244 return (NULL);
245
246 for (; tp->tp_desc != NULL; tp++)
247 if (PCI_PRODUCT(id) == tp->tp_product)
248 break;
249
250 if (tp->tp_desc == NULL)
251 return (NULL);
252
253 return (tp);
254 }
255
256 static int
257 tl_pci_match(parent, match, aux)
258 struct device *parent;
259 struct cfdata *match;
260 void *aux;
261 {
262 struct pci_attach_args *pa = (struct pci_attach_args *) aux;
263
264 if (tl_lookup_product(pa->pa_id) != NULL)
265 return (1);
266
267 return (0);
268 }
269
270 static void
271 tl_pci_attach(parent, self, aux)
272 struct device * parent;
273 struct device * self;
274 void * aux;
275 {
276 tl_softc_t *sc = (tl_softc_t *)self;
277 struct pci_attach_args * const pa = (struct pci_attach_args *) aux;
278 const struct tl_product_desc *tp;
279 struct ifnet * const ifp = &sc->tl_if;
280 bus_space_tag_t iot, memt;
281 bus_space_handle_t ioh, memh;
282 pci_intr_handle_t intrhandle;
283 const char *intrstr;
284 int i, tmp, ioh_valid, memh_valid;
285 int reg_io, reg_mem;
286 pcireg_t reg10, reg14;
287 pcireg_t csr;
288
289 printf("\n");
290
291 callout_init(&sc->tl_tick_ch);
292 callout_init(&sc->tl_restart_ch);
293
294 tp = tl_lookup_product(pa->pa_id);
295 if (tp == NULL)
296 panic("tl_pci_attach: impossible");
297 sc->tl_product = tp;
298
299 /*
	 * Map the card space.  First we have to find the I/O and MEM
301 * registers. I/O is supposed to be at 0x10, MEM at 0x14,
302 * but some boards (Compaq Netflex 3/P PCI) seem to have it reversed.
303 * The ThunderLAN manual is not consistent about this either (there
304 * are both cases in code examples).
305 */
306 reg10 = pci_conf_read(pa->pa_pc, pa->pa_tag, 0x10);
307 reg14 = pci_conf_read(pa->pa_pc, pa->pa_tag, 0x14);
308 if (PCI_MAPREG_TYPE(reg10) == PCI_MAPREG_TYPE_IO)
309 reg_io = 0x10;
310 else if (PCI_MAPREG_TYPE(reg14) == PCI_MAPREG_TYPE_IO)
311 reg_io = 0x14;
312 else
313 reg_io = 0;
314 if (PCI_MAPREG_TYPE(reg10) == PCI_MAPREG_TYPE_MEM)
315 reg_mem = 0x10;
316 else if (PCI_MAPREG_TYPE(reg14) == PCI_MAPREG_TYPE_MEM)
317 reg_mem = 0x14;
318 else
319 reg_mem = 0;
320
321 if (reg_io != 0)
322 ioh_valid = (pci_mapreg_map(pa, reg_io, PCI_MAPREG_TYPE_IO,
323 0, &iot, &ioh, NULL, NULL) == 0);
324 else
325 ioh_valid = 0;
326 if (reg_mem != 0)
		memh_valid = (pci_mapreg_map(pa, reg_mem,
328 PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT,
329 0, &memt, &memh, NULL, NULL) == 0);
330 else
331 memh_valid = 0;
332
333 if (ioh_valid) {
334 sc->tl_bustag = iot;
335 sc->tl_bushandle = ioh;
336 } else if (memh_valid) {
337 sc->tl_bustag = memt;
338 sc->tl_bushandle = memh;
339 } else {
340 printf("%s: unable to map device registers\n",
341 sc->sc_dev.dv_xname);
342 return;
343 }
344 sc->tl_dmatag = pa->pa_dmat;
345
346 /* Enable the device. */
347 csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
348 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
349 csr | PCI_COMMAND_MASTER_ENABLE);
350
351 printf("%s: %s\n", sc->sc_dev.dv_xname, tp->tp_desc);
352
353 tl_reset(sc);
354
355 /* fill in the i2c struct */
356 sc->i2cbus.adapter_softc = sc;
357 sc->i2cbus.set_bit = tl_i2c_set;
358 sc->i2cbus.clr_bit = tl_i2c_clr;
359 sc->i2cbus.read_bit = tl_i2c_read;
360
361 #ifdef TLDEBUG
362 printf("default values of INTreg: 0x%x\n",
363 tl_intreg_read(sc, TL_INT_Defaults));
364 #endif
365
366 /* read mac addr */
367 for (i=0; i<ETHER_ADDR_LEN; i++) {
368 tmp = i2c_eeprom_read(&sc->i2cbus, 0x83 + i);
369 if (tmp < 0) {
			printf("%s: error reading Ethernet address\n",
371 sc->sc_dev.dv_xname);
372 return;
373 } else {
374 sc->tl_enaddr[i] = tmp;
375 }
376 }
377 printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
378 ether_sprintf(sc->tl_enaddr));
379
380 /* Map and establish interrupts */
381 if (pci_intr_map(pa, &intrhandle)) {
382 printf("%s: couldn't map interrupt\n", sc->sc_dev.dv_xname);
383 return;
384 }
385 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
386 sc->tl_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_NET,
387 tl_intr, sc);
388 if (sc->tl_ih == NULL) {
389 printf("%s: couldn't establish interrupt",
390 sc->sc_dev.dv_xname);
391 if (intrstr != NULL)
392 printf(" at %s", intrstr);
393 printf("\n");
394 return;
395 }
396 printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);
397
398 /* init these pointers, so that tl_shutdown won't try to read them */
399 sc->Rx_list = NULL;
400 sc->Tx_list = NULL;
401
402 /*
403 * Add shutdown hook so that DMA is disabled prior to reboot. Not
	 * doing so could allow DMA to corrupt kernel memory during the
405 * reboot before the driver initializes.
406 */
407 (void) shutdownhook_establish(tl_shutdown, sc);
408
409 /*
410 * Initialize our media structures and probe the MII.
411 *
412 * Note that we don't care about the media instance. We
413 * are expecting to have multiple PHYs on the 10/100 cards,
414 * and on those cards we exclude the internal PHY from providing
	 * 10baseT.  Ignoring the instance means we don't have to specify
	 * it on the command line when switching media.
417 */
418 sc->tl_mii.mii_ifp = ifp;
419 sc->tl_mii.mii_readreg = tl_mii_read;
420 sc->tl_mii.mii_writereg = tl_mii_write;
421 sc->tl_mii.mii_statchg = tl_statchg;
422 ifmedia_init(&sc->tl_mii.mii_media, IFM_IMASK, tl_mediachange,
423 tl_mediastatus);
424 mii_attach(self, &sc->tl_mii, 0xffffffff, MII_PHY_ANY,
425 MII_OFFSET_ANY, 0);
426 if (LIST_FIRST(&sc->tl_mii.mii_phys) == NULL) {
427 ifmedia_add(&sc->tl_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
428 ifmedia_set(&sc->tl_mii.mii_media, IFM_ETHER|IFM_NONE);
429 } else
430 ifmedia_set(&sc->tl_mii.mii_media, IFM_ETHER|IFM_AUTO);
431
432 strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
433 sc->tl_if.if_softc = sc;
434 ifp->if_flags = IFF_BROADCAST|IFF_SIMPLEX|IFF_NOTRAILERS|IFF_MULTICAST;
435 ifp->if_ioctl = tl_ifioctl;
436 ifp->if_start = tl_ifstart;
437 ifp->if_watchdog = tl_ifwatchdog;
438 ifp->if_timer = 0;
439 if_attach(ifp);
440 ether_ifattach(&(sc)->tl_if, (sc)->tl_enaddr);
441 }
442
443 static void
444 tl_reset(sc)
445 tl_softc_t *sc;
446 {
447 int i;
448
449 /* read stats */
450 if (sc->tl_if.if_flags & IFF_RUNNING) {
451 callout_stop(&sc->tl_tick_ch);
452 tl_read_stats(sc);
453 }
454 /* Reset adapter */
455 TL_HR_WRITE(sc, TL_HOST_CMD,
456 TL_HR_READ(sc, TL_HOST_CMD) | HOST_CMD_Ad_Rst);
457 DELAY(100000);
458 /* Disable interrupts */
459 TL_HR_WRITE(sc, TL_HOST_CMD, HOST_CMD_IntOff);
460 /* setup aregs & hash */
461 for (i = TL_INT_Areg0; i <= TL_INT_HASH2; i = i + 4)
462 tl_intreg_write(sc, i, 0);
463 #ifdef TLDEBUG_ADDR
464 printf("Areg & hash registers: \n");
465 for (i = TL_INT_Areg0; i <= TL_INT_HASH2; i = i + 4)
466 printf(" reg %x: %x\n", i, tl_intreg_read(sc, i));
467 #endif
468 /* Setup NetConfig */
469 tl_intreg_write(sc, TL_INT_NetConfig,
470 TL_NETCONFIG_1F | TL_NETCONFIG_1chn | TL_NETCONFIG_PHY_EN);
471 /* Bsize: accept default */
472 /* TX commit in Acommit: accept default */
473 /* Load Ld_tmr and Ld_thr */
474 /* Ld_tmr = 3 */
475 TL_HR_WRITE(sc, TL_HOST_CMD, 0x3 | HOST_CMD_LdTmr);
476 /* Ld_thr = 0 */
477 TL_HR_WRITE(sc, TL_HOST_CMD, 0x0 | HOST_CMD_LdThr);
478 /* Unreset MII */
479 netsio_set(sc, TL_NETSIO_NMRST);
480 DELAY(100000);
481 sc->tl_mii.mii_media_status &= ~IFM_ACTIVE;
482 }
483
484 static void tl_shutdown(v)
485 void *v;
486 {
487 tl_softc_t *sc = v;
488 struct Tx_list *Tx;
489 int i;
490
491 if ((sc->tl_if.if_flags & IFF_RUNNING) == 0)
492 return;
493 /* disable interrupts */
494 TL_HR_WRITE(sc, TL_HOST_CMD, HOST_CMD_IntOff);
495 /* stop TX and RX channels */
496 TL_HR_WRITE(sc, TL_HOST_CMD,
497 HOST_CMD_STOP | HOST_CMD_RT | HOST_CMD_Nes);
498 TL_HR_WRITE(sc, TL_HOST_CMD, HOST_CMD_STOP);
499 DELAY(100000);
500
501 /* stop statistics reading loop, read stats */
502 callout_stop(&sc->tl_tick_ch);
503 tl_read_stats(sc);
504
505 /* Down the MII. */
506 mii_down(&sc->tl_mii);
507
	/* free the allocated memory */
509 if (sc->Rx_list) {
510 for (i=0; i< TL_NBUF; i++) {
511 if (sc->Rx_list[i].m) {
512 bus_dmamap_unload(sc->tl_dmatag,
513 sc->Rx_list[i].m_dmamap);
514 m_freem(sc->Rx_list[i].m);
515 }
516 bus_dmamap_destroy(sc->tl_dmatag,
517 sc->Rx_list[i].m_dmamap);
518 sc->Rx_list[i].m = NULL;
519 }
520 free(sc->Rx_list, M_DEVBUF);
521 sc->Rx_list = NULL;
522 bus_dmamap_unload(sc->tl_dmatag, sc->Rx_dmamap);
523 bus_dmamap_destroy(sc->tl_dmatag, sc->Rx_dmamap);
524 sc->hw_Rx_list = NULL;
525 while ((Tx = sc->active_Tx) != NULL) {
526 Tx->hw_list->stat = 0;
527 bus_dmamap_unload(sc->tl_dmatag, Tx->m_dmamap);
528 bus_dmamap_destroy(sc->tl_dmatag, Tx->m_dmamap);
529 m_freem(Tx->m);
530 sc->active_Tx = Tx->next;
531 Tx->next = sc->Free_Tx;
532 sc->Free_Tx = Tx;
533 }
534 sc->last_Tx = NULL;
535 free(sc->Tx_list, M_DEVBUF);
536 sc->Tx_list = NULL;
537 bus_dmamap_unload(sc->tl_dmatag, sc->Tx_dmamap);
538 bus_dmamap_destroy(sc->tl_dmatag, sc->Tx_dmamap);
539 bus_dmamem_free(sc->tl_dmatag, &sc->ctrl_segs, sc->ctrl_nsegs);
540 sc->hw_Tx_list = NULL;
541 }
542 sc->tl_if.if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
543 sc->tl_mii.mii_media_status &= ~IFM_ACTIVE;
544 }
545
546 static void tl_restart(v)
547 void *v;
548 {
549 tl_init(v);
550 }
551
552 static int tl_init(sc)
553 tl_softc_t *sc;
554 {
555 struct ifnet *ifp = &sc->tl_if;
556 int i, s, error;
557 char *errstring;
558 char *ctrl;
559 char *nullbuf;
560
561 s = splnet();
562 /* cancel any pending IO */
563 tl_shutdown(sc);
564 tl_reset(sc);
565 if ((sc->tl_if.if_flags & IFF_UP) == 0) {
566 splx(s);
567 return 0;
568 }
	/* Set various registers to reasonable values */
570 /* setup NetCmd in promisc mode if needed */
571 i = (ifp->if_flags & IFF_PROMISC) ? TL_NETCOMMAND_CAF : 0;
572 tl_intreg_write_byte(sc, TL_INT_NET + TL_INT_NetCmd,
573 TL_NETCOMMAND_NRESET | TL_NETCOMMAND_NWRAP | i);
574 /* Max receive size : MCLBYTES */
575 tl_intreg_write_byte(sc, TL_INT_MISC + TL_MISC_MaxRxL, MCLBYTES & 0xff);
576 tl_intreg_write_byte(sc, TL_INT_MISC + TL_MISC_MaxRxH,
577 (MCLBYTES >> 8) & 0xff);
578
579 /* init MAC addr */
580 for (i = 0; i < ETHER_ADDR_LEN; i++)
581 tl_intreg_write_byte(sc, TL_INT_Areg0 + i , sc->tl_enaddr[i]);
582 /* add multicast filters */
583 tl_addr_filter(sc);
584 #ifdef TLDEBUG_ADDR
585 printf("Wrote Mac addr, Areg & hash registers are now: \n");
586 for (i = TL_INT_Areg0; i <= TL_INT_HASH2; i = i + 4)
587 printf(" reg %x: %x\n", i, tl_intreg_read(sc, i));
588 #endif
589
	/* Pre-allocate the receive mbufs and build the lists */
591 sc->Rx_list = malloc(sizeof(struct Rx_list) * TL_NBUF, M_DEVBUF,
592 M_NOWAIT);
593 sc->Tx_list = malloc(sizeof(struct Tx_list) * TL_NBUF, M_DEVBUF,
594 M_NOWAIT);
595 if (sc->Rx_list == NULL || sc->Tx_list == NULL) {
596 errstring = "out of memory for lists";
597 error = ENOMEM;
598 goto bad;
599 }
600 memset(sc->Rx_list, 0, sizeof(struct Rx_list) * TL_NBUF);
601 memset(sc->Tx_list, 0, sizeof(struct Tx_list) * TL_NBUF);
602 error = bus_dmamap_create(sc->tl_dmatag,
603 sizeof(struct tl_Rx_list) * TL_NBUF, 1,
604 sizeof(struct tl_Rx_list) * TL_NBUF, 0, BUS_DMA_WAITOK,
605 &sc->Rx_dmamap);
606 if (error == 0)
607 error = bus_dmamap_create(sc->tl_dmatag,
608 sizeof(struct tl_Tx_list) * TL_NBUF, 1,
609 sizeof(struct tl_Tx_list) * TL_NBUF, 0, BUS_DMA_WAITOK,
610 &sc->Tx_dmamap);
611 if (error == 0)
612 error = bus_dmamap_create(sc->tl_dmatag, ETHER_MIN_TX, 1,
613 ETHER_MIN_TX, 0, BUS_DMA_WAITOK,
614 &sc->null_dmamap);
615 if (error) {
616 errstring = "can't allocate DMA maps for lists";
617 goto bad;
618 }
619 error = bus_dmamem_alloc(sc->tl_dmatag,
620 PAGE_SIZE, 0, PAGE_SIZE,
621 &sc->ctrl_segs, 1, &sc->ctrl_nsegs, BUS_DMA_NOWAIT);
622 if (error == 0)
623 error = bus_dmamem_map(sc->tl_dmatag, &sc->ctrl_segs,
624 sc->ctrl_nsegs, PAGE_SIZE, (caddr_t*)&ctrl,
625 BUS_DMA_WAITOK | BUS_DMA_COHERENT);
626 if (error) {
627 errstring = "can't allocate DMA memory for lists";
628 goto bad;
629 }
630 memset(ctrl, 0, PAGE_SIZE);
631 sc->hw_Rx_list = (void*)ctrl;
632 sc->hw_Tx_list = (void*)(ctrl + sizeof(struct tl_Rx_list) * TL_NBUF);
633 nullbuf = ctrl + sizeof(struct tl_Rx_list) * TL_NBUF +
634 sizeof(struct tl_Tx_list) * TL_NBUF;
635 error = bus_dmamap_load(sc->tl_dmatag, sc->Rx_dmamap,
636 sc->hw_Rx_list, sizeof(struct tl_Rx_list) * TL_NBUF, NULL,
637 BUS_DMA_WAITOK);
638 if (error == 0)
639 error = bus_dmamap_load(sc->tl_dmatag, sc->Tx_dmamap,
640 sc->hw_Tx_list, sizeof(struct tl_Tx_list) * TL_NBUF, NULL,
641 BUS_DMA_WAITOK);
642 if (error == 0)
643 error = bus_dmamap_load(sc->tl_dmatag, sc->null_dmamap,
644 nullbuf, ETHER_MIN_TX, NULL, BUS_DMA_WAITOK);
645 if (error) {
646 errstring = "can't DMA map DMA memory for lists";
647 goto bad;
648 }
649 for (i=0; i< TL_NBUF; i++) {
650 error = bus_dmamap_create(sc->tl_dmatag, MCLBYTES,
651 1, MCLBYTES, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
652 &sc->Rx_list[i].m_dmamap);
653 if (error == 0) {
654 error = bus_dmamap_create(sc->tl_dmatag, MCLBYTES,
655 TL_NSEG, MCLBYTES, 0,
656 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
657 &sc->Tx_list[i].m_dmamap);
658 }
659 if (error) {
660 errstring = "can't allocate DMA maps for mbufs";
661 goto bad;
662 }
663 sc->Rx_list[i].hw_list = &sc->hw_Rx_list[i];
664 sc->Rx_list[i].hw_listaddr = sc->Rx_dmamap->dm_segs[0].ds_addr
665 + sizeof(struct tl_Rx_list) * i;
666 sc->Tx_list[i].hw_list = &sc->hw_Tx_list[i];
667 sc->Tx_list[i].hw_listaddr = sc->Tx_dmamap->dm_segs[0].ds_addr
668 + sizeof(struct tl_Tx_list) * i;
669 if (tl_add_RxBuff(sc, &sc->Rx_list[i], NULL) == 0) {
670 errstring = "out of mbuf for receive list";
671 error = ENOMEM;
672 goto bad;
673 }
674 if (i > 0) { /* chain the list */
675 sc->Rx_list[i-1].next = &sc->Rx_list[i];
676 sc->hw_Rx_list[i-1].fwd =
677 htole32(sc->Rx_list[i].hw_listaddr);
678 sc->Tx_list[i-1].next = &sc->Tx_list[i];
679 }
680 }
681 sc->hw_Rx_list[TL_NBUF-1].fwd = 0;
682 sc->Rx_list[TL_NBUF-1].next = NULL;
683 sc->hw_Tx_list[TL_NBUF-1].fwd = 0;
684 sc->Tx_list[TL_NBUF-1].next = NULL;
685
686 sc->active_Rx = &sc->Rx_list[0];
687 sc->last_Rx = &sc->Rx_list[TL_NBUF-1];
688 sc->active_Tx = sc->last_Tx = NULL;
689 sc->Free_Tx = &sc->Tx_list[0];
690 bus_dmamap_sync(sc->tl_dmatag, sc->Rx_dmamap, 0,
691 sizeof(struct tl_Rx_list) * TL_NBUF,
692 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
693 bus_dmamap_sync(sc->tl_dmatag, sc->Tx_dmamap, 0,
694 sizeof(struct tl_Tx_list) * TL_NBUF,
695 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
696 bus_dmamap_sync(sc->tl_dmatag, sc->null_dmamap, 0, ETHER_MIN_TX,
697 BUS_DMASYNC_PREWRITE);
698
699 /* set media */
700 mii_mediachg(&sc->tl_mii);
701
702 /* start ticks calls */
703 callout_reset(&sc->tl_tick_ch, hz, tl_ticks, sc);
	/* write address of Rx list and enable interrupts */
705 TL_HR_WRITE(sc, TL_HOST_CH_PARM, sc->Rx_list[0].hw_listaddr);
706 TL_HR_WRITE(sc, TL_HOST_CMD,
707 HOST_CMD_GO | HOST_CMD_RT | HOST_CMD_Nes | HOST_CMD_IntOn);
708 sc->tl_if.if_flags |= IFF_RUNNING;
709 sc->tl_if.if_flags &= ~IFF_OACTIVE;
710 return 0;
711 bad:
712 printf("%s: %s\n", sc->sc_dev.dv_xname, errstring);
713 sc->tl_if.if_flags &= ~IFF_UP;
714 splx(s);
715 return error;
716 }
717
718
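/*
 * Access to the ThunderLAN's internal (DIO) registers is indirect: the
 * internal register offset is first written to the host DIO address
 * register, then data is moved through the DIO data window, with the low
 * offset bits selecting the byte lane for 8-bit accesses.  For example,
 * tl_intreg_read_byte(sc, TL_INT_NET + TL_INT_NetCmd) fetches the NetCmd
 * byte.
 */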
719 static u_int32_t
720 tl_intreg_read(sc, reg)
721 tl_softc_t *sc;
722 u_int32_t reg;
723 {
724 TL_HR_WRITE(sc, TL_HOST_INTR_DIOADR, reg & TL_HOST_DIOADR_MASK);
725 return TL_HR_READ(sc, TL_HOST_DIO_DATA);
726 }
727
728 static u_int8_t
729 tl_intreg_read_byte(sc, reg)
730 tl_softc_t *sc;
731 u_int32_t reg;
732 {
733 TL_HR_WRITE(sc, TL_HOST_INTR_DIOADR,
734 (reg & (~0x07)) & TL_HOST_DIOADR_MASK);
735 return TL_HR_READ_BYTE(sc, TL_HOST_DIO_DATA + (reg & 0x07));
736 }
737
738 static void
739 tl_intreg_write(sc, reg, val)
740 tl_softc_t *sc;
741 u_int32_t reg;
742 u_int32_t val;
743 {
744 TL_HR_WRITE(sc, TL_HOST_INTR_DIOADR, reg & TL_HOST_DIOADR_MASK);
745 TL_HR_WRITE(sc, TL_HOST_DIO_DATA, val);
746 }
747
748 static void
749 tl_intreg_write_byte(sc, reg, val)
750 tl_softc_t *sc;
751 u_int32_t reg;
752 u_int8_t val;
753 {
754 TL_HR_WRITE(sc, TL_HOST_INTR_DIOADR,
755 (reg & (~0x03)) & TL_HOST_DIOADR_MASK);
756 TL_HR_WRITE_BYTE(sc, TL_HOST_DIO_DATA + (reg & 0x03), val);
757 }
758
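/*
 * MII management frames are bit-banged over the NetSio MDATA/MCLK lines.
 * tl_mii_sync() idles the bus by clocking 32 cycles with the transmitter
 * disabled, and tl_mii_sendbits() shifts out the `nbits' low-order bits of
 * `data', most significant bit first; tl_mii_read() and tl_mii_write()
 * below assemble the usual <start><op><phy><reg><turnaround><16 data bits>
 * frames from these primitives.
 */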
759 void
760 tl_mii_sync(sc)
761 struct tl_softc *sc;
762 {
763 int i;
764
765 netsio_clr(sc, TL_NETSIO_MTXEN);
766 for (i = 0; i < 32; i++) {
767 netsio_clr(sc, TL_NETSIO_MCLK);
768 netsio_set(sc, TL_NETSIO_MCLK);
769 }
770 }
771
772 void
773 tl_mii_sendbits(sc, data, nbits)
774 struct tl_softc *sc;
775 u_int32_t data;
776 int nbits;
777 {
778 int i;
779
780 netsio_set(sc, TL_NETSIO_MTXEN);
781 for (i = 1 << (nbits - 1); i; i = i >> 1) {
782 netsio_clr(sc, TL_NETSIO_MCLK);
783 netsio_read(sc, TL_NETSIO_MCLK);
784 if (data & i)
785 netsio_set(sc, TL_NETSIO_MDATA);
786 else
787 netsio_clr(sc, TL_NETSIO_MDATA);
788 netsio_set(sc, TL_NETSIO_MCLK);
789 netsio_read(sc, TL_NETSIO_MCLK);
790 }
791 }
792
793 int
794 tl_mii_read(self, phy, reg)
795 struct device *self;
796 int phy, reg;
797 {
798 struct tl_softc *sc = (struct tl_softc *)self;
799 int val = 0, i, err;
800
801 /*
802 * Read the PHY register by manually driving the MII control lines.
803 */
804
805 tl_mii_sync(sc);
806 tl_mii_sendbits(sc, MII_COMMAND_START, 2);
807 tl_mii_sendbits(sc, MII_COMMAND_READ, 2);
808 tl_mii_sendbits(sc, phy, 5);
809 tl_mii_sendbits(sc, reg, 5);
810
811 netsio_clr(sc, TL_NETSIO_MTXEN);
812 netsio_clr(sc, TL_NETSIO_MCLK);
813 netsio_set(sc, TL_NETSIO_MCLK);
814 netsio_clr(sc, TL_NETSIO_MCLK);
815
816 err = netsio_read(sc, TL_NETSIO_MDATA);
817 netsio_set(sc, TL_NETSIO_MCLK);
818
819 /* Even if an error occurs, must still clock out the cycle. */
820 for (i = 0; i < 16; i++) {
821 val <<= 1;
822 netsio_clr(sc, TL_NETSIO_MCLK);
823 if (err == 0 && netsio_read(sc, TL_NETSIO_MDATA))
824 val |= 1;
825 netsio_set(sc, TL_NETSIO_MCLK);
826 }
827 netsio_clr(sc, TL_NETSIO_MCLK);
828 netsio_set(sc, TL_NETSIO_MCLK);
829
830 return (err ? 0 : val);
831 }
832
833 void
834 tl_mii_write(self, phy, reg, val)
835 struct device *self;
836 int phy, reg, val;
837 {
838 struct tl_softc *sc = (struct tl_softc *)self;
839
840 /*
841 * Write the PHY register by manually driving the MII control lines.
842 */
843
844 tl_mii_sync(sc);
845 tl_mii_sendbits(sc, MII_COMMAND_START, 2);
846 tl_mii_sendbits(sc, MII_COMMAND_WRITE, 2);
847 tl_mii_sendbits(sc, phy, 5);
848 tl_mii_sendbits(sc, reg, 5);
849 tl_mii_sendbits(sc, MII_COMMAND_ACK, 2);
850 tl_mii_sendbits(sc, val, 16);
851
852 netsio_clr(sc, TL_NETSIO_MCLK);
853 netsio_set(sc, TL_NETSIO_MCLK);
854 }
855
856 void
857 tl_statchg(self)
858 struct device *self;
859 {
860 tl_softc_t *sc = (struct tl_softc *)self;
861 u_int32_t reg;
862
863 #ifdef TLDEBUG
	printf("tl_statchg, media %x\n", sc->tl_mii.mii_media.ifm_media);
865 #endif
866
867 /*
868 * We must keep the ThunderLAN and the PHY in sync as
869 * to the status of full-duplex!
870 */
871 reg = tl_intreg_read_byte(sc, TL_INT_NET + TL_INT_NetCmd);
872 if (sc->tl_mii.mii_media_active & IFM_FDX)
873 reg |= TL_NETCOMMAND_DUPLEX;
874 else
875 reg &= ~TL_NETCOMMAND_DUPLEX;
876 tl_intreg_write_byte(sc, TL_INT_NET + TL_INT_NetCmd, reg);
877 }
878
879 void tl_i2c_set(v, bit)
880 void *v;
881 u_int8_t bit;
882 {
883 tl_softc_t *sc = v;
884
885 switch (bit) {
886 case I2C_DATA:
887 netsio_set(sc, TL_NETSIO_EDATA);
888 break;
889 case I2C_CLOCK:
890 netsio_set(sc, TL_NETSIO_ECLOCK);
891 break;
892 case I2C_TXEN:
893 netsio_set(sc, TL_NETSIO_ETXEN);
894 break;
895 default:
896 printf("tl_i2c_set: unknown bit %d\n", bit);
897 }
898 return;
899 }
900
901 void tl_i2c_clr(v, bit)
902 void *v;
903 u_int8_t bit;
904 {
905 tl_softc_t *sc = v;
906
907 switch (bit) {
908 case I2C_DATA:
909 netsio_clr(sc, TL_NETSIO_EDATA);
910 break;
911 case I2C_CLOCK:
912 netsio_clr(sc, TL_NETSIO_ECLOCK);
913 break;
914 case I2C_TXEN:
915 netsio_clr(sc, TL_NETSIO_ETXEN);
916 break;
917 default:
918 printf("tl_i2c_clr: unknown bit %d\n", bit);
919 }
920 return;
921 }
922
923 int tl_i2c_read(v, bit)
924 void *v;
925 u_int8_t bit;
926 {
927 tl_softc_t *sc = v;
928
929 switch (bit) {
930 case I2C_DATA:
931 return netsio_read(sc, TL_NETSIO_EDATA);
932 break;
933 case I2C_CLOCK:
934 return netsio_read(sc, TL_NETSIO_ECLOCK);
935 break;
936 case I2C_TXEN:
937 return netsio_read(sc, TL_NETSIO_ETXEN);
938 break;
939 default:
940 printf("tl_i2c_read: unknown bit %d\n", bit);
941 return -1;
942 }
943 }
944
945 static int
946 tl_intr(v)
947 void *v;
948 {
949 tl_softc_t *sc = v;
950 struct ifnet *ifp = &sc->tl_if;
951 struct Rx_list *Rx;
952 struct Tx_list *Tx;
953 struct mbuf *m;
954 u_int32_t int_type, int_reg;
955 int ack = 0;
956 int size;
957
958 int_reg = TL_HR_READ(sc, TL_HOST_INTR_DIOADR);
959 int_type = int_reg & TL_INTR_MASK;
960 if (int_type == 0)
961 return 0;
962 #if defined(TLDEBUG_RX) || defined(TLDEBUG_TX)
963 printf("%s: interrupt type %x, intr_reg %x\n", sc->sc_dev.dv_xname,
964 int_type, int_reg);
965 #endif
966 /* disable interrupts */
967 TL_HR_WRITE(sc, TL_HOST_CMD, HOST_CMD_IntOff);
968 switch(int_type & TL_INTR_MASK) {
969 case TL_INTR_RxEOF:
970 bus_dmamap_sync(sc->tl_dmatag, sc->Rx_dmamap, 0,
971 sizeof(struct tl_Rx_list) * TL_NBUF,
972 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
973 while(le32toh(sc->active_Rx->hw_list->stat) &
974 TL_RX_CSTAT_CPLT) {
975 /* dequeue and requeue at end of list */
976 ack++;
977 Rx = sc->active_Rx;
978 sc->active_Rx = Rx->next;
979 bus_dmamap_sync(sc->tl_dmatag, Rx->m_dmamap, 0,
980 MCLBYTES, BUS_DMASYNC_POSTREAD);
981 bus_dmamap_unload(sc->tl_dmatag, Rx->m_dmamap);
982 m = Rx->m;
983 size = le32toh(Rx->hw_list->stat) >> 16;
984 #ifdef TLDEBUG_RX
985 printf("tl_intr: RX list complete, Rx %p, size=%d\n",
986 Rx, size);
987 #endif
988 if (tl_add_RxBuff(sc, Rx, m ) == 0) {
989 /*
				 * No new mbuf could be allocated, so
				 * reuse the old one; the received
				 * packet is lost.
993 */
994 m = NULL;
995 #ifdef TL_PRIV_STATS
996 sc->ierr_nomem++;
997 #endif
998 #ifdef TLDEBUG
999 printf("%s: out of mbuf, lost input packet\n",
1000 sc->sc_dev.dv_xname);
1001 #endif
1002 }
1003 Rx->next = NULL;
1004 Rx->hw_list->fwd = 0;
1005 sc->last_Rx->hw_list->fwd = htole32(Rx->hw_listaddr);
1006 sc->last_Rx->next = Rx;
1007 sc->last_Rx = Rx;
1008
1009 /* deliver packet */
1010 if (m) {
1011 if (size < sizeof(struct ether_header)) {
1012 m_freem(m);
1013 continue;
1014 }
1015 m->m_pkthdr.rcvif = ifp;
1016 m->m_pkthdr.len = m->m_len = size;
1017 #ifdef TLDEBUG_RX
1018 { struct ether_header *eh =
1019 mtod(m, struct ether_header *);
1020 printf("tl_intr: Rx packet:\n");
1021 ether_printheader(eh); }
1022 #endif
1023 #if NBPFILTER > 0
1024 if (ifp->if_bpf)
1025 bpf_mtap(ifp->if_bpf, m);
1026 #endif /* NBPFILTER > 0 */
1027 (*ifp->if_input)(ifp, m);
1028 }
1029 }
1030 bus_dmamap_sync(sc->tl_dmatag, sc->Rx_dmamap, 0,
1031 sizeof(struct tl_Rx_list) * TL_NBUF,
1032 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1033 #ifdef TLDEBUG_RX
1034 printf("TL_INTR_RxEOF: ack %d\n", ack);
1035 #else
1036 if (ack == 0) {
			printf("%s: EOF intr without anything to read!\n",
1038 sc->sc_dev.dv_xname);
1039 tl_reset(sc);
			/* schedule reinit of the board */
1041 callout_reset(&sc->tl_restart_ch, 1, tl_restart, sc);
1042 return(1);
1043 }
1044 #endif
1045 break;
1046 case TL_INTR_RxEOC:
1047 ack++;
1048 bus_dmamap_sync(sc->tl_dmatag, sc->Rx_dmamap, 0,
1049 sizeof(struct tl_Rx_list) * TL_NBUF,
1050 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1051 #ifdef TLDEBUG_RX
1052 printf("TL_INTR_RxEOC: ack %d\n", ack);
1053 #endif
1054 #ifdef DIAGNOSTIC
1055 if (le32toh(sc->active_Rx->hw_list->stat) & TL_RX_CSTAT_CPLT) {
			printf("%s: Rx EOC interrupt and active Rx list not "
			    "cleared\n", sc->sc_dev.dv_xname);
1058 return 0;
1059 } else
1060 #endif
1061 {
1062 /*
			 * write address of Rx list and send Rx GO command, ack
1064 * interrupt and enable interrupts in one command
1065 */
1066 TL_HR_WRITE(sc, TL_HOST_CH_PARM, sc->active_Rx->hw_listaddr);
1067 TL_HR_WRITE(sc, TL_HOST_CMD,
1068 HOST_CMD_GO | HOST_CMD_RT | HOST_CMD_Nes | ack | int_type |
1069 HOST_CMD_ACK | HOST_CMD_IntOn);
1070 return 1;
1071 }
1072 case TL_INTR_TxEOF:
1073 case TL_INTR_TxEOC:
1074 bus_dmamap_sync(sc->tl_dmatag, sc->Tx_dmamap, 0,
1075 sizeof(struct tl_Tx_list) * TL_NBUF,
1076 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1077 while ((Tx = sc->active_Tx) != NULL) {
1078 if((le32toh(Tx->hw_list->stat) & TL_TX_CSTAT_CPLT) == 0)
1079 break;
1080 ack++;
1081 #ifdef TLDEBUG_TX
1082 printf("TL_INTR_TxEOC: list 0x%x done\n",
1083 (int)Tx->hw_listaddr);
1084 #endif
1085 Tx->hw_list->stat = 0;
1086 bus_dmamap_sync(sc->tl_dmatag, Tx->m_dmamap, 0,
1087 MCLBYTES, BUS_DMASYNC_POSTWRITE);
1088 bus_dmamap_unload(sc->tl_dmatag, Tx->m_dmamap);
1089 m_freem(Tx->m);
1090 Tx->m = NULL;
1091 sc->active_Tx = Tx->next;
1092 if (sc->active_Tx == NULL)
1093 sc->last_Tx = NULL;
1094 Tx->next = sc->Free_Tx;
1095 sc->Free_Tx = Tx;
1096 }
1097 bus_dmamap_sync(sc->tl_dmatag, sc->Tx_dmamap, 0,
1098 sizeof(struct tl_Tx_list) * TL_NBUF,
1099 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/* if this was an EOC, ACK immediately */
1101 if (int_type == TL_INTR_TxEOC) {
1102 #ifdef TLDEBUG_TX
1103 printf("TL_INTR_TxEOC: ack %d (will be set to 1)\n",
1104 ack);
1105 #endif
1106 TL_HR_WRITE(sc, TL_HOST_CMD, 1 | int_type |
1107 HOST_CMD_ACK | HOST_CMD_IntOn);
1108 if ( sc->active_Tx != NULL) {
1109 /* needs a Tx go command */
1110 TL_HR_WRITE(sc, TL_HOST_CH_PARM,
1111 sc->active_Tx->hw_listaddr);
1112 TL_HR_WRITE(sc, TL_HOST_CMD, HOST_CMD_GO);
1113 }
1114 sc->tl_if.if_timer = 0;
1115 if (sc->tl_if.if_snd.ifq_head != NULL)
1116 tl_ifstart(&sc->tl_if);
1117 return 1;
1118 }
1119 #ifdef TLDEBUG
1120 else {
1121 printf("TL_INTR_TxEOF: ack %d\n", ack);
1122 }
1123 #endif
1124 sc->tl_if.if_timer = 0;
1125 if (sc->tl_if.if_snd.ifq_head != NULL)
1126 tl_ifstart(&sc->tl_if);
1127 break;
1128 case TL_INTR_Stat:
1129 ack++;
1130 #ifdef TLDEBUG
1131 printf("TL_INTR_Stat: ack %d\n", ack);
1132 #endif
1133 tl_read_stats(sc);
1134 break;
1135 case TL_INTR_Adc:
1136 if (int_reg & TL_INTVec_MASK) {
1137 /* adapter check conditions */
1138 printf("%s: check condition, intvect=0x%x, "
1139 "ch_param=0x%x\n", sc->sc_dev.dv_xname,
1140 int_reg & TL_INTVec_MASK,
1141 TL_HR_READ(sc, TL_HOST_CH_PARM));
1142 tl_reset(sc);
			/* schedule reinit of the board */
1144 callout_reset(&sc->tl_restart_ch, 1, tl_restart, sc);
1145 return(1);
1146 } else {
1147 u_int8_t netstat;
1148 /* Network status */
1149 netstat =
1150 tl_intreg_read_byte(sc, TL_INT_NET+TL_INT_NetSts);
1151 printf("%s: network status, NetSts=%x\n",
1152 sc->sc_dev.dv_xname, netstat);
1153 /* Ack interrupts */
1154 tl_intreg_write_byte(sc, TL_INT_NET+TL_INT_NetSts,
1155 netstat);
1156 ack++;
1157 }
1158 break;
1159 default:
1160 printf("%s: unhandled interrupt code %x!\n",
1161 sc->sc_dev.dv_xname, int_type);
1162 ack++;
1163 }
1164
1165 if (ack) {
1166 /* Ack the interrupt and enable interrupts */
1167 TL_HR_WRITE(sc, TL_HOST_CMD, ack | int_type | HOST_CMD_ACK |
1168 HOST_CMD_IntOn);
1169 return 1;
1170 }
	/* ack == 0: interrupt was perhaps not ours; just enable interrupts */
1172 TL_HR_WRITE(sc, TL_HOST_CMD, HOST_CMD_IntOn);
1173 return 0;
1174 }
1175
1176 static int
1177 tl_ifioctl(ifp, cmd, data)
1178 struct ifnet *ifp;
1179 ioctl_cmd_t cmd;
1180 caddr_t data;
1181 {
1182 struct tl_softc *sc = ifp->if_softc;
1183 struct ifreq *ifr = (struct ifreq *)data;
1184 int s, error;
1185
1186 s = splnet();
1187 switch(cmd) {
1188 case SIOCSIFADDR: {
1189 struct ifaddr *ifa = (struct ifaddr *)data;
1190 sc->tl_if.if_flags |= IFF_UP;
		if ((error = tl_init(sc)) != 0) {
1192 sc->tl_if.if_flags &= ~IFF_UP;
1193 break;
1194 }
1195 switch (ifa->ifa_addr->sa_family) {
1196 #ifdef INET
1197 case AF_INET:
1198 arp_ifinit(ifp, ifa);
1199 break;
1200 #endif
1201 #ifdef NS
1202 case AF_NS: {
1203 struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;
1204
1205 if (ns_nullhost(*ina))
1206 ina->x_host =
1207 *(union ns_host*) LLADDR(ifp->if_sadl);
1208 else
1209 memcpy(LLADDR(ifp->if_sadl), ina->x_host.c_host,
1210 ifp->if_addrlen);
1211 break;
1212 }
1213 #endif
1214 default:
1215 break;
1216 }
1217 break;
1218 }
1219 case SIOCSIFFLAGS:
1220 {
1221 u_int8_t reg;
1222 /*
1223 * If interface is marked up and not running, then start it.
1224 * If it is marked down and running, stop it.
1225 */
1226 if (ifp->if_flags & IFF_UP) {
1227 if ((ifp->if_flags & IFF_RUNNING) == 0) {
1228 error = tl_init(sc);
1229 /* all flags have been handled by init */
1230 break;
1231 }
1232 error = 0;
1233 reg = tl_intreg_read_byte(sc,
1234 TL_INT_NET + TL_INT_NetCmd);
1235 if (ifp->if_flags & IFF_PROMISC)
1236 reg |= TL_NETCOMMAND_CAF;
1237 else
1238 reg &= ~TL_NETCOMMAND_CAF;
1239 tl_intreg_write_byte(sc, TL_INT_NET + TL_INT_NetCmd,
1240 reg);
1241 #ifdef TL_PRIV_STATS
1242 if (ifp->if_flags & IFF_LINK0) {
1243 ifp->if_flags &= ~IFF_LINK0;
1244 printf("%s errors statistics\n",
1245 sc->sc_dev.dv_xname);
1246 printf(" %4d RX buffer overrun\n",
1247 sc->ierr_overr);
1248 printf(" %4d RX code error\n",
1249 sc->ierr_code);
1250 printf(" %4d RX crc error\n",
1251 sc->ierr_crc);
1252 printf(" %4d RX out of memory\n",
1253 sc->ierr_nomem);
1254 printf(" %4d TX buffer underrun\n",
1255 sc->oerr_underr);
				printf(" %4d TX deferred frames\n",
1257 sc->oerr_deffered);
1258 printf(" %4d TX single collisions\n",
1259 sc->oerr_coll);
1260 printf(" %4d TX multi collisions\n",
1261 sc->oerr_multicoll);
				printf(" %4d TX excessive collisions\n",
1263 sc->oerr_exesscoll);
1264 printf(" %4d TX late collisions\n",
1265 sc->oerr_latecoll);
1266 printf(" %4d TX carrier loss\n",
1267 sc->oerr_carrloss);
1268 printf(" %4d TX mbuf copy\n",
1269 sc->oerr_mcopy);
1270 }
1271 #endif
1272 } else {
1273 if (ifp->if_flags & IFF_RUNNING)
1274 tl_shutdown(sc);
1275 error = 0;
1276 }
1277 break;
1278 }
1279 case SIOCADDMULTI:
1280 case SIOCDELMULTI:
1281 /*
1282 * Update multicast listeners
1283 */
1284 if (cmd == SIOCADDMULTI)
1285 error = ether_addmulti(ifr, &sc->tl_ec);
1286 else
1287 error = ether_delmulti(ifr, &sc->tl_ec);
1288 if (error == ENETRESET) {
1289 tl_addr_filter(sc);
1290 error = 0;
1291 }
1292 break;
1293 case SIOCSIFMEDIA:
1294 case SIOCGIFMEDIA:
1295 error = ifmedia_ioctl(ifp, ifr, &sc->tl_mii.mii_media, cmd);
1296 break;
1297 default:
1298 error = EINVAL;
1299 }
1300 splx(s);
1301 return error;
1302 }
1303
1304 static void
1305 tl_ifstart(ifp)
1306 struct ifnet *ifp;
1307 {
1308 tl_softc_t *sc = ifp->if_softc;
1309 struct mbuf *mb_head;
1310 struct Tx_list *Tx;
1311 int segment, size;
1312 int again = 0, error;
1313
1314 txloop:
1315 /* If we don't have more space ... */
1316 if (sc->Free_Tx == NULL) {
1317 #ifdef TLDEBUG
1318 printf("tl_ifstart: No free TX list\n");
1319 #endif
1320 return;
1321 }
	/* Grab a packet for output */
1323 IF_DEQUEUE(&ifp->if_snd, mb_head);
1324 if (mb_head == NULL) {
1325 #ifdef TLDEBUG_TX
1326 printf("tl_ifstart: nothing to send\n");
1327 #endif
1328 return;
1329 }
1330 Tx = sc->Free_Tx;
1331 sc->Free_Tx = Tx->next;
1332 Tx->next = NULL;
1333 /*
1334 * Go through each of the mbufs in the chain and initialize
1335 * the transmit list descriptors with the physical address
1336 * and size of the mbuf.
1337 */
1338 tbdinit:
1339 memset(Tx->hw_list, 0, sizeof(struct tl_Tx_list));
1340 Tx->m = mb_head;
1341 size = mb_head->m_pkthdr.len;
1342 if ((error = bus_dmamap_load_mbuf(sc->tl_dmatag, Tx->m_dmamap, mb_head,
1343 BUS_DMA_NOWAIT)) || (size < ETHER_MIN_TX &&
1344 Tx->m_dmamap->dm_nsegs == TL_NSEG)) {
1345 struct mbuf *mn;
1346 /*
		 * We ran out of segments, or will once the pad segment is
		 * added.  Copy the mbuf chain into a single mbuf first.
1349 */
1350 if (error == 0)
1351 bus_dmamap_unload(sc->tl_dmatag, Tx->m_dmamap);
1352 if (again) {
			/* already copied, can't do much more */
1354 m_freem(mb_head);
1355 goto bad;
1356 }
1357 again = 1;
1358 #ifdef TLDEBUG_TX
1359 printf("tl_ifstart: need to copy mbuf\n");
1360 #endif
1361 #ifdef TL_PRIV_STATS
1362 sc->oerr_mcopy++;
1363 #endif
1364 MGETHDR(mn, M_DONTWAIT, MT_DATA);
1365 if (mn == NULL) {
1366 m_freem(mb_head);
1367 goto bad;
1368 }
1369 if (mb_head->m_pkthdr.len > MHLEN) {
1370 MCLGET(mn, M_DONTWAIT);
1371 if ((mn->m_flags & M_EXT) == 0) {
1372 m_freem(mn);
1373 m_freem(mb_head);
1374 goto bad;
1375 }
1376 }
1377 m_copydata(mb_head, 0, mb_head->m_pkthdr.len,
1378 mtod(mn, caddr_t));
1379 mn->m_pkthdr.len = mn->m_len = mb_head->m_pkthdr.len;
1380 m_freem(mb_head);
1381 mb_head = mn;
1382 goto tbdinit;
1383 }
1384 for (segment = 0; segment < Tx->m_dmamap->dm_nsegs; segment++) {
1385 Tx->hw_list->seg[segment].data_addr =
1386 htole32(Tx->m_dmamap->dm_segs[segment].ds_addr);
1387 Tx->hw_list->seg[segment].data_count =
1388 htole32(Tx->m_dmamap->dm_segs[segment].ds_len);
1389 }
1390 bus_dmamap_sync(sc->tl_dmatag, Tx->m_dmamap, 0, size,
1391 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/*
	 * We are at the end of the mbuf chain; check the size and see
	 * whether the frame needs to be padded.
	 */
1395 if (size < ETHER_MIN_TX) {
1396 #ifdef DIAGNOSTIC
1397 if (segment >= TL_NSEG) {
			panic("tl_ifstart: too many segments (%d)", segment);
1399 }
1400 #endif
		/*
		 * Pad the frame to ETHER_MIN_TX by adding the shared
		 * null (all-zero) buffer as an extra segment.
		 */
1404 Tx->hw_list->seg[segment].data_count =
1405 htole32(ETHER_MIN_TX - size);
1406 Tx->hw_list->seg[segment].data_addr =
1407 htole32(sc->null_dmamap->dm_segs[0].ds_addr);
1408 size = ETHER_MIN_TX;
1409 segment++;
1410 }
1411 /* The list is done, finish the list init */
1412 Tx->hw_list->seg[segment-1].data_count |=
1413 htole32(TL_LAST_SEG);
1414 Tx->hw_list->stat = htole32((size << 16) | 0x3000);
1415 #ifdef TLDEBUG_TX
1416 printf("%s: sending, Tx : stat = 0x%x\n", sc->sc_dev.dv_xname,
1417 le32toh(Tx->hw_list->stat));
1418 #if 0
1419 for(segment = 0; segment < TL_NSEG; segment++) {
1420 printf(" seg %d addr 0x%x len 0x%x\n",
1421 segment,
1422 le32toh(Tx->hw_list->seg[segment].data_addr),
1423 le32toh(Tx->hw_list->seg[segment].data_count));
1424 }
1425 #endif
1426 #endif
1427 if (sc->active_Tx == NULL) {
1428 sc->active_Tx = sc->last_Tx = Tx;
1429 #ifdef TLDEBUG_TX
		printf("%s: Tx GO, addr=0x%x\n", sc->sc_dev.dv_xname,
1431 (int)Tx->hw_listaddr);
1432 #endif
1433 bus_dmamap_sync(sc->tl_dmatag, sc->Tx_dmamap, 0,
1434 sizeof(struct tl_Tx_list) * TL_NBUF,
1435 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1436 TL_HR_WRITE(sc, TL_HOST_CH_PARM, Tx->hw_listaddr);
1437 TL_HR_WRITE(sc, TL_HOST_CMD, HOST_CMD_GO);
1438 } else {
1439 #ifdef TLDEBUG_TX
		printf("%s: Tx addr=0x%x queued\n", sc->sc_dev.dv_xname,
1441 (int)Tx->hw_listaddr);
1442 #endif
1443 sc->last_Tx->hw_list->fwd = htole32(Tx->hw_listaddr);
1444 sc->last_Tx->next = Tx;
1445 sc->last_Tx = Tx;
1446 #ifdef DIAGNOSTIC
1447 if (sc->last_Tx->hw_list->fwd & 0x7)
1448 printf("%s: physical addr 0x%x of list not properly "
1449 "aligned\n",
			    sc->sc_dev.dv_xname, sc->last_Tx->hw_list->fwd);
1451 #endif
1452 bus_dmamap_sync(sc->tl_dmatag, sc->Tx_dmamap, 0,
1453 sizeof(struct tl_Tx_list) * TL_NBUF,
1454 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1455 }
1456 #if NBPFILTER > 0
1457 /* Pass packet to bpf if there is a listener */
1458 if (ifp->if_bpf)
1459 bpf_mtap(ifp->if_bpf, mb_head);
1460 #endif
1461 /*
1462 * Set a 5 second timer just in case we don't hear from the card again.
1463 */
1464 ifp->if_timer = 5;
1465 goto txloop;
1466 bad:
1467 #ifdef TLDEBUG
1468 printf("tl_ifstart: Out of mbuf, Tx pkt lost\n");
1469 #endif
1470 Tx->next = sc->Free_Tx;
1471 sc->Free_Tx = Tx;
1472 return;
1473 }
1474
1475 static void
1476 tl_ifwatchdog(ifp)
1477 struct ifnet *ifp;
1478 {
1479 tl_softc_t *sc = ifp->if_softc;
1480
1481 if ((ifp->if_flags & IFF_RUNNING) == 0)
1482 return;
1483 printf("%s: device timeout\n", sc->sc_dev.dv_xname);
1484 ifp->if_oerrors++;
1485 tl_init(sc);
1486 }
1487
1488 static int
1489 tl_mediachange(ifp)
1490 struct ifnet *ifp;
1491 {
1492
1493 if (ifp->if_flags & IFF_UP)
1494 tl_init(ifp->if_softc);
1495 return (0);
1496 }
1497
1498 static void
1499 tl_mediastatus(ifp, ifmr)
1500 struct ifnet *ifp;
1501 struct ifmediareq *ifmr;
1502 {
1503 tl_softc_t *sc = ifp->if_softc;
1504
1505 mii_pollstat(&sc->tl_mii);
1506 ifmr->ifm_active = sc->tl_mii.mii_media_active;
1507 ifmr->ifm_status = sc->tl_mii.mii_media_status;
1508 }
1509
1510 static int tl_add_RxBuff(sc, Rx, oldm)
1511 tl_softc_t *sc;
1512 struct Rx_list *Rx;
1513 struct mbuf *oldm;
1514 {
1515 struct mbuf *m;
1516 int error;
1517
1518 MGETHDR(m, M_DONTWAIT, MT_DATA);
1519 if (m != NULL) {
1520 MCLGET(m, M_DONTWAIT);
1521 if ((m->m_flags & M_EXT) == 0) {
1522 m_freem(m);
1523 if (oldm == NULL)
1524 return 0;
1525 m = oldm;
1526 m->m_data = m->m_ext.ext_buf;
1527 }
1528 } else {
1529 if (oldm == NULL)
1530 return 0;
1531 m = oldm;
1532 m->m_data = m->m_ext.ext_buf;
1533 }
1534
1535 /* (re)init the Rx_list struct */
1536
1537 Rx->m = m;
1538 if ((error = bus_dmamap_load(sc->tl_dmatag, Rx->m_dmamap,
1539 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT)) != 0) {
1540 printf("%s: bus_dmamap_load() failed (error %d) for "
1541 "tl_add_RxBuff\n", sc->sc_dev.dv_xname, error);
1542 printf("size %d (%d)\n", m->m_pkthdr.len, MCLBYTES);
1543 m_freem(m);
1544 Rx->m = NULL;
1545 return 0;
1546 }
1547 bus_dmamap_sync(sc->tl_dmatag, Rx->m_dmamap, 0,
1548 MCLBYTES, BUS_DMASYNC_PREREAD);
1549 /*
	 * Move the data pointer up by two bytes so that the incoming
	 * packet's payload is 32-bit aligned (the Ethernet header is
	 * 14 bytes long, so offsetting it by 2 aligns the IP header).
1552 */
1553 m->m_data += 2;
1554
1555 Rx->hw_list->stat =
1556 htole32(((Rx->m_dmamap->dm_segs[0].ds_len -2) << 16) | 0x3000);
1557 Rx->hw_list->seg.data_count =
1558 htole32(Rx->m_dmamap->dm_segs[0].ds_len -2);
1559 Rx->hw_list->seg.data_addr =
1560 htole32(Rx->m_dmamap->dm_segs[0].ds_addr + 2);
1561 return (m != oldm);
1562 }
1563
1564 static void tl_ticks(v)
1565 void *v;
1566 {
1567 tl_softc_t *sc = v;
1568
1569 tl_read_stats(sc);
1570
1571 /* Tick the MII. */
1572 mii_tick(&sc->tl_mii);
1573
	/* read statistics every second */
1575 callout_reset(&sc->tl_tick_ch, hz, tl_ticks, sc);
1576 }
1577
1578 static void
1579 tl_read_stats(sc)
1580 tl_softc_t *sc;
1581 {
1582 u_int32_t reg;
1583 int ierr_overr;
1584 int ierr_code;
1585 int ierr_crc;
1586 int oerr_underr;
1587 int oerr_deffered;
1588 int oerr_coll;
1589 int oerr_multicoll;
1590 int oerr_exesscoll;
1591 int oerr_latecoll;
1592 int oerr_carrloss;
1593 struct ifnet *ifp = &sc->tl_if;
1594
1595 reg = tl_intreg_read(sc, TL_INT_STATS_TX);
1596 ifp->if_opackets += reg & 0x00ffffff;
1597 oerr_underr = reg >> 24;
1598
1599 reg = tl_intreg_read(sc, TL_INT_STATS_RX);
1600 ifp->if_ipackets += reg & 0x00ffffff;
1601 ierr_overr = reg >> 24;
1602
1603 reg = tl_intreg_read(sc, TL_INT_STATS_FERR);
1604 ierr_crc = (reg & TL_FERR_CRC) >> 16;
1605 ierr_code = (reg & TL_FERR_CODE) >> 24;
1606 oerr_deffered = (reg & TL_FERR_DEF);
1607
1608 reg = tl_intreg_read(sc, TL_INT_STATS_COLL);
1609 oerr_multicoll = (reg & TL_COL_MULTI);
1610 oerr_coll = (reg & TL_COL_SINGLE) >> 16;
1611
1612 reg = tl_intreg_read(sc, TL_INT_LERR);
1613 oerr_exesscoll = (reg & TL_LERR_ECOLL);
1614 oerr_latecoll = (reg & TL_LERR_LCOLL) >> 8;
1615 oerr_carrloss = (reg & TL_LERR_CL) >> 16;
1616
1617
1618 ifp->if_oerrors += oerr_underr + oerr_exesscoll + oerr_latecoll +
1619 oerr_carrloss;
1620 ifp->if_collisions += oerr_coll + oerr_multicoll;
1621 ifp->if_ierrors += ierr_overr + ierr_code + ierr_crc;
1622
1623 if (ierr_overr)
1624 printf("%s: receiver ring buffer overrun\n",
1625 sc->sc_dev.dv_xname);
1626 if (oerr_underr)
1627 printf("%s: transmit buffer underrun\n",
1628 sc->sc_dev.dv_xname);
1629 #ifdef TL_PRIV_STATS
1630 sc->ierr_overr += ierr_overr;
1631 sc->ierr_code += ierr_code;
1632 sc->ierr_crc += ierr_crc;
1633 sc->oerr_underr += oerr_underr;
1634 sc->oerr_deffered += oerr_deffered;
1635 sc->oerr_coll += oerr_coll;
1636 sc->oerr_multicoll += oerr_multicoll;
1637 sc->oerr_exesscoll += oerr_exesscoll;
1638 sc->oerr_latecoll += oerr_latecoll;
1639 sc->oerr_carrloss += oerr_carrloss;
1640 #endif
1641 }
1642
1643 static void tl_addr_filter(sc)
1644 tl_softc_t *sc;
1645 {
1646 struct ether_multistep step;
1647 struct ether_multi *enm;
1648 u_int32_t hash[2] = {0, 0};
1649 int i;
1650
1651 sc->tl_if.if_flags &= ~IFF_ALLMULTI;
1652 ETHER_FIRST_MULTI(step, &sc->tl_ec, enm);
1653 while (enm != NULL) {
1654 #ifdef TLDEBUG
1655 printf("tl_addr_filter: addrs %s %s\n",
1656 ether_sprintf(enm->enm_addrlo),
1657 ether_sprintf(enm->enm_addrhi));
1658 #endif
1659 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6) == 0) {
1660 i = tl_multicast_hash(enm->enm_addrlo);
1661 hash[i/32] |= 1 << (i%32);
1662 } else {
1663 hash[0] = hash[1] = 0xffffffff;
1664 sc->tl_if.if_flags |= IFF_ALLMULTI;
1665 break;
1666 }
1667 ETHER_NEXT_MULTI(step, enm);
1668 }
1669 #ifdef TLDEBUG
	printf("tl_addr_filter: hash1 %x hash2 %x\n", hash[0], hash[1]);
1671 #endif
1672 tl_intreg_write(sc, TL_INT_HASH1, hash[0]);
1673 tl_intreg_write(sc, TL_INT_HASH2, hash[1]);
1674 }
1675
1676 static int tl_multicast_hash(a)
1677 u_int8_t *a;
1678 {
1679 int hash;
1680
1681 #define DA(addr,bit) (addr[5 - (bit/8)] & (1 << bit%8))
1682 #define xor8(a,b,c,d,e,f,g,h) (((a != 0) + (b != 0) + (c != 0) + (d != 0) + (e != 0) + (f != 0) + (g != 0) + (h != 0)) & 1)
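/*
 * Worked example (following the macros above): for the all-ones broadcast
 * address ff:ff:ff:ff:ff:ff every DA() term is non-zero, so each xor8() of
 * eight such terms is 8 & 1 = 0, and the resulting hash index is 0, i.e.
 * bit 0 of the HASH1 register written by tl_addr_filter().
 */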
1683
1684 hash = xor8( DA(a,0), DA(a, 6), DA(a,12), DA(a,18), DA(a,24), DA(a,30),
1685 DA(a,36), DA(a,42));
1686 hash |= xor8( DA(a,1), DA(a, 7), DA(a,13), DA(a,19), DA(a,25), DA(a,31),
1687 DA(a,37), DA(a,43)) << 1;
1688 hash |= xor8( DA(a,2), DA(a, 8), DA(a,14), DA(a,20), DA(a,26), DA(a,32),
1689 DA(a,38), DA(a,44)) << 2;
1690 hash |= xor8( DA(a,3), DA(a, 9), DA(a,15), DA(a,21), DA(a,27), DA(a,33),
1691 DA(a,39), DA(a,45)) << 3;
1692 hash |= xor8( DA(a,4), DA(a,10), DA(a,16), DA(a,22), DA(a,28), DA(a,34),
1693 DA(a,40), DA(a,46)) << 4;
1694 hash |= xor8( DA(a,5), DA(a,11), DA(a,17), DA(a,23), DA(a,29), DA(a,35),
1695 DA(a,41), DA(a,47)) << 5;
1696
1697 return hash;
1698 }
1699
1700 #if defined(TLDEBUG_RX)
static void
ether_printheader(eh)
	struct ether_header *eh;
{
	u_char *c = (u_char *)eh;
	int i;

	for (i = 0; i < sizeof(struct ether_header); i++)
		printf("%x ", (u_int)c[i]);
	printf("\n");
}
1711 #endif
1712