/*	$NetBSD: if_pcn.c,v 1.31 2006/06/17 23:34:27 christos Exp $	*/
2
3 /*
4 * Copyright (c) 2001 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Device driver for the AMD PCnet-PCI series of Ethernet
40 * chips:
41 *
42 * * Am79c970 PCnet-PCI Single-Chip Ethernet Controller for PCI
43 * Local Bus
44 *
45 * * Am79c970A PCnet-PCI II Single-Chip Full-Duplex Ethernet Controller
46 * for PCI Local Bus
47 *
48 * * Am79c971 PCnet-FAST Single-Chip Full-Duplex 10/100Mbps
49 * Ethernet Controller for PCI Local Bus
50 *
51 * * Am79c972 PCnet-FAST+ Enhanced 10/100Mbps PCI Ethernet Controller
52 * with OnNow Support
53 *
54 * * Am79c973/Am79c975 PCnet-FAST III Single-Chip 10/100Mbps PCI
55 * Ethernet Controller with Integrated PHY
56 *
57 * This also supports the virtual PCnet-PCI Ethernet interface found
58 * in VMware.
59 *
60 * TODO:
61 *
62 * * Split this into bus-specific and bus-independent portions.
63 * The core could also be used for the ILACC (Am79900) 32-bit
64 * Ethernet chip (XXX only if we use an ILACC-compatible SWSTYLE).
65 */
66
67 #include "opt_pcn.h"
68
69 #include <sys/cdefs.h>
70 __KERNEL_RCSID(0, "$NetBSD: if_pcn.c,v 1.31 2006/06/17 23:34:27 christos Exp $");
71
72 #include "bpfilter.h"
73 #include "rnd.h"
74
75 #include <sys/param.h>
76 #include <sys/systm.h>
77 #include <sys/callout.h>
78 #include <sys/mbuf.h>
79 #include <sys/malloc.h>
80 #include <sys/kernel.h>
81 #include <sys/socket.h>
82 #include <sys/ioctl.h>
83 #include <sys/errno.h>
84 #include <sys/device.h>
85 #include <sys/queue.h>
86
87 #if NRND > 0
88 #include <sys/rnd.h>
89 #endif
90
91 #include <uvm/uvm_extern.h> /* for PAGE_SIZE */
92
93 #include <net/if.h>
94 #include <net/if_dl.h>
95 #include <net/if_media.h>
96 #include <net/if_ether.h>
97
98 #if NBPFILTER > 0
99 #include <net/bpf.h>
100 #endif
101
102 #include <machine/bus.h>
103 #include <machine/intr.h>
104 #include <machine/endian.h>
105
106 #include <dev/mii/mii.h>
107 #include <dev/mii/miivar.h>
108
109 #include <dev/ic/am79900reg.h>
110 #include <dev/ic/lancereg.h>
111
112 #include <dev/pci/pcireg.h>
113 #include <dev/pci/pcivar.h>
114 #include <dev/pci/pcidevs.h>
115
116 #include <dev/pci/if_pcnreg.h>
117
118 /*
119 * Transmit descriptor list size. This is arbitrary, but allocate
120 * enough descriptors for 128 pending transmissions, and 4 segments
121 * per packet. This MUST work out to a power of 2.
122 *
123 * NOTE: We can't have any more than 512 Tx descriptors, SO BE CAREFUL!
124 *
125 * So we play a little trick here. We give each packet up to 16
126 * DMA segments, but only allocate the max of 512 descriptors. The
127 * transmit logic can deal with this, we just are hoping to sneak by.
128 */
129 #define PCN_NTXSEGS 16
130
131 #define PCN_TXQUEUELEN 128
132 #define PCN_TXQUEUELEN_MASK (PCN_TXQUEUELEN - 1)
133 #define PCN_NTXDESC 512
134 #define PCN_NTXDESC_MASK (PCN_NTXDESC - 1)
135 #define PCN_NEXTTX(x) (((x) + 1) & PCN_NTXDESC_MASK)
136 #define PCN_NEXTTXS(x) (((x) + 1) & PCN_TXQUEUELEN_MASK)
137
138 /* Tx interrupt every N + 1 packets. */
139 #define PCN_TXINTR_MASK 7
140
141 /*
142 * Receive descriptor list size. We have one Rx buffer per incoming
143 * packet, so this logic is a little simpler.
144 */
145 #define PCN_NRXDESC 128
146 #define PCN_NRXDESC_MASK (PCN_NRXDESC - 1)
147 #define PCN_NEXTRX(x) (((x) + 1) & PCN_NRXDESC_MASK)
148
149 /*
150 * Control structures are DMA'd to the PCnet chip. We allocate them in
151 * a single clump that maps to a single DMA segment to make several things
152 * easier.
153 */
154 struct pcn_control_data {
155 /* The transmit descriptors. */
156 struct letmd pcd_txdescs[PCN_NTXDESC];
157
158 /* The receive descriptors. */
159 struct lermd pcd_rxdescs[PCN_NRXDESC];
160
161 /* The init block. */
162 struct leinit pcd_initblock;
163 };
164
165 #define PCN_CDOFF(x) offsetof(struct pcn_control_data, x)
166 #define PCN_CDTXOFF(x) PCN_CDOFF(pcd_txdescs[(x)])
167 #define PCN_CDRXOFF(x) PCN_CDOFF(pcd_rxdescs[(x)])
168 #define PCN_CDINITOFF PCN_CDOFF(pcd_initblock)
169
170 /*
171 * Software state for transmit jobs.
172 */
173 struct pcn_txsoft {
174 struct mbuf *txs_mbuf; /* head of our mbuf chain */
175 bus_dmamap_t txs_dmamap; /* our DMA map */
176 int txs_firstdesc; /* first descriptor in packet */
177 int txs_lastdesc; /* last descriptor in packet */
178 };
179
180 /*
181 * Software state for receive jobs.
182 */
183 struct pcn_rxsoft {
184 struct mbuf *rxs_mbuf; /* head of our mbuf chain */
185 bus_dmamap_t rxs_dmamap; /* our DMA map */
186 };
187
188 /*
189 * Description of Rx FIFO watermarks for various revisions.
190 */
191 static const char * const pcn_79c970_rcvfw[] = {
192 "16 bytes",
193 "64 bytes",
194 "128 bytes",
195 NULL,
196 };
197
198 static const char * const pcn_79c971_rcvfw[] = {
199 "16 bytes",
200 "64 bytes",
201 "112 bytes",
202 NULL,
203 };
204
205 /*
206 * Description of Tx start points for various revisions.
207 */
208 static const char * const pcn_79c970_xmtsp[] = {
209 "8 bytes",
210 "64 bytes",
211 "128 bytes",
212 "248 bytes",
213 };
214
215 static const char * const pcn_79c971_xmtsp[] = {
216 "20 bytes",
217 "64 bytes",
218 "128 bytes",
219 "248 bytes",
220 };
221
222 static const char * const pcn_79c971_xmtsp_sram[] = {
223 "44 bytes",
224 "64 bytes",
225 "128 bytes",
226 "store-and-forward",
227 };
228
229 /*
230 * Description of Tx FIFO watermarks for various revisions.
231 */
232 static const char * const pcn_79c970_xmtfw[] = {
233 "16 bytes",
234 "64 bytes",
235 "128 bytes",
236 NULL,
237 };
238
239 static const char * const pcn_79c971_xmtfw[] = {
240 "16 bytes",
241 "64 bytes",
242 "108 bytes",
243 NULL,
244 };
245
246 /*
247 * Software state per device.
248 */
249 struct pcn_softc {
250 struct device sc_dev; /* generic device information */
251 bus_space_tag_t sc_st; /* bus space tag */
252 bus_space_handle_t sc_sh; /* bus space handle */
253 bus_dma_tag_t sc_dmat; /* bus DMA tag */
254 struct ethercom sc_ethercom; /* Ethernet common data */
255 void *sc_sdhook; /* shutdown hook */
256
257 /* Points to our media routines, etc. */
258 const struct pcn_variant *sc_variant;
259
260 void *sc_ih; /* interrupt cookie */
261
262 struct mii_data sc_mii; /* MII/media information */
263
264 struct callout sc_tick_ch; /* tick callout */
265
266 bus_dmamap_t sc_cddmamap; /* control data DMA map */
267 #define sc_cddma sc_cddmamap->dm_segs[0].ds_addr
268
269 /* Software state for transmit and receive descriptors. */
270 struct pcn_txsoft sc_txsoft[PCN_TXQUEUELEN];
271 struct pcn_rxsoft sc_rxsoft[PCN_NRXDESC];
272
273 /* Control data structures */
274 struct pcn_control_data *sc_control_data;
275 #define sc_txdescs sc_control_data->pcd_txdescs
276 #define sc_rxdescs sc_control_data->pcd_rxdescs
277 #define sc_initblock sc_control_data->pcd_initblock
278
279 #ifdef PCN_EVENT_COUNTERS
280 /* Event counters. */
281 struct evcnt sc_ev_txsstall; /* Tx stalled due to no txs */
282 struct evcnt sc_ev_txdstall; /* Tx stalled due to no txd */
283 struct evcnt sc_ev_txintr; /* Tx interrupts */
284 struct evcnt sc_ev_rxintr; /* Rx interrupts */
285 struct evcnt sc_ev_babl; /* BABL in pcn_intr() */
286 struct evcnt sc_ev_miss; /* MISS in pcn_intr() */
287 struct evcnt sc_ev_merr; /* MERR in pcn_intr() */
288
289 struct evcnt sc_ev_txseg1; /* Tx packets w/ 1 segment */
290 struct evcnt sc_ev_txseg2; /* Tx packets w/ 2 segments */
291 struct evcnt sc_ev_txseg3; /* Tx packets w/ 3 segments */
292 struct evcnt sc_ev_txseg4; /* Tx packets w/ 4 segments */
293 struct evcnt sc_ev_txseg5; /* Tx packets w/ 5 segments */
294 struct evcnt sc_ev_txsegmore; /* Tx packets w/ more than 5 segments */
295 struct evcnt sc_ev_txcopy; /* Tx copies required */
296 #endif /* PCN_EVENT_COUNTERS */
297
298 const char * const *sc_rcvfw_desc; /* Rx FIFO watermark info */
299 int sc_rcvfw;
300
301 const char * const *sc_xmtsp_desc; /* Tx start point info */
302 int sc_xmtsp;
303
304 const char * const *sc_xmtfw_desc; /* Tx FIFO watermark info */
305 int sc_xmtfw;
306
307 int sc_flags; /* misc. flags; see below */
308 int sc_swstyle; /* the software style in use */
309
310 int sc_txfree; /* number of free Tx descriptors */
311 int sc_txnext; /* next ready Tx descriptor */
312
313 int sc_txsfree; /* number of free Tx jobs */
314 int sc_txsnext; /* next free Tx job */
315 int sc_txsdirty; /* dirty Tx jobs */
316
317 int sc_rxptr; /* next ready Rx descriptor/job */
318
319 uint32_t sc_csr5; /* prototype CSR5 register */
320 uint32_t sc_mode; /* prototype MODE register */
321
322 #if NRND > 0
323 rndsource_element_t rnd_source; /* random source */
324 #endif
325 };
326
327 /* sc_flags */
328 #define PCN_F_HAS_MII 0x0001 /* has MII */
329
330 #ifdef PCN_EVENT_COUNTERS
331 #define PCN_EVCNT_INCR(ev) (ev)->ev_count++
332 #else
333 #define PCN_EVCNT_INCR(ev) /* nothing */
334 #endif
335
336 #define PCN_CDTXADDR(sc, x) ((sc)->sc_cddma + PCN_CDTXOFF((x)))
337 #define PCN_CDRXADDR(sc, x) ((sc)->sc_cddma + PCN_CDRXOFF((x)))
338 #define PCN_CDINITADDR(sc) ((sc)->sc_cddma + PCN_CDINITOFF)
339
340 #define PCN_CDTXSYNC(sc, x, n, ops) \
341 do { \
342 int __x, __n; \
343 \
344 __x = (x); \
345 __n = (n); \
346 \
347 /* If it will wrap around, sync to the end of the ring. */ \
348 if ((__x + __n) > PCN_NTXDESC) { \
349 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
350 PCN_CDTXOFF(__x), sizeof(struct letmd) * \
351 (PCN_NTXDESC - __x), (ops)); \
352 __n -= (PCN_NTXDESC - __x); \
353 __x = 0; \
354 } \
355 \
356 /* Now sync whatever is left. */ \
357 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
358 PCN_CDTXOFF(__x), sizeof(struct letmd) * __n, (ops)); \
359 } while (/*CONSTCOND*/0)
360
361 #define PCN_CDRXSYNC(sc, x, ops) \
362 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
363 PCN_CDRXOFF((x)), sizeof(struct lermd), (ops))
364
365 #define PCN_CDINITSYNC(sc, ops) \
366 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
367 PCN_CDINITOFF, sizeof(struct leinit), (ops))
368
369 #define PCN_INIT_RXDESC(sc, x) \
370 do { \
371 struct pcn_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \
372 struct lermd *__rmd = &(sc)->sc_rxdescs[(x)]; \
373 struct mbuf *__m = __rxs->rxs_mbuf; \
374 \
375 /* \
376 * Note: We scoot the packet forward 2 bytes in the buffer \
377 * so that the payload after the Ethernet header is aligned \
378 * to a 4-byte boundary. \
379 */ \
380 __m->m_data = __m->m_ext.ext_buf + 2; \
381 \
382 if ((sc)->sc_swstyle == LE_B20_SSTYLE_PCNETPCI3) { \
383 __rmd->rmd2 = \
384 htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2); \
385 __rmd->rmd0 = 0; \
386 } else { \
387 __rmd->rmd2 = 0; \
388 __rmd->rmd0 = \
389 htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2); \
390 } \
391 __rmd->rmd1 = htole32(LE_R1_OWN|LE_R1_ONES| \
392 (LE_BCNT(MCLBYTES - 2) & LE_R1_BCNT_MASK)); \
393 PCN_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);\
394 } while(/*CONSTCOND*/0)
395
396 static void pcn_start(struct ifnet *);
397 static void pcn_watchdog(struct ifnet *);
398 static int pcn_ioctl(struct ifnet *, u_long, caddr_t);
399 static int pcn_init(struct ifnet *);
400 static void pcn_stop(struct ifnet *, int);
401
402 static void pcn_shutdown(void *);
403
404 static void pcn_reset(struct pcn_softc *);
405 static void pcn_rxdrain(struct pcn_softc *);
406 static int pcn_add_rxbuf(struct pcn_softc *, int);
407 static void pcn_tick(void *);
408
409 static void pcn_spnd(struct pcn_softc *);
410
411 static void pcn_set_filter(struct pcn_softc *);
412
413 static int pcn_intr(void *);
414 static void pcn_txintr(struct pcn_softc *);
415 static int pcn_rxintr(struct pcn_softc *);
416
417 static int pcn_mii_readreg(struct device *, int, int);
418 static void pcn_mii_writereg(struct device *, int, int, int);
419 static void pcn_mii_statchg(struct device *);
420
421 static void pcn_79c970_mediainit(struct pcn_softc *);
422 static int pcn_79c970_mediachange(struct ifnet *);
423 static void pcn_79c970_mediastatus(struct ifnet *, struct ifmediareq *);
424
425 static void pcn_79c971_mediainit(struct pcn_softc *);
426 static int pcn_79c971_mediachange(struct ifnet *);
427 static void pcn_79c971_mediastatus(struct ifnet *, struct ifmediareq *);
428
429 /*
430 * Description of a PCnet-PCI variant. Used to select media access
431 * method, mostly, and to print a nice description of the chip.
432 */
433 static const struct pcn_variant {
434 const char *pcv_desc;
435 void (*pcv_mediainit)(struct pcn_softc *);
436 uint16_t pcv_chipid;
437 } pcn_variants[] = {
438 { "Am79c970 PCnet-PCI",
439 pcn_79c970_mediainit,
440 PARTID_Am79c970 },
441
442 { "Am79c970A PCnet-PCI II",
443 pcn_79c970_mediainit,
444 PARTID_Am79c970A },
445
446 { "Am79c971 PCnet-FAST",
447 pcn_79c971_mediainit,
448 PARTID_Am79c971 },
449
450 { "Am79c972 PCnet-FAST+",
451 pcn_79c971_mediainit,
452 PARTID_Am79c972 },
453
454 { "Am79c973 PCnet-FAST III",
455 pcn_79c971_mediainit,
456 PARTID_Am79c973 },
457
458 { "Am79c975 PCnet-FAST III",
459 pcn_79c971_mediainit,
460 PARTID_Am79c975 },
461
462 { "Unknown PCnet-PCI variant",
463 pcn_79c971_mediainit,
464 0 },
465 };
466
467 int pcn_copy_small = 0;
468
469 static int pcn_match(struct device *, struct cfdata *, void *);
470 static void pcn_attach(struct device *, struct device *, void *);
471
472 CFATTACH_DECL(pcn, sizeof(struct pcn_softc),
473 pcn_match, pcn_attach, NULL, NULL);
474
475 /*
476 * Routines to read and write the PCnet-PCI CSR/BCR space.
477 */
478
479 static inline uint32_t
480 pcn_csr_read(struct pcn_softc *sc, int reg)
481 {
482
483 bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RAP, reg);
484 return (bus_space_read_4(sc->sc_st, sc->sc_sh, PCN32_RDP));
485 }
486
487 static inline void
488 pcn_csr_write(struct pcn_softc *sc, int reg, uint32_t val)
489 {
490
491 bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RAP, reg);
492 bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RDP, val);
493 }
494
495 static inline uint32_t
496 pcn_bcr_read(struct pcn_softc *sc, int reg)
497 {
498
499 bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RAP, reg);
500 return (bus_space_read_4(sc->sc_st, sc->sc_sh, PCN32_BDP));
501 }
502
503 static inline void
504 pcn_bcr_write(struct pcn_softc *sc, int reg, uint32_t val)
505 {
506
507 bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RAP, reg);
508 bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_BDP, val);
509 }
510
511 static const struct pcn_variant *
512 pcn_lookup_variant(uint16_t chipid)
513 {
514 const struct pcn_variant *pcv;
515
516 for (pcv = pcn_variants; pcv->pcv_chipid != 0; pcv++) {
517 if (chipid == pcv->pcv_chipid)
518 return (pcv);
519 }
520
521 /*
522 * This covers unknown chips, which we simply treat like
523 * a generic PCnet-FAST.
524 */
525 return (pcv);
526 }
527
528 static int
529 pcn_match(struct device *parent, struct cfdata *cf, void *aux)
530 {
531 struct pci_attach_args *pa = aux;
532
533 /*
534 * IBM Makes a PCI variant of this card which shows up as a
535 * Trident Microsystems 4DWAVE DX (ethernet network, revision 0x25)
536 * this card is truly a pcn card, so we have a special case match for
537 * it
538 */
539
540 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_TRIDENT &&
541 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_TRIDENT_4DWAVE_DX &&
542 PCI_CLASS(pa->pa_class) == PCI_CLASS_NETWORK)
543 return(1);
544
545 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_AMD)
546 return (0);
547
548 switch (PCI_PRODUCT(pa->pa_id)) {
549 case PCI_PRODUCT_AMD_PCNET_PCI:
550 /* Beat if_le_pci.c */
551 return (10);
552 }
553
554 return (0);
555 }
556
557 static void
558 pcn_attach(struct device *parent, struct device *self, void *aux)
559 {
560 struct pcn_softc *sc = (struct pcn_softc *) self;
561 struct pci_attach_args *pa = aux;
562 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
563 pci_chipset_tag_t pc = pa->pa_pc;
564 pci_intr_handle_t ih;
565 const char *intrstr = NULL;
566 bus_space_tag_t iot, memt;
567 bus_space_handle_t ioh, memh;
568 bus_dma_segment_t seg;
569 int ioh_valid, memh_valid;
570 int i, rseg, error;
571 uint32_t chipid, reg;
572 uint8_t enaddr[ETHER_ADDR_LEN];
573
574 callout_init(&sc->sc_tick_ch);
575
576 printf(": AMD PCnet-PCI Ethernet\n");
577
578 /*
579 * Map the device.
580 */
581 ioh_valid = (pci_mapreg_map(pa, PCN_PCI_CBIO, PCI_MAPREG_TYPE_IO, 0,
582 &iot, &ioh, NULL, NULL) == 0);
583 memh_valid = (pci_mapreg_map(pa, PCN_PCI_CBMEM,
584 PCI_MAPREG_TYPE_MEM|PCI_MAPREG_MEM_TYPE_32BIT, 0,
585 &memt, &memh, NULL, NULL) == 0);
586
587 if (memh_valid) {
588 sc->sc_st = memt;
589 sc->sc_sh = memh;
590 } else if (ioh_valid) {
591 sc->sc_st = iot;
592 sc->sc_sh = ioh;
593 } else {
594 printf("%s: unable to map device registers\n",
595 sc->sc_dev.dv_xname);
596 return;
597 }
598
599 sc->sc_dmat = pa->pa_dmat;
600
601 /* Make sure bus mastering is enabled. */
602 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
603 pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) |
604 PCI_COMMAND_MASTER_ENABLE);
605
606 /* power up chip */
607 if ((error = pci_activate(pa->pa_pc, pa->pa_tag, sc,
608 NULL)) && error != EOPNOTSUPP) {
609 aprint_error("%s: cannot activate %d\n", sc->sc_dev.dv_xname,
610 error);
611 return;
612 }
613
614 /*
615 * Reset the chip to a known state. This also puts the
616 * chip into 32-bit mode.
617 */
618 pcn_reset(sc);
619
620 #if !defined(PCN_NO_PROM)
621
622 /*
623 * Read the Ethernet address from the EEPROM.
624 */
625 for (i = 0; i < ETHER_ADDR_LEN; i++)
626 enaddr[i] = bus_space_read_1(sc->sc_st, sc->sc_sh,
627 PCN32_APROM + i);
628 #else
629 /*
630 * The PROM is not used; instead we assume that the MAC address
631 * has been programmed into the device's physical address
632 * registers by the boot firmware
633 */
634
635 for (i=0; i < 3; i++) {
636 uint32_t val;
637 val = pcn_csr_read(sc, LE_CSR12 + i);
638 enaddr[2*i] = val & 0x0ff;
639 enaddr[2*i+1] = (val >> 8) & 0x0ff;
640 }
641 #endif
642
643 /*
644 * Now that the device is mapped, attempt to figure out what
645 * kind of chip we have. Note that IDL has all 32 bits of
646 * the chip ID when we're in 32-bit mode.
647 */
648 chipid = pcn_csr_read(sc, LE_CSR88);
649 sc->sc_variant = pcn_lookup_variant(CHIPID_PARTID(chipid));
650
651 printf("%s: %s rev %d, Ethernet address %s\n",
652 sc->sc_dev.dv_xname, sc->sc_variant->pcv_desc, CHIPID_VER(chipid),
653 ether_sprintf(enaddr));
654
655 /*
656 * Map and establish our interrupt.
657 */
658 if (pci_intr_map(pa, &ih)) {
659 printf("%s: unable to map interrupt\n", sc->sc_dev.dv_xname);
660 return;
661 }
662 intrstr = pci_intr_string(pc, ih);
663 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, pcn_intr, sc);
664 if (sc->sc_ih == NULL) {
665 printf("%s: unable to establish interrupt",
666 sc->sc_dev.dv_xname);
667 if (intrstr != NULL)
668 printf(" at %s", intrstr);
669 printf("\n");
670 return;
671 }
672 printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);
673
674 /*
675 * Allocate the control data structures, and create and load the
676 * DMA map for it.
677 */
678 if ((error = bus_dmamem_alloc(sc->sc_dmat,
679 sizeof(struct pcn_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
680 0)) != 0) {
681 printf("%s: unable to allocate control data, error = %d\n",
682 sc->sc_dev.dv_xname, error);
683 goto fail_0;
684 }
685
686 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
687 sizeof(struct pcn_control_data), (caddr_t *)&sc->sc_control_data,
688 BUS_DMA_COHERENT)) != 0) {
689 printf("%s: unable to map control data, error = %d\n",
690 sc->sc_dev.dv_xname, error);
691 goto fail_1;
692 }
693
694 if ((error = bus_dmamap_create(sc->sc_dmat,
695 sizeof(struct pcn_control_data), 1,
696 sizeof(struct pcn_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
697 printf("%s: unable to create control data DMA map, "
698 "error = %d\n", sc->sc_dev.dv_xname, error);
699 goto fail_2;
700 }
701
702 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
703 sc->sc_control_data, sizeof(struct pcn_control_data), NULL,
704 0)) != 0) {
705 printf("%s: unable to load control data DMA map, error = %d\n",
706 sc->sc_dev.dv_xname, error);
707 goto fail_3;
708 }
709
710 /* Create the transmit buffer DMA maps. */
711 for (i = 0; i < PCN_TXQUEUELEN; i++) {
712 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
713 PCN_NTXSEGS, MCLBYTES, 0, 0,
714 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
715 printf("%s: unable to create tx DMA map %d, "
716 "error = %d\n", sc->sc_dev.dv_xname, i, error);
717 goto fail_4;
718 }
719 }
720
721 /* Create the receive buffer DMA maps. */
722 for (i = 0; i < PCN_NRXDESC; i++) {
723 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
724 MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
725 printf("%s: unable to create rx DMA map %d, "
726 "error = %d\n", sc->sc_dev.dv_xname, i, error);
727 goto fail_5;
728 }
729 sc->sc_rxsoft[i].rxs_mbuf = NULL;
730 }
731
732 /* Initialize our media structures. */
733 (*sc->sc_variant->pcv_mediainit)(sc);
734
735 /*
736 * Initialize FIFO watermark info.
737 */
738 switch (sc->sc_variant->pcv_chipid) {
739 case PARTID_Am79c970:
740 case PARTID_Am79c970A:
741 sc->sc_rcvfw_desc = pcn_79c970_rcvfw;
742 sc->sc_xmtsp_desc = pcn_79c970_xmtsp;
743 sc->sc_xmtfw_desc = pcn_79c970_xmtfw;
744 break;
745
746 default:
747 sc->sc_rcvfw_desc = pcn_79c971_rcvfw;
748 /*
749 * Read BCR25 to determine how much SRAM is
750 * on the board. If > 0, then we the chip
751 * uses different Start Point thresholds.
752 *
753 * Note BCR25 and BCR26 are loaded from the
754 * EEPROM on RST, and unaffected by S_RESET,
755 * so we don't really have to worry about
756 * them except for this.
757 */
758 reg = pcn_bcr_read(sc, LE_BCR25) & 0x00ff;
759 if (reg != 0)
760 sc->sc_xmtsp_desc = pcn_79c971_xmtsp_sram;
761 else
762 sc->sc_xmtsp_desc = pcn_79c971_xmtsp;
763 sc->sc_xmtfw_desc = pcn_79c971_xmtfw;
764 break;
765 }
766
767 /*
768 * Set up defaults -- see the tables above for what these
769 * values mean.
770 *
771 * XXX How should we tune RCVFW and XMTFW?
772 */
773 sc->sc_rcvfw = 1; /* minimum for full-duplex */
774 sc->sc_xmtsp = 1;
775 sc->sc_xmtfw = 0;
776
777 ifp = &sc->sc_ethercom.ec_if;
778 strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
779 ifp->if_softc = sc;
780 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
781 ifp->if_ioctl = pcn_ioctl;
782 ifp->if_start = pcn_start;
783 ifp->if_watchdog = pcn_watchdog;
784 ifp->if_init = pcn_init;
785 ifp->if_stop = pcn_stop;
786 IFQ_SET_READY(&ifp->if_snd);
787
788 /* Attach the interface. */
789 if_attach(ifp);
790 ether_ifattach(ifp, enaddr);
791 #if NRND > 0
792 rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
793 RND_TYPE_NET, 0);
794 #endif
795
796 #ifdef PCN_EVENT_COUNTERS
797 /* Attach event counters. */
798 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
799 NULL, sc->sc_dev.dv_xname, "txsstall");
800 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
801 NULL, sc->sc_dev.dv_xname, "txdstall");
802 evcnt_attach_dynamic(&sc->sc_ev_txintr, EVCNT_TYPE_INTR,
803 NULL, sc->sc_dev.dv_xname, "txintr");
804 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
805 NULL, sc->sc_dev.dv_xname, "rxintr");
806 evcnt_attach_dynamic(&sc->sc_ev_babl, EVCNT_TYPE_MISC,
807 NULL, sc->sc_dev.dv_xname, "babl");
808 evcnt_attach_dynamic(&sc->sc_ev_miss, EVCNT_TYPE_MISC,
809 NULL, sc->sc_dev.dv_xname, "miss");
810 evcnt_attach_dynamic(&sc->sc_ev_merr, EVCNT_TYPE_MISC,
811 NULL, sc->sc_dev.dv_xname, "merr");
812
813 evcnt_attach_dynamic(&sc->sc_ev_txseg1, EVCNT_TYPE_MISC,
814 NULL, sc->sc_dev.dv_xname, "txseg1");
815 evcnt_attach_dynamic(&sc->sc_ev_txseg2, EVCNT_TYPE_MISC,
816 NULL, sc->sc_dev.dv_xname, "txseg2");
817 evcnt_attach_dynamic(&sc->sc_ev_txseg3, EVCNT_TYPE_MISC,
818 NULL, sc->sc_dev.dv_xname, "txseg3");
819 evcnt_attach_dynamic(&sc->sc_ev_txseg4, EVCNT_TYPE_MISC,
820 NULL, sc->sc_dev.dv_xname, "txseg4");
821 evcnt_attach_dynamic(&sc->sc_ev_txseg5, EVCNT_TYPE_MISC,
822 NULL, sc->sc_dev.dv_xname, "txseg5");
823 evcnt_attach_dynamic(&sc->sc_ev_txsegmore, EVCNT_TYPE_MISC,
824 NULL, sc->sc_dev.dv_xname, "txsegmore");
825 evcnt_attach_dynamic(&sc->sc_ev_txcopy, EVCNT_TYPE_MISC,
826 NULL, sc->sc_dev.dv_xname, "txcopy");
827 #endif /* PCN_EVENT_COUNTERS */
828
829 /* Make sure the interface is shutdown during reboot. */
830 sc->sc_sdhook = shutdownhook_establish(pcn_shutdown, sc);
831 if (sc->sc_sdhook == NULL)
832 printf("%s: WARNING: unable to establish shutdown hook\n",
833 sc->sc_dev.dv_xname);
834 return;
835
836 /*
837 * Free any resources we've allocated during the failed attach
838 * attempt. Do this in reverse order and fall through.
839 */
840 fail_5:
841 for (i = 0; i < PCN_NRXDESC; i++) {
842 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
843 bus_dmamap_destroy(sc->sc_dmat,
844 sc->sc_rxsoft[i].rxs_dmamap);
845 }
846 fail_4:
847 for (i = 0; i < PCN_TXQUEUELEN; i++) {
848 if (sc->sc_txsoft[i].txs_dmamap != NULL)
849 bus_dmamap_destroy(sc->sc_dmat,
850 sc->sc_txsoft[i].txs_dmamap);
851 }
852 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
853 fail_3:
854 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
855 fail_2:
856 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
857 sizeof(struct pcn_control_data));
858 fail_1:
859 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
860 fail_0:
861 return;
862 }
863
864 /*
865 * pcn_shutdown:
866 *
867 * Make sure the interface is stopped at reboot time.
868 */
869 static void
870 pcn_shutdown(void *arg)
871 {
872 struct pcn_softc *sc = arg;
873
874 pcn_stop(&sc->sc_ethercom.ec_if, 1);
875 /* explicitly reset the chip for some onboard one with lazy firmware */
876 pcn_reset(sc);
877 }
878
879 /*
880 * pcn_start: [ifnet interface function]
881 *
882 * Start packet transmission on the interface.
883 */
884 static void
885 pcn_start(struct ifnet *ifp)
886 {
887 struct pcn_softc *sc = ifp->if_softc;
888 struct mbuf *m0, *m;
889 struct pcn_txsoft *txs;
890 bus_dmamap_t dmamap;
891 int error, nexttx, lasttx = -1, ofree, seg;
892
893 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
894 return;
895
896 /*
897 * Remember the previous number of free descriptors and
898 * the first descriptor we'll use.
899 */
900 ofree = sc->sc_txfree;
901
902 /*
903 * Loop through the send queue, setting up transmit descriptors
904 * until we drain the queue, or use up all available transmit
905 * descriptors.
906 */
907 for (;;) {
908 /* Grab a packet off the queue. */
909 IFQ_POLL(&ifp->if_snd, m0);
910 if (m0 == NULL)
911 break;
912 m = NULL;
913
914 /* Get a work queue entry. */
915 if (sc->sc_txsfree == 0) {
916 PCN_EVCNT_INCR(&sc->sc_ev_txsstall);
917 break;
918 }
919
920 txs = &sc->sc_txsoft[sc->sc_txsnext];
921 dmamap = txs->txs_dmamap;
922
923 /*
924 * Load the DMA map. If this fails, the packet either
925 * didn't fit in the alloted number of segments, or we
926 * were short on resources. In this case, we'll copy
927 * and try again.
928 */
929 if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
930 BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
931 PCN_EVCNT_INCR(&sc->sc_ev_txcopy);
932 MGETHDR(m, M_DONTWAIT, MT_DATA);
933 if (m == NULL) {
934 printf("%s: unable to allocate Tx mbuf\n",
935 sc->sc_dev.dv_xname);
936 break;
937 }
938 if (m0->m_pkthdr.len > MHLEN) {
939 MCLGET(m, M_DONTWAIT);
940 if ((m->m_flags & M_EXT) == 0) {
941 printf("%s: unable to allocate Tx "
942 "cluster\n", sc->sc_dev.dv_xname);
943 m_freem(m);
944 break;
945 }
946 }
947 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
948 m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
949 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
950 m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
951 if (error) {
952 printf("%s: unable to load Tx buffer, "
953 "error = %d\n", sc->sc_dev.dv_xname, error);
954 break;
955 }
956 }
957
958 /*
959 * Ensure we have enough descriptors free to describe
960 * the packet. Note, we always reserve one descriptor
961 * at the end of the ring as a termination point, to
962 * prevent wrap-around.
963 */
964 if (dmamap->dm_nsegs > (sc->sc_txfree - 1)) {
965 /*
966 * Not enough free descriptors to transmit this
967 * packet. We haven't committed anything yet,
968 * so just unload the DMA map, put the packet
969 * back on the queue, and punt. Notify the upper
970 * layer that there are not more slots left.
971 *
972 * XXX We could allocate an mbuf and copy, but
973 * XXX is it worth it?
974 */
975 ifp->if_flags |= IFF_OACTIVE;
976 bus_dmamap_unload(sc->sc_dmat, dmamap);
977 if (m != NULL)
978 m_freem(m);
979 PCN_EVCNT_INCR(&sc->sc_ev_txdstall);
980 break;
981 }
982
983 IFQ_DEQUEUE(&ifp->if_snd, m0);
984 if (m != NULL) {
985 m_freem(m0);
986 m0 = m;
987 }
988
989 /*
990 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
991 */
992
993 /* Sync the DMA map. */
994 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
995 BUS_DMASYNC_PREWRITE);
996
997 #ifdef PCN_EVENT_COUNTERS
998 switch (dmamap->dm_nsegs) {
999 case 1:
1000 PCN_EVCNT_INCR(&sc->sc_ev_txseg1);
1001 break;
1002 case 2:
1003 PCN_EVCNT_INCR(&sc->sc_ev_txseg2);
1004 break;
1005 case 3:
1006 PCN_EVCNT_INCR(&sc->sc_ev_txseg3);
1007 break;
1008 case 4:
1009 PCN_EVCNT_INCR(&sc->sc_ev_txseg4);
1010 break;
1011 case 5:
1012 PCN_EVCNT_INCR(&sc->sc_ev_txseg5);
1013 break;
1014 default:
1015 PCN_EVCNT_INCR(&sc->sc_ev_txsegmore);
1016 break;
1017 }
1018 #endif /* PCN_EVENT_COUNTERS */
1019
1020 /*
1021 * Initialize the transmit descriptors.
1022 */
1023 if (sc->sc_swstyle == LE_B20_SSTYLE_PCNETPCI3) {
1024 for (nexttx = sc->sc_txnext, seg = 0;
1025 seg < dmamap->dm_nsegs;
1026 seg++, nexttx = PCN_NEXTTX(nexttx)) {
1027 /*
1028 * If this is the first descriptor we're
1029 * enqueueing, don't set the OWN bit just
1030 * yet. That could cause a race condition.
1031 * We'll do it below.
1032 */
1033 sc->sc_txdescs[nexttx].tmd0 = 0;
1034 sc->sc_txdescs[nexttx].tmd2 =
1035 htole32(dmamap->dm_segs[seg].ds_addr);
1036 sc->sc_txdescs[nexttx].tmd1 =
1037 htole32(LE_T1_ONES |
1038 (nexttx == sc->sc_txnext ? 0 : LE_T1_OWN) |
1039 (LE_BCNT(dmamap->dm_segs[seg].ds_len) &
1040 LE_T1_BCNT_MASK));
1041 lasttx = nexttx;
1042 }
1043 } else {
1044 for (nexttx = sc->sc_txnext, seg = 0;
1045 seg < dmamap->dm_nsegs;
1046 seg++, nexttx = PCN_NEXTTX(nexttx)) {
1047 /*
1048 * If this is the first descriptor we're
1049 * enqueueing, don't set the OWN bit just
1050 * yet. That could cause a race condition.
1051 * We'll do it below.
1052 */
1053 sc->sc_txdescs[nexttx].tmd0 =
1054 htole32(dmamap->dm_segs[seg].ds_addr);
1055 sc->sc_txdescs[nexttx].tmd2 = 0;
1056 sc->sc_txdescs[nexttx].tmd1 =
1057 htole32(LE_T1_ONES |
1058 (nexttx == sc->sc_txnext ? 0 : LE_T1_OWN) |
1059 (LE_BCNT(dmamap->dm_segs[seg].ds_len) &
1060 LE_T1_BCNT_MASK));
1061 lasttx = nexttx;
1062 }
1063 }
1064
1065 KASSERT(lasttx != -1);
1066 /* Interrupt on the packet, if appropriate. */
1067 if ((sc->sc_txsnext & PCN_TXINTR_MASK) == 0)
1068 sc->sc_txdescs[lasttx].tmd1 |= htole32(LE_T1_LTINT);
1069
1070 /* Set `start of packet' and `end of packet' appropriately. */
1071 sc->sc_txdescs[lasttx].tmd1 |= htole32(LE_T1_ENP);
1072 sc->sc_txdescs[sc->sc_txnext].tmd1 |=
1073 htole32(LE_T1_OWN|LE_T1_STP);
1074
1075 /* Sync the descriptors we're using. */
1076 PCN_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
1077 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1078
1079 /* Kick the transmitter. */
1080 pcn_csr_write(sc, LE_CSR0, LE_C0_INEA|LE_C0_TDMD);
1081
1082 /*
1083 * Store a pointer to the packet so we can free it later,
1084 * and remember what txdirty will be once the packet is
1085 * done.
1086 */
1087 txs->txs_mbuf = m0;
1088 txs->txs_firstdesc = sc->sc_txnext;
1089 txs->txs_lastdesc = lasttx;
1090
1091 /* Advance the tx pointer. */
1092 sc->sc_txfree -= dmamap->dm_nsegs;
1093 sc->sc_txnext = nexttx;
1094
1095 sc->sc_txsfree--;
1096 sc->sc_txsnext = PCN_NEXTTXS(sc->sc_txsnext);
1097
1098 #if NBPFILTER > 0
1099 /* Pass the packet to any BPF listeners. */
1100 if (ifp->if_bpf)
1101 bpf_mtap(ifp->if_bpf, m0);
1102 #endif /* NBPFILTER > 0 */
1103 }
1104
1105 if (sc->sc_txsfree == 0 || sc->sc_txfree == 0) {
1106 /* No more slots left; notify upper layer. */
1107 ifp->if_flags |= IFF_OACTIVE;
1108 }
1109
1110 if (sc->sc_txfree != ofree) {
1111 /* Set a watchdog timer in case the chip flakes out. */
1112 ifp->if_timer = 5;
1113 }
1114 }
1115
1116 /*
1117 * pcn_watchdog: [ifnet interface function]
1118 *
1119 * Watchdog timer handler.
1120 */
1121 static void
1122 pcn_watchdog(struct ifnet *ifp)
1123 {
1124 struct pcn_softc *sc = ifp->if_softc;
1125
1126 /*
1127 * Since we're not interrupting every packet, sweep
1128 * up before we report an error.
1129 */
1130 pcn_txintr(sc);
1131
1132 if (sc->sc_txfree != PCN_NTXDESC) {
1133 printf("%s: device timeout (txfree %d txsfree %d)\n",
1134 sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree);
1135 ifp->if_oerrors++;
1136
1137 /* Reset the interface. */
1138 (void) pcn_init(ifp);
1139 }
1140
1141 /* Try to get more packets going. */
1142 pcn_start(ifp);
1143 }
1144
1145 /*
1146 * pcn_ioctl: [ifnet interface function]
1147 *
1148 * Handle control requests from the operator.
1149 */
1150 static int
1151 pcn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1152 {
1153 struct pcn_softc *sc = ifp->if_softc;
1154 struct ifreq *ifr = (struct ifreq *) data;
1155 int s, error;
1156
1157 s = splnet();
1158
1159 switch (cmd) {
1160 case SIOCSIFMEDIA:
1161 case SIOCGIFMEDIA:
1162 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
1163 break;
1164
1165 default:
1166 error = ether_ioctl(ifp, cmd, data);
1167 if (error == ENETRESET) {
1168 /*
1169 * Multicast list has changed; set the hardware filter
1170 * accordingly.
1171 */
1172 if (ifp->if_flags & IFF_RUNNING)
1173 error = pcn_init(ifp);
1174 else
1175 error = 0;
1176 }
1177 break;
1178 }
1179
1180 /* Try to get more packets going. */
1181 pcn_start(ifp);
1182
1183 splx(s);
1184 return (error);
1185 }
1186
1187 /*
1188 * pcn_intr:
1189 *
1190 * Interrupt service routine.
1191 */
static int
pcn_intr(void *arg)
{
	struct pcn_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t csr0;
	int wantinit, handled = 0;

	/*
	 * Loop as long as CSR0 reports a pending interrupt and no
	 * condition has been seen that requires a full re-init.
	 * A fatal memory error breaks out early; re-init (if needed)
	 * is deferred until after the loop.
	 */
	for (wantinit = 0; wantinit == 0;) {
		csr0 = pcn_csr_read(sc, LE_CSR0);
		if ((csr0 & LE_C0_INTR) == 0)
			break;

#if NRND > 0
		/* Feed the interrupt status into the entropy pool. */
		if (RND_ENABLED(&sc->rnd_source))
			rnd_add_uint32(&sc->rnd_source, csr0);
#endif

		/* ACK the bits and re-enable interrupts. */
		pcn_csr_write(sc, LE_CSR0, csr0 &
		    (LE_C0_INEA|LE_C0_BABL|LE_C0_MISS|LE_C0_MERR|LE_C0_RINT|
		     LE_C0_TINT|LE_C0_IDON));

		handled = 1;

		/* Receive interrupt: rxintr returns nonzero if re-init needed. */
		if (csr0 & LE_C0_RINT) {
			PCN_EVCNT_INCR(&sc->sc_ev_rxintr);
			wantinit = pcn_rxintr(sc);
		}

		/* Transmit-complete interrupt: sweep the Tx ring. */
		if (csr0 & LE_C0_TINT) {
			PCN_EVCNT_INCR(&sc->sc_ev_txintr);
			pcn_txintr(sc);
		}

		/* Error summary bit: decode the individual error causes. */
		if (csr0 & LE_C0_ERR) {
			if (csr0 & LE_C0_BABL) {
				PCN_EVCNT_INCR(&sc->sc_ev_babl);
				ifp->if_oerrors++;
			}
			if (csr0 & LE_C0_MISS) {
				PCN_EVCNT_INCR(&sc->sc_ev_miss);
				ifp->if_ierrors++;
			}
			if (csr0 & LE_C0_MERR) {
				/* Memory error is fatal; force a re-init. */
				PCN_EVCNT_INCR(&sc->sc_ev_merr);
				printf("%s: memory error\n",
				    sc->sc_dev.dv_xname);
				wantinit = 1;
				break;
			}
		}

		/* Receiver or transmitter fell over; schedule a re-init. */
		if ((csr0 & LE_C0_RXON) == 0) {
			printf("%s: receiver disabled\n",
			    sc->sc_dev.dv_xname);
			ifp->if_ierrors++;
			wantinit = 1;
		}

		if ((csr0 & LE_C0_TXON) == 0) {
			printf("%s: transmitter disabled\n",
			    sc->sc_dev.dv_xname);
			ifp->if_oerrors++;
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			pcn_init(ifp);

		/* Try to get more packets going. */
		pcn_start(ifp);
	}

	return (handled);
}
1270
1271 /*
1272 * pcn_spnd:
1273 *
1274 * Suspend the chip.
1275 */
1276 static void
1277 pcn_spnd(struct pcn_softc *sc)
1278 {
1279 int i;
1280
1281 pcn_csr_write(sc, LE_CSR5, sc->sc_csr5 | LE_C5_SPND);
1282
1283 for (i = 0; i < 10000; i++) {
1284 if (pcn_csr_read(sc, LE_CSR5) & LE_C5_SPND)
1285 return;
1286 delay(5);
1287 }
1288
1289 printf("%s: WARNING: chip failed to enter suspended state\n",
1290 sc->sc_dev.dv_xname);
1291 }
1292
1293 /*
1294 * pcn_txintr:
1295 *
1296 * Helper; handle transmit interrupts.
1297 */
1298 static void
1299 pcn_txintr(struct pcn_softc *sc)
1300 {
1301 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1302 struct pcn_txsoft *txs;
1303 uint32_t tmd1, tmd2, tmd;
1304 int i, j;
1305
1306 ifp->if_flags &= ~IFF_OACTIVE;
1307
1308 /*
1309 * Go through our Tx list and free mbufs for those
1310 * frames which have been transmitted.
1311 */
1312 for (i = sc->sc_txsdirty; sc->sc_txsfree != PCN_TXQUEUELEN;
1313 i = PCN_NEXTTXS(i), sc->sc_txsfree++) {
1314 txs = &sc->sc_txsoft[i];
1315
1316 PCN_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
1317 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1318
1319 tmd1 = le32toh(sc->sc_txdescs[txs->txs_lastdesc].tmd1);
1320 if (tmd1 & LE_T1_OWN)
1321 break;
1322
1323 /*
1324 * Slightly annoying -- we have to loop through the
1325 * descriptors we've used looking for ERR, since it
1326 * can appear on any descriptor in the chain.
1327 */
1328 for (j = txs->txs_firstdesc;; j = PCN_NEXTTX(j)) {
1329 tmd = le32toh(sc->sc_txdescs[j].tmd1);
1330 if (tmd & LE_T1_ERR) {
1331 ifp->if_oerrors++;
1332 if (sc->sc_swstyle == LE_B20_SSTYLE_PCNETPCI3)
1333 tmd2 = le32toh(sc->sc_txdescs[j].tmd0);
1334 else
1335 tmd2 = le32toh(sc->sc_txdescs[j].tmd2);
1336 if (tmd2 & LE_T2_UFLO) {
1337 if (sc->sc_xmtsp < LE_C80_XMTSP_MAX) {
1338 sc->sc_xmtsp++;
1339 printf("%s: transmit "
1340 "underrun; new threshold: "
1341 "%s\n",
1342 sc->sc_dev.dv_xname,
1343 sc->sc_xmtsp_desc[
1344 sc->sc_xmtsp]);
1345 pcn_spnd(sc);
1346 pcn_csr_write(sc, LE_CSR80,
1347 LE_C80_RCVFW(sc->sc_rcvfw) |
1348 LE_C80_XMTSP(sc->sc_xmtsp) |
1349 LE_C80_XMTFW(sc->sc_xmtfw));
1350 pcn_csr_write(sc, LE_CSR5,
1351 sc->sc_csr5);
1352 } else {
1353 printf("%s: transmit "
1354 "underrun\n",
1355 sc->sc_dev.dv_xname);
1356 }
1357 } else if (tmd2 & LE_T2_BUFF) {
1358 printf("%s: transmit buffer error\n",
1359 sc->sc_dev.dv_xname);
1360 }
1361 if (tmd2 & LE_T2_LCOL)
1362 ifp->if_collisions++;
1363 if (tmd2 & LE_T2_RTRY)
1364 ifp->if_collisions += 16;
1365 goto next_packet;
1366 }
1367 if (j == txs->txs_lastdesc)
1368 break;
1369 }
1370 if (tmd1 & LE_T1_ONE)
1371 ifp->if_collisions++;
1372 else if (tmd & LE_T1_MORE) {
1373 /* Real number is unknown. */
1374 ifp->if_collisions += 2;
1375 }
1376 ifp->if_opackets++;
1377 next_packet:
1378 sc->sc_txfree += txs->txs_dmamap->dm_nsegs;
1379 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
1380 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1381 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1382 m_freem(txs->txs_mbuf);
1383 txs->txs_mbuf = NULL;
1384 }
1385
1386 /* Update the dirty transmit buffer pointer. */
1387 sc->sc_txsdirty = i;
1388
1389 /*
1390 * If there are no more pending transmissions, cancel the watchdog
1391 * timer.
1392 */
1393 if (sc->sc_txsfree == PCN_TXQUEUELEN)
1394 ifp->if_timer = 0;
1395 }
1396
1397 /*
1398 * pcn_rxintr:
1399 *
1400 * Helper; handle receive interrupts.
1401 */
static int
pcn_rxintr(struct pcn_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct pcn_rxsoft *rxs;
	struct mbuf *m;
	uint32_t rmd1;
	int i, len;

	/*
	 * Walk the receive ring from where we left off, stopping at
	 * the first descriptor the chip still owns.  Returns nonzero
	 * if the caller (pcn_intr) must re-initialize the interface.
	 */
	for (i = sc->sc_rxptr;; i = PCN_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		PCN_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rmd1 = le32toh(sc->sc_rxdescs[i].rmd1);

		/* Chip still owns this one; we've caught up. */
		if (rmd1 & LE_R1_OWN)
			break;

		/*
		 * Check for errors and make sure the packet fit into
		 * a single buffer.  We have structured this block of
		 * code the way it is in order to compress it into
		 * one test in the common case (no error).
		 */
		if (__predict_false((rmd1 & (LE_R1_STP|LE_R1_ENP|LE_R1_ERR)) !=
		    (LE_R1_STP|LE_R1_ENP))) {
			/* Make sure the packet is in a single buffer. */
			if ((rmd1 & (LE_R1_STP|LE_R1_ENP)) !=
			    (LE_R1_STP|LE_R1_ENP)) {
				printf("%s: packet spilled into next buffer\n",
				    sc->sc_dev.dv_xname);
				return (1);	/* pcn_intr() will re-init */
			}

			/*
			 * If the packet had an error, simply recycle the
			 * buffer.
			 */
			if (rmd1 & LE_R1_ERR) {
				ifp->if_ierrors++;
				/*
				 * If we got an overflow error, chances
				 * are there will be a CRC error.  In
				 * this case, just print the overflow
				 * error, and skip the others.
				 */
				if (rmd1 & LE_R1_OFLO)
					printf("%s: overflow error\n",
					    sc->sc_dev.dv_xname);
				else {
#define	PRINTIT(x, str)	\
					if (rmd1 & (x))	\
						printf("%s: %s\n", \
						    sc->sc_dev.dv_xname, str);
					PRINTIT(LE_R1_FRAM, "framing error");
					PRINTIT(LE_R1_CRC, "CRC error");
					PRINTIT(LE_R1_BUFF, "buffer error");
				}
#undef PRINTIT
				PCN_INIT_RXDESC(sc, i);
				continue;
			}
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * No errors; receive the packet.  The byte count lives
		 * in rmd0 for SSTYLE 3 (fields swapped) and in rmd2
		 * otherwise.
		 */
		if (sc->sc_swstyle == LE_B20_SSTYLE_PCNETPCI3)
			len = le32toh(sc->sc_rxdescs[i].rmd0) & LE_R1_BCNT_MASK;
		else
			len = le32toh(sc->sc_rxdescs[i].rmd2) & LE_R1_BCNT_MASK;

		/*
		 * The LANCE family includes the CRC with every packet;
		 * trim it off here.
		 */
		len -= ETHER_CRC_LEN;

		/*
		 * If the packet is small enough to fit in a
		 * single header mbuf, allocate one and copy
		 * the data into it.  This greatly reduces
		 * memory consumption when we receive lots
		 * of small packets.
		 *
		 * Otherwise, we add a new buffer to the receive
		 * chain.  If this fails, we drop the packet and
		 * recycle the old buffer.
		 */
		if (pcn_copy_small != 0 && len <= (MHLEN - 2)) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			/* Offset by 2 to align the IP header. */
			m->m_data += 2;
			memcpy(mtod(m, caddr_t),
			    mtod(rxs->rxs_mbuf, caddr_t), len);
			PCN_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
		} else {
			m = rxs->rxs_mbuf;
			if (pcn_add_rxbuf(sc, i) != 0) {
 dropit:
				/* Drop the packet; recycle the old buffer. */
				ifp->if_ierrors++;
				PCN_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat,
				    rxs->rxs_dmamap, 0,
				    rxs->rxs_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}
		}

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

#if NBPFILTER > 0
		/* Pass this up to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
		ifp->if_ipackets++;
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;
	return (0);
}
1538
1539 /*
1540 * pcn_tick:
1541 *
1542 * One second timer, used to tick the MII.
1543 */
1544 static void
1545 pcn_tick(void *arg)
1546 {
1547 struct pcn_softc *sc = arg;
1548 int s;
1549
1550 s = splnet();
1551 mii_tick(&sc->sc_mii);
1552 splx(s);
1553
1554 callout_reset(&sc->sc_tick_ch, hz, pcn_tick, sc);
1555 }
1556
1557 /*
1558 * pcn_reset:
1559 *
1560 * Perform a soft reset on the PCnet-PCI.
1561 */
1562 static void
1563 pcn_reset(struct pcn_softc *sc)
1564 {
1565
1566 /*
1567 * The PCnet-PCI chip is reset by reading from the
1568 * RESET register. Note that while the NE2100 LANCE
1569 * boards require a write after the read, the PCnet-PCI
1570 * chips do not require this.
1571 *
1572 * Since we don't know if we're in 16-bit or 32-bit
1573 * mode right now, issue both (it's safe) in the
1574 * hopes that one will succeed.
1575 */
1576 (void) bus_space_read_2(sc->sc_st, sc->sc_sh, PCN16_RESET);
1577 (void) bus_space_read_4(sc->sc_st, sc->sc_sh, PCN32_RESET);
1578
1579 /* Wait 1ms for it to finish. */
1580 delay(1000);
1581
1582 /*
1583 * Select 32-bit I/O mode by issuing a 32-bit write to the
1584 * RDP. Since the RAP is 0 after a reset, writing a 0
1585 * to RDP is safe (since it simply clears CSR0).
1586 */
1587 bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RDP, 0);
1588 }
1589
1590 /*
1591 * pcn_init: [ifnet interface function]
1592 *
1593 * Initialize the interface. Must be called at splnet().
1594 */
static int
pcn_init(struct ifnet *ifp)
{
	struct pcn_softc *sc = ifp->if_softc;
	struct pcn_rxsoft *rxs;
	uint8_t *enaddr = LLADDR(ifp->if_sadl);
	int i, error = 0;
	uint32_t reg;

	/* Cancel any pending I/O. */
	pcn_stop(ifp, 0);

	/* Reset the chip to a known state. */
	pcn_reset(sc);

	/*
	 * On the Am79c970, select SSTYLE 2, and SSTYLE 3 on everything
	 * else.
	 *
	 * XXX It'd be really nice to use SSTYLE 2 on all the chips,
	 * because the structure layout is compatible with ILACC,
	 * but the burst mode is only available in SSTYLE 3, and
	 * burst mode should provide some performance enhancement.
	 */
	if (sc->sc_variant->pcv_chipid == PARTID_Am79c970)
		sc->sc_swstyle = LE_B20_SSTYLE_PCNETPCI2;
	else
		sc->sc_swstyle = LE_B20_SSTYLE_PCNETPCI3;
	pcn_bcr_write(sc, LE_BCR20, sc->sc_swstyle);

	/* Initialize the transmit descriptor ring. */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	PCN_CDTXSYNC(sc, 0, PCN_NTXDESC,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = PCN_NTXDESC;
	sc->sc_txnext = 0;

	/* Initialize the transmit job descriptors. */
	for (i = 0; i < PCN_TXQUEUELEN; i++)
		sc->sc_txsoft[i].txs_mbuf = NULL;
	sc->sc_txsfree = PCN_TXQUEUELEN;
	sc->sc_txsnext = 0;
	sc->sc_txsdirty = 0;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	for (i = 0; i < PCN_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = pcn_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				pcn_rxdrain(sc);
				goto out;
			}
		} else
			PCN_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	/* Initialize MODE for the initialization block. */
	sc->sc_mode = 0;
	if (ifp->if_flags & IFF_PROMISC)
		sc->sc_mode |= LE_C15_PROM;
	if ((ifp->if_flags & IFF_BROADCAST) == 0)
		sc->sc_mode |= LE_C15_DRCVBC;

	/*
	 * If we have MII, simply select MII in the MODE register,
	 * and clear ASEL.  Otherwise, let ASEL stand (for now),
	 * and leave PORTSEL alone (it is ignored when ASEL is set).
	 */
	if (sc->sc_flags & PCN_F_HAS_MII) {
		pcn_bcr_write(sc, LE_BCR2,
		    pcn_bcr_read(sc, LE_BCR2) & ~LE_B2_ASEL);
		sc->sc_mode |= LE_C15_PORTSEL(PORTSEL_MII);

		/*
		 * Disable MII auto-negotiation.  We handle that in
		 * our own MII layer.
		 */
		pcn_bcr_write(sc, LE_BCR32,
		    pcn_bcr_read(sc, LE_BCR32) | LE_B32_DANAS);
	}

	/*
	 * Set the Tx and Rx descriptor ring addresses in the init
	 * block, the TLEN and RLEN other fields of the init block
	 * MODE register.  TLEN/RLEN are encoded as log2 of the ring
	 * sizes (hence the ffs() - 1).
	 */
	sc->sc_initblock.init_rdra = htole32(PCN_CDRXADDR(sc, 0));
	sc->sc_initblock.init_tdra = htole32(PCN_CDTXADDR(sc, 0));
	sc->sc_initblock.init_mode = htole32(sc->sc_mode |
	    ((ffs(PCN_NTXDESC) - 1) << 28) |
	    ((ffs(PCN_NRXDESC) - 1) << 20));

	/* Set the station address in the init block. */
	sc->sc_initblock.init_padr[0] = htole32(enaddr[0] |
	    (enaddr[1] << 8) | (enaddr[2] << 16) | (enaddr[3] << 24));
	sc->sc_initblock.init_padr[1] = htole32(enaddr[4] |
	    (enaddr[5] << 8));

	/* Set the multicast filter in the init block. */
	pcn_set_filter(sc);

	/* Initialize CSR3. */
	pcn_csr_write(sc, LE_CSR3, LE_C3_MISSM|LE_C3_IDONM|LE_C3_DXSUFLO);

	/* Initialize CSR4. */
	pcn_csr_write(sc, LE_CSR4, LE_C4_DMAPLUS|LE_C4_APAD_XMT|
	    LE_C4_MFCOM|LE_C4_RCVCCOM|LE_C4_TXSTRTM);

	/* Initialize CSR5. */
	sc->sc_csr5 = LE_C5_LTINTEN|LE_C5_SINTE;
	pcn_csr_write(sc, LE_CSR5, sc->sc_csr5);

	/*
	 * If we have an Am79c971 or greater, initialize CSR7.
	 *
	 * XXX Might be nice to use the MII auto-poll interrupt someday.
	 */
	switch (sc->sc_variant->pcv_chipid) {
	case PARTID_Am79c970:
	case PARTID_Am79c970A:
		/* Not available on these chips. */
		break;

	default:
		pcn_csr_write(sc, LE_CSR7, LE_C7_FASTSPNDE);
		break;
	}

	/*
	 * On the Am79c970A and greater, initialize BCR18 to
	 * enable burst mode.
	 *
	 * Also enable the "no underflow" option on the Am79c971 and
	 * higher, which prevents the chip from generating transmit
	 * underflows, yet still provides decent performance.  Note if
	 * chip is not connected to external SRAM, then we still have
	 * to handle underflow errors (the NOUFLO bit is ignored in
	 * that case).
	 */
	reg = pcn_bcr_read(sc, LE_BCR18);
	switch (sc->sc_variant->pcv_chipid) {
	case PARTID_Am79c970:
		break;

	case PARTID_Am79c970A:
		reg |= LE_B18_BREADE|LE_B18_BWRITE;
		break;

	default:
		reg |= LE_B18_BREADE|LE_B18_BWRITE|LE_B18_NOUFLO;
		break;
	}
	pcn_bcr_write(sc, LE_BCR18, reg);

	/*
	 * Initialize CSR80 (FIFO thresholds for Tx and Rx).
	 */
	pcn_csr_write(sc, LE_CSR80, LE_C80_RCVFW(sc->sc_rcvfw) |
	    LE_C80_XMTSP(sc->sc_xmtsp) | LE_C80_XMTFW(sc->sc_xmtfw));

	/*
	 * Send the init block to the chip, and wait for it
	 * to be processed (IDON set in CSR0, polled for up to
	 * 10000 * 10us).
	 */
	PCN_CDINITSYNC(sc, BUS_DMASYNC_PREWRITE);
	pcn_csr_write(sc, LE_CSR1, PCN_CDINITADDR(sc) & 0xffff);
	pcn_csr_write(sc, LE_CSR2, (PCN_CDINITADDR(sc) >> 16) & 0xffff);
	pcn_csr_write(sc, LE_CSR0, LE_C0_INIT);
	delay(100);
	for (i = 0; i < 10000; i++) {
		if (pcn_csr_read(sc, LE_CSR0) & LE_C0_IDON)
			break;
		delay(10);
	}
	PCN_CDINITSYNC(sc, BUS_DMASYNC_POSTWRITE);
	if (i == 10000) {
		printf("%s: timeout processing init block\n",
		    sc->sc_dev.dv_xname);
		error = EIO;
		goto out;
	}

	/* Set the media. */
	(void) (*sc->sc_mii.mii_media.ifm_change)(ifp);

	/* Enable interrupts and external activity (and ACK IDON). */
	pcn_csr_write(sc, LE_CSR0, LE_C0_INEA|LE_C0_STRT|LE_C0_IDON);

	if (sc->sc_flags & PCN_F_HAS_MII) {
		/* Start the one second MII clock. */
		callout_reset(&sc->sc_tick_ch, hz, pcn_tick, sc);
	}

	/* ...all done! */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error)
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	return (error);
}
1808
1809 /*
1810 * pcn_rxdrain:
1811 *
1812 * Drain the receive queue.
1813 */
1814 static void
1815 pcn_rxdrain(struct pcn_softc *sc)
1816 {
1817 struct pcn_rxsoft *rxs;
1818 int i;
1819
1820 for (i = 0; i < PCN_NRXDESC; i++) {
1821 rxs = &sc->sc_rxsoft[i];
1822 if (rxs->rxs_mbuf != NULL) {
1823 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
1824 m_freem(rxs->rxs_mbuf);
1825 rxs->rxs_mbuf = NULL;
1826 }
1827 }
1828 }
1829
1830 /*
1831 * pcn_stop: [ifnet interface function]
1832 *
1833 * Stop transmission on the interface.
1834 */
1835 static void
1836 pcn_stop(struct ifnet *ifp, int disable)
1837 {
1838 struct pcn_softc *sc = ifp->if_softc;
1839 struct pcn_txsoft *txs;
1840 int i;
1841
1842 if (sc->sc_flags & PCN_F_HAS_MII) {
1843 /* Stop the one second clock. */
1844 callout_stop(&sc->sc_tick_ch);
1845
1846 /* Down the MII. */
1847 mii_down(&sc->sc_mii);
1848 }
1849
1850 /* Stop the chip. */
1851 pcn_csr_write(sc, LE_CSR0, LE_C0_STOP);
1852
1853 /* Release any queued transmit buffers. */
1854 for (i = 0; i < PCN_TXQUEUELEN; i++) {
1855 txs = &sc->sc_txsoft[i];
1856 if (txs->txs_mbuf != NULL) {
1857 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1858 m_freem(txs->txs_mbuf);
1859 txs->txs_mbuf = NULL;
1860 }
1861 }
1862
1863 if (disable)
1864 pcn_rxdrain(sc);
1865
1866 /* Mark the interface as down and cancel the watchdog timer. */
1867 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1868 ifp->if_timer = 0;
1869 }
1870
1871 /*
1872 * pcn_add_rxbuf:
1873 *
1874 * Add a receive buffer to the indicated descriptor.
1875 */
1876 static int
1877 pcn_add_rxbuf(struct pcn_softc *sc, int idx)
1878 {
1879 struct pcn_rxsoft *rxs = &sc->sc_rxsoft[idx];
1880 struct mbuf *m;
1881 int error;
1882
1883 MGETHDR(m, M_DONTWAIT, MT_DATA);
1884 if (m == NULL)
1885 return (ENOBUFS);
1886
1887 MCLGET(m, M_DONTWAIT);
1888 if ((m->m_flags & M_EXT) == 0) {
1889 m_freem(m);
1890 return (ENOBUFS);
1891 }
1892
1893 if (rxs->rxs_mbuf != NULL)
1894 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
1895
1896 rxs->rxs_mbuf = m;
1897
1898 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
1899 m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
1900 BUS_DMA_READ|BUS_DMA_NOWAIT);
1901 if (error) {
1902 printf("%s: can't load rx DMA map %d, error = %d\n",
1903 sc->sc_dev.dv_xname, idx, error);
1904 panic("pcn_add_rxbuf");
1905 }
1906
1907 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1908 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1909
1910 PCN_INIT_RXDESC(sc, idx);
1911
1912 return (0);
1913 }
1914
1915 /*
1916 * pcn_set_filter:
1917 *
1918 * Set up the receive filter.
1919 */
1920 static void
1921 pcn_set_filter(struct pcn_softc *sc)
1922 {
1923 struct ethercom *ec = &sc->sc_ethercom;
1924 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1925 struct ether_multi *enm;
1926 struct ether_multistep step;
1927 uint32_t crc;
1928
1929 /*
1930 * Set up the multicast address filter by passing all multicast
1931 * addresses through a CRC generator, and then using the high
1932 * order 6 bits as an index into the 64-bit logical address
1933 * filter. The high order bits select the word, while the rest
1934 * of the bits select the bit within the word.
1935 */
1936
1937 if (ifp->if_flags & IFF_PROMISC)
1938 goto allmulti;
1939
1940 sc->sc_initblock.init_ladrf[0] =
1941 sc->sc_initblock.init_ladrf[1] =
1942 sc->sc_initblock.init_ladrf[2] =
1943 sc->sc_initblock.init_ladrf[3] = 0;
1944
1945 ETHER_FIRST_MULTI(step, ec, enm);
1946 while (enm != NULL) {
1947 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1948 /*
1949 * We must listen to a range of multicast addresses.
1950 * For now, just accept all multicasts, rather than
1951 * trying to set only those filter bits needed to match
1952 * the range. (At this time, the only use of address
1953 * ranges is for IP multicast routing, for which the
1954 * range is big enough to require all bits set.)
1955 */
1956 goto allmulti;
1957 }
1958
1959 crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
1960
1961 /* Just want the 6 most significant bits. */
1962 crc >>= 26;
1963
1964 /* Set the corresponding bit in the filter. */
1965 sc->sc_initblock.init_ladrf[crc >> 4] |=
1966 htole16(1 << (crc & 0xf));
1967
1968 ETHER_NEXT_MULTI(step, enm);
1969 }
1970
1971 ifp->if_flags &= ~IFF_ALLMULTI;
1972 return;
1973
1974 allmulti:
1975 ifp->if_flags |= IFF_ALLMULTI;
1976 sc->sc_initblock.init_ladrf[0] =
1977 sc->sc_initblock.init_ladrf[1] =
1978 sc->sc_initblock.init_ladrf[2] =
1979 sc->sc_initblock.init_ladrf[3] = 0xffff;
1980 }
1981
1982 /*
1983 * pcn_79c970_mediainit:
1984 *
1985 * Initialize media for the Am79c970.
1986 */
1987 static void
1988 pcn_79c970_mediainit(struct pcn_softc *sc)
1989 {
1990 const char *sep = "";
1991
1992 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, pcn_79c970_mediachange,
1993 pcn_79c970_mediastatus);
1994
1995 #define ADD(str, m, d) \
1996 do { \
1997 printf("%s%s", sep, str); \
1998 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(m), (d), NULL); \
1999 sep = ", "; \
2000 } while (/*CONSTCOND*/0)
2001
2002 printf("%s: ", sc->sc_dev.dv_xname);
2003 ADD("10base5", IFM_10_5, PORTSEL_AUI);
2004 if (sc->sc_variant->pcv_chipid == PARTID_Am79c970A)
2005 ADD("10base5-FDX", IFM_10_5|IFM_FDX, PORTSEL_AUI);
2006 ADD("10baseT", IFM_10_T, PORTSEL_10T);
2007 if (sc->sc_variant->pcv_chipid == PARTID_Am79c970A)
2008 ADD("10baseT-FDX", IFM_10_T|IFM_FDX, PORTSEL_10T);
2009 ADD("auto", IFM_AUTO, 0);
2010 if (sc->sc_variant->pcv_chipid == PARTID_Am79c970A)
2011 ADD("auto-FDX", IFM_AUTO|IFM_FDX, 0);
2012 printf("\n");
2013
2014 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2015 }
2016
2017 /*
2018 * pcn_79c970_mediastatus: [ifmedia interface function]
2019 *
2020 * Get the current interface media status (Am79c970 version).
2021 */
2022 static void
2023 pcn_79c970_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2024 {
2025 struct pcn_softc *sc = ifp->if_softc;
2026
2027 /*
2028 * The currently selected media is always the active media.
2029 * Note: We have no way to determine what media the AUTO
2030 * process picked.
2031 */
2032 ifmr->ifm_active = sc->sc_mii.mii_media.ifm_media;
2033 }
2034
2035 /*
2036 * pcn_79c970_mediachange: [ifmedia interface function]
2037 *
2038 * Set hardware to newly-selected media (Am79c970 version).
2039 */
2040 static int
2041 pcn_79c970_mediachange(struct ifnet *ifp)
2042 {
2043 struct pcn_softc *sc = ifp->if_softc;
2044 uint32_t reg;
2045
2046 if (IFM_SUBTYPE(sc->sc_mii.mii_media.ifm_media) == IFM_AUTO) {
2047 /*
2048 * CSR15:PORTSEL doesn't matter. Just set BCR2:ASEL.
2049 */
2050 reg = pcn_bcr_read(sc, LE_BCR2);
2051 reg |= LE_B2_ASEL;
2052 pcn_bcr_write(sc, LE_BCR2, reg);
2053 } else {
2054 /*
2055 * Clear BCR2:ASEL and set the new CSR15:PORTSEL value.
2056 */
2057 reg = pcn_bcr_read(sc, LE_BCR2);
2058 reg &= ~LE_B2_ASEL;
2059 pcn_bcr_write(sc, LE_BCR2, reg);
2060
2061 reg = pcn_csr_read(sc, LE_CSR15);
2062 reg = (reg & ~LE_C15_PORTSEL(PORTSEL_MASK)) |
2063 LE_C15_PORTSEL(sc->sc_mii.mii_media.ifm_cur->ifm_data);
2064 pcn_csr_write(sc, LE_CSR15, reg);
2065 }
2066
2067 if ((sc->sc_mii.mii_media.ifm_media & IFM_FDX) != 0) {
2068 reg = LE_B9_FDEN;
2069 if (IFM_SUBTYPE(sc->sc_mii.mii_media.ifm_media) == IFM_10_5)
2070 reg |= LE_B9_AUIFD;
2071 pcn_bcr_write(sc, LE_BCR9, reg);
2072 } else
2073 pcn_bcr_write(sc, LE_BCR9, 0);
2074
2075 return (0);
2076 }
2077
2078 /*
2079 * pcn_79c971_mediainit:
2080 *
2081 * Initialize media for the Am79c971.
2082 */
2083 static void
2084 pcn_79c971_mediainit(struct pcn_softc *sc)
2085 {
2086 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2087
2088 /* We have MII. */
2089 sc->sc_flags |= PCN_F_HAS_MII;
2090
2091 /*
2092 * The built-in 10BASE-T interface is mapped to the MII
2093 * on the PCNet-FAST. Unfortunately, there's no EEPROM
2094 * word that tells us which PHY to use.
2095 * This driver used to ignore all but the first PHY to
2096 * answer, but this code was removed to support multiple
2097 * external PHYs. As the default instance will be the first
2098 * one to answer, no harm is done by letting the possibly
2099 * non-connected internal PHY show up.
2100 */
2101
2102 /* Initialize our media structures and probe the MII. */
2103 sc->sc_mii.mii_ifp = ifp;
2104 sc->sc_mii.mii_readreg = pcn_mii_readreg;
2105 sc->sc_mii.mii_writereg = pcn_mii_writereg;
2106 sc->sc_mii.mii_statchg = pcn_mii_statchg;
2107 ifmedia_init(&sc->sc_mii.mii_media, 0, pcn_79c971_mediachange,
2108 pcn_79c971_mediastatus);
2109
2110 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
2111 MII_OFFSET_ANY, 0);
2112 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
2113 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
2114 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
2115 } else
2116 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2117 }
2118
2119 /*
2120 * pcn_79c971_mediastatus: [ifmedia interface function]
2121 *
2122 * Get the current interface media status (Am79c971 version).
2123 */
2124 static void
2125 pcn_79c971_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2126 {
2127 struct pcn_softc *sc = ifp->if_softc;
2128
2129 mii_pollstat(&sc->sc_mii);
2130 ifmr->ifm_status = sc->sc_mii.mii_media_status;
2131 ifmr->ifm_active = sc->sc_mii.mii_media_active;
2132 }
2133
2134 /*
2135 * pcn_79c971_mediachange: [ifmedia interface function]
2136 *
2137 * Set hardware to newly-selected media (Am79c971 version).
2138 */
2139 static int
2140 pcn_79c971_mediachange(struct ifnet *ifp)
2141 {
2142 struct pcn_softc *sc = ifp->if_softc;
2143
2144 if (ifp->if_flags & IFF_UP)
2145 mii_mediachg(&sc->sc_mii);
2146 return (0);
2147 }
2148
2149 /*
2150 * pcn_mii_readreg: [mii interface function]
2151 *
2152 * Read a PHY register on the MII.
2153 */
2154 static int
2155 pcn_mii_readreg(struct device *self, int phy, int reg)
2156 {
2157 struct pcn_softc *sc = (void *) self;
2158 uint32_t rv;
2159
2160 pcn_bcr_write(sc, LE_BCR33, reg | (phy << PHYAD_SHIFT));
2161 rv = pcn_bcr_read(sc, LE_BCR34) & LE_B34_MIIMD;
2162 if (rv == 0xffff)
2163 return (0);
2164
2165 return (rv);
2166 }
2167
2168 /*
2169 * pcn_mii_writereg: [mii interface function]
2170 *
2171 * Write a PHY register on the MII.
2172 */
2173 static void
2174 pcn_mii_writereg(struct device *self, int phy, int reg, int val)
2175 {
2176 struct pcn_softc *sc = (void *) self;
2177
2178 pcn_bcr_write(sc, LE_BCR33, reg | (phy << PHYAD_SHIFT));
2179 pcn_bcr_write(sc, LE_BCR34, val);
2180 }
2181
2182 /*
2183 * pcn_mii_statchg: [mii interface function]
2184 *
2185 * Callback from MII layer when media changes.
2186 */
2187 static void
2188 pcn_mii_statchg(struct device *self)
2189 {
2190 struct pcn_softc *sc = (void *) self;
2191
2192 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0)
2193 pcn_bcr_write(sc, LE_BCR9, LE_B9_FDEN);
2194 else
2195 pcn_bcr_write(sc, LE_BCR9, 0);
2196 }
2197