/*	$NetBSD: if_pcn.c,v 1.1 2001/08/27 19:42:18 thorpej Exp $	*/
2
3 /*
4 * Copyright 2001 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Device driver for the AMD PCnet-PCI series of Ethernet
40 * chips:
41 *
42 * * Am79c970 PCnet-PCI Single-Chip Ethernet Controller for PCI
43 * Local Bus
44 *
45 * * Am79c970A PCnet-PCI II Single-Chip Full-Duplex Ethernet Controller
46 * for PCI Local Bus
47 *
48 * * Am79c971 PCnet-FAST Single-Chip Full-Duplex 10/100Mbps
49 * Ethernet Controller for PCI Local Bus
50 *
51 * * Am79c972 PCnet-FAST+ Enhanced 10/100Mbps PCI Ethernet Controller
52 * with OnNow Support
53 *
54 * * Am79c973/Am79c975 PCnet-FAST III Single-Chip 10/100Mbps PCI
55 * Ethernet Controller with Integrated PHY
56 *
57 * This also supports the virtual PCnet-PCI Ethernet interface found
58 * in VMware.
59 *
60 * TODO:
61 *
62 * * Split this into bus-specific and bus-independent portions.
63 * The core could also be used for the ILACC (Am79900) 32-bit
64 * Ethernet chip (XXX only if we use an ILACC-compatible SWSTYLE).
65 */
66
67 #include "bpfilter.h"
68
69 #include <sys/param.h>
70 #include <sys/systm.h>
71 #include <sys/callout.h>
72 #include <sys/mbuf.h>
73 #include <sys/malloc.h>
74 #include <sys/kernel.h>
75 #include <sys/socket.h>
76 #include <sys/ioctl.h>
77 #include <sys/errno.h>
78 #include <sys/device.h>
79 #include <sys/queue.h>
80
81 #include <uvm/uvm_extern.h> /* for PAGE_SIZE */
82
83 #include <net/if.h>
84 #include <net/if_dl.h>
85 #include <net/if_media.h>
86 #include <net/if_ether.h>
87
88 #if NBPFILTER > 0
89 #include <net/bpf.h>
90 #endif
91
92 #include <machine/bus.h>
93 #include <machine/intr.h>
94 #include <machine/endian.h>
95
96 #include <dev/mii/mii.h>
97 #include <dev/mii/miivar.h>
98
99 #include <dev/ic/am79900reg.h>
100 #include <dev/ic/lancereg.h>
101
102 #include <dev/pci/pcireg.h>
103 #include <dev/pci/pcivar.h>
104 #include <dev/pci/pcidevs.h>
105
106 #include <dev/pci/if_pcnreg.h>
107
108 /*
109 * Transmit descriptor list size. This is arbitrary, but allocate
110 * enough descriptors for 128 pending transmissions, and 4 segments
111 * per packet. This MUST work out to a power of 2.
112 *
113 * NOTE: We can't have any more than 512 Tx descriptors, SO BE CAREFUL!
114 *
115 * So we play a little trick here. We give each packet up to 8
116 * DMA segments, but only allocate 4 DMA segments per packet.
117 * The transmit logic can deal with this, we just are hoping to
118 * sneak by.
119 */
120 #define PCN_NTXSEGS 8
121 #define PCN_NTXSEGS_ALLOC 4
122
123 #define PCN_TXQUEUELEN 128
124 #define PCN_TXQUEUELEN_MASK (PCN_TXQUEUELEN - 1)
125 #define PCN_NTXDESC (PCN_TXQUEUELEN * PCN_NTXSEGS_ALLOC)
126 #define PCN_NTXDESC_MASK (PCN_NTXDESC - 1)
127 #define PCN_NEXTTX(x) (((x) + 1) & PCN_NTXDESC_MASK)
128 #define PCN_NEXTTXS(x) (((x) + 1) & PCN_TXQUEUELEN_MASK)
129
130 /* Tx interrupt every N + 1 packets. */
131 #define PCN_TXINTR_MASK 7
132
133 /*
134 * Receive descriptor list size. We have one Rx buffer per incoming
135 * packet, so this logic is a little simpler.
136 */
137 #define PCN_NRXDESC 128
138 #define PCN_NRXDESC_MASK (PCN_NRXDESC - 1)
139 #define PCN_NEXTRX(x) (((x) + 1) & PCN_NRXDESC_MASK)
140
141 /*
142 * Control structures are DMA'd to the PCnet chip. We allocate them in
143 * a single clump that maps to a single DMA segment to make several things
144 * easier.
145 */
146 struct pcn_control_data {
147 /* The transmit descriptors. */
148 struct letmd pcd_txdescs[PCN_NTXDESC];
149
150 /* The receive descriptors. */
151 struct lermd pcd_rxdescs[PCN_NRXDESC];
152
153 /* The init block. */
154 struct leinit pcd_initblock;
155 };
156
157 #define PCN_CDOFF(x) offsetof(struct pcn_control_data, x)
158 #define PCN_CDTXOFF(x) PCN_CDOFF(pcd_txdescs[(x)])
159 #define PCN_CDRXOFF(x) PCN_CDOFF(pcd_rxdescs[(x)])
160 #define PCN_CDINITOFF PCN_CDOFF(pcd_initblock)
161
162 /*
163 * Software state for transmit jobs.
164 */
165 struct pcn_txsoft {
166 struct mbuf *txs_mbuf; /* head of our mbuf chain */
167 bus_dmamap_t txs_dmamap; /* our DMA map */
168 int txs_firstdesc; /* first descriptor in packet */
169 int txs_lastdesc; /* last descriptor in packet */
170 };
171
172 /*
173 * Software state for receive jobs.
174 */
175 struct pcn_rxsoft {
176 struct mbuf *rxs_mbuf; /* head of our mbuf chain */
177 bus_dmamap_t rxs_dmamap; /* our DMA map */
178 };
179
180 /*
181 * Description of Rx FIFO watermarks for various revisions.
182 */
183 const char *pcn_79c970_rcvfw[] = {
184 "16 bytes",
185 "64 bytes",
186 "128 bytes",
187 NULL,
188 };
189
190 const char *pcn_79c971_rcvfw[] = {
191 "16 bytes",
192 "64 bytes",
193 "112 bytes",
194 NULL,
195 };
196
197 /*
198 * Description of Tx start points for various revisions.
199 */
200 const char *pcn_79c970_xmtsp[] = {
201 "8 bytes",
202 "64 bytes",
203 "128 bytes",
204 "248 bytes",
205 };
206
207 const char *pcn_79c971_xmtsp[] = {
208 "20 bytes",
209 "64 bytes",
210 "128 bytes",
211 "248 bytes",
212 };
213
214 const char *pcn_79c971_xmtsp_sram[] = {
215 "44 bytes",
216 "64 bytes",
217 "128 bytes",
218 "store-and-forward",
219 };
220
221 /*
222 * Description of Tx FIFO watermarks for various revisions.
223 */
224 const char *pcn_79c970_xmtfw[] = {
225 "16 bytes",
226 "64 bytes",
227 "128 bytes",
228 NULL,
229 };
230
231 const char *pcn_79c971_xmtfw[] = {
232 "16 bytes",
233 "64 bytes",
234 "108 bytes",
235 NULL,
236 };
237
238 /*
239 * Software state per device.
240 */
241 struct pcn_softc {
242 struct device sc_dev; /* generic device information */
243 bus_space_tag_t sc_st; /* bus space tag */
244 bus_space_handle_t sc_sh; /* bus space handle */
245 bus_dma_tag_t sc_dmat; /* bus DMA tag */
246 struct ethercom sc_ethercom; /* Ethernet common data */
247 void *sc_sdhook; /* shutdown hook */
248
249 /* Points to our media routines, etc. */
250 const struct pcn_variant *sc_variant;
251
252 void *sc_ih; /* interrupt cookie */
253
254 struct mii_data sc_mii; /* MII/media information */
255
256 struct callout sc_tick_ch; /* tick callout */
257
258 bus_dmamap_t sc_cddmamap; /* control data DMA map */
259 #define sc_cddma sc_cddmamap->dm_segs[0].ds_addr
260
261 /* Software state for transmit and receive descriptors. */
262 struct pcn_txsoft sc_txsoft[PCN_TXQUEUELEN];
263 struct pcn_rxsoft sc_rxsoft[PCN_NRXDESC];
264
265 /* Control data structures */
266 struct pcn_control_data *sc_control_data;
267 #define sc_txdescs sc_control_data->pcd_txdescs
268 #define sc_rxdescs sc_control_data->pcd_rxdescs
269 #define sc_initblock sc_control_data->pcd_initblock
270
271 #ifdef PCN_EVENT_COUNTERS
272 /* Event counters. */
273 struct evcnt sc_ev_txsstall; /* Tx stalled due to no txs */
274 struct evcnt sc_ev_txdstall; /* Tx stalled due to no txd */
275 struct evcnt sc_ev_txintr; /* Tx interrupts */
276 struct evcnt sc_ev_rxintr; /* Rx interrupts */
277 struct evcnt sc_ev_babl; /* BABL in pcn_intr() */
278 struct evcnt sc_ev_miss; /* MISS in pcn_intr() */
279 struct evcnt sc_ev_merr; /* MERR in pcn_intr() */
280
281 struct evcnt sc_ev_txseg1; /* Tx packets w/ 1 segment */
282 struct evcnt sc_ev_txseg2; /* Tx packets w/ 2 segments */
283 struct evcnt sc_ev_txseg3; /* Tx packets w/ 3 segments */
284 struct evcnt sc_ev_txseg4; /* Tx packets w/ 4 segments */
285 struct evcnt sc_ev_txseg5; /* Tx packets w/ 5 segments */
286 struct evcnt sc_ev_txsegmore; /* Tx packets w/ more than 5 segments */
287 struct evcnt sc_ev_txcopy; /* Tx copies required */
288 #endif /* PCN_EVENT_COUNTERS */
289
290 const char **sc_rcvfw_desc; /* Rx FIFO watermark info */
291 int sc_rcvfw;
292
293 const char **sc_xmtsp_desc; /* Tx start point info */
294 int sc_xmtsp;
295
296 const char **sc_xmtfw_desc; /* Tx FIFO watermark info */
297 int sc_xmtfw;
298
299 int sc_flags; /* misc. flags; see below */
300 int sc_swstyle; /* the software style in use */
301
302 int sc_txfree; /* number of free Tx descriptors */
303 int sc_txnext; /* next ready Tx descriptor */
304
305 int sc_txsfree; /* number of free Tx jobs */
306 int sc_txsnext; /* next free Tx job */
307 int sc_txsdirty; /* dirty Tx jobs */
308
309 int sc_rxptr; /* next ready Rx descriptor/job */
310
311 uint32_t sc_csr5; /* prototype CSR5 register */
312 uint32_t sc_mode; /* prototype MODE register */
313 int sc_phyaddr; /* PHY address */
314 };
315
316 /* sc_flags */
317 #define PCN_F_HAS_MII 0x0001 /* has MII */
318
319 #ifdef PCN_EVENT_COUNTERS
320 #define PCN_EVCNT_INCR(ev) (ev)->ev_count++
321 #else
322 #define PCN_EVCNT_INCR(ev) /* nothing */
323 #endif
324
325 #define PCN_CDTXADDR(sc, x) ((sc)->sc_cddma + PCN_CDTXOFF((x)))
326 #define PCN_CDRXADDR(sc, x) ((sc)->sc_cddma + PCN_CDRXOFF((x)))
327 #define PCN_CDINITADDR(sc) ((sc)->sc_cddma + PCN_CDINITOFF)
328
329 #define PCN_CDTXSYNC(sc, x, n, ops) \
330 do { \
331 int __x, __n; \
332 \
333 __x = (x); \
334 __n = (n); \
335 \
336 /* If it will wrap around, sync to the end of the ring. */ \
337 if ((__x + __n) > PCN_NTXDESC) { \
338 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
339 PCN_CDTXOFF(__x), sizeof(struct letmd) * \
340 (PCN_NTXDESC - __x), (ops)); \
341 __n -= (PCN_NTXDESC - __x); \
342 __x = 0; \
343 } \
344 \
345 /* Now sync whatever is left. */ \
346 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
347 PCN_CDTXOFF(__x), sizeof(struct letmd) * __n, (ops)); \
348 } while (/*CONSTCOND*/0)
349
350 #define PCN_CDRXSYNC(sc, x, ops) \
351 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
352 PCN_CDRXOFF((x)), sizeof(struct lermd), (ops))
353
354 #define PCN_CDINITSYNC(sc, ops) \
355 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
356 PCN_CDINITOFF, sizeof(struct leinit), (ops))
357
358 #define PCN_INIT_RXDESC(sc, x) \
359 do { \
360 struct pcn_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \
361 struct lermd *__rmd = &(sc)->sc_rxdescs[(x)]; \
362 struct mbuf *__m = __rxs->rxs_mbuf; \
363 \
364 /* \
365 * Note: We scoot the packet forward 2 bytes in the buffer \
366 * so that the payload after the Ethernet header is aligned \
367 * to a 4-byte boundary. \
368 */ \
369 __m->m_data = __m->m_ext.ext_buf + 2; \
370 \
371 if ((sc)->sc_swstyle == LE_B20_SSTYLE_PCNETPCI3) { \
372 __rmd->rmd2 = \
373 htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2); \
374 __rmd->rmd0 = 0; \
375 } else { \
376 __rmd->rmd2 = 0; \
377 __rmd->rmd0 = \
378 htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2); \
379 } \
380 __rmd->rmd1 = htole32(LE_R1_OWN|LE_R1_ONES| \
381 (LE_BCNT(MCLBYTES - 2) & LE_R1_BCNT_MASK)); \
382 PCN_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);\
383 } while(/*CONSTCOND*/0)
384
385 void pcn_start(struct ifnet *);
386 void pcn_watchdog(struct ifnet *);
387 int pcn_ioctl(struct ifnet *, u_long, caddr_t);
388 int pcn_init(struct ifnet *);
389 void pcn_stop(struct ifnet *, int);
390
391 void pcn_shutdown(void *);
392
393 void pcn_reset(struct pcn_softc *);
394 void pcn_rxdrain(struct pcn_softc *);
395 int pcn_add_rxbuf(struct pcn_softc *, int);
396 void pcn_tick(void *);
397
398 void pcn_spnd(struct pcn_softc *);
399
400 void pcn_set_filter(struct pcn_softc *);
401
402 int pcn_intr(void *);
403 void pcn_txintr(struct pcn_softc *);
404 int pcn_rxintr(struct pcn_softc *);
405
406 int pcn_mii_readreg(struct device *, int, int);
407 void pcn_mii_writereg(struct device *, int, int, int);
408 void pcn_mii_statchg(struct device *);
409
410 void pcn_79c970_mediainit(struct pcn_softc *);
411 int pcn_79c970_mediachange(struct ifnet *);
412 void pcn_79c970_mediastatus(struct ifnet *, struct ifmediareq *);
413
414 void pcn_79c971_mediainit(struct pcn_softc *);
415 int pcn_79c971_mediachange(struct ifnet *);
416 void pcn_79c971_mediastatus(struct ifnet *, struct ifmediareq *);
417
418 /*
419 * Description of a PCnet-PCI variant. Used to select media access
420 * method, mostly, and to print a nice description of the chip.
421 */
422 const struct pcn_variant {
423 const char *pcv_desc;
424 void (*pcv_mediainit)(struct pcn_softc *);
425 uint16_t pcv_chipid;
426 } pcn_variants[] = {
427 { "Am79c970 PCnet-PCI",
428 pcn_79c970_mediainit,
429 PARTID_Am79c970 },
430
431 { "Am79c970A PCnet-PCI II",
432 pcn_79c970_mediainit,
433 PARTID_Am79c970A },
434
435 { "Am79c971 PCnet-FAST",
436 pcn_79c971_mediainit,
437 PARTID_Am79c971 },
438
439 { "Am79c972 PCnet-FAST+",
440 pcn_79c971_mediainit,
441 PARTID_Am79c972 },
442
443 { "Am79c973 PCnet-FAST III",
444 pcn_79c971_mediainit,
445 PARTID_Am79c973 },
446
447 { "Am79c975 PCnet-FAST III",
448 pcn_79c971_mediainit,
449 PARTID_Am79c975 },
450
451 { "Unknown PCnet-PCI variant",
452 pcn_79c971_mediainit,
453 0 },
454 };
455
456 int pcn_copy_small = 0;
457
458 int pcn_match(struct device *, struct cfdata *, void *);
459 void pcn_attach(struct device *, struct device *, void *);
460
461 struct cfattach pcn_ca = {
462 sizeof(struct pcn_softc), pcn_match, pcn_attach,
463 };
464
465 /*
466 * Routines to read and write the PCnet-PCI CSR/BCR space.
467 */
468
469 static __inline uint32_t
470 pcn_csr_read(struct pcn_softc *sc, int reg)
471 {
472
473 bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RAP, reg);
474 return (bus_space_read_4(sc->sc_st, sc->sc_sh, PCN32_RDP));
475 }
476
477 static __inline void
478 pcn_csr_write(struct pcn_softc *sc, int reg, uint32_t val)
479 {
480
481 bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RAP, reg);
482 bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RDP, val);
483 }
484
485 static __inline uint32_t
486 pcn_bcr_read(struct pcn_softc *sc, int reg)
487 {
488
489 bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RAP, reg);
490 return (bus_space_read_4(sc->sc_st, sc->sc_sh, PCN32_BDP));
491 }
492
493 static __inline void
494 pcn_bcr_write(struct pcn_softc *sc, int reg, uint32_t val)
495 {
496
497 bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RAP, reg);
498 bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_BDP, val);
499 }
500
501 static const struct pcn_variant *
502 pcn_lookup_variant(uint16_t chipid)
503 {
504 const struct pcn_variant *pcv;
505
506 for (pcv = pcn_variants; pcv->pcv_chipid != 0; pcv++) {
507 if (chipid == pcv->pcv_chipid)
508 return (pcv);
509 }
510
511 /*
512 * This covers unknown chips, which we simply treat like
513 * a generic PCnet-FAST.
514 */
515 return (pcv);
516 }
517
518 int
519 pcn_match(struct device *parent, struct cfdata *cf, void *aux)
520 {
521 struct pci_attach_args *pa = aux;
522
523 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_AMD)
524 return (0);
525
526 switch (PCI_PRODUCT(pa->pa_id)) {
527 case PCI_PRODUCT_AMD_PCNET_PCI:
528 /* Beat if_le_pci.c */
529 return (10);
530 }
531
532 return (0);
533 }
534
535 void
536 pcn_attach(struct device *parent, struct device *self, void *aux)
537 {
538 struct pcn_softc *sc = (struct pcn_softc *) self;
539 struct pci_attach_args *pa = aux;
540 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
541 pci_chipset_tag_t pc = pa->pa_pc;
542 pci_intr_handle_t ih;
543 const char *intrstr = NULL;
544 bus_space_tag_t iot;
545 bus_space_handle_t ioh;
546 bus_dma_segment_t seg;
547 int ioh_valid;
548 int i, rseg, error;
549 pcireg_t pmode;
550 uint32_t chipid, reg;
551 uint8_t enaddr[ETHER_ADDR_LEN];
552 int pmreg;
553
554 callout_init(&sc->sc_tick_ch);
555
556 printf(": AMD PCnet-PCI Ethernet\n");
557
558 /*
559 * Map the device.
560 */
561 ioh_valid = (pci_mapreg_map(pa, PCN_PCI_CBIO, PCI_MAPREG_TYPE_IO, 0,
562 &iot, &ioh, NULL, NULL) == 0);
563
564 if (ioh_valid) {
565 sc->sc_st = iot;
566 sc->sc_sh = ioh;
567 } else {
568 printf("%s: unable to map device registers\n",
569 sc->sc_dev.dv_xname);
570 return;
571 }
572
573 sc->sc_dmat = pa->pa_dmat;
574
575 /* Make sure bus mastering is enabled. */
576 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
577 pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) |
578 PCI_COMMAND_MASTER_ENABLE);
579
580 /* Get it out of power save mode, if needed. */
581 if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
582 pmode = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3;
583 if (pmode == 3) {
584 /*
585 * The card has lost all configuration data in
586 * this state, so punt.
587 */
588 printf("%s: unable to wake from power state D3\n",
589 sc->sc_dev.dv_xname);
590 return;
591 }
592 if (pmode != 0) {
593 printf("%s: waking up from power date D%d\n",
594 sc->sc_dev.dv_xname, pmode);
595 pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0);
596 }
597 }
598
599 /*
600 * Reset the chip to a known state. This also puts the
601 * chip into 32-bit mode.
602 */
603 pcn_reset(sc);
604
605 /*
606 * Read the Ethernet address from the EEPROM.
607 */
608 for (i = 0; i < ETHER_ADDR_LEN; i++)
609 enaddr[i] = bus_space_read_1(sc->sc_st, sc->sc_sh,
610 PCN32_APROM + i);
611
612 /*
613 * Now that the device is mapped, attempt to figure out what
614 * kind of chip we have. Note that IDL has all 32 bits of
615 * the chip ID when we're in 32-bit mode.
616 */
617 chipid = pcn_csr_read(sc, LE_CSR88);
618 sc->sc_variant = pcn_lookup_variant(CHIPID_PARTID(chipid));
619
620 printf("%s: %s rev %d, Ethernet address %s\n",
621 sc->sc_dev.dv_xname, sc->sc_variant->pcv_desc, CHIPID_VER(chipid),
622 ether_sprintf(enaddr));
623
624 /*
625 * Map and establish our interrupt.
626 */
627 if (pci_intr_map(pa, &ih)) {
628 printf("%s: unable to map interrupt\n", sc->sc_dev.dv_xname);
629 return;
630 }
631 intrstr = pci_intr_string(pc, ih);
632 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, pcn_intr, sc);
633 if (sc->sc_ih == NULL) {
634 printf("%s: unable to establish interrupt",
635 sc->sc_dev.dv_xname);
636 if (intrstr != NULL)
637 printf(" at %s", intrstr);
638 printf("\n");
639 return;
640 }
641 printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);
642
643 /*
644 * Allocate the control data structures, and create and load the
645 * DMA map for it.
646 */
647 if ((error = bus_dmamem_alloc(sc->sc_dmat,
648 sizeof(struct pcn_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
649 0)) != 0) {
650 printf("%s: unable to allocate control data, error = %d\n",
651 sc->sc_dev.dv_xname, error);
652 goto fail_0;
653 }
654
655 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
656 sizeof(struct pcn_control_data), (caddr_t *)&sc->sc_control_data,
657 BUS_DMA_COHERENT)) != 0) {
658 printf("%s: unable to map control data, error = %d\n",
659 sc->sc_dev.dv_xname, error);
660 goto fail_1;
661 }
662
663 if ((error = bus_dmamap_create(sc->sc_dmat,
664 sizeof(struct pcn_control_data), 1,
665 sizeof(struct pcn_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
666 printf("%s: unable to create control data DMA map, "
667 "error = %d\n", sc->sc_dev.dv_xname, error);
668 goto fail_2;
669 }
670
671 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
672 sc->sc_control_data, sizeof(struct pcn_control_data), NULL,
673 0)) != 0) {
674 printf("%s: unable to load control data DMA map, error = %d\n",
675 sc->sc_dev.dv_xname, error);
676 goto fail_3;
677 }
678
679 /* Create the transmit buffer DMA maps. */
680 for (i = 0; i < PCN_TXQUEUELEN; i++) {
681 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
682 PCN_NTXSEGS, MCLBYTES, 0, 0,
683 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
684 printf("%s: unable to create tx DMA map %d, "
685 "error = %d\n", sc->sc_dev.dv_xname, i, error);
686 goto fail_4;
687 }
688 }
689
690 /* Create the receive buffer DMA maps. */
691 for (i = 0; i < PCN_NRXDESC; i++) {
692 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
693 MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
694 printf("%s: unable to create rx DMA map %d, "
695 "error = %d\n", sc->sc_dev.dv_xname, i, error);
696 goto fail_5;
697 }
698 sc->sc_rxsoft[i].rxs_mbuf = NULL;
699 }
700
701 /* Initialize our media structures. */
702 (*sc->sc_variant->pcv_mediainit)(sc);
703
704 /*
705 * Initialize FIFO watermark info.
706 */
707 switch (sc->sc_variant->pcv_chipid) {
708 case PARTID_Am79c970:
709 case PARTID_Am79c970A:
710 sc->sc_rcvfw_desc = pcn_79c970_rcvfw;
711 sc->sc_xmtsp_desc = pcn_79c970_xmtsp;
712 sc->sc_xmtfw_desc = pcn_79c970_xmtfw;
713 break;
714
715 default:
716 sc->sc_rcvfw_desc = pcn_79c971_rcvfw;
717 /*
718 * Read BCR25 to determine how much SRAM is
719 * on the board. If > 0, then we the chip
720 * uses different Start Point thresholds.
721 *
722 * Note BCR25 and BCR26 are loaded from the
723 * EEPROM on RST, and unaffected by S_RESET,
724 * so we don't really have to worry about
725 * them except for this.
726 */
727 reg = pcn_bcr_read(sc, LE_BCR25) & 0x00ff;
728 if (reg != 0)
729 sc->sc_xmtsp_desc = pcn_79c971_xmtsp_sram;
730 else
731 sc->sc_xmtsp_desc = pcn_79c971_xmtsp;
732 sc->sc_xmtfw_desc = pcn_79c971_xmtfw;
733 break;
734 }
735
736 /*
737 * Set up defaults -- see the tables above for what these
738 * values mean.
739 *
740 * XXX How should we tune RCVFW and XMTFW?
741 */
742 sc->sc_rcvfw = 1; /* minimum for full-duplex */
743 sc->sc_xmtsp = 1;
744 sc->sc_xmtfw = 0;
745
746 ifp = &sc->sc_ethercom.ec_if;
747 strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
748 ifp->if_softc = sc;
749 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
750 ifp->if_ioctl = pcn_ioctl;
751 ifp->if_start = pcn_start;
752 ifp->if_watchdog = pcn_watchdog;
753 ifp->if_init = pcn_init;
754 ifp->if_stop = pcn_stop;
755 IFQ_SET_READY(&ifp->if_snd);
756
757 /* Attach the interface. */
758 if_attach(ifp);
759 ether_ifattach(ifp, enaddr);
760
761 #ifdef PCN_EVENT_COUNTERS
762 /* Attach event counters. */
763 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
764 NULL, sc->sc_dev.dv_xname, "txsstall");
765 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
766 NULL, sc->sc_dev.dv_xname, "txdstall");
767 evcnt_attach_dynamic(&sc->sc_ev_txintr, EVCNT_TYPE_INTR,
768 NULL, sc->sc_dev.dv_xname, "txintr");
769 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
770 NULL, sc->sc_dev.dv_xname, "rxintr");
771 evcnt_attach_dynamic(&sc->sc_ev_babl, EVCNT_TYPE_MISC,
772 NULL, sc->sc_dev.dv_xname, "babl");
773 evcnt_attach_dynamic(&sc->sc_ev_miss, EVCNT_TYPE_MISC,
774 NULL, sc->sc_dev.dv_xname, "miss");
775 evcnt_attach_dynamic(&sc->sc_ev_merr, EVCNT_TYPE_MISC,
776 NULL, sc->sc_dev.dv_xname, "merr");
777
778 evcnt_attach_dynamic(&sc->sc_ev_txseg1, EVCNT_TYPE_MISC,
779 NULL, sc->sc_dev.dv_xname, "txseg1");
780 evcnt_attach_dynamic(&sc->sc_ev_txseg2, EVCNT_TYPE_MISC,
781 NULL, sc->sc_dev.dv_xname, "txseg2");
782 evcnt_attach_dynamic(&sc->sc_ev_txseg3, EVCNT_TYPE_MISC,
783 NULL, sc->sc_dev.dv_xname, "txseg3");
784 evcnt_attach_dynamic(&sc->sc_ev_txseg4, EVCNT_TYPE_MISC,
785 NULL, sc->sc_dev.dv_xname, "txseg4");
786 evcnt_attach_dynamic(&sc->sc_ev_txseg5, EVCNT_TYPE_MISC,
787 NULL, sc->sc_dev.dv_xname, "txseg5");
788 evcnt_attach_dynamic(&sc->sc_ev_txsegmore, EVCNT_TYPE_MISC,
789 NULL, sc->sc_dev.dv_xname, "txsegmore");
790 evcnt_attach_dynamic(&sc->sc_ev_txcopy, EVCNT_TYPE_MISC,
791 NULL, sc->sc_dev.dv_xname, "txcopy");
792 #endif /* PCN_EVENT_COUNTERS */
793
794 /* Make sure the interface is shutdown during reboot. */
795 sc->sc_sdhook = shutdownhook_establish(pcn_shutdown, sc);
796 if (sc->sc_sdhook == NULL)
797 printf("%s: WARNING: unable to establish shutdown hook\n",
798 sc->sc_dev.dv_xname);
799 return;
800
801 /*
802 * Free any resources we've allocated during the failed attach
803 * attempt. Do this in reverse order and fall through.
804 */
805 fail_5:
806 for (i = 0; i < PCN_NRXDESC; i++) {
807 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
808 bus_dmamap_destroy(sc->sc_dmat,
809 sc->sc_rxsoft[i].rxs_dmamap);
810 }
811 fail_4:
812 for (i = 0; i < PCN_TXQUEUELEN; i++) {
813 if (sc->sc_txsoft[i].txs_dmamap != NULL)
814 bus_dmamap_destroy(sc->sc_dmat,
815 sc->sc_txsoft[i].txs_dmamap);
816 }
817 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
818 fail_3:
819 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
820 fail_2:
821 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
822 sizeof(struct pcn_control_data));
823 fail_1:
824 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
825 fail_0:
826 return;
827 }
828
829 /*
830 * pcn_shutdown:
831 *
832 * Make sure the interface is stopped at reboot time.
833 */
834 void
835 pcn_shutdown(void *arg)
836 {
837 struct pcn_softc *sc = arg;
838
839 pcn_stop(&sc->sc_ethercom.ec_if, 1);
840 }
841
842 /*
843 * pcn_start: [ifnet interface function]
844 *
845 * Start packet transmission on the interface.
846 */
847 void
848 pcn_start(struct ifnet *ifp)
849 {
850 struct pcn_softc *sc = ifp->if_softc;
851 struct mbuf *m0, *m;
852 struct pcn_txsoft *txs;
853 bus_dmamap_t dmamap;
854 int error, nexttx, lasttx, ofree, seg;
855
856 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
857 return;
858
859 /*
860 * Remember the previous number of free descriptors and
861 * the first descriptor we'll use.
862 */
863 ofree = sc->sc_txfree;
864
865 /*
866 * Loop through the send queue, setting up transmit descriptors
867 * until we drain the queue, or use up all available transmit
868 * descriptors.
869 */
870 for (;;) {
871 /* Grab a packet off the queue. */
872 IFQ_POLL(&ifp->if_snd, m0);
873 if (m0 == NULL)
874 break;
875 m = NULL;
876
877 /* Get a work queue entry. */
878 if (sc->sc_txsfree == 0) {
879 PCN_EVCNT_INCR(&sc->sc_ev_txsstall);
880 break;
881 }
882
883 txs = &sc->sc_txsoft[sc->sc_txsnext];
884 dmamap = txs->txs_dmamap;
885
886 /*
887 * Load the DMA map. If this fails, the packet either
888 * didn't fit in the alloted number of segments, or we
889 * were short on resources. In this case, we'll copy
890 * and try again.
891 */
892 if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
893 BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
894 PCN_EVCNT_INCR(&sc->sc_ev_txcopy);
895 MGETHDR(m, M_DONTWAIT, MT_DATA);
896 if (m == NULL) {
897 printf("%s: unable to allocate Tx mbuf\n",
898 sc->sc_dev.dv_xname);
899 break;
900 }
901 if (m0->m_pkthdr.len > MHLEN) {
902 MCLGET(m, M_DONTWAIT);
903 if ((m->m_flags & M_EXT) == 0) {
904 printf("%s: unable to allocate Tx "
905 "cluster\n", sc->sc_dev.dv_xname);
906 m_freem(m);
907 break;
908 }
909 }
910 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
911 m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
912 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
913 m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
914 if (error) {
915 printf("%s: unable to load Tx buffer, "
916 "error = %d\n", sc->sc_dev.dv_xname, error);
917 break;
918 }
919 }
920
921 /*
922 * Ensure we have enough descriptors free to describe
923 * the packet. Note, we always reserve one descriptor
924 * at the end of the ring as a termination point, to
925 * prevent wrap-around.
926 */
927 if (dmamap->dm_nsegs > (sc->sc_txfree - 1)) {
928 /*
929 * Not enough free descriptors to transmit this
930 * packet. We haven't committed anything yet,
931 * so just unload the DMA map, put the packet
932 * back on the queue, and punt. Notify the upper
933 * layer that there are not more slots left.
934 *
935 * XXX We could allocate an mbuf and copy, but
936 * XXX is it worth it?
937 */
938 ifp->if_flags |= IFF_OACTIVE;
939 bus_dmamap_unload(sc->sc_dmat, dmamap);
940 if (m != NULL)
941 m_freem(m);
942 PCN_EVCNT_INCR(&sc->sc_ev_txdstall);
943 break;
944 }
945
946 IFQ_DEQUEUE(&ifp->if_snd, m0);
947 if (m != NULL) {
948 m_freem(m0);
949 m0 = m;
950 }
951
952 /*
953 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
954 */
955
956 /* Sync the DMA map. */
957 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
958 BUS_DMASYNC_PREWRITE);
959
960 #ifdef PCN_EVENT_COUNTERS
961 switch (dmamap->dm_nsegs) {
962 case 1:
963 PCN_EVCNT_INCR(&sc->sc_ev_txseg1);
964 break;
965 case 2:
966 PCN_EVCNT_INCR(&sc->sc_ev_txseg2);
967 break;
968 case 3:
969 PCN_EVCNT_INCR(&sc->sc_ev_txseg3);
970 break;
971 case 4:
972 PCN_EVCNT_INCR(&sc->sc_ev_txseg4);
973 break;
974 case 5:
975 PCN_EVCNT_INCR(&sc->sc_ev_txseg5);
976 break;
977 default:
978 PCN_EVCNT_INCR(&sc->sc_ev_txsegmore);
979 break;
980 }
981 #endif /* PCN_EVENT_COUNTERS */
982
983 /*
984 * Initialize the transmit descriptors.
985 */
986 if (sc->sc_swstyle == LE_B20_SSTYLE_PCNETPCI3) {
987 for (nexttx = sc->sc_txnext, seg = 0;
988 seg < dmamap->dm_nsegs;
989 seg++, nexttx = PCN_NEXTTX(nexttx)) {
990 /*
991 * If this is the first descriptor we're
992 * enqueueing, don't set the OWN bit just
993 * yet. That could cause a race condition.
994 * We'll do it below.
995 */
996 sc->sc_txdescs[nexttx].tmd0 = 0;
997 sc->sc_txdescs[nexttx].tmd2 =
998 htole32(dmamap->dm_segs[seg].ds_addr);
999 sc->sc_txdescs[nexttx].tmd1 =
1000 ((nexttx == sc->sc_txnext) ? 0 :
1001 htole32(LE_T1_OWN)) |
1002 htole32((LE_BCNT(dmamap->dm_segs[
1003 seg].ds_len) &
1004 LE_T1_BCNT_MASK));
1005 lasttx = nexttx;
1006 }
1007 } else {
1008 for (nexttx = sc->sc_txnext, seg = 0;
1009 seg < dmamap->dm_nsegs;
1010 seg++, nexttx = PCN_NEXTTX(nexttx)) {
1011 /*
1012 * If this is the first descriptor we're
1013 * enqueueing, don't set the OWN bit just
1014 * yet. That could cause a race condition.
1015 * We'll do it below.
1016 */
1017 sc->sc_txdescs[nexttx].tmd0 =
1018 htole32(dmamap->dm_segs[seg].ds_addr);
1019 sc->sc_txdescs[nexttx].tmd2 = 0;
1020 sc->sc_txdescs[nexttx].tmd1 =
1021 ((nexttx == sc->sc_txnext) ? 0 :
1022 htole32(LE_T1_OWN)) |
1023 htole32((LE_BCNT(dmamap->dm_segs[
1024 seg].ds_len) &
1025 LE_T1_BCNT_MASK));
1026 lasttx = nexttx;
1027 }
1028 }
1029
1030 /* Interrupt on the packet, if appropriate. */
1031 if ((sc->sc_txsnext & PCN_TXINTR_MASK) == 0)
1032 sc->sc_txdescs[lasttx].tmd1 |= htole32(LE_T1_LTINT);
1033
1034 /* Set `start of packet' and `end of packet' appropriately. */
1035 sc->sc_txdescs[lasttx].tmd1 |= htole32(LE_T1_ENP);
1036 sc->sc_txdescs[sc->sc_txnext].tmd1 |=
1037 htole32(LE_T1_OWN|LE_T1_STP);
1038
1039 /* Sync the descriptors we're using. */
1040 PCN_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
1041 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1042
1043 /* Kick the transmitter. */
1044 pcn_csr_write(sc, LE_CSR0, LE_C0_INEA|LE_C0_TDMD);
1045
1046 /*
1047 * Store a pointer to the packet so we can free it later,
1048 * and remember what txdirty will be once the packet is
1049 * done.
1050 */
1051 txs->txs_mbuf = m0;
1052 txs->txs_firstdesc = sc->sc_txnext;
1053 txs->txs_lastdesc = lasttx;
1054
1055 /* Advance the tx pointer. */
1056 sc->sc_txfree -= dmamap->dm_nsegs;
1057 sc->sc_txnext = nexttx;
1058
1059 sc->sc_txsfree--;
1060 sc->sc_txsnext = PCN_NEXTTXS(sc->sc_txsnext);
1061
1062 #if NBPFILTER > 0
1063 /* Pass the packet to any BPF listeners. */
1064 if (ifp->if_bpf)
1065 bpf_mtap(ifp->if_bpf, m0);
1066 #endif /* NBPFILTER > 0 */
1067 }
1068
1069 if (sc->sc_txsfree == 0 || sc->sc_txfree == 0) {
1070 /* No more slots left; notify upper layer. */
1071 ifp->if_flags |= IFF_OACTIVE;
1072 }
1073
1074 if (sc->sc_txfree != ofree) {
1075 /* Set a watchdog timer in case the chip flakes out. */
1076 ifp->if_timer = 5;
1077 }
1078 }
1079
1080 /*
1081 * pcn_watchdog: [ifnet interface function]
1082 *
1083 * Watchdog timer handler.
1084 */
1085 void
1086 pcn_watchdog(struct ifnet *ifp)
1087 {
1088 struct pcn_softc *sc = ifp->if_softc;
1089
1090 /*
1091 * Since we're not interrupting every packet, sweep
1092 * up before we report an error.
1093 */
1094 pcn_txintr(sc);
1095
1096 if (sc->sc_txfree != PCN_NTXDESC) {
1097 printf("%s: device timeout (txfree %d txsfree %d)\n",
1098 sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree);
1099 ifp->if_oerrors++;
1100
1101 /* Reset the interface. */
1102 (void) pcn_init(ifp);
1103 }
1104
1105 /* Try to get more packets going. */
1106 pcn_start(ifp);
1107 }
1108
1109 /*
1110 * pcn_ioctl: [ifnet interface function]
1111 *
1112 * Handle control requests from the operator.
1113 */
1114 int
1115 pcn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1116 {
1117 struct pcn_softc *sc = ifp->if_softc;
1118 struct ifreq *ifr = (struct ifreq *) data;
1119 int s, error;
1120
1121 s = splnet();
1122
1123 switch (cmd) {
1124 case SIOCSIFMEDIA:
1125 case SIOCGIFMEDIA:
1126 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
1127 break;
1128
1129 default:
1130 error = ether_ioctl(ifp, cmd, data);
1131 if (error == ENETRESET) {
1132 /*
1133 * Multicast list has changed; set the hardware filter
1134 * accordingly.
1135 */
1136 error = pcn_init(ifp);
1137 }
1138 break;
1139 }
1140
1141 /* Try to get more packets going. */
1142 pcn_start(ifp);
1143
1144 splx(s);
1145 return (error);
1146 }
1147
1148 /*
1149 * pcn_intr:
1150 *
1151 * Interrupt service routine.
1152 */
1153 int
1154 pcn_intr(void *arg)
1155 {
1156 struct pcn_softc *sc = arg;
1157 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1158 uint32_t csr0;
1159 int wantinit, handled = 0;
1160
1161 for (wantinit = 0; wantinit == 0;) {
1162 csr0 = pcn_csr_read(sc, LE_CSR0);
1163 if ((csr0 & LE_C0_INTR) == 0)
1164 break;
1165
1166 /* ACK the bits and re-enable interrupts. */
1167 pcn_csr_write(sc, LE_CSR0, csr0 &
1168 (LE_C0_INEA|LE_C0_BABL|LE_C0_MISS|LE_C0_MERR|LE_C0_RINT|
1169 LE_C0_TINT|LE_C0_IDON));
1170
1171 handled = 1;
1172
1173 if (csr0 & LE_C0_RINT) {
1174 PCN_EVCNT_INCR(&sc->sc_ev_rxintr);
1175 wantinit = pcn_rxintr(sc);
1176 }
1177
1178 if (csr0 & LE_C0_TINT) {
1179 PCN_EVCNT_INCR(&sc->sc_ev_txintr);
1180 pcn_txintr(sc);
1181 }
1182
1183 if (csr0 & LE_C0_ERR) {
1184 if (csr0 & LE_C0_BABL) {
1185 PCN_EVCNT_INCR(&sc->sc_ev_babl);
1186 ifp->if_oerrors++;
1187 }
1188 if (csr0 & LE_C0_MISS) {
1189 PCN_EVCNT_INCR(&sc->sc_ev_miss);
1190 ifp->if_ierrors++;
1191 }
1192 if (csr0 & LE_C0_MERR) {
1193 PCN_EVCNT_INCR(&sc->sc_ev_merr);
1194 printf("%s: memory error\n",
1195 sc->sc_dev.dv_xname);
1196 wantinit = 1;
1197 break;
1198 }
1199 }
1200
1201 if ((csr0 & LE_C0_RXON) == 0) {
1202 printf("%s: receiver disabled\n",
1203 sc->sc_dev.dv_xname);
1204 ifp->if_ierrors++;
1205 wantinit = 1;
1206 }
1207
1208 if ((csr0 & LE_C0_TXON) == 0) {
1209 printf("%s: transmitter disabled\n",
1210 sc->sc_dev.dv_xname);
1211 ifp->if_oerrors++;
1212 wantinit = 1;
1213 }
1214 }
1215
1216 if (handled) {
1217 if (wantinit)
1218 pcn_init(ifp);
1219
1220 /* Try to get more packets going. */
1221 pcn_start(ifp);
1222 }
1223
1224 return (handled);
1225 }
1226
1227 /*
1228 * pcn_spnd:
1229 *
1230 * Suspend the chip.
1231 */
1232 void
1233 pcn_spnd(struct pcn_softc *sc)
1234 {
1235 int i;
1236
1237 pcn_csr_write(sc, LE_CSR5, sc->sc_csr5 | LE_C5_SPND);
1238
1239 for (i = 0; i < 10000; i++) {
1240 if (pcn_csr_read(sc, LE_CSR5) & LE_C5_SPND)
1241 return;
1242 delay(5);
1243 }
1244
1245 printf("%s: WARNING: chip failed to enter suspended state\n",
1246 sc->sc_dev.dv_xname);
1247 }
1248
1249 /*
1250 * pcn_txintr:
1251 *
1252 * Helper; handle transmit interrupts.
1253 */
1254 void
1255 pcn_txintr(struct pcn_softc *sc)
1256 {
1257 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1258 struct pcn_txsoft *txs;
1259 uint32_t tmd1, tmd2, tmd;
1260 int i, j;
1261
1262 ifp->if_flags &= ~IFF_OACTIVE;
1263
1264 /*
1265 * Go through our Tx list and free mbufs for those
1266 * frames which have been transmitted.
1267 */
1268 for (i = sc->sc_txsdirty; sc->sc_txsfree != PCN_TXQUEUELEN;
1269 i = PCN_NEXTTXS(i), sc->sc_txsfree++) {
1270 txs = &sc->sc_txsoft[i];
1271
1272 PCN_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
1273 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1274
1275 tmd1 = le32toh(sc->sc_txdescs[txs->txs_lastdesc].tmd1);
1276 if (tmd1 & LE_T1_OWN)
1277 break;
1278
1279 /*
1280 * Slightly annoying -- we have to loop through the
1281 * descriptors we've used looking for ERR, since it
1282 * can appear on any descriptor in the chain.
1283 */
1284 for (j = txs->txs_firstdesc;; j = PCN_NEXTTX(j)) {
1285 tmd = le32toh(sc->sc_txdescs[j].tmd1);
1286 if (tmd & LE_T1_ERR) {
1287 ifp->if_oerrors++;
1288 if (sc->sc_swstyle == LE_B20_SSTYLE_PCNETPCI3)
1289 tmd2 = le32toh(sc->sc_txdescs[j].tmd0);
1290 else
1291 tmd2 = le32toh(sc->sc_txdescs[j].tmd2);
1292 if (tmd2 & LE_T2_UFLO) {
1293 if (sc->sc_xmtsp < LE_C80_XMTSP_MAX) {
1294 sc->sc_xmtsp++;
1295 printf("%s: transmit "
1296 "underrun; new threshold: "
1297 "%s\n",
1298 sc->sc_dev.dv_xname,
1299 sc->sc_xmtsp_desc[
1300 sc->sc_xmtsp]);
1301 pcn_spnd(sc);
1302 pcn_csr_write(sc, LE_CSR80,
1303 LE_C80_RCVFW(sc->sc_rcvfw) |
1304 LE_C80_XMTSP(sc->sc_xmtsp) |
1305 LE_C80_XMTFW(sc->sc_xmtfw));
1306 pcn_csr_write(sc, LE_CSR5,
1307 sc->sc_csr5);
1308 } else {
1309 printf("%s: transmit "
1310 "underrun\n",
1311 sc->sc_dev.dv_xname);
1312 }
1313 } else if (tmd2 & LE_T2_BUFF) {
1314 printf("%s: transmit buffer error\n",
1315 sc->sc_dev.dv_xname);
1316 }
1317 if (tmd2 & LE_T2_LCOL)
1318 ifp->if_collisions++;
1319 if (tmd2 & LE_T2_RTRY)
1320 ifp->if_collisions += 16;
1321 goto next_packet;
1322 }
1323 if (j == txs->txs_lastdesc)
1324 break;
1325 }
1326 if (tmd1 & LE_T1_ONE)
1327 ifp->if_collisions++;
1328 else if (tmd & LE_T1_MORE) {
1329 /* Real number is unknown. */
1330 ifp->if_collisions += 2;
1331 }
1332 ifp->if_opackets++;
1333 next_packet:
1334 sc->sc_txfree += txs->txs_dmamap->dm_nsegs;
1335 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
1336 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1337 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1338 m_freem(txs->txs_mbuf);
1339 txs->txs_mbuf = NULL;
1340 }
1341
1342 /* Update the dirty transmit buffer pointer. */
1343 sc->sc_txsdirty = i;
1344
1345 /*
1346 * If there are no more pending transmissions, cancel the watchdog
1347 * timer.
1348 */
1349 if (sc->sc_txsfree == PCN_TXQUEUELEN)
1350 ifp->if_timer = 0;
1351 }
1352
1353 /*
1354 * pcn_rxintr:
1355 *
1356 * Helper; handle receive interrupts.
1357 */
1358 int
1359 pcn_rxintr(struct pcn_softc *sc)
1360 {
1361 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1362 struct pcn_rxsoft *rxs;
1363 struct mbuf *m;
1364 uint32_t rmd1;
1365 int i, len;
1366
1367 for (i = sc->sc_rxptr;; i = PCN_NEXTRX(i)) {
1368 rxs = &sc->sc_rxsoft[i];
1369
1370 PCN_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1371
1372 rmd1 = le32toh(sc->sc_rxdescs[i].rmd1);
1373
1374 if (rmd1 & LE_R1_OWN)
1375 break;
1376
1377 /*
1378 * Check for errors and make sure the packet fit into
1379 * a single buffer. We have structured this block of
1380 * code the way it is in order to compress it into
1381 * one test in the common case (no error).
1382 */
1383 if (__predict_false((rmd1 & (LE_R1_STP|LE_R1_ENP|LE_R1_ERR)) !=
1384 (LE_R1_STP|LE_R1_ENP))) {
1385 /* Make sure the packet is in a single buffer. */
1386 if ((rmd1 & (LE_R1_STP|LE_R1_ENP)) !=
1387 (LE_R1_STP|LE_R1_ENP)) {
1388 printf("%s: packet spilled into next buffer\n",
1389 sc->sc_dev.dv_xname);
1390 return (1); /* pcn_intr() will re-init */
1391 }
1392
1393 /*
1394 * If the packet had an error, simple recycle the
1395 * buffer.
1396 */
1397 if (rmd1 & LE_R1_ERR) {
1398 ifp->if_ierrors++;
1399 /*
1400 * If we got an overflow error, chances
1401 * are there will be a CRC error. In
1402 * this case, just print the overflow
1403 * error, and skip the others.
1404 */
1405 if (rmd1 & LE_R1_OFLO)
1406 printf("%s: overflow error\n",
1407 sc->sc_dev.dv_xname);
1408 else {
1409 #define PRINTIT(x, s) \
1410 if (rmd1 & (x)) \
1411 printf("%s: %s\n", \
1412 sc->sc_dev.dv_xname, s);
1413 PRINTIT(LE_R1_FRAM, "framing error");
1414 PRINTIT(LE_R1_CRC, "CRC error");
1415 PRINTIT(LE_R1_BUFF, "buffer error");
1416 }
1417 #undef PRINTIT
1418 PCN_INIT_RXDESC(sc, i);
1419 continue;
1420 }
1421 }
1422
1423 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1424 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1425
1426 /*
1427 * No errors; receive the packet.
1428 */
1429 if (sc->sc_swstyle == LE_B20_SSTYLE_PCNETPCI3)
1430 len = le32toh(sc->sc_rxdescs[i].rmd0) & LE_R1_BCNT_MASK;
1431 else
1432 len = le32toh(sc->sc_rxdescs[i].rmd2) & LE_R1_BCNT_MASK;
1433
1434 /*
1435 * The LANCE family includes the CRC with every packet;
1436 * trim it off here.
1437 */
1438 len -= ETHER_CRC_LEN;
1439
1440 /*
1441 * If the packet is small enough to fit in a
1442 * single header mbuf, allocate one and copy
1443 * the data into it. This greatly reduces
1444 * memory consumption when we receive lots
1445 * of small packets.
1446 *
1447 * Otherwise, we add a new buffer to the receive
1448 * chain. If this fails, we drop the packet and
1449 * recycle the old buffer.
1450 */
1451 if (pcn_copy_small != 0 && len <= (MHLEN - 2)) {
1452 MGETHDR(m, M_DONTWAIT, MT_DATA);
1453 if (m == NULL)
1454 goto dropit;
1455 m->m_data += 2;
1456 memcpy(mtod(m, caddr_t),
1457 mtod(rxs->rxs_mbuf, caddr_t), len);
1458 PCN_INIT_RXDESC(sc, i);
1459 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1460 rxs->rxs_dmamap->dm_mapsize,
1461 BUS_DMASYNC_PREREAD);
1462 } else {
1463 m = rxs->rxs_mbuf;
1464 if (pcn_add_rxbuf(sc, i) != 0) {
1465 dropit:
1466 ifp->if_ierrors++;
1467 PCN_INIT_RXDESC(sc, i);
1468 bus_dmamap_sync(sc->sc_dmat,
1469 rxs->rxs_dmamap, 0,
1470 rxs->rxs_dmamap->dm_mapsize,
1471 BUS_DMASYNC_PREREAD);
1472 continue;
1473 }
1474 }
1475
1476 m->m_pkthdr.rcvif = ifp;
1477 m->m_pkthdr.len = m->m_len = len;
1478
1479 #if NBPFILTER > 0
1480 /* Pass this up to any BPF listeners. */
1481 if (ifp->if_bpf)
1482 bpf_mtap(ifp->if_bpf, m);
1483 #endif /* NBPFILTER > 0 */
1484
1485 /* Pass it on. */
1486 (*ifp->if_input)(ifp, m);
1487 ifp->if_ipackets++;
1488 }
1489
1490 /* Update the receive pointer. */
1491 sc->sc_rxptr = i;
1492 return (0);
1493 }
1494
1495 /*
1496 * pcn_tick:
1497 *
1498 * One second timer, used to tick the MII.
1499 */
1500 void
1501 pcn_tick(void *arg)
1502 {
1503 struct pcn_softc *sc = arg;
1504 int s;
1505
1506 s = splnet();
1507 mii_tick(&sc->sc_mii);
1508 splx(s);
1509
1510 callout_reset(&sc->sc_tick_ch, hz, pcn_tick, sc);
1511 }
1512
1513 /*
1514 * pcn_reset:
1515 *
1516 * Perform a soft reset on the PCnet-PCI.
1517 */
1518 void
1519 pcn_reset(struct pcn_softc *sc)
1520 {
1521
1522 /*
1523 * The PCnet-PCI chip is reset by reading from the
1524 * RESET register. Note that while the NE2100 LANCE
1525 * boards require a write after the read, the PCnet-PCI
1526 * chips do not require this.
1527 *
1528 * Since we don't know if we're in 16-bit or 32-bit
1529 * mode right now, issue both (it's safe) in the
1530 * hopes that one will succeed.
1531 */
1532 (void) bus_space_read_2(sc->sc_st, sc->sc_sh, PCN16_RESET);
1533 (void) bus_space_read_4(sc->sc_st, sc->sc_sh, PCN32_RESET);
1534
1535 /* Wait 1ms for it to finish. */
1536 delay(1000);
1537
1538 /*
1539 * Select 32-bit I/O mode by issuing a 32-bit write to the
1540 * RDP. Since the RAP is 0 after a reset, writing a 0
1541 * to RDP is safe (since it simply clears CSR0).
1542 */
1543 bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RDP, 0);
1544 }
1545
1546 /*
1547 * pcn_init: [ifnet interface function]
1548 *
1549 * Initialize the interface. Must be called at splnet().
1550 */
1551 int
1552 pcn_init(struct ifnet *ifp)
1553 {
1554 struct pcn_softc *sc = ifp->if_softc;
1555 struct pcn_rxsoft *rxs;
1556 uint8_t *enaddr = LLADDR(ifp->if_sadl);
1557 int i, error = 0;
1558 uint32_t reg;
1559
1560 /* Cancel any pending I/O. */
1561 pcn_stop(ifp, 0);
1562
1563 /* Reset the chip to a known state. */
1564 pcn_reset(sc);
1565
1566 /*
1567 * On the Am79c970, select SSTYLE 2, and SSTYLE 3 on everything
1568 * else.
1569 *
1570 * XXX It'd be really nice to use SSTYLE 2 on all the chips,
1571 * because the structure layout is compatible with ILACC,
1572 * but the burst mode is only available in SSTYLE 3, and
1573 * burst mode should provide some performance enhancement.
1574 */
1575 if (sc->sc_variant->pcv_chipid == PARTID_Am79c970)
1576 sc->sc_swstyle = LE_B20_SSTYLE_PCNETPCI2;
1577 else
1578 sc->sc_swstyle = LE_B20_SSTYLE_PCNETPCI3;
1579 pcn_bcr_write(sc, LE_BCR20, sc->sc_swstyle);
1580
1581 /* Initialize the transmit descriptor ring. */
1582 memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
1583 PCN_CDTXSYNC(sc, 0, PCN_NTXDESC,
1584 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1585 sc->sc_txfree = PCN_NTXDESC;
1586 sc->sc_txnext = 0;
1587
1588 /* Initialize the transmit job descriptors. */
1589 for (i = 0; i < PCN_TXQUEUELEN; i++)
1590 sc->sc_txsoft[i].txs_mbuf = NULL;
1591 sc->sc_txsfree = PCN_TXQUEUELEN;
1592 sc->sc_txsnext = 0;
1593 sc->sc_txsdirty = 0;
1594
1595 /*
1596 * Initialize the receive descriptor and receive job
1597 * descriptor rings.
1598 */
1599 for (i = 0; i < PCN_NRXDESC; i++) {
1600 rxs = &sc->sc_rxsoft[i];
1601 if (rxs->rxs_mbuf == NULL) {
1602 if ((error = pcn_add_rxbuf(sc, i)) != 0) {
1603 printf("%s: unable to allocate or map rx "
1604 "buffer %d, error = %d\n",
1605 sc->sc_dev.dv_xname, i, error);
1606 /*
1607 * XXX Should attempt to run with fewer receive
1608 * XXX buffers instead of just failing.
1609 */
1610 pcn_rxdrain(sc);
1611 goto out;
1612 }
1613 } else
1614 PCN_INIT_RXDESC(sc, i);
1615 }
1616 sc->sc_rxptr = 0;
1617
1618 /* Initialize MODE for the initialization block. */
1619 sc->sc_mode = 0;
1620 if (ifp->if_flags & IFF_PROMISC)
1621 sc->sc_mode |= LE_C15_PROM;
1622 if ((ifp->if_flags & IFF_BROADCAST) == 0)
1623 sc->sc_mode |= LE_C15_DRCVBC;
1624
1625 /*
1626 * If we have MII, simply select MII in the MODE register,
1627 * and clear ASEL. Otherwise, let ASEL stand (for now),
1628 * and leave PORTSEL alone (it is ignored with ASEL is set).
1629 */
1630 if (sc->sc_flags & PCN_F_HAS_MII) {
1631 pcn_bcr_write(sc, LE_BCR2,
1632 pcn_bcr_read(sc, LE_BCR2) & ~LE_B2_ASEL);
1633 sc->sc_mode |= LE_C15_PORTSEL(PORTSEL_MII);
1634
1635 /*
1636 * Disable MII auto-negotiation. We handle that in
1637 * our own MII layer.
1638 */
1639 pcn_bcr_write(sc, LE_BCR32,
1640 pcn_csr_read(sc, LE_BCR32) & ~LE_B32_DANAS);
1641 }
1642
1643 /*
1644 * Set the Tx and Rx descriptor ring addresses in the init
1645 * block, the TLEN and RLEN other fields of the init block
1646 * MODE register.
1647 */
1648 sc->sc_initblock.init_rdra = htole32(PCN_CDRXADDR(sc, 0));
1649 sc->sc_initblock.init_tdra = htole32(PCN_CDTXADDR(sc, 0));
1650 sc->sc_initblock.init_mode = htole32(sc->sc_mode |
1651 ((ffs(PCN_NTXDESC) - 1) << 28) |
1652 ((ffs(PCN_NRXDESC) - 1) << 20));
1653
1654 /* Set the station address in the init block. */
1655 sc->sc_initblock.init_padr[0] = htole32(enaddr[0] |
1656 (enaddr[1] << 8) | (enaddr[2] << 16) | (enaddr[3] << 24));
1657 sc->sc_initblock.init_padr[1] = htole32(enaddr[4] |
1658 (enaddr[5] << 8));
1659
1660 /* Set the multicast filter in the init block. */
1661 pcn_set_filter(sc);
1662
1663 /* Initialize CSR3. */
1664 pcn_csr_write(sc, LE_CSR3, LE_C3_MISSM|LE_C3_IDONM|LE_C3_DXSUFLO);
1665
1666 /* Initialize CSR4. */
1667 pcn_csr_write(sc, LE_CSR4, LE_C4_DMAPLUS|LE_C4_APAD_XMT|
1668 LE_C4_MFCOM|LE_C4_RCVCCOM|LE_C4_TXSTRTM);
1669
1670 /* Initialize CSR5. */
1671 sc->sc_csr5 = LE_C5_LTINTEN|LE_C5_SINTE;
1672 pcn_csr_write(sc, LE_CSR5, sc->sc_csr5);
1673
1674 /*
1675 * If we have an Am79c971 or greater, initialize CSR7.
1676 *
1677 * XXX Might be nice to use the MII auto-poll interrupt someday.
1678 */
1679 switch (sc->sc_variant->pcv_chipid) {
1680 case PARTID_Am79c970:
1681 case PARTID_Am79c970A:
1682 /* Not available on these chips. */
1683 break;
1684
1685 default:
1686 pcn_csr_write(sc, LE_CSR7, LE_C7_FASTSPNDE);
1687 break;
1688 }
1689
1690 /*
1691 * On the Am79c970A and greater, initialize BCR18 to
1692 * enable burst mode.
1693 *
1694 * Also enable the "no underflow" option on the Am79c971 and
1695 * higher, which prevents the chip from generating transmit
1696 * underflows, yet sill provides decent performance. Note if
1697 * chip is not connected to external SRAM, then we still have
1698 * to handle underflow errors (the NOUFLO bit is ignored in
1699 * that case).
1700 */
1701 reg = pcn_bcr_read(sc, LE_BCR18);
1702 switch (sc->sc_variant->pcv_chipid) {
1703 case PARTID_Am79c970:
1704 break;
1705
1706 case PARTID_Am79c970A:
1707 reg |= LE_B18_BREADE|LE_B18_BWRITE;
1708 break;
1709
1710 default:
1711 reg |= LE_B18_BREADE|LE_B18_BWRITE|LE_B18_NOUFLO;
1712 break;
1713 }
1714 pcn_bcr_write(sc, LE_BCR18, reg);
1715
1716 /*
1717 * Initialize CSR80 (FIFO thresholds for Tx and Rx).
1718 */
1719 pcn_csr_write(sc, LE_CSR80, LE_C80_RCVFW(sc->sc_rcvfw) |
1720 LE_C80_XMTSP(sc->sc_xmtsp) | LE_C80_XMTFW(sc->sc_xmtfw));
1721
1722 /*
1723 * Send the init block to the chip, and wait for it
1724 * to be processed.
1725 */
1726 pcn_csr_write(sc, LE_CSR1, PCN_CDINITADDR(sc) & 0xffff);
1727 pcn_csr_write(sc, LE_CSR2, (PCN_CDINITADDR(sc) >> 16) & 0xffff);
1728 pcn_csr_write(sc, LE_CSR0, LE_C0_INIT);
1729 delay(100);
1730 for (i = 0; i < 10000; i++) {
1731 if (pcn_csr_read(sc, LE_CSR0) & LE_C0_IDON)
1732 break;
1733 delay(10);
1734 }
1735 if (i == 10000) {
1736 printf("%s: timeout processing init block\n",
1737 sc->sc_dev.dv_xname);
1738 error = EIO;
1739 goto out;
1740 }
1741
1742 /* Set the media. */
1743 (void) (*sc->sc_mii.mii_media.ifm_change)(ifp);
1744
1745 /* Enable interrupts and external activity (and ACK IDON). */
1746 pcn_csr_write(sc, LE_CSR0, LE_C0_INEA|LE_C0_STRT|LE_C0_IDON);
1747
1748 if (sc->sc_flags & PCN_F_HAS_MII) {
1749 /* Start the one second MII clock. */
1750 callout_reset(&sc->sc_tick_ch, hz, pcn_tick, sc);
1751 }
1752
1753 /* ...all done! */
1754 ifp->if_flags |= IFF_RUNNING;
1755 ifp->if_flags &= ~IFF_OACTIVE;
1756
1757 out:
1758 if (error)
1759 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
1760 return (error);
1761 }
1762
1763 /*
1764 * pcn_rxdrain:
1765 *
1766 * Drain the receive queue.
1767 */
1768 void
1769 pcn_rxdrain(struct pcn_softc *sc)
1770 {
1771 struct pcn_rxsoft *rxs;
1772 int i;
1773
1774 for (i = 0; i < PCN_NRXDESC; i++) {
1775 rxs = &sc->sc_rxsoft[i];
1776 if (rxs->rxs_mbuf == NULL) {
1777 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
1778 m_freem(rxs->rxs_mbuf);
1779 rxs->rxs_mbuf = NULL;
1780 }
1781 }
1782 }
1783
1784 /*
1785 * pcn_stop: [ifnet interface function]
1786 *
1787 * Stop transmission on the interface.
1788 */
1789 void
1790 pcn_stop(struct ifnet *ifp, int disable)
1791 {
1792 struct pcn_softc *sc = ifp->if_softc;
1793 struct pcn_txsoft *txs;
1794 int i;
1795
1796 if (sc->sc_flags & PCN_F_HAS_MII) {
1797 /* Stop the one second clock. */
1798 callout_stop(&sc->sc_tick_ch);
1799
1800 /* Down the MII. */
1801 mii_down(&sc->sc_mii);
1802 }
1803
1804 /* Stop the chip. */
1805 pcn_csr_write(sc, LE_CSR0, LE_C0_STOP);
1806
1807 /* Release any queued transmit buffers. */
1808 for (i = 0; i < PCN_TXQUEUELEN; i++) {
1809 txs = &sc->sc_txsoft[i];
1810 if (txs->txs_mbuf != NULL) {
1811 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1812 m_freem(txs->txs_mbuf);
1813 txs->txs_mbuf = NULL;
1814 }
1815 }
1816
1817 if (disable)
1818 pcn_rxdrain(sc);
1819
1820 /* Mark the interface as down and cancel the watchdog timer. */
1821 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1822 ifp->if_timer = 0;
1823 }
1824
1825 /*
1826 * pcn_add_rxbuf:
1827 *
1828 * Add a receive buffer to the indicated descriptor.
1829 */
1830 int
1831 pcn_add_rxbuf(struct pcn_softc *sc, int idx)
1832 {
1833 struct pcn_rxsoft *rxs = &sc->sc_rxsoft[idx];
1834 struct mbuf *m;
1835 int error;
1836
1837 MGETHDR(m, M_DONTWAIT, MT_DATA);
1838 if (m == NULL)
1839 return (ENOBUFS);
1840
1841 MCLGET(m, M_DONTWAIT);
1842 if ((m->m_flags & M_EXT) == 0) {
1843 m_freem(m);
1844 return (ENOBUFS);
1845 }
1846
1847 if (rxs->rxs_mbuf != NULL)
1848 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
1849
1850 rxs->rxs_mbuf = m;
1851
1852 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
1853 m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
1854 BUS_DMA_READ|BUS_DMA_NOWAIT);
1855 if (error) {
1856 printf("%s: can't load rx DMA map %d, error = %d\n",
1857 sc->sc_dev.dv_xname, idx, error);
1858 panic("pcn_add_rxbuf");
1859 }
1860
1861 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1862 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1863
1864 PCN_INIT_RXDESC(sc, idx);
1865
1866 return (0);
1867 }
1868
1869 /*
1870 * pcn_set_filter:
1871 *
1872 * Set up the receive filter.
1873 */
1874 void
1875 pcn_set_filter(struct pcn_softc *sc)
1876 {
1877 struct ethercom *ec = &sc->sc_ethercom;
1878 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1879 struct ether_multi *enm;
1880 struct ether_multistep step;
1881 uint32_t crc;
1882
1883 /*
1884 * Set up the multicast address filter by passing all multicast
1885 * addresses through a CRC generator, and then using the high
1886 * order 6 bits as an index into the 64-bit logical address
1887 * filter. The high order bits select the word, while the rest
1888 * of the bits select the bit within the word.
1889 */
1890
1891 if (ifp->if_flags & IFF_PROMISC)
1892 goto allmulti;
1893
1894 sc->sc_initblock.init_ladrf[0] =
1895 sc->sc_initblock.init_ladrf[1] =
1896 sc->sc_initblock.init_ladrf[2] =
1897 sc->sc_initblock.init_ladrf[3] = 0;
1898
1899 ETHER_FIRST_MULTI(step, ec, enm);
1900 while (enm != NULL) {
1901 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1902 /*
1903 * We must listen to a range of multicast addresses.
1904 * For now, just accept all multicasts, rather than
1905 * trying to set only those filter bits needed to match
1906 * the range. (At this time, the only use of address
1907 * ranges is for IP multicast routing, for which the
1908 * range is big enough to require all bits set.)
1909 */
1910 goto allmulti;
1911 }
1912
1913 crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
1914
1915 /* Just want the 6 most significant bits. */
1916 crc >>= 26;
1917
1918 /* Set the corresponding bit in the filter. */
1919 sc->sc_initblock.init_ladrf[crc >> 4] |= 1 << (crc & 0xf);
1920
1921 ETHER_NEXT_MULTI(step, enm);
1922 }
1923
1924 ifp->if_flags &= ~IFF_ALLMULTI;
1925 return;
1926
1927 allmulti:
1928 ifp->if_flags |= IFF_ALLMULTI;
1929 sc->sc_initblock.init_ladrf[0] =
1930 sc->sc_initblock.init_ladrf[1] =
1931 sc->sc_initblock.init_ladrf[2] =
1932 sc->sc_initblock.init_ladrf[3] = 0xffff;
1933 }
1934
1935 /*
1936 * pcn_79c970_mediainit:
1937 *
1938 * Initialize media for the Am79c970.
1939 */
1940 void
1941 pcn_79c970_mediainit(struct pcn_softc *sc)
1942 {
1943 const char *sep = "";
1944
1945 ifmedia_init(&sc->sc_mii.mii_media, 0, pcn_79c970_mediachange,
1946 pcn_79c970_mediastatus);
1947
1948 #define ADD(s, m, d) \
1949 do { \
1950 printf("%s%s", sep, s); \
1951 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(m), (d), NULL); \
1952 sep = ", "; \
1953 } while (/*CONSTCOND*/0)
1954
1955 printf("%s: ", sc->sc_dev.dv_xname);
1956 ADD("10base5", IFM_10_5, PORTSEL_AUI);
1957 if (sc->sc_variant->pcv_chipid == PARTID_Am79c970A)
1958 ADD("10base5-FDX", IFM_10_5|IFM_FDX, PORTSEL_AUI);
1959 ADD("10baseT", IFM_10_T, PORTSEL_10T);
1960 if (sc->sc_variant->pcv_chipid == PARTID_Am79c970A)
1961 ADD("10baseT-FDX", IFM_10_T|IFM_FDX, PORTSEL_10T);
1962 ADD("auto", IFM_AUTO, 0);
1963 if (sc->sc_variant->pcv_chipid == PARTID_Am79c970A)
1964 ADD("auto", IFM_AUTO|IFM_FDX, 0);
1965 printf("\n");
1966
1967 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
1968 }
1969
1970 /*
1971 * pcn_79c970_mediastatus: [ifmedia interface function]
1972 *
1973 * Get the current interface media status (Am79c970 version).
1974 */
1975 void
1976 pcn_79c970_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1977 {
1978 struct pcn_softc *sc = ifp->if_softc;
1979
1980 /*
1981 * The currently selected media is always the active media.
1982 * Note: We have no way to determine what media the AUTO
1983 * process picked.
1984 */
1985 ifmr->ifm_active = sc->sc_mii.mii_media.ifm_media;
1986 }
1987
1988 /*
1989 * pcn_79c970_mediachange: [ifmedia interface function]
1990 *
1991 * Set hardware to newly-selected media (Am79c970 version).
1992 */
1993 int
1994 pcn_79c970_mediachange(struct ifnet *ifp)
1995 {
1996 struct pcn_softc *sc = ifp->if_softc;
1997 uint32_t reg;
1998
1999 if (IFM_SUBTYPE(sc->sc_mii.mii_media.ifm_media) == IFM_AUTO) {
2000 /*
2001 * CSR15:PORTSEL doesn't matter. Just set BCR2:ASEL.
2002 */
2003 reg = pcn_bcr_read(sc, LE_BCR2);
2004 reg |= LE_B2_ASEL;
2005 pcn_bcr_write(sc, LE_BCR2, reg);
2006 } else {
2007 /*
2008 * Clear BCR2:ASEL and set the new CSR15:PORTSEL value.
2009 */
2010 reg = pcn_bcr_read(sc, LE_BCR2);
2011 reg &= ~LE_B2_ASEL;
2012 pcn_bcr_write(sc, LE_BCR2, reg);
2013
2014 reg = pcn_csr_read(sc, LE_CSR15);
2015 reg = (reg & ~LE_C15_PORTSEL(PORTSEL_MASK)) |
2016 LE_C15_PORTSEL(sc->sc_mii.mii_media.ifm_cur->ifm_data);
2017 pcn_csr_write(sc, LE_CSR15, reg);
2018 }
2019
2020 if ((sc->sc_mii.mii_media.ifm_media & IFM_FDX) != 0) {
2021 reg = LE_B9_FDEN;
2022 if (IFM_SUBTYPE(sc->sc_mii.mii_media.ifm_media) == IFM_10_5)
2023 reg |= LE_B9_AUIFD;
2024 pcn_bcr_write(sc, LE_BCR9, reg);
2025 } else
2026 pcn_bcr_write(sc, LE_BCR0, 0);
2027
2028 return (0);
2029 }
2030
2031 /*
2032 * pcn_79c971_mediainit:
2033 *
2034 * Initialize media for the Am79c971.
2035 */
2036 void
2037 pcn_79c971_mediainit(struct pcn_softc *sc)
2038 {
2039 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2040
2041 /* We have MII. */
2042 sc->sc_flags |= PCN_F_HAS_MII;
2043
2044 /*
2045 * The built-in 10BASE-T interface is mapped to the MII
2046 * on the PCNet-FAST. Unfortunately, there's no EEPROM
2047 * word that tells us which PHY to use. Since the 10BASE-T
2048 * interface is always at PHY 31, we make a note of the
2049 * first PHY that responds, and disallow any PHYs after
2050 * it. This is all handled in the MII read routine.
2051 */
2052 sc->sc_phyaddr = -1;
2053
2054 /* Initialize our media structures and probe the MII. */
2055 sc->sc_mii.mii_ifp = ifp;
2056 sc->sc_mii.mii_readreg = pcn_mii_readreg;
2057 sc->sc_mii.mii_writereg = pcn_mii_writereg;
2058 sc->sc_mii.mii_statchg = pcn_mii_statchg;
2059 ifmedia_init(&sc->sc_mii.mii_media, 0, pcn_79c971_mediachange,
2060 pcn_79c971_mediastatus);
2061
2062 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
2063 MII_OFFSET_ANY, 0);
2064 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
2065 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
2066 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
2067 } else
2068 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2069 }
2070
2071 /*
2072 * pcn_79c971_mediastatus: [ifmedia interface function]
2073 *
2074 * Get the current interface media status (Am79c971 version).
2075 */
2076 void
2077 pcn_79c971_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2078 {
2079 struct pcn_softc *sc = ifp->if_softc;
2080
2081 mii_pollstat(&sc->sc_mii);
2082 ifmr->ifm_status = sc->sc_mii.mii_media_status;
2083 ifmr->ifm_active = sc->sc_mii.mii_media_active;
2084 }
2085
2086 /*
2087 * pcn_79c971_mediachange: [ifmedia interface function]
2088 *
2089 * Set hardware to newly-selected media (Am79c971 version).
2090 */
2091 int
2092 pcn_79c971_mediachange(struct ifnet *ifp)
2093 {
2094 struct pcn_softc *sc = ifp->if_softc;
2095
2096 if (ifp->if_flags & IFF_UP)
2097 mii_mediachg(&sc->sc_mii);
2098 return (0);
2099 }
2100
2101 /*
2102 * pcn_mii_readreg: [mii interface function]
2103 *
2104 * Read a PHY register on the MII.
2105 */
2106 int
2107 pcn_mii_readreg(struct device *self, int phy, int reg)
2108 {
2109 struct pcn_softc *sc = (void *) self;
2110 uint32_t rv;
2111
2112 if (sc->sc_phyaddr != -1 && phy != sc->sc_phyaddr)
2113 return (0);
2114
2115 pcn_bcr_write(sc, LE_BCR33, reg | (phy << PHYAD_SHIFT));
2116 rv = pcn_bcr_read(sc, LE_BCR34) & LE_B34_MIIMD;
2117 if (rv == 0xffff)
2118 return (0);
2119
2120 if (sc->sc_phyaddr == -1)
2121 sc->sc_phyaddr = phy;
2122
2123 return (rv);
2124 }
2125
2126 /*
2127 * pcn_mii_writereg: [mii interface function]
2128 *
2129 * Write a PHY register on the MII.
2130 */
2131 void
2132 pcn_mii_writereg(struct device *self, int phy, int reg, int val)
2133 {
2134 struct pcn_softc *sc = (void *) self;
2135
2136 pcn_bcr_write(sc, LE_BCR33, reg | (phy << PHYAD_SHIFT));
2137 pcn_bcr_write(sc, LE_BCR34, val);
2138 }
2139
2140 /*
2141 * pcn_mii_statchg: [mii interface function]
2142 *
2143 * Callback from MII layer when media changes.
2144 */
2145 void
2146 pcn_mii_statchg(struct device *self)
2147 {
2148 struct pcn_softc *sc = (void *) self;
2149
2150 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0)
2151 pcn_bcr_write(sc, LE_BCR9, LE_B9_FDEN);
2152 else
2153 pcn_bcr_write(sc, LE_BCR0, 0);
2154 }
2155