/* $NetBSD: if_kse.c,v 1.24 2013/03/30 03:21:06 christos Exp $ */

/*-
 * Copyright (c) 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Tohru Nishimura.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_kse.c,v 1.24 2013/03/30 03:21:06 christos Exp $");


#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <machine/endian.h>
#include <sys/bus.h>
#include <sys/intr.h>

#include <net/if.h>
#include <net/if_media.h>
#include <net/if_dl.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#define CSR_READ_4(sc, off) \
	bus_space_read_4(sc->sc_st, sc->sc_sh, off)
#define CSR_WRITE_4(sc, off, val) \
	bus_space_write_4(sc->sc_st, sc->sc_sh, off, val)
#define CSR_READ_2(sc, off) \
	bus_space_read_2(sc->sc_st, sc->sc_sh, off)
#define CSR_WRITE_2(sc, off, val) \
	bus_space_write_2(sc->sc_st, sc->sc_sh, off, val)

#define MDTXC	0x000	/* DMA transmit control */
#define MDRXC	0x004	/* DMA receive control */
#define MDTSC	0x008	/* DMA transmit start */
#define MDRSC	0x00c	/* DMA receive start */
#define TDLB	0x010	/* transmit descriptor list base */
#define RDLB	0x014	/* receive descriptor list base */
#define MTR0	0x020	/* multicast table 31:0 */
#define MTR1	0x024	/* multicast table 63:32 */
#define INTEN	0x028	/* interrupt enable */
#define INTST	0x02c	/* interrupt status */
#define MARL	0x200	/* MAC address low */
#define MARM	0x202	/* MAC address middle */
#define MARH	0x204	/* MAC address high */
#define GRR	0x216	/* global reset */
#define CIDR	0x400	/* chip ID and enable */
#define CGCR	0x40a	/* chip global control */
#define IACR	0x4a0	/* indirect access control */
#define IADR1	0x4a2	/* indirect access data 66:63 */
#define IADR2	0x4a4	/* indirect access data 47:32 */
#define IADR3	0x4a6	/* indirect access data 63:48 */
#define IADR4	0x4a8	/* indirect access data 15:0 */
#define IADR5	0x4aa	/* indirect access data 31:16 */
#define P1CR4	0x512	/* port 1 control 4 */
#define P1SR	0x514	/* port 1 status */
#define P2CR4	0x532	/* port 2 control 4 */
#define P2SR	0x534	/* port 2 status */

#define TXC_BS_MSK	0x3f000000	/* burst size */
#define TXC_BS_SFT	(24)		/* 1,2,4,8,16,32 or 0 for unlimited */
#define TXC_UCG		(1U<<18)	/* generate UDP checksum */
#define TXC_TCG		(1U<<17)	/* generate TCP checksum */
#define TXC_ICG		(1U<<16)	/* generate IP checksum */
#define TXC_FCE		(1U<<9)		/* enable flowcontrol */
#define TXC_EP		(1U<<2)		/* enable automatic padding */
#define TXC_AC		(1U<<1)		/* add CRC to frame */
#define TXC_TEN		(1)		/* enable DMA to run */

#define RXC_BS_MSK	0x3f000000	/* burst size */
#define RXC_BS_SFT	(24)		/* 1,2,4,8,16,32 or 0 for unlimited */
#define RXC_IHAE	(1U<<19)	/* IP header alignment enable */
#define RXC_UCC		(1U<<18)	/* run UDP checksum */
#define RXC_TCC		(1U<<17)	/* run TCP checksum */
#define RXC_ICC		(1U<<16)	/* run IP checksum */
#define RXC_FCE		(1U<<9)		/* enable flowcontrol */
#define RXC_RB		(1U<<6)		/* receive broadcast frame */
#define RXC_RM		(1U<<5)		/* receive multicast frame */
#define RXC_RU		(1U<<4)		/* receive unicast frame */
#define RXC_RE		(1U<<3)		/* accept error frame */
#define RXC_RA		(1U<<2)		/* receive all frame */
#define RXC_MHTE	(1U<<1)		/* use multicast hash table */
#define RXC_REN		(1)		/* enable DMA to run */

#define INT_DMLCS	(1U<<31)	/* link status change */
#define INT_DMTS	(1U<<30)	/* sending desc. has posted Tx done */
#define INT_DMRS	(1U<<29)	/* frame was received */
#define INT_DMRBUS	(1U<<27)	/* Rx descriptor pool is full */

#define T0_OWN		(1U<<31)	/* desc is ready to Tx */

#define R0_OWN		(1U<<31)	/* desc is empty */
#define R0_FS		(1U<<30)	/* first segment of frame */
#define R0_LS		(1U<<29)	/* last segment of frame */
#define R0_IPE		(1U<<28)	/* IP checksum error */
#define R0_TCPE		(1U<<27)	/* TCP checksum error */
#define R0_UDPE		(1U<<26)	/* UDP checksum error */
#define R0_ES		(1U<<25)	/* error summary */
#define R0_MF		(1U<<24)	/* multicast frame */
#define R0_SPN		0x00300000	/* 21:20 switch port 1/2 */
#define R0_ALIGN	0x00300000	/* 21:20 (KSZ8692P) Rx align amount */
#define R0_RE		(1U<<19)	/* MII reported error */
#define R0_TL		(1U<<18)	/* frame too long, beyond 1518 */
#define R0_RF		(1U<<17)	/* damaged runt frame */
#define R0_CE		(1U<<16)	/* CRC error */
#define R0_FT		(1U<<15)	/* frame type */
#define R0_FL_MASK	0x7ff		/* frame length 10:0 */

#define T1_IC		(1U<<31)	/* post interrupt on complete */
#define T1_FS		(1U<<30)	/* first segment of frame */
#define T1_LS		(1U<<29)	/* last segment of frame */
#define T1_IPCKG	(1U<<28)	/* generate IP checksum */
#define T1_TCPCKG	(1U<<27)	/* generate TCP checksum */
#define T1_UDPCKG	(1U<<26)	/* generate UDP checksum */
#define T1_TER		(1U<<25)	/* end of ring */
#define T1_SPN		0x00300000	/* 21:20 switch port 1/2 */
#define T1_TBS_MASK	0x7ff		/* segment size 10:0 */

#define R1_RER		(1U<<25)	/* end of ring */
#define R1_RBS_MASK	0x7fc		/* segment size 10:0 */

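/*
 * Ring geometry.  The descriptor and job counts are powers of two so
 * the "next" index can be computed with a simple mask.
 */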
#define KSE_NTXSEGS		16
#define KSE_TXQUEUELEN		64
#define KSE_TXQUEUELEN_MASK	(KSE_TXQUEUELEN - 1)
#define KSE_TXQUEUE_GC		(KSE_TXQUEUELEN / 4)
#define KSE_NTXDESC		256
#define KSE_NTXDESC_MASK	(KSE_NTXDESC - 1)
#define KSE_NEXTTX(x)		(((x) + 1) & KSE_NTXDESC_MASK)
#define KSE_NEXTTXS(x)		(((x) + 1) & KSE_TXQUEUELEN_MASK)

#define KSE_NRXDESC		64
#define KSE_NRXDESC_MASK	(KSE_NRXDESC - 1)
#define KSE_NEXTRX(x)		(((x) + 1) & KSE_NRXDESC_MASK)

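/*
 * Tx/Rx descriptors are 16-byte records shared with the DMA engine:
 * t0/r0 hold the ownership and status bits, t1/r1 the control and
 * buffer-size fields, t2/r2 the buffer bus address, and t3/r3 the bus
 * address of the next descriptor in the ring.
 */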
struct tdes {
	uint32_t t0, t1, t2, t3;
};

struct rdes {
	uint32_t r0, r1, r2, r3;
};

struct kse_control_data {
	struct tdes kcd_txdescs[KSE_NTXDESC];
	struct rdes kcd_rxdescs[KSE_NRXDESC];
};
#define KSE_CDOFF(x)	offsetof(struct kse_control_data, x)
#define KSE_CDTXOFF(x)	KSE_CDOFF(kcd_txdescs[(x)])
#define KSE_CDRXOFF(x)	KSE_CDOFF(kcd_rxdescs[(x)])

struct kse_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

struct kse_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

struct kse_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* Ethernet common data */
	void *sc_ih;			/* interrupt cookie */

	struct ifmedia sc_media;	/* ifmedia information */
	int sc_media_status;		/* PHY */
	int sc_media_active;		/* PHY */
	callout_t sc_callout;		/* MII tick callout */
	callout_t sc_stat_ch;		/* statistics counter callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	struct kse_control_data *sc_control_data;
#define sc_txdescs	sc_control_data->kcd_txdescs
#define sc_rxdescs	sc_control_data->kcd_rxdescs

	struct kse_txsoft sc_txsoft[KSE_TXQUEUELEN];
	struct kse_rxsoft sc_rxsoft[KSE_NRXDESC];
	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */
	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next ready Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */
	int sc_rxptr;			/* next ready Rx descriptor/descsoft */

	uint32_t sc_txc, sc_rxc;
	uint32_t sc_t1csum;
	int sc_mcsum;
	uint32_t sc_inten;

	uint32_t sc_chip;
	uint8_t sc_altmac[16][ETHER_ADDR_LEN];
	uint16_t sc_vlan[16];

#ifdef KSE_EVENT_COUNTERS
	struct ksext {
		char evcntname[3][8];
		struct evcnt pev[3][34];
	} sc_ext;			/* switch statistics */
#endif
};

#define KSE_CDTXADDR(sc, x)	((sc)->sc_cddma + KSE_CDTXOFF((x)))
#define KSE_CDRXADDR(sc, x)	((sc)->sc_cddma + KSE_CDRXOFF((x)))

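/*
 * Sync a span of Tx descriptors for the DMA engine; a span that wraps
 * past the end of the ring takes two bus_dmamap_sync() calls.
 */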
#define KSE_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > KSE_NTXDESC) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    KSE_CDTXOFF(__x), sizeof(struct tdes) *		\
		    (KSE_NTXDESC - __x), (ops));			\
		__n -= (KSE_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    KSE_CDTXOFF(__x), sizeof(struct tdes) * __n, (ops));	\
} while (/*CONSTCOND*/0)

#define KSE_CDRXSYNC(sc, x, ops)					\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    KSE_CDRXOFF((x)), sizeof(struct rdes), (ops));		\
} while (/*CONSTCOND*/0)

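/*
 * Reset a receive descriptor: point it back at the start of its mbuf
 * cluster, restore the maximum buffer size, and return ownership to
 * the hardware.
 */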
#define KSE_INIT_RXDESC(sc, x)						\
do {									\
	struct kse_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	struct rdes *__rxd = &(sc)->sc_rxdescs[(x)];			\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	__m->m_data = __m->m_ext.ext_buf;				\
	__rxd->r2 = __rxs->rxs_dmamap->dm_segs[0].ds_addr;		\
	__rxd->r1 = R1_RBS_MASK /* __m->m_ext.ext_size */;		\
	__rxd->r0 = R0_OWN;						\
	KSE_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
} while (/*CONSTCOND*/0)

u_int kse_burstsize = 8;	/* DMA burst length tuning knob */

#ifdef KSEDIAGNOSTIC
u_int kse_monitor_rxintr;	/* fragmented UDP csum HW bug hook */
#endif

static int kse_match(device_t, cfdata_t, void *);
static void kse_attach(device_t, device_t, void *);

CFATTACH_DECL_NEW(kse, sizeof(struct kse_softc),
    kse_match, kse_attach, NULL, NULL);

static int kse_ioctl(struct ifnet *, u_long, void *);
static void kse_start(struct ifnet *);
static void kse_watchdog(struct ifnet *);
static int kse_init(struct ifnet *);
static void kse_stop(struct ifnet *, int);
static void kse_reset(struct kse_softc *);
static void kse_set_filter(struct kse_softc *);
static int add_rxbuf(struct kse_softc *, int);
static void rxdrain(struct kse_softc *);
static int kse_intr(void *);
static void rxintr(struct kse_softc *);
static void txreap(struct kse_softc *);
static void lnkchg(struct kse_softc *);
static int ifmedia_upd(struct ifnet *);
static void ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void phy_tick(void *);
static int ifmedia2_upd(struct ifnet *);
static void ifmedia2_sts(struct ifnet *, struct ifmediareq *);
#ifdef KSE_EVENT_COUNTERS
static void stat_tick(void *);
static void zerostats(struct kse_softc *);
#endif

static int
kse_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_MICREL &&
	    (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_MICREL_KSZ8842 ||
	     PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_MICREL_KSZ8841) &&
	    PCI_CLASS(pa->pa_class) == PCI_CLASS_NETWORK)
		return 1;

	return 0;
}

static void
kse_attach(device_t parent, device_t self, void *aux)
{
	struct kse_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	struct ifmedia *ifm;
	uint8_t enaddr[ETHER_ADDR_LEN];
	bus_dma_segment_t seg;
	int i, p, error, nseg;
	pcireg_t pmode;
	int pmreg;

	if (pci_mapreg_map(pa, 0x10,
	    PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT,
	    0, &sc->sc_st, &sc->sc_sh, NULL, NULL) != 0) {
		printf(": unable to map device registers\n");
		return;
	}

	sc->sc_dev = self;
	sc->sc_dmat = pa->pa_dmat;

	/* Make sure bus mastering is enabled. */
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) |
	    PCI_COMMAND_MASTER_ENABLE);

	/* Get it out of power save mode, if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		pmode = pci_conf_read(pc, pa->pa_tag, pmreg + PCI_PMCSR) &
		    PCI_PMCSR_STATE_MASK;
		if (pmode == PCI_PMCSR_STATE_D3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			printf("%s: unable to wake from power state D3\n",
			    device_xname(sc->sc_dev));
			return;
		}
		if (pmode != PCI_PMCSR_STATE_D0) {
			printf("%s: waking up from power state D%d\n",
			    device_xname(sc->sc_dev), pmode);
			pci_conf_write(pc, pa->pa_tag, pmreg + PCI_PMCSR,
			    PCI_PMCSR_STATE_D0);
		}
	}

	sc->sc_chip = PCI_PRODUCT(pa->pa_id);
	printf(": Micrel KSZ%04x Ethernet (rev. 0x%02x)\n",
	    sc->sc_chip, PCI_REVISION(pa->pa_class));

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	i = CSR_READ_2(sc, MARL);
	enaddr[5] = i; enaddr[4] = i >> 8;
	i = CSR_READ_2(sc, MARM);
	enaddr[3] = i; enaddr[2] = i >> 8;
	i = CSR_READ_2(sc, MARH);
	enaddr[1] = i; enaddr[0] = i >> 8;
	printf("%s: Ethernet address: %s\n",
	    device_xname(sc->sc_dev), ether_sprintf(enaddr));

	/*
	 * Enable chip function.
	 */
	CSR_WRITE_2(sc, CIDR, 1);

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, kse_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct kse_control_data), PAGE_SIZE, 0, &seg, 1, &nseg, 0);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n", error);
		goto fail_0;
	}
	error = bus_dmamem_map(sc->sc_dmat, &seg, nseg,
	    sizeof(struct kse_control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}
	error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct kse_control_data), 1,
	    sizeof(struct kse_control_data), 0, 0, &sc->sc_cddmamap);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct kse_control_data), NULL, 0);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}
	for (i = 0; i < KSE_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    KSE_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}
	for (i = 0; i < KSE_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    1, MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	callout_init(&sc->sc_callout, 0);
	callout_init(&sc->sc_stat_ch, 0);

	ifm = &sc->sc_media;
	if (sc->sc_chip == 0x8841) {
		ifmedia_init(ifm, 0, ifmedia_upd, ifmedia_sts);
		ifmedia_add(ifm, IFM_ETHER|IFM_10_T, 0, NULL);
		ifmedia_add(ifm, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
		ifmedia_add(ifm, IFM_ETHER|IFM_100_TX, 0, NULL);
		ifmedia_add(ifm, IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
		ifmedia_add(ifm, IFM_ETHER|IFM_AUTO, 0, NULL);
		ifmedia_set(ifm, IFM_ETHER|IFM_AUTO);
	} else {
		ifmedia_init(ifm, 0, ifmedia2_upd, ifmedia2_sts);
		ifmedia_add(ifm, IFM_ETHER|IFM_AUTO, 0, NULL);
		ifmedia_set(ifm, IFM_ETHER|IFM_AUTO);
	}

	printf("%s: 10baseT, 10baseT-FDX, 100baseTX, 100baseTX-FDX, auto\n",
	    device_xname(sc->sc_dev));

	ifp = &sc->sc_ethercom.ec_if;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = kse_ioctl;
	ifp->if_start = kse_start;
	ifp->if_watchdog = kse_watchdog;
	ifp->if_init = kse_init;
	ifp->if_stop = kse_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * KSZ8842 can handle 802.1Q VLAN-sized frames,
	 * can do IPv4, TCPv4, and UDPv4 checksums in hardware.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
	ifp->if_capabilities |=
	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;

	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	p = (sc->sc_chip == 0x8842) ? 3 : 1;
#ifdef KSE_EVENT_COUNTERS
	for (i = 0; i < p; i++) {
		struct ksext *ee = &sc->sc_ext;
		sprintf(ee->evcntname[i], "%s.%d",
		    device_xname(sc->sc_dev), i + 1);
		evcnt_attach_dynamic(&ee->pev[i][0], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxLoPriotyByte");
		evcnt_attach_dynamic(&ee->pev[i][1], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxHiPriotyByte");
		evcnt_attach_dynamic(&ee->pev[i][2], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxUndersizePkt");
		evcnt_attach_dynamic(&ee->pev[i][3], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxFragments");
		evcnt_attach_dynamic(&ee->pev[i][4], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxOversize");
		evcnt_attach_dynamic(&ee->pev[i][5], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxJabbers");
		evcnt_attach_dynamic(&ee->pev[i][6], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxSymbolError");
		evcnt_attach_dynamic(&ee->pev[i][7], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxCRCError");
		evcnt_attach_dynamic(&ee->pev[i][8], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxAlignmentError");
		evcnt_attach_dynamic(&ee->pev[i][9], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxControl8808Pkts");
		evcnt_attach_dynamic(&ee->pev[i][10], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxPausePkts");
		evcnt_attach_dynamic(&ee->pev[i][11], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxBroadcast");
		evcnt_attach_dynamic(&ee->pev[i][12], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxMulticast");
		evcnt_attach_dynamic(&ee->pev[i][13], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxUnicast");
		evcnt_attach_dynamic(&ee->pev[i][14], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "Rx64Octets");
		evcnt_attach_dynamic(&ee->pev[i][15], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "Rx65To127Octets");
		evcnt_attach_dynamic(&ee->pev[i][16], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "Rx128To255Octets");
		evcnt_attach_dynamic(&ee->pev[i][17], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "Rx255To511Octets");
		evcnt_attach_dynamic(&ee->pev[i][18], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "Rx512To1023Octets");
		evcnt_attach_dynamic(&ee->pev[i][19], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "Rx1024To1522Octets");
		evcnt_attach_dynamic(&ee->pev[i][20], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxLoPriotyByte");
		evcnt_attach_dynamic(&ee->pev[i][21], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxHiPriotyByte");
		evcnt_attach_dynamic(&ee->pev[i][22], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxLateCollision");
		evcnt_attach_dynamic(&ee->pev[i][23], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxPausePkts");
		evcnt_attach_dynamic(&ee->pev[i][24], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxBroadcastPkts");
		evcnt_attach_dynamic(&ee->pev[i][25], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxMulticastPkts");
		evcnt_attach_dynamic(&ee->pev[i][26], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxUnicastPkts");
		evcnt_attach_dynamic(&ee->pev[i][27], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxDeferred");
		evcnt_attach_dynamic(&ee->pev[i][28], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxTotalCollision");
		evcnt_attach_dynamic(&ee->pev[i][29], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxExcessiveCollision");
		evcnt_attach_dynamic(&ee->pev[i][30], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxSingleCollision");
		evcnt_attach_dynamic(&ee->pev[i][31], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxMultipleCollision");
		evcnt_attach_dynamic(&ee->pev[i][32], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxDropPkts");
		evcnt_attach_dynamic(&ee->pev[i][33], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxDropPkts");
	}
#endif
	return;

 fail_5:
	for (i = 0; i < KSE_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < KSE_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct kse_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, nseg);
 fail_0:
	return;
}

static int
kse_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct kse_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;

		error = 0;

		if (cmd == SIOCSIFCAP)
			error = (*ifp->if_init)(ifp);
		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			kse_set_filter(sc);
		}
		break;
	}

	kse_start(ifp);

	splx(s);
	return error;
}

static int
kse_init(struct ifnet *ifp)
{
	struct kse_softc *sc = ifp->if_softc;
	uint32_t paddr;
	int i, error = 0;

	/* cancel pending I/O */
	kse_stop(ifp, 0);

	/* reset all registers but PCI configuration */
	kse_reset(sc);

	/* craft Tx descriptor ring */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	for (i = 0, paddr = KSE_CDTXADDR(sc, 1); i < KSE_NTXDESC - 1; i++) {
		sc->sc_txdescs[i].t3 = paddr;
		paddr += sizeof(struct tdes);
	}
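	/* close the loop: the last descriptor links back to the first */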
	sc->sc_txdescs[KSE_NTXDESC - 1].t3 = KSE_CDTXADDR(sc, 0);
	KSE_CDTXSYNC(sc, 0, KSE_NTXDESC,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = KSE_NTXDESC;
	sc->sc_txnext = 0;

	for (i = 0; i < KSE_TXQUEUELEN; i++)
		sc->sc_txsoft[i].txs_mbuf = NULL;
	sc->sc_txsfree = KSE_TXQUEUELEN;
	sc->sc_txsnext = 0;
	sc->sc_txsdirty = 0;

	/* craft Rx descriptor ring */
	memset(sc->sc_rxdescs, 0, sizeof(sc->sc_rxdescs));
	for (i = 0, paddr = KSE_CDRXADDR(sc, 1); i < KSE_NRXDESC - 1; i++) {
		sc->sc_rxdescs[i].r3 = paddr;
		paddr += sizeof(struct rdes);
	}
	sc->sc_rxdescs[KSE_NRXDESC - 1].r3 = KSE_CDRXADDR(sc, 0);
	for (i = 0; i < KSE_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_mbuf == NULL) {
			if ((error = add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    device_xname(sc->sc_dev), i, error);
				rxdrain(sc);
				goto out;
			}
		} else
			KSE_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	/* hand Tx/Rx rings to HW */
	CSR_WRITE_4(sc, TDLB, KSE_CDTXADDR(sc, 0));
	CSR_WRITE_4(sc, RDLB, KSE_CDRXADDR(sc, 0));

	sc->sc_txc = TXC_TEN | TXC_EP | TXC_AC | TXC_FCE;
	sc->sc_rxc = RXC_REN | RXC_RU | RXC_FCE;
	if (ifp->if_flags & IFF_PROMISC)
		sc->sc_rxc |= RXC_RA;
	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_rxc |= RXC_RB;
	sc->sc_t1csum = sc->sc_mcsum = 0;
	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) {
		sc->sc_rxc |= RXC_ICC;
		sc->sc_mcsum |= M_CSUM_IPv4;
	}
	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Tx) {
		sc->sc_txc |= TXC_ICG;
		sc->sc_t1csum |= T1_IPCKG;
	}
	if (ifp->if_capenable & IFCAP_CSUM_TCPv4_Rx) {
		sc->sc_rxc |= RXC_TCC;
		sc->sc_mcsum |= M_CSUM_TCPv4;
	}
	if (ifp->if_capenable & IFCAP_CSUM_TCPv4_Tx) {
		sc->sc_txc |= TXC_TCG;
		sc->sc_t1csum |= T1_TCPCKG;
	}
	if (ifp->if_capenable & IFCAP_CSUM_UDPv4_Rx) {
		sc->sc_rxc |= RXC_UCC;
		sc->sc_mcsum |= M_CSUM_UDPv4;
	}
	if (ifp->if_capenable & IFCAP_CSUM_UDPv4_Tx) {
		sc->sc_txc |= TXC_UCG;
		sc->sc_t1csum |= T1_UDPCKG;
	}
	sc->sc_txc |= (kse_burstsize << TXC_BS_SFT);
	sc->sc_rxc |= (kse_burstsize << RXC_BS_SFT);

	/* build multicast hash filter if necessary */
	kse_set_filter(sc);

	/* set current media */
	(void)ifmedia_upd(ifp);

	/* enable transmitter and receiver */
	CSR_WRITE_4(sc, MDTXC, sc->sc_txc);
	CSR_WRITE_4(sc, MDRXC, sc->sc_rxc);
	CSR_WRITE_4(sc, MDRSC, 1);

	/* enable interrupts */
	sc->sc_inten = INT_DMTS|INT_DMRS|INT_DMRBUS;
	if (sc->sc_chip == 0x8841)
		sc->sc_inten |= INT_DMLCS;
	CSR_WRITE_4(sc, INTST, ~0);
	CSR_WRITE_4(sc, INTEN, sc->sc_inten);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	if (sc->sc_chip == 0x8841) {
		/* start one second timer */
		callout_reset(&sc->sc_callout, hz, phy_tick, sc);
	}
#ifdef KSE_EVENT_COUNTERS
	/* start statistics gather 1 minute timer */
	zerostats(sc);
	callout_reset(&sc->sc_stat_ch, hz * 60, stat_tick, sc);
#endif

 out:
	if (error) {
		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
		ifp->if_timer = 0;
		printf("%s: interface not running\n",
		    device_xname(sc->sc_dev));
	}
	return error;
}

static void
kse_stop(struct ifnet *ifp, int disable)
{
	struct kse_softc *sc = ifp->if_softc;
	struct kse_txsoft *txs;
	int i;

	if (sc->sc_chip == 0x8841)
		callout_stop(&sc->sc_callout);
	callout_stop(&sc->sc_stat_ch);

	sc->sc_txc &= ~TXC_TEN;
	sc->sc_rxc &= ~RXC_REN;
	CSR_WRITE_4(sc, MDTXC, sc->sc_txc);
	CSR_WRITE_4(sc, MDRXC, sc->sc_rxc);

	for (i = 0; i < KSE_TXQUEUELEN; i++) {
		txs = &sc->sc_txsoft[i];
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
	}

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	if (disable)
		rxdrain(sc);
}

static void
kse_reset(struct kse_softc *sc)
{

	CSR_WRITE_2(sc, GRR, 1);
	delay(1000); /* the datasheet does not specify the required delay */
	CSR_WRITE_2(sc, GRR, 0);

	CSR_WRITE_2(sc, CIDR, 1);
}

static void
kse_watchdog(struct ifnet *ifp)
{
	struct kse_softc *sc = ifp->if_softc;

	/*
	 * Since we're not interrupting every packet, sweep
	 * up before we report an error.
	 */
	txreap(sc);

	if (sc->sc_txfree != KSE_NTXDESC) {
		printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
		    device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
		    sc->sc_txnext);
		ifp->if_oerrors++;

		/* Reset the interface. */
		kse_init(ifp);
	} else if (ifp->if_flags & IFF_DEBUG)
		printf("%s: recovered from device timeout\n",
		    device_xname(sc->sc_dev));

	/* Try to get more packets going. */
	kse_start(ifp);
}

static void
kse_start(struct ifnet *ifp)
{
	struct kse_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct kse_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx, ofree, seg;
	uint32_t tdes0;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (sc->sc_txsfree < KSE_TXQUEUE_GC) {
			txreap(sc);
			if (sc->sc_txsfree == 0)
				break;
		}
		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    device_xname(sc->sc_dev));
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/* Short on resources, just stop for now. */
			break;
		}

		if (dmamap->dm_nsegs > sc->sc_txfree) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		lasttx = -1; tdes0 = 0;
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = KSE_NEXTTX(nexttx)) {
			struct tdes *tdes = &sc->sc_txdescs[nexttx];
			/*
			 * If this is the first descriptor we're
			 * enqueueing, don't set the OWN bit just
			 * yet.  That could cause a race condition.
			 * We'll do it below.
			 */
			tdes->t2 = dmamap->dm_segs[seg].ds_addr;
			tdes->t1 = sc->sc_t1csum
			    | (dmamap->dm_segs[seg].ds_len & T1_TBS_MASK);
			tdes->t0 = tdes0;
			tdes0 |= T0_OWN;
			lasttx = nexttx;
		}

		/*
		 * Outgoing NFS mbufs must be unloaded as soon as their
		 * transmission completes.  Without T1_IC an NFS mbuf is
		 * left unacknowledged for an excessive time and NFS stops
		 * making progress until kse_watchdog() calls txreap() to
		 * reclaim it.  It's painful to traverse every mbuf chain
		 * to determine whether someone is waiting for Tx
		 * completion.
		 */
		m = m0;
		do {
			if ((m->m_flags & M_EXT) && m->m_ext.ext_free) {
				sc->sc_txdescs[lasttx].t1 |= T1_IC;
				break;
			}
		} while ((m = m->m_next) != NULL);

		/* mark the frame boundaries, then set T0_OWN on the first
		 * descriptor last of all, so the hardware never sees a
		 * partially built chain */
		sc->sc_txdescs[lasttx].t1 |= T1_LS;
		sc->sc_txdescs[sc->sc_txnext].t1 |= T1_FS;
		sc->sc_txdescs[sc->sc_txnext].t0 = T0_OWN;
		KSE_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* tell DMA start transmit */
		CSR_WRITE_4(sc, MDTSC, 1);

		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_lastdesc = lasttx;
		txs->txs_ndesc = dmamap->dm_nsegs;

		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;
		sc->sc_txsfree--;
		sc->sc_txsnext = KSE_NEXTTXS(sc->sc_txsnext);
		/*
		 * Pass the packet to any BPF listeners.
		 */
		bpf_mtap(ifp, m0);
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree == 0) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}
	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

static void
kse_set_filter(struct kse_softc *sc)
{
	struct ether_multistep step;
	struct ether_multi *enm;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t h, hashes[2];

	sc->sc_rxc &= ~(RXC_MHTE | RXC_RM);
	ifp->if_flags &= ~IFF_ALLMULTI;
	if (ifp->if_flags & IFF_PROMISC)
		return;

	ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
	if (enm == NULL)
		return;
	hashes[0] = hashes[1] = 0;
	do {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}
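		/* index the 64-bit multicast hash table (MTR1:MTR0)
		 * with the top 6 bits of the little-endian CRC */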
		h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;
		hashes[h >> 5] |= 1 << (h & 0x1f);
		ETHER_NEXT_MULTI(step, enm);
	} while (enm != NULL);
	sc->sc_rxc |= RXC_MHTE;
	CSR_WRITE_4(sc, MTR0, hashes[0]);
	CSR_WRITE_4(sc, MTR1, hashes[1]);
	return;
 allmulti:
	sc->sc_rxc |= RXC_RM;
	ifp->if_flags |= IFF_ALLMULTI;
}

static int
add_rxbuf(struct kse_softc *sc, int idx)
{
	struct kse_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    device_xname(sc->sc_dev), idx, error);
		panic("kse_add_rxbuf");
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	KSE_INIT_RXDESC(sc, idx);

	return 0;
}

static void
rxdrain(struct kse_softc *sc)
{
	struct kse_rxsoft *rxs;
	int i;

	for (i = 0; i < KSE_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}

static int
kse_intr(void *arg)
{
	struct kse_softc *sc = arg;
	uint32_t isr;

	if ((isr = CSR_READ_4(sc, INTST)) == 0)
		return 0;

	if (isr & INT_DMRS)
		rxintr(sc);
	if (isr & INT_DMTS)
		txreap(sc);
	if (isr & INT_DMLCS)
		lnkchg(sc);
	if (isr & INT_DMRBUS)
		printf("%s: Rx descriptor full\n", device_xname(sc->sc_dev));

	CSR_WRITE_4(sc, INTST, isr);
	return 1;
}

static void
rxintr(struct kse_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct kse_rxsoft *rxs;
	struct mbuf *m;
	uint32_t rxstat;
	int i, len;

	for (i = sc->sc_rxptr; /*CONSTCOND*/ 1; i = KSE_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		KSE_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxstat = sc->sc_rxdescs[i].r0;

		if (rxstat & R0_OWN) /* desc is left empty */
			break;

		/* R0_FS|R0_LS must have been marked for this desc */

		if (rxstat & R0_ES) {
			ifp->if_ierrors++;
#define PRINTERR(bit, str)						\
			if (rxstat & (bit))				\
				printf("%s: receive error: %s\n",	\
				    device_xname(sc->sc_dev), str)
			PRINTERR(R0_TL, "frame too long");
			PRINTERR(R0_RF, "runt frame");
			PRINTERR(R0_CE, "bad FCS");
#undef PRINTERR
			KSE_INIT_RXDESC(sc, i);
			continue;
		}

		/* HW errata; frame might be too small or too large */

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		len = rxstat & R0_FL_MASK;
		len -= ETHER_CRC_LEN;	/* trim CRC off */
		m = rxs->rxs_mbuf;

		if (add_rxbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			KSE_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat,
			    rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			continue;
		}

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

		if (sc->sc_mcsum) {
			m->m_pkthdr.csum_flags |= sc->sc_mcsum;
			if (rxstat & R0_IPE)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
			if (rxstat & (R0_TCPE | R0_UDPE))
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}
		bpf_mtap(ifp, m);
		(*ifp->if_input)(ifp, m);
#ifdef KSEDIAGNOSTIC
		if (kse_monitor_rxintr > 0) {
			printf("m stat %x data %p len %d\n",
			    rxstat, m->m_data, m->m_len);
		}
#endif
	}
	sc->sc_rxptr = i;
}

static void
txreap(struct kse_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct kse_txsoft *txs;
	uint32_t txstat;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	for (i = sc->sc_txsdirty; sc->sc_txsfree != KSE_TXQUEUELEN;
	     i = KSE_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		KSE_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		txstat = sc->sc_txdescs[txs->txs_lastdesc].t0;

		if (txstat & T0_OWN) /* desc is still in use */
			break;

		/* there is no way to tell transmission status per frame */

		ifp->if_opackets++;

		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}
	sc->sc_txsdirty = i;
	if (sc->sc_txsfree == KSE_TXQUEUELEN)
		ifp->if_timer = 0;
}

static void
lnkchg(struct kse_softc *sc)
{
	struct ifmediareq ifmr;

#if 0 /* rambling link status */
	printf("%s: link %s\n", device_xname(sc->sc_dev),
	    (CSR_READ_2(sc, P1SR) & (1U << 5)) ? "up" : "down");
#endif
	ifmedia_sts(&sc->sc_ethercom.ec_if, &ifmr);
}

static int
ifmedia_upd(struct ifnet *ifp)
{
	struct kse_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_media;
	uint16_t ctl;

	ctl = 0;
	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
		ctl |= (1U << 13);	/* restart AN */
		ctl |= (1U << 7);	/* enable AN */
		ctl |= (1U << 4);	/* advertise flow control pause */
		ctl |= (1U << 3) | (1U << 2) | (1U << 1) | (1U << 0);
	} else {
		if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX)
			ctl |= (1U << 6);
		if (ifm->ifm_media & IFM_FDX)
			ctl |= (1U << 5);
	}
	CSR_WRITE_2(sc, P1CR4, ctl);

	sc->sc_media_active = IFM_NONE;
	sc->sc_media_status = IFM_AVALID;

	return 0;
}

static void
ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct kse_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_media;
	uint16_t ctl, sts, result;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	ctl = CSR_READ_2(sc, P1CR4);
	sts = CSR_READ_2(sc, P1SR);
	if ((sts & (1U << 5)) == 0) {
		ifmr->ifm_active |= IFM_NONE;
		goto out;	/* link is down */
	}
	ifmr->ifm_status |= IFM_ACTIVE;
	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
		if ((sts & (1U << 6)) == 0) {
			ifmr->ifm_active |= IFM_NONE;
			goto out;	/* negotiation in progress */
		}
		result = ctl & sts & 017;
		if (result & (1U << 3))
			ifmr->ifm_active |= IFM_100_TX|IFM_FDX;
		else if (result & (1U << 2))
			ifmr->ifm_active |= IFM_100_TX;
		else if (result & (1U << 1))
			ifmr->ifm_active |= IFM_10_T|IFM_FDX;
		else if (result & (1U << 0))
			ifmr->ifm_active |= IFM_10_T;
		else
			ifmr->ifm_active |= IFM_NONE;
		if (ctl & (1U << 4))
			ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
		if (sts & (1U << 4))
			ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
	} else {
		ifmr->ifm_active |= (sts & (1U << 10)) ? IFM_100_TX : IFM_10_T;
		if (sts & (1U << 9))
			ifmr->ifm_active |= IFM_FDX;
		if (sts & (1U << 12))
			ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
		if (sts & (1U << 11))
			ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
	}

 out:
	sc->sc_media_status = ifmr->ifm_status;
	sc->sc_media_active = ifmr->ifm_active;
}

static void
phy_tick(void *arg)
{
	struct kse_softc *sc = arg;
	struct ifmediareq ifmr;
	int s;

	s = splnet();
	ifmedia_sts(&sc->sc_ethercom.ec_if, &ifmr);
	splx(s);

	callout_reset(&sc->sc_callout, hz, phy_tick, sc);
}

static int
ifmedia2_upd(struct ifnet *ifp)
{
	struct kse_softc *sc = ifp->if_softc;

	sc->sc_media_status = IFM_AVALID;
	sc->sc_media_active = IFM_NONE;
	return 0;
}

static void
ifmedia2_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct kse_softc *sc = ifp->if_softc;
	int p1sts, p2sts;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;
	p1sts = CSR_READ_2(sc, P1SR);
	p2sts = CSR_READ_2(sc, P2SR);
	if (((p1sts | p2sts) & (1U << 5)) == 0)
		ifmr->ifm_active |= IFM_NONE;
	else {
		ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active |= IFM_100_TX|IFM_FDX;
		ifmr->ifm_active |= IFM_FLOW|IFM_ETH_RXPAUSE|IFM_ETH_TXPAUSE;
	}
	sc->sc_media_status = ifmr->ifm_status;
	sc->sc_media_active = ifmr->ifm_active;
}

#ifdef KSE_EVENT_COUNTERS
static void
stat_tick(void *arg)
{
	struct kse_softc *sc = arg;
	struct ksext *ee = &sc->sc_ext;
	int nport, p, i, val;

	nport = (sc->sc_chip == 0x8842) ? 3 : 1;
	for (p = 0; p < nport; p++) {
		for (i = 0; i < 32; i++) {
			val = 0x1c00 | (p * 0x20 + i);
			CSR_WRITE_2(sc, IACR, val);
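			/* poll IADR5 until the counter-valid bit (30)
			 * appears; bit 31 reports a counter overflow */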
			do {
				val = CSR_READ_2(sc, IADR5) << 16;
			} while ((val & (1U << 30)) == 0);
			if (val & (1U << 31)) {
				(void)CSR_READ_2(sc, IADR4);
				val = 0x3fffffff; /* counter has overflowed */
			} else {
				val &= 0x3fff0000;		/* 29:16 */
				val |= CSR_READ_2(sc, IADR4);	/* 15:0 */
			}
			ee->pev[p][i].ev_count += val;	/* i (0-31) */
		}
		CSR_WRITE_2(sc, IACR, 0x1c00 + 0x100 + p);
		ee->pev[p][32].ev_count = CSR_READ_2(sc, IADR4);  /* 32 */
		CSR_WRITE_2(sc, IACR, 0x1c00 + 0x100 + p * 3 + 1);
		ee->pev[p][33].ev_count = CSR_READ_2(sc, IADR4);  /* 33 */
	}
	callout_reset(&sc->sc_stat_ch, hz * 60, stat_tick, arg);
}

static void
zerostats(struct kse_softc *sc)
{
	struct ksext *ee = &sc->sc_ext;
	int nport, p, i, val;

	/* make sure all the HW counters get zero */
	nport = (sc->sc_chip == 0x8842) ? 3 : 1;
	for (p = 0; p < nport; p++) {
		for (i = 0; i < 31; i++) {
			val = 0x1c00 | (p * 0x20 + i);
			CSR_WRITE_2(sc, IACR, val);
			do {
				val = CSR_READ_2(sc, IADR5) << 16;
			} while ((val & (1U << 30)) == 0);
			(void)CSR_READ_2(sc, IADR4);
			ee->pev[p][i].ev_count = 0;
		}
	}
}
#endif