if_kse.c revision 1.59 1 1.59 thorpej /* $NetBSD: if_kse.c,v 1.59 2022/09/24 18:12:42 thorpej Exp $ */
2 1.1 nisimura
3 1.15 nisimura /*-
4 1.15 nisimura * Copyright (c) 2006 The NetBSD Foundation, Inc.
5 1.15 nisimura * All rights reserved.
6 1.15 nisimura *
7 1.15 nisimura * This code is derived from software contributed to The NetBSD Foundation
8 1.15 nisimura * by Tohru Nishimura.
9 1.1 nisimura *
10 1.1 nisimura * Redistribution and use in source and binary forms, with or without
11 1.1 nisimura * modification, are permitted provided that the following conditions
12 1.1 nisimura * are met:
13 1.1 nisimura * 1. Redistributions of source code must retain the above copyright
14 1.1 nisimura * notice, this list of conditions and the following disclaimer.
15 1.1 nisimura * 2. Redistributions in binary form must reproduce the above copyright
16 1.1 nisimura * notice, this list of conditions and the following disclaimer in the
17 1.1 nisimura * documentation and/or other materials provided with the distribution.
18 1.1 nisimura *
19 1.15 nisimura * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 1.15 nisimura * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 1.15 nisimura * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 1.15 nisimura * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 1.15 nisimura * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 1.15 nisimura * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 1.15 nisimura * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 1.15 nisimura * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 1.15 nisimura * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 1.15 nisimura * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 1.15 nisimura * POSSIBILITY OF SUCH DAMAGE.
30 1.1 nisimura */
31 1.1 nisimura
32 1.41 nisimura /*
33 1.42 nisimura * Micrel 8841/8842 10/100 PCI ethernet driver
34 1.41 nisimura */
35 1.41 nisimura
36 1.1 nisimura #include <sys/cdefs.h>
37 1.59 thorpej __KERNEL_RCSID(0, "$NetBSD: if_kse.c,v 1.59 2022/09/24 18:12:42 thorpej Exp $");
38 1.1 nisimura
39 1.1 nisimura #include <sys/param.h>
40 1.51 nisimura #include <sys/bus.h>
41 1.51 nisimura #include <sys/intr.h>
42 1.51 nisimura #include <sys/device.h>
43 1.1 nisimura #include <sys/callout.h>
44 1.51 nisimura #include <sys/ioctl.h>
45 1.56 nisimura #include <sys/mbuf.h>
46 1.56 nisimura #include <sys/rndsource.h>
47 1.51 nisimura #include <sys/errno.h>
48 1.51 nisimura #include <sys/systm.h>
49 1.1 nisimura #include <sys/kernel.h>
50 1.1 nisimura
51 1.1 nisimura #include <net/if.h>
52 1.1 nisimura #include <net/if_media.h>
53 1.1 nisimura #include <net/if_dl.h>
54 1.1 nisimura #include <net/if_ether.h>
55 1.42 nisimura #include <dev/mii/mii.h>
56 1.42 nisimura #include <dev/mii/miivar.h>
57 1.1 nisimura #include <net/bpf.h>
58 1.1 nisimura
59 1.1 nisimura #include <dev/pci/pcivar.h>
60 1.1 nisimura #include <dev/pci/pcireg.h>
61 1.1 nisimura #include <dev/pci/pcidevs.h>
62 1.1 nisimura
63 1.47 nisimura #define KSE_LINKDEBUG 0
64 1.39 nisimura
65 1.1 nisimura #define CSR_READ_4(sc, off) \
66 1.49 nisimura bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (off))
67 1.1 nisimura #define CSR_WRITE_4(sc, off, val) \
68 1.49 nisimura bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (off), (val))
69 1.1 nisimura #define CSR_READ_2(sc, off) \
70 1.42 nisimura bus_space_read_2((sc)->sc_st, (sc)->sc_sh, (off))
71 1.1 nisimura #define CSR_WRITE_2(sc, off, val) \
72 1.42 nisimura bus_space_write_2((sc)->sc_st, (sc)->sc_sh, (off), (val))
73 1.1 nisimura
74 1.56 nisimura #define MDTXC 0x000 /* DMA transmit control */
75 1.56 nisimura #define MDRXC 0x004 /* DMA receive control */
76 1.56 nisimura #define MDTSC 0x008 /* trigger DMA transmit (SC) */
77 1.56 nisimura #define MDRSC 0x00c /* trigger DMA receive (SC) */
78 1.56 nisimura #define TDLB 0x010 /* transmit descriptor list base */
79 1.56 nisimura #define RDLB 0x014 /* receive descriptor list base */
80 1.56 nisimura #define MTR0 0x020 /* multicast table 31:0 */
81 1.56 nisimura #define MTR1 0x024 /* multicast table 63:32 */
82 1.56 nisimura #define INTEN 0x028 /* interrupt enable */
83 1.56 nisimura #define INTST 0x02c /* interrupt status */
84 1.56 nisimura #define MAAL0 0x080 /* additional MAC address 0 low */
85 1.56 nisimura #define MAAH0 0x084 /* additional MAC address 0 high */
86 1.56 nisimura #define MARL 0x200 /* MAC address low */
87 1.56 nisimura #define MARM 0x202 /* MAC address middle */
88 1.56 nisimura #define MARH 0x204 /* MAC address high */
89 1.56 nisimura #define GRR 0x216 /* global reset */
90 1.56 nisimura #define SIDER 0x400 /* switch ID and function enable */
91 1.56 nisimura #define SGCR3 0x406 /* switch function control 3 */
92 1.56 nisimura #define CR3_USEHDX (1U<<6) /* use half-duplex 8842 host port */
93 1.56 nisimura #define CR3_USEFC (1U<<5) /* use flowcontrol 8842 host port */
94 1.56 nisimura #define IACR 0x4a0 /* indirect access control */
95 1.56 nisimura #define IADR1 0x4a2 /* indirect access data 66:63 */
96 1.56 nisimura #define IADR2 0x4a4 /* indirect access data 47:32 */
97 1.56 nisimura #define IADR3 0x4a6 /* indirect access data 63:48 */
98 1.56 nisimura #define IADR4 0x4a8 /* indirect access data 15:0 */
99 1.56 nisimura #define IADR5 0x4aa /* indirect access data 31:16 */
100 1.56 nisimura #define IADR_LATCH (1U<<30) /* latch completed indication */
101 1.56 nisimura #define IADR_OVF (1U<<31) /* overflow detected */
102 1.56 nisimura #define P1CR4 0x512 /* port 1 control 4 */
103 1.56 nisimura #define P1SR 0x514 /* port 1 status */
104 1.56 nisimura #define P2CR4 0x532 /* port 2 control 4 */
105 1.56 nisimura #define P2SR 0x534 /* port 2 status */
106 1.42 nisimura #define PxCR_STARTNEG (1U<<9) /* restart auto negotiation */
107 1.42 nisimura #define PxCR_AUTOEN (1U<<7) /* auto negotiation enable */
108 1.42 nisimura #define PxCR_SPD100 (1U<<6) /* force speed 100 */
109 1.42 nisimura #define PxCR_USEFDX (1U<<5) /* force full duplex */
110 1.42 nisimura #define PxCR_USEFC (1U<<4) /* advertise pause flow control */
111 1.42 nisimura #define PxSR_ACOMP (1U<<6) /* auto negotiation completed */
112 1.42 nisimura #define PxSR_SPD100 (1U<<10) /* speed is 100Mbps */
113 1.42 nisimura #define PxSR_FDX (1U<<9) /* full duplex */
114 1.42 nisimura #define PxSR_LINKUP (1U<<5) /* link is good */
115 1.42 nisimura #define PxSR_RXFLOW (1U<<12) /* receive flow control active */
116 1.42 nisimura #define PxSR_TXFLOW (1U<<11) /* transmit flow control active */
117 1.56 nisimura #define P1VIDCR 0x504 /* port 1 vtag */
118 1.56 nisimura #define P2VIDCR 0x524 /* port 2 vtag */
119 1.56 nisimura #define P3VIDCR 0x544 /* 8842 host vtag */
120 1.56 nisimura #define EVCNTBR 0x1c00 /* 3 sets of 34 event counters */
121 1.1 nisimura
122 1.1 nisimura #define TXC_BS_MSK 0x3f000000 /* burst size */
123 1.1 nisimura #define TXC_BS_SFT (24) /* 1,2,4,8,16,32 or 0 for unlimited */
124 1.1 nisimura #define TXC_UCG (1U<<18) /* generate UDP checksum */
125 1.1 nisimura #define TXC_TCG (1U<<17) /* generate TCP checksum */
126 1.1 nisimura #define TXC_ICG (1U<<16) /* generate IP checksum */
127 1.42 nisimura #define TXC_FCE (1U<<9) /* generate PAUSE to moderate Rx lvl */
128 1.1 nisimura #define TXC_EP (1U<<2) /* enable automatic padding */
129 1.1 nisimura #define TXC_AC (1U<<1) /* add CRC to frame */
130 1.1 nisimura #define TXC_TEN (1) /* enable DMA to run */
131 1.1 nisimura
132 1.1 nisimura #define RXC_BS_MSK 0x3f000000 /* burst size */
133 1.1 nisimura #define RXC_BS_SFT (24) /* 1,2,4,8,16,32 or 0 for unlimited */
134 1.6 nisimura #define RXC_IHAE (1U<<19) /* IP header alignment enable */
135 1.5 nisimura #define RXC_UCC (1U<<18) /* run UDP checksum */
136 1.5 nisimura #define RXC_TCC (1U<<17) /* run TDP checksum */
137 1.5 nisimura #define RXC_ICC (1U<<16) /* run IP checksum */
138 1.42 nisimura #define RXC_FCE (1U<<9) /* accept PAUSE to throttle Tx */
139 1.1 nisimura #define RXC_RB (1U<<6) /* receive broadcast frame */
140 1.44 nisimura #define RXC_RM (1U<<5) /* receive all multicast (inc. RB) */
141 1.44 nisimura #define RXC_RU (1U<<4) /* receive 16 additional unicasts */
142 1.1 nisimura #define RXC_RE (1U<<3) /* accept error frame */
143 1.1 nisimura #define RXC_RA (1U<<2) /* receive all frame */
144 1.6 nisimura #define RXC_MHTE (1U<<1) /* use multicast hash table */
145 1.1 nisimura #define RXC_REN (1) /* enable DMA to run */
146 1.1 nisimura
147 1.1 nisimura #define INT_DMLCS (1U<<31) /* link status change */
148 1.1 nisimura #define INT_DMTS (1U<<30) /* sending desc. has posted Tx done */
149 1.1 nisimura #define INT_DMRS (1U<<29) /* frame was received */
150 1.1 nisimura #define INT_DMRBUS (1U<<27) /* Rx descriptor pool is full */
151 1.46 nisimura #define INT_DMxPSS (3U<<25) /* 26:25 DMA Tx/Rx have stopped */
152 1.1 nisimura
/*
 * Tx descriptor as laid out in memory for the DMA engine.
 * t0 carries T0_OWN, t1 the T1_* control bits and segment size;
 * t2/t3 are presumably buffer address and link — TODO confirm
 * against the descriptor-fill code elsewhere in this file.
 */
struct tdes {
	uint32_t t0, t1, t2, t3;
};
156 1.56 nisimura
/*
 * Rx descriptor as laid out in memory for the DMA engine.
 * r0 = R0_* status plus R0_OWN, r1 = buffer size (R1_*),
 * r2 = buffer bus address (see KSE_INIT_RXDESC); r3 is not
 * touched in this file — presumably reserved or a link field.
 */
struct rdes {
	uint32_t r0, r1, r2, r3;
};
160 1.56 nisimura
161 1.1 nisimura #define T0_OWN (1U<<31) /* desc is ready to Tx */
162 1.1 nisimura
163 1.1 nisimura #define R0_OWN (1U<<31) /* desc is empty */
164 1.1 nisimura #define R0_FS (1U<<30) /* first segment of frame */
165 1.1 nisimura #define R0_LS (1U<<29) /* last segment of frame */
166 1.1 nisimura #define R0_IPE (1U<<28) /* IP checksum error */
167 1.1 nisimura #define R0_TCPE (1U<<27) /* TCP checksum error */
168 1.1 nisimura #define R0_UDPE (1U<<26) /* UDP checksum error */
169 1.1 nisimura #define R0_ES (1U<<25) /* error summary */
170 1.1 nisimura #define R0_MF (1U<<24) /* multicast frame */
171 1.5 nisimura #define R0_SPN 0x00300000 /* 21:20 switch port 1/2 */
172 1.5 nisimura #define R0_ALIGN 0x00300000 /* 21:20 (KSZ8692P) Rx align amount */
173 1.5 nisimura #define R0_RE (1U<<19) /* MII reported error */
174 1.5 nisimura #define R0_TL (1U<<18) /* frame too long, beyond 1518 */
175 1.1 nisimura #define R0_RF (1U<<17) /* damaged runt frame */
176 1.1 nisimura #define R0_CE (1U<<16) /* CRC error */
177 1.1 nisimura #define R0_FT (1U<<15) /* frame type */
178 1.1 nisimura #define R0_FL_MASK 0x7ff /* frame length 10:0 */
179 1.1 nisimura
180 1.1 nisimura #define T1_IC (1U<<31) /* post interrupt on complete */
181 1.1 nisimura #define T1_FS (1U<<30) /* first segment of frame */
182 1.1 nisimura #define T1_LS (1U<<29) /* last segment of frame */
183 1.1 nisimura #define T1_IPCKG (1U<<28) /* generate IP checksum */
184 1.1 nisimura #define T1_TCPCKG (1U<<27) /* generate TCP checksum */
185 1.1 nisimura #define T1_UDPCKG (1U<<26) /* generate UDP checksum */
186 1.1 nisimura #define T1_TER (1U<<25) /* end of ring */
187 1.5 nisimura #define T1_SPN 0x00300000 /* 21:20 switch port 1/2 */
188 1.1 nisimura #define T1_TBS_MASK 0x7ff /* segment size 10:0 */
189 1.1 nisimura
190 1.1 nisimura #define R1_RER (1U<<25) /* end of ring */
191 1.8 nisimura #define R1_RBS_MASK 0x7fc /* segment size 10:0 */
192 1.1 nisimura
193 1.1 nisimura #define KSE_NTXSEGS 16
194 1.1 nisimura #define KSE_TXQUEUELEN 64
195 1.1 nisimura #define KSE_TXQUEUELEN_MASK (KSE_TXQUEUELEN - 1)
196 1.1 nisimura #define KSE_TXQUEUE_GC (KSE_TXQUEUELEN / 4)
197 1.1 nisimura #define KSE_NTXDESC 256
198 1.1 nisimura #define KSE_NTXDESC_MASK (KSE_NTXDESC - 1)
199 1.1 nisimura #define KSE_NEXTTX(x) (((x) + 1) & KSE_NTXDESC_MASK)
200 1.1 nisimura #define KSE_NEXTTXS(x) (((x) + 1) & KSE_TXQUEUELEN_MASK)
201 1.1 nisimura
202 1.1 nisimura #define KSE_NRXDESC 64
203 1.1 nisimura #define KSE_NRXDESC_MASK (KSE_NRXDESC - 1)
204 1.1 nisimura #define KSE_NEXTRX(x) (((x) + 1) & KSE_NRXDESC_MASK)
205 1.1 nisimura
/*
 * Control data: both descriptor rings in one DMA-safe allocation,
 * mapped by sc_cddmamap.  The KSE_CD*OFF macros give the byte
 * offset of a descriptor within this structure.
 */
struct kse_control_data {
	struct tdes kcd_txdescs[KSE_NTXDESC];
	struct rdes kcd_rxdescs[KSE_NRXDESC];
};
#define KSE_CDOFF(x)		offsetof(struct kse_control_data, x)
#define KSE_CDTXOFF(x)		KSE_CDOFF(kcd_txdescs[(x)])
#define KSE_CDRXOFF(x)		KSE_CDOFF(kcd_rxdescs[(x)])
213 1.1 nisimura
/* Software state for one queued Tx job (an mbuf chain in flight). */
struct kse_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};
221 1.1 nisimura
/* Software state for one Rx descriptor (one mbuf cluster). */
struct kse_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};
226 1.1 nisimura
/* Per-device driver state. */
struct kse_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_memsize;		/* csr map size */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	pci_chipset_tag_t sc_pc;	/* PCI chipset tag */
	struct ethercom sc_ethercom;	/* Ethernet common data */
	void *sc_ih;			/* interrupt cookie */

	struct mii_data sc_mii;		/* mii 8841 */
	struct ifmedia sc_media;	/* ifmedia 8842 */
	int sc_flowflags;		/* 802.3x PAUSE flow control */

	callout_t  sc_tick_ch;		/* MII tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	struct kse_control_data *sc_control_data;
#define sc_txdescs	sc_control_data->kcd_txdescs
#define sc_rxdescs	sc_control_data->kcd_rxdescs

	struct kse_txsoft sc_txsoft[KSE_TXQUEUELEN];
	struct kse_rxsoft sc_rxsoft[KSE_NRXDESC];
	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */
	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next ready Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */
	int sc_rxptr;			/* next ready Rx descriptor/descsoft */

	/* sc_txc/sc_rxc look like TXC_*/RXC_* register shadows, and
	 * sc_t1csum/sc_mcsum like Tx-descriptor/mbuf checksum flag
	 * caches — set up outside this chunk; TODO confirm in kse_init */
	uint32_t sc_txc, sc_rxc;
	uint32_t sc_t1csum;
	int sc_mcsum;
	uint32_t sc_inten;		/* interrupt enable mask (INTEN) */
	uint32_t sc_chip;		/* PCI product: 0x8841 or 0x8842 */

	krndsource_t rnd_source;	/* random source */

#ifdef KSE_EVENT_COUNTERS
	/* 1 counter set for 8841, 3 (one per switch port) for 8842 */
	struct ksext {
		char evcntname[3][8];
		struct evcnt pev[3][34];
	} sc_ext;			/* switch statistics */
#endif
};
274 1.1 nisimura
/* Bus (DMA) address of Tx/Rx descriptor (x) within the control data. */
#define KSE_CDTXADDR(sc, x)	((sc)->sc_cddma + KSE_CDTXOFF((x)))
#define KSE_CDRXADDR(sc, x)	((sc)->sc_cddma + KSE_CDRXOFF((x)))
277 1.1 nisimura
/*
 * bus_dmamap_sync descriptors [x, x+n) of the Tx ring with the given
 * ops, issuing two syncs when the range wraps past the end of the ring.
 */
#define KSE_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > KSE_NTXDESC) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    KSE_CDTXOFF(__x), sizeof(struct tdes) *		\
		    (KSE_NTXDESC - __x), (ops));			\
		__n -= (KSE_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    KSE_CDTXOFF(__x), sizeof(struct tdes) * __n, (ops));	\
} while (/*CONSTCOND*/0)
298 1.1 nisimura
/* bus_dmamap_sync a single Rx descriptor with the given ops. */
#define KSE_CDRXSYNC(sc, x, ops)					\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    KSE_CDRXOFF((x)), sizeof(struct rdes), (ops));		\
} while (/*CONSTCOND*/0)
304 1.1 nisimura
/*
 * Hand Rx descriptor (x) and its mbuf cluster back to the chip:
 * rewind the mbuf data pointer, point r2 at the cluster's bus address,
 * program the buffer size into r1, and set R0_OWN so the hardware may
 * fill it.  Finishes with a PREREAD|PREWRITE sync of the descriptor.
 */
#define KSE_INIT_RXDESC(sc, x)						\
do {									\
	struct kse_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	struct rdes *__rxd = &(sc)->sc_rxdescs[(x)];			\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	__m->m_data = __m->m_ext.ext_buf;				\
	__rxd->r2 = __rxs->rxs_dmamap->dm_segs[0].ds_addr;		\
	__rxd->r1 = R1_RBS_MASK /* __m->m_ext.ext_size */;		\
	__rxd->r0 = R0_OWN;						\
	KSE_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); \
} while (/*CONSTCOND*/0)
317 1.1 nisimura
/* Patchable globals (shared by all instances, not per-softc). */
u_int kse_burstsize = 8;	/* DMA burst length tuning knob */

#ifdef KSEDIAGNOSTIC
u_int kse_monitor_rxintr;	/* fragmented UDP csum HW bug hook */
#endif
323 1.1 nisimura
324 1.18 cegger static int kse_match(device_t, cfdata_t, void *);
325 1.18 cegger static void kse_attach(device_t, device_t, void *);
326 1.1 nisimura
327 1.23 chs CFATTACH_DECL_NEW(kse, sizeof(struct kse_softc),
328 1.1 nisimura kse_match, kse_attach, NULL, NULL);
329 1.1 nisimura
330 1.3 christos static int kse_ioctl(struct ifnet *, u_long, void *);
331 1.1 nisimura static void kse_start(struct ifnet *);
332 1.1 nisimura static void kse_watchdog(struct ifnet *);
333 1.1 nisimura static int kse_init(struct ifnet *);
334 1.1 nisimura static void kse_stop(struct ifnet *, int);
335 1.1 nisimura static void kse_reset(struct kse_softc *);
336 1.53 nisimura static void kse_set_rcvfilt(struct kse_softc *);
337 1.1 nisimura static int add_rxbuf(struct kse_softc *, int);
338 1.1 nisimura static void rxdrain(struct kse_softc *);
339 1.1 nisimura static int kse_intr(void *);
340 1.1 nisimura static void rxintr(struct kse_softc *);
341 1.1 nisimura static void txreap(struct kse_softc *);
342 1.1 nisimura static void lnkchg(struct kse_softc *);
343 1.42 nisimura static int kse_ifmedia_upd(struct ifnet *);
344 1.42 nisimura static void kse_ifmedia_sts(struct ifnet *, struct ifmediareq *);
345 1.42 nisimura static void nopifmedia_sts(struct ifnet *, struct ifmediareq *);
346 1.1 nisimura static void phy_tick(void *);
347 1.42 nisimura int kse_mii_readreg(device_t, int, int, uint16_t *);
348 1.42 nisimura int kse_mii_writereg(device_t, int, int, uint16_t);
349 1.42 nisimura void kse_mii_statchg(struct ifnet *);
350 1.8 nisimura #ifdef KSE_EVENT_COUNTERS
351 1.8 nisimura static void stat_tick(void *);
352 1.8 nisimura static void zerostats(struct kse_softc *);
353 1.8 nisimura #endif
354 1.1 nisimura
/* PCI IDs this driver matches: Micrel KSZ8842 (w/ switch) and KSZ8841. */
static const struct device_compatible_entry compat_data[] = {
	{ .id = PCI_ID_CODE(PCI_VENDOR_MICREL,
		    PCI_PRODUCT_MICREL_KSZ8842) },
	{ .id = PCI_ID_CODE(PCI_VENDOR_MICREL,
		    PCI_PRODUCT_MICREL_KSZ8841) },

	PCI_COMPAT_EOL
};
363 1.57 thorpej
364 1.1 nisimura static int
365 1.18 cegger kse_match(device_t parent, cfdata_t match, void *aux)
366 1.1 nisimura {
367 1.1 nisimura struct pci_attach_args *pa = (struct pci_attach_args *)aux;
368 1.1 nisimura
369 1.57 thorpej return PCI_CLASS(pa->pa_class) == PCI_CLASS_NETWORK &&
370 1.57 thorpej pci_compatible_match(pa, compat_data);
371 1.1 nisimura }
372 1.1 nisimura
/*
 * Autoconf attach: map the chip's register window (BAR 0x10), enable
 * bus mastering, power the chip up, hook the interrupt, read the
 * station address out of the EEPROM-loaded MAR registers, allocate the
 * descriptor rings and per-packet DMA maps, set up ifmedia (real MII
 * for the 8841, a fixed 100FDX pseudo-media for the 8842's switch),
 * and finally attach the network interface.  On failure, unwinds in
 * strict reverse order through the fail_* labels.
 */
static void
kse_attach(device_t parent, device_t self, void *aux)
{
	struct kse_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifmedia *ifm;
	uint8_t enaddr[ETHER_ADDR_LEN];
	bus_dma_segment_t seg;
	int i, error, nseg;
	char intrbuf[PCI_INTRSTR_LEN];

	aprint_normal(": Micrel KSZ%04x Ethernet (rev. 0x%02x)\n",
	    PCI_PRODUCT(pa->pa_id), PCI_REVISION(pa->pa_class));

	if (pci_mapreg_map(pa, 0x10,
	    PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT,
	    0, &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_memsize) != 0) {
		aprint_error_dev(self, "unable to map device registers\n");
		return;
	}

	/* Make sure bus mastering is enabled. */
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) |
	    PCI_COMMAND_MASTER_ENABLE);

	/* Power up chip if necessary. */
	if ((error = pci_activate(pc, pa->pa_tag, self, NULL))
	    && error != EOPNOTSUPP) {
		aprint_error_dev(self, "cannot activate %d\n", error);
		return;
	}

	/* Map and establish our interrupt. */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(self, "unable to map interrupt\n");
		goto fail;
	}
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
	sc->sc_ih = pci_intr_establish_xname(pc, ih, IPL_NET, kse_intr, sc,
	    device_xname(self));
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto fail;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	sc->sc_dev = self;
	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pc = pa->pa_pc;
	sc->sc_chip = PCI_PRODUCT(pa->pa_id);

	/*
	 * Read the Ethernet address from the EEPROM.
	 * Each 16-bit MAR register holds two address bytes, most
	 * significant byte in the high half (MARH has bytes 0/1).
	 */
	i = CSR_READ_2(sc, MARL);
	enaddr[5] = i;
	enaddr[4] = i >> 8;
	i = CSR_READ_2(sc, MARM);
	enaddr[3] = i;
	enaddr[2] = i >> 8;
	i = CSR_READ_2(sc, MARH);
	enaddr[1] = i;
	enaddr[0] = i >> 8;
	aprint_normal_dev(self,
	    "Ethernet address %s\n", ether_sprintf(enaddr));

	/*
	 * Enable chip function.
	 */
	CSR_WRITE_2(sc, SIDER, 1);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct kse_control_data), PAGE_SIZE, 0, &seg, 1, &nseg, 0);
	if (error != 0) {
		aprint_error_dev(self,
		    "unable to allocate control data, error = %d\n", error);
		goto fail_0;
	}
	error = bus_dmamem_map(sc->sc_dmat, &seg, nseg,
	    sizeof(struct kse_control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(self,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}
	error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct kse_control_data), 1,
	    sizeof(struct kse_control_data), 0, 0, &sc->sc_cddmamap);
	if (error != 0) {
		aprint_error_dev(self,
		    "unable to create control data DMA map, "
		    "error = %d\n", error);
		goto fail_2;
	}
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct kse_control_data), NULL, 0);
	if (error != 0) {
		aprint_error_dev(self,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}
	/* One multi-segment DMA map per Tx job, one per Rx cluster. */
	for (i = 0; i < KSE_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    KSE_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error_dev(self,
			    "unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}
	for (i = 0; i < KSE_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    1, MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error_dev(self,
			    "unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	mii->mii_ifp = ifp;
	mii->mii_readreg = kse_mii_readreg;
	mii->mii_writereg = kse_mii_writereg;
	mii->mii_statchg = kse_mii_statchg;

	/* Initialize ifmedia structures. */
	if (sc->sc_chip == 0x8841) {
		/* use port 1 builtin PHY as index 1 device */
		sc->sc_ethercom.ec_mii = mii;
		ifm = &mii->mii_media;
		ifmedia_init(ifm, 0, kse_ifmedia_upd, kse_ifmedia_sts);
		mii_attach(sc->sc_dev, mii, 0xffffffff, 1 /* PHY1 */,
		    MII_OFFSET_ANY, MIIF_DOPAUSE);
		if (LIST_FIRST(&mii->mii_phys) == NULL) {
			ifmedia_add(ifm, IFM_ETHER | IFM_NONE, 0, NULL);
			ifmedia_set(ifm, IFM_ETHER | IFM_NONE);
		} else
			ifmedia_set(ifm, IFM_ETHER | IFM_AUTO);
	} else {
		/*
		 * pretend 100FDX w/ no alternative media selection.
		 * 8842 MAC is tied with a builtin 3 port switch. It can do
		 * 4 degree priotised rate control over either of tx/rx
		 * direction for any of ports, respectively. Tough, this
		 * driver leaves the rate unlimited intending 100Mbps maximum.
		 * 2 external ports behave in AN mode and this driver provides
		 * no mean to manipulate and see their operational details.
		 */
		sc->sc_ethercom.ec_ifmedia = ifm = &sc->sc_media;
		ifmedia_init(ifm, 0, NULL, nopifmedia_sts);
		ifmedia_add(ifm, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
		ifmedia_set(ifm, IFM_ETHER | IFM_100_TX | IFM_FDX);

		aprint_normal_dev(self,
		    "10baseT, 10baseT-FDX, 100baseTX, 100baseTX-FDX, auto\n");
	}
	ifm->ifm_media = ifm->ifm_cur->ifm_media; /* as if user has requested */

	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = kse_ioctl;
	ifp->if_start = kse_start;
	ifp->if_watchdog = kse_watchdog;
	ifp->if_init = kse_init;
	ifp->if_stop = kse_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * capable of 802.1Q VLAN-sized frames and hw assisted tagging.
	 * can do IPv4, TCPv4, and UDPv4 checksums in hardware.
	 */
	sc->sc_ethercom.ec_capabilities = ETHERCAP_VLAN_MTU;
	ifp->if_capabilities =
	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;

	sc->sc_flowflags = 0;

	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, enaddr);

	callout_init(&sc->sc_tick_ch, 0);
	callout_setfunc(&sc->sc_tick_ch, phy_tick, sc);

	rnd_attach_source(&sc->rnd_source, device_xname(self),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

#ifdef KSE_EVENT_COUNTERS
	/* Names of the chip's 34 per-port statistics counters, in the
	 * order the hardware reports them (read later by stat_tick). */
	const char *events[34] = {
		"RxLoPriotyByte",
		"RxHiPriotyByte",
		"RxUndersizePkt",
		"RxFragments",
		"RxOversize",
		"RxJabbers",
		"RxSymbolError",
		"RxCRCError",
		"RxAlignmentError",
		"RxControl8808Pkts",
		"RxPausePkts",
		"RxBroadcast",
		"RxMulticast",
		"RxUnicast",
		"Rx64Octets",
		"Rx65To127Octets",
		"Rx128To255Octets",
		"Rx255To511Octets",
		"Rx512To1023Octets",
		"Rx1024To1522Octets",
		"TxLoPriotyByte",
		"TxHiPriotyByte",
		"TxLateCollision",
		"TxPausePkts",
		"TxBroadcastPkts",
		"TxMulticastPkts",
		"TxUnicastPkts",
		"TxDeferred",
		"TxTotalCollision",
		"TxExcessiveCollision",
		"TxSingleCollision",
		"TxMultipleCollision",
		"TxDropPkts",
		"RxDropPkts",
	};
	struct ksext *ee = &sc->sc_ext;
	/* 8842: one counter set per switch port (3); 8841: just one. */
	int p = (sc->sc_chip == 0x8842) ? 3 : 1;
	for (i = 0; i < p; i++) {
		snprintf(ee->evcntname[i], sizeof(ee->evcntname[i]),
		    "%s.%d", device_xname(sc->sc_dev), i+1);
		for (int ev = 0; ev < 34; ev++) {
			evcnt_attach_dynamic(&ee->pev[i][ev], EVCNT_TYPE_MISC,
			    NULL, ee->evcntname[i], events[ev]);
		}
	}
#endif
	return;

	/*
	 * Unwind in reverse order of acquisition.  fail_0 and later
	 * imply the interrupt was established; plain "fail" only has
	 * the register mapping to undo.
	 */
 fail_5:
	for (i = 0; i < KSE_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < KSE_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct kse_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, nseg);
 fail_0:
	pci_intr_disestablish(pc, sc->sc_ih);
 fail:
	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_memsize);
	return;
}
656 1.1 nisimura
657 1.1 nisimura static int
658 1.3 christos kse_ioctl(struct ifnet *ifp, u_long cmd, void *data)
659 1.1 nisimura {
660 1.1 nisimura struct kse_softc *sc = ifp->if_softc;
661 1.42 nisimura struct ifreq *ifr = (struct ifreq *)data;
662 1.42 nisimura struct ifmedia *ifm;
663 1.1 nisimura int s, error;
664 1.1 nisimura
665 1.1 nisimura s = splnet();
666 1.1 nisimura
667 1.1 nisimura switch (cmd) {
668 1.42 nisimura case SIOCSIFMEDIA:
669 1.42 nisimura /* Flow control requires full-duplex mode. */
670 1.42 nisimura if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
671 1.42 nisimura (ifr->ifr_media & IFM_FDX) == 0)
672 1.42 nisimura ifr->ifr_media &= ~IFM_ETH_FMASK;
673 1.42 nisimura if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
674 1.42 nisimura if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
675 1.42 nisimura /* We can do both TXPAUSE and RXPAUSE. */
676 1.42 nisimura ifr->ifr_media |=
677 1.42 nisimura IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
678 1.42 nisimura }
679 1.42 nisimura sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
680 1.42 nisimura }
681 1.42 nisimura ifm = (sc->sc_chip == 0x8841)
682 1.42 nisimura ? &sc->sc_mii.mii_media : &sc->sc_media;
683 1.42 nisimura error = ifmedia_ioctl(ifp, ifr, ifm, cmd);
684 1.42 nisimura break;
685 1.1 nisimura default:
686 1.54 nisimura error = ether_ioctl(ifp, cmd, data);
687 1.54 nisimura if (error != ENETRESET)
688 1.12 dyoung break;
689 1.12 dyoung error = 0;
690 1.12 dyoung if (cmd == SIOCSIFCAP)
691 1.58 riastrad error = if_init(ifp);
692 1.12 dyoung if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
693 1.12 dyoung ;
694 1.12 dyoung else if (ifp->if_flags & IFF_RUNNING) {
695 1.1 nisimura /*
696 1.1 nisimura * Multicast list has changed; set the hardware filter
697 1.1 nisimura * accordingly.
698 1.1 nisimura */
699 1.53 nisimura kse_set_rcvfilt(sc);
700 1.1 nisimura }
701 1.1 nisimura break;
702 1.1 nisimura }
703 1.1 nisimura
704 1.1 nisimura splx(s);
705 1.54 nisimura
706 1.1 nisimura return error;
707 1.1 nisimura }
708 1.1 nisimura
/*
 * Initialize the interface: build the circular Tx/Rx descriptor rings,
 * program checksum-offload and flow-control bits, load the receive
 * filter and media, then enable the DMA engines and interrupts.
 * Returns 0 on success or an errno if Rx buffer allocation fails.
 */
static int
kse_init(struct ifnet *ifp)
{
	struct kse_softc *sc = ifp->if_softc;
	uint32_t paddr;
	int i, error = 0;

	/* cancel pending I/O */
	kse_stop(ifp, 0);

	/* reset all registers but PCI configuration */
	kse_reset(sc);

	/* craft Tx descriptor ring; each t3 links to the next descriptor */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	for (i = 0, paddr = KSE_CDTXADDR(sc, 1); i < KSE_NTXDESC - 1; i++) {
		sc->sc_txdescs[i].t3 = paddr;
		paddr += sizeof(struct tdes);
	}
	/* close the ring: last descriptor points back to the first */
	sc->sc_txdescs[KSE_NTXDESC - 1].t3 = KSE_CDTXADDR(sc, 0);
	KSE_CDTXSYNC(sc, 0, KSE_NTXDESC,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = KSE_NTXDESC;
	sc->sc_txnext = 0;

	/* reset Tx software-state accounting */
	for (i = 0; i < KSE_TXQUEUELEN; i++)
		sc->sc_txsoft[i].txs_mbuf = NULL;
	sc->sc_txsfree = KSE_TXQUEUELEN;
	sc->sc_txsnext = 0;
	sc->sc_txsdirty = 0;

	/* craft Rx descriptor ring (same circular layout as Tx) */
	memset(sc->sc_rxdescs, 0, sizeof(sc->sc_rxdescs));
	for (i = 0, paddr = KSE_CDRXADDR(sc, 1); i < KSE_NRXDESC - 1; i++) {
		sc->sc_rxdescs[i].r3 = paddr;
		paddr += sizeof(struct rdes);
	}
	sc->sc_rxdescs[KSE_NRXDESC - 1].r3 = KSE_CDRXADDR(sc, 0);
	for (i = 0; i < KSE_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_mbuf == NULL) {
			if ((error = add_rxbuf(sc, i)) != 0) {
				aprint_error_dev(sc->sc_dev,
				    "unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    i, error);
				rxdrain(sc);
				goto out;
			}
		}
		else
			KSE_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	/* hand Tx/Rx rings to HW */
	CSR_WRITE_4(sc, TDLB, KSE_CDTXADDR(sc, 0));
	CSR_WRITE_4(sc, RDLB, KSE_CDRXADDR(sc, 0));

	/* base Tx/Rx control values; offload bits are OR-ed in below */
	sc->sc_txc = TXC_TEN | TXC_EP | TXC_AC;
	sc->sc_rxc = RXC_REN | RXC_RU | RXC_RB;
	sc->sc_t1csum = sc->sc_mcsum = 0;
	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) {
		sc->sc_rxc |= RXC_ICC;
		sc->sc_mcsum |= M_CSUM_IPv4;
	}
	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Tx) {
		sc->sc_txc |= TXC_ICG;
		sc->sc_t1csum |= T1_IPCKG;
	}
	if (ifp->if_capenable & IFCAP_CSUM_TCPv4_Rx) {
		sc->sc_rxc |= RXC_TCC;
		sc->sc_mcsum |= M_CSUM_TCPv4;
	}
	if (ifp->if_capenable & IFCAP_CSUM_TCPv4_Tx) {
		sc->sc_txc |= TXC_TCG;
		sc->sc_t1csum |= T1_TCPCKG;
	}
	if (ifp->if_capenable & IFCAP_CSUM_UDPv4_Rx) {
		sc->sc_rxc |= RXC_UCC;
		sc->sc_mcsum |= M_CSUM_UDPv4;
	}
	if (ifp->if_capenable & IFCAP_CSUM_UDPv4_Tx) {
		sc->sc_txc |= TXC_UCG;
		sc->sc_t1csum |= T1_UDPCKG;
	}
	/* DMA burst length for both directions */
	sc->sc_txc |= (kse_burstsize << TXC_BS_SFT);
	sc->sc_rxc |= (kse_burstsize << RXC_BS_SFT);

	if (sc->sc_chip == 0x8842) {
		/* make PAUSE flow control to run */
		sc->sc_txc |= TXC_FCE;
		sc->sc_rxc |= RXC_FCE;
		i = CSR_READ_2(sc, SGCR3);
		CSR_WRITE_2(sc, SGCR3, i | CR3_USEFC);
	}

	/* accept multicast frame or run promisc mode */
	kse_set_rcvfilt(sc);

	/* set current media */
	if (sc->sc_chip == 0x8841)
		(void)kse_ifmedia_upd(ifp);

	/* enable transmitter and receiver */
	CSR_WRITE_4(sc, MDTXC, sc->sc_txc);
	CSR_WRITE_4(sc, MDRXC, sc->sc_rxc);
	CSR_WRITE_4(sc, MDRSC, 1);

	/* enable interrupts; link-change only exists on the 8841 */
	sc->sc_inten = INT_DMTS | INT_DMRS | INT_DMRBUS;
	if (sc->sc_chip == 0x8841)
		sc->sc_inten |= INT_DMLCS;
	CSR_WRITE_4(sc, INTST, ~0);	/* ack any stale status first */
	CSR_WRITE_4(sc, INTEN, sc->sc_inten);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* start one second timer */
	callout_schedule(&sc->sc_tick_ch, hz);

#ifdef KSE_EVENT_COUNTERS
	zerostats(sc);
#endif

 out:
	if (error) {
		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
		ifp->if_timer = 0;
		aprint_error_dev(sc->sc_dev, "interface not running\n");
	}
	return error;
}
842 1.1 nisimura
843 1.1 nisimura static void
844 1.1 nisimura kse_stop(struct ifnet *ifp, int disable)
845 1.1 nisimura {
846 1.1 nisimura struct kse_softc *sc = ifp->if_softc;
847 1.1 nisimura struct kse_txsoft *txs;
848 1.1 nisimura int i;
849 1.1 nisimura
850 1.56 nisimura callout_stop(&sc->sc_tick_ch);
851 1.56 nisimura
852 1.1 nisimura sc->sc_txc &= ~TXC_TEN;
853 1.1 nisimura sc->sc_rxc &= ~RXC_REN;
854 1.1 nisimura CSR_WRITE_4(sc, MDTXC, sc->sc_txc);
855 1.1 nisimura CSR_WRITE_4(sc, MDRXC, sc->sc_rxc);
856 1.1 nisimura
857 1.1 nisimura for (i = 0; i < KSE_TXQUEUELEN; i++) {
858 1.1 nisimura txs = &sc->sc_txsoft[i];
859 1.1 nisimura if (txs->txs_mbuf != NULL) {
860 1.1 nisimura bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
861 1.1 nisimura m_freem(txs->txs_mbuf);
862 1.1 nisimura txs->txs_mbuf = NULL;
863 1.1 nisimura }
864 1.1 nisimura }
865 1.1 nisimura
866 1.13 dyoung ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
867 1.13 dyoung ifp->if_timer = 0;
868 1.13 dyoung
869 1.1 nisimura if (disable)
870 1.1 nisimura rxdrain(sc);
871 1.1 nisimura }
872 1.1 nisimura
/*
 * Perform a global software reset of the chip; all registers except
 * the PCI configuration space return to their defaults.  The switch
 * function must be re-enabled afterwards.
 */
static void
kse_reset(struct kse_softc *sc)
{

	/* software reset */
	CSR_WRITE_2(sc, GRR, 1);
	delay(1000); /* PDF does not mention the delay amount */
	CSR_WRITE_2(sc, GRR, 0);

	/* enable switch function */
	CSR_WRITE_2(sc, SIDER, 1);
}
885 1.1 nisimura
886 1.1 nisimura static void
887 1.1 nisimura kse_watchdog(struct ifnet *ifp)
888 1.1 nisimura {
889 1.1 nisimura struct kse_softc *sc = ifp->if_softc;
890 1.1 nisimura
891 1.24 christos /*
892 1.1 nisimura * Since we're not interrupting every packet, sweep
893 1.1 nisimura * up before we report an error.
894 1.1 nisimura */
895 1.1 nisimura txreap(sc);
896 1.1 nisimura
897 1.1 nisimura if (sc->sc_txfree != KSE_NTXDESC) {
898 1.42 nisimura aprint_error_dev(sc->sc_dev,
899 1.42 nisimura "device timeout (txfree %d txsfree %d txnext %d)\n",
900 1.42 nisimura sc->sc_txfree, sc->sc_txsfree, sc->sc_txnext);
901 1.48 skrll if_statinc(ifp, if_oerrors);
902 1.1 nisimura
903 1.1 nisimura /* Reset the interface. */
904 1.1 nisimura kse_init(ifp);
905 1.1 nisimura }
906 1.1 nisimura else if (ifp->if_flags & IFF_DEBUG)
907 1.42 nisimura aprint_error_dev(sc->sc_dev, "recovered from device timeout\n");
908 1.1 nisimura
909 1.1 nisimura /* Try to get more packets going. */
910 1.1 nisimura kse_start(ifp);
911 1.1 nisimura }
912 1.1 nisimura
/*
 * Transmit entry point: drain the interface send queue, mapping each
 * mbuf chain into ring descriptors, until the queue empties or the
 * descriptor/soft-state pools run out.  The first segment's T0_OWN
 * bit is written last so the chip never sees a half-built frame.
 */
static void
kse_start(struct ifnet *ifp)
{
	struct kse_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct kse_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx, ofree, seg;
	uint32_t tdes0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/* Remember the previous number of free descriptors. */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		/* reclaim finished jobs when the soft-state pool runs low */
		if (sc->sc_txsfree < KSE_TXQUEUE_GC) {
			txreap(sc);
			if (sc->sc_txsfree == 0)
				break;
		}
		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				/* too fragmented to ever map; drop it */
				aprint_error_dev(sc->sc_dev,
				    "Tx packet consumes too many "
				    "DMA segments, dropping...\n");
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/* Short on resources, just stop for now. */
			break;
		}

		if (dmamap->dm_nsegs > sc->sc_txfree) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are not more slots left.
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		tdes0 = 0; /* to postpone 1st segment T0_OWN write */
		lasttx = -1;
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = KSE_NEXTTX(nexttx)) {
			struct tdes *tdes = &sc->sc_txdescs[nexttx];
			/*
			 * If this is the first descriptor we're
			 * enqueueing, don't set the OWN bit just
			 * yet.  That could cause a race condition.
			 * We'll do it below.
			 */
			tdes->t2 = dmamap->dm_segs[seg].ds_addr;
			tdes->t1 = sc->sc_t1csum
			     | (dmamap->dm_segs[seg].ds_len & T1_TBS_MASK);
			tdes->t0 = tdes0;
			tdes0 = T0_OWN; /* 2nd and other segments */
			lasttx = nexttx;
		}
		/*
		 * Outgoing NFS mbuf must be unloaded when Tx completed.
		 * Without T1_IC NFS mbuf is left unack'ed for excessive
		 * time and NFS stops to proceed until kse_watchdog()
		 * calls txreap() to reclaim the unack'ed mbuf.
		 * It's painful to traverse every mbuf chain to determine
		 * whether someone is waiting for Tx completion.
		 */
		m = m0;
		do {
			/* external storage with a free callback: ask for
			 * a Tx-done interrupt so it is released promptly */
			if ((m->m_flags & M_EXT) && m->m_ext.ext_free) {
				sc->sc_txdescs[lasttx].t1 |= T1_IC;
				break;
			}
		} while ((m = m->m_next) != NULL);

		/* Write deferred 1st segment T0_OWN at the final stage */
		sc->sc_txdescs[lasttx].t1 |= T1_LS;
		sc->sc_txdescs[sc->sc_txnext].t1 |= T1_FS;
		sc->sc_txdescs[sc->sc_txnext].t0 = T0_OWN;
		KSE_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Tell DMA start transmit */
		CSR_WRITE_4(sc, MDTSC, 1);

		/* record the job for txreap() to reclaim later */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_lastdesc = lasttx;
		txs->txs_ndesc = dmamap->dm_nsegs;

		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;
		sc->sc_txsfree--;
		sc->sc_txsnext = KSE_NEXTTXS(sc->sc_txsnext);
		/*
		 * Pass the packet to any BPF listeners.
		 */
		bpf_mtap(ifp, m0, BPF_D_OUT);
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree == 0) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}
	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
1053 1.1 nisimura
/*
 * Program the receive filter.  Modes, in order of preference:
 * promiscuous (RXC_RA), accept-all-multicast (RXC_RM, used for address
 * ranges), up to 16 exact-match extra MAC address slots, and finally a
 * 64-bit multicast hash table (RXC_MHTE) once the slots are exhausted.
 */
static void
kse_set_rcvfilt(struct kse_softc *sc)
{
	struct ether_multistep step;
	struct ether_multi *enm;
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &ec->ec_if;
	uint32_t crc, mchash[2];
	int i;

	sc->sc_rxc &= ~(RXC_MHTE | RXC_RM | RXC_RA);

	/* clear perfect match filter and prepare mcast hash table */
	for (i = 0; i < 16; i++)
		CSR_WRITE_4(sc, MAAH0 + i*8, 0);
	crc = mchash[0] = mchash[1] = 0;

	ETHER_LOCK(ec);
	if (ifp->if_flags & IFF_PROMISC) {
		ec->ec_flags |= ETHER_F_ALLMULTI;
		ETHER_UNLOCK(ec);
		/* run promisc. mode */
		sc->sc_rxc |= RXC_RA;
		goto update;
	}
	ec->ec_flags &= ~ETHER_F_ALLMULTI;
	ETHER_FIRST_MULTI(step, ec, enm);
	i = 0;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			ec->ec_flags |= ETHER_F_ALLMULTI;
			ETHER_UNLOCK(ec);
			/* accept all multicast */
			sc->sc_rxc |= RXC_RM;
			goto update;
		}
#if KSE_MCASTDEBUG == 1
		printf("[%d] %s\n", i, ether_sprintf(enm->enm_addrlo));
#endif
		if (i < 16) {
			/* use 16 additional MAC addr to accept mcast */
			uint32_t addr;
			uint8_t *ep = enm->enm_addrlo;
			/* address is stored little-endian across the
			 * MAAL/MAAH pair; bit 31 of MAAH marks it valid */
			addr = (ep[3] << 24) | (ep[2] << 16)
			     | (ep[1] << 8) | ep[0];
			CSR_WRITE_4(sc, MAAL0 + i*8, addr);
			addr = (ep[5] << 8) | ep[4];
			CSR_WRITE_4(sc, MAAH0 + i*8, addr | (1U << 31));
		} else {
			/* use hash table when too many */
			crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
			/* CRC bit 31 selects the word, bits 30:26 the bit */
			mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
		}
		ETHER_NEXT_MULTI(step, enm);
		i++;
	}
	ETHER_UNLOCK(ec);

	/*
	 * A non-zero crc means at least one address fell through to the
	 * hash table.  NOTE(review): a hash entry whose CRC happens to
	 * be exactly 0 would be missed here — harmless in practice but
	 * worth confirming against upstream intent.
	 */
	if (crc)
		sc->sc_rxc |= RXC_MHTE;
	CSR_WRITE_4(sc, MTR0, mchash[0]);
	CSR_WRITE_4(sc, MTR1, mchash[1]);
update:
	/* With RA or RM, MHTE/MTR0/MTR1 are never consulted. */
	return;
}
1128 1.1 nisimura
1129 1.1 nisimura static int
1130 1.1 nisimura add_rxbuf(struct kse_softc *sc, int idx)
1131 1.1 nisimura {
1132 1.1 nisimura struct kse_rxsoft *rxs = &sc->sc_rxsoft[idx];
1133 1.1 nisimura struct mbuf *m;
1134 1.1 nisimura int error;
1135 1.1 nisimura
1136 1.1 nisimura MGETHDR(m, M_DONTWAIT, MT_DATA);
1137 1.1 nisimura if (m == NULL)
1138 1.1 nisimura return ENOBUFS;
1139 1.1 nisimura
1140 1.1 nisimura MCLGET(m, M_DONTWAIT);
1141 1.1 nisimura if ((m->m_flags & M_EXT) == 0) {
1142 1.1 nisimura m_freem(m);
1143 1.1 nisimura return ENOBUFS;
1144 1.1 nisimura }
1145 1.1 nisimura
1146 1.1 nisimura if (rxs->rxs_mbuf != NULL)
1147 1.1 nisimura bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
1148 1.1 nisimura
1149 1.1 nisimura rxs->rxs_mbuf = m;
1150 1.1 nisimura
1151 1.1 nisimura error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
1152 1.1 nisimura m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
1153 1.1 nisimura if (error) {
1154 1.42 nisimura aprint_error_dev(sc->sc_dev,
1155 1.42 nisimura "can't load rx DMA map %d, error = %d\n", idx, error);
1156 1.1 nisimura panic("kse_add_rxbuf");
1157 1.1 nisimura }
1158 1.1 nisimura
1159 1.1 nisimura bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1160 1.1 nisimura rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1161 1.1 nisimura
1162 1.1 nisimura KSE_INIT_RXDESC(sc, idx);
1163 1.1 nisimura
1164 1.1 nisimura return 0;
1165 1.1 nisimura }
1166 1.1 nisimura
1167 1.1 nisimura static void
1168 1.1 nisimura rxdrain(struct kse_softc *sc)
1169 1.1 nisimura {
1170 1.1 nisimura struct kse_rxsoft *rxs;
1171 1.1 nisimura int i;
1172 1.1 nisimura
1173 1.1 nisimura for (i = 0; i < KSE_NRXDESC; i++) {
1174 1.1 nisimura rxs = &sc->sc_rxsoft[i];
1175 1.1 nisimura if (rxs->rxs_mbuf != NULL) {
1176 1.1 nisimura bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
1177 1.1 nisimura m_freem(rxs->rxs_mbuf);
1178 1.1 nisimura rxs->rxs_mbuf = NULL;
1179 1.1 nisimura }
1180 1.1 nisimura }
1181 1.1 nisimura }
1182 1.1 nisimura
1183 1.1 nisimura static int
1184 1.1 nisimura kse_intr(void *arg)
1185 1.1 nisimura {
1186 1.1 nisimura struct kse_softc *sc = arg;
1187 1.43 nisimura struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1188 1.2 tsutsui uint32_t isr;
1189 1.1 nisimura
1190 1.1 nisimura if ((isr = CSR_READ_4(sc, INTST)) == 0)
1191 1.1 nisimura return 0;
1192 1.1 nisimura
1193 1.1 nisimura if (isr & INT_DMRS)
1194 1.1 nisimura rxintr(sc);
1195 1.1 nisimura if (isr & INT_DMTS)
1196 1.1 nisimura txreap(sc);
1197 1.1 nisimura if (isr & INT_DMLCS)
1198 1.1 nisimura lnkchg(sc);
1199 1.1 nisimura if (isr & INT_DMRBUS)
1200 1.42 nisimura aprint_error_dev(sc->sc_dev, "Rx descriptor full\n");
1201 1.1 nisimura
1202 1.1 nisimura CSR_WRITE_4(sc, INTST, isr);
1203 1.43 nisimura
1204 1.43 nisimura if (ifp->if_flags & IFF_RUNNING)
1205 1.43 nisimura if_schedule_deferred_start(ifp);
1206 1.43 nisimura
1207 1.1 nisimura return 1;
1208 1.1 nisimura }
1209 1.1 nisimura
/*
 * Receive interrupt service: walk the Rx ring from sc_rxptr, handing
 * each completed frame to the network stack, until a descriptor still
 * owned by the chip (R0_OWN set) is reached.
 */
static void
rxintr(struct kse_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct kse_rxsoft *rxs;
	struct mbuf *m;
	uint32_t rxstat;
	int i, len;

	for (i = sc->sc_rxptr; /*CONSTCOND*/ 1; i = KSE_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		KSE_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		rxstat = sc->sc_rxdescs[i].r0;

		if (rxstat & R0_OWN) /* desc is left empty */
			break;

		/* R0_FS | R0_LS must have been marked for this desc */

		if (rxstat & R0_ES) {
			/* error summary set: count, report, recycle */
			if_statinc(ifp, if_ierrors);
#define PRINTERR(bit, str) \
		if (rxstat & (bit)) \
			aprint_error_dev(sc->sc_dev, \
			    "%s\n", str)
			PRINTERR(R0_TL, "frame too long");
			PRINTERR(R0_RF, "runt frame");
			PRINTERR(R0_CE, "bad FCS");
#undef PRINTERR
			KSE_INIT_RXDESC(sc, i);
			continue;
		}

		/* HW errata; frame might be too small or too large */

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		len = rxstat & R0_FL_MASK;
		len -= ETHER_CRC_LEN;	/* Trim CRC off */
		m = rxs->rxs_mbuf;

		/* load a fresh cluster; on failure recycle the old one */
		if (add_rxbuf(sc, i) != 0) {
			if_statinc(ifp, if_ierrors);
			KSE_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat,
			    rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			continue;
		}

		m_set_rcvif(m, ifp);
		m->m_pkthdr.len = m->m_len = len;

		if (sc->sc_mcsum) {
			/* mark which checksums the HW verified, and
			 * flag the ones it found bad */
			m->m_pkthdr.csum_flags |= sc->sc_mcsum;
			if (rxstat & R0_IPE)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
			if (rxstat & (R0_TCPE | R0_UDPE))
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}
		if_percpuq_enqueue(ifp->if_percpuq, m);
#ifdef KSEDIAGNOSTIC
		if (kse_monitor_rxintr > 0) {
			aprint_error_dev(sc->sc_dev,
			    "m stat %x data %p len %d\n",
			    rxstat, m->m_data, m->m_len);
		}
#endif
	}
	/* remember where to resume on the next interrupt */
	sc->sc_rxptr = i;
}
1286 1.1 nisimura
/*
 * Reclaim completed transmit jobs: for each soft-state entry whose
 * last descriptor the chip has released (T0_OWN clear), free the mbuf
 * and DMA mapping and return its descriptors to the free pool.
 */
static void
txreap(struct kse_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct kse_txsoft *txs;
	uint32_t txstat;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	for (i = sc->sc_txsdirty; sc->sc_txsfree != KSE_TXQUEUELEN;
	     i = KSE_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		KSE_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		txstat = sc->sc_txdescs[txs->txs_lastdesc].t0;

		if (txstat & T0_OWN) /* desc is still in use */
			break;

		/* There is no way to tell transmission status per frame */

		if_statinc(ifp, if_opackets);

		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}
	sc->sc_txsdirty = i;
	/* nothing outstanding: cancel the watchdog */
	if (sc->sc_txsfree == KSE_TXQUEUELEN)
		ifp->if_timer = 0;
}
1324 1.1 nisimura
/*
 * Link status change interrupt (8841 only): refresh media status via
 * kse_ifmedia_sts(), which polls the MII and updates sc_flowflags.
 * The ifmediareq result itself is discarded.
 */
static void
lnkchg(struct kse_softc *sc)
{
	struct ifmediareq ifmr;

#if KSE_LINKDEBUG == 1
	uint16_t p1sr = CSR_READ_2(sc, P1SR);
	printf("link %s detected\n", (p1sr & PxSR_LINKUP) ? "up" : "down");
#endif
	kse_ifmedia_sts(&sc->sc_ethercom.ec_if, &ifmr);
}
1336 1.1 nisimura
1337 1.1 nisimura static int
1338 1.42 nisimura kse_ifmedia_upd(struct ifnet *ifp)
1339 1.1 nisimura {
1340 1.1 nisimura struct kse_softc *sc = ifp->if_softc;
1341 1.42 nisimura struct ifmedia *ifm = &sc->sc_mii.mii_media;
1342 1.39 nisimura uint16_t p1cr4;
1343 1.42 nisimura
1344 1.39 nisimura p1cr4 = 0;
1345 1.39 nisimura if (IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_AUTO) {
1346 1.39 nisimura p1cr4 |= PxCR_STARTNEG; /* restart AN */
1347 1.39 nisimura p1cr4 |= PxCR_AUTOEN; /* enable AN */
1348 1.39 nisimura p1cr4 |= PxCR_USEFC; /* advertise flow control pause */
1349 1.42 nisimura p1cr4 |= 0xf; /* adv. 100FDX,100HDX,10FDX,10HDX */
1350 1.39 nisimura } else {
1351 1.39 nisimura if (IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_100_TX)
1352 1.39 nisimura p1cr4 |= PxCR_SPD100;
1353 1.1 nisimura if (ifm->ifm_media & IFM_FDX)
1354 1.39 nisimura p1cr4 |= PxCR_USEFDX;
1355 1.1 nisimura }
1356 1.39 nisimura CSR_WRITE_2(sc, P1CR4, p1cr4);
1357 1.42 nisimura #if KSE_LINKDEBUG == 1
1358 1.39 nisimura printf("P1CR4: %04x\n", p1cr4);
1359 1.39 nisimura #endif
1360 1.1 nisimura return 0;
1361 1.1 nisimura }
1362 1.1 nisimura
1363 1.1 nisimura static void
1364 1.42 nisimura kse_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1365 1.1 nisimura {
1366 1.1 nisimura struct kse_softc *sc = ifp->if_softc;
1367 1.42 nisimura struct mii_data *mii = &sc->sc_mii;
1368 1.1 nisimura
1369 1.42 nisimura mii_pollstat(mii);
1370 1.42 nisimura ifmr->ifm_status = mii->mii_media_status;
1371 1.56 nisimura ifmr->ifm_active = sc->sc_flowflags |
1372 1.56 nisimura (mii->mii_media_active & ~IFM_ETH_FMASK);
1373 1.1 nisimura }
1374 1.1 nisimura
1375 1.1 nisimura static void
1376 1.42 nisimura nopifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1377 1.40 nisimura {
1378 1.40 nisimura struct kse_softc *sc = ifp->if_softc;
1379 1.40 nisimura struct ifmedia *ifm = &sc->sc_media;
1380 1.40 nisimura
1381 1.42 nisimura #if KSE_LINKDEBUG == 2
1382 1.40 nisimura printf("p1sr: %04x, p2sr: %04x\n", CSR_READ_2(sc, P1SR), CSR_READ_2(sc, P2SR));
1383 1.40 nisimura #endif
1384 1.40 nisimura
1385 1.40 nisimura /* 8842 MAC pretends 100FDX all the time */
1386 1.40 nisimura ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
1387 1.42 nisimura ifmr->ifm_active = ifm->ifm_cur->ifm_media |
1388 1.42 nisimura IFM_FLOW | IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE;
1389 1.40 nisimura }
1390 1.40 nisimura
1391 1.40 nisimura static void
1392 1.1 nisimura phy_tick(void *arg)
1393 1.1 nisimura {
1394 1.1 nisimura struct kse_softc *sc = arg;
1395 1.42 nisimura struct mii_data *mii = &sc->sc_mii;
1396 1.1 nisimura int s;
1397 1.1 nisimura
1398 1.56 nisimura if (sc->sc_chip == 0x8841) {
1399 1.56 nisimura s = splnet();
1400 1.56 nisimura mii_tick(mii);
1401 1.56 nisimura splx(s);
1402 1.56 nisimura }
1403 1.56 nisimura #ifdef KSE_EVENT_COUNTERS
1404 1.56 nisimura stat_tick(arg);
1405 1.56 nisimura #endif
1406 1.42 nisimura callout_schedule(&sc->sc_tick_ch, hz);
1407 1.42 nisimura }
1408 1.42 nisimura
/*
 * Map of IEEE MII register numbers (BMCR..ANLPAR) to the chip's
 * memory-mapped "PHY 1" CSR offsets.  The PHYID1/PHYID2 offsets are
 * deliberately crossed: the hardware keeps the ID high word in PHY1HR
 * (0x4d6) and the low word in PHY1LR (0x4d4).
 */
static const uint16_t phy1csr[] = {
	/* 0 BMCR */	0x4d0,
	/* 1 BMSR */	0x4d2,
	/* 2 PHYID1 */	0x4d6,	/* 0x0022 - PHY1HR */
	/* 3 PHYID2 */	0x4d4,	/* 0x1430 - PHY1LR */
	/* 4 ANAR */	0x4d8,
	/* 5 ANLPAR */	0x4da,
};
1417 1.42 nisimura
1418 1.42 nisimura int
1419 1.42 nisimura kse_mii_readreg(device_t self, int phy, int reg, uint16_t *val)
1420 1.42 nisimura {
1421 1.42 nisimura struct kse_softc *sc = device_private(self);
1422 1.42 nisimura
1423 1.42 nisimura if (phy != 1 || reg >= __arraycount(phy1csr) || reg < 0)
1424 1.42 nisimura return EINVAL;
1425 1.42 nisimura *val = CSR_READ_2(sc, phy1csr[reg]);
1426 1.42 nisimura return 0;
1427 1.42 nisimura }
1428 1.42 nisimura
1429 1.42 nisimura int
1430 1.42 nisimura kse_mii_writereg(device_t self, int phy, int reg, uint16_t val)
1431 1.42 nisimura {
1432 1.42 nisimura struct kse_softc *sc = device_private(self);
1433 1.42 nisimura
1434 1.42 nisimura if (phy != 1 || reg >= __arraycount(phy1csr) || reg < 0)
1435 1.42 nisimura return EINVAL;
1436 1.42 nisimura CSR_WRITE_2(sc, phy1csr[reg], val);
1437 1.42 nisimura return 0;
1438 1.42 nisimura }
1439 1.42 nisimura
1440 1.42 nisimura void
1441 1.42 nisimura kse_mii_statchg(struct ifnet *ifp)
1442 1.42 nisimura {
1443 1.42 nisimura struct kse_softc *sc = ifp->if_softc;
1444 1.42 nisimura struct mii_data *mii = &sc->sc_mii;
1445 1.42 nisimura
1446 1.42 nisimura #if KSE_LINKDEBUG == 1
1447 1.42 nisimura /* decode P1SR register value */
1448 1.42 nisimura uint16_t p1sr = CSR_READ_2(sc, P1SR);
1449 1.42 nisimura printf("P1SR %04x, spd%d", p1sr, (p1sr & PxSR_SPD100) ? 100 : 10);
1450 1.42 nisimura if (p1sr & PxSR_FDX)
1451 1.42 nisimura printf(",full-duplex");
1452 1.42 nisimura if (p1sr & PxSR_RXFLOW)
1453 1.42 nisimura printf(",rxpause");
1454 1.42 nisimura if (p1sr & PxSR_TXFLOW)
1455 1.42 nisimura printf(",txpause");
1456 1.42 nisimura printf("\n");
1457 1.42 nisimura /* show resolved mii(4) parameters to compare against above */
1458 1.42 nisimura printf("MII spd%d",
1459 1.42 nisimura (int)(sc->sc_ethercom.ec_if.if_baudrate / IF_Mbps(1)));
1460 1.42 nisimura if (mii->mii_media_active & IFM_FDX)
1461 1.42 nisimura printf(",full-duplex");
1462 1.42 nisimura if (mii->mii_media_active & IFM_FLOW) {
1463 1.42 nisimura printf(",flowcontrol");
1464 1.42 nisimura if (mii->mii_media_active & IFM_ETH_RXPAUSE)
1465 1.42 nisimura printf(",rxpause");
1466 1.42 nisimura if (mii->mii_media_active & IFM_ETH_TXPAUSE)
1467 1.42 nisimura printf(",txpause");
1468 1.42 nisimura }
1469 1.42 nisimura printf("\n");
1470 1.42 nisimura #endif
1471 1.42 nisimura /* Get flow control negotiation result. */
1472 1.42 nisimura if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
1473 1.42 nisimura (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags)
1474 1.42 nisimura sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
1475 1.42 nisimura
1476 1.42 nisimura /* Adjust MAC PAUSE flow control. */
1477 1.42 nisimura if ((mii->mii_media_active & IFM_FDX)
1478 1.42 nisimura && (sc->sc_flowflags & IFM_ETH_TXPAUSE))
1479 1.42 nisimura sc->sc_txc |= TXC_FCE;
1480 1.42 nisimura else
1481 1.42 nisimura sc->sc_txc &= ~TXC_FCE;
1482 1.42 nisimura if ((mii->mii_media_active & IFM_FDX)
1483 1.42 nisimura && (sc->sc_flowflags & IFM_ETH_RXPAUSE))
1484 1.42 nisimura sc->sc_rxc |= RXC_FCE;
1485 1.42 nisimura else
1486 1.42 nisimura sc->sc_rxc &= ~RXC_FCE;
1487 1.42 nisimura CSR_WRITE_4(sc, MDTXC, sc->sc_txc);
1488 1.42 nisimura CSR_WRITE_4(sc, MDRXC, sc->sc_rxc);
1489 1.42 nisimura #if KSE_LINKDEBUG == 1
1490 1.42 nisimura printf("%ctxfce, %crxfce\n",
1491 1.42 nisimura (sc->sc_txc & TXC_FCE) ? '+' : '-',
1492 1.42 nisimura (sc->sc_rxc & RXC_FCE) ? '+' : '-');
1493 1.42 nisimura #endif
1494 1.1 nisimura }
1495 1.8 nisimura
1496 1.8 nisimura #ifdef KSE_EVENT_COUNTERS
1497 1.8 nisimura static void
1498 1.16 dsl stat_tick(void *arg)
1499 1.8 nisimura {
1500 1.8 nisimura struct kse_softc *sc = arg;
1501 1.8 nisimura struct ksext *ee = &sc->sc_ext;
1502 1.56 nisimura int nport, p, i, reg, val;
1503 1.8 nisimura
1504 1.8 nisimura nport = (sc->sc_chip == 0x8842) ? 3 : 1;
1505 1.8 nisimura for (p = 0; p < nport; p++) {
1506 1.56 nisimura /* read 34 ev counters by indirect read via IACR */
1507 1.9 nisimura for (i = 0; i < 32; i++) {
1508 1.56 nisimura reg = EVCNTBR + p * 0x20 + i;
1509 1.56 nisimura CSR_WRITE_2(sc, IACR, reg);
1510 1.56 nisimura /* 30-bit counter value are halved in IADR5 & IADR4 */
1511 1.8 nisimura do {
1512 1.8 nisimura val = CSR_READ_2(sc, IADR5) << 16;
1513 1.56 nisimura } while ((val & IADR_LATCH) == 0);
1514 1.56 nisimura if (val & IADR_OVF) {
1515 1.9 nisimura (void)CSR_READ_2(sc, IADR4);
1516 1.8 nisimura val = 0x3fffffff; /* has made overflow */
1517 1.9 nisimura }
1518 1.9 nisimura else {
1519 1.9 nisimura val &= 0x3fff0000; /* 29:16 */
1520 1.9 nisimura val |= CSR_READ_2(sc, IADR4); /* 15:0 */
1521 1.9 nisimura }
1522 1.56 nisimura ee->pev[p][i].ev_count += val; /* ev0 thru 31 */
1523 1.8 nisimura }
1524 1.56 nisimura /* ev32 and ev33 are 16-bit counter */
1525 1.56 nisimura CSR_WRITE_2(sc, IACR, EVCNTBR + 0x100 + p);
1526 1.56 nisimura ee->pev[p][32].ev_count += CSR_READ_2(sc, IADR4); /* ev32 */
1527 1.56 nisimura CSR_WRITE_2(sc, IACR, EVCNTBR + 0x100 + p * 3 + 1);
1528 1.56 nisimura ee->pev[p][33].ev_count += CSR_READ_2(sc, IADR4); /* ev33 */
1529 1.8 nisimura }
1530 1.8 nisimura }
1531 1.8 nisimura
1532 1.8 nisimura static void
1533 1.8 nisimura zerostats(struct kse_softc *sc)
1534 1.8 nisimura {
1535 1.8 nisimura struct ksext *ee = &sc->sc_ext;
1536 1.56 nisimura int nport, p, i, reg, val;
1537 1.8 nisimura
1538 1.35 msaitoh /* Make sure all the HW counters get zero */
1539 1.8 nisimura nport = (sc->sc_chip == 0x8842) ? 3 : 1;
1540 1.8 nisimura for (p = 0; p < nport; p++) {
1541 1.56 nisimura for (i = 0; i < 32; i++) {
1542 1.56 nisimura reg = EVCNTBR + p * 0x20 + i;
1543 1.56 nisimura CSR_WRITE_2(sc, IACR, reg);
1544 1.8 nisimura do {
1545 1.8 nisimura val = CSR_READ_2(sc, IADR5) << 16;
1546 1.56 nisimura } while ((val & IADR_LATCH) == 0);
1547 1.9 nisimura (void)CSR_READ_2(sc, IADR4);
1548 1.8 nisimura ee->pev[p][i].ev_count = 0;
1549 1.8 nisimura }
1550 1.56 nisimura CSR_WRITE_2(sc, IACR, EVCNTBR + 0x100 + p);
1551 1.56 nisimura (void)CSR_READ_2(sc, IADR4);
1552 1.56 nisimura CSR_WRITE_2(sc, IACR, EVCNTBR + 0x100 + p * 3 + 1);
1553 1.56 nisimura (void)CSR_READ_2(sc, IADR4);
1554 1.56 nisimura ee->pev[p][32].ev_count = 0;
1555 1.56 nisimura ee->pev[p][33].ev_count = 0;
1556 1.8 nisimura }
1557 1.8 nisimura }
1558 1.8 nisimura #endif
1559