/*	$NetBSD: if_kse.c,v 1.57 2021/05/08 00:27:02 thorpej Exp $	*/

/*-
 * Copyright (c) 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Tohru Nishimura.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Micrel 8841/8842 10/100 PCI ethernet driver
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_kse.c,v 1.57 2021/05/08 00:27:02 thorpej Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/intr.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/ioctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/rndsource.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/kernel.h>

#include <net/if.h>
#include <net/if_media.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <net/bpf.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

/* Tuning knob: emit link-state debug messages when nonzero. */
#define KSE_LINKDEBUG 0

/* CSR accessors over the memory-mapped register window. */
#define CSR_READ_4(sc, off) \
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (off))
#define CSR_WRITE_4(sc, off, val) \
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (off), (val))
#define CSR_READ_2(sc, off) \
	bus_space_read_2((sc)->sc_st, (sc)->sc_sh, (off))
#define CSR_WRITE_2(sc, off, val) \
	bus_space_write_2((sc)->sc_st, (sc)->sc_sh, (off), (val))

/* KSZ8841/8842 register offsets. */
#define MDTXC	0x000	/* DMA transmit control */
#define MDRXC	0x004	/* DMA receive control */
#define MDTSC	0x008	/* trigger DMA transmit (SC) */
#define MDRSC	0x00c	/* trigger DMA receive (SC) */
#define TDLB	0x010	/* transmit descriptor list base */
#define RDLB	0x014	/* receive descriptor list base */
#define MTR0	0x020	/* multicast table 31:0 */
#define MTR1	0x024	/* multicast table 63:32 */
#define INTEN	0x028	/* interrupt enable */
#define INTST	0x02c	/* interrupt status */
#define MAAL0	0x080	/* additional MAC address 0 low */
#define MAAH0	0x084	/* additional MAC address 0 high */
#define MARL	0x200	/* MAC address low */
#define MARM	0x202	/* MAC address middle */
#define MARH	0x204	/* MAC address high */
#define GRR	0x216	/* global reset */
#define SIDER	0x400	/* switch ID and function enable */
#define SGCR3	0x406	/* switch function control 3 */
#define CR3_USEHDX	(1U<<6)	/* use half-duplex 8842 host port */
#define CR3_USEFC	(1U<<5)	/* use flowcontrol 8842 host port */
#define IACR	0x4a0	/* indirect access control */
#define IADR1	0x4a2	/* indirect access data 66:63 */
#define IADR2	0x4a4	/* indirect access data 47:32 */
#define IADR3	0x4a6	/* indirect access data 63:48 */
#define IADR4	0x4a8	/* indirect access data 15:0 */
#define IADR5	0x4aa	/* indirect access data 31:16 */
#define IADR_LATCH	(1U<<30) /* latch completed indication */
#define IADR_OVF	(1U<<31) /* overflow detected */
#define P1CR4	0x512	/* port 1 control 4 */
#define P1SR	0x514	/* port 1 status */
#define P2CR4	0x532	/* port 2 control 4 */
#define P2SR	0x534	/* port 2 status */
#define PxCR_STARTNEG	(1U<<9)	/* restart auto negotiation */
#define PxCR_AUTOEN	(1U<<7)	/* auto negotiation enable */
#define PxCR_SPD100	(1U<<6)	/* force speed 100 */
#define PxCR_USEFDX	(1U<<5)	/* force full duplex */
#define PxCR_USEFC	(1U<<4)	/* advertise pause flow control */
#define PxSR_ACOMP	(1U<<6)	/* auto negotiation completed */
#define PxSR_SPD100	(1U<<10) /* speed is 100Mbps */
#define PxSR_FDX	(1U<<9)	/* full duplex */
#define PxSR_LINKUP	(1U<<5)	/* link is good */
#define PxSR_RXFLOW	(1U<<12) /* receive flow control active */
#define PxSR_TXFLOW	(1U<<11) /* transmit flow control active */
#define P1VIDCR	0x504	/* port 1 vtag */
#define P2VIDCR	0x524	/* port 2 vtag */
#define P3VIDCR	0x544	/* 8842 host vtag */
#define EVCNTBR	0x1c00	/* 3 sets of 34 event counters */

/* MDTXC DMA transmit control bits. */
#define TXC_BS_MSK	0x3f000000	/* burst size */
#define TXC_BS_SFT	(24)		/* 1,2,4,8,16,32 or 0 for unlimited */
#define TXC_UCG		(1U<<18)	/* generate UDP checksum */
#define TXC_TCG		(1U<<17)	/* generate TCP checksum */
#define TXC_ICG		(1U<<16)	/* generate IP checksum */
#define TXC_FCE		(1U<<9)		/* generate PAUSE to moderate Rx lvl */
#define TXC_EP		(1U<<2)		/* enable automatic padding */
#define TXC_AC		(1U<<1)		/* add CRC to frame */
#define TXC_TEN		(1)		/* enable DMA to run */

/* MDRXC DMA receive control bits. */
#define RXC_BS_MSK	0x3f000000	/* burst size */
#define RXC_BS_SFT	(24)		/* 1,2,4,8,16,32 or 0 for unlimited */
#define RXC_IHAE	(1U<<19)	/* IP header alignment enable */
#define RXC_UCC		(1U<<18)	/* run UDP checksum */
#define RXC_TCC		(1U<<17)	/* run TCP checksum */
#define RXC_ICC		(1U<<16)	/* run IP checksum */
#define RXC_FCE		(1U<<9)		/* accept PAUSE to throttle Tx */
#define RXC_RB		(1U<<6)		/* receive broadcast frame */
#define RXC_RM		(1U<<5)		/* receive all multicast (inc. RB) */
#define RXC_RU		(1U<<4)		/* receive 16 additional unicasts */
#define RXC_RE		(1U<<3)		/* accept error frame */
#define RXC_RA		(1U<<2)		/* receive all frame */
#define RXC_MHTE	(1U<<1)		/* use multicast hash table */
#define RXC_REN		(1)		/* enable DMA to run */

/* INTEN/INTST interrupt bits. */
#define INT_DMLCS	(1U<<31)	/* link status change */
#define INT_DMTS	(1U<<30)	/* sending desc. has posted Tx done */
#define INT_DMRS	(1U<<29)	/* frame was received */
#define INT_DMRBUS	(1U<<27)	/* Rx descriptor pool is full */
#define INT_DMxPSS	(3U<<25)	/* 26:25 DMA Tx/Rx have stopped */

/*
 * Hardware Tx/Rx descriptors, four 32-bit words each, shared with the
 * DMA engine.  t0/r0 carry the OWN and status bits, t1/r1 the control
 * and segment-size bits, and r2 the buffer address (see KSE_INIT_RXDESC).
 * The fourth word is presumably the next-descriptor link -- TODO confirm
 * against the KSZ8841/8842 datasheet.
 */
struct tdes {
	uint32_t t0, t1, t2, t3;
};

struct rdes {
	uint32_t r0, r1, r2, r3;
};

/* Tx descriptor word 0. */
#define T0_OWN		(1U<<31)	/* desc is ready to Tx */

/* Rx descriptor word 0 (status written back by the chip). */
#define R0_OWN		(1U<<31)	/* desc is empty */
#define R0_FS		(1U<<30)	/* first segment of frame */
#define R0_LS		(1U<<29)	/* last segment of frame */
#define R0_IPE		(1U<<28)	/* IP checksum error */
#define R0_TCPE		(1U<<27)	/* TCP checksum error */
#define R0_UDPE		(1U<<26)	/* UDP checksum error */
#define R0_ES		(1U<<25)	/* error summary */
#define R0_MF		(1U<<24)	/* multicast frame */
#define R0_SPN		0x00300000	/* 21:20 switch port 1/2 */
#define R0_ALIGN	0x00300000	/* 21:20 (KSZ8692P) Rx align amount */
#define R0_RE		(1U<<19)	/* MII reported error */
#define R0_TL		(1U<<18)	/* frame too long, beyond 1518 */
#define R0_RF		(1U<<17)	/* damaged runt frame */
#define R0_CE		(1U<<16)	/* CRC error */
#define R0_FT		(1U<<15)	/* frame type */
#define R0_FL_MASK	0x7ff		/* frame length 10:0 */

/* Tx descriptor word 1 (control). */
#define T1_IC		(1U<<31)	/* post interrupt on complete */
#define T1_FS		(1U<<30)	/* first segment of frame */
#define T1_LS		(1U<<29)	/* last segment of frame */
#define T1_IPCKG	(1U<<28)	/* generate IP checksum */
#define T1_TCPCKG	(1U<<27)	/* generate TCP checksum */
#define T1_UDPCKG	(1U<<26)	/* generate UDP checksum */
#define T1_TER		(1U<<25)	/* end of ring */
#define T1_SPN		0x00300000	/* 21:20 switch port 1/2 */
#define T1_TBS_MASK	0x7ff		/* segment size 10:0 */

/* Rx descriptor word 1 (control). */
#define R1_RER		(1U<<25)	/* end of ring */
#define R1_RBS_MASK	0x7fc		/* segment size 10:0 */

/* Software ring geometry; ring sizes must stay powers of two. */
#define KSE_NTXSEGS		16
#define KSE_TXQUEUELEN		64
#define KSE_TXQUEUELEN_MASK	(KSE_TXQUEUELEN - 1)
#define KSE_TXQUEUE_GC		(KSE_TXQUEUELEN / 4)
#define KSE_NTXDESC		256
#define KSE_NTXDESC_MASK	(KSE_NTXDESC - 1)
#define KSE_NEXTTX(x)		(((x) + 1) & KSE_NTXDESC_MASK)
#define KSE_NEXTTXS(x)		(((x) + 1) & KSE_TXQUEUELEN_MASK)

#define KSE_NRXDESC		64
#define KSE_NRXDESC_MASK	(KSE_NRXDESC - 1)
#define KSE_NEXTRX(x)		(((x) + 1) & KSE_NRXDESC_MASK)

207 1.1 nisimura struct kse_control_data {
208 1.1 nisimura struct tdes kcd_txdescs[KSE_NTXDESC];
209 1.1 nisimura struct rdes kcd_rxdescs[KSE_NRXDESC];
210 1.1 nisimura };
211 1.1 nisimura #define KSE_CDOFF(x) offsetof(struct kse_control_data, x)
212 1.1 nisimura #define KSE_CDTXOFF(x) KSE_CDOFF(kcd_txdescs[(x)])
213 1.1 nisimura #define KSE_CDRXOFF(x) KSE_CDOFF(kcd_rxdescs[(x)])
214 1.1 nisimura
215 1.1 nisimura struct kse_txsoft {
216 1.1 nisimura struct mbuf *txs_mbuf; /* head of our mbuf chain */
217 1.1 nisimura bus_dmamap_t txs_dmamap; /* our DMA map */
218 1.1 nisimura int txs_firstdesc; /* first descriptor in packet */
219 1.1 nisimura int txs_lastdesc; /* last descriptor in packet */
220 1.1 nisimura int txs_ndesc; /* # of descriptors used */
221 1.1 nisimura };
222 1.1 nisimura
223 1.1 nisimura struct kse_rxsoft {
224 1.1 nisimura struct mbuf *rxs_mbuf; /* head of our mbuf chain */
225 1.1 nisimura bus_dmamap_t rxs_dmamap; /* our DMA map */
226 1.1 nisimura };
227 1.1 nisimura
228 1.1 nisimura struct kse_softc {
229 1.23 chs device_t sc_dev; /* generic device information */
230 1.1 nisimura bus_space_tag_t sc_st; /* bus space tag */
231 1.1 nisimura bus_space_handle_t sc_sh; /* bus space handle */
232 1.42 nisimura bus_size_t sc_memsize; /* csr map size */
233 1.1 nisimura bus_dma_tag_t sc_dmat; /* bus DMA tag */
234 1.42 nisimura pci_chipset_tag_t sc_pc; /* PCI chipset tag */
235 1.1 nisimura struct ethercom sc_ethercom; /* Ethernet common data */
236 1.1 nisimura void *sc_ih; /* interrupt cookie */
237 1.1 nisimura
238 1.42 nisimura struct mii_data sc_mii; /* mii 8841 */
239 1.42 nisimura struct ifmedia sc_media; /* ifmedia 8842 */
240 1.42 nisimura int sc_flowflags; /* 802.3x PAUSE flow control */
241 1.39 nisimura
242 1.42 nisimura callout_t sc_tick_ch; /* MII tick callout */
243 1.1 nisimura
244 1.1 nisimura bus_dmamap_t sc_cddmamap; /* control data DMA map */
245 1.1 nisimura #define sc_cddma sc_cddmamap->dm_segs[0].ds_addr
246 1.1 nisimura
247 1.1 nisimura struct kse_control_data *sc_control_data;
248 1.8 nisimura #define sc_txdescs sc_control_data->kcd_txdescs
249 1.8 nisimura #define sc_rxdescs sc_control_data->kcd_rxdescs
250 1.1 nisimura
251 1.1 nisimura struct kse_txsoft sc_txsoft[KSE_TXQUEUELEN];
252 1.1 nisimura struct kse_rxsoft sc_rxsoft[KSE_NRXDESC];
253 1.1 nisimura int sc_txfree; /* number of free Tx descriptors */
254 1.1 nisimura int sc_txnext; /* next ready Tx descriptor */
255 1.1 nisimura int sc_txsfree; /* number of free Tx jobs */
256 1.1 nisimura int sc_txsnext; /* next ready Tx job */
257 1.1 nisimura int sc_txsdirty; /* dirty Tx jobs */
258 1.1 nisimura int sc_rxptr; /* next ready Rx descriptor/descsoft */
259 1.1 nisimura
260 1.2 tsutsui uint32_t sc_txc, sc_rxc;
261 1.2 tsutsui uint32_t sc_t1csum;
262 1.2 tsutsui int sc_mcsum;
263 1.8 nisimura uint32_t sc_inten;
264 1.2 tsutsui uint32_t sc_chip;
265 1.8 nisimura
266 1.56 nisimura krndsource_t rnd_source; /* random source */
267 1.56 nisimura
268 1.8 nisimura #ifdef KSE_EVENT_COUNTERS
269 1.8 nisimura struct ksext {
270 1.8 nisimura char evcntname[3][8];
271 1.8 nisimura struct evcnt pev[3][34];
272 1.8 nisimura } sc_ext; /* switch statistics */
273 1.8 nisimura #endif
274 1.1 nisimura };
275 1.1 nisimura
/* Bus address of Tx/Rx descriptor 'x' inside the control-data block. */
#define KSE_CDTXADDR(sc, x)	((sc)->sc_cddma + KSE_CDTXOFF((x)))
#define KSE_CDRXADDR(sc, x)	((sc)->sc_cddma + KSE_CDRXOFF((x)))

/* Sync 'n' Tx descriptors starting at 'x', handling ring wrap-around. */
#define KSE_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > KSE_NTXDESC) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    KSE_CDTXOFF(__x), sizeof(struct tdes) *		\
		    (KSE_NTXDESC - __x), (ops));			\
		__n -= (KSE_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    KSE_CDTXOFF(__x), sizeof(struct tdes) * __n, (ops));	\
} while (/*CONSTCOND*/0)

/* Sync a single Rx descriptor. */
#define KSE_CDRXSYNC(sc, x, ops)					\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    KSE_CDRXOFF((x)), sizeof(struct rdes), (ops));		\
} while (/*CONSTCOND*/0)

/* Re-arm Rx descriptor 'x' with its mbuf cluster and give it to the HW. */
#define KSE_INIT_RXDESC(sc, x)						\
do {									\
	struct kse_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	struct rdes *__rxd = &(sc)->sc_rxdescs[(x)];			\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	__m->m_data = __m->m_ext.ext_buf;				\
	__rxd->r2 = __rxs->rxs_dmamap->dm_segs[0].ds_addr;		\
	__rxd->r1 = R1_RBS_MASK /* __m->m_ext.ext_size */;		\
	__rxd->r0 = R0_OWN;						\
	KSE_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); \
} while (/*CONSTCOND*/0)

319 1.11 nisimura u_int kse_burstsize = 8; /* DMA burst length tuning knob */
320 1.1 nisimura
321 1.1 nisimura #ifdef KSEDIAGNOSTIC
322 1.2 tsutsui u_int kse_monitor_rxintr; /* fragmented UDP csum HW bug hook */
323 1.1 nisimura #endif
324 1.1 nisimura
325 1.18 cegger static int kse_match(device_t, cfdata_t, void *);
326 1.18 cegger static void kse_attach(device_t, device_t, void *);
327 1.1 nisimura
328 1.23 chs CFATTACH_DECL_NEW(kse, sizeof(struct kse_softc),
329 1.1 nisimura kse_match, kse_attach, NULL, NULL);
330 1.1 nisimura
331 1.3 christos static int kse_ioctl(struct ifnet *, u_long, void *);
332 1.1 nisimura static void kse_start(struct ifnet *);
333 1.1 nisimura static void kse_watchdog(struct ifnet *);
334 1.1 nisimura static int kse_init(struct ifnet *);
335 1.1 nisimura static void kse_stop(struct ifnet *, int);
336 1.1 nisimura static void kse_reset(struct kse_softc *);
337 1.53 nisimura static void kse_set_rcvfilt(struct kse_softc *);
338 1.1 nisimura static int add_rxbuf(struct kse_softc *, int);
339 1.1 nisimura static void rxdrain(struct kse_softc *);
340 1.1 nisimura static int kse_intr(void *);
341 1.1 nisimura static void rxintr(struct kse_softc *);
342 1.1 nisimura static void txreap(struct kse_softc *);
343 1.1 nisimura static void lnkchg(struct kse_softc *);
344 1.42 nisimura static int kse_ifmedia_upd(struct ifnet *);
345 1.42 nisimura static void kse_ifmedia_sts(struct ifnet *, struct ifmediareq *);
346 1.42 nisimura static void nopifmedia_sts(struct ifnet *, struct ifmediareq *);
347 1.1 nisimura static void phy_tick(void *);
348 1.42 nisimura int kse_mii_readreg(device_t, int, int, uint16_t *);
349 1.42 nisimura int kse_mii_writereg(device_t, int, int, uint16_t);
350 1.42 nisimura void kse_mii_statchg(struct ifnet *);
351 1.8 nisimura #ifdef KSE_EVENT_COUNTERS
352 1.8 nisimura static void stat_tick(void *);
353 1.8 nisimura static void zerostats(struct kse_softc *);
354 1.8 nisimura #endif
355 1.1 nisimura
356 1.57 thorpej static const struct device_compatible_entry compat_data[] = {
357 1.57 thorpej { .id = PCI_ID_CODE(PCI_VENDOR_MICREL,
358 1.57 thorpej PCI_PRODUCT_MICREL_KSZ8842) },
359 1.57 thorpej { .id = PCI_ID_CODE(PCI_VENDOR_MICREL,
360 1.57 thorpej PCI_PRODUCT_MICREL_KSZ8841) },
361 1.57 thorpej
362 1.57 thorpej PCI_COMPAT_EOL
363 1.57 thorpej };
364 1.57 thorpej
365 1.1 nisimura static int
366 1.18 cegger kse_match(device_t parent, cfdata_t match, void *aux)
367 1.1 nisimura {
368 1.1 nisimura struct pci_attach_args *pa = (struct pci_attach_args *)aux;
369 1.1 nisimura
370 1.57 thorpej return PCI_CLASS(pa->pa_class) == PCI_CLASS_NETWORK &&
371 1.57 thorpej pci_compatible_match(pa, compat_data);
372 1.1 nisimura }
373 1.1 nisimura
374 1.1 nisimura static void
375 1.18 cegger kse_attach(device_t parent, device_t self, void *aux)
376 1.1 nisimura {
377 1.19 cegger struct kse_softc *sc = device_private(self);
378 1.1 nisimura struct pci_attach_args *pa = aux;
379 1.1 nisimura pci_chipset_tag_t pc = pa->pa_pc;
380 1.1 nisimura pci_intr_handle_t ih;
381 1.1 nisimura const char *intrstr;
382 1.42 nisimura struct ifnet *ifp = &sc->sc_ethercom.ec_if;
383 1.42 nisimura struct mii_data * const mii = &sc->sc_mii;
384 1.8 nisimura struct ifmedia *ifm;
385 1.1 nisimura uint8_t enaddr[ETHER_ADDR_LEN];
386 1.1 nisimura bus_dma_segment_t seg;
387 1.25 nisimura int i, error, nseg;
388 1.27 christos char intrbuf[PCI_INTRSTR_LEN];
389 1.1 nisimura
390 1.42 nisimura aprint_normal(": Micrel KSZ%04x Ethernet (rev. 0x%02x)\n",
391 1.42 nisimura PCI_PRODUCT(pa->pa_id), PCI_REVISION(pa->pa_class));
392 1.42 nisimura
393 1.1 nisimura if (pci_mapreg_map(pa, 0x10,
394 1.1 nisimura PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT,
395 1.42 nisimura 0, &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_memsize) != 0) {
396 1.42 nisimura aprint_error_dev(self, "unable to map device registers\n");
397 1.1 nisimura return;
398 1.1 nisimura }
399 1.1 nisimura
400 1.1 nisimura /* Make sure bus mastering is enabled. */
401 1.1 nisimura pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
402 1.1 nisimura pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) |
403 1.1 nisimura PCI_COMMAND_MASTER_ENABLE);
404 1.1 nisimura
405 1.42 nisimura /* Power up chip if necessary. */
406 1.42 nisimura if ((error = pci_activate(pc, pa->pa_tag, self, NULL))
407 1.42 nisimura && error != EOPNOTSUPP) {
408 1.42 nisimura aprint_error_dev(self, "cannot activate %d\n", error);
409 1.42 nisimura return;
410 1.42 nisimura }
411 1.42 nisimura
412 1.42 nisimura /* Map and establish our interrupt. */
413 1.42 nisimura if (pci_intr_map(pa, &ih)) {
414 1.42 nisimura aprint_error_dev(self, "unable to map interrupt\n");
415 1.54 nisimura goto fail;
416 1.42 nisimura }
417 1.42 nisimura intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
418 1.42 nisimura sc->sc_ih = pci_intr_establish_xname(pc, ih, IPL_NET, kse_intr, sc,
419 1.42 nisimura device_xname(self));
420 1.42 nisimura if (sc->sc_ih == NULL) {
421 1.42 nisimura aprint_error_dev(self, "unable to establish interrupt");
422 1.42 nisimura if (intrstr != NULL)
423 1.42 nisimura aprint_error(" at %s", intrstr);
424 1.42 nisimura aprint_error("\n");
425 1.54 nisimura goto fail;
426 1.1 nisimura }
427 1.42 nisimura aprint_normal_dev(self, "interrupting at %s\n", intrstr);
428 1.1 nisimura
429 1.42 nisimura sc->sc_dev = self;
430 1.42 nisimura sc->sc_dmat = pa->pa_dmat;
431 1.42 nisimura sc->sc_pc = pa->pa_pc;
432 1.1 nisimura sc->sc_chip = PCI_PRODUCT(pa->pa_id);
433 1.1 nisimura
434 1.1 nisimura /*
435 1.1 nisimura * Read the Ethernet address from the EEPROM.
436 1.1 nisimura */
437 1.1 nisimura i = CSR_READ_2(sc, MARL);
438 1.42 nisimura enaddr[5] = i;
439 1.42 nisimura enaddr[4] = i >> 8;
440 1.1 nisimura i = CSR_READ_2(sc, MARM);
441 1.42 nisimura enaddr[3] = i;
442 1.42 nisimura enaddr[2] = i >> 8;
443 1.1 nisimura i = CSR_READ_2(sc, MARH);
444 1.42 nisimura enaddr[1] = i;
445 1.42 nisimura enaddr[0] = i >> 8;
446 1.42 nisimura aprint_normal_dev(self,
447 1.42 nisimura "Ethernet address %s\n", ether_sprintf(enaddr));
448 1.1 nisimura
449 1.1 nisimura /*
450 1.1 nisimura * Enable chip function.
451 1.1 nisimura */
452 1.42 nisimura CSR_WRITE_2(sc, SIDER, 1);
453 1.1 nisimura
454 1.1 nisimura /*
455 1.1 nisimura * Allocate the control data structures, and create and load the
456 1.1 nisimura * DMA map for it.
457 1.1 nisimura */
458 1.1 nisimura error = bus_dmamem_alloc(sc->sc_dmat,
459 1.1 nisimura sizeof(struct kse_control_data), PAGE_SIZE, 0, &seg, 1, &nseg, 0);
460 1.1 nisimura if (error != 0) {
461 1.42 nisimura aprint_error_dev(self,
462 1.35 msaitoh "unable to allocate control data, error = %d\n", error);
463 1.1 nisimura goto fail_0;
464 1.1 nisimura }
465 1.1 nisimura error = bus_dmamem_map(sc->sc_dmat, &seg, nseg,
466 1.9 nisimura sizeof(struct kse_control_data), (void **)&sc->sc_control_data,
467 1.1 nisimura BUS_DMA_COHERENT);
468 1.1 nisimura if (error != 0) {
469 1.42 nisimura aprint_error_dev(self,
470 1.35 msaitoh "unable to map control data, error = %d\n", error);
471 1.1 nisimura goto fail_1;
472 1.1 nisimura }
473 1.1 nisimura error = bus_dmamap_create(sc->sc_dmat,
474 1.1 nisimura sizeof(struct kse_control_data), 1,
475 1.1 nisimura sizeof(struct kse_control_data), 0, 0, &sc->sc_cddmamap);
476 1.1 nisimura if (error != 0) {
477 1.42 nisimura aprint_error_dev(self,
478 1.35 msaitoh "unable to create control data DMA map, "
479 1.14 cegger "error = %d\n", error);
480 1.1 nisimura goto fail_2;
481 1.1 nisimura }
482 1.1 nisimura error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
483 1.1 nisimura sc->sc_control_data, sizeof(struct kse_control_data), NULL, 0);
484 1.1 nisimura if (error != 0) {
485 1.42 nisimura aprint_error_dev(self,
486 1.35 msaitoh "unable to load control data DMA map, error = %d\n",
487 1.14 cegger error);
488 1.1 nisimura goto fail_3;
489 1.1 nisimura }
490 1.1 nisimura for (i = 0; i < KSE_TXQUEUELEN; i++) {
491 1.1 nisimura if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
492 1.1 nisimura KSE_NTXSEGS, MCLBYTES, 0, 0,
493 1.1 nisimura &sc->sc_txsoft[i].txs_dmamap)) != 0) {
494 1.42 nisimura aprint_error_dev(self,
495 1.35 msaitoh "unable to create tx DMA map %d, error = %d\n",
496 1.35 msaitoh i, error);
497 1.1 nisimura goto fail_4;
498 1.1 nisimura }
499 1.1 nisimura }
500 1.1 nisimura for (i = 0; i < KSE_NRXDESC; i++) {
501 1.1 nisimura if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
502 1.1 nisimura 1, MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
503 1.42 nisimura aprint_error_dev(self,
504 1.35 msaitoh "unable to create rx DMA map %d, error = %d\n",
505 1.35 msaitoh i, error);
506 1.1 nisimura goto fail_5;
507 1.1 nisimura }
508 1.1 nisimura sc->sc_rxsoft[i].rxs_mbuf = NULL;
509 1.1 nisimura }
510 1.1 nisimura
511 1.42 nisimura mii->mii_ifp = ifp;
512 1.42 nisimura mii->mii_readreg = kse_mii_readreg;
513 1.42 nisimura mii->mii_writereg = kse_mii_writereg;
514 1.42 nisimura mii->mii_statchg = kse_mii_statchg;
515 1.1 nisimura
516 1.38 msaitoh /* Initialize ifmedia structures. */
517 1.8 nisimura if (sc->sc_chip == 0x8841) {
518 1.42 nisimura /* use port 1 builtin PHY as index 1 device */
519 1.42 nisimura sc->sc_ethercom.ec_mii = mii;
520 1.42 nisimura ifm = &mii->mii_media;
521 1.42 nisimura ifmedia_init(ifm, 0, kse_ifmedia_upd, kse_ifmedia_sts);
522 1.42 nisimura mii_attach(sc->sc_dev, mii, 0xffffffff, 1 /* PHY1 */,
523 1.42 nisimura MII_OFFSET_ANY, MIIF_DOPAUSE);
524 1.42 nisimura if (LIST_FIRST(&mii->mii_phys) == NULL) {
525 1.42 nisimura ifmedia_add(ifm, IFM_ETHER | IFM_NONE, 0, NULL);
526 1.42 nisimura ifmedia_set(ifm, IFM_ETHER | IFM_NONE);
527 1.42 nisimura } else
528 1.42 nisimura ifmedia_set(ifm, IFM_ETHER | IFM_AUTO);
529 1.35 msaitoh } else {
530 1.40 nisimura /*
531 1.40 nisimura * pretend 100FDX w/ no alternative media selection.
532 1.42 nisimura * 8842 MAC is tied with a builtin 3 port switch. It can do
533 1.42 nisimura * 4 degree priotised rate control over either of tx/rx
534 1.42 nisimura * direction for any of ports, respectively. Tough, this
535 1.42 nisimura * driver leaves the rate unlimited intending 100Mbps maximum.
536 1.42 nisimura * 2 external ports behave in AN mode and this driver provides
537 1.42 nisimura * no mean to manipulate and see their operational details.
538 1.40 nisimura */
539 1.42 nisimura sc->sc_ethercom.ec_ifmedia = ifm = &sc->sc_media;
540 1.42 nisimura ifmedia_init(ifm, 0, NULL, nopifmedia_sts);
541 1.39 nisimura ifmedia_add(ifm, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
542 1.39 nisimura ifmedia_set(ifm, IFM_ETHER | IFM_100_TX | IFM_FDX);
543 1.42 nisimura
544 1.42 nisimura aprint_normal_dev(self,
545 1.42 nisimura "10baseT, 10baseT-FDX, 100baseTX, 100baseTX-FDX, auto\n");
546 1.8 nisimura }
547 1.42 nisimura ifm->ifm_media = ifm->ifm_cur->ifm_media; /* as if user has requested */
548 1.1 nisimura
549 1.23 chs strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
550 1.1 nisimura ifp->if_softc = sc;
551 1.1 nisimura ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
552 1.1 nisimura ifp->if_ioctl = kse_ioctl;
553 1.1 nisimura ifp->if_start = kse_start;
554 1.1 nisimura ifp->if_watchdog = kse_watchdog;
555 1.1 nisimura ifp->if_init = kse_init;
556 1.1 nisimura ifp->if_stop = kse_stop;
557 1.1 nisimura IFQ_SET_READY(&ifp->if_snd);
558 1.1 nisimura
559 1.1 nisimura /*
560 1.42 nisimura * capable of 802.1Q VLAN-sized frames and hw assisted tagging.
561 1.1 nisimura * can do IPv4, TCPv4, and UDPv4 checksums in hardware.
562 1.1 nisimura */
563 1.42 nisimura sc->sc_ethercom.ec_capabilities = ETHERCAP_VLAN_MTU;
564 1.42 nisimura ifp->if_capabilities =
565 1.1 nisimura IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
566 1.1 nisimura IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
567 1.1 nisimura IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
568 1.1 nisimura
569 1.56 nisimura sc->sc_flowflags = 0;
570 1.56 nisimura
571 1.1 nisimura if_attach(ifp);
572 1.43 nisimura if_deferred_start_init(ifp, NULL);
573 1.1 nisimura ether_ifattach(ifp, enaddr);
574 1.8 nisimura
575 1.54 nisimura callout_init(&sc->sc_tick_ch, 0);
576 1.54 nisimura callout_setfunc(&sc->sc_tick_ch, phy_tick, sc);
577 1.54 nisimura
578 1.56 nisimura rnd_attach_source(&sc->rnd_source, device_xname(self),
579 1.56 nisimura RND_TYPE_NET, RND_FLAG_DEFAULT);
580 1.56 nisimura
581 1.8 nisimura #ifdef KSE_EVENT_COUNTERS
582 1.56 nisimura const char *events[34] = {
583 1.56 nisimura "RxLoPriotyByte",
584 1.56 nisimura "RxHiPriotyByte",
585 1.56 nisimura "RxUndersizePkt",
586 1.56 nisimura "RxFragments",
587 1.56 nisimura "RxOversize",
588 1.56 nisimura "RxJabbers",
589 1.56 nisimura "RxSymbolError",
590 1.56 nisimura "RxCRCError",
591 1.56 nisimura "RxAlignmentError",
592 1.56 nisimura "RxControl8808Pkts",
593 1.56 nisimura "RxPausePkts",
594 1.56 nisimura "RxBroadcast",
595 1.56 nisimura "RxMulticast",
596 1.56 nisimura "RxUnicast",
597 1.56 nisimura "Rx64Octets",
598 1.56 nisimura "Rx65To127Octets",
599 1.56 nisimura "Rx128To255Octets",
600 1.56 nisimura "Rx255To511Octets",
601 1.56 nisimura "Rx512To1023Octets",
602 1.56 nisimura "Rx1024To1522Octets",
603 1.56 nisimura "TxLoPriotyByte",
604 1.56 nisimura "TxHiPriotyByte",
605 1.56 nisimura "TxLateCollision",
606 1.56 nisimura "TxPausePkts",
607 1.56 nisimura "TxBroadcastPkts",
608 1.56 nisimura "TxMulticastPkts",
609 1.56 nisimura "TxUnicastPkts",
610 1.56 nisimura "TxDeferred",
611 1.56 nisimura "TxTotalCollision",
612 1.56 nisimura "TxExcessiveCollision",
613 1.56 nisimura "TxSingleCollision",
614 1.56 nisimura "TxMultipleCollision",
615 1.56 nisimura "TxDropPkts",
616 1.56 nisimura "RxDropPkts",
617 1.56 nisimura };
618 1.56 nisimura struct ksext *ee = &sc->sc_ext;
619 1.25 nisimura int p = (sc->sc_chip == 0x8842) ? 3 : 1;
620 1.8 nisimura for (i = 0; i < p; i++) {
621 1.26 christos snprintf(ee->evcntname[i], sizeof(ee->evcntname[i]),
622 1.26 christos "%s.%d", device_xname(sc->sc_dev), i+1);
623 1.56 nisimura for (int ev = 0; ev < 34; ev++) {
624 1.56 nisimura evcnt_attach_dynamic(&ee->pev[i][ev], EVCNT_TYPE_MISC,
625 1.56 nisimura NULL, ee->evcntname[i], events[ev]);
626 1.56 nisimura }
627 1.8 nisimura }
628 1.8 nisimura #endif
629 1.1 nisimura return;
630 1.1 nisimura
631 1.1 nisimura fail_5:
632 1.1 nisimura for (i = 0; i < KSE_NRXDESC; i++) {
633 1.1 nisimura if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
634 1.1 nisimura bus_dmamap_destroy(sc->sc_dmat,
635 1.1 nisimura sc->sc_rxsoft[i].rxs_dmamap);
636 1.24 christos }
637 1.1 nisimura fail_4:
638 1.1 nisimura for (i = 0; i < KSE_TXQUEUELEN; i++) {
639 1.1 nisimura if (sc->sc_txsoft[i].txs_dmamap != NULL)
640 1.1 nisimura bus_dmamap_destroy(sc->sc_dmat,
641 1.1 nisimura sc->sc_txsoft[i].txs_dmamap);
642 1.1 nisimura }
643 1.1 nisimura bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
644 1.1 nisimura fail_3:
645 1.1 nisimura bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
646 1.1 nisimura fail_2:
647 1.3 christos bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
648 1.1 nisimura sizeof(struct kse_control_data));
649 1.1 nisimura fail_1:
650 1.1 nisimura bus_dmamem_free(sc->sc_dmat, &seg, nseg);
651 1.1 nisimura fail_0:
652 1.54 nisimura pci_intr_disestablish(pc, sc->sc_ih);
653 1.54 nisimura fail:
654 1.54 nisimura bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_memsize);
655 1.1 nisimura return;
656 1.1 nisimura }
657 1.1 nisimura
658 1.1 nisimura static int
659 1.3 christos kse_ioctl(struct ifnet *ifp, u_long cmd, void *data)
660 1.1 nisimura {
661 1.1 nisimura struct kse_softc *sc = ifp->if_softc;
662 1.42 nisimura struct ifreq *ifr = (struct ifreq *)data;
663 1.42 nisimura struct ifmedia *ifm;
664 1.1 nisimura int s, error;
665 1.1 nisimura
666 1.1 nisimura s = splnet();
667 1.1 nisimura
668 1.1 nisimura switch (cmd) {
669 1.42 nisimura case SIOCSIFMEDIA:
670 1.42 nisimura /* Flow control requires full-duplex mode. */
671 1.42 nisimura if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
672 1.42 nisimura (ifr->ifr_media & IFM_FDX) == 0)
673 1.42 nisimura ifr->ifr_media &= ~IFM_ETH_FMASK;
674 1.42 nisimura if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
675 1.42 nisimura if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
676 1.42 nisimura /* We can do both TXPAUSE and RXPAUSE. */
677 1.42 nisimura ifr->ifr_media |=
678 1.42 nisimura IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
679 1.42 nisimura }
680 1.42 nisimura sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
681 1.42 nisimura }
682 1.42 nisimura ifm = (sc->sc_chip == 0x8841)
683 1.42 nisimura ? &sc->sc_mii.mii_media : &sc->sc_media;
684 1.42 nisimura error = ifmedia_ioctl(ifp, ifr, ifm, cmd);
685 1.42 nisimura break;
686 1.1 nisimura default:
687 1.54 nisimura error = ether_ioctl(ifp, cmd, data);
688 1.54 nisimura if (error != ENETRESET)
689 1.12 dyoung break;
690 1.12 dyoung error = 0;
691 1.12 dyoung if (cmd == SIOCSIFCAP)
692 1.12 dyoung error = (*ifp->if_init)(ifp);
693 1.12 dyoung if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
694 1.12 dyoung ;
695 1.12 dyoung else if (ifp->if_flags & IFF_RUNNING) {
696 1.1 nisimura /*
697 1.1 nisimura * Multicast list has changed; set the hardware filter
698 1.1 nisimura * accordingly.
699 1.1 nisimura */
700 1.53 nisimura kse_set_rcvfilt(sc);
701 1.1 nisimura }
702 1.1 nisimura break;
703 1.1 nisimura }
704 1.1 nisimura
705 1.1 nisimura splx(s);
706 1.54 nisimura
707 1.1 nisimura return error;
708 1.1 nisimura }
709 1.1 nisimura
/*
 * kse_init:
 *	ifnet if_init handler.  Stop pending I/O, reset the chip,
 *	rebuild both descriptor rings, program checksum offload and
 *	PAUSE flow control, reload the receive filter, then enable
 *	DMA and interrupts.  Returns 0 on success or the add_rxbuf()
 *	error when the Rx ring cannot be populated.
 */
static int
kse_init(struct ifnet *ifp)
{
	struct kse_softc *sc = ifp->if_softc;
	uint32_t paddr;
	int i, error = 0;

	/* cancel pending I/O */
	kse_stop(ifp, 0);

	/* reset all registers but PCI configuration */
	kse_reset(sc);

	/* craft Tx descriptor ring */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	/* chain each descriptor's t3 to the bus address of its successor */
	for (i = 0, paddr = KSE_CDTXADDR(sc, 1); i < KSE_NTXDESC - 1; i++) {
		sc->sc_txdescs[i].t3 = paddr;
		paddr += sizeof(struct tdes);
	}
	/* close the ring: last descriptor points back at slot 0 */
	sc->sc_txdescs[KSE_NTXDESC - 1].t3 = KSE_CDTXADDR(sc, 0);
	KSE_CDTXSYNC(sc, 0, KSE_NTXDESC,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = KSE_NTXDESC;
	sc->sc_txnext = 0;

	/* no Tx jobs outstanding yet */
	for (i = 0; i < KSE_TXQUEUELEN; i++)
		sc->sc_txsoft[i].txs_mbuf = NULL;
	sc->sc_txsfree = KSE_TXQUEUELEN;
	sc->sc_txsnext = 0;
	sc->sc_txsdirty = 0;

	/* craft Rx descriptor ring (same circular chaining as Tx) */
	memset(sc->sc_rxdescs, 0, sizeof(sc->sc_rxdescs));
	for (i = 0, paddr = KSE_CDRXADDR(sc, 1); i < KSE_NRXDESC - 1; i++) {
		sc->sc_rxdescs[i].r3 = paddr;
		paddr += sizeof(struct rdes);
	}
	sc->sc_rxdescs[KSE_NRXDESC - 1].r3 = KSE_CDRXADDR(sc, 0);
	/* attach a cluster mbuf to every Rx slot that lacks one */
	for (i = 0; i < KSE_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_mbuf == NULL) {
			if ((error = add_rxbuf(sc, i)) != 0) {
				aprint_error_dev(sc->sc_dev,
				    "unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    i, error);
				rxdrain(sc);
				goto out;
			}
		}
		else
			KSE_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	/* hand Tx/Rx rings to HW */
	CSR_WRITE_4(sc, TDLB, KSE_CDTXADDR(sc, 0));
	CSR_WRITE_4(sc, RDLB, KSE_CDRXADDR(sc, 0));

	/* base Tx/Rx DMA control values; offload bits OR'ed in below */
	sc->sc_txc = TXC_TEN | TXC_EP | TXC_AC;
	sc->sc_rxc = RXC_REN | RXC_RU | RXC_RB;
	sc->sc_t1csum = sc->sc_mcsum = 0;
	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) {
		sc->sc_rxc |= RXC_ICC;
		sc->sc_mcsum |= M_CSUM_IPv4;
	}
	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Tx) {
		sc->sc_txc |= TXC_ICG;
		sc->sc_t1csum |= T1_IPCKG;
	}
	if (ifp->if_capenable & IFCAP_CSUM_TCPv4_Rx) {
		sc->sc_rxc |= RXC_TCC;
		sc->sc_mcsum |= M_CSUM_TCPv4;
	}
	if (ifp->if_capenable & IFCAP_CSUM_TCPv4_Tx) {
		sc->sc_txc |= TXC_TCG;
		sc->sc_t1csum |= T1_TCPCKG;
	}
	if (ifp->if_capenable & IFCAP_CSUM_UDPv4_Rx) {
		sc->sc_rxc |= RXC_UCC;
		sc->sc_mcsum |= M_CSUM_UDPv4;
	}
	if (ifp->if_capenable & IFCAP_CSUM_UDPv4_Tx) {
		sc->sc_txc |= TXC_UCG;
		sc->sc_t1csum |= T1_UDPCKG;
	}
	/* DMA burst length for both directions */
	sc->sc_txc |= (kse_burstsize << TXC_BS_SFT);
	sc->sc_rxc |= (kse_burstsize << RXC_BS_SFT);

	if (sc->sc_chip == 0x8842) {
		/* make PAUSE flow control to run */
		sc->sc_txc |= TXC_FCE;
		sc->sc_rxc |= RXC_FCE;
		i = CSR_READ_2(sc, SGCR3);
		CSR_WRITE_2(sc, SGCR3, i | CR3_USEFC);
	}

	/* accept multicast frame or run promisc mode */
	kse_set_rcvfilt(sc);

	/* set current media */
	if (sc->sc_chip == 0x8841)
		(void)kse_ifmedia_upd(ifp);

	/* enable transmitter and receiver */
	CSR_WRITE_4(sc, MDTXC, sc->sc_txc);
	CSR_WRITE_4(sc, MDRXC, sc->sc_rxc);
	CSR_WRITE_4(sc, MDRSC, 1);

	/* enable interrupts; link change only exists on the 8841 port */
	sc->sc_inten = INT_DMTS | INT_DMRS | INT_DMRBUS;
	if (sc->sc_chip == 0x8841)
		sc->sc_inten |= INT_DMLCS;
	CSR_WRITE_4(sc, INTST, ~0);
	CSR_WRITE_4(sc, INTEN, sc->sc_inten);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* start one second timer */
	callout_schedule(&sc->sc_tick_ch, hz);

#ifdef KSE_EVENT_COUNTERS
	zerostats(sc);
#endif

 out:
	if (error) {
		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
		ifp->if_timer = 0;
		aprint_error_dev(sc->sc_dev, "interface not running\n");
	}
	return error;
}
843 1.1 nisimura
844 1.1 nisimura static void
845 1.1 nisimura kse_stop(struct ifnet *ifp, int disable)
846 1.1 nisimura {
847 1.1 nisimura struct kse_softc *sc = ifp->if_softc;
848 1.1 nisimura struct kse_txsoft *txs;
849 1.1 nisimura int i;
850 1.1 nisimura
851 1.56 nisimura callout_stop(&sc->sc_tick_ch);
852 1.56 nisimura
853 1.1 nisimura sc->sc_txc &= ~TXC_TEN;
854 1.1 nisimura sc->sc_rxc &= ~RXC_REN;
855 1.1 nisimura CSR_WRITE_4(sc, MDTXC, sc->sc_txc);
856 1.1 nisimura CSR_WRITE_4(sc, MDRXC, sc->sc_rxc);
857 1.1 nisimura
858 1.1 nisimura for (i = 0; i < KSE_TXQUEUELEN; i++) {
859 1.1 nisimura txs = &sc->sc_txsoft[i];
860 1.1 nisimura if (txs->txs_mbuf != NULL) {
861 1.1 nisimura bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
862 1.1 nisimura m_freem(txs->txs_mbuf);
863 1.1 nisimura txs->txs_mbuf = NULL;
864 1.1 nisimura }
865 1.1 nisimura }
866 1.1 nisimura
867 1.13 dyoung ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
868 1.13 dyoung ifp->if_timer = 0;
869 1.13 dyoung
870 1.1 nisimura if (disable)
871 1.1 nisimura rxdrain(sc);
872 1.1 nisimura }
873 1.1 nisimura
/*
 * kse_reset:
 *	Pulse the global software reset register, then write SIDER to
 *	turn the switch function on again.
 */
static void
kse_reset(struct kse_softc *sc)
{

	/* software reset */
	CSR_WRITE_2(sc, GRR, 1);
	delay(1000); /* PDF does not mention the delay amount */
	CSR_WRITE_2(sc, GRR, 0);

	/* enable switch function */
	CSR_WRITE_2(sc, SIDER, 1);
}
886 1.1 nisimura
887 1.1 nisimura static void
888 1.1 nisimura kse_watchdog(struct ifnet *ifp)
889 1.1 nisimura {
890 1.1 nisimura struct kse_softc *sc = ifp->if_softc;
891 1.1 nisimura
892 1.24 christos /*
893 1.1 nisimura * Since we're not interrupting every packet, sweep
894 1.1 nisimura * up before we report an error.
895 1.1 nisimura */
896 1.1 nisimura txreap(sc);
897 1.1 nisimura
898 1.1 nisimura if (sc->sc_txfree != KSE_NTXDESC) {
899 1.42 nisimura aprint_error_dev(sc->sc_dev,
900 1.42 nisimura "device timeout (txfree %d txsfree %d txnext %d)\n",
901 1.42 nisimura sc->sc_txfree, sc->sc_txsfree, sc->sc_txnext);
902 1.48 skrll if_statinc(ifp, if_oerrors);
903 1.1 nisimura
904 1.1 nisimura /* Reset the interface. */
905 1.1 nisimura kse_init(ifp);
906 1.1 nisimura }
907 1.1 nisimura else if (ifp->if_flags & IFF_DEBUG)
908 1.42 nisimura aprint_error_dev(sc->sc_dev, "recovered from device timeout\n");
909 1.1 nisimura
910 1.1 nisimura /* Try to get more packets going. */
911 1.1 nisimura kse_start(ifp);
912 1.1 nisimura }
913 1.1 nisimura
/*
 * kse_start:
 *	ifnet if_start handler.  Move frames from the send queue into
 *	the Tx descriptor ring.  The first descriptor's T0_OWN bit is
 *	written last so the chip never sees a partially built chain.
 */
static void
kse_start(struct ifnet *ifp)
{
	struct kse_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct kse_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx, ofree, seg;
	uint32_t tdes0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/* Remember the previous number of free descriptors. */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		/* running low on job slots: reap finished ones first */
		if (sc->sc_txsfree < KSE_TXQUEUE_GC) {
			txreap(sc);
			if (sc->sc_txsfree == 0)
				break;
		}
		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				/* hopelessly fragmented chain; drop it */
				aprint_error_dev(sc->sc_dev,
				    "Tx packet consumes too many "
				    "DMA segments, dropping...\n");
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/* Short on resources, just stop for now. */
			break;
		}

		if (dmamap->dm_nsegs > sc->sc_txfree) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are not more slots left.
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		tdes0 = 0; /* to postpone 1st segment T0_OWN write */
		lasttx = -1;
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = KSE_NEXTTX(nexttx)) {
			struct tdes *tdes = &sc->sc_txdescs[nexttx];
			/*
			 * If this is the first descriptor we're
			 * enqueueing, don't set the OWN bit just
			 * yet.  That could cause a race condition.
			 * We'll do it below.
			 */
			tdes->t2 = dmamap->dm_segs[seg].ds_addr;
			tdes->t1 = sc->sc_t1csum
			    | (dmamap->dm_segs[seg].ds_len & T1_TBS_MASK);
			tdes->t0 = tdes0;
			tdes0 = T0_OWN; /* 2nd and other segments */
			lasttx = nexttx;
		}
		/*
		 * Outgoing NFS mbuf must be unloaded when Tx completed.
		 * Without T1_IC NFS mbuf is left unack'ed for excessive
		 * time and NFS stops to proceed until kse_watchdog()
		 * calls txreap() to reclaim the unack'ed mbuf.
		 * It's painful to traverse every mbuf chain to determine
		 * whether someone is waiting for Tx completion.
		 */
		m = m0;
		do {
			if ((m->m_flags & M_EXT) && m->m_ext.ext_free) {
				sc->sc_txdescs[lasttx].t1 |= T1_IC;
				break;
			}
		} while ((m = m->m_next) != NULL);

		/* Write deferred 1st segment T0_OWN at the final stage */
		sc->sc_txdescs[lasttx].t1 |= T1_LS;
		sc->sc_txdescs[sc->sc_txnext].t1 |= T1_FS;
		sc->sc_txdescs[sc->sc_txnext].t0 = T0_OWN;
		KSE_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Tell DMA start transmit */
		CSR_WRITE_4(sc, MDTSC, 1);

		/* record the job so txreap() can unload and free later */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_lastdesc = lasttx;
		txs->txs_ndesc = dmamap->dm_nsegs;

		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;
		sc->sc_txsfree--;
		sc->sc_txsnext = KSE_NEXTTXS(sc->sc_txsnext);
		/*
		 * Pass the packet to any BPF listeners.
		 */
		bpf_mtap(ifp, m0, BPF_D_OUT);
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree == 0) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}
	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
1054 1.1 nisimura
/*
 * kse_set_rcvfilt:
 *	Program the hardware receive filter from the interface flags
 *	and the multicast list.  IFF_PROMISC selects receive-all
 *	(RXC_RA); a multicast address range selects receive-all-
 *	multicast (RXC_RM); otherwise up to 16 addresses go into the
 *	additional MAC address slots and any overflow falls back to
 *	the 64-bit hash table (RXC_MHTE/MTR0/MTR1).
 */
static void
kse_set_rcvfilt(struct kse_softc *sc)
{
	struct ether_multistep step;
	struct ether_multi *enm;
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &ec->ec_if;
	uint32_t crc, mchash[2];
	int i;

	sc->sc_rxc &= ~(RXC_MHTE | RXC_RM | RXC_RA);

	/* clear perfect match filter and prepare mcast hash table */
	for (i = 0; i < 16; i++)
		CSR_WRITE_4(sc, MAAH0 + i*8, 0);
	/* crc also doubles as a "hash table was used" flag below */
	crc = mchash[0] = mchash[1] = 0;

	ETHER_LOCK(ec);
	if (ifp->if_flags & IFF_PROMISC) {
		ec->ec_flags |= ETHER_F_ALLMULTI;
		ETHER_UNLOCK(ec);
		/* run promisc. mode */
		sc->sc_rxc |= RXC_RA;
		goto update;
	}
	ec->ec_flags &= ~ETHER_F_ALLMULTI;
	ETHER_FIRST_MULTI(step, ec, enm);
	i = 0;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			ec->ec_flags |= ETHER_F_ALLMULTI;
			ETHER_UNLOCK(ec);
			/* accept all multicast */
			sc->sc_rxc |= RXC_RM;
			goto update;
		}
#if KSE_MCASTDEBUG == 1
		printf("[%d] %s\n", i, ether_sprintf(enm->enm_addrlo));
#endif
		if (i < 16) {
			/* use 16 additional MAC addr to accept mcast */
			uint32_t addr;
			uint8_t *ep = enm->enm_addrlo;
			/* low 4 octets, little-endian, into MAALn */
			addr = (ep[3] << 24) | (ep[2] << 16)
			    | (ep[1] << 8) | ep[0];
			CSR_WRITE_4(sc, MAAL0 + i*8, addr);
			/* high 2 octets + valid bit (bit 31) into MAAHn */
			addr = (ep[5] << 8) | ep[4];
			CSR_WRITE_4(sc, MAAH0 + i*8, addr | (1U << 31));
		} else {
			/* use hash table when too many */
			crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
			mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
		}
		ETHER_NEXT_MULTI(step, enm);
		i++;
	}
	ETHER_UNLOCK(ec);

	if (crc)
		sc->sc_rxc |= RXC_MHTE;
	CSR_WRITE_4(sc, MTR0, mchash[0]);
	CSR_WRITE_4(sc, MTR1, mchash[1]);
update:
	/* With RA or RM, MHTE/MTR0/MTR1 are never consulted. */
	return;
}
1129 1.1 nisimura
1130 1.1 nisimura static int
1131 1.1 nisimura add_rxbuf(struct kse_softc *sc, int idx)
1132 1.1 nisimura {
1133 1.1 nisimura struct kse_rxsoft *rxs = &sc->sc_rxsoft[idx];
1134 1.1 nisimura struct mbuf *m;
1135 1.1 nisimura int error;
1136 1.1 nisimura
1137 1.1 nisimura MGETHDR(m, M_DONTWAIT, MT_DATA);
1138 1.1 nisimura if (m == NULL)
1139 1.1 nisimura return ENOBUFS;
1140 1.1 nisimura
1141 1.1 nisimura MCLGET(m, M_DONTWAIT);
1142 1.1 nisimura if ((m->m_flags & M_EXT) == 0) {
1143 1.1 nisimura m_freem(m);
1144 1.1 nisimura return ENOBUFS;
1145 1.1 nisimura }
1146 1.1 nisimura
1147 1.1 nisimura if (rxs->rxs_mbuf != NULL)
1148 1.1 nisimura bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
1149 1.1 nisimura
1150 1.1 nisimura rxs->rxs_mbuf = m;
1151 1.1 nisimura
1152 1.1 nisimura error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
1153 1.1 nisimura m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
1154 1.1 nisimura if (error) {
1155 1.42 nisimura aprint_error_dev(sc->sc_dev,
1156 1.42 nisimura "can't load rx DMA map %d, error = %d\n", idx, error);
1157 1.1 nisimura panic("kse_add_rxbuf");
1158 1.1 nisimura }
1159 1.1 nisimura
1160 1.1 nisimura bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1161 1.1 nisimura rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1162 1.1 nisimura
1163 1.1 nisimura KSE_INIT_RXDESC(sc, idx);
1164 1.1 nisimura
1165 1.1 nisimura return 0;
1166 1.1 nisimura }
1167 1.1 nisimura
1168 1.1 nisimura static void
1169 1.1 nisimura rxdrain(struct kse_softc *sc)
1170 1.1 nisimura {
1171 1.1 nisimura struct kse_rxsoft *rxs;
1172 1.1 nisimura int i;
1173 1.1 nisimura
1174 1.1 nisimura for (i = 0; i < KSE_NRXDESC; i++) {
1175 1.1 nisimura rxs = &sc->sc_rxsoft[i];
1176 1.1 nisimura if (rxs->rxs_mbuf != NULL) {
1177 1.1 nisimura bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
1178 1.1 nisimura m_freem(rxs->rxs_mbuf);
1179 1.1 nisimura rxs->rxs_mbuf = NULL;
1180 1.1 nisimura }
1181 1.1 nisimura }
1182 1.1 nisimura }
1183 1.1 nisimura
1184 1.1 nisimura static int
1185 1.1 nisimura kse_intr(void *arg)
1186 1.1 nisimura {
1187 1.1 nisimura struct kse_softc *sc = arg;
1188 1.43 nisimura struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1189 1.2 tsutsui uint32_t isr;
1190 1.1 nisimura
1191 1.1 nisimura if ((isr = CSR_READ_4(sc, INTST)) == 0)
1192 1.1 nisimura return 0;
1193 1.1 nisimura
1194 1.1 nisimura if (isr & INT_DMRS)
1195 1.1 nisimura rxintr(sc);
1196 1.1 nisimura if (isr & INT_DMTS)
1197 1.1 nisimura txreap(sc);
1198 1.1 nisimura if (isr & INT_DMLCS)
1199 1.1 nisimura lnkchg(sc);
1200 1.1 nisimura if (isr & INT_DMRBUS)
1201 1.42 nisimura aprint_error_dev(sc->sc_dev, "Rx descriptor full\n");
1202 1.1 nisimura
1203 1.1 nisimura CSR_WRITE_4(sc, INTST, isr);
1204 1.43 nisimura
1205 1.43 nisimura if (ifp->if_flags & IFF_RUNNING)
1206 1.43 nisimura if_schedule_deferred_start(ifp);
1207 1.43 nisimura
1208 1.1 nisimura return 1;
1209 1.1 nisimura }
1210 1.1 nisimura
/*
 * rxintr:
 *	Receive interrupt service.  Walk the Rx ring from sc_rxptr
 *	until an un-owned (still empty) descriptor is found, handing
 *	good frames up the stack and recycling descriptors on error.
 */
static void
rxintr(struct kse_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct kse_rxsoft *rxs;
	struct mbuf *m;
	uint32_t rxstat;
	int i, len;

	for (i = sc->sc_rxptr; /*CONSTCOND*/ 1; i = KSE_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		KSE_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		rxstat = sc->sc_rxdescs[i].r0;

		if (rxstat & R0_OWN) /* desc is left empty */
			break;

		/* R0_FS | R0_LS must have been marked for this desc */

		if (rxstat & R0_ES) {
			/* error summary set: count it, report the
			 * specific cause(s), and recycle the slot */
			if_statinc(ifp, if_ierrors);
#define PRINTERR(bit, str) \
		if (rxstat & (bit)) \
			aprint_error_dev(sc->sc_dev, \
			    "%s\n", str)
			PRINTERR(R0_TL, "frame too long");
			PRINTERR(R0_RF, "runt frame");
			PRINTERR(R0_CE, "bad FCS");
#undef PRINTERR
			KSE_INIT_RXDESC(sc, i);
			continue;
		}

		/* HW errata; frame might be too small or too large */

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		len = rxstat & R0_FL_MASK;
		len -= ETHER_CRC_LEN;	/* Trim CRC off */
		m = rxs->rxs_mbuf;

		if (add_rxbuf(sc, i) != 0) {
			/* no replacement mbuf: drop the frame and put
			 * the current buffer back on the ring */
			if_statinc(ifp, if_ierrors);
			KSE_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat,
			    rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			continue;
		}

		m_set_rcvif(m, ifp);
		m->m_pkthdr.len = m->m_len = len;

		/* translate HW checksum results into mbuf csum flags */
		if (sc->sc_mcsum) {
			m->m_pkthdr.csum_flags |= sc->sc_mcsum;
			if (rxstat & R0_IPE)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
			if (rxstat & (R0_TCPE | R0_UDPE))
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}
		if_percpuq_enqueue(ifp->if_percpuq, m);
#ifdef KSEDIAGNOSTIC
		if (kse_monitor_rxintr > 0) {
			aprint_error_dev(sc->sc_dev,
			    "m stat %x data %p len %d\n",
			    rxstat, m->m_data, m->m_len);
		}
#endif
	}
	/* remember where to resume next time */
	sc->sc_rxptr = i;
}
1287 1.1 nisimura
/*
 * txreap:
 *	Reclaim completed transmit jobs: starting at sc_txsdirty,
 *	free each job whose last descriptor the chip has released
 *	(T0_OWN clear), returning its descriptors and mbuf.
 */
static void
txreap(struct kse_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct kse_txsoft *txs;
	uint32_t txstat;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	/* loop advances until all job slots are free or one is busy */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != KSE_TXQUEUELEN;
	     i = KSE_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		KSE_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		txstat = sc->sc_txdescs[txs->txs_lastdesc].t0;

		if (txstat & T0_OWN) /* desc is still in use */
			break;

		/* There is no way to tell transmission status per frame */

		if_statinc(ifp, if_opackets);

		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}
	sc->sc_txsdirty = i;
	/* everything reclaimed: disarm the watchdog */
	if (sc->sc_txsfree == KSE_TXQUEUELEN)
		ifp->if_timer = 0;
}
1325 1.1 nisimura
/*
 * lnkchg:
 *	Handle a link status change interrupt.  The ifmr result is a
 *	throwaway; the call is made for kse_ifmedia_sts()'s side
 *	effect of polling and refreshing the mii state.
 */
static void
lnkchg(struct kse_softc *sc)
{
	struct ifmediareq ifmr;

#if KSE_LINKDEBUG == 1
	uint16_t p1sr = CSR_READ_2(sc, P1SR);
	printf("link %s detected\n", (p1sr & PxSR_LINKUP) ? "up" : "down");
#endif
	kse_ifmedia_sts(&sc->sc_ethercom.ec_if, &ifmr);
}
1337 1.1 nisimura
1338 1.1 nisimura static int
1339 1.42 nisimura kse_ifmedia_upd(struct ifnet *ifp)
1340 1.1 nisimura {
1341 1.1 nisimura struct kse_softc *sc = ifp->if_softc;
1342 1.42 nisimura struct ifmedia *ifm = &sc->sc_mii.mii_media;
1343 1.39 nisimura uint16_t p1cr4;
1344 1.42 nisimura
1345 1.39 nisimura p1cr4 = 0;
1346 1.39 nisimura if (IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_AUTO) {
1347 1.39 nisimura p1cr4 |= PxCR_STARTNEG; /* restart AN */
1348 1.39 nisimura p1cr4 |= PxCR_AUTOEN; /* enable AN */
1349 1.39 nisimura p1cr4 |= PxCR_USEFC; /* advertise flow control pause */
1350 1.42 nisimura p1cr4 |= 0xf; /* adv. 100FDX,100HDX,10FDX,10HDX */
1351 1.39 nisimura } else {
1352 1.39 nisimura if (IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_100_TX)
1353 1.39 nisimura p1cr4 |= PxCR_SPD100;
1354 1.1 nisimura if (ifm->ifm_media & IFM_FDX)
1355 1.39 nisimura p1cr4 |= PxCR_USEFDX;
1356 1.1 nisimura }
1357 1.39 nisimura CSR_WRITE_2(sc, P1CR4, p1cr4);
1358 1.42 nisimura #if KSE_LINKDEBUG == 1
1359 1.39 nisimura printf("P1CR4: %04x\n", p1cr4);
1360 1.39 nisimura #endif
1361 1.1 nisimura return 0;
1362 1.1 nisimura }
1363 1.1 nisimura
1364 1.1 nisimura static void
1365 1.42 nisimura kse_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1366 1.1 nisimura {
1367 1.1 nisimura struct kse_softc *sc = ifp->if_softc;
1368 1.42 nisimura struct mii_data *mii = &sc->sc_mii;
1369 1.1 nisimura
1370 1.42 nisimura mii_pollstat(mii);
1371 1.42 nisimura ifmr->ifm_status = mii->mii_media_status;
1372 1.56 nisimura ifmr->ifm_active = sc->sc_flowflags |
1373 1.56 nisimura (mii->mii_media_active & ~IFM_ETH_FMASK);
1374 1.1 nisimura }
1375 1.1 nisimura
1376 1.1 nisimura static void
1377 1.42 nisimura nopifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1378 1.40 nisimura {
1379 1.40 nisimura struct kse_softc *sc = ifp->if_softc;
1380 1.40 nisimura struct ifmedia *ifm = &sc->sc_media;
1381 1.40 nisimura
1382 1.42 nisimura #if KSE_LINKDEBUG == 2
1383 1.40 nisimura printf("p1sr: %04x, p2sr: %04x\n", CSR_READ_2(sc, P1SR), CSR_READ_2(sc, P2SR));
1384 1.40 nisimura #endif
1385 1.40 nisimura
1386 1.40 nisimura /* 8842 MAC pretends 100FDX all the time */
1387 1.40 nisimura ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
1388 1.42 nisimura ifmr->ifm_active = ifm->ifm_cur->ifm_media |
1389 1.42 nisimura IFM_FLOW | IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE;
1390 1.40 nisimura }
1391 1.40 nisimura
1392 1.40 nisimura static void
1393 1.1 nisimura phy_tick(void *arg)
1394 1.1 nisimura {
1395 1.1 nisimura struct kse_softc *sc = arg;
1396 1.42 nisimura struct mii_data *mii = &sc->sc_mii;
1397 1.1 nisimura int s;
1398 1.1 nisimura
1399 1.56 nisimura if (sc->sc_chip == 0x8841) {
1400 1.56 nisimura s = splnet();
1401 1.56 nisimura mii_tick(mii);
1402 1.56 nisimura splx(s);
1403 1.56 nisimura }
1404 1.56 nisimura #ifdef KSE_EVENT_COUNTERS
1405 1.56 nisimura stat_tick(arg);
1406 1.56 nisimura #endif
1407 1.42 nisimura callout_schedule(&sc->sc_tick_ch, hz);
1408 1.42 nisimura }
1409 1.42 nisimura
/*
 * CSR offsets at which the 8841 mirrors the standard MII registers
 * of PHY 1, indexed by MII register number.  Only these six
 * registers (BMCR..ANLPAR) are available.
 */
static const uint16_t phy1csr[] = {
/* 0 BMCR */	0x4d0,
/* 1 BMSR */	0x4d2,
/* 2 PHYID1 */	0x4d6,	/* 0x0022 - PHY1HR */
/* 3 PHYID2 */	0x4d4,	/* 0x1430 - PHY1LR */
/* 4 ANAR */	0x4d8,
/* 5 ANLPAR */	0x4da,
};
1418 1.42 nisimura
1419 1.42 nisimura int
1420 1.42 nisimura kse_mii_readreg(device_t self, int phy, int reg, uint16_t *val)
1421 1.42 nisimura {
1422 1.42 nisimura struct kse_softc *sc = device_private(self);
1423 1.42 nisimura
1424 1.42 nisimura if (phy != 1 || reg >= __arraycount(phy1csr) || reg < 0)
1425 1.42 nisimura return EINVAL;
1426 1.42 nisimura *val = CSR_READ_2(sc, phy1csr[reg]);
1427 1.42 nisimura return 0;
1428 1.42 nisimura }
1429 1.42 nisimura
1430 1.42 nisimura int
1431 1.42 nisimura kse_mii_writereg(device_t self, int phy, int reg, uint16_t val)
1432 1.42 nisimura {
1433 1.42 nisimura struct kse_softc *sc = device_private(self);
1434 1.42 nisimura
1435 1.42 nisimura if (phy != 1 || reg >= __arraycount(phy1csr) || reg < 0)
1436 1.42 nisimura return EINVAL;
1437 1.42 nisimura CSR_WRITE_2(sc, phy1csr[reg], val);
1438 1.42 nisimura return 0;
1439 1.42 nisimura }
1440 1.42 nisimura
/*
 * mii(4) status-change callback.  Records the autonegotiated flow
 * control result and reprograms the MAC's PAUSE-frame enables
 * (TXC_FCE/RXC_FCE) to match the resolved media.
 */
void
kse_mii_statchg(struct ifnet *ifp)
{
	struct kse_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

#if KSE_LINKDEBUG == 1
	/* decode P1SR register value */
	uint16_t p1sr = CSR_READ_2(sc, P1SR);
	printf("P1SR %04x, spd%d", p1sr, (p1sr & PxSR_SPD100) ? 100 : 10);
	if (p1sr & PxSR_FDX)
		printf(",full-duplex");
	if (p1sr & PxSR_RXFLOW)
		printf(",rxpause");
	if (p1sr & PxSR_TXFLOW)
		printf(",txpause");
	printf("\n");
	/* show resolved mii(4) parameters to compare against above */
	printf("MII spd%d",
	    (int)(sc->sc_ethercom.ec_if.if_baudrate / IF_Mbps(1)));
	if (mii->mii_media_active & IFM_FDX)
		printf(",full-duplex");
	if (mii->mii_media_active & IFM_FLOW) {
		printf(",flowcontrol");
		if (mii->mii_media_active & IFM_ETH_RXPAUSE)
			printf(",rxpause");
		if (mii->mii_media_active & IFM_ETH_TXPAUSE)
			printf(",txpause");
	}
	printf("\n");
#endif
	/* Get flow control negotiation result. */
	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags)
		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;

	/*
	 * Adjust MAC PAUSE flow control.  PAUSE is only meaningful in
	 * full duplex; TX and RX directions are enabled independently.
	 */
	if ((mii->mii_media_active & IFM_FDX)
	    && (sc->sc_flowflags & IFM_ETH_TXPAUSE))
		sc->sc_txc |= TXC_FCE;
	else
		sc->sc_txc &= ~TXC_FCE;
	if ((mii->mii_media_active & IFM_FDX)
	    && (sc->sc_flowflags & IFM_ETH_RXPAUSE))
		sc->sc_rxc |= RXC_FCE;
	else
		sc->sc_rxc &= ~RXC_FCE;
	/* Push the updated TX/RX control words to the MAC. */
	CSR_WRITE_4(sc, MDTXC, sc->sc_txc);
	CSR_WRITE_4(sc, MDRXC, sc->sc_rxc);
#if KSE_LINKDEBUG == 1
	printf("%ctxfce, %crxfce\n",
	    (sc->sc_txc & TXC_FCE) ? '+' : '-',
	    (sc->sc_rxc & RXC_FCE) ? '+' : '-');
#endif
}
1496 1.8 nisimura
1497 1.8 nisimura #ifdef KSE_EVENT_COUNTERS
/*
 * Accumulate the per-port hardware MIB event counters into the
 * software evcnt(9) mirrors (sc_ext.pev[][]).  Counters are read
 * through the IACR/IADR4+IADR5 indirect-access window; a 30-bit
 * counter that has overflowed is accounted as its saturation
 * value 0x3fffffff.
 */
static void
stat_tick(void *arg)
{
	struct kse_softc *sc = arg;
	struct ksext *ee = &sc->sc_ext;
	int nport, p, i, reg, val;

	/* KSZ8842 exposes three ports, KSZ8841 only one. */
	nport = (sc->sc_chip == 0x8842) ? 3 : 1;
	for (p = 0; p < nport; p++) {
		/* read 34 ev counters by indirect read via IACR */
		for (i = 0; i < 32; i++) {
			reg = EVCNTBR + p * 0x20 + i;
			CSR_WRITE_2(sc, IACR, reg);
			/* 30-bit counter value are halved in IADR5 & IADR4 */
			do {
				/* poll until the latch bit reports valid data */
				val = CSR_READ_2(sc, IADR5) << 16;
			} while ((val & IADR_LATCH) == 0);
			if (val & IADR_OVF) {
				(void)CSR_READ_2(sc, IADR4); /* drain low half */
				val = 0x3fffffff; /* has made overflow */
			}
			else {
				val &= 0x3fff0000;		/* 29:16 */
				val |= CSR_READ_2(sc, IADR4);	/* 15:0 */
			}
			ee->pev[p][i].ev_count += val; /* ev0 thru 31 */
		}
		/* ev32 and ev33 are 16-bit counter */
		CSR_WRITE_2(sc, IACR, EVCNTBR + 0x100 + p);
		ee->pev[p][32].ev_count += CSR_READ_2(sc, IADR4); /* ev32 */
		/*
		 * NOTE(review): ev32 uses offset "+ p" but ev33 uses
		 * "+ p * 3 + 1", so for p > 0 the two ranges interleave
		 * (p=1 ev32 address == p=0 ev33 address).  Looks
		 * suspicious — verify against the KS8842 MIB counter
		 * address map in the datasheet.
		 */
		CSR_WRITE_2(sc, IACR, EVCNTBR + 0x100 + p * 3 + 1);
		ee->pev[p][33].ev_count += CSR_READ_2(sc, IADR4); /* ev33 */
	}
}
1532 1.8 nisimura
/*
 * Reset the statistics: perform a dummy indirect read of every
 * hardware MIB counter (apparently relying on read-to-clear
 * behavior — confirm against the KS8842 datasheet) and zero the
 * software evcnt(9) mirrors.
 */
static void
zerostats(struct kse_softc *sc)
{
	struct ksext *ee = &sc->sc_ext;
	int nport, p, i, reg, val;

	/* Make sure all the HW counters get zero */
	nport = (sc->sc_chip == 0x8842) ? 3 : 1;
	for (p = 0; p < nport; p++) {
		for (i = 0; i < 32; i++) {
			reg = EVCNTBR + p * 0x20 + i;
			CSR_WRITE_2(sc, IACR, reg);
			do {
				/* poll until the latch bit reports valid data */
				val = CSR_READ_2(sc, IADR5) << 16;
			} while ((val & IADR_LATCH) == 0);
			(void)CSR_READ_2(sc, IADR4); /* discard low half */
			ee->pev[p][i].ev_count = 0;
		}
		/*
		 * NOTE(review): same ev32/ev33 addressing asymmetry as in
		 * stat_tick() ("+ p" vs "+ p * 3 + 1") — verify both
		 * against the datasheet together.
		 */
		CSR_WRITE_2(sc, IACR, EVCNTBR + 0x100 + p);
		(void)CSR_READ_2(sc, IADR4);
		CSR_WRITE_2(sc, IACR, EVCNTBR + 0x100 + p * 3 + 1);
		(void)CSR_READ_2(sc, IADR4);
		ee->pev[p][32].ev_count = 0;
		ee->pev[p][33].ev_count = 0;
	}
}
1559 1.8 nisimura #endif
1560