if_kse.c revision 1.56 1 1.56 nisimura /* $NetBSD: if_kse.c,v 1.56 2020/09/20 23:48:09 nisimura Exp $ */
2 1.1 nisimura
3 1.15 nisimura /*-
4 1.15 nisimura * Copyright (c) 2006 The NetBSD Foundation, Inc.
5 1.15 nisimura * All rights reserved.
6 1.15 nisimura *
7 1.15 nisimura * This code is derived from software contributed to The NetBSD Foundation
8 1.15 nisimura * by Tohru Nishimura.
9 1.1 nisimura *
10 1.1 nisimura * Redistribution and use in source and binary forms, with or without
11 1.1 nisimura * modification, are permitted provided that the following conditions
12 1.1 nisimura * are met:
13 1.1 nisimura * 1. Redistributions of source code must retain the above copyright
14 1.1 nisimura * notice, this list of conditions and the following disclaimer.
15 1.1 nisimura * 2. Redistributions in binary form must reproduce the above copyright
16 1.1 nisimura * notice, this list of conditions and the following disclaimer in the
17 1.1 nisimura * documentation and/or other materials provided with the distribution.
18 1.1 nisimura *
19 1.15 nisimura * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 1.15 nisimura * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 1.15 nisimura * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 1.15 nisimura * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 1.15 nisimura * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 1.15 nisimura * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 1.15 nisimura * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 1.15 nisimura * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 1.15 nisimura * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 1.15 nisimura * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 1.15 nisimura * POSSIBILITY OF SUCH DAMAGE.
30 1.1 nisimura */
31 1.1 nisimura
32 1.41 nisimura /*
33 1.42 nisimura * Micrel 8841/8842 10/100 PCI ethernet driver
34 1.41 nisimura */
35 1.41 nisimura
36 1.1 nisimura #include <sys/cdefs.h>
37 1.56 nisimura __KERNEL_RCSID(0, "$NetBSD: if_kse.c,v 1.56 2020/09/20 23:48:09 nisimura Exp $");
38 1.1 nisimura
39 1.1 nisimura #include <sys/param.h>
40 1.51 nisimura #include <sys/bus.h>
41 1.51 nisimura #include <sys/intr.h>
42 1.51 nisimura #include <sys/device.h>
43 1.1 nisimura #include <sys/callout.h>
44 1.51 nisimura #include <sys/ioctl.h>
45 1.56 nisimura #include <sys/mbuf.h>
46 1.51 nisimura #include <sys/malloc.h>
47 1.56 nisimura #include <sys/rndsource.h>
48 1.51 nisimura #include <sys/errno.h>
49 1.51 nisimura #include <sys/systm.h>
50 1.1 nisimura #include <sys/kernel.h>
51 1.1 nisimura
52 1.1 nisimura #include <net/if.h>
53 1.1 nisimura #include <net/if_media.h>
54 1.1 nisimura #include <net/if_dl.h>
55 1.1 nisimura #include <net/if_ether.h>
56 1.42 nisimura #include <dev/mii/mii.h>
57 1.42 nisimura #include <dev/mii/miivar.h>
58 1.1 nisimura #include <net/bpf.h>
59 1.1 nisimura
60 1.1 nisimura #include <dev/pci/pcivar.h>
61 1.1 nisimura #include <dev/pci/pcireg.h>
62 1.1 nisimura #include <dev/pci/pcidevs.h>
63 1.1 nisimura
#define KSE_LINKDEBUG 0

/*
 * CSR access helpers; all chip registers live in the PCI memory
 * window mapped at attach time (sc_st/sc_sh, BAR 0x10).
 */
#define CSR_READ_4(sc, off) \
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (off))
#define CSR_WRITE_4(sc, off, val) \
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (off), (val))
#define CSR_READ_2(sc, off) \
	bus_space_read_2((sc)->sc_st, (sc)->sc_sh, (off))
#define CSR_WRITE_2(sc, off, val) \
	bus_space_write_2((sc)->sc_st, (sc)->sc_sh, (off), (val))
74 1.1 nisimura
/* KSZ884x register offsets (byte offsets into the CSR window) */
#define MDTXC	0x000	/* DMA transmit control */
#define MDRXC	0x004	/* DMA receive control */
#define MDTSC	0x008	/* trigger DMA transmit (SC) */
#define MDRSC	0x00c	/* trigger DMA receive (SC) */
#define TDLB	0x010	/* transmit descriptor list base */
#define RDLB	0x014	/* receive descriptor list base */
#define MTR0	0x020	/* multicast table 31:0 */
#define MTR1	0x024	/* multicast table 63:32 */
#define INTEN	0x028	/* interrupt enable */
#define INTST	0x02c	/* interrupt status */
#define MAAL0	0x080	/* additional MAC address 0 low */
#define MAAH0	0x084	/* additional MAC address 0 high */
#define MARL	0x200	/* MAC address low */
#define MARM	0x202	/* MAC address middle */
#define MARH	0x204	/* MAC address high */
#define GRR	0x216	/* global reset */
#define SIDER	0x400	/* switch ID and function enable */
#define SGCR3	0x406	/* switch function control 3 */
#define CR3_USEHDX	(1U<<6)	/* use half-duplex 8842 host port */
#define CR3_USEFC	(1U<<5)	/* use flowcontrol 8842 host port */
#define IACR	0x4a0	/* indirect access control */
#define IADR1	0x4a2	/* indirect access data 66:63 */
#define IADR2	0x4a4	/* indirect access data 47:32 */
#define IADR3	0x4a6	/* indirect access data 63:48 */
#define IADR4	0x4a8	/* indirect access data 15:0 */
#define IADR5	0x4aa	/* indirect access data 31:16 */
#define IADR_LATCH	(1U<<30) /* latch completed indication */
#define IADR_OVF	(1U<<31) /* overflow detected */
#define P1CR4	0x512	/* port 1 control 4 */
#define P1SR	0x514	/* port 1 status */
#define P2CR4	0x532	/* port 2 control 4 */
#define P2SR	0x534	/* port 2 status */
/* PxCR/PxSR bit layout is shared by both external switch ports */
#define PxCR_STARTNEG	(1U<<9)	/* restart auto negotiation */
#define PxCR_AUTOEN	(1U<<7)	/* auto negotiation enable */
#define PxCR_SPD100	(1U<<6)	/* force speed 100 */
#define PxCR_USEFDX	(1U<<5)	/* force full duplex */
#define PxCR_USEFC	(1U<<4)	/* advertise pause flow control */
#define PxSR_ACOMP	(1U<<6)	/* auto negotiation completed */
#define PxSR_SPD100	(1U<<10) /* speed is 100Mbps */
#define PxSR_FDX	(1U<<9)	/* full duplex */
#define PxSR_LINKUP	(1U<<5)	/* link is good */
#define PxSR_RXFLOW	(1U<<12) /* receive flow control active */
#define PxSR_TXFLOW	(1U<<11) /* transmit flow control active */
#define P1VIDCR	0x504	/* port 1 vtag */
#define P2VIDCR	0x524	/* port 2 vtag */
#define P3VIDCR	0x544	/* 8842 host vtag */
#define EVCNTBR	0x1c00	/* 3 sets of 34 event counters */
122 1.1 nisimura
/* MDTXC: DMA transmit control bits */
#define TXC_BS_MSK	0x3f000000	/* burst size */
#define TXC_BS_SFT	(24)		/* 1,2,4,8,16,32 or 0 for unlimited */
#define TXC_UCG	(1U<<18)	/* generate UDP checksum */
#define TXC_TCG	(1U<<17)	/* generate TCP checksum */
#define TXC_ICG	(1U<<16)	/* generate IP checksum */
#define TXC_FCE	(1U<<9)		/* generate PAUSE to moderate Rx lvl */
#define TXC_EP	(1U<<2)		/* enable automatic padding */
#define TXC_AC	(1U<<1)		/* add CRC to frame */
#define TXC_TEN	(1)		/* enable DMA to run */

/* MDRXC: DMA receive control bits */
#define RXC_BS_MSK	0x3f000000	/* burst size */
#define RXC_BS_SFT	(24)		/* 1,2,4,8,16,32 or 0 for unlimited */
#define RXC_IHAE	(1U<<19)	/* IP header alignment enable */
#define RXC_UCC	(1U<<18)	/* run UDP checksum */
#define RXC_TCC	(1U<<17)	/* run TCP checksum */
#define RXC_ICC	(1U<<16)	/* run IP checksum */
#define RXC_FCE	(1U<<9)		/* accept PAUSE to throttle Tx */
#define RXC_RB	(1U<<6)		/* receive broadcast frame */
#define RXC_RM	(1U<<5)		/* receive all multicast (inc. RB) */
#define RXC_RU	(1U<<4)		/* receive 16 additional unicasts */
#define RXC_RE	(1U<<3)		/* accept error frame */
#define RXC_RA	(1U<<2)		/* receive all frame */
#define RXC_MHTE	(1U<<1)	/* use multicast hash table */
#define RXC_REN	(1)		/* enable DMA to run */

/* INTEN/INTST: interrupt enable/status bits */
#define INT_DMLCS	(1U<<31)	/* link status change */
#define INT_DMTS	(1U<<30)	/* sending desc. has posted Tx done */
#define INT_DMRS	(1U<<29)	/* frame was received */
#define INT_DMRBUS	(1U<<27)	/* Rx descriptor pool is full */
#define INT_DMxPSS	(3U<<25)	/* 26:25 DMA Tx/Rx have stopped */
153 1.1 nisimura
/*
 * Hardware Tx/Rx descriptors, shared with the chip's DMA engine.
 * For rdes: r0 carries the OWN bit and completion status, r1 the
 * buffer size, r2 the buffer bus address (see KSE_INIT_RXDESC).
 * tdes presumably mirrors that layout for transmit — see T0_*/T1_*
 * bit definitions below.
 */
struct tdes {
	uint32_t t0, t1, t2, t3;
};

struct rdes {
	uint32_t r0, r1, r2, r3;
};
161 1.56 nisimura
#define T0_OWN	(1U<<31)	/* desc is ready to Tx */

#define R0_OWN	(1U<<31)	/* desc is empty */
#define R0_FS	(1U<<30)	/* first segment of frame */
#define R0_LS	(1U<<29)	/* last segment of frame */
#define R0_IPE	(1U<<28)	/* IP checksum error */
#define R0_TCPE	(1U<<27)	/* TCP checksum error */
#define R0_UDPE	(1U<<26)	/* UDP checksum error */
#define R0_ES	(1U<<25)	/* error summary */
#define R0_MF	(1U<<24)	/* multicast frame */
#define R0_SPN	0x00300000	/* 21:20 switch port 1/2 */
#define R0_ALIGN 0x00300000	/* 21:20 (KSZ8692P) Rx align amount */
#define R0_RE	(1U<<19)	/* MII reported error */
#define R0_TL	(1U<<18)	/* frame too long, beyond 1518 */
#define R0_RF	(1U<<17)	/* damaged runt frame */
#define R0_CE	(1U<<16)	/* CRC error */
#define R0_FT	(1U<<15)	/* frame type */
#define R0_FL_MASK 0x7ff	/* frame length 10:0 */

#define T1_IC	(1U<<31)	/* post interrupt on complete */
#define T1_FS	(1U<<30)	/* first segment of frame */
#define T1_LS	(1U<<29)	/* last segment of frame */
#define T1_IPCKG (1U<<28)	/* generate IP checksum */
#define T1_TCPCKG (1U<<27)	/* generate TCP checksum */
#define T1_UDPCKG (1U<<26)	/* generate UDP checksum */
#define T1_TER	(1U<<25)	/* end of ring */
#define T1_SPN	0x00300000	/* 21:20 switch port 1/2 */
#define T1_TBS_MASK 0x7ff	/* segment size 10:0 */

#define R1_RER	(1U<<25)	/* end of ring */
#define R1_RBS_MASK 0x7fc	/* segment size 10:2; low 2 bits forced 0 */

/* software ring geometry; all counts must stay powers of two */
#define KSE_NTXSEGS		16
#define KSE_TXQUEUELEN		64
#define KSE_TXQUEUELEN_MASK	(KSE_TXQUEUELEN - 1)
#define KSE_TXQUEUE_GC		(KSE_TXQUEUELEN / 4)
#define KSE_NTXDESC		256
#define KSE_NTXDESC_MASK	(KSE_NTXDESC - 1)
#define KSE_NEXTTX(x)		(((x) + 1) & KSE_NTXDESC_MASK)
#define KSE_NEXTTXS(x)		(((x) + 1) & KSE_TXQUEUELEN_MASK)

#define KSE_NRXDESC		64
#define KSE_NRXDESC_MASK	(KSE_NRXDESC - 1)
#define KSE_NEXTRX(x)		(((x) + 1) & KSE_NRXDESC_MASK)
206 1.1 nisimura
/*
 * Control data: both descriptor rings live in one DMA-safe
 * allocation, loaded into sc_cddmamap at attach time.
 */
struct kse_control_data {
	struct tdes kcd_txdescs[KSE_NTXDESC];
	struct rdes kcd_rxdescs[KSE_NRXDESC];
};
#define KSE_CDOFF(x)	offsetof(struct kse_control_data, x)
#define KSE_CDTXOFF(x)	KSE_CDOFF(kcd_txdescs[(x)])
#define KSE_CDRXOFF(x)	KSE_CDOFF(kcd_rxdescs[(x)])

/* per-job software state of a queued transmit packet */
struct kse_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/* per-descriptor software state of a posted receive buffer */
struct kse_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};
227 1.1 nisimura
/* per-device driver instance state */
struct kse_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_memsize;		/* csr map size */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	pci_chipset_tag_t sc_pc;	/* PCI chipset tag */
	struct ethercom sc_ethercom;	/* Ethernet common data */
	void *sc_ih;			/* interrupt cookie */

	struct mii_data sc_mii;		/* mii 8841 */
	struct ifmedia sc_media;	/* ifmedia 8842 */
	int sc_flowflags;		/* 802.3x PAUSE flow control */

	callout_t sc_tick_ch;		/* MII tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	struct kse_control_data *sc_control_data;
#define sc_txdescs	sc_control_data->kcd_txdescs
#define sc_rxdescs	sc_control_data->kcd_rxdescs

	struct kse_txsoft sc_txsoft[KSE_TXQUEUELEN];
	struct kse_rxsoft sc_rxsoft[KSE_NRXDESC];
	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */
	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next ready Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */
	int sc_rxptr;			/* next ready Rx descriptor/descsoft */

	uint32_t sc_txc, sc_rxc;	/* shadow of MDTXC/MDRXC */
	uint32_t sc_t1csum;		/* T1 checksum-offload bits to set */
	int sc_mcsum;			/* Rx csum flags for M_CSUM_* */
	uint32_t sc_inten;		/* shadow of INTEN */
	uint32_t sc_chip;		/* PCI product id: 0x8841 or 0x8842 */

	krndsource_t rnd_source;	/* random source */

#ifdef KSE_EVENT_COUNTERS
	struct ksext {
		char evcntname[3][8];
		struct evcnt pev[3][34];
	} sc_ext;			/* switch statistics */
#endif
};
275 1.1 nisimura
/* bus address of Tx/Rx descriptor 'x' inside the control-data map */
#define KSE_CDTXADDR(sc, x)	((sc)->sc_cddma + KSE_CDTXOFF((x)))
#define KSE_CDRXADDR(sc, x)	((sc)->sc_cddma + KSE_CDRXOFF((x)))

/*
 * Sync 'n' Tx descriptors starting at index 'x'; the operation is
 * split in two when the range wraps past the end of the ring.
 */
#define KSE_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > KSE_NTXDESC) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    KSE_CDTXOFF(__x), sizeof(struct tdes) *		\
		    (KSE_NTXDESC - __x), (ops));			\
		__n -= (KSE_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    KSE_CDTXOFF(__x), sizeof(struct tdes) * __n, (ops));	\
} while (/*CONSTCOND*/0)

/* sync a single Rx descriptor */
#define KSE_CDRXSYNC(sc, x, ops)					\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    KSE_CDRXOFF((x)), sizeof(struct rdes), (ops));		\
} while (/*CONSTCOND*/0)

/*
 * Rearm Rx descriptor 'x': point it at the start of its mbuf
 * cluster, restore the full buffer size, and hand ownership back
 * to the hardware (R0_OWN).
 */
#define KSE_INIT_RXDESC(sc, x)						\
do {									\
	struct kse_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	struct rdes *__rxd = &(sc)->sc_rxdescs[(x)];			\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	__m->m_data = __m->m_ext.ext_buf;				\
	__rxd->r2 = __rxs->rxs_dmamap->dm_segs[0].ds_addr;		\
	__rxd->r1 = R1_RBS_MASK /* __m->m_ext.ext_size */;		\
	__rxd->r0 = R0_OWN;						\
	KSE_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); \
} while (/*CONSTCOND*/0)
318 1.1 nisimura
u_int kse_burstsize = 8;	/* DMA burst length tuning knob */

#ifdef KSEDIAGNOSTIC
u_int kse_monitor_rxintr;	/* fragmented UDP csum HW bug hook */
#endif

static int kse_match(device_t, cfdata_t, void *);
static void kse_attach(device_t, device_t, void *);

CFATTACH_DECL_NEW(kse, sizeof(struct kse_softc),
    kse_match, kse_attach, NULL, NULL);

/* forward declarations; definitions follow below */
static int kse_ioctl(struct ifnet *, u_long, void *);
static void kse_start(struct ifnet *);
static void kse_watchdog(struct ifnet *);
static int kse_init(struct ifnet *);
static void kse_stop(struct ifnet *, int);
static void kse_reset(struct kse_softc *);
static void kse_set_rcvfilt(struct kse_softc *);
static int add_rxbuf(struct kse_softc *, int);
static void rxdrain(struct kse_softc *);
static int kse_intr(void *);
static void rxintr(struct kse_softc *);
static void txreap(struct kse_softc *);
static void lnkchg(struct kse_softc *);
static int kse_ifmedia_upd(struct ifnet *);
static void kse_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void nopifmedia_sts(struct ifnet *, struct ifmediareq *);
static void phy_tick(void *);
/* MII accessors are non-static; shared with mii_attach machinery */
int kse_mii_readreg(device_t, int, int, uint16_t *);
int kse_mii_writereg(device_t, int, int, uint16_t);
void kse_mii_statchg(struct ifnet *);
#ifdef KSE_EVENT_COUNTERS
static void stat_tick(void *);
static void zerostats(struct kse_softc *);
#endif
355 1.1 nisimura
356 1.1 nisimura static int
357 1.18 cegger kse_match(device_t parent, cfdata_t match, void *aux)
358 1.1 nisimura {
359 1.1 nisimura struct pci_attach_args *pa = (struct pci_attach_args *)aux;
360 1.1 nisimura
361 1.1 nisimura if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_MICREL &&
362 1.1 nisimura (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_MICREL_KSZ8842 ||
363 1.1 nisimura PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_MICREL_KSZ8841) &&
364 1.1 nisimura PCI_CLASS(pa->pa_class) == PCI_CLASS_NETWORK)
365 1.1 nisimura return 1;
366 1.1 nisimura
367 1.1 nisimura return 0;
368 1.1 nisimura }
369 1.1 nisimura
/*
 * Autoconf attach: map the CSR window, hook the interrupt, read the
 * station address from on-chip EEPROM-loaded registers, allocate the
 * DMA control data and per-packet maps, and wire up ifnet/ifmedia.
 * Error paths unwind the resources acquired so far in reverse order.
 */
static void
kse_attach(device_t parent, device_t self, void *aux)
{
	struct kse_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifmedia *ifm;
	uint8_t enaddr[ETHER_ADDR_LEN];
	bus_dma_segment_t seg;
	int i, error, nseg;
	char intrbuf[PCI_INTRSTR_LEN];

	aprint_normal(": Micrel KSZ%04x Ethernet (rev. 0x%02x)\n",
	    PCI_PRODUCT(pa->pa_id), PCI_REVISION(pa->pa_class));

	/* Map the 32-bit memory BAR holding the chip CSRs. */
	if (pci_mapreg_map(pa, 0x10,
	    PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT,
	    0, &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_memsize) != 0) {
		aprint_error_dev(self, "unable to map device registers\n");
		return;
	}

	/* Make sure bus mastering is enabled. */
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) |
	    PCI_COMMAND_MASTER_ENABLE);

	/* Power up chip if necessary. */
	if ((error = pci_activate(pc, pa->pa_tag, self, NULL))
	    && error != EOPNOTSUPP) {
		aprint_error_dev(self, "cannot activate %d\n", error);
		return;
	}

	/* Map and establish our interrupt. */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(self, "unable to map interrupt\n");
		goto fail;
	}
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
	sc->sc_ih = pci_intr_establish_xname(pc, ih, IPL_NET, kse_intr, sc,
	    device_xname(self));
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto fail;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	sc->sc_dev = self;
	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pc = pa->pa_pc;
	sc->sc_chip = PCI_PRODUCT(pa->pa_id);

	/*
	 * Read the Ethernet address from the EEPROM.
	 * MARL/MARM/MARH hold the address with the most significant
	 * octet in the high byte of MARH.
	 */
	i = CSR_READ_2(sc, MARL);
	enaddr[5] = i;
	enaddr[4] = i >> 8;
	i = CSR_READ_2(sc, MARM);
	enaddr[3] = i;
	enaddr[2] = i >> 8;
	i = CSR_READ_2(sc, MARH);
	enaddr[1] = i;
	enaddr[0] = i >> 8;
	aprint_normal_dev(self,
	    "Ethernet address %s\n", ether_sprintf(enaddr));

	/*
	 * Enable chip function.
	 */
	CSR_WRITE_2(sc, SIDER, 1);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct kse_control_data), PAGE_SIZE, 0, &seg, 1, &nseg, 0);
	if (error != 0) {
		aprint_error_dev(self,
		    "unable to allocate control data, error = %d\n", error);
		goto fail_0;
	}
	error = bus_dmamem_map(sc->sc_dmat, &seg, nseg,
	    sizeof(struct kse_control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(self,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}
	error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct kse_control_data), 1,
	    sizeof(struct kse_control_data), 0, 0, &sc->sc_cddmamap);
	if (error != 0) {
		aprint_error_dev(self,
		    "unable to create control data DMA map, "
		    "error = %d\n", error);
		goto fail_2;
	}
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct kse_control_data), NULL, 0);
	if (error != 0) {
		aprint_error_dev(self,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}
	/* One map per Tx job (up to KSE_NTXSEGS segments each). */
	for (i = 0; i < KSE_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    KSE_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error_dev(self,
			    "unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}
	/* One single-segment map per Rx descriptor. */
	for (i = 0; i < KSE_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    1, MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error_dev(self,
			    "unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	mii->mii_ifp = ifp;
	mii->mii_readreg = kse_mii_readreg;
	mii->mii_writereg = kse_mii_writereg;
	mii->mii_statchg = kse_mii_statchg;

	/* Initialize ifmedia structures. */
	if (sc->sc_chip == 0x8841) {
		/* use port 1 builtin PHY as index 1 device */
		sc->sc_ethercom.ec_mii = mii;
		ifm = &mii->mii_media;
		ifmedia_init(ifm, 0, kse_ifmedia_upd, kse_ifmedia_sts);
		mii_attach(sc->sc_dev, mii, 0xffffffff, 1 /* PHY1 */,
		    MII_OFFSET_ANY, MIIF_DOPAUSE);
		if (LIST_FIRST(&mii->mii_phys) == NULL) {
			ifmedia_add(ifm, IFM_ETHER | IFM_NONE, 0, NULL);
			ifmedia_set(ifm, IFM_ETHER | IFM_NONE);
		} else
			ifmedia_set(ifm, IFM_ETHER | IFM_AUTO);
	} else {
		/*
		 * pretend 100FDX w/ no alternative media selection.
		 * 8842 MAC is tied with a builtin 3 port switch. It can do
		 * 4 degree prioritised rate control over either of tx/rx
		 * direction for any of ports, respectively. Though, this
		 * driver leaves the rate unlimited intending 100Mbps
		 * maximum. 2 external ports behave in AN mode and this
		 * driver provides no means to manipulate and see their
		 * operational details.
		 */
		sc->sc_ethercom.ec_ifmedia = ifm = &sc->sc_media;
		ifmedia_init(ifm, 0, NULL, nopifmedia_sts);
		ifmedia_add(ifm, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
		ifmedia_set(ifm, IFM_ETHER | IFM_100_TX | IFM_FDX);

		aprint_normal_dev(self,
		    "10baseT, 10baseT-FDX, 100baseTX, 100baseTX-FDX, auto\n");
	}
	ifm->ifm_media = ifm->ifm_cur->ifm_media; /* as if user has requested */

	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = kse_ioctl;
	ifp->if_start = kse_start;
	ifp->if_watchdog = kse_watchdog;
	ifp->if_init = kse_init;
	ifp->if_stop = kse_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * capable of 802.1Q VLAN-sized frames.
	 * can do IPv4, TCPv4, and UDPv4 checksums in hardware.
	 */
	sc->sc_ethercom.ec_capabilities = ETHERCAP_VLAN_MTU;
	ifp->if_capabilities =
	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;

	sc->sc_flowflags = 0;

	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, enaddr);

	/* periodic link/statistics poll; armed by kse_init */
	callout_init(&sc->sc_tick_ch, 0);
	callout_setfunc(&sc->sc_tick_ch, phy_tick, sc);

	rnd_attach_source(&sc->rnd_source, device_xname(self),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

#ifdef KSE_EVENT_COUNTERS
	/* one counter set per switch port (3 on 8842, 1 on 8841) */
	const char *events[34] = {
		"RxLoPriotyByte",
		"RxHiPriotyByte",
		"RxUndersizePkt",
		"RxFragments",
		"RxOversize",
		"RxJabbers",
		"RxSymbolError",
		"RxCRCError",
		"RxAlignmentError",
		"RxControl8808Pkts",
		"RxPausePkts",
		"RxBroadcast",
		"RxMulticast",
		"RxUnicast",
		"Rx64Octets",
		"Rx65To127Octets",
		"Rx128To255Octets",
		"Rx255To511Octets",
		"Rx512To1023Octets",
		"Rx1024To1522Octets",
		"TxLoPriotyByte",
		"TxHiPriotyByte",
		"TxLateCollision",
		"TxPausePkts",
		"TxBroadcastPkts",
		"TxMulticastPkts",
		"TxUnicastPkts",
		"TxDeferred",
		"TxTotalCollision",
		"TxExcessiveCollision",
		"TxSingleCollision",
		"TxMultipleCollision",
		"TxDropPkts",
		"RxDropPkts",
	};
	struct ksext *ee = &sc->sc_ext;
	int p = (sc->sc_chip == 0x8842) ? 3 : 1;
	for (i = 0; i < p; i++) {
		snprintf(ee->evcntname[i], sizeof(ee->evcntname[i]),
		    "%s.%d", device_xname(sc->sc_dev), i+1);
		for (int ev = 0; ev < 34; ev++) {
			evcnt_attach_dynamic(&ee->pev[i][ev], EVCNT_TYPE_MISC,
			    NULL, ee->evcntname[i], events[ev]);
		}
	}
#endif
	return;

	/* unwind in reverse order of acquisition */
 fail_5:
	for (i = 0; i < KSE_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < KSE_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct kse_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, nseg);
 fail_0:
	pci_intr_disestablish(pc, sc->sc_ih);
 fail:
	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_memsize);
	return;
}
653 1.1 nisimura
654 1.1 nisimura static int
655 1.3 christos kse_ioctl(struct ifnet *ifp, u_long cmd, void *data)
656 1.1 nisimura {
657 1.1 nisimura struct kse_softc *sc = ifp->if_softc;
658 1.42 nisimura struct ifreq *ifr = (struct ifreq *)data;
659 1.42 nisimura struct ifmedia *ifm;
660 1.1 nisimura int s, error;
661 1.1 nisimura
662 1.1 nisimura s = splnet();
663 1.1 nisimura
664 1.1 nisimura switch (cmd) {
665 1.42 nisimura case SIOCSIFMEDIA:
666 1.42 nisimura /* Flow control requires full-duplex mode. */
667 1.42 nisimura if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
668 1.42 nisimura (ifr->ifr_media & IFM_FDX) == 0)
669 1.42 nisimura ifr->ifr_media &= ~IFM_ETH_FMASK;
670 1.42 nisimura if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
671 1.42 nisimura if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
672 1.42 nisimura /* We can do both TXPAUSE and RXPAUSE. */
673 1.42 nisimura ifr->ifr_media |=
674 1.42 nisimura IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
675 1.42 nisimura }
676 1.42 nisimura sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
677 1.42 nisimura }
678 1.42 nisimura ifm = (sc->sc_chip == 0x8841)
679 1.42 nisimura ? &sc->sc_mii.mii_media : &sc->sc_media;
680 1.42 nisimura error = ifmedia_ioctl(ifp, ifr, ifm, cmd);
681 1.42 nisimura break;
682 1.1 nisimura default:
683 1.54 nisimura error = ether_ioctl(ifp, cmd, data);
684 1.54 nisimura if (error != ENETRESET)
685 1.12 dyoung break;
686 1.12 dyoung error = 0;
687 1.12 dyoung if (cmd == SIOCSIFCAP)
688 1.12 dyoung error = (*ifp->if_init)(ifp);
689 1.12 dyoung if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
690 1.12 dyoung ;
691 1.12 dyoung else if (ifp->if_flags & IFF_RUNNING) {
692 1.1 nisimura /*
693 1.1 nisimura * Multicast list has changed; set the hardware filter
694 1.1 nisimura * accordingly.
695 1.1 nisimura */
696 1.53 nisimura kse_set_rcvfilt(sc);
697 1.1 nisimura }
698 1.1 nisimura break;
699 1.1 nisimura }
700 1.1 nisimura
701 1.1 nisimura splx(s);
702 1.54 nisimura
703 1.1 nisimura return error;
704 1.1 nisimura }
705 1.1 nisimura
706 1.1 nisimura static int
707 1.1 nisimura kse_init(struct ifnet *ifp)
708 1.1 nisimura {
709 1.1 nisimura struct kse_softc *sc = ifp->if_softc;
710 1.2 tsutsui uint32_t paddr;
711 1.1 nisimura int i, error = 0;
712 1.1 nisimura
713 1.1 nisimura /* cancel pending I/O */
714 1.1 nisimura kse_stop(ifp, 0);
715 1.1 nisimura
716 1.1 nisimura /* reset all registers but PCI configuration */
717 1.1 nisimura kse_reset(sc);
718 1.1 nisimura
719 1.1 nisimura /* craft Tx descriptor ring */
720 1.1 nisimura memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
721 1.1 nisimura for (i = 0, paddr = KSE_CDTXADDR(sc, 1); i < KSE_NTXDESC - 1; i++) {
722 1.1 nisimura sc->sc_txdescs[i].t3 = paddr;
723 1.1 nisimura paddr += sizeof(struct tdes);
724 1.1 nisimura }
725 1.1 nisimura sc->sc_txdescs[KSE_NTXDESC - 1].t3 = KSE_CDTXADDR(sc, 0);
726 1.1 nisimura KSE_CDTXSYNC(sc, 0, KSE_NTXDESC,
727 1.1 nisimura BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
728 1.1 nisimura sc->sc_txfree = KSE_NTXDESC;
729 1.1 nisimura sc->sc_txnext = 0;
730 1.1 nisimura
731 1.1 nisimura for (i = 0; i < KSE_TXQUEUELEN; i++)
732 1.1 nisimura sc->sc_txsoft[i].txs_mbuf = NULL;
733 1.1 nisimura sc->sc_txsfree = KSE_TXQUEUELEN;
734 1.1 nisimura sc->sc_txsnext = 0;
735 1.1 nisimura sc->sc_txsdirty = 0;
736 1.1 nisimura
737 1.1 nisimura /* craft Rx descriptor ring */
738 1.1 nisimura memset(sc->sc_rxdescs, 0, sizeof(sc->sc_rxdescs));
739 1.1 nisimura for (i = 0, paddr = KSE_CDRXADDR(sc, 1); i < KSE_NRXDESC - 1; i++) {
740 1.1 nisimura sc->sc_rxdescs[i].r3 = paddr;
741 1.1 nisimura paddr += sizeof(struct rdes);
742 1.1 nisimura }
743 1.1 nisimura sc->sc_rxdescs[KSE_NRXDESC - 1].r3 = KSE_CDRXADDR(sc, 0);
744 1.1 nisimura for (i = 0; i < KSE_NRXDESC; i++) {
745 1.1 nisimura if (sc->sc_rxsoft[i].rxs_mbuf == NULL) {
746 1.1 nisimura if ((error = add_rxbuf(sc, i)) != 0) {
747 1.42 nisimura aprint_error_dev(sc->sc_dev,
748 1.42 nisimura "unable to allocate or map rx "
749 1.1 nisimura "buffer %d, error = %d\n",
750 1.42 nisimura i, error);
751 1.1 nisimura rxdrain(sc);
752 1.1 nisimura goto out;
753 1.1 nisimura }
754 1.1 nisimura }
755 1.1 nisimura else
756 1.1 nisimura KSE_INIT_RXDESC(sc, i);
757 1.1 nisimura }
758 1.1 nisimura sc->sc_rxptr = 0;
759 1.1 nisimura
760 1.1 nisimura /* hand Tx/Rx rings to HW */
761 1.1 nisimura CSR_WRITE_4(sc, TDLB, KSE_CDTXADDR(sc, 0));
762 1.1 nisimura CSR_WRITE_4(sc, RDLB, KSE_CDRXADDR(sc, 0));
763 1.1 nisimura
764 1.42 nisimura sc->sc_txc = TXC_TEN | TXC_EP | TXC_AC;
765 1.44 nisimura sc->sc_rxc = RXC_REN | RXC_RU | RXC_RB;
766 1.1 nisimura sc->sc_t1csum = sc->sc_mcsum = 0;
767 1.1 nisimura if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) {
768 1.5 nisimura sc->sc_rxc |= RXC_ICC;
769 1.1 nisimura sc->sc_mcsum |= M_CSUM_IPv4;
770 1.1 nisimura }
771 1.1 nisimura if (ifp->if_capenable & IFCAP_CSUM_IPv4_Tx) {
772 1.1 nisimura sc->sc_txc |= TXC_ICG;
773 1.1 nisimura sc->sc_t1csum |= T1_IPCKG;
774 1.1 nisimura }
775 1.1 nisimura if (ifp->if_capenable & IFCAP_CSUM_TCPv4_Rx) {
776 1.5 nisimura sc->sc_rxc |= RXC_TCC;
777 1.1 nisimura sc->sc_mcsum |= M_CSUM_TCPv4;
778 1.1 nisimura }
779 1.1 nisimura if (ifp->if_capenable & IFCAP_CSUM_TCPv4_Tx) {
780 1.1 nisimura sc->sc_txc |= TXC_TCG;
781 1.1 nisimura sc->sc_t1csum |= T1_TCPCKG;
782 1.1 nisimura }
783 1.1 nisimura if (ifp->if_capenable & IFCAP_CSUM_UDPv4_Rx) {
784 1.5 nisimura sc->sc_rxc |= RXC_UCC;
785 1.1 nisimura sc->sc_mcsum |= M_CSUM_UDPv4;
786 1.1 nisimura }
787 1.1 nisimura if (ifp->if_capenable & IFCAP_CSUM_UDPv4_Tx) {
788 1.1 nisimura sc->sc_txc |= TXC_UCG;
789 1.1 nisimura sc->sc_t1csum |= T1_UDPCKG;
790 1.1 nisimura }
791 1.1 nisimura sc->sc_txc |= (kse_burstsize << TXC_BS_SFT);
792 1.1 nisimura sc->sc_rxc |= (kse_burstsize << RXC_BS_SFT);
793 1.1 nisimura
794 1.42 nisimura if (sc->sc_chip == 0x8842) {
795 1.56 nisimura /* make PAUSE flow control to run */
796 1.42 nisimura sc->sc_txc |= TXC_FCE;
797 1.42 nisimura sc->sc_rxc |= RXC_FCE;
798 1.56 nisimura i = CSR_READ_2(sc, SGCR3);
799 1.56 nisimura CSR_WRITE_2(sc, SGCR3, i | CR3_USEFC);
800 1.42 nisimura }
801 1.42 nisimura
802 1.49 nisimura /* accept multicast frame or run promisc mode */
803 1.53 nisimura kse_set_rcvfilt(sc);
804 1.6 nisimura
805 1.1 nisimura /* set current media */
806 1.39 nisimura if (sc->sc_chip == 0x8841)
807 1.42 nisimura (void)kse_ifmedia_upd(ifp);
808 1.1 nisimura
809 1.1 nisimura /* enable transmitter and receiver */
810 1.1 nisimura CSR_WRITE_4(sc, MDTXC, sc->sc_txc);
811 1.1 nisimura CSR_WRITE_4(sc, MDRXC, sc->sc_rxc);
812 1.1 nisimura CSR_WRITE_4(sc, MDRSC, 1);
813 1.1 nisimura
814 1.1 nisimura /* enable interrupts */
815 1.35 msaitoh sc->sc_inten = INT_DMTS | INT_DMRS | INT_DMRBUS;
816 1.8 nisimura if (sc->sc_chip == 0x8841)
817 1.8 nisimura sc->sc_inten |= INT_DMLCS;
818 1.1 nisimura CSR_WRITE_4(sc, INTST, ~0);
819 1.8 nisimura CSR_WRITE_4(sc, INTEN, sc->sc_inten);
820 1.1 nisimura
821 1.1 nisimura ifp->if_flags |= IFF_RUNNING;
822 1.1 nisimura ifp->if_flags &= ~IFF_OACTIVE;
823 1.1 nisimura
824 1.56 nisimura /* start one second timer */
825 1.56 nisimura callout_schedule(&sc->sc_tick_ch, hz);
826 1.56 nisimura
827 1.8 nisimura #ifdef KSE_EVENT_COUNTERS
828 1.8 nisimura zerostats(sc);
829 1.8 nisimura #endif
830 1.1 nisimura
831 1.1 nisimura out:
832 1.1 nisimura if (error) {
833 1.1 nisimura ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
834 1.1 nisimura ifp->if_timer = 0;
835 1.42 nisimura aprint_error_dev(sc->sc_dev, "interface not running\n");
836 1.1 nisimura }
837 1.1 nisimura return error;
838 1.1 nisimura }
839 1.1 nisimura
840 1.1 nisimura static void
841 1.1 nisimura kse_stop(struct ifnet *ifp, int disable)
842 1.1 nisimura {
843 1.1 nisimura struct kse_softc *sc = ifp->if_softc;
844 1.1 nisimura struct kse_txsoft *txs;
845 1.1 nisimura int i;
846 1.1 nisimura
847 1.56 nisimura callout_stop(&sc->sc_tick_ch);
848 1.56 nisimura
849 1.1 nisimura sc->sc_txc &= ~TXC_TEN;
850 1.1 nisimura sc->sc_rxc &= ~RXC_REN;
851 1.1 nisimura CSR_WRITE_4(sc, MDTXC, sc->sc_txc);
852 1.1 nisimura CSR_WRITE_4(sc, MDRXC, sc->sc_rxc);
853 1.1 nisimura
854 1.1 nisimura for (i = 0; i < KSE_TXQUEUELEN; i++) {
855 1.1 nisimura txs = &sc->sc_txsoft[i];
856 1.1 nisimura if (txs->txs_mbuf != NULL) {
857 1.1 nisimura bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
858 1.1 nisimura m_freem(txs->txs_mbuf);
859 1.1 nisimura txs->txs_mbuf = NULL;
860 1.1 nisimura }
861 1.1 nisimura }
862 1.1 nisimura
863 1.13 dyoung ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
864 1.13 dyoung ifp->if_timer = 0;
865 1.13 dyoung
866 1.1 nisimura if (disable)
867 1.1 nisimura rxdrain(sc);
868 1.1 nisimura }
869 1.1 nisimura
870 1.1 nisimura static void
871 1.1 nisimura kse_reset(struct kse_softc *sc)
872 1.1 nisimura {
873 1.1 nisimura
874 1.42 nisimura /* software reset */
875 1.1 nisimura CSR_WRITE_2(sc, GRR, 1);
876 1.1 nisimura delay(1000); /* PDF does not mention the delay amount */
877 1.1 nisimura CSR_WRITE_2(sc, GRR, 0);
878 1.1 nisimura
879 1.42 nisimura /* enable switch function */
880 1.42 nisimura CSR_WRITE_2(sc, SIDER, 1);
881 1.1 nisimura }
882 1.1 nisimura
883 1.1 nisimura static void
884 1.1 nisimura kse_watchdog(struct ifnet *ifp)
885 1.1 nisimura {
886 1.1 nisimura struct kse_softc *sc = ifp->if_softc;
887 1.1 nisimura
888 1.24 christos /*
889 1.1 nisimura * Since we're not interrupting every packet, sweep
890 1.1 nisimura * up before we report an error.
891 1.1 nisimura */
892 1.1 nisimura txreap(sc);
893 1.1 nisimura
894 1.1 nisimura if (sc->sc_txfree != KSE_NTXDESC) {
895 1.42 nisimura aprint_error_dev(sc->sc_dev,
896 1.42 nisimura "device timeout (txfree %d txsfree %d txnext %d)\n",
897 1.42 nisimura sc->sc_txfree, sc->sc_txsfree, sc->sc_txnext);
898 1.48 skrll if_statinc(ifp, if_oerrors);
899 1.1 nisimura
900 1.1 nisimura /* Reset the interface. */
901 1.1 nisimura kse_init(ifp);
902 1.1 nisimura }
903 1.1 nisimura else if (ifp->if_flags & IFF_DEBUG)
904 1.42 nisimura aprint_error_dev(sc->sc_dev, "recovered from device timeout\n");
905 1.1 nisimura
906 1.1 nisimura /* Try to get more packets going. */
907 1.1 nisimura kse_start(ifp);
908 1.1 nisimura }
909 1.1 nisimura
910 1.1 nisimura static void
911 1.1 nisimura kse_start(struct ifnet *ifp)
912 1.1 nisimura {
913 1.1 nisimura struct kse_softc *sc = ifp->if_softc;
914 1.8 nisimura struct mbuf *m0, *m;
915 1.1 nisimura struct kse_txsoft *txs;
916 1.1 nisimura bus_dmamap_t dmamap;
917 1.1 nisimura int error, nexttx, lasttx, ofree, seg;
918 1.6 nisimura uint32_t tdes0;
919 1.1 nisimura
920 1.35 msaitoh if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
921 1.1 nisimura return;
922 1.1 nisimura
923 1.35 msaitoh /* Remember the previous number of free descriptors. */
924 1.1 nisimura ofree = sc->sc_txfree;
925 1.1 nisimura
926 1.1 nisimura /*
927 1.1 nisimura * Loop through the send queue, setting up transmit descriptors
928 1.1 nisimura * until we drain the queue, or use up all available transmit
929 1.1 nisimura * descriptors.
930 1.1 nisimura */
931 1.1 nisimura for (;;) {
932 1.1 nisimura IFQ_POLL(&ifp->if_snd, m0);
933 1.1 nisimura if (m0 == NULL)
934 1.1 nisimura break;
935 1.1 nisimura
936 1.1 nisimura if (sc->sc_txsfree < KSE_TXQUEUE_GC) {
937 1.1 nisimura txreap(sc);
938 1.1 nisimura if (sc->sc_txsfree == 0)
939 1.1 nisimura break;
940 1.1 nisimura }
941 1.1 nisimura txs = &sc->sc_txsoft[sc->sc_txsnext];
942 1.1 nisimura dmamap = txs->txs_dmamap;
943 1.1 nisimura
944 1.1 nisimura error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
945 1.35 msaitoh BUS_DMA_WRITE | BUS_DMA_NOWAIT);
946 1.1 nisimura if (error) {
947 1.1 nisimura if (error == EFBIG) {
948 1.42 nisimura aprint_error_dev(sc->sc_dev,
949 1.42 nisimura "Tx packet consumes too many "
950 1.42 nisimura "DMA segments, dropping...\n");
951 1.1 nisimura IFQ_DEQUEUE(&ifp->if_snd, m0);
952 1.1 nisimura m_freem(m0);
953 1.1 nisimura continue;
954 1.1 nisimura }
955 1.1 nisimura /* Short on resources, just stop for now. */
956 1.1 nisimura break;
957 1.1 nisimura }
958 1.1 nisimura
959 1.1 nisimura if (dmamap->dm_nsegs > sc->sc_txfree) {
960 1.1 nisimura /*
961 1.1 nisimura * Not enough free descriptors to transmit this
962 1.1 nisimura * packet. We haven't committed anything yet,
963 1.1 nisimura * so just unload the DMA map, put the packet
964 1.1 nisimura * back on the queue, and punt. Notify the upper
965 1.1 nisimura * layer that there are not more slots left.
966 1.1 nisimura */
967 1.1 nisimura ifp->if_flags |= IFF_OACTIVE;
968 1.1 nisimura bus_dmamap_unload(sc->sc_dmat, dmamap);
969 1.1 nisimura break;
970 1.1 nisimura }
971 1.1 nisimura
972 1.1 nisimura IFQ_DEQUEUE(&ifp->if_snd, m0);
973 1.1 nisimura
974 1.1 nisimura /*
975 1.1 nisimura * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
976 1.1 nisimura */
977 1.1 nisimura
978 1.1 nisimura bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
979 1.1 nisimura BUS_DMASYNC_PREWRITE);
980 1.1 nisimura
981 1.43 nisimura tdes0 = 0; /* to postpone 1st segment T0_OWN write */
982 1.43 nisimura lasttx = -1;
983 1.1 nisimura for (nexttx = sc->sc_txnext, seg = 0;
984 1.1 nisimura seg < dmamap->dm_nsegs;
985 1.1 nisimura seg++, nexttx = KSE_NEXTTX(nexttx)) {
986 1.1 nisimura struct tdes *tdes = &sc->sc_txdescs[nexttx];
987 1.1 nisimura /*
988 1.1 nisimura * If this is the first descriptor we're
989 1.1 nisimura * enqueueing, don't set the OWN bit just
990 1.1 nisimura * yet. That could cause a race condition.
991 1.1 nisimura * We'll do it below.
992 1.1 nisimura */
993 1.1 nisimura tdes->t2 = dmamap->dm_segs[seg].ds_addr;
994 1.1 nisimura tdes->t1 = sc->sc_t1csum
995 1.1 nisimura | (dmamap->dm_segs[seg].ds_len & T1_TBS_MASK);
996 1.6 nisimura tdes->t0 = tdes0;
997 1.43 nisimura tdes0 = T0_OWN; /* 2nd and other segments */
998 1.1 nisimura lasttx = nexttx;
999 1.1 nisimura }
1000 1.1 nisimura /*
1001 1.1 nisimura * Outgoing NFS mbuf must be unloaded when Tx completed.
1002 1.1 nisimura * Without T1_IC NFS mbuf is left unack'ed for excessive
1003 1.1 nisimura * time and NFS stops to proceed until kse_watchdog()
1004 1.1 nisimura * calls txreap() to reclaim the unack'ed mbuf.
1005 1.5 nisimura * It's painful to traverse every mbuf chain to determine
1006 1.1 nisimura * whether someone is waiting for Tx completion.
1007 1.1 nisimura */
1008 1.8 nisimura m = m0;
1009 1.1 nisimura do {
1010 1.1 nisimura if ((m->m_flags & M_EXT) && m->m_ext.ext_free) {
1011 1.1 nisimura sc->sc_txdescs[lasttx].t1 |= T1_IC;
1012 1.1 nisimura break;
1013 1.1 nisimura }
1014 1.1 nisimura } while ((m = m->m_next) != NULL);
1015 1.1 nisimura
1016 1.43 nisimura /* Write deferred 1st segment T0_OWN at the final stage */
1017 1.1 nisimura sc->sc_txdescs[lasttx].t1 |= T1_LS;
1018 1.1 nisimura sc->sc_txdescs[sc->sc_txnext].t1 |= T1_FS;
1019 1.1 nisimura sc->sc_txdescs[sc->sc_txnext].t0 = T0_OWN;
1020 1.1 nisimura KSE_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
1021 1.35 msaitoh BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1022 1.1 nisimura
1023 1.35 msaitoh /* Tell DMA start transmit */
1024 1.1 nisimura CSR_WRITE_4(sc, MDTSC, 1);
1025 1.1 nisimura
1026 1.1 nisimura txs->txs_mbuf = m0;
1027 1.1 nisimura txs->txs_firstdesc = sc->sc_txnext;
1028 1.1 nisimura txs->txs_lastdesc = lasttx;
1029 1.1 nisimura txs->txs_ndesc = dmamap->dm_nsegs;
1030 1.1 nisimura
1031 1.1 nisimura sc->sc_txfree -= txs->txs_ndesc;
1032 1.1 nisimura sc->sc_txnext = nexttx;
1033 1.1 nisimura sc->sc_txsfree--;
1034 1.1 nisimura sc->sc_txsnext = KSE_NEXTTXS(sc->sc_txsnext);
1035 1.1 nisimura /*
1036 1.1 nisimura * Pass the packet to any BPF listeners.
1037 1.1 nisimura */
1038 1.32 msaitoh bpf_mtap(ifp, m0, BPF_D_OUT);
1039 1.1 nisimura }
1040 1.1 nisimura
1041 1.1 nisimura if (sc->sc_txsfree == 0 || sc->sc_txfree == 0) {
1042 1.1 nisimura /* No more slots left; notify upper layer. */
1043 1.1 nisimura ifp->if_flags |= IFF_OACTIVE;
1044 1.1 nisimura }
1045 1.1 nisimura if (sc->sc_txfree != ofree) {
1046 1.1 nisimura /* Set a watchdog timer in case the chip flakes out. */
1047 1.1 nisimura ifp->if_timer = 5;
1048 1.1 nisimura }
1049 1.1 nisimura }
1050 1.1 nisimura
1051 1.1 nisimura static void
1052 1.53 nisimura kse_set_rcvfilt(struct kse_softc *sc)
1053 1.1 nisimura {
1054 1.1 nisimura struct ether_multistep step;
1055 1.1 nisimura struct ether_multi *enm;
1056 1.36 msaitoh struct ethercom *ec = &sc->sc_ethercom;
1057 1.36 msaitoh struct ifnet *ifp = &ec->ec_if;
1058 1.44 nisimura uint32_t crc, mchash[2];
1059 1.45 nisimura int i;
1060 1.6 nisimura
1061 1.44 nisimura sc->sc_rxc &= ~(RXC_MHTE | RXC_RM | RXC_RA);
1062 1.1 nisimura
1063 1.49 nisimura /* clear perfect match filter and prepare mcast hash table */
1064 1.45 nisimura for (i = 0; i < 16; i++)
1065 1.45 nisimura CSR_WRITE_4(sc, MAAH0 + i*8, 0);
1066 1.45 nisimura crc = mchash[0] = mchash[1] = 0;
1067 1.49 nisimura
1068 1.37 msaitoh ETHER_LOCK(ec);
1069 1.52 nisimura if (ifp->if_flags & IFF_PROMISC) {
1070 1.52 nisimura ec->ec_flags |= ETHER_F_ALLMULTI;
1071 1.53 nisimura ETHER_UNLOCK(ec);
1072 1.52 nisimura /* run promisc. mode */
1073 1.52 nisimura sc->sc_rxc |= RXC_RA;
1074 1.52 nisimura goto update;
1075 1.52 nisimura }
1076 1.52 nisimura ec->ec_flags &= ~ETHER_F_ALLMULTI;
1077 1.36 msaitoh ETHER_FIRST_MULTI(step, ec, enm);
1078 1.45 nisimura i = 0;
1079 1.44 nisimura while (enm != NULL) {
1080 1.6 nisimura if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1081 1.6 nisimura /*
1082 1.6 nisimura * We must listen to a range of multicast addresses.
1083 1.6 nisimura * For now, just accept all multicasts, rather than
1084 1.6 nisimura * trying to set only those filter bits needed to match
1085 1.6 nisimura * the range. (At this time, the only use of address
1086 1.6 nisimura * ranges is for IP multicast routing, for which the
1087 1.6 nisimura * range is big enough to require all bits set.)
1088 1.6 nisimura */
1089 1.52 nisimura ec->ec_flags |= ETHER_F_ALLMULTI;
1090 1.37 msaitoh ETHER_UNLOCK(ec);
1091 1.52 nisimura /* accept all multicast */
1092 1.52 nisimura sc->sc_rxc |= RXC_RM;
1093 1.44 nisimura goto update;
1094 1.1 nisimura }
1095 1.50 nisimura #if KSE_MCASTDEBUG == 1
1096 1.50 nisimura printf("[%d] %s\n", i, ether_sprintf(enm->enm_addrlo));
1097 1.50 nisimura #endif
1098 1.45 nisimura if (i < 16) {
1099 1.45 nisimura /* use 16 additional MAC addr to accept mcast */
1100 1.45 nisimura uint32_t addr;
1101 1.45 nisimura uint8_t *ep = enm->enm_addrlo;
1102 1.45 nisimura addr = (ep[3] << 24) | (ep[2] << 16)
1103 1.45 nisimura | (ep[1] << 8) | ep[0];
1104 1.45 nisimura CSR_WRITE_4(sc, MAAL0 + i*8, addr);
1105 1.50 nisimura addr = (ep[5] << 8) | ep[4];
1106 1.50 nisimura CSR_WRITE_4(sc, MAAH0 + i*8, addr | (1U << 31));
1107 1.45 nisimura } else {
1108 1.45 nisimura /* use hash table when too many */
1109 1.45 nisimura crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
1110 1.45 nisimura mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
1111 1.45 nisimura }
1112 1.1 nisimura ETHER_NEXT_MULTI(step, enm);
1113 1.45 nisimura i++;
1114 1.44 nisimura }
1115 1.37 msaitoh ETHER_UNLOCK(ec);
1116 1.44 nisimura
1117 1.50 nisimura if (crc)
1118 1.44 nisimura sc->sc_rxc |= RXC_MHTE;
1119 1.50 nisimura CSR_WRITE_4(sc, MTR0, mchash[0]);
1120 1.50 nisimura CSR_WRITE_4(sc, MTR1, mchash[1]);
1121 1.44 nisimura update:
1122 1.44 nisimura /* With RA or RM, MHTE/MTR0/MTR1 are never consulted. */
1123 1.1 nisimura return;
1124 1.1 nisimura }
1125 1.1 nisimura
1126 1.1 nisimura static int
1127 1.1 nisimura add_rxbuf(struct kse_softc *sc, int idx)
1128 1.1 nisimura {
1129 1.1 nisimura struct kse_rxsoft *rxs = &sc->sc_rxsoft[idx];
1130 1.1 nisimura struct mbuf *m;
1131 1.1 nisimura int error;
1132 1.1 nisimura
1133 1.1 nisimura MGETHDR(m, M_DONTWAIT, MT_DATA);
1134 1.1 nisimura if (m == NULL)
1135 1.1 nisimura return ENOBUFS;
1136 1.1 nisimura
1137 1.1 nisimura MCLGET(m, M_DONTWAIT);
1138 1.1 nisimura if ((m->m_flags & M_EXT) == 0) {
1139 1.1 nisimura m_freem(m);
1140 1.1 nisimura return ENOBUFS;
1141 1.1 nisimura }
1142 1.1 nisimura
1143 1.1 nisimura if (rxs->rxs_mbuf != NULL)
1144 1.1 nisimura bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
1145 1.1 nisimura
1146 1.1 nisimura rxs->rxs_mbuf = m;
1147 1.1 nisimura
1148 1.1 nisimura error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
1149 1.1 nisimura m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
1150 1.1 nisimura if (error) {
1151 1.42 nisimura aprint_error_dev(sc->sc_dev,
1152 1.42 nisimura "can't load rx DMA map %d, error = %d\n", idx, error);
1153 1.1 nisimura panic("kse_add_rxbuf");
1154 1.1 nisimura }
1155 1.1 nisimura
1156 1.1 nisimura bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1157 1.1 nisimura rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1158 1.1 nisimura
1159 1.1 nisimura KSE_INIT_RXDESC(sc, idx);
1160 1.1 nisimura
1161 1.1 nisimura return 0;
1162 1.1 nisimura }
1163 1.1 nisimura
1164 1.1 nisimura static void
1165 1.1 nisimura rxdrain(struct kse_softc *sc)
1166 1.1 nisimura {
1167 1.1 nisimura struct kse_rxsoft *rxs;
1168 1.1 nisimura int i;
1169 1.1 nisimura
1170 1.1 nisimura for (i = 0; i < KSE_NRXDESC; i++) {
1171 1.1 nisimura rxs = &sc->sc_rxsoft[i];
1172 1.1 nisimura if (rxs->rxs_mbuf != NULL) {
1173 1.1 nisimura bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
1174 1.1 nisimura m_freem(rxs->rxs_mbuf);
1175 1.1 nisimura rxs->rxs_mbuf = NULL;
1176 1.1 nisimura }
1177 1.1 nisimura }
1178 1.1 nisimura }
1179 1.1 nisimura
1180 1.1 nisimura static int
1181 1.1 nisimura kse_intr(void *arg)
1182 1.1 nisimura {
1183 1.1 nisimura struct kse_softc *sc = arg;
1184 1.43 nisimura struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1185 1.2 tsutsui uint32_t isr;
1186 1.1 nisimura
1187 1.1 nisimura if ((isr = CSR_READ_4(sc, INTST)) == 0)
1188 1.1 nisimura return 0;
1189 1.1 nisimura
1190 1.1 nisimura if (isr & INT_DMRS)
1191 1.1 nisimura rxintr(sc);
1192 1.1 nisimura if (isr & INT_DMTS)
1193 1.1 nisimura txreap(sc);
1194 1.1 nisimura if (isr & INT_DMLCS)
1195 1.1 nisimura lnkchg(sc);
1196 1.1 nisimura if (isr & INT_DMRBUS)
1197 1.42 nisimura aprint_error_dev(sc->sc_dev, "Rx descriptor full\n");
1198 1.1 nisimura
1199 1.1 nisimura CSR_WRITE_4(sc, INTST, isr);
1200 1.43 nisimura
1201 1.43 nisimura if (ifp->if_flags & IFF_RUNNING)
1202 1.43 nisimura if_schedule_deferred_start(ifp);
1203 1.43 nisimura
1204 1.1 nisimura return 1;
1205 1.1 nisimura }
1206 1.1 nisimura
1207 1.1 nisimura static void
1208 1.1 nisimura rxintr(struct kse_softc *sc)
1209 1.1 nisimura {
1210 1.1 nisimura struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1211 1.1 nisimura struct kse_rxsoft *rxs;
1212 1.1 nisimura struct mbuf *m;
1213 1.2 tsutsui uint32_t rxstat;
1214 1.1 nisimura int i, len;
1215 1.1 nisimura
1216 1.1 nisimura for (i = sc->sc_rxptr; /*CONSTCOND*/ 1; i = KSE_NEXTRX(i)) {
1217 1.1 nisimura rxs = &sc->sc_rxsoft[i];
1218 1.1 nisimura
1219 1.1 nisimura KSE_CDRXSYNC(sc, i,
1220 1.35 msaitoh BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1221 1.1 nisimura
1222 1.1 nisimura rxstat = sc->sc_rxdescs[i].r0;
1223 1.35 msaitoh
1224 1.1 nisimura if (rxstat & R0_OWN) /* desc is left empty */
1225 1.1 nisimura break;
1226 1.1 nisimura
1227 1.35 msaitoh /* R0_FS | R0_LS must have been marked for this desc */
1228 1.1 nisimura
1229 1.1 nisimura if (rxstat & R0_ES) {
1230 1.48 skrll if_statinc(ifp, if_ierrors);
1231 1.1 nisimura #define PRINTERR(bit, str) \
1232 1.1 nisimura if (rxstat & (bit)) \
1233 1.42 nisimura aprint_error_dev(sc->sc_dev, \
1234 1.42 nisimura "%s\n", str)
1235 1.1 nisimura PRINTERR(R0_TL, "frame too long");
1236 1.1 nisimura PRINTERR(R0_RF, "runt frame");
1237 1.1 nisimura PRINTERR(R0_CE, "bad FCS");
1238 1.1 nisimura #undef PRINTERR
1239 1.1 nisimura KSE_INIT_RXDESC(sc, i);
1240 1.1 nisimura continue;
1241 1.1 nisimura }
1242 1.1 nisimura
1243 1.1 nisimura /* HW errata; frame might be too small or too large */
1244 1.1 nisimura
1245 1.1 nisimura bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1246 1.1 nisimura rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1247 1.1 nisimura
1248 1.1 nisimura len = rxstat & R0_FL_MASK;
1249 1.35 msaitoh len -= ETHER_CRC_LEN; /* Trim CRC off */
1250 1.1 nisimura m = rxs->rxs_mbuf;
1251 1.1 nisimura
1252 1.1 nisimura if (add_rxbuf(sc, i) != 0) {
1253 1.48 skrll if_statinc(ifp, if_ierrors);
1254 1.1 nisimura KSE_INIT_RXDESC(sc, i);
1255 1.1 nisimura bus_dmamap_sync(sc->sc_dmat,
1256 1.1 nisimura rxs->rxs_dmamap, 0,
1257 1.1 nisimura rxs->rxs_dmamap->dm_mapsize,
1258 1.1 nisimura BUS_DMASYNC_PREREAD);
1259 1.1 nisimura continue;
1260 1.1 nisimura }
1261 1.1 nisimura
1262 1.30 ozaki m_set_rcvif(m, ifp);
1263 1.1 nisimura m->m_pkthdr.len = m->m_len = len;
1264 1.1 nisimura
1265 1.1 nisimura if (sc->sc_mcsum) {
1266 1.1 nisimura m->m_pkthdr.csum_flags |= sc->sc_mcsum;
1267 1.1 nisimura if (rxstat & R0_IPE)
1268 1.1 nisimura m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
1269 1.1 nisimura if (rxstat & (R0_TCPE | R0_UDPE))
1270 1.1 nisimura m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
1271 1.1 nisimura }
1272 1.29 ozaki if_percpuq_enqueue(ifp->if_percpuq, m);
1273 1.1 nisimura #ifdef KSEDIAGNOSTIC
1274 1.1 nisimura if (kse_monitor_rxintr > 0) {
1275 1.42 nisimura aprint_error_dev(sc->sc_dev,
1276 1.42 nisimura "m stat %x data %p len %d\n",
1277 1.1 nisimura rxstat, m->m_data, m->m_len);
1278 1.1 nisimura }
1279 1.1 nisimura #endif
1280 1.1 nisimura }
1281 1.1 nisimura sc->sc_rxptr = i;
1282 1.1 nisimura }
1283 1.1 nisimura
1284 1.1 nisimura static void
1285 1.1 nisimura txreap(struct kse_softc *sc)
1286 1.1 nisimura {
1287 1.1 nisimura struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1288 1.1 nisimura struct kse_txsoft *txs;
1289 1.2 tsutsui uint32_t txstat;
1290 1.1 nisimura int i;
1291 1.1 nisimura
1292 1.1 nisimura ifp->if_flags &= ~IFF_OACTIVE;
1293 1.1 nisimura
1294 1.1 nisimura for (i = sc->sc_txsdirty; sc->sc_txsfree != KSE_TXQUEUELEN;
1295 1.1 nisimura i = KSE_NEXTTXS(i), sc->sc_txsfree++) {
1296 1.1 nisimura txs = &sc->sc_txsoft[i];
1297 1.1 nisimura
1298 1.1 nisimura KSE_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
1299 1.35 msaitoh BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1300 1.1 nisimura
1301 1.1 nisimura txstat = sc->sc_txdescs[txs->txs_lastdesc].t0;
1302 1.1 nisimura
1303 1.1 nisimura if (txstat & T0_OWN) /* desc is still in use */
1304 1.1 nisimura break;
1305 1.1 nisimura
1306 1.35 msaitoh /* There is no way to tell transmission status per frame */
1307 1.1 nisimura
1308 1.48 skrll if_statinc(ifp, if_opackets);
1309 1.1 nisimura
1310 1.1 nisimura sc->sc_txfree += txs->txs_ndesc;
1311 1.1 nisimura bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
1312 1.1 nisimura 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1313 1.1 nisimura bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1314 1.1 nisimura m_freem(txs->txs_mbuf);
1315 1.1 nisimura txs->txs_mbuf = NULL;
1316 1.1 nisimura }
1317 1.1 nisimura sc->sc_txsdirty = i;
1318 1.1 nisimura if (sc->sc_txsfree == KSE_TXQUEUELEN)
1319 1.1 nisimura ifp->if_timer = 0;
1320 1.1 nisimura }
1321 1.1 nisimura
1322 1.1 nisimura static void
1323 1.1 nisimura lnkchg(struct kse_softc *sc)
1324 1.1 nisimura {
1325 1.1 nisimura struct ifmediareq ifmr;
1326 1.1 nisimura
1327 1.42 nisimura #if KSE_LINKDEBUG == 1
1328 1.42 nisimura uint16_t p1sr = CSR_READ_2(sc, P1SR);
1329 1.42 nisimura printf("link %s detected\n", (p1sr & PxSR_LINKUP) ? "up" : "down");
1330 1.1 nisimura #endif
1331 1.42 nisimura kse_ifmedia_sts(&sc->sc_ethercom.ec_if, &ifmr);
1332 1.1 nisimura }
1333 1.1 nisimura
1334 1.1 nisimura static int
1335 1.42 nisimura kse_ifmedia_upd(struct ifnet *ifp)
1336 1.1 nisimura {
1337 1.1 nisimura struct kse_softc *sc = ifp->if_softc;
1338 1.42 nisimura struct ifmedia *ifm = &sc->sc_mii.mii_media;
1339 1.39 nisimura uint16_t p1cr4;
1340 1.42 nisimura
1341 1.39 nisimura p1cr4 = 0;
1342 1.39 nisimura if (IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_AUTO) {
1343 1.39 nisimura p1cr4 |= PxCR_STARTNEG; /* restart AN */
1344 1.39 nisimura p1cr4 |= PxCR_AUTOEN; /* enable AN */
1345 1.39 nisimura p1cr4 |= PxCR_USEFC; /* advertise flow control pause */
1346 1.42 nisimura p1cr4 |= 0xf; /* adv. 100FDX,100HDX,10FDX,10HDX */
1347 1.39 nisimura } else {
1348 1.39 nisimura if (IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_100_TX)
1349 1.39 nisimura p1cr4 |= PxCR_SPD100;
1350 1.1 nisimura if (ifm->ifm_media & IFM_FDX)
1351 1.39 nisimura p1cr4 |= PxCR_USEFDX;
1352 1.1 nisimura }
1353 1.39 nisimura CSR_WRITE_2(sc, P1CR4, p1cr4);
1354 1.42 nisimura #if KSE_LINKDEBUG == 1
1355 1.39 nisimura printf("P1CR4: %04x\n", p1cr4);
1356 1.39 nisimura #endif
1357 1.1 nisimura return 0;
1358 1.1 nisimura }
1359 1.1 nisimura
1360 1.1 nisimura static void
1361 1.42 nisimura kse_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1362 1.1 nisimura {
1363 1.1 nisimura struct kse_softc *sc = ifp->if_softc;
1364 1.42 nisimura struct mii_data *mii = &sc->sc_mii;
1365 1.1 nisimura
1366 1.42 nisimura mii_pollstat(mii);
1367 1.42 nisimura ifmr->ifm_status = mii->mii_media_status;
1368 1.56 nisimura ifmr->ifm_active = sc->sc_flowflags |
1369 1.56 nisimura (mii->mii_media_active & ~IFM_ETH_FMASK);
1370 1.1 nisimura }
1371 1.1 nisimura
1372 1.1 nisimura static void
1373 1.42 nisimura nopifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1374 1.40 nisimura {
1375 1.40 nisimura struct kse_softc *sc = ifp->if_softc;
1376 1.40 nisimura struct ifmedia *ifm = &sc->sc_media;
1377 1.40 nisimura
1378 1.42 nisimura #if KSE_LINKDEBUG == 2
1379 1.40 nisimura printf("p1sr: %04x, p2sr: %04x\n", CSR_READ_2(sc, P1SR), CSR_READ_2(sc, P2SR));
1380 1.40 nisimura #endif
1381 1.40 nisimura
1382 1.40 nisimura /* 8842 MAC pretends 100FDX all the time */
1383 1.40 nisimura ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
1384 1.42 nisimura ifmr->ifm_active = ifm->ifm_cur->ifm_media |
1385 1.42 nisimura IFM_FLOW | IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE;
1386 1.40 nisimura }
1387 1.40 nisimura
1388 1.40 nisimura static void
1389 1.1 nisimura phy_tick(void *arg)
1390 1.1 nisimura {
1391 1.1 nisimura struct kse_softc *sc = arg;
1392 1.42 nisimura struct mii_data *mii = &sc->sc_mii;
1393 1.1 nisimura int s;
1394 1.1 nisimura
1395 1.56 nisimura if (sc->sc_chip == 0x8841) {
1396 1.56 nisimura s = splnet();
1397 1.56 nisimura mii_tick(mii);
1398 1.56 nisimura splx(s);
1399 1.56 nisimura }
1400 1.56 nisimura #ifdef KSE_EVENT_COUNTERS
1401 1.56 nisimura stat_tick(arg);
1402 1.56 nisimura #endif
1403 1.42 nisimura callout_schedule(&sc->sc_tick_ch, hz);
1404 1.42 nisimura }
1405 1.42 nisimura
1406 1.42 nisimura static const uint16_t phy1csr[] = {
1407 1.42 nisimura /* 0 BMCR */ 0x4d0,
1408 1.42 nisimura /* 1 BMSR */ 0x4d2,
1409 1.42 nisimura /* 2 PHYID1 */ 0x4d6, /* 0x0022 - PHY1HR */
1410 1.42 nisimura /* 3 PHYID2 */ 0x4d4, /* 0x1430 - PHY1LR */
1411 1.42 nisimura /* 4 ANAR */ 0x4d8,
1412 1.42 nisimura /* 5 ANLPAR */ 0x4da,
1413 1.42 nisimura };
1414 1.42 nisimura
1415 1.42 nisimura int
1416 1.42 nisimura kse_mii_readreg(device_t self, int phy, int reg, uint16_t *val)
1417 1.42 nisimura {
1418 1.42 nisimura struct kse_softc *sc = device_private(self);
1419 1.42 nisimura
1420 1.42 nisimura if (phy != 1 || reg >= __arraycount(phy1csr) || reg < 0)
1421 1.42 nisimura return EINVAL;
1422 1.42 nisimura *val = CSR_READ_2(sc, phy1csr[reg]);
1423 1.42 nisimura return 0;
1424 1.42 nisimura }
1425 1.42 nisimura
1426 1.42 nisimura int
1427 1.42 nisimura kse_mii_writereg(device_t self, int phy, int reg, uint16_t val)
1428 1.42 nisimura {
1429 1.42 nisimura struct kse_softc *sc = device_private(self);
1430 1.42 nisimura
1431 1.42 nisimura if (phy != 1 || reg >= __arraycount(phy1csr) || reg < 0)
1432 1.42 nisimura return EINVAL;
1433 1.42 nisimura CSR_WRITE_2(sc, phy1csr[reg], val);
1434 1.42 nisimura return 0;
1435 1.42 nisimura }
1436 1.42 nisimura
/*
 * mii(4) status-change callback: pick up the result of flow-control
 * autonegotiation and propagate it into the MAC's TX/RX DMA control
 * registers (MDTXC/MDRXC).  PAUSE is enabled only in full-duplex.
 */
void
kse_mii_statchg(struct ifnet *ifp)
{
	struct kse_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

#if KSE_LINKDEBUG == 1
	/* decode P1SR register value */
	uint16_t p1sr = CSR_READ_2(sc, P1SR);
	printf("P1SR %04x, spd%d", p1sr, (p1sr & PxSR_SPD100) ? 100 : 10);
	if (p1sr & PxSR_FDX)
		printf(",full-duplex");
	if (p1sr & PxSR_RXFLOW)
		printf(",rxpause");
	if (p1sr & PxSR_TXFLOW)
		printf(",txpause");
	printf("\n");
	/* show resolved mii(4) parameters to compare against above */
	printf("MII spd%d",
	    (int)(sc->sc_ethercom.ec_if.if_baudrate / IF_Mbps(1)));
	if (mii->mii_media_active & IFM_FDX)
		printf(",full-duplex");
	if (mii->mii_media_active & IFM_FLOW) {
		printf(",flowcontrol");
		if (mii->mii_media_active & IFM_ETH_RXPAUSE)
			printf(",rxpause");
		if (mii->mii_media_active & IFM_ETH_TXPAUSE)
			printf(",txpause");
	}
	printf("\n");
#endif
	/* Get flow control negotiation result. */
	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags)
		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;

	/* Adjust MAC PAUSE flow control. */
	/* TX pause frames are honored only when full-duplex. */
	if ((mii->mii_media_active & IFM_FDX)
	    && (sc->sc_flowflags & IFM_ETH_TXPAUSE))
		sc->sc_txc |= TXC_FCE;
	else
		sc->sc_txc &= ~TXC_FCE;
	/* Likewise for generating RX pause frames. */
	if ((mii->mii_media_active & IFM_FDX)
	    && (sc->sc_flowflags & IFM_ETH_RXPAUSE))
		sc->sc_rxc |= RXC_FCE;
	else
		sc->sc_rxc &= ~RXC_FCE;
	/* Latch the updated control words into the MAC. */
	CSR_WRITE_4(sc, MDTXC, sc->sc_txc);
	CSR_WRITE_4(sc, MDRXC, sc->sc_rxc);
#if KSE_LINKDEBUG == 1
	printf("%ctxfce, %crxfce\n",
	    (sc->sc_txc & TXC_FCE) ? '+' : '-',
	    (sc->sc_rxc & RXC_FCE) ? '+' : '-');
#endif
}
1492 1.8 nisimura
1493 1.8 nisimura #ifdef KSE_EVENT_COUNTERS
/*
 * Harvest the per-port hardware MIB counters into the driver's
 * event counters.  Called from phy_tick() once a second so the
 * 30-bit hardware counters are drained before they can saturate.
 * The indirect-access sequence (IACR write, then IADR5/IADR4 reads)
 * is order-critical; do not reorder the CSR operations.
 */
static void
stat_tick(void *arg)
{
	struct kse_softc *sc = arg;
	struct ksext *ee = &sc->sc_ext;
	int nport, p, i, reg, val;

	/* KS8842 is a 3-port switch; KS8841 has a single port. */
	nport = (sc->sc_chip == 0x8842) ? 3 : 1;
	for (p = 0; p < nport; p++) {
		/* read 34 ev counters by indirect read via IACR */
		for (i = 0; i < 32; i++) {
			reg = EVCNTBR + p * 0x20 + i;
			CSR_WRITE_2(sc, IACR, reg);
			/* 30-bit counter value are halved in IADR5 & IADR4 */
			do {
				/* spin until the snapshot is latched */
				val = CSR_READ_2(sc, IADR5) << 16;
			} while ((val & IADR_LATCH) == 0);
			if (val & IADR_OVF) {
				/* dummy IADR4 read completes the access */
				(void)CSR_READ_2(sc, IADR4);
				val = 0x3fffffff; /* has made overflow */
			}
			else {
				val &= 0x3fff0000; /* 29:16 */
				val |= CSR_READ_2(sc, IADR4); /* 15:0 */
			}
			ee->pev[p][i].ev_count += val; /* ev0 thru 31 */
		}
		/* ev32 and ev33 are 16-bit counter */
		CSR_WRITE_2(sc, IACR, EVCNTBR + 0x100 + p);
		ee->pev[p][32].ev_count += CSR_READ_2(sc, IADR4); /* ev32 */
		/*
		 * NOTE(review): ev32 indexes with "p" while ev33 uses
		 * "p * 3 + 1" -- verify both against the KS8842 MIB
		 * register map; the asymmetry looks suspicious.
		 */
		CSR_WRITE_2(sc, IACR, EVCNTBR + 0x100 + p * 3 + 1);
		ee->pev[p][33].ev_count += CSR_READ_2(sc, IADR4); /* ev33 */
	}
}
1528 1.8 nisimura
/*
 * Clear both the hardware MIB counters and the corresponding driver
 * event counters.  The hardware counters are read-to-clear, so every
 * one is walked with the same indirect latch-poll sequence used by
 * stat_tick() and the value is discarded.
 */
static void
zerostats(struct kse_softc *sc)
{
	struct ksext *ee = &sc->sc_ext;
	int nport, p, i, reg, val;

	/* Make sure all the HW counters get zero */
	nport = (sc->sc_chip == 0x8842) ? 3 : 1;
	for (p = 0; p < nport; p++) {
		for (i = 0; i < 32; i++) {
			reg = EVCNTBR + p * 0x20 + i;
			CSR_WRITE_2(sc, IACR, reg);
			do {
				/* spin until the snapshot is latched */
				val = CSR_READ_2(sc, IADR5) << 16;
			} while ((val & IADR_LATCH) == 0);
			/* dummy IADR4 read completes (and clears) the access */
			(void)CSR_READ_2(sc, IADR4);
			ee->pev[p][i].ev_count = 0;
		}
		/* ev32 and ev33: 16-bit counters, same indexing as stat_tick() */
		CSR_WRITE_2(sc, IACR, EVCNTBR + 0x100 + p);
		(void)CSR_READ_2(sc, IADR4);
		CSR_WRITE_2(sc, IACR, EVCNTBR + 0x100 + p * 3 + 1);
		(void)CSR_READ_2(sc, IADR4);
		ee->pev[p][32].ev_count = 0;
		ee->pev[p][33].ev_count = 0;
	}
}
1555 1.8 nisimura #endif
1556