/* $NetBSD: if_admsw.c,v 1.3.4.2 2007/06/09 21:36:56 ad Exp $ */

/*-
 * Copyright (c) 2007 Ruslan Ermilov and Vsevolod Lobko.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
 * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
 * OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Device driver for the ADM5120 on-chip Ethernet switch engine.
 *
 * TODO:
 *
 *	Better Rx buffer management; we want to get new Rx buffers
 *	to the chip more quickly than we currently do.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_admsw.c,v 1.3.4.2 2007/06/09 21:36:56 ad Exp $");

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <prop/proplib.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <sys/gpio.h>
#include <dev/gpio/gpiovar.h>

#include <mips/adm5120/include/adm5120reg.h>
#include <mips/adm5120/include/adm5120var.h>
#include <mips/adm5120/include/adm5120_obiovar.h>
#include <mips/adm5120/dev/if_admswreg.h>
#include <mips/adm5120/dev/if_admswvar.h>

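/*
 * Default port-to-VLAN assignment.  Each byte is a bitmask of switch
 * ports belonging to one VLAN group: bit 6 is the CPU port and bits
 * 0-5 are the front-panel ports, so each of the SW_DEVS interfaces
 * gets a private VLAN containing the CPU port plus one external port.
 */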
static uint8_t vlan_matrix[SW_DEVS] = {
	(1 << 6) | (1 << 0),		/* CPU + port0 */
	(1 << 6) | (1 << 1),		/* CPU + port1 */
	(1 << 6) | (1 << 2),		/* CPU + port2 */
	(1 << 6) | (1 << 3),		/* CPU + port3 */
	(1 << 6) | (1 << 4),		/* CPU + port4 */
	(1 << 6) | (1 << 5),		/* CPU + port5 */
};

#ifdef ADMSW_EVENT_COUNTERS
#define	ADMSW_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define	ADMSW_EVCNT_INCR(ev)	/* nothing */
#endif

static void	admsw_start(struct ifnet *);
static void	admsw_watchdog(struct ifnet *);
static int	admsw_ioctl(struct ifnet *, u_long, void *);
static int	admsw_init(struct ifnet *);
static void	admsw_stop(struct ifnet *, int);

static void	admsw_shutdown(void *);

static void	admsw_reset(struct admsw_softc *);
static void	admsw_set_filter(struct admsw_softc *);

static int	admsw_intr(void *);
static void	admsw_txintr(struct admsw_softc *, int);
static void	admsw_rxintr(struct admsw_softc *, int);
static int	admsw_add_rxbuf(struct admsw_softc *, int, int);
#define	admsw_add_rxhbuf(sc, idx)	admsw_add_rxbuf(sc, idx, 1)
#define	admsw_add_rxlbuf(sc, idx)	admsw_add_rxbuf(sc, idx, 0)

static int	admsw_mediachange(struct ifnet *);
static void	admsw_mediastatus(struct ifnet *, struct ifmediareq *);

static int	admsw_match(struct device *, struct cfdata *, void *);
static void	admsw_attach(struct device *, struct device *, void *);

CFATTACH_DECL(admsw, sizeof(struct admsw_softc),
    admsw_match, admsw_attach, NULL, NULL);

static int
admsw_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct obio_attach_args *aa = aux;

	return strcmp(aa->oba_name, cf->cf_name) == 0;
}

#define	REG_READ(o)	bus_space_read_4(sc->sc_st, sc->sc_ioh, (o))
#define	REG_WRITE(o,v)	bus_space_write_4(sc->sc_st, sc->sc_ioh, (o),(v))

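/*
 * admsw_init_bufs:
 *
 *	Reset the four descriptor rings (high- and low-priority Tx and
 *	Rx): free any mbufs still hanging off the Tx rings, make sure
 *	every Rx slot has a cluster attached, and point the switch at
 *	the base of each ring.
 */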
static void
admsw_init_bufs(struct admsw_softc *sc)
{
	int i;
	struct admsw_desc *desc;

	for (i = 0; i < ADMSW_NTXHDESC; i++) {
		if (sc->sc_txhsoft[i].ds_mbuf != NULL) {
			m_freem(sc->sc_txhsoft[i].ds_mbuf);
			sc->sc_txhsoft[i].ds_mbuf = NULL;
		}
		desc = &sc->sc_txhdescs[i];
		desc->data = 0;
		desc->cntl = 0;
		desc->len = MAC_BUFLEN;
		desc->status = 0;
		ADMSW_CDTXHSYNC(sc, i,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->sc_txhdescs[ADMSW_NTXHDESC - 1].data |= ADM5120_DMA_RINGEND;
	ADMSW_CDTXHSYNC(sc, ADMSW_NTXHDESC - 1,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	for (i = 0; i < ADMSW_NRXHDESC; i++) {
		if (sc->sc_rxhsoft[i].ds_mbuf == NULL) {
			if (admsw_add_rxhbuf(sc, i) != 0)
				panic("admsw_init_bufs\n");
		} else
			ADMSW_INIT_RXHDESC(sc, i);
	}

	for (i = 0; i < ADMSW_NTXLDESC; i++) {
		if (sc->sc_txlsoft[i].ds_mbuf != NULL) {
			m_freem(sc->sc_txlsoft[i].ds_mbuf);
			sc->sc_txlsoft[i].ds_mbuf = NULL;
		}
		desc = &sc->sc_txldescs[i];
		desc->data = 0;
		desc->cntl = 0;
		desc->len = MAC_BUFLEN;
		desc->status = 0;
		ADMSW_CDTXLSYNC(sc, i,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->sc_txldescs[ADMSW_NTXLDESC - 1].data |= ADM5120_DMA_RINGEND;
	ADMSW_CDTXLSYNC(sc, ADMSW_NTXLDESC - 1,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	for (i = 0; i < ADMSW_NRXLDESC; i++) {
		if (sc->sc_rxlsoft[i].ds_mbuf == NULL) {
			if (admsw_add_rxlbuf(sc, i) != 0)
				panic("admsw_init_bufs\n");
		} else
			ADMSW_INIT_RXLDESC(sc, i);
	}

	REG_WRITE(SEND_HBADDR_REG, ADMSW_CDTXHADDR(sc, 0));
	REG_WRITE(SEND_LBADDR_REG, ADMSW_CDTXLADDR(sc, 0));
	REG_WRITE(RECV_HBADDR_REG, ADMSW_CDRXHADDR(sc, 0));
	REG_WRITE(RECV_LBADDR_REG, ADMSW_CDRXLADDR(sc, 0));

	sc->sc_txfree = ADMSW_NTXLDESC;
	sc->sc_txnext = 0;
	sc->sc_txdirty = 0;
	sc->sc_rxptr = 0;
}

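/*
 * admsw_setvlan:
 *
 *	Load a six-entry port-to-VLAN matrix into the switch: entries
 *	0-3 are packed into VLAN_G1_REG and entries 4-5 into
 *	VLAN_G2_REG, one byte per VLAN group.
 */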
static void
admsw_setvlan(struct admsw_softc *sc, char matrix[6])
{
	uint32_t i;

	i = matrix[0] + (matrix[1] << 8) + (matrix[2] << 16) + (matrix[3] << 24);
	REG_WRITE(VLAN_G1_REG, i);
	i = matrix[4] + (matrix[5] << 8);
	REG_WRITE(VLAN_G2_REG, i);
}

static void
admsw_reset(struct admsw_softc *sc)
{
	uint32_t wdog1;
	int i;

	REG_WRITE(PORT_CONF0_REG,
	    REG_READ(PORT_CONF0_REG) | PORT_CONF0_DP_MASK);
	REG_WRITE(CPUP_CONF_REG,
	    REG_READ(CPUP_CONF_REG) | CPUP_CONF_DCPUP);

	/* Wait for DMA to complete.  Overkill.  In 3ms, we can
	 * send at least two entire 1500-byte packets at 10 Mb/s.
	 */
	DELAY(3000);

	/* The datasheet recommends that we move all PHYs to reset
	 * state prior to software reset.
	 */
	REG_WRITE(PHY_CNTL2_REG,
	    REG_READ(PHY_CNTL2_REG) & ~PHY_CNTL2_PHYR_MASK);

	/* Reset the switch. */
	REG_WRITE(ADMSW_SW_RES, 0x1);

	DELAY(100 * 1000);

	REG_WRITE(ADMSW_BOOT_DONE, ADMSW_BOOT_DONE_BO);

	/* begin old code */
	REG_WRITE(CPUP_CONF_REG,
	    CPUP_CONF_DCPUP | CPUP_CONF_CRCP | CPUP_CONF_DUNP_MASK |
	    CPUP_CONF_DMCP_MASK);

	REG_WRITE(PORT_CONF0_REG, PORT_CONF0_EMCP_MASK | PORT_CONF0_EMBP_MASK);

	REG_WRITE(PHY_CNTL2_REG,
	    REG_READ(PHY_CNTL2_REG) | PHY_CNTL2_ANE_MASK | PHY_CNTL2_PHYR_MASK |
	    PHY_CNTL2_AMDIX_MASK);

	REG_WRITE(PHY_CNTL3_REG, REG_READ(PHY_CNTL3_REG) | PHY_CNTL3_RNT);

	REG_WRITE(ADMSW_INT_MASK, INT_MASK);
	REG_WRITE(ADMSW_INT_ST, INT_MASK);

	/*
	 * While we are in DDB we stop servicing interrupts, so the
	 * Rx ring fills up; once the free-block counter falls below
	 * the flow-control threshold, the switch starts to emit
	 * 802.3x PAUSE frames.  This can upset peer switches.
	 *
	 * Stop this from happening by disabling the FC and D2
	 * thresholds.
	 */
	REG_WRITE(FC_TH_REG,
	    REG_READ(FC_TH_REG) & ~(FC_TH_FCS_MASK | FC_TH_D2S_MASK));

	admsw_setvlan(sc, vlan_matrix);

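	/*
	 * Give each VLAN its own unicast MAC address: the base station
	 * address with the last octet incremented by the VLAN index,
	 * written through the MAC_WT0/MAC_WT1 register pair.
	 */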
	for (i = 0; i < SW_DEVS; i++) {
		REG_WRITE(MAC_WT1_REG,
		    sc->sc_enaddr[2] |
		    (sc->sc_enaddr[3]<<8) |
		    (sc->sc_enaddr[4]<<16) |
		    ((sc->sc_enaddr[5]+i)<<24));
		REG_WRITE(MAC_WT0_REG, (i<<MAC_WT0_VLANID_SHIFT) |
		    (sc->sc_enaddr[0]<<16) | (sc->sc_enaddr[1]<<24) |
		    MAC_WT0_WRITE | MAC_WT0_VLANID_EN);

		while (!(REG_READ(MAC_WT0_REG) & MAC_WT0_WRITE_DONE));
	}
	wdog1 = REG_READ(ADM5120_WDOG1);
	REG_WRITE(ADM5120_WDOG1, wdog1 & ~ADM5120_WDOG1_WDE);
}

static void
admsw_attach(struct device *parent, struct device *self, void *aux)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	struct admsw_softc *sc = (void *) self;
	struct obio_attach_args *aa = aux;
	struct ifnet *ifp;
	bus_dma_segment_t seg;
	int error, i, rseg;
	prop_data_t pd;

	printf(": ADM5120 Switch Engine, %d ports\n", SW_DEVS);

	sc->sc_dmat = aa->oba_dt;
	sc->sc_st = aa->oba_st;

	pd = prop_dictionary_get(device_properties(&sc->sc_dev), "mac-addr");

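	/*
	 * If no "mac-addr" device property was supplied, fall back to
	 * a fixed, locally administered address; the per-port
	 * addresses are derived from it below.
	 */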
	if (pd == NULL) {
		enaddr[0] = 0x02;
		enaddr[1] = 0xaa;
		enaddr[2] = 0xbb;
		enaddr[3] = 0xcc;
		enaddr[4] = 0xdd;
		enaddr[5] = 0xee;
	} else
		memcpy(enaddr, prop_data_data_nocopy(pd), sizeof(enaddr));

	memcpy(sc->sc_enaddr, enaddr, sizeof(sc->sc_enaddr));

	printf("%s: base Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/* Map the device. */
	if (bus_space_map(sc->sc_st, aa->oba_addr, 512, 0, &sc->sc_ioh) != 0) {
		printf("%s: unable to map device\n", device_xname(&sc->sc_dev));
		return;
	}

	/* Hook up the interrupt handler. */
	sc->sc_ih = adm5120_intr_establish(aa->oba_irq, INTR_IRQ, admsw_intr, sc);

	if (sc->sc_ih == NULL) {
		printf("%s: unable to register interrupt handler\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct admsw_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return;
	}
	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct admsw_control_data), (void *)&sc->sc_control_data,
	    0)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return;
	}
	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct admsw_control_data), 1,
	    sizeof(struct admsw_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		return;
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct admsw_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < ADMSW_NTXHDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    2, MCLBYTES, 0, 0,
		    &sc->sc_txhsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create txh DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			return;
		}
		sc->sc_txhsoft[i].ds_mbuf = NULL;
	}
	for (i = 0; i < ADMSW_NTXLDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    2, MCLBYTES, 0, 0,
		    &sc->sc_txlsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create txl DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			return;
		}
		sc->sc_txlsoft[i].ds_mbuf = NULL;
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < ADMSW_NRXHDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxhsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create rxh DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			return;
		}
		sc->sc_rxhsoft[i].ds_mbuf = NULL;
	}
	for (i = 0; i < ADMSW_NRXLDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxlsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create rxl DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			return;
		}
		sc->sc_rxlsoft[i].ds_mbuf = NULL;
	}

	admsw_init_bufs(sc);

	admsw_reset(sc);

	for (i = 0; i < SW_DEVS; i++) {
		ifmedia_init(&sc->sc_ifmedia[i], 0, admsw_mediachange, admsw_mediastatus);
		ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_10_T, 0, NULL);
		ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
		ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_100_TX, 0, NULL);
		ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
		ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_AUTO, 0, NULL);
		ifmedia_set(&sc->sc_ifmedia[i], IFM_ETHER|IFM_AUTO);

		ifp = &sc->sc_ethercom[i].ec_if;
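		/*
		 * Name the per-port interfaces after the switch device
		 * itself by bumping the unit digit: "admsw0", "admsw1",
		 * ...  XXX this assumes a single-digit unit number at
		 * index 5 of dv_xname.
		 */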
		strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
		ifp->if_xname[5] += i;
		ifp->if_softc = sc;
		ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
		ifp->if_ioctl = admsw_ioctl;
		ifp->if_start = admsw_start;
		ifp->if_watchdog = admsw_watchdog;
		ifp->if_init = admsw_init;
		ifp->if_stop = admsw_stop;
		ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx;
		IFQ_SET_MAXLEN(&ifp->if_snd, max(ADMSW_NTXLDESC, IFQ_MAXLEN));
		IFQ_SET_READY(&ifp->if_snd);

		/* Attach the interface. */
		if_attach(ifp);
		ether_ifattach(ifp, enaddr);
		enaddr[5]++;
	}

#ifdef ADMSW_EVENT_COUNTERS
	evcnt_attach_dynamic(&sc->sc_ev_txstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txstall");
	evcnt_attach_dynamic(&sc->sc_ev_rxstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxstall");
	evcnt_attach_dynamic(&sc->sc_ev_txintr, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txintr");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
#if 1
	evcnt_attach_dynamic(&sc->sc_ev_rxsync, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxsync");
#endif
#endif

	admwdog_attach(sc);

	/* Make sure the interface is shutdown during reboot. */
	sc->sc_sdhook = shutdownhook_establish(admsw_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);

	/* leave interrupts and cpu port disabled */
	return;
}


/*
 * admsw_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
static void
admsw_shutdown(void *arg)
{
	struct admsw_softc *sc = arg;
	int i;

	for (i = 0; i < SW_DEVS; i++)
		admsw_stop(&sc->sc_ethercom[i].ec_if, 1);
}

/*
 * admsw_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
admsw_start(struct ifnet *ifp)
{
	struct admsw_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct admsw_descsoft *ds;
	struct admsw_desc *desc;
	bus_dmamap_t dmamap;
	struct ether_header *eh;
	int error, nexttx, len, i;
	static int vlan = 0;

	/*
	 * Loop through the send queues, setting up transmit descriptors
	 * until we drain the queues or use up all available transmit
	 * descriptors.
	 */
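	/*
	 * The per-port send queues are serviced round-robin: resume
	 * with the VLAN after the one serviced last time, and scan all
	 * SW_DEVS queues for a port that is running and has a packet
	 * pending before giving up.
	 */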
	for (;;) {
		vlan++;
		if (vlan == SW_DEVS)
			vlan = 0;
		i = vlan;
		for (;;) {
			ifp = &sc->sc_ethercom[i].ec_if;
			if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) ==
			    IFF_RUNNING) {
				/* Grab a packet off the queue. */
				IFQ_POLL(&ifp->if_snd, m0);
				if (m0 != NULL)
					break;
			}
			i++;
			if (i == SW_DEVS)
				i = 0;
			if (i == vlan)
				return;
		}
		vlan = i;
		m = NULL;

		/* Get a spare descriptor. */
		if (sc->sc_txfree == 0) {
			/* No more slots left; notify upper layer. */
			ifp->if_flags |= IFF_OACTIVE;
			ADMSW_EVCNT_INCR(&sc->sc_ev_txstall);
			break;
		}
		nexttx = sc->sc_txnext;
		desc = &sc->sc_txldescs[nexttx];
		ds = &sc->sc_txlsoft[nexttx];
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  In this case, we'll copy
		 * and try again.
		 */
		if (m0->m_pkthdr.len < ETHER_MIN_LEN ||
		    bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}
			m->m_pkthdr.csum_flags = m0->m_pkthdr.csum_flags;
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			if (m->m_pkthdr.len < ETHER_MIN_LEN) {
				if (M_TRAILINGSPACE(m) < ETHER_MIN_LEN - m->m_pkthdr.len)
					panic("admsw_start: M_TRAILINGSPACE\n");
				memset(mtod(m, uint8_t *) + m->m_pkthdr.len, 0,
				    ETHER_MIN_LEN - ETHER_CRC_LEN - m->m_pkthdr.len);
				m->m_pkthdr.len = m->m_len = ETHER_MIN_LEN;
			}
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, error);
				break;
			}
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		if (dmamap->dm_nsegs != 1 && dmamap->dm_nsegs != 2)
			panic("admsw_start: dm_nsegs == %d\n", dmamap->dm_nsegs);
		desc->data = dmamap->dm_segs[0].ds_addr;
		desc->len = len = dmamap->dm_segs[0].ds_len;
		if (dmamap->dm_nsegs > 1) {
			len += dmamap->dm_segs[1].ds_len;
			desc->cntl = dmamap->dm_segs[1].ds_addr | ADM5120_DMA_BUF2ENABLE;
		} else
			desc->cntl = 0;
		desc->status = (len << ADM5120_DMA_LENSHIFT) | (1 << vlan);
		eh = mtod(m0, struct ether_header *);
		if (ntohs(eh->ether_type) == ETHERTYPE_IP &&
		    m0->m_pkthdr.csum_flags & M_CSUM_IPv4)
			desc->status |= ADM5120_DMA_CSUM;
		if (nexttx == ADMSW_NTXLDESC - 1)
			desc->data |= ADM5120_DMA_RINGEND;
		desc->data |= ADM5120_DMA_OWN;

		/* Sync the descriptor. */
		ADMSW_CDTXLSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		REG_WRITE(SEND_TRIG_REG, 1);
		/* printf("send slot %d\n",nexttx); */

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/* Advance the Tx pointer. */
		sc->sc_txfree--;
		sc->sc_txnext = ADMSW_NEXTTXL(nexttx);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER */

		/* Set a watchdog timer in case the chip flakes out. */
		sc->sc_ethercom[0].ec_if.if_timer = 5;
	}
}

/*
 * admsw_watchdog:	[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
static void
admsw_watchdog(struct ifnet *ifp)
{
	struct admsw_softc *sc = ifp->if_softc;
	int vlan;

#if 1
	/* Check if an interrupt was lost. */
	if (sc->sc_txfree == ADMSW_NTXLDESC) {
		printf("%s: watchdog false alarm\n", sc->sc_dev.dv_xname);
		return;
	}
	if (sc->sc_ethercom[0].ec_if.if_timer != 0)
		printf("%s: watchdog timer is %d!\n", sc->sc_dev.dv_xname,
		    sc->sc_ethercom[0].ec_if.if_timer);
	admsw_txintr(sc, 0);
	if (sc->sc_txfree == ADMSW_NTXLDESC) {
		printf("%s: tx IRQ lost (queue empty)\n", sc->sc_dev.dv_xname);
		return;
	}
	if (sc->sc_ethercom[0].ec_if.if_timer != 0) {
		printf("%s: tx IRQ lost (timer recharged)\n",
		    sc->sc_dev.dv_xname);
		return;
	}
#endif

	printf("%s: device timeout, txfree = %d\n",
	    sc->sc_dev.dv_xname, sc->sc_txfree);
	for (vlan = 0; vlan < SW_DEVS; vlan++)
		admsw_stop(&sc->sc_ethercom[vlan].ec_if, 0);
	for (vlan = 0; vlan < SW_DEVS; vlan++)
		(void) admsw_init(&sc->sc_ethercom[vlan].ec_if);

	/* Try to get more packets going. */
	admsw_start(ifp);
}

/*
 * admsw_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
static int
admsw_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct admsw_softc *sc = ifp->if_softc;
	struct ifdrv *ifd;
	int s, error, port;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		port = (struct ethercom *)ifp - sc->sc_ethercom;	/* XXX */
		if (port >= SW_DEVS)
			error = EOPNOTSUPP;
		else
			error = ifmedia_ioctl(ifp, (struct ifreq *)data,
			    &sc->sc_ifmedia[port], cmd);
		break;

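	/*
	 * SIOCGDRVSPEC/SIOCSDRVSPEC expose the port-to-VLAN matrix to
	 * userland: a read returns the current six-byte vlan_matrix,
	 * and a write replaces it and reprograms the switch.
	 */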
	case SIOCGDRVSPEC:
	case SIOCSDRVSPEC:
		ifd = (struct ifdrv *) data;
		if (ifd->ifd_cmd != 0 || ifd->ifd_len != sizeof(vlan_matrix)) {
			error = EINVAL;
			break;
		}
		if (cmd == SIOCGDRVSPEC) {
			error = copyout(vlan_matrix, ifd->ifd_data,
			    sizeof(vlan_matrix));
		} else {
			error = copyin(ifd->ifd_data, vlan_matrix,
			    sizeof(vlan_matrix));
			if (error == 0)
				admsw_setvlan(sc, vlan_matrix);
		}
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			admsw_set_filter(sc);
			error = 0;
		}
		break;
	}

	/* Try to get more packets going. */
	admsw_start(ifp);

	splx(s);
	return (error);
}


/*
 * admsw_intr:
 *
 *	Interrupt service routine.
 */
static int
admsw_intr(void *arg)
{
	struct admsw_softc *sc = arg;
	uint32_t pending;
	char buf[64];

	pending = REG_READ(ADMSW_INT_ST);

	if ((pending & ~(ADMSW_INTR_RHD|ADMSW_INTR_RLD|ADMSW_INTR_SHD|
	    ADMSW_INTR_SLD|ADMSW_INTR_W1TE|ADMSW_INTR_W0TE)) != 0) {
		printf("%s: pending=%s\n", __func__,
		    bitmask_snprintf(pending, ADMSW_INT_FMT, buf, sizeof(buf)));
	}
	REG_WRITE(ADMSW_INT_ST, pending);

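	/*
	 * If no interfaces are running, the descriptor rings are not
	 * in use; the status bits were acknowledged above, so there is
	 * nothing further to service.
	 */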
	if (sc->ndevs == 0)
		return (0);

	if ((pending & ADMSW_INTR_RHD) != 0)
		admsw_rxintr(sc, 1);

	if ((pending & ADMSW_INTR_RLD) != 0)
		admsw_rxintr(sc, 0);

	if ((pending & ADMSW_INTR_SHD) != 0)
		admsw_txintr(sc, 1);

	if ((pending & ADMSW_INTR_SLD) != 0)
		admsw_txintr(sc, 0);

	return (1);
}

/*
 * admsw_txintr:
 *
 *	Helper; handle transmit interrupts.
 */
static void
admsw_txintr(struct admsw_softc *sc, int prio)
{
	struct ifnet *ifp;
	struct admsw_desc *desc;
	struct admsw_descsoft *ds;
	int i, vlan;
	int gotone = 0;

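	/*
	 * Reclaim completed transmit descriptors: walk forward from
	 * sc_txdirty until we find a descriptor the switch still owns,
	 * unloading and freeing each transmitted mbuf and crediting
	 * the packet to the VLAN recorded in the low bits of the
	 * status word.
	 */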
	/* printf("txintr: txdirty: %d, txfree: %d\n",sc->sc_txdirty, sc->sc_txfree); */
	for (i = sc->sc_txdirty; sc->sc_txfree != ADMSW_NTXLDESC;
	    i = ADMSW_NEXTTXL(i)) {

		ADMSW_CDTXLSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		desc = &sc->sc_txldescs[i];
		ds = &sc->sc_txlsoft[i];
		if (desc->data & ADM5120_DMA_OWN) {
			ADMSW_CDTXLSYNC(sc, i,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
		    0, ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;

		vlan = ffs(desc->status & 0x3f) - 1;
		if (vlan < 0 || vlan >= SW_DEVS)
			panic("admsw_txintr: bad vlan\n");
		ifp = &sc->sc_ethercom[vlan].ec_if;
		gotone = 1;
		/* printf("clear tx slot %d\n",i); */

		ifp->if_opackets++;

		sc->sc_txfree++;
	}

	if (gotone) {
		sc->sc_txdirty = i;
#ifdef ADMSW_EVENT_COUNTERS
		ADMSW_EVCNT_INCR(&sc->sc_ev_txintr);
#endif
		for (vlan = 0; vlan < SW_DEVS; vlan++)
			sc->sc_ethercom[vlan].ec_if.if_flags &= ~IFF_OACTIVE;

		ifp = &sc->sc_ethercom[0].ec_if;

		/* Try to queue more packets. */
		admsw_start(ifp);

		/*
		 * If there are no more pending transmissions,
		 * cancel the watchdog timer.
		 */
		if (sc->sc_txfree == ADMSW_NTXLDESC)
			ifp->if_timer = 0;

	}

	/* printf("txintr end: txdirty: %d, txfree: %d\n",sc->sc_txdirty, sc->sc_txfree); */
}

/*
 * admsw_rxintr:
 *
 *	Helper; handle receive interrupts.
 */
static void
admsw_rxintr(struct admsw_softc *sc, int high)
{
	struct ifnet *ifp;
	struct admsw_descsoft *ds;
	struct mbuf *m;
	uint32_t stat;
	int i, len, port, vlan;

	/* printf("rxintr\n"); */
	if (high)
		panic("admsw_rxintr: high priority packet\n");

#ifdef ADMSW_EVENT_COUNTERS
	int pkts = 0;
#endif

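	/*
	 * Before processing, check whether our notion of the ring head
	 * still matches the hardware.  If the descriptor at sc_rxptr
	 * is still owned by the switch but a later descriptor has
	 * completed, we have fallen out of step with the chip; scan
	 * forward for the first completed descriptor and resynchronize
	 * sc_rxptr to it.
	 */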
#if 1
	ADMSW_CDRXLSYNC(sc, sc->sc_rxptr,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	if ((sc->sc_rxldescs[sc->sc_rxptr].data & ADM5120_DMA_OWN) == 0)
		ADMSW_CDRXLSYNC(sc, sc->sc_rxptr,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	else {
		i = sc->sc_rxptr;
		do {
			ADMSW_CDRXLSYNC(sc, i,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			i = ADMSW_NEXTRXL(i);
			/* the ring is empty, just return. */
			if (i == sc->sc_rxptr)
				return;
			ADMSW_CDRXLSYNC(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		} while (sc->sc_rxldescs[i].data & ADM5120_DMA_OWN);
		ADMSW_CDRXLSYNC(sc, i,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		ADMSW_CDRXLSYNC(sc, sc->sc_rxptr,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		if ((sc->sc_rxldescs[sc->sc_rxptr].data & ADM5120_DMA_OWN) == 0)
			ADMSW_CDRXLSYNC(sc, sc->sc_rxptr,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		else {
			ADMSW_CDRXLSYNC(sc, sc->sc_rxptr,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			/* We've fallen behind the chip: catch it. */
			printf("%s: RX ring resync, base=%x, work=%x, %d -> %d\n",
			    sc->sc_dev.dv_xname, REG_READ(RECV_LBADDR_REG),
			    REG_READ(RECV_LWADDR_REG), sc->sc_rxptr, i);
			sc->sc_rxptr = i;
			ADMSW_EVCNT_INCR(&sc->sc_ev_rxsync);
		}
	}
#endif
	for (i = sc->sc_rxptr;; i = ADMSW_NEXTRXL(i)) {
		ds = &sc->sc_rxlsoft[i];

		ADMSW_CDRXLSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		if (sc->sc_rxldescs[i].data & ADM5120_DMA_OWN) {
			ADMSW_CDRXLSYNC(sc, i,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			break;
		}

		/* printf("process slot %d\n",i); */

#ifdef ADMSW_EVENT_COUNTERS
		pkts++;
#endif

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		stat = sc->sc_rxldescs[i].status;
		len = (stat & ADM5120_DMA_LEN) >> ADM5120_DMA_LENSHIFT;
		len -= ETHER_CRC_LEN;
		port = (stat & ADM5120_DMA_PORTID) >> ADM5120_DMA_PORTSHIFT;
		for (vlan = 0; vlan < SW_DEVS; vlan++)
			if ((1 << port) & vlan_matrix[vlan])
				break;
		if (vlan == SW_DEVS)
			vlan = 0;
		ifp = &sc->sc_ethercom[vlan].ec_if;

		m = ds->ds_mbuf;
		if (admsw_add_rxlbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			ADMSW_INIT_RXLDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;
		if ((stat & ADM5120_DMA_TYPE) == ADM5120_DMA_TYPE_IP) {
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (stat & ADM5120_DMA_CSUMFAIL)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
		}
#if NBPFILTER > 0
		/* Pass this up to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
		ifp->if_ipackets++;
	}
#ifdef ADMSW_EVENT_COUNTERS
	if (pkts)
		ADMSW_EVCNT_INCR(&sc->sc_ev_rxintr);

	if (pkts == ADMSW_NRXLDESC)
		ADMSW_EVCNT_INCR(&sc->sc_ev_rxstall);
#endif

	/* Update the receive pointer. */
	sc->sc_rxptr = i;
}

/*
 * admsw_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 */
static int
admsw_init(struct ifnet *ifp)
{
	struct admsw_softc *sc = ifp->if_softc;

	/* printf("admsw_init called\n"); */

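	/*
	 * The switch hardware is shared by all SW_DEVS interfaces:
	 * only the first interface to come up initializes the rings
	 * and enables the CPU port and interrupts; sc->ndevs counts
	 * the interfaces that are currently running.
	 */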
	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		if (sc->ndevs == 0) {
			admsw_init_bufs(sc);
			admsw_reset(sc);
			REG_WRITE(CPUP_CONF_REG,
			    CPUP_CONF_CRCP | CPUP_CONF_DUNP_MASK |
			    CPUP_CONF_DMCP_MASK);
			/* clear all pending interrupts */
			REG_WRITE(ADMSW_INT_ST, INT_MASK);

			/* enable needed interrupts */
			REG_WRITE(ADMSW_INT_MASK, REG_READ(ADMSW_INT_MASK) &
			    ~(ADMSW_INTR_SHD | ADMSW_INTR_SLD | ADMSW_INTR_RHD |
			    ADMSW_INTR_RLD | ADMSW_INTR_HDF | ADMSW_INTR_LDF));
		}
		sc->ndevs++;
	}

	/* Set the receive filter. */
	admsw_set_filter(sc);

	/* mark iface as running */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

/*
 * admsw_stop:		[ifnet interface function]
 *
 *	Stop transmission on the interface.
 */
static void
admsw_stop(struct ifnet *ifp, int disable)
{
	struct admsw_softc *sc = ifp->if_softc;

	/* printf("admsw_stop: %d\n",disable); */

	if (!(ifp->if_flags & IFF_RUNNING))
		return;

	if (--sc->ndevs == 0) {
		/* printf("debug: de-initializing hardware\n"); */

		/* disable cpu port */
		REG_WRITE(CPUP_CONF_REG,
		    CPUP_CONF_DCPUP | CPUP_CONF_CRCP |
		    CPUP_CONF_DUNP_MASK | CPUP_CONF_DMCP_MASK);

		/* XXX We should disable, then clear? --dyoung */
		/* clear all pending interrupts */
		REG_WRITE(ADMSW_INT_ST, INT_MASK);

		/* disable interrupts */
		REG_WRITE(ADMSW_INT_MASK, INT_MASK);
	}

	/* Mark the interface as down and cancel the watchdog timer. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	return;
}

/*
 * admsw_set_filter:
 *
 *	Set up the receive filter.
 */
static void
admsw_set_filter(struct admsw_softc *sc)
{
	int i;
	uint32_t allmc, anymc, conf, promisc;
	struct ether_multi *enm;
	struct ethercom *ec;
	struct ifnet *ifp;
	struct ether_multistep step;

	/* Find which ports should be operated in promisc mode. */
	allmc = anymc = promisc = 0;
	for (i = 0; i < SW_DEVS; i++) {
		ec = &sc->sc_ethercom[i];
		ifp = &ec->ec_if;
		if (ifp->if_flags & IFF_PROMISC)
			promisc |= vlan_matrix[i];

		ifp->if_flags &= ~IFF_ALLMULTI;

		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN) != 0) {
				printf("%s: punting on mcast range\n",
				    __func__);
				ifp->if_flags |= IFF_ALLMULTI;
				allmc |= vlan_matrix[i];
				break;
			}

			anymc |= vlan_matrix[i];

#if 0
			/* XXX extract subroutine --dyoung */
			REG_WRITE(MAC_WT1_REG,
			    enm->enm_addrlo[2] |
			    (enm->enm_addrlo[3] << 8) |
			    (enm->enm_addrlo[4] << 16) |
			    (enm->enm_addrlo[5] << 24));
			REG_WRITE(MAC_WT0_REG,
			    (i << MAC_WT0_VLANID_SHIFT) |
			    (enm->enm_addrlo[0] << 16) |
			    (enm->enm_addrlo[1] << 24) |
			    MAC_WT0_WRITE | MAC_WT0_VLANID_EN);
			/* timeout? */
			while (!(REG_READ(MAC_WT0_REG) & MAC_WT0_WRITE_DONE));
#endif

			/* load h/w with mcast address, port = CPU */
			ETHER_NEXT_MULTI(step, enm);
		}
	}

	conf = REG_READ(CPUP_CONF_REG);
	/* 1. Disable forwarding of unknown and multicast packets to
	 *    the CPU on all ports.
	 * 2. Enable forwarding of unknown and multicast packets to
	 *    the CPU on ports where IFF_PROMISC or IFF_ALLMULTI is set.
	 */
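	/*
	 * XXX The enables below are applied with XOR, so a port that
	 * appears in both allmc and anymc is toggled twice and ends up
	 * with multicast forwarding to the CPU disabled again.
	 */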
	conf |= CPUP_CONF_DUNP_MASK | CPUP_CONF_DMCP_MASK;
	/* Enable forwarding of unknown packets to CPU on selected ports. */
	conf ^= ((promisc << CPUP_CONF_DUNP_SHIFT) & CPUP_CONF_DUNP_MASK);
	conf ^= ((allmc << CPUP_CONF_DMCP_SHIFT) & CPUP_CONF_DMCP_MASK);
	conf ^= ((anymc << CPUP_CONF_DMCP_SHIFT) & CPUP_CONF_DMCP_MASK);
	REG_WRITE(CPUP_CONF_REG, conf);
}

/*
 * admsw_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
int
admsw_add_rxbuf(struct admsw_softc *sc, int idx, int high)
{
	struct admsw_descsoft *ds;
	struct mbuf *m;
	int error;

	if (high)
		ds = &sc->sc_rxhsoft[idx];
	else
		ds = &sc->sc_rxlsoft[idx];

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("admsw_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	if (high)
		ADMSW_INIT_RXHDESC(sc, idx);
	else
		ADMSW_INIT_RXLDESC(sc, idx);

	return (0);
}

int
admsw_mediachange(struct ifnet *ifp)
{
	struct admsw_softc *sc = ifp->if_softc;
	int port = (struct ethercom *)ifp - sc->sc_ethercom;	/* XXX */
	struct ifmedia *ifm = &sc->sc_ifmedia[port];
	int old, new, val;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
		val = PHY_CNTL2_AUTONEG|PHY_CNTL2_100M|PHY_CNTL2_FDX;
	} else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) {
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			val = PHY_CNTL2_100M|PHY_CNTL2_FDX;
		else
			val = PHY_CNTL2_100M;
	} else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_10_T) {
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			val = PHY_CNTL2_FDX;
		else
			val = 0;
	} else
		return (EINVAL);

	old = REG_READ(PHY_CNTL2_REG);
	new = old & ~((PHY_CNTL2_AUTONEG|PHY_CNTL2_100M|PHY_CNTL2_FDX) << port);
	new |= (val << port);

	if (new != old)
		REG_WRITE(PHY_CNTL2_REG, new);

	return (0);
}

void
admsw_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct admsw_softc *sc = ifp->if_softc;
	int port = (struct ethercom *)ifp - sc->sc_ethercom;	/* XXX */
	int status;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	status = REG_READ(PHY_ST_REG) >> port;

	if ((status & PHY_ST_LINKUP) == 0) {
		ifmr->ifm_active |= IFM_NONE;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	ifmr->ifm_active |= (status & PHY_ST_100M) ? IFM_100_TX : IFM_10_T;
	if (status & PHY_ST_FDX)
		ifmr->ifm_active |= IFM_FDX;
}