if_emac.c revision 1.1.8.4 1 1.1.8.4 nathanw /* $NetBSD: if_emac.c,v 1.1.8.4 2002/08/27 23:45:10 nathanw Exp $ */
2 1.1.8.2 nathanw
3 1.1.8.2 nathanw /*
4 1.1.8.3 nathanw * Copyright 2001, 2002 Wasabi Systems, Inc.
5 1.1.8.2 nathanw * All rights reserved.
6 1.1.8.2 nathanw *
7 1.1.8.3 nathanw * Written by Simon Burge and Jason Thorpe for Wasabi Systems, Inc.
8 1.1.8.2 nathanw *
9 1.1.8.2 nathanw * Redistribution and use in source and binary forms, with or without
10 1.1.8.2 nathanw * modification, are permitted provided that the following conditions
11 1.1.8.2 nathanw * are met:
12 1.1.8.2 nathanw * 1. Redistributions of source code must retain the above copyright
13 1.1.8.2 nathanw * notice, this list of conditions and the following disclaimer.
14 1.1.8.2 nathanw * 2. Redistributions in binary form must reproduce the above copyright
15 1.1.8.2 nathanw * notice, this list of conditions and the following disclaimer in the
16 1.1.8.2 nathanw * documentation and/or other materials provided with the distribution.
17 1.1.8.2 nathanw * 3. All advertising materials mentioning features or use of this software
18 1.1.8.2 nathanw * must display the following acknowledgement:
19 1.1.8.2 nathanw * This product includes software developed for the NetBSD Project by
20 1.1.8.2 nathanw * Wasabi Systems, Inc.
21 1.1.8.2 nathanw * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 1.1.8.2 nathanw * or promote products derived from this software without specific prior
23 1.1.8.2 nathanw * written permission.
24 1.1.8.2 nathanw *
25 1.1.8.2 nathanw * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 1.1.8.2 nathanw * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 1.1.8.2 nathanw * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 1.1.8.2 nathanw * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 1.1.8.2 nathanw * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 1.1.8.2 nathanw * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 1.1.8.2 nathanw * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 1.1.8.2 nathanw * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 1.1.8.2 nathanw * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 1.1.8.2 nathanw * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 1.1.8.2 nathanw * POSSIBILITY OF SUCH DAMAGE.
36 1.1.8.2 nathanw */
37 1.1.8.2 nathanw
38 1.1.8.2 nathanw #include "bpfilter.h"
39 1.1.8.2 nathanw
40 1.1.8.2 nathanw #include <sys/param.h>
41 1.1.8.2 nathanw #include <sys/systm.h>
42 1.1.8.2 nathanw #include <sys/mbuf.h>
43 1.1.8.2 nathanw #include <sys/kernel.h>
44 1.1.8.2 nathanw #include <sys/socket.h>
45 1.1.8.2 nathanw #include <sys/ioctl.h>
46 1.1.8.2 nathanw
47 1.1.8.3 nathanw #include <uvm/uvm_extern.h> /* for PAGE_SIZE */
48 1.1.8.2 nathanw
49 1.1.8.2 nathanw #include <net/if.h>
50 1.1.8.2 nathanw #include <net/if_dl.h>
51 1.1.8.2 nathanw #include <net/if_media.h>
52 1.1.8.2 nathanw #include <net/if_ether.h>
53 1.1.8.2 nathanw
54 1.1.8.2 nathanw #if NBPFILTER > 0
55 1.1.8.2 nathanw #include <net/bpf.h>
56 1.1.8.2 nathanw #endif
57 1.1.8.2 nathanw
58 1.1.8.3 nathanw #include <powerpc/ibm4xx/dev/opbvar.h>
59 1.1.8.3 nathanw
60 1.1.8.3 nathanw #include <powerpc/ibm4xx/ibm405gp.h>
61 1.1.8.3 nathanw #include <powerpc/ibm4xx/mal405gp.h>
62 1.1.8.3 nathanw #include <powerpc/ibm4xx/dcr405gp.h>
63 1.1.8.4 nathanw #include <powerpc/ibm4xx/dev/emacreg.h>
64 1.1.8.3 nathanw #include <powerpc/ibm4xx/dev/if_emacreg.h>
65 1.1.8.2 nathanw
66 1.1.8.2 nathanw #include <dev/mii/miivar.h>
67 1.1.8.2 nathanw
/*
 * Transmit descriptor list size.  There are two Tx channels, each with
 * up to 256 hardware descriptors available.  We currently use one Tx
 * channel.  We tell the upper layers that they can queue a lot of
 * packets, and we go ahead and manage up to 64 of them at a time.  We
 * allow up to 16 DMA segments per packet.
 *
 * Note: EMAC_NTXDESC and EMAC_TXQUEUELEN must be powers of two so the
 * _MASK / NEXT macros below work by simple bitwise-AND wraparound.
 */
#define	EMAC_NTXSEGS		16
#define	EMAC_TXQUEUELEN		64
#define	EMAC_TXQUEUELEN_MASK	(EMAC_TXQUEUELEN - 1)
#define	EMAC_TXQUEUE_GC		(EMAC_TXQUEUELEN / 4)	/* reap threshold */
#define	EMAC_NTXDESC		256
#define	EMAC_NTXDESC_MASK	(EMAC_NTXDESC - 1)
#define	EMAC_NEXTTX(x)		(((x) + 1) & EMAC_NTXDESC_MASK)
#define	EMAC_NEXTTXS(x)		(((x) + 1) & EMAC_TXQUEUELEN_MASK)

/*
 * Receive descriptor list size.  There is one Rx channel with up to 256
 * hardware descriptors available.  We allocate 64 receive descriptors,
 * each with a 2k buffer (MCLBYTES).
 *
 * EMAC_NRXDESC must likewise be a power of two for the mask macros.
 */
#define	EMAC_NRXDESC		64
#define	EMAC_NRXDESC_MASK	(EMAC_NRXDESC - 1)
#define	EMAC_NEXTRX(x)		(((x) + 1) & EMAC_NRXDESC_MASK)
#define	EMAC_PREVRX(x)		(((x) - 1) & EMAC_NRXDESC_MASK)

/*
 * Transmit/receive descriptors that are DMA'd to the EMAC.
 *
 * Both descriptor rings live in one contiguous, DMA-coherent
 * allocation (see emac_attach) so a single bus_dmamap covers them;
 * the CDOFF macros compute byte offsets into that allocation.
 */
struct emac_control_data {
	struct mal_descriptor ecd_txdesc[EMAC_NTXDESC];
	struct mal_descriptor ecd_rxdesc[EMAC_NRXDESC];
};

#define	EMAC_CDOFF(x)		offsetof(struct emac_control_data, x)
#define	EMAC_CDTXOFF(x)		EMAC_CDOFF(ecd_txdesc[(x)])
#define	EMAC_CDRXOFF(x)		EMAC_CDOFF(ecd_rxdesc[(x)])
105 1.1.8.3 nathanw
/*
 * Software state for transmit jobs.
 *
 * One "job" is one packet handed down by the network stack; it may span
 * several hardware descriptors (up to EMAC_NTXSEGS DMA segments).
 */
struct emac_txsoft {
	struct mbuf *txs_mbuf;		/* head of mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive descriptors.
 *
 * One entry per Rx hardware descriptor; the mbuf cluster is the
 * receive buffer the descriptor points at.
 */
struct emac_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};
124 1.1.8.3 nathanw
/*
 * Software state per device.
 */
struct emac_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */
	void *sc_powerhook;		/* power management hook */

	struct mii_data sc_mii;		/* MII/media information */
	struct callout sc_callout;	/* tick callout */

	u_int32_t sc_mr1;		/* copy of Mode Register 1 */

	bus_dmamap_t sc_cddmamap;	/* control data dma map */
	/* DMA (bus) address of the control-data area. */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/* Software state for transmit/receive descriptors. */
	struct emac_txsoft sc_txsoft[EMAC_TXQUEUELEN];
	struct emac_rxsoft sc_rxsoft[EMAC_NRXDESC];

	/* Control data structures (kernel-virtual view of the rings). */
	struct emac_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->ecd_txdesc
#define	sc_rxdescs	sc_control_data->ecd_rxdesc

#ifdef EMAC_EVENT_COUNTERS
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_txintr;	/* Tx interrupts */
	struct evcnt sc_ev_rxde;	/* Rx descriptor interrupts */
	struct evcnt sc_ev_txde;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_wol;		/* Wake-On-Lan interrupts */
	struct evcnt sc_ev_serr;	/* MAL system error interrupts */
	struct evcnt sc_ev_intr;	/* General EMAC interrupts */

	struct evcnt sc_ev_txreap;	/* Calls to Tx descriptor reaper */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */
	struct evcnt sc_ev_tu;		/* Tx underrun */
#endif /* EMAC_EVENT_COUNTERS */

	/* Tx ring bookkeeping (hardware descriptors). */
	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	/* Tx job bookkeeping (software emac_txsoft entries). */
	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next ready Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	int sc_rxptr;			/* next ready RX descriptor/descsoft */
};
179 1.1.8.2 nathanw
#ifdef EMAC_EVENT_COUNTERS
#define	EMAC_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define	EMAC_EVCNT_INCR(ev)	/* nothing */
#endif

/* DMA (bus) address of Tx/Rx descriptor (x) within the control data. */
#define	EMAC_CDTXADDR(sc, x)	((sc)->sc_cddma + EMAC_CDTXOFF((x)))
#define	EMAC_CDRXADDR(sc, x)	((sc)->sc_cddma + EMAC_CDRXOFF((x)))

/*
 * Sync (n) Tx descriptors starting at ring index (x).  The range may
 * wrap past the end of the ring, in which case two bus_dmamap_sync
 * calls are issued: end-of-ring first, then the remainder from 0.
 */
#define	EMAC_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > EMAC_NTXDESC) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    EMAC_CDTXOFF(__x), sizeof(struct mal_descriptor) *	\
		    (EMAC_NTXDESC - __x), (ops));			\
		__n -= (EMAC_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    EMAC_CDTXOFF(__x), sizeof(struct mal_descriptor) * __n, (ops)); \
} while (/*CONSTCOND*/0)

/* Sync a single Rx descriptor at ring index (x). */
#define	EMAC_CDRXSYNC(sc, x, ops)					\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    EMAC_CDRXOFF((x)), sizeof(struct mal_descriptor), (ops));	\
} while (/*CONSTCOND*/0)

/*
 * (Re)initialize Rx descriptor (x) to point at its mbuf cluster and
 * hand it back to the MAL (EMPTY), then sync it out to the hardware.
 */
#define	EMAC_INIT_RXDESC(sc, x)						\
do {									\
	struct emac_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	struct mal_descriptor *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + 2;				\
									\
	__rxd->md_data = __rxs->rxs_dmamap->dm_segs[0].ds_addr + 2;	\
	__rxd->md_data_len = __m->m_ext.ext_size - 2;			\
	__rxd->md_stat_ctrl = MAL_RX_EMPTY | MAL_RX_INTERRUPT |		\
	    /* Set wrap on last descriptor. */				\
	    (((x) == EMAC_NRXDESC - 1) ? MAL_RX_WRAP : 0);		\
	EMAC_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
} while (/*CONSTCOND*/0)

/* EMAC register access (byte-order-preserving "stream" variants). */
#define	EMAC_WRITE(sc, reg, val) \
	bus_space_write_stream_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	EMAC_READ(sc, reg) \
	bus_space_read_stream_4((sc)->sc_st, (sc)->sc_sh, (reg))
241 1.1.8.3 nathanw
242 1.1.8.2 nathanw static int emac_match(struct device *, struct cfdata *, void *);
243 1.1.8.2 nathanw static void emac_attach(struct device *, struct device *, void *);
244 1.1.8.3 nathanw
245 1.1.8.3 nathanw static int emac_add_rxbuf(struct emac_softc *, int);
246 1.1.8.3 nathanw static int emac_init(struct ifnet *);
247 1.1.8.3 nathanw static int emac_ioctl(struct ifnet *, u_long, caddr_t);
248 1.1.8.3 nathanw static void emac_reset(struct emac_softc *);
249 1.1.8.3 nathanw static void emac_rxdrain(struct emac_softc *);
250 1.1.8.3 nathanw static int emac_txreap(struct emac_softc *);
251 1.1.8.3 nathanw static void emac_shutdown(void *);
252 1.1.8.3 nathanw static void emac_start(struct ifnet *);
253 1.1.8.3 nathanw static void emac_stop(struct ifnet *, int);
254 1.1.8.3 nathanw static void emac_watchdog(struct ifnet *);
255 1.1.8.3 nathanw
256 1.1.8.3 nathanw static int emac_wol_intr(void *);
257 1.1.8.3 nathanw static int emac_serr_intr(void *);
258 1.1.8.3 nathanw static int emac_txeob_intr(void *);
259 1.1.8.3 nathanw static int emac_rxeob_intr(void *);
260 1.1.8.3 nathanw static int emac_txde_intr(void *);
261 1.1.8.3 nathanw static int emac_rxde_intr(void *);
262 1.1.8.2 nathanw static int emac_intr(void *);
263 1.1.8.2 nathanw
264 1.1.8.3 nathanw static int emac_mediachange(struct ifnet *);
265 1.1.8.3 nathanw static void emac_mediastatus(struct ifnet *, struct ifmediareq *);
266 1.1.8.3 nathanw static int emac_mii_readreg(struct device *, int, int);
267 1.1.8.3 nathanw static void emac_mii_statchg(struct device *);
268 1.1.8.3 nathanw static void emac_mii_tick(void *);
269 1.1.8.3 nathanw static uint32_t emac_mii_wait(struct emac_softc *);
270 1.1.8.3 nathanw static void emac_mii_writereg(struct device *, int, int, int);
271 1.1.8.3 nathanw
272 1.1.8.3 nathanw int emac_copy_small = 0;
273 1.1.8.3 nathanw
/*
 * Autoconfiguration glue: softc size plus match/attach entry points
 * for the "emac" driver.
 */
struct cfattach emac_ca = {
	sizeof(struct emac_softc), emac_match, emac_attach
};
277 1.1.8.2 nathanw
278 1.1.8.2 nathanw static int
279 1.1.8.2 nathanw emac_match(struct device *parent, struct cfdata *cf, void *aux)
280 1.1.8.2 nathanw {
281 1.1.8.3 nathanw struct opb_attach_args *oaa = aux;
282 1.1.8.2 nathanw
283 1.1.8.3 nathanw /* match only on-chip ethernet devices */
284 1.1.8.3 nathanw if (strcmp(oaa->opb_name, cf->cf_driver->cd_name) == 0)
285 1.1.8.3 nathanw return (1);
286 1.1.8.2 nathanw
287 1.1.8.3 nathanw return (0);
288 1.1.8.2 nathanw }
289 1.1.8.2 nathanw
/*
 * emac_attach:
 *
 *	Autoconfiguration attach routine.  Maps the device, establishes
 *	the seven consecutive interrupt lines (WOL, MAL SERR, Tx/Rx EOB,
 *	Tx/Rx DE, general EMAC), allocates and loads the DMA-coherent
 *	descriptor rings and per-packet DMA maps, resets the chip, sets
 *	up MII media, and attaches the network interface.
 *
 *	On any allocation failure, unwinds everything acquired so far
 *	via the fail_* labels in reverse order and returns.
 */
static void
emac_attach(struct device *parent, struct device *self, void *aux)
{
	struct opb_attach_args *oaa = aux;
	struct emac_softc *sc = (struct emac_softc *)self;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mii_data *mii = &sc->sc_mii;
	bus_dma_segment_t seg;
	int error, i, nseg;

	sc->sc_st = oaa->opb_bt;
	sc->sc_sh = oaa->opb_addr;
	sc->sc_dmat = oaa->opb_dmat;

	printf(": 405GP EMAC\n");

	/*
	 * Set up Mode Register 1 - set receive and transmit FIFOs to maximum
	 * size, allow transmit of multiple packets (only channel 0 is used).
	 *
	 * XXX: Allow pause packets??
	 */
	sc->sc_mr1 = MR1_RFS_4KB | MR1_TFS_2KB | MR1_TR0_MULTIPLE;

	/*
	 * The seven interrupt sources occupy consecutive IRQ numbers
	 * starting at opb_irq.
	 */
	intr_establish(oaa->opb_irq    , IST_LEVEL, IPL_NET, emac_wol_intr, sc);
	intr_establish(oaa->opb_irq + 1, IST_LEVEL, IPL_NET, emac_serr_intr, sc);
	intr_establish(oaa->opb_irq + 2, IST_LEVEL, IPL_NET, emac_txeob_intr, sc);
	intr_establish(oaa->opb_irq + 3, IST_LEVEL, IPL_NET, emac_rxeob_intr, sc);
	intr_establish(oaa->opb_irq + 4, IST_LEVEL, IPL_NET, emac_txde_intr, sc);
	intr_establish(oaa->opb_irq + 5, IST_LEVEL, IPL_NET, emac_rxde_intr, sc);
	intr_establish(oaa->opb_irq + 6, IST_LEVEL, IPL_NET, emac_intr, sc);
	printf("%s: interrupting at irqs %d .. %d\n", sc->sc_dev.dv_xname,
	    oaa->opb_irq, oaa->opb_irq + 6);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct emac_control_data), 0, 0, &seg, 1, &nseg, 0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, nseg,
	    sizeof(struct emac_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct emac_control_data), 1,
	    sizeof(struct emac_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct emac_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < EMAC_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    EMAC_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < EMAC_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	emac_reset(sc);

	/*
	 * The MAC address comes from board-specific data, defined
	 * outside this file (board_data.mac_address_local).
	 */
	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(board_data.mac_address_local));

	/*
	 * Initialise the media structures.
	 */
	mii->mii_ifp = ifp;
	mii->mii_readreg = emac_mii_readreg;
	mii->mii_writereg = emac_mii_writereg;
	mii->mii_statchg = emac_mii_statchg;

	ifmedia_init(&mii->mii_media, 0, emac_mediachange,
	    emac_mediastatus);
	mii_attach(&sc->sc_dev, mii, 0xffffffff,
	    MII_PHY_ANY, MII_OFFSET_ANY, 0);
	/* If no PHY was found, fall back to a manual "none" medium. */
	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = emac_ioctl;
	ifp->if_start = emac_start;
	ifp->if_watchdog = emac_watchdog;
	ifp->if_init = emac_init;
	ifp->if_stop = emac_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, board_data.mac_address_local);

#ifdef EMAC_EVENT_COUNTERS
	/*
	 * Attach the event counters.
	 */
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_txintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txintr");
	evcnt_attach_dynamic(&sc->sc_ev_rxde, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxde");
	evcnt_attach_dynamic(&sc->sc_ev_txde, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txde");
	evcnt_attach_dynamic(&sc->sc_ev_wol, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "wol");
	evcnt_attach_dynamic(&sc->sc_ev_serr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "serr");
	evcnt_attach_dynamic(&sc->sc_ev_intr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "intr");

	evcnt_attach_dynamic(&sc->sc_ev_txreap, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txreap");
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdrop");
	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tu");
#endif /* EMAC_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(emac_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < EMAC_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < EMAC_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct emac_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, nseg);
 fail_0:
	return;
}
501 1.1.8.3 nathanw
502 1.1.8.3 nathanw /*
503 1.1.8.3 nathanw * Device shutdown routine.
504 1.1.8.3 nathanw */
505 1.1.8.3 nathanw static void
506 1.1.8.3 nathanw emac_shutdown(void *arg)
507 1.1.8.3 nathanw {
508 1.1.8.3 nathanw struct emac_softc *sc = arg;
509 1.1.8.3 nathanw
510 1.1.8.3 nathanw emac_stop(&sc->sc_ethercom.ec_if, 0);
511 1.1.8.3 nathanw }
512 1.1.8.3 nathanw
513 1.1.8.3 nathanw /* ifnet interface function */
514 1.1.8.3 nathanw static void
515 1.1.8.3 nathanw emac_start(struct ifnet *ifp)
516 1.1.8.3 nathanw {
517 1.1.8.3 nathanw struct emac_softc *sc = ifp->if_softc;
518 1.1.8.3 nathanw struct mbuf *m0;
519 1.1.8.3 nathanw struct emac_txsoft *txs;
520 1.1.8.3 nathanw bus_dmamap_t dmamap;
521 1.1.8.3 nathanw int error, firsttx, nexttx, lasttx, ofree, seg;
522 1.1.8.3 nathanw
523 1.1.8.3 nathanw if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
524 1.1.8.3 nathanw return;
525 1.1.8.3 nathanw
526 1.1.8.3 nathanw /*
527 1.1.8.3 nathanw * Remember the previous number of free descriptors.
528 1.1.8.3 nathanw */
529 1.1.8.3 nathanw ofree = sc->sc_txfree;
530 1.1.8.3 nathanw
531 1.1.8.3 nathanw /*
532 1.1.8.3 nathanw * Loop through the send queue, setting up transmit descriptors
533 1.1.8.3 nathanw * until we drain the queue, or use up all available transmit
534 1.1.8.3 nathanw * descriptors.
535 1.1.8.3 nathanw */
536 1.1.8.3 nathanw for (;;) {
537 1.1.8.3 nathanw /* Grab a packet off the queue. */
538 1.1.8.3 nathanw IFQ_POLL(&ifp->if_snd, m0);
539 1.1.8.3 nathanw if (m0 == NULL)
540 1.1.8.3 nathanw break;
541 1.1.8.3 nathanw
542 1.1.8.3 nathanw /*
543 1.1.8.3 nathanw * Get a work queue entry. Reclaim used Tx descriptors if
544 1.1.8.3 nathanw * we are running low.
545 1.1.8.3 nathanw */
546 1.1.8.3 nathanw if (sc->sc_txsfree < EMAC_TXQUEUE_GC) {
547 1.1.8.3 nathanw emac_txreap(sc);
548 1.1.8.3 nathanw if (sc->sc_txsfree == 0) {
549 1.1.8.3 nathanw EMAC_EVCNT_INCR(&sc->sc_ev_txsstall);
550 1.1.8.3 nathanw break;
551 1.1.8.3 nathanw }
552 1.1.8.3 nathanw }
553 1.1.8.3 nathanw
554 1.1.8.3 nathanw txs = &sc->sc_txsoft[sc->sc_txsnext];
555 1.1.8.3 nathanw dmamap = txs->txs_dmamap;
556 1.1.8.3 nathanw
557 1.1.8.3 nathanw /*
558 1.1.8.3 nathanw * Load the DMA map. If this fails, the packet either
559 1.1.8.3 nathanw * didn't fit in the alloted number of segments, or we
560 1.1.8.3 nathanw * were short on resources. In this case, we'll copy
561 1.1.8.3 nathanw * and try again.
562 1.1.8.3 nathanw */
563 1.1.8.3 nathanw error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
564 1.1.8.3 nathanw BUS_DMA_WRITE|BUS_DMA_NOWAIT);
565 1.1.8.3 nathanw if (error) {
566 1.1.8.3 nathanw if (error == EFBIG) {
567 1.1.8.3 nathanw EMAC_EVCNT_INCR(&sc->sc_ev_txdrop);
568 1.1.8.3 nathanw printf("%s: Tx packet consumes too many "
569 1.1.8.3 nathanw "DMA segments, dropping...\n",
570 1.1.8.3 nathanw sc->sc_dev.dv_xname);
571 1.1.8.3 nathanw IFQ_DEQUEUE(&ifp->if_snd, m0);
572 1.1.8.3 nathanw m_freem(m0);
573 1.1.8.3 nathanw continue;
574 1.1.8.3 nathanw }
575 1.1.8.3 nathanw /* Short on resources, just stop for now. */
576 1.1.8.3 nathanw break;
577 1.1.8.3 nathanw }
578 1.1.8.3 nathanw
579 1.1.8.3 nathanw /*
580 1.1.8.3 nathanw * Ensure we have enough descriptors free to describe
581 1.1.8.3 nathanw * the packet.
582 1.1.8.3 nathanw */
583 1.1.8.3 nathanw if (dmamap->dm_nsegs > sc->sc_txfree) {
584 1.1.8.3 nathanw /*
585 1.1.8.3 nathanw * Not enough free descriptors to transmit this
586 1.1.8.3 nathanw * packet. We haven't committed anything yet,
587 1.1.8.3 nathanw * so just unload the DMA map, put the packet
588 1.1.8.3 nathanw * back on the queue, and punt. Notify the upper
589 1.1.8.3 nathanw * layer that there are not more slots left.
590 1.1.8.3 nathanw *
591 1.1.8.3 nathanw */
592 1.1.8.3 nathanw ifp->if_flags |= IFF_OACTIVE;
593 1.1.8.3 nathanw bus_dmamap_unload(sc->sc_dmat, dmamap);
594 1.1.8.3 nathanw EMAC_EVCNT_INCR(&sc->sc_ev_txdstall);
595 1.1.8.3 nathanw break;
596 1.1.8.3 nathanw }
597 1.1.8.3 nathanw
598 1.1.8.3 nathanw IFQ_DEQUEUE(&ifp->if_snd, m0);
599 1.1.8.3 nathanw
600 1.1.8.3 nathanw /*
601 1.1.8.3 nathanw * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
602 1.1.8.3 nathanw */
603 1.1.8.3 nathanw
604 1.1.8.3 nathanw /* Sync the DMA map. */
605 1.1.8.3 nathanw bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
606 1.1.8.3 nathanw BUS_DMASYNC_PREWRITE);
607 1.1.8.3 nathanw
608 1.1.8.3 nathanw /*
609 1.1.8.3 nathanw * Store a pointer to the packet so that we can free it
610 1.1.8.3 nathanw * later.
611 1.1.8.3 nathanw */
612 1.1.8.3 nathanw txs->txs_mbuf = m0;
613 1.1.8.3 nathanw txs->txs_firstdesc = sc->sc_txnext;
614 1.1.8.3 nathanw txs->txs_ndesc = dmamap->dm_nsegs;
615 1.1.8.3 nathanw
616 1.1.8.3 nathanw /*
617 1.1.8.3 nathanw * Initialize the transmit descriptor.
618 1.1.8.3 nathanw */
619 1.1.8.3 nathanw firsttx = sc->sc_txnext;
620 1.1.8.3 nathanw for (nexttx = sc->sc_txnext, seg = 0;
621 1.1.8.3 nathanw seg < dmamap->dm_nsegs;
622 1.1.8.3 nathanw seg++, nexttx = EMAC_NEXTTX(nexttx)) {
623 1.1.8.3 nathanw /*
624 1.1.8.3 nathanw * If this is the first descriptor we're
625 1.1.8.3 nathanw * enqueueing, don't set the TX_READY bit just
626 1.1.8.3 nathanw * yet. That could cause a race condition.
627 1.1.8.3 nathanw * We'll do it below.
628 1.1.8.3 nathanw */
629 1.1.8.3 nathanw sc->sc_txdescs[nexttx].md_data =
630 1.1.8.3 nathanw dmamap->dm_segs[seg].ds_addr;
631 1.1.8.3 nathanw sc->sc_txdescs[nexttx].md_data_len =
632 1.1.8.3 nathanw dmamap->dm_segs[seg].ds_len;
633 1.1.8.3 nathanw sc->sc_txdescs[nexttx].md_stat_ctrl =
634 1.1.8.3 nathanw (sc->sc_txdescs[nexttx].md_stat_ctrl & MAL_TX_WRAP) |
635 1.1.8.3 nathanw (nexttx == firsttx ? 0 : MAL_TX_READY) |
636 1.1.8.3 nathanw EMAC_TXC_GFCS | EMAC_TXC_GPAD;
637 1.1.8.3 nathanw lasttx = nexttx;
638 1.1.8.3 nathanw }
639 1.1.8.3 nathanw
640 1.1.8.3 nathanw /* Set the LAST bit on the last segment. */
641 1.1.8.3 nathanw sc->sc_txdescs[lasttx].md_stat_ctrl |= MAL_TX_LAST;
642 1.1.8.3 nathanw
643 1.1.8.3 nathanw txs->txs_lastdesc = lasttx;
644 1.1.8.3 nathanw
645 1.1.8.3 nathanw /* Sync the descriptors we're using. */
646 1.1.8.3 nathanw EMAC_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
647 1.1.8.3 nathanw BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
648 1.1.8.3 nathanw
649 1.1.8.3 nathanw /*
650 1.1.8.3 nathanw * The entire packet chain is set up. Give the
651 1.1.8.3 nathanw * first descriptor to the chip now.
652 1.1.8.3 nathanw */
653 1.1.8.3 nathanw sc->sc_txdescs[firsttx].md_stat_ctrl |= MAL_TX_READY;
654 1.1.8.3 nathanw EMAC_CDTXSYNC(sc, firsttx, 1,
655 1.1.8.3 nathanw BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
656 1.1.8.3 nathanw /*
657 1.1.8.3 nathanw * Tell the EMAC that a new packet is available.
658 1.1.8.3 nathanw */
659 1.1.8.4 nathanw EMAC_WRITE(sc, EMAC_TMR0, TMR0_GNP0);
660 1.1.8.3 nathanw
661 1.1.8.3 nathanw /* Advance the tx pointer. */
662 1.1.8.3 nathanw sc->sc_txfree -= txs->txs_ndesc;
663 1.1.8.3 nathanw sc->sc_txnext = nexttx;
664 1.1.8.3 nathanw
665 1.1.8.3 nathanw sc->sc_txsfree--;
666 1.1.8.3 nathanw sc->sc_txsnext = EMAC_NEXTTXS(sc->sc_txsnext);
667 1.1.8.3 nathanw
668 1.1.8.3 nathanw #if NBPFILTER > 0
669 1.1.8.3 nathanw /*
670 1.1.8.3 nathanw * Pass the packet to any BPF listeners.
671 1.1.8.3 nathanw */
672 1.1.8.3 nathanw if (ifp->if_bpf)
673 1.1.8.3 nathanw bpf_mtap(ifp->if_bpf, m0);
674 1.1.8.3 nathanw #endif /* NBPFILTER > 0 */
675 1.1.8.3 nathanw }
676 1.1.8.3 nathanw
677 1.1.8.3 nathanw if (txs == NULL || sc->sc_txfree == 0) {
678 1.1.8.3 nathanw /* No more slots left; notify upper layer. */
679 1.1.8.3 nathanw ifp->if_flags |= IFF_OACTIVE;
680 1.1.8.3 nathanw }
681 1.1.8.3 nathanw
682 1.1.8.3 nathanw if (sc->sc_txfree != ofree) {
683 1.1.8.3 nathanw /* Set a watchdog timer in case the chip flakes out. */
684 1.1.8.3 nathanw ifp->if_timer = 5;
685 1.1.8.3 nathanw }
686 1.1.8.3 nathanw }
687 1.1.8.3 nathanw
688 1.1.8.3 nathanw static int
689 1.1.8.3 nathanw emac_init(struct ifnet *ifp)
690 1.1.8.3 nathanw {
691 1.1.8.3 nathanw struct emac_softc *sc = ifp->if_softc;
692 1.1.8.3 nathanw struct emac_rxsoft *rxs;
693 1.1.8.3 nathanw unsigned char *enaddr = board_data.mac_address_local;
694 1.1.8.3 nathanw int error, i;
695 1.1.8.3 nathanw
696 1.1.8.3 nathanw error = 0;
697 1.1.8.3 nathanw
698 1.1.8.3 nathanw /* Cancel any pending I/O. */
699 1.1.8.3 nathanw emac_stop(ifp, 0);
700 1.1.8.3 nathanw
701 1.1.8.3 nathanw /* Reset the chip to a known state. */
702 1.1.8.3 nathanw emac_reset(sc);
703 1.1.8.3 nathanw
704 1.1.8.3 nathanw /*
705 1.1.8.3 nathanw * Initialise the transmit descriptor ring.
706 1.1.8.3 nathanw */
707 1.1.8.3 nathanw memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
708 1.1.8.3 nathanw /* set wrap on last descriptor */
709 1.1.8.3 nathanw sc->sc_txdescs[EMAC_NTXDESC - 1].md_stat_ctrl |= MAL_TX_WRAP;
710 1.1.8.3 nathanw EMAC_CDTXSYNC(sc, 0, EMAC_NTXDESC,
711 1.1.8.3 nathanw BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
712 1.1.8.3 nathanw sc->sc_txfree = EMAC_NTXDESC;
713 1.1.8.3 nathanw sc->sc_txnext = 0;
714 1.1.8.3 nathanw
715 1.1.8.3 nathanw /*
716 1.1.8.3 nathanw * Initialise the transmit job descriptors.
717 1.1.8.3 nathanw */
718 1.1.8.3 nathanw for (i = 0; i < EMAC_TXQUEUELEN; i++)
719 1.1.8.3 nathanw sc->sc_txsoft[i].txs_mbuf = NULL;
720 1.1.8.3 nathanw sc->sc_txsfree = EMAC_TXQUEUELEN;
721 1.1.8.3 nathanw sc->sc_txsnext = 0;
722 1.1.8.3 nathanw sc->sc_txsdirty = 0;
723 1.1.8.3 nathanw
724 1.1.8.3 nathanw /*
725 1.1.8.3 nathanw * Initialise the receiver descriptor and receive job
726 1.1.8.3 nathanw * descriptor rings.
727 1.1.8.3 nathanw */
728 1.1.8.3 nathanw for (i = 0; i < EMAC_NRXDESC; i++) {
729 1.1.8.3 nathanw rxs = &sc->sc_rxsoft[i];
730 1.1.8.3 nathanw if (rxs->rxs_mbuf == NULL) {
731 1.1.8.3 nathanw if ((error = emac_add_rxbuf(sc, i)) != 0) {
732 1.1.8.3 nathanw printf("%s: unable to allocate or map rx "
733 1.1.8.3 nathanw "buffer %d, error = %d\n",
734 1.1.8.3 nathanw sc->sc_dev.dv_xname, i, error);
735 1.1.8.3 nathanw /*
736 1.1.8.3 nathanw * XXX Should attempt to run with fewer receive
737 1.1.8.3 nathanw * XXX buffers instead of just failing.
738 1.1.8.3 nathanw */
739 1.1.8.3 nathanw emac_rxdrain(sc);
740 1.1.8.3 nathanw goto out;
741 1.1.8.3 nathanw }
742 1.1.8.3 nathanw } else
743 1.1.8.3 nathanw EMAC_INIT_RXDESC(sc, i);
744 1.1.8.3 nathanw }
745 1.1.8.3 nathanw sc->sc_rxptr = 0;
746 1.1.8.3 nathanw
747 1.1.8.3 nathanw /*
748 1.1.8.3 nathanw * Set the current media.
749 1.1.8.3 nathanw */
750 1.1.8.3 nathanw mii_mediachg(&sc->sc_mii);
751 1.1.8.3 nathanw
752 1.1.8.3 nathanw /*
753 1.1.8.3 nathanw * Give the transmit and receive rings to the MAL.
754 1.1.8.3 nathanw */
755 1.1.8.3 nathanw mtdcr(DCR_MAL0_TXCTP0R, EMAC_CDTXADDR(sc, 0));
756 1.1.8.3 nathanw mtdcr(DCR_MAL0_RXCTP0R, EMAC_CDRXADDR(sc, 0));
757 1.1.8.3 nathanw
758 1.1.8.3 nathanw /*
759 1.1.8.3 nathanw * Load the MAC address.
760 1.1.8.3 nathanw */
761 1.1.8.4 nathanw EMAC_WRITE(sc, EMAC_IAHR, enaddr[0] << 8 | enaddr[1]);
762 1.1.8.4 nathanw EMAC_WRITE(sc, EMAC_IALR,
763 1.1.8.3 nathanw enaddr[2] << 24 | enaddr[3] << 16 | enaddr[4] << 8 | enaddr[5]);
764 1.1.8.3 nathanw
765 1.1.8.3 nathanw /*
766 1.1.8.3 nathanw * Set the receive channel buffer size (in units of 16 bytes).
767 1.1.8.3 nathanw */
768 1.1.8.3 nathanw #if MCLBYTES > (4096 - 16) /* XXX! */
769 1.1.8.3 nathanw # error MCLBYTES > max rx channel buffer size
770 1.1.8.3 nathanw #endif
771 1.1.8.3 nathanw mtdcr(DCR_MAL0_RCBS0, MCLBYTES / 16);
772 1.1.8.3 nathanw
773 1.1.8.3 nathanw /* Set fifos, media modes. */
774 1.1.8.4 nathanw EMAC_WRITE(sc, EMAC_MR1, sc->sc_mr1);
775 1.1.8.3 nathanw
776 1.1.8.3 nathanw /*
777 1.1.8.3 nathanw * Enable Individual and (possibly) Broadcast Address modes,
778 1.1.8.3 nathanw * runt packets, and strip padding.
779 1.1.8.3 nathanw *
780 1.1.8.3 nathanw * XXX: promiscuous mode (and promiscuous multicast mode) need to be
781 1.1.8.3 nathanw * dealt with here!
782 1.1.8.3 nathanw */
783 1.1.8.4 nathanw EMAC_WRITE(sc, EMAC_RMR, RMR_IAE | RMR_RRP | RMR_SP |
784 1.1.8.3 nathanw (ifp->if_flags & IFF_BROADCAST ? RMR_BAE : 0));
785 1.1.8.3 nathanw
786 1.1.8.3 nathanw /*
787 1.1.8.3 nathanw * Set low- and urgent-priority request thresholds.
788 1.1.8.3 nathanw */
789 1.1.8.4 nathanw EMAC_WRITE(sc, EMAC_TMR1,
790 1.1.8.3 nathanw ((7 << TMR1_TLR_SHIFT) & TMR1_TLR_MASK) | /* 16 word burst */
791 1.1.8.3 nathanw ((15 << TMR1_TUR_SHIFT) & TMR1_TUR_MASK));
792 1.1.8.3 nathanw /*
793 1.1.8.3 nathanw * Set Transmit Request Threshold Register.
794 1.1.8.3 nathanw */
795 1.1.8.4 nathanw EMAC_WRITE(sc, EMAC_TRTR, TRTR_256);
796 1.1.8.3 nathanw
797 1.1.8.3 nathanw /*
798 1.1.8.3 nathanw * Set high and low receive watermarks.
799 1.1.8.3 nathanw */
800 1.1.8.4 nathanw EMAC_WRITE(sc, EMAC_RWMR,
801 1.1.8.3 nathanw 30 << RWMR_RLWM_SHIFT | 64 << RWMR_RLWM_SHIFT);
802 1.1.8.3 nathanw
803 1.1.8.3 nathanw /*
804 1.1.8.3 nathanw * Set frame gap.
805 1.1.8.3 nathanw */
806 1.1.8.4 nathanw EMAC_WRITE(sc, EMAC_IPGVR, 8);
807 1.1.8.3 nathanw
808 1.1.8.3 nathanw /*
809 1.1.8.3 nathanw * Set interrupt status enable bits for EMAC and MAL.
810 1.1.8.3 nathanw */
811 1.1.8.4 nathanw EMAC_WRITE(sc, EMAC_ISER,
812 1.1.8.3 nathanw ISR_BP | ISR_SE | ISR_ALE | ISR_BFCS | ISR_PTLE | ISR_ORE | ISR_IRE);
813 1.1.8.3 nathanw mtdcr(DCR_MAL0_IER, MAL0_IER_DE | MAL0_IER_NWE | MAL0_IER_TO |
814 1.1.8.3 nathanw MAL0_IER_OPB | MAL0_IER_PLB);
815 1.1.8.3 nathanw
816 1.1.8.3 nathanw /*
817 1.1.8.3 nathanw * Enable the transmit and receive channel on the MAL.
818 1.1.8.3 nathanw */
819 1.1.8.3 nathanw mtdcr(DCR_MAL0_RXCASR, MAL0_RXCASR_CHAN0);
820 1.1.8.3 nathanw mtdcr(DCR_MAL0_TXCASR, MAL0_TXCASR_CHAN0);
821 1.1.8.3 nathanw
822 1.1.8.3 nathanw /*
823 1.1.8.3 nathanw * Enable the transmit and receive channel on the EMAC.
824 1.1.8.3 nathanw */
825 1.1.8.4 nathanw EMAC_WRITE(sc, EMAC_MR0, MR0_TXE | MR0_RXE);
826 1.1.8.3 nathanw
827 1.1.8.3 nathanw /*
828 1.1.8.3 nathanw * Start the one second MII clock.
829 1.1.8.3 nathanw */
830 1.1.8.3 nathanw callout_reset(&sc->sc_callout, hz, emac_mii_tick, sc);
831 1.1.8.3 nathanw
832 1.1.8.3 nathanw /*
833 1.1.8.3 nathanw * ... all done!
834 1.1.8.3 nathanw */
835 1.1.8.3 nathanw ifp->if_flags |= IFF_RUNNING;
836 1.1.8.3 nathanw ifp->if_flags &= ~IFF_OACTIVE;
837 1.1.8.3 nathanw
838 1.1.8.3 nathanw out:
839 1.1.8.3 nathanw if (error) {
840 1.1.8.3 nathanw ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
841 1.1.8.3 nathanw ifp->if_timer = 0;
842 1.1.8.3 nathanw printf("%s: interface not running\n", sc->sc_dev.dv_xname);
843 1.1.8.3 nathanw }
844 1.1.8.3 nathanw return (error);
845 1.1.8.3 nathanw }
846 1.1.8.3 nathanw
/*
 * emac_add_rxbuf:
 *
 *	Attach a fresh mbuf cluster to receive ring slot `idx', load it
 *	into that slot's DMA map, and hand the descriptor back to the
 *	chip.  Any mbuf previously attached to the slot is unloaded
 *	first but not freed -- the caller owns it by then (see
 *	emac_rxeob_intr()).
 *
 *	Returns 0 on success, or ENOBUFS if no mbuf/cluster is available.
 */
static int
emac_add_rxbuf(struct emac_softc *sc, int idx)
{
	struct emac_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	/* Allocate an mbuf header and attach a cluster to it. */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	/* Unload any previous mapping before reusing the DMA map. */
	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	/* Map the entire cluster; the chip DMAs directly into it. */
	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("emac_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	/* Give the descriptor back to the chip. */
	EMAC_INIT_RXDESC(sc, idx);

	return (0);
}
884 1.1.8.3 nathanw
885 1.1.8.3 nathanw /* ifnet interface function */
886 1.1.8.3 nathanw static void
887 1.1.8.3 nathanw emac_watchdog(struct ifnet *ifp)
888 1.1.8.3 nathanw {
889 1.1.8.3 nathanw struct emac_softc *sc = ifp->if_softc;
890 1.1.8.3 nathanw
891 1.1.8.3 nathanw /*
892 1.1.8.3 nathanw * Since we're not interrupting every packet, sweep
893 1.1.8.3 nathanw * up before we report an error.
894 1.1.8.3 nathanw */
895 1.1.8.3 nathanw emac_txreap(sc);
896 1.1.8.3 nathanw
897 1.1.8.3 nathanw if (sc->sc_txfree != EMAC_NTXDESC) {
898 1.1.8.3 nathanw printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
899 1.1.8.3 nathanw sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
900 1.1.8.3 nathanw sc->sc_txnext);
901 1.1.8.3 nathanw ifp->if_oerrors++;
902 1.1.8.3 nathanw
903 1.1.8.3 nathanw /* Reset the interface. */
904 1.1.8.3 nathanw (void)emac_init(ifp);
905 1.1.8.3 nathanw } else if (ifp->if_flags & IFF_DEBUG)
906 1.1.8.3 nathanw printf("%s: recovered from device timeout\n",
907 1.1.8.3 nathanw sc->sc_dev.dv_xname);
908 1.1.8.3 nathanw
909 1.1.8.3 nathanw /* try to get more packets going */
910 1.1.8.3 nathanw emac_start(ifp);
911 1.1.8.3 nathanw }
912 1.1.8.3 nathanw
913 1.1.8.3 nathanw static void
914 1.1.8.3 nathanw emac_rxdrain(struct emac_softc *sc)
915 1.1.8.3 nathanw {
916 1.1.8.3 nathanw struct emac_rxsoft *rxs;
917 1.1.8.3 nathanw int i;
918 1.1.8.3 nathanw
919 1.1.8.3 nathanw for (i = 0; i < EMAC_NRXDESC; i++) {
920 1.1.8.3 nathanw rxs = &sc->sc_rxsoft[i];
921 1.1.8.3 nathanw if (rxs->rxs_mbuf != NULL) {
922 1.1.8.3 nathanw bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
923 1.1.8.3 nathanw m_freem(rxs->rxs_mbuf);
924 1.1.8.3 nathanw rxs->rxs_mbuf = NULL;
925 1.1.8.3 nathanw }
926 1.1.8.3 nathanw }
927 1.1.8.3 nathanw }
928 1.1.8.3 nathanw
/* ifnet interface function */
/*
 * emac_stop:
 *
 *	Stop transmission and reception.  If `disable' is nonzero, the
 *	receive buffers are released as well.
 */
static void
emac_stop(struct ifnet *ifp, int disable)
{
	struct emac_softc *sc = ifp->if_softc;
	struct emac_txsoft *txs;
	int i;

	/* Stop the one second clock. */
	callout_stop(&sc->sc_callout);

	/* Down the MII */
	mii_down(&sc->sc_mii);

	/* Disable interrupts. */
#if 0 /* Can't disable MAL interrupts without a reset... */
	EMAC_WRITE(sc, EMAC_ISER, 0);
#endif
	mtdcr(DCR_MAL0_IER, 0);

	/* Disable the receive and transmit channels. */
	mtdcr(DCR_MAL0_RXCARR, MAL0_RXCARR_CHAN0);
	/*
	 * NOTE(review): only channel 0 is used elsewhere in this file;
	 * confirm whether CHAN1 really needs to be disabled here.
	 */
	mtdcr(DCR_MAL0_TXCARR, MAL0_TXCARR_CHAN0 | MAL0_TXCARR_CHAN1);

	/* Disable the transmit enable and receive MACs. */
	EMAC_WRITE(sc, EMAC_MR0,
	    EMAC_READ(sc, EMAC_MR0) & ~(MR0_TXE | MR0_RXE));

	/* Release any queued transmit buffers. */
	for (i = 0; i < EMAC_TXQUEUELEN; i++) {
		txs = &sc->sc_txsoft[i];
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
	}

	/* Optionally free the receive buffers as well. */
	if (disable)
		emac_rxdrain(sc);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}
976 1.1.8.3 nathanw
977 1.1.8.3 nathanw /* ifnet interface function */
978 1.1.8.3 nathanw static int
979 1.1.8.3 nathanw emac_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
980 1.1.8.3 nathanw {
981 1.1.8.3 nathanw struct emac_softc *sc = ifp->if_softc;
982 1.1.8.3 nathanw struct ifreq *ifr = (struct ifreq *)data;
983 1.1.8.3 nathanw int s, error;
984 1.1.8.3 nathanw
985 1.1.8.3 nathanw s = splnet();
986 1.1.8.3 nathanw
987 1.1.8.3 nathanw switch (cmd) {
988 1.1.8.3 nathanw case SIOCSIFMEDIA:
989 1.1.8.3 nathanw case SIOCGIFMEDIA:
990 1.1.8.3 nathanw error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
991 1.1.8.3 nathanw break;
992 1.1.8.3 nathanw
993 1.1.8.3 nathanw default:
994 1.1.8.3 nathanw error = ether_ioctl(ifp, cmd, data);
995 1.1.8.3 nathanw if (error == ENETRESET) {
996 1.1.8.3 nathanw /*
997 1.1.8.3 nathanw * Multicast list has changed; set the hardware filter
998 1.1.8.3 nathanw * accordingly.
999 1.1.8.3 nathanw */
1000 1.1.8.3 nathanw #if 0
1001 1.1.8.3 nathanw error = emac_set_filter(sc); /* XXX not done yet */
1002 1.1.8.3 nathanw #else
1003 1.1.8.3 nathanw error = emac_init(ifp);
1004 1.1.8.3 nathanw #endif
1005 1.1.8.3 nathanw }
1006 1.1.8.3 nathanw break;
1007 1.1.8.3 nathanw }
1008 1.1.8.3 nathanw
1009 1.1.8.3 nathanw /* try to get more packets going */
1010 1.1.8.3 nathanw emac_start(ifp);
1011 1.1.8.3 nathanw
1012 1.1.8.3 nathanw splx(s);
1013 1.1.8.3 nathanw return (error);
1014 1.1.8.2 nathanw }
1015 1.1.8.2 nathanw
/*
 * emac_reset:
 *
 *	Soft-reset the MAL and the EMAC, then reprogram the MAL
 *	configuration register.  Called from emac_init() and from
 *	emac_mii_statchg(), since MR1 can only be written right
 *	after a reset.
 */
static void
emac_reset(struct emac_softc *sc)
{

	/* reset the MAL */
	mtdcr(DCR_MAL0_CFG, MAL0_CFG_SR);

	/* Soft-reset the EMAC and give it a moment to settle. */
	EMAC_WRITE(sc, EMAC_MR0, MR0_SRST);
	delay(5);

	/* XXX: check if MR0_SRST is clear until a timeout instead? */
	EMAC_WRITE(sc, EMAC_MR0, EMAC_READ(sc, EMAC_MR0) & ~MR0_SRST);

	/* XXX clear interrupts in EMAC_ISR just to be sure?? */

	/* set the MAL config register */
	mtdcr(DCR_MAL0_CFG, MAL0_CFG_PLBB | MAL0_CFG_OPBBL | MAL0_CFG_LEA |
	    MAL0_CFG_SD | MAL0_CFG_PLBLT);
}
1035 1.1.8.3 nathanw
1036 1.1.8.3 nathanw /*
1037 1.1.8.3 nathanw * EMAC General interrupt handler
1038 1.1.8.3 nathanw */
1039 1.1.8.2 nathanw static int
1040 1.1.8.2 nathanw emac_intr(void *arg)
1041 1.1.8.2 nathanw {
1042 1.1.8.3 nathanw struct emac_softc *sc = arg;
1043 1.1.8.3 nathanw uint32_t status;
1044 1.1.8.3 nathanw
1045 1.1.8.3 nathanw EMAC_EVCNT_INCR(&sc->sc_ev_intr);
1046 1.1.8.4 nathanw status = EMAC_READ(sc, EMAC_ISR);
1047 1.1.8.3 nathanw
1048 1.1.8.3 nathanw /* Clear the interrupt status bits. */
1049 1.1.8.4 nathanw EMAC_WRITE(sc, EMAC_ISR, status);
1050 1.1.8.3 nathanw
1051 1.1.8.3 nathanw return (0);
1052 1.1.8.3 nathanw }
1053 1.1.8.3 nathanw
/*
 * EMAC Wake-On-LAN interrupt handler.
 * Only counts and logs the event; no wake-up processing is implemented.
 */
static int
emac_wol_intr(void *arg)
{
	struct emac_softc *sc = arg;

	EMAC_EVCNT_INCR(&sc->sc_ev_wol);
	printf("%s: emac_wol_intr\n", sc->sc_dev.dv_xname);
	return (0);
}
1066 1.1.8.3 nathanw
1067 1.1.8.3 nathanw /*
1068 1.1.8.3 nathanw * MAL System ERRor interrupt handler
1069 1.1.8.3 nathanw */
1070 1.1.8.3 nathanw static int
1071 1.1.8.3 nathanw emac_serr_intr(void *arg)
1072 1.1.8.3 nathanw {
1073 1.1.8.3 nathanw #ifdef EMAC_EVENT_COUNTERS
1074 1.1.8.3 nathanw struct emac_softc *sc = arg;
1075 1.1.8.3 nathanw #endif
1076 1.1.8.3 nathanw u_int32_t esr;
1077 1.1.8.3 nathanw
1078 1.1.8.3 nathanw EMAC_EVCNT_INCR(&sc->sc_ev_serr);
1079 1.1.8.3 nathanw esr = mfdcr(DCR_MAL0_ESR);
1080 1.1.8.3 nathanw
1081 1.1.8.3 nathanw /* Clear the interrupt status bits. */
1082 1.1.8.3 nathanw mtdcr(DCR_MAL0_ESR, esr);
1083 1.1.8.3 nathanw return (0);
1084 1.1.8.3 nathanw }
1085 1.1.8.3 nathanw
1086 1.1.8.3 nathanw /*
1087 1.1.8.3 nathanw * MAL Transmit End-Of-Buffer interrupt handler.
1088 1.1.8.3 nathanw * NOTE: This shouldn't be called!
1089 1.1.8.3 nathanw */
1090 1.1.8.3 nathanw static int
1091 1.1.8.3 nathanw emac_txeob_intr(void *arg)
1092 1.1.8.3 nathanw {
1093 1.1.8.3 nathanw #ifdef EMAC_EVENT_COUNTERS
1094 1.1.8.3 nathanw struct emac_softc *sc = arg;
1095 1.1.8.3 nathanw #endif
1096 1.1.8.3 nathanw
1097 1.1.8.3 nathanw EMAC_EVCNT_INCR(&sc->sc_ev_txintr);
1098 1.1.8.3 nathanw emac_txreap(arg);
1099 1.1.8.3 nathanw
1100 1.1.8.3 nathanw return (0);
1101 1.1.8.3 nathanw
1102 1.1.8.3 nathanw }
1103 1.1.8.3 nathanw
/*
 * emac_txreap:
 *
 *	Reap completed Tx descriptors: for every transmit job whose last
 *	descriptor the chip has released, free the mbuf and DMA map,
 *	update error/collision statistics, and return the descriptors to
 *	the free pool.
 */
static int
emac_txreap(struct emac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct emac_txsoft *txs;
	int i;
	u_int32_t txstat;

	EMAC_EVCNT_INCR(&sc->sc_ev_txreap);

	/* Clear the interrupt */
	mtdcr(DCR_MAL0_TXEOBISR, mfdcr(DCR_MAL0_TXEOBISR));

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != EMAC_TXQUEUELEN;
	    i = EMAC_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		/*
		 * NOTE(review): this syncs dm_nsegs descriptors starting
		 * at txs_lastdesc, which reaches past this job's own
		 * range; syncing from txs_firstdesc (or just the single
		 * descriptor examined below) looks intended -- confirm.
		 */
		EMAC_CDTXSYNC(sc, txs->txs_lastdesc,
		    txs->txs_dmamap->dm_nsegs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		/* Still owned by the chip?  Then we are done reaping. */
		txstat = sc->sc_txdescs[txs->txs_lastdesc].md_stat_ctrl;
		if (txstat & MAL_TX_READY)
			break;

		/*
		 * Check for errors and collisions.
		 */
		if (txstat & (EMAC_TXS_UR | EMAC_TXS_ED))
			ifp->if_oerrors++;

#ifdef EMAC_EVENT_COUNTERS
		if (txstat & EMAC_TXS_UR)
			EMAC_EVCNT_INCR(&sc->sc_ev_tu);
#endif /* EMAC_EVENT_COUNTERS */

		if (txstat & (EMAC_TXS_EC | EMAC_TXS_MC | EMAC_TXS_SC | EMAC_TXS_LC)) {
			if (txstat & EMAC_TXS_EC)
				ifp->if_collisions += 16;
			else if (txstat & EMAC_TXS_MC)
				ifp->if_collisions += 2; /* XXX? */
			else if (txstat & EMAC_TXS_SC)
				ifp->if_collisions++;
			if (txstat & EMAC_TXS_LC)
				ifp->if_collisions++;
		} else
			ifp->if_opackets++;

		if (ifp->if_flags & IFF_DEBUG) {
			if (txstat & EMAC_TXS_ED)
				printf("%s: excessive deferral\n",
				    sc->sc_dev.dv_xname);
			if (txstat & EMAC_TXS_EC)
				printf("%s: excessive collisions\n",
				    sc->sc_dev.dv_xname);
		}

		/* Return the descriptors and the mbuf to the pools. */
		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txsfree == EMAC_TXQUEUELEN)
		ifp->if_timer = 0;

	return (0);
}
1190 1.1.8.3 nathanw
/*
 * emac_rxeob_intr:
 *
 *	MAL Receive End-Of-Buffer interrupt handler.  Walks the receive
 *	ring from sc_rxptr, passing completed packets up the stack until
 *	an empty (still chip-owned) descriptor is found.
 */
static int
emac_rxeob_intr(void *arg)
{
	struct emac_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct emac_rxsoft *rxs;
	struct mbuf *m;
	u_int32_t rxstat;
	int i, len;

	EMAC_EVCNT_INCR(&sc->sc_ev_rxintr);

	/* Clear the interrupt */
	mtdcr(DCR_MAL0_RXEOBISR, mfdcr(DCR_MAL0_RXEOBISR));

	for (i = sc->sc_rxptr;; i = EMAC_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		EMAC_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxstat = sc->sc_rxdescs[i].md_stat_ctrl;

		if (rxstat & MAL_RX_EMPTY)
			/*
			 * We have processed all of the receive buffers.
			 */
			break;

		/*
		 * If an error occurred, update stats, clear the status
		 * word, and leave the packet buffer in place. It will
		 * simply be reused the next time the ring comes around.
		 *
		 * NOTE(review): EMAC_RXS_RP is reported below but is not
		 * part of this gating mask -- confirm whether runt
		 * packets should be treated as errors here.
		 */
		if (rxstat & (EMAC_RXS_OE | EMAC_RXS_BP | EMAC_RXS_SE |
		    EMAC_RXS_AE | EMAC_RXS_BFCS | EMAC_RXS_PTL | EMAC_RXS_ORE |
		    EMAC_RXS_IRE)) {
#define PRINTERR(bit, str) \
			if (rxstat & (bit)) \
				printf("%s: receive error: %s\n", \
				    sc->sc_dev.dv_xname, str)
			ifp->if_ierrors++;
			PRINTERR(EMAC_RXS_OE, "overrun error");
			PRINTERR(EMAC_RXS_BP, "bad packet");
			PRINTERR(EMAC_RXS_RP, "runt packet");
			PRINTERR(EMAC_RXS_SE, "short event");
			PRINTERR(EMAC_RXS_AE, "alignment error");
			PRINTERR(EMAC_RXS_BFCS, "bad FCS");
			PRINTERR(EMAC_RXS_PTL, "packet too long");
			PRINTERR(EMAC_RXS_ORE, "out of range error");
			PRINTERR(EMAC_RXS_IRE, "in range error");
#undef PRINTERR
			EMAC_INIT_RXDESC(sc, i);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * No errors; receive the packet. Note, the 405GP emac
		 * includes the CRC with every packet.
		 */
		len = sc->sc_rxdescs[i].md_data_len;

		/*
		 * If the packet is small enough to fit in a
		 * single header mbuf, allocate one and copy
		 * the data into it. This greatly reduces
		 * memory consumption when we receive lots
		 * of small packets.
		 *
		 * Otherwise, we add a new buffer to the receive
		 * chain. If this fails, we drop the packet and
		 * recycle the old buffer.
		 */
		if (emac_copy_small != 0 && len <= MHLEN) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			memcpy(mtod(m, caddr_t),
			    mtod(rxs->rxs_mbuf, caddr_t), len);
			/* Recycle the original cluster in place. */
			EMAC_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
		} else {
			m = rxs->rxs_mbuf;
			if (emac_add_rxbuf(sc, i) != 0) {
 dropit:
				ifp->if_ierrors++;
				EMAC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat,
				    rxs->rxs_dmamap, 0,
				    rxs->rxs_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}
		}

		ifp->if_ipackets++;
		/* Tell upper layers the FCS is still attached. */
		m->m_flags |= M_HASFCS;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

#if NBPFILTER > 0
		/*
		 * Pass this up to any BPF listeners, but only
		 * pass if up the stack if it's for us.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;

	return (0);
}
1317 1.1.8.3 nathanw
/*
 * MAL Transmit Descriptor Error interrupt handler.
 * Only counts and logs the event; no recovery is attempted.
 */
static int
emac_txde_intr(void *arg)
{
	struct emac_softc *sc = arg;

	EMAC_EVCNT_INCR(&sc->sc_ev_txde);
	printf("%s: emac_txde_intr\n", sc->sc_dev.dv_xname);
	return (0);
}
1330 1.1.8.3 nathanw
1331 1.1.8.3 nathanw /*
1332 1.1.8.3 nathanw * MAL Receive Descriptor Error interrupt handler
1333 1.1.8.3 nathanw */
1334 1.1.8.3 nathanw static int
1335 1.1.8.3 nathanw emac_rxde_intr(void *arg)
1336 1.1.8.3 nathanw {
1337 1.1.8.3 nathanw int i;
1338 1.1.8.3 nathanw struct emac_softc *sc = arg;
1339 1.1.8.3 nathanw
1340 1.1.8.3 nathanw EMAC_EVCNT_INCR(&sc->sc_ev_rxde);
1341 1.1.8.3 nathanw printf("%s: emac_rxde_intr\n", sc->sc_dev.dv_xname);
1342 1.1.8.3 nathanw /*
1343 1.1.8.3 nathanw * XXX!
1344 1.1.8.3 nathanw * This is a bit drastic; we just drop all descriptors that aren't
1345 1.1.8.3 nathanw * "clean". We should probably send any that are up the stack.
1346 1.1.8.3 nathanw */
1347 1.1.8.3 nathanw for (i = 0; i < EMAC_NRXDESC; i++) {
1348 1.1.8.3 nathanw EMAC_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1349 1.1.8.3 nathanw
1350 1.1.8.3 nathanw if (sc->sc_rxdescs[i].md_data_len != MCLBYTES) {
1351 1.1.8.3 nathanw EMAC_INIT_RXDESC(sc, i);
1352 1.1.8.3 nathanw }
1353 1.1.8.3 nathanw
1354 1.1.8.3 nathanw }
1355 1.1.8.3 nathanw
1356 1.1.8.3 nathanw /* Reenable the receive channel */
1357 1.1.8.3 nathanw mtdcr(DCR_MAL0_RXCASR, MAL0_RXCASR_CHAN0);
1358 1.1.8.3 nathanw
1359 1.1.8.3 nathanw /* Clear the interrupt */
1360 1.1.8.3 nathanw mtdcr(DCR_MAL0_RXDEIR, mfdcr(DCR_MAL0_RXDEIR));
1361 1.1.8.3 nathanw
1362 1.1.8.3 nathanw return (0);
1363 1.1.8.3 nathanw }
1364 1.1.8.3 nathanw
/*
 * emac_mii_wait:
 *
 *	Spin until the STA (MII management) operation in progress
 *	completes.  Returns nonzero (the STACR_OC bit) on completion,
 *	or 0 on timeout.
 *
 *	Note: `&' binds tighter than `==' here, so `reg' holds only the
 *	masked STACR_OC bit, not the full STACR contents -- callers that
 *	need the other fields must re-read EMAC_STACR themselves.
 */
static uint32_t
emac_mii_wait(struct emac_softc *sc)
{
	int i;
	uint32_t reg;

	/* wait for PHY data transfer to complete */
	i = 0;
	while ((reg = EMAC_READ(sc, EMAC_STACR) & STACR_OC) == 0) {
		delay(7);
		if (i++ > 5) {
			printf("%s: MII timed out\n", sc->sc_dev.dv_xname);
			return (0);
		}
	}
	return (reg);
}
1382 1.1.8.3 nathanw
/*
 * emac_mii_readreg:	[mii interface function]
 *
 *	Read PHY register `reg' on PHY `phy' through the EMAC STA
 *	(MII management) interface.  Returns the register value, or 0
 *	on timeout or PHY error.
 */
static int
emac_mii_readreg(struct device *self, int phy, int reg)
{
	struct emac_softc *sc = (struct emac_softc *)self;
	uint32_t sta_reg;

	/* wait for PHY data transfer to complete */
	if (emac_mii_wait(sc) == 0)
		return (0);

	/* Build the read command: register address, opcode, PHY address. */
	sta_reg = reg << STACR_PRASHIFT;
	sta_reg |= STACR_READ;
	sta_reg |= phy << STACR_PCDASHIFT;

	/* Select the OPB clock encoding (50 MHz setting). */
	sta_reg &= ~STACR_OPBC_MASK;
	sta_reg |= STACR_OPBC_50MHZ;


	EMAC_WRITE(sc, EMAC_STACR, sta_reg);

	if ((sta_reg = emac_mii_wait(sc)) == 0)
		return (0);
	/* emac_mii_wait() returns only STACR_OC; fetch the full register. */
	sta_reg = EMAC_READ(sc, EMAC_STACR);
	if ((sta_reg & STACR_PHYE) != 0)
		return (0);
	return (sta_reg >> STACR_PHYDSHIFT);
}
1410 1.1.8.3 nathanw
1411 1.1.8.3 nathanw static void
1412 1.1.8.3 nathanw emac_mii_writereg(struct device *self, int phy, int reg, int val)
1413 1.1.8.3 nathanw {
1414 1.1.8.3 nathanw struct emac_softc *sc = (struct emac_softc *)self;
1415 1.1.8.3 nathanw uint32_t sta_reg;
1416 1.1.8.3 nathanw
1417 1.1.8.3 nathanw /* wait for PHY data transfer to complete */
1418 1.1.8.3 nathanw if (emac_mii_wait(sc) == 0)
1419 1.1.8.3 nathanw return;
1420 1.1.8.3 nathanw
1421 1.1.8.3 nathanw sta_reg = reg << STACR_PRASHIFT;
1422 1.1.8.3 nathanw sta_reg |= STACR_WRITE;
1423 1.1.8.3 nathanw sta_reg |= phy << STACR_PCDASHIFT;
1424 1.1.8.3 nathanw
1425 1.1.8.3 nathanw sta_reg &= ~STACR_OPBC_MASK;
1426 1.1.8.3 nathanw sta_reg |= STACR_OPBC_50MHZ;
1427 1.1.8.3 nathanw
1428 1.1.8.3 nathanw sta_reg |= val << STACR_PHYDSHIFT;
1429 1.1.8.3 nathanw
1430 1.1.8.4 nathanw EMAC_WRITE(sc, EMAC_STACR, sta_reg);
1431 1.1.8.3 nathanw
1432 1.1.8.3 nathanw if ((sta_reg = emac_mii_wait(sc)) == 0)
1433 1.1.8.3 nathanw return;
1434 1.1.8.3 nathanw if ((sta_reg & STACR_PHYE) != 0)
1435 1.1.8.3 nathanw /* error */
1436 1.1.8.3 nathanw return;
1437 1.1.8.3 nathanw }
1438 1.1.8.3 nathanw
1439 1.1.8.3 nathanw static void
1440 1.1.8.3 nathanw emac_mii_statchg(struct device *self)
1441 1.1.8.3 nathanw {
1442 1.1.8.3 nathanw struct emac_softc *sc = (void *)self;
1443 1.1.8.3 nathanw
1444 1.1.8.3 nathanw if (sc->sc_mii.mii_media_active & IFM_FDX)
1445 1.1.8.3 nathanw sc->sc_mr1 |= MR1_FDE;
1446 1.1.8.3 nathanw else
1447 1.1.8.3 nathanw sc->sc_mr1 &= ~(MR1_FDE | MR1_EIFC);
1448 1.1.8.3 nathanw
1449 1.1.8.3 nathanw /* XXX 802.1x flow-control? */
1450 1.1.8.3 nathanw
1451 1.1.8.3 nathanw /*
1452 1.1.8.3 nathanw * MR1 can only be written immediately after a reset...
1453 1.1.8.3 nathanw */
1454 1.1.8.3 nathanw emac_reset(sc);
1455 1.1.8.3 nathanw }
1456 1.1.8.3 nathanw
1457 1.1.8.3 nathanw static void
1458 1.1.8.3 nathanw emac_mii_tick(void *arg)
1459 1.1.8.3 nathanw {
1460 1.1.8.3 nathanw struct emac_softc *sc = arg;
1461 1.1.8.3 nathanw int s;
1462 1.1.8.3 nathanw
1463 1.1.8.3 nathanw if ((sc->sc_dev.dv_flags & DVF_ACTIVE) == 0)
1464 1.1.8.3 nathanw return;
1465 1.1.8.3 nathanw
1466 1.1.8.3 nathanw s = splnet();
1467 1.1.8.3 nathanw mii_tick(&sc->sc_mii);
1468 1.1.8.3 nathanw splx(s);
1469 1.1.8.3 nathanw
1470 1.1.8.3 nathanw callout_reset(&sc->sc_callout, hz, emac_mii_tick, sc);
1471 1.1.8.3 nathanw }
1472 1.1.8.3 nathanw
1473 1.1.8.3 nathanw /* ifmedia interface function */
1474 1.1.8.3 nathanw static void
1475 1.1.8.3 nathanw emac_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1476 1.1.8.3 nathanw {
1477 1.1.8.3 nathanw struct emac_softc *sc = ifp->if_softc;
1478 1.1.8.3 nathanw
1479 1.1.8.3 nathanw mii_pollstat(&sc->sc_mii);
1480 1.1.8.3 nathanw
1481 1.1.8.3 nathanw ifmr->ifm_status = sc->sc_mii.mii_media_status;
1482 1.1.8.3 nathanw ifmr->ifm_active = sc->sc_mii.mii_media_active;
1483 1.1.8.3 nathanw }
1484 1.1.8.3 nathanw
1485 1.1.8.3 nathanw /* ifmedia interface function */
1486 1.1.8.3 nathanw static int
1487 1.1.8.3 nathanw emac_mediachange(struct ifnet *ifp)
1488 1.1.8.3 nathanw {
1489 1.1.8.3 nathanw struct emac_softc *sc = ifp->if_softc;
1490 1.1.8.2 nathanw
1491 1.1.8.3 nathanw if (ifp->if_flags & IFF_UP)
1492 1.1.8.3 nathanw mii_mediachg(&sc->sc_mii);
1493 1.1.8.3 nathanw return (0);
1494 1.1.8.2 nathanw }
1495