/*	$Id: at91emac.c,v 1.4 2009/03/14 21:04:05 dsl Exp $	*/
/*	$NetBSD: at91emac.c,v 1.4 2009/03/14 21:04:05 dsl Exp $	*/

/*
 * Copyright (c) 2007 Embedtronics Oy
 * All rights reserved.
 *
 * Based on arch/arm/ep93xx/epe.c
 *
 * Copyright (c) 2004 Jesse Off
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: at91emac.c,v 1.4 2009/03/14 21:04:05 dsl Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/time.h>
#include <sys/device.h>
#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <arm/cpufunc.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_inarp.h>
#endif

#ifdef NS
#include <netns/ns.h>
#include <netns/ns_if.h>
#endif

#include "bpfilter.h"
#if NBPFILTER > 0
#include <net/bpf.h>
#include <net/bpfdesc.h>
#endif

#ifdef IPKDB_AT91	// @@@
#include <ipkdb/ipkdb.h>
#endif

#include <arm/at91/at91var.h>
#include <arm/at91/at91emacreg.h>
#include <arm/at91/at91emacvar.h>

#define DEFAULT_MDCDIV	32

#ifndef EMAC_FAST
#define EMAC_FAST
#endif

#ifndef EMAC_FAST
#define EMAC_READ(x) \
        bus_space_read_4(sc->sc_iot, sc->sc_ioh, (EPE_ ## x))
#define EMAC_WRITE(x, y) \
        bus_space_write_4(sc->sc_iot, sc->sc_ioh, (EPE_ ## x), (y))
#else
#define EMAC_READ(x)	ETHREG(x)
#define EMAC_WRITE(x, y) ETHREG(x) = (y)
#endif /* ! EMAC_FAST */

static int	emac_match(device_t, cfdata_t, void *);
static void	emac_attach(device_t, device_t, void *);
static void	emac_init(struct emac_softc *);
static int	emac_intr(void *arg);
static int	emac_gctx(struct emac_softc *);
static int	emac_mediachange(struct ifnet *);
static void	emac_mediastatus(struct ifnet *, struct ifmediareq *);
int		emac_mii_readreg(device_t, int, int);
void		emac_mii_writereg(device_t, int, int, int);
void		emac_statchg(device_t);
void		emac_tick(void *);
static int	emac_ifioctl(struct ifnet *, u_long, void *);
static void	emac_ifstart(struct ifnet *);
static void	emac_ifwatchdog(struct ifnet *);
static int	emac_ifinit(struct ifnet *);
static void	emac_ifstop(struct ifnet *, int);
static void	emac_setaddr(struct ifnet *);

CFATTACH_DECL(at91emac, sizeof(struct emac_softc),
    emac_match, emac_attach, NULL, NULL);

#ifdef EMAC_DEBUG
int emac_debug = EMAC_DEBUG;
#define DPRINTFN(n, fmt)	if (emac_debug >= (n)) printf fmt
#else
#define DPRINTFN(n, fmt)
#endif

static int
emac_match(device_t parent, cfdata_t match, void *aux)
{
        if (strcmp(match->cf_name, "at91emac") == 0)
                return 2;
        return 0;
}

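/*
 * emac_attach:
 *
 *	Map the EMAC registers, enable the peripheral clock, quiesce the
 *	controller, pick up the MAC address from the "mac-addr" device
 *	property (falling back to a hard-coded address), hook up the
 *	interrupt handler and finish initialization in emac_init().
 */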
static void
emac_attach(device_t parent, device_t self, void *aux)
{
        struct emac_softc *sc = device_private(self);
        struct at91bus_attach_args *sa = aux;
        prop_data_t enaddr;
        uint32_t u;

        printf("\n");
        sc->sc_dev = self;
        sc->sc_iot = sa->sa_iot;
        sc->sc_pid = sa->sa_pid;
        sc->sc_dmat = sa->sa_dmat;

        if (bus_space_map(sa->sa_iot, sa->sa_addr, sa->sa_size, 0, &sc->sc_ioh))
                panic("%s: Cannot map registers", device_xname(self));

        /* enable peripheral clock */
        at91_peripheral_clock(sc->sc_pid, 1);

        /* configure emac: */
        EMAC_WRITE(ETH_CTL, 0);			// disable everything
        EMAC_WRITE(ETH_IDR, -1);		// disable interrupts
        EMAC_WRITE(ETH_RBQP, 0);		// clear receive
        EMAC_WRITE(ETH_CFG, ETH_CFG_CLK_32 | ETH_CFG_SPD | ETH_CFG_FD | ETH_CFG_BIG);
        EMAC_WRITE(ETH_TCR, 0);			// send nothing
        //(void)EMAC_READ(ETH_ISR);
        u = EMAC_READ(ETH_TSR);
        EMAC_WRITE(ETH_TSR, (u & (ETH_TSR_UND | ETH_TSR_COMP | ETH_TSR_BNQ
            | ETH_TSR_IDLE | ETH_TSR_RLE
            | ETH_TSR_COL | ETH_TSR_OVR)));
        u = EMAC_READ(ETH_RSR);
        EMAC_WRITE(ETH_RSR, (u & (ETH_RSR_OVR | ETH_RSR_REC | ETH_RSR_BNA)));

        /* Fetch the Ethernet address from property if set. */
        enaddr = prop_dictionary_get(device_properties(self), "mac-addr");

        if (enaddr != NULL) {
                KASSERT(prop_object_type(enaddr) == PROP_TYPE_DATA);
                KASSERT(prop_data_size(enaddr) == ETHER_ADDR_LEN);
                memcpy(sc->sc_enaddr, prop_data_data_nocopy(enaddr),
                    ETHER_ADDR_LEN);
        } else {
                static const uint8_t hardcoded[ETHER_ADDR_LEN] = {
                        0x00, 0x0d, 0x10, 0x81, 0x0c, 0x94
                };
                memcpy(sc->sc_enaddr, hardcoded, ETHER_ADDR_LEN);
        }

        at91_intr_establish(sc->sc_pid, IPL_NET, INTR_HIGH_LEVEL, emac_intr, sc);
        emac_init(sc);
}

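/*
 * emac_gctx:
 *
 *	Garbage-collect the transmit queue: unload the DMA maps and free
 *	the mbufs of frames the EMAC has finished sending.  Returns
 *	non-zero when a transmit buffer slot is available, zero when the
 *	transmitter has no space left (ETH_TSR_BNQ clear).
 */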
static int
emac_gctx(struct emac_softc *sc)
{
        struct ifnet *ifp = &sc->sc_ec.ec_if;
        u_int32_t tsr;

        tsr = EMAC_READ(ETH_TSR);
        if (!(tsr & ETH_TSR_BNQ)) {
                // no space left
                return 0;
        }

        // free sent frames
        while (sc->txqc > (tsr & ETH_TSR_IDLE ? 0 : 1)) {
                int i = sc->txqi % TX_QLEN;
                bus_dmamap_sync(sc->sc_dmat, sc->txq[i].m_dmamap, 0,
                    sc->txq[i].m->m_pkthdr.len, BUS_DMASYNC_POSTWRITE);
                bus_dmamap_unload(sc->sc_dmat, sc->txq[i].m_dmamap);
                m_freem(sc->txq[i].m);
                DPRINTFN(2, ("%s: freed idx #%i mbuf %p (txqc=%i)\n",
                    __FUNCTION__, i, sc->txq[i].m, sc->txqc));
                sc->txq[i].m = NULL;
                sc->txqi = (i + 1) % TX_QLEN;
                sc->txqc--;
        }

        // mark we're free
        if (ifp->if_flags & IFF_OACTIVE) {
                ifp->if_flags &= ~IFF_OACTIVE;
                /* Disable transmit-buffer-free interrupt */
                /*EMAC_WRITE(ETH_IDR, ETH_ISR_TBRE);*/
        }

        return 1;
}

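/*
 * emac_intr:
 *
 *	Interrupt handler.  Handles "receive buffer not available" and
 *	receive overrun conditions, drains completed receive descriptors
 *	into the network stack, replenishes the receive ring with fresh
 *	cluster mbufs, and restarts transmission if packets are queued.
 */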
static int
emac_intr(void *arg)
{
        struct emac_softc *sc = (struct emac_softc *)arg;
        struct ifnet *ifp = &sc->sc_ec.ec_if;
        u_int32_t imr, isr, rsr, ctl;
        int bi;

        imr = ~EMAC_READ(ETH_IMR);
        if (!(imr & (ETH_ISR_RCOM | ETH_ISR_TBRE | ETH_ISR_TIDLE
            | ETH_ISR_RBNA | ETH_ISR_ROVR))) {
                // interrupt not enabled, can't be us
                return 0;
        }

        isr = EMAC_READ(ETH_ISR) & imr;
        rsr = EMAC_READ(ETH_RSR);		// get receive status register

        DPRINTFN(2, ("%s: isr=0x%08X rsr=0x%08X imr=0x%08X\n",
            __FUNCTION__, isr, rsr, imr));

        if (isr & ETH_ISR_RBNA) {		// out of receive buffers
                EMAC_WRITE(ETH_RSR, ETH_RSR_BNA);	// clear interrupt
                ctl = EMAC_READ(ETH_CTL);		// get current control register value
                EMAC_WRITE(ETH_CTL, ctl & ~ETH_CTL_RE);	// disable receiver
                EMAC_WRITE(ETH_RSR, ETH_RSR_BNA);	// clear BNA bit
                EMAC_WRITE(ETH_CTL, ctl | ETH_CTL_RE);	// re-enable receiver
                ifp->if_ierrors++;
                ifp->if_ipackets++;
                DPRINTFN(1, ("%s: out of receive buffers\n", __FUNCTION__));
        }
        if (isr & ETH_ISR_ROVR) {
                EMAC_WRITE(ETH_RSR, ETH_RSR_OVR);	// clear interrupt
                ifp->if_ierrors++;
                ifp->if_ipackets++;
                DPRINTFN(1, ("%s: receive overrun\n", __FUNCTION__));
        }

        if (isr & ETH_ISR_RCOM) {		// packet has been received!
                uint32_t nfo;
                // @@@ if memory is NOT coherent, then we're in trouble @@@@
                // bus_dmamap_sync(sc->sc_dmat, sc->rbqpage_dmamap, 0, sc->rbqlen, BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
                // printf("## RDSC[%i].ADDR=0x%08X\n", sc->rxqi % RX_QLEN, sc->RDSC[sc->rxqi % RX_QLEN].Addr);
                DPRINTFN(2, ("#2 RDSC[%i].INFO=0x%08X\n", sc->rxqi % RX_QLEN,
                    sc->RDSC[sc->rxqi % RX_QLEN].Info));
                while (sc->RDSC[(bi = sc->rxqi % RX_QLEN)].Addr & ETH_RDSC_F_USED) {
                        int fl;
                        struct mbuf *m;

                        nfo = sc->RDSC[bi].Info;
                        fl = (nfo & ETH_RDSC_I_LEN) - 4;
                        DPRINTFN(2, ("## nfo=0x%08X\n", nfo));

                        MGETHDR(m, M_DONTWAIT, MT_DATA);
                        if (m != NULL)
                                MCLGET(m, M_DONTWAIT);
                        if (m != NULL && (m->m_flags & M_EXT)) {
                                bus_dmamap_sync(sc->sc_dmat, sc->rxq[bi].m_dmamap, 0,
                                    MCLBYTES, BUS_DMASYNC_POSTREAD);
                                bus_dmamap_unload(sc->sc_dmat,
                                    sc->rxq[bi].m_dmamap);
                                sc->rxq[bi].m->m_pkthdr.rcvif = ifp;
                                sc->rxq[bi].m->m_pkthdr.len =
                                    sc->rxq[bi].m->m_len = fl;
#if NBPFILTER > 0
                                if (ifp->if_bpf)
                                        bpf_mtap(ifp->if_bpf, sc->rxq[bi].m);
#endif /* NBPFILTER > 0 */
                                DPRINTFN(2, ("received %u bytes packet\n", fl));
                                (*ifp->if_input)(ifp, sc->rxq[bi].m);
                                if (mtod(m, intptr_t) & 3) {
                                        m_adj(m, mtod(m, intptr_t) & 3);
                                }
                                sc->rxq[bi].m = m;
                                bus_dmamap_load(sc->sc_dmat,
                                    sc->rxq[bi].m_dmamap,
                                    m->m_ext.ext_buf, MCLBYTES,
                                    NULL, BUS_DMA_NOWAIT);
                                bus_dmamap_sync(sc->sc_dmat, sc->rxq[bi].m_dmamap, 0,
                                    MCLBYTES, BUS_DMASYNC_PREREAD);
                                sc->RDSC[bi].Info = 0;
                                sc->RDSC[bi].Addr =
                                    sc->rxq[bi].m_dmamap->dm_segs[0].ds_addr
                                    | (bi == (RX_QLEN - 1) ? ETH_RDSC_F_WRAP : 0);
                        } else {
                                /* Drop packets until we can get replacement
                                 * empty mbufs for the RXDQ.
                                 */
                                if (m != NULL) {
                                        m_freem(m);
                                }
                                ifp->if_ierrors++;
                        }
                        sc->rxqi++;
                }
                // bus_dmamap_sync(sc->sc_dmat, sc->rbqpage_dmamap, 0, sc->rbqlen, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        }

        if (emac_gctx(sc) > 0 && IFQ_IS_EMPTY(&ifp->if_snd) == 0) {
                emac_ifstart(ifp);
        }
#if 0 // reloop
        irq = EMAC_READ(IntStsC);
        if ((irq & (IntSts_RxSQ | IntSts_ECI)) != 0)
                goto begin;
#endif

        return (1);
}

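/*
 * emac_init:
 *
 *	One-time hardware and software initialization: reset and
 *	reconfigure the EMAC, program the station address, allocate the
 *	receive descriptor ring and its DMA maps, populate the receive
 *	ring with cluster mbufs, create the transmit DMA maps, attach the
 *	MII/PHY, and finally attach the network interface.
 */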
static void
emac_init(struct emac_softc *sc)
{
        bus_dma_segment_t segs;
        void *addr;
        int rsegs, err, i;
        struct ifnet *ifp = &sc->sc_ec.ec_if;
        uint32_t u;
#if 0
        int mdcdiv = DEFAULT_MDCDIV;
#endif

        callout_init(&sc->emac_tick_ch, 0);

        // ok...
        EMAC_WRITE(ETH_CTL, ETH_CTL_MPE);	// disable everything
        EMAC_WRITE(ETH_IDR, -1);		// disable interrupts
        EMAC_WRITE(ETH_RBQP, 0);		// clear receive
        EMAC_WRITE(ETH_CFG, ETH_CFG_CLK_32 | ETH_CFG_SPD | ETH_CFG_FD | ETH_CFG_BIG);
        EMAC_WRITE(ETH_TCR, 0);			// send nothing
        // (void)EMAC_READ(ETH_ISR);
        u = EMAC_READ(ETH_TSR);
        EMAC_WRITE(ETH_TSR, (u & (ETH_TSR_UND | ETH_TSR_COMP | ETH_TSR_BNQ
            | ETH_TSR_IDLE | ETH_TSR_RLE
            | ETH_TSR_COL | ETH_TSR_OVR)));
        u = EMAC_READ(ETH_RSR);
        EMAC_WRITE(ETH_RSR, (u & (ETH_RSR_OVR | ETH_RSR_REC | ETH_RSR_BNA)));

        /* configure EMAC */
        EMAC_WRITE(ETH_CFG, ETH_CFG_CLK_32 | ETH_CFG_SPD | ETH_CFG_FD | ETH_CFG_BIG);
        EMAC_WRITE(ETH_CTL, ETH_CTL_MPE);
#if 0
        if (device_cfdata(&sc->sc_dev)->cf_flags)
                mdcdiv = device_cfdata(&sc->sc_dev)->cf_flags;
#endif
        /* set ethernet address */
        EMAC_WRITE(ETH_SA1L, (sc->sc_enaddr[3] << 24)
            | (sc->sc_enaddr[2] << 16) | (sc->sc_enaddr[1] << 8)
            | (sc->sc_enaddr[0]));
        EMAC_WRITE(ETH_SA1H, (sc->sc_enaddr[5] << 8)
            | (sc->sc_enaddr[4]));
        EMAC_WRITE(ETH_SA2L, 0);
        EMAC_WRITE(ETH_SA2H, 0);
        EMAC_WRITE(ETH_SA3L, 0);
        EMAC_WRITE(ETH_SA3H, 0);
        EMAC_WRITE(ETH_SA4L, 0);
        EMAC_WRITE(ETH_SA4H, 0);

        /* Allocate a page of memory for receive queue descriptors */
        sc->rbqlen = (ETH_RDSC_SIZE * (RX_QLEN + 1) * 2 + PAGE_SIZE - 1) / PAGE_SIZE;
        sc->rbqlen *= PAGE_SIZE;
        DPRINTFN(1, ("%s: rbqlen=%i\n", __FUNCTION__, sc->rbqlen));

        err = bus_dmamem_alloc(sc->sc_dmat, sc->rbqlen, 0,
            MAX(16384, PAGE_SIZE),	// see EMAC errata why forced to 16384 byte boundary
            &segs, 1, &rsegs, BUS_DMA_WAITOK);
        if (err == 0) {
                DPRINTFN(1, ("%s: -> bus_dmamem_map\n", __FUNCTION__));
                err = bus_dmamem_map(sc->sc_dmat, &segs, 1, sc->rbqlen,
                    &sc->rbqpage, (BUS_DMA_WAITOK | BUS_DMA_COHERENT));
        }
        if (err == 0) {
                DPRINTFN(1, ("%s: -> bus_dmamap_create\n", __FUNCTION__));
                err = bus_dmamap_create(sc->sc_dmat, sc->rbqlen, 1,
                    sc->rbqlen, MAX(16384, PAGE_SIZE), BUS_DMA_WAITOK,
                    &sc->rbqpage_dmamap);
        }
        if (err == 0) {
                DPRINTFN(1, ("%s: -> bus_dmamap_load\n", __FUNCTION__));
                err = bus_dmamap_load(sc->sc_dmat, sc->rbqpage_dmamap,
                    sc->rbqpage, sc->rbqlen, NULL, BUS_DMA_WAITOK);
        }
        if (err != 0) {
                panic("%s: Cannot get DMA memory", device_xname(sc->sc_dev));
        }
        sc->rbqpage_dsaddr = sc->rbqpage_dmamap->dm_segs[0].ds_addr;

        bzero(sc->rbqpage, sc->rbqlen);

        /* Set up pointers to start of each queue in kernel addr space.
         * Each descriptor queue or status queue entry uses 2 words
         */
        sc->RDSC = (void *)sc->rbqpage;

        /* Populate the RXQ with mbufs */
        sc->rxqi = 0;
        for (i = 0; i < RX_QLEN; i++) {
                struct mbuf *m;

                err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, PAGE_SIZE,
                    BUS_DMA_WAITOK, &sc->rxq[i].m_dmamap);
                if (err) {
                        panic("%s: dmamap_create failed: %i\n", __FUNCTION__, err);
                }
                MGETHDR(m, M_WAIT, MT_DATA);
                MCLGET(m, M_WAIT);
                sc->rxq[i].m = m;
                if (mtod(m, intptr_t) & 3) {
                        m_adj(m, mtod(m, intptr_t) & 3);
                }
                err = bus_dmamap_load(sc->sc_dmat, sc->rxq[i].m_dmamap,
                    m->m_ext.ext_buf, MCLBYTES, NULL,
                    BUS_DMA_WAITOK);
                if (err) {
                        panic("%s: dmamap_load failed: %i\n", __FUNCTION__, err);
                }
                sc->RDSC[i].Addr = sc->rxq[i].m_dmamap->dm_segs[0].ds_addr
                    | (i == (RX_QLEN - 1) ? ETH_RDSC_F_WRAP : 0);
                sc->RDSC[i].Info = 0;
                bus_dmamap_sync(sc->sc_dmat, sc->rxq[i].m_dmamap, 0,
                    MCLBYTES, BUS_DMASYNC_PREREAD);
        }

        /* prepare transmit queue */
        for (i = 0; i < TX_QLEN; i++) {
                err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
                    (BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW),
                    &sc->txq[i].m_dmamap);
                if (err)
                        panic("ARGH #1");
                sc->txq[i].m = NULL;
        }

        /* Program each queue's start addr, cur addr, and len registers
         * with the physical addresses.
         */
        bus_dmamap_sync(sc->sc_dmat, sc->rbqpage_dmamap, 0, sc->rbqlen,
            BUS_DMASYNC_PREREAD);
        addr = (void *)sc->rbqpage_dmamap->dm_segs[0].ds_addr;
        EMAC_WRITE(ETH_RBQP, (u_int32_t)addr);

        /* Divide HCLK by 32 for MDC clock */
        sc->sc_mii.mii_ifp = ifp;
        sc->sc_mii.mii_readreg = emac_mii_readreg;
        sc->sc_mii.mii_writereg = emac_mii_writereg;
        sc->sc_mii.mii_statchg = emac_statchg;
        ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, emac_mediachange,
            emac_mediastatus);
        mii_attach((device_t)sc, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
            MII_OFFSET_ANY, 0);
        ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

        // enable / disable interrupts

#if 0
        // enable / disable interrupts
        EMAC_WRITE(ETH_IDR, -1);
        EMAC_WRITE(ETH_IER, ETH_ISR_RCOM | ETH_ISR_TBRE | ETH_ISR_TIDLE
            | ETH_ISR_RBNA | ETH_ISR_ROVR);
        // (void)EMAC_READ(ETH_ISR); // why

        // enable transmitter / receiver
        EMAC_WRITE(ETH_CTL, ETH_CTL_TE | ETH_CTL_RE | ETH_CTL_ISR
            | ETH_CTL_CSR | ETH_CTL_MPE);
#endif
        /*
         * We can support 802.1Q VLAN-sized frames.
         */
        sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;

        strcpy(ifp->if_xname, device_xname(sc->sc_dev));
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
        ifp->if_ioctl = emac_ifioctl;
        ifp->if_start = emac_ifstart;
        ifp->if_watchdog = emac_ifwatchdog;
        ifp->if_init = emac_ifinit;
        ifp->if_stop = emac_ifstop;
        ifp->if_timer = 0;
        ifp->if_softc = sc;
        IFQ_SET_READY(&ifp->if_snd);
        if_attach(ifp);
        ether_ifattach(ifp, sc->sc_enaddr);
}

static int
emac_mediachange(struct ifnet *ifp)
{
        if (ifp->if_flags & IFF_UP)
                emac_ifinit(ifp);
        return (0);
}

static void
emac_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
        struct emac_softc *sc = ifp->if_softc;

        mii_pollstat(&sc->sc_mii);
        ifmr->ifm_active = sc->sc_mii.mii_media_active;
        ifmr->ifm_status = sc->sc_mii.mii_media_status;
}

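/*
 * emac_mii_readreg / emac_mii_writereg:
 *
 *	MII management access through the EMAC's ETH_MAN register.  Each
 *	operation busy-waits on ETH_SR_IDLE until the management frame has
 *	been clocked out to the PHY.
 */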
int
emac_mii_readreg(device_t self, int phy, int reg)
{
        struct emac_softc *sc;

        sc = (struct emac_softc *)self;
        EMAC_WRITE(ETH_MAN, (ETH_MAN_HIGH | ETH_MAN_RW_RD
            | ((phy << ETH_MAN_PHYA_SHIFT) & ETH_MAN_PHYA)
            | ((reg << ETH_MAN_REGA_SHIFT) & ETH_MAN_REGA)
            | ETH_MAN_CODE_IEEE802_3));
        while (!(EMAC_READ(ETH_SR) & ETH_SR_IDLE))
                ;
        return (EMAC_READ(ETH_MAN) & ETH_MAN_DATA);
}

void
emac_mii_writereg(device_t self, int phy, int reg, int val)
{
        struct emac_softc *sc;

        sc = (struct emac_softc *)self;
        EMAC_WRITE(ETH_MAN, (ETH_MAN_HIGH | ETH_MAN_RW_WR
            | ((phy << ETH_MAN_PHYA_SHIFT) & ETH_MAN_PHYA)
            | ((reg << ETH_MAN_REGA_SHIFT) & ETH_MAN_REGA)
            | ETH_MAN_CODE_IEEE802_3
            | (val & ETH_MAN_DATA)));
        while (!(EMAC_READ(ETH_SR) & ETH_SR_IDLE))
                ;
}

void
emac_statchg(device_t self)
{
        struct emac_softc *sc = (struct emac_softc *)self;
        u_int32_t reg;

        /*
         * We must keep the MAC and the PHY in sync as
         * to the status of full-duplex!
         */
        reg = EMAC_READ(ETH_CFG);
        if (sc->sc_mii.mii_media_active & IFM_FDX)
                reg |= ETH_CFG_FD;
        else
                reg &= ~ETH_CFG_FD;
        EMAC_WRITE(ETH_CFG, reg);
}

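/*
 * emac_tick:
 *
 *	Once-a-second housekeeping: accumulate the collision and
 *	discarded-receive-frame counters, restart transmission if the send
 *	queue stalled, tick the MII, and reschedule itself via callout.
 */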
void
emac_tick(void *arg)
{
        struct emac_softc *sc = (struct emac_softc *)arg;
        struct ifnet *ifp = &sc->sc_ec.ec_if;
        int s;
        u_int32_t misses;

        ifp->if_collisions += EMAC_READ(ETH_SCOL) + EMAC_READ(ETH_MCOL);
        /* These misses are ok, they will happen if the RAM/CPU can't keep up */
        misses = EMAC_READ(ETH_DRFC);
        if (misses > 0)
                printf("%s: %d rx misses\n", device_xname(sc->sc_dev), misses);

        s = splnet();
        if (emac_gctx(sc) > 0 && IFQ_IS_EMPTY(&ifp->if_snd) == 0) {
                emac_ifstart(ifp);
        }
        splx(s);

        mii_tick(&sc->sc_mii);
        callout_reset(&sc->emac_tick_ch, hz, emac_tick, sc);
}

static int
emac_ifioctl(struct ifnet *ifp, u_long cmd, void *data)
{
        struct emac_softc *sc = ifp->if_softc;
        struct ifreq *ifr = (struct ifreq *)data;
        int s, error;

        s = splnet();
        switch (cmd) {
        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
                break;
        default:
                error = ether_ioctl(ifp, cmd, data);
                if (error == ENETRESET) {
                        if (ifp->if_flags & IFF_RUNNING)
                                emac_setaddr(ifp);
                        error = 0;
                }
        }
        splx(s);
        return error;
}

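/*
 * emac_ifstart:
 *
 *	Transmit start routine.  Pulls packets off the send queue, makes
 *	sure each frame maps to a single word-aligned DMA segment (copying
 *	it into a fresh mbuf if necessary) and hands it to the EMAC via
 *	the ETH_TAR/ETH_TCR registers.  Sets IFF_OACTIVE and enables the
 *	transmit-buffer-free interrupt when the hardware queue is full.
 */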
static void
emac_ifstart(struct ifnet *ifp)
{
        struct emac_softc *sc = (struct emac_softc *)ifp->if_softc;
        struct mbuf *m;
        bus_dma_segment_t *segs;
        int s, bi, err, nsegs;

        s = splnet();
start:
        if (emac_gctx(sc) == 0) {
                /* Enable transmit-buffer-free interrupt */
                EMAC_WRITE(ETH_IER, ETH_ISR_TBRE);
                ifp->if_flags |= IFF_OACTIVE;
                ifp->if_timer = 10;
                splx(s);
                return;
        }

        ifp->if_timer = 0;

        IFQ_POLL(&ifp->if_snd, m);
        if (m == NULL) {
                splx(s);
                return;
        }
//more:
        bi = (sc->txqi + sc->txqc) % TX_QLEN;
        if ((err = bus_dmamap_load_mbuf(sc->sc_dmat, sc->txq[bi].m_dmamap, m,
            BUS_DMA_NOWAIT)) ||
            sc->txq[bi].m_dmamap->dm_segs[0].ds_addr & 0x3 ||
            sc->txq[bi].m_dmamap->dm_nsegs > 1) {
                /* Copy entire mbuf chain to new single */
                struct mbuf *mn;

                if (err == 0)
                        bus_dmamap_unload(sc->sc_dmat, sc->txq[bi].m_dmamap);

                MGETHDR(mn, M_DONTWAIT, MT_DATA);
                if (mn == NULL)
                        goto stop;
                if (m->m_pkthdr.len > MHLEN) {
                        MCLGET(mn, M_DONTWAIT);
                        if ((mn->m_flags & M_EXT) == 0) {
                                m_freem(mn);
                                goto stop;
                        }
                }
                m_copydata(m, 0, m->m_pkthdr.len, mtod(mn, void *));
                mn->m_pkthdr.len = mn->m_len = m->m_pkthdr.len;
                IFQ_DEQUEUE(&ifp->if_snd, m);
                m_freem(m);
                m = mn;
                bus_dmamap_load_mbuf(sc->sc_dmat, sc->txq[bi].m_dmamap, m,
                    BUS_DMA_NOWAIT);
        } else {
                IFQ_DEQUEUE(&ifp->if_snd, m);
        }

#if NBPFILTER > 0
        if (ifp->if_bpf)
                bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

        nsegs = sc->txq[bi].m_dmamap->dm_nsegs;
        segs = sc->txq[bi].m_dmamap->dm_segs;
        if (nsegs > 1) {
                panic("#### ARGH #2");
        }

        sc->txq[bi].m = m;
        sc->txqc++;

        DPRINTFN(2, ("%s: start sending idx #%i mbuf %p (txqc=%i, phys %p), len=%u\n",
            __FUNCTION__, bi, sc->txq[bi].m, sc->txqc, (void *)segs->ds_addr,
            (unsigned)m->m_pkthdr.len));
#ifdef DIAGNOSTIC
        if (sc->txqc > TX_QLEN) {
                panic("%s: txqc %i > %i", __FUNCTION__, sc->txqc, TX_QLEN);
        }
#endif

        bus_dmamap_sync(sc->sc_dmat, sc->txq[bi].m_dmamap, 0,
            sc->txq[bi].m_dmamap->dm_mapsize,
            BUS_DMASYNC_PREWRITE);

        EMAC_WRITE(ETH_TAR, segs->ds_addr);
        EMAC_WRITE(ETH_TCR, m->m_pkthdr.len);
        if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
                goto start;
stop:

        splx(s);
        return;
}

static void
emac_ifwatchdog(struct ifnet *ifp)
{
        struct emac_softc *sc = (struct emac_softc *)ifp->if_softc;

        if ((ifp->if_flags & IFF_RUNNING) == 0)
                return;
        printf("%s: device timeout, CTL = 0x%08x, CFG = 0x%08x\n",
            device_xname(sc->sc_dev), EMAC_READ(ETH_CTL), EMAC_READ(ETH_CFG));
}

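/*
 * emac_ifinit:
 *
 *	Bring the interface up: enable the receive/transmit interrupts,
 *	turn on the transmitter and receiver, kick off media selection and
 *	start the per-second tick callout.
 */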
static int
emac_ifinit(struct ifnet *ifp)
{
        struct emac_softc *sc = ifp->if_softc;
        int s = splnet();

        callout_stop(&sc->emac_tick_ch);

        // enable interrupts
        EMAC_WRITE(ETH_IDR, -1);
        EMAC_WRITE(ETH_IER, ETH_ISR_RCOM | ETH_ISR_TBRE | ETH_ISR_TIDLE
            | ETH_ISR_RBNA | ETH_ISR_ROVR);

        // enable transmitter / receiver
        EMAC_WRITE(ETH_CTL, ETH_CTL_TE | ETH_CTL_RE | ETH_CTL_ISR
            | ETH_CTL_CSR | ETH_CTL_MPE);

        mii_mediachg(&sc->sc_mii);
        callout_reset(&sc->emac_tick_ch, hz, emac_tick, sc);
        ifp->if_flags |= IFF_RUNNING;
        splx(s);
        return 0;
}

static void
emac_ifstop(struct ifnet *ifp, int disable)
{
//	u_int32_t u;
        struct emac_softc *sc = ifp->if_softc;

#if 0
        EMAC_WRITE(ETH_CTL, ETH_CTL_MPE);	// disable everything
        EMAC_WRITE(ETH_IDR, -1);		// disable interrupts
//	EMAC_WRITE(ETH_RBQP, 0);		// clear receive
        EMAC_WRITE(ETH_CFG, ETH_CFG_CLK_32 | ETH_CFG_SPD | ETH_CFG_FD | ETH_CFG_BIG);
        EMAC_WRITE(ETH_TCR, 0);			// send nothing
//	(void)EMAC_READ(ETH_ISR);
        u = EMAC_READ(ETH_TSR);
        EMAC_WRITE(ETH_TSR, (u & (ETH_TSR_UND | ETH_TSR_COMP | ETH_TSR_BNQ
            | ETH_TSR_IDLE | ETH_TSR_RLE
            | ETH_TSR_COL | ETH_TSR_OVR)));
        u = EMAC_READ(ETH_RSR);
        EMAC_WRITE(ETH_RSR, (u & (ETH_RSR_OVR | ETH_RSR_REC | ETH_RSR_BNA)));
#endif
        callout_stop(&sc->emac_tick_ch);

        /* Down the MII. */
        mii_down(&sc->sc_mii);

        ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
        ifp->if_timer = 0;
        sc->sc_mii.mii_media_status &= ~IFM_ACTIVE;
}

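/*
 * emac_setaddr:
 *
 *	Reprogram the address filters: the station address, up to three
 *	perfect multicast filters, the 64-bit hash filter for any further
 *	groups, and the "copy all frames" bit for promiscuous or all-multi
 *	operation.  The receiver is disabled while the filters are
 *	rewritten.
 */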
static void
emac_setaddr(struct ifnet *ifp)
{
        struct emac_softc *sc = ifp->if_softc;
        struct ethercom *ac = &sc->sc_ec;
        struct ether_multi *enm;
        struct ether_multistep step;
        u_int8_t ias[3][ETHER_ADDR_LEN];
        u_int32_t h, nma = 0, hashes[2] = { 0, 0 };
        u_int32_t ctl = EMAC_READ(ETH_CTL);
        u_int32_t cfg = EMAC_READ(ETH_CFG);

        /* disable receiver temporarily */
        EMAC_WRITE(ETH_CTL, ctl & ~ETH_CTL_RE);

        cfg &= ~(ETH_CFG_MTI | ETH_CFG_UNI | ETH_CFG_CAF);

        if (ifp->if_flags & IFF_PROMISC) {
                cfg |= ETH_CFG_CAF;
        } else {
                cfg &= ~ETH_CFG_CAF;
        }

        // ETH_CFG_BIG?

        ifp->if_flags &= ~IFF_ALLMULTI;

        ETHER_FIRST_MULTI(step, ac, enm);
        while (enm != NULL) {
                if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
                        /*
                         * We must listen to a range of multicast addresses.
                         * For now, just accept all multicasts, rather than
                         * trying to set only those filter bits needed to match
                         * the range.  (At this time, the only use of address
                         * ranges is for IP multicast routing, for which the
                         * range is big enough to require all bits set.)
                         */
                        cfg |= ETH_CFG_CAF;
                        hashes[0] = 0xffffffffUL;
                        hashes[1] = 0xffffffffUL;
                        ifp->if_flags |= IFF_ALLMULTI;
                        nma = 0;
                        break;
                }

                if (nma < 3) {
                        /* We can program 3 perfect address filters for mcast */
                        memcpy(ias[nma], enm->enm_addrlo, ETHER_ADDR_LEN);
                } else {
                        /*
                         * XXX: Datasheet is not very clear here, I'm not sure
                         * if I'm doing this right.  --joff
                         */
                        h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);

                        /* Just want the 6 most-significant bits. */
                        h = h >> 26;

                        hashes[h / 32] |= (1 << (h % 32));
                        cfg |= ETH_CFG_MTI;
                }
                ETHER_NEXT_MULTI(step, enm);
                nma++;
        }

        // program...
        DPRINTFN(1, ("%s: en0 %02x:%02x:%02x:%02x:%02x:%02x\n", __FUNCTION__,
            sc->sc_enaddr[0], sc->sc_enaddr[1], sc->sc_enaddr[2],
            sc->sc_enaddr[3], sc->sc_enaddr[4], sc->sc_enaddr[5]));
        EMAC_WRITE(ETH_SA1L, (sc->sc_enaddr[3] << 24)
            | (sc->sc_enaddr[2] << 16) | (sc->sc_enaddr[1] << 8)
            | (sc->sc_enaddr[0]));
        EMAC_WRITE(ETH_SA1H, (sc->sc_enaddr[5] << 8)
            | (sc->sc_enaddr[4]));
        if (nma > 1) {
                DPRINTFN(1, ("%s: en1 %02x:%02x:%02x:%02x:%02x:%02x\n", __FUNCTION__,
                    ias[0][0], ias[0][1], ias[0][2],
                    ias[0][3], ias[0][4], ias[0][5]));
                EMAC_WRITE(ETH_SA2L, (ias[0][3] << 24)
                    | (ias[0][2] << 16) | (ias[0][1] << 8)
                    | (ias[0][0]));
                EMAC_WRITE(ETH_SA2H, (ias[0][4] << 8)
                    | (ias[0][5]));
        }
        if (nma > 2) {
                DPRINTFN(1, ("%s: en2 %02x:%02x:%02x:%02x:%02x:%02x\n", __FUNCTION__,
                    ias[1][0], ias[1][1], ias[1][2],
                    ias[1][3], ias[1][4], ias[1][5]));
                EMAC_WRITE(ETH_SA3L, (ias[1][3] << 24)
                    | (ias[1][2] << 16) | (ias[1][1] << 8)
                    | (ias[1][0]));
                EMAC_WRITE(ETH_SA3H, (ias[1][4] << 8)
                    | (ias[1][5]));
        }
        if (nma > 3) {
                DPRINTFN(1, ("%s: en3 %02x:%02x:%02x:%02x:%02x:%02x\n", __FUNCTION__,
                    ias[2][0], ias[2][1], ias[2][2],
                    ias[2][3], ias[2][4], ias[2][5]));
                /* the third extra perfect filter goes into SA4 */
                EMAC_WRITE(ETH_SA4L, (ias[2][3] << 24)
                    | (ias[2][2] << 16) | (ias[2][1] << 8)
                    | (ias[2][0]));
                EMAC_WRITE(ETH_SA4H, (ias[2][4] << 8)
                    | (ias[2][5]));
        }
        EMAC_WRITE(ETH_HSH, hashes[0]);
        EMAC_WRITE(ETH_HSL, hashes[1]);
        EMAC_WRITE(ETH_CFG, cfg);
        EMAC_WRITE(ETH_CTL, ctl | ETH_CTL_RE);
}