epe.c revision 1.8 1 /* $NetBSD: epe.c,v 1.8 2006/05/05 18:04:41 thorpej Exp $ */
2
3 /*
4 * Copyright (c) 2004 Jesse Off
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by the NetBSD
18 * Foundation, Inc. and its contributors.
19 * 4. Neither the name of The NetBSD Foundation nor the names of its
20 * contributors may be used to endorse or promote products derived
21 * from this software without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
25 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
26 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
27 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33 * POSSIBILITY OF SUCH DAMAGE.
34 */
35
36 #include <sys/cdefs.h>
37 __KERNEL_RCSID(0, "$NetBSD: epe.c,v 1.8 2006/05/05 18:04:41 thorpej Exp $");
38
39 #include <sys/types.h>
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/ioctl.h>
43 #include <sys/kernel.h>
44 #include <sys/proc.h>
45 #include <sys/malloc.h>
46 #include <sys/time.h>
47 #include <sys/device.h>
48 #include <uvm/uvm_extern.h>
49
50 #include <machine/bus.h>
51 #include <machine/intr.h>
52
53 #include <arm/cpufunc.h>
54
55 #include <arm/ep93xx/epsocvar.h>
56 #include <arm/ep93xx/ep93xxvar.h>
57
58 #include <net/if.h>
59 #include <net/if_dl.h>
60 #include <net/if_types.h>
61 #include <net/if_media.h>
62 #include <net/if_ether.h>
63
64 #include <dev/mii/mii.h>
65 #include <dev/mii/miivar.h>
66
67 #ifdef INET
68 #include <netinet/in.h>
69 #include <netinet/in_systm.h>
70 #include <netinet/in_var.h>
71 #include <netinet/ip.h>
72 #include <netinet/if_inarp.h>
73 #endif
74
75 #ifdef NS
76 #include <netns/ns.h>
77 #include <netns/ns_if.h>
78 #endif
79
80 #include "bpfilter.h"
81 #if NBPFILTER > 0
82 #include <net/bpf.h>
83 #include <net/bpfdesc.h>
84 #endif
85
86 #include <machine/bus.h>
87
88 #ifdef IPKDB_EP93XX
89 #include <ipkdb/ipkdb.h>
90 #endif
91
92 #include <arm/ep93xx/ep93xxreg.h>
93 #include <arm/ep93xx/epereg.h>
94 #include <arm/ep93xx/epevar.h>
95
/* Default HCLK-to-MDC clock divisor (overridable via config cf_flags). */
#define DEFAULT_MDCDIV 32

/*
 * EPE_FAST is forced on here, so the bus_space/bus_dmamap_sync variants
 * in the #ifndef branch below are currently dead code kept for reference.
 */
#ifndef EPE_FAST
#define EPE_FAST
#endif

#ifndef EPE_FAST
/* Portable path: register access via bus_space, with real DMA syncs. */
#define EPE_READ(x) \
	bus_space_read_4(sc->sc_iot, sc->sc_ioh, (EPE_ ## x))
#define EPE_WRITE(x, y) \
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, (EPE_ ## x), (y))
#define CTRLPAGE_DMASYNC(x, y, z) \
	bus_dmamap_sync(sc->sc_dmat, sc->ctrlpage_dmamap, (x), (y), (z))
#else
/*
 * Fast path: dereference MAC registers directly through the statically
 * mapped AHB virtual address, and elide control-page DMA syncs.
 * NOTE(review): this presumes the control page mapping is coherent
 * (BUS_DMA_COHERENT is requested in epe_init) -- confirm on this port.
 * Both macros rely on a variable named `sc' being in scope at each use.
 */
#define EPE_READ(x) *(volatile u_int32_t *) \
	(EP93XX_AHB_VBASE + EP93XX_AHB_EPE + (EPE_ ## x))
#define EPE_WRITE(x, y) *(volatile u_int32_t *) \
	(EP93XX_AHB_VBASE + EP93XX_AHB_EPE + (EPE_ ## x)) = y
#define CTRLPAGE_DMASYNC(x, y, z)
#endif /* ! EPE_FAST */
116
/* autoconf(9) glue */
static int epe_match(struct device *, struct cfdata *, void *);
static void epe_attach(struct device *, struct device *, void *);
/* hardware bring-up and interrupt handling */
static void epe_init(struct epe_softc *);
static int epe_intr(void* arg);
static int epe_gctx(struct epe_softc *);
/* ifmedia callbacks */
static int epe_mediachange(struct ifnet *);
static void epe_mediastatus(struct ifnet *, struct ifmediareq *);
/* MII access and link-state callbacks (referenced by mii_attach) */
int epe_mii_readreg (struct device *, int, int);
void epe_mii_writereg (struct device *, int, int, int);
void epe_statchg (struct device *);
/* periodic statistics/MII timer */
void epe_tick (void *);
/* ifnet entry points */
static int epe_ifioctl (struct ifnet *, u_long, caddr_t);
static void epe_ifstart (struct ifnet *);
static void epe_ifwatchdog (struct ifnet *);
static int epe_ifinit (struct ifnet *);
static void epe_ifstop (struct ifnet *, int);
static void epe_setaddr (struct ifnet *);

CFATTACH_DECL(epe, sizeof(struct epe_softc),
    epe_match, epe_attach, NULL, NULL);
137
/*
 * Autoconf match routine.  The EP93xx Ethernet MAC is an on-chip
 * device that is always present, so match unconditionally; return 2
 * so we outbid any generic (priority 1) driver.
 */
static int
epe_match(struct device *parent, struct cfdata *match, void *aux)
{
	return (2);
}
143
/*
 * Attach routine: map the MAC's register window, optionally program
 * the station address from the "mac-addr" device property, establish
 * the interrupt handler and perform one-time initialization.
 */
static void
epe_attach(struct device *parent, struct device *self, void *aux)
{
	struct epe_softc *sc;
	struct epsoc_attach_args *sa;
	prop_data_t enaddr;

	printf("\n");
	sc = (struct epe_softc*) self;
	sa = aux;
	sc->sc_iot = sa->sa_iot;
	sc->sc_intr = sa->sa_intr;
	sc->sc_dmat = sa->sa_dmat;

	if (bus_space_map(sa->sa_iot, sa->sa_addr, sa->sa_size,
	    0, &sc->sc_ioh))
		panic("%s: Cannot map registers", self->dv_xname);

	/* Fetch the Ethernet address from property if set. */
	enaddr = prop_dictionary_get(device_properties(self), "mac-addr");
	if (enaddr != NULL) {
		KASSERT(prop_object_type(enaddr) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(enaddr) == ETHER_ADDR_LEN);
		memcpy(sc->sc_enaddr, prop_data_data_nocopy(enaddr),
		    ETHER_ADDR_LEN);
		/* Select primary individual address slot, then write it. */
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, EPE_AFP, 0);
		bus_space_write_region_1(sc->sc_iot, sc->sc_ioh, EPE_IndAd,
		    sc->sc_enaddr, ETHER_ADDR_LEN);
	}

	ep93xx_intr_establish(sc->sc_intr, IPL_NET, epe_intr, sc);
	epe_init(sc);
}
177
/*
 * "Garbage-collect" the transmit context: walk the TX status queue
 * from the last software position up to the hardware's current
 * position, unloading DMA maps and freeing mbufs of completed frames.
 * Returns the number of TX descriptors reclaimed (0 if none were
 * pending).  Called from interrupt context and at splnet().
 */
static int
epe_gctx(struct epe_softc *sc)
{
	struct ifnet * ifp = &sc->sc_ec.ec_if;
	u_int32_t *cur, ndq = 0;

	/* Handle transmit completions */
	/* Translate the hardware's status-queue pointer (a bus address
	 * within the control page) back into a kernel virtual address. */
	cur = (u_int32_t *)(EPE_READ(TXStsQCurAdd) -
	    sc->ctrlpage_dsaddr + sc->ctrlpage);

	if (sc->TXStsQ_cur != cur) {
		CTRLPAGE_DMASYNC(TX_QLEN * 2 * sizeof(u_int32_t),
		    TX_QLEN * sizeof(u_int32_t), BUS_DMASYNC_PREREAD);
	} else {
		/* Nothing completed since the last call. */
		return 0;
	}

	do {
		/* Low 15 bits of the status word index the txq slot. */
		u_int32_t tbi = *sc->TXStsQ_cur & 0x7fff;
		struct mbuf *m = sc->txq[tbi].m;

		/* TxWE clear means the frame was not transmitted OK. */
		if ((*sc->TXStsQ_cur & TXStsQ_TxWE) == 0) {
			ifp->if_oerrors++;
		}
		bus_dmamap_unload(sc->sc_dmat, sc->txq[tbi].m_dmamap);
		m_freem(m);
		/*
		 * A multi-segment frame occupies several consecutive
		 * descriptors, all recorded with the same mbuf pointer;
		 * clear and count every one of them.
		 */
		do {
			sc->txq[tbi].m = NULL;
			ndq++;
			tbi = (tbi + 1) % TX_QLEN;
		} while (sc->txq[tbi].m == m);

		ifp->if_opackets++;
		sc->TXStsQ_cur++;
		/* Wrap the status-queue pointer at the end of the ring. */
		if (sc->TXStsQ_cur >= sc->TXStsQ + TX_QLEN) {
			sc->TXStsQ_cur = sc->TXStsQ;
		}
	} while (sc->TXStsQ_cur != cur);

	sc->TXDQ_avail += ndq;
	if (ifp->if_flags & IFF_OACTIVE) {
		ifp->if_flags &= ~IFF_OACTIVE;
		/* Disable end-of-tx-chain interrupt */
		EPE_WRITE(IntEn, IntEn_REOFIE);
	}
	return ndq;
}
225
/*
 * Interrupt handler.  Drains the RX status queue: good frames are
 * passed up the stack and their ring slot is refilled with a fresh
 * cluster mbuf; the consumed RX descriptors are then handed back to
 * the MAC.  TX completions are reaped via epe_gctx() and the
 * transmitter restarted if packets are waiting.  Loops until the MAC
 * reports no further RX-status/TX-chain interrupt cause.  Always
 * returns 1 (interrupt claimed).
 */
static int
epe_intr(void *arg)
{
	struct epe_softc *sc = (struct epe_softc *)arg;
	struct ifnet * ifp = &sc->sc_ec.ec_if;
	u_int32_t ndq = 0, irq, *cur;

	irq = EPE_READ(IntStsC);	/* read-and-clear interrupt status */
begin:
	/* Translate hardware RX status pointer (bus address) to a KVA. */
	cur = (u_int32_t *)(EPE_READ(RXStsQCurAdd) -
	    sc->ctrlpage_dsaddr + sc->ctrlpage);
	CTRLPAGE_DMASYNC(TX_QLEN * 3 * sizeof(u_int32_t),
	    RX_QLEN * 4 * sizeof(u_int32_t),
	    BUS_DMASYNC_PREREAD);
	while (sc->RXStsQ_cur != cur) {
		/* Accept only error-free frames that both start and end
		 * in this single buffer (RWE + first + end-of-buffer). */
		if ((sc->RXStsQ_cur[0] & (RXStsQ_RWE|RXStsQ_RFP|RXStsQ_EOB)) ==
		    (RXStsQ_RWE|RXStsQ_RFP|RXStsQ_EOB)) {
			u_int32_t bi = (sc->RXStsQ_cur[1] >> 16) & 0x7fff;
			u_int32_t fl = sc->RXStsQ_cur[1] & 0xffff;
			struct mbuf *m;

			/* Get a replacement cluster before handing the
			 * received one up, so the ring never goes empty. */
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m != NULL) MCLGET(m, M_DONTWAIT);
			if (m != NULL && (m->m_flags & M_EXT)) {
				bus_dmamap_unload(sc->sc_dmat,
				    sc->rxq[bi].m_dmamap);
				sc->rxq[bi].m->m_pkthdr.rcvif = ifp;
				sc->rxq[bi].m->m_pkthdr.len =
				    sc->rxq[bi].m->m_len = fl;
#if NBPFILTER > 0
				if (ifp->if_bpf)
					bpf_mtap(ifp->if_bpf, sc->rxq[bi].m);
#endif /* NBPFILTER > 0 */
				(*ifp->if_input)(ifp, sc->rxq[bi].m);
				/* Install the replacement in the ring slot. */
				sc->rxq[bi].m = m;
				bus_dmamap_load(sc->sc_dmat,
				    sc->rxq[bi].m_dmamap,
				    m->m_ext.ext_buf, MCLBYTES,
				    NULL, BUS_DMA_NOWAIT);
				sc->RXDQ[bi * 2] =
				    sc->rxq[bi].m_dmamap->dm_segs[0].ds_addr;
			} else {
				/* Drop packets until we can get replacement
				 * empty mbufs for the RXDQ.
				 */
				if (m != NULL) {
					m_freem(m);
				}
				ifp->if_ierrors++;
			}
		} else {
			ifp->if_ierrors++;
		}

		ndq++;

		/* Each RX status entry is two words; wrap at ring end. */
		sc->RXStsQ_cur += 2;
		if (sc->RXStsQ_cur >= sc->RXStsQ + (RX_QLEN * 2)) {
			sc->RXStsQ_cur = sc->RXStsQ;
		}
	}

	if (ndq > 0) {
		ifp->if_ipackets += ndq;
		CTRLPAGE_DMASYNC(TX_QLEN * 3 * sizeof(u_int32_t),
		    RX_QLEN * 4 * sizeof(u_int32_t),
		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
		/* Return the consumed status and data descriptors. */
		EPE_WRITE(RXStsEnq, ndq);
		EPE_WRITE(RXDEnq, ndq);
		ndq = 0;
	}

	/* Reap TX completions; restart output if work is queued. */
	if (epe_gctx(sc) > 0 && IFQ_IS_EMPTY(&ifp->if_snd) == 0) {
		epe_ifstart(ifp);
	}

	/* Re-check: more RX status or end-of-chain may have arrived. */
	irq = EPE_READ(IntStsC);
	if ((irq & (IntSts_RxSQ|IntSts_ECI)) != 0)
		goto begin;

	return (1);
}
308
309
/*
 * One-time hardware initialization, called from epe_attach():
 * reset the MAC, allocate one DMA-able page holding all four
 * descriptor/status rings (TX descriptors, TX status, RX descriptors,
 * RX status), pre-load the RX ring with cluster mbufs, attach the
 * MII/PHY, then register the network interface.
 */
static void
epe_init(struct epe_softc *sc)
{
	bus_dma_segment_t segs;
	caddr_t addr;
	int rsegs, err, i;
	struct ifnet * ifp = &sc->sc_ec.ec_if;
	int mdcdiv = DEFAULT_MDCDIV;

	callout_init(&sc->epe_tick_ch);

	/* Select primary Individual Address in Address Filter Pointer */
	EPE_WRITE(AFP, 0);
	/* Read ethernet MAC, should already be set by bootrom */
	bus_space_read_region_1(sc->sc_iot, sc->sc_ioh, EPE_IndAd,
	    sc->sc_enaddr, ETHER_ADDR_LEN);
	printf("%s: MAC address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(sc->sc_enaddr));

	/* Soft Reset the MAC; the bit self-clears when done. */
	EPE_WRITE(SelfCtl, SelfCtl_RESET);
	while(EPE_READ(SelfCtl) & SelfCtl_RESET);

	/* suggested magic initialization values from datasheet */
	EPE_WRITE(RXBufThrshld, 0x800040);
	EPE_WRITE(TXBufThrshld, 0x200010);
	EPE_WRITE(RXStsThrshld, 0x40002);
	EPE_WRITE(TXStsThrshld, 0x40002);
	EPE_WRITE(RXDThrshld, 0x40002);
	EPE_WRITE(TXDThrshld, 0x40002);

	/* Allocate a page of memory for descriptor and status queues */
	err = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, 0, PAGE_SIZE,
	    &segs, 1, &rsegs, BUS_DMA_WAITOK);
	if (err == 0) {
		err = bus_dmamem_map(sc->sc_dmat, &segs, 1, PAGE_SIZE,
		    &sc->ctrlpage, (BUS_DMA_WAITOK|BUS_DMA_COHERENT));
	}
	if (err == 0) {
		err = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
		    0, BUS_DMA_WAITOK, &sc->ctrlpage_dmamap);
	}
	if (err == 0) {
		err = bus_dmamap_load(sc->sc_dmat, sc->ctrlpage_dmamap,
		    sc->ctrlpage, PAGE_SIZE, NULL, BUS_DMA_WAITOK);
	}
	if (err != 0) {
		panic("%s: Cannot get DMA memory", sc->sc_dev.dv_xname);
	}
	sc->ctrlpage_dsaddr = sc->ctrlpage_dmamap->dm_segs[0].ds_addr;
	bzero(sc->ctrlpage, PAGE_SIZE);

	/* Set up pointers to start of each queue in kernel addr space.
	 * Each descriptor queue or status queue entry uses 2 words
	 */
	sc->TXDQ = (u_int32_t *)sc->ctrlpage;
	sc->TXDQ_cur = sc->TXDQ;
	sc->TXDQ_avail = TX_QLEN - 1;
	sc->TXStsQ = &sc->TXDQ[TX_QLEN * 2];
	sc->TXStsQ_cur = sc->TXStsQ;
	sc->RXDQ = &sc->TXStsQ[TX_QLEN];
	sc->RXStsQ = &sc->RXDQ[RX_QLEN * 2];
	sc->RXStsQ_cur = sc->RXStsQ;

	/* Program each queue's start addr, cur addr, and len registers
	 * with the physical addresses.
	 */
	addr = (caddr_t)sc->ctrlpage_dmamap->dm_segs[0].ds_addr;
	EPE_WRITE(TXDQBAdd, (u_int32_t)addr);
	EPE_WRITE(TXDQCurAdd, (u_int32_t)addr);
	EPE_WRITE(TXDQBLen, TX_QLEN * 2 * sizeof(u_int32_t));

	addr += (sc->TXStsQ - sc->TXDQ) * sizeof(u_int32_t);
	EPE_WRITE(TXStsQBAdd, (u_int32_t)addr);
	EPE_WRITE(TXStsQCurAdd, (u_int32_t)addr);
	EPE_WRITE(TXStsQBLen, TX_QLEN * sizeof(u_int32_t));

	addr += (sc->RXDQ - sc->TXStsQ) * sizeof(u_int32_t);
	EPE_WRITE(RXDQBAdd, (u_int32_t)addr);
	EPE_WRITE(RXDCurAdd, (u_int32_t)addr);
	EPE_WRITE(RXDQBLen, RX_QLEN * 2 * sizeof(u_int32_t));

	addr += (sc->RXStsQ - sc->RXDQ) * sizeof(u_int32_t);
	EPE_WRITE(RXStsQBAdd, (u_int32_t)addr);
	EPE_WRITE(RXStsQCurAdd, (u_int32_t)addr);
	EPE_WRITE(RXStsQBLen, RX_QLEN * 2 * sizeof(u_int32_t));

	/* Populate the RXDQ with mbufs */
	for(i = 0; i < RX_QLEN; i++) {
		struct mbuf *m;

		bus_dmamap_create(sc->sc_dmat, MCLBYTES, TX_QLEN/4, MCLBYTES, 0,
		    BUS_DMA_WAITOK, &sc->rxq[i].m_dmamap);
		MGETHDR(m, M_WAIT, MT_DATA);
		MCLGET(m, M_WAIT);
		sc->rxq[i].m = m;
		bus_dmamap_load(sc->sc_dmat, sc->rxq[i].m_dmamap,
		    m->m_ext.ext_buf, MCLBYTES, NULL,
		    BUS_DMA_WAITOK);

		/* Word 0: buffer bus address; word 1: index tag | length. */
		sc->RXDQ[i * 2] = sc->rxq[i].m_dmamap->dm_segs[0].ds_addr;
		sc->RXDQ[i * 2 + 1] = (i << 16) | MCLBYTES;
		bus_dmamap_sync(sc->sc_dmat, sc->rxq[i].m_dmamap, 0,
		    MCLBYTES, BUS_DMASYNC_PREREAD);
	}

	/* Create (but do not load) a DMA map per TX slot; tag each
	 * descriptor's word 1 with its buffer index. */
	for(i = 0; i < TX_QLEN; i++) {
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
		    (BUS_DMA_WAITOK|BUS_DMA_ALLOCNOW),
		    &sc->txq[i].m_dmamap);
		sc->txq[i].m = NULL;
		sc->TXDQ[i * 2 + 1] = (i << 16);
	}

	/* Divide HCLK by 32 for MDC clock */
	if (device_cfdata(&sc->sc_dev)->cf_flags)
		mdcdiv = device_cfdata(&sc->sc_dev)->cf_flags;
	EPE_WRITE(SelfCtl, (SelfCtl_MDCDIV(mdcdiv)|SelfCtl_PSPRS));

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = epe_mii_readreg;
	sc->sc_mii.mii_writereg = epe_mii_writereg;
	sc->sc_mii.mii_statchg = epe_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, epe_mediachange,
	    epe_mediastatus);
	mii_attach((struct device *)sc, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	EPE_WRITE(BMCtl, BMCtl_RxEn|BMCtl_TxEn);
	EPE_WRITE(IntEn, IntEn_REOFIE);
	/* maximum valid max frame length */
	/* NOTE(review): the low 16 bits are programmed with MHLEN here,
	 * which looks small for a full-size frame -- confirm against the
	 * EP93xx datasheet's MaxFrmLen field definitions. */
	EPE_WRITE(MaxFrmLen, (0x7ff << 16)|MHLEN);
	/* wait for receiver ready */
	while((EPE_READ(BMSts) & BMSts_RxAct) == 0);
	/* enqueue the entries in RXStsQ and RXDQ */
	CTRLPAGE_DMASYNC(0, sc->ctrlpage_dmamap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	EPE_WRITE(RXDEnq, RX_QLEN - 1);
	EPE_WRITE(RXStsEnq, RX_QLEN - 1);

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;

	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_flags = IFF_BROADCAST|IFF_SIMPLEX|IFF_NOTRAILERS|IFF_MULTICAST;
	ifp->if_ioctl = epe_ifioctl;
	ifp->if_start = epe_ifstart;
	ifp->if_watchdog = epe_ifwatchdog;
	ifp->if_init = epe_ifinit;
	ifp->if_stop = epe_ifstop;
	ifp->if_timer = 0;
	ifp->if_softc = sc;
	IFQ_SET_READY(&ifp->if_snd);
	if_attach(ifp);
	ether_ifattach(ifp, (sc)->sc_enaddr);
}
469
470 static int
471 epe_mediachange(ifp)
472 struct ifnet *ifp;
473 {
474 if (ifp->if_flags & IFF_UP)
475 epe_ifinit(ifp);
476 return (0);
477 }
478
479 static void
480 epe_mediastatus(ifp, ifmr)
481 struct ifnet *ifp;
482 struct ifmediareq *ifmr;
483 {
484 struct epe_softc *sc = ifp->if_softc;
485
486 mii_pollstat(&sc->sc_mii);
487 ifmr->ifm_active = sc->sc_mii.mii_media_active;
488 ifmr->ifm_status = sc->sc_mii.mii_media_status;
489 }
490
491
/*
 * MII read: fetch register `reg' of PHY `phy' through the MAC's MII
 * management interface.  Preamble suppression is turned off for the
 * access and the previous SelfCtl value restored afterwards.  Spins
 * until the MII engine goes idle; returns the 16-bit register value.
 */
int
epe_mii_readreg(self, phy, reg)
	struct device *self;
	int phy, reg;
{
	u_int32_t d, v;
	struct epe_softc *sc;

	sc = (struct epe_softc *)self;
	d = EPE_READ(SelfCtl);
	EPE_WRITE(SelfCtl, d & ~SelfCtl_PSPRS); /* no preamble suppress */
	EPE_WRITE(MIICmd, (MIICmd_READ | (phy << 5) | reg));
	while(EPE_READ(MIISts) & MIISts_BUSY);	/* wait for completion */
	v = EPE_READ(MIIData);
	EPE_WRITE(SelfCtl, d); /* restore old value */
	return v;
}
509
/*
 * MII write: store `val' into register `reg' of PHY `phy' through the
 * MAC's MII management interface.  Mirrors epe_mii_readreg(): preamble
 * suppression is disabled during the access and SelfCtl restored after.
 */
void
epe_mii_writereg(self, phy, reg, val)
	struct device *self;
	int phy, reg, val;
{
	struct epe_softc *sc;
	u_int32_t d;

	sc = (struct epe_softc *)self;
	d = EPE_READ(SelfCtl);
	EPE_WRITE(SelfCtl, d & ~SelfCtl_PSPRS); /* no preamble suppress */
	EPE_WRITE(MIIData, val);
	EPE_WRITE(MIICmd, (MIICmd_WRITE | (phy << 5) | reg));
	while(EPE_READ(MIISts) & MIISts_BUSY);	/* wait for completion */
	EPE_WRITE(SelfCtl, d); /* restore old value */
}
526
527
528 void
529 epe_statchg(self)
530 struct device *self;
531 {
532 struct epe_softc *sc = (struct epe_softc *)self;
533 u_int32_t reg;
534
535 /*
536 * We must keep the MAC and the PHY in sync as
537 * to the status of full-duplex!
538 */
539 reg = EPE_READ(TestCtl);
540 if (sc->sc_mii.mii_media_active & IFM_FDX)
541 reg |= TestCtl_MFDX;
542 else
543 reg &= ~TestCtl_MFDX;
544 EPE_WRITE(TestCtl, reg);
545 }
546
/*
 * Once-a-second timer: fold hardware collision/miss counters into the
 * interface statistics, reap any completed transmits (restarting
 * output if work is pending), tick the MII state machine and
 * reschedule ourselves.
 */
void
epe_tick(arg)
	void *arg;
{
	struct epe_softc* sc = (struct epe_softc *)arg;
	struct ifnet * ifp = &sc->sc_ec.ec_if;
	int s;
	u_int32_t misses;

	ifp->if_collisions += EPE_READ(TXCollCnt);
	/* These misses are ok, they will happen if the RAM/CPU can't keep up */
	misses = EPE_READ(RXMissCnt);
	if (misses > 0)
		printf("%s: %d rx misses\n", sc->sc_dev.dv_xname, misses);

	s = splnet();
	if (epe_gctx(sc) > 0 && IFQ_IS_EMPTY(&ifp->if_snd) == 0) {
		epe_ifstart(ifp);
	}
	splx(s);

	mii_tick(&sc->sc_mii);
	callout_reset(&sc->epe_tick_ch, hz, epe_tick, sc);
}
571
572
573 static int
574 epe_ifioctl(ifp, cmd, data)
575 struct ifnet *ifp;
576 u_long cmd;
577 caddr_t data;
578 {
579 struct epe_softc *sc = ifp->if_softc;
580 struct ifreq *ifr = (struct ifreq *)data;
581 int s, error;
582
583 s = splnet();
584 switch(cmd) {
585 case SIOCSIFMEDIA:
586 case SIOCGIFMEDIA:
587 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
588 break;
589 default:
590 error = ether_ioctl(ifp, cmd, data);
591 if (error == ENETRESET) {
592 if (ifp->if_flags & IFF_RUNNING)
593 epe_setaddr(ifp);
594 error = 0;
595 }
596 }
597 splx(s);
598 return error;
599 }
600
/*
 * Transmit start routine.  Pulls mbuf chains off the interface send
 * queue and maps them into the TX descriptor ring.  A chain whose DMA
 * mapping fails, is not 32-bit aligned, or needs more descriptors than
 * remain available is copied into a single fresh, aligned mbuf first.
 * Finally the MAC is told how many new descriptors were enqueued.
 * Runs at splnet(); also called directly from interrupt context.
 */
static void
epe_ifstart(ifp)
	struct ifnet *ifp;
{
	struct epe_softc *sc = (struct epe_softc *)ifp->if_softc;
	struct mbuf *m;
	bus_dma_segment_t *segs;
	int s, bi, err, nsegs, ndq;

	s = splnet();
start:
	ndq = 0;
	if (sc->TXDQ_avail == 0) {
		/* Ring full; try to reclaim completed descriptors. */
		if (epe_gctx(sc) == 0) {
			/* Enable End-Of-TX-Chain interrupt */
			EPE_WRITE(IntEn, IntEn_REOFIE|IntEn_ECIE);
			ifp->if_flags |= IFF_OACTIVE;
			ifp->if_timer = 10;	/* arm watchdog */
			splx(s);
			return;
		}
	}

	/* Current descriptor index. */
	bi = sc->TXDQ_cur - sc->TXDQ;

	IFQ_POLL(&ifp->if_snd, m);
	if (m == NULL) {
		splx(s);
		return;
	}
more:
	/* Try to map the chain in place; fall back to copying when the
	 * mapping fails, the first segment is misaligned, or the chain
	 * needs more descriptors than are free. */
	if ((err = bus_dmamap_load_mbuf(sc->sc_dmat, sc->txq[bi].m_dmamap, m,
	    BUS_DMA_NOWAIT)) ||
	    sc->txq[bi].m_dmamap->dm_segs[0].ds_addr & 0x3 ||
	    sc->txq[bi].m_dmamap->dm_nsegs > (sc->TXDQ_avail - ndq)) {
		/* Copy entire mbuf chain to new and 32-bit aligned storage */
		struct mbuf *mn;

		if (err == 0)
			bus_dmamap_unload(sc->sc_dmat, sc->txq[bi].m_dmamap);

		MGETHDR(mn, M_DONTWAIT, MT_DATA);
		if (mn == NULL) goto stop;
		if (m->m_pkthdr.len > (MHLEN & (~0x3))) {
			MCLGET(mn, M_DONTWAIT);
			if ((mn->m_flags & M_EXT) == 0) {
				m_freem(mn);
				goto stop;
			}
		}
		/* Round the data pointer up to a 4-byte boundary. */
		mn->m_data = (caddr_t)(((u_int32_t)mn->m_data + 0x3) & (~0x3));
		m_copydata(m, 0, m->m_pkthdr.len, mtod(mn, caddr_t));
		mn->m_pkthdr.len = mn->m_len = m->m_pkthdr.len;
		IFQ_DEQUEUE(&ifp->if_snd, m);
		m_freem(m);
		m = mn;
		bus_dmamap_load_mbuf(sc->sc_dmat, sc->txq[bi].m_dmamap, m,
		    BUS_DMA_NOWAIT);
	} else {
		IFQ_DEQUEUE(&ifp->if_snd, m);
	}

#if NBPFILTER > 0
	if (ifp->if_bpf)
		bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

	nsegs = sc->txq[bi].m_dmamap->dm_nsegs;
	segs = sc->txq[bi].m_dmamap->dm_segs;
	bus_dmamap_sync(sc->sc_dmat, sc->txq[bi].m_dmamap, 0,
	    sc->txq[bi].m_dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* XXX: This driver hasn't been tested w/nsegs > 1 */
	/* One descriptor per segment; bit 31 marks end-of-frame, and
	 * every descriptor for this frame records the same mbuf. */
	while (nsegs > 0) {
		nsegs--;
		sc->txq[bi].m = m;
		sc->TXDQ[bi * 2] = segs->ds_addr;
		if (nsegs == 0)
			sc->TXDQ[bi * 2 + 1] = segs->ds_len | (bi << 16) |
			    (1 << 31);
		else
			sc->TXDQ[bi * 2 + 1] = segs->ds_len | (bi << 16);
		segs++;
		bi = (bi + 1) % TX_QLEN;
		ndq++;
	}


	/*
	 * Enqueue another. Don't do more than half the available
	 * descriptors before telling the MAC about them
	 */
	if ((sc->TXDQ_avail - ndq) > 0 && ndq < TX_QLEN / 2) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m != NULL) {
			goto more;
		}
	}
stop:
	if (ndq > 0) {
		sc->TXDQ_avail -= ndq;
		sc->TXDQ_cur = &sc->TXDQ[bi];
		CTRLPAGE_DMASYNC(0, TX_QLEN * 2 * sizeof(u_int32_t),
		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
		/* Hand the new descriptors to the MAC. */
		EPE_WRITE(TXDEnq, ndq);
	}

	/* More packets queued: go round again. */
	if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
		goto start;

	splx(s);
	return;
}
715
716 static void
717 epe_ifwatchdog(ifp)
718 struct ifnet *ifp;
719 {
720 struct epe_softc *sc = (struct epe_softc *)ifp->if_softc;
721
722 if ((ifp->if_flags & IFF_RUNNING) == 0)
723 return;
724 printf("%s: device timeout, BMCtl = 0x%08x, BMSts = 0x%08x\n",
725 sc->sc_dev.dv_xname, EPE_READ(BMCtl), EPE_READ(BMSts));
726 }
727
728 static int
729 epe_ifinit(ifp)
730 struct ifnet *ifp;
731 {
732 struct epe_softc *sc = ifp->if_softc;
733 int s = splnet();
734
735 callout_stop(&sc->epe_tick_ch);
736 EPE_WRITE(RXCtl, RXCtl_IA0|RXCtl_BA|RXCtl_RCRCA|RXCtl_SRxON);
737 EPE_WRITE(TXCtl, TXCtl_STxON);
738 EPE_WRITE(GIIntMsk, GIIntMsk_INT); /* start interrupting */
739 mii_mediachg(&sc->sc_mii);
740 callout_reset(&sc->epe_tick_ch, hz, epe_tick, sc);
741 ifp->if_flags |= IFF_RUNNING;
742 splx(s);
743 return 0;
744 }
745
746 static void
747 epe_ifstop(ifp, disable)
748 struct ifnet *ifp;
749 int disable;
750 {
751 struct epe_softc *sc = ifp->if_softc;
752
753
754 EPE_WRITE(RXCtl, 0);
755 EPE_WRITE(TXCtl, 0);
756 EPE_WRITE(GIIntMsk, 0);
757 callout_stop(&sc->epe_tick_ch);
758
759 /* Down the MII. */
760 mii_down(&sc->sc_mii);
761
762 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
763 ifp->if_timer = 0;
764 sc->sc_mii.mii_media_status &= ~IFM_ACTIVE;
765 }
766
767 static void
768 epe_setaddr(ifp)
769 struct ifnet *ifp;
770 {
771 struct epe_softc *sc = ifp->if_softc;
772 struct ethercom *ac = &sc->sc_ec;
773 struct ether_multi *enm;
774 struct ether_multistep step;
775 u_int8_t ias[2][ETHER_ADDR_LEN];
776 u_int32_t h, nma = 0, hashes[2] = { 0, 0 };
777 u_int32_t rxctl = EPE_READ(RXCtl);
778
779 /* disable receiver temporarily */
780 EPE_WRITE(RXCtl, rxctl & ~RXCtl_SRxON);
781
782 rxctl &= ~(RXCtl_MA|RXCtl_PA|RXCtl_IA2|RXCtl_IA3);
783
784 if (ifp->if_flags & IFF_PROMISC) {
785 rxctl |= RXCtl_PA;
786 }
787
788 ifp->if_flags &= ~IFF_ALLMULTI;
789
790 ETHER_FIRST_MULTI(step, ac, enm);
791 while (enm != NULL) {
792 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
793 /*
794 * We must listen to a range of multicast addresses.
795 * For now, just accept all multicasts, rather than
796 * trying to set only those filter bits needed to match
797 * the range. (At this time, the only use of address
798 * ranges is for IP multicast routing, for which the
799 * range is big enough to require all bits set.)
800 */
801 rxctl &= ~(RXCtl_IA2|RXCtl_IA3);
802 rxctl |= RXCtl_MA;
803 hashes[0] = 0xffffffffUL;
804 hashes[1] = 0xffffffffUL;
805 ifp->if_flags |= IFF_ALLMULTI;
806 break;
807 }
808
809 if (nma < 2) {
810 /* We can program 2 perfect address filters for mcast */
811 memcpy(ias[nma], enm->enm_addrlo, ETHER_ADDR_LEN);
812 rxctl |= (1 << (nma + 2));
813 } else {
814 /*
815 * XXX: Datasheet is not very clear here, I'm not sure
816 * if I'm doing this right. --joff
817 */
818 h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
819
820 /* Just want the 6 most-significant bits. */
821 h = h >> 26;
822
823 hashes[ h / 32 ] |= (1 << (h % 32));
824 rxctl |= RXCtl_MA;
825 }
826 ETHER_NEXT_MULTI(step, enm);
827 nma++;
828 }
829
830 EPE_WRITE(AFP, 0);
831 bus_space_write_region_1(sc->sc_iot, sc->sc_ioh, EPE_IndAd,
832 sc->sc_enaddr, ETHER_ADDR_LEN);
833 if (rxctl & RXCtl_IA2) {
834 EPE_WRITE(AFP, 2);
835 bus_space_write_region_1(sc->sc_iot, sc->sc_ioh, EPE_IndAd,
836 ias[0], ETHER_ADDR_LEN);
837 }
838 if (rxctl & RXCtl_IA3) {
839 EPE_WRITE(AFP, 3);
840 bus_space_write_region_1(sc->sc_iot, sc->sc_ioh, EPE_IndAd,
841 ias[1], ETHER_ADDR_LEN);
842 }
843 if (hashes[0] != 0 && hashes[1] != 0) {
844 EPE_WRITE(AFP, 7);
845 EPE_WRITE(HashTbl, hashes[0]);
846 EPE_WRITE(HashTbl + 4, hashes[1]);
847 }
848 EPE_WRITE(RXCtl, rxctl);
849 }
850