/*	$NetBSD: if_temac.c,v 1.9 2012/07/22 14:32:51 matt Exp $ */

/*
 * Copyright (c) 2006 Jachym Holecek
 * All rights reserved.
 *
 * Written for DFC Design, s.r.o.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for Xilinx LocalLink TEMAC as wired on the GSRD platform.
 *
 * TODO:
 * - Optimize
 * - Checksum offload
 * - Address filters
 * - Support jumbo frames
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_temac.c,v 1.9 2012/07/22 14:32:51 matt Exp $");


#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/bus.h>
#include <sys/cpu.h>

#include <uvm/uvm_extern.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <powerpc/ibm4xx/cpu.h>

#include <evbppc/virtex/idcr.h>
#include <evbppc/virtex/dev/xcvbusvar.h>
#include <evbppc/virtex/dev/cdmacreg.h>
#include <evbppc/virtex/dev/temacreg.h>
#include <evbppc/virtex/dev/temacvar.h>

#include <dev/mii/miivar.h>

/* This is outside of the TEMAC's DCR window, so we have to hardcode it. */
#define	DCR_ETH_BASE		0x0030

#define	TEMAC_REGDEBUG		0
#define	TEMAC_RXDEBUG		0
#define	TEMAC_TXDEBUG		0

#if TEMAC_RXDEBUG > 0 || TEMAC_TXDEBUG > 0
#define	TEMAC_DEBUG		1
#else
#define	TEMAC_DEBUG		0
#endif

#if TEMAC_REGDEBUG > 0
#define	TRACEREG(arg)		printf arg
#else
#define	TRACEREG(arg)		/* nop */
#endif
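
/*
 * TRACEREG() takes a complete, parenthesized printf() argument list,
 * hence the double parentheses at call sites -- a common idiom that
 * avoids depending on C99 variadic macros.  For example:
 *
 *	TRACEREG(("%s: stat %#08x loops %d\n", __func__, mask, i));
 */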

/* DMA control chains take up one (16KB) page. */
#define	TEMAC_NTXDESC		256
#define	TEMAC_NRXDESC		256

#define	TEMAC_TXQLEN		64	/* Software Tx queue length */
#define	TEMAC_NTXSEG		16	/* Maximum Tx segments per packet */

#define	TEMAC_NRXSEG		1	/* Maximum Rx segments per packet */
#define	TEMAC_RXPERIOD		1	/* Interrupt every N descriptors. */
#define	TEMAC_RXTIMO_HZ		100	/* Rx reaper frequency */

/* Next Tx descriptor and descriptor's offset WRT sc_cdaddr. */
#define	TEMAC_TXSINC(n, i)	(((n) + TEMAC_TXQLEN + (i)) % TEMAC_TXQLEN)
#define	TEMAC_TXINC(n, i)	(((n) + TEMAC_NTXDESC + (i)) % TEMAC_NTXDESC)

#define	TEMAC_TXSNEXT(n)	TEMAC_TXSINC((n), 1)
#define	TEMAC_TXNEXT(n)		TEMAC_TXINC((n), 1)
#define	TEMAC_TXDOFF(n)		(offsetof(struct temac_control, cd_txdesc) + \
				    (n) * sizeof(struct cdmac_descr))

/* Next Rx descriptor and descriptor's offset WRT sc_cdaddr. */
#define	TEMAC_RXINC(n, i)	(((n) + TEMAC_NRXDESC + (i)) % TEMAC_NRXDESC)
#define	TEMAC_RXNEXT(n)		TEMAC_RXINC((n), 1)
#define	TEMAC_RXDOFF(n)		(offsetof(struct temac_control, cd_rxdesc) + \
				    (n) * sizeof(struct cdmac_descr))
#define	TEMAC_ISINTR(i)		(((i) % TEMAC_RXPERIOD) == 0)
#define	TEMAC_ISLAST(i)		((i) == (TEMAC_NRXDESC - 1))
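
/*
 * The ring macros add the ring size before taking the modulus so that
 * negative increments stay in range: TEMAC_TXINC(0, -1), for instance,
 * is (0 + 256 - 1) % 256 == 255, i.e. the ring's tail.  temac_start()
 * and temac_rxreap() below rely on this for their "- 1" steps.
 */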


struct temac_control {
	struct cdmac_descr	cd_txdesc[TEMAC_NTXDESC];
	struct cdmac_descr	cd_rxdesc[TEMAC_NRXDESC];
};
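
/*
 * The whole control block lives in a single bus_dmamem allocation.
 * Assuming the 32-byte CDMAC descriptor layout, 2 x 256 descriptors
 * amount to exactly the 16KB page mentioned above.  TEMAC_TXDOFF()
 * and TEMAC_RXDOFF() translate a descriptor index into a byte offset
 * within this block.
 */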

struct temac_txsoft {
	bus_dmamap_t	txs_dmap;
	struct mbuf	*txs_mbuf;
	int		txs_last;
};

struct temac_rxsoft {
	bus_dmamap_t	rxs_dmap;
	struct mbuf	*rxs_mbuf;
};

struct temac_softc {
	device_t	sc_dev;
	struct ethercom	sc_ec;
#define sc_if		sc_ec.ec_if

	/* Peripheral registers */
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;

	/* CDMAC channel registers */
	bus_space_tag_t		sc_dma_rxt;
	bus_space_handle_t	sc_dma_rxh;	/* Rx channel */
	bus_space_handle_t	sc_dma_rsh;	/* Rx status */

	bus_space_tag_t		sc_dma_txt;
	bus_space_handle_t	sc_dma_txh;	/* Tx channel */
	bus_space_handle_t	sc_dma_tsh;	/* Tx status */

	struct temac_txsoft	sc_txsoft[TEMAC_TXQLEN];
	struct temac_rxsoft	sc_rxsoft[TEMAC_NRXDESC];

	struct callout		sc_rx_timo;
	struct callout		sc_mii_tick;
	struct mii_data		sc_mii;

	bus_dmamap_t		sc_control_dmap;
#define sc_cdaddr		sc_control_dmap->dm_segs[0].ds_addr

	struct temac_control	*sc_control_data;
#define sc_rxdescs		sc_control_data->cd_rxdesc
#define sc_txdescs		sc_control_data->cd_txdesc

	int			sc_txbusy;

	int			sc_txfree;
	int			sc_txcur;
	int			sc_txreap;

	int			sc_rxreap;

	int			sc_txsfree;
	int			sc_txscur;
	int			sc_txsreap;

	int			sc_dead;	/* Rx/Tx DMA error (fatal) */
	int			sc_rx_drained;

	int			sc_rx_chan;
	int			sc_tx_chan;

	void			*sc_sdhook;
	void			*sc_rx_ih;
	void			*sc_tx_ih;

	bus_dma_tag_t		sc_dmat;
};

/* Device interface. */
static void	temac_attach(device_t, device_t, void *);

/* Ifnet interface. */
static int	temac_init(struct ifnet *);
static int	temac_ioctl(struct ifnet *, u_long, void *);
static void	temac_start(struct ifnet *);
static void	temac_stop(struct ifnet *, int);

/* Media management. */
static int	temac_mii_readreg(device_t, int, int);
static void	temac_mii_statchg(struct ifnet *);
static void	temac_mii_tick(void *);
static void	temac_mii_writereg(device_t, int, int, int);

/* Indirect hooks. */
static void	temac_shutdown(void *);
static void	temac_rx_intr(void *);
static void	temac_tx_intr(void *);

/* Tools. */
static inline void	temac_rxcdsync(struct temac_softc *, int, int, int);
static inline void	temac_txcdsync(struct temac_softc *, int, int, int);
static void	temac_txreap(struct temac_softc *);
static void	temac_rxreap(struct temac_softc *);
static int	temac_rxalloc(struct temac_softc *, int, int);
static void	temac_rxtimo(void *);
static void	temac_rxdrain(struct temac_softc *);
static void	temac_reset(struct temac_softc *);
static void	temac_txkick(struct temac_softc *);

/* Register access. */
static inline void	gmi_write_8(uint32_t, uint32_t, uint32_t);
static inline void	gmi_write_4(uint32_t, uint32_t);
static inline void	gmi_read_8(uint32_t, uint32_t *, uint32_t *);
static inline uint32_t	gmi_read_4(uint32_t);
static inline void	hif_wait_stat(uint32_t);

#define cdmac_rx_stat(sc) \
	bus_space_read_4((sc)->sc_dma_rxt, (sc)->sc_dma_rsh, 0 /* XXX hack */)

#define cdmac_rx_reset(sc) \
	bus_space_write_4((sc)->sc_dma_rxt, (sc)->sc_dma_rsh, 0, CDMAC_STAT_RESET)

#define cdmac_rx_start(sc, val) \
	bus_space_write_4((sc)->sc_dma_rxt, (sc)->sc_dma_rxh, CDMAC_CURDESC, (val))

#define cdmac_tx_stat(sc) \
	bus_space_read_4((sc)->sc_dma_txt, (sc)->sc_dma_tsh, 0 /* XXX hack */)

#define cdmac_tx_reset(sc) \
	bus_space_write_4((sc)->sc_dma_txt, (sc)->sc_dma_tsh, 0, CDMAC_STAT_RESET)

#define cdmac_tx_start(sc, val) \
	bus_space_write_4((sc)->sc_dma_txt, (sc)->sc_dma_txh, CDMAC_CURDESC, (val))

CFATTACH_DECL_NEW(temac, sizeof(struct temac_softc),
    xcvbus_child_match, temac_attach, NULL, NULL);


/*
 * Private bus utilities.
 */
static inline void
hif_wait_stat(uint32_t mask)
{
	int	i = 0;

	while (mask != (mfidcr(IDCR_HIF_STAT) & mask)) {
		if (i++ > 100) {
			printf("%s: timeout waiting for 0x%08x\n",
			    __func__, mask);
			break;
		}
		delay(5);
	}

	TRACEREG(("%s: stat %#08x loops %d\n", __func__, mask, i));
}
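
/*
 * With delay(5) per iteration and the 100-iteration cutoff above,
 * hif_wait_stat() gives the host interface roughly 500us to post the
 * expected status bits -- a busy-wait bound rather than a precisely
 * calibrated timeout.
 */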

static inline void
gmi_write_4(uint32_t addr, uint32_t lo)
{
	mtidcr(IDCR_HIF_ARG0, lo);
	mtidcr(IDCR_HIF_CTRL, (addr & HIF_CTRL_GMIADDR) | HIF_CTRL_WRITE);
	hif_wait_stat(HIF_STAT_GMIWR);

	TRACEREG(("%s: %#08x <- %#08x\n", __func__, addr, lo));
}

static inline void
gmi_write_8(uint32_t addr, uint32_t lo, uint32_t hi)
{
	mtidcr(IDCR_HIF_ARG1, hi);
	gmi_write_4(addr, lo);
}

static inline void
gmi_read_8(uint32_t addr, uint32_t *lo, uint32_t *hi)
{
	*lo = gmi_read_4(addr);
	*hi = mfidcr(IDCR_HIF_ARG1);
}

static inline uint32_t
gmi_read_4(uint32_t addr)
{
	uint32_t	res;

	mtidcr(IDCR_HIF_CTRL, addr & HIF_CTRL_GMIADDR);
	hif_wait_stat(HIF_STAT_GMIRR);

	res = mfidcr(IDCR_HIF_ARG0);
	TRACEREG(("%s: %#08x -> %#08x\n", __func__, addr, res));
	return (res);
}
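
/*
 * GMI registers are reached indirectly through the DCR-based host
 * interface: the target address goes to HIF_CTRL, data moves through
 * the HIF_ARG registers, and hif_wait_stat() polls for completion.
 * The driver's usual pattern on top of these accessors is
 * read-modify-write, e.g. the way temac_reset() disables the receiver:
 *
 *	rcr = gmi_read_4(TEMAC_GMI_RXCF1) & ~GMI_RX_ENABLE;
 *	gmi_write_4(TEMAC_GMI_RXCF1, rcr);
 */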

/*
 * Generic device.
 */
static void
temac_attach(device_t parent, device_t self, void *aux)
{
	struct xcvbus_attach_args *vaa = aux;
	struct ll_dmac		*rx = vaa->vaa_rx_dmac;
	struct ll_dmac		*tx = vaa->vaa_tx_dmac;
	struct temac_softc	*sc = device_private(self);
	struct ifnet		*ifp = &sc->sc_if;
	struct mii_data		*mii = &sc->sc_mii;
	uint8_t			enaddr[ETHER_ADDR_LEN];
	bus_dma_segment_t	seg;
	int			error, nseg, i;
	const char * const	xname = device_xname(self);

	aprint_normal(": TEMAC\n");	/* XXX will be LL_TEMAC, PLB_TEMAC */

	KASSERT(rx);
	KASSERT(tx);

	sc->sc_dev = self;
	sc->sc_dmat = vaa->vaa_dmat;
	sc->sc_dead = 0;
	sc->sc_rx_drained = 1;
	sc->sc_txbusy = 0;
	sc->sc_iot = vaa->vaa_iot;
	sc->sc_dma_rxt = rx->dmac_iot;
	sc->sc_dma_txt = tx->dmac_iot;

	/*
	 * Map HIF and receive/transmit dmac registers.
	 */
	if ((error = bus_space_map(vaa->vaa_iot, vaa->vaa_addr, TEMAC_SIZE, 0,
	    &sc->sc_ioh)) != 0) {
		aprint_error_dev(self, "could not map registers\n");
		goto fail_0;
	}

	if ((error = bus_space_map(sc->sc_dma_rxt, rx->dmac_ctrl_addr,
	    CDMAC_CTRL_SIZE, 0, &sc->sc_dma_rxh)) != 0) {
		aprint_error_dev(self, "could not map Rx control registers\n");
		goto fail_0;
	}
	if ((error = bus_space_map(sc->sc_dma_rxt, rx->dmac_stat_addr,
	    CDMAC_STAT_SIZE, 0, &sc->sc_dma_rsh)) != 0) {
		aprint_error_dev(self, "could not map Rx status register\n");
		goto fail_0;
	}

	if ((error = bus_space_map(sc->sc_dma_txt, tx->dmac_ctrl_addr,
	    CDMAC_CTRL_SIZE, 0, &sc->sc_dma_txh)) != 0) {
		aprint_error_dev(self, "could not map Tx control registers\n");
		goto fail_0;
	}
	if ((error = bus_space_map(sc->sc_dma_txt, tx->dmac_stat_addr,
	    CDMAC_STAT_SIZE, 0, &sc->sc_dma_tsh)) != 0) {
		aprint_error_dev(self, "could not map Tx status register\n");
		goto fail_0;
	}

	/*
	 * Allocate and initialize DMA control chains.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct temac_control), 8, 0, &seg, 1, &nseg, 0)) != 0) {
		aprint_error_dev(self, "could not allocate control data\n");
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, nseg,
	    sizeof(struct temac_control),
	    (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(self, "could not map control data\n");
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct temac_control), 1,
	    sizeof(struct temac_control), 0, 0, &sc->sc_control_dmap)) != 0) {
		aprint_error_dev(self,
		    "could not create control data DMA map\n");
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_control_dmap,
	    sc->sc_control_data, sizeof(struct temac_control), NULL, 0)) != 0) {
		aprint_error_dev(self, "could not load control data DMA map\n");
		goto fail_3;
	}

	/*
	 * Link descriptor chains.
	 */
	memset(sc->sc_control_data, 0, sizeof(struct temac_control));

	for (i = 0; i < TEMAC_NTXDESC; i++) {
		sc->sc_txdescs[i].desc_next = sc->sc_cdaddr +
		    TEMAC_TXDOFF(TEMAC_TXNEXT(i));
		sc->sc_txdescs[i].desc_stat = CDMAC_STAT_DONE;
	}
	for (i = 0; i < TEMAC_NRXDESC; i++) {
		sc->sc_rxdescs[i].desc_next = sc->sc_cdaddr +
		    TEMAC_RXDOFF(TEMAC_RXNEXT(i));
		sc->sc_rxdescs[i].desc_stat = CDMAC_STAT_DONE;
	}
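
	/*
	 * The chains are circular: TEMAC_TXNEXT(TEMAC_NTXDESC - 1) and
	 * TEMAC_RXNEXT(TEMAC_NRXDESC - 1) both wrap to 0, so each ring's
	 * last descriptor links back to its first.  At run time the CDMAC
	 * engine is parked on descriptors carrying CDMAC_STAT_STOP rather
	 * than running off the end of a list.
	 */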

	bus_dmamap_sync(sc->sc_dmat, sc->sc_control_dmap, 0,
	    sizeof(struct temac_control),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	 * Initialize software state for transmit/receive jobs.
	 */
	for (i = 0; i < TEMAC_TXQLEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat,
		    ETHER_MAX_LEN_JUMBO, TEMAC_NTXSEG, ETHER_MAX_LEN_JUMBO,
		    0, 0, &sc->sc_txsoft[i].txs_dmap)) != 0) {
			aprint_error_dev(self,
			    "could not create Tx DMA map %d\n",
			    i);
			goto fail_4;
		}
		sc->sc_txsoft[i].txs_mbuf = NULL;
		sc->sc_txsoft[i].txs_last = 0;
	}

	for (i = 0; i < TEMAC_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat,
		    MCLBYTES, TEMAC_NRXSEG, MCLBYTES, 0, 0,
		    &sc->sc_rxsoft[i].rxs_dmap)) != 0) {
			aprint_error_dev(self,
			    "could not create Rx DMA map %d\n", i);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Setup transfer interrupt handlers.
	 */
	error = ENOMEM;

	sc->sc_rx_ih = ll_dmac_intr_establish(rx->dmac_chan,
	    temac_rx_intr, sc);
	if (sc->sc_rx_ih == NULL) {
		aprint_error_dev(self, "could not establish Rx interrupt\n");
		goto fail_5;
	}

	sc->sc_tx_ih = ll_dmac_intr_establish(tx->dmac_chan,
	    temac_tx_intr, sc);
	if (sc->sc_tx_ih == NULL) {
		aprint_error_dev(self, "could not establish Tx interrupt\n");
		goto fail_6;
	}

	/* XXXFreza: faked, should read unicast address filter. */
	enaddr[0] = 0x00;
	enaddr[1] = 0x11;
	enaddr[2] = 0x17;
	enaddr[3] = 0xff;
	enaddr[4] = 0xff;
	enaddr[5] = 0x01;

	/*
	 * Initialize the TEMAC.
	 */
	temac_reset(sc);

	/* Configure MDIO link. */
	gmi_write_4(TEMAC_GMI_MGMTCF, GMI_MGMT_CLKDIV_100MHz | GMI_MGMT_MDIO);

	/* Initialize PHY. */
	mii->mii_ifp = ifp;
	mii->mii_readreg = temac_mii_readreg;
	mii->mii_writereg = temac_mii_writereg;
	mii->mii_statchg = temac_mii_statchg;
	sc->sc_ec.ec_mii = mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);

	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);
	}

	/* Hold PHY in reset. */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, TEMAC_RESET, TEMAC_RESET_PHY);

	/* Reset EMAC. */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, TEMAC_RESET,
	    TEMAC_RESET_EMAC);
	delay(10000);

	/* Reset the peripheral; this wakes up the PHY and the EMAC. */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, TEMAC_RESET,
	    TEMAC_RESET_PERIPH);
	delay(40000);

	/* (Re-)Configure MDIO link. */
	gmi_write_4(TEMAC_GMI_MGMTCF, GMI_MGMT_CLKDIV_100MHz | GMI_MGMT_MDIO);

	/*
	 * Hook up with network stack.
	 */
	strcpy(ifp->if_xname, xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = temac_ioctl;
	ifp->if_start = temac_start;
	ifp->if_init = temac_init;
	ifp->if_stop = temac_stop;
	ifp->if_watchdog = NULL;
	IFQ_SET_READY(&ifp->if_snd);
	IFQ_SET_MAXLEN(&ifp->if_snd, TEMAC_TXQLEN);

	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;

	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	sc->sc_sdhook = shutdownhook_establish(temac_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		aprint_error_dev(self,
		    "WARNING: unable to establish shutdown hook\n");

	callout_setfunc(&sc->sc_mii_tick, temac_mii_tick, sc);
	callout_setfunc(&sc->sc_rx_timo, temac_rxtimo, sc);

	return;

fail_6:
	ll_dmac_intr_disestablish(rx->dmac_chan, sc->sc_rx_ih);
	i = TEMAC_NRXDESC;
fail_5:
	for (--i; i >= 0; i--)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxsoft[i].rxs_dmap);
	i = TEMAC_TXQLEN;
fail_4:
	for (--i; i >= 0; i--)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_txsoft[i].txs_dmap);
fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_control_dmap);
fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct temac_control));
fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, nseg);
fail_0:
	aprint_error_dev(self, "error = %d\n", error);
}

/*
 * Network device.
 */
static int
temac_init(struct ifnet *ifp)
{
	struct temac_softc	*sc = (struct temac_softc *)ifp->if_softc;
	uint32_t		rcr, tcr;
	int			i, error;

	/* Reset DMA channels. */
	cdmac_tx_reset(sc);
	cdmac_rx_reset(sc);

	/* Set current media. */
	if ((error = ether_mediachange(ifp)) != 0)
		return error;

	callout_schedule(&sc->sc_mii_tick, hz);

	/* Enable EMAC engine. */
	rcr = (gmi_read_4(TEMAC_GMI_RXCF1) | GMI_RX_ENABLE) &
	    ~(GMI_RX_JUMBO | GMI_RX_FCS);
	gmi_write_4(TEMAC_GMI_RXCF1, rcr);

	tcr = (gmi_read_4(TEMAC_GMI_TXCF) | GMI_TX_ENABLE) &
	    ~(GMI_TX_JUMBO | GMI_TX_FCS);
	gmi_write_4(TEMAC_GMI_TXCF, tcr);

	/* XXXFreza: Force promiscuous mode, for now. */
	gmi_write_4(TEMAC_GMI_AFM, GMI_AFM_PROMISC);
	ifp->if_flags |= IFF_PROMISC;

	/* Rx/Tx queues are drained -- either from attach() or stop(). */
	sc->sc_txsfree = TEMAC_TXQLEN;
	sc->sc_txsreap = 0;
	sc->sc_txscur = 0;

	sc->sc_txfree = TEMAC_NTXDESC;
	sc->sc_txreap = 0;
	sc->sc_txcur = 0;

	sc->sc_rxreap = 0;

	/* Allocate and map receive buffers. */
	if (sc->sc_rx_drained) {
		for (i = 0; i < TEMAC_NRXDESC; i++) {
			if ((error = temac_rxalloc(sc, i, 1)) != 0) {
				aprint_error_dev(sc->sc_dev,
				    "failed to allocate Rx descriptor %d\n",
				    i);
				temac_rxdrain(sc);
				return (error);
			}
		}
		sc->sc_rx_drained = 0;

		temac_rxcdsync(sc, 0, TEMAC_NRXDESC,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		cdmac_rx_start(sc, sc->sc_cdaddr + TEMAC_RXDOFF(0));
	}

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return (0);
}

static int
temac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct temac_softc *sc = (struct temac_softc *)ifp->if_softc;
	int s, ret;

	s = splnet();
	if (sc->sc_dead)
		ret = EIO;
	else
		ret = ether_ioctl(ifp, cmd, data);
	splx(s);
	return (ret);
}

static void
temac_start(struct ifnet *ifp)
{
	struct temac_softc	*sc = (struct temac_softc *)ifp->if_softc;
	struct temac_txsoft	*txs;
	struct mbuf		*m;
	bus_dmamap_t		dmap;
	int			error, head, nsegs, i;

	nsegs = 0;
	head = sc->sc_txcur;
	txs = NULL;		/* gcc */

	if (sc->sc_dead)
		return;

	KASSERT(sc->sc_txfree >= 0);
	KASSERT(sc->sc_txsfree >= 0);
	/*
	 * Push mbufs into the descriptor chain until we drain the interface
	 * queue or run out of descriptors. We'll mark the first segment as
	 * "done" in the hope that we might put the CDMAC interrupt above
	 * IPL_NET and have it start jobs & mark packets for GC preemptively
	 * for us -- creativity due to limitations of the CDMAC transfer
	 * engine (it really consumes lists, not circular queues, AFAICS).
	 *
	 * We schedule one interrupt per Tx batch.
	 */
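	/*
	 * A sketch of how one batch is marked up (matching the flags set
	 * below): the first segment of each packet carries CDMAC_STAT_SOP,
	 * the last carries CDMAC_STAT_EOP, and only the final descriptor
	 * of the whole batch gets CDMAC_STAT_STOP | CDMAC_STAT_INTR.  A
	 * one-segment packet ending a batch thus carries all four flags.
	 */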
	while (1) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		if (sc->sc_txsfree == 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		txs = &sc->sc_txsoft[sc->sc_txscur];
		dmap = txs->txs_dmap;

		if (txs->txs_mbuf != NULL)
			printf("%s: stale Tx mbuf in soft state\n", __func__);
		if (txs->txs_last)
			printf("%s: stale Tx last-in-batch mark\n", __func__);

		if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, dmap, m,
		    BUS_DMA_WRITE | BUS_DMA_NOWAIT)) != 0) {
			if (error == EFBIG) {
				aprint_error_dev(sc->sc_dev,
				    "Tx consumes too many segments, dropped\n");
				IFQ_DEQUEUE(&ifp->if_snd, m);
				m_freem(m);
				continue;
			} else {
				aprint_debug_dev(sc->sc_dev,
				    "Tx stall due to resource shortage\n");
				break;
			}
		}

		/*
		 * If we're short on DMA descriptors, notify upper layers
		 * and leave this packet for later.
		 */
		if (dmap->dm_nsegs > sc->sc_txfree) {
			bus_dmamap_unload(sc->sc_dmat, dmap);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);

		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
		txs->txs_mbuf = m;

		/*
		 * Map the packet into descriptor chain. XXX We'll want
		 * to fill checksum offload commands here.
		 *
		 * We would be in a race if we weren't blocking CDMAC intr
		 * at this point -- we need to be locked against txreap()
		 * because of dmasync ops.
		 */

		temac_txcdsync(sc, sc->sc_txcur, dmap->dm_nsegs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		for (i = 0; i < dmap->dm_nsegs; i++) {
			sc->sc_txdescs[sc->sc_txcur].desc_addr =
			    dmap->dm_segs[i].ds_addr;
			sc->sc_txdescs[sc->sc_txcur].desc_size =
			    dmap->dm_segs[i].ds_len;
			sc->sc_txdescs[sc->sc_txcur].desc_stat =
			    (i == 0 ? CDMAC_STAT_SOP : 0) |
			    (i == (dmap->dm_nsegs - 1) ? CDMAC_STAT_EOP : 0);

			sc->sc_txcur = TEMAC_TXNEXT(sc->sc_txcur);
		}

		sc->sc_txfree -= dmap->dm_nsegs;
		nsegs += dmap->dm_nsegs;

		sc->sc_txscur = TEMAC_TXSNEXT(sc->sc_txscur);
		sc->sc_txsfree--;
	}

	/* Get data running if we queued any. */
	if (nsegs > 0) {
		int	tail = TEMAC_TXINC(sc->sc_txcur, -1);

		/* Mark the last packet in this job. */
		txs->txs_last = 1;

		/* Mark the last descriptor in this job. */
		sc->sc_txdescs[tail].desc_stat |= CDMAC_STAT_STOP |
		    CDMAC_STAT_INTR;
		temac_txcdsync(sc, head, nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		temac_txkick(sc);
#if TEMAC_TXDEBUG > 0
		aprint_debug_dev(sc->sc_dev,
		    "start: txcur %03d -> %03d, nseg %03d\n",
		    head, sc->sc_txcur, nsegs);
#endif
	}
}

static void
temac_stop(struct ifnet *ifp, int disable)
{
	struct temac_softc	*sc = (struct temac_softc *)ifp->if_softc;
	struct temac_txsoft	*txs;
	int			i;

#if TEMAC_DEBUG > 0
	aprint_debug_dev(sc->sc_dev, "stop\n");
#endif

	/* Down the MII. */
	callout_stop(&sc->sc_mii_tick);
	mii_down(&sc->sc_mii);

	/* Stop the engine. */
	temac_reset(sc);

	/* Drain buffer queues (unconditionally). */
	temac_rxdrain(sc);

	for (i = 0; i < TEMAC_TXQLEN; i++) {
		txs = &sc->sc_txsoft[i];

		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
			txs->txs_last = 0;
		}
	}
	sc->sc_txbusy = 0;

	/* Acknowledge we're down. */
	ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);
}

static int
temac_mii_readreg(device_t self, int phy, int reg)
{
	mtidcr(IDCR_HIF_ARG0, (phy << 5) | reg);
	mtidcr(IDCR_HIF_CTRL, TEMAC_GMI_MII_ADDR);
	hif_wait_stat(HIF_STAT_MIIRR);

	return (int)mfidcr(IDCR_HIF_ARG0);
}

static void
temac_mii_writereg(device_t self, int phy, int reg, int val)
{
	mtidcr(IDCR_HIF_ARG0, val);
	mtidcr(IDCR_HIF_CTRL, TEMAC_GMI_MII_WRVAL | HIF_CTRL_WRITE);
	mtidcr(IDCR_HIF_ARG0, (phy << 5) | reg);
	mtidcr(IDCR_HIF_CTRL, TEMAC_GMI_MII_ADDR | HIF_CTRL_WRITE);
	hif_wait_stat(HIF_STAT_MIIWR);
}
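
/*
 * A note on the write sequence above (as read from the code, not from
 * TEMAC documentation): the value is staged first via TEMAC_GMI_MII_WRVAL,
 * then writing the (phy << 5) | reg address to TEMAC_GMI_MII_ADDR kicks
 * off the actual MDIO cycle, whose completion hif_wait_stat() polls for.
 */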

static void
temac_mii_statchg(struct ifnet *ifp)
{
	struct temac_softc	*sc = ifp->if_softc;
	uint32_t		rcf, tcf, mmc;

	/* Full/half duplex link. */
	rcf = gmi_read_4(TEMAC_GMI_RXCF1);
	tcf = gmi_read_4(TEMAC_GMI_TXCF);

	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		gmi_write_4(TEMAC_GMI_RXCF1, rcf & ~GMI_RX_HDX);
		gmi_write_4(TEMAC_GMI_TXCF, tcf & ~GMI_TX_HDX);
	} else {
		gmi_write_4(TEMAC_GMI_RXCF1, rcf | GMI_RX_HDX);
		gmi_write_4(TEMAC_GMI_TXCF, tcf | GMI_TX_HDX);
	}

	/* Link speed. */
	mmc = gmi_read_4(TEMAC_GMI_MMC) & ~GMI_MMC_SPEED_MASK;

	switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
	case IFM_10_T:
		/*
		 * XXXFreza: the GMAC is not happy with 10Mbit Ethernet,
		 * although the documentation claims it's supported. Maybe
		 * it's just my equipment...
		 */
		mmc |= GMI_MMC_SPEED_10;
		break;
	case IFM_100_TX:
		mmc |= GMI_MMC_SPEED_100;
		break;
	case IFM_1000_T:
		mmc |= GMI_MMC_SPEED_1000;
		break;
	}

	gmi_write_4(TEMAC_GMI_MMC, mmc);
}

static void
temac_mii_tick(void *arg)
{
	struct temac_softc	*sc = (struct temac_softc *)arg;
	int			s;

	if (!device_is_active(sc->sc_dev))
		return;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	callout_schedule(&sc->sc_mii_tick, hz);
}

/*
 * External hooks.
 */
static void
temac_shutdown(void *arg)
{
	struct temac_softc	*sc = (struct temac_softc *)arg;

	temac_reset(sc);
}

static void
temac_tx_intr(void *arg)
{
	struct temac_softc	*sc = (struct temac_softc *)arg;
	uint32_t		stat;

	/* XXX: We may need to splnet() here if cdmac(4) changes. */

	if ((stat = cdmac_tx_stat(sc)) & CDMAC_STAT_ERROR) {
		aprint_error_dev(sc->sc_dev,
		    "transmit DMA is toast (%#08x), halted!\n",
		    stat);

		/* XXXFreza: how to signal this upstream? */
		temac_stop(&sc->sc_if, 1);
		sc->sc_dead = 1;
	}

#if TEMAC_DEBUG > 0
	aprint_debug_dev(sc->sc_dev, "tx intr 0x%08x\n", stat);
#endif
	temac_txreap(sc);
}

static void
temac_rx_intr(void *arg)
{
	struct temac_softc	*sc = (struct temac_softc *)arg;
	uint32_t		stat;

	/* XXX: We may need to splnet() here if cdmac(4) changes. */

	if ((stat = cdmac_rx_stat(sc)) & CDMAC_STAT_ERROR) {
		aprint_error_dev(sc->sc_dev,
		    "receive DMA is toast (%#08x), halted!\n",
		    stat);

		/* XXXFreza: how to signal this upstream? */
		temac_stop(&sc->sc_if, 1);
		sc->sc_dead = 1;
	}

#if TEMAC_DEBUG > 0
	aprint_debug_dev(sc->sc_dev, "rx intr 0x%08x\n", stat);
#endif
	temac_rxreap(sc);
}

/*
 * Utils.
 */
static inline void
temac_txcdsync(struct temac_softc *sc, int first, int cnt, int flag)
{
	if ((first + cnt) > TEMAC_NTXDESC) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_control_dmap,
		    TEMAC_TXDOFF(first),
		    sizeof(struct cdmac_descr) * (TEMAC_NTXDESC - first),
		    flag);
		cnt = (first + cnt) % TEMAC_NTXDESC;
		first = 0;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_control_dmap,
	    TEMAC_TXDOFF(first),
	    sizeof(struct cdmac_descr) * cnt,
	    flag);
}

static inline void
temac_rxcdsync(struct temac_softc *sc, int first, int cnt, int flag)
{
	if ((first + cnt) > TEMAC_NRXDESC) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_control_dmap,
		    TEMAC_RXDOFF(first),
		    sizeof(struct cdmac_descr) * (TEMAC_NRXDESC - first),
		    flag);
		cnt = (first + cnt) % TEMAC_NRXDESC;
		first = 0;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_control_dmap,
	    TEMAC_RXDOFF(first),
	    sizeof(struct cdmac_descr) * cnt,
	    flag);
}
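
/*
 * Both sync helpers take a descriptor range that may wrap past the end
 * of the ring and split it into at most two contiguous bus_dmamap_sync()
 * calls: with first = 250 and cnt = 10, say, descriptors 250..255 are
 * synced first, then 0..3.
 */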

static void
temac_txreap(struct temac_softc *sc)
{
	struct temac_txsoft	*txs;
	bus_dmamap_t		dmap;
	int			sent = 0;

	/*
	 * Transmit interrupts happen on the last descriptor of Tx jobs.
	 * Hence, every time we're called (and we assume txintr is our
	 * only caller!), we reap packets up to and including the one
	 * marked as last-in-batch.
	 *
	 * XXX we rely on making EXACTLY one batch per interrupt, no more
	 */
	while (sc->sc_txsfree != TEMAC_TXQLEN) {
		txs = &sc->sc_txsoft[sc->sc_txsreap];
		dmap = txs->txs_dmap;

		sc->sc_txreap = TEMAC_TXINC(sc->sc_txreap, dmap->dm_nsegs);
		sc->sc_txfree += dmap->dm_nsegs;

		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;

		sc->sc_if.if_opackets++;
		sent = 1;

		sc->sc_txsreap = TEMAC_TXSNEXT(sc->sc_txsreap);
		sc->sc_txsfree++;

		if (txs->txs_last) {
			txs->txs_last = 0;
			sc->sc_txbusy = 0;	/* channel stopped now */

			temac_txkick(sc);
			break;
		}
	}

	if (sent && (sc->sc_if.if_flags & IFF_OACTIVE))
		sc->sc_if.if_flags &= ~IFF_OACTIVE;
}

static int
temac_rxalloc(struct temac_softc *sc, int which, int verbose)
{
	struct temac_rxsoft	*rxs;
	struct mbuf		*m;
	uint32_t		stat;
	int			error;

	rxs = &sc->sc_rxsoft[which];

	/* The mbuf itself is not our problem, just clear DMA related stuff. */
	if (rxs->rxs_mbuf != NULL) {
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmap);
		rxs->rxs_mbuf = NULL;
	}

	/*
	 * We would like to store the mbuf and dmap in application-specific
	 * fields of the descriptor, but that doesn't work for Rx. Shame
	 * on Xilinx for this (and for the useless timer architecture).
	 *
	 * Hence each descriptor needs its own soft state. We may want
	 * to merge multiple rxs's into a monster mbuf when we support
	 * jumbo frames, though. Also, we use a single set of indexing
	 * variables for both sc_rxdescs[] and sc_rxsoft[].
	 */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		if (verbose)
			aprint_debug_dev(sc->sc_dev,
			    "out of Rx header mbufs\n");
		return (ENOBUFS);
	}
	MCLAIM(m, &sc->sc_ec.ec_rx_mowner);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		if (verbose)
			aprint_debug_dev(sc->sc_dev,
			    "out of Rx cluster mbufs\n");
		m_freem(m);
		return (ENOBUFS);
	}

	rxs->rxs_mbuf = m;
	m->m_pkthdr.len = m->m_len = MCLBYTES;

	/* Make sure the payload after the Ethernet header is 4-byte aligned. */
	m_adj(m, 2);
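
	/*
	 * Why 2 bytes: the Ethernet header is ETHER_HDR_LEN == 14 bytes
	 * long, so shifting the frame start by 2 puts the payload (e.g.
	 * an IP header) at offset 2 + 14 = 16 within the cluster, keeping
	 * its 32-bit fields naturally aligned.
	 */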

	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmap, m,
	    BUS_DMA_NOWAIT);
	if (error) {
		if (verbose)
			aprint_debug_dev(sc->sc_dev,
			    "could not map Rx descriptor %d, error = %d\n",
			    which, error);

		rxs->rxs_mbuf = NULL;
		m_freem(m);

		return (error);
	}

	stat =
	    (TEMAC_ISINTR(which) ? CDMAC_STAT_INTR : 0) |
	    (TEMAC_ISLAST(which) ? CDMAC_STAT_STOP : 0);

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmap, 0,
	    rxs->rxs_dmap->dm_mapsize, BUS_DMASYNC_PREREAD);

	/* Descriptor post-sync, if needed, left to the caller. */

	sc->sc_rxdescs[which].desc_addr = rxs->rxs_dmap->dm_segs[0].ds_addr;
	sc->sc_rxdescs[which].desc_size = rxs->rxs_dmap->dm_segs[0].ds_len;
	sc->sc_rxdescs[which].desc_stat = stat;

	/* Descriptor pre-sync, if needed, left to the caller. */

	return (0);
}

static void
temac_rxreap(struct temac_softc *sc)
{
	struct ifnet		*ifp = &sc->sc_if;
	uint32_t		stat, rxstat, rxsize;
	struct mbuf		*m;
	int			nseg, head, tail;

	head = sc->sc_rxreap;
	tail = 0;		/* gcc */
	nseg = 0;

	/*
	 * Collect finished entries on the Rx list, kick DMA if we hit
	 * the end. DMA will always stop on the last descriptor in chain,
	 * so it will never hit a reap-in-progress descriptor.
	 */
	while (1) {
		/* Maybe we previously failed to refresh this one? */
		if (sc->sc_rxsoft[sc->sc_rxreap].rxs_mbuf == NULL) {
			if (temac_rxalloc(sc, sc->sc_rxreap, 0) != 0)
				break;

			sc->sc_rxreap = TEMAC_RXNEXT(sc->sc_rxreap);
			continue;
		}
		temac_rxcdsync(sc, sc->sc_rxreap, 1,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		stat = sc->sc_rxdescs[sc->sc_rxreap].desc_stat;
		m = NULL;

		if ((stat & CDMAC_STAT_DONE) == 0)
			break;

		/* Count any descriptor we've collected, regardless of status. */
		nseg++;

		/* XXXFreza: This won't work for jumbo frames. */

		if ((stat & (CDMAC_STAT_EOP | CDMAC_STAT_SOP)) !=
		    (CDMAC_STAT_EOP | CDMAC_STAT_SOP)) {
			aprint_error_dev(sc->sc_dev,
			    "Rx packet doesn't fit in one descriptor, "
			    "stat = %#08x\n", stat);
			goto badframe;
		}

		/* Dissect TEMAC footer if this is end of packet. */
		rxstat = sc->sc_rxdescs[sc->sc_rxreap].desc_rxstat;
		rxsize = sc->sc_rxdescs[sc->sc_rxreap].desc_rxsize &
		    RXSIZE_MASK;

		if ((rxstat & RXSTAT_GOOD) == 0 ||
		    (rxstat & RXSTAT_SICK) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "corrupt Rx packet, rxstat = %#08x\n",
			    rxstat);
			goto badframe;
		}

		/* We are now bound to succeed. */
		bus_dmamap_sync(sc->sc_dmat,
		    sc->sc_rxsoft[sc->sc_rxreap].rxs_dmap, 0,
		    sc->sc_rxsoft[sc->sc_rxreap].rxs_dmap->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);

		m = sc->sc_rxsoft[sc->sc_rxreap].rxs_mbuf;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = rxsize;

badframe:
		/* Get ready for more work. */
		tail = sc->sc_rxreap;
		sc->sc_rxreap = TEMAC_RXNEXT(sc->sc_rxreap);

		/* On failures we reuse the descriptor and go ahead. */
		if (m == NULL) {
			sc->sc_rxdescs[tail].desc_stat =
			    (TEMAC_ISINTR(tail) ? CDMAC_STAT_INTR : 0) |
			    (TEMAC_ISLAST(tail) ? CDMAC_STAT_STOP : 0);

			ifp->if_ierrors++;
			continue;
		}

		bpf_mtap(ifp, m);

		ifp->if_ipackets++;
		(ifp->if_input)(ifp, m);

		/* Refresh descriptor, bail out if we're out of buffers. */
		if (temac_rxalloc(sc, tail, 1) != 0) {
			sc->sc_rxreap = TEMAC_RXINC(sc->sc_rxreap, -1);
			aprint_error_dev(sc->sc_dev, "Rx give up for now\n");
			break;
		}
	}

	/* We may now have a contiguous ready-to-go chunk of descriptors. */
	if (nseg > 0) {
#if TEMAC_RXDEBUG > 0
		aprint_debug_dev(sc->sc_dev,
		    "rxreap: rxreap %03d -> %03d, nseg %03d\n",
		    head, sc->sc_rxreap, nseg);
#endif
		temac_rxcdsync(sc, head, nseg,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		if (TEMAC_ISLAST(tail))
			cdmac_rx_start(sc, sc->sc_cdaddr + TEMAC_RXDOFF(0));
	}
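
	/*
	 * The Rx channel parks on the CDMAC_STAT_STOP descriptor -- the
	 * ring's last one, per TEMAC_ISLAST() -- so the restart above
	 * fires only once the reaper has passed that tail descriptor and
	 * the refreshed ring can be handed back to DMA from the top.
	 */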

	/* Ensure maximum Rx latency is kept under control. */
	callout_schedule(&sc->sc_rx_timo, hz / TEMAC_RXTIMO_HZ);
}

static void
temac_rxtimo(void *arg)
{
	struct temac_softc	*sc = (struct temac_softc *)arg;
	int			s;

	/* We run TEMAC_RXTIMO_HZ times/sec to ensure Rx doesn't stall. */
	s = splnet();
	temac_rxreap(sc);
	splx(s);
}

static void
temac_reset(struct temac_softc *sc)
{
	uint32_t		rcr, tcr;

	/* Kill CDMAC channels. */
	cdmac_tx_reset(sc);
	cdmac_rx_reset(sc);

	/* Disable receiver. */
	rcr = gmi_read_4(TEMAC_GMI_RXCF1) & ~GMI_RX_ENABLE;
	gmi_write_4(TEMAC_GMI_RXCF1, rcr);

	/* Disable transmitter. */
	tcr = gmi_read_4(TEMAC_GMI_TXCF) & ~GMI_TX_ENABLE;
	gmi_write_4(TEMAC_GMI_TXCF, tcr);
}

static void
temac_rxdrain(struct temac_softc *sc)
{
	struct temac_rxsoft	*rxs;
	int			i;

	for (i = 0; i < TEMAC_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];

		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}

	sc->sc_rx_drained = 1;
}

static void
temac_txkick(struct temac_softc *sc)
{
	if (sc->sc_txsoft[sc->sc_txsreap].txs_mbuf != NULL &&
	    sc->sc_txbusy == 0) {
		cdmac_tx_start(sc, sc->sc_cdaddr + TEMAC_TXDOFF(sc->sc_txreap));
		sc->sc_txbusy = 1;
	}
}