1 /*	$NetBSD: if_mvgbe.c,v 1.1 2010/06/02 06:18:11 kiyohara Exp $	*/
2 /*
3 * Copyright (c) 2007, 2008 KIYOHARA Takashi
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
19 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
23 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
24 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
26 */
27 #include <sys/cdefs.h>
28 __KERNEL_RCSID(0, "$NetBSD: if_mvgbe.c,v 1.1 2010/06/02 06:18:11 kiyohara Exp $");
29
30 #include "rnd.h"
31
32 #include <sys/param.h>
33 #include <sys/bus.h>
34 #include <sys/device.h>
35 #include <sys/endian.h>
36 #include <sys/errno.h>
37 #include <sys/kmem.h>
38 #include <sys/mutex.h>
39 #include <sys/sockio.h>
40
41 #include <dev/marvell/marvellreg.h>
42 #include <dev/marvell/marvellvar.h>
43 #include <dev/marvell/mvgbereg.h>
44
45 #include <net/if.h>
46 #include <net/if_ether.h>
47 #include <net/if_media.h>
48
49 #include <netinet/in.h>
50 #include <netinet/in_systm.h>
51 #include <netinet/ip.h>
52
53 #include <net/bpf.h>
54 #if NRND > 0
55 #include <sys/rnd.h>
56 #endif
57
58 #include <dev/mii/mii.h>
59 #include <dev/mii/miivar.h>
60
61 #include "locators.h"
62
63 /* #define MVGBE_DEBUG 3 */
64 #ifdef MVGBE_DEBUG
65 #define DPRINTF(x) if (mvgbe_debug) printf x
66 #define DPRINTFN(n,x) if (mvgbe_debug >= (n)) printf x
67 int mvgbe_debug = MVGBE_DEBUG;
68 #else
69 #define DPRINTF(x)
70 #define DPRINTFN(n,x)
71 #endif
72
73
74 #define MVGBE_READ(sc, reg) \
75 bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg))
76 #define MVGBE_WRITE(sc, reg, val) \
77 bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))
78 #define MVGBE_READ_FILTER(sc, reg) \
79 bus_space_read_4((sc)->sc_iot, (sc)->sc_dafh, (reg))
80 #define MVGBE_WRITE_FILTER(sc, reg, val, c) \
81 bus_space_set_region_4((sc)->sc_iot, (sc)->sc_dafh, (reg), (val), (c))
82
83 #define MVGBE_TX_RING_CNT 256
84 #define MVGBE_RX_RING_CNT 256
85
86 #define MVGBE_JSLOTS 384 /* XXXX */
87 #define MVGBE_JLEN (MVGBE_MRU + MVGBE_BUF_ALIGN)
88 #define MVGBE_NTXSEG 30
89 #define MVGBE_JPAGESZ PAGE_SIZE
90 #define MVGBE_RESID \
91 (MVGBE_JPAGESZ - (MVGBE_JLEN * MVGBE_JSLOTS) % MVGBE_JPAGESZ)
92 #define MVGBE_JMEM \
93 ((MVGBE_JLEN * MVGBE_JSLOTS) + MVGBE_RESID)
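/*
 * Jumbo buffer pool sizing: each of the MVGBE_JSLOTS buffers is
 * MVGBE_JLEN bytes (the MRU plus alignment slack), MVGBE_RESID pads the
 * pool out to a whole number of pages, and MVGBE_JMEM is the total
 * allocation handed to bus_dmamem_alloc() in mvgbe_alloc_jumbo_mem().
 * As an illustration only (not the header's real values): with a
 * 9000-byte MRU, 32-byte alignment and 4 KB pages, MVGBE_JLEN would be
 * 9032, 384 slots would need 3468288 bytes, and MVGBE_RESID would round
 * that up to the next page boundary.
 */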
94
95 #define MVGBE_TX_RING_ADDR(sc, i) \
96 ((sc)->sc_ring_map->dm_segs[0].ds_addr + \
97 offsetof(struct mvgbe_ring_data, mvgbe_tx_ring[(i)]))
98
99 #define MVGBE_RX_RING_ADDR(sc, i) \
100 ((sc)->sc_ring_map->dm_segs[0].ds_addr + \
101 offsetof(struct mvgbe_ring_data, mvgbe_rx_ring[(i)]))
102
103 #define MVGBE_CDOFF(x) offsetof(struct mvgbe_ring_data, x)
104 #define MVGBE_CDTXOFF(x) MVGBE_CDOFF(mvgbe_tx_ring[(x)])
105 #define MVGBE_CDRXOFF(x) MVGBE_CDOFF(mvgbe_rx_ring[(x)])
106
107 #define MVGBE_CDTXSYNC(sc, x, n, ops) \
108 do { \
109 int __x, __n; \
110 const int __descsize = sizeof(struct mvgbe_tx_desc); \
111 \
112 __x = (x); \
113 __n = (n); \
114 \
115 /* If it will wrap around, sync to the end of the ring. */ \
116 if ((__x + __n) > MVGBE_TX_RING_CNT) { \
117 bus_dmamap_sync((sc)->sc_dmat, \
118 (sc)->sc_ring_map, MVGBE_CDTXOFF(__x), \
119 __descsize * (MVGBE_TX_RING_CNT - __x), (ops)); \
120 __n -= (MVGBE_TX_RING_CNT - __x); \
121 __x = 0; \
122 } \
123 \
124 /* Now sync whatever is left. */ \
125 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_ring_map, \
126 MVGBE_CDTXOFF((__x)), __descsize * __n, (ops)); \
127 } while (0 /*CONSTCOND*/)
128
129 #define MVGBE_CDRXSYNC(sc, x, ops) \
130 do { \
131 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_ring_map, \
132 MVGBE_CDRXOFF((x)), sizeof(struct mvgbe_rx_desc), (ops)); \
133 } while (/*CONSTCOND*/0)
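/*
 * Descriptor sync usage (see mvgbe_init_tx_ring() and mvgbe_rxeof()):
 *
 *	MVGBE_CDTXSYNC(sc, 0, MVGBE_TX_RING_CNT,
 *	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 *	MVGBE_CDRXSYNC(sc, idx,
 *	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 *
 * MVGBE_CDTXSYNC splits the sync in two when the requested range wraps
 * past the end of the ring.
 */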
134
135
136 struct mvgbe_jpool_entry {
137 int slot;
138 LIST_ENTRY(mvgbe_jpool_entry) jpool_entries;
139 };
140
141 struct mvgbe_chain {
142 void *mvgbe_desc;
143 struct mbuf *mvgbe_mbuf;
144 struct mvgbe_chain *mvgbe_next;
145 };
146
147 struct mvgbe_txmap_entry {
148 bus_dmamap_t dmamap;
149 SIMPLEQ_ENTRY(mvgbe_txmap_entry) link;
150 };
151
152 struct mvgbe_chain_data {
153 struct mvgbe_chain mvgbe_tx_chain[MVGBE_TX_RING_CNT];
154 struct mvgbe_txmap_entry *mvgbe_tx_map[MVGBE_TX_RING_CNT];
155 int mvgbe_tx_prod;
156 int mvgbe_tx_cons;
157 int mvgbe_tx_cnt;
158
159 struct mvgbe_chain mvgbe_rx_chain[MVGBE_RX_RING_CNT];
160 bus_dmamap_t mvgbe_rx_map[MVGBE_RX_RING_CNT];
161 bus_dmamap_t mvgbe_rx_jumbo_map;
162 int mvgbe_rx_prod;
163 int mvgbe_rx_cons;
164 int mvgbe_rx_cnt;
165
166 /* Stick the jumbo mem management stuff here too. */
167 void *mvgbe_jslots[MVGBE_JSLOTS];
168 void *mvgbe_jumbo_buf;
169 };
170
171 struct mvgbe_ring_data {
172 struct mvgbe_tx_desc mvgbe_tx_ring[MVGBE_TX_RING_CNT];
173 struct mvgbe_rx_desc mvgbe_rx_ring[MVGBE_RX_RING_CNT];
174 };
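/*
 * Both descriptor rings live in a single mvgbe_ring_data allocation
 * mapped once by sc_ring_map; MVGBE_CDTXOFF/MVGBE_CDRXOFF and
 * MVGBE_TX_RING_ADDR/MVGBE_RX_RING_ADDR compute the byte offsets and
 * DMA addresses of individual descriptors within it.
 */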
175
176 struct mvgbec_softc {
177 device_t sc_dev;
178
179 bus_space_tag_t sc_iot;
180 bus_space_handle_t sc_ioh;
181
182 kmutex_t sc_mtx;
183 };
184
185 struct mvgbe_softc {
186 device_t sc_dev;
187 int sc_port; /* port num (0 or 1) */
188
189 bus_space_tag_t sc_iot;
190 bus_space_handle_t sc_ioh;
191 bus_space_handle_t sc_dafh; /* dest address filter handle */
192 bus_dma_tag_t sc_dmat;
193
194 struct ethercom sc_ethercom;
195 struct mii_data sc_mii;
196 u_int8_t sc_enaddr[ETHER_ADDR_LEN]; /* station addr */
197
198 struct mvgbe_chain_data sc_cdata;
199 struct mvgbe_ring_data *sc_rdata;
200 bus_dmamap_t sc_ring_map;
201 int sc_if_flags;
202
203 LIST_HEAD(__mvgbe_jfreehead, mvgbe_jpool_entry) sc_jfree_listhead;
204 LIST_HEAD(__mvgbe_jinusehead, mvgbe_jpool_entry) sc_jinuse_listhead;
205 SIMPLEQ_HEAD(__mvgbe_txmaphead, mvgbe_txmap_entry) sc_txmap_head;
206
207 #if NRND > 0
208 rndsource_element_t sc_rnd_source;
209 #endif
210 };
211
212
213 /* Gigabit Ethernet Unit Global part functions */
214
215 static int mvgbec_match(device_t, struct cfdata *, void *);
216 static void mvgbec_attach(device_t, device_t, void *);
217
218 static int mvgbec_print(void *, const char *);
219 static int mvgbec_search(device_t, cfdata_t, const int *, void *);
220
221 /* MII functions */
222 static int mvgbec_miibus_readreg(device_t, int, int);
223 static void mvgbec_miibus_writereg(device_t, int, int, int);
224 static void mvgbec_miibus_statchg(device_t);
225
226 static void mvgbec_wininit(struct mvgbec_softc *);
227
228 /* Gigabit Ethernet Port part functions */
229
230 static int mvgbe_match(device_t, struct cfdata *, void *);
231 static void mvgbe_attach(device_t, device_t, void *);
232
233 static int mvgbe_intr(void *);
234
235 static void mvgbe_start(struct ifnet *);
236 static int mvgbe_ioctl(struct ifnet *, u_long, void *);
237 static int mvgbe_init(struct ifnet *);
238 static void mvgbe_stop(struct ifnet *, int);
239 static void mvgbe_watchdog(struct ifnet *);
240
241 /* MII functions */
242 static int mvgbe_ifmedia_upd(struct ifnet *);
243 static void mvgbe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
244
245 static int mvgbe_init_rx_ring(struct mvgbe_softc *);
246 static int mvgbe_init_tx_ring(struct mvgbe_softc *);
247 static int mvgbe_newbuf(struct mvgbe_softc *, int, struct mbuf *, bus_dmamap_t);
248 static int mvgbe_alloc_jumbo_mem(struct mvgbe_softc *);
249 static void *mvgbe_jalloc(struct mvgbe_softc *);
250 static void mvgbe_jfree(struct mbuf *, void *, size_t, void *);
251 static int mvgbe_encap(struct mvgbe_softc *, struct mbuf *, uint32_t *);
252 static void mvgbe_rxeof(struct mvgbe_softc *);
253 static void mvgbe_txeof(struct mvgbe_softc *);
254 static void mvgbe_setmulti(struct mvgbe_softc *);
255 #ifdef MVGBE_DEBUG
256 static void mvgbe_dump_txdesc(struct mvgbe_tx_desc *, int);
257 #endif
258
259 CFATTACH_DECL_NEW(mvgbec_gt, sizeof(struct mvgbec_softc),
260 mvgbec_match, mvgbec_attach, NULL, NULL);
261 CFATTACH_DECL_NEW(mvgbec_mbus, sizeof(struct mvgbec_softc),
262 mvgbec_match, mvgbec_attach, NULL, NULL);
263
264 CFATTACH_DECL_NEW(mvgbe, sizeof(struct mvgbe_softc),
265 mvgbe_match, mvgbe_attach, NULL, NULL);
266
267
268 struct mvgbe_port {
269 int model;
270 int ports;
271 int irqs[4];
272 } mvgbe_ports[] = {
273 { MARVELL_DISCOVERY_II, 3, { 32, 33, 34 } },
274 { MARVELL_DISCOVERY_III, 3, { 32, 33, 34 } },
275 #if 0
276 { MARVELL_DISCOVERY_LT, ?, { } },
277 { MARVELL_DISCOVERY_V, ?, { } },
278 { MARVELL_DISCOVERY_VI, ?, { } },
279 #endif
280 { MARVELL_ORION_1_88F5082, 1, { 21 } },
281 { MARVELL_ORION_1_88F5180N, 1, { 21 } },
282 { MARVELL_ORION_1_88F5181, 1, { 21 } },
283 { MARVELL_ORION_1_88F5182, 1, { 21 } },
284 { MARVELL_ORION_2_88F5281, 1, { 21 } },
285 { MARVELL_ORION_1_88F6082, 1, { 21 } },
286 { MARVELL_ORION_1_88W8660, 1, { 21 } },
287 };
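/*
 * mvgbe_ports[] maps a Marvell controller model to the number of
 * gigabit Ethernet ports it carries and their interrupt numbers;
 * mvgbec_match() accepts only models listed here, and mvgbec_attach()
 * configures one mvgbe child per port with the corresponding IRQ.
 */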
288
289
290 /* ARGSUSED */
291 static int
292 mvgbec_match(device_t parent, struct cfdata *match, void *aux)
293 {
294 struct marvell_attach_args *mva = aux;
295 int i;
296
297 if (strcmp(mva->mva_name, match->cf_name) != 0)
298 return 0;
299
300 if (mva->mva_offset == GTCF_OFFSET_DEFAULT)
301 return 0;
302
303 for (i = 0; i < __arraycount(mvgbe_ports); i++)
304 if (mva->mva_model == mvgbe_ports[i].model) {
305 mva->mva_size = MVGBE_SIZE;
306 return 1;
307 }
308 return 0;
309 }
310
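/*
 * Controller attach: map the unit registers, clear and mask the global
 * interrupts, program the address decode windows, then configure an
 * mvgbe child for every port the model provides and record each
 * child's PHY address in MVGBE_PHYADDR.
 */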
311 /* ARGSUSED */
312 static void
313 mvgbec_attach(device_t parent, device_t self, void *aux)
314 {
315 struct mvgbec_softc *sc = device_private(self);
316 struct marvell_attach_args *mva = aux, gbea;
317 struct mvgbe_softc *port;
318 struct mii_softc *mii;
319 device_t child;
320 uint32_t phyaddr;
321 int i, j;
322
323 aprint_naive("\n");
324 aprint_normal(": Marvell Gigabit Ethernet Controller\n");
325
326 sc->sc_dev = self;
327 sc->sc_iot = mva->mva_iot;
328 if (bus_space_subregion(mva->mva_iot, mva->mva_ioh, mva->mva_offset,
329 mva->mva_size, &sc->sc_ioh)) {
330 aprint_error_dev(self, "Cannot map registers\n");
331 return;
332 }
333 phyaddr = 0;
334 MVGBE_WRITE(sc, MVGBE_PHYADDR, phyaddr);
335
336 mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NET);
337
338 /* Disable and clear Gigabit Ethernet Unit interrupts */
339 MVGBE_WRITE(sc, MVGBE_EUIM, 0);
340 MVGBE_WRITE(sc, MVGBE_EUIC, 0);
341
342 mvgbec_wininit(sc);
343
344 memset(&gbea, 0, sizeof(gbea));
345 for (i = 0; i < __arraycount(mvgbe_ports); i++) {
346 if (mvgbe_ports[i].model != mva->mva_model)
347 continue;
348
349 for (j = 0; j < mvgbe_ports[i].ports; j++) {
350 gbea.mva_name = "mvgbe";
351 gbea.mva_model = mva->mva_model;
352 gbea.mva_iot = sc->sc_iot;
353 gbea.mva_ioh = sc->sc_ioh;
354 gbea.mva_unit = j;
355 gbea.mva_dmat = mva->mva_dmat;
356 gbea.mva_irq = mvgbe_ports[i].irqs[j];
357 child = config_found_sm_loc(sc->sc_dev, "mvgbec", NULL,
358 &gbea, mvgbec_print, mvgbec_search);
359 if (child) {
360 port = device_private(child);
361 mii = LIST_FIRST(&port->sc_mii.mii_phys);
362 phyaddr |= MVGBE_PHYADDR_PHYAD(j, mii->mii_phy);
363 }
364 }
365 }
366 MVGBE_WRITE(sc, MVGBE_PHYADDR, phyaddr);
367 }
368
369 static int
370 mvgbec_print(void *aux, const char *pnp)
371 {
372 struct marvell_attach_args *gbea = aux;
373
374 if (pnp)
375 aprint_normal("%s at %s port %d",
376 gbea->mva_name, pnp, gbea->mva_unit);
377 else {
378 if (gbea->mva_unit != MVGBECCF_PORT_DEFAULT)
379 aprint_normal(" port %d", gbea->mva_unit);
380 if (gbea->mva_irq != MVGBECCF_IRQ_DEFAULT)
381 aprint_normal(" irq %d", gbea->mva_irq);
382 }
383 return UNCONF;
384 }
385
386 /* ARGSUSED */
387 static int
388 mvgbec_search(device_t parent, cfdata_t cf, const int *ldesc, void *aux)
389 {
390 struct marvell_attach_args *gbea = aux;
391
392 if (cf->cf_loc[MVGBECCF_PORT] == gbea->mva_unit &&
393 cf->cf_loc[MVGBECCF_IRQ] != MVGBECCF_IRQ_DEFAULT)
394 gbea->mva_irq = cf->cf_loc[MVGBECCF_IRQ];
395
396 return config_match(parent, cf, aux);
397 }
398
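/*
 * Read a PHY register through the controller's SMI interface: wait for
 * the SMI engine to go idle, issue a read op for (phy, reg), poll for
 * MVGBE_SMI_READVALID and return the data bits (MVGBE_SMI_DATA_MASK).
 * Access is serialized with the parent controller's sc_mtx because the
 * SMI block is shared by all ports.  mvgbec_miibus_writereg() below
 * mirrors this with a write op.
 */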
399 static int
400 mvgbec_miibus_readreg(device_t dev, int phy, int reg)
401 {
402 struct mvgbe_softc *sc = device_private(dev);
403 struct mvgbec_softc *csc = device_private(device_parent(dev));
404 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
405 uint32_t smi, val;
406 int i;
407
408 mutex_enter(&csc->sc_mtx);
409
410 for (i = 0; i < MVGBE_PHY_TIMEOUT; i++) {
411 DELAY(1);
412 if (!(MVGBE_READ(csc, MVGBE_SMI) & MVGBE_SMI_BUSY))
413 break;
414 }
415 if (i == MVGBE_PHY_TIMEOUT) {
416 aprint_error_ifnet(ifp, "SMI busy timeout\n");
417 mutex_exit(&csc->sc_mtx);
418 return -1;
419 }
420
421 smi =
422 MVGBE_SMI_PHYAD(phy) | MVGBE_SMI_REGAD(reg) | MVGBE_SMI_OPCODE_READ;
423 MVGBE_WRITE(csc, MVGBE_SMI, smi);
424
425 for (i = 0; i < MVGBE_PHY_TIMEOUT; i++) {
426 DELAY(1);
427 smi = MVGBE_READ(csc, MVGBE_SMI);
428 if (smi & MVGBE_SMI_READVALID)
429 break;
430 }
431
432 mutex_exit(&csc->sc_mtx);
433
434 DPRINTFN(9, ("mvgbec_miibus_readreg: i=%d, timeout=%d\n",
435 i, MVGBE_PHY_TIMEOUT));
436
437 val = smi & MVGBE_SMI_DATA_MASK;
438
439 DPRINTFN(9, ("mvgbec_miibus_readreg phy=%d, reg=%#x, val=%#x\n",
440 phy, reg, val));
441
442 return val;
443 }
444
445 static void
446 mvgbec_miibus_writereg(device_t dev, int phy, int reg, int val)
447 {
448 struct mvgbe_softc *sc = device_private(dev);
449 struct mvgbec_softc *csc = device_private(device_parent(dev));
450 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
451 uint32_t smi;
452 int i;
453
454 DPRINTFN(9, ("mvgbec_miibus_writereg phy=%d reg=%#x val=%#x\n",
455 phy, reg, val));
456
457 mutex_enter(&csc->sc_mtx);
458
459 for (i = 0; i < MVGBE_PHY_TIMEOUT; i++) {
460 DELAY(1);
461 if (!(MVGBE_READ(csc, MVGBE_SMI) & MVGBE_SMI_BUSY))
462 break;
463 }
464 if (i == MVGBE_PHY_TIMEOUT) {
465 aprint_error_ifnet(ifp, "SMI busy timeout\n");
466 mutex_exit(&csc->sc_mtx);
467 return;
468 }
469
470 smi = MVGBE_SMI_PHYAD(phy) | MVGBE_SMI_REGAD(reg) |
471 MVGBE_SMI_OPCODE_WRITE | (val & MVGBE_SMI_DATA_MASK);
472 MVGBE_WRITE(csc, MVGBE_SMI, smi);
473
474 for (i = 0; i < MVGBE_PHY_TIMEOUT; i++) {
475 DELAY(1);
476 if (!(MVGBE_READ(csc, MVGBE_SMI) & MVGBE_SMI_BUSY))
477 break;
478 }
479
480 mutex_exit(&csc->sc_mtx);
481
482 if (i == MVGBE_PHY_TIMEOUT)
483 aprint_error_ifnet(ifp, "phy write timed out\n");
484 }
485
486 static void
487 mvgbec_miibus_statchg(device_t dev)
488 {
489
490 /* nothing to do */
491 }
492
493
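/*
 * Program the unit's address decode windows so descriptor and buffer
 * DMA can reach SDRAM: query the parent bus for each SDRAM chip-select
 * window, copy its target/attribute/base/size into a MVGBE window
 * (using the high remap register MVGBE_HA for bases above 4 GB), grant
 * full access to the windows in use and leave the rest disabled.
 */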
494 static void
495 mvgbec_wininit(struct mvgbec_softc *sc)
496 {
497 device_t pdev = device_parent(sc->sc_dev);
498 uint64_t base;
499 uint32_t en, ac, size;
500 int window, target, attr, rv, i;
501 static int tags[] = {
502 MARVELL_TAG_SDRAM_CS0,
503 MARVELL_TAG_SDRAM_CS1,
504 MARVELL_TAG_SDRAM_CS2,
505 MARVELL_TAG_SDRAM_CS3,
506
507 MARVELL_TAG_UNDEFINED,
508 };
509
510 /* First disable all address decode windows */
511 en = MVGBE_BARE_EN_MASK;
512 MVGBE_WRITE(sc, MVGBE_BARE, en);
513
514 ac = 0;
515 for (window = 0, i = 0;
516 tags[i] != MARVELL_TAG_UNDEFINED && window < MVGBE_NWINDOW; i++) {
517 rv = marvell_winparams_by_tag(pdev, tags[i],
518 &target, &attr, &base, &size);
519 if (rv != 0 || size == 0)
520 continue;
521
522 if (base > 0xffffffffULL) {
523 if (window >= MVGBE_NREMAP) {
524 aprint_error_dev(sc->sc_dev,
525 "can't remap window %d\n", window);
526 continue;
527 }
528 MVGBE_WRITE(sc, MVGBE_HA(window),
529 (base >> 32) & 0xffffffff);
530 }
531
532 MVGBE_WRITE(sc, MVGBE_BASEADDR(window),
533 MVGBE_BASEADDR_TARGET(target) |
534 MVGBE_BASEADDR_ATTR(attr) |
535 MVGBE_BASEADDR_BASE(base));
536 MVGBE_WRITE(sc, MVGBE_S(window), MVGBE_S_SIZE(size));
537
538 en &= ~(1 << window);
539 /* set full access (r/w) */
540 ac |= MVGBE_EPAP_EPAR(window, MVGBE_EPAP_AC_FA);
541 window++;
542 }
543 /* allow access to the decode windows */
544 MVGBE_WRITE(sc, MVGBE_EPAP, ac);
545
546 MVGBE_WRITE(sc, MVGBE_BARE, en);
547 }
548
549
550 /* ARGSUSED */
551 static int
552 mvgbe_match(device_t parent, struct cfdata *match, void *aux)
553 {
554 struct marvell_attach_args *mva = aux;
555 uint32_t pbase, maddrh, maddrl;
556
557 pbase = MVGBE_PORTR_BASE + mva->mva_unit * MVGBE_PORTR_SIZE;
558 maddrh =
559 bus_space_read_4(mva->mva_iot, mva->mva_ioh, pbase + MVGBE_MACAH);
560 maddrl =
561 bus_space_read_4(mva->mva_iot, mva->mva_ioh, pbase + MVGBE_MACAL);
562 if ((maddrh | maddrl) == 0)
563 return 0;
564
565 return 1;
566 }
567
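/*
 * Per-port attach: map the port register block and its destination
 * address filter block, read the station address from MACAH/MACAL,
 * hook up the interrupt, allocate a DMA-safe block holding both
 * descriptor rings plus one dmamap per TX slot, set up the jumbo
 * buffer pool, then attach the ifnet, the MII layer and (when
 * configured) an rnd(4) source.
 */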
568 /* ARGSUSED */
569 static void
570 mvgbe_attach(device_t parent, device_t self, void *aux)
571 {
572 struct mvgbe_softc *sc = device_private(self);
573 struct marvell_attach_args *mva = aux;
574 struct mvgbe_txmap_entry *entry;
575 struct ifnet *ifp;
576 bus_dma_segment_t seg;
577 bus_dmamap_t dmamap;
578 int rseg, i;
579 uint32_t maddrh, maddrl;
580 void *kva;
581
582 aprint_naive("\n");
583 aprint_normal("\n");
584
585 sc->sc_dev = self;
586 sc->sc_port = mva->mva_unit;
587 sc->sc_iot = mva->mva_iot;
588 if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
589 MVGBE_PORTR_BASE + mva->mva_unit * MVGBE_PORTR_SIZE,
590 MVGBE_PORTR_SIZE, &sc->sc_ioh)) {
591 aprint_error_dev(self, "Cannot map registers\n");
592 return;
593 }
594 if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
595 MVGBE_PORTDAFR_BASE + mva->mva_unit * MVGBE_PORTDAFR_SIZE,
596 MVGBE_PORTDAFR_SIZE, &sc->sc_dafh)) {
597 aprint_error_dev(self,
598 "Cannot map destination address filter registers\n");
599 return;
600 }
601 sc->sc_dmat = mva->mva_dmat;
602
603 maddrh = MVGBE_READ(sc, MVGBE_MACAH);
604 maddrl = MVGBE_READ(sc, MVGBE_MACAL);
605 sc->sc_enaddr[0] = maddrh >> 24;
606 sc->sc_enaddr[1] = maddrh >> 16;
607 sc->sc_enaddr[2] = maddrh >> 8;
608 sc->sc_enaddr[3] = maddrh >> 0;
609 sc->sc_enaddr[4] = maddrl >> 8;
610 sc->sc_enaddr[5] = maddrl >> 0;
611 aprint_normal_dev(self, "Ethernet address %s\n",
612 ether_sprintf(sc->sc_enaddr));
613
614 /* clear all ethernet port interrupts */
615 MVGBE_WRITE(sc, MVGBE_IC, 0);
616 MVGBE_WRITE(sc, MVGBE_ICE, 0);
617
618 marvell_intr_establish(mva->mva_irq, IPL_NET, mvgbe_intr, sc);
619
620 /* Allocate the descriptor queues. */
621 if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct mvgbe_ring_data),
622 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
623 aprint_error_dev(self, "can't alloc rx buffers\n");
624 return;
625 }
626 if (bus_dmamem_map(sc->sc_dmat, &seg, rseg,
627 sizeof(struct mvgbe_ring_data), &kva, BUS_DMA_NOWAIT)) {
628 aprint_error_dev(self, "can't map dma buffers (%lu bytes)\n",
629 (u_long)sizeof(struct mvgbe_ring_data));
630 goto fail1;
631 }
632 if (bus_dmamap_create(sc->sc_dmat, sizeof(struct mvgbe_ring_data), 1,
633 sizeof(struct mvgbe_ring_data), 0, BUS_DMA_NOWAIT,
634 &sc->sc_ring_map)) {
635 aprint_error_dev(self, "can't create dma map\n");
636 goto fail2;
637 }
638 if (bus_dmamap_load(sc->sc_dmat, sc->sc_ring_map, kva,
639 sizeof(struct mvgbe_ring_data), NULL, BUS_DMA_NOWAIT)) {
640 aprint_error_dev(self, "can't load dma map\n");
641 goto fail3;
642 }
643 for (i = 0; i < MVGBE_RX_RING_CNT; i++)
644 sc->sc_cdata.mvgbe_rx_chain[i].mvgbe_mbuf = NULL;
645
646 SIMPLEQ_INIT(&sc->sc_txmap_head);
647 for (i = 0; i < MVGBE_TX_RING_CNT; i++) {
648 sc->sc_cdata.mvgbe_tx_chain[i].mvgbe_mbuf = NULL;
649
650 if (bus_dmamap_create(sc->sc_dmat,
651 MVGBE_JLEN, MVGBE_NTXSEG, MVGBE_JLEN, 0,
652 BUS_DMA_NOWAIT, &dmamap)) {
653 aprint_error_dev(self, "Can't create TX dmamap\n");
654 goto fail4;
655 }
656
657 entry = kmem_alloc(sizeof(*entry), KM_SLEEP);
658 if (!entry) {
659 aprint_error_dev(self, "Can't alloc txmap entry\n");
660 bus_dmamap_destroy(sc->sc_dmat, dmamap);
661 goto fail4;
662 }
663 entry->dmamap = dmamap;
664 SIMPLEQ_INSERT_HEAD(&sc->sc_txmap_head, entry, link);
665 }
666
667 sc->sc_rdata = (struct mvgbe_ring_data *)kva;
668 memset(sc->sc_rdata, 0, sizeof(struct mvgbe_ring_data));
669
670 #if 0
671 /*
672 * We can support 802.1Q VLAN-sized frames and jumbo
673 * Ethernet frames.
674 */
675 sc->sc_ethercom.ec_capabilities |=
676 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING | ETHERCAP_JUMBO_MTU;
677 #else
678 /* XXXX: VLAN usage is not yet understood, so advertise jumbo MTU only. */
679 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
680 #endif
681
682 /* Try to allocate memory for jumbo buffers. */
683 if (mvgbe_alloc_jumbo_mem(sc)) {
684 aprint_error_dev(self, "jumbo buffer allocation failed\n");
685 goto fail4;
686 }
687
688 ifp = &sc->sc_ethercom.ec_if;
689 ifp->if_softc = sc;
690 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
691 ifp->if_start = mvgbe_start;
692 ifp->if_ioctl = mvgbe_ioctl;
693 ifp->if_init = mvgbe_init;
694 ifp->if_stop = mvgbe_stop;
695 ifp->if_watchdog = mvgbe_watchdog;
696 /*
697 * We can do IPv4/TCPv4/UDPv4 checksums in hardware.
698 */
699 sc->sc_ethercom.ec_if.if_capabilities |=
700 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
701 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
702 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
703 IFQ_SET_MAXLEN(&ifp->if_snd, max(MVGBE_TX_RING_CNT - 1, IFQ_MAXLEN));
704 IFQ_SET_READY(&ifp->if_snd);
705 strcpy(ifp->if_xname, device_xname(sc->sc_dev));
706
707 mvgbe_stop(ifp, 0);
708
709 /*
710 * Do MII setup.
711 */
712 sc->sc_mii.mii_ifp = ifp;
713 sc->sc_mii.mii_readreg = mvgbec_miibus_readreg;
714 sc->sc_mii.mii_writereg = mvgbec_miibus_writereg;
715 sc->sc_mii.mii_statchg = mvgbec_miibus_statchg;
716
717 sc->sc_ethercom.ec_mii = &sc->sc_mii;
718 ifmedia_init(&sc->sc_mii.mii_media, 0,
719 mvgbe_ifmedia_upd, mvgbe_ifmedia_sts);
720 mii_attach(self, &sc->sc_mii, 0xffffffff,
721 MII_PHY_ANY, MII_OFFSET_ANY, 0);
722 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
723 aprint_error_dev(self, "no PHY found!\n");
724 ifmedia_add(&sc->sc_mii.mii_media,
725 IFM_ETHER|IFM_MANUAL, 0, NULL);
726 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
727 } else
728 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
729
730 /*
731 * Call MI attach routines.
732 */
733 if_attach(ifp);
734
735 ether_ifattach(ifp, sc->sc_enaddr);
736
737 #if NRND > 0
738 rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev),
739 RND_TYPE_NET, 0);
740 #endif
741
742 return;
743
744 fail4:
745 while ((entry = SIMPLEQ_FIRST(&sc->sc_txmap_head)) != NULL) {
746 SIMPLEQ_REMOVE_HEAD(&sc->sc_txmap_head, link);
747 bus_dmamap_destroy(sc->sc_dmat, entry->dmamap);
748 }
749 bus_dmamap_unload(sc->sc_dmat, sc->sc_ring_map);
750 fail3:
751 bus_dmamap_destroy(sc->sc_dmat, sc->sc_ring_map);
752 fail2:
753 bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(struct mvgbe_ring_data));
754 fail1:
755 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
756 return;
757 }
758
759
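/*
 * Interrupt handler: loop reading the cause registers (MVGBE_IC/ICE),
 * acknowledge what was seen by writing back the complement, react to
 * link changes by enabling or disabling the RX/TX queues, reap received
 * and transmitted buffers, then kick the transmit path if packets are
 * still queued.  The XORed causes are fed to rnd(4).
 */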
760 static int
761 mvgbe_intr(void *arg)
762 {
763 struct mvgbe_softc *sc = arg;
764 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
765 uint32_t ic, ice, datum = 0;
766 int claimed = 0;
767
768 for (;;) {
769 ice = MVGBE_READ(sc, MVGBE_ICE);
770 ic = MVGBE_READ(sc, MVGBE_IC);
771
772 DPRINTFN(3, ("mvgbe_intr: ic=%#x, ice=%#x\n", ic, ice));
773 if (ic == 0 && ice == 0)
774 break;
775
776 datum = datum ^ ic ^ ice;
777
778 MVGBE_WRITE(sc, MVGBE_IC, ~ic);
779 MVGBE_WRITE(sc, MVGBE_ICE, ~ice);
780
781 claimed = 1;
782
783 if (ice & MVGBE_ICE_LINKCHG) {
784 if (MVGBE_READ(sc, MVGBE_PS) & MVGBE_PS_LINKUP) {
785 /* Enable port RX and TX. */
786 MVGBE_WRITE(sc, MVGBE_RQC, MVGBE_RQC_ENQ(0));
787 MVGBE_WRITE(sc, MVGBE_TQC, MVGBE_TQC_ENQ);
788 } else {
789 MVGBE_WRITE(sc, MVGBE_RQC, MVGBE_RQC_DISQ(0));
790 MVGBE_WRITE(sc, MVGBE_TQC, MVGBE_TQC_DISQ);
791 }
792 }
793
794 if (ic & (MVGBE_IC_RXBUF | MVGBE_IC_RXERROR))
795 mvgbe_rxeof(sc);
796
797 if (ice & (MVGBE_ICE_TXBUF | MVGBE_ICE_TXERR))
798 mvgbe_txeof(sc);
799 }
800
801 if (!IFQ_IS_EMPTY(&ifp->if_snd))
802 mvgbe_start(ifp);
803
804 #if NRND > 0
805 if (RND_ENABLED(&sc->sc_rnd_source))
806 rnd_add_uint32(&sc->sc_rnd_source, datum);
807 #endif
808
809 return claimed;
810 }
811
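/*
 * ifnet if_start hook: bail out unless the interface is running and the
 * link is up, then dequeue packets, encapsulate them with mvgbe_encap(),
 * tap BPF, and finally kick TX queue 0 and arm the watchdog if anything
 * was queued.
 */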
812 static void
813 mvgbe_start(struct ifnet *ifp)
814 {
815 struct mvgbe_softc *sc = ifp->if_softc;
816 struct mbuf *m_head = NULL;
817 uint32_t idx = sc->sc_cdata.mvgbe_tx_prod;
818 int pkts = 0;
819
820 DPRINTFN(3, ("mvgbe_start (idx %d, tx_chain[idx] %p)\n", idx,
821 sc->sc_cdata.mvgbe_tx_chain[idx].mvgbe_mbuf));
822
823 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
824 return;
825 /* If Link is DOWN, can't start TX */
826 if (!(MVGBE_READ(sc, MVGBE_PS) & MVGBE_PS_LINKUP))
827 return;
828
829 while (sc->sc_cdata.mvgbe_tx_chain[idx].mvgbe_mbuf == NULL) {
830 IFQ_POLL(&ifp->if_snd, m_head);
831 if (m_head == NULL)
832 break;
833
834 /*
835 * Pack the data into the transmit ring. If we
836 * don't have room, set the OACTIVE flag and wait
837 * for the NIC to drain the ring.
838 */
839 if (mvgbe_encap(sc, m_head, &idx)) {
840 ifp->if_flags |= IFF_OACTIVE;
841 break;
842 }
843
844 /* now we are committed to transmitting the packet */
845 IFQ_DEQUEUE(&ifp->if_snd, m_head);
846 pkts++;
847
848 /*
849 * If there's a BPF listener, bounce a copy of this frame
850 * to him.
851 */
852 if (ifp->if_bpf)
853 bpf_ops->bpf_mtap(ifp->if_bpf, m_head);
854 }
855 if (pkts == 0)
856 return;
857
858 /* Transmit at Queue 0 */
859 if (idx != sc->sc_cdata.mvgbe_tx_prod) {
860 sc->sc_cdata.mvgbe_tx_prod = idx;
861 MVGBE_WRITE(sc, MVGBE_TQC, MVGBE_TQC_ENQ);
862
863 /*
864 * Set a timeout in case the chip goes out to lunch.
865 */
866 ifp->if_timer = 5;
867 }
868 }
869
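/*
 * ioctl handler: SIOCSIFFLAGS starts or stops the interface, media
 * ioctls are passed to ifmedia, and everything else goes through
 * ether_ioctl(); an ENETRESET from the latter triggers a multicast
 * filter reload while the interface is running.
 */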
870 static int
871 mvgbe_ioctl(struct ifnet *ifp, u_long command, void *data)
872 {
873 struct mvgbe_softc *sc = ifp->if_softc;
874 struct ifreq *ifr = data;
875 struct mii_data *mii;
876 int s, error = 0;
877
878 s = splnet();
879
880 switch (command) {
881 case SIOCSIFFLAGS:
882 DPRINTFN(2, ("mvgbe_ioctl IFFLAGS\n"));
883 if (ifp->if_flags & IFF_UP)
884 mvgbe_init(ifp);
885 else
886 if (ifp->if_flags & IFF_RUNNING)
887 mvgbe_stop(ifp, 0);
888 sc->sc_if_flags = ifp->if_flags;
889 error = 0;
890 break;
891
892 case SIOCGIFMEDIA:
893 case SIOCSIFMEDIA:
894 DPRINTFN(2, ("mvgbe_ioctl MEDIA\n"));
895 mii = &sc->sc_mii;
896 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
897 break;
898
899 default:
900 DPRINTFN(2, ("mvgbe_ioctl ETHER\n"));
901 error = ether_ioctl(ifp, command, data);
902 if (error == ENETRESET) {
903 if (ifp->if_flags & IFF_RUNNING) {
904 mvgbe_setmulti(sc);
905 DPRINTFN(2,
906 ("mvgbe_ioctl setmulti called\n"));
907 }
908 error = 0;
909 }
910 break;
911 }
912
913 splx(s);
914
915 return error;
916 }
917
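/*
 * Bring the port up: reset software state via mvgbe_stop(), rebuild the
 * TX/RX rings, program the port serial control register (flow control,
 * 9700-byte MRU, full duplex), point the chip at the ring addresses,
 * program TQTBCOUNT/TQTBCONFIG (queue 0 gets 0x3fffffff, the rest 0),
 * configure SDMA burst sizes and byte order, renegotiate media, enable
 * the port and unmask the RX/TX/link-change interrupts.
 */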
918 static int
919 mvgbe_init(struct ifnet *ifp)
920 {
921 struct mvgbe_softc *sc = ifp->if_softc;
922 struct mii_data *mii = &sc->sc_mii;
923 uint32_t reg, val;
924 int i, s;
925
926 DPRINTFN(2, ("mvgbe_init\n"));
927
928 s = splnet();
929
930 if (ifp->if_flags & IFF_RUNNING) {
931 splx(s);
932 return 0;
933 }
934
935 /* Cancel pending I/O and free all RX/TX buffers. */
936 mvgbe_stop(ifp, 0);
937
938 /* clear all ethernet port interrupts */
939 MVGBE_WRITE(sc, MVGBE_IC, 0);
940 MVGBE_WRITE(sc, MVGBE_ICE, 0);
941
942 /* Init TX/RX descriptors */
943 if (mvgbe_init_tx_ring(sc) == ENOBUFS) {
944 aprint_error_ifnet(ifp,
945 "initialization failed: no memory for tx buffers\n");
946 splx(s);
947 return ENOBUFS;
948 }
949 if (mvgbe_init_rx_ring(sc) == ENOBUFS) {
950 aprint_error_ifnet(ifp,
951 "initialization failed: no memory for rx buffers\n");
952 splx(s);
953 return ENOBUFS;
954 }
955
956 MVGBE_WRITE(sc, MVGBE_PSC,
957 MVGBE_PSC_ANFC | /* Enable Auto-Neg Flow Ctrl */
958 MVGBE_PSC_RESERVED | /* Must be set to 1 */
959 MVGBE_PSC_FLFAIL | /* Do NOT Force Link Fail */
960 MVGBE_PSC_MRU(MVGBE_PSC_MRU_9700) | /* always program the 9700-byte MRU */
961 MVGBE_PSC_SETFULLDX); /* Set_FullDx */
962
963 MVGBE_WRITE(sc, MVGBE_CRDP(0), MVGBE_RX_RING_ADDR(sc, 0));
964 MVGBE_WRITE(sc, MVGBE_TCQDP, MVGBE_TX_RING_ADDR(sc, 0));
965
966 val = 0x3fffffff;
967 for (i = 0; i < 8; i++) {
968 /*
969 * Queue 0 must be programmed to 0x3fffffff. Queue 1 through 7
970 * must be programmed to 0x00000000.
971 */
972 MVGBE_WRITE(sc, MVGBE_TQTBCOUNT(i), val);
973 MVGBE_WRITE(sc, MVGBE_TQTBCONFIG(i), val);
974 val = 0x00000000;
975 }
976
977 MVGBE_WRITE(sc, MVGBE_PXC, MVGBE_PXC_RXCS);
978 MVGBE_WRITE(sc, MVGBE_PXCX, 0);
979 MVGBE_WRITE(sc, MVGBE_SDC,
980 MVGBE_SDC_RXBSZ_16_64BITWORDS |
981 #if BYTE_ORDER == LITTLE_ENDIAN
982 MVGBE_SDC_BLMR | /* Big/Little Endian Receive Mode: No swap */
983 MVGBE_SDC_BLMT | /* Big/Little Endian Transmit Mode: No swap */
984 #endif
985 MVGBE_SDC_TXBSZ_16_64BITWORDS);
986
987 mii_mediachg(mii);
988
989 /* Enable port */
990 reg = MVGBE_READ(sc, MVGBE_PSC);
991 MVGBE_WRITE(sc, MVGBE_PSC, reg | MVGBE_PSC_PORTEN);
992
993 /* If Link is UP, Start RX and TX traffic */
994 if (MVGBE_READ(sc, MVGBE_PS) & MVGBE_PS_LINKUP) {
995 /* Enable port RX/TX. */
996 MVGBE_WRITE(sc, MVGBE_RQC, MVGBE_RQC_ENQ(0));
997 MVGBE_WRITE(sc, MVGBE_TQC, MVGBE_TQC_ENQ);
998 }
999
1000 /* Enable interrupt masks */
1001 MVGBE_WRITE(sc, MVGBE_PIM,
1002 MVGBE_IC_RXBUF |
1003 MVGBE_IC_EXTEND |
1004 MVGBE_IC_RXBUFQ_MASK |
1005 MVGBE_IC_RXERROR |
1006 MVGBE_IC_RXERRQ_MASK);
1007 MVGBE_WRITE(sc, MVGBE_PEIM,
1008 MVGBE_ICE_TXBUF |
1009 MVGBE_ICE_TXERR |
1010 MVGBE_ICE_LINKCHG);
1011
1012 ifp->if_flags |= IFF_RUNNING;
1013 ifp->if_flags &= ~IFF_OACTIVE;
1014
1015 splx(s);
1016
1017 return 0;
1018 }
1019
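/*
 * Stop the port: disable the RX and TX queues, force the link down,
 * busy-wait (with generous timeouts) for RX activity to cease and the
 * TX FIFO to drain, clear the port enable bit, mask interrupts and free
 * any mbufs still sitting in the rings.
 */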
1020 /* ARGSUSED */
1021 static void
1022 mvgbe_stop(struct ifnet *ifp, int disable)
1023 {
1024 struct mvgbe_softc *sc = ifp->if_softc;
1025 struct mvgbe_chain_data *cdata = &sc->sc_cdata;
1026 uint32_t reg;
1027 int i, cnt;
1028
1029 DPRINTFN(2, ("mvgbe_stop\n"));
1030
1031 /* Stop Rx port activity. Check port Rx activity. */
1032 reg = MVGBE_READ(sc, MVGBE_RQC);
1033 if (reg & MVGBE_RQC_ENQ_MASK)
1034 /* Issue stop command for active channels only */
1035 MVGBE_WRITE(sc, MVGBE_RQC, MVGBE_RQC_DISQ_DISABLE(reg));
1036
1037 /* Stop Tx port activity. Check port Tx activity. */
1038 if (MVGBE_READ(sc, MVGBE_TQC) & MVGBE_TQC_ENQ)
1039 MVGBE_WRITE(sc, MVGBE_TQC, MVGBE_TQC_DISQ);
1040
1041 /* Force link down */
1042 reg = MVGBE_READ(sc, MVGBE_PSC);
1043 MVGBE_WRITE(sc, MVGBE_PSC, reg & ~MVGBE_PSC_FLFAIL);
1044
1045 #define RX_DISABLE_TIMEOUT 0x1000000
1046 #define TX_FIFO_EMPTY_TIMEOUT 0x1000000
1047 /* Wait for all Rx activity to terminate. */
1048 cnt = 0;
1049 do {
1050 if (cnt >= RX_DISABLE_TIMEOUT) {
1051 aprint_error_ifnet(ifp,
1052 "timeout for RX stopped. rqc 0x%x\n", reg);
1053 break;
1054 }
1055 cnt++;
1056
1057 /*
1058 * Check the Receive Queue Command register to confirm that
1059 * all Rx queues have stopped
1060 */
1061 reg = MVGBE_READ(sc, MVGBE_RQC);
1062 } while (reg & 0xff);
1063
1064 /* Double check to verify that TX FIFO is empty */
1065 cnt = 0;
1066 while (1) {
1067 do {
1068 if (cnt >= TX_FIFO_EMPTY_TIMEOUT) {
1069 aprint_error_ifnet(ifp,
1070 "timeout for TX FIFO empty. status 0x%x\n",
1071 reg);
1072 break;
1073 }
1074 cnt++;
1075
1076 reg = MVGBE_READ(sc, MVGBE_PS);
1077 } while
1078 (!(reg & MVGBE_PS_TXFIFOEMP) || reg & MVGBE_PS_TXINPROG);
1079
1080 if (cnt >= TX_FIFO_EMPTY_TIMEOUT)
1081 break;
1082
1083 /* Double check */
1084 reg = MVGBE_READ(sc, MVGBE_PS);
1085 if (reg & MVGBE_PS_TXFIFOEMP && !(reg & MVGBE_PS_TXINPROG))
1086 break;
1087 else
1088 aprint_error_ifnet(ifp,
1089 "TX FIFO empty double check failed."
1090 " %d loops, status 0x%x\n", cnt, reg);
1091 }
1092
1093 /* Reset the Enable bit in the Port Serial Control Register */
1094 reg = MVGBE_READ(sc, MVGBE_PSC);
1095 MVGBE_WRITE(sc, MVGBE_PSC, reg & ~MVGBE_PSC_PORTEN);
1096
1097 /* Disable interrupts */
1098 MVGBE_WRITE(sc, MVGBE_PIM, 0);
1099 MVGBE_WRITE(sc, MVGBE_PEIM, 0);
1100
1101 /* Free RX and TX mbufs still in the queues. */
1102 for (i = 0; i < MVGBE_RX_RING_CNT; i++) {
1103 if (cdata->mvgbe_rx_chain[i].mvgbe_mbuf != NULL) {
1104 m_freem(cdata->mvgbe_rx_chain[i].mvgbe_mbuf);
1105 cdata->mvgbe_rx_chain[i].mvgbe_mbuf = NULL;
1106 }
1107 }
1108 for (i = 0; i < MVGBE_TX_RING_CNT; i++) {
1109 if (cdata->mvgbe_tx_chain[i].mvgbe_mbuf != NULL) {
1110 m_freem(cdata->mvgbe_tx_chain[i].mvgbe_mbuf);
1111 cdata->mvgbe_tx_chain[i].mvgbe_mbuf = NULL;
1112 }
1113 }
1114
1115 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1116 }
1117
1118 static void
1119 mvgbe_watchdog(struct ifnet *ifp)
1120 {
1121 struct mvgbe_softc *sc = ifp->if_softc;
1122
1123 /*
1124 * Reclaim first as there is a possibility of losing Tx completion
1125 * interrupts.
1126 */
1127 mvgbe_txeof(sc);
1128 if (sc->sc_cdata.mvgbe_tx_cnt != 0) {
1129 aprint_error_ifnet(ifp, "watchdog timeout\n");
1130
1131 ifp->if_oerrors++;
1132
1133 mvgbe_init(ifp);
1134 }
1135 }
1136
1137
1138 /*
1139 * Set media options.
1140 */
1141 static int
1142 mvgbe_ifmedia_upd(struct ifnet *ifp)
1143 {
1144 struct mvgbe_softc *sc = ifp->if_softc;
1145
1146 mii_mediachg(&sc->sc_mii);
1147 return 0;
1148 }
1149
1150 /*
1151 * Report current media status.
1152 */
1153 static void
1154 mvgbe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1155 {
1156 struct mvgbe_softc *sc = ifp->if_softc;
1157
1158 mii_pollstat(&sc->sc_mii);
1159 ifmr->ifm_active = sc->sc_mii.mii_media_active;
1160 ifmr->ifm_status = sc->sc_mii.mii_media_status;
1161 }
1162
1163
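/*
 * Rebuild the RX ring: zero the descriptors, link them into a circular
 * list through nextdescptr, and give every slot a jumbo buffer via
 * mvgbe_newbuf().  mvgbe_init_tx_ring() below does the same for TX,
 * with all descriptors initially owned by the host.
 */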
1164 static int
1165 mvgbe_init_rx_ring(struct mvgbe_softc *sc)
1166 {
1167 struct mvgbe_chain_data *cd = &sc->sc_cdata;
1168 struct mvgbe_ring_data *rd = sc->sc_rdata;
1169 int i;
1170
1171 bzero((char *)rd->mvgbe_rx_ring,
1172 sizeof(struct mvgbe_rx_desc) * MVGBE_RX_RING_CNT);
1173
1174 for (i = 0; i < MVGBE_RX_RING_CNT; i++) {
1175 cd->mvgbe_rx_chain[i].mvgbe_desc =
1176 &rd->mvgbe_rx_ring[i];
1177 if (i == MVGBE_RX_RING_CNT - 1) {
1178 cd->mvgbe_rx_chain[i].mvgbe_next =
1179 &cd->mvgbe_rx_chain[0];
1180 rd->mvgbe_rx_ring[i].nextdescptr =
1181 MVGBE_RX_RING_ADDR(sc, 0);
1182 } else {
1183 cd->mvgbe_rx_chain[i].mvgbe_next =
1184 &cd->mvgbe_rx_chain[i + 1];
1185 rd->mvgbe_rx_ring[i].nextdescptr =
1186 MVGBE_RX_RING_ADDR(sc, i + 1);
1187 }
1188 }
1189
1190 for (i = 0; i < MVGBE_RX_RING_CNT; i++) {
1191 if (mvgbe_newbuf(sc, i, NULL,
1192 sc->sc_cdata.mvgbe_rx_jumbo_map) == ENOBUFS) {
1193 aprint_error_ifnet(&sc->sc_ethercom.ec_if,
1194 "failed alloc of %dth mbuf\n", i);
1195 return ENOBUFS;
1196 }
1197 }
1198 sc->sc_cdata.mvgbe_rx_prod = 0;
1199 sc->sc_cdata.mvgbe_rx_cons = 0;
1200
1201 return 0;
1202 }
1203
1204 static int
1205 mvgbe_init_tx_ring(struct mvgbe_softc *sc)
1206 {
1207 struct mvgbe_chain_data *cd = &sc->sc_cdata;
1208 struct mvgbe_ring_data *rd = sc->sc_rdata;
1209 int i;
1210
1211 bzero((char *)sc->sc_rdata->mvgbe_tx_ring,
1212 sizeof(struct mvgbe_tx_desc) * MVGBE_TX_RING_CNT);
1213
1214 for (i = 0; i < MVGBE_TX_RING_CNT; i++) {
1215 cd->mvgbe_tx_chain[i].mvgbe_desc =
1216 &rd->mvgbe_tx_ring[i];
1217 if (i == MVGBE_TX_RING_CNT - 1) {
1218 cd->mvgbe_tx_chain[i].mvgbe_next =
1219 &cd->mvgbe_tx_chain[0];
1220 rd->mvgbe_tx_ring[i].nextdescptr =
1221 MVGBE_TX_RING_ADDR(sc, 0);
1222 } else {
1223 cd->mvgbe_tx_chain[i].mvgbe_next =
1224 &cd->mvgbe_tx_chain[i + 1];
1225 rd->mvgbe_tx_ring[i].nextdescptr =
1226 MVGBE_TX_RING_ADDR(sc, i + 1);
1227 }
1228 rd->mvgbe_tx_ring[i].cmdsts = MVGBE_BUFFER_OWNED_BY_HOST;
1229 }
1230
1231 sc->sc_cdata.mvgbe_tx_prod = 0;
1232 sc->sc_cdata.mvgbe_tx_cons = 0;
1233 sc->sc_cdata.mvgbe_tx_cnt = 0;
1234
1235 MVGBE_CDTXSYNC(sc, 0, MVGBE_TX_RING_CNT,
1236 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1237
1238 return 0;
1239 }
1240
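/*
 * Set up RX descriptor i.  When m is NULL a fresh mbuf header is paired
 * with a buffer from the jumbo pool; otherwise the existing mbuf is
 * recycled.  The data pointer is aligned to MVGBE_BUF_ALIGN, the
 * descriptor's buffer address is computed relative to the jumbo pool's
 * DMA map, and ownership is handed back to the DMA engine.
 */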
1241 static int
1242 mvgbe_newbuf(struct mvgbe_softc *sc, int i, struct mbuf *m,
1243 bus_dmamap_t dmamap)
1244 {
1245 struct mbuf *m_new = NULL;
1246 struct mvgbe_chain *c;
1247 struct mvgbe_rx_desc *r;
1248 int align;
1249
1250 if (m == NULL) {
1251 void *buf = NULL;
1252
1253 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1254 if (m_new == NULL) {
1255 aprint_error_ifnet(&sc->sc_ethercom.ec_if,
1256 "no memory for rx list -- packet dropped!\n");
1257 return ENOBUFS;
1258 }
1259
1260 /* Allocate the jumbo buffer */
1261 buf = mvgbe_jalloc(sc);
1262 if (buf == NULL) {
1263 m_freem(m_new);
1264 DPRINTFN(1, ("%s jumbo allocation failed -- packet "
1265 "dropped!\n", sc->sc_ethercom.ec_if.if_xname));
1266 return ENOBUFS;
1267 }
1268
1269 /* Attach the buffer to the mbuf */
1270 m_new->m_len = m_new->m_pkthdr.len = MVGBE_JLEN;
1271 MEXTADD(m_new, buf, MVGBE_JLEN, 0, mvgbe_jfree, sc);
1272 } else {
1273 /*
1274 * We're re-using a previously allocated mbuf;
1275 * be sure to re-init pointers and lengths to
1276 * default values.
1277 */
1278 m_new = m;
1279 m_new->m_len = m_new->m_pkthdr.len = MVGBE_JLEN;
1280 m_new->m_data = m_new->m_ext.ext_buf;
1281 }
1282 align = (u_long)m_new->m_data & MVGBE_BUF_MASK;
1283 if (align != 0)
1284 m_adj(m_new, MVGBE_BUF_ALIGN - align);
1285
1286 c = &sc->sc_cdata.mvgbe_rx_chain[i];
1287 r = c->mvgbe_desc;
1288 c->mvgbe_mbuf = m_new;
1289 r->bufptr = dmamap->dm_segs[0].ds_addr +
1290 (((vaddr_t)m_new->m_data - (vaddr_t)sc->sc_cdata.mvgbe_jumbo_buf));
1291 r->bufsize = MVGBE_JLEN & ~MVGBE_BUF_MASK;
1292 r->cmdsts = MVGBE_BUFFER_OWNED_BY_DMA | MVGBE_RX_ENABLE_INTERRUPT;
1293
1294 MVGBE_CDRXSYNC(sc, i, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1295
1296 return 0;
1297 }
1298
1299 /*
1300 * Memory management for jumbo frames.
1301 */
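/*
 * The pool is one physically contiguous, DMA-mapped chunk of MVGBE_JMEM
 * bytes carved into MVGBE_JSLOTS buffers of MVGBE_JLEN bytes each.
 * Free buffers are tracked on sc_jfree_listhead and handed out by
 * mvgbe_jalloc(); mvgbe_jfree() is registered through MEXTADD() as the
 * external-storage free routine, so buffers return to the free list
 * automatically when their mbufs are released.
 */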
1302
1303 static int
1304 mvgbe_alloc_jumbo_mem(struct mvgbe_softc *sc)
1305 {
1306 char *ptr, *kva;
1307 bus_dma_segment_t seg;
1308 int i, rseg, state, error;
1309 struct mvgbe_jpool_entry *entry;
1310
1311 state = error = 0;
1312
1313 /* Grab a big chunk o' storage. */
1314 if (bus_dmamem_alloc(sc->sc_dmat, MVGBE_JMEM, PAGE_SIZE, 0,
1315 &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
1316 aprint_error_dev(sc->sc_dev, "can't alloc rx buffers\n");
1317 return ENOBUFS;
1318 }
1319
1320 state = 1;
1321 if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, MVGBE_JMEM,
1322 (void **)&kva, BUS_DMA_NOWAIT)) {
1323 aprint_error_dev(sc->sc_dev,
1324 "can't map dma buffers (%d bytes)\n", MVGBE_JMEM);
1325 error = ENOBUFS;
1326 goto out;
1327 }
1328
1329 state = 2;
1330 if (bus_dmamap_create(sc->sc_dmat, MVGBE_JMEM, 1, MVGBE_JMEM, 0,
1331 BUS_DMA_NOWAIT, &sc->sc_cdata.mvgbe_rx_jumbo_map)) {
1332 aprint_error_dev(sc->sc_dev, "can't create dma map\n");
1333 error = ENOBUFS;
1334 goto out;
1335 }
1336
1337 state = 3;
1338 if (bus_dmamap_load(sc->sc_dmat, sc->sc_cdata.mvgbe_rx_jumbo_map,
1339 kva, MVGBE_JMEM, NULL, BUS_DMA_NOWAIT)) {
1340 aprint_error_dev(sc->sc_dev, "can't load dma map\n");
1341 error = ENOBUFS;
1342 goto out;
1343 }
1344
1345 state = 4;
1346 sc->sc_cdata.mvgbe_jumbo_buf = (void *)kva;
1347 DPRINTFN(1,("mvgbe_jumbo_buf = 0x%p\n", sc->sc_cdata.mvgbe_jumbo_buf));
1348
1349 LIST_INIT(&sc->sc_jfree_listhead);
1350 LIST_INIT(&sc->sc_jinuse_listhead);
1351
1352 /*
1353 * Now divide it up into MVGBE_JLEN-byte pieces and save the addresses
1354 * in an array.
1355 */
1356 ptr = sc->sc_cdata.mvgbe_jumbo_buf;
1357 for (i = 0; i < MVGBE_JSLOTS; i++) {
1358 sc->sc_cdata.mvgbe_jslots[i] = ptr;
1359 ptr += MVGBE_JLEN;
1360 entry = kmem_alloc(sizeof(struct mvgbe_jpool_entry), KM_SLEEP);
1361 if (entry == NULL) {
1362 aprint_error_dev(sc->sc_dev,
1363 "no memory for jumbo buffer queue!\n");
1364 error = ENOBUFS;
1365 goto out;
1366 }
1367 entry->slot = i;
1368 if (i)
1369 LIST_INSERT_HEAD(&sc->sc_jfree_listhead, entry,
1370 jpool_entries);
1371 else
1372 LIST_INSERT_HEAD(&sc->sc_jinuse_listhead, entry,
1373 jpool_entries);
1374 }
1375 out:
1376 if (error != 0) {
1377 switch (state) {
1378 case 4:
1379 bus_dmamap_unload(sc->sc_dmat,
1380 sc->sc_cdata.mvgbe_rx_jumbo_map);
1381 case 3:
1382 bus_dmamap_destroy(sc->sc_dmat,
1383 sc->sc_cdata.mvgbe_rx_jumbo_map);
1384 case 2:
1385 bus_dmamem_unmap(sc->sc_dmat, kva, MVGBE_JMEM);
1386 case 1:
1387 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
1388 break;
1389 default:
1390 break;
1391 }
1392 }
1393
1394 return error;
1395 }
1396
1397 /*
1398 * Allocate a jumbo buffer.
1399 */
1400 static void *
1401 mvgbe_jalloc(struct mvgbe_softc *sc)
1402 {
1403 struct mvgbe_jpool_entry *entry;
1404
1405 entry = LIST_FIRST(&sc->sc_jfree_listhead);
1406
1407 if (entry == NULL)
1408 return NULL;
1409
1410 LIST_REMOVE(entry, jpool_entries);
1411 LIST_INSERT_HEAD(&sc->sc_jinuse_listhead, entry, jpool_entries);
1412 return sc->sc_cdata.mvgbe_jslots[entry->slot];
1413 }
1414
1415 /*
1416 * Release a jumbo buffer.
1417 */
1418 static void
1419 mvgbe_jfree(struct mbuf *m, void *buf, size_t size, void *arg)
1420 {
1421 struct mvgbe_jpool_entry *entry;
1422 struct mvgbe_softc *sc;
1423 int i, s;
1424
1425 /* Extract the softc struct pointer. */
1426 sc = (struct mvgbe_softc *)arg;
1427
1428 if (sc == NULL)
1429 panic("%s: can't find softc pointer!", __func__);
1430
1431 /* calculate the slot this buffer belongs to */
1432
1433 i = ((vaddr_t)buf - (vaddr_t)sc->sc_cdata.mvgbe_jumbo_buf) / MVGBE_JLEN;
1434
1435 if ((i < 0) || (i >= MVGBE_JSLOTS))
1436 panic("%s: asked to free buffer that we don't manage!",
1437 __func__);
1438
1439 s = splvm();
1440 entry = LIST_FIRST(&sc->sc_jinuse_listhead);
1441 if (entry == NULL)
1442 panic("%s: buffer not in use!", __func__);
1443 entry->slot = i;
1444 LIST_REMOVE(entry, jpool_entries);
1445 LIST_INSERT_HEAD(&sc->sc_jfree_listhead, entry, jpool_entries);
1446
1447 if (__predict_true(m != NULL))
1448 pool_cache_put(mb_cache, m);
1449 splx(s);
1450 }
1451
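/*
 * Map m_head with bus_dmamap_load_mbuf() and translate each DMA segment
 * into a TX descriptor starting at *txidx.  Checksum-offload command
 * bits are derived from the csum_flags saved before loading; the first
 * descriptor carries them together with the FIRST bit and the last
 * descriptor carries the LAST/interrupt bits (a single-segment packet
 * gets all of these in one descriptor).  On success *txidx is advanced
 * past the descriptors consumed.
 */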
1452 static int
1453 mvgbe_encap(struct mvgbe_softc *sc, struct mbuf *m_head,
1454 uint32_t *txidx)
1455 {
1456 struct mvgbe_tx_desc *f = NULL;
1457 struct mvgbe_txmap_entry *entry;
1458 bus_dma_segment_t *txseg;
1459 bus_dmamap_t txmap;
1460 uint32_t first, current, last, cmdsts = 0;
1461 int m_csumflags, i;
1462
1463 DPRINTFN(3, ("mvgbe_encap\n"));
1464
1465 entry = SIMPLEQ_FIRST(&sc->sc_txmap_head);
1466 if (entry == NULL) {
1467 DPRINTFN(2, ("mvgbe_encap: no txmap available\n"));
1468 return ENOBUFS;
1469 }
1470 txmap = entry->dmamap;
1471
1472 first = current = last = *txidx;
1473
1474 /*
1475 * Preserve m_pkthdr.csum_flags here since m_head might be
1476 * updated by m_defrag()
1477 */
1478 m_csumflags = m_head->m_pkthdr.csum_flags;
1479
1480 /*
1481 * Start packing the mbufs in this chain into
1482 * the fragment pointers. Stop when we run out
1483 * of fragments or hit the end of the mbuf chain.
1484 */
1485 if (bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m_head, BUS_DMA_NOWAIT)) {
1486 DPRINTFN(1, ("mvgbe_encap: dmamap failed\n"));
1487 return ENOBUFS;
1488 }
1489
1490 /* Sync the DMA map. */
1491 bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
1492 BUS_DMASYNC_PREWRITE);
1493
1494 if (sc->sc_cdata.mvgbe_tx_cnt + txmap->dm_nsegs >=
1495 MVGBE_TX_RING_CNT) {
1496 DPRINTFN(2, ("mvgbe_encap: too few descriptors free\n"));
1497 bus_dmamap_unload(sc->sc_dmat, txmap);
1498 return ENOBUFS;
1499 }
1500
1501 txseg = txmap->dm_segs;
1502
1503 DPRINTFN(2, ("mvgbe_encap: dm_nsegs=%d\n", txmap->dm_nsegs));
1504
1505 for (i = 0; i < txmap->dm_nsegs; i++) {
1506 f = &sc->sc_rdata->mvgbe_tx_ring[current];
1507 f->bufptr = txseg[i].ds_addr;
1508 f->bytecnt = txseg[i].ds_len;
1509 f->cmdsts = MVGBE_BUFFER_OWNED_BY_DMA;
1510 last = current;
1511 current = (current + 1) % MVGBE_TX_RING_CNT;
1512 }
1513
1514 if (m_csumflags & M_CSUM_IPv4)
1515 cmdsts |= MVGBE_TX_GENERATE_IP_CHKSUM;
1516 if (m_csumflags & M_CSUM_TCPv4)
1517 cmdsts |=
1518 MVGBE_TX_GENERATE_L4_CHKSUM | MVGBE_TX_L4_TYPE_TCP;
1519 if (m_csumflags & M_CSUM_UDPv4)
1520 cmdsts |=
1521 MVGBE_TX_GENERATE_L4_CHKSUM | MVGBE_TX_L4_TYPE_UDP;
1522 if (m_csumflags & (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
1523 const int iphdr_unitlen = sizeof(struct ip) / sizeof(uint32_t);
1524
1525 cmdsts |= MVGBE_TX_IP_NO_FRAG |
1526 MVGBE_TX_IP_HEADER_LEN(iphdr_unitlen); /* unit is 4B */
1527 }
1528 if (txmap->dm_nsegs == 1)
1529 f->cmdsts = cmdsts |
1530 MVGBE_BUFFER_OWNED_BY_DMA |
1531 MVGBE_TX_GENERATE_CRC |
1532 MVGBE_TX_ENABLE_INTERRUPT |
1533 MVGBE_TX_ZERO_PADDING |
1534 MVGBE_TX_FIRST_DESC |
1535 MVGBE_TX_LAST_DESC;
1536 else {
1537 f = &sc->sc_rdata->mvgbe_tx_ring[first];
1538 f->cmdsts = cmdsts |
1539 MVGBE_BUFFER_OWNED_BY_DMA |
1540 MVGBE_TX_GENERATE_CRC |
1541 MVGBE_TX_FIRST_DESC;
1542
1543 f = &sc->sc_rdata->mvgbe_tx_ring[last];
1544 f->cmdsts =
1545 MVGBE_BUFFER_OWNED_BY_DMA |
1546 MVGBE_TX_ENABLE_INTERRUPT |
1547 MVGBE_TX_ZERO_PADDING |
1548 MVGBE_TX_LAST_DESC;
1549 }
1550
1551 sc->sc_cdata.mvgbe_tx_chain[last].mvgbe_mbuf = m_head;
1552 SIMPLEQ_REMOVE_HEAD(&sc->sc_txmap_head, link);
1553 sc->sc_cdata.mvgbe_tx_map[last] = entry;
1554
1555 /* Sync descriptors before handing to chip */
1556 MVGBE_CDTXSYNC(sc, *txidx, txmap->dm_nsegs,
1557 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1558
1559 sc->sc_cdata.mvgbe_tx_cnt += i;
1560 *txidx = current;
1561
1562 DPRINTFN(3, ("mvgbe_encap: completed successfully\n"));
1563
1564 return 0;
1565 }
1566
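/*
 * Harvest received frames: walk the RX ring from mvgbe_rx_prod until a
 * descriptor still owned by the DMA engine is found, convert the
 * hardware checksum status into csum_flags, and replace each consumed
 * jumbo buffer with a fresh one (falling back to m_devget() copies when
 * the pool is exhausted).  The 2-byte hardware header is trimmed before
 * the frame is passed up.
 */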
1567 static void
1568 mvgbe_rxeof(struct mvgbe_softc *sc)
1569 {
1570 struct mvgbe_chain_data *cdata = &sc->sc_cdata;
1571 struct mvgbe_rx_desc *cur_rx;
1572 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1573 struct mbuf *m;
1574 bus_dmamap_t dmamap;
1575 uint32_t rxstat;
1576 int idx, cur, total_len;
1577
1578 idx = sc->sc_cdata.mvgbe_rx_prod;
1579
1580 DPRINTFN(3, ("mvgbe_rxeof %d\n", idx));
1581
1582 for (;;) {
1583 cur = idx;
1584
1585 /* Sync the descriptor */
1586 MVGBE_CDRXSYNC(sc, idx,
1587 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1588
1589 cur_rx = &sc->sc_rdata->mvgbe_rx_ring[idx];
1590
1591 if ((cur_rx->cmdsts & MVGBE_BUFFER_OWNED_MASK) ==
1592 MVGBE_BUFFER_OWNED_BY_DMA) {
1593 /* Invalidate the descriptor -- it's not ready yet */
1594 MVGBE_CDRXSYNC(sc, idx, BUS_DMASYNC_PREREAD);
1595 sc->sc_cdata.mvgbe_rx_prod = idx;
1596 break;
1597 }
1598 #ifdef DIAGNOSTIC
1599 if ((cur_rx->cmdsts &
1600 (MVGBE_RX_LAST_DESC | MVGBE_RX_FIRST_DESC)) !=
1601 (MVGBE_RX_LAST_DESC | MVGBE_RX_FIRST_DESC))
1602 panic(
1603 "mvgbe_rxeof: buffer size is smaller than packet");
1604 #endif
1605
1606 dmamap = sc->sc_cdata.mvgbe_rx_jumbo_map;
1607
1608 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
1609 BUS_DMASYNC_POSTREAD);
1610
1611 m = cdata->mvgbe_rx_chain[idx].mvgbe_mbuf;
1612 cdata->mvgbe_rx_chain[idx].mvgbe_mbuf = NULL;
1613 total_len = cur_rx->bytecnt;
1614 rxstat = cur_rx->cmdsts;
1615
1616 cdata->mvgbe_rx_map[idx] = NULL;
1617
1618 idx = (idx + 1) % MVGBE_RX_RING_CNT;
1619
1620 if (rxstat & MVGBE_ERROR_SUMMARY) {
1621 #if 0
1622 int err = rxstat & MVGBE_RX_ERROR_CODE_MASK;
1623
1624 if (err == MVGBE_RX_CRC_ERROR)
1625 ifp->if_ierrors++;
1626 if (err == MVGBE_RX_OVERRUN_ERROR)
1627 ifp->if_ierrors++;
1628 if (err == MVGBE_RX_MAX_FRAME_LEN_ERROR)
1629 ifp->if_ierrors++;
1630 if (err == MVGBE_RX_RESOURCE_ERROR)
1631 ifp->if_ierrors++;
1632 #else
1633 ifp->if_ierrors++;
1634 #endif
1635 mvgbe_newbuf(sc, cur, m, dmamap);
1636 continue;
1637 }
1638
1639 if (total_len > MVGBE_RX_CSUM_MIN_BYTE) {
1640 /* Check IP header checksum */
1641 if (rxstat & MVGBE_RX_IP_FRAME_TYPE) {
1642 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
1643 if (!(rxstat & MVGBE_RX_IP_HEADER_OK))
1644 m->m_pkthdr.csum_flags |=
1645 M_CSUM_IPv4_BAD;
1646 }
1647 /* Check TCP/UDP checksum */
1648 if (rxstat & MVGBE_RX_L4_TYPE_TCP)
1649 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
1650 else if (rxstat & MVGBE_RX_L4_TYPE_UDP)
1651 m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
1652 if (!(rxstat & MVGBE_RX_L4_CHECKSUM))
1653 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
1654 }
1655
1656 /*
1657 * Try to allocate a new jumbo buffer. If that
1658 * fails, copy the packet to mbufs and put the
1659 * jumbo buffer back in the ring so it can be
1660 * re-used. If allocating mbufs fails, then we
1661 * have to drop the packet.
1662 */
1663 if (mvgbe_newbuf(sc, cur, NULL, dmamap) == ENOBUFS) {
1664 struct mbuf *m0;
1665
1666 m0 = m_devget(mtod(m, char *), total_len, 0, ifp, NULL);
1667 mvgbe_newbuf(sc, cur, m, dmamap);
1668 if (m0 == NULL) {
1669 aprint_error_ifnet(ifp,
1670 "no receive buffers available --"
1671 " packet dropped!\n");
1672 ifp->if_ierrors++;
1673 continue;
1674 }
1675 m = m0;
1676 } else {
1677 m->m_pkthdr.rcvif = ifp;
1678 m->m_pkthdr.len = m->m_len = total_len;
1679 }
1680
1681 /* Skip the first 2 bytes (HW header) */
1682 m_adj(m, MVGBE_HWHEADER_SIZE);
1683 m->m_flags |= M_HASFCS;
1684
1685 ifp->if_ipackets++;
1686
1687 if (ifp->if_bpf)
1688 bpf_ops->bpf_mtap(ifp->if_bpf, m);
1689
1690 /* pass it on. */
1691 (*ifp->if_input)(ifp, m);
1692 }
1693 }
1694
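/*
 * Reclaim transmitted descriptors: walk from mvgbe_tx_cons to
 * mvgbe_tx_prod, stop at the first descriptor still owned by the DMA
 * engine, account errors and collisions, free the mbufs and return
 * their dmamaps to sc_txmap_head, and clear IFF_OACTIVE once enough
 * ring slots are free again.
 */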
1695 static void
1696 mvgbe_txeof(struct mvgbe_softc *sc)
1697 {
1698 struct mvgbe_chain_data *cdata = &sc->sc_cdata;
1699 struct mvgbe_tx_desc *cur_tx;
1700 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1701 struct mvgbe_txmap_entry *entry;
1702 int idx;
1703
1704 DPRINTFN(3, ("mvgbe_txeof\n"));
1705
1706 /*
1707 * Go through our tx ring and free mbufs for those
1708 * frames that have been sent.
1709 */
1710 idx = cdata->mvgbe_tx_cons;
1711 while (idx != cdata->mvgbe_tx_prod) {
1712 MVGBE_CDTXSYNC(sc, idx, 1,
1713 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1714
1715 cur_tx = &sc->sc_rdata->mvgbe_tx_ring[idx];
1716 #ifdef MVGBE_DEBUG
1717 if (mvgbe_debug >= 3)
1718 mvgbe_dump_txdesc(cur_tx, idx);
1719 #endif
1720 if ((cur_tx->cmdsts & MVGBE_BUFFER_OWNED_MASK) ==
1721 MVGBE_BUFFER_OWNED_BY_DMA) {
1722 MVGBE_CDTXSYNC(sc, idx, 1, BUS_DMASYNC_PREREAD);
1723 break;
1724 }
1725 if (cur_tx->cmdsts & MVGBE_TX_LAST_DESC)
1726 ifp->if_opackets++;
1727 if (cur_tx->cmdsts & MVGBE_ERROR_SUMMARY) {
1728 int err = cur_tx->cmdsts & MVGBE_TX_ERROR_CODE_MASK;
1729
1730 if (err == MVGBE_TX_LATE_COLLISION_ERROR)
1731 ifp->if_collisions++;
1732 if (err == MVGBE_TX_UNDERRUN_ERROR)
1733 ifp->if_oerrors++;
1734 if (err == MVGBE_TX_EXCESSIVE_COLLISION_ERRO)
1735 ifp->if_collisions++;
1736 }
1737 if (cdata->mvgbe_tx_chain[idx].mvgbe_mbuf != NULL) {
1738 entry = cdata->mvgbe_tx_map[idx];
1739
1740 m_freem(cdata->mvgbe_tx_chain[idx].mvgbe_mbuf);
1741 cdata->mvgbe_tx_chain[idx].mvgbe_mbuf = NULL;
1742
1743 bus_dmamap_sync(sc->sc_dmat, entry->dmamap, 0,
1744 entry->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1745
1746 bus_dmamap_unload(sc->sc_dmat, entry->dmamap);
1747 SIMPLEQ_INSERT_TAIL(&sc->sc_txmap_head, entry, link);
1748 cdata->mvgbe_tx_map[idx] = NULL;
1749 }
1750 cdata->mvgbe_tx_cnt--;
1751 idx = (idx + 1) % MVGBE_TX_RING_CNT;
1752 }
1753 if (cdata->mvgbe_tx_cnt == 0)
1754 ifp->if_timer = 0;
1755
1756 if (cdata->mvgbe_tx_cnt < MVGBE_TX_RING_CNT - 2)
1757 ifp->if_flags &= ~IFF_OACTIVE;
1758
1759 cdata->mvgbe_tx_cons = idx;
1760 }
1761
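/*
 * Program the receive filters.  IFF_PROMISC sets the unicast
 * promiscuous bit and opens every destination-address filter entry;
 * IFF_ALLMULTI opens the multicast tables only.  Otherwise the
 * multicast tables are cleared and a single unicast table entry,
 * selected by the low nibble of the station address, is enabled.
 */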
1762 static void
1763 mvgbe_setmulti(struct mvgbe_softc *sc)
1764 {
1765 struct ifnet *ifp= &sc->sc_ethercom.ec_if;
1766 uint32_t pxc, dfut, upm = 0, filter = 0;
1767 uint8_t ln = sc->sc_enaddr[5] & 0xf; /* last nibble */
1768
1769 if (ifp->if_flags & IFF_PROMISC) {
1770 upm = MVGBE_PXC_UPM;
1771 filter =
1772 MVGBE_DF(0, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS) |
1773 MVGBE_DF(1, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS) |
1774 MVGBE_DF(2, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS) |
1775 MVGBE_DF(3, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS);
1776 } else if (ifp->if_flags & IFF_ALLMULTI) {
1777 filter =
1778 MVGBE_DF(0, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS) |
1779 MVGBE_DF(1, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS) |
1780 MVGBE_DF(2, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS) |
1781 MVGBE_DF(3, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS);
1782 }
1783
1784 /* Set Unicast Promiscuous mode */
1785 pxc = MVGBE_READ(sc, MVGBE_PXC);
1786 pxc &= ~MVGBE_PXC_UPM;
1787 pxc |= upm;
1788 MVGBE_WRITE(sc, MVGBE_PXC, pxc);
1789
1790 /* Set Destination Address Filter Multicast Tables */
1791 MVGBE_WRITE_FILTER(sc, MVGBE_DFSMT, filter, MVGBE_NDFSMT);
1792 MVGBE_WRITE_FILTER(sc, MVGBE_DFOMT, filter, MVGBE_NDFOMT);
1793
1794 if (ifp->if_flags & IFF_PROMISC) {
1795 /* necessary ? */
1796 MVGBE_WRITE_FILTER(sc, MVGBE_DFUT, filter, MVGBE_NDFUT);
1797 return;
1798 }
1799
1800 /* Set Destination Address Filter Unicast Table */
1801 dfut = MVGBE_READ_FILTER(sc, MVGBE_DFUT + (ln & 0x0c));
1802 dfut &= ~MVGBE_DF(ln & 0x03, MVGBE_DF_QUEUE_MASK);;
1803 dfut |= MVGBE_DF(ln & 0x03, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS);
1804 MVGBE_WRITE_FILTER(sc, MVGBE_DFUT + (ln & 0x0c), dfut, 1);
1805 }
1806
1807 #ifdef MVGBE_DEBUG
1808 static void
1809 mvgbe_dump_txdesc(struct mvgbe_tx_desc *desc, int idx)
1810 {
1811 #define DESC_PRINT(X) \
1812 if (X) \
1813 printf("txdesc[%d]." #X "=%#x\n", idx, X);
1814
1815 #if BYTE_ORDER == BIG_ENDIAN
1816 DESC_PRINT(desc->bytecnt);
1817 DESC_PRINT(desc->l4ichk);
1818 DESC_PRINT(desc->cmdsts);
1819 DESC_PRINT(desc->nextdescptr);
1820 DESC_PRINT(desc->bufptr);
1821 #else /* LITTLE_ENDIAN */
1822 DESC_PRINT(desc->cmdsts);
1823 DESC_PRINT(desc->l4ichk);
1824 DESC_PRINT(desc->bytecnt);
1825 DESC_PRINT(desc->bufptr);
1826 DESC_PRINT(desc->nextdescptr);
1827 #endif
1828 #undef DESC_PRINT
1829 printf("txdesc[%d].desc->returninfo=%#lx\n", idx, desc->returninfo);
1830 printf("txdesc[%d].desc->alignbufptr=%p\n", idx, desc->alignbufptr);
1831 }
1832 #endif
1833