1 /* $NetBSD: if_et.c,v 1.34 2021/09/10 14:43:04 jakllsch Exp $ */
2 /* $OpenBSD: if_et.c,v 1.12 2008/07/11 09:29:02 kevlo $ */
3 /*
4 * Copyright (c) 2007 The DragonFly Project. All rights reserved.
5 *
6 * This code is derived from software contributed to The DragonFly Project
7 * by Sepherosa Ziehau <sepherosa (at) gmail.com>
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 *
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in
17 * the documentation and/or other materials provided with the
18 * distribution.
19 * 3. Neither the name of The DragonFly Project nor the names of its
20 * contributors may be used to endorse or promote products derived
21 * from this software without specific, prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
26 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
27 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
28 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
30 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
31 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
32 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
33 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.1 2007/10/12 14:12:42 sephe Exp $
37 */
38
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: if_et.c,v 1.34 2021/09/10 14:43:04 jakllsch Exp $");
41
42 #include "opt_inet.h"
43
44 #include <sys/param.h>
45 #include <sys/endian.h>
46 #include <sys/systm.h>
47 #include <sys/types.h>
48 #include <sys/sockio.h>
49 #include <sys/mbuf.h>
50 #include <sys/queue.h>
51 #include <sys/kernel.h>
52 #include <sys/device.h>
53 #include <sys/callout.h>
54 #include <sys/socket.h>
55
56 #include <sys/bus.h>
57
58 #include <net/if.h>
59 #include <net/if_dl.h>
60 #include <net/if_media.h>
61 #include <net/if_ether.h>
62 #include <net/if_arp.h>
63
64 #ifdef INET
65 #include <netinet/in.h>
66 #include <netinet/in_systm.h>
67 #include <netinet/in_var.h>
68 #include <netinet/ip.h>
69 #include <netinet/if_inarp.h>
70 #endif
71
72 #include <net/bpf.h>
73
74 #include <dev/mii/mii.h>
75 #include <dev/mii/miivar.h>
76
77 #include <dev/pci/pcireg.h>
78 #include <dev/pci/pcivar.h>
79 #include <dev/pci/pcidevs.h>
80
81 #include <dev/pci/if_etreg.h>
82
83 static int et_match(device_t, cfdata_t, void *);
84 static void et_attach(device_t, device_t, void *);
85 static int et_detach(device_t, int);
86
87 static int et_miibus_readreg(device_t, int, int, uint16_t *);
88 static int et_miibus_writereg(device_t, int, int, uint16_t);
89 static void et_miibus_statchg(struct ifnet *);
90
91 static int et_init(struct ifnet *);
92 static int et_ioctl(struct ifnet *, u_long, void *);
93 static void et_start(struct ifnet *);
94 static void et_watchdog(struct ifnet *);
95 static void et_ifmedia_sts(struct ifnet *, struct ifmediareq *);
96
97 static int et_intr(void *);
98 static void et_enable_intrs(struct et_softc *, uint32_t);
99 static void et_disable_intrs(struct et_softc *);
100 static void et_rxeof(struct et_softc *);
101 static void et_txeof(struct et_softc *);
102 static void et_txtick(void *);
103
104 static int et_dma_alloc(struct et_softc *);
105 static void et_dma_free(struct et_softc *);
106 static int et_dma_mem_create(struct et_softc *, bus_size_t,
107 void **, bus_addr_t *, bus_dmamap_t *, bus_dma_segment_t *);
108 static void et_dma_mem_destroy(struct et_softc *, void *, bus_dmamap_t);
109 static int et_dma_mbuf_create(struct et_softc *);
110 static void et_dma_mbuf_destroy(struct et_softc *, int, const int[]);
111
112 static int et_init_tx_ring(struct et_softc *);
113 static int et_init_rx_ring(struct et_softc *);
114 static void et_free_tx_ring(struct et_softc *);
115 static void et_free_rx_ring(struct et_softc *);
116 static int et_encap(struct et_softc *, struct mbuf **);
117 static int et_newbuf(struct et_rxbuf_data *, int, int, int);
118 static int et_newbuf_cluster(struct et_rxbuf_data *, int, int);
119 static int et_newbuf_hdr(struct et_rxbuf_data *, int, int);
120
121 static void et_stop(struct et_softc *);
122 static int et_chip_init(struct et_softc *);
123 static void et_chip_attach(struct et_softc *);
124 static void et_init_mac(struct et_softc *);
125 static void et_init_rxmac(struct et_softc *);
126 static void et_init_txmac(struct et_softc *);
127 static int et_init_rxdma(struct et_softc *);
128 static int et_init_txdma(struct et_softc *);
129 static int et_start_rxdma(struct et_softc *);
130 static int et_start_txdma(struct et_softc *);
131 static int et_stop_rxdma(struct et_softc *);
132 static int et_stop_txdma(struct et_softc *);
133 static void et_reset(struct et_softc *);
134 static int et_bus_config(struct et_softc *);
135 static void et_get_eaddr(struct et_softc *, uint8_t[]);
136 static void et_setmulti(struct et_softc *);
137 static void et_tick(void *);
138
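/*
 * Tunables: et_rx_intr_npkts/et_rx_intr_delay set the RX interrupt
 * moderation thresholds programmed in et_init_rxdma(), et_tx_intr_nsegs
 * controls how often et_encap() requests a TX completion interrupt, and
 * et_timer is the ET_TIMER interval used as a fallback for TX
 * descriptor reclamation.
 */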
139 static int et_rx_intr_npkts = 32;
140 static int et_rx_intr_delay = 20; /* x10 usec */
141 static int et_tx_intr_nsegs = 128;
142 static uint32_t et_timer = 1000 * 1000 * 1000; /* nanosec */
143
144 struct et_bsize {
145 int bufsize;
146 et_newbuf_t newbuf;
147 };
148
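/*
 * Per-ring receive buffer setup: ring 0 is refilled with plain header
 * mbufs (MHLEN) and ring 1 with clusters (MCLBYTES).  The bufsize field
 * is the ring size code that et_start_rxdma() writes into the
 * ET_RXDMA_CTRL_RINGn_SIZE fields.
 */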
149 static const struct et_bsize et_bufsize[ET_RX_NRING] = {
150 { .bufsize = 0, .newbuf = et_newbuf_hdr },
151 { .bufsize = 0, .newbuf = et_newbuf_cluster },
152 };
153
154 static const struct device_compatible_entry compat_data[] = {
155 { .id = PCI_ID_CODE(PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310),
156 .value = 0 },
157
158
159 { .id = PCI_ID_CODE(PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1301),
160 .value = ET_FLAG_FASTETHER },
161
162 PCI_COMPAT_EOL
163 };
164
165 CFATTACH_DECL_NEW(et, sizeof(struct et_softc), et_match, et_attach, et_detach,
166 NULL);
167
168 static int
169 et_match(device_t dev, cfdata_t match, void *aux)
170 {
171 struct pci_attach_args *pa = aux;
172
173 return pci_compatible_match(pa, compat_data);
174 }
175
176 static void
177 et_attach(device_t parent, device_t self, void *aux)
178 {
179 struct et_softc *sc = device_private(self);
180 struct pci_attach_args *pa = aux;
181 const struct device_compatible_entry *dce;
182 pci_chipset_tag_t pc = pa->pa_pc;
183 pci_intr_handle_t ih;
184 const char *intrstr;
185 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
186 struct mii_data * const mii = &sc->sc_miibus;
187 uint32_t pmcfg;
188 pcireg_t memtype;
189 int error;
190 char intrbuf[PCI_INTRSTR_LEN];
191
192 pci_aprint_devinfo(pa, "Ethernet controller");
193
194 sc->sc_dev = self;
195
196 /*
197 * Initialize tunables
198 */
199 sc->sc_rx_intr_npkts = et_rx_intr_npkts;
200 sc->sc_rx_intr_delay = et_rx_intr_delay;
201 sc->sc_tx_intr_nsegs = et_tx_intr_nsegs;
202 sc->sc_timer = et_timer;
203
204 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, ET_PCIR_BAR);
205 if (pci_mapreg_map(pa, ET_PCIR_BAR, memtype, 0, &sc->sc_mem_bt,
206 &sc->sc_mem_bh, NULL, &sc->sc_mem_size)) {
207 aprint_error_dev(self, "could not map mem space\n");
208 return;
209 }
210
211 if (pci_intr_map(pa, &ih) != 0) {
212 aprint_error_dev(self, "could not map interrupt\n");
213 goto fail;
214 }
215
216 intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
217 sc->sc_irq_handle = pci_intr_establish_xname(pc, ih, IPL_NET, et_intr,
218 sc, device_xname(self));
219 if (sc->sc_irq_handle == NULL) {
220 aprint_error_dev(self, "could not establish interrupt");
221 if (intrstr != NULL)
222 aprint_error(" at %s", intrstr);
223 aprint_error("\n");
224 goto fail;
225 }
226 aprint_normal_dev(self, "interrupting at %s\n", intrstr);
227
228 sc->sc_pct = pa->pa_pc;
229 sc->sc_pcitag = pa->pa_tag;
230
231 if (pci_dma64_available(pa))
232 sc->sc_dmat = pa->pa_dmat64;
233 else
234 sc->sc_dmat = pa->pa_dmat;
235
236 dce = pci_compatible_lookup(pa, compat_data);
237 KASSERT(dce != NULL);
238 sc->sc_flags = (uint32_t)dce->value;
239
240 error = et_bus_config(sc);
241 if (error)
242 goto fail;
243
244 et_get_eaddr(sc, sc->sc_enaddr);
245
246 aprint_normal_dev(self, "Ethernet address %s\n",
247 ether_sprintf(sc->sc_enaddr));
248
249 /* Take PHY out of COMA and enable clocks. */
250 pmcfg = ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE;
251 if ((sc->sc_flags & ET_FLAG_FASTETHER) == 0)
252 pmcfg |= EM_PM_GIGEPHY_ENB;
253 CSR_WRITE_4(sc, ET_PM, pmcfg);
254
255 et_reset(sc);
256
257 et_disable_intrs(sc);
258
259 error = et_dma_alloc(sc);
260 if (error)
261 goto fail;
262
263 ifp->if_softc = sc;
264 ifp->if_mtu = ETHERMTU;
265 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
266 ifp->if_init = et_init;
267 ifp->if_ioctl = et_ioctl;
268 ifp->if_start = et_start;
269 ifp->if_watchdog = et_watchdog;
270 IFQ_SET_MAXLEN(&ifp->if_snd, ET_TX_NDESC);
271 IFQ_SET_READY(&ifp->if_snd);
272 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
273
274 et_chip_attach(sc);
275
276 mii->mii_ifp = ifp;
277 mii->mii_readreg = et_miibus_readreg;
278 mii->mii_writereg = et_miibus_writereg;
279 mii->mii_statchg = et_miibus_statchg;
280
281 sc->sc_ethercom.ec_mii = mii;
282 ifmedia_init(&mii->mii_media, 0, ether_mediachange,
283 et_ifmedia_sts);
284 mii_attach(self, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0);
285 if (LIST_FIRST(&mii->mii_phys) == NULL) {
286 aprint_error_dev(self, "no PHY found!\n");
287 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL,
288 0, NULL);
289 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
290 } else
291 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
292
293 if_attach(ifp);
294 if_deferred_start_init(ifp, NULL);
295 ether_ifattach(ifp, sc->sc_enaddr);
296
297 callout_init(&sc->sc_tick, 0);
298 callout_setfunc(&sc->sc_tick, et_tick, sc);
299 callout_init(&sc->sc_txtick, 0);
300 callout_setfunc(&sc->sc_txtick, et_txtick, sc);
301
302 if (pmf_device_register(self, NULL, NULL))
303 pmf_class_network_register(self, ifp);
304 else
305 aprint_error_dev(self, "couldn't establish power handler\n");
306
307 return;
308
309 fail:
310 et_dma_free(sc);
311 if (sc->sc_irq_handle != NULL) {
312 pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
313 sc->sc_irq_handle = NULL;
314 }
315 if (sc->sc_mem_size) {
316 bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
317 sc->sc_mem_size = 0;
318 }
319 }
320
321 static int
322 et_detach(device_t self, int flags)
323 {
324 struct et_softc *sc = device_private(self);
325 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
326 int s;
327
328 pmf_device_deregister(self);
329 s = splnet();
330 et_stop(sc);
331 splx(s);
332
333 mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);
334
335 ether_ifdetach(ifp);
336 if_detach(ifp);
337 et_dma_free(sc);
338
339 /* Delete all remaining media. */
340 ifmedia_fini(&sc->sc_miibus.mii_media);
341
342 if (sc->sc_irq_handle != NULL) {
343 pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
344 sc->sc_irq_handle = NULL;
345 }
346
347 if (sc->sc_mem_size) {
348 bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
349 sc->sc_mem_size = 0;
350 }
351
352 return 0;
353 }
354
355 #if 0 /* XXX XXX XXX UNUSED */
356 static int
357 et_shutdown(device_t self)
358 {
359 struct et_softc *sc = device_private(self);
360 int s;
361
362 s = splnet();
363 et_stop(sc);
364 splx(s);
365
366 return 0;
367 }
368 #endif
369
370 static int
371 et_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val)
372 {
373 struct et_softc *sc = device_private(dev);
374 uint32_t data;
375 int i, ret;
376
377 /* Stop any pending operations */
378 CSR_WRITE_4(sc, ET_MII_CMD, 0);
379
380 data = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
381 __SHIFTIN(reg, ET_MII_ADDR_REG);
382 CSR_WRITE_4(sc, ET_MII_ADDR, data);
383
384 /* Start reading */
385 CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ);
386
387 #define NRETRY 50
388
389 for (i = 0; i < NRETRY; ++i) {
390 data = CSR_READ_4(sc, ET_MII_IND);
391 if ((data & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0)
392 break;
393 DELAY(50);
394 }
395 if (i == NRETRY) {
396 aprint_error_dev(sc->sc_dev, "read phy %d, reg %d timed out\n",
397 phy, reg);
398 ret = ETIMEDOUT;
399 goto back;
400 }
401
402 #undef NRETRY
403
404 data = CSR_READ_4(sc, ET_MII_STAT);
405 *val = __SHIFTOUT(data, ET_MII_STAT_VALUE);
406 ret = 0;
407
408 back:
409 /* Make sure that the current operation is stopped */
410 CSR_WRITE_4(sc, ET_MII_CMD, 0);
411 return ret;
412 }
413
414 static int
415 et_miibus_writereg(device_t dev, int phy, int reg, uint16_t val)
416 {
417 struct et_softc *sc = device_private(dev);
418 uint32_t data;
419 uint16_t tmp;
420 int rv = 0;
421 int i;
422
423 /* Stop any pending operations */
424 CSR_WRITE_4(sc, ET_MII_CMD, 0);
425
426 data = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
427 __SHIFTIN(reg, ET_MII_ADDR_REG);
428 CSR_WRITE_4(sc, ET_MII_ADDR, data);
429
430 /* Start writing */
431 CSR_WRITE_4(sc, ET_MII_CTRL, __SHIFTIN(val, ET_MII_CTRL_VALUE));
432
433 #define NRETRY 100
434
435 for (i = 0; i < NRETRY; ++i) {
436 data = CSR_READ_4(sc, ET_MII_IND);
437 if ((data & ET_MII_IND_BUSY) == 0)
438 break;
439 DELAY(50);
440 }
441 if (i == NRETRY) {
442 aprint_error_dev(sc->sc_dev, "write phy %d, reg %d timed out\n",
443 phy, reg);
444 et_miibus_readreg(dev, phy, reg, &tmp);
445 rv = ETIMEDOUT;
446 }
447
448 #undef NRETRY
449
450 /* Make sure that the current operation is stopped */
451 CSR_WRITE_4(sc, ET_MII_CMD, 0);
452
453 return rv;
454 }
455
456 static void
457 et_miibus_statchg(struct ifnet *ifp)
458 {
459 struct et_softc *sc = ifp->if_softc;
460 struct mii_data *mii = &sc->sc_miibus;
461 uint32_t cfg1, cfg2, ctrl;
462 int i;
463
464 sc->sc_flags &= ~ET_FLAG_LINK;
465 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
466 (IFM_ACTIVE | IFM_AVALID)) {
467 switch (IFM_SUBTYPE(mii->mii_media_active)) {
468 case IFM_10_T:
469 case IFM_100_TX:
470 sc->sc_flags |= ET_FLAG_LINK;
471 break;
472 case IFM_1000_T:
473 if ((sc->sc_flags & ET_FLAG_FASTETHER) == 0)
474 sc->sc_flags |= ET_FLAG_LINK;
475 break;
476 }
477 }
478
479 /* XXX Stop TX/RX MAC? */
480 if ((sc->sc_flags & ET_FLAG_LINK) == 0)
481 return;
482
483 /* Program MACs with resolved speed/duplex/flow-control. */
484 ctrl = CSR_READ_4(sc, ET_MAC_CTRL);
485 ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII);
486 cfg1 = CSR_READ_4(sc, ET_MAC_CFG1);
487 cfg1 &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW |
488 ET_MAC_CFG1_LOOPBACK);
489 cfg2 = CSR_READ_4(sc, ET_MAC_CFG2);
490 cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII |
491 ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM);
492 cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC |
493 __SHIFTIN(7, ET_MAC_CFG2_PREAMBLE_LEN);
494
495
496 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
497 cfg2 |= ET_MAC_CFG2_MODE_GMII;
498 else {
499 cfg2 |= ET_MAC_CFG2_MODE_MII;
500 ctrl |= ET_MAC_CTRL_MODE_MII;
501 }
502
503 if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
504 cfg2 |= ET_MAC_CFG2_FDX;
505 /*
506 * The controller lacks automatic TX pause frame
507 * generation, so the driver would have to handle it
508 * itself.  Although the driver can send pause frames
509 * with an arbitrary pause time, the controller does
510 * not provide a way to tell how many free RX buffers
511 * it has available.  That limitation makes it hard
512 * to generate an XON frame in time on the driver
513 * side, so don't enable TX flow control for the
514 * time being.
515 */
516 #ifdef notyet
517 if (IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE)
518 cfg1 |= ET_MAC_CFG1_TXFLOW;
519 #endif
520 if (IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE)
521 cfg1 |= ET_MAC_CFG1_RXFLOW;
522 } else
523 ctrl |= ET_MAC_CTRL_GHDX;
524
525 CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl);
526 CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2);
527 cfg1 |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN;
528 CSR_WRITE_4(sc, ET_MAC_CFG1, cfg1);
529
530 #define NRETRY 100
531
532 for (i = 0; i < NRETRY; ++i) {
533 cfg1 = CSR_READ_4(sc, ET_MAC_CFG1);
534 if ((cfg1 & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) ==
535 (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN))
536 break;
537
538 DELAY(10);
539 }
540 /* Note: Timeout always happens when cable is not plugged in. */
541
542 sc->sc_flags |= ET_FLAG_TXRX_ENABLED;
543
544 #undef NRETRY
545 }
546
547 static void
548 et_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
549 {
550 struct et_softc *sc;
551 struct mii_data *mii;
552
553 sc = ifp->if_softc;
554 mii = &sc->sc_miibus;
555 mii_pollstat(mii);
556 ifmr->ifm_active = mii->mii_media_active;
557 ifmr->ifm_status = mii->mii_media_status;
558 }
559
560 static void
561 et_stop(struct et_softc *sc)
562 {
563 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
564
565 callout_stop(&sc->sc_tick);
566 callout_stop(&sc->sc_txtick);
567
568 et_stop_rxdma(sc);
569 et_stop_txdma(sc);
570
571 et_disable_intrs(sc);
572
573 et_free_tx_ring(sc);
574 et_free_rx_ring(sc);
575
576 et_reset(sc);
577
578 sc->sc_tx = 0;
579 sc->sc_tx_intr = 0;
580 sc->sc_flags &= ~ET_FLAG_TXRX_ENABLED;
581
582 ifp->if_timer = 0;
583 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
584 }
585
586 static int
587 et_bus_config(struct et_softc *sc)
588 {
589 uint32_t val; //, max_plsz;
590 // uint16_t ack_latency, replay_timer;
591
592 /*
593 * Test whether EEPROM is valid
594 * NOTE: Read twice to get the correct value
595 */
596 pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_EEPROM_MISC);
597 val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_EEPROM_MISC);
598
599 if (val & ET_PCIM_EEPROM_STATUS_ERROR) {
600 aprint_error_dev(sc->sc_dev, "EEPROM status error 0x%02x\n", val);
601 return ENXIO;
602 }
603
604 /* TODO: LED */
605 #if 0
606 /*
607 * Configure ACK latency and replay timer according to
608 * max payload size
609 */
610 val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_DEVICE_CAPS);
611 max_plsz = val & ET_PCIM_DEVICE_CAPS_MAX_PLSZ;
612
613 switch (max_plsz) {
614 case ET_PCIV_DEVICE_CAPS_PLSZ_128:
615 ack_latency = ET_PCIV_ACK_LATENCY_128;
616 replay_timer = ET_PCIV_REPLAY_TIMER_128;
617 break;
618
619 case ET_PCIV_DEVICE_CAPS_PLSZ_256:
620 ack_latency = ET_PCIV_ACK_LATENCY_256;
621 replay_timer = ET_PCIV_REPLAY_TIMER_256;
622 break;
623
624 default:
625 ack_latency = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
626 ET_PCIR_ACK_LATENCY) >> 16;
627 replay_timer = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
628 ET_PCIR_REPLAY_TIMER) >> 16;
629 aprint_normal_dev(sc->sc_dev, "ack latency %u, replay timer %u\n",
630 ack_latency, replay_timer);
631 break;
632 }
633 if (ack_latency != 0) {
634 pci_conf_write(sc->sc_pct, sc->sc_pcitag,
635 ET_PCIR_ACK_LATENCY, ack_latency << 16);
636 pci_conf_write(sc->sc_pct, sc->sc_pcitag,
637 ET_PCIR_REPLAY_TIMER, replay_timer << 16);
638 }
639
640 /*
641 * Set L0s and L1 latency timer to 2us
642 */
643 val = ET_PCIV_L0S_LATENCY(2) | ET_PCIV_L1_LATENCY(2);
644 pci_conf_write(sc->sc_pct, sc->sc_pcitag, ET_PCIR_L0S_L1_LATENCY,
645 val << 24);
646
647 /*
648 * Set max read request size to 2048 bytes
649 */
650 val = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
651 ET_PCIR_DEVICE_CTRL) >> 16;
652 val &= ~ET_PCIM_DEVICE_CTRL_MAX_RRSZ;
653 val |= ET_PCIV_DEVICE_CTRL_RRSZ_2K;
654 pci_conf_write(sc->sc_pct, sc->sc_pcitag, ET_PCIR_DEVICE_CTRL,
655 val << 16);
656 #endif
657
658 return 0;
659 }
660
661 static void
662 et_get_eaddr(struct et_softc *sc, uint8_t eaddr[])
663 {
664 uint32_t r;
665
666 r = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_MACADDR_LO);
667 eaddr[0] = r & 0xff;
668 eaddr[1] = (r >> 8) & 0xff;
669 eaddr[2] = (r >> 16) & 0xff;
670 eaddr[3] = (r >> 24) & 0xff;
671 r = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_MACADDR_HI);
672 eaddr[4] = r & 0xff;
673 eaddr[5] = (r >> 8) & 0xff;
674 }
675
676 static void
677 et_reset(struct et_softc *sc)
678 {
679
680 CSR_WRITE_4(sc, ET_MAC_CFG1,
681 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
682 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
683 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);
684
685 CSR_WRITE_4(sc, ET_SWRST,
686 ET_SWRST_TXDMA | ET_SWRST_RXDMA |
687 ET_SWRST_TXMAC | ET_SWRST_RXMAC |
688 ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC);
689
690 CSR_WRITE_4(sc, ET_MAC_CFG1,
691 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
692 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC);
693 CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
694 }
695
696 static void
697 et_disable_intrs(struct et_softc *sc)
698 {
699 CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
700 }
701
702 static void
703 et_enable_intrs(struct et_softc *sc, uint32_t intrs)
704 {
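/*
 * ET_INTR_MASK is a disable mask (set bits block the corresponding
 * interrupt sources, cf. et_disable_intrs()), so unmask the requested
 * interrupts by writing the complement.
 */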
705 CSR_WRITE_4(sc, ET_INTR_MASK, ~intrs);
706 }
707
708 static int
709 et_dma_alloc(struct et_softc *sc)
710 {
711 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
712 struct et_txstatus_data *txsd = &sc->sc_tx_status;
713 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
714 struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
715 int i, error;
716
717 /*
718 * Create TX ring DMA stuffs
719 */
720 error = et_dma_mem_create(sc, ET_TX_RING_SIZE,
721 (void **)&tx_ring->tr_desc, &tx_ring->tr_paddr, &tx_ring->tr_dmap,
722 &tx_ring->tr_seg);
723 if (error) {
724 aprint_error_dev(sc->sc_dev, "can't create TX ring DMA stuffs\n");
725 return error;
726 }
727
728 /*
729 * Create TX status DMA stuffs
730 */
731 error = et_dma_mem_create(sc, sizeof(uint32_t),
732 (void **)&txsd->txsd_status,
733 &txsd->txsd_paddr, &txsd->txsd_dmap, &txsd->txsd_seg);
734 if (error) {
735 aprint_error_dev(sc->sc_dev, "can't create TX status DMA stuffs\n");
736 return error;
737 }
738
739 /*
740 * Create DMA stuffs for RX rings
741 */
742 for (i = 0; i < ET_RX_NRING; ++i) {
743 static const uint32_t rx_ring_posreg[ET_RX_NRING] =
744 { ET_RX_RING0_POS, ET_RX_RING1_POS };
745
746 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];
747
748 error = et_dma_mem_create(sc, ET_RX_RING_SIZE,
749 (void **)&rx_ring->rr_desc,
750 &rx_ring->rr_paddr, &rx_ring->rr_dmap, &rx_ring->rr_seg);
751 if (error) {
752 aprint_error_dev(sc->sc_dev, "can't create DMA stuffs for "
753 "RX ring %d\n", i);
754 return error;
755 }
756 rx_ring->rr_posreg = rx_ring_posreg[i];
757 }
758
759 /*
760 * Create RX stat ring DMA stuffs
761 */
762 error = et_dma_mem_create(sc, ET_RXSTAT_RING_SIZE,
763 (void **)&rxst_ring->rsr_stat,
764 &rxst_ring->rsr_paddr, &rxst_ring->rsr_dmap, &rxst_ring->rsr_seg);
765 if (error) {
766 aprint_error_dev(sc->sc_dev, "can't create RX stat ring DMA stuffs\n");
767 return error;
768 }
769
770 /*
771 * Create RX status DMA stuffs
772 */
773 error = et_dma_mem_create(sc, sizeof(struct et_rxstatus),
774 (void **)&rxsd->rxsd_status,
775 &rxsd->rxsd_paddr, &rxsd->rxsd_dmap, &rxsd->rxsd_seg);
776 if (error) {
777 aprint_error_dev(sc->sc_dev, "can't create RX status DMA stuffs\n");
778 return error;
779 }
780
781 /*
782 * Create mbuf DMA stuffs
783 */
784 error = et_dma_mbuf_create(sc);
785 if (error)
786 return error;
787
788 return 0;
789 }
790
791 static void
792 et_dma_free(struct et_softc *sc)
793 {
794 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
795 struct et_txstatus_data *txsd = &sc->sc_tx_status;
796 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
797 struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
798 int i, rx_done[ET_RX_NRING];
799
800 /*
801 * Destroy TX ring DMA stuffs
802 */
803 et_dma_mem_destroy(sc, tx_ring->tr_desc, tx_ring->tr_dmap);
804
805 /*
806 * Destroy TX status DMA stuffs
807 */
808 et_dma_mem_destroy(sc, txsd->txsd_status, txsd->txsd_dmap);
809
810 /*
811 * Destroy DMA stuffs for RX rings
812 */
813 for (i = 0; i < ET_RX_NRING; ++i) {
814 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];
815
816 et_dma_mem_destroy(sc, rx_ring->rr_desc, rx_ring->rr_dmap);
817 }
818
819 /*
820 * Destroy RX stat ring DMA stuffs
821 */
822 et_dma_mem_destroy(sc, rxst_ring->rsr_stat, rxst_ring->rsr_dmap);
823
824 /*
825 * Destroy RX status DMA stuffs
826 */
827 et_dma_mem_destroy(sc, rxsd->rxsd_status, rxsd->rxsd_dmap);
828
829 /*
830 * Destroy mbuf DMA stuffs
831 */
832 for (i = 0; i < ET_RX_NRING; ++i)
833 rx_done[i] = ET_RX_NDESC;
834 et_dma_mbuf_destroy(sc, ET_TX_NDESC, rx_done);
835 }
836
837 static int
838 et_dma_mbuf_create(struct et_softc *sc)
839 {
840 struct et_txbuf_data *tbd = &sc->sc_tx_data;
841 int i, error, rx_done[ET_RX_NRING];
842
843 /*
844 * Create spare DMA map for RX mbufs
845 */
846 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
847 BUS_DMA_NOWAIT, &sc->sc_mbuf_tmp_dmap);
848 if (error) {
849 aprint_error_dev(sc->sc_dev, "can't create spare mbuf DMA map\n");
850 return error;
851 }
852
853 /*
854 * Create DMA maps for RX mbufs
855 */
856 bzero(rx_done, sizeof(rx_done));
857 for (i = 0; i < ET_RX_NRING; ++i) {
858 struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
859 int j;
860
861 for (j = 0; j < ET_RX_NDESC; ++j) {
862 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
863 MCLBYTES, 0, BUS_DMA_NOWAIT,
864 &rbd->rbd_buf[j].rb_dmap);
865 if (error) {
866 aprint_error_dev(sc->sc_dev, "can't create DMA map for "
867 "RX mbuf %d in RX ring %d\n", j, i);
868 rx_done[i] = j;
869 et_dma_mbuf_destroy(sc, 0, rx_done);
870 return error;
871 }
872 }
873 rx_done[i] = ET_RX_NDESC;
874
875 rbd->rbd_softc = sc;
876 rbd->rbd_ring = &sc->sc_rx_ring[i];
877 }
878
879 /*
880 * Create DMA maps for TX mbufs
881 */
882 for (i = 0; i < ET_TX_NDESC; ++i) {
883 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
884 0, BUS_DMA_NOWAIT, &tbd->tbd_buf[i].tb_dmap);
885 if (error) {
886 aprint_error_dev(sc->sc_dev, "can't create DMA map for "
887 "TX mbuf %d\n", i);
888 et_dma_mbuf_destroy(sc, i, rx_done);
889 return error;
890 }
891 }
892
893 return 0;
894 }
895
896 static void
897 et_dma_mbuf_destroy(struct et_softc *sc, int tx_done, const int rx_done[])
898 {
899 struct et_txbuf_data *tbd = &sc->sc_tx_data;
900 int i;
901
902 /*
903 * Destroy DMA maps for RX mbufs
904 */
905 for (i = 0; i < ET_RX_NRING; ++i) {
906 struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
907 int j;
908
909 for (j = 0; j < rx_done[i]; ++j) {
910 struct et_rxbuf *rb = &rbd->rbd_buf[j];
911
912 KASSERTMSG(rb->rb_mbuf == NULL,
913 "RX mbuf in RX ring %d is not freed yet\n", i);
914 bus_dmamap_destroy(sc->sc_dmat, rb->rb_dmap);
915 }
916 }
917
918 /*
919 * Destroy DMA maps for TX mbufs
920 */
921 for (i = 0; i < tx_done; ++i) {
922 struct et_txbuf *tb = &tbd->tbd_buf[i];
923
924 KASSERTMSG(tb->tb_mbuf == NULL, "TX mbuf is not freed yet\n");
925 bus_dmamap_destroy(sc->sc_dmat, tb->tb_dmap);
926 }
927
928 /*
929 * Destroy spare mbuf DMA map
930 */
931 bus_dmamap_destroy(sc->sc_dmat, sc->sc_mbuf_tmp_dmap);
932 }
933
934 static int
935 et_dma_mem_create(struct et_softc *sc, bus_size_t size,
936 void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap, bus_dma_segment_t *seg)
937 {
938 int error, nsegs;
939
940 error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, BUS_DMA_NOWAIT,
941 dmap);
942 if (error) {
943 aprint_error_dev(sc->sc_dev, "can't create DMA map\n");
944 return error;
945 }
946
947 error = bus_dmamem_alloc(sc->sc_dmat, size, ET_ALIGN, 0, seg,
948 1, &nsegs, BUS_DMA_WAITOK);
949 if (error) {
950 aprint_error_dev(sc->sc_dev, "can't allocate DMA mem\n");
951 return error;
952 }
953
954 error = bus_dmamem_map(sc->sc_dmat, seg, nsegs,
955 size, (void **)addr, BUS_DMA_NOWAIT);
956 if (error) {
957 aprint_error_dev(sc->sc_dev, "can't map DMA mem\n");
958 return (error);
959 }
960
961 error = bus_dmamap_load(sc->sc_dmat, *dmap, *addr, size, NULL,
962 BUS_DMA_WAITOK);
963 if (error) {
964 aprint_error_dev(sc->sc_dev, "can't load DMA mem\n");
965 bus_dmamem_free(sc->sc_dmat, seg, 1);
966 return error;
967 }
968
969 memset(*addr, 0, size);
970
971 *paddr = (*dmap)->dm_segs[0].ds_addr;
972
973 return 0;
974 }
975
976 static void
977 et_dma_mem_destroy(struct et_softc *sc, void *addr, bus_dmamap_t dmap)
978 {
979 bus_dmamap_unload(sc->sc_dmat, dmap);
980 bus_dmamem_free(sc->sc_dmat, (bus_dma_segment_t *)&addr, 1);
981 }
982
983 static void
984 et_chip_attach(struct et_softc *sc)
985 {
986 uint32_t val;
987
988 /*
989 * Perform minimal initialization
990 */
991
992 /* Disable loopback */
993 CSR_WRITE_4(sc, ET_LOOPBACK, 0);
994
995 /* Reset MAC */
996 CSR_WRITE_4(sc, ET_MAC_CFG1,
997 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
998 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
999 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);
1000
1001 /*
1002 * Setup half duplex mode
1003 */
1004 val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
1005 __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
1006 __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
1007 ET_MAC_HDX_EXC_DEFER;
1008 CSR_WRITE_4(sc, ET_MAC_HDX, val);
1009
1010 /* Clear MAC control */
1011 CSR_WRITE_4(sc, ET_MAC_CTRL, 0);
1012
1013 /* Reset MII */
1014 CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);
1015
1016 /* Bring MAC out of reset state */
1017 CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
1018
1019 /* Enable memory controllers */
1020 CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
1021 }
1022
1023 static int
1024 et_intr(void *xsc)
1025 {
1026 struct et_softc *sc = xsc;
1027 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1028 uint32_t intrs;
1029
1030 if ((ifp->if_flags & IFF_RUNNING) == 0)
1031 return (0);
1032
1033 intrs = CSR_READ_4(sc, ET_INTR_STATUS);
1034 if (intrs == 0 || intrs == 0xffffffff)
1035 return (0);
1036
1037 et_disable_intrs(sc);
1038 intrs &= ET_INTRS;
1039 if (intrs == 0) /* Not interested */
1040 goto back;
1041
1042 if (intrs & ET_INTR_RXEOF)
1043 et_rxeof(sc);
1044 if (intrs & (ET_INTR_TXEOF | ET_INTR_TIMER))
1045 et_txeof(sc);
1046 if (intrs & ET_INTR_TIMER)
1047 CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
1048 back:
1049 et_enable_intrs(sc, ET_INTRS);
1050
1051 return (1);
1052 }
1053
1054 static int
1055 et_init(struct ifnet *ifp)
1056 {
1057 struct et_softc *sc = ifp->if_softc;
1058 int error, i, s;
1059
1060 if (ifp->if_flags & IFF_RUNNING)
1061 return 0;
1062
1063 s = splnet();
1064
1065 et_stop(sc);
1066 et_reset(sc);
1067
1068 for (i = 0; i < ET_RX_NRING; ++i) {
1069 sc->sc_rx_data[i].rbd_bufsize = et_bufsize[i].bufsize;
1070 sc->sc_rx_data[i].rbd_newbuf = et_bufsize[i].newbuf;
1071 }
1072
1073 error = et_init_tx_ring(sc);
1074 if (error)
1075 goto back;
1076
1077 error = et_init_rx_ring(sc);
1078 if (error)
1079 goto back;
1080
1081 error = et_chip_init(sc);
1082 if (error)
1083 goto back;
1084
1085 error = et_start_rxdma(sc);
1086 if (error)
1087 goto back;
1088
1089 error = et_start_txdma(sc);
1090 if (error)
1091 goto back;
1092
1093 /* Enable interrupts. */
1094 et_enable_intrs(sc, ET_INTRS);
1095
1096 callout_schedule(&sc->sc_tick, hz);
1097
1098 CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
1099
1100 ifp->if_flags |= IFF_RUNNING;
1101 ifp->if_flags &= ~IFF_OACTIVE;
1102
1103 sc->sc_flags &= ~ET_FLAG_LINK;
1104 ether_mediachange(ifp);
1105 back:
1106 if (error)
1107 et_stop(sc);
1108
1109 splx(s);
1110
1111 return error;
1112 }
1113
1114 static int
1115 et_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1116 {
1117 struct et_softc *sc = ifp->if_softc;
1118 int s, error = 0;
1119
1120 s = splnet();
1121
1122 switch (cmd) {
1123 case SIOCSIFFLAGS:
1124 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
1125 break;
1126 if (ifp->if_flags & IFF_UP) {
1127 /*
1128 * If only the PROMISC or ALLMULTI flag changes, then
1129 * don't do a full re-init of the chip, just update
1130 * the Rx filter.
1131 */
1132 if ((ifp->if_flags & IFF_RUNNING) &&
1133 ((ifp->if_flags ^ sc->sc_if_flags) &
1134 (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
1135 et_setmulti(sc);
1136 } else {
1137 if (!(ifp->if_flags & IFF_RUNNING))
1138 et_init(ifp);
1139 }
1140 } else {
1141 if (ifp->if_flags & IFF_RUNNING)
1142 et_stop(sc);
1143 }
1144 sc->sc_if_flags = ifp->if_flags;
1145 break;
1146 default:
1147 error = ether_ioctl(ifp, cmd, data);
1148 if (error == ENETRESET) {
1149 if (ifp->if_flags & IFF_RUNNING)
1150 et_setmulti(sc);
1151 error = 0;
1152 }
1153 break;
1154 }
1155
1156 splx(s);
1157
1158 return error;
1159 }
1160
1161 static void
1162 et_start(struct ifnet *ifp)
1163 {
1164 struct et_softc *sc = ifp->if_softc;
1165 struct et_txbuf_data *tbd = &sc->sc_tx_data;
1166 int trans;
1167 struct mbuf *m;
1168
1169 if (((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) ||
1170 ((sc->sc_flags & (ET_FLAG_LINK | ET_FLAG_TXRX_ENABLED)) !=
1171 (ET_FLAG_LINK | ET_FLAG_TXRX_ENABLED)))
1172 return;
1173
1174 trans = 0;
1175 for (;;) {
1176 IFQ_DEQUEUE(&ifp->if_snd, m);
1177 if (m == NULL)
1178 break;
1179
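/*
 * Keep at least ET_NSEG_SPARE descriptors free so et_encap()
 * always has room for a maximally fragmented frame.
 */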
1180 if ((tbd->tbd_used + ET_NSEG_SPARE) > ET_TX_NDESC) {
1181 ifp->if_flags |= IFF_OACTIVE;
1182 break;
1183 }
1184
1185 if (et_encap(sc, &m)) {
1186 if_statinc(ifp, if_oerrors);
1187 ifp->if_flags |= IFF_OACTIVE;
1188 break;
1189 }
1190
1191 trans = 1;
1192
1193 bpf_mtap(ifp, m, BPF_D_OUT);
1194 }
1195
1196 if (trans) {
1197 callout_schedule(&sc->sc_txtick, hz);
1198 ifp->if_timer = 5;
1199 }
1200 }
1201
1202 static void
1203 et_watchdog(struct ifnet *ifp)
1204 {
1205 struct et_softc *sc = ifp->if_softc;
1206 aprint_error_dev(sc->sc_dev, "watchdog timed out\n");
1207
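/* Force a full reinitialization: et_init() returns early if IFF_RUNNING is still set. */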
1208 ifp->if_flags &= ~IFF_RUNNING;
1209 et_init(ifp);
1210 et_start(ifp);
1211 }
1212
1213 static int
1214 et_stop_rxdma(struct et_softc *sc)
1215 {
1216
1217 CSR_WRITE_4(sc, ET_RXDMA_CTRL,
1218 ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE);
1219
1220 DELAY(5);
1221 if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) {
1222 aprint_error_dev(sc->sc_dev, "can't stop RX DMA engine\n");
1223 return ETIMEDOUT;
1224 }
1225 return 0;
1226 }
1227
1228 static int
1229 et_stop_txdma(struct et_softc *sc)
1230 {
1231
1232 CSR_WRITE_4(sc, ET_TXDMA_CTRL,
1233 ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT);
1234 return 0;
1235 }
1236
1237 static void
1238 et_free_tx_ring(struct et_softc *sc)
1239 {
1240 struct et_txbuf_data *tbd = &sc->sc_tx_data;
1241 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
1242 int i;
1243
1244 for (i = 0; i < ET_TX_NDESC; ++i) {
1245 struct et_txbuf *tb = &tbd->tbd_buf[i];
1246
1247 if (tb->tb_mbuf != NULL) {
1248 bus_dmamap_unload(sc->sc_dmat, tb->tb_dmap);
1249 m_freem(tb->tb_mbuf);
1250 tb->tb_mbuf = NULL;
1251 }
1252 }
1253
1254 bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
1255 bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
1256 tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1257 }
1258
1259 static void
1260 et_free_rx_ring(struct et_softc *sc)
1261 {
1262 int n;
1263
1264 for (n = 0; n < ET_RX_NRING; ++n) {
1265 struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
1266 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[n];
1267 int i;
1268
1269 for (i = 0; i < ET_RX_NDESC; ++i) {
1270 struct et_rxbuf *rb = &rbd->rbd_buf[i];
1271
1272 if (rb->rb_mbuf != NULL) {
1273 bus_dmamap_unload(sc->sc_dmat, rb->rb_dmap);
1274 m_freem(rb->rb_mbuf);
1275 rb->rb_mbuf = NULL;
1276 }
1277 }
1278
1279 bzero(rx_ring->rr_desc, ET_RX_RING_SIZE);
1280 bus_dmamap_sync(sc->sc_dmat, rx_ring->rr_dmap, 0,
1281 rx_ring->rr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1282 }
1283 }
1284
1285 static void
1286 et_setmulti(struct et_softc *sc)
1287 {
1288 struct ethercom *ec = &sc->sc_ethercom;
1289 struct ifnet *ifp = &ec->ec_if;
1290 uint32_t hash[4] = { 0, 0, 0, 0 };
1291 uint32_t rxmac_ctrl, pktfilt;
1292 struct ether_multi *enm;
1293 struct ether_multistep step;
1294 int i, count;
1295
1296 pktfilt = CSR_READ_4(sc, ET_PKTFILT);
1297 rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL);
1298
1299 pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST);
1300 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
1301 rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT;
1302 goto back;
1303 }
1304
1305 count = 0;
1306 ETHER_LOCK(ec);
1307 ETHER_FIRST_MULTI(step, ec, enm);
1308 while (enm != NULL) {
1309 uint32_t *hp, h;
1310
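/*
 * A 7-bit slice of the big-endian CRC of the address selects one
 * of 128 hash bits, kept here as four 32-bit words and written to
 * the ET_MULTI_HASH registers below.
 */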
1311 h = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
1312 h = (h & 0x3f800000) >> 23;
1313
1314 hp = &hash[0];
1315 if (h >= 32 && h < 64) {
1316 h -= 32;
1317 hp = &hash[1];
1318 } else if (h >= 64 && h < 96) {
1319 h -= 64;
1320 hp = &hash[2];
1321 } else if (h >= 96) {
1322 h -= 96;
1323 hp = &hash[3];
1324 }
1325 *hp |= (1 << h);
1326
1327 ++count;
1328 ETHER_NEXT_MULTI(step, enm);
1329 }
1330 ETHER_UNLOCK(ec);
1331
1332 for (i = 0; i < 4; ++i)
1333 CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]);
1334
1335 if (count > 0)
1336 pktfilt |= ET_PKTFILT_MCAST;
1337 rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT;
1338 back:
1339 CSR_WRITE_4(sc, ET_PKTFILT, pktfilt);
1340 CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl);
1341 }
1342
1343 static int
1344 et_chip_init(struct et_softc *sc)
1345 {
1346 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1347 uint32_t rxq_end;
1348 int error;
1349
1350 /*
1351 * Split internal memory between TX and RX according to MTU
1352 */
1353 if (ifp->if_mtu < 2048)
1354 rxq_end = 0x2bc;
1355 else if (ifp->if_mtu < 8192)
1356 rxq_end = 0x1ff;
1357 else
1358 rxq_end = 0x1b3;
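/*
 * A larger MTU gets a smaller RX queue, leaving more of the internal
 * packet memory (up to ET_INTERN_MEM_END) to the TX queue.
 */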
1359 CSR_WRITE_4(sc, ET_RXQ_START, 0);
1360 CSR_WRITE_4(sc, ET_RXQ_END, rxq_end);
1361 CSR_WRITE_4(sc, ET_TXQ_START, rxq_end + 1);
1362 CSR_WRITE_4(sc, ET_TXQ_END, ET_INTERN_MEM_END);
1363
1364 /* No loopback */
1365 CSR_WRITE_4(sc, ET_LOOPBACK, 0);
1366
1367 /* Clear MSI configure */
1368 CSR_WRITE_4(sc, ET_MSI_CFG, 0);
1369
1370 /* Disable timer */
1371 CSR_WRITE_4(sc, ET_TIMER, 0);
1372
1373 /* Initialize MAC */
1374 et_init_mac(sc);
1375
1376 /* Enable memory controllers */
1377 CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
1378
1379 /* Initialize RX MAC */
1380 et_init_rxmac(sc);
1381
1382 /* Initialize TX MAC */
1383 et_init_txmac(sc);
1384
1385 /* Initialize RX DMA engine */
1386 error = et_init_rxdma(sc);
1387 if (error)
1388 return error;
1389
1390 /* Initialize TX DMA engine */
1391 error = et_init_txdma(sc);
1392 if (error)
1393 return error;
1394
1395 return 0;
1396 }
1397
1398 static int
1399 et_init_tx_ring(struct et_softc *sc)
1400 {
1401 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
1402 struct et_txstatus_data *txsd = &sc->sc_tx_status;
1403 struct et_txbuf_data *tbd = &sc->sc_tx_data;
1404
1405 bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
1406 bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
1407 tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1408
1409 tbd->tbd_start_index = 0;
1410 tbd->tbd_start_wrap = 0;
1411 tbd->tbd_used = 0;
1412
1413 bzero(txsd->txsd_status, sizeof(uint32_t));
1414 bus_dmamap_sync(sc->sc_dmat, txsd->txsd_dmap, 0,
1415 txsd->txsd_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1416 return 0;
1417 }
1418
1419 static int
1420 et_init_rx_ring(struct et_softc *sc)
1421 {
1422 struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
1423 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
1424 int n;
1425
1426 for (n = 0; n < ET_RX_NRING; ++n) {
1427 struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
1428 int i, error;
1429
1430 for (i = 0; i < ET_RX_NDESC; ++i) {
1431 error = rbd->rbd_newbuf(rbd, i, 1);
1432 if (error) {
1433 aprint_error_dev(sc->sc_dev, "ring %d buf %d, newbuf failed: "
1434 "%d\n", n, i, error);
1435 return error;
1436 }
1437 }
1438 }
1439
1440 bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus));
1441 bus_dmamap_sync(sc->sc_dmat, rxsd->rxsd_dmap, 0,
1442 rxsd->rxsd_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1443
1444 bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE);
1445 bus_dmamap_sync(sc->sc_dmat, rxst_ring->rsr_dmap, 0,
1446 rxst_ring->rsr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1447
1448 return 0;
1449 }
1450
1451 static int
1452 et_init_rxdma(struct et_softc *sc)
1453 {
1454 struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
1455 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
1456 struct et_rxdesc_ring *rx_ring;
1457 int error;
1458
1459 error = et_stop_rxdma(sc);
1460 if (error) {
1461 aprint_error_dev(sc->sc_dev, "can't init RX DMA engine\n");
1462 return error;
1463 }
1464
1465 /*
1466 * Install RX status
1467 */
1468 CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr));
1469 CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr));
1470
1471 /*
1472 * Install RX stat ring
1473 */
1474 CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr));
1475 CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr));
1476 CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1);
1477 CSR_WRITE_4(sc, ET_RXSTAT_POS, 0);
1478 CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1);
1479
1480 /* Match ET_RXSTAT_POS */
1481 rxst_ring->rsr_index = 0;
1482 rxst_ring->rsr_wrap = 0;
1483
1484 /*
1485 * Install the 2nd RX descriptor ring
1486 */
1487 rx_ring = &sc->sc_rx_ring[1];
1488 CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr));
1489 CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr));
1490 CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1);
1491 CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP);
1492 CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);
1493
1494 /* Match ET_RX_RING1_POS */
1495 rx_ring->rr_index = 0;
1496 rx_ring->rr_wrap = 1;
1497
1498 /*
1499 * Install the 1st RX descriptor ring
1500 */
1501 rx_ring = &sc->sc_rx_ring[0];
1502 CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr));
1503 CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr));
1504 CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1);
1505 CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP);
1506 CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);
1507
1508 /* Match ET_RX_RING0_POS */
1509 rx_ring->rr_index = 0;
1510 rx_ring->rr_wrap = 1;
1511
1512 /*
1513 * RX intr moderation
1514 */
1515 CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts);
1516 CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay);
1517
1518 return 0;
1519 }
1520
1521 static int
1522 et_init_txdma(struct et_softc *sc)
1523 {
1524 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
1525 struct et_txstatus_data *txsd = &sc->sc_tx_status;
1526 int error;
1527
1528 error = et_stop_txdma(sc);
1529 if (error) {
1530 aprint_error_dev(sc->sc_dev, "can't init TX DMA engine\n");
1531 return error;
1532 }
1533
1534 /*
1535 * Install TX descriptor ring
1536 */
1537 CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr));
1538 CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr));
1539 CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1);
1540
1541 /*
1542 * Install TX status
1543 */
1544 CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr));
1545 CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr));
1546
1547 CSR_WRITE_4(sc, ET_TX_READY_POS, 0);
1548
1549 /* Match ET_TX_READY_POS */
1550 tx_ring->tr_ready_index = 0;
1551 tx_ring->tr_ready_wrap = 0;
1552
1553 return 0;
1554 }
1555
1556 static void
1557 et_init_mac(struct et_softc *sc)
1558 {
1559 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1560 const uint8_t *eaddr = CLLADDR(ifp->if_sadl);
1561 uint32_t val;
1562
1563 /* Reset MAC */
1564 CSR_WRITE_4(sc, ET_MAC_CFG1,
1565 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
1566 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
1567 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);
1568
1569 /*
1570 * Setup inter packet gap
1571 */
1572 val = __SHIFTIN(56, ET_IPG_NONB2B_1) |
1573 __SHIFTIN(88, ET_IPG_NONB2B_2) |
1574 __SHIFTIN(80, ET_IPG_MINIFG) |
1575 __SHIFTIN(96, ET_IPG_B2B);
1576 CSR_WRITE_4(sc, ET_IPG, val);
1577
1578 /*
1579 * Setup half duplex mode
1580 */
1581 val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
1582 __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
1583 __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
1584 ET_MAC_HDX_EXC_DEFER;
1585 CSR_WRITE_4(sc, ET_MAC_HDX, val);
1586
1587 /* Clear MAC control */
1588 CSR_WRITE_4(sc, ET_MAC_CTRL, 0);
1589
1590 /* Reset MII */
1591 CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);
1592
1593 /*
1594 * Set MAC address
1595 */
1596 val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24);
1597 CSR_WRITE_4(sc, ET_MAC_ADDR1, val);
1598 val = (eaddr[0] << 16) | (eaddr[1] << 24);
1599 CSR_WRITE_4(sc, ET_MAC_ADDR2, val);
1600
1601 /* Set max frame length */
1602 CSR_WRITE_4(sc, ET_MAX_FRMLEN,
1603 ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ifp->if_mtu + ETHER_CRC_LEN);
1604
1605 /* Bring MAC out of reset state */
1606 CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
1607 }
1608
1609 static void
1610 et_init_rxmac(struct et_softc *sc)
1611 {
1612 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1613 const uint8_t *eaddr = CLLADDR(ifp->if_sadl);
1614 uint32_t val;
1615 int i;
1616
1617 /* Disable RX MAC and WOL */
1618 CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE);
1619
1620 /*
1621 * Clear all WOL related registers
1622 */
1623 for (i = 0; i < 3; ++i)
1624 CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0);
1625 for (i = 0; i < 20; ++i)
1626 CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0);
1627
1628 /*
1629 * Set WOL source address. XXX is this necessary?
1630 */
1631 val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5];
1632 CSR_WRITE_4(sc, ET_WOL_SA_LO, val);
1633 val = (eaddr[0] << 8) | eaddr[1];
1634 CSR_WRITE_4(sc, ET_WOL_SA_HI, val);
1635
1636 /* Clear packet filters */
1637 CSR_WRITE_4(sc, ET_PKTFILT, 0);
1638
1639 /* No ucast filtering */
1640 CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0);
1641 CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0);
1642 CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0);
1643
1644 if (ifp->if_mtu > 8192) {
1645 /*
1646 * In order to transmit jumbo packets greater than 8k,
1647 * the FIFO between RX MAC and RX DMA needs to be reduced
1648 * in size to (16k - MTU). In order to implement this, we
1649 * must use "cut through" mode in the RX MAC, which chops
1650 * packets down into segments which are (max_size * 16).
1651 * In this case we selected 256 bytes, since this is the
1652 * size of the PCI-Express TLP's that the 1310 uses.
1653 */
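/* A segment size code of 16 gives 16 * 16 = 256-byte segments. */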
1654 val = __SHIFTIN(16, ET_RXMAC_MC_SEGSZ_MAX) |
1655 ET_RXMAC_MC_SEGSZ_ENABLE;
1656 } else {
1657 val = 0;
1658 }
1659 CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val);
1660
1661 CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0);
1662
1663 /* Initialize RX MAC management register */
1664 CSR_WRITE_4(sc, ET_RXMAC_MGT, 0);
1665
1666 CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0);
1667
1668 CSR_WRITE_4(sc, ET_RXMAC_MGT,
1669 ET_RXMAC_MGT_PASS_ECRC |
1670 ET_RXMAC_MGT_PASS_ELEN |
1671 ET_RXMAC_MGT_PASS_ETRUNC |
1672 ET_RXMAC_MGT_CHECK_PKT);
1673
1674 /*
1675 * Configure runt filtering (may not work on certain chip generations)
1676 */
1677 val = __SHIFTIN(ETHER_MIN_LEN, ET_PKTFILT_MINLEN) | ET_PKTFILT_FRAG;
1678 CSR_WRITE_4(sc, ET_PKTFILT, val);
1679
1680 /* Enable RX MAC but leave WOL disabled */
1681 CSR_WRITE_4(sc, ET_RXMAC_CTRL,
1682 ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE);
1683
1684 /*
1685 * Setup multicast hash and allmulti/promisc mode
1686 */
1687 et_setmulti(sc);
1688 }
1689
1690 static void
1691 et_init_txmac(struct et_softc *sc)
1692 {
1693
1694 /* Disable TX MAC and FC(?) */
1695 CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE);
1696
1697 /* No flow control yet */
1698 CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0);
1699
1700 /* Enable TX MAC but leave FC(?) disabled */
1701 CSR_WRITE_4(sc, ET_TXMAC_CTRL,
1702 ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE);
1703 }
1704
1705 static int
1706 et_start_rxdma(struct et_softc *sc)
1707 {
1708 uint32_t val = 0;
1709
1710 val |= __SHIFTIN(sc->sc_rx_data[0].rbd_bufsize,
1711 ET_RXDMA_CTRL_RING0_SIZE) |
1712 ET_RXDMA_CTRL_RING0_ENABLE;
1713 val |= __SHIFTIN(sc->sc_rx_data[1].rbd_bufsize,
1714 ET_RXDMA_CTRL_RING1_SIZE) |
1715 ET_RXDMA_CTRL_RING1_ENABLE;
1716
1717 CSR_WRITE_4(sc, ET_RXDMA_CTRL, val);
1718
1719 DELAY(5);
1720
1721 if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) {
1722 aprint_error_dev(sc->sc_dev, "can't start RX DMA engine\n");
1723 return ETIMEDOUT;
1724 }
1725 return 0;
1726 }
1727
1728 static int
1729 et_start_txdma(struct et_softc *sc)
1730 {
1731
1732 CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT);
1733 return 0;
1734 }
1735
1736 static void
1737 et_rxeof(struct et_softc *sc)
1738 {
1739 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1740 struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
1741 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
1742 uint32_t rxs_stat_ring;
1743 int rxst_wrap, rxst_index;
1744
1745 if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
1746 return;
1747
1748 bus_dmamap_sync(sc->sc_dmat, rxsd->rxsd_dmap, 0,
1749 rxsd->rxsd_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1750 bus_dmamap_sync(sc->sc_dmat, rxst_ring->rsr_dmap, 0,
1751 rxst_ring->rsr_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1752
1753 rxs_stat_ring = rxsd->rxsd_status->rxs_stat_ring;
1754 rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0;
1755 rxst_index = __SHIFTOUT(rxs_stat_ring, ET_RXS_STATRING_INDEX);
1756
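/*
 * Consume status entries until the driver's index/wrap state catches
 * up with the position the hardware reported in the RX status block.
 */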
1757 while (rxst_index != rxst_ring->rsr_index ||
1758 rxst_wrap != rxst_ring->rsr_wrap) {
1759 struct et_rxbuf_data *rbd;
1760 struct et_rxdesc_ring *rx_ring;
1761 struct et_rxstat *st;
1762 struct et_rxbuf *rb;
1763 struct mbuf *m;
1764 int buflen, buf_idx, ring_idx;
1765 uint32_t rxstat_pos, rxring_pos;
1766
1767 KASSERT(rxst_ring->rsr_index < ET_RX_NSTAT);
1768 st = &rxst_ring->rsr_stat[rxst_ring->rsr_index];
1769
1770 buflen = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_LEN);
1771 buf_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_BUFIDX);
1772 ring_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_RINGIDX);
1773
1774 if (++rxst_ring->rsr_index == ET_RX_NSTAT) {
1775 rxst_ring->rsr_index = 0;
1776 rxst_ring->rsr_wrap ^= 1;
1777 }
1778 rxstat_pos = __SHIFTIN(rxst_ring->rsr_index,
1779 ET_RXSTAT_POS_INDEX);
1780 if (rxst_ring->rsr_wrap)
1781 rxstat_pos |= ET_RXSTAT_POS_WRAP;
1782 CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos);
1783
1784 if (ring_idx >= ET_RX_NRING) {
1785 if_statinc(ifp, if_ierrors);
1786 aprint_error_dev(sc->sc_dev, "invalid ring index %d\n",
1787 ring_idx);
1788 continue;
1789 }
1790 if (buf_idx >= ET_RX_NDESC) {
1791 if_statinc(ifp, if_ierrors);
1792 aprint_error_dev(sc->sc_dev, "invalid buf index %d\n",
1793 buf_idx);
1794 continue;
1795 }
1796
1797 rbd = &sc->sc_rx_data[ring_idx];
1798 rb = &rbd->rbd_buf[buf_idx];
1799 m = rb->rb_mbuf;
1800 bus_dmamap_sync(sc->sc_dmat, rb->rb_dmap, 0,
1801 rb->rb_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1802
1803 if (rbd->rbd_newbuf(rbd, buf_idx, 0) == 0) {
1804 if (buflen < ETHER_CRC_LEN) {
1805 m_freem(m);
1806 if_statinc(ifp, if_ierrors);
1807 } else {
1808 m->m_pkthdr.len = m->m_len = buflen -
1809 ETHER_CRC_LEN;
1810 m_set_rcvif(m, ifp);
1811
1812 if_percpuq_enqueue(ifp->if_percpuq, m);
1813 }
1814 } else {
1815 if_statinc(ifp, if_ierrors);
1816 }
1817
1818 rx_ring = &sc->sc_rx_ring[ring_idx];
1819
1820 if (buf_idx != rx_ring->rr_index) {
1821 aprint_error_dev(sc->sc_dev, "WARNING!! ring %d, "
1822 "buf_idx %d, rr_idx %d\n",
1823 ring_idx, buf_idx, rx_ring->rr_index);
1824 }
1825
1826 KASSERT(rx_ring->rr_index < ET_RX_NDESC);
1827 if (++rx_ring->rr_index == ET_RX_NDESC) {
1828 rx_ring->rr_index = 0;
1829 rx_ring->rr_wrap ^= 1;
1830 }
1831 rxring_pos = __SHIFTIN(rx_ring->rr_index, ET_RX_RING_POS_INDEX);
1832 if (rx_ring->rr_wrap)
1833 rxring_pos |= ET_RX_RING_POS_WRAP;
1834 CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos);
1835 }
1836 }
1837
1838 static int
1839 et_encap(struct et_softc *sc, struct mbuf **m0)
1840 {
1841 struct mbuf *m = *m0;
1842 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
1843 struct et_txbuf_data *tbd = &sc->sc_tx_data;
1844 struct et_txdesc *td;
1845 bus_dmamap_t map;
1846 int error, maxsegs, first_idx, last_idx, i;
1847 uint32_t tx_ready_pos, last_td_ctrl2;
1848
1849 maxsegs = ET_TX_NDESC - tbd->tbd_used;
1850 if (maxsegs > ET_NSEG_MAX)
1851 maxsegs = ET_NSEG_MAX;
1852 KASSERTMSG(maxsegs >= ET_NSEG_SPARE,
1853 "not enough spare TX desc (%d)\n", maxsegs);
1854
1855 KASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
1856 first_idx = tx_ring->tr_ready_index;
1857 map = tbd->tbd_buf[first_idx].tb_dmap;
1858
1859 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
1860 BUS_DMA_NOWAIT);
1861 if (!error && map->dm_nsegs == 0) {
1862 bus_dmamap_unload(sc->sc_dmat, map);
1863 error = EFBIG;
1864 }
1865 if (error && error != EFBIG) {
1866 aprint_error_dev(sc->sc_dev, "can't load TX mbuf");
1867 goto back;
1868 }
1869 if (error) { /* error == EFBIG */
1870 struct mbuf *m_new;
1871
1872 error = 0;
1873
1874 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1875 if (m_new == NULL) {
1876 aprint_error_dev(sc->sc_dev, "can't defrag TX mbuf\n");
1877 error = ENOBUFS;
1878 goto back;
1879 }
1880
1881 m_copy_pkthdr(m_new, m);
1882 if (m->m_pkthdr.len > MHLEN) {
1883 MCLGET(m_new, M_DONTWAIT);
1884 if (!(m_new->m_flags & M_EXT)) {
1885 m_freem(m_new);
1886 error = ENOBUFS;
1887 }
1888 }
1889
1890 if (error) {
1891 aprint_error_dev(sc->sc_dev, "can't defrag TX buffer\n");
1892 goto back;
1893 }
1894
1895 m_copydata(m, 0, m->m_pkthdr.len, mtod(m_new, void *));
1896 m_freem(m);
1897 m_new->m_len = m_new->m_pkthdr.len;
1898 *m0 = m = m_new;
1899
1900 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
1901 BUS_DMA_NOWAIT);
1902 if (error || map->dm_nsegs == 0) {
1903 if (map->dm_nsegs == 0) {
1904 bus_dmamap_unload(sc->sc_dmat, map);
1905 error = EFBIG;
1906 }
1907 aprint_error_dev(sc->sc_dev, "can't load defragmented TX mbuf\n");
1908 goto back;
1909 }
1910 }
1911
1912 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1913 BUS_DMASYNC_PREWRITE);
1914
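/*
 * TX interrupt moderation: ask for a completion interrupt only once
 * every sc_tx_intr_nsegs transmitted segments; in between, the
 * periodic ET_TIMER interrupt reclaims descriptors via et_txeof().
 */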
1915 last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG;
1916 sc->sc_tx += map->dm_nsegs;
1917 if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) {
1918 sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs;
1919 last_td_ctrl2 |= ET_TDCTRL2_INTR;
1920 }
1921
1922 last_idx = -1;
1923 for (i = 0; i < map->dm_nsegs; ++i) {
1924 int idx;
1925
1926 idx = (first_idx + i) % ET_TX_NDESC;
1927 td = &tx_ring->tr_desc[idx];
1928 td->td_addr_hi = ET_ADDR_HI(map->dm_segs[i].ds_addr);
1929 td->td_addr_lo = ET_ADDR_LO(map->dm_segs[i].ds_addr);
1930 td->td_ctrl1 =
1931 __SHIFTIN(map->dm_segs[i].ds_len, ET_TDCTRL1_LEN);
1932
1933 if (i == map->dm_nsegs - 1) { /* Last frag */
1934 td->td_ctrl2 = last_td_ctrl2;
1935 last_idx = idx;
1936 }
1937
1938 KASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
1939 if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
1940 tx_ring->tr_ready_index = 0;
1941 tx_ring->tr_ready_wrap ^= 1;
1942 }
1943 }
1944 td = &tx_ring->tr_desc[first_idx];
1945 td->td_ctrl2 |= ET_TDCTRL2_FIRST_FRAG; /* First frag */
1946
1947 KASSERT(last_idx >= 0);
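/*
 * Park the loaded DMA map at the last segment's buffer slot (where the
 * mbuf is also recorded) and move that slot's idle map to the first
 * slot, so map and mbuf are unloaded and freed together in et_txeof().
 */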
1948 tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap;
1949 tbd->tbd_buf[last_idx].tb_dmap = map;
1950 tbd->tbd_buf[last_idx].tb_mbuf = m;
1951
1952 tbd->tbd_used += map->dm_nsegs;
1953 KASSERT(tbd->tbd_used <= ET_TX_NDESC);
1954
1955 bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
1956 tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1957
1958 tx_ready_pos = __SHIFTIN(tx_ring->tr_ready_index,
1959 ET_TX_READY_POS_INDEX);
1960 if (tx_ring->tr_ready_wrap)
1961 tx_ready_pos |= ET_TX_READY_POS_WRAP;
1962 CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos);
1963
1964 error = 0;
1965 back:
1966 if (error) {
1967 m_freem(m);
1968 *m0 = NULL;
1969 }
1970 return error;
1971 }
1972
1973 static void
1974 et_txeof(struct et_softc *sc)
1975 {
1976 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1977 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
1978 struct et_txbuf_data *tbd = &sc->sc_tx_data;
1979 uint32_t tx_done;
1980 int end, wrap;
1981
1982 if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
1983 return;
1984
1985 if (tbd->tbd_used == 0)
1986 return;
1987
1988 tx_done = CSR_READ_4(sc, ET_TX_DONE_POS);
1989 end = __SHIFTOUT(tx_done, ET_TX_DONE_POS_INDEX);
1990 wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0;
1991
1992 while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) {
1993 struct et_txbuf *tb;
1994
1995 KASSERT(tbd->tbd_start_index < ET_TX_NDESC);
1996 tb = &tbd->tbd_buf[tbd->tbd_start_index];
1997
1998 bzero(&tx_ring->tr_desc[tbd->tbd_start_index],
1999 sizeof(struct et_txdesc));
2000 bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
2001 tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
2002
2003 if (tb->tb_mbuf != NULL) {
2004 bus_dmamap_unload(sc->sc_dmat, tb->tb_dmap);
2005 m_freem(tb->tb_mbuf);
2006 tb->tb_mbuf = NULL;
2007 if_statinc(ifp, if_opackets);
2008 }
2009
2010 if (++tbd->tbd_start_index == ET_TX_NDESC) {
2011 tbd->tbd_start_index = 0;
2012 tbd->tbd_start_wrap ^= 1;
2013 }
2014
2015 KASSERT(tbd->tbd_used > 0);
2016 tbd->tbd_used--;
2017 }
2018
2019 if (tbd->tbd_used == 0) {
2020 callout_stop(&sc->sc_txtick);
2021 ifp->if_timer = 0;
2022 }
2023 if (tbd->tbd_used + ET_NSEG_SPARE <= ET_TX_NDESC)
2024 ifp->if_flags &= ~IFF_OACTIVE;
2025
2026 if_schedule_deferred_start(ifp);
2027 }
2028
2029 static void
2030 et_txtick(void *xsc)
2031 {
2032 struct et_softc *sc = xsc;
2033 int s;
2034
2035 s = splnet();
2036 et_txeof(sc);
2037 splx(s);
2038 }
2039
2040 static void
2041 et_tick(void *xsc)
2042 {
2043 struct et_softc *sc = xsc;
2044 int s;
2045
2046 s = splnet();
2047 mii_tick(&sc->sc_miibus);
2048 callout_schedule(&sc->sc_tick, hz);
2049 splx(s);
2050 }
2051
2052 static int
2053 et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx, int init)
2054 {
2055 return et_newbuf(rbd, buf_idx, init, MCLBYTES);
2056 }
2057
2058 static int
2059 et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx, int init)
2060 {
2061 return et_newbuf(rbd, buf_idx, init, MHLEN);
2062 }
2063
2064 static int
2065 et_newbuf(struct et_rxbuf_data *rbd, int buf_idx, int init, int len0)
2066 {
2067 struct et_softc *sc = rbd->rbd_softc;
2068 struct et_rxdesc_ring *rx_ring;
2069 struct et_rxdesc *desc;
2070 struct et_rxbuf *rb;
2071 struct mbuf *m;
2072 bus_dmamap_t dmap;
2073 int error, len;
2074
2075 KASSERT(buf_idx < ET_RX_NDESC);
2076 rb = &rbd->rbd_buf[buf_idx];
2077
2078 if (len0 >= MINCLSIZE) {
2079 MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
2080 if (m == NULL)
2081 return (ENOBUFS);
2082 MCLGET(m, init ? M_WAITOK : M_DONTWAIT);
2083 if ((m->m_flags & M_EXT) == 0) {
2084 m_freem(m);
2085 return (ENOBUFS);
2086 }
2087 len = MCLBYTES;
2088 } else {
2089 MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
2090 len = MHLEN;
2091 }
2092
2093 if (m == NULL) {
2094 error = ENOBUFS;
2095
2096 /* XXX for debug */
2097 aprint_error_dev(sc->sc_dev, "MGETHDR failed, size %d\n", len0);
2098 if (init) {
2099 return error;
2100 } else {
2101 goto back;
2102 }
2103 }
2104 m->m_len = m->m_pkthdr.len = len;
2105
2106 /*
2107 * Try to load the RX mbuf into the temporary DMA map
2108 */
2109 error = bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_mbuf_tmp_dmap, m,
2110 init ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
2111 if (error) {
2112 m_freem(m);
2113
2114 /* XXX for debug */
2115 aprint_error_dev(sc->sc_dev, "can't load RX mbuf\n");
2116 if (init) {
2117 return error;
2118 } else {
2119 goto back;
2120 }
2121 }
2122
2123 if (!init)
2124 bus_dmamap_unload(sc->sc_dmat, rb->rb_dmap);
2125 rb->rb_mbuf = m;
2126
2127 /*
2128 * Swap RX buf's DMA map with the loaded temporary one
2129 */
2130 dmap = rb->rb_dmap;
2131 rb->rb_dmap = sc->sc_mbuf_tmp_dmap;
2132 rb->rb_paddr = rb->rb_dmap->dm_segs[0].ds_addr;
2133 sc->sc_mbuf_tmp_dmap = dmap;
2134
2135 error = 0;
2136 back:
2137 rx_ring = rbd->rbd_ring;
2138 desc = &rx_ring->rr_desc[buf_idx];
2139
2140 desc->rd_addr_hi = ET_ADDR_HI(rb->rb_paddr);
2141 desc->rd_addr_lo = ET_ADDR_LO(rb->rb_paddr);
2142 desc->rd_ctrl = __SHIFTIN(buf_idx, ET_RDCTRL_BUFIDX);
2143
2144 bus_dmamap_sync(sc->sc_dmat, rx_ring->rr_dmap, 0,
2145 rx_ring->rr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
2146 return error;
2147 }
2148