1 /* $NetBSD: if_et.c,v 1.28 2019/11/28 17:09:10 maxv Exp $ */
2 /* $OpenBSD: if_et.c,v 1.12 2008/07/11 09:29:02 kevlo $ */
3 /*
4 * Copyright (c) 2007 The DragonFly Project. All rights reserved.
5 *
6 * This code is derived from software contributed to The DragonFly Project
7 * by Sepherosa Ziehau <sepherosa (at) gmail.com>
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 *
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in
17 * the documentation and/or other materials provided with the
18 * distribution.
19 * 3. Neither the name of The DragonFly Project nor the names of its
20 * contributors may be used to endorse or promote products derived
21 * from this software without specific, prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
26 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
27 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
28 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
30 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
31 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
32 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
33 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.1 2007/10/12 14:12:42 sephe Exp $
37 */
38
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: if_et.c,v 1.28 2019/11/28 17:09:10 maxv Exp $");
41
42 #include "opt_inet.h"
43 #include "vlan.h"
44
45 #include <sys/param.h>
46 #include <sys/endian.h>
47 #include <sys/systm.h>
48 #include <sys/types.h>
49 #include <sys/sockio.h>
50 #include <sys/mbuf.h>
51 #include <sys/queue.h>
52 #include <sys/kernel.h>
53 #include <sys/device.h>
54 #include <sys/callout.h>
55 #include <sys/socket.h>
56
57 #include <sys/bus.h>
58
59 #include <net/if.h>
60 #include <net/if_dl.h>
61 #include <net/if_media.h>
62 #include <net/if_ether.h>
63 #include <net/if_arp.h>
64
65 #ifdef INET
66 #include <netinet/in.h>
67 #include <netinet/in_systm.h>
68 #include <netinet/in_var.h>
69 #include <netinet/ip.h>
70 #include <netinet/if_inarp.h>
71 #endif
72
73 #include <net/bpf.h>
74
75 #include <dev/mii/mii.h>
76 #include <dev/mii/miivar.h>
77
78 #include <dev/pci/pcireg.h>
79 #include <dev/pci/pcivar.h>
80 #include <dev/pci/pcidevs.h>
81
82 #include <dev/pci/if_etreg.h>
83
84 static int et_match(device_t, cfdata_t, void *);
85 static void et_attach(device_t, device_t, void *);
86 static int et_detach(device_t, int);
87
88 static int et_miibus_readreg(device_t, int, int, uint16_t *);
89 static int et_miibus_writereg(device_t, int, int, uint16_t);
90 static void et_miibus_statchg(struct ifnet *);
91
92 static int et_init(struct ifnet *);
93 static int et_ioctl(struct ifnet *, u_long, void *);
94 static void et_start(struct ifnet *);
95 static void et_watchdog(struct ifnet *);
96 static int et_ifmedia_upd(struct ifnet *);
97 static void et_ifmedia_sts(struct ifnet *, struct ifmediareq *);
98
99 static int et_intr(void *);
100 static void et_enable_intrs(struct et_softc *, uint32_t);
101 static void et_disable_intrs(struct et_softc *);
102 static void et_rxeof(struct et_softc *);
103 static void et_txeof(struct et_softc *);
104 static void et_txtick(void *);
105
106 static int et_dma_alloc(struct et_softc *);
107 static void et_dma_free(struct et_softc *);
108 static int et_dma_mem_create(struct et_softc *, bus_size_t,
109 void **, bus_addr_t *, bus_dmamap_t *, bus_dma_segment_t *);
110 static void et_dma_mem_destroy(struct et_softc *, void *, bus_dmamap_t);
111 static int et_dma_mbuf_create(struct et_softc *);
112 static void et_dma_mbuf_destroy(struct et_softc *, int, const int[]);
113
114 static int et_init_tx_ring(struct et_softc *);
115 static int et_init_rx_ring(struct et_softc *);
116 static void et_free_tx_ring(struct et_softc *);
117 static void et_free_rx_ring(struct et_softc *);
118 static int et_encap(struct et_softc *, struct mbuf **);
119 static int et_newbuf(struct et_rxbuf_data *, int, int, int);
120 static int et_newbuf_cluster(struct et_rxbuf_data *, int, int);
121 static int et_newbuf_hdr(struct et_rxbuf_data *, int, int);
122
123 static void et_stop(struct et_softc *);
124 static int et_chip_init(struct et_softc *);
125 static void et_chip_attach(struct et_softc *);
126 static void et_init_mac(struct et_softc *);
127 static void et_init_rxmac(struct et_softc *);
128 static void et_init_txmac(struct et_softc *);
129 static int et_init_rxdma(struct et_softc *);
130 static int et_init_txdma(struct et_softc *);
131 static int et_start_rxdma(struct et_softc *);
132 static int et_start_txdma(struct et_softc *);
133 static int et_stop_rxdma(struct et_softc *);
134 static int et_stop_txdma(struct et_softc *);
135 static void et_reset(struct et_softc *);
136 static int et_bus_config(struct et_softc *);
137 static void et_get_eaddr(struct et_softc *, uint8_t[]);
138 static void et_setmulti(struct et_softc *);
139 static void et_tick(void *);
140
141 static int et_rx_intr_npkts = 32;
142 static int et_rx_intr_delay = 20; /* x10 usec */
143 static int et_tx_intr_nsegs = 128;
144 static uint32_t et_timer = 1000 * 1000 * 1000; /* nanosec */
145
146 struct et_bsize {
147 int bufsize;
148 et_newbuf_t newbuf;
149 };
150
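/*
 * Per-ring RX buffer strategy: ring 0 is refilled with small MHLEN
 * header mbufs, ring 1 with full MCLBYTES clusters.  The bufsize
 * value is the encoding written into the ET_RXDMA_CTRL_RINGx_SIZE
 * field by et_start_rxdma(); 0 presumably selects the hardware
 * default for each ring.
 */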
151 static const struct et_bsize et_bufsize[ET_RX_NRING] = {
152 { .bufsize = 0, .newbuf = et_newbuf_hdr },
153 { .bufsize = 0, .newbuf = et_newbuf_cluster },
154 };
155
156 static const struct et_product {
157 pci_vendor_id_t vendor;
158 pci_product_id_t product;
159 } et_devices[] = {
160 { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310 },
161 { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1301 }
162 };
163
164 CFATTACH_DECL_NEW(et, sizeof(struct et_softc), et_match, et_attach, et_detach,
165 NULL);
166
167 static int
168 et_match(device_t dev, cfdata_t match, void *aux)
169 {
170 struct pci_attach_args *pa = aux;
171 const struct et_product *ep;
172 int i;
173
174 for (i = 0; i < __arraycount(et_devices); i++) {
175 ep = &et_devices[i];
176 if (PCI_VENDOR(pa->pa_id) == ep->vendor &&
177 PCI_PRODUCT(pa->pa_id) == ep->product)
178 return 1;
179 }
180 return 0;
181 }
182
183 static void
184 et_attach(device_t parent, device_t self, void *aux)
185 {
186 struct et_softc *sc = device_private(self);
187 struct pci_attach_args *pa = aux;
188 pci_chipset_tag_t pc = pa->pa_pc;
189 pci_intr_handle_t ih;
190 const char *intrstr;
191 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
192 struct mii_data * const mii = &sc->sc_miibus;
193 uint32_t pmcfg;
194 pcireg_t memtype;
195 int error;
196 char intrbuf[PCI_INTRSTR_LEN];
197
198 pci_aprint_devinfo(pa, "Ethernet controller");
199
200 sc->sc_dev = self;
201
202 /*
203 * Initialize tunables
204 */
205 sc->sc_rx_intr_npkts = et_rx_intr_npkts;
206 sc->sc_rx_intr_delay = et_rx_intr_delay;
207 sc->sc_tx_intr_nsegs = et_tx_intr_nsegs;
208 sc->sc_timer = et_timer;
209
210 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, ET_PCIR_BAR);
211 if (pci_mapreg_map(pa, ET_PCIR_BAR, memtype, 0, &sc->sc_mem_bt,
212 &sc->sc_mem_bh, NULL, &sc->sc_mem_size)) {
213 aprint_error_dev(self, "could not map mem space\n");
214 return;
215 }
216
217 if (pci_intr_map(pa, &ih) != 0) {
218 aprint_error_dev(self, "could not map interrupt\n");
219 goto fail;
220 }
221
222 intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
223 sc->sc_irq_handle = pci_intr_establish_xname(pc, ih, IPL_NET, et_intr,
224 sc, device_xname(self));
225 if (sc->sc_irq_handle == NULL) {
226 aprint_error_dev(self, "could not establish interrupt");
227 if (intrstr != NULL)
228 aprint_error(" at %s", intrstr);
229 aprint_error("\n");
230 goto fail;
231 }
232 aprint_normal_dev(self, "interrupting at %s\n", intrstr);
233
234 sc->sc_dmat = pa->pa_dmat;
235 sc->sc_pct = pa->pa_pc;
236 sc->sc_pcitag = pa->pa_tag;
237
238 	if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_LUCENT_ET1301)
239 sc->sc_flags |= ET_FLAG_FASTETHER;
240
241 error = et_bus_config(sc);
242 if (error)
243 goto fail;
244
245 et_get_eaddr(sc, sc->sc_enaddr);
246
247 aprint_normal_dev(self, "Ethernet address %s\n",
248 ether_sprintf(sc->sc_enaddr));
249
250 /* Take PHY out of COMA and enable clocks. */
251 pmcfg = ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE;
252 if ((sc->sc_flags & ET_FLAG_FASTETHER) == 0)
253 pmcfg |= EM_PM_GIGEPHY_ENB;
254 CSR_WRITE_4(sc, ET_PM, pmcfg);
255
256 et_reset(sc);
257
258 et_disable_intrs(sc);
259
260 error = et_dma_alloc(sc);
261 if (error)
262 goto fail;
263
264 ifp->if_softc = sc;
265 ifp->if_mtu = ETHERMTU;
266 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
267 ifp->if_init = et_init;
268 ifp->if_ioctl = et_ioctl;
269 ifp->if_start = et_start;
270 ifp->if_watchdog = et_watchdog;
271 IFQ_SET_MAXLEN(&ifp->if_snd, ET_TX_NDESC);
272 IFQ_SET_READY(&ifp->if_snd);
273 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
274
275 et_chip_attach(sc);
276
277 mii->mii_ifp = ifp;
278 mii->mii_readreg = et_miibus_readreg;
279 mii->mii_writereg = et_miibus_writereg;
280 mii->mii_statchg = et_miibus_statchg;
281
282 sc->sc_ethercom.ec_mii = mii;
283 ifmedia_init(&mii->mii_media, 0, et_ifmedia_upd, et_ifmedia_sts);
284 mii_attach(self, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0);
285 if (LIST_FIRST(&mii->mii_phys) == NULL) {
286 aprint_error_dev(self, "no PHY found!\n");
287 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL,
288 0, NULL);
289 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
290 } else
291 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
292
293 if_attach(ifp);
294 if_deferred_start_init(ifp, NULL);
295 ether_ifattach(ifp, sc->sc_enaddr);
296
297 callout_init(&sc->sc_tick, 0);
298 callout_setfunc(&sc->sc_tick, et_tick, sc);
299 callout_init(&sc->sc_txtick, 0);
300 callout_setfunc(&sc->sc_txtick, et_txtick, sc);
301
302 if (pmf_device_register(self, NULL, NULL))
303 pmf_class_network_register(self, ifp);
304 else
305 aprint_error_dev(self, "couldn't establish power handler\n");
306
307 return;
308
309 fail:
310 et_dma_free(sc);
311 if (sc->sc_irq_handle != NULL) {
312 pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
313 sc->sc_irq_handle = NULL;
314 }
315 if (sc->sc_mem_size) {
316 bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
317 sc->sc_mem_size = 0;
318 }
319 }
320
321 static int
322 et_detach(device_t self, int flags)
323 {
324 struct et_softc *sc = device_private(self);
325 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
326 int s;
327
328 pmf_device_deregister(self);
329 s = splnet();
330 et_stop(sc);
331 splx(s);
332
333 mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);
334
335 /* Delete all remaining media. */
336 ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);
337
338 ether_ifdetach(ifp);
339 if_detach(ifp);
340 et_dma_free(sc);
341
342 if (sc->sc_irq_handle != NULL) {
343 pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
344 sc->sc_irq_handle = NULL;
345 }
346
347 if (sc->sc_mem_size) {
348 bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
349 sc->sc_mem_size = 0;
350 }
351
352 return 0;
353 }
354
355 #if 0 /* XXX XXX XXX UNUSED */
356 static int
357 et_shutdown(device_t self)
358 {
359 struct et_softc *sc = device_private(self);
360 int s;
361
362 s = splnet();
363 et_stop(sc);
364 splx(s);
365
366 return 0;
367 }
368 #endif
369
370 static int
371 et_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val)
372 {
373 struct et_softc *sc = device_private(dev);
374 uint32_t data;
375 int i, ret;
376
377 /* Stop any pending operations */
378 CSR_WRITE_4(sc, ET_MII_CMD, 0);
379
380 data = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
381 __SHIFTIN(reg, ET_MII_ADDR_REG);
382 CSR_WRITE_4(sc, ET_MII_ADDR, data);
383
384 /* Start reading */
385 CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ);
386
387 #define NRETRY 50
388
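	/* Poll until the MII management interface goes idle (50 * 50us = 2.5ms max). */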
389 for (i = 0; i < NRETRY; ++i) {
390 data = CSR_READ_4(sc, ET_MII_IND);
391 if ((data & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0)
392 break;
393 DELAY(50);
394 }
395 if (i == NRETRY) {
396 aprint_error_dev(sc->sc_dev, "read phy %d, reg %d timed out\n",
397 phy, reg);
398 ret = ETIMEDOUT;
399 goto back;
400 }
401
402 #undef NRETRY
403
404 data = CSR_READ_4(sc, ET_MII_STAT);
405 *val = __SHIFTOUT(data, ET_MII_STAT_VALUE);
406 ret = 0;
407
408 back:
409 /* Make sure that the current operation is stopped */
410 CSR_WRITE_4(sc, ET_MII_CMD, 0);
411 return ret;
412 }
413
414 static int
415 et_miibus_writereg(device_t dev, int phy, int reg, uint16_t val)
416 {
417 struct et_softc *sc = device_private(dev);
418 uint32_t data;
419 uint16_t tmp;
420 int rv = 0;
421 int i;
422
423 /* Stop any pending operations */
424 CSR_WRITE_4(sc, ET_MII_CMD, 0);
425
426 data = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
427 __SHIFTIN(reg, ET_MII_ADDR_REG);
428 CSR_WRITE_4(sc, ET_MII_ADDR, data);
429
430 /* Start writing */
431 CSR_WRITE_4(sc, ET_MII_CTRL, __SHIFTIN(val, ET_MII_CTRL_VALUE));
432
433 #define NRETRY 100
434
435 for (i = 0; i < NRETRY; ++i) {
436 data = CSR_READ_4(sc, ET_MII_IND);
437 if ((data & ET_MII_IND_BUSY) == 0)
438 break;
439 DELAY(50);
440 }
441 if (i == NRETRY) {
442 aprint_error_dev(sc->sc_dev, "write phy %d, reg %d timed out\n",
443 phy, reg);
444 et_miibus_readreg(dev, phy, reg, &tmp);
445 rv = ETIMEDOUT;
446 }
447
448 #undef NRETRY
449
450 /* Make sure that the current operation is stopped */
451 CSR_WRITE_4(sc, ET_MII_CMD, 0);
452
453 return rv;
454 }
455
456 static void
457 et_miibus_statchg(struct ifnet *ifp)
458 {
459 struct et_softc *sc = ifp->if_softc;
460 struct mii_data *mii = &sc->sc_miibus;
461 uint32_t cfg1, cfg2, ctrl;
462 int i;
463
464 sc->sc_flags &= ~ET_FLAG_LINK;
465 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
466 (IFM_ACTIVE | IFM_AVALID)) {
467 switch (IFM_SUBTYPE(mii->mii_media_active)) {
468 case IFM_10_T:
469 case IFM_100_TX:
470 sc->sc_flags |= ET_FLAG_LINK;
471 break;
472 case IFM_1000_T:
473 if ((sc->sc_flags & ET_FLAG_FASTETHER) == 0)
474 sc->sc_flags |= ET_FLAG_LINK;
475 break;
476 }
477 }
478
479 /* XXX Stop TX/RX MAC? */
480 if ((sc->sc_flags & ET_FLAG_LINK) == 0)
481 return;
482
483 /* Program MACs with resolved speed/duplex/flow-control. */
484 ctrl = CSR_READ_4(sc, ET_MAC_CTRL);
485 ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII);
486 cfg1 = CSR_READ_4(sc, ET_MAC_CFG1);
487 cfg1 &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW |
488 ET_MAC_CFG1_LOOPBACK);
489 cfg2 = CSR_READ_4(sc, ET_MAC_CFG2);
490 cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII |
491 ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM);
492 cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC |
493 __SHIFTIN(7, ET_MAC_CFG2_PREAMBLE_LEN);
494
496 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
497 cfg2 |= ET_MAC_CFG2_MODE_GMII;
498 else {
499 cfg2 |= ET_MAC_CFG2_MODE_MII;
500 ctrl |= ET_MAC_CTRL_MODE_MII;
501 }
502
503 if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
504 cfg2 |= ET_MAC_CFG2_FDX;
505 		/*
506 		 * The controller lacks automatic TX pause frame
507 		 * generation, so it would have to be handled by
508 		 * the driver.  Although the driver can send pause
509 		 * frames with an arbitrary pause time, the
510 		 * controller does not report how many free RX
511 		 * buffers it has left.  That limitation makes it
512 		 * hard to generate an XON frame in time on the
513 		 * driver side, so TX flow control is left
514 		 * disabled.
515 		 */
516 #ifdef notyet
517 if (IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE)
518 cfg1 |= ET_MAC_CFG1_TXFLOW;
519 #endif
520 if (IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE)
521 cfg1 |= ET_MAC_CFG1_RXFLOW;
522 } else
523 ctrl |= ET_MAC_CTRL_GHDX;
524
525 CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl);
526 CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2);
527 cfg1 |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN;
528 CSR_WRITE_4(sc, ET_MAC_CFG1, cfg1);
529
530 #define NRETRY 100
531
532 for (i = 0; i < NRETRY; ++i) {
533 cfg1 = CSR_READ_4(sc, ET_MAC_CFG1);
534 if ((cfg1 & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) ==
535 (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN))
536 break;
537
538 DELAY(10);
539 }
540 /* Note: Timeout always happens when cable is not plugged in. */
541
542 sc->sc_flags |= ET_FLAG_TXRX_ENABLED;
543
544 #undef NRETRY
545 }
546
547 static int
548 et_ifmedia_upd(struct ifnet *ifp)
549 {
550 struct et_softc *sc;
551 struct mii_data *mii;
552 struct mii_softc *miisc;
553
554 sc = ifp->if_softc;
555 mii = &sc->sc_miibus;
556 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
557 PHY_RESET(miisc);
558 return (mii_mediachg(mii));
559 }
560
561 static void
562 et_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
563 {
564 struct et_softc *sc;
565 struct mii_data *mii;
566
567 sc = ifp->if_softc;
568 mii = &sc->sc_miibus;
569 mii_pollstat(mii);
570 ifmr->ifm_active = mii->mii_media_active;
571 ifmr->ifm_status = mii->mii_media_status;
572 }
573
574 static void
575 et_stop(struct et_softc *sc)
576 {
577 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
578
579 callout_stop(&sc->sc_tick);
580 callout_stop(&sc->sc_txtick);
581
582 et_stop_rxdma(sc);
583 et_stop_txdma(sc);
584
585 et_disable_intrs(sc);
586
587 et_free_tx_ring(sc);
588 et_free_rx_ring(sc);
589
590 et_reset(sc);
591
592 sc->sc_tx = 0;
593 sc->sc_tx_intr = 0;
594 sc->sc_flags &= ~ET_FLAG_TXRX_ENABLED;
595
596 ifp->if_timer = 0;
597 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
598 }
599
600 static int
601 et_bus_config(struct et_softc *sc)
602 {
603 uint32_t val; //, max_plsz;
604 // uint16_t ack_latency, replay_timer;
605
606 /*
607 * Test whether EEPROM is valid
608 * NOTE: Read twice to get the correct value
609 */
610 pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_EEPROM_MISC);
611 val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_EEPROM_MISC);
612
613 if (val & ET_PCIM_EEPROM_STATUS_ERROR) {
614 aprint_error_dev(sc->sc_dev, "EEPROM status error 0x%02x\n", val);
615 return ENXIO;
616 }
617
618 /* TODO: LED */
619 #if 0
620 /*
621 * Configure ACK latency and replay timer according to
622 	 * max payload size
623 */
624 val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_DEVICE_CAPS);
625 max_plsz = val & ET_PCIM_DEVICE_CAPS_MAX_PLSZ;
626
627 switch (max_plsz) {
628 case ET_PCIV_DEVICE_CAPS_PLSZ_128:
629 ack_latency = ET_PCIV_ACK_LATENCY_128;
630 replay_timer = ET_PCIV_REPLAY_TIMER_128;
631 break;
632
633 case ET_PCIV_DEVICE_CAPS_PLSZ_256:
634 ack_latency = ET_PCIV_ACK_LATENCY_256;
635 replay_timer = ET_PCIV_REPLAY_TIMER_256;
636 break;
637
638 default:
639 ack_latency = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
640 ET_PCIR_ACK_LATENCY) >> 16;
641 replay_timer = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
642 ET_PCIR_REPLAY_TIMER) >> 16;
643 aprint_normal_dev(sc->sc_dev, "ack latency %u, replay timer %u\n",
644 ack_latency, replay_timer);
645 break;
646 }
647 if (ack_latency != 0) {
648 pci_conf_write(sc->sc_pct, sc->sc_pcitag,
649 ET_PCIR_ACK_LATENCY, ack_latency << 16);
650 pci_conf_write(sc->sc_pct, sc->sc_pcitag,
651 ET_PCIR_REPLAY_TIMER, replay_timer << 16);
652 }
653
654 /*
655 * Set L0s and L1 latency timer to 2us
656 */
657 val = ET_PCIV_L0S_LATENCY(2) | ET_PCIV_L1_LATENCY(2);
658 pci_conf_write(sc->sc_pct, sc->sc_pcitag, ET_PCIR_L0S_L1_LATENCY,
659 val << 24);
660
661 /*
662 * Set max read request size to 2048 bytes
663 */
664 val = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
665 ET_PCIR_DEVICE_CTRL) >> 16;
666 val &= ~ET_PCIM_DEVICE_CTRL_MAX_RRSZ;
667 val |= ET_PCIV_DEVICE_CTRL_RRSZ_2K;
668 pci_conf_write(sc->sc_pct, sc->sc_pcitag, ET_PCIR_DEVICE_CTRL,
669 val << 16);
670 #endif
671
672 return 0;
673 }
674
675 static void
676 et_get_eaddr(struct et_softc *sc, uint8_t eaddr[])
677 {
678 uint32_t r;
679
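	/*
	 * The station address is read from PCI configuration space,
	 * where it is presumably latched from the EEPROM at power-up.
	 */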
680 r = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_MACADDR_LO);
681 eaddr[0] = r & 0xff;
682 eaddr[1] = (r >> 8) & 0xff;
683 eaddr[2] = (r >> 16) & 0xff;
684 eaddr[3] = (r >> 24) & 0xff;
685 r = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_MACADDR_HI);
686 eaddr[4] = r & 0xff;
687 eaddr[5] = (r >> 8) & 0xff;
688 }
689
690 static void
691 et_reset(struct et_softc *sc)
692 {
693
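	/* Hold the MAC in reset, soft-reset the DMA/MAC blocks, then release. */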
694 CSR_WRITE_4(sc, ET_MAC_CFG1,
695 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
696 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
697 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);
698
699 CSR_WRITE_4(sc, ET_SWRST,
700 ET_SWRST_TXDMA | ET_SWRST_RXDMA |
701 ET_SWRST_TXMAC | ET_SWRST_RXMAC |
702 ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC);
703
704 CSR_WRITE_4(sc, ET_MAC_CFG1,
705 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
706 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC);
707 CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
708 }
709
710 static void
711 et_disable_intrs(struct et_softc *sc)
712 {
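	/* A set bit in ET_INTR_MASK masks (disables) that interrupt source. */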
713 CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
714 }
715
716 static void
717 et_enable_intrs(struct et_softc *sc, uint32_t intrs)
718 {
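	/* Unmask only the requested sources; every other bit stays masked. */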
719 CSR_WRITE_4(sc, ET_INTR_MASK, ~intrs);
720 }
721
722 static int
723 et_dma_alloc(struct et_softc *sc)
724 {
725 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
726 struct et_txstatus_data *txsd = &sc->sc_tx_status;
727 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
728 struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
729 int i, error;
730
731 /*
732 * Create TX ring DMA stuffs
733 */
734 error = et_dma_mem_create(sc, ET_TX_RING_SIZE,
735 (void **)&tx_ring->tr_desc, &tx_ring->tr_paddr, &tx_ring->tr_dmap,
736 &tx_ring->tr_seg);
737 if (error) {
738 aprint_error_dev(sc->sc_dev, "can't create TX ring DMA stuffs\n");
739 return error;
740 }
741
742 /*
743 * Create TX status DMA stuffs
744 */
745 error = et_dma_mem_create(sc, sizeof(uint32_t),
746 (void **)&txsd->txsd_status,
747 &txsd->txsd_paddr, &txsd->txsd_dmap, &txsd->txsd_seg);
748 if (error) {
749 aprint_error_dev(sc->sc_dev, "can't create TX status DMA stuffs\n");
750 return error;
751 }
752
753 /*
754 * Create DMA stuffs for RX rings
755 */
756 for (i = 0; i < ET_RX_NRING; ++i) {
757 static const uint32_t rx_ring_posreg[ET_RX_NRING] =
758 { ET_RX_RING0_POS, ET_RX_RING1_POS };
759
760 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];
761
762 error = et_dma_mem_create(sc, ET_RX_RING_SIZE,
763 (void **)&rx_ring->rr_desc,
764 &rx_ring->rr_paddr, &rx_ring->rr_dmap, &rx_ring->rr_seg);
765 if (error) {
766 aprint_error_dev(sc->sc_dev, "can't create DMA stuffs for "
767 "the %d RX ring\n", i);
768 return error;
769 }
770 rx_ring->rr_posreg = rx_ring_posreg[i];
771 }
772
773 /*
774 * Create RX stat ring DMA stuffs
775 */
776 error = et_dma_mem_create(sc, ET_RXSTAT_RING_SIZE,
777 (void **)&rxst_ring->rsr_stat,
778 &rxst_ring->rsr_paddr, &rxst_ring->rsr_dmap, &rxst_ring->rsr_seg);
779 if (error) {
780 aprint_error_dev(sc->sc_dev, "can't create RX stat ring DMA stuffs\n");
781 return error;
782 }
783
784 /*
785 * Create RX status DMA stuffs
786 */
787 error = et_dma_mem_create(sc, sizeof(struct et_rxstatus),
788 (void **)&rxsd->rxsd_status,
789 &rxsd->rxsd_paddr, &rxsd->rxsd_dmap, &rxsd->rxsd_seg);
790 if (error) {
791 aprint_error_dev(sc->sc_dev, "can't create RX status DMA stuffs\n");
792 return error;
793 }
794
795 /*
796 * Create mbuf DMA stuffs
797 */
798 error = et_dma_mbuf_create(sc);
799 if (error)
800 return error;
801
802 return 0;
803 }
804
805 static void
806 et_dma_free(struct et_softc *sc)
807 {
808 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
809 struct et_txstatus_data *txsd = &sc->sc_tx_status;
810 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
811 struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
812 int i, rx_done[ET_RX_NRING];
813
814 /*
815 * Destroy TX ring DMA stuffs
816 */
817 et_dma_mem_destroy(sc, tx_ring->tr_desc, tx_ring->tr_dmap);
818
819 /*
820 * Destroy TX status DMA stuffs
821 */
822 et_dma_mem_destroy(sc, txsd->txsd_status, txsd->txsd_dmap);
823
824 /*
825 * Destroy DMA stuffs for RX rings
826 */
827 for (i = 0; i < ET_RX_NRING; ++i) {
828 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];
829
830 et_dma_mem_destroy(sc, rx_ring->rr_desc, rx_ring->rr_dmap);
831 }
832
833 /*
834 * Destroy RX stat ring DMA stuffs
835 */
836 et_dma_mem_destroy(sc, rxst_ring->rsr_stat, rxst_ring->rsr_dmap);
837
838 /*
839 * Destroy RX status DMA stuffs
840 */
841 et_dma_mem_destroy(sc, rxsd->rxsd_status, rxsd->rxsd_dmap);
842
843 /*
844 * Destroy mbuf DMA stuffs
845 */
846 for (i = 0; i < ET_RX_NRING; ++i)
847 rx_done[i] = ET_RX_NDESC;
848 et_dma_mbuf_destroy(sc, ET_TX_NDESC, rx_done);
849 }
850
851 static int
852 et_dma_mbuf_create(struct et_softc *sc)
853 {
854 struct et_txbuf_data *tbd = &sc->sc_tx_data;
855 int i, error, rx_done[ET_RX_NRING];
856
857 /*
858 * Create spare DMA map for RX mbufs
859 */
860 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
861 BUS_DMA_NOWAIT, &sc->sc_mbuf_tmp_dmap);
862 if (error) {
863 aprint_error_dev(sc->sc_dev, "can't create spare mbuf DMA map\n");
864 return error;
865 }
866
867 /*
868 * Create DMA maps for RX mbufs
869 */
870 bzero(rx_done, sizeof(rx_done));
871 for (i = 0; i < ET_RX_NRING; ++i) {
872 struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
873 int j;
874
875 for (j = 0; j < ET_RX_NDESC; ++j) {
876 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
877 MCLBYTES, 0, BUS_DMA_NOWAIT,
878 &rbd->rbd_buf[j].rb_dmap);
879 if (error) {
880 				aprint_error_dev(sc->sc_dev, "can't create DMA map "
881 				    "for %d RX mbuf in %d RX ring\n", j, i);
882 rx_done[i] = j;
883 et_dma_mbuf_destroy(sc, 0, rx_done);
884 return error;
885 }
886 }
887 rx_done[i] = ET_RX_NDESC;
888
889 rbd->rbd_softc = sc;
890 rbd->rbd_ring = &sc->sc_rx_ring[i];
891 }
892
893 /*
894 * Create DMA maps for TX mbufs
895 */
896 for (i = 0; i < ET_TX_NDESC; ++i) {
897 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
898 0, BUS_DMA_NOWAIT, &tbd->tbd_buf[i].tb_dmap);
899 if (error) {
900 aprint_error_dev(sc->sc_dev, "can't create %d TX mbuf "
901 "DMA map\n", i);
902 et_dma_mbuf_destroy(sc, i, rx_done);
903 return error;
904 }
905 }
906
907 return 0;
908 }
909
910 static void
911 et_dma_mbuf_destroy(struct et_softc *sc, int tx_done, const int rx_done[])
912 {
913 struct et_txbuf_data *tbd = &sc->sc_tx_data;
914 int i;
915
916 /*
917 * Destroy DMA maps for RX mbufs
918 */
919 for (i = 0; i < ET_RX_NRING; ++i) {
920 struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
921 int j;
922
923 for (j = 0; j < rx_done[i]; ++j) {
924 struct et_rxbuf *rb = &rbd->rbd_buf[j];
925
926 KASSERTMSG(rb->rb_mbuf == NULL,
927 "RX mbuf in %d RX ring is not freed yet\n", i);
928 bus_dmamap_destroy(sc->sc_dmat, rb->rb_dmap);
929 }
930 }
931
932 /*
933 * Destroy DMA maps for TX mbufs
934 */
935 for (i = 0; i < tx_done; ++i) {
936 struct et_txbuf *tb = &tbd->tbd_buf[i];
937
938 KASSERTMSG(tb->tb_mbuf == NULL, "TX mbuf is not freed yet\n");
939 bus_dmamap_destroy(sc->sc_dmat, tb->tb_dmap);
940 }
941
942 /*
943 * Destroy spare mbuf DMA map
944 */
945 bus_dmamap_destroy(sc->sc_dmat, sc->sc_mbuf_tmp_dmap);
946 }
947
948 static int
949 et_dma_mem_create(struct et_softc *sc, bus_size_t size,
950 void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap, bus_dma_segment_t *seg)
951 {
952 int error, nsegs;
953
954 error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, BUS_DMA_NOWAIT,
955 dmap);
956 if (error) {
957 aprint_error_dev(sc->sc_dev, "can't create DMA map\n");
958 return error;
959 }
960
961 error = bus_dmamem_alloc(sc->sc_dmat, size, ET_ALIGN, 0, seg,
962 1, &nsegs, BUS_DMA_WAITOK);
963 if (error) {
964 aprint_error_dev(sc->sc_dev, "can't allocate DMA mem\n");
965 return error;
966 }
967
968 error = bus_dmamem_map(sc->sc_dmat, seg, nsegs,
969 size, (void **)addr, BUS_DMA_NOWAIT);
970 if (error) {
971 aprint_error_dev(sc->sc_dev, "can't map DMA mem\n");
972 return (error);
973 }
974
975 error = bus_dmamap_load(sc->sc_dmat, *dmap, *addr, size, NULL,
976 BUS_DMA_WAITOK);
977 if (error) {
978 aprint_error_dev(sc->sc_dev, "can't load DMA mem\n");
979 		bus_dmamem_free(sc->sc_dmat, seg, 1);
980 return error;
981 }
982
983 memset(*addr, 0, size);
984
985 *paddr = (*dmap)->dm_segs[0].ds_addr;
986
987 return 0;
988 }
989
990 static void
991 et_dma_mem_destroy(struct et_softc *sc, void *addr, bus_dmamap_t dmap)
992 {
993 bus_dmamap_unload(sc->sc_dmat, dmap);
994 bus_dmamem_free(sc->sc_dmat, (bus_dma_segment_t *)&addr, 1);
995 }
996
997 static void
998 et_chip_attach(struct et_softc *sc)
999 {
1000 uint32_t val;
1001
1002 /*
1003 * Perform minimal initialization
1004 */
1005
1006 /* Disable loopback */
1007 CSR_WRITE_4(sc, ET_LOOPBACK, 0);
1008
1009 /* Reset MAC */
1010 CSR_WRITE_4(sc, ET_MAC_CFG1,
1011 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
1012 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
1013 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);
1014
1015 /*
1016 * Setup half duplex mode
1017 */
1018 val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
1019 __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
1020 __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
1021 ET_MAC_HDX_EXC_DEFER;
1022 CSR_WRITE_4(sc, ET_MAC_HDX, val);
1023
1024 /* Clear MAC control */
1025 CSR_WRITE_4(sc, ET_MAC_CTRL, 0);
1026
1027 /* Reset MII */
1028 CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);
1029
1030 /* Bring MAC out of reset state */
1031 CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
1032
1033 /* Enable memory controllers */
1034 CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
1035 }
1036
1037 static int
1038 et_intr(void *xsc)
1039 {
1040 struct et_softc *sc = xsc;
1041 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1042 uint32_t intrs;
1043
1044 if ((ifp->if_flags & IFF_RUNNING) == 0)
1045 return (0);
1046
1047 intrs = CSR_READ_4(sc, ET_INTR_STATUS);
1048 if (intrs == 0 || intrs == 0xffffffff)
1049 return (0);
1050
1051 et_disable_intrs(sc);
1052 intrs &= ET_INTRS;
1053 if (intrs == 0) /* Not interested */
1054 goto back;
1055
1056 if (intrs & ET_INTR_RXEOF)
1057 et_rxeof(sc);
1058 if (intrs & (ET_INTR_TXEOF | ET_INTR_TIMER))
1059 et_txeof(sc);
1060 if (intrs & ET_INTR_TIMER)
1061 CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
1062 back:
1063 et_enable_intrs(sc, ET_INTRS);
1064
1065 return (1);
1066 }
1067
1068 static int
1069 et_init(struct ifnet *ifp)
1070 {
1071 struct et_softc *sc = ifp->if_softc;
1072 int error, i, s;
1073
1074 if (ifp->if_flags & IFF_RUNNING)
1075 return 0;
1076
1077 s = splnet();
1078
1079 et_stop(sc);
1080 et_reset(sc);
1081
1082 for (i = 0; i < ET_RX_NRING; ++i) {
1083 sc->sc_rx_data[i].rbd_bufsize = et_bufsize[i].bufsize;
1084 sc->sc_rx_data[i].rbd_newbuf = et_bufsize[i].newbuf;
1085 }
1086
1087 error = et_init_tx_ring(sc);
1088 if (error)
1089 goto back;
1090
1091 error = et_init_rx_ring(sc);
1092 if (error)
1093 goto back;
1094
1095 error = et_chip_init(sc);
1096 if (error)
1097 goto back;
1098
1099 error = et_start_rxdma(sc);
1100 if (error)
1101 goto back;
1102
1103 error = et_start_txdma(sc);
1104 if (error)
1105 goto back;
1106
1107 /* Enable interrupts. */
1108 et_enable_intrs(sc, ET_INTRS);
1109
1110 callout_schedule(&sc->sc_tick, hz);
1111
1112 CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
1113
1114 ifp->if_flags |= IFF_RUNNING;
1115 ifp->if_flags &= ~IFF_OACTIVE;
1116
1117 sc->sc_flags &= ~ET_FLAG_LINK;
1118 et_ifmedia_upd(ifp);
1119 back:
1120 if (error)
1121 et_stop(sc);
1122
1123 splx(s);
1124
1125 	return error;
1126 }
1127
1128 static int
1129 et_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1130 {
1131 struct et_softc *sc = ifp->if_softc;
1132 int s, error = 0;
1133
1134 s = splnet();
1135
1136 switch (cmd) {
1137 case SIOCSIFFLAGS:
1138 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
1139 break;
1140 if (ifp->if_flags & IFF_UP) {
1141 /*
1142 * If only the PROMISC or ALLMULTI flag changes, then
1143 * don't do a full re-init of the chip, just update
1144 * the Rx filter.
1145 */
1146 if ((ifp->if_flags & IFF_RUNNING) &&
1147 ((ifp->if_flags ^ sc->sc_if_flags) &
1148 (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
1149 et_setmulti(sc);
1150 } else {
1151 if (!(ifp->if_flags & IFF_RUNNING))
1152 et_init(ifp);
1153 }
1154 } else {
1155 if (ifp->if_flags & IFF_RUNNING)
1156 et_stop(sc);
1157 }
1158 sc->sc_if_flags = ifp->if_flags;
1159 break;
1160 default:
1161 error = ether_ioctl(ifp, cmd, data);
1162 if (error == ENETRESET) {
1163 if (ifp->if_flags & IFF_RUNNING)
1164 et_setmulti(sc);
1165 error = 0;
1166 }
1167 break;
1168 }
1169
1170 splx(s);
1171
1172 return error;
1173 }
1174
1175 static void
1176 et_start(struct ifnet *ifp)
1177 {
1178 struct et_softc *sc = ifp->if_softc;
1179 struct et_txbuf_data *tbd = &sc->sc_tx_data;
1180 int trans;
1181 struct mbuf *m;
1182
1183 if (((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) ||
1184 ((sc->sc_flags & (ET_FLAG_LINK | ET_FLAG_TXRX_ENABLED)) !=
1185 (ET_FLAG_LINK | ET_FLAG_TXRX_ENABLED)))
1186 return;
1187
1188 trans = 0;
1189 	for (;;) {
1190 		IFQ_POLL(&ifp->if_snd, m);
1191 		if (m == NULL)
1192 			break;
1193 
1194 		if ((tbd->tbd_used + ET_NSEG_SPARE) > ET_TX_NDESC) {
1195 			ifp->if_flags |= IFF_OACTIVE;
1196 			break;
1197 		}
1198 		IFQ_DEQUEUE(&ifp->if_snd, m);
1199 if (et_encap(sc, &m)) {
1200 ifp->if_oerrors++;
1201 ifp->if_flags |= IFF_OACTIVE;
1202 break;
1203 }
1204
1205 trans = 1;
1206
1207 bpf_mtap(ifp, m, BPF_D_OUT);
1208 }
1209
1210 if (trans) {
1211 callout_schedule(&sc->sc_txtick, hz);
1212 ifp->if_timer = 5;
1213 }
1214 }
1215
1216 static void
1217 et_watchdog(struct ifnet *ifp)
1218 {
1219 struct et_softc *sc = ifp->if_softc;
1220 aprint_error_dev(sc->sc_dev, "watchdog timed out\n");
1221
1222 ifp->if_flags &= ~IFF_RUNNING;
1223 et_init(ifp);
1224 et_start(ifp);
1225 }
1226
1227 static int
1228 et_stop_rxdma(struct et_softc *sc)
1229 {
1230
1231 CSR_WRITE_4(sc, ET_RXDMA_CTRL,
1232 ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE);
1233
1234 DELAY(5);
1235 if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) {
1236 aprint_error_dev(sc->sc_dev, "can't stop RX DMA engine\n");
1237 return ETIMEDOUT;
1238 }
1239 return 0;
1240 }
1241
1242 static int
1243 et_stop_txdma(struct et_softc *sc)
1244 {
1245
1246 CSR_WRITE_4(sc, ET_TXDMA_CTRL,
1247 ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT);
1248 return 0;
1249 }
1250
1251 static void
1252 et_free_tx_ring(struct et_softc *sc)
1253 {
1254 struct et_txbuf_data *tbd = &sc->sc_tx_data;
1255 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
1256 int i;
1257
1258 for (i = 0; i < ET_TX_NDESC; ++i) {
1259 struct et_txbuf *tb = &tbd->tbd_buf[i];
1260
1261 if (tb->tb_mbuf != NULL) {
1262 bus_dmamap_unload(sc->sc_dmat, tb->tb_dmap);
1263 m_freem(tb->tb_mbuf);
1264 tb->tb_mbuf = NULL;
1265 }
1266 }
1267
1268 bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
1269 bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
1270 tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1271 }
1272
1273 static void
1274 et_free_rx_ring(struct et_softc *sc)
1275 {
1276 int n;
1277
1278 for (n = 0; n < ET_RX_NRING; ++n) {
1279 struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
1280 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[n];
1281 int i;
1282
1283 for (i = 0; i < ET_RX_NDESC; ++i) {
1284 struct et_rxbuf *rb = &rbd->rbd_buf[i];
1285
1286 if (rb->rb_mbuf != NULL) {
1287 bus_dmamap_unload(sc->sc_dmat, rb->rb_dmap);
1288 m_freem(rb->rb_mbuf);
1289 rb->rb_mbuf = NULL;
1290 }
1291 }
1292
1293 bzero(rx_ring->rr_desc, ET_RX_RING_SIZE);
1294 bus_dmamap_sync(sc->sc_dmat, rx_ring->rr_dmap, 0,
1295 rx_ring->rr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1296 }
1297 }
1298
1299 static void
1300 et_setmulti(struct et_softc *sc)
1301 {
1302 struct ethercom *ec = &sc->sc_ethercom;
1303 struct ifnet *ifp = &ec->ec_if;
1304 uint32_t hash[4] = { 0, 0, 0, 0 };
1305 uint32_t rxmac_ctrl, pktfilt;
1306 struct ether_multi *enm;
1307 struct ether_multistep step;
1308 int i, count;
1309
1310 pktfilt = CSR_READ_4(sc, ET_PKTFILT);
1311 rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL);
1312
1313 pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST);
1314 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
1315 rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT;
1316 goto back;
1317 }
1318
1319 count = 0;
1320 ETHER_LOCK(ec);
1321 ETHER_FIRST_MULTI(step, ec, enm);
1322 while (enm != NULL) {
1323 uint32_t *hp, h;
1324
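		/*
		 * Use the top 7 bits of the big-endian CRC as an index
		 * into the 128-bit multicast hash spread across the four
		 * 32-bit ET_MULTI_HASH registers.
		 */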
1325 h = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
1326 h = (h & 0x3f800000) >> 23;
1327
1328 hp = &hash[0];
1329 if (h >= 32 && h < 64) {
1330 h -= 32;
1331 hp = &hash[1];
1332 } else if (h >= 64 && h < 96) {
1333 h -= 64;
1334 hp = &hash[2];
1335 } else if (h >= 96) {
1336 h -= 96;
1337 hp = &hash[3];
1338 }
1339 *hp |= (1 << h);
1340
1341 ++count;
1342 ETHER_NEXT_MULTI(step, enm);
1343 }
1344 ETHER_UNLOCK(ec);
1345
1346 for (i = 0; i < 4; ++i)
1347 CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]);
1348
1349 if (count > 0)
1350 pktfilt |= ET_PKTFILT_MCAST;
1351 rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT;
1352 back:
1353 CSR_WRITE_4(sc, ET_PKTFILT, pktfilt);
1354 CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl);
1355 }
1356
1357 static int
1358 et_chip_init(struct et_softc *sc)
1359 {
1360 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1361 uint32_t rxq_end;
1362 int error;
1363
1364 /*
1365 * Split internal memory between TX and RX according to MTU
1366 */
1367 if (ifp->if_mtu < 2048)
1368 rxq_end = 0x2bc;
1369 else if (ifp->if_mtu < 8192)
1370 rxq_end = 0x1ff;
1371 else
1372 rxq_end = 0x1b3;
1373 CSR_WRITE_4(sc, ET_RXQ_START, 0);
1374 CSR_WRITE_4(sc, ET_RXQ_END, rxq_end);
1375 CSR_WRITE_4(sc, ET_TXQ_START, rxq_end + 1);
1376 CSR_WRITE_4(sc, ET_TXQ_END, ET_INTERN_MEM_END);
1377
1378 /* No loopback */
1379 CSR_WRITE_4(sc, ET_LOOPBACK, 0);
1380
1381 /* Clear MSI configure */
1382 CSR_WRITE_4(sc, ET_MSI_CFG, 0);
1383
1384 /* Disable timer */
1385 CSR_WRITE_4(sc, ET_TIMER, 0);
1386
1387 /* Initialize MAC */
1388 et_init_mac(sc);
1389
1390 /* Enable memory controllers */
1391 CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
1392
1393 /* Initialize RX MAC */
1394 et_init_rxmac(sc);
1395
1396 /* Initialize TX MAC */
1397 et_init_txmac(sc);
1398
1399 /* Initialize RX DMA engine */
1400 error = et_init_rxdma(sc);
1401 if (error)
1402 return error;
1403
1404 /* Initialize TX DMA engine */
1405 error = et_init_txdma(sc);
1406 if (error)
1407 return error;
1408
1409 return 0;
1410 }
1411
1412 static int
1413 et_init_tx_ring(struct et_softc *sc)
1414 {
1415 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
1416 struct et_txstatus_data *txsd = &sc->sc_tx_status;
1417 struct et_txbuf_data *tbd = &sc->sc_tx_data;
1418
1419 bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
1420 bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
1421 tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1422
1423 tbd->tbd_start_index = 0;
1424 tbd->tbd_start_wrap = 0;
1425 tbd->tbd_used = 0;
1426
1427 bzero(txsd->txsd_status, sizeof(uint32_t));
1428 bus_dmamap_sync(sc->sc_dmat, txsd->txsd_dmap, 0,
1429 txsd->txsd_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1430 return 0;
1431 }
1432
1433 static int
1434 et_init_rx_ring(struct et_softc *sc)
1435 {
1436 struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
1437 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
1438 int n;
1439
1440 for (n = 0; n < ET_RX_NRING; ++n) {
1441 struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
1442 int i, error;
1443
1444 for (i = 0; i < ET_RX_NDESC; ++i) {
1445 error = rbd->rbd_newbuf(rbd, i, 1);
1446 if (error) {
1447 				aprint_error_dev(sc->sc_dev, "newbuf failed for ring %d, "
1448 				    "buf %d: error %d\n", n, i, error);
1449 return error;
1450 }
1451 }
1452 }
1453
1454 bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus));
1455 bus_dmamap_sync(sc->sc_dmat, rxsd->rxsd_dmap, 0,
1456 rxsd->rxsd_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1457
1458 bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE);
1459 bus_dmamap_sync(sc->sc_dmat, rxst_ring->rsr_dmap, 0,
1460 rxst_ring->rsr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1461
1462 return 0;
1463 }
1464
1465 static int
1466 et_init_rxdma(struct et_softc *sc)
1467 {
1468 struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
1469 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
1470 struct et_rxdesc_ring *rx_ring;
1471 int error;
1472
1473 error = et_stop_rxdma(sc);
1474 if (error) {
1475 aprint_error_dev(sc->sc_dev, "can't init RX DMA engine\n");
1476 return error;
1477 }
1478
1479 /*
1480 * Install RX status
1481 */
1482 CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr));
1483 CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr));
1484
1485 /*
1486 * Install RX stat ring
1487 */
1488 CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr));
1489 CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr));
1490 CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1);
1491 CSR_WRITE_4(sc, ET_RXSTAT_POS, 0);
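	/*
	 * The *_MINCNT registers appear to set a low-water mark at
	 * roughly 15% of the ring; the exact hardware semantics are an
	 * assumption here.
	 */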
1492 CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1);
1493
1494 /* Match ET_RXSTAT_POS */
1495 rxst_ring->rsr_index = 0;
1496 rxst_ring->rsr_wrap = 0;
1497
1498 /*
1499 * Install the 2nd RX descriptor ring
1500 */
1501 rx_ring = &sc->sc_rx_ring[1];
1502 CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr));
1503 CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr));
1504 CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1);
1505 CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP);
1506 CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);
1507
1508 /* Match ET_RX_RING1_POS */
1509 rx_ring->rr_index = 0;
1510 rx_ring->rr_wrap = 1;
1511
1512 /*
1513 * Install the 1st RX descriptor ring
1514 */
1515 rx_ring = &sc->sc_rx_ring[0];
1516 CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr));
1517 CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr));
1518 CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1);
1519 CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP);
1520 CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);
1521
1522 /* Match ET_RX_RING0_POS */
1523 rx_ring->rr_index = 0;
1524 rx_ring->rr_wrap = 1;
1525
1526 /*
1527 * RX intr moderation
1528 */
1529 CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts);
1530 CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay);
1531
1532 return 0;
1533 }
1534
1535 static int
1536 et_init_txdma(struct et_softc *sc)
1537 {
1538 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
1539 struct et_txstatus_data *txsd = &sc->sc_tx_status;
1540 int error;
1541
1542 error = et_stop_txdma(sc);
1543 if (error) {
1544 aprint_error_dev(sc->sc_dev, "can't init TX DMA engine\n");
1545 return error;
1546 }
1547
1548 /*
1549 * Install TX descriptor ring
1550 */
1551 CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr));
1552 CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr));
1553 CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1);
1554
1555 /*
1556 * Install TX status
1557 */
1558 CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr));
1559 CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr));
1560
1561 CSR_WRITE_4(sc, ET_TX_READY_POS, 0);
1562
1563 /* Match ET_TX_READY_POS */
1564 tx_ring->tr_ready_index = 0;
1565 tx_ring->tr_ready_wrap = 0;
1566
1567 return 0;
1568 }
1569
1570 static void
1571 et_init_mac(struct et_softc *sc)
1572 {
1573 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1574 const uint8_t *eaddr = CLLADDR(ifp->if_sadl);
1575 uint32_t val;
1576
1577 /* Reset MAC */
1578 CSR_WRITE_4(sc, ET_MAC_CFG1,
1579 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
1580 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
1581 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);
1582
1583 /*
1584 * Setup inter packet gap
1585 */
1586 val = __SHIFTIN(56, ET_IPG_NONB2B_1) |
1587 __SHIFTIN(88, ET_IPG_NONB2B_2) |
1588 __SHIFTIN(80, ET_IPG_MINIFG) |
1589 __SHIFTIN(96, ET_IPG_B2B);
1590 CSR_WRITE_4(sc, ET_IPG, val);
1591
1592 /*
1593 * Setup half duplex mode
1594 */
1595 val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
1596 __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
1597 __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
1598 ET_MAC_HDX_EXC_DEFER;
1599 CSR_WRITE_4(sc, ET_MAC_HDX, val);
1600
1601 /* Clear MAC control */
1602 CSR_WRITE_4(sc, ET_MAC_CTRL, 0);
1603
1604 /* Reset MII */
1605 CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);
1606
1607 /*
1608 * Set MAC address
1609 */
1610 val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24);
1611 CSR_WRITE_4(sc, ET_MAC_ADDR1, val);
1612 val = (eaddr[0] << 16) | (eaddr[1] << 24);
1613 CSR_WRITE_4(sc, ET_MAC_ADDR2, val);
1614
1615 /* Set max frame length */
1616 CSR_WRITE_4(sc, ET_MAX_FRMLEN,
1617 ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ifp->if_mtu + ETHER_CRC_LEN);
1618
1619 /* Bring MAC out of reset state */
1620 CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
1621 }
1622
1623 static void
1624 et_init_rxmac(struct et_softc *sc)
1625 {
1626 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1627 const uint8_t *eaddr = CLLADDR(ifp->if_sadl);
1628 uint32_t val;
1629 int i;
1630
1631 /* Disable RX MAC and WOL */
1632 CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE);
1633
1634 /*
1635 * Clear all WOL related registers
1636 */
1637 for (i = 0; i < 3; ++i)
1638 CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0);
1639 for (i = 0; i < 20; ++i)
1640 CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0);
1641
1642 /*
1643 * Set WOL source address. XXX is this necessary?
1644 */
1645 val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5];
1646 CSR_WRITE_4(sc, ET_WOL_SA_LO, val);
1647 val = (eaddr[0] << 8) | eaddr[1];
1648 CSR_WRITE_4(sc, ET_WOL_SA_HI, val);
1649
1650 /* Clear packet filters */
1651 CSR_WRITE_4(sc, ET_PKTFILT, 0);
1652
1653 /* No ucast filtering */
1654 CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0);
1655 CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0);
1656 CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0);
1657
1658 if (ifp->if_mtu > 8192) {
1659 /*
1660 * In order to transmit jumbo packets greater than 8k,
1661 * the FIFO between RX MAC and RX DMA needs to be reduced
1662 * in size to (16k - MTU). In order to implement this, we
1663 * must use "cut through" mode in the RX MAC, which chops
1664 * packets down into segments which are (max_size * 16).
1665 * In this case we selected 256 bytes, since this is the
1666 * size of the PCI-Express TLP's that the 1310 uses.
1667 */
1668 val = __SHIFTIN(16, ET_RXMAC_MC_SEGSZ_MAX) |
1669 ET_RXMAC_MC_SEGSZ_ENABLE;
1670 } else {
1671 val = 0;
1672 }
1673 CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val);
1674
1675 CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0);
1676
1677 /* Initialize RX MAC management register */
1678 CSR_WRITE_4(sc, ET_RXMAC_MGT, 0);
1679
1680 CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0);
1681
1682 CSR_WRITE_4(sc, ET_RXMAC_MGT,
1683 ET_RXMAC_MGT_PASS_ECRC |
1684 ET_RXMAC_MGT_PASS_ELEN |
1685 ET_RXMAC_MGT_PASS_ETRUNC |
1686 ET_RXMAC_MGT_CHECK_PKT);
1687
1688 /*
1689 	 * Configure runt filtering (may not work on certain chip generations)
1690 */
1691 val = __SHIFTIN(ETHER_MIN_LEN, ET_PKTFILT_MINLEN) | ET_PKTFILT_FRAG;
1692 CSR_WRITE_4(sc, ET_PKTFILT, val);
1693
1694 /* Enable RX MAC but leave WOL disabled */
1695 CSR_WRITE_4(sc, ET_RXMAC_CTRL,
1696 ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE);
1697
1698 /*
1699 * Setup multicast hash and allmulti/promisc mode
1700 */
1701 et_setmulti(sc);
1702 }
1703
1704 static void
1705 et_init_txmac(struct et_softc *sc)
1706 {
1707
1708 /* Disable TX MAC and FC(?) */
1709 CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE);
1710
1711 /* No flow control yet */
1712 CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0);
1713
1714 	/* Enable TX MAC but leave FC(?) disabled */
1715 CSR_WRITE_4(sc, ET_TXMAC_CTRL,
1716 ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE);
1717 }
1718
1719 static int
1720 et_start_rxdma(struct et_softc *sc)
1721 {
1722 uint32_t val = 0;
1723
1724 val |= __SHIFTIN(sc->sc_rx_data[0].rbd_bufsize,
1725 ET_RXDMA_CTRL_RING0_SIZE) |
1726 ET_RXDMA_CTRL_RING0_ENABLE;
1727 val |= __SHIFTIN(sc->sc_rx_data[1].rbd_bufsize,
1728 ET_RXDMA_CTRL_RING1_SIZE) |
1729 ET_RXDMA_CTRL_RING1_ENABLE;
1730
1731 CSR_WRITE_4(sc, ET_RXDMA_CTRL, val);
1732
1733 DELAY(5);
1734
1735 if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) {
1736 aprint_error_dev(sc->sc_dev, "can't start RX DMA engine\n");
1737 return ETIMEDOUT;
1738 }
1739 return 0;
1740 }
1741
1742 static int
1743 et_start_txdma(struct et_softc *sc)
1744 {
1745
1746 CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT);
1747 return 0;
1748 }
1749
1750 static void
1751 et_rxeof(struct et_softc *sc)
1752 {
1753 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1754 struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
1755 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
1756 uint32_t rxs_stat_ring;
1757 int rxst_wrap, rxst_index;
1758
1759 if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
1760 return;
1761
1762 bus_dmamap_sync(sc->sc_dmat, rxsd->rxsd_dmap, 0,
1763 rxsd->rxsd_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1764 bus_dmamap_sync(sc->sc_dmat, rxst_ring->rsr_dmap, 0,
1765 rxst_ring->rsr_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1766
1767 rxs_stat_ring = rxsd->rxsd_status->rxs_stat_ring;
1768 rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0;
1769 rxst_index = __SHIFTOUT(rxs_stat_ring, ET_RXS_STATRING_INDEX);
1770
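	/*
	 * Consume RX status entries until the driver's index/wrap pair
	 * matches the position reported by the hardware; the wrap bit
	 * disambiguates a full ring from an empty one.
	 */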
1771 while (rxst_index != rxst_ring->rsr_index ||
1772 rxst_wrap != rxst_ring->rsr_wrap) {
1773 struct et_rxbuf_data *rbd;
1774 struct et_rxdesc_ring *rx_ring;
1775 struct et_rxstat *st;
1776 struct et_rxbuf *rb;
1777 struct mbuf *m;
1778 int buflen, buf_idx, ring_idx;
1779 uint32_t rxstat_pos, rxring_pos;
1780
1781 KASSERT(rxst_ring->rsr_index < ET_RX_NSTAT);
1782 st = &rxst_ring->rsr_stat[rxst_ring->rsr_index];
1783
1784 buflen = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_LEN);
1785 buf_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_BUFIDX);
1786 ring_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_RINGIDX);
1787
1788 if (++rxst_ring->rsr_index == ET_RX_NSTAT) {
1789 rxst_ring->rsr_index = 0;
1790 rxst_ring->rsr_wrap ^= 1;
1791 }
1792 rxstat_pos = __SHIFTIN(rxst_ring->rsr_index,
1793 ET_RXSTAT_POS_INDEX);
1794 if (rxst_ring->rsr_wrap)
1795 rxstat_pos |= ET_RXSTAT_POS_WRAP;
1796 CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos);
1797
1798 if (ring_idx >= ET_RX_NRING) {
1799 ifp->if_ierrors++;
1800 aprint_error_dev(sc->sc_dev, "invalid ring index %d\n",
1801 ring_idx);
1802 continue;
1803 }
1804 if (buf_idx >= ET_RX_NDESC) {
1805 ifp->if_ierrors++;
1806 aprint_error_dev(sc->sc_dev, "invalid buf index %d\n",
1807 buf_idx);
1808 continue;
1809 }
1810
1811 rbd = &sc->sc_rx_data[ring_idx];
1812 rb = &rbd->rbd_buf[buf_idx];
1813 m = rb->rb_mbuf;
1814 bus_dmamap_sync(sc->sc_dmat, rb->rb_dmap, 0,
1815 rb->rb_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1816
1817 if (rbd->rbd_newbuf(rbd, buf_idx, 0) == 0) {
1818 if (buflen < ETHER_CRC_LEN) {
1819 m_freem(m);
1820 ifp->if_ierrors++;
1821 } else {
1822 m->m_pkthdr.len = m->m_len = buflen -
1823 ETHER_CRC_LEN;
1824 m_set_rcvif(m, ifp);
1825
1826 if_percpuq_enqueue(ifp->if_percpuq, m);
1827 }
1828 } else {
1829 ifp->if_ierrors++;
1830 }
1831
1832 rx_ring = &sc->sc_rx_ring[ring_idx];
1833
1834 if (buf_idx != rx_ring->rr_index) {
1835 aprint_error_dev(sc->sc_dev, "WARNING!! ring %d, "
1836 "buf_idx %d, rr_idx %d\n",
1837 ring_idx, buf_idx, rx_ring->rr_index);
1838 }
1839
1840 KASSERT(rx_ring->rr_index < ET_RX_NDESC);
1841 if (++rx_ring->rr_index == ET_RX_NDESC) {
1842 rx_ring->rr_index = 0;
1843 rx_ring->rr_wrap ^= 1;
1844 }
1845 rxring_pos = __SHIFTIN(rx_ring->rr_index, ET_RX_RING_POS_INDEX);
1846 if (rx_ring->rr_wrap)
1847 rxring_pos |= ET_RX_RING_POS_WRAP;
1848 CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos);
1849 }
1850 }
1851
1852 static int
1853 et_encap(struct et_softc *sc, struct mbuf **m0)
1854 {
1855 struct mbuf *m = *m0;
1856 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
1857 struct et_txbuf_data *tbd = &sc->sc_tx_data;
1858 struct et_txdesc *td;
1859 bus_dmamap_t map;
1860 int error, maxsegs, first_idx, last_idx, i;
1861 uint32_t tx_ready_pos, last_td_ctrl2;
1862
1863 maxsegs = ET_TX_NDESC - tbd->tbd_used;
1864 if (maxsegs > ET_NSEG_MAX)
1865 maxsegs = ET_NSEG_MAX;
1866 KASSERTMSG(maxsegs >= ET_NSEG_SPARE,
1867 "not enough spare TX desc (%d)\n", maxsegs);
1868
1869 KASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
1870 first_idx = tx_ring->tr_ready_index;
1871 map = tbd->tbd_buf[first_idx].tb_dmap;
1872
1873 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
1874 BUS_DMA_NOWAIT);
1875 if (!error && map->dm_nsegs == 0) {
1876 bus_dmamap_unload(sc->sc_dmat, map);
1877 error = EFBIG;
1878 }
1879 if (error && error != EFBIG) {
1880 		aprint_error_dev(sc->sc_dev, "can't load TX mbuf\n");
1881 goto back;
1882 }
1883 if (error) { /* error == EFBIG */
1884 struct mbuf *m_new;
1885
1886 error = 0;
1887
1888 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1889 if (m_new == NULL) {
1890 aprint_error_dev(sc->sc_dev, "can't defrag TX mbuf\n");
1891 error = ENOBUFS;
1892 goto back;
1893 }
1894
1895 m_copy_pkthdr(m_new, m);
1896 if (m->m_pkthdr.len > MHLEN) {
1897 MCLGET(m_new, M_DONTWAIT);
1898 if (!(m_new->m_flags & M_EXT)) {
1899 m_freem(m_new);
1900 error = ENOBUFS;
1901 }
1902 }
1903
1904 if (error) {
1905 aprint_error_dev(sc->sc_dev, "can't defrag TX buffer\n");
1906 goto back;
1907 }
1908
1909 m_copydata(m, 0, m->m_pkthdr.len, mtod(m_new, void *));
1910 m_freem(m);
1911 m_new->m_len = m_new->m_pkthdr.len;
1912 *m0 = m = m_new;
1913
1914 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
1915 BUS_DMA_NOWAIT);
1916 if (error || map->dm_nsegs == 0) {
1917 if (map->dm_nsegs == 0) {
1918 bus_dmamap_unload(sc->sc_dmat, map);
1919 error = EFBIG;
1920 }
1921 			aprint_error_dev(sc->sc_dev, "can't load defragmented TX mbuf\n");
1922 goto back;
1923 }
1924 }
1925
1926 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1927 BUS_DMASYNC_PREWRITE);
1928
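	/*
	 * TX interrupt moderation: request a completion interrupt only
	 * once every sc_tx_intr_nsegs transmitted segments; the ET_TIMER
	 * interrupt reclaims any descriptors left over in between.
	 */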
1929 last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG;
1930 sc->sc_tx += map->dm_nsegs;
1931 if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) {
1932 sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs;
1933 last_td_ctrl2 |= ET_TDCTRL2_INTR;
1934 }
1935
1936 last_idx = -1;
1937 for (i = 0; i < map->dm_nsegs; ++i) {
1938 int idx;
1939
1940 idx = (first_idx + i) % ET_TX_NDESC;
1941 td = &tx_ring->tr_desc[idx];
1942 td->td_addr_hi = ET_ADDR_HI(map->dm_segs[i].ds_addr);
1943 td->td_addr_lo = ET_ADDR_LO(map->dm_segs[i].ds_addr);
1944 td->td_ctrl1 =
1945 __SHIFTIN(map->dm_segs[i].ds_len, ET_TDCTRL1_LEN);
1946
1947 if (i == map->dm_nsegs - 1) { /* Last frag */
1948 td->td_ctrl2 = last_td_ctrl2;
1949 last_idx = idx;
1950 }
1951
1952 KASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
1953 if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
1954 tx_ring->tr_ready_index = 0;
1955 tx_ring->tr_ready_wrap ^= 1;
1956 }
1957 }
1958 td = &tx_ring->tr_desc[first_idx];
1959 td->td_ctrl2 |= ET_TDCTRL2_FIRST_FRAG; /* First frag */
1960
1961 KASSERT(last_idx >= 0);
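	/*
	 * The mbuf was loaded with the first slot's DMA map.  Swap that
	 * map with the last slot's so the map and the mbuf are both
	 * released from the same (last) slot when et_txeof() reclaims it.
	 */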
1962 tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap;
1963 tbd->tbd_buf[last_idx].tb_dmap = map;
1964 tbd->tbd_buf[last_idx].tb_mbuf = m;
1965
1966 tbd->tbd_used += map->dm_nsegs;
1967 KASSERT(tbd->tbd_used <= ET_TX_NDESC);
1968
1969 bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
1970 tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1971
1972 tx_ready_pos = __SHIFTIN(tx_ring->tr_ready_index,
1973 ET_TX_READY_POS_INDEX);
1974 if (tx_ring->tr_ready_wrap)
1975 tx_ready_pos |= ET_TX_READY_POS_WRAP;
1976 CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos);
1977
1978 error = 0;
1979 back:
1980 if (error) {
1981 m_freem(m);
1982 *m0 = NULL;
1983 }
1984 return error;
1985 }
1986
1987 static void
1988 et_txeof(struct et_softc *sc)
1989 {
1990 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1991 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
1992 struct et_txbuf_data *tbd = &sc->sc_tx_data;
1993 uint32_t tx_done;
1994 int end, wrap;
1995
1996 if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
1997 return;
1998
1999 if (tbd->tbd_used == 0)
2000 return;
2001
2002 tx_done = CSR_READ_4(sc, ET_TX_DONE_POS);
2003 end = __SHIFTOUT(tx_done, ET_TX_DONE_POS_INDEX);
2004 wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0;
2005
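	/*
	 * Walk the ring from the oldest outstanding descriptor up to the
	 * index/wrap position the hardware reports in ET_TX_DONE_POS.
	 */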
2006 while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) {
2007 struct et_txbuf *tb;
2008
2009 KASSERT(tbd->tbd_start_index < ET_TX_NDESC);
2010 tb = &tbd->tbd_buf[tbd->tbd_start_index];
2011
2012 bzero(&tx_ring->tr_desc[tbd->tbd_start_index],
2013 sizeof(struct et_txdesc));
2014 bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
2015 tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
2016
2017 if (tb->tb_mbuf != NULL) {
2018 bus_dmamap_unload(sc->sc_dmat, tb->tb_dmap);
2019 m_freem(tb->tb_mbuf);
2020 tb->tb_mbuf = NULL;
2021 ifp->if_opackets++;
2022 }
2023
2024 if (++tbd->tbd_start_index == ET_TX_NDESC) {
2025 tbd->tbd_start_index = 0;
2026 tbd->tbd_start_wrap ^= 1;
2027 }
2028
2029 KASSERT(tbd->tbd_used > 0);
2030 tbd->tbd_used--;
2031 }
2032
2033 if (tbd->tbd_used == 0) {
2034 callout_stop(&sc->sc_txtick);
2035 ifp->if_timer = 0;
2036 }
2037 if (tbd->tbd_used + ET_NSEG_SPARE <= ET_TX_NDESC)
2038 ifp->if_flags &= ~IFF_OACTIVE;
2039
2040 if_schedule_deferred_start(ifp);
2041 }
2042
2043 static void
2044 et_txtick(void *xsc)
2045 {
2046 struct et_softc *sc = xsc;
2047 int s;
2048
2049 s = splnet();
2050 et_txeof(sc);
2051 splx(s);
2052 }
2053
2054 static void
2055 et_tick(void *xsc)
2056 {
2057 struct et_softc *sc = xsc;
2058 int s;
2059
2060 s = splnet();
2061 mii_tick(&sc->sc_miibus);
2062 callout_schedule(&sc->sc_tick, hz);
2063 splx(s);
2064 }
2065
2066 static int
2067 et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx, int init)
2068 {
2069 return et_newbuf(rbd, buf_idx, init, MCLBYTES);
2070 }
2071
2072 static int
2073 et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx, int init)
2074 {
2075 return et_newbuf(rbd, buf_idx, init, MHLEN);
2076 }
2077
2078 static int
2079 et_newbuf(struct et_rxbuf_data *rbd, int buf_idx, int init, int len0)
2080 {
2081 struct et_softc *sc = rbd->rbd_softc;
2082 struct et_rxdesc_ring *rx_ring;
2083 struct et_rxdesc *desc;
2084 struct et_rxbuf *rb;
2085 struct mbuf *m;
2086 bus_dmamap_t dmap;
2087 int error, len;
2088
2089 KASSERT(buf_idx < ET_RX_NDESC);
2090 rb = &rbd->rbd_buf[buf_idx];
2091
2092 if (len0 >= MINCLSIZE) {
2093 MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
2094 if (m == NULL)
2095 return (ENOBUFS);
2096 MCLGET(m, init ? M_WAITOK : M_DONTWAIT);
2097 if ((m->m_flags & M_EXT) == 0) {
2098 m_freem(m);
2099 return (ENOBUFS);
2100 }
2101 len = MCLBYTES;
2102 } else {
2103 MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
2104 len = MHLEN;
2105 }
2106
2107 if (m == NULL) {
2108 error = ENOBUFS;
2109
2110 /* XXX for debug */
2111 		aprint_error_dev(sc->sc_dev, "MGETHDR failed, size %d\n", len0);
2112 if (init) {
2113 return error;
2114 } else {
2115 goto back;
2116 }
2117 }
2118 m->m_len = m->m_pkthdr.len = len;
2119
2120 	/*
2121 	 * Try to load the RX mbuf with the spare (temporary) DMA map
2122 	 */
2123 error = bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_mbuf_tmp_dmap, m,
2124 init ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
2125 if (error) {
2126 m_freem(m);
2127
2128 /* XXX for debug */
2129 aprint_error_dev(sc->sc_dev, "can't load RX mbuf\n");
2130 if (init) {
2131 return error;
2132 } else {
2133 goto back;
2134 }
2135 }
2136
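	/*
	 * The new mbuf is now loaded in the spare map; the old map (still
	 * holding any previous mbuf mapping) can be released and recycled
	 * as the next spare.
	 */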
2137 if (!init)
2138 bus_dmamap_unload(sc->sc_dmat, rb->rb_dmap);
2139 rb->rb_mbuf = m;
2140
2141 /*
2142 * Swap RX buf's DMA map with the loaded temporary one
2143 */
2144 dmap = rb->rb_dmap;
2145 rb->rb_dmap = sc->sc_mbuf_tmp_dmap;
2146 rb->rb_paddr = rb->rb_dmap->dm_segs[0].ds_addr;
2147 sc->sc_mbuf_tmp_dmap = dmap;
2148
2149 error = 0;
2150 back:
2151 rx_ring = rbd->rbd_ring;
2152 desc = &rx_ring->rr_desc[buf_idx];
2153
2154 desc->rd_addr_hi = ET_ADDR_HI(rb->rb_paddr);
2155 desc->rd_addr_lo = ET_ADDR_LO(rb->rb_paddr);
2156 desc->rd_ctrl = __SHIFTIN(buf_idx, ET_RDCTRL_BUFIDX);
2157
2158 bus_dmamap_sync(sc->sc_dmat, rx_ring->rr_dmap, 0,
2159 rx_ring->rr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
2160 return error;
2161 }
2162