/*	$NetBSD: if_et.c,v 1.2.4.2 2012/10/30 17:21:28 yamt Exp $	*/
2 /* $OpenBSD: if_et.c,v 1.11 2008/06/08 06:18:07 jsg Exp $ */
3 /*
4 * Copyright (c) 2007 The DragonFly Project. All rights reserved.
5 *
6 * This code is derived from software contributed to The DragonFly Project
7 * by Sepherosa Ziehau <sepherosa (at) gmail.com>
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 *
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in
17 * the documentation and/or other materials provided with the
18 * distribution.
19 * 3. Neither the name of The DragonFly Project nor the names of its
20 * contributors may be used to endorse or promote products derived
21 * from this software without specific, prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
26 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
27 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
28 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
30 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
31 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
32 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
33 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.1 2007/10/12 14:12:42 sephe Exp $
37 */
38
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: if_et.c,v 1.2.4.2 2012/10/30 17:21:28 yamt Exp $");
41
42 #include "opt_inet.h"
43 #include "vlan.h"
44
45 #include <sys/param.h>
46 #include <sys/endian.h>
47 #include <sys/systm.h>
48 #include <sys/types.h>
49 #include <sys/sockio.h>
50 #include <sys/mbuf.h>
51 #include <sys/queue.h>
52 #include <sys/kernel.h>
53 #include <sys/device.h>
54 #include <sys/callout.h>
55 #include <sys/socket.h>
56
57 #include <sys/bus.h>
58
59 #include <net/if.h>
60 #include <net/if_dl.h>
61 #include <net/if_media.h>
62 #include <net/if_ether.h>
63 #include <net/if_arp.h>
64
65 #ifdef INET
66 #include <netinet/in.h>
67 #include <netinet/in_systm.h>
68 #include <netinet/in_var.h>
69 #include <netinet/ip.h>
70 #include <netinet/if_inarp.h>
71 #endif
72
73 #include <net/bpf.h>
74
75 #include <dev/mii/mii.h>
76 #include <dev/mii/miivar.h>
77
78 #include <dev/pci/pcireg.h>
79 #include <dev/pci/pcivar.h>
80 #include <dev/pci/pcidevs.h>
81
82 #include <dev/pci/if_etreg.h>
83
84 /* XXX temporary porting goop */
85 #define KKASSERT(cond) if (!(cond)) panic("KKASSERT: %s in %s", #cond, __func__)
86 #undef KASSERT
87 #define KASSERT(cond, complaint) if (!(cond)) panic complaint
88
89 /* these macros in particular need to die, so gross */
90 #define __LOWEST_SET_BIT(__mask) ((((__mask) - 1) & (__mask)) ^ (__mask))
91 #define __SHIFTOUT(__x, __mask) (((__x) & (__mask)) / __LOWEST_SET_BIT(__mask))
92 #define __SHIFTIN(__x, __mask) ((__x) * __LOWEST_SET_BIT(__mask))
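/*
 * Example: with __mask = 0x0ff0, __LOWEST_SET_BIT(__mask) is 0x0010, so
 * __SHIFTOUT(0x1234, 0x0ff0) == 0x23 and __SHIFTIN(0x23, 0x0ff0) == 0x0230;
 * i.e. a bit field is extracted/inserted without an explicit shift count.
 */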
93 /* XXX end porting goop */
94
95 int et_match(device_t, cfdata_t, void *);
96 void et_attach(device_t, device_t, void *);
97 int et_detach(device_t, int flags);
98 int et_shutdown(device_t);
99
100 int et_miibus_readreg(device_t, int, int);
101 void et_miibus_writereg(device_t, int, int, int);
102 void et_miibus_statchg(struct ifnet *);
103
104 int et_init(struct ifnet *ifp);
105 int et_ioctl(struct ifnet *, u_long, void *);
106 void et_start(struct ifnet *);
107 void et_watchdog(struct ifnet *);
108
109 int et_intr(void *);
110 void et_enable_intrs(struct et_softc *, uint32_t);
111 void et_disable_intrs(struct et_softc *);
112 void et_rxeof(struct et_softc *);
113 void et_txeof(struct et_softc *);
114 void et_txtick(void *);
115
116 int et_dma_alloc(struct et_softc *);
117 void et_dma_free(struct et_softc *);
118 int et_dma_mem_create(struct et_softc *, bus_size_t,
119 void **, bus_addr_t *, bus_dmamap_t *, bus_dma_segment_t *);
120 void et_dma_mem_destroy(struct et_softc *, void *, bus_dmamap_t);
121 int et_dma_mbuf_create(struct et_softc *);
122 void et_dma_mbuf_destroy(struct et_softc *, int, const int[]);
123 void et_dma_ring_addr(void *, bus_dma_segment_t *, int, int);
124
125 int et_init_tx_ring(struct et_softc *);
126 int et_init_rx_ring(struct et_softc *);
127 void et_free_tx_ring(struct et_softc *);
128 void et_free_rx_ring(struct et_softc *);
129 int et_encap(struct et_softc *, struct mbuf **);
130 int et_newbuf(struct et_rxbuf_data *, int, int, int);
131 int et_newbuf_cluster(struct et_rxbuf_data *, int, int);
132 int et_newbuf_hdr(struct et_rxbuf_data *, int, int);
133
134 void et_stop(struct et_softc *);
135 int et_chip_init(struct et_softc *);
136 void et_chip_attach(struct et_softc *);
137 void et_init_mac(struct et_softc *);
138 void et_init_rxmac(struct et_softc *);
139 void et_init_txmac(struct et_softc *);
140 int et_init_rxdma(struct et_softc *);
141 int et_init_txdma(struct et_softc *);
142 int et_start_rxdma(struct et_softc *);
143 int et_start_txdma(struct et_softc *);
144 int et_stop_rxdma(struct et_softc *);
145 int et_stop_txdma(struct et_softc *);
146 int et_enable_txrx(struct et_softc *);
147 void et_reset(struct et_softc *);
148 int et_bus_config(struct et_softc *);
149 void et_get_eaddr(struct et_softc *, uint8_t[]);
150 void et_setmulti(struct et_softc *);
151 void et_tick(void *);
152
153 static int et_rx_intr_npkts = 32;
154 static int et_rx_intr_delay = 20; /* x10 usec */
155 static int et_tx_intr_nsegs = 128;
156 static uint32_t et_timer = 1000 * 1000 * 1000; /* nanosec */
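/*
 * How the defaults above are used (see et_init_rxdma() and et_encap()):
 * the chip coalesces RX interrupts until either et_rx_intr_npkts frames
 * have arrived or et_rx_intr_delay (in 10 usec units) has elapsed,
 * presumably whichever happens first; a TX completion interrupt is
 * requested only every et_tx_intr_nsegs transmit segments; and et_timer
 * is the period written to ET_TIMER as a fallback for reaping completed
 * TX descriptors from the timer interrupt.
 */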
157
158 struct et_bsize {
159 int bufsize;
160 et_newbuf_t newbuf;
161 };
162
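/*
 * Ring 0 is refilled with plain header mbufs and ring 1 with cluster
 * mbufs (see et_newbuf_hdr()/et_newbuf_cluster()).  The bufsize value is
 * what et_start_rxdma() shifts into the RINGn_SIZE fields of
 * ET_RXDMA_CTRL; 0 presumably selects the chip's smallest size encoding.
 */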
163 static const struct et_bsize et_bufsize[ET_RX_NRING] = {
164 { .bufsize = 0, .newbuf = et_newbuf_hdr },
165 { .bufsize = 0, .newbuf = et_newbuf_cluster },
166 };
167
168 const struct et_product {
169 pci_vendor_id_t vendor;
170 pci_product_id_t product;
171 } et_devices[] = {
172 { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310 },
173 { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1301 }
174 };
175
176 CFATTACH_DECL_NEW(et, sizeof(struct et_softc), et_match, et_attach, et_detach,
177 NULL);
178
179 int
180 et_match(device_t dev, cfdata_t match, void *aux)
181 {
182 struct pci_attach_args *pa = aux;
183 const struct et_product *ep;
184 int i;
185
186 for (i = 0; i < sizeof(et_devices) / sizeof(et_devices[0]); i++) {
187 ep = &et_devices[i];
188 if (PCI_VENDOR(pa->pa_id) == ep->vendor &&
189 PCI_PRODUCT(pa->pa_id) == ep->product)
190 return 1;
191 }
192 return 0;
193 }
194
195 void
196 et_attach(device_t parent, device_t self, void *aux)
197 {
198 struct et_softc *sc = device_private(self);
199 struct pci_attach_args *pa = aux;
200 pci_chipset_tag_t pc = pa->pa_pc;
201 pci_intr_handle_t ih;
202 const char *intrstr;
203 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
204 pcireg_t memtype;
205 int error;
206
207 pci_aprint_devinfo(pa, "Ethernet controller");
208
209 sc->sc_dev = self;
210
211 /*
212 * Initialize tunables
213 */
214 sc->sc_rx_intr_npkts = et_rx_intr_npkts;
215 sc->sc_rx_intr_delay = et_rx_intr_delay;
216 sc->sc_tx_intr_nsegs = et_tx_intr_nsegs;
217 sc->sc_timer = et_timer;
218
219 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, ET_PCIR_BAR);
220 if (pci_mapreg_map(pa, ET_PCIR_BAR, memtype, 0, &sc->sc_mem_bt,
221 &sc->sc_mem_bh, NULL, &sc->sc_mem_size)) {
222 aprint_error_dev(self, "could not map mem space\n");
223 return;
224 }
225
226 if (pci_intr_map(pa, &ih) != 0) {
227 aprint_error_dev(self, "could not map interrupt\n");
228 goto fail;
229 }
230
231 intrstr = pci_intr_string(pc, ih);
232 sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, et_intr, sc);
233 if (sc->sc_irq_handle == NULL) {
234 aprint_error_dev(self, "could not establish interrupt");
235 if (intrstr != NULL)
236 aprint_error(" at %s", intrstr);
237 aprint_error("\n");
238 goto fail;
239 }
240 aprint_normal_dev(self, "interrupting at %s\n", intrstr);
241
242 sc->sc_dmat = pa->pa_dmat;
243 sc->sc_pct = pa->pa_pc;
244 sc->sc_pcitag = pa->pa_tag;
245
246 error = et_bus_config(sc);
247 if (error)
248 goto fail;
249
250 et_get_eaddr(sc, sc->sc_enaddr);
251
252 aprint_normal_dev(self, "Ethernet address %s\n",
253 ether_sprintf(sc->sc_enaddr));
254
255 CSR_WRITE_4(sc, ET_PM,
256 ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE);
257
258 et_reset(sc);
259
260 et_disable_intrs(sc);
261
262 error = et_dma_alloc(sc);
263 if (error)
264 goto fail;
265
266 ifp->if_softc = sc;
267 ifp->if_mtu = ETHERMTU;
268 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
269 ifp->if_init = et_init;
270 ifp->if_ioctl = et_ioctl;
271 ifp->if_start = et_start;
272 ifp->if_watchdog = et_watchdog;
273 IFQ_SET_MAXLEN(&ifp->if_snd, ET_TX_NDESC);
274 IFQ_SET_READY(&ifp->if_snd);
275 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
276
277 et_chip_attach(sc);
278
279 sc->sc_miibus.mii_ifp = ifp;
280 sc->sc_miibus.mii_readreg = et_miibus_readreg;
281 sc->sc_miibus.mii_writereg = et_miibus_writereg;
282 sc->sc_miibus.mii_statchg = et_miibus_statchg;
283
284 sc->sc_ethercom.ec_mii = &sc->sc_miibus;
285 ifmedia_init(&sc->sc_miibus.mii_media, 0, ether_mediachange,
286 ether_mediastatus);
287 mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
288 MII_OFFSET_ANY, 0);
289 if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
290 aprint_error_dev(self, "no PHY found!\n");
291 ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
292 0, NULL);
293 ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
294 } else
295 ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);
296
297 if_attach(ifp);
298 ether_ifattach(ifp, sc->sc_enaddr);
299
300 callout_init(&sc->sc_tick, 0);
301 callout_setfunc(&sc->sc_tick, et_tick, sc);
302 callout_init(&sc->sc_txtick, 0);
303 callout_setfunc(&sc->sc_txtick, et_txtick, sc);
304
305 if (pmf_device_register(self, NULL, NULL))
306 pmf_class_network_register(self, ifp);
307 else
308 aprint_error_dev(self, "couldn't establish power handler\n");
309
310 return;
311
312 fail:
313 et_dma_free(sc);
314 if (sc->sc_irq_handle != NULL) {
315 pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
316 sc->sc_irq_handle = NULL;
317 }
318 if (sc->sc_mem_size) {
319 bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
320 sc->sc_mem_size = 0;
321 }
322 }
323
324 int
325 et_detach(device_t self, int flags)
326 {
327 struct et_softc *sc = device_private(self);
328 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
329 int s;
330
331 pmf_device_deregister(self);
332 s = splnet();
333 et_stop(sc);
334 splx(s);
335
336 mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);
337
338 /* Delete all remaining media. */
339 ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);
340
341 ether_ifdetach(ifp);
342 if_detach(ifp);
343 et_dma_free(sc);
344
345 if (sc->sc_irq_handle != NULL) {
346 pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
347 sc->sc_irq_handle = NULL;
348 }
349
350 if (sc->sc_mem_size) {
351 bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
352 sc->sc_mem_size = 0;
353 }
354
355 return 0;
356 }
357
358 int
359 et_shutdown(device_t self)
360 {
361 struct et_softc *sc = device_private(self);
362 int s;
363
364 s = splnet();
365 et_stop(sc);
366 splx(s);
367
368 return 0;
369 }
370
371 int
372 et_miibus_readreg(device_t dev, int phy, int reg)
373 {
374 struct et_softc *sc = device_private(dev);
375 uint32_t val;
376 int i, ret;
377
378 /* Stop any pending operations */
379 CSR_WRITE_4(sc, ET_MII_CMD, 0);
380
381 val = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
382 __SHIFTIN(reg, ET_MII_ADDR_REG);
383 CSR_WRITE_4(sc, ET_MII_ADDR, val);
384
385 /* Start reading */
386 CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ);
387
388 #define NRETRY 50
389
390 for (i = 0; i < NRETRY; ++i) {
391 val = CSR_READ_4(sc, ET_MII_IND);
392 if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0)
393 break;
394 DELAY(50);
395 }
396 if (i == NRETRY) {
397 aprint_error_dev(sc->sc_dev, "read phy %d, reg %d timed out\n",
398 phy, reg);
399 ret = 0;
400 goto back;
401 }
402
403 #undef NRETRY
404
405 val = CSR_READ_4(sc, ET_MII_STAT);
406 ret = __SHIFTOUT(val, ET_MII_STAT_VALUE);
407
408 back:
409 /* Make sure that the current operation is stopped */
410 CSR_WRITE_4(sc, ET_MII_CMD, 0);
411 return ret;
412 }
413
414 void
415 et_miibus_writereg(device_t dev, int phy, int reg, int val0)
416 {
417 struct et_softc *sc = device_private(dev);
418 uint32_t val;
419 int i;
420
421 /* Stop any pending operations */
422 CSR_WRITE_4(sc, ET_MII_CMD, 0);
423
424 val = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
425 __SHIFTIN(reg, ET_MII_ADDR_REG);
426 CSR_WRITE_4(sc, ET_MII_ADDR, val);
427
428 /* Start writing */
429 CSR_WRITE_4(sc, ET_MII_CTRL, __SHIFTIN(val0, ET_MII_CTRL_VALUE));
430
431 #define NRETRY 100
432
433 for (i = 0; i < NRETRY; ++i) {
434 val = CSR_READ_4(sc, ET_MII_IND);
435 if ((val & ET_MII_IND_BUSY) == 0)
436 break;
437 DELAY(50);
438 }
439 if (i == NRETRY) {
440 aprint_error_dev(sc->sc_dev, "write phy %d, reg %d timed out\n",
441 phy, reg);
442 et_miibus_readreg(dev, phy, reg);
443 }
444
445 #undef NRETRY
446
447 /* Make sure that the current operation is stopped */
448 CSR_WRITE_4(sc, ET_MII_CMD, 0);
449 }
450
451 void
452 et_miibus_statchg(struct ifnet *ifp)
453 {
454 struct et_softc *sc = ifp->if_softc;
455 struct mii_data *mii = &sc->sc_miibus;
456 uint32_t cfg2, ctrl;
457
458 cfg2 = CSR_READ_4(sc, ET_MAC_CFG2);
459 cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII |
460 ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM);
461 cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC |
462 __SHIFTIN(7, ET_MAC_CFG2_PREAMBLE_LEN);
463
464 ctrl = CSR_READ_4(sc, ET_MAC_CTRL);
465 ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII);
466
467 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
468 cfg2 |= ET_MAC_CFG2_MODE_GMII;
469 } else {
470 cfg2 |= ET_MAC_CFG2_MODE_MII;
471 ctrl |= ET_MAC_CTRL_MODE_MII;
472 }
473
474 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
475 cfg2 |= ET_MAC_CFG2_FDX;
476 else
477 ctrl |= ET_MAC_CTRL_GHDX;
478
479 CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl);
480 CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2);
481 }
482
483 void
484 et_stop(struct et_softc *sc)
485 {
486 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
487
488 callout_stop(&sc->sc_tick);
489 callout_stop(&sc->sc_txtick);
490
491 et_stop_rxdma(sc);
492 et_stop_txdma(sc);
493
494 et_disable_intrs(sc);
495
496 et_free_tx_ring(sc);
497 et_free_rx_ring(sc);
498
499 et_reset(sc);
500
501 sc->sc_tx = 0;
502 sc->sc_tx_intr = 0;
503
504 ifp->if_timer = 0;
505 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
506 }
507
508 int
509 et_bus_config(struct et_softc *sc)
510 {
511 uint32_t val; //, max_plsz;
512 // uint16_t ack_latency, replay_timer;
513
514 /*
515 * Test whether EEPROM is valid
516 * NOTE: Read twice to get the correct value
517 */
518 pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_EEPROM_MISC);
519 val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_EEPROM_MISC);
520
521 if (val & ET_PCIM_EEPROM_STATUS_ERROR) {
522 aprint_error_dev(sc->sc_dev, "EEPROM status error 0x%02x\n", val);
523 return ENXIO;
524 }
525
526 /* TODO: LED */
527 #if 0
528 /*
529 * Configure ACK latency and replay timer according to
	 * max payload size
531 */
532 val = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_DEVICE_CAPS);
533 max_plsz = val & ET_PCIM_DEVICE_CAPS_MAX_PLSZ;
534
535 switch (max_plsz) {
536 case ET_PCIV_DEVICE_CAPS_PLSZ_128:
537 ack_latency = ET_PCIV_ACK_LATENCY_128;
538 replay_timer = ET_PCIV_REPLAY_TIMER_128;
539 break;
540
541 case ET_PCIV_DEVICE_CAPS_PLSZ_256:
542 ack_latency = ET_PCIV_ACK_LATENCY_256;
543 replay_timer = ET_PCIV_REPLAY_TIMER_256;
544 break;
545
546 default:
547 ack_latency = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
548 ET_PCIR_ACK_LATENCY) >> 16;
549 replay_timer = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
550 ET_PCIR_REPLAY_TIMER) >> 16;
551 aprint_normal_dev(sc->sc_dev, "ack latency %u, replay timer %u\n",
552 ack_latency, replay_timer);
553 break;
554 }
555 if (ack_latency != 0) {
556 pci_conf_write(sc->sc_pct, sc->sc_pcitag,
557 ET_PCIR_ACK_LATENCY, ack_latency << 16);
558 pci_conf_write(sc->sc_pct, sc->sc_pcitag,
559 ET_PCIR_REPLAY_TIMER, replay_timer << 16);
560 }
561
562 /*
563 * Set L0s and L1 latency timer to 2us
564 */
565 val = ET_PCIV_L0S_LATENCY(2) | ET_PCIV_L1_LATENCY(2);
566 pci_conf_write(sc->sc_pct, sc->sc_pcitag, ET_PCIR_L0S_L1_LATENCY,
567 val << 24);
568
569 /*
570 * Set max read request size to 2048 bytes
571 */
572 val = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
573 ET_PCIR_DEVICE_CTRL) >> 16;
574 val &= ~ET_PCIM_DEVICE_CTRL_MAX_RRSZ;
575 val |= ET_PCIV_DEVICE_CTRL_RRSZ_2K;
576 pci_conf_write(sc->sc_pct, sc->sc_pcitag, ET_PCIR_DEVICE_CTRL,
577 val << 16);
578 #endif
579
580 return 0;
581 }
582
583 void
584 et_get_eaddr(struct et_softc *sc, uint8_t eaddr[])
585 {
586 uint32_t r;
587
588 r = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_MACADDR_LO);
589 eaddr[0] = r & 0xff;
590 eaddr[1] = (r >> 8) & 0xff;
591 eaddr[2] = (r >> 16) & 0xff;
592 eaddr[3] = (r >> 24) & 0xff;
593 r = pci_conf_read(sc->sc_pct, sc->sc_pcitag, ET_PCIR_MACADDR_HI);
594 eaddr[4] = r & 0xff;
595 eaddr[5] = (r >> 8) & 0xff;
596 }
597
598 void
599 et_reset(struct et_softc *sc)
600 {
601 CSR_WRITE_4(sc, ET_MAC_CFG1,
602 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
603 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
604 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);
605
606 CSR_WRITE_4(sc, ET_SWRST,
607 ET_SWRST_TXDMA | ET_SWRST_RXDMA |
608 ET_SWRST_TXMAC | ET_SWRST_RXMAC |
609 ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC);
610
611 CSR_WRITE_4(sc, ET_MAC_CFG1,
612 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
613 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC);
614 CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
615 }
616
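/*
 * ET_INTR_MASK is a disable mask: a 1 bit masks the corresponding
 * interrupt source.  Hence et_disable_intrs() writes all ones and
 * et_enable_intrs() writes the complement of the sources it wants.
 */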
617 void
618 et_disable_intrs(struct et_softc *sc)
619 {
620 CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
621 }
622
623 void
624 et_enable_intrs(struct et_softc *sc, uint32_t intrs)
625 {
626 CSR_WRITE_4(sc, ET_INTR_MASK, ~intrs);
627 }
628
629 int
630 et_dma_alloc(struct et_softc *sc)
631 {
632 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
633 struct et_txstatus_data *txsd = &sc->sc_tx_status;
634 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
635 struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
636 int i, error;
637
638 /*
639 * Create TX ring DMA stuffs
640 */
641 error = et_dma_mem_create(sc, ET_TX_RING_SIZE,
642 (void **)&tx_ring->tr_desc, &tx_ring->tr_paddr, &tx_ring->tr_dmap,
643 &tx_ring->tr_seg);
644 if (error) {
645 aprint_error_dev(sc->sc_dev, "can't create TX ring DMA stuffs\n");
646 return error;
647 }
648
649 /*
650 * Create TX status DMA stuffs
651 */
652 error = et_dma_mem_create(sc, sizeof(uint32_t),
653 (void **)&txsd->txsd_status,
654 &txsd->txsd_paddr, &txsd->txsd_dmap, &txsd->txsd_seg);
655 if (error) {
656 aprint_error_dev(sc->sc_dev, "can't create TX status DMA stuffs\n");
657 return error;
658 }
659
660 /*
661 * Create DMA stuffs for RX rings
662 */
663 for (i = 0; i < ET_RX_NRING; ++i) {
664 static const uint32_t rx_ring_posreg[ET_RX_NRING] =
665 { ET_RX_RING0_POS, ET_RX_RING1_POS };
666
667 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];
668
669 error = et_dma_mem_create(sc, ET_RX_RING_SIZE,
670 (void **)&rx_ring->rr_desc,
671 &rx_ring->rr_paddr, &rx_ring->rr_dmap, &rx_ring->rr_seg);
672 if (error) {
673 aprint_error_dev(sc->sc_dev, "can't create DMA stuffs for "
674 "the %d RX ring\n", i);
675 return error;
676 }
677 rx_ring->rr_posreg = rx_ring_posreg[i];
678 }
679
680 /*
681 * Create RX stat ring DMA stuffs
682 */
683 error = et_dma_mem_create(sc, ET_RXSTAT_RING_SIZE,
684 (void **)&rxst_ring->rsr_stat,
685 &rxst_ring->rsr_paddr, &rxst_ring->rsr_dmap, &rxst_ring->rsr_seg);
686 if (error) {
687 aprint_error_dev(sc->sc_dev, "can't create RX stat ring DMA stuffs\n");
688 return error;
689 }
690
691 /*
692 * Create RX status DMA stuffs
693 */
694 error = et_dma_mem_create(sc, sizeof(struct et_rxstatus),
695 (void **)&rxsd->rxsd_status,
696 &rxsd->rxsd_paddr, &rxsd->rxsd_dmap, &rxsd->rxsd_seg);
697 if (error) {
698 aprint_error_dev(sc->sc_dev, "can't create RX status DMA stuffs\n");
699 return error;
700 }
701
702 /*
703 * Create mbuf DMA stuffs
704 */
705 error = et_dma_mbuf_create(sc);
706 if (error)
707 return error;
708
709 return 0;
710 }
711
712 void
713 et_dma_free(struct et_softc *sc)
714 {
715 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
716 struct et_txstatus_data *txsd = &sc->sc_tx_status;
717 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
718 struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
719 int i, rx_done[ET_RX_NRING];
720
721 /*
722 * Destroy TX ring DMA stuffs
723 */
724 et_dma_mem_destroy(sc, tx_ring->tr_desc, tx_ring->tr_dmap);
725
726 /*
727 * Destroy TX status DMA stuffs
728 */
729 et_dma_mem_destroy(sc, txsd->txsd_status, txsd->txsd_dmap);
730
731 /*
732 * Destroy DMA stuffs for RX rings
733 */
734 for (i = 0; i < ET_RX_NRING; ++i) {
735 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];
736
737 et_dma_mem_destroy(sc, rx_ring->rr_desc, rx_ring->rr_dmap);
738 }
739
740 /*
741 * Destroy RX stat ring DMA stuffs
742 */
743 et_dma_mem_destroy(sc, rxst_ring->rsr_stat, rxst_ring->rsr_dmap);
744
745 /*
746 * Destroy RX status DMA stuffs
747 */
748 et_dma_mem_destroy(sc, rxsd->rxsd_status, rxsd->rxsd_dmap);
749
750 /*
751 * Destroy mbuf DMA stuffs
752 */
753 for (i = 0; i < ET_RX_NRING; ++i)
754 rx_done[i] = ET_RX_NDESC;
755 et_dma_mbuf_destroy(sc, ET_TX_NDESC, rx_done);
756 }
757
758 int
759 et_dma_mbuf_create(struct et_softc *sc)
760 {
761 struct et_txbuf_data *tbd = &sc->sc_tx_data;
762 int i, error, rx_done[ET_RX_NRING];
763
764 /*
765 * Create spare DMA map for RX mbufs
766 */
767 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
768 BUS_DMA_NOWAIT, &sc->sc_mbuf_tmp_dmap);
769 if (error) {
770 aprint_error_dev(sc->sc_dev, "can't create spare mbuf DMA map\n");
771 return error;
772 }
773
774 /*
775 * Create DMA maps for RX mbufs
776 */
777 bzero(rx_done, sizeof(rx_done));
778 for (i = 0; i < ET_RX_NRING; ++i) {
779 struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
780 int j;
781
782 for (j = 0; j < ET_RX_NDESC; ++j) {
783 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
784 MCLBYTES, 0, BUS_DMA_NOWAIT,
785 &rbd->rbd_buf[j].rb_dmap);
786 if (error) {
				aprint_error_dev(sc->sc_dev,
				    "can't create DMA map for RX buf %d in RX ring %d\n",
				    j, i);
789 rx_done[i] = j;
790 et_dma_mbuf_destroy(sc, 0, rx_done);
791 return error;
792 }
793 }
794 rx_done[i] = ET_RX_NDESC;
795
796 rbd->rbd_softc = sc;
797 rbd->rbd_ring = &sc->sc_rx_ring[i];
798 }
799
800 /*
801 * Create DMA maps for TX mbufs
802 */
803 for (i = 0; i < ET_TX_NDESC; ++i) {
804 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
805 0, BUS_DMA_NOWAIT, &tbd->tbd_buf[i].tb_dmap);
806 if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't create DMA map for TX buf %d\n", i);
809 et_dma_mbuf_destroy(sc, i, rx_done);
810 return error;
811 }
812 }
813
814 return 0;
815 }
816
817 void
818 et_dma_mbuf_destroy(struct et_softc *sc, int tx_done, const int rx_done[])
819 {
820 struct et_txbuf_data *tbd = &sc->sc_tx_data;
821 int i;
822
823 /*
824 * Destroy DMA maps for RX mbufs
825 */
826 for (i = 0; i < ET_RX_NRING; ++i) {
827 struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
828 int j;
829
830 for (j = 0; j < rx_done[i]; ++j) {
831 struct et_rxbuf *rb = &rbd->rbd_buf[j];
832
833 KASSERT(rb->rb_mbuf == NULL,
834 ("RX mbuf in %d RX ring is not freed yet\n", i));
835 bus_dmamap_destroy(sc->sc_dmat, rb->rb_dmap);
836 }
837 }
838
839 /*
840 * Destroy DMA maps for TX mbufs
841 */
842 for (i = 0; i < tx_done; ++i) {
843 struct et_txbuf *tb = &tbd->tbd_buf[i];
844
845 KASSERT(tb->tb_mbuf == NULL, ("TX mbuf is not freed yet\n"));
846 bus_dmamap_destroy(sc->sc_dmat, tb->tb_dmap);
847 }
848
849 /*
850 * Destroy spare mbuf DMA map
851 */
852 bus_dmamap_destroy(sc->sc_dmat, sc->sc_mbuf_tmp_dmap);
853 }
854
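/*
 * et_dma_mem_create(): allocate a single physically contiguous DMA
 * segment of "size" bytes (the rings and status blocks are programmed
 * into the chip with one base address, so they cannot be scattered),
 * map it into kernel virtual space, load it into a new DMA map and
 * return the kernel address, bus address, map and segment.
 */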
855 int
856 et_dma_mem_create(struct et_softc *sc, bus_size_t size,
857 void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap, bus_dma_segment_t *seg)
858 {
859 int error, nsegs;
860
861 error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, BUS_DMA_NOWAIT,
862 dmap);
863 if (error) {
864 aprint_error_dev(sc->sc_dev, "can't create DMA map\n");
865 return error;
866 }
867
868 error = bus_dmamem_alloc(sc->sc_dmat, size, ET_ALIGN, 0, seg,
869 1, &nsegs, BUS_DMA_WAITOK);
870 if (error) {
871 aprint_error_dev(sc->sc_dev, "can't allocate DMA mem\n");
872 return error;
873 }
874
875 error = bus_dmamem_map(sc->sc_dmat, seg, nsegs,
876 size, (void **)addr, BUS_DMA_NOWAIT);
877 if (error) {
878 aprint_error_dev(sc->sc_dev, "can't map DMA mem\n");
879 return (error);
880 }
881
882 error = bus_dmamap_load(sc->sc_dmat, *dmap, *addr, size, NULL,
883 BUS_DMA_WAITOK);
884 if (error) {
885 aprint_error_dev(sc->sc_dev, "can't load DMA mem\n");
		bus_dmamem_free(sc->sc_dmat, seg, 1);
887 return error;
888 }
889
890 memset(*addr, 0, size);
891
892 *paddr = (*dmap)->dm_segs[0].ds_addr;
893
894 return 0;
895 }
896
897 void
898 et_dma_mem_destroy(struct et_softc *sc, void *addr, bus_dmamap_t dmap)
899 {
900 bus_dmamap_unload(sc->sc_dmat, dmap);
901 bus_dmamem_free(sc->sc_dmat, (bus_dma_segment_t *)&addr, 1);
902 }
903
904 void
905 et_dma_ring_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error)
906 {
907 KASSERT(nseg == 1, ("too many segments\n"));
908 *((bus_addr_t *)arg) = seg->ds_addr;
909 }
910
911 void
912 et_chip_attach(struct et_softc *sc)
913 {
914 uint32_t val;
915
916 /*
917 * Perform minimal initialization
918 */
919
920 /* Disable loopback */
921 CSR_WRITE_4(sc, ET_LOOPBACK, 0);
922
923 /* Reset MAC */
924 CSR_WRITE_4(sc, ET_MAC_CFG1,
925 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
926 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
927 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);
928
929 /*
930 * Setup half duplex mode
931 */
932 val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
933 __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
934 __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
935 ET_MAC_HDX_EXC_DEFER;
936 CSR_WRITE_4(sc, ET_MAC_HDX, val);
937
938 /* Clear MAC control */
939 CSR_WRITE_4(sc, ET_MAC_CTRL, 0);
940
941 /* Reset MII */
942 CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);
943
944 /* Bring MAC out of reset state */
945 CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
946
947 /* Enable memory controllers */
948 CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
949 }
950
951 int
952 et_intr(void *xsc)
953 {
954 struct et_softc *sc = xsc;
955 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
956 uint32_t intrs;
957
958 if ((ifp->if_flags & IFF_RUNNING) == 0)
959 return (0);
960
961 intrs = CSR_READ_4(sc, ET_INTR_STATUS);
962 if (intrs == 0 || intrs == 0xffffffff)
963 return (0);
964
965 et_disable_intrs(sc);
966 intrs &= ET_INTRS;
967 if (intrs == 0) /* Not interested */
968 goto back;
969
970 if (intrs & ET_INTR_RXEOF)
971 et_rxeof(sc);
972 if (intrs & (ET_INTR_TXEOF | ET_INTR_TIMER))
973 et_txeof(sc);
974 if (intrs & ET_INTR_TIMER)
975 CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
976 back:
977 et_enable_intrs(sc, ET_INTRS);
978
979 return (1);
980 }
981
982 int
983 et_init(struct ifnet *ifp)
984 {
985 struct et_softc *sc = ifp->if_softc;
986 int error, i, s;
987
988 if (ifp->if_flags & IFF_RUNNING)
989 return 0;
990
991 s = splnet();
992
993 et_stop(sc);
994
995 for (i = 0; i < ET_RX_NRING; ++i) {
996 sc->sc_rx_data[i].rbd_bufsize = et_bufsize[i].bufsize;
997 sc->sc_rx_data[i].rbd_newbuf = et_bufsize[i].newbuf;
998 }
999
1000 error = et_init_tx_ring(sc);
1001 if (error)
1002 goto back;
1003
1004 error = et_init_rx_ring(sc);
1005 if (error)
1006 goto back;
1007
1008 error = et_chip_init(sc);
1009 if (error)
1010 goto back;
1011
1012 error = et_enable_txrx(sc);
1013 if (error)
1014 goto back;
1015
1016 error = et_start_rxdma(sc);
1017 if (error)
1018 goto back;
1019
1020 error = et_start_txdma(sc);
1021 if (error)
1022 goto back;
1023
1024 et_enable_intrs(sc, ET_INTRS);
1025
1026 callout_schedule(&sc->sc_tick, hz);
1027
1028 CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
1029
1030 ifp->if_flags |= IFF_RUNNING;
1031 ifp->if_flags &= ~IFF_OACTIVE;
1032 back:
1033 if (error)
1034 et_stop(sc);
1035
1036 splx(s);
1037
	return error;
1039 }
1040
1041 int
1042 et_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1043 {
1044 struct et_softc *sc = ifp->if_softc;
1045 struct ifreq *ifr = (struct ifreq *)data;
1046 int s, error = 0;
1047
1048 s = splnet();
1049
1050 switch (cmd) {
1051 case SIOCSIFFLAGS:
1052 if (ifp->if_flags & IFF_UP) {
1053 /*
1054 * If only the PROMISC or ALLMULTI flag changes, then
1055 * don't do a full re-init of the chip, just update
1056 * the Rx filter.
1057 */
1058 if ((ifp->if_flags & IFF_RUNNING) &&
1059 ((ifp->if_flags ^ sc->sc_if_flags) &
1060 (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
1061 et_setmulti(sc);
1062 } else {
1063 if (!(ifp->if_flags & IFF_RUNNING))
1064 et_init(ifp);
1065 }
1066 } else {
1067 if (ifp->if_flags & IFF_RUNNING)
1068 et_stop(sc);
1069 }
1070 sc->sc_if_flags = ifp->if_flags;
1071 break;
1072 case SIOCSIFMEDIA:
1073 case SIOCGIFMEDIA:
1074 error = ifmedia_ioctl(ifp, ifr, &sc->sc_miibus.mii_media, cmd);
1075 break;
1076 default:
1077 error = ether_ioctl(ifp, cmd, data);
1078 if (error == ENETRESET) {
1079 if (ifp->if_flags & IFF_RUNNING)
1080 et_setmulti(sc);
1081 error = 0;
1082 }
1083 break;
1084
1085 }
1086
1087 splx(s);
1088
1089 return error;
1090 }
1091
1092 void
1093 et_start(struct ifnet *ifp)
1094 {
1095 struct et_softc *sc = ifp->if_softc;
1096 struct et_txbuf_data *tbd = &sc->sc_tx_data;
1097 int trans;
1098 struct mbuf *m;
1099
1100 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
1101 return;
1102
1103 trans = 0;
1104 for (;;) {
1105 IFQ_DEQUEUE(&ifp->if_snd, m);
1106 if (m == NULL)
1107 break;
1108
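		/*
		 * Stop queueing when fewer than ET_NSEG_SPARE descriptors
		 * remain, so that a maximally fragmented packet still fits.
		 */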
1109 if ((tbd->tbd_used + ET_NSEG_SPARE) > ET_TX_NDESC) {
1110 ifp->if_flags |= IFF_OACTIVE;
1111 break;
1112 }
1113
1114 if (et_encap(sc, &m)) {
1115 ifp->if_oerrors++;
1116 ifp->if_flags |= IFF_OACTIVE;
1117 break;
1118 }
1119
1120 trans = 1;
1121
1122 bpf_mtap(ifp, m);
1123 }
1124
1125 if (trans) {
1126 callout_schedule(&sc->sc_txtick, hz);
1127 ifp->if_timer = 5;
1128 }
1129 }
1130
1131 void
1132 et_watchdog(struct ifnet *ifp)
1133 {
1134 struct et_softc *sc = ifp->if_softc;
1135 aprint_error_dev(sc->sc_dev, "watchdog timed out\n");
1136
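	/*
	 * Clear IFF_RUNNING first: et_init() returns early when the
	 * interface is already marked running, and we want a full reset.
	 */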
1137 ifp->if_flags &= ~IFF_RUNNING;
1138 et_init(ifp);
1139 et_start(ifp);
1140 }
1141
1142 int
1143 et_stop_rxdma(struct et_softc *sc)
1144 {
1145 CSR_WRITE_4(sc, ET_RXDMA_CTRL,
1146 ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE);
1147
1148 DELAY(5);
1149 if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) {
1150 aprint_error_dev(sc->sc_dev, "can't stop RX DMA engine\n");
1151 return ETIMEDOUT;
1152 }
1153 return 0;
1154 }
1155
1156 int
1157 et_stop_txdma(struct et_softc *sc)
1158 {
1159 CSR_WRITE_4(sc, ET_TXDMA_CTRL,
1160 ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT);
1161 return 0;
1162 }
1163
1164 void
1165 et_free_tx_ring(struct et_softc *sc)
1166 {
1167 struct et_txbuf_data *tbd = &sc->sc_tx_data;
1168 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
1169 int i;
1170
1171 for (i = 0; i < ET_TX_NDESC; ++i) {
1172 struct et_txbuf *tb = &tbd->tbd_buf[i];
1173
1174 if (tb->tb_mbuf != NULL) {
1175 bus_dmamap_unload(sc->sc_dmat, tb->tb_dmap);
1176 m_freem(tb->tb_mbuf);
1177 tb->tb_mbuf = NULL;
1178 }
1179 }
1180
1181 bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
1182 bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
1183 tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1184 }
1185
1186 void
1187 et_free_rx_ring(struct et_softc *sc)
1188 {
1189 int n;
1190
1191 for (n = 0; n < ET_RX_NRING; ++n) {
1192 struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
1193 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[n];
1194 int i;
1195
1196 for (i = 0; i < ET_RX_NDESC; ++i) {
1197 struct et_rxbuf *rb = &rbd->rbd_buf[i];
1198
1199 if (rb->rb_mbuf != NULL) {
1200 bus_dmamap_unload(sc->sc_dmat, rb->rb_dmap);
1201 m_freem(rb->rb_mbuf);
1202 rb->rb_mbuf = NULL;
1203 }
1204 }
1205
1206 bzero(rx_ring->rr_desc, ET_RX_RING_SIZE);
1207 bus_dmamap_sync(sc->sc_dmat, rx_ring->rr_dmap, 0,
1208 rx_ring->rr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1209 }
1210 }
1211
1212 void
1213 et_setmulti(struct et_softc *sc)
1214 {
1215 struct ethercom *ec = &sc->sc_ethercom;
1216 struct ifnet *ifp = &ec->ec_if;
1217 uint32_t hash[4] = { 0, 0, 0, 0 };
1218 uint32_t rxmac_ctrl, pktfilt;
1219 struct ether_multi *enm;
1220 struct ether_multistep step;
1222 int i, count;
1223
1224 pktfilt = CSR_READ_4(sc, ET_PKTFILT);
1225 rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL);
1226
1227 pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST);
1228 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
1229 rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT;
1230 goto back;
1231 }
1232
1235 count = 0;
1236 ETHER_FIRST_MULTI(step, ec, enm);
1237 while (enm != NULL) {
1238 uint32_t *hp, h;
1239
		h = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
1246 h = (h & 0x3f800000) >> 23;
1247
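		/*
		 * The 7-bit value taken from bits 23-29 of the CRC selects
		 * one of 128 hash bits, spread over the four 32-bit
		 * ET_MULTI_HASH registers written below.
		 */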
1248 hp = &hash[0];
1249 if (h >= 32 && h < 64) {
1250 h -= 32;
1251 hp = &hash[1];
1252 } else if (h >= 64 && h < 96) {
1253 h -= 64;
1254 hp = &hash[2];
1255 } else if (h >= 96) {
1256 h -= 96;
1257 hp = &hash[3];
1258 }
1259 *hp |= (1 << h);
1260
1261 ++count;
1262 ETHER_NEXT_MULTI(step, enm);
1263 }
1264
1265 for (i = 0; i < 4; ++i)
1266 CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]);
1267
1268 if (count > 0)
1269 pktfilt |= ET_PKTFILT_MCAST;
1270 rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT;
1271 back:
1272 CSR_WRITE_4(sc, ET_PKTFILT, pktfilt);
1273 CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl);
1274 }
1275
1276 int
1277 et_chip_init(struct et_softc *sc)
1278 {
1279 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1280 uint32_t rxq_end;
1281 int error;
1282
1283 /*
1284 * Split internal memory between TX and RX according to MTU
1285 */
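	/*
	 * rxq_end is the boundary inside the controller's internal packet
	 * memory: the range up to rxq_end feeds the RX queue and the rest,
	 * up to ET_INTERN_MEM_END, the TX queue.  Larger MTUs give RX a
	 * smaller share so TX keeps room for a full frame (the exact unit
	 * of these values is an assumption drawn from the register writes).
	 */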
1286 if (ifp->if_mtu < 2048)
1287 rxq_end = 0x2bc;
1288 else if (ifp->if_mtu < 8192)
1289 rxq_end = 0x1ff;
1290 else
1291 rxq_end = 0x1b3;
1292 CSR_WRITE_4(sc, ET_RXQ_START, 0);
1293 CSR_WRITE_4(sc, ET_RXQ_END, rxq_end);
1294 CSR_WRITE_4(sc, ET_TXQ_START, rxq_end + 1);
1295 CSR_WRITE_4(sc, ET_TXQ_END, ET_INTERN_MEM_END);
1296
1297 /* No loopback */
1298 CSR_WRITE_4(sc, ET_LOOPBACK, 0);
1299
1300 /* Clear MSI configure */
1301 CSR_WRITE_4(sc, ET_MSI_CFG, 0);
1302
1303 /* Disable timer */
1304 CSR_WRITE_4(sc, ET_TIMER, 0);
1305
1306 /* Initialize MAC */
1307 et_init_mac(sc);
1308
1309 /* Enable memory controllers */
1310 CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
1311
1312 /* Initialize RX MAC */
1313 et_init_rxmac(sc);
1314
1315 /* Initialize TX MAC */
1316 et_init_txmac(sc);
1317
1318 /* Initialize RX DMA engine */
1319 error = et_init_rxdma(sc);
1320 if (error)
1321 return error;
1322
1323 /* Initialize TX DMA engine */
1324 error = et_init_txdma(sc);
1325 if (error)
1326 return error;
1327
1328 return 0;
1329 }
1330
1331 int
1332 et_init_tx_ring(struct et_softc *sc)
1333 {
1334 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
1335 struct et_txstatus_data *txsd = &sc->sc_tx_status;
1336 struct et_txbuf_data *tbd = &sc->sc_tx_data;
1337
1338 bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
1339 bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
1340 tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1341
1342 tbd->tbd_start_index = 0;
1343 tbd->tbd_start_wrap = 0;
1344 tbd->tbd_used = 0;
1345
1346 bzero(txsd->txsd_status, sizeof(uint32_t));
1347 bus_dmamap_sync(sc->sc_dmat, txsd->txsd_dmap, 0,
1348 txsd->txsd_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1349 return 0;
1350 }
1351
1352 int
1353 et_init_rx_ring(struct et_softc *sc)
1354 {
1355 struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
1356 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
1357 int n;
1358
1359 for (n = 0; n < ET_RX_NRING; ++n) {
1360 struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
1361 int i, error;
1362
1363 for (i = 0; i < ET_RX_NDESC; ++i) {
1364 error = rbd->rbd_newbuf(rbd, i, 1);
1365 if (error) {
				aprint_error_dev(sc->sc_dev,
				    "newbuf failed for ring %d buf %d: %d\n",
				    n, i, error);
1368 return error;
1369 }
1370 }
1371 }
1372
1373 bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus));
1374 bus_dmamap_sync(sc->sc_dmat, rxsd->rxsd_dmap, 0,
1375 rxsd->rxsd_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1376
1377 bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE);
1378 bus_dmamap_sync(sc->sc_dmat, rxst_ring->rsr_dmap, 0,
1379 rxst_ring->rsr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1380
1381 return 0;
1382 }
1383
1384 int
1385 et_init_rxdma(struct et_softc *sc)
1386 {
1387 struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
1388 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
1389 struct et_rxdesc_ring *rx_ring;
1390 int error;
1391
1392 error = et_stop_rxdma(sc);
1393 if (error) {
1394 aprint_error_dev(sc->sc_dev, "can't init RX DMA engine\n");
1395 return error;
1396 }
1397
1398 /*
1399 * Install RX status
1400 */
1401 CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr));
1402 CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr));
1403
1404 /*
1405 * Install RX stat ring
1406 */
1407 CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr));
1408 CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr));
1409 CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1);
1410 CSR_WRITE_4(sc, ET_RXSTAT_POS, 0);
1411 CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1);
1412
1413 /* Match ET_RXSTAT_POS */
1414 rxst_ring->rsr_index = 0;
1415 rxst_ring->rsr_wrap = 0;
1416
1417 /*
1418 * Install the 2nd RX descriptor ring
1419 */
1420 rx_ring = &sc->sc_rx_ring[1];
1421 CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr));
1422 CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr));
1423 CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1);
1424 CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP);
1425 CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);
1426
1427 /* Match ET_RX_RING1_POS */
1428 rx_ring->rr_index = 0;
1429 rx_ring->rr_wrap = 1;
1430
1431 /*
1432 * Install the 1st RX descriptor ring
1433 */
1434 rx_ring = &sc->sc_rx_ring[0];
1435 CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr));
1436 CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr));
1437 CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1);
1438 CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP);
1439 CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);
1440
1441 /* Match ET_RX_RING0_POS */
1442 rx_ring->rr_index = 0;
1443 rx_ring->rr_wrap = 1;
1444
1445 /*
1446 * RX intr moderation
1447 */
1448 CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts);
1449 CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay);
1450
1451 return 0;
1452 }
1453
1454 int
1455 et_init_txdma(struct et_softc *sc)
1456 {
1457 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
1458 struct et_txstatus_data *txsd = &sc->sc_tx_status;
1459 int error;
1460
1461 error = et_stop_txdma(sc);
1462 if (error) {
1463 aprint_error_dev(sc->sc_dev, "can't init TX DMA engine\n");
1464 return error;
1465 }
1466
1467 /*
1468 * Install TX descriptor ring
1469 */
1470 CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr));
1471 CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr));
1472 CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1);
1473
1474 /*
1475 * Install TX status
1476 */
1477 CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr));
1478 CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr));
1479
1480 CSR_WRITE_4(sc, ET_TX_READY_POS, 0);
1481
1482 /* Match ET_TX_READY_POS */
1483 tx_ring->tr_ready_index = 0;
1484 tx_ring->tr_ready_wrap = 0;
1485
1486 return 0;
1487 }
1488
1489 void
1490 et_init_mac(struct et_softc *sc)
1491 {
1492 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1493 const uint8_t *eaddr = CLLADDR(ifp->if_sadl);
1494 uint32_t val;
1495
1496 /* Reset MAC */
1497 CSR_WRITE_4(sc, ET_MAC_CFG1,
1498 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
1499 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
1500 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);
1501
1502 /*
1503 * Setup inter packet gap
1504 */
1505 val = __SHIFTIN(56, ET_IPG_NONB2B_1) |
1506 __SHIFTIN(88, ET_IPG_NONB2B_2) |
1507 __SHIFTIN(80, ET_IPG_MINIFG) |
1508 __SHIFTIN(96, ET_IPG_B2B);
1509 CSR_WRITE_4(sc, ET_IPG, val);
1510
1511 /*
1512 * Setup half duplex mode
1513 */
1514 val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
1515 __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
1516 __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
1517 ET_MAC_HDX_EXC_DEFER;
1518 CSR_WRITE_4(sc, ET_MAC_HDX, val);
1519
1520 /* Clear MAC control */
1521 CSR_WRITE_4(sc, ET_MAC_CTRL, 0);
1522
1523 /* Reset MII */
1524 CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);
1525
1526 /*
1527 * Set MAC address
1528 */
1529 val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24);
1530 CSR_WRITE_4(sc, ET_MAC_ADDR1, val);
1531 val = (eaddr[0] << 16) | (eaddr[1] << 24);
1532 CSR_WRITE_4(sc, ET_MAC_ADDR2, val);
1533
1534 /* Set max frame length */
1535 CSR_WRITE_4(sc, ET_MAX_FRMLEN,
1536 ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ifp->if_mtu + ETHER_CRC_LEN);
1537
1538 /* Bring MAC out of reset state */
1539 CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
1540 }
1541
1542 void
1543 et_init_rxmac(struct et_softc *sc)
1544 {
1545 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1546 const uint8_t *eaddr = CLLADDR(ifp->if_sadl);
1547 uint32_t val;
1548 int i;
1549
1550 /* Disable RX MAC and WOL */
1551 CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE);
1552
1553 /*
1554 * Clear all WOL related registers
1555 */
1556 for (i = 0; i < 3; ++i)
1557 CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0);
1558 for (i = 0; i < 20; ++i)
1559 CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0);
1560
1561 /*
1562 * Set WOL source address. XXX is this necessary?
1563 */
1564 val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5];
1565 CSR_WRITE_4(sc, ET_WOL_SA_LO, val);
1566 val = (eaddr[0] << 8) | eaddr[1];
1567 CSR_WRITE_4(sc, ET_WOL_SA_HI, val);
1568
1569 /* Clear packet filters */
1570 CSR_WRITE_4(sc, ET_PKTFILT, 0);
1571
1572 /* No ucast filtering */
1573 CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0);
1574 CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0);
1575 CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0);
1576
1577 if (ifp->if_mtu > 8192) {
1578 /*
1579 * In order to transmit jumbo packets greater than 8k,
1580 * the FIFO between RX MAC and RX DMA needs to be reduced
1581 * in size to (16k - MTU). In order to implement this, we
1582 * must use "cut through" mode in the RX MAC, which chops
1583 * packets down into segments which are (max_size * 16).
1584 * In this case we selected 256 bytes, since this is the
1585 * size of the PCI-Express TLP's that the 1310 uses.
1586 */
1587 val = __SHIFTIN(16, ET_RXMAC_MC_SEGSZ_MAX) |
1588 ET_RXMAC_MC_SEGSZ_ENABLE;
1589 } else {
1590 val = 0;
1591 }
1592 CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val);
1593
1594 CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0);
1595
1596 /* Initialize RX MAC management register */
1597 CSR_WRITE_4(sc, ET_RXMAC_MGT, 0);
1598
1599 CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0);
1600
1601 CSR_WRITE_4(sc, ET_RXMAC_MGT,
1602 ET_RXMAC_MGT_PASS_ECRC |
1603 ET_RXMAC_MGT_PASS_ELEN |
1604 ET_RXMAC_MGT_PASS_ETRUNC |
1605 ET_RXMAC_MGT_CHECK_PKT);
1606
1607 /*
	 * Configure runt filtering (may not work on certain chip generations)
1609 */
1610 val = __SHIFTIN(ETHER_MIN_LEN, ET_PKTFILT_MINLEN) | ET_PKTFILT_FRAG;
1611 CSR_WRITE_4(sc, ET_PKTFILT, val);
1612
1613 /* Enable RX MAC but leave WOL disabled */
1614 CSR_WRITE_4(sc, ET_RXMAC_CTRL,
1615 ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE);
1616
1617 /*
1618 * Setup multicast hash and allmulti/promisc mode
1619 */
1620 et_setmulti(sc);
1621 }
1622
1623 void
1624 et_init_txmac(struct et_softc *sc)
1625 {
1626 /* Disable TX MAC and FC(?) */
1627 CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE);
1628
1629 /* No flow control yet */
1630 CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0);
1631
	/* Enable TX MAC but leave FC(?) disabled */
1633 CSR_WRITE_4(sc, ET_TXMAC_CTRL,
1634 ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE);
1635 }
1636
1637 int
1638 et_start_rxdma(struct et_softc *sc)
1639 {
1640 uint32_t val = 0;
1641
1642 val |= __SHIFTIN(sc->sc_rx_data[0].rbd_bufsize,
1643 ET_RXDMA_CTRL_RING0_SIZE) |
1644 ET_RXDMA_CTRL_RING0_ENABLE;
1645 val |= __SHIFTIN(sc->sc_rx_data[1].rbd_bufsize,
1646 ET_RXDMA_CTRL_RING1_SIZE) |
1647 ET_RXDMA_CTRL_RING1_ENABLE;
1648
1649 CSR_WRITE_4(sc, ET_RXDMA_CTRL, val);
1650
1651 DELAY(5);
1652
1653 if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) {
1654 aprint_error_dev(sc->sc_dev, "can't start RX DMA engine\n");
1655 return ETIMEDOUT;
1656 }
1657 return 0;
1658 }
1659
1660 int
1661 et_start_txdma(struct et_softc *sc)
1662 {
1663 CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT);
1664 return 0;
1665 }
1666
1667 int
1668 et_enable_txrx(struct et_softc *sc)
1669 {
1670 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1671 uint32_t val;
1672 int i, rc = 0;
1673
1674 val = CSR_READ_4(sc, ET_MAC_CFG1);
1675 val |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN;
1676 val &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW |
1677 ET_MAC_CFG1_LOOPBACK);
1678 CSR_WRITE_4(sc, ET_MAC_CFG1, val);
1679
1680 if ((rc = ether_mediachange(ifp)) != 0)
1681 goto out;
1682
1683 #define NRETRY 100
1684
1685 for (i = 0; i < NRETRY; ++i) {
1686 val = CSR_READ_4(sc, ET_MAC_CFG1);
1687 if ((val & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) ==
1688 (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN))
1689 break;
1690
1691 DELAY(10);
1692 }
1693 if (i == NRETRY) {
1694 aprint_error_dev(sc->sc_dev, "can't enable RX/TX\n");
1695 return ETIMEDOUT;
1696 }
1697
1698 #undef NRETRY
1699 return 0;
1700 out:
1701 return rc;
1702 }
1703
1704 void
1705 et_rxeof(struct et_softc *sc)
1706 {
1707 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1708 struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
1709 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
1710 uint32_t rxs_stat_ring;
1711 int rxst_wrap, rxst_index;
1712
1713 bus_dmamap_sync(sc->sc_dmat, rxsd->rxsd_dmap, 0,
1714 rxsd->rxsd_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1715 bus_dmamap_sync(sc->sc_dmat, rxst_ring->rsr_dmap, 0,
1716 rxst_ring->rsr_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1717
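	/*
	 * The RX status block reports how far the chip has advanced in the
	 * RX stat ring; each stat entry then identifies the ring, buffer
	 * index and length of one received frame, which is acknowledged by
	 * advancing ET_RXSTAT_POS and the per-ring position register.
	 */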
1718 rxs_stat_ring = rxsd->rxsd_status->rxs_stat_ring;
1719 rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0;
1720 rxst_index = __SHIFTOUT(rxs_stat_ring, ET_RXS_STATRING_INDEX);
1721
1722 while (rxst_index != rxst_ring->rsr_index ||
1723 rxst_wrap != rxst_ring->rsr_wrap) {
1724 struct et_rxbuf_data *rbd;
1725 struct et_rxdesc_ring *rx_ring;
1726 struct et_rxstat *st;
1727 struct et_rxbuf *rb;
1728 struct mbuf *m;
1729 int buflen, buf_idx, ring_idx;
1730 uint32_t rxstat_pos, rxring_pos;
1731
1732 KKASSERT(rxst_ring->rsr_index < ET_RX_NSTAT);
1733 st = &rxst_ring->rsr_stat[rxst_ring->rsr_index];
1734
1735 buflen = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_LEN);
1736 buf_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_BUFIDX);
1737 ring_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_RINGIDX);
1738
1739 if (++rxst_ring->rsr_index == ET_RX_NSTAT) {
1740 rxst_ring->rsr_index = 0;
1741 rxst_ring->rsr_wrap ^= 1;
1742 }
1743 rxstat_pos = __SHIFTIN(rxst_ring->rsr_index,
1744 ET_RXSTAT_POS_INDEX);
1745 if (rxst_ring->rsr_wrap)
1746 rxstat_pos |= ET_RXSTAT_POS_WRAP;
1747 CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos);
1748
1749 if (ring_idx >= ET_RX_NRING) {
1750 ifp->if_ierrors++;
1751 aprint_error_dev(sc->sc_dev, "invalid ring index %d\n",
1752 ring_idx);
1753 continue;
1754 }
1755 if (buf_idx >= ET_RX_NDESC) {
1756 ifp->if_ierrors++;
1757 aprint_error_dev(sc->sc_dev, "invalid buf index %d\n",
1758 buf_idx);
1759 continue;
1760 }
1761
1762 rbd = &sc->sc_rx_data[ring_idx];
1763 rb = &rbd->rbd_buf[buf_idx];
1764 m = rb->rb_mbuf;
1765 bus_dmamap_sync(sc->sc_dmat, rb->rb_dmap, 0,
1766 rb->rb_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1767
1768 if (rbd->rbd_newbuf(rbd, buf_idx, 0) == 0) {
1769 if (buflen < ETHER_CRC_LEN) {
1770 m_freem(m);
1771 ifp->if_ierrors++;
1772 } else {
1773 m->m_pkthdr.len = m->m_len = buflen -
1774 ETHER_CRC_LEN;
1775 m->m_pkthdr.rcvif = ifp;
1776
1777 bpf_mtap(ifp, m);
1778
1779 ifp->if_ipackets++;
1780 (*ifp->if_input)(ifp, m);
1781 }
1782 } else {
1783 ifp->if_ierrors++;
1784 }
1785
1786 rx_ring = &sc->sc_rx_ring[ring_idx];
1787
1788 if (buf_idx != rx_ring->rr_index) {
1789 aprint_error_dev(sc->sc_dev, "WARNING!! ring %d, "
1790 "buf_idx %d, rr_idx %d\n",
1791 ring_idx, buf_idx, rx_ring->rr_index);
1792 }
1793
1794 KKASSERT(rx_ring->rr_index < ET_RX_NDESC);
1795 if (++rx_ring->rr_index == ET_RX_NDESC) {
1796 rx_ring->rr_index = 0;
1797 rx_ring->rr_wrap ^= 1;
1798 }
1799 rxring_pos = __SHIFTIN(rx_ring->rr_index, ET_RX_RING_POS_INDEX);
1800 if (rx_ring->rr_wrap)
1801 rxring_pos |= ET_RX_RING_POS_WRAP;
1802 CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos);
1803 }
1804 }
1805
1806 int
1807 et_encap(struct et_softc *sc, struct mbuf **m0)
1808 {
1809 struct mbuf *m = *m0;
1810 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
1811 struct et_txbuf_data *tbd = &sc->sc_tx_data;
1812 struct et_txdesc *td;
1813 bus_dmamap_t map;
1814 int error, maxsegs, first_idx, last_idx, i;
1815 uint32_t tx_ready_pos, last_td_ctrl2;
1816
1817 maxsegs = ET_TX_NDESC - tbd->tbd_used;
1818 if (maxsegs > ET_NSEG_MAX)
1819 maxsegs = ET_NSEG_MAX;
1820 KASSERT(maxsegs >= ET_NSEG_SPARE,
1821 ("not enough spare TX desc (%d)\n", maxsegs));
1822
1823 KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
1824 first_idx = tx_ring->tr_ready_index;
1825 map = tbd->tbd_buf[first_idx].tb_dmap;
1826
1827 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
1828 BUS_DMA_NOWAIT);
1829 if (!error && map->dm_nsegs == 0) {
1830 bus_dmamap_unload(sc->sc_dmat, map);
1831 error = EFBIG;
1832 }
1833 if (error && error != EFBIG) {
		aprint_error_dev(sc->sc_dev, "can't load TX mbuf\n");
1835 goto back;
1836 }
1837 if (error) { /* error == EFBIG */
1838 struct mbuf *m_new;
1839
1840 error = 0;
1841
1842 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			aprint_error_dev(sc->sc_dev, "can't defrag TX mbuf\n");
			error = ENOBUFS;
			goto back;	/* the error path at "back" frees m */
		}
1849
1850 M_COPY_PKTHDR(m_new, m);
1851 if (m->m_pkthdr.len > MHLEN) {
1852 MCLGET(m_new, M_DONTWAIT);
			if (!(m_new->m_flags & M_EXT)) {
				/* No cluster; let "back" free the original mbuf. */
				m_freem(m_new);
				error = ENOBUFS;
			}
1858 }
1859
1860 if (error) {
1861 aprint_error_dev(sc->sc_dev, "can't defrag TX buffer\n");
1862 goto back;
1863 }
1864
1865 m_copydata(m, 0, m->m_pkthdr.len, mtod(m_new, void *));
1866 m_freem(m);
1867 m_new->m_len = m_new->m_pkthdr.len;
1868 *m0 = m = m_new;
1869
1870 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
1871 BUS_DMA_NOWAIT);
1872 if (error || map->dm_nsegs == 0) {
1873 if (map->dm_nsegs == 0) {
1874 bus_dmamap_unload(sc->sc_dmat, map);
1875 error = EFBIG;
1876 }
			aprint_error_dev(sc->sc_dev, "can't load defragmented TX mbuf\n");
1878 goto back;
1879 }
1880 }
1881
1882 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1883 BUS_DMASYNC_PREWRITE);
1884
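	/*
	 * Request a TX completion interrupt only every sc_tx_intr_nsegs
	 * segments; in between, completed descriptors are reaped from the
	 * ET_INTR_TIMER interrupt (see et_intr()) or from et_txtick().
	 */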
1885 last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG;
1886 sc->sc_tx += map->dm_nsegs;
1887 if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) {
1888 sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs;
1889 last_td_ctrl2 |= ET_TDCTRL2_INTR;
1890 }
1891
1892 last_idx = -1;
1893 for (i = 0; i < map->dm_nsegs; ++i) {
1894 int idx;
1895
1896 idx = (first_idx + i) % ET_TX_NDESC;
1897 td = &tx_ring->tr_desc[idx];
1898 td->td_addr_hi = ET_ADDR_HI(map->dm_segs[i].ds_addr);
1899 td->td_addr_lo = ET_ADDR_LO(map->dm_segs[i].ds_addr);
1900 td->td_ctrl1 =
1901 __SHIFTIN(map->dm_segs[i].ds_len, ET_TDCTRL1_LEN);
1902
1903 if (i == map->dm_nsegs - 1) { /* Last frag */
1904 td->td_ctrl2 = last_td_ctrl2;
1905 last_idx = idx;
1906 }
1907
1908 KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
1909 if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
1910 tx_ring->tr_ready_index = 0;
1911 tx_ring->tr_ready_wrap ^= 1;
1912 }
1913 }
1914 td = &tx_ring->tr_desc[first_idx];
1915 td->td_ctrl2 |= ET_TDCTRL2_FIRST_FRAG; /* First frag */
1916
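	/*
	 * Park the map that was actually loaded at the packet's last
	 * descriptor, where et_txeof() unloads it together with the mbuf;
	 * the first descriptor inherits that slot's unused spare map.
	 */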
1917 KKASSERT(last_idx >= 0);
1918 tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap;
1919 tbd->tbd_buf[last_idx].tb_dmap = map;
1920 tbd->tbd_buf[last_idx].tb_mbuf = m;
1921
1922 tbd->tbd_used += map->dm_nsegs;
1923 KKASSERT(tbd->tbd_used <= ET_TX_NDESC);
1924
1925 bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
1926 tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1927
1928
1929 tx_ready_pos = __SHIFTIN(tx_ring->tr_ready_index,
1930 ET_TX_READY_POS_INDEX);
1931 if (tx_ring->tr_ready_wrap)
1932 tx_ready_pos |= ET_TX_READY_POS_WRAP;
1933 CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos);
1934
1935 error = 0;
1936 back:
1937 if (error) {
1938 m_freem(m);
1939 *m0 = NULL;
1940 }
1941 return error;
1942 }
1943
1944 void
1945 et_txeof(struct et_softc *sc)
1946 {
1947 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1948 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
1949 struct et_txbuf_data *tbd = &sc->sc_tx_data;
1950 uint32_t tx_done;
1951 int end, wrap;
1952
1953 if (tbd->tbd_used == 0)
1954 return;
1955
1956 tx_done = CSR_READ_4(sc, ET_TX_DONE_POS);
1957 end = __SHIFTOUT(tx_done, ET_TX_DONE_POS_INDEX);
1958 wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0;
1959
1960 while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) {
1961 struct et_txbuf *tb;
1962
1963 KKASSERT(tbd->tbd_start_index < ET_TX_NDESC);
1964 tb = &tbd->tbd_buf[tbd->tbd_start_index];
1965
1966 bzero(&tx_ring->tr_desc[tbd->tbd_start_index],
1967 sizeof(struct et_txdesc));
1968 bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
1969 tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1970
1971 if (tb->tb_mbuf != NULL) {
1972 bus_dmamap_unload(sc->sc_dmat, tb->tb_dmap);
1973 m_freem(tb->tb_mbuf);
1974 tb->tb_mbuf = NULL;
1975 ifp->if_opackets++;
1976 }
1977
1978 if (++tbd->tbd_start_index == ET_TX_NDESC) {
1979 tbd->tbd_start_index = 0;
1980 tbd->tbd_start_wrap ^= 1;
1981 }
1982
1983 KKASSERT(tbd->tbd_used > 0);
1984 tbd->tbd_used--;
1985 }
1986
1987 if (tbd->tbd_used == 0) {
1988 callout_stop(&sc->sc_txtick);
1989 ifp->if_timer = 0;
1990 }
1991 if (tbd->tbd_used + ET_NSEG_SPARE <= ET_TX_NDESC)
1992 ifp->if_flags &= ~IFF_OACTIVE;
1993
1994 et_start(ifp);
1995 }
1996
1997 void
1998 et_txtick(void *xsc)
1999 {
2000 struct et_softc *sc = xsc;
2001 int s;
2002
2003 s = splnet();
2004 et_txeof(sc);
2005 splx(s);
2006 }
2007
2008 void
2009 et_tick(void *xsc)
2010 {
2011 struct et_softc *sc = xsc;
2012 int s;
2013
2014 s = splnet();
2015 mii_tick(&sc->sc_miibus);
2016 callout_schedule(&sc->sc_tick, hz);
2017 splx(s);
2018 }
2019
2020 int
2021 et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx, int init)
2022 {
2023 return et_newbuf(rbd, buf_idx, init, MCLBYTES);
2024 }
2025
2026 int
2027 et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx, int init)
2028 {
2029 return et_newbuf(rbd, buf_idx, init, MHLEN);
2030 }
2031
2032 int
2033 et_newbuf(struct et_rxbuf_data *rbd, int buf_idx, int init, int len0)
2034 {
2035 struct et_softc *sc = rbd->rbd_softc;
2036 struct et_rxdesc_ring *rx_ring;
2037 struct et_rxdesc *desc;
2038 struct et_rxbuf *rb;
2039 struct mbuf *m;
2040 bus_dmamap_t dmap;
2041 int error, len;
2042
2043 KKASSERT(buf_idx < ET_RX_NDESC);
2044 rb = &rbd->rbd_buf[buf_idx];
2045
	if (len0 >= MINCLSIZE) {
		MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
		if (m != NULL) {
			MCLGET(m, init ? M_WAITOK : M_DONTWAIT);
			if (!(m->m_flags & M_EXT)) {
				/* No cluster; fall through to the error path. */
				m_freem(m);
				m = NULL;
			}
		}
		len = MCLBYTES;
2052 } else {
2053 MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
2054 len = MHLEN;
2055 }
2056
2057 if (m == NULL) {
2058 error = ENOBUFS;
2059
2060 /* XXX for debug */
		aprint_error_dev(sc->sc_dev, "mbuf allocation failed, size %d\n", len0);
2062 if (init) {
2063 return error;
2064 } else {
2065 goto back;
2066 }
2067 }
2068 m->m_len = m->m_pkthdr.len = len;
2069
	/*
	 * Try to load the new RX mbuf with the spare DMA map first.
	 */
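	/*
	 * Loading into the spare map first means a failed load leaves the
	 * ring entry's current mbuf and mapping untouched; only on success
	 * are the two maps swapped below.
	 */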
2073 error = bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_mbuf_tmp_dmap, m,
2074 init ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
	if (!error && sc->sc_mbuf_tmp_dmap->dm_nsegs == 0) {
		bus_dmamap_unload(sc->sc_dmat, sc->sc_mbuf_tmp_dmap);
		error = EFBIG;
		aprint_error_dev(sc->sc_dev, "too many segments?!\n");
	}
	if (error) {
2081 m_freem(m);
2082
2083 /* XXX for debug */
2084 aprint_error_dev(sc->sc_dev, "can't load RX mbuf\n");
2085 if (init) {
2086 return error;
2087 } else {
2088 goto back;
2089 }
2090 }
2091
2092 if (!init)
2093 bus_dmamap_unload(sc->sc_dmat, rb->rb_dmap);
2094 rb->rb_mbuf = m;
2095
2096 /*
2097 * Swap RX buf's DMA map with the loaded temporary one
2098 */
2099 dmap = rb->rb_dmap;
2100 rb->rb_dmap = sc->sc_mbuf_tmp_dmap;
2101 rb->rb_paddr = rb->rb_dmap->dm_segs[0].ds_addr;
2102 sc->sc_mbuf_tmp_dmap = dmap;
2103
2104 error = 0;
2105 back:
2106 rx_ring = rbd->rbd_ring;
2107 desc = &rx_ring->rr_desc[buf_idx];
2108
2109 desc->rd_addr_hi = ET_ADDR_HI(rb->rb_paddr);
2110 desc->rd_addr_lo = ET_ADDR_LO(rb->rb_paddr);
2111 desc->rd_ctrl = __SHIFTIN(buf_idx, ET_RDCTRL_BUFIDX);
2112
2113 bus_dmamap_sync(sc->sc_dmat, rx_ring->rr_dmap, 0,
2114 rx_ring->rr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
2115 return error;
2116 }
2117