if_ste.c revision 1.56 1 /* $NetBSD: if_ste.c,v 1.56 2019/05/28 07:41:49 msaitoh Exp $ */
2
3 /*-
4 * Copyright (c) 2001 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * Device driver for the Sundance Tech. ST-201 10/100
34 * Ethernet controller.
35 */
36
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: if_ste.c,v 1.56 2019/05/28 07:41:49 msaitoh Exp $");
39
40
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/callout.h>
44 #include <sys/mbuf.h>
45 #include <sys/malloc.h>
46 #include <sys/kernel.h>
47 #include <sys/socket.h>
48 #include <sys/ioctl.h>
49 #include <sys/errno.h>
50 #include <sys/device.h>
51 #include <sys/queue.h>
52
53 #include <net/if.h>
54 #include <net/if_dl.h>
55 #include <net/if_media.h>
56 #include <net/if_ether.h>
57
58 #include <net/bpf.h>
59
60 #include <sys/bus.h>
61 #include <sys/intr.h>
62
63 #include <dev/mii/mii.h>
64 #include <dev/mii/miivar.h>
65 #include <dev/mii/mii_bitbang.h>
66
67 #include <dev/pci/pcireg.h>
68 #include <dev/pci/pcivar.h>
69 #include <dev/pci/pcidevs.h>
70
71 #include <dev/pci/if_stereg.h>
72
/*
 * Transmit descriptor list size.
 */
#define STE_NTXDESC		256
#define STE_NTXDESC_MASK	(STE_NTXDESC - 1)
/* Advance a Tx descriptor index with wrap-around (count is a power of 2). */
#define STE_NEXTTX(x)		(((x) + 1) & STE_NTXDESC_MASK)

/*
 * Receive descriptor list size.
 */
#define STE_NRXDESC		128
#define STE_NRXDESC_MASK	(STE_NRXDESC - 1)
/* Advance an Rx descriptor index with wrap-around (count is a power of 2). */
#define STE_NEXTRX(x)		(((x) + 1) & STE_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the ST-201 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct ste_control_data {
	/*
	 * The transmit descriptors.
	 */
	struct ste_tfd scd_txdescs[STE_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	struct ste_rfd scd_rxdescs[STE_NRXDESC];
};

/* Byte offset of member 'x' within the control-data clump. */
#define STE_CDOFF(x)	offsetof(struct ste_control_data, x)
#define STE_CDTXOFF(x)	STE_CDOFF(scd_txdescs[(x)])
#define STE_CDRXOFF(x)	STE_CDOFF(scd_rxdescs[(x)])
107
/*
 * Software state for transmit and receive jobs; one entry shadows each
 * hardware descriptor.
 */
struct ste_descsoft {
	struct mbuf *ds_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t ds_dmamap;		/* our DMA map */
};
115
/*
 * Software state per device.
 */
struct ste_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */

	void *sc_ih;			/* interrupt cookie */

	struct mii_data sc_mii;		/* MII/media information */

	callout_t sc_tick_ch;		/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	/* DMA (bus) address of the control-data clump. */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/*
	 * Software state for transmit and receive descriptors.
	 */
	struct ste_descsoft sc_txsoft[STE_NTXDESC];
	struct ste_descsoft sc_rxsoft[STE_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct ste_control_data *sc_control_data;
	/* Convenience accessors for the descriptor rings in the clump. */
#define	sc_txdescs	sc_control_data->scd_txdescs
#define	sc_rxdescs	sc_control_data->scd_rxdescs

	int	sc_txpending;		/* number of Tx requests pending */
	int	sc_txdirty;		/* first dirty Tx descriptor */
	int	sc_txlast;		/* last used Tx descriptor */

	int	sc_rxptr;		/* next ready Rx descriptor/descsoft */

	int	sc_txthresh;		/* Tx threshold */
	uint32_t sc_DMACtrl;		/* prototype DMACtrl register */
	uint16_t sc_IntEnable;		/* prototype IntEnable register */
	uint16_t sc_MacCtrl0;		/* prototype MacCtrl0 register */
	uint8_t	sc_ReceiveMode;		/* prototype ReceiveMode register */
};

/* DMA (bus) address of Tx/Rx descriptor 'x'. */
#define STE_CDTXADDR(sc, x)	((sc)->sc_cddma + STE_CDTXOFF((x)))
#define STE_CDRXADDR(sc, x)	((sc)->sc_cddma + STE_CDRXOFF((x)))

/* Sync a single Tx/Rx descriptor between CPU and device views. */
#define STE_CDTXSYNC(sc, x, ops)					\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    STE_CDTXOFF((x)), sizeof(struct ste_tfd), (ops))

#define STE_CDRXSYNC(sc, x, ops)					\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    STE_CDRXOFF((x)), sizeof(struct ste_rfd), (ops))

/*
 * (Re)initialize Rx descriptor 'x' to point at its mbuf cluster and
 * hand it back to the chip.
 */
#define STE_INIT_RXDESC(sc, x)						\
do {									\
	struct ste_descsoft *__ds = &(sc)->sc_rxsoft[(x)];		\
	struct ste_rfd *__rfd = &(sc)->sc_rxdescs[(x)];			\
	struct mbuf *__m = __ds->ds_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + 2;				\
	__rfd->rfd_frag.frag_addr =					\
	    htole32(__ds->ds_dmamap->dm_segs[0].ds_addr + 2);		\
	__rfd->rfd_frag.frag_len = htole32((MCLBYTES - 2) | FRAG_LAST);	\
	__rfd->rfd_next = htole32(STE_CDRXADDR((sc), STE_NEXTRX((x))));	\
	__rfd->rfd_status = 0;						\
	STE_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); \
} while (/*CONSTCOND*/0)

/* Spin count used when polling the chip for reset/DMA-halt completion. */
#define STE_TIMEOUT	1000
193
/* ifnet interface functions. */
static void	ste_start(struct ifnet *);
static void	ste_watchdog(struct ifnet *);
static int	ste_ioctl(struct ifnet *, u_long, void *);
static int	ste_init(struct ifnet *);
static void	ste_stop(struct ifnet *, int);

static bool	ste_shutdown(device_t, int);

/* Chip helpers. */
static void	ste_reset(struct ste_softc *, uint32_t);
static void	ste_setthresh(struct ste_softc *);
static void	ste_txrestart(struct ste_softc *, uint8_t);
static void	ste_rxdrain(struct ste_softc *);
static int	ste_add_rxbuf(struct ste_softc *, int);
static void	ste_read_eeprom(struct ste_softc *, int, uint16_t *);
static void	ste_tick(void *);

static void	ste_stats_update(struct ste_softc *);

static void	ste_set_filter(struct ste_softc *);

/* Interrupt handling. */
static int	ste_intr(void *);
static void	ste_txintr(struct ste_softc *);
static void	ste_rxintr(struct ste_softc *);

/* MII interface. */
static int	ste_mii_readreg(device_t, int, int, uint16_t *);
static int	ste_mii_writereg(device_t, int, int, uint16_t);
static void	ste_mii_statchg(struct ifnet *);

/* Autoconfiguration glue. */
static int	ste_match(device_t, cfdata_t, void *);
static void	ste_attach(device_t, device_t, void *);

/*
 * If non-zero, small received packets are copied into a fresh header
 * mbuf instead of swapping out the cluster (see ste_rxintr()).
 * Patchable at run time.
 */
int	ste_copy_small = 0;

CFATTACH_DECL_NEW(ste, sizeof(struct ste_softc),
    ste_match, ste_attach, NULL, NULL);

static uint32_t ste_mii_bitbang_read(device_t);
static void	ste_mii_bitbang_write(device_t, uint32_t);

/* PhyCtrl register bits used to bit-bang the MII management interface. */
static const struct mii_bitbang_ops ste_mii_bitbang_ops = {
	ste_mii_bitbang_read,
	ste_mii_bitbang_write,
	{
		PC_MgmtData,		/* MII_BIT_MDO */
		PC_MgmtData,		/* MII_BIT_MDI */
		PC_MgmtClk,		/* MII_BIT_MDC */
		PC_MgmtDir,		/* MII_BIT_DIR_HOST_PHY */
		0,			/* MII_BIT_DIR_PHY_HOST */
	}
};
244
245 /*
246 * Devices supported by this driver.
247 */
248 static const struct ste_product {
249 pci_vendor_id_t ste_vendor;
250 pci_product_id_t ste_product;
251 const char *ste_name;
252 } ste_products[] = {
253 { PCI_VENDOR_SUNDANCETI, PCI_PRODUCT_SUNDANCETI_IP100A,
254 "IC Plus Corp. IP00A 10/100 Fast Ethernet Adapter" },
255
256 { PCI_VENDOR_SUNDANCETI, PCI_PRODUCT_SUNDANCETI_ST201,
257 "Sundance ST-201 10/100 Ethernet" },
258
259 { PCI_VENDOR_DLINK, PCI_PRODUCT_DLINK_DL1002,
260 "D-Link DL-1002 10/100 Ethernet" },
261
262 { 0, 0,
263 NULL },
264 };
265
266 static const struct ste_product *
267 ste_lookup(const struct pci_attach_args *pa)
268 {
269 const struct ste_product *sp;
270
271 for (sp = ste_products; sp->ste_name != NULL; sp++) {
272 if (PCI_VENDOR(pa->pa_id) == sp->ste_vendor &&
273 PCI_PRODUCT(pa->pa_id) == sp->ste_product)
274 return (sp);
275 }
276 return (NULL);
277 }
278
279 static int
280 ste_match(device_t parent, cfdata_t cf, void *aux)
281 {
282 struct pci_attach_args *pa = aux;
283
284 if (ste_lookup(pa) != NULL)
285 return (1);
286
287 return (0);
288 }
289
/*
 * Autoconfiguration attach function: map the device, establish the
 * interrupt, allocate and load the DMA control structures, read the
 * station address from the EEPROM, probe the MII, and attach the
 * network interface.  On failure, resources are released in reverse
 * order via the fail_* labels.
 */
static void
ste_attach(device_t parent, device_t self, void *aux)
{
	struct ste_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mii_data * const mii = &sc->sc_mii;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_space_tag_t iot, memt;
	bus_space_handle_t ioh, memh;
	bus_dma_segment_t seg;
	int ioh_valid, memh_valid;
	int i, rseg, error;
	const struct ste_product *sp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2];
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_dev = self;

	callout_init(&sc->sc_tick_ch, 0);

	sp = ste_lookup(pa);
	if (sp == NULL) {
		/* ste_match() accepted the device, so this cannot fail. */
		printf("\n");
		panic("ste_attach: impossible");
	}

	printf(": %s\n", sp->ste_name);

	/*
	 * Map the device.  Prefer memory space; fall back to I/O space.
	 */
	ioh_valid = (pci_mapreg_map(pa, STE_PCI_IOBA,
	    PCI_MAPREG_TYPE_IO, 0,
	    &iot, &ioh, NULL, NULL) == 0);
	memh_valid = (pci_mapreg_map(pa, STE_PCI_MMBA,
	    PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0,
	    &memt, &memh, NULL, NULL) == 0);

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else if (ioh_valid) {
		sc->sc_st = iot;
		sc->sc_sh = ioh;
	} else {
		aprint_error_dev(self, "unable to map device registers\n");
		return;
	}

	sc->sc_dmat = pa->pa_dmat;

	/* Enable bus mastering. */
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) |
	    PCI_COMMAND_MASTER_ENABLE);

	/* power up chip (EOPNOTSUPP just means no power management) */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
	sc->sc_ih = pci_intr_establish_xname(pc, ih, IPL_NET, ste_intr, sc,
	    device_xname(self));
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct ste_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n", error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct ste_control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct ste_control_data), 1,
	    sizeof(struct ste_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct ste_control_data), NULL,
	    0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < STE_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    STE_NTXFRAGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].ds_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create tx DMA map %d, error = %d\n", i,
			    error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < STE_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create rx DMA map %d, error = %d\n", i,
			    error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].ds_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	ste_reset(sc, AC_GlobalReset | AC_RxReset | AC_TxReset | AC_DMA |
	    AC_FIFO | AC_Network | AC_Host | AC_AutoInit | AC_RstOut);

	/*
	 * Read the Ethernet address from the EEPROM (three 16-bit words,
	 * stored little-endian).
	 */
	for (i = 0; i < 3; i++) {
		ste_read_eeprom(sc, STE_EEPROM_StationAddress0 + i, &myea[i]);
		myea[i] = le16toh(myea[i]);
	}
	memcpy(enaddr, myea, sizeof(enaddr));

	printf("%s: Ethernet address %s\n", device_xname(sc->sc_dev),
	    ether_sprintf(enaddr));

	/*
	 * Initialize our media structures and probe the MII.
	 */
	mii->mii_ifp = ifp;
	mii->mii_readreg = ste_mii_readreg;
	mii->mii_writereg = ste_mii_writereg;
	mii->mii_statchg = ste_mii_statchg;
	sc->sc_ethercom.ec_mii = mii;
	ifmedia_init(&mii->mii_media, IFM_IMASK, ether_mediachange,
	    ether_mediastatus);
	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY found; offer a "none" medium so ifconfig works. */
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
	} else
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);

	ifp = &sc->sc_ethercom.ec_if;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ste_ioctl;
	ifp->if_start = ste_start;
	ifp->if_watchdog = ste_watchdog;
	ifp->if_init = ste_init;
	ifp->if_stop = ste_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Default the transmit threshold to 128 bytes.
	 */
	sc->sc_txthresh = 128;

	/*
	 * Disable MWI if the PCI layer tells us to.
	 */
	sc->sc_DMACtrl = 0;
	if ((pa->pa_flags & PCI_FLAGS_MWI_OKAY) == 0)
		sc->sc_DMACtrl |= DC_MWIDisable;

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, enaddr);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	if (pmf_device_register1(self, NULL, NULL, ste_shutdown))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < STE_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].ds_dmamap);
	}
 fail_4:
	for (i = 0; i < STE_NTXDESC; i++) {
		if (sc->sc_txsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct ste_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}
550
551 /*
552 * ste_shutdown:
553 *
554 * Make sure the interface is stopped at reboot time.
555 */
556 static bool
557 ste_shutdown(device_t self, int howto)
558 {
559 struct ste_softc *sc;
560
561 sc = device_private(self);
562 ste_stop(&sc->sc_ethercom.ec_if, 1);
563
564 return true;
565 }
566
567 static void
568 ste_dmahalt_wait(struct ste_softc *sc)
569 {
570 int i;
571
572 for (i = 0; i < STE_TIMEOUT; i++) {
573 delay(2);
574 if ((bus_space_read_4(sc->sc_st, sc->sc_sh, STE_DMACtrl) &
575 DC_DMAHaltBusy) == 0)
576 break;
577 }
578
579 if (i == STE_TIMEOUT)
580 printf("%s: DMA halt timed out\n", device_xname(sc->sc_dev));
581 }
582
/*
 * ste_start: [ifnet interface function]
 *
 *	Start packet transmission on the interface.  Drains the send
 *	queue into the Tx descriptor ring, then links the new chain to
 *	the previous tail and kicks the Tx DMA engine if it was idle.
 */
static void
ste_start(struct ifnet *ifp)
{
	struct ste_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct ste_descsoft *ds;
	struct ste_tfd *tfd;
	bus_dmamap_t dmamap;
	int error, olasttx, nexttx, opending, seg, totlen;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of pending transmissions
	 * and the current last descriptor in the list.
	 */
	opending = sc->sc_txpending;
	olasttx = sc->sc_txlast;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->sc_txpending < STE_NTXDESC) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Get the last and next available transmit descriptor.
		 */
		nexttx = STE_NEXTTX(sc->sc_txlast);
		tfd = &sc->sc_txdescs[nexttx];
		ds = &sc->sc_txsoft[nexttx];

		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the alloted number of segments, or we
		 * were short on resources.  In this case, we'll copy
		 * and try again.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    device_xname(sc->sc_dev));
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n",
					    device_xname(sc->sc_dev));
					m_freem(m);
					break;
				}
			}
			/* Flatten the chain into the single new buffer. */
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", device_xname(sc->sc_dev),
				    error);
				break;
			}
		}

		/* Commit: remove the packet from the queue; if we made a
		 * copy, free the original and substitute the copy. */
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/* Initialize the fragment list. */
		for (totlen = 0, seg = 0; seg < dmamap->dm_nsegs; seg++) {
			tfd->tfd_frags[seg].frag_addr =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			tfd->tfd_frags[seg].frag_len =
			    htole32(dmamap->dm_segs[seg].ds_len);
			totlen += dmamap->dm_segs[seg].ds_len;
		}
		/* Mark the final fragment. */
		tfd->tfd_frags[seg - 1].frag_len |= htole32(FRAG_LAST);

		/* Initialize the descriptor. */
		tfd->tfd_next = htole32(STE_CDTXADDR(sc, nexttx));
		tfd->tfd_control = htole32(TFD_FrameId(nexttx) | (totlen & 3));

		/* Sync the descriptor. */
		STE_CDTXSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later,
		 * and remember what txdirty will be once the packet is
		 * done.
		 */
		ds->ds_mbuf = m0;

		/* Advance the tx pointer. */
		sc->sc_txpending++;
		sc->sc_txlast = nexttx;

		/*
		 * Pass the packet to any BPF listeners.
		 */
		bpf_mtap(ifp, m0, BPF_D_OUT);
	}

	if (sc->sc_txpending == STE_NTXDESC) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txpending != opending) {
		/*
		 * We enqueued packets.  If the transmitter was idle,
		 * reset the txdirty pointer.
		 */
		if (opending == 0)
			sc->sc_txdirty = STE_NEXTTX(olasttx);

		/*
		 * Cause a descriptor interrupt to happen on the
		 * last packet we enqueued, and also cause the
		 * DMA engine to wait after is has finished processing
		 * it.
		 */
		sc->sc_txdescs[sc->sc_txlast].tfd_next = 0;
		sc->sc_txdescs[sc->sc_txlast].tfd_control |=
		    htole32(TFD_TxDMAIndicate);
		STE_CDTXSYNC(sc, sc->sc_txlast,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/*
		 * Link up the new chain of descriptors to the
		 * last.
		 */
		sc->sc_txdescs[olasttx].tfd_next =
		    htole32(STE_CDTXADDR(sc, STE_NEXTTX(olasttx)));
		STE_CDTXSYNC(sc, olasttx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/*
		 * Kick the transmit DMA logic.  Note that since we're
		 * using auto-polling, reading the Tx desc pointer will
		 * give it the nudge it needs to get going.
		 */
		if (bus_space_read_4(sc->sc_st, sc->sc_sh,
		    STE_TxDMAListPtr) == 0) {
			/* Engine is idle: halt, load the list, resume. */
			bus_space_write_4(sc->sc_st, sc->sc_sh,
			    STE_DMACtrl, DC_TxDMAHalt);
			ste_dmahalt_wait(sc);
			bus_space_write_4(sc->sc_st, sc->sc_sh,
			    STE_TxDMAListPtr,
			    STE_CDTXADDR(sc, STE_NEXTTX(olasttx)));
			bus_space_write_4(sc->sc_st, sc->sc_sh,
			    STE_DMACtrl, DC_TxDMAResume);
		}

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
771
772 /*
773 * ste_watchdog: [ifnet interface function]
774 *
775 * Watchdog timer handler.
776 */
777 static void
778 ste_watchdog(struct ifnet *ifp)
779 {
780 struct ste_softc *sc = ifp->if_softc;
781
782 printf("%s: device timeout\n", device_xname(sc->sc_dev));
783 ifp->if_oerrors++;
784
785 ste_txintr(sc);
786 ste_rxintr(sc);
787 (void) ste_init(ifp);
788
789 /* Try to get more packets going. */
790 ste_start(ifp);
791 }
792
793 /*
794 * ste_ioctl: [ifnet interface function]
795 *
796 * Handle control requests from the operator.
797 */
798 static int
799 ste_ioctl(struct ifnet *ifp, u_long cmd, void *data)
800 {
801 struct ste_softc *sc = ifp->if_softc;
802 int s, error;
803
804 s = splnet();
805
806 error = ether_ioctl(ifp, cmd, data);
807 if (error == ENETRESET) {
808 /*
809 * Multicast list has changed; set the hardware filter
810 * accordingly.
811 */
812 if (ifp->if_flags & IFF_RUNNING)
813 ste_set_filter(sc);
814 error = 0;
815 }
816
817 /* Try to get more packets going. */
818 ste_start(ifp);
819
820 splx(s);
821 return (error);
822 }
823
/*
 * ste_intr:
 *
 *	Interrupt service routine.  Acks and dispatches interrupt
 *	causes in a loop until no enabled cause remains or a condition
 *	requiring a full reinit is seen.  Returns 0 if the interrupt
 *	was not ours, 1 otherwise.
 */
static int
ste_intr(void *arg)
{
	struct ste_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint16_t isr;
	uint8_t txstat;
	int wantinit;

	/* Not our interrupt? */
	if ((bus_space_read_2(sc->sc_st, sc->sc_sh, STE_IntStatus) &
	     IS_InterruptStatus) == 0)
		return (0);

	for (wantinit = 0; wantinit == 0;) {
		/* Reading IntStatusAck acknowledges the causes read. */
		isr = bus_space_read_2(sc->sc_st, sc->sc_sh, STE_IntStatusAck);
		if ((isr & sc->sc_IntEnable) == 0)
			break;

		/* Receive interrupts. */
		if (isr & IE_RxDMAComplete)
			ste_rxintr(sc);

		/* Transmit interrupts. */
		if (isr & (IE_TxDMAComplete | IE_TxComplete))
			ste_txintr(sc);

		/* Statistics overflow. */
		if (isr & IE_UpdateStats)
			ste_stats_update(sc);

		/* Transmission errors. */
		if (isr & IE_TxComplete) {
			/* Drain all pending Tx status entries. */
			for (;;) {
				txstat = bus_space_read_1(sc->sc_st, sc->sc_sh,
				    STE_TxStatus);
				if ((txstat & TS_TxComplete) == 0)
					break;
				if (txstat & TS_TxUnderrun) {
					/*
					 * Underrun: raise the Tx threshold
					 * (capped at 0x1ffc), reset the Tx
					 * side, and restart at the failed
					 * frame.
					 */
					sc->sc_txthresh += 32;
					if (sc->sc_txthresh > 0x1ffc)
						sc->sc_txthresh = 0x1ffc;
					printf("%s: transmit underrun, new "
					    "threshold: %d bytes\n",
					    device_xname(sc->sc_dev),
					    sc->sc_txthresh);
					ste_reset(sc, AC_TxReset | AC_DMA |
					    AC_FIFO | AC_Network);
					ste_setthresh(sc);
					bus_space_write_1(sc->sc_st, sc->sc_sh,
					    STE_TxDMAPollPeriod, 127);
					ste_txrestart(sc,
					    bus_space_read_1(sc->sc_st,
						sc->sc_sh, STE_TxFrameId));
				}
				if (txstat & TS_TxReleaseError) {
					printf("%s: Tx FIFO release error\n",
					    device_xname(sc->sc_dev));
					wantinit = 1;
				}
				if (txstat & TS_MaxCollisions) {
					printf("%s: excessive collisions\n",
					    device_xname(sc->sc_dev));
					wantinit = 1;
				}
				if (txstat & TS_TxStatusOverflow) {
					printf("%s: status overflow\n",
					    device_xname(sc->sc_dev));
					wantinit = 1;
				}
				/* Advance to the next Tx status entry. */
				bus_space_write_2(sc->sc_st, sc->sc_sh,
				    STE_TxStatus, 0);
			}
		}

		/* Host interface errors. */
		if (isr & IE_HostError) {
			printf("%s: Host interface error\n",
			    device_xname(sc->sc_dev));
			wantinit = 1;
		}
	}

	if (wantinit)
		ste_init(ifp);

	/* Re-enable interrupts (ste_init() may have changed the mask). */
	bus_space_write_2(sc->sc_st, sc->sc_sh, STE_IntEnable,
	    sc->sc_IntEnable);

	/* Try to get more packets going. */
	if_schedule_deferred_start(ifp);

	return (1);
}
922
923 /*
924 * ste_txintr:
925 *
926 * Helper; handle transmit interrupts.
927 */
928 static void
929 ste_txintr(struct ste_softc *sc)
930 {
931 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
932 struct ste_descsoft *ds;
933 uint32_t control;
934 int i;
935
936 ifp->if_flags &= ~IFF_OACTIVE;
937
938 /*
939 * Go through our Tx list and free mbufs for those
940 * frames which have been transmitted.
941 */
942 for (i = sc->sc_txdirty; sc->sc_txpending != 0;
943 i = STE_NEXTTX(i), sc->sc_txpending--) {
944 ds = &sc->sc_txsoft[i];
945
946 STE_CDTXSYNC(sc, i,
947 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
948
949 control = le32toh(sc->sc_txdescs[i].tfd_control);
950 if ((control & TFD_TxDMAComplete) == 0)
951 break;
952
953 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
954 0, ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
955 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
956 m_freem(ds->ds_mbuf);
957 ds->ds_mbuf = NULL;
958 }
959
960 /* Update the dirty transmit buffer pointer. */
961 sc->sc_txdirty = i;
962
963 /*
964 * If there are no more pending transmissions, cancel the watchdog
965 * timer.
966 */
967 if (sc->sc_txpending == 0)
968 ifp->if_timer = 0;
969 }
970
/*
 * ste_rxintr:
 *
 *	Helper; handle receive interrupts.  Walks the Rx ring from
 *	sc_rxptr, passing completed frames up the stack and recycling
 *	their descriptors, until the first descriptor the chip still
 *	owns.
 */
static void
ste_rxintr(struct ste_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ste_descsoft *ds;
	struct mbuf *m;
	uint32_t status;
	int i, len;

	for (i = sc->sc_rxptr;; i = STE_NEXTRX(i)) {
		ds = &sc->sc_rxsoft[i];

		STE_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		status = le32toh(sc->sc_rxdescs[i].rfd_status);

		/* Stop at the first descriptor the chip still owns. */
		if ((status & RFD_RxDMAComplete) == 0)
			break;

		/*
		 * If the packet had an error, simply recycle the
		 * buffer.  Note, we count the error later in the
		 * periodic stats update.
		 */
		if (status & RFD_RxFrameError) {
			STE_INIT_RXDESC(sc, i);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * No errors; receive the packet.  Note, we have
		 * configured the chip to not include the CRC at
		 * the end of the packet.
		 */
		len = RFD_RxDMAFrameLen(status);

		/*
		 * If the packet is small enough to fit in a
		 * single header mbuf, allocate one and copy
		 * the data into it.  This greatly reduces
		 * memory consumption when we receive lots
		 * of small packets.
		 *
		 * Otherwise, we add a new buffer to the receive
		 * chain.  If this fails, we drop the packet and
		 * recycle the old buffer.
		 */
		if (ste_copy_small != 0 && len <= (MHLEN - 2)) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			/* Keep the payload 4-byte aligned, as in the ring. */
			m->m_data += 2;
			memcpy(mtod(m, void *),
			    mtod(ds->ds_mbuf, void *), len);
			STE_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
		} else {
			m = ds->ds_mbuf;
			if (ste_add_rxbuf(sc, i) != 0) {
 dropit:
				/* Drop: count the error, recycle the buffer. */
				ifp->if_ierrors++;
				STE_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat,
				    ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}
		}

		m_set_rcvif(m, ifp);
		m->m_pkthdr.len = m->m_len = len;

		/* Pass it on. */
		if_percpuq_enqueue(ifp->if_percpuq, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;
}
1062
1063 /*
1064 * ste_tick:
1065 *
1066 * One second timer, used to tick the MII.
1067 */
1068 static void
1069 ste_tick(void *arg)
1070 {
1071 struct ste_softc *sc = arg;
1072 int s;
1073
1074 s = splnet();
1075 mii_tick(&sc->sc_mii);
1076 ste_stats_update(sc);
1077 splx(s);
1078
1079 callout_reset(&sc->sc_tick_ch, hz, ste_tick, sc);
1080 }
1081
/*
 * ste_stats_update:
 *
 *	Read the ST-201 statistics counters and fold them into the
 *	interface counters.  Counters whose values we discard are still
 *	read — presumably the hardware counters are clear-on-read and
 *	reading drains them (TODO confirm against the ST-201 datasheet).
 */
static void
ste_stats_update(struct ste_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;

	/* Octet counters: read and discard. */
	(void) bus_space_read_2(st, sh, STE_OctetsReceivedOk0);
	(void) bus_space_read_2(st, sh, STE_OctetsReceivedOk1);

	(void) bus_space_read_2(st, sh, STE_OctetsTransmittedOk0);
	(void) bus_space_read_2(st, sh, STE_OctetsTransmittedOk1);

	ifp->if_opackets +=
	    (u_int) bus_space_read_2(st, sh, STE_FramesTransmittedOK);
	ifp->if_ipackets +=
	    (u_int) bus_space_read_2(st, sh, STE_FramesReceivedOK);

	ifp->if_collisions +=
	    (u_int) bus_space_read_1(st, sh, STE_LateCollisions) +
	    (u_int) bus_space_read_1(st, sh, STE_MultipleColFrames) +
	    (u_int) bus_space_read_1(st, sh, STE_SingleColFrames);

	(void) bus_space_read_1(st, sh, STE_FramesWDeferredXmt);

	ifp->if_ierrors +=
	    (u_int) bus_space_read_1(st, sh, STE_FramesLostRxErrors);

	ifp->if_oerrors +=
	    (u_int) bus_space_read_1(st, sh, STE_FramesWExDeferral) +
	    (u_int) bus_space_read_1(st, sh, STE_FramesXbortXSColls) +
	    bus_space_read_1(st, sh, STE_CarrierSenseErrors);

	/* Broadcast/multicast counters: read and discard. */
	(void) bus_space_read_1(st, sh, STE_BcstFramesXmtdOk);
	(void) bus_space_read_1(st, sh, STE_BcstFramesRcvdOk);
	(void) bus_space_read_1(st, sh, STE_McstFramesXmtdOk);
	(void) bus_space_read_1(st, sh, STE_McstFramesRcvdOk);
}
1125
1126 /*
1127 * ste_reset:
1128 *
1129 * Perform a soft reset on the ST-201.
1130 */
1131 static void
1132 ste_reset(struct ste_softc *sc, uint32_t rstbits)
1133 {
1134 uint32_t ac;
1135 int i;
1136
1137 ac = bus_space_read_4(sc->sc_st, sc->sc_sh, STE_AsicCtrl);
1138
1139 bus_space_write_4(sc->sc_st, sc->sc_sh, STE_AsicCtrl, ac | rstbits);
1140
1141 delay(50000);
1142
1143 for (i = 0; i < STE_TIMEOUT; i++) {
1144 delay(1000);
1145 if ((bus_space_read_4(sc->sc_st, sc->sc_sh, STE_AsicCtrl) &
1146 AC_ResetBusy) == 0)
1147 break;
1148 }
1149
1150 if (i == STE_TIMEOUT)
1151 printf("%s: reset failed to complete\n",
1152 device_xname(sc->sc_dev));
1153
1154 delay(1000);
1155 }
1156
/*
 * ste_setthresh:
 *
 *	Set the various transmit threshold registers.
 */
static void
ste_setthresh(struct ste_softc *sc)
{
	/* Set the TX start threshold. */
	bus_space_write_2(sc->sc_st, sc->sc_sh,
	    STE_TxStartThresh, sc->sc_txthresh);
	/*
	 * Urgent threshold: set to sc_txthresh / 2.
	 * NOTE(review): the shift by 6 only equals sc_txthresh / 2 if the
	 * register counts in 32-byte units ((t / 2) / 32 == t >> 6) --
	 * confirm against the ST-201 datasheet.
	 */
	bus_space_write_2(sc->sc_st, sc->sc_sh, STE_TxDMAUrgentThresh,
	    sc->sc_txthresh >> 6);
	/* Burst threshold: use default value (256 bytes). */
}
1173
/*
 * ste_txrestart:
 *
 *	Restart TX at the given frame ID in the transmitter ring,
 *	e.g. after a transmit error aborted the engine.
 */
static void
ste_txrestart(struct ste_softc *sc, uint8_t id)
{
	uint32_t control;

	/*
	 * Clear the descriptor's DMA-complete bit so the chip will
	 * process it again, syncing the descriptor around the edit.
	 */
	STE_CDTXSYNC(sc, id, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	control = le32toh(sc->sc_txdescs[id].tfd_control);
	control &= ~TFD_TxDMAComplete;
	sc->sc_txdescs[id].tfd_control = htole32(control);
	STE_CDTXSYNC(sc, id, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	 * Clear the list pointer, re-enable the transmitter, halt TX DMA
	 * and wait for the halt to take effect, then hand the chip the
	 * repaired descriptor and resume.  This sequence is order
	 * sensitive; do not reorder.
	 */
	bus_space_write_4(sc->sc_st, sc->sc_sh, STE_TxDMAListPtr, 0);
	bus_space_write_2(sc->sc_st, sc->sc_sh, STE_MacCtrl1, MC1_TxEnable);
	bus_space_write_4(sc->sc_st, sc->sc_sh, STE_DMACtrl, DC_TxDMAHalt);
	ste_dmahalt_wait(sc);
	bus_space_write_4(sc->sc_st, sc->sc_sh, STE_TxDMAListPtr,
	    STE_CDTXADDR(sc, id));
	bus_space_write_4(sc->sc_st, sc->sc_sh, STE_DMACtrl, DC_TxDMAResume);
}
1196
1197 /*
1198 * ste_init: [ ifnet interface function ]
1199 *
1200 * Initialize the interface. Must be called at splnet().
1201 */
1202 static int
1203 ste_init(struct ifnet *ifp)
1204 {
1205 struct ste_softc *sc = ifp->if_softc;
1206 bus_space_tag_t st = sc->sc_st;
1207 bus_space_handle_t sh = sc->sc_sh;
1208 struct ste_descsoft *ds;
1209 int i, error = 0;
1210
1211 /*
1212 * Cancel any pending I/O.
1213 */
1214 ste_stop(ifp, 0);
1215
1216 /*
1217 * Reset the chip to a known state.
1218 */
1219 ste_reset(sc, AC_GlobalReset | AC_RxReset | AC_TxReset | AC_DMA |
1220 AC_FIFO | AC_Network | AC_Host | AC_AutoInit | AC_RstOut);
1221
1222 /*
1223 * Initialize the transmit descriptor ring.
1224 */
1225 memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
1226 sc->sc_txpending = 0;
1227 sc->sc_txdirty = 0;
1228 sc->sc_txlast = STE_NTXDESC - 1;
1229
1230 /*
1231 * Initialize the receive descriptor and receive job
1232 * descriptor rings.
1233 */
1234 for (i = 0; i < STE_NRXDESC; i++) {
1235 ds = &sc->sc_rxsoft[i];
1236 if (ds->ds_mbuf == NULL) {
1237 if ((error = ste_add_rxbuf(sc, i)) != 0) {
1238 printf("%s: unable to allocate or map rx "
1239 "buffer %d, error = %d\n",
1240 device_xname(sc->sc_dev), i, error);
1241 /*
1242 * XXX Should attempt to run with fewer receive
1243 * XXX buffers instead of just failing.
1244 */
1245 ste_rxdrain(sc);
1246 goto out;
1247 }
1248 } else
1249 STE_INIT_RXDESC(sc, i);
1250 }
1251 sc->sc_rxptr = 0;
1252
1253 /* Set the station address. */
1254 for (i = 0; i < ETHER_ADDR_LEN; i++)
1255 bus_space_write_1(st, sh, STE_StationAddress0 + 1,
1256 CLLADDR(ifp->if_sadl)[i]);
1257
1258 /* Set up the receive filter. */
1259 ste_set_filter(sc);
1260
1261 /*
1262 * Give the receive ring to the chip.
1263 */
1264 bus_space_write_4(st, sh, STE_RxDMAListPtr,
1265 STE_CDRXADDR(sc, sc->sc_rxptr));
1266
1267 /*
1268 * We defer giving the transmit ring to the chip until we
1269 * transmit the first packet.
1270 */
1271
1272 /*
1273 * Initialize the Tx auto-poll period. It's OK to make this number
1274 * large (127 is the max) -- we explicitly kick the transmit engine
1275 * when there's actually a packet. We are using auto-polling only
1276 * to make the interface to the transmit engine not suck.
1277 */
1278 bus_space_write_1(sc->sc_st, sc->sc_sh, STE_TxDMAPollPeriod, 127);
1279
1280 /* ..and the Rx auto-poll period. */
1281 bus_space_write_1(st, sh, STE_RxDMAPollPeriod, 64);
1282
1283 /* Initialize the Tx start threshold. */
1284 ste_setthresh(sc);
1285
1286 /* Set the FIFO release threshold to 512 bytes. */
1287 bus_space_write_1(st, sh, STE_TxReleaseThresh, 512 >> 4);
1288
1289 /* Set maximum packet size for VLAN. */
1290 if (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU)
1291 bus_space_write_2(st, sh, STE_MaxFrameSize, ETHER_MAX_LEN + 4);
1292 else
1293 bus_space_write_2(st, sh, STE_MaxFrameSize, ETHER_MAX_LEN);
1294
1295 /*
1296 * Initialize the interrupt mask.
1297 */
1298 sc->sc_IntEnable = IE_HostError | IE_TxComplete | IE_UpdateStats |
1299 IE_TxDMAComplete | IE_RxDMAComplete;
1300
1301 bus_space_write_2(st, sh, STE_IntStatus, 0xffff);
1302 bus_space_write_2(st, sh, STE_IntEnable, sc->sc_IntEnable);
1303
1304 /*
1305 * Start the receive DMA engine.
1306 */
1307 bus_space_write_4(st, sh, STE_DMACtrl, sc->sc_DMACtrl | DC_RxDMAResume);
1308
1309 /*
1310 * Initialize MacCtrl0 -- do it before setting the media,
1311 * as setting the media will actually program the register.
1312 */
1313 sc->sc_MacCtrl0 = MC0_IFSSelect(0);
1314 if (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU)
1315 sc->sc_MacCtrl0 |= MC0_RcvLargeFrames;
1316
1317 /*
1318 * Set the current media.
1319 */
1320 if ((error = ether_mediachange(ifp)) != 0)
1321 goto out;
1322
1323 /*
1324 * Start the MAC.
1325 */
1326 bus_space_write_2(st, sh, STE_MacCtrl1,
1327 MC1_StatisticsEnable | MC1_TxEnable | MC1_RxEnable);
1328
1329 /*
1330 * Start the one second MII clock.
1331 */
1332 callout_reset(&sc->sc_tick_ch, hz, ste_tick, sc);
1333
1334 /*
1335 * ...all done!
1336 */
1337 ifp->if_flags |= IFF_RUNNING;
1338 ifp->if_flags &= ~IFF_OACTIVE;
1339
1340 out:
1341 if (error)
1342 printf("%s: interface not running\n", device_xname(sc->sc_dev));
1343 return (error);
1344 }
1345
/*
 * ste_rxdrain:
 *
 *	Drain the receive queue.
 */
1351 static void
1352 ste_rxdrain(struct ste_softc *sc)
1353 {
1354 struct ste_descsoft *ds;
1355 int i;
1356
1357 for (i = 0; i < STE_NRXDESC; i++) {
1358 ds = &sc->sc_rxsoft[i];
1359 if (ds->ds_mbuf != NULL) {
1360 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1361 m_freem(ds->ds_mbuf);
1362 ds->ds_mbuf = NULL;
1363 }
1364 }
1365 }
1366
1367 /*
1368 * ste_stop: [ ifnet interface function ]
1369 *
1370 * Stop transmission on the interface.
1371 */
1372 static void
1373 ste_stop(struct ifnet *ifp, int disable)
1374 {
1375 struct ste_softc *sc = ifp->if_softc;
1376 struct ste_descsoft *ds;
1377 int i;
1378
1379 /*
1380 * Stop the one second clock.
1381 */
1382 callout_stop(&sc->sc_tick_ch);
1383
1384 /* Down the MII. */
1385 mii_down(&sc->sc_mii);
1386
1387 /*
1388 * Disable interrupts.
1389 */
1390 bus_space_write_2(sc->sc_st, sc->sc_sh, STE_IntEnable, 0);
1391
1392 /*
1393 * Stop receiver, transmitter, and stats update.
1394 */
1395 bus_space_write_2(sc->sc_st, sc->sc_sh, STE_MacCtrl1,
1396 MC1_StatisticsDisable | MC1_TxDisable | MC1_RxDisable);
1397
1398 /*
1399 * Stop the transmit and receive DMA.
1400 */
1401 bus_space_write_4(sc->sc_st, sc->sc_sh, STE_DMACtrl,
1402 DC_RxDMAHalt | DC_TxDMAHalt);
1403 ste_dmahalt_wait(sc);
1404
1405 /*
1406 * Release any queued transmit buffers.
1407 */
1408 for (i = 0; i < STE_NTXDESC; i++) {
1409 ds = &sc->sc_txsoft[i];
1410 if (ds->ds_mbuf != NULL) {
1411 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1412 m_freem(ds->ds_mbuf);
1413 ds->ds_mbuf = NULL;
1414 }
1415 }
1416
1417 /*
1418 * Mark the interface down and cancel the watchdog timer.
1419 */
1420 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1421 ifp->if_timer = 0;
1422
1423 if (disable)
1424 ste_rxdrain(sc);
1425 }
1426
1427 static int
1428 ste_eeprom_wait(struct ste_softc *sc)
1429 {
1430 int i;
1431
1432 for (i = 0; i < STE_TIMEOUT; i++) {
1433 delay(1000);
1434 if ((bus_space_read_2(sc->sc_st, sc->sc_sh, STE_EepromCtrl) &
1435 EC_EepromBusy) == 0)
1436 return (0);
1437 }
1438 return (1);
1439 }
1440
1441 /*
1442 * ste_read_eeprom:
1443 *
1444 * Read data from the serial EEPROM.
1445 */
1446 static void
1447 ste_read_eeprom(struct ste_softc *sc, int offset, uint16_t *data)
1448 {
1449
1450 if (ste_eeprom_wait(sc))
1451 printf("%s: EEPROM failed to come ready\n",
1452 device_xname(sc->sc_dev));
1453
1454 bus_space_write_2(sc->sc_st, sc->sc_sh, STE_EepromCtrl,
1455 EC_EepromAddress(offset) | EC_EepromOpcode(EC_OP_R));
1456 if (ste_eeprom_wait(sc))
1457 printf("%s: EEPROM read timed out\n",
1458 device_xname(sc->sc_dev));
1459 *data = bus_space_read_2(sc->sc_st, sc->sc_sh, STE_EepromData);
1460 }
1461
/*
 * ste_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 *
 *	Allocates a fresh mbuf cluster, loads it into the descriptor's
 *	DMA map (replacing any previous mapping), and re-initializes the
 *	hardware descriptor.  Returns 0 on success or ENOBUFS if no
 *	mbuf or cluster could be allocated.
 */
static int
ste_add_rxbuf(struct ste_softc *sc, int idx)
{
	struct ste_descsoft *ds = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	/* Non-sleeping allocation: this may run from interrupt context. */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	/* Attach a cluster; without M_EXT the mbuf is too small to use. */
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	/* If this slot already had a buffer, unload its old mapping. */
	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m;

	/* Map the whole cluster for device writes. */
	error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    device_xname(sc->sc_dev), idx, error);
		panic("ste_add_rxbuf"); /* XXX should recover, not panic */
	}

	/* Make the buffer device-visible before handing it to the chip. */
	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	STE_INIT_RXDESC(sc, idx);

	return (0);
}
1505
/*
 * ste_set_filter:
 *
 *	Set up the receive filter.
 *
 *	Computes sc_ReceiveMode (unicast/broadcast/multicast/promiscuous
 *	bits) from the interface flags and multicast list, programs the
 *	64-bit multicast hash table unless we fall back to receiving all
 *	multicast frames, and writes the final mode to the chip.
 */
static void
ste_set_filter(struct ste_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc;
	uint16_t mchash[4];

	sc->sc_ReceiveMode = RM_ReceiveUnicast;
	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_ReceiveMode |= RM_ReceiveBroadcast;

	/* Promiscuous mode receives everything; skip the hash setup. */
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_ReceiveMode |= RM_ReceiveAllFrames;
		goto allmulti;
	}

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator, and then using the low-order
	 * 6 bits as an index into the 64 bit multicast hash table.  The
	 * high order bits select the register, while the rest of the bits
	 * select the bit within the register.
	 */

	memset(mchash, 0, sizeof(mchash));

	/* The multicast list must be walked under the ethercom lock. */
	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	if (enm == NULL) {
		/* Empty list: unicast/broadcast only. */
		ETHER_UNLOCK(ec);
		goto done;
	}

	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			ETHER_UNLOCK(ec);
			goto allmulti;
		}

		crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);

		/* Just want the 6 least significant bits. */
		crc &= 0x3f;

		/* Set the corresponding bit in the hash table. */
		mchash[crc >> 4] |= 1 << (crc & 0xf);

		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	sc->sc_ReceiveMode |= RM_ReceiveMulticastHash;

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto done;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	sc->sc_ReceiveMode |= RM_ReceiveMulticast;

 done:
	if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
		/*
		 * Program the multicast hash table.
		 */
		bus_space_write_2(sc->sc_st, sc->sc_sh, STE_HashTable0,
		    mchash[0]);
		bus_space_write_2(sc->sc_st, sc->sc_sh, STE_HashTable1,
		    mchash[1]);
		bus_space_write_2(sc->sc_st, sc->sc_sh, STE_HashTable2,
		    mchash[2]);
		bus_space_write_2(sc->sc_st, sc->sc_sh, STE_HashTable3,
		    mchash[3]);
	}

	bus_space_write_1(sc->sc_st, sc->sc_sh, STE_ReceiveMode,
	    sc->sc_ReceiveMode);
}
1600
1601 /*
1602 * ste_mii_readreg: [mii interface function]
1603 *
1604 * Read a PHY register on the MII of the ST-201.
1605 */
1606 static int
1607 ste_mii_readreg(device_t self, int phy, int reg, uint16_t *val)
1608 {
1609
1610 return mii_bitbang_readreg(self, &ste_mii_bitbang_ops, phy, reg, val);
1611 }
1612
1613 /*
1614 * ste_mii_writereg: [mii interface function]
1615 *
1616 * Write a PHY register on the MII of the ST-201.
1617 */
1618 static int
1619 ste_mii_writereg(device_t self, int phy, int reg, uint16_t val)
1620 {
1621
1622 return mii_bitbang_writereg(self, &ste_mii_bitbang_ops, phy, reg, val);
1623 }
1624
1625 /*
1626 * ste_mii_statchg: [mii interface function]
1627 *
1628 * Callback from MII layer when media changes.
1629 */
1630 static void
1631 ste_mii_statchg(struct ifnet *ifp)
1632 {
1633 struct ste_softc *sc = ifp->if_softc;
1634
1635 if (sc->sc_mii.mii_media_active & IFM_FDX)
1636 sc->sc_MacCtrl0 |= MC0_FullDuplexEnable;
1637 else
1638 sc->sc_MacCtrl0 &= ~MC0_FullDuplexEnable;
1639
1640 /* XXX 802.1x flow-control? */
1641
1642 bus_space_write_2(sc->sc_st, sc->sc_sh, STE_MacCtrl0, sc->sc_MacCtrl0);
1643 }
1644
1645 /*
1646 * ste_mii_bitbang_read: [mii bit-bang interface function]
1647 *
1648 * Read the MII serial port for the MII bit-bang module.
1649 */
1650 static uint32_t
1651 ste_mii_bitbang_read(device_t self)
1652 {
1653 struct ste_softc *sc = device_private(self);
1654
1655 return (bus_space_read_1(sc->sc_st, sc->sc_sh, STE_PhyCtrl));
1656 }
1657
/*
 * ste_mii_bitbang_write: [mii bit-bang interface function]
 *
 *	Write the MII serial port for the MII bit-bang module.
 */
1663 static void
1664 ste_mii_bitbang_write(device_t self, uint32_t val)
1665 {
1666 struct ste_softc *sc = device_private(self);
1667
1668 bus_space_write_1(sc->sc_st, sc->sc_sh, STE_PhyCtrl, val);
1669 }
1670