if_stge.c revision 1.74 1 /* $NetBSD: if_stge.c,v 1.74 2019/12/26 15:26:58 msaitoh Exp $ */
2
3 /*-
4 * Copyright (c) 2001 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * Device driver for the Sundance Tech. TC9021 10/100/1000
34 * Ethernet controller.
35 */
36
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: if_stge.c,v 1.74 2019/12/26 15:26:58 msaitoh Exp $");
39
40
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/callout.h>
44 #include <sys/mbuf.h>
45 #include <sys/malloc.h>
46 #include <sys/kernel.h>
47 #include <sys/socket.h>
48 #include <sys/ioctl.h>
49 #include <sys/errno.h>
50 #include <sys/device.h>
51 #include <sys/queue.h>
52
53 #include <net/if.h>
54 #include <net/if_dl.h>
55 #include <net/if_media.h>
56 #include <net/if_ether.h>
57
58 #include <net/bpf.h>
59
60 #include <sys/bus.h>
61 #include <sys/intr.h>
62
63 #include <dev/mii/mii.h>
64 #include <dev/mii/miivar.h>
65 #include <dev/mii/mii_bitbang.h>
66
67 #include <dev/pci/pcireg.h>
68 #include <dev/pci/pcivar.h>
69 #include <dev/pci/pcidevs.h>
70
71 #include <dev/pci/if_stgereg.h>
72
73 #include <prop/proplib.h>
74
/* #define	STGE_CU_BUG		1 */	/* workaround for copper PHY bug (disabled) */
#define	STGE_VLAN_UNTAG		1	/* strip VLAN tags on receive */
/* #define	STGE_VLAN_CFI	1 */	/* set CFI bit in inserted VLAN tags (disabled) */

/*
 * Byte offsets of the Tx/Rx descriptor rings within the
 * stge_control_data structure; used to program the chip and
 * to sync individual descriptors.
 */
#define	STGE_CDOFF(x)	offsetof(struct stge_control_data, x)
#define	STGE_CDTXOFF(x)	STGE_CDOFF(scd_txdescs[(x)])
#define	STGE_CDRXOFF(x)	STGE_CDOFF(scd_rxdescs[(x)])

/*
 * Reset the software Rx mbuf chain state: tail pointer back to the
 * head slot, empty chain, zero accumulated length.
 */
#define	STGE_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

/*
 * Append mbuf (m) to the software Rx chain being assembled for a
 * multi-descriptor frame.
 */
#define	STGE_RXCHAIN_LINK(sc, m)					\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)

#ifdef STGE_EVENT_COUNTERS
#define	STGE_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define	STGE_EVCNT_INCR(ev)	/* nothing */
#endif

/* Bus (DMA) addresses of the Tx/Rx descriptors. */
#define	STGE_CDTXADDR(sc, x)	((sc)->sc_cddma + STGE_CDTXOFF((x)))
#define	STGE_CDRXADDR(sc, x)	((sc)->sc_cddma + STGE_CDRXOFF((x)))

/* Sync a single Tx/Rx descriptor for the given bus_dma operations. */
#define	STGE_CDTXSYNC(sc, x, ops)					\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    STGE_CDTXOFF((x)), sizeof(struct stge_tfd), (ops))

#define	STGE_CDRXSYNC(sc, x, ops)					\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    STGE_CDRXOFF((x)), sizeof(struct stge_rfd), (ops))

/*
 * (Re)initialize Rx descriptor (x) to point at its mbuf cluster and
 * chain it to the next descriptor, then sync it for the chip.
 */
#define	STGE_INIT_RXDESC(sc, x)						\
do {									\
	struct stge_descsoft *__ds = &(sc)->sc_rxsoft[(x)];		\
	struct stge_rfd *__rfd = &(sc)->sc_rxdescs[(x)];		\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 */								\
	__rfd->rfd_frag.frag_word0 =					\
	    htole64(FRAG_ADDR(__ds->ds_dmamap->dm_segs[0].ds_addr + 2) |\
	    FRAG_LEN(MCLBYTES - 2));					\
	__rfd->rfd_next =						\
	    htole64((uint64_t)STGE_CDRXADDR((sc), STGE_NEXTRX((x))));	\
	__rfd->rfd_status = 0;						\
	STGE_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); \
} while (/*CONSTCOND*/0)

/* Spin count for register-polling loops (see stge_dma_wait()). */
#define	STGE_TIMEOUT	1000
133
/* ifnet interface functions. */
static void	stge_start(struct ifnet *);
static void	stge_watchdog(struct ifnet *);
static int	stge_ioctl(struct ifnet *, u_long, void *);
static int	stge_init(struct ifnet *);
static void	stge_stop(struct ifnet *, int);

/* pmf shutdown hook. */
static bool	stge_shutdown(device_t, int);

/* Internal helpers. */
static void	stge_reset(struct stge_softc *);
static void	stge_rxdrain(struct stge_softc *);
static int	stge_add_rxbuf(struct stge_softc *, int);
static void	stge_read_eeprom(struct stge_softc *, int, uint16_t *);
static void	stge_tick(void *);

static void	stge_stats_update(struct stge_softc *);

static void	stge_set_filter(struct stge_softc *);

/* Interrupt handling. */
static int	stge_intr(void *);
static void	stge_txintr(struct stge_softc *);
static void	stge_rxintr(struct stge_softc *);

/* MII callbacks. */
static int	stge_mii_readreg(device_t, int, int, uint16_t *);
static int	stge_mii_writereg(device_t, int, int, uint16_t);
static void	stge_mii_statchg(struct ifnet *);

/* Autoconfiguration glue. */
static int	stge_match(device_t, cfdata_t, void *);
static void	stge_attach(device_t, device_t, void *);

/* If non-zero, copy small received packets into a fresh header mbuf. */
int	stge_copy_small = 0;

CFATTACH_DECL_NEW(stge, sizeof(struct stge_softc),
    stge_match, stge_attach, NULL, NULL);

static uint32_t stge_mii_bitbang_read(device_t);
static void	stge_mii_bitbang_write(device_t, uint32_t);

/* PhyCtrl bit assignments for the generic MII bit-bang code. */
static const struct mii_bitbang_ops stge_mii_bitbang_ops = {
	stge_mii_bitbang_read,
	stge_mii_bitbang_write,
	{
		PC_MgmtData,		/* MII_BIT_MDO */
		PC_MgmtData,		/* MII_BIT_MDI */
		PC_MgmtClk,		/* MII_BIT_MDC */
		PC_MgmtDir,		/* MII_BIT_DIR_HOST_PHY */
		0,			/* MII_BIT_DIR_PHY_HOST */
	}
};
182
/*
 * Devices supported by this driver.  The table is terminated by an
 * entry with a NULL name; stge_lookup() scans it by PCI vendor and
 * product ID.
 */
static const struct stge_product {
	pci_vendor_id_t		stge_vendor;
	pci_product_id_t	stge_product;
	const char		*stge_name;
} stge_products[] = {
	{ PCI_VENDOR_SUNDANCETI,	PCI_PRODUCT_SUNDANCETI_ST1023,
	  "Sundance ST-1023 Gigabit Ethernet" },

	{ PCI_VENDOR_SUNDANCETI,	PCI_PRODUCT_SUNDANCETI_ST2021,
	  "Sundance ST-2021 Gigabit Ethernet" },

	{ PCI_VENDOR_TAMARACK,		PCI_PRODUCT_TAMARACK_TC9021,
	  "Tamarack TC9021 Gigabit Ethernet" },

	{ PCI_VENDOR_TAMARACK,		PCI_PRODUCT_TAMARACK_TC9021_ALT,
	  "Tamarack TC9021 Gigabit Ethernet" },

	/*
	 * The Sundance sample boards use the Sundance vendor ID,
	 * but the Tamarack product ID.
	 */
	{ PCI_VENDOR_SUNDANCETI,	PCI_PRODUCT_TAMARACK_TC9021,
	  "Sundance TC9021 Gigabit Ethernet" },

	{ PCI_VENDOR_SUNDANCETI,	PCI_PRODUCT_TAMARACK_TC9021_ALT,
	  "Sundance TC9021 Gigabit Ethernet" },

	{ PCI_VENDOR_DLINK,		PCI_PRODUCT_DLINK_DL4000,
	  "D-Link DL-4000 Gigabit Ethernet" },

	{ PCI_VENDOR_ANTARES,		PCI_PRODUCT_ANTARES_TC9021,
	  "Antares Gigabit Ethernet" },

	{ 0,				0,
	  NULL },
};
222
223 static const struct stge_product *
224 stge_lookup(const struct pci_attach_args *pa)
225 {
226 const struct stge_product *sp;
227
228 for (sp = stge_products; sp->stge_name != NULL; sp++) {
229 if (PCI_VENDOR(pa->pa_id) == sp->stge_vendor &&
230 PCI_PRODUCT(pa->pa_id) == sp->stge_product)
231 return (sp);
232 }
233 return (NULL);
234 }
235
236 static int
237 stge_match(device_t parent, cfdata_t cf, void *aux)
238 {
239 struct pci_attach_args *pa = aux;
240
241 if (stge_lookup(pa) != NULL)
242 return (1);
243
244 return (0);
245 }
246
247 static void
248 stge_attach(device_t parent, device_t self, void *aux)
249 {
250 struct stge_softc *sc = device_private(self);
251 struct pci_attach_args *pa = aux;
252 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
253 struct mii_data * const mii = &sc->sc_mii;
254 pci_chipset_tag_t pc = pa->pa_pc;
255 pci_intr_handle_t ih;
256 const char *intrstr = NULL;
257 bus_space_tag_t iot, memt;
258 bus_space_handle_t ioh, memh;
259 bus_dma_segment_t seg;
260 prop_data_t data;
261 int ioh_valid, memh_valid;
262 int i, rseg, error;
263 const struct stge_product *sp;
264 uint8_t enaddr[ETHER_ADDR_LEN];
265 char intrbuf[PCI_INTRSTR_LEN];
266
267 sc->sc_dev = self;
268 callout_init(&sc->sc_tick_ch, 0);
269
270 sp = stge_lookup(pa);
271 if (sp == NULL) {
272 printf("\n");
273 panic("ste_attach: impossible");
274 }
275
276 sc->sc_rev = PCI_REVISION(pa->pa_class);
277
278 pci_aprint_devinfo_fancy(pa, NULL, sp->stge_name, 1);
279
280 /*
281 * Map the device.
282 */
283 ioh_valid = (pci_mapreg_map(pa, STGE_PCI_IOBA,
284 PCI_MAPREG_TYPE_IO, 0,
285 &iot, &ioh, NULL, NULL) == 0);
286 memh_valid = (pci_mapreg_map(pa, STGE_PCI_MMBA,
287 PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0,
288 &memt, &memh, NULL, NULL) == 0);
289
290 if (memh_valid) {
291 sc->sc_st = memt;
292 sc->sc_sh = memh;
293 } else if (ioh_valid) {
294 sc->sc_st = iot;
295 sc->sc_sh = ioh;
296 } else {
297 aprint_error_dev(self, "unable to map device registers\n");
298 return;
299 }
300
301 sc->sc_dmat = pa->pa_dmat;
302
303 /* Enable bus mastering. */
304 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
305 pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) |
306 PCI_COMMAND_MASTER_ENABLE);
307
308 /* power up chip */
309 if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL)) &&
310 error != EOPNOTSUPP) {
311 aprint_error_dev(self, "cannot activate %d\n", error);
312 return;
313 }
314 /*
315 * Map and establish our interrupt.
316 */
317 if (pci_intr_map(pa, &ih)) {
318 aprint_error_dev(self, "unable to map interrupt\n");
319 return;
320 }
321 intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
322 sc->sc_ih = pci_intr_establish_xname(pc, ih, IPL_NET, stge_intr, sc,
323 device_xname(self));
324 if (sc->sc_ih == NULL) {
325 aprint_error_dev(self, "unable to establish interrupt");
326 if (intrstr != NULL)
327 aprint_error(" at %s", intrstr);
328 aprint_error("\n");
329 return;
330 }
331 aprint_normal_dev(self, "interrupting at %s\n", intrstr);
332
333 /*
334 * Allocate the control data structures, and create and load the
335 * DMA map for it.
336 */
337 if ((error = bus_dmamem_alloc(sc->sc_dmat,
338 sizeof(struct stge_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
339 0)) != 0) {
340 aprint_error_dev(self,
341 "unable to allocate control data, error = %d\n", error);
342 goto fail_0;
343 }
344
345 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
346 sizeof(struct stge_control_data), (void **)&sc->sc_control_data,
347 BUS_DMA_COHERENT)) != 0) {
348 aprint_error_dev(self,
349 "unable to map control data, error = %d\n", error);
350 goto fail_1;
351 }
352
353 if ((error = bus_dmamap_create(sc->sc_dmat,
354 sizeof(struct stge_control_data), 1,
355 sizeof(struct stge_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
356 aprint_error_dev(self,
357 "unable to create control data DMA map, error = %d\n",
358 error);
359 goto fail_2;
360 }
361
362 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
363 sc->sc_control_data, sizeof(struct stge_control_data), NULL,
364 0)) != 0) {
365 aprint_error_dev(self,
366 "unable to load control data DMA map, error = %d\n",
367 error);
368 goto fail_3;
369 }
370
371 /*
372 * Create the transmit buffer DMA maps. Note that rev B.3
373 * and earlier seem to have a bug regarding multi-fragment
374 * packets. We need to limit the number of Tx segments on
375 * such chips to 1.
376 */
377 for (i = 0; i < STGE_NTXDESC; i++) {
378 if ((error = bus_dmamap_create(sc->sc_dmat,
379 ETHER_MAX_LEN_JUMBO, STGE_NTXFRAGS, MCLBYTES, 0, 0,
380 &sc->sc_txsoft[i].ds_dmamap)) != 0) {
381 aprint_error_dev(self,
382 "unable to create tx DMA map %d, error = %d\n",
383 i, error);
384 goto fail_4;
385 }
386 }
387
388 /*
389 * Create the receive buffer DMA maps.
390 */
391 for (i = 0; i < STGE_NRXDESC; i++) {
392 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
393 MCLBYTES, 0, 0, &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
394 aprint_error_dev(self,
395 "unable to create rx DMA map %d, error = %d\n",
396 i, error);
397 goto fail_5;
398 }
399 sc->sc_rxsoft[i].ds_mbuf = NULL;
400 }
401
402 /*
403 * Determine if we're copper or fiber. It affects how we
404 * reset the card.
405 */
406 if (bus_space_read_4(sc->sc_st, sc->sc_sh, STGE_AsicCtrl) &
407 AC_PhyMedia)
408 sc->sc_usefiber = 1;
409 else
410 sc->sc_usefiber = 0;
411
412 /*
413 * Reset the chip to a known state.
414 */
415 stge_reset(sc);
416
417 /*
418 * Reading the station address from the EEPROM doesn't seem
419 * to work, at least on my sample boards. Instead, since
420 * the reset sequence does AutoInit, read it from the station
421 * address registers. For Sundance 1023 you can only read it
422 * from EEPROM.
423 */
424 if (sp->stge_product != PCI_PRODUCT_SUNDANCETI_ST1023) {
425 enaddr[0] = bus_space_read_2(sc->sc_st, sc->sc_sh,
426 STGE_StationAddress0) & 0xff;
427 enaddr[1] = bus_space_read_2(sc->sc_st, sc->sc_sh,
428 STGE_StationAddress0) >> 8;
429 enaddr[2] = bus_space_read_2(sc->sc_st, sc->sc_sh,
430 STGE_StationAddress1) & 0xff;
431 enaddr[3] = bus_space_read_2(sc->sc_st, sc->sc_sh,
432 STGE_StationAddress1) >> 8;
433 enaddr[4] = bus_space_read_2(sc->sc_st, sc->sc_sh,
434 STGE_StationAddress2) & 0xff;
435 enaddr[5] = bus_space_read_2(sc->sc_st, sc->sc_sh,
436 STGE_StationAddress2) >> 8;
437 sc->sc_stge1023 = 0;
438 } else {
439 data = prop_dictionary_get(device_properties(self),
440 "mac-address");
441 if (data != NULL) {
442 /*
443 * Try to get the station address from device
444 * properties first, in case the EEPROM is missing.
445 */
446 KASSERT(prop_object_type(data) == PROP_TYPE_DATA);
447 KASSERT(prop_data_size(data) == ETHER_ADDR_LEN);
448 (void)memcpy(enaddr, prop_data_data_nocopy(data),
449 ETHER_ADDR_LEN);
450 } else {
451 uint16_t myaddr[ETHER_ADDR_LEN / 2];
452 for (i = 0; i <ETHER_ADDR_LEN / 2; i++) {
453 stge_read_eeprom(sc,
454 STGE_EEPROM_StationAddress0 + i,
455 &myaddr[i]);
456 myaddr[i] = le16toh(myaddr[i]);
457 }
458 (void)memcpy(enaddr, myaddr, sizeof(enaddr));
459 }
460 sc->sc_stge1023 = 1;
461 }
462
463 aprint_normal_dev(self, "Ethernet address %s\n",
464 ether_sprintf(enaddr));
465
466 /*
467 * Read some important bits from the PhyCtrl register.
468 */
469 sc->sc_PhyCtrl = bus_space_read_1(sc->sc_st, sc->sc_sh,
470 STGE_PhyCtrl) & (PC_PhyDuplexPolarity | PC_PhyLnkPolarity);
471
472 /*
473 * Initialize our media structures and probe the MII.
474 */
475 mii->mii_ifp = ifp;
476 mii->mii_readreg = stge_mii_readreg;
477 mii->mii_writereg = stge_mii_writereg;
478 mii->mii_statchg = stge_mii_statchg;
479 sc->sc_ethercom.ec_mii = mii;
480 ifmedia_init(&mii->mii_media, IFM_IMASK, ether_mediachange,
481 ether_mediastatus);
482 mii_attach(self, mii, 0xffffffff, MII_PHY_ANY,
483 MII_OFFSET_ANY, MIIF_DOPAUSE);
484 if (LIST_FIRST(&mii->mii_phys) == NULL) {
485 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
486 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
487 } else
488 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
489
490 ifp = &sc->sc_ethercom.ec_if;
491 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
492 ifp->if_softc = sc;
493 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
494 ifp->if_ioctl = stge_ioctl;
495 ifp->if_start = stge_start;
496 ifp->if_watchdog = stge_watchdog;
497 ifp->if_init = stge_init;
498 ifp->if_stop = stge_stop;
499 IFQ_SET_READY(&ifp->if_snd);
500
501 /*
502 * The manual recommends disabling early transmit, so we
503 * do. It's disabled anyway, if using IP checksumming,
504 * since the entire packet must be in the FIFO in order
505 * for the chip to perform the checksum.
506 */
507 sc->sc_txthresh = 0x0fff;
508
509 /*
510 * Disable MWI if the PCI layer tells us to.
511 */
512 sc->sc_DMACtrl = 0;
513 if ((pa->pa_flags & PCI_FLAGS_MWI_OKAY) == 0)
514 sc->sc_DMACtrl |= DMAC_MWIDisable;
515
516 /*
517 * We can support 802.1Q VLAN-sized frames and jumbo
518 * Ethernet frames.
519 *
520 * XXX Figure out how to do hw-assisted VLAN tagging in
521 * XXX a reasonable way on this chip.
522 */
523 sc->sc_ethercom.ec_capabilities |=
524 ETHERCAP_VLAN_MTU | /* XXX ETHERCAP_JUMBO_MTU | */
525 ETHERCAP_VLAN_HWTAGGING;
526 sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
527
528 /*
529 * We can do IPv4/TCPv4/UDPv4 checksums in hardware.
530 */
531 sc->sc_ethercom.ec_if.if_capabilities |=
532 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
533 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
534 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
535
536 /*
537 * Attach the interface.
538 */
539 if_attach(ifp);
540 if_deferred_start_init(ifp, NULL);
541 ether_ifattach(ifp, enaddr);
542
543 #ifdef STGE_EVENT_COUNTERS
544 /*
545 * Attach event counters.
546 */
547 evcnt_attach_dynamic(&sc->sc_ev_txstall, EVCNT_TYPE_MISC,
548 NULL, device_xname(self), "txstall");
549 evcnt_attach_dynamic(&sc->sc_ev_txdmaintr, EVCNT_TYPE_INTR,
550 NULL, device_xname(self), "txdmaintr");
551 evcnt_attach_dynamic(&sc->sc_ev_txindintr, EVCNT_TYPE_INTR,
552 NULL, device_xname(self), "txindintr");
553 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
554 NULL, device_xname(self), "rxintr");
555
556 evcnt_attach_dynamic(&sc->sc_ev_txseg1, EVCNT_TYPE_MISC,
557 NULL, device_xname(self), "txseg1");
558 evcnt_attach_dynamic(&sc->sc_ev_txseg2, EVCNT_TYPE_MISC,
559 NULL, device_xname(self), "txseg2");
560 evcnt_attach_dynamic(&sc->sc_ev_txseg3, EVCNT_TYPE_MISC,
561 NULL, device_xname(self), "txseg3");
562 evcnt_attach_dynamic(&sc->sc_ev_txseg4, EVCNT_TYPE_MISC,
563 NULL, device_xname(self), "txseg4");
564 evcnt_attach_dynamic(&sc->sc_ev_txseg5, EVCNT_TYPE_MISC,
565 NULL, device_xname(self), "txseg5");
566 evcnt_attach_dynamic(&sc->sc_ev_txsegmore, EVCNT_TYPE_MISC,
567 NULL, device_xname(self), "txsegmore");
568 evcnt_attach_dynamic(&sc->sc_ev_txcopy, EVCNT_TYPE_MISC,
569 NULL, device_xname(self), "txcopy");
570
571 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
572 NULL, device_xname(self), "rxipsum");
573 evcnt_attach_dynamic(&sc->sc_ev_rxtcpsum, EVCNT_TYPE_MISC,
574 NULL, device_xname(self), "rxtcpsum");
575 evcnt_attach_dynamic(&sc->sc_ev_rxudpsum, EVCNT_TYPE_MISC,
576 NULL, device_xname(self), "rxudpsum");
577 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
578 NULL, device_xname(self), "txipsum");
579 evcnt_attach_dynamic(&sc->sc_ev_txtcpsum, EVCNT_TYPE_MISC,
580 NULL, device_xname(self), "txtcpsum");
581 evcnt_attach_dynamic(&sc->sc_ev_txudpsum, EVCNT_TYPE_MISC,
582 NULL, device_xname(self), "txudpsum");
583 #endif /* STGE_EVENT_COUNTERS */
584
585 /*
586 * Make sure the interface is shutdown during reboot.
587 */
588 if (pmf_device_register1(self, NULL, NULL, stge_shutdown))
589 pmf_class_network_register(self, ifp);
590 else
591 aprint_error_dev(self, "couldn't establish power handler\n");
592
593 return;
594
595 /*
596 * Free any resources we've allocated during the failed attach
597 * attempt. Do this in reverse order and fall through.
598 */
599 fail_5:
600 for (i = 0; i < STGE_NRXDESC; i++) {
601 if (sc->sc_rxsoft[i].ds_dmamap != NULL)
602 bus_dmamap_destroy(sc->sc_dmat,
603 sc->sc_rxsoft[i].ds_dmamap);
604 }
605 fail_4:
606 for (i = 0; i < STGE_NTXDESC; i++) {
607 if (sc->sc_txsoft[i].ds_dmamap != NULL)
608 bus_dmamap_destroy(sc->sc_dmat,
609 sc->sc_txsoft[i].ds_dmamap);
610 }
611 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
612 fail_3:
613 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
614 fail_2:
615 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
616 sizeof(struct stge_control_data));
617 fail_1:
618 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
619 fail_0:
620 return;
621 }
622
623 /*
624 * stge_shutdown:
625 *
626 * Make sure the interface is stopped at reboot time.
627 */
628 static bool
629 stge_shutdown(device_t self, int howto)
630 {
631 struct stge_softc *sc = device_private(self);
632 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
633
634 stge_stop(ifp, 1);
635 stge_reset(sc);
636 return true;
637 }
638
639 static void
640 stge_dma_wait(struct stge_softc *sc)
641 {
642 int i;
643
644 for (i = 0; i < STGE_TIMEOUT; i++) {
645 delay(2);
646 if ((bus_space_read_4(sc->sc_st, sc->sc_sh, STGE_DMACtrl) &
647 DMAC_TxDMAInProg) == 0)
648 break;
649 }
650
651 if (i == STGE_TIMEOUT)
652 printf("%s: DMA wait timed out\n", device_xname(sc->sc_dev));
653 }
654
/*
 * stge_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.  Dequeues mbufs
 *	from the send queue, loads them into Tx DMA maps, builds the
 *	corresponding transmit frame descriptors (TFDs) and kicks the
 *	chip's Tx DMA engine.
 */
static void
stge_start(struct ifnet *ifp)
{
	struct stge_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	struct stge_descsoft *ds;
	struct stge_tfd *tfd;
	bus_dmamap_t dmamap;
	int error, firsttx, nexttx, opending, seg, totlen;
	uint64_t csum_flags;

	/* Nothing to do unless the interface is up and not stalled. */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of pending transmissions
	 * and the first descriptor we will use.
	 */
	opending = sc->sc_txpending;
	firsttx = STGE_NEXTTX(sc->sc_txlast);

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		uint64_t tfc;
		bool have_vtag;
		uint16_t vtag;

		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		/*
		 * Leave one unused descriptor at the end of the
		 * list to prevent wrapping completely around.
		 */
		if (sc->sc_txpending == (STGE_NTXDESC - 1)) {
			STGE_EVCNT_INCR(&sc->sc_ev_txstall);
			break;
		}

		/*
		 * See if we have any VLAN stuff.
		 */
		have_vtag = vlan_has_tag(m0);
		if (have_vtag)
			vtag = vlan_get_tag(m0);

		/*
		 * Get the last and next available transmit descriptor.
		 */
		nexttx = STGE_NEXTTX(sc->sc_txlast);
		tfd = &sc->sc_txdescs[nexttx];
		ds = &sc->sc_txsoft[nexttx];

		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the alloted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    device_xname(sc->sc_dev));
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/* Initialize the fragment list. */
		for (totlen = 0, seg = 0; seg < dmamap->dm_nsegs; seg++) {
			tfd->tfd_frags[seg].frag_word0 =
			    htole64(FRAG_ADDR(dmamap->dm_segs[seg].ds_addr) |
			    FRAG_LEN(dmamap->dm_segs[seg].ds_len));
			totlen += dmamap->dm_segs[seg].ds_len;
		}

#ifdef STGE_EVENT_COUNTERS
		/* Record the segment-count distribution for tuning. */
		switch (dmamap->dm_nsegs) {
		case 1:
			STGE_EVCNT_INCR(&sc->sc_ev_txseg1);
			break;
		case 2:
			STGE_EVCNT_INCR(&sc->sc_ev_txseg2);
			break;
		case 3:
			STGE_EVCNT_INCR(&sc->sc_ev_txseg3);
			break;
		case 4:
			STGE_EVCNT_INCR(&sc->sc_ev_txseg4);
			break;
		case 5:
			STGE_EVCNT_INCR(&sc->sc_ev_txseg5);
			break;
		default:
			STGE_EVCNT_INCR(&sc->sc_ev_txsegmore);
			break;
		}
#endif /* STGE_EVENT_COUNTERS */

		/*
		 * Initialize checksumming flags in the descriptor.
		 * Byte-swap constants so the compiler can optimize.
		 */
		csum_flags = 0;
		if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
			STGE_EVCNT_INCR(&sc->sc_ev_txipsum);
			csum_flags |= TFD_IPChecksumEnable;
		}

		if (m0->m_pkthdr.csum_flags & M_CSUM_TCPv4) {
			STGE_EVCNT_INCR(&sc->sc_ev_txtcpsum);
			csum_flags |= TFD_TCPChecksumEnable;
		} else if (m0->m_pkthdr.csum_flags & M_CSUM_UDPv4) {
			STGE_EVCNT_INCR(&sc->sc_ev_txudpsum);
			csum_flags |= TFD_UDPChecksumEnable;
		}

		/*
		 * Initialize the descriptor and give it to the chip.
		 * Check to see if we have a VLAN tag to insert.
		 * A TxDMAIndicate interrupt is requested only every
		 * STGE_TXINTR_SPACING descriptors to limit interrupt load.
		 */

		tfc = TFD_FrameId(nexttx) | TFD_WordAlign(/*totlen & */3) |
		    TFD_FragCount(seg) | csum_flags |
		    (((nexttx & STGE_TXINTR_SPACING_MASK) == 0) ?
		     TFD_TxDMAIndicate : 0);
		if (have_vtag) {
#if 0
			struct ether_header *eh =
			    mtod(m0, struct ether_header *);
			uint16_t etype = ntohs(eh->ether_type);
			printf("%s: xmit (tag %d) etype %x\n",
			    ifp->if_xname, *mtod(n, int *), etype);
#endif
			tfc |= TFD_VLANTagInsert |
#ifdef STGE_VLAN_CFI
			    TFD_CFI |
#endif
			    TFD_VID(vtag);
		}
		tfd->tfd_control = htole64(tfc);

		/* Sync the descriptor. */
		STGE_CDTXSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/*
		 * Kick the transmit DMA logic.
		 */
		bus_space_write_4(sc->sc_st, sc->sc_sh, STGE_DMACtrl,
		    sc->sc_DMACtrl | DMAC_TxDMAPollNow);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/* Advance the tx pointer. */
		sc->sc_txpending++;
		sc->sc_txlast = nexttx;

		/*
		 * Pass the packet to any BPF listeners.
		 */
		bpf_mtap(ifp, m0, BPF_D_OUT);
	}

	if (sc->sc_txpending == (STGE_NTXDESC - 1)) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txpending != opending) {
		/*
		 * We enqueued packets.  If the transmitter was idle,
		 * reset the txdirty pointer.
		 */
		if (opending == 0)
			sc->sc_txdirty = firsttx;

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
874
875 /*
876 * stge_watchdog: [ifnet interface function]
877 *
878 * Watchdog timer handler.
879 */
880 static void
881 stge_watchdog(struct ifnet *ifp)
882 {
883 struct stge_softc *sc = ifp->if_softc;
884
885 /*
886 * Sweep up first, since we don't interrupt every frame.
887 */
888 stge_txintr(sc);
889 if (sc->sc_txpending != 0) {
890 printf("%s: device timeout\n", device_xname(sc->sc_dev));
891 ifp->if_oerrors++;
892
893 (void) stge_init(ifp);
894
895 /* Try to get more packets going. */
896 stge_start(ifp);
897 }
898 }
899
900 /*
901 * stge_ioctl: [ifnet interface function]
902 *
903 * Handle control requests from the operator.
904 */
905 static int
906 stge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
907 {
908 struct stge_softc *sc = ifp->if_softc;
909 int s, error;
910
911 s = splnet();
912
913 error = ether_ioctl(ifp, cmd, data);
914 if (error == ENETRESET) {
915 error = 0;
916
917 if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
918 ;
919 else if (ifp->if_flags & IFF_RUNNING) {
920 /*
921 * Multicast list has changed; set the hardware filter
922 * accordingly.
923 */
924 stge_set_filter(sc);
925 }
926 }
927
928 /* Try to get more packets going. */
929 stge_start(ifp);
930
931 splx(s);
932 return (error);
933 }
934
/*
 * stge_intr:
 *
 *	Interrupt service routine.  Acks and dispatches all pending
 *	interrupt causes, reinitializing the chip on fatal conditions
 *	(host error, Rx ring overflow, Tx completion errors).
 */
static int
stge_intr(void *arg)
{
	struct stge_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t txstat;
	int wantinit;
	uint16_t isr;

	/* Not our interrupt?  (Shared-interrupt check.) */
	if ((bus_space_read_2(sc->sc_st, sc->sc_sh, STGE_IntStatus) &
	     IS_InterruptStatus) == 0)
		return (0);

	for (wantinit = 0; wantinit == 0;) {
		/* Reading IntStatusAck acknowledges the pending causes. */
		isr = bus_space_read_2(sc->sc_st, sc->sc_sh, STGE_IntStatusAck);
		if ((isr & sc->sc_IntEnable) == 0)
			break;

		/* Host interface errors. */
		if (isr & IS_HostError) {
			printf("%s: Host interface error\n",
			    device_xname(sc->sc_dev));
			wantinit = 1;
			continue;
		}

		/* Receive interrupts. */
		if (isr & (IS_RxDMAComplete | IS_RFDListEnd)) {
			STGE_EVCNT_INCR(&sc->sc_ev_rxintr);
			stge_rxintr(sc);
			if (isr & IS_RFDListEnd) {
				printf("%s: receive ring overflow\n",
				    device_xname(sc->sc_dev));
				/*
				 * XXX Should try to recover from this
				 * XXX more gracefully.
				 */
				wantinit = 1;
			}
		}

		/* Transmit interrupts. */
		if (isr & (IS_TxDMAComplete | IS_TxComplete)) {
#ifdef STGE_EVENT_COUNTERS
			if (isr & IS_TxDMAComplete)
				STGE_EVCNT_INCR(&sc->sc_ev_txdmaintr);
#endif
			stge_txintr(sc);
		}

		/* Statistics overflow. */
		if (isr & IS_UpdateStats)
			stge_stats_update(sc);

		/* Transmission errors. */
		if (isr & IS_TxComplete) {
			STGE_EVCNT_INCR(&sc->sc_ev_txindintr);
			/* Drain all completed Tx status words. */
			for (;;) {
				txstat = bus_space_read_4(sc->sc_st, sc->sc_sh,
				    STGE_TxStatus);
				if ((txstat & TS_TxComplete) == 0)
					break;
				if (txstat & TS_TxUnderrun) {
					/* Raise the Tx start threshold. */
					sc->sc_txthresh++;
					if (sc->sc_txthresh > 0x0fff)
						sc->sc_txthresh = 0x0fff;
					printf("%s: transmit underrun, new "
					    "threshold: %d bytes\n",
					    device_xname(sc->sc_dev),
					    sc->sc_txthresh << 5);
				}
				if (txstat & TS_MaxCollisions)
					printf("%s: excessive collisions\n",
					    device_xname(sc->sc_dev));
			}
			wantinit = 1;
		}

	}

	if (wantinit)
		stge_init(ifp);

	/* Re-enable interrupts (cleared by the IntStatusAck reads). */
	bus_space_write_2(sc->sc_st, sc->sc_sh, STGE_IntEnable,
	    sc->sc_IntEnable);

	/* Try to get more packets going. */
	if_schedule_deferred_start(ifp);

	return (1);
}
1031
/*
 * stge_txintr:
 *
 *	Helper; handle transmit interrupts.  Walks the ring from the
 *	oldest outstanding descriptor, reclaiming DMA maps and freeing
 *	mbufs for every frame the chip has marked done.
 */
static void
stge_txintr(struct stge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct stge_descsoft *ds;
	uint64_t control;
	int i;

	/* Descriptors are being freed, so the queue is no longer full. */
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (i = sc->sc_txdirty; sc->sc_txpending != 0;
	     i = STGE_NEXTTX(i), sc->sc_txpending--) {
		ds = &sc->sc_txsoft[i];

		/* Sync before reading the descriptor the chip may have
		 * written back. */
		STGE_CDTXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		control = le64toh(sc->sc_txdescs[i].tfd_control);
		if ((control & TFD_TFDDone) == 0)
			break;		/* chip hasn't finished this one */

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
		    0, ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txdirty = i;

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txpending == 0)
		ifp->if_timer = 0;
}
1079
1080 /*
1081 * stge_rxintr:
1082 *
1083 * Helper; handle receive interrupts.
1084 */
1085 static void
1086 stge_rxintr(struct stge_softc *sc)
1087 {
1088 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1089 struct stge_descsoft *ds;
1090 struct mbuf *m, *tailm;
1091 uint64_t status;
1092 int i, len;
1093
1094 for (i = sc->sc_rxptr;; i = STGE_NEXTRX(i)) {
1095 ds = &sc->sc_rxsoft[i];
1096
1097 STGE_CDRXSYNC(sc, i,
1098 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1099
1100 status = le64toh(sc->sc_rxdescs[i].rfd_status);
1101
1102 if ((status & RFD_RFDDone) == 0)
1103 break;
1104
1105 if (__predict_false(sc->sc_rxdiscard)) {
1106 STGE_INIT_RXDESC(sc, i);
1107 if (status & RFD_FrameEnd) {
1108 /* Reset our state. */
1109 sc->sc_rxdiscard = 0;
1110 }
1111 continue;
1112 }
1113
1114 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
1115 ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1116
1117 m = ds->ds_mbuf;
1118
1119 /*
1120 * Add a new receive buffer to the ring.
1121 */
1122 if (stge_add_rxbuf(sc, i) != 0) {
1123 /*
1124 * Failed, throw away what we've done so
1125 * far, and discard the rest of the packet.
1126 */
1127 ifp->if_ierrors++;
1128 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
1129 ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1130 STGE_INIT_RXDESC(sc, i);
1131 if ((status & RFD_FrameEnd) == 0)
1132 sc->sc_rxdiscard = 1;
1133 if (sc->sc_rxhead != NULL)
1134 m_freem(sc->sc_rxhead);
1135 STGE_RXCHAIN_RESET(sc);
1136 continue;
1137 }
1138
1139 #ifdef DIAGNOSTIC
1140 if (status & RFD_FrameStart) {
1141 KASSERT(sc->sc_rxhead == NULL);
1142 KASSERT(sc->sc_rxtailp == &sc->sc_rxhead);
1143 }
1144 #endif
1145
1146 STGE_RXCHAIN_LINK(sc, m);
1147
1148 /*
1149 * If this is not the end of the packet, keep
1150 * looking.
1151 */
1152 if ((status & RFD_FrameEnd) == 0) {
1153 sc->sc_rxlen += m->m_len;
1154 continue;
1155 }
1156
1157 /*
1158 * Okay, we have the entire packet now...
1159 */
1160 *sc->sc_rxtailp = NULL;
1161 m = sc->sc_rxhead;
1162 tailm = sc->sc_rxtail;
1163
1164 STGE_RXCHAIN_RESET(sc);
1165
1166 /*
1167 * If the packet had an error, drop it. Note we
1168 * count the error later in the periodic stats update.
1169 */
1170 if (status & (RFD_RxFIFOOverrun | RFD_RxRuntFrame |
1171 RFD_RxAlignmentError | RFD_RxFCSError |
1172 RFD_RxLengthError)) {
1173 m_freem(m);
1174 continue;
1175 }
1176
1177 /*
1178 * No errors.
1179 *
1180 * Note we have configured the chip to not include
1181 * the CRC at the end of the packet.
1182 */
1183 len = RFD_RxDMAFrameLen(status);
1184 tailm->m_len = len - sc->sc_rxlen;
1185
1186 /*
1187 * If the packet is small enough to fit in a
1188 * single header mbuf, allocate one and copy
1189 * the data into it. This greatly reduces
1190 * memory consumption when we receive lots
1191 * of small packets.
1192 */
1193 if (stge_copy_small != 0 && len <= (MHLEN - 2)) {
1194 struct mbuf *nm;
1195 MGETHDR(nm, M_DONTWAIT, MT_DATA);
1196 if (nm == NULL) {
1197 ifp->if_ierrors++;
1198 m_freem(m);
1199 continue;
1200 }
1201 nm->m_data += 2;
1202 nm->m_pkthdr.len = nm->m_len = len;
1203 m_copydata(m, 0, len, mtod(nm, void *));
1204 m_freem(m);
1205 m = nm;
1206 }
1207
1208 /*
1209 * Set the incoming checksum information for the packet.
1210 */
1211 if (status & RFD_IPDetected) {
1212 STGE_EVCNT_INCR(&sc->sc_ev_rxipsum);
1213 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
1214 if (status & RFD_IPError)
1215 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
1216 if (status & RFD_TCPDetected) {
1217 STGE_EVCNT_INCR(&sc->sc_ev_rxtcpsum);
1218 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
1219 if (status & RFD_TCPError)
1220 m->m_pkthdr.csum_flags |=
1221 M_CSUM_TCP_UDP_BAD;
1222 } else if (status & RFD_UDPDetected) {
1223 STGE_EVCNT_INCR(&sc->sc_ev_rxudpsum);
1224 m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
1225 if (status & RFD_UDPError)
1226 m->m_pkthdr.csum_flags |=
1227 M_CSUM_TCP_UDP_BAD;
1228 }
1229 }
1230
1231 m_set_rcvif(m, ifp);
1232 m->m_pkthdr.len = len;
1233
1234 /*
1235 * Pass this up to any BPF listeners, but only
1236 * pass if up the stack if it's for us.
1237 */
1238 #ifdef STGE_VLAN_UNTAG
1239 /*
1240 * Check for VLAN tagged packets
1241 */
1242 if (status & RFD_VLANDetected)
1243 vlan_set_tag(m, RFD_TCI(status));
1244
1245 #endif
1246 #if 0
1247 if (status & RFD_VLANDetected) {
1248 struct ether_header *eh;
1249 uint16_t etype;
1250
1251 eh = mtod(m, struct ether_header *);
1252 etype = ntohs(eh->ether_type);
1253 printf("%s: VLANtag detected (TCI %d) etype %x\n",
1254 ifp->if_xname, (uint16_t) RFD_TCI(status),
1255 etype);
1256 }
1257 #endif
1258 /* Pass it on. */
1259 if_percpuq_enqueue(ifp->if_percpuq, m);
1260 }
1261
1262 /* Update the receive pointer. */
1263 sc->sc_rxptr = i;
1264 }
1265
1266 /*
1267 * stge_tick:
1268 *
1269 * One second timer, used to tick the MII.
1270 */
1271 static void
1272 stge_tick(void *arg)
1273 {
1274 struct stge_softc *sc = arg;
1275 int s;
1276
1277 s = splnet();
1278 mii_tick(&sc->sc_mii);
1279 stge_stats_update(sc);
1280 splx(s);
1281
1282 callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc);
1283 }
1284
1285 /*
1286 * stge_stats_update:
1287 *
1288 * Read the TC9021 statistics counters.
1289 */
1290 static void
1291 stge_stats_update(struct stge_softc *sc)
1292 {
1293 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1294 bus_space_tag_t st = sc->sc_st;
1295 bus_space_handle_t sh = sc->sc_sh;
1296
1297 (void) bus_space_read_4(st, sh, STGE_OctetRcvOk);
1298
1299 (void) bus_space_read_4(st, sh, STGE_FramesRcvdOk);
1300
1301 ifp->if_ierrors +=
1302 (u_int) bus_space_read_2(st, sh, STGE_FramesLostRxErrors);
1303
1304 (void) bus_space_read_4(st, sh, STGE_OctetXmtdOk);
1305
1306 ifp->if_opackets +=
1307 bus_space_read_4(st, sh, STGE_FramesXmtdOk);
1308
1309 ifp->if_collisions +=
1310 bus_space_read_4(st, sh, STGE_LateCollisions) +
1311 bus_space_read_4(st, sh, STGE_MultiColFrames) +
1312 bus_space_read_4(st, sh, STGE_SingleColFrames);
1313
1314 ifp->if_oerrors +=
1315 (u_int) bus_space_read_2(st, sh, STGE_FramesAbortXSColls) +
1316 (u_int) bus_space_read_2(st, sh, STGE_FramesWEXDeferal);
1317 }
1318
1319 /*
1320 * stge_reset:
1321 *
1322 * Perform a soft reset on the TC9021.
1323 */
1324 static void
1325 stge_reset(struct stge_softc *sc)
1326 {
1327 uint32_t ac;
1328 int i;
1329
1330 ac = bus_space_read_4(sc->sc_st, sc->sc_sh, STGE_AsicCtrl);
1331
1332 /*
1333 * Only assert RstOut if we're fiber. We need GMII clocks
1334 * to be present in order for the reset to complete on fiber
1335 * cards.
1336 */
1337 bus_space_write_4(sc->sc_st, sc->sc_sh, STGE_AsicCtrl,
1338 ac | AC_GlobalReset | AC_RxReset | AC_TxReset |
1339 AC_DMA | AC_FIFO | AC_Network | AC_Host | AC_AutoInit |
1340 (sc->sc_usefiber ? AC_RstOut : 0));
1341
1342 delay(50000);
1343
1344 for (i = 0; i < STGE_TIMEOUT; i++) {
1345 delay(5000);
1346 if ((bus_space_read_4(sc->sc_st, sc->sc_sh, STGE_AsicCtrl) &
1347 AC_ResetBusy) == 0)
1348 break;
1349 }
1350
1351 if (i == STGE_TIMEOUT)
1352 printf("%s: reset failed to complete\n",
1353 device_xname(sc->sc_dev));
1354
1355 delay(1000);
1356 }
1357
1358 /*
1359 * stge_init: [ ifnet interface function ]
1360 *
1361 * Initialize the interface. Must be called at splnet().
1362 */
static int
stge_init(struct ifnet *ifp)
{
	struct stge_softc *sc = ifp->if_softc;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct stge_descsoft *ds;
	int i, error = 0;

	/*
	 * Cancel any pending I/O.
	 */
	stge_stop(ifp, 0);

	/*
	 * Reset the chip to a known state.
	 */
	stge_reset(sc);

	/*
	 * Initialize the transmit descriptor ring.
	 * Each descriptor links to the next (circularly) and is marked
	 * TFDDone so the chip treats the whole ring as free.
	 */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	for (i = 0; i < STGE_NTXDESC; i++) {
		sc->sc_txdescs[i].tfd_next = htole64(
		    STGE_CDTXADDR(sc, STGE_NEXTTX(i)));
		sc->sc_txdescs[i].tfd_control = htole64(TFD_TFDDone);
	}
	sc->sc_txpending = 0;
	sc->sc_txdirty = 0;
	sc->sc_txlast = STGE_NTXDESC - 1;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.  Reuse mbufs that survived a previous
	 * run; allocate fresh ones for empty slots.
	 */
	for (i = 0; i < STGE_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf == NULL) {
			if ((error = stge_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    device_xname(sc->sc_dev), i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				stge_rxdrain(sc);
				goto out;
			}
		} else
			STGE_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;
	sc->sc_rxdiscard = 0;
	STGE_RXCHAIN_RESET(sc);

	/* Set the station address. */
	for (i = 0; i < 6; i++)
		bus_space_write_1(st, sh, STGE_StationAddress0 + i,
		    CLLADDR(ifp->if_sadl)[i]);

	/*
	 * Set the statistics masks.  Disable all the RMON stats,
	 * and disable selected stats in the non-RMON stats registers.
	 * (A set bit in the mask disables the corresponding counter;
	 * the counters left enabled are the ones stge_stats_update()
	 * reads.)
	 */
	bus_space_write_4(st, sh, STGE_RMONStatisticsMask, 0xffffffff);
	bus_space_write_4(st, sh, STGE_StatisticsMask,
	    (1U << 1) | (1U << 2) | (1U << 3) | (1U << 4) | (1U << 5) |
	    (1U << 6) | (1U << 7) | (1U << 8) | (1U << 9) | (1U << 10) |
	    (1U << 13) | (1U << 14) | (1U << 15) | (1U << 19) | (1U << 20) |
	    (1U << 21));

	/* Set up the receive filter. */
	stge_set_filter(sc);

	/*
	 * Give the transmit and receive ring to the chip.
	 */
	bus_space_write_4(st, sh, STGE_TFDListPtrHi, 0); /* NOTE: 32-bit DMA */
	bus_space_write_4(st, sh, STGE_TFDListPtrLo,
	    STGE_CDTXADDR(sc, sc->sc_txdirty));

	bus_space_write_4(st, sh, STGE_RFDListPtrHi, 0); /* NOTE: 32-bit DMA */
	bus_space_write_4(st, sh, STGE_RFDListPtrLo,
	    STGE_CDRXADDR(sc, sc->sc_rxptr));

	/*
	 * Initialize the Tx auto-poll period.  It's OK to make this number
	 * large (255 is the max, but we use 127) -- we explicitly kick the
	 * transmit engine when there's actually a packet.
	 */
	bus_space_write_1(st, sh, STGE_TxDMAPollPeriod, 127);

	/* ..and the Rx auto-poll period. */
	bus_space_write_1(st, sh, STGE_RxDMAPollPeriod, 64);

	/* Initialize the Tx start threshold. */
	bus_space_write_2(st, sh, STGE_TxStartThresh, sc->sc_txthresh);

	/* RX DMA thresholds, from linux */
	bus_space_write_1(st, sh, STGE_RxDMABurstThresh, 0x30);
	bus_space_write_1(st, sh, STGE_RxDMAUrgentThresh, 0x30);

	/* Rx early threhold, from Linux */
	bus_space_write_2(st, sh, STGE_RxEarlyThresh, 0x7ff);

	/* Tx DMA thresholds, from Linux */
	bus_space_write_1(st, sh, STGE_TxDMABurstThresh, 0x30);
	bus_space_write_1(st, sh, STGE_TxDMAUrgentThresh, 0x04);

	/*
	 * Initialize the Rx DMA interrupt control register.  We
	 * request an interrupt after every incoming packet, but
	 * defer it for 32us (64 * 512 ns).  When the number of
	 * interrupts pending reaches 8, we stop deferring the
	 * interrupt, and signal it immediately.
	 */
	bus_space_write_4(st, sh, STGE_RxDMAIntCtrl,
	    RDIC_RxFrameCount(8) | RDIC_RxDMAWaitTime(512));

	/*
	 * Initialize the interrupt mask.  Ack any stale interrupt
	 * status first, then enable the sources we service in
	 * the interrupt handler.
	 */
	sc->sc_IntEnable = IS_HostError | IS_TxComplete | IS_UpdateStats |
	    IS_TxDMAComplete | IS_RxDMAComplete | IS_RFDListEnd;
	bus_space_write_2(st, sh, STGE_IntStatus, 0xffff);
	bus_space_write_2(st, sh, STGE_IntEnable, sc->sc_IntEnable);

	/*
	 * Configure the DMA engine.
	 * XXX Should auto-tune TxBurstLimit.
	 */
	bus_space_write_4(st, sh, STGE_DMACtrl, sc->sc_DMACtrl |
	    DMAC_TxBurstLimit(3));

	/*
	 * Send a PAUSE frame when we reach 29,696 bytes in the Rx
	 * FIFO, and send an un-PAUSE frame when the FIFO is totally
	 * empty again.  (Thresholds are in units of 16 bytes.)
	 */
	bus_space_write_2(st, sh, STGE_FlowOnTresh, 29696 / 16);
	bus_space_write_2(st, sh, STGE_FlowOffThresh, 0);

	/*
	 * Set the maximum frame size.  Account for the Ethernet
	 * header, the CRC, and (when VLAN_MTU is enabled) the
	 * 802.1Q encapsulation.
	 */
	bus_space_write_2(st, sh, STGE_MaxFrameSize,
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
	    ((sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) ?
	     ETHER_VLAN_ENCAP_LEN : 0));

	/*
	 * Initialize MacCtrl -- do it before setting the media,
	 * as setting the media will actually program the register.
	 *
	 * Note: We have to poke the IFS value before poking
	 * anything else.
	 */
	sc->sc_MACCtrl = MC_IFSSelect(0);
	bus_space_write_4(st, sh, STGE_MACCtrl, sc->sc_MACCtrl);
	sc->sc_MACCtrl |= MC_StatisticsEnable | MC_TxEnable | MC_RxEnable;
#ifdef STGE_VLAN_UNTAG
	sc->sc_MACCtrl |= MC_AutoVLANuntagging;
#endif

	if (sc->sc_rev >= 6) {		/* >= B.2 */
		/* Multi-frag frame bug work-around. */
		bus_space_write_2(st, sh, STGE_DebugCtrl,
		    bus_space_read_2(st, sh, STGE_DebugCtrl) | 0x0200);

		/* Tx Poll Now bug work-around. */
		bus_space_write_2(st, sh, STGE_DebugCtrl,
		    bus_space_read_2(st, sh, STGE_DebugCtrl) | 0x0010);
		/* XXX ? from linux */
		bus_space_write_2(st, sh, STGE_DebugCtrl,
		    bus_space_read_2(st, sh, STGE_DebugCtrl) | 0x0020);
	}

	/*
	 * Set the current media.
	 */
	if ((error = ether_mediachange(ifp)) != 0)
		goto out;

	/*
	 * Start the one second MII clock.
	 */
	callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc);

	/*
	 * ...all done!
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error)
		printf("%s: interface not running\n", device_xname(sc->sc_dev));
	return (error);
}
1564
1565 /*
1566 * stge_drain:
1567 *
1568 * Drain the receive queue.
1569 */
1570 static void
1571 stge_rxdrain(struct stge_softc *sc)
1572 {
1573 struct stge_descsoft *ds;
1574 int i;
1575
1576 for (i = 0; i < STGE_NRXDESC; i++) {
1577 ds = &sc->sc_rxsoft[i];
1578 if (ds->ds_mbuf != NULL) {
1579 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1580 ds->ds_mbuf->m_next = NULL;
1581 m_freem(ds->ds_mbuf);
1582 ds->ds_mbuf = NULL;
1583 }
1584 }
1585 }
1586
1587 /*
1588 * stge_stop: [ ifnet interface function ]
1589 *
1590 * Stop transmission on the interface.
1591 */
static void
stge_stop(struct ifnet *ifp, int disable)
{
	struct stge_softc *sc = ifp->if_softc;
	struct stge_descsoft *ds;
	int i;

	/*
	 * Stop the one second clock.
	 */
	callout_stop(&sc->sc_tick_ch);

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	/*
	 * Disable interrupts.
	 */
	bus_space_write_2(sc->sc_st, sc->sc_sh, STGE_IntEnable, 0);

	/*
	 * Stop receiver, transmitter, and stats update.
	 */
	bus_space_write_4(sc->sc_st, sc->sc_sh, STGE_MACCtrl,
	    MC_StatisticsDisable | MC_TxDisable | MC_RxDisable);

	/*
	 * Stop the transmit and receive DMA.  Wait for any in-flight
	 * DMA to settle before clearing the list pointers so the chip
	 * cannot fetch a stale descriptor.
	 */
	stge_dma_wait(sc);
	bus_space_write_4(sc->sc_st, sc->sc_sh, STGE_TFDListPtrHi, 0);
	bus_space_write_4(sc->sc_st, sc->sc_sh, STGE_TFDListPtrLo, 0);
	bus_space_write_4(sc->sc_st, sc->sc_sh, STGE_RFDListPtrHi, 0);
	bus_space_write_4(sc->sc_st, sc->sc_sh, STGE_RFDListPtrLo, 0);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < STGE_NTXDESC; i++) {
		ds = &sc->sc_txsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	/* Optionally free the Rx buffers too (e.g. on detach). */
	if (disable)
		stge_rxdrain(sc);
}
1648
1649 static int
1650 stge_eeprom_wait(struct stge_softc *sc)
1651 {
1652 int i;
1653
1654 for (i = 0; i < STGE_TIMEOUT; i++) {
1655 delay(1000);
1656 if ((bus_space_read_2(sc->sc_st, sc->sc_sh, STGE_EepromCtrl) &
1657 EC_EepromBusy) == 0)
1658 return (0);
1659 }
1660 return (1);
1661 }
1662
1663 /*
1664 * stge_read_eeprom:
1665 *
1666 * Read data from the serial EEPROM.
1667 */
1668 static void
1669 stge_read_eeprom(struct stge_softc *sc, int offset, uint16_t *data)
1670 {
1671
1672 if (stge_eeprom_wait(sc))
1673 printf("%s: EEPROM failed to come ready\n",
1674 device_xname(sc->sc_dev));
1675
1676 bus_space_write_2(sc->sc_st, sc->sc_sh, STGE_EepromCtrl,
1677 EC_EepromAddress(offset) | EC_EepromOpcode(EC_OP_RR));
1678 if (stge_eeprom_wait(sc))
1679 printf("%s: EEPROM read timed out\n",
1680 device_xname(sc->sc_dev));
1681 *data = bus_space_read_2(sc->sc_st, sc->sc_sh, STGE_EepromData);
1682 }
1683
1684 /*
1685 * stge_add_rxbuf:
1686 *
1687 * Add a receive buffer to the indicated descriptor.
1688 */
1689 static int
1690 stge_add_rxbuf(struct stge_softc *sc, int idx)
1691 {
1692 struct stge_descsoft *ds = &sc->sc_rxsoft[idx];
1693 struct mbuf *m;
1694 int error;
1695
1696 MGETHDR(m, M_DONTWAIT, MT_DATA);
1697 if (m == NULL)
1698 return (ENOBUFS);
1699
1700 MCLGET(m, M_DONTWAIT);
1701 if ((m->m_flags & M_EXT) == 0) {
1702 m_freem(m);
1703 return (ENOBUFS);
1704 }
1705
1706 m->m_data = m->m_ext.ext_buf + 2;
1707 m->m_len = MCLBYTES - 2;
1708
1709 if (ds->ds_mbuf != NULL)
1710 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1711
1712 ds->ds_mbuf = m;
1713
1714 error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
1715 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
1716 if (error) {
1717 printf("%s: can't load rx DMA map %d, error = %d\n",
1718 device_xname(sc->sc_dev), idx, error);
1719 panic("stge_add_rxbuf"); /* XXX */
1720 }
1721
1722 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
1723 ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1724
1725 STGE_INIT_RXDESC(sc, idx);
1726
1727 return (0);
1728 }
1729
1730 /*
1731 * stge_set_filter:
1732 *
1733 * Set up the receive filter.
1734 */
static void
stge_set_filter(struct stge_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc;
	uint32_t mchash[2];	/* 64-bit multicast hash table, 2 x 32 bits */

	/* Always accept frames addressed to us. */
	sc->sc_ReceiveMode = RM_ReceiveUnicast;
	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_ReceiveMode |= RM_ReceiveBroadcast;

	/* XXX: ST1023 only works in promiscuous mode */
	if (sc->sc_stge1023)
		ifp->if_flags |= IFF_PROMISC;

	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_ReceiveMode |= RM_ReceiveAllFrames;
		goto allmulti;
	}

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator, and then using the low-order
	 * 6 bits as an index into the 64 bit multicast hash table.  The
	 * high order bits select the register, while the rest of the bits
	 * select the bit within the register.
	 */

	memset(mchash, 0, sizeof(mchash));

	/* Hold the ethercom lock while walking the multicast list. */
	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	if (enm == NULL) {
		ETHER_UNLOCK(ec);
		goto done;
	}

	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			ETHER_UNLOCK(ec);
			goto allmulti;
		}

		crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);

		/* Just want the 6 least significant bits. */
		crc &= 0x3f;

		/* Set the corresponding bit in the hash table. */
		mchash[crc >> 5] |= 1 << (crc & 0x1f);

		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	sc->sc_ReceiveMode |= RM_ReceiveMulticastHash;

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto done;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	sc->sc_ReceiveMode |= RM_ReceiveMulticast;

 done:
	if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
		/*
		 * Program the multicast hash table.
		 */
		bus_space_write_4(sc->sc_st, sc->sc_sh, STGE_HashTable0,
		    mchash[0]);
		bus_space_write_4(sc->sc_st, sc->sc_sh, STGE_HashTable1,
		    mchash[1]);
	}

	/* Finally, program the assembled receive mode into the chip. */
	bus_space_write_2(sc->sc_st, sc->sc_sh, STGE_ReceiveMode,
	    sc->sc_ReceiveMode);
}
1824
1825 /*
1826 * stge_mii_readreg: [mii interface function]
1827 *
1828 * Read a PHY register on the MII of the TC9021.
1829 */
1830 static int
1831 stge_mii_readreg(device_t self, int phy, int reg, uint16_t *val)
1832 {
1833
1834 return mii_bitbang_readreg(self, &stge_mii_bitbang_ops, phy, reg, val);
1835 }
1836
1837 /*
1838 * stge_mii_writereg: [mii interface function]
1839 *
1840 * Write a PHY register on the MII of the TC9021.
1841 */
1842 static int
1843 stge_mii_writereg(device_t self, int phy, int reg, uint16_t val)
1844 {
1845
1846 return mii_bitbang_writereg(self, &stge_mii_bitbang_ops, phy, reg,
1847 val);
1848 }
1849
1850 /*
1851 * stge_mii_statchg: [mii interface function]
1852 *
1853 * Callback from MII layer when media changes.
1854 */
1855 static void
1856 stge_mii_statchg(struct ifnet *ifp)
1857 {
1858 struct stge_softc *sc = ifp->if_softc;
1859
1860 if (sc->sc_mii.mii_media_active & IFM_FDX)
1861 sc->sc_MACCtrl |= MC_DuplexSelect;
1862 else
1863 sc->sc_MACCtrl &= ~MC_DuplexSelect;
1864
1865 /* XXX 802.1x flow-control? */
1866
1867 bus_space_write_4(sc->sc_st, sc->sc_sh, STGE_MACCtrl, sc->sc_MACCtrl);
1868 }
1869
1870 /*
1871 * sste_mii_bitbang_read: [mii bit-bang interface function]
1872 *
1873 * Read the MII serial port for the MII bit-bang module.
1874 */
1875 static uint32_t
1876 stge_mii_bitbang_read(device_t self)
1877 {
1878 struct stge_softc *sc = device_private(self);
1879
1880 return (bus_space_read_1(sc->sc_st, sc->sc_sh, STGE_PhyCtrl));
1881 }
1882
1883 /*
1884 * stge_mii_bitbang_write: [mii big-bang interface function]
1885 *
1886 * Write the MII serial port for the MII bit-bang module.
1887 */
1888 static void
1889 stge_mii_bitbang_write(device_t self, uint32_t val)
1890 {
1891 struct stge_softc *sc = device_private(self);
1892
1893 bus_space_write_1(sc->sc_st, sc->sc_sh, STGE_PhyCtrl,
1894 val | sc->sc_PhyCtrl);
1895 }
1896