/*	$NetBSD: if_sip.c,v 1.7 2000/02/02 17:09:40 thorpej Exp $	*/

/*-
 * Copyright (c) 1999 Network Computer, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Network Computer, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY NETWORK COMPUTER, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Silicon Integrated Systems SiS 900 and
 * SiS 7016 10/100 PCI Ethernet controllers.
 *
 * Written by Jason R. Thorpe for Network Computer, Inc.
 */

#include "opt_inet.h"
#include "opt_ns.h"
#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <vm/vm.h>		/* for PAGE_SIZE */

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#endif

#ifdef NS
#include <netns/ns.h>
#include <netns/ns_if.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_sipreg.h>

/*
 * Devices supported by this driver.
 */
const struct sip_product {
	pci_vendor_id_t		sip_vendor;
	pci_product_id_t	sip_product;
	const char		*sip_name;
} sip_products[] = {
	{ PCI_VENDOR_SIS,	PCI_PRODUCT_SIS_900,
	  "SiS 900 10/100 Ethernet" },
	{ PCI_VENDOR_SIS,	PCI_PRODUCT_SIS_7016,
	  "SiS 7016 10/100 Ethernet" },

	{ 0,			0,
	  NULL },
};

/*
 * Transmit descriptor list size.  This is arbitrary, but allocate
 * enough descriptors for 64 pending transmissions, and 16 segments
 * per packet.  This MUST work out to a power of 2.
 */
#define	SIP_NTXSEGS		16

#define	SIP_TXQUEUELEN		64
#define	SIP_NTXDESC		(SIP_TXQUEUELEN * SIP_NTXSEGS)
#define	SIP_NTXDESC_MASK	(SIP_NTXDESC - 1)
#define	SIP_NEXTTX(x)		(((x) + 1) & SIP_NTXDESC_MASK)

/*
 * Receive descriptor list size.  We have one Rx buffer per incoming
 * packet, so this logic is a little simpler.
 */
#define	SIP_NRXDESC		64
#define	SIP_NRXDESC_MASK	(SIP_NRXDESC - 1)
#define	SIP_NEXTRX(x)		(((x) + 1) & SIP_NRXDESC_MASK)
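
/*
 * The SIP_NEXTTX()/SIP_NEXTRX() wrap trick above works only because the
 * ring sizes are powers of 2, so masking is equivalent to a modulo.  A
 * minimal compile-time check of that requirement could look like the
 * following sketch; the (hypothetical) typedef gets a negative array
 * size, and thus fails the build, whenever the rule is violated.
 */
typedef char sip_ntxdesc_is_a_power_of_2
    [((SIP_NTXDESC & (SIP_NTXDESC - 1)) == 0) ? 1 : -1];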

/*
 * Control structures are DMA'd to the SiS 900 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct sip_control_data {
	/*
	 * The transmit descriptors.
	 */
	struct sip_desc scd_txdescs[SIP_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	struct sip_desc scd_rxdescs[SIP_NRXDESC];
};

#define	SIP_CDOFF(x)	offsetof(struct sip_control_data, x)
#define	SIP_CDTXOFF(x)	SIP_CDOFF(scd_txdescs[(x)])
#define	SIP_CDRXOFF(x)	SIP_CDOFF(scd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct sip_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	SIMPLEQ_ENTRY(sip_txsoft) txs_q;
};

SIMPLEQ_HEAD(sip_txsq, sip_txsoft);

/*
 * Software state for receive jobs.
 */
struct sip_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

/*
 * Software state per device.
 */
struct sip_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */
	pci_product_id_t sc_model;	/* which model are we? */

	void *sc_ih;			/* interrupt cookie */

	struct mii_data sc_mii;		/* MII/media information */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/*
	 * Software state for transmit and receive descriptors.
	 */
	struct sip_txsoft sc_txsoft[SIP_TXQUEUELEN];
	struct sip_rxsoft sc_rxsoft[SIP_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct sip_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->scd_txdescs
#define	sc_rxdescs	sc_control_data->scd_rxdescs

	u_int32_t sc_txcfg;		/* prototype TXCFG register */
	u_int32_t sc_rxcfg;		/* prototype RXCFG register */
	u_int32_t sc_imr;		/* prototype IMR register */
	u_int32_t sc_rfcr;		/* prototype RFCR register */

	u_int32_t sc_tx_fill_thresh;	/* transmit fill threshold */
	u_int32_t sc_tx_drain_thresh;	/* transmit drain threshold */

	u_int32_t sc_rx_drain_thresh;	/* receive drain threshold */

	int sc_flags;			/* misc. flags; see below */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	struct sip_txsq sc_txfreeq;	/* free Tx descsofts */
	struct sip_txsq sc_txdirtyq;	/* dirty Tx descsofts */

	int sc_rxptr;			/* next ready Rx descriptor/descsoft */
};

/* sc_flags */
#define	SIPF_PAUSED	0x00000001	/* paused (802.3x flow control) */

#define	SIP_CDTXADDR(sc, x)	((sc)->sc_cddma + SIP_CDTXOFF((x)))
#define	SIP_CDRXADDR(sc, x)	((sc)->sc_cddma + SIP_CDRXOFF((x)))
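
/*
 * Example: because the control data clump occupies a single DMA
 * segment, the device-visible address of, say, Tx descriptor 5 is just
 * sc_cddma + offsetof(struct sip_control_data, scd_txdescs[5]), which
 * is exactly what SIP_CDTXADDR(sc, 5) expands to.
 */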

#define	SIP_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > SIP_NTXDESC) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    SIP_CDTXOFF(__x), sizeof(struct sip_desc) *		\
		    (SIP_NTXDESC - __x), (ops));			\
		__n -= (SIP_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    SIP_CDTXOFF(__x), sizeof(struct sip_desc) * __n, (ops));	\
} while (0)
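
/*
 * Worked example: with SIP_NTXDESC == 1024, SIP_CDTXSYNC(sc, 1022, 4, ops)
 * wraps, so the first bus_dmamap_sync() call covers descriptors 1022 and
 * 1023 and the second covers descriptors 0 and 1, rather than syncing
 * the entire ring.
 */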

#define	SIP_CDRXSYNC(sc, x, ops)					\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    SIP_CDRXOFF((x)), sizeof(struct sip_desc), (ops))

/*
 * Note we rely on MCLBYTES being a power of two below.
 */
#define	SIP_INIT_RXDESC(sc, x)						\
do {									\
	struct sip_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	struct sip_desc *__sipd = &(sc)->sc_rxdescs[(x)];		\
									\
	__sipd->sipd_link = SIP_CDRXADDR((sc), SIP_NEXTRX((x)));	\
	__sipd->sipd_bufptr = __rxs->rxs_dmamap->dm_segs[0].ds_addr;	\
	__sipd->sipd_cmdsts = CMDSTS_INTR |				\
	    ((MCLBYTES - 1) & CMDSTS_SIZE_MASK);			\
	SIP_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
} while (0)

void	sip_start __P((struct ifnet *));
void	sip_watchdog __P((struct ifnet *));
int	sip_ioctl __P((struct ifnet *, u_long, caddr_t));

void	sip_shutdown __P((void *));

void	sip_reset __P((struct sip_softc *));
int	sip_init __P((struct sip_softc *));
void	sip_stop __P((struct sip_softc *, int));
void	sip_rxdrain __P((struct sip_softc *));
int	sip_add_rxbuf __P((struct sip_softc *, int));
void	sip_read_eeprom __P((struct sip_softc *, int, int, u_int16_t *));
void	sip_set_filter __P((struct sip_softc *));
void	sip_tick __P((void *));

int	sip_intr __P((void *));
void	sip_txintr __P((struct sip_softc *));
void	sip_rxintr __P((struct sip_softc *));

int	sip_mii_readreg __P((struct device *, int, int));
void	sip_mii_writereg __P((struct device *, int, int, int));
void	sip_mii_statchg __P((struct device *));

int	sip_mediachange __P((struct ifnet *));
void	sip_mediastatus __P((struct ifnet *, struct ifmediareq *));

int	sip_match __P((struct device *, struct cfdata *, void *));
void	sip_attach __P((struct device *, struct device *, void *));

int	sip_copy_small = 0;

struct cfattach sip_ca = {
	sizeof(struct sip_softc), sip_match, sip_attach,
};

const struct sip_product *sip_lookup __P((const struct pci_attach_args *));

const struct sip_product *
sip_lookup(pa)
	const struct pci_attach_args *pa;
{
	const struct sip_product *sip;

	for (sip = sip_products; sip->sip_name != NULL; sip++) {
		if (PCI_VENDOR(pa->pa_id) == sip->sip_vendor &&
		    PCI_PRODUCT(pa->pa_id) == sip->sip_product)
			return (sip);
	}
	return (NULL);
}

int
sip_match(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;
{
	struct pci_attach_args *pa = aux;

	if (sip_lookup(pa) != NULL)
		return (1);

	return (0);
}

void
sip_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct sip_softc *sc = (struct sip_softc *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_space_tag_t iot, memt;
	bus_space_handle_t ioh, memh;
	bus_dma_segment_t seg;
	int ioh_valid, memh_valid;
	int i, rseg, error;
	const struct sip_product *sip;
	pcireg_t pmode;
	u_int16_t enaddr[ETHER_ADDR_LEN / 2];

	sip = sip_lookup(pa);
	if (sip == NULL) {
		printf("\n");
		panic("sip_attach: impossible");
	}

	printf(": %s\n", sip->sip_name);

	sc->sc_model = PCI_PRODUCT(pa->pa_id);

	/*
	 * Map the device.
	 */
	ioh_valid = (pci_mapreg_map(pa, SIP_PCI_CFGIOA,
	    PCI_MAPREG_TYPE_IO, 0,
	    &iot, &ioh, NULL, NULL) == 0);
	memh_valid = (pci_mapreg_map(pa, SIP_PCI_CFGMA,
	    PCI_MAPREG_TYPE_MEM|PCI_MAPREG_MEM_TYPE_32BIT, 0,
	    &memt, &memh, NULL, NULL) == 0);

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else if (ioh_valid) {
		sc->sc_st = iot;
		sc->sc_sh = ioh;
	} else {
		printf("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	sc->sc_dmat = pa->pa_dmat;

	/* Enable bus mastering. */
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) |
	    PCI_COMMAND_MASTER_ENABLE);

	/* Get it out of power save mode if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, 0, 0)) {
		pmode = pci_conf_read(pc, pa->pa_tag, SIP_PCI_CFGPMCSR) & 0x3;
		if (pmode == 3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			printf("%s: unable to wake up from power state D3\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (pmode != 0) {
			printf("%s: waking up from power state D%d\n",
			    sc->sc_dev.dv_xname, pmode);
			pci_conf_write(pc, pa->pa_tag, SIP_PCI_CFGPMCSR, 0);
		}
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pc, pa->pa_intrtag, pa->pa_intrpin,
	    pa->pa_intrline, &ih)) {
		printf("%s: unable to map interrupt\n", sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, sip_intr, sc);
	if (sc->sc_ih == NULL) {
		printf("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	SIMPLEQ_INIT(&sc->sc_txfreeq);
	SIMPLEQ_INIT(&sc->sc_txdirtyq);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for them.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct sip_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct sip_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct sip_control_data), 1,
	    sizeof(struct sip_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct sip_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < SIP_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    SIP_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < SIP_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	sip_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	sip_read_eeprom(sc, SIP_EEPROM_ETHERNET_ID0 >> 1,
	    sizeof(enaddr) / sizeof(enaddr[0]), enaddr);

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf((u_int8_t *)enaddr));

	/*
	 * Initialize our media structures and probe the MII.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = sip_mii_readreg;
	sc->sc_mii.mii_writereg = sip_mii_writereg;
	sc->sc_mii.mii_statchg = sip_mii_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, sip_mediachange,
	    sip_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sip_ioctl;
	ifp->if_start = sip_start;
	ifp->if_watchdog = sip_watchdog;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, (u_int8_t *)enaddr);
#if NBPFILTER > 0
	bpfattach(&sc->sc_ethercom.ec_if.if_bpf, ifp, DLT_EN10MB,
	    sizeof(struct ether_header));
#endif

	/*
	 * Make sure the interface is shut down during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(sip_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < SIP_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < SIP_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct sip_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * sip_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
void
sip_shutdown(arg)
	void *arg;
{
	struct sip_softc *sc = arg;

	sip_stop(sc, 1);
}

/*
 * sip_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
void
sip_start(ifp)
	struct ifnet *ifp;
{
	struct sip_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct sip_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, firsttx, nexttx, lasttx, ofree, seg;

	/*
	 * If we've been told to pause, don't transmit any more packets.
	 */
	if (sc->sc_flags & SIPF_PAUSED)
		ifp->if_flags |= IFF_OACTIVE;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_txfree;
	firsttx = sc->sc_txnext;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while ((txs = SIMPLEQ_FIRST(&sc->sc_txfreeq)) != NULL &&
	       sc->sc_txfree != 0) {
		/*
		 * Grab a packet off the queue.
		 */
		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  In this case, we'll copy
		 * and try again.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				IF_PREPEND(&ifp->if_snd, m0);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					IF_PREPEND(&ifp->if_snd, m0);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			m_freem(m0);
			m0 = m;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m0, BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname,
				    error);
				IF_PREPEND(&ifp->if_snd, m0);
				break;
			}
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.
		 */
		if (dmamap->dm_nsegs > sc->sc_txfree) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX is it worth it?
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			IF_PREPEND(&ifp->if_snd, m0);
			break;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptors.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = SIP_NEXTTX(nexttx)) {
			/*
			 * If this is the first descriptor we're
			 * enqueueing, don't set the OWN bit just
			 * yet.  That could cause a race condition.
			 * We'll do it below.
			 */
			sc->sc_txdescs[nexttx].sipd_bufptr =
			    dmamap->dm_segs[seg].ds_addr;
			sc->sc_txdescs[nexttx].sipd_cmdsts =
			    (nexttx == firsttx ? 0 : CMDSTS_OWN) |
			    CMDSTS_MORE | dmamap->dm_segs[seg].ds_len;
			lasttx = nexttx;
		}

		/* Clear the MORE bit on the last segment. */
		sc->sc_txdescs[lasttx].sipd_cmdsts &= ~CMDSTS_MORE;

		/* Sync the descriptors we're using. */
		SIP_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later,
		 * and remember what txdirty will be once the packet is
		 * done.
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_lastdesc = lasttx;

		/* Advance the tx pointer. */
		sc->sc_txfree -= dmamap->dm_nsegs;
		sc->sc_txnext = nexttx;

		SIMPLEQ_REMOVE_HEAD(&sc->sc_txfreeq, txs, txs_q);
		SIMPLEQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (txs == NULL || sc->sc_txfree == 0) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/*
		 * Cause a descriptor interrupt to happen on the
		 * last packet we enqueued.
		 */
		sc->sc_txdescs[lasttx].sipd_cmdsts |= CMDSTS_INTR;
		SIP_CDTXSYNC(sc, lasttx, 1,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * The entire packet chain is set up.  Give the
		 * first descriptor to the chip now.
		 */
		sc->sc_txdescs[firsttx].sipd_cmdsts |= CMDSTS_OWN;
		SIP_CDTXSYNC(sc, firsttx, 1,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Start the transmit process. */
		if ((bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_CR) &
		     CR_TXE) == 0) {
			bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_TXDP,
			    SIP_CDTXADDR(sc, firsttx));
			bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CR,
			    CR_TXE);
		}

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * sip_watchdog:	[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
void
sip_watchdog(ifp)
	struct ifnet *ifp;
{
	struct sip_softc *sc = ifp->if_softc;

	/*
	 * The chip seems to ignore the CMDSTS_INTR bit sometimes!
	 * If we get a timeout, try and sweep up transmit descriptors.
	 * If we manage to sweep them all up, ignore the lack of
	 * interrupt.
	 */
	sip_txintr(sc);

	if (sc->sc_txfree != SIP_NTXDESC) {
		printf("%s: device timeout\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;

		/* Reset the interface. */
		(void) sip_init(sc);
	} else if (ifp->if_flags & IFF_DEBUG)
		printf("%s: recovered from device timeout\n",
		    sc->sc_dev.dv_xname);

	/* Try to get more packets going. */
	sip_start(ifp);
}

/*
 * sip_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
int
sip_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct sip_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			if ((error = sip_init(sc)) != 0)
				break;
			arp_ifinit(ifp, ifa);
			break;
#endif /* INET */
#ifdef NS
		case AF_NS:
		    {
			struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;

			if (ns_nullhost(*ina))
				ina->x_host = *(union ns_host *)
				    LLADDR(ifp->if_sadl);
			else
				memcpy(LLADDR(ifp->if_sadl),
				    ina->x_host.c_host, ifp->if_addrlen);
			error = sip_init(sc);
			break;
		    }
#endif /* NS */
		default:
			error = sip_init(sc);
			break;
		}
		break;

	case SIOCSIFMTU:
		if (ifr->ifr_mtu > ETHERMTU)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If the interface is marked down and it is running,
			 * then stop it.
			 */
			sip_stop(sc, 1);
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
			   (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If the interface is marked up and it is stopped,
			 * then start it.
			 */
			error = sip_init(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect the hardware state.
			 */
			error = sip_init(sc);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_ethercom) :
		    ether_delmulti(ifr, &sc->sc_ethercom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			sip_set_filter(sc);
			error = 0;
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = EINVAL;
		break;
	}

	/* Try to get more packets going. */
	sip_start(ifp);

	splx(s);
	return (error);
}

/*
 * sip_intr:
 *
 *	Interrupt service routine.
 */
int
sip_intr(arg)
	void *arg;
{
	struct sip_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	u_int32_t isr;
	int handled = 0;

	for (;;) {
		/* Reading clears interrupt. */
		isr = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_ISR);
		if ((isr & sc->sc_imr) == 0)
			break;

		handled = 1;

		if (isr & (ISR_RXORN|ISR_RXIDLE|ISR_RXDESC)) {
			/* Grab any new packets. */
			sip_rxintr(sc);

			if (isr & ISR_RXORN) {
				printf("%s: receive FIFO overrun\n",
				    sc->sc_dev.dv_xname);

				/* XXX adjust rx_drain_thresh? */
			}

			if (isr & ISR_RXIDLE) {
				printf("%s: receive ring overrun\n",
				    sc->sc_dev.dv_xname);

				/* Get the receive process going again. */
				bus_space_write_4(sc->sc_st, sc->sc_sh,
				    SIP_RXDP, SIP_CDRXADDR(sc, sc->sc_rxptr));
				bus_space_write_4(sc->sc_st, sc->sc_sh,
				    SIP_CR, CR_RXE);
			}
		}

		if (isr & (ISR_TXURN|ISR_TXDESC)) {
			/* Sweep up transmit descriptors. */
			sip_txintr(sc);

			if (isr & ISR_TXURN) {
				u_int32_t thresh;

				printf("%s: transmit FIFO underrun",
				    sc->sc_dev.dv_xname);

				thresh = sc->sc_tx_drain_thresh + 1;
				if (thresh <= TXCFG_DRTH &&
				    (thresh * 32) <= (SIP_TXFIFO_SIZE -
				     (sc->sc_tx_fill_thresh * 32))) {
					printf("; increasing Tx drain "
					    "threshold to %u bytes\n",
					    thresh * 32);
					sc->sc_tx_drain_thresh = thresh;
					(void) sip_init(sc);
				} else {
					(void) sip_init(sc);
					printf("\n");
				}
			}
		}

		if (sc->sc_imr & (ISR_PAUSE_END|ISR_PAUSE_ST)) {
			if (isr & ISR_PAUSE_ST) {
				sc->sc_flags |= SIPF_PAUSED;
				ifp->if_flags |= IFF_OACTIVE;
			}
			if (isr & ISR_PAUSE_END) {
				sc->sc_flags &= ~SIPF_PAUSED;
				ifp->if_flags &= ~IFF_OACTIVE;
			}
		}

		if (isr & ISR_HIBERR) {
#define	PRINTERR(bit, str)						\
			if (isr & (bit))				\
				printf("%s: %s\n", sc->sc_dev.dv_xname, str)
			PRINTERR(ISR_DPERR, "parity error");
			PRINTERR(ISR_SSERR, "system error");
			PRINTERR(ISR_RMABT, "master abort");
			PRINTERR(ISR_RTABT, "target abort");
			PRINTERR(ISR_RXSOVR, "receive status FIFO overrun");
			(void) sip_init(sc);
#undef PRINTERR
		}
	}

	/* Try to get more packets going. */
	sip_start(ifp);

	return (handled);
}

/*
 * sip_txintr:
 *
 *	Helper; handle transmit interrupts.
 */
void
sip_txintr(sc)
	struct sip_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sip_txsoft *txs;
	u_int32_t cmdsts;

	if ((sc->sc_flags & SIPF_PAUSED) == 0)
		ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		SIP_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		cmdsts = sc->sc_txdescs[txs->txs_lastdesc].sipd_cmdsts;
		if (cmdsts & CMDSTS_OWN)
			break;

		SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs, txs_q);

		sc->sc_txfree += txs->txs_dmamap->dm_nsegs;

		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;

		SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);

		/*
		 * Check for errors and collisions.  Note the status bits
		 * must be tested against cmdsts; the bare macro names are
		 * non-zero constants and would always appear "set".
		 */
		if (cmdsts &
		    (CMDSTS_Tx_TXA|CMDSTS_Tx_TFU|CMDSTS_Tx_ED|CMDSTS_Tx_EC)) {
			ifp->if_oerrors++;
			if (cmdsts & CMDSTS_Tx_EC)
				ifp->if_collisions += 16;
			if (ifp->if_flags & IFF_DEBUG) {
				if (cmdsts & CMDSTS_Tx_ED)
					printf("%s: excessive deferral\n",
					    sc->sc_dev.dv_xname);
				if (cmdsts & CMDSTS_Tx_EC)
					printf("%s: excessive collisions\n",
					    sc->sc_dev.dv_xname);
			}
		} else {
			/* Packet was transmitted successfully. */
			ifp->if_opackets++;
			ifp->if_collisions += CMDSTS_COLLISIONS(cmdsts);
		}
	}

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (txs == NULL)
		ifp->if_timer = 0;
}

/*
 * sip_rxintr:
 *
 *	Helper; handle receive interrupts.
 */
void
sip_rxintr(sc)
	struct sip_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_header *eh;
	struct sip_rxsoft *rxs;
	struct mbuf *m;
	u_int32_t cmdsts;
	int i, len;

	for (i = sc->sc_rxptr;; i = SIP_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		SIP_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		cmdsts = sc->sc_rxdescs[i].sipd_cmdsts;

		/*
		 * NOTE: OWN is set if owned by _consumer_.  We're the
		 * consumer of the receive ring, so if the bit is clear,
		 * we have processed all of the packets.
		 */
		if ((cmdsts & CMDSTS_OWN) == 0) {
			/*
			 * We have processed all of the receive buffers.
			 */
			break;
		}

		/*
		 * If any collisions were seen on the wire, count one.
		 */
		if (cmdsts & CMDSTS_Rx_COL)
			ifp->if_collisions++;

		/*
		 * If an error occurred, update stats, clear the status
		 * word, and leave the packet buffer in place.  It will
		 * simply be reused the next time the ring comes around.
		 */
		if (cmdsts & (CMDSTS_Rx_RXA|CMDSTS_Rx_LONG|CMDSTS_Rx_RUNT|
		    CMDSTS_Rx_ISE|CMDSTS_Rx_CRCE|CMDSTS_Rx_FAE)) {
			ifp->if_ierrors++;
			if ((cmdsts & CMDSTS_Rx_RXA) != 0 &&
			    (cmdsts & CMDSTS_Rx_RXO) == 0) {
				/* Receive overrun handled elsewhere. */
				printf("%s: receive descriptor error\n",
				    sc->sc_dev.dv_xname);
			}
#define	PRINTERR(bit, str)						\
			if (cmdsts & (bit))				\
				printf("%s: %s\n", sc->sc_dev.dv_xname, str)
			PRINTERR(CMDSTS_Rx_LONG, "packet too long");
			PRINTERR(CMDSTS_Rx_RUNT, "runt packet");
			PRINTERR(CMDSTS_Rx_ISE, "invalid symbol error");
			PRINTERR(CMDSTS_Rx_CRCE, "CRC error");
			PRINTERR(CMDSTS_Rx_FAE, "frame alignment error");
#undef PRINTERR
			SIP_INIT_RXDESC(sc, i);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * No errors; receive the packet.  Note, the SiS 900
		 * includes the CRC with every packet; trim it.
		 */
		len = CMDSTS_SIZE(cmdsts) - ETHER_CRC_LEN;

#ifdef __NO_STRICT_ALIGNMENT
		/*
		 * If the packet is small enough to fit in a
		 * single header mbuf, allocate one and copy
		 * the data into it.  This greatly reduces
		 * memory consumption when we receive lots
		 * of small packets.
		 *
		 * Otherwise, we add a new buffer to the receive
		 * chain.  If this fails, we drop the packet and
		 * recycle the old buffer.
		 */
		if (sip_copy_small != 0 && len <= MHLEN) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			memcpy(mtod(m, caddr_t),
			    mtod(rxs->rxs_mbuf, caddr_t), len);
			SIP_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
		} else {
			m = rxs->rxs_mbuf;
			if (sip_add_rxbuf(sc, i) != 0) {
 dropit:
				ifp->if_ierrors++;
				SIP_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat,
				    rxs->rxs_dmamap, 0,
				    rxs->rxs_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}
		}
#else
		/*
		 * The SiS 900's receive buffers must be 4-byte aligned.
		 * But this means that the data after the Ethernet header
		 * is misaligned.  We must allocate a new buffer and
		 * copy the data, shifted forward 2 bytes.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
 dropit:
			ifp->if_ierrors++;
			SIP_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
		if (len > (MHLEN - 2)) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				goto dropit;
			}
		}
		m->m_data += 2;

		/*
		 * Note that we use clusters for incoming frames, so the
		 * buffer is virtually contiguous.
		 */
		memcpy(mtod(m, caddr_t), mtod(rxs->rxs_mbuf, caddr_t), len);

		/* Allow the receive descriptor to continue using its mbuf. */
		SIP_INIT_RXDESC(sc, i);
		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif /* __NO_STRICT_ALIGNMENT */

		ifp->if_ipackets++;
		eh = mtod(m, struct ether_header *);
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

#if NBPFILTER > 0
		/*
		 * Pass this up to any BPF listeners, but only
		 * pass it up the stack if it's for us.
		 */
		if (ifp->if_bpf) {
			bpf_mtap(ifp->if_bpf, m);
			if ((ifp->if_flags & IFF_PROMISC) != 0 &&
			    (cmdsts & CMDSTS_Rx_DEST) == CMDSTS_Rx_DEST_REJ) {
				m_freem(m);
				continue;
			}
		}
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;
}

/*
 * sip_tick:
 *
 *	One second timer, used to tick the MII.
 */
void
sip_tick(arg)
	void *arg;
{
	struct sip_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout(sip_tick, sc, hz);
}

/*
 * sip_reset:
 *
 *	Perform a soft reset on the SiS 900.
 */
void
sip_reset(sc)
	struct sip_softc *sc;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	int i;

	bus_space_write_4(st, sh, SIP_CR, CR_RST);

	for (i = 0; i < 1000; i++) {
		if ((bus_space_read_4(st, sh, SIP_ISR) &
		     (ISR_TXRCMP|ISR_RXRCMP)) == (ISR_TXRCMP|ISR_RXRCMP))
			return;
		delay(2);
	}

	printf("%s: reset failed to complete\n", sc->sc_dev.dv_xname);
}

/*
 * sip_init:
 *
 *	Initialize the interface.  Must be called at splnet().
 */
int
sip_init(sc)
	struct sip_softc *sc;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sip_txsoft *txs;
	struct sip_rxsoft *rxs;
	struct sip_desc *sipd;
	u_int32_t cfg;
	int i, error = 0;

	/*
	 * Cancel any pending I/O.
	 */
	sip_stop(sc, 0);

	/*
	 * Reset the chip to a known state.
	 */
	sip_reset(sc);

	/*
	 * Initialize the transmit descriptor ring.
	 */
	for (i = 0; i < SIP_NTXDESC; i++) {
		sipd = &sc->sc_txdescs[i];
		memset(sipd, 0, sizeof(struct sip_desc));
		sipd->sipd_link = SIP_CDTXADDR(sc, SIP_NEXTTX(i));
	}
	SIP_CDTXSYNC(sc, 0, SIP_NTXDESC,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = SIP_NTXDESC;
	sc->sc_txnext = 0;

	/*
	 * Initialize the transmit job descriptors.
	 */
	SIMPLEQ_INIT(&sc->sc_txfreeq);
	SIMPLEQ_INIT(&sc->sc_txdirtyq);
	for (i = 0; i < SIP_TXQUEUELEN; i++) {
		txs = &sc->sc_txsoft[i];
		txs->txs_mbuf = NULL;
		SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	for (i = 0; i < SIP_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = sip_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				sip_rxdrain(sc);
				goto out;
			}
		}
	}
	sc->sc_rxptr = 0;

	/*
	 * Initialize the configuration register: aggressive PCI
	 * bus request algorithm, default backoff, default OW timer,
	 * default parity error detection.
	 */
	cfg = 0;
#if BYTE_ORDER == BIG_ENDIAN
	/*
	 * ...descriptors in big-endian mode.
	 */
	cfg |= CFG_BEM;
#endif
	bus_space_write_4(st, sh, SIP_CFG, cfg);

	/*
	 * Initialize the transmit fill and drain thresholds if
	 * we have never done so.
	 */
	if (sc->sc_tx_fill_thresh == 0) {
		/*
		 * XXX This value should be tuned.  This is the
		 * minimum (32 bytes), and we may be able to
		 * improve performance by increasing it.
		 */
		sc->sc_tx_fill_thresh = 1;
	}
	if (sc->sc_tx_drain_thresh == 0) {
		/*
		 * Start at a drain threshold of 128 bytes.  We will
		 * increase it if a DMA underrun occurs.
		 *
		 * XXX The minimum value of this variable should be
		 * tuned.  We may be able to improve performance
		 * by starting with a lower value.  That, however,
		 * may trash the first few outgoing packets if the
		 * PCI bus is saturated.
		 */
		sc->sc_tx_drain_thresh = 4;
	}
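
	/*
	 * Worked example of the units involved: both thresholds are kept
	 * in the hardware's 32-byte granularity, so the fill threshold of
	 * 1 above is 32 bytes and the drain threshold of 4 is the 128
	 * bytes mentioned in the comment.  This is also why sip_intr()
	 * reports the new drain threshold as "thresh * 32" bytes when it
	 * bumps the value after a Tx FIFO underrun.
	 */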

	/*
	 * Initialize the prototype TXCFG register.
	 */
	sc->sc_txcfg = TXCFG_ATP | TXCFG_MXDMA_512 |
	    (sc->sc_tx_fill_thresh << TXCFG_FLTH_SHIFT) |
	    sc->sc_tx_drain_thresh;
	bus_space_write_4(st, sh, SIP_TXCFG, sc->sc_txcfg);

	/*
	 * Initialize the receive drain threshold if we have never
	 * done so.
	 */
	if (sc->sc_rx_drain_thresh == 0) {
		/*
		 * XXX This value should be tuned.  This is set to the
		 * maximum of 248 bytes, and we may be able to improve
		 * performance by decreasing it (although we should never
		 * set this value lower than 2; 14 bytes are required to
		 * filter the packet).
		 */
		sc->sc_rx_drain_thresh = RXCFG_DRTH >> RXCFG_DRTH_SHIFT;
	}

	/*
	 * Initialize the prototype RXCFG register.
	 */
	sc->sc_rxcfg = RXCFG_MXDMA_512 |
	    (sc->sc_rx_drain_thresh << RXCFG_DRTH_SHIFT);
	bus_space_write_4(st, sh, SIP_RXCFG, sc->sc_rxcfg);

	/* Set up the receive filter. */
	sip_set_filter(sc);

	/*
	 * Give the transmit and receive rings to the chip.
	 */
	bus_space_write_4(st, sh, SIP_TXDP, SIP_CDTXADDR(sc, sc->sc_txnext));
	bus_space_write_4(st, sh, SIP_RXDP, SIP_CDRXADDR(sc, sc->sc_rxptr));

	/*
	 * Initialize the interrupt mask.
	 */
	sc->sc_imr = ISR_DPERR|ISR_SSERR|ISR_RMABT|ISR_RTABT|ISR_RXSOVR|
	    ISR_TXURN|ISR_TXDESC|ISR_RXORN|ISR_RXIDLE|ISR_RXDESC;
	bus_space_write_4(st, sh, SIP_IMR, sc->sc_imr);

	/*
	 * Set the current media.  Do this after initializing the prototype
	 * IMR, since sip_mii_statchg() modifies the IMR for 802.3x flow
	 * control.
	 */
	mii_mediachg(&sc->sc_mii);

	/*
	 * Enable interrupts.
	 */
	bus_space_write_4(st, sh, SIP_IER, IER_IE);

	/*
	 * Start the transmit and receive processes.
	 */
	bus_space_write_4(st, sh, SIP_CR, CR_RXE | CR_TXE);

	/*
	 * Start the one second MII clock.
	 */
	timeout(sip_tick, sc, hz);

	/*
	 * ...all done!
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error)
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	return (error);
}

/*
 * sip_rxdrain:
 *
 *	Drain the receive queue.
 */
void
sip_rxdrain(sc)
	struct sip_softc *sc;
{
	struct sip_rxsoft *rxs;
	int i;

	for (i = 0; i < SIP_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}

/*
 * sip_stop:
 *
 *	Stop transmission on the interface.
 */
void
sip_stop(sc, drain)
	struct sip_softc *sc;
	int drain;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sip_txsoft *txs;
	u_int32_t cmdsts = 0;		/* DEBUG */

	/*
	 * Stop the one second clock.
	 */
	untimeout(sip_tick, sc);

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	/*
	 * Disable interrupts.
	 */
	bus_space_write_4(st, sh, SIP_IER, 0);

	/*
	 * Stop receiver and transmitter.
	 */
	bus_space_write_4(st, sh, SIP_CR, CR_RXD | CR_TXD);

	/*
	 * Release any queued transmit buffers.
	 */
	while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		if ((ifp->if_flags & IFF_DEBUG) != 0 &&
		    SIMPLEQ_NEXT(txs, txs_q) == NULL &&
		    (sc->sc_txdescs[txs->txs_lastdesc].sipd_cmdsts &
		     CMDSTS_INTR) == 0)
			printf("%s: sip_stop: last descriptor does not "
			    "have INTR bit set\n", sc->sc_dev.dv_xname);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs, txs_q);
#ifdef DIAGNOSTIC
		if (txs->txs_mbuf == NULL) {
			printf("%s: dirty txsoft with no mbuf chain\n",
			    sc->sc_dev.dv_xname);
			panic("sip_stop");
		}
#endif
		cmdsts |=		/* DEBUG */
		    sc->sc_txdescs[txs->txs_lastdesc].sipd_cmdsts;
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
		SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	if (drain) {
		/*
		 * Release the receive buffers.
		 */
		sip_rxdrain(sc);
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	if ((ifp->if_flags & IFF_DEBUG) != 0 &&
	    (cmdsts & CMDSTS_INTR) == 0 && sc->sc_txfree != SIP_NTXDESC)
		printf("%s: sip_stop: no INTR bits set in dirty tx "
		    "descriptors\n", sc->sc_dev.dv_xname);
}

/*
 * sip_read_eeprom:
 *
 *	Read data from the serial EEPROM.
 */
void
sip_read_eeprom(sc, word, wordcnt, data)
	struct sip_softc *sc;
	int word, wordcnt;
	u_int16_t *data;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	u_int16_t reg;
	int i, x;

	for (i = 0; i < wordcnt; i++) {
		/* Send CHIP SELECT. */
		reg = EROMAR_EECS;
		bus_space_write_4(st, sh, SIP_EROMAR, reg);

		/* Shift in the READ opcode. */
		for (x = 3; x > 0; x--) {
			if (SIP_EEPROM_OPC_READ & (1 << (x - 1)))
				reg |= EROMAR_EEDI;
			else
				reg &= ~EROMAR_EEDI;
			bus_space_write_4(st, sh, SIP_EROMAR, reg);
			bus_space_write_4(st, sh, SIP_EROMAR,
			    reg | EROMAR_EESK);
			delay(4);
			bus_space_write_4(st, sh, SIP_EROMAR, reg);
			delay(4);
		}

		/* Shift in address. */
		for (x = 6; x > 0; x--) {
			if ((word + i) & (1 << (x - 1)))
				reg |= EROMAR_EEDI;
			else
				reg &= ~EROMAR_EEDI;
			bus_space_write_4(st, sh, SIP_EROMAR, reg);
			bus_space_write_4(st, sh, SIP_EROMAR,
			    reg | EROMAR_EESK);
			delay(4);
			bus_space_write_4(st, sh, SIP_EROMAR, reg);
			delay(4);
		}

		/* Shift out data. */
		reg = EROMAR_EECS;
		data[i] = 0;
		for (x = 16; x > 0; x--) {
			bus_space_write_4(st, sh, SIP_EROMAR,
			    reg | EROMAR_EESK);
			delay(4);
			if (bus_space_read_4(st, sh, SIP_EROMAR) & EROMAR_EEDO)
				data[i] |= (1 << (x - 1));
			bus_space_write_4(st, sh, SIP_EROMAR, reg);
		}

		/* Clear CHIP SELECT. */
		bus_space_write_4(st, sh, SIP_EROMAR, 0);
		delay(4);
	}
}
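
/*
 * For reference, each loop iteration above bit-bangs one READ
 * transaction on what behaves like a 93C46-style serial EEPROM (the
 * 3-bit opcode / 6-bit address / 16-bit data framing suggests as
 * much), MSB first: raise chip select, clock out the READ opcode,
 * clock out the word address, then clock in 16 data bits.  E.g. the
 * 3-word Ethernet-ID fetch in sip_attach() performs three such
 * transactions, for consecutive word addresses.
 */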

/*
 * sip_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
int
sip_add_rxbuf(sc, idx)
	struct sip_softc *sc;
	int idx;
{
	struct sip_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("sip_add_rxbuf");		/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	SIP_INIT_RXDESC(sc, idx);

	return (0);
}

/*
 * sip_set_filter:
 *
 *	Set up the receive filter.
 */
void
sip_set_filter(sc)
	struct sip_softc *sc;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int8_t *cp;
	u_int32_t crc, mchash[8];
	int len;
	static const u_int32_t crctab[] = {
		0x00000000, 0x1db71064, 0x3b6e20c8, 0x26d930ac,
		0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
		0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
		0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c
	};

	/*
	 * Initialize the prototype RFCR.
	 */
	sc->sc_rfcr = RFCR_RFEN;
	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_rfcr |= RFCR_AAB;
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rfcr |= RFCR_AAP;
		goto allmulti;
	}

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator, and then using the high-order
	 * 7 bits as an index into the 128-bit multicast hash table (only
	 * the lower 16 bits of each 32-bit multicast hash register are
	 * valid).  The high-order bits select the register, while the
	 * rest of the bits select the bit within the register.
	 */

	memset(mchash, 0, sizeof(mchash));

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

		cp = enm->enm_addrlo;
		crc = 0xffffffff;
		for (len = sizeof(enm->enm_addrlo); --len >= 0;) {
			crc ^= *cp++;
			crc = (crc >> 4) ^ crctab[crc & 0xf];
			crc = (crc >> 4) ^ crctab[crc & 0xf];
		}
		/* Just want the 7 most significant bits. */
		crc >>= 25;

		/* Set the corresponding bit in the hash table. */
		mchash[crc >> 4] |= 1 << (crc & 0xf);

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto setit;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	sc->sc_rfcr |= RFCR_AAM;

 setit:
#define	FILTER_EMIT(addr, data)						\
	bus_space_write_4(st, sh, SIP_RFCR, (addr));			\
	bus_space_write_4(st, sh, SIP_RFDR, (data))

	/*
	 * Disable the receive filter, and program the node address.
	 */
	cp = LLADDR(ifp->if_sadl);
	FILTER_EMIT(RFCR_RFADDR_NODE0, (cp[1] << 8) | cp[0]);
	FILTER_EMIT(RFCR_RFADDR_NODE2, (cp[3] << 8) | cp[2]);
	FILTER_EMIT(RFCR_RFADDR_NODE4, (cp[5] << 8) | cp[4]);

	if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
		/*
		 * Program the multicast hash table.
		 */
		FILTER_EMIT(RFCR_RFADDR_MC0, mchash[0]);
		FILTER_EMIT(RFCR_RFADDR_MC1, mchash[1]);
		FILTER_EMIT(RFCR_RFADDR_MC2, mchash[2]);
		FILTER_EMIT(RFCR_RFADDR_MC3, mchash[3]);
		FILTER_EMIT(RFCR_RFADDR_MC4, mchash[4]);
		FILTER_EMIT(RFCR_RFADDR_MC5, mchash[5]);
		FILTER_EMIT(RFCR_RFADDR_MC6, mchash[6]);
		FILTER_EMIT(RFCR_RFADDR_MC7, mchash[7]);
	}
#undef FILTER_EMIT

	/*
	 * Re-enable the receive filter.
	 */
	bus_space_write_4(st, sh, SIP_RFCR, sc->sc_rfcr);
}
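
/*
 * Illustrative sketch (not compiled in): the hash computation from
 * sip_set_filter() pulled out into a hypothetical helper.  The 7-bit
 * result names one of the 128 filter bits; the top 3 bits select one
 * of the 8 16-bit hash registers and the low 4 bits the bit within
 * it, matching the "mchash[crc >> 4] |= 1 << (crc & 0xf)" step above.
 */
#if 0
static u_int32_t
sip_mchash(const u_int8_t *addr)
{
	/* Same nibble-wide CRC-32 table as in sip_set_filter(). */
	static const u_int32_t crctab[] = {
		0x00000000, 0x1db71064, 0x3b6e20c8, 0x26d930ac,
		0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
		0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
		0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c
	};
	u_int32_t crc = 0xffffffff;
	int i;

	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		crc ^= addr[i];
		crc = (crc >> 4) ^ crctab[crc & 0xf];
		crc = (crc >> 4) ^ crctab[crc & 0xf];
	}
	return (crc >> 25);		/* 7 most significant bits */
}
#endif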

/*
 * sip_mii_readreg:	[mii interface function]
 *
 *	Read a PHY register on the MII.
 */
int
sip_mii_readreg(self, phy, reg)
	struct device *self;
	int phy, reg;
{
	struct sip_softc *sc = (struct sip_softc *) self;
	u_int32_t enphy;

	/*
	 * The SiS 900 has only an internal PHY on the MII.  Only allow
	 * MII address 0.
	 */
	if (sc->sc_model == PCI_PRODUCT_SIS_900 && phy != 0)
		return (0);

	bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_ENPHY,
	    (phy << ENPHY_PHYADDR_SHIFT) | (reg << ENPHY_REGADDR_SHIFT) |
	    ENPHY_RWCMD | ENPHY_ACCESS);
	do {
		enphy = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_ENPHY);
	} while (enphy & ENPHY_ACCESS);
	return ((enphy & ENPHY_PHYDATA) >> ENPHY_DATA_SHIFT);
}

/*
 * sip_mii_writereg:	[mii interface function]
 *
 *	Write a PHY register on the MII.
 */
void
sip_mii_writereg(self, phy, reg, val)
	struct device *self;
	int phy, reg, val;
{
	struct sip_softc *sc = (struct sip_softc *) self;
	u_int32_t enphy;

	/*
	 * The SiS 900 has only an internal PHY on the MII.  Only allow
	 * MII address 0.
	 */
	if (sc->sc_model == PCI_PRODUCT_SIS_900 && phy != 0)
		return;

	bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_ENPHY,
	    (val << ENPHY_DATA_SHIFT) | (phy << ENPHY_PHYADDR_SHIFT) |
	    (reg << ENPHY_REGADDR_SHIFT) | ENPHY_ACCESS);
	do {
		enphy = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_ENPHY);
	} while (enphy & ENPHY_ACCESS);
}

/*
 * sip_mii_statchg:	[mii interface function]
 *
 *	Callback from MII layer when media changes.
 */
void
sip_mii_statchg(self)
	struct device *self;
{
	struct sip_softc *sc = (struct sip_softc *) self;
	u_int32_t flowctl;

	/*
	 * Update TXCFG for full-duplex operation.
	 */
	if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0)
		sc->sc_txcfg |= (TXCFG_CSI | TXCFG_HBI);
	else
		sc->sc_txcfg &= ~(TXCFG_CSI | TXCFG_HBI);

	/*
	 * Update RXCFG for full-duplex or loopback.
	 */
	if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0 ||
	    IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_LOOP)
		sc->sc_rxcfg |= RXCFG_ATX;
	else
		sc->sc_rxcfg &= ~RXCFG_ATX;

	/*
	 * Update IMR for use of 802.3x flow control.
	 */
	if ((sc->sc_mii.mii_media_active & IFM_FLOW) != 0) {
		sc->sc_imr |= (ISR_PAUSE_END|ISR_PAUSE_ST);
		flowctl = FLOWCTL_FLOWEN;
	} else {
		sc->sc_imr &= ~(ISR_PAUSE_END|ISR_PAUSE_ST);
		flowctl = 0;
	}

	bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_TXCFG, sc->sc_txcfg);
	bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RXCFG, sc->sc_rxcfg);
	bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_IMR, sc->sc_imr);
	bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_FLOWCTL, flowctl);

	/* XXX Update ifp->if_baudrate */
}

/*
 * sip_mediastatus:	[ifmedia interface function]
 *
 *	Get the current interface media status.
 */
void
sip_mediastatus(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct sip_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
}

/*
 * sip_mediachange:	[ifmedia interface function]
 *
 *	Set hardware to newly-selected media.
 */
int
sip_mediachange(ifp)
	struct ifnet *ifp;
{
	struct sip_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP)
		mii_mediachg(&sc->sc_mii);
	return (0);
}