if_sip.c revision 1.1.2.1 1 /* $NetBSD: if_sip.c,v 1.1.2.1 1999/06/21 01:18:34 thorpej Exp $ */
2
3 /*-
4 * Copyright (c) 1999 Network Computer, Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of Network Computer, Inc. nor the names of its
16 * contributors may be used to endorse or promote products derived
17 * from this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY NETWORK COMPUTER, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * Device driver for the Silicon Integrated Systems SiS900 10/100 PCI
34 * Ethernet controller.
35 *
36 * Written by Jason R. Thorpe for Network Computer, Inc.
37 */
38
39 #include "opt_inet.h"
40 #include "opt_ns.h"
41 #include "bpfilter.h"
42
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/mbuf.h>
46 #include <sys/malloc.h>
47 #include <sys/kernel.h>
48 #include <sys/socket.h>
49 #include <sys/ioctl.h>
50 #include <sys/errno.h>
51 #include <sys/device.h>
52 #include <sys/queue.h>
53
54 #include <vm/vm.h> /* for PAGE_SIZE */
55
56 #include <net/if.h>
57 #include <net/if_dl.h>
58 #include <net/if_media.h>
59 #include <net/if_ether.h>
60
61 #if NBPFILTER > 0
62 #include <net/bpf.h>
63 #endif
64
65 #ifdef INET
66 #include <netinet/in.h>
67 #include <netinet/if_inarp.h>
68 #endif
69
70 #ifdef NS
71 #include <netns/ns.h>
72 #include <netns/ns_if.h>
73 #endif
74
75 #include <machine/bus.h>
76 #include <machine/intr.h>
77
78 #include <dev/mii/miivar.h>
79
80 #include <dev/pci/pcireg.h>
81 #include <dev/pci/pcivar.h>
82 #include <dev/pci/pcidevs.h>
83
84 #include <dev/pci/if_sipreg.h>
85
/*
 * Devices supported by this driver.  The table is scanned by
 * sip_lookup(); the all-zero entry with a NULL name terminates it.
 */
const struct sip_product {
	pci_vendor_id_t		sip_vendor;	/* PCI vendor ID */
	pci_product_id_t	sip_product;	/* PCI product ID */
	const char		*sip_name;	/* name printed at attach time */
} sip_products[] = {
	{ PCI_VENDOR_SIS,	PCI_PRODUCT_SIS_900,
	  "SiS 900 10/100 Ethernet" },

	{ 0,			0,
	  NULL },
};
100
/*
 * Transmit descriptor list size.  This is arbitrary, but allocate
 * enough descriptors for 64 pending transmissions, and 16 segments
 * per packet.  This MUST work out to a power of 2 (SIP_NEXTTX()
 * below relies on masking for cheap modular increment).
 */
#define	SIP_NTXSEGS		16

#define	SIP_TXQUEUELEN		64
#define	SIP_NTXDESC		(SIP_TXQUEUELEN * SIP_NTXSEGS)
#define	SIP_NTXDESC_MASK	(SIP_NTXDESC - 1)
#define	SIP_NEXTTX(x)		(((x) + 1) & SIP_NTXDESC_MASK)

/*
 * Receive descriptor list size.  We have one Rx buffer per incoming
 * packet, so this logic is a little simpler.  Also a power of 2 for
 * the same masking trick.
 */
#define	SIP_NRXDESC		64
#define	SIP_NRXDESC_MASK	(SIP_NRXDESC - 1)
#define	SIP_NEXTRX(x)		(((x) + 1) & SIP_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the SiS900 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier (one dmamap load, and simple offset arithmetic below).
 */
struct sip_control_data {
	/*
	 * The transmit descriptors.
	 */
	struct sip_desc scd_txdescs[SIP_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	struct sip_desc scd_rxdescs[SIP_NRXDESC];
};

/*
 * Byte offsets of individual descriptors within the control data
 * clump; combined with the clump's bus address to form device-visible
 * descriptor addresses (see SIP_CDTXADDR()/SIP_CDRXADDR()).
 */
#define	SIP_CDOFF(x)	offsetof(struct sip_control_data, x)
#define	SIP_CDTXOFF(x)	SIP_CDOFF(scd_txdescs[(x)])
#define	SIP_CDRXOFF(x)	SIP_CDOFF(scd_rxdescs[(x)])
141
/*
 * Software state for transmit jobs.  One of these tracks each
 * in-flight packet: its mbuf chain, the DMA map covering it, and
 * the span of hardware descriptors it occupies.
 */
struct sip_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	SIMPLEQ_ENTRY(sip_txsoft) txs_q; /* link on free or dirty queue */
};

/* Queue head type for the free and dirty txsoft lists in the softc. */
SIMPLEQ_HEAD(sip_txsq, sip_txsoft);

/*
 * Software state for receive jobs.  One per Rx descriptor; the mbuf
 * cluster is handed to the chip via the DMA map.
 */
struct sip_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};
162
/*
 * Software state per device.
 */
struct sip_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */

	void *sc_ih;			/* interrupt cookie */

	struct mii_data sc_mii;		/* MII/media information */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	/* Bus address of the control-data clump (single segment). */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/*
	 * Software state for transmit and receive descriptors.
	 */
	struct sip_txsoft sc_txsoft[SIP_TXQUEUELEN];
	struct sip_rxsoft sc_rxsoft[SIP_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct sip_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->scd_txdescs
#define	sc_rxdescs	sc_control_data->scd_rxdescs

	u_int32_t sc_txcfg;		/* prototype TXCFG register */
	u_int32_t sc_rxcfg;		/* prototype RXCFG register */
	u_int32_t sc_imr;		/* prototype IMR register */
	u_int32_t sc_rfcr;		/* prototype RFCR register */

	u_int32_t sc_tx_fill_thresh;	/* transmit fill threshold */
	u_int32_t sc_tx_drain_thresh;	/* transmit drain threshold */

	u_int32_t sc_rx_drain_thresh;	/* receive drain threshold */

	int	sc_flags;		/* misc. flags; see below */

	int	sc_txfree;		/* number of free Tx descriptors */
	int	sc_txnext;		/* next ready Tx descriptor */

	struct sip_txsq sc_txfreeq;	/* free Tx descsofts */
	struct sip_txsq sc_txdirtyq;	/* dirty Tx descsofts */

	int	sc_rxptr;		/* next ready Rx descriptor/descsoft */
};
214
/* sc_flags */
#define	SIPF_PAUSED	0x00000001	/* paused (802.3x flow control) */

/*
 * Device-visible bus addresses of individual Tx/Rx descriptors,
 * derived from the bus address of the control-data clump.
 */
#define	SIP_CDTXADDR(sc, x)	((sc)->sc_cddma + SIP_CDTXOFF((x)))
#define	SIP_CDRXADDR(sc, x)	((sc)->sc_cddma + SIP_CDRXOFF((x)))

/*
 * Sync `n' Tx descriptors starting at index `x'.  The range may wrap
 * past the end of the ring, in which case two bus_dmamap_sync() calls
 * are needed (the map covers the clump linearly).
 */
#define	SIP_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > SIP_NTXDESC) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    SIP_CDTXOFF(__x), sizeof(struct sip_desc) *		\
		    (SIP_NTXDESC - __x), (ops));			\
		__n -= (SIP_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    SIP_CDTXOFF(__x), sizeof(struct sip_desc) * __n, (ops));	\
} while (0)

/* Sync a single Rx descriptor (Rx never needs the wrap handling). */
#define	SIP_CDRXSYNC(sc, x, ops)					\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    SIP_CDRXOFF((x)), sizeof(struct sip_desc), (ops))

/*
 * (Re)initialize Rx descriptor `x': link it to the next descriptor,
 * point it at its mbuf cluster, hand ownership to the chip, and sync.
 * Note we rely on MCLBYTES being a power of two below.
 */
#define	SIP_INIT_RXDESC(sc, x)						\
do {									\
	struct sip_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	struct sip_desc *__sipd = &(sc)->sc_rxdescs[(x)];		\
									\
	__sipd->sipd_link = SIP_CDRXADDR((sc), SIP_NEXTRX((x)));	\
	__sipd->sipd_bufptr = __rxs->rxs_dmamap->dm_segs[0].ds_addr;	\
	__sipd->sipd_cmdsts = CMDSTS_INTR |				\
	    ((MCLBYTES - 1) & CMDSTS_SIZE_MASK);			\
	SIP_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
} while (0)
260
/* ifnet interface functions */
void	sip_start __P((struct ifnet *));
void	sip_watchdog __P((struct ifnet *));
int	sip_ioctl __P((struct ifnet *, u_long, caddr_t));

/* shutdown hook */
void	sip_shutdown __P((void *));

/* device control helpers */
void	sip_reset __P((struct sip_softc *));
void	sip_init __P((struct sip_softc *));
void	sip_stop __P((struct sip_softc *));
int	sip_add_rxbuf __P((struct sip_softc *, int));
void	sip_read_eeprom __P((struct sip_softc *, int, int, u_int16_t *));
void	sip_set_filter __P((struct sip_softc *));
void	sip_tick __P((void *));

/* interrupt handling */
int	sip_intr __P((void *));
void	sip_txintr __P((struct sip_softc *));
void	sip_rxintr __P((struct sip_softc *));

/* MII/media callbacks */
int	sip_mii_readreg __P((struct device *, int, int));
void	sip_mii_writereg __P((struct device *, int, int, int));
void	sip_mii_statchg __P((struct device *));

int	sip_mediachange __P((struct ifnet *));
void	sip_mediastatus __P((struct ifnet *, struct ifmediareq *));

/* autoconfiguration glue */
int	sip_match __P((struct device *, struct cfdata *, void *));
void	sip_attach __P((struct device *, struct device *, void *));

struct cfattach sip_ca = {
	sizeof(struct sip_softc), sip_match, sip_attach,
};

const struct sip_product *sip_lookup __P((const struct pci_attach_args *));
294
295 const struct sip_product *
296 sip_lookup(pa)
297 const struct pci_attach_args *pa;
298 {
299 const struct sip_product *sip;
300
301 for (sip = sip_products; sip->sip_name != NULL; sip++) {
302 if (PCI_VENDOR(pa->pa_id) == sip->sip_vendor &&
303 PCI_PRODUCT(pa->pa_id) == sip->sip_product)
304 return (sip);
305 }
306 return (NULL);
307 }
308
309 int
310 sip_match(parent, cf, aux)
311 struct device *parent;
312 struct cfdata *cf;
313 void *aux;
314 {
315 struct pci_attach_args *pa = aux;
316
317 if (sip_lookup(pa) != NULL)
318 return (1);
319
320 return (0);
321 }
322
323 void
324 sip_attach(parent, self, aux)
325 struct device *parent, *self;
326 void *aux;
327 {
328 struct sip_softc *sc = (struct sip_softc *) self;
329 struct pci_attach_args *pa = aux;
330 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
331 pci_chipset_tag_t pc = pa->pa_pc;
332 pci_intr_handle_t ih;
333 const char *intrstr = NULL;
334 bus_space_tag_t iot, memt;
335 bus_space_handle_t ioh, memh;
336 bus_dma_segment_t seg;
337 int ioh_valid, memh_valid;
338 int i, rseg, error;
339 const struct sip_product *sip;
340 pcireg_t pmode;
341 u_int16_t enaddr[ETHER_ADDR_LEN / 2];
342
343 sip = sip_lookup(pa);
344 if (sip == NULL) {
345 printf("\n");
346 panic("sip_attach: impossible");
347 }
348
349 printf(": %s\n", sip->sip_name);
350
351 /*
352 * Map the device.
353 */
354 ioh_valid = (pci_mapreg_map(pa, SIP_PCI_CFGIOA,
355 PCI_MAPREG_TYPE_IO, 0,
356 &iot, &ioh, NULL, NULL) == 0);
357 memh_valid = (pci_mapreg_map(pa, SIP_PCI_CFGMA,
358 PCI_MAPREG_TYPE_MEM|PCI_MAPREG_MEM_TYPE_32BIT, 0,
359 &memt, &memh, NULL, NULL) == 0);
360
361 if (memh_valid) {
362 sc->sc_st = memt;
363 sc->sc_sh = memh;
364 } else if (ioh_valid) {
365 sc->sc_st = iot;
366 sc->sc_sh = ioh;
367 } else {
368 printf("%s: unable to map device registers\n",
369 sc->sc_dev.dv_xname);
370 return;
371 }
372
373 sc->sc_dmat = pa->pa_dmat;
374
375 /* Enable bus mastering. */
376 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
377 pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) |
378 PCI_COMMAND_MASTER_ENABLE);
379
380 /* Get it out of power save mode if needed. */
381 if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, 0, 0)) {
382 pmode = pci_conf_read(pc, pa->pa_tag, SIP_PCI_CFGPMCSR) & 0x3;
383 if (pmode == 3) {
384 /*
385 * The card has lost all configuration data in
386 * this state, so punt.
387 */
388 printf("%s: unable to wake up from power state D3\n",
389 sc->sc_dev.dv_xname);
390 return;
391 }
392 if (pmode != 0) {
393 printf("%s: waking up from power state D%d\n",
394 sc->sc_dev.dv_xname, pmode);
395 pci_conf_write(pc, pa->pa_tag, SIP_PCI_CFGPMCSR, 0);
396 }
397 }
398
399 /*
400 * Map and establish our interrupt.
401 */
402 if (pci_intr_map(pc, pa->pa_intrtag, pa->pa_intrpin,
403 pa->pa_intrline, &ih)) {
404 printf("%s: unable to map interrupt\n", sc->sc_dev.dv_xname);
405 return;
406 }
407 intrstr = pci_intr_string(pc, ih);
408 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, sip_intr, sc);
409 if (sc->sc_ih == NULL) {
410 printf("%s: unable to establish interrupt",
411 sc->sc_dev.dv_xname);
412 if (intrstr != NULL)
413 printf(" at %s", intrstr);
414 printf("\n");
415 return;
416 }
417 printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);
418
419 SIMPLEQ_INIT(&sc->sc_txfreeq);
420 SIMPLEQ_INIT(&sc->sc_txdirtyq);
421
422 /*
423 * Allocate the control data structures, and create and load the
424 * DMA map for it.
425 */
426 if ((error = bus_dmamem_alloc(sc->sc_dmat,
427 sizeof(struct sip_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
428 0)) != 0) {
429 printf("%s: unable to allocate control data, error = %d\n",
430 sc->sc_dev.dv_xname, error);
431 goto fail_0;
432 }
433
434 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
435 sizeof(struct sip_control_data), (caddr_t *)&sc->sc_control_data,
436 BUS_DMA_COHERENT)) != 0) {
437 printf("%s: unable to map control data, error = %d\n",
438 sc->sc_dev.dv_xname, error);
439 goto fail_1;
440 }
441
442 if ((error = bus_dmamap_create(sc->sc_dmat,
443 sizeof(struct sip_control_data), 1,
444 sizeof(struct sip_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
445 printf("%s: unable to create control data DMA map, "
446 "error = %d\n", sc->sc_dev.dv_xname, error);
447 goto fail_2;
448 }
449
450 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
451 sc->sc_control_data, sizeof(struct sip_control_data), NULL,
452 0)) != 0) {
453 printf("%s: unable to load control data DMA map, error = %d\n",
454 sc->sc_dev.dv_xname, error);
455 goto fail_3;
456 }
457
458 /*
459 * Create the transmit buffer DMA maps.
460 */
461 for (i = 0; i < SIP_TXQUEUELEN; i++) {
462 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
463 SIP_NTXSEGS, MCLBYTES, 0, 0,
464 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
465 printf("%s: unable to create tx DMA map %d, "
466 "error = %d\n", sc->sc_dev.dv_xname, i, error);
467 goto fail_4;
468 }
469 }
470
471 /*
472 * Create the receive buffer DMA maps.
473 */
474 for (i = 0; i < SIP_NRXDESC; i++) {
475 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
476 MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
477 printf("%s: unable to create rx DMA map %d, "
478 "error = %d\n", sc->sc_dev.dv_xname, i, error);
479 goto fail_5;
480 }
481 }
482
483 /*
484 * Pre-allocate the receive buffers.
485 */
486 for (i = 0; i < SIP_NRXDESC; i++) {
487 if ((error = sip_add_rxbuf(sc, i)) != 0) {
488 printf("%s: unable to allocate or map rx buffer %d\n,"
489 " error = %d\n", sc->sc_dev.dv_xname, i, error);
490 goto fail_6;
491 }
492 }
493
494 /*
495 * Reset the chip to a known state.
496 */
497 sip_reset(sc);
498
499 /*
500 * Read the Ethernet address from the EEPROM.
501 */
502 sip_read_eeprom(sc, SIP_EEPROM_ETHERNET_ID0 >> 1,
503 sizeof(enaddr) / sizeof(enaddr[0]), enaddr);
504
505 printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
506 ether_sprintf((u_int8_t *)enaddr));
507
508 /*
509 * Initialize our media structures and probe the MII.
510 */
511 sc->sc_mii.mii_ifp = ifp;
512 sc->sc_mii.mii_readreg = sip_mii_readreg;
513 sc->sc_mii.mii_writereg = sip_mii_writereg;
514 sc->sc_mii.mii_statchg = sip_mii_statchg;
515 ifmedia_init(&sc->sc_mii.mii_media, 0, sip_mediachange,
516 sip_mediastatus);
517 mii_phy_probe(&sc->sc_dev, &sc->sc_mii, 0xffffffff);
518 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
519 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
520 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
521 } else
522 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
523
524 ifp = &sc->sc_ethercom.ec_if;
525 strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
526 ifp->if_softc = sc;
527 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
528 ifp->if_ioctl = sip_ioctl;
529 ifp->if_start = sip_start;
530 ifp->if_watchdog = sip_watchdog;
531
532 /*
533 * Attach the interface.
534 */
535 if_attach(ifp);
536 ether_ifattach(ifp, (u_int8_t *)enaddr);
537 #if NBPFILTER > 0
538 bpfattach(&sc->sc_ethercom.ec_if.if_bpf, ifp, DLT_EN10MB,
539 sizeof(struct ether_header));
540 #endif
541
542 /*
543 * Make sure the interface is shutdown during reboot.
544 */
545 sc->sc_sdhook = shutdownhook_establish(sip_shutdown, sc);
546 if (sc->sc_sdhook == NULL)
547 printf("%s: WARNING: unable to establish shutdown hook\n",
548 sc->sc_dev.dv_xname);
549 return;
550
551 /*
552 * Free any resources we've allocated during the failed attach
553 * attempt. Do this in reverse order and fall through.
554 */
555 fail_6:
556 for (i = 0; i < SIP_NRXDESC; i++) {
557 if (sc->sc_rxsoft[i].rxs_mbuf != NULL) {
558 bus_dmamap_unload(sc->sc_dmat,
559 sc->sc_rxsoft[i].rxs_dmamap);
560 m_freem(sc->sc_rxsoft[i].rxs_mbuf);
561 }
562 }
563 fail_5:
564 for (i = 0; i < SIP_NRXDESC; i++) {
565 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
566 bus_dmamap_destroy(sc->sc_dmat,
567 sc->sc_rxsoft[i].rxs_dmamap);
568 }
569 fail_4:
570 for (i = 0; i < SIP_TXQUEUELEN; i++) {
571 if (sc->sc_txsoft[i].txs_dmamap != NULL)
572 bus_dmamap_destroy(sc->sc_dmat,
573 sc->sc_txsoft[i].txs_dmamap);
574 }
575 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
576 fail_3:
577 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
578 fail_2:
579 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
580 sizeof(struct sip_control_data));
581 fail_1:
582 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
583 fail_0:
584 return;
585 }
586
/*
 * sip_shutdown:
 *
 *	Shutdown-hook callback; make sure the interface is stopped
 *	at reboot time.  `arg' is the softc registered with
 *	shutdownhook_establish() in sip_attach().
 */
void
sip_shutdown(arg)
	void *arg;
{

	sip_stop((struct sip_softc *)arg);
}
600
/*
 * sip_start: [ifnet interface function]
 *
 *	Start packet transmission on the interface.  Dequeues packets
 *	from if_snd, maps each into Tx descriptors (copying into a
 *	fresh mbuf if the chain doesn't fit in SIP_NTXSEGS segments),
 *	and finally hands the chain to the chip by setting the OWN bit
 *	on the first descriptor last — the order matters so the chip
 *	never sees a partially built chain.
 */
void
sip_start(ifp)
	struct ifnet *ifp;
{
	struct sip_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct sip_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, firsttx, nexttx, lasttx, ofree, seg;

	/*
	 * If we've been told to pause, don't transmit any more packets.
	 */
	if (sc->sc_flags & SIPF_PAUSED)
		ifp->if_flags |= IFF_OACTIVE;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_txfree;
	firsttx = sc->sc_txnext;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while ((txs = SIMPLEQ_FIRST(&sc->sc_txfreeq)) != NULL &&
	       sc->sc_txfree != 0) {
		/*
		 * Grab a packet off the queue.
		 */
		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the alloted number of segments, or we
		 * were short on resources.  In this case, we'll copy
		 * and try again.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				/* Requeue and retry on a later call. */
				IF_PREPEND(&ifp->if_snd, m0);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					IF_PREPEND(&ifp->if_snd, m0);
					break;
				}
			}
			/* Flatten the chain into the single new mbuf. */
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			m_freem(m0);
			m0 = m;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m0, BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, error);
				IF_PREPEND(&ifp->if_snd, m0);
				break;
			}
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.
		 */
		if (dmamap->dm_nsegs > sc->sc_txfree) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX is it worth it?
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			IF_PREPEND(&ifp->if_snd, m0);
			break;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptors.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = SIP_NEXTTX(nexttx)) {
			/*
			 * If this is the first descriptor we're
			 * enqueueing, don't set the OWN bit just
			 * yet.  That could cause a race condition.
			 * We'll do it below.
			 */
			sc->sc_txdescs[nexttx].sipd_bufptr =
			    dmamap->dm_segs[seg].ds_addr;
			sc->sc_txdescs[nexttx].sipd_cmdsts =
			    (nexttx == firsttx ? 0 : CMDSTS_OWN) |
			    CMDSTS_MORE | dmamap->dm_segs[seg].ds_len;
			lasttx = nexttx;
		}

		/* Clear the MORE bit on the last segment. */
		sc->sc_txdescs[lasttx].sipd_cmdsts &= ~CMDSTS_MORE;

		/* Sync the descriptors we're using. */
		SIP_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later,
		 * and remember what txdirty will be once the packet is
		 * done.
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_lastdesc = lasttx;

		/* Advance the tx pointer. */
		sc->sc_txfree -= dmamap->dm_nsegs;
		sc->sc_txnext = nexttx;

		SIMPLEQ_REMOVE_HEAD(&sc->sc_txfreeq, txs, txs_q);
		SIMPLEQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (txs == NULL || sc->sc_txfree == 0) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	/*
	 * NOTE: lasttx is only read below when at least one packet was
	 * enqueued this call, which is exactly when sc_txfree != ofree.
	 */
	if (sc->sc_txfree != ofree) {
		/*
		 * Cause a descriptor interrupt to happen on the
		 * last packet we enqueued.
		 */
		sc->sc_txdescs[lasttx].sipd_cmdsts |= CMDSTS_INTR;
		SIP_CDTXSYNC(sc, lasttx, 1,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * The entire packet chain is set up.  Give the
		 * first descriptor to the chip now.
		 */
		sc->sc_txdescs[firsttx].sipd_cmdsts |= CMDSTS_OWN;
		SIP_CDTXSYNC(sc, firsttx, 1,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Start the transmit process if it is not already running. */
		if ((bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_CR) &
		     CR_TXE) == 0) {
			bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_TXDP,
			    SIP_CDTXADDR(sc, firsttx));
			bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CR, CR_TXE);
		}

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
802
803 /*
804 * sip_watchdog: [ifnet interface function]
805 *
806 * Watchdog timer handler.
807 */
808 void
809 sip_watchdog(ifp)
810 struct ifnet *ifp;
811 {
812 struct sip_softc *sc = ifp->if_softc;
813
814 /*
815 * The chip seems to ignore the CMDSTS_INTR bit sometimes!
816 * If we get a timeout, try and sweep up transmit descriptors.
817 * If we manage to sweep them all up, ignore the lack of
818 * interrupt.
819 */
820 sip_txintr(sc);
821
822 if (sc->sc_txfree != SIP_NTXDESC) {
823 printf("%s: device timeout\n", sc->sc_dev.dv_xname);
824 ifp->if_oerrors++;
825
826 /* Reset the interface. */
827 sip_init(sc);
828 } else if (ifp->if_flags & IFF_DEBUG)
829 printf("%s: recovered from device timeout\n",
830 sc->sc_dev.dv_xname);
831
832 /* Try to get more packets going. */
833 sip_start(ifp);
834 }
835
/*
 * sip_ioctl: [ifnet interface function]
 *
 *	Handle control requests from the operator.  Runs at splnet();
 *	returns 0 on success or an errno value.  Always kicks the
 *	transmitter on the way out in case a request freed up slots.
 */
int
sip_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct sip_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			sip_init(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif /* INET */
#ifdef NS
		case AF_NS:
		    {
			struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;

			/*
			 * XNS: either take our station address as the
			 * host part, or program the given host part
			 * into the link-level address.
			 */
			if (ns_nullhost(*ina))
				ina->x_host = *(union ns_host *)
				    LLADDR(ifp->if_sadl);
			else
				memcpy(LLADDR(ifp->if_sadl),
				    ina->x_host.c_host, ifp->if_addrlen);
			sip_init(sc);
			break;
		    }
#endif /* NS */
		default:
			sip_init(sc);
			break;
		}
		break;

	case SIOCSIFMTU:
		if (ifr->ifr_mtu > ETHERMTU)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			sip_stop(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
			   (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			sip_init(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect the hardware state.
			 */
			sip_init(sc);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_ethercom) :
		    ether_delmulti(ifr, &sc->sc_ethercom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			sip_set_filter(sc);
			error = 0;
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = EINVAL;
		break;
	}

	/* Try to get more packets going. */
	sip_start(ifp);

	splx(s);
	return (error);
}
949
/*
 * sip_intr:
 *
 *	Interrupt service routine.  Loops reading ISR (which clears
 *	pending interrupts on read) until no bits we care about remain,
 *	dispatching to the Rx/Tx helpers and handling flow-control and
 *	fatal-error conditions.  Returns nonzero if the interrupt was
 *	ours.
 */
int
sip_intr(arg)
	void *arg;
{
	struct sip_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	u_int32_t isr;
	int handled = 0;

	for (;;) {
		/* Reading clears interrupt. */
		isr = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_ISR);
		if ((isr & sc->sc_imr) == 0)
			break;

		handled = 1;

		if (isr & (ISR_RXORN|ISR_RXIDLE|ISR_RXDESC)) {
			/* Grab any new packets. */
			sip_rxintr(sc);

			if (isr & ISR_RXORN) {
				printf("%s: receive FIFO overrun\n",
				    sc->sc_dev.dv_xname);

				/* XXX adjust rx_drain_thresh? */
			}

			if (isr & ISR_RXIDLE) {
				printf("%s: receive ring overrun\n",
				    sc->sc_dev.dv_xname);

				/* Get the receive process going again. */
				bus_space_write_4(sc->sc_st, sc->sc_sh,
				    SIP_RXDP, SIP_CDRXADDR(sc, sc->sc_rxptr));
				bus_space_write_4(sc->sc_st, sc->sc_sh,
				    SIP_CR, CR_RXE);
			}
		}

		if (isr & (ISR_TXURN|ISR_TXDESC)) {
			/* Sweep up transmit descriptors. */
			sip_txintr(sc);

			if (isr & ISR_TXURN) {
				u_int32_t thresh;

				printf("%s: transmit FIFO underrun",
				    sc->sc_dev.dv_xname);

				/*
				 * Try to bump the drain threshold (in
				 * units of 32 bytes), provided it stays
				 * within the register field and leaves
				 * room for the fill threshold in the
				 * FIFO; then reinitialize the chip.
				 */
				thresh = sc->sc_tx_drain_thresh + 1;
				if (thresh <= TXCFG_DRTH &&
				    (thresh * 32) <= (SIP_TXFIFO_SIZE -
				     (sc->sc_tx_fill_thresh * 32))) {
					printf("; increasing Tx drain "
					    "threshold to %u bytes\n",
					    thresh * 32);
					sc->sc_tx_drain_thresh = thresh;
					sip_init(sc);
				} else {
					sip_init(sc);
					printf("\n");
				}
			}
		}

		if (sc->sc_imr & (ISR_PAUSE_END|ISR_PAUSE_ST)) {
			/* 802.3x flow control: mirror pause state in flags. */
			if (isr & ISR_PAUSE_ST) {
				sc->sc_flags |= SIPF_PAUSED;
				ifp->if_flags |= IFF_OACTIVE;
			}
			if (isr & ISR_PAUSE_END) {
				sc->sc_flags &= ~SIPF_PAUSED;
				ifp->if_flags &= ~IFF_OACTIVE;
			}
		}

		if (isr & ISR_HIBERR) {
			/* Fatal bus error; report what we can and reset. */
#define	PRINTERR(bit, str)						\
			if (isr & (bit))				\
				printf("%s: %s\n", sc->sc_dev.dv_xname, str)
			PRINTERR(ISR_DPERR, "parity error");
			PRINTERR(ISR_SSERR, "system error");
			PRINTERR(ISR_RMABT, "master abort");
			PRINTERR(ISR_RTABT, "target abort");
			PRINTERR(ISR_RXSOVR, "receive status FIFO overrun");
			sip_init(sc);
#undef PRINTERR
		}
	}

	/* Try to get more packets going. */
	sip_start(ifp);

	return (handled);
}
1051
1052 /*
1053 * sip_txintr:
1054 *
1055 * Helper; handle transmit interrupts.
1056 */
1057 void
1058 sip_txintr(sc)
1059 struct sip_softc *sc;
1060 {
1061 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1062 struct sip_txsoft *txs;
1063 u_int32_t cmdsts;
1064
1065 if ((sc->sc_flags & SIPF_PAUSED) == 0)
1066 ifp->if_flags &= ~IFF_OACTIVE;
1067
1068 /*
1069 * Go through our Tx list and free mbufs for those
1070 * frames which have been transmitted.
1071 */
1072 while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
1073 SIP_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
1074 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1075
1076 cmdsts = sc->sc_txdescs[txs->txs_lastdesc].sipd_cmdsts;
1077 if (cmdsts & CMDSTS_OWN)
1078 break;
1079
1080 SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs, txs_q);
1081
1082 sc->sc_txfree += txs->txs_dmamap->dm_nsegs;
1083
1084 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
1085 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1086 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1087 m_freem(txs->txs_mbuf);
1088 txs->txs_mbuf = NULL;
1089
1090 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
1091
1092 /*
1093 * Check for errors and collisions.
1094 */
1095 if (cmdsts &
1096 (CMDSTS_Tx_TXA|CMDSTS_Tx_TFU|CMDSTS_Tx_ED|CMDSTS_Tx_EC)) {
1097 if (ifp->if_flags & IFF_DEBUG) {
1098 if (CMDSTS_Tx_ED)
1099 printf("%s: excessive deferral\n",
1100 sc->sc_dev.dv_xname);
1101 if (CMDSTS_Tx_EC) {
1102 printf("%s: excessive collisions\n",
1103 sc->sc_dev.dv_xname);
1104 ifp->if_collisions += 16;
1105 }
1106 }
1107 } else {
1108 /* Packet was transmitted successfully. */
1109 ifp->if_opackets++;
1110 ifp->if_collisions += CMDSTS_COLLISIONS(cmdsts);
1111 }
1112 }
1113
1114 /*
1115 * If there are no more pending transmissions, cancel the watchdog
1116 * timer.
1117 */
1118 if (txs == NULL)
1119 ifp->if_timer = 0;
1120 }
1121
1122 /*
1123 * sip_rxintr:
1124 *
1125 * Helper; handle receive interrupts.
1126 */
void
sip_rxintr(sc)
	struct sip_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_header *eh;
	struct sip_rxsoft *rxs;
	struct mbuf *m;
	u_int32_t cmdsts;
	int i, len;

	/*
	 * Walk the receive ring starting at the last processed
	 * descriptor, handing completed packets up the stack.
	 */
	for (i = sc->sc_rxptr;; i = SIP_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		/* Sync the descriptor before the CPU examines it. */
		SIP_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		cmdsts = sc->sc_rxdescs[i].sipd_cmdsts;

		/*
		 * NOTE: OWN is set if owned by _consumer_.  We're the
		 * consumer of the receive ring, so if the bit is clear,
		 * we have processed all of the packets.
		 */
		if ((cmdsts & CMDSTS_OWN) == 0) {
			/*
			 * We have processed all of the receive buffers.
			 */
			break;
		}

		/*
		 * If any collisions were seen on the wire, count one.
		 */
		if (cmdsts & CMDSTS_Rx_COL)
			ifp->if_collisions++;

		/*
		 * If an error occurred, update stats, clear the status
		 * word, and leave the packet buffer in place.  It will
		 * simply be reused the next time the ring comes around.
		 */
		if (cmdsts & (CMDSTS_Rx_RXA|CMDSTS_Rx_LONG|CMDSTS_Rx_RUNT|
		    CMDSTS_Rx_ISE|CMDSTS_Rx_CRCE|CMDSTS_Rx_FAE)) {
			ifp->if_ierrors++;
			if ((cmdsts & CMDSTS_Rx_RXA) != 0 &&
			    (cmdsts & CMDSTS_Rx_RXO) == 0) {
				/* Receive overrun handled elsewhere. */
				printf("%s: receive descriptor error\n",
				    sc->sc_dev.dv_xname);
			}
#define	PRINTERR(bit, str)						\
			if (cmdsts & (bit))				\
				printf("%s: %s\n", sc->sc_dev.dv_xname, str)
			PRINTERR(CMDSTS_Rx_LONG, "packet too long");
			PRINTERR(CMDSTS_Rx_RUNT, "runt packet");
			PRINTERR(CMDSTS_Rx_ISE, "invalid symbol error");
			PRINTERR(CMDSTS_Rx_CRCE, "CRC error");
			PRINTERR(CMDSTS_Rx_FAE, "frame alignment error");
#undef PRINTERR
			SIP_INIT_RXDESC(sc, i);
			continue;
		}

		/* Sync the data buffer so the CPU can safely read it. */
		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * No errors; receive the packet.  Note, the SiS 900
		 * includes the CRC with every packet; trim it.
		 */
		len = CMDSTS_SIZE(cmdsts) - ETHER_CRC_LEN;

#ifdef __NO_STRICT_ALIGNMENT
		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 * On success, the old mbuf (m) is handed up the stack
		 * and the descriptor gets the fresh buffer.
		 */
		m = rxs->rxs_mbuf;
		if (sip_add_rxbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			SIP_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
#else
		/*
		 * The SiS 900's receive buffers must be 4-byte aligned.
		 * But this means that the data after the Ethernet header
		 * is misaligned.  We must allocate a new buffer and
		 * copy the data, shifted forward 2 bytes so the payload
		 * after the 14-byte Ethernet header is 4-byte aligned.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
 dropit:
			ifp->if_ierrors++;
			SIP_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
		if (len > (MHLEN - 2)) {
			/* Too big for a header mbuf; need a cluster. */
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				goto dropit;
			}
		}
		m->m_data += 2;

		/*
		 * Note that we use clusters for incoming frames, so the
		 * buffer is virtually contiguous.
		 */
		memcpy(mtod(m, caddr_t), mtod(rxs->rxs_mbuf, caddr_t), len);

		/* Allow the receive descriptor to continue using its mbuf. */
		SIP_INIT_RXDESC(sc, i);
		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif /* __NO_STRICT_ALIGNMENT */

		ifp->if_ipackets++;
		eh = mtod(m, struct ether_header *);
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

#if NBPFILTER > 0
		/*
		 * Pass this up to any BPF listeners, but only
		 * pass it up the stack if it's for us.  (In promiscuous
		 * mode the chip accepts frames the filter would have
		 * rejected; CMDSTS_Rx_DEST_REJ identifies those.)
		 */
		if (ifp->if_bpf) {
			bpf_mtap(ifp->if_bpf, m);
			if ((ifp->if_flags & IFF_PROMISC) != 0 &&
			    (cmdsts & CMDSTS_Rx_DEST) == CMDSTS_Rx_DEST_REJ) {
				m_freem(m);
				continue;
			}
		}
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;
}
1277
1278 /*
1279 * sip_tick:
1280 *
1281 * One second timer, used to tick the MII.
1282 */
1283 void
1284 sip_tick(arg)
1285 void *arg;
1286 {
1287 struct sip_softc *sc = arg;
1288 int s;
1289
1290 s = splnet();
1291 mii_tick(&sc->sc_mii);
1292 splx(s);
1293
1294 timeout(sip_tick, sc, hz);
1295 }
1296
1297 /*
1298 * sip_reset:
1299 *
1300 * Perform a soft reset on the SiS 900.
1301 */
1302 void
1303 sip_reset(sc)
1304 struct sip_softc *sc;
1305 {
1306 bus_space_tag_t st = sc->sc_st;
1307 bus_space_handle_t sh = sc->sc_sh;
1308 int i;
1309
1310 bus_space_write_4(st, sh, SIP_CR, CR_RST);
1311
1312 for (i = 0; i < 1000; i++) {
1313 if ((bus_space_read_4(st, sh, SIP_ISR) &
1314 (ISR_TXRCMP|ISR_RXRCMP)) == (ISR_TXRCMP|ISR_RXRCMP))
1315 return;
1316 delay(2);
1317 }
1318
1319 printf("%s: reset failed to complete\n", sc->sc_dev.dv_xname);
1320 }
1321
1322 /*
1323 * sip_init:
1324 *
1325 * Initialize the interface. Must be called at splnet().
1326 */
void
sip_init(sc)
	struct sip_softc *sc;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sip_txsoft *txs;
	struct sip_desc *sipd;
	u_int32_t cfg;
	int i;

	/*
	 * Cancel any pending I/O.
	 */
	sip_stop(sc);

	/*
	 * Reset the chip to a known state.
	 */
	sip_reset(sc);

	/*
	 * Initialize the transmit descriptor ring.  Each descriptor's
	 * link field points at the next, forming a circular list.
	 */
	for (i = 0; i < SIP_NTXDESC; i++) {
		sipd = &sc->sc_txdescs[i];
		memset(sipd, 0, sizeof(struct sip_desc));
		sipd->sipd_link = SIP_CDTXADDR(sc, SIP_NEXTTX(i));
	}
	SIP_CDTXSYNC(sc, 0, SIP_NTXDESC,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = SIP_NTXDESC;
	sc->sc_txnext = 0;

	/*
	 * Initialize the transmit job descriptors.  All jobs start on
	 * the free queue; the dirty queue is empty.
	 */
	SIMPLEQ_INIT(&sc->sc_txfreeq);
	SIMPLEQ_INIT(&sc->sc_txdirtyq);
	for (i = 0; i < SIP_TXQUEUELEN; i++) {
		txs = &sc->sc_txsoft[i];
		txs->txs_mbuf = NULL;
		SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.  The buffers are already allocated.
	 */
	for (i = 0; i < SIP_NRXDESC; i++)
		SIP_INIT_RXDESC(sc, i);
	sc->sc_rxptr = 0;

	/*
	 * Initialize the configuration register: aggressive PCI
	 * bus request algorithm, default backoff, default OW timer,
	 * default parity error detection.
	 */
	cfg = 0;
#if BYTE_ORDER == BIG_ENDIAN
	/*
	 * ...descriptors in big-endian mode.
	 */
	cfg |= CFG_BEM;
#endif
	bus_space_write_4(st, sh, SIP_CFG, cfg);

	/*
	 * Initialize the transmit fill and drain thresholds if
	 * we have never done so.  A zero value means "not yet set";
	 * the thresholds are in units of 32 bytes.
	 */
	if (sc->sc_tx_fill_thresh == 0) {
		/*
		 * XXX This value should be tuned.  This is the
		 * minimum (32 bytes), and we may be able to
		 * improve performance by increasing it.
		 */
		sc->sc_tx_fill_thresh = 1;
	}
	if (sc->sc_tx_drain_thresh == 0) {
		/*
		 * Start at a drain threshold of 128 bytes.  We will
		 * increase it if a DMA underrun occurs.
		 *
		 * XXX The minimum value of this variable should be
		 * tuned.  We may be able to improve performance
		 * by starting with a lower value.  That, however,
		 * may trash the first few outgoing packets if the
		 * PCI bus is saturated.
		 */
		sc->sc_tx_drain_thresh = 4;
	}

	/*
	 * Initialize the prototype TXCFG register.  The prototype is
	 * kept in the softc so sip_mii_statchg() can modify it later.
	 */
	sc->sc_txcfg = TXCFG_ATP | TXCFG_MXDMA_512 |
	    (sc->sc_tx_fill_thresh << TXCFG_FLTH_SHIFT) |
	    sc->sc_tx_drain_thresh;
	bus_space_write_4(st, sh, SIP_TXCFG, sc->sc_txcfg);

	/*
	 * Initialize the receive drain threshold if we have never
	 * done so.
	 */
	if (sc->sc_rx_drain_thresh == 0) {
		/*
		 * XXX This value should be tuned.  This is set to the
		 * maximum of 248 bytes, and we may be able to improve
		 * performance by decreasing it (although we should never
		 * set this value lower than 2; 14 bytes are required to
		 * filter the packet).
		 */
		sc->sc_rx_drain_thresh = RXCFG_DRTH >> RXCFG_DRTH_SHIFT;
	}

	/*
	 * Initialize the prototype RXCFG register.
	 */
	sc->sc_rxcfg = RXCFG_MXDMA_512 |
	    (sc->sc_rx_drain_thresh << RXCFG_DRTH_SHIFT);
	bus_space_write_4(st, sh, SIP_RXCFG, sc->sc_rxcfg);

	/* Set up the receive filter. */
	sip_set_filter(sc);

	/*
	 * Give the transmit and receive rings to the chip.
	 */
	bus_space_write_4(st, sh, SIP_TXDP, SIP_CDTXADDR(sc, sc->sc_txnext));
	bus_space_write_4(st, sh, SIP_RXDP, SIP_CDRXADDR(sc, sc->sc_rxptr));

	/*
	 * Initialize the interrupt mask.
	 */
	sc->sc_imr = ISR_DPERR|ISR_SSERR|ISR_RMABT|ISR_RTABT|ISR_RXSOVR|
	    ISR_TXURN|ISR_TXDESC|ISR_RXORN|ISR_RXIDLE|ISR_RXDESC;
	bus_space_write_4(st, sh, SIP_IMR, sc->sc_imr);

	/*
	 * Set the current media.  Do this after initializing the prototype
	 * IMR, since sip_mii_statchg() modifies the IMR for 802.3x flow
	 * control.
	 */
	mii_mediachg(&sc->sc_mii);

	/*
	 * Enable interrupts.
	 */
	bus_space_write_4(st, sh, SIP_IER, IER_IE);

	/*
	 * Start the transmit and receive processes.
	 */
	bus_space_write_4(st, sh, SIP_CR, CR_RXE | CR_TXE);

	/*
	 * Start the one second MII clock.
	 */
	timeout(sip_tick, sc, hz);

	/*
	 * ...all done!
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
}
1495
1496 /*
1497 * sip_stop:
1498 *
1499 * Stop transmission on the interface.
1500 */
void
sip_stop(sc)
	struct sip_softc *sc;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sip_txsoft *txs;
	u_int32_t cmdsts = 0;		/* DEBUG */

	/*
	 * Stop the one second clock.
	 */
	untimeout(sip_tick, sc);

	/*
	 * Disable interrupts.
	 */
	bus_space_write_4(st, sh, SIP_IER, 0);

	/*
	 * Stop receiver and transmitter.
	 */
	bus_space_write_4(st, sh, SIP_CR, CR_RXD | CR_TXD);

	/*
	 * Release any queued transmit buffers.  While we're at it,
	 * sanity-check the descriptor state under IFF_DEBUG: the last
	 * pending descriptor should always have INTR set.
	 */
	while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		if ((ifp->if_flags & IFF_DEBUG) != 0 &&
		    SIMPLEQ_NEXT(txs, txs_q) == NULL &&
		    (sc->sc_txdescs[txs->txs_lastdesc].sipd_cmdsts &
		     CMDSTS_INTR) == 0)
			printf("%s: sip_stop: last descriptor does not "
			    "have INTR bit set\n", sc->sc_dev.dv_xname);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs, txs_q);
#ifdef DIAGNOSTIC
		if (txs->txs_mbuf == NULL) {
			printf("%s: dirty txsoft with no mbuf chain\n",
			    sc->sc_dev.dv_xname);
			panic("sip_stop");
		}
#endif
		/* Accumulate status bits for the consistency check below. */
		cmdsts |=		/* DEBUG */
		    sc->sc_txdescs[txs->txs_lastdesc].sipd_cmdsts;
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
		SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	/*
	 * If descriptors were pending but none had INTR set, warn:
	 * that would mean transmit-complete interrupts could be missed.
	 */
	if ((ifp->if_flags & IFF_DEBUG) != 0 &&
	    (cmdsts & CMDSTS_INTR) == 0 && sc->sc_txfree != SIP_NTXDESC)
		printf("%s: sip_stop: no INTR bits set in dirty tx "
		    "descriptors\n", sc->sc_dev.dv_xname);
}
1563
1564 /*
1565 * sip_read_eeprom:
1566 *
1567 * Read data from the serial EEPROM.
1568 */
1569 void
1570 sip_read_eeprom(sc, word, wordcnt, data)
1571 struct sip_softc *sc;
1572 int word, wordcnt;
1573 u_int16_t *data;
1574 {
1575 bus_space_tag_t st = sc->sc_st;
1576 bus_space_handle_t sh = sc->sc_sh;
1577 u_int16_t reg;
1578 int i, x;
1579
1580 for (i = 0; i < wordcnt; i++) {
1581 /* Send CHIP SELECT. */
1582 reg = EROMAR_EECS;
1583 bus_space_write_4(st, sh, SIP_EROMAR, reg);
1584
1585 /* Shift in the READ opcode. */
1586 for (x = 3; x > 0; x--) {
1587 if (SIP_EEPROM_OPC_READ & (1 << (x - 1)))
1588 reg |= EROMAR_EEDI;
1589 else
1590 reg &= ~EROMAR_EEDI;
1591 bus_space_write_4(st, sh, SIP_EROMAR, reg);
1592 bus_space_write_4(st, sh, SIP_EROMAR,
1593 reg | EROMAR_EESK);
1594 delay(4);
1595 bus_space_write_4(st, sh, SIP_EROMAR, reg);
1596 delay(4);
1597 }
1598
1599 /* Shift in address. */
1600 for (x = 6; x > 0; x--) {
1601 if ((word + i) & (1 << (x - 1)))
1602 reg |= EROMAR_EEDI;
1603 else
1604 reg &= ~EROMAR_EEDI;
1605 bus_space_write_4(st, sh, SIP_EROMAR, reg);
1606 bus_space_write_4(st, sh, SIP_EROMAR,
1607 reg | EROMAR_EESK);
1608 delay(4);
1609 bus_space_write_4(st, sh, SIP_EROMAR, reg);
1610 delay(4);
1611 }
1612
1613 /* Shift out data. */
1614 reg = EROMAR_EECS;
1615 data[i] = 0;
1616 for (x = 16; x > 0; x--) {
1617 bus_space_write_4(st, sh, SIP_EROMAR,
1618 reg | EROMAR_EESK);
1619 delay(4);
1620 if (bus_space_read_4(st, sh, SIP_EROMAR) & EROMAR_EEDO)
1621 data[i] |= (1 << (x - 1));
1622 bus_space_write_4(st, sh, SIP_EROMAR, reg);
1623 }
1624
1625 /* Clear CHIP SELECT. */
1626 bus_space_write_4(st, sh, SIP_EROMAR, 0);
1627 delay(4);
1628 }
1629 }
1630
1631 /*
1632 * sip_add_rxbuf:
1633 *
1634 * Add a receive buffer to the indicated descriptor.
1635 */
1636 int
1637 sip_add_rxbuf(sc, idx)
1638 struct sip_softc *sc;
1639 int idx;
1640 {
1641 struct sip_rxsoft *rxs = &sc->sc_rxsoft[idx];
1642 struct mbuf *m;
1643 int error;
1644
1645 MGETHDR(m, M_DONTWAIT, MT_DATA);
1646 if (m == NULL)
1647 return (ENOBUFS);
1648
1649 MCLGET(m, M_DONTWAIT);
1650 if ((m->m_flags & M_EXT) == 0) {
1651 m_freem(m);
1652 return (ENOBUFS);
1653 }
1654
1655 if (rxs->rxs_mbuf != NULL)
1656 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
1657
1658 rxs->rxs_mbuf = m;
1659
1660 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
1661 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
1662 if (error) {
1663 printf("%s: can't load rx DMA map %d, error = %d\n",
1664 sc->sc_dev.dv_xname, idx, error);
1665 panic("sip_add_rxbuf"); /* XXX */
1666 }
1667
1668 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1669 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1670
1671 SIP_INIT_RXDESC(sc, idx);
1672
1673 return (0);
1674 }
1675
1676 /*
1677 * sip_set_filter:
1678 *
1679 * Set up the receive filter.
1680 */
void
sip_set_filter(sc)
	struct sip_softc *sc;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int8_t *cp;
	u_int32_t crc, mchash[8];
	int len;
	/* Nibble-at-a-time CRC-32 lookup table (reflected polynomial). */
	static const u_int32_t crctab[] = {
		0x00000000, 0x1db71064, 0x3b6e20c8, 0x26d930ac,
		0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
		0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
		0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c
	};

	/*
	 * Initialize the prototype RFCR.
	 */
	sc->sc_rfcr = RFCR_RFEN;
	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_rfcr |= RFCR_AAB;
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rfcr |= RFCR_AAP;
		goto allmulti;
	}

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator, and then using the high-order
	 * 7 bits as an index into the 128 bit multicast hash table (only
	 * the lower 16 bits of each 32 bit multicast hash register are
	 * valid).  The high order bits select the register, while the
	 * rest of the bits select the bit within the register.
	 */

	memset(mchash, 0, sizeof(mchash));

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

		cp = enm->enm_addrlo;
		crc = 0xffffffff;
		for (len = sizeof(enm->enm_addrlo); --len >= 0;) {
			crc ^= *cp++;
			crc = (crc >> 4) ^ crctab[crc & 0xf];
			crc = (crc >> 4) ^ crctab[crc & 0xf];
		}
		/* Just want the 7 most significant bits. */
		crc >>= 25;

		/* Set the corresponding bit in the hash table. */
		mchash[crc >> 4] |= 1 << (crc & 0xf);

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto setit;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	sc->sc_rfcr |= RFCR_AAM;

 setit:
	/* Select a filter RAM address, then write 16 bits of data. */
#define	FILTER_EMIT(addr, data)						\
	bus_space_write_4(st, sh, SIP_RFCR, (addr));			\
	bus_space_write_4(st, sh, SIP_RFDR, (data))

	/*
	 * Disable receive filter, and program the node address.
	 */
	cp = LLADDR(ifp->if_sadl);
	FILTER_EMIT(RFCR_RFADDR_NODE0, (cp[1] << 8) | cp[0]);
	FILTER_EMIT(RFCR_RFADDR_NODE2, (cp[3] << 8) | cp[2]);
	FILTER_EMIT(RFCR_RFADDR_NODE4, (cp[5] << 8) | cp[4]);

	if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
		/*
		 * Program the multicast hash table.
		 */
		FILTER_EMIT(RFCR_RFADDR_MC0, mchash[0]);
		FILTER_EMIT(RFCR_RFADDR_MC1, mchash[1]);
		FILTER_EMIT(RFCR_RFADDR_MC2, mchash[2]);
		FILTER_EMIT(RFCR_RFADDR_MC3, mchash[3]);
		FILTER_EMIT(RFCR_RFADDR_MC4, mchash[4]);
		FILTER_EMIT(RFCR_RFADDR_MC5, mchash[5]);
		FILTER_EMIT(RFCR_RFADDR_MC6, mchash[6]);
		FILTER_EMIT(RFCR_RFADDR_MC7, mchash[7]);
	}
#undef FILTER_EMIT

	/*
	 * Re-enable the receiver filter.
	 */
	bus_space_write_4(st, sh, SIP_RFCR, sc->sc_rfcr);
}
1793
1794 /*
1795 * sip_mii_readreg: [mii interface function]
1796 *
1797 * Read a PHY register on the MII.
1798 */
1799 int
1800 sip_mii_readreg(self, phy, reg)
1801 struct device *self;
1802 int phy, reg;
1803 {
1804 struct sip_softc *sc = (struct sip_softc *) self;
1805 u_int32_t enphy;
1806
1807 /*
1808 * The SiS 900 has only an internal PHY on the MII. Only allow
1809 * MII address 0.
1810 */
1811 if (phy != 0)
1812 return (0);
1813
1814 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_ENPHY,
1815 (reg << ENPHY_REGADDR_SHIFT) | ENPHY_RWCMD | ENPHY_ACCESS);
1816 do {
1817 enphy = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_ENPHY);
1818 } while (enphy & ENPHY_ACCESS);
1819 return ((enphy & ENPHY_PHYDATA) >> ENPHY_DATA_SHIFT);
1820 }
1821
1822 /*
1823 * sip_mii_writereg: [mii interface function]
1824 *
1825 * Write a PHY register on the MII.
1826 */
1827 void
1828 sip_mii_writereg(self, phy, reg, val)
1829 struct device *self;
1830 int phy, reg, val;
1831 {
1832 struct sip_softc *sc = (struct sip_softc *) self;
1833 u_int32_t enphy;
1834
1835 /*
1836 * The SiS 900 has only an internal PHY on the MII. Only allow
1837 * MII address 0.
1838 */
1839 if (phy != 0)
1840 return;
1841
1842 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_ENPHY,
1843 (val << ENPHY_DATA_SHIFT) | (reg << ENPHY_REGADDR_SHIFT) |
1844 ENPHY_ACCESS);
1845 do {
1846 enphy = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_ENPHY);
1847 } while (enphy & ENPHY_ACCESS);
1848 }
1849
1850 /*
1851 * sip_mii_statchg: [mii interface function]
1852 *
1853 * Callback from MII layer when media changes.
1854 */
1855 void
1856 sip_mii_statchg(self)
1857 struct device *self;
1858 {
1859 struct sip_softc *sc = (struct sip_softc *) self;
1860 u_int32_t flowctl;
1861
1862 /*
1863 * Update TXCFG for full-duplex operation.
1864 */
1865 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0)
1866 sc->sc_txcfg |= (TXCFG_CSI | TXCFG_HBI);
1867 else
1868 sc->sc_txcfg &= ~(TXCFG_CSI | TXCFG_HBI);
1869
1870 /*
1871 * Update RXCFG for full-duplex or loopback.
1872 */
1873 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0 ||
1874 IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_LOOP)
1875 sc->sc_rxcfg |= RXCFG_ATX;
1876 else
1877 sc->sc_rxcfg &= ~RXCFG_ATX;
1878
1879 /*
1880 * Update IMR for use of 802.3x flow control.
1881 */
1882 if ((sc->sc_mii.mii_media_active & IFM_FLOW) != 0) {
1883 sc->sc_imr |= (ISR_PAUSE_END|ISR_PAUSE_ST);
1884 flowctl = FLOWCTL_FLOWEN;
1885 } else {
1886 sc->sc_imr &= ~(ISR_PAUSE_END|ISR_PAUSE_ST);
1887 flowctl = 0;
1888 }
1889
1890 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_TXCFG, sc->sc_txcfg);
1891 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RXCFG, sc->sc_rxcfg);
1892 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_IMR, sc->sc_imr);
1893 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_FLOWCTL, flowctl);
1894
1895 /* XXX Update ifp->if_baudrate */
1896 }
1897
1898 /*
1899 * sip_mediastatus: [ifmedia interface function]
1900 *
1901 * Get the current interface media status.
1902 */
1903 void
1904 sip_mediastatus(ifp, ifmr)
1905 struct ifnet *ifp;
1906 struct ifmediareq *ifmr;
1907 {
1908 struct sip_softc *sc = ifp->if_softc;
1909
1910 mii_pollstat(&sc->sc_mii);
1911 ifmr->ifm_status = sc->sc_mii.mii_media_status;
1912 ifmr->ifm_active = sc->sc_mii.mii_media_active;
1913 }
1914
1915 /*
1916 * sip_mediachange: [ifmedia interface function]
1917 *
1918 * Set hardware to newly-selected media.
1919 */
1920 int
1921 sip_mediachange(ifp)
1922 struct ifnet *ifp;
1923 {
1924 struct sip_softc *sc = ifp->if_softc;
1925
1926 if (ifp->if_flags & IFF_UP)
1927 mii_mediachg(&sc->sc_mii);
1928 return (0);
1929 }
1930