      1 /*	$NetBSD: if_kse.c,v 1.54 2020/09/20 17:59:42 nisimura Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2006 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Tohru Nishimura.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 /*
     33  * Micrel 8841/8842 10/100 PCI ethernet driver
     34  */
     35 
     36 #include <sys/cdefs.h>
     37 __KERNEL_RCSID(0, "$NetBSD: if_kse.c,v 1.54 2020/09/20 17:59:42 nisimura Exp $");
     38 
     39 #include <sys/param.h>
     40 #include <sys/bus.h>
     41 #include <sys/intr.h>
     42 #include <sys/device.h>
     43 #include <sys/callout.h>
     44 #include <sys/ioctl.h>
     45 #include <sys/malloc.h>
     46 #include <sys/mbuf.h>
     47 #include <sys/errno.h>
     48 #include <sys/systm.h>
     49 #include <sys/kernel.h>
     50 
     51 #include <net/if.h>
     52 #include <net/if_media.h>
     53 #include <net/if_dl.h>
     54 #include <net/if_ether.h>
     55 #include <dev/mii/mii.h>
     56 #include <dev/mii/miivar.h>
     57 #include <net/bpf.h>
     58 
     59 #include <dev/pci/pcivar.h>
     60 #include <dev/pci/pcireg.h>
     61 #include <dev/pci/pcidevs.h>
     62 
     63 #define KSE_LINKDEBUG 0
     64 
     65 #define CSR_READ_4(sc, off) \
     66 	    bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (off))
     67 #define CSR_WRITE_4(sc, off, val) \
     68 	    bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (off), (val))
     69 #define CSR_READ_2(sc, off) \
     70 	    bus_space_read_2((sc)->sc_st, (sc)->sc_sh, (off))
     71 #define CSR_WRITE_2(sc, off, val) \
     72 	    bus_space_write_2((sc)->sc_st, (sc)->sc_sh, (off), (val))
     73 
     74 #define MDTXC	0x000	/* DMA transmit control */
     75 #define MDRXC	0x004	/* DMA receive control */
     76 #define MDTSC	0x008	/* DMA transmit start */
     77 #define MDRSC	0x00c	/* DMA receive start */
     78 #define TDLB	0x010	/* transmit descriptor list base */
     79 #define RDLB	0x014	/* receive descriptor list base */
     80 #define MTR0	0x020	/* multicast table 31:0 */
     81 #define MTR1	0x024	/* multicast table 63:32 */
     82 #define INTEN	0x028	/* interrupt enable */
     83 #define INTST	0x02c	/* interrupt status */
     84 #define MAAL0	0x080	/* additional MAC address 0 low */
     85 #define MAAH0	0x084	/* additional MAC address 0 high */
     86 #define MARL	0x200	/* MAC address low */
     87 #define MARM	0x202	/* MAC address middle */
     88 #define MARH	0x204	/* MAC address high */
     89 #define GRR	0x216	/* global reset */
     90 #define SIDER	0x400	/* switch ID and function enable */
     91 #define SGCR3	0x406	/* switch function control 3 */
     92 #define  CR3_USEHDX	(1U<<6)	/* use half-duplex 8842 host port */
     93 #define  CR3_USEFC	(1U<<5) /* use flowcontrol 8842 host port */
     94 #define IACR	0x4a0	/* indirect access control */
     95 #define IADR1	0x4a2	/* indirect access data 66:63 */
     96 #define IADR2	0x4a4	/* indirect access data 47:32 */
     97 #define IADR3	0x4a6	/* indirect access data 63:48 */
     98 #define IADR4	0x4a8	/* indirect access data 15:0 */
     99 #define IADR5	0x4aa	/* indirect access data 31:16 */
    100 #define P1CR4	0x512	/* port 1 control 4 */
    101 #define P1SR	0x514	/* port 1 status */
    102 #define P2CR4	0x532	/* port 2 control 4 */
    103 #define P2SR	0x534	/* port 2 status */
    104 #define  PxCR_STARTNEG	(1U<<9)		/* restart auto negotiation */
    105 #define  PxCR_AUTOEN	(1U<<7)		/* auto negotiation enable */
    106 #define  PxCR_SPD100	(1U<<6)		/* force speed 100 */
    107 #define  PxCR_USEFDX	(1U<<5)		/* force full duplex */
    108 #define  PxCR_USEFC	(1U<<4)		/* advertise pause flow control */
    109 #define  PxSR_ACOMP	(1U<<6)		/* auto negotiation completed */
    110 #define  PxSR_SPD100	(1U<<10)	/* speed is 100Mbps */
    111 #define  PxSR_FDX	(1U<<9)		/* full duplex */
    112 #define  PxSR_LINKUP	(1U<<5)		/* link is good */
    113 #define  PxSR_RXFLOW	(1U<<12)	/* receive flow control active */
    114 #define  PxSR_TXFLOW	(1U<<11)	/* transmit flow control active */
    115 #define P1VIDCR	0x504	/* port 1 vtag */
    116 #define P2VIDCR	0x524	/* port 2 vtag */
    117 #define P3VIDCR	0x544	/* 8842 host vtag */
    118 
    119 #define TXC_BS_MSK	0x3f000000	/* burst size */
    120 #define TXC_BS_SFT	(24)		/* 1,2,4,8,16,32 or 0 for unlimited */
    121 #define TXC_UCG		(1U<<18)	/* generate UDP checksum */
    122 #define TXC_TCG		(1U<<17)	/* generate TCP checksum */
    123 #define TXC_ICG		(1U<<16)	/* generate IP checksum */
    124 #define TXC_FCE		(1U<<9)		/* generate PAUSE to moderate Rx lvl */
    125 #define TXC_EP		(1U<<2)		/* enable automatic padding */
    126 #define TXC_AC		(1U<<1)		/* add CRC to frame */
    127 #define TXC_TEN		(1)		/* enable DMA to run */
    128 
    129 #define RXC_BS_MSK	0x3f000000	/* burst size */
    130 #define RXC_BS_SFT	(24)		/* 1,2,4,8,16,32 or 0 for unlimited */
    131 #define RXC_IHAE	(1U<<19)	/* IP header alignment enable */
    132 #define RXC_UCC		(1U<<18)	/* run UDP checksum */
    133 #define RXC_TCC		(1U<<17)	/* run TCP checksum */
    134 #define RXC_ICC		(1U<<16)	/* run IP checksum */
    135 #define RXC_FCE		(1U<<9)		/* accept PAUSE to throttle Tx */
    136 #define RXC_RB		(1U<<6)		/* receive broadcast frame */
    137 #define RXC_RM		(1U<<5)		/* receive all multicast (inc. RB) */
    138 #define RXC_RU		(1U<<4)		/* receive 16 additional unicasts */
    139 #define RXC_RE		(1U<<3)		/* accept error frame */
    140 #define RXC_RA		(1U<<2)		/* receive all frames (promiscuous) */
    141 #define RXC_MHTE	(1U<<1)		/* use multicast hash table */
    142 #define RXC_REN		(1)		/* enable DMA to run */
    143 
    144 #define INT_DMLCS	(1U<<31)	/* link status change */
    145 #define INT_DMTS	(1U<<30)	/* sending desc. has posted Tx done */
    146 #define INT_DMRS	(1U<<29)	/* frame was received */
    147 #define INT_DMRBUS	(1U<<27)	/* Rx descriptor pool is full */
    148 #define INT_DMxPSS	(3U<<25)	/* 26:25 DMA Tx/Rx have stopped */
    149 
    150 #define T0_OWN		(1U<<31)	/* desc is ready to Tx */
    151 
    152 #define R0_OWN		(1U<<31)	/* desc is empty */
    153 #define R0_FS		(1U<<30)	/* first segment of frame */
    154 #define R0_LS		(1U<<29)	/* last segment of frame */
    155 #define R0_IPE		(1U<<28)	/* IP checksum error */
    156 #define R0_TCPE		(1U<<27)	/* TCP checksum error */
    157 #define R0_UDPE		(1U<<26)	/* UDP checksum error */
    158 #define R0_ES		(1U<<25)	/* error summary */
    159 #define R0_MF		(1U<<24)	/* multicast frame */
    160 #define R0_SPN		0x00300000	/* 21:20 switch port 1/2 */
    161 #define R0_ALIGN	0x00300000	/* 21:20 (KSZ8692P) Rx align amount */
    162 #define R0_RE		(1U<<19)	/* MII reported error */
    163 #define R0_TL		(1U<<18)	/* frame too long, beyond 1518 */
    164 #define R0_RF		(1U<<17)	/* damaged runt frame */
    165 #define R0_CE		(1U<<16)	/* CRC error */
    166 #define R0_FT		(1U<<15)	/* frame type */
    167 #define R0_FL_MASK	0x7ff		/* frame length 10:0 */
    168 
    169 #define T1_IC		(1U<<31)	/* post interrupt on complete */
    170 #define T1_FS		(1U<<30)	/* first segment of frame */
    171 #define T1_LS		(1U<<29)	/* last segment of frame */
    172 #define T1_IPCKG	(1U<<28)	/* generate IP checksum */
    173 #define T1_TCPCKG	(1U<<27)	/* generate TCP checksum */
    174 #define T1_UDPCKG	(1U<<26)	/* generate UDP checksum */
    175 #define T1_TER		(1U<<25)	/* end of ring */
    176 #define T1_SPN		0x00300000	/* 21:20 switch port 1/2 */
    177 #define T1_TBS_MASK	0x7ff		/* segment size 10:0 */
    178 
    179 #define R1_RER		(1U<<25)	/* end of ring */
    180 #define R1_RBS_MASK	0x7fc		/* segment size 10:0 */
    181 
    182 #define KSE_NTXSEGS		16
    183 #define KSE_TXQUEUELEN		64
    184 #define KSE_TXQUEUELEN_MASK	(KSE_TXQUEUELEN - 1)
    185 #define KSE_TXQUEUE_GC		(KSE_TXQUEUELEN / 4)
    186 #define KSE_NTXDESC		256
    187 #define KSE_NTXDESC_MASK	(KSE_NTXDESC - 1)
    188 #define KSE_NEXTTX(x)		(((x) + 1) & KSE_NTXDESC_MASK)
    189 #define KSE_NEXTTXS(x)		(((x) + 1) & KSE_TXQUEUELEN_MASK)
    190 
    191 #define KSE_NRXDESC		64
    192 #define KSE_NRXDESC_MASK	(KSE_NRXDESC - 1)
    193 #define KSE_NEXTRX(x)		(((x) + 1) & KSE_NRXDESC_MASK)
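        /*
         * Note: KSE_NTXDESC, KSE_TXQUEUELEN and KSE_NRXDESC must remain
         * powers of two; the _MASK values and NEXT macros above rely on
         * simple bit masking to wrap ring indices.
         */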
    194 
    195 struct tdes {
    196 	uint32_t t0, t1, t2, t3;
    197 };
    198 
    199 struct rdes {
    200 	uint32_t r0, r1, r2, r3;
    201 };
    202 
    203 struct kse_control_data {
    204 	struct tdes kcd_txdescs[KSE_NTXDESC];
    205 	struct rdes kcd_rxdescs[KSE_NRXDESC];
    206 };
    207 #define KSE_CDOFF(x)		offsetof(struct kse_control_data, x)
    208 #define KSE_CDTXOFF(x)		KSE_CDOFF(kcd_txdescs[(x)])
    209 #define KSE_CDRXOFF(x)		KSE_CDOFF(kcd_rxdescs[(x)])
    210 
    211 struct kse_txsoft {
    212 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    213 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    214 	int txs_firstdesc;		/* first descriptor in packet */
    215 	int txs_lastdesc;		/* last descriptor in packet */
    216 	int txs_ndesc;			/* # of descriptors used */
    217 };
    218 
    219 struct kse_rxsoft {
    220 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    221 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    222 };
    223 
    224 struct kse_softc {
    225 	device_t sc_dev;		/* generic device information */
    226 	bus_space_tag_t sc_st;		/* bus space tag */
    227 	bus_space_handle_t sc_sh;	/* bus space handle */
    228 	bus_size_t sc_memsize;		/* csr map size */
    229 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    230 	pci_chipset_tag_t sc_pc;	/* PCI chipset tag */
    231 	struct ethercom sc_ethercom;	/* Ethernet common data */
    232 	void *sc_ih;			/* interrupt cookie */
    233 
    234 	struct mii_data sc_mii;		/* mii 8841 */
    235 	struct ifmedia sc_media;	/* ifmedia 8842 */
    236 	int sc_flowflags;		/* 802.3x PAUSE flow control */
    237 
    238 	callout_t  sc_tick_ch;		/* MII tick callout */
    239 	callout_t  sc_stat_ch;		/* statistics counter callout */
    240 
    241 	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
    242 #define sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
    243 
    244 	struct kse_control_data *sc_control_data;
    245 #define sc_txdescs	sc_control_data->kcd_txdescs
    246 #define sc_rxdescs	sc_control_data->kcd_rxdescs
    247 
    248 	struct kse_txsoft sc_txsoft[KSE_TXQUEUELEN];
    249 	struct kse_rxsoft sc_rxsoft[KSE_NRXDESC];
    250 	int sc_txfree;			/* number of free Tx descriptors */
    251 	int sc_txnext;			/* next ready Tx descriptor */
    252 	int sc_txsfree;			/* number of free Tx jobs */
    253 	int sc_txsnext;			/* next ready Tx job */
    254 	int sc_txsdirty;		/* dirty Tx jobs */
    255 	int sc_rxptr;			/* next ready Rx descriptor/descsoft */
    256 
    257 	uint32_t sc_txc, sc_rxc;
    258 	uint32_t sc_t1csum;
    259 	int sc_mcsum;
    260 	uint32_t sc_inten;
    261 	uint32_t sc_chip;
    262 
    263 #ifdef KSE_EVENT_COUNTERS
    264 	struct ksext {
    265 		char evcntname[3][8];
    266 		struct evcnt pev[3][34];
    267 	} sc_ext;			/* switch statistics */
    268 #endif
    269 };
    270 
    271 #define KSE_CDTXADDR(sc, x)	((sc)->sc_cddma + KSE_CDTXOFF((x)))
    272 #define KSE_CDRXADDR(sc, x)	((sc)->sc_cddma + KSE_CDRXOFF((x)))
    273 
    274 #define KSE_CDTXSYNC(sc, x, n, ops)					\
    275 do {									\
    276 	int __x, __n;							\
    277 									\
    278 	__x = (x);							\
    279 	__n = (n);							\
    280 									\
    281 	/* If it will wrap around, sync to the end of the ring. */	\
    282 	if ((__x + __n) > KSE_NTXDESC) {				\
    283 		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
    284 		    KSE_CDTXOFF(__x), sizeof(struct tdes) *		\
    285 		    (KSE_NTXDESC - __x), (ops));			\
    286 		__n -= (KSE_NTXDESC - __x);				\
    287 		__x = 0;						\
    288 	}								\
    289 									\
    290 	/* Now sync whatever is left. */				\
    291 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
    292 	    KSE_CDTXOFF(__x), sizeof(struct tdes) * __n, (ops));	\
    293 } while (/*CONSTCOND*/0)
    294 
    295 #define KSE_CDRXSYNC(sc, x, ops)					\
    296 do {									\
    297 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
    298 	    KSE_CDRXOFF((x)), sizeof(struct rdes), (ops));		\
    299 } while (/*CONSTCOND*/0)
    300 
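        /*
         * Reset an Rx descriptor for reuse: point r2 back at the start of
         * the mbuf cluster, report the full buffer size in r1, and hand
         * ownership back to the hardware by setting R0_OWN.
         */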
    301 #define KSE_INIT_RXDESC(sc, x)						\
    302 do {									\
    303 	struct kse_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
    304 	struct rdes *__rxd = &(sc)->sc_rxdescs[(x)];			\
    305 	struct mbuf *__m = __rxs->rxs_mbuf;				\
    306 									\
    307 	__m->m_data = __m->m_ext.ext_buf;				\
    308 	__rxd->r2 = __rxs->rxs_dmamap->dm_segs[0].ds_addr;		\
    309 	__rxd->r1 = R1_RBS_MASK /* __m->m_ext.ext_size */;		\
    310 	__rxd->r0 = R0_OWN;						\
    311 	KSE_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); \
    312 } while (/*CONSTCOND*/0)
    313 
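        /*
         * Valid burst lengths are 1, 2, 4, 8, 16 and 32, or 0 for unlimited
         * (see the TXC_BS_SFT/RXC_BS_SFT comments above); kse_init() shifts
         * this value into the Tx/Rx DMA control registers unchanged.
         */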
    314 u_int kse_burstsize = 8;	/* DMA burst length tuning knob */
    315 
    316 #ifdef KSEDIAGNOSTIC
    317 u_int kse_monitor_rxintr;	/* fragmented UDP csum HW bug hook */
    318 #endif
    319 
    320 static int kse_match(device_t, cfdata_t, void *);
    321 static void kse_attach(device_t, device_t, void *);
    322 
    323 CFATTACH_DECL_NEW(kse, sizeof(struct kse_softc),
    324     kse_match, kse_attach, NULL, NULL);
    325 
    326 static int kse_ioctl(struct ifnet *, u_long, void *);
    327 static void kse_start(struct ifnet *);
    328 static void kse_watchdog(struct ifnet *);
    329 static int kse_init(struct ifnet *);
    330 static void kse_stop(struct ifnet *, int);
    331 static void kse_reset(struct kse_softc *);
    332 static void kse_set_rcvfilt(struct kse_softc *);
    333 static int add_rxbuf(struct kse_softc *, int);
    334 static void rxdrain(struct kse_softc *);
    335 static int kse_intr(void *);
    336 static void rxintr(struct kse_softc *);
    337 static void txreap(struct kse_softc *);
    338 static void lnkchg(struct kse_softc *);
    339 static int kse_ifmedia_upd(struct ifnet *);
    340 static void kse_ifmedia_sts(struct ifnet *, struct ifmediareq *);
    341 static void nopifmedia_sts(struct ifnet *, struct ifmediareq *);
    342 static void phy_tick(void *);
    343 int kse_mii_readreg(device_t, int, int, uint16_t *);
    344 int kse_mii_writereg(device_t, int, int, uint16_t);
    345 void kse_mii_statchg(struct ifnet *);
    346 #ifdef KSE_EVENT_COUNTERS
    347 static void stat_tick(void *);
    348 static void zerostats(struct kse_softc *);
    349 #endif
    350 
    351 static int
    352 kse_match(device_t parent, cfdata_t match, void *aux)
    353 {
    354 	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
    355 
    356 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_MICREL &&
    357 	     (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_MICREL_KSZ8842 ||
    358 	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_MICREL_KSZ8841) &&
    359 	    PCI_CLASS(pa->pa_class) == PCI_CLASS_NETWORK)
    360 		return 1;
    361 
    362 	return 0;
    363 }
    364 
    365 static void
    366 kse_attach(device_t parent, device_t self, void *aux)
    367 {
    368 	struct kse_softc *sc = device_private(self);
    369 	struct pci_attach_args *pa = aux;
    370 	pci_chipset_tag_t pc = pa->pa_pc;
    371 	pci_intr_handle_t ih;
    372 	const char *intrstr;
    373 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    374 	struct mii_data * const mii = &sc->sc_mii;
    375 	struct ifmedia *ifm;
    376 	uint8_t enaddr[ETHER_ADDR_LEN];
    377 	bus_dma_segment_t seg;
    378 	int i, error, nseg;
    379 	char intrbuf[PCI_INTRSTR_LEN];
    380 
    381 	aprint_normal(": Micrel KSZ%04x Ethernet (rev. 0x%02x)\n",
    382 	    PCI_PRODUCT(pa->pa_id), PCI_REVISION(pa->pa_class));
    383 
    384 	if (pci_mapreg_map(pa, 0x10,
    385 	    PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT,
    386 	    0, &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_memsize) != 0) {
    387 		aprint_error_dev(self, "unable to map device registers\n");
    388 		return;
    389 	}
    390 
    391 	/* Make sure bus mastering is enabled. */
    392 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
    393 	    pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) |
    394 	    PCI_COMMAND_MASTER_ENABLE);
    395 
    396 	/* Power up chip if necessary. */
    397 	if ((error = pci_activate(pc, pa->pa_tag, self, NULL))
    398 	    && error != EOPNOTSUPP) {
    399 		aprint_error_dev(self, "cannot activate %d\n", error);
    400 		return;
    401 	}
    402 
    403 	/* Map and establish our interrupt. */
    404 	if (pci_intr_map(pa, &ih)) {
    405 		aprint_error_dev(self, "unable to map interrupt\n");
    406 		goto fail;
    407 	}
    408 	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
    409 	sc->sc_ih = pci_intr_establish_xname(pc, ih, IPL_NET, kse_intr, sc,
    410 	    device_xname(self));
    411 	if (sc->sc_ih == NULL) {
    412 		aprint_error_dev(self, "unable to establish interrupt");
    413 		if (intrstr != NULL)
    414 			aprint_error(" at %s", intrstr);
    415 		aprint_error("\n");
    416 		goto fail;
    417 	}
    418 	aprint_normal_dev(self, "interrupting at %s\n", intrstr);
    419 
    420 	sc->sc_dev = self;
    421 	sc->sc_dmat = pa->pa_dmat;
    422 	sc->sc_pc = pa->pa_pc;
    423 	sc->sc_chip = PCI_PRODUCT(pa->pa_id);
    424 
    425 	/*
    426 	 * Read the Ethernet address from the EEPROM.
    427 	 */
    428 	i = CSR_READ_2(sc, MARL);
    429 	enaddr[5] = i;
    430 	enaddr[4] = i >> 8;
    431 	i = CSR_READ_2(sc, MARM);
    432 	enaddr[3] = i;
    433 	enaddr[2] = i >> 8;
    434 	i = CSR_READ_2(sc, MARH);
    435 	enaddr[1] = i;
    436 	enaddr[0] = i >> 8;
    437 	aprint_normal_dev(self,
    438 	    "Ethernet address %s\n", ether_sprintf(enaddr));
    439 
    440 	/*
    441 	 * Enable chip function.
    442 	 */
    443 	CSR_WRITE_2(sc, SIDER, 1);
    444 
    445 	/*
    446 	 * Allocate the control data structures, and create and load the
    447 	 * DMA map for it.
    448 	 */
    449 	error = bus_dmamem_alloc(sc->sc_dmat,
    450 	    sizeof(struct kse_control_data), PAGE_SIZE, 0, &seg, 1, &nseg, 0);
    451 	if (error != 0) {
    452 		aprint_error_dev(self,
    453 		    "unable to allocate control data, error = %d\n", error);
    454 		goto fail_0;
    455 	}
    456 	error = bus_dmamem_map(sc->sc_dmat, &seg, nseg,
    457 	    sizeof(struct kse_control_data), (void **)&sc->sc_control_data,
    458 	    BUS_DMA_COHERENT);
    459 	if (error != 0) {
    460 		aprint_error_dev(self,
    461 		    "unable to map control data, error = %d\n", error);
    462 		goto fail_1;
    463 	}
    464 	error = bus_dmamap_create(sc->sc_dmat,
    465 	    sizeof(struct kse_control_data), 1,
    466 	    sizeof(struct kse_control_data), 0, 0, &sc->sc_cddmamap);
    467 	if (error != 0) {
    468 		aprint_error_dev(self,
    469 		    "unable to create control data DMA map, "
    470 		    "error = %d\n", error);
    471 		goto fail_2;
    472 	}
    473 	error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
    474 	    sc->sc_control_data, sizeof(struct kse_control_data), NULL, 0);
    475 	if (error != 0) {
    476 		aprint_error_dev(self,
    477 		    "unable to load control data DMA map, error = %d\n",
    478 		    error);
    479 		goto fail_3;
    480 	}
    481 	for (i = 0; i < KSE_TXQUEUELEN; i++) {
    482 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
    483 		    KSE_NTXSEGS, MCLBYTES, 0, 0,
    484 		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
    485 			aprint_error_dev(self,
    486 			    "unable to create tx DMA map %d, error = %d\n",
    487 			    i, error);
    488 			goto fail_4;
    489 		}
    490 	}
    491 	for (i = 0; i < KSE_NRXDESC; i++) {
    492 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
    493 		    1, MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
    494 			aprint_error_dev(self,
    495 			    "unable to create rx DMA map %d, error = %d\n",
    496 			    i, error);
    497 			goto fail_5;
    498 		}
    499 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
    500 	}
    501 
    502 	mii->mii_ifp = ifp;
    503 	mii->mii_readreg = kse_mii_readreg;
    504 	mii->mii_writereg = kse_mii_writereg;
    505 	mii->mii_statchg = kse_mii_statchg;
    506 
    507 	/* Initialize ifmedia structures. */
    508 	sc->sc_flowflags = 0;
    509 	if (sc->sc_chip == 0x8841) {
    510 		/* use port 1 builtin PHY as index 1 device */
    511 		sc->sc_ethercom.ec_mii = mii;
    512 		ifm = &mii->mii_media;
    513 		ifmedia_init(ifm, 0, kse_ifmedia_upd, kse_ifmedia_sts);
    514 		mii_attach(sc->sc_dev, mii, 0xffffffff, 1 /* PHY1 */,
    515 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
    516 		if (LIST_FIRST(&mii->mii_phys) == NULL) {
    517 			ifmedia_add(ifm, IFM_ETHER | IFM_NONE, 0, NULL);
    518 			ifmedia_set(ifm, IFM_ETHER | IFM_NONE);
    519 		} else
    520 			ifmedia_set(ifm, IFM_ETHER | IFM_AUTO);
    521 	} else {
    522 		/*
    523 		 * Pretend 100FDX with no alternative media selection.
    524 		 * The 8842 MAC is tied to a builtin 3 port switch, which can
    525 		 * do 4-level prioritised rate control in either the Tx or Rx
    526 		 * direction on any port.  However, this driver leaves the rate
    527 		 * unlimited, intending a 100Mbps maximum.  The 2 external ports
    528 		 * run in autonegotiation mode, and this driver provides no means
    529 		 * to manipulate or inspect their operational details.
    530 		 */
    531 		sc->sc_ethercom.ec_ifmedia = ifm = &sc->sc_media;
    532 		ifmedia_init(ifm, 0, NULL, nopifmedia_sts);
    533 		ifmedia_add(ifm, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
    534 		ifmedia_set(ifm, IFM_ETHER | IFM_100_TX | IFM_FDX);
    535 
    536 		aprint_normal_dev(self,
    537 		    "10baseT, 10baseT-FDX, 100baseTX, 100baseTX-FDX, auto\n");
    538 	}
    539 	ifm->ifm_media = ifm->ifm_cur->ifm_media; /* as if the user had requested it */
    540 
    541 	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
    542 	ifp->if_softc = sc;
    543 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    544 	ifp->if_ioctl = kse_ioctl;
    545 	ifp->if_start = kse_start;
    546 	ifp->if_watchdog = kse_watchdog;
    547 	ifp->if_init = kse_init;
    548 	ifp->if_stop = kse_stop;
    549 	IFQ_SET_READY(&ifp->if_snd);
    550 
    551 	/*
    552 	 * Capable of 802.1Q VLAN-sized frames; hw assisted tagging is left
    553 	 * unused.  IPv4, TCPv4, and UDPv4 checksums can be done in hardware.
    554 	 */
    555 	sc->sc_ethercom.ec_capabilities = ETHERCAP_VLAN_MTU;
    556 	ifp->if_capabilities =
    557 	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
    558 	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
    559 	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
    560 
    561 	if_attach(ifp);
    562 	if_deferred_start_init(ifp, NULL);
    563 	ether_ifattach(ifp, enaddr);
    564 
    565 	callout_init(&sc->sc_tick_ch, 0);
    566 	callout_setfunc(&sc->sc_tick_ch, phy_tick, sc);
    567 
    568 #ifdef KSE_EVENT_COUNTERS
    569 	int p = (sc->sc_chip == 0x8842) ? 3 : 1;
    570 	for (i = 0; i < p; i++) {
    571 		struct ksext *ee = &sc->sc_ext;
    572 		snprintf(ee->evcntname[i], sizeof(ee->evcntname[i]),
    573 		    "%s.%d", device_xname(sc->sc_dev), i+1);
    574 		evcnt_attach_dynamic(&ee->pev[i][0], EVCNT_TYPE_MISC,
    575 		    NULL, ee->evcntname[i], "RxLoPriotyByte");
    576 		evcnt_attach_dynamic(&ee->pev[i][1], EVCNT_TYPE_MISC,
    577 		    NULL, ee->evcntname[i], "RxHiPriotyByte");
    578 		evcnt_attach_dynamic(&ee->pev[i][2], EVCNT_TYPE_MISC,
    579 		    NULL, ee->evcntname[i], "RxUndersizePkt");
    580 		evcnt_attach_dynamic(&ee->pev[i][3], EVCNT_TYPE_MISC,
    581 		    NULL, ee->evcntname[i], "RxFragments");
    582 		evcnt_attach_dynamic(&ee->pev[i][4], EVCNT_TYPE_MISC,
    583 		    NULL, ee->evcntname[i], "RxOversize");
    584 		evcnt_attach_dynamic(&ee->pev[i][5], EVCNT_TYPE_MISC,
    585 		    NULL, ee->evcntname[i], "RxJabbers");
    586 		evcnt_attach_dynamic(&ee->pev[i][6], EVCNT_TYPE_MISC,
    587 		    NULL, ee->evcntname[i], "RxSymbolError");
    588 		evcnt_attach_dynamic(&ee->pev[i][7], EVCNT_TYPE_MISC,
    589 		    NULL, ee->evcntname[i], "RxCRCError");
    590 		evcnt_attach_dynamic(&ee->pev[i][8], EVCNT_TYPE_MISC,
    591 		    NULL, ee->evcntname[i], "RxAlignmentError");
    592 		evcnt_attach_dynamic(&ee->pev[i][9], EVCNT_TYPE_MISC,
    593 		    NULL, ee->evcntname[i], "RxControl8808Pkts");
    594 		evcnt_attach_dynamic(&ee->pev[i][10], EVCNT_TYPE_MISC,
    595 		    NULL, ee->evcntname[i], "RxPausePkts");
    596 		evcnt_attach_dynamic(&ee->pev[i][11], EVCNT_TYPE_MISC,
    597 		    NULL, ee->evcntname[i], "RxBroadcast");
    598 		evcnt_attach_dynamic(&ee->pev[i][12], EVCNT_TYPE_MISC,
    599 		    NULL, ee->evcntname[i], "RxMulticast");
    600 		evcnt_attach_dynamic(&ee->pev[i][13], EVCNT_TYPE_MISC,
    601 		    NULL, ee->evcntname[i], "RxUnicast");
    602 		evcnt_attach_dynamic(&ee->pev[i][14], EVCNT_TYPE_MISC,
    603 		    NULL, ee->evcntname[i], "Rx64Octets");
    604 		evcnt_attach_dynamic(&ee->pev[i][15], EVCNT_TYPE_MISC,
    605 		    NULL, ee->evcntname[i], "Rx65To127Octets");
    606 		evcnt_attach_dynamic(&ee->pev[i][16], EVCNT_TYPE_MISC,
    607 		    NULL, ee->evcntname[i], "Rx128To255Octets");
    608 		evcnt_attach_dynamic(&ee->pev[i][17], EVCNT_TYPE_MISC,
    609 		    NULL, ee->evcntname[i], "Rx255To511Octets");
    610 		evcnt_attach_dynamic(&ee->pev[i][18], EVCNT_TYPE_MISC,
    611 		    NULL, ee->evcntname[i], "Rx512To1023Octets");
    612 		evcnt_attach_dynamic(&ee->pev[i][19], EVCNT_TYPE_MISC,
    613 		    NULL, ee->evcntname[i], "Rx1024To1522Octets");
    614 		evcnt_attach_dynamic(&ee->pev[i][20], EVCNT_TYPE_MISC,
    615 		    NULL, ee->evcntname[i], "TxLoPriotyByte");
    616 		evcnt_attach_dynamic(&ee->pev[i][21], EVCNT_TYPE_MISC,
    617 		    NULL, ee->evcntname[i], "TxHiPriotyByte");
    618 		evcnt_attach_dynamic(&ee->pev[i][22], EVCNT_TYPE_MISC,
    619 		    NULL, ee->evcntname[i], "TxLateCollision");
    620 		evcnt_attach_dynamic(&ee->pev[i][23], EVCNT_TYPE_MISC,
    621 		    NULL, ee->evcntname[i], "TxPausePkts");
    622 		evcnt_attach_dynamic(&ee->pev[i][24], EVCNT_TYPE_MISC,
    623 		    NULL, ee->evcntname[i], "TxBroadcastPkts");
    624 		evcnt_attach_dynamic(&ee->pev[i][25], EVCNT_TYPE_MISC,
    625 		    NULL, ee->evcntname[i], "TxMulticastPkts");
    626 		evcnt_attach_dynamic(&ee->pev[i][26], EVCNT_TYPE_MISC,
    627 		    NULL, ee->evcntname[i], "TxUnicastPkts");
    628 		evcnt_attach_dynamic(&ee->pev[i][27], EVCNT_TYPE_MISC,
    629 		    NULL, ee->evcntname[i], "TxDeferred");
    630 		evcnt_attach_dynamic(&ee->pev[i][28], EVCNT_TYPE_MISC,
    631 		    NULL, ee->evcntname[i], "TxTotalCollision");
    632 		evcnt_attach_dynamic(&ee->pev[i][29], EVCNT_TYPE_MISC,
    633 		    NULL, ee->evcntname[i], "TxExcessiveCollision");
    634 		evcnt_attach_dynamic(&ee->pev[i][30], EVCNT_TYPE_MISC,
    635 		    NULL, ee->evcntname[i], "TxSingleCollision");
    636 		evcnt_attach_dynamic(&ee->pev[i][31], EVCNT_TYPE_MISC,
    637 		    NULL, ee->evcntname[i], "TxMultipleCollision");
    638 		evcnt_attach_dynamic(&ee->pev[i][32], EVCNT_TYPE_MISC,
    639 		    NULL, ee->evcntname[i], "TxDropPkts");
    640 		evcnt_attach_dynamic(&ee->pev[i][33], EVCNT_TYPE_MISC,
    641 		    NULL, ee->evcntname[i], "RxDropPkts");
    642 	}
    643 	callout_init(&sc->sc_stat_ch, 0);
    644 	callout_setfunc(&sc->sc_stat_ch, stat_tick, sc);
    645 #endif
    646 	return;
    647 
    648  fail_5:
    649 	for (i = 0; i < KSE_NRXDESC; i++) {
    650 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
    651 			bus_dmamap_destroy(sc->sc_dmat,
    652 			    sc->sc_rxsoft[i].rxs_dmamap);
    653 	}
    654  fail_4:
    655 	for (i = 0; i < KSE_TXQUEUELEN; i++) {
    656 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
    657 			bus_dmamap_destroy(sc->sc_dmat,
    658 			    sc->sc_txsoft[i].txs_dmamap);
    659 	}
    660 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
    661  fail_3:
    662 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
    663  fail_2:
    664 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
    665 	    sizeof(struct kse_control_data));
    666  fail_1:
    667 	bus_dmamem_free(sc->sc_dmat, &seg, nseg);
    668  fail_0:
    669 	pci_intr_disestablish(pc, sc->sc_ih);
    670  fail:
    671 	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_memsize);
    672 	return;
    673 }
    674 
    675 static int
    676 kse_ioctl(struct ifnet *ifp, u_long cmd, void *data)
    677 {
    678 	struct kse_softc *sc = ifp->if_softc;
    679 	struct ifreq *ifr = (struct ifreq *)data;
    680 	struct ifmedia *ifm;
    681 	int s, error;
    682 
    683 	s = splnet();
    684 
    685 	switch (cmd) {
    686 	case SIOCSIFMEDIA:
    687 		/* Flow control requires full-duplex mode. */
    688 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
    689 		    (ifr->ifr_media & IFM_FDX) == 0)
    690 			ifr->ifr_media &= ~IFM_ETH_FMASK;
    691 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
    692 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
    693 				/* We can do both TXPAUSE and RXPAUSE. */
    694 				ifr->ifr_media |=
    695 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
    696 			}
    697 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
    698 		}
    699 		ifm = (sc->sc_chip == 0x8841)
    700 		    ? &sc->sc_mii.mii_media : &sc->sc_media;
    701 		error = ifmedia_ioctl(ifp, ifr, ifm, cmd);
    702 		break;
    703 	default:
    704 		error = ether_ioctl(ifp, cmd, data);
    705 		if (error != ENETRESET)
    706 			break;
    707 		error = 0;
    708 		if (cmd == SIOCSIFCAP)
    709 			error = (*ifp->if_init)(ifp);
    710 		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
    711 			;
    712 		else if (ifp->if_flags & IFF_RUNNING) {
    713 			/*
    714 			 * Multicast list has changed; set the hardware filter
    715 			 * accordingly.
    716 			 */
    717 			kse_set_rcvfilt(sc);
    718 		}
    719 		break;
    720 	}
    721 
    722 	splx(s);
    723 
    724 	return error;
    725 }
    726 
    727 static int
    728 kse_init(struct ifnet *ifp)
    729 {
    730 	struct kse_softc *sc = ifp->if_softc;
    731 	uint32_t paddr;
    732 	int i, error = 0;
    733 
    734 	/* cancel pending I/O */
    735 	kse_stop(ifp, 0);
    736 
    737 	/* reset all registers but PCI configuration */
    738 	kse_reset(sc);
    739 
    740 	/* craft Tx descriptor ring */
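        	/*
        	 * Each descriptor's t3 field holds the bus address of the next
        	 * descriptor, and the last entry links back to descriptor 0, so
        	 * the DMA engine sees the list as a ring.
        	 */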
    741 	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
    742 	for (i = 0, paddr = KSE_CDTXADDR(sc, 1); i < KSE_NTXDESC - 1; i++) {
    743 		sc->sc_txdescs[i].t3 = paddr;
    744 		paddr += sizeof(struct tdes);
    745 	}
    746 	sc->sc_txdescs[KSE_NTXDESC - 1].t3 = KSE_CDTXADDR(sc, 0);
    747 	KSE_CDTXSYNC(sc, 0, KSE_NTXDESC,
    748 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    749 	sc->sc_txfree = KSE_NTXDESC;
    750 	sc->sc_txnext = 0;
    751 
    752 	for (i = 0; i < KSE_TXQUEUELEN; i++)
    753 		sc->sc_txsoft[i].txs_mbuf = NULL;
    754 	sc->sc_txsfree = KSE_TXQUEUELEN;
    755 	sc->sc_txsnext = 0;
    756 	sc->sc_txsdirty = 0;
    757 
    758 	/* craft Rx descriptor ring */
    759 	memset(sc->sc_rxdescs, 0, sizeof(sc->sc_rxdescs));
    760 	for (i = 0, paddr = KSE_CDRXADDR(sc, 1); i < KSE_NRXDESC - 1; i++) {
    761 		sc->sc_rxdescs[i].r3 = paddr;
    762 		paddr += sizeof(struct rdes);
    763 	}
    764 	sc->sc_rxdescs[KSE_NRXDESC - 1].r3 = KSE_CDRXADDR(sc, 0);
    765 	for (i = 0; i < KSE_NRXDESC; i++) {
    766 		if (sc->sc_rxsoft[i].rxs_mbuf == NULL) {
    767 			if ((error = add_rxbuf(sc, i)) != 0) {
    768 				aprint_error_dev(sc->sc_dev,
    769 				    "unable to allocate or map rx "
    770 				    "buffer %d, error = %d\n",
    771 				    i, error);
    772 				rxdrain(sc);
    773 				goto out;
    774 			}
    775 		}
    776 		else
    777 			KSE_INIT_RXDESC(sc, i);
    778 	}
    779 	sc->sc_rxptr = 0;
    780 
    781 	/* hand Tx/Rx rings to HW */
    782 	CSR_WRITE_4(sc, TDLB, KSE_CDTXADDR(sc, 0));
    783 	CSR_WRITE_4(sc, RDLB, KSE_CDRXADDR(sc, 0));
    784 
    785 	sc->sc_txc = TXC_TEN | TXC_EP | TXC_AC;
    786 	sc->sc_rxc = RXC_REN | RXC_RU | RXC_RB;
    787 	sc->sc_t1csum = sc->sc_mcsum = 0;
    788 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) {
    789 		sc->sc_rxc |= RXC_ICC;
    790 		sc->sc_mcsum |= M_CSUM_IPv4;
    791 	}
    792 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Tx) {
    793 		sc->sc_txc |= TXC_ICG;
    794 		sc->sc_t1csum |= T1_IPCKG;
    795 	}
    796 	if (ifp->if_capenable & IFCAP_CSUM_TCPv4_Rx) {
    797 		sc->sc_rxc |= RXC_TCC;
    798 		sc->sc_mcsum |= M_CSUM_TCPv4;
    799 	}
    800 	if (ifp->if_capenable & IFCAP_CSUM_TCPv4_Tx) {
    801 		sc->sc_txc |= TXC_TCG;
    802 		sc->sc_t1csum |= T1_TCPCKG;
    803 	}
    804 	if (ifp->if_capenable & IFCAP_CSUM_UDPv4_Rx) {
    805 		sc->sc_rxc |= RXC_UCC;
    806 		sc->sc_mcsum |= M_CSUM_UDPv4;
    807 	}
    808 	if (ifp->if_capenable & IFCAP_CSUM_UDPv4_Tx) {
    809 		sc->sc_txc |= TXC_UCG;
    810 		sc->sc_t1csum |= T1_UDPCKG;
    811 	}
    812 	sc->sc_txc |= (kse_burstsize << TXC_BS_SFT);
    813 	sc->sc_rxc |= (kse_burstsize << RXC_BS_SFT);
    814 
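        	/*
        	 * The 8842 host port is always treated as 100FDX (see
        	 * nopifmedia_sts()), so PAUSE flow control is simply enabled on
        	 * both the DMA engine and the switch host port.
        	 */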
    815 	if (sc->sc_chip == 0x8842) {
    816 		sc->sc_txc |= TXC_FCE;
    817 		sc->sc_rxc |= RXC_FCE;
    818 		CSR_WRITE_2(sc, SGCR3,
    819 		    CSR_READ_2(sc, SGCR3) | CR3_USEFC);
    820 	}
    821 
    822 	/* accept multicast frames or run promiscuous mode */
    823 	kse_set_rcvfilt(sc);
    824 
    825 	/* set current media */
    826 	if (sc->sc_chip == 0x8841)
    827 		(void)kse_ifmedia_upd(ifp);
    828 
    829 	/* enable transmitter and receiver */
    830 	CSR_WRITE_4(sc, MDTXC, sc->sc_txc);
    831 	CSR_WRITE_4(sc, MDRXC, sc->sc_rxc);
    832 	CSR_WRITE_4(sc, MDRSC, 1);
    833 
    834 	/* enable interrupts */
    835 	sc->sc_inten = INT_DMTS | INT_DMRS | INT_DMRBUS;
    836 	if (sc->sc_chip == 0x8841)
    837 		sc->sc_inten |= INT_DMLCS;
    838 	CSR_WRITE_4(sc, INTST, ~0);
    839 	CSR_WRITE_4(sc, INTEN, sc->sc_inten);
    840 
    841 	ifp->if_flags |= IFF_RUNNING;
    842 	ifp->if_flags &= ~IFF_OACTIVE;
    843 
    844 	if (sc->sc_chip == 0x8841) {
    845 		/* start one second timer */
    846 		callout_schedule(&sc->sc_tick_ch, hz);
    847 	}
    848 #ifdef KSE_EVENT_COUNTERS
    849 	/* start statistics gathering 1 minute timer; period should be tunable */
    850 	zerostats(sc);
    851 	callout_schedule(&sc->sc_stat_ch, hz * 60);
    852 #endif
    853 
    854  out:
    855 	if (error) {
    856 		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
    857 		ifp->if_timer = 0;
    858 		aprint_error_dev(sc->sc_dev, "interface not running\n");
    859 	}
    860 	return error;
    861 }
    862 
    863 static void
    864 kse_stop(struct ifnet *ifp, int disable)
    865 {
    866 	struct kse_softc *sc = ifp->if_softc;
    867 	struct kse_txsoft *txs;
    868 	int i;
    869 
    870 	if (sc->sc_chip == 0x8841)
    871 		callout_stop(&sc->sc_tick_ch);
    872 	callout_stop(&sc->sc_stat_ch);
    873 
    874 	sc->sc_txc &= ~TXC_TEN;
    875 	sc->sc_rxc &= ~RXC_REN;
    876 	CSR_WRITE_4(sc, MDTXC, sc->sc_txc);
    877 	CSR_WRITE_4(sc, MDRXC, sc->sc_rxc);
    878 
    879 	for (i = 0; i < KSE_TXQUEUELEN; i++) {
    880 		txs = &sc->sc_txsoft[i];
    881 		if (txs->txs_mbuf != NULL) {
    882 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
    883 			m_freem(txs->txs_mbuf);
    884 			txs->txs_mbuf = NULL;
    885 		}
    886 	}
    887 
    888 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
    889 	ifp->if_timer = 0;
    890 
    891 	if (disable)
    892 		rxdrain(sc);
    893 }
    894 
    895 static void
    896 kse_reset(struct kse_softc *sc)
    897 {
    898 
    899 	/* software reset */
    900 	CSR_WRITE_2(sc, GRR, 1);
    901 	delay(1000); /* PDF does not mention the delay amount */
    902 	CSR_WRITE_2(sc, GRR, 0);
    903 
    904 	/* enable switch function */
    905 	CSR_WRITE_2(sc, SIDER, 1);
    906 }
    907 
    908 static void
    909 kse_watchdog(struct ifnet *ifp)
    910 {
    911 	struct kse_softc *sc = ifp->if_softc;
    912 
    913 	/*
    914 	 * Since we're not interrupting every packet, sweep
    915 	 * up before we report an error.
    916 	 */
    917 	txreap(sc);
    918 
    919 	if (sc->sc_txfree != KSE_NTXDESC) {
    920 		aprint_error_dev(sc->sc_dev,
    921 		    "device timeout (txfree %d txsfree %d txnext %d)\n",
    922 		    sc->sc_txfree, sc->sc_txsfree, sc->sc_txnext);
    923 		if_statinc(ifp, if_oerrors);
    924 
    925 		/* Reset the interface. */
    926 		kse_init(ifp);
    927 	}
    928 	else if (ifp->if_flags & IFF_DEBUG)
    929 		aprint_error_dev(sc->sc_dev, "recovered from device timeout\n");
    930 
    931 	/* Try to get more packets going. */
    932 	kse_start(ifp);
    933 }
    934 
    935 static void
    936 kse_start(struct ifnet *ifp)
    937 {
    938 	struct kse_softc *sc = ifp->if_softc;
    939 	struct mbuf *m0, *m;
    940 	struct kse_txsoft *txs;
    941 	bus_dmamap_t dmamap;
    942 	int error, nexttx, lasttx, ofree, seg;
    943 	uint32_t tdes0;
    944 
    945 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
    946 		return;
    947 
    948 	/* Remember the previous number of free descriptors. */
    949 	ofree = sc->sc_txfree;
    950 
    951 	/*
    952 	 * Loop through the send queue, setting up transmit descriptors
    953 	 * until we drain the queue, or use up all available transmit
    954 	 * descriptors.
    955 	 */
    956 	for (;;) {
    957 		IFQ_POLL(&ifp->if_snd, m0);
    958 		if (m0 == NULL)
    959 			break;
    960 
    961 		if (sc->sc_txsfree < KSE_TXQUEUE_GC) {
    962 			txreap(sc);
    963 			if (sc->sc_txsfree == 0)
    964 				break;
    965 		}
    966 		txs = &sc->sc_txsoft[sc->sc_txsnext];
    967 		dmamap = txs->txs_dmamap;
    968 
    969 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
    970 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
    971 		if (error) {
    972 			if (error == EFBIG) {
    973 				aprint_error_dev(sc->sc_dev,
    974 				    "Tx packet consumes too many "
    975 				    "DMA segments, dropping...\n");
    976 				IFQ_DEQUEUE(&ifp->if_snd, m0);
    977 				m_freem(m0);
    978 				continue;
    979 			}
    980 			/* Short on resources, just stop for now. */
    981 			break;
    982 		}
    983 
    984 		if (dmamap->dm_nsegs > sc->sc_txfree) {
    985 			/*
    986 			 * Not enough free descriptors to transmit this
    987 			 * packet.  We haven't committed anything yet,
    988 			 * so just unload the DMA map, put the packet
    989 			 * back on the queue, and punt.	 Notify the upper
    990 			 * layer that there are no more slots left.
    991 			 */
    992 			ifp->if_flags |= IFF_OACTIVE;
    993 			bus_dmamap_unload(sc->sc_dmat, dmamap);
    994 			break;
    995 		}
    996 
    997 		IFQ_DEQUEUE(&ifp->if_snd, m0);
    998 
    999 		/*
   1000 		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
   1001 		 */
   1002 
   1003 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   1004 		    BUS_DMASYNC_PREWRITE);
   1005 
   1006 		tdes0 = 0; /* to postpone 1st segment T0_OWN write */
   1007 		lasttx = -1;
   1008 		for (nexttx = sc->sc_txnext, seg = 0;
   1009 		     seg < dmamap->dm_nsegs;
   1010 		     seg++, nexttx = KSE_NEXTTX(nexttx)) {
   1011 			struct tdes *tdes = &sc->sc_txdescs[nexttx];
   1012 			/*
   1013 			 * If this is the first descriptor we're
   1014 			 * enqueueing, don't set the OWN bit just
   1015 			 * yet.	 That could cause a race condition.
   1016 			 * We'll do it below.
   1017 			 */
   1018 			tdes->t2 = dmamap->dm_segs[seg].ds_addr;
   1019 			tdes->t1 = sc->sc_t1csum
   1020 			     | (dmamap->dm_segs[seg].ds_len & T1_TBS_MASK);
   1021 			tdes->t0 = tdes0;
   1022 			tdes0 = T0_OWN; /* 2nd and other segments */
   1023 			lasttx = nexttx;
   1024 		}
   1025 		/*
   1026 		 * An outgoing NFS mbuf must be unloaded as soon as Tx has
   1027 		 * completed.  Without T1_IC the NFS mbuf is left unack'ed for
   1028 		 * an excessive time and NFS stalls until kse_watchdog()
   1029 		 * calls txreap() to reclaim the unack'ed mbuf.
   1030 		 * It's painful to traverse every mbuf chain to determine
   1031 		 * whether someone is waiting for Tx completion.
   1032 		 */
   1033 		m = m0;
   1034 		do {
   1035 			if ((m->m_flags & M_EXT) && m->m_ext.ext_free) {
   1036 				sc->sc_txdescs[lasttx].t1 |= T1_IC;
   1037 				break;
   1038 			}
   1039 		} while ((m = m->m_next) != NULL);
   1040 
   1041 		/* Write deferred 1st segment T0_OWN at the final stage */
   1042 		sc->sc_txdescs[lasttx].t1 |= T1_LS;
   1043 		sc->sc_txdescs[sc->sc_txnext].t1 |= T1_FS;
   1044 		sc->sc_txdescs[sc->sc_txnext].t0 = T0_OWN;
   1045 		KSE_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
   1046 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1047 
   1048 		/* Tell DMA start transmit */
   1049 		CSR_WRITE_4(sc, MDTSC, 1);
   1050 
   1051 		txs->txs_mbuf = m0;
   1052 		txs->txs_firstdesc = sc->sc_txnext;
   1053 		txs->txs_lastdesc = lasttx;
   1054 		txs->txs_ndesc = dmamap->dm_nsegs;
   1055 
   1056 		sc->sc_txfree -= txs->txs_ndesc;
   1057 		sc->sc_txnext = nexttx;
   1058 		sc->sc_txsfree--;
   1059 		sc->sc_txsnext = KSE_NEXTTXS(sc->sc_txsnext);
   1060 		/*
   1061 		 * Pass the packet to any BPF listeners.
   1062 		 */
   1063 		bpf_mtap(ifp, m0, BPF_D_OUT);
   1064 	}
   1065 
   1066 	if (sc->sc_txsfree == 0 || sc->sc_txfree == 0) {
   1067 		/* No more slots left; notify upper layer. */
   1068 		ifp->if_flags |= IFF_OACTIVE;
   1069 	}
   1070 	if (sc->sc_txfree != ofree) {
   1071 		/* Set a watchdog timer in case the chip flakes out. */
   1072 		ifp->if_timer = 5;
   1073 	}
   1074 }
   1075 
   1076 static void
   1077 kse_set_rcvfilt(struct kse_softc *sc)
   1078 {
   1079 	struct ether_multistep step;
   1080 	struct ether_multi *enm;
   1081 	struct ethercom *ec = &sc->sc_ethercom;
   1082 	struct ifnet *ifp = &ec->ec_if;
   1083 	uint32_t crc, mchash[2];
   1084 	int i;
   1085 
   1086 	sc->sc_rxc &= ~(RXC_MHTE | RXC_RM | RXC_RA);
   1087 
   1088 	/* clear perfect match filter and prepare mcast hash table */
   1089 	for (i = 0; i < 16; i++)
   1090 		 CSR_WRITE_4(sc, MAAH0 + i*8, 0);
   1091 	crc = mchash[0] = mchash[1] = 0;
   1092 
   1093 	ETHER_LOCK(ec);
   1094 	if (ifp->if_flags & IFF_PROMISC) {
   1095 		ec->ec_flags |= ETHER_F_ALLMULTI;
   1096 		ETHER_UNLOCK(ec);
   1097 		/* run promisc. mode */
   1098 		sc->sc_rxc |= RXC_RA;
   1099 		goto update;
   1100 	}
   1101 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   1102 	ETHER_FIRST_MULTI(step, ec, enm);
   1103 	i = 0;
   1104 	while (enm != NULL) {
   1105 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   1106 			/*
   1107 			 * We must listen to a range of multicast addresses.
   1108 			 * For now, just accept all multicasts, rather than
   1109 			 * trying to set only those filter bits needed to match
   1110 			 * the range.  (At this time, the only use of address
   1111 			 * ranges is for IP multicast routing, for which the
   1112 			 * range is big enough to require all bits set.)
   1113 			 */
   1114 			ec->ec_flags |= ETHER_F_ALLMULTI;
   1115 			ETHER_UNLOCK(ec);
   1116 			/* accept all multicast */
   1117 			sc->sc_rxc |= RXC_RM;
   1118 			goto update;
   1119 		}
   1120 #if KSE_MCASTDEBUG == 1
   1121 		printf("[%d] %s\n", i, ether_sprintf(enm->enm_addrlo));
   1122 #endif
   1123 		if (i < 16) {
   1124 			/* use one of 16 additional MAC address slots for this mcast */
   1125 			uint32_t addr;
   1126 			uint8_t *ep = enm->enm_addrlo;
   1127 			addr = (ep[3] << 24) | (ep[2] << 16)
   1128 			     | (ep[1] << 8)  |  ep[0];
   1129 			CSR_WRITE_4(sc, MAAL0 + i*8, addr);
   1130 			addr = (ep[5] << 8) | ep[4];
   1131 			CSR_WRITE_4(sc, MAAH0 + i*8, addr | (1U << 31));
   1132 		} else {
   1133 			/* use hash table when too many */
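        			/*
        			 * The CRC's top bit selects MTR0 or MTR1, and bits
        			 * 30:26 select the bit within that 32-bit word.
        			 */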
   1134 			crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
   1135 			mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
   1136 		}
   1137 		ETHER_NEXT_MULTI(step, enm);
   1138 		i++;
   1139 	}
   1140 	ETHER_UNLOCK(ec);
   1141 
   1142 	if (crc)
   1143 		sc->sc_rxc |= RXC_MHTE;
   1144 	CSR_WRITE_4(sc, MTR0, mchash[0]);
   1145 	CSR_WRITE_4(sc, MTR1, mchash[1]);
   1146  update:
   1147 	/* With RA or RM, MHTE/MTR0/MTR1 are never consulted. */
   1148 	return;
   1149 }
   1150 
   1151 static int
   1152 add_rxbuf(struct kse_softc *sc, int idx)
   1153 {
   1154 	struct kse_rxsoft *rxs = &sc->sc_rxsoft[idx];
   1155 	struct mbuf *m;
   1156 	int error;
   1157 
   1158 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   1159 	if (m == NULL)
   1160 		return ENOBUFS;
   1161 
   1162 	MCLGET(m, M_DONTWAIT);
   1163 	if ((m->m_flags & M_EXT) == 0) {
   1164 		m_freem(m);
   1165 		return ENOBUFS;
   1166 	}
   1167 
   1168 	if (rxs->rxs_mbuf != NULL)
   1169 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   1170 
   1171 	rxs->rxs_mbuf = m;
   1172 
   1173 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
   1174 	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
   1175 	if (error) {
   1176 		aprint_error_dev(sc->sc_dev,
   1177 		    "can't load rx DMA map %d, error = %d\n", idx, error);
   1178 		panic("kse_add_rxbuf");
   1179 	}
   1180 
   1181 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   1182 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   1183 
   1184 	KSE_INIT_RXDESC(sc, idx);
   1185 
   1186 	return 0;
   1187 }
   1188 
   1189 static void
   1190 rxdrain(struct kse_softc *sc)
   1191 {
   1192 	struct kse_rxsoft *rxs;
   1193 	int i;
   1194 
   1195 	for (i = 0; i < KSE_NRXDESC; i++) {
   1196 		rxs = &sc->sc_rxsoft[i];
   1197 		if (rxs->rxs_mbuf != NULL) {
   1198 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   1199 			m_freem(rxs->rxs_mbuf);
   1200 			rxs->rxs_mbuf = NULL;
   1201 		}
   1202 	}
   1203 }
   1204 
   1205 static int
   1206 kse_intr(void *arg)
   1207 {
   1208 	struct kse_softc *sc = arg;
   1209 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1210 	uint32_t isr;
   1211 
   1212 	if ((isr = CSR_READ_4(sc, INTST)) == 0)
   1213 		return 0;
   1214 
   1215 	if (isr & INT_DMRS)
   1216 		rxintr(sc);
   1217 	if (isr & INT_DMTS)
   1218 		txreap(sc);
   1219 	if (isr & INT_DMLCS)
   1220 		lnkchg(sc);
   1221 	if (isr & INT_DMRBUS)
   1222 		aprint_error_dev(sc->sc_dev, "Rx descriptor full\n");
   1223 
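        	/* Writing the handled status bits back to INTST acknowledges them. */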
   1224 	CSR_WRITE_4(sc, INTST, isr);
   1225 
   1226 	if (ifp->if_flags & IFF_RUNNING)
   1227 		if_schedule_deferred_start(ifp);
   1228 
   1229 	return 1;
   1230 }
   1231 
   1232 static void
   1233 rxintr(struct kse_softc *sc)
   1234 {
   1235 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1236 	struct kse_rxsoft *rxs;
   1237 	struct mbuf *m;
   1238 	uint32_t rxstat;
   1239 	int i, len;
   1240 
   1241 	for (i = sc->sc_rxptr; /*CONSTCOND*/ 1; i = KSE_NEXTRX(i)) {
   1242 		rxs = &sc->sc_rxsoft[i];
   1243 
   1244 		KSE_CDRXSYNC(sc, i,
   1245 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1246 
   1247 		rxstat = sc->sc_rxdescs[i].r0;
   1248 
   1249 		if (rxstat & R0_OWN) /* desc is left empty */
   1250 			break;
   1251 
   1252 		/* R0_FS | R0_LS must have been marked for this desc */
   1253 
   1254 		if (rxstat & R0_ES) {
   1255 			if_statinc(ifp, if_ierrors);
   1256 #define PRINTERR(bit, str)						\
   1257 			if (rxstat & (bit))				\
   1258 				aprint_error_dev(sc->sc_dev,		\
   1259 				    "%s\n", str)
   1260 			PRINTERR(R0_TL, "frame too long");
   1261 			PRINTERR(R0_RF, "runt frame");
   1262 			PRINTERR(R0_CE, "bad FCS");
   1263 #undef PRINTERR
   1264 			KSE_INIT_RXDESC(sc, i);
   1265 			continue;
   1266 		}
   1267 
   1268 		/* HW errata; frame might be too small or too large */
   1269 
   1270 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   1271 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   1272 
   1273 		len = rxstat & R0_FL_MASK;
   1274 		len -= ETHER_CRC_LEN;	/* Trim CRC off */
   1275 		m = rxs->rxs_mbuf;
   1276 
   1277 		if (add_rxbuf(sc, i) != 0) {
   1278 			if_statinc(ifp, if_ierrors);
   1279 			KSE_INIT_RXDESC(sc, i);
   1280 			bus_dmamap_sync(sc->sc_dmat,
   1281 			    rxs->rxs_dmamap, 0,
   1282 			    rxs->rxs_dmamap->dm_mapsize,
   1283 			    BUS_DMASYNC_PREREAD);
   1284 			continue;
   1285 		}
   1286 
   1287 		m_set_rcvif(m, ifp);
   1288 		m->m_pkthdr.len = m->m_len = len;
   1289 
   1290 		if (sc->sc_mcsum) {
   1291 			m->m_pkthdr.csum_flags |= sc->sc_mcsum;
   1292 			if (rxstat & R0_IPE)
   1293 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   1294 			if (rxstat & (R0_TCPE | R0_UDPE))
   1295 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   1296 		}
   1297 		if_percpuq_enqueue(ifp->if_percpuq, m);
   1298 #ifdef KSEDIAGNOSTIC
   1299 		if (kse_monitor_rxintr > 0) {
   1300 			aprint_error_dev(sc->sc_dev,
   1301 			    "m stat %x data %p len %d\n",
   1302 			    rxstat, m->m_data, m->m_len);
   1303 		}
   1304 #endif
   1305 	}
   1306 	sc->sc_rxptr = i;
   1307 }
   1308 
   1309 static void
   1310 txreap(struct kse_softc *sc)
   1311 {
   1312 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1313 	struct kse_txsoft *txs;
   1314 	uint32_t txstat;
   1315 	int i;
   1316 
   1317 	ifp->if_flags &= ~IFF_OACTIVE;
   1318 
   1319 	for (i = sc->sc_txsdirty; sc->sc_txsfree != KSE_TXQUEUELEN;
   1320 	     i = KSE_NEXTTXS(i), sc->sc_txsfree++) {
   1321 		txs = &sc->sc_txsoft[i];
   1322 
   1323 		KSE_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
   1324 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1325 
   1326 		txstat = sc->sc_txdescs[txs->txs_lastdesc].t0;
   1327 
   1328 		if (txstat & T0_OWN) /* desc is still in use */
   1329 			break;
   1330 
   1331 		/* There is no way to tell transmission status per frame */
   1332 
   1333 		if_statinc(ifp, if_opackets);
   1334 
   1335 		sc->sc_txfree += txs->txs_ndesc;
   1336 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   1337 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   1338 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   1339 		m_freem(txs->txs_mbuf);
   1340 		txs->txs_mbuf = NULL;
   1341 	}
   1342 	sc->sc_txsdirty = i;
   1343 	if (sc->sc_txsfree == KSE_TXQUEUELEN)
   1344 		ifp->if_timer = 0;
   1345 }
   1346 
   1347 static void
   1348 lnkchg(struct kse_softc *sc)
   1349 {
   1350 	struct ifmediareq ifmr;
   1351 
   1352 #if KSE_LINKDEBUG == 1
   1353 	uint16_t p1sr = CSR_READ_2(sc, P1SR);
   1354 printf("link %s detected\n", (p1sr & PxSR_LINKUP) ? "up" : "down");
   1355 #endif
   1356 	kse_ifmedia_sts(&sc->sc_ethercom.ec_if, &ifmr);
   1357 }
   1358 
   1359 static int
   1360 kse_ifmedia_upd(struct ifnet *ifp)
   1361 {
   1362 	struct kse_softc *sc = ifp->if_softc;
   1363 	struct ifmedia *ifm = &sc->sc_mii.mii_media;
   1364 	uint16_t p1cr4;
   1365 
   1366 	p1cr4 = 0;
   1367 	if (IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_AUTO) {
   1368 		p1cr4 |= PxCR_STARTNEG;	/* restart AN */
   1369 		p1cr4 |= PxCR_AUTOEN;	/* enable AN */
   1370 		p1cr4 |= PxCR_USEFC;	/* advertise flow control pause */
   1371 		p1cr4 |= 0xf;		/* adv. 100FDX,100HDX,10FDX,10HDX */
   1372 	} else {
   1373 		if (IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_100_TX)
   1374 			p1cr4 |= PxCR_SPD100;
   1375 		if (ifm->ifm_media & IFM_FDX)
   1376 			p1cr4 |= PxCR_USEFDX;
   1377 	}
   1378 	CSR_WRITE_2(sc, P1CR4, p1cr4);
   1379 #if KSE_LINKDEBUG == 1
   1380 printf("P1CR4: %04x\n", p1cr4);
   1381 #endif
   1382 	return 0;
   1383 }
   1384 
   1385 static void
   1386 kse_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
   1387 {
   1388 	struct kse_softc *sc = ifp->if_softc;
   1389 	struct mii_data *mii = &sc->sc_mii;
   1390 
   1391 	mii_pollstat(mii);
   1392 	ifmr->ifm_status = mii->mii_media_status;
   1393 	ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) |
   1394 	    sc->sc_flowflags;
   1395 }
   1396 
   1397 static void
   1398 nopifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
   1399 {
   1400 	struct kse_softc *sc = ifp->if_softc;
   1401 	struct ifmedia *ifm = &sc->sc_media;
   1402 
   1403 #if KSE_LINKDEBUG == 2
   1404 printf("p1sr: %04x, p2sr: %04x\n", CSR_READ_2(sc, P1SR), CSR_READ_2(sc, P2SR));
   1405 #endif
   1406 
   1407 	/* 8842 MAC pretends 100FDX all the time */
   1408 	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
   1409 	ifmr->ifm_active = ifm->ifm_cur->ifm_media |
   1410 	    IFM_FLOW | IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE;
   1411 }
   1412 
   1413 static void
   1414 phy_tick(void *arg)
   1415 {
   1416 	struct kse_softc *sc = arg;
   1417 	struct mii_data *mii = &sc->sc_mii;
   1418 	int s;
   1419 
   1420 	s = splnet();
   1421 	mii_tick(mii);
   1422 	splx(s);
   1423 
   1424 	callout_schedule(&sc->sc_tick_ch, hz);
   1425 }
   1426 
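        /*
         * The port 1 PHY registers of the 8841 are visible as memory-mapped
         * CSRs; this table translates MII register numbers 0-5 into the
         * corresponding CSR offsets for kse_mii_readreg()/_writereg().
         */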
   1427 static const uint16_t phy1csr[] = {
   1428 	/* 0 BMCR */	0x4d0,
   1429 	/* 1 BMSR */	0x4d2,
   1430 	/* 2 PHYID1 */	0x4d6,	/* 0x0022 - PHY1HR */
   1431 	/* 3 PHYID2 */	0x4d4,	/* 0x1430 - PHY1LR */
   1432 	/* 4 ANAR */	0x4d8,
   1433 	/* 5 ANLPAR */	0x4da,
   1434 };
   1435 
   1436 int
   1437 kse_mii_readreg(device_t self, int phy, int reg, uint16_t *val)
   1438 {
   1439 	struct kse_softc *sc = device_private(self);
   1440 
   1441 	if (phy != 1 || reg >= __arraycount(phy1csr) || reg < 0)
   1442 		return EINVAL;
   1443 	*val = CSR_READ_2(sc, phy1csr[reg]);
   1444 	return 0;
   1445 }
   1446 
   1447 int
   1448 kse_mii_writereg(device_t self, int phy, int reg, uint16_t val)
   1449 {
   1450 	struct kse_softc *sc = device_private(self);
   1451 
   1452 	if (phy != 1 || reg >= __arraycount(phy1csr) || reg < 0)
   1453 		return EINVAL;
   1454 	CSR_WRITE_2(sc, phy1csr[reg], val);
   1455 	return 0;
   1456 }
   1457 
   1458 void
   1459 kse_mii_statchg(struct ifnet *ifp)
   1460 {
   1461 	struct kse_softc *sc = ifp->if_softc;
   1462 	struct mii_data *mii = &sc->sc_mii;
   1463 
   1464 #if KSE_LINKDEBUG == 1
   1465 	/* decode P1SR register value */
   1466 	uint16_t p1sr = CSR_READ_2(sc, P1SR);
   1467 	printf("P1SR %04x, spd%d", p1sr, (p1sr & PxSR_SPD100) ? 100 : 10);
   1468 	if (p1sr & PxSR_FDX)
   1469 		printf(",full-duplex");
   1470 	if (p1sr & PxSR_RXFLOW)
   1471 		printf(",rxpause");
   1472 	if (p1sr & PxSR_TXFLOW)
   1473 		printf(",txpause");
   1474 	printf("\n");
   1475 	/* show resolved mii(4) parameters to compare against above */
   1476 	printf("MII spd%d",
   1477 	    (int)(sc->sc_ethercom.ec_if.if_baudrate / IF_Mbps(1)));
   1478 	if (mii->mii_media_active & IFM_FDX)
   1479 		printf(",full-duplex");
   1480 	if (mii->mii_media_active & IFM_FLOW) {
   1481 		printf(",flowcontrol");
   1482 		if (mii->mii_media_active & IFM_ETH_RXPAUSE)
   1483 			printf(",rxpause");
   1484 		if (mii->mii_media_active & IFM_ETH_TXPAUSE)
   1485 			printf(",txpause");
   1486 	}
   1487 	printf("\n");
   1488 #endif
   1489 	/* Get flow control negotiation result. */
   1490 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   1491 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags)
   1492 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   1493 
   1494 	/* Adjust MAC PAUSE flow control. */
   1495 	if ((mii->mii_media_active & IFM_FDX)
   1496 	    && (sc->sc_flowflags & IFM_ETH_TXPAUSE))
   1497 		sc->sc_txc |= TXC_FCE;
   1498 	else
   1499 		sc->sc_txc &= ~TXC_FCE;
   1500 	if ((mii->mii_media_active & IFM_FDX)
   1501 	    && (sc->sc_flowflags & IFM_ETH_RXPAUSE))
   1502 		sc->sc_rxc |= RXC_FCE;
   1503 	else
   1504 		sc->sc_rxc &= ~RXC_FCE;
   1505 	CSR_WRITE_4(sc, MDTXC, sc->sc_txc);
   1506 	CSR_WRITE_4(sc, MDRXC, sc->sc_rxc);
   1507 #if KSE_LINKDEBUG == 1
   1508 	printf("%ctxfce, %crxfce\n",
   1509 	    (sc->sc_txc & TXC_FCE) ? '+' : '-',
   1510 	    (sc->sc_rxc & RXC_FCE) ? '+' : '-');
   1511 #endif
   1512 }
   1513 
   1514 #ifdef KSE_EVENT_COUNTERS
   1515 static void
   1516 stat_tick(void *arg)
   1517 {
   1518 	struct kse_softc *sc = arg;
   1519 	struct ksext *ee = &sc->sc_ext;
   1520 	int nport, p, i, val;
   1521 
   1522 	nport = (sc->sc_chip == 0x8842) ? 3 : 1;
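        	/*
        	 * MIB counters are read through the indirect access registers:
        	 * write the counter index into IACR, poll IADR5 until the
        	 * counter-valid bit (bit 30 of the indirect data) is set, then
        	 * combine bits 29:16 from IADR5 with bits 15:0 from IADR4.
        	 * Bit 31 indicates the counter has overflowed.
        	 */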
   1523 	for (p = 0; p < nport; p++) {
   1524 		for (i = 0; i < 32; i++) {
   1525 			val = 0x1c00 | (p * 0x20 + i);
   1526 			CSR_WRITE_2(sc, IACR, val);
   1527 			do {
   1528 				val = CSR_READ_2(sc, IADR5) << 16;
   1529 			} while ((val & (1U << 30)) == 0);
   1530 			if (val & (1U << 31)) {
   1531 				(void)CSR_READ_2(sc, IADR4);
   1532 				val = 0x3fffffff; /* has made overflow */
   1533 			}
   1534 			else {
   1535 				val &= 0x3fff0000;		/* 29:16 */
   1536 				val |= CSR_READ_2(sc, IADR4);	/* 15:0 */
   1537 			}
   1538 			ee->pev[p][i].ev_count += val; /* i (0-31) */
   1539 		}
   1540 		CSR_WRITE_2(sc, IACR, 0x1c00 + 0x100 + p);
   1541 		ee->pev[p][32].ev_count = CSR_READ_2(sc, IADR4); /* 32 */
   1542 		CSR_WRITE_2(sc, IACR, 0x1c00 + 0x100 + p * 3 + 1);
   1543 		ee->pev[p][33].ev_count = CSR_READ_2(sc, IADR4); /* 33 */
   1544 	}
   1545 	callout_schedule(&sc->sc_stat_ch, hz * 60);
   1546 }
   1547 
   1548 static void
   1549 zerostats(struct kse_softc *sc)
   1550 {
   1551 	struct ksext *ee = &sc->sc_ext;
   1552 	int nport, p, i, val;
   1553 
   1554 	/* Make sure all the HW counters are zeroed */
   1555 	nport = (sc->sc_chip == 0x8842) ? 3 : 1;
   1556 	for (p = 0; p < nport; p++) {
   1557 		for (i = 0; i < 31; i++) {
   1558 			val = 0x1c00 | (p * 0x20 + i);
   1559 			CSR_WRITE_2(sc, IACR, val);
   1560 			do {
   1561 				val = CSR_READ_2(sc, IADR5) << 16;
   1562 			} while ((val & (1U << 30)) == 0);
   1563 			(void)CSR_READ_2(sc, IADR4);
   1564 			ee->pev[p][i].ev_count = 0;
   1565 		}
   1566 	}
   1567 }
   1568 #endif
   1569