/*	$NetBSD: if_sq.c,v 1.33 2007/03/04 06:00:39 christos Exp $	*/

/*
 * Copyright (c) 2001 Rafal K. Boni
 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Portions of this code are derived from software contributed to The
 * NetBSD Foundation by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_sq.c,v 1.33 2007/03/04 06:00:39 christos Exp $");

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>

#include <machine/endian.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/sysconf.h>

#include <dev/ic/seeq8003reg.h>

#include <sgimips/hpc/sqvar.h>
#include <sgimips/hpc/hpcvar.h>
#include <sgimips/hpc/hpcreg.h>

#include <dev/arcbios/arcbios.h>
#include <dev/arcbios/arcbiosvar.h>

/* XXX: presumably a debugging leftover -- this nullifies every "static"
   qualifier below. */
#define static

/*
 * Short TODO list:
 *	(1) Do counters for bad-RX packets.
 *	(2) Allow multi-segment transmits, instead of copying to a single,
 *	    contiguous mbuf.
 *	(3) Verify sq_stop() turns off enough stuff; I was still getting
 *	    seeq interrupts after sq_stop().
 *	(4) Implement EDLC modes: especially packet auto-pad and simplex
 *	    mode.
 *	(5) Should the driver filter out its own transmissions in non-EDLC
 *	    mode?
 *	(6) Multicast support -- multicast filter, address management, ...
 *	(7) Deal with RB0 (recv buffer overflow) on reception.  Will need
 *	    to figure out whether RB0 is read-only, as stated in one spot
 *	    in the HPC spec, or read-write (i.e., is "write a one to clear
 *	    it" the correct thing to do?).
 */

#if defined(SQ_DEBUG)
int sq_debug = 0;
#define SQ_DPRINTF(x)	do { if (sq_debug) printf x; } while (0)
#else
#define SQ_DPRINTF(x)	do { } while (0)
#endif

static int	sq_match(struct device *, struct cfdata *, void *);
static void	sq_attach(struct device *, struct device *, void *);
static int	sq_init(struct ifnet *);
static void	sq_start(struct ifnet *);
static void	sq_stop(struct ifnet *, int);
static void	sq_watchdog(struct ifnet *);
static int	sq_ioctl(struct ifnet *, u_long, void *);

static void	sq_set_filter(struct sq_softc *);
static int	sq_intr(void *);
static int	sq_rxintr(struct sq_softc *);
static int	sq_txintr(struct sq_softc *);
static void	sq_txring_hpc1(struct sq_softc *);
static void	sq_txring_hpc3(struct sq_softc *);
static void	sq_reset(struct sq_softc *);
static int	sq_add_rxbuf(struct sq_softc *, int);
static void	sq_dump_buffer(u_int32_t addr, u_int32_t len);
static void	sq_trace_dump(struct sq_softc *);

static void	enaddr_aton(const char *, u_int8_t *);

CFATTACH_DECL(sq, sizeof(struct sq_softc),
    sq_match, sq_attach, NULL, NULL);

#define ETHER_PAD_LEN	(ETHER_MIN_LEN - ETHER_CRC_LEN)

#define sq_seeq_read(sc, off) \
	bus_space_read_1(sc->sc_regt, sc->sc_regh, off)
#define sq_seeq_write(sc, off, val) \
	bus_space_write_1(sc->sc_regt, sc->sc_regh, off, val)

#define sq_hpc_read(sc, off) \
	bus_space_read_4(sc->sc_hpct, sc->sc_hpch, off)
#define sq_hpc_write(sc, off, val) \
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, off, val)
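
/*
 * The Seeq 8003's own registers are byte-wide and are accessed with
 * 1-byte bus_space operations through sc_regh, while the HPC DMA
 * engine's registers are 32 bits wide and use 4-byte accesses through
 * sc_hpch; the two register banks live behind separate subregions.
 */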

/* MAC address offset for non-onboard implementations */
#define SQ_HPC_EEPROM_ENADDR	250

#define SGI_OUI_0		0x08
#define SGI_OUI_1		0x00
#define SGI_OUI_2		0x69

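/*
 * The match routine probes for the Seeq indirectly: it first checks,
 * via platform.badaddr(), that the HPC's Ethernet reset register
 * responds at all, pulses a reset through it, and then checks that the
 * low byte of the transmit status register reads back as TXSTAT_OLDNEW,
 * which should identify a freshly reset Seeq 8003-family chip.
 */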
static int
sq_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct hpc_attach_args *ha = aux;

	if (strcmp(ha->ha_name, cf->cf_name) == 0) {
		uint32_t reset, txstat;

		reset = MIPS_PHYS_TO_KSEG1(ha->ha_sh +
		    ha->ha_dmaoff + ha->hpc_regs->enetr_reset);
		txstat = MIPS_PHYS_TO_KSEG1(ha->ha_sh +
		    ha->ha_devoff + (SEEQ_TXSTAT << 2));

		if (platform.badaddr((void *)reset, sizeof(reset)))
			return (0);

		*(volatile uint32_t *)reset = 0x1;
		delay(20);
		*(volatile uint32_t *)reset = 0x0;

		if (platform.badaddr((void *)txstat, sizeof(txstat)))
			return (0);

		if ((*(volatile uint32_t *)txstat & 0xff) == TXSTAT_OLDNEW)
			return (1);
	}

	return (0);
}

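/*
 * Attach maps the HPC DMA and Seeq register banks, then allocates the
 * DMA-able control structure (the descriptor rings) and the per-buffer
 * DMA maps.  The fail_* labels at the bottom unwind these allocations
 * in exactly the reverse order, so each allocation step jumps to the
 * label that frees everything acquired before it.
 */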
static void
sq_attach(struct device *parent, struct device *self, void *aux)
{
	int i, err;
	const char *macaddr;
	struct sq_softc *sc = (void *)self;
	struct hpc_attach_args *haa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	sc->sc_hpct = haa->ha_st;
	sc->hpc_regs = haa->hpc_regs;      /* HPC register definitions */

	if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
				       haa->ha_dmaoff,
				       sc->hpc_regs->enet_regs_size,
				       &sc->sc_hpch)) != 0) {
		printf(": unable to map HPC DMA registers, error = %d\n", err);
		goto fail_0;
	}

	sc->sc_regt = haa->ha_st;
	if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
				       haa->ha_devoff,
				       sc->hpc_regs->enet_devregs_size,
				       &sc->sc_regh)) != 0) {
		printf(": unable to map Seeq registers, error = %d\n", err);
		goto fail_0;
	}

	sc->sc_dmat = haa->ha_dmat;

	if ((err = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct sq_control),
				    PAGE_SIZE, PAGE_SIZE, &sc->sc_cdseg,
				    1, &sc->sc_ncdseg, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to allocate control data, error = %d\n", err);
		goto fail_0;
	}

	if ((err = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg,
				  sizeof(struct sq_control),
				  (void **)&sc->sc_control,
				  BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf(": unable to map control data, error = %d\n", err);
		goto fail_1;
	}

	if ((err = bus_dmamap_create(sc->sc_dmat, sizeof(struct sq_control),
				     1, sizeof(struct sq_control), PAGE_SIZE,
				     BUS_DMA_NOWAIT, &sc->sc_cdmap)) != 0) {
		printf(": unable to create DMA map for control data, error "
			"= %d\n", err);
		goto fail_2;
	}

	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cdmap, sc->sc_control,
				   sizeof(struct sq_control),
				   NULL, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load DMA map for control data, error "
			"= %d\n", err);
		goto fail_3;
	}

	memset(sc->sc_control, 0, sizeof(struct sq_control));

	/* Create transmit buffer DMA maps */
	for (i = 0; i < SQ_NTXDESC; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
					     MCLBYTES, 0, BUS_DMA_NOWAIT,
					     &sc->sc_txmap[i])) != 0) {
			printf(": unable to create tx DMA map %d, "
			    "error = %d\n", i, err);
			goto fail_4;
		}
	}

	/* Create receive buffer DMA maps */
	for (i = 0; i < SQ_NRXDESC; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
					     MCLBYTES, 0, BUS_DMA_NOWAIT,
					     &sc->sc_rxmap[i])) != 0) {
			printf(": unable to create rx DMA map %d, "
			    "error = %d\n", i, err);
			goto fail_5;
		}
	}

	/* Pre-allocate the receive buffers.  */
	for (i = 0; i < SQ_NRXDESC; i++) {
		if ((err = sq_add_rxbuf(sc, i)) != 0) {
			printf(": unable to allocate or map rx buffer %d,"
			       " error = %d\n", i, err);
			goto fail_6;
		}
	}

	memcpy(sc->sc_enaddr, &haa->hpc_eeprom[SQ_HPC_EEPROM_ENADDR],
	    ETHER_ADDR_LEN);

	/*
	 * If our MAC address is bogus, obtain it from ARCBIOS. This will
	 * be true of the onboard HPC3 on IP22, since there is no EEPROM;
	 * the DS1386 RTC's battery-backed RAM is used instead.
	 */
	if (sc->sc_enaddr[0] != SGI_OUI_0 || sc->sc_enaddr[1] != SGI_OUI_1 ||
	    sc->sc_enaddr[2] != SGI_OUI_2) {
		macaddr = ARCBIOS->GetEnvironmentVariable("eaddr");
		if (macaddr == NULL) {
			printf(": unable to get MAC address!\n");
			goto fail_6;
		}
		enaddr_aton(macaddr, sc->sc_enaddr);
	}

	evcnt_attach_dynamic(&sc->sq_intrcnt, EVCNT_TYPE_INTR, NULL,
	    self->dv_xname, "intr");

	if (cpu_intr_establish(haa->ha_irq, IPL_NET, sq_intr, sc) == NULL) {
		printf(": unable to establish interrupt!\n");
		goto fail_6;
	}

	/* Reset the chip to a known state. */
	sq_reset(sc);

	/*
	 * Determine if we're an 8003 or 80c03 by setting the first
	 * MAC address register to non-zero, and then reading it back.
	 * If it's zero, we have an 80c03, because we will have read
	 * the TxCollLSB register.
	 */
	sq_seeq_write(sc, SEEQ_TXCOLLS0, 0xa5);
	if (sq_seeq_read(sc, SEEQ_TXCOLLS0) == 0)
		sc->sc_type = SQ_TYPE_80C03;
	else
		sc->sc_type = SQ_TYPE_8003;
	sq_seeq_write(sc, SEEQ_TXCOLLS0, 0x00);

	printf(": SGI Seeq %s\n",
	    sc->sc_type == SQ_TYPE_80C03 ? "80c03" : "8003");

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(sc->sc_enaddr));

	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_init = sq_init;
	ifp->if_stop = sq_stop;
	ifp->if_start = sq_start;
	ifp->if_ioctl = sq_ioctl;
	ifp->if_watchdog = sq_watchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_NOTRAILERS | IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	memset(&sc->sq_trace, 0, sizeof(sc->sq_trace));
	/* Done! */
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
fail_6:
	for (i = 0; i < SQ_NRXDESC; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
fail_5:
	for (i = 0; i < SQ_NRXDESC; i++) {
		if (sc->sc_rxmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmap[i]);
	}
fail_4:
	for (i = 0; i < SQ_NTXDESC; i++) {
		if (sc->sc_txmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_txmap[i]);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cdmap);
fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdmap);
fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control,
	    sizeof(struct sq_control));
fail_1:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg);
fail_0:
	return;
}

/* Set up data to get the interface up and running. */
int
sq_init(struct ifnet *ifp)
{
	int i;
	struct sq_softc *sc = ifp->if_softc;

	/* Cancel any in-progress I/O */
	sq_stop(ifp, 0);

	sc->sc_nextrx = 0;

	sc->sc_nfreetx = SQ_NTXDESC;
	sc->sc_nexttx = sc->sc_prevtx = 0;

	SQ_TRACE(SQ_RESET, sc, 0, 0);

	/* Set into 8003 mode, bank 0 to program Ethernet address */
	sq_seeq_write(sc, SEEQ_TXCMD, TXCMD_BANK0);

	/* Now write the address */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		sq_seeq_write(sc, i, sc->sc_enaddr[i]);

	sc->sc_rxcmd = RXCMD_IE_CRC |
		       RXCMD_IE_DRIB |
		       RXCMD_IE_SHORT |
		       RXCMD_IE_END |
		       RXCMD_IE_GOOD;

	/*
	 * Set the receive filter -- this will add some bits to the
	 * prototype RXCMD register.  Do this before setting the
	 * transmit config register, since we might need to switch
	 * banks.
	 */
	sq_set_filter(sc);

	/* Set up Seeq transmit command register */
	sq_seeq_write(sc, SEEQ_TXCMD, TXCMD_IE_UFLOW |
				      TXCMD_IE_COLL |
				      TXCMD_IE_16COLL |
				      TXCMD_IE_GOOD);

	/* Now write the receive command register. */
	sq_seeq_write(sc, SEEQ_RXCMD, sc->sc_rxcmd);

	/*
	 * Set up HPC Ethernet PIO and DMA configurations.
	 *
	 * The PROM appears to do most of this for the onboard HPC3, but
	 * not for the Challenge S's IOPLUS chip. We copy how the onboard
	 * chip is configured and assume that it's correct for both.
	 */
	if (sc->hpc_regs->revision == 3) {
		u_int32_t dmareg, pioreg;

		pioreg = HPC3_ENETR_PIOCFG_P1(1) |
			 HPC3_ENETR_PIOCFG_P2(6) |
			 HPC3_ENETR_PIOCFG_P3(1);

		dmareg = HPC3_ENETR_DMACFG_D1(6) |
			 HPC3_ENETR_DMACFG_D2(2) |
			 HPC3_ENETR_DMACFG_D3(0) |
			 HPC3_ENETR_DMACFG_FIX_RXDC |
			 HPC3_ENETR_DMACFG_FIX_INTR |
			 HPC3_ENETR_DMACFG_FIX_EOP |
			 HPC3_ENETR_DMACFG_TIMEOUT;

		sq_hpc_write(sc, HPC3_ENETR_PIOCFG, pioreg);
		sq_hpc_write(sc, HPC3_ENETR_DMACFG, dmareg);
	}

	/* Pass the start of the receive ring to the HPC */
	sq_hpc_write(sc, sc->hpc_regs->enetr_ndbp, SQ_CDRXADDR(sc, 0));

	/* And turn on the HPC Ethernet receive channel */
	sq_hpc_write(sc, sc->hpc_regs->enetr_ctl,
	    sc->hpc_regs->enetr_ctl_active);

	/*
	 * Turn off delayed receive interrupts on HPC1.
	 * (see Hollywood HPC Specification 2.1.4.3)
	 */
	if (sc->hpc_regs->revision != 3)
		sq_hpc_write(sc, HPC1_ENET_INTDELAY, HPC1_ENET_INTDELAY_OFF);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

static void
sq_set_filter(struct sq_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;

	/*
	 * Check for promiscuous mode.  Also implies
	 * all-multicast.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rxcmd |= RXCMD_REC_ALL;
		ifp->if_flags |= IFF_ALLMULTI;
		return;
	}

	/*
	 * The 8003 has no hash table.  If we have any multicast
	 * addresses on the list, enable reception of all multicast
	 * frames.
	 *
	 * XXX The 80c03 has a hash table.  We should use it.
	 */
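	/*
	 * A sketch of what using the 80c03's hash table might look like,
	 * assuming the common take-the-top-6-CRC-bits scheme.  The hash
	 * computation uses real NetBSD helpers, but the mechanism for
	 * loading the table is hypothetical and would need to be checked
	 * against the 80c03 documentation:
	 *
	 *	u_int8_t hash[8] = { 0 };
	 *	u_int32_t crc;
	 *
	 *	ETHER_FIRST_MULTI(step, ec, enm);
	 *	while (enm != NULL) {
	 *		// Address ranges can't be hashed; punt to allmulti.
	 *		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
	 *		    ETHER_ADDR_LEN) != 0)
	 *			goto allmulti;
	 *		crc = ether_crc32_be(enm->enm_addrlo,
	 *		    ETHER_ADDR_LEN) >> 26;
	 *		hash[crc >> 3] |= 1 << (crc & 7);
	 *		ETHER_NEXT_MULTI(step, enm);
	 *	}
	 *	// ...then load hash[] into the (hypothetical) 80c03
	 *	// hash-table registers via sq_seeq_write().
	 */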

	ETHER_FIRST_MULTI(step, ec, enm);

	if (enm == NULL) {
		sc->sc_rxcmd &= ~RXCMD_REC_MASK;
		sc->sc_rxcmd |= RXCMD_REC_BROAD;

		ifp->if_flags &= ~IFF_ALLMULTI;
		return;
	}

	sc->sc_rxcmd |= RXCMD_REC_MULTI;
	ifp->if_flags |= IFF_ALLMULTI;
}

int
sq_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	int s, error = 0;

	SQ_TRACE(SQ_IOCTL, (struct sq_softc *)ifp->if_softc, 0, 0);

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		/*
		 * Multicast list has changed; set the hardware filter
		 * accordingly.
		 */
		if (ifp->if_flags & IFF_RUNNING)
			error = sq_init(ifp);
		else
			error = 0;
	}

	splx(s);
	return (error);
}

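/*
 * Transmit path.  sq_start() drains the interface send queue into the
 * transmit descriptor ring: each packet is DMA-mapped (copied into a
 * fresh mbuf first if it needs padding or won't map in one segment),
 * described by ring descriptors, and then handed to the HPC transmit
 * DMA channel -- either by splicing onto a still-active chain or by
 * (re)starting the engine if it has gone idle.
 */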
void
sq_start(struct ifnet *ifp)
{
	struct sq_softc *sc = ifp->if_softc;
	u_int32_t status;
	struct mbuf *m0, *m;
	bus_dmamap_t dmamap;
	int err, totlen, nexttx, firsttx, lasttx = -1, ofree, seg;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_nfreetx;
	firsttx = sc->sc_nexttx;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->sc_nfreetx != 0) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		dmamap = sc->sc_txmap[sc->sc_nexttx];

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 * Also copy it if we need to pad, so that we are sure there
		 * is room for the pad buffer.
		 * XXX the right way of doing this is to use a static buffer
		 * for padding and adding it to the transmit descriptor (see
		 * sys/dev/pci/if_tl.c for example). We can't do this here yet
		 * because we can't send packets with more than one fragment.
		 */
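		/*
		 * ETHER_PAD_LEN works out to ETHER_MIN_LEN (64) minus
		 * ETHER_CRC_LEN (4), i.e. 60 bytes: the hardware appends
		 * the CRC itself, so we only have to zero-pad the frame
		 * up to the minimum length less the CRC.
		 */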
		if (m0->m_pkthdr.len < ETHER_PAD_LEN ||
		    bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
						      BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}

			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			if (m0->m_pkthdr.len < ETHER_PAD_LEN) {
				memset(mtod(m, char *) + m0->m_pkthdr.len, 0,
				    ETHER_PAD_LEN - m0->m_pkthdr.len);
				m->m_pkthdr.len = m->m_len = ETHER_PAD_LEN;
			} else
				m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;

			if ((err = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
						m, BUS_DMA_NOWAIT)) != 0) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, err);
				break;
			}
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.
		 */
		if (dmamap->dm_nsegs > sc->sc_nfreetx) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed to anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX is it worth it?
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			if (m != NULL)
				m_freem(m);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		SQ_TRACE(SQ_ENQUEUE, sc, sc->sc_nexttx, 0);

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptors.
		 */
		for (nexttx = sc->sc_nexttx, seg = 0, totlen = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = SQ_NEXTTX(nexttx)) {
			if (sc->hpc_regs->revision == 3) {
				sc->sc_txdesc[nexttx].hpc3_hdd_bufptr =
					    dmamap->dm_segs[seg].ds_addr;
				sc->sc_txdesc[nexttx].hpc3_hdd_ctl =
					    dmamap->dm_segs[seg].ds_len;
			} else {
				sc->sc_txdesc[nexttx].hpc1_hdd_bufptr =
					    dmamap->dm_segs[seg].ds_addr;
				sc->sc_txdesc[nexttx].hpc1_hdd_ctl =
					    dmamap->dm_segs[seg].ds_len;
			}
			sc->sc_txdesc[nexttx].hdd_descptr =
					    SQ_CDTXADDR(sc, SQ_NEXTTX(nexttx));
			lasttx = nexttx;
			totlen += dmamap->dm_segs[seg].ds_len;
		}

		/* Last descriptor gets end-of-packet */
		KASSERT(lasttx != -1);
		if (sc->hpc_regs->revision == 3)
			sc->sc_txdesc[lasttx].hpc3_hdd_ctl |=
			    HPC3_HDD_CTL_EOPACKET;
		else
			sc->sc_txdesc[lasttx].hpc1_hdd_ctl |=
			    HPC1_HDD_CTL_EOPACKET;

		SQ_DPRINTF(("%s: transmit %d-%d, len %d\n", sc->sc_dev.dv_xname,
						       sc->sc_nexttx, lasttx,
						       totlen));

		if (ifp->if_flags & IFF_DEBUG) {
			printf("     transmit chain:\n");
			for (seg = sc->sc_nexttx;; seg = SQ_NEXTTX(seg)) {
				printf("     descriptor %d:\n", seg);
				printf("       hdd_bufptr:  0x%08x\n",
					(sc->hpc_regs->revision == 3) ?
					    sc->sc_txdesc[seg].hpc3_hdd_bufptr :
					    sc->sc_txdesc[seg].hpc1_hdd_bufptr);
				printf("       hdd_ctl:     0x%08x\n",
					(sc->hpc_regs->revision == 3) ?
					    sc->sc_txdesc[seg].hpc3_hdd_ctl :
					    sc->sc_txdesc[seg].hpc1_hdd_ctl);
				printf("       hdd_descptr: 0x%08x\n",
					sc->sc_txdesc[seg].hdd_descptr);

				if (seg == lasttx)
					break;
			}
		}

		/* Sync the descriptors we're using. */
		SQ_CDTXSYNC(sc, sc->sc_nexttx, dmamap->dm_nsegs,
				BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Store a pointer to the packet so we can free it later */
		sc->sc_txmbuf[sc->sc_nexttx] = m0;

		/* Advance the tx pointer. */
		sc->sc_nfreetx -= dmamap->dm_nsegs;
		sc->sc_nexttx = nexttx;
	}

	/* All transmit descriptors used up, let upper layers know */
	if (sc->sc_nfreetx == 0)
		ifp->if_flags |= IFF_OACTIVE;

	if (sc->sc_nfreetx != ofree) {
		SQ_DPRINTF(("%s: %d packets enqueued, first %d, INTR on %d\n",
			    sc->sc_dev.dv_xname, lasttx - firsttx + 1,
			    firsttx, lasttx));

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued, mark it as the last
		 * descriptor.
		 *
		 * HPC1_HDD_CTL_INTR will generate an interrupt on
		 * HPC1. HPC3 requires HPC3_HDD_CTL_EOPACKET in
		 * addition to HPC3_HDD_CTL_INTR to interrupt.
		 */
		KASSERT(lasttx != -1);
		if (sc->hpc_regs->revision == 3) {
			sc->sc_txdesc[lasttx].hpc3_hdd_ctl |=
			    HPC3_HDD_CTL_INTR | HPC3_HDD_CTL_EOCHAIN;
		} else {
			sc->sc_txdesc[lasttx].hpc1_hdd_ctl |= HPC1_HDD_CTL_INTR;
			sc->sc_txdesc[lasttx].hpc1_hdd_bufptr |=
			    HPC1_HDD_CTL_EOCHAIN;
		}

		SQ_CDTXSYNC(sc, lasttx, 1,
				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/*
		 * There is a potential race condition here if the HPC
		 * DMA channel is active and we try to update either
		 * the 'next descriptor' pointer in the HPC PIO space
		 * or the 'next descriptor' pointer in a previous
		 * descriptor.
		 *
		 * To avoid this, if the channel is active, we rely on
		 * the transmit interrupt routine noticing that there
		 * are more packets to send and restarting the HPC DMA
		 * engine, rather than mucking with the DMA state here.
		 */
		status = sq_hpc_read(sc, sc->hpc_regs->enetx_ctl);

		if ((status & sc->hpc_regs->enetx_ctl_active) != 0) {
			SQ_TRACE(SQ_ADD_TO_DMA, sc, firsttx, status);

			/*
			 * NB: hpc3_hdd_ctl == hpc1_hdd_bufptr, and
			 * HPC1_HDD_CTL_EOCHAIN == HPC3_HDD_CTL_EOCHAIN
			 */
			sc->sc_txdesc[SQ_PREVTX(firsttx)].hpc3_hdd_ctl &=
			    ~HPC3_HDD_CTL_EOCHAIN;

			if (sc->hpc_regs->revision != 3)
				sc->sc_txdesc[SQ_PREVTX(firsttx)].hpc1_hdd_ctl
				    &= ~HPC1_HDD_CTL_INTR;

			SQ_CDTXSYNC(sc, SQ_PREVTX(firsttx), 1,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		} else if (sc->hpc_regs->revision == 3) {
			SQ_TRACE(SQ_START_DMA, sc, firsttx, status);

			sq_hpc_write(sc, HPC3_ENETX_NDBP, SQ_CDTXADDR(sc,
			    firsttx));

			/* Kick DMA channel into life */
			sq_hpc_write(sc, HPC3_ENETX_CTL, HPC3_ENETX_CTL_ACTIVE);
		} else {
			/*
			 * In the HPC1 case where transmit DMA is
			 * inactive, we can either kick off if
			 * the ring was previously empty, or call
			 * our transmit interrupt handler to
			 * figure out if the ring stopped short
			 * and restart at the right place.
			 */
			if (ofree == SQ_NTXDESC) {
				SQ_TRACE(SQ_START_DMA, sc, firsttx, status);

				sq_hpc_write(sc, HPC1_ENETX_NDBP,
				    SQ_CDTXADDR(sc, firsttx));
				sq_hpc_write(sc, HPC1_ENETX_CFXBP,
				    SQ_CDTXADDR(sc, firsttx));
				sq_hpc_write(sc, HPC1_ENETX_CBP,
				    SQ_CDTXADDR(sc, firsttx));

				/* Kick DMA channel into life */
				sq_hpc_write(sc, HPC1_ENETX_CTL,
				    HPC1_ENETX_CTL_ACTIVE);
			} else
				sq_txring_hpc1(sc);
		}

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

void
sq_stop(struct ifnet *ifp, int disable)
{
	int i;
	struct sq_softc *sc = ifp->if_softc;

	for (i = 0; i < SQ_NTXDESC; i++) {
		if (sc->sc_txmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
			m_freem(sc->sc_txmbuf[i]);
			sc->sc_txmbuf[i] = NULL;
		}
	}

	/* Clear Seeq transmit/receive command registers */
	sq_seeq_write(sc, SEEQ_TXCMD, 0);
	sq_seeq_write(sc, SEEQ_RXCMD, 0);

	sq_reset(sc);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

/* Device timeout/watchdog routine. */
void
sq_watchdog(struct ifnet *ifp)
{
	u_int32_t status;
	struct sq_softc *sc = ifp->if_softc;

	status = sq_hpc_read(sc, sc->hpc_regs->enetx_ctl);
	log(LOG_ERR, "%s: device timeout (prev %d, next %d, free %d, "
	    "status %08x)\n", sc->sc_dev.dv_xname, sc->sc_prevtx,
	    sc->sc_nexttx, sc->sc_nfreetx, status);

	sq_trace_dump(sc);

	memset(&sc->sq_trace, 0, sizeof(sc->sq_trace));
	sc->sq_trace_idx = 0;

	++ifp->if_oerrors;

	sq_init(ifp);
}

static void
sq_trace_dump(struct sq_softc *sc)
{
	int i;
	const char *act;

	for (i = 0; i < sc->sq_trace_idx; i++) {
		switch (sc->sq_trace[i].action) {
		case SQ_RESET:		act = "SQ_RESET";		break;
		case SQ_ADD_TO_DMA:	act = "SQ_ADD_TO_DMA";		break;
		case SQ_START_DMA:	act = "SQ_START_DMA";		break;
		case SQ_DONE_DMA:	act = "SQ_DONE_DMA";		break;
		case SQ_RESTART_DMA:	act = "SQ_RESTART_DMA";		break;
		case SQ_TXINTR_ENTER:	act = "SQ_TXINTR_ENTER";	break;
		case SQ_TXINTR_EXIT:	act = "SQ_TXINTR_EXIT";		break;
		case SQ_TXINTR_BUSY:	act = "SQ_TXINTR_BUSY";		break;
		case SQ_IOCTL:		act = "SQ_IOCTL";		break;
		case SQ_ENQUEUE:	act = "SQ_ENQUEUE";		break;
		default:		act = "UNKNOWN";
		}

		printf("%s: [%03d] action %-16s buf %03d free %03d "
		    "status %08x line %d\n", sc->sc_dev.dv_xname, i, act,
		    sc->sq_trace[i].bufno, sc->sq_trace[i].freebuf,
		    sc->sq_trace[i].status, sc->sq_trace[i].line);
	}
}

static int
sq_intr(void *arg)
{
	struct sq_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int handled = 0;
	u_int32_t stat;

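	/*
	 * Reading the reset register also yields interrupt status; bit
	 * 0x2 appears to be the interrupt-pending flag, and writing the
	 * value back with that bit set acknowledges the interrupt.
	 */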
	stat = sq_hpc_read(sc, sc->hpc_regs->enetr_reset);

	if ((stat & 2) == 0)
		SQ_DPRINTF(("%s: Unexpected interrupt!\n",
		    sc->sc_dev.dv_xname));
	else
		sq_hpc_write(sc, sc->hpc_regs->enetr_reset, (stat | 2));

	/*
	 * If the interface isn't running, the interrupt couldn't
	 * possibly have come from us.
	 */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	sc->sq_intrcnt.ev_count++;

	/* Always check for received packets */
	if (sq_rxintr(sc) != 0)
		handled++;

	/* Only handle transmit interrupts if we actually sent something */
	if (sc->sc_nfreetx < SQ_NTXDESC) {
		sq_txintr(sc);
		handled++;
	}

#if NRND > 0
	if (handled)
		rnd_add_uint32(&sc->rnd_source, stat);
#endif
	return (handled);
}

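/*
 * Receive path.  Each receive buffer is an mbuf cluster filled in by
 * the HPC: the frame data starts two bytes into the buffer (the usual
 * trick to 32-bit align the IP header), and the Seeq status byte for
 * the frame sits directly after the data.  The descriptor byte count
 * apparently holds the space left unused in the buffer, which is how
 * the frame length is recovered below (ext_size - 3 - BYTECNT, the 3
 * accounting for the 2-byte front pad plus the 1-byte status).
 */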
static int
sq_rxintr(struct sq_softc *sc)
{
	int count = 0;
	struct mbuf *m;
	int i, framelen;
	u_int8_t pktstat;
	u_int32_t status;
	u_int32_t ctl_reg;
	int new_end, orig_end;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	for (i = sc->sc_nextrx;; i = SQ_NEXTRX(i)) {
		SQ_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD |
		    BUS_DMASYNC_POSTWRITE);

		/*
		 * If this is a CPU-owned buffer, we're at the end of the list.
		 */
		if (sc->hpc_regs->revision == 3)
			ctl_reg = sc->sc_rxdesc[i].hpc3_hdd_ctl &
			    HPC3_HDD_CTL_OWN;
		else
			ctl_reg = sc->sc_rxdesc[i].hpc1_hdd_ctl &
			    HPC1_HDD_CTL_OWN;

		if (ctl_reg) {
#if defined(SQ_DEBUG)
			u_int32_t reg;

			reg = sq_hpc_read(sc, sc->hpc_regs->enetr_ctl);
			SQ_DPRINTF(("%s: rxintr: done at %d (ctl %08x)\n",
			    sc->sc_dev.dv_xname, i, reg));
#endif
			break;
		}

		count++;

		m = sc->sc_rxmbuf[i];
		framelen = m->m_ext.ext_size - 3;
		if (sc->hpc_regs->revision == 3)
			framelen -=
			    HPC3_HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hpc3_hdd_ctl);
		else
			framelen -=
			    HPC1_HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hpc1_hdd_ctl);

		/* Now sync the actual packet data */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
		    sc->sc_rxmap[i]->dm_mapsize, BUS_DMASYNC_POSTREAD);

		pktstat = *((u_int8_t *)m->m_data + framelen + 2);

		if ((pktstat & RXSTAT_GOOD) == 0) {
			ifp->if_ierrors++;

			if (pktstat & RXSTAT_OFLOW)
				printf("%s: receive FIFO overflow\n",
				    sc->sc_dev.dv_xname);

			bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
			    sc->sc_rxmap[i]->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			SQ_INIT_RXDESC(sc, i);
			SQ_DPRINTF(("%s: sq_rxintr: buf %d no RXSTAT_GOOD\n",
			    sc->sc_dev.dv_xname, i));
			continue;
		}

		if (sq_add_rxbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
			    sc->sc_rxmap[i]->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			SQ_INIT_RXDESC(sc, i);
			SQ_DPRINTF(("%s: sq_rxintr: buf %d sq_add_rxbuf() "
			    "failed\n", sc->sc_dev.dv_xname, i));
			continue;
		}

		m->m_data += 2;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = framelen;

		ifp->if_ipackets++;

		SQ_DPRINTF(("%s: sq_rxintr: buf %d len %d\n",
		    sc->sc_dev.dv_xname, i, framelen));

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		(*ifp->if_input)(ifp, m);
	}

	/* If anything happened, move ring start/end pointers to new spot */
	if (i != sc->sc_nextrx) {
		/*
		 * NB: hpc3_hdd_ctl == hpc1_hdd_bufptr, and
		 * HPC1_HDD_CTL_EOCHAIN == HPC3_HDD_CTL_EOCHAIN
		 */

		new_end = SQ_PREVRX(i);
		sc->sc_rxdesc[new_end].hpc3_hdd_ctl |= HPC3_HDD_CTL_EOCHAIN;
		SQ_CDRXSYNC(sc, new_end, BUS_DMASYNC_PREREAD |
		    BUS_DMASYNC_PREWRITE);

		orig_end = SQ_PREVRX(sc->sc_nextrx);
		sc->sc_rxdesc[orig_end].hpc3_hdd_ctl &= ~HPC3_HDD_CTL_EOCHAIN;
		SQ_CDRXSYNC(sc, orig_end, BUS_DMASYNC_PREREAD |
		    BUS_DMASYNC_PREWRITE);

		sc->sc_nextrx = i;
	}

	status = sq_hpc_read(sc, sc->hpc_regs->enetr_ctl);

	/* If receive channel is stopped, restart it... */
	if ((status & sc->hpc_regs->enetr_ctl_active) == 0) {
		/* Pass the start of the receive ring to the HPC */
		sq_hpc_write(sc, sc->hpc_regs->enetr_ndbp, SQ_CDRXADDR(sc,
		    sc->sc_nextrx));

		/* And turn on the HPC Ethernet receive channel */
		sq_hpc_write(sc, sc->hpc_regs->enetr_ctl,
		    sc->hpc_regs->enetr_ctl_active);
	}

	return count;
}

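/*
 * Transmit interrupt.  On HPC1 the transmit status bits are read from
 * the upper halfword of the control register, hence the shift of 16
 * below; on HPC3 they sit in the low bits and no shift is needed.
 */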
static int
sq_txintr(struct sq_softc *sc)
{
	int shift = 0;
	u_int32_t status, tmp;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if (sc->hpc_regs->revision != 3)
		shift = 16;

	status = sq_hpc_read(sc, sc->hpc_regs->enetx_ctl) >> shift;

	SQ_TRACE(SQ_TXINTR_ENTER, sc, sc->sc_prevtx, status);

	tmp = (sc->hpc_regs->enetx_ctl_active >> shift) | TXSTAT_GOOD;
	if ((status & tmp) == 0) {
		if (status & TXSTAT_COLL)
			ifp->if_collisions++;

		if (status & TXSTAT_UFLOW) {
			printf("%s: transmit underflow\n", sc->sc_dev.dv_xname);
			ifp->if_oerrors++;
		}

		if (status & TXSTAT_16COLL) {
			printf("%s: max collisions reached\n",
			    sc->sc_dev.dv_xname);
			ifp->if_oerrors++;
			ifp->if_collisions += 16;
		}
	}

	/* prevtx now points to next xmit packet not yet finished */
	if (sc->hpc_regs->revision == 3)
		sq_txring_hpc3(sc);
	else
		sq_txring_hpc1(sc);

	/* If we have buffers free, let upper layers know */
	if (sc->sc_nfreetx > 0)
		ifp->if_flags &= ~IFF_OACTIVE;

	/* If all packets have left the coop, cancel watchdog */
	if (sc->sc_nfreetx == SQ_NTXDESC)
		ifp->if_timer = 0;

	SQ_TRACE(SQ_TXINTR_EXIT, sc, sc->sc_prevtx, status);
	sq_start(ifp);

	return 1;
}

/*
 * Reclaim used transmit descriptors and restart the transmit DMA
 * engine if necessary.
 */
static void
sq_txring_hpc1(struct sq_softc *sc)
{
	/*
	 * HPC1 doesn't tag transmitted descriptors; however, the NDBP
	 * register points to the next descriptor that has not yet been
	 * processed.  If DMA is not in progress, we can safely reclaim
	 * all descriptors up to NDBP, and, if necessary, restart DMA at
	 * NDBP.  Otherwise, if DMA is active, we can only safely reclaim
	 * up to CBP.
	 *
	 * For now, we'll only reclaim on inactive DMA and assume
	 * that a sufficiently large ring keeps us out of trouble.
	 */
	u_int32_t reclaimto, status;
	int reclaimall, i = sc->sc_prevtx;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	status = sq_hpc_read(sc, HPC1_ENETX_CTL);
	if (status & HPC1_ENETX_CTL_ACTIVE) {
		SQ_TRACE(SQ_TXINTR_BUSY, sc, i, status);
		return;
	} else
		reclaimto = sq_hpc_read(sc, HPC1_ENETX_NDBP);

	if (sc->sc_nfreetx == 0 && SQ_CDTXADDR(sc, i) == reclaimto)
		reclaimall = 1;
	else
		reclaimall = 0;

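	/*
	 * Note the ambiguity the reclaimall flag resolves: when the ring
	 * was completely full and NDBP has come back around to sc_prevtx,
	 * the now-idle DMA engine must have walked the entire ring, so
	 * every descriptor is done -- not none of them.
	 */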
	while (sc->sc_nfreetx < SQ_NTXDESC) {
		if (SQ_CDTXADDR(sc, i) == reclaimto && !reclaimall)
			break;

		SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs,
				BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		/* Sync the packet data, unload DMA map, free mbuf */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap[i], 0,
				sc->sc_txmap[i]->dm_mapsize,
				BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
		m_freem(sc->sc_txmbuf[i]);
		sc->sc_txmbuf[i] = NULL;

		ifp->if_opackets++;
		sc->sc_nfreetx++;

		SQ_TRACE(SQ_DONE_DMA, sc, i, status);

		i = SQ_NEXTTX(i);
	}

	if (sc->sc_nfreetx < SQ_NTXDESC) {
		SQ_TRACE(SQ_RESTART_DMA, sc, i, status);

		KASSERT(reclaimto == SQ_CDTXADDR(sc, i));

		sq_hpc_write(sc, HPC1_ENETX_CFXBP, reclaimto);
		sq_hpc_write(sc, HPC1_ENETX_CBP, reclaimto);

		/* Kick DMA channel into life */
		sq_hpc_write(sc, HPC1_ENETX_CTL, HPC1_ENETX_CTL_ACTIVE);

		/*
		 * Set a watchdog timer in case the chip
		 * flakes out.
		 */
		ifp->if_timer = 5;
	}

	sc->sc_prevtx = i;
}

/*
 * Reclaim used transmit descriptors and restart the transmit DMA
 * engine if necessary.
 */
static void
sq_txring_hpc3(struct sq_softc *sc)
{
	/*
	 * HPC3 tags descriptors with a bit once they've been
	 * transmitted. We need only free each XMITDONE'd
	 * descriptor, and restart the DMA engine if any
	 * descriptors are left over.
	 */
	int i;
	u_int32_t status = 0;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	i = sc->sc_prevtx;
	while (sc->sc_nfreetx < SQ_NTXDESC) {
		/*
		 * Check status first so we don't end up with a case of
		 * the buffer not being finished while the DMA channel
		 * has gone idle.
		 */
		status = sq_hpc_read(sc, HPC3_ENETX_CTL);

		SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs,
				BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		/* Check for used descriptor and restart DMA chain if needed */
		if (!(sc->sc_txdesc[i].hpc3_hdd_ctl & HPC3_HDD_CTL_XMITDONE)) {
			if ((status & HPC3_ENETX_CTL_ACTIVE) == 0) {
				SQ_TRACE(SQ_RESTART_DMA, sc, i, status);

				sq_hpc_write(sc, HPC3_ENETX_NDBP,
				    SQ_CDTXADDR(sc, i));

				/* Kick DMA channel into life */
				sq_hpc_write(sc, HPC3_ENETX_CTL,
				    HPC3_ENETX_CTL_ACTIVE);

				/*
				 * Set a watchdog timer in case the chip
				 * flakes out.
				 */
				ifp->if_timer = 5;
			} else
				SQ_TRACE(SQ_TXINTR_BUSY, sc, i, status);
			break;
		}

		/* Sync the packet data, unload DMA map, free mbuf */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap[i], 0,
				sc->sc_txmap[i]->dm_mapsize,
				BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
		m_freem(sc->sc_txmbuf[i]);
		sc->sc_txmbuf[i] = NULL;

		ifp->if_opackets++;
		sc->sc_nfreetx++;

		SQ_TRACE(SQ_DONE_DMA, sc, i, status);
		i = SQ_NEXTTX(i);
	}

	sc->sc_prevtx = i;
}

void
sq_reset(struct sq_softc *sc)
{
	/* Stop HPC DMA channels */
	sq_hpc_write(sc, sc->hpc_regs->enetr_ctl, 0);
	sq_hpc_write(sc, sc->hpc_regs->enetx_ctl, 0);

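	/* Pulse the reset register: assert, hold ~20us, then release. */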
	sq_hpc_write(sc, sc->hpc_regs->enetr_reset, 3);
	delay(20);
	sq_hpc_write(sc, sc->hpc_regs->enetr_reset, 0);
}

/* sq_add_rxbuf: Add a receive buffer to the indicated descriptor. */
int
sq_add_rxbuf(struct sq_softc *sc, int idx)
{
	int err;
	struct mbuf *m;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (sc->sc_rxmbuf[idx] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[idx]);

	sc->sc_rxmbuf[idx] = m;

	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_rxmap[idx],
				   m->m_ext.ext_buf, m->m_ext.ext_size,
				   NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, err);
		panic("sq_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[idx], 0,
			sc->sc_rxmap[idx]->dm_mapsize, BUS_DMASYNC_PREREAD);

	SQ_INIT_RXDESC(sc, idx);

	return 0;
}

void
sq_dump_buffer(u_int32_t addr, u_int32_t len)
{
	u_int i;
	u_char *physaddr = (u_char *)MIPS_PHYS_TO_KSEG1((void *)addr);

	if (len == 0)
		return;

	printf("%p: ", physaddr);

	for (i = 0; i < len; i++) {
		printf("%02x ", *(physaddr + i) & 0xff);
		if ((i % 16) == 15 && i != len - 1)
			printf("\n%p: ", physaddr + i + 1);
	}

	printf("\n");
}

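/*
 * Parse a colon-separated Ethernet address string, as returned by the
 * ARCBIOS "eaddr" environment variable (e.g. "08:00:69:0a:0b:0c"),
 * into binary form.  Non-hex characters simply leave the nibble being
 * assembled unchanged; no validation is attempted.
 */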
void
enaddr_aton(const char *str, u_int8_t *eaddr)
{
	int i;
	char c;

	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		if (*str == ':')
			str++;

		c = *str++;
		if (isdigit(c)) {
			eaddr[i] = (c - '0');
		} else if (isxdigit(c)) {
			eaddr[i] = (toupper(c) + 10 - 'A');
		}

		c = *str++;
		if (isdigit(c)) {
			eaddr[i] = (eaddr[i] << 4) | (c - '0');
		} else if (isxdigit(c)) {
			eaddr[i] = (eaddr[i] << 4) | (toupper(c) + 10 - 'A');
		}
	}
}
   1370