/*	$NetBSD: if_sq.c,v 1.4 2001/06/08 14:32:05 thorpej Exp $	*/

/*
 * Copyright (c) 2001 Rafal K. Boni
 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Portions of this code are derived from software contributed to The
 * NetBSD Foundation by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_inet.h"
#include "opt_ns.h"
#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>

#include <machine/endian.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#endif

#ifdef NS
#include <netns/ns.h>
#include <netns/ns_if.h>
#endif

/* XXXrkb: cheap hack until parents pass in DMA tags */
#define _SGIMIPS_BUS_DMA_PRIVATE

#include <machine/bus.h>
#include <machine/arcs.h>
#include <machine/intr.h>

#include <dev/ic/seeq8003reg.h>

#include <sgimips/hpc/sqvar.h>
#include <sgimips/hpc/hpcvar.h>
#include <sgimips/hpc/hpcreg.h>

/*
 * XXX: debugging hack -- expand "static" to nothing so all functions
 * and data below get external linkage and are visible to the debugger.
 */
#define static

/*
 * Short TODO list:
 *	(1) Do counters for bad-RX packets.
 *	(2) Inherit DMA tag via config machinery, don't hard-code it.
 *	(3) Allow multi-segment transmits, instead of copying to a single,
 *	    contiguous mbuf.
 *	(4) Verify sq_stop() turns off enough stuff; I was still getting
 *	    seeq interrupts after sq_stop().
 *	(5) Fix up printfs in driver (most should only fire when SQ_DEBUG
 *	    or something similar is defined).
 *	(6) Implement EDLC modes: especially packet auto-pad and simplex
 *	    mode.
 *	(7) Should the driver filter out its own transmissions in non-EDLC
 *	    mode?
 *	(8) Multicast support -- multicast filter, address management, ...
 *	(9) Deal with RB0 (recv buffer overflow) on reception.  Will need
 *	    to figure out if RB0 is read-only as stated in one spot in the
 *	    HPC spec, or read-write (ie, is 'write a one to clear it' the
 *	    correct way to handle it?).
 */

static int	sq_match(struct device *, struct cfdata *, void *);
static void	sq_attach(struct device *, struct device *, void *);
static int	sq_init(struct ifnet *);
static void	sq_start(struct ifnet *);
static void	sq_stop(struct ifnet *, int);
static void	sq_watchdog(struct ifnet *);
static int	sq_ioctl(struct ifnet *, u_long, caddr_t);

static void	sq_set_filter(struct sq_softc *);
static int	sq_intr(void *);
static int	sq_rxintr(struct sq_softc *);
static int	sq_txintr(struct sq_softc *);
static void	sq_reset(struct sq_softc *);
static int	sq_add_rxbuf(struct sq_softc *, int);
static void	sq_dump_buffer(u_int32_t addr, u_int32_t len);

static void	enaddr_aton(const char*, u_int8_t*);

/* Actions */
#define SQ_RESET		1
#define SQ_ADD_TO_DMA		2
#define SQ_START_DMA		3
#define SQ_DONE_DMA		4
#define SQ_RESTART_DMA		5
#define SQ_TXINTR_ENTER		6
#define SQ_TXINTR_EXIT		7
#define SQ_TXINTR_BUSY		8

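/*
 * Debug trace facility: each SQ_TRACE() call records a driver action
 * (one of the SQ_* codes above) along with the buffer index, DMA status
 * and free-descriptor count at the time.  The buffer is cleared and
 * reused once full; sq_trace_dump() prints it from the watchdog.
 */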
struct sq_action_trace {
	int action;
	int bufno;
	int status;
	int freebuf;
};

#define SQ_TRACEBUF_SIZE	100
int sq_trace_idx = 0;
struct sq_action_trace sq_trace[SQ_TRACEBUF_SIZE];

void sq_trace_dump(struct sq_softc* sc);

#define SQ_TRACE(act, buf, stat, free) do {				\
	sq_trace[sq_trace_idx].action = (act);				\
	sq_trace[sq_trace_idx].bufno = (buf);				\
	sq_trace[sq_trace_idx].status = (stat);				\
	sq_trace[sq_trace_idx].freebuf = (free);			\
	if (++sq_trace_idx == SQ_TRACEBUF_SIZE) {			\
		bzero(&sq_trace, sizeof(sq_trace));			\
		sq_trace_idx = 0;					\
	}								\
} while (0)

struct cfattach sq_ca = {
	sizeof(struct sq_softc), sq_match, sq_attach
};

static int
sq_match(struct device *parent, struct cfdata *match, void *aux)
{
	/* XXX! */
	return 1;
}

static void
sq_attach(struct device *parent, struct device *self, void *aux)
{
	int i, err;
	char* macaddr;
	struct sq_softc *sc = (void *)self;
	struct hpc_attach_args *haa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	sc->sc_hpct = haa->ha_iot;
	if ((err = bus_space_subregion(haa->ha_iot, haa->ha_ioh,
				       HPC_ENET_REGS,
				       HPC_ENET_REGS_SIZE,
				       &sc->sc_hpch)) != 0) {
		printf(": unable to map HPC DMA registers, error = %d\n", err);
		goto fail_0;
	}

	sc->sc_regt = haa->ha_iot;
	if ((err = bus_space_subregion(haa->ha_iot, haa->ha_ioh,
				       HPC_ENET_DEVREGS,
				       HPC_ENET_DEVREGS_SIZE,
				       &sc->sc_regh)) != 0) {
		printf(": unable to map Seeq registers, error = %d\n", err);
		goto fail_0;
	}

	/* XXXrkb: should be inherited from parent bus, but works for now */
	sc->sc_dmat = &sgimips_default_bus_dma_tag;

	if ((err = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct sq_control),
				    PAGE_SIZE, PAGE_SIZE, &sc->sc_cdseg,
				    1, &sc->sc_ncdseg, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to allocate control data, error = %d\n", err);
		goto fail_0;
	}

	if ((err = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg,
				  sizeof(struct sq_control),
				  (caddr_t *)&sc->sc_control,
				  BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf(": unable to map control data, error = %d\n", err);
		goto fail_1;
	}

	if ((err = bus_dmamap_create(sc->sc_dmat, sizeof(struct sq_control),
				     1, sizeof(struct sq_control), PAGE_SIZE,
				     BUS_DMA_NOWAIT, &sc->sc_cdmap)) != 0) {
		printf(": unable to create DMA map for control data, error "
			"= %d\n", err);
		goto fail_2;
	}

	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cdmap, sc->sc_control,
				   sizeof(struct sq_control),
				   NULL, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load DMA map for control data, error "
			"= %d\n", err);
		goto fail_3;
	}

	bzero(sc->sc_control, sizeof(struct sq_control));

	/* Create transmit buffer DMA maps */
	for (i = 0; i < SQ_NTXDESC; i++) {
	    if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
					 0, BUS_DMA_NOWAIT,
					 &sc->sc_txmap[i])) != 0) {
		    printf(": unable to create tx DMA map %d, error = %d\n",
			   i, err);
		    goto fail_4;
	    }
	}

	/* Create receive buffer DMA maps */
	for (i = 0; i < SQ_NRXDESC; i++) {
	    if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
					 0, BUS_DMA_NOWAIT,
					 &sc->sc_rxmap[i])) != 0) {
		    printf(": unable to create rx DMA map %d, error = %d\n",
			   i, err);
		    goto fail_5;
	    }
	}

	/* Pre-allocate the receive buffers.  */
	for (i = 0; i < SQ_NRXDESC; i++) {
		if ((err = sq_add_rxbuf(sc, i)) != 0) {
			printf(": unable to allocate or map rx buffer %d, "
			       "error = %d\n", i, err);
			goto fail_6;
		}
	}

	if ((macaddr = ARCS->GetEnvironmentVariable("eaddr")) == NULL) {
		printf(": unable to get MAC address!\n");
		goto fail_6;
	}

	if ((cpu_intr_establish(3, IPL_NET, sq_intr, sc)) == NULL) {
		printf(": unable to establish interrupt!\n");
		goto fail_6;
	}

	/* Reset the chip to a known state. */
	sq_reset(sc);

	/*
	 * Determine if we're an 8003 or 80c03 by setting the first
	 * MAC address register to non-zero, and then reading it back.
	 * If it's zero, we have an 80c03, because we will have read
	 * the TxCollLSB register.
	 */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCOLLS0, 0xa5);
	if (bus_space_read_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCOLLS0) == 0)
		sc->sc_type = SQ_TYPE_80C03;
	else
		sc->sc_type = SQ_TYPE_8003;
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCOLLS0, 0x00);

	printf(": SGI Seeq %s\n",
	    sc->sc_type == SQ_TYPE_80C03 ? "80c03" : "8003");

	enaddr_aton(macaddr, sc->sc_enaddr);

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
					   ether_sprintf(sc->sc_enaddr));

	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_init = sq_init;
	ifp->if_stop = sq_stop;
	ifp->if_start = sq_start;
	ifp->if_ioctl = sq_ioctl;
	ifp->if_watchdog = sq_watchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_NOTRAILERS | IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	bzero(&sq_trace, sizeof(sq_trace));
	/* Done! */
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
fail_6:
	for (i = 0; i < SQ_NRXDESC; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
fail_5:
	for (i = 0; i < SQ_NRXDESC; i++) {
	    if (sc->sc_rxmap[i] != NULL)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmap[i]);
	}
fail_4:
	for (i = 0; i < SQ_NTXDESC; i++) {
	    if (sc->sc_txmap[i] != NULL)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_txmap[i]);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cdmap);
fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdmap);
fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t) sc->sc_control,
				      sizeof(struct sq_control));
fail_1:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg);
fail_0:
	return;
}

/* Set up data to get the interface up and running. */
int
sq_init(struct ifnet *ifp)
{
	int i;
	u_int32_t reg;
	struct sq_softc *sc = ifp->if_softc;

	/* Cancel any in-progress I/O */
	sq_stop(ifp, 0);

	sc->sc_nextrx = 0;

	sc->sc_nfreetx = SQ_NTXDESC;
	sc->sc_nexttx = sc->sc_prevtx = 0;

	SQ_TRACE(SQ_RESET, 0, 0, sc->sc_nfreetx);

	/* Set into 8003 mode, bank 0 to program ethernet address */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCMD, TXCMD_BANK0);

	/* Now write the address */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		bus_space_write_1(sc->sc_regt, sc->sc_regh, i,
		    sc->sc_enaddr[i]);

	sc->sc_rxcmd = RXCMD_IE_CRC |
		       RXCMD_IE_DRIB |
		       RXCMD_IE_SHORT |
		       RXCMD_IE_END |
		       RXCMD_IE_GOOD;

	/*
	 * Set the receive filter -- this will add some bits to the
	 * prototype RXCMD register.  Do this before setting the
	 * transmit config register, since we might need to switch
	 * banks.
	 */
	sq_set_filter(sc);

	/* Set up Seeq transmit command register */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCMD,
						    TXCMD_IE_UFLOW |
						    TXCMD_IE_COLL |
						    TXCMD_IE_16COLL |
						    TXCMD_IE_GOOD);

	/* Now write the receive command register. */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_RXCMD, sc->sc_rxcmd);

	/* Set up HPC ethernet DMA config */
	reg = bus_space_read_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_DMACFG);
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_DMACFG,
				reg | ENETR_DMACFG_FIX_RXDC |
				ENETR_DMACFG_FIX_INTR |
				ENETR_DMACFG_FIX_EOP);

	/* Pass the start of the receive ring to the HPC */
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_NDBP,
						    SQ_CDRXADDR(sc, 0));

	/* And turn on the HPC ethernet receive channel */
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_CTL,
						    ENETR_CTL_ACTIVE);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

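/*
 * Build the receive filter bits into the prototype RXCMD register
 * (sc_rxcmd); sq_init() writes the result to the chip.
 */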
static void
sq_set_filter(struct sq_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;

	/*
	 * Check for promiscuous mode.  Also implies
	 * all-multicast.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rxcmd |= RXCMD_REC_ALL;
		ifp->if_flags |= IFF_ALLMULTI;
		return;
	}

	/*
	 * The 8003 has no hash table.  If we have any multicast
	 * addresses on the list, enable reception of all multicast
	 * frames.
	 *
	 * XXX The 80c03 has a hash table.  We should use it.
	 */

	ETHER_FIRST_MULTI(step, ec, enm);

	if (enm == NULL) {
		sc->sc_rxcmd |= RXCMD_REC_BROAD;
		return;
	}

	sc->sc_rxcmd |= RXCMD_REC_MULTI;
	ifp->if_flags |= IFF_ALLMULTI;
}

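/* Interface ioctl handler; everything is delegated to ether_ioctl(). */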
int
sq_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	int s, error = 0;

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		/*
		 * Multicast list has changed; the hardware filter
		 * should be updated accordingly (XXX: not done here
		 * yet -- see TODO item 8).
		 */
		error = 0;
	}

	splx(s);
	return (error);
}

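/*
 * if_start handler: drain the interface send queue onto the transmit
 * descriptor ring and (re)start the HPC transmit DMA channel.
 */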
void
sq_start(struct ifnet *ifp)
{
	struct sq_softc *sc = ifp->if_softc;
	u_int32_t status;
	struct mbuf *m0, *m;
	bus_dmamap_t dmamap;
	int err, totlen, nexttx, firsttx, lasttx, ofree, seg;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_nfreetx;
	firsttx = sc->sc_nexttx;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->sc_nfreetx != 0) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		dmamap = sc->sc_txmap[sc->sc_nexttx];

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  In this case, we'll copy
		 * and try again.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
						      BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}

			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;

			if ((err = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
						m, BUS_DMA_NOWAIT)) != 0) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, err);
				break;
			}
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.
		 */
		if (dmamap->dm_nsegs > sc->sc_nfreetx) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed to anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX is it worth it?
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			if (m != NULL)
				m_freem(m);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptors.
		 */
		for (nexttx = sc->sc_nexttx, seg = 0, totlen = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = SQ_NEXTTX(nexttx)) {
			sc->sc_txdesc[nexttx].hdd_bufptr =
					    dmamap->dm_segs[seg].ds_addr;
			sc->sc_txdesc[nexttx].hdd_ctl =
					    dmamap->dm_segs[seg].ds_len;
			sc->sc_txdesc[nexttx].hdd_descptr =
					    SQ_CDTXADDR(sc, SQ_NEXTTX(nexttx));
			lasttx = nexttx;
			totlen += dmamap->dm_segs[seg].ds_len;
		}

		/* Last descriptor gets end-of-packet */
		sc->sc_txdesc[lasttx].hdd_ctl |= HDD_CTL_EOPACKET;

		/* XXXrkb: if not EDLC, pad to min len manually */
		if (totlen < ETHER_MIN_LEN) {
		    sc->sc_txdesc[lasttx].hdd_ctl += (ETHER_MIN_LEN - totlen);
		    totlen = ETHER_MIN_LEN;
		}

#if 0
		printf("%s: transmit %d-%d, len %d\n", sc->sc_dev.dv_xname,
						       sc->sc_nexttx, lasttx,
						       totlen);
#endif

		if (ifp->if_flags & IFF_DEBUG) {
			printf("     transmit chain:\n");
			for (seg = sc->sc_nexttx;; seg = SQ_NEXTTX(seg)) {
				printf("     descriptor %d:\n", seg);
				printf("       hdd_bufptr:      0x%08x\n",
					sc->sc_txdesc[seg].hdd_bufptr);
				printf("       hdd_ctl:         0x%08x\n",
					sc->sc_txdesc[seg].hdd_ctl);
				printf("       hdd_descptr:     0x%08x\n",
					sc->sc_txdesc[seg].hdd_descptr);

				if (seg == lasttx)
					break;
			}
		}

		/* Sync the descriptors we're using. */
		SQ_CDTXSYNC(sc, sc->sc_nexttx, dmamap->dm_nsegs,
				BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Store a pointer to the packet so we can free it later */
		sc->sc_txmbuf[sc->sc_nexttx] = m0;

		/* Advance the tx pointer. */
		sc->sc_nfreetx -= dmamap->dm_nsegs;
		sc->sc_nexttx = nexttx;

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	/* All transmit descriptors used up, let upper layers know */
	if (sc->sc_nfreetx == 0)
		ifp->if_flags |= IFF_OACTIVE;

	if (sc->sc_nfreetx != ofree) {
#if 0
		printf("%s: %d packets enqueued, first %d, INTR on %d\n",
			    sc->sc_dev.dv_xname, lasttx - firsttx + 1,
			    firsttx, lasttx);
#endif

		/*
		 * Cause a transmit interrupt to happen on the last
		 * packet we enqueued, and mark it as the last
		 * descriptor.
		 */
		sc->sc_txdesc[lasttx].hdd_ctl |= (HDD_CTL_INTR |
						  HDD_CTL_EOCHAIN);
		SQ_CDTXSYNC(sc, lasttx, 1,
				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/*
		 * There is a potential race condition here if the HPC
		 * DMA channel is active and we try to either update
		 * the 'next descriptor' pointer in the HPC PIO space
		 * or the 'next descriptor' pointer in a previous
		 * descriptor.
		 *
		 * To avoid this, if the channel is active, we rely on
		 * the transmit interrupt routine noticing that there
		 * are more packets to send and restarting the HPC DMA
		 * engine, rather than mucking with the DMA state here.
		 */
		status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
						       HPC_ENETX_CTL);

		if ((status & ENETX_CTL_ACTIVE) != 0) {
			SQ_TRACE(SQ_ADD_TO_DMA, firsttx, status,
				 sc->sc_nfreetx);

			/*
			 * DMA is still running: splice the new packets
			 * onto the chain by clearing end-of-chain on
			 * the previous last descriptor.
			 */
			sc->sc_txdesc[SQ_PREVTX(firsttx)].hdd_ctl &=
							~HDD_CTL_EOCHAIN;
			SQ_CDTXSYNC(sc, SQ_PREVTX(firsttx), 1,
				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		} else {
			SQ_TRACE(SQ_START_DMA, firsttx, status,
				 sc->sc_nfreetx);

			bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
				  HPC_ENETX_NDBP, SQ_CDTXADDR(sc, firsttx));

			/* Kick DMA channel into life */
			bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
					  HPC_ENETX_CTL, ENETX_CTL_ACTIVE);
		}

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

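/*
 * if_stop handler: discard pending transmits, quiesce the Seeq chip
 * and reset the HPC DMA channels.
 */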
void
sq_stop(struct ifnet *ifp, int disable)
{
	int i;
	struct sq_softc *sc = ifp->if_softc;

	for (i = 0; i < SQ_NTXDESC; i++) {
		if (sc->sc_txmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
			m_freem(sc->sc_txmbuf[i]);
			sc->sc_txmbuf[i] = NULL;
		}
	}

	/* Clear Seeq transmit/receive command registers */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCMD, 0);
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_RXCMD, 0);

	sq_reset(sc);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

/* Device timeout/watchdog routine. */
void
sq_watchdog(struct ifnet *ifp)
{
	u_int32_t status;
	struct sq_softc *sc = ifp->if_softc;

	status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETX_CTL);
	log(LOG_ERR, "%s: device timeout (prev %d, next %d, free %d, "
		     "status %08x)\n", sc->sc_dev.dv_xname, sc->sc_prevtx,
				       sc->sc_nexttx, sc->sc_nfreetx, status);

	sq_trace_dump(sc);

	bzero(&sq_trace, sizeof(sq_trace));
	sq_trace_idx = 0;

	++ifp->if_oerrors;

	sq_init(ifp);
}

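/* Print the accumulated action trace; called from sq_watchdog(). */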
void
sq_trace_dump(struct sq_softc* sc)
{
	int i;

	for (i = 0; i < sq_trace_idx; i++) {
		printf("%s: [%d] action %d, buf %d, free %d, status %08x\n",
			sc->sc_dev.dv_xname, i, sq_trace[i].action,
			sq_trace[i].bufno, sq_trace[i].freebuf,
			sq_trace[i].status);
	}
}

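/*
 * Interrupt service routine: ack the HPC interrupt, then service the
 * receive ring and, if transmits are pending, the transmit ring.
 */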
static int
sq_intr(void *arg)
{
	struct sq_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int handled = 0;
	u_int32_t stat;

	/* XXX: bit 1 of the reset register seems to latch interrupt state */
	stat = bus_space_read_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_RESET);

	if ((stat & 2) == 0) {
		printf("%s: Unexpected interrupt!\n", sc->sc_dev.dv_xname);
		return 0;
	}

	/* Write the bit back to ack the interrupt */
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_RESET, 2);

	/*
	 * If the interface isn't running, the interrupt couldn't
	 * possibly have come from us.
	 */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	/* Always check for received packets */
	if (sq_rxintr(sc) != 0)
		handled++;

	/* Only handle transmit interrupts if we actually sent something */
	if (sc->sc_nfreetx < SQ_NTXDESC) {
		sq_txintr(sc);
		handled++;
	}

#if NRND > 0
	if (handled)
		rnd_add_uint32(&sc->rnd_source, stat);
#endif
	return (handled);
}

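/*
 * Receive interrupt handler: walk the receive ring until we hit a
 * CPU-owned descriptor, passing good packets up the stack, then move
 * the end-of-chain marker and restart the DMA channel if it stopped.
 */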
static int
sq_rxintr(struct sq_softc *sc)
{
	int count = 0;
	struct mbuf* m;
	int i, framelen;
	u_int8_t pktstat;
	u_int32_t status;
	int new_end, orig_end;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	for (i = sc->sc_nextrx;; i = SQ_NEXTRX(i)) {
	    SQ_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	    /* If this is a CPU-owned buffer, we're at the end of the list */
	    if (sc->sc_rxdesc[i].hdd_ctl & HDD_CTL_OWN) {
#if 0
		u_int32_t reg;

		reg = bus_space_read_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_CTL);
		printf("%s: rxintr: done at %d (ctl %08x)\n",
				sc->sc_dev.dv_xname, i, reg);
#endif
		break;
	    }

	    count++;

	    m = sc->sc_rxmbuf[i];
	    framelen = m->m_ext.ext_size -
			HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hdd_ctl) - 3;

	    /* Now sync the actual packet data */
	    bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
			    sc->sc_rxmap[i]->dm_mapsize, BUS_DMASYNC_POSTREAD);

	    pktstat = *((u_int8_t*)m->m_data + framelen + 2);

	    if ((pktstat & RXSTAT_GOOD) == 0) {
		ifp->if_ierrors++;

		if (pktstat & RXSTAT_OFLOW)
		    printf("%s: receive FIFO overflow\n", sc->sc_dev.dv_xname);

		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
				sc->sc_rxmap[i]->dm_mapsize,
				BUS_DMASYNC_PREREAD);
		SQ_INIT_RXDESC(sc, i);
		continue;
	    }

	    if (sq_add_rxbuf(sc, i) != 0) {
		ifp->if_ierrors++;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
				sc->sc_rxmap[i]->dm_mapsize,
				BUS_DMASYNC_PREREAD);
		SQ_INIT_RXDESC(sc, i);
		continue;
	    }

	    m->m_data += 2;
	    m->m_pkthdr.rcvif = ifp;
	    m->m_pkthdr.len = m->m_len = framelen;

	    ifp->if_ipackets++;

#if 0
	    printf("%s: sq_rxintr: buf %d len %d\n", sc->sc_dev.dv_xname,
						     i, framelen);
#endif

#if NBPFILTER > 0
	    if (ifp->if_bpf)
		    bpf_mtap(ifp->if_bpf, m);
#endif
	    (*ifp->if_input)(ifp, m);
	}

	/* If anything happened, move ring start/end pointers to new spot */
	if (i != sc->sc_nextrx) {
	    new_end = SQ_PREVRX(i);
	    sc->sc_rxdesc[new_end].hdd_ctl |= HDD_CTL_EOCHAIN;
	    SQ_CDRXSYNC(sc, new_end, BUS_DMASYNC_PREREAD |
				     BUS_DMASYNC_PREWRITE);

	    orig_end = SQ_PREVRX(sc->sc_nextrx);
	    sc->sc_rxdesc[orig_end].hdd_ctl &= ~HDD_CTL_EOCHAIN;
	    SQ_CDRXSYNC(sc, orig_end, BUS_DMASYNC_PREREAD |
				      BUS_DMASYNC_PREWRITE);

	    sc->sc_nextrx = i;
	}

	status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
					       HPC_ENETR_CTL);

	/* If receive channel is stopped, restart it... */
	if ((status & ENETR_CTL_ACTIVE) == 0) {
	    /* Pass the start of the receive ring to the HPC */
	    bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
			      HPC_ENETR_NDBP, SQ_CDRXADDR(sc, sc->sc_nextrx));

	    /* And turn on the HPC ethernet receive channel */
	    bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_CTL,
							ENETR_CTL_ACTIVE);
	}

	return count;
}

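/*
 * Transmit interrupt handler: note any error status, reclaim finished
 * descriptors, and restart the DMA engine if packets are still queued.
 */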
static int
sq_txintr(struct sq_softc *sc)
{
	int i;
	u_int32_t status;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETX_CTL);

	SQ_TRACE(SQ_TXINTR_ENTER, sc->sc_prevtx, status, sc->sc_nfreetx);

	if ((status & (ENETX_CTL_ACTIVE | TXSTAT_GOOD)) == 0) {
		if (status & TXSTAT_COLL)
		    ifp->if_collisions++;

		if (status & TXSTAT_UFLOW) {
		    printf("%s: transmit underflow\n", sc->sc_dev.dv_xname);
		    ifp->if_oerrors++;
		}

		if (status & TXSTAT_16COLL) {
		    printf("%s: max collisions reached\n", sc->sc_dev.dv_xname);
		    ifp->if_oerrors++;
		    ifp->if_collisions += 16;
		}
	}

	i = sc->sc_prevtx;
	while (sc->sc_nfreetx < SQ_NTXDESC) {
		/*
		 * Check status first so we don't end up with a case of
		 * the buffer not being finished while the DMA channel
		 * has gone idle.
		 */
		status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
							HPC_ENETX_CTL);

		SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs,
				BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		/* If not yet transmitted, try and start DMA engine again */
		if ((sc->sc_txdesc[i].hdd_ctl & HDD_CTL_XMITDONE) == 0) {
		    if ((status & ENETX_CTL_ACTIVE) == 0) {
			SQ_TRACE(SQ_RESTART_DMA, i, status, sc->sc_nfreetx);

			bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
					  HPC_ENETX_NDBP, SQ_CDTXADDR(sc, i));

			/* Kick DMA channel into life */
			bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
					  HPC_ENETX_CTL, ENETX_CTL_ACTIVE);

			/* Set a watchdog timer in case the chip flakes out. */
			ifp->if_timer = 5;
		    } else {
			SQ_TRACE(SQ_TXINTR_BUSY, i, status, sc->sc_nfreetx);
		    }
		    break;
		}

		/* Sync the packet data, unload DMA map, free mbuf */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap[i], 0,
				sc->sc_txmap[i]->dm_mapsize,
				BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
		m_freem(sc->sc_txmbuf[i]);
		sc->sc_txmbuf[i] = NULL;

		ifp->if_opackets++;
		sc->sc_nfreetx++;

		SQ_TRACE(SQ_DONE_DMA, i, status, sc->sc_nfreetx);
		i = SQ_NEXTTX(i);
	}

	/* prevtx now points to next xmit packet not yet finished */
	sc->sc_prevtx = i;

	/* If we have buffers free, let upper layers know */
	if (sc->sc_nfreetx > 0)
	    ifp->if_flags &= ~IFF_OACTIVE;

	/* If all packets have left the coop, cancel watchdog */
	if (sc->sc_nfreetx == SQ_NTXDESC)
	    ifp->if_timer = 0;

	SQ_TRACE(SQ_TXINTR_EXIT, sc->sc_prevtx, status, sc->sc_nfreetx);
	sq_start(ifp);

	return 1;
}
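/* Stop the HPC DMA channels and pulse the chip reset register. */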
void
sq_reset(struct sq_softc *sc)
{
	/* Stop HPC DMA channels */
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_CTL, 0);
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETX_CTL, 0);

	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_RESET, 3);
	delay(20);
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_RESET, 0);
}

/* sq_add_rxbuf: Add a receive buffer to the indicated descriptor.  */
int
sq_add_rxbuf(struct sq_softc *sc, int idx)
{
	int err;
	struct mbuf *m;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (sc->sc_rxmbuf[idx] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[idx]);

	sc->sc_rxmbuf[idx] = m;

	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_rxmap[idx],
				   m->m_ext.ext_buf, m->m_ext.ext_size,
				   NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, err);
		panic("sq_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[idx], 0,
			sc->sc_rxmap[idx]->dm_mapsize, BUS_DMASYNC_PREREAD);

	SQ_INIT_RXDESC(sc, idx);

	return 0;
}

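/*
 * Debug helper: hex-dump `len' bytes of physical memory at `addr'
 * through the KSEG1 (uncached) window.
 */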
void
sq_dump_buffer(u_int32_t addr, u_int32_t len)
{
	int i;
	u_char* physaddr = (u_char *)MIPS_PHYS_TO_KSEG1((caddr_t)addr);

	if (len == 0)
		return;

	printf("%p: ", physaddr);

	for (i = 0; i < len; i++) {
		printf("%02x ", *(physaddr + i) & 0xff);
		/* Label each new line with the address of its first byte */
		if ((i % 16) == 15 && i != len - 1)
		    printf("\n%p: ", physaddr + i + 1);
	}

	printf("\n");
}
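/*
 * Parse a colon-separated Ethernet address string (the format of the
 * ARCS "eaddr" environment variable) into its six-byte binary form.
 */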
void
enaddr_aton(const char* str, u_int8_t* eaddr)
{
	int i;
	char c;

	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		if (*str == ':')
			str++;

		c = *str++;
		if (isdigit(c)) {
			eaddr[i] = (c - '0');
		} else if (isxdigit(c)) {
			eaddr[i] = (toupper(c) + 10 - 'A');
		}

		c = *str++;
		if (isdigit(c)) {
			eaddr[i] = (eaddr[i] << 4) | (c - '0');
		} else if (isxdigit(c)) {
			eaddr[i] = (eaddr[i] << 4) | (toupper(c) + 10 - 'A');
		}
	}
}
   1112