/*	$NetBSD: if_sq.c,v 1.22 2004/12/29 02:11:31 rumble Exp $	*/

/*
 * Copyright (c) 2001 Rafal K. Boni
 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Portions of this code are derived from software contributed to The
 * NetBSD Foundation by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_sq.c,v 1.22 2004/12/29 02:11:31 rumble Exp $");

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>

#include <machine/endian.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/ic/seeq8003reg.h>

#include <sgimips/hpc/sqvar.h>
#include <sgimips/hpc/hpcvar.h>
#include <sgimips/hpc/hpcreg.h>

#include <dev/arcbios/arcbios.h>
#include <dev/arcbios/arcbiosvar.h>

#define static
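/*
 * XXX The define above neuters "static", presumably so that every
 * function in this file is globally visible to the kernel debugger;
 * the function definitions below rely on it matching the static
 * prototypes that follow.
 */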

/*
 * Short TODO list:
 *	(1) Do counters for bad-RX packets.
 *	(2) Allow multi-segment transmits, instead of copying to a single,
 *	    contiguous mbuf.
 *	(3) Verify sq_stop() turns off enough stuff; I was still getting
 *	    seeq interrupts after sq_stop().
 *	(4) Implement EDLC modes: especially packet auto-pad and simplex
 *	    mode.
 *	(5) Should the driver filter out its own transmissions in non-EDLC
 *	    mode?
 *	(6) Multicast support -- multicast filter, address management, ...
 *	(7) Deal with RB0 (recv buffer overflow) on reception.  Will need
 *	    to figure out if RB0 is read-only as stated in one spot in the
 *	    HPC spec, or read-write (ie, is 'write a one to clear it' the
 *	    correct thing)?
 */

#if defined(SQ_DEBUG)
int sq_debug = 0;
#define SQ_DPRINTF(x) if (sq_debug) printf x
#else
#define SQ_DPRINTF(x)
#endif
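
/*
 * SQ_DPRINTF() takes a single, parenthesized printf argument list, so
 * it expands to a plain printf call; the double parentheses are
 * therefore required at every call site, e.g. (values hypothetical):
 *
 *	SQ_DPRINTF(("%s: rx buf %d\n", sc->sc_dev.dv_xname, i));
 */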

static int	sq_match(struct device *, struct cfdata *, void *);
static void	sq_attach(struct device *, struct device *, void *);
static int	sq_init(struct ifnet *);
static void	sq_start(struct ifnet *);
static void	sq_stop(struct ifnet *, int);
static void	sq_watchdog(struct ifnet *);
static int	sq_ioctl(struct ifnet *, u_long, caddr_t);

static void	sq_set_filter(struct sq_softc *);
static int	sq_intr(void *);
static int	sq_rxintr(struct sq_softc *);
static int	sq_txintr(struct sq_softc *);
static void	sq_reset(struct sq_softc *);
static int	sq_add_rxbuf(struct sq_softc *, int);
static void	sq_dump_buffer(u_int32_t addr, u_int32_t len);
static void	sq_trace_dump(struct sq_softc *);

static void	enaddr_aton(const char *, u_int8_t *);

CFATTACH_DECL(sq, sizeof(struct sq_softc),
    sq_match, sq_attach, NULL, NULL);

#define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
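/*
 * ETHER_MIN_LEN (64) includes the 4-byte FCS, which the Seeq appends
 * in hardware, so the driver itself must pad outgoing frames to at
 * least ETHER_PAD_LEN (60) bytes; see the copy-and-pad path in
 * sq_start().
 */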

static int
sq_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct hpc_attach_args *ha = aux;

	if (strcmp(ha->ha_name, cf->cf_name) == 0)
		return (1);

	return (0);
}

static void
sq_attach(struct device *parent, struct device *self, void *aux)
{
	int i, err;
	char *macaddr;
	struct sq_softc *sc = (void *)self;
	struct hpc_attach_args *haa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	sc->sc_hpct = haa->ha_st;
	sc->hpc_regs = haa->hpc_regs;		/* HPC register definitions */

	if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
	    haa->ha_dmaoff, sc->hpc_regs->enet_regs_size,
	    &sc->sc_hpch)) != 0) {
		printf(": unable to map HPC DMA registers, error = %d\n", err);
		goto fail_0;
	}

	sc->sc_regt = haa->ha_st;
	if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
	    haa->ha_devoff, sc->hpc_regs->enet_devregs_size,
	    &sc->sc_regh)) != 0) {
		printf(": unable to map Seeq registers, error = %d\n", err);
		goto fail_0;
	}

	sc->sc_dmat = haa->ha_dmat;

	if ((err = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct sq_control),
	    PAGE_SIZE, PAGE_SIZE, &sc->sc_cdseg, 1, &sc->sc_ncdseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to allocate control data, error = %d\n", err);
		goto fail_0;
	}

	if ((err = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg,
	    sizeof(struct sq_control), (caddr_t *)&sc->sc_control,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf(": unable to map control data, error = %d\n", err);
		goto fail_1;
	}

	if ((err = bus_dmamap_create(sc->sc_dmat, sizeof(struct sq_control),
	    1, sizeof(struct sq_control), PAGE_SIZE, BUS_DMA_NOWAIT,
	    &sc->sc_cdmap)) != 0) {
		printf(": unable to create DMA map for control data, error "
		    "= %d\n", err);
		goto fail_2;
	}

	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cdmap, sc->sc_control,
	    sizeof(struct sq_control), NULL, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load DMA map for control data, error "
		    "= %d\n", err);
		goto fail_3;
	}

	memset(sc->sc_control, 0, sizeof(struct sq_control));
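	/*
	 * sc_control now holds both descriptor rings; the SQ_CDTXADDR()
	 * and SQ_CDRXADDR() macros used below translate ring indices
	 * into DMA addresses within it.
	 */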

	/* Create transmit buffer DMA maps */
	for (i = 0; i < SQ_NTXDESC; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_txmap[i])) != 0) {
			printf(": unable to create tx DMA map %d, error = %d\n",
			    i, err);
			goto fail_4;
		}
	}

	/* Create receive buffer DMA maps */
	for (i = 0; i < SQ_NRXDESC; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_rxmap[i])) != 0) {
			printf(": unable to create rx DMA map %d, error = %d\n",
			    i, err);
			goto fail_5;
		}
	}

	/* Pre-allocate the receive buffers. */
	for (i = 0; i < SQ_NRXDESC; i++) {
		if ((err = sq_add_rxbuf(sc, i)) != 0) {
			printf(": unable to allocate or map rx buffer %d, "
			    "error = %d\n", i, err);
			goto fail_6;
		}
	}

	if ((macaddr = ARCBIOS->GetEnvironmentVariable("eaddr")) == NULL) {
		printf(": unable to get MAC address!\n");
		goto fail_6;
	}

	evcnt_attach_dynamic(&sc->sq_intrcnt, EVCNT_TYPE_INTR, NULL,
	    self->dv_xname, "intr");

	if ((cpu_intr_establish(haa->ha_irq, IPL_NET, sq_intr, sc)) == NULL) {
		printf(": unable to establish interrupt!\n");
		goto fail_6;
	}

	/* Reset the chip to a known state. */
	sq_reset(sc);

	/*
	 * Determine if we're an 8003 or 80c03 by setting the first
	 * MAC address register to non-zero, and then reading it back.
	 * If it's zero, we have an 80c03, because we will have read
	 * the TxCollLSB register.
	 */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCOLLS0, 0xa5);
	if (bus_space_read_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCOLLS0) == 0)
		sc->sc_type = SQ_TYPE_80C03;
	else
		sc->sc_type = SQ_TYPE_8003;
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCOLLS0, 0x00);

	printf(": SGI Seeq %s\n",
	    sc->sc_type == SQ_TYPE_80C03 ? "80c03" : "8003");

	enaddr_aton(macaddr, sc->sc_enaddr);

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(sc->sc_enaddr));

	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_init = sq_init;
	ifp->if_stop = sq_stop;
	ifp->if_start = sq_start;
	ifp->if_ioctl = sq_ioctl;
	ifp->if_watchdog = sq_watchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_NOTRAILERS | IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	memset(&sc->sq_trace, 0, sizeof(sc->sq_trace));
	/* Done! */
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
fail_6:
	for (i = 0; i < SQ_NRXDESC; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
fail_5:
	for (i = 0; i < SQ_NRXDESC; i++) {
		if (sc->sc_rxmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmap[i]);
	}
fail_4:
	for (i = 0; i < SQ_NTXDESC; i++) {
		if (sc->sc_txmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_txmap[i]);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cdmap);
fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdmap);
fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control,
	    sizeof(struct sq_control));
fail_1:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg);
fail_0:
	return;
}

/* Set up data to get the interface up and running. */
int
sq_init(struct ifnet *ifp)
{
	int i;
	u_int32_t reg;
	struct sq_softc *sc = ifp->if_softc;

	/* Cancel any in-progress I/O */
	sq_stop(ifp, 0);

	sc->sc_nextrx = 0;

	sc->sc_nfreetx = SQ_NTXDESC;
	sc->sc_nexttx = sc->sc_prevtx = 0;

	SQ_TRACE(SQ_RESET, sc, 0, 0);

	/* Set into 8003 mode, bank 0 to program ethernet address */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCMD, TXCMD_BANK0);

	/* Now write the address */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		bus_space_write_1(sc->sc_regt, sc->sc_regh, i,
		    sc->sc_enaddr[i]);

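	/*
	 * Enable receive interrupts for CRC, dribble and short-frame
	 * errors, end-of-frame, and good frames.
	 */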
	sc->sc_rxcmd = RXCMD_IE_CRC |
		       RXCMD_IE_DRIB |
		       RXCMD_IE_SHORT |
		       RXCMD_IE_END |
		       RXCMD_IE_GOOD;

	/*
	 * Set the receive filter -- this will add some bits to the
	 * prototype RXCMD register.  Do this before setting the
	 * transmit config register, since we might need to switch
	 * banks.
	 */
	sq_set_filter(sc);

	/* Set up Seeq transmit command register */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCMD,
	    TXCMD_IE_UFLOW |
	    TXCMD_IE_COLL |
	    TXCMD_IE_16COLL |
	    TXCMD_IE_GOOD);

	/* Now write the receive command register. */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_RXCMD, sc->sc_rxcmd);

	/* Set up HPC ethernet DMA config */
	if (sc->hpc_regs->revision == 3) {
		reg = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
		    sc->hpc_regs->enetr_dmacfg);
		bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
		    sc->hpc_regs->enetr_dmacfg,
		    reg | ENETR_DMACFG_FIX_RXDC |
		    ENETR_DMACFG_FIX_INTR |
		    ENETR_DMACFG_FIX_EOP);
	}

	/* Pass the start of the receive ring to the HPC */
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, sc->hpc_regs->enetr_ndbp,
	    SQ_CDRXADDR(sc, 0));

	/* And turn on the HPC ethernet receive channel */
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, sc->hpc_regs->enetr_ctl,
	    sc->hpc_regs->enetr_ctl_active);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

static void
sq_set_filter(struct sq_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;

	/*
	 * Check for promiscuous mode.  Also implies
	 * all-multicast.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rxcmd |= RXCMD_REC_ALL;
		ifp->if_flags |= IFF_ALLMULTI;
		return;
	}

	/*
	 * The 8003 has no hash table.  If we have any multicast
	 * addresses on the list, enable reception of all multicast
	 * frames.
	 *
	 * XXX The 80c03 has a hash table.  We should use it.
	 */

	ETHER_FIRST_MULTI(step, ec, enm);

	if (enm == NULL) {
		sc->sc_rxcmd &= ~RXCMD_REC_MASK;
		sc->sc_rxcmd |= RXCMD_REC_BROAD;

		ifp->if_flags &= ~IFF_ALLMULTI;
		return;
	}

	sc->sc_rxcmd |= RXCMD_REC_MULTI;
	ifp->if_flags |= IFF_ALLMULTI;
}
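
/*
 * A sketch of the multicast walk an 80c03 hash filter would need (the
 * hash register programming is omitted, since the register layout
 * would have to be checked against the 80c03 documentation first):
 *
 *	while (enm != NULL) {
 *		... hash enm->enm_addrlo into the filter ...
 *		ETHER_NEXT_MULTI(step, enm);
 *	}
 */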

int
sq_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	int s, error = 0;

	SQ_TRACE(SQ_IOCTL, (struct sq_softc *)ifp->if_softc, 0, 0);

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		/*
		 * Multicast list has changed; set the hardware filter
		 * accordingly.
		 */
		if (ifp->if_flags & IFF_RUNNING)
			error = sq_init(ifp);
		else
			error = 0;
	}

	splx(s);
	return (error);
}

void
sq_start(struct ifnet *ifp)
{
	struct sq_softc *sc = ifp->if_softc;
	u_int32_t status;
	struct mbuf *m0, *m;
	bus_dmamap_t dmamap;
	int err, totlen, nexttx, firsttx, lasttx = -1, ofree, seg;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_nfreetx;
	firsttx = sc->sc_nexttx;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->sc_nfreetx != 0) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		dmamap = sc->sc_txmap[sc->sc_nexttx];

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  In this case, we'll copy
		 * and try again.
		 * Also copy it if we need to pad, so that we are sure
		 * there is room for the pad buffer.
		 * XXX the right way of doing this is to use a static
		 * buffer for padding and adding it to the transmit
		 * descriptor (see sys/dev/pci/if_tl.c for example).
		 * We can't do this here yet because we can't send
		 * packets with more than one fragment.
		 */
		if (m0->m_pkthdr.len < ETHER_PAD_LEN ||
		    bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}

			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			if (m0->m_pkthdr.len < ETHER_PAD_LEN) {
				memset(mtod(m, char *) + m0->m_pkthdr.len, 0,
				    ETHER_PAD_LEN - m0->m_pkthdr.len);
				m->m_pkthdr.len = m->m_len = ETHER_PAD_LEN;
			} else
				m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;

			if ((err = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_NOWAIT)) != 0) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, err);
				break;
			}
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.
		 */
		if (dmamap->dm_nsegs > sc->sc_nfreetx) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed to anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX is it worth it?
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			if (m != NULL)
				m_freem(m);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		SQ_TRACE(SQ_ENQUEUE, sc, sc->sc_nexttx, 0);

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptors.
		 */
		for (nexttx = sc->sc_nexttx, seg = 0, totlen = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = SQ_NEXTTX(nexttx)) {
			if (sc->hpc_regs->revision == 3) {
				sc->sc_txdesc[nexttx].hpc3_hdd_bufptr =
				    dmamap->dm_segs[seg].ds_addr;
				sc->sc_txdesc[nexttx].hpc3_hdd_ctl =
				    dmamap->dm_segs[seg].ds_len;
			} else {
				sc->sc_txdesc[nexttx].hpc1_hdd_bufptr =
				    dmamap->dm_segs[seg].ds_addr;
				sc->sc_txdesc[nexttx].hpc1_hdd_ctl =
				    dmamap->dm_segs[seg].ds_len;
			}
			sc->sc_txdesc[nexttx].hdd_descptr =
			    SQ_CDTXADDR(sc, SQ_NEXTTX(nexttx));
			lasttx = nexttx;
			totlen += dmamap->dm_segs[seg].ds_len;
		}
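		/*
		 * Each descriptor's hdd_descptr now links to the next,
		 * forming the chain the HPC DMA engine walks.
		 */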

		/* Last descriptor gets end-of-packet */
		KASSERT(lasttx != -1);
		if (sc->hpc_regs->revision == 3)
			sc->sc_txdesc[lasttx].hpc3_hdd_ctl |= HDD_CTL_EOPACKET;
		else
			sc->sc_txdesc[lasttx].hpc1_hdd_ctl |=
			    HPC1_HDD_CTL_EOPACKET;

		SQ_DPRINTF(("%s: transmit %d-%d, len %d\n",
		    sc->sc_dev.dv_xname, sc->sc_nexttx, lasttx, totlen));

		if (ifp->if_flags & IFF_DEBUG) {
			printf(" transmit chain:\n");
			for (seg = sc->sc_nexttx;; seg = SQ_NEXTTX(seg)) {
				printf(" descriptor %d:\n", seg);
				printf(" hdd_bufptr: 0x%08x\n",
				    (sc->hpc_regs->revision == 3) ?
				    sc->sc_txdesc[seg].hpc3_hdd_bufptr :
				    sc->sc_txdesc[seg].hpc1_hdd_bufptr);
				printf(" hdd_ctl: 0x%08x\n",
				    (sc->hpc_regs->revision == 3) ?
				    sc->sc_txdesc[seg].hpc3_hdd_ctl :
				    sc->sc_txdesc[seg].hpc1_hdd_ctl);
				printf(" hdd_descptr: 0x%08x\n",
				    sc->sc_txdesc[seg].hdd_descptr);

				if (seg == lasttx)
					break;
			}
		}

		/* Sync the descriptors we're using. */
		SQ_CDTXSYNC(sc, sc->sc_nexttx, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Store a pointer to the packet so we can free it later */
		sc->sc_txmbuf[sc->sc_nexttx] = m0;

		/* Advance the tx pointer. */
		sc->sc_nfreetx -= dmamap->dm_nsegs;
		sc->sc_nexttx = nexttx;
	}

	/* All transmit descriptors used up, let upper layers know */
	if (sc->sc_nfreetx == 0)
		ifp->if_flags |= IFF_OACTIVE;

	if (sc->sc_nfreetx != ofree) {
		SQ_DPRINTF(("%s: %d packets enqueued, first %d, INTR on %d\n",
		    sc->sc_dev.dv_xname, lasttx - firsttx + 1,
		    firsttx, lasttx));

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued, mark it as the last
		 * descriptor.
		 *
		 * HDD_CTL_EOPACKET and HDD_CTL_INTR together cause
		 * an interrupt.
		 */
		KASSERT(lasttx != -1);
		if (sc->hpc_regs->revision == 3) {
			sc->sc_txdesc[lasttx].hpc3_hdd_ctl |= HDD_CTL_INTR |
			    HDD_CTL_EOCHAIN;
		} else {
			sc->sc_txdesc[lasttx].hpc1_hdd_ctl |=
			    HPC1_HDD_CTL_INTR;
			sc->sc_txdesc[lasttx].hpc1_hdd_bufptr |=
			    HPC1_HDD_CTL_EOCHAIN;
		}

		SQ_CDTXSYNC(sc, lasttx, 1,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/*
		 * There is a potential race condition here if the HPC
		 * DMA channel is active and we try and either update
		 * the 'next descriptor' pointer in the HPC PIO space
		 * or the 'next descriptor' pointer in a previous desc-
		 * riptor.
		 *
		 * To avoid this, if the channel is active, we rely on
		 * the transmit interrupt routine noticing that there
		 * are more packets to send and restarting the HPC DMA
		 * engine, rather than mucking with the DMA state here.
		 */
		status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
		    sc->hpc_regs->enetx_ctl);

		if ((status & sc->hpc_regs->enetx_ctl_active) != 0) {
			SQ_TRACE(SQ_ADD_TO_DMA, sc, firsttx, status);

			/* NB: hpc3_hdd_ctl is also hpc1_hdd_bufptr */
			sc->sc_txdesc[SQ_PREVTX(firsttx)].hpc3_hdd_ctl &=
			    ~HDD_CTL_EOCHAIN;

			SQ_CDTXSYNC(sc, SQ_PREVTX(firsttx), 1,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		} else {
			SQ_TRACE(SQ_START_DMA, sc, firsttx, status);

			bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
			    sc->hpc_regs->enetx_ndbp,
			    SQ_CDTXADDR(sc, firsttx));

			if (sc->hpc_regs->revision != 3) {
				bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
				    HPC1_ENETX_CFXBP,
				    SQ_CDTXADDR(sc, firsttx));
				bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
				    HPC1_ENETX_CBP,
				    SQ_CDTXADDR(sc, firsttx));
			}

			/* Kick DMA channel into life */
			bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
			    sc->hpc_regs->enetx_ctl,
			    sc->hpc_regs->enetx_ctl_active);
		}

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

void
sq_stop(struct ifnet *ifp, int disable)
{
	int i;
	struct sq_softc *sc = ifp->if_softc;

	for (i = 0; i < SQ_NTXDESC; i++) {
		if (sc->sc_txmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
			m_freem(sc->sc_txmbuf[i]);
			sc->sc_txmbuf[i] = NULL;
		}
	}

	/* Clear Seeq transmit/receive command registers */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCMD, 0);
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_RXCMD, 0);

	sq_reset(sc);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

/* Device timeout/watchdog routine. */
void
sq_watchdog(struct ifnet *ifp)
{
	u_int32_t status;
	struct sq_softc *sc = ifp->if_softc;

	status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
	    sc->hpc_regs->enetx_ctl);
	log(LOG_ERR, "%s: device timeout (prev %d, next %d, free %d, "
	    "status %08x)\n", sc->sc_dev.dv_xname, sc->sc_prevtx,
	    sc->sc_nexttx, sc->sc_nfreetx, status);

	sq_trace_dump(sc);

	memset(&sc->sq_trace, 0, sizeof(sc->sq_trace));
	sc->sq_trace_idx = 0;

	++ifp->if_oerrors;

	sq_init(ifp);
}

static void
sq_trace_dump(struct sq_softc *sc)
{
	int i;
	char *act;

	for (i = 0; i < sc->sq_trace_idx; i++) {
		switch (sc->sq_trace[i].action) {
		case SQ_RESET:		act = "SQ_RESET";	break;
		case SQ_ADD_TO_DMA:	act = "SQ_ADD_TO_DMA";	break;
		case SQ_START_DMA:	act = "SQ_START_DMA";	break;
		case SQ_DONE_DMA:	act = "SQ_DONE_DMA";	break;
		case SQ_RESTART_DMA:	act = "SQ_RESTART_DMA";	break;
		case SQ_TXINTR_ENTER:	act = "SQ_TXINTR_ENTER"; break;
		case SQ_TXINTR_EXIT:	act = "SQ_TXINTR_EXIT";	break;
		case SQ_TXINTR_BUSY:	act = "SQ_TXINTR_BUSY";	break;
		case SQ_IOCTL:		act = "SQ_IOCTL";	break;
		case SQ_ENQUEUE:	act = "SQ_ENQUEUE";	break;
		default:		act = "UNKNOWN";
		}

		printf("%s: [%03d] action %-16s buf %03d free %03d "
		    "status %08x line %d\n", sc->sc_dev.dv_xname, i, act,
		    sc->sq_trace[i].bufno, sc->sq_trace[i].freebuf,
		    sc->sq_trace[i].status, sc->sq_trace[i].line);
	}
}
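
/*
 * Each entry from sq_trace_dump() comes out on one line, e.g.
 * (values hypothetical):
 *
 *	sq0: [007] action SQ_START_DMA     buf 002 free 018 status 00000200 line 712
 */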

static int
sq_intr(void *arg)
{
	struct sq_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int handled = 0;
	u_int32_t stat;

	stat = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
	    sc->hpc_regs->enetr_reset);

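	/*
	 * Bit 1 (0x2) of the reset register indicates a pending
	 * ethernet interrupt; it is acknowledged below by writing
	 * the value back with that bit set.
	 */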
	if ((stat & 2) == 0) {
		printf("%s: Unexpected interrupt!\n", sc->sc_dev.dv_xname);
		return 0;
	}

	bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
	    sc->hpc_regs->enetr_reset, (stat | 2));

	/*
	 * If the interface isn't running, the interrupt couldn't
	 * possibly have come from us.
	 */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	sc->sq_intrcnt.ev_count++;

	/* Always check for received packets */
	if (sq_rxintr(sc) != 0)
		handled++;

	/* Only handle transmit interrupts if we actually sent something */
	if (sc->sc_nfreetx < SQ_NTXDESC) {
		sq_txintr(sc);
		handled++;
	}

#if NRND > 0
	if (handled)
		rnd_add_uint32(&sc->rnd_source, stat);
#endif
	return (handled);
}

static int
sq_rxintr(struct sq_softc *sc)
{
	int count = 0;
	struct mbuf *m;
	int i, framelen;
	u_int8_t pktstat;
	u_int32_t status;
	u_int32_t ctl_reg;
	int new_end, orig_end;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	for (i = sc->sc_nextrx;; i = SQ_NEXTRX(i)) {
		SQ_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/*
		 * If this is a CPU-owned buffer, we're at the end of
		 * the list.
		 */
		if (sc->hpc_regs->revision == 3)
			ctl_reg = sc->sc_rxdesc[i].hpc3_hdd_ctl & HDD_CTL_OWN;
		else
			ctl_reg = sc->sc_rxdesc[i].hpc1_hdd_ctl &
			    HPC1_HDD_CTL_OWN;

		if (ctl_reg) {
#if defined(SQ_DEBUG)
			u_int32_t reg;

			reg = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
			    sc->hpc_regs->enetr_ctl);
			SQ_DPRINTF(("%s: rxintr: done at %d (ctl %08x)\n",
			    sc->sc_dev.dv_xname, i, reg));
#endif
			break;
		}

		count++;

		m = sc->sc_rxmbuf[i];
		framelen = m->m_ext.ext_size - 3;
		if (sc->hpc_regs->revision == 3)
			framelen -=
			    HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hpc3_hdd_ctl);
		else
			framelen -=
			    HPC1_HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hpc1_hdd_ctl);

		/* Now sync the actual packet data */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
		    sc->sc_rxmap[i]->dm_mapsize, BUS_DMASYNC_POSTREAD);

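		/*
		 * The receive buffer is laid out so the frame starts
		 * two bytes in (keeping the IP header aligned, hence
		 * the m_data += 2 below), and the Seeq status byte
		 * immediately follows the frame data.
		 */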
		pktstat = *((u_int8_t *)m->m_data + framelen + 2);

		if ((pktstat & RXSTAT_GOOD) == 0) {
			ifp->if_ierrors++;

			if (pktstat & RXSTAT_OFLOW)
				printf("%s: receive FIFO overflow\n",
				    sc->sc_dev.dv_xname);

			bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
			    sc->sc_rxmap[i]->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			SQ_INIT_RXDESC(sc, i);
			continue;
		}

		if (sq_add_rxbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
			    sc->sc_rxmap[i]->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			SQ_INIT_RXDESC(sc, i);
			continue;
		}

		m->m_data += 2;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = framelen;

		ifp->if_ipackets++;

		SQ_DPRINTF(("%s: sq_rxintr: buf %d len %d\n",
		    sc->sc_dev.dv_xname, i, framelen));

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		(*ifp->if_input)(ifp, m);
	}

	/* If anything happened, move ring start/end pointers to new spot */
	if (i != sc->sc_nextrx) {
		/* NB: hpc3_hdd_ctl is also hpc1_hdd_bufptr */

		new_end = SQ_PREVRX(i);
		sc->sc_rxdesc[new_end].hpc3_hdd_ctl |= HDD_CTL_EOCHAIN;
		SQ_CDRXSYNC(sc, new_end,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		orig_end = SQ_PREVRX(sc->sc_nextrx);
		sc->sc_rxdesc[orig_end].hpc3_hdd_ctl &= ~HDD_CTL_EOCHAIN;
		SQ_CDRXSYNC(sc, orig_end,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		sc->sc_nextrx = i;
	}

	status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
	    sc->hpc_regs->enetr_ctl);

	/* If receive channel is stopped, restart it... */
	if ((status & sc->hpc_regs->enetr_ctl_active) == 0) {
		/* Pass the start of the receive ring to the HPC */
		bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
		    sc->hpc_regs->enetr_ndbp, SQ_CDRXADDR(sc, sc->sc_nextrx));

		/* And turn on the HPC ethernet receive channel */
		bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
		    sc->hpc_regs->enetr_ctl, sc->hpc_regs->enetr_ctl_active);
	}

	return count;
}

static int
sq_txintr(struct sq_softc *sc)
{
	int i;
	int shift = 0;
	u_int32_t status;
	u_int32_t hpc1_ready = 0;
	u_int32_t hpc3_not_ready = 1;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if (sc->hpc_regs->revision != 3)
		shift = 16;

	status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
	    sc->hpc_regs->enetx_ctl) >> shift;

	SQ_TRACE(SQ_TXINTR_ENTER, sc, sc->sc_prevtx, status);

	if ((status & ((sc->hpc_regs->enetx_ctl_active >> shift) |
	    TXSTAT_GOOD)) == 0) {
		/* XXX */ printf("txstat: %x\n", status);
		if (status & TXSTAT_COLL)
			ifp->if_collisions++;

		if (status & TXSTAT_UFLOW) {
			printf("%s: transmit underflow\n",
			    sc->sc_dev.dv_xname);
			ifp->if_oerrors++;
		}

		if (status & TXSTAT_16COLL) {
			printf("%s: max collisions reached\n",
			    sc->sc_dev.dv_xname);
			ifp->if_oerrors++;
			ifp->if_collisions += 16;
		}
	}

	i = sc->sc_prevtx;
	while (sc->sc_nfreetx < SQ_NTXDESC) {
		/*
		 * Check status first so we don't end up with a case of
		 * the buffer not being finished while the DMA channel
		 * has gone idle.
		 */
		status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
		    sc->hpc_regs->enetx_ctl) >> shift;

		SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/*
		 * If not yet transmitted, try and start DMA engine again.
		 * HPC3 tags transmitted descriptors with XMITDONE whereas
		 * HPC1 will not halt before sending through EOCHAIN.
		 */
		if (sc->hpc_regs->revision == 3) {
			hpc3_not_ready =
			    sc->sc_txdesc[i].hpc3_hdd_ctl & HDD_CTL_XMITDONE;
		} else {
			if (hpc1_ready)
				hpc1_ready++;
			else {
				if (sc->sc_txdesc[i].hpc1_hdd_ctl &
				    HPC1_HDD_CTL_EOPACKET)
					hpc1_ready = 1;
			}
		}

		if (hpc3_not_ready == 0 || hpc1_ready == 2) {
			/* XXX */
			if ((status &
			    (sc->hpc_regs->enetx_ctl_active >> shift)) == 0) {
				SQ_TRACE(SQ_RESTART_DMA, sc, i, status);

				bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
				    sc->hpc_regs->enetx_ndbp,
				    SQ_CDTXADDR(sc, i));

				if (sc->hpc_regs->revision != 3) {
					bus_space_write_4(sc->sc_hpct,
					    sc->sc_hpch, HPC1_ENETX_CFXBP,
					    SQ_CDTXADDR(sc, i));
					bus_space_write_4(sc->sc_hpct,
					    sc->sc_hpch, HPC1_ENETX_CBP,
					    SQ_CDTXADDR(sc, i));
				}

				/* Kick DMA channel into life */
				bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
				    sc->hpc_regs->enetx_ctl,
				    sc->hpc_regs->enetx_ctl_active);

				/*
				 * Set a watchdog timer in case the chip
				 * flakes out.
				 */
				ifp->if_timer = 5;
			} else {
				SQ_TRACE(SQ_TXINTR_BUSY, sc, i, status);
			}
			break;
		}

		/* Sync the packet data, unload DMA map, free mbuf */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap[i], 0,
		    sc->sc_txmap[i]->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
		m_freem(sc->sc_txmbuf[i]);
		sc->sc_txmbuf[i] = NULL;

		ifp->if_opackets++;
		sc->sc_nfreetx++;

		SQ_TRACE(SQ_DONE_DMA, sc, i, status);
		i = SQ_NEXTTX(i);
	}

	/* prevtx now points to next xmit packet not yet finished */
	sc->sc_prevtx = i;

	/* If we have buffers free, let upper layers know */
	if (sc->sc_nfreetx > 0)
		ifp->if_flags &= ~IFF_OACTIVE;

	/* If all packets have left the coop, cancel watchdog */
	if (sc->sc_nfreetx == SQ_NTXDESC)
		ifp->if_timer = 0;

	SQ_TRACE(SQ_TXINTR_EXIT, sc, sc->sc_prevtx, status);
	sq_start(ifp);

	return 1;
}


void
sq_reset(struct sq_softc *sc)
{
	/* Stop HPC dma channels */
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
	    sc->hpc_regs->enetr_ctl, 0);
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
	    sc->hpc_regs->enetx_ctl, 0);

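	/* Assert the ethernet reset bits, hold them ~20us, then release. */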
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
	    sc->hpc_regs->enetr_reset, 3);
	delay(20);
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
	    sc->hpc_regs->enetr_reset, 0);
}

/* sq_add_rxbuf: Add a receive buffer to the indicated descriptor. */
int
sq_add_rxbuf(struct sq_softc *sc, int idx)
{
	int err;
	struct mbuf *m;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (sc->sc_rxmbuf[idx] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[idx]);

	sc->sc_rxmbuf[idx] = m;

	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_rxmap[idx],
	    m->m_ext.ext_buf, m->m_ext.ext_size,
	    NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, err);
		panic("sq_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[idx], 0,
	    sc->sc_rxmap[idx]->dm_mapsize, BUS_DMASYNC_PREREAD);

	SQ_INIT_RXDESC(sc, idx);

	return 0;
}

void
sq_dump_buffer(u_int32_t addr, u_int32_t len)
{
	u_int i;
	u_char *physaddr = (u_char *)MIPS_PHYS_TO_KSEG1((caddr_t)addr);

	if (len == 0)
		return;

	printf("%p: ", physaddr);

	for (i = 0; i < len; i++) {
		printf("%02x ", *(physaddr + i) & 0xff);
		if ((i % 16) == 15 && i != len - 1)
			printf("\n%p: ", physaddr + i);
	}

	printf("\n");
}


void
enaddr_aton(const char *str, u_int8_t *eaddr)
{
	int i;
	char c;

	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		if (*str == ':')
			str++;

		c = *str++;
		if (isdigit(c)) {
			eaddr[i] = (c - '0');
		} else if (isxdigit(c)) {
			eaddr[i] = (toupper(c) + 10 - 'A');
		}

		c = *str++;
		if (isdigit(c)) {
			eaddr[i] = (eaddr[i] << 4) | (c - '0');
		} else if (isxdigit(c)) {
			eaddr[i] = (eaddr[i] << 4) | (toupper(c) + 10 - 'A');
		}
	}
}
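
/*
 * Example (hypothetical address): given str = "08:00:69:0a:bc:de",
 * enaddr_aton() fills eaddr with { 0x08, 0x00, 0x69, 0x0a, 0xbc, 0xde }.
 * The ARCS "eaddr" environment variable read in sq_attach() uses this
 * colon-separated form.
 */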