/*	$NetBSD: if_sq.c,v 1.24 2004/12/30 02:26:20 rumble Exp $	*/

/*
 * Copyright (c) 2001 Rafal K. Boni
 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Portions of this code are derived from software contributed to The
 * NetBSD Foundation by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_sq.c,v 1.24 2004/12/30 02:26:20 rumble Exp $");

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>

#include <machine/endian.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/ic/seeq8003reg.h>

#include <sgimips/hpc/sqvar.h>
#include <sgimips/hpc/hpcvar.h>
#include <sgimips/hpc/hpcreg.h>

#include <dev/arcbios/arcbios.h>
#include <dev/arcbios/arcbiosvar.h>

#define static		/* XXX defined away so the static functions below stay visible to the debugger */

/*
 * Short TODO list:
 *	(1) Do counters for bad-RX packets.
 *	(2) Allow multi-segment transmits, instead of copying to a single,
 *	    contiguous mbuf.
 *	(3) Verify sq_stop() turns off enough stuff; I was still getting
 *	    seeq interrupts after sq_stop().
 *	(4) Implement EDLC modes: especially packet auto-pad and simplex
 *	    mode.
 *	(5) Should the driver filter out its own transmissions in non-EDLC
 *	    mode?
 *	(6) Multicast support -- multicast filter, address management, ...
 *	(7) Deal with RB0 (recv buffer overflow) on reception.  Will need
 *	    to figure out whether RB0 is read-only, as stated in one spot
 *	    in the HPC spec, or read-write (i.e., is "write a one to clear
 *	    it" the correct thing to do?).
 */

#if defined(SQ_DEBUG)
int sq_debug = 0;
#define SQ_DPRINTF(x) do { if (sq_debug) printf x; } while (0)
#else
#define SQ_DPRINTF(x)
#endif
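
/*
 * Note that SQ_DPRINTF takes a fully parenthesized argument list, so
 * call sites use double parentheses, e.g.:
 *
 *	SQ_DPRINTF(("%s: rxintr: done at %d\n", sc->sc_dev.dv_xname, i));
 */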

static int	sq_match(struct device *, struct cfdata *, void *);
static void	sq_attach(struct device *, struct device *, void *);
static int	sq_init(struct ifnet *);
static void	sq_start(struct ifnet *);
static void	sq_stop(struct ifnet *, int);
static void	sq_watchdog(struct ifnet *);
static int	sq_ioctl(struct ifnet *, u_long, caddr_t);

static void	sq_set_filter(struct sq_softc *);
static int	sq_intr(void *);
static int	sq_rxintr(struct sq_softc *);
static int	sq_txintr(struct sq_softc *);
static void	sq_txring_hpc1(struct sq_softc *);
static void	sq_txring_hpc3(struct sq_softc *);
static void	sq_reset(struct sq_softc *);
static int	sq_add_rxbuf(struct sq_softc *, int);
static void	sq_dump_buffer(u_int32_t addr, u_int32_t len);
static void	sq_trace_dump(struct sq_softc *);

static void	enaddr_aton(const char*, u_int8_t*);

CFATTACH_DECL(sq, sizeof(struct sq_softc),
    sq_match, sq_attach, NULL, NULL);

#define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
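
/*
 * With ETHER_MIN_LEN at 64 and ETHER_CRC_LEN at 4, ETHER_PAD_LEN is 60:
 * sq_start() zero-pads anything shorter than this, so that the 4-byte
 * FCS brings the frame on the wire up to the Ethernet minimum.
 */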

#define sq_seeq_read(sc, off) \
	bus_space_read_1(sc->sc_regt, sc->sc_regh, off)
#define sq_seeq_write(sc, off, val) \
	bus_space_write_1(sc->sc_regt, sc->sc_regh, off, val)

#define sq_hpc_read(sc, off) \
	bus_space_read_4(sc->sc_hpct, sc->sc_hpch, off)
#define sq_hpc_write(sc, off, val) \
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, off, val)
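
/*
 * These wrap byte-wide accesses to the Seeq chip and word-wide accesses
 * to the HPC DMA engine, e.g.:
 *
 *	sq_seeq_write(sc, SEEQ_RXCMD, sc->sc_rxcmd);
 *	sq_hpc_write(sc, HPC_ENETX_CTL, ENETX_CTL_ACTIVE);
 */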

static int
sq_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct hpc_attach_args *ha = aux;

	if (strcmp(ha->ha_name, cf->cf_name) == 0)
		return (1);

	return (0);
}

static void
sq_attach(struct device *parent, struct device *self, void *aux)
{
	int i, err;
	char* macaddr;
	struct sq_softc *sc = (void *)self;
	struct hpc_attach_args *haa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	sc->sc_hpct = haa->ha_st;
	sc->hpc_regs = haa->hpc_regs;	/* HPC register definitions */

	if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
	    haa->ha_dmaoff, sc->hpc_regs->enet_regs_size,
	    &sc->sc_hpch)) != 0) {
		printf(": unable to map HPC DMA registers, error = %d\n", err);
		goto fail_0;
	}

	sc->sc_regt = haa->ha_st;
	if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
	    haa->ha_devoff, sc->hpc_regs->enet_devregs_size,
	    &sc->sc_regh)) != 0) {
		printf(": unable to map Seeq registers, error = %d\n", err);
		goto fail_0;
	}

	sc->sc_dmat = haa->ha_dmat;

	if ((err = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct sq_control),
	    PAGE_SIZE, PAGE_SIZE, &sc->sc_cdseg, 1, &sc->sc_ncdseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to allocate control data, error = %d\n", err);
		goto fail_0;
	}

	if ((err = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg,
	    sizeof(struct sq_control), (caddr_t *)&sc->sc_control,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf(": unable to map control data, error = %d\n", err);
		goto fail_1;
	}

	if ((err = bus_dmamap_create(sc->sc_dmat, sizeof(struct sq_control),
	    1, sizeof(struct sq_control), PAGE_SIZE, BUS_DMA_NOWAIT,
	    &sc->sc_cdmap)) != 0) {
		printf(": unable to create DMA map for control data, error "
		    "= %d\n", err);
		goto fail_2;
	}

	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cdmap, sc->sc_control,
	    sizeof(struct sq_control), NULL, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load DMA map for control data, error "
		    "= %d\n", err);
		goto fail_3;
	}

	memset(sc->sc_control, 0, sizeof(struct sq_control));

	/* Create transmit buffer DMA maps */
	for (i = 0; i < SQ_NTXDESC; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_txmap[i])) != 0) {
			printf(": unable to create tx DMA map %d, error = %d\n",
			    i, err);
			goto fail_4;
		}
	}

	/* Create receive buffer DMA maps */
	for (i = 0; i < SQ_NRXDESC; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_rxmap[i])) != 0) {
			printf(": unable to create rx DMA map %d, error = %d\n",
			    i, err);
			goto fail_5;
		}
	}

	/* Pre-allocate the receive buffers. */
	for (i = 0; i < SQ_NRXDESC; i++) {
		if ((err = sq_add_rxbuf(sc, i)) != 0) {
			printf(": unable to allocate or map rx buffer %d, "
			    "error = %d\n", i, err);
			goto fail_6;
		}
	}

	if ((macaddr = ARCBIOS->GetEnvironmentVariable("eaddr")) == NULL) {
		printf(": unable to get MAC address!\n");
		goto fail_6;
	}

	evcnt_attach_dynamic(&sc->sq_intrcnt, EVCNT_TYPE_INTR, NULL,
	    self->dv_xname, "intr");

	if ((cpu_intr_establish(haa->ha_irq, IPL_NET, sq_intr, sc)) == NULL) {
		printf(": unable to establish interrupt!\n");
		goto fail_6;
	}

	/* Reset the chip to a known state. */
	sq_reset(sc);

	/*
	 * Determine if we're an 8003 or 80c03 by setting the first
	 * MAC address register to non-zero, and then reading it back.
	 * If it's zero, we have an 80c03, because we will have read
	 * the TxCollLSB register.
	 */
	sq_seeq_write(sc, SEEQ_TXCOLLS0, 0xa5);
	if (sq_seeq_read(sc, SEEQ_TXCOLLS0) == 0)
		sc->sc_type = SQ_TYPE_80C03;
	else
		sc->sc_type = SQ_TYPE_8003;
	sq_seeq_write(sc, SEEQ_TXCOLLS0, 0x00);

	printf(": SGI Seeq %s\n",
	    sc->sc_type == SQ_TYPE_80C03 ? "80c03" : "8003");

	enaddr_aton(macaddr, sc->sc_enaddr);

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(sc->sc_enaddr));

	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_init = sq_init;
	ifp->if_stop = sq_stop;
	ifp->if_start = sq_start;
	ifp->if_ioctl = sq_ioctl;
	ifp->if_watchdog = sq_watchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_NOTRAILERS | IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	memset(&sc->sq_trace, 0, sizeof(sc->sq_trace));
	/* Done! */
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
fail_6:
	for (i = 0; i < SQ_NRXDESC; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
fail_5:
	for (i = 0; i < SQ_NRXDESC; i++) {
		if (sc->sc_rxmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmap[i]);
	}
fail_4:
	for (i = 0; i < SQ_NTXDESC; i++) {
		if (sc->sc_txmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_txmap[i]);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cdmap);
fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdmap);
fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t) sc->sc_control,
	    sizeof(struct sq_control));
fail_1:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg);
fail_0:
	return;
}

/* Set up data to get the interface up and running. */
int
sq_init(struct ifnet *ifp)
{
	int i;
	u_int32_t reg;
	struct sq_softc *sc = ifp->if_softc;

	/* Cancel any in-progress I/O */
	sq_stop(ifp, 0);

	sc->sc_nextrx = 0;

	sc->sc_nfreetx = SQ_NTXDESC;
	sc->sc_nexttx = sc->sc_prevtx = 0;

	SQ_TRACE(SQ_RESET, sc, 0, 0);

	/* Set into 8003 mode, bank 0 to program ethernet address */
	sq_seeq_write(sc, SEEQ_TXCMD, TXCMD_BANK0);

	/* Now write the address */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		sq_seeq_write(sc, i, sc->sc_enaddr[i]);

	sc->sc_rxcmd = RXCMD_IE_CRC |
		       RXCMD_IE_DRIB |
		       RXCMD_IE_SHORT |
		       RXCMD_IE_END |
		       RXCMD_IE_GOOD;

	/*
	 * Set the receive filter -- this will add some bits to the
	 * prototype RXCMD register.  Do this before setting the
	 * transmit config register, since we might need to switch
	 * banks.
	 */
	sq_set_filter(sc);

	/* Set up Seeq transmit command register */
	sq_seeq_write(sc, SEEQ_TXCMD, TXCMD_IE_UFLOW |
				      TXCMD_IE_COLL |
				      TXCMD_IE_16COLL |
				      TXCMD_IE_GOOD);

	/* Now write the receive command register. */
	sq_seeq_write(sc, SEEQ_RXCMD, sc->sc_rxcmd);

	/* Set up HPC ethernet DMA config */
	if (sc->hpc_regs->revision == 3) {
		reg = sq_hpc_read(sc, HPC_ENETR_DMACFG);
		sq_hpc_write(sc, HPC_ENETR_DMACFG, reg |
		    ENETR_DMACFG_FIX_RXDC |
		    ENETR_DMACFG_FIX_INTR |
		    ENETR_DMACFG_FIX_EOP);
	}

	/* Pass the start of the receive ring to the HPC */
	sq_hpc_write(sc, sc->hpc_regs->enetr_ndbp, SQ_CDRXADDR(sc, 0));

	/* And turn on the HPC ethernet receive channel */
	sq_hpc_write(sc, sc->hpc_regs->enetr_ctl,
	    sc->hpc_regs->enetr_ctl_active);

	/*
	 * Turn off delayed receive interrupts on HPC1.
	 * (see Hollywood HPC Specification 2.1.4.3)
	 */
	if (sc->hpc_regs->revision != 3)
		sq_hpc_write(sc, HPC1_ENET_INTDELAY, HPC1_ENET_INTDELAYVAL);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

static void
sq_set_filter(struct sq_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;

	/*
	 * Check for promiscuous mode.  Also implies
	 * all-multicast.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rxcmd |= RXCMD_REC_ALL;
		ifp->if_flags |= IFF_ALLMULTI;
		return;
	}

	/*
	 * The 8003 has no hash table.  If we have any multicast
	 * addresses on the list, enable reception of all multicast
	 * frames.
	 *
	 * XXX The 80c03 has a hash table.  We should use it.
	 */

	ETHER_FIRST_MULTI(step, ec, enm);

	if (enm == NULL) {
		sc->sc_rxcmd &= ~RXCMD_REC_MASK;
		sc->sc_rxcmd |= RXCMD_REC_BROAD;

		ifp->if_flags &= ~IFF_ALLMULTI;
		return;
	}

	sc->sc_rxcmd |= RXCMD_REC_MULTI;
	ifp->if_flags |= IFF_ALLMULTI;
}

int
sq_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	int s, error = 0;

	SQ_TRACE(SQ_IOCTL, (struct sq_softc *)ifp->if_softc, 0, 0);

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		/*
		 * Multicast list has changed; set the hardware filter
		 * accordingly.
		 */
		if (ifp->if_flags & IFF_RUNNING)
			error = sq_init(ifp);
		else
			error = 0;
	}

	splx(s);
	return (error);
}

void
sq_start(struct ifnet *ifp)
{
	struct sq_softc *sc = ifp->if_softc;
	u_int32_t status;
	struct mbuf *m0, *m;
	bus_dmamap_t dmamap;
	int err, totlen, nexttx, firsttx, lasttx = -1, ofree, seg;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_nfreetx;
	firsttx = sc->sc_nexttx;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->sc_nfreetx != 0) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		dmamap = sc->sc_txmap[sc->sc_nexttx];

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  In this case, we'll copy
		 * and try again.
		 * Also copy it if we need to pad, so that we are sure
		 * there is room for the pad buffer.
		 * XXX the right way of doing this is to use a static
		 * buffer for padding and adding it to the transmit
		 * descriptor (see sys/dev/pci/if_tl.c for example).
		 * We can't do this here yet because we can't send
		 * packets with more than one fragment.
		 */
		if (m0->m_pkthdr.len < ETHER_PAD_LEN ||
		    bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}

			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			if (m0->m_pkthdr.len < ETHER_PAD_LEN) {
				memset(mtod(m, char *) + m0->m_pkthdr.len, 0,
				    ETHER_PAD_LEN - m0->m_pkthdr.len);
				m->m_pkthdr.len = m->m_len = ETHER_PAD_LEN;
			} else
				m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;

			if ((err = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_NOWAIT)) != 0) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, err);
				break;
			}
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.
		 */
		if (dmamap->dm_nsegs > sc->sc_nfreetx) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed to anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX is it worth it?
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			if (m != NULL)
				m_freem(m);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		SQ_TRACE(SQ_ENQUEUE, sc, sc->sc_nexttx, 0);

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptors.
		 */
		for (nexttx = sc->sc_nexttx, seg = 0, totlen = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = SQ_NEXTTX(nexttx)) {
			if (sc->hpc_regs->revision == 3) {
				sc->sc_txdesc[nexttx].hpc3_hdd_bufptr =
				    dmamap->dm_segs[seg].ds_addr;
				sc->sc_txdesc[nexttx].hpc3_hdd_ctl =
				    dmamap->dm_segs[seg].ds_len;
			} else {
				sc->sc_txdesc[nexttx].hpc1_hdd_bufptr =
				    dmamap->dm_segs[seg].ds_addr;
				sc->sc_txdesc[nexttx].hpc1_hdd_ctl =
				    dmamap->dm_segs[seg].ds_len;
			}
			sc->sc_txdesc[nexttx].hdd_descptr =
			    SQ_CDTXADDR(sc, SQ_NEXTTX(nexttx));
			lasttx = nexttx;
			totlen += dmamap->dm_segs[seg].ds_len;
		}

		/* Last descriptor gets end-of-packet */
		KASSERT(lasttx != -1);
		if (sc->hpc_regs->revision == 3)
			sc->sc_txdesc[lasttx].hpc3_hdd_ctl |= HDD_CTL_EOPACKET;
		else
			sc->sc_txdesc[lasttx].hpc1_hdd_ctl |=
			    HPC1_HDD_CTL_EOPACKET;

		SQ_DPRINTF(("%s: transmit %d-%d, len %d\n",
		    sc->sc_dev.dv_xname, sc->sc_nexttx, lasttx, totlen));

		if (ifp->if_flags & IFF_DEBUG) {
			printf(" transmit chain:\n");
			for (seg = sc->sc_nexttx;; seg = SQ_NEXTTX(seg)) {
				printf(" descriptor %d:\n", seg);
				printf(" hdd_bufptr: 0x%08x\n",
				    (sc->hpc_regs->revision == 3) ?
				    sc->sc_txdesc[seg].hpc3_hdd_bufptr :
				    sc->sc_txdesc[seg].hpc1_hdd_bufptr);
				printf(" hdd_ctl: 0x%08x\n",
				    (sc->hpc_regs->revision == 3) ?
				    sc->sc_txdesc[seg].hpc3_hdd_ctl :
				    sc->sc_txdesc[seg].hpc1_hdd_ctl);
				printf(" hdd_descptr: 0x%08x\n",
				    sc->sc_txdesc[seg].hdd_descptr);

				if (seg == lasttx)
					break;
			}
		}

		/* Sync the descriptors we're using. */
		SQ_CDTXSYNC(sc, sc->sc_nexttx, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Store a pointer to the packet so we can free it later */
		sc->sc_txmbuf[sc->sc_nexttx] = m0;

		/* Advance the tx pointer. */
		sc->sc_nfreetx -= dmamap->dm_nsegs;
		sc->sc_nexttx = nexttx;
	}

	/* All transmit descriptors used up, let upper layers know */
	if (sc->sc_nfreetx == 0)
		ifp->if_flags |= IFF_OACTIVE;

	if (sc->sc_nfreetx != ofree) {
		SQ_DPRINTF(("%s: %d packets enqueued, first %d, INTR on %d\n",
		    sc->sc_dev.dv_xname, lasttx - firsttx + 1,
		    firsttx, lasttx));

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued, mark it as the last
		 * descriptor.
		 *
		 * HDD_CTL_INTR will generate an interrupt on
		 * HPC1 by itself.  HPC3 will not interrupt unless
		 * HDD_CTL_EOPACKET is set as well.
		 */
		KASSERT(lasttx != -1);
		if (sc->hpc_regs->revision == 3) {
			sc->sc_txdesc[lasttx].hpc3_hdd_ctl |= HDD_CTL_INTR |
			    HDD_CTL_EOCHAIN;
		} else {
			sc->sc_txdesc[lasttx].hpc1_hdd_ctl |= HPC1_HDD_CTL_INTR;
			sc->sc_txdesc[lasttx].hpc1_hdd_bufptr |=
			    HPC1_HDD_CTL_EOCHAIN;
		}

		SQ_CDTXSYNC(sc, lasttx, 1,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/*
		 * There is a potential race condition here if the HPC
		 * DMA channel is active and we try and either update
		 * the 'next descriptor' pointer in the HPC PIO space
		 * or the 'next descriptor' pointer in a previous desc-
		 * riptor.
		 *
		 * To avoid this, if the channel is active, we rely on
		 * the transmit interrupt routine noticing that there
		 * are more packets to send and restarting the HPC DMA
		 * engine, rather than mucking with the DMA state here.
		 */
		status = sq_hpc_read(sc, sc->hpc_regs->enetx_ctl);

		if ((status & sc->hpc_regs->enetx_ctl_active) != 0) {
			SQ_TRACE(SQ_ADD_TO_DMA, sc, firsttx, status);

			/* NB: hpc3_hdd_ctl is also hpc1_hdd_bufptr */
			sc->sc_txdesc[SQ_PREVTX(firsttx)].hpc3_hdd_ctl &=
			    ~HDD_CTL_EOCHAIN;

			if (sc->hpc_regs->revision != 3)
				sc->sc_txdesc[SQ_PREVTX(firsttx)].hpc1_hdd_ctl
				    &= ~HPC1_HDD_CTL_INTR;

			SQ_CDTXSYNC(sc, SQ_PREVTX(firsttx), 1,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		} else if (sc->hpc_regs->revision == 3) {
			SQ_TRACE(SQ_START_DMA, sc, firsttx, status);

			sq_hpc_write(sc, HPC_ENETX_NDBP, SQ_CDTXADDR(sc,
			    firsttx));

			/* Kick DMA channel into life */
			sq_hpc_write(sc, HPC_ENETX_CTL, ENETX_CTL_ACTIVE);
		} else {
			/*
			 * In the HPC1 case where transmit DMA is
			 * inactive, we can either kick off if
			 * the ring was previously empty, or call
			 * our transmit interrupt handler to
			 * figure out if the ring stopped short
			 * and restart at the right place.
			 */
			if (ofree == SQ_NTXDESC) {
				SQ_TRACE(SQ_START_DMA, sc, firsttx, status);

				sq_hpc_write(sc, HPC1_ENETX_NDBP,
				    SQ_CDTXADDR(sc, firsttx));
				sq_hpc_write(sc, HPC1_ENETX_CFXBP,
				    SQ_CDTXADDR(sc, firsttx));
				sq_hpc_write(sc, HPC1_ENETX_CBP,
				    SQ_CDTXADDR(sc, firsttx));

				/* Kick DMA channel into life */
				sq_hpc_write(sc, HPC1_ENETX_CTL,
				    HPC1_ENETX_CTL_ACTIVE);
			} else
				sq_txring_hpc1(sc);
		}

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

void
sq_stop(struct ifnet *ifp, int disable)
{
	int i;
	struct sq_softc *sc = ifp->if_softc;

	for (i = 0; i < SQ_NTXDESC; i++) {
		if (sc->sc_txmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
			m_freem(sc->sc_txmbuf[i]);
			sc->sc_txmbuf[i] = NULL;
		}
	}

	/* Clear Seeq transmit/receive command registers */
	sq_seeq_write(sc, SEEQ_TXCMD, 0);
	sq_seeq_write(sc, SEEQ_RXCMD, 0);

	sq_reset(sc);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

/* Device timeout/watchdog routine. */
void
sq_watchdog(struct ifnet *ifp)
{
	u_int32_t status;
	struct sq_softc *sc = ifp->if_softc;

	status = sq_hpc_read(sc, sc->hpc_regs->enetx_ctl);
	log(LOG_ERR, "%s: device timeout (prev %d, next %d, free %d, "
	    "status %08x)\n", sc->sc_dev.dv_xname, sc->sc_prevtx,
	    sc->sc_nexttx, sc->sc_nfreetx, status);

	sq_trace_dump(sc);

	memset(&sc->sq_trace, 0, sizeof(sc->sq_trace));
	sc->sq_trace_idx = 0;

	++ifp->if_oerrors;

	sq_init(ifp);
}

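/*
 * Dump the event trace ring recorded by the SQ_TRACE() calls sprinkled
 * through the driver; handy for post-mortem analysis after a device
 * timeout.
 */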
static void
sq_trace_dump(struct sq_softc *sc)
{
	int i;
	char *act;

	for (i = 0; i < sc->sq_trace_idx; i++) {
		switch (sc->sq_trace[i].action) {
		case SQ_RESET:		act = "SQ_RESET";	break;
		case SQ_ADD_TO_DMA:	act = "SQ_ADD_TO_DMA";	break;
		case SQ_START_DMA:	act = "SQ_START_DMA";	break;
		case SQ_DONE_DMA:	act = "SQ_DONE_DMA";	break;
		case SQ_RESTART_DMA:	act = "SQ_RESTART_DMA";	break;
		case SQ_TXINTR_ENTER:	act = "SQ_TXINTR_ENTER"; break;
		case SQ_TXINTR_EXIT:	act = "SQ_TXINTR_EXIT";	break;
		case SQ_TXINTR_BUSY:	act = "SQ_TXINTR_BUSY";	break;
		case SQ_IOCTL:		act = "SQ_IOCTL";	break;
		case SQ_ENQUEUE:	act = "SQ_ENQUEUE";	break;
		default:		act = "UNKNOWN";
		}

		printf("%s: [%03d] action %-16s buf %03d free %03d "
		    "status %08x line %d\n", sc->sc_dev.dv_xname, i, act,
		    sc->sq_trace[i].bufno, sc->sq_trace[i].freebuf,
		    sc->sq_trace[i].status, sc->sq_trace[i].line);
	}
}

static int
sq_intr(void *arg)
{
	struct sq_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int handled = 0;
	u_int32_t stat;

	stat = sq_hpc_read(sc, sc->hpc_regs->enetr_reset);

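	/*
	 * Bit 1 of the reset register doubles as the interrupt-pending
	 * flag; writing it back (below) acknowledges the interrupt.
	 * XXX this should probably use a symbolic constant.
	 */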
	if ((stat & 2) == 0) {
		printf("%s: Unexpected interrupt!\n", sc->sc_dev.dv_xname);
		return 0;
	}

	sq_hpc_write(sc, sc->hpc_regs->enetr_reset, (stat | 2));

	/*
	 * If the interface isn't running, the interrupt couldn't
	 * possibly have come from us.
	 */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	sc->sq_intrcnt.ev_count++;

	/* Always check for received packets */
	if (sq_rxintr(sc) != 0)
		handled++;

	/* Only handle transmit interrupts if we actually sent something */
	if (sc->sc_nfreetx < SQ_NTXDESC) {
		sq_txintr(sc);
		handled++;
	}

#if NRND > 0
	if (handled)
		rnd_add_uint32(&sc->rnd_source, stat);
#endif
	return (handled);
}

static int
sq_rxintr(struct sq_softc *sc)
{
	int count = 0;
	struct mbuf* m;
	int i, framelen;
	u_int8_t pktstat;
	u_int32_t status;
	u_int32_t ctl_reg;
	int new_end, orig_end;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	for (i = sc->sc_nextrx;; i = SQ_NEXTRX(i)) {
		SQ_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/*
		 * If this is a CPU-owned buffer, we're at the end of
		 * the list.
		 */
		if (sc->hpc_regs->revision == 3)
			ctl_reg = sc->sc_rxdesc[i].hpc3_hdd_ctl &
			    HDD_CTL_OWN;
		else
			ctl_reg = sc->sc_rxdesc[i].hpc1_hdd_ctl &
			    HPC1_HDD_CTL_OWN;

		if (ctl_reg) {
#if defined(SQ_DEBUG)
			u_int32_t reg;

			reg = sq_hpc_read(sc, sc->hpc_regs->enetr_ctl);
			SQ_DPRINTF(("%s: rxintr: done at %d (ctl %08x)\n",
			    sc->sc_dev.dv_xname, i, reg));
#endif
			break;
		}

		count++;

		m = sc->sc_rxmbuf[i];
		framelen = m->m_ext.ext_size - 3;
		if (sc->hpc_regs->revision == 3)
			framelen -=
			    HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hpc3_hdd_ctl);
		else
			framelen -=
			    HPC1_HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hpc1_hdd_ctl);

		/* Now sync the actual packet data */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
		    sc->sc_rxmap[i]->dm_mapsize, BUS_DMASYNC_POSTREAD);

		pktstat = *((u_int8_t*)m->m_data + framelen + 2);

		if ((pktstat & RXSTAT_GOOD) == 0) {
			ifp->if_ierrors++;

			if (pktstat & RXSTAT_OFLOW)
				printf("%s: receive FIFO overflow\n",
				    sc->sc_dev.dv_xname);

			bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
			    sc->sc_rxmap[i]->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			SQ_INIT_RXDESC(sc, i);
			SQ_DPRINTF(("%s: sq_rxintr: buf %d no RXSTAT_GOOD\n",
			    sc->sc_dev.dv_xname, i));
			continue;
		}

		if (sq_add_rxbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
			    sc->sc_rxmap[i]->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			SQ_INIT_RXDESC(sc, i);
			SQ_DPRINTF(("%s: sq_rxintr: buf %d sq_add_rxbuf() "
			    "failed\n", sc->sc_dev.dv_xname, i));
			continue;
		}

		/*
		 * Skip the two bytes of padding at the front of the
		 * receive buffer; this keeps the payload's IP header
		 * longword-aligned (the status byte read above sits
		 * just past the frame for the same reason).
		 */
		m->m_data += 2;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = framelen;

		ifp->if_ipackets++;

		SQ_DPRINTF(("%s: sq_rxintr: buf %d len %d\n",
		    sc->sc_dev.dv_xname, i, framelen));

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		(*ifp->if_input)(ifp, m);
	}

	/* If anything happened, move ring start/end pointers to new spot */
	if (i != sc->sc_nextrx) {
		/* NB: hpc3_hdd_ctl is also hpc1_hdd_bufptr */

		new_end = SQ_PREVRX(i);
		sc->sc_rxdesc[new_end].hpc3_hdd_ctl |= HDD_CTL_EOCHAIN;
		SQ_CDRXSYNC(sc, new_end,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		orig_end = SQ_PREVRX(sc->sc_nextrx);
		sc->sc_rxdesc[orig_end].hpc3_hdd_ctl &= ~HDD_CTL_EOCHAIN;
		SQ_CDRXSYNC(sc, orig_end,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		sc->sc_nextrx = i;
	}

	status = sq_hpc_read(sc, sc->hpc_regs->enetr_ctl);

	/* If receive channel is stopped, restart it... */
	if ((status & sc->hpc_regs->enetr_ctl_active) == 0) {
		/* Pass the start of the receive ring to the HPC */
		sq_hpc_write(sc, sc->hpc_regs->enetr_ndbp,
		    SQ_CDRXADDR(sc, sc->sc_nextrx));

		/* And turn on the HPC ethernet receive channel */
		sq_hpc_write(sc, sc->hpc_regs->enetr_ctl,
		    sc->hpc_regs->enetr_ctl_active);
	}

	return count;
}

static int
sq_txintr(struct sq_softc *sc)
{
	int shift = 0;
	u_int32_t status, tmp;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if (sc->hpc_regs->revision != 3)
		shift = 16;

	status = sq_hpc_read(sc, sc->hpc_regs->enetx_ctl) >> shift;

	SQ_TRACE(SQ_TXINTR_ENTER, sc, sc->sc_prevtx, status);

	tmp = (sc->hpc_regs->enetx_ctl_active >> shift) | TXSTAT_GOOD;
	if ((status & tmp) == 0) {
		if (status & TXSTAT_COLL)
			ifp->if_collisions++;

		if (status & TXSTAT_UFLOW) {
			printf("%s: transmit underflow\n",
			    sc->sc_dev.dv_xname);
			ifp->if_oerrors++;
		}

		if (status & TXSTAT_16COLL) {
			printf("%s: max collisions reached\n",
			    sc->sc_dev.dv_xname);
			ifp->if_oerrors++;
			ifp->if_collisions += 16;
		}
	}

	/* prevtx now points to next xmit packet not yet finished */
	if (sc->hpc_regs->revision == 3)
		sq_txring_hpc3(sc);
	else
		sq_txring_hpc1(sc);

	/* If we have buffers free, let upper layers know */
	if (sc->sc_nfreetx > 0)
		ifp->if_flags &= ~IFF_OACTIVE;

	/* If all packets have left the coop, cancel watchdog */
	if (sc->sc_nfreetx == SQ_NTXDESC)
		ifp->if_timer = 0;

	SQ_TRACE(SQ_TXINTR_EXIT, sc, sc->sc_prevtx, status);
	sq_start(ifp);

	return 1;
}

/*
 * Reclaim used transmit descriptors and restart the transmit DMA
 * engine if necessary.
 */
static void
sq_txring_hpc1(struct sq_softc *sc)
{
	/*
	 * HPC1 doesn't tag transmitted descriptors, however,
	 * the NDBP register points to the next descriptor that
	 * has not yet been processed.  If DMA is not in progress,
	 * we can safely reclaim all descriptors up to NDBP, and,
	 * if necessary, restart DMA at NDBP.  Otherwise, if DMA
	 * is active, we can only safely reclaim up to CBP.
	 *
	 * For now, we'll only reclaim on inactive DMA and assume
	 * that a sufficiently large ring keeps us out of trouble.
	 */
	u_int32_t reclaimto, status;
	int reclaimall, i = sc->sc_prevtx;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	status = sq_hpc_read(sc, HPC1_ENETX_CTL);
	if (status & HPC1_ENETX_CTL_ACTIVE) {
		SQ_TRACE(SQ_TXINTR_BUSY, sc, i, status);
		return;
	} else
		reclaimto = sq_hpc_read(sc, HPC1_ENETX_NDBP);

	if (sc->sc_nfreetx == 0 && SQ_CDTXADDR(sc, i) == reclaimto)
		reclaimall = 1;
	else
		reclaimall = 0;

	while (sc->sc_nfreetx < SQ_NTXDESC) {
		if (SQ_CDTXADDR(sc, i) == reclaimto && !reclaimall)
			break;

		SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* Sync the packet data, unload DMA map, free mbuf */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap[i], 0,
		    sc->sc_txmap[i]->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
		m_freem(sc->sc_txmbuf[i]);
		sc->sc_txmbuf[i] = NULL;

		ifp->if_opackets++;
		sc->sc_nfreetx++;

		SQ_TRACE(SQ_DONE_DMA, sc, i, status);

		i = SQ_NEXTTX(i);
	}

	if (sc->sc_nfreetx < SQ_NTXDESC) {
		SQ_TRACE(SQ_RESTART_DMA, sc, i, status);

		KASSERT(reclaimto == SQ_CDTXADDR(sc, i));

		sq_hpc_write(sc, HPC1_ENETX_CFXBP, reclaimto);
		sq_hpc_write(sc, HPC1_ENETX_CBP, reclaimto);

		/* Kick DMA channel into life */
		sq_hpc_write(sc, HPC1_ENETX_CTL, HPC1_ENETX_CTL_ACTIVE);

		/*
		 * Set a watchdog timer in case the chip
		 * flakes out.
		 */
		ifp->if_timer = 5;
	}

	sc->sc_prevtx = i;
}

/*
 * Reclaim used transmit descriptors and restart the transmit DMA
 * engine if necessary.
 */
static void
sq_txring_hpc3(struct sq_softc *sc)
{
	/*
	 * HPC3 tags descriptors with a bit once they've been
	 * transmitted.  We need only free each XMITDONE'd
	 * descriptor, and restart the DMA engine if any
	 * descriptors are left over.
	 */
	int i;
	u_int32_t status = 0;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	i = sc->sc_prevtx;
	while (sc->sc_nfreetx < SQ_NTXDESC) {
		/*
		 * Check status first so we don't end up with a case of
		 * the buffer not being finished while the DMA channel
		 * has gone idle.
		 */
		status = sq_hpc_read(sc, HPC_ENETX_CTL);

		SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* Check for used descriptor and restart DMA chain if needed */
		if ((sc->sc_txdesc[i].hpc3_hdd_ctl & HDD_CTL_XMITDONE) == 0) {
			if ((status & ENETX_CTL_ACTIVE) == 0) {
				SQ_TRACE(SQ_RESTART_DMA, sc, i, status);

				sq_hpc_write(sc, HPC_ENETX_NDBP,
				    SQ_CDTXADDR(sc, i));

				/* Kick DMA channel into life */
				sq_hpc_write(sc, HPC_ENETX_CTL,
				    ENETX_CTL_ACTIVE);

				/*
				 * Set a watchdog timer in case the chip
				 * flakes out.
				 */
				ifp->if_timer = 5;
			} else
				SQ_TRACE(SQ_TXINTR_BUSY, sc, i, status);
			break;
		}

		/* Sync the packet data, unload DMA map, free mbuf */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap[i], 0,
		    sc->sc_txmap[i]->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
		m_freem(sc->sc_txmbuf[i]);
		sc->sc_txmbuf[i] = NULL;

		ifp->if_opackets++;
		sc->sc_nfreetx++;

		SQ_TRACE(SQ_DONE_DMA, sc, i, status);
		i = SQ_NEXTTX(i);
	}

	sc->sc_prevtx = i;
}

void
sq_reset(struct sq_softc *sc)
{
	/* Stop HPC dma channels */
	sq_hpc_write(sc, sc->hpc_regs->enetr_ctl, 0);
	sq_hpc_write(sc, sc->hpc_regs->enetx_ctl, 0);

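	/*
	 * Pulse the reset register: writing 3 presumably asserts the
	 * channel reset while clearing the latched interrupt (cf. the
	 * bit-1 handling in sq_intr()); after a short delay the register
	 * is cleared to bring the channel back out of reset.
	 */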
	sq_hpc_write(sc, sc->hpc_regs->enetr_reset, 3);
	delay(20);
	sq_hpc_write(sc, sc->hpc_regs->enetr_reset, 0);
}

/* sq_add_rxbuf: Add a receive buffer to the indicated descriptor. */
int
sq_add_rxbuf(struct sq_softc *sc, int idx)
{
	int err;
	struct mbuf *m;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (sc->sc_rxmbuf[idx] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[idx]);

	sc->sc_rxmbuf[idx] = m;

	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_rxmap[idx],
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, err);
		panic("sq_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[idx], 0,
	    sc->sc_rxmap[idx]->dm_mapsize, BUS_DMASYNC_PREREAD);

	SQ_INIT_RXDESC(sc, idx);

	return 0;
}

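/*
 * sq_dump_buffer: hex-dump `len' bytes starting at physical address
 * `addr', accessed through the uncached KSEG1 window; only useful as a
 * debugging aid.
 */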
void
sq_dump_buffer(u_int32_t addr, u_int32_t len)
{
	u_int i;
	u_char* physaddr = (u_char *)MIPS_PHYS_TO_KSEG1((caddr_t)addr);

	if (len == 0)
		return;

	printf("%p: ", physaddr);

	for (i = 0; i < len; i++) {
		printf("%02x ", *(physaddr + i) & 0xff);
		if ((i % 16) == 15 && i != len - 1)
			printf("\n%p: ", physaddr + i);
	}

	printf("\n");
}

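/*
 * enaddr_aton: parse a colon-separated MAC address string, as returned
 * by the ARCS "eaddr" environment variable (e.g. "08:00:69:0a:0b:0c"),
 * into a six-byte array.
 */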
void
enaddr_aton(const char* str, u_int8_t* eaddr)
{
	int i;
	char c;

	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		if (*str == ':')
			str++;

		c = *str++;
		if (isdigit(c)) {
			eaddr[i] = (c - '0');
		} else if (isxdigit(c)) {
			eaddr[i] = (toupper(c) + 10 - 'A');
		}

		c = *str++;
		if (isdigit(c)) {
			eaddr[i] = (eaddr[i] << 4) | (c - '0');
		} else if (isxdigit(c)) {
			eaddr[i] = (eaddr[i] << 4) | (toupper(c) + 10 - 'A');
		}
	}
}