/*	$NetBSD: if_sq.c,v 1.23 2004/12/29 06:57:52 rumble Exp $	*/

/*
 * Copyright (c) 2001 Rafal K. Boni
 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Portions of this code are derived from software contributed to The
 * NetBSD Foundation by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_sq.c,v 1.23 2004/12/29 06:57:52 rumble Exp $");

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>

#include <machine/endian.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/ic/seeq8003reg.h>

#include <sgimips/hpc/sqvar.h>
#include <sgimips/hpc/hpcvar.h>
#include <sgimips/hpc/hpcreg.h>

#include <dev/arcbios/arcbios.h>
#include <dev/arcbios/arcbiosvar.h>

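/*
 * XXX "static" is defined away below, presumably so that the driver's
 * internal functions remain externally visible (e.g. to ddb) while
 * debugging.
 */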
#define static

/*
 * Short TODO list:
 *	(1) Do counters for bad-RX packets.
 *	(2) Allow multi-segment transmits, instead of copying to a single,
 *	    contiguous mbuf.
 *	(3) Verify sq_stop() turns off enough stuff; I was still getting
 *	    seeq interrupts after sq_stop().
 *	(4) Implement EDLC modes: especially packet auto-pad and simplex
 *	    mode.
 *	(5) Should the driver filter out its own transmissions in non-EDLC
 *	    mode?
 *	(6) Multicast support -- multicast filter, address management, ...
 *	(7) Deal with RB0 (recv buffer overflow) on reception.  Will need
 *	    to figure out if RB0 is read-only as stated in one spot in the
 *	    HPC spec or read-write (i.e., is 'write a one to clear it' the
 *	    correct thing to do)?
 */

#if defined(SQ_DEBUG)
int sq_debug = 0;
#define SQ_DPRINTF(x)	if (sq_debug) printf x
#else
#define SQ_DPRINTF(x)
#endif

static int sq_match(struct device *, struct cfdata *, void *);
static void sq_attach(struct device *, struct device *, void *);
static int sq_init(struct ifnet *);
static void sq_start(struct ifnet *);
static void sq_stop(struct ifnet *, int);
static void sq_watchdog(struct ifnet *);
static int sq_ioctl(struct ifnet *, u_long, caddr_t);

static void sq_set_filter(struct sq_softc *);
static int sq_intr(void *);
static int sq_rxintr(struct sq_softc *);
static int sq_txintr(struct sq_softc *);
static void sq_txring_hpc1(struct sq_softc *);
static void sq_txring_hpc3(struct sq_softc *);
static void sq_reset(struct sq_softc *);
static int sq_add_rxbuf(struct sq_softc *, int);
static void sq_dump_buffer(u_int32_t addr, u_int32_t len);
static void sq_trace_dump(struct sq_softc *);

static void enaddr_aton(const char *, u_int8_t *);

CFATTACH_DECL(sq, sizeof(struct sq_softc),
    sq_match, sq_attach, NULL, NULL);

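/*
 * Frames shorter than this are padded out in software before transmit;
 * the CRC is not included since the chip appends it itself.
 */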
#define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)

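/*
 * Match any HPC-attached device whose name matches ours ("sq").
 */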
static int
sq_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct hpc_attach_args *ha = aux;

	if (strcmp(ha->ha_name, cf->cf_name) == 0)
		return (1);

	return (0);
}

static void
sq_attach(struct device *parent, struct device *self, void *aux)
{
	int i, err;
	char* macaddr;
	struct sq_softc *sc = (void *)self;
	struct hpc_attach_args *haa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	sc->sc_hpct = haa->ha_st;
	sc->hpc_regs = haa->hpc_regs;		/* HPC register definitions */

	if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
	    haa->ha_dmaoff, sc->hpc_regs->enet_regs_size,
	    &sc->sc_hpch)) != 0) {
		printf(": unable to map HPC DMA registers, error = %d\n", err);
		goto fail_0;
	}

	sc->sc_regt = haa->ha_st;
	if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
	    haa->ha_devoff, sc->hpc_regs->enet_devregs_size,
	    &sc->sc_regh)) != 0) {
		printf(": unable to map Seeq registers, error = %d\n", err);
		goto fail_0;
	}

	sc->sc_dmat = haa->ha_dmat;

	if ((err = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct sq_control),
	    PAGE_SIZE, PAGE_SIZE, &sc->sc_cdseg, 1, &sc->sc_ncdseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to allocate control data, error = %d\n", err);
		goto fail_0;
	}

	if ((err = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg,
	    sizeof(struct sq_control), (caddr_t *)&sc->sc_control,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf(": unable to map control data, error = %d\n", err);
		goto fail_1;
	}

	if ((err = bus_dmamap_create(sc->sc_dmat, sizeof(struct sq_control),
	    1, sizeof(struct sq_control), PAGE_SIZE, BUS_DMA_NOWAIT,
	    &sc->sc_cdmap)) != 0) {
		printf(": unable to create DMA map for control data, error "
		    "= %d\n", err);
		goto fail_2;
	}

	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cdmap, sc->sc_control,
	    sizeof(struct sq_control), NULL, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load DMA map for control data, error "
		    "= %d\n", err);
		goto fail_3;
	}

	memset(sc->sc_control, 0, sizeof(struct sq_control));

	/* Create transmit buffer DMA maps */
	for (i = 0; i < SQ_NTXDESC; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_txmap[i])) != 0) {
			printf(": unable to create tx DMA map %d, error = %d\n",
			    i, err);
			goto fail_4;
		}
	}

	/* Create receive buffer DMA maps */
	for (i = 0; i < SQ_NRXDESC; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_rxmap[i])) != 0) {
			printf(": unable to create rx DMA map %d, error = %d\n",
			    i, err);
			goto fail_5;
		}
	}

	/* Pre-allocate the receive buffers. */
	for (i = 0; i < SQ_NRXDESC; i++) {
		if ((err = sq_add_rxbuf(sc, i)) != 0) {
			printf(": unable to allocate or map rx buffer %d,"
			    " error = %d\n", i, err);
			goto fail_6;
		}
	}

	if ((macaddr = ARCBIOS->GetEnvironmentVariable("eaddr")) == NULL) {
		printf(": unable to get MAC address!\n");
		goto fail_6;
	}

	evcnt_attach_dynamic(&sc->sq_intrcnt, EVCNT_TYPE_INTR, NULL,
	    self->dv_xname, "intr");

	if ((cpu_intr_establish(haa->ha_irq, IPL_NET, sq_intr, sc)) == NULL) {
		printf(": unable to establish interrupt!\n");
		goto fail_6;
	}

	/* Reset the chip to a known state. */
	sq_reset(sc);

	/*
	 * Determine if we're an 8003 or 80c03 by setting the first
	 * MAC address register to non-zero, and then reading it back.
	 * If it's zero, we have an 80c03, because we will have read
	 * the TxCollLSB register.
	 */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCOLLS0, 0xa5);
	if (bus_space_read_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCOLLS0) == 0)
		sc->sc_type = SQ_TYPE_80C03;
	else
		sc->sc_type = SQ_TYPE_8003;
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCOLLS0, 0x00);

	printf(": SGI Seeq %s\n",
	    sc->sc_type == SQ_TYPE_80C03 ? "80c03" : "8003");

	enaddr_aton(macaddr, sc->sc_enaddr);

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(sc->sc_enaddr));

	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_init = sq_init;
	ifp->if_stop = sq_stop;
	ifp->if_start = sq_start;
	ifp->if_ioctl = sq_ioctl;
	ifp->if_watchdog = sq_watchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_NOTRAILERS | IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	memset(&sc->sq_trace, 0, sizeof(sc->sq_trace));
	/* Done! */
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
fail_6:
	for (i = 0; i < SQ_NRXDESC; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
fail_5:
	for (i = 0; i < SQ_NRXDESC; i++) {
		if (sc->sc_rxmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmap[i]);
	}
fail_4:
	for (i = 0; i < SQ_NTXDESC; i++) {
		if (sc->sc_txmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_txmap[i]);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cdmap);
fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdmap);
fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t) sc->sc_control,
	    sizeof(struct sq_control));
fail_1:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg);
fail_0:
	return;
}

/* Set up data to get the interface up and running. */
int
sq_init(struct ifnet *ifp)
{
	int i;
	u_int32_t reg;
	struct sq_softc *sc = ifp->if_softc;

	/* Cancel any in-progress I/O */
	sq_stop(ifp, 0);

	sc->sc_nextrx = 0;

	sc->sc_nfreetx = SQ_NTXDESC;
	sc->sc_nexttx = sc->sc_prevtx = 0;

	SQ_TRACE(SQ_RESET, sc, 0, 0);

	/* Set into 8003 mode, bank 0 to program ethernet address */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCMD, TXCMD_BANK0);

	/* Now write the address */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		bus_space_write_1(sc->sc_regt, sc->sc_regh, i,
		    sc->sc_enaddr[i]);

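	/*
	 * Interrupt on CRC, dribble and short-frame errors, on end of
	 * frame, and on good frames received.
	 */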
	sc->sc_rxcmd = RXCMD_IE_CRC |
	    RXCMD_IE_DRIB |
	    RXCMD_IE_SHORT |
	    RXCMD_IE_END |
	    RXCMD_IE_GOOD;

	/*
	 * Set the receive filter -- this will add some bits to the
	 * prototype RXCMD register.  Do this before setting the
	 * transmit config register, since we might need to switch
	 * banks.
	 */
	sq_set_filter(sc);

	/* Set up Seeq transmit command register */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCMD,
	    TXCMD_IE_UFLOW |
	    TXCMD_IE_COLL |
	    TXCMD_IE_16COLL |
	    TXCMD_IE_GOOD);

	/* Now write the receive command register. */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_RXCMD, sc->sc_rxcmd);

	/* Set up HPC ethernet DMA config */
	if (sc->hpc_regs->revision == 3) {
		reg = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
		    HPC_ENETR_DMACFG);
		bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_DMACFG,
		    reg | ENETR_DMACFG_FIX_RXDC |
		    ENETR_DMACFG_FIX_INTR |
		    ENETR_DMACFG_FIX_EOP);
	}

	/* Pass the start of the receive ring to the HPC */
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, sc->hpc_regs->enetr_ndbp,
	    SQ_CDRXADDR(sc, 0));

	/* And turn on the HPC ethernet receive channel */
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, sc->hpc_regs->enetr_ctl,
	    sc->hpc_regs->enetr_ctl_active);

	/*
	 * Turn off delayed receive interrupts on HPC1.
	 * (see Hollywood HPC Specification 2.1.4.3)
	 */
	if (sc->hpc_regs->revision != 3)
		bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC1_ENET_INTDELAY,
		    HPC1_ENET_INTDELAYVAL);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

static void
sq_set_filter(struct sq_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;

	/*
	 * Check for promiscuous mode.  Also implies
	 * all-multicast.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rxcmd |= RXCMD_REC_ALL;
		ifp->if_flags |= IFF_ALLMULTI;
		return;
	}

	/*
	 * The 8003 has no hash table.  If we have any multicast
	 * addresses on the list, enable reception of all multicast
	 * frames.
	 *
	 * XXX The 80c03 has a hash table.  We should use it.
	 */

	ETHER_FIRST_MULTI(step, ec, enm);

	if (enm == NULL) {
		sc->sc_rxcmd &= ~RXCMD_REC_MASK;
		sc->sc_rxcmd |= RXCMD_REC_BROAD;

		ifp->if_flags &= ~IFF_ALLMULTI;
		return;
	}

	sc->sc_rxcmd |= RXCMD_REC_MULTI;
	ifp->if_flags |= IFF_ALLMULTI;
}

int
sq_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	int s, error = 0;

	SQ_TRACE(SQ_IOCTL, (struct sq_softc *)ifp->if_softc, 0, 0);

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		/*
		 * Multicast list has changed; set the hardware filter
		 * accordingly.
		 */
		if (ifp->if_flags & IFF_RUNNING)
			error = sq_init(ifp);
		else
			error = 0;
	}

	splx(s);
	return (error);
}

void
sq_start(struct ifnet *ifp)
{
	struct sq_softc *sc = ifp->if_softc;
	u_int32_t status;
	struct mbuf *m0, *m;
	bus_dmamap_t dmamap;
	int err, totlen, nexttx, firsttx, lasttx = -1, ofree, seg;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_nfreetx;
	firsttx = sc->sc_nexttx;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->sc_nfreetx != 0) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		dmamap = sc->sc_txmap[sc->sc_nexttx];

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  In this case, we'll copy
		 * and try again.
		 * Also copy it if we need to pad, so that we are sure
		 * there is room for the pad buffer.
		 * XXX the right way of doing this is to use a static
		 * buffer for padding and to add it to the transmit
		 * descriptor (see sys/dev/pci/if_tl.c for example).
		 * We can't do this here yet because we can't send
		 * packets with more than one fragment.
		 */
		if (m0->m_pkthdr.len < ETHER_PAD_LEN ||
		    bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}

			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			if (m0->m_pkthdr.len < ETHER_PAD_LEN) {
				memset(mtod(m, char *) + m0->m_pkthdr.len, 0,
				    ETHER_PAD_LEN - m0->m_pkthdr.len);
				m->m_pkthdr.len = m->m_len = ETHER_PAD_LEN;
			} else
				m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;

			if ((err = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_NOWAIT)) != 0) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, err);
				break;
			}
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.
		 */
		if (dmamap->dm_nsegs > sc->sc_nfreetx) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed to anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX is it worth it?
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			if (m != NULL)
				m_freem(m);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		SQ_TRACE(SQ_ENQUEUE, sc, sc->sc_nexttx, 0);

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptors.
		 */
		for (nexttx = sc->sc_nexttx, seg = 0, totlen = 0;
		    seg < dmamap->dm_nsegs;
		    seg++, nexttx = SQ_NEXTTX(nexttx)) {
			if (sc->hpc_regs->revision == 3) {
				sc->sc_txdesc[nexttx].hpc3_hdd_bufptr =
				    dmamap->dm_segs[seg].ds_addr;
				sc->sc_txdesc[nexttx].hpc3_hdd_ctl =
				    dmamap->dm_segs[seg].ds_len;
			} else {
				sc->sc_txdesc[nexttx].hpc1_hdd_bufptr =
				    dmamap->dm_segs[seg].ds_addr;
				sc->sc_txdesc[nexttx].hpc1_hdd_ctl =
				    dmamap->dm_segs[seg].ds_len;
			}
			sc->sc_txdesc[nexttx].hdd_descptr =
			    SQ_CDTXADDR(sc, SQ_NEXTTX(nexttx));
			lasttx = nexttx;
			totlen += dmamap->dm_segs[seg].ds_len;
		}

		/* Last descriptor gets end-of-packet */
		KASSERT(lasttx != -1);
		if (sc->hpc_regs->revision == 3)
			sc->sc_txdesc[lasttx].hpc3_hdd_ctl |= HDD_CTL_EOPACKET;
		else
			sc->sc_txdesc[lasttx].hpc1_hdd_ctl |=
			    HPC1_HDD_CTL_EOPACKET;

		SQ_DPRINTF(("%s: transmit %d-%d, len %d\n",
		    sc->sc_dev.dv_xname, sc->sc_nexttx, lasttx, totlen));

		if (ifp->if_flags & IFF_DEBUG) {
			printf(" transmit chain:\n");
			for (seg = sc->sc_nexttx;; seg = SQ_NEXTTX(seg)) {
				printf(" descriptor %d:\n", seg);
				printf(" hdd_bufptr: 0x%08x\n",
				    (sc->hpc_regs->revision == 3) ?
				    sc->sc_txdesc[seg].hpc3_hdd_bufptr :
				    sc->sc_txdesc[seg].hpc1_hdd_bufptr);
				printf(" hdd_ctl: 0x%08x\n",
				    (sc->hpc_regs->revision == 3) ?
				    sc->sc_txdesc[seg].hpc3_hdd_ctl :
				    sc->sc_txdesc[seg].hpc1_hdd_ctl);
				printf(" hdd_descptr: 0x%08x\n",
				    sc->sc_txdesc[seg].hdd_descptr);

				if (seg == lasttx)
					break;
			}
		}

		/* Sync the descriptors we're using. */
		SQ_CDTXSYNC(sc, sc->sc_nexttx, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Store a pointer to the packet so we can free it later */
		sc->sc_txmbuf[sc->sc_nexttx] = m0;

		/* Advance the tx pointer. */
		sc->sc_nfreetx -= dmamap->dm_nsegs;
		sc->sc_nexttx = nexttx;
	}

	/* All transmit descriptors used up, let upper layers know */
	if (sc->sc_nfreetx == 0)
		ifp->if_flags |= IFF_OACTIVE;

	if (sc->sc_nfreetx != ofree) {
		SQ_DPRINTF(("%s: %d packets enqueued, first %d, INTR on %d\n",
		    sc->sc_dev.dv_xname, lasttx - firsttx + 1,
		    firsttx, lasttx));

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued, mark it as the last
		 * descriptor.
		 *
		 * HDD_CTL_INTR will generate an interrupt on
		 * HPC1 by itself.  HPC3 will not interrupt unless
		 * HDD_CTL_EOPACKET is set as well.
		 */
		KASSERT(lasttx != -1);
		if (sc->hpc_regs->revision == 3) {
			sc->sc_txdesc[lasttx].hpc3_hdd_ctl |= HDD_CTL_INTR |
			    HDD_CTL_EOCHAIN;
		} else {
			sc->sc_txdesc[lasttx].hpc1_hdd_ctl |= HPC1_HDD_CTL_INTR;
			sc->sc_txdesc[lasttx].hpc1_hdd_bufptr |=
			    HPC1_HDD_CTL_EOCHAIN;
		}

		SQ_CDTXSYNC(sc, lasttx, 1,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/*
		 * There is a potential race condition here if the HPC
		 * DMA channel is active and we try and either update
		 * the 'next descriptor' pointer in the HPC PIO space
		 * or the 'next descriptor' pointer in a previous desc-
		 * riptor.
		 *
		 * To avoid this, if the channel is active, we rely on
		 * the transmit interrupt routine noticing that there
		 * are more packets to send and restarting the HPC DMA
		 * engine, rather than mucking with the DMA state here.
		 */
		status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
		    sc->hpc_regs->enetx_ctl);

		if ((status & sc->hpc_regs->enetx_ctl_active) != 0) {
			SQ_TRACE(SQ_ADD_TO_DMA, sc, firsttx, status);

			/* NB: hpc3_hdd_ctl is also hpc1_hdd_bufptr */
			sc->sc_txdesc[SQ_PREVTX(firsttx)].hpc3_hdd_ctl &=
			    ~HDD_CTL_EOCHAIN;

			if (sc->hpc_regs->revision != 3)
				sc->sc_txdesc[SQ_PREVTX(firsttx)].hpc1_hdd_ctl
				    &= ~HPC1_HDD_CTL_INTR;

			SQ_CDTXSYNC(sc, SQ_PREVTX(firsttx), 1,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		} else if (sc->hpc_regs->revision == 3) {
			SQ_TRACE(SQ_START_DMA, sc, firsttx, status);

			bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
			    HPC_ENETX_NDBP, SQ_CDTXADDR(sc, firsttx));

			/* Kick DMA channel into life */
			bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
			    HPC_ENETX_CTL, ENETX_CTL_ACTIVE);
		} else {
			/*
			 * In the HPC1 case where transmit DMA is
			 * inactive, we can either kick off if
			 * the ring was previously empty, or call
			 * our transmit interrupt handler to
			 * figure out if the ring stopped short
			 * and restart at the right place.
			 */
			if (ofree == SQ_NTXDESC) {
				SQ_TRACE(SQ_START_DMA, sc, firsttx, status);

				bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
				    HPC1_ENETX_NDBP,
				    SQ_CDTXADDR(sc, firsttx));
				bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
				    HPC1_ENETX_CFXBP,
				    SQ_CDTXADDR(sc, firsttx));
				bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
				    HPC1_ENETX_CBP,
				    SQ_CDTXADDR(sc, firsttx));

				/* Kick DMA channel into life */
				bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
				    HPC1_ENETX_CTL, HPC1_ENETX_CTL_ACTIVE);
			} else
				sq_txring_hpc1(sc);
		}

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

void
sq_stop(struct ifnet *ifp, int disable)
{
	int i;
	struct sq_softc *sc = ifp->if_softc;

	for (i = 0; i < SQ_NTXDESC; i++) {
		if (sc->sc_txmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
			m_freem(sc->sc_txmbuf[i]);
			sc->sc_txmbuf[i] = NULL;
		}
	}

	/* Clear Seeq transmit/receive command registers */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCMD, 0);
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_RXCMD, 0);

	sq_reset(sc);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

/* Device timeout/watchdog routine. */
void
sq_watchdog(struct ifnet *ifp)
{
	u_int32_t status;
	struct sq_softc *sc = ifp->if_softc;

	status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
	    sc->hpc_regs->enetx_ctl);
	log(LOG_ERR, "%s: device timeout (prev %d, next %d, free %d, "
	    "status %08x)\n", sc->sc_dev.dv_xname, sc->sc_prevtx,
	    sc->sc_nexttx, sc->sc_nfreetx, status);

	sq_trace_dump(sc);

	memset(&sc->sq_trace, 0, sizeof(sc->sq_trace));
	sc->sq_trace_idx = 0;

	++ifp->if_oerrors;

	sq_init(ifp);
}

static void
sq_trace_dump(struct sq_softc *sc)
{
	int i;
	char *act;

	for (i = 0; i < sc->sq_trace_idx; i++) {
		switch (sc->sq_trace[i].action) {
		case SQ_RESET:		act = "SQ_RESET";	break;
		case SQ_ADD_TO_DMA:	act = "SQ_ADD_TO_DMA";	break;
		case SQ_START_DMA:	act = "SQ_START_DMA";	break;
		case SQ_DONE_DMA:	act = "SQ_DONE_DMA";	break;
		case SQ_RESTART_DMA:	act = "SQ_RESTART_DMA";	break;
		case SQ_TXINTR_ENTER:	act = "SQ_TXINTR_ENTER"; break;
		case SQ_TXINTR_EXIT:	act = "SQ_TXINTR_EXIT";	break;
		case SQ_TXINTR_BUSY:	act = "SQ_TXINTR_BUSY";	break;
		case SQ_IOCTL:		act = "SQ_IOCTL";	break;
		case SQ_ENQUEUE:	act = "SQ_ENQUEUE";	break;
		default:		act = "UNKNOWN";
		}

		printf("%s: [%03d] action %-16s buf %03d free %03d "
		    "status %08x line %d\n", sc->sc_dev.dv_xname, i, act,
		    sc->sq_trace[i].bufno, sc->sq_trace[i].freebuf,
		    sc->sq_trace[i].status, sc->sq_trace[i].line);
	}
}

static int
sq_intr(void *arg)
{
	struct sq_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int handled = 0;
	u_int32_t stat;

	stat = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
	    sc->hpc_regs->enetr_reset);

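	/*
	 * Bit 1 of the HPC reset register reflects the Seeq interrupt;
	 * if it is clear, this interrupt was not ours.  Writing the bit
	 * back (below) acknowledges it.
	 */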
	if ((stat & 2) == 0) {
		printf("%s: Unexpected interrupt!\n", sc->sc_dev.dv_xname);
		return 0;
	}

	bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
	    sc->hpc_regs->enetr_reset, (stat | 2));

	/*
	 * If the interface isn't running, the interrupt couldn't
	 * possibly have come from us.
	 */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	sc->sq_intrcnt.ev_count++;

	/* Always check for received packets */
	if (sq_rxintr(sc) != 0)
		handled++;

	/* Only handle transmit interrupts if we actually sent something */
	if (sc->sc_nfreetx < SQ_NTXDESC) {
		sq_txintr(sc);
		handled++;
	}

#if NRND > 0
	if (handled)
		rnd_add_uint32(&sc->rnd_source, stat);
#endif
	return (handled);
}

static int
sq_rxintr(struct sq_softc *sc)
{
	int count = 0;
	struct mbuf* m;
	int i, framelen;
	u_int8_t pktstat;
	u_int32_t status;
	u_int32_t ctl_reg;
	int new_end, orig_end;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	for (i = sc->sc_nextrx;; i = SQ_NEXTRX(i)) {
		SQ_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/*
		 * If this is a CPU-owned buffer, we're at the end of
		 * the list.
		 */
		if (sc->hpc_regs->revision == 3)
			ctl_reg = sc->sc_rxdesc[i].hpc3_hdd_ctl & HDD_CTL_OWN;
		else
			ctl_reg = sc->sc_rxdesc[i].hpc1_hdd_ctl &
			    HPC1_HDD_CTL_OWN;

		if (ctl_reg) {
#if defined(SQ_DEBUG)
			u_int32_t reg;

			reg = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
			    sc->hpc_regs->enetr_ctl);
			SQ_DPRINTF(("%s: rxintr: done at %d (ctl %08x)\n",
			    sc->sc_dev.dv_xname, i, reg));
#endif
			break;
		}

		count++;

		m = sc->sc_rxmbuf[i];
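		/*
		 * The frame begins two bytes into the receive buffer (so
		 * the IP header ends up aligned) and is followed by a
		 * status byte; the descriptor byte count is the unused
		 * remainder of the buffer.
		 */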
		framelen = m->m_ext.ext_size - 3;
		if (sc->hpc_regs->revision == 3)
			framelen -=
			    HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hpc3_hdd_ctl);
		else
			framelen -=
			    HPC1_HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hpc1_hdd_ctl);

		/* Now sync the actual packet data */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
		    sc->sc_rxmap[i]->dm_mapsize, BUS_DMASYNC_POSTREAD);

		pktstat = *((u_int8_t*)m->m_data + framelen + 2);

		if ((pktstat & RXSTAT_GOOD) == 0) {
			ifp->if_ierrors++;

			if (pktstat & RXSTAT_OFLOW)
				printf("%s: receive FIFO overflow\n",
				    sc->sc_dev.dv_xname);

			bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
			    sc->sc_rxmap[i]->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			SQ_INIT_RXDESC(sc, i);
			SQ_DPRINTF(("%s: sq_rxintr: buf %d no RXSTAT_GOOD\n",
			    sc->sc_dev.dv_xname, i));
			continue;
		}

		if (sq_add_rxbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
			    sc->sc_rxmap[i]->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			SQ_INIT_RXDESC(sc, i);
			SQ_DPRINTF(("%s: sq_rxintr: buf %d sq_add_rxbuf() "
			    "failed\n", sc->sc_dev.dv_xname, i));
			continue;
		}

		m->m_data += 2;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = framelen;

		ifp->if_ipackets++;

		SQ_DPRINTF(("%s: sq_rxintr: buf %d len %d\n",
		    sc->sc_dev.dv_xname, i, framelen));

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		(*ifp->if_input)(ifp, m);
	}

	/* If anything happened, move ring start/end pointers to new spot */
	if (i != sc->sc_nextrx) {
		/* NB: hpc3_hdd_ctl is also hpc1_hdd_bufptr */

		new_end = SQ_PREVRX(i);
		sc->sc_rxdesc[new_end].hpc3_hdd_ctl |= HDD_CTL_EOCHAIN;
		SQ_CDRXSYNC(sc, new_end,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		orig_end = SQ_PREVRX(sc->sc_nextrx);
		sc->sc_rxdesc[orig_end].hpc3_hdd_ctl &= ~HDD_CTL_EOCHAIN;
		SQ_CDRXSYNC(sc, orig_end,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		sc->sc_nextrx = i;
	}

	status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
	    sc->hpc_regs->enetr_ctl);

	/* If receive channel is stopped, restart it... */
	if ((status & sc->hpc_regs->enetr_ctl_active) == 0) {
		/* Pass the start of the receive ring to the HPC */
		bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
		    sc->hpc_regs->enetr_ndbp, SQ_CDRXADDR(sc, sc->sc_nextrx));

		/* And turn on the HPC ethernet receive channel */
		bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
		    sc->hpc_regs->enetr_ctl, sc->hpc_regs->enetr_ctl_active);
	}

	return count;
}

static int
sq_txintr(struct sq_softc *sc)
{
	int shift = 0;
	u_int32_t status;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

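	/* On HPC1 the Seeq status is reported in the upper halfword. */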
	if (sc->hpc_regs->revision != 3)
		shift = 16;

	status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
	    sc->hpc_regs->enetx_ctl) >> shift;

	SQ_TRACE(SQ_TXINTR_ENTER, sc, sc->sc_prevtx, status);

	if ((status & ((sc->hpc_regs->enetx_ctl_active >> shift) |
	    TXSTAT_GOOD)) == 0) {
		if (status & TXSTAT_COLL)
			ifp->if_collisions++;

		if (status & TXSTAT_UFLOW) {
			printf("%s: transmit underflow\n", sc->sc_dev.dv_xname);
			ifp->if_oerrors++;
		}

		if (status & TXSTAT_16COLL) {
			printf("%s: max collisions reached\n",
			    sc->sc_dev.dv_xname);
			ifp->if_oerrors++;
			ifp->if_collisions += 16;
		}
	}

	/* prevtx now points to next xmit packet not yet finished */
	if (sc->hpc_regs->revision == 3)
		sq_txring_hpc3(sc);
	else
		sq_txring_hpc1(sc);

	/* If we have buffers free, let upper layers know */
	if (sc->sc_nfreetx > 0)
		ifp->if_flags &= ~IFF_OACTIVE;

	/* If all packets have left the coop, cancel watchdog */
	if (sc->sc_nfreetx == SQ_NTXDESC)
		ifp->if_timer = 0;

	SQ_TRACE(SQ_TXINTR_EXIT, sc, sc->sc_prevtx, status);
	sq_start(ifp);

	return 1;
}

/*
 * Reclaim used transmit descriptors and restart the transmit DMA
 * engine if necessary.
 */
static void
sq_txring_hpc1(struct sq_softc *sc)
{
	/*
	 * HPC1 doesn't tag transmitted descriptors, however,
	 * the NDBP register points to the next descriptor that
	 * has not yet been processed.  If DMA is not in progress,
	 * we can safely reclaim all descriptors up to NDBP, and,
	 * if necessary, restart DMA at NDBP.  Otherwise, if DMA
	 * is active, we can only safely reclaim up to CBP.
	 *
	 * For now, we'll only reclaim on inactive DMA and assume
	 * that a sufficiently large ring keeps us out of trouble.
	 */
	u_int32_t reclaimto, status;
	int reclaimall, i = sc->sc_prevtx;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch, HPC1_ENETX_CTL);
	if (status & HPC1_ENETX_CTL_ACTIVE) {
		SQ_TRACE(SQ_TXINTR_BUSY, sc, i, status);
		return;
	} else {
		reclaimto = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
		    HPC1_ENETX_NDBP);
	}

	if (sc->sc_nfreetx == 0 && SQ_CDTXADDR(sc, i) == reclaimto)
		reclaimall = 1;
	else
		reclaimall = 0;

	while (sc->sc_nfreetx < SQ_NTXDESC) {
		if (SQ_CDTXADDR(sc, i) == reclaimto && !reclaimall)
			break;

		SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* Sync the packet data, unload DMA map, free mbuf */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap[i], 0,
		    sc->sc_txmap[i]->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
		m_freem(sc->sc_txmbuf[i]);
		sc->sc_txmbuf[i] = NULL;

		ifp->if_opackets++;
		sc->sc_nfreetx++;

		SQ_TRACE(SQ_DONE_DMA, sc, i, status);

		i = SQ_NEXTTX(i);
	}

	if (sc->sc_nfreetx < SQ_NTXDESC) {
		SQ_TRACE(SQ_RESTART_DMA, sc, i, status);

		KASSERT(reclaimto == SQ_CDTXADDR(sc, i));

		bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC1_ENETX_CFXBP,
		    reclaimto);
		bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC1_ENETX_CBP,
		    reclaimto);

		/* Kick DMA channel into life */
		bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC1_ENETX_CTL,
		    HPC1_ENETX_CTL_ACTIVE);

		/*
		 * Set a watchdog timer in case the chip
		 * flakes out.
		 */
		ifp->if_timer = 5;
	}

	sc->sc_prevtx = i;
}

/*
 * Reclaim used transmit descriptors and restart the transmit DMA
 * engine if necessary.
 */
static void
sq_txring_hpc3(struct sq_softc *sc)
{
	/*
	 * HPC3 tags descriptors with a bit once they've been
	 * transmitted.  We need only free each XMITDONE'd
	 * descriptor, and restart the DMA engine if any
	 * descriptors are left over.
	 */
	int i;
	u_int32_t status = 0;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	i = sc->sc_prevtx;
	while (sc->sc_nfreetx < SQ_NTXDESC) {
		/*
		 * Check status first so we don't end up with a case of
		 * the buffer not being finished while the DMA channel
		 * has gone idle.
		 */
		status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
		    HPC_ENETX_CTL);

		SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* Check for used descriptor and restart DMA chain if needed */
		if ((sc->sc_txdesc[i].hpc3_hdd_ctl & HDD_CTL_XMITDONE) == 0) {
			if ((status & ENETX_CTL_ACTIVE) == 0) {
				SQ_TRACE(SQ_RESTART_DMA, sc, i, status);

				bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
				    HPC_ENETX_NDBP, SQ_CDTXADDR(sc, i));

				/* Kick DMA channel into life */
				bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
				    HPC_ENETX_CTL, ENETX_CTL_ACTIVE);

				/*
				 * Set a watchdog timer in case the chip
				 * flakes out.
				 */
				ifp->if_timer = 5;
			} else
				SQ_TRACE(SQ_TXINTR_BUSY, sc, i, status);
			break;
		}

		/* Sync the packet data, unload DMA map, free mbuf */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap[i], 0,
		    sc->sc_txmap[i]->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
		m_freem(sc->sc_txmbuf[i]);
		sc->sc_txmbuf[i] = NULL;

		ifp->if_opackets++;
		sc->sc_nfreetx++;

		SQ_TRACE(SQ_DONE_DMA, sc, i, status);
		i = SQ_NEXTTX(i);
	}

	sc->sc_prevtx = i;
}

void
sq_reset(struct sq_softc *sc)
{
	/* Stop HPC dma channels */
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, sc->hpc_regs->enetr_ctl, 0);
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, sc->hpc_regs->enetx_ctl, 0);

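	/* Pulse the reset bits in the HPC reset register. */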
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, sc->hpc_regs->enetr_reset,
	    3);
	delay(20);
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, sc->hpc_regs->enetr_reset,
	    0);
}

/* sq_add_rxbuf: Add a receive buffer to the indicated descriptor. */
int
sq_add_rxbuf(struct sq_softc *sc, int idx)
{
	int err;
	struct mbuf *m;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (sc->sc_rxmbuf[idx] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[idx]);

	sc->sc_rxmbuf[idx] = m;

	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_rxmap[idx],
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, err);
		panic("sq_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[idx], 0,
	    sc->sc_rxmap[idx]->dm_mapsize, BUS_DMASYNC_PREREAD);

	SQ_INIT_RXDESC(sc, idx);

	return 0;
}

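/*
 * Hex-dump `len' bytes of physical memory starting at `addr' (accessed
 * through KSEG1), for debugging.
 */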
void
sq_dump_buffer(u_int32_t addr, u_int32_t len)
{
	u_int i;
	u_char* physaddr = (u_char *)MIPS_PHYS_TO_KSEG1((caddr_t)addr);

	if (len == 0)
		return;

	printf("%p: ", physaddr);

	for (i = 0; i < len; i++) {
		printf("%02x ", *(physaddr + i) & 0xff);
		if ((i % 16) == 15 && i != len - 1)
			printf("\n%p: ", physaddr + i);
	}

	printf("\n");
}

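/*
 * Parse an Ethernet address of the form "xx:xx:xx:xx:xx:xx" (as returned
 * by the ARCS "eaddr" environment variable) into binary form.
 */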
void
enaddr_aton(const char* str, u_int8_t* eaddr)
{
	int i;
	char c;

	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		if (*str == ':')
			str++;

		c = *str++;
		if (isdigit(c)) {
			eaddr[i] = (c - '0');
		} else if (isxdigit(c)) {
			eaddr[i] = (toupper(c) + 10 - 'A');
		}

		c = *str++;
		if (isdigit(c)) {
			eaddr[i] = (eaddr[i] << 4) | (c - '0');
		} else if (isxdigit(c)) {
			eaddr[i] = (eaddr[i] << 4) | (toupper(c) + 10 - 'A');
		}
	}
}