/*	$NetBSD: if_sq.c,v 1.26 2004/12/30 23:18:09 rumble Exp $	*/

/*
 * Copyright (c) 2001 Rafal K. Boni
 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Portions of this code are derived from software contributed to The
 * NetBSD Foundation by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_sq.c,v 1.26 2004/12/30 23:18:09 rumble Exp $");

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>

#include <machine/endian.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/ic/seeq8003reg.h>

#include <sgimips/hpc/sqvar.h>
#include <sgimips/hpc/hpcvar.h>
#include <sgimips/hpc/hpcreg.h>

#include <dev/arcbios/arcbios.h>
#include <dev/arcbios/arcbiosvar.h>

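/*
 * XXX: "static" is defined away below, presumably a debugging leftover
 * so that the functions below remain visible to the kernel debugger.
 */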
#define static

/*
 * Short TODO list:
 *	(1) Do counters for bad-RX packets.
 *	(2) Allow multi-segment transmits, instead of copying to a single,
 *	    contiguous mbuf.
 *	(3) Verify sq_stop() turns off enough stuff; I was still getting
 *	    seeq interrupts after sq_stop().
 *	(4) Implement EDLC modes: especially packet auto-pad and simplex
 *	    mode.
 *	(5) Should the driver filter out its own transmissions in non-EDLC
 *	    mode?
 *	(6) Multicast support -- multicast filter, address management, ...
 *	(7) Deal with RB0 (recv buffer overflow) on reception.  Will need
 *	    to figure out if RB0 is read-only as stated in one spot in the
 *	    HPC spec, or read-write (ie, is 'write a one to clear it' the
 *	    correct thing)?
 */

#if defined(SQ_DEBUG)
int sq_debug = 0;
#define SQ_DPRINTF(x) if (sq_debug) printf x
#else
#define SQ_DPRINTF(x)
#endif

static int	sq_match(struct device *, struct cfdata *, void *);
static void	sq_attach(struct device *, struct device *, void *);
static int	sq_init(struct ifnet *);
static void	sq_start(struct ifnet *);
static void	sq_stop(struct ifnet *, int);
static void	sq_watchdog(struct ifnet *);
static int	sq_ioctl(struct ifnet *, u_long, caddr_t);

static void	sq_set_filter(struct sq_softc *);
static int	sq_intr(void *);
static int	sq_rxintr(struct sq_softc *);
static int	sq_txintr(struct sq_softc *);
static void	sq_txring_hpc1(struct sq_softc *);
static void	sq_txring_hpc3(struct sq_softc *);
static void	sq_reset(struct sq_softc *);
static int	sq_add_rxbuf(struct sq_softc *, int);
static void	sq_dump_buffer(u_int32_t addr, u_int32_t len);
static void	sq_trace_dump(struct sq_softc *);

static void	enaddr_aton(const char*, u_int8_t*);

CFATTACH_DECL(sq, sizeof(struct sq_softc),
    sq_match, sq_attach, NULL, NULL);

#define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)

#define sq_seeq_read(sc, off) \
	bus_space_read_1(sc->sc_regt, sc->sc_regh, off)
#define sq_seeq_write(sc, off, val) \
	bus_space_write_1(sc->sc_regt, sc->sc_regh, off, val)

#define sq_hpc_read(sc, off) \
	bus_space_read_4(sc->sc_hpct, sc->sc_hpch, off)
#define sq_hpc_write(sc, off, val) \
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, off, val)

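/*
 * Match the Seeq Ethernet controller by comparing the HPC attach-args
 * name against the config name.
 */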
static int
sq_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct hpc_attach_args *ha = aux;

	if (strcmp(ha->ha_name, cf->cf_name) == 0)
		return (1);

	return (0);
}

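/*
 * Attach the device: map the HPC DMA and Seeq chip registers, allocate
 * and load the control-data DMA area, create the transmit/receive DMA
 * maps, fetch the MAC address from the ARCBIOS "eaddr" environment
 * variable, establish the interrupt handler, and attach the interface.
 */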
static void
sq_attach(struct device *parent, struct device *self, void *aux)
{
	int i, err;
	char* macaddr;
	struct sq_softc *sc = (void *)self;
	struct hpc_attach_args *haa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	sc->sc_hpct = haa->ha_st;
	sc->hpc_regs = haa->hpc_regs;	/* HPC register definitions */

	if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
	    haa->ha_dmaoff, sc->hpc_regs->enet_regs_size,
	    &sc->sc_hpch)) != 0) {
		printf(": unable to map HPC DMA registers, error = %d\n", err);
		goto fail_0;
	}

	sc->sc_regt = haa->ha_st;
	if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
	    haa->ha_devoff, sc->hpc_regs->enet_devregs_size,
	    &sc->sc_regh)) != 0) {
		printf(": unable to map Seeq registers, error = %d\n", err);
		goto fail_0;
	}

	sc->sc_dmat = haa->ha_dmat;

	if ((err = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct sq_control),
	    PAGE_SIZE, PAGE_SIZE, &sc->sc_cdseg, 1, &sc->sc_ncdseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to allocate control data, error = %d\n", err);
		goto fail_0;
	}

	if ((err = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg,
	    sizeof(struct sq_control), (caddr_t *)&sc->sc_control,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf(": unable to map control data, error = %d\n", err);
		goto fail_1;
	}

	if ((err = bus_dmamap_create(sc->sc_dmat, sizeof(struct sq_control),
	    1, sizeof(struct sq_control), PAGE_SIZE, BUS_DMA_NOWAIT,
	    &sc->sc_cdmap)) != 0) {
		printf(": unable to create DMA map for control data, error "
		    "= %d\n", err);
		goto fail_2;
	}

	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cdmap, sc->sc_control,
	    sizeof(struct sq_control), NULL, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load DMA map for control data, error "
		    "= %d\n", err);
		goto fail_3;
	}

	memset(sc->sc_control, 0, sizeof(struct sq_control));

	/* Create transmit buffer DMA maps */
	for (i = 0; i < SQ_NTXDESC; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_txmap[i])) != 0) {
			printf(": unable to create tx DMA map %d, error = %d\n",
			    i, err);
			goto fail_4;
		}
	}

	/* Create receive buffer DMA maps */
	for (i = 0; i < SQ_NRXDESC; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_rxmap[i])) != 0) {
			printf(": unable to create rx DMA map %d, error = %d\n",
			    i, err);
			goto fail_5;
		}
	}

	/* Pre-allocate the receive buffers. */
	for (i = 0; i < SQ_NRXDESC; i++) {
		if ((err = sq_add_rxbuf(sc, i)) != 0) {
			printf(": unable to allocate or map rx buffer %d, "
			    "error = %d\n", i, err);
			goto fail_6;
		}
	}

	if ((macaddr = ARCBIOS->GetEnvironmentVariable("eaddr")) == NULL) {
		printf(": unable to get MAC address!\n");
		goto fail_6;
	}

	evcnt_attach_dynamic(&sc->sq_intrcnt, EVCNT_TYPE_INTR, NULL,
	    self->dv_xname, "intr");

	if ((cpu_intr_establish(haa->ha_irq, IPL_NET, sq_intr, sc)) == NULL) {
		printf(": unable to establish interrupt!\n");
		goto fail_6;
	}

	/* Reset the chip to a known state. */
	sq_reset(sc);

	/*
	 * Determine if we're an 8003 or 80c03 by setting the first
	 * MAC address register to non-zero, and then reading it back.
	 * If it's zero, we have an 80c03, because we will have read
	 * the TxCollLSB register.
	 */
	sq_seeq_write(sc, SEEQ_TXCOLLS0, 0xa5);
	if (sq_seeq_read(sc, SEEQ_TXCOLLS0) == 0)
		sc->sc_type = SQ_TYPE_80C03;
	else
		sc->sc_type = SQ_TYPE_8003;
	sq_seeq_write(sc, SEEQ_TXCOLLS0, 0x00);

	printf(": SGI Seeq %s\n",
	    sc->sc_type == SQ_TYPE_80C03 ? "80c03" : "8003");

	enaddr_aton(macaddr, sc->sc_enaddr);

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(sc->sc_enaddr));

	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_init = sq_init;
	ifp->if_stop = sq_stop;
	ifp->if_start = sq_start;
	ifp->if_ioctl = sq_ioctl;
	ifp->if_watchdog = sq_watchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_NOTRAILERS | IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	memset(&sc->sq_trace, 0, sizeof(sc->sq_trace));
	/* Done! */
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	for (i = 0; i < SQ_NRXDESC; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
 fail_5:
	for (i = 0; i < SQ_NRXDESC; i++) {
		if (sc->sc_rxmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmap[i]);
	}
 fail_4:
	for (i = 0; i < SQ_NTXDESC; i++) {
		if (sc->sc_txmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_txmap[i]);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cdmap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdmap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t) sc->sc_control,
	    sizeof(struct sq_control));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg);
 fail_0:
	return;
}

/* Set up data to get the interface up and running. */
int
sq_init(struct ifnet *ifp)
{
	int i;
	u_int32_t reg;
	struct sq_softc *sc = ifp->if_softc;

	/* Cancel any in-progress I/O */
	sq_stop(ifp, 0);

	sc->sc_nextrx = 0;

	sc->sc_nfreetx = SQ_NTXDESC;
	sc->sc_nexttx = sc->sc_prevtx = 0;

	SQ_TRACE(SQ_RESET, sc, 0, 0);

	/* Set into 8003 mode, bank 0 to program ethernet address */
	sq_seeq_write(sc, SEEQ_TXCMD, TXCMD_BANK0);

	/* Now write the address */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		sq_seeq_write(sc, i, sc->sc_enaddr[i]);

	sc->sc_rxcmd = RXCMD_IE_CRC |
	    RXCMD_IE_DRIB |
	    RXCMD_IE_SHORT |
	    RXCMD_IE_END |
	    RXCMD_IE_GOOD;

	/*
	 * Set the receive filter -- this will add some bits to the
	 * prototype RXCMD register.  Do this before setting the
	 * transmit config register, since we might need to switch
	 * banks.
	 */
	sq_set_filter(sc);

	/* Set up Seeq transmit command register */
	sq_seeq_write(sc, SEEQ_TXCMD, TXCMD_IE_UFLOW |
	    TXCMD_IE_COLL |
	    TXCMD_IE_16COLL |
	    TXCMD_IE_GOOD);

	/* Now write the receive command register. */
	sq_seeq_write(sc, SEEQ_RXCMD, sc->sc_rxcmd);

	/* Set up HPC ethernet DMA config */
	if (sc->hpc_regs->revision == 3) {
		reg = sq_hpc_read(sc, HPC3_ENETR_DMACFG);
		sq_hpc_write(sc, HPC3_ENETR_DMACFG, reg |
		    HPC3_ENETR_DMACFG_FIX_RXDC |
		    HPC3_ENETR_DMACFG_FIX_INTR |
		    HPC3_ENETR_DMACFG_FIX_EOP);
	}

	/* Pass the start of the receive ring to the HPC */
	sq_hpc_write(sc, sc->hpc_regs->enetr_ndbp, SQ_CDRXADDR(sc, 0));

	/* And turn on the HPC ethernet receive channel */
	sq_hpc_write(sc, sc->hpc_regs->enetr_ctl,
	    sc->hpc_regs->enetr_ctl_active);

	/*
	 * Turn off delayed receive interrupts on HPC1.
	 * (see Hollywood HPC Specification 2.1.4.3)
	 */
	if (sc->hpc_regs->revision != 3)
		sq_hpc_write(sc, HPC1_ENET_INTDELAY, HPC1_ENET_INTDELAY_OFF);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

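/*
 * Program the Seeq receive filter.  The 8003 has no multicast hash
 * table, so any multicast membership (or promiscuous mode) means
 * receiving all multicast (or all) frames.
 */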
static void
sq_set_filter(struct sq_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;

	/*
	 * Check for promiscuous mode.  Also implies
	 * all-multicast.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rxcmd |= RXCMD_REC_ALL;
		ifp->if_flags |= IFF_ALLMULTI;
		return;
	}

	/*
	 * The 8003 has no hash table.  If we have any multicast
	 * addresses on the list, enable reception of all multicast
	 * frames.
	 *
	 * XXX The 80c03 has a hash table.  We should use it.
	 */

	ETHER_FIRST_MULTI(step, ec, enm);

	if (enm == NULL) {
		sc->sc_rxcmd &= ~RXCMD_REC_MASK;
		sc->sc_rxcmd |= RXCMD_REC_BROAD;

		ifp->if_flags &= ~IFF_ALLMULTI;
		return;
	}

	sc->sc_rxcmd |= RXCMD_REC_MULTI;
	ifp->if_flags |= IFF_ALLMULTI;
}

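/*
 * Handle ioctls.  Everything is passed to ether_ioctl(); on ENETRESET
 * (multicast list changed) the interface is reinitialized so the new
 * receive filter takes effect.
 */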
int
sq_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	int s, error = 0;

	SQ_TRACE(SQ_IOCTL, (struct sq_softc *)ifp->if_softc, 0, 0);

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		/*
		 * Multicast list has changed; set the hardware filter
		 * accordingly.
		 */
		if (ifp->if_flags & IFF_RUNNING)
			error = sq_init(ifp);
		else
			error = 0;
	}

	splx(s);
	return (error);
}

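/*
 * if_start handler: drain the output queue, copying any packet that
 * needs padding or that will not map as a single DMA segment into a
 * fresh contiguous mbuf, then describe it in the transmit ring and
 * hand the chain to the HPC DMA engine.
 */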
void
sq_start(struct ifnet *ifp)
{
	struct sq_softc *sc = ifp->if_softc;
	u_int32_t status;
	struct mbuf *m0, *m;
	bus_dmamap_t dmamap;
	int err, totlen, nexttx, firsttx, lasttx = -1, ofree, seg;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_nfreetx;
	firsttx = sc->sc_nexttx;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->sc_nfreetx != 0) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		dmamap = sc->sc_txmap[sc->sc_nexttx];

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  In this case, we'll copy
		 * and try again.
		 * Also copy it if we need to pad, so that we are sure
		 * there is room for the pad buffer.
		 * XXX the right way of doing this is to use a static
		 * buffer for padding and adding it to the transmit
		 * descriptor (see sys/dev/pci/if_tl.c for example).
		 * We can't do this here yet because we can't send
		 * packets with more than one fragment.
		 */
		if (m0->m_pkthdr.len < ETHER_PAD_LEN ||
		    bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}

			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			if (m0->m_pkthdr.len < ETHER_PAD_LEN) {
				memset(mtod(m, char *) + m0->m_pkthdr.len, 0,
				    ETHER_PAD_LEN - m0->m_pkthdr.len);
				m->m_pkthdr.len = m->m_len = ETHER_PAD_LEN;
			} else
				m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;

			if ((err = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_NOWAIT)) != 0) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, err);
				break;
			}
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.
		 */
		if (dmamap->dm_nsegs > sc->sc_nfreetx) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed to anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX is it worth it?
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			if (m != NULL)
				m_freem(m);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		SQ_TRACE(SQ_ENQUEUE, sc, sc->sc_nexttx, 0);

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptors.
		 */
		for (nexttx = sc->sc_nexttx, seg = 0, totlen = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = SQ_NEXTTX(nexttx)) {
			if (sc->hpc_regs->revision == 3) {
				sc->sc_txdesc[nexttx].hpc3_hdd_bufptr =
				    dmamap->dm_segs[seg].ds_addr;
				sc->sc_txdesc[nexttx].hpc3_hdd_ctl =
				    dmamap->dm_segs[seg].ds_len;
			} else {
				sc->sc_txdesc[nexttx].hpc1_hdd_bufptr =
				    dmamap->dm_segs[seg].ds_addr;
				sc->sc_txdesc[nexttx].hpc1_hdd_ctl =
				    dmamap->dm_segs[seg].ds_len;
			}
			sc->sc_txdesc[nexttx].hdd_descptr =
			    SQ_CDTXADDR(sc, SQ_NEXTTX(nexttx));
			lasttx = nexttx;
			totlen += dmamap->dm_segs[seg].ds_len;
		}

		/* Last descriptor gets end-of-packet */
		KASSERT(lasttx != -1);
		if (sc->hpc_regs->revision == 3)
			sc->sc_txdesc[lasttx].hpc3_hdd_ctl |=
			    HPC3_HDD_CTL_EOPACKET;
		else
			sc->sc_txdesc[lasttx].hpc1_hdd_ctl |=
			    HPC1_HDD_CTL_EOPACKET;

		SQ_DPRINTF(("%s: transmit %d-%d, len %d\n",
		    sc->sc_dev.dv_xname, sc->sc_nexttx, lasttx, totlen));

		if (ifp->if_flags & IFF_DEBUG) {
			printf(" transmit chain:\n");
			for (seg = sc->sc_nexttx;; seg = SQ_NEXTTX(seg)) {
				printf(" descriptor %d:\n", seg);
				printf(" hdd_bufptr: 0x%08x\n",
				    (sc->hpc_regs->revision == 3) ?
				    sc->sc_txdesc[seg].hpc3_hdd_bufptr :
				    sc->sc_txdesc[seg].hpc1_hdd_bufptr);
				printf(" hdd_ctl: 0x%08x\n",
				    (sc->hpc_regs->revision == 3) ?
				    sc->sc_txdesc[seg].hpc3_hdd_ctl :
				    sc->sc_txdesc[seg].hpc1_hdd_ctl);
				printf(" hdd_descptr: 0x%08x\n",
				    sc->sc_txdesc[seg].hdd_descptr);

				if (seg == lasttx)
					break;
			}
		}

		/* Sync the descriptors we're using. */
		SQ_CDTXSYNC(sc, sc->sc_nexttx, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Store a pointer to the packet so we can free it later */
		sc->sc_txmbuf[sc->sc_nexttx] = m0;

		/* Advance the tx pointer. */
		sc->sc_nfreetx -= dmamap->dm_nsegs;
		sc->sc_nexttx = nexttx;
	}

	/* All transmit descriptors used up, let upper layers know */
	if (sc->sc_nfreetx == 0)
		ifp->if_flags |= IFF_OACTIVE;

	if (sc->sc_nfreetx != ofree) {
		SQ_DPRINTF(("%s: %d packets enqueued, first %d, INTR on %d\n",
		    sc->sc_dev.dv_xname, lasttx - firsttx + 1,
		    firsttx, lasttx));

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued, mark it as the last
		 * descriptor.
		 *
		 * HPC1_HDD_CTL_INTR will generate an interrupt on
		 * HPC1.  HPC3 requires HPC3_HDD_CTL_EOPACKET in
		 * addition to HPC3_HDD_CTL_INTR to interrupt.
		 */
		KASSERT(lasttx != -1);
		if (sc->hpc_regs->revision == 3) {
			sc->sc_txdesc[lasttx].hpc3_hdd_ctl |=
			    HPC3_HDD_CTL_INTR | HPC3_HDD_CTL_EOCHAIN;
		} else {
			sc->sc_txdesc[lasttx].hpc1_hdd_ctl |= HPC1_HDD_CTL_INTR;
			sc->sc_txdesc[lasttx].hpc1_hdd_bufptr |=
			    HPC1_HDD_CTL_EOCHAIN;
		}

		SQ_CDTXSYNC(sc, lasttx, 1,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/*
		 * There is a potential race condition here if the HPC
		 * DMA channel is active and we try and either update
		 * the 'next descriptor' pointer in the HPC PIO space
		 * or the 'next descriptor' pointer in a previous
		 * descriptor.
		 *
		 * To avoid this, if the channel is active, we rely on
		 * the transmit interrupt routine noticing that there
		 * are more packets to send and restarting the HPC DMA
		 * engine, rather than mucking with the DMA state here.
		 */
		status = sq_hpc_read(sc, sc->hpc_regs->enetx_ctl);

		if ((status & sc->hpc_regs->enetx_ctl_active) != 0) {
			SQ_TRACE(SQ_ADD_TO_DMA, sc, firsttx, status);

			/*
			 * NB: hpc3_hdd_ctl == hpc1_hdd_bufptr, and
			 * HPC1_HDD_CTL_EOCHAIN == HPC3_HDD_CTL_EOCHAIN
			 */
			sc->sc_txdesc[SQ_PREVTX(firsttx)].hpc3_hdd_ctl &=
			    ~HPC3_HDD_CTL_EOCHAIN;

			if (sc->hpc_regs->revision != 3)
				sc->sc_txdesc[SQ_PREVTX(firsttx)].hpc1_hdd_ctl
				    &= ~HPC1_HDD_CTL_INTR;

			SQ_CDTXSYNC(sc, SQ_PREVTX(firsttx), 1,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		} else if (sc->hpc_regs->revision == 3) {
			SQ_TRACE(SQ_START_DMA, sc, firsttx, status);

			sq_hpc_write(sc, HPC3_ENETX_NDBP, SQ_CDTXADDR(sc,
			    firsttx));

			/* Kick DMA channel into life */
			sq_hpc_write(sc, HPC3_ENETX_CTL, HPC3_ENETX_CTL_ACTIVE);
		} else {
			/*
			 * In the HPC1 case where transmit DMA is
			 * inactive, we can either kick off if
			 * the ring was previously empty, or call
			 * our transmit interrupt handler to
			 * figure out if the ring stopped short
			 * and restart at the right place.
			 */
			if (ofree == SQ_NTXDESC) {
				SQ_TRACE(SQ_START_DMA, sc, firsttx, status);

				sq_hpc_write(sc, HPC1_ENETX_NDBP,
				    SQ_CDTXADDR(sc, firsttx));
				sq_hpc_write(sc, HPC1_ENETX_CFXBP,
				    SQ_CDTXADDR(sc, firsttx));
				sq_hpc_write(sc, HPC1_ENETX_CBP,
				    SQ_CDTXADDR(sc, firsttx));

				/* Kick DMA channel into life */
				sq_hpc_write(sc, HPC1_ENETX_CTL,
				    HPC1_ENETX_CTL_ACTIVE);
			} else
				sq_txring_hpc1(sc);
		}

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

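/*
 * if_stop handler: free any pending transmit mbufs, clear the Seeq
 * command registers, and reset the chip.
 */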
void
sq_stop(struct ifnet *ifp, int disable)
{
	int i;
	struct sq_softc *sc = ifp->if_softc;

	for (i = 0; i < SQ_NTXDESC; i++) {
		if (sc->sc_txmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
			m_freem(sc->sc_txmbuf[i]);
			sc->sc_txmbuf[i] = NULL;
		}
	}

	/* Clear Seeq transmit/receive command registers */
	sq_seeq_write(sc, SEEQ_TXCMD, 0);
	sq_seeq_write(sc, SEEQ_RXCMD, 0);

	sq_reset(sc);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

/* Device timeout/watchdog routine. */
void
sq_watchdog(struct ifnet *ifp)
{
	u_int32_t status;
	struct sq_softc *sc = ifp->if_softc;

	status = sq_hpc_read(sc, sc->hpc_regs->enetx_ctl);
	log(LOG_ERR, "%s: device timeout (prev %d, next %d, free %d, "
	    "status %08x)\n", sc->sc_dev.dv_xname, sc->sc_prevtx,
	    sc->sc_nexttx, sc->sc_nfreetx, status);

	sq_trace_dump(sc);

	memset(&sc->sq_trace, 0, sizeof(sc->sq_trace));
	sc->sq_trace_idx = 0;

	++ifp->if_oerrors;

	sq_init(ifp);
}

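/* Dump the software event trace ring recorded via SQ_TRACE(). */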
static void
sq_trace_dump(struct sq_softc *sc)
{
	int i;
	char *act;

	for (i = 0; i < sc->sq_trace_idx; i++) {
		switch (sc->sq_trace[i].action) {
		case SQ_RESET:		act = "SQ_RESET";	break;
		case SQ_ADD_TO_DMA:	act = "SQ_ADD_TO_DMA";	break;
		case SQ_START_DMA:	act = "SQ_START_DMA";	break;
		case SQ_DONE_DMA:	act = "SQ_DONE_DMA";	break;
		case SQ_RESTART_DMA:	act = "SQ_RESTART_DMA";	break;
		case SQ_TXINTR_ENTER:	act = "SQ_TXINTR_ENTER"; break;
		case SQ_TXINTR_EXIT:	act = "SQ_TXINTR_EXIT";	break;
		case SQ_TXINTR_BUSY:	act = "SQ_TXINTR_BUSY";	break;
		case SQ_IOCTL:		act = "SQ_IOCTL";	break;
		case SQ_ENQUEUE:	act = "SQ_ENQUEUE";	break;
		default:		act = "UNKNOWN";
		}

		printf("%s: [%03d] action %-16s buf %03d free %03d "
		    "status %08x line %d\n", sc->sc_dev.dv_xname, i, act,
		    sc->sq_trace[i].bufno, sc->sq_trace[i].freebuf,
		    sc->sq_trace[i].status, sc->sq_trace[i].line);
	}
}

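/*
 * Interrupt handler: acknowledge the HPC interrupt, then service
 * receive completions and, if anything was transmitted, transmit
 * completions.
 */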
static int
sq_intr(void *arg)
{
	struct sq_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int handled = 0;
	u_int32_t stat;

	stat = sq_hpc_read(sc, sc->hpc_regs->enetr_reset);

	if ((stat & 2) == 0) {
		printf("%s: Unexpected interrupt!\n", sc->sc_dev.dv_xname);
		return 0;
	}

	sq_hpc_write(sc, sc->hpc_regs->enetr_reset, (stat | 2));

	/*
	 * If the interface isn't running, the interrupt couldn't
	 * possibly have come from us.
	 */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	sc->sq_intrcnt.ev_count++;

	/* Always check for received packets */
	if (sq_rxintr(sc) != 0)
		handled++;

	/* Only handle transmit interrupts if we actually sent something */
	if (sc->sc_nfreetx < SQ_NTXDESC) {
		sq_txintr(sc);
		handled++;
	}

#if NRND > 0
	if (handled)
		rnd_add_uint32(&sc->rnd_source, stat);
#endif
	return (handled);
}

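/*
 * Receive interrupt: walk the receive ring handing good packets up the
 * stack, then resynchronize the ring's end-of-chain markers and restart
 * the receive DMA channel if it has stopped.
 */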
static int
sq_rxintr(struct sq_softc *sc)
{
	int count = 0;
	struct mbuf* m;
	int i, framelen;
	u_int8_t pktstat;
	u_int32_t status;
	u_int32_t ctl_reg;
	int new_end, orig_end;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	for (i = sc->sc_nextrx;; i = SQ_NEXTRX(i)) {
		SQ_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/*
		 * If this is a CPU-owned buffer, we're at the end of
		 * the list.
		 */
		if (sc->hpc_regs->revision == 3)
			ctl_reg = sc->sc_rxdesc[i].hpc3_hdd_ctl &
			    HPC3_HDD_CTL_OWN;
		else
			ctl_reg = sc->sc_rxdesc[i].hpc1_hdd_ctl &
			    HPC1_HDD_CTL_OWN;

		if (ctl_reg) {
#if defined(SQ_DEBUG)
			u_int32_t reg;

			reg = sq_hpc_read(sc, sc->hpc_regs->enetr_ctl);
			SQ_DPRINTF(("%s: rxintr: done at %d (ctl %08x)\n",
			    sc->sc_dev.dv_xname, i, reg));
#endif
			break;
		}

		count++;

		m = sc->sc_rxmbuf[i];
		framelen = m->m_ext.ext_size - 3;
		if (sc->hpc_regs->revision == 3)
			framelen -=
			    HPC3_HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hpc3_hdd_ctl);
		else
			framelen -=
			    HPC1_HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hpc1_hdd_ctl);

		/* Now sync the actual packet data */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
		    sc->sc_rxmap[i]->dm_mapsize, BUS_DMASYNC_POSTREAD);

		pktstat = *((u_int8_t*)m->m_data + framelen + 2);

		if ((pktstat & RXSTAT_GOOD) == 0) {
			ifp->if_ierrors++;

			if (pktstat & RXSTAT_OFLOW)
				printf("%s: receive FIFO overflow\n",
				    sc->sc_dev.dv_xname);

			bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
			    sc->sc_rxmap[i]->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			SQ_INIT_RXDESC(sc, i);
			SQ_DPRINTF(("%s: sq_rxintr: buf %d no RXSTAT_GOOD\n",
			    sc->sc_dev.dv_xname, i));
			continue;
		}

		if (sq_add_rxbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
			    sc->sc_rxmap[i]->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			SQ_INIT_RXDESC(sc, i);
			SQ_DPRINTF(("%s: sq_rxintr: buf %d sq_add_rxbuf() "
			    "failed\n", sc->sc_dev.dv_xname, i));
			continue;
		}

		m->m_data += 2;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = framelen;

		ifp->if_ipackets++;

		SQ_DPRINTF(("%s: sq_rxintr: buf %d len %d\n",
		    sc->sc_dev.dv_xname, i, framelen));

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		(*ifp->if_input)(ifp, m);
	}

	/* If anything happened, move ring start/end pointers to new spot */
	if (i != sc->sc_nextrx) {
		/*
		 * NB: hpc3_hdd_ctl == hpc1_hdd_bufptr, and
		 * HPC1_HDD_CTL_EOCHAIN == HPC3_HDD_CTL_EOCHAIN
		 */

		new_end = SQ_PREVRX(i);
		sc->sc_rxdesc[new_end].hpc3_hdd_ctl |= HPC3_HDD_CTL_EOCHAIN;
		SQ_CDRXSYNC(sc, new_end,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		orig_end = SQ_PREVRX(sc->sc_nextrx);
		sc->sc_rxdesc[orig_end].hpc3_hdd_ctl &= ~HPC3_HDD_CTL_EOCHAIN;
		SQ_CDRXSYNC(sc, orig_end,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		sc->sc_nextrx = i;
	}

	status = sq_hpc_read(sc, sc->hpc_regs->enetr_ctl);

	/* If receive channel is stopped, restart it... */
	if ((status & sc->hpc_regs->enetr_ctl_active) == 0) {
		/* Pass the start of the receive ring to the HPC */
		sq_hpc_write(sc, sc->hpc_regs->enetr_ndbp,
		    SQ_CDRXADDR(sc, sc->sc_nextrx));

		/* And turn on the HPC ethernet receive channel */
		sq_hpc_write(sc, sc->hpc_regs->enetr_ctl,
		    sc->hpc_regs->enetr_ctl_active);
	}

	return count;
}

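/*
 * Transmit interrupt: record error statistics, reclaim finished
 * descriptors, and restart the output queue if descriptors were freed.
 */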
static int
sq_txintr(struct sq_softc *sc)
{
	int shift = 0;
	u_int32_t status, tmp;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if (sc->hpc_regs->revision != 3)
		shift = 16;

	status = sq_hpc_read(sc, sc->hpc_regs->enetx_ctl) >> shift;

	SQ_TRACE(SQ_TXINTR_ENTER, sc, sc->sc_prevtx, status);

	tmp = (sc->hpc_regs->enetx_ctl_active >> shift) | TXSTAT_GOOD;
	if ((status & tmp) == 0) {
		if (status & TXSTAT_COLL)
			ifp->if_collisions++;

		if (status & TXSTAT_UFLOW) {
			printf("%s: transmit underflow\n", sc->sc_dev.dv_xname);
			ifp->if_oerrors++;
		}

		if (status & TXSTAT_16COLL) {
			printf("%s: max collisions reached\n",
			    sc->sc_dev.dv_xname);
			ifp->if_oerrors++;
			ifp->if_collisions += 16;
		}
	}

	/* prevtx now points to next xmit packet not yet finished */
	if (sc->hpc_regs->revision == 3)
		sq_txring_hpc3(sc);
	else
		sq_txring_hpc1(sc);

	/* If we have buffers free, let upper layers know */
	if (sc->sc_nfreetx > 0)
		ifp->if_flags &= ~IFF_OACTIVE;

	/* If all packets have left the coop, cancel watchdog */
	if (sc->sc_nfreetx == SQ_NTXDESC)
		ifp->if_timer = 0;

	SQ_TRACE(SQ_TXINTR_EXIT, sc, sc->sc_prevtx, status);
	sq_start(ifp);

	return 1;
}

/*
 * Reclaim used transmit descriptors and restart the transmit DMA
 * engine if necessary.
 */
static void
sq_txring_hpc1(struct sq_softc *sc)
{
	/*
	 * HPC1 doesn't tag transmitted descriptors, however,
	 * the NDBP register points to the next descriptor that
	 * has not yet been processed.  If DMA is not in progress,
	 * we can safely reclaim all descriptors up to NDBP, and,
	 * if necessary, restart DMA at NDBP.  Otherwise, if DMA
	 * is active, we can only safely reclaim up to CBP.
	 *
	 * For now, we'll only reclaim on inactive DMA and assume
	 * that a sufficiently large ring keeps us out of trouble.
	 */
	u_int32_t reclaimto, status;
	int reclaimall, i = sc->sc_prevtx;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	status = sq_hpc_read(sc, HPC1_ENETX_CTL);
	if (status & HPC1_ENETX_CTL_ACTIVE) {
		SQ_TRACE(SQ_TXINTR_BUSY, sc, i, status);
		return;
	} else
		reclaimto = sq_hpc_read(sc, HPC1_ENETX_NDBP);

	if (sc->sc_nfreetx == 0 && SQ_CDTXADDR(sc, i) == reclaimto)
		reclaimall = 1;
	else
		reclaimall = 0;

	while (sc->sc_nfreetx < SQ_NTXDESC) {
		if (SQ_CDTXADDR(sc, i) == reclaimto && !reclaimall)
			break;

		SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* Sync the packet data, unload DMA map, free mbuf */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap[i], 0,
		    sc->sc_txmap[i]->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
		m_freem(sc->sc_txmbuf[i]);
		sc->sc_txmbuf[i] = NULL;

		ifp->if_opackets++;
		sc->sc_nfreetx++;

		SQ_TRACE(SQ_DONE_DMA, sc, i, status);

		i = SQ_NEXTTX(i);
	}

	if (sc->sc_nfreetx < SQ_NTXDESC) {
		SQ_TRACE(SQ_RESTART_DMA, sc, i, status);

		KASSERT(reclaimto == SQ_CDTXADDR(sc, i));

		sq_hpc_write(sc, HPC1_ENETX_CFXBP, reclaimto);
		sq_hpc_write(sc, HPC1_ENETX_CBP, reclaimto);

		/* Kick DMA channel into life */
		sq_hpc_write(sc, HPC1_ENETX_CTL, HPC1_ENETX_CTL_ACTIVE);

		/*
		 * Set a watchdog timer in case the chip
		 * flakes out.
		 */
		ifp->if_timer = 5;
	}

	sc->sc_prevtx = i;
}

/*
 * Reclaim used transmit descriptors and restart the transmit DMA
 * engine if necessary.
 */
static void
sq_txring_hpc3(struct sq_softc *sc)
{
	/*
	 * HPC3 tags descriptors with a bit once they've been
	 * transmitted.  We need only free each XMITDONE'd
	 * descriptor, and restart the DMA engine if any
	 * descriptors are left over.
	 */
	int i;
	u_int32_t status = 0;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	i = sc->sc_prevtx;
	while (sc->sc_nfreetx < SQ_NTXDESC) {
		/*
		 * Check status first so we don't end up with a case of
		 * the buffer not being finished while the DMA channel
		 * has gone idle.
		 */
		status = sq_hpc_read(sc, HPC3_ENETX_CTL);

		SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* Check for used descriptor and restart DMA chain if needed */
		if (!(sc->sc_txdesc[i].hpc3_hdd_ctl & HPC3_HDD_CTL_XMITDONE)) {
			if ((status & HPC3_ENETX_CTL_ACTIVE) == 0) {
				SQ_TRACE(SQ_RESTART_DMA, sc, i, status);

				sq_hpc_write(sc, HPC3_ENETX_NDBP,
				    SQ_CDTXADDR(sc, i));

				/* Kick DMA channel into life */
				sq_hpc_write(sc, HPC3_ENETX_CTL,
				    HPC3_ENETX_CTL_ACTIVE);

				/*
				 * Set a watchdog timer in case the chip
				 * flakes out.
				 */
				ifp->if_timer = 5;
			} else
				SQ_TRACE(SQ_TXINTR_BUSY, sc, i, status);
			break;
		}

		/* Sync the packet data, unload DMA map, free mbuf */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap[i], 0,
		    sc->sc_txmap[i]->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
		m_freem(sc->sc_txmbuf[i]);
		sc->sc_txmbuf[i] = NULL;

		ifp->if_opackets++;
		sc->sc_nfreetx++;

		SQ_TRACE(SQ_DONE_DMA, sc, i, status);
		i = SQ_NEXTTX(i);
	}

	sc->sc_prevtx = i;
}

void
sq_reset(struct sq_softc *sc)
{
	/* Stop HPC dma channels */
	sq_hpc_write(sc, sc->hpc_regs->enetr_ctl, 0);
	sq_hpc_write(sc, sc->hpc_regs->enetx_ctl, 0);

	sq_hpc_write(sc, sc->hpc_regs->enetr_reset, 3);
	delay(20);
	sq_hpc_write(sc, sc->hpc_regs->enetr_reset, 0);
}

/* sq_add_rxbuf: Add a receive buffer to the indicated descriptor. */
int
sq_add_rxbuf(struct sq_softc *sc, int idx)
{
	int err;
	struct mbuf *m;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (sc->sc_rxmbuf[idx] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[idx]);

	sc->sc_rxmbuf[idx] = m;

	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_rxmap[idx],
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, err);
		panic("sq_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[idx], 0,
	    sc->sc_rxmap[idx]->dm_mapsize, BUS_DMASYNC_PREREAD);

	SQ_INIT_RXDESC(sc, idx);

	return 0;
}

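/* Hex-dump a buffer, given its physical address, via its KSEG1 mapping. */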
void
sq_dump_buffer(u_int32_t addr, u_int32_t len)
{
	u_int i;
	u_char *physaddr = (u_char *)MIPS_PHYS_TO_KSEG1(addr);

	if (len == 0)
		return;

	printf("%p: ", physaddr);

	for (i = 0; i < len; i++) {
		printf("%02x ", *(physaddr + i) & 0xff);
		if ((i % 16) == 15 && i != len - 1)
			printf("\n%p: ", physaddr + i);
	}

	printf("\n");
}

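/*
 * Parse a colon-separated Ethernet address string, as returned by the
 * ARCBIOS "eaddr" environment variable, into its binary form.
 */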
void
enaddr_aton(const char* str, u_int8_t* eaddr)
{
	int i;
	char c;

	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		if (*str == ':')
			str++;

		c = *str++;
		if (isdigit(c)) {
			eaddr[i] = (c - '0');
		} else if (isxdigit(c)) {
			eaddr[i] = (toupper(c) + 10 - 'A');
		}

		c = *str++;
		if (isdigit(c)) {
			eaddr[i] = (eaddr[i] << 4) | (c - '0');
		} else if (isxdigit(c)) {
			eaddr[i] = (eaddr[i] << 4) | (toupper(c) + 10 - 'A');
		}
	}
}