/*	$NetBSD: if_sq.c,v 1.27 2004/12/31 22:32:34 rumble Exp $	*/

/*
 * Copyright (c) 2001 Rafal K. Boni
 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Portions of this code are derived from software contributed to The
 * NetBSD Foundation by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_sq.c,v 1.27 2004/12/31 22:32:34 rumble Exp $");

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>

#include <machine/endian.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/ic/seeq8003reg.h>

#include <sgimips/hpc/sqvar.h>
#include <sgimips/hpc/hpcvar.h>
#include <sgimips/hpc/hpcreg.h>

#include <dev/arcbios/arcbios.h>
#include <dev/arcbios/arcbiosvar.h>

#define static		/* XXX: debug leftover; strips `static' so symbols stay visible */

/*
 * Short TODO list:
 *	(1) Do counters for bad-RX packets.
 *	(2) Allow multi-segment transmits, instead of copying to a single,
 *	    contiguous mbuf.
 *	(3) Verify sq_stop() turns off enough stuff; I was still getting
 *	    seeq interrupts after sq_stop().
 *	(4) Implement EDLC modes: especially packet auto-pad and simplex
 *	    mode.
 *	(5) Should the driver filter out its own transmissions in non-EDLC
 *	    mode?
 *	(6) Multicast support -- multicast filter, address management, ...
 *	(7) Deal with RB0 (recv buffer overflow) on reception.  Will need
 *	    to figure out if RB0 is read-only as stated in one spot in the
 *	    HPC spec or read-write (i.e., is the 'write a one to clear it')
 *	    the correct thing?
 */

#if defined(SQ_DEBUG)
int sq_debug = 0;
#define SQ_DPRINTF(x)	do { if (sq_debug) printf x; } while (0)
#else
#define SQ_DPRINTF(x)
#endif

static int	sq_match(struct device *, struct cfdata *, void *);
static void	sq_attach(struct device *, struct device *, void *);
static int	sq_init(struct ifnet *);
static void	sq_start(struct ifnet *);
static void	sq_stop(struct ifnet *, int);
static void	sq_watchdog(struct ifnet *);
static int	sq_ioctl(struct ifnet *, u_long, caddr_t);

static void	sq_set_filter(struct sq_softc *);
static int	sq_intr(void *);
static int	sq_rxintr(struct sq_softc *);
static int	sq_txintr(struct sq_softc *);
static void	sq_txring_hpc1(struct sq_softc *);
static void	sq_txring_hpc3(struct sq_softc *);
static void	sq_reset(struct sq_softc *);
static int	sq_add_rxbuf(struct sq_softc *, int);
static void	sq_dump_buffer(u_int32_t addr, u_int32_t len);
static void	sq_trace_dump(struct sq_softc *);

static void	enaddr_aton(const char*, u_int8_t*);

CFATTACH_DECL(sq, sizeof(struct sq_softc),
    sq_match, sq_attach, NULL, NULL);

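/*
 * Frames handed to the Seeq must be padded out to the Ethernet minimum
 * of ETHER_MIN_LEN less the 4-byte CRC (60 bytes), since the hardware
 * appends the CRC itself and -- per TODO item (4) -- auto-padding is
 * not enabled.  sq_start() does the padding by hand.
 */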
#define ETHER_PAD_LEN	(ETHER_MIN_LEN - ETHER_CRC_LEN)

#define sq_seeq_read(sc, off) \
	bus_space_read_1(sc->sc_regt, sc->sc_regh, off)
#define sq_seeq_write(sc, off, val) \
	bus_space_write_1(sc->sc_regt, sc->sc_regh, off, val)

#define sq_hpc_read(sc, off) \
	bus_space_read_4(sc->sc_hpct, sc->sc_hpch, off)
#define sq_hpc_write(sc, off, val) \
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, off, val)
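
/*
 * The Seeq registers are byte-wide and the HPC DMA registers are
 * 32 bits wide, hence the _1/_4 bus_space accessors above.  Typical
 * usage, as seen throughout this file:
 *
 *	sq_seeq_write(sc, SEEQ_RXCMD, sc->sc_rxcmd);
 *	reg = sq_hpc_read(sc, HPC3_ENETR_DMACFG);
 */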

static int
sq_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct hpc_attach_args *ha = aux;

	if (strcmp(ha->ha_name, cf->cf_name) == 0)
		return (1);

	return (0);
}

static void
sq_attach(struct device *parent, struct device *self, void *aux)
{
	int i, err;
	char* macaddr;
	struct sq_softc *sc = (void *)self;
	struct hpc_attach_args *haa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	sc->sc_hpct = haa->ha_st;
	sc->hpc_regs = haa->hpc_regs;		/* HPC register definitions */

	if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
				       haa->ha_dmaoff,
				       sc->hpc_regs->enet_regs_size,
				       &sc->sc_hpch)) != 0) {
		printf(": unable to map HPC DMA registers, error = %d\n", err);
		goto fail_0;
	}

	sc->sc_regt = haa->ha_st;
	if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
				       haa->ha_devoff,
				       sc->hpc_regs->enet_devregs_size,
				       &sc->sc_regh)) != 0) {
		printf(": unable to map Seeq registers, error = %d\n", err);
		goto fail_0;
	}

	sc->sc_dmat = haa->ha_dmat;

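	/*
	 * Allocate, map, and load the control structures (the transmit
	 * and receive descriptor rings) using the usual four-step
	 * bus_dma(9) sequence: bus_dmamem_alloc -> bus_dmamem_map ->
	 * bus_dmamap_create -> bus_dmamap_load.  Each step is unwound
	 * in reverse order at the fail_* labels below.
	 */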
	if ((err = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct sq_control),
				    PAGE_SIZE, PAGE_SIZE, &sc->sc_cdseg,
				    1, &sc->sc_ncdseg, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to allocate control data, error = %d\n", err);
		goto fail_0;
	}

	if ((err = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg,
				  sizeof(struct sq_control),
				  (caddr_t *)&sc->sc_control,
				  BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf(": unable to map control data, error = %d\n", err);
		goto fail_1;
	}

	if ((err = bus_dmamap_create(sc->sc_dmat, sizeof(struct sq_control),
				     1, sizeof(struct sq_control), PAGE_SIZE,
				     BUS_DMA_NOWAIT, &sc->sc_cdmap)) != 0) {
		printf(": unable to create DMA map for control data, error "
		    "= %d\n", err);
		goto fail_2;
	}

	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cdmap, sc->sc_control,
				   sizeof(struct sq_control),
				   NULL, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load DMA map for control data, error "
		    "= %d\n", err);
		goto fail_3;
	}

	memset(sc->sc_control, 0, sizeof(struct sq_control));

	/* Create transmit buffer DMA maps */
	for (i = 0; i < SQ_NTXDESC; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
					     0, BUS_DMA_NOWAIT,
					     &sc->sc_txmap[i])) != 0) {
			printf(": unable to create tx DMA map %d, error = %d\n",
			    i, err);
			goto fail_4;
		}
	}

	/* Create receive buffer DMA maps */
	for (i = 0; i < SQ_NRXDESC; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
					     0, BUS_DMA_NOWAIT,
					     &sc->sc_rxmap[i])) != 0) {
			printf(": unable to create rx DMA map %d, error = %d\n",
			    i, err);
			goto fail_5;
		}
	}

	/* Pre-allocate the receive buffers. */
	for (i = 0; i < SQ_NRXDESC; i++) {
		if ((err = sq_add_rxbuf(sc, i)) != 0) {
			printf(": unable to allocate or map rx buffer %d, "
			    "error = %d\n", i, err);
			goto fail_6;
		}
	}

	if ((macaddr = ARCBIOS->GetEnvironmentVariable("eaddr")) == NULL) {
		printf(": unable to get MAC address!\n");
		goto fail_6;
	}

	evcnt_attach_dynamic(&sc->sq_intrcnt, EVCNT_TYPE_INTR, NULL,
	    self->dv_xname, "intr");

	if ((cpu_intr_establish(haa->ha_irq, IPL_NET, sq_intr, sc)) == NULL) {
		printf(": unable to establish interrupt!\n");
		goto fail_6;
	}

	/* Reset the chip to a known state. */
	sq_reset(sc);

	/*
	 * Determine if we're an 8003 or 80c03 by setting the first
	 * MAC address register to non-zero, and then reading it back.
	 * If it's zero, we have an 80c03, because we will have read
	 * the TxCollLSB register.
	 */
	sq_seeq_write(sc, SEEQ_TXCOLLS0, 0xa5);
	if (sq_seeq_read(sc, SEEQ_TXCOLLS0) == 0)
		sc->sc_type = SQ_TYPE_80C03;
	else
		sc->sc_type = SQ_TYPE_8003;
	sq_seeq_write(sc, SEEQ_TXCOLLS0, 0x00);

	printf(": SGI Seeq %s\n",
	    sc->sc_type == SQ_TYPE_80C03 ? "80c03" : "8003");

	enaddr_aton(macaddr, sc->sc_enaddr);

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(sc->sc_enaddr));

	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_init = sq_init;
	ifp->if_stop = sq_stop;
	ifp->if_start = sq_start;
	ifp->if_ioctl = sq_ioctl;
	ifp->if_watchdog = sq_watchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_NOTRAILERS | IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	memset(&sc->sq_trace, 0, sizeof(sc->sq_trace));
	/* Done! */
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	for (i = 0; i < SQ_NRXDESC; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
 fail_5:
	for (i = 0; i < SQ_NRXDESC; i++) {
		if (sc->sc_rxmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmap[i]);
	}
 fail_4:
	for (i = 0; i < SQ_NTXDESC; i++) {
		if (sc->sc_txmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_txmap[i]);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cdmap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdmap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t) sc->sc_control,
	    sizeof(struct sq_control));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg);
 fail_0:
	return;
}

/* Set up data to get the interface up and running. */
int
sq_init(struct ifnet *ifp)
{
	int i;
	u_int32_t reg;
	struct sq_softc *sc = ifp->if_softc;

	/* Cancel any in-progress I/O */
	sq_stop(ifp, 0);

	sc->sc_nextrx = 0;

	sc->sc_nfreetx = SQ_NTXDESC;
	sc->sc_nexttx = sc->sc_prevtx = 0;

	SQ_TRACE(SQ_RESET, sc, 0, 0);

	/* Set into 8003 mode, bank 0 to program ethernet address */
	sq_seeq_write(sc, SEEQ_TXCMD, TXCMD_BANK0);

	/* Now write the address */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		sq_seeq_write(sc, i, sc->sc_enaddr[i]);

	sc->sc_rxcmd = RXCMD_IE_CRC |
		       RXCMD_IE_DRIB |
		       RXCMD_IE_SHORT |
		       RXCMD_IE_END |
		       RXCMD_IE_GOOD;

	/*
	 * Set the receive filter -- this will add some bits to the
	 * prototype RXCMD register.  Do this before setting the
	 * transmit config register, since we might need to switch
	 * banks.
	 */
	sq_set_filter(sc);

	/* Set up Seeq transmit command register */
	sq_seeq_write(sc, SEEQ_TXCMD, TXCMD_IE_UFLOW |
				      TXCMD_IE_COLL |
				      TXCMD_IE_16COLL |
				      TXCMD_IE_GOOD);

	/* Now write the receive command register. */
	sq_seeq_write(sc, SEEQ_RXCMD, sc->sc_rxcmd);

	/* Set up HPC ethernet DMA config */
	if (sc->hpc_regs->revision == 3) {
		reg = sq_hpc_read(sc, HPC3_ENETR_DMACFG);
		sq_hpc_write(sc, HPC3_ENETR_DMACFG, reg |
		    HPC3_ENETR_DMACFG_FIX_RXDC |
		    HPC3_ENETR_DMACFG_FIX_INTR |
		    HPC3_ENETR_DMACFG_FIX_EOP);
	}

	/* Pass the start of the receive ring to the HPC */
	sq_hpc_write(sc, sc->hpc_regs->enetr_ndbp, SQ_CDRXADDR(sc, 0));

	/* And turn on the HPC ethernet receive channel */
	sq_hpc_write(sc, sc->hpc_regs->enetr_ctl,
	    sc->hpc_regs->enetr_ctl_active);

	/*
	 * Turn off delayed receive interrupts on HPC1.
	 * (see Hollywood HPC Specification 2.1.4.3)
	 */
	if (sc->hpc_regs->revision != 3)
		sq_hpc_write(sc, HPC1_ENET_INTDELAY, HPC1_ENET_INTDELAY_OFF);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

static void
sq_set_filter(struct sq_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;

	/*
	 * Check for promiscuous mode.  Also implies
	 * all-multicast.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rxcmd |= RXCMD_REC_ALL;
		ifp->if_flags |= IFF_ALLMULTI;
		return;
	}

	/*
	 * The 8003 has no hash table.  If we have any multicast
	 * addresses on the list, enable reception of all multicast
	 * frames.
	 *
	 * XXX The 80c03 has a hash table.  We should use it.
	 */

	ETHER_FIRST_MULTI(step, ec, enm);

	if (enm == NULL) {
		sc->sc_rxcmd &= ~RXCMD_REC_MASK;
		sc->sc_rxcmd |= RXCMD_REC_BROAD;

		ifp->if_flags &= ~IFF_ALLMULTI;
		return;
	}

	sc->sc_rxcmd |= RXCMD_REC_MULTI;
	ifp->if_flags |= IFF_ALLMULTI;
}

int
sq_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	int s, error = 0;

	SQ_TRACE(SQ_IOCTL, (struct sq_softc *)ifp->if_softc, 0, 0);

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		/*
		 * Multicast list has changed; set the hardware filter
		 * accordingly.
		 */
		if (ifp->if_flags & IFF_RUNNING)
			error = sq_init(ifp);
		else
			error = 0;
	}

	splx(s);
	return (error);
}

void
sq_start(struct ifnet *ifp)
{
	struct sq_softc *sc = ifp->if_softc;
	u_int32_t status;
	struct mbuf *m0, *m;
	bus_dmamap_t dmamap;
	int err, totlen, nexttx, firsttx, lasttx = -1, ofree, seg;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_nfreetx;
	firsttx = sc->sc_nexttx;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->sc_nfreetx != 0) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		dmamap = sc->sc_txmap[sc->sc_nexttx];

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  In this case, we'll copy
		 * and try again.
		 * Also copy it if we need to pad, so that we are sure
		 * there is room for the pad buffer.
		 * XXX the right way of doing this is to use a static
		 * buffer for padding and adding it to the transmit
		 * descriptor (see sys/dev/pci/if_tl.c for example).
		 * We can't do this here yet because we can't send
		 * packets with more than one fragment.
		 */
		if (m0->m_pkthdr.len < ETHER_PAD_LEN ||
		    bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
					 BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}

			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			if (m0->m_pkthdr.len < ETHER_PAD_LEN) {
				memset(mtod(m, char *) + m0->m_pkthdr.len, 0,
				    ETHER_PAD_LEN - m0->m_pkthdr.len);
				m->m_pkthdr.len = m->m_len = ETHER_PAD_LEN;
			} else
				m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;

			if ((err = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
					m, BUS_DMA_NOWAIT)) != 0) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, err);
				break;
			}
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.
		 */
		if (dmamap->dm_nsegs > sc->sc_nfreetx) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed to anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX is it worth it?
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			if (m != NULL)
				m_freem(m);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		SQ_TRACE(SQ_ENQUEUE, sc, sc->sc_nexttx, 0);

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptors.
		 */
		for (nexttx = sc->sc_nexttx, seg = 0, totlen = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = SQ_NEXTTX(nexttx)) {
			if (sc->hpc_regs->revision == 3) {
				sc->sc_txdesc[nexttx].hpc3_hdd_bufptr =
				    dmamap->dm_segs[seg].ds_addr;
				sc->sc_txdesc[nexttx].hpc3_hdd_ctl =
				    dmamap->dm_segs[seg].ds_len;
			} else {
				sc->sc_txdesc[nexttx].hpc1_hdd_bufptr =
				    dmamap->dm_segs[seg].ds_addr;
				sc->sc_txdesc[nexttx].hpc1_hdd_ctl =
				    dmamap->dm_segs[seg].ds_len;
			}
			sc->sc_txdesc[nexttx].hdd_descptr =
			    SQ_CDTXADDR(sc, SQ_NEXTTX(nexttx));
			lasttx = nexttx;
			totlen += dmamap->dm_segs[seg].ds_len;
		}

		/* Last descriptor gets end-of-packet */
		KASSERT(lasttx != -1);
		if (sc->hpc_regs->revision == 3)
			sc->sc_txdesc[lasttx].hpc3_hdd_ctl |=
			    HPC3_HDD_CTL_EOPACKET;
		else
			sc->sc_txdesc[lasttx].hpc1_hdd_ctl |=
			    HPC1_HDD_CTL_EOPACKET;

		SQ_DPRINTF(("%s: transmit %d-%d, len %d\n",
		    sc->sc_dev.dv_xname, sc->sc_nexttx, lasttx, totlen));

		if (ifp->if_flags & IFF_DEBUG) {
			printf("     transmit chain:\n");
			for (seg = sc->sc_nexttx;; seg = SQ_NEXTTX(seg)) {
				printf("     descriptor %d:\n", seg);
				printf("       hdd_bufptr:  0x%08x\n",
				    (sc->hpc_regs->revision == 3) ?
					sc->sc_txdesc[seg].hpc3_hdd_bufptr :
					sc->sc_txdesc[seg].hpc1_hdd_bufptr);
				printf("       hdd_ctl:     0x%08x\n",
				    (sc->hpc_regs->revision == 3) ?
					sc->sc_txdesc[seg].hpc3_hdd_ctl :
					sc->sc_txdesc[seg].hpc1_hdd_ctl);
				printf("       hdd_descptr: 0x%08x\n",
				    sc->sc_txdesc[seg].hdd_descptr);

				if (seg == lasttx)
					break;
			}
		}

		/* Sync the descriptors we're using. */
		SQ_CDTXSYNC(sc, sc->sc_nexttx, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Store a pointer to the packet so we can free it later */
		sc->sc_txmbuf[sc->sc_nexttx] = m0;

		/* Advance the tx pointer. */
		sc->sc_nfreetx -= dmamap->dm_nsegs;
		sc->sc_nexttx = nexttx;
	}

	/* All transmit descriptors used up, let upper layers know */
	if (sc->sc_nfreetx == 0)
		ifp->if_flags |= IFF_OACTIVE;

	if (sc->sc_nfreetx != ofree) {
		SQ_DPRINTF(("%s: %d packets enqueued, first %d, INTR on %d\n",
		    sc->sc_dev.dv_xname, lasttx - firsttx + 1,
		    firsttx, lasttx));

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued, mark it as the last
		 * descriptor.
		 *
		 * HPC1_HDD_CTL_INTR will generate an interrupt on
		 * HPC1.  HPC3 requires HPC3_HDD_CTL_EOPACKET in
		 * addition to HPC3_HDD_CTL_INTR to interrupt.
		 */
		KASSERT(lasttx != -1);
		if (sc->hpc_regs->revision == 3) {
			sc->sc_txdesc[lasttx].hpc3_hdd_ctl |=
			    HPC3_HDD_CTL_INTR | HPC3_HDD_CTL_EOCHAIN;
		} else {
			sc->sc_txdesc[lasttx].hpc1_hdd_ctl |= HPC1_HDD_CTL_INTR;
			sc->sc_txdesc[lasttx].hpc1_hdd_bufptr |=
			    HPC1_HDD_CTL_EOCHAIN;
		}

		SQ_CDTXSYNC(sc, lasttx, 1,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/*
		 * There is a potential race condition here if the HPC
		 * DMA channel is active and we try to either update
		 * the 'next descriptor' pointer in the HPC PIO space
		 * or the 'next descriptor' pointer in a previous
		 * descriptor.
		 *
		 * To avoid this, if the channel is active, we rely on
		 * the transmit interrupt routine noticing that there
		 * are more packets to send and restarting the HPC DMA
		 * engine, rather than mucking with the DMA state here.
		 */
		status = sq_hpc_read(sc, sc->hpc_regs->enetx_ctl);

		if ((status & sc->hpc_regs->enetx_ctl_active) != 0) {
			SQ_TRACE(SQ_ADD_TO_DMA, sc, firsttx, status);

			/*
			 * NB: hpc3_hdd_ctl == hpc1_hdd_bufptr, and
			 * HPC1_HDD_CTL_EOCHAIN == HPC3_HDD_CTL_EOCHAIN
			 */
			sc->sc_txdesc[SQ_PREVTX(firsttx)].hpc3_hdd_ctl &=
			    ~HPC3_HDD_CTL_EOCHAIN;

			if (sc->hpc_regs->revision != 3)
				sc->sc_txdesc[SQ_PREVTX(firsttx)].hpc1_hdd_ctl
				    &= ~HPC1_HDD_CTL_INTR;

			SQ_CDTXSYNC(sc, SQ_PREVTX(firsttx), 1,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		} else if (sc->hpc_regs->revision == 3) {
			SQ_TRACE(SQ_START_DMA, sc, firsttx, status);

			sq_hpc_write(sc, HPC3_ENETX_NDBP, SQ_CDTXADDR(sc,
			    firsttx));

			/* Kick DMA channel into life */
			sq_hpc_write(sc, HPC3_ENETX_CTL, HPC3_ENETX_CTL_ACTIVE);
		} else {
			/*
			 * In the HPC1 case where transmit DMA is
			 * inactive, we can either kick off if
			 * the ring was previously empty, or call
			 * our transmit interrupt handler to
			 * figure out if the ring stopped short
			 * and restart at the right place.
			 */
			if (ofree == SQ_NTXDESC) {
				SQ_TRACE(SQ_START_DMA, sc, firsttx, status);

				sq_hpc_write(sc, HPC1_ENETX_NDBP,
				    SQ_CDTXADDR(sc, firsttx));
				sq_hpc_write(sc, HPC1_ENETX_CFXBP,
				    SQ_CDTXADDR(sc, firsttx));
				sq_hpc_write(sc, HPC1_ENETX_CBP,
				    SQ_CDTXADDR(sc, firsttx));

				/* Kick DMA channel into life */
				sq_hpc_write(sc, HPC1_ENETX_CTL,
				    HPC1_ENETX_CTL_ACTIVE);
			} else
				sq_txring_hpc1(sc);
		}

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

void
sq_stop(struct ifnet *ifp, int disable)
{
	int i;
	struct sq_softc *sc = ifp->if_softc;

	for (i = 0; i < SQ_NTXDESC; i++) {
		if (sc->sc_txmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
			m_freem(sc->sc_txmbuf[i]);
			sc->sc_txmbuf[i] = NULL;
		}
	}

	/* Clear Seeq transmit/receive command registers */
	sq_seeq_write(sc, SEEQ_TXCMD, 0);
	sq_seeq_write(sc, SEEQ_RXCMD, 0);

	sq_reset(sc);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

/* Device timeout/watchdog routine. */
void
sq_watchdog(struct ifnet *ifp)
{
	u_int32_t status;
	struct sq_softc *sc = ifp->if_softc;

	status = sq_hpc_read(sc, sc->hpc_regs->enetx_ctl);
	log(LOG_ERR, "%s: device timeout (prev %d, next %d, free %d, "
	    "status %08x)\n", sc->sc_dev.dv_xname, sc->sc_prevtx,
	    sc->sc_nexttx, sc->sc_nfreetx, status);

	sq_trace_dump(sc);

	memset(&sc->sq_trace, 0, sizeof(sc->sq_trace));
	sc->sq_trace_idx = 0;

	++ifp->if_oerrors;

	sq_init(ifp);
}

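/*
 * Dump the software event trace.  SQ_TRACE() logs driver state
 * transitions (resets, DMA starts/stops, interrupt entry/exit) into a
 * small buffer in the softc; dumping it on a watchdog timeout shows
 * what the driver was doing when the chip wedged.
 */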
static void
sq_trace_dump(struct sq_softc *sc)
{
	int i;
	char *act;

	for (i = 0; i < sc->sq_trace_idx; i++) {
		switch (sc->sq_trace[i].action) {
		case SQ_RESET:		act = "SQ_RESET";		break;
		case SQ_ADD_TO_DMA:	act = "SQ_ADD_TO_DMA";		break;
		case SQ_START_DMA:	act = "SQ_START_DMA";		break;
		case SQ_DONE_DMA:	act = "SQ_DONE_DMA";		break;
		case SQ_RESTART_DMA:	act = "SQ_RESTART_DMA";		break;
		case SQ_TXINTR_ENTER:	act = "SQ_TXINTR_ENTER";	break;
		case SQ_TXINTR_EXIT:	act = "SQ_TXINTR_EXIT";		break;
		case SQ_TXINTR_BUSY:	act = "SQ_TXINTR_BUSY";		break;
		case SQ_IOCTL:		act = "SQ_IOCTL";		break;
		case SQ_ENQUEUE:	act = "SQ_ENQUEUE";		break;
		default:		act = "UNKNOWN";
		}

		printf("%s: [%03d] action %-16s buf %03d free %03d "
		    "status %08x line %d\n", sc->sc_dev.dv_xname, i, act,
		    sc->sq_trace[i].bufno, sc->sq_trace[i].freebuf,
		    sc->sq_trace[i].status, sc->sq_trace[i].line);
	}
}

static int
sq_intr(void *arg)
{
	struct sq_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int handled = 0;
	u_int32_t stat;

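	/*
	 * Bit 1 of the (otherwise reset-related) register doubles as
	 * the interrupt status flag; writing it back appears to
	 * acknowledge the interrupt.
	 */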
	stat = sq_hpc_read(sc, sc->hpc_regs->enetr_reset);

	if ((stat & 2) == 0)
		SQ_DPRINTF(("%s: Unexpected interrupt!\n",
		    sc->sc_dev.dv_xname));
	else
		sq_hpc_write(sc, sc->hpc_regs->enetr_reset, (stat | 2));

	/*
	 * If the interface isn't running, the interrupt couldn't
	 * possibly have come from us.
	 */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	sc->sq_intrcnt.ev_count++;

	/* Always check for received packets */
	if (sq_rxintr(sc) != 0)
		handled++;

	/* Only handle transmit interrupts if we actually sent something */
	if (sc->sc_nfreetx < SQ_NTXDESC) {
		sq_txintr(sc);
		handled++;
	}

#if NRND > 0	/* XXX: "rnd.h" is never included, so this is always false */
	if (handled)
		rnd_add_uint32(&sc->rnd_source, stat);
#endif
	return (handled);
}

static int
sq_rxintr(struct sq_softc *sc)
{
	int count = 0;
	struct mbuf* m;
	int i, framelen;
	u_int8_t pktstat;
	u_int32_t status;
	u_int32_t ctl_reg;
	int new_end, orig_end;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	for (i = sc->sc_nextrx;; i = SQ_NEXTRX(i)) {
		SQ_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD |
		    BUS_DMASYNC_POSTWRITE);

		/*
		 * If this is a CPU-owned buffer, we're at the end of
		 * the list.
		 */
		if (sc->hpc_regs->revision == 3)
			ctl_reg = sc->sc_rxdesc[i].hpc3_hdd_ctl &
			    HPC3_HDD_CTL_OWN;
		else
			ctl_reg = sc->sc_rxdesc[i].hpc1_hdd_ctl &
			    HPC1_HDD_CTL_OWN;

		if (ctl_reg) {
#if defined(SQ_DEBUG)
			u_int32_t reg;

			reg = sq_hpc_read(sc, sc->hpc_regs->enetr_ctl);
			SQ_DPRINTF(("%s: rxintr: done at %d (ctl %08x)\n",
			    sc->sc_dev.dv_xname, i, reg));
#endif
			break;
		}

		count++;

		m = sc->sc_rxmbuf[i];
		framelen = m->m_ext.ext_size - 3;
		if (sc->hpc_regs->revision == 3)
			framelen -=
			    HPC3_HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hpc3_hdd_ctl);
		else
			framelen -=
			    HPC1_HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hpc1_hdd_ctl);

		/* Now sync the actual packet data */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
		    sc->sc_rxmap[i]->dm_mapsize, BUS_DMASYNC_POSTREAD);

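		/*
		 * Buffer layout (as the offsets below imply): two bytes
		 * of padding to align the IP header, then the frame,
		 * with the Seeq status byte immediately following the
		 * frame data -- hence the '+ 2' here and the m_data
		 * adjustment once the packet checks out.
		 */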
		pktstat = *((u_int8_t*)m->m_data + framelen + 2);

		if ((pktstat & RXSTAT_GOOD) == 0) {
			ifp->if_ierrors++;

			if (pktstat & RXSTAT_OFLOW)
				printf("%s: receive FIFO overflow\n",
				    sc->sc_dev.dv_xname);

			bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
			    sc->sc_rxmap[i]->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			SQ_INIT_RXDESC(sc, i);
			SQ_DPRINTF(("%s: sq_rxintr: buf %d no RXSTAT_GOOD\n",
			    sc->sc_dev.dv_xname, i));
			continue;
		}

		if (sq_add_rxbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
			    sc->sc_rxmap[i]->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			SQ_INIT_RXDESC(sc, i);
			SQ_DPRINTF(("%s: sq_rxintr: buf %d sq_add_rxbuf() "
			    "failed\n", sc->sc_dev.dv_xname, i));
			continue;
		}

		m->m_data += 2;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = framelen;

		ifp->if_ipackets++;

		SQ_DPRINTF(("%s: sq_rxintr: buf %d len %d\n",
		    sc->sc_dev.dv_xname, i, framelen));

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		(*ifp->if_input)(ifp, m);
	}

	/* If anything happened, move ring start/end pointers to new spot */
	if (i != sc->sc_nextrx) {
		/*
		 * NB: hpc3_hdd_ctl == hpc1_hdd_bufptr, and
		 * HPC1_HDD_CTL_EOCHAIN == HPC3_HDD_CTL_EOCHAIN
		 */

		new_end = SQ_PREVRX(i);
		sc->sc_rxdesc[new_end].hpc3_hdd_ctl |= HPC3_HDD_CTL_EOCHAIN;
		SQ_CDRXSYNC(sc, new_end, BUS_DMASYNC_PREREAD |
		    BUS_DMASYNC_PREWRITE);

		orig_end = SQ_PREVRX(sc->sc_nextrx);
		sc->sc_rxdesc[orig_end].hpc3_hdd_ctl &= ~HPC3_HDD_CTL_EOCHAIN;
		SQ_CDRXSYNC(sc, orig_end, BUS_DMASYNC_PREREAD |
		    BUS_DMASYNC_PREWRITE);

		sc->sc_nextrx = i;
	}

	status = sq_hpc_read(sc, sc->hpc_regs->enetr_ctl);

	/* If receive channel is stopped, restart it... */
	if ((status & sc->hpc_regs->enetr_ctl_active) == 0) {
		/* Pass the start of the receive ring to the HPC */
		sq_hpc_write(sc, sc->hpc_regs->enetr_ndbp, SQ_CDRXADDR(sc,
		    sc->sc_nextrx));

		/* And turn on the HPC ethernet receive channel */
		sq_hpc_write(sc, sc->hpc_regs->enetr_ctl,
		    sc->hpc_regs->enetr_ctl_active);
	}

	return count;
}

static int
sq_txintr(struct sq_softc *sc)
{
	int shift = 0;
	u_int32_t status, tmp;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if (sc->hpc_regs->revision != 3)
		shift = 16;

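	/*
	 * On HPC1 the Seeq transmit status is reported in the upper
	 * halfword of the transmit control register, hence the shift;
	 * HPC3 reports it in the low bits.
	 */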
	status = sq_hpc_read(sc, sc->hpc_regs->enetx_ctl) >> shift;

	SQ_TRACE(SQ_TXINTR_ENTER, sc, sc->sc_prevtx, status);

	tmp = (sc->hpc_regs->enetx_ctl_active >> shift) | TXSTAT_GOOD;
	if ((status & tmp) == 0) {
		if (status & TXSTAT_COLL)
			ifp->if_collisions++;

		if (status & TXSTAT_UFLOW) {
			printf("%s: transmit underflow\n", sc->sc_dev.dv_xname);
			ifp->if_oerrors++;
		}

		if (status & TXSTAT_16COLL) {
			printf("%s: max collisions reached\n",
			    sc->sc_dev.dv_xname);
			ifp->if_oerrors++;
			ifp->if_collisions += 16;
		}
	}

	/* prevtx now points to next xmit packet not yet finished */
	if (sc->hpc_regs->revision == 3)
		sq_txring_hpc3(sc);
	else
		sq_txring_hpc1(sc);

	/* If we have buffers free, let upper layers know */
	if (sc->sc_nfreetx > 0)
		ifp->if_flags &= ~IFF_OACTIVE;

	/* If all packets have left the coop, cancel watchdog */
	if (sc->sc_nfreetx == SQ_NTXDESC)
		ifp->if_timer = 0;

	SQ_TRACE(SQ_TXINTR_EXIT, sc, sc->sc_prevtx, status);
	sq_start(ifp);

	return 1;
}

/*
 * Reclaim used transmit descriptors and restart the transmit DMA
 * engine if necessary.
 */
static void
sq_txring_hpc1(struct sq_softc *sc)
{
	/*
	 * HPC1 doesn't tag transmitted descriptors, however,
	 * the NDBP register points to the next descriptor that
	 * has not yet been processed.  If DMA is not in progress,
	 * we can safely reclaim all descriptors up to NDBP, and,
	 * if necessary, restart DMA at NDBP.  Otherwise, if DMA
	 * is active, we can only safely reclaim up to CBP.
	 *
	 * For now, we'll only reclaim on inactive DMA and assume
	 * that a sufficiently large ring keeps us out of trouble.
	 */
	u_int32_t reclaimto, status;
	int reclaimall, i = sc->sc_prevtx;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	status = sq_hpc_read(sc, HPC1_ENETX_CTL);
	if (status & HPC1_ENETX_CTL_ACTIVE) {
		SQ_TRACE(SQ_TXINTR_BUSY, sc, i, status);
		return;
	} else
		reclaimto = sq_hpc_read(sc, HPC1_ENETX_NDBP);

	if (sc->sc_nfreetx == 0 && SQ_CDTXADDR(sc, i) == reclaimto)
		reclaimall = 1;
	else
		reclaimall = 0;

	while (sc->sc_nfreetx < SQ_NTXDESC) {
		if (SQ_CDTXADDR(sc, i) == reclaimto && !reclaimall)
			break;

		SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* Sync the packet data, unload DMA map, free mbuf */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap[i], 0,
		    sc->sc_txmap[i]->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
		m_freem(sc->sc_txmbuf[i]);
		sc->sc_txmbuf[i] = NULL;

		ifp->if_opackets++;
		sc->sc_nfreetx++;

		SQ_TRACE(SQ_DONE_DMA, sc, i, status);

		i = SQ_NEXTTX(i);
	}

	if (sc->sc_nfreetx < SQ_NTXDESC) {
		SQ_TRACE(SQ_RESTART_DMA, sc, i, status);

		KASSERT(reclaimto == SQ_CDTXADDR(sc, i));

		sq_hpc_write(sc, HPC1_ENETX_CFXBP, reclaimto);
		sq_hpc_write(sc, HPC1_ENETX_CBP, reclaimto);

		/* Kick DMA channel into life */
		sq_hpc_write(sc, HPC1_ENETX_CTL, HPC1_ENETX_CTL_ACTIVE);

		/*
		 * Set a watchdog timer in case the chip
		 * flakes out.
		 */
		ifp->if_timer = 5;
	}

	sc->sc_prevtx = i;
}

/*
 * Reclaim used transmit descriptors and restart the transmit DMA
 * engine if necessary.
 */
static void
sq_txring_hpc3(struct sq_softc *sc)
{
	/*
	 * HPC3 tags descriptors with a bit once they've been
	 * transmitted.  We need only free each XMITDONE'd
	 * descriptor, and restart the DMA engine if any
	 * descriptors are left over.
	 */
	int i;
	u_int32_t status = 0;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	i = sc->sc_prevtx;
	while (sc->sc_nfreetx < SQ_NTXDESC) {
		/*
		 * Check status first so we don't end up with a case of
		 * the buffer not being finished while the DMA channel
		 * has gone idle.
		 */
		status = sq_hpc_read(sc, HPC3_ENETX_CTL);

		SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* Check for used descriptor and restart DMA chain if needed */
		if (!(sc->sc_txdesc[i].hpc3_hdd_ctl & HPC3_HDD_CTL_XMITDONE)) {
			if ((status & HPC3_ENETX_CTL_ACTIVE) == 0) {
				SQ_TRACE(SQ_RESTART_DMA, sc, i, status);

				sq_hpc_write(sc, HPC3_ENETX_NDBP,
				    SQ_CDTXADDR(sc, i));

				/* Kick DMA channel into life */
				sq_hpc_write(sc, HPC3_ENETX_CTL,
				    HPC3_ENETX_CTL_ACTIVE);

				/*
				 * Set a watchdog timer in case the chip
				 * flakes out.
				 */
				ifp->if_timer = 5;
			} else
				SQ_TRACE(SQ_TXINTR_BUSY, sc, i, status);
			break;
		}

		/* Sync the packet data, unload DMA map, free mbuf */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap[i], 0,
		    sc->sc_txmap[i]->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
		m_freem(sc->sc_txmbuf[i]);
		sc->sc_txmbuf[i] = NULL;

		ifp->if_opackets++;
		sc->sc_nfreetx++;

		SQ_TRACE(SQ_DONE_DMA, sc, i, status);
		i = SQ_NEXTTX(i);
	}

	sc->sc_prevtx = i;
}

void
sq_reset(struct sq_softc *sc)
{
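	/*
	 * Writing 3 to the reset register appears to assert both the
	 * channel-reset and interrupt-clear bits; writing 0 afterwards
	 * releases the chip from reset.
	 */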
	/* Stop HPC dma channels */
	sq_hpc_write(sc, sc->hpc_regs->enetr_ctl, 0);
	sq_hpc_write(sc, sc->hpc_regs->enetx_ctl, 0);

	sq_hpc_write(sc, sc->hpc_regs->enetr_reset, 3);
	delay(20);
	sq_hpc_write(sc, sc->hpc_regs->enetr_reset, 0);
}

/* sq_add_rxbuf: Add a receive buffer to the indicated descriptor. */
int
sq_add_rxbuf(struct sq_softc *sc, int idx)
{
	int err;
	struct mbuf *m;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (sc->sc_rxmbuf[idx] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[idx]);

	sc->sc_rxmbuf[idx] = m;

	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_rxmap[idx],
				   m->m_ext.ext_buf, m->m_ext.ext_size,
				   NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, err);
		panic("sq_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[idx], 0,
	    sc->sc_rxmap[idx]->dm_mapsize, BUS_DMASYNC_PREREAD);

	SQ_INIT_RXDESC(sc, idx);

	return 0;
}

void
sq_dump_buffer(u_int32_t addr, u_int32_t len)
{
	u_int i;
	u_char *physaddr = (u_char *) MIPS_PHYS_TO_KSEG1((caddr_t)addr);

	if (len == 0)
		return;

	printf("%p: ", physaddr);

	for (i = 0; i < len; i++) {
		printf("%02x ", *(physaddr + i) & 0xff);
		if ((i % 16) == 15 && i != len - 1)
			printf("\n%p: ", physaddr + i);
	}

	printf("\n");
}

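/*
 * Convert an Ethernet address string of the form "08:00:69:0a:94:5f"
 * (the format of the ARCS "eaddr" environment variable read in
 * sq_attach()) into its binary representation.  Note that there is no
 * validation; malformed input produces garbage.
 */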
void
enaddr_aton(const char* str, u_int8_t* eaddr)
{
	int i;
	char c;

	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		if (*str == ':')
			str++;

		c = *str++;
		if (isdigit(c)) {
			eaddr[i] = (c - '0');
		} else if (isxdigit(c)) {
			eaddr[i] = (toupper(c) + 10 - 'A');
		}

		c = *str++;
		if (isdigit(c)) {
			eaddr[i] = (eaddr[i] << 4) | (c - '0');
		} else if (isxdigit(c)) {
			eaddr[i] = (eaddr[i] << 4) | (toupper(c) + 10 - 'A');
		}
	}
}
