/*	$NetBSD: if_sq.c,v 1.29.20.1 2007/01/12 01:00:58 ad Exp $	*/

/*
 * Copyright (c) 2001 Rafal K. Boni
 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Portions of this code are derived from software contributed to The
 * NetBSD Foundation by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_sq.c,v 1.29.20.1 2007/01/12 01:00:58 ad Exp $");

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>

#include <machine/endian.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/ic/seeq8003reg.h>

#include <sgimips/hpc/sqvar.h>
#include <sgimips/hpc/hpcvar.h>
#include <sgimips/hpc/hpcreg.h>

#include <dev/arcbios/arcbios.h>
#include <dev/arcbios/arcbiosvar.h>

/* #define static */	/* XXX debugging leftover: uncomment to defeat 'static' */

/*
 * Short TODO list:
 *	(1) Do counters for bad-RX packets.
 *	(2) Allow multi-segment transmits, instead of copying to a single,
 *	    contiguous mbuf.
 *	(3) Verify sq_stop() turns off enough stuff; I was still getting
 *	    seeq interrupts after sq_stop().
 *	(4) Implement EDLC modes: especially packet auto-pad and simplex
 *	    mode.
 *	(5) Should the driver filter out its own transmissions in non-EDLC
 *	    mode?
 *	(6) Multicast support -- multicast filter, address management, ...
 *	(7) Deal with RB0 (recv buffer overflow) on reception.  Will need
 *	    to figure out if RB0 is read-only, as stated in one spot in the
 *	    HPC spec, or read-write (i.e., is "write a one to clear it"
 *	    the correct thing?).
 */

#if defined(SQ_DEBUG)
int sq_debug = 0;
#define SQ_DPRINTF(x)	do { if (sq_debug) printf x; } while (0)
#else
#define SQ_DPRINTF(x)
#endif

static int	sq_match(struct device *, struct cfdata *, void *);
static void	sq_attach(struct device *, struct device *, void *);
static int	sq_init(struct ifnet *);
static void	sq_start(struct ifnet *);
static void	sq_stop(struct ifnet *, int);
static void	sq_watchdog(struct ifnet *);
static int	sq_ioctl(struct ifnet *, u_long, caddr_t);

static void	sq_set_filter(struct sq_softc *);
static int	sq_intr(void *);
static int	sq_rxintr(struct sq_softc *);
static int	sq_txintr(struct sq_softc *);
static void	sq_txring_hpc1(struct sq_softc *);
static void	sq_txring_hpc3(struct sq_softc *);
static void	sq_reset(struct sq_softc *);
static int	sq_add_rxbuf(struct sq_softc *, int);
static void	sq_dump_buffer(u_int32_t addr, u_int32_t len);
static void	sq_trace_dump(struct sq_softc *);

static void	enaddr_aton(const char*, u_int8_t*);

CFATTACH_DECL(sq, sizeof(struct sq_softc),
    sq_match, sq_attach, NULL, NULL);

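/*
 * Frames shorter than the ethernet minimum must be padded out before
 * transmission.  The Seeq generates and appends the 4-byte FCS itself,
 * so the driver only pads to ETHER_MIN_LEN - ETHER_CRC_LEN (60) bytes;
 * see the copy path in sq_start().
 */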
#define	ETHER_PAD_LEN	(ETHER_MIN_LEN - ETHER_CRC_LEN)

#define sq_seeq_read(sc, off) \
	bus_space_read_1(sc->sc_regt, sc->sc_regh, off)
#define sq_seeq_write(sc, off, val) \
	bus_space_write_1(sc->sc_regt, sc->sc_regh, off, val)

#define sq_hpc_read(sc, off) \
	bus_space_read_4(sc->sc_hpct, sc->sc_hpch, off)
#define sq_hpc_write(sc, off, val) \
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, off, val)

/* MAC address offset for non-onboard implementations */
#define SQ_HPC_EEPROM_ENADDR	250

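/*
 * 08:00:69 is the IEEE OUI assigned to Silicon Graphics; it is used in
 * sq_attach() to decide whether the EEPROM-supplied station address is
 * plausible.
 */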
#define SGI_OUI_0		0x08
#define SGI_OUI_1		0x00
#define SGI_OUI_2		0x69

static int
sq_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct hpc_attach_args *ha = aux;

	if (strcmp(ha->ha_name, cf->cf_name) == 0)
		return (1);

	return (0);
}

static void
sq_attach(struct device *parent, struct device *self, void *aux)
{
	int i, err;
	const char* macaddr;
	struct sq_softc *sc = (void *)self;
	struct hpc_attach_args *haa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	sc->sc_hpct = haa->ha_st;
	sc->hpc_regs = haa->hpc_regs;	/* HPC register definitions */

	if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
				       haa->ha_dmaoff,
				       sc->hpc_regs->enet_regs_size,
				       &sc->sc_hpch)) != 0) {
		printf(": unable to map HPC DMA registers, error = %d\n", err);
		goto fail_0;
	}

	sc->sc_regt = haa->ha_st;
	if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
				       haa->ha_devoff,
				       sc->hpc_regs->enet_devregs_size,
				       &sc->sc_regh)) != 0) {
		printf(": unable to map Seeq registers, error = %d\n", err);
		goto fail_0;
	}

	sc->sc_dmat = haa->ha_dmat;

	if ((err = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct sq_control),
				    PAGE_SIZE, PAGE_SIZE, &sc->sc_cdseg,
				    1, &sc->sc_ncdseg, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to allocate control data, error = %d\n", err);
		goto fail_0;
	}

	if ((err = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg,
				  sizeof(struct sq_control),
				  (caddr_t *)&sc->sc_control,
				  BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf(": unable to map control data, error = %d\n", err);
		goto fail_1;
	}

	if ((err = bus_dmamap_create(sc->sc_dmat, sizeof(struct sq_control),
				     1, sizeof(struct sq_control), PAGE_SIZE,
				     BUS_DMA_NOWAIT, &sc->sc_cdmap)) != 0) {
		printf(": unable to create DMA map for control data, error "
		       "= %d\n", err);
		goto fail_2;
	}

	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cdmap, sc->sc_control,
				   sizeof(struct sq_control),
				   NULL, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load DMA map for control data, error "
		       "= %d\n", err);
		goto fail_3;
	}

	memset(sc->sc_control, 0, sizeof(struct sq_control));

	/* Create transmit buffer DMA maps */
	for (i = 0; i < SQ_NTXDESC; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
					     0, BUS_DMA_NOWAIT,
					     &sc->sc_txmap[i])) != 0) {
			printf(": unable to create tx DMA map %d, error = %d\n",
			       i, err);
			goto fail_4;
		}
	}

	/* Create receive buffer DMA maps */
	for (i = 0; i < SQ_NRXDESC; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
					     0, BUS_DMA_NOWAIT,
					     &sc->sc_rxmap[i])) != 0) {
			printf(": unable to create rx DMA map %d, error = %d\n",
			       i, err);
			goto fail_5;
		}
	}

	/* Pre-allocate the receive buffers.  */
	for (i = 0; i < SQ_NRXDESC; i++) {
		if ((err = sq_add_rxbuf(sc, i)) != 0) {
247 printf(": unable to allocate or map rx buffer %d\n,"
248 " error = %d\n", i, err);
			goto fail_6;
		}
	}

	memcpy(sc->sc_enaddr, &haa->hpc_eeprom[SQ_HPC_EEPROM_ENADDR],
	    ETHER_ADDR_LEN);

	/*
	 * If our mac address is bogus, obtain it from ARCBIOS.  This will
	 * be true of the onboard HPC3 on IP22, since there is no eeprom,
	 * but rather the DS1386 RTC's battery-backed ram is used.
	 */
	if (sc->sc_enaddr[0] != SGI_OUI_0 || sc->sc_enaddr[1] != SGI_OUI_1 ||
	    sc->sc_enaddr[2] != SGI_OUI_2) {
		macaddr = ARCBIOS->GetEnvironmentVariable("eaddr");
		if (macaddr == NULL) {
			printf(": unable to get MAC address!\n");
			goto fail_6;
		}
		enaddr_aton(macaddr, sc->sc_enaddr);
	}

	evcnt_attach_dynamic(&sc->sq_intrcnt, EVCNT_TYPE_INTR, NULL,
	    self->dv_xname, "intr");

	if ((cpu_intr_establish(haa->ha_irq, IPL_NET, sq_intr, sc)) == NULL) {
		printf(": unable to establish interrupt!\n");
		goto fail_6;
	}

	/* Reset the chip to a known state. */
	sq_reset(sc);

	/*
	 * Determine if we're an 8003 or 80c03 by setting the first
	 * MAC address register to non-zero, and then reading it back.
	 * If it's zero, we have an 80c03, because we will have read
	 * the TxCollLSB register.
	 */
	sq_seeq_write(sc, SEEQ_TXCOLLS0, 0xa5);
	if (sq_seeq_read(sc, SEEQ_TXCOLLS0) == 0)
		sc->sc_type = SQ_TYPE_80C03;
	else
		sc->sc_type = SQ_TYPE_8003;
	sq_seeq_write(sc, SEEQ_TXCOLLS0, 0x00);

	printf(": SGI Seeq %s\n",
	    sc->sc_type == SQ_TYPE_80C03 ? "80c03" : "8003");

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(sc->sc_enaddr));

	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_init = sq_init;
	ifp->if_stop = sq_stop;
	ifp->if_start = sq_start;
	ifp->if_ioctl = sq_ioctl;
	ifp->if_watchdog = sq_watchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_NOTRAILERS | IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	memset(&sc->sq_trace, 0, sizeof(sc->sq_trace));
	/* Done! */
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	for (i = 0; i < SQ_NRXDESC; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
 fail_5:
	for (i = 0; i < SQ_NRXDESC; i++) {
		if (sc->sc_rxmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmap[i]);
	}
 fail_4:
	for (i = 0; i < SQ_NTXDESC; i++) {
		if (sc->sc_txmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_txmap[i]);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cdmap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdmap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control,
	    sizeof(struct sq_control));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg);
 fail_0:
	return;
}

/* Set up data to get the interface up and running. */
int
sq_init(struct ifnet *ifp)
{
	int i;
	u_int32_t reg;
	struct sq_softc *sc = ifp->if_softc;

	/* Cancel any in-progress I/O */
	sq_stop(ifp, 0);

	sc->sc_nextrx = 0;

	sc->sc_nfreetx = SQ_NTXDESC;
	sc->sc_nexttx = sc->sc_prevtx = 0;

	SQ_TRACE(SQ_RESET, sc, 0, 0);

	/* Set into 8003 mode, bank 0 to program ethernet address */
	sq_seeq_write(sc, SEEQ_TXCMD, TXCMD_BANK0);

	/* Now write the address */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		sq_seeq_write(sc, i, sc->sc_enaddr[i]);

	sc->sc_rxcmd = RXCMD_IE_CRC |
		       RXCMD_IE_DRIB |
		       RXCMD_IE_SHORT |
		       RXCMD_IE_END |
		       RXCMD_IE_GOOD;

	/*
	 * Set the receive filter -- this will add some bits to the
	 * prototype RXCMD register.  Do this before setting the
	 * transmit config register, since we might need to switch
	 * banks.
	 */
	sq_set_filter(sc);

	/* Set up Seeq transmit command register */
	sq_seeq_write(sc, SEEQ_TXCMD, TXCMD_IE_UFLOW |
				      TXCMD_IE_COLL |
				      TXCMD_IE_16COLL |
				      TXCMD_IE_GOOD);

	/* Now write the receive command register. */
	sq_seeq_write(sc, SEEQ_RXCMD, sc->sc_rxcmd);

	/* Set up HPC ethernet DMA config */
	if (sc->hpc_regs->revision == 3) {
		reg = sq_hpc_read(sc, HPC3_ENETR_DMACFG);
		sq_hpc_write(sc, HPC3_ENETR_DMACFG, reg |
		    HPC3_ENETR_DMACFG_FIX_RXDC |
		    HPC3_ENETR_DMACFG_FIX_INTR |
		    HPC3_ENETR_DMACFG_FIX_EOP);
	}

	/* Pass the start of the receive ring to the HPC */
	sq_hpc_write(sc, sc->hpc_regs->enetr_ndbp, SQ_CDRXADDR(sc, 0));

	/* And turn on the HPC ethernet receive channel */
	sq_hpc_write(sc, sc->hpc_regs->enetr_ctl,
	    sc->hpc_regs->enetr_ctl_active);

	/*
	 * Turn off delayed receive interrupts on HPC1.
	 * (see Hollywood HPC Specification 2.1.4.3)
	 */
	if (sc->hpc_regs->revision != 3)
		sq_hpc_write(sc, HPC1_ENET_INTDELAY, HPC1_ENET_INTDELAY_OFF);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

static void
sq_set_filter(struct sq_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;

	/*
	 * Check for promiscuous mode.  Also implies
	 * all-multicast.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rxcmd |= RXCMD_REC_ALL;
		ifp->if_flags |= IFF_ALLMULTI;
		return;
	}

	/*
	 * The 8003 has no hash table.  If we have any multicast
	 * addresses on the list, enable reception of all multicast
	 * frames.
	 *
	 * XXX The 80c03 has a hash table.  We should use it.
	 */

	ETHER_FIRST_MULTI(step, ec, enm);

	if (enm == NULL) {
		sc->sc_rxcmd &= ~RXCMD_REC_MASK;
		sc->sc_rxcmd |= RXCMD_REC_BROAD;

		ifp->if_flags &= ~IFF_ALLMULTI;
		return;
	}

	sc->sc_rxcmd |= RXCMD_REC_MULTI;
	ifp->if_flags |= IFF_ALLMULTI;
}

int
sq_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	int s, error = 0;

	SQ_TRACE(SQ_IOCTL, (struct sq_softc *)ifp->if_softc, 0, 0);

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		/*
		 * Multicast list has changed; set the hardware filter
		 * accordingly.
		 */
		if (ifp->if_flags & IFF_RUNNING)
			error = sq_init(ifp);
		else
			error = 0;
	}

	splx(s);
	return (error);
}
void
sq_start(struct ifnet *ifp)
{
	struct sq_softc *sc = ifp->if_softc;
	u_int32_t status;
	struct mbuf *m0, *m;
	bus_dmamap_t dmamap;
	int err, totlen, nexttx, firsttx, lasttx = -1, ofree, seg;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_nfreetx;
	firsttx = sc->sc_nexttx;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->sc_nfreetx != 0) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		dmamap = sc->sc_txmap[sc->sc_nexttx];

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  In this case, we'll copy
		 * and try again.
		 * Also copy it if we need to pad, so that we are sure there
		 * is room for the pad buffer.
		 * XXX the right way of doing this is to use a static buffer
		 * for padding and adding it to the transmit descriptor (see
		 * sys/dev/pci/if_tl.c for example).  We can't do this here yet
		 * because we can't send packets with more than one fragment.
		 */
		if (m0->m_pkthdr.len < ETHER_PAD_LEN ||
		    bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
					 BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}

			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			if (m0->m_pkthdr.len < ETHER_PAD_LEN) {
				memset(mtod(m, char *) + m0->m_pkthdr.len, 0,
				    ETHER_PAD_LEN - m0->m_pkthdr.len);
				m->m_pkthdr.len = m->m_len = ETHER_PAD_LEN;
			} else
				m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;

			if ((err = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
						m, BUS_DMA_NOWAIT)) != 0) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, err);
				break;
			}
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.
		 */
		if (dmamap->dm_nsegs > sc->sc_nfreetx) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed to anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX is it worth it?
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			if (m != NULL)
				m_freem(m);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		SQ_TRACE(SQ_ENQUEUE, sc, sc->sc_nexttx, 0);

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptors.
		 */
		for (nexttx = sc->sc_nexttx, seg = 0, totlen = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = SQ_NEXTTX(nexttx)) {
			if (sc->hpc_regs->revision == 3) {
				sc->sc_txdesc[nexttx].hpc3_hdd_bufptr =
				    dmamap->dm_segs[seg].ds_addr;
				sc->sc_txdesc[nexttx].hpc3_hdd_ctl =
				    dmamap->dm_segs[seg].ds_len;
			} else {
				sc->sc_txdesc[nexttx].hpc1_hdd_bufptr =
				    dmamap->dm_segs[seg].ds_addr;
				sc->sc_txdesc[nexttx].hpc1_hdd_ctl =
				    dmamap->dm_segs[seg].ds_len;
			}
			sc->sc_txdesc[nexttx].hdd_descptr =
			    SQ_CDTXADDR(sc, SQ_NEXTTX(nexttx));
			lasttx = nexttx;
			totlen += dmamap->dm_segs[seg].ds_len;
		}

		/* Last descriptor gets end-of-packet */
		KASSERT(lasttx != -1);
		if (sc->hpc_regs->revision == 3)
			sc->sc_txdesc[lasttx].hpc3_hdd_ctl |=
			    HPC3_HDD_CTL_EOPACKET;
		else
			sc->sc_txdesc[lasttx].hpc1_hdd_ctl |=
			    HPC1_HDD_CTL_EOPACKET;

		SQ_DPRINTF(("%s: transmit %d-%d, len %d\n",
		    sc->sc_dev.dv_xname, sc->sc_nexttx, lasttx, totlen));

		if (ifp->if_flags & IFF_DEBUG) {
			printf("     transmit chain:\n");
			for (seg = sc->sc_nexttx;; seg = SQ_NEXTTX(seg)) {
				printf("     descriptor %d:\n", seg);
				printf("       hdd_bufptr:  0x%08x\n",
				    (sc->hpc_regs->revision == 3) ?
				    sc->sc_txdesc[seg].hpc3_hdd_bufptr :
				    sc->sc_txdesc[seg].hpc1_hdd_bufptr);
				printf("       hdd_ctl:     0x%08x\n",
				    (sc->hpc_regs->revision == 3) ?
				    sc->sc_txdesc[seg].hpc3_hdd_ctl :
				    sc->sc_txdesc[seg].hpc1_hdd_ctl);
				printf("       hdd_descptr: 0x%08x\n",
				    sc->sc_txdesc[seg].hdd_descptr);

				if (seg == lasttx)
					break;
			}
		}

		/* Sync the descriptors we're using. */
		SQ_CDTXSYNC(sc, sc->sc_nexttx, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Store a pointer to the packet so we can free it later */
		sc->sc_txmbuf[sc->sc_nexttx] = m0;

		/* Advance the tx pointer. */
		sc->sc_nfreetx -= dmamap->dm_nsegs;
		sc->sc_nexttx = nexttx;
	}

	/* All transmit descriptors used up, let upper layers know */
	if (sc->sc_nfreetx == 0)
		ifp->if_flags |= IFF_OACTIVE;

	if (sc->sc_nfreetx != ofree) {
		SQ_DPRINTF(("%s: %d packets enqueued, first %d, INTR on %d\n",
		    sc->sc_dev.dv_xname, lasttx - firsttx + 1,
		    firsttx, lasttx));

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued, mark it as the last
		 * descriptor.
		 *
		 * HPC1_HDD_CTL_INTR will generate an interrupt on
		 * HPC1.  HPC3 requires HPC3_HDD_CTL_EOPACKET in
		 * addition to HPC3_HDD_CTL_INTR to interrupt.
		 */
		KASSERT(lasttx != -1);
		if (sc->hpc_regs->revision == 3) {
			sc->sc_txdesc[lasttx].hpc3_hdd_ctl |=
			    HPC3_HDD_CTL_INTR | HPC3_HDD_CTL_EOCHAIN;
		} else {
			sc->sc_txdesc[lasttx].hpc1_hdd_ctl |= HPC1_HDD_CTL_INTR;
			sc->sc_txdesc[lasttx].hpc1_hdd_bufptr |=
			    HPC1_HDD_CTL_EOCHAIN;
		}

		SQ_CDTXSYNC(sc, lasttx, 1,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/*
		 * There is a potential race condition here if the HPC
		 * DMA channel is active and we try and either update
		 * the 'next descriptor' pointer in the HPC PIO space
		 * or the 'next descriptor' pointer in a previous desc-
		 * riptor.
		 *
		 * To avoid this, if the channel is active, we rely on
		 * the transmit interrupt routine noticing that there
		 * are more packets to send and restarting the HPC DMA
		 * engine, rather than mucking with the DMA state here.
		 */
		status = sq_hpc_read(sc, sc->hpc_regs->enetx_ctl);

		if ((status & sc->hpc_regs->enetx_ctl_active) != 0) {
			SQ_TRACE(SQ_ADD_TO_DMA, sc, firsttx, status);

			/*
			 * NB: hpc3_hdd_ctl == hpc1_hdd_bufptr, and
			 * HPC1_HDD_CTL_EOCHAIN == HPC3_HDD_CTL_EOCHAIN
			 */
			sc->sc_txdesc[SQ_PREVTX(firsttx)].hpc3_hdd_ctl &=
			    ~HPC3_HDD_CTL_EOCHAIN;

			if (sc->hpc_regs->revision != 3)
				sc->sc_txdesc[SQ_PREVTX(firsttx)].hpc1_hdd_ctl
				    &= ~HPC1_HDD_CTL_INTR;

			SQ_CDTXSYNC(sc, SQ_PREVTX(firsttx), 1,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		} else if (sc->hpc_regs->revision == 3) {
			SQ_TRACE(SQ_START_DMA, sc, firsttx, status);

			sq_hpc_write(sc, HPC3_ENETX_NDBP, SQ_CDTXADDR(sc,
			    firsttx));

			/* Kick DMA channel into life */
			sq_hpc_write(sc, HPC3_ENETX_CTL, HPC3_ENETX_CTL_ACTIVE);
		} else {
			/*
			 * In the HPC1 case where transmit DMA is
			 * inactive, we can either kick off if
			 * the ring was previously empty, or call
			 * our transmit interrupt handler to
			 * figure out if the ring stopped short
			 * and restart at the right place.
			 */
			if (ofree == SQ_NTXDESC) {
				SQ_TRACE(SQ_START_DMA, sc, firsttx, status);

				sq_hpc_write(sc, HPC1_ENETX_NDBP,
				    SQ_CDTXADDR(sc, firsttx));
				sq_hpc_write(sc, HPC1_ENETX_CFXBP,
				    SQ_CDTXADDR(sc, firsttx));
				sq_hpc_write(sc, HPC1_ENETX_CBP,
				    SQ_CDTXADDR(sc, firsttx));

				/* Kick DMA channel into life */
				sq_hpc_write(sc, HPC1_ENETX_CTL,
				    HPC1_ENETX_CTL_ACTIVE);
			} else
				sq_txring_hpc1(sc);
		}

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

void
sq_stop(struct ifnet *ifp, int disable)
{
	int i;
	struct sq_softc *sc = ifp->if_softc;

	for (i = 0; i < SQ_NTXDESC; i++) {
		if (sc->sc_txmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
			m_freem(sc->sc_txmbuf[i]);
			sc->sc_txmbuf[i] = NULL;
		}
	}

	/* Clear Seeq transmit/receive command registers */
	sq_seeq_write(sc, SEEQ_TXCMD, 0);
	sq_seeq_write(sc, SEEQ_RXCMD, 0);

	sq_reset(sc);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

/* Device timeout/watchdog routine. */
void
sq_watchdog(struct ifnet *ifp)
{
	u_int32_t status;
	struct sq_softc *sc = ifp->if_softc;

	status = sq_hpc_read(sc, sc->hpc_regs->enetx_ctl);
	log(LOG_ERR, "%s: device timeout (prev %d, next %d, free %d, "
	    "status %08x)\n", sc->sc_dev.dv_xname, sc->sc_prevtx,
	    sc->sc_nexttx, sc->sc_nfreetx, status);

	sq_trace_dump(sc);

	memset(&sc->sq_trace, 0, sizeof(sc->sq_trace));
	sc->sq_trace_idx = 0;

	++ifp->if_oerrors;

	sq_init(ifp);
}

static void
sq_trace_dump(struct sq_softc *sc)
{
	int i;
	const char *act;

	for (i = 0; i < sc->sq_trace_idx; i++) {
		switch (sc->sq_trace[i].action) {
		case SQ_RESET:		act = "SQ_RESET";		break;
		case SQ_ADD_TO_DMA:	act = "SQ_ADD_TO_DMA";		break;
		case SQ_START_DMA:	act = "SQ_START_DMA";		break;
		case SQ_DONE_DMA:	act = "SQ_DONE_DMA";		break;
		case SQ_RESTART_DMA:	act = "SQ_RESTART_DMA";		break;
		case SQ_TXINTR_ENTER:	act = "SQ_TXINTR_ENTER";	break;
		case SQ_TXINTR_EXIT:	act = "SQ_TXINTR_EXIT";		break;
		case SQ_TXINTR_BUSY:	act = "SQ_TXINTR_BUSY";		break;
		case SQ_IOCTL:		act = "SQ_IOCTL";		break;
		case SQ_ENQUEUE:	act = "SQ_ENQUEUE";		break;
		default:		act = "UNKNOWN";
		}

		printf("%s: [%03d] action %-16s buf %03d free %03d "
		    "status %08x line %d\n", sc->sc_dev.dv_xname, i, act,
		    sc->sq_trace[i].bufno, sc->sq_trace[i].freebuf,
		    sc->sq_trace[i].status, sc->sq_trace[i].line);
	}
}

static int
sq_intr(void *arg)
{
	struct sq_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int handled = 0;
	u_int32_t stat;

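	/*
	 * XXX: judging from the usage here, bit 0x2 of the HPC reset
	 * register appears to latch the ethernet interrupt, and writing
	 * it back acks the interrupt.  (Assumption from this code, not
	 * verified against the HPC documentation.)
	 */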
	stat = sq_hpc_read(sc, sc->hpc_regs->enetr_reset);

	if ((stat & 2) == 0)
		SQ_DPRINTF(("%s: Unexpected interrupt!\n",
		    sc->sc_dev.dv_xname));
	else
		sq_hpc_write(sc, sc->hpc_regs->enetr_reset, (stat | 2));

	/*
	 * If the interface isn't running, the interrupt couldn't
	 * possibly have come from us.
	 */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	sc->sq_intrcnt.ev_count++;

	/* Always check for received packets */
	if (sq_rxintr(sc) != 0)
		handled++;

	/* Only handle transmit interrupts if we actually sent something */
	if (sc->sc_nfreetx < SQ_NTXDESC) {
		sq_txintr(sc);
		handled++;
	}

#if NRND > 0
	if (handled)
		rnd_add_uint32(&sc->rnd_source, stat);
#endif
	return (handled);
}

static int
sq_rxintr(struct sq_softc *sc)
{
	int count = 0;
	struct mbuf* m;
	int i, framelen;
	u_int8_t pktstat;
	u_int32_t status;
	u_int32_t ctl_reg;
	int new_end, orig_end;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	for (i = sc->sc_nextrx;; i = SQ_NEXTRX(i)) {
		SQ_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD |
		    BUS_DMASYNC_POSTWRITE);

		/*
		 * If this is a CPU-owned buffer, we're at the end of the list.
		 */
		if (sc->hpc_regs->revision == 3)
			ctl_reg = sc->sc_rxdesc[i].hpc3_hdd_ctl &
			    HPC3_HDD_CTL_OWN;
		else
			ctl_reg = sc->sc_rxdesc[i].hpc1_hdd_ctl &
			    HPC1_HDD_CTL_OWN;

		if (ctl_reg) {
#if defined(SQ_DEBUG)
			u_int32_t reg;

			reg = sq_hpc_read(sc, sc->hpc_regs->enetr_ctl);
			SQ_DPRINTF(("%s: rxintr: done at %d (ctl %08x)\n",
			    sc->sc_dev.dv_xname, i, reg));
#endif
			break;
		}

		count++;

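		/*
		 * Receive buffer layout, as implied by the arithmetic
		 * below: two bytes of padding to long-align the IP
		 * header, then the frame itself, then a status byte
		 * deposited by the Seeq.  The HPC byte count holds the
		 * space left in the buffer, so the frame length is
		 * ext_size minus that count minus the 3 overhead bytes.
		 */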
		m = sc->sc_rxmbuf[i];
		framelen = m->m_ext.ext_size - 3;
		if (sc->hpc_regs->revision == 3)
			framelen -=
			    HPC3_HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hpc3_hdd_ctl);
		else
			framelen -=
			    HPC1_HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hpc1_hdd_ctl);

		/* Now sync the actual packet data */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
		    sc->sc_rxmap[i]->dm_mapsize, BUS_DMASYNC_POSTREAD);

		pktstat = *((u_int8_t*)m->m_data + framelen + 2);

		if ((pktstat & RXSTAT_GOOD) == 0) {
			ifp->if_ierrors++;

			if (pktstat & RXSTAT_OFLOW)
				printf("%s: receive FIFO overflow\n",
				    sc->sc_dev.dv_xname);

			bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
			    sc->sc_rxmap[i]->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			SQ_INIT_RXDESC(sc, i);
			SQ_DPRINTF(("%s: sq_rxintr: buf %d no RXSTAT_GOOD\n",
			    sc->sc_dev.dv_xname, i));
			continue;
		}

		if (sq_add_rxbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
			    sc->sc_rxmap[i]->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			SQ_INIT_RXDESC(sc, i);
			SQ_DPRINTF(("%s: sq_rxintr: buf %d sq_add_rxbuf() "
			    "failed\n", sc->sc_dev.dv_xname, i));
			continue;
		}

		m->m_data += 2;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = framelen;

		ifp->if_ipackets++;

		SQ_DPRINTF(("%s: sq_rxintr: buf %d len %d\n",
		    sc->sc_dev.dv_xname, i, framelen));

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		(*ifp->if_input)(ifp, m);
	}

	/* If anything happened, move ring start/end pointers to new spot */
	if (i != sc->sc_nextrx) {
		/*
		 * NB: hpc3_hdd_ctl == hpc1_hdd_bufptr, and
		 * HPC1_HDD_CTL_EOCHAIN == HPC3_HDD_CTL_EOCHAIN
		 */

		new_end = SQ_PREVRX(i);
		sc->sc_rxdesc[new_end].hpc3_hdd_ctl |= HPC3_HDD_CTL_EOCHAIN;
		SQ_CDRXSYNC(sc, new_end, BUS_DMASYNC_PREREAD |
		    BUS_DMASYNC_PREWRITE);

		orig_end = SQ_PREVRX(sc->sc_nextrx);
		sc->sc_rxdesc[orig_end].hpc3_hdd_ctl &= ~HPC3_HDD_CTL_EOCHAIN;
		SQ_CDRXSYNC(sc, orig_end, BUS_DMASYNC_PREREAD |
		    BUS_DMASYNC_PREWRITE);

		sc->sc_nextrx = i;
	}

	status = sq_hpc_read(sc, sc->hpc_regs->enetr_ctl);

	/* If receive channel is stopped, restart it... */
	if ((status & sc->hpc_regs->enetr_ctl_active) == 0) {
		/* Pass the start of the receive ring to the HPC */
		sq_hpc_write(sc, sc->hpc_regs->enetr_ndbp,
		    SQ_CDRXADDR(sc, sc->sc_nextrx));

		/* And turn on the HPC ethernet receive channel */
		sq_hpc_write(sc, sc->hpc_regs->enetr_ctl,
		    sc->hpc_regs->enetr_ctl_active);
	}

	return count;
}

static int
sq_txintr(struct sq_softc *sc)
{
	int shift = 0;
	u_int32_t status, tmp;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

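	/*
	 * On HPC1 the transmit status/control bits sit in the upper
	 * halfword of the register, so shift them down before testing;
	 * HPC3 keeps them in the low halfword.  (Inferred from the
	 * enetx_ctl usage here and in sq_start().)
	 */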
	if (sc->hpc_regs->revision != 3)
		shift = 16;

	status = sq_hpc_read(sc, sc->hpc_regs->enetx_ctl) >> shift;

	SQ_TRACE(SQ_TXINTR_ENTER, sc, sc->sc_prevtx, status);

	tmp = (sc->hpc_regs->enetx_ctl_active >> shift) | TXSTAT_GOOD;
	if ((status & tmp) == 0) {
		if (status & TXSTAT_COLL)
			ifp->if_collisions++;

		if (status & TXSTAT_UFLOW) {
			printf("%s: transmit underflow\n", sc->sc_dev.dv_xname);
			ifp->if_oerrors++;
		}

		if (status & TXSTAT_16COLL) {
			printf("%s: max collisions reached\n",
			    sc->sc_dev.dv_xname);
			ifp->if_oerrors++;
			ifp->if_collisions += 16;
		}
	}

	/* prevtx now points to next xmit packet not yet finished */
	if (sc->hpc_regs->revision == 3)
		sq_txring_hpc3(sc);
	else
		sq_txring_hpc1(sc);

	/* If we have buffers free, let upper layers know */
	if (sc->sc_nfreetx > 0)
		ifp->if_flags &= ~IFF_OACTIVE;

	/* If all packets have left the coop, cancel watchdog */
	if (sc->sc_nfreetx == SQ_NTXDESC)
		ifp->if_timer = 0;

	SQ_TRACE(SQ_TXINTR_EXIT, sc, sc->sc_prevtx, status);
	sq_start(ifp);

	return 1;
}

/*
 * Reclaim used transmit descriptors and restart the transmit DMA
 * engine if necessary.
 */
static void
sq_txring_hpc1(struct sq_softc *sc)
{
	/*
	 * HPC1 doesn't tag transmitted descriptors, however,
	 * the NDBP register points to the next descriptor that
	 * has not yet been processed.  If DMA is not in progress,
	 * we can safely reclaim all descriptors up to NDBP, and,
	 * if necessary, restart DMA at NDBP.  Otherwise, if DMA
	 * is active, we can only safely reclaim up to CBP.
	 *
	 * For now, we'll only reclaim on inactive DMA and assume
	 * that a sufficiently large ring keeps us out of trouble.
	 */
	u_int32_t reclaimto, status;
	int reclaimall, i = sc->sc_prevtx;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	status = sq_hpc_read(sc, HPC1_ENETX_CTL);
	if (status & HPC1_ENETX_CTL_ACTIVE) {
		SQ_TRACE(SQ_TXINTR_BUSY, sc, i, status);
		return;
	} else
		reclaimto = sq_hpc_read(sc, HPC1_ENETX_NDBP);

	if (sc->sc_nfreetx == 0 && SQ_CDTXADDR(sc, i) == reclaimto)
		reclaimall = 1;
	else
		reclaimall = 0;

	while (sc->sc_nfreetx < SQ_NTXDESC) {
		if (SQ_CDTXADDR(sc, i) == reclaimto && !reclaimall)
			break;

		SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* Sync the packet data, unload DMA map, free mbuf */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap[i], 0,
		    sc->sc_txmap[i]->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
		m_freem(sc->sc_txmbuf[i]);
		sc->sc_txmbuf[i] = NULL;

		ifp->if_opackets++;
		sc->sc_nfreetx++;

		SQ_TRACE(SQ_DONE_DMA, sc, i, status);

		i = SQ_NEXTTX(i);
	}

	if (sc->sc_nfreetx < SQ_NTXDESC) {
		SQ_TRACE(SQ_RESTART_DMA, sc, i, status);

		KASSERT(reclaimto == SQ_CDTXADDR(sc, i));

		sq_hpc_write(sc, HPC1_ENETX_CFXBP, reclaimto);
		sq_hpc_write(sc, HPC1_ENETX_CBP, reclaimto);

		/* Kick DMA channel into life */
		sq_hpc_write(sc, HPC1_ENETX_CTL, HPC1_ENETX_CTL_ACTIVE);

		/*
		 * Set a watchdog timer in case the chip
		 * flakes out.
		 */
		ifp->if_timer = 5;
	}

	sc->sc_prevtx = i;
}

/*
 * Reclaim used transmit descriptors and restart the transmit DMA
 * engine if necessary.
 */
static void
sq_txring_hpc3(struct sq_softc *sc)
{
	/*
	 * HPC3 tags descriptors with a bit once they've been
	 * transmitted.  We need only free each XMITDONE'd
	 * descriptor, and restart the DMA engine if any
	 * descriptors are left over.
	 */
	int i;
	u_int32_t status = 0;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	i = sc->sc_prevtx;
	while (sc->sc_nfreetx < SQ_NTXDESC) {
		/*
		 * Check status first so we don't end up with a case of
		 * the buffer not being finished while the DMA channel
		 * has gone idle.
		 */
		status = sq_hpc_read(sc, HPC3_ENETX_CTL);

		SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* Check for used descriptor and restart DMA chain if needed */
		if (!(sc->sc_txdesc[i].hpc3_hdd_ctl & HPC3_HDD_CTL_XMITDONE)) {
			if ((status & HPC3_ENETX_CTL_ACTIVE) == 0) {
				SQ_TRACE(SQ_RESTART_DMA, sc, i, status);

				sq_hpc_write(sc, HPC3_ENETX_NDBP,
				    SQ_CDTXADDR(sc, i));

				/* Kick DMA channel into life */
				sq_hpc_write(sc, HPC3_ENETX_CTL,
				    HPC3_ENETX_CTL_ACTIVE);

				/*
				 * Set a watchdog timer in case the chip
				 * flakes out.
				 */
				ifp->if_timer = 5;
			} else
				SQ_TRACE(SQ_TXINTR_BUSY, sc, i, status);
			break;
		}

		/* Sync the packet data, unload DMA map, free mbuf */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap[i], 0,
		    sc->sc_txmap[i]->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
		m_freem(sc->sc_txmbuf[i]);
		sc->sc_txmbuf[i] = NULL;

		ifp->if_opackets++;
		sc->sc_nfreetx++;

		SQ_TRACE(SQ_DONE_DMA, sc, i, status);
		i = SQ_NEXTTX(i);
	}

	sc->sc_prevtx = i;
}

void
sq_reset(struct sq_softc *sc)
{
	/* Stop HPC dma channels */
	sq_hpc_write(sc, sc->hpc_regs->enetr_ctl, 0);
	sq_hpc_write(sc, sc->hpc_regs->enetx_ctl, 0);

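	/*
	 * Pulse the reset register, then let the chip settle for 20us.
	 * XXX: the individual meanings of the two bits in the value 3
	 * are assumed here, not taken from the HPC spec.
	 */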
	sq_hpc_write(sc, sc->hpc_regs->enetr_reset, 3);
	delay(20);
	sq_hpc_write(sc, sc->hpc_regs->enetr_reset, 0);
}

/* sq_add_rxbuf: Add a receive buffer to the indicated descriptor. */
int
sq_add_rxbuf(struct sq_softc *sc, int idx)
{
	int err;
	struct mbuf *m;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (sc->sc_rxmbuf[idx] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[idx]);

	sc->sc_rxmbuf[idx] = m;

	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_rxmap[idx],
				   m->m_ext.ext_buf, m->m_ext.ext_size,
				   NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, err);
		panic("sq_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[idx], 0,
	    sc->sc_rxmap[idx]->dm_mapsize, BUS_DMASYNC_PREREAD);

	SQ_INIT_RXDESC(sc, idx);

	return 0;
}

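/*
 * Debug helper: hex-dump `len' bytes of a DMA buffer given its physical
 * address, by way of the uncached KSEG1 window (so the dump reflects
 * what the device actually sees, not stale cache contents).
 */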
void
sq_dump_buffer(u_int32_t addr, u_int32_t len)
{
	u_int i;
	u_char *physaddr = (u_char *)MIPS_PHYS_TO_KSEG1((caddr_t)addr);

	if (len == 0)
		return;

	printf("%p: ", physaddr);

	for (i = 0; i < len; i++) {
		printf("%02x ", *(physaddr + i) & 0xff);
		if ((i % 16) == 15 && i != len - 1)
			printf("\n%p: ", physaddr + i + 1);
	}

	printf("\n");
}

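/*
 * Parse a textual ethernet address of the form "08:00:69:0a:0b:0c",
 * as returned by the ARCBIOS "eaddr" environment variable.  Note that
 * the input is assumed to be well-formed: there is no length or
 * character validation.
 */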
void
enaddr_aton(const char* str, u_int8_t* eaddr)
{
	int i;
	char c;

	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		if (*str == ':')
			str++;

		c = *str++;
		if (isdigit(c)) {
			eaddr[i] = (c - '0');
		} else if (isxdigit(c)) {
			eaddr[i] = (toupper(c) + 10 - 'A');
		}

		c = *str++;
		if (isdigit(c)) {
			eaddr[i] = (eaddr[i] << 4) | (c - '0');
		} else if (isxdigit(c)) {
			eaddr[i] = (eaddr[i] << 4) | (toupper(c) + 10 - 'A');
		}
	}
}