1 /* $NetBSD: if_sq.c,v 1.31 2007/02/19 20:14:31 rumble Exp $ */
2
3 /*
4 * Copyright (c) 2001 Rafal K. Boni
5 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
6 * All rights reserved.
7 *
8 * Portions of this code are derived from software contributed to The
9 * NetBSD Foundation by Jason R. Thorpe of the Numerical Aerospace
10 * Simulation Facility, NASA Ames Research Center.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. The name of the author may not be used to endorse or promote products
21 * derived from this software without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: if_sq.c,v 1.31 2007/02/19 20:14:31 rumble Exp $");
37
38 #include "bpfilter.h"
39
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/device.h>
43 #include <sys/callout.h>
44 #include <sys/mbuf.h>
45 #include <sys/malloc.h>
46 #include <sys/kernel.h>
47 #include <sys/socket.h>
48 #include <sys/ioctl.h>
49 #include <sys/errno.h>
50 #include <sys/syslog.h>
51
52 #include <uvm/uvm_extern.h>
53
54 #include <machine/endian.h>
55
56 #include <net/if.h>
57 #include <net/if_dl.h>
58 #include <net/if_media.h>
59 #include <net/if_ether.h>
60
61 #if NBPFILTER > 0
62 #include <net/bpf.h>
63 #endif
64
65 #include <machine/bus.h>
66 #include <machine/intr.h>
67
68 #include <dev/ic/seeq8003reg.h>
69
70 #include <sgimips/hpc/sqvar.h>
71 #include <sgimips/hpc/hpcvar.h>
72 #include <sgimips/hpc/hpcreg.h>
73
74 #include <dev/arcbios/arcbios.h>
75 #include <dev/arcbios/arcbiosvar.h>
76
77 #define static
78
79 /*
80 * Short TODO list:
81 * (1) Do counters for bad-RX packets.
82 * (2) Allow multi-segment transmits, instead of copying to a single,
83 * contiguous mbuf.
84 * (3) Verify sq_stop() turns off enough stuff; I was still getting
85 * seeq interrupts after sq_stop().
86 * (4) Implement EDLC modes: especially packet auto-pad and simplex
87 * mode.
88 * (5) Should the driver filter out its own transmissions in non-EDLC
89 * mode?
90 * (6) Multicast support -- multicast filter, address management, ...
91 * (7) Deal with RB0 (recv buffer overflow) on reception. Will need
92 * to figure out whether RB0 is read-only, as stated in one spot
93 * in the HPC spec, or read-write (i.e., is 'write a one to clear
94 * it' the correct behaviour?).
95 */
96
97 #if defined(SQ_DEBUG)
98 int sq_debug = 0;
99 #define SQ_DPRINTF(x) if (sq_debug) printf x
100 #else
101 #define SQ_DPRINTF(x)
102 #endif
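/*
 * Note the double parentheses when invoking SQ_DPRINTF: the macro takes a
 * single parenthesized argument list so that it expands to one plain printf
 * call (or to nothing when SQ_DEBUG is not defined), e.g.:
 *
 *	SQ_DPRINTF(("%s: sq_rxintr: buf %d len %d\n",
 *	    sc->sc_dev.dv_xname, i, framelen));
 */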
103
104 static int sq_match(struct device *, struct cfdata *, void *);
105 static void sq_attach(struct device *, struct device *, void *);
106 static int sq_init(struct ifnet *);
107 static void sq_start(struct ifnet *);
108 static void sq_stop(struct ifnet *, int);
109 static void sq_watchdog(struct ifnet *);
110 static int sq_ioctl(struct ifnet *, u_long, caddr_t);
111
112 static void sq_set_filter(struct sq_softc *);
113 static int sq_intr(void *);
114 static int sq_rxintr(struct sq_softc *);
115 static int sq_txintr(struct sq_softc *);
116 static void sq_txring_hpc1(struct sq_softc *);
117 static void sq_txring_hpc3(struct sq_softc *);
118 static void sq_reset(struct sq_softc *);
119 static int sq_add_rxbuf(struct sq_softc *, int);
120 static void sq_dump_buffer(u_int32_t addr, u_int32_t len);
121 static void sq_trace_dump(struct sq_softc *);
122
123 static void enaddr_aton(const char*, u_int8_t*);
124
125 CFATTACH_DECL(sq, sizeof(struct sq_softc),
126 sq_match, sq_attach, NULL, NULL);
127
128 #define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
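/*
 * ETHER_PAD_LEN is the smallest frame handed to the chip: the ethernet
 * minimum of 64 bytes (ETHER_MIN_LEN) less the 4-byte CRC appended in
 * hardware, i.e. 60 bytes.  Since the Seeq part is not run in auto-pad
 * mode (see TODO item 4 above), sq_start() pads short packets out to
 * this length itself by copying them into a fresh mbuf.
 */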
129
130 #define sq_seeq_read(sc, off) \
131 bus_space_read_1(sc->sc_regt, sc->sc_regh, off)
132 #define sq_seeq_write(sc, off, val) \
133 bus_space_write_1(sc->sc_regt, sc->sc_regh, off, val)
134
135 #define sq_hpc_read(sc, off) \
136 bus_space_read_4(sc->sc_hpct, sc->sc_hpch, off)
137 #define sq_hpc_write(sc, off, val) \
138 bus_space_write_4(sc->sc_hpct, sc->sc_hpch, off, val)
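/*
 * Register access helpers.  The Seeq 8003 registers are byte-wide and are
 * reached through the sc_regt/sc_regh bus space handle, while the HPC DMA
 * registers are 32 bits wide and live behind sc_hpct/sc_hpch.  A typical
 * use is selecting the Seeq address register bank:
 *
 *	sq_seeq_write(sc, SEEQ_TXCMD, TXCMD_BANK0);
 */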
139
140 /* MAC address offset for non-onboard implementations */
141 #define SQ_HPC_EEPROM_ENADDR 250
142
143 #define SGI_OUI_0 0x08
144 #define SGI_OUI_1 0x00
145 #define SGI_OUI_2 0x69
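/*
 * 08:00:69 is the IEEE OUI assigned to Silicon Graphics.  sq_attach()
 * uses it as a sanity check on the address read from the HPC EEPROM; if
 * the EEPROM contents don't begin with this prefix, the address is taken
 * from the ARCBIOS "eaddr" environment variable instead.
 */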
146
147 static int
148 sq_match(struct device *parent, struct cfdata *cf, void *aux)
149 {
150 struct hpc_attach_args *ha = aux;
151
152 if (strcmp(ha->ha_name, cf->cf_name) == 0)
153 return (1);
154
155 return (0);
156 }
157
158 static void
159 sq_attach(struct device *parent, struct device *self, void *aux)
160 {
161 int i, err;
162 const char* macaddr;
163 struct sq_softc *sc = (void *)self;
164 struct hpc_attach_args *haa = aux;
165 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
166
167 sc->sc_hpct = haa->ha_st;
168 sc->hpc_regs = haa->hpc_regs; /* HPC register definitions */
169
170 if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
171 haa->ha_dmaoff,
172 sc->hpc_regs->enet_regs_size,
173 &sc->sc_hpch)) != 0) {
174 printf(": unable to map HPC DMA registers, error = %d\n", err);
175 goto fail_0;
176 }
177
178 sc->sc_regt = haa->ha_st;
179 if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
180 haa->ha_devoff,
181 sc->hpc_regs->enet_devregs_size,
182 &sc->sc_regh)) != 0) {
183 printf(": unable to map Seeq registers, error = %d\n", err);
184 goto fail_0;
185 }
186
187 sc->sc_dmat = haa->ha_dmat;
188
189 if ((err = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct sq_control),
190 PAGE_SIZE, PAGE_SIZE, &sc->sc_cdseg,
191 1, &sc->sc_ncdseg, BUS_DMA_NOWAIT)) != 0) {
192 printf(": unable to allocate control data, error = %d\n", err);
193 goto fail_0;
194 }
195
196 if ((err = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg,
197 sizeof(struct sq_control),
198 (caddr_t *)&sc->sc_control,
199 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
200 printf(": unable to map control data, error = %d\n", err);
201 goto fail_1;
202 }
203
204 if ((err = bus_dmamap_create(sc->sc_dmat, sizeof(struct sq_control),
205 1, sizeof(struct sq_control), PAGE_SIZE,
206 BUS_DMA_NOWAIT, &sc->sc_cdmap)) != 0) {
207 printf(": unable to create DMA map for control data, error "
208 "= %d\n", err);
209 goto fail_2;
210 }
211
212 if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cdmap, sc->sc_control,
213 sizeof(struct sq_control),
214 NULL, BUS_DMA_NOWAIT)) != 0) {
215 printf(": unable to load DMA map for control data, error "
216 "= %d\n", err);
217 goto fail_3;
218 }
219
220 memset(sc->sc_control, 0, sizeof(struct sq_control));
221
222 /* Create transmit buffer DMA maps */
223 for (i = 0; i < SQ_NTXDESC; i++) {
224 if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
225 0, BUS_DMA_NOWAIT,
226 &sc->sc_txmap[i])) != 0) {
227 printf(": unable to create tx DMA map %d, error = %d\n",
228 i, err);
229 goto fail_4;
230 }
231 }
232
233 /* Create receive buffer DMA maps */
234 for (i = 0; i < SQ_NRXDESC; i++) {
235 if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
236 0, BUS_DMA_NOWAIT,
237 &sc->sc_rxmap[i])) != 0) {
238 printf(": unable to create rx DMA map %d, error = %d\n",
239 i, err);
240 goto fail_5;
241 }
242 }
243
244 /* Pre-allocate the receive buffers. */
245 for (i = 0; i < SQ_NRXDESC; i++) {
246 if ((err = sq_add_rxbuf(sc, i)) != 0) {
247 printf(": unable to allocate or map rx buffer %d, "
248 "error = %d\n", i, err);
249 goto fail_6;
250 }
251 }
252
253 memcpy(sc->sc_enaddr, &haa->hpc_eeprom[SQ_HPC_EEPROM_ENADDR],
254 ETHER_ADDR_LEN);
255
256 /*
257 * If our MAC address is bogus, obtain it from ARCBIOS. This will
258 * be the case for the onboard HPC3 on IP22, since there is no
259 * EEPROM; the DS1386 RTC's battery-backed RAM is used instead.
260 */
261 if (sc->sc_enaddr[0] != SGI_OUI_0 || sc->sc_enaddr[1] != SGI_OUI_1 ||
262 sc->sc_enaddr[2] != SGI_OUI_2) {
263 macaddr = ARCBIOS->GetEnvironmentVariable("eaddr");
264 if (macaddr == NULL) {
265 printf(": unable to get MAC address!\n");
266 goto fail_6;
267 }
268 enaddr_aton(macaddr, sc->sc_enaddr);
269 }
270
271 evcnt_attach_dynamic(&sc->sq_intrcnt, EVCNT_TYPE_INTR, NULL,
272 self->dv_xname, "intr");
273
274 if ((cpu_intr_establish(haa->ha_irq, IPL_NET, sq_intr, sc)) == NULL) {
275 printf(": unable to establish interrupt!\n");
276 goto fail_6;
277 }
278
279 /* Reset the chip to a known state. */
280 sq_reset(sc);
281
282 /*
283 * Determine if we're an 8003 or 80c03 by setting the first
284 * MAC address register to non-zero, and then reading it back.
285 * If it's zero, we have an 80c03, because we will have read
286 * the TxCollLSB register.
287 */
288 sq_seeq_write(sc, SEEQ_TXCOLLS0, 0xa5);
289 if (sq_seeq_read(sc, SEEQ_TXCOLLS0) == 0)
290 sc->sc_type = SQ_TYPE_80C03;
291 else
292 sc->sc_type = SQ_TYPE_8003;
293 sq_seeq_write(sc, SEEQ_TXCOLLS0, 0x00);
294
295 printf(": SGI Seeq %s\n",
296 sc->sc_type == SQ_TYPE_80C03 ? "80c03" : "8003");
297
298 printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
299 ether_sprintf(sc->sc_enaddr));
300
301 strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
302 ifp->if_softc = sc;
303 ifp->if_mtu = ETHERMTU;
304 ifp->if_init = sq_init;
305 ifp->if_stop = sq_stop;
306 ifp->if_start = sq_start;
307 ifp->if_ioctl = sq_ioctl;
308 ifp->if_watchdog = sq_watchdog;
309 ifp->if_flags = IFF_BROADCAST | IFF_NOTRAILERS | IFF_MULTICAST;
310 IFQ_SET_READY(&ifp->if_snd);
311
312 if_attach(ifp);
313 ether_ifattach(ifp, sc->sc_enaddr);
314
315 memset(&sc->sq_trace, 0, sizeof(sc->sq_trace));
316 /* Done! */
317 return;
318
319 /*
320 * Free any resources we've allocated during the failed attach
321 * attempt. Do this in reverse order and fall through.
322 */
323 fail_6:
324 for (i = 0; i < SQ_NRXDESC; i++) {
325 if (sc->sc_rxmbuf[i] != NULL) {
326 bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[i]);
327 m_freem(sc->sc_rxmbuf[i]);
328 }
329 }
330 fail_5:
331 for (i = 0; i < SQ_NRXDESC; i++) {
332 if (sc->sc_rxmap[i] != NULL)
333 bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmap[i]);
334 }
335 fail_4:
336 for (i = 0; i < SQ_NTXDESC; i++) {
337 if (sc->sc_txmap[i] != NULL)
338 bus_dmamap_destroy(sc->sc_dmat, sc->sc_txmap[i]);
339 }
340 bus_dmamap_unload(sc->sc_dmat, sc->sc_cdmap);
341 fail_3:
342 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdmap);
343 fail_2:
344 bus_dmamem_unmap(sc->sc_dmat, (caddr_t) sc->sc_control,
345 sizeof(struct sq_control));
346 fail_1:
347 bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg);
348 fail_0:
349 return;
350 }
351
352 /* Set up data to get the interface up and running. */
353 int
354 sq_init(struct ifnet *ifp)
355 {
356 int i;
357 struct sq_softc *sc = ifp->if_softc;
358
359 /* Cancel any in-progress I/O */
360 sq_stop(ifp, 0);
361
362 sc->sc_nextrx = 0;
363
364 sc->sc_nfreetx = SQ_NTXDESC;
365 sc->sc_nexttx = sc->sc_prevtx = 0;
366
367 SQ_TRACE(SQ_RESET, sc, 0, 0);
368
369 /* Set into 8003 mode, bank 0 to program ethernet address */
370 sq_seeq_write(sc, SEEQ_TXCMD, TXCMD_BANK0);
371
372 /* Now write the address */
373 for (i = 0; i < ETHER_ADDR_LEN; i++)
374 sq_seeq_write(sc, i, sc->sc_enaddr[i]);
375
376 sc->sc_rxcmd = RXCMD_IE_CRC |
377 RXCMD_IE_DRIB |
378 RXCMD_IE_SHORT |
379 RXCMD_IE_END |
380 RXCMD_IE_GOOD;
381
382 /*
383 * Set the receive filter -- this will add some bits to the
384 * prototype RXCMD register. Do this before setting the
385 * transmit config register, since we might need to switch
386 * banks.
387 */
388 sq_set_filter(sc);
389
390 /* Set up Seeq transmit command register */
391 sq_seeq_write(sc, SEEQ_TXCMD, TXCMD_IE_UFLOW |
392 TXCMD_IE_COLL |
393 TXCMD_IE_16COLL |
394 TXCMD_IE_GOOD);
395
396 /* Now write the receive command register. */
397 sq_seeq_write(sc, SEEQ_RXCMD, sc->sc_rxcmd);
398
399 /*
400 * Set up HPC ethernet PIO and DMA configurations.
401 *
402 * The PROM appears to do most of this for the onboard HPC3, but
403 * not for the Challenge S's IOPLUS chip. We copy how the onboard
404 * chip is configured and assume that it's correct for both.
405 */
406 if (sc->hpc_regs->revision == 3) {
407 u_int32_t dmareg, pioreg;
408
409 pioreg = HPC3_ENETR_PIOCFG_P1(1) |
410 HPC3_ENETR_PIOCFG_P2(6) |
411 HPC3_ENETR_PIOCFG_P3(1);
412
413 dmareg = HPC3_ENETR_DMACFG_D1(6) |
414 HPC3_ENETR_DMACFG_D2(2) |
415 HPC3_ENETR_DMACFG_D3(0) |
416 HPC3_ENETR_DMACFG_FIX_RXDC |
417 HPC3_ENETR_DMACFG_FIX_INTR |
418 HPC3_ENETR_DMACFG_FIX_EOP |
419 HPC3_ENETR_DMACFG_TIMEOUT;
420
421 sq_hpc_write(sc, HPC3_ENETR_PIOCFG, pioreg);
422 sq_hpc_write(sc, HPC3_ENETR_DMACFG, dmareg);
423 }
424
425 /* Pass the start of the receive ring to the HPC */
426 sq_hpc_write(sc, sc->hpc_regs->enetr_ndbp, SQ_CDRXADDR(sc, 0));
427
428 /* And turn on the HPC ethernet receive channel */
429 sq_hpc_write(sc, sc->hpc_regs->enetr_ctl,
430 sc->hpc_regs->enetr_ctl_active);
431
432 /*
433 * Turn off delayed receive interrupts on HPC1.
434 * (see Hollywood HPC Specification 2.1.4.3)
435 */
436 if (sc->hpc_regs->revision != 3)
437 sq_hpc_write(sc, HPC1_ENET_INTDELAY, HPC1_ENET_INTDELAY_OFF);
438
439 ifp->if_flags |= IFF_RUNNING;
440 ifp->if_flags &= ~IFF_OACTIVE;
441
442 return 0;
443 }
444
445 static void
446 sq_set_filter(struct sq_softc *sc)
447 {
448 struct ethercom *ec = &sc->sc_ethercom;
449 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
450 struct ether_multi *enm;
451 struct ether_multistep step;
452
453 /*
454 * Check for promiscuous mode. Also implies
455 * all-multicast.
456 */
457 if (ifp->if_flags & IFF_PROMISC) {
458 sc->sc_rxcmd |= RXCMD_REC_ALL;
459 ifp->if_flags |= IFF_ALLMULTI;
460 return;
461 }
462
463 /*
464 * The 8003 has no hash table. If we have any multicast
465 * addresses on the list, enable reception of all multicast
466 * frames.
467 *
468 * XXX The 80c03 has a hash table. We should use it.
469 */
470
471 ETHER_FIRST_MULTI(step, ec, enm);
472
473 if (enm == NULL) {
474 sc->sc_rxcmd &= ~RXCMD_REC_MASK;
475 sc->sc_rxcmd |= RXCMD_REC_BROAD;
476
477 ifp->if_flags &= ~IFF_ALLMULTI;
478 return;
479 }
480
481 sc->sc_rxcmd |= RXCMD_REC_MULTI;
482 ifp->if_flags |= IFF_ALLMULTI;
483 }
484
485 int
486 sq_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
487 {
488 int s, error = 0;
489
490 SQ_TRACE(SQ_IOCTL, (struct sq_softc *)ifp->if_softc, 0, 0);
491
492 s = splnet();
493
494 error = ether_ioctl(ifp, cmd, data);
495 if (error == ENETRESET) {
496 /*
497 * Multicast list has changed; set the hardware filter
498 * accordingly.
499 */
500 if (ifp->if_flags & IFF_RUNNING)
501 error = sq_init(ifp);
502 else
503 error = 0;
504 }
505
506 splx(s);
507 return (error);
508 }
509
510 void
511 sq_start(struct ifnet *ifp)
512 {
513 struct sq_softc *sc = ifp->if_softc;
514 u_int32_t status;
515 struct mbuf *m0, *m;
516 bus_dmamap_t dmamap;
517 int err, totlen, nexttx, firsttx, lasttx = -1, ofree, seg;
518
519 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
520 return;
521
522 /*
523 * Remember the previous number of free descriptors and
524 * the first descriptor we'll use.
525 */
526 ofree = sc->sc_nfreetx;
527 firsttx = sc->sc_nexttx;
528
529 /*
530 * Loop through the send queue, setting up transmit descriptors
531 * until we drain the queue, or use up all available transmit
532 * descriptors.
533 */
534 while (sc->sc_nfreetx != 0) {
535 /*
536 * Grab a packet off the queue.
537 */
538 IFQ_POLL(&ifp->if_snd, m0);
539 if (m0 == NULL)
540 break;
541 m = NULL;
542
543 dmamap = sc->sc_txmap[sc->sc_nexttx];
544
545 /*
546 * Load the DMA map. If this fails, the packet either
547 * didn't fit in the allotted number of segments, or we were
548 * short on resources. In this case, we'll copy and try
549 * again.
550 * Also copy it if we need to pad, so that we are sure there
551 * is room for the pad buffer.
552 * XXX the right way to do this is to use a static buffer
553 * for padding and add it to the transmit descriptor (see
554 * sys/dev/pci/if_tl.c for an example). We can't do this here yet
555 * because we can't send packets with more than one fragment.
556 */
557 if (m0->m_pkthdr.len < ETHER_PAD_LEN ||
558 bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
559 BUS_DMA_NOWAIT) != 0) {
560 MGETHDR(m, M_DONTWAIT, MT_DATA);
561 if (m == NULL) {
562 printf("%s: unable to allocate Tx mbuf\n",
563 sc->sc_dev.dv_xname);
564 break;
565 }
566 if (m0->m_pkthdr.len > MHLEN) {
567 MCLGET(m, M_DONTWAIT);
568 if ((m->m_flags & M_EXT) == 0) {
569 printf("%s: unable to allocate Tx "
570 "cluster\n", sc->sc_dev.dv_xname);
571 m_freem(m);
572 break;
573 }
574 }
575
576 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
577 if (m0->m_pkthdr.len < ETHER_PAD_LEN) {
578 memset(mtod(m, char *) + m0->m_pkthdr.len, 0,
579 ETHER_PAD_LEN - m0->m_pkthdr.len);
580 m->m_pkthdr.len = m->m_len = ETHER_PAD_LEN;
581 } else
582 m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
583
584 if ((err = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
585 m, BUS_DMA_NOWAIT)) != 0) {
586 printf("%s: unable to load Tx buffer, "
587 "error = %d\n", sc->sc_dev.dv_xname, err);
588 break;
589 }
590 }
591
592 /*
593 * Ensure we have enough descriptors free to describe
594 * the packet.
595 */
596 if (dmamap->dm_nsegs > sc->sc_nfreetx) {
597 /*
598 * Not enough free descriptors to transmit this
599 * packet. We haven't committed to anything yet,
600 * so just unload the DMA map, put the packet
601 * back on the queue, and punt. Notify the upper
602 * layer that there are no more slots left.
603 *
604 * XXX We could allocate an mbuf and copy, but
605 * XXX is it worth it?
606 */
607 ifp->if_flags |= IFF_OACTIVE;
608 bus_dmamap_unload(sc->sc_dmat, dmamap);
609 if (m != NULL)
610 m_freem(m);
611 break;
612 }
613
614 IFQ_DEQUEUE(&ifp->if_snd, m0);
615 #if NBPFILTER > 0
616 /*
617 * Pass the packet to any BPF listeners.
618 */
619 if (ifp->if_bpf)
620 bpf_mtap(ifp->if_bpf, m0);
621 #endif /* NBPFILTER > 0 */
622 if (m != NULL) {
623 m_freem(m0);
624 m0 = m;
625 }
626
627 /*
628 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
629 */
630
631 SQ_TRACE(SQ_ENQUEUE, sc, sc->sc_nexttx, 0);
632
633 /* Sync the DMA map. */
634 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
635 BUS_DMASYNC_PREWRITE);
636
637 /*
638 * Initialize the transmit descriptors.
639 */
640 for (nexttx = sc->sc_nexttx, seg = 0, totlen = 0;
641 seg < dmamap->dm_nsegs;
642 seg++, nexttx = SQ_NEXTTX(nexttx)) {
643 if (sc->hpc_regs->revision == 3) {
644 sc->sc_txdesc[nexttx].hpc3_hdd_bufptr =
645 dmamap->dm_segs[seg].ds_addr;
646 sc->sc_txdesc[nexttx].hpc3_hdd_ctl =
647 dmamap->dm_segs[seg].ds_len;
648 } else {
649 sc->sc_txdesc[nexttx].hpc1_hdd_bufptr =
650 dmamap->dm_segs[seg].ds_addr;
651 sc->sc_txdesc[nexttx].hpc1_hdd_ctl =
652 dmamap->dm_segs[seg].ds_len;
653 }
654 sc->sc_txdesc[nexttx].hdd_descptr=
655 SQ_CDTXADDR(sc, SQ_NEXTTX(nexttx));
656 lasttx = nexttx;
657 totlen += dmamap->dm_segs[seg].ds_len;
658 }
659
660 /* Last descriptor gets end-of-packet */
661 KASSERT(lasttx != -1);
662 if (sc->hpc_regs->revision == 3)
663 sc->sc_txdesc[lasttx].hpc3_hdd_ctl |=
664 HPC3_HDD_CTL_EOPACKET;
665 else
666 sc->sc_txdesc[lasttx].hpc1_hdd_ctl |=
667 HPC1_HDD_CTL_EOPACKET;
668
669 SQ_DPRINTF(("%s: transmit %d-%d, len %d\n", sc->sc_dev.dv_xname,
670 sc->sc_nexttx, lasttx,
671 totlen));
672
673 if (ifp->if_flags & IFF_DEBUG) {
674 printf(" transmit chain:\n");
675 for (seg = sc->sc_nexttx;; seg = SQ_NEXTTX(seg)) {
676 printf(" descriptor %d:\n", seg);
677 printf(" hdd_bufptr: 0x%08x\n",
678 (sc->hpc_regs->revision == 3) ?
679 sc->sc_txdesc[seg].hpc3_hdd_bufptr :
680 sc->sc_txdesc[seg].hpc1_hdd_bufptr);
681 printf(" hdd_ctl: 0x%08x\n",
682 (sc->hpc_regs->revision == 3) ?
683 sc->sc_txdesc[seg].hpc3_hdd_ctl:
684 sc->sc_txdesc[seg].hpc1_hdd_ctl);
685 printf(" hdd_descptr: 0x%08x\n",
686 sc->sc_txdesc[seg].hdd_descptr);
687
688 if (seg == lasttx)
689 break;
690 }
691 }
692
693 /* Sync the descriptors we're using. */
694 SQ_CDTXSYNC(sc, sc->sc_nexttx, dmamap->dm_nsegs,
695 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
696
697 /* Store a pointer to the packet so we can free it later */
698 sc->sc_txmbuf[sc->sc_nexttx] = m0;
699
700 /* Advance the tx pointer. */
701 sc->sc_nfreetx -= dmamap->dm_nsegs;
702 sc->sc_nexttx = nexttx;
703 }
704
705 /* All transmit descriptors used up, let upper layers know */
706 if (sc->sc_nfreetx == 0)
707 ifp->if_flags |= IFF_OACTIVE;
708
709 if (sc->sc_nfreetx != ofree) {
710 SQ_DPRINTF(("%s: %d packets enqueued, first %d, INTR on %d\n",
711 sc->sc_dev.dv_xname, lasttx - firsttx + 1,
712 firsttx, lasttx));
713
714 /*
715 * Cause a transmit interrupt to happen on the
716 * last packet we enqueued, and mark it as the
717 * last descriptor.
718 *
719 * HPC1_HDD_CTL_INTR will generate an interrupt on
720 * HPC1. HPC3 requires HPC3_HDD_CTL_EOPACKET in
721 * addition to HPC3_HDD_CTL_INTR to interrupt.
722 */
723 KASSERT(lasttx != -1);
724 if (sc->hpc_regs->revision == 3) {
725 sc->sc_txdesc[lasttx].hpc3_hdd_ctl |=
726 HPC3_HDD_CTL_INTR | HPC3_HDD_CTL_EOCHAIN;
727 } else {
728 sc->sc_txdesc[lasttx].hpc1_hdd_ctl |= HPC1_HDD_CTL_INTR;
729 sc->sc_txdesc[lasttx].hpc1_hdd_bufptr |=
730 HPC1_HDD_CTL_EOCHAIN;
731 }
732
733 SQ_CDTXSYNC(sc, lasttx, 1,
734 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
735
736 /*
737 * There is a potential race condition here if the HPC
738 * DMA channel is active and we try to update either
739 * the 'next descriptor' pointer in the HPC PIO space
740 * or the 'next descriptor' pointer in a previous desc-
741 * riptor.
742 *
743 * To avoid this, if the channel is active, we rely on
744 * the transmit interrupt routine noticing that there
745 * are more packets to send and restarting the HPC DMA
746 * engine, rather than mucking with the DMA state here.
747 */
748 status = sq_hpc_read(sc, sc->hpc_regs->enetx_ctl);
749
750 if ((status & sc->hpc_regs->enetx_ctl_active) != 0) {
751 SQ_TRACE(SQ_ADD_TO_DMA, sc, firsttx, status);
752
753 /*
754 * NB: hpc3_hdd_ctl == hpc1_hdd_bufptr, and
755 * HPC1_HDD_CTL_EOCHAIN == HPC3_HDD_CTL_EOCHAIN
756 */
757 sc->sc_txdesc[SQ_PREVTX(firsttx)].hpc3_hdd_ctl &=
758 ~HPC3_HDD_CTL_EOCHAIN;
759
760 if (sc->hpc_regs->revision != 3)
761 sc->sc_txdesc[SQ_PREVTX(firsttx)].hpc1_hdd_ctl
762 &= ~HPC1_HDD_CTL_INTR;
763
764 SQ_CDTXSYNC(sc, SQ_PREVTX(firsttx), 1,
765 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
766 } else if (sc->hpc_regs->revision == 3) {
767 SQ_TRACE(SQ_START_DMA, sc, firsttx, status);
768
769 sq_hpc_write(sc, HPC3_ENETX_NDBP, SQ_CDTXADDR(sc,
770 firsttx));
771
772 /* Kick DMA channel into life */
773 sq_hpc_write(sc, HPC3_ENETX_CTL, HPC3_ENETX_CTL_ACTIVE);
774 } else {
775 /*
776 * In the HPC1 case where transmit DMA is
777 * inactive, we can either kick off if
778 * the ring was previously empty, or call
779 * our transmit interrupt handler to
780 * figure out if the ring stopped short
781 * and restart at the right place.
782 */
783 if (ofree == SQ_NTXDESC) {
784 SQ_TRACE(SQ_START_DMA, sc, firsttx, status);
785
786 sq_hpc_write(sc, HPC1_ENETX_NDBP,
787 SQ_CDTXADDR(sc, firsttx));
788 sq_hpc_write(sc, HPC1_ENETX_CFXBP,
789 SQ_CDTXADDR(sc, firsttx));
790 sq_hpc_write(sc, HPC1_ENETX_CBP,
791 SQ_CDTXADDR(sc, firsttx));
792
793 /* Kick DMA channel into life */
794 sq_hpc_write(sc, HPC1_ENETX_CTL,
795 HPC1_ENETX_CTL_ACTIVE);
796 } else
797 sq_txring_hpc1(sc);
798 }
799
800 /* Set a watchdog timer in case the chip flakes out. */
801 ifp->if_timer = 5;
802 }
803 }
804
805 void
806 sq_stop(struct ifnet *ifp, int disable)
807 {
808 int i;
809 struct sq_softc *sc = ifp->if_softc;
810
811 for (i = 0; i < SQ_NTXDESC; i++) {
812 if (sc->sc_txmbuf[i] != NULL) {
813 bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
814 m_freem(sc->sc_txmbuf[i]);
815 sc->sc_txmbuf[i] = NULL;
816 }
817 }
818
819 /* Clear Seeq transmit/receive command registers */
820 sq_seeq_write(sc, SEEQ_TXCMD, 0);
821 sq_seeq_write(sc, SEEQ_RXCMD, 0);
822
823 sq_reset(sc);
824
825 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
826 ifp->if_timer = 0;
827 }
828
829 /* Device timeout/watchdog routine. */
830 void
831 sq_watchdog(struct ifnet *ifp)
832 {
833 u_int32_t status;
834 struct sq_softc *sc = ifp->if_softc;
835
836 status = sq_hpc_read(sc, sc->hpc_regs->enetx_ctl);
837 log(LOG_ERR, "%s: device timeout (prev %d, next %d, free %d, "
838 "status %08x)\n", sc->sc_dev.dv_xname, sc->sc_prevtx,
839 sc->sc_nexttx, sc->sc_nfreetx, status);
840
841 sq_trace_dump(sc);
842
843 memset(&sc->sq_trace, 0, sizeof(sc->sq_trace));
844 sc->sq_trace_idx = 0;
845
846 ++ifp->if_oerrors;
847
848 sq_init(ifp);
849 }
850
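/*
 * Dump the driver's trace ring.  SQ_TRACE() calls sprinkled through the
 * transmit and interrupt paths record an action code, buffer index,
 * free-descriptor count, status word and source line; sq_watchdog()
 * prints the ring via this routine when the device times out, then
 * clears it.
 */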
851 static void
852 sq_trace_dump(struct sq_softc *sc)
853 {
854 int i;
855 const char *act;
856
857 for (i = 0; i < sc->sq_trace_idx; i++) {
858 switch (sc->sq_trace[i].action) {
859 case SQ_RESET: act = "SQ_RESET"; break;
860 case SQ_ADD_TO_DMA: act = "SQ_ADD_TO_DMA"; break;
861 case SQ_START_DMA: act = "SQ_START_DMA"; break;
862 case SQ_DONE_DMA: act = "SQ_DONE_DMA"; break;
863 case SQ_RESTART_DMA: act = "SQ_RESTART_DMA"; break;
864 case SQ_TXINTR_ENTER: act = "SQ_TXINTR_ENTER"; break;
865 case SQ_TXINTR_EXIT: act = "SQ_TXINTR_EXIT"; break;
866 case SQ_TXINTR_BUSY: act = "SQ_TXINTR_BUSY"; break;
867 case SQ_IOCTL: act = "SQ_IOCTL"; break;
868 case SQ_ENQUEUE: act = "SQ_ENQUEUE"; break;
869 default: act = "UNKNOWN";
870 }
871
872 printf("%s: [%03d] action %-16s buf %03d free %03d "
873 "status %08x line %d\n", sc->sc_dev.dv_xname, i, act,
874 sc->sq_trace[i].bufno, sc->sq_trace[i].freebuf,
875 sc->sq_trace[i].status, sc->sq_trace[i].line);
876 }
877 }
878
879 static int
880 sq_intr(void * arg)
881 {
882 struct sq_softc *sc = arg;
883 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
884 int handled = 0;
885 u_int32_t stat;
886
887 stat = sq_hpc_read(sc, sc->hpc_regs->enetr_reset);
888
889 if ((stat & 2) == 0)
890 SQ_DPRINTF(("%s: Unexpected interrupt!\n",
891 sc->sc_dev.dv_xname));
892 else
893 sq_hpc_write(sc, sc->hpc_regs->enetr_reset, (stat | 2));
894
895 /*
896 * If the interface isn't running, the interrupt couldn't
897 * possibly have come from us.
898 */
899 if ((ifp->if_flags & IFF_RUNNING) == 0)
900 return 0;
901
902 sc->sq_intrcnt.ev_count++;
903
904 /* Always check for received packets */
905 if (sq_rxintr(sc) != 0)
906 handled++;
907
908 /* Only handle transmit interrupts if we actually sent something */
909 if (sc->sc_nfreetx < SQ_NTXDESC) {
910 sq_txintr(sc);
911 handled++;
912 }
913
914 #if NRND > 0
915 if (handled)
916 rnd_add_uint32(&sc->rnd_source, stat);
917 #endif
918 return (handled);
919 }
920
921 static int
922 sq_rxintr(struct sq_softc *sc)
923 {
924 int count = 0;
925 struct mbuf* m;
926 int i, framelen;
927 u_int8_t pktstat;
928 u_int32_t status;
929 u_int32_t ctl_reg;
930 int new_end, orig_end;
931 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
932
933 for (i = sc->sc_nextrx;; i = SQ_NEXTRX(i)) {
934 SQ_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD |
935 BUS_DMASYNC_POSTWRITE);
936
937 /*
938 * If this is a CPU-owned buffer, we're at the end of the list.
939 */
940 if (sc->hpc_regs->revision == 3)
941 ctl_reg = sc->sc_rxdesc[i].hpc3_hdd_ctl &
942 HPC3_HDD_CTL_OWN;
943 else
944 ctl_reg = sc->sc_rxdesc[i].hpc1_hdd_ctl &
945 HPC1_HDD_CTL_OWN;
946
947 if (ctl_reg) {
948 #if defined(SQ_DEBUG)
949 u_int32_t reg;
950
951 reg = sq_hpc_read(sc, sc->hpc_regs->enetr_ctl);
952 SQ_DPRINTF(("%s: rxintr: done at %d (ctl %08x)\n",
953 sc->sc_dev.dv_xname, i, reg));
954 #endif
955 break;
956 }
957
958 count++;
959
960 m = sc->sc_rxmbuf[i];
961 framelen = m->m_ext.ext_size - 3;
962 if (sc->hpc_regs->revision == 3)
963 framelen -=
964 HPC3_HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hpc3_hdd_ctl);
965 else
966 framelen -=
967 HPC1_HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hpc1_hdd_ctl);
968
969 /* Now sync the actual packet data */
970 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
971 sc->sc_rxmap[i]->dm_mapsize, BUS_DMASYNC_POSTREAD);
972
973 pktstat = *((u_int8_t*)m->m_data + framelen + 2);
974
975 if ((pktstat & RXSTAT_GOOD) == 0) {
976 ifp->if_ierrors++;
977
978 if (pktstat & RXSTAT_OFLOW)
979 printf("%s: receive FIFO overflow\n",
980 sc->sc_dev.dv_xname);
981
982 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
983 sc->sc_rxmap[i]->dm_mapsize,
984 BUS_DMASYNC_PREREAD);
985 SQ_INIT_RXDESC(sc, i);
986 SQ_DPRINTF(("%s: sq_rxintr: buf %d no RXSTAT_GOOD\n",
987 sc->sc_dev.dv_xname, i));
988 continue;
989 }
990
991 if (sq_add_rxbuf(sc, i) != 0) {
992 ifp->if_ierrors++;
993 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
994 sc->sc_rxmap[i]->dm_mapsize,
995 BUS_DMASYNC_PREREAD);
996 SQ_INIT_RXDESC(sc, i);
997 SQ_DPRINTF(("%s: sq_rxintr: buf %d sq_add_rxbuf() "
998 "failed\n", sc->sc_dev.dv_xname, i));
999 continue;
1000 }
1001
1002
1003 m->m_data += 2;
1004 m->m_pkthdr.rcvif = ifp;
1005 m->m_pkthdr.len = m->m_len = framelen;
1006
1007 ifp->if_ipackets++;
1008
1009 SQ_DPRINTF(("%s: sq_rxintr: buf %d len %d\n",
1010 sc->sc_dev.dv_xname, i, framelen));
1011
1012 #if NBPFILTER > 0
1013 if (ifp->if_bpf)
1014 bpf_mtap(ifp->if_bpf, m);
1015 #endif
1016 (*ifp->if_input)(ifp, m);
1017 }
1018
1019
1020 /* If anything happened, move ring start/end pointers to new spot */
1021 if (i != sc->sc_nextrx) {
1022 /*
1023 * NB: hpc3_hdd_ctl == hpc1_hdd_bufptr, and
1024 * HPC1_HDD_CTL_EOCHAIN == HPC3_HDD_CTL_EOCHAIN
1025 */
1026
1027 new_end = SQ_PREVRX(i);
1028 sc->sc_rxdesc[new_end].hpc3_hdd_ctl |= HPC3_HDD_CTL_EOCHAIN;
1029 SQ_CDRXSYNC(sc, new_end, BUS_DMASYNC_PREREAD |
1030 BUS_DMASYNC_PREWRITE);
1031
1032 orig_end = SQ_PREVRX(sc->sc_nextrx);
1033 sc->sc_rxdesc[orig_end].hpc3_hdd_ctl &= ~HPC3_HDD_CTL_EOCHAIN;
1034 SQ_CDRXSYNC(sc, orig_end, BUS_DMASYNC_PREREAD |
1035 BUS_DMASYNC_PREWRITE);
1036
1037 sc->sc_nextrx = i;
1038 }
1039
1040 status = sq_hpc_read(sc, sc->hpc_regs->enetr_ctl);
1041
1042 /* If receive channel is stopped, restart it... */
1043 if ((status & sc->hpc_regs->enetr_ctl_active) == 0) {
1044 /* Pass the start of the receive ring to the HPC */
1045 sq_hpc_write(sc, sc->hpc_regs->enetr_ndbp, SQ_CDRXADDR(sc,
1046 sc->sc_nextrx));
1047
1048 /* And turn on the HPC ethernet receive channel */
1049 sq_hpc_write(sc, sc->hpc_regs->enetr_ctl,
1050 sc->hpc_regs->enetr_ctl_active);
1051 }
1052
1053 return count;
1054 }
1055
1056 static int
1057 sq_txintr(struct sq_softc *sc)
1058 {
1059 int shift = 0;
1060 u_int32_t status, tmp;
1061 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1062
1063 if (sc->hpc_regs->revision != 3)
1064 shift = 16;
1065
1066 status = sq_hpc_read(sc, sc->hpc_regs->enetx_ctl) >> shift;
1067
1068 SQ_TRACE(SQ_TXINTR_ENTER, sc, sc->sc_prevtx, status);
1069
1070 tmp = (sc->hpc_regs->enetx_ctl_active >> shift) | TXSTAT_GOOD;
1071 if ((status & tmp) == 0) {
1072 if (status & TXSTAT_COLL)
1073 ifp->if_collisions++;
1074
1075 if (status & TXSTAT_UFLOW) {
1076 printf("%s: transmit underflow\n", sc->sc_dev.dv_xname);
1077 ifp->if_oerrors++;
1078 }
1079
1080 if (status & TXSTAT_16COLL) {
1081 printf("%s: max collisions reached\n",
1082 sc->sc_dev.dv_xname);
1083 ifp->if_oerrors++;
1084 ifp->if_collisions += 16;
1085 }
1086 }
1087
1088 /* prevtx now points to next xmit packet not yet finished */
1089 if (sc->hpc_regs->revision == 3)
1090 sq_txring_hpc3(sc);
1091 else
1092 sq_txring_hpc1(sc);
1093
1094 /* If we have buffers free, let upper layers know */
1095 if (sc->sc_nfreetx > 0)
1096 ifp->if_flags &= ~IFF_OACTIVE;
1097
1098 /* If all packets have left the coop, cancel watchdog */
1099 if (sc->sc_nfreetx == SQ_NTXDESC)
1100 ifp->if_timer = 0;
1101
1102 SQ_TRACE(SQ_TXINTR_EXIT, sc, sc->sc_prevtx, status);
1103 sq_start(ifp);
1104
1105 return 1;
1106 }
1107
1108 /*
1109 * Reclaim used transmit descriptors and restart the transmit DMA
1110 * engine if necessary.
1111 */
1112 static void
1113 sq_txring_hpc1(struct sq_softc *sc)
1114 {
1115 /*
1116 * HPC1 doesn't tag transmitted descriptors; however,
1117 * the NDBP register points to the next descriptor that
1118 * has not yet been processed. If DMA is not in progress,
1119 * we can safely reclaim all descriptors up to NDBP, and,
1120 * if necessary, restart DMA at NDBP. Otherwise, if DMA
1121 * is active, we can only safely reclaim up to CBP.
1122 *
1123 * For now, we'll only reclaim on inactive DMA and assume
1124 * that a sufficiently large ring keeps us out of trouble.
1125 */
1126 u_int32_t reclaimto, status;
1127 int reclaimall, i = sc->sc_prevtx;
1128 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1129
1130 status = sq_hpc_read(sc, HPC1_ENETX_CTL);
1131 if (status & HPC1_ENETX_CTL_ACTIVE) {
1132 SQ_TRACE(SQ_TXINTR_BUSY, sc, i, status);
1133 return;
1134 } else
1135 reclaimto = sq_hpc_read(sc, HPC1_ENETX_NDBP);
1136
1137 if (sc->sc_nfreetx == 0 && SQ_CDTXADDR(sc, i) == reclaimto)
1138 reclaimall = 1;
1139 else
1140 reclaimall = 0;
1141
1142 while (sc->sc_nfreetx < SQ_NTXDESC) {
1143 if (SQ_CDTXADDR(sc, i) == reclaimto && !reclaimall)
1144 break;
1145
1146 SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs,
1147 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1148
1149 /* Sync the packet data, unload DMA map, free mbuf */
1150 bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap[i], 0,
1151 sc->sc_txmap[i]->dm_mapsize,
1152 BUS_DMASYNC_POSTWRITE);
1153 bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
1154 m_freem(sc->sc_txmbuf[i]);
1155 sc->sc_txmbuf[i] = NULL;
1156
1157 ifp->if_opackets++;
1158 sc->sc_nfreetx++;
1159
1160 SQ_TRACE(SQ_DONE_DMA, sc, i, status);
1161
1162 i = SQ_NEXTTX(i);
1163 }
1164
1165 if (sc->sc_nfreetx < SQ_NTXDESC) {
1166 SQ_TRACE(SQ_RESTART_DMA, sc, i, status);
1167
1168 KASSERT(reclaimto == SQ_CDTXADDR(sc, i));
1169
1170 sq_hpc_write(sc, HPC1_ENETX_CFXBP, reclaimto);
1171 sq_hpc_write(sc, HPC1_ENETX_CBP, reclaimto);
1172
1173 /* Kick DMA channel into life */
1174 sq_hpc_write(sc, HPC1_ENETX_CTL, HPC1_ENETX_CTL_ACTIVE);
1175
1176 /*
1177 * Set a watchdog timer in case the chip
1178 * flakes out.
1179 */
1180 ifp->if_timer = 5;
1181 }
1182
1183 sc->sc_prevtx = i;
1184 }
1185
1186 /*
1187 * Reclaim used transmit descriptors and restart the transmit DMA
1188 * engine if necessary.
1189 */
1190 static void
1191 sq_txring_hpc3(struct sq_softc *sc)
1192 {
1193 /*
1194 * HPC3 tags descriptors with a bit once they've been
1195 * transmitted. We need only free each XMITDONE'd
1196 * descriptor, and restart the DMA engine if any
1197 * descriptors are left over.
1198 */
1199 int i;
1200 u_int32_t status = 0;
1201 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1202
1203 i = sc->sc_prevtx;
1204 while (sc->sc_nfreetx < SQ_NTXDESC) {
1205 /*
1206 * Check status first so we don't end up with a case of
1207 * the buffer not being finished while the DMA channel
1208 * has gone idle.
1209 */
1210 status = sq_hpc_read(sc, HPC3_ENETX_CTL);
1211
1212 SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs,
1213 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1214
1215 /* Check for used descriptor and restart DMA chain if needed */
1216 if (!(sc->sc_txdesc[i].hpc3_hdd_ctl & HPC3_HDD_CTL_XMITDONE)) {
1217 if ((status & HPC3_ENETX_CTL_ACTIVE) == 0) {
1218 SQ_TRACE(SQ_RESTART_DMA, sc, i, status);
1219
1220 sq_hpc_write(sc, HPC3_ENETX_NDBP,
1221 SQ_CDTXADDR(sc, i));
1222
1223 /* Kick DMA channel into life */
1224 sq_hpc_write(sc, HPC3_ENETX_CTL,
1225 HPC3_ENETX_CTL_ACTIVE);
1226
1227 /*
1228 * Set a watchdog timer in case the chip
1229 * flakes out.
1230 */
1231 ifp->if_timer = 5;
1232 } else
1233 SQ_TRACE(SQ_TXINTR_BUSY, sc, i, status);
1234 break;
1235 }
1236
1237 /* Sync the packet data, unload DMA map, free mbuf */
1238 bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap[i], 0,
1239 sc->sc_txmap[i]->dm_mapsize,
1240 BUS_DMASYNC_POSTWRITE);
1241 bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
1242 m_freem(sc->sc_txmbuf[i]);
1243 sc->sc_txmbuf[i] = NULL;
1244
1245 ifp->if_opackets++;
1246 sc->sc_nfreetx++;
1247
1248 SQ_TRACE(SQ_DONE_DMA, sc, i, status);
1249 i = SQ_NEXTTX(i);
1250 }
1251
1252 sc->sc_prevtx = i;
1253 }
1254
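/*
 * Reset the HPC ethernet channels: stop receive and transmit DMA, then
 * pulse the ethernet reset register (write the reset bits, wait ~20us,
 * write zero to release).
 */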
1255 void
1256 sq_reset(struct sq_softc *sc)
1257 {
1258 /* Stop HPC dma channels */
1259 sq_hpc_write(sc, sc->hpc_regs->enetr_ctl, 0);
1260 sq_hpc_write(sc, sc->hpc_regs->enetx_ctl, 0);
1261
1262 sq_hpc_write(sc, sc->hpc_regs->enetr_reset, 3);
1263 delay(20);
1264 sq_hpc_write(sc, sc->hpc_regs->enetr_reset, 0);
1265 }
1266
1267 /* sq_add_rxbuf: Add a receive buffer to the indicated descriptor. */
1268 int
1269 sq_add_rxbuf(struct sq_softc *sc, int idx)
1270 {
1271 int err;
1272 struct mbuf *m;
1273
1274 MGETHDR(m, M_DONTWAIT, MT_DATA);
1275 if (m == NULL)
1276 return (ENOBUFS);
1277
1278 MCLGET(m, M_DONTWAIT);
1279 if ((m->m_flags & M_EXT) == 0) {
1280 m_freem(m);
1281 return (ENOBUFS);
1282 }
1283
1284 if (sc->sc_rxmbuf[idx] != NULL)
1285 bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[idx]);
1286
1287 sc->sc_rxmbuf[idx] = m;
1288
1289 if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_rxmap[idx],
1290 m->m_ext.ext_buf, m->m_ext.ext_size,
1291 NULL, BUS_DMA_NOWAIT)) != 0) {
1292 printf("%s: can't load rx DMA map %d, error = %d\n",
1293 sc->sc_dev.dv_xname, idx, err);
1294 panic("sq_add_rxbuf"); /* XXX */
1295 }
1296
1297 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[idx], 0,
1298 sc->sc_rxmap[idx]->dm_mapsize, BUS_DMASYNC_PREREAD);
1299
1300 SQ_INIT_RXDESC(sc, idx);
1301
1302 return 0;
1303 }
1304
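/*
 * Debug helper: hex-dump `len' bytes starting at physical address `addr',
 * accessed through the uncached KSEG1 window.  Not called from the normal
 * driver paths; kept around for debugging.
 */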
1305 void
1306 sq_dump_buffer(u_int32_t addr, u_int32_t len)
1307 {
1308 u_int i;
1309 u_char *physaddr = (u_char *)MIPS_PHYS_TO_KSEG1((caddr_t)addr);
1310
1311 if (len == 0)
1312 return;
1313
1314 printf("%p: ", physaddr);
1315
1316 for (i = 0; i < len; i++) {
1317 printf("%02x ", *(physaddr + i) & 0xff);
1318 if ((i % 16) == 15 && i != len - 1)
1319 printf("\n%p: ", physaddr + i);
1320 }
1321
1322 printf("\n");
1323 }
1324
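/*
 * Parse a colon-separated ethernet address string, such as the ARCBIOS
 * "eaddr" environment variable (e.g. "08:00:69:0a:1b:2c"), into a 6-byte
 * binary address.  The input is assumed to be well formed; no error
 * checking is performed.
 */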
1325 void
1326 enaddr_aton(const char* str, u_int8_t* eaddr)
1327 {
1328 int i;
1329 char c;
1330
1331 for (i = 0; i < ETHER_ADDR_LEN; i++) {
1332 if (*str == ':')
1333 str++;
1334
1335 c = *str++;
1336 if (isdigit(c)) {
1337 eaddr[i] = (c - '0');
1338 } else if (isxdigit(c)) {
1339 eaddr[i] = (toupper(c) + 10 - 'A');
1340 }
1341
1342 c = *str++;
1343 if (isdigit(c)) {
1344 eaddr[i] = (eaddr[i] << 4) | (c - '0');
1345 } else if (isxdigit(c)) {
1346 eaddr[i] = (eaddr[i] << 4) | (toupper(c) + 10 - 'A');
1347 }
1348 }
1349 }
1350