/*	$NetBSD: if_sq.c,v 1.36 2010/04/05 07:19:31 joerg Exp $	*/

/*
 * Copyright (c) 2001 Rafal K. Boni
 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Portions of this code are derived from software contributed to The
 * NetBSD Foundation by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_sq.c,v 1.36 2010/04/05 07:19:31 joerg Exp $");


#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>

#include <machine/endian.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/sysconf.h>

#include <dev/ic/seeq8003reg.h>

#include <sgimips/hpc/sqvar.h>
#include <sgimips/hpc/hpcvar.h>
#include <sgimips/hpc/hpcreg.h>

#include <dev/arcbios/arcbios.h>
#include <dev/arcbios/arcbiosvar.h>

#define static
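/*
 * Note: the define above compiles away the 'static' on the function
 * definitions below, presumably so that their symbols remain visible
 * to the kernel debugger; the prototypes further down still carry
 * the keyword.
 */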

/*
 * Short TODO list:
 *	(1) Do counters for bad-RX packets.
 *	(2) Allow multi-segment transmits, instead of copying to a single,
 *	    contiguous mbuf.
 *	(3) Verify sq_stop() turns off enough stuff; I was still getting
 *	    seeq interrupts after sq_stop().
 *	(4) Implement EDLC modes: especially packet auto-pad and simplex
 *	    mode.
 *	(5) Should the driver filter out its own transmissions in non-EDLC
 *	    mode?
 *	(6) Multicast support -- multicast filter, address management, ...
 *	(7) Deal with RB0 (recv buffer overflow) on reception.  Will need
 *	    to figure out whether RB0 is read-only, as stated in one spot
 *	    in the HPC spec, or read-write (i.e., is writing a one to
 *	    clear it the correct thing to do?).
 */

#if defined(SQ_DEBUG)
int sq_debug = 0;
#define SQ_DPRINTF(x) if (sq_debug) printf x
#else
#define SQ_DPRINTF(x)
#endif
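/*
 * Note that SQ_DPRINTF takes a doubly-parenthesized argument list so
 * that the macro can expand to a plain printf call, e.g.:
 *	SQ_DPRINTF(("%s: rxintr: done at %d\n", sc->sc_dev.dv_xname, i));
 */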

static int	sq_match(struct device *, struct cfdata *, void *);
static void	sq_attach(struct device *, struct device *, void *);
static int	sq_init(struct ifnet *);
static void	sq_start(struct ifnet *);
static void	sq_stop(struct ifnet *, int);
static void	sq_watchdog(struct ifnet *);
static int	sq_ioctl(struct ifnet *, u_long, void *);

static void	sq_set_filter(struct sq_softc *);
static int	sq_intr(void *);
static int	sq_rxintr(struct sq_softc *);
static int	sq_txintr(struct sq_softc *);
static void	sq_txring_hpc1(struct sq_softc *);
static void	sq_txring_hpc3(struct sq_softc *);
static void	sq_reset(struct sq_softc *);
static int	sq_add_rxbuf(struct sq_softc *, int);
static void	sq_dump_buffer(paddr_t addr, psize_t len);
static void	sq_trace_dump(struct sq_softc *);

static void	enaddr_aton(const char*, u_int8_t*);

CFATTACH_DECL(sq, sizeof(struct sq_softc),
    sq_match, sq_attach, NULL, NULL);

#define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
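/*
 * ETHER_PAD_LEN is the minimum Ethernet frame length less the CRC,
 * i.e. 60 bytes: sq_start() zero-pads shorter frames to this length,
 * since the chip's auto-pad feature is not used (see TODO item 4).
 */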

#define sq_seeq_read(sc, off) \
	bus_space_read_1(sc->sc_regt, sc->sc_regh, off)
#define sq_seeq_write(sc, off, val) \
	bus_space_write_1(sc->sc_regt, sc->sc_regh, off, val)

#define sq_hpc_read(sc, off) \
	bus_space_read_4(sc->sc_hpct, sc->sc_hpch, off)
#define sq_hpc_write(sc, off, val) \
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, off, val)
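/*
 * The Seeq chip's registers are byte-wide, while the HPC's DMA and
 * control registers are 32 bits wide, hence the _1 vs. _4 accessors.
 */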

/* MAC address offset for non-onboard implementations */
#define SQ_HPC_EEPROM_ENADDR	250

#define SGI_OUI_0		0x08
#define SGI_OUI_1		0x00
#define SGI_OUI_2		0x69
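/*
 * 08:00:69 is SGI's OUI; sq_attach() uses it to sanity-check the
 * EEPROM-supplied station address before falling back to ARCBIOS.
 */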

static int
sq_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct hpc_attach_args *ha = aux;

	if (strcmp(ha->ha_name, cf->cf_name) == 0) {
		vaddr_t reset, txstat;

		reset = MIPS_PHYS_TO_KSEG1(ha->ha_sh +
		    ha->ha_dmaoff + ha->hpc_regs->enetr_reset);
		txstat = MIPS_PHYS_TO_KSEG1(ha->ha_sh +
		    ha->ha_devoff + (SEEQ_TXSTAT << 2));

		if (platform.badaddr((void *)reset, sizeof(reset)))
			return (0);

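		/* Pulse the Ethernet reset register, as sq_reset() does. */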
		*(volatile uint32_t *)reset = 0x1;
		delay(20);
		*(volatile uint32_t *)reset = 0x0;

		if (platform.badaddr((void *)txstat, sizeof(txstat)))
			return (0);

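		/*
		 * A live Seeq chip should come out of reset with the
		 * TXSTAT_OLDNEW bit set in its transmit status register;
		 * reading that value back is taken as evidence that the
		 * device is really there.
		 */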
		if ((*(volatile uint32_t *)txstat & 0xff) == TXSTAT_OLDNEW)
			return (1);
	}

	return (0);
}

static void
sq_attach(struct device *parent, struct device *self, void *aux)
{
	int i, err;
	const char* macaddr;
	struct sq_softc *sc = (void *)self;
	struct hpc_attach_args *haa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	sc->sc_hpct = haa->ha_st;
	sc->hpc_regs = haa->hpc_regs;	/* HPC register definitions */

	if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
	    haa->ha_dmaoff, sc->hpc_regs->enet_regs_size,
	    &sc->sc_hpch)) != 0) {
		printf(": unable to map HPC DMA registers, error = %d\n", err);
		goto fail_0;
	}

	sc->sc_regt = haa->ha_st;
	if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
	    haa->ha_devoff, sc->hpc_regs->enet_devregs_size,
	    &sc->sc_regh)) != 0) {
		printf(": unable to map Seeq registers, error = %d\n", err);
		goto fail_0;
	}

	sc->sc_dmat = haa->ha_dmat;

	if ((err = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct sq_control),
	    PAGE_SIZE, PAGE_SIZE, &sc->sc_cdseg,
	    1, &sc->sc_ncdseg, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to allocate control data, error = %d\n", err);
		goto fail_0;
	}

	if ((err = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg,
	    sizeof(struct sq_control),
	    (void **)&sc->sc_control,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf(": unable to map control data, error = %d\n", err);
		goto fail_1;
	}

	if ((err = bus_dmamap_create(sc->sc_dmat, sizeof(struct sq_control),
	    1, sizeof(struct sq_control), PAGE_SIZE,
	    BUS_DMA_NOWAIT, &sc->sc_cdmap)) != 0) {
		printf(": unable to create DMA map for control data, error "
		    "= %d\n", err);
		goto fail_2;
	}

	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cdmap, sc->sc_control,
	    sizeof(struct sq_control),
	    NULL, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load DMA map for control data, error "
		    "= %d\n", err);
		goto fail_3;
	}

	memset(sc->sc_control, 0, sizeof(struct sq_control));

	/* Create transmit buffer DMA maps */
	for (i = 0; i < SQ_NTXDESC; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT,
		    &sc->sc_txmap[i])) != 0) {
			printf(": unable to create tx DMA map %d, error = %d\n",
			    i, err);
			goto fail_4;
		}
	}

	/* Create receive buffer DMA maps */
	for (i = 0; i < SQ_NRXDESC; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT,
		    &sc->sc_rxmap[i])) != 0) {
			printf(": unable to create rx DMA map %d, error = %d\n",
			    i, err);
			goto fail_5;
		}
	}

	/* Pre-allocate the receive buffers. */
	for (i = 0; i < SQ_NRXDESC; i++) {
		if ((err = sq_add_rxbuf(sc, i)) != 0) {
			printf(": unable to allocate or map rx buffer %d, "
			    "error = %d\n", i, err);
			goto fail_6;
		}
	}

	memcpy(sc->sc_enaddr, &haa->hpc_eeprom[SQ_HPC_EEPROM_ENADDR],
	    ETHER_ADDR_LEN);

	/*
	 * If our MAC address is bogus, obtain it from ARCBIOS.  This will
	 * be true of the onboard HPC3 on IP22, since there is no EEPROM;
	 * the DS1386 RTC's battery-backed RAM is used instead.
	 */
	if (sc->sc_enaddr[0] != SGI_OUI_0 || sc->sc_enaddr[1] != SGI_OUI_1 ||
	    sc->sc_enaddr[2] != SGI_OUI_2) {
		macaddr = ARCBIOS->GetEnvironmentVariable("eaddr");
		if (macaddr == NULL) {
			printf(": unable to get MAC address!\n");
			goto fail_6;
		}
		enaddr_aton(macaddr, sc->sc_enaddr);
	}

	evcnt_attach_dynamic(&sc->sq_intrcnt, EVCNT_TYPE_INTR, NULL,
	    self->dv_xname, "intr");

	if ((cpu_intr_establish(haa->ha_irq, IPL_NET, sq_intr, sc)) == NULL) {
		printf(": unable to establish interrupt!\n");
		goto fail_6;
	}

	/* Reset the chip to a known state. */
	sq_reset(sc);

	/*
	 * Determine if we're an 8003 or 80c03 by setting the first
	 * MAC address register to non-zero, and then reading it back.
	 * If it's zero, we have an 80c03, because we will have read
	 * the TxCollLSB register.
	 */
	sq_seeq_write(sc, SEEQ_TXCOLLS0, 0xa5);
	if (sq_seeq_read(sc, SEEQ_TXCOLLS0) == 0)
		sc->sc_type = SQ_TYPE_80C03;
	else
		sc->sc_type = SQ_TYPE_8003;
	sq_seeq_write(sc, SEEQ_TXCOLLS0, 0x00);

	printf(": SGI Seeq %s\n",
	    sc->sc_type == SQ_TYPE_80C03 ? "80c03" : "8003");

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(sc->sc_enaddr));

	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_init = sq_init;
	ifp->if_stop = sq_stop;
	ifp->if_start = sq_start;
	ifp->if_ioctl = sq_ioctl;
	ifp->if_watchdog = sq_watchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_NOTRAILERS | IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	memset(&sc->sq_trace, 0, sizeof(sc->sq_trace));
	/* Done! */
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	for (i = 0; i < SQ_NRXDESC; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
 fail_5:
	for (i = 0; i < SQ_NRXDESC; i++) {
		if (sc->sc_rxmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmap[i]);
	}
 fail_4:
	for (i = 0; i < SQ_NTXDESC; i++) {
		if (sc->sc_txmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_txmap[i]);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cdmap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdmap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *) sc->sc_control,
	    sizeof(struct sq_control));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg);
 fail_0:
	return;
}

/* Set up data to get the interface up and running. */
int
sq_init(struct ifnet *ifp)
{
	int i;
	struct sq_softc *sc = ifp->if_softc;

	/* Cancel any in-progress I/O */
	sq_stop(ifp, 0);

	sc->sc_nextrx = 0;

	sc->sc_nfreetx = SQ_NTXDESC;
	sc->sc_nexttx = sc->sc_prevtx = 0;

	SQ_TRACE(SQ_RESET, sc, 0, 0);

	/* Set into 8003 mode, bank 0 to program ethernet address */
	sq_seeq_write(sc, SEEQ_TXCMD, TXCMD_BANK0);

	/* Now write the address */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		sq_seeq_write(sc, i, sc->sc_enaddr[i]);

	sc->sc_rxcmd = RXCMD_IE_CRC |
	    RXCMD_IE_DRIB |
	    RXCMD_IE_SHORT |
	    RXCMD_IE_END |
	    RXCMD_IE_GOOD;

	/*
	 * Set the receive filter -- this will add some bits to the
	 * prototype RXCMD register.  Do this before setting the
	 * transmit config register, since we might need to switch
	 * banks.
	 */
	sq_set_filter(sc);

	/* Set up Seeq transmit command register */
	sq_seeq_write(sc, SEEQ_TXCMD, TXCMD_IE_UFLOW |
	    TXCMD_IE_COLL |
	    TXCMD_IE_16COLL |
	    TXCMD_IE_GOOD);

	/* Now write the receive command register. */
	sq_seeq_write(sc, SEEQ_RXCMD, sc->sc_rxcmd);

	/*
	 * Set up HPC ethernet PIO and DMA configurations.
	 *
	 * The PROM appears to do most of this for the onboard HPC3, but
	 * not for the Challenge S's IOPLUS chip.  We copy how the onboard
	 * chip is configured and assume that it's correct for both.
	 */
	if (sc->hpc_regs->revision == 3) {
		u_int32_t dmareg, pioreg;

		pioreg = HPC3_ENETR_PIOCFG_P1(1) |
		    HPC3_ENETR_PIOCFG_P2(6) |
		    HPC3_ENETR_PIOCFG_P3(1);

		dmareg = HPC3_ENETR_DMACFG_D1(6) |
		    HPC3_ENETR_DMACFG_D2(2) |
		    HPC3_ENETR_DMACFG_D3(0) |
		    HPC3_ENETR_DMACFG_FIX_RXDC |
		    HPC3_ENETR_DMACFG_FIX_INTR |
		    HPC3_ENETR_DMACFG_FIX_EOP |
		    HPC3_ENETR_DMACFG_TIMEOUT;

		sq_hpc_write(sc, HPC3_ENETR_PIOCFG, pioreg);
		sq_hpc_write(sc, HPC3_ENETR_DMACFG, dmareg);
	}

	/* Pass the start of the receive ring to the HPC */
	sq_hpc_write(sc, sc->hpc_regs->enetr_ndbp, SQ_CDRXADDR(sc, 0));

	/* And turn on the HPC ethernet receive channel */
	sq_hpc_write(sc, sc->hpc_regs->enetr_ctl,
	    sc->hpc_regs->enetr_ctl_active);

	/*
	 * Turn off delayed receive interrupts on HPC1.
	 * (see Hollywood HPC Specification 2.1.4.3)
	 */
	if (sc->hpc_regs->revision != 3)
		sq_hpc_write(sc, HPC1_ENET_INTDELAY, HPC1_ENET_INTDELAY_OFF);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

static void
sq_set_filter(struct sq_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;

	/*
	 * Check for promiscuous mode.  Also implies
	 * all-multicast.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rxcmd |= RXCMD_REC_ALL;
		ifp->if_flags |= IFF_ALLMULTI;
		return;
	}

	/*
	 * The 8003 has no hash table.  If we have any multicast
	 * addresses on the list, enable reception of all multicast
	 * frames.
	 *
	 * XXX The 80c03 has a hash table.  We should use it.
	 */

	ETHER_FIRST_MULTI(step, ec, enm);

	if (enm == NULL) {
		sc->sc_rxcmd &= ~RXCMD_REC_MASK;
		sc->sc_rxcmd |= RXCMD_REC_BROAD;

		ifp->if_flags &= ~IFF_ALLMULTI;
		return;
	}

	sc->sc_rxcmd |= RXCMD_REC_MULTI;
	ifp->if_flags |= IFF_ALLMULTI;
}

int
sq_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	int s, error = 0;

	SQ_TRACE(SQ_IOCTL, (struct sq_softc *)ifp->if_softc, 0, 0);

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		/*
		 * Multicast list has changed; set the hardware filter
		 * accordingly.
		 */
		if (ifp->if_flags & IFF_RUNNING)
			error = sq_init(ifp);
		else
			error = 0;
	}

	splx(s);
	return (error);
}

void
sq_start(struct ifnet *ifp)
{
	struct sq_softc *sc = ifp->if_softc;
	u_int32_t status;
	struct mbuf *m0, *m;
	bus_dmamap_t dmamap;
	int err, totlen, nexttx, firsttx, lasttx = -1, ofree, seg;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_nfreetx;
	firsttx = sc->sc_nexttx;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->sc_nfreetx != 0) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		dmamap = sc->sc_txmap[sc->sc_nexttx];

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  In this case, we'll copy
		 * and try again.
		 * Also copy it if we need to pad, so that we are sure
		 * there is room for the pad buffer.
		 * XXX the right way of doing this is to use a static
		 * buffer for padding and to add it to the transmit
		 * descriptor (see sys/dev/pci/if_tl.c for example).  We
		 * can't do this here yet because we can't send packets
		 * with more than one fragment.
		 */
		if (m0->m_pkthdr.len < ETHER_PAD_LEN ||
		    bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}

			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			if (m0->m_pkthdr.len < ETHER_PAD_LEN) {
				memset(mtod(m, char *) + m0->m_pkthdr.len, 0,
				    ETHER_PAD_LEN - m0->m_pkthdr.len);
				m->m_pkthdr.len = m->m_len = ETHER_PAD_LEN;
			} else
				m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;

			if ((err = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_NOWAIT)) != 0) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, err);
				break;
			}
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.
		 */
		if (dmamap->dm_nsegs > sc->sc_nfreetx) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed to anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX is it worth it?
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			if (m != NULL)
				m_freem(m);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		/*
		 * Pass the packet to any BPF listeners.
		 */
		bpf_mtap(ifp, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		SQ_TRACE(SQ_ENQUEUE, sc, sc->sc_nexttx, 0);

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptors.
		 */
		for (nexttx = sc->sc_nexttx, seg = 0, totlen = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = SQ_NEXTTX(nexttx)) {
			if (sc->hpc_regs->revision == 3) {
				sc->sc_txdesc[nexttx].hpc3_hdd_bufptr =
				    dmamap->dm_segs[seg].ds_addr;
				sc->sc_txdesc[nexttx].hpc3_hdd_ctl =
				    dmamap->dm_segs[seg].ds_len;
			} else {
				sc->sc_txdesc[nexttx].hpc1_hdd_bufptr =
				    dmamap->dm_segs[seg].ds_addr;
				sc->sc_txdesc[nexttx].hpc1_hdd_ctl =
				    dmamap->dm_segs[seg].ds_len;
			}
			sc->sc_txdesc[nexttx].hdd_descptr =
			    SQ_CDTXADDR(sc, SQ_NEXTTX(nexttx));
			lasttx = nexttx;
			totlen += dmamap->dm_segs[seg].ds_len;
		}

		/* Last descriptor gets end-of-packet */
		KASSERT(lasttx != -1);
		if (sc->hpc_regs->revision == 3)
			sc->sc_txdesc[lasttx].hpc3_hdd_ctl |=
			    HPC3_HDD_CTL_EOPACKET;
		else
			sc->sc_txdesc[lasttx].hpc1_hdd_ctl |=
			    HPC1_HDD_CTL_EOPACKET;

		SQ_DPRINTF(("%s: transmit %d-%d, len %d\n", sc->sc_dev.dv_xname,
		    sc->sc_nexttx, lasttx,
		    totlen));

		if (ifp->if_flags & IFF_DEBUG) {
			printf("     transmit chain:\n");
			for (seg = sc->sc_nexttx;; seg = SQ_NEXTTX(seg)) {
				printf("     descriptor %d:\n", seg);
				printf("       hdd_bufptr:  0x%08x\n",
				    (sc->hpc_regs->revision == 3) ?
				    sc->sc_txdesc[seg].hpc3_hdd_bufptr :
				    sc->sc_txdesc[seg].hpc1_hdd_bufptr);
				printf("       hdd_ctl:     0x%08x\n",
				    (sc->hpc_regs->revision == 3) ?
				    sc->sc_txdesc[seg].hpc3_hdd_ctl :
				    sc->sc_txdesc[seg].hpc1_hdd_ctl);
				printf("       hdd_descptr: 0x%08x\n",
				    sc->sc_txdesc[seg].hdd_descptr);

				if (seg == lasttx)
					break;
			}
		}

		/* Sync the descriptors we're using. */
		SQ_CDTXSYNC(sc, sc->sc_nexttx, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Store a pointer to the packet so we can free it later */
		sc->sc_txmbuf[sc->sc_nexttx] = m0;

		/* Advance the tx pointer. */
		sc->sc_nfreetx -= dmamap->dm_nsegs;
		sc->sc_nexttx = nexttx;
	}

	/* All transmit descriptors used up, let upper layers know */
	if (sc->sc_nfreetx == 0)
		ifp->if_flags |= IFF_OACTIVE;

	if (sc->sc_nfreetx != ofree) {
		SQ_DPRINTF(("%s: %d packets enqueued, first %d, INTR on %d\n",
		    sc->sc_dev.dv_xname, lasttx - firsttx + 1,
		    firsttx, lasttx));

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued, and mark it as the last
		 * descriptor.
		 *
		 * HPC1_HDD_CTL_INTR will generate an interrupt on
		 * HPC1.  HPC3 requires HPC3_HDD_CTL_EOPACKET in
		 * addition to HPC3_HDD_CTL_INTR to interrupt.
		 */
		KASSERT(lasttx != -1);
		if (sc->hpc_regs->revision == 3) {
			sc->sc_txdesc[lasttx].hpc3_hdd_ctl |=
			    HPC3_HDD_CTL_INTR | HPC3_HDD_CTL_EOCHAIN;
		} else {
			sc->sc_txdesc[lasttx].hpc1_hdd_ctl |= HPC1_HDD_CTL_INTR;
			sc->sc_txdesc[lasttx].hpc1_hdd_bufptr |=
			    HPC1_HDD_CTL_EOCHAIN;
		}

		SQ_CDTXSYNC(sc, lasttx, 1,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/*
		 * There is a potential race condition here if the HPC
		 * DMA channel is active and we try to either update
		 * the 'next descriptor' pointer in the HPC PIO space
		 * or the 'next descriptor' pointer in a previous
		 * descriptor.
		 *
		 * To avoid this, if the channel is active, we rely on
		 * the transmit interrupt routine noticing that there
		 * are more packets to send and restarting the HPC DMA
		 * engine, rather than mucking with the DMA state here.
		 */
		status = sq_hpc_read(sc, sc->hpc_regs->enetx_ctl);

		if ((status & sc->hpc_regs->enetx_ctl_active) != 0) {
			SQ_TRACE(SQ_ADD_TO_DMA, sc, firsttx, status);

			/*
			 * NB: hpc3_hdd_ctl == hpc1_hdd_bufptr, and
			 * HPC1_HDD_CTL_EOCHAIN == HPC3_HDD_CTL_EOCHAIN
			 */
			sc->sc_txdesc[SQ_PREVTX(firsttx)].hpc3_hdd_ctl &=
			    ~HPC3_HDD_CTL_EOCHAIN;

			if (sc->hpc_regs->revision != 3)
				sc->sc_txdesc[SQ_PREVTX(firsttx)].hpc1_hdd_ctl
				    &= ~HPC1_HDD_CTL_INTR;

			SQ_CDTXSYNC(sc, SQ_PREVTX(firsttx), 1,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		} else if (sc->hpc_regs->revision == 3) {
			SQ_TRACE(SQ_START_DMA, sc, firsttx, status);

			sq_hpc_write(sc, HPC3_ENETX_NDBP, SQ_CDTXADDR(sc,
			    firsttx));

			/* Kick DMA channel into life */
			sq_hpc_write(sc, HPC3_ENETX_CTL, HPC3_ENETX_CTL_ACTIVE);
		} else {
			/*
			 * In the HPC1 case where transmit DMA is
			 * inactive, we can either kick off if
			 * the ring was previously empty, or call
			 * our transmit interrupt handler to
			 * figure out if the ring stopped short
			 * and restart at the right place.
			 */
			if (ofree == SQ_NTXDESC) {
				SQ_TRACE(SQ_START_DMA, sc, firsttx, status);

				sq_hpc_write(sc, HPC1_ENETX_NDBP,
				    SQ_CDTXADDR(sc, firsttx));
				sq_hpc_write(sc, HPC1_ENETX_CFXBP,
				    SQ_CDTXADDR(sc, firsttx));
				sq_hpc_write(sc, HPC1_ENETX_CBP,
				    SQ_CDTXADDR(sc, firsttx));

				/* Kick DMA channel into life */
				sq_hpc_write(sc, HPC1_ENETX_CTL,
				    HPC1_ENETX_CTL_ACTIVE);
			} else
				sq_txring_hpc1(sc);
		}

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

void
sq_stop(struct ifnet *ifp, int disable)
{
	int i;
	struct sq_softc *sc = ifp->if_softc;

	for (i = 0; i < SQ_NTXDESC; i++) {
		if (sc->sc_txmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
			m_freem(sc->sc_txmbuf[i]);
			sc->sc_txmbuf[i] = NULL;
		}
	}

	/* Clear Seeq transmit/receive command registers */
	sq_seeq_write(sc, SEEQ_TXCMD, 0);
	sq_seeq_write(sc, SEEQ_RXCMD, 0);

	sq_reset(sc);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

/* Device timeout/watchdog routine. */
void
sq_watchdog(struct ifnet *ifp)
{
	u_int32_t status;
	struct sq_softc *sc = ifp->if_softc;

	status = sq_hpc_read(sc, sc->hpc_regs->enetx_ctl);
	log(LOG_ERR, "%s: device timeout (prev %d, next %d, free %d, "
	    "status %08x)\n", sc->sc_dev.dv_xname, sc->sc_prevtx,
	    sc->sc_nexttx, sc->sc_nfreetx, status);

	sq_trace_dump(sc);

	memset(&sc->sq_trace, 0, sizeof(sc->sq_trace));
	sc->sq_trace_idx = 0;

	++ifp->if_oerrors;

	sq_init(ifp);
}

static void
sq_trace_dump(struct sq_softc *sc)
{
	int i;
	const char *act;

	for (i = 0; i < sc->sq_trace_idx; i++) {
		switch (sc->sq_trace[i].action) {
		case SQ_RESET:		act = "SQ_RESET";		break;
		case SQ_ADD_TO_DMA:	act = "SQ_ADD_TO_DMA";		break;
		case SQ_START_DMA:	act = "SQ_START_DMA";		break;
		case SQ_DONE_DMA:	act = "SQ_DONE_DMA";		break;
		case SQ_RESTART_DMA:	act = "SQ_RESTART_DMA";		break;
		case SQ_TXINTR_ENTER:	act = "SQ_TXINTR_ENTER";	break;
		case SQ_TXINTR_EXIT:	act = "SQ_TXINTR_EXIT";		break;
		case SQ_TXINTR_BUSY:	act = "SQ_TXINTR_BUSY";		break;
		case SQ_IOCTL:		act = "SQ_IOCTL";		break;
		case SQ_ENQUEUE:	act = "SQ_ENQUEUE";		break;
		default:		act = "UNKNOWN";
		}

		printf("%s: [%03d] action %-16s buf %03d free %03d "
		    "status %08x line %d\n", sc->sc_dev.dv_xname, i, act,
		    sc->sq_trace[i].bufno, sc->sq_trace[i].freebuf,
		    sc->sq_trace[i].status, sc->sq_trace[i].line);
	}
}

static int
sq_intr(void *arg)
{
	struct sq_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int handled = 0;
	u_int32_t stat;

	stat = sq_hpc_read(sc, sc->hpc_regs->enetr_reset);

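	/*
	 * Bit 0x2 in the HPC interrupt/reset register appears to be the
	 * interrupt-pending flag; writing the register back with that
	 * bit set acknowledges (clears) the interrupt.
	 */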
	if ((stat & 2) == 0)
		SQ_DPRINTF(("%s: Unexpected interrupt!\n",
		    sc->sc_dev.dv_xname));
	else
		sq_hpc_write(sc, sc->hpc_regs->enetr_reset, (stat | 2));

	/*
	 * If the interface isn't running, the interrupt couldn't
	 * possibly have come from us.
	 */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	sc->sq_intrcnt.ev_count++;

	/* Always check for received packets */
	if (sq_rxintr(sc) != 0)
		handled++;

	/* Only handle transmit interrupts if we actually sent something */
	if (sc->sc_nfreetx < SQ_NTXDESC) {
		sq_txintr(sc);
		handled++;
	}

#if NRND > 0
	if (handled)
		rnd_add_uint32(&sc->rnd_source, stat);
#endif
	return (handled);
}

static int
sq_rxintr(struct sq_softc *sc)
{
	int count = 0;
	struct mbuf* m;
	int i, framelen;
	u_int8_t pktstat;
	u_int32_t status;
	u_int32_t ctl_reg;
	int new_end, orig_end;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	for (i = sc->sc_nextrx;; i = SQ_NEXTRX(i)) {
		SQ_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD |
		    BUS_DMASYNC_POSTWRITE);

		/*
		 * If this is a CPU-owned buffer, we're at the end of the list.
		 */
		if (sc->hpc_regs->revision == 3)
			ctl_reg = sc->sc_rxdesc[i].hpc3_hdd_ctl &
			    HPC3_HDD_CTL_OWN;
		else
			ctl_reg = sc->sc_rxdesc[i].hpc1_hdd_ctl &
			    HPC1_HDD_CTL_OWN;

		if (ctl_reg) {
#if defined(SQ_DEBUG)
			u_int32_t reg;

			reg = sq_hpc_read(sc, sc->hpc_regs->enetr_ctl);
			SQ_DPRINTF(("%s: rxintr: done at %d (ctl %08x)\n",
			    sc->sc_dev.dv_xname, i, reg));
#endif
			break;
		}

		count++;

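		/*
		 * Buffer layout, as implied by the arithmetic below: the
		 * chip apparently deposits two leading pad bytes, then
		 * the frame, then a trailing status byte, and the
		 * descriptor's BYTECNT field counts the space left in
		 * the buffer.  Hence framelen = ext_size - 3 - BYTECNT,
		 * the frame data starts at offset 2, and the status
		 * byte sits at offset framelen + 2.
		 */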
		m = sc->sc_rxmbuf[i];
		framelen = m->m_ext.ext_size - 3;
		if (sc->hpc_regs->revision == 3)
			framelen -=
			    HPC3_HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hpc3_hdd_ctl);
		else
			framelen -=
			    HPC1_HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hpc1_hdd_ctl);

		/* Now sync the actual packet data */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
		    sc->sc_rxmap[i]->dm_mapsize, BUS_DMASYNC_POSTREAD);

		pktstat = *((u_int8_t*)m->m_data + framelen + 2);

		if ((pktstat & RXSTAT_GOOD) == 0) {
			ifp->if_ierrors++;

			if (pktstat & RXSTAT_OFLOW)
				printf("%s: receive FIFO overflow\n",
				    sc->sc_dev.dv_xname);

			bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
			    sc->sc_rxmap[i]->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			SQ_INIT_RXDESC(sc, i);
			SQ_DPRINTF(("%s: sq_rxintr: buf %d no RXSTAT_GOOD\n",
			    sc->sc_dev.dv_xname, i));
			continue;
		}

		if (sq_add_rxbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
			    sc->sc_rxmap[i]->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			SQ_INIT_RXDESC(sc, i);
			SQ_DPRINTF(("%s: sq_rxintr: buf %d sq_add_rxbuf() "
			    "failed\n", sc->sc_dev.dv_xname, i));
			continue;
		}


		m->m_data += 2;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = framelen;

		ifp->if_ipackets++;

		SQ_DPRINTF(("%s: sq_rxintr: buf %d len %d\n",
		    sc->sc_dev.dv_xname, i, framelen));

		bpf_mtap(ifp, m);
		(*ifp->if_input)(ifp, m);
	}


	/* If anything happened, move ring start/end pointers to new spot */
	if (i != sc->sc_nextrx) {
		/*
		 * NB: hpc3_hdd_ctl == hpc1_hdd_bufptr, and
		 * HPC1_HDD_CTL_EOCHAIN == HPC3_HDD_CTL_EOCHAIN
		 */

		new_end = SQ_PREVRX(i);
		sc->sc_rxdesc[new_end].hpc3_hdd_ctl |= HPC3_HDD_CTL_EOCHAIN;
		SQ_CDRXSYNC(sc, new_end, BUS_DMASYNC_PREREAD |
		    BUS_DMASYNC_PREWRITE);

		orig_end = SQ_PREVRX(sc->sc_nextrx);
		sc->sc_rxdesc[orig_end].hpc3_hdd_ctl &= ~HPC3_HDD_CTL_EOCHAIN;
		SQ_CDRXSYNC(sc, orig_end, BUS_DMASYNC_PREREAD |
		    BUS_DMASYNC_PREWRITE);

		sc->sc_nextrx = i;
	}

	status = sq_hpc_read(sc, sc->hpc_regs->enetr_ctl);

	/* If receive channel is stopped, restart it... */
	if ((status & sc->hpc_regs->enetr_ctl_active) == 0) {
		/* Pass the start of the receive ring to the HPC */
		sq_hpc_write(sc, sc->hpc_regs->enetr_ndbp, SQ_CDRXADDR(sc,
		    sc->sc_nextrx));

		/* And turn on the HPC ethernet receive channel */
		sq_hpc_write(sc, sc->hpc_regs->enetr_ctl,
		    sc->hpc_regs->enetr_ctl_active);
	}

	return count;
}

static int
sq_txintr(struct sq_softc *sc)
{
	int shift = 0;
	u_int32_t status, tmp;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

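	/*
	 * HPC1 apparently presents the transmit status bits in the upper
	 * halfword of the control register, so shift them down before
	 * testing; on HPC3 they are already in the low bits.
	 */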
	if (sc->hpc_regs->revision != 3)
		shift = 16;

	status = sq_hpc_read(sc, sc->hpc_regs->enetx_ctl) >> shift;

	SQ_TRACE(SQ_TXINTR_ENTER, sc, sc->sc_prevtx, status);

	tmp = (sc->hpc_regs->enetx_ctl_active >> shift) | TXSTAT_GOOD;
	if ((status & tmp) == 0) {
		if (status & TXSTAT_COLL)
			ifp->if_collisions++;

		if (status & TXSTAT_UFLOW) {
			printf("%s: transmit underflow\n", sc->sc_dev.dv_xname);
			ifp->if_oerrors++;
		}

		if (status & TXSTAT_16COLL) {
			printf("%s: max collisions reached\n",
			    sc->sc_dev.dv_xname);
			ifp->if_oerrors++;
			ifp->if_collisions += 16;
		}
	}

	/* prevtx now points to next xmit packet not yet finished */
	if (sc->hpc_regs->revision == 3)
		sq_txring_hpc3(sc);
	else
		sq_txring_hpc1(sc);

	/* If we have buffers free, let upper layers know */
	if (sc->sc_nfreetx > 0)
		ifp->if_flags &= ~IFF_OACTIVE;

	/* If all packets have left the coop, cancel watchdog */
	if (sc->sc_nfreetx == SQ_NTXDESC)
		ifp->if_timer = 0;

	SQ_TRACE(SQ_TXINTR_EXIT, sc, sc->sc_prevtx, status);
	sq_start(ifp);

	return 1;
}

/*
 * Reclaim used transmit descriptors and restart the transmit DMA
 * engine if necessary.
 */
static void
sq_txring_hpc1(struct sq_softc *sc)
{
	/*
	 * HPC1 doesn't tag transmitted descriptors; however, the NDBP
	 * register points to the next descriptor that has not yet been
	 * processed.  If DMA is not in progress, we can safely reclaim
	 * all descriptors up to NDBP and, if necessary, restart DMA at
	 * NDBP.  Otherwise, if DMA is active, we can only safely reclaim
	 * up to CBP.
	 *
	 * For now, we'll only reclaim on inactive DMA and assume
	 * that a sufficiently large ring keeps us out of trouble.
	 */
	u_int32_t reclaimto, status;
	int reclaimall, i = sc->sc_prevtx;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	status = sq_hpc_read(sc, HPC1_ENETX_CTL);
	if (status & HPC1_ENETX_CTL_ACTIVE) {
		SQ_TRACE(SQ_TXINTR_BUSY, sc, i, status);
		return;
	} else
		reclaimto = sq_hpc_read(sc, HPC1_ENETX_NDBP);

	if (sc->sc_nfreetx == 0 && SQ_CDTXADDR(sc, i) == reclaimto)
		reclaimall = 1;
	else
		reclaimall = 0;

	while (sc->sc_nfreetx < SQ_NTXDESC) {
		if (SQ_CDTXADDR(sc, i) == reclaimto && !reclaimall)
			break;

		SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* Sync the packet data, unload DMA map, free mbuf */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap[i], 0,
		    sc->sc_txmap[i]->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
		m_freem(sc->sc_txmbuf[i]);
		sc->sc_txmbuf[i] = NULL;

		ifp->if_opackets++;
		sc->sc_nfreetx++;

		SQ_TRACE(SQ_DONE_DMA, sc, i, status);

		i = SQ_NEXTTX(i);
	}

	if (sc->sc_nfreetx < SQ_NTXDESC) {
		SQ_TRACE(SQ_RESTART_DMA, sc, i, status);

		KASSERT(reclaimto == SQ_CDTXADDR(sc, i));

		sq_hpc_write(sc, HPC1_ENETX_CFXBP, reclaimto);
		sq_hpc_write(sc, HPC1_ENETX_CBP, reclaimto);

		/* Kick DMA channel into life */
		sq_hpc_write(sc, HPC1_ENETX_CTL, HPC1_ENETX_CTL_ACTIVE);

		/*
		 * Set a watchdog timer in case the chip
		 * flakes out.
		 */
		ifp->if_timer = 5;
	}

	sc->sc_prevtx = i;
}

/*
 * Reclaim used transmit descriptors and restart the transmit DMA
 * engine if necessary.
 */
static void
sq_txring_hpc3(struct sq_softc *sc)
{
	/*
	 * HPC3 tags descriptors with a bit once they've been
	 * transmitted.  We need only free each XMITDONE'd
	 * descriptor, and restart the DMA engine if any
	 * descriptors are left over.
	 */
	int i;
	u_int32_t status = 0;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	i = sc->sc_prevtx;
	while (sc->sc_nfreetx < SQ_NTXDESC) {
		/*
		 * Check status first so we don't end up with a case of
		 * the buffer not being finished while the DMA channel
		 * has gone idle.
		 */
		status = sq_hpc_read(sc, HPC3_ENETX_CTL);

		SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* Check for used descriptor and restart DMA chain if needed */
		if (!(sc->sc_txdesc[i].hpc3_hdd_ctl & HPC3_HDD_CTL_XMITDONE)) {
			if ((status & HPC3_ENETX_CTL_ACTIVE) == 0) {
				SQ_TRACE(SQ_RESTART_DMA, sc, i, status);

				sq_hpc_write(sc, HPC3_ENETX_NDBP,
				    SQ_CDTXADDR(sc, i));

				/* Kick DMA channel into life */
				sq_hpc_write(sc, HPC3_ENETX_CTL,
				    HPC3_ENETX_CTL_ACTIVE);

				/*
				 * Set a watchdog timer in case the chip
				 * flakes out.
				 */
				ifp->if_timer = 5;
			} else
				SQ_TRACE(SQ_TXINTR_BUSY, sc, i, status);
			break;
		}

		/* Sync the packet data, unload DMA map, free mbuf */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap[i], 0,
		    sc->sc_txmap[i]->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
		m_freem(sc->sc_txmbuf[i]);
		sc->sc_txmbuf[i] = NULL;

		ifp->if_opackets++;
		sc->sc_nfreetx++;

		SQ_TRACE(SQ_DONE_DMA, sc, i, status);
		i = SQ_NEXTTX(i);
	}

	sc->sc_prevtx = i;
}

void
sq_reset(struct sq_softc *sc)
{
	/* Stop HPC dma channels */
	sq_hpc_write(sc, sc->hpc_regs->enetr_ctl, 0);
	sq_hpc_write(sc, sc->hpc_regs->enetx_ctl, 0);

	sq_hpc_write(sc, sc->hpc_regs->enetr_reset, 3);
	delay(20);
	sq_hpc_write(sc, sc->hpc_regs->enetr_reset, 0);
}

/* sq_add_rxbuf: Add a receive buffer to the indicated descriptor. */
int
sq_add_rxbuf(struct sq_softc *sc, int idx)
{
	int err;
	struct mbuf *m;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (sc->sc_rxmbuf[idx] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[idx]);

	sc->sc_rxmbuf[idx] = m;

	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_rxmap[idx],
	    m->m_ext.ext_buf, m->m_ext.ext_size,
	    NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, err);
		panic("sq_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[idx], 0,
	    sc->sc_rxmap[idx]->dm_mapsize, BUS_DMASYNC_PREREAD);

	SQ_INIT_RXDESC(sc, idx);

	return 0;
}

void
sq_dump_buffer(paddr_t addr, psize_t len)
{
	u_int i;
	u_char *physaddr = (u_char *)MIPS_PHYS_TO_KSEG1(addr);

	if (len == 0)
		return;

	printf("%p: ", physaddr);

	for (i = 0; i < len; i++) {
		printf("%02x ", *(physaddr + i) & 0xff);
		if ((i % 16) == 15 && i != len - 1)
			printf("\n%p: ", physaddr + i + 1);
	}

	printf("\n");
}

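/*
 * Parse a colon-separated MAC address string, such as the ARCBIOS
 * "eaddr" variable (e.g. "08:00:69:0a:1b:2c" -- an illustrative value),
 * into its six-byte binary form.  A character that is not a hex digit
 * is consumed without contributing a nibble.
 */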
void
enaddr_aton(const char* str, u_int8_t* eaddr)
{
	int i;
	char c;

	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		if (*str == ':')
			str++;

		c = *str++;
		if (isdigit(c)) {
			eaddr[i] = (c - '0');
		} else if (isxdigit(c)) {
			eaddr[i] = (toupper(c) + 10 - 'A');
		}

		c = *str++;
		if (isdigit(c)) {
			eaddr[i] = (eaddr[i] << 4) | (c - '0');
		} else if (isxdigit(c)) {
			eaddr[i] = (eaddr[i] << 4) | (toupper(c) + 10 - 'A');
		}
	}
}