/*	$NetBSD: if_sq.c,v 1.35 2010/01/19 22:06:22 pooka Exp $	*/

/*
 * Copyright (c) 2001 Rafal K. Boni
 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Portions of this code are derived from software contributed to The
 * NetBSD Foundation by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_sq.c,v 1.35 2010/01/19 22:06:22 pooka Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>

#include <machine/endian.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/sysconf.h>

#include <dev/ic/seeq8003reg.h>

#include <sgimips/hpc/sqvar.h>
#include <sgimips/hpc/hpcvar.h>
#include <sgimips/hpc/hpcreg.h>

#include <dev/arcbios/arcbios.h>
#include <dev/arcbios/arcbiosvar.h>

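/* XXX defeat "static" so driver symbols remain visible to the debugger */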
#define static

/*
 * Short TODO list:
 *	(1) Do counters for bad-RX packets.
 *	(2) Allow multi-segment transmits, instead of copying to a single,
 *	    contiguous mbuf.
 *	(3) Verify sq_stop() turns off enough stuff; I was still getting
 *	    seeq interrupts after sq_stop().
 *	(4) Implement EDLC modes: especially packet auto-pad and simplex
 *	    mode.
 *	(5) Should the driver filter out its own transmissions in non-EDLC
 *	    mode?
 *	(6) Multicast support -- multicast filter, address management, ...
 *	(7) Deal with RB0 (recv buffer overflow) on reception.  Will need
 *	    to figure out whether RB0 is read-only, as stated in one spot
 *	    in the HPC spec, or read-write (i.e., is "write a one to clear
 *	    it" the correct thing to do?).
 */

#if defined(SQ_DEBUG)
int sq_debug = 0;
#define SQ_DPRINTF(x) if (sq_debug) printf x
#else
#define SQ_DPRINTF(x)
#endif

static int	sq_match(struct device *, struct cfdata *, void *);
static void	sq_attach(struct device *, struct device *, void *);
static int	sq_init(struct ifnet *);
static void	sq_start(struct ifnet *);
static void	sq_stop(struct ifnet *, int);
static void	sq_watchdog(struct ifnet *);
static int	sq_ioctl(struct ifnet *, u_long, void *);

static void	sq_set_filter(struct sq_softc *);
static int	sq_intr(void *);
static int	sq_rxintr(struct sq_softc *);
static int	sq_txintr(struct sq_softc *);
static void	sq_txring_hpc1(struct sq_softc *);
static void	sq_txring_hpc3(struct sq_softc *);
static void	sq_reset(struct sq_softc *);
static int	sq_add_rxbuf(struct sq_softc *, int);
static void	sq_dump_buffer(paddr_t addr, psize_t len);
static void	sq_trace_dump(struct sq_softc *);

static void	enaddr_aton(const char *, u_int8_t *);

CFATTACH_DECL(sq, sizeof(struct sq_softc),
    sq_match, sq_attach, NULL, NULL);

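/*
 * Frames shorter than this must be padded out before transmission;
 * the CRC itself is appended by the chip.
 */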
#define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)

#define sq_seeq_read(sc, off) \
	bus_space_read_1(sc->sc_regt, sc->sc_regh, off)
#define sq_seeq_write(sc, off, val) \
	bus_space_write_1(sc->sc_regt, sc->sc_regh, off, val)

#define sq_hpc_read(sc, off) \
	bus_space_read_4(sc->sc_hpct, sc->sc_hpch, off)
#define sq_hpc_write(sc, off, val) \
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, off, val)

/* MAC address offset for non-onboard implementations */
#define SQ_HPC_EEPROM_ENADDR	250

#define SGI_OUI_0		0x08
#define SGI_OUI_1		0x00
#define SGI_OUI_2		0x69

static int
sq_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct hpc_attach_args *ha = aux;

	if (strcmp(ha->ha_name, cf->cf_name) == 0) {
		vaddr_t reset, txstat;

		reset = MIPS_PHYS_TO_KSEG1(ha->ha_sh +
		    ha->ha_dmaoff + ha->hpc_regs->enetr_reset);
		txstat = MIPS_PHYS_TO_KSEG1(ha->ha_sh +
		    ha->ha_devoff + (SEEQ_TXSTAT << 2));

		if (platform.badaddr((void *)reset, sizeof(reset)))
			return (0);

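		/* Pulse the HPC reset register to reset the Seeq chip. */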
		*(volatile uint32_t *)reset = 0x1;
		delay(20);
		*(volatile uint32_t *)reset = 0x0;

		if (platform.badaddr((void *)txstat, sizeof(txstat)))
			return (0);

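		/*
		 * If a Seeq is actually present, its transmit status
		 * register should read back as TXSTAT_OLDNEW after reset.
		 */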
		if ((*(volatile uint32_t *)txstat & 0xff) == TXSTAT_OLDNEW)
			return (1);
	}

	return (0);
}

static void
sq_attach(struct device *parent, struct device *self, void *aux)
{
	int i, err;
	const char *macaddr;
	struct sq_softc *sc = (void *)self;
	struct hpc_attach_args *haa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	sc->sc_hpct = haa->ha_st;
	sc->hpc_regs = haa->hpc_regs;		/* HPC register definitions */

	if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
	    haa->ha_dmaoff, sc->hpc_regs->enet_regs_size,
	    &sc->sc_hpch)) != 0) {
		printf(": unable to map HPC DMA registers, error = %d\n", err);
		goto fail_0;
	}

	sc->sc_regt = haa->ha_st;
	if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
	    haa->ha_devoff, sc->hpc_regs->enet_devregs_size,
	    &sc->sc_regh)) != 0) {
		printf(": unable to map Seeq registers, error = %d\n", err);
		goto fail_0;
	}

	sc->sc_dmat = haa->ha_dmat;

	if ((err = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct sq_control),
	    PAGE_SIZE, PAGE_SIZE, &sc->sc_cdseg,
	    1, &sc->sc_ncdseg, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to allocate control data, error = %d\n", err);
		goto fail_0;
	}

	if ((err = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg,
	    sizeof(struct sq_control), (void **)&sc->sc_control,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf(": unable to map control data, error = %d\n", err);
		goto fail_1;
	}

	if ((err = bus_dmamap_create(sc->sc_dmat, sizeof(struct sq_control),
	    1, sizeof(struct sq_control), PAGE_SIZE,
	    BUS_DMA_NOWAIT, &sc->sc_cdmap)) != 0) {
		printf(": unable to create DMA map for control data, error "
		    "= %d\n", err);
		goto fail_2;
	}

	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cdmap, sc->sc_control,
	    sizeof(struct sq_control), NULL, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load DMA map for control data, error "
		    "= %d\n", err);
		goto fail_3;
	}

	memset(sc->sc_control, 0, sizeof(struct sq_control));

	/* Create transmit buffer DMA maps */
	for (i = 0; i < SQ_NTXDESC; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_txmap[i])) != 0) {
			printf(": unable to create tx DMA map %d, "
			    "error = %d\n", i, err);
			goto fail_4;
		}
	}

	/* Create receive buffer DMA maps */
	for (i = 0; i < SQ_NRXDESC; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_rxmap[i])) != 0) {
			printf(": unable to create rx DMA map %d, "
			    "error = %d\n", i, err);
			goto fail_5;
		}
	}

	/* Pre-allocate the receive buffers. */
	for (i = 0; i < SQ_NRXDESC; i++) {
		if ((err = sq_add_rxbuf(sc, i)) != 0) {
			printf(": unable to allocate or map rx buffer %d, "
			    "error = %d\n", i, err);
			goto fail_6;
		}
	}

	memcpy(sc->sc_enaddr, &haa->hpc_eeprom[SQ_HPC_EEPROM_ENADDR],
	    ETHER_ADDR_LEN);

	/*
	 * If our mac address is bogus, obtain it from ARCBIOS.  This will
	 * be true of the onboard HPC3 on IP22, since there is no eeprom,
	 * but rather the DS1386 RTC's battery-backed ram is used.
	 */
	if (sc->sc_enaddr[0] != SGI_OUI_0 || sc->sc_enaddr[1] != SGI_OUI_1 ||
	    sc->sc_enaddr[2] != SGI_OUI_2) {
		macaddr = ARCBIOS->GetEnvironmentVariable("eaddr");
		if (macaddr == NULL) {
			printf(": unable to get MAC address!\n");
			goto fail_6;
		}
		enaddr_aton(macaddr, sc->sc_enaddr);
	}

	evcnt_attach_dynamic(&sc->sq_intrcnt, EVCNT_TYPE_INTR, NULL,
	    self->dv_xname, "intr");

	if ((cpu_intr_establish(haa->ha_irq, IPL_NET, sq_intr, sc)) == NULL) {
		printf(": unable to establish interrupt!\n");
		goto fail_6;
	}

	/* Reset the chip to a known state. */
	sq_reset(sc);

	/*
	 * Determine if we're an 8003 or 80c03 by setting the first
	 * MAC address register to non-zero, and then reading it back.
	 * If it's zero, we have an 80c03, because we will have read
	 * the TxCollLSB register.
	 */
	sq_seeq_write(sc, SEEQ_TXCOLLS0, 0xa5);
	if (sq_seeq_read(sc, SEEQ_TXCOLLS0) == 0)
		sc->sc_type = SQ_TYPE_80C03;
	else
		sc->sc_type = SQ_TYPE_8003;
	sq_seeq_write(sc, SEEQ_TXCOLLS0, 0x00);

	printf(": SGI Seeq %s\n",
	    sc->sc_type == SQ_TYPE_80C03 ? "80c03" : "8003");

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(sc->sc_enaddr));

	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_init = sq_init;
	ifp->if_stop = sq_stop;
	ifp->if_start = sq_start;
	ifp->if_ioctl = sq_ioctl;
	ifp->if_watchdog = sq_watchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_NOTRAILERS | IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	memset(&sc->sq_trace, 0, sizeof(sc->sq_trace));
	/* Done! */
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	for (i = 0; i < SQ_NRXDESC; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
 fail_5:
	for (i = 0; i < SQ_NRXDESC; i++) {
		if (sc->sc_rxmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmap[i]);
	}
 fail_4:
	for (i = 0; i < SQ_NTXDESC; i++) {
		if (sc->sc_txmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_txmap[i]);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cdmap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdmap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control,
	    sizeof(struct sq_control));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg);
 fail_0:
	return;
}

/* Set up data to get the interface up and running. */
int
sq_init(struct ifnet *ifp)
{
	int i;
	struct sq_softc *sc = ifp->if_softc;

	/* Cancel any in-progress I/O */
	sq_stop(ifp, 0);

	sc->sc_nextrx = 0;

	sc->sc_nfreetx = SQ_NTXDESC;
	sc->sc_nexttx = sc->sc_prevtx = 0;

	SQ_TRACE(SQ_RESET, sc, 0, 0);

	/* Set into 8003 mode, bank 0 to program ethernet address */
	sq_seeq_write(sc, SEEQ_TXCMD, TXCMD_BANK0);

	/* Now write the address */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		sq_seeq_write(sc, i, sc->sc_enaddr[i]);

	sc->sc_rxcmd = RXCMD_IE_CRC |
		       RXCMD_IE_DRIB |
		       RXCMD_IE_SHORT |
		       RXCMD_IE_END |
		       RXCMD_IE_GOOD;

	/*
	 * Set the receive filter -- this will add some bits to the
	 * prototype RXCMD register.  Do this before setting the
	 * transmit config register, since we might need to switch
	 * banks.
	 */
	sq_set_filter(sc);

	/* Set up Seeq transmit command register */
	sq_seeq_write(sc, SEEQ_TXCMD, TXCMD_IE_UFLOW |
				      TXCMD_IE_COLL |
				      TXCMD_IE_16COLL |
				      TXCMD_IE_GOOD);

	/* Now write the receive command register. */
	sq_seeq_write(sc, SEEQ_RXCMD, sc->sc_rxcmd);

	/*
	 * Set up HPC ethernet PIO and DMA configurations.
	 *
	 * The PROM appears to do most of this for the onboard HPC3, but
	 * not for the Challenge S's IOPLUS chip.  We copy how the onboard
	 * chip is configured and assume that it's correct for both.
	 */
	if (sc->hpc_regs->revision == 3) {
		u_int32_t dmareg, pioreg;

		pioreg = HPC3_ENETR_PIOCFG_P1(1) |
			 HPC3_ENETR_PIOCFG_P2(6) |
			 HPC3_ENETR_PIOCFG_P3(1);

		dmareg = HPC3_ENETR_DMACFG_D1(6) |
			 HPC3_ENETR_DMACFG_D2(2) |
			 HPC3_ENETR_DMACFG_D3(0) |
			 HPC3_ENETR_DMACFG_FIX_RXDC |
			 HPC3_ENETR_DMACFG_FIX_INTR |
			 HPC3_ENETR_DMACFG_FIX_EOP |
			 HPC3_ENETR_DMACFG_TIMEOUT;

		sq_hpc_write(sc, HPC3_ENETR_PIOCFG, pioreg);
		sq_hpc_write(sc, HPC3_ENETR_DMACFG, dmareg);
	}

	/* Pass the start of the receive ring to the HPC */
	sq_hpc_write(sc, sc->hpc_regs->enetr_ndbp, SQ_CDRXADDR(sc, 0));

	/* And turn on the HPC ethernet receive channel */
	sq_hpc_write(sc, sc->hpc_regs->enetr_ctl,
	    sc->hpc_regs->enetr_ctl_active);

	/*
	 * Turn off delayed receive interrupts on HPC1.
	 * (see Hollywood HPC Specification 2.1.4.3)
	 */
	if (sc->hpc_regs->revision != 3)
		sq_hpc_write(sc, HPC1_ENET_INTDELAY, HPC1_ENET_INTDELAY_OFF);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

static void
sq_set_filter(struct sq_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;

	/*
	 * Check for promiscuous mode.  Also implies
	 * all-multicast.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rxcmd |= RXCMD_REC_ALL;
		ifp->if_flags |= IFF_ALLMULTI;
		return;
	}

	/*
	 * The 8003 has no hash table.  If we have any multicast
	 * addresses on the list, enable reception of all multicast
	 * frames.
	 *
	 * XXX The 80c03 has a hash table.  We should use it.
	 */

	ETHER_FIRST_MULTI(step, ec, enm);

	if (enm == NULL) {
		sc->sc_rxcmd &= ~RXCMD_REC_MASK;
		sc->sc_rxcmd |= RXCMD_REC_BROAD;

		ifp->if_flags &= ~IFF_ALLMULTI;
		return;
	}

	sc->sc_rxcmd |= RXCMD_REC_MULTI;
	ifp->if_flags |= IFF_ALLMULTI;
}

int
sq_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	int s, error = 0;

	SQ_TRACE(SQ_IOCTL, (struct sq_softc *)ifp->if_softc, 0, 0);

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		/*
		 * Multicast list has changed; set the hardware filter
		 * accordingly.
		 */
		if (ifp->if_flags & IFF_RUNNING)
			error = sq_init(ifp);
		else
			error = 0;
	}

	splx(s);
	return (error);
}

void
sq_start(struct ifnet *ifp)
{
	struct sq_softc *sc = ifp->if_softc;
	u_int32_t status;
	struct mbuf *m0, *m;
	bus_dmamap_t dmamap;
	int err, totlen, nexttx, firsttx, lasttx = -1, ofree, seg;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_nfreetx;
	firsttx = sc->sc_nexttx;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->sc_nfreetx != 0) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		dmamap = sc->sc_txmap[sc->sc_nexttx];

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the alloted number of segments, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 * Also copy it if we need to pad, so that we are sure there
		 * is room for the pad buffer.
		 * XXX the right way of doing this is to use a static buffer
		 * for padding and adding it to the transmit descriptor (see
		 * sys/dev/pci/if_tl.c for example).  We can't do this here yet
		 * because we can't send packets with more than one fragment.
		 */
		if (m0->m_pkthdr.len < ETHER_PAD_LEN ||
		    bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}

			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			if (m0->m_pkthdr.len < ETHER_PAD_LEN) {
				memset(mtod(m, char *) + m0->m_pkthdr.len, 0,
				    ETHER_PAD_LEN - m0->m_pkthdr.len);
				m->m_pkthdr.len = m->m_len = ETHER_PAD_LEN;
			} else
				m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;

			if ((err = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_NOWAIT)) != 0) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, err);
				break;
			}
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.
		 */
		if (dmamap->dm_nsegs > sc->sc_nfreetx) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed to anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX is it worth it?
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			if (m != NULL)
				m_freem(m);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_ops->bpf_mtap(ifp->if_bpf, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		SQ_TRACE(SQ_ENQUEUE, sc, sc->sc_nexttx, 0);

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptors.
		 */
		for (nexttx = sc->sc_nexttx, seg = 0, totlen = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = SQ_NEXTTX(nexttx)) {
			if (sc->hpc_regs->revision == 3) {
				sc->sc_txdesc[nexttx].hpc3_hdd_bufptr =
				    dmamap->dm_segs[seg].ds_addr;
				sc->sc_txdesc[nexttx].hpc3_hdd_ctl =
				    dmamap->dm_segs[seg].ds_len;
			} else {
				sc->sc_txdesc[nexttx].hpc1_hdd_bufptr =
				    dmamap->dm_segs[seg].ds_addr;
				sc->sc_txdesc[nexttx].hpc1_hdd_ctl =
				    dmamap->dm_segs[seg].ds_len;
			}
			sc->sc_txdesc[nexttx].hdd_descptr =
			    SQ_CDTXADDR(sc, SQ_NEXTTX(nexttx));
			lasttx = nexttx;
			totlen += dmamap->dm_segs[seg].ds_len;
		}

		/* Last descriptor gets end-of-packet */
		KASSERT(lasttx != -1);
		if (sc->hpc_regs->revision == 3)
			sc->sc_txdesc[lasttx].hpc3_hdd_ctl |=
			    HPC3_HDD_CTL_EOPACKET;
		else
			sc->sc_txdesc[lasttx].hpc1_hdd_ctl |=
			    HPC1_HDD_CTL_EOPACKET;

		SQ_DPRINTF(("%s: transmit %d-%d, len %d\n",
		    sc->sc_dev.dv_xname, sc->sc_nexttx, lasttx, totlen));

		if (ifp->if_flags & IFF_DEBUG) {
			printf("     transmit chain:\n");
			for (seg = sc->sc_nexttx;; seg = SQ_NEXTTX(seg)) {
				printf("     descriptor %d:\n", seg);
				printf("       hdd_bufptr:  0x%08x\n",
				    (sc->hpc_regs->revision == 3) ?
				    sc->sc_txdesc[seg].hpc3_hdd_bufptr :
				    sc->sc_txdesc[seg].hpc1_hdd_bufptr);
				printf("       hdd_ctl:     0x%08x\n",
				    (sc->hpc_regs->revision == 3) ?
				    sc->sc_txdesc[seg].hpc3_hdd_ctl :
				    sc->sc_txdesc[seg].hpc1_hdd_ctl);
				printf("       hdd_descptr: 0x%08x\n",
				    sc->sc_txdesc[seg].hdd_descptr);

				if (seg == lasttx)
					break;
			}
		}

		/* Sync the descriptors we're using. */
		SQ_CDTXSYNC(sc, sc->sc_nexttx, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Store a pointer to the packet so we can free it later */
		sc->sc_txmbuf[sc->sc_nexttx] = m0;

		/* Advance the tx pointer. */
		sc->sc_nfreetx -= dmamap->dm_nsegs;
		sc->sc_nexttx = nexttx;
	}

	/* All transmit descriptors used up, let upper layers know */
	if (sc->sc_nfreetx == 0)
		ifp->if_flags |= IFF_OACTIVE;

	if (sc->sc_nfreetx != ofree) {
		SQ_DPRINTF(("%s: %d packets enqueued, first %d, INTR on %d\n",
		    sc->sc_dev.dv_xname, lasttx - firsttx + 1,
		    firsttx, lasttx));

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued, mark it as the last
		 * descriptor.
		 *
		 * HPC1_HDD_CTL_INTR will generate an interrupt on
		 * HPC1.  HPC3 requires HPC3_HDD_CTL_EOPACKET in
		 * addition to HPC3_HDD_CTL_INTR to interrupt.
		 */
		KASSERT(lasttx != -1);
		if (sc->hpc_regs->revision == 3) {
			sc->sc_txdesc[lasttx].hpc3_hdd_ctl |=
			    HPC3_HDD_CTL_INTR | HPC3_HDD_CTL_EOCHAIN;
		} else {
			sc->sc_txdesc[lasttx].hpc1_hdd_ctl |= HPC1_HDD_CTL_INTR;
			sc->sc_txdesc[lasttx].hpc1_hdd_bufptr |=
			    HPC1_HDD_CTL_EOCHAIN;
		}

		SQ_CDTXSYNC(sc, lasttx, 1,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/*
		 * There is a potential race condition here if the HPC
		 * DMA channel is active and we try and either update
		 * the 'next descriptor' pointer in the HPC PIO space
		 * or the 'next descriptor' pointer in a previous desc-
		 * riptor.
		 *
		 * To avoid this, if the channel is active, we rely on
		 * the transmit interrupt routine noticing that there
		 * are more packets to send and restarting the HPC DMA
		 * engine, rather than mucking with the DMA state here.
		 */
		status = sq_hpc_read(sc, sc->hpc_regs->enetx_ctl);

		if ((status & sc->hpc_regs->enetx_ctl_active) != 0) {
			SQ_TRACE(SQ_ADD_TO_DMA, sc, firsttx, status);

			/*
			 * NB: hpc3_hdd_ctl == hpc1_hdd_bufptr, and
			 * HPC1_HDD_CTL_EOCHAIN == HPC3_HDD_CTL_EOCHAIN
			 */
			sc->sc_txdesc[SQ_PREVTX(firsttx)].hpc3_hdd_ctl &=
			    ~HPC3_HDD_CTL_EOCHAIN;

			if (sc->hpc_regs->revision != 3)
				sc->sc_txdesc[SQ_PREVTX(firsttx)].hpc1_hdd_ctl
				    &= ~HPC1_HDD_CTL_INTR;

			SQ_CDTXSYNC(sc, SQ_PREVTX(firsttx), 1,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		} else if (sc->hpc_regs->revision == 3) {
			SQ_TRACE(SQ_START_DMA, sc, firsttx, status);

			sq_hpc_write(sc, HPC3_ENETX_NDBP, SQ_CDTXADDR(sc,
			    firsttx));

			/* Kick DMA channel into life */
			sq_hpc_write(sc, HPC3_ENETX_CTL, HPC3_ENETX_CTL_ACTIVE);
		} else {
			/*
			 * In the HPC1 case where transmit DMA is
			 * inactive, we can either kick off if
			 * the ring was previously empty, or call
			 * our transmit interrupt handler to
			 * figure out if the ring stopped short
			 * and restart at the right place.
			 */
			if (ofree == SQ_NTXDESC) {
				SQ_TRACE(SQ_START_DMA, sc, firsttx, status);

				sq_hpc_write(sc, HPC1_ENETX_NDBP,
				    SQ_CDTXADDR(sc, firsttx));
				sq_hpc_write(sc, HPC1_ENETX_CFXBP,
				    SQ_CDTXADDR(sc, firsttx));
				sq_hpc_write(sc, HPC1_ENETX_CBP,
				    SQ_CDTXADDR(sc, firsttx));

				/* Kick DMA channel into life */
				sq_hpc_write(sc, HPC1_ENETX_CTL,
				    HPC1_ENETX_CTL_ACTIVE);
			} else
				sq_txring_hpc1(sc);
		}

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

void
sq_stop(struct ifnet *ifp, int disable)
{
	int i;
	struct sq_softc *sc = ifp->if_softc;

	for (i = 0; i < SQ_NTXDESC; i++) {
		if (sc->sc_txmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
			m_freem(sc->sc_txmbuf[i]);
			sc->sc_txmbuf[i] = NULL;
		}
	}

	/* Clear Seeq transmit/receive command registers */
	sq_seeq_write(sc, SEEQ_TXCMD, 0);
	sq_seeq_write(sc, SEEQ_RXCMD, 0);

	sq_reset(sc);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

/* Device timeout/watchdog routine. */
void
sq_watchdog(struct ifnet *ifp)
{
	u_int32_t status;
	struct sq_softc *sc = ifp->if_softc;

	status = sq_hpc_read(sc, sc->hpc_regs->enetx_ctl);
	log(LOG_ERR, "%s: device timeout (prev %d, next %d, free %d, "
	    "status %08x)\n", sc->sc_dev.dv_xname, sc->sc_prevtx,
	    sc->sc_nexttx, sc->sc_nfreetx, status);

	sq_trace_dump(sc);

	memset(&sc->sq_trace, 0, sizeof(sc->sq_trace));
	sc->sq_trace_idx = 0;

	++ifp->if_oerrors;

	sq_init(ifp);
}

static void
sq_trace_dump(struct sq_softc *sc)
{
	int i;
	const char *act;

	for (i = 0; i < sc->sq_trace_idx; i++) {
		switch (sc->sq_trace[i].action) {
		case SQ_RESET:		act = "SQ_RESET";	break;
		case SQ_ADD_TO_DMA:	act = "SQ_ADD_TO_DMA";	break;
		case SQ_START_DMA:	act = "SQ_START_DMA";	break;
		case SQ_DONE_DMA:	act = "SQ_DONE_DMA";	break;
		case SQ_RESTART_DMA:	act = "SQ_RESTART_DMA";	break;
		case SQ_TXINTR_ENTER:	act = "SQ_TXINTR_ENTER"; break;
		case SQ_TXINTR_EXIT:	act = "SQ_TXINTR_EXIT";	break;
		case SQ_TXINTR_BUSY:	act = "SQ_TXINTR_BUSY";	break;
		case SQ_IOCTL:		act = "SQ_IOCTL";	break;
		case SQ_ENQUEUE:	act = "SQ_ENQUEUE";	break;
		default:		act = "UNKNOWN";
		}

		printf("%s: [%03d] action %-16s buf %03d free %03d "
		    "status %08x line %d\n", sc->sc_dev.dv_xname, i, act,
		    sc->sq_trace[i].bufno, sc->sq_trace[i].freebuf,
		    sc->sq_trace[i].status, sc->sq_trace[i].line);
	}
}

static int
sq_intr(void *arg)
{
	struct sq_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int handled = 0;
	u_int32_t stat;

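	/*
	 * Bit 1 of the HPC reset register latches the ethernet
	 * interrupt; writing it back with the bit set acknowledges it.
	 */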
	stat = sq_hpc_read(sc, sc->hpc_regs->enetr_reset);

	if ((stat & 2) == 0)
		SQ_DPRINTF(("%s: Unexpected interrupt!\n",
		    sc->sc_dev.dv_xname));
	else
		sq_hpc_write(sc, sc->hpc_regs->enetr_reset, (stat | 2));

	/*
	 * If the interface isn't running, the interrupt couldn't
	 * possibly have come from us.
	 */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	sc->sq_intrcnt.ev_count++;

	/* Always check for received packets */
	if (sq_rxintr(sc) != 0)
		handled++;

	/* Only handle transmit interrupts if we actually sent something */
	if (sc->sc_nfreetx < SQ_NTXDESC) {
		sq_txintr(sc);
		handled++;
	}

#if NRND > 0
	if (handled)
		rnd_add_uint32(&sc->rnd_source, stat);
#endif
	return (handled);
}

static int
sq_rxintr(struct sq_softc *sc)
{
	int count = 0;
	struct mbuf *m;
	int i, framelen;
	u_int8_t pktstat;
	u_int32_t status;
	u_int32_t ctl_reg;
	int new_end, orig_end;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	for (i = sc->sc_nextrx;; i = SQ_NEXTRX(i)) {
		SQ_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/*
		 * If this is a CPU-owned buffer, we're at the end of
		 * the list.
		 */
		if (sc->hpc_regs->revision == 3)
			ctl_reg = sc->sc_rxdesc[i].hpc3_hdd_ctl &
			    HPC3_HDD_CTL_OWN;
		else
			ctl_reg = sc->sc_rxdesc[i].hpc1_hdd_ctl &
			    HPC1_HDD_CTL_OWN;

		if (ctl_reg) {
#if defined(SQ_DEBUG)
			u_int32_t reg;

			reg = sq_hpc_read(sc, sc->hpc_regs->enetr_ctl);
			SQ_DPRINTF(("%s: rxintr: done at %d (ctl %08x)\n",
			    sc->sc_dev.dv_xname, i, reg));
#endif
			break;
		}

		count++;

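		/*
		 * The chip deposits the frame two bytes into the buffer
		 * (keeping the payload aligned) and follows it with a
		 * status byte; the descriptor byte count tells us how
		 * much of the buffer went unused.
		 */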
		m = sc->sc_rxmbuf[i];
		framelen = m->m_ext.ext_size - 3;
		if (sc->hpc_regs->revision == 3)
			framelen -=
			    HPC3_HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hpc3_hdd_ctl);
		else
			framelen -=
			    HPC1_HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hpc1_hdd_ctl);

		/* Now sync the actual packet data */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
		    sc->sc_rxmap[i]->dm_mapsize, BUS_DMASYNC_POSTREAD);

		pktstat = *((u_int8_t *)m->m_data + framelen + 2);

		if ((pktstat & RXSTAT_GOOD) == 0) {
			ifp->if_ierrors++;

			if (pktstat & RXSTAT_OFLOW)
				printf("%s: receive FIFO overflow\n",
				    sc->sc_dev.dv_xname);

			bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
			    sc->sc_rxmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD);
			SQ_INIT_RXDESC(sc, i);
			SQ_DPRINTF(("%s: sq_rxintr: buf %d no RXSTAT_GOOD\n",
			    sc->sc_dev.dv_xname, i));
			continue;
		}

		if (sq_add_rxbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
			    sc->sc_rxmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD);
			SQ_INIT_RXDESC(sc, i);
			SQ_DPRINTF(("%s: sq_rxintr: buf %d sq_add_rxbuf() "
			    "failed\n", sc->sc_dev.dv_xname, i));
			continue;
		}

		m->m_data += 2;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = framelen;

		ifp->if_ipackets++;

		SQ_DPRINTF(("%s: sq_rxintr: buf %d len %d\n",
		    sc->sc_dev.dv_xname, i, framelen));

		if (ifp->if_bpf)
			bpf_ops->bpf_mtap(ifp->if_bpf, m);
		(*ifp->if_input)(ifp, m);
	}

	/* If anything happened, move ring start/end pointers to new spot */
	if (i != sc->sc_nextrx) {
		/*
		 * NB: hpc3_hdd_ctl == hpc1_hdd_bufptr, and
		 * HPC1_HDD_CTL_EOCHAIN == HPC3_HDD_CTL_EOCHAIN
		 */

		new_end = SQ_PREVRX(i);
		sc->sc_rxdesc[new_end].hpc3_hdd_ctl |= HPC3_HDD_CTL_EOCHAIN;
		SQ_CDRXSYNC(sc, new_end,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		orig_end = SQ_PREVRX(sc->sc_nextrx);
		sc->sc_rxdesc[orig_end].hpc3_hdd_ctl &= ~HPC3_HDD_CTL_EOCHAIN;
		SQ_CDRXSYNC(sc, orig_end,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		sc->sc_nextrx = i;
	}

	status = sq_hpc_read(sc, sc->hpc_regs->enetr_ctl);

	/* If receive channel is stopped, restart it... */
	if ((status & sc->hpc_regs->enetr_ctl_active) == 0) {
		/* Pass the start of the receive ring to the HPC */
		sq_hpc_write(sc, sc->hpc_regs->enetr_ndbp,
		    SQ_CDRXADDR(sc, sc->sc_nextrx));

		/* And turn on the HPC ethernet receive channel */
		sq_hpc_write(sc, sc->hpc_regs->enetr_ctl,
		    sc->hpc_regs->enetr_ctl_active);
	}

	return count;
}

static int
sq_txintr(struct sq_softc *sc)
{
	int shift = 0;
	u_int32_t status, tmp;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

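	/*
	 * On HPC1 the transmit status sits in the upper 16 bits of
	 * the control register; shift it down before examining it.
	 */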
	if (sc->hpc_regs->revision != 3)
		shift = 16;

	status = sq_hpc_read(sc, sc->hpc_regs->enetx_ctl) >> shift;

	SQ_TRACE(SQ_TXINTR_ENTER, sc, sc->sc_prevtx, status);

	tmp = (sc->hpc_regs->enetx_ctl_active >> shift) | TXSTAT_GOOD;
	if ((status & tmp) == 0) {
		if (status & TXSTAT_COLL)
			ifp->if_collisions++;

		if (status & TXSTAT_UFLOW) {
			printf("%s: transmit underflow\n",
			    sc->sc_dev.dv_xname);
			ifp->if_oerrors++;
		}

		if (status & TXSTAT_16COLL) {
			printf("%s: max collisions reached\n",
			    sc->sc_dev.dv_xname);
			ifp->if_oerrors++;
			ifp->if_collisions += 16;
		}
	}

	/* prevtx now points to next xmit packet not yet finished */
	if (sc->hpc_regs->revision == 3)
		sq_txring_hpc3(sc);
	else
		sq_txring_hpc1(sc);

	/* If we have buffers free, let upper layers know */
	if (sc->sc_nfreetx > 0)
		ifp->if_flags &= ~IFF_OACTIVE;

	/* If all packets have left the coop, cancel watchdog */
	if (sc->sc_nfreetx == SQ_NTXDESC)
		ifp->if_timer = 0;

	SQ_TRACE(SQ_TXINTR_EXIT, sc, sc->sc_prevtx, status);
	sq_start(ifp);

	return 1;
}

/*
 * Reclaim used transmit descriptors and restart the transmit DMA
 * engine if necessary.
 */
static void
sq_txring_hpc1(struct sq_softc *sc)
{
	/*
	 * HPC1 doesn't tag transmitted descriptors, however,
	 * the NDBP register points to the next descriptor that
	 * has not yet been processed.  If DMA is not in progress,
	 * we can safely reclaim all descriptors up to NDBP, and,
	 * if necessary, restart DMA at NDBP.  Otherwise, if DMA
	 * is active, we can only safely reclaim up to CBP.
	 *
	 * For now, we'll only reclaim on inactive DMA and assume
	 * that a sufficiently large ring keeps us out of trouble.
	 */
	u_int32_t reclaimto, status;
	int reclaimall, i = sc->sc_prevtx;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	status = sq_hpc_read(sc, HPC1_ENETX_CTL);
	if (status & HPC1_ENETX_CTL_ACTIVE) {
		SQ_TRACE(SQ_TXINTR_BUSY, sc, i, status);
		return;
	} else
		reclaimto = sq_hpc_read(sc, HPC1_ENETX_NDBP);

	if (sc->sc_nfreetx == 0 && SQ_CDTXADDR(sc, i) == reclaimto)
		reclaimall = 1;
	else
		reclaimall = 0;

	while (sc->sc_nfreetx < SQ_NTXDESC) {
		if (SQ_CDTXADDR(sc, i) == reclaimto && !reclaimall)
			break;

		SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* Sync the packet data, unload DMA map, free mbuf */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap[i], 0,
		    sc->sc_txmap[i]->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
		m_freem(sc->sc_txmbuf[i]);
		sc->sc_txmbuf[i] = NULL;

		ifp->if_opackets++;
		sc->sc_nfreetx++;

		SQ_TRACE(SQ_DONE_DMA, sc, i, status);

		i = SQ_NEXTTX(i);
	}

	if (sc->sc_nfreetx < SQ_NTXDESC) {
		SQ_TRACE(SQ_RESTART_DMA, sc, i, status);

		KASSERT(reclaimto == SQ_CDTXADDR(sc, i));

		sq_hpc_write(sc, HPC1_ENETX_CFXBP, reclaimto);
		sq_hpc_write(sc, HPC1_ENETX_CBP, reclaimto);

		/* Kick DMA channel into life */
		sq_hpc_write(sc, HPC1_ENETX_CTL, HPC1_ENETX_CTL_ACTIVE);

		/*
		 * Set a watchdog timer in case the chip
		 * flakes out.
		 */
		ifp->if_timer = 5;
	}

	sc->sc_prevtx = i;
}

/*
 * Reclaim used transmit descriptors and restart the transmit DMA
 * engine if necessary.
 */
static void
sq_txring_hpc3(struct sq_softc *sc)
{
	/*
	 * HPC3 tags descriptors with a bit once they've been
	 * transmitted.  We need only free each XMITDONE'd
	 * descriptor, and restart the DMA engine if any
	 * descriptors are left over.
	 */
	int i;
	u_int32_t status = 0;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	i = sc->sc_prevtx;
	while (sc->sc_nfreetx < SQ_NTXDESC) {
		/*
		 * Check status first so we don't end up with a case of
		 * the buffer not being finished while the DMA channel
		 * has gone idle.
		 */
		status = sq_hpc_read(sc, HPC3_ENETX_CTL);

		SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* Check for used descriptor and restart DMA chain if needed */
		if (!(sc->sc_txdesc[i].hpc3_hdd_ctl & HPC3_HDD_CTL_XMITDONE)) {
			if ((status & HPC3_ENETX_CTL_ACTIVE) == 0) {
				SQ_TRACE(SQ_RESTART_DMA, sc, i, status);

				sq_hpc_write(sc, HPC3_ENETX_NDBP,
				    SQ_CDTXADDR(sc, i));

				/* Kick DMA channel into life */
				sq_hpc_write(sc, HPC3_ENETX_CTL,
				    HPC3_ENETX_CTL_ACTIVE);

				/*
				 * Set a watchdog timer in case the chip
				 * flakes out.
				 */
				ifp->if_timer = 5;
			} else
				SQ_TRACE(SQ_TXINTR_BUSY, sc, i, status);
			break;
		}

		/* Sync the packet data, unload DMA map, free mbuf */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap[i], 0,
		    sc->sc_txmap[i]->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
		m_freem(sc->sc_txmbuf[i]);
		sc->sc_txmbuf[i] = NULL;

		ifp->if_opackets++;
		sc->sc_nfreetx++;

		SQ_TRACE(SQ_DONE_DMA, sc, i, status);
		i = SQ_NEXTTX(i);
	}

	sc->sc_prevtx = i;
}

void
sq_reset(struct sq_softc *sc)
{
	/* Stop HPC dma channels */
	sq_hpc_write(sc, sc->hpc_regs->enetr_ctl, 0);
	sq_hpc_write(sc, sc->hpc_regs->enetx_ctl, 0);

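	/* Pulse the reset bits, then release the chip from reset. */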
	sq_hpc_write(sc, sc->hpc_regs->enetr_reset, 3);
	delay(20);
	sq_hpc_write(sc, sc->hpc_regs->enetr_reset, 0);
}

/* sq_add_rxbuf: Add a receive buffer to the indicated descriptor. */
int
sq_add_rxbuf(struct sq_softc *sc, int idx)
{
	int err;
	struct mbuf *m;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (sc->sc_rxmbuf[idx] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[idx]);

	sc->sc_rxmbuf[idx] = m;

	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_rxmap[idx],
	    m->m_ext.ext_buf, m->m_ext.ext_size,
	    NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, err);
		panic("sq_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[idx], 0,
	    sc->sc_rxmap[idx]->dm_mapsize, BUS_DMASYNC_PREREAD);

	SQ_INIT_RXDESC(sc, idx);

	return 0;
}

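/*
 * Debugging aid: hex-dump `len' bytes starting at physical address
 * `addr' (via KSEG1), sixteen bytes per line.
 */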
void
sq_dump_buffer(paddr_t addr, psize_t len)
{
	u_int i;
	u_char *physaddr = (u_char *)MIPS_PHYS_TO_KSEG1(addr);

	if (len == 0)
		return;

	printf("%p: ", physaddr);

	for (i = 0; i < len; i++) {
		printf("%02x ", *(physaddr + i) & 0xff);
		if ((i % 16) == 15 && i != len - 1)
			printf("\n%p: ", physaddr + i);
	}

	printf("\n");
}

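/*
 * Parse an ethernet address string of the form "08:00:69:xx:xx:xx"
 * (as returned by ARCBIOS) into its six-byte binary form.
 */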
void
enaddr_aton(const char *str, u_int8_t *eaddr)
{
	int i;
	char c;

	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		if (*str == ':')
			str++;

		c = *str++;
		if (isdigit(c)) {
			eaddr[i] = (c - '0');
		} else if (isxdigit(c)) {
			eaddr[i] = (toupper(c) + 10 - 'A');
		}

		c = *str++;
		if (isdigit(c)) {
			eaddr[i] = (eaddr[i] << 4) | (c - '0');
		} else if (isxdigit(c)) {
			eaddr[i] = (eaddr[i] << 4) | (toupper(c) + 10 - 'A');
		}
	}
}