1 /* $NetBSD: if_sq.c,v 1.49 2018/06/26 06:47:59 msaitoh Exp $ */
2
3 /*
4 * Copyright (c) 2001 Rafal K. Boni
5 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
6 * All rights reserved.
7 *
8 * Portions of this code are derived from software contributed to The
9 * NetBSD Foundation by Jason R. Thorpe of the Numerical Aerospace
10 * Simulation Facility, NASA Ames Research Center.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. The name of the author may not be used to endorse or promote products
21 * derived from this software without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: if_sq.c,v 1.49 2018/06/26 06:47:59 msaitoh Exp $");
37
38
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/device.h>
42 #include <sys/callout.h>
43 #include <sys/mbuf.h>
44 #include <sys/malloc.h>
45 #include <sys/kernel.h>
46 #include <sys/socket.h>
47 #include <sys/ioctl.h>
48 #include <sys/errno.h>
49 #include <sys/syslog.h>
50
51 #include <uvm/uvm_extern.h>
52
53 #include <machine/endian.h>
54
55 #include <net/if.h>
56 #include <net/if_dl.h>
57 #include <net/if_media.h>
58 #include <net/if_ether.h>
59
60 #include <net/bpf.h>
61
62 #include <sys/bus.h>
63 #include <machine/intr.h>
64 #include <machine/sysconf.h>
65
66 #include <dev/ic/seeq8003reg.h>
67
68 #include <sgimips/hpc/sqvar.h>
69 #include <sgimips/hpc/hpcvar.h>
70 #include <sgimips/hpc/hpcreg.h>
71
72 #include <dev/arcbios/arcbios.h>
73 #include <dev/arcbios/arcbiosvar.h>
74
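/*
 * XXX "static" is defined away below, presumably so the normally
 * XXX static functions in this driver stay visible to the debugger.
 */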
75 #define static
76
77 /*
78 * Short TODO list:
79 * (1) Do counters for bad-RX packets.
80 * (2) Allow multi-segment transmits, instead of copying to a single,
81 * contiguous mbuf.
82 * (3) Verify sq_stop() turns off enough stuff; I was still getting
83 * seeq interrupts after sq_stop().
84 * (4) Implement EDLC modes: especially packet auto-pad and simplex
85 * mode.
86 * (5) Should the driver filter out its own transmissions in non-EDLC
87 * mode?
88 * (6) Multicast support -- multicast filter, address management, ...
89 * (7) Deal with RB0 (recv buffer overflow) on reception. Will need
90 * to figure out if RB0 is read-only as stated in one spot in the
91 * HPC spec or read-write (ie, is the 'write a one to clear it'
92 * the correct thing?)
93 */
94
95 #if defined(SQ_DEBUG)
96 int sq_debug = 0;
97 #define SQ_DPRINTF(x) if (sq_debug) printf x
98 #else
99 #define SQ_DPRINTF(x)
100 #endif
101
102 static int sq_match(device_t, cfdata_t, void *);
103 static void sq_attach(device_t, device_t, void *);
104 static int sq_init(struct ifnet *);
105 static void sq_start(struct ifnet *);
106 static void sq_stop(struct ifnet *, int);
107 static void sq_watchdog(struct ifnet *);
108 static int sq_ioctl(struct ifnet *, u_long, void *);
109
110 static void sq_set_filter(struct sq_softc *);
111 static int sq_intr(void *);
112 static int sq_rxintr(struct sq_softc *);
113 static int sq_txintr(struct sq_softc *);
114 static void sq_txring_hpc1(struct sq_softc *);
115 static void sq_txring_hpc3(struct sq_softc *);
116 static void sq_reset(struct sq_softc *);
117 static int sq_add_rxbuf(struct sq_softc *, int);
118 static void sq_dump_buffer(paddr_t addr, psize_t len);
119 static void sq_trace_dump(struct sq_softc *);
120
121 CFATTACH_DECL_NEW(sq, sizeof(struct sq_softc),
122 sq_match, sq_attach, NULL, NULL);
123
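/*
 * Frames shorter than this must be padded before transmission:
 * the minimum Ethernet frame size less the four CRC bytes supplied
 * by the chip.
 */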
124 #define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
125
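/*
 * The Seeq registers are 8 bits wide; they are spaced one per 32-bit
 * word in the HPC PIO window, with the data in the last byte of each
 * word -- hence the "(off << 2) + 3" addressing below.
 */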
126 #define sq_seeq_read(sc, off) \
127 bus_space_read_1(sc->sc_regt, sc->sc_regh, (off << 2) + 3)
128 #define sq_seeq_write(sc, off, val) \
129 bus_space_write_1(sc->sc_regt, sc->sc_regh, (off << 2) + 3, val)
130
131 #define sq_hpc_read(sc, off) \
132 bus_space_read_4(sc->sc_hpct, sc->sc_hpch, off)
133 #define sq_hpc_write(sc, off, val) \
134 bus_space_write_4(sc->sc_hpct, sc->sc_hpch, off, val)
135
136 /* MAC address offset for non-onboard implementations */
137 #define SQ_HPC_EEPROM_ENADDR 250
138
139 #define SGI_OUI_0 0x08
140 #define SGI_OUI_1 0x00
141 #define SGI_OUI_2 0x69
142
143 static int
144 sq_match(device_t parent, cfdata_t cf, void *aux)
145 {
146 struct hpc_attach_args *ha = aux;
147
148 if (strcmp(ha->ha_name, cf->cf_name) == 0) {
149 vaddr_t reset, txstat;
150
151 reset = MIPS_PHYS_TO_KSEG1(ha->ha_sh +
152 ha->ha_dmaoff + ha->hpc_regs->enetr_reset);
153 txstat = MIPS_PHYS_TO_KSEG1(ha->ha_sh +
154 ha->ha_devoff + (SEEQ_TXSTAT << 2));
155
156 if (platform.badaddr((void *)reset, sizeof(reset)))
157 return 0;
158
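/*
 * Pulse the ethernet reset register, then make sure the Seeq
 * transmit status register reads back the expected value before
 * claiming the device.
 */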
159 *(volatile uint32_t *)reset = 0x1;
160 delay(20);
161 *(volatile uint32_t *)reset = 0x0;
162
163 if (platform.badaddr((void *)txstat, sizeof(txstat)))
164 return 0;
165
166 if ((*(volatile uint32_t *)txstat & 0xff) == TXSTAT_OLDNEW)
167 return 1;
168 }
169
170 return 0;
171 }
172
173 static void
174 sq_attach(device_t parent, device_t self, void *aux)
175 {
176 int i, err;
177 const char* macaddr;
178 struct sq_softc *sc = device_private(self);
179 struct hpc_attach_args *haa = aux;
180 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
181
182 sc->sc_dev = self;
183 sc->sc_hpct = haa->ha_st;
184 sc->hpc_regs = haa->hpc_regs; /* HPC register definitions */
185
186 if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
187 haa->ha_dmaoff, sc->hpc_regs->enet_regs_size,
188 &sc->sc_hpch)) != 0) {
189 printf(": unable to map HPC DMA registers, error = %d\n", err);
190 goto fail_0;
191 }
192
193 sc->sc_regt = haa->ha_st;
194 if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
195 haa->ha_devoff, sc->hpc_regs->enet_devregs_size,
196 &sc->sc_regh)) != 0) {
197 printf(": unable to map Seeq registers, error = %d\n", err);
198 goto fail_0;
199 }
200
201 sc->sc_dmat = haa->ha_dmat;
202
203 if ((err = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct sq_control),
204 PAGE_SIZE, PAGE_SIZE, &sc->sc_cdseg, 1, &sc->sc_ncdseg,
205 BUS_DMA_NOWAIT)) != 0) {
206 printf(": unable to allocate control data, error = %d\n", err);
207 goto fail_0;
208 }
209
210 if ((err = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg,
211 sizeof(struct sq_control), (void **)&sc->sc_control,
212 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
213 printf(": unable to map control data, error = %d\n", err);
214 goto fail_1;
215 }
216
217 if ((err = bus_dmamap_create(sc->sc_dmat,
218 sizeof(struct sq_control), 1, sizeof(struct sq_control), PAGE_SIZE,
219 BUS_DMA_NOWAIT, &sc->sc_cdmap)) != 0) {
220 printf(": unable to create DMA map for control data, error "
221 "= %d\n", err);
222 goto fail_2;
223 }
224
225 if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cdmap,
226 sc->sc_control, sizeof(struct sq_control), NULL,
227 BUS_DMA_NOWAIT)) != 0) {
228 printf(": unable to load DMA map for control data, error "
229 "= %d\n", err);
230 goto fail_3;
231 }
232
233 memset(sc->sc_control, 0, sizeof(struct sq_control));
234
235 /* Create transmit buffer DMA maps */
236 for (i = 0; i < SQ_NTXDESC; i++) {
237 if ((err = bus_dmamap_create(sc->sc_dmat,
238 MCLBYTES, 1, MCLBYTES, 0,
239 BUS_DMA_NOWAIT, &sc->sc_txmap[i])) != 0) {
240 printf(": unable to create tx DMA map %d, error = %d\n",
241 i, err);
242 goto fail_4;
243 }
244 }
245
246 /* Create receive buffer DMA maps */
247 for (i = 0; i < SQ_NRXDESC; i++) {
248 if ((err = bus_dmamap_create(sc->sc_dmat,
249 MCLBYTES, 1, MCLBYTES, 0,
250 BUS_DMA_NOWAIT, &sc->sc_rxmap[i])) != 0) {
251 printf(": unable to create rx DMA map %d, error = %d\n",
252 i, err);
253 goto fail_5;
254 }
255 }
256
257 /* Pre-allocate the receive buffers. */
258 for (i = 0; i < SQ_NRXDESC; i++) {
259 if ((err = sq_add_rxbuf(sc, i)) != 0) {
260 printf(": unable to allocate or map rx buffer %d\n,"
261 " error = %d\n", i, err);
262 goto fail_6;
263 }
264 }
265
266 memcpy(sc->sc_enaddr, &haa->hpc_eeprom[SQ_HPC_EEPROM_ENADDR],
267 ETHER_ADDR_LEN);
268
269 /*
270 * If our MAC address is bogus, obtain it from ARCBIOS. This will
271 * be the case for the onboard HPC3 on IP22, since there is no EEPROM;
272 * the DS1386 RTC's battery-backed RAM is used instead.
273 */
274 if (sc->sc_enaddr[0] != SGI_OUI_0 ||
275 sc->sc_enaddr[1] != SGI_OUI_1 ||
276 sc->sc_enaddr[2] != SGI_OUI_2) {
277 macaddr = arcbios_GetEnvironmentVariable("eaddr");
278 if (macaddr == NULL) {
279 printf(": unable to get MAC address!\n");
280 goto fail_6;
281 }
282 ether_aton_r(sc->sc_enaddr, sizeof(sc->sc_enaddr), macaddr);
283 }
284
285 evcnt_attach_dynamic(&sc->sq_intrcnt, EVCNT_TYPE_INTR, NULL,
286 device_xname(self), "intr");
287
288 if ((cpu_intr_establish(haa->ha_irq, IPL_NET, sq_intr, sc)) == NULL) {
289 printf(": unable to establish interrupt!\n");
290 goto fail_6;
291 }
292
293 /* Reset the chip to a known state. */
294 sq_reset(sc);
295
296 /*
297 * Determine if we're an 8003 or 80c03 by setting the first
298 * MAC address register to non-zero, and then reading it back.
299 * If it's zero, we have an 80c03, because we will have read
300 * the TxCollLSB register.
301 */
302 sq_seeq_write(sc, SEEQ_TXCOLLS0, 0xa5);
303 if (sq_seeq_read(sc, SEEQ_TXCOLLS0) == 0)
304 sc->sc_type = SQ_TYPE_80C03;
305 else
306 sc->sc_type = SQ_TYPE_8003;
307 sq_seeq_write(sc, SEEQ_TXCOLLS0, 0x00);
308
309 printf(": SGI Seeq %s\n",
310 sc->sc_type == SQ_TYPE_80C03 ? "80c03" : "8003");
311
312 printf("%s: Ethernet address %s\n",
313 device_xname(self), ether_sprintf(sc->sc_enaddr));
314
315 strcpy(ifp->if_xname, device_xname(self));
316 ifp->if_softc = sc;
317 ifp->if_mtu = ETHERMTU;
318 ifp->if_init = sq_init;
319 ifp->if_stop = sq_stop;
320 ifp->if_start = sq_start;
321 ifp->if_ioctl = sq_ioctl;
322 ifp->if_watchdog = sq_watchdog;
323 ifp->if_flags = IFF_BROADCAST | IFF_NOTRAILERS | IFF_MULTICAST;
324 IFQ_SET_READY(&ifp->if_snd);
325
326 if_attach(ifp);
327 if_deferred_start_init(ifp, NULL);
328 ether_ifattach(ifp, sc->sc_enaddr);
329
330 memset(&sc->sq_trace, 0, sizeof(sc->sq_trace));
331 /* Done! */
332 return;
333
334 /*
335 * Free any resources we've allocated during the failed attach
336 * attempt. Do this in reverse order and fall through.
337 */
338 fail_6:
339 for (i = 0; i < SQ_NRXDESC; i++) {
340 if (sc->sc_rxmbuf[i] != NULL) {
341 bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[i]);
342 m_freem(sc->sc_rxmbuf[i]);
343 }
344 }
345 fail_5:
346 for (i = 0; i < SQ_NRXDESC; i++) {
347 if (sc->sc_rxmap[i] != NULL)
348 bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmap[i]);
349 }
350 fail_4:
351 for (i = 0; i < SQ_NTXDESC; i++) {
352 if (sc->sc_txmap[i] != NULL)
353 bus_dmamap_destroy(sc->sc_dmat, sc->sc_txmap[i]);
354 }
355 bus_dmamap_unload(sc->sc_dmat, sc->sc_cdmap);
356 fail_3:
357 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdmap);
358 fail_2:
359 bus_dmamem_unmap(sc->sc_dmat,
360 (void *)sc->sc_control, sizeof(struct sq_control));
361 fail_1:
362 bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg);
363 fail_0:
364 return;
365 }
366
367 /* Set up data to get the interface up and running. */
368 int
369 sq_init(struct ifnet *ifp)
370 {
371 int i;
372 struct sq_softc *sc = ifp->if_softc;
373
374 /* Cancel any in-progress I/O */
375 sq_stop(ifp, 0);
376
377 sc->sc_nextrx = 0;
378
379 sc->sc_nfreetx = SQ_NTXDESC;
380 sc->sc_nexttx = sc->sc_prevtx = 0;
381
382 SQ_TRACE(SQ_RESET, sc, 0, 0);
383
384 /* Set into 8003 mode, bank 0 to program ethernet address */
385 sq_seeq_write(sc, SEEQ_TXCMD, TXCMD_BANK0);
386
387 /* Now write the address */
388 for (i = 0; i < ETHER_ADDR_LEN; i++)
389 sq_seeq_write(sc, i, sc->sc_enaddr[i]);
390
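/*
 * Build the prototype receive command: enable the receive interrupt
 * sources. sq_set_filter() below ORs in the address-match bits.
 */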
391 sc->sc_rxcmd =
392 RXCMD_IE_CRC |
393 RXCMD_IE_DRIB |
394 RXCMD_IE_SHORT |
395 RXCMD_IE_END |
396 RXCMD_IE_GOOD;
397
398 /*
399 * Set the receive filter -- this will add some bits to the
400 * prototype RXCMD register. Do this before setting the
401 * transmit config register, since we might need to switch
402 * banks.
403 */
404 sq_set_filter(sc);
405
406 /* Set up Seeq transmit command register */
407 sq_seeq_write(sc, SEEQ_TXCMD,
408 TXCMD_IE_UFLOW |
409 TXCMD_IE_COLL |
410 TXCMD_IE_16COLL |
411 TXCMD_IE_GOOD);
412
413 /* Now write the receive command register. */
414 sq_seeq_write(sc, SEEQ_RXCMD, sc->sc_rxcmd);
415
416 /*
417 * Set up HPC ethernet PIO and DMA configurations.
418 *
419 * The PROM appears to do most of this for the onboard HPC3, but
420 * not for the Challenge S's IOPLUS chip. We copy how the onboard
421 * chip is configured and assume that it's correct for both.
422 */
423 if (sc->hpc_regs->revision == 3) {
424 uint32_t dmareg, pioreg;
425
426 pioreg =
427 HPC3_ENETR_PIOCFG_P1(1) |
428 HPC3_ENETR_PIOCFG_P2(6) |
429 HPC3_ENETR_PIOCFG_P3(1);
430
431 dmareg =
432 HPC3_ENETR_DMACFG_D1(6) |
433 HPC3_ENETR_DMACFG_D2(2) |
434 HPC3_ENETR_DMACFG_D3(0) |
435 HPC3_ENETR_DMACFG_FIX_RXDC |
436 HPC3_ENETR_DMACFG_FIX_INTR |
437 HPC3_ENETR_DMACFG_FIX_EOP |
438 HPC3_ENETR_DMACFG_TIMEOUT;
439
440 sq_hpc_write(sc, HPC3_ENETR_PIOCFG, pioreg);
441 sq_hpc_write(sc, HPC3_ENETR_DMACFG, dmareg);
442 }
443
444 /* Pass the start of the receive ring to the HPC */
445 sq_hpc_write(sc, sc->hpc_regs->enetr_ndbp, SQ_CDRXADDR(sc, 0));
446
447 /* And turn on the HPC ethernet receive channel */
448 sq_hpc_write(sc, sc->hpc_regs->enetr_ctl,
449 sc->hpc_regs->enetr_ctl_active);
450
451 /*
452 * Turn off delayed receive interrupts on HPC1.
453 * (see Hollywood HPC Specification 2.1.4.3)
454 */
455 if (sc->hpc_regs->revision != 3)
456 sq_hpc_write(sc, HPC1_ENET_INTDELAY, HPC1_ENET_INTDELAY_OFF);
457
458 ifp->if_flags |= IFF_RUNNING;
459 ifp->if_flags &= ~IFF_OACTIVE;
460
461 return 0;
462 }
463
464 static void
465 sq_set_filter(struct sq_softc *sc)
466 {
467 struct ethercom *ec = &sc->sc_ethercom;
468 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
469 struct ether_multi *enm;
470 struct ether_multistep step;
471
472 /*
473 * Check for promiscuous mode. Also implies
474 * all-multicast.
475 */
476 if (ifp->if_flags & IFF_PROMISC) {
477 sc->sc_rxcmd |= RXCMD_REC_ALL;
478 ifp->if_flags |= IFF_ALLMULTI;
479 return;
480 }
481
482 /*
483 * The 8003 has no hash table. If we have any multicast
484 * addresses on the list, enable reception of all multicast
485 * frames.
486 *
487 * XXX The 80c03 has a hash table. We should use it.
488 */
489
490 ETHER_FIRST_MULTI(step, ec, enm);
491
492 if (enm == NULL) {
493 sc->sc_rxcmd &= ~RXCMD_REC_MASK;
494 sc->sc_rxcmd |= RXCMD_REC_BROAD;
495
496 ifp->if_flags &= ~IFF_ALLMULTI;
497 return;
498 }
499
500 sc->sc_rxcmd |= RXCMD_REC_MULTI;
501 ifp->if_flags |= IFF_ALLMULTI;
502 }
503
504 int
505 sq_ioctl(struct ifnet *ifp, u_long cmd, void *data)
506 {
507 int s, error = 0;
508
509 SQ_TRACE(SQ_IOCTL, (struct sq_softc *)ifp->if_softc, 0, 0);
510
511 s = splnet();
512
513 error = ether_ioctl(ifp, cmd, data);
514 if (error == ENETRESET) {
515 /*
516 * Multicast list has changed; set the hardware filter
517 * accordingly.
518 */
519 if (ifp->if_flags & IFF_RUNNING)
520 error = sq_init(ifp);
521 else
522 error = 0;
523 }
524
525 splx(s);
526 return error;
527 }
528
529 void
530 sq_start(struct ifnet *ifp)
531 {
532 struct sq_softc *sc = ifp->if_softc;
533 uint32_t status;
534 struct mbuf *m0, *m;
535 bus_dmamap_t dmamap;
536 int err, totlen, nexttx, firsttx, lasttx = -1, ofree, seg;
537
538 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
539 return;
540
541 /*
542 * Remember the previous number of free descriptors and
543 * the first descriptor we'll use.
544 */
545 ofree = sc->sc_nfreetx;
546 firsttx = sc->sc_nexttx;
547
548 /*
549 * Loop through the send queue, setting up transmit descriptors
550 * until we drain the queue, or use up all available transmit
551 * descriptors.
552 */
553 while (sc->sc_nfreetx != 0) {
554 /*
555 * Grab a packet off the queue.
556 */
557 IFQ_POLL(&ifp->if_snd, m0);
558 if (m0 == NULL)
559 break;
560 m = NULL;
561
562 dmamap = sc->sc_txmap[sc->sc_nexttx];
563
564 /*
565 * Load the DMA map. If this fails, the packet either
566 * didn't fit in the allotted number of segments, or we were
567 * short on resources. In this case, we'll copy and try
568 * again.
569 * Also copy it if we need to pad, so that we are sure there
570 * is room for the pad buffer.
571 * XXX the right way of doing this is to use a static buffer
572 * for padding and add it to the transmit descriptor (see
573 * sys/dev/pci/if_tl.c for example). We can't do this here yet
574 * because we can't send packets with more than one fragment.
575 */
576 if (m0->m_pkthdr.len < ETHER_PAD_LEN ||
577 bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
578 BUS_DMA_NOWAIT) != 0) {
579 MGETHDR(m, M_DONTWAIT, MT_DATA);
580 if (m == NULL) {
581 printf("%s: unable to allocate Tx mbuf\n",
582 device_xname(sc->sc_dev));
583 break;
584 }
585 if (m0->m_pkthdr.len > MHLEN) {
586 MCLGET(m, M_DONTWAIT);
587 if ((m->m_flags & M_EXT) == 0) {
588 printf("%s: unable to allocate Tx "
589 "cluster\n",
590 device_xname(sc->sc_dev));
591 m_freem(m);
592 break;
593 }
594 }
595
596 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
597 if (m0->m_pkthdr.len < ETHER_PAD_LEN) {
598 memset(mtod(m, char *) + m0->m_pkthdr.len, 0,
599 ETHER_PAD_LEN - m0->m_pkthdr.len);
600 m->m_pkthdr.len = m->m_len = ETHER_PAD_LEN;
601 } else
602 m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
603
604 if ((err = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
605 m, BUS_DMA_NOWAIT)) != 0) {
606 printf("%s: unable to load Tx buffer, "
607 "error = %d\n",
608 device_xname(sc->sc_dev), err);
609 break;
610 }
611 }
612
613 /*
614 * Ensure we have enough descriptors free to describe
615 * the packet.
616 */
617 if (dmamap->dm_nsegs > sc->sc_nfreetx) {
618 /*
619 * Not enough free descriptors to transmit this
620 * packet. We haven't committed to anything yet,
621 * so just unload the DMA map, put the packet
622 * back on the queue, and punt. Notify the upper
623 * layer that there are no more slots left.
624 *
625 * XXX We could allocate an mbuf and copy, but
626 * XXX is it worth it?
627 */
628 ifp->if_flags |= IFF_OACTIVE;
629 bus_dmamap_unload(sc->sc_dmat, dmamap);
630 if (m != NULL)
631 m_freem(m);
632 break;
633 }
634
635 IFQ_DEQUEUE(&ifp->if_snd, m0);
636 /*
637 * Pass the packet to any BPF listeners.
638 */
639 bpf_mtap(ifp, m0, BPF_D_OUT);
640 if (m != NULL) {
641 m_freem(m0);
642 m0 = m;
643 }
644
645 /*
646 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
647 */
648
649 SQ_TRACE(SQ_ENQUEUE, sc, sc->sc_nexttx, 0);
650
651 /* Sync the DMA map. */
652 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
653 BUS_DMASYNC_PREWRITE);
654
655 /*
656 * Initialize the transmit descriptors.
657 */
658 for (nexttx = sc->sc_nexttx, seg = 0, totlen = 0;
659 seg < dmamap->dm_nsegs;
660 seg++, nexttx = SQ_NEXTTX(nexttx)) {
661 if (sc->hpc_regs->revision == 3) {
662 sc->sc_txdesc[nexttx].hpc3_hdd_bufptr =
663 dmamap->dm_segs[seg].ds_addr;
664 sc->sc_txdesc[nexttx].hpc3_hdd_ctl =
665 dmamap->dm_segs[seg].ds_len;
666 } else {
667 sc->sc_txdesc[nexttx].hpc1_hdd_bufptr =
668 dmamap->dm_segs[seg].ds_addr;
669 sc->sc_txdesc[nexttx].hpc1_hdd_ctl =
670 dmamap->dm_segs[seg].ds_len;
671 }
672 sc->sc_txdesc[nexttx].hdd_descptr =
673 SQ_CDTXADDR(sc, SQ_NEXTTX(nexttx));
674 lasttx = nexttx;
675 totlen += dmamap->dm_segs[seg].ds_len;
676 }
677
678 /* Last descriptor gets end-of-packet */
679 KASSERT(lasttx != -1);
680 if (sc->hpc_regs->revision == 3)
681 sc->sc_txdesc[lasttx].hpc3_hdd_ctl |=
682 HPC3_HDD_CTL_EOPACKET;
683 else
684 sc->sc_txdesc[lasttx].hpc1_hdd_ctl |=
685 HPC1_HDD_CTL_EOPACKET;
686
687 SQ_DPRINTF(("%s: transmit %d-%d, len %d\n",
688 device_xname(sc->sc_dev), sc->sc_nexttx, lasttx, totlen));
689
690 if (ifp->if_flags & IFF_DEBUG) {
691 printf(" transmit chain:\n");
692 for (seg = sc->sc_nexttx;; seg = SQ_NEXTTX(seg)) {
693 printf(" descriptor %d:\n", seg);
694 printf(" hdd_bufptr: 0x%08x\n",
695 (sc->hpc_regs->revision == 3) ?
696 sc->sc_txdesc[seg].hpc3_hdd_bufptr :
697 sc->sc_txdesc[seg].hpc1_hdd_bufptr);
698 printf(" hdd_ctl: 0x%08x\n",
699 (sc->hpc_regs->revision == 3) ?
700 sc->sc_txdesc[seg].hpc3_hdd_ctl:
701 sc->sc_txdesc[seg].hpc1_hdd_ctl);
702 printf(" hdd_descptr: 0x%08x\n",
703 sc->sc_txdesc[seg].hdd_descptr);
704
705 if (seg == lasttx)
706 break;
707 }
708 }
709
710 /* Sync the descriptors we're using. */
711 SQ_CDTXSYNC(sc, sc->sc_nexttx, dmamap->dm_nsegs,
712 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
713
714 /* Store a pointer to the packet so we can free it later */
715 sc->sc_txmbuf[sc->sc_nexttx] = m0;
716
717 /* Advance the tx pointer. */
718 sc->sc_nfreetx -= dmamap->dm_nsegs;
719 sc->sc_nexttx = nexttx;
720 }
721
722 /* All transmit descriptors used up, let upper layers know */
723 if (sc->sc_nfreetx == 0)
724 ifp->if_flags |= IFF_OACTIVE;
725
726 if (sc->sc_nfreetx != ofree) {
727 SQ_DPRINTF(("%s: %d packets enqueued, first %d, INTR on %d\n",
728 device_xname(sc->sc_dev), lasttx - firsttx + 1,
729 firsttx, lasttx));
730
731 /*
732 * Cause a transmit interrupt to happen on the
733 * last packet we enqueued, and mark it as the last
734 * descriptor.
735 *
736 * HPC1_HDD_CTL_INTR will generate an interrupt on
737 * HPC1. HPC3 requires HPC3_HDD_CTL_EOPACKET in
738 * addition to HPC3_HDD_CTL_INTR to interrupt.
739 */
740 KASSERT(lasttx != -1);
741 if (sc->hpc_regs->revision == 3) {
742 sc->sc_txdesc[lasttx].hpc3_hdd_ctl |=
743 HPC3_HDD_CTL_INTR | HPC3_HDD_CTL_EOCHAIN;
744 } else {
745 sc->sc_txdesc[lasttx].hpc1_hdd_ctl |= HPC1_HDD_CTL_INTR;
746 sc->sc_txdesc[lasttx].hpc1_hdd_bufptr |=
747 HPC1_HDD_CTL_EOCHAIN;
748 }
749
750 SQ_CDTXSYNC(sc, lasttx, 1,
751 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
752
753 /*
754 * There is a potential race condition here if the HPC
755 * DMA channel is active and we try to update either
756 * the 'next descriptor' pointer in the HPC PIO space
757 * or the 'next descriptor' pointer in a previous
758 * descriptor.
759 *
760 * To avoid this, if the channel is active, we rely on
761 * the transmit interrupt routine noticing that there
762 * are more packets to send and restarting the HPC DMA
763 * engine, rather than mucking with the DMA state here.
764 */
765 status = sq_hpc_read(sc, sc->hpc_regs->enetx_ctl);
766
767 if ((status & sc->hpc_regs->enetx_ctl_active) != 0) {
768 SQ_TRACE(SQ_ADD_TO_DMA, sc, firsttx, status);
769
770 /*
771 * NB: hpc3_hdd_ctl == hpc1_hdd_bufptr, and
772 * HPC1_HDD_CTL_EOCHAIN == HPC3_HDD_CTL_EOCHAIN
773 */
774 sc->sc_txdesc[SQ_PREVTX(firsttx)].hpc3_hdd_ctl &=
775 ~HPC3_HDD_CTL_EOCHAIN;
776
777 if (sc->hpc_regs->revision != 3)
778 sc->sc_txdesc[SQ_PREVTX(firsttx)].hpc1_hdd_ctl
779 &= ~HPC1_HDD_CTL_INTR;
780
781 SQ_CDTXSYNC(sc, SQ_PREVTX(firsttx), 1,
782 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
783 } else if (sc->hpc_regs->revision == 3) {
784 SQ_TRACE(SQ_START_DMA, sc, firsttx, status);
785
786 sq_hpc_write(sc, HPC3_ENETX_NDBP, SQ_CDTXADDR(sc,
787 firsttx));
788
789 /* Kick DMA channel into life */
790 sq_hpc_write(sc, HPC3_ENETX_CTL, HPC3_ENETX_CTL_ACTIVE);
791 } else {
792 /*
793 * In the HPC1 case where transmit DMA is
794 * inactive, we can either kick off if
795 * the ring was previously empty, or call
796 * our transmit interrupt handler to
797 * figure out if the ring stopped short
798 * and restart at the right place.
799 */
800 if (ofree == SQ_NTXDESC) {
801 SQ_TRACE(SQ_START_DMA, sc, firsttx, status);
802
803 sq_hpc_write(sc, HPC1_ENETX_NDBP,
804 SQ_CDTXADDR(sc, firsttx));
805 sq_hpc_write(sc, HPC1_ENETX_CFXBP,
806 SQ_CDTXADDR(sc, firsttx));
807 sq_hpc_write(sc, HPC1_ENETX_CBP,
808 SQ_CDTXADDR(sc, firsttx));
809
810 /* Kick DMA channel into life */
811 sq_hpc_write(sc, HPC1_ENETX_CTL,
812 HPC1_ENETX_CTL_ACTIVE);
813 } else
814 sq_txring_hpc1(sc);
815 }
816
817 /* Set a watchdog timer in case the chip flakes out. */
818 ifp->if_timer = 5;
819 }
820 }
821
822 void
823 sq_stop(struct ifnet *ifp, int disable)
824 {
825 int i;
826 struct sq_softc *sc = ifp->if_softc;
827
828 for (i = 0; i < SQ_NTXDESC; i++) {
829 if (sc->sc_txmbuf[i] != NULL) {
830 bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
831 m_freem(sc->sc_txmbuf[i]);
832 sc->sc_txmbuf[i] = NULL;
833 }
834 }
835
836 /* Clear Seeq transmit/receive command registers */
837 sq_seeq_write(sc, SEEQ_TXCMD, 0);
838 sq_seeq_write(sc, SEEQ_RXCMD, 0);
839
840 sq_reset(sc);
841
842 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
843 ifp->if_timer = 0;
844 }
845
846 /* Device timeout/watchdog routine. */
847 void
848 sq_watchdog(struct ifnet *ifp)
849 {
850 uint32_t status;
851 struct sq_softc *sc = ifp->if_softc;
852
853 status = sq_hpc_read(sc, sc->hpc_regs->enetx_ctl);
854 log(LOG_ERR, "%s: device timeout (prev %d, next %d, free %d, "
855 "status %08x)\n", device_xname(sc->sc_dev), sc->sc_prevtx,
856 sc->sc_nexttx, sc->sc_nfreetx, status);
857
858 sq_trace_dump(sc);
859
860 memset(&sc->sq_trace, 0, sizeof(sc->sq_trace));
861 sc->sq_trace_idx = 0;
862
863 ++ifp->if_oerrors;
864
865 sq_init(ifp);
866 }
867
868 static void
869 sq_trace_dump(struct sq_softc *sc)
870 {
871 int i;
872 const char *act;
873
874 for (i = 0; i < sc->sq_trace_idx; i++) {
875 switch (sc->sq_trace[i].action) {
876 case SQ_RESET: act = "SQ_RESET"; break;
877 case SQ_ADD_TO_DMA: act = "SQ_ADD_TO_DMA"; break;
878 case SQ_START_DMA: act = "SQ_START_DMA"; break;
879 case SQ_DONE_DMA: act = "SQ_DONE_DMA"; break;
880 case SQ_RESTART_DMA: act = "SQ_RESTART_DMA"; break;
881 case SQ_TXINTR_ENTER: act = "SQ_TXINTR_ENTER"; break;
882 case SQ_TXINTR_EXIT: act = "SQ_TXINTR_EXIT"; break;
883 case SQ_TXINTR_BUSY: act = "SQ_TXINTR_BUSY"; break;
884 case SQ_IOCTL: act = "SQ_IOCTL"; break;
885 case SQ_ENQUEUE: act = "SQ_ENQUEUE"; break;
886 default: act = "UNKNOWN";
887 }
888
889 printf("%s: [%03d] action %-16s buf %03d free %03d "
890 "status %08x line %d\n", device_xname(sc->sc_dev), i, act,
891 sc->sq_trace[i].bufno, sc->sq_trace[i].freebuf,
892 sc->sq_trace[i].status, sc->sq_trace[i].line);
893 }
894 }
895
896 static int
897 sq_intr(void *arg)
898 {
899 struct sq_softc *sc = arg;
900 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
901 int handled = 0;
902 uint32_t stat;
903
904 stat = sq_hpc_read(sc, sc->hpc_regs->enetr_reset);
905
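/*
 * Bit 1 of the reset register holds the interrupt-pending flag;
 * writing it back as a one acknowledges the interrupt.
 */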
906 if ((stat & 2) == 0)
907 SQ_DPRINTF(("%s: Unexpected interrupt!\n",
908 device_xname(sc->sc_dev)));
909 else
910 sq_hpc_write(sc, sc->hpc_regs->enetr_reset, (stat | 2));
911
912 /*
913 * If the interface isn't running, the interrupt couldn't
914 * possibly have come from us.
915 */
916 if ((ifp->if_flags & IFF_RUNNING) == 0)
917 return 0;
918
919 sc->sq_intrcnt.ev_count++;
920
921 /* Always check for received packets */
922 if (sq_rxintr(sc) != 0)
923 handled++;
924
925 /* Only handle transmit interrupts if we actually sent something */
926 if (sc->sc_nfreetx < SQ_NTXDESC) {
927 sq_txintr(sc);
928 handled++;
929 }
930
931 if (handled)
932 rnd_add_uint32(&sc->rnd_source, stat);
933 return handled;
934 }
935
936 static int
937 sq_rxintr(struct sq_softc *sc)
938 {
939 int count = 0;
940 struct mbuf* m;
941 int i, framelen;
942 uint8_t pktstat;
943 uint32_t status;
944 uint32_t ctl_reg;
945 int new_end, orig_end;
946 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
947
948 for (i = sc->sc_nextrx;; i = SQ_NEXTRX(i)) {
949 SQ_CDRXSYNC(sc, i,
950 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
951
952 /*
953 * If this is a CPU-owned buffer, we're at the end of the list.
954 */
955 if (sc->hpc_regs->revision == 3)
956 ctl_reg =
957 sc->sc_rxdesc[i].hpc3_hdd_ctl & HPC3_HDD_CTL_OWN;
958 else
959 ctl_reg =
960 sc->sc_rxdesc[i].hpc1_hdd_ctl & HPC1_HDD_CTL_OWN;
961
962 if (ctl_reg) {
963 #if defined(SQ_DEBUG)
964 uint32_t reg;
965
966 reg = sq_hpc_read(sc, sc->hpc_regs->enetr_ctl);
967 SQ_DPRINTF(("%s: rxintr: done at %d (ctl %08x)\n",
968 device_xname(sc->sc_dev), i, reg));
969 #endif
970 break;
971 }
972
973 count++;
974
975 m = sc->sc_rxmbuf[i];
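/*
 * Work out the received frame length: the descriptor byte count now
 * holds what is left of the original buffer size, and three bytes of
 * overhead (two before the frame data plus the trailing Seeq status
 * byte) are not part of the frame itself.
 */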
976 framelen = m->m_ext.ext_size - 3;
977 if (sc->hpc_regs->revision == 3)
978 framelen -=
979 HPC3_HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hpc3_hdd_ctl);
980 else
981 framelen -=
982 HPC1_HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hpc1_hdd_ctl);
983
984 /* Now sync the actual packet data */
985 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
986 sc->sc_rxmap[i]->dm_mapsize, BUS_DMASYNC_POSTREAD);
987
988 pktstat = *((uint8_t *)m->m_data + framelen + 2);
989
990 if ((pktstat & RXSTAT_GOOD) == 0) {
991 ifp->if_ierrors++;
992
993 if (pktstat & RXSTAT_OFLOW)
994 printf("%s: receive FIFO overflow\n",
995 device_xname(sc->sc_dev));
996
997 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
998 sc->sc_rxmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD);
999 SQ_INIT_RXDESC(sc, i);
1000 SQ_DPRINTF(("%s: sq_rxintr: buf %d no RXSTAT_GOOD\n",
1001 device_xname(sc->sc_dev), i));
1002 continue;
1003 }
1004
1005 if (sq_add_rxbuf(sc, i) != 0) {
1006 ifp->if_ierrors++;
1007 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
1008 sc->sc_rxmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD);
1009 SQ_INIT_RXDESC(sc, i);
1010 SQ_DPRINTF(("%s: sq_rxintr: buf %d sq_add_rxbuf() "
1011 "failed\n", device_xname(sc->sc_dev), i));
1012 continue;
1013 }
1014
1015
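/* Skip the two bytes preceding the frame data before handing it up. */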
1016 m->m_data += 2;
1017 m_set_rcvif(m, ifp);
1018 m->m_pkthdr.len = m->m_len = framelen;
1019
1020 SQ_DPRINTF(("%s: sq_rxintr: buf %d len %d\n",
1021 device_xname(sc->sc_dev), i, framelen));
1022
1023 if_percpuq_enqueue(ifp->if_percpuq, m);
1024 }
1025
1026
1027 /* If anything happened, move ring start/end pointers to new spot */
1028 if (i != sc->sc_nextrx) {
1029 /*
1030 * NB: hpc3_hdd_ctl == hpc1_hdd_bufptr, and
1031 * HPC1_HDD_CTL_EOCHAIN == HPC3_HDD_CTL_EOCHAIN
1032 */
1033
1034 new_end = SQ_PREVRX(i);
1035 sc->sc_rxdesc[new_end].hpc3_hdd_ctl |= HPC3_HDD_CTL_EOCHAIN;
1036 SQ_CDRXSYNC(sc, new_end,
1037 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1038
1039 orig_end = SQ_PREVRX(sc->sc_nextrx);
1040 sc->sc_rxdesc[orig_end].hpc3_hdd_ctl &= ~HPC3_HDD_CTL_EOCHAIN;
1041 SQ_CDRXSYNC(sc, orig_end,
1042 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1043
1044 sc->sc_nextrx = i;
1045 }
1046
1047 status = sq_hpc_read(sc, sc->hpc_regs->enetr_ctl);
1048
1049 /* If receive channel is stopped, restart it... */
1050 if ((status & sc->hpc_regs->enetr_ctl_active) == 0) {
1051 /* Pass the start of the receive ring to the HPC */
1052 sq_hpc_write(sc, sc->hpc_regs->enetr_ndbp,
1053 SQ_CDRXADDR(sc, sc->sc_nextrx));
1054
1055 /* And turn on the HPC ethernet receive channel */
1056 sq_hpc_write(sc, sc->hpc_regs->enetr_ctl,
1057 sc->hpc_regs->enetr_ctl_active);
1058 }
1059
1060 return count;
1061 }
1062
1063 static int
1064 sq_txintr(struct sq_softc *sc)
1065 {
1066 int shift = 0;
1067 uint32_t status, tmp;
1068 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1069
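/*
 * On HPC1 the transmit status bits live in the upper halfword of the
 * control register, so shift them down before examining them.
 */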
1070 if (sc->hpc_regs->revision != 3)
1071 shift = 16;
1072
1073 status = sq_hpc_read(sc, sc->hpc_regs->enetx_ctl) >> shift;
1074
1075 SQ_TRACE(SQ_TXINTR_ENTER, sc, sc->sc_prevtx, status);
1076
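/*
 * If the channel has gone inactive without the 'good transmission'
 * bit set, the transmitter stopped on an error; report what went
 * wrong.
 */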
1077 tmp = (sc->hpc_regs->enetx_ctl_active >> shift) | TXSTAT_GOOD;
1078 if ((status & tmp) == 0) {
1079 if (status & TXSTAT_COLL)
1080 ifp->if_collisions++;
1081
1082 if (status & TXSTAT_UFLOW) {
1083 printf("%s: transmit underflow\n",
1084 device_xname(sc->sc_dev));
1085 ifp->if_oerrors++;
1086 }
1087
1088 if (status & TXSTAT_16COLL) {
1089 printf("%s: max collisions reached\n",
1090 device_xname(sc->sc_dev));
1091 ifp->if_oerrors++;
1092 ifp->if_collisions += 16;
1093 }
1094 }
1095
1096 /* prevtx now points to next xmit packet not yet finished */
1097 if (sc->hpc_regs->revision == 3)
1098 sq_txring_hpc3(sc);
1099 else
1100 sq_txring_hpc1(sc);
1101
1102 /* If we have buffers free, let upper layers know */
1103 if (sc->sc_nfreetx > 0)
1104 ifp->if_flags &= ~IFF_OACTIVE;
1105
1106 /* If all packets have left the coop, cancel watchdog */
1107 if (sc->sc_nfreetx == SQ_NTXDESC)
1108 ifp->if_timer = 0;
1109
1110 SQ_TRACE(SQ_TXINTR_EXIT, sc, sc->sc_prevtx, status);
1111 if_schedule_deferred_start(ifp);
1112
1113 return 1;
1114 }
1115
1116 /*
1117 * Reclaim used transmit descriptors and restart the transmit DMA
1118 * engine if necessary.
1119 */
1120 static void
1121 sq_txring_hpc1(struct sq_softc *sc)
1122 {
1123 /*
1124 * HPC1 doesn't tag transmitted descriptors, however,
1125 * the NDBP register points to the next descriptor that
1126 * has not yet been processed. If DMA is not in progress,
1127 * we can safely reclaim all descriptors up to NDBP, and,
1128 * if necessary, restart DMA at NDBP. Otherwise, if DMA
1129 * is active, we can only safely reclaim up to CBP.
1130 *
1131 * For now, we'll only reclaim on inactive DMA and assume
1132 * that a sufficiently large ring keeps us out of trouble.
1133 */
1134 uint32_t reclaimto, status;
1135 int reclaimall, i = sc->sc_prevtx;
1136 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1137
1138 status = sq_hpc_read(sc, HPC1_ENETX_CTL);
1139 if (status & HPC1_ENETX_CTL_ACTIVE) {
1140 SQ_TRACE(SQ_TXINTR_BUSY, sc, i, status);
1141 return;
1142 } else
1143 reclaimto = sq_hpc_read(sc, HPC1_ENETX_NDBP);
1144
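/*
 * If the ring is completely full and NDBP already points at the
 * first unreclaimed descriptor, DMA has been around the whole ring,
 * so reclaim everything rather than stopping right away.
 */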
1145 if (sc->sc_nfreetx == 0 && SQ_CDTXADDR(sc, i) == reclaimto)
1146 reclaimall = 1;
1147 else
1148 reclaimall = 0;
1149
1150 while (sc->sc_nfreetx < SQ_NTXDESC) {
1151 if (SQ_CDTXADDR(sc, i) == reclaimto && !reclaimall)
1152 break;
1153
1154 SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs,
1155 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1156
1157 /* Sync the packet data, unload DMA map, free mbuf */
1158 bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap[i],
1159 0, sc->sc_txmap[i]->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1160 bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
1161 m_freem(sc->sc_txmbuf[i]);
1162 sc->sc_txmbuf[i] = NULL;
1163
1164 ifp->if_opackets++;
1165 sc->sc_nfreetx++;
1166
1167 SQ_TRACE(SQ_DONE_DMA, sc, i, status);
1168
1169 i = SQ_NEXTTX(i);
1170 }
1171
1172 if (sc->sc_nfreetx < SQ_NTXDESC) {
1173 SQ_TRACE(SQ_RESTART_DMA, sc, i, status);
1174
1175 KASSERT(reclaimto == SQ_CDTXADDR(sc, i));
1176
1177 sq_hpc_write(sc, HPC1_ENETX_CFXBP, reclaimto);
1178 sq_hpc_write(sc, HPC1_ENETX_CBP, reclaimto);
1179
1180 /* Kick DMA channel into life */
1181 sq_hpc_write(sc, HPC1_ENETX_CTL, HPC1_ENETX_CTL_ACTIVE);
1182
1183 /*
1184 * Set a watchdog timer in case the chip
1185 * flakes out.
1186 */
1187 ifp->if_timer = 5;
1188 }
1189
1190 sc->sc_prevtx = i;
1191 }
1192
1193 /*
1194 * Reclaim used transmit descriptors and restart the transmit DMA
1195 * engine if necessary.
1196 */
1197 static void
1198 sq_txring_hpc3(struct sq_softc *sc)
1199 {
1200 /*
1201 * HPC3 tags descriptors with a bit once they've been
1202 * transmitted. We need only free each XMITDONE'd
1203 * descriptor, and restart the DMA engine if any
1204 * descriptors are left over.
1205 */
1206 int i;
1207 uint32_t status = 0;
1208 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1209
1210 i = sc->sc_prevtx;
1211 while (sc->sc_nfreetx < SQ_NTXDESC) {
1212 /*
1213 * Check status first so we don't end up with a case of
1214 * the buffer not being finished while the DMA channel
1215 * has gone idle.
1216 */
1217 status = sq_hpc_read(sc, HPC3_ENETX_CTL);
1218
1219 SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs,
1220 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1221
1222 /* Check for used descriptor and restart DMA chain if needed */
1223 if ((sc->sc_txdesc[i].hpc3_hdd_ctl &
1224 HPC3_HDD_CTL_XMITDONE) == 0) {
1225 if ((status & HPC3_ENETX_CTL_ACTIVE) == 0) {
1226 SQ_TRACE(SQ_RESTART_DMA, sc, i, status);
1227
1228 sq_hpc_write(sc, HPC3_ENETX_NDBP,
1229 SQ_CDTXADDR(sc, i));
1230
1231 /* Kick DMA channel into life */
1232 sq_hpc_write(sc, HPC3_ENETX_CTL,
1233 HPC3_ENETX_CTL_ACTIVE);
1234
1235 /*
1236 * Set a watchdog timer in case the chip
1237 * flakes out.
1238 */
1239 ifp->if_timer = 5;
1240 } else
1241 SQ_TRACE(SQ_TXINTR_BUSY, sc, i, status);
1242 break;
1243 }
1244
1245 /* Sync the packet data, unload DMA map, free mbuf */
1246 bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap[i],
1247 0, sc->sc_txmap[i]->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1248 bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
1249 m_freem(sc->sc_txmbuf[i]);
1250 sc->sc_txmbuf[i] = NULL;
1251
1252 ifp->if_opackets++;
1253 sc->sc_nfreetx++;
1254
1255 SQ_TRACE(SQ_DONE_DMA, sc, i, status);
1256 i = SQ_NEXTTX(i);
1257 }
1258
1259 sc->sc_prevtx = i;
1260 }
1261
1262 void
1263 sq_reset(struct sq_softc *sc)
1264 {
1265
1266 /* Stop HPC dma channels */
1267 sq_hpc_write(sc, sc->hpc_regs->enetr_ctl, 0);
1268 sq_hpc_write(sc, sc->hpc_regs->enetx_ctl, 0);
1269
1270 sq_hpc_write(sc, sc->hpc_regs->enetr_reset, 3);
1271 delay(20);
1272 sq_hpc_write(sc, sc->hpc_regs->enetr_reset, 0);
1273 }
1274
1275 /* sq_add_rxbuf: Add a receive buffer to the indicated descriptor. */
1276 int
1277 sq_add_rxbuf(struct sq_softc *sc, int idx)
1278 {
1279 int err;
1280 struct mbuf *m;
1281
1282 MGETHDR(m, M_DONTWAIT, MT_DATA);
1283 if (m == NULL)
1284 return ENOBUFS;
1285
1286 MCLGET(m, M_DONTWAIT);
1287 if ((m->m_flags & M_EXT) == 0) {
1288 m_freem(m);
1289 return ENOBUFS;
1290 }
1291
1292 if (sc->sc_rxmbuf[idx] != NULL)
1293 bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[idx]);
1294
1295 sc->sc_rxmbuf[idx] = m;
1296
1297 if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_rxmap[idx],
1298 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT)) != 0) {
1299 printf("%s: can't load rx DMA map %d, error = %d\n",
1300 device_xname(sc->sc_dev), idx, err);
1301 panic("sq_add_rxbuf"); /* XXX */
1302 }
1303
1304 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[idx],
1305 0, sc->sc_rxmap[idx]->dm_mapsize, BUS_DMASYNC_PREREAD);
1306
1307 SQ_INIT_RXDESC(sc, idx);
1308
1309 return 0;
1310 }
1311
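/*
 * Dump a physical buffer as hex bytes (accessed uncached through
 * KSEG1); debugging aid.
 */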
1312 void
1313 sq_dump_buffer(paddr_t addr, psize_t len)
1314 {
1315 u_int i;
1316 uint8_t *physaddr = (uint8_t *)MIPS_PHYS_TO_KSEG1(addr);
1317
1318 if (len == 0)
1319 return;
1320
1321 printf("%p: ", physaddr);
1322
1323 for (i = 0; i < len; i++) {
1324 printf("%02x ", *(physaddr + i) & 0xff);
1325 if ((i % 16) == 15 && i != len - 1)
1326 printf("\n%p: ", physaddr + i);
1327 }
1328
1329 printf("\n");
1330 }
1331