/*	$NetBSD: if_txp.c,v 1.72 2020/03/10 00:26:47 thorpej Exp $ */

/*
 * Copyright (c) 2001
 *	Jason L. Wright <jason@thought.net>, Theo de Raadt, and
 *	Aaron Campbell <aaron@monkey.org>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR THE VOICES IN THEIR HEADS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for 3c990 (Typhoon) Ethernet ASIC
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_txp.c,v 1.72 2020/03/10 00:26:47 thorpej Exp $");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_ether.h>
#include <net/if_arp.h>
#include <net/if_media.h>
#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_inarp.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_txpreg.h>

#include <dev/microcode/typhoon/3c990img.h>

/*
 * These currently break the 3c990 firmware; hopefully they will be
 * resolved at some point.
 */
#undef TRY_TX_UDP_CSUM
#undef TRY_TX_TCP_CSUM

static int txp_probe(device_t, cfdata_t, void *);
static void txp_attach(device_t, device_t, void *);
static int txp_intr(void *);
static void txp_tick(void *);
static bool txp_shutdown(device_t, int);
static int txp_ioctl(struct ifnet *, u_long, void *);
static void txp_start(struct ifnet *);
static void txp_stop(struct txp_softc *);
static void txp_init(struct txp_softc *);
static void txp_watchdog(struct ifnet *);

static int txp_chip_init(struct txp_softc *);
static int txp_reset_adapter(struct txp_softc *);
static int txp_download_fw(struct txp_softc *);
static int txp_download_fw_wait(struct txp_softc *);
static int txp_download_fw_section(struct txp_softc *,
    const struct txp_fw_section_header *, int);
static int txp_alloc_rings(struct txp_softc *);
static void txp_dma_free(struct txp_softc *, struct txp_dma_alloc *);
static int txp_dma_malloc(struct txp_softc *, bus_size_t,
    struct txp_dma_alloc *, int);
static void txp_set_filter(struct txp_softc *);

static int txp_cmd_desc_numfree(struct txp_softc *);
static int txp_command(struct txp_softc *, uint16_t, uint16_t, uint32_t,
    uint32_t, uint16_t *, uint32_t *, uint32_t *, int);
static int txp_command2(struct txp_softc *, uint16_t, uint16_t,
    uint32_t, uint32_t, struct txp_ext_desc *, uint8_t,
    struct txp_rsp_desc **, int);
static int txp_response(struct txp_softc *, uint32_t, uint16_t, uint16_t,
    struct txp_rsp_desc **);
static void txp_rsp_fixup(struct txp_softc *, struct txp_rsp_desc *,
    struct txp_rsp_desc *);
static void txp_capabilities(struct txp_softc *);

static void txp_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int txp_ifmedia_upd(struct ifnet *);
static void txp_tx_reclaim(struct txp_softc *, struct txp_tx_ring *,
    struct txp_dma_alloc *);
static void txp_rxbuf_reclaim(struct txp_softc *);
static void txp_rx_reclaim(struct txp_softc *, struct txp_rx_ring *,
    struct txp_dma_alloc *);

static void txp_rxd_free(struct txp_softc *, struct txp_swdesc *);
static struct txp_swdesc *txp_rxd_alloc(struct txp_softc *);

CFATTACH_DECL_NEW(txp, sizeof(struct txp_softc), txp_probe, txp_attach,
    NULL, NULL);

static const struct txp_pci_match {
	int vid, did, flags;
} txp_devices[] = {
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990, 0 },
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990TX95, 0 },
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990TX97, 0 },
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990SVR95, TXP_SERVERVERSION },
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990SVR97, TXP_SERVERVERSION },
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C990B, TXP_USESUBSYSTEM },
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C990BSVR, TXP_SERVERVERSION },
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990FX, TXP_USESUBSYSTEM },
};

static const struct txp_pci_match *txp_pcilookup(pcireg_t);

static const struct {
	uint16_t mask, value;
	int flags;
} txp_subsysinfo[] = {
	{ 0xf000, 0x2000, TXP_SERVERVERSION },
	{ 0x0100, 0x0100, TXP_FIBER },
#if 0	/* information from 3com header, unused */
	{ 0x0010, 0x0010, /* secured firmware */ },
	{ 0x0003, 0x0000, /* variable DES */ },
	{ 0x0003, 0x0001, /* single DES - "95" */ },
	{ 0x0003, 0x0002, /* triple DES - "97" */ },
#endif
};

static const struct txp_pci_match *
txp_pcilookup(pcireg_t id)
{
	int i;

	for (i = 0; i < __arraycount(txp_devices); i++)
		if (PCI_VENDOR(id) == txp_devices[i].vid &&
		    PCI_PRODUCT(id) == txp_devices[i].did)
			return &txp_devices[i];
	return (NULL);
}

static int
txp_probe(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (txp_pcilookup(pa->pa_id))
		return (1);
	return (0);
}

static void
txp_attach(device_t parent, device_t self, void *aux)
{
	struct txp_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	struct ifnet *ifp = &sc->sc_arpcom.ec_if;
	uint32_t command;
	uint16_t p1;
	uint32_t p2;
	u_char enaddr[6];
	const struct txp_pci_match *match;
	uint16_t subsys;
	int i, flags;
	char devinfo[256];
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_dev = self;
	sc->sc_cold = 1;

	match = txp_pcilookup(pa->pa_id);
	flags = match->flags;
	if (match->flags & TXP_USESUBSYSTEM) {
		subsys = PCI_PRODUCT(pci_conf_read(pc, pa->pa_tag,
		    PCI_SUBSYS_ID_REG));
		for (i = 0; i < __arraycount(txp_subsysinfo); i++)
			if ((subsys & txp_subsysinfo[i].mask) ==
			    txp_subsysinfo[i].value)
				flags |= txp_subsysinfo[i].flags;
	}
	sc->sc_flags = flags;

	aprint_naive("\n");
	pci_devinfo(pa->pa_id, 0, 0, devinfo, sizeof(devinfo));
#define TXP_EXTRAINFO ((flags & (TXP_USESUBSYSTEM | TXP_SERVERVERSION)) == \
    (TXP_USESUBSYSTEM | TXP_SERVERVERSION) ? " (SVR)" : "")
	aprint_normal(": %s%s\n%s", devinfo, TXP_EXTRAINFO,
	    device_xname(sc->sc_dev));

	command = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);

	if (!(command & PCI_COMMAND_MASTER_ENABLE)) {
		aprint_error(": failed to enable bus mastering\n");
		return;
	}

	if (!(command & PCI_COMMAND_MEM_ENABLE)) {
		aprint_error(": failed to enable memory mapping\n");
		return;
	}
	if (pci_mapreg_map(pa, TXP_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_bt, &sc->sc_bh, NULL, NULL)) {
		aprint_error(": can't map mem space %d\n", 0);
		return;
	}

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	/*
	 * Allocate our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error(": couldn't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
	sc->sc_ih = pci_intr_establish_xname(pc, ih, IPL_NET, txp_intr, sc,
	    device_xname(self));
	if (sc->sc_ih == NULL) {
		aprint_error(": couldn't establish interrupt");
		if (intrstr != NULL)
			aprint_normal(" at %s", intrstr);
		aprint_normal("\n");
		return;
	}
	aprint_normal(": interrupting at %s\n", intrstr);

	if (txp_chip_init(sc))
		goto cleanupintr;

	if (txp_download_fw(sc))
		goto cleanupintr;

	if (txp_alloc_rings(sc))
		goto cleanupintr;

	if (txp_command(sc, TXP_CMD_MAX_PKT_SIZE_WRITE, TXP_MAX_PKTLEN, 0, 0,
	    NULL, NULL, NULL, 1))
		goto cleanupintr;

	if (txp_command(sc, TXP_CMD_STATION_ADDRESS_READ, 0, 0, 0,
	    &p1, &p2, NULL, 1))
		goto cleanupintr;

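	/*
	 * The station address arrives in the two response parameters;
	 * store them little-endian and pick the bytes back out in
	 * reverse order to recover the wire-order MAC address.
	 */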
	p1 = htole16(p1);
	enaddr[0] = ((uint8_t *)&p1)[1];
	enaddr[1] = ((uint8_t *)&p1)[0];
	p2 = htole32(p2);
	enaddr[2] = ((uint8_t *)&p2)[3];
	enaddr[3] = ((uint8_t *)&p2)[2];
	enaddr[4] = ((uint8_t *)&p2)[1];
	enaddr[5] = ((uint8_t *)&p2)[0];

	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(enaddr));
	sc->sc_cold = 0;

	/* Initialize ifmedia structures. */
	sc->sc_arpcom.ec_ifmedia = &sc->sc_ifmedia;
	ifmedia_init(&sc->sc_ifmedia, 0, txp_ifmedia_upd, txp_ifmedia_sts);
	if (flags & TXP_FIBER) {
		ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_100_FX,
		    0, NULL);
		ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_100_FX | IFM_FDX,
		    0, NULL);
	} else {
		ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_10_T,
		    0, NULL);
		ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX,
		    0, NULL);
		ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_100_TX,
		    0, NULL);
		ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX,
		    0, NULL);
	}
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);

	sc->sc_xcvr = TXP_XCVR_AUTO;
	txp_command(sc, TXP_CMD_XCVR_SELECT, TXP_XCVR_AUTO, 0, 0,
	    NULL, NULL, NULL, 0);
	ifmedia_set(&sc->sc_ifmedia, IFM_ETHER | IFM_AUTO);

	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = txp_ioctl;
	ifp->if_start = txp_start;
	ifp->if_watchdog = txp_watchdog;
	ifp->if_baudrate = 10000000;
	IFQ_SET_MAXLEN(&ifp->if_snd, TX_ENTRIES);
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_capabilities = 0;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);

	txp_capabilities(sc);

	callout_init(&sc->sc_tick, 0);
	callout_setfunc(&sc->sc_tick, txp_tick, sc);

	/*
	 * Attach us everywhere
	 */
	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, enaddr);

	/*
	 * XXX Because we allocate Rx buffers in txp_alloc_rings(),
	 * XXX we have to go back and claim them now that our mowners
	 * XXX have been initialized (in ether_ifattach()).
	 *
	 * XXX FIXME by allocating Rx buffers only when the interface
	 * XXX is running, like other drivers do.
	 */
	for (i = 0; i < RXBUF_ENTRIES; i++) {
		KASSERT(sc->sc_rxd[i].sd_mbuf != NULL);
		MCLAIM(sc->sc_rxd[i].sd_mbuf, &sc->sc_arpcom.ec_rx_mowner);
	}

	if (pmf_device_register1(self, NULL, NULL, txp_shutdown))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	return;

cleanupintr:
	pci_intr_disestablish(pc, sc->sc_ih);

	return;
}

static int
txp_chip_init(struct txp_softc *sc)
{
	/* disable interrupts */
	WRITE_REG(sc, TXP_IER, 0);
	WRITE_REG(sc, TXP_IMR,
	    TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_LATCH);

	/* ack all interrupts */
	WRITE_REG(sc, TXP_ISR, TXP_INT_RESERVED | TXP_INT_LATCH |
	    TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
	    TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_A2H_3 | TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0);

	if (txp_reset_adapter(sc))
		return (-1);

	/* disable interrupts */
	WRITE_REG(sc, TXP_IER, 0);
	WRITE_REG(sc, TXP_IMR,
	    TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_LATCH);

	/* ack all interrupts */
	WRITE_REG(sc, TXP_ISR, TXP_INT_RESERVED | TXP_INT_LATCH |
	    TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
	    TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_A2H_3 | TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0);

	return (0);
}

static int
txp_reset_adapter(struct txp_softc *sc)
{
	uint32_t r;
	int i;

	WRITE_REG(sc, TXP_SRR, TXP_SRR_ALL);
	DELAY(1000);
	WRITE_REG(sc, TXP_SRR, 0);

	/* Should wait max 6 seconds */
	for (i = 0; i < 6000; i++) {
		r = READ_REG(sc, TXP_A2H_0);
		if (r == STAT_WAITING_FOR_HOST_REQUEST)
			break;
		DELAY(1000);
	}

	if (r != STAT_WAITING_FOR_HOST_REQUEST) {
		printf("%s: reset hung\n", TXP_DEVNAME(sc));
		return (-1);
	}

	return (0);
}

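/*
 * Firmware download protocol: the host and the Typhoon's boot firmware
 * talk through the H2A (host-to-adapter) and A2H (adapter-to-host)
 * mailbox registers.  The host announces the runtime image, then hands
 * the boot firmware one section at a time from a DMA-able bounce
 * buffer, waiting for the TXP_INT_A2H_0 handshake after each step.
 */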
static int
txp_download_fw(struct txp_softc *sc)
{
	const struct txp_fw_file_header *fileheader;
	const struct txp_fw_section_header *secthead;
	int sect;
	uint32_t r, i, ier, imr;

	ier = READ_REG(sc, TXP_IER);
	WRITE_REG(sc, TXP_IER, ier | TXP_INT_A2H_0);

	imr = READ_REG(sc, TXP_IMR);
	WRITE_REG(sc, TXP_IMR, imr | TXP_INT_A2H_0);

	for (i = 0; i < 10000; i++) {
		r = READ_REG(sc, TXP_A2H_0);
		if (r == STAT_WAITING_FOR_HOST_REQUEST)
			break;
		DELAY(50);
	}
	if (r != STAT_WAITING_FOR_HOST_REQUEST) {
		printf(": not waiting for host request\n");
		return (-1);
	}

	/* Ack the status */
	WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0);

	fileheader = (const struct txp_fw_file_header *)tc990image;
	if (memcmp("TYPHOON", fileheader->magicid,
	    sizeof(fileheader->magicid))) {
		printf(": fw invalid magic\n");
		return (-1);
	}

	/* Tell boot firmware to get ready for image */
	WRITE_REG(sc, TXP_H2A_1, le32toh(fileheader->addr));
	WRITE_REG(sc, TXP_H2A_2, le32toh(fileheader->hmac[0]));
	WRITE_REG(sc, TXP_H2A_3, le32toh(fileheader->hmac[1]));
	WRITE_REG(sc, TXP_H2A_4, le32toh(fileheader->hmac[2]));
	WRITE_REG(sc, TXP_H2A_5, le32toh(fileheader->hmac[3]));
	WRITE_REG(sc, TXP_H2A_6, le32toh(fileheader->hmac[4]));
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_RUNTIME_IMAGE);

	if (txp_download_fw_wait(sc)) {
		printf("%s: fw wait failed, initial\n",
		    device_xname(sc->sc_dev));
		return (-1);
	}

	secthead = (const struct txp_fw_section_header *)
	    (((const uint8_t *)tc990image) +
	    sizeof(struct txp_fw_file_header));

	for (sect = 0; sect < le32toh(fileheader->nsections); sect++) {
		if (txp_download_fw_section(sc, secthead, sect))
			return (-1);
		secthead = (const struct txp_fw_section_header *)
		    (((const uint8_t *)secthead) + le32toh(secthead->nbytes) +
		    sizeof(*secthead));
	}

	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_DOWNLOAD_COMPLETE);

	for (i = 0; i < 10000; i++) {
		r = READ_REG(sc, TXP_A2H_0);
		if (r == STAT_WAITING_FOR_BOOT)
			break;
		DELAY(50);
	}
	if (r != STAT_WAITING_FOR_BOOT) {
		printf(": not waiting for boot\n");
		return (-1);
	}

	WRITE_REG(sc, TXP_IER, ier);
	WRITE_REG(sc, TXP_IMR, imr);

	return (0);
}

static int
txp_download_fw_wait(struct txp_softc *sc)
{
	uint32_t i, r;

	for (i = 0; i < 10000; i++) {
		r = READ_REG(sc, TXP_ISR);
		if (r & TXP_INT_A2H_0)
			break;
		DELAY(50);
	}

	if (!(r & TXP_INT_A2H_0)) {
		printf(": fw wait failed comm0\n");
		return (-1);
	}

	WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0);

	r = READ_REG(sc, TXP_A2H_0);
	if (r != STAT_WAITING_FOR_SEGMENT) {
		printf(": fw not waiting for segment\n");
		return (-1);
	}
	return (0);
}

static int
txp_download_fw_section(struct txp_softc *sc,
    const struct txp_fw_section_header *sect, int sectnum)
{
	struct txp_dma_alloc dma;
	int rseg, err = 0;
	struct mbuf m;
#ifdef INET
	uint16_t csum;
#endif

	/* Skip zero length sections */
	if (sect->nbytes == 0)
		return (0);

	/* Make sure we aren't past the end of the image */
	rseg = ((const uint8_t *)sect) - ((const uint8_t *)tc990image);
	if (rseg >= sizeof(tc990image)) {
		printf(": fw invalid section address, section %d\n", sectnum);
		return (-1);
	}

	/* Make sure this section doesn't go past the end */
	rseg += le32toh(sect->nbytes);
	if (rseg >= sizeof(tc990image)) {
		printf(": fw truncated section %d\n", sectnum);
		return (-1);
	}

	/* map a buffer, copy segment to it, get physaddr */
	if (txp_dma_malloc(sc, le32toh(sect->nbytes), &dma, 0)) {
		printf(": fw dma malloc failed, section %d\n", sectnum);
		return (-1);
	}

	memcpy(dma.dma_vaddr, ((const uint8_t *)sect) + sizeof(*sect),
	    le32toh(sect->nbytes));

	/*
	 * dummy up mbuf and verify section checksum
	 */
	m.m_type = MT_DATA;
	m.m_next = m.m_nextpkt = NULL;
	m.m_owner = NULL;
	m.m_len = le32toh(sect->nbytes);
	m.m_data = dma.dma_vaddr;
	m.m_flags = 0;
#ifdef INET
	csum = in_cksum(&m, le32toh(sect->nbytes));
	if (csum != sect->cksum) {
		printf(": fw section %d, bad cksum (expected 0x%x got 0x%x)\n",
		    sectnum, sect->cksum, csum);
		txp_dma_free(sc, &dma);
		return -1;
	}
#endif

	bus_dmamap_sync(sc->sc_dmat, dma.dma_map, 0,
	    dma.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	WRITE_REG(sc, TXP_H2A_1, le32toh(sect->nbytes));
	WRITE_REG(sc, TXP_H2A_2, le32toh(sect->cksum));
	WRITE_REG(sc, TXP_H2A_3, le32toh(sect->addr));
	WRITE_REG(sc, TXP_H2A_4, BUS_ADDR_HI32(dma.dma_paddr));
	WRITE_REG(sc, TXP_H2A_5, BUS_ADDR_LO32(dma.dma_paddr));
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_SEGMENT_AVAILABLE);

	if (txp_download_fw_wait(sc)) {
		printf("%s: fw wait failed, section %d\n",
		    device_xname(sc->sc_dev), sectnum);
		err = -1;
	}

	bus_dmamap_sync(sc->sc_dmat, dma.dma_map, 0,
	    dma.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);

	txp_dma_free(sc, &dma);
	return (err);
}

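/*
 * Interrupt service: mask everything, then ack-and-process until the
 * ISR reads back zero.  Processing consults the host variable block
 * (shared with the firmware via DMA) to decide which rings made
 * progress.
 */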
static int
txp_intr(void *vsc)
{
	struct txp_softc *sc = vsc;
	struct txp_hostvar *hv = sc->sc_hostvar;
	uint32_t isr;
	int claimed = 0;

	/* mask all interrupts */
	WRITE_REG(sc, TXP_IMR, TXP_INT_RESERVED | TXP_INT_SELF |
	    TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
	    TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0 |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | TXP_INT_LATCH);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0,
	    sizeof(struct txp_hostvar),
	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);

	isr = READ_REG(sc, TXP_ISR);
	while (isr) {
		claimed = 1;
		WRITE_REG(sc, TXP_ISR, isr);

		if ((*sc->sc_rxhir.r_roff) != (*sc->sc_rxhir.r_woff))
			txp_rx_reclaim(sc, &sc->sc_rxhir, &sc->sc_rxhiring_dma);
		if ((*sc->sc_rxlor.r_roff) != (*sc->sc_rxlor.r_woff))
			txp_rx_reclaim(sc, &sc->sc_rxlor, &sc->sc_rxloring_dma);

		if (hv->hv_rx_buf_write_idx == hv->hv_rx_buf_read_idx)
			txp_rxbuf_reclaim(sc);

		if (sc->sc_txhir.r_cnt && (sc->sc_txhir.r_cons !=
		    TXP_OFFSET2IDX(le32toh(*(sc->sc_txhir.r_off)))))
			txp_tx_reclaim(sc, &sc->sc_txhir, &sc->sc_txhiring_dma);

		if (sc->sc_txlor.r_cnt && (sc->sc_txlor.r_cons !=
		    TXP_OFFSET2IDX(le32toh(*(sc->sc_txlor.r_off)))))
			txp_tx_reclaim(sc, &sc->sc_txlor, &sc->sc_txloring_dma);

		isr = READ_REG(sc, TXP_ISR);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0,
	    sizeof(struct txp_hostvar),
	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);

	/* unmask all interrupts */
	WRITE_REG(sc, TXP_IMR, TXP_INT_A2H_3);

	if_schedule_deferred_start(&sc->sc_arpcom.ec_if);

	return (claimed);
}

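/*
 * The Rx software descriptors are kept on a simple LIFO free list:
 * sc_rxd_pool[] holds pointers to the free entries of sc_rxd[] and
 * sc_rxd_pool_ptr counts how many are currently on the list.
 */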
static struct txp_swdesc *
txp_rxd_alloc(struct txp_softc *sc)
{
	if (sc->sc_rxd_pool_ptr == 0)
		return NULL;
	return sc->sc_rxd_pool[--sc->sc_rxd_pool_ptr];
}

static void
txp_rxd_free(struct txp_softc *sc, struct txp_swdesc *sd)
{
	KASSERT(sc->sc_rxd_pool_ptr < RXBUF_ENTRIES);
	sc->sc_rxd_pool[sc->sc_rxd_pool_ptr++] = sd;
}

static inline uint32_t
txp_rxd_idx(struct txp_softc *sc, struct txp_swdesc *sd)
{
	KASSERT(sd >= &sc->sc_rxd[0] && sd < &sc->sc_rxd[RXBUF_ENTRIES]);
	return (uint32_t)(sd - &sc->sc_rxd[0]);
}

static inline uint32_t
txp_txd_idx(struct txp_softc *sc, struct txp_swdesc *sd)
{
	KASSERT(sd >= &sc->sc_txd[0] && sd < &sc->sc_txd[TX_ENTRIES]);
	return (uint32_t)(sd - &sc->sc_txd[0]);
}

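/*
 * Reclaim received frames from an Rx ring.  The firmware advances the
 * write offset (r_woff) as it completes descriptors; we chase it with
 * the read offset (r_roff), handing completed mbufs up the stack.
 */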
static void
txp_rx_reclaim(struct txp_softc *sc, struct txp_rx_ring *r,
    struct txp_dma_alloc *dma)
{
	struct ifnet *ifp = &sc->sc_arpcom.ec_if;
	struct txp_rx_desc *rxd;
	struct mbuf *m;
	struct txp_swdesc *sd;
	uint32_t roff, woff;
	int sumflags = 0;
	int idx;

	roff = le32toh(*r->r_roff);
	woff = le32toh(*r->r_woff);
	idx = roff / sizeof(struct txp_rx_desc);
	rxd = r->r_desc + idx;

	while (roff != woff) {

		bus_dmamap_sync(sc->sc_dmat, dma->dma_map,
		    idx * sizeof(struct txp_rx_desc),
		    sizeof(struct txp_rx_desc), BUS_DMASYNC_POSTREAD);

		if (rxd->rx_flags & RX_FLAGS_ERROR) {
			printf("%s: error 0x%x\n", device_xname(sc->sc_dev),
			    le32toh(rxd->rx_stat));
			if_statinc(ifp, if_ierrors);
			goto next;
		}

		/* retrieve stashed pointer */
		KASSERT(rxd->rx_vaddrlo < RXBUF_ENTRIES);
		sd = &sc->sc_rxd[rxd->rx_vaddrlo];

		bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0,
		    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, sd->sd_map);
		m = sd->sd_mbuf;
		sd->sd_mbuf = NULL;
		txp_rxd_free(sc, sd);
		m->m_pkthdr.len = m->m_len = le16toh(rxd->rx_len);

#ifdef __STRICT_ALIGNMENT
		{
			/*
			 * XXX Nice chip, except it won't accept "off by 2"
			 * buffers, so we're forced to copy.  Supposedly
			 * this will be fixed in a newer firmware rev
			 * and this will be temporary.
			 */
			struct mbuf *mnew;

			MGETHDR(mnew, M_DONTWAIT, MT_DATA);
			if (mnew == NULL) {
				m_freem(m);
				goto next;
			}
			MCLAIM(mnew, &sc->sc_arpcom.ec_rx_mowner);
			if (m->m_len > (MHLEN - 2)) {
				MCLGET(mnew, M_DONTWAIT);
				if (!(mnew->m_flags & M_EXT)) {
					m_freem(mnew);
					m_freem(m);
					goto next;
				}
			}
			m_set_rcvif(mnew, ifp);
			mnew->m_pkthdr.len = mnew->m_len = m->m_len;
			mnew->m_data += 2;
			memcpy(mnew->m_data, m->m_data, m->m_len);
			m_freem(m);
			m = mnew;
		}
#endif

		if (rxd->rx_stat & htole32(RX_STAT_IPCKSUMBAD))
			sumflags |= (M_CSUM_IPv4 | M_CSUM_IPv4_BAD);
		else if (rxd->rx_stat & htole32(RX_STAT_IPCKSUMGOOD))
			sumflags |= M_CSUM_IPv4;

		if (rxd->rx_stat & htole32(RX_STAT_TCPCKSUMBAD))
			sumflags |= (M_CSUM_TCPv4 | M_CSUM_TCP_UDP_BAD);
		else if (rxd->rx_stat & htole32(RX_STAT_TCPCKSUMGOOD))
			sumflags |= M_CSUM_TCPv4;

		if (rxd->rx_stat & htole32(RX_STAT_UDPCKSUMBAD))
			sumflags |= (M_CSUM_UDPv4 | M_CSUM_TCP_UDP_BAD);
		else if (rxd->rx_stat & htole32(RX_STAT_UDPCKSUMGOOD))
			sumflags |= M_CSUM_UDPv4;

		m->m_pkthdr.csum_flags = sumflags;

		if (rxd->rx_stat & htole32(RX_STAT_VLAN)) {
			vlan_set_tag(m, htons(rxd->rx_vlan >> 16));
		}

		if_percpuq_enqueue(ifp->if_percpuq, m);

next:
		bus_dmamap_sync(sc->sc_dmat, dma->dma_map,
		    idx * sizeof(struct txp_rx_desc),
		    sizeof(struct txp_rx_desc), BUS_DMASYNC_PREREAD);

		roff += sizeof(struct txp_rx_desc);
		if (roff == (RX_ENTRIES * sizeof(struct txp_rx_desc))) {
			idx = 0;
			roff = 0;
			rxd = r->r_desc;
		} else {
			idx++;
			rxd++;
		}
		woff = le32toh(*r->r_woff);
	}

	*r->r_roff = htole32(woff);
}

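/*
 * Replenish the Rx buffer ring: hand fresh mbuf clusters to the
 * firmware until the write index catches up with its read index or
 * we run out of free software descriptors.
 */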
static void
txp_rxbuf_reclaim(struct txp_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ec_if;
	struct txp_hostvar *hv = sc->sc_hostvar;
	struct txp_rxbuf_desc *rbd;
	struct txp_swdesc *sd;
	uint32_t i, end;

	end = TXP_OFFSET2IDX(le32toh(hv->hv_rx_buf_read_idx));
	i = TXP_OFFSET2IDX(le32toh(hv->hv_rx_buf_write_idx));

	if (++i == RXBUF_ENTRIES)
		i = 0;

	rbd = sc->sc_rxbufs + i;

	while (i != end) {
		sd = txp_rxd_alloc(sc);
		if (sd == NULL)
			break;

		MGETHDR(sd->sd_mbuf, M_DONTWAIT, MT_DATA);
		if (sd->sd_mbuf == NULL)
			goto err_sd;
		MCLAIM(sd->sd_mbuf, &sc->sc_arpcom.ec_rx_mowner);

		MCLGET(sd->sd_mbuf, M_DONTWAIT);
		if ((sd->sd_mbuf->m_flags & M_EXT) == 0)
			goto err_mbuf;
		m_set_rcvif(sd->sd_mbuf, ifp);
		sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES;
		if (bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map, sd->sd_mbuf,
		    BUS_DMA_NOWAIT)) {
			goto err_mbuf;
		}

		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxbufring_dma.dma_map,
		    i * sizeof(struct txp_rxbuf_desc),
		    sizeof(struct txp_rxbuf_desc), BUS_DMASYNC_POSTWRITE);

		/* stash away pointer */
		rbd->rb_vaddrlo = txp_rxd_idx(sc, sd);

		rbd->rb_paddrlo =
		    htole32(BUS_ADDR_LO32(sd->sd_map->dm_segs[0].ds_addr));
		rbd->rb_paddrhi =
		    htole32(BUS_ADDR_HI32(sd->sd_map->dm_segs[0].ds_addr));

		bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0,
		    sd->sd_map->dm_mapsize, BUS_DMASYNC_PREREAD);

		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxbufring_dma.dma_map,
		    i * sizeof(struct txp_rxbuf_desc),
		    sizeof(struct txp_rxbuf_desc), BUS_DMASYNC_PREWRITE);

		hv->hv_rx_buf_write_idx = htole32(TXP_IDX2OFFSET(i));

		if (++i == RXBUF_ENTRIES) {
			i = 0;
			rbd = sc->sc_rxbufs;
		} else
			rbd++;
	}
	return;

err_mbuf:
	m_freem(sd->sd_mbuf);
	sd->sd_mbuf = NULL;
err_sd:
	txp_rxd_free(sc, sd);
}

/*
 * Reclaim mbufs and entries from a transmit ring.
 */
static void
txp_tx_reclaim(struct txp_softc *sc, struct txp_tx_ring *r,
    struct txp_dma_alloc *dma)
{
	struct ifnet *ifp = &sc->sc_arpcom.ec_if;
	uint32_t idx = TXP_OFFSET2IDX(le32toh(*(r->r_off)));
	uint32_t cons = r->r_cons, cnt = r->r_cnt;
	struct txp_tx_desc *txd = r->r_desc + cons;
	struct txp_swdesc *sd;
	struct mbuf *m;

	while (cons != idx) {
		if (cnt == 0)
			break;

		bus_dmamap_sync(sc->sc_dmat, dma->dma_map,
		    cons * sizeof(struct txp_tx_desc),
		    sizeof(struct txp_tx_desc),
		    BUS_DMASYNC_POSTWRITE);

		if ((txd->tx_flags & TX_FLAGS_TYPE_M) ==
		    TX_FLAGS_TYPE_DATA) {
			KASSERT(txd->tx_addrlo < TX_ENTRIES);
			sd = &sc->sc_txd[txd->tx_addrlo];
			m = sd->sd_mbuf;
			sd->sd_mbuf = NULL;
			if (m != NULL) {
				bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0,
				    sd->sd_map->dm_mapsize,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->sc_dmat, sd->sd_map);
				m_freem(m);
				txd->tx_addrlo = 0;
				txd->tx_addrhi = 0;
				if_statinc(ifp, if_opackets);
			}
		}
		ifp->if_flags &= ~IFF_OACTIVE;

		if (++cons == TX_ENTRIES) {
			txd = r->r_desc;
			cons = 0;
		} else
			txd++;

		cnt--;
	}

	r->r_cons = cons;
	r->r_cnt = cnt;
	if (cnt == 0)
		ifp->if_timer = 0;
}

static bool
txp_shutdown(device_t self, int howto)
{
	struct txp_softc *sc;

	sc = device_private(self);

	/* mask all interrupts */
	WRITE_REG(sc, TXP_IMR,
	    TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_LATCH);

	txp_command(sc, TXP_CMD_TX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 0);
	txp_command(sc, TXP_CMD_RX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 0);
	txp_command(sc, TXP_CMD_HALT, 0, 0, 0, NULL, NULL, NULL, 0);

	return true;
}

static int
txp_alloc_rings(struct txp_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ec_if;
	struct txp_boot_record *boot;
	struct txp_swdesc *sd;
	uint32_t r;
	int i, j, nb;

	/* boot record */
	if (txp_dma_malloc(sc, sizeof(struct txp_boot_record),
	    &sc->sc_boot_dma, BUS_DMA_COHERENT)) {
		printf(": can't allocate boot record\n");
		return (-1);
	}
	boot = (struct txp_boot_record *)sc->sc_boot_dma.dma_vaddr;
	memset(boot, 0, sizeof(*boot));
	sc->sc_boot = boot;

	/* host variables */
	if (txp_dma_malloc(sc, sizeof(struct txp_hostvar), &sc->sc_host_dma,
	    BUS_DMA_COHERENT)) {
		printf(": can't allocate host ring\n");
		goto bail_boot;
	}
	memset(sc->sc_host_dma.dma_vaddr, 0, sizeof(struct txp_hostvar));
	boot->br_hostvar_lo = htole32(BUS_ADDR_LO32(sc->sc_host_dma.dma_paddr));
	boot->br_hostvar_hi = htole32(BUS_ADDR_HI32(sc->sc_host_dma.dma_paddr));
	sc->sc_hostvar = (struct txp_hostvar *)sc->sc_host_dma.dma_vaddr;

	/* high priority tx ring */
	if (txp_dma_malloc(sc, sizeof(struct txp_tx_desc) * TX_ENTRIES,
	    &sc->sc_txhiring_dma, BUS_DMA_COHERENT)) {
		printf(": can't allocate high tx ring\n");
		goto bail_host;
	}
	memset(sc->sc_txhiring_dma.dma_vaddr, 0,
	    sizeof(struct txp_tx_desc) * TX_ENTRIES);
	boot->br_txhipri_lo =
	    htole32(BUS_ADDR_LO32(sc->sc_txhiring_dma.dma_paddr));
	boot->br_txhipri_hi =
	    htole32(BUS_ADDR_HI32(sc->sc_txhiring_dma.dma_paddr));
	boot->br_txhipri_siz = htole32(TX_ENTRIES * sizeof(struct txp_tx_desc));
	sc->sc_txhir.r_reg = TXP_H2A_1;
	sc->sc_txhir.r_desc =
	    (struct txp_tx_desc *)sc->sc_txhiring_dma.dma_vaddr;
	sc->sc_txhir.r_cons = sc->sc_txhir.r_prod = sc->sc_txhir.r_cnt = 0;
	sc->sc_txhir.r_off = &sc->sc_hostvar->hv_tx_hi_desc_read_idx;
	for (i = 0; i < TX_ENTRIES; i++) {
		if (bus_dmamap_create(sc->sc_dmat, TXP_MAX_PKTLEN,
		    TXP_MAXTXSEGS, TXP_MAX_SEGLEN, 0, BUS_DMA_NOWAIT,
		    &sc->sc_txd[i].sd_map) != 0) {
			for (j = 0; j < i; j++) {
				bus_dmamap_destroy(sc->sc_dmat,
				    sc->sc_txd[j].sd_map);
				sc->sc_txd[j].sd_map = NULL;
			}
			goto bail_txhiring;
		}
	}

	/* low priority tx ring */
	if (txp_dma_malloc(sc, sizeof(struct txp_tx_desc) * TX_ENTRIES,
	    &sc->sc_txloring_dma, BUS_DMA_COHERENT)) {
		printf(": can't allocate low tx ring\n");
		goto bail_txhiring;
	}
	memset(sc->sc_txloring_dma.dma_vaddr, 0,
	    sizeof(struct txp_tx_desc) * TX_ENTRIES);
	boot->br_txlopri_lo =
	    htole32(BUS_ADDR_LO32(sc->sc_txloring_dma.dma_paddr));
	boot->br_txlopri_hi =
	    htole32(BUS_ADDR_HI32(sc->sc_txloring_dma.dma_paddr));
	boot->br_txlopri_siz = htole32(TX_ENTRIES * sizeof(struct txp_tx_desc));
	sc->sc_txlor.r_reg = TXP_H2A_3;
	sc->sc_txlor.r_desc =
	    (struct txp_tx_desc *)sc->sc_txloring_dma.dma_vaddr;
	sc->sc_txlor.r_cons = sc->sc_txlor.r_prod = sc->sc_txlor.r_cnt = 0;
	sc->sc_txlor.r_off = &sc->sc_hostvar->hv_tx_lo_desc_read_idx;

	/* high priority rx ring */
	if (txp_dma_malloc(sc, sizeof(struct txp_rx_desc) * RX_ENTRIES,
	    &sc->sc_rxhiring_dma, BUS_DMA_COHERENT)) {
		printf(": can't allocate high rx ring\n");
		goto bail_txloring;
	}
	memset(sc->sc_rxhiring_dma.dma_vaddr, 0,
	    sizeof(struct txp_rx_desc) * RX_ENTRIES);
	boot->br_rxhipri_lo =
	    htole32(BUS_ADDR_LO32(sc->sc_rxhiring_dma.dma_paddr));
	boot->br_rxhipri_hi =
	    htole32(BUS_ADDR_HI32(sc->sc_rxhiring_dma.dma_paddr));
	boot->br_rxhipri_siz = htole32(RX_ENTRIES * sizeof(struct txp_rx_desc));
	sc->sc_rxhir.r_desc =
	    (struct txp_rx_desc *)sc->sc_rxhiring_dma.dma_vaddr;
	sc->sc_rxhir.r_roff = &sc->sc_hostvar->hv_rx_hi_read_idx;
	sc->sc_rxhir.r_woff = &sc->sc_hostvar->hv_rx_hi_write_idx;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxhiring_dma.dma_map,
	    0, sc->sc_rxhiring_dma.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	/* low priority rx ring */
	if (txp_dma_malloc(sc, sizeof(struct txp_rx_desc) * RX_ENTRIES,
	    &sc->sc_rxloring_dma, BUS_DMA_COHERENT)) {
		printf(": can't allocate low rx ring\n");
		goto bail_rxhiring;
	}
	memset(sc->sc_rxloring_dma.dma_vaddr, 0,
	    sizeof(struct txp_rx_desc) * RX_ENTRIES);
	boot->br_rxlopri_lo =
	    htole32(BUS_ADDR_LO32(sc->sc_rxloring_dma.dma_paddr));
	boot->br_rxlopri_hi =
	    htole32(BUS_ADDR_HI32(sc->sc_rxloring_dma.dma_paddr));
	boot->br_rxlopri_siz = htole32(RX_ENTRIES * sizeof(struct txp_rx_desc));
	sc->sc_rxlor.r_desc =
	    (struct txp_rx_desc *)sc->sc_rxloring_dma.dma_vaddr;
	sc->sc_rxlor.r_roff = &sc->sc_hostvar->hv_rx_lo_read_idx;
	sc->sc_rxlor.r_woff = &sc->sc_hostvar->hv_rx_lo_write_idx;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxloring_dma.dma_map,
	    0, sc->sc_rxloring_dma.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	/* command ring */
	if (txp_dma_malloc(sc, sizeof(struct txp_cmd_desc) * CMD_ENTRIES,
	    &sc->sc_cmdring_dma, BUS_DMA_COHERENT)) {
		printf(": can't allocate command ring\n");
		goto bail_rxloring;
	}
	memset(sc->sc_cmdring_dma.dma_vaddr, 0,
	    sizeof(struct txp_cmd_desc) * CMD_ENTRIES);
	boot->br_cmd_lo = htole32(BUS_ADDR_LO32(sc->sc_cmdring_dma.dma_paddr));
	boot->br_cmd_hi = htole32(BUS_ADDR_HI32(sc->sc_cmdring_dma.dma_paddr));
	boot->br_cmd_siz = htole32(CMD_ENTRIES * sizeof(struct txp_cmd_desc));
	sc->sc_cmdring.base = (struct txp_cmd_desc *)sc->sc_cmdring_dma.dma_vaddr;
	sc->sc_cmdring.size = CMD_ENTRIES * sizeof(struct txp_cmd_desc);
	sc->sc_cmdring.lastwrite = 0;

	/* response ring */
	if (txp_dma_malloc(sc, sizeof(struct txp_rsp_desc) * RSP_ENTRIES,
	    &sc->sc_rspring_dma, BUS_DMA_COHERENT)) {
		printf(": can't allocate response ring\n");
		goto bail_cmdring;
	}
	memset(sc->sc_rspring_dma.dma_vaddr, 0,
	    sizeof(struct txp_rsp_desc) * RSP_ENTRIES);
	boot->br_resp_lo = htole32(BUS_ADDR_LO32(sc->sc_rspring_dma.dma_paddr));
	boot->br_resp_hi = htole32(BUS_ADDR_HI32(sc->sc_rspring_dma.dma_paddr));
	boot->br_resp_siz = htole32(RSP_ENTRIES * sizeof(struct txp_rsp_desc));
	sc->sc_rspring.base = (struct txp_rsp_desc *)sc->sc_rspring_dma.dma_vaddr;
	sc->sc_rspring.size = RSP_ENTRIES * sizeof(struct txp_rsp_desc);
	sc->sc_rspring.lastwrite = 0;

	/* receive buffer ring */
	if (txp_dma_malloc(sc, sizeof(struct txp_rxbuf_desc) * RXBUF_ENTRIES,
	    &sc->sc_rxbufring_dma, BUS_DMA_COHERENT)) {
		printf(": can't allocate rx buffer ring\n");
		goto bail_rspring;
	}
	memset(sc->sc_rxbufring_dma.dma_vaddr, 0,
	    sizeof(struct txp_rxbuf_desc) * RXBUF_ENTRIES);
	boot->br_rxbuf_lo = htole32(BUS_ADDR_LO32(sc->sc_rxbufring_dma.dma_paddr));
	boot->br_rxbuf_hi = htole32(BUS_ADDR_HI32(sc->sc_rxbufring_dma.dma_paddr));
	boot->br_rxbuf_siz = htole32(RXBUF_ENTRIES * sizeof(struct txp_rxbuf_desc));
	sc->sc_rxbufs = (struct txp_rxbuf_desc *)sc->sc_rxbufring_dma.dma_vaddr;
	for (nb = 0; nb < RXBUF_ENTRIES; nb++) {
		sd = &sc->sc_rxd[nb];

		/* stash away pointer */
		sc->sc_rxbufs[nb].rb_vaddrlo = txp_rxd_idx(sc, sd);

		MGETHDR(sd->sd_mbuf, M_WAIT, MT_DATA);
		if (sd->sd_mbuf == NULL) {
			goto bail_rxbufring;
		}

		MCLGET(sd->sd_mbuf, M_WAIT);
		if ((sd->sd_mbuf->m_flags & M_EXT) == 0) {
			goto bail_rxbufring;
		}
		sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES;
		m_set_rcvif(sd->sd_mbuf, ifp);
		if (bus_dmamap_create(sc->sc_dmat, TXP_MAX_PKTLEN, 1,
		    TXP_MAX_PKTLEN, 0, BUS_DMA_WAITOK, &sd->sd_map)) {
			goto bail_rxbufring;
		}
		if (bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map, sd->sd_mbuf,
		    BUS_DMA_WAITOK)) {
			bus_dmamap_destroy(sc->sc_dmat, sd->sd_map);
			goto bail_rxbufring;
		}
		bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0,
		    sd->sd_map->dm_mapsize, BUS_DMASYNC_PREREAD);

		sc->sc_rxbufs[nb].rb_paddrlo =
		    htole32(BUS_ADDR_LO32(sd->sd_map->dm_segs[0].ds_addr));
		sc->sc_rxbufs[nb].rb_paddrhi =
		    htole32(BUS_ADDR_HI32(sd->sd_map->dm_segs[0].ds_addr));
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxbufring_dma.dma_map,
	    0, sc->sc_rxbufring_dma.dma_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	sc->sc_hostvar->hv_rx_buf_write_idx = htole32((RXBUF_ENTRIES - 1) *
	    sizeof(struct txp_rxbuf_desc));

	/* zero dma */
	if (txp_dma_malloc(sc, sizeof(uint32_t), &sc->sc_zero_dma,
	    BUS_DMA_COHERENT)) {
		printf(": can't allocate zero dma\n");
		goto bail_rxbufring;
	}
	memset(sc->sc_zero_dma.dma_vaddr, 0, sizeof(uint32_t));
	boot->br_zero_lo = htole32(BUS_ADDR_LO32(sc->sc_zero_dma.dma_paddr));
	boot->br_zero_hi = htole32(BUS_ADDR_HI32(sc->sc_zero_dma.dma_paddr));

	/* See if it's waiting for boot, and try to boot it */
	for (i = 0; i < 10000; i++) {
		r = READ_REG(sc, TXP_A2H_0);
		if (r == STAT_WAITING_FOR_BOOT)
			break;
		DELAY(50);
	}
	if (r != STAT_WAITING_FOR_BOOT) {
		printf(": not waiting for boot\n");
		goto bail;
	}
	WRITE_REG(sc, TXP_H2A_2, BUS_ADDR_HI32(sc->sc_boot_dma.dma_paddr));
	WRITE_REG(sc, TXP_H2A_1, BUS_ADDR_LO32(sc->sc_boot_dma.dma_paddr));
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_REGISTER_BOOT_RECORD);

	/* See if it booted */
	for (i = 0; i < 10000; i++) {
		r = READ_REG(sc, TXP_A2H_0);
		if (r == STAT_RUNNING)
			break;
		DELAY(50);
	}
	if (r != STAT_RUNNING) {
		printf(": fw not running\n");
		goto bail;
	}

	/* Clear TX and CMD ring write registers */
	WRITE_REG(sc, TXP_H2A_1, TXP_BOOTCMD_NULL);
	WRITE_REG(sc, TXP_H2A_2, TXP_BOOTCMD_NULL);
	WRITE_REG(sc, TXP_H2A_3, TXP_BOOTCMD_NULL);
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_NULL);

	return (0);

bail:
	txp_dma_free(sc, &sc->sc_zero_dma);
bail_rxbufring:
	if (nb == RXBUF_ENTRIES)
		nb--;
	for (i = 0; i <= nb; i++) {
		memcpy(&sd, __UNVOLATILE(&sc->sc_rxbufs[i].rb_vaddrlo),
		    sizeof(sd));
		/* XXXJRT */
	}
	txp_dma_free(sc, &sc->sc_rxbufring_dma);
bail_rspring:
	txp_dma_free(sc, &sc->sc_rspring_dma);
bail_cmdring:
	txp_dma_free(sc, &sc->sc_cmdring_dma);
bail_rxloring:
	txp_dma_free(sc, &sc->sc_rxloring_dma);
bail_rxhiring:
	txp_dma_free(sc, &sc->sc_rxhiring_dma);
bail_txloring:
	txp_dma_free(sc, &sc->sc_txloring_dma);
bail_txhiring:
	txp_dma_free(sc, &sc->sc_txhiring_dma);
bail_host:
	txp_dma_free(sc, &sc->sc_host_dma);
bail_boot:
	txp_dma_free(sc, &sc->sc_boot_dma);
	return (-1);
}

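/*
 * Allocate a physically contiguous, DMA-able region: allocate the raw
 * segment, map it into KVA, create a map, and load the map so we know
 * the bus address.  Each failure step unwinds the ones before it.
 */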
static int
txp_dma_malloc(struct txp_softc *sc, bus_size_t size,
    struct txp_dma_alloc *dma, int mapflags)
{
	int r;

	if ((r = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0,
	    &dma->dma_seg, 1, &dma->dma_nseg, 0)) != 0)
		goto fail_0;

	if ((r = bus_dmamem_map(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg,
	    size, &dma->dma_vaddr, mapflags | BUS_DMA_NOWAIT)) != 0)
		goto fail_1;

	if ((r = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &dma->dma_map)) != 0)
		goto fail_2;

	if ((r = bus_dmamap_load(sc->sc_dmat, dma->dma_map, dma->dma_vaddr,
	    size, NULL, BUS_DMA_NOWAIT)) != 0)
		goto fail_3;

	dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
	return (0);

fail_3:
	bus_dmamap_destroy(sc->sc_dmat, dma->dma_map);
fail_2:
	bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, size);
fail_1:
	bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg);
fail_0:
	return (r);
}

static void
txp_dma_free(struct txp_softc *sc, struct txp_dma_alloc *dma)
{
	bus_size_t mapsize = dma->dma_map->dm_mapsize;

	bus_dmamap_unload(sc->sc_dmat, dma->dma_map);
	bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, mapsize);
	bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg);
	bus_dmamap_destroy(sc->sc_dmat, dma->dma_map);
}

static int
txp_ioctl(struct ifnet *ifp, u_long command, void *data)
{
	struct txp_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

#if 0
	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, command, data)) > 0) {
		splx(s);
		return error;
	}
#endif

	switch (command) {
	case SIOCINITIFADDR:
		ifp->if_flags |= IFF_UP;
		txp_init(sc);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			arp_ifinit(ifp, ifa);
			break;
#endif /* INET */
		default:
			break;
		}
		break;
	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, command, data)) != 0)
			break;
		if (ifp->if_flags & IFF_UP) {
			txp_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				txp_stop(sc);
		}
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
			break;

		error = 0;

		if (command != SIOCADDMULTI && command != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware
			 * filter accordingly.
			 */
			txp_set_filter(sc);
		}
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	splx(s);

	return (error);
}

static void
txp_init(struct txp_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ec_if;
	int s;

	txp_stop(sc);

	s = splnet();

	txp_set_filter(sc);

	txp_command(sc, TXP_CMD_TX_ENABLE, 0, 0, 0, NULL, NULL, NULL, 1);
	txp_command(sc, TXP_CMD_RX_ENABLE, 0, 0, 0, NULL, NULL, NULL, 1);

	WRITE_REG(sc, TXP_IER, TXP_INT_RESERVED | TXP_INT_SELF |
	    TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
	    TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0 |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | TXP_INT_LATCH);
	WRITE_REG(sc, TXP_IMR, TXP_INT_A2H_3);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ifp->if_timer = 0;

	if (!callout_pending(&sc->sc_tick))
		callout_schedule(&sc->sc_tick, hz);

	splx(s);
}

static void
txp_tick(void *vsc)
{
	struct txp_softc *sc = vsc;
	struct ifnet *ifp = &sc->sc_arpcom.ec_if;
	struct txp_rsp_desc *rsp = NULL;
	struct txp_ext_desc *ext;
	int s;

	s = splnet();
	txp_rxbuf_reclaim(sc);

	if (txp_command2(sc, TXP_CMD_READ_STATISTICS, 0, 0, 0, NULL, 0,
	    &rsp, 1))
		goto out;
	if (rsp->rsp_numdesc != 6)
		goto out;
	if (txp_command(sc, TXP_CMD_CLEAR_STATISTICS, 0, 0, 0,
	    NULL, NULL, NULL, 1))
		goto out;
	ext = (struct txp_ext_desc *)(rsp + 1);

	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
	if_statadd_ref(nsr, if_ierrors,
	    ext[3].ext_2 + ext[3].ext_3 + ext[3].ext_4 +
	    ext[4].ext_1 + ext[4].ext_4);
	if_statadd_ref(nsr, if_oerrors,
	    ext[0].ext_1 + ext[1].ext_1 + ext[1].ext_4 + ext[2].ext_1);
	if_statadd_ref(nsr, if_collisions,
	    ext[0].ext_2 + ext[0].ext_3 + ext[1].ext_2 + ext[1].ext_3);
	if_statadd_ref(nsr, if_opackets, rsp->rsp_par2);
	IF_STAT_PUTREF(ifp);

out:
	if (rsp != NULL)
		free(rsp, M_DEVBUF);

	splx(s);
	callout_schedule(&sc->sc_tick, hz);
}

static void
txp_start(struct ifnet *ifp)
{
	struct txp_softc *sc = ifp->if_softc;
	struct txp_tx_ring *r = &sc->sc_txhir;
	struct txp_tx_desc *txd;
	int txdidx;
	struct txp_frag_desc *fxd;
	struct mbuf *m, *mnew;
	struct txp_swdesc *sd;
	uint32_t prod, cnt, i;
	int error;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	prod = r->r_prod;
	cnt = r->r_cnt;

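	/*
	 * Each packet occupies one data descriptor followed by one
	 * fragment descriptor per DMA segment, so leave headroom for
	 * TXP_MAXTXSEGS fragments plus slop before stalling the queue.
	 */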
	while (1) {
		if (cnt >= TX_ENTRIES - TXP_MAXTXSEGS - 4) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;
		mnew = NULL;

		sd = sc->sc_txd + prod;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  In this case, we'll copy
		 * and try again.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map, m,
		    BUS_DMA_NOWAIT) != 0) {
			MGETHDR(mnew, M_DONTWAIT, MT_DATA);
			if (mnew == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    device_xname(sc->sc_dev));
				break;
			}
			MCLAIM(mnew, &sc->sc_arpcom.ec_tx_mowner);
			if (m->m_pkthdr.len > MHLEN) {
				MCLGET(mnew, M_DONTWAIT);
				if ((mnew->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n",
					    device_xname(sc->sc_dev));
					m_freem(mnew);
					break;
				}
			}
			m_copydata(m, 0, m->m_pkthdr.len, mtod(mnew, void *));
			mnew->m_pkthdr.len = mnew->m_len = m->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map,
			    mnew, BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", device_xname(sc->sc_dev),
				    error);
				m_freem(mnew);
				break;
			}
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (mnew != NULL) {
			m_freem(m);
			m = mnew;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		sd->sd_mbuf = m;

		txd = r->r_desc + prod;
		txdidx = prod;
		txd->tx_flags = TX_FLAGS_TYPE_DATA;
		txd->tx_numdesc = 0;
		txd->tx_addrlo = txp_txd_idx(sc, sd);
		txd->tx_addrhi = 0;
		txd->tx_totlen = m->m_pkthdr.len;
		txd->tx_pflags = 0;
		txd->tx_numdesc = sd->sd_map->dm_nsegs;

		if (++prod == TX_ENTRIES)
			prod = 0;
		cnt++;

		if (vlan_has_tag(m))
			txd->tx_pflags = TX_PFLAGS_VLAN |
			    (htons(vlan_get_tag(m)) << TX_PFLAGS_VLANTAG_S);

		if (m->m_pkthdr.csum_flags & M_CSUM_IPv4)
			txd->tx_pflags |= TX_PFLAGS_IPCKSUM;
#ifdef TRY_TX_TCP_CSUM
		if (m->m_pkthdr.csum_flags & M_CSUM_TCPv4)
			txd->tx_pflags |= TX_PFLAGS_TCPCKSUM;
#endif
#ifdef TRY_TX_UDP_CSUM
		if (m->m_pkthdr.csum_flags & M_CSUM_UDPv4)
			txd->tx_pflags |= TX_PFLAGS_UDPCKSUM;
#endif

		bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0,
		    sd->sd_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

		fxd = (struct txp_frag_desc *)(r->r_desc + prod);
		for (i = 0; i < sd->sd_map->dm_nsegs; i++) {
			fxd->frag_flags = FRAG_FLAGS_TYPE_FRAG |
			    FRAG_FLAGS_VALID;
			fxd->frag_rsvd1 = 0;
			fxd->frag_len = htole16(sd->sd_map->dm_segs[i].ds_len);
			fxd->frag_addrlo =
			    htole32(BUS_ADDR_LO32(sd->sd_map->dm_segs[i].ds_addr));
			fxd->frag_addrhi =
			    htole32(BUS_ADDR_HI32(sd->sd_map->dm_segs[i].ds_addr));
			fxd->frag_rsvd2 = 0;

			bus_dmamap_sync(sc->sc_dmat,
			    sc->sc_txhiring_dma.dma_map,
			    prod * sizeof(struct txp_frag_desc),
			    sizeof(struct txp_frag_desc), BUS_DMASYNC_PREWRITE);

			if (++prod == TX_ENTRIES) {
				fxd = (struct txp_frag_desc *)r->r_desc;
				prod = 0;
			} else
				fxd++;
			cnt++;
		}

		ifp->if_timer = 5;

		bpf_mtap(ifp, m, BPF_D_OUT);

		txd->tx_flags |= TX_FLAGS_VALID;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txhiring_dma.dma_map,
		    txdidx * sizeof(struct txp_tx_desc),
		    sizeof(struct txp_tx_desc), BUS_DMASYNC_PREWRITE);

#if 0
		{
			struct mbuf *mx;
			int i;

			printf("txd: flags 0x%x ndesc %d totlen %d pflags 0x%x\n",
			    txd->tx_flags, txd->tx_numdesc, txd->tx_totlen,
			    txd->tx_pflags);
			for (mx = m; mx != NULL; mx = mx->m_next) {
				for (i = 0; i < mx->m_len; i++) {
					printf(":%02x",
					    (uint8_t)mx->m_data[i]);
				}
			}
			printf("\n");
		}
#endif

		WRITE_REG(sc, r->r_reg, TXP_IDX2OFFSET(prod));
	}

	r->r_prod = prod;
	r->r_cnt = cnt;
}

/*
 * Handle simple commands sent to the typhoon
 */
static int
txp_command(struct txp_softc *sc, uint16_t id, uint16_t in1, uint32_t in2,
    uint32_t in3, uint16_t *out1, uint32_t *out2, uint32_t *out3, int wait)
{
	struct txp_rsp_desc *rsp = NULL;

	if (txp_command2(sc, id, in1, in2, in3, NULL, 0, &rsp, wait))
		return (-1);

	if (!wait)
		return (0);

	if (out1 != NULL)
		*out1 = le16toh(rsp->rsp_par1);
	if (out2 != NULL)
		*out2 = le32toh(rsp->rsp_par2);
	if (out3 != NULL)
		*out3 = le32toh(rsp->rsp_par3);
	free(rsp, M_DEVBUF);
	return (0);
}

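/*
 * Queue a command descriptor (plus optional extension descriptors) on
 * the command ring and kick the firmware via TXP_H2A_2.  When "wait"
 * is set, poll the response ring until a response with a matching id
 * and sequence number shows up, and return a malloc'ed copy of it.
 */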
static int
txp_command2(struct txp_softc *sc, uint16_t id, uint16_t in1, uint32_t in2,
    uint32_t in3, struct txp_ext_desc *in_extp, uint8_t in_extn,
    struct txp_rsp_desc **rspp, int wait)
{
	struct txp_hostvar *hv = sc->sc_hostvar;
	struct txp_cmd_desc *cmd;
	struct txp_ext_desc *ext;
	uint32_t idx, i;
	uint16_t seq;

	if (txp_cmd_desc_numfree(sc) < (in_extn + 1)) {
		printf("%s: no free cmd descriptors\n", TXP_DEVNAME(sc));
		return (-1);
	}

	idx = sc->sc_cmdring.lastwrite;
	cmd = (struct txp_cmd_desc *)(((uint8_t *)sc->sc_cmdring.base) + idx);
	memset(cmd, 0, sizeof(*cmd));

	cmd->cmd_numdesc = in_extn;
	seq = sc->sc_seq++;
	cmd->cmd_seq = htole16(seq);
	cmd->cmd_id = htole16(id);
	cmd->cmd_par1 = htole16(in1);
	cmd->cmd_par2 = htole32(in2);
	cmd->cmd_par3 = htole32(in3);
	cmd->cmd_flags = CMD_FLAGS_TYPE_CMD |
	    (wait ? CMD_FLAGS_RESP : 0) | CMD_FLAGS_VALID;

	idx += sizeof(struct txp_cmd_desc);
	if (idx == sc->sc_cmdring.size)
		idx = 0;

	for (i = 0; i < in_extn; i++) {
		ext = (struct txp_ext_desc *)(((uint8_t *)sc->sc_cmdring.base) + idx);
		memcpy(ext, in_extp, sizeof(struct txp_ext_desc));
		in_extp++;
		idx += sizeof(struct txp_cmd_desc);
		if (idx == sc->sc_cmdring.size)
			idx = 0;
	}

	sc->sc_cmdring.lastwrite = idx;

	WRITE_REG(sc, TXP_H2A_2, sc->sc_cmdring.lastwrite);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0,
	    sizeof(struct txp_hostvar), BUS_DMASYNC_PREREAD);

	if (!wait)
		return (0);

	for (i = 0; i < 10000; i++) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0,
		    sizeof(struct txp_hostvar), BUS_DMASYNC_POSTREAD);
		idx = le32toh(hv->hv_resp_read_idx);
		if (idx != le32toh(hv->hv_resp_write_idx)) {
			*rspp = NULL;
			if (txp_response(sc, idx, id, seq, rspp))
				return (-1);
			if (*rspp != NULL)
				break;
		}
		bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0,
		    sizeof(struct txp_hostvar), BUS_DMASYNC_PREREAD);
		DELAY(50);
	}
	if (i == 10000 || (*rspp) == NULL) {
		printf("%s: 0x%x command failed\n", TXP_DEVNAME(sc), id);
		return (-1);
	}

	return (0);
}

static int
txp_response(struct txp_softc *sc, uint32_t ridx, uint16_t id, uint16_t seq,
    struct txp_rsp_desc **rspp)
{
	struct txp_hostvar *hv = sc->sc_hostvar;
	struct txp_rsp_desc *rsp;

	while (ridx != le32toh(hv->hv_resp_write_idx)) {
		rsp = (struct txp_rsp_desc *)(((uint8_t *)sc->sc_rspring.base) + ridx);

		if (id == le16toh(rsp->rsp_id) && le16toh(rsp->rsp_seq) == seq) {
			*rspp = (struct txp_rsp_desc *)malloc(
			    sizeof(struct txp_rsp_desc) * (rsp->rsp_numdesc + 1),
			    M_DEVBUF, M_NOWAIT);
			if ((*rspp) == NULL)
				return (-1);
			txp_rsp_fixup(sc, rsp, *rspp);
			return (0);
		}

		if (rsp->rsp_flags & RSP_FLAGS_ERROR) {
			printf("%s: response error: id 0x%x\n",
			    TXP_DEVNAME(sc), le16toh(rsp->rsp_id));
			txp_rsp_fixup(sc, rsp, NULL);
			ridx = le32toh(hv->hv_resp_read_idx);
			continue;
		}

		switch (le16toh(rsp->rsp_id)) {
		case TXP_CMD_CYCLE_STATISTICS:
		case TXP_CMD_MEDIA_STATUS_READ:
			break;
		case TXP_CMD_HELLO_RESPONSE:
			printf("%s: hello\n", TXP_DEVNAME(sc));
			break;
		default:
			printf("%s: unknown id(0x%x)\n", TXP_DEVNAME(sc),
			    le16toh(rsp->rsp_id));
		}

		txp_rsp_fixup(sc, rsp, NULL);
		ridx = le32toh(hv->hv_resp_read_idx);
	}

	return (0);
}

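/*
 * Copy a response (and its extension descriptors) out of the response
 * ring, handling wrap-around, and advance the shared read index so
 * the firmware can reuse the slots.
 */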
static void
txp_rsp_fixup(struct txp_softc *sc, struct txp_rsp_desc *rsp,
    struct txp_rsp_desc *dst)
{
	struct txp_rsp_desc *src = rsp;
	struct txp_hostvar *hv = sc->sc_hostvar;
	uint32_t i, ridx;

	ridx = le32toh(hv->hv_resp_read_idx);

	for (i = 0; i < rsp->rsp_numdesc + 1; i++) {
		if (dst != NULL)
			memcpy(dst++, src, sizeof(struct txp_rsp_desc));
		ridx += sizeof(struct txp_rsp_desc);
		if (ridx == sc->sc_rspring.size) {
			src = sc->sc_rspring.base;
			ridx = 0;
		} else
			src++;
		sc->sc_rspring.lastwrite = ridx;
		hv->hv_resp_read_idx = htole32(ridx);
	}

	hv->hv_resp_read_idx = htole32(ridx);
}

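/*
 * Count the free slots on the command ring.  One descriptor's worth
 * of space is always left unused so that a full ring can be told
 * apart from an empty one (write index == read index means empty).
 */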
1786 static int
1787 txp_cmd_desc_numfree(struct txp_softc *sc)
1788 {
1789 struct txp_hostvar *hv = sc->sc_hostvar;
1790 struct txp_boot_record *br = sc->sc_boot;
1791 uint32_t widx, ridx, nfree;
1792
1793 widx = sc->sc_cmdring.lastwrite;
1794 ridx = le32toh(hv->hv_cmd_read_idx);
1795
1796 if (widx == ridx) {
1797 /* Ring is completely free */
1798 nfree = le32toh(br->br_cmd_siz) - sizeof(struct txp_cmd_desc);
1799 } else {
1800 if (widx > ridx)
1801 nfree = le32toh(br->br_cmd_siz) -
1802 (widx - ridx + sizeof(struct txp_cmd_desc));
1803 else
1804 nfree = ridx - widx - sizeof(struct txp_cmd_desc);
1805 }
1806
1807 return (nfree / sizeof(struct txp_cmd_desc));
1808 }
1809
1810 static void
1811 txp_stop(struct txp_softc *sc)
1812 {
1813 txp_command(sc, TXP_CMD_TX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 1);
1814 txp_command(sc, TXP_CMD_RX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 1);
1815
1816 if (callout_pending(&sc->sc_tick))
1817 callout_stop(&sc->sc_tick);
1818 }
1819
1820 static void
1821 txp_watchdog(struct ifnet *ifp)
1822 {
1823 }
1824
1825 static int
1826 txp_ifmedia_upd(struct ifnet *ifp)
1827 {
1828 struct txp_softc *sc = ifp->if_softc;
1829 struct ifmedia *ifm = &sc->sc_ifmedia;
1830 uint16_t new_xcvr;
1831
1832 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1833 return (EINVAL);
1834
1835 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_10_T) {
1836 if ((ifm->ifm_media & IFM_FDX) != 0)
1837 new_xcvr = TXP_XCVR_10_FDX;
1838 else
1839 new_xcvr = TXP_XCVR_10_HDX;
1840 } else if ((IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) ||
1841 (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_FX)) {
1842 if ((ifm->ifm_media & IFM_FDX) != 0)
1843 new_xcvr = TXP_XCVR_100_FDX;
1844 else
1845 new_xcvr = TXP_XCVR_100_HDX;
1846 } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
1847 new_xcvr = TXP_XCVR_AUTO;
1848 } else
1849 return (EINVAL);
1850
1851 /* nothing to do */
1852 if (sc->sc_xcvr == new_xcvr)
1853 return (0);
1854
1855 txp_command(sc, TXP_CMD_XCVR_SELECT, new_xcvr, 0, 0,
1856 NULL, NULL, NULL, 0);
1857 sc->sc_xcvr = new_xcvr;
1858
1859 return (0);
1860 }
1861
1862 static void
1863 txp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1864 {
1865 struct txp_softc *sc = ifp->if_softc;
1866 struct ifmedia *ifm = &sc->sc_ifmedia;
1867 uint16_t bmsr, bmcr, anlpar;
1868
1869 ifmr->ifm_status = IFM_AVALID;
1870 ifmr->ifm_active = IFM_ETHER;
1871
1872 if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0,
1873 &bmsr, NULL, NULL, 1))
1874 goto bail;
1875 if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0,
1876 &bmsr, NULL, NULL, 1))
1877 goto bail;
1878
1879 if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMCR, 0,
1880 &bmcr, NULL, NULL, 1))
1881 goto bail;
1882
1883 if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_ANLPAR, 0,
1884 &anlpar, NULL, NULL, 1))
1885 goto bail;
1886
1887 if (bmsr & BMSR_LINK)
1888 ifmr->ifm_status |= IFM_ACTIVE;
1889
1890 if (bmcr & BMCR_ISO) {
1891 ifmr->ifm_active |= IFM_NONE;
1892 ifmr->ifm_status = 0;
1893 return;
1894 }
1895
1896 if (bmcr & BMCR_LOOP)
1897 ifmr->ifm_active |= IFM_LOOP;
1898
1899 if (!(sc->sc_flags & TXP_FIBER) && (bmcr & BMCR_AUTOEN)) {
1900 if ((bmsr & BMSR_ACOMP) == 0) {
1901 ifmr->ifm_active |= IFM_NONE;
1902 return;
1903 }
1904
1905 if (anlpar & ANLPAR_TX_FD)
1906 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1907 else if (anlpar & ANLPAR_T4)
1908 ifmr->ifm_active |= IFM_100_T4 | IFM_HDX;
1909 else if (anlpar & ANLPAR_TX)
1910 ifmr->ifm_active |= IFM_100_TX | IFM_HDX;
1911 else if (anlpar & ANLPAR_10_FD)
1912 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
1913 else if (anlpar & ANLPAR_10)
1914 ifmr->ifm_active |= IFM_10_T | IFM_HDX;
1915 else
1916 ifmr->ifm_active |= IFM_NONE;
1917 } else
1918 ifmr->ifm_active = ifm->ifm_cur->ifm_media;
1919 return;
1920
1921 bail:
1922 ifmr->ifm_active |= IFM_NONE;
1923 ifmr->ifm_status &= ~IFM_AVALID;
1924 }
1925
1926 #if 0 /* XXX XXX XXX UNUSED */
1927 static void
1928 txp_show_descriptor(void *d)
1929 {
1930 struct txp_cmd_desc *cmd = d;
1931 struct txp_rsp_desc *rsp = d;
1932 struct txp_tx_desc *txd = d;
1933 struct txp_frag_desc *frgd = d;
1934
1935 switch (cmd->cmd_flags & CMD_FLAGS_TYPE_M) {
1936 case CMD_FLAGS_TYPE_CMD:
1937 /* command descriptor */
1938 printf("[cmd flags 0x%x num %d id %d seq %d par1 0x%x par2 "
1939 "0x%x par3 0x%x]\n",
1940 cmd->cmd_flags, cmd->cmd_numdesc, le16toh(cmd->cmd_id),
1941 le16toh(cmd->cmd_seq), le16toh(cmd->cmd_par1),
1942 le32toh(cmd->cmd_par2), le32toh(cmd->cmd_par3));
1943 break;
1944 case CMD_FLAGS_TYPE_RESP:
1945 /* response descriptor */
1946 printf("[rsp flags 0x%x num %d id %d seq %d par1 0x%x par2 "
1947 "0x%x par3 0x%x]\n",
1948 rsp->rsp_flags, rsp->rsp_numdesc, le16toh(rsp->rsp_id),
1949 le16toh(rsp->rsp_seq), le16toh(rsp->rsp_par1),
1950 le32toh(rsp->rsp_par2), le32toh(rsp->rsp_par3));
1951 break;
1952 case CMD_FLAGS_TYPE_DATA:
1953 /* data header (assuming tx for now) */
1954 printf("[data flags 0x%x num %d totlen %d addr 0x%x/0x%x "
1955 "pflags 0x%x]",
1956 txd->tx_flags, txd->tx_numdesc, txd->tx_totlen,
1957 txd->tx_addrlo, txd->tx_addrhi, txd->tx_pflags);
1958 break;
1959 case CMD_FLAGS_TYPE_FRAG:
1960 /* fragment descriptor */
1961 printf("[frag flags 0x%x rsvd1 0x%x len %d addr 0x%x/0x%x "
1962 "rsvd2 0x%x]",
1963 frgd->frag_flags, frgd->frag_rsvd1, frgd->frag_len,
1964 frgd->frag_addrlo, frgd->frag_addrhi, frgd->frag_rsvd2);
1965 break;
1966 default:
1967 printf("[unknown(%x) flags 0x%x num %d id %d seq %d par1 "
1968 "0x%x par2 0x%x par3 0x%x]\n",
1969 cmd->cmd_flags & CMD_FLAGS_TYPE_M,
1970 cmd->cmd_flags, cmd->cmd_numdesc, le16toh(cmd->cmd_id),
1971 le16toh(cmd->cmd_seq), le16toh(cmd->cmd_par1),
1972 le32toh(cmd->cmd_par2), le32toh(cmd->cmd_par3));
1973 break;
1974 }
1975 }
1976 #endif
1977
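/*
 * Program the receive filter.  Promiscuous mode overrides everything;
 * otherwise accept directed frames, optionally broadcast, and either
 * all multicasts or only those hitting the 64-bit hash built below.
 */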
1978 static void
1979 txp_set_filter(struct txp_softc *sc)
1980 {
1981 struct ethercom *ec = &sc->sc_arpcom;
1982 struct ifnet *ifp = &sc->sc_arpcom.ec_if;
1983 uint32_t crc, carry, hashbit, hash[2];
1984 uint16_t filter;
1985 uint8_t octet;
1986 int i, j, mcnt = 0;
1987 struct ether_multi *enm;
1988 struct ether_multistep step;
1989
1990 if (ifp->if_flags & IFF_PROMISC) {
1991 filter = TXP_RXFILT_PROMISC;
1992 goto setit;
1993 }
1994
1995 again:
1996 filter = TXP_RXFILT_DIRECT;
1997
1998 if (ifp->if_flags & IFF_BROADCAST)
1999 filter |= TXP_RXFILT_BROADCAST;
2000
2001 if (ifp->if_flags & IFF_ALLMULTI)
2002 filter |= TXP_RXFILT_ALLMULTI;
2003 else {
2004 hash[0] = hash[1] = 0;
2005
2006 ETHER_LOCK(ec);
2007 ETHER_FIRST_MULTI(step, ec, enm);
2008 while (enm != NULL) {
2009 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
2010 ETHER_ADDR_LEN)) {
2011 /*
2012 * We must listen to a range of multicast
2013 * addresses. For now, just accept all
2014 * multicasts, rather than trying to set only
2015 * those filter bits needed to match the range.
2016 * (At this time, the only use of address
2017 * ranges is for IP multicast routing, for
2018 * which the range is big enough to require
2019 * all bits set.)
2020 */
2021 ifp->if_flags |= IFF_ALLMULTI;
2022 ETHER_UNLOCK(ec);
2023 goto again;
2024 }
2025
2026 mcnt++;
2027 crc = 0xffffffff;
2028
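			/*
			 * Compute the Ethernet CRC-32 of the address a
			 * bit at a time; the low six bits of the result
			 * select one of the 64 hash-filter buckets.
			 */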
2029 for (i = 0; i < ETHER_ADDR_LEN; i++) {
2030 octet = enm->enm_addrlo[i];
2031 for (j = 0; j < 8; j++) {
2032 carry = ((crc & 0x80000000) ? 1 : 0) ^
2033 (octet & 1);
2034 crc <<= 1;
2035 octet >>= 1;
2036 if (carry)
2037 crc = (crc ^ TXP_POLYNOMIAL) |
2038 carry;
2039 }
2040 }
2041 hashbit = (uint16_t)(crc & (64 - 1));
2042 			hash[hashbit / 32] |= (1 << (hashbit % 32));
2043 ETHER_NEXT_MULTI(step, enm);
2044 }
2045 ETHER_UNLOCK(ec);
2046
2047 if (mcnt > 0) {
2048 filter |= TXP_RXFILT_HASHMULTI;
2049 txp_command(sc, TXP_CMD_MCAST_HASH_MASK_WRITE,
2050 2, hash[0], hash[1], NULL, NULL, NULL, 0);
2051 }
2052 }
2053
2054 setit:
2055 txp_command(sc, TXP_CMD_RX_FILTER_WRITE, filter, 0, 0,
2056 NULL, NULL, NULL, 1);
2057 }
2058
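/*
 * Ask the firmware which offloads it supports and advertise the
 * matching ETHERCAP/IFCAP bits.  Transmit TCP/UDP checksumming is
 * left disabled unless TRY_TX_TCP_CSUM/TRY_TX_UDP_CSUM is defined.
 */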
2059 static void
2060 txp_capabilities(struct txp_softc *sc)
2061 {
2062 struct ifnet *ifp = &sc->sc_arpcom.ec_if;
2063 struct txp_rsp_desc *rsp = NULL;
2064 struct txp_ext_desc *ext;
2065
2066 if (txp_command2(sc, TXP_CMD_OFFLOAD_READ, 0, 0, 0, NULL, 0, &rsp, 1))
2067 goto out;
2068
2069 if (rsp->rsp_numdesc != 1)
2070 goto out;
2071 ext = (struct txp_ext_desc *)(rsp + 1);
2072
2073 sc->sc_tx_capability = ext->ext_1 & OFFLOAD_MASK;
2074 sc->sc_rx_capability = ext->ext_2 & OFFLOAD_MASK;
2075
2076 sc->sc_arpcom.ec_capabilities |= ETHERCAP_VLAN_MTU;
2077 if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_VLAN) {
2078 sc->sc_tx_capability |= OFFLOAD_VLAN;
2079 sc->sc_rx_capability |= OFFLOAD_VLAN;
2080 sc->sc_arpcom.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
2081 sc->sc_arpcom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
2082 }
2083
2084 #if 0
2085 /* not ready yet */
2086 if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_IPSEC) {
2087 sc->sc_tx_capability |= OFFLOAD_IPSEC;
2088 sc->sc_rx_capability |= OFFLOAD_IPSEC;
2089 ifp->if_capabilities |= IFCAP_IPSEC;
2090 }
2091 #endif
2092
2093 if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_IPCKSUM) {
2094 sc->sc_tx_capability |= OFFLOAD_IPCKSUM;
2095 sc->sc_rx_capability |= OFFLOAD_IPCKSUM;
2096 ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx;
2097 }
2098
2099 	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_TCPCKSUM) {
2100 		sc->sc_rx_capability |= OFFLOAD_TCPCKSUM;
2101 		ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Rx;
2102 #ifdef TRY_TX_TCP_CSUM
2103 		sc->sc_tx_capability |= OFFLOAD_TCPCKSUM;
2104 		ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Tx;
2105 #endif
2106 	}
2107
2108 	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_UDPCKSUM) {
2109 		sc->sc_rx_capability |= OFFLOAD_UDPCKSUM;
2110 		ifp->if_capabilities |= IFCAP_CSUM_UDPv4_Rx;
2111 #ifdef TRY_TX_UDP_CSUM
2112 		sc->sc_tx_capability |= OFFLOAD_UDPCKSUM;
2113 		ifp->if_capabilities |= IFCAP_CSUM_UDPv4_Tx;
2114 #endif
2115 	}
2116
2117 if (txp_command(sc, TXP_CMD_OFFLOAD_WRITE, 0,
2118 sc->sc_tx_capability, sc->sc_rx_capability, NULL, NULL, NULL, 1))
2119 goto out;
2120
2121 out:
2122 if (rsp != NULL)
2123 free(rsp, M_DEVBUF);
2124 }
2125