/*	$NetBSD: i82557.c,v 1.5 1999/08/04 00:14:08 thorpej Exp $	*/
2
3 /*-
4 * Copyright (c) 1997, 1998, 1999 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 /*
41 * Copyright (c) 1995, David Greenman
42 * All rights reserved.
43 *
44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions
46 * are met:
47 * 1. Redistributions of source code must retain the above copyright
48 * notice unmodified, this list of conditions, and the following
49 * disclaimer.
50 * 2. Redistributions in binary form must reproduce the above copyright
51 * notice, this list of conditions and the following disclaimer in the
52 * documentation and/or other materials provided with the distribution.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * Id: if_fxp.c,v 1.47 1998/01/08 23:42:29 eivind Exp
67 */
68
69 /*
70 * Device driver for the Intel i82557 fast Ethernet controller.
71 */
72
73 #include "opt_inet.h"
74 #include "opt_ns.h"
75 #include "bpfilter.h"
76 #include "rnd.h"
77
78 #include <sys/param.h>
79 #include <sys/systm.h>
80 #include <sys/mbuf.h>
81 #include <sys/malloc.h>
82 #include <sys/kernel.h>
83 #include <sys/socket.h>
84 #include <sys/ioctl.h>
85 #include <sys/errno.h>
86 #include <sys/device.h>
87
88 #include <vm/vm.h> /* for PAGE_SIZE */
89
90 #if NRND > 0
91 #include <sys/rnd.h>
92 #endif
93
94 #include <net/if.h>
95 #include <net/if_dl.h>
96 #include <net/if_media.h>
97 #include <net/if_ether.h>
98
99 #if NBPFILTER > 0
100 #include <net/bpf.h>
101 #endif
102
103 #ifdef INET
104 #include <netinet/in.h>
105 #include <netinet/if_inarp.h>
106 #endif
107
108 #ifdef NS
109 #include <netns/ns.h>
110 #include <netns/ns_if.h>
111 #endif
112
113 #include <machine/bus.h>
114 #include <machine/intr.h>
115
116 #include <dev/mii/miivar.h>
117
118 #include <dev/ic/i82557reg.h>
119 #include <dev/ic/i82557var.h>
120
121 /*
122 * NOTE! On the Alpha, we have an alignment constraint. The
123 * card DMAs the packet immediately following the RFA. However,
124 * the first thing in the packet is a 14-byte Ethernet header.
125 * This means that the packet is misaligned. To compensate,
126 * we actually offset the RFA 2 bytes into the cluster. This
 * aligns the packet after the Ethernet header at a 32-bit
128 * boundary. HOWEVER! This means that the RFA is misaligned!
129 */
130 #define RFA_ALIGNMENT_FUDGE 2
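
/*
 * A worked sketch of the resulting cluster layout (assuming the usual
 * 16-byte struct fxp_rfa and 14-byte Ethernet header; the sizeof()s in
 * fxp_add_rfabuf() below are authoritative):
 *
 *	cluster + 0	2 bytes of fudge (unused)
 *	cluster + 2	RFA (16 bytes, deliberately misaligned)
 *	cluster + 18	Ethernet header (14 bytes)
 *	cluster + 32	packet payload, 32-bit aligned
 */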
131
132 /*
133 * Template for default configuration parameters.
134 * See struct fxp_cb_config for the bit definitions.
135 */
136 u_int8_t fxp_cb_config_template[] = {
137 0x0, 0x0, /* cb_status */
138 0x80, 0x2, /* cb_command */
139 0xff, 0xff, 0xff, 0xff, /* link_addr */
140 0x16, /* 0 */
141 0x8, /* 1 */
142 0x0, /* 2 */
143 0x0, /* 3 */
144 0x0, /* 4 */
145 0x80, /* 5 */
146 0xb2, /* 6 */
147 0x3, /* 7 */
148 0x1, /* 8 */
149 0x0, /* 9 */
150 0x26, /* 10 */
151 0x0, /* 11 */
152 0x60, /* 12 */
153 0x0, /* 13 */
154 0xf2, /* 14 */
155 0x48, /* 15 */
156 0x0, /* 16 */
157 0x40, /* 17 */
158 0xf3, /* 18 */
159 0x0, /* 19 */
160 0x3f, /* 20 */
161 0x5 /* 21 */
162 };
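
/*
 * The bytes labelled 0 through 21 above are the 22 configuration bytes
 * of the chip's CONFIGURE command (fxp_init() sets byte_count to 22).
 * fxp_init() copies this template into the configuration CB and then
 * adjusts individual fields through the struct fxp_cb_config bitfields,
 * e.g.:
 *
 *	memcpy(cbp, fxp_cb_config_template, sizeof(fxp_cb_config_template));
 *	cbp->promiscuous = prm;
 */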
163
164 void fxp_mii_initmedia __P((struct fxp_softc *));
165 int fxp_mii_mediachange __P((struct ifnet *));
166 void fxp_mii_mediastatus __P((struct ifnet *, struct ifmediareq *));
167
168 void fxp_80c24_initmedia __P((struct fxp_softc *));
169 int fxp_80c24_mediachange __P((struct ifnet *));
170 void fxp_80c24_mediastatus __P((struct ifnet *, struct ifmediareq *));
171
172 inline void fxp_scb_wait __P((struct fxp_softc *));
173
174 void fxp_start __P((struct ifnet *));
175 int fxp_ioctl __P((struct ifnet *, u_long, caddr_t));
176 void fxp_init __P((struct fxp_softc *));
177 void fxp_stop __P((struct fxp_softc *));
178 void fxp_watchdog __P((struct ifnet *));
179 int fxp_add_rfabuf __P((struct fxp_softc *, struct fxp_rxdesc *));
180 int fxp_mdi_read __P((struct device *, int, int));
181 void fxp_statchg __P((struct device *));
182 void fxp_mdi_write __P((struct device *, int, int, int));
183 void fxp_read_eeprom __P((struct fxp_softc *, u_int16_t *, int, int));
184 void fxp_get_info __P((struct fxp_softc *, u_int8_t *));
185 void fxp_tick __P((void *));
186 void fxp_mc_setup __P((struct fxp_softc *));
187
188 void fxp_shutdown __P((void *));
189
190 struct fxp_phytype {
191 int fp_phy; /* type of PHY, -1 for MII at the end. */
192 void (*fp_init) __P((struct fxp_softc *));
193 } fxp_phytype_table[] = {
194 { FXP_PHY_80C24, fxp_80c24_initmedia },
195 { -1, fxp_mii_initmedia },
196 };
197
198 /*
199 * Set initial transmit threshold at 64 (512 bytes). This is
 * increased by 64 (512 bytes) at a time, to a maximum of 192
201 * (1536 bytes), if an underrun occurs.
202 */
203 static int tx_threshold = 64;
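
/*
 * For reference, the threshold is in 8-byte units (see the "(64 * 8)"
 * note in fxp_tick()), so the escalation on underruns is:
 *
 *	 64 * 8 =  512 bytes	(initial)
 *	128 * 8 = 1024 bytes
 *	192 * 8 = 1536 bytes	(maximum)
 */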
204
205 /*
206 * Wait for the previous command to be accepted (but not necessarily
207 * completed).
208 */
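/*
 * A sketch of the calling pattern used throughout this file (this
 * particular sequence is taken from fxp_init()):
 *
 *	fxp_scb_wait(sc);
 *	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->sc_cddma + FXP_CDCONFIGOFF);
 *	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START);
 *
 * The SCB command byte reads back as zero once the previous command has
 * been accepted, so the loop below spins for at most roughly
 * 10000 * 2us = 20ms before printing the warning.
 */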
209 inline void
210 fxp_scb_wait(sc)
211 struct fxp_softc *sc;
212 {
213 int i = 10000;
214
215 while (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) && --i)
216 delay(2);
217 if (i == 0)
218 printf("%s: WARNING: SCB timed out!\n", sc->sc_dev.dv_xname);
219 }
220
221 /*
222 * Finish attaching an i82557 interface. Called by bus-specific front-end.
223 */
224 void
225 fxp_attach(sc)
226 struct fxp_softc *sc;
227 {
228 u_int8_t enaddr[6];
229 struct ifnet *ifp;
230 bus_dma_segment_t seg;
231 int rseg, i, error;
232 struct fxp_phytype *fp;
233
234 /*
235 * Allocate the control data structures, and create and load the
236 * DMA map for it.
237 */
238 if ((error = bus_dmamem_alloc(sc->sc_dmat,
239 sizeof(struct fxp_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
240 0)) != 0) {
241 printf("%s: unable to allocate control data, error = %d\n",
242 sc->sc_dev.dv_xname, error);
243 goto fail_0;
244 }
245
246 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
247 sizeof(struct fxp_control_data), (caddr_t *)&sc->sc_control_data,
248 BUS_DMA_COHERENT)) != 0) {
249 printf("%s: unable to map control data, error = %d\n",
250 sc->sc_dev.dv_xname, error);
251 goto fail_1;
252 }
253 bzero(sc->sc_control_data, sizeof(struct fxp_control_data));
254
255 if ((error = bus_dmamap_create(sc->sc_dmat,
256 sizeof(struct fxp_control_data), 1,
257 sizeof(struct fxp_control_data), 0, 0, &sc->sc_dmamap)) != 0) {
258 printf("%s: unable to create control data DMA map, "
259 "error = %d\n", sc->sc_dev.dv_xname, error);
260 goto fail_2;
261 }
262
263 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap,
264 sc->sc_control_data, sizeof(struct fxp_control_data), NULL,
265 0)) != 0) {
266 printf("%s: can't load control data DMA map, error = %d\n",
267 sc->sc_dev.dv_xname, error);
268 goto fail_3;
269 }
270
271 /*
272 * Create the transmit buffer DMA maps.
273 */
274 for (i = 0; i < FXP_NTXCB; i++) {
275 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
276 FXP_NTXSEG, MCLBYTES, 0, 0,
277 &FXP_DSTX(sc, i)->txs_dmamap)) != 0) {
278 printf("%s: unable to create tx DMA map %d, "
279 "error = %d\n", sc->sc_dev.dv_xname, i, error);
280 goto fail_4;
281 }
282 }
283
284 /*
285 * Create the receive buffer DMA maps.
286 */
287 for (i = 0; i < FXP_NRFABUFS; i++) {
288 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
289 MCLBYTES, 0, 0, &sc->sc_rx_dmamaps[i])) != 0) {
290 printf("%s: unable to create rx DMA map %d, "
291 "error = %d\n", sc->sc_dev.dv_xname, i, error);
292 goto fail_5;
293 }
294 }
295
296 /*
297 * Pre-allocate the receive buffers.
298 */
299 for (i = 0; i < FXP_NRFABUFS; i++) {
300 sc->sc_rxdescs[i].fr_dmamap = sc->sc_rx_dmamaps[i];
301 if (fxp_add_rfabuf(sc, &sc->sc_rxdescs[i]) != 0) {
			printf("%s: unable to allocate or map rx buffer %d\n",
			    sc->sc_dev.dv_xname, i);
304 goto fail_6;
305 }
306 }
307
308 /* Initialize MAC address and media structures. */
309 fxp_get_info(sc, enaddr);
310
311 printf("%s: Ethernet address %s, %s Mb/s\n", sc->sc_dev.dv_xname,
312 ether_sprintf(enaddr), sc->phy_10Mbps_only ? "10" : "10/100");
313
314 ifp = &sc->sc_ethercom.ec_if;
315
316 /*
317 * Get info about our media interface, and initialize it. Note
318 * the table terminates itself with a phy of -1, indicating
319 * that we're using MII.
320 */
321 for (fp = fxp_phytype_table; fp->fp_phy != -1; fp++)
322 if (fp->fp_phy == sc->phy_primary_device)
323 break;
324 (*fp->fp_init)(sc);
325
326 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
327 ifp->if_softc = sc;
328 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
329 ifp->if_ioctl = fxp_ioctl;
330 ifp->if_start = fxp_start;
331 ifp->if_watchdog = fxp_watchdog;
332
333 /*
334 * Attach the interface.
335 */
336 if_attach(ifp);
337 ether_ifattach(ifp, enaddr);
338 #if NBPFILTER > 0
339 bpfattach(&sc->sc_ethercom.ec_if.if_bpf, ifp, DLT_EN10MB,
340 sizeof(struct ether_header));
341 #endif
342 #if NRND > 0
343 rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
344 RND_TYPE_NET, 0);
345 #endif
346
347 /*
348 * Add shutdown hook so that DMA is disabled prior to reboot. Not
	 * doing so could allow DMA to corrupt kernel memory during the
350 * reboot before the driver initializes.
351 */
352 sc->sc_sdhook = shutdownhook_establish(fxp_shutdown, sc);
353 if (sc->sc_sdhook == NULL)
354 printf("%s: WARNING: unable to establish shutdown hook\n",
355 sc->sc_dev.dv_xname);
356 return;
357
358 /*
359 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
361 */
362 fail_6:
363 for (i = 0; i < FXP_NRFABUFS; i++) {
364 if (sc->sc_rxdescs[i].fr_mbhead != NULL) {
365 bus_dmamap_unload(sc->sc_dmat,
366 sc->sc_rxdescs[i].fr_dmamap);
367 m_freem(sc->sc_rxdescs[i].fr_mbhead);
368 }
369 }
370 fail_5:
371 for (i = 0; i < FXP_NRFABUFS; i++) {
372 if (sc->sc_rxdescs[i].fr_dmamap != NULL)
373 bus_dmamap_destroy(sc->sc_dmat,
374 sc->sc_rxdescs[i].fr_dmamap);
375 }
376 fail_4:
377 for (i = 0; i < FXP_NTXCB; i++) {
378 if (FXP_DSTX(sc, i)->txs_dmamap != NULL)
379 bus_dmamap_destroy(sc->sc_dmat,
380 FXP_DSTX(sc, i)->txs_dmamap);
381 }
382 bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
383 fail_3:
384 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
385 fail_2:
386 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
387 sizeof(struct fxp_control_data));
388 fail_1:
389 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
390 fail_0:
391 return;
392 }
393
394 void
395 fxp_mii_initmedia(sc)
396 struct fxp_softc *sc;
397 {
398
399 sc->sc_mii.mii_ifp = &sc->sc_ethercom.ec_if;
400 sc->sc_mii.mii_readreg = fxp_mdi_read;
401 sc->sc_mii.mii_writereg = fxp_mdi_write;
402 sc->sc_mii.mii_statchg = fxp_statchg;
403 ifmedia_init(&sc->sc_mii.mii_media, 0, fxp_mii_mediachange,
404 fxp_mii_mediastatus);
405 mii_phy_probe(&sc->sc_dev, &sc->sc_mii, 0xffffffff);
406 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
407 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
408 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
409 } else
410 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
411 }
412
413 void
414 fxp_80c24_initmedia(sc)
415 struct fxp_softc *sc;
416 {
417
418 /*
419 * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter
420 * doesn't have a programming interface of any sort. The
421 * media is sensed automatically based on how the link partner
422 * is configured. This is, in essence, manual configuration.
423 */
424 printf("%s: Seeq 80c24 AutoDUPLEX media interface present\n",
425 sc->sc_dev.dv_xname);
426 ifmedia_init(&sc->sc_mii.mii_media, 0, fxp_80c24_mediachange,
427 fxp_80c24_mediastatus);
428 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
429 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
430 }
431
432 /*
433 * Device shutdown routine. Called at system shutdown after sync. The
434 * main purpose of this routine is to shut off receiver DMA so that
435 * kernel memory doesn't get clobbered during warmboot.
436 */
437 void
438 fxp_shutdown(arg)
439 void *arg;
440 {
441 struct fxp_softc *sc = arg;
442
443 fxp_stop(sc);
444 }
445
/*
 * Read the primary PHY information and the station address from the chip.
 */
449 void
450 fxp_get_info(sc, enaddr)
451 struct fxp_softc *sc;
452 u_int8_t *enaddr;
453 {
454 u_int16_t data, myea[3];
455
456 /*
457 * Reset to a stable state.
458 */
459 CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
460 DELAY(10);
461
462 /*
463 * Get info about the primary PHY
464 */
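	/*
	 * The layout assumed for EEPROM word 6, matching the masks and
	 * shifts below:
	 *
	 *	bits  7..0	primary PHY address
	 *	bits 13..8	primary PHY device type
	 *	bit  15		set if the PHY is 10Mb/s-only
	 */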
465 fxp_read_eeprom(sc, &data, 6, 1);
466 sc->phy_primary_addr = data & 0xff;
467 sc->phy_primary_device = (data >> 8) & 0x3f;
468 sc->phy_10Mbps_only = data >> 15;
469
470 /*
471 * Read MAC address.
472 */
473 fxp_read_eeprom(sc, myea, 0, 3);
474 bcopy(myea, enaddr, ETHER_ADDR_LEN);
475 }
476
477 /*
478 * Read from the serial EEPROM. Basically, you manually shift in
479 * the read opcode (one bit at a time) and then shift in the address,
480 * and then you shift out the data (all of this one bit at a time).
481 * The word size is 16 bits, so you have to provide the address for
482 * every 16 bits of data.
483 */
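/*
 * A concrete sketch of the serial transaction performed by the loops
 * below for one word (each field is transferred most-significant bit
 * first):
 *
 *	opcode	 3 bits		FXP_EEPROM_OPC_READ
 *	address	 6 bits		(i + offset), i.e. the word number
 *	data	16 bits		read back into data[i]
 *
 * Opcode and address bits are driven on EEDI, data bits are read from
 * EEDO, and each bit is clocked by pulsing EESK with EECS asserted for
 * the whole transaction.
 */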
484 void
485 fxp_read_eeprom(sc, data, offset, words)
486 struct fxp_softc *sc;
487 u_int16_t *data;
488 int offset;
489 int words;
490 {
491 u_int16_t reg;
492 int i, x;
493
494 for (i = 0; i < words; i++) {
495 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
496 /*
497 * Shift in read opcode.
498 */
499 for (x = 3; x > 0; x--) {
500 if (FXP_EEPROM_OPC_READ & (1 << (x - 1))) {
501 reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
502 } else {
503 reg = FXP_EEPROM_EECS;
504 }
505 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
506 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
507 reg | FXP_EEPROM_EESK);
508 DELAY(1);
509 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
510 DELAY(1);
511 }
512 /*
513 * Shift in address.
514 */
515 for (x = 6; x > 0; x--) {
516 if ((i + offset) & (1 << (x - 1))) {
517 reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
518 } else {
519 reg = FXP_EEPROM_EECS;
520 }
521 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
522 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
523 reg | FXP_EEPROM_EESK);
524 DELAY(1);
525 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
526 DELAY(1);
527 }
528 reg = FXP_EEPROM_EECS;
529 data[i] = 0;
530 /*
531 * Shift out data.
532 */
533 for (x = 16; x > 0; x--) {
534 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
535 reg | FXP_EEPROM_EESK);
536 DELAY(1);
537 if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) &
538 FXP_EEPROM_EEDO)
539 data[i] |= (1 << (x - 1));
540 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
541 DELAY(1);
542 }
543 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
544 DELAY(1);
545 }
546 }
547
548 /*
549 * Start packet transmission on the interface.
550 */
551 void
552 fxp_start(ifp)
553 struct ifnet *ifp;
554 {
555 struct fxp_softc *sc = ifp->if_softc;
556 struct mbuf *m0, *m;
557 struct fxp_cb_tx *txd;
558 struct fxp_txsoft *txs;
559 struct fxp_tbdlist *tbd;
560 bus_dmamap_t dmamap;
561 int error, lasttx, nexttx, opending, seg;
562
563 /*
564 * If we need multicast setup, bail out now.
565 */
566 if (sc->sc_flags & FXPF_NEEDMCSETUP) {
567 ifp->if_flags |= IFF_OACTIVE;
568 return;
569 }
570
571 /*
572 * Remember the previous txpending and the current lasttx.
573 */
574 opending = sc->sc_txpending;
575 lasttx = sc->sc_txlast;
576
577 /*
578 * Loop through the send queue, setting up transmit descriptors
579 * until we drain the queue, or use up all available transmit
580 * descriptors.
581 */
582 while (sc->sc_txpending < FXP_NTXCB) {
583 /*
584 * Grab a packet off the queue.
585 */
586 IF_DEQUEUE(&ifp->if_snd, m0);
587 if (m0 == NULL)
588 break;
589
590 /*
591 * Get the next available transmit descriptor.
592 */
593 nexttx = FXP_NEXTTX(sc->sc_txlast);
594 txd = FXP_CDTX(sc, nexttx);
595 tbd = FXP_CDTBD(sc, nexttx);
596 txs = FXP_DSTX(sc, nexttx);
597 dmamap = txs->txs_dmamap;
598
599 /*
600 * Load the DMA map. If this fails, the packet either
601 * didn't fit in the allotted number of frags, or we were
602 * short on resources. In this case, we'll copy and try
603 * again.
604 */
605 if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
606 BUS_DMA_NOWAIT) != 0) {
607 MGETHDR(m, M_DONTWAIT, MT_DATA);
608 if (m == NULL) {
609 printf("%s: unable to allocate Tx mbuf\n",
610 sc->sc_dev.dv_xname);
611 IF_PREPEND(&ifp->if_snd, m0);
612 break;
613 }
614 if (m0->m_pkthdr.len > MHLEN) {
615 MCLGET(m, M_DONTWAIT);
616 if ((m->m_flags & M_EXT) == 0) {
617 printf("%s: unable to allocate Tx "
618 "cluster\n", sc->sc_dev.dv_xname);
619 m_freem(m);
620 IF_PREPEND(&ifp->if_snd, m0);
621 break;
622 }
623 }
624 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
625 m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
626 m_freem(m0);
627 m0 = m;
628 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
629 m0, BUS_DMA_NOWAIT);
630 if (error) {
631 printf("%s: unable to load Tx buffer, "
632 "error = %d\n", sc->sc_dev.dv_xname, error);
633 IF_PREPEND(&ifp->if_snd, m0);
634 break;
635 }
636 }
637
638 /* Initialize the fraglist. */
639 for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
640 tbd->tbd_d[seg].tb_addr =
641 dmamap->dm_segs[seg].ds_addr;
642 tbd->tbd_d[seg].tb_size =
643 dmamap->dm_segs[seg].ds_len;
644 }
645
646 FXP_CDTBDSYNC(sc, nexttx, BUS_DMASYNC_PREWRITE);
647
648 /* Sync the DMA map. */
649 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
650 BUS_DMASYNC_PREWRITE);
651
652 /*
653 * Store a pointer to the packet so we can free it later.
654 */
655 txs->txs_mbuf = m0;
656
657 /*
658 * Initialize the transmit descriptor.
659 */
660 txd->cb_status = 0;
661 txd->cb_command =
662 FXP_CB_COMMAND_XMIT | FXP_CB_COMMAND_SF;
663 txd->tx_threshold = tx_threshold;
664 txd->tbd_number = dmamap->dm_nsegs;
665
666 FXP_CDTXSYNC(sc, nexttx,
667 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
668
669 /* Advance the tx pointer. */
670 sc->sc_txpending++;
671 sc->sc_txlast = nexttx;
672
673 #if NBPFILTER > 0
674 /*
675 * Pass packet to bpf if there is a listener.
676 */
677 if (ifp->if_bpf)
678 bpf_mtap(ifp->if_bpf, m0);
679 #endif
680 }
681
682 if (sc->sc_txpending == FXP_NTXCB) {
683 /* No more slots; notify upper layer. */
684 ifp->if_flags |= IFF_OACTIVE;
685 }
686
687 if (sc->sc_txpending != opending) {
688 /*
689 * We enqueued packets. If the transmitter was idle,
690 * reset the txdirty pointer.
691 */
692 if (opending == 0)
693 sc->sc_txdirty = FXP_NEXTTX(lasttx);
694
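		/*
		 * A sketch of the handoff performed by the steps below:
		 * the command unit executes CBs until it completes one
		 * with the suspend (S) bit set.  We set S (and I) on the
		 * newest CB, clear S on the CB that was previously last,
		 * and then issue CU_RESUME so a suspended command unit
		 * picks up the new work without a full CU_START.
		 */
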
695 /*
696 * Cause the chip to interrupt and suspend command
697 * processing once the last packet we've enqueued
698 * has been transmitted.
699 */
700 FXP_CDTX(sc, sc->sc_txlast)->cb_command |=
701 FXP_CB_COMMAND_I | FXP_CB_COMMAND_S;
702 FXP_CDTXSYNC(sc, sc->sc_txlast,
703 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
704
705 /*
706 * The entire packet chain is set up. Clear the suspend bit
707 * on the command prior to the first packet we set up.
708 */
709 FXP_CDTXSYNC(sc, lasttx,
710 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
711 FXP_CDTX(sc, lasttx)->cb_command &= ~FXP_CB_COMMAND_S;
712 FXP_CDTXSYNC(sc, lasttx,
713 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
714
715 /*
716 * Issue a Resume command in case the chip was suspended.
717 */
718 fxp_scb_wait(sc);
719 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_RESUME);
720
721 /* Set a watchdog timer in case the chip flakes out. */
722 ifp->if_timer = 5;
723 }
724 }
725
726 /*
727 * Process interface interrupts.
728 */
729 int
730 fxp_intr(arg)
731 void *arg;
732 {
733 struct fxp_softc *sc = arg;
734 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
735 struct fxp_cb_tx *txd;
736 struct fxp_txsoft *txs;
737 int i, oflags, claimed = 0;
738 u_int8_t statack;
739
740 while ((statack = CSR_READ_1(sc, FXP_CSR_SCB_STATACK)) != 0) {
741 claimed = 1;
742
743 /*
744 * First ACK all the interrupts in this pass.
745 */
746 CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, statack);
747
748 /*
749 * Process receiver interrupts. If a no-resource (RNR)
750 * condition exists, get whatever packets we can and
751 * re-start the receiver.
752 */
753 if (statack & (FXP_SCB_STATACK_FR | FXP_SCB_STATACK_RNR)) {
754 struct fxp_rxdesc *rxd;
755 struct mbuf *m;
756 struct fxp_rfa *rfa;
757 bus_dmamap_t rxmap;
758 rcvloop:
759 rxd = sc->rfa_head;
760 rxmap = rxd->fr_dmamap;
761 m = rxd->fr_mbhead;
762 rfa = (struct fxp_rfa *)(m->m_ext.ext_buf +
763 RFA_ALIGNMENT_FUDGE);
764
765 bus_dmamap_sync(sc->sc_dmat, rxmap, 0,
766 rxmap->dm_mapsize,
767 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
768
769 if (rfa->rfa_status & FXP_RFA_STATUS_C) {
770 /*
771 * Remove first packet from the chain.
772 */
773 sc->rfa_head = rxd->fr_next;
774 rxd->fr_next = NULL;
775
776 /*
777 * Add a new buffer to the receive chain.
778 * If this fails, the old buffer is recycled
779 * instead.
780 */
781 if (fxp_add_rfabuf(sc, rxd) == 0) {
782 struct ether_header *eh;
783 u_int16_t total_len;
784
785 total_len = rfa->actual_size &
786 (MCLBYTES - 1);
787 if (total_len <
788 sizeof(struct ether_header)) {
789 m_freem(m);
790 goto rcvloop;
791 }
792 m->m_pkthdr.rcvif = ifp;
793 m->m_pkthdr.len = m->m_len = total_len;
794 eh = mtod(m, struct ether_header *);
795 #if NBPFILTER > 0
796 if (ifp->if_bpf) {
797 bpf_tap(ifp->if_bpf,
798 mtod(m, caddr_t),
799 total_len);
800 /*
801 * Only pass this packet up
802 * if it is for us.
803 */
804 if ((ifp->if_flags &
805 IFF_PROMISC) &&
806 (rfa->rfa_status &
807 FXP_RFA_STATUS_IAMATCH) &&
808 (eh->ether_dhost[0] & 1)
809 == 0) {
810 m_freem(m);
811 goto rcvloop;
812 }
813 }
814 #endif /* NBPFILTER > 0 */
815 (*ifp->if_input)(ifp, m);
816 }
817 goto rcvloop;
818 }
819 if (statack & FXP_SCB_STATACK_RNR) {
820 fxp_scb_wait(sc);
821 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
822 rxmap->dm_segs[0].ds_addr +
823 RFA_ALIGNMENT_FUDGE);
824 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND,
825 FXP_SCB_COMMAND_RU_START);
826 }
827 }
828 /*
829 * Free any finished transmit mbuf chains.
830 */
831 if (statack & (FXP_SCB_STATACK_CXTNO|FXP_SCB_STATACK_CNA)) {
832 ifp->if_flags &= ~IFF_OACTIVE;
833 for (i = sc->sc_txdirty; sc->sc_txpending != 0;
834 i = FXP_NEXTTX(i), sc->sc_txpending--) {
835 txd = FXP_CDTX(sc, i);
836 txs = FXP_DSTX(sc, i);
837
838 FXP_CDTXSYNC(sc, i,
839 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
840
841 if ((txd->cb_status & FXP_CB_STATUS_C) == 0)
842 break;
843
844 FXP_CDTBDSYNC(sc, i, BUS_DMASYNC_POSTWRITE);
845
846 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
847 0, txs->txs_dmamap->dm_mapsize,
848 BUS_DMASYNC_POSTWRITE);
849 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
850 m_freem(txs->txs_mbuf);
851 txs->txs_mbuf = NULL;
852 }
853
854 /* Update the dirty transmit buffer pointer. */
855 sc->sc_txdirty = i;
856
857 /*
858 * Cancel the watchdog timer if there are no pending
859 * transmissions.
860 */
861 if (sc->sc_txpending == 0) {
862 ifp->if_timer = 0;
863
864 /*
865 * If we need a multicast filter setup,
866 * do that now.
867 */
868 if (sc->sc_flags & FXPF_NEEDMCSETUP) {
869 oflags = ifp->if_flags;
870 fxp_mc_setup(sc);
871
872 /*
873 * If IFF_ALLMULTI state changed,
874 * we need to reinitialize the chip,
875 * because this is handled by the
876 * config block.
877 */
878 if (((ifp->if_flags ^ oflags) &
879 IFF_ALLMULTI) != 0)
880 fxp_init(sc);
881 }
882 }
883
884 /*
885 * Try to get more packets going.
886 */
887 fxp_start(ifp);
888 }
889 }
890
891 #if NRND > 0
892 if (claimed)
893 rnd_add_uint32(&sc->rnd_source, statack);
894 #endif
895 return (claimed);
896 }
897
898 /*
899 * Update packet in/out/collision statistics. The i82557 doesn't
900 * allow you to access these counters without doing a fairly
901 * expensive DMA to get _all_ of the statistics it maintains, so
902 * we do this operation here only once per second. The statistics
903 * counters in the kernel are updated from the previous dump-stats
904 * DMA and then a new dump-stats DMA is started. The on-chip
905 * counters are zeroed when the DMA completes. If we can't start
906 * the DMA immediately, we don't wait - we just prepare to read
907 * them again next time.
908 */
909 void
910 fxp_tick(arg)
911 void *arg;
912 {
913 struct fxp_softc *sc = arg;
914 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
915 struct fxp_stats *sp = &sc->sc_control_data->fcd_stats;
916 int oflags, s;
917
918 s = splnet();
919
920 oflags = ifp->if_flags;
921
922 ifp->if_opackets += sp->tx_good;
923 ifp->if_collisions += sp->tx_total_collisions;
924 if (sp->rx_good) {
925 ifp->if_ipackets += sp->rx_good;
926 sc->rx_idle_secs = 0;
927 } else {
928 sc->rx_idle_secs++;
929 }
930 ifp->if_ierrors +=
931 sp->rx_crc_errors +
932 sp->rx_alignment_errors +
933 sp->rx_rnr_errors +
934 sp->rx_overrun_errors;
935 /*
	 * If any transmit underruns occurred, bump up the transmit
937 * threshold by another 512 bytes (64 * 8).
938 */
939 if (sp->tx_underruns) {
940 ifp->if_oerrors += sp->tx_underruns;
941 if (tx_threshold < 192)
942 tx_threshold += 64;
943 }
944
945 /*
	 * If we haven't received any packets in FXP_MAX_RX_IDLE seconds,
947 * then assume the receiver has locked up and attempt to clear
948 * the condition by reprogramming the multicast filter. This is
949 * a work-around for a bug in the 82557 where the receiver locks
	 * up if it gets certain types of garbage in the synchronization
951 * bits prior to the packet header. This bug is supposed to only
952 * occur in 10Mbps mode, but has been seen to occur in 100Mbps
953 * mode as well (perhaps due to a 10/100 speed transition).
954 */
955 if (sc->rx_idle_secs > FXP_MAX_RX_IDLE) {
956 sc->rx_idle_secs = 0;
957 fxp_mc_setup(sc);
958 }
959 /*
960 * If there is no pending command, start another stats
961 * dump. Otherwise punt for now.
962 */
963 if (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) == 0) {
964 /*
965 * Start another stats dump.
966 */
967 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND,
968 FXP_SCB_COMMAND_CU_DUMPRESET);
969 } else {
970 /*
971 * A previous command is still waiting to be accepted.
972 * Just zero our copy of the stats and wait for the
973 * next timer event to update them.
974 */
975 sp->tx_good = 0;
976 sp->tx_underruns = 0;
977 sp->tx_total_collisions = 0;
978
979 sp->rx_good = 0;
980 sp->rx_crc_errors = 0;
981 sp->rx_alignment_errors = 0;
982 sp->rx_rnr_errors = 0;
983 sp->rx_overrun_errors = 0;
984 }
985
986 /* Tick the MII clock. */
987 mii_tick(&sc->sc_mii);
988
989 /*
990 * If IFF_ALLMULTI state changed, we need to reinitialize the chip,
991 * because this is handled by the config block.
992 *
993 * NOTE: This shouldn't ever really happen here.
994 */
995 if (((ifp->if_flags ^ oflags) & IFF_ALLMULTI) != 0) {
996 if (ifp->if_flags & IFF_DEBUG)
997 printf("%s: fxp_tick: allmulti state changed\n",
998 sc->sc_dev.dv_xname);
999 fxp_init(sc);
1000 }
1001
1002 splx(s);
1003
1004 /*
1005 * Schedule another timeout one second from now.
1006 */
1007 timeout(fxp_tick, sc, hz);
1008 }
1009
1010 /*
1011 * Stop the interface. Cancels the statistics updater and resets
1012 * the interface.
1013 */
1014 void
1015 fxp_stop(sc)
1016 struct fxp_softc *sc;
1017 {
1018 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1019 struct fxp_rxdesc *rxd;
1020 struct fxp_txsoft *txs;
1021 int i;
1022
1023 /*
1024 * Cancel stats updater.
1025 */
1026 untimeout(fxp_tick, sc);
1027
1028 /*
	 * Issue a selective reset.
1030 */
1031 CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
1032 DELAY(10);
1033
1034 /*
1035 * Release any xmit buffers.
1036 */
1037 for (i = 0; i < FXP_NTXCB; i++) {
1038 txs = FXP_DSTX(sc, i);
1039 if (txs->txs_mbuf != NULL) {
1040 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1041 m_freem(txs->txs_mbuf);
1042 txs->txs_mbuf = NULL;
1043 }
1044 }
1045 sc->sc_txpending = 0;
1046
1047 /*
1048 * Free all the receive buffers then reallocate/reinitialize
1049 */
1050 sc->rfa_head = NULL;
1051 sc->rfa_tail = NULL;
1052 for (i = 0; i < FXP_NRFABUFS; i++) {
1053 rxd = &sc->sc_rxdescs[i];
1054 if (rxd->fr_mbhead != NULL) {
1055 bus_dmamap_unload(sc->sc_dmat, rxd->fr_dmamap);
1056 m_freem(rxd->fr_mbhead);
1057 rxd->fr_mbhead = NULL;
1058 }
1059 if (fxp_add_rfabuf(sc, rxd) != 0) {
1060 /*
1061 * This "can't happen" - we're at splnet()
1062 * and we just freed the buffer we need
1063 * above.
1064 */
1065 panic("fxp_stop: no buffers!");
1066 }
1067 }
1068
1069 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1070 ifp->if_timer = 0;
1071 }
1072
1073 /*
 * Watchdog/transmit timeout handler.  Called when a
1075 * transmission is started on the interface, but no interrupt is
1076 * received before the timeout. This usually indicates that the
1077 * card has wedged for some reason.
1078 */
1079 void
1080 fxp_watchdog(ifp)
1081 struct ifnet *ifp;
1082 {
1083 struct fxp_softc *sc = ifp->if_softc;
1084
1085 printf("%s: device timeout\n", sc->sc_dev.dv_xname);
1086 ifp->if_oerrors++;
1087
1088 fxp_init(sc);
1089 }
1090
1091 /*
1092 * Initialize the interface. Must be called at splnet().
1093 */
1094 void
1095 fxp_init(sc)
1096 struct fxp_softc *sc;
1097 {
1098 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1099 struct fxp_cb_config *cbp;
1100 struct fxp_cb_ias *cb_ias;
1101 struct fxp_cb_tx *txd;
1102 int i, prm, allm;
1103
1104 /*
1105 * Cancel any pending I/O
1106 */
1107 fxp_stop(sc);
1108
1109 sc->sc_flags = 0;
1110
1111 /*
1112 * Initialize base of CBL and RFA memory. Loading with zero
1113 * sets it up for regular linear addressing.
1114 */
1115 fxp_scb_wait(sc);
1116 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0);
1117 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_BASE);
1118
1119 fxp_scb_wait(sc);
1120 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_RU_BASE);
1121
1122 /*
1123 * Initialize the multicast filter. Do this now, since we might
1124 * have to setup the config block differently.
1125 */
1126 fxp_mc_setup(sc);
1127
1128 prm = (ifp->if_flags & IFF_PROMISC) ? 1 : 0;
1129 allm = (ifp->if_flags & IFF_ALLMULTI) ? 1 : 0;
1130
1131 /*
1132 * Initialize base of dump-stats buffer.
1133 */
1134 fxp_scb_wait(sc);
1135 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
1136 sc->sc_cddma + FXP_CDSTATSOFF);
1137 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_DUMP_ADR);
1138
1139 cbp = &sc->sc_control_data->fcd_configcb;
1140 memset(cbp, 0, sizeof(struct fxp_cb_config));
1141
1142 /*
1143 * This copy is kind of disgusting, but there are a bunch of must be
1144 * zero and must be one bits in this structure and this is the easiest
1145 * way to initialize them all to proper values.
1146 */
1147 memcpy(cbp, fxp_cb_config_template, sizeof(fxp_cb_config_template));
1148
1149 cbp->cb_status = 0;
1150 cbp->cb_command = FXP_CB_COMMAND_CONFIG | FXP_CB_COMMAND_EL;
1151 cbp->link_addr = -1; /* (no) next command */
1152 cbp->byte_count = 22; /* (22) bytes to config */
1153 cbp->rx_fifo_limit = 8; /* rx fifo threshold (32 bytes) */
1154 cbp->tx_fifo_limit = 0; /* tx fifo threshold (0 bytes) */
1155 cbp->adaptive_ifs = 0; /* (no) adaptive interframe spacing */
1156 cbp->rx_dma_bytecount = 0; /* (no) rx DMA max */
1157 cbp->tx_dma_bytecount = 0; /* (no) tx DMA max */
1158 cbp->dma_bce = 0; /* (disable) dma max counters */
1159 cbp->late_scb = 0; /* (don't) defer SCB update */
1160 cbp->tno_int = 0; /* (disable) tx not okay interrupt */
1161 cbp->ci_int = 1; /* interrupt on CU idle */
1162 cbp->save_bf = prm; /* save bad frames */
1163 cbp->disc_short_rx = !prm; /* discard short packets */
1164 cbp->underrun_retry = 1; /* retry mode (1) on DMA underrun */
1165 cbp->mediatype = !sc->phy_10Mbps_only; /* interface mode */
1166 cbp->nsai = 1; /* (don't) disable source addr insert */
1167 cbp->preamble_length = 2; /* (7 byte) preamble */
1168 cbp->loopback = 0; /* (don't) loopback */
1169 cbp->linear_priority = 0; /* (normal CSMA/CD operation) */
1170 cbp->linear_pri_mode = 0; /* (wait after xmit only) */
1171 cbp->interfrm_spacing = 6; /* (96 bits of) interframe spacing */
1172 cbp->promiscuous = prm; /* promiscuous mode */
1173 cbp->bcast_disable = 0; /* (don't) disable broadcasts */
1174 cbp->crscdt = 0; /* (CRS only) */
1175 cbp->stripping = !prm; /* truncate rx packet to byte count */
1176 cbp->padding = 1; /* (do) pad short tx packets */
1177 cbp->rcv_crc_xfer = 0; /* (don't) xfer CRC to host */
1178 cbp->force_fdx = 0; /* (don't) force full duplex */
1179 cbp->fdx_pin_en = 1; /* (enable) FDX# pin */
1180 cbp->multi_ia = 0; /* (don't) accept multiple IAs */
1181 cbp->mc_all = allm; /* accept all multicasts */
1182
1183 FXP_CDCONFIGSYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1184
1185 /*
1186 * Start the config command/DMA.
1187 */
1188 fxp_scb_wait(sc);
1189 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->sc_cddma + FXP_CDCONFIGOFF);
1190 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START);
1191 /* ...and wait for it to complete. */
1192 do {
1193 FXP_CDCONFIGSYNC(sc,
1194 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1195 } while ((cbp->cb_status & FXP_CB_STATUS_C) == 0);
1196
1197 /*
1198 * Initialize the station address.
1199 */
1200 cb_ias = &sc->sc_control_data->fcd_iascb;
1201 cb_ias->cb_status = 0;
1202 cb_ias->cb_command = FXP_CB_COMMAND_IAS | FXP_CB_COMMAND_EL;
1203 cb_ias->link_addr = -1;
1204 memcpy((void *)cb_ias->macaddr, LLADDR(ifp->if_sadl), ETHER_ADDR_LEN);
1205
1206 FXP_CDIASSYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1207
1208 /*
1209 * Start the IAS (Individual Address Setup) command/DMA.
1210 */
1211 fxp_scb_wait(sc);
1212 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->sc_cddma + FXP_CDIASOFF);
1213 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START);
1214 /* ...and wait for it to complete. */
1215 do {
1216 FXP_CDIASSYNC(sc,
1217 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1218 } while ((cb_ias->cb_status & FXP_CB_STATUS_C) == 0);
1219
1220 /*
1221 * Initialize the transmit descriptor ring. txlast is initialized
1222 * to the end of the list so that it will wrap around to the first
1223 * descriptor when the first packet is transmitted.
1224 */
1225 for (i = 0; i < FXP_NTXCB; i++) {
1226 txd = FXP_CDTX(sc, i);
1227 memset(txd, 0, sizeof(struct fxp_cb_tx));
1228 txd->cb_command = FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S;
1229 txd->tbd_array_addr = FXP_CDTBDADDR(sc, i);
1230 txd->link_addr = FXP_CDTXADDR(sc, FXP_NEXTTX(i));
1231 FXP_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1232 }
1233 sc->sc_txpending = 0;
1234 sc->sc_txdirty = 0;
1235 sc->sc_txlast = FXP_NTXCB - 1;
1236
1237 /*
1238 * Give the transmit ring to the chip. We do this by pointing
1239 * the chip at the last descriptor (which is a NOP|SUSPEND), and
1240 * issuing a start command. It will execute the NOP and then
1241 * suspend, pointing at the first descriptor.
1242 */
1243 fxp_scb_wait(sc);
1244 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, FXP_CDTXADDR(sc, sc->sc_txlast));
1245 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START);
1246
1247 /*
1248 * Initialize receiver buffer area - RFA.
1249 */
1250 fxp_scb_wait(sc);
1251 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
1252 sc->rfa_head->fr_dmamap->dm_segs[0].ds_addr + RFA_ALIGNMENT_FUDGE);
1253 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_RU_START);
1254
1255 /*
1256 * Set current media.
1257 */
1258 mii_mediachg(&sc->sc_mii);
1259
1260 /*
1261 * ...all done!
1262 */
1263 ifp->if_flags |= IFF_RUNNING;
1264 ifp->if_flags &= ~IFF_OACTIVE;
1265
1266 /*
1267 * Start stats updater.
1268 */
1269 timeout(fxp_tick, sc, hz);
1270
1271 /*
1272 * Attempt to start output on the interface.
1273 */
1274 fxp_start(ifp);
1275 }
1276
1277 /*
1278 * Change media according to request.
1279 */
1280 int
1281 fxp_mii_mediachange(ifp)
1282 struct ifnet *ifp;
1283 {
1284 struct fxp_softc *sc = ifp->if_softc;
1285
1286 if (ifp->if_flags & IFF_UP)
1287 mii_mediachg(&sc->sc_mii);
1288 return (0);
1289 }
1290
1291 /*
1292 * Notify the world which media we're using.
1293 */
1294 void
1295 fxp_mii_mediastatus(ifp, ifmr)
1296 struct ifnet *ifp;
1297 struct ifmediareq *ifmr;
1298 {
1299 struct fxp_softc *sc = ifp->if_softc;
1300
1301 mii_pollstat(&sc->sc_mii);
1302 ifmr->ifm_status = sc->sc_mii.mii_media_status;
1303 ifmr->ifm_active = sc->sc_mii.mii_media_active;
1304 }
1305
1306 int
1307 fxp_80c24_mediachange(ifp)
1308 struct ifnet *ifp;
1309 {
1310
1311 /* Nothing to do here. */
1312 return (0);
1313 }
1314
1315 void
1316 fxp_80c24_mediastatus(ifp, ifmr)
1317 struct ifnet *ifp;
1318 struct ifmediareq *ifmr;
1319 {
1320 struct fxp_softc *sc = ifp->if_softc;
1321
1322 /*
	 * Report the currently selected media; we cannot determine
	 * the link status.
1325 */
1326 ifmr->ifm_status = 0;
1327 ifmr->ifm_active = sc->sc_mii.mii_media.ifm_cur->ifm_media;
1328 }
1329
1330 /*
1331 * Add a buffer to the end of the RFA buffer list.
1332 * Return 0 if successful, 1 for failure. A failure results in
 * adding the 'oldm' (if non-NULL) onto the end of the list,
 * tossing out its old contents and recycling it.
 * The RFA struct is stuck at the beginning of the mbuf cluster and the
1336 * data pointer is fixed up to point just past it.
1337 */
1338 int
1339 fxp_add_rfabuf(sc, rxd)
1340 struct fxp_softc *sc;
1341 struct fxp_rxdesc *rxd;
1342 {
1343 struct mbuf *m, *oldm;
1344 struct fxp_rfa *rfa, *p_rfa;
1345 bus_dmamap_t rxmap;
1346 u_int32_t v;
1347 int error, rval = 0;
1348
1349 oldm = rxd->fr_mbhead;
1350 rxmap = rxd->fr_dmamap;
1351
1352 MGETHDR(m, M_DONTWAIT, MT_DATA);
1353 if (m != NULL) {
1354 MCLGET(m, M_DONTWAIT);
1355 if ((m->m_flags & M_EXT) == 0) {
1356 m_freem(m);
1357 if (oldm == NULL)
1358 return 1;
1359 m = oldm;
1360 m->m_data = m->m_ext.ext_buf;
1361 rval = 1;
1362 }
1363 } else {
1364 if (oldm == NULL)
1365 return 1;
1366 m = oldm;
1367 m->m_data = m->m_ext.ext_buf;
1368 rval = 1;
1369 }
1370
1371 rxd->fr_mbhead = m;
1372
1373 /*
1374 * Setup the DMA map for this receive buffer.
1375 */
1376 if (m != oldm) {
1377 if (oldm != NULL)
1378 bus_dmamap_unload(sc->sc_dmat, rxmap);
1379 error = bus_dmamap_load(sc->sc_dmat, rxmap,
1380 m->m_ext.ext_buf, MCLBYTES, NULL, BUS_DMA_NOWAIT);
1381 if (error) {
1382 printf("%s: can't load rx buffer, error = %d\n",
1383 sc->sc_dev.dv_xname, error);
1384 panic("fxp_add_rfabuf"); /* XXX */
1385 }
1386 }
1387
1388 /*
1389 * Move the data pointer up so that the incoming data packet
1390 * will be 32-bit aligned.
1391 */
1392 m->m_data += RFA_ALIGNMENT_FUDGE;
1393
1394 /*
1395 * Get a pointer to the base of the mbuf cluster and move
1396 * data start past the RFA descriptor.
1397 */
1398 rfa = mtod(m, struct fxp_rfa *);
1399 m->m_data += sizeof(struct fxp_rfa);
1400 rfa->size = MCLBYTES - sizeof(struct fxp_rfa) - RFA_ALIGNMENT_FUDGE;
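	/*
	 * For example, with the common 2048-byte MCLBYTES and a 16-byte
	 * RFA (an assumption; the sizeof() above is authoritative), this
	 * leaves 2048 - 16 - 2 = 2030 bytes of receive space.
	 */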
1401
1402 /*
1403 * Initialize the rest of the RFA.
1404 */
1405 rfa->rfa_status = 0;
1406 rfa->rfa_control = FXP_RFA_CONTROL_EL;
1407 rfa->actual_size = 0;
1408
1409 /*
	 * Note that since the RFA is misaligned, we cannot store the
	 * 32-bit addresses into it directly (that would be an unaligned
	 * access).  Instead, we must copy.
1412 */
1413 v = -1;
1414 memcpy((void *)&rfa->link_addr, &v, sizeof(v));
1415 memcpy((void *)&rfa->rbd_addr, &v, sizeof(v));
1416
1417 /*
1418 * If there are other buffers already on the list, attach this
1419 * one to the end by fixing up the tail to point to this one.
1420 */
1421 if (sc->rfa_head != NULL) {
1422 p_rfa = (struct fxp_rfa *)
1423 (sc->rfa_tail->fr_mbhead->m_ext.ext_buf +
1424 RFA_ALIGNMENT_FUDGE);
1425 sc->rfa_tail->fr_next = rxd;
1426 v = rxmap->dm_segs[0].ds_addr + RFA_ALIGNMENT_FUDGE;
1427 memcpy((void *)&p_rfa->link_addr, &v, sizeof(v));
1428 p_rfa->rfa_control &= ~FXP_RFA_CONTROL_EL;
1429 } else {
1430 sc->rfa_head = rxd;
1431 }
1432 sc->rfa_tail = rxd;
1433
1434 bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
1435 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1436
1437 return (rval);
1438 }
1439
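/*
 * MDI (MII management) register access.  The shifts below assume the
 * usual i8255x MDI control register layout: a completion/ready bit at
 * 0x10000000, a 2-bit opcode at bits 27..26, the PHY address at bits
 * 25..21, the register number at bits 20..16, and the data in the low
 * 16 bits.
 */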
int
1441 fxp_mdi_read(self, phy, reg)
1442 struct device *self;
1443 int phy;
1444 int reg;
1445 {
1446 struct fxp_softc *sc = (struct fxp_softc *)self;
1447 int count = 10000;
1448 int value;
1449
1450 CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
1451 (FXP_MDI_READ << 26) | (reg << 16) | (phy << 21));
1452
1453 while (((value = CSR_READ_4(sc, FXP_CSR_MDICONTROL)) & 0x10000000) == 0
1454 && count--)
1455 DELAY(10);
1456
1457 if (count <= 0)
1458 printf("%s: fxp_mdi_read: timed out\n", sc->sc_dev.dv_xname);
1459
1460 return (value & 0xffff);
1461 }
1462
1463 void
1464 fxp_statchg(self)
1465 struct device *self;
1466 {
1467
1468 /* XXX Update ifp->if_baudrate */
1469 }
1470
1471 void
1472 fxp_mdi_write(self, phy, reg, value)
1473 struct device *self;
1474 int phy;
1475 int reg;
1476 int value;
1477 {
1478 struct fxp_softc *sc = (struct fxp_softc *)self;
1479 int count = 10000;
1480
1481 CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
1482 (FXP_MDI_WRITE << 26) | (reg << 16) | (phy << 21) |
1483 (value & 0xffff));
1484
	while ((CSR_READ_4(sc, FXP_CSR_MDICONTROL) & 0x10000000) == 0 &&
1486 count--)
1487 DELAY(10);
1488
1489 if (count <= 0)
1490 printf("%s: fxp_mdi_write: timed out\n", sc->sc_dev.dv_xname);
1491 }
1492
1493 int
1494 fxp_ioctl(ifp, command, data)
1495 struct ifnet *ifp;
1496 u_long command;
1497 caddr_t data;
1498 {
1499 struct fxp_softc *sc = ifp->if_softc;
1500 struct ifreq *ifr = (struct ifreq *)data;
1501 struct ifaddr *ifa = (struct ifaddr *)data;
1502 int s, oflags, error = 0;
1503
1504 s = splnet();
1505
1506 switch (command) {
1507 case SIOCSIFADDR:
1508 ifp->if_flags |= IFF_UP;
1509
1510 switch (ifa->ifa_addr->sa_family) {
1511 #ifdef INET
1512 case AF_INET:
1513 fxp_init(sc);
1514 arp_ifinit(ifp, ifa);
1515 break;
1516 #endif /* INET */
1517 #ifdef NS
1518 case AF_NS:
1519 {
1520 struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;
1521
1522 if (ns_nullhost(*ina))
1523 ina->x_host = *(union ns_host *)
1524 LLADDR(ifp->if_sadl);
1525 else
1526 bcopy(ina->x_host.c_host, LLADDR(ifp->if_sadl),
1527 ifp->if_addrlen);
1528 /* Set new address. */
1529 fxp_init(sc);
1530 break;
1531 }
1532 #endif /* NS */
1533 default:
1534 fxp_init(sc);
1535 break;
1536 }
1537 break;
1538
1539 case SIOCSIFMTU:
1540 if (ifr->ifr_mtu > ETHERMTU)
1541 error = EINVAL;
1542 else
1543 ifp->if_mtu = ifr->ifr_mtu;
1544 break;
1545
1546 case SIOCSIFFLAGS:
1547 if ((ifp->if_flags & IFF_UP) == 0 &&
1548 (ifp->if_flags & IFF_RUNNING) != 0) {
1549 /*
1550 * If interface is marked down and it is running, then
1551 * stop it.
1552 */
1553 fxp_stop(sc);
1554 } else if ((ifp->if_flags & IFF_UP) != 0 &&
1555 (ifp->if_flags & IFF_RUNNING) == 0) {
1556 /*
1557 * If interface is marked up and it is stopped, then
1558 * start it.
1559 */
1560 fxp_init(sc);
1561 } else if ((ifp->if_flags & IFF_UP) != 0) {
1562 /*
1563 * Reset the interface to pick up change in any other
1564 * flags that affect the hardware state.
1565 */
1566 fxp_init(sc);
1567 }
1568 break;
1569
1570 case SIOCADDMULTI:
1571 case SIOCDELMULTI:
1572 error = (command == SIOCADDMULTI) ?
1573 ether_addmulti(ifr, &sc->sc_ethercom) :
1574 ether_delmulti(ifr, &sc->sc_ethercom);
1575
1576 if (error == ENETRESET) {
1577 /*
1578 * Multicast list has changed; set the hardware
1579 * filter accordingly.
1580 */
1581 oflags = ifp->if_flags;
1582 fxp_mc_setup(sc);
1583
1584 /*
1585 * If IFF_ALLMULTI state changed, we need to
1586 * reinitialize the chip, because this is
1587 * handled by the config block.
1588 */
1589 if (((ifp->if_flags ^ oflags) & IFF_ALLMULTI) != 0)
1590 fxp_init(sc);
1591 error = 0;
1592 }
1593 break;
1594
1595 case SIOCSIFMEDIA:
1596 case SIOCGIFMEDIA:
1597 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
1598 break;
1599
1600 default:
1601 error = EINVAL;
1602 break;
1603 }
1604
1605 splx(s);
1606 return (error);
1607 }
1608
1609 /*
1610 * Program the multicast filter.
1611 *
1612 * This function must be called at splnet().
1613 */
1614 void
1615 fxp_mc_setup(sc)
1616 struct fxp_softc *sc;
1617 {
1618 struct fxp_cb_mcs *mcsp = &sc->sc_control_data->fcd_mcscb;
1619 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1620 struct ethercom *ec = &sc->sc_ethercom;
1621 struct ether_multi *enm;
1622 struct ether_multistep step;
1623 int nmcasts;
1624
1625 /*
1626 * If there are transmissions pending, wait until they're
1627 * complete. fxp_intr() will call us when they've drained.
1628 */
1629 if (sc->sc_txpending) {
1630 sc->sc_flags |= FXPF_NEEDMCSETUP;
1631 return;
1632 }
1633 sc->sc_flags &= ~FXPF_NEEDMCSETUP;
1634
1635 ifp->if_flags &= ~IFF_ALLMULTI;
1636
1637 /*
1638 * Initialize multicast setup descriptor.
1639 */
1640 nmcasts = 0;
1641 ETHER_FIRST_MULTI(step, ec, enm);
1642 while (enm != NULL) {
1643 /*
1644 * Check for too many multicast addresses or if we're
1645 * listening to a range. Either way, we simply have
1646 * to accept all multicasts.
1647 */
1648 if (nmcasts >= MAXMCADDR ||
1649 memcmp(enm->enm_addrlo, enm->enm_addrhi,
1650 ETHER_ADDR_LEN) != 0) {
1651 /*
1652 * Callers of this function must do the
1653 * right thing with this. If we're called
1654 * from outside fxp_init(), the caller must
			 * detect if the state of IFF_ALLMULTI changes.
1656 * If it does, the caller must then call
1657 * fxp_init(), since allmulti is handled by
1658 * the config block.
1659 */
1660 ifp->if_flags |= IFF_ALLMULTI;
1661 return;
1662 }
1663 memcpy((void *)&mcsp->mc_addr[nmcasts][0], enm->enm_addrlo,
1664 ETHER_ADDR_LEN);
1665 nmcasts++;
1666 ETHER_NEXT_MULTI(step, enm);
1667 }
1668
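	/*
	 * The MCAS CB below sets the suspend (S) bit, so the command unit
	 * stops after the multicast setup completes; its link_addr points
	 * at the next transmit CB so the transmit ring stays linked in.
	 */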
1669 mcsp->cb_status = 0;
1670 mcsp->cb_command = FXP_CB_COMMAND_MCAS | FXP_CB_COMMAND_S;
1671 mcsp->link_addr = FXP_CDTXADDR(sc, FXP_NEXTTX(sc->sc_txlast));
1672 mcsp->mc_cnt = nmcasts * ETHER_ADDR_LEN;
1673
1674 FXP_CDMCSSYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1675
1676 /*
	 * Wait until the command unit is not active; it should never be
	 * active here since nothing is queued, but make sure anyway.
1679 */
1680 while ((CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS) >> 6) ==
1681 FXP_SCB_CUS_ACTIVE)
1682 /* nothing */ ;
1683
1684 /*
1685 * Start the multicast setup command/DMA.
1686 */
1687 fxp_scb_wait(sc);
1688 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->sc_cddma + FXP_CDMCSOFF);
1689 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START);
1690
1691 /* ...and wait for it to complete. */
1692 do {
1693 FXP_CDMCSSYNC(sc,
1694 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1695 } while ((mcsp->cb_status & FXP_CB_STATUS_C) == 0);
1696 }
1697