 1 /*	$NetBSD: i82557.c,v 1.8 1999/08/05 01:35:40 thorpej Exp $	*/
2
3 /*-
4 * Copyright (c) 1997, 1998, 1999 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 /*
41 * Copyright (c) 1995, David Greenman
42 * All rights reserved.
43 *
44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions
46 * are met:
47 * 1. Redistributions of source code must retain the above copyright
48 * notice unmodified, this list of conditions, and the following
49 * disclaimer.
50 * 2. Redistributions in binary form must reproduce the above copyright
51 * notice, this list of conditions and the following disclaimer in the
52 * documentation and/or other materials provided with the distribution.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * Id: if_fxp.c,v 1.47 1998/01/08 23:42:29 eivind Exp
67 */
68
69 /*
70 * Device driver for the Intel i82557 fast Ethernet controller.
71 */
72
73 #include "opt_inet.h"
74 #include "opt_ns.h"
75 #include "bpfilter.h"
76 #include "rnd.h"
77
78 #include <sys/param.h>
79 #include <sys/systm.h>
80 #include <sys/mbuf.h>
81 #include <sys/malloc.h>
82 #include <sys/kernel.h>
83 #include <sys/socket.h>
84 #include <sys/ioctl.h>
85 #include <sys/errno.h>
86 #include <sys/device.h>
87
88 #include <vm/vm.h> /* for PAGE_SIZE */
89
90 #if NRND > 0
91 #include <sys/rnd.h>
92 #endif
93
94 #include <net/if.h>
95 #include <net/if_dl.h>
96 #include <net/if_media.h>
97 #include <net/if_ether.h>
98
99 #if NBPFILTER > 0
100 #include <net/bpf.h>
101 #endif
102
103 #ifdef INET
104 #include <netinet/in.h>
105 #include <netinet/if_inarp.h>
106 #endif
107
108 #ifdef NS
109 #include <netns/ns.h>
110 #include <netns/ns_if.h>
111 #endif
112
113 #include <machine/bus.h>
114 #include <machine/intr.h>
115
116 #include <dev/mii/miivar.h>
117
118 #include <dev/ic/i82557reg.h>
119 #include <dev/ic/i82557var.h>
120
121 /*
122 * NOTE! On the Alpha, we have an alignment constraint. The
123 * card DMAs the packet immediately following the RFA. However,
124 * the first thing in the packet is a 14-byte Ethernet header.
125 * This means that the packet is misaligned. To compensate,
126 * we actually offset the RFA 2 bytes into the cluster. This
 127  * aligns the packet after the Ethernet header at a 32-bit
128 * boundary. HOWEVER! This means that the RFA is misaligned!
129 */
130 #define RFA_ALIGNMENT_FUDGE 2
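/*
 * Resulting cluster layout (offsets from the start of the cluster):
 *
 *   +0                2 bytes of padding (the "fudge")
 *   +2                the RFA, DMA'd by the chip
 *   +2+sizeof(RFA)    14-byte Ethernet header
 *   ...               payload, now aligned on a 32-bit boundary
 */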
131
132 /*
133 * Template for default configuration parameters.
134 * See struct fxp_cb_config for the bit definitions.
135 */
136 u_int8_t fxp_cb_config_template[] = {
137 0x0, 0x0, /* cb_status */
138 0x80, 0x2, /* cb_command */
139 0xff, 0xff, 0xff, 0xff, /* link_addr */
140 0x16, /* 0 */
141 0x8, /* 1 */
142 0x0, /* 2 */
143 0x0, /* 3 */
144 0x0, /* 4 */
145 0x80, /* 5 */
146 0xb2, /* 6 */
147 0x3, /* 7 */
148 0x1, /* 8 */
149 0x0, /* 9 */
150 0x26, /* 10 */
151 0x0, /* 11 */
152 0x60, /* 12 */
153 0x0, /* 13 */
154 0xf2, /* 14 */
155 0x48, /* 15 */
156 0x0, /* 16 */
157 0x40, /* 17 */
158 0xf3, /* 18 */
159 0x0, /* 19 */
160 0x3f, /* 20 */
161 0x5 /* 21 */
162 };
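/*
 * The cb_status, cb_command and link_addr fields above are overwritten
 * by fxp_init().  Bytes 0-21 are the 82557 configuration bytes proper:
 * byte 0 (0x16 == 22) is the configuration byte count, and the rest
 * supply the must-be-zero/must-be-one defaults that fxp_init()
 * selectively overrides after copying this template.
 */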
163
164 void fxp_mii_initmedia __P((struct fxp_softc *));
165 int fxp_mii_mediachange __P((struct ifnet *));
166 void fxp_mii_mediastatus __P((struct ifnet *, struct ifmediareq *));
167
168 void fxp_80c24_initmedia __P((struct fxp_softc *));
169 int fxp_80c24_mediachange __P((struct ifnet *));
170 void fxp_80c24_mediastatus __P((struct ifnet *, struct ifmediareq *));
171
172 inline void fxp_scb_wait __P((struct fxp_softc *));
173
174 void fxp_start __P((struct ifnet *));
175 int fxp_ioctl __P((struct ifnet *, u_long, caddr_t));
176 int fxp_init __P((struct fxp_softc *));
177 void fxp_rxdrain __P((struct fxp_softc *));
178 void fxp_stop __P((struct fxp_softc *, int));
179 void fxp_watchdog __P((struct ifnet *));
180 int fxp_add_rfabuf __P((struct fxp_softc *, bus_dmamap_t, int));
181 int fxp_mdi_read __P((struct device *, int, int));
182 void fxp_statchg __P((struct device *));
183 void fxp_mdi_write __P((struct device *, int, int, int));
184 void fxp_read_eeprom __P((struct fxp_softc *, u_int16_t *, int, int));
185 void fxp_get_info __P((struct fxp_softc *, u_int8_t *));
186 void fxp_tick __P((void *));
187 void fxp_mc_setup __P((struct fxp_softc *));
188
189 void fxp_shutdown __P((void *));
190
191 int fxp_copy_small = 0;
192
193 struct fxp_phytype {
194 int fp_phy; /* type of PHY, -1 for MII at the end. */
195 void (*fp_init) __P((struct fxp_softc *));
196 } fxp_phytype_table[] = {
197 { FXP_PHY_80C24, fxp_80c24_initmedia },
198 { -1, fxp_mii_initmedia },
199 };
200
201 /*
202 * Set initial transmit threshold at 64 (512 bytes). This is
 203  * increased by 64 (512 bytes) at a time, to a maximum of 192
204 * (1536 bytes), if an underrun occurs.
205 */
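/*
 * (The TxCB tx_threshold field is in units of 8 bytes, hence
 * 64 == 512 bytes and the 192 == 1536-byte ceiling above.)
 */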
206 static int tx_threshold = 64;
207
208 /*
209 * Wait for the previous command to be accepted (but not necessarily
210 * completed).
211 */
212 inline void
213 fxp_scb_wait(sc)
214 struct fxp_softc *sc;
215 {
216 int i = 10000;
217
218 while (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) && --i)
219 delay(2);
220 if (i == 0)
221 printf("%s: WARNING: SCB timed out!\n", sc->sc_dev.dv_xname);
222 }
223
224 /*
225 * Finish attaching an i82557 interface. Called by bus-specific front-end.
226 */
227 void
228 fxp_attach(sc)
229 struct fxp_softc *sc;
230 {
231 u_int8_t enaddr[6];
232 struct ifnet *ifp;
233 bus_dma_segment_t seg;
234 int rseg, i, error;
235 struct fxp_phytype *fp;
236
237 /*
238 * Allocate the control data structures, and create and load the
239 * DMA map for it.
240 */
241 if ((error = bus_dmamem_alloc(sc->sc_dmat,
242 sizeof(struct fxp_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
243 0)) != 0) {
244 printf("%s: unable to allocate control data, error = %d\n",
245 sc->sc_dev.dv_xname, error);
246 goto fail_0;
247 }
248
249 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
250 sizeof(struct fxp_control_data), (caddr_t *)&sc->sc_control_data,
251 BUS_DMA_COHERENT)) != 0) {
252 printf("%s: unable to map control data, error = %d\n",
253 sc->sc_dev.dv_xname, error);
254 goto fail_1;
255 }
256 bzero(sc->sc_control_data, sizeof(struct fxp_control_data));
257
258 if ((error = bus_dmamap_create(sc->sc_dmat,
259 sizeof(struct fxp_control_data), 1,
260 sizeof(struct fxp_control_data), 0, 0, &sc->sc_dmamap)) != 0) {
261 printf("%s: unable to create control data DMA map, "
262 "error = %d\n", sc->sc_dev.dv_xname, error);
263 goto fail_2;
264 }
265
266 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap,
267 sc->sc_control_data, sizeof(struct fxp_control_data), NULL,
268 0)) != 0) {
269 printf("%s: can't load control data DMA map, error = %d\n",
270 sc->sc_dev.dv_xname, error);
271 goto fail_3;
272 }
273
274 /*
275 * Create the transmit buffer DMA maps.
276 */
277 for (i = 0; i < FXP_NTXCB; i++) {
278 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
279 FXP_NTXSEG, MCLBYTES, 0, 0,
280 &FXP_DSTX(sc, i)->txs_dmamap)) != 0) {
281 printf("%s: unable to create tx DMA map %d, "
282 "error = %d\n", sc->sc_dev.dv_xname, i, error);
283 goto fail_4;
284 }
285 }
286
287 /*
288 * Create the receive buffer DMA maps.
289 */
290 for (i = 0; i < FXP_NRFABUFS; i++) {
291 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
292 MCLBYTES, 0, 0, &sc->sc_rxmaps[i])) != 0) {
293 printf("%s: unable to create rx DMA map %d, "
294 "error = %d\n", sc->sc_dev.dv_xname, i, error);
295 goto fail_5;
296 }
297 }
298
299 /* Initialize MAC address and media structures. */
300 fxp_get_info(sc, enaddr);
301
302 printf("%s: Ethernet address %s, %s Mb/s\n", sc->sc_dev.dv_xname,
303 ether_sprintf(enaddr), sc->phy_10Mbps_only ? "10" : "10/100");
304
305 ifp = &sc->sc_ethercom.ec_if;
306
307 /*
308 * Get info about our media interface, and initialize it. Note
309 * the table terminates itself with a phy of -1, indicating
310 * that we're using MII.
311 */
312 for (fp = fxp_phytype_table; fp->fp_phy != -1; fp++)
313 if (fp->fp_phy == sc->phy_primary_device)
314 break;
315 (*fp->fp_init)(sc);
316
317 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
318 ifp->if_softc = sc;
319 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
320 ifp->if_ioctl = fxp_ioctl;
321 ifp->if_start = fxp_start;
322 ifp->if_watchdog = fxp_watchdog;
323
324 /*
325 * Attach the interface.
326 */
327 if_attach(ifp);
328 ether_ifattach(ifp, enaddr);
329 #if NBPFILTER > 0
330 bpfattach(&sc->sc_ethercom.ec_if.if_bpf, ifp, DLT_EN10MB,
331 sizeof(struct ether_header));
332 #endif
333 #if NRND > 0
334 rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
335 RND_TYPE_NET, 0);
336 #endif
337
338 /*
339 * Add shutdown hook so that DMA is disabled prior to reboot. Not
 340  * doing so could allow DMA to corrupt kernel memory during the
341 * reboot before the driver initializes.
342 */
343 sc->sc_sdhook = shutdownhook_establish(fxp_shutdown, sc);
344 if (sc->sc_sdhook == NULL)
345 printf("%s: WARNING: unable to establish shutdown hook\n",
346 sc->sc_dev.dv_xname);
347 return;
348
349 /*
350 * Free any resources we've allocated during the failed attach
 351  * attempt.  Do this in reverse order and fall through.
352 */
353 fail_5:
354 for (i = 0; i < FXP_NRFABUFS; i++) {
355 if (sc->sc_rxmaps[i] != NULL)
356 bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmaps[i]);
357 }
358 fail_4:
359 for (i = 0; i < FXP_NTXCB; i++) {
360 if (FXP_DSTX(sc, i)->txs_dmamap != NULL)
361 bus_dmamap_destroy(sc->sc_dmat,
362 FXP_DSTX(sc, i)->txs_dmamap);
363 }
364 bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
365 fail_3:
366 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
367 fail_2:
368 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
369 sizeof(struct fxp_control_data));
370 fail_1:
371 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
372 fail_0:
373 return;
374 }
375
376 void
377 fxp_mii_initmedia(sc)
378 struct fxp_softc *sc;
379 {
380
381 sc->sc_flags |= FXPF_MII;
382
383 sc->sc_mii.mii_ifp = &sc->sc_ethercom.ec_if;
384 sc->sc_mii.mii_readreg = fxp_mdi_read;
385 sc->sc_mii.mii_writereg = fxp_mdi_write;
386 sc->sc_mii.mii_statchg = fxp_statchg;
387 ifmedia_init(&sc->sc_mii.mii_media, 0, fxp_mii_mediachange,
388 fxp_mii_mediastatus);
389 mii_phy_probe(&sc->sc_dev, &sc->sc_mii, 0xffffffff);
390 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
391 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
392 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
393 } else
394 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
395 }
396
397 void
398 fxp_80c24_initmedia(sc)
399 struct fxp_softc *sc;
400 {
401
402 /*
403 * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter
404 * doesn't have a programming interface of any sort. The
405 * media is sensed automatically based on how the link partner
406 * is configured. This is, in essence, manual configuration.
407 */
408 printf("%s: Seeq 80c24 AutoDUPLEX media interface present\n",
409 sc->sc_dev.dv_xname);
410 ifmedia_init(&sc->sc_mii.mii_media, 0, fxp_80c24_mediachange,
411 fxp_80c24_mediastatus);
412 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
413 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
414 }
415
416 /*
417 * Device shutdown routine. Called at system shutdown after sync. The
418 * main purpose of this routine is to shut off receiver DMA so that
419 * kernel memory doesn't get clobbered during warmboot.
420 */
421 void
422 fxp_shutdown(arg)
423 void *arg;
424 {
425 struct fxp_softc *sc = arg;
426
427 fxp_stop(sc, 1);
428 }
429
430 /*
 431  * Read the primary PHY information and the MAC address from the EEPROM.
432 */
433 void
434 fxp_get_info(sc, enaddr)
435 struct fxp_softc *sc;
436 u_int8_t *enaddr;
437 {
438 u_int16_t data, myea[3];
439
440 /*
441 * Reset to a stable state.
442 */
443 CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
444 DELAY(10);
445
446 /*
447 * Get info about the primary PHY
448 */
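/*
 * EEPROM word 6 layout, as decoded below: bits 0-7 hold the primary
 * PHY's MDI address, bits 8-13 the PHY device type, and bit 15 is
 * set on 10Mbps-only parts.
 */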
449 fxp_read_eeprom(sc, &data, 6, 1);
450 sc->phy_primary_addr = data & 0xff;
451 sc->phy_primary_device = (data >> 8) & 0x3f;
452 sc->phy_10Mbps_only = data >> 15;
453
454 /*
455 * Read MAC address.
456 */
457 fxp_read_eeprom(sc, myea, 0, 3);
458 bcopy(myea, enaddr, ETHER_ADDR_LEN);
459 }
460
461 /*
462 * Read from the serial EEPROM. Basically, you manually shift in
463 * the read opcode (one bit at a time) and then shift in the address,
464 * and then you shift out the data (all of this one bit at a time).
465 * The word size is 16 bits, so you have to provide the address for
466 * every 16 bits of data.
467 */
468 void
469 fxp_read_eeprom(sc, data, offset, words)
470 struct fxp_softc *sc;
471 u_int16_t *data;
472 int offset;
473 int words;
474 {
475 u_int16_t reg;
476 int i, x;
477
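/*
 * Bit-bang protocol, as implemented below: assert EECS, clock in
 * the 3-bit read opcode and a 6-bit word address (MSB first, one
 * bit per EESK pulse), then clock out 16 data bits.  The fixed
 * 6-bit address width limits this routine to 64-word parts.
 */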
478 for (i = 0; i < words; i++) {
479 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
480 /*
481 * Shift in read opcode.
482 */
483 for (x = 3; x > 0; x--) {
484 if (FXP_EEPROM_OPC_READ & (1 << (x - 1))) {
485 reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
486 } else {
487 reg = FXP_EEPROM_EECS;
488 }
489 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
490 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
491 reg | FXP_EEPROM_EESK);
492 DELAY(1);
493 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
494 DELAY(1);
495 }
496 /*
497 * Shift in address.
498 */
499 for (x = 6; x > 0; x--) {
500 if ((i + offset) & (1 << (x - 1))) {
501 reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
502 } else {
503 reg = FXP_EEPROM_EECS;
504 }
505 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
506 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
507 reg | FXP_EEPROM_EESK);
508 DELAY(1);
509 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
510 DELAY(1);
511 }
512 reg = FXP_EEPROM_EECS;
513 data[i] = 0;
514 /*
515 * Shift out data.
516 */
517 for (x = 16; x > 0; x--) {
518 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
519 reg | FXP_EEPROM_EESK);
520 DELAY(1);
521 if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) &
522 FXP_EEPROM_EEDO)
523 data[i] |= (1 << (x - 1));
524 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
525 DELAY(1);
526 }
527 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
528 DELAY(1);
529 }
530 }
531
532 /*
533 * Start packet transmission on the interface.
534 */
535 void
536 fxp_start(ifp)
537 struct ifnet *ifp;
538 {
539 struct fxp_softc *sc = ifp->if_softc;
540 struct mbuf *m0, *m;
541 struct fxp_cb_tx *txd;
542 struct fxp_txsoft *txs;
543 struct fxp_tbdlist *tbd;
544 bus_dmamap_t dmamap;
545 int error, lasttx, nexttx, opending, seg;
546
547 /*
548 * If we want a re-init, bail out now.
549 */
550 if (sc->sc_flags & FXPF_WANTINIT) {
551 ifp->if_flags |= IFF_OACTIVE;
552 return;
553 }
554
555 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
556 return;
557
558 /*
559 * Remember the previous txpending and the current lasttx.
560 */
561 opending = sc->sc_txpending;
562 lasttx = sc->sc_txlast;
563
564 /*
565 * Loop through the send queue, setting up transmit descriptors
566 * until we drain the queue, or use up all available transmit
567 * descriptors.
568 */
569 while (sc->sc_txpending < FXP_NTXCB) {
570 /*
571 * Grab a packet off the queue.
572 */
573 IF_DEQUEUE(&ifp->if_snd, m0);
574 if (m0 == NULL)
575 break;
576
577 /*
578 * Get the next available transmit descriptor.
579 */
580 nexttx = FXP_NEXTTX(sc->sc_txlast);
581 txd = FXP_CDTX(sc, nexttx);
582 tbd = FXP_CDTBD(sc, nexttx);
583 txs = FXP_DSTX(sc, nexttx);
584 dmamap = txs->txs_dmamap;
585
586 /*
587 * Load the DMA map. If this fails, the packet either
588 * didn't fit in the allotted number of frags, or we were
589 * short on resources. In this case, we'll copy and try
590 * again.
591 */
592 if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
593 BUS_DMA_NOWAIT) != 0) {
594 MGETHDR(m, M_DONTWAIT, MT_DATA);
595 if (m == NULL) {
596 printf("%s: unable to allocate Tx mbuf\n",
597 sc->sc_dev.dv_xname);
598 IF_PREPEND(&ifp->if_snd, m0);
599 break;
600 }
601 if (m0->m_pkthdr.len > MHLEN) {
602 MCLGET(m, M_DONTWAIT);
603 if ((m->m_flags & M_EXT) == 0) {
604 printf("%s: unable to allocate Tx "
605 "cluster\n", sc->sc_dev.dv_xname);
606 m_freem(m);
607 IF_PREPEND(&ifp->if_snd, m0);
608 break;
609 }
610 }
611 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
612 m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
613 m_freem(m0);
614 m0 = m;
615 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
616 m0, BUS_DMA_NOWAIT);
617 if (error) {
618 printf("%s: unable to load Tx buffer, "
619 "error = %d\n", sc->sc_dev.dv_xname, error);
620 IF_PREPEND(&ifp->if_snd, m0);
621 break;
622 }
623 }
624
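/*
 * Each DMA segment becomes one transmit buffer descriptor.  The XMIT
 * command below is issued with FXP_CB_COMMAND_SF (flexible mode), so
 * the chip gathers the frame from this TBD list rather than from data
 * appended to the TxCB itself.
 */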
625 /* Initialize the fraglist. */
626 for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
627 tbd->tbd_d[seg].tb_addr =
628 dmamap->dm_segs[seg].ds_addr;
629 tbd->tbd_d[seg].tb_size =
630 dmamap->dm_segs[seg].ds_len;
631 }
632
633 FXP_CDTBDSYNC(sc, nexttx, BUS_DMASYNC_PREWRITE);
634
635 /* Sync the DMA map. */
636 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
637 BUS_DMASYNC_PREWRITE);
638
639 /*
640 * Store a pointer to the packet so we can free it later.
641 */
642 txs->txs_mbuf = m0;
643
644 /*
645 * Initialize the transmit descriptor.
646 */
647 txd->cb_status = 0;
648 txd->cb_command =
649 FXP_CB_COMMAND_XMIT | FXP_CB_COMMAND_SF;
650 txd->tx_threshold = tx_threshold;
651 txd->tbd_number = dmamap->dm_nsegs;
652
653 FXP_CDTXSYNC(sc, nexttx,
654 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
655
656 /* Advance the tx pointer. */
657 sc->sc_txpending++;
658 sc->sc_txlast = nexttx;
659
660 #if NBPFILTER > 0
661 /*
662 * Pass packet to bpf if there is a listener.
663 */
664 if (ifp->if_bpf)
665 bpf_mtap(ifp->if_bpf, m0);
666 #endif
667 }
668
669 if (sc->sc_txpending == FXP_NTXCB) {
670 /* No more slots; notify upper layer. */
671 ifp->if_flags |= IFF_OACTIVE;
672 }
673
674 if (sc->sc_txpending != opending) {
675 /*
676 * We enqueued packets. If the transmitter was idle,
677 * reset the txdirty pointer.
678 */
679 if (opending == 0)
680 sc->sc_txdirty = FXP_NEXTTX(lasttx);
681
682 /*
683 * Cause the chip to interrupt and suspend command
684 * processing once the last packet we've enqueued
685 * has been transmitted.
686 */
687 FXP_CDTX(sc, sc->sc_txlast)->cb_command |=
688 FXP_CB_COMMAND_I | FXP_CB_COMMAND_S;
689 FXP_CDTXSYNC(sc, sc->sc_txlast,
690 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
691
692 /*
693 * The entire packet chain is set up. Clear the suspend bit
694 * on the command prior to the first packet we set up.
695 */
696 FXP_CDTXSYNC(sc, lasttx,
697 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
698 FXP_CDTX(sc, lasttx)->cb_command &= ~FXP_CB_COMMAND_S;
699 FXP_CDTXSYNC(sc, lasttx,
700 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
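/*
 * Ordering note: the new descriptors (with S and I set on the tail)
 * were synced before the old suspend bit was cleared above, so a
 * resumed CU cannot run past the end of the newly queued commands
 * into stale ones.
 */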
701
702 /*
703 * Issue a Resume command in case the chip was suspended.
704 */
705 fxp_scb_wait(sc);
706 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_RESUME);
707
708 /* Set a watchdog timer in case the chip flakes out. */
709 ifp->if_timer = 5;
710 }
711 }
712
713 /*
714 * Process interface interrupts.
715 */
716 int
717 fxp_intr(arg)
718 void *arg;
719 {
720 struct fxp_softc *sc = arg;
721 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
722 struct fxp_cb_tx *txd;
723 struct fxp_txsoft *txs;
724 struct mbuf *m, *m0;
725 bus_dmamap_t rxmap;
726 struct fxp_rfa *rfa;
727 struct ether_header *eh;
728 int i, claimed = 0;
729 u_int16_t len;
730 u_int8_t statack;
731
732 while ((statack = CSR_READ_1(sc, FXP_CSR_SCB_STATACK)) != 0) {
733 claimed = 1;
734
735 /*
736 * First ACK all the interrupts in this pass.
737 */
738 CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, statack);
739
740 /*
741 * Process receiver interrupts. If a no-resource (RNR)
742 * condition exists, get whatever packets we can and
743 * re-start the receiver.
744 */
745 if (statack & (FXP_SCB_STATACK_FR | FXP_SCB_STATACK_RNR)) {
746 rcvloop:
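/*
 * The head of sc_rxq is the RFA the chip will complete next; peek
 * at its status and dequeue it only once the complete (C) bit is set.
 */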
747 m = sc->sc_rxq.ifq_head;
748 rfa = FXP_MTORFA(m);
749 rxmap = M_GETCTX(m, bus_dmamap_t);
750
751 FXP_RFASYNC(sc, m,
752 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
753
754 if ((rfa->rfa_status & FXP_RFA_STATUS_C) == 0) {
755 /*
756 * We have processed all of the
757 * receive buffers.
758 */
759 goto do_transmit;
760 }
761
762 IF_DEQUEUE(&sc->sc_rxq, m);
763
764 FXP_RXBUFSYNC(sc, m, BUS_DMASYNC_POSTREAD);
765
766 len = rfa->actual_size & (m->m_ext.ext_size - 1);
767
768 if (len < sizeof(struct ether_header)) {
769 /*
770 * Runt packet; drop it now.
771 */
772 FXP_INIT_RFABUF(sc, m);
773 goto rcvloop;
774 }
775
776 /*
777 * If the packet is small enough to fit in a
778 * single header mbuf, allocate one and copy
779 * the data into it. This greatly reduces
780 * memory consumption when we receive lots
781 * of small packets.
782 *
783 * Otherwise, we add a new buffer to the receive
784 * chain. If this fails, we drop the packet and
785 * recycle the old buffer.
786 */
787 if (fxp_copy_small != 0 && len <= MHLEN) {
788 MGETHDR(m0, M_DONTWAIT, MT_DATA);
 789 				if (m0 == NULL)
790 goto dropit;
791 memcpy(mtod(m0, caddr_t),
792 mtod(m, caddr_t), len);
793 FXP_INIT_RFABUF(sc, m);
794 m = m0;
795 } else {
796 if (fxp_add_rfabuf(sc, rxmap, 1) != 0) {
797 dropit:
798 ifp->if_ierrors++;
799 FXP_INIT_RFABUF(sc, m);
800 goto rcvloop;
801 }
802 }
803
804 m->m_pkthdr.rcvif = ifp;
805 m->m_pkthdr.len = m->m_len = len;
806 eh = mtod(m, struct ether_header *);
807
808 #if NBPFILTER > 0
809 /*
810 * Pass this up to any BPF listeners, but only
 811 			 * pass it up the stack if it's for us.
812 */
813 if (ifp->if_bpf) {
814 bpf_mtap(ifp->if_bpf, m);
815
816 if ((ifp->if_flags & IFF_PROMISC) != 0 &&
817 (rfa->rfa_status &
818 FXP_RFA_STATUS_IAMATCH) != 0 &&
819 (eh->ether_dhost[0] & 1) == 0) {
820 m_freem(m);
821 goto rcvloop;
822 }
823 }
824 #endif /* NBPFILTER > 0 */
825
826 /* Pass it on. */
827 (*ifp->if_input)(ifp, m);
828 goto rcvloop;
829 }
830
831 do_transmit:
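/*
 * On an RNR indication the receive unit has left the ready state
 * (typically out of resources); point it at the first unconsumed
 * RFA, offset by the alignment fudge, and restart it.
 */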
832 if (statack & FXP_SCB_STATACK_RNR) {
833 rxmap = M_GETCTX(sc->sc_rxq.ifq_head, bus_dmamap_t);
834 fxp_scb_wait(sc);
835 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
836 rxmap->dm_segs[0].ds_addr +
837 RFA_ALIGNMENT_FUDGE);
838 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND,
839 FXP_SCB_COMMAND_RU_START);
840 }
841
842 /*
843 * Free any finished transmit mbuf chains.
844 */
845 if (statack & (FXP_SCB_STATACK_CXTNO|FXP_SCB_STATACK_CNA)) {
846 ifp->if_flags &= ~IFF_OACTIVE;
847 for (i = sc->sc_txdirty; sc->sc_txpending != 0;
848 i = FXP_NEXTTX(i), sc->sc_txpending--) {
849 txd = FXP_CDTX(sc, i);
850 txs = FXP_DSTX(sc, i);
851
852 FXP_CDTXSYNC(sc, i,
853 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
854
855 if ((txd->cb_status & FXP_CB_STATUS_C) == 0)
856 break;
857
858 FXP_CDTBDSYNC(sc, i, BUS_DMASYNC_POSTWRITE);
859
860 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
861 0, txs->txs_dmamap->dm_mapsize,
862 BUS_DMASYNC_POSTWRITE);
863 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
864 m_freem(txs->txs_mbuf);
865 txs->txs_mbuf = NULL;
866 }
867
868 /* Update the dirty transmit buffer pointer. */
869 sc->sc_txdirty = i;
870
871 /*
872 * Cancel the watchdog timer if there are no pending
873 * transmissions.
874 */
875 if (sc->sc_txpending == 0) {
876 ifp->if_timer = 0;
877
878 /*
879 * If we want a re-init, do that now.
880 */
881 if (sc->sc_flags & FXPF_WANTINIT)
882 (void) fxp_init(sc);
883 }
884
885 /*
886 * Try to get more packets going.
887 */
888 fxp_start(ifp);
889 }
890 }
891
892 #if NRND > 0
893 if (claimed)
894 rnd_add_uint32(&sc->rnd_source, statack);
895 #endif
896 return (claimed);
897 }
898
899 /*
900 * Update packet in/out/collision statistics. The i82557 doesn't
901 * allow you to access these counters without doing a fairly
902 * expensive DMA to get _all_ of the statistics it maintains, so
903 * we do this operation here only once per second. The statistics
904 * counters in the kernel are updated from the previous dump-stats
905 * DMA and then a new dump-stats DMA is started. The on-chip
906 * counters are zeroed when the DMA completes. If we can't start
 907  * the DMA immediately, we don't wait - we just zero our copy of
 908  * the statistics and pick them up again on the next tick.
909 */
910 void
911 fxp_tick(arg)
912 void *arg;
913 {
914 struct fxp_softc *sc = arg;
915 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
916 struct fxp_stats *sp = &sc->sc_control_data->fcd_stats;
917 int s;
918
919 s = splnet();
920
921 ifp->if_opackets += sp->tx_good;
922 ifp->if_collisions += sp->tx_total_collisions;
923 if (sp->rx_good) {
924 ifp->if_ipackets += sp->rx_good;
925 sc->sc_rxidle = 0;
926 } else {
927 sc->sc_rxidle++;
928 }
929 ifp->if_ierrors +=
930 sp->rx_crc_errors +
931 sp->rx_alignment_errors +
932 sp->rx_rnr_errors +
933 sp->rx_overrun_errors;
934 /*
 935 	 * If any transmit underruns occurred, bump up the transmit
936 * threshold by another 512 bytes (64 * 8).
937 */
938 if (sp->tx_underruns) {
939 ifp->if_oerrors += sp->tx_underruns;
940 if (tx_threshold < 192)
941 tx_threshold += 64;
942 }
943
944 /*
 945 	 * If we haven't received any packets in FXP_MAX_RX_IDLE seconds,
946 * then assume the receiver has locked up and attempt to clear
947 * the condition by reprogramming the multicast filter (actually,
948 * resetting the interface). This is a work-around for a bug in
949 * the 82557 where the receiver locks up if it gets certain types
 950 	 * of garbage in the synchronization bits prior to the packet header.
951 * This bug is supposed to only occur in 10Mbps mode, but has been
952 * seen to occur in 100Mbps mode as well (perhaps due to a 10/100
953 * speed transition).
954 */
955 if (sc->sc_rxidle > FXP_MAX_RX_IDLE) {
956 (void) fxp_init(sc);
957 splx(s);
958 return;
959 }
960 /*
961 * If there is no pending command, start another stats
962 * dump. Otherwise punt for now.
963 */
964 if (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) == 0) {
965 /*
966 * Start another stats dump.
967 */
968 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND,
969 FXP_SCB_COMMAND_CU_DUMPRESET);
970 } else {
971 /*
972 * A previous command is still waiting to be accepted.
973 * Just zero our copy of the stats and wait for the
974 * next timer event to update them.
975 */
976 sp->tx_good = 0;
977 sp->tx_underruns = 0;
978 sp->tx_total_collisions = 0;
979
980 sp->rx_good = 0;
981 sp->rx_crc_errors = 0;
982 sp->rx_alignment_errors = 0;
983 sp->rx_rnr_errors = 0;
984 sp->rx_overrun_errors = 0;
985 }
986
987 if (sc->sc_flags & FXPF_MII) {
988 /* Tick the MII clock. */
989 mii_tick(&sc->sc_mii);
990 }
991
992 splx(s);
993
994 /*
995 * Schedule another timeout one second from now.
996 */
997 timeout(fxp_tick, sc, hz);
998 }
999
1000 /*
1001 * Drain the receive queue.
1002 */
1003 void
1004 fxp_rxdrain(sc)
1005 struct fxp_softc *sc;
1006 {
1007 bus_dmamap_t rxmap;
1008 struct mbuf *m;
1009
1010 for (;;) {
1011 IF_DEQUEUE(&sc->sc_rxq, m);
1012 if (m == NULL)
1013 break;
1014 rxmap = M_GETCTX(m, bus_dmamap_t);
1015 bus_dmamap_unload(sc->sc_dmat, rxmap);
1016 FXP_RXMAP_PUT(sc, rxmap);
1017 m_freem(m);
1018 }
1019 }
1020
1021 /*
1022 * Stop the interface. Cancels the statistics updater and resets
1023 * the interface.
1024 */
1025 void
1026 fxp_stop(sc, drain)
1027 struct fxp_softc *sc;
1028 int drain;
1029 {
1030 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1031 struct fxp_txsoft *txs;
1032 int i;
1033
1034 /*
1035 * Cancel stats updater.
1036 */
1037 untimeout(fxp_tick, sc);
1038
1039 /*
1040 * Issue software reset
1041 */
1042 CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
1043 DELAY(10);
1044
1045 /*
1046 * Release any xmit buffers.
1047 */
1048 for (i = 0; i < FXP_NTXCB; i++) {
1049 txs = FXP_DSTX(sc, i);
1050 if (txs->txs_mbuf != NULL) {
1051 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1052 m_freem(txs->txs_mbuf);
1053 txs->txs_mbuf = NULL;
1054 }
1055 }
1056 sc->sc_txpending = 0;
1057
1058 if (drain) {
1059 /*
1060 * Release the receive buffers.
1061 */
1062 fxp_rxdrain(sc);
1063 }
1064
1065 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1066 ifp->if_timer = 0;
1067 }
1068
1069 /*
 1070  * Watchdog/transmit timeout handler.  Called when a
1071 * transmission is started on the interface, but no interrupt is
1072 * received before the timeout. This usually indicates that the
1073 * card has wedged for some reason.
1074 */
1075 void
1076 fxp_watchdog(ifp)
1077 struct ifnet *ifp;
1078 {
1079 struct fxp_softc *sc = ifp->if_softc;
1080
1081 printf("%s: device timeout\n", sc->sc_dev.dv_xname);
1082 ifp->if_oerrors++;
1083
1084 (void) fxp_init(sc);
1085 }
1086
1087 /*
1088 * Initialize the interface. Must be called at splnet().
1089 */
1090 int
1091 fxp_init(sc)
1092 struct fxp_softc *sc;
1093 {
1094 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1095 struct fxp_cb_config *cbp;
1096 struct fxp_cb_ias *cb_ias;
1097 struct fxp_cb_tx *txd;
1098 bus_dmamap_t rxmap;
1099 int i, prm, allm, error = 0;
1100
1101 /*
1102 * Cancel any pending I/O
1103 */
1104 fxp_stop(sc, 0);
1105
1106 sc->sc_flags = 0;
1107
1108 /*
1109 * Initialize base of CBL and RFA memory. Loading with zero
1110 * sets it up for regular linear addressing.
1111 */
1112 fxp_scb_wait(sc);
1113 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0);
1114 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_BASE);
1115
1116 fxp_scb_wait(sc);
1117 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_RU_BASE);
1118
1119 /*
1120 * Initialize the multicast filter. Do this now, since we might
1121 * have to setup the config block differently.
1122 */
1123 fxp_mc_setup(sc);
1124
1125 prm = (ifp->if_flags & IFF_PROMISC) ? 1 : 0;
1126 allm = (ifp->if_flags & IFF_ALLMULTI) ? 1 : 0;
1127
1128 /*
1129 * Initialize base of dump-stats buffer.
1130 */
1131 fxp_scb_wait(sc);
1132 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
1133 sc->sc_cddma + FXP_CDSTATSOFF);
1134 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_DUMP_ADR);
1135
1136 cbp = &sc->sc_control_data->fcd_configcb;
1137 memset(cbp, 0, sizeof(struct fxp_cb_config));
1138
1139 /*
 1140 	 * This copy is kind of disgusting, but there are a bunch of must-be-zero
 1141 	 * and must-be-one bits in this structure and this is the easiest
1142 * way to initialize them all to proper values.
1143 */
1144 memcpy(cbp, fxp_cb_config_template, sizeof(fxp_cb_config_template));
1145
1146 cbp->cb_status = 0;
1147 cbp->cb_command = FXP_CB_COMMAND_CONFIG | FXP_CB_COMMAND_EL;
1148 cbp->link_addr = -1; /* (no) next command */
1149 cbp->byte_count = 22; /* (22) bytes to config */
1150 cbp->rx_fifo_limit = 8; /* rx fifo threshold (32 bytes) */
1151 cbp->tx_fifo_limit = 0; /* tx fifo threshold (0 bytes) */
1152 cbp->adaptive_ifs = 0; /* (no) adaptive interframe spacing */
1153 cbp->rx_dma_bytecount = 0; /* (no) rx DMA max */
1154 cbp->tx_dma_bytecount = 0; /* (no) tx DMA max */
1155 cbp->dma_bce = 0; /* (disable) dma max counters */
1156 cbp->late_scb = 0; /* (don't) defer SCB update */
1157 cbp->tno_int = 0; /* (disable) tx not okay interrupt */
1158 cbp->ci_int = 1; /* interrupt on CU idle */
1159 cbp->save_bf = prm; /* save bad frames */
1160 cbp->disc_short_rx = !prm; /* discard short packets */
1161 cbp->underrun_retry = 1; /* retry mode (1) on DMA underrun */
1162 cbp->mediatype = !sc->phy_10Mbps_only; /* interface mode */
1163 cbp->nsai = 1; /* (don't) disable source addr insert */
1164 cbp->preamble_length = 2; /* (7 byte) preamble */
1165 cbp->loopback = 0; /* (don't) loopback */
1166 cbp->linear_priority = 0; /* (normal CSMA/CD operation) */
1167 cbp->linear_pri_mode = 0; /* (wait after xmit only) */
1168 cbp->interfrm_spacing = 6; /* (96 bits of) interframe spacing */
1169 cbp->promiscuous = prm; /* promiscuous mode */
1170 cbp->bcast_disable = 0; /* (don't) disable broadcasts */
1171 cbp->crscdt = 0; /* (CRS only) */
1172 cbp->stripping = !prm; /* truncate rx packet to byte count */
1173 cbp->padding = 1; /* (do) pad short tx packets */
1174 cbp->rcv_crc_xfer = 0; /* (don't) xfer CRC to host */
1175 cbp->force_fdx = 0; /* (don't) force full duplex */
1176 cbp->fdx_pin_en = 1; /* (enable) FDX# pin */
1177 cbp->multi_ia = 0; /* (don't) accept multiple IAs */
1178 cbp->mc_all = allm; /* accept all multicasts */
1179
1180 FXP_CDCONFIGSYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1181
1182 /*
1183 * Start the config command/DMA.
1184 */
1185 fxp_scb_wait(sc);
1186 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->sc_cddma + FXP_CDCONFIGOFF);
1187 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START);
1188 /* ...and wait for it to complete. */
1189 do {
1190 FXP_CDCONFIGSYNC(sc,
1191 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1192 } while ((cbp->cb_status & FXP_CB_STATUS_C) == 0);
1193
1194 /*
1195 * Initialize the station address.
1196 */
1197 cb_ias = &sc->sc_control_data->fcd_iascb;
1198 cb_ias->cb_status = 0;
1199 cb_ias->cb_command = FXP_CB_COMMAND_IAS | FXP_CB_COMMAND_EL;
1200 cb_ias->link_addr = -1;
1201 memcpy((void *)cb_ias->macaddr, LLADDR(ifp->if_sadl), ETHER_ADDR_LEN);
1202
1203 FXP_CDIASSYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1204
1205 /*
1206 * Start the IAS (Individual Address Setup) command/DMA.
1207 */
1208 fxp_scb_wait(sc);
1209 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->sc_cddma + FXP_CDIASOFF);
1210 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START);
1211 /* ...and wait for it to complete. */
1212 do {
1213 FXP_CDIASSYNC(sc,
1214 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1215 } while ((cb_ias->cb_status & FXP_CB_STATUS_C) == 0);
1216
1217 /*
1218 * Initialize the transmit descriptor ring. txlast is initialized
1219 * to the end of the list so that it will wrap around to the first
1220 * descriptor when the first packet is transmitted.
1221 */
1222 for (i = 0; i < FXP_NTXCB; i++) {
1223 txd = FXP_CDTX(sc, i);
1224 memset(txd, 0, sizeof(struct fxp_cb_tx));
1225 txd->cb_command = FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S;
1226 txd->tbd_array_addr = FXP_CDTBDADDR(sc, i);
1227 txd->link_addr = FXP_CDTXADDR(sc, FXP_NEXTTX(i));
1228 FXP_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1229 }
1230 sc->sc_txpending = 0;
1231 sc->sc_txdirty = 0;
1232 sc->sc_txlast = FXP_NTXCB - 1;
1233
1234 /*
1235 * Initialize the receive buffer list.
1236 */
1237 sc->sc_rxq.ifq_maxlen = FXP_NRFABUFS;
1238 while (sc->sc_rxq.ifq_len < FXP_NRFABUFS) {
1239 rxmap = FXP_RXMAP_GET(sc);
1240 if ((error = fxp_add_rfabuf(sc, rxmap, 0)) != 0) {
1241 printf("%s: unable to allocate or map rx "
1242 "buffer %d, error = %d\n",
1243 sc->sc_dev.dv_xname,
1244 sc->sc_rxq.ifq_len, error);
1245 /*
1246 * XXX Should attempt to run with fewer receive
1247 * XXX buffers instead of just failing.
1248 */
1249 FXP_RXMAP_PUT(sc, rxmap);
1250 fxp_rxdrain(sc);
1251 goto out;
1252 }
1253 }
1254 sc->sc_rxidle = 0;
1255
1256 /*
1257 * Give the transmit ring to the chip. We do this by pointing
1258 * the chip at the last descriptor (which is a NOP|SUSPEND), and
1259 * issuing a start command. It will execute the NOP and then
1260 * suspend, pointing at the first descriptor.
1261 */
1262 fxp_scb_wait(sc);
1263 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, FXP_CDTXADDR(sc, sc->sc_txlast));
1264 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START);
1265
1266 /*
1267 * Initialize receiver buffer area - RFA.
1268 */
1269 rxmap = M_GETCTX(sc->sc_rxq.ifq_head, bus_dmamap_t);
1270 fxp_scb_wait(sc);
1271 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
1272 rxmap->dm_segs[0].ds_addr + RFA_ALIGNMENT_FUDGE);
1273 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_RU_START);
1274
1275 if (sc->sc_flags & FXPF_MII) {
1276 /*
1277 * Set current media.
1278 */
1279 mii_mediachg(&sc->sc_mii);
1280 }
1281
1282 /*
1283 * ...all done!
1284 */
1285 ifp->if_flags |= IFF_RUNNING;
1286 ifp->if_flags &= ~IFF_OACTIVE;
1287
1288 /*
1289 * Start the one second timer.
1290 */
1291 timeout(fxp_tick, sc, hz);
1292
1293 /*
1294 * Attempt to start output on the interface.
1295 */
1296 fxp_start(ifp);
1297
1298 out:
1299 if (error)
1300 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
1301 return (error);
1302 }
1303
1304 /*
1305 * Change media according to request.
1306 */
1307 int
1308 fxp_mii_mediachange(ifp)
1309 struct ifnet *ifp;
1310 {
1311 struct fxp_softc *sc = ifp->if_softc;
1312
1313 if (ifp->if_flags & IFF_UP)
1314 mii_mediachg(&sc->sc_mii);
1315 return (0);
1316 }
1317
1318 /*
1319 * Notify the world which media we're using.
1320 */
1321 void
1322 fxp_mii_mediastatus(ifp, ifmr)
1323 struct ifnet *ifp;
1324 struct ifmediareq *ifmr;
1325 {
1326 struct fxp_softc *sc = ifp->if_softc;
1327
1328 mii_pollstat(&sc->sc_mii);
1329 ifmr->ifm_status = sc->sc_mii.mii_media_status;
1330 ifmr->ifm_active = sc->sc_mii.mii_media_active;
1331 }
1332
1333 int
1334 fxp_80c24_mediachange(ifp)
1335 struct ifnet *ifp;
1336 {
1337
1338 /* Nothing to do here. */
1339 return (0);
1340 }
1341
1342 void
1343 fxp_80c24_mediastatus(ifp, ifmr)
1344 struct ifnet *ifp;
1345 struct ifmediareq *ifmr;
1346 {
1347 struct fxp_softc *sc = ifp->if_softc;
1348
1349 /*
 1350 	 * The active media is simply the currently selected media; we
 1351 	 * cannot determine the link status.
1352 */
1353 ifmr->ifm_status = 0;
1354 ifmr->ifm_active = sc->sc_mii.mii_media.ifm_cur->ifm_media;
1355 }
1356
1357 /*
1358 * Add a buffer to the end of the RFA buffer list.
1359 * Return 0 if successful, error code on failure.
1360 *
1361 * The RFA struct is stuck at the beginning of mbuf cluster and the
1362 * data pointer is fixed up to point just past it.
1363 */
1364 int
1365 fxp_add_rfabuf(sc, rxmap, unload)
1366 struct fxp_softc *sc;
1367 bus_dmamap_t rxmap;
1368 int unload;
1369 {
1370 struct mbuf *m;
1371 int error;
1372
1373 MGETHDR(m, M_DONTWAIT, MT_DATA);
1374 if (m == NULL)
1375 return (ENOBUFS);
1376
1377 MCLGET(m, M_DONTWAIT);
1378 if ((m->m_flags & M_EXT) == 0) {
1379 m_freem(m);
1380 return (ENOBUFS);
1381 }
1382
1383 if (unload)
1384 bus_dmamap_unload(sc->sc_dmat, rxmap);
1385
1386 M_SETCTX(m, rxmap);
1387
1388 error = bus_dmamap_load(sc->sc_dmat, rxmap,
1389 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
1390 if (error) {
1391 printf("%s: can't load rx DMA map %d, error = %d\n",
1392 sc->sc_dev.dv_xname, sc->sc_rxq.ifq_len, error);
1393 panic("fxp_add_rfabuf"); /* XXX */
1394 }
1395
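/*
 * FXP_INIT_RFABUF() (see i82557var.h) sets up the RFA at the front
 * of the cluster, offset by RFA_ALIGNMENT_FUDGE, and enqueues the
 * mbuf on sc_rxq.
 */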
1396 FXP_INIT_RFABUF(sc, m);
1397
1398 return (0);
1399 }
1400
1401 volatile int
1402 fxp_mdi_read(self, phy, reg)
1403 struct device *self;
1404 int phy;
1405 int reg;
1406 {
1407 struct fxp_softc *sc = (struct fxp_softc *)self;
1408 int count = 10000;
1409 int value;
1410
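/*
 * MDI control register layout, as used here and in fxp_mdi_write():
 * bits 0-15 data, bits 16-20 register address, bits 21-25 PHY
 * address, bits 26-27 opcode, bit 28 ready.
 */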
1411 CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
1412 (FXP_MDI_READ << 26) | (reg << 16) | (phy << 21));
1413
1414 while (((value = CSR_READ_4(sc, FXP_CSR_MDICONTROL)) & 0x10000000) == 0
1415 && count--)
1416 DELAY(10);
1417
1418 if (count <= 0)
1419 printf("%s: fxp_mdi_read: timed out\n", sc->sc_dev.dv_xname);
1420
1421 return (value & 0xffff);
1422 }
1423
1424 void
1425 fxp_statchg(self)
1426 struct device *self;
1427 {
1428
1429 /* XXX Update ifp->if_baudrate */
1430 }
1431
1432 void
1433 fxp_mdi_write(self, phy, reg, value)
1434 struct device *self;
1435 int phy;
1436 int reg;
1437 int value;
1438 {
1439 struct fxp_softc *sc = (struct fxp_softc *)self;
1440 int count = 10000;
1441
1442 CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
1443 (FXP_MDI_WRITE << 26) | (reg << 16) | (phy << 21) |
1444 (value & 0xffff));
1445
1446 while((CSR_READ_4(sc, FXP_CSR_MDICONTROL) & 0x10000000) == 0 &&
1447 count--)
1448 DELAY(10);
1449
1450 if (count <= 0)
1451 printf("%s: fxp_mdi_write: timed out\n", sc->sc_dev.dv_xname);
1452 }
1453
1454 int
1455 fxp_ioctl(ifp, command, data)
1456 struct ifnet *ifp;
1457 u_long command;
1458 caddr_t data;
1459 {
1460 struct fxp_softc *sc = ifp->if_softc;
1461 struct ifreq *ifr = (struct ifreq *)data;
1462 struct ifaddr *ifa = (struct ifaddr *)data;
1463 int s, error = 0;
1464
1465 s = splnet();
1466
1467 switch (command) {
1468 case SIOCSIFADDR:
1469 ifp->if_flags |= IFF_UP;
1470
1471 switch (ifa->ifa_addr->sa_family) {
1472 #ifdef INET
1473 case AF_INET:
1474 if ((error = fxp_init(sc)) != 0)
1475 break;
1476 arp_ifinit(ifp, ifa);
1477 break;
1478 #endif /* INET */
1479 #ifdef NS
1480 case AF_NS:
1481 {
1482 struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;
1483
1484 if (ns_nullhost(*ina))
1485 ina->x_host = *(union ns_host *)
1486 LLADDR(ifp->if_sadl);
1487 else
1488 bcopy(ina->x_host.c_host, LLADDR(ifp->if_sadl),
1489 ifp->if_addrlen);
1490 /* Set new address. */
1491 error = fxp_init(sc);
1492 break;
1493 }
1494 #endif /* NS */
1495 default:
1496 error = fxp_init(sc);
1497 break;
1498 }
1499 break;
1500
1501 case SIOCSIFMTU:
1502 if (ifr->ifr_mtu > ETHERMTU)
1503 error = EINVAL;
1504 else
1505 ifp->if_mtu = ifr->ifr_mtu;
1506 break;
1507
1508 case SIOCSIFFLAGS:
1509 if ((ifp->if_flags & IFF_UP) == 0 &&
1510 (ifp->if_flags & IFF_RUNNING) != 0) {
1511 /*
1512 * If interface is marked down and it is running, then
1513 * stop it.
1514 */
1515 fxp_stop(sc, 1);
1516 } else if ((ifp->if_flags & IFF_UP) != 0 &&
1517 (ifp->if_flags & IFF_RUNNING) == 0) {
1518 /*
1519 * If interface is marked up and it is stopped, then
1520 * start it.
1521 */
1522 error = fxp_init(sc);
1523 } else if ((ifp->if_flags & IFF_UP) != 0) {
1524 /*
1525 * Reset the interface to pick up change in any other
1526 * flags that affect the hardware state.
1527 */
1528 error = fxp_init(sc);
1529 }
1530 break;
1531
1532 case SIOCADDMULTI:
1533 case SIOCDELMULTI:
1534 error = (command == SIOCADDMULTI) ?
1535 ether_addmulti(ifr, &sc->sc_ethercom) :
1536 ether_delmulti(ifr, &sc->sc_ethercom);
1537
1538 if (error == ENETRESET) {
1539 /*
1540 * Multicast list has changed; set the hardware
1541 * filter accordingly.
1542 */
1543 if (sc->sc_txpending) {
1544 sc->sc_flags |= FXPF_WANTINIT;
1545 error = 0;
1546 } else
1547 error = fxp_init(sc);
1548 }
1549 break;
1550
1551 case SIOCSIFMEDIA:
1552 case SIOCGIFMEDIA:
1553 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
1554 break;
1555
1556 default:
1557 error = EINVAL;
1558 break;
1559 }
1560
1561 splx(s);
1562 return (error);
1563 }
1564
1565 /*
1566 * Program the multicast filter.
1567 *
1568 * This function must be called at splnet().
1569 */
1570 void
1571 fxp_mc_setup(sc)
1572 struct fxp_softc *sc;
1573 {
1574 struct fxp_cb_mcs *mcsp = &sc->sc_control_data->fcd_mcscb;
1575 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1576 struct ethercom *ec = &sc->sc_ethercom;
1577 struct ether_multi *enm;
1578 struct ether_multistep step;
1579 int nmcasts;
1580
1581 #ifdef DIAGNOSTIC
1582 if (sc->sc_txpending)
1583 panic("fxp_mc_setup: pending transmissions");
1584 #endif
1585
1586 ifp->if_flags &= ~IFF_ALLMULTI;
1587
1588 /*
1589 * Initialize multicast setup descriptor.
1590 */
1591 nmcasts = 0;
1592 ETHER_FIRST_MULTI(step, ec, enm);
1593 while (enm != NULL) {
1594 /*
1595 * Check for too many multicast addresses or if we're
1596 * listening to a range. Either way, we simply have
1597 * to accept all multicasts.
1598 */
1599 if (nmcasts >= MAXMCADDR ||
1600 memcmp(enm->enm_addrlo, enm->enm_addrhi,
1601 ETHER_ADDR_LEN) != 0) {
1602 /*
1603 * Callers of this function must do the
1604 * right thing with this. If we're called
1605 * from outside fxp_init(), the caller must
 1606 			 * detect if the state of IFF_ALLMULTI changes.
1607 * If it does, the caller must then call
1608 * fxp_init(), since allmulti is handled by
1609 * the config block.
1610 */
1611 ifp->if_flags |= IFF_ALLMULTI;
1612 return;
1613 }
1614 memcpy((void *)&mcsp->mc_addr[nmcasts][0], enm->enm_addrlo,
1615 ETHER_ADDR_LEN);
1616 nmcasts++;
1617 ETHER_NEXT_MULTI(step, enm);
1618 }
1619
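/*
 * Note that mc_cnt below is a byte count (ETHER_ADDR_LEN per entry),
 * not an address count.
 */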
1620 mcsp->cb_status = 0;
1621 mcsp->cb_command = FXP_CB_COMMAND_MCAS | FXP_CB_COMMAND_EL;
1622 mcsp->link_addr = FXP_CDTXADDR(sc, FXP_NEXTTX(sc->sc_txlast));
1623 mcsp->mc_cnt = nmcasts * ETHER_ADDR_LEN;
1624
1625 FXP_CDMCSSYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1626
1627 /*
1628 * Wait until the command unit is not active. This should never
1629 * happen since nothing is queued, but make sure anyway.
1630 */
1631 while ((CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS) >> 6) ==
1632 FXP_SCB_CUS_ACTIVE)
1633 /* nothing */ ;
1634
1635 /*
1636 * Start the multicast setup command/DMA.
1637 */
1638 fxp_scb_wait(sc);
1639 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->sc_cddma + FXP_CDMCSOFF);
1640 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START);
1641
1642 /* ...and wait for it to complete. */
1643 do {
1644 FXP_CDMCSSYNC(sc,
1645 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1646 } while ((mcsp->cb_status & FXP_CB_STATUS_C) == 0);
1647 }
1648