i82557.c revision 1.1 1 /* $NetBSD: i82557.c,v 1.1 1999/06/20 16:33:28 thorpej Exp $ */
2
3 /*-
4 * Copyright (c) 1997, 1998, 1999 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 /*
41 * Copyright (c) 1995, David Greenman
42 * All rights reserved.
43 *
44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions
46 * are met:
47 * 1. Redistributions of source code must retain the above copyright
48 * notice unmodified, this list of conditions, and the following
49 * disclaimer.
50 * 2. Redistributions in binary form must reproduce the above copyright
51 * notice, this list of conditions and the following disclaimer in the
52 * documentation and/or other materials provided with the distribution.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * Id: if_fxp.c,v 1.47 1998/01/08 23:42:29 eivind Exp
67 */
68
69 /*
70 * Device driver for the Intel i82557 fast Ethernet controller.
71 */
72
73 #include "opt_inet.h"
74 #include "opt_ns.h"
75 #include "bpfilter.h"
76 #include "rnd.h"
77
78 #include <sys/param.h>
79 #include <sys/systm.h>
80 #include <sys/mbuf.h>
81 #include <sys/malloc.h>
82 #include <sys/kernel.h>
83 #include <sys/socket.h>
84 #include <sys/ioctl.h>
85 #include <sys/errno.h>
86 #include <sys/device.h>
87
88 #include <vm/vm.h> /* for PAGE_SIZE */
89
90 #if NRND > 0
91 #include <sys/rnd.h>
92 #endif
93
94 #include <net/if.h>
95 #include <net/if_dl.h>
96 #include <net/if_media.h>
97 #include <net/if_ether.h>
98
99 #if NBPFILTER > 0
100 #include <net/bpf.h>
101 #endif
102
103 #ifdef INET
104 #include <netinet/in.h>
105 #include <netinet/if_inarp.h>
106 #endif
107
108 #ifdef NS
109 #include <netns/ns.h>
110 #include <netns/ns_if.h>
111 #endif
112
113 #include <machine/bus.h>
114 #include <machine/intr.h>
115
116 #include <dev/mii/miivar.h>
117
118 #include <dev/ic/i82557reg.h>
119 #include <dev/ic/i82557var.h>
120
121 /*
122 * NOTE! On the Alpha, we have an alignment constraint. The
123 * card DMAs the packet immediately following the RFA. However,
124 * the first thing in the packet is a 14-byte Ethernet header.
125 * This means that the packet is misaligned. To compensate,
126 * we actually offset the RFA 2 bytes into the cluster. This
 * aligns the packet after the Ethernet header at a 32-bit
128 * boundary. HOWEVER! This means that the RFA is misaligned!
129 */
130 #define RFA_ALIGNMENT_FUDGE 2
131
132 /*
133 * Template for default configuration parameters.
134 * See struct fxp_cb_config for the bit definitions.
135 */
u_int8_t fxp_cb_config_template[] = {
	0x0, 0x0,		/* cb_status */
	0x80, 0x2,		/* cb_command */
	0xff, 0xff, 0xff, 0xff,	/* link_addr */
	/*
	 * Configuration bytes 0-21 of the CONFIGURE command.  The
	 * cb_status/cb_command/link_addr fields above are overwritten
	 * with real values in fxp_init() after this template is copied.
	 */
	0x16,	/*  0 */
	0x8,	/*  1 */
	0x0,	/*  2 */
	0x0,	/*  3 */
	0x0,	/*  4 */
	0x80,	/*  5 */
	0xb2,	/*  6 */
	0x3,	/*  7 */
	0x1,	/*  8 */
	0x0,	/*  9 */
	0x26,	/* 10 */
	0x0,	/* 11 */
	0x60,	/* 12 */
	0x0,	/* 13 */
	0xf2,	/* 14 */
	0x48,	/* 15 */
	0x0,	/* 16 */
	0x40,	/* 17 */
	0xf3,	/* 18 */
	0x0,	/* 19 */
	0x3f,	/* 20 */
	0x5	/* 21 */
};
163
164 void fxp_mii_initmedia __P((struct fxp_softc *));
165 int fxp_mii_mediachange __P((struct ifnet *));
166 void fxp_mii_mediastatus __P((struct ifnet *, struct ifmediareq *));
167
168 void fxp_80c24_initmedia __P((struct fxp_softc *));
169 int fxp_80c24_mediachange __P((struct ifnet *));
170 void fxp_80c24_mediastatus __P((struct ifnet *, struct ifmediareq *));
171
172 inline void fxp_scb_wait __P((struct fxp_softc *));
173
174 void fxp_start __P((struct ifnet *));
175 int fxp_ioctl __P((struct ifnet *, u_long, caddr_t));
176 void fxp_init __P((void *));
177 void fxp_stop __P((struct fxp_softc *));
178 void fxp_watchdog __P((struct ifnet *));
179 int fxp_add_rfabuf __P((struct fxp_softc *, struct fxp_rxdesc *));
180 int fxp_mdi_read __P((struct device *, int, int));
181 void fxp_statchg __P((struct device *));
182 void fxp_mdi_write __P((struct device *, int, int, int));
183 void fxp_read_eeprom __P((struct fxp_softc *, u_int16_t *, int, int));
184 void fxp_get_info __P((struct fxp_softc *, u_int8_t *));
185 void fxp_tick __P((void *));
186 void fxp_mc_setup __P((struct fxp_softc *));
187
188 void fxp_shutdown __P((void *));
189
/*
 * Table mapping the primary PHY device type (read from the EEPROM in
 * fxp_get_info()) to the media-initialization routine for that PHY.
 * The entry with fp_phy == -1 terminates the table and selects the
 * generic MII code; fxp_attach() falls through to it when no earlier
 * entry matches.
 */
struct fxp_phytype {
	int fp_phy;		/* type of PHY, -1 for MII at the end. */
	void (*fp_init) __P((struct fxp_softc *));
} fxp_phytype_table[] = {
	{ FXP_PHY_80C24,	fxp_80c24_initmedia },
	{ -1,			fxp_mii_initmedia },
};
197
/*
 * Set initial transmit threshold at 64 (512 bytes). This is
 * increased by 64 (512 bytes) at a time, to maximum of 192
 * (1536 bytes), if an underrun occurs.
 */
static int tx_threshold = 64;	/* units of 8 bytes; bumped in fxp_tick() */
204
205 /*
206 * Wait for the previous command to be accepted (but not necessarily
207 * completed).
208 */
209 inline void
210 fxp_scb_wait(sc)
211 struct fxp_softc *sc;
212 {
213 int i = 10000;
214
215 while (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) && --i)
216 DELAY(1);
217 if (i == 0)
218 printf("%s: WARNING: SCB timed out!\n", sc->sc_dev.dv_xname);
219 }
220
221 /*
222 * Finish attaching an i82557 interface. Called by bus-specific front-end.
223 */
224 void
225 fxp_attach(sc)
226 struct fxp_softc *sc;
227 {
228 u_int8_t enaddr[6];
229 struct ifnet *ifp;
230 bus_dma_segment_t seg;
231 int rseg, i, error;
232 struct fxp_phytype *fp;
233
234 /*
235 * Allocate the control data structures, and create and load the
236 * DMA map for it.
237 */
238 if ((error = bus_dmamem_alloc(sc->sc_dmat,
239 sizeof(struct fxp_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
240 0)) != 0) {
241 printf("%s: unable to allocate control data, error = %d\n",
242 sc->sc_dev.dv_xname, error);
243 goto fail_0;
244 }
245
246 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
247 sizeof(struct fxp_control_data), (caddr_t *)&sc->control_data,
248 BUS_DMA_COHERENT)) != 0) {
249 printf("%s: unable to map control data, error = %d\n",
250 sc->sc_dev.dv_xname, error);
251 goto fail_1;
252 }
253 bzero(sc->control_data, sizeof(struct fxp_control_data));
254
255 if ((error = bus_dmamap_create(sc->sc_dmat,
256 sizeof(struct fxp_control_data), 1,
257 sizeof(struct fxp_control_data), 0, 0, &sc->sc_dmamap)) != 0) {
258 printf("%s: unable to create control data DMA map, "
259 "error = %d\n", sc->sc_dev.dv_xname, error);
260 goto fail_2;
261 }
262
263 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap,
264 sc->control_data, sizeof(struct fxp_control_data), NULL,
265 0)) != 0) {
266 printf("%s: can't load control data DMA map, error = %d\n",
267 sc->sc_dev.dv_xname, error);
268 goto fail_3;
269 }
270
271 /*
272 * Create the transmit buffer DMA maps.
273 */
274 for (i = 0; i < FXP_NTXCB; i++) {
275 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
276 FXP_NTXSEG, MCLBYTES, 0, 0,
277 &sc->sc_tx_dmamaps[i])) != 0) {
278 printf("%s: unable to create tx DMA map %d, "
279 "error = %d\n", sc->sc_dev.dv_xname, i, error);
280 goto fail_4;
281 }
282 }
283
284 /*
285 * Create the receive buffer DMA maps.
286 */
287 for (i = 0; i < FXP_NRFABUFS; i++) {
288 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
289 MCLBYTES, 0, 0, &sc->sc_rx_dmamaps[i])) != 0) {
290 printf("%s: unable to create rx DMA map %d, "
291 "error = %d\n", sc->sc_dev.dv_xname, i, error);
292 goto fail_5;
293 }
294 }
295
296 /*
297 * Pre-allocate the receive buffers.
298 */
299 for (i = 0; i < FXP_NRFABUFS; i++) {
300 sc->sc_rxdescs[i].fr_dmamap = sc->sc_rx_dmamaps[i];
301 if (fxp_add_rfabuf(sc, &sc->sc_rxdescs[i]) != 0) {
302 printf("%s: unable to allocate or map rx buffer %d, "
303 "error = %d\n", sc->sc_dev.dv_xname, i, error);
304 goto fail_6;
305 }
306 }
307
308 /* Initialize MAC address and media structures. */
309 fxp_get_info(sc, enaddr);
310
311 printf("%s: Ethernet address %s, %s Mb/s\n", sc->sc_dev.dv_xname,
312 ether_sprintf(enaddr), sc->phy_10Mbps_only ? "10" : "10/100");
313
314 ifp = &sc->sc_ethercom.ec_if;
315
316 /*
317 * Get info about our media interface, and initialize it. Note
318 * the table terminates itself with a phy of -1, indicating
319 * that we're using MII.
320 */
321 for (fp = fxp_phytype_table; fp->fp_phy != -1; fp++)
322 if (fp->fp_phy == sc->phy_primary_device)
323 break;
324 (*fp->fp_init)(sc);
325
326 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
327 ifp->if_softc = sc;
328 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
329 ifp->if_ioctl = fxp_ioctl;
330 ifp->if_start = fxp_start;
331 ifp->if_watchdog = fxp_watchdog;
332
333 /*
334 * Attach the interface.
335 */
336 if_attach(ifp);
337 ether_ifattach(ifp, enaddr);
338 #if NBPFILTER > 0
339 bpfattach(&sc->sc_ethercom.ec_if.if_bpf, ifp, DLT_EN10MB,
340 sizeof(struct ether_header));
341 #endif
342
343 /*
344 * Let the system queue as many packets as we have TX descriptors.
345 */
346 ifp->if_snd.ifq_maxlen = FXP_NTXCB;
347
348 #if NRND > 0
349 rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
350 RND_TYPE_NET, 0);
351 #endif
352
353 /*
354 * Add shutdown hook so that DMA is disabled prior to reboot. Not
355 * doing do could allow DMA to corrupt kernel memory during the
356 * reboot before the driver initializes.
357 */
358 sc->sc_sdhook = shutdownhook_establish(fxp_shutdown, sc);
359 if (sc->sc_sdhook == NULL)
360 printf("%s: WARNING: unable to establish shutdown hook\n",
361 sc->sc_dev.dv_xname);
362 return;
363
364 /*
365 * Free any resources we've allocated during the failed attach
366 * attempt. Do this in reverse order and fall though.
367 */
368 fail_6:
369 for (i = 0; i < FXP_NRFABUFS; i++) {
370 if (sc->sc_rxdescs[i].fr_mbhead != NULL) {
371 bus_dmamap_unload(sc->sc_dmat,
372 sc->sc_rxdescs[i].fr_dmamap);
373 m_freem(sc->sc_rxdescs[i].fr_mbhead);
374 }
375 }
376 fail_5:
377 for (i = 0; i < FXP_NRFABUFS; i++) {
378 if (sc->sc_rxdescs[i].fr_dmamap != NULL)
379 bus_dmamap_destroy(sc->sc_dmat,
380 sc->sc_rxdescs[i].fr_dmamap);
381 }
382 fail_4:
383 for (i = 0; i < FXP_NTXCB; i++) {
384 if (sc->sc_tx_dmamaps[i] != NULL)
385 bus_dmamap_destroy(sc->sc_dmat,
386 sc->sc_tx_dmamaps[i]);
387 }
388 bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
389 fail_3:
390 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
391 fail_2:
392 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->control_data,
393 sizeof(struct fxp_control_data));
394 fail_1:
395 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
396 fail_0:
397 return;
398 }
399
/*
 * Initialize media for an interface with an MII-attached PHY.
 * Probes all PHY addresses (mask 0xffffffff); if no PHY responds,
 * the only media offered is "none", otherwise autoselect is the
 * default.
 */
void
fxp_mii_initmedia(sc)
	struct fxp_softc *sc;
{

	sc->sc_mii.mii_ifp = &sc->sc_ethercom.ec_if;
	sc->sc_mii.mii_readreg = fxp_mdi_read;
	sc->sc_mii.mii_writereg = fxp_mdi_write;
	sc->sc_mii.mii_statchg = fxp_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, fxp_mii_mediachange,
	    fxp_mii_mediastatus);
	mii_phy_probe(&sc->sc_dev, &sc->sc_mii, 0xffffffff);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* No PHYs found; offer only the "none" media. */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
}
418
/*
 * Initialize media for an interface with a Seeq 80c24 AutoDUPLEX
 * media interface, which offers no programming interface; only a
 * single "manual" media instance is registered.
 */
void
fxp_80c24_initmedia(sc)
	struct fxp_softc *sc;
{

	/*
	 * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter
	 * doesn't have a programming interface of any sort.  The
	 * media is sensed automatically based on how the link partner
	 * is configured.  This is, in essence, manual configuration.
	 */
	printf("%s: Seeq 80c24 AutoDUPLEX media interface present\n",
	    sc->sc_dev.dv_xname);
	ifmedia_init(&sc->sc_mii.mii_media, 0, fxp_80c24_mediachange,
	    fxp_80c24_mediastatus);
	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
}
437
/*
 * Device shutdown routine, registered via shutdownhook_establish()
 * in fxp_attach().  Called at system shutdown after sync; stops the
 * chip so receiver DMA cannot clobber kernel memory during warmboot.
 */
void
fxp_shutdown(arg)
	void *arg;
{
	struct fxp_softc *sc = arg;

	fxp_stop(sc);
}
450
/*
 * Reset the chip to a stable state, then read the primary PHY
 * configuration and the station (MAC) address from the serial
 * EEPROM.  The MAC address is returned in `enaddr'.
 */
void
fxp_get_info(sc, enaddr)
	struct fxp_softc *sc;
	u_int8_t *enaddr;
{
	u_int16_t data, myea[3];

	/*
	 * Reset to a stable state.
	 */
	CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
	DELAY(10);

	/*
	 * Get info about the primary PHY: EEPROM word 6 encodes the
	 * PHY address (low byte), device type (bits 8-13), and a
	 * 10Mbps-only flag (bit 15).
	 */
	fxp_read_eeprom(sc, &data, 6, 1);
	sc->phy_primary_addr = data & 0xff;
	sc->phy_primary_device = (data >> 8) & 0x3f;
	sc->phy_10Mbps_only = data >> 15;

	/*
	 * Read MAC address (EEPROM words 0-2).
	 */
	fxp_read_eeprom(sc, myea, 0, 3);
	bcopy(myea, enaddr, ETHER_ADDR_LEN);
}
481
/*
 * Read from the serial EEPROM.  Basically, you manually shift in
 * the read opcode (one bit at a time) and then shift in the address,
 * and then you shift out the data (all of this one bit at a time).
 * The word size is 16 bits, so you have to provide the address for
 * every 16 bits of data.
 *
 * sc:     softc for register access
 * data:   buffer for `words' 16-bit words of output
 * offset: first EEPROM word address to read
 * words:  number of consecutive words to read
 *
 * The EESK clock toggles around every data bit; the exact write/
 * delay ordering below follows the EEPROM serial protocol and must
 * not be rearranged.
 */
void
fxp_read_eeprom(sc, data, offset, words)
	struct fxp_softc *sc;
	u_int16_t *data;
	int offset;
	int words;
{
	u_int16_t reg;
	int i, x;

	for (i = 0; i < words; i++) {
		/* Assert chip select for this word. */
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
		/*
		 * Shift in read opcode (3 bits, MSB first).
		 */
		for (x = 3; x > 0; x--) {
			if (FXP_EEPROM_OPC_READ & (1 << (x - 1))) {
				reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
			} else {
				reg = FXP_EEPROM_EECS;
			}
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
			    reg | FXP_EEPROM_EESK);
			DELAY(1);
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			DELAY(1);
		}
		/*
		 * Shift in address (6 bits, MSB first).
		 */
		for (x = 6; x > 0; x--) {
			if ((i + offset) & (1 << (x - 1))) {
				reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
			} else {
				reg = FXP_EEPROM_EECS;
			}
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
			    reg | FXP_EEPROM_EESK);
			DELAY(1);
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			DELAY(1);
		}
		reg = FXP_EEPROM_EECS;
		data[i] = 0;
		/*
		 * Shift out data (16 bits, MSB first), sampling EEDO
		 * after each rising clock edge.
		 */
		for (x = 16; x > 0; x--) {
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
			    reg | FXP_EEPROM_EESK);
			DELAY(1);
			if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) &
			    FXP_EEPROM_EEDO)
				data[i] |= (1 << (x - 1));
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			DELAY(1);
		}
		/* Deassert chip select between words. */
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
		DELAY(1);
	}
}
552
/*
 * Start packet transmission on the interface (ifp->if_start entry
 * point).  Dequeues packets from the send queue, loads each into a
 * TxCB at the tail of the command list, and issues a CU RESUME so
 * the chip picks up the new work.
 */
void
fxp_start(ifp)
	struct ifnet *ifp;
{
	struct fxp_softc *sc = ifp->if_softc;
	struct fxp_cb_tx *txp;
	bus_dmamap_t dmamap;
	int old_queued;

	/*
	 * See if we need to suspend xmit until the multicast filter
	 * has been reprogrammed (which can only be done at the head
	 * of the command chain).
	 *
	 * Note: old_queued is only assigned on the path that falls
	 * through (the `||' short-circuits when need_mcsetup is set,
	 * but that path returns immediately).
	 */
	if (sc->need_mcsetup || (old_queued = sc->tx_queued) >= FXP_NTXCB) {
		ifp->if_flags |= IFF_OACTIVE;
		return;
	}

	/*
	 * We're finished if there is nothing more to add to the list or if
	 * we're all filled up with buffers to transmit.
	 */
	while (ifp->if_snd.ifq_head != NULL && sc->tx_queued < FXP_NTXCB) {
		struct mbuf *mb_head;
		int segment, error;

		/*
		 * Grab a packet to transmit.
		 */
		IF_DEQUEUE(&ifp->if_snd, mb_head);

		/*
		 * Get pointer to next available tx desc.
		 */
		txp = sc->cbl_last->cb_soft.next;
		dmamap = txp->cb_soft.dmamap;

		/*
		 * Go through each of the mbufs in the chain and initialize
		 * the transmit buffer descriptors with the physical address
		 * and size of the mbuf.
		 */
 tbdinit:
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
		    mb_head, BUS_DMA_NOWAIT);
		switch (error) {
		case 0:
			/* Success. */
			break;

		case EFBIG:
		    {
			struct mbuf *mn;

			/*
			 * We ran out of segments. We have to recopy this
			 * mbuf chain first. Bail out if we can't get the
			 * new buffers.
			 */
			printf("%s: too many segments, ", sc->sc_dev.dv_xname);

			MGETHDR(mn, M_DONTWAIT, MT_DATA);
			if (mn == NULL) {
				m_freem(mb_head);
				printf("aborting\n");
				goto out;
			}
			if (mb_head->m_pkthdr.len > MHLEN) {
				MCLGET(mn, M_DONTWAIT);
				if ((mn->m_flags & M_EXT) == 0) {
					m_freem(mn);
					m_freem(mb_head);
					printf("aborting\n");
					goto out;
				}
			}
			/*
			 * NOTE(review): assumes the whole packet fits in
			 * one cluster (pkthdr.len <= MCLBYTES) -- confirm
			 * upper layers never hand us anything larger.
			 */
			m_copydata(mb_head, 0, mb_head->m_pkthdr.len,
			    mtod(mn, caddr_t));
			mn->m_pkthdr.len = mn->m_len = mb_head->m_pkthdr.len;
			m_freem(mb_head);
			mb_head = mn;
			printf("retrying\n");
			goto tbdinit;
		    }

		default:
			/*
			 * Some other problem; report it.
			 */
			printf("%s: can't load mbuf chain, error = %d\n",
			    sc->sc_dev.dv_xname, error);
			m_freem(mb_head);
			goto out;
		}

		/* Copy the DMA segments into the transmit buffer descriptors. */
		for (segment = 0; segment < dmamap->dm_nsegs; segment++) {
			txp->tbd[segment].tb_addr =
			    dmamap->dm_segs[segment].ds_addr;
			txp->tbd[segment].tb_size =
			    dmamap->dm_segs[segment].ds_len;
		}

		/* Flush the packet data for the device. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		txp->tbd_number = dmamap->dm_nsegs;
		txp->cb_soft.mb_head = mb_head;
		txp->cb_status = 0;
		/* S bit: chip suspends after this frame until resumed. */
		txp->cb_command =
		    FXP_CB_COMMAND_XMIT | FXP_CB_COMMAND_SF | FXP_CB_COMMAND_S;
		txp->tx_threshold = tx_threshold;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    FXP_TXDESCOFF(sc, txp), FXP_TXDESCSIZE,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * Advance the end of list forward: clear the S bit on the
		 * previous tail so the chip runs on into the new TxCB.
		 * The sync calls bracket the modification of cbl_last.
		 */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    FXP_TXDESCOFF(sc, sc->cbl_last), FXP_TXDESCSIZE,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		sc->cbl_last->cb_command &= ~FXP_CB_COMMAND_S;
		sc->cbl_last = txp;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    FXP_TXDESCOFF(sc, sc->cbl_last), FXP_TXDESCSIZE,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * Advance the beginning of the list forward if there are
		 * no other packets queued (when nothing is queued, cbl_first
		 * sits on the last TxCB that was sent out).
		 */
		if (sc->tx_queued == 0)
			sc->cbl_first = txp;

		sc->tx_queued++;

#if NBPFILTER > 0
		/*
		 * Pass packet to bpf if there is a listener.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, mb_head);
#endif
	}

 out:
	/*
	 * We're finished. If we added to the list, issue a RESUME to get DMA
	 * going again if suspended.
	 */
	if (old_queued != sc->tx_queued) {
		fxp_scb_wait(sc);
		CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_RESUME);

		/*
		 * Set a 5 second timer just in case we don't hear from the
		 * card again.
		 */
		ifp->if_timer = 5;
	}
}
719
/*
 * Process interface interrupts.  Returns non-zero if at least one
 * interrupt condition was claimed (and ACKed).
 */
int
fxp_intr(arg)
	void *arg;
{
	struct fxp_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_if;
	u_int8_t statack;
	int claimed = 0;

	/* Loop until the chip reports no more pending status bits. */
	while ((statack = CSR_READ_1(sc, FXP_CSR_SCB_STATACK)) != 0) {
		claimed = 1;

		/*
		 * First ACK all the interrupts in this pass.
		 */
		CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, statack);

		/*
		 * Process receiver interrupts. If a no-resource (RNR)
		 * condition exists, get whatever packets we can and
		 * re-start the receiver.
		 */
		if (statack & (FXP_SCB_STATACK_FR | FXP_SCB_STATACK_RNR)) {
			struct fxp_rxdesc *rxd;
			struct mbuf *m;
			struct fxp_rfa *rfa;
			bus_dmamap_t rxmap;
 rcvloop:
			rxd = sc->rfa_head;
			rxmap = rxd->fr_dmamap;
			m = rxd->fr_mbhead;
			/*
			 * The RFA sits RFA_ALIGNMENT_FUDGE bytes into
			 * the cluster so the payload following it is
			 * 32-bit aligned (see comment at top of file).
			 */
			rfa = (struct fxp_rfa *)(m->m_ext.ext_buf +
			    RFA_ALIGNMENT_FUDGE);

			bus_dmamap_sync(sc->sc_dmat, rxmap, 0,
			    rxmap->dm_mapsize,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			/* C bit set means the chip completed this RFA. */
			if (rfa->rfa_status & FXP_RFA_STATUS_C) {
				/*
				 * Remove first packet from the chain.
				 */
				sc->rfa_head = rxd->fr_next;
				rxd->fr_next = NULL;

				/*
				 * Add a new buffer to the receive chain.
				 * If this fails, the old buffer is recycled
				 * instead.
				 */
				if (fxp_add_rfabuf(sc, rxd) == 0) {
					struct ether_header *eh;
					u_int16_t total_len;

					total_len = rfa->actual_size &
					    (MCLBYTES - 1);
					if (total_len <
					    sizeof(struct ether_header)) {
						/* Runt frame; drop it. */
						m_freem(m);
						goto rcvloop;
					}
					m->m_pkthdr.rcvif = ifp;
					m->m_pkthdr.len = m->m_len = total_len;
					eh = mtod(m, struct ether_header *);
#if NBPFILTER > 0
					if (ifp->if_bpf) {
						bpf_tap(ifp->if_bpf,
						    mtod(m, caddr_t),
						    total_len);
						/*
						 * Only pass this packet up
						 * if it is for us.
						 */
						if ((ifp->if_flags &
						    IFF_PROMISC) &&
						    (rfa->rfa_status &
						    FXP_RFA_STATUS_IAMATCH) &&
						    (eh->ether_dhost[0] & 1)
						    == 0) {
							m_freem(m);
							goto rcvloop;
						}
					}
#endif /* NBPFILTER > 0 */
					(*ifp->if_input)(ifp, m);
				}
				goto rcvloop;
			}
			/*
			 * Receiver ran out of resources; restart it at
			 * the current (still-incomplete) RFA.
			 */
			if (statack & FXP_SCB_STATACK_RNR) {
				fxp_scb_wait(sc);
				CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
				    rxmap->dm_segs[0].ds_addr +
				    RFA_ALIGNMENT_FUDGE);
				CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND,
				    FXP_SCB_COMMAND_RU_START);
			}
		}
		/*
		 * Free any finished transmit mbuf chains.
		 */
		if (statack & FXP_SCB_STATACK_CNA) {
			struct fxp_cb_tx *txp;
			bus_dmamap_t txmap;

			/*
			 * Walk the list of queued TxCBs from cbl_first,
			 * reaping every one the chip has completed
			 * (FXP_CB_STATUS_C set).
			 */
			for (txp = sc->cbl_first; sc->tx_queued;
			    txp = txp->cb_soft.next) {
				bus_dmamap_sync(sc->sc_dmat,
				    sc->sc_dmamap, FXP_TXDESCOFF(sc, txp),
				    FXP_TXDESCSIZE,
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
				if ((txp->cb_status & FXP_CB_STATUS_C) == 0)
					break;
				if (txp->cb_soft.mb_head != NULL) {
					txmap = txp->cb_soft.dmamap;
					bus_dmamap_sync(sc->sc_dmat, txmap,
					    0, txmap->dm_mapsize,
					    BUS_DMASYNC_POSTWRITE);
					bus_dmamap_unload(sc->sc_dmat, txmap);
					m_freem(txp->cb_soft.mb_head);
					txp->cb_soft.mb_head = NULL;
				}
				sc->tx_queued--;
			}
			sc->cbl_first = txp;
			ifp->if_flags &= ~IFF_OACTIVE;
			if (sc->tx_queued == 0) {
				ifp->if_timer = 0;
				/* Safe to reprogram multicast filter now. */
				if (sc->need_mcsetup)
					fxp_mc_setup(sc);
			}
			/*
			 * Try to start more packets transmitting.
			 */
			if (ifp->if_snd.ifq_head != NULL)
				fxp_start(ifp);
		}
	}

#if NRND > 0
	if (claimed)
		rnd_add_uint32(&sc->rnd_source, statack);
#endif
	return (claimed);
}
867
/*
 * Update packet in/out/collision statistics. The i82557 doesn't
 * allow you to access these counters without doing a fairly
 * expensive DMA to get _all_ of the statistics it maintains, so
 * we do this operation here only once per second. The statistics
 * counters in the kernel are updated from the previous dump-stats
 * DMA and then a new dump-stats DMA is started. The on-chip
 * counters are zeroed when the DMA completes. If we can't start
 * the DMA immediately, we don't wait - we just prepare to read
 * them again next time.
 *
 * NOTE(review): the stats buffer is read here without a
 * bus_dmamap_sync() of the control-data map; this appears to rely
 * on the BUS_DMA_COHERENT mapping established in fxp_attach() --
 * confirm on non-coherent platforms.
 */
void
fxp_tick(arg)
	void *arg;
{
	struct fxp_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_if;
	struct fxp_stats *sp = &sc->control_data->fcd_stats;
	int s = splnet();

	ifp->if_opackets += sp->tx_good;
	ifp->if_collisions += sp->tx_total_collisions;
	if (sp->rx_good) {
		ifp->if_ipackets += sp->rx_good;
		sc->rx_idle_secs = 0;
	} else {
		/* No packets this second; count toward the lockup check. */
		sc->rx_idle_secs++;
	}
	ifp->if_ierrors +=
	    sp->rx_crc_errors +
	    sp->rx_alignment_errors +
	    sp->rx_rnr_errors +
	    sp->rx_overrun_errors;
	/*
	 * If any transmit underruns occurred, bump up the transmit
	 * threshold by another 512 bytes (64 * 8).
	 */
	if (sp->tx_underruns) {
		ifp->if_oerrors += sp->tx_underruns;
		if (tx_threshold < 192)
			tx_threshold += 64;
	}

	/*
	 * If we haven't received any packets in FXP_MAC_RX_IDLE seconds,
	 * then assume the receiver has locked up and attempt to clear
	 * the condition by reprogramming the multicast filter. This is
	 * a work-around for a bug in the 82557 where the receiver locks
	 * up if it gets certain types of garbage in the synchronization
	 * bits prior to the packet header. This bug is supposed to only
	 * occur in 10Mbps mode, but has been seen to occur in 100Mbps
	 * mode as well (perhaps due to a 10/100 speed transition).
	 */
	if (sc->rx_idle_secs > FXP_MAX_RX_IDLE) {
		sc->rx_idle_secs = 0;
		fxp_mc_setup(sc);
	}
	/*
	 * If there is no pending command, start another stats
	 * dump. Otherwise punt for now.
	 */
	if (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) == 0) {
		/*
		 * Start another stats dump.
		 */
		CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND,
		    FXP_SCB_COMMAND_CU_DUMPRESET);
	} else {
		/*
		 * A previous command is still waiting to be accepted.
		 * Just zero our copy of the stats and wait for the
		 * next timer event to update them.
		 */
		sp->tx_good = 0;
		sp->tx_underruns = 0;
		sp->tx_total_collisions = 0;

		sp->rx_good = 0;
		sp->rx_crc_errors = 0;
		sp->rx_alignment_errors = 0;
		sp->rx_rnr_errors = 0;
		sp->rx_overrun_errors = 0;
	}

	/* Tick the MII clock. */
	mii_tick(&sc->sc_mii);
	splx(s);

	/*
	 * Schedule another timeout one second from now.
	 */
	timeout(fxp_tick, sc, hz);
}
961
/*
 * Stop the interface. Cancels the statistics updater and resets
 * the interface.  All queued transmit mbufs are freed and the
 * receive buffer chain is torn down and rebuilt so a subsequent
 * fxp_init() starts from a clean slate.
 */
void
fxp_stop(sc)
	struct fxp_softc *sc;
{
	struct ifnet *ifp = &sc->sc_if;
	struct fxp_rxdesc *rxd;
	struct fxp_cb_tx *txp;
	int i;

	/*
	 * Cancel stats updater.
	 */
	untimeout(fxp_tick, sc);

	/*
	 * Issue software reset to halt all chip DMA.
	 */
	CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
	DELAY(10);

	/*
	 * Release any xmit buffers.
	 */
	for (txp = sc->control_data->fcd_txcbs, i = 0; i < FXP_NTXCB; i++) {
		if (txp[i].cb_soft.mb_head != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txp[i].cb_soft.dmamap);
			m_freem(txp[i].cb_soft.mb_head);
			txp[i].cb_soft.mb_head = NULL;
		}
	}
	sc->tx_queued = 0;

	/*
	 * Free all the receive buffers then reallocate/reinitialize
	 */
	sc->rfa_head = NULL;
	sc->rfa_tail = NULL;
	for (i = 0; i < FXP_NRFABUFS; i++) {
		rxd = &sc->sc_rxdescs[i];
		if (rxd->fr_mbhead != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxd->fr_dmamap);
			m_freem(rxd->fr_mbhead);
			rxd->fr_mbhead = NULL;
		}
		if (fxp_add_rfabuf(sc, rxd) != 0) {
			/*
			 * This "can't happen" - we're at splnet()
			 * and we just freed the buffer we need
			 * above.
			 */
			panic("fxp_stop: no buffers!");
		}
	}

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}
1023
1024 /*
1025 * Watchdog/transmission transmit timeout handler. Called when a
1026 * transmission is started on the interface, but no interrupt is
1027 * received before the timeout. This usually indicates that the
1028 * card has wedged for some reason.
1029 */
1030 void
1031 fxp_watchdog(ifp)
1032 struct ifnet *ifp;
1033 {
1034 struct fxp_softc *sc = ifp->if_softc;
1035
1036 printf("%s: device timeout\n", sc->sc_dev.dv_xname);
1037 ifp->if_oerrors++;
1038
1039 fxp_init(sc);
1040 }
1041
1042 void
1043 fxp_init(xsc)
1044 void *xsc;
1045 {
1046 struct fxp_softc *sc = xsc;
1047 struct ifnet *ifp = &sc->sc_if;
1048 struct fxp_cb_config *cbp;
1049 struct fxp_cb_ias *cb_ias;
1050 struct fxp_cb_tx *txp;
1051 int i, s, prm, error;
1052
1053 s = splnet();
1054 /*
1055 * Cancel any pending I/O
1056 */
1057 fxp_stop(sc);
1058
1059 prm = (ifp->if_flags & IFF_PROMISC) ? 1 : 0;
1060 sc->promisc_mode = prm;
1061
1062 /*
1063 * Initialize base of CBL and RFA memory. Loading with zero
1064 * sets it up for regular linear addressing.
1065 */
1066 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0);
1067 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_BASE);
1068
1069 fxp_scb_wait(sc);
1070 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_RU_BASE);
1071
1072 /*
1073 * Initialize base of dump-stats buffer.
1074 */
1075 fxp_scb_wait(sc);
1076 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
1077 sc->sc_cddma + FXP_CDOFF(fcd_stats));
1078 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_DUMP_ADR);
1079
1080 /*
1081 * We temporarily use memory that contains the TxCB list to
1082 * construct the config CB. The TxCB list memory is rebuilt
1083 * later.
1084 */
1085 cbp = (struct fxp_cb_config *) sc->control_data->fcd_txcbs;
1086
1087 /*
1088 * This bcopy is kind of disgusting, but there are a bunch of must be
1089 * zero and must be one bits in this structure and this is the easiest
1090 * way to initialize them all to proper values.
1091 */
1092 bcopy(fxp_cb_config_template, (void *)&cbp->cb_status,
1093 sizeof(fxp_cb_config_template));
1094
1095 cbp->cb_status = 0;
1096 cbp->cb_command = FXP_CB_COMMAND_CONFIG | FXP_CB_COMMAND_EL;
1097 cbp->link_addr = -1; /* (no) next command */
1098 cbp->byte_count = 22; /* (22) bytes to config */
1099 cbp->rx_fifo_limit = 8; /* rx fifo threshold (32 bytes) */
1100 cbp->tx_fifo_limit = 0; /* tx fifo threshold (0 bytes) */
1101 cbp->adaptive_ifs = 0; /* (no) adaptive interframe spacing */
1102 cbp->rx_dma_bytecount = 0; /* (no) rx DMA max */
1103 cbp->tx_dma_bytecount = 0; /* (no) tx DMA max */
1104 cbp->dma_bce = 0; /* (disable) dma max counters */
1105 cbp->late_scb = 0; /* (don't) defer SCB update */
1106 cbp->tno_int = 0; /* (disable) tx not okay interrupt */
1107 cbp->ci_int = 0; /* interrupt on CU not active */
1108 cbp->save_bf = prm; /* save bad frames */
1109 cbp->disc_short_rx = !prm; /* discard short packets */
1110 cbp->underrun_retry = 1; /* retry mode (1) on DMA underrun */
1111 cbp->mediatype = !sc->phy_10Mbps_only; /* interface mode */
1112 cbp->nsai = 1; /* (don't) disable source addr insert */
1113 cbp->preamble_length = 2; /* (7 byte) preamble */
1114 cbp->loopback = 0; /* (don't) loopback */
1115 cbp->linear_priority = 0; /* (normal CSMA/CD operation) */
1116 cbp->linear_pri_mode = 0; /* (wait after xmit only) */
1117 cbp->interfrm_spacing = 6; /* (96 bits of) interframe spacing */
1118 cbp->promiscuous = prm; /* promiscuous mode */
1119 cbp->bcast_disable = 0; /* (don't) disable broadcasts */
1120 cbp->crscdt = 0; /* (CRS only) */
1121 cbp->stripping = !prm; /* truncate rx packet to byte count */
1122 cbp->padding = 1; /* (do) pad short tx packets */
1123 cbp->rcv_crc_xfer = 0; /* (don't) xfer CRC to host */
1124 cbp->force_fdx = 0; /* (don't) force full duplex */
1125 cbp->fdx_pin_en = 1; /* (enable) FDX# pin */
1126 cbp->multi_ia = 0; /* (don't) accept multiple IAs */
1127 cbp->mc_all = sc->all_mcasts;/* accept all multicasts */
1128
1129 /* Load the DMA map */
1130 error = bus_dmamap_load(sc->sc_dmat, sc->sc_tx_dmamaps[0], cbp,
1131 sizeof(struct fxp_cb_config), NULL, BUS_DMA_NOWAIT);
1132 if (error) {
1133 printf("%s: can't load config buffer, error = %d\n",
1134 sc->sc_dev.dv_xname, error);
1135 return;
1136 }
1137 bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_dmamaps[0],
1138 0, sizeof(struct fxp_cb_config),
1139 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1140
1141 /*
1142 * Start the config command/DMA.
1143 */
1144 fxp_scb_wait(sc);
1145 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
1146 sc->sc_cddma + FXP_CDOFF(fcd_txcbs[0].cb_status));
1147 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START);
1148 /* ...and wait for it to complete. */
1149 bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_dmamaps[0],
1150 0, sizeof(struct fxp_cb_config),
1151 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1152 while (!(cbp->cb_status & FXP_CB_STATUS_C))
1153 bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_dmamaps[0],
1154 0, sizeof(struct fxp_cb_config),
1155 BUS_DMASYNC_POSTREAD);
1156
1157 /* Unload the DMA map */
1158 bus_dmamap_unload(sc->sc_dmat, sc->sc_tx_dmamaps[0]);
1159
1160 /*
1161 * Now initialize the station address. Temporarily use the TxCB
1162 * memory area like we did above for the config CB.
1163 */
1164 cb_ias = (struct fxp_cb_ias *) sc->control_data->fcd_txcbs;
1165 cb_ias->cb_status = 0;
1166 cb_ias->cb_command = FXP_CB_COMMAND_IAS | FXP_CB_COMMAND_EL;
1167 cb_ias->link_addr = -1;
1168 bcopy(LLADDR(ifp->if_sadl), (void *)cb_ias->macaddr, 6);
1169
1170 /* Load the DMA map */
1171 error = bus_dmamap_load(sc->sc_dmat, sc->sc_tx_dmamaps[0], cbp,
1172 sizeof(struct fxp_cb_ias), NULL, BUS_DMA_NOWAIT);
1173 if (error) {
1174 printf("%s: can't load address buffer, error = %d\n",
1175 sc->sc_dev .dv_xname, error);
1176 return;
1177 }
1178 bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_dmamaps[0],
1179 0, sizeof(struct fxp_cb_ias),
1180 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1181
1182 /*
1183 * Start the IAS (Individual Address Setup) command/DMA.
1184 */
1185 fxp_scb_wait(sc);
1186 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START);
1187 /* ...and wait for it to complete. */
1188 bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_dmamaps[0],
1189 0, sizeof(struct fxp_cb_ias),
1190 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1191 while (!(cb_ias->cb_status & FXP_CB_STATUS_C))
1192 bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_dmamaps[0],
1193 0, sizeof(struct fxp_cb_ias),
1194 BUS_DMASYNC_POSTREAD);
1195
1196 /* Unload the DMA map */
1197 bus_dmamap_unload(sc->sc_dmat, sc->sc_tx_dmamaps[0]);
1198
1199 /*
1200 * Initialize transmit control block (TxCB) list.
1201 */
1202
1203 txp = sc->control_data->fcd_txcbs;
1204 bzero(txp, sizeof(sc->control_data->fcd_txcbs));
1205 for (i = 0; i < FXP_NTXCB; i++) {
1206 txp[i].cb_status = FXP_CB_STATUS_C | FXP_CB_STATUS_OK;
1207 txp[i].cb_command = FXP_CB_COMMAND_NOP;
1208 txp[i].link_addr = sc->sc_cddma +
1209 FXP_CDOFF(fcd_txcbs[(i + 1) & FXP_TXCB_MASK].cb_status);
1210 txp[i].tbd_array_addr = sc->sc_cddma +
1211 FXP_CDOFF(fcd_txcbs[i].tbd[0]);
1212 txp[i].cb_soft.dmamap = sc->sc_tx_dmamaps[i];
1213 txp[i].cb_soft.next = &txp[(i + 1) & FXP_TXCB_MASK];
1214 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1215 FXP_TXDESCOFF(sc, &txp[i]), FXP_TXDESCSIZE,
1216 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1217 }
1218 /*
1219 * Set the suspend flag on the first TxCB and start the control
1220 * unit. It will execute the NOP and then suspend.
1221 */
1222 txp->cb_command = FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S;
1223 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1224 FXP_TXDESCOFF(sc, txp), FXP_TXDESCSIZE,
1225 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1226
1227 sc->cbl_first = sc->cbl_last = txp;
1228 sc->tx_queued = 1;
1229
1230 fxp_scb_wait(sc);
1231 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START);
1232
1233 /*
1234 * Initialize receiver buffer area - RFA.
1235 */
1236 fxp_scb_wait(sc);
1237 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
1238 sc->rfa_head->fr_dmamap->dm_segs[0].ds_addr + RFA_ALIGNMENT_FUDGE);
1239 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_RU_START);
1240
1241 /*
1242 * Set current media.
1243 */
1244 mii_mediachg(&sc->sc_mii);
1245
1246 ifp->if_flags |= IFF_RUNNING;
1247 ifp->if_flags &= ~IFF_OACTIVE;
1248 splx(s);
1249
1250 /*
1251 * Start stats updater.
1252 */
1253 timeout(fxp_tick, sc, hz);
1254 }
1255
1256 /*
1257 * Change media according to request.
1258 */
1259 int
1260 fxp_mii_mediachange(ifp)
1261 struct ifnet *ifp;
1262 {
1263 struct fxp_softc *sc = ifp->if_softc;
1264
1265 if (ifp->if_flags & IFF_UP)
1266 mii_mediachg(&sc->sc_mii);
1267 return (0);
1268 }
1269
1270 /*
1271 * Notify the world which media we're using.
1272 */
1273 void
1274 fxp_mii_mediastatus(ifp, ifmr)
1275 struct ifnet *ifp;
1276 struct ifmediareq *ifmr;
1277 {
1278 struct fxp_softc *sc = ifp->if_softc;
1279
1280 mii_pollstat(&sc->sc_mii);
1281 ifmr->ifm_status = sc->sc_mii.mii_media_status;
1282 ifmr->ifm_active = sc->sc_mii.mii_media_active;
1283 }
1284
/*
 * Media change request for the ADC 80c24 serial transceiver.
 * The 80c24 auto-selects its medium, so there is no work to do.
 */
int
fxp_80c24_mediachange(ifp)
	struct ifnet *ifp;
{

	return (0);
}
1293
1294 void
1295 fxp_80c24_mediastatus(ifp, ifmr)
1296 struct ifnet *ifp;
1297 struct ifmediareq *ifmr;
1298 {
1299 struct fxp_softc *sc = ifp->if_softc;
1300
1301 /*
1302 * Media is currently-selected media. We cannot determine
1303 * the link status.
1304 */
1305 ifmr->ifm_status = 0;
1306 ifmr->ifm_active = sc->sc_mii.mii_media.ifm_cur->ifm_media;
1307 }
1308
1309 /*
1310 * Add a buffer to the end of the RFA buffer list.
1311 * Return 0 if successful, 1 for failure. A failure results in
1312 * adding the 'oldm' (if non-NULL) on to the end of the list -
1313 * tossing out it's old contents and recycling it.
1314 * The RFA struct is stuck at the beginning of mbuf cluster and the
1315 * data pointer is fixed up to point just past it.
1316 */
1317 int
1318 fxp_add_rfabuf(sc, rxd)
1319 struct fxp_softc *sc;
1320 struct fxp_rxdesc *rxd;
1321 {
1322 struct mbuf *m, *oldm;
1323 struct fxp_rfa *rfa, *p_rfa;
1324 bus_dmamap_t rxmap;
1325 u_int32_t v;
1326 int error, rval = 0;
1327
1328 oldm = rxd->fr_mbhead;
1329 rxmap = rxd->fr_dmamap;
1330
1331 MGETHDR(m, M_DONTWAIT, MT_DATA);
1332 if (m != NULL) {
1333 MCLGET(m, M_DONTWAIT);
1334 if ((m->m_flags & M_EXT) == 0) {
1335 m_freem(m);
1336 if (oldm == NULL)
1337 return 1;
1338 m = oldm;
1339 m->m_data = m->m_ext.ext_buf;
1340 rval = 1;
1341 }
1342 } else {
1343 if (oldm == NULL)
1344 return 1;
1345 m = oldm;
1346 m->m_data = m->m_ext.ext_buf;
1347 rval = 1;
1348 }
1349
1350 rxd->fr_mbhead = m;
1351
1352 /*
1353 * Setup the DMA map for this receive buffer.
1354 */
1355 if (m != oldm) {
1356 if (oldm != NULL)
1357 bus_dmamap_unload(sc->sc_dmat, rxmap);
1358 error = bus_dmamap_load(sc->sc_dmat, rxmap,
1359 m->m_ext.ext_buf, MCLBYTES, NULL, BUS_DMA_NOWAIT);
1360 if (error) {
1361 printf("%s: can't load rx buffer, error = %d\n",
1362 sc->sc_dev.dv_xname, error);
1363 panic("fxp_add_rfabuf"); /* XXX */
1364 }
1365 }
1366
1367 /*
1368 * Move the data pointer up so that the incoming data packet
1369 * will be 32-bit aligned.
1370 */
1371 m->m_data += RFA_ALIGNMENT_FUDGE;
1372
1373 /*
1374 * Get a pointer to the base of the mbuf cluster and move
1375 * data start past the RFA descriptor.
1376 */
1377 rfa = mtod(m, struct fxp_rfa *);
1378 m->m_data += sizeof(struct fxp_rfa);
1379 rfa->size = MCLBYTES - sizeof(struct fxp_rfa) - RFA_ALIGNMENT_FUDGE;
1380
1381 /*
1382 * Initialize the rest of the RFA.
1383 */
1384 rfa->rfa_status = 0;
1385 rfa->rfa_control = FXP_RFA_CONTROL_EL;
1386 rfa->actual_size = 0;
1387
1388 /*
1389 * Note that since the RFA is misaligned, we cannot store values
1390 * directly. Instead, we must copy.
1391 */
1392 v = -1;
1393 memcpy((void *)&rfa->link_addr, &v, sizeof(v));
1394 memcpy((void *)&rfa->rbd_addr, &v, sizeof(v));
1395
1396 /*
1397 * If there are other buffers already on the list, attach this
1398 * one to the end by fixing up the tail to point to this one.
1399 */
1400 if (sc->rfa_head != NULL) {
1401 p_rfa = (struct fxp_rfa *)
1402 (sc->rfa_tail->fr_mbhead->m_ext.ext_buf +
1403 RFA_ALIGNMENT_FUDGE);
1404 sc->rfa_tail->fr_next = rxd;
1405 v = rxmap->dm_segs[0].ds_addr + RFA_ALIGNMENT_FUDGE;
1406 memcpy((void *)&p_rfa->link_addr, &v, sizeof(v));
1407 p_rfa->rfa_control &= ~FXP_RFA_CONTROL_EL;
1408 } else {
1409 sc->rfa_head = rxd;
1410 }
1411 sc->rfa_tail = rxd;
1412
1413 bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
1414 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1415
1416 return (rval);
1417 }
1418
1419 volatile int
1420 fxp_mdi_read(self, phy, reg)
1421 struct device *self;
1422 int phy;
1423 int reg;
1424 {
1425 struct fxp_softc *sc = (struct fxp_softc *)self;
1426 int count = 10000;
1427 int value;
1428
1429 CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
1430 (FXP_MDI_READ << 26) | (reg << 16) | (phy << 21));
1431
1432 while (((value = CSR_READ_4(sc, FXP_CSR_MDICONTROL)) & 0x10000000) == 0
1433 && count--)
1434 DELAY(10);
1435
1436 if (count <= 0)
1437 printf("%s: fxp_mdi_read: timed out\n", sc->sc_dev.dv_xname);
1438
1439 return (value & 0xffff);
1440 }
1441
/*
 * Callback from the MII layer when the PHY's media status changes.
 * Currently a no-op for this driver.
 */
void
fxp_statchg(self)
	struct device *self;
{

	/* XXX Update ifp->if_baudrate */
}
1449
1450 void
1451 fxp_mdi_write(self, phy, reg, value)
1452 struct device *self;
1453 int phy;
1454 int reg;
1455 int value;
1456 {
1457 struct fxp_softc *sc = (struct fxp_softc *)self;
1458 int count = 10000;
1459
1460 CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
1461 (FXP_MDI_WRITE << 26) | (reg << 16) | (phy << 21) |
1462 (value & 0xffff));
1463
1464 while((CSR_READ_4(sc, FXP_CSR_MDICONTROL) & 0x10000000) == 0 &&
1465 count--)
1466 DELAY(10);
1467
1468 if (count <= 0)
1469 printf("%s: fxp_mdi_write: timed out\n", sc->sc_dev.dv_xname);
1470 }
1471
/*
 * Handle interface ioctls: address assignment, MTU, flag changes,
 * multicast list updates, and media selection.  Runs at splnet to
 * keep the hardware state consistent with the interrupt path.
 */
int
fxp_ioctl(ifp, command, data)
	struct ifnet *ifp;
	u_long command;
	caddr_t data;
{
	struct fxp_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		/* Setting an address implicitly marks the interface up. */
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			fxp_init(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif
#ifdef NS
		case AF_NS:
		    {
			 register struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;

			 if (ns_nullhost(*ina))
				ina->x_host = *(union ns_host *)
				    LLADDR(ifp->if_sadl);
			 else
				bcopy(ina->x_host.c_host, LLADDR(ifp->if_sadl),
				    ifp->if_addrlen);
			 /* Set new address. */
			 fxp_init(sc);
			 break;
		    }
#endif
		default:
			fxp_init(sc);
			break;
		}
		break;

	case SIOCSIFMTU:
		/* Reject anything larger than a standard Ethernet MTU. */
		if (ifr->ifr_mtu > ETHERMTU)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	case SIOCSIFFLAGS:
		sc->all_mcasts = (ifp->if_flags & IFF_ALLMULTI) ? 1 : 0;

		/*
		 * If interface is marked up and not running, then start it.
		 * If it is marked down and running, stop it.
		 * XXX If it's up then re-initialize it. This is so flags
		 * such as IFF_PROMISC are handled.
		 */
		if (ifp->if_flags & IFF_UP) {
			fxp_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				fxp_stop(sc);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		sc->all_mcasts = (ifp->if_flags & IFF_ALLMULTI) ? 1 : 0;
		error = (command == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_ethercom) :
		    ether_delmulti(ifr, &sc->sc_ethercom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware
			 * filter accordingly.
			 */
			if (!sc->all_mcasts)
				fxp_mc_setup(sc);
			/*
			 * fxp_mc_setup() can turn on all_mcasts if we run
			 * out of space, so check it again rather than else {}.
			 */
			if (sc->all_mcasts)
				fxp_init(sc);
			error = 0;
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
		break;

	default:
		error = EINVAL;
	}
	(void) splx(s);
	return (error);
}
1577
1578 /*
1579 * Program the multicast filter.
1580 *
1581 * We have an artificial restriction that the multicast setup command
1582 * must be the first command in the chain, so we take steps to ensure
1583 * that. By requiring this, it allows us to keep the performance of
1584 * the pre-initialized command ring (esp. link pointers) by not actually
1585 * inserting the mcsetup command in the ring - i.e. it's link pointer
1586 * points to the TxCB ring, but the mcsetup descriptor itself is not part
1587 * of it. We then can do 'CU_START' on the mcsetup descriptor and have it
1588 * lead into the regular TxCB ring when it completes.
1589 *
1590 * This function must be called at splnet.
1591 */
1592 void
1593 fxp_mc_setup(sc)
1594 struct fxp_softc *sc;
1595 {
1596 struct fxp_cb_mcs *mcsp = &sc->control_data->fcd_mcscb;
1597 struct ifnet *ifp = &sc->sc_if;
1598 struct ethercom *ec = &sc->sc_ethercom;
1599 struct ether_multi *enm;
1600 struct ether_multistep step;
1601 int nmcasts;
1602
1603 if (sc->tx_queued) {
1604 sc->need_mcsetup = 1;
1605 return;
1606 }
1607 sc->need_mcsetup = 0;
1608
1609 /*
1610 * Initialize multicast setup descriptor.
1611 */
1612 mcsp->cb_soft.next = sc->control_data->fcd_txcbs;
1613 mcsp->cb_soft.mb_head = NULL;
1614 mcsp->cb_soft.dmamap = NULL;
1615 mcsp->cb_status = 0;
1616 mcsp->cb_command = FXP_CB_COMMAND_MCAS | FXP_CB_COMMAND_S;
1617 mcsp->link_addr = sc->sc_cddma + FXP_CDOFF(fcd_txcbs[0].cb_status);
1618
1619 nmcasts = 0;
1620 if (!sc->all_mcasts) {
1621 ETHER_FIRST_MULTI(step, ec, enm);
1622 while (enm != NULL) {
1623 /*
1624 * Check for too many multicast addresses or if we're
1625 * listening to a range. Either way, we simply have
1626 * to accept all multicasts.
1627 */
1628 if (nmcasts >= MAXMCADDR ||
1629 bcmp(enm->enm_addrlo, enm->enm_addrhi,
1630 ETHER_ADDR_LEN) != 0) {
1631 sc->all_mcasts = 1;
1632 nmcasts = 0;
1633 break;
1634 }
1635 bcopy(enm->enm_addrlo,
1636 (void *)
1637 &sc->control_data->fcd_mcscb.mc_addr[nmcasts][0],
1638 ETHER_ADDR_LEN);
1639 nmcasts++;
1640 ETHER_NEXT_MULTI(step, enm);
1641 }
1642 }
1643 mcsp->mc_cnt = nmcasts * 6;
1644 sc->cbl_first = sc->cbl_last = (struct fxp_cb_tx *) mcsp;
1645 sc->tx_queued = 1;
1646
1647 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1648 FXP_CDOFF(fcd_mcscb.cb_status), FXP_MCSDESCSIZE,
1649 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1650
1651 /*
1652 * Wait until command unit is not active. This should never
1653 * be the case when nothing is queued, but make sure anyway.
1654 */
1655 while ((CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS) >> 6) ==
1656 FXP_SCB_CUS_ACTIVE) ;
1657
1658 /*
1659 * Start the multicast setup command.
1660 */
1661 fxp_scb_wait(sc);
1662 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
1663 sc->sc_cddma + FXP_CDOFF(fcd_mcscb.cb_status));
1664 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START);
1665
1666 ifp->if_timer = 5;
1667 return;
1668 }
1669