1/*	$NetBSD: gem.c,v 1.77.6.2 2008/12/13 01:14:14 haad Exp $ */
2
3/*
4 *
5 * Copyright (C) 2001 Eduardo Horvath.
6 * Copyright (c) 2001-2003 Thomas Moestl
7 * All rights reserved.
8 *
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 *    notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 *    notice, this list of conditions and the following disclaimer in the
17 *    documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR  ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR  BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 */
32
33/*
34 * Driver for Apple GMAC, Sun ERI and Sun GEM Ethernet controllers
35 * See `GEM Gigabit Ethernet ASIC Specification'
36 *   http://www.sun.com/processors/manuals/ge.pdf
37 */
38
39#include <sys/cdefs.h>
40__KERNEL_RCSID(0, "$NetBSD: gem.c,v 1.77.6.2 2008/12/13 01:14:14 haad Exp $");
41
42#include "opt_inet.h"
43#include "bpfilter.h"
44
45#include <sys/param.h>
46#include <sys/systm.h>
47#include <sys/callout.h>
48#include <sys/mbuf.h>
49#include <sys/syslog.h>
50#include <sys/malloc.h>
51#include <sys/kernel.h>
52#include <sys/socket.h>
53#include <sys/ioctl.h>
54#include <sys/errno.h>
55#include <sys/device.h>
56
57#include <machine/endian.h>
58
59#include <uvm/uvm_extern.h>
60
61#include <net/if.h>
62#include <net/if_dl.h>
63#include <net/if_media.h>
64#include <net/if_ether.h>
65
66#ifdef INET
67#include <netinet/in.h>
68#include <netinet/in_systm.h>
69#include <netinet/in_var.h>
70#include <netinet/ip.h>
71#include <netinet/tcp.h>
72#include <netinet/udp.h>
73#endif
74
75#if NBPFILTER > 0
76#include <net/bpf.h>
77#endif
78
79#include <sys/bus.h>
80#include <sys/intr.h>
81
82#include <dev/mii/mii.h>
83#include <dev/mii/miivar.h>
84#include <dev/mii/mii_bitbang.h>
85
86#include <dev/ic/gemreg.h>
87#include <dev/ic/gemvar.h>
88
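/*
 * Polling budget for gem_bitwait(): each iteration delays 100us, so this
 * gives roughly a one second timeout.
 */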
89#define TRIES	10000
90
91static void	gem_start(struct ifnet *);
92static void	gem_stop(struct ifnet *, int);
93int		gem_ioctl(struct ifnet *, u_long, void *);
94void		gem_tick(void *);
95void		gem_watchdog(struct ifnet *);
96void		gem_shutdown(void *);
97void		gem_pcs_start(struct gem_softc *sc);
98void		gem_pcs_stop(struct gem_softc *sc, int);
99int		gem_init(struct ifnet *);
100void		gem_init_regs(struct gem_softc *sc);
101static int	gem_ringsize(int sz);
102static int	gem_meminit(struct gem_softc *);
103void		gem_mifinit(struct gem_softc *);
104static int	gem_bitwait(struct gem_softc *sc, bus_space_handle_t, int,
105		    u_int32_t, u_int32_t);
106void		gem_reset(struct gem_softc *);
107int		gem_reset_rx(struct gem_softc *sc);
108static void	gem_reset_rxdma(struct gem_softc *sc);
109static void	gem_rx_common(struct gem_softc *sc);
110int		gem_reset_tx(struct gem_softc *sc);
111int		gem_disable_rx(struct gem_softc *sc);
112int		gem_disable_tx(struct gem_softc *sc);
113static void	gem_rxdrain(struct gem_softc *sc);
114int		gem_add_rxbuf(struct gem_softc *sc, int idx);
115void		gem_setladrf(struct gem_softc *);
116
117/* MII methods & callbacks */
118static int	gem_mii_readreg(struct device *, int, int);
119static void	gem_mii_writereg(struct device *, int, int, int);
120static void	gem_mii_statchg(struct device *);
121
122static int	gem_ifflags_cb(struct ethercom *);
123
124void		gem_statuschange(struct gem_softc *);
125
126int		gem_ser_mediachange(struct ifnet *);
127void		gem_ser_mediastatus(struct ifnet *, struct ifmediareq *);
128
129struct mbuf	*gem_get(struct gem_softc *, int, int);
130int		gem_put(struct gem_softc *, int, struct mbuf *);
131void		gem_read(struct gem_softc *, int, int);
132int		gem_pint(struct gem_softc *);
133int		gem_eint(struct gem_softc *, u_int);
134int		gem_rint(struct gem_softc *);
135int		gem_tint(struct gem_softc *);
136void		gem_power(int, void *);
137
138#ifdef GEM_DEBUG
139static void gem_txsoft_print(const struct gem_softc *, int, int);
140#define	DPRINTF(sc, x)	if ((sc)->sc_ethercom.ec_if.if_flags & IFF_DEBUG) \
141				printf x
142#else
143#define	DPRINTF(sc, x)	/* nothing */
144#endif
145
146#define ETHER_MIN_TX (ETHERMIN + sizeof(struct ether_header))
147
148
149/*
150 * gem_attach:
151 *
152 *	Attach a Gem interface to the system.
153 */
154void
gem_attach(struct gem_softc *sc, const uint8_t *enaddr)
{
159	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
160	struct mii_data *mii = &sc->sc_mii;
161	bus_space_tag_t t = sc->sc_bustag;
162	bus_space_handle_t h = sc->sc_h1;
163	struct ifmedia_entry *ifm;
164	int i, error;
165	u_int32_t v;
166	char *nullbuf;
167
168	/* Make sure the chip is stopped. */
169	ifp->if_softc = sc;
170	gem_reset(sc);
171
	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.  gem_control_data is 9216 bytes; there is room for
	 * the padding buffer in the bus_dmamem_alloc()'d memory.
	 */
177	if ((error = bus_dmamem_alloc(sc->sc_dmatag,
178	    sizeof(struct gem_control_data) + ETHER_MIN_TX, PAGE_SIZE,
179	    0, &sc->sc_cdseg, 1, &sc->sc_cdnseg, 0)) != 0) {
180		aprint_error_dev(&sc->sc_dev,
181		   "unable to allocate control data, error = %d\n",
182		    error);
183		goto fail_0;
184	}
185
186	/* XXX should map this in with correct endianness */
187	if ((error = bus_dmamem_map(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg,
188	    sizeof(struct gem_control_data), (void **)&sc->sc_control_data,
189	    BUS_DMA_COHERENT)) != 0) {
190		aprint_error_dev(&sc->sc_dev, "unable to map control data, error = %d\n",
191		    error);
192		goto fail_1;
193	}
194
195	nullbuf =
196	    (char *)sc->sc_control_data + sizeof(struct gem_control_data);
197
198	if ((error = bus_dmamap_create(sc->sc_dmatag,
199	    sizeof(struct gem_control_data), 1,
200	    sizeof(struct gem_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
201		aprint_error_dev(&sc->sc_dev, "unable to create control data DMA map, "
202		    "error = %d\n", error);
203		goto fail_2;
204	}
205
206	if ((error = bus_dmamap_load(sc->sc_dmatag, sc->sc_cddmamap,
207	    sc->sc_control_data, sizeof(struct gem_control_data), NULL,
208	    0)) != 0) {
209		aprint_error_dev(&sc->sc_dev,
210		    "unable to load control data DMA map, error = %d\n",
211		    error);
212		goto fail_3;
213	}
214
215	memset(nullbuf, 0, ETHER_MIN_TX);
216	if ((error = bus_dmamap_create(sc->sc_dmatag,
217	    ETHER_MIN_TX, 1, ETHER_MIN_TX, 0, 0, &sc->sc_nulldmamap)) != 0) {
218		aprint_error_dev(&sc->sc_dev, "unable to create padding DMA map, "
219		    "error = %d\n", error);
220		goto fail_4;
221	}
222
223	if ((error = bus_dmamap_load(sc->sc_dmatag, sc->sc_nulldmamap,
224	    nullbuf, ETHER_MIN_TX, NULL, 0)) != 0) {
225		aprint_error_dev(&sc->sc_dev,
226		    "unable to load padding DMA map, error = %d\n",
227		    error);
228		goto fail_5;
229	}
230
231	bus_dmamap_sync(sc->sc_dmatag, sc->sc_nulldmamap, 0, ETHER_MIN_TX,
232	    BUS_DMASYNC_PREWRITE);
233
234	/*
235	 * Initialize the transmit job descriptors.
236	 */
237	SIMPLEQ_INIT(&sc->sc_txfreeq);
238	SIMPLEQ_INIT(&sc->sc_txdirtyq);
239
240	/*
241	 * Create the transmit buffer DMA maps.
242	 */
243	for (i = 0; i < GEM_TXQUEUELEN; i++) {
244		struct gem_txsoft *txs;
245
246		txs = &sc->sc_txsoft[i];
247		txs->txs_mbuf = NULL;
248		if ((error = bus_dmamap_create(sc->sc_dmatag,
249		    ETHER_MAX_LEN_JUMBO, GEM_NTXSEGS,
250		    ETHER_MAX_LEN_JUMBO, 0, 0,
251		    &txs->txs_dmamap)) != 0) {
252			aprint_error_dev(&sc->sc_dev, "unable to create tx DMA map %d, "
253			    "error = %d\n", i, error);
254			goto fail_6;
255		}
256		SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
257	}
258
259	/*
260	 * Create the receive buffer DMA maps.
261	 */
262	for (i = 0; i < GEM_NRXDESC; i++) {
263		if ((error = bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1,
264		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
265			aprint_error_dev(&sc->sc_dev, "unable to create rx DMA map %d, "
266			    "error = %d\n", i, error);
267			goto fail_7;
268		}
269		sc->sc_rxsoft[i].rxs_mbuf = NULL;
270	}
271
272	/* Initialize ifmedia structures and MII info */
273	mii->mii_ifp = ifp;
274	mii->mii_readreg = gem_mii_readreg;
275	mii->mii_writereg = gem_mii_writereg;
276	mii->mii_statchg = gem_mii_statchg;
277
278	sc->sc_ethercom.ec_mii = mii;
279
280	/*
	 * Initialization based on `GEM Gigabit Ethernet ASIC Specification'
	 * Section 3.2.1 `Initialization Sequence'.
	 * However, we can't infer SERDES or Serialink from neither
	 * GEM_MIF_CONFIG_MDI0 nor GEM_MIF_CONFIG_MDI1 being set, as both
	 * are set on the Sun X1141A (which uses SERDES).  So, we rely on
	 * our bus attachment setting GEM_SERDES or GEM_SERIAL.
	 * Also, for Apple variants with two PHYs, we prefer the external
	 * PHY over the internal PHY.
289	 */
290	gem_mifinit(sc);
291
292	if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) == 0) {
293		ifmedia_init(&mii->mii_media, IFM_IMASK, ether_mediachange,
294		    ether_mediastatus);
295		mii_attach(&sc->sc_dev, mii, 0xffffffff,
296		    MII_PHY_ANY, MII_OFFSET_ANY, MIIF_FORCEANEG);
297		if (LIST_EMPTY(&mii->mii_phys)) {
			/* No PHY attached */
			aprint_error_dev(&sc->sc_dev, "PHY probe failed\n");
			goto fail_7;
301		} else {
302			struct mii_softc *child;
303
304			/*
305			 * Walk along the list of attached MII devices and
306			 * establish an `MII instance' to `PHY number'
307			 * mapping.
308			 */
309			LIST_FOREACH(child, &mii->mii_phys, mii_list) {
310				/*
311				 * Note: we support just one PHY: the internal
312				 * or external MII is already selected for us
313				 * by the GEM_MIF_CONFIG  register.
314				 */
315				if (child->mii_phy > 1 || child->mii_inst > 0) {
316					aprint_error_dev(&sc->sc_dev,
317					    "cannot accommodate MII device"
318					    " %s at PHY %d, instance %d\n",
319					       device_xname(child->mii_dev),
320					       child->mii_phy, child->mii_inst);
321					continue;
322				}
323				sc->sc_phys[child->mii_inst] = child->mii_phy;
324			}
325
326			/*
327			 * Now select and activate the PHY we will use.
328			 *
329			 * The order of preference is External (MDI1),
330			 * then Internal (MDI0),
			 * then Internal (MDI0).
332			if (sc->sc_phys[1]) {
333#ifdef GEM_DEBUG
334				aprint_debug_dev(&sc->sc_dev, "using external PHY\n");
335#endif
336				sc->sc_mif_config |= GEM_MIF_CONFIG_PHY_SEL;
			} else {
#ifdef GEM_DEBUG
				aprint_debug_dev(&sc->sc_dev, "using internal PHY\n");
#endif
				sc->sc_mif_config &= ~GEM_MIF_CONFIG_PHY_SEL;
			}
343			bus_space_write_4(t, h, GEM_MIF_CONFIG,
344			    sc->sc_mif_config);
345			if (sc->sc_variant != GEM_SUN_ERI)
346				bus_space_write_4(t, h, GEM_MII_DATAPATH_MODE,
347				    GEM_MII_DATAPATH_MII);
348
349			/*
350			 * XXX - we can really do the following ONLY if the
351			 * PHY indeed has the auto negotiation capability!!
352			 */
353			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
354		}
355	} else {
356		ifmedia_init(&mii->mii_media, IFM_IMASK, gem_ser_mediachange,
357		    gem_ser_mediastatus);
358		/* SERDES or Serialink */
359		if (sc->sc_flags & GEM_SERDES) {
360			bus_space_write_4(t, h, GEM_MII_DATAPATH_MODE,
361			    GEM_MII_DATAPATH_SERDES);
362		} else {
363			sc->sc_flags |= GEM_SERIAL;
364			bus_space_write_4(t, h, GEM_MII_DATAPATH_MODE,
365			    GEM_MII_DATAPATH_SERIAL);
366		}
367
368		aprint_normal_dev(&sc->sc_dev, "using external PCS %s: ",
369		    sc->sc_flags & GEM_SERDES ? "SERDES" : "Serialink");
370
371		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO, 0, NULL);
372		/* Check for FDX and HDX capabilities */
373		sc->sc_mii_anar = bus_space_read_4(t, h, GEM_MII_ANAR);
374		if (sc->sc_mii_anar & GEM_MII_ANEG_FUL_DUPLX) {
375			ifmedia_add(&sc->sc_mii.mii_media,
376			    IFM_ETHER|IFM_1000_SX|IFM_MANUAL|IFM_FDX, 0, NULL);
377			aprint_normal("1000baseSX-FDX, ");
378		}
379		if (sc->sc_mii_anar & GEM_MII_ANEG_HLF_DUPLX) {
380			ifmedia_add(&sc->sc_mii.mii_media,
381			    IFM_ETHER|IFM_1000_SX|IFM_MANUAL|IFM_HDX, 0, NULL);
382			aprint_normal("1000baseSX-HDX, ");
383		}
384		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
385		sc->sc_mii_media = IFM_AUTO;
386		aprint_normal("auto\n");
387
388		gem_pcs_stop(sc, 1);
389	}
390
391	/*
392	 * From this point forward, the attachment cannot fail.  A failure
393	 * before this point releases all resources that may have been
394	 * allocated.
395	 */
396
397	/* Announce ourselves. */
398	aprint_normal_dev(&sc->sc_dev, "Ethernet address %s",
399	    ether_sprintf(enaddr));
400
401	/* Get RX FIFO size */
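	/* The GEM_RX_FIFO_SIZE register reports the FIFO size in 64-byte units. */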
402	sc->sc_rxfifosize = 64 *
403	    bus_space_read_4(t, h, GEM_RX_FIFO_SIZE);
404	aprint_normal(", %uKB RX fifo", sc->sc_rxfifosize / 1024);
405
406	/* Get TX FIFO size */
407	v = bus_space_read_4(t, h, GEM_TX_FIFO_SIZE);
408	aprint_normal(", %uKB TX fifo\n", v / 16);
409
410	/* Initialize ifnet structure. */
411	strlcpy(ifp->if_xname, device_xname(&sc->sc_dev), IFNAMSIZ);
412	ifp->if_softc = sc;
413	ifp->if_flags =
414	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
415	sc->sc_if_flags = ifp->if_flags;
416	/*
417	 * The GEM hardware supports basic TCP checksum offloading only.
418	 * Several (all?) revisions (Sun rev. 01 and Apple rev. 00 and 80)
419	 * have bugs in the receive checksum, so don't enable it for now.
420	if ((GEM_IS_SUN(sc) && sc->sc_chiprev != 1) ||
421	    (GEM_IS_APPLE(sc) &&
422	    (sc->sc_chiprev != 0 && sc->sc_chiprev != 0x80)))
423		ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Rx;
424	*/
425	ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Tx;
426	ifp->if_start = gem_start;
427	ifp->if_ioctl = gem_ioctl;
428	ifp->if_watchdog = gem_watchdog;
429	ifp->if_stop = gem_stop;
430	ifp->if_init = gem_init;
431	IFQ_SET_READY(&ifp->if_snd);
432
433	/*
434	 * If we support GigE media, we support jumbo frames too.
435	 * Unless we are Apple.
436	 */
437	TAILQ_FOREACH(ifm, &sc->sc_mii.mii_media.ifm_list, ifm_list) {
438		if (IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_T ||
439		    IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_SX ||
440		    IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_LX ||
441		    IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_CX) {
442			if (!GEM_IS_APPLE(sc))
443				sc->sc_ethercom.ec_capabilities
444				    |= ETHERCAP_JUMBO_MTU;
445			sc->sc_flags |= GEM_GIGABIT;
446			break;
447		}
448	}
449
450	/* claim 802.1q capability */
451	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
452
453	/* Attach the interface. */
454	if_attach(ifp);
455	ether_ifattach(ifp, enaddr);
456	ether_set_ifflags_cb(&sc->sc_ethercom, gem_ifflags_cb);
457
458	sc->sc_sh = shutdownhook_establish(gem_shutdown, sc);
459	if (sc->sc_sh == NULL)
460		panic("gem_config: can't establish shutdownhook");
461
462#if NRND > 0
463	rnd_attach_source(&sc->rnd_source, device_xname(&sc->sc_dev),
464			  RND_TYPE_NET, 0);
465#endif
466
467	evcnt_attach_dynamic(&sc->sc_ev_intr, EVCNT_TYPE_INTR,
468	    NULL, device_xname(&sc->sc_dev), "interrupts");
469#ifdef GEM_COUNTERS
470	evcnt_attach_dynamic(&sc->sc_ev_txint, EVCNT_TYPE_INTR,
471	    &sc->sc_ev_intr, device_xname(&sc->sc_dev), "tx interrupts");
472	evcnt_attach_dynamic(&sc->sc_ev_rxint, EVCNT_TYPE_INTR,
473	    &sc->sc_ev_intr, device_xname(&sc->sc_dev), "rx interrupts");
474	evcnt_attach_dynamic(&sc->sc_ev_rxfull, EVCNT_TYPE_INTR,
475	    &sc->sc_ev_rxint, device_xname(&sc->sc_dev), "rx ring full");
476	evcnt_attach_dynamic(&sc->sc_ev_rxnobuf, EVCNT_TYPE_INTR,
477	    &sc->sc_ev_rxint, device_xname(&sc->sc_dev), "rx malloc failure");
478	evcnt_attach_dynamic(&sc->sc_ev_rxhist[0], EVCNT_TYPE_INTR,
479	    &sc->sc_ev_rxint, device_xname(&sc->sc_dev), "rx 0desc");
480	evcnt_attach_dynamic(&sc->sc_ev_rxhist[1], EVCNT_TYPE_INTR,
481	    &sc->sc_ev_rxint, device_xname(&sc->sc_dev), "rx 1desc");
482	evcnt_attach_dynamic(&sc->sc_ev_rxhist[2], EVCNT_TYPE_INTR,
483	    &sc->sc_ev_rxint, device_xname(&sc->sc_dev), "rx 2desc");
484	evcnt_attach_dynamic(&sc->sc_ev_rxhist[3], EVCNT_TYPE_INTR,
485	    &sc->sc_ev_rxint, device_xname(&sc->sc_dev), "rx 3desc");
486	evcnt_attach_dynamic(&sc->sc_ev_rxhist[4], EVCNT_TYPE_INTR,
487	    &sc->sc_ev_rxint, device_xname(&sc->sc_dev), "rx >3desc");
488	evcnt_attach_dynamic(&sc->sc_ev_rxhist[5], EVCNT_TYPE_INTR,
489	    &sc->sc_ev_rxint, device_xname(&sc->sc_dev), "rx >7desc");
490	evcnt_attach_dynamic(&sc->sc_ev_rxhist[6], EVCNT_TYPE_INTR,
491	    &sc->sc_ev_rxint, device_xname(&sc->sc_dev), "rx >15desc");
492	evcnt_attach_dynamic(&sc->sc_ev_rxhist[7], EVCNT_TYPE_INTR,
493	    &sc->sc_ev_rxint, device_xname(&sc->sc_dev), "rx >31desc");
494	evcnt_attach_dynamic(&sc->sc_ev_rxhist[8], EVCNT_TYPE_INTR,
495	    &sc->sc_ev_rxint, device_xname(&sc->sc_dev), "rx >63desc");
496#endif
497
498#if notyet
499	/*
500	 * Add a suspend hook to make sure we come back up after a
501	 * resume.
502	 */
503	sc->sc_powerhook = powerhook_establish(device_xname(&sc->sc_dev),
504	    gem_power, sc);
505	if (sc->sc_powerhook == NULL)
506		aprint_error_dev(&sc->sc_dev, "WARNING: unable to establish power hook\n");
507#endif
508
509	callout_init(&sc->sc_tick_ch, 0);
510	return;
511
512	/*
513	 * Free any resources we've allocated during the failed attach
514	 * attempt.  Do this in reverse order and fall through.
515	 */
516 fail_7:
517	for (i = 0; i < GEM_NRXDESC; i++) {
518		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
519			bus_dmamap_destroy(sc->sc_dmatag,
520			    sc->sc_rxsoft[i].rxs_dmamap);
521	}
522 fail_6:
523	for (i = 0; i < GEM_TXQUEUELEN; i++) {
524		if (sc->sc_txsoft[i].txs_dmamap != NULL)
525			bus_dmamap_destroy(sc->sc_dmatag,
526			    sc->sc_txsoft[i].txs_dmamap);
527	}
528	bus_dmamap_unload(sc->sc_dmatag, sc->sc_cddmamap);
529 fail_5:
530	bus_dmamap_destroy(sc->sc_dmatag, sc->sc_nulldmamap);
531 fail_4:
532	bus_dmamem_unmap(sc->sc_dmatag, (void *)nullbuf, ETHER_MIN_TX);
533 fail_3:
534	bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cddmamap);
535 fail_2:
536	bus_dmamem_unmap(sc->sc_dmatag, (void *)sc->sc_control_data,
537	    sizeof(struct gem_control_data));
538 fail_1:
539	bus_dmamem_free(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg);
540 fail_0:
541	return;
542}
543
544
545void
gem_tick(void *arg)
{
549	struct gem_softc *sc = arg;
550	int s;
551
552	if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) != 0) {
553		/*
554		 * We have to reset everything if we failed to get a
555		 * PCS interrupt.  Restarting the callout is handled
556		 * in gem_pcs_start().
557		 */
558		gem_init(&sc->sc_ethercom.ec_if);
559	} else {
560		s = splnet();
561		mii_tick(&sc->sc_mii);
562		splx(s);
563		callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
564	}
565}
566
567static int
gem_bitwait(struct gem_softc *sc, bus_space_handle_t h, int r,
    u_int32_t clr, u_int32_t set)
{
575	int i;
576	u_int32_t reg;
577
578	for (i = TRIES; i--; DELAY(100)) {
579		reg = bus_space_read_4(sc->sc_bustag, h, r);
580		if ((reg & clr) == 0 && (reg & set) == set)
581			return (1);
582	}
583	return (0);
584}
585
586void
gem_reset(struct gem_softc *sc)
{
590	bus_space_tag_t t = sc->sc_bustag;
591	bus_space_handle_t h = sc->sc_h2;
592	int s;
593
594	s = splnet();
595	DPRINTF(sc, ("%s: gem_reset\n", device_xname(&sc->sc_dev)));
596	gem_reset_rx(sc);
597	gem_reset_tx(sc);
598
599	/* Do a full reset */
600	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX|GEM_RESET_TX);
601	if (!gem_bitwait(sc, h, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
602		aprint_error_dev(&sc->sc_dev, "cannot reset device\n");
603	splx(s);
604}
605
606
607/*
608 * gem_rxdrain:
609 *
610 *	Drain the receive queue.
611 */
612static void
613gem_rxdrain(struct gem_softc *sc)
614{
615	struct gem_rxsoft *rxs;
616	int i;
617
618	for (i = 0; i < GEM_NRXDESC; i++) {
619		rxs = &sc->sc_rxsoft[i];
620		if (rxs->rxs_mbuf != NULL) {
621			bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
622			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
623			bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap);
624			m_freem(rxs->rxs_mbuf);
625			rxs->rxs_mbuf = NULL;
626		}
627	}
628}
629
630/*
631 * Reset the whole thing.
632 */
633static void
634gem_stop(struct ifnet *ifp, int disable)
635{
636	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
637	struct gem_txsoft *txs;
638
639	DPRINTF(sc, ("%s: gem_stop\n", device_xname(&sc->sc_dev)));
640
641	callout_stop(&sc->sc_tick_ch);
642	if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) != 0)
643		gem_pcs_stop(sc, disable);
644	else
645		mii_down(&sc->sc_mii);
646
647	/* XXX - Should we reset these instead? */
648	gem_disable_tx(sc);
649	gem_disable_rx(sc);
650
651	/*
652	 * Release any queued transmit buffers.
653	 */
654	while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
655		SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
656		if (txs->txs_mbuf != NULL) {
657			bus_dmamap_sync(sc->sc_dmatag, txs->txs_dmamap, 0,
658			    txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
659			bus_dmamap_unload(sc->sc_dmatag, txs->txs_dmamap);
660			m_freem(txs->txs_mbuf);
661			txs->txs_mbuf = NULL;
662		}
663		SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
664	}
665
666	/*
667	 * Mark the interface down and cancel the watchdog timer.
668	 */
669	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
670	sc->sc_if_flags = ifp->if_flags;
671	ifp->if_timer = 0;
672
673	if (disable)
674		gem_rxdrain(sc);
675}
676
677
678/*
679 * Reset the receiver
680 */
681int
682gem_reset_rx(struct gem_softc *sc)
683{
684	bus_space_tag_t t = sc->sc_bustag;
685	bus_space_handle_t h = sc->sc_h1, h2 = sc->sc_h2;
686
687	/*
688	 * Resetting while DMA is in progress can cause a bus hang, so we
689	 * disable DMA first.
690	 */
691	gem_disable_rx(sc);
692	bus_space_write_4(t, h, GEM_RX_CONFIG, 0);
693	bus_space_barrier(t, h, GEM_RX_CONFIG, 4, BUS_SPACE_BARRIER_WRITE);
694	/* Wait till it finishes */
695	if (!gem_bitwait(sc, h, GEM_RX_CONFIG, 1, 0))
		aprint_error_dev(&sc->sc_dev, "cannot disable RX DMA\n");
697
698	/* Finally, reset the ERX */
699	bus_space_write_4(t, h2, GEM_RESET, GEM_RESET_RX);
700	bus_space_barrier(t, h, GEM_RESET, 4, BUS_SPACE_BARRIER_WRITE);
701	/* Wait till it finishes */
702	if (!gem_bitwait(sc, h2, GEM_RESET, GEM_RESET_RX, 0)) {
703		aprint_error_dev(&sc->sc_dev, "cannot reset receiver\n");
704		return (1);
705	}
706	return (0);
707}
708
709
710/*
711 * Reset the receiver DMA engine.
712 *
713 * Intended to be used in case of GEM_INTR_RX_TAG_ERR, GEM_MAC_RX_OVERFLOW
714 * etc in order to reset the receiver DMA engine only and not do a full
715 * reset which amongst others also downs the link and clears the FIFOs.
716 */
717static void
718gem_reset_rxdma(struct gem_softc *sc)
719{
720	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
721	bus_space_tag_t t = sc->sc_bustag;
722	bus_space_handle_t h = sc->sc_h1;
723	int i;
724
725	if (gem_reset_rx(sc) != 0) {
726		gem_init(ifp);
727		return;
728	}
729	for (i = 0; i < GEM_NRXDESC; i++)
730		if (sc->sc_rxsoft[i].rxs_mbuf != NULL)
731			GEM_UPDATE_RXDESC(sc, i);
732	sc->sc_rxptr = 0;
733	GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
734	GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD);
735
736	/* Reprogram Descriptor Ring Base Addresses */
737	/* NOTE: we use only 32-bit DMA addresses here. */
738	bus_space_write_4(t, h, GEM_RX_RING_PTR_HI, 0);
739	bus_space_write_4(t, h, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
740
741	/* Redo ERX Configuration */
742	gem_rx_common(sc);
743
	/* Give the receiver a swift kick */
745	bus_space_write_4(t, h, GEM_RX_KICK, GEM_NRXDESC - 4);
746}
747
748/*
749 * Common RX configuration for gem_init() and gem_reset_rxdma().
750 */
751static void
752gem_rx_common(struct gem_softc *sc)
753{
754	bus_space_tag_t t = sc->sc_bustag;
755	bus_space_handle_t h = sc->sc_h1;
756	u_int32_t v;
757
758	/* Encode Receive Descriptor ring size: four possible values */
759	v = gem_ringsize(GEM_NRXDESC /*XXX*/);
760
761	/* Set receive h/w checksum offset */
762#ifdef INET
763	v |= (ETHER_HDR_LEN + sizeof(struct ip) +
764	    ((sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) ?
765	    ETHER_VLAN_ENCAP_LEN : 0)) << GEM_RX_CONFIG_CXM_START_SHFT;
766#endif
767
768	/* Enable RX DMA */
769	bus_space_write_4(t, h, GEM_RX_CONFIG,
770	    v | (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) |
771	    (2 << GEM_RX_CONFIG_FBOFF_SHFT) | GEM_RX_CONFIG_RXDMA_EN);
772
773	/*
774	 * The following value is for an OFF Threshold of about 3/4 full
775	 * and an ON Threshold of 1/4 full.
776	 */
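	/*
	 * Both threshold fields are in 64-byte units.  For example, with a
	 * 16KB RX FIFO (sc_rxfifosize = 16384) the XOFF threshold below is
	 * 3 * 16384 / 256 = 192 units (12KB) and the XON threshold, in the
	 * field starting at bit 12, is 16384 / 256 = 64 units (4KB).
	 */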
777	bus_space_write_4(t, h, GEM_RX_PAUSE_THRESH,
778	    (3 * sc->sc_rxfifosize / 256) |
779	    ((sc->sc_rxfifosize / 256) << 12));
780	bus_space_write_4(t, h, GEM_RX_BLANKING,
781	    (6 << GEM_RX_BLANKING_TIME_SHIFT) | 6);
782}
783
784/*
785 * Reset the transmitter
786 */
787int
788gem_reset_tx(struct gem_softc *sc)
789{
790	bus_space_tag_t t = sc->sc_bustag;
791	bus_space_handle_t h = sc->sc_h1, h2 = sc->sc_h2;
792
793	/*
794	 * Resetting while DMA is in progress can cause a bus hang, so we
795	 * disable DMA first.
796	 */
797	gem_disable_tx(sc);
798	bus_space_write_4(t, h, GEM_TX_CONFIG, 0);
799	bus_space_barrier(t, h, GEM_TX_CONFIG, 4, BUS_SPACE_BARRIER_WRITE);
800	/* Wait till it finishes */
801	if (!gem_bitwait(sc, h, GEM_TX_CONFIG, 1, 0))
		aprint_error_dev(&sc->sc_dev, "cannot disable TX DMA\n");
803	/* Wait 5ms extra. */
804	delay(5000);
805
806	/* Finally, reset the ETX */
807	bus_space_write_4(t, h2, GEM_RESET, GEM_RESET_TX);
808	bus_space_barrier(t, h, GEM_RESET, 4, BUS_SPACE_BARRIER_WRITE);
809	/* Wait till it finishes */
810	if (!gem_bitwait(sc, h2, GEM_RESET, GEM_RESET_TX, 0)) {
		aprint_error_dev(&sc->sc_dev, "cannot reset transmitter\n");
812		return (1);
813	}
814	return (0);
815}
816
817/*
818 * disable receiver.
819 */
820int
821gem_disable_rx(struct gem_softc *sc)
822{
823	bus_space_tag_t t = sc->sc_bustag;
824	bus_space_handle_t h = sc->sc_h1;
825	u_int32_t cfg;
826
827	/* Flip the enable bit */
828	cfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
829	cfg &= ~GEM_MAC_RX_ENABLE;
830	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, cfg);
831	bus_space_barrier(t, h, GEM_MAC_RX_CONFIG, 4, BUS_SPACE_BARRIER_WRITE);
832	/* Wait for it to finish */
833	return (gem_bitwait(sc, h, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0));
834}
835
836/*
837 * disable transmitter.
838 */
839int
840gem_disable_tx(struct gem_softc *sc)
841{
842	bus_space_tag_t t = sc->sc_bustag;
843	bus_space_handle_t h = sc->sc_h1;
844	u_int32_t cfg;
845
846	/* Flip the enable bit */
847	cfg = bus_space_read_4(t, h, GEM_MAC_TX_CONFIG);
848	cfg &= ~GEM_MAC_TX_ENABLE;
849	bus_space_write_4(t, h, GEM_MAC_TX_CONFIG, cfg);
850	bus_space_barrier(t, h, GEM_MAC_TX_CONFIG, 4, BUS_SPACE_BARRIER_WRITE);
851	/* Wait for it to finish */
852	return (gem_bitwait(sc, h, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0));
853}
854
855/*
856 * Initialize interface.
857 */
858int
859gem_meminit(struct gem_softc *sc)
860{
861	struct gem_rxsoft *rxs;
862	int i, error;
863
864	/*
865	 * Initialize the transmit descriptor ring.
866	 */
867	memset((void *)sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
868	for (i = 0; i < GEM_NTXDESC; i++) {
869		sc->sc_txdescs[i].gd_flags = 0;
870		sc->sc_txdescs[i].gd_addr = 0;
871	}
872	GEM_CDTXSYNC(sc, 0, GEM_NTXDESC,
873	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
874	sc->sc_txfree = GEM_NTXDESC-1;
875	sc->sc_txnext = 0;
876	sc->sc_txwin = 0;
877
878	/*
879	 * Initialize the receive descriptor and receive job
880	 * descriptor rings.
881	 */
882	for (i = 0; i < GEM_NRXDESC; i++) {
883		rxs = &sc->sc_rxsoft[i];
884		if (rxs->rxs_mbuf == NULL) {
885			if ((error = gem_add_rxbuf(sc, i)) != 0) {
886				aprint_error_dev(&sc->sc_dev, "unable to allocate or map rx "
887				    "buffer %d, error = %d\n",
888				    i, error);
889				/*
890				 * XXX Should attempt to run with fewer receive
891				 * XXX buffers instead of just failing.
892				 */
893				gem_rxdrain(sc);
894				return (1);
895			}
896		} else
897			GEM_INIT_RXDESC(sc, i);
898	}
899	sc->sc_rxptr = 0;
900	sc->sc_meminited = 1;
901	GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
902	GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD);
903
904	return (0);
905}
906
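/*
 * Map a descriptor ring size to the hardware encoding used in the size
 * field of the GEM_RX_CONFIG and GEM_TX_CONFIG registers, e.g.
 * gem_ringsize(128) yields GEM_RING_SZ_128.
 */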
907static int
908gem_ringsize(int sz)
909{
910	switch (sz) {
911	case 32:
912		return GEM_RING_SZ_32;
913	case 64:
914		return GEM_RING_SZ_64;
915	case 128:
916		return GEM_RING_SZ_128;
917	case 256:
918		return GEM_RING_SZ_256;
919	case 512:
920		return GEM_RING_SZ_512;
921	case 1024:
922		return GEM_RING_SZ_1024;
923	case 2048:
924		return GEM_RING_SZ_2048;
925	case 4096:
926		return GEM_RING_SZ_4096;
927	case 8192:
928		return GEM_RING_SZ_8192;
929	default:
930		printf("gem: invalid Receive Descriptor ring size %d\n", sz);
931		return GEM_RING_SZ_32;
932	}
933}
934
935
936/*
937 * Start PCS
938 */
939void
940gem_pcs_start(struct gem_softc *sc)
941{
942	bus_space_tag_t t = sc->sc_bustag;
943	bus_space_handle_t h = sc->sc_h1;
944	uint32_t v;
945
946#ifdef GEM_DEBUG
947	aprint_debug_dev(&sc->sc_dev, "gem_pcs_start()\n");
948#endif
949
950	/*
951	 * Set up.  We must disable the MII before modifying the
952	 * GEM_MII_ANAR register
953	 */
954	if (sc->sc_flags & GEM_SERDES) {
955		bus_space_write_4(t, h, GEM_MII_DATAPATH_MODE,
956		    GEM_MII_DATAPATH_SERDES);
957		bus_space_write_4(t, h, GEM_MII_SLINK_CONTROL,
958		    GEM_MII_SLINK_LOOPBACK);
959	} else {
960		bus_space_write_4(t, h, GEM_MII_DATAPATH_MODE,
961		    GEM_MII_DATAPATH_SERIAL);
962		bus_space_write_4(t, h, GEM_MII_SLINK_CONTROL, 0);
963	}
964	bus_space_write_4(t, h, GEM_MII_CONFIG, 0);
965	v = bus_space_read_4(t, h, GEM_MII_ANAR);
966	v |= (GEM_MII_ANEG_SYM_PAUSE | GEM_MII_ANEG_ASYM_PAUSE);
967	if (sc->sc_mii_media == IFM_AUTO)
968		v |= (GEM_MII_ANEG_FUL_DUPLX | GEM_MII_ANEG_HLF_DUPLX);
969	else if (sc->sc_mii_media == IFM_FDX) {
970		v |= GEM_MII_ANEG_FUL_DUPLX;
971		v &= ~GEM_MII_ANEG_HLF_DUPLX;
972	} else if (sc->sc_mii_media == IFM_HDX) {
973		v &= ~GEM_MII_ANEG_FUL_DUPLX;
974		v |= GEM_MII_ANEG_HLF_DUPLX;
975	}
976
977	/* Configure link. */
978	bus_space_write_4(t, h, GEM_MII_ANAR, v);
979	bus_space_write_4(t, h, GEM_MII_CONTROL,
980	    GEM_MII_CONTROL_AUTONEG | GEM_MII_CONTROL_RAN);
981	bus_space_write_4(t, h, GEM_MII_CONFIG, GEM_MII_CONFIG_ENABLE);
982	gem_bitwait(sc, h, GEM_MII_STATUS, 0, GEM_MII_STATUS_ANEG_CPT);
983
984	/* Start the 10 second timer */
985	callout_reset(&sc->sc_tick_ch, hz * 10, gem_tick, sc);
986}
987
988/*
989 * Stop PCS
990 */
991void
992gem_pcs_stop(struct gem_softc *sc, int disable)
993{
994	bus_space_tag_t t = sc->sc_bustag;
995	bus_space_handle_t h = sc->sc_h1;
996
997#ifdef GEM_DEBUG
998	aprint_debug_dev(&sc->sc_dev, "gem_pcs_stop()\n");
999#endif
1000
1001	/* Tell link partner that we're going away */
1002	bus_space_write_4(t, h, GEM_MII_ANAR, GEM_MII_ANEG_RF);
1003
1004	/*
1005	 * Disable PCS MII.  The documentation suggests that setting
1006	 * GEM_MII_CONFIG_ENABLE to zero and then restarting auto-
1007	 * negotiation will shut down the link.  However, it appears
1008	 * that we also need to unset the datapath mode.
1009	 */
1010	bus_space_write_4(t, h, GEM_MII_CONFIG, 0);
1011	bus_space_write_4(t, h, GEM_MII_CONTROL,
1012	    GEM_MII_CONTROL_AUTONEG | GEM_MII_CONTROL_RAN);
1013	bus_space_write_4(t, h, GEM_MII_DATAPATH_MODE, GEM_MII_DATAPATH_MII);
1014	bus_space_write_4(t, h, GEM_MII_CONFIG, 0);
1015
1016	if (disable) {
1017		if (sc->sc_flags & GEM_SERDES)
1018			bus_space_write_4(t, h, GEM_MII_SLINK_CONTROL,
1019				GEM_MII_SLINK_POWER_OFF);
1020		else
1021			bus_space_write_4(t, h, GEM_MII_SLINK_CONTROL,
1022			    GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_POWER_OFF);
1023	}
1024
1025	sc->sc_flags &= ~GEM_LINK;
1026	sc->sc_mii.mii_media_active = IFM_ETHER | IFM_NONE;
1027	sc->sc_mii.mii_media_status = IFM_AVALID;
1028}
1029
1030
1031/*
1032 * Initialization of interface; set up initialization block
1033 * and transmit/receive descriptor rings.
1034 */
1035int
1036gem_init(struct ifnet *ifp)
1037{
1038	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
1039	bus_space_tag_t t = sc->sc_bustag;
1040	bus_space_handle_t h = sc->sc_h1;
1041	int rc = 0, s;
1042	u_int max_frame_size;
1043	u_int32_t v;
1044
1045	s = splnet();
1046
1047	DPRINTF(sc, ("%s: gem_init: calling stop\n", device_xname(&sc->sc_dev)));
1048	/*
1049	 * Initialization sequence. The numbered steps below correspond
1050	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
1051	 * Channel Engine manual (part of the PCIO manual).
1052	 * See also the STP2002-STQ document from Sun Microsystems.
1053	 */
1054
1055	/* step 1 & 2. Reset the Ethernet Channel */
1056	gem_stop(ifp, 0);
1057	gem_reset(sc);
1058	DPRINTF(sc, ("%s: gem_init: restarting\n", device_xname(&sc->sc_dev)));
1059
1060	/* Re-initialize the MIF */
1061	gem_mifinit(sc);
1062
1063	/* Set up correct datapath for non-SERDES/Serialink */
1064	if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) == 0 &&
1065	    sc->sc_variant != GEM_SUN_ERI)
1066		bus_space_write_4(t, h, GEM_MII_DATAPATH_MODE,
1067		    GEM_MII_DATAPATH_MII);
1068
1069	/* Call MI reset function if any */
1070	if (sc->sc_hwreset)
1071		(*sc->sc_hwreset)(sc);
1072
1073	/* step 3. Setup data structures in host memory */
	if (gem_meminit(sc) != 0) {
		splx(s);
		return 1;
	}
1076
1077	/* step 4. TX MAC registers & counters */
1078	gem_init_regs(sc);
1079	max_frame_size = max(sc->sc_ethercom.ec_if.if_mtu, ETHERMTU);
1080	max_frame_size += ETHER_HDR_LEN + ETHER_CRC_LEN;
1081	if (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU)
1082		max_frame_size += ETHER_VLAN_ENCAP_LEN;
1083	bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME,
1084	    max_frame_size|/* burst size */(0x2000<<16));
1085
1086	/* step 5. RX MAC registers & counters */
1087	gem_setladrf(sc);
1088
1089	/* step 6 & 7. Program Descriptor Ring Base Addresses */
1090	/* NOTE: we use only 32-bit DMA addresses here. */
1091	bus_space_write_4(t, h, GEM_TX_RING_PTR_HI, 0);
1092	bus_space_write_4(t, h, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));
1093
1094	bus_space_write_4(t, h, GEM_RX_RING_PTR_HI, 0);
1095	bus_space_write_4(t, h, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
1096
1097	/* step 8. Global Configuration & Interrupt Mask */
1098	if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) != 0)
1099		v = GEM_INTR_PCS;
1100	else
1101		v = GEM_INTR_MIF;
1102	bus_space_write_4(t, h, GEM_INTMASK,
1103		      ~(GEM_INTR_TX_INTME |
1104			GEM_INTR_TX_EMPTY |
1105			GEM_INTR_TX_MAC |
1106			GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF|
1107			GEM_INTR_RX_TAG_ERR | GEM_INTR_MAC_CONTROL|
1108			GEM_INTR_BERR | v));
1109	bus_space_write_4(t, h, GEM_MAC_RX_MASK,
1110			GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT);
1111	bus_space_write_4(t, h, GEM_MAC_TX_MASK, 0xffff); /* XXX */
1112	bus_space_write_4(t, h, GEM_MAC_CONTROL_MASK,
1113	    GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME);
1114
1115	/* step 9. ETX Configuration: use mostly default values */
1116
1117	/* Enable TX DMA */
1118	v = gem_ringsize(GEM_NTXDESC /*XXX*/);
1119	bus_space_write_4(t, h, GEM_TX_CONFIG,
1120		v|GEM_TX_CONFIG_TXDMA_EN|
1121		((0x4FF<<10)&GEM_TX_CONFIG_TXFIFO_TH));
1122	bus_space_write_4(t, h, GEM_TX_KICK, sc->sc_txnext);
1123
1124	/* step 10. ERX Configuration */
1125	gem_rx_common(sc);
1126
1127	/* step 11. Configure Media */
1128	if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) == 0 &&
1129	    (rc = mii_ifmedia_change(&sc->sc_mii)) != 0)
1130		goto out;
1131
1132	/* step 12. RX_MAC Configuration Register */
1133	v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
1134	v |= GEM_MAC_RX_ENABLE | GEM_MAC_RX_STRIP_CRC;
1135	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);
1136
1137	/* step 14. Issue Transmit Pending command */
1138
1139	/* Call MI initialization function if any */
1140	if (sc->sc_hwinit)
1141		(*sc->sc_hwinit)(sc);
1142
1143
	/* step 15.  Give the receiver a swift kick */
1145	bus_space_write_4(t, h, GEM_RX_KICK, GEM_NRXDESC-4);
1146
1147	if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) != 0)
1148		/* Configure PCS */
1149		gem_pcs_start(sc);
1150	else
1151		/* Start the one second timer. */
1152		callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
1153
1154	sc->sc_flags &= ~GEM_LINK;
1155	ifp->if_flags |= IFF_RUNNING;
1156	ifp->if_flags &= ~IFF_OACTIVE;
1157	ifp->if_timer = 0;
1158	sc->sc_if_flags = ifp->if_flags;
1159out:
1160	splx(s);
1161
	return (rc);
1163}
1164
1165void
1166gem_init_regs(struct gem_softc *sc)
1167{
1168	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1169	bus_space_tag_t t = sc->sc_bustag;
1170	bus_space_handle_t h = sc->sc_h1;
1171	const u_char *laddr = CLLADDR(ifp->if_sadl);
1172	u_int32_t v;
1173
1174	/* These regs are not cleared on reset */
1175	if (!sc->sc_inited) {
1176
1177		/* Load recommended values */
1178		bus_space_write_4(t, h, GEM_MAC_IPG0, 0x00);
1179		bus_space_write_4(t, h, GEM_MAC_IPG1, 0x08);
1180		bus_space_write_4(t, h, GEM_MAC_IPG2, 0x04);
1181
1182		bus_space_write_4(t, h, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
1183		/* Max frame and max burst size */
1184		bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME,
1185		    ETHER_MAX_LEN | (0x2000<<16));
1186
1187		bus_space_write_4(t, h, GEM_MAC_PREAMBLE_LEN, 0x07);
1188		bus_space_write_4(t, h, GEM_MAC_JAM_SIZE, 0x04);
1189		bus_space_write_4(t, h, GEM_MAC_ATTEMPT_LIMIT, 0x10);
1190		bus_space_write_4(t, h, GEM_MAC_CONTROL_TYPE, 0x8088);
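		/* Seed the backoff RNG with the low 10 bits of the MAC address. */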
1191		bus_space_write_4(t, h, GEM_MAC_RANDOM_SEED,
1192		    ((laddr[5]<<8)|laddr[4])&0x3ff);
1193
1194		/* Secondary MAC addr set to 0:0:0:0:0:0 */
1195		bus_space_write_4(t, h, GEM_MAC_ADDR3, 0);
1196		bus_space_write_4(t, h, GEM_MAC_ADDR4, 0);
1197		bus_space_write_4(t, h, GEM_MAC_ADDR5, 0);
1198
1199		/* MAC control addr set to 01:80:c2:00:00:01 */
1200		bus_space_write_4(t, h, GEM_MAC_ADDR6, 0x0001);
1201		bus_space_write_4(t, h, GEM_MAC_ADDR7, 0xc200);
1202		bus_space_write_4(t, h, GEM_MAC_ADDR8, 0x0180);
1203
1204		/* MAC filter addr set to 0:0:0:0:0:0 */
1205		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER0, 0);
1206		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER1, 0);
1207		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER2, 0);
1208
1209		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK1_2, 0);
1210		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK0, 0);
1211
1212		sc->sc_inited = 1;
1213	}
1214
1215	/* Counters need to be zeroed */
1216	bus_space_write_4(t, h, GEM_MAC_NORM_COLL_CNT, 0);
1217	bus_space_write_4(t, h, GEM_MAC_FIRST_COLL_CNT, 0);
1218	bus_space_write_4(t, h, GEM_MAC_EXCESS_COLL_CNT, 0);
1219	bus_space_write_4(t, h, GEM_MAC_LATE_COLL_CNT, 0);
1220	bus_space_write_4(t, h, GEM_MAC_DEFER_TMR_CNT, 0);
1221	bus_space_write_4(t, h, GEM_MAC_PEAK_ATTEMPTS, 0);
1222	bus_space_write_4(t, h, GEM_MAC_RX_FRAME_COUNT, 0);
1223	bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0);
1224	bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0);
1225	bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0);
1226	bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0);
1227
1228	/* Set XOFF PAUSE time. */
1229	bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0x1BF0);
1230
1231	/*
1232	 * Set the internal arbitration to "infinite" bursts of the
1233	 * maximum length of 31 * 64 bytes so DMA transfers aren't
	 * split up into cache-line-sized chunks.  This greatly improves
	 * RX performance in particular.
1236	 * Enable silicon bug workarounds for the Apple variants.
1237	 */
1238	bus_space_write_4(t, h, GEM_CONFIG,
1239	    GEM_CONFIG_TXDMA_LIMIT | GEM_CONFIG_RXDMA_LIMIT |
1240	    ((sc->sc_flags & GEM_PCI) ?
1241	    GEM_CONFIG_BURST_INF : GEM_CONFIG_BURST_64) | (GEM_IS_APPLE(sc) ?
1242	    GEM_CONFIG_RONPAULBIT | GEM_CONFIG_BUG2FIX : 0));
1243
1244	/*
1245	 * Set the station address.
1246	 */
1247	bus_space_write_4(t, h, GEM_MAC_ADDR0, (laddr[4]<<8)|laddr[5]);
1248	bus_space_write_4(t, h, GEM_MAC_ADDR1, (laddr[2]<<8)|laddr[3]);
1249	bus_space_write_4(t, h, GEM_MAC_ADDR2, (laddr[0]<<8)|laddr[1]);
1250
1251	/*
1252	 * Enable MII outputs.  Enable GMII if there is a gigabit PHY.
1253	 */
1254	sc->sc_mif_config = bus_space_read_4(t, h, GEM_MIF_CONFIG);
1255	v = GEM_MAC_XIF_TX_MII_ENA;
1256	if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) == 0)  {
1257		if (sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) {
1258			v |= GEM_MAC_XIF_FDPLX_LED;
			if (sc->sc_flags & GEM_GIGABIT)
				v |= GEM_MAC_XIF_GMII_MODE;
1261		}
1262	} else {
1263		v |= GEM_MAC_XIF_GMII_MODE;
1264	}
1265	bus_space_write_4(t, h, GEM_MAC_XIF_CONFIG, v);
1266}
1267
1268#ifdef GEM_DEBUG
1269static void
1270gem_txsoft_print(const struct gem_softc *sc, int firstdesc, int lastdesc)
1271{
1272	int i;
1273
1274	for (i = firstdesc;; i = GEM_NEXTTX(i)) {
1275		printf("descriptor %d:\t", i);
1276		printf("gd_flags:   0x%016" PRIx64 "\t",
1277			GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_flags));
1278		printf("gd_addr: 0x%016" PRIx64 "\n",
1279			GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_addr));
1280		if (i == lastdesc)
1281			break;
1282	}
1283}
1284#endif
1285
1286static void
gem_start(struct ifnet *ifp)
{
1290	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
1291	struct mbuf *m0, *m;
1292	struct gem_txsoft *txs;
1293	bus_dmamap_t dmamap;
1294	int error, firsttx, nexttx = -1, lasttx = -1, ofree, seg;
1295	uint64_t flags = 0;
1296
1297	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
1298		return;
1299
1300	/*
1301	 * Remember the previous number of free descriptors and
1302	 * the first descriptor we'll use.
1303	 */
1304	ofree = sc->sc_txfree;
1305	firsttx = sc->sc_txnext;
1306
1307	DPRINTF(sc, ("%s: gem_start: txfree %d, txnext %d\n",
1308	    device_xname(&sc->sc_dev), ofree, firsttx));
1309
1310	/*
1311	 * Loop through the send queue, setting up transmit descriptors
1312	 * until we drain the queue, or use up all available transmit
1313	 * descriptors.
1314	 */
1315	while ((txs = SIMPLEQ_FIRST(&sc->sc_txfreeq)) != NULL &&
1316	    sc->sc_txfree != 0) {
1317		/*
1318		 * Grab a packet off the queue.
1319		 */
1320		IFQ_POLL(&ifp->if_snd, m0);
1321		if (m0 == NULL)
1322			break;
1323		m = NULL;
1324
1325		dmamap = txs->txs_dmamap;
1326
1327		/*
1328		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we were
1330		 * short on resources.  In this case, we'll copy and try
1331		 * again.
1332		 */
1333		if (bus_dmamap_load_mbuf(sc->sc_dmatag, dmamap, m0,
1334		      BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0 ||
1335		      (m0->m_pkthdr.len < ETHER_MIN_TX &&
1336		       dmamap->dm_nsegs == GEM_NTXSEGS)) {
1337			if (m0->m_pkthdr.len > MCLBYTES) {
1338				aprint_error_dev(&sc->sc_dev, "unable to allocate jumbo Tx "
1339				    "cluster\n");
1340				IFQ_DEQUEUE(&ifp->if_snd, m0);
1341				m_freem(m0);
1342				continue;
1343			}
1344			MGETHDR(m, M_DONTWAIT, MT_DATA);
1345			if (m == NULL) {
1346				aprint_error_dev(&sc->sc_dev, "unable to allocate Tx mbuf\n");
1347				break;
1348			}
1349			MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner);
1350			if (m0->m_pkthdr.len > MHLEN) {
1351				MCLGET(m, M_DONTWAIT);
1352				if ((m->m_flags & M_EXT) == 0) {
1353					aprint_error_dev(&sc->sc_dev, "unable to allocate Tx "
1354					    "cluster\n");
1355					m_freem(m);
1356					break;
1357				}
1358			}
1359			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
1360			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
1361			error = bus_dmamap_load_mbuf(sc->sc_dmatag, dmamap,
1362			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
1363			if (error) {
1364				aprint_error_dev(&sc->sc_dev, "unable to load Tx buffer, "
1365				    "error = %d\n", error);
1366				break;
1367			}
1368		}
1369
1370		/*
1371		 * Ensure we have enough descriptors free to describe
1372		 * the packet.
1373		 */
1374		if (dmamap->dm_nsegs > ((m0->m_pkthdr.len < ETHER_MIN_TX) ?
1375		     (sc->sc_txfree - 1) : sc->sc_txfree)) {
1376			/*
1377			 * Not enough free descriptors to transmit this
1378			 * packet.  We haven't committed to anything yet,
1379			 * so just unload the DMA map, put the packet
1380			 * back on the queue, and punt.  Notify the upper
1381			 * layer that there are no more slots left.
1382			 *
1383			 * XXX We could allocate an mbuf and copy, but
			 * XXX is it worth it?
1385			 */
1386			ifp->if_flags |= IFF_OACTIVE;
1387			sc->sc_if_flags = ifp->if_flags;
1388			bus_dmamap_unload(sc->sc_dmatag, dmamap);
1389			if (m != NULL)
1390				m_freem(m);
1391			break;
1392		}
1393
1394		IFQ_DEQUEUE(&ifp->if_snd, m0);
1395		if (m != NULL) {
1396			m_freem(m0);
1397			m0 = m;
1398		}
1399
1400		/*
1401		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
1402		 */
1403
1404		/* Sync the DMA map. */
1405		bus_dmamap_sync(sc->sc_dmatag, dmamap, 0, dmamap->dm_mapsize,
1406		    BUS_DMASYNC_PREWRITE);
1407
1408		/*
1409		 * Initialize the transmit descriptors.
1410		 */
1411		for (nexttx = sc->sc_txnext, seg = 0;
1412		     seg < dmamap->dm_nsegs;
1413		     seg++, nexttx = GEM_NEXTTX(nexttx)) {
1414
1415			/*
1416			 * If this is the first descriptor we're
1417			 * enqueueing, set the start of packet flag,
1418			 * and the checksum stuff if we want the hardware
1419			 * to do it.
1420			 */
1421			sc->sc_txdescs[nexttx].gd_addr =
1422			    GEM_DMA_WRITE(sc, dmamap->dm_segs[seg].ds_addr);
1423			flags = dmamap->dm_segs[seg].ds_len & GEM_TD_BUFSIZE;
1424			if (nexttx == firsttx) {
1425				flags |= GEM_TD_START_OF_PACKET;
1426				if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) {
1427					sc->sc_txwin = 0;
1428					flags |= GEM_TD_INTERRUPT_ME;
1429				}
1430
1431#ifdef INET
1432				/* h/w checksum */
1433				if (ifp->if_csum_flags_tx & M_CSUM_TCPv4 &&
1434				    m0->m_pkthdr.csum_flags & M_CSUM_TCPv4) {
1435					struct ether_header *eh;
1436					uint16_t offset, start;
1437
1438					eh = mtod(m0, struct ether_header *);
1439					switch (ntohs(eh->ether_type)) {
1440					case ETHERTYPE_IP:
1441						start = ETHER_HDR_LEN;
1442						break;
1443					case ETHERTYPE_VLAN:
1444						start = ETHER_HDR_LEN +
1445							ETHER_VLAN_ENCAP_LEN;
1446						break;
1447					default:
1448						/* unsupported, drop it */
1449						m_free(m0);
1450						continue;
1451					}
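					/*
					 * E.g. for a plain TCPv4 packet with
					 * no IP options, start becomes
					 * 14 + 20 = 34 and offset becomes
					 * 34 + 16 = 50, the location of the
					 * TCP checksum field in the frame.
					 */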
1452					start += M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
1453					offset = M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data) + start;
1454					flags |= (start <<
1455						  GEM_TD_CXSUM_STARTSHFT) |
1456						 (offset <<
1457						  GEM_TD_CXSUM_STUFFSHFT) |
1458						 GEM_TD_CXSUM_ENABLE;
1459				}
1460#endif
1461			}
1462			if (seg == dmamap->dm_nsegs - 1) {
1463				flags |= GEM_TD_END_OF_PACKET;
1464			} else {
1465				/* last flag set outside of loop */
1466				sc->sc_txdescs[nexttx].gd_flags =
1467					GEM_DMA_WRITE(sc, flags);
1468			}
1469			lasttx = nexttx;
1470		}
1471		if (m0->m_pkthdr.len < ETHER_MIN_TX) {
1472			/* add padding buffer at end of chain */
1473			flags &= ~GEM_TD_END_OF_PACKET;
1474			sc->sc_txdescs[lasttx].gd_flags =
1475			    GEM_DMA_WRITE(sc, flags);
1476
1477			sc->sc_txdescs[nexttx].gd_addr =
1478			    GEM_DMA_WRITE(sc,
1479			    sc->sc_nulldmamap->dm_segs[0].ds_addr);
1480			flags = ((ETHER_MIN_TX - m0->m_pkthdr.len) &
1481			    GEM_TD_BUFSIZE) | GEM_TD_END_OF_PACKET;
1482			lasttx = nexttx;
1483			nexttx = GEM_NEXTTX(nexttx);
1484			seg++;
1485		}
1486		sc->sc_txdescs[lasttx].gd_flags = GEM_DMA_WRITE(sc, flags);
1487
1488		KASSERT(lasttx != -1);
1489
1490		/*
1491		 * Store a pointer to the packet so we can free it later,
1492		 * and remember what txdirty will be once the packet is
1493		 * done.
1494		 */
1495		txs->txs_mbuf = m0;
1496		txs->txs_firstdesc = sc->sc_txnext;
1497		txs->txs_lastdesc = lasttx;
1498		txs->txs_ndescs = seg;
1499
1500#ifdef GEM_DEBUG
1501		if (ifp->if_flags & IFF_DEBUG) {
1502			printf("     gem_start %p transmit chain:\n", txs);
1503			gem_txsoft_print(sc, txs->txs_firstdesc,
1504			    txs->txs_lastdesc);
1505		}
1506#endif
1507
1508		/* Sync the descriptors we're using. */
1509		GEM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndescs,
1510		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1511
1512		/* Advance the tx pointer. */
1513		sc->sc_txfree -= txs->txs_ndescs;
1514		sc->sc_txnext = nexttx;
1515
1516		SIMPLEQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
1517		SIMPLEQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
1518
1519#if NBPFILTER > 0
1520		/*
1521		 * Pass the packet to any BPF listeners.
1522		 */
1523		if (ifp->if_bpf)
1524			bpf_mtap(ifp->if_bpf, m0);
1525#endif /* NBPFILTER > 0 */
1526	}
1527
1528	if (txs == NULL || sc->sc_txfree == 0) {
1529		/* No more slots left; notify upper layer. */
1530		ifp->if_flags |= IFF_OACTIVE;
1531		sc->sc_if_flags = ifp->if_flags;
1532	}
1533
1534	if (sc->sc_txfree != ofree) {
1535		DPRINTF(sc, ("%s: packets enqueued, IC on %d, OWN on %d\n",
1536		    device_xname(&sc->sc_dev), lasttx, firsttx));
1537		/*
1538		 * The entire packet chain is set up.
1539		 * Kick the transmitter.
1540		 */
1541		DPRINTF(sc, ("%s: gem_start: kicking tx %d\n",
1542			device_xname(&sc->sc_dev), nexttx));
1543		bus_space_write_4(sc->sc_bustag, sc->sc_h1, GEM_TX_KICK,
1544			sc->sc_txnext);
1545
1546		/* Set a watchdog timer in case the chip flakes out. */
1547		ifp->if_timer = 5;
1548		DPRINTF(sc, ("%s: gem_start: watchdog %d\n",
1549			device_xname(&sc->sc_dev), ifp->if_timer));
1550	}
1551}
1552
1553/*
1554 * Transmit interrupt.
1555 */
1556int
gem_tint(struct gem_softc *sc)
{
1560	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1561	bus_space_tag_t t = sc->sc_bustag;
1562	bus_space_handle_t mac = sc->sc_h1;
1563	struct gem_txsoft *txs;
1564	int txlast;
1565	int progress = 0;
1566	u_int32_t v;
1567
1568	DPRINTF(sc, ("%s: gem_tint\n", device_xname(&sc->sc_dev)));
1569
1570	/* Unload collision counters ... */
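	/*
	 * Excess and late collisions mean the frame was dropped, so they are
	 * counted as output errors as well as collisions below.
	 */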
1571	v = bus_space_read_4(t, mac, GEM_MAC_EXCESS_COLL_CNT) +
1572	    bus_space_read_4(t, mac, GEM_MAC_LATE_COLL_CNT);
1573	ifp->if_collisions += v +
1574	    bus_space_read_4(t, mac, GEM_MAC_NORM_COLL_CNT) +
1575	    bus_space_read_4(t, mac, GEM_MAC_FIRST_COLL_CNT);
1576	ifp->if_oerrors += v;
1577
1578	/* ... then clear the hardware counters. */
1579	bus_space_write_4(t, mac, GEM_MAC_NORM_COLL_CNT, 0);
1580	bus_space_write_4(t, mac, GEM_MAC_FIRST_COLL_CNT, 0);
1581	bus_space_write_4(t, mac, GEM_MAC_EXCESS_COLL_CNT, 0);
1582	bus_space_write_4(t, mac, GEM_MAC_LATE_COLL_CNT, 0);
1583
1584	/*
1585	 * Go through our Tx list and free mbufs for those
1586	 * frames that have been transmitted.
1587	 */
1588	while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
1589		/*
1590		 * In theory, we could harvest some descriptors before
1591		 * the ring is empty, but that's a bit complicated.
1592		 *
1593		 * GEM_TX_COMPLETION points to the last descriptor
1594		 * processed +1.
1595		 *
1596		 * Let's assume that the NIC writes back to the Tx
1597		 * descriptors before it updates the completion
1598		 * register.  If the NIC has posted writes to the
1599		 * Tx descriptors, PCI ordering requires that the
1600		 * posted writes flush to RAM before the register-read
1601		 * finishes.  So let's read the completion register,
1602		 * before syncing the descriptors, so that we
1603		 * examine Tx descriptors that are at least as
1604		 * current as the completion register.
1605		 */
1606		txlast = bus_space_read_4(t, mac, GEM_TX_COMPLETION);
1607		DPRINTF(sc,
1608			("gem_tint: txs->txs_lastdesc = %d, txlast = %d\n",
1609				txs->txs_lastdesc, txlast));
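		/*
		 * Stop if txlast still lies within this frame's descriptor
		 * span.  When txs_firstdesc > txs_lastdesc the span wraps
		 * past the end of the ring, so "within" means at or after
		 * txs_firstdesc or at or before txs_lastdesc.
		 */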
1610		if (txs->txs_firstdesc <= txs->txs_lastdesc) {
1611			if (txlast >= txs->txs_firstdesc &&
1612			    txlast <= txs->txs_lastdesc)
1613				break;
1614		} else if (txlast >= txs->txs_firstdesc ||
1615			   txlast <= txs->txs_lastdesc)
1616			break;
1617
1618		GEM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndescs,
1619		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1620
1621#ifdef GEM_DEBUG	/* XXX DMA synchronization? */
1622		if (ifp->if_flags & IFF_DEBUG) {
1623			printf("    txsoft %p transmit chain:\n", txs);
1624			gem_txsoft_print(sc, txs->txs_firstdesc,
1625			    txs->txs_lastdesc);
1626		}
1627#endif
1628
1629
1630		DPRINTF(sc, ("gem_tint: releasing a desc\n"));
1631		SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
1632
1633		sc->sc_txfree += txs->txs_ndescs;
1634
1635		bus_dmamap_sync(sc->sc_dmatag, txs->txs_dmamap,
1636		    0, txs->txs_dmamap->dm_mapsize,
1637		    BUS_DMASYNC_POSTWRITE);
1638		bus_dmamap_unload(sc->sc_dmatag, txs->txs_dmamap);
1639		if (txs->txs_mbuf != NULL) {
1640			m_freem(txs->txs_mbuf);
1641			txs->txs_mbuf = NULL;
1642		}
1643
1644		SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
1645
1646		ifp->if_opackets++;
1647		progress = 1;
1648	}
1649
1650#if 0
1651	DPRINTF(sc, ("gem_tint: GEM_TX_STATE_MACHINE %x "
1652		"GEM_TX_DATA_PTR %" PRIx64 "GEM_TX_COMPLETION %" PRIx32 "\n",
1653		bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_TX_STATE_MACHINE),
1654		((uint64_t)bus_space_read_4(sc->sc_bustag, sc->sc_h1,
1655			GEM_TX_DATA_PTR_HI) << 32) |
1656			     bus_space_read_4(sc->sc_bustag, sc->sc_h1,
1657			GEM_TX_DATA_PTR_LO),
1658		bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_TX_COMPLETION)));
1659#endif
1660
1661	if (progress) {
1662		if (sc->sc_txfree == GEM_NTXDESC - 1)
1663			sc->sc_txwin = 0;
1664
1665		/* Freed some descriptors, so reset IFF_OACTIVE and restart. */
1666		ifp->if_flags &= ~IFF_OACTIVE;
1667		sc->sc_if_flags = ifp->if_flags;
1668		ifp->if_timer = SIMPLEQ_EMPTY(&sc->sc_txdirtyq) ? 0 : 5;
1669		gem_start(ifp);
1670	}
1671	DPRINTF(sc, ("%s: gem_tint: watchdog %d\n",
1672		device_xname(&sc->sc_dev), ifp->if_timer));
1673
1674	return (1);
1675}
1676
1677/*
1678 * Receive interrupt.
1679 */
1680int
gem_rint(struct gem_softc *sc)
{
1684	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1685	bus_space_tag_t t = sc->sc_bustag;
1686	bus_space_handle_t h = sc->sc_h1;
1687	struct gem_rxsoft *rxs;
1688	struct mbuf *m;
1689	u_int64_t rxstat;
1690	u_int32_t rxcomp;
1691	int i, len, progress = 0;
1692
1693	DPRINTF(sc, ("%s: gem_rint\n", device_xname(&sc->sc_dev)));
1694
1695	/*
1696	 * Ignore spurious interrupt that sometimes occurs before
1697	 * we are set up when we network boot.
1698	 */
1699	if (!sc->sc_meminited)
1700		return 1;
1701
1702	/*
1703	 * Read the completion register once.  This limits
1704	 * how long the following loop can execute.
1705	 */
1706	rxcomp = bus_space_read_4(t, h, GEM_RX_COMPLETION);
1707
1708	/*
1709	 * XXX Read the lastrx only once at the top for speed.
1710	 */
1711	DPRINTF(sc, ("gem_rint: sc->rxptr %d, complete %d\n",
1712		sc->sc_rxptr, rxcomp));
1713
1714	/*
1715	 * Go into the loop at least once.
1716	 */
1717	for (i = sc->sc_rxptr; i == sc->sc_rxptr || i != rxcomp;
1718	     i = GEM_NEXTRX(i)) {
1719		rxs = &sc->sc_rxsoft[i];
1720
1721		GEM_CDRXSYNC(sc, i,
1722		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1723
1724		rxstat = GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags);
1725
1726		if (rxstat & GEM_RD_OWN) {
1727			GEM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
1728			/*
1729			 * We have processed all of the receive buffers.
1730			 */
1731			break;
1732		}
1733
1734		progress++;
1735		ifp->if_ipackets++;
1736
1737		if (rxstat & GEM_RD_BAD_CRC) {
1738			ifp->if_ierrors++;
1739			aprint_error_dev(&sc->sc_dev, "receive error: CRC error\n");
1740			GEM_INIT_RXDESC(sc, i);
1741			continue;
1742		}
1743
1744		bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
1745		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1746#ifdef GEM_DEBUG
1747		if (ifp->if_flags & IFF_DEBUG) {
1748			printf("    rxsoft %p descriptor %d: ", rxs, i);
1749			printf("gd_flags: 0x%016llx\t", (long long)
1750				GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags));
1751			printf("gd_addr: 0x%016llx\n", (long long)
1752				GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_addr));
1753		}
1754#endif
1755
1756		/* No errors; receive the packet. */
1757		len = GEM_RD_BUFLEN(rxstat);
1758
1759		/*
1760		 * Allocate a new mbuf cluster.  If that fails, we are
1761		 * out of memory, and must drop the packet and recycle
1762		 * the buffer that's already attached to this descriptor.
1763		 */
1764		m = rxs->rxs_mbuf;
1765		if (gem_add_rxbuf(sc, i) != 0) {
1766			GEM_COUNTER_INCR(sc, sc_ev_rxnobuf);
1767			ifp->if_ierrors++;
1768			GEM_INIT_RXDESC(sc, i);
1769			bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
1770			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1771			continue;
1772		}
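		/*
		 * The chip writes each frame 2 bytes into its buffer (see the
		 * GEM_RX_CONFIG_FBOFF_SHFT setting in gem_rx_common()), so the
		 * 14-byte Ethernet header ends on a 4-byte boundary and the IP
		 * header is aligned; account for that offset here.
		 */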
1773		m->m_data += 2; /* We're already off by two */
1774
1775		m->m_pkthdr.rcvif = ifp;
1776		m->m_pkthdr.len = m->m_len = len;
1777
1778#if NBPFILTER > 0
1779		/*
1780		 * Pass this up to any BPF listeners, but only
1781		 * pass it up the stack if it's for us.
1782		 */
1783		if (ifp->if_bpf)
1784			bpf_mtap(ifp->if_bpf, m);
1785#endif /* NBPFILTER > 0 */
1786
1787#ifdef INET
1788		/* hardware checksum */
1789		if (ifp->if_csum_flags_rx & M_CSUM_TCPv4) {
1790			struct ether_header *eh;
1791			struct ip *ip;
1792			int32_t hlen, pktlen;
1793
1794			if (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) {
1795				pktlen = m->m_pkthdr.len - ETHER_HDR_LEN -
1796					 ETHER_VLAN_ENCAP_LEN;
1797				eh = (struct ether_header *) (mtod(m, char *) +
1798					ETHER_VLAN_ENCAP_LEN);
1799			} else {
1800				pktlen = m->m_pkthdr.len - ETHER_HDR_LEN;
1801				eh = mtod(m, struct ether_header *);
1802			}
1803			if (ntohs(eh->ether_type) != ETHERTYPE_IP)
1804				goto swcsum;
1805			ip = (struct ip *) ((char *)eh + ETHER_HDR_LEN);
1806
1807			/* IPv4 only */
1808			if (ip->ip_v != IPVERSION)
1809				goto swcsum;
1810
1811			hlen = ip->ip_hl << 2;
1812			if (hlen < sizeof(struct ip))
1813				goto swcsum;
1814
1815			/*
			 * Bail if the packet is too short, truncated, a
			 * fragment, or has trailing garbage or Ethernet pad.
1818			 */
1819			if ((ntohs(ip->ip_len) < hlen) ||
1820			    (ntohs(ip->ip_len) != pktlen) ||
1821			    (ntohs(ip->ip_off) & (IP_MF | IP_OFFMASK)))
1822				goto swcsum;
1823
1824			switch (ip->ip_p) {
1825			case IPPROTO_TCP:
1826				if (! (ifp->if_csum_flags_rx & M_CSUM_TCPv4))
1827					goto swcsum;
1828				if (pktlen < (hlen + sizeof(struct tcphdr)))
1829					goto swcsum;
1830				m->m_pkthdr.csum_flags = M_CSUM_TCPv4;
1831				break;
1832			case IPPROTO_UDP:
1833				/* FALLTHROUGH */
1834			default:
1835				goto swcsum;
1836			}
1837
			/*
			 * The chip supplies the complemented checksum; the
			 * stack expects the uncomplemented sum, so invert it.
			 */
1839			m->m_pkthdr.csum_data = (~rxstat) & GEM_RD_CHECKSUM;
1840
1841			/* if the pkt had ip options, we have to deduct them */
1842			if (hlen > sizeof(struct ip)) {
1843				uint16_t *opts;
1844				uint32_t optsum, temp;
1845
1846				optsum = 0;
1847				temp = hlen - sizeof(struct ip);
1848				opts = (uint16_t *) ((char *) ip +
1849					sizeof(struct ip));
1850
1851				while (temp > 1) {
1852					optsum += ntohs(*opts++);
1853					temp -= 2;
1854				}
1855				while (optsum >> 16)
1856					optsum = (optsum >> 16) +
1857						 (optsum & 0xffff);
1858
1859				/* Deduct ip opts sum from hwsum (rfc 1624). */
1860				m->m_pkthdr.csum_data =
1861					~((~m->m_pkthdr.csum_data) - ~optsum);
1862
1863				while (m->m_pkthdr.csum_data >> 16)
1864					m->m_pkthdr.csum_data =
1865						(m->m_pkthdr.csum_data >> 16) +
1866						(m->m_pkthdr.csum_data &
1867						 0xffff);
1868			}
1869
1870			m->m_pkthdr.csum_flags |= M_CSUM_DATA |
1871						  M_CSUM_NO_PSEUDOHDR;
1872		} else
1873swcsum:
1874			m->m_pkthdr.csum_flags = 0;
1875#endif
1876		/* Pass it on. */
1877		(*ifp->if_input)(ifp, m);
1878	}
1879
1880	if (progress) {
1881		/* Update the receive pointer. */
1882		if (i == sc->sc_rxptr) {
1883			GEM_COUNTER_INCR(sc, sc_ev_rxfull);
1884#ifdef GEM_DEBUG
1885			if (ifp->if_flags & IFF_DEBUG)
1886				printf("%s: rint: ring wrap\n",
1887				    device_xname(&sc->sc_dev));
1888#endif
1889		}
1890		sc->sc_rxptr = i;
1891		bus_space_write_4(t, h, GEM_RX_KICK, GEM_PREVRX(i));
1892	}
1893#ifdef GEM_COUNTERS
1894	if (progress <= 4) {
1895		GEM_COUNTER_INCR(sc, sc_ev_rxhist[progress]);
1896	} else if (progress < 32) {
1897		if (progress < 16)
1898			GEM_COUNTER_INCR(sc, sc_ev_rxhist[5]);
1899		else
1900			GEM_COUNTER_INCR(sc, sc_ev_rxhist[6]);
1901
1902	} else {
1903		if (progress < 64)
1904			GEM_COUNTER_INCR(sc, sc_ev_rxhist[7]);
1905		else
1906			GEM_COUNTER_INCR(sc, sc_ev_rxhist[8]);
1907	}
1908#endif
1909
1910	DPRINTF(sc, ("gem_rint: done sc->rxptr %d, complete %d\n",
1911		sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION)));
1912
1913	/* Read error counters ... */
1914	ifp->if_ierrors +=
1915	    bus_space_read_4(t, h, GEM_MAC_RX_LEN_ERR_CNT) +
1916	    bus_space_read_4(t, h, GEM_MAC_RX_ALIGN_ERR) +
1917	    bus_space_read_4(t, h, GEM_MAC_RX_CRC_ERR_CNT) +
1918	    bus_space_read_4(t, h, GEM_MAC_RX_CODE_VIOL);
1919
1920	/* ... then clear the hardware counters. */
1921	bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0);
1922	bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0);
1923	bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0);
1924	bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0);
1925
1926	return (1);
1927}
1928
1929
1930/*
1931 * gem_add_rxbuf:
1932 *
1933 *	Add a receive buffer to the indicated descriptor.
1934 */
1935int
1936gem_add_rxbuf(struct gem_softc *sc, int idx)
1937{
1938	struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
1939	struct mbuf *m;
1940	int error;
1941
1942	MGETHDR(m, M_DONTWAIT, MT_DATA);
1943	if (m == NULL)
1944		return (ENOBUFS);
1945
1946	MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);
1947	MCLGET(m, M_DONTWAIT);
1948	if ((m->m_flags & M_EXT) == 0) {
1949		m_freem(m);
1950		return (ENOBUFS);
1951	}
1952
1953#ifdef GEM_DEBUG
	/* Zero the buffer so DMA problems are easier to spot. */
1955	memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
1956#endif
1957
1958	if (rxs->rxs_mbuf != NULL)
1959		bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap);
1960
1961	rxs->rxs_mbuf = m;
1962
1963	error = bus_dmamap_load(sc->sc_dmatag, rxs->rxs_dmamap,
1964	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
1965	    BUS_DMA_READ|BUS_DMA_NOWAIT);
1966	if (error) {
1967		aprint_error_dev(&sc->sc_dev, "can't load rx DMA map %d, error = %d\n",
1968		    idx, error);
1969		panic("gem_add_rxbuf");	/* XXX */
1970	}
1971
1972	bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
1973	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1974
1975	GEM_INIT_RXDESC(sc, idx);
1976
1977	return (0);
1978}
1979
1980
1981int
1982gem_eint(struct gem_softc *sc, u_int status)
1983{
1984	char bits[128];
1985	u_int32_t r, v;
1986
1987	if ((status & GEM_INTR_MIF) != 0) {
1988		printf("%s: XXXlink status changed\n", device_xname(&sc->sc_dev));
1989		return (1);
1990	}
1991
1992	if ((status & GEM_INTR_RX_TAG_ERR) != 0) {
1993		gem_reset_rxdma(sc);
1994		return (1);
1995	}
1996
1997	if (status & GEM_INTR_BERR) {
1998		if (sc->sc_flags & GEM_PCI)
1999			r = GEM_ERROR_STATUS;
2000		else
2001			r = GEM_SBUS_ERROR_STATUS;
2002		bus_space_read_4(sc->sc_bustag, sc->sc_h2, r);
2003		v = bus_space_read_4(sc->sc_bustag, sc->sc_h2, r);
2004		aprint_error_dev(&sc->sc_dev, "bus error interrupt: 0x%02x\n",
2005		    v);
2006		return (1);
2007	}
2008
2009	printf("%s: status=%s\n", device_xname(&sc->sc_dev),
2010		bitmask_snprintf(status, GEM_INTR_BITS, bits, sizeof(bits)));
2011	return (1);
2012}
2013
2014
2015/*
2016 * PCS interrupts.
2017 * We should receive these when the link status changes, but sometimes
2018 * we don't receive them for link up.  We compensate for this in the
2019 * gem_tick() callout.
2020 */
2021int
2022gem_pint(struct gem_softc *sc)
2023{
2024	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2025	bus_space_tag_t t = sc->sc_bustag;
2026	bus_space_handle_t h = sc->sc_h1;
2027	u_int32_t v, v2;
2028
2029	/*
2030	 * Clear the PCS interrupt from GEM_STATUS.  The PCS register is
2031	 * latched, so we have to read it twice.  There is only one bit in
2032	 * use, so the value is meaningless.
2033	 */
2034	bus_space_read_4(t, h, GEM_MII_INTERRUP_STATUS);
2035	bus_space_read_4(t, h, GEM_MII_INTERRUP_STATUS);
2036
2037	if ((ifp->if_flags & IFF_UP) == 0)
2038		return 1;
2039
2040	if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) == 0)
2041		return 1;
2042
2043	v = bus_space_read_4(t, h, GEM_MII_STATUS);
2044	/* If we see remote fault, our link partner is probably going away */
2045	if ((v & GEM_MII_STATUS_REM_FLT) != 0) {
2046		gem_bitwait(sc, h, GEM_MII_STATUS, GEM_MII_STATUS_REM_FLT, 0);
2047		v = bus_space_read_4(t, h, GEM_MII_STATUS);
2048	/* Otherwise, we may need to wait after auto-negotiation completes */
2049	} else if ((v & (GEM_MII_STATUS_LINK_STS | GEM_MII_STATUS_ANEG_CPT)) ==
2050	    GEM_MII_STATUS_ANEG_CPT) {
2051		gem_bitwait(sc, h, GEM_MII_STATUS, 0, GEM_MII_STATUS_LINK_STS);
2052		v = bus_space_read_4(t, h, GEM_MII_STATUS);
2053	}
2054	if ((v & GEM_MII_STATUS_LINK_STS) != 0) {
2055		if (sc->sc_flags & GEM_LINK) {
2056			return 1;
2057		}
2058		callout_stop(&sc->sc_tick_ch);
2059		v = bus_space_read_4(t, h, GEM_MII_ANAR);
2060		v2 = bus_space_read_4(t, h, GEM_MII_ANLPAR);
2061		sc->sc_mii.mii_media_active = IFM_ETHER | IFM_1000_SX;
2062		sc->sc_mii.mii_media_status = IFM_AVALID | IFM_ACTIVE;
2063		v &= v2;
2064		if (v & GEM_MII_ANEG_FUL_DUPLX) {
2065			sc->sc_mii.mii_media_active |= IFM_FDX;
2066#ifdef GEM_DEBUG
2067			aprint_debug_dev(&sc->sc_dev, "link up: full duplex\n");
2068#endif
2069		} else if (v & GEM_MII_ANEG_HLF_DUPLX) {
2070			sc->sc_mii.mii_media_active |= IFM_HDX;
2071#ifdef GEM_DEBUG
2072			aprint_debug_dev(&sc->sc_dev, "link up: half duplex\n");
2073#endif
2074		} else {
2075#ifdef GEM_DEBUG
2076			aprint_debug_dev(&sc->sc_dev, "duplex mismatch\n");
2077#endif
2078		}
2079		gem_statuschange(sc);
2080	} else {
2081		if ((sc->sc_flags & GEM_LINK) == 0) {
2082			return 1;
2083		}
2084		sc->sc_mii.mii_media_active = IFM_ETHER | IFM_NONE;
2085		sc->sc_mii.mii_media_status = IFM_AVALID;
2086#ifdef GEM_DEBUG
		aprint_debug_dev(&sc->sc_dev, "link down\n");
2088#endif
2089		gem_statuschange(sc);
2090
2091		/* Start the 10 second timer */
2092		callout_reset(&sc->sc_tick_ch, hz * 10, gem_tick, sc);
2093	}
2094	return 1;
2095}
2096
2097
2098
2099int
gem_intr(void *v)
2102{
2103	struct gem_softc *sc = (struct gem_softc *)v;
2104	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2105	bus_space_tag_t t = sc->sc_bustag;
2106	bus_space_handle_t h = sc->sc_h1;
2107	u_int32_t status;
2108	int r = 0;
2109#ifdef GEM_DEBUG
2110	char bits[128];
2111#endif
2112
2113	/* XXX We should probably mask out interrupts until we're done */
2114
2115	sc->sc_ev_intr.ev_count++;
2116
2117	status = bus_space_read_4(t, h, GEM_STATUS);
2118	DPRINTF(sc, ("%s: gem_intr: cplt 0x%x status %s\n",
2119		device_xname(&sc->sc_dev), (status >> 19),
2120		bitmask_snprintf(status, GEM_INTR_BITS, bits, sizeof(bits))));
2121
2122	if ((status & (GEM_INTR_RX_TAG_ERR | GEM_INTR_BERR)) != 0)
2123		r |= gem_eint(sc, status);
2124
2125	/* We don't bother with GEM_INTR_TX_DONE */
2126	if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0) {
2127		GEM_COUNTER_INCR(sc, sc_ev_txint);
2128		r |= gem_tint(sc);
2129	}
2130
2131	if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0) {
2132		GEM_COUNTER_INCR(sc, sc_ev_rxint);
2133		r |= gem_rint(sc);
2134	}
2135
2136	/* We should eventually do more than just print out error stats. */
2137	if (status & GEM_INTR_TX_MAC) {
2138		int txstat = bus_space_read_4(t, h, GEM_MAC_TX_STATUS);
2139		if (txstat & ~GEM_MAC_TX_XMIT_DONE)
2140			printf("%s: MAC tx fault, status %x\n",
2141			    device_xname(&sc->sc_dev), txstat);
2142		if (txstat & (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG))
2143			gem_init(ifp);
2144	}
2145	if (status & GEM_INTR_RX_MAC) {
2146		int rxstat = bus_space_read_4(t, h, GEM_MAC_RX_STATUS);
2147		/*
		 * At least with GEM_SUN_GEM and some GEM_SUN_ERI
		 * revisions, GEM_MAC_RX_OVERFLOW interrupts happen often
		 * due to a silicon bug, so handle them silently.  Moreover,
		 * the receiver has likely hung, so reset it.
2152		 */
2153		if (rxstat & GEM_MAC_RX_OVERFLOW) {
2154			ifp->if_ierrors++;
2155			gem_reset_rxdma(sc);
2156		} else if (rxstat & ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT))
2157			printf("%s: MAC rx fault, status 0x%02x\n",
2158			    device_xname(&sc->sc_dev), rxstat);
2159	}
2160	if (status & GEM_INTR_PCS) {
2161		r |= gem_pint(sc);
2162	}
2163
/* Do we need to do anything with these?
	if ((status & GEM_MAC_CONTROL_STATUS) != 0) {
		status2 = bus_space_read_4(t, h, GEM_MAC_CONTROL_STATUS);
		if ((status2 & GEM_MAC_PAUSED) != 0)
			aprint_debug_dev(&sc->sc_dev, "PAUSE received (%d slots)\n",
			    GEM_MAC_PAUSE_TIME(status2));
		if ((status2 & GEM_MAC_PAUSE) != 0)
			aprint_debug_dev(&sc->sc_dev, "transitioned to PAUSE state\n");
		if ((status2 & GEM_MAC_RESUME) != 0)
			aprint_debug_dev(&sc->sc_dev, "transitioned to non-PAUSE state\n");
	}
	if ((status & GEM_INTR_MIF) != 0)
		aprint_debug_dev(&sc->sc_dev, "MIF interrupt\n");
*/
2178#if NRND > 0
2179	rnd_add_uint32(&sc->rnd_source, status);
2180#endif
2181	return (r);
2182}
2183
2184
2185void
gem_watchdog(struct ifnet *ifp)
2188{
2189	struct gem_softc *sc = ifp->if_softc;
2190
2191	DPRINTF(sc, ("gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x "
2192		"GEM_MAC_RX_CONFIG %x\n",
2193		bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_RX_CONFIG),
2194		bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_MAC_RX_STATUS),
2195		bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_MAC_RX_CONFIG)));
2196
2197	log(LOG_ERR, "%s: device timeout\n", device_xname(&sc->sc_dev));
2198	++ifp->if_oerrors;
2199
2200	/* Try to get more packets going. */
2201	gem_start(ifp);
2202}
2203
2204/*
2205 * Initialize the MII Management Interface
2206 */
2207void
gem_mifinit(struct gem_softc *sc)
2210{
2211	bus_space_tag_t t = sc->sc_bustag;
2212	bus_space_handle_t mif = sc->sc_h1;
2213
2214	/* Configure the MIF in frame mode */
2215	sc->sc_mif_config = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
2216	sc->sc_mif_config &= ~GEM_MIF_CONFIG_BB_ENA;
2217	bus_space_write_4(t, mif, GEM_MIF_CONFIG, sc->sc_mif_config);
2218}
2219
2220/*
2221 * MII interface
2222 *
2223 * The GEM MII interface supports at least three different operating modes:
2224 *
2225 * Bitbang mode is implemented using data, clock and output enable registers.
2226 *
2227 * Frame mode is implemented by loading a complete frame into the frame
2228 * register and polling the valid bit for completion.
2229 *
2230 * Polling mode uses the frame register but completion is indicated by
2231 * an interrupt.
2232 *
2233 */
2234static int
gem_mii_readreg(struct device *self, int phy, int reg)
2238{
2239	struct gem_softc *sc = (void *)self;
2240	bus_space_tag_t t = sc->sc_bustag;
2241	bus_space_handle_t mif = sc->sc_h1;
2242	int n;
2243	u_int32_t v;
2244
2245#ifdef GEM_DEBUG1
2246	if (sc->sc_debug)
2247		printf("gem_mii_readreg: PHY %d reg %d\n", phy, reg);
2248#endif
2249
2250	/* Construct the frame command */
2251	v = (reg << GEM_MIF_REG_SHIFT)	| (phy << GEM_MIF_PHY_SHIFT) |
2252		GEM_MIF_FRAME_READ;
2253
2254	bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
2255	for (n = 0; n < 100; n++) {
2256		DELAY(1);
2257		v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
2258		if (v & GEM_MIF_FRAME_TA0)
2259			return (v & GEM_MIF_FRAME_DATA);
2260	}
2261
2262	printf("%s: mii_read timeout\n", device_xname(&sc->sc_dev));
2263	return (0);
2264}
2265
2266static void
gem_mii_writereg(struct device *self, int phy, int reg, int val)
2270{
2271	struct gem_softc *sc = (void *)self;
2272	bus_space_tag_t t = sc->sc_bustag;
2273	bus_space_handle_t mif = sc->sc_h1;
2274	int n;
2275	u_int32_t v;
2276
2277#ifdef GEM_DEBUG1
2278	if (sc->sc_debug)
2279		printf("gem_mii_writereg: PHY %d reg %d val %x\n",
2280			phy, reg, val);
2281#endif
2282
2283	/* Construct the frame command */
2284	v = GEM_MIF_FRAME_WRITE			|
2285	    (phy << GEM_MIF_PHY_SHIFT)		|
2286	    (reg << GEM_MIF_REG_SHIFT)		|
2287	    (val & GEM_MIF_FRAME_DATA);
2288
2289	bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
2290	for (n = 0; n < 100; n++) {
2291		DELAY(1);
2292		v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
2293		if (v & GEM_MIF_FRAME_TA0)
2294			return;
2295	}
2296
2297	printf("%s: mii_write timeout\n", device_xname(&sc->sc_dev));
2298}
2299
2300static void
gem_mii_statchg(struct device *dev)
2303{
2304	struct gem_softc *sc = (void *)dev;
2305#ifdef GEM_DEBUG
2306	int instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media);
2307#endif
2308
2309#ifdef GEM_DEBUG
2310	if (sc->sc_debug)
2311		printf("gem_mii_statchg: status change: phy = %d\n",
2312			sc->sc_phys[instance]);
2313#endif
2314	gem_statuschange(sc);
2315}
2316
2317/*
2318 * Common status change for gem_mii_statchg() and gem_pint()
2319 */
2320void
gem_statuschange(struct gem_softc *sc)
2322{
2323	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2324	bus_space_tag_t t = sc->sc_bustag;
2325	bus_space_handle_t mac = sc->sc_h1;
2326	int gigabit;
2327	u_int32_t rxcfg, txcfg, v;
2328
2329	if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0 &&
2330	    IFM_SUBTYPE(sc->sc_mii.mii_media_active) != IFM_NONE)
2331		sc->sc_flags |= GEM_LINK;
2332	else
2333		sc->sc_flags &= ~GEM_LINK;
2334
2335	if (sc->sc_ethercom.ec_if.if_baudrate == IF_Mbps(1000))
2336		gigabit = 1;
2337	else
2338		gigabit = 0;
2339
2340	/*
	 * The configuration done here corresponds to steps F) and G) and,
	 * as far as enabling the RX and TX MACs goes, also to step H)
2343	 * of the initialization sequence outlined in section 3.2.1 of
2344	 * the GEM Gigabit Ethernet ASIC Specification.
2345	 */
2346
2347	rxcfg = bus_space_read_4(t, mac, GEM_MAC_RX_CONFIG);
2348	rxcfg &= ~(GEM_MAC_RX_CARR_EXTEND | GEM_MAC_RX_ENABLE);
2349	txcfg = GEM_MAC_TX_ENA_IPG0 | GEM_MAC_TX_NGU | GEM_MAC_TX_NGU_LIMIT;
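	/*
	 * Full duplex links ignore carrier sense and collisions; for
	 * half-duplex gigabit operation, 802.3z carrier extension must
	 * be enabled on both MACs instead.
	 */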
2350	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0)
2351		txcfg |= GEM_MAC_TX_IGN_CARRIER | GEM_MAC_TX_IGN_COLLIS;
2352	else if (gigabit) {
2353		rxcfg |= GEM_MAC_RX_CARR_EXTEND;
		txcfg |= GEM_MAC_TX_CARR_EXTEND;
2355	}
2356	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, 0);
2357	bus_space_barrier(t, mac, GEM_MAC_TX_CONFIG, 4,
2358	    BUS_SPACE_BARRIER_WRITE);
2359	if (!gem_bitwait(sc, mac, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0))
2360		aprint_normal_dev(&sc->sc_dev, "cannot disable TX MAC\n");
2361	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, txcfg);
2362	bus_space_write_4(t, mac, GEM_MAC_RX_CONFIG, 0);
2363	bus_space_barrier(t, mac, GEM_MAC_RX_CONFIG, 4,
2364	    BUS_SPACE_BARRIER_WRITE);
2365	if (!gem_bitwait(sc, mac, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0))
2366		aprint_normal_dev(&sc->sc_dev, "cannot disable RX MAC\n");
2367	bus_space_write_4(t, mac, GEM_MAC_RX_CONFIG, rxcfg);
2368
2369	v = bus_space_read_4(t, mac, GEM_MAC_CONTROL_CONFIG) &
2370	    ~(GEM_MAC_CC_RX_PAUSE | GEM_MAC_CC_TX_PAUSE);
2371	bus_space_write_4(t, mac, GEM_MAC_CONTROL_CONFIG, v);
2372
2373	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) == 0 &&
2374	    gigabit != 0)
2375		bus_space_write_4(t, mac, GEM_MAC_SLOT_TIME,
2376		    GEM_MAC_SLOT_TIME_CARR_EXTEND);
2377	else
2378		bus_space_write_4(t, mac, GEM_MAC_SLOT_TIME,
2379		    GEM_MAC_SLOT_TIME_NORMAL);
2380
2381	/* XIF Configuration */
2382	if (sc->sc_flags & GEM_LINK)
2383		v = GEM_MAC_XIF_LINK_LED;
2384	else
2385		v = 0;
2386	v |= GEM_MAC_XIF_TX_MII_ENA;
2387
2388	/* If an external transceiver is connected, enable its MII drivers */
2389	sc->sc_mif_config = bus_space_read_4(t, mac, GEM_MIF_CONFIG);
	if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) == 0) {
2391		if ((sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) != 0) {
2392			/* External MII needs echo disable if half duplex. */
2393			if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) &
2394			    IFM_FDX) != 0)
2395				/* turn on full duplex LED */
2396				v |= GEM_MAC_XIF_FDPLX_LED;
2397			else
2398				/* half duplex -- disable echo */
2399				v |= GEM_MAC_XIF_ECHO_DISABL;
2400			if (gigabit)
2401				v |= GEM_MAC_XIF_GMII_MODE;
2402			else
2403				v &= ~GEM_MAC_XIF_GMII_MODE;
2404		} else
2405			/* Internal MII needs buf enable */
2406			v |= GEM_MAC_XIF_MII_BUF_ENA;
2407	} else {
2408		if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0)
2409			v |= GEM_MAC_XIF_FDPLX_LED;
2410		v |= GEM_MAC_XIF_GMII_MODE;
2411	}
2412	bus_space_write_4(t, mac, GEM_MAC_XIF_CONFIG, v);
2413
2414	if ((ifp->if_flags & IFF_RUNNING) != 0 &&
2415	    (sc->sc_flags & GEM_LINK) != 0) {
2416		bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG,
2417		    txcfg | GEM_MAC_TX_ENABLE);
2418		bus_space_write_4(t, mac, GEM_MAC_RX_CONFIG,
2419		    rxcfg | GEM_MAC_RX_ENABLE);
2420	}
2421}
2422
2423int
2424gem_ser_mediachange(struct ifnet *ifp)
2425{
2426	struct gem_softc *sc = ifp->if_softc;
2427	u_int s, t;
2428
2429	if (IFM_TYPE(sc->sc_mii.mii_media.ifm_media) != IFM_ETHER)
2430		return EINVAL;
2431
2432	s = IFM_SUBTYPE(sc->sc_mii.mii_media.ifm_media);
2433	if (s == IFM_AUTO) {
2434		if (sc->sc_mii_media != s) {
2435#ifdef GEM_DEBUG
2436			aprint_debug_dev(&sc->sc_dev, "setting media to auto\n");
2437#endif
2438			sc->sc_mii_media = s;
2439			if (ifp->if_flags & IFF_UP) {
2440				gem_pcs_stop(sc, 0);
2441				gem_pcs_start(sc);
2442			}
2443		}
2444		return 0;
2445	}
2446	if (s == IFM_1000_SX) {
2447		t = IFM_OPTIONS(sc->sc_mii.mii_media.ifm_media);
2448		if (t == IFM_FDX || t == IFM_HDX) {
2449			if (sc->sc_mii_media != t) {
2450				sc->sc_mii_media = t;
2451#ifdef GEM_DEBUG
2452				aprint_debug_dev(&sc->sc_dev,
2453				    "setting media to 1000baseSX-%s\n",
2454				    t == IFM_FDX ? "FDX" : "HDX");
2455#endif
2456				if (ifp->if_flags & IFF_UP) {
2457					gem_pcs_stop(sc, 0);
2458					gem_pcs_start(sc);
2459				}
2460			}
2461			return 0;
2462		}
2463	}
2464	return EINVAL;
2465}
2466
2467void
2468gem_ser_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2469{
2470	struct gem_softc *sc = ifp->if_softc;
2471
2472	if ((ifp->if_flags & IFF_UP) == 0)
2473		return;
2474	ifmr->ifm_active = sc->sc_mii.mii_media_active;
2475	ifmr->ifm_status = sc->sc_mii.mii_media_status;
2476}
2477
2478static int
2479gem_ifflags_cb(struct ethercom *ec)
2480{
2481	struct ifnet *ifp = &ec->ec_if;
2482	struct gem_softc *sc = ifp->if_softc;
2483	int change = ifp->if_flags ^ sc->sc_if_flags;
2484
2485	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
2486		return ENETRESET;
2487	else if ((change & IFF_PROMISC) != 0)
2488		gem_setladrf(sc);
2489	return 0;
2490}
2491
2492/*
2493 * Process an ioctl request.
2494 */
2495int
2496gem_ioctl(struct ifnet *ifp, unsigned long cmd, void *data)
2497{
2498	struct gem_softc *sc = ifp->if_softc;
2499	int s, error = 0;
2500
2501	s = splnet();
2502
2503	if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
2504		error = 0;
2505		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2506			;
2507		else if (ifp->if_flags & IFF_RUNNING) {
2508			/*
2509			 * Multicast list has changed; set the hardware filter
2510			 * accordingly.
2511			 */
2512			gem_setladrf(sc);
2513		}
2514	}
2515
2516	/* Try to get things going again */
2517	if (ifp->if_flags & IFF_UP)
2518		gem_start(ifp);
2519	splx(s);
2520	return (error);
2521}
2522
2523
2524void
gem_shutdown(void *arg)
2527{
2528	struct gem_softc *sc = (struct gem_softc *)arg;
2529	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2530
2531	gem_stop(ifp, 1);
2532}
2533
2534/*
2535 * Set up the logical address filter.
2536 */
2537void
gem_setladrf(struct gem_softc *sc)
2540{
2541	struct ethercom *ec = &sc->sc_ethercom;
2542	struct ifnet *ifp = &ec->ec_if;
2543	struct ether_multi *enm;
2544	struct ether_multistep step;
2545	bus_space_tag_t t = sc->sc_bustag;
2546	bus_space_handle_t h = sc->sc_h1;
2547	u_int32_t crc;
2548	u_int32_t hash[16];
2549	u_int32_t v;
2550	int i;
2551
2552	/* Get current RX configuration */
2553	v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
2554
2555	/*
2556	 * Turn off promiscuous mode, promiscuous group mode (all multicast),
2557	 * and hash filter.  Depending on the case, the right bit will be
2558	 * enabled.
2559	 */
2560	v &= ~(GEM_MAC_RX_PROMISCUOUS|GEM_MAC_RX_HASH_FILTER|
2561	    GEM_MAC_RX_PROMISC_GRP);
2562
2563	if ((ifp->if_flags & IFF_PROMISC) != 0) {
2564		/* Turn on promiscuous mode */
2565		v |= GEM_MAC_RX_PROMISCUOUS;
2566		ifp->if_flags |= IFF_ALLMULTI;
2567		goto chipit;
2568	}
2569
2570	/*
2571	 * Set up multicast address filter by passing all multicast addresses
2572	 * through a crc generator, and then using the high order 8 bits as an
	 * index into the 256 bit logical address filter.  The high order 4
	 * bits select the word, while the other 4 bits select the bit within
2575	 * the word (where bit 0 is the MSB).
2576	 */
2577
2578	/* Clear hash table */
2579	memset(hash, 0, sizeof(hash));
2580
2581	ETHER_FIRST_MULTI(step, ec, enm);
2582	while (enm != NULL) {
2583		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2584			/*
2585			 * We must listen to a range of multicast addresses.
2586			 * For now, just accept all multicasts, rather than
2587			 * trying to set only those filter bits needed to match
2588			 * the range.  (At this time, the only use of address
2589			 * ranges is for IP multicast routing, for which the
2590			 * range is big enough to require all bits set.)
2591			 * XXX should use the address filters for this
2592			 */
2593			ifp->if_flags |= IFF_ALLMULTI;
2594			v |= GEM_MAC_RX_PROMISC_GRP;
2595			goto chipit;
2596		}
2597
2598		/* Get the LE CRC32 of the address */
2599		crc = ether_crc32_le(enm->enm_addrlo, sizeof(enm->enm_addrlo));
2600
2601		/* Just want the 8 most significant bits. */
2602		crc >>= 24;
2603
2604		/* Set the corresponding bit in the filter. */
2605		hash[crc >> 4] |= 1 << (15 - (crc & 15));
2606
2607		ETHER_NEXT_MULTI(step, enm);
2608	}
2609
2610	v |= GEM_MAC_RX_HASH_FILTER;
2611	ifp->if_flags &= ~IFF_ALLMULTI;
2612
2613	/* Now load the hash table into the chip (if we are using it) */
2614	for (i = 0; i < 16; i++) {
2615		bus_space_write_4(t, h,
2616		    GEM_MAC_HASH0 + i * (GEM_MAC_HASH1-GEM_MAC_HASH0),
2617		    hash[i]);
2618	}
2619
2620chipit:
2621	sc->sc_if_flags = ifp->if_flags;
2622	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);
2623}
2624
2625#if notyet
2626
2627/*
2628 * gem_power:
2629 *
2630 *	Power management (suspend/resume) hook.
2631 */
2632void
gem_power(int why, void *arg)
2636{
2637	struct gem_softc *sc = arg;
2638	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2639	int s;
2640
2641	s = splnet();
2642	switch (why) {
2643	case PWR_SUSPEND:
2644	case PWR_STANDBY:
2645		gem_stop(ifp, 1);
2646		if (sc->sc_power != NULL)
2647			(*sc->sc_power)(sc, why);
2648		break;
2649	case PWR_RESUME:
2650		if (ifp->if_flags & IFF_UP) {
2651			if (sc->sc_power != NULL)
2652				(*sc->sc_power)(sc, why);
2653			gem_init(ifp);
2654		}
2655		break;
2656	case PWR_SOFTSUSPEND:
2657	case PWR_SOFTSTANDBY:
2658	case PWR_SOFTRESUME:
2659		break;
2660	}
2661	splx(s);
2662}
2663#endif
2664