/*	$NetBSD: gemvar.h,v 1.4 2001/10/18 15:09:15 thorpej Exp $ */

/*
 *
 * Copyright (C) 2001 Eduardo Horvath.
 * All rights reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#ifndef	_IF_GEMVAR_H
#define	_IF_GEMVAR_H


#include "rnd.h"

#include <sys/queue.h>
#include <sys/callout.h>

#if NRND > 0
#include <sys/rnd.h>
#endif

/*
 * Misc. definitions for the Sun ``Gem'' Ethernet controller family driver.
 */

/*
 * Transmit descriptor list size.  This is arbitrary, but allocate
 * enough descriptors for 64 pending transmissions and 16 segments
 * per packet.
 */
#define	GEM_NTXSEGS		16

#define	GEM_TXQUEUELEN		64
#define	GEM_NTXDESC		(GEM_TXQUEUELEN * GEM_NTXSEGS)
#define	GEM_NTXDESC_MASK	(GEM_NTXDESC - 1)
#define	GEM_NEXTTX(x)		((x + 1) & GEM_NTXDESC_MASK)

/*
 * Receive descriptor list size.  We have one Rx buffer per incoming
 * packet, so this logic is a little simpler.
 */
#define	GEM_NRXDESC		128
#define	GEM_NRXDESC_MASK	(GEM_NRXDESC - 1)
#define	GEM_NEXTRX(x)		((x + 1) & GEM_NRXDESC_MASK)
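
/*
 * Note on the ring arithmetic above: GEM_NTXDESC works out to
 * 64 * 16 = 1024 descriptors and GEM_NRXDESC is 128.  Both are powers
 * of two, which is what lets GEM_NEXTTX()/GEM_NEXTRX() wrap with a
 * simple mask instead of a modulo; e.g. GEM_NEXTTX(1023) is
 * (1024 & 1023) == 0.  A producer typically advances its index with
 * something like "nexttx = GEM_NEXTTX(nexttx)" for each descriptor
 * it fills.
 */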

/*
 * Control structures are DMA'd to the GEM chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct gem_control_data {
	/*
	 * The transmit descriptors.
	 */
	struct gem_desc gcd_txdescs[GEM_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	struct gem_desc gcd_rxdescs[GEM_NRXDESC];
};

#define	GEM_CDOFF(x)	offsetof(struct gem_control_data, x)
#define	GEM_CDTXOFF(x)	GEM_CDOFF(gcd_txdescs[(x)])
#define	GEM_CDRXOFF(x)	GEM_CDOFF(gcd_rxdescs[(x)])
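
/*
 * Illustrative sketch (not the driver's actual attach code) of how a bus
 * front-end might allocate the control-data clump above as one DMA-safe
 * region and load it into sc_cddmamap, given a "struct gem_softc *sc".
 * Error handling is omitted and the exact flags are an assumption; the
 * point is that one bus_dmamem_alloc() segment backs all descriptors.
 */
#if 0
{
	bus_dma_segment_t seg;
	caddr_t kva;
	int rseg, error;

	/* One physically contiguous segment for all descriptors. */
	error = bus_dmamem_alloc(sc->sc_dmatag,
	    sizeof(struct gem_control_data), PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT);

	/* Map it into kernel virtual address space. */
	error = bus_dmamem_map(sc->sc_dmatag, &seg, rseg,
	    sizeof(struct gem_control_data), &kva, BUS_DMA_COHERENT);
	sc->sc_control_data = (struct gem_control_data *)kva;

	/* Create and load the map so sc_cddma (defined below) is valid. */
	error = bus_dmamap_create(sc->sc_dmatag,
	    sizeof(struct gem_control_data), 1,
	    sizeof(struct gem_control_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_cddmamap);
	error = bus_dmamap_load(sc->sc_dmatag, sc->sc_cddmamap, kva,
	    sizeof(struct gem_control_data), NULL, BUS_DMA_NOWAIT);
}
#endif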

/*
 * Software state for transmit jobs.
 */
struct gem_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndescs;			/* number of descriptors */
	SIMPLEQ_ENTRY(gem_txsoft) txs_q;
};

SIMPLEQ_HEAD(gem_txsq, gem_txsoft);

/*
 * Software state for receive jobs.
 */
struct gem_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};


/*
 * Table which describes the transmit threshold mode.  We generally
 * start at index 0.  Whenever we get a transmit underrun, we increment
 * our index, falling back if we encounter the NULL terminator.
 */
struct gem_txthresh_tab {
	u_int32_t txth_opmode;		/* OPMODE bits */
	const char *txth_name;		/* name of mode */
};
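
/*
 * Hypothetical sketch of how such a table might look and be walked on a
 * transmit underrun.  The OPMODE_TR_* values and the sc_txthresh index
 * field are assumptions for illustration only; what the comment above
 * prescribes is the bump-until-NULL-terminator walk.
 */
#if 0
static const struct gem_txthresh_tab gem_txthresh_tab[] = {
	{ OPMODE_TR_72,		"72 bytes" },		/* hypothetical */
	{ OPMODE_TR_128,	"128 bytes" },		/* hypothetical */
	{ OPMODE_TR_256,	"256 bytes" },		/* hypothetical */
	{ OPMODE_SF,		"store and forward" },	/* hypothetical */
	{ 0,			NULL },			/* terminator */
};

	/* In the underrun handler: step to the next threshold, if any. */
	if (gem_txthresh_tab[sc->sc_txthresh + 1].txth_name != NULL)
		sc->sc_txthresh++;
#endif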

/*
 * Some misc. statistics, useful for debugging.
 */
struct gem_stats {
	u_long ts_tx_uf;		/* transmit underflow errors */
	u_long ts_tx_to;		/* transmit jabber timeouts */
	u_long ts_tx_ec;		/* excessive collision count */
	u_long ts_tx_lc;		/* late collision count */
};

/*
 * Software state per device.
 */
struct gem_softc {
	struct device sc_dev;		/* generic device information */
	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII media control */
#define	sc_media	sc_mii.mii_media	/* shorthand */
	struct callout sc_tick_ch;	/* tick callout */

	/* The following bus handles are to be provided by the bus front-end */
	bus_space_tag_t sc_bustag;	/* bus tag */
	bus_dma_tag_t sc_dmatag;	/* bus dma tag */
	bus_dmamap_t sc_dmamap;		/* bus dma handle */
	bus_space_handle_t sc_h;	/* bus space handle for all regs */
#if 0
	/* The following may be needed for SBus */
	bus_space_handle_t sc_seb;	/* HME Global registers */
	bus_space_handle_t sc_erx;	/* HME ERX registers */
	bus_space_handle_t sc_etx;	/* HME ETX registers */
	bus_space_handle_t sc_mac;	/* HME MAC registers */
	bus_space_handle_t sc_mif;	/* HME MIF registers */
#endif
	int sc_phys[2];			/* MII instance -> PHY map */

	int sc_mif_config;		/* Selected MII reg setting */

	int sc_pci;			/* XXXXX -- PCI buses are LE. */

	void *sc_sdhook;		/* shutdown hook */
	void *sc_powerhook;		/* power management hook */

	struct gem_stats sc_stats;	/* debugging stats */

	/*
	 * Ring buffer DMA stuff.
	 */
	bus_dma_segment_t sc_cdseg;	/* control data memory */
	int sc_cdnseg;			/* number of segments */
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/*
	 * Software state for transmit and receive descriptors.
	 */
	struct gem_txsoft sc_txsoft[GEM_TXQUEUELEN];
	struct gem_rxsoft sc_rxsoft[GEM_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct gem_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->gcd_txdescs
#define	sc_rxdescs	sc_control_data->gcd_rxdescs

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	u_int32_t sc_tdctl_ch;		/* conditional desc chaining */
	u_int32_t sc_tdctl_er;		/* conditional desc end-of-ring */

	u_int32_t sc_setup_fsls;	/* FS|LS on setup descriptor */

	struct gem_txsq sc_txfreeq;	/* free Tx descsofts */
	struct gem_txsq sc_txdirtyq;	/* dirty Tx descsofts */

	int sc_rxptr;			/* next ready RX descriptor/descsoft */

	/* ========== */
	int sc_inited;
	int sc_debug;
	void *sc_sh;			/* shutdownhook cookie */

	/* Special hardware hooks */
	void (*sc_hwreset) __P((struct gem_softc *));
	void (*sc_hwinit) __P((struct gem_softc *));

#if NRND > 0
	rndsource_element_t rnd_source;
#endif
};


#define	GEM_DMA_READ(sc, v)	(((sc)->sc_pci) ? le64toh(v) : be64toh(v))
#define	GEM_DMA_WRITE(sc, v)	(((sc)->sc_pci) ? htole64(v) : htobe64(v))
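
/*
 * The descriptor words live in bus byte order: little-endian behind PCI
 * (sc_pci != 0), big-endian behind SBus.  A minimal sketch of reading a
 * completed Rx descriptor's flags back into host order inside an Rx
 * processing loop, using GEM_CDRXSYNC() defined below:
 */
#if 0
	u_int64_t flags;

	GEM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	flags = GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags);
	if (flags & GEM_RD_OWN)
		break;		/* chip still owns this descriptor */
#endif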

/*
 * This macro returns the current media entry for *non-MII* media.
 */
#define	GEM_CURRENT_MEDIA(sc)						\
	(IFM_SUBTYPE((sc)->sc_mii.mii_media.ifm_cur->ifm_media) != IFM_AUTO ? \
	 (sc)->sc_mii.mii_media.ifm_cur : (sc)->sc_nway_active)

/*
 * This macro determines if a change to media-related OPMODE bits requires
 * a chip reset.
 */
#define	GEM_MEDIA_NEEDSRESET(sc, newbits)				\
	(((sc)->sc_opmode & OPMODE_MEDIA_BITS) !=			\
	 ((newbits) & OPMODE_MEDIA_BITS))

#define	GEM_CDTXADDR(sc, x)	((sc)->sc_cddma + GEM_CDTXOFF((x)))
#define	GEM_CDRXADDR(sc, x)	((sc)->sc_cddma + GEM_CDRXOFF((x)))

#define	GEM_CDSPADDR(sc)	((sc)->sc_cddma + GEM_CDSPOFF)

#define	GEM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > GEM_NTXDESC) {				\
		bus_dmamap_sync((sc)->sc_dmatag, (sc)->sc_cddmamap,	\
		    GEM_CDTXOFF(__x), sizeof(struct gem_desc) *		\
		    (GEM_NTXDESC - __x), (ops));			\
		__n -= (GEM_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmatag, (sc)->sc_cddmamap,		\
	    GEM_CDTXOFF(__x), sizeof(struct gem_desc) * __n, (ops));	\
} while (0)
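
/*
 * A minimal usage sketch: after filling the descriptors for one packet
 * (which may wrap past the end of the ring, hence the two-step sync
 * above), push them out before handing ownership to the chip:
 */
#if 0
	GEM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndescs,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
#endif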

#define	GEM_CDRXSYNC(sc, x, ops)					\
	bus_dmamap_sync((sc)->sc_dmatag, (sc)->sc_cddmamap,		\
	    GEM_CDRXOFF((x)), sizeof(struct gem_desc), (ops))

#define	GEM_CDSPSYNC(sc, ops)						\
	bus_dmamap_sync((sc)->sc_dmatag, (sc)->sc_cddmamap,		\
	    GEM_CDSPOFF, GEM_SETUP_PACKET_LEN, (ops))

#define	GEM_INIT_RXDESC(sc, x)						\
do {									\
	struct gem_rxsoft *__rxs = &sc->sc_rxsoft[(x)];			\
	struct gem_desc *__rxd = &sc->sc_rxdescs[(x)];			\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	__m->m_data = __m->m_ext.ext_buf;				\
	__rxd->gd_addr =						\
	    GEM_DMA_WRITE((sc), __rxs->rxs_dmamap->dm_segs[0].ds_addr);	\
	__rxd->gd_flags =						\
	    GEM_DMA_WRITE((sc),						\
		(((__m->m_ext.ext_size) << GEM_RD_BUFSHIFT)		\
		 & GEM_RD_BUFSIZE) | GEM_RD_OWN);			\
	GEM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
} while (0)
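
/*
 * Sketch of how an Rx buffer would typically be (re)loaded before the
 * descriptor is handed back to the chip with GEM_INIT_RXDESC().  The
 * index variable "idx" is assumed and error handling is omitted; only
 * the bus_dma(9) load and the macro use are the point.
 */
#if 0
	struct mbuf *m;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	MCLGET(m, M_DONTWAIT);
	sc->sc_rxsoft[idx].rxs_mbuf = m;

	/* Map the cluster, then give the descriptor back to the hardware. */
	bus_dmamap_load(sc->sc_dmatag, sc->sc_rxsoft[idx].rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	GEM_INIT_RXDESC(sc, idx);
#endif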

#ifdef _KERNEL
void	gem_attach __P((struct gem_softc *, const uint8_t *));
int	gem_intr __P((void *));

int	gem_mediachange __P((struct ifnet *));
void	gem_mediastatus __P((struct ifnet *, struct ifmediareq *));

void	gem_reset __P((struct gem_softc *));
#endif /* _KERNEL */
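
/*
 * These are the entry points a bus front-end (e.g. a PCI or SBus
 * attachment) is expected to use.  A rough, PCI-flavoured sketch of the
 * hand-off, with register mapping and interrupt-handle details elided
 * and the local variable names (pa, ih, enaddr) assumed:
 */
#if 0
	sc->sc_bustag = pa->pa_memt;
	sc->sc_dmatag = pa->pa_dmat;
	sc->sc_pci = 1;		/* descriptors are little-endian */
	/* ... map registers into sc->sc_h, read MAC address into enaddr ... */
	pci_intr_establish(pa->pa_pc, ih, IPL_NET, gem_intr, sc);
	gem_attach(sc, enaddr);
#endif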


#endif /* _IF_GEMVAR_H */