/*	$NetBSD: gemvar.h,v 1.6 2002/05/08 02:12:55 matt Exp $ */

/*
 *
 * Copyright (C) 2001 Eduardo Horvath.
 * All rights reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#ifndef _IF_GEMVAR_H
#define _IF_GEMVAR_H


#include "rnd.h"

#include <sys/queue.h>
#include <sys/callout.h>

#if NRND > 0
#include <sys/rnd.h>
#endif

/*
 * Misc. definitions for the Sun ``Gem'' Ethernet controller family driver.
 */

/*
 * Transmit descriptor list size.  This is arbitrary, but allocate
 * enough descriptors for 64 pending transmissions and 16 segments
 * per packet.
 */
#define	GEM_NTXSEGS		16

#define	GEM_TXQUEUELEN		64
#define	GEM_NTXDESC		(GEM_TXQUEUELEN * GEM_NTXSEGS)
#define	GEM_NTXDESC_MASK	(GEM_NTXDESC - 1)
#define	GEM_NEXTTX(x)		((x + 1) & GEM_NTXDESC_MASK)
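
/*
 * Note that the ring indices wrap by masking rather than by comparison,
 * e.g. GEM_NEXTTX(GEM_NTXDESC - 1) == 0; this relies on GEM_NTXDESC
 * (64 * 16 = 1024) being a power of two.
 */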

/*
 * Receive descriptor list size.  We have one Rx buffer per incoming
 * packet, so this logic is a little simpler.
 */
#define	GEM_NRXDESC		128
#define	GEM_NRXDESC_MASK	(GEM_NRXDESC - 1)
#define	GEM_NEXTRX(x)		((x + 1) & GEM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the GEM chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct gem_control_data {
	/*
	 * The transmit descriptors.
	 */
	struct gem_desc gcd_txdescs[GEM_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	struct gem_desc gcd_rxdescs[GEM_NRXDESC];
};

#define	GEM_CDOFF(x)	offsetof(struct gem_control_data, x)
#define	GEM_CDTXOFF(x)	GEM_CDOFF(gcd_txdescs[(x)])
#define	GEM_CDRXOFF(x)	GEM_CDOFF(gcd_rxdescs[(x)])
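
/*
 * Since the descriptors live in one clump, the chip-visible address of
 * any one of them is just the clump's DMA address plus its offset; e.g.
 * the fifth Tx descriptor lives GEM_CDTXOFF(5) bytes into the clump
 * (see GEM_CDTXADDR() below).
 */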

/*
 * Software state for transmit jobs.
 */
struct gem_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndescs;			/* number of descriptors */
	SIMPLEQ_ENTRY(gem_txsoft) txs_q;
};

SIMPLEQ_HEAD(gem_txsq, gem_txsoft);

/*
 * Software state for receive jobs.
 */
struct gem_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

/*
 * Software state per device.
 */
struct gem_softc {
	struct device sc_dev;		/* generic device information */
	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII media control */
#define sc_media sc_mii.mii_media	/* shorthand */
	struct callout sc_tick_ch;	/* tick callout */

	/* The following bus handles are to be provided by the bus front-end */
	bus_space_tag_t sc_bustag;	/* bus tag */
	bus_dma_tag_t sc_dmatag;	/* bus dma tag */
	bus_dmamap_t sc_dmamap;		/* bus dma handle */
	bus_space_handle_t sc_h;	/* bus space handle for all regs */

	int sc_phys[2];			/* MII instance -> PHY map */

	int sc_mif_config;		/* Selected MII reg setting */

	int sc_pci;			/* XXXXX -- PCI buses are LE. */

	void *sc_sdhook;		/* shutdown hook */
	void *sc_powerhook;		/* power management hook */

	/*
	 * Ring buffer DMA stuff.
	 */
	bus_dma_segment_t sc_cdseg;	/* control data memory */
	int sc_cdnseg;			/* number of segments */
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define sc_cddma sc_cddmamap->dm_segs[0].ds_addr

	/*
	 * Software state for transmit and receive descriptors.
	 */
	struct gem_txsoft sc_txsoft[GEM_TXQUEUELEN];
	struct gem_rxsoft sc_rxsoft[GEM_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct gem_control_data *sc_control_data;
#define sc_txdescs sc_control_data->gcd_txdescs
#define sc_rxdescs sc_control_data->gcd_rxdescs

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */
	int sc_txwin;			/* Tx descriptors since last Tx int */

	struct gem_txsq sc_txfreeq;	/* free Tx descsofts */
	struct gem_txsq sc_txdirtyq;	/* dirty Tx descsofts */

	int sc_rxptr;			/* next ready Rx descriptor/descsoft */

	/* ========== */
	int sc_inited;
	int sc_debug;
	void *sc_sh;			/* shutdownhook cookie */

	/* Special hardware hooks */
	void (*sc_hwreset) __P((struct gem_softc *));
	void (*sc_hwinit) __P((struct gem_softc *));

#if NRND > 0
	rndsource_element_t rnd_source;
#endif
};


#define	GEM_DMA_READ(sc, v)	(((sc)->sc_pci) ? le64toh(v) : be64toh(v))
#define	GEM_DMA_WRITE(sc, v)	(((sc)->sc_pci) ? htole64(v) : htobe64(v))
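
/*
 * Example (illustrative only): fetching the flags word of Rx descriptor i
 * in host byte order, regardless of whether the chip sits behind a
 * little-endian PCI bus or a big-endian SBus:
 *
 *	u_int64_t flags = GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags);
 */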

#define	GEM_CDTXADDR(sc, x)	((sc)->sc_cddma + GEM_CDTXOFF((x)))
#define	GEM_CDRXADDR(sc, x)	((sc)->sc_cddma + GEM_CDRXOFF((x)))

#define	GEM_CDSPADDR(sc)	((sc)->sc_cddma + GEM_CDSPOFF)

#define	GEM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > GEM_NTXDESC) {				\
		bus_dmamap_sync((sc)->sc_dmatag, (sc)->sc_cddmamap,	\
		    GEM_CDTXOFF(__x), sizeof(struct gem_desc) *		\
		    (GEM_NTXDESC - __x), (ops));			\
		__n -= (GEM_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmatag, (sc)->sc_cddmamap,		\
	    GEM_CDTXOFF(__x), sizeof(struct gem_desc) * __n, (ops));	\
} while (0)
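
/*
 * Example usage (a sketch, not code from this driver): after a transmit
 * job "txs" taken from sc_txdirtyq completes, its descriptors can be
 * synced in one go; the macro issues two bus_dmamap_sync() calls when
 * the range wraps past the end of the ring:
 *
 *	GEM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndescs,
 *	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
 */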

#define	GEM_CDRXSYNC(sc, x, ops)					\
	bus_dmamap_sync((sc)->sc_dmatag, (sc)->sc_cddmamap,		\
	    GEM_CDRXOFF((x)), sizeof(struct gem_desc), (ops))

#define	GEM_CDSPSYNC(sc, ops)						\
	bus_dmamap_sync((sc)->sc_dmatag, (sc)->sc_cddmamap,		\
	    GEM_CDSPOFF, GEM_SETUP_PACKET_LEN, (ops))

#define	GEM_INIT_RXDESC(sc, x)						\
do {									\
	struct gem_rxsoft *__rxs = &sc->sc_rxsoft[(x)];			\
	struct gem_desc *__rxd = &sc->sc_rxdescs[(x)];			\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	__m->m_data = __m->m_ext.ext_buf;				\
	__rxd->gd_addr =						\
	    GEM_DMA_WRITE((sc), __rxs->rxs_dmamap->dm_segs[0].ds_addr);	\
	__rxd->gd_flags =						\
	    GEM_DMA_WRITE((sc),						\
		(((__m->m_ext.ext_size) << GEM_RD_BUFSHIFT)		\
		    & GEM_RD_BUFSIZE) | GEM_RD_OWN);			\
	GEM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
} while (0)
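
/*
 * Example usage (a sketch; error handling omitted): once a fresh mbuf
 * cluster has been loaded into rxs_dmamap, the descriptor is handed
 * back to the chip with GEM_RD_OWN set:
 *
 *	rxs->rxs_mbuf = m;
 *	bus_dmamap_load(sc->sc_dmatag, rxs->rxs_dmamap,
 *	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
 *	GEM_INIT_RXDESC(sc, idx);
 */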

#ifdef _KERNEL
void	gem_attach __P((struct gem_softc *, const uint8_t *));
int	gem_intr __P((void *));

void	gem_reset __P((struct gem_softc *));
#endif /* _KERNEL */
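
/*
 * The bus front-end (e.g. the PCI or SBus attachment) is expected to fill
 * in sc_bustag, sc_dmatag, sc_h and sc_pci, then call gem_attach() with
 * the station address and wire gem_intr() up as the interrupt handler.
 * A rough sketch for a PCI front-end (the local names are assumptions):
 *
 *	sc->sc_bustag = memt;
 *	sc->sc_dmatag = pa->pa_dmat;
 *	sc->sc_pci = 1;
 *	... map the chip registers into sc->sc_h ...
 *	gem_attach(sc, enaddr);
 *	pci_intr_establish(pa->pa_pc, ih, IPL_NET, gem_intr, sc);
 */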


#endif /* _IF_GEMVAR_H */