/*	$NetBSD: if_gfevar.h,v 1.10 2010/04/28 13:51:56 kiyohara Exp $	*/

/*
 * Copyright (c) 2002 Allegro Networks, Inc., Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Allegro Networks, Inc., and Wasabi Systems, Inc.
 * 4. The name of Allegro Networks, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 * 5. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY ALLEGRO NETWORKS, INC. AND
 * WASABI SYSTEMS, INC. ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL EITHER ALLEGRO NETWORKS, INC. OR WASABI SYSTEMS, INC.
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _IF_GFEVAR_H_
#define	_IF_GFEVAR_H_

#define	GE_RXDESC_MEMSIZE	(1 * PAGE_SIZE)
#define	GE_RXDESC_MAX		64
#define	GE_RXBUF_SIZE		2048
#define	GE_RXBUF_MEMSIZE	(GE_RXDESC_MAX*GE_RXBUF_SIZE)
#define	GE_RXBUF_NSEGS		((GE_RXBUF_MEMSIZE/PAGE_SIZE)+1)
#define	GE_DMSEG_MAX		(GE_RXBUF_NSEGS)
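
/*
 * Worked example (illustrative only, assuming PAGE_SIZE == 4096):
 * GE_RXBUF_MEMSIZE is 64 * 2048 bytes = 128KB of receive buffer space,
 * i.e. 32 pages, so GE_RXBUF_NSEGS permits up to 33 DMA segments
 * (one spare beyond the exact page count).
 */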

struct gfe_dmamem {
	bus_dmamap_t gdm_map;		/* dmamem'ed memory */
	void *gdm_kva;			/* kva of the mapped memory */
	int gdm_nsegs;			/* # of segments in gdm_segs */
	int gdm_maxsegs;		/* maximum # of segments allowed */
	size_t gdm_size;		/* size of memory region */
	bus_dma_segment_t gdm_segs[GE_DMSEG_MAX]; /* dma segments of the memory */
};
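
/*
 * A gfe_dmamem describes one DMA-safe memory region.  A minimal sketch
 * of how such a region is typically set up with bus_dma(9) (error
 * handling omitted; everything other than the bus_dma(9) calls themselves
 * is illustrative, not taken from this driver):
 *
 *	struct gfe_dmamem *gdm = ...;
 *	bus_dmamem_alloc(sc->sc_dmat, gdm->gdm_size, PAGE_SIZE, 0,
 *	    gdm->gdm_segs, gdm->gdm_maxsegs, &gdm->gdm_nsegs, BUS_DMA_NOWAIT);
 *	bus_dmamem_map(sc->sc_dmat, gdm->gdm_segs, gdm->gdm_nsegs,
 *	    gdm->gdm_size, &gdm->gdm_kva, BUS_DMA_NOWAIT);
 *	bus_dmamap_create(sc->sc_dmat, gdm->gdm_size, gdm->gdm_nsegs,
 *	    gdm->gdm_size, 0, BUS_DMA_NOWAIT, &gdm->gdm_map);
 *	bus_dmamap_load(sc->sc_dmat, gdm->gdm_map, gdm->gdm_kva,
 *	    gdm->gdm_size, NULL, BUS_DMA_NOWAIT);
 */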

/* With a 4096 page size, we get 256 descriptors per page.
 */
#define	GE_TXDESC_MEMSIZE	(1 * PAGE_SIZE)
#define	GE_TXDESC_MAX		(GE_TXDESC_MEMSIZE / 16)
#define	GE_TXBUF_SIZE		(4 * PAGE_SIZE)
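
/*
 * Illustrative arithmetic (an assumption, not stated in this header):
 * the divisor 16 matches the size of a GT ethernet descriptor
 * (struct gt_eth_desc), so with PAGE_SIZE == 4096 each transmit queue
 * gets 4096 / 16 = 256 descriptors plus 4 * 4096 = 16KB of packet
 * staging space.
 */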

struct gfe_txqueue {
	struct ifqueue txq_pendq;	/* these are ready to go to the GT */
	struct gfe_dmamem txq_desc_mem;	/* transmit descriptor memory */
	struct gfe_dmamem txq_buf_mem;	/* transmit buffer memory */
	unsigned int txq_lo;		/* next to be given to GT */
	unsigned int txq_fi;		/* next to be returned to CPU */
	unsigned int txq_ei_gapcount;	/* counter until next EI */
	unsigned int txq_nactive;	/* number of active descriptors */
	unsigned int txq_outptr;	/* where to put next transmit packet */
	unsigned int txq_inptr;		/* start of 1st queued tx packet */
	uint32_t txq_intrbits;		/* bits to write to EIMR */
	uint32_t txq_esdcmrbits;	/* bits to write to ESDCMR */
	uint32_t txq_epsrbits;		/* bits to test with EPSR */
	volatile struct gt_eth_desc *txq_descs; /* ptr to tx descriptors */
	bus_addr_t txq_ectdp;		/* offset to cur. tx desc ptr reg */
	bus_addr_t txq_desc_busaddr;	/* bus addr of tx descriptors */
	bus_addr_t txq_buf_busaddr;	/* bus addr of tx buffers */
};
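
/*
 * Illustrative sketch (not from this header): txq_lo and txq_fi act as
 * producer/consumer indices into a ring of GE_TXDESC_MAX descriptors,
 * so the number of descriptors still owned by the GT could also be
 * derived as
 *
 *	nactive = (txq->txq_lo - txq->txq_fi + GE_TXDESC_MAX) % GE_TXDESC_MAX;
 *
 * modulo the usual full-vs-empty ring ambiguity, which is presumably
 * why the driver keeps an explicit txq_nactive instead.
 */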

/* With a 4096 page size, we get 256 descriptors per page.  We want 1024
 * which will give us about 8ms of 64 byte packets (2ms for each priority
 * queue).
 */

struct gfe_rxbuf {
	uint8_t rxb_data[GE_RXBUF_SIZE];
};

struct gfe_rxqueue {
	struct gfe_dmamem rxq_desc_mem;	/* receive descriptor memory */
	struct gfe_dmamem rxq_buf_mem;	/* receive buffer memory */
	struct mbuf *rxq_curpkt;	/* mbuf for current packet */
	volatile struct gt_eth_desc *rxq_descs;
	struct gfe_rxbuf *rxq_bufs;
	unsigned int rxq_fi;		/* next to be returned to CPU */
	unsigned int rxq_active;	/* # of descriptors given to GT */
	uint32_t rxq_intrbits;		/* bits to write to EIMR */
	bus_addr_t rxq_desc_busaddr;	/* bus addr of rx descriptors */
	uint32_t rxq_cmdsts;		/* save cmdsts from first descriptor */
	bus_size_t rxq_efrdp;
	bus_size_t rxq_ecrdp;
};

enum gfe_txprio {
	GE_TXPRIO_HI=1,
	GE_TXPRIO_LO=0,
	GE_TXPRIO_NONE=2
};
enum gfe_rxprio {
	GE_RXPRIO_HI=3,
	GE_RXPRIO_MEDHI=2,
	GE_RXPRIO_MEDLO=1,
	GE_RXPRIO_LO=0
};
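
/*
 * Illustrative use (an assumption, not spelled out in this header):
 * the priority enums double as indices into the queue arrays of
 * struct gfe_softc below, e.g.
 *
 *	struct gfe_txqueue *txq = &sc->sc_txq[GE_TXPRIO_LO];
 *	struct gfe_rxqueue *rxq = &sc->sc_rxq[GE_RXPRIO_HI];
 *
 * GE_TXPRIO_NONE is out of range for sc_txq[] and presumably serves
 * as a "no queue" marker.
 */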

struct gfec_softc {
	device_t sc_dev;		/* must be first */

	bus_space_tag_t sc_iot;
	bus_space_handle_t sc_ioh;	/* subregion for ethernet */

	kmutex_t sc_mtx;
};

struct gfe_softc {
	device_t sc_dev;		/* must be first */
	struct ethercom sc_ec;		/* common ethernet glue */
	struct callout sc_co;		/* resource recovery */
	mii_data_t sc_mii;		/* mii interface */

	bus_space_tag_t sc_memt;
	bus_space_handle_t sc_memh;	/* subregion for ethernet */
	bus_dma_tag_t sc_dmat;
	int sc_macno;			/* which mac? 0, 1, or 2 */

	unsigned int sc_tickflags;
#define	GE_TICK_TX_IFSTART	0x0001
#define	GE_TICK_RX_RESTART	0x0002
	unsigned int sc_flags;
#define	GE_ALLMULTI	0x0001
#define	GE_PHYSTSCHG	0x0002
#define	GE_RXACTIVE	0x0004
#define	GE_NOFREE	0x0008		/* Don't free on disable */
	uint32_t sc_pcr;		/* current EPCR value */
	uint32_t sc_pcxr;		/* current EPCXR value */
	uint32_t sc_intrmask;		/* current EIMR value */
	uint32_t sc_idlemask;		/* suspended EIMR bits */
	size_t sc_max_frame_length;	/* maximum frame length */

	/*
	 * Hash table related members
	 */
	struct gfe_dmamem sc_hash_mem;	/* dma'ble hash table */
	uint64_t *sc_hashtable;
	unsigned int sc_hashmask;	/* 0x1ff or 0x1fff */

	/*
	 * Transmit related members
	 */
	struct gfe_txqueue sc_txq[2];	/* High & Low transmit queues */

	/*
	 * Receive related members
	 */
	struct gfe_rxqueue sc_rxq[4];	/* Hi/MedHi/MedLo/Lo receive queues */

#if NRND > 0
	rndsource_element_t sc_rnd_source;
#endif
};
#endif /* _IF_GFEVAR_H_ */