/*	$NetBSD: if_gfevar.h,v 1.9 2008/06/10 22:44:07 he Exp $	*/

/*
 * Copyright (c) 2002 Allegro Networks, Inc., Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Allegro Networks, Inc., and Wasabi Systems, Inc.
 * 4. The name of Allegro Networks, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 * 5. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY ALLEGRO NETWORKS, INC. AND
 * WASABI SYSTEMS, INC. ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL EITHER ALLEGRO NETWORKS, INC. OR WASABI SYSTEMS, INC.
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define	GE_RXDESC_MEMSIZE	(1 * PAGE_SIZE)
#define	GE_RXDESC_MAX		64
#define	GE_RXBUF_SIZE		2048
#define	GE_RXBUF_MEMSIZE	(GE_RXDESC_MAX*GE_RXBUF_SIZE)
#define	GE_RXBUF_NSEGS		((GE_RXBUF_MEMSIZE/PAGE_SIZE)+1)
#define	GE_DMSEG_MAX		(GE_RXBUF_NSEGS)
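
/*
 * Illustrative arithmetic, assuming a 4096-byte PAGE_SIZE (which is not
 * guaranteed on every port): GE_RXBUF_MEMSIZE = 64 * 2048 = 131072 bytes
 * (128 KiB), so GE_RXBUF_NSEGS = 131072/4096 + 1 = 33, which in turn
 * bounds GE_DMSEG_MAX, the size of the gdm_segs[] array below.
 */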

struct gfe_dmamem {
	bus_dmamap_t gdm_map;		/* dmamem'ed memory */
	void *gdm_kva;			/* kva of the mapped memory */
	int gdm_nsegs;			/* # of segments in gdm_segs */
	int gdm_maxsegs;		/* maximum # of segments allowed */
	size_t gdm_size;		/* size of memory region */
	bus_dma_segment_t gdm_segs[GE_DMSEG_MAX]; /* dma segments of the region */
};
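
/*
 * Minimal sketch (a hypothetical helper, not part of this header) of how
 * a struct gfe_dmamem is typically populated with the bus_dma(9) API;
 * the driver's real helper lives in if_gfe.c and also performs error
 * unwinding, which is omitted here for brevity.
 */
#if 0
static int
gfe_dmamem_example(bus_dma_tag_t dmat, struct gfe_dmamem *gdm,
	int maxsegs, size_t size)
{
	int error;

	gdm->gdm_size = size;
	gdm->gdm_maxsegs = maxsegs;

	/* Allocate DMA-safe memory, page aligned, into gdm_segs[]. */
	error = bus_dmamem_alloc(dmat, gdm->gdm_size, PAGE_SIZE, 0,
	    gdm->gdm_segs, gdm->gdm_maxsegs, &gdm->gdm_nsegs,
	    BUS_DMA_NOWAIT);
	if (error)
		return error;

	/* Map the segments into kernel virtual address space. */
	error = bus_dmamem_map(dmat, gdm->gdm_segs, gdm->gdm_nsegs,
	    gdm->gdm_size, &gdm->gdm_kva, BUS_DMA_NOWAIT);
	if (error)
		return error;

	/* Create a map and load the whole region for DMA. */
	error = bus_dmamap_create(dmat, gdm->gdm_size, gdm->gdm_nsegs,
	    gdm->gdm_size, 0, BUS_DMA_NOWAIT, &gdm->gdm_map);
	if (error)
		return error;

	return bus_dmamap_load(dmat, gdm->gdm_map, gdm->gdm_kva,
	    gdm->gdm_size, NULL, BUS_DMA_NOWAIT);
}
#endif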

/* With a 4096-byte page size, we get 256 16-byte descriptors per page. */
#define	GE_TXDESC_MEMSIZE	(1 * PAGE_SIZE)
#define	GE_TXDESC_MAX		(GE_TXDESC_MEMSIZE / 16)
#define	GE_TXBUF_SIZE		(4 * PAGE_SIZE)
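
/*
 * Worked numbers, again assuming PAGE_SIZE is 4096: GE_TXDESC_MAX =
 * 4096/16 = 256 descriptors per queue, backed by GE_TXBUF_SIZE =
 * 4 * 4096 = 16 KiB of staging space, which txq_outptr/txq_inptr in
 * struct gfe_txqueue below evidently manage as a byte ring.
 */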

struct gfe_txqueue {
	struct ifqueue txq_pendq;	/* these are ready to go to the GT */
	struct gfe_dmamem txq_desc_mem;	/* transmit descriptor memory */
	struct gfe_dmamem txq_buf_mem;	/* transmit buffer memory */
	unsigned int txq_lo;		/* next to be given to GT */
	unsigned int txq_fi;		/* next to be returned to CPU */
	unsigned int txq_ei_gapcount;	/* counter until next EI */
	unsigned int txq_nactive;	/* number of active descriptors */
	unsigned int txq_outptr;	/* where to put next transmit packet */
	unsigned int txq_inptr;		/* start of 1st queued tx packet */
	uint32_t txq_intrbits;		/* bits to write to EIMR */
	uint32_t txq_esdcmrbits;	/* bits to write to ESDCMR */
	uint32_t txq_epsrbits;		/* bits to test with EPSR */
	volatile struct gt_eth_desc *txq_descs;	/* ptr to tx descriptors */
	bus_addr_t txq_ectdp;		/* offset to cur. tx desc ptr reg */
	bus_addr_t txq_desc_busaddr;	/* bus addr of tx descriptors */
	bus_addr_t txq_buf_busaddr;	/* bus addr of tx buffers */
};
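
/*
 * Sketch of how the ring indices above interact: txq_lo advances as
 * descriptors are handed to the GT, txq_fi chases it as completions are
 * reaped, and both wrap modulo GE_TXDESC_MAX. GT_OWN is a hypothetical
 * stand-in for the descriptor-ownership bit in ed_cmdsts (see gtethreg.h
 * for the real definitions); byte-order conversion of the descriptor
 * words is omitted for clarity.
 */
#if 0
static void
gfe_txq_reap_example(struct gfe_txqueue *txq)
{
	while (txq->txq_nactive > 0) {
		volatile struct gt_eth_desc *ed =
		    &txq->txq_descs[txq->txq_fi];

		/* Stop at the first descriptor the GT still owns. */
		if (ed->ed_cmdsts & GT_OWN)	/* hypothetical bit name */
			break;

		txq->txq_fi = (txq->txq_fi + 1) % GE_TXDESC_MAX;
		txq->txq_nactive--;
	}
}
#endif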

/* With a 4096-byte page size we get 256 descriptors per page; 1024
 * receive descriptors would buffer about 8ms of 64-byte packets (2ms for
 * each priority queue), though GE_RXDESC_MAX above currently caps each
 * ring at 64.
 */

struct gfe_rxbuf {
	uint8_t rxb_data[GE_RXBUF_SIZE];
};

struct gfe_rxqueue {
	struct gfe_dmamem rxq_desc_mem;	/* receive descriptor memory */
	struct gfe_dmamem rxq_buf_mem;	/* receive buffer memory */
	struct mbuf *rxq_curpkt;	/* mbuf for current packet */
	volatile struct gt_eth_desc *rxq_descs;
	struct gfe_rxbuf *rxq_bufs;
	unsigned int rxq_fi;		/* next to be returned to CPU */
	unsigned int rxq_active;	/* # of descriptors given to GT */
	uint32_t rxq_intrbits;		/* bits to write to EIMR */
	bus_addr_t rxq_desc_busaddr;	/* bus addr of rx descriptors */
	uint32_t rxq_cmdsts;		/* save cmdsts from first descriptor */
	bus_size_t rxq_efrdp;		/* offset of first rx desc ptr reg */
	bus_size_t rxq_ecrdp;		/* offset of current rx desc ptr reg */
};
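
/*
 * Sketch of the receive-side index discipline implied by the members
 * above: the descriptor at rxq_descs[rxq_fi] corresponds to the buffer
 * rxq_bufs[rxq_fi], and rxq_fi wraps modulo GE_RXDESC_MAX. GT_OWN is the
 * same hypothetical ownership-bit stand-in as in the transmit sketch.
 */
#if 0
static void
gfe_rxq_advance_example(struct gfe_rxqueue *rxq)
{
	volatile struct gt_eth_desc *ed = &rxq->rxq_descs[rxq->rxq_fi];

	if (ed->ed_cmdsts & GT_OWN)	/* hypothetical bit name */
		return;			/* slot still owned by the GT */

	/*
	 * A real driver would copy rxq_bufs[rxq->rxq_fi] into the
	 * rxq_curpkt mbuf here and hand the descriptor back to the GT.
	 */
	rxq->rxq_fi = (rxq->rxq_fi + 1) % GE_RXDESC_MAX;
}
#endif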

enum gfe_txprio {
	GE_TXPRIO_HI=1,
	GE_TXPRIO_LO=0,
	GE_TXPRIO_NONE=2
};
enum gfe_rxprio {
	GE_RXPRIO_HI=3,
	GE_RXPRIO_MEDHI=2,
	GE_RXPRIO_MEDLO=1,
	GE_RXPRIO_LO=0
};
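
/*
 * The enum values double as array indices: sc_txq[GE_TXPRIO_LO] and
 * sc_txq[GE_TXPRIO_HI] select the two transmit queues below, while
 * GE_TXPRIO_NONE (2) falls outside the two-element sc_txq[] and so
 * evidently serves as a "no queue" sentinel. The four receive queues
 * are likewise indexed sc_rxq[GE_RXPRIO_LO] through sc_rxq[GE_RXPRIO_HI].
 */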

struct gfe_softc {
	struct device sc_dev;		/* must be first */
	struct ethercom sc_ec;		/* common ethernet glue */
	struct callout sc_co;		/* resource recovery */
	mii_data_t sc_mii;		/* mii interface */

	/*
	 * Bus space and DMA resources
	 */
	bus_space_tag_t sc_gt_memt;
	bus_space_handle_t sc_gt_memh;
	bus_space_handle_t sc_memh;	/* subregion for ethernet */
	bus_dma_tag_t sc_dmat;
	int sc_macno;			/* which mac? 0, 1, or 2 */

	unsigned int sc_tickflags;
#define	GE_TICK_TX_IFSTART	0x0001
#define	GE_TICK_RX_RESTART	0x0002
	unsigned int sc_flags;
#define	GE_ALLMULTI	0x0001
#define	GE_PHYSTSCHG	0x0002
#define	GE_RXACTIVE	0x0004
#define	GE_NOFREE	0x0008		/* don't free on disable */
	uint32_t sc_pcr;		/* current EPCR value */
	uint32_t sc_pcxr;		/* current EPCXR value */
	uint32_t sc_intrmask;		/* current EIMR value */
	uint32_t sc_idlemask;		/* suspended EIMR bits */
	size_t sc_max_frame_length;	/* maximum frame length */

	/*
	 * Hash table related members
	 */
	struct gfe_dmamem sc_hash_mem;	/* dma'ble hash table */
	uint64_t *sc_hashtable;
	unsigned int sc_hashmask;	/* 0x1ff or 0x1fff */
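	/*
	 * Size arithmetic, assuming sc_hashmask masks an index of 8-byte
	 * (uint64_t) entries (an assumption; see the gfe_hash_* routines
	 * in if_gfe.c for the real usage): 0x1ff yields a table of
	 * (0x1ff + 1) * 8 = 4 KiB and 0x1fff one of (0x1fff + 1) * 8 =
	 * 64 KiB, carved from sc_hash_mem.
	 */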

	/*
	 * Transmit related members
	 */
	struct gfe_txqueue sc_txq[2];	/* High & Low transmit queues */

	/*
	 * Receive related members
	 */
	struct gfe_rxqueue sc_rxq[4];	/* Hi/MedHi/MedLo/Lo receive queues */
};