/* $NetBSD: if_mvxpevar.h,v 1.1 2015/05/03 14:38:10 hsuenaga Exp $ */
/*
 * Copyright (c) 2015 Internet Initiative Japan Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _IF_MVXPEVAR_H_
#define _IF_MVXPEVAR_H_
/*
 * Compile options
 * XXX: use kernel config
 *
 * NOTE(review): MVXPE_DEBUG is defined to 0 here, but the debug section
 * below tests it with #ifdef, so the debug macros are always compiled in
 * (run-time gated by the mvxpe_debug variable).  Confirm whether
 * "#if MVXPE_DEBUG" was intended instead.
 */
#define MVXPE_DEBUG 0
#define MVXPE_EVENT_COUNTERS	/* enable evcnt(9) driver statistics */

/*
 * Limit of packet sizes.
 */
#define MVXPE_HWHEADER_SIZE 2	/* Marvell Header prepended to each frame */
#define MVXPE_MRU 2000		/* Max Receive Unit [bytes] */
#define MVXPE_MTU MVXPE_MRU	/* Max Transmit Unit [bytes] */
/*
 * Default limit of queue length (number of ring entries per queue)
 *
 * queue 0 is lowest priority and queue 7 is highest priority.
 * IP packet is received on queue 7 by default.
 *
 * XXX: packet classifier is not implemented yet
 */
#define MVXPE_RX_QUEUE_LIMIT_0	8
#define MVXPE_RX_QUEUE_LIMIT_1	8
#define MVXPE_RX_QUEUE_LIMIT_2	8
#define MVXPE_RX_QUEUE_LIMIT_3	8
#define MVXPE_RX_QUEUE_LIMIT_4	8
#define MVXPE_RX_QUEUE_LIMIT_5	8
#define MVXPE_RX_QUEUE_LIMIT_6	8
#define MVXPE_RX_QUEUE_LIMIT_7	256	/* default RX queue gets deep ring */

#define MVXPE_TX_QUEUE_LIMIT_0	256	/* default TX queue gets deep ring */
#define MVXPE_TX_QUEUE_LIMIT_1	8
#define MVXPE_TX_QUEUE_LIMIT_2	8
#define MVXPE_TX_QUEUE_LIMIT_3	8
#define MVXPE_TX_QUEUE_LIMIT_4	8
#define MVXPE_TX_QUEUE_LIMIT_5	8
#define MVXPE_TX_QUEUE_LIMIT_6	8
#define MVXPE_TX_QUEUE_LIMIT_7	8
/*
 * Device Register access
 *
 * All registers are 32bit wide and are accessed through the bus_space(9)
 * tag/handle pair kept in the softc.
 */
#define MVXPE_READ(sc, reg) \
	bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg))
#define MVXPE_WRITE(sc, reg, val) \
	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))

/* Read/write (c) consecutive 32bit words starting at (reg) */
#define MVXPE_READ_REGION(sc, reg, val, c) \
	bus_space_read_region_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val), (c))
#define MVXPE_WRITE_REGION(sc, reg, val, c) \
	bus_space_write_region_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val), (c))

/* MIB counters live behind their own subregion handle (sc_mibh) */
#define MVXPE_READ_MIB(sc, reg) \
	bus_space_read_4((sc)->sc_iot, (sc)->sc_mibh, (reg))

/* Non-zero when the port status register reports link up */
#define MVXPE_IS_LINKUP(sc) \
	(MVXPE_READ((sc), MVXPE_PSR) & MVXPE_PSR_LINKUP)

/*
 * EEE: Lower Power Idle config
 * Default timer is duration of MTU sized frame transmission.
 * The timer can be negotiated by LLDP protocol, but we have no
 * support.
 * (MVXPE_MRU * 8 bits at ~1Gbps gives the frame time in microseconds.)
 */
#define MVXPE_LPI_TS (MVXPE_MRU * 8 / 1000) /* [us] */
#define MVXPE_LPI_TW (MVXPE_MRU * 8 / 1000) /* [us] */
#define MVXPE_LPI_LI (MVXPE_MRU * 8 / 1000) /* [us] */
97 1.1 hsuenaga
/*
 * DMA Descriptor
 *
 * the ethernet device has 8 rx/tx DMA queues. each of queue has its own
 * descriptor list. descriptors are simply indexed by counter inside the
 * device.
 *
 * The ring counts must remain powers of two: the _MSK/_NEXT macros
 * implement the wrap-around with a bitwise AND.
 */
#define MVXPE_TX_RING_CNT	256
#define MVXPE_TX_RING_MSK	(MVXPE_TX_RING_CNT - 1)
#define MVXPE_TX_RING_NEXT(x)	(((x) + 1) & MVXPE_TX_RING_MSK)
#define MVXPE_RX_RING_CNT	256
#define MVXPE_RX_RING_MSK	(MVXPE_RX_RING_CNT - 1)
#define MVXPE_RX_RING_NEXT(x)	(((x) + 1) & MVXPE_RX_RING_MSK)
#define MVXPE_TX_SEGLIMIT	32	/* presumably max DMA segments per tx packet — confirm against tx path */
111 1.1 hsuenaga
/*
 * Per-queue RX descriptor ring and its software bookkeeping.
 * One instance per hardware RX queue (see sc_rx_ring[] in the softc).
 */
struct mvxpe_rx_ring {
	/* Real descriptors array. shared by RxDMA */
	struct mvxpe_rx_desc		*rx_descriptors;
	bus_dmamap_t			rx_descriptors_map;

	/* Management entries for each of descriptors */
	struct mvxpe_rx_handle {
		struct mvxpe_rx_desc	*rxdesc_va;	/* VA of this slot's descriptor */
		off_t			rxdesc_off;	/* from rx_descriptors[0] */
		struct mvxpe_bm_chunk	*chunk;		/* buffer attached to this slot */
	} rx_handle[MVXPE_RX_RING_CNT];

	/* locks */
	kmutex_t			rx_ring_mtx;

	/* Index — rx_dma tracks the device side, rx_cpu the driver side */
	int				rx_dma;
	int				rx_cpu;

	/* Limit */
	int				rx_queue_len;
	int				rx_queue_th_received;
	int				rx_queue_th_free;
	int				rx_queue_th_time; /* [Tclk] */
};
137 1.1 hsuenaga
/*
 * Per-queue TX descriptor ring and its software bookkeeping.
 * One instance per hardware TX queue (see sc_tx_ring[] in the softc).
 */
struct mvxpe_tx_ring {
	/* Real descriptors array. shared by TxDMA */
	struct mvxpe_tx_desc		*tx_descriptors;
	bus_dmamap_t			tx_descriptors_map;

	/* Management entries for each of descriptors */
	struct mvxpe_tx_handle {
		struct mvxpe_tx_desc	*txdesc_va;	/* VA of this slot's descriptor */
		off_t			txdesc_off;	/* from tx_descriptors[0] */
		struct mbuf		*txdesc_mbuf;	/* mbuf in flight on this slot */
		bus_dmamap_t		txdesc_mbuf_map;
	} tx_handle[MVXPE_TX_RING_CNT];

	/* locks */
	kmutex_t			tx_ring_mtx;

	/* Index — tx_dma tracks the device side, tx_cpu the driver side */
	int				tx_free_cnt;	/* number of free slots */
	int				tx_dma;
	int				tx_cpu;

	/* Limit */
	int				tx_queue_len;
	int				tx_queue_th_free;
};
163 1.1 hsuenaga
164 1.1 hsuenaga static inline int
165 1.1 hsuenaga tx_counter_adv(int ctr, int n)
166 1.1 hsuenaga {
167 1.1 hsuenaga /* XXX: lock or atomic */
168 1.1 hsuenaga ctr += n;
169 1.1 hsuenaga while (ctr >= MVXPE_TX_RING_CNT)
170 1.1 hsuenaga ctr -= MVXPE_TX_RING_CNT;
171 1.1 hsuenaga
172 1.1 hsuenaga return ctr;
173 1.1 hsuenaga }
174 1.1 hsuenaga
175 1.1 hsuenaga static inline int
176 1.1 hsuenaga rx_counter_adv(int ctr, int n)
177 1.1 hsuenaga {
178 1.1 hsuenaga /* XXX: lock or atomic */
179 1.1 hsuenaga ctr += n;
180 1.1 hsuenaga while (ctr >= MVXPE_TX_RING_CNT)
181 1.1 hsuenaga ctr -= MVXPE_TX_RING_CNT;
182 1.1 hsuenaga
183 1.1 hsuenaga return ctr;
184 1.1 hsuenaga }
185 1.1 hsuenaga
/*
 * Buffer alignment
 */
#define MVXPE_RXBUF_ALIGN	32	/* Cache line size */
#define MVXPE_RXBUF_MASK	(MVXPE_RXBUF_ALIGN - 1)
#define MVXPE_BM_ADDR_ALIGN	32	/* buffer manager address alignment */
#define MVXPE_BM_ADDR_MASK	(MVXPE_BM_ADDR_ALIGN - 1)

/*
 * Timeout control
 * "times" values are spin-loop iteration counts, not wall-clock units.
 */
#define MVXPE_PHY_TIMEOUT	10000		/* msec */
#define RX_DISABLE_TIMEOUT	0x1000000	/* times */
#define TX_DISABLE_TIMEOUT	0x1000000	/* times */
#define TX_FIFO_EMPTY_TIMEOUT	0x1000000	/* times */
201 1.1 hsuenaga
/*
 * Event counter
 * Compiled to no-ops when MVXPE_EVENT_COUNTERS is not defined, so the
 * hot paths carry no statistics cost in that configuration.
 */
#ifdef MVXPE_EVENT_COUNTERS
#define MVXPE_EVCNT_INCR(ev)		(ev)->ev_count++
#define MVXPE_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define MVXPE_EVCNT_INCR(ev)		/* nothing */
#define MVXPE_EVCNT_ADD(ev, val)	/* nothing */
#endif
/*
 * Driver event counters, published via evcnt(9).
 * Grouped by the code path that bumps them.
 */
struct mvxpe_evcnt {
	/*
	 * Master Interrupt Handler
	 */
	struct evcnt ev_i_rxtxth;
	struct evcnt ev_i_rxtx;
	struct evcnt ev_i_misc;

	/*
	 * RXTXTH Interrupt
	 */
	struct evcnt ev_rxtxth_txerr;

	/*
	 * MISC Interrupt
	 */
	struct evcnt ev_misc_phystatuschng;
	struct evcnt ev_misc_linkchange;
	struct evcnt ev_misc_iae;
	struct evcnt ev_misc_rxoverrun;
	struct evcnt ev_misc_rxcrc;
	struct evcnt ev_misc_rxlargepacket;
	struct evcnt ev_misc_txunderrun;
	struct evcnt ev_misc_prbserr;
	struct evcnt ev_misc_srse;
	struct evcnt ev_misc_txreq;

	/*
	 * RxTx Interrupt
	 */
	struct evcnt ev_rxtx_rreq;
	struct evcnt ev_rxtx_rpq;
	struct evcnt ev_rxtx_tbrq;
	struct evcnt ev_rxtx_rxtxth;
	struct evcnt ev_rxtx_txerr;
	struct evcnt ev_rxtx_misc;

	/*
	 * Link
	 */
	struct evcnt ev_link_up;
	struct evcnt ev_link_down;

	/*
	 * Rx Descriptor (error bits reported per received descriptor)
	 */
	struct evcnt ev_rxd_ce;		/* CRC error */
	struct evcnt ev_rxd_or;		/* overrun */
	struct evcnt ev_rxd_mf;		/* max frame length error */
	struct evcnt ev_rxd_re;		/* resource error */
	struct evcnt ev_rxd_scat;	/* scattered (multi-descriptor) frame */

	/*
	 * Tx Descriptor (error bits reported per transmitted descriptor)
	 */
	struct evcnt ev_txd_lc;		/* late collision */
	struct evcnt ev_txd_ur;		/* underrun */
	struct evcnt ev_txd_rl;		/* retry limit exceeded */
	struct evcnt ev_txd_oth;	/* other error */

	/*
	 * Status Registers
	 */
	struct evcnt ev_reg_pdfc;	/* Rx Port Discard Frame Counter */
	struct evcnt ev_reg_pofc;	/* Rx Port Overrun Frame Counter */
	struct evcnt ev_reg_txbadfcs;	/* Tx BAD FCS Counter */
	struct evcnt ev_reg_txdropped;	/* Tx Dropped Counter */
	struct evcnt ev_reg_lpic;	/* Low Power Idle Counter */


	/* Device Driver Errors */
	struct evcnt ev_drv_wdogsoft;	/* watchdog-triggered soft reset */
	struct evcnt ev_drv_txerr;
	struct evcnt ev_drv_rxq[MVXPE_QUEUE_SIZE];	/* packets per rx queue */
	struct evcnt ev_drv_rxqe[MVXPE_QUEUE_SIZE];	/* errors per rx queue */
	struct evcnt ev_drv_txq[MVXPE_QUEUE_SIZE];	/* packets per tx queue */
	struct evcnt ev_drv_txqe[MVXPE_QUEUE_SIZE];	/* errors per tx queue */
};
290 1.1 hsuenaga
/*
 * Debug
 *
 * Run-time verbosity is controlled by the global mvxpe_debug variable;
 * each macro prints only when mvxpe_debug >= level.
 *
 * NOTE(review): MVXPE_DEBUG is defined to 0 above but tested here with
 * #ifdef, so these macros are always compiled in — confirm intent.
 *
 * Fix: DPRINTIFNET was defined twice with identical bodies (legal under
 * C11 6.10.3 identical-redefinition rules, but clearly a copy-paste
 * slip); the duplicate definition has been removed.
 */
#ifdef MVXPE_DEBUG
#define DPRINTF(fmt, ...) \
	do { \
		if (mvxpe_debug >= 1) { \
			printf("%s: ", __func__); \
			printf((fmt), ##__VA_ARGS__); \
		} \
	} while (/*CONSTCOND*/0)
#define DPRINTFN(level, fmt, ...) \
	do { \
		if (mvxpe_debug >= (level)) { \
			printf("%s: ", __func__); \
			printf((fmt), ##__VA_ARGS__); \
		} \
	} while (/*CONSTCOND*/0)
#define DPRINTDEV(dev, level, fmt, ...) \
	do { \
		if (mvxpe_debug >= (level)) { \
			device_printf((dev), \
			    "%s: " fmt, __func__, ##__VA_ARGS__); \
		} \
	} while (/*CONSTCOND*/0)
#define DPRINTSC(sc, level, fmt, ...) \
	do { \
		device_t dev = (sc)->sc_dev; \
		if (mvxpe_debug >= (level)) { \
			device_printf(dev, \
			    "%s: " fmt, __func__, ##__VA_ARGS__); \
		} \
	} while (/*CONSTCOND*/0)
#define DPRINTIFNET(ifp, level, fmt, ...) \
	do { \
		const char *xname = (ifp)->if_xname; \
		if (mvxpe_debug >= (level)) { \
			printf("%s: %s: " fmt, xname, __func__, ##__VA_ARGS__);\
		} \
	} while (/*CONSTCOND*/0)
#define DPRINTPRXS(level, q) \
	do { \
		uint32_t _reg = MVXPE_READ(sc, MVXPE_PRXS(q)); \
		if (mvxpe_debug >= (level)) { \
			printf("PRXS(queue %d) %#x: Occupied %d, NoOccupied %d.\n", \
			    q, _reg, MVXPE_PRXS_GET_ODC(_reg), \
			    MVXPE_PRXS_GET_NODC(_reg)); \
		} \
	} while (/*CONSTCOND*/0)
#else
#define DPRINTF(fmt, ...)
#define DPRINTFN(level, fmt, ...)
#define DPRINTDEV(dev, level, fmt, ...)
#define DPRINTSC(sc, level, fmt, ...)
#define DPRINTIFNET(ifp, level, fmt, ...)
#define DPRINTPRXS(level, reg)
#endif
355 1.1 hsuenaga
/* Lock-held assertions for the driver's mutexes (KASSERT(9)). */
#define KASSERT_SC_MTX(sc) \
	KASSERT(mutex_owned(&(sc)->sc_mtx))
#define KASSERT_BM_MTX(sc) \
	KASSERT(mutex_owned(&(sc)->sc_bm.bm_mtx))
#define KASSERT_RX_MTX(sc, q) \
	KASSERT(mutex_owned(&(sc)->sc_rx_ring[(q)].rx_ring_mtx))
#define KASSERT_TX_MTX(sc, q) \
	KASSERT(mutex_owned(&(sc)->sc_tx_ring[(q)].tx_ring_mtx))
364 1.1 hsuenaga
/*
 * Configuration parameters
 */
struct mvxpe_conf {
	int cf_lpi;	/* EEE Low Power IDLE enable */
	int cf_fc;	/* Flow Control enable */
};
372 1.1 hsuenaga
/*
 * sysctl(9) parameters
 * One instance per (direction, queue) pair; passed as the sysctl node's
 * private data so the handler can find the softc and queue.
 */
struct mvxpe_softc;
struct mvxpe_sysctl_queue {
	struct mvxpe_softc	*sc;
	int			rxtx;	/* MVXPE_SYSCTL_RX or _TX */
	int			queue;	/* queue number */
};
#define MVXPE_SYSCTL_RX		0
#define MVXPE_SYSCTL_TX		1
384 1.1 hsuenaga
/* Per-MIB-counter sysctl node state: index into the MIB and cached value. */
struct mvxpe_sysctl_mib {
	struct mvxpe_softc	*sc;
	int			index;
	uint64_t		counter;
};
390 1.1 hsuenaga
/*
 * Packet Buffer Header
 *
 * this chunks may be managed by H/W Buffer Manager(BM) device,
 * but there is no device driver yet.
 *
 * +----------------+ bm_buf
 * |chunk header    |                            |
 * +----------------+                      |     |chunk->buf_off
 * |mbuf (M_EXT set)|<--------|struct mbuf *m |  V
 * +----------------+         +----------------+ chunk->buf_va/buf_pa
 * |   m_ext.ext_buf|-------->|packet buffer   | |
 * +----------------+         |                | |chunk->buf_size
 *                            |                | V
 *                            +----------------+
 *                            |chunk header    |
 *                            |....            |
 */
/* Enough chunks for every RX ring slot on every queue, plus one ring's spare */
#define MVXPE_BM_SLOTS \
    (MVXPE_RX_RING_CNT * (MVXPE_QUEUE_SIZE + 1))
/* Per-chunk packet buffer size: largest frame plus the Marvell header */
#define MVXPE_BM_SIZE \
    (MVXPE_MRU + MVXPE_HWHEADER_SIZE)
413 1.1 hsuenaga
/*
 * One buffer-manager chunk: header bookkeeping immediately followed in
 * memory by the packet buffer it describes (see diagram above).
 */
struct mvxpe_bm_chunk {
	struct mbuf	*m;		/* back pointer to  mbuf header */
	void		*sc;		/* back pointer to softc */
	off_t		off;		/* offset of chunk */
	paddr_t		pa;		/* physical address of chunk */

	off_t		buf_off;	/* offset of packet from sc_bm_buf */
	paddr_t		buf_pa;		/* physical address of packet */
	vaddr_t		buf_va;		/* virtual address of packet */
	size_t		buf_size;	/* size of buffer (exclude hdr) */

	LIST_ENTRY(mvxpe_bm_chunk) link;
	/* followed by packet buffer */
};
428 1.1 hsuenaga
/*
 * Software buffer-manager state (stand-in for the H/W BM device).
 * Embedded in struct mvxpe_softc as the sc_bm member.
 */
struct mvxpe_bm_softc {
	bus_dma_tag_t		bm_dmat;
	bus_dmamap_t		bm_map;
	kmutex_t		bm_mtx;

	/* DMA MAP for entire buffer */
	char			*bm_buf;

	/* memory chunk properties */
	size_t			bm_slotsize;		/* size of bm_slots include header */
	size_t			bm_chunk_count;		/* number of chunks */
	size_t			bm_chunk_size;		/* size of packet buffer */
	off_t			bm_chunk_header_size;	/* size of header + padding */
	off_t			bm_chunk_packet_offset;	/* allocate m_leading_space */
	struct mvxpe_bm_chunk	*bm_slots[MVXPE_BM_SLOTS];

	/* for software based management */
	LIST_HEAD(__mvxpe_bm_freehead, mvxpe_bm_chunk) bm_free;
	LIST_HEAD(__mvxpe_bm_inusehead, mvxpe_bm_chunk) bm_inuse;
} sc_bm;
/*
 * NOTE(review): the trailing "sc_bm" above declares a (tentative) global
 * variable in a header, in addition to the sc_bm member inside struct
 * mvxpe_softc.  It looks like a stray instance that should be removed —
 * confirm no translation unit relies on the global before deleting it.
 */

#define BM_SYNC_ALL	0	/* bus_dmamap_sync the whole BM buffer */
451 1.1 hsuenaga
/*
 * Ethernet Device main context (one per port)
 */
struct mvxpe_softc {
	device_t sc_dev;
	int sc_port;		/* port number on the controller */
	uint32_t sc_version;	/* device version register value */

	/*
	 * sc_mtx must be held by interface functions to/from
	 * other frameworks. interrupt handler, sysctl handler,
	 * ioctl handler, and so on.
	 */
	kmutex_t sc_mtx;

	/*
	 * Ethernet facilities
	 */
	struct ethercom sc_ethercom;
	struct mii_data sc_mii;
	u_int8_t sc_enaddr[ETHER_ADDR_LEN];	/* station addr */
	int sc_if_flags;			/* cached ifp->if_flags */
	int sc_wdogsoft;			/* watchdog soft-reset pending */

	/*
	 * Configuration Parameters
	 */
	struct mvxpe_conf sc_cf;

	/*
	 * I/O Spaces
	 */
	bus_space_tag_t sc_iot;
	bus_space_handle_t sc_ioh;	/* all registers handle */
	bus_space_handle_t sc_mibh;	/* mib counter handle */

	/*
	 * DMA Spaces
	 */
	bus_dma_tag_t sc_dmat;
	struct mvxpe_rx_ring sc_rx_ring[MVXPE_QUEUE_SIZE];
	struct mvxpe_tx_ring sc_tx_ring[MVXPE_QUEUE_SIZE];
	int sc_tx_pending;		/* total number of tx pkt */

	/*
	 * Software Buffer Manager
	 * XXX: to be written the independent device driver.
	 */
	struct mvxpe_bm_softc sc_bm;

	/*
	 * Maintenance clock
	 */
	callout_t sc_tick_ch;		/* tick callout */

	/*
	 * Link State control
	 */
	uint32_t sc_linkstate;

	/*
	 * Act as Random source
	 */
	krndsource_t sc_rnd_source;

	/*
	 * Sysctl interfaces
	 */
	struct sysctllog *sc_mvxpe_clog;
	struct mvxpe_sysctl_queue sc_sysctl_rx_queue[MVXPE_QUEUE_SIZE];
	struct mvxpe_sysctl_queue sc_sysctl_tx_queue[MVXPE_QUEUE_SIZE];

	/*
	 * MIB counter
	 */
	size_t sc_sysctl_mib_size;
	struct mvxpe_sysctl_mib *sc_sysctl_mib;

#ifdef MVXPE_EVENT_COUNTERS
	/*
	 * Event counter
	 */
	struct mvxpe_evcnt sc_ev;
#endif
};
/*
 * Convenience accessors for ring (q) and slot (i) state in the softc.
 * _VA/_PA give the virtual/physical base of a queue's descriptor array;
 * the per-slot macros index through the rx/tx handle arrays.
 */
#define MVXPE_RX_RING_MEM_VA(sc, q) \
    ((sc)->sc_rx_ring[(q)].rx_descriptors)
#define MVXPE_RX_RING_MEM_PA(sc, q) \
    ((sc)->sc_rx_ring[(q)].rx_descriptors_map->dm_segs[0].ds_addr)
#define MVXPE_RX_RING_MEM_MAP(sc, q) \
    ((sc)->sc_rx_ring[(q)].rx_descriptors_map)
#define MVXPE_RX_RING(sc, q) \
    (&(sc)->sc_rx_ring[(q)])
#define MVXPE_RX_HANDLE(sc, q, i) \
    (&(sc)->sc_rx_ring[(q)].rx_handle[(i)])
#define MVXPE_RX_DESC(sc, q, i) \
    ((sc)->sc_rx_ring[(q)].rx_handle[(i)].rxdesc_va)
#define MVXPE_RX_DESC_OFF(sc, q, i) \
    ((sc)->sc_rx_ring[(q)].rx_handle[(i)].rxdesc_off)
#define MVXPE_RX_PKTBUF(sc, q, i) \
    ((sc)->sc_rx_ring[(q)].rx_handle[(i)].chunk)

#define MVXPE_TX_RING_MEM_VA(sc, q) \
    ((sc)->sc_tx_ring[(q)].tx_descriptors)
#define MVXPE_TX_RING_MEM_PA(sc, q) \
    ((sc)->sc_tx_ring[(q)].tx_descriptors_map->dm_segs[0].ds_addr)
#define MVXPE_TX_RING_MEM_MAP(sc, q) \
    ((sc)->sc_tx_ring[(q)].tx_descriptors_map)
#define MVXPE_TX_RING(sc, q) \
    (&(sc)->sc_tx_ring[(q)])
#define MVXPE_TX_HANDLE(sc, q, i) \
    (&(sc)->sc_tx_ring[(q)].tx_handle[(i)])
#define MVXPE_TX_DESC(sc, q, i) \
    ((sc)->sc_tx_ring[(q)].tx_handle[(i)].txdesc_va)
#define MVXPE_TX_DESC_OFF(sc, q, i) \
    ((sc)->sc_tx_ring[(q)].tx_handle[(i)].txdesc_off)
#define MVXPE_TX_MBUF(sc, q, i) \
    ((sc)->sc_tx_ring[(q)].tx_handle[(i)].txdesc_mbuf)
#define MVXPE_TX_MAP(sc, q, i) \
    ((sc)->sc_tx_ring[(q)].tx_handle[(i)].txdesc_mbuf_map)

#endif /* _IF_MVXPEVAR_H_ */
574