/* $NetBSD: if_mvxpevar.h,v 1.2 2015/06/03 03:55:47 hsuenaga Exp $ */
/*
 * Copyright (c) 2015 Internet Initiative Japan Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _IF_MVXPEVAR_H_
#define _IF_MVXPEVAR_H_
#include <net/if.h>
#include <dev/marvell/mvxpbmvar.h>

/*
 * Limit of packet sizes.
 */
#define MVXPE_HWHEADER_SIZE	2	/* Marvell Header */
#define MVXPE_MRU		2000	/* Max Receive Unit */
#define MVXPE_MTU		MVXPE_MRU /* Max Transmit Unit */

/*
 * Default limit of queue length.
 *
 * Queue 0 is lowest priority and queue 7 is highest priority.
 * IP packets are received on queue 7 by default.
 *
 * XXX: packet classifier is not implemented yet
 */
#define MVXPE_RX_QUEUE_LIMIT_0	8
#define MVXPE_RX_QUEUE_LIMIT_1	8
#define MVXPE_RX_QUEUE_LIMIT_2	8
#define MVXPE_RX_QUEUE_LIMIT_3	8
#define MVXPE_RX_QUEUE_LIMIT_4	8
#define MVXPE_RX_QUEUE_LIMIT_5	8
#define MVXPE_RX_QUEUE_LIMIT_6	8
#define MVXPE_RX_QUEUE_LIMIT_7	IFQ_MAXLEN

#define MVXPE_TX_QUEUE_LIMIT_0	IFQ_MAXLEN
#define MVXPE_TX_QUEUE_LIMIT_1	8
#define MVXPE_TX_QUEUE_LIMIT_2	8
#define MVXPE_TX_QUEUE_LIMIT_3	8
#define MVXPE_TX_QUEUE_LIMIT_4	8
#define MVXPE_TX_QUEUE_LIMIT_5	8
#define MVXPE_TX_QUEUE_LIMIT_6	8
#define MVXPE_TX_QUEUE_LIMIT_7	8

/* An interrupt is triggered when crossing (queuelen / RATIO). */
#define MVXPE_RXTH_RATIO	8
#define MVXPE_RXTH_REFILL_RATIO	2
#define MVXPE_TXTH_RATIO	8

/*
 * Device Register access.
 */
#define MVXPE_READ(sc, reg) \
	bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg))
#define MVXPE_WRITE(sc, reg, val) \
	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))

#define MVXPE_READ_REGION(sc, reg, val, c) \
	bus_space_read_region_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val), (c))
#define MVXPE_WRITE_REGION(sc, reg, val, c) \
	bus_space_write_region_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val), (c))

/* MIB counters are read through the dedicated sc_mibh subregion handle. */
#define MVXPE_READ_MIB(sc, reg) \
	bus_space_read_4((sc)->sc_iot, (sc)->sc_mibh, (reg))

/* True when the port status register reports link up. */
#define MVXPE_IS_LINKUP(sc) \
	(MVXPE_READ((sc), MVXPE_PSR) & MVXPE_PSR_LINKUP)

/* Test bit (q) in a queue bitmap. */
#define MVXPE_IS_QUEUE_BUSY(queues, q) \
	((((queues) >> (q)) & 0x1))

/*
 * EEE: Low Power Idle config.
 * The default timer is the duration of an MTU sized frame transmission.
 * The timer can be negotiated by the LLDP protocol, but we have no
 * support for that.
 */
#define MVXPE_LPI_TS	(MVXPE_MRU * 8 / 1000)	/* [us] */
#define MVXPE_LPI_TW	(MVXPE_MRU * 8 / 1000)	/* [us] */
#define MVXPE_LPI_LI	(MVXPE_MRU * 8 / 1000)	/* [us] */

/*
 * DMA Descriptor.
 *
 * The ethernet device has 8 rx/tx DMA queues. Each queue has its own
 * descriptor list. Descriptors are simply indexed by a counter inside
 * the device.
 *
 * NOTE(review): the _MSK/_NEXT macros assume the ring count is a power
 * of two (they mask rather than take a modulus) — confirm IFQ_MAXLEN
 * satisfies this on all configurations.
 */
#define MVXPE_TX_RING_CNT	IFQ_MAXLEN
#define MVXPE_TX_RING_MSK	(MVXPE_TX_RING_CNT - 1)
#define MVXPE_TX_RING_NEXT(x)	(((x) + 1) & MVXPE_TX_RING_MSK)
#define MVXPE_RX_RING_CNT	IFQ_MAXLEN
#define MVXPE_RX_RING_MSK	(MVXPE_RX_RING_CNT - 1)
#define MVXPE_RX_RING_NEXT(x)	(((x) + 1) & MVXPE_RX_RING_MSK)
#define MVXPE_TX_SEGLIMIT	32

/* Per-queue receive ring state. */
struct mvxpe_rx_ring {
	/* Real descriptor array, shared with the RxDMA engine. */
	struct mvxpe_rx_desc	*rx_descriptors;
	bus_dmamap_t		rx_descriptors_map;

	/* Management entries, one for each of the descriptors. */
	struct mvxpe_rx_handle {
		struct mvxpe_rx_desc	*rxdesc_va;	/* KVA of descriptor */
		off_t			rxdesc_off;	/* from rx_descriptors[0] */
		struct mvxpbm_chunk	*chunk;		/* receive buffer */
	} rx_handle[MVXPE_RX_RING_CNT];

	/* Lock protecting this ring. */
	kmutex_t	rx_ring_mtx;

	/* Indexes: device (DMA) side and driver (CPU) side. */
	int		rx_dma;
	int		rx_cpu;

	/* Limits and interrupt thresholds. */
	int		rx_queue_len;
	int		rx_queue_th_received;
	int		rx_queue_th_free;
	int		rx_queue_th_time;	/* [Tclk] */
};

/* Per-queue transmit ring state. */
struct mvxpe_tx_ring {
	/* Real descriptor array, shared with the TxDMA engine. */
	struct mvxpe_tx_desc	*tx_descriptors;
	bus_dmamap_t		tx_descriptors_map;

	/* Management entries, one for each of the descriptors. */
	struct mvxpe_tx_handle {
		struct mvxpe_tx_desc	*txdesc_va;	/* KVA of descriptor */
		off_t			txdesc_off;	/* from tx_descriptors[0] */
		struct mbuf		*txdesc_mbuf;	/* pending packet */
		bus_dmamap_t		txdesc_mbuf_map;
	} tx_handle[MVXPE_TX_RING_CNT];

	/* Lock protecting this ring. */
	kmutex_t	tx_ring_mtx;

	/* Indexes: in-flight count, device (DMA) side, driver (CPU) side. */
	int		tx_used;
	int		tx_dma;
	int		tx_cpu;

	/* Limits and interrupt thresholds. */
	int		tx_queue_len;
	int		tx_queue_th_free;
};

168 static inline int
169 tx_counter_adv(int ctr, int n)
170 {
171 /* XXX: lock or atomic */
172 ctr += n;
173 while (ctr >= MVXPE_TX_RING_CNT)
174 ctr -= MVXPE_TX_RING_CNT;
175
176 return ctr;
177 }
178
179 static inline int
180 rx_counter_adv(int ctr, int n)
181 {
182 /* XXX: lock or atomic */
183 ctr += n;
184 while (ctr >= MVXPE_TX_RING_CNT)
185 ctr -= MVXPE_TX_RING_CNT;
186
187 return ctr;
188 }
189
/*
 * Timeout control.
 */
#define MVXPE_PHY_TIMEOUT	10000		/* msec */
#define RX_DISABLE_TIMEOUT	0x1000000	/* times (poll iterations) */
#define TX_DISABLE_TIMEOUT	0x1000000	/* times (poll iterations) */
#define TX_FIFO_EMPTY_TIMEOUT	0x1000000	/* times (poll iterations) */

/*
 * Event counters.
 *
 * The MVXPE_EVCNT_* macros compile to nothing unless the driver is
 * built with MVXPE_EVENT_COUNTERS.
 */
#ifdef MVXPE_EVENT_COUNTERS
#define MVXPE_EVCNT_INCR(ev)		(ev)->ev_count++
#define MVXPE_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define MVXPE_EVCNT_INCR(ev)		/* nothing */
#define MVXPE_EVCNT_ADD(ev, val)	/* nothing */
#endif
/* Collection of all event counters kept by the driver. */
struct mvxpe_evcnt {
	/*
	 * Master Interrupt Handler
	 */
	struct evcnt ev_i_rxtxth;
	struct evcnt ev_i_rxtx;
	struct evcnt ev_i_misc;

	/*
	 * RXTXTH Interrupt
	 */
	struct evcnt ev_rxtxth_txerr;

	/*
	 * MISC Interrupt
	 */
	struct evcnt ev_misc_phystatuschng;
	struct evcnt ev_misc_linkchange;
	struct evcnt ev_misc_iae;
	struct evcnt ev_misc_rxoverrun;
	struct evcnt ev_misc_rxcrc;
	struct evcnt ev_misc_rxlargepacket;
	struct evcnt ev_misc_txunderrun;
	struct evcnt ev_misc_prbserr;
	struct evcnt ev_misc_srse;
	struct evcnt ev_misc_txreq;

	/*
	 * RxTx Interrupt
	 */
	struct evcnt ev_rxtx_rreq;
	struct evcnt ev_rxtx_rpq;
	struct evcnt ev_rxtx_tbrq;
	struct evcnt ev_rxtx_rxtxth;
	struct evcnt ev_rxtx_txerr;
	struct evcnt ev_rxtx_misc;

	/*
	 * Link
	 */
	struct evcnt ev_link_up;
	struct evcnt ev_link_down;

	/*
	 * Rx Descriptor status bits
	 */
	struct evcnt ev_rxd_ce;
	struct evcnt ev_rxd_or;
	struct evcnt ev_rxd_mf;
	struct evcnt ev_rxd_re;
	struct evcnt ev_rxd_scat;

	/*
	 * Tx Descriptor status bits
	 */
	struct evcnt ev_txd_lc;
	struct evcnt ev_txd_ur;
	struct evcnt ev_txd_rl;
	struct evcnt ev_txd_oth;

	/*
	 * Status Registers
	 */
	struct evcnt ev_reg_pdfc;	/* Rx Port Discard Frame Counter */
	struct evcnt ev_reg_pofc;	/* Rx Port Overrun Frame Counter */
	struct evcnt ev_reg_txbadfcs;	/* Tx BAD FCS Counter */
	struct evcnt ev_reg_txdropped;	/* Tx Dropped Counter */
	struct evcnt ev_reg_lpic;

	/* Device Driver Errors, per-queue where arrayed */
	struct evcnt ev_drv_wdogsoft;
	struct evcnt ev_drv_txerr;
	struct evcnt ev_drv_rxq[MVXPE_QUEUE_SIZE];
	struct evcnt ev_drv_rxqe[MVXPE_QUEUE_SIZE];
	struct evcnt ev_drv_txq[MVXPE_QUEUE_SIZE];
	struct evcnt ev_drv_txqe[MVXPE_QUEUE_SIZE];
};

/*
 * Debug printing helpers. All of them are gated on the global
 * mvxpe_debug level and compile to nothing without MVXPE_DEBUG.
 *
 * Fix: DPRINTIFNET was defined twice with identical bodies (a macro
 * redefinition); the duplicate has been removed. The disabled-case
 * stub of DPRINTPRXS is also renamed to take (level, q) to match the
 * enabled definition.
 */
#ifdef MVXPE_DEBUG
#define DPRINTF(fmt, ...) \
	do { \
		if (mvxpe_debug >= 1) { \
			printf("%s: ", __func__); \
			printf((fmt), ##__VA_ARGS__); \
		} \
	} while (/*CONSTCOND*/0)
#define DPRINTFN(level, fmt, ...) \
	do { \
		if (mvxpe_debug >= (level)) { \
			printf("%s: ", __func__); \
			printf((fmt), ##__VA_ARGS__); \
		} \
	} while (/*CONSTCOND*/0)
#define DPRINTDEV(dev, level, fmt, ...) \
	do { \
		if (mvxpe_debug >= (level)) { \
			device_printf((dev), \
			    "%s: " fmt, __func__, ##__VA_ARGS__); \
		} \
	} while (/*CONSTCOND*/0)
#define DPRINTSC(sc, level, fmt, ...) \
	do { \
		device_t dev = (sc)->sc_dev; \
		if (mvxpe_debug >= (level)) { \
			device_printf(dev, \
			    "%s: " fmt, __func__, ##__VA_ARGS__); \
		} \
	} while (/*CONSTCOND*/0)
#define DPRINTIFNET(ifp, level, fmt, ...) \
	do { \
		const char *xname = (ifp)->if_xname; \
		if (mvxpe_debug >= (level)) { \
			printf("%s: %s: " fmt, xname, __func__, ##__VA_ARGS__);\
		} \
	} while (/*CONSTCOND*/0)
#define DPRINTPRXS(level, q) \
	do { \
		uint32_t _reg = MVXPE_READ(sc, MVXPE_PRXS(q)); \
		if (mvxpe_debug >= (level)) { \
			printf("PRXS(queue %d) %#x: Occupied %d, NoOccupied %d.\n", \
			    q, _reg, MVXPE_PRXS_GET_ODC(_reg), \
			    MVXPE_PRXS_GET_NODC(_reg)); \
		} \
	} while (/*CONSTCOND*/0)
#else
#define DPRINTF(fmt, ...)
#define DPRINTFN(level, fmt, ...)
#define DPRINTDEV(dev, level, fmt, ...)
#define DPRINTSC(sc, level, fmt, ...)
#define DPRINTIFNET(ifp, level, fmt, ...)
#define DPRINTPRXS(level, q)
#endif

/*
 * Lock-held assertions for the driver mutexes.
 *
 * NOTE(review): sc_bm is declared as a pointer (struct mvxpbm_softc *)
 * in struct mvxpe_softc below, yet KASSERT_BM_MTX dereferences it with
 * '.', which would not compile if this macro were ever expanded —
 * confirm against callers.
 */
#define KASSERT_SC_MTX(sc) \
	KASSERT(mutex_owned(&(sc)->sc_mtx))
#define KASSERT_BM_MTX(sc) \
	KASSERT(mutex_owned(&(sc)->sc_bm.bm_mtx))
#define KASSERT_RX_MTX(sc, q) \
	KASSERT(mutex_owned(&(sc)->sc_rx_ring[(q)].rx_ring_mtx))
#define KASSERT_TX_MTX(sc, q) \
	KASSERT(mutex_owned(&(sc)->sc_tx_ring[(q)].tx_ring_mtx))

/*
 * Configuration parameters.
 */
struct mvxpe_conf {
	int cf_lpi;	/* EEE Low Power Idle enable */
	int cf_fc;	/* Flow Control enable */
};

/*
 * sysctl(9) parameters.
 */
struct mvxpe_softc;
/* Per-queue sysctl node context passed to the sysctl handler. */
struct mvxpe_sysctl_queue {
	struct mvxpe_softc	*sc;	/* back pointer to the device */
	int			rxtx;	/* MVXPE_SYSCTL_RX or MVXPE_SYSCTL_TX */
	int			queue;	/* queue number */
};
#define MVXPE_SYSCTL_RX		0
#define MVXPE_SYSCTL_TX		1

/* Per-MIB-counter sysctl node context. */
struct mvxpe_sysctl_mib {
	struct mvxpe_softc	*sc;		/* back pointer to the device */
	int			index;		/* MIB register index */
	uint64_t		counter;	/* accumulated 64-bit value */
};

/*
 * Ethernet Device main context.
 */
struct mvxpe_softc {
	device_t sc_dev;
	int sc_port;
	uint32_t sc_version;

	/*
	 * sc_mtx must be held by interface functions to/from
	 * other frameworks: interrupt handler, sysctl handler,
	 * ioctl handler, and so on.
	 */
	kmutex_t sc_mtx;

	/*
	 * Ethernet facilities
	 */
	struct ethercom sc_ethercom;
	struct mii_data sc_mii;
	u_int8_t sc_enaddr[ETHER_ADDR_LEN];	/* station addr */
	int sc_if_flags;
	int sc_wdogsoft;

	/*
	 * Configuration Parameters
	 */
	struct mvxpe_conf sc_cf;

	/*
	 * I/O Spaces
	 */
	bus_space_tag_t sc_iot;
	bus_space_handle_t sc_ioh;	/* all registers handle */
	bus_space_handle_t sc_mibh;	/* MIB counter handle */

	/*
	 * DMA Spaces
	 */
	bus_dma_tag_t sc_dmat;
	struct mvxpe_rx_ring sc_rx_ring[MVXPE_QUEUE_SIZE];
	struct mvxpe_tx_ring sc_tx_ring[MVXPE_QUEUE_SIZE];
	int sc_tx_pending;		/* total number of tx pkt */

	/*
	 * Software Buffer Manager
	 */
	struct mvxpbm_softc *sc_bm;

	/*
	 * Maintenance clock
	 */
	callout_t sc_tick_ch;		/* tick callout */

	/*
	 * Link State control
	 */
	uint32_t sc_linkstate;

	/*
	 * Acts as a random source
	 */
	krndsource_t sc_rnd_source;

	/*
	 * Sysctl interfaces
	 */
	struct sysctllog *sc_mvxpe_clog;
	struct mvxpe_sysctl_queue sc_sysctl_rx_queue[MVXPE_QUEUE_SIZE];
	struct mvxpe_sysctl_queue sc_sysctl_tx_queue[MVXPE_QUEUE_SIZE];

	/*
	 * MIB counter
	 */
	size_t sc_sysctl_mib_size;
	struct mvxpe_sysctl_mib *sc_sysctl_mib;

#ifdef MVXPE_EVENT_COUNTERS
	/*
	 * Event counter
	 */
	struct mvxpe_evcnt sc_ev;
#endif
};
/* Accessors for per-queue rx ring memory and handles. */
#define MVXPE_RX_RING_MEM_VA(sc, q) \
	((sc)->sc_rx_ring[(q)].rx_descriptors)
#define MVXPE_RX_RING_MEM_PA(sc, q) \
	((sc)->sc_rx_ring[(q)].rx_descriptors_map->dm_segs[0].ds_addr)
#define MVXPE_RX_RING_MEM_MAP(sc, q) \
	((sc)->sc_rx_ring[(q)].rx_descriptors_map)
#define MVXPE_RX_RING(sc, q) \
	(&(sc)->sc_rx_ring[(q)])
#define MVXPE_RX_HANDLE(sc, q, i) \
	(&(sc)->sc_rx_ring[(q)].rx_handle[(i)])
#define MVXPE_RX_DESC(sc, q, i) \
	((sc)->sc_rx_ring[(q)].rx_handle[(i)].rxdesc_va)
#define MVXPE_RX_DESC_OFF(sc, q, i) \
	((sc)->sc_rx_ring[(q)].rx_handle[(i)].rxdesc_off)
#define MVXPE_RX_PKTBUF(sc, q, i) \
	((sc)->sc_rx_ring[(q)].rx_handle[(i)].chunk)

/* Accessors for per-queue tx ring memory and handles. */
#define MVXPE_TX_RING_MEM_VA(sc, q) \
	((sc)->sc_tx_ring[(q)].tx_descriptors)
#define MVXPE_TX_RING_MEM_PA(sc, q) \
	((sc)->sc_tx_ring[(q)].tx_descriptors_map->dm_segs[0].ds_addr)
#define MVXPE_TX_RING_MEM_MAP(sc, q) \
	((sc)->sc_tx_ring[(q)].tx_descriptors_map)
#define MVXPE_TX_RING(sc, q) \
	(&(sc)->sc_tx_ring[(q)])
#define MVXPE_TX_HANDLE(sc, q, i) \
	(&(sc)->sc_tx_ring[(q)].tx_handle[(i)])
#define MVXPE_TX_DESC(sc, q, i) \
	((sc)->sc_tx_ring[(q)].tx_handle[(i)].txdesc_va)
#define MVXPE_TX_DESC_OFF(sc, q, i) \
	((sc)->sc_tx_ring[(q)].tx_handle[(i)].txdesc_off)
#define MVXPE_TX_MBUF(sc, q, i) \
	((sc)->sc_tx_ring[(q)].tx_handle[(i)].txdesc_mbuf)
#define MVXPE_TX_MAP(sc, q, i) \
	((sc)->sc_tx_ring[(q)].tx_handle[(i)].txdesc_mbuf_map)

#endif /* _IF_MVXPEVAR_H_ */
508