/*	$NetBSD: if_mvxpevar.h,v 1.1 2015/05/03 14:38:10 hsuenaga Exp $	*/
/*
 * Copyright (c) 2015 Internet Initiative Japan Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _IF_MVXPEVAR_H_
#define _IF_MVXPEVAR_H_
/*
 * Compile options
 * XXX: use kernel config
 */
#define MVXPE_DEBUG 0
#define MVXPE_EVENT_COUNTERS

/*
 * Limits on packet size.
 */
#define MVXPE_HWHEADER_SIZE	2		/* Marvell Header */
#define MVXPE_MRU		2000		/* Max Receive Unit */
#define MVXPE_MTU		MVXPE_MRU	/* Max Transmit Unit */

/*
 * Default queue length limits
 *
 * Queue 0 is the lowest priority and queue 7 is the highest priority.
 * IP packets are received on queue 7 by default.
 *
 * XXX: packet classifier is not implemented yet
 */
#define MVXPE_RX_QUEUE_LIMIT_0	8
#define MVXPE_RX_QUEUE_LIMIT_1	8
#define MVXPE_RX_QUEUE_LIMIT_2	8
#define MVXPE_RX_QUEUE_LIMIT_3	8
#define MVXPE_RX_QUEUE_LIMIT_4	8
#define MVXPE_RX_QUEUE_LIMIT_5	8
#define MVXPE_RX_QUEUE_LIMIT_6	8
#define MVXPE_RX_QUEUE_LIMIT_7	256

#define MVXPE_TX_QUEUE_LIMIT_0	256
#define MVXPE_TX_QUEUE_LIMIT_1	8
#define MVXPE_TX_QUEUE_LIMIT_2	8
#define MVXPE_TX_QUEUE_LIMIT_3	8
#define MVXPE_TX_QUEUE_LIMIT_4	8
#define MVXPE_TX_QUEUE_LIMIT_5	8
#define MVXPE_TX_QUEUE_LIMIT_6	8
#define MVXPE_TX_QUEUE_LIMIT_7	8
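
/*
 * With the defaults above, every received IP packet should land on RX
 * queue 7 (256 descriptors) until a classifier is implemented, and the
 * generous 256-entry limit on TX queue 0 suggests it is the queue used
 * for transmission by default; the remaining queues keep 8 entries each.
 */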

/*
 * Device register access
 */
#define MVXPE_READ(sc, reg) \
	bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg))
#define MVXPE_WRITE(sc, reg, val) \
	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))

#define MVXPE_READ_REGION(sc, reg, val, c) \
	bus_space_read_region_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val), (c))
#define MVXPE_WRITE_REGION(sc, reg, val, c) \
	bus_space_write_region_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val), (c))

#define MVXPE_READ_MIB(sc, reg) \
	bus_space_read_4((sc)->sc_iot, (sc)->sc_mibh, (reg))

#define MVXPE_IS_LINKUP(sc) \
	(MVXPE_READ((sc), MVXPE_PSR) & MVXPE_PSR_LINKUP)
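
/*
 * Usage sketch (hypothetical call site, not part of this driver):
 *
 *	uint32_t psr = MVXPE_READ(sc, MVXPE_PSR);
 *	if (psr & MVXPE_PSR_LINKUP)	... equivalent to MVXPE_IS_LINKUP(sc)
 */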

/*
 * EEE: Low Power Idle config
 * The default timer is the duration of an MTU-sized frame transmission.
 * The timer can be negotiated by the LLDP protocol, but we have no
 * support for that.
 */
#define MVXPE_LPI_TS	(MVXPE_MRU * 8 / 1000)	/* [us] */
#define MVXPE_LPI_TW	(MVXPE_MRU * 8 / 1000)	/* [us] */
#define MVXPE_LPI_LI	(MVXPE_MRU * 8 / 1000)	/* [us] */
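
/*
 * Worked example: with MVXPE_MRU = 2000 bytes, each timer above is
 * 2000 * 8 / 1000 = 16 [us], the wire time of an MRU-sized frame
 * assuming a 1Gbps link (1 bit per nanosecond).
 */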

/*
 * DMA Descriptors
 *
 * The ethernet device has 8 RX/TX DMA queues.  Each queue has its own
 * descriptor list.  Descriptors are simply indexed by a counter inside
 * the device.
 */
#define MVXPE_TX_RING_CNT	256
#define MVXPE_TX_RING_MSK	(MVXPE_TX_RING_CNT - 1)
#define MVXPE_TX_RING_NEXT(x)	(((x) + 1) & MVXPE_TX_RING_MSK)
#define MVXPE_RX_RING_CNT	256
#define MVXPE_RX_RING_MSK	(MVXPE_RX_RING_CNT - 1)
#define MVXPE_RX_RING_NEXT(x)	(((x) + 1) & MVXPE_RX_RING_MSK)
#define MVXPE_TX_SEGLIMIT	32
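
/*
 * The ring sizes are powers of two, so advancing an index is a cheap
 * mask instead of a modulo; e.g. MVXPE_TX_RING_NEXT(255) == 0.  A
 * minimal reclaim-loop sketch (hypothetical, not part of this driver):
 *
 *	int idx = ring->tx_dma;
 *	while (idx != ring->tx_cpu) {
 *		... reclaim ring->tx_handle[idx] ...
 *		idx = MVXPE_TX_RING_NEXT(idx);
 *	}
 */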

struct mvxpe_rx_ring {
	/* Real descriptor array, shared with the RxDMA engine */
	struct mvxpe_rx_desc		*rx_descriptors;
	bus_dmamap_t			rx_descriptors_map;

	/* Management entries, one per descriptor */
	struct mvxpe_rx_handle {
		struct mvxpe_rx_desc	*rxdesc_va;
		off_t			rxdesc_off; /* from rx_descriptors[0] */
		struct mvxpe_bm_chunk	*chunk;
	} rx_handle[MVXPE_RX_RING_CNT];

	/* locks */
	kmutex_t			rx_ring_mtx;

	/* Index */
	int				rx_dma;
	int				rx_cpu;

	/* Limit */
	int				rx_queue_len;
	int				rx_queue_th_received;
	int				rx_queue_th_free;
	int				rx_queue_th_time; /* [Tclk] */
};

struct mvxpe_tx_ring {
	/* Real descriptor array, shared with the TxDMA engine */
	struct mvxpe_tx_desc		*tx_descriptors;
	bus_dmamap_t			tx_descriptors_map;

	/* Management entries, one per descriptor */
	struct mvxpe_tx_handle {
		struct mvxpe_tx_desc	*txdesc_va;
		off_t			txdesc_off; /* from tx_descriptors[0] */
		struct mbuf		*txdesc_mbuf;
		bus_dmamap_t		txdesc_mbuf_map;
	} tx_handle[MVXPE_TX_RING_CNT];

	/* locks */
	kmutex_t			tx_ring_mtx;

	/* Index */
	int				tx_free_cnt;
	int				tx_dma;
	int				tx_cpu;

	/* Limit */
	int				tx_queue_len;
	int				tx_queue_th_free;
};

static inline int
tx_counter_adv(int ctr, int n)
{
	/* XXX: lock or atomic */
	ctr += n;
	while (ctr >= MVXPE_TX_RING_CNT)
		ctr -= MVXPE_TX_RING_CNT;

	return ctr;
}

static inline int
rx_counter_adv(int ctr, int n)
{
	/* XXX: lock or atomic */
	ctr += n;
	while (ctr >= MVXPE_RX_RING_CNT)
		ctr -= MVXPE_RX_RING_CNT;

	return ctr;
}
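
/*
 * Usage sketch (hypothetical): advance the CPU-side index after queueing
 * n descriptors; the helpers wrap at the ring size.
 *
 *	ring->tx_cpu = tx_counter_adv(ring->tx_cpu, n);
 */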

/*
 * Buffer alignment
 */
#define MVXPE_RXBUF_ALIGN	32	/* Cache line size */
#define MVXPE_RXBUF_MASK	(MVXPE_RXBUF_ALIGN - 1)
#define MVXPE_BM_ADDR_ALIGN	32
#define MVXPE_BM_ADDR_MASK	(MVXPE_BM_ADDR_ALIGN - 1)
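
/*
 * Rounding a buffer length up to the alignment uses the usual mask
 * idiom, e.g. (illustrative only):
 *
 *	len = (len + MVXPE_RXBUF_MASK) & ~MVXPE_RXBUF_MASK;
 */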

/*
 * Timeout control
 */
#define MVXPE_PHY_TIMEOUT	10000		/* msec */
#define RX_DISABLE_TIMEOUT	0x1000000	/* times */
#define TX_DISABLE_TIMEOUT	0x1000000	/* times */
#define TX_FIFO_EMPTY_TIMEOUT	0x1000000	/* times */

/*
 * Event counters
 */
#ifdef MVXPE_EVENT_COUNTERS
#define MVXPE_EVCNT_INCR(ev)		(ev)->ev_count++
#define MVXPE_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define MVXPE_EVCNT_INCR(ev)		/* nothing */
#define MVXPE_EVCNT_ADD(ev, val)	/* nothing */
#endif
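
/*
 * Usage sketch: the counters compile away when MVXPE_EVENT_COUNTERS is
 * not defined (the field names below are real; "ndone" is a
 * hypothetical count).
 *
 *	MVXPE_EVCNT_INCR(&sc->sc_ev.ev_link_up);
 *	MVXPE_EVCNT_ADD(&sc->sc_ev.ev_drv_rxq[q], ndone);
 */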
struct mvxpe_evcnt {
	/*
	 * Master Interrupt Handler
	 */
	struct evcnt ev_i_rxtxth;
	struct evcnt ev_i_rxtx;
	struct evcnt ev_i_misc;

	/*
	 * RXTXTH Interrupt
	 */
	struct evcnt ev_rxtxth_txerr;

	/*
	 * MISC Interrupt
	 */
	struct evcnt ev_misc_phystatuschng;
	struct evcnt ev_misc_linkchange;
	struct evcnt ev_misc_iae;
	struct evcnt ev_misc_rxoverrun;
	struct evcnt ev_misc_rxcrc;
	struct evcnt ev_misc_rxlargepacket;
	struct evcnt ev_misc_txunderrun;
	struct evcnt ev_misc_prbserr;
	struct evcnt ev_misc_srse;
	struct evcnt ev_misc_txreq;

	/*
	 * RxTx Interrupt
	 */
	struct evcnt ev_rxtx_rreq;
	struct evcnt ev_rxtx_rpq;
	struct evcnt ev_rxtx_tbrq;
	struct evcnt ev_rxtx_rxtxth;
	struct evcnt ev_rxtx_txerr;
	struct evcnt ev_rxtx_misc;

	/*
	 * Link
	 */
	struct evcnt ev_link_up;
	struct evcnt ev_link_down;

	/*
	 * Rx Descriptor
	 */
	struct evcnt ev_rxd_ce;
	struct evcnt ev_rxd_or;
	struct evcnt ev_rxd_mf;
	struct evcnt ev_rxd_re;
	struct evcnt ev_rxd_scat;

	/*
	 * Tx Descriptor
	 */
	struct evcnt ev_txd_lc;
	struct evcnt ev_txd_ur;
	struct evcnt ev_txd_rl;
	struct evcnt ev_txd_oth;

	/*
	 * Status Registers
	 */
	struct evcnt ev_reg_pdfc;	/* Rx Port Discard Frame Counter */
	struct evcnt ev_reg_pofc;	/* Rx Port Overrun Frame Counter */
	struct evcnt ev_reg_txbadfcs;	/* Tx BAD FCS Counter */
	struct evcnt ev_reg_txdropped;	/* Tx Dropped Counter */
	struct evcnt ev_reg_lpic;

	/* Device Driver Errors */
	struct evcnt ev_drv_wdogsoft;
	struct evcnt ev_drv_txerr;
	struct evcnt ev_drv_rxq[MVXPE_QUEUE_SIZE];
	struct evcnt ev_drv_rxqe[MVXPE_QUEUE_SIZE];
	struct evcnt ev_drv_txq[MVXPE_QUEUE_SIZE];
	struct evcnt ev_drv_txqe[MVXPE_QUEUE_SIZE];
};

/*
 * Debug
 */
#ifdef MVXPE_DEBUG
#define DPRINTF(fmt, ...) \
	do { \
		if (mvxpe_debug >= 1) { \
			printf("%s: ", __func__); \
			printf((fmt), ##__VA_ARGS__); \
		} \
	} while (/*CONSTCOND*/0)
#define DPRINTFN(level, fmt, ...) \
	do { \
		if (mvxpe_debug >= (level)) { \
			printf("%s: ", __func__); \
			printf((fmt), ##__VA_ARGS__); \
		} \
	} while (/*CONSTCOND*/0)
#define DPRINTDEV(dev, level, fmt, ...) \
	do { \
		if (mvxpe_debug >= (level)) { \
			device_printf((dev), \
			    "%s: " fmt, __func__, ##__VA_ARGS__); \
		} \
	} while (/*CONSTCOND*/0)
#define DPRINTSC(sc, level, fmt, ...) \
	do { \
		device_t dev = (sc)->sc_dev; \
		if (mvxpe_debug >= (level)) { \
			device_printf(dev, \
			    "%s: " fmt, __func__, ##__VA_ARGS__); \
		} \
	} while (/*CONSTCOND*/0)
#define DPRINTIFNET(ifp, level, fmt, ...) \
	do { \
		const char *xname = (ifp)->if_xname; \
		if (mvxpe_debug >= (level)) { \
			printf("%s: %s: " fmt, xname, __func__, ##__VA_ARGS__);\
		} \
	} while (/*CONSTCOND*/0)
#define DPRINTPRXS(level, q) \
	do { \
		uint32_t _reg = MVXPE_READ(sc, MVXPE_PRXS(q)); \
		if (mvxpe_debug >= (level)) { \
			printf("PRXS(queue %d) %#x: Occupied %d, NoOccupied %d.\n", \
			    q, _reg, MVXPE_PRXS_GET_ODC(_reg), \
			    MVXPE_PRXS_GET_NODC(_reg)); \
		} \
	} while (/*CONSTCOND*/0)
#else
#define DPRINTF(fmt, ...)
#define DPRINTFN(level, fmt, ...)
#define DPRINTDEV(dev, level, fmt, ...)
#define DPRINTSC(sc, level, fmt, ...)
#define DPRINTIFNET(ifp, level, fmt, ...)
#define DPRINTPRXS(level, q)
#endif
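
/*
 * Usage sketch (hypothetical call sites): every variant gates on the
 * runtime mvxpe_debug level and prefixes the message with __func__.
 *
 *	DPRINTFN(2, "rxq %d: %d packets\n", q, npkt);
 *	DPRINTSC(sc, 3, "PSR=%#x\n", MVXPE_READ(sc, MVXPE_PSR));
 */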

#define KASSERT_SC_MTX(sc) \
	KASSERT(mutex_owned(&(sc)->sc_mtx))
#define KASSERT_BM_MTX(sc) \
	KASSERT(mutex_owned(&(sc)->sc_bm.bm_mtx))
#define KASSERT_RX_MTX(sc, q) \
	KASSERT(mutex_owned(&(sc)->sc_rx_ring[(q)].rx_ring_mtx))
#define KASSERT_TX_MTX(sc, q) \
	KASSERT(mutex_owned(&(sc)->sc_tx_ring[(q)].tx_ring_mtx))
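
/*
 * Usage sketch: a function that requires a ring lock asserts it on
 * entry, e.g. (hypothetical function):
 *
 *	static void
 *	mvxpe_rx_refill(struct mvxpe_softc *sc, int q)
 *	{
 *		KASSERT_RX_MTX(sc, q);
 *		...
 *	}
 */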

/*
 * Configuration parameters
 */
struct mvxpe_conf {
	int cf_lpi;	/* EEE Low Power Idle enable */
	int cf_fc;	/* Flow Control enable */
};

/*
 * sysctl(9) parameters
 */
struct mvxpe_softc;
struct mvxpe_sysctl_queue {
	struct mvxpe_softc	*sc;
	int			rxtx;
	int			queue;
};
#define MVXPE_SYSCTL_RX	0
#define MVXPE_SYSCTL_TX	1

struct mvxpe_sysctl_mib {
	struct mvxpe_softc	*sc;
	int			index;
	uint64_t		counter;
};

/*
 * Packet Buffer Header
 *
 * These chunks may be managed by the H/W Buffer Manager (BM) device,
 * but there is no device driver for it yet.
 *
 *  bm_buf
 *  +----------------+ <- chunk->off                +----------------+
 *  | chunk header   |                              |mbuf (M_EXT set)|
 *  |  struct mbuf *m|----------------------------->|                |
 *  +----------------+ <- chunk->buf_va/buf_pa      | m_ext.ext_buf  |
 *  | packet buffer  |    (chunk->buf_off           +-------+--------+
 *  |                |     from bm_buf)                     |
 *  |                | <-----------------------------------+
 *  |      ...       |    chunk->buf_size bytes
 *  +----------------+
 *  | chunk header   |
 *  | ....           |
 */
#define MVXPE_BM_SLOTS \
	(MVXPE_RX_RING_CNT * (MVXPE_QUEUE_SIZE + 1))
#define MVXPE_BM_SIZE \
	(MVXPE_MRU + MVXPE_HWHEADER_SIZE)
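
/*
 * Worked example: assuming MVXPE_QUEUE_SIZE is 8 (one slot set per RX
 * queue plus one spare set), MVXPE_BM_SLOTS is 256 * (8 + 1) = 2304
 * chunks, and each chunk holds up to MVXPE_MRU + MVXPE_HWHEADER_SIZE =
 * 2000 + 2 = 2002 bytes of packet data.
 */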

struct mvxpe_bm_chunk {
	struct mbuf	*m;		/* back pointer to mbuf header */
	void		*sc;		/* back pointer to softc */
	off_t		off;		/* offset of chunk */
	paddr_t		pa;		/* physical address of chunk */

	off_t		buf_off;	/* offset of packet from sc_bm_buf */
	paddr_t		buf_pa;		/* physical address of packet */
	vaddr_t		buf_va;		/* virtual address of packet */
	size_t		buf_size;	/* size of buffer (excluding hdr) */

	LIST_ENTRY(mvxpe_bm_chunk) link;
	/* followed by packet buffer */
};

struct mvxpe_bm_softc {
	bus_dma_tag_t	bm_dmat;
	bus_dmamap_t	bm_map;
	kmutex_t	bm_mtx;

	/* DMA MAP for entire buffer */
	char		*bm_buf;

	/* memory chunk properties */
	size_t		bm_slotsize;		/* size of a slot, including header */
	size_t		bm_chunk_count;		/* number of chunks */
	size_t		bm_chunk_size;		/* size of packet buffer */
	off_t		bm_chunk_header_size;	/* size of header + padding */
	off_t		bm_chunk_packet_offset;	/* reserved leading space (m_leading_space) */
	struct mvxpe_bm_chunk *bm_slots[MVXPE_BM_SLOTS];

	/* for software based management */
	LIST_HEAD(__mvxpe_bm_freehead, mvxpe_bm_chunk) bm_free;
	LIST_HEAD(__mvxpe_bm_inusehead, mvxpe_bm_chunk) bm_inuse;
};

#define BM_SYNC_ALL 0

/*
 * Ethernet Device main context
 */
struct mvxpe_softc {
	device_t	sc_dev;
	int		sc_port;
	uint32_t	sc_version;

	/*
	 * sc_mtx must be held by interface functions called to/from
	 * other frameworks: the interrupt handler, sysctl handler,
	 * ioctl handler, and so on.
	 */
	kmutex_t	sc_mtx;

	/*
	 * Ethernet facilities
	 */
	struct ethercom	sc_ethercom;
	struct mii_data	sc_mii;
	u_int8_t	sc_enaddr[ETHER_ADDR_LEN];	/* station addr */
	int		sc_if_flags;
	int		sc_wdogsoft;

	/*
	 * Configuration Parameters
	 */
	struct mvxpe_conf	sc_cf;

	/*
	 * I/O Spaces
	 */
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;		/* all registers handle */
	bus_space_handle_t	sc_mibh;	/* MIB counter handle */

	/*
	 * DMA Spaces
	 */
	bus_dma_tag_t		sc_dmat;
	struct mvxpe_rx_ring	sc_rx_ring[MVXPE_QUEUE_SIZE];
	struct mvxpe_tx_ring	sc_tx_ring[MVXPE_QUEUE_SIZE];
	int			sc_tx_pending;	/* total number of tx pkts */

	/*
	 * Software Buffer Manager
	 * XXX: to be replaced by an independent device driver.
	 */
	struct mvxpe_bm_softc	sc_bm;

	/*
	 * Maintenance clock
	 */
	callout_t		sc_tick_ch;	/* tick callout */

	/*
	 * Link State control
	 */
	uint32_t		sc_linkstate;

	/*
	 * Acts as a random source
	 */
	krndsource_t		sc_rnd_source;

	/*
	 * Sysctl interfaces
	 */
	struct sysctllog	*sc_mvxpe_clog;
	struct mvxpe_sysctl_queue sc_sysctl_rx_queue[MVXPE_QUEUE_SIZE];
	struct mvxpe_sysctl_queue sc_sysctl_tx_queue[MVXPE_QUEUE_SIZE];

	/*
	 * MIB counter
	 */
	size_t			sc_sysctl_mib_size;
	struct mvxpe_sysctl_mib	*sc_sysctl_mib;

#ifdef MVXPE_EVENT_COUNTERS
	/*
	 * Event counter
	 */
	struct mvxpe_evcnt	sc_ev;
#endif
};
#define MVXPE_RX_RING_MEM_VA(sc, q) \
	((sc)->sc_rx_ring[(q)].rx_descriptors)
#define MVXPE_RX_RING_MEM_PA(sc, q) \
	((sc)->sc_rx_ring[(q)].rx_descriptors_map->dm_segs[0].ds_addr)
#define MVXPE_RX_RING_MEM_MAP(sc, q) \
	((sc)->sc_rx_ring[(q)].rx_descriptors_map)
#define MVXPE_RX_RING(sc, q) \
	(&(sc)->sc_rx_ring[(q)])
#define MVXPE_RX_HANDLE(sc, q, i) \
	(&(sc)->sc_rx_ring[(q)].rx_handle[(i)])
#define MVXPE_RX_DESC(sc, q, i) \
	((sc)->sc_rx_ring[(q)].rx_handle[(i)].rxdesc_va)
#define MVXPE_RX_DESC_OFF(sc, q, i) \
	((sc)->sc_rx_ring[(q)].rx_handle[(i)].rxdesc_off)
#define MVXPE_RX_PKTBUF(sc, q, i) \
	((sc)->sc_rx_ring[(q)].rx_handle[(i)].chunk)

#define MVXPE_TX_RING_MEM_VA(sc, q) \
	((sc)->sc_tx_ring[(q)].tx_descriptors)
#define MVXPE_TX_RING_MEM_PA(sc, q) \
	((sc)->sc_tx_ring[(q)].tx_descriptors_map->dm_segs[0].ds_addr)
#define MVXPE_TX_RING_MEM_MAP(sc, q) \
	((sc)->sc_tx_ring[(q)].tx_descriptors_map)
#define MVXPE_TX_RING(sc, q) \
	(&(sc)->sc_tx_ring[(q)])
#define MVXPE_TX_HANDLE(sc, q, i) \
	(&(sc)->sc_tx_ring[(q)].tx_handle[(i)])
#define MVXPE_TX_DESC(sc, q, i) \
	((sc)->sc_tx_ring[(q)].tx_handle[(i)].txdesc_va)
#define MVXPE_TX_DESC_OFF(sc, q, i) \
	((sc)->sc_tx_ring[(q)].tx_handle[(i)].txdesc_off)
#define MVXPE_TX_MBUF(sc, q, i) \
	((sc)->sc_tx_ring[(q)].tx_handle[(i)].txdesc_mbuf)
#define MVXPE_TX_MAP(sc, q, i) \
	((sc)->sc_tx_ring[(q)].tx_handle[(i)].txdesc_mbuf_map)
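
/*
 * Usage sketch (hypothetical): the accessors keep ring indexing in one
 * place, e.g. binding an mbuf to a TX slot and syncing its DMA map.
 *
 *	struct mvxpe_tx_desc *t = MVXPE_TX_DESC(sc, q, i);
 *	MVXPE_TX_MBUF(sc, q, i) = m;
 *	bus_dmamap_sync(sc->sc_dmat, MVXPE_TX_MAP(sc, q, i),
 *	    0, m->m_pkthdr.len, BUS_DMASYNC_PREWRITE);
 */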

#endif /* _IF_MVXPEVAR_H_ */