/*	$NetBSD: if_mvxpevar.h,v 1.9 2022/04/04 19:33:45 andvar Exp $	*/
/*
 * Copyright (c) 2015 Internet Initiative Japan Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _IF_MVXPEVAR_H_
#define _IF_MVXPEVAR_H_
#include <net/if.h>
#include <dev/marvell/mvxpbmvar.h>

/*
 * Limit of packet sizes.
 */
#define MVXPE_HWHEADER_SIZE	2		/* Marvell Header */
#define MVXPE_MRU		2000		/* Max Receive Unit */
#define MVXPE_MTU		MVXPE_MRU	/* Max Transmit Unit */

/*
 * Default limit of queue length
 *
 * queue 0 is lowest priority and queue 7 is highest priority.
 *
 * XXX: the packet classifier is not implemented yet
 */
#define MVXPE_RX_QUEUE_LIMIT_0	IFQ_MAXLEN
#define MVXPE_RX_QUEUE_LIMIT_1	8
#define MVXPE_RX_QUEUE_LIMIT_2	8
#define MVXPE_RX_QUEUE_LIMIT_3	8
#define MVXPE_RX_QUEUE_LIMIT_4	8
#define MVXPE_RX_QUEUE_LIMIT_5	8
#define MVXPE_RX_QUEUE_LIMIT_6	8
#define MVXPE_RX_QUEUE_LIMIT_7	8

#define MVXPE_TX_QUEUE_LIMIT_0	IFQ_MAXLEN
#define MVXPE_TX_QUEUE_LIMIT_1	8
#define MVXPE_TX_QUEUE_LIMIT_2	8
#define MVXPE_TX_QUEUE_LIMIT_3	8
#define MVXPE_TX_QUEUE_LIMIT_4	8
#define MVXPE_TX_QUEUE_LIMIT_5	8
#define MVXPE_TX_QUEUE_LIMIT_6	8
#define MVXPE_TX_QUEUE_LIMIT_7	8

/* an interrupt is triggered when crossing (queuelen / RATIO) */
#define MVXPE_RXTH_RATIO	8
#define MVXPE_RXTH_REFILL_RATIO	2
#define MVXPE_TXTH_RATIO	8

/*
 * Device Register access
 */
#define MVXPE_READ(sc, reg) \
	bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg))
#define MVXPE_WRITE(sc, reg, val) \
	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))

#define MVXPE_READ_REGION(sc, reg, val, c) \
	bus_space_read_region_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val), (c))
#define MVXPE_WRITE_REGION(sc, reg, val, c) \
	bus_space_write_region_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val), (c))

#define MVXPE_READ_MIB(sc, reg) \
	bus_space_read_4((sc)->sc_iot, (sc)->sc_mibh, (reg))

#define MVXPE_IS_LINKUP(sc) \
	(MVXPE_READ((sc), MVXPE_PSR) & MVXPE_PSR_LINKUP)

#define MVXPE_IS_QUEUE_BUSY(queues, q) \
	((((queues) >> (q)) & 0x1))

/*
 * EEE: Low Power Idle config
 * The default timer is the duration of an MTU-sized frame transmission
 * on a 1Gbps link (frame bits / 1000 Mbps gives microseconds).
 * The timer can be negotiated via the LLDP protocol, but that is not
 * supported.
 */
#define MVXPE_LPI_TS	(MVXPE_MRU * 8 / 1000) /* [us] */
#define MVXPE_LPI_TW	(MVXPE_MRU * 8 / 1000) /* [us] */
#define MVXPE_LPI_LI	(MVXPE_MRU * 8 / 1000) /* [us] */

/*
 * DMA Descriptor
 *
 * the ethernet device has 8 rx/tx DMA queues. each queue has its own
 * descriptor list. descriptors are simply indexed by a counter inside
 * the device.
 */
#define MVXPE_TX_RING_CNT	IFQ_MAXLEN
#define MVXPE_TX_RING_MSK	(MVXPE_TX_RING_CNT - 1)
#define MVXPE_TX_RING_NEXT(x)	(((x) + 1) & MVXPE_TX_RING_MSK)
#define MVXPE_RX_RING_CNT	IFQ_MAXLEN
#define MVXPE_RX_RING_MSK	(MVXPE_RX_RING_CNT - 1)
#define MVXPE_RX_RING_NEXT(x)	(((x) + 1) & MVXPE_RX_RING_MSK)
#define MVXPE_TX_SEGLIMIT	32
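
/*
 * Illustrative sketch (not used by the driver): advancing a CPU-side
 * descriptor index with the ring macros above.  This assumes
 * IFQ_MAXLEN is a power of two, so masking with MVXPE_TX_RING_MSK
 * behaves like a modulo over the ring size.
 */
static __inline int
mvxpe_tx_ring_advance_example(int idx, int nsegs)
{
	while (nsegs-- > 0)
		idx = MVXPE_TX_RING_NEXT(idx);

	return idx;
}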

struct mvxpe_rx_ring {
	/* Real descriptors array. shared by RxDMA */
	struct mvxpe_rx_desc		*rx_descriptors;
	bus_dmamap_t			rx_descriptors_map;

	/* Management entries for each descriptor */
	struct mvxpe_rx_handle {
		struct mvxpe_rx_desc	*rxdesc_va;
		off_t			rxdesc_off; /* from rx_descriptors[0] */
		struct mvxpbm_chunk	*chunk;
	} rx_handle[MVXPE_RX_RING_CNT];

	/* locks */
	kmutex_t			rx_ring_mtx;

	/* Index */
	int				rx_dma;
	int				rx_cpu;

	/* Limit */
	int				rx_queue_len;
	int				rx_queue_th_received;
	int				rx_queue_th_free;
	int				rx_queue_th_time; /* [Tclk] */
};

struct mvxpe_tx_ring {
	/* Real descriptors array. shared by TxDMA */
	struct mvxpe_tx_desc		*tx_descriptors;
	bus_dmamap_t			tx_descriptors_map;

	/* Management entries for each descriptor */
	struct mvxpe_tx_handle {
		struct mvxpe_tx_desc	*txdesc_va;
		off_t			txdesc_off; /* from tx_descriptors[0] */
		struct mbuf		*txdesc_mbuf;
		bus_dmamap_t		txdesc_mbuf_map;
	} tx_handle[MVXPE_TX_RING_CNT];

	/* locks */
	kmutex_t			tx_ring_mtx;

	/* Index */
	int				tx_used;
	int				tx_dma;
	int				tx_cpu;

	/* Limit */
	int				tx_queue_len;
	int				tx_queue_th_free;
};

static __inline int
tx_counter_adv(int ctr, int n)
{
	/* XXX: lock or atomic */
	ctr += n;
	while (ctr >= MVXPE_TX_RING_CNT)
		ctr -= MVXPE_TX_RING_CNT;

	return ctr;
}

static __inline int
rx_counter_adv(int ctr, int n)
{
	/* XXX: lock or atomic */
	ctr += n;
	while (ctr >= MVXPE_RX_RING_CNT)
		ctr -= MVXPE_RX_RING_CNT;

	return ctr;
}

/*
 * Timeout control
 */
#define MVXPE_PHY_TIMEOUT	10000		/* msec */
#define RX_DISABLE_TIMEOUT	0x1000000	/* times */
#define TX_DISABLE_TIMEOUT	0x1000000	/* times */
#define TX_FIFO_EMPTY_TIMEOUT	0x1000000	/* times */
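
/*
 * Illustrative sketch (not part of the driver API), assuming tx_cpu is
 * the producer (CPU) index and tx_dma the consumer (device) index of
 * struct mvxpe_tx_ring: the number of descriptors currently handed to
 * the TxDMA engine can be derived from their distance on the ring.
 */
static __inline int
mvxpe_tx_ring_inuse_example(const struct mvxpe_tx_ring *tx)
{
	return (tx->tx_cpu - tx->tx_dma) & MVXPE_TX_RING_MSK;
}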

/*
 * Event counter
 */
#ifdef MVXPE_EVENT_COUNTERS
#define MVXPE_EVCNT_INCR(ev)		(ev)->ev_count++
#define MVXPE_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define MVXPE_EVCNT_INCR(ev)		/* nothing */
#define MVXPE_EVCNT_ADD(ev, val)	/* nothing */
#endif
struct mvxpe_evcnt {
	/*
	 * Master Interrupt Handler
	 */
	struct evcnt ev_i_rxtxth;
	struct evcnt ev_i_rxtx;
	struct evcnt ev_i_misc;

	/*
	 * RXTXTH Interrupt
	 */
	struct evcnt ev_rxtxth_txerr;

	/*
	 * MISC Interrupt
	 */
	struct evcnt ev_misc_phystatuschng;
	struct evcnt ev_misc_linkchange;
	struct evcnt ev_misc_iae;
	struct evcnt ev_misc_rxoverrun;
	struct evcnt ev_misc_rxcrc;
	struct evcnt ev_misc_rxlargepacket;
	struct evcnt ev_misc_txunderrun;
	struct evcnt ev_misc_prbserr;
	struct evcnt ev_misc_srse;
	struct evcnt ev_misc_txreq;

	/*
	 * RxTx Interrupt
	 */
	struct evcnt ev_rxtx_rreq;
	struct evcnt ev_rxtx_rpq;
	struct evcnt ev_rxtx_tbrq;
	struct evcnt ev_rxtx_rxtxth;
	struct evcnt ev_rxtx_txerr;
	struct evcnt ev_rxtx_misc;

	/*
	 * Link
	 */
	struct evcnt ev_link_up;
	struct evcnt ev_link_down;

	/*
	 * Rx Descriptor
	 */
	struct evcnt ev_rxd_ce;
	struct evcnt ev_rxd_or;
	struct evcnt ev_rxd_mf;
	struct evcnt ev_rxd_re;
	struct evcnt ev_rxd_scat;

	/*
	 * Tx Descriptor
	 */
	struct evcnt ev_txd_lc;
	struct evcnt ev_txd_ur;
	struct evcnt ev_txd_rl;
	struct evcnt ev_txd_oth;

	/*
	 * Status Registers
	 */
	struct evcnt ev_reg_pdfc;	/* Rx Port Discard Frame Counter */
	struct evcnt ev_reg_pofc;	/* Rx Port Overrun Frame Counter */
	struct evcnt ev_reg_txbadfcs;	/* Tx BAD FCS Counter */
	struct evcnt ev_reg_txdropped;	/* Tx Dropped Counter */
	struct evcnt ev_reg_lpic;

	/* Device Driver Errors */
	struct evcnt ev_drv_wdogsoft;
	struct evcnt ev_drv_txerr;
	struct evcnt ev_drv_rxq[MVXPE_QUEUE_SIZE];
	struct evcnt ev_drv_rxqe[MVXPE_QUEUE_SIZE];
	struct evcnt ev_drv_txq[MVXPE_QUEUE_SIZE];
	struct evcnt ev_drv_txqe[MVXPE_QUEUE_SIZE];
};
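
/*
 * Usage sketch (illustrative only): the counters above are updated via
 * the MVXPE_EVCNT_* macros, which compile to nothing unless
 * MVXPE_EVENT_COUNTERS is defined, e.g.
 *
 *	MVXPE_EVCNT_INCR(&sc->sc_ev.ev_link_up);
 *	MVXPE_EVCNT_ADD(&sc->sc_ev.ev_drv_rxq[q], 1);
 */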

/*
 * Debug
 */
#ifdef MVXPE_DEBUG
#define DPRINTF(fmt, ...) \
	do { \
		if (mvxpe_debug >= 1) { \
			printf("%s: ", __func__); \
			printf((fmt), ##__VA_ARGS__); \
		} \
	} while (/*CONSTCOND*/0)
#define DPRINTFN(level, fmt, ...) \
	do { \
		if (mvxpe_debug >= (level)) { \
			printf("%s: ", __func__); \
			printf((fmt), ##__VA_ARGS__); \
		} \
	} while (/*CONSTCOND*/0)
#define DPRINTDEV(dev, level, fmt, ...) \
	do { \
		if (mvxpe_debug >= (level)) { \
			device_printf((dev), \
			    "%s: " fmt, __func__, ##__VA_ARGS__); \
		} \
	} while (/*CONSTCOND*/0)
#define DPRINTSC(sc, level, fmt, ...) \
	do { \
		device_t dev = (sc)->sc_dev; \
		if (mvxpe_debug >= (level)) { \
			device_printf(dev, \
			    "%s: " fmt, __func__, ##__VA_ARGS__); \
		} \
	} while (/*CONSTCOND*/0)
#define DPRINTIFNET(ifp, level, fmt, ...) \
	do { \
		const char *xname = (ifp)->if_xname; \
		if (mvxpe_debug >= (level)) { \
			printf("%s: %s: " fmt, xname, __func__, ##__VA_ARGS__);\
		} \
	} while (/*CONSTCOND*/0)
#define DPRINTPRXS(level, q) \
	do { \
		uint32_t _reg = MVXPE_READ(sc, MVXPE_PRXS(q)); \
		if (mvxpe_debug >= (level)) { \
			printf("PRXS(queue %d) %#x: Occupied %d, NoOccupied %d.\n", \
			    q, _reg, MVXPE_PRXS_GET_ODC(_reg), \
			    MVXPE_PRXS_GET_NODC(_reg)); \
		} \
	} while (/*CONSTCOND*/0)
#else
#define DPRINTF(fmt, ...)
#define DPRINTFN(level, fmt, ...)
#define DPRINTDEV(dev, level, fmt, ...)
#define DPRINTSC(sc, level, fmt, ...)
#define DPRINTIFNET(ifp, level, fmt, ...)
#define DPRINTPRXS(level, q)
#endif
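
/*
 * Usage sketch (illustrative only): with MVXPE_DEBUG defined, a larger
 * mvxpe_debug value enables more verbose levels, e.g.
 *
 *	DPRINTSC(sc, 2, "rx queue %d: refilled %d buffers\n", q, n);
 *
 * All DPRINT* macros compile to nothing without MVXPE_DEBUG.
 */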

#define KASSERT_SC_MTX(sc) \
	KASSERT(mutex_owned(&(sc)->sc_mtx))
#define KASSERT_BM_MTX(sc) \
	KASSERT(mutex_owned(&(sc)->sc_bm.bm_mtx))
#define KASSERT_RX_MTX(sc, q) \
	KASSERT(mutex_owned(&(sc)->sc_rx_ring[(q)].rx_ring_mtx))
#define KASSERT_TX_MTX(sc, q) \
	KASSERT(mutex_owned(&(sc)->sc_tx_ring[(q)].tx_ring_mtx))
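
/*
 * Usage sketch (illustrative only): the assertions above document the
 * expected locking pattern, e.g. a per-queue ring is touched only with
 * its ring mutex held:
 *
 *	mutex_enter(&sc->sc_tx_ring[q].tx_ring_mtx);
 *	KASSERT_TX_MTX(sc, q);
 *	...
 *	mutex_exit(&sc->sc_tx_ring[q].tx_ring_mtx);
 */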

/*
 * Configuration parameters
 */
struct mvxpe_conf {
	int cf_lpi;	/* EEE Low Power Idle enable */
	int cf_fc;	/* Flow Control enable */
};

/*
 * sysctl(9) parameters
 */
struct mvxpe_softc;
struct mvxpe_sysctl_queue {
	struct mvxpe_softc	*sc;
	int			rxtx;
	int			queue;
};
#define MVXPE_SYSCTL_RX		0
#define MVXPE_SYSCTL_TX		1

struct mvxpe_sysctl_mib {
	struct mvxpe_softc	*sc;
	int			index;
	uint64_t		counter;
};

/*
 * Ethernet Device main context
 */
struct mvxpe_softc {
	device_t sc_dev;
	int sc_port;
	uint32_t sc_version;

	/*
	 * sc_mtx must be held by interface functions called from
	 * other frameworks: the interrupt handler, sysctl handler,
	 * ioctl handler, and so on.
	 */
	kmutex_t sc_mtx;

	/*
	 * Ethernet facilities
	 */
	struct ethercom sc_ethercom;
	struct mii_data sc_mii;
	u_int8_t sc_enaddr[ETHER_ADDR_LEN];	/* station addr */
	u_short sc_if_flags;
	int sc_wdogsoft;

	/*
	 * Configuration Parameters
	 */
	struct mvxpe_conf sc_cf;

	/*
	 * I/O Spaces
	 */
	bus_space_tag_t sc_iot;
	bus_space_handle_t sc_ioh;	/* all registers handle */
	bus_space_handle_t sc_mibh;	/* mib counter handle */

	/*
	 * DMA Spaces
	 */
	bus_dma_tag_t sc_dmat;
	struct mvxpe_rx_ring sc_rx_ring[MVXPE_QUEUE_SIZE];
	struct mvxpe_tx_ring sc_tx_ring[MVXPE_QUEUE_SIZE];
	int sc_tx_pending;		/* total number of tx pkts */

	/*
	 * Software Buffer Manager
	 */
	struct mvxpbm_softc *sc_bm;

	/*
	 * Maintenance clock
	 */
	callout_t sc_tick_ch;		/* tick callout */

	/*
	 * Link State control
	 */
	uint32_t sc_linkstate;

	/*
	 * Acts as a random source
	 */
	krndsource_t sc_rnd_source;

	/*
	 * Sysctl interfaces
	 */
	struct sysctllog *sc_mvxpe_clog;
	struct mvxpe_sysctl_queue sc_sysctl_rx_queue[MVXPE_QUEUE_SIZE];
	struct mvxpe_sysctl_queue sc_sysctl_tx_queue[MVXPE_QUEUE_SIZE];

	/*
	 * MIB counter
	 */
	size_t sc_sysctl_mib_size;
	struct mvxpe_sysctl_mib *sc_sysctl_mib;

#ifdef MVXPE_EVENT_COUNTERS
	/*
	 * Event counter
	 */
	struct mvxpe_evcnt sc_ev;
#endif
};
#define MVXPE_RX_RING_MEM_VA(sc, q) \
	((sc)->sc_rx_ring[(q)].rx_descriptors)
#define MVXPE_RX_RING_MEM_PA(sc, q) \
	((sc)->sc_rx_ring[(q)].rx_descriptors_map->dm_segs[0].ds_addr)
#define MVXPE_RX_RING_MEM_MAP(sc, q) \
	((sc)->sc_rx_ring[(q)].rx_descriptors_map)
#define MVXPE_RX_RING(sc, q) \
	(&(sc)->sc_rx_ring[(q)])
#define MVXPE_RX_HANDLE(sc, q, i) \
	(&(sc)->sc_rx_ring[(q)].rx_handle[(i)])
#define MVXPE_RX_DESC(sc, q, i) \
	((sc)->sc_rx_ring[(q)].rx_handle[(i)].rxdesc_va)
#define MVXPE_RX_DESC_OFF(sc, q, i) \
	((sc)->sc_rx_ring[(q)].rx_handle[(i)].rxdesc_off)
#define MVXPE_RX_PKTBUF(sc, q, i) \
	((sc)->sc_rx_ring[(q)].rx_handle[(i)].chunk)
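
/*
 * Illustrative sketch (not part of the driver API): the accessor
 * macros above resolve a (queue, index) pair into the corresponding
 * ring entry, e.g. the descriptor the CPU would look at next on
 * receive queue q.
 */
static __inline struct mvxpe_rx_desc *
mvxpe_rx_next_desc_example(struct mvxpe_softc *sc, int q)
{
	int idx = MVXPE_RX_RING(sc, q)->rx_cpu;

	return MVXPE_RX_DESC(sc, q, idx);
}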

#define MVXPE_TX_RING_MEM_VA(sc, q) \
	((sc)->sc_tx_ring[(q)].tx_descriptors)
#define MVXPE_TX_RING_MEM_PA(sc, q) \
	((sc)->sc_tx_ring[(q)].tx_descriptors_map->dm_segs[0].ds_addr)
#define MVXPE_TX_RING_MEM_MAP(sc, q) \
	((sc)->sc_tx_ring[(q)].tx_descriptors_map)
#define MVXPE_TX_RING(sc, q) \
	(&(sc)->sc_tx_ring[(q)])
#define MVXPE_TX_HANDLE(sc, q, i) \
	(&(sc)->sc_tx_ring[(q)].tx_handle[(i)])
#define MVXPE_TX_DESC(sc, q, i) \
	((sc)->sc_tx_ring[(q)].tx_handle[(i)].txdesc_va)
#define MVXPE_TX_DESC_OFF(sc, q, i) \
	((sc)->sc_tx_ring[(q)].tx_handle[(i)].txdesc_off)
#define MVXPE_TX_MBUF(sc, q, i) \
	((sc)->sc_tx_ring[(q)].tx_handle[(i)].txdesc_mbuf)
#define MVXPE_TX_MAP(sc, q, i) \
	((sc)->sc_tx_ring[(q)].tx_handle[(i)].txdesc_mbuf_map)

#endif /* _IF_MVXPEVAR_H_ */