/**************************************************************************

Copyright (c) 2007, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.


***************************************************************************/


#ifndef _CXGB_ADAPTER_H_
#define _CXGB_ADAPTER_H_

#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#include <sys/bus.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#ifdef CONFIG_DEFINED
#include <cxgb_osdep.h>
#include <ulp/toecore/toedev.h>
#include <sys/mbufq.h>
#else
#include "cxgb_osdep.h"
#include "cxgb_mbuf.h"
#include "cxgb_toedev.h"
#endif

struct adapter;
struct sge_qset;
extern int cxgb_debug;

#ifdef DEBUG_LOCKING
#define MTX_INIT(lock, lockname, class, flags) \
    do { \
        printf("initializing %s at %s:%d\n", lockname, __FILE__, __LINE__); \
        mtx_init((lock), lockname, class, flags);       \
    } while (0)

#define MTX_DESTROY(lock) \
    do { \
        printf("destroying %s at %s:%d\n", (lock)->lock_object.lo_name, __FILE__, __LINE__); \
        mtx_destroy((lock));                    \
    } while (0)

#define SX_INIT(lock, lockname) \
    do { \
        printf("initializing %s at %s:%d\n", lockname, __FILE__, __LINE__); \
        sx_init((lock), lockname);      \
    } while (0)

#define SX_DESTROY(lock) \
    do { \
        printf("destroying %s at %s:%d\n", (lock)->lock_object.lo_name, __FILE__, __LINE__); \
        sx_destroy((lock));                 \
    } while (0)
#else
#define MTX_INIT mtx_init
#define MTX_DESTROY mtx_destroy
#define SX_INIT sx_init
#define SX_DESTROY sx_destroy
#endif
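
/*
 * With DEBUG_LOCKING defined, the wrappers above log every lock
 * initialization and destruction along with its source location; otherwise
 * they collapse to the plain mtx/sx primitives.  Illustrative use only
 * (hypothetical caller, not taken from this driver):
 *
 *	MTX_INIT(&sc->lock, sc->lockbuf, NULL, MTX_DEF);
 *	...
 *	MTX_DESTROY(&sc->lock);
 */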

struct port_device {
	device_t	dev;
	struct adapter	*parent;
	int		port_number;
};

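/*
 * Per-port software state: the network interface, PHY/MAC handles, link
 * configuration, and the range of SGE queue sets assigned to the port.
 */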
struct port_info {
    struct adapter  *adapter;
    struct ifnet    *ifp;
    struct port_device *pd;
    int     port;
    u_short     if_flags;
    const struct port_type_info *port_type;
    struct cphy phy;
    struct cmac mac;
    struct link_config link_config;
    struct ifmedia  media;
#ifdef USE_SX
    struct sx   lock;
#else
    struct mtx  lock;
#endif
    uint8_t     port_id;
    uint8_t     tx_chan;
    uint8_t     txpkt_intf;
    uint8_t     nqsets;
    uint8_t         first_qset;

    uint8_t     hw_addr[ETHER_ADDR_LEN];
    struct cxgb_task start_task;
    struct cxgb_task timer_reclaim_task;
    struct cdev     *port_cdev;

#define PORT_NAME_LEN 32
#define TASKQ_NAME_LEN 32
    char            lockbuf[PORT_NAME_LEN];
    char            taskqbuf[TASKQ_NAME_LEN];
};

enum {              /* adapter flags */
    FULL_INIT_DONE  = (1 << 0),
    USING_MSI   = (1 << 1),
    USING_MSIX  = (1 << 2),
    QUEUES_BOUND    = (1 << 3),
    FW_UPTODATE     = (1 << 4),
    TPS_UPTODATE    = (1 << 5),
};


#define FL_Q_SIZE   4096
#define JUMBO_Q_SIZE    512
#define RSPQ_Q_SIZE 1024
#define TX_ETH_Q_SIZE   1024



/*
 * Types of Tx queues in each queue set.  Order here matters, do not change.
 * XXX TOE is not implemented yet, so the extra queues are just placeholders.
 */
enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };
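/*
 * These values index the txq[] array of struct sge_qset below, e.g.
 * qs->txq[TXQ_ETH] is the Ethernet Tx queue of a queue set.
 */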


/* Careful: the following are set in priv_flags and must not collide with
 * IFF_ flags!
 */
enum {
    LRO_ACTIVE = (1 << 8),
};
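/*
 * Note: LRO_ACTIVE shares the interface flags word with the IFF_ bits
 * (hence the warning above); the receive path is expected to test it before
 * merging packets into the LRO sessions defined below.
 */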

/* Max concurrent LRO sessions per queue set */
#define MAX_LRO_SES 8

struct t3_lro_session {
    struct mbuf *head;
    struct mbuf *tail;
    uint32_t seq;
    uint16_t ip_len;
    uint16_t mss;
    uint16_t vtag;
    uint8_t npkts;
};

struct lro_state {
    unsigned short enabled;
    unsigned short active_idx;
    unsigned int nactive;
    struct t3_lro_session sess[MAX_LRO_SES];
};

#define RX_BUNDLE_SIZE 8

struct rsp_desc;

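/*
 * An SGE response queue: the ring the hardware posts Rx completions and
 * control responses to, plus the driver-side bookkeeping and DMA resources
 * that back it.
 */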
struct sge_rspq {
    uint32_t    credits;
    uint32_t    size;
    uint32_t    cidx;
    uint32_t    gen;
    uint32_t    polling;
    uint32_t    holdoff_tmr;
    uint32_t    next_holdoff;
    uint32_t        imm_data;
    struct rsp_desc *desc;
    uint32_t    cntxt_id;
    struct mtx      lock;
    struct mbuf     *rx_head;    /* offload packet receive queue head */
    struct mbuf     *rx_tail;    /* offload packet receive queue tail */

    uint32_t        offload_pkts;
    uint32_t        offload_bundles;
    uint32_t        pure_rsps;
    uint32_t        unhandled_irqs;

    bus_addr_t  phys_addr;
    bus_dma_tag_t   desc_tag;
    bus_dmamap_t    desc_map;

    struct t3_mbuf_hdr rspq_mh;
#define RSPQ_NAME_LEN  32
    char            lockbuf[RSPQ_NAME_LEN];
};

#ifndef DISABLE_MBUF_IOVEC
#define rspq_mbuf rspq_mh.mh_head
#endif

struct rx_desc;
struct rx_sw_desc;

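/*
 * An SGE free list: a ring of receive buffers handed to the hardware.  Each
 * queue set carries SGE_RXQ_PER_SET of these, typically one for normal-size
 * buffers and one for jumbo buffers.
 */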
struct sge_fl {
    uint32_t    buf_size;
    uint32_t    credits;
    uint32_t    size;
    uint32_t    cidx;
    uint32_t    pidx;
    uint32_t    gen;
    struct rx_desc  *desc;
    struct rx_sw_desc *sdesc;
    bus_addr_t  phys_addr;
    uint32_t    cntxt_id;
    uint64_t    empty;
    bus_dma_tag_t   desc_tag;
    bus_dmamap_t    desc_map;
    bus_dma_tag_t   entry_tag;
    int             type;
};

struct tx_desc;
struct tx_sw_desc;

#define TXQ_TRANSMITTING    0x1

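/*
 * An SGE Tx queue: descriptor ring, software descriptors, the deferred-send
 * queue, and the tasks used to restart or reclaim the queue when it stalls.
 */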
struct sge_txq {
    uint64_t    flags;
    uint32_t    in_use;
    uint32_t    size;
    uint32_t    processed;
    uint32_t    cleaned;
    uint32_t    stop_thres;
    uint32_t    cidx;
    uint32_t    pidx;
    uint32_t    gen;
    uint32_t    unacked;
    struct tx_desc  *desc;
    struct tx_sw_desc *sdesc;
    uint32_t    token;
    bus_addr_t  phys_addr;
    struct cxgb_task qresume_task;
    struct cxgb_task qreclaim_task;
    struct port_info *port;
    uint32_t    cntxt_id;
    uint64_t    stops;
    uint64_t    restarts;
    bus_dma_tag_t   desc_tag;
    bus_dmamap_t    desc_map;
    bus_dma_tag_t   entry_tag;
    struct mbuf_head sendq;
    struct mtx      lock;
#define TXQ_NAME_LEN  32
    char            lockbuf[TXQ_NAME_LEN];
};


enum {
    SGE_PSTAT_TSO,              /* # of TSO requests */
    SGE_PSTAT_RX_CSUM_GOOD,     /* # of successful RX csum offloads */
    SGE_PSTAT_TX_CSUM,          /* # of TX checksum offloads */
    SGE_PSTAT_VLANEX,           /* # of VLAN tag extractions */
    SGE_PSTAT_VLANINS,          /* # of VLAN tag insertions */
    SGE_PSTATS_LRO_QUEUED,      /* # of LRO appended packets */
    SGE_PSTATS_LRO_FLUSHED,     /* # of LRO flushed packets */
    SGE_PSTATS_LRO_X_STREAMS,   /* # of times the LRO session limit was exceeded */
};

#define SGE_PSTAT_MAX (SGE_PSTATS_LRO_X_STREAMS+1)

struct sge_qset {
    struct sge_rspq     rspq;
    struct sge_fl       fl[SGE_RXQ_PER_SET];
    struct lro_state        lro;
    struct sge_txq      txq[SGE_TXQ_PER_SET];
    uint32_t                txq_stopped;       /* which Tx queues are stopped */
    uint64_t                port_stats[SGE_PSTAT_MAX];
    struct port_info        *port;
    int                     idx; /* qset # */
};

struct sge {
    struct sge_qset         qs[SGE_QSETS];
    struct mtx              reg_lock;
};

struct filter_info;

struct adapter {
    device_t        dev; /* so we have a compatible pointer */
    int         flags;
    TAILQ_ENTRY(adapter)    adapter_entry;

    /* PCI register resources */
    int         regs_rid;
    struct resource     *regs_res;
    bus_space_handle_t  bh;
    bus_space_tag_t     bt;
    bus_size_t              mmio_len;
    uint32_t                link_width;
    struct pci_attach_args pa;
    uint32_t            bar0;
    bus_space_handle_t  bar0_handle;
    pci_intr_handle_t   intr_handle;
    void               *intr_cookie;

    /* DMA resources */
    bus_dma_tag_t       parent_dmat;
    bus_dma_tag_t       rx_dmat;
    bus_dma_tag_t       rx_jumbo_dmat;
    bus_dma_tag_t       tx_dmat;

    /* Interrupt resources */
    int         irq_rid;

    uint32_t        msix_regs_rid;
    struct resource     *msix_regs_res;

    struct resource     *msix_irq_res[SGE_QSETS];
    int         msix_irq_rid[SGE_QSETS];
    void            *msix_intr_tag[SGE_QSETS];
    uint8_t                 rxpkt_map[8]; /* maps RX_PKT interface values to port ids */
    uint8_t                 rrss_map[SGE_QSETS]; /* reverse RSS map table */

    struct filter_info      *filters;

    /* Tasks */
    struct cxgb_task    ext_intr_task;
    struct cxgb_task    slow_intr_task;
    struct cxgb_task    tick_task;
    struct callout      cxgb_tick_ch;
    struct callout      sge_timer_ch;

    /* Register locks for use by the hardware layer */
    struct mtx      mdio_lock;
    struct mtx      elmer_lock;

    /* Bookkeeping for the hardware layer */
    struct adapter_params  params;
    unsigned int slow_intr_mask;
    unsigned long irq_stats[IRQ_NUM_STATS];

    struct sge              sge;
    struct mc7              pmrx;
    struct mc7              pmtx;
    struct mc7              cm;
    struct mc5              mc5;

    struct port_info    port[MAX_NPORTS];
    device_t        portdev[MAX_NPORTS];
    struct toedev           tdev;
    char                    fw_version[64];
    uint32_t                open_device_map;
    uint32_t                registered_device_map;
#ifdef USE_SX
    struct sx               lock;
#else
    struct mtx              lock;
#endif
    int                     (*cxgb_intr)(void *);
    int                     msi_count;

#define ADAPTER_LOCK_NAME_LEN   32
    char                    lockbuf[ADAPTER_LOCK_NAME_LEN];
    char                    reglockbuf[ADAPTER_LOCK_NAME_LEN];
    char                    mdiolockbuf[ADAPTER_LOCK_NAME_LEN];
    char                    elmerlockbuf[ADAPTER_LOCK_NAME_LEN];
};

struct t3_rx_mode {
    uint32_t                idx;
    struct port_info        *port;
};


#define MDIO_LOCK(adapter)  mtx_lock(&(adapter)->mdio_lock)
#define MDIO_UNLOCK(adapter)    mtx_unlock(&(adapter)->mdio_lock)
#define ELMR_LOCK(adapter)  mtx_lock(&(adapter)->elmer_lock)
#define ELMR_UNLOCK(adapter)    mtx_unlock(&(adapter)->elmer_lock)


#ifdef USE_SX
#define PORT_LOCK(port)          sx_xlock(&(port)->lock)
#define PORT_UNLOCK(port)        sx_xunlock(&(port)->lock)
#define PORT_LOCK_INIT(port, name)   SX_INIT(&(port)->lock, name)
#define PORT_LOCK_DEINIT(port)       SX_DESTROY(&(port)->lock)
#define PORT_LOCK_ASSERT_OWNED(port) sx_assert(&(port)->lock, SA_LOCKED)

#define ADAPTER_LOCK(adap)             sx_xlock(&(adap)->lock)
#define ADAPTER_UNLOCK(adap)               sx_xunlock(&(adap)->lock)
#define ADAPTER_LOCK_INIT(adap, name)      SX_INIT(&(adap)->lock, name)
#define ADAPTER_LOCK_DEINIT(adap)          SX_DESTROY(&(adap)->lock)
#define ADAPTER_LOCK_ASSERT_NOTOWNED(adap) sx_assert(&(adap)->lock, SA_UNLOCKED)
#else
#define PORT_LOCK(port)          mtx_lock(&(port)->lock)
#define PORT_UNLOCK(port)        mtx_unlock(&(port)->lock)
#define PORT_LOCK_INIT(port, name)   mtx_init(&(port)->lock, name, 0, MTX_DEF)
#define PORT_LOCK_DEINIT(port)       mtx_destroy(&(port)->lock)
#define PORT_LOCK_ASSERT_OWNED(port) mtx_assert(&(port)->lock, MA_OWNED)

#define ADAPTER_LOCK(adap)  mtx_lock(&(adap)->lock)
#define ADAPTER_UNLOCK(adap)    mtx_unlock(&(adap)->lock)
#define ADAPTER_LOCK_INIT(adap, name) mtx_init(&(adap)->lock, name, 0, MTX_DEF)
#define ADAPTER_LOCK_DEINIT(adap) mtx_destroy(&(adap)->lock)
#define ADAPTER_LOCK_ASSERT_NOTOWNED(adap) mtx_assert(&(adap)->lock, MA_NOTOWNED)
#endif
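
/*
 * Typical locking pattern (illustrative only; the actual callers live in
 * the driver's .c files):
 *
 *	PORT_LOCK_INIT(p, p->lockbuf);
 *	PORT_LOCK(p);
 *	... modify per-port state ...
 *	PORT_UNLOCK(p);
 *	PORT_LOCK_DEINIT(p);
 */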

static __inline uint32_t
t3_read_reg(adapter_t *adapter, uint32_t reg_addr)
{
    return (bus_space_read_4(adapter->bt, adapter->bh, reg_addr));
}

static __inline void
t3_write_reg(adapter_t *adapter, uint32_t reg_addr, uint32_t val)
{
    bus_space_write_4(adapter->bt, adapter->bh, reg_addr, val);
}

static __inline void
t3_os_pci_read_config_4(adapter_t *adapter, int reg, uint32_t *val)
{
    *val = pci_conf_read(adapter->pa.pa_pc, adapter->pa.pa_tag, reg);
}

static __inline void
t3_os_pci_write_config_4(adapter_t *adapter, int reg, uint32_t val)
{
    pci_conf_write(adapter->pa.pa_pc, adapter->pa.pa_tag, reg, val);
}

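/*
 * pci_conf_read()/pci_conf_write() operate on 32-bit words, so the 16-bit
 * config accessors below read the containing aligned word and extract or
 * merge the requested half before writing it back.
 */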
static __inline void
t3_os_pci_read_config_2(adapter_t *adapter, int reg, uint16_t *val)
{
    uint32_t temp;
    temp = pci_conf_read(adapter->pa.pa_pc, adapter->pa.pa_tag, reg&0xfc);
    if (reg&0x2)
        *val = (temp>>16)&0xffff;
    else
        *val = temp&0xffff;
}

static __inline void
t3_os_pci_write_config_2(adapter_t *adapter, int reg, uint16_t val)
{
    uint32_t temp = pci_conf_read(adapter->pa.pa_pc, adapter->pa.pa_tag, reg&0xfc);
    if (reg&0x2)
        temp = (temp&0xffff)|(val<<16);
    else
        temp = (temp&0xffff0000)|val;
    pci_conf_write(adapter->pa.pa_pc, adapter->pa.pa_tag, reg&0xfc, temp);
}

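/*
 * Rx-mode helpers: t3_init_rx_mode() starts an iteration over the MAC
 * addresses to program for a port, and t3_get_next_mcaddr() returns each
 * address in turn.  Only the port's primary hardware address is reported
 * here; the iterator returns NULL once it is exhausted.
 */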
static __inline uint8_t *
t3_get_next_mcaddr(struct t3_rx_mode *rm)
{
    uint8_t *macaddr = NULL;

    if (rm->idx == 0)
        macaddr = rm->port->hw_addr;

    rm->idx++;
    return (macaddr);
}

static __inline void
t3_init_rx_mode(struct t3_rx_mode *rm, struct port_info *port)
{
    rm->idx = 0;
    rm->port = port;
}

static __inline struct port_info *
adap2pinfo(struct adapter *adap, int idx)
{
    return &adap->port[idx];
}

int t3_os_find_pci_capability(adapter_t *adapter, int cap);
int t3_os_pci_save_state(struct adapter *adapter);
int t3_os_pci_restore_state(struct adapter *adapter);
void t3_os_link_changed(adapter_t *adapter, int port_id, int link_status,
            int speed, int duplex, int fc);
void t3_sge_err_intr_handler(adapter_t *adapter);
int t3_offload_tx(struct toedev *, struct mbuf *);
void t3_os_ext_intr_handler(adapter_t *adapter);
void t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[]);
int t3_mgmt_tx(adapter_t *adap, struct mbuf *m);


int t3_sge_alloc(struct adapter *);
int t3_sge_free(struct adapter *);
int t3_sge_alloc_qset(adapter_t *, uint32_t, int, int, const struct qset_params *,
    int, struct port_info *);
void t3_free_sge_resources(adapter_t *);
void t3_sge_start(adapter_t *);
void t3_sge_stop(adapter_t *);
int t3b_intr(void *data);
int t3_intr_msi(void *data);
int t3_intr_msix(void *data);
int t3_encap(struct port_info *, struct mbuf **, int *free);

int t3_sge_init_adapter(adapter_t *);
int t3_sge_init_port(struct port_info *);
void t3_sge_deinit_sw(adapter_t *);

void t3_rx_eth_lro(adapter_t *adap, struct sge_rspq *rq, struct mbuf *m,
    int ethpad, uint32_t rss_hash, uint32_t rss_csum, int lro);
void t3_rx_eth(struct adapter *adap, struct sge_rspq *rq, struct mbuf *m, int ethpad);
void t3_lro_flush(adapter_t *adap, struct sge_qset *qs, struct lro_state *state);

void t3_add_sysctls(adapter_t *sc);
int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
    unsigned char *data);
void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
/*
 * XXX figure out how we can return this to being private to sge
 */
#define desc_reclaimable(q) ((int)((q)->processed - (q)->cleaned - TX_MAX_DESC))

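/*
 * Local container_of(): recover the enclosing structure from a pointer to
 * one of its members.  It is #undef'd after the helpers below so the name
 * does not leak to other users of this header.
 */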
#define container_of(p, stype, field) ((stype *)(((uint8_t *)(p)) - offsetof(stype, field)))

static __inline struct sge_qset *
fl_to_qset(struct sge_fl *q, int qidx)
{
    return container_of(q, struct sge_qset, fl[qidx]);
}

static __inline struct sge_qset *
rspq_to_qset(struct sge_rspq *q)
{
    return container_of(q, struct sge_qset, rspq);
}

static __inline struct sge_qset *
txq_to_qset(struct sge_txq *q, int qidx)
{
    return container_of(q, struct sge_qset, txq[qidx]);
}

static __inline struct adapter *
tdev2adap(struct toedev *d)
{
    return container_of(d, struct adapter, tdev);
}

#undef container_of

#define OFFLOAD_DEVMAP_BIT 15
static __inline int
offload_running(adapter_t *adapter)
{
        return isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT);
}


#endif /* _CXGB_ADAPTER_H_ */