      1  1.1     jklos 
      2  1.1     jklos /**************************************************************************
      3  1.1     jklos 
      4  1.1     jklos Copyright (c) 2007, Chelsio Inc.
      5  1.1     jklos All rights reserved.
      6  1.1     jklos 
      7  1.1     jklos Redistribution and use in source and binary forms, with or without
      8  1.1     jklos modification, are permitted provided that the following conditions are met:
      9  1.1     jklos 
     10  1.1     jklos  1. Redistributions of source code must retain the above copyright notice,
     11  1.1     jklos     this list of conditions and the following disclaimer.
     12  1.1     jklos 
     13  1.1     jklos  2. Neither the name of the Chelsio Corporation nor the names of its
     14  1.1     jklos     contributors may be used to endorse or promote products derived from
     15  1.1     jklos     this software without specific prior written permission.
     16  1.1     jklos 
     17  1.1     jklos THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     18  1.1     jklos AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     19  1.1     jklos IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     20  1.1     jklos ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     21  1.1     jklos LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     22  1.1     jklos CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     23  1.1     jklos SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     24  1.1     jklos INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     25  1.1     jklos CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     26  1.1     jklos ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     27  1.1     jklos POSSIBILITY OF SUCH DAMAGE.
     28  1.1     jklos 
     29  1.1     jklos 
     30  1.1     jklos ***************************************************************************/
     31  1.1     jklos 
     32  1.1     jklos 
     33  1.1     jklos #include <sys/cdefs.h>
     34  1.7    andvar __KERNEL_RCSID(0, "$NetBSD: cxgb_offload.c,v 1.7 2021/08/08 20:57:09 andvar Exp $");
     35  1.1     jklos 
     36  1.1     jklos #include <sys/param.h>
     37  1.1     jklos #include <sys/systm.h>
     38  1.1     jklos #include <sys/kernel.h>
     39  1.1     jklos #include <sys/conf.h>
     40  1.2    dyoung #include <sys/bus.h>
     41  1.1     jklos #include <sys/ioccom.h>
     42  1.1     jklos #include <sys/mbuf.h>
     43  1.1     jklos #include <sys/socket.h>
     44  1.1     jklos #include <sys/sockio.h>
     45  1.1     jklos #include <sys/sysctl.h>
     46  1.1     jklos #include <sys/queue.h>
     47  1.1     jklos 
     48  1.1     jklos #ifdef CONFIG_DEFINED
     49  1.1     jklos #include <cxgb_include.h>
     50  1.1     jklos #else
     51  1.1     jklos #include "cxgb_include.h"
     52  1.1     jklos #endif
     53  1.1     jklos 
     54  1.1     jklos #include <net/route.h>
     55  1.1     jklos 
     56  1.1     jklos /*
      57  1.1     jklos  * XXX: placeholder compatibility definitions until proper equivalents exist.
     58  1.1     jklos  */
     59  1.1     jklos #define LOG_NOTICE 2
     60  1.1     jklos #define BUG_ON(...)
     61  1.1     jklos #define VALIDATE_TID 0
     62  1.1     jklos 
     63  1.1     jklos 
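                        /* Global lists of registered clients, offload devices, and adapters. */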
     64  1.1     jklos TAILQ_HEAD(, cxgb_client) client_list;
     65  1.1     jklos TAILQ_HEAD(, toedev) ofld_dev_list;
     66  1.1     jklos TAILQ_HEAD(, adapter) adapter_list;
     67  1.1     jklos 
     68  1.1     jklos static struct mtx cxgb_db_lock;
     69  1.1     jklos static struct rwlock adapter_list_lock;
     70  1.1     jklos 
     71  1.1     jklos 
     72  1.1     jklos static const unsigned int MAX_ATIDS = 64 * 1024;
     73  1.1     jklos static const unsigned int ATID_BASE = 0x100000;
     74  1.1     jklos static int inited = 0;
     75  1.1     jklos 
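                        /*
                         * Return nonzero if offload has been activated on this adapter,
                         * i.e. OFFLOAD_DEVMAP_BIT is set in its open_device_map.
                         */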
     76  1.1     jklos static inline int
     77  1.1     jklos offload_activated(struct toedev *tdev)
     78  1.1     jklos {
     79  1.1     jklos     struct adapter *adapter = tdev2adap(tdev);
     80  1.1     jklos 
     81  1.1     jklos     return (isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT));
     82  1.1     jklos }
     83  1.1     jklos 
     84  1.1     jklos /**
     85  1.1     jklos  *  cxgb_register_client - register an offload client
     86  1.1     jklos  *  @client: the client
     87  1.1     jklos  *
      88  1.1     jklos  *  Add the client to the client list and call back the client
      89  1.1     jklos  *  for each activated offload device.
     90  1.1     jklos  */
     91  1.1     jklos void
     92  1.1     jklos cxgb_register_client(struct cxgb_client *client)
     93  1.1     jklos {
     94  1.1     jklos     struct toedev *tdev;
     95  1.1     jklos 
     96  1.1     jklos     mtx_lock(&cxgb_db_lock);
     97  1.1     jklos     TAILQ_INSERT_TAIL(&client_list, client, client_entry);
     98  1.1     jklos 
     99  1.1     jklos     if (client->add) {
    100  1.1     jklos         TAILQ_FOREACH(tdev, &ofld_dev_list, ofld_entry) {
    101  1.1     jklos             if (offload_activated(tdev))
    102  1.1     jklos                 client->add(tdev);
    103  1.1     jklos         }
    104  1.1     jklos     }
    105  1.1     jklos     mtx_unlock(&cxgb_db_lock);
    106  1.1     jklos }
    107  1.1     jklos 
    108  1.1     jklos /**
    109  1.1     jklos  *  cxgb_unregister_client - unregister an offload client
    110  1.1     jklos  *  @client: the client
    111  1.1     jklos  *
     112  1.1     jklos  *  Remove the client from the client list and call back the client
     113  1.1     jklos  *  for each activated offload device.
    114  1.1     jklos  */
    115  1.1     jklos void
    116  1.1     jklos cxgb_unregister_client(struct cxgb_client *client)
    117  1.1     jklos {
    118  1.1     jklos     struct toedev *tdev;
    119  1.1     jklos 
    120  1.1     jklos     mtx_lock(&cxgb_db_lock);
    121  1.1     jklos     TAILQ_REMOVE(&client_list, client, client_entry);
    122  1.1     jklos 
    123  1.1     jklos     if (client->remove) {
    124  1.1     jklos         TAILQ_FOREACH(tdev, &ofld_dev_list, ofld_entry) {
    125  1.1     jklos             if (offload_activated(tdev))
    126  1.1     jklos                 client->remove(tdev);
    127  1.1     jklos         }
    128  1.1     jklos     }
    129  1.1     jklos     mtx_unlock(&cxgb_db_lock);
    130  1.1     jklos }
    131  1.1     jklos 
    132  1.1     jklos /**
     133  1.1     jklos  *  cxgb_add_clients - activate registered clients for an offload device
    134  1.1     jklos  *  @tdev: the offload device
    135  1.1     jklos  *
     136  1.1     jklos  *  Call back all registered clients once an offload device is activated.
    137  1.1     jklos  */
    138  1.1     jklos void
    139  1.1     jklos cxgb_add_clients(struct toedev *tdev)
    140  1.1     jklos {
    141  1.1     jklos     struct cxgb_client *client;
    142  1.1     jklos 
    143  1.1     jklos     mtx_lock(&cxgb_db_lock);
    144  1.1     jklos     TAILQ_FOREACH(client, &client_list, client_entry) {
    145  1.1     jklos         if (client->add)
    146  1.1     jklos             client->add(tdev);
    147  1.1     jklos     }
    148  1.1     jklos     mtx_unlock(&cxgb_db_lock);
    149  1.1     jklos }
    150  1.1     jklos 
    151  1.1     jklos /**
     152  1.1     jklos  *  cxgb_remove_clients - deactivate registered clients for an offload device
    153  1.1     jklos  *  @tdev: the offload device
    154  1.1     jklos  *
     155  1.1     jklos  *  Call back all registered clients once an offload device is deactivated.
    156  1.1     jklos  */
    157  1.1     jklos void
    158  1.1     jklos cxgb_remove_clients(struct toedev *tdev)
    159  1.1     jklos {
    160  1.1     jklos     struct cxgb_client *client;
    161  1.1     jklos 
    162  1.1     jklos     mtx_lock(&cxgb_db_lock);
    163  1.1     jklos     TAILQ_FOREACH(client, &client_list, client_entry) {
    164  1.1     jklos         if (client->remove)
    165  1.1     jklos             client->remove(tdev);
    166  1.1     jklos     }
    167  1.1     jklos     mtx_unlock(&cxgb_db_lock);
    168  1.1     jklos }
    169  1.1     jklos 
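                        /*
                         * Return nonzero if the interface belongs to a port of one of the
                         * registered cxgb adapters and is therefore capable of offloading.
                         */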
    170  1.1     jklos static int
    171  1.1     jklos is_offloading(struct ifnet *ifp)
    172  1.1     jklos {
    173  1.1     jklos     struct adapter *adapter;
    174  1.1     jklos     int port;
    175  1.1     jklos 
    176  1.1     jklos     rw_rlock(&adapter_list_lock);
    177  1.1     jklos     TAILQ_FOREACH(adapter, &adapter_list, adapter_entry) {
    178  1.1     jklos         for_each_port(adapter, port) {
    179  1.1     jklos             if (ifp == adapter->port[port].ifp) {
    180  1.1     jklos                 rw_runlock(&adapter_list_lock);
    181  1.1     jklos                 return 1;
    182  1.1     jklos             }
    183  1.1     jklos         }
    184  1.1     jklos     }
    185  1.1     jklos     rw_runlock(&adapter_list_lock);
    186  1.1     jklos     return 0;
    187  1.1     jklos }
    188  1.1     jklos 
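                        /*
                         * Map a MAC address (and optional VLAN tag) back to the owning
                         * interface.  Not yet implemented in this port; returns NULL for now.
                         */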
    189  1.1     jklos static struct ifnet *
    190  1.1     jklos get_iff_from_mac(adapter_t *adapter, const uint8_t *mac, unsigned int vlan)
    191  1.1     jklos {
    192  1.1     jklos #ifdef notyet
    193  1.1     jklos     int i;
    194  1.1     jklos 
    195  1.1     jklos     for_each_port(adapter, i) {
    196  1.1     jklos         const struct vlan_group *grp;
    197  1.1     jklos         const struct port_info *p = &adapter->port[i];
    198  1.1     jklos         struct ifnet *ifnet = p->ifp;
    199  1.1     jklos 
    200  1.1     jklos         if (!memcmp(p->hw_addr, mac, ETHER_ADDR_LEN)) {
    201  1.1     jklos             if (vlan && vlan != EVL_VLID_MASK) {
    202  1.1     jklos                 grp = p->vlan_grp;
    203  1.1     jklos                 dev = grp ? grp->vlan_devices[vlan] : NULL;
    204  1.1     jklos             } else
    205  1.1     jklos                 while (dev->master)
    206  1.1     jklos                     dev = dev->master;
    207  1.1     jklos             return dev;
    208  1.1     jklos         }
    209  1.1     jklos     }
    210  1.1     jklos #endif
    211  1.1     jklos     return NULL;
    212  1.1     jklos }
    213  1.1     jklos 
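                        /*
                         * Fix up the MAC after a port failover on rev 0 hardware: re-enable
                         * the transmitter when the failover was triggered by an interface
                         * being downed, otherwise reset and re-enable the receiver.
                         */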
    214  1.1     jklos static inline void
    215  1.1     jklos failover_fixup(adapter_t *adapter, int port)
    216  1.1     jklos {
    217  1.1     jklos     if (adapter->params.rev == 0) {
    218  1.1     jklos         struct ifnet *ifp = adapter->port[port].ifp;
    219  1.1     jklos         struct cmac *mac = &adapter->port[port].mac;
    220  1.1     jklos         if (!(ifp->if_flags & IFF_UP)) {
    221  1.1     jklos             /* Failover triggered by the interface ifdown */
    222  1.1     jklos             t3_write_reg(adapter, A_XGM_TX_CTRL + mac->offset,
    223  1.1     jklos                      F_TXEN);
    224  1.1     jklos             t3_read_reg(adapter, A_XGM_TX_CTRL + mac->offset);
    225  1.1     jklos         } else {
    226  1.1     jklos             /* Failover triggered by the interface link down */
    227  1.1     jklos             t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
    228  1.1     jklos             t3_read_reg(adapter, A_XGM_RX_CTRL + mac->offset);
    229  1.1     jklos             t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset,
    230  1.1     jklos                      F_RXEN);
    231  1.1     jklos         }
    232  1.1     jklos     }
    233  1.1     jklos }
    234  1.1     jklos 
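                        /*
                         * Handle ULP iSCSI control requests: report the hardware iSCSI
                         * memory limits, tag mask, and maximum PDU sizes, or program a new
                         * tag mask.
                         */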
    235  1.1     jklos static int
    236  1.1     jklos cxgb_ulp_iscsi_ctl(adapter_t *adapter, unsigned int req, void *data)
    237  1.1     jklos {
    238  1.1     jklos     int ret = 0;
    239  1.1     jklos     struct ulp_iscsi_info *uiip = data;
    240  1.1     jklos 
    241  1.1     jklos     switch (req) {
    242  1.1     jklos     case ULP_ISCSI_GET_PARAMS:
    243  1.1     jklos         uiip->llimit = t3_read_reg(adapter, A_ULPRX_ISCSI_LLIMIT);
    244  1.1     jklos         uiip->ulimit = t3_read_reg(adapter, A_ULPRX_ISCSI_ULIMIT);
    245  1.1     jklos         uiip->tagmask = t3_read_reg(adapter, A_ULPRX_ISCSI_TAGMASK);
    246  1.1     jklos         /*
    247  1.1     jklos          * On tx, the iscsi pdu has to be <= tx page size and has to
    248  1.1     jklos          * fit into the Tx PM FIFO.
    249  1.1     jklos          */
    250  1.5  riastrad         uiip->max_txsz = uimin(adapter->params.tp.tx_pg_size,
    251  1.1     jklos                      t3_read_reg(adapter, A_PM1_TX_CFG) >> 17);
    252  1.1     jklos         /* on rx, the iscsi pdu has to be < rx page size and the
    253  1.1     jklos            whole pdu + cpl headers has to fit into one sge buffer */
    254  1.1     jklos         uiip->max_rxsz =
    255  1.5  riastrad             (unsigned int)uimin(adapter->params.tp.rx_pg_size,
    256  1.1     jklos             (adapter->sge.qs[0].fl[1].buf_size -
    257  1.1     jklos                 sizeof(struct cpl_rx_data) * 2 -
    258  1.1     jklos                 sizeof(struct cpl_rx_data_ddp)) );
    259  1.1     jklos         break;
    260  1.1     jklos     case ULP_ISCSI_SET_PARAMS:
    261  1.1     jklos         t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask);
    262  1.1     jklos         break;
    263  1.1     jklos     default:
    264  1.1     jklos         ret = (EOPNOTSUPP);
    265  1.1     jklos     }
    266  1.1     jklos     return ret;
    267  1.1     jklos }
    268  1.1     jklos 
    269  1.1     jklos /* Response queue used for RDMA events. */
    270  1.1     jklos #define ASYNC_NOTIF_RSPQ 0
    271  1.1     jklos 
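                        /*
                         * Handle RDMA control requests from offload clients: report the
                         * adapter memory map, perform CQ context operations, read adapter
                         * memory, and set up completion queues and the control egress
                         * context.
                         */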
    272  1.1     jklos static int
    273  1.1     jklos cxgb_rdma_ctl(adapter_t *adapter, unsigned int req, void *data)
    274  1.1     jklos {
    275  1.1     jklos     int ret = 0;
    276  1.1     jklos 
    277  1.1     jklos     switch (req) {
    278  1.1     jklos     case RDMA_GET_PARAMS: {
    279  1.1     jklos         struct rdma_info *req2 = data;
    280  1.1     jklos 
    281  1.1     jklos         req2->tpt_base = t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT);
    282  1.1     jklos         req2->tpt_top  = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT);
    283  1.1     jklos         req2->pbl_base = t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT);
    284  1.1     jklos         req2->pbl_top  = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT);
    285  1.1     jklos         req2->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT);
    286  1.1     jklos         req2->rqt_top  = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT);
    287  1.1     jklos         break;
    288  1.1     jklos     }
    289  1.1     jklos     case RDMA_CQ_OP: {
    290  1.1     jklos         struct rdma_cq_op *req2 = data;
    291  1.1     jklos 
    292  1.1     jklos         /* may be called in any context */
    293  1.1     jklos         mtx_lock(&adapter->sge.reg_lock);
    294  1.1     jklos         ret = t3_sge_cqcntxt_op(adapter, req2->id, req2->op,
    295  1.1     jklos                     req2->credits);
    296  1.1     jklos         mtx_unlock(&adapter->sge.reg_lock);
    297  1.1     jklos         break;
    298  1.1     jklos     }
    299  1.1     jklos     case RDMA_GET_MEM: {
    300  1.1     jklos         struct ch_mem_range *t = data;
    301  1.1     jklos         struct mc7 *mem;
    302  1.1     jklos 
    303  1.1     jklos         if ((t->addr & 7) || (t->len & 7))
    304  1.1     jklos             return (EINVAL);
    305  1.1     jklos         if (t->mem_id == MEM_CM)
    306  1.1     jklos             mem = &adapter->cm;
    307  1.1     jklos         else if (t->mem_id == MEM_PMRX)
    308  1.1     jklos             mem = &adapter->pmrx;
    309  1.1     jklos         else if (t->mem_id == MEM_PMTX)
    310  1.1     jklos             mem = &adapter->pmtx;
    311  1.1     jklos         else
    312  1.1     jklos             return (EINVAL);
    313  1.1     jklos 
    314  1.1     jklos         ret = t3_mc7_bd_read(mem, t->addr/8, t->len/8, (u64 *)t->buf);
    315  1.1     jklos         if (ret)
    316  1.1     jklos             return (ret);
    317  1.1     jklos         break;
    318  1.1     jklos     }
    319  1.1     jklos     case RDMA_CQ_SETUP: {
    320  1.1     jklos         struct rdma_cq_setup *req2 = data;
    321  1.1     jklos 
    322  1.1     jklos         mtx_lock(&adapter->sge.reg_lock);
    323  1.1     jklos         ret = t3_sge_init_cqcntxt(adapter, req2->id, req2->base_addr,
    324  1.1     jklos                       req2->size, ASYNC_NOTIF_RSPQ,
    325  1.1     jklos                       req2->ovfl_mode, req2->credits,
    326  1.1     jklos                       req2->credit_thres);
    327  1.1     jklos         mtx_unlock(&adapter->sge.reg_lock);
    328  1.1     jklos         break;
    329  1.1     jklos     }
    330  1.1     jklos     case RDMA_CQ_DISABLE:
    331  1.1     jklos         mtx_lock(&adapter->sge.reg_lock);
    332  1.1     jklos         ret = t3_sge_disable_cqcntxt(adapter, *(unsigned int *)data);
    333  1.1     jklos         mtx_unlock(&adapter->sge.reg_lock);
    334  1.1     jklos         break;
    335  1.1     jklos     case RDMA_CTRL_QP_SETUP: {
    336  1.1     jklos         struct rdma_ctrlqp_setup *req2 = data;
    337  1.1     jklos 
    338  1.1     jklos         mtx_lock(&adapter->sge.reg_lock);
    339  1.1     jklos         ret = t3_sge_init_ecntxt(adapter, FW_RI_SGEEC_START, 0,
    340  1.1     jklos                      SGE_CNTXT_RDMA, ASYNC_NOTIF_RSPQ,
    341  1.1     jklos                      req2->base_addr, req2->size,
    342  1.1     jklos                      FW_RI_TID_START, 1, 0);
    343  1.1     jklos         mtx_unlock(&adapter->sge.reg_lock);
    344  1.1     jklos         break;
    345  1.1     jklos     }
    346  1.1     jklos     default:
    347  1.1     jklos         ret = EOPNOTSUPP;
    348  1.1     jklos     }
    349  1.1     jklos     return (ret);
    350  1.1     jklos }
    351  1.1     jklos 
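                        /*
                         * Top-level control operation handler for the offload device.
                         * Simple queries are answered directly; ULP iSCSI and RDMA requests
                         * are forwarded to the handlers above once offload is running.
                         */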
    352  1.1     jklos static int
    353  1.1     jklos cxgb_offload_ctl(struct toedev *tdev, unsigned int req, void *data)
    354  1.1     jklos {
    355  1.1     jklos     struct adapter *adapter = tdev2adap(tdev);
    356  1.1     jklos     struct tid_range *tid;
    357  1.1     jklos     struct mtutab *mtup;
    358  1.1     jklos     struct iff_mac *iffmacp;
    359  1.1     jklos     struct ddp_params *ddpp;
    360  1.1     jklos     struct adap_ports *ports;
    361  1.1     jklos     int port;
    362  1.1     jklos 
    363  1.1     jklos     switch (req) {
    364  1.1     jklos     case GET_MAX_OUTSTANDING_WR:
    365  1.1     jklos         *(unsigned int *)data = FW_WR_NUM;
    366  1.1     jklos         break;
    367  1.1     jklos     case GET_WR_LEN:
    368  1.1     jklos         *(unsigned int *)data = WR_FLITS;
    369  1.1     jklos         break;
    370  1.1     jklos     case GET_TX_MAX_CHUNK:
    371  1.1     jklos         *(unsigned int *)data = 1 << 20;  /* 1MB */
    372  1.1     jklos         break;
    373  1.1     jklos     case GET_TID_RANGE:
    374  1.1     jklos         tid = data;
    375  1.1     jklos         tid->num = t3_mc5_size(&adapter->mc5) -
    376  1.1     jklos             adapter->params.mc5.nroutes -
    377  1.1     jklos             adapter->params.mc5.nfilters -
    378  1.1     jklos             adapter->params.mc5.nservers;
    379  1.1     jklos         tid->base = 0;
    380  1.1     jklos         break;
    381  1.1     jklos     case GET_STID_RANGE:
    382  1.1     jklos         tid = data;
    383  1.1     jklos         tid->num = adapter->params.mc5.nservers;
    384  1.1     jklos         tid->base = t3_mc5_size(&adapter->mc5) - tid->num -
    385  1.1     jklos             adapter->params.mc5.nfilters -
    386  1.1     jklos             adapter->params.mc5.nroutes;
    387  1.1     jklos         break;
    388  1.1     jklos     case GET_L2T_CAPACITY:
    389  1.1     jklos         *(unsigned int *)data = 2048;
    390  1.1     jklos         break;
    391  1.1     jklos     case GET_MTUS:
    392  1.1     jklos         mtup = data;
    393  1.1     jklos         mtup->size = NMTUS;
    394  1.1     jklos         mtup->mtus = adapter->params.mtus;
    395  1.1     jklos         break;
    396  1.1     jklos     case GET_IFF_FROM_MAC:
    397  1.1     jklos         iffmacp = data;
    398  1.1     jklos         iffmacp->dev = get_iff_from_mac(adapter, iffmacp->mac_addr,
    399  1.1     jklos                       iffmacp->vlan_tag & EVL_VLID_MASK);
    400  1.1     jklos         break;
    401  1.1     jklos     case GET_DDP_PARAMS:
    402  1.1     jklos         ddpp = data;
    403  1.1     jklos         ddpp->llimit = t3_read_reg(adapter, A_ULPRX_TDDP_LLIMIT);
    404  1.1     jklos         ddpp->ulimit = t3_read_reg(adapter, A_ULPRX_TDDP_ULIMIT);
    405  1.1     jklos         ddpp->tag_mask = t3_read_reg(adapter, A_ULPRX_TDDP_TAGMASK);
    406  1.1     jklos         break;
    407  1.1     jklos     case GET_PORTS:
    408  1.1     jklos         ports = data;
    409  1.1     jklos         ports->nports   = adapter->params.nports;
    410  1.1     jklos         for_each_port(adapter, port)
    411  1.1     jklos             ports->lldevs[port] = adapter->port[port].ifp;
    412  1.1     jklos         break;
    413  1.1     jklos     case FAILOVER:
    414  1.1     jklos         port = *(int *)data;
    415  1.1     jklos         t3_port_failover(adapter, port);
    416  1.1     jklos         failover_fixup(adapter, port);
    417  1.1     jklos         break;
    418  1.1     jklos     case FAILOVER_DONE:
    419  1.1     jklos         port = *(int *)data;
    420  1.1     jklos         t3_failover_done(adapter, port);
    421  1.1     jklos         break;
    422  1.1     jklos     case FAILOVER_CLEAR:
    423  1.1     jklos         t3_failover_clear(adapter);
    424  1.1     jklos         break;
    425  1.1     jklos     case ULP_ISCSI_GET_PARAMS:
    426  1.1     jklos     case ULP_ISCSI_SET_PARAMS:
    427  1.1     jklos         if (!offload_running(adapter))
    428  1.1     jklos             return (EAGAIN);
    429  1.1     jklos         return cxgb_ulp_iscsi_ctl(adapter, req, data);
    430  1.1     jklos     case RDMA_GET_PARAMS:
    431  1.1     jklos     case RDMA_CQ_OP:
    432  1.1     jklos     case RDMA_CQ_SETUP:
    433  1.1     jklos     case RDMA_CQ_DISABLE:
    434  1.1     jklos     case RDMA_CTRL_QP_SETUP:
    435  1.1     jklos     case RDMA_GET_MEM:
    436  1.1     jklos         if (!offload_running(adapter))
    437  1.1     jklos             return (EAGAIN);
    438  1.1     jklos         return cxgb_rdma_ctl(adapter, req, data);
    439  1.1     jklos     default:
    440  1.1     jklos         return (EOPNOTSUPP);
    441  1.1     jklos     }
    442  1.1     jklos     return 0;
    443  1.1     jklos }
    444  1.1     jklos 
    445  1.1     jklos /*
    446  1.1     jklos  * Dummy handler for Rx offload packets in case we get an offload packet before
     447  1.1     jklos  * proper processing is set up.  This complains and drops the packet as it isn't
    448  1.1     jklos  * normal to get offload packets at this stage.
    449  1.1     jklos  */
    450  1.1     jklos static int
    451  1.1     jklos rx_offload_blackhole(struct toedev *dev, struct mbuf **m, int n)
    452  1.1     jklos {
    453  1.1     jklos     CH_ERR(tdev2adap(dev), "%d unexpected offload packets, first data 0x%x\n",
    454  1.1     jklos         n, *mtod(m[0], uint32_t *));
    455  1.1     jklos     while (n--)
    456  1.1     jklos         m_freem(m[n]);
    457  1.1     jklos     return 0;
    458  1.1     jklos }
    459  1.1     jklos 
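                        /* No-op neighbour update handler installed alongside the blackhole receiver. */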
    460  1.1     jklos static void
    461  1.1     jklos dummy_neigh_update(struct toedev *dev, struct rtentry *neigh)
    462  1.1     jklos {
    463  1.1     jklos }
    464  1.1     jklos 
    465  1.1     jklos void
    466  1.1     jklos cxgb_set_dummy_ops(struct toedev *dev)
    467  1.1     jklos {
    468  1.1     jklos     dev->recv         = rx_offload_blackhole;
    469  1.1     jklos     dev->neigh_update = dummy_neigh_update;
    470  1.1     jklos }
    471  1.1     jklos 
    472  1.1     jklos /*
    473  1.1     jklos  * Free an active-open TID.
    474  1.1     jklos  */
    475  1.1     jklos void *
    476  1.1     jklos cxgb_free_atid(struct toedev *tdev, int atid)
    477  1.1     jklos {
    478  1.1     jklos     struct tid_info *t = &(TOE_DATA(tdev))->tid_maps;
    479  1.1     jklos     union active_open_entry *p = atid2entry(t, atid);
    480  1.1     jklos     void *ctx = p->toe_tid.ctx;
    481  1.1     jklos 
    482  1.1     jklos     mtx_lock(&t->atid_lock);
    483  1.1     jklos     p->next = t->afree;
    484  1.1     jklos     t->afree = p;
    485  1.1     jklos     t->atids_in_use--;
     486  1.1     jklos     mtx_unlock(&t->atid_lock);
    487  1.1     jklos 
    488  1.1     jklos     return ctx;
    489  1.1     jklos }
    490  1.1     jklos 
    491  1.1     jklos /*
    492  1.1     jklos  * Free a server TID and return it to the free pool.
    493  1.1     jklos  */
    494  1.1     jklos void
    495  1.1     jklos cxgb_free_stid(struct toedev *tdev, int stid)
    496  1.1     jklos {
    497  1.1     jklos     struct tid_info *t = &(TOE_DATA(tdev))->tid_maps;
    498  1.1     jklos     union listen_entry *p = stid2entry(t, stid);
    499  1.1     jklos 
    500  1.1     jklos     mtx_lock(&t->stid_lock);
    501  1.1     jklos     p->next = t->sfree;
    502  1.1     jklos     t->sfree = p;
    503  1.1     jklos     t->stids_in_use--;
    504  1.1     jklos     mtx_unlock(&t->stid_lock);
    505  1.1     jklos }
    506  1.1     jklos 
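                        /*
                         * Record the owning client and its context for a newly established
                         * connection TID.
                         */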
    507  1.1     jklos void
    508  1.1     jklos cxgb_insert_tid(struct toedev *tdev, struct cxgb_client *client,
    509  1.1     jklos     void *ctx, unsigned int tid)
    510  1.1     jklos {
    511  1.1     jklos     struct tid_info *t = &(TOE_DATA(tdev))->tid_maps;
    512  1.1     jklos 
    513  1.1     jklos     t->tid_tab[tid].client = client;
    514  1.1     jklos     t->tid_tab[tid].ctx = ctx;
    515  1.1     jklos     atomic_add_int(&t->tids_in_use, 1);
    516  1.1     jklos }
    517  1.1     jklos 
    518  1.1     jklos /*
     519  1.7    andvar  * Populate a TID_RELEASE WR.  The mbuf must already be properly sized.
    520  1.1     jklos  */
    521  1.1     jklos static inline void
    522  1.1     jklos mk_tid_release(struct mbuf *m, unsigned int tid)
    523  1.1     jklos {
    524  1.1     jklos     struct cpl_tid_release *req;
    525  1.1     jklos 
    526  1.1     jklos     m_set_priority(m, CPL_PRIORITY_SETUP);
    527  1.1     jklos     req = mtod(m, struct cpl_tid_release *);
    528  1.1     jklos     req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
    529  1.1     jklos     OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
    530  1.1     jklos }
    531  1.1     jklos 
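                        /*
                         * Workqueue task that drains the deferred TID-release list, sending
                         * a CPL_TID_RELEASE message for each entry.  Runs in process context
                         * so the mbuf allocation may sleep.
                         */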
    532  1.1     jklos static void
    533  1.1     jklos t3_process_tid_release_list(struct work *wk, void *data)
    534  1.1     jklos {
    535  1.1     jklos     struct mbuf *m;
    536  1.1     jklos     struct toedev *tdev = data;
    537  1.1     jklos     struct toe_data *td = TOE_DATA(tdev);
    538  1.1     jklos 
    539  1.1     jklos     mtx_lock(&td->tid_release_lock);
    540  1.1     jklos     while (td->tid_release_list) {
    541  1.1     jklos         struct toe_tid_entry *p = td->tid_release_list;
    542  1.1     jklos 
    543  1.1     jklos         td->tid_release_list = (struct toe_tid_entry *)p->ctx;
    544  1.1     jklos         mtx_unlock(&td->tid_release_lock);
    545  1.1     jklos         m = m_get(M_WAIT, MT_DATA);
    546  1.1     jklos         mk_tid_release(m, p - td->tid_maps.tid_tab);
    547  1.1     jklos         cxgb_ofld_send(tdev, m);
    548  1.1     jklos         p->ctx = NULL;
    549  1.1     jklos         mtx_lock(&td->tid_release_lock);
    550  1.1     jklos     }
    551  1.1     jklos     mtx_unlock(&td->tid_release_lock);
    552  1.1     jklos }
    553  1.1     jklos 
    554  1.1     jklos /* use ctx as a next pointer in the tid release list */
    555  1.1     jklos void
    556  1.1     jklos cxgb_queue_tid_release(struct toedev *tdev, unsigned int tid)
    557  1.1     jklos {
    558  1.1     jklos     struct toe_data *td = TOE_DATA(tdev);
    559  1.1     jklos     struct toe_tid_entry *p = &td->tid_maps.tid_tab[tid];
    560  1.1     jklos 
    561  1.1     jklos     mtx_lock(&td->tid_release_lock);
    562  1.1     jklos     p->ctx = td->tid_release_list;
    563  1.1     jklos     td->tid_release_list = p;
    564  1.1     jklos 
    565  1.1     jklos     if (!p->ctx)
    566  1.1     jklos         workqueue_enqueue(td->tid_release_task.wq, &td->tid_release_task.w, NULL);
    567  1.1     jklos 
    568  1.1     jklos     mtx_unlock(&td->tid_release_lock);
    569  1.1     jklos }
    570  1.1     jklos 
    571  1.1     jklos /*
    572  1.1     jklos  * Remove a tid from the TID table.  A client may defer processing its last
    573  1.1     jklos  * CPL message if it is locked at the time it arrives, and while the message
    574  1.1     jklos  * sits in the client's backlog the TID may be reused for another connection.
    575  1.1     jklos  * To handle this we atomically switch the TID association if it still points
    576  1.1     jklos  * to the original client context.
    577  1.1     jklos  */
    578  1.1     jklos void
    579  1.1     jklos cxgb_remove_tid(struct toedev *tdev, void *ctx, unsigned int tid)
    580  1.1     jklos {
    581  1.1     jklos     struct tid_info *t = &(TOE_DATA(tdev))->tid_maps;
    582  1.1     jklos 
    583  1.1     jklos     BUG_ON(tid >= t->ntids);
    584  1.1     jklos     if (tdev->type == T3A)
    585  1.1     jklos         atomic_cmpset_ptr((uintptr_t *)&t->tid_tab[tid].ctx, (long)NULL, (long)ctx);
    586  1.1     jklos     else {
    587  1.1     jklos         struct mbuf *m;
    588  1.1     jklos 
    589  1.1     jklos         m = m_get(M_NOWAIT, MT_DATA);
    590  1.1     jklos         if (__predict_true(m != NULL)) {
    591  1.1     jklos             mk_tid_release(m, tid);
    592  1.1     jklos             cxgb_ofld_send(tdev, m);
    593  1.1     jklos             t->tid_tab[tid].ctx = NULL;
    594  1.1     jklos         } else
    595  1.1     jklos             cxgb_queue_tid_release(tdev, tid);
    596  1.1     jklos     }
    597  1.1     jklos     atomic_add_int(&t->tids_in_use, -1);
    598  1.1     jklos }
    599  1.1     jklos 
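                        /*
                         * Allocate an active-open TID from the free list and bind it to the
                         * given client and context.  Returns -1 if none are available.
                         */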
    600  1.1     jklos int
    601  1.1     jklos cxgb_alloc_atid(struct toedev *tdev, struct cxgb_client *client,
    602  1.1     jklos              void *ctx)
    603  1.1     jklos {
    604  1.1     jklos     int atid = -1;
    605  1.1     jklos     struct tid_info *t = &(TOE_DATA(tdev))->tid_maps;
    606  1.1     jklos 
    607  1.1     jklos     mtx_lock(&t->atid_lock);
    608  1.1     jklos     if (t->afree) {
    609  1.1     jklos         union active_open_entry *p = t->afree;
    610  1.1     jklos 
    611  1.1     jklos         atid = (p - t->atid_tab) + t->atid_base;
    612  1.1     jklos         t->afree = p->next;
    613  1.1     jklos         p->toe_tid.ctx = ctx;
    614  1.1     jklos         p->toe_tid.client = client;
    615  1.1     jklos         t->atids_in_use++;
    616  1.1     jklos     }
    617  1.1     jklos     mtx_unlock(&t->atid_lock);
    618  1.1     jklos     return atid;
    619  1.1     jklos }
    620  1.1     jklos 
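                        /*
                         * Allocate a server TID from the free list and bind it to the given
                         * client and context.  Returns -1 if none are available.
                         */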
    621  1.1     jklos int
    622  1.1     jklos cxgb_alloc_stid(struct toedev *tdev, struct cxgb_client *client,
    623  1.1     jklos              void *ctx)
    624  1.1     jklos {
    625  1.1     jklos     int stid = -1;
    626  1.1     jklos     struct tid_info *t = &(TOE_DATA(tdev))->tid_maps;
    627  1.1     jklos 
    628  1.1     jklos     mtx_lock(&t->stid_lock);
    629  1.1     jklos     if (t->sfree) {
    630  1.1     jklos         union listen_entry *p = t->sfree;
    631  1.1     jklos 
    632  1.1     jklos         stid = (p - t->stid_tab) + t->stid_base;
    633  1.1     jklos         t->sfree = p->next;
    634  1.1     jklos         p->toe_tid.ctx = ctx;
    635  1.1     jklos         p->toe_tid.client = client;
    636  1.1     jklos         t->stids_in_use++;
    637  1.1     jklos     }
    638  1.1     jklos     mtx_unlock(&t->stid_lock);
    639  1.1     jklos     return stid;
    640  1.1     jklos }
    641  1.1     jklos 
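                        /*
                         * CPL message handlers.  Most look up the TID's owning client and
                         * hand the message to that client's registered handler; replies that
                         * need no client processing are only checked for errors.
                         */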
    642  1.1     jklos static int
    643  1.1     jklos do_smt_write_rpl(struct toedev *dev, struct mbuf *m)
    644  1.1     jklos {
    645  1.1     jklos     struct cpl_smt_write_rpl *rpl = cplhdr(m);
    646  1.1     jklos 
    647  1.1     jklos     if (rpl->status != CPL_ERR_NONE)
    648  1.1     jklos         log(LOG_ERR,
    649  1.1     jklos                "Unexpected SMT_WRITE_RPL status %u for entry %u\n",
    650  1.1     jklos                rpl->status, GET_TID(rpl));
    651  1.1     jklos 
    652  1.1     jklos     return CPL_RET_BUF_DONE;
    653  1.1     jklos }
    654  1.1     jklos 
    655  1.1     jklos static int
    656  1.1     jklos do_l2t_write_rpl(struct toedev *dev, struct mbuf *m)
    657  1.1     jklos {
    658  1.1     jklos     struct cpl_l2t_write_rpl *rpl = cplhdr(m);
    659  1.1     jklos 
    660  1.1     jklos     if (rpl->status != CPL_ERR_NONE)
    661  1.1     jklos         log(LOG_ERR,
    662  1.1     jklos                "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
    663  1.1     jklos                rpl->status, GET_TID(rpl));
    664  1.1     jklos 
    665  1.1     jklos     return CPL_RET_BUF_DONE;
    666  1.1     jklos }
    667  1.1     jklos 
    668  1.1     jklos static int
    669  1.1     jklos do_act_open_rpl(struct toedev *dev, struct mbuf *m)
    670  1.1     jklos {
    671  1.1     jklos     struct cpl_act_open_rpl *rpl = cplhdr(m);
    672  1.1     jklos     unsigned int atid = G_TID(ntohl(rpl->atid));
    673  1.1     jklos     struct toe_tid_entry *toe_tid;
    674  1.1     jklos 
    675  1.1     jklos     toe_tid = lookup_atid(&(TOE_DATA(dev))->tid_maps, atid);
    676  1.1     jklos     if (toe_tid->ctx && toe_tid->client && toe_tid->client->handlers &&
    677  1.1     jklos         toe_tid->client->handlers[CPL_ACT_OPEN_RPL]) {
    678  1.1     jklos         return toe_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, m,
    679  1.1     jklos             toe_tid->ctx);
    680  1.1     jklos     } else {
    681  1.1     jklos         log(LOG_ERR, "%s: received clientless CPL command 0x%x\n",
    682  1.1     jklos             dev->name, CPL_ACT_OPEN_RPL);
    683  1.1     jklos         return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
    684  1.1     jklos     }
    685  1.1     jklos }
    686  1.1     jklos 
    687  1.1     jklos static int
    688  1.1     jklos do_stid_rpl(struct toedev *dev, struct mbuf *m)
    689  1.1     jklos {
    690  1.1     jklos     union opcode_tid *p = cplhdr(m);
    691  1.1     jklos     unsigned int stid = G_TID(ntohl(p->opcode_tid));
    692  1.1     jklos     struct toe_tid_entry *toe_tid;
    693  1.1     jklos 
    694  1.1     jklos     toe_tid = lookup_stid(&(TOE_DATA(dev))->tid_maps, stid);
    695  1.1     jklos     if (toe_tid->ctx && toe_tid->client->handlers &&
    696  1.1     jklos         toe_tid->client->handlers[p->opcode]) {
    697  1.1     jklos         return toe_tid->client->handlers[p->opcode] (dev, m, toe_tid->ctx);
    698  1.1     jklos     } else {
    699  1.1     jklos         log(LOG_ERR, "%s: received clientless CPL command 0x%x\n",
    700  1.1     jklos             dev->name, p->opcode);
    701  1.1     jklos         return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
    702  1.1     jklos     }
    703  1.1     jklos }
    704  1.1     jklos 
    705  1.1     jklos static int
    706  1.1     jklos do_hwtid_rpl(struct toedev *dev, struct mbuf *m)
    707  1.1     jklos {
    708  1.1     jklos     union opcode_tid *p = cplhdr(m);
    709  1.1     jklos     unsigned int hwtid;
    710  1.1     jklos     struct toe_tid_entry *toe_tid;
    711  1.1     jklos 
    712  1.1     jklos     printf("do_hwtid_rpl m=%p\n", m);
    713  1.1     jklos     return (0);
    714  1.4  christos #ifdef notyet
    715  1.1     jklos 
    716  1.1     jklos     hwtid = G_TID(ntohl(p->opcode_tid));
    717  1.1     jklos 
    718  1.1     jklos     toe_tid = lookup_tid(&(TOE_DATA(dev))->tid_maps, hwtid);
    719  1.1     jklos     if (toe_tid->ctx && toe_tid->client->handlers &&
    720  1.1     jklos         toe_tid->client->handlers[p->opcode]) {
    721  1.1     jklos         return toe_tid->client->handlers[p->opcode]
    722  1.1     jklos                         (dev, m, toe_tid->ctx);
    723  1.1     jklos     } else {
    724  1.1     jklos         log(LOG_ERR, "%s: received clientless CPL command 0x%x\n",
    725  1.1     jklos             dev->name, p->opcode);
    726  1.1     jklos         return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
    727  1.1     jklos     }
    728  1.4  christos #endif
    729  1.1     jklos }
    730  1.1     jklos 
    731  1.1     jklos static int
    732  1.1     jklos do_cr(struct toedev *dev, struct mbuf *m)
    733  1.1     jklos {
    734  1.1     jklos     struct cpl_pass_accept_req *req = cplhdr(m);
    735  1.1     jklos     unsigned int stid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
    736  1.1     jklos     struct toe_tid_entry *toe_tid;
    737  1.1     jklos 
    738  1.1     jklos     toe_tid = lookup_stid(&(TOE_DATA(dev))->tid_maps, stid);
    739  1.1     jklos     if (toe_tid->ctx && toe_tid->client->handlers &&
    740  1.1     jklos         toe_tid->client->handlers[CPL_PASS_ACCEPT_REQ]) {
    741  1.1     jklos         return toe_tid->client->handlers[CPL_PASS_ACCEPT_REQ]
    742  1.1     jklos                         (dev, m, toe_tid->ctx);
    743  1.1     jklos     } else {
    744  1.1     jklos         log(LOG_ERR, "%s: received clientless CPL command 0x%x\n",
    745  1.1     jklos             dev->name, CPL_PASS_ACCEPT_REQ);
    746  1.1     jklos         return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
    747  1.1     jklos     }
    748  1.1     jklos }
    749  1.1     jklos 
    750  1.1     jklos static int
    751  1.1     jklos do_abort_req_rss(struct toedev *dev, struct mbuf *m)
    752  1.1     jklos {
    753  1.1     jklos     union opcode_tid *p = cplhdr(m);
    754  1.1     jklos     unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
    755  1.1     jklos     struct toe_tid_entry *toe_tid;
    756  1.1     jklos 
    757  1.1     jklos     toe_tid = lookup_tid(&(TOE_DATA(dev))->tid_maps, hwtid);
    758  1.1     jklos     if (toe_tid->ctx && toe_tid->client->handlers &&
    759  1.1     jklos         toe_tid->client->handlers[p->opcode]) {
    760  1.1     jklos         return toe_tid->client->handlers[p->opcode]
    761  1.1     jklos                         (dev, m, toe_tid->ctx);
    762  1.1     jklos     } else {
    763  1.1     jklos         struct cpl_abort_req_rss *req = cplhdr(m);
    764  1.1     jklos         struct cpl_abort_rpl *rpl;
    765  1.1     jklos 
    766  1.1     jklos         struct mbuf *m2 = m_get(M_NOWAIT, MT_DATA);
    767  1.1     jklos         if (!m2) {
    768  1.1     jklos             log(LOG_NOTICE, "do_abort_req_rss: couldn't get mbuf!\n");
    769  1.1     jklos             goto out;
    770  1.1     jklos         }
    771  1.1     jklos 
    772  1.1     jklos         m_set_priority(m2, CPL_PRIORITY_DATA);
    773  1.1     jklos #if 0
    774  1.1     jklos         __skb_put(skb, sizeof(struct cpl_abort_rpl));
    775  1.1     jklos #endif
    776  1.1     jklos         rpl = cplhdr(m2);
    777  1.1     jklos         rpl->wr.wr_hi =
    778  1.1     jklos             htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
    779  1.1     jklos         rpl->wr.wr_lo = htonl(V_WR_TID(GET_TID(req)));
    780  1.1     jklos         OPCODE_TID(rpl) =
    781  1.1     jklos             htonl(MK_OPCODE_TID(CPL_ABORT_RPL, GET_TID(req)));
    782  1.1     jklos         rpl->cmd = req->status;
    783  1.1     jklos         cxgb_ofld_send(dev, m2);
    784  1.1     jklos  out:
    785  1.1     jklos         return CPL_RET_BUF_DONE;
    786  1.1     jklos     }
    787  1.1     jklos }
    788  1.1     jklos 
    789  1.1     jklos static int
    790  1.1     jklos do_act_establish(struct toedev *dev, struct mbuf *m)
    791  1.1     jklos {
    792  1.1     jklos     struct cpl_act_establish *req = cplhdr(m);
    793  1.1     jklos     unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
    794  1.1     jklos     struct toe_tid_entry *toe_tid;
    795  1.1     jklos 
    796  1.1     jklos     toe_tid = lookup_atid(&(TOE_DATA(dev))->tid_maps, atid);
    797  1.1     jklos     if (toe_tid->ctx && toe_tid->client->handlers &&
    798  1.1     jklos         toe_tid->client->handlers[CPL_ACT_ESTABLISH]) {
    799  1.1     jklos         return toe_tid->client->handlers[CPL_ACT_ESTABLISH]
    800  1.1     jklos                         (dev, m, toe_tid->ctx);
    801  1.1     jklos     } else {
    802  1.1     jklos         log(LOG_ERR, "%s: received clientless CPL command 0x%x\n",
     803  1.1     jklos             dev->name, CPL_ACT_ESTABLISH);
    804  1.1     jklos         return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
    805  1.1     jklos     }
    806  1.1     jklos }
    807  1.1     jklos 
    808  1.1     jklos static int
    809  1.1     jklos do_set_tcb_rpl(struct toedev *dev, struct mbuf *m)
    810  1.1     jklos {
    811  1.1     jklos     struct cpl_set_tcb_rpl *rpl = cplhdr(m);
    812  1.1     jklos 
    813  1.1     jklos     if (rpl->status != CPL_ERR_NONE)
    814  1.1     jklos         log(LOG_ERR,
    815  1.1     jklos             "Unexpected SET_TCB_RPL status %u for tid %u\n",
    816  1.1     jklos             rpl->status, GET_TID(rpl));
    817  1.1     jklos     return CPL_RET_BUF_DONE;
    818  1.1     jklos }
    819  1.1     jklos 
    820  1.1     jklos static int
    821  1.1     jklos do_trace(struct toedev *dev, struct mbuf *m)
    822  1.1     jklos {
    823  1.1     jklos #if 0
    824  1.1     jklos     struct cpl_trace_pkt *p = cplhdr(m);
    825  1.1     jklos 
    826  1.1     jklos 
    827  1.1     jklos     skb->protocol = 0xffff;
    828  1.1     jklos     skb->dev = dev->lldev;
    829  1.1     jklos     skb_pull(skb, sizeof(*p));
    830  1.1     jklos     skb->mac.raw = mtod(m, (char *));
    831  1.1     jklos     netif_receive_skb(skb);
    832  1.1     jklos #endif
    833  1.1     jklos     return 0;
    834  1.1     jklos }
    835  1.1     jklos 
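                        /*
                         * Handle a terminate notification by dispatching it to the owning
                         * connection's client handler for the embedded CPL opcode.
                         */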
    836  1.1     jklos static int
    837  1.1     jklos do_term(struct toedev *dev, struct mbuf *m)
    838  1.1     jklos {
    839  1.1     jklos     unsigned int hwtid = ntohl(m_get_priority(m)) >> 8 & 0xfffff;
    840  1.1     jklos     unsigned int opcode = G_OPCODE(ntohl(m->m_pkthdr.csum_data));
    841  1.1     jklos     struct toe_tid_entry *toe_tid;
    842  1.1     jklos 
    843  1.1     jklos     toe_tid = lookup_tid(&(TOE_DATA(dev))->tid_maps, hwtid);
    844  1.1     jklos     if (toe_tid->ctx && toe_tid->client->handlers &&
    845  1.1     jklos         toe_tid->client->handlers[opcode]) {
    846  1.1     jklos         return toe_tid->client->handlers[opcode](dev, m, toe_tid->ctx);
    847  1.1     jklos     } else {
    848  1.1     jklos         log(LOG_ERR, "%s: received clientless CPL command 0x%x\n",
    849  1.1     jklos             dev->name, opcode);
    850  1.1     jklos         return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
    851  1.1     jklos     }
    852  1.1     jklos     return (0);
    853  1.1     jklos }
    854  1.1     jklos 
    855  1.1     jklos #if defined(FOO)
    856  1.1     jklos #include <linux/config.h>
    857  1.1     jklos #include <linux/kallsyms.h>
    858  1.1     jklos #include <linux/kprobes.h>
    859  1.1     jklos #include <net/arp.h>
    860  1.1     jklos 
    861  1.1     jklos static int (*orig_arp_constructor)(struct ifnet *);
    862  1.1     jklos 
    863  1.1     jklos static void
    864  1.1     jklos neigh_suspect(struct ifnet *neigh)
    865  1.1     jklos {
    866  1.1     jklos     struct hh_cache *hh;
    867  1.1     jklos 
    868  1.1     jklos     neigh->output = neigh->ops->output;
    869  1.1     jklos 
    870  1.1     jklos     for (hh = neigh->hh; hh; hh = hh->hh_next)
    871  1.1     jklos         hh->hh_output = neigh->ops->output;
    872  1.1     jklos }
    873  1.1     jklos 
    874  1.1     jklos static void
    875  1.1     jklos neigh_connect(struct ifnet *neigh)
    876  1.1     jklos {
    877  1.1     jklos     struct hh_cache *hh;
    878  1.1     jklos 
    879  1.1     jklos     neigh->output = neigh->ops->connected_output;
    880  1.1     jklos 
    881  1.1     jklos     for (hh = neigh->hh; hh; hh = hh->hh_next)
    882  1.1     jklos         hh->hh_output = neigh->ops->hh_output;
    883  1.1     jklos }
    884  1.1     jklos 
    885  1.1     jklos static inline int
    886  1.1     jklos neigh_max_probes(const struct neighbour *n)
    887  1.1     jklos {
    888  1.1     jklos     const struct neigh_parms *p = n->parms;
    889  1.1     jklos     return (n->nud_state & NUD_PROBE ?
    890  1.1     jklos         p->ucast_probes :
    891  1.1     jklos         p->ucast_probes + p->app_probes + p->mcast_probes);
    892  1.1     jklos }
    893  1.1     jklos 
    894  1.1     jklos static void
    895  1.1     jklos neigh_timer_handler_offload(unsigned long arg)
    896  1.1     jklos {
    897  1.1     jklos     unsigned long now, next;
    898  1.1     jklos     struct neighbour *neigh = (struct neighbour *)arg;
    899  1.1     jklos     unsigned state;
    900  1.1     jklos     int notify = 0;
    901  1.1     jklos 
    902  1.1     jklos     write_lock(&neigh->lock);
    903  1.1     jklos 
    904  1.1     jklos     state = neigh->nud_state;
    905  1.1     jklos     now = jiffies;
    906  1.1     jklos     next = now + HZ;
    907  1.1     jklos 
    908  1.1     jklos     if (!(state & NUD_IN_TIMER)) {
    909  1.1     jklos #ifndef CONFIG_SMP
    910  1.1     jklos         log(LOG_WARNING, "neigh: timer & !nud_in_timer\n");
    911  1.1     jklos #endif
    912  1.1     jklos         goto out;
    913  1.1     jklos     }
    914  1.1     jklos 
    915  1.1     jklos     if (state & NUD_REACHABLE) {
    916  1.1     jklos         if (time_before_eq(now,
    917  1.1     jklos                    neigh->confirmed +
    918  1.1     jklos                    neigh->parms->reachable_time)) {
    919  1.1     jklos             next = neigh->confirmed + neigh->parms->reachable_time;
    920  1.1     jklos         } else if (time_before_eq(now,
    921  1.1     jklos                       neigh->used +
    922  1.1     jklos                       neigh->parms->delay_probe_time)) {
    923  1.1     jklos             neigh->nud_state = NUD_DELAY;
    924  1.1     jklos             neigh->updated = jiffies;
    925  1.1     jklos             neigh_suspect(neigh);
    926  1.1     jklos             next = now + neigh->parms->delay_probe_time;
    927  1.1     jklos         } else {
    928  1.1     jklos             neigh->nud_state = NUD_STALE;
    929  1.1     jklos             neigh->updated = jiffies;
    930  1.1     jklos             neigh_suspect(neigh);
    931  1.1     jklos             cxgb_neigh_update(neigh);
    932  1.1     jklos         }
    933  1.1     jklos     } else if (state & NUD_DELAY) {
    934  1.1     jklos         if (time_before_eq(now,
    935  1.1     jklos                    neigh->confirmed +
    936  1.1     jklos                    neigh->parms->delay_probe_time)) {
    937  1.1     jklos             neigh->nud_state = NUD_REACHABLE;
    938  1.1     jklos             neigh->updated = jiffies;
    939  1.1     jklos             neigh_connect(neigh);
    940  1.1     jklos             cxgb_neigh_update(neigh);
    941  1.1     jklos             next = neigh->confirmed + neigh->parms->reachable_time;
    942  1.1     jklos         } else {
    943  1.1     jklos             neigh->nud_state = NUD_PROBE;
    944  1.1     jklos             neigh->updated = jiffies;
    945  1.1     jklos             atomic_set_int(&neigh->probes, 0);
    946  1.1     jklos             next = now + neigh->parms->retrans_time;
    947  1.1     jklos         }
    948  1.1     jklos     } else {
    949  1.1     jklos         /* NUD_PROBE|NUD_INCOMPLETE */
    950  1.1     jklos         next = now + neigh->parms->retrans_time;
    951  1.1     jklos     }
    952  1.1     jklos     /*
    953  1.1     jklos      * Needed for read of probes
    954  1.1     jklos      */
    955  1.1     jklos     mb();
    956  1.1     jklos     if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
    957  1.1     jklos         neigh->probes >= neigh_max_probes(neigh)) {
    958  1.1     jklos         struct mbuf *m;
    959  1.1     jklos 
    960  1.1     jklos         neigh->nud_state = NUD_FAILED;
    961  1.1     jklos         neigh->updated = jiffies;
    962  1.1     jklos         notify = 1;
    963  1.1     jklos         cxgb_neigh_update(neigh);
    964  1.1     jklos         NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
    965  1.1     jklos 
    966  1.1     jklos         /* It is very thin place. report_unreachable is very
    967  1.1     jklos            complicated routine. Particularly, it can hit the same
    968  1.1     jklos            neighbour entry!
    969  1.1     jklos            So that, we try to be accurate and avoid dead loop. --ANK
    970  1.1     jklos          */
    971  1.1     jklos         while (neigh->nud_state == NUD_FAILED &&
    972  1.1     jklos                (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
    973  1.1     jklos             write_unlock(&neigh->lock);
    974  1.1     jklos             neigh->ops->error_report(neigh, skb);
    975  1.1     jklos             write_lock(&neigh->lock);
    976  1.1     jklos         }
    977  1.1     jklos         skb_queue_purge(&neigh->arp_queue);
    978  1.1     jklos     }
    979  1.1     jklos 
    980  1.1     jklos     if (neigh->nud_state & NUD_IN_TIMER) {
    981  1.1     jklos         if (time_before(next, jiffies + HZ/2))
    982  1.1     jklos             next = jiffies + HZ/2;
    983  1.1     jklos         if (!mod_timer(&neigh->timer, next))
    984  1.1     jklos             neigh_hold(neigh);
    985  1.1     jklos     }
    986  1.1     jklos     if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
    987  1.1     jklos         struct mbuf *m = skb_peek(&neigh->arp_queue);
    988  1.1     jklos 
    989  1.1     jklos         write_unlock(&neigh->lock);
    990  1.1     jklos         neigh->ops->solicit(neigh, skb);
    991  1.1     jklos         atomic_add_int(&neigh->probes, 1);
    992  1.1     jklos         if (m)
    993  1.1     jklos             m_free(m);
    994  1.1     jklos     } else {
    995  1.1     jklos out:
    996  1.1     jklos         write_unlock(&neigh->lock);
    997  1.1     jklos     }
    998  1.1     jklos 
    999  1.1     jklos #ifdef CONFIG_ARPD
   1000  1.1     jklos     if (notify && neigh->parms->app_probes)
   1001  1.1     jklos         neigh_app_notify(neigh);
   1002  1.1     jklos #endif
   1003  1.1     jklos     neigh_release(neigh);
   1004  1.1     jklos }
   1005  1.1     jklos 
   1006  1.1     jklos static int
   1007  1.1     jklos arp_constructor_offload(struct neighbour *neigh)
   1008  1.1     jklos {
   1009  1.1     jklos     if (neigh->ifp && is_offloading(neigh->ifp))
   1010  1.1     jklos         neigh->timer.function = neigh_timer_handler_offload;
   1011  1.1     jklos     return orig_arp_constructor(neigh);
   1012  1.1     jklos }
   1013  1.1     jklos 
   1014  1.1     jklos /*
   1015  1.1     jklos  * This must match exactly the signature of neigh_update for jprobes to work.
   1016  1.1     jklos  * It runs from a trap handler with interrupts off so don't disable BH.
   1017  1.1     jklos  */
   1018  1.1     jklos static int
   1019  1.1     jklos neigh_update_offload(struct neighbour *neigh, const u8 *lladdr,
   1020  1.1     jklos                 u8 new, u32 flags)
   1021  1.1     jklos {
   1022  1.1     jklos     write_lock(&neigh->lock);
   1023  1.1     jklos     cxgb_neigh_update(neigh);
   1024  1.1     jklos     write_unlock(&neigh->lock);
   1025  1.1     jklos     jprobe_return();
   1026  1.1     jklos     /* NOTREACHED */
   1027  1.1     jklos     return 0;
   1028  1.1     jklos }
   1029  1.1     jklos 
   1030  1.1     jklos static struct jprobe neigh_update_jprobe = {
   1031  1.1     jklos     .entry = (kprobe_opcode_t *) neigh_update_offload,
   1032  1.1     jklos     .kp.addr = (kprobe_opcode_t *) neigh_update
   1033  1.1     jklos };
   1034  1.1     jklos 
   1035  1.1     jklos #ifdef MODULE_SUPPORT
   1036  1.1     jklos static int
   1037  1.1     jklos prepare_arp_with_t3core(void)
   1038  1.1     jklos {
   1039  1.1     jklos     int err;
   1040  1.1     jklos 
   1041  1.1     jklos     err = register_jprobe(&neigh_update_jprobe);
   1042  1.1     jklos     if (err) {
   1043  1.1     jklos         log(LOG_ERR, "Could not install neigh_update jprobe, "
   1044  1.1     jklos                 "error %d\n", err);
   1045  1.1     jklos         return err;
   1046  1.1     jklos     }
   1047  1.1     jklos 
   1048  1.1     jklos     orig_arp_constructor = arp_tbl.constructor;
   1049  1.1     jklos     arp_tbl.constructor  = arp_constructor_offload;
   1050  1.1     jklos 
   1051  1.1     jklos     return 0;
   1052  1.1     jklos }
   1053  1.1     jklos 
   1054  1.1     jklos static void
   1055  1.1     jklos restore_arp_sans_t3core(void)
   1056  1.1     jklos {
   1057  1.1     jklos     arp_tbl.constructor = orig_arp_constructor;
   1058  1.1     jklos     unregister_jprobe(&neigh_update_jprobe);
   1059  1.1     jklos }
   1060  1.1     jklos 
   1061  1.6   msaitoh #else /* Module support */
   1062  1.1     jklos static inline int
   1063  1.1     jklos prepare_arp_with_t3core(void)
   1064  1.1     jklos {
   1065  1.1     jklos     return 0;
   1066  1.1     jklos }
   1067  1.1     jklos 
   1068  1.1     jklos static inline void
   1069  1.1     jklos restore_arp_sans_t3core(void)
   1070  1.1     jklos {}
   1071  1.1     jklos #endif
   1072  1.1     jklos #endif
   1073  1.1     jklos /*
   1074  1.1     jklos  * Process a received packet with an unknown/unexpected CPL opcode.
   1075  1.1     jklos  */
   1076  1.1     jklos static int
   1077  1.1     jklos do_bad_cpl(struct toedev *dev, struct mbuf *m)
   1078  1.1     jklos {
   1079  1.1     jklos     log(LOG_ERR, "%s: received bad CPL command 0x%x\n", dev->name,
   1080  1.1     jklos         *mtod(m, uint32_t *));
   1081  1.1     jklos     return (CPL_RET_BUF_DONE | CPL_RET_BAD_MSG);
   1082  1.1     jklos }
   1083  1.1     jklos 
   1084  1.1     jklos /*
   1085  1.1     jklos  * Handlers for each CPL opcode
   1086  1.1     jklos  */
   1087  1.1     jklos static cpl_handler_func cpl_handlers[NUM_CPL_CMDS];
   1088  1.1     jklos 
   1089  1.1     jklos /*
   1090  1.1     jklos  * Add a new handler to the CPL dispatch table.  A NULL handler may be supplied
   1091  1.1     jklos  * to unregister an existing handler.
   1092  1.1     jklos  */
   1093  1.1     jklos void
   1094  1.1     jklos t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h)
   1095  1.1     jklos {
   1096  1.1     jklos     if (opcode < NUM_CPL_CMDS)
   1097  1.1     jklos         cpl_handlers[opcode] = h ? h : do_bad_cpl;
   1098  1.1     jklos     else
   1099  1.1     jklos         log(LOG_ERR, "T3C: handler registration for "
   1100  1.1     jklos                "opcode %x failed\n", opcode);
   1101  1.1     jklos }
   1102  1.1     jklos 
   1103  1.1     jklos /*
   1104  1.1     jklos  * TOEDEV's receive method.
   1105  1.1     jklos  */
   1106  1.1     jklos int
   1107  1.1     jklos process_rx(struct toedev *dev, struct mbuf **m, int n)
   1108  1.1     jklos {
   1109  1.1     jklos     while (n--) {
   1110  1.1     jklos         struct mbuf *m0 = *m++;
   1111  1.1     jklos         unsigned int opcode = G_OPCODE(ntohl(m0->m_pkthdr.csum_data));
   1112  1.1     jklos         int ret = cpl_handlers[opcode] (dev, m0);
   1113  1.1     jklos 
   1114  1.1     jklos #if VALIDATE_TID
   1115  1.1     jklos         if (ret & CPL_RET_UNKNOWN_TID) {
   1116  1.1     jklos             union opcode_tid *p = cplhdr(m0);
   1117  1.1     jklos 
   1118  1.1     jklos             log(LOG_ERR, "%s: CPL message (opcode %u) had "
   1119  1.1     jklos                    "unknown TID %u\n", dev->name, opcode,
   1120  1.1     jklos                    G_TID(ntohl(p->opcode_tid)));
   1121  1.1     jklos         }
   1122  1.1     jklos #endif
   1123  1.1     jklos         if (ret & CPL_RET_BUF_DONE)
   1124  1.1     jklos             m_freem(m0);
   1125  1.1     jklos     }
   1126  1.1     jklos     return 0;
   1127  1.1     jklos }
   1128  1.1     jklos 
   1129  1.1     jklos /*
    1130  1.1     jklos  * Sends an mbuf to a T3C driver, holding a critical section across the send.
   1131  1.1     jklos  */
   1132  1.1     jklos int
   1133  1.1     jklos cxgb_ofld_send(struct toedev *dev, struct mbuf *m)
   1134  1.1     jklos {
   1135  1.1     jklos     int r;
   1136  1.1     jklos 
   1137  1.1     jklos     critical_enter();
   1138  1.1     jklos     r = dev->send(dev, m);
   1139  1.1     jklos     critical_exit();
   1140  1.1     jklos     return r;
   1141  1.1     jklos }
   1142  1.1     jklos 
   1143  1.1     jklos 
   1144  1.1     jklos /**
   1145  1.1     jklos  * cxgb_ofld_recv - process n received offload packets
   1146  1.1     jklos  * @dev: the offload device
   1147  1.1     jklos  * @m: an array of offload packets
   1148  1.1     jklos  * @n: the number of offload packets
   1149  1.1     jklos  *
   1150  1.1     jklos  * Process an array of ingress offload packets.  Each packet is forwarded
   1151  1.1     jklos  * to any active network taps and then passed to the offload device's receive
   1152  1.1     jklos  * method.  We optimize passing packets to the receive method by passing
    1153  1.1     jklos  * method.  When no taps are active the whole array is passed to the receive
    1154  1.1     jklos  * method at once; otherwise packets are delivered to it one at a time.
   1155  1.1     jklos int
   1156  1.1     jklos cxgb_ofld_recv(struct toedev *dev, struct mbuf **m, int n)
   1157  1.1     jklos {
   1158  1.1     jklos 
    1159  1.1     jklos #if defined(CONFIG_CHELSIO_T3)
                            /* Linux-heritage tap path; not built in this port. */
    1160  1.1     jklos     if (likely(!netdev_nit))
    1161  1.1     jklos         return dev->recv(dev, m, n);
    1162  1.1     jklos 
    1163  1.1     jklos     for ( ; n; n--, m++) {
    1164  1.1     jklos         m[0]->dev = dev->lldev;
    1165  1.1     jklos         dev_queue_xmit_nit(m[0], dev->lldev);
    1166  1.1     jklos         m[0]->dev = NULL;
    1167  1.1     jklos         dev->recv(dev, m, 1);
    1168  1.1     jklos     }
   1169  1.1     jklos     return 0;
   1170  1.1     jklos #else
   1171  1.1     jklos     return dev->recv(dev, m, n);
   1172  1.1     jklos #endif
   1173  1.1     jklos }
   1174  1.1     jklos 
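                        /*
                         * Neighbour (ARP) update notification.  If the route's interface
                         * belongs to an offload-capable adapter, push the update into its
                         * L2 table.
                         */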
   1175  1.1     jklos void
   1176  1.1     jklos cxgb_neigh_update(struct rtentry *rt)
   1177  1.1     jklos {
   1178  1.1     jklos 
   1179  1.1     jklos     if (is_offloading(rt->rt_ifp)) {
   1180  1.1     jklos         struct toedev *tdev = TOEDEV(rt->rt_ifp);
   1181  1.1     jklos 
   1182  1.1     jklos         BUG_ON(!tdev);
   1183  1.1     jklos         t3_l2t_update(tdev, rt);
   1184  1.1     jklos     }
   1185  1.1     jklos }
   1186  1.1     jklos 
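                        /*
                         * Build and send a CPL_SET_TCB_FIELD work request that rewrites the
                         * L2T index in the TCB of connection 'tid' to point at L2T entry 'e'.
                         */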
   1187  1.1     jklos static void
   1188  1.1     jklos set_l2t_ix(struct toedev *tdev, u32 tid, struct l2t_entry *e)
   1189  1.1     jklos {
   1190  1.1     jklos     struct mbuf *m;
   1191  1.1     jklos     struct cpl_set_tcb_field *req;
   1192  1.1     jklos 
   1193  1.1     jklos     m = m_gethdr(M_NOWAIT, MT_DATA);
   1194  1.1     jklos     if (!m) {
   1195  1.1     jklos         log(LOG_ERR, "%s: cannot allocate mbuf!\n", __func__);
   1196  1.1     jklos         return;
   1197  1.1     jklos     }
   1198  1.1     jklos 
   1199  1.1     jklos     m_set_priority(m, CPL_PRIORITY_CONTROL);
   1200  1.1     jklos     req = mtod(m, struct cpl_set_tcb_field *);
   1201  1.1     jklos     req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
   1202  1.1     jklos     OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
   1203  1.1     jklos     req->reply = 0;
   1204  1.1     jklos     req->cpu_idx = 0;
   1205  1.1     jklos     req->word = htons(W_TCB_L2T_IX);
   1206  1.1     jklos     req->mask = htobe64(V_TCB_L2T_IX(M_TCB_L2T_IX));
   1207  1.1     jklos     req->val = htobe64(V_TCB_L2T_IX(e->idx));
   1208  1.1     jklos     tdev->send(tdev, m);
   1209  1.1     jklos }
   1210  1.1     jklos 
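                        /*
                         * A route used by offloaded connections has been redirected from 'old'
                         * to 'new'.  Allocate an L2T entry for the new route and ask each
                         * client with state on an affected tid whether its connection should
                         * be switched to that entry (via set_l2t_ix()).
                         */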
   1211  1.1     jklos void
   1212  1.1     jklos cxgb_redirect(struct rtentry *old, struct rtentry *new)
   1213  1.1     jklos {
   1214  1.1     jklos     struct ifnet *olddev, *newdev;
   1215  1.1     jklos     struct tid_info *ti;
   1216  1.1     jklos     struct toedev *tdev;
   1217  1.1     jklos     u32 tid;
   1218  1.1     jklos     int update_tcb;
   1219  1.1     jklos     struct l2t_entry *e;
   1220  1.1     jklos     struct toe_tid_entry *te;
   1221  1.1     jklos 
   1222  1.1     jklos     olddev = old->rt_ifp;
   1223  1.1     jklos     newdev = new->rt_ifp;
   1224  1.1     jklos     if (!is_offloading(olddev))
   1225  1.1     jklos         return;
   1226  1.1     jklos     if (!is_offloading(newdev)) {
    1227  1.1     jklos         log(LOG_WARNING, "%s: Redirect to non-offload "
    1228  1.1     jklos             "device ignored.\n", __func__);
   1229  1.1     jklos         return;
   1230  1.1     jklos     }
   1231  1.1     jklos     tdev = TOEDEV(olddev);
   1232  1.1     jklos     BUG_ON(!tdev);
   1233  1.1     jklos     if (tdev != TOEDEV(newdev)) {
   1234  1.1     jklos         log(LOG_WARNING, "%s: Redirect to different "
   1235  1.1     jklos             "offload device ignored.\n", __func__);
   1236  1.1     jklos         return;
   1237  1.1     jklos     }
   1238  1.1     jklos 
   1239  1.1     jklos     /* Add new L2T entry */
   1240  1.1     jklos     e = t3_l2t_get(tdev, new, ((struct port_info *)new->rt_ifp->if_softc)->port_id);
   1241  1.1     jklos     if (!e) {
   1242  1.1     jklos         log(LOG_ERR, "%s: couldn't allocate new l2t entry!\n",
   1243  1.1     jklos                __func__);
   1244  1.1     jklos         return;
   1245  1.1     jklos     }
   1246  1.1     jklos 
   1247  1.1     jklos     /* Walk tid table and notify clients of dst change. */
   1248  1.1     jklos     ti = &(TOE_DATA(tdev))->tid_maps;
   1249  1.1     jklos     for (tid=0; tid < ti->ntids; tid++) {
   1250  1.1     jklos         te = lookup_tid(ti, tid);
   1251  1.1     jklos         BUG_ON(!te);
   1252  1.1     jklos         if (te->ctx && te->client && te->client->redirect) {
   1253  1.1     jklos             update_tcb = te->client->redirect(te->ctx, old, new,
   1254  1.1     jklos                               e);
   1255  1.1     jklos             if (update_tcb)  {
   1256  1.1     jklos                 l2t_hold(L2DATA(tdev), e);
   1257  1.1     jklos                 set_l2t_ix(tdev, tid, e);
   1258  1.1     jklos             }
   1259  1.1     jklos         }
   1260  1.1     jklos     }
   1261  1.1     jklos     l2t_release(L2DATA(tdev), e);
   1262  1.1     jklos }
   1263  1.1     jklos 
   1264  1.1     jklos /*
    1265  1.1     jklos  * Allocate a chunk of memory with malloc(9) using the M_DEVBUF type.
    1266  1.1     jklos  * The allocated memory is cleared.
   1267  1.1     jklos  */
   1268  1.1     jklos void *
   1269  1.1     jklos cxgb_alloc_mem(unsigned long size)
   1270  1.1     jklos {
   1271  1.1     jklos     return malloc(size, M_DEVBUF, M_ZERO);
   1272  1.1     jklos }
   1273  1.1     jklos 
   1274  1.1     jklos /*
    1275  1.1     jklos  * Free memory allocated through cxgb_alloc_mem().
   1276  1.1     jklos  */
   1277  1.1     jklos void
   1278  1.1     jklos cxgb_free_mem(void *addr)
   1279  1.1     jklos {
   1280  1.1     jklos     free(addr, M_DEVBUF);
   1281  1.1     jklos }
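
                        /*
                         * Illustrative pairing (the element count and type are hypothetical):
                         *
                         *     p = cxgb_alloc_mem(nitems * sizeof(*p));   -- zeroed on success
                         *     if (p == NULL)
                         *         return (ENOMEM);
                         *     ...
                         *     cxgb_free_mem(p);
                         */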
   1282  1.1     jklos 
   1283  1.1     jklos 
   1284  1.1     jklos /*
   1285  1.1     jklos  * Allocate and initialize the TID tables.  Returns 0 on success.
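                         *
                         * A single cxgb_alloc_mem() allocation is carved into three
                         * consecutive arrays:
                         *
                         *     tid_tab[ntids] | stid_tab[nstids] | atid_tab[natids]
                         *
                         * The server (stid) and active-open (atid) tables are then threaded
                         * onto the sfree and afree free lists.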
   1286  1.1     jklos  */
   1287  1.1     jklos static int
   1288  1.1     jklos init_tid_tabs(struct tid_info *t, unsigned int ntids,
   1289  1.1     jklos              unsigned int natids, unsigned int nstids,
   1290  1.1     jklos              unsigned int atid_base, unsigned int stid_base)
   1291  1.1     jklos {
   1292  1.1     jklos     unsigned long size = ntids * sizeof(*t->tid_tab) +
   1293  1.1     jklos         natids * sizeof(*t->atid_tab) + nstids * sizeof(*t->stid_tab);
   1294  1.1     jklos 
   1295  1.1     jklos     t->tid_tab = cxgb_alloc_mem(size);
   1296  1.1     jklos     if (!t->tid_tab)
   1297  1.1     jklos         return (ENOMEM);
   1298  1.1     jklos 
   1299  1.1     jklos     t->stid_tab = (union listen_entry *)&t->tid_tab[ntids];
   1300  1.1     jklos     t->atid_tab = (union active_open_entry *)&t->stid_tab[nstids];
   1301  1.1     jklos     t->ntids = ntids;
   1302  1.1     jklos     t->nstids = nstids;
   1303  1.1     jklos     t->stid_base = stid_base;
   1304  1.1     jklos     t->sfree = NULL;
   1305  1.1     jklos     t->natids = natids;
   1306  1.1     jklos     t->atid_base = atid_base;
   1307  1.1     jklos     t->afree = NULL;
   1308  1.1     jklos     t->stids_in_use = t->atids_in_use = 0;
   1309  1.1     jklos     atomic_set_int(&t->tids_in_use, 0);
   1310  1.1     jklos     mtx_init(&t->stid_lock, "stid", NULL, MTX_DEF);
   1311  1.1     jklos     mtx_init(&t->atid_lock, "atid", NULL, MTX_DEF);
   1312  1.1     jklos 
   1313  1.1     jklos     /*
   1314  1.1     jklos      * Setup the free lists for stid_tab and atid_tab.
   1315  1.1     jklos      */
   1316  1.1     jklos     if (nstids) {
   1317  1.1     jklos         while (--nstids)
   1318  1.1     jklos             t->stid_tab[nstids - 1].next = &t->stid_tab[nstids];
   1319  1.1     jklos         t->sfree = t->stid_tab;
   1320  1.1     jklos     }
   1321  1.1     jklos     if (natids) {
   1322  1.1     jklos         while (--natids)
   1323  1.1     jklos             t->atid_tab[natids - 1].next = &t->atid_tab[natids];
   1324  1.1     jklos         t->afree = t->atid_tab;
   1325  1.1     jklos     }
   1326  1.1     jklos     return 0;
   1327  1.1     jklos }
   1328  1.1     jklos 
   1329  1.1     jklos static void
   1330  1.1     jklos free_tid_maps(struct tid_info *t)
   1331  1.1     jklos {
   1332  1.1     jklos     cxgb_free_mem(t->tid_tab);
   1333  1.1     jklos }
   1334  1.1     jklos 
   1335  1.1     jklos static inline void
   1336  1.1     jklos add_adapter(adapter_t *adap)
   1337  1.1     jklos {
   1338  1.1     jklos     rw_wlock(&adapter_list_lock);
   1339  1.1     jklos     TAILQ_INSERT_TAIL(&adapter_list, adap, adapter_entry);
   1340  1.1     jklos     rw_wunlock(&adapter_list_lock);
   1341  1.1     jklos }
   1342  1.1     jklos 
   1343  1.1     jklos static inline void
   1344  1.1     jklos remove_adapter(adapter_t *adap)
   1345  1.1     jklos {
   1346  1.1     jklos     rw_wlock(&adapter_list_lock);
   1347  1.1     jklos     TAILQ_REMOVE(&adapter_list, adap, adapter_entry);
   1348  1.1     jklos     rw_wunlock(&adapter_list_lock);
   1349  1.1     jklos }
   1350  1.1     jklos 
   1351  1.1     jklos /*
   1352  1.1     jklos  * XXX
   1353  1.1     jklos  */
   1354  1.1     jklos #define t3_free_l2t(...)
   1355  1.1     jklos 
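                        /*
                         * Bring up the offload side of an adapter: query the hardware limits
                         * through the device's ctl hook, allocate the L2 table and TID maps,
                         * create the tid-release worker thread, and install process_rx() as
                         * the device's receive method.
                         */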
   1356  1.1     jklos int
   1357  1.1     jklos cxgb_offload_activate(struct adapter *adapter)
   1358  1.1     jklos {
   1359  1.1     jklos     struct toedev *dev = &adapter->tdev;
   1360  1.1     jklos     int natids, err;
   1361  1.1     jklos     struct toe_data *t;
   1362  1.1     jklos     struct tid_range stid_range, tid_range;
   1363  1.1     jklos     struct mtutab mtutab;
   1364  1.1     jklos     unsigned int l2t_capacity;
   1365  1.1     jklos 
   1366  1.1     jklos     t = malloc(sizeof(*t), M_DEVBUF, M_WAITOK);
   1367  1.1     jklos     if (!t)
   1368  1.1     jklos         return (ENOMEM);
   1369  1.1     jklos 
   1370  1.1     jklos     err = (EOPNOTSUPP);
   1371  1.1     jklos     if (dev->ctl(dev, GET_TX_MAX_CHUNK, &t->tx_max_chunk) < 0 ||
   1372  1.1     jklos         dev->ctl(dev, GET_MAX_OUTSTANDING_WR, &t->max_wrs) < 0 ||
   1373  1.1     jklos         dev->ctl(dev, GET_L2T_CAPACITY, &l2t_capacity) < 0 ||
   1374  1.1     jklos         dev->ctl(dev, GET_MTUS, &mtutab) < 0 ||
   1375  1.1     jklos         dev->ctl(dev, GET_TID_RANGE, &tid_range) < 0 ||
   1376  1.1     jklos         dev->ctl(dev, GET_STID_RANGE, &stid_range) < 0)
   1377  1.1     jklos         goto out_free;
   1378  1.1     jklos 
   1379  1.1     jklos     err = (ENOMEM);
   1380  1.1     jklos     L2DATA(dev) = t3_init_l2t(l2t_capacity);
   1381  1.1     jklos     if (!L2DATA(dev))
   1382  1.1     jklos         goto out_free;
   1383  1.1     jklos 
   1384  1.5  riastrad     natids = uimin(tid_range.num / 2, MAX_ATIDS);
   1385  1.1     jklos     err = init_tid_tabs(&t->tid_maps, tid_range.num, natids,
   1386  1.1     jklos                 stid_range.num, ATID_BASE, stid_range.base);
   1387  1.1     jklos     if (err)
   1388  1.1     jklos         goto out_free_l2t;
   1389  1.1     jklos 
   1390  1.1     jklos     t->mtus = mtutab.mtus;
   1391  1.1     jklos     t->nmtus = mtutab.size;
   1392  1.1     jklos 
   1393  1.1     jklos     t->tid_release_task.name = "t3_process_tid_release_list";
   1394  1.1     jklos     t->tid_release_task.func = t3_process_tid_release_list;
   1395  1.1     jklos     t->tid_release_task.context = adapter;
    1396  1.1     jklos     kthread_create(PRI_NONE, 0, NULL, cxgb_make_task,
                                &t->tid_release_task, NULL, "cxgb_make_task");
   1397  1.1     jklos     mtx_init(&t->tid_release_lock, "tid release", NULL, MTX_DEF);
   1398  1.1     jklos     t->dev = dev;
   1399  1.1     jklos 
   1400  1.1     jklos     TOE_DATA(dev) = t;
   1401  1.1     jklos     dev->recv = process_rx;
   1402  1.1     jklos     dev->neigh_update = t3_l2t_update;
   1403  1.1     jklos #if 0
   1404  1.1     jklos     offload_proc_dev_setup(dev);
   1405  1.1     jklos #endif
   1406  1.1     jklos     /* Register netevent handler once */
   1407  1.1     jklos     if (TAILQ_EMPTY(&adapter_list)) {
   1408  1.1     jklos #if defined(CONFIG_CHELSIO_T3_MODULE)
   1409  1.1     jklos         if (prepare_arp_with_t3core())
   1410  1.1     jklos             log(LOG_ERR, "Unable to set offload capabilities\n");
   1411  1.1     jklos #endif
   1412  1.1     jklos     }
   1413  1.1     jklos     add_adapter(adapter);
   1414  1.1     jklos     return 0;
   1415  1.1     jklos 
   1416  1.1     jklos out_free_l2t:
   1417  1.1     jklos     t3_free_l2t(L2DATA(dev));
   1418  1.1     jklos     L2DATA(dev) = NULL;
   1419  1.1     jklos out_free:
   1420  1.1     jklos     free(t, M_DEVBUF);
   1421  1.1     jklos     return err;
   1422  1.1     jklos 
   1423  1.1     jklos }
   1424  1.1     jklos 
   1425  1.1     jklos void
   1426  1.1     jklos cxgb_offload_deactivate(struct adapter *adapter)
   1427  1.1     jklos {
   1428  1.1     jklos     struct toedev *tdev = &adapter->tdev;
   1429  1.1     jklos     struct toe_data *t = TOE_DATA(tdev);
   1430  1.1     jklos 
   1431  1.1     jklos     remove_adapter(adapter);
   1432  1.1     jklos     if (TAILQ_EMPTY(&adapter_list)) {
   1433  1.1     jklos #if defined(CONFIG_CHELSIO_T3_MODULE)
   1434  1.1     jklos         restore_arp_sans_t3core();
   1435  1.1     jklos #endif
   1436  1.1     jklos     }
   1437  1.1     jklos     free_tid_maps(&t->tid_maps);
   1438  1.1     jklos     TOE_DATA(tdev) = NULL;
   1439  1.1     jklos     t3_free_l2t(L2DATA(tdev));
   1440  1.1     jklos     L2DATA(tdev) = NULL;
   1441  1.1     jklos     free(t, M_DEVBUF);
   1442  1.1     jklos }
   1443  1.1     jklos 
   1444  1.1     jklos 
   1445  1.1     jklos static inline void
   1446  1.1     jklos register_tdev(struct toedev *tdev)
   1447  1.1     jklos {
   1448  1.1     jklos     static int unit;
   1449  1.1     jklos 
   1450  1.1     jklos     mtx_lock(&cxgb_db_lock);
   1451  1.1     jklos     snprintf(tdev->name, sizeof(tdev->name), "ofld_dev%d", unit++);
   1452  1.1     jklos     TAILQ_INSERT_TAIL(&ofld_dev_list, tdev, ofld_entry);
   1453  1.1     jklos     mtx_unlock(&cxgb_db_lock);
   1454  1.1     jklos }
   1455  1.1     jklos 
   1456  1.1     jklos static inline void
   1457  1.1     jklos unregister_tdev(struct toedev *tdev)
   1458  1.1     jklos {
   1459  1.1     jklos     mtx_lock(&cxgb_db_lock);
   1460  1.1     jklos     TAILQ_REMOVE(&ofld_dev_list, tdev, ofld_entry);
   1461  1.1     jklos     mtx_unlock(&cxgb_db_lock);
   1462  1.1     jklos }
   1463  1.1     jklos 
   1464  1.1     jklos void
   1465  1.1     jklos cxgb_adapter_ofld(struct adapter *adapter)
   1466  1.1     jklos {
   1467  1.1     jklos     struct toedev *tdev = &adapter->tdev;
   1468  1.1     jklos 
   1469  1.1     jklos     cxgb_set_dummy_ops(tdev);
   1470  1.1     jklos     tdev->send = t3_offload_tx;
   1471  1.1     jklos     tdev->ctl = cxgb_offload_ctl;
    1472  1.1     jklos     tdev->type = adapter->params.rev == 0 ? T3A : T3B;
   1474  1.1     jklos 
   1475  1.1     jklos     register_tdev(tdev);
   1476  1.1     jklos #if 0
   1477  1.1     jklos     offload_proc_dev_init(tdev);
   1478  1.1     jklos #endif
   1479  1.1     jklos }
   1480  1.1     jklos 
   1481  1.1     jklos void
   1482  1.1     jklos cxgb_adapter_unofld(struct adapter *adapter)
   1483  1.1     jklos {
   1484  1.1     jklos     struct toedev *tdev = &adapter->tdev;
   1485  1.1     jklos #if 0
   1486  1.1     jklos     offload_proc_dev_cleanup(tdev);
   1487  1.1     jklos     offload_proc_dev_exit(tdev);
   1488  1.1     jklos #endif
   1489  1.1     jklos     tdev->recv = NULL;
   1490  1.1     jklos     tdev->neigh_update = NULL;
   1491  1.1     jklos 
   1492  1.1     jklos     unregister_tdev(tdev);
   1493  1.1     jklos }
   1494  1.1     jklos 
   1495  1.1     jklos void
   1496  1.1     jklos cxgb_offload_init(void)
   1497  1.1     jklos {
   1498  1.1     jklos     int i;
   1499  1.1     jklos 
    1500  1.1     jklos     if (inited)
    1501  1.1     jklos         return;
    1503  1.1     jklos     inited = 1;
   1504  1.1     jklos 
   1505  1.1     jklos     mtx_init(&cxgb_db_lock, "ofld db", NULL, MTX_DEF);
   1506  1.1     jklos     rw_init(&adapter_list_lock);
   1507  1.1     jklos     TAILQ_INIT(&client_list);
   1508  1.1     jklos     TAILQ_INIT(&ofld_dev_list);
   1509  1.1     jklos     TAILQ_INIT(&adapter_list);
   1510  1.1     jklos 
   1511  1.1     jklos     for (i = 0; i < NUM_CPL_CMDS; ++i)
   1512  1.1     jklos         cpl_handlers[i] = do_bad_cpl;
   1513  1.1     jklos 
   1514  1.1     jklos     t3_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl);
   1515  1.1     jklos     t3_register_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl);
   1516  1.1     jklos     t3_register_cpl_handler(CPL_PASS_OPEN_RPL, do_stid_rpl);
   1517  1.1     jklos     t3_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, do_stid_rpl);
   1518  1.1     jklos     t3_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_cr);
   1519  1.1     jklos     t3_register_cpl_handler(CPL_PASS_ESTABLISH, do_hwtid_rpl);
   1520  1.1     jklos     t3_register_cpl_handler(CPL_ABORT_RPL_RSS, do_hwtid_rpl);
   1521  1.1     jklos     t3_register_cpl_handler(CPL_ABORT_RPL, do_hwtid_rpl);
   1522  1.1     jklos     t3_register_cpl_handler(CPL_RX_URG_NOTIFY, do_hwtid_rpl);
   1523  1.1     jklos     t3_register_cpl_handler(CPL_RX_DATA, do_hwtid_rpl);
   1524  1.1     jklos     t3_register_cpl_handler(CPL_TX_DATA_ACK, do_hwtid_rpl);
   1525  1.1     jklos     t3_register_cpl_handler(CPL_TX_DMA_ACK, do_hwtid_rpl);
   1526  1.1     jklos     t3_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl);
   1527  1.1     jklos     t3_register_cpl_handler(CPL_PEER_CLOSE, do_hwtid_rpl);
   1528  1.1     jklos     t3_register_cpl_handler(CPL_CLOSE_CON_RPL, do_hwtid_rpl);
   1529  1.1     jklos     t3_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req_rss);
   1530  1.1     jklos     t3_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish);
   1531  1.1     jklos     t3_register_cpl_handler(CPL_SET_TCB_RPL, do_set_tcb_rpl);
   1532  1.1     jklos     t3_register_cpl_handler(CPL_RDMA_TERMINATE, do_term);
   1533  1.1     jklos     t3_register_cpl_handler(CPL_RDMA_EC_STATUS, do_hwtid_rpl);
   1534  1.1     jklos     t3_register_cpl_handler(CPL_TRACE_PKT, do_trace);
   1535  1.1     jklos     t3_register_cpl_handler(CPL_RX_DATA_DDP, do_hwtid_rpl);
   1536  1.1     jklos     t3_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_hwtid_rpl);
   1537  1.1     jklos     t3_register_cpl_handler(CPL_ISCSI_HDR, do_hwtid_rpl);
   1538  1.1     jklos #if 0
   1539  1.1     jklos        if (offload_proc_init())
   1540  1.1     jklos            log(LOG_WARNING, "Unable to create /proc/net/cxgb3 dir\n");
   1541  1.1     jklos #endif
   1542  1.1     jklos }
   1543  1.1     jklos 
   1544  1.1     jklos void
   1545  1.1     jklos cxgb_offload_exit(void)
   1546  1.1     jklos {
   1547  1.1     jklos     static int deinited = 0;
   1548  1.1     jklos 
   1549  1.1     jklos     if (deinited)
   1550  1.1     jklos         return;
   1551  1.1     jklos 
   1552  1.1     jklos     deinited = 1;
   1553  1.1     jklos     mtx_destroy(&cxgb_db_lock);
   1554  1.1     jklos     rw_destroy(&adapter_list_lock);
   1555  1.1     jklos #if 0
   1556  1.1     jklos     offload_proc_cleanup();
   1557  1.1     jklos #endif
   1558  1.1     jklos }
   1559  1.1     jklos 
   1560  1.1     jklos #if 0
   1561  1.1     jklos static int
   1562  1.1     jklos offload_info_read_proc(char *buf, char **start, off_t offset,
   1563  1.1     jklos                   int length, int *eof, void *data)
   1564  1.1     jklos {
   1565  1.1     jklos     struct toe_data *d = data;
   1566  1.1     jklos     struct tid_info *t = &d->tid_maps;
   1567  1.1     jklos     int len;
   1568  1.1     jklos 
   1569  1.3  christos     len = snprintf(buf, length, "TID range: 0..%d, in use: %u\n"
   1570  1.1     jklos               "STID range: %d..%d, in use: %u\n"
   1571  1.1     jklos               "ATID range: %d..%d, in use: %u\n"
   1572  1.1     jklos               "MSS: %u\n",
   1573  1.1     jklos               t->ntids - 1, atomic_read(&t->tids_in_use), t->stid_base,
   1574  1.1     jklos               t->stid_base + t->nstids - 1, t->stids_in_use,
   1575  1.1     jklos               t->atid_base, t->atid_base + t->natids - 1,
   1576  1.1     jklos               t->atids_in_use, d->tx_max_chunk);
   1577  1.1     jklos     if (len > length)
   1578  1.1     jklos         len = length;
   1579  1.1     jklos     *eof = 1;
   1580  1.1     jklos     return len;
   1581  1.1     jklos }
   1582  1.1     jklos 
   1583  1.1     jklos static int
   1584  1.1     jklos offload_info_proc_setup(struct proc_dir_entry *dir,
   1585  1.1     jklos                    struct toe_data *d)
   1586  1.1     jklos {
   1587  1.1     jklos     struct proc_dir_entry *p;
   1588  1.1     jklos 
   1589  1.1     jklos     if (!dir)
   1590  1.1     jklos         return (EINVAL);
   1591  1.1     jklos 
   1592  1.1     jklos     p = create_proc_read_entry("info", 0, dir, offload_info_read_proc, d);
   1593  1.1     jklos     if (!p)
   1594  1.1     jklos         return (ENOMEM);
   1595  1.1     jklos 
   1596  1.1     jklos     p->owner = THIS_MODULE;
   1597  1.1     jklos     return 0;
   1598  1.1     jklos }
   1599  1.1     jklos 
   1600  1.1     jklos 
   1601  1.1     jklos static int
   1602  1.1     jklos offload_devices_read_proc(char *buf, char **start, off_t offset,
   1603  1.1     jklos                      int length, int *eof, void *data)
   1604  1.1     jklos {
   1605  1.1     jklos     int len;
   1606  1.1     jklos     struct toedev *dev;
   1607  1.1     jklos     struct net_device *ndev;
   1608  1.1     jklos 
   1609  1.3  christos     len = snprintf(buf, length, "Device           Interfaces\n");
   1610  1.1     jklos 
   1611  1.1     jklos     mtx_lock(&cxgb_db_lock);
   1612  1.1     jklos     TAILQ_FOREACH(dev, &ofld_dev_list, ofld_entry) {
   1613  1.3  christos 	if (len >= length)
   1614  1.3  christos 	    break;
   1615  1.3  christos         len += snprintf(buf + len, length - len, "%-16s", dev->name);
   1616  1.1     jklos         read_lock(&dev_base_lock);
   1617  1.1     jklos         for (ndev = dev_base; ndev; ndev = ndev->next) {
   1618  1.3  christos             if (TOEDEV(ndev) == dev) {
   1619  1.3  christos 		if (len >= length)
   1620  1.3  christos 		    break;
   1621  1.3  christos                 len += snprintf(buf + len, length - len, " %s", ndev->name);
   1622  1.3  christos 	    }
   1623  1.1     jklos         }
   1624  1.1     jklos         read_unlock(&dev_base_lock);
   1625  1.1     jklos         if (len >= length)
   1626  1.1     jklos             break;
   1627  1.3  christos         len += snprintf(buf + len, length - len, "\n");
   1628  1.1     jklos     }
   1629  1.1     jklos     mtx_unlock(&cxgb_db_lock);
   1630  1.1     jklos 
   1631  1.1     jklos     if (len > length)
   1632  1.1     jklos         len = length;
   1633  1.1     jklos     *eof = 1;
   1634  1.1     jklos     return len;
   1635  1.1     jklos }
   1636  1.1     jklos 
   1637  1.1     jklos #endif
   1638  1.1     jklos 
   1639