cxgb_offload.c revision 1.3

/**************************************************************************

Copyright (c) 2007, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.


***************************************************************************/


#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cxgb_offload.c,v 1.2 2011/05/18 01:01:59 dyoung Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/bus.h>
#include <sys/ioccom.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/queue.h>

#ifdef CONFIG_DEFINED
#include <cxgb_include.h>
#else
#include "cxgb_include.h"
#endif

#include <net/route.h>

/*
 * XXX
 */
#define LOG_NOTICE 2
#define BUG_ON(...)
#define VALIDATE_TID 0


TAILQ_HEAD(, cxgb_client) client_list;
TAILQ_HEAD(, toedev) ofld_dev_list;
TAILQ_HEAD(, adapter) adapter_list;

static struct mtx cxgb_db_lock;
static struct rwlock adapter_list_lock;


static const unsigned int MAX_ATIDS = 64 * 1024;
static const unsigned int ATID_BASE = 0x100000;
static int inited = 0;

static inline int
offload_activated(struct toedev *tdev)
{
    struct adapter *adapter = tdev2adap(tdev);

    return (isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT));
}

/**
 *  cxgb_register_client - register an offload client
 *  @client: the client
 *
 *  Add the client to the client list and call back the client for each
 *  activated offload device.
 */
void
cxgb_register_client(struct cxgb_client *client)
{
    struct toedev *tdev;

    mtx_lock(&cxgb_db_lock);
    TAILQ_INSERT_TAIL(&client_list, client, client_entry);

    if (client->add) {
        TAILQ_FOREACH(tdev, &ofld_dev_list, ofld_entry) {
            if (offload_activated(tdev))
                client->add(tdev);
        }
    }
    mtx_unlock(&cxgb_db_lock);
}
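
/*
 * Example usage (a hypothetical sketch, not part of this driver): an
 * offload client fills in the callbacks of a struct cxgb_client and
 * registers itself; "my_add", "my_remove" and "my_client" are made-up
 * names for illustration only.
 *
 *    static void my_add(struct toedev *tdev)    { ... claim tdev ... }
 *    static void my_remove(struct toedev *tdev) { ... drop tdev ... }
 *
 *    static struct cxgb_client my_client = {
 *        .add    = my_add,
 *        .remove = my_remove,
 *    };
 *
 *    cxgb_register_client(&my_client);
 *
 * my_add() runs immediately for every already-activated offload device,
 * and again whenever cxgb_add_clients() announces a new one.
 */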

/**
 *  cxgb_unregister_client - unregister an offload client
 *  @client: the client
 *
 *  Remove the client from the client list and call back the client for
 *  each activated offload device.
 */
void
cxgb_unregister_client(struct cxgb_client *client)
{
    struct toedev *tdev;

    mtx_lock(&cxgb_db_lock);
    TAILQ_REMOVE(&client_list, client, client_entry);

    if (client->remove) {
        TAILQ_FOREACH(tdev, &ofld_dev_list, ofld_entry) {
            if (offload_activated(tdev))
                client->remove(tdev);
        }
    }
    mtx_unlock(&cxgb_db_lock);
}

/**
 *  cxgb_add_clients - activate registered clients for an offload device
 *  @tdev: the offload device
 *
 *  Call back all registered clients once an offload device is activated.
 */
void
cxgb_add_clients(struct toedev *tdev)
{
    struct cxgb_client *client;

    mtx_lock(&cxgb_db_lock);
    TAILQ_FOREACH(client, &client_list, client_entry) {
        if (client->add)
            client->add(tdev);
    }
    mtx_unlock(&cxgb_db_lock);
}

/**
 *  cxgb_remove_clients - deactivate registered clients for an offload device
 *  @tdev: the offload device
 *
 *  Call back all registered clients once an offload device is deactivated.
 */
void
cxgb_remove_clients(struct toedev *tdev)
{
    struct cxgb_client *client;

    mtx_lock(&cxgb_db_lock);
    TAILQ_FOREACH(client, &client_list, client_entry) {
        if (client->remove)
            client->remove(tdev);
    }
    mtx_unlock(&cxgb_db_lock);
}

static int
is_offloading(struct ifnet *ifp)
{
    struct adapter *adapter;
    int port;

    rw_rlock(&adapter_list_lock);
    TAILQ_FOREACH(adapter, &adapter_list, adapter_entry) {
        for_each_port(adapter, port) {
            if (ifp == adapter->port[port].ifp) {
                rw_runlock(&adapter_list_lock);
                return 1;
            }
        }
    }
    rw_runlock(&adapter_list_lock);
    return 0;
}

static struct ifnet *
get_iff_from_mac(adapter_t *adapter, const uint8_t *mac, unsigned int vlan)
{
#ifdef notyet
    int i;

    for_each_port(adapter, i) {
        const struct vlan_group *grp;
        const struct port_info *p = &adapter->port[i];
        struct ifnet *dev = p->ifp;

        if (!memcmp(p->hw_addr, mac, ETHER_ADDR_LEN)) {
            if (vlan && vlan != EVL_VLID_MASK) {
                grp = p->vlan_grp;
                dev = grp ? grp->vlan_devices[vlan] : NULL;
            } else
                while (dev->master)
                    dev = dev->master;
            return dev;
        }
    }
#endif
    return NULL;
}

static inline void
failover_fixup(adapter_t *adapter, int port)
{
    if (adapter->params.rev == 0) {
        struct ifnet *ifp = adapter->port[port].ifp;
        struct cmac *mac = &adapter->port[port].mac;
        if (!(ifp->if_flags & IFF_UP)) {
            /* Failover triggered by the interface ifdown */
            t3_write_reg(adapter, A_XGM_TX_CTRL + mac->offset,
                     F_TXEN);
            t3_read_reg(adapter, A_XGM_TX_CTRL + mac->offset);
        } else {
            /* Failover triggered by the interface link down */
            t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
            t3_read_reg(adapter, A_XGM_RX_CTRL + mac->offset);
            t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset,
                     F_RXEN);
        }
    }
}

static int
cxgb_ulp_iscsi_ctl(adapter_t *adapter, unsigned int req, void *data)
{
    int ret = 0;
    struct ulp_iscsi_info *uiip = data;

    switch (req) {
    case ULP_ISCSI_GET_PARAMS:
        uiip->llimit = t3_read_reg(adapter, A_ULPRX_ISCSI_LLIMIT);
        uiip->ulimit = t3_read_reg(adapter, A_ULPRX_ISCSI_ULIMIT);
        uiip->tagmask = t3_read_reg(adapter, A_ULPRX_ISCSI_TAGMASK);
        /*
         * On tx, the iscsi pdu has to be <= tx page size and has to
         * fit into the Tx PM FIFO.
         */
        uiip->max_txsz = min(adapter->params.tp.tx_pg_size,
                     t3_read_reg(adapter, A_PM1_TX_CFG) >> 17);
        /*
         * On rx, the iscsi pdu has to be < rx page size and the
         * whole pdu + cpl headers has to fit into one sge buffer.
         */
        uiip->max_rxsz =
            (unsigned int)min(adapter->params.tp.rx_pg_size,
            (adapter->sge.qs[0].fl[1].buf_size -
                sizeof(struct cpl_rx_data) * 2 -
                sizeof(struct cpl_rx_data_ddp)));
        break;
    case ULP_ISCSI_SET_PARAMS:
        t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask);
        break;
    default:
        ret = (EOPNOTSUPP);
    }
    return ret;
}

/* Response queue used for RDMA events. */
#define ASYNC_NOTIF_RSPQ 0

static int
cxgb_rdma_ctl(adapter_t *adapter, unsigned int req, void *data)
{
    int ret = 0;

    switch (req) {
    case RDMA_GET_PARAMS: {
        struct rdma_info *req2 = data;

        req2->tpt_base = t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT);
        req2->tpt_top  = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT);
        req2->pbl_base = t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT);
        req2->pbl_top  = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT);
        req2->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT);
        req2->rqt_top  = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT);
        break;
    }
    case RDMA_CQ_OP: {
        struct rdma_cq_op *req2 = data;

        /* may be called in any context */
        mtx_lock(&adapter->sge.reg_lock);
        ret = t3_sge_cqcntxt_op(adapter, req2->id, req2->op,
                    req2->credits);
        mtx_unlock(&adapter->sge.reg_lock);
        break;
    }
    case RDMA_GET_MEM: {
        struct ch_mem_range *t = data;
        struct mc7 *mem;

        if ((t->addr & 7) || (t->len & 7))
            return (EINVAL);
        if (t->mem_id == MEM_CM)
            mem = &adapter->cm;
        else if (t->mem_id == MEM_PMRX)
            mem = &adapter->pmrx;
        else if (t->mem_id == MEM_PMTX)
            mem = &adapter->pmtx;
        else
            return (EINVAL);

        ret = t3_mc7_bd_read(mem, t->addr/8, t->len/8, (u64 *)t->buf);
        if (ret)
            return (ret);
        break;
    }
    case RDMA_CQ_SETUP: {
        struct rdma_cq_setup *req2 = data;

        mtx_lock(&adapter->sge.reg_lock);
        ret = t3_sge_init_cqcntxt(adapter, req2->id, req2->base_addr,
                      req2->size, ASYNC_NOTIF_RSPQ,
                      req2->ovfl_mode, req2->credits,
                      req2->credit_thres);
        mtx_unlock(&adapter->sge.reg_lock);
        break;
    }
    case RDMA_CQ_DISABLE:
        mtx_lock(&adapter->sge.reg_lock);
        ret = t3_sge_disable_cqcntxt(adapter, *(unsigned int *)data);
        mtx_unlock(&adapter->sge.reg_lock);
        break;
    case RDMA_CTRL_QP_SETUP: {
        struct rdma_ctrlqp_setup *req2 = data;

        mtx_lock(&adapter->sge.reg_lock);
        ret = t3_sge_init_ecntxt(adapter, FW_RI_SGEEC_START, 0,
                     SGE_CNTXT_RDMA, ASYNC_NOTIF_RSPQ,
                     req2->base_addr, req2->size,
                     FW_RI_TID_START, 1, 0);
        mtx_unlock(&adapter->sge.reg_lock);
        break;
    }
    default:
        ret = EOPNOTSUPP;
    }
    return (ret);
}

static int
cxgb_offload_ctl(struct toedev *tdev, unsigned int req, void *data)
{
    struct adapter *adapter = tdev2adap(tdev);
    struct tid_range *tid;
    struct mtutab *mtup;
    struct iff_mac *iffmacp;
    struct ddp_params *ddpp;
    struct adap_ports *ports;
    int port;

    switch (req) {
    case GET_MAX_OUTSTANDING_WR:
        *(unsigned int *)data = FW_WR_NUM;
        break;
    case GET_WR_LEN:
        *(unsigned int *)data = WR_FLITS;
        break;
    case GET_TX_MAX_CHUNK:
        *(unsigned int *)data = 1 << 20;  /* 1MB */
        break;
    case GET_TID_RANGE:
        tid = data;
        tid->num = t3_mc5_size(&adapter->mc5) -
            adapter->params.mc5.nroutes -
            adapter->params.mc5.nfilters -
            adapter->params.mc5.nservers;
        tid->base = 0;
        break;
    case GET_STID_RANGE:
        tid = data;
        tid->num = adapter->params.mc5.nservers;
        tid->base = t3_mc5_size(&adapter->mc5) - tid->num -
            adapter->params.mc5.nfilters -
            adapter->params.mc5.nroutes;
        break;
    case GET_L2T_CAPACITY:
        *(unsigned int *)data = 2048;
        break;
    case GET_MTUS:
        mtup = data;
        mtup->size = NMTUS;
        mtup->mtus = adapter->params.mtus;
        break;
    case GET_IFF_FROM_MAC:
        iffmacp = data;
        iffmacp->dev = get_iff_from_mac(adapter, iffmacp->mac_addr,
                      iffmacp->vlan_tag & EVL_VLID_MASK);
        break;
    case GET_DDP_PARAMS:
        ddpp = data;
        ddpp->llimit = t3_read_reg(adapter, A_ULPRX_TDDP_LLIMIT);
        ddpp->ulimit = t3_read_reg(adapter, A_ULPRX_TDDP_ULIMIT);
        ddpp->tag_mask = t3_read_reg(adapter, A_ULPRX_TDDP_TAGMASK);
        break;
    case GET_PORTS:
        ports = data;
        ports->nports   = adapter->params.nports;
        for_each_port(adapter, port)
            ports->lldevs[port] = adapter->port[port].ifp;
        break;
    case FAILOVER:
        port = *(int *)data;
        t3_port_failover(adapter, port);
        failover_fixup(adapter, port);
        break;
    case FAILOVER_DONE:
        port = *(int *)data;
        t3_failover_done(adapter, port);
        break;
    case FAILOVER_CLEAR:
        t3_failover_clear(adapter);
        break;
    case ULP_ISCSI_GET_PARAMS:
    case ULP_ISCSI_SET_PARAMS:
        if (!offload_running(adapter))
            return (EAGAIN);
        return cxgb_ulp_iscsi_ctl(adapter, req, data);
    case RDMA_GET_PARAMS:
    case RDMA_CQ_OP:
    case RDMA_CQ_SETUP:
    case RDMA_CQ_DISABLE:
    case RDMA_CTRL_QP_SETUP:
    case RDMA_GET_MEM:
        if (!offload_running(adapter))
            return (EAGAIN);
        return cxgb_rdma_ctl(adapter, req, data);
    default:
        return (EOPNOTSUPP);
    }
    return 0;
}
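
/*
 * Example query through the ctl hook (a minimal sketch): offload clients
 * retrieve adapter parameters by calling tdev->ctl() with one of the
 * GET_* requests handled above, e.g. the port list:
 *
 *    struct adap_ports ports;
 *
 *    if (tdev->ctl(tdev, GET_PORTS, &ports) == 0) {
 *        // ports.nports entries are now valid in ports.lldevs[]
 *    }
 */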

/*
 * Dummy handler for Rx offload packets in case we get an offload packet before
 * proper processing is set up.  This complains and drops the packet as it isn't
 * normal to get offload packets at this stage.
 */
static int
rx_offload_blackhole(struct toedev *dev, struct mbuf **m, int n)
{
    CH_ERR(tdev2adap(dev), "%d unexpected offload packets, first data 0x%x\n",
        n, *mtod(m[0], uint32_t *));
    while (n--)
        m_freem(m[n]);
    return 0;
}

static void
dummy_neigh_update(struct toedev *dev, struct rtentry *neigh)
{
}

void
cxgb_set_dummy_ops(struct toedev *dev)
{
    dev->recv         = rx_offload_blackhole;
    dev->neigh_update = dummy_neigh_update;
}

/*
 * Free an active-open TID.
 */
void *
cxgb_free_atid(struct toedev *tdev, int atid)
{
    struct tid_info *t = &(TOE_DATA(tdev))->tid_maps;
    union active_open_entry *p = atid2entry(t, atid);
    void *ctx = p->toe_tid.ctx;

    mtx_lock(&t->atid_lock);
    p->next = t->afree;
    t->afree = p;
    t->atids_in_use--;
    mtx_unlock(&t->atid_lock);

    return ctx;
}

/*
 * Free a server TID and return it to the free pool.
 */
void
cxgb_free_stid(struct toedev *tdev, int stid)
{
    struct tid_info *t = &(TOE_DATA(tdev))->tid_maps;
    union listen_entry *p = stid2entry(t, stid);

    mtx_lock(&t->stid_lock);
    p->next = t->sfree;
    t->sfree = p;
    t->stids_in_use--;
    mtx_unlock(&t->stid_lock);
}

void
cxgb_insert_tid(struct toedev *tdev, struct cxgb_client *client,
    void *ctx, unsigned int tid)
{
    struct tid_info *t = &(TOE_DATA(tdev))->tid_maps;

    t->tid_tab[tid].client = client;
    t->tid_tab[tid].ctx = ctx;
    atomic_add_int(&t->tids_in_use, 1);
}

/*
 * Populate a TID_RELEASE WR.  The mbuf must be already properly sized.
 */
static inline void
mk_tid_release(struct mbuf *m, unsigned int tid)
{
    struct cpl_tid_release *req;

    m_set_priority(m, CPL_PRIORITY_SETUP);
    req = mtod(m, struct cpl_tid_release *);
    req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
    OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}

static void
t3_process_tid_release_list(struct work *wk, void *data)
{
    struct mbuf *m;
    struct toedev *tdev = data;
    struct toe_data *td = TOE_DATA(tdev);

    mtx_lock(&td->tid_release_lock);
    while (td->tid_release_list) {
        struct toe_tid_entry *p = td->tid_release_list;

        td->tid_release_list = (struct toe_tid_entry *)p->ctx;
        mtx_unlock(&td->tid_release_lock);
        m = m_get(M_WAIT, MT_DATA);
        mk_tid_release(m, p - td->tid_maps.tid_tab);
        cxgb_ofld_send(tdev, m);
        p->ctx = NULL;
        mtx_lock(&td->tid_release_lock);
    }
    mtx_unlock(&td->tid_release_lock);
}

/* use ctx as a next pointer in the tid release list */
void
cxgb_queue_tid_release(struct toedev *tdev, unsigned int tid)
{
    struct toe_data *td = TOE_DATA(tdev);
    struct toe_tid_entry *p = &td->tid_maps.tid_tab[tid];

    mtx_lock(&td->tid_release_lock);
    p->ctx = td->tid_release_list;
    td->tid_release_list = p;

    if (!p->ctx)
        workqueue_enqueue(td->tid_release_task.wq, &td->tid_release_task.w, NULL);

    mtx_unlock(&td->tid_release_lock);
}
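
/*
 * Deferred-release flow (a sketch of the mechanism above): when
 * cxgb_remove_tid() cannot allocate an mbuf with M_NOWAIT, the entry is
 * threaded onto tid_release_list through its ctx field, and the workqueue
 * task retries the send from a context that may sleep:
 *
 *    cxgb_remove_tid()               t3_process_tid_release_list()
 *      m_get(M_NOWAIT) fails           m_get(M_WAIT) always succeeds
 *      cxgb_queue_tid_release()  -->   mk_tid_release() + cxgb_ofld_send()
 *
 * The task is only enqueued when the list transitions from empty:
 * p->ctx == NULL after the push means p was the first element.
 */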

/*
 * Remove a tid from the TID table.  A client may defer processing its last
 * CPL message if it is locked at the time it arrives, and while the message
 * sits in the client's backlog the TID may be reused for another connection.
 * To handle this we atomically switch the TID association if it still points
 * to the original client context.
 */
void
cxgb_remove_tid(struct toedev *tdev, void *ctx, unsigned int tid)
{
    struct tid_info *t = &(TOE_DATA(tdev))->tid_maps;

    BUG_ON(tid >= t->ntids);
    if (tdev->type == T3A)
        atomic_cmpset_ptr((uintptr_t *)&t->tid_tab[tid].ctx, (long)NULL, (long)ctx);
    else {
        struct mbuf *m;

        m = m_get(M_NOWAIT, MT_DATA);
        if (__predict_true(m != NULL)) {
            mk_tid_release(m, tid);
            cxgb_ofld_send(tdev, m);
            t->tid_tab[tid].ctx = NULL;
        } else
            cxgb_queue_tid_release(tdev, tid);
    }
    atomic_add_int(&t->tids_in_use, -1);
}

int
cxgb_alloc_atid(struct toedev *tdev, struct cxgb_client *client,
             void *ctx)
{
    int atid = -1;
    struct tid_info *t = &(TOE_DATA(tdev))->tid_maps;

    mtx_lock(&t->atid_lock);
    if (t->afree) {
        union active_open_entry *p = t->afree;

        atid = (p - t->atid_tab) + t->atid_base;
        t->afree = p->next;
        p->toe_tid.ctx = ctx;
        p->toe_tid.client = client;
        t->atids_in_use++;
    }
    mtx_unlock(&t->atid_lock);
    return atid;
}
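
/*
 * Typical active-open TID lifetime (a hypothetical sketch; "my_client" and
 * "my_conn" are made-up names for a client and its per-connection context):
 *
 *    int atid = cxgb_alloc_atid(tdev, &my_client, my_conn);
 *    if (atid < 0)
 *        return (ENOMEM);                  // ATID table exhausted
 *    ... issue the active open using atid ...
 *    my_conn = cxgb_free_atid(tdev, atid); // returns the stored ctx
 */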

int
cxgb_alloc_stid(struct toedev *tdev, struct cxgb_client *client,
             void *ctx)
{
    int stid = -1;
    struct tid_info *t = &(TOE_DATA(tdev))->tid_maps;

    mtx_lock(&t->stid_lock);
    if (t->sfree) {
        union listen_entry *p = t->sfree;

        stid = (p - t->stid_tab) + t->stid_base;
        t->sfree = p->next;
        p->toe_tid.ctx = ctx;
        p->toe_tid.client = client;
        t->stids_in_use++;
    }
    mtx_unlock(&t->stid_lock);
    return stid;
}

static int
do_smt_write_rpl(struct toedev *dev, struct mbuf *m)
{
    struct cpl_smt_write_rpl *rpl = cplhdr(m);

    if (rpl->status != CPL_ERR_NONE)
        log(LOG_ERR,
               "Unexpected SMT_WRITE_RPL status %u for entry %u\n",
               rpl->status, GET_TID(rpl));

    return CPL_RET_BUF_DONE;
}

static int
do_l2t_write_rpl(struct toedev *dev, struct mbuf *m)
{
    struct cpl_l2t_write_rpl *rpl = cplhdr(m);

    if (rpl->status != CPL_ERR_NONE)
        log(LOG_ERR,
               "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
               rpl->status, GET_TID(rpl));

    return CPL_RET_BUF_DONE;
}

static int
do_act_open_rpl(struct toedev *dev, struct mbuf *m)
{
    struct cpl_act_open_rpl *rpl = cplhdr(m);
    unsigned int atid = G_TID(ntohl(rpl->atid));
    struct toe_tid_entry *toe_tid;

    toe_tid = lookup_atid(&(TOE_DATA(dev))->tid_maps, atid);
    if (toe_tid->ctx && toe_tid->client && toe_tid->client->handlers &&
        toe_tid->client->handlers[CPL_ACT_OPEN_RPL]) {
        return toe_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, m,
            toe_tid->ctx);
    } else {
        log(LOG_ERR, "%s: received clientless CPL command 0x%x\n",
            dev->name, CPL_ACT_OPEN_RPL);
        return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
    }
}

static int
do_stid_rpl(struct toedev *dev, struct mbuf *m)
{
    union opcode_tid *p = cplhdr(m);
    unsigned int stid = G_TID(ntohl(p->opcode_tid));
    struct toe_tid_entry *toe_tid;

    toe_tid = lookup_stid(&(TOE_DATA(dev))->tid_maps, stid);
    if (toe_tid->ctx && toe_tid->client->handlers &&
        toe_tid->client->handlers[p->opcode]) {
        return toe_tid->client->handlers[p->opcode] (dev, m, toe_tid->ctx);
    } else {
        log(LOG_ERR, "%s: received clientless CPL command 0x%x\n",
            dev->name, p->opcode);
        return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
    }
}

static int
do_hwtid_rpl(struct toedev *dev, struct mbuf *m)
{
    union opcode_tid *p = cplhdr(m);
    unsigned int hwtid;
    struct toe_tid_entry *toe_tid;

    /* XXX debug stub: the dispatch code below is currently unreachable */
    printf("do_hwtid_rpl m=%p\n", m);
    return (0);

    hwtid = G_TID(ntohl(p->opcode_tid));

    toe_tid = lookup_tid(&(TOE_DATA(dev))->tid_maps, hwtid);
    if (toe_tid->ctx && toe_tid->client->handlers &&
        toe_tid->client->handlers[p->opcode]) {
        return toe_tid->client->handlers[p->opcode]
                        (dev, m, toe_tid->ctx);
    } else {
        log(LOG_ERR, "%s: received clientless CPL command 0x%x\n",
            dev->name, p->opcode);
        return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
    }
}

static int
do_cr(struct toedev *dev, struct mbuf *m)
{
    struct cpl_pass_accept_req *req = cplhdr(m);
    unsigned int stid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
    struct toe_tid_entry *toe_tid;

    toe_tid = lookup_stid(&(TOE_DATA(dev))->tid_maps, stid);
    if (toe_tid->ctx && toe_tid->client->handlers &&
        toe_tid->client->handlers[CPL_PASS_ACCEPT_REQ]) {
        return toe_tid->client->handlers[CPL_PASS_ACCEPT_REQ]
                        (dev, m, toe_tid->ctx);
    } else {
        log(LOG_ERR, "%s: received clientless CPL command 0x%x\n",
            dev->name, CPL_PASS_ACCEPT_REQ);
        return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
    }
}

static int
do_abort_req_rss(struct toedev *dev, struct mbuf *m)
{
    union opcode_tid *p = cplhdr(m);
    unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
    struct toe_tid_entry *toe_tid;

    toe_tid = lookup_tid(&(TOE_DATA(dev))->tid_maps, hwtid);
    if (toe_tid->ctx && toe_tid->client->handlers &&
        toe_tid->client->handlers[p->opcode]) {
        return toe_tid->client->handlers[p->opcode]
                        (dev, m, toe_tid->ctx);
    } else {
        struct cpl_abort_req_rss *req = cplhdr(m);
        struct cpl_abort_rpl *rpl;

        struct mbuf *m2 = m_get(M_NOWAIT, MT_DATA);
        if (!m2) {
            log(LOG_NOTICE, "do_abort_req_rss: couldn't get mbuf!\n");
            goto out;
        }

        m_set_priority(m2, CPL_PRIORITY_DATA);
#if 0
        __skb_put(skb, sizeof(struct cpl_abort_rpl));
#endif
        rpl = cplhdr(m2);
        rpl->wr.wr_hi =
            htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
        rpl->wr.wr_lo = htonl(V_WR_TID(GET_TID(req)));
        OPCODE_TID(rpl) =
            htonl(MK_OPCODE_TID(CPL_ABORT_RPL, GET_TID(req)));
        rpl->cmd = req->status;
        cxgb_ofld_send(dev, m2);
 out:
        return CPL_RET_BUF_DONE;
    }
}

static int
do_act_establish(struct toedev *dev, struct mbuf *m)
{
    struct cpl_act_establish *req = cplhdr(m);
    unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
    struct toe_tid_entry *toe_tid;

    toe_tid = lookup_atid(&(TOE_DATA(dev))->tid_maps, atid);
    if (toe_tid->ctx && toe_tid->client->handlers &&
        toe_tid->client->handlers[CPL_ACT_ESTABLISH]) {
        return toe_tid->client->handlers[CPL_ACT_ESTABLISH]
                        (dev, m, toe_tid->ctx);
    } else {
        log(LOG_ERR, "%s: received clientless CPL command 0x%x\n",
            dev->name, CPL_ACT_ESTABLISH);
        return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
    }
}

static int
do_set_tcb_rpl(struct toedev *dev, struct mbuf *m)
{
    struct cpl_set_tcb_rpl *rpl = cplhdr(m);

    if (rpl->status != CPL_ERR_NONE)
        log(LOG_ERR,
            "Unexpected SET_TCB_RPL status %u for tid %u\n",
            rpl->status, GET_TID(rpl));
    return CPL_RET_BUF_DONE;
}

static int
do_trace(struct toedev *dev, struct mbuf *m)
{
#if 0
    struct cpl_trace_pkt *p = cplhdr(m);


    skb->protocol = 0xffff;
    skb->dev = dev->lldev;
    skb_pull(skb, sizeof(*p));
    skb->mac.raw = mtod(m, (char *));
    netif_receive_skb(skb);
#endif
    return 0;
}

static int
do_term(struct toedev *dev, struct mbuf *m)
{
    unsigned int hwtid = ntohl(m_get_priority(m)) >> 8 & 0xfffff;
    unsigned int opcode = G_OPCODE(ntohl(m->m_pkthdr.csum_data));
    struct toe_tid_entry *toe_tid;

    toe_tid = lookup_tid(&(TOE_DATA(dev))->tid_maps, hwtid);
    if (toe_tid->ctx && toe_tid->client->handlers &&
        toe_tid->client->handlers[opcode]) {
        return toe_tid->client->handlers[opcode](dev, m, toe_tid->ctx);
    } else {
        log(LOG_ERR, "%s: received clientless CPL command 0x%x\n",
            dev->name, opcode);
        return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
    }
    return (0);
}

#if defined(FOO)
#include <linux/config.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <net/arp.h>

static int (*orig_arp_constructor)(struct ifnet *);

static void
neigh_suspect(struct ifnet *neigh)
{
    struct hh_cache *hh;

    neigh->output = neigh->ops->output;

    for (hh = neigh->hh; hh; hh = hh->hh_next)
        hh->hh_output = neigh->ops->output;
}

static void
neigh_connect(struct ifnet *neigh)
{
    struct hh_cache *hh;

    neigh->output = neigh->ops->connected_output;

    for (hh = neigh->hh; hh; hh = hh->hh_next)
        hh->hh_output = neigh->ops->hh_output;
}

static inline int
neigh_max_probes(const struct neighbour *n)
{
    const struct neigh_parms *p = n->parms;
    return (n->nud_state & NUD_PROBE ?
        p->ucast_probes :
        p->ucast_probes + p->app_probes + p->mcast_probes);
}

static void
neigh_timer_handler_offload(unsigned long arg)
{
    unsigned long now, next;
    struct neighbour *neigh = (struct neighbour *)arg;
    unsigned state;
    int notify = 0;

    write_lock(&neigh->lock);

    state = neigh->nud_state;
    now = jiffies;
    next = now + HZ;

    if (!(state & NUD_IN_TIMER)) {
#ifndef CONFIG_SMP
        log(LOG_WARNING, "neigh: timer & !nud_in_timer\n");
#endif
        goto out;
    }

    if (state & NUD_REACHABLE) {
        if (time_before_eq(now,
                   neigh->confirmed +
                   neigh->parms->reachable_time)) {
            next = neigh->confirmed + neigh->parms->reachable_time;
        } else if (time_before_eq(now,
                      neigh->used +
                      neigh->parms->delay_probe_time)) {
            neigh->nud_state = NUD_DELAY;
            neigh->updated = jiffies;
            neigh_suspect(neigh);
            next = now + neigh->parms->delay_probe_time;
        } else {
            neigh->nud_state = NUD_STALE;
            neigh->updated = jiffies;
            neigh_suspect(neigh);
            cxgb_neigh_update(neigh);
        }
    } else if (state & NUD_DELAY) {
        if (time_before_eq(now,
                   neigh->confirmed +
                   neigh->parms->delay_probe_time)) {
            neigh->nud_state = NUD_REACHABLE;
            neigh->updated = jiffies;
            neigh_connect(neigh);
            cxgb_neigh_update(neigh);
            next = neigh->confirmed + neigh->parms->reachable_time;
        } else {
            neigh->nud_state = NUD_PROBE;
            neigh->updated = jiffies;
            atomic_set_int(&neigh->probes, 0);
            next = now + neigh->parms->retrans_time;
        }
    } else {
        /* NUD_PROBE|NUD_INCOMPLETE */
        next = now + neigh->parms->retrans_time;
    }
    /*
     * Needed for read of probes
     */
    mb();
    if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
        neigh->probes >= neigh_max_probes(neigh)) {
        struct mbuf *m;

        neigh->nud_state = NUD_FAILED;
        neigh->updated = jiffies;
        notify = 1;
        cxgb_neigh_update(neigh);
        NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);

        /* It is very thin place. report_unreachable is very
           complicated routine. Particularly, it can hit the same
           neighbour entry!
           So that, we try to be accurate and avoid dead loop. --ANK
         */
        while (neigh->nud_state == NUD_FAILED &&
               (m = __skb_dequeue(&neigh->arp_queue)) != NULL) {
            write_unlock(&neigh->lock);
            neigh->ops->error_report(neigh, m);
            write_lock(&neigh->lock);
        }
        skb_queue_purge(&neigh->arp_queue);
    }

    if (neigh->nud_state & NUD_IN_TIMER) {
        if (time_before(next, jiffies + HZ/2))
            next = jiffies + HZ/2;
        if (!mod_timer(&neigh->timer, next))
            neigh_hold(neigh);
    }
    if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
        struct mbuf *m = skb_peek(&neigh->arp_queue);

        write_unlock(&neigh->lock);
        neigh->ops->solicit(neigh, m);
        atomic_add_int(&neigh->probes, 1);
        if (m)
            m_free(m);
    } else {
out:
        write_unlock(&neigh->lock);
    }

#ifdef CONFIG_ARPD
    if (notify && neigh->parms->app_probes)
        neigh_app_notify(neigh);
#endif
    neigh_release(neigh);
}

static int
arp_constructor_offload(struct neighbour *neigh)
{
    if (neigh->ifp && is_offloading(neigh->ifp))
        neigh->timer.function = neigh_timer_handler_offload;
    return orig_arp_constructor(neigh);
}

/*
 * This must match exactly the signature of neigh_update for jprobes to work.
 * It runs from a trap handler with interrupts off so don't disable BH.
 */
static int
neigh_update_offload(struct neighbour *neigh, const u8 *lladdr,
                u8 new, u32 flags)
{
    write_lock(&neigh->lock);
    cxgb_neigh_update(neigh);
    write_unlock(&neigh->lock);
    jprobe_return();
    /* NOTREACHED */
    return 0;
}

static struct jprobe neigh_update_jprobe = {
    .entry = (kprobe_opcode_t *) neigh_update_offload,
    .kp.addr = (kprobe_opcode_t *) neigh_update
};

#ifdef MODULE_SUPPORT
static int
prepare_arp_with_t3core(void)
{
    int err;

    err = register_jprobe(&neigh_update_jprobe);
    if (err) {
        log(LOG_ERR, "Could not install neigh_update jprobe, "
                "error %d\n", err);
        return err;
    }

    orig_arp_constructor = arp_tbl.constructor;
    arp_tbl.constructor  = arp_constructor_offload;

    return 0;
}

static void
restore_arp_sans_t3core(void)
{
    arp_tbl.constructor = orig_arp_constructor;
    unregister_jprobe(&neigh_update_jprobe);
}

#else /* Module support */
static inline int
prepare_arp_with_t3core(void)
{
    return 0;
}

static inline void
restore_arp_sans_t3core(void)
{}
#endif
#endif
/*
 * Process a received packet with an unknown/unexpected CPL opcode.
 */
static int
do_bad_cpl(struct toedev *dev, struct mbuf *m)
{
    log(LOG_ERR, "%s: received bad CPL command 0x%x\n", dev->name,
        *mtod(m, uint32_t *));
    return (CPL_RET_BUF_DONE | CPL_RET_BAD_MSG);
}

/*
 * Handlers for each CPL opcode
 */
static cpl_handler_func cpl_handlers[NUM_CPL_CMDS];

/*
 * Add a new handler to the CPL dispatch table.  A NULL handler may be supplied
 * to unregister an existing handler.
 */
void
t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h)
{
    if (opcode < NUM_CPL_CMDS)
        cpl_handlers[opcode] = h ? h : do_bad_cpl;
    else
        log(LOG_ERR, "T3C: handler registration for "
               "opcode %x failed\n", opcode);
}
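
/*
 * Example (a hypothetical sketch; "my_rx_data" is a made-up handler): a
 * module that wants to intercept CPL_RX_DATA can override the default
 * handler, and later restore the default by passing NULL:
 *
 *    static int my_rx_data(struct toedev *dev, struct mbuf *m)
 *    {
 *        ... consume the message ...
 *        return CPL_RET_BUF_DONE;    // process_rx() then frees m
 *    }
 *
 *    t3_register_cpl_handler(CPL_RX_DATA, my_rx_data);
 *    ...
 *    t3_register_cpl_handler(CPL_RX_DATA, NULL);  // back to do_bad_cpl
 */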

/*
 * TOEDEV's receive method.
 */
int
process_rx(struct toedev *dev, struct mbuf **m, int n)
{
    while (n--) {
        struct mbuf *m0 = *m++;
        unsigned int opcode = G_OPCODE(ntohl(m0->m_pkthdr.csum_data));
        int ret = cpl_handlers[opcode] (dev, m0);

#if VALIDATE_TID
        if (ret & CPL_RET_UNKNOWN_TID) {
            union opcode_tid *p = cplhdr(m0);

            log(LOG_ERR, "%s: CPL message (opcode %u) had "
                   "unknown TID %u\n", dev->name, opcode,
                   G_TID(ntohl(p->opcode_tid)));
        }
#endif
        if (ret & CPL_RET_BUF_DONE)
            m_freem(m0);
    }
    return 0;
}

/*
 * Sends an mbuf to a T3C driver after dealing with any active network taps.
 */
int
cxgb_ofld_send(struct toedev *dev, struct mbuf *m)
{
    int r;

    critical_enter();
    r = dev->send(dev, m);
    critical_exit();
    return r;
}


/**
 * cxgb_ofld_recv - process n received offload packets
 * @dev: the offload device
 * @m: an array of offload packets
 * @n: the number of offload packets
 *
 * Process an array of ingress offload packets.  Each packet is forwarded
 * to any active network taps and then passed to the offload device's receive
 * method.  We optimize passing packets to the receive method by passing
 * it the whole array at once except when there are active taps.
 */
int
cxgb_ofld_recv(struct toedev *dev, struct mbuf **m, int n)
{

#if defined(CONFIG_CHELSIO_T3)
    /* XXX Linux-derived tap handling, kept for reference only */
    if (likely(!netdev_nit))
        return dev->recv(dev, skb, n);

    for ( ; n; n--, skb++) {
        skb[0]->dev = dev->lldev;
        dev_queue_xmit_nit(skb[0], dev->lldev);
        skb[0]->dev = NULL;
        dev->recv(dev, skb, 1);
    }
    return 0;
#else
    return dev->recv(dev, m, n);
#endif
}

void
cxgb_neigh_update(struct rtentry *rt)
{

    if (is_offloading(rt->rt_ifp)) {
        struct toedev *tdev = TOEDEV(rt->rt_ifp);

        BUG_ON(!tdev);
        t3_l2t_update(tdev, rt);
    }
}

static void
set_l2t_ix(struct toedev *tdev, u32 tid, struct l2t_entry *e)
{
    struct mbuf *m;
    struct cpl_set_tcb_field *req;

    m = m_gethdr(M_NOWAIT, MT_DATA);
    if (!m) {
        log(LOG_ERR, "%s: cannot allocate mbuf!\n", __func__);
        return;
    }

    m_set_priority(m, CPL_PRIORITY_CONTROL);
    req = mtod(m, struct cpl_set_tcb_field *);
    req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
    OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
    req->reply = 0;
    req->cpu_idx = 0;
    req->word = htons(W_TCB_L2T_IX);
    req->mask = htobe64(V_TCB_L2T_IX(M_TCB_L2T_IX));
    req->val = htobe64(V_TCB_L2T_IX(e->idx));
    tdev->send(tdev, m);
}

void
cxgb_redirect(struct rtentry *old, struct rtentry *new)
{
    struct ifnet *olddev, *newdev;
    struct tid_info *ti;
    struct toedev *tdev;
    u32 tid;
    int update_tcb;
    struct l2t_entry *e;
    struct toe_tid_entry *te;

    olddev = old->rt_ifp;
    newdev = new->rt_ifp;
    if (!is_offloading(olddev))
        return;
    if (!is_offloading(newdev)) {
        log(LOG_WARNING, "%s: Redirect to non-offload "
            "device ignored.\n", __func__);
        return;
    }
    tdev = TOEDEV(olddev);
    BUG_ON(!tdev);
    if (tdev != TOEDEV(newdev)) {
        log(LOG_WARNING, "%s: Redirect to different "
            "offload device ignored.\n", __func__);
        return;
    }

    /* Add new L2T entry */
    e = t3_l2t_get(tdev, new, ((struct port_info *)new->rt_ifp->if_softc)->port_id);
    if (!e) {
        log(LOG_ERR, "%s: couldn't allocate new l2t entry!\n",
               __func__);
        return;
    }

    /* Walk tid table and notify clients of dst change. */
    ti = &(TOE_DATA(tdev))->tid_maps;
    for (tid = 0; tid < ti->ntids; tid++) {
        te = lookup_tid(ti, tid);
        BUG_ON(!te);
        if (te->ctx && te->client && te->client->redirect) {
            update_tcb = te->client->redirect(te->ctx, old, new,
                              e);
            if (update_tcb) {
                l2t_hold(L2DATA(tdev), e);
                set_l2t_ix(tdev, tid, e);
            }
        }
    }
    l2t_release(L2DATA(tdev), e);
}

/*
 * Allocate a zeroed chunk of memory from the M_DEVBUF pool.
 */
void *
cxgb_alloc_mem(unsigned long size)
{
    return malloc(size, M_DEVBUF, M_ZERO);
}

/*
 * Free memory allocated through cxgb_alloc_mem().
 */
void
cxgb_free_mem(void *addr)
{
    free(addr, M_DEVBUF);
}


/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */
static int
init_tid_tabs(struct tid_info *t, unsigned int ntids,
             unsigned int natids, unsigned int nstids,
             unsigned int atid_base, unsigned int stid_base)
{
    unsigned long size = ntids * sizeof(*t->tid_tab) +
        natids * sizeof(*t->atid_tab) + nstids * sizeof(*t->stid_tab);

    t->tid_tab = cxgb_alloc_mem(size);
    if (!t->tid_tab)
        return (ENOMEM);

    t->stid_tab = (union listen_entry *)&t->tid_tab[ntids];
    t->atid_tab = (union active_open_entry *)&t->stid_tab[nstids];
    t->ntids = ntids;
    t->nstids = nstids;
    t->stid_base = stid_base;
    t->sfree = NULL;
    t->natids = natids;
    t->atid_base = atid_base;
    t->afree = NULL;
    t->stids_in_use = t->atids_in_use = 0;
    atomic_set_int(&t->tids_in_use, 0);
    mtx_init(&t->stid_lock, "stid", NULL, MTX_DEF);
    mtx_init(&t->atid_lock, "atid", NULL, MTX_DEF);

    /*
     * Setup the free lists for stid_tab and atid_tab.
     */
    if (nstids) {
        while (--nstids)
            t->stid_tab[nstids - 1].next = &t->stid_tab[nstids];
        t->sfree = t->stid_tab;
    }
    if (natids) {
        while (--natids)
            t->atid_tab[natids - 1].next = &t->atid_tab[natids];
        t->afree = t->atid_tab;
    }
    return 0;
}
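
/*
 * Layout of the single cxgb_alloc_mem() allocation made above (a sketch):
 *
 *    +---------------------+-----------------------+-----------------------+
 *    | tid_tab[0..ntids-1] | stid_tab[0..nstids-1] | atid_tab[0..natids-1] |
 *    +---------------------+-----------------------+-----------------------+
 *
 * stid_tab and atid_tab are carved out of the tail of the tid_tab
 * allocation, which is why free_tid_maps() below only frees t->tid_tab.
 */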

static void
free_tid_maps(struct tid_info *t)
{
    cxgb_free_mem(t->tid_tab);
}

static inline void
add_adapter(adapter_t *adap)
{
    rw_wlock(&adapter_list_lock);
    TAILQ_INSERT_TAIL(&adapter_list, adap, adapter_entry);
    rw_wunlock(&adapter_list_lock);
}

static inline void
remove_adapter(adapter_t *adap)
{
    rw_wlock(&adapter_list_lock);
    TAILQ_REMOVE(&adapter_list, adap, adapter_entry);
    rw_wunlock(&adapter_list_lock);
}

/*
 * XXX
 */
#define t3_free_l2t(...)

int
cxgb_offload_activate(struct adapter *adapter)
{
    struct toedev *dev = &adapter->tdev;
    int natids, err;
    struct toe_data *t;
    struct tid_range stid_range, tid_range;
    struct mtutab mtutab;
    unsigned int l2t_capacity;

    t = malloc(sizeof(*t), M_DEVBUF, M_WAITOK);
    if (!t)
        return (ENOMEM);

    err = (EOPNOTSUPP);
    if (dev->ctl(dev, GET_TX_MAX_CHUNK, &t->tx_max_chunk) < 0 ||
        dev->ctl(dev, GET_MAX_OUTSTANDING_WR, &t->max_wrs) < 0 ||
        dev->ctl(dev, GET_L2T_CAPACITY, &l2t_capacity) < 0 ||
        dev->ctl(dev, GET_MTUS, &mtutab) < 0 ||
        dev->ctl(dev, GET_TID_RANGE, &tid_range) < 0 ||
        dev->ctl(dev, GET_STID_RANGE, &stid_range) < 0)
        goto out_free;

    err = (ENOMEM);
    L2DATA(dev) = t3_init_l2t(l2t_capacity);
    if (!L2DATA(dev))
        goto out_free;

    natids = min(tid_range.num / 2, MAX_ATIDS);
    err = init_tid_tabs(&t->tid_maps, tid_range.num, natids,
                stid_range.num, ATID_BASE, stid_range.base);
    if (err)
        goto out_free_l2t;

    t->mtus = mtutab.mtus;
    t->nmtus = mtutab.size;

    t->tid_release_task.name = "t3_process_tid_release_list";
    t->tid_release_task.func = t3_process_tid_release_list;
    t->tid_release_task.context = adapter;
    kthread_create(PRI_NONE, 0, NULL, cxgb_make_task, &t->tid_release_task, NULL, "cxgb_make_task");
    mtx_init(&t->tid_release_lock, "tid release", NULL, MTX_DEF);
    t->dev = dev;

    TOE_DATA(dev) = t;
    dev->recv = process_rx;
    dev->neigh_update = t3_l2t_update;
#if 0
    offload_proc_dev_setup(dev);
#endif
    /* Register netevent handler once */
    if (TAILQ_EMPTY(&adapter_list)) {
#if defined(CONFIG_CHELSIO_T3_MODULE)
        if (prepare_arp_with_t3core())
            log(LOG_ERR, "Unable to set offload capabilities\n");
#endif
    }
    add_adapter(adapter);
    return 0;

out_free_l2t:
    t3_free_l2t(L2DATA(dev));
    L2DATA(dev) = NULL;
out_free:
    free(t, M_DEVBUF);
    return err;
}

void
cxgb_offload_deactivate(struct adapter *adapter)
{
    struct toedev *tdev = &adapter->tdev;
    struct toe_data *t = TOE_DATA(tdev);

    remove_adapter(adapter);
    if (TAILQ_EMPTY(&adapter_list)) {
#if defined(CONFIG_CHELSIO_T3_MODULE)
        restore_arp_sans_t3core();
#endif
    }
    free_tid_maps(&t->tid_maps);
    TOE_DATA(tdev) = NULL;
    t3_free_l2t(L2DATA(tdev));
    L2DATA(tdev) = NULL;
    free(t, M_DEVBUF);
}


static inline void
register_tdev(struct toedev *tdev)
{
    static int unit;

    mtx_lock(&cxgb_db_lock);
    snprintf(tdev->name, sizeof(tdev->name), "ofld_dev%d", unit++);
    TAILQ_INSERT_TAIL(&ofld_dev_list, tdev, ofld_entry);
    mtx_unlock(&cxgb_db_lock);
}

static inline void
unregister_tdev(struct toedev *tdev)
{
    mtx_lock(&cxgb_db_lock);
    TAILQ_REMOVE(&ofld_dev_list, tdev, ofld_entry);
    mtx_unlock(&cxgb_db_lock);
}

void
cxgb_adapter_ofld(struct adapter *adapter)
{
    struct toedev *tdev = &adapter->tdev;

    cxgb_set_dummy_ops(tdev);
    tdev->send = t3_offload_tx;
    tdev->ctl = cxgb_offload_ctl;
    tdev->type = adapter->params.rev == 0 ?
             T3A : T3B;

    register_tdev(tdev);
#if 0
    offload_proc_dev_init(tdev);
#endif
}

void
cxgb_adapter_unofld(struct adapter *adapter)
{
    struct toedev *tdev = &adapter->tdev;
#if 0
    offload_proc_dev_cleanup(tdev);
    offload_proc_dev_exit(tdev);
#endif
    tdev->recv = NULL;
    tdev->neigh_update = NULL;

    unregister_tdev(tdev);
}

void
cxgb_offload_init(void)
{
    int i;

    if (inited)
        return;
    else
        inited = 1;

    mtx_init(&cxgb_db_lock, "ofld db", NULL, MTX_DEF);
    rw_init(&adapter_list_lock);
    TAILQ_INIT(&client_list);
    TAILQ_INIT(&ofld_dev_list);
    TAILQ_INIT(&adapter_list);

    for (i = 0; i < NUM_CPL_CMDS; ++i)
        cpl_handlers[i] = do_bad_cpl;

    t3_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl);
    t3_register_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl);
    t3_register_cpl_handler(CPL_PASS_OPEN_RPL, do_stid_rpl);
    t3_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, do_stid_rpl);
    t3_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_cr);
    t3_register_cpl_handler(CPL_PASS_ESTABLISH, do_hwtid_rpl);
    t3_register_cpl_handler(CPL_ABORT_RPL_RSS, do_hwtid_rpl);
    t3_register_cpl_handler(CPL_ABORT_RPL, do_hwtid_rpl);
    t3_register_cpl_handler(CPL_RX_URG_NOTIFY, do_hwtid_rpl);
    t3_register_cpl_handler(CPL_RX_DATA, do_hwtid_rpl);
    t3_register_cpl_handler(CPL_TX_DATA_ACK, do_hwtid_rpl);
    t3_register_cpl_handler(CPL_TX_DMA_ACK, do_hwtid_rpl);
    t3_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl);
    t3_register_cpl_handler(CPL_PEER_CLOSE, do_hwtid_rpl);
    t3_register_cpl_handler(CPL_CLOSE_CON_RPL, do_hwtid_rpl);
    t3_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req_rss);
    t3_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish);
    t3_register_cpl_handler(CPL_SET_TCB_RPL, do_set_tcb_rpl);
    t3_register_cpl_handler(CPL_RDMA_TERMINATE, do_term);
    t3_register_cpl_handler(CPL_RDMA_EC_STATUS, do_hwtid_rpl);
    t3_register_cpl_handler(CPL_TRACE_PKT, do_trace);
    t3_register_cpl_handler(CPL_RX_DATA_DDP, do_hwtid_rpl);
    t3_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_hwtid_rpl);
    t3_register_cpl_handler(CPL_ISCSI_HDR, do_hwtid_rpl);
#if 0
    if (offload_proc_init())
        log(LOG_WARNING, "Unable to create /proc/net/cxgb3 dir\n");
#endif
}

void
cxgb_offload_exit(void)
{
    static int deinited = 0;

    if (deinited)
        return;

    deinited = 1;
    mtx_destroy(&cxgb_db_lock);
    rw_destroy(&adapter_list_lock);
#if 0
    offload_proc_cleanup();
#endif
}

#if 0
static int
offload_info_read_proc(char *buf, char **start, off_t offset,
                  int length, int *eof, void *data)
{
    struct toe_data *d = data;
    struct tid_info *t = &d->tid_maps;
    int len;

    len = snprintf(buf, length, "TID range: 0..%d, in use: %u\n"
              "STID range: %d..%d, in use: %u\n"
              "ATID range: %d..%d, in use: %u\n"
              "MSS: %u\n",
              t->ntids - 1, atomic_read(&t->tids_in_use), t->stid_base,
              t->stid_base + t->nstids - 1, t->stids_in_use,
              t->atid_base, t->atid_base + t->natids - 1,
              t->atids_in_use, d->tx_max_chunk);
    if (len > length)
        len = length;
    *eof = 1;
    return len;
}

static int
offload_info_proc_setup(struct proc_dir_entry *dir,
                   struct toe_data *d)
{
    struct proc_dir_entry *p;

    if (!dir)
        return (EINVAL);

    p = create_proc_read_entry("info", 0, dir, offload_info_read_proc, d);
    if (!p)
        return (ENOMEM);

    p->owner = THIS_MODULE;
    return 0;
}


static int
offload_devices_read_proc(char *buf, char **start, off_t offset,
                     int length, int *eof, void *data)
{
    int len;
    struct toedev *dev;
    struct net_device *ndev;

    len = snprintf(buf, length, "Device           Interfaces\n");

    mtx_lock(&cxgb_db_lock);
    TAILQ_FOREACH(dev, &ofld_dev_list, ofld_entry) {
        if (len >= length)
            break;
        len += snprintf(buf + len, length - len, "%-16s", dev->name);
        read_lock(&dev_base_lock);
        for (ndev = dev_base; ndev; ndev = ndev->next) {
            if (TOEDEV(ndev) == dev) {
                if (len >= length)
                    break;
                len += snprintf(buf + len, length - len, " %s", ndev->name);
            }
        }
        read_unlock(&dev_base_lock);
        if (len >= length)
            break;
        len += snprintf(buf + len, length - len, "\n");
    }
    mtx_unlock(&cxgb_db_lock);

    if (len > length)
        len = length;
    *eof = 1;
    return len;
}

#endif