/**************************************************************************

Copyright (c) 2007, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.


***************************************************************************/


#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cxgb_offload.c,v 1.2 2011/05/18 01:01:59 dyoung Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/bus.h>
#include <sys/ioccom.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/queue.h>

#ifdef CONFIG_DEFINED
#include <cxgb_include.h>
#else
#include "cxgb_include.h"
#endif

#include <net/route.h>

/*
 * XXX
 */
#define LOG_NOTICE 2
#define BUG_ON(...)
#define VALIDATE_TID 0


TAILQ_HEAD(, cxgb_client) client_list;
TAILQ_HEAD(, toedev) ofld_dev_list;
TAILQ_HEAD(, adapter) adapter_list;

static struct mtx cxgb_db_lock;
static struct rwlock adapter_list_lock;


static const unsigned int MAX_ATIDS = 64 * 1024;
static const unsigned int ATID_BASE = 0x100000;
static int inited = 0;

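/*
 * Returns nonzero if the offload capability of the TOE device's adapter
 * has been activated (OFFLOAD_DEVMAP_BIT set in open_device_map).
 */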
static inline int
offload_activated(struct toedev *tdev)
{
	struct adapter *adapter = tdev2adap(tdev);

	return (isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT));
}

/**
 * cxgb_register_client - register an offload client
 * @client: the client
 *
 * Add the client to the client list and call it back for each
 * activated offload device.
 */
void
cxgb_register_client(struct cxgb_client *client)
{
	struct toedev *tdev;

	mtx_lock(&cxgb_db_lock);
	TAILQ_INSERT_TAIL(&client_list, client, client_entry);

	if (client->add) {
		TAILQ_FOREACH(tdev, &ofld_dev_list, ofld_entry) {
			if (offload_activated(tdev))
				client->add(tdev);
		}
	}
	mtx_unlock(&cxgb_db_lock);
}

/**
 * cxgb_unregister_client - unregister an offload client
 * @client: the client
 *
 * Remove the client from the client list and call it back for each
 * activated offload device.
 */
void
cxgb_unregister_client(struct cxgb_client *client)
{
	struct toedev *tdev;

	mtx_lock(&cxgb_db_lock);
	TAILQ_REMOVE(&client_list, client, client_entry);

	if (client->remove) {
		TAILQ_FOREACH(tdev, &ofld_dev_list, ofld_entry) {
			if (offload_activated(tdev))
				client->remove(tdev);
		}
	}
	mtx_unlock(&cxgb_db_lock);
}

/**
 * cxgb_add_clients - activate registered clients for an offload device
 * @tdev: the offload device
 *
 * Call back all registered clients once an offload device is activated.
 */
void
cxgb_add_clients(struct toedev *tdev)
{
	struct cxgb_client *client;

	mtx_lock(&cxgb_db_lock);
	TAILQ_FOREACH(client, &client_list, client_entry) {
		if (client->add)
			client->add(tdev);
	}
	mtx_unlock(&cxgb_db_lock);
}

/**
 * cxgb_remove_clients - deactivate registered clients for an offload device
 * @tdev: the offload device
 *
 * Call back all registered clients once an offload device is deactivated.
 */
void
cxgb_remove_clients(struct toedev *tdev)
{
	struct cxgb_client *client;

	mtx_lock(&cxgb_db_lock);
	TAILQ_FOREACH(client, &client_list, client_entry) {
		if (client->remove)
			client->remove(tdev);
	}
	mtx_unlock(&cxgb_db_lock);
}

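/*
 * Returns nonzero if the interface belongs to a port of one of the
 * adapters on the adapter list, i.e. if it is capable of offloading.
 */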
static int
is_offloading(struct ifnet *ifp)
{
	struct adapter *adapter;
	int port;

	rw_rlock(&adapter_list_lock);
	TAILQ_FOREACH(adapter, &adapter_list, adapter_entry) {
		for_each_port(adapter, port) {
			if (ifp == adapter->port[port].ifp) {
				rw_runlock(&adapter_list_lock);
				return 1;
			}
		}
	}
	rw_runlock(&adapter_list_lock);
	return 0;
}

static struct ifnet *
get_iff_from_mac(adapter_t *adapter, const uint8_t *mac, unsigned int vlan)
{
#ifdef notyet
	/* XXX incomplete port from Linux: "dev" is never initialized here. */
	int i;

	for_each_port(adapter, i) {
		const struct vlan_group *grp;
		const struct port_info *p = &adapter->port[i];
		struct ifnet *ifnet = p->ifp;

		if (!memcmp(p->hw_addr, mac, ETHER_ADDR_LEN)) {
			if (vlan && vlan != EVL_VLID_MASK) {
				grp = p->vlan_grp;
				dev = grp ? grp->vlan_devices[vlan] : NULL;
			} else
				while (dev->master)
					dev = dev->master;
			return dev;
		}
	}
#endif
	return NULL;
}

static inline void
failover_fixup(adapter_t *adapter, int port)
{
	if (adapter->params.rev == 0) {
		struct ifnet *ifp = adapter->port[port].ifp;
		struct cmac *mac = &adapter->port[port].mac;
		if (!(ifp->if_flags & IFF_UP)) {
			/* Failover triggered by the interface ifdown */
			t3_write_reg(adapter, A_XGM_TX_CTRL + mac->offset,
				     F_TXEN);
			t3_read_reg(adapter, A_XGM_TX_CTRL + mac->offset);
		} else {
			/* Failover triggered by the interface link down */
			t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
			t3_read_reg(adapter, A_XGM_RX_CTRL + mac->offset);
			t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset,
				     F_RXEN);
		}
	}
}

static int
cxgb_ulp_iscsi_ctl(adapter_t *adapter, unsigned int req, void *data)
{
	int ret = 0;
	struct ulp_iscsi_info *uiip = data;

	switch (req) {
	case ULP_ISCSI_GET_PARAMS:
		uiip->llimit = t3_read_reg(adapter, A_ULPRX_ISCSI_LLIMIT);
		uiip->ulimit = t3_read_reg(adapter, A_ULPRX_ISCSI_ULIMIT);
		uiip->tagmask = t3_read_reg(adapter, A_ULPRX_ISCSI_TAGMASK);
		/*
		 * On tx, the iscsi pdu has to be <= tx page size and has to
		 * fit into the Tx PM FIFO.
		 */
		uiip->max_txsz = min(adapter->params.tp.tx_pg_size,
				     t3_read_reg(adapter, A_PM1_TX_CFG) >> 17);
		/*
		 * On rx, the iscsi pdu has to be < rx page size and the
		 * whole pdu + cpl headers has to fit into one sge buffer.
		 */
		uiip->max_rxsz =
		    (unsigned int)min(adapter->params.tp.rx_pg_size,
			(adapter->sge.qs[0].fl[1].buf_size -
			 sizeof(struct cpl_rx_data) * 2 -
			 sizeof(struct cpl_rx_data_ddp)));
		break;
	case ULP_ISCSI_SET_PARAMS:
		t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask);
		break;
	default:
		ret = (EOPNOTSUPP);
	}
	return ret;
}


/* Response queue used for RDMA events. */
#define ASYNC_NOTIF_RSPQ 0

static int
cxgb_rdma_ctl(adapter_t *adapter, unsigned int req, void *data)
{
	int ret = 0;

	switch (req) {
	case RDMA_GET_PARAMS: {
		struct rdma_info *req2 = data;

		req2->tpt_base = t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT);
		req2->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT);
		req2->pbl_base = t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT);
		req2->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT);
		req2->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT);
		req2->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT);
		break;
	}
	case RDMA_CQ_OP: {
		struct rdma_cq_op *req2 = data;

		/* may be called in any context */
		mtx_lock(&adapter->sge.reg_lock);
		ret = t3_sge_cqcntxt_op(adapter, req2->id, req2->op,
					req2->credits);
		mtx_unlock(&adapter->sge.reg_lock);
		break;
	}
	case RDMA_GET_MEM: {
		struct ch_mem_range *t = data;
		struct mc7 *mem;

		if ((t->addr & 7) || (t->len & 7))
			return (EINVAL);
		if (t->mem_id == MEM_CM)
			mem = &adapter->cm;
		else if (t->mem_id == MEM_PMRX)
			mem = &adapter->pmrx;
		else if (t->mem_id == MEM_PMTX)
			mem = &adapter->pmtx;
		else
			return (EINVAL);

		ret = t3_mc7_bd_read(mem, t->addr/8, t->len/8, (u64 *)t->buf);
		if (ret)
			return (ret);
		break;
	}
	case RDMA_CQ_SETUP: {
		struct rdma_cq_setup *req2 = data;

		mtx_lock(&adapter->sge.reg_lock);
		ret = t3_sge_init_cqcntxt(adapter, req2->id, req2->base_addr,
					  req2->size, ASYNC_NOTIF_RSPQ,
					  req2->ovfl_mode, req2->credits,
					  req2->credit_thres);
		mtx_unlock(&adapter->sge.reg_lock);
		break;
	}
	case RDMA_CQ_DISABLE:
		mtx_lock(&adapter->sge.reg_lock);
		ret = t3_sge_disable_cqcntxt(adapter, *(unsigned int *)data);
		mtx_unlock(&adapter->sge.reg_lock);
		break;
	case RDMA_CTRL_QP_SETUP: {
		struct rdma_ctrlqp_setup *req2 = data;

		mtx_lock(&adapter->sge.reg_lock);
		ret = t3_sge_init_ecntxt(adapter, FW_RI_SGEEC_START, 0,
					 SGE_CNTXT_RDMA, ASYNC_NOTIF_RSPQ,
					 req2->base_addr, req2->size,
					 FW_RI_TID_START, 1, 0);
		mtx_unlock(&adapter->sge.reg_lock);
		break;
	}
	default:
		ret = EOPNOTSUPP;
	}
	return (ret);
}

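/*
 * Control request dispatcher for a TOE device.  Simple queries are answered
 * directly from the adapter's parameters; ULP iSCSI and RDMA requests are
 * forwarded to their respective helpers once the adapter is running.
 */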
static int
cxgb_offload_ctl(struct toedev *tdev, unsigned int req, void *data)
{
	struct adapter *adapter = tdev2adap(tdev);
	struct tid_range *tid;
	struct mtutab *mtup;
	struct iff_mac *iffmacp;
	struct ddp_params *ddpp;
	struct adap_ports *ports;
	int port;

	switch (req) {
	case GET_MAX_OUTSTANDING_WR:
		*(unsigned int *)data = FW_WR_NUM;
		break;
	case GET_WR_LEN:
		*(unsigned int *)data = WR_FLITS;
		break;
	case GET_TX_MAX_CHUNK:
		*(unsigned int *)data = 1 << 20; /* 1MB */
		break;
	case GET_TID_RANGE:
		tid = data;
		tid->num = t3_mc5_size(&adapter->mc5) -
		    adapter->params.mc5.nroutes -
		    adapter->params.mc5.nfilters -
		    adapter->params.mc5.nservers;
		tid->base = 0;
		break;
	case GET_STID_RANGE:
		tid = data;
		tid->num = adapter->params.mc5.nservers;
		tid->base = t3_mc5_size(&adapter->mc5) - tid->num -
		    adapter->params.mc5.nfilters -
		    adapter->params.mc5.nroutes;
		break;
	case GET_L2T_CAPACITY:
		*(unsigned int *)data = 2048;
		break;
	case GET_MTUS:
		mtup = data;
		mtup->size = NMTUS;
		mtup->mtus = adapter->params.mtus;
		break;
	case GET_IFF_FROM_MAC:
		iffmacp = data;
		iffmacp->dev = get_iff_from_mac(adapter, iffmacp->mac_addr,
					iffmacp->vlan_tag & EVL_VLID_MASK);
		break;
	case GET_DDP_PARAMS:
		ddpp = data;
		ddpp->llimit = t3_read_reg(adapter, A_ULPRX_TDDP_LLIMIT);
		ddpp->ulimit = t3_read_reg(adapter, A_ULPRX_TDDP_ULIMIT);
		ddpp->tag_mask = t3_read_reg(adapter, A_ULPRX_TDDP_TAGMASK);
		break;
	case GET_PORTS:
		ports = data;
		ports->nports = adapter->params.nports;
		for_each_port(adapter, port)
			ports->lldevs[port] = adapter->port[port].ifp;
		break;
	case FAILOVER:
		port = *(int *)data;
		t3_port_failover(adapter, port);
		failover_fixup(adapter, port);
		break;
	case FAILOVER_DONE:
		port = *(int *)data;
		t3_failover_done(adapter, port);
		break;
	case FAILOVER_CLEAR:
		t3_failover_clear(adapter);
		break;
	case ULP_ISCSI_GET_PARAMS:
	case ULP_ISCSI_SET_PARAMS:
		if (!offload_running(adapter))
			return (EAGAIN);
		return cxgb_ulp_iscsi_ctl(adapter, req, data);
	case RDMA_GET_PARAMS:
	case RDMA_CQ_OP:
	case RDMA_CQ_SETUP:
	case RDMA_CQ_DISABLE:
	case RDMA_CTRL_QP_SETUP:
	case RDMA_GET_MEM:
		if (!offload_running(adapter))
			return (EAGAIN);
		return cxgb_rdma_ctl(adapter, req, data);
	default:
		return (EOPNOTSUPP);
	}
	return 0;
}

/*
 * Dummy handler for Rx offload packets in case we get an offload packet before
 * proper processing is setup. This complains and drops the packet as it isn't
 * normal to get offload packets at this stage.
 */
static int
rx_offload_blackhole(struct toedev *dev, struct mbuf **m, int n)
{
	CH_ERR(tdev2adap(dev), "%d unexpected offload packets, first data 0x%x\n",
	       n, *mtod(m[0], uint32_t *));
	while (n--)
		m_freem(m[n]);
	return 0;
}

static void
dummy_neigh_update(struct toedev *dev, struct rtentry *neigh)
{
}

void
cxgb_set_dummy_ops(struct toedev *dev)
{
	dev->recv = rx_offload_blackhole;
	dev->neigh_update = dummy_neigh_update;
}

/*
 * Free an active-open TID.
 */
void *
cxgb_free_atid(struct toedev *tdev, int atid)
{
	struct tid_info *t = &(TOE_DATA(tdev))->tid_maps;
	union active_open_entry *p = atid2entry(t, atid);
	void *ctx = p->toe_tid.ctx;

	mtx_lock(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	mtx_unlock(&t->atid_lock);

	return ctx;
}

/*
 * Free a server TID and return it to the free pool.
 */
void
cxgb_free_stid(struct toedev *tdev, int stid)
{
	struct tid_info *t = &(TOE_DATA(tdev))->tid_maps;
	union listen_entry *p = stid2entry(t, stid);

	mtx_lock(&t->stid_lock);
	p->next = t->sfree;
	t->sfree = p;
	t->stids_in_use--;
	mtx_unlock(&t->stid_lock);
}

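/*
 * Insert a fully open connection's TID: record the owning client and its
 * context so subsequent CPL messages for the TID can be dispatched.
 */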
void
cxgb_insert_tid(struct toedev *tdev, struct cxgb_client *client,
	void *ctx, unsigned int tid)
{
	struct tid_info *t = &(TOE_DATA(tdev))->tid_maps;

	t->tid_tab[tid].client = client;
	t->tid_tab[tid].ctx = ctx;
	atomic_add_int(&t->tids_in_use, 1);
}

/*
 * Populate a TID_RELEASE WR. The mbuf must be already properly sized.
 */
static inline void
mk_tid_release(struct mbuf *m, unsigned int tid)
{
	struct cpl_tid_release *req;

	m_set_priority(m, CPL_PRIORITY_SETUP);
	req = mtod(m, struct cpl_tid_release *);
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}

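/*
 * Workqueue function that drains the deferred TID-release list, sending a
 * CPL_TID_RELEASE work request for each entry.  Entries are queued here by
 * cxgb_queue_tid_release() when an mbuf could not be allocated inline.
 */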
static void
t3_process_tid_release_list(struct work *wk, void *data)
{
	struct mbuf *m;
	struct toedev *tdev = data;
	struct toe_data *td = TOE_DATA(tdev);

	mtx_lock(&td->tid_release_lock);
	while (td->tid_release_list) {
		struct toe_tid_entry *p = td->tid_release_list;

		td->tid_release_list = (struct toe_tid_entry *)p->ctx;
		mtx_unlock(&td->tid_release_lock);
		m = m_get(M_WAIT, MT_DATA);
		mk_tid_release(m, p - td->tid_maps.tid_tab);
		cxgb_ofld_send(tdev, m);
		p->ctx = NULL;
		mtx_lock(&td->tid_release_lock);
	}
	mtx_unlock(&td->tid_release_lock);
}

/* use ctx as a next pointer in the tid release list */
void
cxgb_queue_tid_release(struct toedev *tdev, unsigned int tid)
{
	struct toe_data *td = TOE_DATA(tdev);
	struct toe_tid_entry *p = &td->tid_maps.tid_tab[tid];

	mtx_lock(&td->tid_release_lock);
	p->ctx = td->tid_release_list;
	td->tid_release_list = p;

	if (!p->ctx)
		workqueue_enqueue(td->tid_release_task.wq,
		    &td->tid_release_task.w, NULL);

	mtx_unlock(&td->tid_release_lock);
}

/*
 * Remove a tid from the TID table. A client may defer processing its last
 * CPL message if it is locked at the time it arrives, and while the message
 * sits in the client's backlog the TID may be reused for another connection.
 * To handle this we atomically switch the TID association if it still points
 * to the original client context.
 */
void
cxgb_remove_tid(struct toedev *tdev, void *ctx, unsigned int tid)
{
	struct tid_info *t = &(TOE_DATA(tdev))->tid_maps;

	BUG_ON(tid >= t->ntids);
	if (tdev->type == T3A)
		atomic_cmpset_ptr((uintptr_t *)&t->tid_tab[tid].ctx,
		    (long)NULL, (long)ctx);
	else {
		struct mbuf *m;

		m = m_get(M_NOWAIT, MT_DATA);
		if (__predict_true(m != NULL)) {
			mk_tid_release(m, tid);
			cxgb_ofld_send(tdev, m);
			t->tid_tab[tid].ctx = NULL;
		} else
			cxgb_queue_tid_release(tdev, tid);
	}
	atomic_add_int(&t->tids_in_use, -1);
}

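/*
 * Allocate an active-open TID from the free list, binding it to the given
 * client and context.  Returns the atid, or -1 if none are available.
 */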
int
cxgb_alloc_atid(struct toedev *tdev, struct cxgb_client *client,
	void *ctx)
{
	int atid = -1;
	struct tid_info *t = &(TOE_DATA(tdev))->tid_maps;

	mtx_lock(&t->atid_lock);
	if (t->afree) {
		union active_open_entry *p = t->afree;

		atid = (p - t->atid_tab) + t->atid_base;
		t->afree = p->next;
		p->toe_tid.ctx = ctx;
		p->toe_tid.client = client;
		t->atids_in_use++;
	}
	mtx_unlock(&t->atid_lock);
	return atid;
}

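/*
 * Allocate a server TID from the free list, binding it to the given
 * client and context.  Returns the stid, or -1 if none are available.
 */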
int
cxgb_alloc_stid(struct toedev *tdev, struct cxgb_client *client,
	void *ctx)
{
	int stid = -1;
	struct tid_info *t = &(TOE_DATA(tdev))->tid_maps;

	mtx_lock(&t->stid_lock);
	if (t->sfree) {
		union listen_entry *p = t->sfree;

		stid = (p - t->stid_tab) + t->stid_base;
		t->sfree = p->next;
		p->toe_tid.ctx = ctx;
		p->toe_tid.client = client;
		t->stids_in_use++;
	}
	mtx_unlock(&t->stid_lock);
	return stid;
}

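/*
 * CPL message handlers.  Replies carrying a TID are looked up in the
 * appropriate TID table and dispatched to the owning client's handler;
 * messages without a registered client fall through to an error path.
 */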
static int
do_smt_write_rpl(struct toedev *dev, struct mbuf *m)
{
	struct cpl_smt_write_rpl *rpl = cplhdr(m);

	if (rpl->status != CPL_ERR_NONE)
		log(LOG_ERR,
		    "Unexpected SMT_WRITE_RPL status %u for entry %u\n",
		    rpl->status, GET_TID(rpl));

	return CPL_RET_BUF_DONE;
}

static int
do_l2t_write_rpl(struct toedev *dev, struct mbuf *m)
{
	struct cpl_l2t_write_rpl *rpl = cplhdr(m);

	if (rpl->status != CPL_ERR_NONE)
		log(LOG_ERR,
		    "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
		    rpl->status, GET_TID(rpl));

	return CPL_RET_BUF_DONE;
}

static int
do_act_open_rpl(struct toedev *dev, struct mbuf *m)
{
	struct cpl_act_open_rpl *rpl = cplhdr(m);
	unsigned int atid = G_TID(ntohl(rpl->atid));
	struct toe_tid_entry *toe_tid;

	toe_tid = lookup_atid(&(TOE_DATA(dev))->tid_maps, atid);
	if (toe_tid->ctx && toe_tid->client && toe_tid->client->handlers &&
	    toe_tid->client->handlers[CPL_ACT_OPEN_RPL]) {
		return toe_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, m,
		    toe_tid->ctx);
	} else {
		log(LOG_ERR, "%s: received clientless CPL command 0x%x\n",
		    dev->name, CPL_ACT_OPEN_RPL);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int
do_stid_rpl(struct toedev *dev, struct mbuf *m)
{
	union opcode_tid *p = cplhdr(m);
	unsigned int stid = G_TID(ntohl(p->opcode_tid));
	struct toe_tid_entry *toe_tid;

	toe_tid = lookup_stid(&(TOE_DATA(dev))->tid_maps, stid);
	if (toe_tid->ctx && toe_tid->client->handlers &&
	    toe_tid->client->handlers[p->opcode]) {
		return toe_tid->client->handlers[p->opcode] (dev, m,
		    toe_tid->ctx);
	} else {
		log(LOG_ERR, "%s: received clientless CPL command 0x%x\n",
		    dev->name, p->opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int
do_hwtid_rpl(struct toedev *dev, struct mbuf *m)
{
	union opcode_tid *p = cplhdr(m);
	unsigned int hwtid;
	struct toe_tid_entry *toe_tid;

	printf("do_hwtid_rpl m=%p\n", m);
	return (0);	/* XXX early debug return; the dispatch below is unreachable */

	hwtid = G_TID(ntohl(p->opcode_tid));

	toe_tid = lookup_tid(&(TOE_DATA(dev))->tid_maps, hwtid);
	if (toe_tid->ctx && toe_tid->client->handlers &&
	    toe_tid->client->handlers[p->opcode]) {
		return toe_tid->client->handlers[p->opcode]
		    (dev, m, toe_tid->ctx);
	} else {
		log(LOG_ERR, "%s: received clientless CPL command 0x%x\n",
		    dev->name, p->opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int
do_cr(struct toedev *dev, struct mbuf *m)
{
	struct cpl_pass_accept_req *req = cplhdr(m);
	unsigned int stid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
	struct toe_tid_entry *toe_tid;

	toe_tid = lookup_stid(&(TOE_DATA(dev))->tid_maps, stid);
	if (toe_tid->ctx && toe_tid->client->handlers &&
	    toe_tid->client->handlers[CPL_PASS_ACCEPT_REQ]) {
		return toe_tid->client->handlers[CPL_PASS_ACCEPT_REQ]
		    (dev, m, toe_tid->ctx);
	} else {
		log(LOG_ERR, "%s: received clientless CPL command 0x%x\n",
		    dev->name, CPL_PASS_ACCEPT_REQ);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int
do_abort_req_rss(struct toedev *dev, struct mbuf *m)
{
	union opcode_tid *p = cplhdr(m);
	unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
	struct toe_tid_entry *toe_tid;

	toe_tid = lookup_tid(&(TOE_DATA(dev))->tid_maps, hwtid);
	if (toe_tid->ctx && toe_tid->client->handlers &&
	    toe_tid->client->handlers[p->opcode]) {
		return toe_tid->client->handlers[p->opcode]
		    (dev, m, toe_tid->ctx);
	} else {
		struct cpl_abort_req_rss *req = cplhdr(m);
		struct cpl_abort_rpl *rpl;

		struct mbuf *m2 = m_get(M_NOWAIT, MT_DATA);
		if (!m2) {
			log(LOG_NOTICE, "do_abort_req_rss: couldn't get mbuf!\n");
			goto out;
		}

		m_set_priority(m2, CPL_PRIORITY_DATA);
#if 0
		__skb_put(skb, sizeof(struct cpl_abort_rpl));
#endif
		rpl = cplhdr(m2);
		rpl->wr.wr_hi =
		    htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
		rpl->wr.wr_lo = htonl(V_WR_TID(GET_TID(req)));
		OPCODE_TID(rpl) =
		    htonl(MK_OPCODE_TID(CPL_ABORT_RPL, GET_TID(req)));
		rpl->cmd = req->status;
		cxgb_ofld_send(dev, m2);
out:
		return CPL_RET_BUF_DONE;
	}
}

static int
do_act_establish(struct toedev *dev, struct mbuf *m)
{
	struct cpl_act_establish *req = cplhdr(m);
	unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
	struct toe_tid_entry *toe_tid;

	toe_tid = lookup_atid(&(TOE_DATA(dev))->tid_maps, atid);
	if (toe_tid->ctx && toe_tid->client->handlers &&
	    toe_tid->client->handlers[CPL_ACT_ESTABLISH]) {
		return toe_tid->client->handlers[CPL_ACT_ESTABLISH]
		    (dev, m, toe_tid->ctx);
	} else {
		log(LOG_ERR, "%s: received clientless CPL command 0x%x\n",
		    dev->name, CPL_ACT_ESTABLISH);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int
do_set_tcb_rpl(struct toedev *dev, struct mbuf *m)
{
	struct cpl_set_tcb_rpl *rpl = cplhdr(m);

	if (rpl->status != CPL_ERR_NONE)
		log(LOG_ERR,
		    "Unexpected SET_TCB_RPL status %u for tid %u\n",
		    rpl->status, GET_TID(rpl));
	return CPL_RET_BUF_DONE;
}

static int
do_trace(struct toedev *dev, struct mbuf *m)
{
#if 0
	struct cpl_trace_pkt *p = cplhdr(m);

	skb->protocol = 0xffff;
	skb->dev = dev->lldev;
	skb_pull(skb, sizeof(*p));
	skb->mac.raw = mtod(m, (char *));
	netif_receive_skb(skb);
#endif
	return 0;
}

static int
do_term(struct toedev *dev, struct mbuf *m)
{
	unsigned int hwtid = ntohl(m_get_priority(m)) >> 8 & 0xfffff;
	unsigned int opcode = G_OPCODE(ntohl(m->m_pkthdr.csum_data));
	struct toe_tid_entry *toe_tid;

	toe_tid = lookup_tid(&(TOE_DATA(dev))->tid_maps, hwtid);
	if (toe_tid->ctx && toe_tid->client->handlers &&
	    toe_tid->client->handlers[opcode]) {
		return toe_tid->client->handlers[opcode](dev, m, toe_tid->ctx);
	} else {
		log(LOG_ERR, "%s: received clientless CPL command 0x%x\n",
		    dev->name, opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
	return (0);
}

#if defined(FOO)
#include <linux/config.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <net/arp.h>

static int (*orig_arp_constructor)(struct ifnet *);

static void
neigh_suspect(struct ifnet *neigh)
{
	struct hh_cache *hh;

	neigh->output = neigh->ops->output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->output;
}

static void
neigh_connect(struct ifnet *neigh)
{
	struct hh_cache *hh;

	neigh->output = neigh->ops->connected_output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->hh_output;
}

static inline int
neigh_max_probes(const struct neighbour *n)
{
	const struct neigh_parms *p = n->parms;
	return (n->nud_state & NUD_PROBE ?
		p->ucast_probes :
		p->ucast_probes + p->app_probes + p->mcast_probes);
}

static void
neigh_timer_handler_offload(unsigned long arg)
{
	unsigned long now, next;
	struct neighbour *neigh = (struct neighbour *)arg;
	unsigned state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER)) {
#ifndef CONFIG_SMP
		log(LOG_WARNING, "neigh: timer & !nud_in_timer\n");
#endif
		goto out;
	}

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed +
				   neigh->parms->reachable_time)) {
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used +
					  neigh->parms->delay_probe_time)) {
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + neigh->parms->delay_probe_time;
		} else {
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			cxgb_neigh_update(neigh);
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed +
				   neigh->parms->delay_probe_time)) {
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			cxgb_neigh_update(neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set_int(&neigh->probes, 0);
			next = now + neigh->parms->retrans_time;
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + neigh->parms->retrans_time;
	}
	/*
	 * Needed for read of probes
	 */
	mb();
	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    neigh->probes >= neigh_max_probes(neigh)) {
		struct mbuf *m;

		neigh->nud_state = NUD_FAILED;
		neigh->updated = jiffies;
		notify = 1;
		cxgb_neigh_update(neigh);
		NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);

		/* It is very thin place. report_unreachable is very
		   complicated routine. Particularly, it can hit the same
		   neighbour entry!
		   So that, we try to be accurate and avoid dead loop. --ANK
		 */
		while (neigh->nud_state == NUD_FAILED &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			write_unlock(&neigh->lock);
			neigh->ops->error_report(neigh, skb);
			write_lock(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		if (time_before(next, jiffies + HZ/2))
			next = jiffies + HZ/2;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		struct mbuf *m = skb_peek(&neigh->arp_queue);

		write_unlock(&neigh->lock);
		neigh->ops->solicit(neigh, skb);
		atomic_add_int(&neigh->probes, 1);
		if (m)
			m_free(m);
	} else {
out:
		write_unlock(&neigh->lock);
	}

#ifdef CONFIG_ARPD
	if (notify && neigh->parms->app_probes)
		neigh_app_notify(neigh);
#endif
	neigh_release(neigh);
}

static int
arp_constructor_offload(struct neighbour *neigh)
{
	if (neigh->ifp && is_offloading(neigh->ifp))
		neigh->timer.function = neigh_timer_handler_offload;
	return orig_arp_constructor(neigh);
}

/*
 * This must match exactly the signature of neigh_update for jprobes to work.
 * It runs from a trap handler with interrupts off so don't disable BH.
 */
static int
neigh_update_offload(struct neighbour *neigh, const u8 *lladdr,
		     u8 new, u32 flags)
{
	write_lock(&neigh->lock);
	cxgb_neigh_update(neigh);
	write_unlock(&neigh->lock);
	jprobe_return();
	/* NOTREACHED */
	return 0;
}

static struct jprobe neigh_update_jprobe = {
	.entry = (kprobe_opcode_t *) neigh_update_offload,
	.kp.addr = (kprobe_opcode_t *) neigh_update
};

#ifdef MODULE_SUPPORT
static int
prepare_arp_with_t3core(void)
{
	int err;

	err = register_jprobe(&neigh_update_jprobe);
	if (err) {
		log(LOG_ERR, "Could not install neigh_update jprobe, "
		    "error %d\n", err);
		return err;
	}

	orig_arp_constructor = arp_tbl.constructor;
	arp_tbl.constructor = arp_constructor_offload;

	return 0;
}

static void
restore_arp_sans_t3core(void)
{
	arp_tbl.constructor = orig_arp_constructor;
	unregister_jprobe(&neigh_update_jprobe);
}

#else /* Module support */
static inline int
prepare_arp_with_t3core(void)
{
	return 0;
}

static inline void
restore_arp_sans_t3core(void)
{}
#endif
#endif
/*
 * Process a received packet with an unknown/unexpected CPL opcode.
 */
static int
do_bad_cpl(struct toedev *dev, struct mbuf *m)
{
	log(LOG_ERR, "%s: received bad CPL command 0x%x\n", dev->name,
	    *mtod(m, uint32_t *));
	return (CPL_RET_BUF_DONE | CPL_RET_BAD_MSG);
}

/*
 * Handlers for each CPL opcode
 */
static cpl_handler_func cpl_handlers[NUM_CPL_CMDS];

/*
 * Add a new handler to the CPL dispatch table. A NULL handler may be supplied
 * to unregister an existing handler.
 */
void
t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h)
{
	if (opcode < NUM_CPL_CMDS)
		cpl_handlers[opcode] = h ? h : do_bad_cpl;
	else
		log(LOG_ERR, "T3C: handler registration for "
		    "opcode %x failed\n", opcode);
}

/*
 * TOEDEV's receive method.
 */
int
process_rx(struct toedev *dev, struct mbuf **m, int n)
{
	while (n--) {
		struct mbuf *m0 = *m++;
		unsigned int opcode = G_OPCODE(ntohl(m0->m_pkthdr.csum_data));
		int ret = cpl_handlers[opcode] (dev, m0);

#if VALIDATE_TID
		if (ret & CPL_RET_UNKNOWN_TID) {
			union opcode_tid *p = cplhdr(m0);

			log(LOG_ERR, "%s: CPL message (opcode %u) had "
			    "unknown TID %u\n", dev->name, opcode,
			    G_TID(ntohl(p->opcode_tid)));
		}
#endif
		if (ret & CPL_RET_BUF_DONE)
			m_freem(m0);
	}
	return 0;
}

/*
 * Sends an mbuf to a T3C driver after dealing with any active network taps.
 */
int
cxgb_ofld_send(struct toedev *dev, struct mbuf *m)
{
	int r;

	critical_enter();
	r = dev->send(dev, m);
	critical_exit();
	return r;
}


/**
 * cxgb_ofld_recv - process n received offload packets
 * @dev: the offload device
 * @m: an array of offload packets
 * @n: the number of offload packets
 *
 * Process an array of ingress offload packets. Each packet is forwarded
 * to any active network taps and then passed to the offload device's receive
 * method. We optimize passing packets to the receive method by passing
 * it the whole array at once except when there are active taps.
 */
int
cxgb_ofld_recv(struct toedev *dev, struct mbuf **m, int n)
{

#if defined(CONFIG_CHELSIO_T3)
	if (likely(!netdev_nit))
		return dev->recv(dev, skb, n);

	for ( ; n; n--, skb++) {
		skb[0]->dev = dev->lldev;
		dev_queue_xmit_nit(skb[0], dev->lldev);
		skb[0]->dev = NULL;
		dev->recv(dev, skb, 1);
	}
	return 0;
#else
	return dev->recv(dev, m, n);
#endif
}

void
cxgb_neigh_update(struct rtentry *rt)
{

	if (is_offloading(rt->rt_ifp)) {
		struct toedev *tdev = TOEDEV(rt->rt_ifp);

		BUG_ON(!tdev);
		t3_l2t_update(tdev, rt);
	}
}

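/*
 * Build and send a CPL_SET_TCB_FIELD work request that repoints the
 * connection's TCB at a new L2T entry, used when a route changes.
 */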
static void
set_l2t_ix(struct toedev *tdev, u32 tid, struct l2t_entry *e)
{
	struct mbuf *m;
	struct cpl_set_tcb_field *req;

	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (!m) {
		log(LOG_ERR, "%s: cannot allocate mbuf!\n", __func__);
		return;
	}

	m_set_priority(m, CPL_PRIORITY_CONTROL);
	req = mtod(m, struct cpl_set_tcb_field *);
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply = 0;
	req->cpu_idx = 0;
	req->word = htons(W_TCB_L2T_IX);
	req->mask = htobe64(V_TCB_L2T_IX(M_TCB_L2T_IX));
	req->val = htobe64(V_TCB_L2T_IX(e->idx));
	tdev->send(tdev, m);
}

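/*
 * Handle a route redirect from the old to the new route.  A new L2T entry
 * is allocated for the new route and every client with a connection on the
 * old route is asked whether its TCB should be switched over to it.
 */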
void
cxgb_redirect(struct rtentry *old, struct rtentry *new)
{
	struct ifnet *olddev, *newdev;
	struct tid_info *ti;
	struct toedev *tdev;
	u32 tid;
	int update_tcb;
	struct l2t_entry *e;
	struct toe_tid_entry *te;

	olddev = old->rt_ifp;
	newdev = new->rt_ifp;
	if (!is_offloading(olddev))
		return;
	if (!is_offloading(newdev)) {
		log(LOG_WARNING, "%s: Redirect to non-offload "
		    "device ignored.\n", __func__);
		return;
	}
	tdev = TOEDEV(olddev);
	BUG_ON(!tdev);
	if (tdev != TOEDEV(newdev)) {
		log(LOG_WARNING, "%s: Redirect to different "
		    "offload device ignored.\n", __func__);
		return;
	}

	/* Add new L2T entry */
	e = t3_l2t_get(tdev, new,
	    ((struct port_info *)new->rt_ifp->if_softc)->port_id);
	if (!e) {
		log(LOG_ERR, "%s: couldn't allocate new l2t entry!\n",
		    __func__);
		return;
	}

	/* Walk tid table and notify clients of dst change. */
	ti = &(TOE_DATA(tdev))->tid_maps;
	for (tid = 0; tid < ti->ntids; tid++) {
		te = lookup_tid(ti, tid);
		BUG_ON(!te);
		if (te->ctx && te->client && te->client->redirect) {
			update_tcb = te->client->redirect(te->ctx, old, new,
			    e);
			if (update_tcb) {
				l2t_hold(L2DATA(tdev), e);
				set_l2t_ix(tdev, tid, e);
			}
		}
	}
	l2t_release(L2DATA(tdev), e);
}

/*
 * Allocate a chunk of zeroed memory.  (The Linux driver uses kmalloc or,
 * if that fails, vmalloc; here plain malloc(9) with M_ZERO suffices.)
 */
void *
cxgb_alloc_mem(unsigned long size)
{
	return malloc(size, M_DEVBUF, M_ZERO);
}

/*
 * Free memory allocated through cxgb_alloc_mem().
 */
void
cxgb_free_mem(void *addr)
{
	free(addr, M_DEVBUF);
}


/*
 * Allocate and initialize the TID tables. Returns 0 on success.
 */
static int
init_tid_tabs(struct tid_info *t, unsigned int ntids,
	unsigned int natids, unsigned int nstids,
	unsigned int atid_base, unsigned int stid_base)
{
	unsigned long size = ntids * sizeof(*t->tid_tab) +
	    natids * sizeof(*t->atid_tab) + nstids * sizeof(*t->stid_tab);

	t->tid_tab = cxgb_alloc_mem(size);
	if (!t->tid_tab)
		return (ENOMEM);

	t->stid_tab = (union listen_entry *)&t->tid_tab[ntids];
	t->atid_tab = (union active_open_entry *)&t->stid_tab[nstids];
	t->ntids = ntids;
	t->nstids = nstids;
	t->stid_base = stid_base;
	t->sfree = NULL;
	t->natids = natids;
	t->atid_base = atid_base;
	t->afree = NULL;
	t->stids_in_use = t->atids_in_use = 0;
	atomic_set_int(&t->tids_in_use, 0);
	mtx_init(&t->stid_lock, "stid", NULL, MTX_DEF);
	mtx_init(&t->atid_lock, "atid", NULL, MTX_DEF);

	/*
	 * Setup the free lists for stid_tab and atid_tab.
	 */
	if (nstids) {
		while (--nstids)
			t->stid_tab[nstids - 1].next = &t->stid_tab[nstids];
		t->sfree = t->stid_tab;
	}
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}
	return 0;
}

static void
free_tid_maps(struct tid_info *t)
{
	cxgb_free_mem(t->tid_tab);
}

static inline void
add_adapter(adapter_t *adap)
{
	rw_wlock(&adapter_list_lock);
	TAILQ_INSERT_TAIL(&adapter_list, adap, adapter_entry);
	rw_wunlock(&adapter_list_lock);
}

static inline void
remove_adapter(adapter_t *adap)
{
	rw_wlock(&adapter_list_lock);
	TAILQ_REMOVE(&adapter_list, adap, adapter_entry);
	rw_wunlock(&adapter_list_lock);
}

/*
 * XXX
 */
#define t3_free_l2t(...)

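/*
 * Bring up offload state for an adapter: query the device limits, set up
 * the L2 table and TID tables, and install the real receive and neighbour
 * update methods.
 */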
int
cxgb_offload_activate(struct adapter *adapter)
{
	struct toedev *dev = &adapter->tdev;
	int natids, err;
	struct toe_data *t;
	struct tid_range stid_range, tid_range;
	struct mtutab mtutab;
	unsigned int l2t_capacity;

	t = malloc(sizeof(*t), M_DEVBUF, M_WAITOK);
	if (!t)
		return (ENOMEM);

	err = (EOPNOTSUPP);
	if (dev->ctl(dev, GET_TX_MAX_CHUNK, &t->tx_max_chunk) < 0 ||
	    dev->ctl(dev, GET_MAX_OUTSTANDING_WR, &t->max_wrs) < 0 ||
	    dev->ctl(dev, GET_L2T_CAPACITY, &l2t_capacity) < 0 ||
	    dev->ctl(dev, GET_MTUS, &mtutab) < 0 ||
	    dev->ctl(dev, GET_TID_RANGE, &tid_range) < 0 ||
	    dev->ctl(dev, GET_STID_RANGE, &stid_range) < 0)
		goto out_free;

	err = (ENOMEM);
	L2DATA(dev) = t3_init_l2t(l2t_capacity);
	if (!L2DATA(dev))
		goto out_free;

	natids = min(tid_range.num / 2, MAX_ATIDS);
	err = init_tid_tabs(&t->tid_maps, tid_range.num, natids,
			    stid_range.num, ATID_BASE, stid_range.base);
	if (err)
		goto out_free_l2t;

	t->mtus = mtutab.mtus;
	t->nmtus = mtutab.size;

	t->tid_release_task.name = "t3_process_tid_release_list";
	t->tid_release_task.func = t3_process_tid_release_list;
	t->tid_release_task.context = adapter;
	kthread_create(PRI_NONE, 0, NULL, cxgb_make_task,
	    &t->tid_release_task, NULL, "cxgb_make_task");
	mtx_init(&t->tid_release_lock, "tid release", NULL, MTX_DEF);
	t->dev = dev;

	TOE_DATA(dev) = t;
	dev->recv = process_rx;
	dev->neigh_update = t3_l2t_update;
#if 0
	offload_proc_dev_setup(dev);
#endif
	/* Register netevent handler once */
	if (TAILQ_EMPTY(&adapter_list)) {
#if defined(CONFIG_CHELSIO_T3_MODULE)
		if (prepare_arp_with_t3core())
			log(LOG_ERR, "Unable to set offload capabilities\n");
#endif
	}
	add_adapter(adapter);
	return 0;

out_free_l2t:
	t3_free_l2t(L2DATA(dev));
	L2DATA(dev) = NULL;
out_free:
	free(t, M_DEVBUF);
	return err;
}

void
cxgb_offload_deactivate(struct adapter *adapter)
{
	struct toedev *tdev = &adapter->tdev;
	struct toe_data *t = TOE_DATA(tdev);

	remove_adapter(adapter);
	if (TAILQ_EMPTY(&adapter_list)) {
#if defined(CONFIG_CHELSIO_T3_MODULE)
		restore_arp_sans_t3core();
#endif
	}
	free_tid_maps(&t->tid_maps);
	TOE_DATA(tdev) = NULL;
	t3_free_l2t(L2DATA(tdev));
	L2DATA(tdev) = NULL;
	free(t, M_DEVBUF);
}

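/*
 * Add/remove a TOE device to/from the global offload device list, giving
 * it a unique ofld_dev%d name on registration.
 */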
static inline void
register_tdev(struct toedev *tdev)
{
	static int unit;

	mtx_lock(&cxgb_db_lock);
	snprintf(tdev->name, sizeof(tdev->name), "ofld_dev%d", unit++);
	TAILQ_INSERT_TAIL(&ofld_dev_list, tdev, ofld_entry);
	mtx_unlock(&cxgb_db_lock);
}

static inline void
unregister_tdev(struct toedev *tdev)
{
	mtx_lock(&cxgb_db_lock);
	TAILQ_REMOVE(&ofld_dev_list, tdev, ofld_entry);
	mtx_unlock(&cxgb_db_lock);
}

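/*
 * Attach offload operations to an adapter's TOE device and register it.
 * The receive method stays a blackhole until cxgb_offload_activate() runs.
 */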
void
cxgb_adapter_ofld(struct adapter *adapter)
{
	struct toedev *tdev = &adapter->tdev;

	cxgb_set_dummy_ops(tdev);
	tdev->send = t3_offload_tx;
	tdev->ctl = cxgb_offload_ctl;
	tdev->type = adapter->params.rev == 0 ? T3A : T3B;

	register_tdev(tdev);
#if 0
	offload_proc_dev_init(tdev);
#endif
}

void
cxgb_adapter_unofld(struct adapter *adapter)
{
	struct toedev *tdev = &adapter->tdev;
#if 0
	offload_proc_dev_cleanup(tdev);
	offload_proc_dev_exit(tdev);
#endif
	tdev->recv = NULL;
	tdev->neigh_update = NULL;

	unregister_tdev(tdev);
}

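/*
 * One-time module initialization: set up the global locks and lists and
 * populate the CPL dispatch table.
 */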
void
cxgb_offload_init(void)
{
	int i;

	if (inited)
		return;
	else
		inited = 1;

	mtx_init(&cxgb_db_lock, "ofld db", NULL, MTX_DEF);
	rw_init(&adapter_list_lock);
	TAILQ_INIT(&client_list);
	TAILQ_INIT(&ofld_dev_list);
	TAILQ_INIT(&adapter_list);

	for (i = 0; i < NUM_CPL_CMDS; ++i)
		cpl_handlers[i] = do_bad_cpl;

	t3_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl);
	t3_register_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl);
	t3_register_cpl_handler(CPL_PASS_OPEN_RPL, do_stid_rpl);
	t3_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, do_stid_rpl);
	t3_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_cr);
	t3_register_cpl_handler(CPL_PASS_ESTABLISH, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_RPL_RSS, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_URG_NOTIFY, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_DATA, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TX_DATA_ACK, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TX_DMA_ACK, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl);
	t3_register_cpl_handler(CPL_PEER_CLOSE, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_CLOSE_CON_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req_rss);
	t3_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish);
	t3_register_cpl_handler(CPL_SET_TCB_RPL, do_set_tcb_rpl);
	t3_register_cpl_handler(CPL_RDMA_TERMINATE, do_term);
	t3_register_cpl_handler(CPL_RDMA_EC_STATUS, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TRACE_PKT, do_trace);
	t3_register_cpl_handler(CPL_RX_DATA_DDP, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ISCSI_HDR, do_hwtid_rpl);
#if 0
	if (offload_proc_init())
		log(LOG_WARNING, "Unable to create /proc/net/cxgb3 dir\n");
#endif
}

void
cxgb_offload_exit(void)
{
	static int deinited = 0;

	if (deinited)
		return;

	deinited = 1;
	mtx_destroy(&cxgb_db_lock);
	rw_destroy(&adapter_list_lock);
#if 0
	offload_proc_cleanup();
#endif
}

#if 0
static int
offload_info_read_proc(char *buf, char **start, off_t offset,
		       int length, int *eof, void *data)
{
	struct toe_data *d = data;
	struct tid_info *t = &d->tid_maps;
	int len;

	len = sprintf(buf, "TID range: 0..%d, in use: %u\n"
		      "STID range: %d..%d, in use: %u\n"
		      "ATID range: %d..%d, in use: %u\n"
		      "MSS: %u\n",
		      t->ntids - 1, atomic_read(&t->tids_in_use), t->stid_base,
		      t->stid_base + t->nstids - 1, t->stids_in_use,
		      t->atid_base, t->atid_base + t->natids - 1,
		      t->atids_in_use, d->tx_max_chunk);
	if (len > length)
		len = length;
	*eof = 1;
	return len;
}

static int
offload_info_proc_setup(struct proc_dir_entry *dir,
			struct toe_data *d)
{
	struct proc_dir_entry *p;

	if (!dir)
		return (EINVAL);

	p = create_proc_read_entry("info", 0, dir, offload_info_read_proc, d);
	if (!p)
		return (ENOMEM);

	p->owner = THIS_MODULE;
	return 0;
}


static int
offload_devices_read_proc(char *buf, char **start, off_t offset,
			  int length, int *eof, void *data)
{
	int len;
	struct toedev *dev;
	struct net_device *ndev;

	len = sprintf(buf, "Device Interfaces\n");

	mtx_lock(&cxgb_db_lock);
	TAILQ_FOREACH(dev, &ofld_dev_list, ofld_entry) {
		len += sprintf(buf + len, "%-16s", dev->name);
		read_lock(&dev_base_lock);
		for (ndev = dev_base; ndev; ndev = ndev->next) {
			if (TOEDEV(ndev) == dev)
				len += sprintf(buf + len, " %s", ndev->name);
		}
		read_unlock(&dev_base_lock);
		len += sprintf(buf + len, "\n");
		if (len >= length)
			break;
	}
	mtx_unlock(&cxgb_db_lock);

	if (len > length)
		len = length;
	*eof = 1;
	return len;
}

#endif
