/**************************************************************************

Copyright (c) 2007, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cxgb_main.c,v 1.1.4.3 2011/05/31 03:04:52 rmind Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/bus.h>
#include <sys/ioccom.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/queue.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <netinet/if_inarp.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#ifdef CONFIG_DEFINED
#include <cxgb_include.h>
#else
#include <dev/pci/cxgb/cxgb_include.h>
#endif

#ifdef PRIV_SUPPORTED
#include <sys/priv.h>
#endif

#include <altq/altq_conf.h>

static int cxgb_setup_msix(adapter_t *, int);
static void cxgb_teardown_msix(adapter_t *);
static int cxgb_init(struct ifnet *);
static void cxgb_init_locked(struct port_info *);
static void cxgb_stop_locked(struct port_info *);
static void cxgb_set_rxmode(struct port_info *);
static int cxgb_ioctl(struct ifnet *, unsigned long, void *);
static void cxgb_start(struct ifnet *);
static void cxgb_stop(struct ifnet *, int);
static void cxgb_start_proc(struct work *, void *);
static int cxgb_media_change(struct ifnet *);
static void cxgb_media_status(struct ifnet *, struct ifmediareq *);
static int setup_sge_qsets(adapter_t *);
static int cxgb_async_intr(void *);
static void cxgb_ext_intr_handler(struct work *, void *);
static void cxgb_tick_handler(struct work *, void *);
static void cxgb_down_locked(struct adapter *sc);
static void cxgb_tick(void *);
static void setup_rss(adapter_t *sc);

/* Attachment glue for the PCI controller end of the device.  Each port of
 * the device is attached separately, as defined later.
 */
static int cxgb_controller_match(device_t dev, cfdata_t match, void *context);
static void cxgb_controller_attach(device_t parent, device_t dev, void *context);
static int cxgb_controller_detach(device_t dev, int flags);
static void cxgb_free(struct adapter *);
static __inline void reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
    unsigned int end);
static void touch_bars(device_t dev);

#ifdef notyet
static int offload_close(struct toedev *tdev);
#endif


CFATTACH_DECL(cxgbc, sizeof(struct adapter), cxgb_controller_match, cxgb_controller_attach, cxgb_controller_detach, NULL);

/*
 * Attachment glue for the ports.  Attachment is done directly to the
 * controller device.
 */
static int cxgb_port_match(device_t dev, cfdata_t match, void *context);
static void cxgb_port_attach(device_t dev, device_t self, void *context);
static int cxgb_port_detach(device_t dev, int flags);

CFATTACH_DECL(cxgb, sizeof(struct port_device), cxgb_port_match, cxgb_port_attach, cxgb_port_detach, NULL);

#define SGE_MSIX_COUNT (SGE_QSETS + 1)
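/*
 * SGE_MSIX_COUNT provides one MSI-X vector per SGE queue set plus one
 * extra vector for asynchronous link/error interrupts (see
 * cxgb_setup_msix() below).
 */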

extern int collapse_mbufs;
#ifdef MSI_SUPPORTED
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi_allowed = 2;
#endif
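
/*
 * When MSI_SUPPORTED is not defined, the MSI/MSI-X allocation code in
 * cxgb_controller_attach() below is compiled out and the driver falls
 * back to legacy line (INTx) interrupts.
 */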

/*
 * The driver normally uses an auto-queue algorithm to determine the number
 * of queue sets per port.  Setting singleq = 1 disables that and forces a
 * single queue set per port; that is currently the default in this port.
 */
static int singleq = 1;

enum {
	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES = 32,
	MIN_FL_ENTRIES = 32,
	MIN_FL_JUMBO_ENTRIES = 32
};

struct filter_info {
	u32 sip;
	u32 sip_mask;
	u32 dip;
	u16 sport;
	u16 dport;
	u32 vlan:12;
	u32 vlan_prio:3;
	u32 mac_hit:1;
	u32 mac_idx:4;
	u32 mac_vld:1;
	u32 pkt_type:2;
	u32 report_filter_id:1;
	u32 pass:1;
	u32 rss:1;
	u32 qset:3;
	u32 locked:1;
	u32 valid:1;
};

enum { FILTER_NO_VLAN_PRI = 7 };

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

/* Table for probing the cards.  The desc field isn't actually used. */
struct cxgb_ident {
	uint16_t vendor;
	uint16_t device;
	int index;
	const char *desc;
} cxgb_identifiers[] = {
	{PCI_VENDOR_ID_CHELSIO, 0x0020, 0, "PE9000"},
	{PCI_VENDOR_ID_CHELSIO, 0x0021, 1, "T302E"},
	{PCI_VENDOR_ID_CHELSIO, 0x0022, 2, "T310E"},
	{PCI_VENDOR_ID_CHELSIO, 0x0023, 3, "T320X"},
	{PCI_VENDOR_ID_CHELSIO, 0x0024, 1, "T302X"},
	{PCI_VENDOR_ID_CHELSIO, 0x0025, 3, "T320E"},
	{PCI_VENDOR_ID_CHELSIO, 0x0026, 2, "T310X"},
	{PCI_VENDOR_ID_CHELSIO, 0x0030, 2, "T3B10"},
	{PCI_VENDOR_ID_CHELSIO, 0x0031, 3, "T3B20"},
	{PCI_VENDOR_ID_CHELSIO, 0x0032, 1, "T3B02"},
	{PCI_VENDOR_ID_CHELSIO, 0x0033, 4, "T3B04"},
	{0, 0, 0, NULL}
};
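
/*
 * cxgb_get_ident() below matches a device against cxgb_identifiers[] by
 * PCI vendor/device ID; the index field selects the entry handed to
 * t3_get_adapter_info().
 */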


static inline char
t3rev2char(struct adapter *adapter)
{
	char rev = 'z';

	switch(adapter->params.rev) {
	case T3_REV_A:
		rev = 'a';
		break;
	case T3_REV_B:
	case T3_REV_B2:
		rev = 'b';
		break;
	case T3_REV_C:
		rev = 'c';
		break;
	}
	return rev;
}

static struct cxgb_ident *cxgb_get_ident(struct pci_attach_args *pa)
{
	struct cxgb_ident *id;
	int vendorid, deviceid;

	vendorid = PCI_VENDOR(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG));
	deviceid = PCI_PRODUCT(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG));

	for (id = cxgb_identifiers; id->desc != NULL; id++) {
		if ((id->vendor == vendorid) &&
		    (id->device == deviceid)) {
			return (id);
		}
	}
	return (NULL);
}

static const struct adapter_info *cxgb_get_adapter_info(struct pci_attach_args *pa)
{
	struct cxgb_ident *id;
	const struct adapter_info *ai;

	id = cxgb_get_ident(pa);
	if (id == NULL)
		return (NULL);

	ai = t3_get_adapter_info(id->index);
	return (ai);
}

static int cxgb_controller_match(device_t dev, cfdata_t match, void *context)
{
	struct pci_attach_args *pa = context;
	const struct adapter_info *ai;

	ai = cxgb_get_adapter_info(pa);
	if (ai == NULL)
		return (0);

	return (100); // we ARE the best driver for this card!!
}

#define FW_FNAME "t3fw%d%d%d"
#define TPEEPROM_NAME "t3%ctpe%d%d%d"
#define TPSRAM_NAME "t3%cps%d%d%d"
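
/*
 * Firmware and TP image name templates.  FW_FNAME takes the three firmware
 * version numbers; the TP names additionally take the chip revision
 * character from t3rev2char().  The expansion happens in the firmware
 * loading code outside this file, presumably along these lines
 * (illustrative only):
 *
 *	char name[32];
 *	snprintf(name, sizeof(name), FW_FNAME,
 *	    FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
 */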

int cxgb_cfprint(void *aux, const char *info);
int cxgb_cfprint(void *aux, const char *info)
{
	if (info)
	{
		printf("cxgb_cfprint(%p, \"%s\")\n", aux, info);
		INT3;
	}

	return (QUIET);
}

void cxgb_make_task(void *context)
{
	struct cxgb_task *w = (struct cxgb_task *)context;

	// we can only use workqueue_create() once the system is up and running
	workqueue_create(&w->wq, w->name, w->func, w->context, PRIBIO, IPL_NET, 0);
//	printf("======>> create workqueue for %s %p\n", w->name, w->wq);
}
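
/*
 * cxgb_make_task() is handed to kthread_create() by the attach routines
 * below because workqueue_create() can only be used once the system is up
 * and running.
 */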

static void
cxgb_controller_attach(device_t parent, device_t dev, void *context)
{
	device_t child;
	const struct adapter_info *ai;
	struct adapter *sc;
	struct pci_attach_args *pa = context;
	struct cxgb_attach_args cxgb_args;
	int locs[2];
	int i, error = 0;
	uint32_t vers;
	int port_qsets = 1;
	int reg;
#ifdef MSI_SUPPORTED
	int msi_needed;
#endif

	sc = device_private(dev);
	sc->dev = dev;
	memcpy(&sc->pa, pa, sizeof(struct pci_attach_args));
	sc->msi_count = 0;
	ai = cxgb_get_adapter_info(pa);

	/*
	 * XXX not really related but a recent addition
	 */
#ifdef MSI_SUPPORTED
	/* find the PCIe link width and set max read request to 4KB */
	if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
		uint16_t lnk, pectl;
		lnk = pci_read_config(dev, reg + 0x12, 2);
		sc->link_width = (lnk >> 4) & 0x3f;

		pectl = pci_read_config(dev, reg + 0x8, 2);
		pectl = (pectl & ~0x7000) | (5 << 12);
		pci_write_config(dev, reg + 0x8, pectl, 2);
	}

	if (sc->link_width != 0 && sc->link_width <= 4 &&
	    (ai->nports0 + ai->nports1) <= 2) {
		device_printf(sc->dev,
		    "PCIe x%d Link, expect reduced performance\n",
		    sc->link_width);
	}
#endif

	touch_bars(dev);

	pci_enable_busmaster(dev);

	/*
	 * Allocate the registers and make them available to the driver.
	 * The registers that we care about for NIC mode are in BAR 0
	 */
	sc->regs_rid = PCI_MAPREG_START;
	t3_os_pci_read_config_4(sc, PCI_MAPREG_START, &reg);

	// call bus_space_map
	sc->bar0 = reg&0xFFFFF000;
	bus_space_map(sc->pa.pa_memt, sc->bar0, 4096, 0, &sc->bar0_handle);

	MTX_INIT(&sc->sge.reg_lock, sc->reglockbuf, NULL, MTX_DEF);
	MTX_INIT(&sc->mdio_lock, sc->mdiolockbuf, NULL, MTX_DEF);
	MTX_INIT(&sc->elmer_lock, sc->elmerlockbuf, NULL, MTX_DEF);

	sc->bt = sc->pa.pa_memt;
	sc->bh = sc->bar0_handle;
	sc->mmio_len = 4096;

	if (t3_prep_adapter(sc, ai, 1) < 0) {
		printf("prep adapter failed\n");
		error = ENODEV;
		goto out;
	}
	/* Allocate the BAR for doing MSI-X.  If it succeeds, try to allocate
	 * enough messages for the queue sets.  If that fails, try falling
	 * back to MSI.  If that fails, then try falling back to the legacy
	 * interrupt pin model.
	 */
#ifdef MSI_SUPPORTED

	sc->msix_regs_rid = 0x20;
	if ((msi_allowed >= 2) &&
	    (sc->msix_regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->msix_regs_rid, RF_ACTIVE)) != NULL) {

		msi_needed = sc->msi_count = SGE_MSIX_COUNT;

		if (((error = pci_alloc_msix(dev, &sc->msi_count)) != 0) ||
		    (sc->msi_count != msi_needed)) {
			device_printf(dev, "msix allocation failed - msi_count = %d"
			    " msi_needed=%d will try msi err=%d\n", sc->msi_count,
			    msi_needed, error);
			sc->msi_count = 0;
			pci_release_msi(dev);
			bus_release_resource(dev, SYS_RES_MEMORY,
			    sc->msix_regs_rid, sc->msix_regs_res);
			sc->msix_regs_res = NULL;
		} else {
			sc->flags |= USING_MSIX;
			sc->cxgb_intr = t3_intr_msix;
		}
	}

	if ((msi_allowed >= 1) && (sc->msi_count == 0)) {
		sc->msi_count = 1;
		if (pci_alloc_msi(dev, &sc->msi_count)) {
			device_printf(dev, "alloc msi failed - will try INTx\n");
			sc->msi_count = 0;
			pci_release_msi(dev);
		} else {
			sc->flags |= USING_MSI;
			sc->irq_rid = 1;
			sc->cxgb_intr = t3_intr_msi;
		}
	}
#endif
	if (sc->msi_count == 0) {
		device_printf(dev, "using line interrupts\n");
		sc->irq_rid = 0;
		sc->cxgb_intr = t3b_intr;
	}

	sc->ext_intr_task.name = "cxgb_ext_intr_handler";
	sc->ext_intr_task.func = cxgb_ext_intr_handler;
	sc->ext_intr_task.context = sc;
	kthread_create(PRI_NONE, 0, NULL, cxgb_make_task, &sc->ext_intr_task, NULL, "cxgb_make_task");

	sc->tick_task.name = "cxgb_tick_handler";
	sc->tick_task.func = cxgb_tick_handler;
	sc->tick_task.context = sc;
	kthread_create(PRI_NONE, 0, NULL, cxgb_make_task, &sc->tick_task, NULL, "cxgb_make_task");

	/* Create a periodic callout for checking adapter status */
	callout_init(&sc->cxgb_tick_ch, 0);

	if (t3_check_fw_version(sc) != 0) {
		/*
		 * Warn user that a firmware update will be attempted in init.
		 */
		device_printf(dev, "firmware needs to be updated to version %d.%d.%d\n",
		    FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
		sc->flags &= ~FW_UPTODATE;
	} else {
		sc->flags |= FW_UPTODATE;
	}

	if (t3_check_tpsram_version(sc) != 0) {
		/*
		 * Warn user that a firmware update will be attempted in init.
		 */
		device_printf(dev, "SRAM needs to be updated to version %c-%d.%d.%d\n",
		    t3rev2char(sc), TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
		sc->flags &= ~TPS_UPTODATE;
	} else {
		sc->flags |= TPS_UPTODATE;
	}

	if ((sc->flags & USING_MSIX) && !singleq)
		port_qsets = (SGE_QSETS/(sc)->params.nports);

	/*
	 * Create a child device for each MAC.  The ethernet attachment
	 * will be done in these children.
	 */
	for (i = 0; i < (sc)->params.nports; i++) {
		struct port_info *pi;

		pi = &sc->port[i];
		pi->adapter = sc;
		pi->nqsets = port_qsets;
		pi->first_qset = i*port_qsets;
		pi->port_id = i;
		pi->tx_chan = i >= ai->nports0;
		pi->txpkt_intf = pi->tx_chan ? 2 * (i - ai->nports0) + 1 : 2 * i;
		sc->rxpkt_map[pi->txpkt_intf] = i;
		cxgb_args.port = i;
		locs[0] = 1;
		locs[1] = i;
		printf("\n"); // for cleaner formatting in dmesg
		child = config_found_sm_loc(dev, "cxgbc", locs, &cxgb_args,
		    cxgb_cfprint, config_stdsubmatch);
		printf("\n"); // for cleaner formatting in dmesg
		sc->portdev[i] = child;
	}

	/*
	 * XXX need to poll for link status
	 */
	sc->params.stats_update_period = 1;

	/* initialize sge private state */
	t3_sge_init_adapter(sc);

	t3_led_ready(sc);

	error = t3_get_fw_version(sc, &vers);
	if (error)
		goto out;

	snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d",
	    G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers),
	    G_FW_VERSION_MICRO(vers));
out:
	if (error)
	{
		cxgb_free(sc);
	}
}

static int
cxgb_controller_detach(device_t dev, int flags)
{
	struct adapter *sc;

	sc = device_private(dev);

	cxgb_free(sc);

	return (0);
}

static void
cxgb_free(struct adapter *sc)
{
	int i;

	ADAPTER_LOCK(sc);
	/*
	 * drops the lock
	 */
	cxgb_down_locked(sc);

#ifdef MSI_SUPPORTED
	if (sc->flags & (USING_MSI | USING_MSIX)) {
		device_printf(sc->dev, "releasing msi message(s)\n");
		pci_release_msi(sc->dev);
	} else {
		device_printf(sc->dev, "no msi message to release\n");
	}
	if (sc->msix_regs_res != NULL) {
		bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->msix_regs_rid,
		    sc->msix_regs_res);
	}
#endif

	t3_sge_deinit_sw(sc);
	/*
	 * Wait for last callout
	 */

	tsleep(&sc, 0, "cxgb unload", 3*hz);

	for (i = 0; i < (sc)->params.nports; ++i) {
		if (sc->portdev[i] != NULL)
		{
			INT3;
		}
	}

#ifdef notyet
	if (is_offload(sc)) {
		cxgb_adapter_unofld(sc);
		if (isset(&sc->open_device_map, OFFLOAD_DEVMAP_BIT))
			offload_close(&sc->tdev);
	}
#endif

	t3_free_sge_resources(sc);
	free(sc->filters, M_DEVBUF);
	t3_sge_free(sc);

	MTX_DESTROY(&sc->mdio_lock);
	MTX_DESTROY(&sc->sge.reg_lock);
	MTX_DESTROY(&sc->elmer_lock);
	ADAPTER_LOCK_DEINIT(sc);

	return;
}

/**
 * setup_sge_qsets - configure SGE Tx/Rx/response queues
 * @sc: the controller softc
 *
 * Determines how many sets of SGE queues to use and initializes them.
 * We support multiple queue sets per port if we have MSI-X, otherwise
 * just one queue set per port.
 */
static int
setup_sge_qsets(adapter_t *sc)
{
	int i, j, err, irq_idx = 0, qset_idx = 0;
	u_int ntxq = SGE_TXQ_PER_SET;

	if ((err = t3_sge_alloc(sc)) != 0) {
		device_printf(sc->dev, "t3_sge_alloc returned %d\n", err);
		return (err);
	}

	if (sc->params.rev > 0 && !(sc->flags & USING_MSI))
		irq_idx = -1;

	for (i = 0; i < (sc)->params.nports; i++) {
		struct port_info *pi = &sc->port[i];

		for (j = 0; j < pi->nqsets; j++, qset_idx++) {
			err = t3_sge_alloc_qset(sc, qset_idx, (sc)->params.nports,
			    (sc->flags & USING_MSIX) ? qset_idx + 1 : irq_idx,
			    &sc->params.sge.qset[qset_idx], ntxq, pi);
			if (err) {
				t3_free_sge_resources(sc);
				device_printf(sc->dev, "t3_sge_alloc_qset failed with %d\n",
				    err);
				return (err);
			}
		}
	}

	return (0);
}

static void
cxgb_teardown_msix(adapter_t *sc)
{
	int i, nqsets;

	for (nqsets = i = 0; i < (sc)->params.nports; i++)
		nqsets += sc->port[i].nqsets;

	for (i = 0; i < nqsets; i++) {
		if (sc->msix_intr_tag[i] != NULL) {
			sc->msix_intr_tag[i] = NULL;
		}
		if (sc->msix_irq_res[i] != NULL) {
			sc->msix_irq_res[i] = NULL;
		}
	}
}

static int
cxgb_setup_msix(adapter_t *sc, int msix_count)
{
	int i, j, k, nqsets, rid;

	/* The first message indicates link changes and error conditions */
	sc->irq_rid = 1;
	/* Allocate PCI interrupt resources. */
	if (pci_intr_map(&sc->pa, &sc->intr_handle))
	{
		printf("cxgb_setup_msix(%d): pci_intr_map() failed\n", __LINE__);
		return (EINVAL);
	}
	sc->intr_cookie = pci_intr_establish(sc->pa.pa_pc, sc->intr_handle,
	    IPL_NET, cxgb_async_intr, sc);
	if (sc->intr_cookie == NULL)
	{
		printf("cxgb_setup_msix(%d): pci_intr_establish() failed\n", __LINE__);
		return (EINVAL);
	}
	for (i = k = 0; i < (sc)->params.nports; i++) {
		nqsets = sc->port[i].nqsets;
		for (j = 0; j < nqsets; j++, k++) {
			rid = k + 2;
			if (cxgb_debug)
				printf("rid=%d ", rid);
			INT3;
		}
	}


	return (0);
}

static int cxgb_port_match(device_t dev, cfdata_t match, void *context)
{
	return (100);
}

#define IFCAP_HWCSUM (IFCAP_CSUM_IPv4_Rx | IFCAP_CSUM_IPv4_Tx)
#define IFCAP_RXCSUM IFCAP_CSUM_IPv4_Rx
#define IFCAP_TXCSUM IFCAP_CSUM_IPv4_Tx
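
/*
 * The defines above map the FreeBSD-style IFCAP_HWCSUM/IFCAP_RXCSUM/
 * IFCAP_TXCSUM names used by this driver onto NetBSD's per-direction
 * IPv4 checksum capability flags.
 */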

#ifdef TSO_SUPPORTED
#define CXGB_CAP (IFCAP_HWCSUM | IFCAP_TSO)
/* Don't enable TSO6 yet */
#define CXGB_CAP_ENABLE (IFCAP_HWCSUM | IFCAP_TSO4)
#else
#define CXGB_CAP (IFCAP_HWCSUM)
/* Don't enable TSO6 yet */
#define CXGB_CAP_ENABLE (IFCAP_HWCSUM)
#define IFCAP_TSO4 0x0
#define IFCAP_TSO6 0x0
#define CSUM_TSO 0x0
#endif

static void
cxgb_port_attach(device_t dev, device_t self, void *context)
{
	struct port_info *p;
	struct port_device *pd;
	int *port_number = (int *)context;
	char buf[32];
	struct ifnet *ifp;
	int media_flags;
	pd = (struct port_device *)self; // device is first element in port_device
	pd->dev = self;
	pd->parent = (struct adapter *)dev;
	pd->port_number = *port_number;
	p = &pd->parent->port[*port_number];
	p->pd = pd;

	PORT_LOCK_INIT(p, p->lockbuf);

	/* Allocate an ifnet object and set it up */
	ifp = p->ifp = (void *)malloc(sizeof (struct ifnet), M_IFADDR, M_WAITOK);
	if (ifp == NULL) {
		device_printf(dev, "Cannot allocate ifnet\n");
		return;
	}
	memset(ifp, 0, sizeof(struct ifnet));

	/*
	 * Note that there is currently no watchdog timer.
	 */
	snprintf(buf, sizeof(buf), "cxgb%d", p->port);
	strcpy(ifp->if_xname, buf);
	ifp->if_init = cxgb_init;
	ifp->if_softc = p;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = cxgb_ioctl;
	ifp->if_start = cxgb_start;
	ifp->if_stop = cxgb_stop;
	ifp->if_timer = 0;	/* Disable ifnet watchdog */
	ifp->if_watchdog = NULL;

	ifp->if_snd.ifq_maxlen = TX_ETH_Q_SIZE;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_maxlen);

	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = ifp->if_capenable = 0;
	ifp->if_baudrate = 10000000000; // 10 Gbps
	/*
	 * disable TSO on 4-port - it isn't supported by the firmware yet
	 */
	if (p->adapter->params.nports > 2) {
		ifp->if_capabilities &= ~(IFCAP_TSO4 | IFCAP_TSO6);
		ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TSO6);
	}

	if_attach(ifp);
	ether_ifattach(ifp, p->hw_addr);
	/*
	 * Only default to jumbo frames on 10GigE
	 */
	if (p->adapter->params.nports <= 2)
		ifp->if_mtu = 9000;
	ifmedia_init(&p->media, IFM_IMASK, cxgb_media_change,
	    cxgb_media_status);

	if (!strcmp(p->port_type->desc, "10GBASE-CX4")) {
		media_flags = IFM_ETHER | IFM_10G_CX4 | IFM_FDX;
	} else if (!strcmp(p->port_type->desc, "10GBASE-SR")) {
		media_flags = IFM_ETHER | IFM_10G_SR | IFM_FDX;
	} else if (!strcmp(p->port_type->desc, "10GBASE-XR")) {
		media_flags = IFM_ETHER | IFM_10G_LR | IFM_FDX;
	} else if (!strcmp(p->port_type->desc, "10/100/1000BASE-T")) {
		ifmedia_add(&p->media, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&p->media, IFM_ETHER | IFM_10_T | IFM_FDX,
		    0, NULL);
		ifmedia_add(&p->media, IFM_ETHER | IFM_100_TX,
		    0, NULL);
		ifmedia_add(&p->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
		    0, NULL);
		ifmedia_add(&p->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
		    0, NULL);
		media_flags = 0;
	} else {
		printf("unsupported media type %s\n", p->port_type->desc);
		return;
	}
	if (media_flags) {
		ifmedia_add(&p->media, media_flags, 0, NULL);
		ifmedia_set(&p->media, media_flags);
	} else {
		ifmedia_add(&p->media, IFM_ETHER | IFM_AUTO, 0, NULL);
		ifmedia_set(&p->media, IFM_ETHER | IFM_AUTO);
	}

	snprintf(p->taskqbuf, TASKQ_NAME_LEN, "cxgb_port_taskq%d", p->port_id);
	p->start_task.name = "cxgb_start_proc";
	p->start_task.func = cxgb_start_proc;
	p->start_task.context = ifp;
	kthread_create(PRI_NONE, 0, NULL, cxgb_make_task, &p->start_task, NULL, "cxgb_make_task");

	t3_sge_init_port(p);
}

static int
cxgb_port_detach(device_t dev, int flags)
{
	struct port_info *p;

	p = (struct port_info *)dev; // device is first thing in adapter

	PORT_LOCK(p);
	if (p->ifp->if_drv_flags & IFF_DRV_RUNNING)
		cxgb_stop_locked(p);
	PORT_UNLOCK(p);

	if (p->start_task.wq != NULL) {
		workqueue_destroy(p->start_task.wq);
		p->start_task.wq = NULL;
	}

	ether_ifdetach(p->ifp);
	/*
	 * the lock may be acquired in ifdetach
	 */
	PORT_LOCK_DEINIT(p);
	if_detach(p->ifp);

	return (0);
}

void
t3_fatal_err(struct adapter *sc)
{
	u_int fw_status[4];

	if (sc->flags & FULL_INIT_DONE) {
		t3_sge_stop(sc);
		t3_write_reg(sc, A_XGM_TX_CTRL, 0);
		t3_write_reg(sc, A_XGM_RX_CTRL, 0);
		t3_write_reg(sc, XGM_REG(A_XGM_TX_CTRL, 1), 0);
		t3_write_reg(sc, XGM_REG(A_XGM_RX_CTRL, 1), 0);
		t3_intr_disable(sc);
	}
	device_printf(sc->dev, "encountered fatal error, operation suspended\n");
	if (!t3_cim_ctl_blk_read(sc, 0xa0, 4, fw_status))
		device_printf(sc->dev, "FW_ status: 0x%x, 0x%x, 0x%x, 0x%x\n",
		    fw_status[0], fw_status[1], fw_status[2], fw_status[3]);
}

int
t3_os_find_pci_capability(adapter_t *sc, int cap)
{
	device_t dev;
	uint32_t status;
	uint32_t bhlc;
	uint32_t temp;
	uint8_t ptr;
	dev = sc->dev;
	status = pci_conf_read(sc->pa.pa_pc, sc->pa.pa_tag, PCI_COMMAND_STATUS_REG);
	if (!(status&PCI_STATUS_CAPLIST_SUPPORT))
		return (0);
	bhlc = pci_conf_read(sc->pa.pa_pc, sc->pa.pa_tag, PCI_BHLC_REG);
	switch (PCI_HDRTYPE(bhlc))
	{
	case 0:
	case 1:
		ptr = PCI_CAPLISTPTR_REG;
		break;
	case 2:
		ptr = PCI_CARDBUS_CAPLISTPTR_REG;
		break;
	default:
		return (0);
	}
	temp = pci_conf_read(sc->pa.pa_pc, sc->pa.pa_tag, ptr);
	ptr = PCI_CAPLIST_PTR(temp);
	while (ptr != 0) {
		temp = pci_conf_read(sc->pa.pa_pc, sc->pa.pa_tag, ptr);
		if (PCI_CAPLIST_CAP(temp) == cap)
			return (ptr);
		ptr = PCI_CAPLIST_NEXT(temp);
	}

	return (0);
}

int
t3_os_pci_save_state(struct adapter *sc)
{
	INT3;
	return (0);
}

int
t3_os_pci_restore_state(struct adapter *sc)
{
	INT3;
	return (0);
}

/**
 * t3_os_link_changed - handle link status changes
 * @adapter: the adapter associated with the link change
 * @port_id: the port index whose link status has changed
 * @link_stat: the new status of the link
 * @speed: the new speed setting
 * @duplex: the new duplex setting
 * @fc: the new flow-control setting
 *
 * This is the OS-dependent handler for link status changes.  The OS
 * neutral handler takes care of most of the processing for these events,
 * then calls this handler for any OS-specific processing.
 */
void
t3_os_link_changed(adapter_t *adapter, int port_id, int link_status, int speed,
    int duplex, int fc)
{
	struct port_info *pi = &adapter->port[port_id];
	struct cmac *mac = &adapter->port[port_id].mac;

	if ((pi->ifp->if_flags & IFF_UP) == 0)
		return;

	if (link_status) {
		t3_mac_enable(mac, MAC_DIRECTION_RX);
		if_link_state_change(pi->ifp, LINK_STATE_UP);
	} else {
		if_link_state_change(pi->ifp, LINK_STATE_DOWN);
		pi->phy.ops->power_down(&pi->phy, 1);
		t3_mac_disable(mac, MAC_DIRECTION_RX);
		t3_link_start(&pi->phy, mac, &pi->link_config);
	}
}

/*
 * Interrupt-context handler for external (PHY) interrupts.
 */
void
t3_os_ext_intr_handler(adapter_t *sc)
{
	if (cxgb_debug)
		printf("t3_os_ext_intr_handler\n");
	/*
	 * Schedule a task to handle external interrupts as they may be slow
	 * and we use a mutex to protect MDIO registers.  We disable PHY
	 * interrupts in the meantime and let the task reenable them when
	 * it's done.
	 */
	ADAPTER_LOCK(sc);
	if (sc->slow_intr_mask) {
		sc->slow_intr_mask &= ~F_T3DBG;
		t3_write_reg(sc, A_PL_INT_ENABLE0, sc->slow_intr_mask);
		workqueue_enqueue(sc->ext_intr_task.wq, &sc->ext_intr_task.w, NULL);
	}
	ADAPTER_UNLOCK(sc);
}

void
t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[])
{

	/*
	 * The ifnet might not be allocated before this gets called, as this
	 * is called early on in attach by t3_prep_adapter; just save the
	 * address off in the port structure.
	 */
	if (cxgb_debug)
		printf("set_hw_addr on idx %d addr %02x:%02x:%02x:%02x:%02x:%02x\n",
		    port_idx, hw_addr[0], hw_addr[1], hw_addr[2], hw_addr[3], hw_addr[4], hw_addr[5]);
	memcpy(adapter->port[port_idx].hw_addr, hw_addr, ETHER_ADDR_LEN);
}

/**
 * link_start - enable a port
 * @p: the port to enable
 *
 * Performs the MAC and PHY actions needed to enable a port.
 */
static void
cxgb_link_start(struct port_info *p)
{
	struct ifnet *ifp;
	struct t3_rx_mode rm;
	struct cmac *mac = &p->mac;

	ifp = p->ifp;

	t3_init_rx_mode(&rm, p);
	if (!mac->multiport)
		t3_mac_reset(mac);
	t3_mac_set_mtu(mac, ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
	t3_mac_set_address(mac, 0, p->hw_addr);
	t3_mac_set_rx_mode(mac, &rm);
	t3_link_start(&p->phy, mac, &p->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

/**
 * setup_rss - configure Receive Side Steering (per-queue connection demux)
 * @adap: the adapter
 *
 * Sets up RSS to distribute packets to multiple receive queues.  We
 * configure the RSS CPU lookup table to distribute to the number of HW
 * receive queues, and the response queue lookup table to narrow that
 * down to the response queues actually configured for each port.
 * We always configure the RSS mapping for two ports since the mapping
 * table has plenty of entries.
 */
static void
setup_rss(adapter_t *adap)
{
	int i;
	u_int nq[2];
	uint8_t cpus[SGE_QSETS + 1];
	uint16_t rspq_map[RSS_TABLE_SIZE];

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff;

	nq[0] = nq[1] = 0;
	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		nq[pi->tx_chan] += pi->nqsets;
	}
	nq[0] = max(nq[0], 1U);
	nq[1] = max(nq[1], 1U);
	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = i % nq[0];
		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq[1]) + nq[0];
	}
	/* Calculate the reverse RSS map table */
	for (i = 0; i < RSS_TABLE_SIZE; ++i)
		if (adap->rrss_map[rspq_map[i]] == 0xff)
			adap->rrss_map[rspq_map[i]] = i;

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
	    F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN | F_OFDMAPEN |
	    V_RRCPLCPUSIZE(6), cpus, rspq_map);

}

/*
 * Sends an mbuf to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int
offload_tx(struct toedev *tdev, struct mbuf *m)
{
	int ret;

	critical_enter();
	ret = t3_offload_tx(tdev, m);
	critical_exit();
	return (ret);
}

static void
send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
    int hi, int port)
{
	struct mbuf *m;
	struct mngt_pktsched_wr *req;

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m) {
		req = mtod(m, struct mngt_pktsched_wr *);
		req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
		req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
		req->sched = sched;
		req->idx = qidx;
		req->min = lo;
		req->max = hi;
		req->binding = port;
		m->m_len = m->m_pkthdr.len = sizeof(*req);
		t3_mgmt_tx(adap, m);
	}
}

static void
bind_qsets(adapter_t *sc)
{
	int i, j;

	for (i = 0; i < (sc)->params.nports; ++i) {
		const struct port_info *pi = adap2pinfo(sc, i);

		for (j = 0; j < pi->nqsets; ++j) {
			send_pktsched_cmd(sc, 1, pi->first_qset + j, -1,
			    -1, pi->tx_chan);

		}
	}
}
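
/*
 * bind_qsets() above sends one FW_MNGTOPCODE_PKTSCHED_SET management
 * command per queue set, binding it to its port's TX channel; cxgb_up()
 * below runs it once, guarded by the QUEUES_BOUND flag.
 */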

/**
 * cxgb_up - enable the adapter
 * @adap: adapter being enabled
 *
 * Called when the first port is enabled, this function performs the
 * actions necessary to make an adapter operational, such as completing
 * the initialization of HW modules, and enabling interrupts.
 *
 */
static int
cxgb_up(struct adapter *sc)
{
	int err = 0;

	if ((sc->flags & FULL_INIT_DONE) == 0) {

		if ((sc->flags & FW_UPTODATE) == 0)
			printf("SHOULD UPGRADE FIRMWARE!\n");
		if ((sc->flags & TPS_UPTODATE) == 0)
			printf("SHOULD UPDATE TPSRAM\n");
		err = t3_init_hw(sc, 0);
		if (err)
			goto out;

		t3_write_reg(sc, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

		err = setup_sge_qsets(sc);
		if (err)
			goto out;

		setup_rss(sc);
		sc->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(sc);

	/* If it's MSI or INTx, allocate a single interrupt for everything */
	if ((sc->flags & USING_MSIX) == 0) {
		if (pci_intr_map(&sc->pa, &sc->intr_handle))
		{
			device_printf(sc->dev, "Cannot allocate interrupt\n");
			err = EINVAL;
			goto out;
		}
		device_printf(sc->dev, "allocated intr_handle=%p\n", sc->intr_handle);
		sc->intr_cookie = pci_intr_establish(sc->pa.pa_pc,
		    sc->intr_handle, IPL_NET,
		    sc->cxgb_intr, sc);
		if (sc->intr_cookie == NULL)
		{
			device_printf(sc->dev, "Cannot establish interrupt\n");
			err = EINVAL;
			goto irq_err;
		}
	} else {
		printf("Using MSIX?!?!?!\n");
		INT3;
		cxgb_setup_msix(sc, sc->msi_count);
	}

	t3_sge_start(sc);
	t3_intr_enable(sc);

	if (!(sc->flags & QUEUES_BOUND)) {
		bind_qsets(sc);
		sc->flags |= QUEUES_BOUND;
	}
out:
	return (err);
irq_err:
	CH_ERR(sc, "request_irq failed, err %d\n", err);
	goto out;
}


/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void
cxgb_down_locked(struct adapter *sc)
{
	t3_sge_stop(sc);
	t3_intr_disable(sc);

	INT3; // XXXXXXXXXXXXXXXXXX

	if (sc->flags & USING_MSIX)
		cxgb_teardown_msix(sc);
	ADAPTER_UNLOCK(sc);

	callout_drain(&sc->cxgb_tick_ch);
	callout_drain(&sc->sge_timer_ch);

#ifdef notyet

	if (sc->port[i].tq != NULL)
#endif

}

static int
cxgb_init(struct ifnet *ifp)
{
	struct port_info *p = ifp->if_softc;

	PORT_LOCK(p);
	cxgb_init_locked(p);
	PORT_UNLOCK(p);

	return (0); // ????????????
}

static void
cxgb_init_locked(struct port_info *p)
{
	struct ifnet *ifp;
	adapter_t *sc = p->adapter;
	int err;

	PORT_LOCK_ASSERT_OWNED(p);
	ifp = p->ifp;

	ADAPTER_LOCK(p->adapter);
	if ((sc->open_device_map == 0) && (err = cxgb_up(sc))) {
		ADAPTER_UNLOCK(p->adapter);
		cxgb_stop_locked(p);
		return;
	}
	if (p->adapter->open_device_map == 0) {
		t3_intr_clear(sc);
		t3_sge_init_adapter(sc);
	}
	setbit(&p->adapter->open_device_map, p->port_id);
	ADAPTER_UNLOCK(p->adapter);

	cxgb_link_start(p);
	t3_link_changed(sc, p->port_id);
	ifp->if_baudrate = p->link_config.speed * 1000000;

	device_printf(sc->dev, "enabling interrupts on port=%d\n", p->port_id);
	t3_port_intr_enable(sc, p->port_id);

	callout_reset(&sc->cxgb_tick_ch, sc->params.stats_update_period * hz,
	    cxgb_tick, sc);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}

static void
cxgb_set_rxmode(struct port_info *p)
{
	struct t3_rx_mode rm;
	struct cmac *mac = &p->mac;

	PORT_LOCK_ASSERT_OWNED(p);

	t3_init_rx_mode(&rm, p);
	t3_mac_set_rx_mode(mac, &rm);
}

static void
cxgb_stop_locked(struct port_info *p)
{
	struct ifnet *ifp;

	PORT_LOCK_ASSERT_OWNED(p);
	ADAPTER_LOCK_ASSERT_NOTOWNED(p->adapter);

	ifp = p->ifp;

	t3_port_intr_disable(p->adapter, p->port_id);
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	p->phy.ops->power_down(&p->phy, 1);
	t3_mac_disable(&p->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

	ADAPTER_LOCK(p->adapter);
	clrbit(&p->adapter->open_device_map, p->port_id);


	if (p->adapter->open_device_map == 0) {
		cxgb_down_locked(p->adapter);
	} else
		ADAPTER_UNLOCK(p->adapter);

}

static int
cxgb_set_mtu(struct port_info *p, int mtu)
{
	struct ifnet *ifp = p->ifp;
	struct ifreq ifr;
	int error = 0;

	ifr.ifr_mtu = mtu;

	if ((mtu < ETHERMIN) || (mtu > ETHER_MAX_LEN_JUMBO))
		error = EINVAL;
	else if ((error = ifioctl_common(ifp, SIOCSIFMTU, &ifr)) == ENETRESET) {
		error = 0;
		PORT_LOCK(p);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			callout_stop(&p->adapter->cxgb_tick_ch);
			cxgb_stop_locked(p);
			cxgb_init_locked(p);
		}
		PORT_UNLOCK(p);
	}
	return (error);
}

static int
cxgb_ioctl(struct ifnet *ifp, unsigned long command, void *data)
{
	struct port_info *p = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int flags, error = 0;
1300 1.1.4.2 rmind
1301 1.1.4.2 rmind /*
1302 1.1.4.2 rmind * XXX need to check that we aren't in the middle of an unload
1303 1.1.4.2 rmind */
1304 1.1.4.2 rmind printf("cxgb_ioctl(%d): command=%08lx\n", __LINE__, command);
1305 1.1.4.2 rmind switch (command) {
1306 1.1.4.2 rmind case SIOCSIFMTU:
1307 1.1.4.2 rmind error = cxgb_set_mtu(p, ifr->ifr_mtu);
1308 1.1.4.2 rmind printf("SIOCSIFMTU: error=%d\n", error);
1309 1.1.4.2 rmind break;
1310 1.1.4.2 rmind case SIOCINITIFADDR:
1311 1.1.4.2 rmind printf("SIOCINITIFADDR:\n");
1312 1.1.4.2 rmind PORT_LOCK(p);
1313 1.1.4.2 rmind if (ifa->ifa_addr->sa_family == AF_INET) {
1314 1.1.4.2 rmind ifp->if_flags |= IFF_UP;
1315 1.1.4.2 rmind if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1316 1.1.4.2 rmind cxgb_init_locked(p);
1317 1.1.4.2 rmind arp_ifinit(ifp, ifa);
1318 1.1.4.2 rmind } else
1319 1.1.4.2 rmind error = ether_ioctl(ifp, command, data);
1320 1.1.4.2 rmind PORT_UNLOCK(p);
1321 1.1.4.2 rmind break;
1322 1.1.4.2 rmind case SIOCSIFFLAGS:
1323 1.1.4.2 rmind printf("SIOCSIFFLAGS:\n");
1324 1.1.4.2 rmind #if 0
1325 1.1.4.2 rmind if ((error = ifioctl_common(ifp, cmd, data)) != 0)
1326 1.1.4.2 rmind break;
1327 1.1.4.2 rmind #endif
1328 1.1.4.2 rmind callout_drain(&p->adapter->cxgb_tick_ch);
1329 1.1.4.2 rmind PORT_LOCK(p);
1330 1.1.4.2 rmind if (ifp->if_flags & IFF_UP) {
1331 1.1.4.2 rmind if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1332 1.1.4.2 rmind flags = p->if_flags;
1333 1.1.4.2 rmind if (((ifp->if_flags ^ flags) & IFF_PROMISC) ||
1334 1.1.4.2 rmind ((ifp->if_flags ^ flags) & IFF_ALLMULTI))
1335 1.1.4.2 rmind cxgb_set_rxmode(p);
1336 1.1.4.2 rmind } else
1337 1.1.4.2 rmind cxgb_init_locked(p);
1338 1.1.4.2 rmind p->if_flags = ifp->if_flags;
1339 1.1.4.2 rmind } else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1340 1.1.4.2 rmind cxgb_stop_locked(p);
1341 1.1.4.2 rmind
1342 1.1.4.2 rmind if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1343 1.1.4.2 rmind adapter_t *sc = p->adapter;
1344 1.1.4.2 rmind callout_reset(&sc->cxgb_tick_ch,
1345 1.1.4.2 rmind sc->params.stats_update_period * hz,
1346 1.1.4.2 rmind cxgb_tick, sc);
1347 1.1.4.2 rmind }
1348 1.1.4.2 rmind PORT_UNLOCK(p);
1349 1.1.4.2 rmind break;
1350 1.1.4.2 rmind case SIOCSIFMEDIA:
1351 1.1.4.2 rmind printf("SIOCSIFMEDIA:\n");
1352 1.1.4.2 rmind case SIOCGIFMEDIA:
1353 1.1.4.2 rmind error = ifmedia_ioctl(ifp, ifr, &p->media, command);
1354 1.1.4.2 rmind printf("SIOCGIFMEDIA: error=%d\n", error);
1355 1.1.4.2 rmind break;
1356 1.1.4.2 rmind default:
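		/*
		 * Decode an unrecognized ioctl the way <sys/ioccom.h> lays it
		 * out: direction flags in the top three bits, parameter length
		 * in the next 13 bits, then the group character and the
		 * command number in the low 16 bits.
		 */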
1357 1.1.4.2 rmind printf("Dir = %x Len = %x Group = '%c' Num = %x\n",
1358 1.1.4.2 rmind (unsigned int)(command&0xe0000000)>>28, (unsigned int)(command&0x1fff0000)>>16,
1359 1.1.4.2 rmind (unsigned int)(command&0xff00)>>8, (unsigned int)command&0xff);
1360 1.1.4.2 rmind if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
1361 1.1.4.2 rmind break;
1362 1.1.4.2 rmind error = 0;
1363 1.1.4.2 rmind break;
1364 1.1.4.2 rmind }
1365 1.1.4.2 rmind return (error);
1366 1.1.4.2 rmind }
1367 1.1.4.2 rmind
1368 1.1.4.2 rmind static int
1369 1.1.4.2 rmind cxgb_start_tx(struct ifnet *ifp, uint32_t txmax)
1370 1.1.4.2 rmind {
1371 1.1.4.2 rmind struct sge_qset *qs;
1372 1.1.4.2 rmind struct sge_txq *txq;
1373 1.1.4.2 rmind struct port_info *p = ifp->if_softc;
1374 1.1.4.2 rmind struct mbuf *m = NULL;
1375 1.1.4.2 rmind int err, in_use_init, free_it;
1376 1.1.4.2 rmind
1377 1.1.4.2 rmind if (!p->link_config.link_ok)
1378 1.1.4.2 rmind {
1379 1.1.4.2 rmind return (ENXIO);
1380 1.1.4.2 rmind }
1381 1.1.4.2 rmind
1382 1.1.4.2 rmind if (IFQ_IS_EMPTY(&ifp->if_snd))
1383 1.1.4.2 rmind {
1384 1.1.4.2 rmind return (ENOBUFS);
1385 1.1.4.2 rmind }
1386 1.1.4.2 rmind
1387 1.1.4.2 rmind qs = &p->adapter->sge.qs[p->first_qset];
1388 1.1.4.2 rmind txq = &qs->txq[TXQ_ETH];
1389 1.1.4.2 rmind err = 0;
1390 1.1.4.2 rmind
1391 1.1.4.2 rmind if (txq->flags & TXQ_TRANSMITTING)
1392 1.1.4.2 rmind {
1393 1.1.4.2 rmind return (EINPROGRESS);
1394 1.1.4.2 rmind }
1395 1.1.4.2 rmind
1396 1.1.4.2 rmind mtx_lock(&txq->lock);
1397 1.1.4.2 rmind txq->flags |= TXQ_TRANSMITTING;
1398 1.1.4.2 rmind in_use_init = txq->in_use;
1399 1.1.4.2 rmind while ((txq->in_use - in_use_init < txmax) &&
1400 1.1.4.2 rmind (txq->size > txq->in_use + TX_MAX_DESC)) {
1401 1.1.4.2 rmind free_it = 0;
1402 1.1.4.2 rmind IFQ_DEQUEUE(&ifp->if_snd, m);
1403 1.1.4.2 rmind if (m == NULL)
1404 1.1.4.2 rmind break;
1405 1.1.4.2 rmind /*
1406 1.1.4.2 rmind * Convert chain to M_IOVEC
1407 1.1.4.2 rmind */
1408 1.1.4.2 rmind KASSERT((m->m_flags & M_IOVEC) == 0);
1409 1.1.4.2 rmind #ifdef notyet
1410 1.1.4.2 rmind m0 = m;
1411 1.1.4.2 rmind if (collapse_mbufs && m->m_pkthdr.len > MCLBYTES &&
1412 1.1.4.2 rmind m_collapse(m, TX_MAX_SEGS, &m0) == EFBIG) {
1413 1.1.4.2 rmind if ((m0 = m_defrag(m, M_NOWAIT)) != NULL) {
1414 1.1.4.2 rmind m = m0;
1415 1.1.4.2 rmind m_collapse(m, TX_MAX_SEGS, &m0);
1416 1.1.4.2 rmind } else
1417 1.1.4.2 rmind break;
1418 1.1.4.2 rmind }
1419 1.1.4.2 rmind m = m0;
1420 1.1.4.2 rmind #endif
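		/*
		 * The "notyet" block above is the FreeBSD path that would
		 * coalesce long mbuf chains (m_collapse(), with m_defrag() as
		 * a fallback) so they fit in TX_MAX_SEGS descriptors before
		 * encapsulation.
		 */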
1421 1.1.4.2 rmind if ((err = t3_encap(p, &m, &free_it)) != 0)
1422 1.1.4.2 rmind {
1423 1.1.4.2 rmind printf("t3_encap() returned %d\n", err);
1424 1.1.4.2 rmind break;
1425 1.1.4.2 rmind }
1426 1.1.4.2 rmind // bpf_mtap(ifp, m);
1427 1.1.4.2 rmind if (free_it)
1428 1.1.4.2 rmind {
1429 1.1.4.2 rmind m_freem(m);
1430 1.1.4.2 rmind }
1431 1.1.4.2 rmind }
1432 1.1.4.2 rmind txq->flags &= ~TXQ_TRANSMITTING;
1433 1.1.4.2 rmind mtx_unlock(&txq->lock);
1434 1.1.4.2 rmind
1435 1.1.4.2 rmind if (__predict_false(err)) {
1436 1.1.4.2 rmind if (err == ENOMEM) {
1437 1.1.4.2 rmind ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1438 1.1.4.2 rmind             // XXX: does IF_PREPEND need the if_snd lock here?
1439 1.1.4.2 rmind IF_PREPEND(&ifp->if_snd, m);
1440 1.1.4.2 rmind }
1441 1.1.4.2 rmind }
1442 1.1.4.2 rmind if (err == 0 && m == NULL)
1443 1.1.4.2 rmind err = ENOBUFS;
1444 1.1.4.2 rmind else if ((err == 0) && (txq->size <= txq->in_use + TX_MAX_DESC) &&
1445 1.1.4.2 rmind (ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
1446 1.1.4.2 rmind ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1447 1.1.4.2 rmind err = ENOSPC;
1448 1.1.4.2 rmind }
1449 1.1.4.2 rmind return (err);
1450 1.1.4.2 rmind }
1451 1.1.4.2 rmind
1452 1.1.4.2 rmind static void
1453 1.1.4.2 rmind cxgb_start_proc(struct work *wk, void *arg)
1454 1.1.4.2 rmind {
1455 1.1.4.2 rmind struct ifnet *ifp = arg;
1456 1.1.4.2 rmind struct port_info *pi = ifp->if_softc;
1457 1.1.4.2 rmind struct sge_qset *qs;
1458 1.1.4.2 rmind struct sge_txq *txq;
1459 1.1.4.2 rmind int error;
1460 1.1.4.2 rmind
1461 1.1.4.2 rmind qs = &pi->adapter->sge.qs[pi->first_qset];
1462 1.1.4.2 rmind txq = &qs->txq[TXQ_ETH];
1463 1.1.4.2 rmind
1464 1.1.4.2 rmind do {
1465 1.1.4.2 rmind if (desc_reclaimable(txq) > TX_CLEAN_MAX_DESC >> 2)
1466 1.1.4.2 rmind workqueue_enqueue(pi->timer_reclaim_task.wq, &pi->timer_reclaim_task.w, NULL);
1467 1.1.4.2 rmind
1468 1.1.4.2 rmind error = cxgb_start_tx(ifp, TX_START_MAX_DESC);
1469 1.1.4.2 rmind } while (error == 0);
1470 1.1.4.2 rmind }
1471 1.1.4.2 rmind
1472 1.1.4.2 rmind static void
1473 1.1.4.2 rmind cxgb_start(struct ifnet *ifp)
1474 1.1.4.2 rmind {
1475 1.1.4.2 rmind struct port_info *pi = ifp->if_softc;
1476 1.1.4.2 rmind struct sge_qset *qs;
1477 1.1.4.2 rmind struct sge_txq *txq;
1478 1.1.4.2 rmind int err;
1479 1.1.4.2 rmind
1480 1.1.4.2 rmind qs = &pi->adapter->sge.qs[pi->first_qset];
1481 1.1.4.2 rmind txq = &qs->txq[TXQ_ETH];
1482 1.1.4.2 rmind
1483 1.1.4.2 rmind if (desc_reclaimable(txq) > TX_CLEAN_MAX_DESC >> 2)
1484 1.1.4.2 rmind workqueue_enqueue(pi->timer_reclaim_task.wq, &pi->timer_reclaim_task.w, NULL);
1485 1.1.4.2 rmind
1486 1.1.4.2 rmind err = cxgb_start_tx(ifp, TX_START_MAX_DESC);
1487 1.1.4.2 rmind
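	/*
	 * A return of 0 generally means cxgb_start_tx() stopped after hitting
	 * the per-call descriptor budget rather than draining the send queue,
	 * so hand the remainder to the start task for another pass.
	 */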
1488 1.1.4.2 rmind if (err == 0)
1489 1.1.4.2 rmind workqueue_enqueue(pi->start_task.wq, &pi->start_task.w, NULL);
1490 1.1.4.2 rmind }
1491 1.1.4.2 rmind
1492 1.1.4.2 rmind static void
1493 1.1.4.2 rmind cxgb_stop(struct ifnet *ifp, int reason)
1494 1.1.4.2 rmind {
1495 1.1.4.2 rmind struct port_info *pi = ifp->if_softc;
1496 1.1.4.2 rmind
1497 1.1.4.2 rmind printf("cxgb_stop(): pi=%p, reason=%d\n", pi, reason);
1498 1.1.4.2 rmind INT3;
1499 1.1.4.2 rmind }
1500 1.1.4.2 rmind
1501 1.1.4.2 rmind static int
1502 1.1.4.2 rmind cxgb_media_change(struct ifnet *ifp)
1503 1.1.4.2 rmind {
1504 1.1.4.2 rmind printf("media change not supported: ifp=%p\n", ifp);
1505 1.1.4.2 rmind return (ENXIO);
1506 1.1.4.2 rmind }
1507 1.1.4.2 rmind
1508 1.1.4.2 rmind static void
1509 1.1.4.2 rmind cxgb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1510 1.1.4.2 rmind {
1511 1.1.4.2 rmind struct port_info *p;
1512 1.1.4.2 rmind
1513 1.1.4.2 rmind p = ifp->if_softc;
1514 1.1.4.2 rmind
1515 1.1.4.2 rmind ifmr->ifm_status = IFM_AVALID;
1516 1.1.4.2 rmind ifmr->ifm_active = IFM_ETHER;
1517 1.1.4.2 rmind
1518 1.1.4.2 rmind if (!p->link_config.link_ok)
1519 1.1.4.2 rmind return;
1520 1.1.4.2 rmind
1521 1.1.4.2 rmind ifmr->ifm_status |= IFM_ACTIVE;
1522 1.1.4.2 rmind
1523 1.1.4.2 rmind switch (p->link_config.speed) {
1524 1.1.4.2 rmind case 10:
1525 1.1.4.2 rmind ifmr->ifm_active |= IFM_10_T;
1526 1.1.4.2 rmind break;
1527 1.1.4.2 rmind case 100:
1528 1.1.4.2 rmind ifmr->ifm_active |= IFM_100_TX;
1529 1.1.4.2 rmind break;
1530 1.1.4.2 rmind case 1000:
1531 1.1.4.2 rmind ifmr->ifm_active |= IFM_1000_T;
1532 1.1.4.2 rmind break;
1533 1.1.4.2 rmind }
1534 1.1.4.2 rmind
1535 1.1.4.2 rmind if (p->link_config.duplex)
1536 1.1.4.2 rmind ifmr->ifm_active |= IFM_FDX;
1537 1.1.4.2 rmind else
1538 1.1.4.2 rmind ifmr->ifm_active |= IFM_HDX;
1539 1.1.4.2 rmind }
1540 1.1.4.2 rmind
1541 1.1.4.2 rmind static int
1542 1.1.4.2 rmind cxgb_async_intr(void *data)
1543 1.1.4.2 rmind {
1544 1.1.4.2 rmind adapter_t *sc = data;
1545 1.1.4.2 rmind
1546 1.1.4.2 rmind if (cxgb_debug)
1547 1.1.4.2 rmind device_printf(sc->dev, "cxgb_async_intr\n");
1548 1.1.4.2 rmind     /*
1549 1.1.4.2 rmind      * May need to sleep - defer to the slow interrupt workqueue
1550 1.1.4.2 rmind      */
1551 1.1.4.2 rmind workqueue_enqueue(sc->slow_intr_task.wq, &sc->slow_intr_task.w, NULL);
1552 1.1.4.2 rmind
1553 1.1.4.2 rmind return (1);
1554 1.1.4.2 rmind }
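
/*
 * For reference, the deferral queues used above (e.g. sc->slow_intr_task)
 * are NetBSD workqueue(9) objects.  A minimal creation sketch follows; the
 * queue name, handler binding, priority and IPL are illustrative
 * placeholders, and the real setup happens at attach time elsewhere in
 * this file.
 */
#if 0
static int
cxgb_create_wq_sketch(adapter_t *sc)
{

	/* the handler must have the (struct work *, void *) signature */
	return workqueue_create(&sc->slow_intr_task.wq, "cxgbslow",
	    cxgb_ext_intr_handler /* placeholder handler */, sc,
	    PRI_NONE, IPL_NET, 0);
}
#endif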
1555 1.1.4.2 rmind
1556 1.1.4.2 rmind static void
1557 1.1.4.2 rmind cxgb_ext_intr_handler(struct work *wk, void *arg)
1558 1.1.4.2 rmind {
1559 1.1.4.2 rmind adapter_t *sc = (adapter_t *)arg;
1560 1.1.4.2 rmind
1561 1.1.4.2 rmind if (cxgb_debug)
1562 1.1.4.2 rmind printf("cxgb_ext_intr_handler\n");
1563 1.1.4.2 rmind
1564 1.1.4.2 rmind t3_phy_intr_handler(sc);
1565 1.1.4.2 rmind
1566 1.1.4.2 rmind /* Now reenable external interrupts */
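	/*
	 * The write to A_PL_INT_CAUSE0 below acks the latched F_T3DBG bit
	 * before the saved enable mask is restored, so a still-pending PHY
	 * event can assert the interrupt again.
	 */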
1567 1.1.4.2 rmind ADAPTER_LOCK(sc);
1568 1.1.4.2 rmind if (sc->slow_intr_mask) {
1569 1.1.4.2 rmind sc->slow_intr_mask |= F_T3DBG;
1570 1.1.4.2 rmind t3_write_reg(sc, A_PL_INT_CAUSE0, F_T3DBG);
1571 1.1.4.2 rmind t3_write_reg(sc, A_PL_INT_ENABLE0, sc->slow_intr_mask);
1572 1.1.4.2 rmind }
1573 1.1.4.2 rmind ADAPTER_UNLOCK(sc);
1574 1.1.4.2 rmind }
1575 1.1.4.2 rmind
1576 1.1.4.2 rmind static void
1577 1.1.4.2 rmind check_link_status(adapter_t *sc)
1578 1.1.4.2 rmind {
1579 1.1.4.2 rmind int i;
1580 1.1.4.2 rmind
1581 1.1.4.2 rmind for (i = 0; i < (sc)->params.nports; ++i) {
1582 1.1.4.2 rmind struct port_info *p = &sc->port[i];
1583 1.1.4.2 rmind
1584 1.1.4.2 rmind if (!(p->port_type->caps & SUPPORTED_IRQ))
1585 1.1.4.2 rmind t3_link_changed(sc, i);
1586 1.1.4.2 rmind         p->ifp->if_baudrate = (uint64_t)p->link_config.speed * 1000000;
1587 1.1.4.2 rmind }
1588 1.1.4.2 rmind }
1589 1.1.4.2 rmind
1590 1.1.4.2 rmind static void
1591 1.1.4.2 rmind check_t3b2_mac(struct adapter *adapter)
1592 1.1.4.2 rmind {
1593 1.1.4.2 rmind int i;
1594 1.1.4.2 rmind
1595 1.1.4.2 rmind for_each_port(adapter, i) {
1596 1.1.4.2 rmind struct port_info *p = &adapter->port[i];
1597 1.1.4.2 rmind struct ifnet *ifp = p->ifp;
1598 1.1.4.2 rmind int status;
1599 1.1.4.2 rmind
1600 1.1.4.2 rmind if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1601 1.1.4.2 rmind continue;
1602 1.1.4.2 rmind
1603 1.1.4.2 rmind status = 0;
1604 1.1.4.2 rmind PORT_LOCK(p);
1605 1.1.4.2 rmind if ((ifp->if_drv_flags & IFF_DRV_RUNNING))
1606 1.1.4.2 rmind status = t3b2_mac_watchdog_task(&p->mac);
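			/*
			 * Watchdog verdicts, as handled below: 1 means the MAC
			 * was toggled and only a counter is bumped; 2 means the
			 * MAC is wedged and gets a full reinit (MTU, address,
			 * rx mode, link restart, enable, port interrupts).
			 */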
1607 1.1.4.2 rmind if (status == 1)
1608 1.1.4.2 rmind p->mac.stats.num_toggled++;
1609 1.1.4.2 rmind else if (status == 2) {
1610 1.1.4.2 rmind struct cmac *mac = &p->mac;
1611 1.1.4.2 rmind
1612 1.1.4.2 rmind t3_mac_set_mtu(mac, ifp->if_mtu + ETHER_HDR_LEN
1613 1.1.4.2 rmind + ETHER_VLAN_ENCAP_LEN);
1614 1.1.4.2 rmind t3_mac_set_address(mac, 0, p->hw_addr);
1615 1.1.4.2 rmind cxgb_set_rxmode(p);
1616 1.1.4.2 rmind t3_link_start(&p->phy, mac, &p->link_config);
1617 1.1.4.2 rmind t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
1618 1.1.4.2 rmind t3_port_intr_enable(adapter, p->port_id);
1619 1.1.4.2 rmind p->mac.stats.num_resets++;
1620 1.1.4.2 rmind }
1621 1.1.4.2 rmind PORT_UNLOCK(p);
1622 1.1.4.2 rmind }
1623 1.1.4.2 rmind }
1624 1.1.4.2 rmind
1625 1.1.4.2 rmind static void
1626 1.1.4.2 rmind cxgb_tick(void *arg)
1627 1.1.4.2 rmind {
1628 1.1.4.2 rmind adapter_t *sc = (adapter_t *)arg;
1629 1.1.4.2 rmind
1630 1.1.4.2 rmind workqueue_enqueue(sc->tick_task.wq, &sc->tick_task.w, NULL);
1631 1.1.4.2 rmind
1632 1.1.4.2 rmind if (sc->open_device_map != 0)
1633 1.1.4.2 rmind callout_reset(&sc->cxgb_tick_ch, sc->params.stats_update_period * hz,
1634 1.1.4.2 rmind cxgb_tick, sc);
1635 1.1.4.2 rmind }
1636 1.1.4.2 rmind
1637 1.1.4.2 rmind static void
1638 1.1.4.2 rmind cxgb_tick_handler(struct work *wk, void *arg)
1639 1.1.4.2 rmind {
1640 1.1.4.2 rmind adapter_t *sc = (adapter_t *)arg;
1641 1.1.4.2 rmind const struct adapter_params *p = &sc->params;
1642 1.1.4.2 rmind
1643 1.1.4.2 rmind ADAPTER_LOCK(sc);
1644 1.1.4.2 rmind if (p->linkpoll_period)
1645 1.1.4.2 rmind check_link_status(sc);
1646 1.1.4.2 rmind
1647 1.1.4.2 rmind     /*
1648 1.1.4.2 rmind      * The adapter lock can currently only be acquired after the
1649 1.1.4.2 rmind      * port lock, so drop it before check_t3b2_mac() takes port locks.
1650 1.1.4.2 rmind      */
1651 1.1.4.2 rmind ADAPTER_UNLOCK(sc);
1652 1.1.4.2 rmind
1653 1.1.4.2 rmind if (p->rev == T3_REV_B2 && p->nports < 4)
1654 1.1.4.2 rmind check_t3b2_mac(sc);
1655 1.1.4.2 rmind }
1656 1.1.4.2 rmind
1657 1.1.4.2 rmind static void
1658 1.1.4.2 rmind touch_bars(device_t dev)
1659 1.1.4.2 rmind {
1660 1.1.4.2 rmind     /*
1661 1.1.4.2 rmind      * Don't enable yet - the block below still uses the Linux PCI config API.
1662 1.1.4.2 rmind      */
1663 1.1.4.2 rmind #if !defined(__LP64__) && 0
1664 1.1.4.2 rmind u32 v;
1665 1.1.4.2 rmind
1666 1.1.4.2 rmind pci_read_config_dword(pdev, PCI_BASE_ADDRESS_1, &v);
1667 1.1.4.2 rmind pci_write_config_dword(pdev, PCI_BASE_ADDRESS_1, v);
1668 1.1.4.2 rmind pci_read_config_dword(pdev, PCI_BASE_ADDRESS_3, &v);
1669 1.1.4.2 rmind pci_write_config_dword(pdev, PCI_BASE_ADDRESS_3, v);
1670 1.1.4.2 rmind pci_read_config_dword(pdev, PCI_BASE_ADDRESS_5, &v);
1671 1.1.4.2 rmind pci_write_config_dword(pdev, PCI_BASE_ADDRESS_5, v);
1672 1.1.4.2 rmind #endif
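	/*
	 * A NetBSD-style equivalent would go through pci_conf_read()/
	 * pci_conf_write() on BASE_ADDRESS_1/3/5.  Sketch only: the pc/tag
	 * arguments are not plumbed into this function yet.
	 */
#if 0
	{
		pcireg_t v;
		int reg;

		for (reg = PCI_MAPREG_START + 4; reg <= PCI_MAPREG_START + 20;
		    reg += 8) {
			v = pci_conf_read(pc, tag, reg);
			pci_conf_write(pc, tag, reg, v);
		}
	}
#endif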
1673 1.1.4.2 rmind }
1674 1.1.4.2 rmind
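/*
 * reg_block_dump() copies the hardware registers in [start, end]
 * (inclusive, stepping 4 bytes) into the caller's buffer.  Note that
 * "(uint32_t *)buf + start" places the block at byte offset start * 4,
 * i.e. buf is indexed in 32-bit words by register offset, so the dump
 * buffer must be sized accordingly (XXX verify the caller's sizing).
 */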
1675 1.1.4.2 rmind static __inline void
1676 1.1.4.2 rmind reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
1677 1.1.4.2 rmind unsigned int end)
1678 1.1.4.2 rmind {
1679 1.1.4.2 rmind uint32_t *p = (uint32_t *)buf + start;
1680 1.1.4.2 rmind
1681 1.1.4.2 rmind for ( ; start <= end; start += sizeof(uint32_t))
1682 1.1.4.2 rmind *p++ = t3_read_reg(ap, start);
1683 1.1.4.2 rmind }
1684 1.1.4.2 rmind
1685