/******************************************************************************

  Copyright (c) 2001-2010, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: src/sys/dev/ixgbe/ixv.c,v 1.2 2011/03/23 13:10:15 jhb Exp $*/
/*$NetBSD: ixv.c,v 1.1 2011/08/12 21:55:29 dyoung Exp $*/

#include "opt_inet.h"

#include "ixv.h"

/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixv_driver_version[] = "1.0.0";

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixv_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixv_vendor_info_t ixv_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char *ixv_strings[] = {
	"Intel(R) PRO/10GbE Virtual Function Network Driver"
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int	ixv_probe(device_t);
static int	ixv_attach(device_t);
static int	ixv_detach(device_t);
static int	ixv_shutdown(device_t);
#if __FreeBSD_version < 800000
static void	ixv_start(struct ifnet *);
static void	ixv_start_locked(struct tx_ring *, struct ifnet *);
#else
static int	ixv_mq_start(struct ifnet *, struct mbuf *);
static int	ixv_mq_start_locked(struct ifnet *,
		    struct tx_ring *, struct mbuf *);
static void	ixv_qflush(struct ifnet *);
#endif
static int	ixv_ioctl(struct ifnet *, u_long, caddr_t);
static void	ixv_init(void *);
static void	ixv_init_locked(struct adapter *);
static void	ixv_stop(void *);
static void	ixv_media_status(struct ifnet *, struct ifmediareq *);
static int	ixv_media_change(struct ifnet *);
static void	ixv_identify_hardware(struct adapter *);
static int	ixv_allocate_pci_resources(struct adapter *);
static int	ixv_allocate_msix(struct adapter *);
static int	ixv_allocate_queues(struct adapter *);
static int	ixv_setup_msix(struct adapter *);
static void	ixv_free_pci_resources(struct adapter *);
static void	ixv_local_timer(void *);
static void	ixv_setup_interface(device_t, struct adapter *);
static void	ixv_config_link(struct adapter *);

static int	ixv_allocate_transmit_buffers(struct tx_ring *);
static int	ixv_setup_transmit_structures(struct adapter *);
static void	ixv_setup_transmit_ring(struct tx_ring *);
static void	ixv_initialize_transmit_units(struct adapter *);
static void	ixv_free_transmit_structures(struct adapter *);
static void	ixv_free_transmit_buffers(struct tx_ring *);

static int	ixv_allocate_receive_buffers(struct rx_ring *);
static int	ixv_setup_receive_structures(struct adapter *);
static int	ixv_setup_receive_ring(struct rx_ring *);
static void	ixv_initialize_receive_units(struct adapter *);
static void	ixv_free_receive_structures(struct adapter *);
static void	ixv_free_receive_buffers(struct rx_ring *);

static void	ixv_enable_intr(struct adapter *);
static void	ixv_disable_intr(struct adapter *);
static bool	ixv_txeof(struct tx_ring *);
static bool	ixv_rxeof(struct ix_queue *, int);
static void	ixv_rx_checksum(u32, struct mbuf *, u32);
static void	ixv_set_multi(struct adapter *);
static void	ixv_update_link_status(struct adapter *);
static void	ixv_refresh_mbufs(struct rx_ring *, int);
static int	ixv_xmit(struct tx_ring *, struct mbuf **);
static int	ixv_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int	ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
static int	ixv_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int	ixv_dma_malloc(struct adapter *, bus_size_t,
		    struct ixv_dma_alloc *, int);
static void	ixv_dma_free(struct adapter *, struct ixv_dma_alloc *);
static void	ixv_add_rx_process_limit(struct adapter *, const char *,
		    const char *, int *, int);
static bool	ixv_tx_ctx_setup(struct tx_ring *, struct mbuf *);
static bool	ixv_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
static void	ixv_configure_ivars(struct adapter *);
static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

static void	ixv_setup_vlan_support(struct adapter *);
static void	ixv_register_vlan(void *, struct ifnet *, u16);
static void	ixv_unregister_vlan(void *, struct ifnet *, u16);

static void	ixv_save_stats(struct adapter *);
static void	ixv_init_stats(struct adapter *);
static void	ixv_update_stats(struct adapter *);

static __inline void ixv_rx_discard(struct rx_ring *, int);
static __inline void ixv_rx_input(struct rx_ring *, struct ifnet *,
		    struct mbuf *, u32);

/* The MSI/X Interrupt handlers */
static void	ixv_msix_que(void *);
static void	ixv_msix_mbx(void *);

/* Deferred interrupt tasklets */
static void	ixv_handle_que(void *, int);
static void	ixv_handle_mbx(void *, int);

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixv_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixv_probe),
	DEVMETHOD(device_attach, ixv_attach),
	DEVMETHOD(device_detach, ixv_detach),
	DEVMETHOD(device_shutdown, ixv_shutdown),
	{0, 0}
};

#if 0
static driver_t ixv_driver = {
	"ix", ixv_methods, sizeof(struct adapter),
};

extern devclass_t ixgbe_devclass;
DRIVER_MODULE(ixv, pci, ixv_driver, ixgbe_devclass, 0, 0);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);
#endif

/*
** TUNEABLE PARAMETERS:
*/

/*
** AIM: Adaptive Interrupt Moderation
** which means that the interrupt rate
** is varied over time based on the
** traffic for that interrupt vector
*/
static int ixv_enable_aim = FALSE;
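/*
** The FreeBSD TUNABLE_INT() loader hook is stubbed out here, so the
** TUNABLE_INT() invocations below expand to nothing and the
** compiled-in defaults are what take effect on this port.
*/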
#define TUNABLE_INT(__x, __y)
TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);

/* How many packets rxeof tries to clean at a time */
static int ixv_rx_process_limit = 128;
TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);

/* Flow control setting, default to full */
static int ixv_flow_control = ixgbe_fc_full;
TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);

/*
 * Header split: this causes the hardware to DMA
 * the header into a separate mbuf from the payload,
 * it can be a performance win in some workloads, but
 * in others it actually hurts, so it is off by default.
 */
static bool ixv_header_split = FALSE;
TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);

/*
** Number of TX descriptors per ring,
** set higher than RX as this seems
** the better performing choice.
*/
static int ixv_txd = DEFAULT_TXD;
TUNABLE_INT("hw.ixv.txd", &ixv_txd);

/* Number of RX descriptors per ring */
static int ixv_rxd = DEFAULT_RXD;
TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);

/*
** Shadow VFTA table: this is needed because
** the real filter table gets cleared during
** a soft reset and we need to repopulate it.
*/
static u32 ixv_shadow_vfta[VFTA_SIZE];
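
/*
** Illustrative sketch only, never compiled: assuming the standard ixgbe
** VFTA layout of 4096 one-bit entries packed into VFTA_SIZE 32-bit
** words, a VLAN ID selects word (vid >> 5) and bit (vid & 0x1F).  After
** a soft reset the shadow copy kept above can be replayed into the
** hardware table in this fashion.
*/
#if 0
static void
ixv_shadow_vfta_example(u16 vid)
{
	u32 index = (vid >> 5) & (VFTA_SIZE - 1);	/* which 32-bit word */
	u32 bit = 1 << (vid & 0x1F);			/* which bit in it */

	ixv_shadow_vfta[index] |= bit;			/* remember the VLAN */
}
#endif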

/*********************************************************************
 *  Device identification routine
 *
 *  ixv_probe determines if the driver should be loaded on
 *  the adapter, based on its PCI vendor/device ID.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixv_probe(device_t dev)
{
	ixv_vendor_info_t *ent;

	u16	pci_vendor_id = 0;
	u16	pci_device_id = 0;
	u16	pci_subvendor_id = 0;
	u16	pci_subdevice_id = 0;
	char	adapter_name[256];


	pci_vendor_id = pci_get_vendor(dev);
	if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
		return (ENXIO);

	pci_device_id = pci_get_device(dev);
	pci_subvendor_id = pci_get_subvendor(dev);
	pci_subdevice_id = pci_get_subdevice(dev);

	ent = ixv_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id) &&

		    ((pci_subvendor_id == ent->subvendor_id) ||
		     (ent->subvendor_id == 0)) &&

		    ((pci_subdevice_id == ent->subdevice_id) ||
		     (ent->subdevice_id == 0))) {
			sprintf(adapter_name, "%s, Version - %s",
			    ixv_strings[ent->index],
			    ixv_driver_version);
			device_set_desc_copy(dev, adapter_name);
			return (0);
		}
		ent++;
	}
	return (ENXIO);
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixv_attach(device_t dev)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int		error = 0;

	INIT_DEBUGOUT("ixv_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_get_softc(dev);
	adapter->dev = adapter->osdep.dev = dev;
	hw = &adapter->hw;

	/* Core Lock Init */
	IXV_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));

	/* SYSCTL APIs */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
	    adapter, 0, ixv_sysctl_stats, "I", "Statistics");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW,
	    adapter, 0, ixv_sysctl_debug, "I", "Debug Info");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "flow_control", CTLTYPE_INT | CTLFLAG_RW,
	    adapter, 0, ixv_set_flowcntl, "I", "Flow Control");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "enable_aim", CTLTYPE_INT|CTLFLAG_RW,
	    &ixv_enable_aim, 1, "Interrupt Moderation");

	/* Set up the timer callout */
	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);

	/* Determine hardware revision */
	ixv_identify_hardware(adapter);

	/* Do base PCI setup - map BAR0 */
	if (ixv_allocate_pci_resources(adapter)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/* Do descriptor calc and sanity checks */
	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
		device_printf(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixv_txd;

	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
		device_printf(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixv_rxd;

	/* Allocate our TX/RX Queues */
	if (ixv_allocate_queues(adapter)) {
		error = ENOMEM;
		goto err_out;
	}

	/*
	** Initialize the shared code: at
	** this point the mac type is set.
	*/
	error = ixgbe_init_shared_code(hw);
	if (error) {
		device_printf(dev, "Shared Code Initialization Failure\n");
		error = EIO;
		goto err_late;
	}

	/* Setup the mailbox */
	ixgbe_init_mbx_params_vf(hw);

	ixgbe_reset_hw(hw);

	/* Get Hardware Flow Control setting */
	hw->fc.requested_mode = ixgbe_fc_full;
	hw->fc.pause_time = IXV_FC_PAUSE;
	hw->fc.low_water = IXV_FC_LO;
	hw->fc.high_water = IXV_FC_HI;
	hw->fc.send_xon = TRUE;

	error = ixgbe_init_hw(hw);
	if (error) {
		device_printf(dev, "Hardware Initialization Failure\n");
		error = EIO;
		goto err_late;
	}

	error = ixv_allocate_msix(adapter);
	if (error)
		goto err_late;

	/* Setup OS specific network interface */
	ixv_setup_interface(dev, adapter);

	/* Sysctl for limiting the amount of work done in the taskqueue */
	ixv_add_rx_process_limit(adapter, "rx_processing_limit",
	    "max number of rx packets to process", &adapter->rx_process_limit,
	    ixv_rx_process_limit);

	/* Do the stats setup */
	ixv_save_stats(adapter);
	ixv_init_stats(adapter);

	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);

	INIT_DEBUGOUT("ixv_attach: end");
	return (0);

err_late:
	ixv_free_transmit_structures(adapter);
	ixv_free_receive_structures(adapter);
err_out:
	ixv_free_pci_resources(adapter);
	return (error);

}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixv_detach(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ix_queue *que = adapter->queues;

	INIT_DEBUGOUT("ixv_detach: begin");

	/* Make sure VLANS are not using driver */
	if (adapter->ifp->if_vlantrunk != NULL) {
		device_printf(dev, "Vlan in use, detach first\n");
		return (EBUSY);
	}

	IXV_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXV_CORE_UNLOCK(adapter);

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		if (que->tq) {
			taskqueue_drain(que->tq, &que->que_task);
			taskqueue_free(que->tq);
		}
	}

	/* Drain the Link queue */
	if (adapter->tq) {
		taskqueue_drain(adapter->tq, &adapter->mbx_task);
		taskqueue_free(adapter->tq);
	}

	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

	ether_ifdetach(adapter->ifp);
	callout_drain(&adapter->timer);
	ixv_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(adapter->ifp);

	ixv_free_transmit_structures(adapter);
	ixv_free_receive_structures(adapter);

	IXV_CORE_LOCK_DESTROY(adapter);
	return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/
static int
ixv_shutdown(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	IXV_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXV_CORE_UNLOCK(adapter);
	return (0);
}

#if __FreeBSD_version < 800000
/*********************************************************************
 *  Transmit entry point
 *
 *  ixv_start is called by the stack to initiate a transmit.
 *  The driver will remain in this routine as long as there are
 *  packets to transmit and transmit resources are available.
 *  In case resources are not available, the stack is notified
 *  and the packet is requeued.
 **********************************************************************/
static void
ixv_start_locked(struct tx_ring *txr, struct ifnet *ifp)
{
	struct mbuf	*m_head;
	struct adapter	*adapter = txr->adapter;

	IXV_TX_LOCK_ASSERT(txr);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;
	if (!adapter->link_active)
		return;

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (ixv_xmit(txr, &m_head)) {
			if (m_head == NULL)
				break;
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			break;
		}
		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);

		/* Set watchdog on */
		txr->watchdog_check = TRUE;
		txr->watchdog_time = ticks;

	}
	return;
}

/*
 * Legacy TX start - called by the stack, this
 * always uses the first tx ring, and should
 * not be used with multiqueue tx enabled.
 */
static void
ixv_start(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct tx_ring *txr = adapter->tx_rings;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		IXV_TX_LOCK(txr);
		ixv_start_locked(txr, ifp);
		IXV_TX_UNLOCK(txr);
	}
	return;
}

#else

/*
** Multiqueue Transmit driver
**
*/
static int
ixv_mq_start(struct ifnet *ifp, struct mbuf *m)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ix_queue	*que;
	struct tx_ring	*txr;
	int		i = 0, err = 0;

	/* Which queue to use */
	if ((m->m_flags & M_FLOWID) != 0)
		i = m->m_pkthdr.flowid % adapter->num_queues;

	txr = &adapter->tx_rings[i];
	que = &adapter->queues[i];

	if (IXV_TX_TRYLOCK(txr)) {
		err = ixv_mq_start_locked(ifp, txr, m);
		IXV_TX_UNLOCK(txr);
	} else {
		err = drbr_enqueue(ifp, txr->br, m);
		taskqueue_enqueue(que->tq, &que->que_task);
	}

	return (err);
}

static int
ixv_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
{
	struct adapter	*adapter = txr->adapter;
	struct mbuf	*next;
	int		enqueued, err = 0;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || adapter->link_active == 0) {
		if (m != NULL)
			err = drbr_enqueue(ifp, txr->br, m);
		return (err);
	}

	/* Do a clean if descriptors are low */
	if (txr->tx_avail <= IXV_TX_CLEANUP_THRESHOLD)
		ixv_txeof(txr);

	enqueued = 0;
	if (m == NULL) {
		next = drbr_dequeue(ifp, txr->br);
	} else if (drbr_needs_enqueue(ifp, txr->br)) {
		if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
			return (err);
		next = drbr_dequeue(ifp, txr->br);
	} else
		next = m;

	/* Process the queue */
	while (next != NULL) {
		if ((err = ixv_xmit(txr, &next)) != 0) {
			if (next != NULL)
				err = drbr_enqueue(ifp, txr->br, next);
			break;
		}
		enqueued++;
		drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, next);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
		if (txr->tx_avail <= IXV_TX_OP_THRESHOLD) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		next = drbr_dequeue(ifp, txr->br);
	}

	if (enqueued > 0) {
		/* Set watchdog on */
		txr->watchdog_check = TRUE;
		txr->watchdog_time = ticks;
	}

	return (err);
}

/*
** Flush all ring buffers
*/
static void
ixv_qflush(struct ifnet *ifp)
{
	struct adapter	*adapter = ifp->if_softc;
	struct tx_ring	*txr = adapter->tx_rings;
	struct mbuf	*m;

	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		IXV_TX_LOCK(txr);
		while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
			m_freem(m);
		IXV_TX_UNLOCK(txr);
	}
	if_qflush(ifp);
}

#endif

/*********************************************************************
 *  Ioctl entry point
 *
 *  ixv_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ifreq	*ifr = (struct ifreq *) data;
	int		error = 0;

	switch (command) {

	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		if (ifr->ifr_mtu > IXV_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
			error = EINVAL;
		} else {
			IXV_CORE_LOCK(adapter);
			ifp->if_mtu = ifr->ifr_mtu;
			adapter->max_frame_size =
			    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
			ixv_init_locked(adapter);
			IXV_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		IXV_CORE_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
				ixv_init_locked(adapter);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixv_stop(adapter);
		adapter->if_flags = ifp->if_flags;
		IXV_CORE_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXV_CORE_LOCK(adapter);
			ixv_disable_intr(adapter);
			ixv_set_multi(adapter);
			ixv_enable_intr(adapter);
			IXV_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		if (mask & IFCAP_HWCSUM)
			ifp->if_capenable ^= IFCAP_HWCSUM;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXV_CORE_LOCK(adapter);
			ixv_init_locked(adapter);
			IXV_CORE_UNLOCK(adapter);
		}
		VLAN_CAPABILITIES(ifp);
		break;
	}

	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  the init entry point in the network interface structure. It is
 *  also used by the driver as a hw/sw initialization routine to
 *  get to a consistent state.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
#define IXGBE_MHADD_MFS_SHIFT 16

static void
ixv_init_locked(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		mhadd, gpie;

	INIT_DEBUGOUT("ixv_init: begin");
	mtx_assert(&adapter->core_mtx, MA_OWNED);
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Prepare transmit descriptors and buffers */
	if (ixv_setup_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		ixv_stop(adapter);
		return;
	}

	ixgbe_reset_hw(hw);
	ixv_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixv_set_multi(adapter);

	/*
	** Determine the correct mbuf pool
	** for doing jumbo/headersplit
	*/
	if (ifp->if_mtu > ETHERMTU)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MCLBYTES;

	/* Prepare receive descriptors and buffers */
	if (ixv_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixv_initialize_receive_units(adapter);

	/* Enable Enhanced MSIX mode */
	gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
	gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME;
	gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD;
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 800000
		ifp->if_hwassist |= CSUM_SCTP;
#endif
	}

	/* Set MTU size */
	if (ifp->if_mtu > ETHERMTU) {
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(adapter);

	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	/* Set up MSI/X routing */
	ixv_configure_ivars(adapter);

	/* Set up auto-mask */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->mbxvec), IXV_LINK_ITR);

	/* Stats init */
	ixv_init_stats(adapter);

	/* Config/Enable Link */
	ixv_config_link(adapter);

	/* And now turn on interrupts */
	ixv_enable_intr(adapter);

	/* Now inform the stack we're ready */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	return;
}

static void
ixv_init(void *arg)
{
	struct adapter *adapter = arg;

	IXV_CORE_LOCK(adapter);
	ixv_init_locked(adapter);
	IXV_CORE_UNLOCK(adapter);
	return;
}


/*
**
** MSIX Interrupt Handlers and Tasklets
**
*/
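
/*
** Note on the mask arithmetic below: each queue vector owns one bit of
** the VTEIMS/VTEIMC/VTEICS registers, so vector 2, for example, maps to
** bit (1 << 2) = 0x4.  Masking with IXGBE_EIMS_RTX_QUEUE (assuming the
** standard ixgbe definition that covers only the RX/TX queue bits)
** keeps stray bits from reaching the hardware.
*/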

static inline void
ixv_enable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32	queue = 1 << vector;
	u32	mask;

	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
}

static inline void
ixv_disable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64	queue = (u64)(1 << vector);
	u32	mask;

	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
}

static inline void
ixv_rearm_queues(struct adapter *adapter, u64 queues)
{
	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
}



static void
ixv_handle_que(void *context, int pending)
{
	struct ix_queue *que = context;
	struct adapter	*adapter = que->adapter;
	struct tx_ring	*txr = que->txr;
	struct ifnet	*ifp = adapter->ifp;
	bool		more;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		more = ixv_rxeof(que, adapter->rx_process_limit);
		IXV_TX_LOCK(txr);
		ixv_txeof(txr);
#if __FreeBSD_version >= 800000
		if (!drbr_empty(ifp, txr->br))
			ixv_mq_start_locked(ifp, txr, NULL);
#else
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			ixv_start_locked(txr, ifp);
#endif
		IXV_TX_UNLOCK(txr);
		if (more) {
			taskqueue_enqueue(que->tq, &que->que_task);
			return;
		}
	}

	/* Reenable this interrupt */
	ixv_enable_queue(adapter, que->msix);
	return;
}

/*********************************************************************
 *
 *  MSIX Queue Interrupt Service routine
 *
 **********************************************************************/
static void
ixv_msix_que(void *arg)
{
	struct ix_queue	*que = arg;
	struct adapter	*adapter = que->adapter;
	struct tx_ring	*txr = que->txr;
	struct rx_ring	*rxr = que->rxr;
	bool		more_tx, more_rx;
	u32		newitr = 0;

	ixv_disable_queue(adapter, que->msix);
	++que->irqs;

	more_rx = ixv_rxeof(que, adapter->rx_process_limit);

	IXV_TX_LOCK(txr);
	more_tx = ixv_txeof(txr);
	IXV_TX_UNLOCK(txr);

	more_rx = ixv_rxeof(que, adapter->rx_process_limit);

	/* Do AIM now? */

	if (ixv_enable_aim == FALSE)
		goto no_calc;
	/*
	** Do Adaptive Interrupt Moderation:
	**  - Write out last calculated setting
	**  - Calculate based on average size over
	**    the last interval.
	*/
	if (que->eitr_setting)
		IXGBE_WRITE_REG(&adapter->hw,
		    IXGBE_VTEITR(que->msix),
		    que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = max(newitr,
		    (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = min(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);
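	/*
	** Worked example: with an average frame size of 1500 bytes,
	** newitr = 1500 + 24 = 1524; that falls outside the (300, 1200)
	** mid range, so it is halved to 762 before being duplicated into
	** the upper 16 bits of the register value below.
	*/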

	newitr |= newitr << 16;

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	if (more_tx || more_rx)
		taskqueue_enqueue(que->tq, &que->que_task);
	else /* Reenable this interrupt */
		ixv_enable_queue(adapter, que->msix);
	return;
}

static void
ixv_msix_mbx(void *arg)
{
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		reg;

	++adapter->mbx_irq;

	/* First get the cause */
	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);

	/* Link status change */
	if (reg & IXGBE_EICR_LSC)
		taskqueue_enqueue(adapter->tq, &adapter->mbx_task);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
	return;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;

	INIT_DEBUGOUT("ixv_media_status: begin");
	IXV_CORE_LOCK(adapter);
	ixv_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!adapter->link_active) {
		IXV_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;

	switch (adapter->link_speed) {
	case IXGBE_LINK_SPEED_1GB_FULL:
		ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_10GB_FULL:
		ifmr->ifm_active |= IFM_FDX;
		break;
	}

	IXV_CORE_UNLOCK(adapter);

	return;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  the media/mediaopt option with ifconfig.
 *
 **********************************************************************/
static int
ixv_media_change(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;

	INIT_DEBUGOUT("ixv_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		break;
	default:
		device_printf(adapter->dev, "Only auto media type\n");
		return (EINVAL);
	}

	return (0);
}

/*********************************************************************
 *
 *  This routine maps the mbufs to tx descriptors, allowing the
 *  TX engine to transmit the packets.
 *  	- return 0 on success, positive on failure
 *
 **********************************************************************/

static int
ixv_xmit(struct tx_ring *txr, struct mbuf **m_headp)
{
	struct adapter	*adapter = txr->adapter;
	u32		olinfo_status = 0, cmd_type_len;
	u32		paylen = 0;
	int		i, j, error, nsegs;
	int		first, last = 0;
	struct mbuf	*m_head;
	bus_dma_segment_t segs[32];
	bus_dmamap_t	map;
	struct ixv_tx_buf *txbuf, *txbuf_mapped;
	union ixgbe_adv_tx_desc *txd = NULL;

	m_head = *m_headp;

	/* Basic descriptor defines */
	cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
	    IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);

	if (m_head->m_flags & M_VLANTAG)
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	/*
	 * Important to capture the first descriptor
	 * used because it will contain the index of
	 * the one we tell the hardware to report back
	 */
	first = txr->next_avail_desc;
	txbuf = &txr->tx_buffers[first];
	txbuf_mapped = txbuf;
	map = txbuf->map;

	/*
	 * Map the packet for DMA.
	 */
	error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
	    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

	if (error == EFBIG) {
		struct mbuf *m;

		m = m_defrag(*m_headp, M_DONTWAIT);
		if (m == NULL) {
			adapter->mbuf_defrag_failed++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (ENOBUFS);
		}
		*m_headp = m;

		/* Try it again */
		error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
		    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

		if (error == ENOMEM) {
			adapter->no_tx_dma_setup++;
			return (error);
		} else if (error != 0) {
			adapter->no_tx_dma_setup++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (error);
		}
	} else if (error == ENOMEM) {
		adapter->no_tx_dma_setup++;
		return (error);
	} else if (error != 0) {
		adapter->no_tx_dma_setup++;
		m_freem(*m_headp);
		*m_headp = NULL;
		return (error);
	}

	/* Make certain there are enough descriptors */
	if (nsegs > txr->tx_avail - 2) {
		txr->no_desc_avail++;
		error = ENOBUFS;
		goto xmit_fail;
	}
	m_head = *m_headp;

	/*
	** Set up the appropriate offload context
	** this becomes the first descriptor of
	** a packet.
	*/
	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		if (ixv_tso_setup(txr, m_head, &paylen)) {
			cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
			olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
			olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
			olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
			++adapter->tso_tx;
		} else
			return (ENXIO);
	} else if (ixv_tx_ctx_setup(txr, m_head))
		olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;

	/* Record payload length */
	if (paylen == 0)
		olinfo_status |= m_head->m_pkthdr.len <<
		    IXGBE_ADVTXD_PAYLEN_SHIFT;

	i = txr->next_avail_desc;
	for (j = 0; j < nsegs; j++) {
		bus_size_t seglen;
		bus_addr_t segaddr;

		txbuf = &txr->tx_buffers[i];
		txd = &txr->tx_base[i];
		seglen = segs[j].ds_len;
		segaddr = htole64(segs[j].ds_addr);

		txd->read.buffer_addr = segaddr;
		txd->read.cmd_type_len = htole32(txr->txd_cmd |
		    cmd_type_len | seglen);
		txd->read.olinfo_status = htole32(olinfo_status);
		last = i; /* descriptor that will get completion IRQ */

		if (++i == adapter->num_tx_desc)
			i = 0;

		txbuf->m_head = NULL;
		txbuf->eop_index = -1;
	}

	txd->read.cmd_type_len |=
	    htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
	txr->tx_avail -= nsegs;
	txr->next_avail_desc = i;

	txbuf->m_head = m_head;
	txbuf->map = map;
	bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);

	/* Set the index of the descriptor that will be marked done */
	txbuf = &txr->tx_buffers[first];
	txbuf->eop_index = last;

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/*
	 * Advance the Transmit Descriptor Tail (Tdt), this tells the
	 * hardware that this frame is available to transmit.
	 */
	++txr->total_packets;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(txr->me), i);

	return (0);

xmit_fail:
	bus_dmamap_unload(txr->txtag, txbuf->map);
	return (error);

}


/*********************************************************************
 *  Multicast Update
 *
 *  This routine is called whenever multicast address list is updated.
 *
 **********************************************************************/
#define IXGBE_RAR_ENTRIES 16

static void
ixv_set_multi(struct adapter *adapter)
{
	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
	u8	*update_ptr;
	struct	ifmultiaddr *ifma;
	int	mcnt = 0;
	struct ifnet *ifp = adapter->ifp;

	IOCTL_DEBUGOUT("ixv_set_multi: begin");

#if __FreeBSD_version < 800000
	IF_ADDR_LOCK(ifp);
#else
	if_maddr_rlock(ifp);
#endif
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
		    IXGBE_ETH_LENGTH_OF_ADDRESS);
		mcnt++;
	}
#if __FreeBSD_version < 800000
	IF_ADDR_UNLOCK(ifp);
#else
	if_maddr_runlock(ifp);
#endif

	update_ptr = mta;

	ixgbe_update_mc_addr_list(&adapter->hw,
	    update_ptr, mcnt, ixv_mc_array_itr);

	return;
}

/*
 * This is an iterator function now needed by the multicast
 * shared code. It simply feeds the shared code routine the
 * addresses in the array of ixv_set_multi() one by one.
 */
static u8 *
ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
{
	u8 *addr = *update_ptr;
	u8 *newptr;
	*vmdq = 0;

	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
	*update_ptr = newptr;
	return addr;
}

/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status, updates statistics,
 *  and runs the watchdog check.
 *
 **********************************************************************/

1384 1.1 dyoung static void
1385 1.1 dyoung ixv_local_timer(void *arg)
1386 1.1 dyoung {
1387 1.1 dyoung struct adapter *adapter = arg;
1388 1.1 dyoung device_t dev = adapter->dev;
1389 1.1 dyoung struct tx_ring *txr = adapter->tx_rings;
1390 1.1 dyoung int i;
1391 1.1 dyoung
1392 1.1 dyoung mtx_assert(&adapter->core_mtx, MA_OWNED);
1393 1.1 dyoung
1394 1.1 dyoung ixv_update_link_status(adapter);
1395 1.1 dyoung
1396 1.1 dyoung /* Stats Update */
1397 1.1 dyoung ixv_update_stats(adapter);
1398 1.1 dyoung
1399 1.1 dyoung /*
1400 1.1 dyoung * If the interface has been paused
1401 1.1 dyoung * then don't do the watchdog check
1402 1.1 dyoung */
1403 1.1 dyoung if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
1404 1.1 dyoung goto out;
1405 1.1 dyoung /*
1406 1.1 dyoung ** Check for time since any descriptor was cleaned
1407 1.1 dyoung */
1408 1.1 dyoung for (i = 0; i < adapter->num_queues; i++, txr++) {
1409 1.1 dyoung IXV_TX_LOCK(txr);
1410 1.1 dyoung if (txr->watchdog_check == FALSE) {
1411 1.1 dyoung IXV_TX_UNLOCK(txr);
1412 1.1 dyoung continue;
1413 1.1 dyoung }
1414 1.1 dyoung if ((ticks - txr->watchdog_time) > IXV_WATCHDOG)
1415 1.1 dyoung goto hung;
1416 1.1 dyoung IXV_TX_UNLOCK(txr);
1417 1.1 dyoung }
1418 1.1 dyoung out:
1419 1.1 dyoung ixv_rearm_queues(adapter, adapter->que_mask);
1420 1.1 dyoung callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
1421 1.1 dyoung return;
1422 1.1 dyoung
1423 1.1 dyoung hung:
	device_printf(dev, "Watchdog timeout -- resetting\n");
	device_printf(dev, "Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
	    IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDH(i)),
	    IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDT(i)));
	device_printf(dev, "TX(%d) desc avail = %d, "
	    "Next TX to Clean = %d\n",
	    txr->me, txr->tx_avail, txr->next_to_clean);
1431 1.1 dyoung adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1432 1.1 dyoung adapter->watchdog_events++;
1433 1.1 dyoung IXV_TX_UNLOCK(txr);
1434 1.1 dyoung ixv_init_locked(adapter);
1435 1.1 dyoung }
1436 1.1 dyoung
/*
** Note: this routine updates the OS on the link state;
** the real check of the hardware only happens with
** a link interrupt.
*/
1442 1.1 dyoung static void
1443 1.1 dyoung ixv_update_link_status(struct adapter *adapter)
1444 1.1 dyoung {
1445 1.1 dyoung struct ifnet *ifp = adapter->ifp;
1446 1.1 dyoung struct tx_ring *txr = adapter->tx_rings;
1447 1.1 dyoung device_t dev = adapter->dev;
1448 1.1 dyoung
1449 1.1 dyoung
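	/* link_speed == 128 is IXGBE_LINK_SPEED_10GB_FULL (0x80) in the shared code */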
	if (adapter->link_up) {
		if (adapter->link_active == FALSE) {
			if (bootverbose)
				device_printf(dev, "Link is up %d Gbps %s\n",
				    ((adapter->link_speed == 128) ? 10 : 1),
				    "Full Duplex");
1456 1.1 dyoung adapter->link_active = TRUE;
1457 1.1 dyoung if_link_state_change(ifp, LINK_STATE_UP);
1458 1.1 dyoung }
1459 1.1 dyoung } else { /* Link down */
1460 1.1 dyoung if (adapter->link_active == TRUE) {
1461 1.1 dyoung if (bootverbose)
1462 1.1 dyoung device_printf(dev,"Link is Down\n");
1463 1.1 dyoung if_link_state_change(ifp, LINK_STATE_DOWN);
1464 1.1 dyoung adapter->link_active = FALSE;
1465 1.1 dyoung for (int i = 0; i < adapter->num_queues;
1466 1.1 dyoung i++, txr++)
1467 1.1 dyoung txr->watchdog_check = FALSE;
1468 1.1 dyoung }
1469 1.1 dyoung }
1470 1.1 dyoung
1471 1.1 dyoung return;
1472 1.1 dyoung }
1473 1.1 dyoung
1474 1.1 dyoung
1475 1.1 dyoung /*********************************************************************
1476 1.1 dyoung *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and stopping the adapter; it does not
 *  free the TX/RX buffers.
1479 1.1 dyoung *
1480 1.1 dyoung **********************************************************************/
1481 1.1 dyoung
1482 1.1 dyoung static void
1483 1.1 dyoung ixv_stop(void *arg)
1484 1.1 dyoung {
1485 1.1 dyoung struct ifnet *ifp;
1486 1.1 dyoung struct adapter *adapter = arg;
1487 1.1 dyoung struct ixgbe_hw *hw = &adapter->hw;
1488 1.1 dyoung ifp = adapter->ifp;
1489 1.1 dyoung
1490 1.1 dyoung mtx_assert(&adapter->core_mtx, MA_OWNED);
1491 1.1 dyoung
1492 1.1 dyoung INIT_DEBUGOUT("ixv_stop: begin\n");
1493 1.1 dyoung ixv_disable_intr(adapter);
1494 1.1 dyoung
1495 1.1 dyoung /* Tell the stack that the interface is no longer active */
1496 1.1 dyoung ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1497 1.1 dyoung
1498 1.1 dyoung ixgbe_reset_hw(hw);
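	/* clear adapter_stopped (likely set by the reset) so the explicit stop below runs fully */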
	hw->adapter_stopped = FALSE;
1500 1.1 dyoung ixgbe_stop_adapter(hw);
1501 1.1 dyoung callout_stop(&adapter->timer);
1502 1.1 dyoung
1503 1.1 dyoung /* reprogram the RAR[0] in case user changed it. */
1504 1.1 dyoung ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1505 1.1 dyoung
1506 1.1 dyoung return;
1507 1.1 dyoung }
1508 1.1 dyoung
1509 1.1 dyoung
1510 1.1 dyoung /*********************************************************************
1511 1.1 dyoung *
1512 1.1 dyoung * Determine hardware revision.
1513 1.1 dyoung *
1514 1.1 dyoung **********************************************************************/
1515 1.1 dyoung static void
1516 1.1 dyoung ixv_identify_hardware(struct adapter *adapter)
1517 1.1 dyoung {
1518 1.1 dyoung device_t dev = adapter->dev;
1519 1.1 dyoung u16 pci_cmd_word;
1520 1.1 dyoung
	/*
	** Make sure BUSMASTER is set; on a VM under
	** KVM it may not be, and that will break things.
	*/
1525 1.1 dyoung pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1526 1.1 dyoung if (!((pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
1527 1.1 dyoung (pci_cmd_word & PCIM_CMD_MEMEN))) {
1528 1.1 dyoung INIT_DEBUGOUT("Memory Access and/or Bus Master "
1529 1.1 dyoung "bits were not set!\n");
1530 1.1 dyoung pci_cmd_word |= (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
1531 1.1 dyoung pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
1532 1.1 dyoung }
1533 1.1 dyoung
1534 1.1 dyoung /* Save off the information about this board */
1535 1.1 dyoung adapter->hw.vendor_id = pci_get_vendor(dev);
1536 1.1 dyoung adapter->hw.device_id = pci_get_device(dev);
1537 1.1 dyoung adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
1538 1.1 dyoung adapter->hw.subsystem_vendor_id =
1539 1.1 dyoung pci_read_config(dev, PCIR_SUBVEND_0, 2);
1540 1.1 dyoung adapter->hw.subsystem_device_id =
1541 1.1 dyoung pci_read_config(dev, PCIR_SUBDEV_0, 2);
1542 1.1 dyoung
1543 1.1 dyoung return;
1544 1.1 dyoung }
1545 1.1 dyoung
1546 1.1 dyoung /*********************************************************************
1547 1.1 dyoung *
1548 1.1 dyoung * Setup MSIX Interrupt resources and handlers
1549 1.1 dyoung *
1550 1.1 dyoung **********************************************************************/
1551 1.1 dyoung static int
1552 1.1 dyoung ixv_allocate_msix(struct adapter *adapter)
1553 1.1 dyoung {
1554 1.1 dyoung device_t dev = adapter->dev;
1555 1.1 dyoung struct ix_queue *que = adapter->queues;
1556 1.1 dyoung int error, rid, vector = 0;
1557 1.1 dyoung
1558 1.1 dyoung for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
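		/* MSIX interrupt resources use 1-based rids, hence vector + 1 */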
1559 1.1 dyoung rid = vector + 1;
1560 1.1 dyoung que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1561 1.1 dyoung RF_SHAREABLE | RF_ACTIVE);
1562 1.1 dyoung if (que->res == NULL) {
1563 1.1 dyoung device_printf(dev,"Unable to allocate"
1564 1.1 dyoung " bus resource: que interrupt [%d]\n", vector);
1565 1.1 dyoung return (ENXIO);
1566 1.1 dyoung }
1567 1.1 dyoung /* Set the handler function */
1568 1.1 dyoung error = bus_setup_intr(dev, que->res,
1569 1.1 dyoung INTR_TYPE_NET | INTR_MPSAFE, NULL,
1570 1.1 dyoung ixv_msix_que, que, &que->tag);
1571 1.1 dyoung if (error) {
1572 1.1 dyoung que->res = NULL;
1573 1.1 dyoung device_printf(dev, "Failed to register QUE handler");
1574 1.1 dyoung return (error);
1575 1.1 dyoung }
1576 1.1 dyoung #if __FreeBSD_version >= 800504
1577 1.1 dyoung bus_describe_intr(dev, que->res, que->tag, "que %d", i);
1578 1.1 dyoung #endif
1579 1.1 dyoung que->msix = vector;
		adapter->que_mask |= (u64)1 << que->msix;	/* shift in 64 bits */
1581 1.1 dyoung /*
1582 1.1 dyoung ** Bind the msix vector, and thus the
1583 1.1 dyoung ** ring to the corresponding cpu.
1584 1.1 dyoung */
1585 1.1 dyoung if (adapter->num_queues > 1)
1586 1.1 dyoung bus_bind_intr(dev, que->res, i);
1587 1.1 dyoung
1588 1.1 dyoung ixgbe_task_init(&que->que_task, ixv_handle_que, que);
1589 1.1 dyoung que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT,
1590 1.1 dyoung taskqueue_thread_enqueue, &que->tq);
1591 1.1 dyoung taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1592 1.1 dyoung device_get_nameunit(adapter->dev));
1593 1.1 dyoung }
1594 1.1 dyoung
1595 1.1 dyoung /* and Mailbox */
1596 1.1 dyoung rid = vector + 1;
1597 1.1 dyoung adapter->res = bus_alloc_resource_any(dev,
1598 1.1 dyoung SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
1599 1.1 dyoung if (!adapter->res) {
1600 1.1 dyoung device_printf(dev,"Unable to allocate"
1601 1.1 dyoung " bus resource: MBX interrupt [%d]\n", rid);
1602 1.1 dyoung return (ENXIO);
1603 1.1 dyoung }
1604 1.1 dyoung /* Set the mbx handler function */
1605 1.1 dyoung error = bus_setup_intr(dev, adapter->res,
1606 1.1 dyoung INTR_TYPE_NET | INTR_MPSAFE, NULL,
1607 1.1 dyoung ixv_msix_mbx, adapter, &adapter->tag);
1608 1.1 dyoung if (error) {
1609 1.1 dyoung adapter->res = NULL;
1610 1.1 dyoung device_printf(dev, "Failed to register LINK handler");
1611 1.1 dyoung return (error);
1612 1.1 dyoung }
1613 1.1 dyoung #if __FreeBSD_version >= 800504
1614 1.1 dyoung bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
1615 1.1 dyoung #endif
1616 1.1 dyoung adapter->mbxvec = vector;
1617 1.1 dyoung /* Tasklets for Mailbox */
1618 1.1 dyoung ixgbe_task_init(&adapter->mbx_task, ixv_handle_mbx, adapter);
1619 1.1 dyoung adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT,
1620 1.1 dyoung taskqueue_thread_enqueue, &adapter->tq);
1621 1.1 dyoung taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq",
1622 1.1 dyoung device_get_nameunit(adapter->dev));
1623 1.1 dyoung /*
1624 1.1 dyoung ** Due to a broken design QEMU will fail to properly
1625 1.1 dyoung ** enable the guest for MSIX unless the vectors in
1626 1.1 dyoung ** the table are all set up, so we must rewrite the
1627 1.1 dyoung ** ENABLE in the MSIX control register again at this
1628 1.1 dyoung ** point to cause it to successfully initialize us.
1629 1.1 dyoung */
1630 1.1 dyoung if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
1631 1.1 dyoung int msix_ctrl;
1632 1.1 dyoung pci_find_cap(dev, PCIY_MSIX, &rid);
1633 1.1 dyoung rid += PCIR_MSIX_CTRL;
1634 1.1 dyoung msix_ctrl = pci_read_config(dev, rid, 2);
1635 1.1 dyoung msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1636 1.1 dyoung pci_write_config(dev, rid, msix_ctrl, 2);
1637 1.1 dyoung }
1638 1.1 dyoung
1639 1.1 dyoung return (0);
1640 1.1 dyoung }
1641 1.1 dyoung
/*
 * Set up MSIX resources; note that the VF
 * device MUST use MSIX, there is no fallback.
 */
1646 1.1 dyoung static int
1647 1.1 dyoung ixv_setup_msix(struct adapter *adapter)
1648 1.1 dyoung {
1649 1.1 dyoung device_t dev = adapter->dev;
1650 1.1 dyoung int rid, vectors, want = 2;
1651 1.1 dyoung
1652 1.1 dyoung
1653 1.1 dyoung /* First try MSI/X */
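	/* the MSIX table is behind BAR 3 here (released later as PCIR_BAR(MSIX_BAR)) */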
1654 1.1 dyoung rid = PCIR_BAR(3);
1655 1.1 dyoung adapter->msix_mem = bus_alloc_resource_any(dev,
1656 1.1 dyoung SYS_RES_MEMORY, &rid, RF_ACTIVE);
1657 1.1 dyoung if (!adapter->msix_mem) {
		device_printf(adapter->dev,
		    "Unable to map MSIX table\n");
1660 1.1 dyoung goto out;
1661 1.1 dyoung }
1662 1.1 dyoung
1663 1.1 dyoung vectors = pci_msix_count(dev);
1664 1.1 dyoung if (vectors < 2) {
1665 1.1 dyoung bus_release_resource(dev, SYS_RES_MEMORY,
1666 1.1 dyoung rid, adapter->msix_mem);
1667 1.1 dyoung adapter->msix_mem = NULL;
1668 1.1 dyoung goto out;
1669 1.1 dyoung }
1670 1.1 dyoung
1671 1.1 dyoung /*
1672 1.1 dyoung ** Want two vectors: one for a queue,
1673 1.1 dyoung ** plus an additional for mailbox.
1674 1.1 dyoung */
1675 1.1 dyoung if (pci_alloc_msix(dev, &want) == 0) {
1676 1.1 dyoung device_printf(adapter->dev,
1677 1.1 dyoung "Using MSIX interrupts with %d vectors\n", want);
1678 1.1 dyoung return (want);
1679 1.1 dyoung }
1680 1.1 dyoung out:
1681 1.1 dyoung device_printf(adapter->dev,"MSIX config error\n");
1682 1.1 dyoung return (ENXIO);
1683 1.1 dyoung }
1684 1.1 dyoung
1685 1.1 dyoung
1686 1.1 dyoung static int
1687 1.1 dyoung ixv_allocate_pci_resources(struct adapter *adapter)
1688 1.1 dyoung {
1689 1.1 dyoung int rid;
1690 1.1 dyoung device_t dev = adapter->dev;
1691 1.1 dyoung
1692 1.1 dyoung rid = PCIR_BAR(0);
1693 1.1 dyoung adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1694 1.1 dyoung &rid, RF_ACTIVE);
1695 1.1 dyoung
1696 1.1 dyoung if (!(adapter->pci_mem)) {
1697 1.1 dyoung device_printf(dev,"Unable to allocate bus resource: memory\n");
1698 1.1 dyoung return (ENXIO);
1699 1.1 dyoung }
1700 1.1 dyoung
1701 1.1 dyoung adapter->osdep.mem_bus_space_tag =
1702 1.1 dyoung rman_get_bustag(adapter->pci_mem);
1703 1.1 dyoung adapter->osdep.mem_bus_space_handle =
1704 1.1 dyoung rman_get_bushandle(adapter->pci_mem);
1705 1.1 dyoung adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
1706 1.1 dyoung
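	/* the VF is limited to a single queue pair in this driver */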
1707 1.1 dyoung adapter->num_queues = 1;
1708 1.1 dyoung adapter->hw.back = &adapter->osdep;
1709 1.1 dyoung
1710 1.1 dyoung /*
	** Now set up MSI/X, which should
	** return the number of
	** configured vectors.
1714 1.1 dyoung */
1715 1.1 dyoung adapter->msix = ixv_setup_msix(adapter);
1716 1.1 dyoung if (adapter->msix == ENXIO)
1717 1.1 dyoung return (ENXIO);
1718 1.1 dyoung else
1719 1.1 dyoung return (0);
1720 1.1 dyoung }
1721 1.1 dyoung
1722 1.1 dyoung static void
1723 1.1 dyoung ixv_free_pci_resources(struct adapter * adapter)
1724 1.1 dyoung {
1725 1.1 dyoung struct ix_queue *que = adapter->queues;
1726 1.1 dyoung device_t dev = adapter->dev;
1727 1.1 dyoung int rid, memrid;
1728 1.1 dyoung
1729 1.1 dyoung memrid = PCIR_BAR(MSIX_BAR);
1730 1.1 dyoung
1731 1.1 dyoung /*
	** There is a slight possibility of a failure mode
	** in attach that will result in entering this function
	** before interrupt resources have been initialized, and
	** in that case we do not want to execute the loops below.
	** We can detect this reliably by the state of the adapter
	** res pointer.
1738 1.1 dyoung */
1739 1.1 dyoung if (adapter->res == NULL)
1740 1.1 dyoung goto mem;
1741 1.1 dyoung
1742 1.1 dyoung /*
1743 1.1 dyoung ** Release all msix queue resources:
1744 1.1 dyoung */
1745 1.1 dyoung for (int i = 0; i < adapter->num_queues; i++, que++) {
1746 1.1 dyoung rid = que->msix + 1;
1747 1.1 dyoung if (que->tag != NULL) {
1748 1.1 dyoung bus_teardown_intr(dev, que->res, que->tag);
1749 1.1 dyoung que->tag = NULL;
1750 1.1 dyoung }
1751 1.1 dyoung if (que->res != NULL)
1752 1.1 dyoung bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
1753 1.1 dyoung }
1754 1.1 dyoung
1755 1.1 dyoung
1756 1.1 dyoung /* Clean the Legacy or Link interrupt last */
1757 1.1 dyoung if (adapter->mbxvec) /* we are doing MSIX */
1758 1.1 dyoung rid = adapter->mbxvec + 1;
1759 1.1 dyoung else
		rid = (adapter->msix != 0) ? 1 : 0;
1761 1.1 dyoung
1762 1.1 dyoung if (adapter->tag != NULL) {
1763 1.1 dyoung bus_teardown_intr(dev, adapter->res, adapter->tag);
1764 1.1 dyoung adapter->tag = NULL;
1765 1.1 dyoung }
1766 1.1 dyoung if (adapter->res != NULL)
1767 1.1 dyoung bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
1768 1.1 dyoung
1769 1.1 dyoung mem:
1770 1.1 dyoung if (adapter->msix)
1771 1.1 dyoung pci_release_msi(dev);
1772 1.1 dyoung
1773 1.1 dyoung if (adapter->msix_mem != NULL)
1774 1.1 dyoung bus_release_resource(dev, SYS_RES_MEMORY,
1775 1.1 dyoung memrid, adapter->msix_mem);
1776 1.1 dyoung
1777 1.1 dyoung if (adapter->pci_mem != NULL)
1778 1.1 dyoung bus_release_resource(dev, SYS_RES_MEMORY,
1779 1.1 dyoung PCIR_BAR(0), adapter->pci_mem);
1780 1.1 dyoung
1781 1.1 dyoung return;
1782 1.1 dyoung }
1783 1.1 dyoung
1784 1.1 dyoung /*********************************************************************
1785 1.1 dyoung *
1786 1.1 dyoung * Setup networking device structure and register an interface.
1787 1.1 dyoung *
1788 1.1 dyoung **********************************************************************/
1789 1.1 dyoung static void
1790 1.1 dyoung ixv_setup_interface(device_t dev, struct adapter *adapter)
1791 1.1 dyoung {
1792 1.1 dyoung struct ifnet *ifp;
1793 1.1 dyoung
1794 1.1 dyoung INIT_DEBUGOUT("ixv_setup_interface: begin");
1795 1.1 dyoung
1796 1.1 dyoung ifp = adapter->ifp = if_alloc(IFT_ETHER);
1797 1.1 dyoung if (ifp == NULL)
1798 1.1 dyoung panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
1799 1.1 dyoung if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1800 1.1 dyoung ifp->if_mtu = ETHERMTU;
1801 1.1 dyoung ifp->if_baudrate = 1000000000;
1802 1.1 dyoung ifp->if_init = ixv_init;
1803 1.1 dyoung ifp->if_softc = adapter;
1804 1.1 dyoung ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1805 1.1 dyoung ifp->if_ioctl = ixv_ioctl;
1806 1.1 dyoung #if __FreeBSD_version >= 800000
1807 1.1 dyoung ifp->if_transmit = ixv_mq_start;
1808 1.1 dyoung ifp->if_qflush = ixv_qflush;
1809 1.1 dyoung #else
1810 1.1 dyoung ifp->if_start = ixv_start;
1811 1.1 dyoung #endif
1812 1.1 dyoung ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
1813 1.1 dyoung
1814 1.1 dyoung ether_ifattach(ifp, adapter->hw.mac.addr);
1815 1.1 dyoung
1816 1.1 dyoung adapter->max_frame_size =
1817 1.1 dyoung ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1818 1.1 dyoung
1819 1.1 dyoung /*
1820 1.1 dyoung * Tell the upper layer(s) we support long frames.
1821 1.1 dyoung */
1822 1.1 dyoung ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1823 1.1 dyoung
1824 1.1 dyoung ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
1825 1.1 dyoung ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1826 1.1 dyoung ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_LRO;
1827 1.1 dyoung
1828 1.1 dyoung ifp->if_capenable = ifp->if_capabilities;
1829 1.1 dyoung
1830 1.1 dyoung /*
1831 1.1 dyoung * Specify the media types supported by this adapter and register
1832 1.1 dyoung * callbacks to update media and link information
1833 1.1 dyoung */
1834 1.1 dyoung ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
1835 1.1 dyoung ixv_media_status);
1836 1.1 dyoung ifmedia_add(&adapter->media, IFM_ETHER | IFM_FDX, 0, NULL);
1837 1.1 dyoung ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1838 1.1 dyoung ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1839 1.1 dyoung
1840 1.1 dyoung return;
1841 1.1 dyoung }
1842 1.1 dyoung
1843 1.1 dyoung static void
1844 1.1 dyoung ixv_config_link(struct adapter *adapter)
1845 1.1 dyoung {
1846 1.1 dyoung struct ixgbe_hw *hw = &adapter->hw;
1847 1.1 dyoung u32 autoneg, err = 0;
1848 1.1 dyoung bool negotiate = TRUE;
1849 1.1 dyoung
1850 1.1 dyoung if (hw->mac.ops.check_link)
1851 1.1 dyoung err = hw->mac.ops.check_link(hw, &autoneg,
1852 1.1 dyoung &adapter->link_up, FALSE);
1853 1.1 dyoung if (err)
1854 1.1 dyoung goto out;
1855 1.1 dyoung
1856 1.1 dyoung if (hw->mac.ops.setup_link)
1857 1.1 dyoung err = hw->mac.ops.setup_link(hw, autoneg,
1858 1.1 dyoung negotiate, adapter->link_up);
1859 1.1 dyoung out:
1860 1.1 dyoung return;
1861 1.1 dyoung }
1862 1.1 dyoung
1863 1.1 dyoung /********************************************************************
1864 1.1 dyoung * Manage DMA'able memory.
1865 1.1 dyoung *******************************************************************/
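/*
 * bus_dmamap_load() callback: stash the bus address of the single
 * DMA segment so the caller can program it into the hardware.
 */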
1866 1.1 dyoung static void
1867 1.1 dyoung ixv_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
1868 1.1 dyoung {
1869 1.1 dyoung if (error)
1870 1.1 dyoung return;
1871 1.1 dyoung *(bus_addr_t *) arg = segs->ds_addr;
1872 1.1 dyoung return;
1873 1.1 dyoung }
1874 1.1 dyoung
1875 1.1 dyoung static int
1876 1.1 dyoung ixv_dma_malloc(struct adapter *adapter, bus_size_t size,
1877 1.1 dyoung struct ixv_dma_alloc *dma, int mapflags)
1878 1.1 dyoung {
1879 1.1 dyoung device_t dev = adapter->dev;
1880 1.1 dyoung int r;
1881 1.1 dyoung
1882 1.1 dyoung r = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
1883 1.1 dyoung DBA_ALIGN, 0, /* alignment, bounds */
1884 1.1 dyoung BUS_SPACE_MAXADDR, /* lowaddr */
1885 1.1 dyoung BUS_SPACE_MAXADDR, /* highaddr */
1886 1.1 dyoung NULL, NULL, /* filter, filterarg */
1887 1.1 dyoung size, /* maxsize */
1888 1.1 dyoung 1, /* nsegments */
1889 1.1 dyoung size, /* maxsegsize */
1890 1.1 dyoung BUS_DMA_ALLOCNOW, /* flags */
1891 1.1 dyoung NULL, /* lockfunc */
1892 1.1 dyoung NULL, /* lockfuncarg */
1893 1.1 dyoung &dma->dma_tag);
1894 1.1 dyoung if (r != 0) {
1895 1.1 dyoung device_printf(dev,"ixv_dma_malloc: bus_dma_tag_create failed; "
1896 1.1 dyoung "error %u\n", r);
1897 1.1 dyoung goto fail_0;
1898 1.1 dyoung }
1899 1.1 dyoung r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
1900 1.1 dyoung BUS_DMA_NOWAIT, &dma->dma_map);
1901 1.1 dyoung if (r != 0) {
1902 1.1 dyoung device_printf(dev,"ixv_dma_malloc: bus_dmamem_alloc failed; "
1903 1.1 dyoung "error %u\n", r);
1904 1.1 dyoung goto fail_1;
1905 1.1 dyoung }
1906 1.1 dyoung r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
1907 1.1 dyoung size,
1908 1.1 dyoung ixv_dmamap_cb,
1909 1.1 dyoung &dma->dma_paddr,
1910 1.1 dyoung mapflags | BUS_DMA_NOWAIT);
1911 1.1 dyoung if (r != 0) {
1912 1.1 dyoung device_printf(dev,"ixv_dma_malloc: bus_dmamap_load failed; "
1913 1.1 dyoung "error %u\n", r);
1914 1.1 dyoung goto fail_2;
1915 1.1 dyoung }
1916 1.1 dyoung dma->dma_size = size;
1917 1.1 dyoung return (0);
1918 1.1 dyoung fail_2:
1919 1.1 dyoung bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1920 1.1 dyoung fail_1:
1921 1.1 dyoung bus_dma_tag_destroy(dma->dma_tag);
1922 1.1 dyoung fail_0:
1923 1.1 dyoung dma->dma_map = NULL;
1924 1.1 dyoung dma->dma_tag = NULL;
1925 1.1 dyoung return (r);
1926 1.1 dyoung }
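/*
 * Typical usage (a sketch): the caller gets back both views of the
 * allocation, dma_vaddr for the CPU and dma_paddr for the device:
 *
 *	struct ixv_dma_alloc dma;
 *
 *	if (ixv_dma_malloc(adapter, size, &dma, BUS_DMA_NOWAIT) == 0) {
 *		... program hardware with dma.dma_paddr,
 *		    touch the memory via dma.dma_vaddr ...
 *		ixv_dma_free(adapter, &dma);
 *	}
 */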
1927 1.1 dyoung
1928 1.1 dyoung static void
1929 1.1 dyoung ixv_dma_free(struct adapter *adapter, struct ixv_dma_alloc *dma)
1930 1.1 dyoung {
1931 1.1 dyoung bus_dmamap_sync(dma->dma_tag, dma->dma_map,
1932 1.1 dyoung BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1933 1.1 dyoung bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1934 1.1 dyoung bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1935 1.1 dyoung bus_dma_tag_destroy(dma->dma_tag);
1936 1.1 dyoung }
1937 1.1 dyoung
1938 1.1 dyoung
1939 1.1 dyoung /*********************************************************************
1940 1.1 dyoung *
1941 1.1 dyoung * Allocate memory for the transmit and receive rings, and then
1942 1.1 dyoung * the descriptors associated with each, called only once at attach.
1943 1.1 dyoung *
1944 1.1 dyoung **********************************************************************/
1945 1.1 dyoung static int
1946 1.1 dyoung ixv_allocate_queues(struct adapter *adapter)
1947 1.1 dyoung {
1948 1.1 dyoung device_t dev = adapter->dev;
1949 1.1 dyoung struct ix_queue *que;
1950 1.1 dyoung struct tx_ring *txr;
1951 1.1 dyoung struct rx_ring *rxr;
1952 1.1 dyoung int rsize, tsize, error = 0;
1953 1.1 dyoung int txconf = 0, rxconf = 0;
1954 1.1 dyoung
1955 1.1 dyoung /* First allocate the top level queue structs */
1956 1.1 dyoung if (!(adapter->queues =
1957 1.1 dyoung (struct ix_queue *) malloc(sizeof(struct ix_queue) *
1958 1.1 dyoung adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
1959 1.1 dyoung device_printf(dev, "Unable to allocate queue memory\n");
1960 1.1 dyoung error = ENOMEM;
1961 1.1 dyoung goto fail;
1962 1.1 dyoung }
1963 1.1 dyoung
1964 1.1 dyoung /* First allocate the TX ring struct memory */
1965 1.1 dyoung if (!(adapter->tx_rings =
1966 1.1 dyoung (struct tx_ring *) malloc(sizeof(struct tx_ring) *
1967 1.1 dyoung adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
1968 1.1 dyoung device_printf(dev, "Unable to allocate TX ring memory\n");
1969 1.1 dyoung error = ENOMEM;
1970 1.1 dyoung goto tx_fail;
1971 1.1 dyoung }
1972 1.1 dyoung
1973 1.1 dyoung /* Next allocate the RX */
1974 1.1 dyoung if (!(adapter->rx_rings =
1975 1.1 dyoung (struct rx_ring *) malloc(sizeof(struct rx_ring) *
1976 1.1 dyoung adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
1977 1.1 dyoung device_printf(dev, "Unable to allocate RX ring memory\n");
1978 1.1 dyoung error = ENOMEM;
1979 1.1 dyoung goto rx_fail;
1980 1.1 dyoung }
1981 1.1 dyoung
1982 1.1 dyoung /* For the ring itself */
1983 1.1 dyoung tsize = roundup2(adapter->num_tx_desc *
1984 1.1 dyoung sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
1985 1.1 dyoung
1986 1.1 dyoung /*
1987 1.1 dyoung * Now set up the TX queues, txconf is needed to handle the
1988 1.1 dyoung * possibility that things fail midcourse and we need to
1989 1.1 dyoung * undo memory gracefully
1990 1.1 dyoung */
1991 1.1 dyoung for (int i = 0; i < adapter->num_queues; i++, txconf++) {
1992 1.1 dyoung /* Set up some basics */
1993 1.1 dyoung txr = &adapter->tx_rings[i];
1994 1.1 dyoung txr->adapter = adapter;
1995 1.1 dyoung txr->me = i;
1996 1.1 dyoung
1997 1.1 dyoung /* Initialize the TX side lock */
1998 1.1 dyoung snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
1999 1.1 dyoung device_get_nameunit(dev), txr->me);
2000 1.1 dyoung mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);
2001 1.1 dyoung
2002 1.1 dyoung if (ixv_dma_malloc(adapter, tsize,
2003 1.1 dyoung &txr->txdma, BUS_DMA_NOWAIT)) {
2004 1.1 dyoung device_printf(dev,
2005 1.1 dyoung "Unable to allocate TX Descriptor memory\n");
2006 1.1 dyoung error = ENOMEM;
2007 1.1 dyoung goto err_tx_desc;
2008 1.1 dyoung }
2009 1.1 dyoung txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
2010 1.1 dyoung bzero((void *)txr->tx_base, tsize);
2011 1.1 dyoung
2012 1.1 dyoung /* Now allocate transmit buffers for the ring */
2013 1.1 dyoung if (ixv_allocate_transmit_buffers(txr)) {
2014 1.1 dyoung device_printf(dev,
2015 1.1 dyoung "Critical Failure setting up transmit buffers\n");
2016 1.1 dyoung error = ENOMEM;
2017 1.1 dyoung goto err_tx_desc;
2018 1.1 dyoung }
2019 1.1 dyoung #if __FreeBSD_version >= 800000
2020 1.1 dyoung /* Allocate a buf ring */
2021 1.1 dyoung txr->br = buf_ring_alloc(IXV_BR_SIZE, M_DEVBUF,
2022 1.1 dyoung M_WAITOK, &txr->tx_mtx);
2023 1.1 dyoung if (txr->br == NULL) {
2024 1.1 dyoung device_printf(dev,
2025 1.1 dyoung "Critical Failure setting up buf ring\n");
2026 1.1 dyoung error = ENOMEM;
2027 1.1 dyoung goto err_tx_desc;
2028 1.1 dyoung }
2029 1.1 dyoung #endif
2030 1.1 dyoung }
2031 1.1 dyoung
2032 1.1 dyoung /*
2033 1.1 dyoung * Next the RX queues...
2034 1.1 dyoung */
2035 1.1 dyoung rsize = roundup2(adapter->num_rx_desc *
2036 1.1 dyoung sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
2037 1.1 dyoung for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
2038 1.1 dyoung rxr = &adapter->rx_rings[i];
2039 1.1 dyoung /* Set up some basics */
2040 1.1 dyoung rxr->adapter = adapter;
2041 1.1 dyoung rxr->me = i;
2042 1.1 dyoung
2043 1.1 dyoung /* Initialize the RX side lock */
2044 1.1 dyoung snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2045 1.1 dyoung device_get_nameunit(dev), rxr->me);
2046 1.1 dyoung mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);
2047 1.1 dyoung
2048 1.1 dyoung if (ixv_dma_malloc(adapter, rsize,
2049 1.1 dyoung &rxr->rxdma, BUS_DMA_NOWAIT)) {
2050 1.1 dyoung device_printf(dev,
			    "Unable to allocate RX Descriptor memory\n");
2052 1.1 dyoung error = ENOMEM;
2053 1.1 dyoung goto err_rx_desc;
2054 1.1 dyoung }
2055 1.1 dyoung rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
2056 1.1 dyoung bzero((void *)rxr->rx_base, rsize);
2057 1.1 dyoung
2058 1.1 dyoung /* Allocate receive buffers for the ring*/
2059 1.1 dyoung if (ixv_allocate_receive_buffers(rxr)) {
2060 1.1 dyoung device_printf(dev,
2061 1.1 dyoung "Critical Failure setting up receive buffers\n");
2062 1.1 dyoung error = ENOMEM;
2063 1.1 dyoung goto err_rx_desc;
2064 1.1 dyoung }
2065 1.1 dyoung }
2066 1.1 dyoung
2067 1.1 dyoung /*
2068 1.1 dyoung ** Finally set up the queue holding structs
2069 1.1 dyoung */
2070 1.1 dyoung for (int i = 0; i < adapter->num_queues; i++) {
2071 1.1 dyoung que = &adapter->queues[i];
2072 1.1 dyoung que->adapter = adapter;
2073 1.1 dyoung que->txr = &adapter->tx_rings[i];
2074 1.1 dyoung que->rxr = &adapter->rx_rings[i];
2075 1.1 dyoung }
2076 1.1 dyoung
2077 1.1 dyoung return (0);
2078 1.1 dyoung
2079 1.1 dyoung err_rx_desc:
2080 1.1 dyoung for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
2081 1.1 dyoung ixv_dma_free(adapter, &rxr->rxdma);
2082 1.1 dyoung err_tx_desc:
2083 1.1 dyoung for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
2084 1.1 dyoung ixv_dma_free(adapter, &txr->txdma);
2085 1.1 dyoung free(adapter->rx_rings, M_DEVBUF);
2086 1.1 dyoung rx_fail:
2087 1.1 dyoung free(adapter->tx_rings, M_DEVBUF);
2088 1.1 dyoung tx_fail:
2089 1.1 dyoung free(adapter->queues, M_DEVBUF);
2090 1.1 dyoung fail:
2091 1.1 dyoung return (error);
2092 1.1 dyoung }
2093 1.1 dyoung
2094 1.1 dyoung
2095 1.1 dyoung /*********************************************************************
2096 1.1 dyoung *
2097 1.1 dyoung * Allocate memory for tx_buffer structures. The tx_buffer stores all
2098 1.1 dyoung * the information needed to transmit a packet on the wire. This is
2099 1.1 dyoung * called only once at attach, setup is done every reset.
2100 1.1 dyoung *
2101 1.1 dyoung **********************************************************************/
2102 1.1 dyoung static int
2103 1.1 dyoung ixv_allocate_transmit_buffers(struct tx_ring *txr)
2104 1.1 dyoung {
2105 1.1 dyoung struct adapter *adapter = txr->adapter;
2106 1.1 dyoung device_t dev = adapter->dev;
2107 1.1 dyoung struct ixv_tx_buf *txbuf;
2108 1.1 dyoung int error, i;
2109 1.1 dyoung
2110 1.1 dyoung /*
2111 1.1 dyoung * Setup DMA descriptor areas.
2112 1.1 dyoung */
	if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
2114 1.1 dyoung 1, 0, /* alignment, bounds */
2115 1.1 dyoung BUS_SPACE_MAXADDR, /* lowaddr */
2116 1.1 dyoung BUS_SPACE_MAXADDR, /* highaddr */
2117 1.1 dyoung NULL, NULL, /* filter, filterarg */
2118 1.1 dyoung IXV_TSO_SIZE, /* maxsize */
2119 1.1 dyoung 32, /* nsegments */
2120 1.1 dyoung PAGE_SIZE, /* maxsegsize */
2121 1.1 dyoung 0, /* flags */
2122 1.1 dyoung NULL, /* lockfunc */
2123 1.1 dyoung NULL, /* lockfuncarg */
2124 1.1 dyoung &txr->txtag))) {
2125 1.1 dyoung device_printf(dev,"Unable to allocate TX DMA tag\n");
2126 1.1 dyoung goto fail;
2127 1.1 dyoung }
2128 1.1 dyoung
2129 1.1 dyoung if (!(txr->tx_buffers =
2130 1.1 dyoung (struct ixv_tx_buf *) malloc(sizeof(struct ixv_tx_buf) *
2131 1.1 dyoung adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2132 1.1 dyoung device_printf(dev, "Unable to allocate tx_buffer memory\n");
2133 1.1 dyoung error = ENOMEM;
2134 1.1 dyoung goto fail;
2135 1.1 dyoung }
2136 1.1 dyoung
2137 1.1 dyoung /* Create the descriptor buffer dma maps */
2138 1.1 dyoung txbuf = txr->tx_buffers;
2139 1.1 dyoung for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
2140 1.1 dyoung error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
2141 1.1 dyoung if (error != 0) {
2142 1.1 dyoung device_printf(dev, "Unable to create TX DMA map\n");
2143 1.1 dyoung goto fail;
2144 1.1 dyoung }
2145 1.1 dyoung }
2146 1.1 dyoung
2147 1.1 dyoung return 0;
2148 1.1 dyoung fail:
2149 1.1 dyoung /* We free all, it handles case where we are in the middle */
2150 1.1 dyoung ixv_free_transmit_structures(adapter);
2151 1.1 dyoung return (error);
2152 1.1 dyoung }
2153 1.1 dyoung
2154 1.1 dyoung /*********************************************************************
2155 1.1 dyoung *
2156 1.1 dyoung * Initialize a transmit ring.
2157 1.1 dyoung *
2158 1.1 dyoung **********************************************************************/
2159 1.1 dyoung static void
2160 1.1 dyoung ixv_setup_transmit_ring(struct tx_ring *txr)
2161 1.1 dyoung {
2162 1.1 dyoung struct adapter *adapter = txr->adapter;
2163 1.1 dyoung struct ixv_tx_buf *txbuf;
2164 1.1 dyoung int i;
2165 1.1 dyoung
2166 1.1 dyoung /* Clear the old ring contents */
2167 1.1 dyoung IXV_TX_LOCK(txr);
2168 1.1 dyoung bzero((void *)txr->tx_base,
2169 1.1 dyoung (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
2170 1.1 dyoung /* Reset indices */
2171 1.1 dyoung txr->next_avail_desc = 0;
2172 1.1 dyoung txr->next_to_clean = 0;
2173 1.1 dyoung
2174 1.1 dyoung /* Free any existing tx buffers. */
2175 1.1 dyoung txbuf = txr->tx_buffers;
2176 1.1 dyoung for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
2177 1.1 dyoung if (txbuf->m_head != NULL) {
2178 1.1 dyoung bus_dmamap_sync(txr->txtag, txbuf->map,
2179 1.1 dyoung BUS_DMASYNC_POSTWRITE);
2180 1.1 dyoung bus_dmamap_unload(txr->txtag, txbuf->map);
2181 1.1 dyoung m_freem(txbuf->m_head);
2182 1.1 dyoung txbuf->m_head = NULL;
2183 1.1 dyoung }
2184 1.1 dyoung /* Clear the EOP index */
2185 1.1 dyoung txbuf->eop_index = -1;
2186 1.1 dyoung }
2187 1.1 dyoung
2188 1.1 dyoung /* Set number of descriptors available */
2189 1.1 dyoung txr->tx_avail = adapter->num_tx_desc;
2190 1.1 dyoung
2191 1.1 dyoung bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2192 1.1 dyoung BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2193 1.1 dyoung IXV_TX_UNLOCK(txr);
2194 1.1 dyoung }
2195 1.1 dyoung
2196 1.1 dyoung /*********************************************************************
2197 1.1 dyoung *
2198 1.1 dyoung * Initialize all transmit rings.
2199 1.1 dyoung *
2200 1.1 dyoung **********************************************************************/
2201 1.1 dyoung static int
2202 1.1 dyoung ixv_setup_transmit_structures(struct adapter *adapter)
2203 1.1 dyoung {
2204 1.1 dyoung struct tx_ring *txr = adapter->tx_rings;
2205 1.1 dyoung
2206 1.1 dyoung for (int i = 0; i < adapter->num_queues; i++, txr++)
2207 1.1 dyoung ixv_setup_transmit_ring(txr);
2208 1.1 dyoung
2209 1.1 dyoung return (0);
2210 1.1 dyoung }
2211 1.1 dyoung
2212 1.1 dyoung /*********************************************************************
2213 1.1 dyoung *
2214 1.1 dyoung * Enable transmit unit.
2215 1.1 dyoung *
2216 1.1 dyoung **********************************************************************/
2217 1.1 dyoung static void
2218 1.1 dyoung ixv_initialize_transmit_units(struct adapter *adapter)
2219 1.1 dyoung {
2220 1.1 dyoung struct tx_ring *txr = adapter->tx_rings;
2221 1.1 dyoung struct ixgbe_hw *hw = &adapter->hw;
2222 1.1 dyoung
2223 1.1 dyoung
2224 1.1 dyoung for (int i = 0; i < adapter->num_queues; i++, txr++) {
2225 1.1 dyoung u64 tdba = txr->txdma.dma_paddr;
2226 1.1 dyoung u32 txctrl, txdctl;
2227 1.1 dyoung
2228 1.1 dyoung /* Set WTHRESH to 8, burst writeback */
2229 1.1 dyoung txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
2230 1.1 dyoung txdctl |= (8 << 16);
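		/* WTHRESH is bits 22:16 of TXDCTL */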
2231 1.1 dyoung IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
2232 1.1 dyoung /* Now enable */
2233 1.1 dyoung txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
2234 1.1 dyoung txdctl |= IXGBE_TXDCTL_ENABLE;
2235 1.1 dyoung IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
2236 1.1 dyoung
2237 1.1 dyoung /* Set the HW Tx Head and Tail indices */
2238 1.1 dyoung IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
2239 1.1 dyoung IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
2240 1.1 dyoung
2241 1.1 dyoung /* Setup Transmit Descriptor Cmd Settings */
2242 1.1 dyoung txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
2243 1.1 dyoung txr->watchdog_check = FALSE;
2244 1.1 dyoung
2245 1.1 dyoung /* Set Ring parameters */
2246 1.1 dyoung IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
2247 1.1 dyoung (tdba & 0x00000000ffffffffULL));
2248 1.1 dyoung IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
2249 1.1 dyoung IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
2250 1.1 dyoung adapter->num_tx_desc *
2251 1.1 dyoung sizeof(struct ixgbe_legacy_tx_desc));
2252 1.1 dyoung txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
2253 1.1 dyoung txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
2254 1.1 dyoung IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
2256 1.1 dyoung }
2257 1.1 dyoung
2258 1.1 dyoung return;
2259 1.1 dyoung }
2260 1.1 dyoung
2261 1.1 dyoung /*********************************************************************
2262 1.1 dyoung *
2263 1.1 dyoung * Free all transmit rings.
2264 1.1 dyoung *
2265 1.1 dyoung **********************************************************************/
2266 1.1 dyoung static void
2267 1.1 dyoung ixv_free_transmit_structures(struct adapter *adapter)
2268 1.1 dyoung {
2269 1.1 dyoung struct tx_ring *txr = adapter->tx_rings;
2270 1.1 dyoung
2271 1.1 dyoung for (int i = 0; i < adapter->num_queues; i++, txr++) {
2272 1.1 dyoung IXV_TX_LOCK(txr);
2273 1.1 dyoung ixv_free_transmit_buffers(txr);
2274 1.1 dyoung ixv_dma_free(adapter, &txr->txdma);
2275 1.1 dyoung IXV_TX_UNLOCK(txr);
2276 1.1 dyoung IXV_TX_LOCK_DESTROY(txr);
2277 1.1 dyoung }
2278 1.1 dyoung free(adapter->tx_rings, M_DEVBUF);
2279 1.1 dyoung }
2280 1.1 dyoung
2281 1.1 dyoung /*********************************************************************
2282 1.1 dyoung *
2283 1.1 dyoung * Free transmit ring related data structures.
2284 1.1 dyoung *
2285 1.1 dyoung **********************************************************************/
2286 1.1 dyoung static void
2287 1.1 dyoung ixv_free_transmit_buffers(struct tx_ring *txr)
2288 1.1 dyoung {
2289 1.1 dyoung struct adapter *adapter = txr->adapter;
2290 1.1 dyoung struct ixv_tx_buf *tx_buffer;
2291 1.1 dyoung int i;
2292 1.1 dyoung
2293 1.1 dyoung INIT_DEBUGOUT("free_transmit_ring: begin");
2294 1.1 dyoung
2295 1.1 dyoung if (txr->tx_buffers == NULL)
2296 1.1 dyoung return;
2297 1.1 dyoung
2298 1.1 dyoung tx_buffer = txr->tx_buffers;
2299 1.1 dyoung for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2300 1.1 dyoung if (tx_buffer->m_head != NULL) {
2301 1.1 dyoung bus_dmamap_sync(txr->txtag, tx_buffer->map,
2302 1.1 dyoung BUS_DMASYNC_POSTWRITE);
2303 1.1 dyoung bus_dmamap_unload(txr->txtag,
2304 1.1 dyoung tx_buffer->map);
2305 1.1 dyoung m_freem(tx_buffer->m_head);
2306 1.1 dyoung tx_buffer->m_head = NULL;
2307 1.1 dyoung if (tx_buffer->map != NULL) {
2308 1.1 dyoung bus_dmamap_destroy(txr->txtag,
2309 1.1 dyoung tx_buffer->map);
2310 1.1 dyoung tx_buffer->map = NULL;
2311 1.1 dyoung }
2312 1.1 dyoung } else if (tx_buffer->map != NULL) {
2313 1.1 dyoung bus_dmamap_unload(txr->txtag,
2314 1.1 dyoung tx_buffer->map);
2315 1.1 dyoung bus_dmamap_destroy(txr->txtag,
2316 1.1 dyoung tx_buffer->map);
2317 1.1 dyoung tx_buffer->map = NULL;
2318 1.1 dyoung }
2319 1.1 dyoung }
2320 1.1 dyoung #if __FreeBSD_version >= 800000
2321 1.1 dyoung if (txr->br != NULL)
2322 1.1 dyoung buf_ring_free(txr->br, M_DEVBUF);
2323 1.1 dyoung #endif
2324 1.1 dyoung if (txr->tx_buffers != NULL) {
2325 1.1 dyoung free(txr->tx_buffers, M_DEVBUF);
2326 1.1 dyoung txr->tx_buffers = NULL;
2327 1.1 dyoung }
2328 1.1 dyoung if (txr->txtag != NULL) {
2329 1.1 dyoung bus_dma_tag_destroy(txr->txtag);
2330 1.1 dyoung txr->txtag = NULL;
2331 1.1 dyoung }
2332 1.1 dyoung return;
2333 1.1 dyoung }
2334 1.1 dyoung
2335 1.1 dyoung /*********************************************************************
2336 1.1 dyoung *
2337 1.1 dyoung * Advanced Context Descriptor setup for VLAN or CSUM
2338 1.1 dyoung *
2339 1.1 dyoung **********************************************************************/
2340 1.1 dyoung
2341 1.1 dyoung static boolean_t
2342 1.1 dyoung ixv_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
2343 1.1 dyoung {
2344 1.1 dyoung struct adapter *adapter = txr->adapter;
2345 1.1 dyoung struct ixgbe_adv_tx_context_desc *TXD;
2346 1.1 dyoung struct ixv_tx_buf *tx_buffer;
2347 1.1 dyoung u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2348 1.1 dyoung struct ether_vlan_header *eh;
2349 1.1 dyoung struct ip *ip;
2350 1.1 dyoung struct ip6_hdr *ip6;
2351 1.1 dyoung int ehdrlen, ip_hlen = 0;
2352 1.1 dyoung u16 etype;
2353 1.1 dyoung u8 ipproto = 0;
2354 1.1 dyoung bool offload = TRUE;
2355 1.1 dyoung int ctxd = txr->next_avail_desc;
2356 1.1 dyoung u16 vtag = 0;
2357 1.1 dyoung
2358 1.1 dyoung
2359 1.1 dyoung if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
2360 1.1 dyoung offload = FALSE;
2361 1.1 dyoung
2362 1.1 dyoung
2363 1.1 dyoung tx_buffer = &txr->tx_buffers[ctxd];
2364 1.1 dyoung TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
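	/*
	 * A context descriptor occupies a ring slot but carries no packet
	 * data; it programs the offload engine for the data descriptors
	 * that follow it.
	 */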
2365 1.1 dyoung
2366 1.1 dyoung /*
2367 1.1 dyoung ** In advanced descriptors the vlan tag must
2368 1.1 dyoung ** be placed into the descriptor itself.
2369 1.1 dyoung */
2370 1.1 dyoung if (mp->m_flags & M_VLANTAG) {
2371 1.1 dyoung vtag = htole16(mp->m_pkthdr.ether_vtag);
2372 1.1 dyoung vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
2373 1.1 dyoung } else if (offload == FALSE)
2374 1.1 dyoung return FALSE;
2375 1.1 dyoung
2376 1.1 dyoung /*
2377 1.1 dyoung * Determine where frame payload starts.
2378 1.1 dyoung * Jump over vlan headers if already present,
2379 1.1 dyoung * helpful for QinQ too.
2380 1.1 dyoung */
2381 1.1 dyoung eh = mtod(mp, struct ether_vlan_header *);
2382 1.1 dyoung if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2383 1.1 dyoung etype = ntohs(eh->evl_proto);
2384 1.1 dyoung ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2385 1.1 dyoung } else {
2386 1.1 dyoung etype = ntohs(eh->evl_encap_proto);
2387 1.1 dyoung ehdrlen = ETHER_HDR_LEN;
2388 1.1 dyoung }
2389 1.1 dyoung
2390 1.1 dyoung /* Set the ether header length */
2391 1.1 dyoung vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
2392 1.1 dyoung
2393 1.1 dyoung switch (etype) {
2394 1.1 dyoung case ETHERTYPE_IP:
2395 1.1 dyoung ip = (struct ip *)(mp->m_data + ehdrlen);
2396 1.1 dyoung ip_hlen = ip->ip_hl << 2;
2397 1.1 dyoung if (mp->m_len < ehdrlen + ip_hlen)
2398 1.1 dyoung return (FALSE);
2399 1.1 dyoung ipproto = ip->ip_p;
2400 1.1 dyoung type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2401 1.1 dyoung break;
2402 1.1 dyoung case ETHERTYPE_IPV6:
2403 1.1 dyoung ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2404 1.1 dyoung ip_hlen = sizeof(struct ip6_hdr);
2405 1.1 dyoung if (mp->m_len < ehdrlen + ip_hlen)
2406 1.1 dyoung return (FALSE);
2407 1.1 dyoung ipproto = ip6->ip6_nxt;
2408 1.1 dyoung type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
2409 1.1 dyoung break;
2410 1.1 dyoung default:
2411 1.1 dyoung offload = FALSE;
2412 1.1 dyoung break;
2413 1.1 dyoung }
2414 1.1 dyoung
2415 1.1 dyoung vlan_macip_lens |= ip_hlen;
2416 1.1 dyoung type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2417 1.1 dyoung
2418 1.1 dyoung switch (ipproto) {
2419 1.1 dyoung case IPPROTO_TCP:
2420 1.1 dyoung if (mp->m_pkthdr.csum_flags & CSUM_TCP)
2421 1.1 dyoung type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2422 1.1 dyoung break;
2423 1.1 dyoung
2424 1.1 dyoung case IPPROTO_UDP:
2425 1.1 dyoung if (mp->m_pkthdr.csum_flags & CSUM_UDP)
2426 1.1 dyoung type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
2427 1.1 dyoung break;
2428 1.1 dyoung
2429 1.1 dyoung #if __FreeBSD_version >= 800000
2430 1.1 dyoung case IPPROTO_SCTP:
2431 1.1 dyoung if (mp->m_pkthdr.csum_flags & CSUM_SCTP)
2432 1.1 dyoung type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
2433 1.1 dyoung break;
2434 1.1 dyoung #endif
2435 1.1 dyoung default:
2436 1.1 dyoung offload = FALSE;
2437 1.1 dyoung break;
2438 1.1 dyoung }
2439 1.1 dyoung
2440 1.1 dyoung /* Now copy bits into descriptor */
2441 1.1 dyoung TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
2442 1.1 dyoung TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
2443 1.1 dyoung TXD->seqnum_seed = htole32(0);
2444 1.1 dyoung TXD->mss_l4len_idx = htole32(0);
2445 1.1 dyoung
2446 1.1 dyoung tx_buffer->m_head = NULL;
2447 1.1 dyoung tx_buffer->eop_index = -1;
2448 1.1 dyoung
2449 1.1 dyoung /* We've consumed the first desc, adjust counters */
2450 1.1 dyoung if (++ctxd == adapter->num_tx_desc)
2451 1.1 dyoung ctxd = 0;
2452 1.1 dyoung txr->next_avail_desc = ctxd;
2453 1.1 dyoung --txr->tx_avail;
2454 1.1 dyoung
2455 1.1 dyoung return (offload);
2456 1.1 dyoung }
2457 1.1 dyoung
2458 1.1 dyoung /**********************************************************************
2459 1.1 dyoung *
2460 1.1 dyoung * Setup work for hardware segmentation offload (TSO) on
2461 1.1 dyoung * adapters using advanced tx descriptors
2462 1.1 dyoung *
2463 1.1 dyoung **********************************************************************/
2464 1.1 dyoung static boolean_t
2465 1.1 dyoung ixv_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
2466 1.1 dyoung {
2467 1.1 dyoung struct adapter *adapter = txr->adapter;
2468 1.1 dyoung struct ixgbe_adv_tx_context_desc *TXD;
2469 1.1 dyoung struct ixv_tx_buf *tx_buffer;
2470 1.1 dyoung u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2471 1.1 dyoung u32 mss_l4len_idx = 0;
2472 1.1 dyoung u16 vtag = 0;
2473 1.1 dyoung int ctxd, ehdrlen, hdrlen, ip_hlen, tcp_hlen;
2474 1.1 dyoung struct ether_vlan_header *eh;
2475 1.1 dyoung struct ip *ip;
2476 1.1 dyoung struct tcphdr *th;
2477 1.1 dyoung
2478 1.1 dyoung
2479 1.1 dyoung /*
2480 1.1 dyoung * Determine where frame payload starts.
2481 1.1 dyoung * Jump over vlan headers if already present
2482 1.1 dyoung */
2483 1.1 dyoung eh = mtod(mp, struct ether_vlan_header *);
2484 1.1 dyoung if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
2485 1.1 dyoung ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2486 1.1 dyoung else
2487 1.1 dyoung ehdrlen = ETHER_HDR_LEN;
2488 1.1 dyoung
2489 1.1 dyoung /* Ensure we have at least the IP+TCP header in the first mbuf. */
2490 1.1 dyoung if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
2491 1.1 dyoung return FALSE;
2492 1.1 dyoung
2493 1.1 dyoung ctxd = txr->next_avail_desc;
2494 1.1 dyoung tx_buffer = &txr->tx_buffers[ctxd];
2495 1.1 dyoung TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
2496 1.1 dyoung
2497 1.1 dyoung ip = (struct ip *)(mp->m_data + ehdrlen);
2498 1.1 dyoung if (ip->ip_p != IPPROTO_TCP)
		return FALSE;
2500 1.1 dyoung ip->ip_sum = 0;
2501 1.1 dyoung ip_hlen = ip->ip_hl << 2;
2502 1.1 dyoung th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
2503 1.1 dyoung th->th_sum = in_pseudo(ip->ip_src.s_addr,
2504 1.1 dyoung ip->ip_dst.s_addr, htons(IPPROTO_TCP));
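	/* the pseudo-header sum omits the length; hardware inserts it per segment */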
2505 1.1 dyoung tcp_hlen = th->th_off << 2;
2506 1.1 dyoung hdrlen = ehdrlen + ip_hlen + tcp_hlen;
2507 1.1 dyoung
2508 1.1 dyoung /* This is used in the transmit desc in encap */
2509 1.1 dyoung *paylen = mp->m_pkthdr.len - hdrlen;
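	/* the TSO payload length: everything after the L2/L3/L4 headers */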
2510 1.1 dyoung
2511 1.1 dyoung /* VLAN MACLEN IPLEN */
2512 1.1 dyoung if (mp->m_flags & M_VLANTAG) {
2513 1.1 dyoung vtag = htole16(mp->m_pkthdr.ether_vtag);
2514 1.1 dyoung vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
2515 1.1 dyoung }
2516 1.1 dyoung
2517 1.1 dyoung vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
2518 1.1 dyoung vlan_macip_lens |= ip_hlen;
2519 1.1 dyoung TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
2520 1.1 dyoung
2521 1.1 dyoung /* ADV DTYPE TUCMD */
2522 1.1 dyoung type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2523 1.1 dyoung type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2524 1.1 dyoung type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2525 1.1 dyoung TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
2526 1.1 dyoung
2527 1.1 dyoung
2528 1.1 dyoung /* MSS L4LEN IDX */
2529 1.1 dyoung mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
2530 1.1 dyoung mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
2531 1.1 dyoung TXD->mss_l4len_idx = htole32(mss_l4len_idx);
2532 1.1 dyoung
2533 1.1 dyoung TXD->seqnum_seed = htole32(0);
2534 1.1 dyoung tx_buffer->m_head = NULL;
2535 1.1 dyoung tx_buffer->eop_index = -1;
2536 1.1 dyoung
2537 1.1 dyoung if (++ctxd == adapter->num_tx_desc)
2538 1.1 dyoung ctxd = 0;
2539 1.1 dyoung
2540 1.1 dyoung txr->tx_avail--;
2541 1.1 dyoung txr->next_avail_desc = ctxd;
2542 1.1 dyoung return TRUE;
2543 1.1 dyoung }
2544 1.1 dyoung
2545 1.1 dyoung
2546 1.1 dyoung /**********************************************************************
2547 1.1 dyoung *
2548 1.1 dyoung * Examine each tx_buffer in the used queue. If the hardware is done
2549 1.1 dyoung * processing the packet then free associated resources. The
2550 1.1 dyoung * tx_buffer is put back on the free queue.
2551 1.1 dyoung *
2552 1.1 dyoung **********************************************************************/
2553 1.1 dyoung static boolean_t
2554 1.1 dyoung ixv_txeof(struct tx_ring *txr)
2555 1.1 dyoung {
2556 1.1 dyoung struct adapter *adapter = txr->adapter;
2557 1.1 dyoung struct ifnet *ifp = adapter->ifp;
2558 1.1 dyoung u32 first, last, done;
2559 1.1 dyoung struct ixv_tx_buf *tx_buffer;
2560 1.1 dyoung struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
2561 1.1 dyoung
2562 1.1 dyoung mtx_assert(&txr->tx_mtx, MA_OWNED);
2563 1.1 dyoung
2564 1.1 dyoung if (txr->tx_avail == adapter->num_tx_desc)
2565 1.1 dyoung return FALSE;
2566 1.1 dyoung
2567 1.1 dyoung first = txr->next_to_clean;
2568 1.1 dyoung tx_buffer = &txr->tx_buffers[first];
2569 1.1 dyoung /* For cleanup we just use legacy struct */
2570 1.1 dyoung tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
2571 1.1 dyoung last = tx_buffer->eop_index;
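	/* an eop_index of -1 means no transmitted packet is pending here */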
2572 1.1 dyoung if (last == -1)
2573 1.1 dyoung return FALSE;
2574 1.1 dyoung eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
2575 1.1 dyoung
2576 1.1 dyoung /*
2577 1.1 dyoung ** Get the index of the first descriptor
2578 1.1 dyoung ** BEYOND the EOP and call that 'done'.
2579 1.1 dyoung ** I do this so the comparison in the
2580 1.1 dyoung ** inner while loop below can be simple
2581 1.1 dyoung */
2582 1.1 dyoung if (++last == adapter->num_tx_desc) last = 0;
2583 1.1 dyoung done = last;
2584 1.1 dyoung
2585 1.1 dyoung bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2586 1.1 dyoung BUS_DMASYNC_POSTREAD);
2587 1.1 dyoung /*
2588 1.1 dyoung ** Only the EOP descriptor of a packet now has the DD
2589 1.1 dyoung ** bit set, this is what we look for...
2590 1.1 dyoung */
2591 1.1 dyoung while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
2592 1.1 dyoung /* We clean the range of the packet */
2593 1.1 dyoung while (first != done) {
2594 1.1 dyoung tx_desc->upper.data = 0;
2595 1.1 dyoung tx_desc->lower.data = 0;
2596 1.1 dyoung tx_desc->buffer_addr = 0;
2597 1.1 dyoung ++txr->tx_avail;
2598 1.1 dyoung
2599 1.1 dyoung if (tx_buffer->m_head) {
2600 1.1 dyoung bus_dmamap_sync(txr->txtag,
2601 1.1 dyoung tx_buffer->map,
2602 1.1 dyoung BUS_DMASYNC_POSTWRITE);
2603 1.1 dyoung bus_dmamap_unload(txr->txtag,
2604 1.1 dyoung tx_buffer->map);
2605 1.1 dyoung m_freem(tx_buffer->m_head);
2606 1.1 dyoung tx_buffer->m_head = NULL;
2608 1.1 dyoung }
2609 1.1 dyoung tx_buffer->eop_index = -1;
2610 1.1 dyoung txr->watchdog_time = ticks;
2611 1.1 dyoung
2612 1.1 dyoung if (++first == adapter->num_tx_desc)
2613 1.1 dyoung first = 0;
2614 1.1 dyoung
2615 1.1 dyoung tx_buffer = &txr->tx_buffers[first];
2616 1.1 dyoung tx_desc =
2617 1.1 dyoung (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
2618 1.1 dyoung }
2619 1.1 dyoung ++ifp->if_opackets;
2620 1.1 dyoung /* See if there is more work now */
2621 1.1 dyoung last = tx_buffer->eop_index;
2622 1.1 dyoung if (last != -1) {
2623 1.1 dyoung eop_desc =
2624 1.1 dyoung (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
2625 1.1 dyoung /* Get next done point */
2626 1.1 dyoung if (++last == adapter->num_tx_desc) last = 0;
2627 1.1 dyoung done = last;
2628 1.1 dyoung } else
2629 1.1 dyoung break;
2630 1.1 dyoung }
2631 1.1 dyoung bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2632 1.1 dyoung BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2633 1.1 dyoung
2634 1.1 dyoung txr->next_to_clean = first;
2635 1.1 dyoung
2636 1.1 dyoung /*
2637 1.1 dyoung * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
2638 1.1 dyoung * it is OK to send packets. If there are no pending descriptors,
2639 1.1 dyoung * clear the timeout. Otherwise, if some descriptors have been freed,
2640 1.1 dyoung * restart the timeout.
2641 1.1 dyoung */
2642 1.1 dyoung if (txr->tx_avail > IXV_TX_CLEANUP_THRESHOLD) {
2643 1.1 dyoung ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2644 1.1 dyoung if (txr->tx_avail == adapter->num_tx_desc) {
2645 1.1 dyoung txr->watchdog_check = FALSE;
2646 1.1 dyoung return FALSE;
2647 1.1 dyoung }
2648 1.1 dyoung }
2649 1.1 dyoung
2650 1.1 dyoung return TRUE;
2651 1.1 dyoung }
2652 1.1 dyoung
2653 1.1 dyoung /*********************************************************************
2654 1.1 dyoung *
 *  Refresh mbuf buffers for the RX descriptor rings
 *   - now keeps its own state, so discards due to resource
 *     exhaustion are unnecessary; if an mbuf cannot be obtained
 *     it just returns, keeping its placeholder, and can simply
 *     be called again later to retry.
2660 1.1 dyoung *
2661 1.1 dyoung **********************************************************************/
2662 1.1 dyoung static void
2663 1.1 dyoung ixv_refresh_mbufs(struct rx_ring *rxr, int limit)
2664 1.1 dyoung {
2665 1.1 dyoung struct adapter *adapter = rxr->adapter;
2666 1.1 dyoung bus_dma_segment_t hseg[1];
2667 1.1 dyoung bus_dma_segment_t pseg[1];
2668 1.1 dyoung struct ixv_rx_buf *rxbuf;
2669 1.1 dyoung struct mbuf *mh, *mp;
2670 1.1 dyoung int i, nsegs, error, cleaned;
2671 1.1 dyoung
2672 1.1 dyoung i = rxr->next_to_refresh;
2673 1.1 dyoung cleaned = -1; /* Signify no completions */
2674 1.1 dyoung while (i != limit) {
2675 1.1 dyoung rxbuf = &rxr->rx_buffers[i];
2676 1.1 dyoung if ((rxbuf->m_head == NULL) && (rxr->hdr_split)) {
2677 1.1 dyoung mh = m_gethdr(M_DONTWAIT, MT_DATA);
2678 1.1 dyoung if (mh == NULL)
2679 1.1 dyoung goto update;
			mh->m_pkthdr.len = mh->m_len = MHLEN;
2682 1.1 dyoung mh->m_flags |= M_PKTHDR;
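			/* reserve 2 bytes so the IP header lands on a 4-byte boundary */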
2683 1.1 dyoung m_adj(mh, ETHER_ALIGN);
2684 1.1 dyoung /* Get the memory mapping */
2685 1.1 dyoung error = bus_dmamap_load_mbuf_sg(rxr->htag,
2686 1.1 dyoung rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
2687 1.1 dyoung if (error != 0) {
2688 1.1 dyoung printf("GET BUF: dmamap load"
2689 1.1 dyoung " failure - %d\n", error);
2690 1.1 dyoung m_free(mh);
2691 1.1 dyoung goto update;
2692 1.1 dyoung }
2693 1.1 dyoung rxbuf->m_head = mh;
2694 1.1 dyoung bus_dmamap_sync(rxr->htag, rxbuf->hmap,
2695 1.1 dyoung BUS_DMASYNC_PREREAD);
2696 1.1 dyoung rxr->rx_base[i].read.hdr_addr =
2697 1.1 dyoung htole64(hseg[0].ds_addr);
2698 1.1 dyoung }
2699 1.1 dyoung
2700 1.1 dyoung if (rxbuf->m_pack == NULL) {
2701 1.1 dyoung mp = m_getjcl(M_DONTWAIT, MT_DATA,
2702 1.1 dyoung M_PKTHDR, adapter->rx_mbuf_sz);
2703 1.1 dyoung if (mp == NULL)
2704 1.1 dyoung goto update;
2705 1.1 dyoung mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
2706 1.1 dyoung /* Get the memory mapping */
2707 1.1 dyoung error = bus_dmamap_load_mbuf_sg(rxr->ptag,
2708 1.1 dyoung rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
2709 1.1 dyoung if (error != 0) {
2710 1.1 dyoung printf("GET BUF: dmamap load"
2711 1.1 dyoung " failure - %d\n", error);
2712 1.1 dyoung m_free(mp);
2713 1.1 dyoung goto update;
2714 1.1 dyoung }
2715 1.1 dyoung rxbuf->m_pack = mp;
2716 1.1 dyoung bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
2717 1.1 dyoung BUS_DMASYNC_PREREAD);
2718 1.1 dyoung rxr->rx_base[i].read.pkt_addr =
2719 1.1 dyoung htole64(pseg[0].ds_addr);
2720 1.1 dyoung }
2721 1.1 dyoung
2722 1.1 dyoung cleaned = i;
2723 1.1 dyoung /* Calculate next index */
2724 1.1 dyoung if (++i == adapter->num_rx_desc)
2725 1.1 dyoung i = 0;
2726 1.1 dyoung /* This is the work marker for refresh */
2727 1.1 dyoung rxr->next_to_refresh = i;
2728 1.1 dyoung }
2729 1.1 dyoung update:
2730 1.1 dyoung if (cleaned != -1) /* If we refreshed some, bump tail */
2731 1.1 dyoung IXGBE_WRITE_REG(&adapter->hw,
2732 1.1 dyoung IXGBE_VFRDT(rxr->me), cleaned);
2733 1.1 dyoung return;
2734 1.1 dyoung }
2735 1.1 dyoung
2736 1.1 dyoung /*********************************************************************
2737 1.1 dyoung *
2738 1.1 dyoung * Allocate memory for rx_buffer structures. Since we use one
2739 1.1 dyoung * rx_buffer per received packet, the maximum number of rx_buffer's
2740 1.1 dyoung * that we'll need is equal to the number of receive descriptors
2741 1.1 dyoung * that we've allocated.
2742 1.1 dyoung *
2743 1.1 dyoung **********************************************************************/
2744 1.1 dyoung static int
2745 1.1 dyoung ixv_allocate_receive_buffers(struct rx_ring *rxr)
2746 1.1 dyoung {
2747 1.1 dyoung struct adapter *adapter = rxr->adapter;
2748 1.1 dyoung device_t dev = adapter->dev;
2749 1.1 dyoung struct ixv_rx_buf *rxbuf;
2750 1.1 dyoung int i, bsize, error;
2751 1.1 dyoung
2752 1.1 dyoung bsize = sizeof(struct ixv_rx_buf) * adapter->num_rx_desc;
2753 1.1 dyoung if (!(rxr->rx_buffers =
2754 1.1 dyoung (struct ixv_rx_buf *) malloc(bsize,
2755 1.1 dyoung M_DEVBUF, M_NOWAIT | M_ZERO))) {
2756 1.1 dyoung device_printf(dev, "Unable to allocate rx_buffer memory\n");
2757 1.1 dyoung error = ENOMEM;
2758 1.1 dyoung goto fail;
2759 1.1 dyoung }
2760 1.1 dyoung
2761 1.1 dyoung if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
2762 1.1 dyoung 1, 0, /* alignment, bounds */
2763 1.1 dyoung BUS_SPACE_MAXADDR, /* lowaddr */
2764 1.1 dyoung BUS_SPACE_MAXADDR, /* highaddr */
2765 1.1 dyoung NULL, NULL, /* filter, filterarg */
2766 1.1 dyoung MSIZE, /* maxsize */
2767 1.1 dyoung 1, /* nsegments */
2768 1.1 dyoung MSIZE, /* maxsegsize */
2769 1.1 dyoung 0, /* flags */
2770 1.1 dyoung NULL, /* lockfunc */
2771 1.1 dyoung NULL, /* lockfuncarg */
2772 1.1 dyoung &rxr->htag))) {
2773 1.1 dyoung device_printf(dev, "Unable to create RX DMA tag\n");
2774 1.1 dyoung goto fail;
2775 1.1 dyoung }
2776 1.1 dyoung
2777 1.1 dyoung if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
2778 1.1 dyoung 1, 0, /* alignment, bounds */
2779 1.1 dyoung BUS_SPACE_MAXADDR, /* lowaddr */
2780 1.1 dyoung BUS_SPACE_MAXADDR, /* highaddr */
2781 1.1 dyoung NULL, NULL, /* filter, filterarg */
2782 1.1 dyoung MJUMPAGESIZE, /* maxsize */
2783 1.1 dyoung 1, /* nsegments */
2784 1.1 dyoung MJUMPAGESIZE, /* maxsegsize */
2785 1.1 dyoung 0, /* flags */
2786 1.1 dyoung NULL, /* lockfunc */
2787 1.1 dyoung NULL, /* lockfuncarg */
2788 1.1 dyoung &rxr->ptag))) {
2789 1.1 dyoung device_printf(dev, "Unable to create RX DMA tag\n");
2790 1.1 dyoung goto fail;
2791 1.1 dyoung }
2792 1.1 dyoung
	for (i = 0; i < adapter->num_rx_desc; i++) {
2794 1.1 dyoung rxbuf = &rxr->rx_buffers[i];
2795 1.1 dyoung error = bus_dmamap_create(rxr->htag,
2796 1.1 dyoung BUS_DMA_NOWAIT, &rxbuf->hmap);
2797 1.1 dyoung if (error) {
2798 1.1 dyoung device_printf(dev, "Unable to create RX head map\n");
2799 1.1 dyoung goto fail;
2800 1.1 dyoung }
2801 1.1 dyoung error = bus_dmamap_create(rxr->ptag,
2802 1.1 dyoung BUS_DMA_NOWAIT, &rxbuf->pmap);
2803 1.1 dyoung if (error) {
2804 1.1 dyoung device_printf(dev, "Unable to create RX pkt map\n");
2805 1.1 dyoung goto fail;
2806 1.1 dyoung }
2807 1.1 dyoung }
2808 1.1 dyoung
2809 1.1 dyoung return (0);
2810 1.1 dyoung
2811 1.1 dyoung fail:
2812 1.1 dyoung /* Frees all, but can handle partial completion */
2813 1.1 dyoung ixv_free_receive_structures(adapter);
2814 1.1 dyoung return (error);
2815 1.1 dyoung }
2816 1.1 dyoung
2817 1.1 dyoung static void
2818 1.1 dyoung ixv_free_receive_ring(struct rx_ring *rxr)
2819 1.1 dyoung {
2820 1.1 dyoung struct adapter *adapter;
2821 1.1 dyoung struct ixv_rx_buf *rxbuf;
2822 1.1 dyoung int i;
2823 1.1 dyoung
2824 1.1 dyoung adapter = rxr->adapter;
2825 1.1 dyoung for (i = 0; i < adapter->num_rx_desc; i++) {
2826 1.1 dyoung rxbuf = &rxr->rx_buffers[i];
2827 1.1 dyoung if (rxbuf->m_head != NULL) {
2828 1.1 dyoung bus_dmamap_sync(rxr->htag, rxbuf->hmap,
2829 1.1 dyoung BUS_DMASYNC_POSTREAD);
2830 1.1 dyoung bus_dmamap_unload(rxr->htag, rxbuf->hmap);
2831 1.1 dyoung rxbuf->m_head->m_flags |= M_PKTHDR;
2832 1.1 dyoung m_freem(rxbuf->m_head);
2833 1.1 dyoung }
2834 1.1 dyoung if (rxbuf->m_pack != NULL) {
2835 1.1 dyoung bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
2836 1.1 dyoung BUS_DMASYNC_POSTREAD);
2837 1.1 dyoung bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
2838 1.1 dyoung rxbuf->m_pack->m_flags |= M_PKTHDR;
2839 1.1 dyoung m_freem(rxbuf->m_pack);
2840 1.1 dyoung }
2841 1.1 dyoung rxbuf->m_head = NULL;
2842 1.1 dyoung rxbuf->m_pack = NULL;
2843 1.1 dyoung }
2844 1.1 dyoung }
2845 1.1 dyoung
2846 1.1 dyoung
2847 1.1 dyoung /*********************************************************************
2848 1.1 dyoung *
2849 1.1 dyoung * Initialize a receive ring and its buffers.
2850 1.1 dyoung *
2851 1.1 dyoung **********************************************************************/
2852 1.1 dyoung static int
2853 1.1 dyoung ixv_setup_receive_ring(struct rx_ring *rxr)
2854 1.1 dyoung {
2855 1.1 dyoung struct adapter *adapter;
2856 1.1 dyoung struct ifnet *ifp;
2857 1.1 dyoung device_t dev;
2858 1.1 dyoung struct ixv_rx_buf *rxbuf;
2859 1.1 dyoung bus_dma_segment_t pseg[1], hseg[1];
2860 1.1 dyoung struct lro_ctrl *lro = &rxr->lro;
2861 1.1 dyoung int rsize, nsegs, error = 0;
2862 1.1 dyoung
2863 1.1 dyoung adapter = rxr->adapter;
2864 1.1 dyoung ifp = adapter->ifp;
2865 1.1 dyoung dev = adapter->dev;
2866 1.1 dyoung
2867 1.1 dyoung /* Clear the ring contents */
2868 1.1 dyoung IXV_RX_LOCK(rxr);
2869 1.1 dyoung rsize = roundup2(adapter->num_rx_desc *
2870 1.1 dyoung sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
2871 1.1 dyoung bzero((void *)rxr->rx_base, rsize);
2872 1.1 dyoung
2873 1.1 dyoung /* Free current RX buffer structs and their mbufs */
2874 1.1 dyoung ixv_free_receive_ring(rxr);
2875 1.1 dyoung
2876 1.1 dyoung /* Configure header split? */
2877 1.1 dyoung if (ixv_header_split)
2878 1.1 dyoung rxr->hdr_split = TRUE;
2879 1.1 dyoung
2880 1.1 dyoung /* Now replenish the mbufs */
2881 1.1 dyoung for (int j = 0; j != adapter->num_rx_desc; ++j) {
2882 1.1 dyoung struct mbuf *mh, *mp;
2883 1.1 dyoung
2884 1.1 dyoung rxbuf = &rxr->rx_buffers[j];
		/*
		** Don't allocate a header mbuf if we're not
		** doing header split; it would be wasteful.
		*/
2889 1.1 dyoung if (rxr->hdr_split == FALSE)
2890 1.1 dyoung goto skip_head;
2891 1.1 dyoung
2892 1.1 dyoung /* First the header */
2893 1.1 dyoung rxbuf->m_head = m_gethdr(M_NOWAIT, MT_DATA);
2894 1.1 dyoung if (rxbuf->m_head == NULL) {
2895 1.1 dyoung error = ENOBUFS;
2896 1.1 dyoung goto fail;
2897 1.1 dyoung }
2898 1.1 dyoung m_adj(rxbuf->m_head, ETHER_ALIGN);
2899 1.1 dyoung mh = rxbuf->m_head;
2900 1.1 dyoung mh->m_len = mh->m_pkthdr.len = MHLEN;
2901 1.1 dyoung mh->m_flags |= M_PKTHDR;
2902 1.1 dyoung /* Get the memory mapping */
2903 1.1 dyoung error = bus_dmamap_load_mbuf_sg(rxr->htag,
2904 1.1 dyoung rxbuf->hmap, rxbuf->m_head, hseg,
2905 1.1 dyoung &nsegs, BUS_DMA_NOWAIT);
2906 1.1 dyoung if (error != 0) /* Nothing elegant to do here */
2907 1.1 dyoung goto fail;
2908 1.1 dyoung bus_dmamap_sync(rxr->htag,
2909 1.1 dyoung rxbuf->hmap, BUS_DMASYNC_PREREAD);
2910 1.1 dyoung /* Update descriptor */
2911 1.1 dyoung rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr);
2912 1.1 dyoung
2913 1.1 dyoung skip_head:
2914 1.1 dyoung /* Now the payload cluster */
2915 1.1 dyoung rxbuf->m_pack = m_getjcl(M_NOWAIT, MT_DATA,
2916 1.1 dyoung M_PKTHDR, adapter->rx_mbuf_sz);
2917 1.1 dyoung if (rxbuf->m_pack == NULL) {
2918 1.1 dyoung error = ENOBUFS;
2919 1.1 dyoung goto fail;
2920 1.1 dyoung }
2921 1.1 dyoung mp = rxbuf->m_pack;
2922 1.1 dyoung mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
2923 1.1 dyoung /* Get the memory mapping */
2924 1.1 dyoung error = bus_dmamap_load_mbuf_sg(rxr->ptag,
2925 1.1 dyoung rxbuf->pmap, mp, pseg,
2926 1.1 dyoung &nsegs, BUS_DMA_NOWAIT);
2927 1.1 dyoung if (error != 0)
2928 1.1 dyoung goto fail;
2929 1.1 dyoung bus_dmamap_sync(rxr->ptag,
2930 1.1 dyoung rxbuf->pmap, BUS_DMASYNC_PREREAD);
2931 1.1 dyoung /* Update descriptor */
2932 1.1 dyoung rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
2933 1.1 dyoung }
2934 1.1 dyoung
2935 1.1 dyoung
2936 1.1 dyoung /* Setup our descriptor indices */
2937 1.1 dyoung rxr->next_to_check = 0;
2938 1.1 dyoung rxr->next_to_refresh = 0;
2939 1.1 dyoung rxr->lro_enabled = FALSE;
2940 1.1 dyoung rxr->rx_split_packets = 0;
2941 1.1 dyoung rxr->rx_bytes = 0;
2942 1.1 dyoung
2943 1.1 dyoung bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2944 1.1 dyoung BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2945 1.1 dyoung
2946 1.1 dyoung /*
2947 1.1 dyoung ** Now set up the LRO interface:
2948 1.1 dyoung */
2949 1.1 dyoung if (ifp->if_capenable & IFCAP_LRO) {
2950 1.1 dyoung int err = tcp_lro_init(lro);
2951 1.1 dyoung if (err) {
2952 1.1 dyoung device_printf(dev, "LRO Initialization failed!\n");
2953 1.1 dyoung goto fail;
2954 1.1 dyoung }
2955 1.1 dyoung INIT_DEBUGOUT("RX Soft LRO Initialized\n");
2956 1.1 dyoung rxr->lro_enabled = TRUE;
2957 1.1 dyoung lro->ifp = adapter->ifp;
2958 1.1 dyoung }
2959 1.1 dyoung
2960 1.1 dyoung IXV_RX_UNLOCK(rxr);
2961 1.1 dyoung return (0);
2962 1.1 dyoung
2963 1.1 dyoung fail:
2964 1.1 dyoung ixv_free_receive_ring(rxr);
2965 1.1 dyoung IXV_RX_UNLOCK(rxr);
2966 1.1 dyoung return (error);
2967 1.1 dyoung }
2968 1.1 dyoung
2969 1.1 dyoung /*********************************************************************
2970 1.1 dyoung *
2971 1.1 dyoung * Initialize all receive rings.
2972 1.1 dyoung *
2973 1.1 dyoung **********************************************************************/
2974 1.1 dyoung static int
2975 1.1 dyoung ixv_setup_receive_structures(struct adapter *adapter)
2976 1.1 dyoung {
2977 1.1 dyoung struct rx_ring *rxr = adapter->rx_rings;
2978 1.1 dyoung int j;
2979 1.1 dyoung
2980 1.1 dyoung for (j = 0; j < adapter->num_queues; j++, rxr++)
2981 1.1 dyoung if (ixv_setup_receive_ring(rxr))
2982 1.1 dyoung goto fail;
2983 1.1 dyoung
2984 1.1 dyoung return (0);
2985 1.1 dyoung fail:
	/*
	 * Free RX buffers allocated so far; we will only handle
	 * the rings that completed, since the failing case will
	 * have cleaned up for itself. Ring 'j' failed, so it is
	 * the terminus.
	 */
2991 1.1 dyoung for (int i = 0; i < j; ++i) {
2992 1.1 dyoung rxr = &adapter->rx_rings[i];
2993 1.1 dyoung ixv_free_receive_ring(rxr);
2994 1.1 dyoung }
2995 1.1 dyoung
2996 1.1 dyoung return (ENOBUFS);
2997 1.1 dyoung }
2998 1.1 dyoung
2999 1.1 dyoung /*********************************************************************
3000 1.1 dyoung *
3001 1.1 dyoung * Setup receive registers and features.
3002 1.1 dyoung *
3003 1.1 dyoung **********************************************************************/
3004 1.1 dyoung #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
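/*
** The BSIZEHEADER field of SRRCTL is expressed in 64-byte units;
** shifting the header size left by 2 (and masking, below) positions
** it in that field. For example, assuming IXV_RX_HDR is 128,
** 128 << 2 = 0x200, which reads back as 2 * 64 = 128 bytes.
*/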
3005 1.1 dyoung
3006 1.1 dyoung static void
3007 1.1 dyoung ixv_initialize_receive_units(struct adapter *adapter)
3008 1.1 dyoung {
3009 1.1 dyoung struct rx_ring *rxr = adapter->rx_rings;
3010 1.1 dyoung struct ixgbe_hw *hw = &adapter->hw;
3011 1.1 dyoung struct ifnet *ifp = adapter->ifp;
3012 1.1 dyoung u32 bufsz, fctrl, rxcsum, hlreg;
3013 1.1 dyoung
3014 1.1 dyoung
3015 1.1 dyoung /* Enable broadcasts */
3016 1.1 dyoung fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3017 1.1 dyoung fctrl |= IXGBE_FCTRL_BAM;
3018 1.1 dyoung fctrl |= IXGBE_FCTRL_DPF;
3019 1.1 dyoung fctrl |= IXGBE_FCTRL_PMCF;
3020 1.1 dyoung IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3021 1.1 dyoung
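	/*
	** Note: the SRRCTL BSIZEPKT field is in 1KB units, hence the
	** byte counts below are shifted right by
	** IXGBE_SRRCTL_BSIZEPKT_SHIFT (10): 4096 >> 10 == 4, meaning
	** a 4KB packet buffer.
	*/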
3022 1.1 dyoung /* Set for Jumbo Frames? */
3023 1.1 dyoung hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3024 1.1 dyoung if (ifp->if_mtu > ETHERMTU) {
3025 1.1 dyoung hlreg |= IXGBE_HLREG0_JUMBOEN;
3026 1.1 dyoung bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3027 1.1 dyoung } else {
3028 1.1 dyoung hlreg &= ~IXGBE_HLREG0_JUMBOEN;
3029 1.1 dyoung bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3030 1.1 dyoung }
3031 1.1 dyoung IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
3032 1.1 dyoung
3033 1.1 dyoung for (int i = 0; i < adapter->num_queues; i++, rxr++) {
3034 1.1 dyoung u64 rdba = rxr->rxdma.dma_paddr;
3035 1.1 dyoung u32 reg, rxdctl;
3036 1.1 dyoung
3037 1.1 dyoung /* Do the queue enabling first */
3038 1.1 dyoung rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
3039 1.1 dyoung rxdctl |= IXGBE_RXDCTL_ENABLE;
3040 1.1 dyoung IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
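		/* Poll up to 10ms for the enable bit to latch */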
3041 1.1 dyoung for (int k = 0; k < 10; k++) {
3042 1.1 dyoung if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
3043 1.1 dyoung IXGBE_RXDCTL_ENABLE)
3044 1.1 dyoung break;
3045 1.1 dyoung else
3046 1.1 dyoung msec_delay(1);
3047 1.1 dyoung }
3048 1.1 dyoung wmb();
3049 1.1 dyoung
3050 1.1 dyoung /* Setup the Base and Length of the Rx Descriptor Ring */
3051 1.1 dyoung IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
3052 1.1 dyoung (rdba & 0x00000000ffffffffULL));
3053 1.1 dyoung IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
3054 1.1 dyoung (rdba >> 32));
3055 1.1 dyoung IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
3056 1.1 dyoung adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
3057 1.1 dyoung
3058 1.1 dyoung /* Set up the SRRCTL register */
3059 1.1 dyoung reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
3060 1.1 dyoung reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
3061 1.1 dyoung reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
3062 1.1 dyoung reg |= bufsz;
3063 1.1 dyoung if (rxr->hdr_split) {
3064 1.1 dyoung /* Use a standard mbuf for the header */
3065 1.1 dyoung reg |= ((IXV_RX_HDR <<
3066 1.1 dyoung IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
3067 1.1 dyoung & IXGBE_SRRCTL_BSIZEHDR_MASK);
3068 1.1 dyoung reg |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
3069 1.1 dyoung } else
3070 1.1 dyoung reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3071 1.1 dyoung IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
3072 1.1 dyoung
3073 1.1 dyoung /* Setup the HW Rx Head and Tail Descriptor Pointers */
3074 1.1 dyoung IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
3075 1.1 dyoung IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
3076 1.1 dyoung adapter->num_rx_desc - 1);
3077 1.1 dyoung }
3078 1.1 dyoung
3079 1.1 dyoung rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3080 1.1 dyoung
3081 1.1 dyoung if (ifp->if_capenable & IFCAP_RXCSUM)
3082 1.1 dyoung rxcsum |= IXGBE_RXCSUM_PCSD;
3083 1.1 dyoung
3084 1.1 dyoung if (!(rxcsum & IXGBE_RXCSUM_PCSD))
3085 1.1 dyoung rxcsum |= IXGBE_RXCSUM_IPPCSE;
3086 1.1 dyoung
3087 1.1 dyoung IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3088 1.1 dyoung
3089 1.1 dyoung return;
3090 1.1 dyoung }
3091 1.1 dyoung
3092 1.1 dyoung /*********************************************************************
3093 1.1 dyoung *
3094 1.1 dyoung * Free all receive rings.
3095 1.1 dyoung *
3096 1.1 dyoung **********************************************************************/
3097 1.1 dyoung static void
3098 1.1 dyoung ixv_free_receive_structures(struct adapter *adapter)
3099 1.1 dyoung {
3100 1.1 dyoung struct rx_ring *rxr = adapter->rx_rings;
3101 1.1 dyoung
3102 1.1 dyoung for (int i = 0; i < adapter->num_queues; i++, rxr++) {
3103 1.1 dyoung struct lro_ctrl *lro = &rxr->lro;
3104 1.1 dyoung ixv_free_receive_buffers(rxr);
3105 1.1 dyoung /* Free LRO memory */
3106 1.1 dyoung tcp_lro_free(lro);
3107 1.1 dyoung /* Free the ring memory as well */
3108 1.1 dyoung ixv_dma_free(adapter, &rxr->rxdma);
3109 1.1 dyoung }
3110 1.1 dyoung
3111 1.1 dyoung free(adapter->rx_rings, M_DEVBUF);
3112 1.1 dyoung }
3113 1.1 dyoung
3114 1.1 dyoung
3115 1.1 dyoung /*********************************************************************
3116 1.1 dyoung *
3117 1.1 dyoung * Free receive ring data structures
3118 1.1 dyoung *
3119 1.1 dyoung **********************************************************************/
3120 1.1 dyoung static void
3121 1.1 dyoung ixv_free_receive_buffers(struct rx_ring *rxr)
3122 1.1 dyoung {
3123 1.1 dyoung struct adapter *adapter = rxr->adapter;
3124 1.1 dyoung struct ixv_rx_buf *rxbuf;
3125 1.1 dyoung
3126 1.1 dyoung INIT_DEBUGOUT("free_receive_structures: begin");
3127 1.1 dyoung
3128 1.1 dyoung /* Cleanup any existing buffers */
3129 1.1 dyoung if (rxr->rx_buffers != NULL) {
3130 1.1 dyoung for (int i = 0; i < adapter->num_rx_desc; i++) {
3131 1.1 dyoung rxbuf = &rxr->rx_buffers[i];
3132 1.1 dyoung if (rxbuf->m_head != NULL) {
3133 1.1 dyoung bus_dmamap_sync(rxr->htag, rxbuf->hmap,
3134 1.1 dyoung BUS_DMASYNC_POSTREAD);
3135 1.1 dyoung bus_dmamap_unload(rxr->htag, rxbuf->hmap);
3136 1.1 dyoung rxbuf->m_head->m_flags |= M_PKTHDR;
3137 1.1 dyoung m_freem(rxbuf->m_head);
3138 1.1 dyoung }
3139 1.1 dyoung if (rxbuf->m_pack != NULL) {
3140 1.1 dyoung bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
3141 1.1 dyoung BUS_DMASYNC_POSTREAD);
3142 1.1 dyoung bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
3143 1.1 dyoung rxbuf->m_pack->m_flags |= M_PKTHDR;
3144 1.1 dyoung m_freem(rxbuf->m_pack);
3145 1.1 dyoung }
3146 1.1 dyoung rxbuf->m_head = NULL;
3147 1.1 dyoung rxbuf->m_pack = NULL;
3148 1.1 dyoung if (rxbuf->hmap != NULL) {
3149 1.1 dyoung bus_dmamap_destroy(rxr->htag, rxbuf->hmap);
3150 1.1 dyoung rxbuf->hmap = NULL;
3151 1.1 dyoung }
3152 1.1 dyoung if (rxbuf->pmap != NULL) {
3153 1.1 dyoung bus_dmamap_destroy(rxr->ptag, rxbuf->pmap);
3154 1.1 dyoung rxbuf->pmap = NULL;
3155 1.1 dyoung }
3156 1.1 dyoung }
3157 1.1 dyoung if (rxr->rx_buffers != NULL) {
3158 1.1 dyoung free(rxr->rx_buffers, M_DEVBUF);
3159 1.1 dyoung rxr->rx_buffers = NULL;
3160 1.1 dyoung }
3161 1.1 dyoung }
3162 1.1 dyoung
3163 1.1 dyoung if (rxr->htag != NULL) {
3164 1.1 dyoung bus_dma_tag_destroy(rxr->htag);
3165 1.1 dyoung rxr->htag = NULL;
3166 1.1 dyoung }
3167 1.1 dyoung if (rxr->ptag != NULL) {
3168 1.1 dyoung bus_dma_tag_destroy(rxr->ptag);
3169 1.1 dyoung rxr->ptag = NULL;
3170 1.1 dyoung }
3171 1.1 dyoung
3172 1.1 dyoung return;
3173 1.1 dyoung }
3174 1.1 dyoung
3175 1.1 dyoung static __inline void
3176 1.1 dyoung ixv_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
3177 1.1 dyoung {
3178 1.1 dyoung
	/*
	 * At the moment LRO is only done for IPv4/TCP packets whose
	 * TCP checksum has been verified by hardware, and only with
	 * VLAN hardware tagging enabled, so that any VLAN tag has
	 * been stripped out of the Ethernet header into the mbuf.
	 */
3184 1.1 dyoung if (rxr->lro_enabled &&
3185 1.1 dyoung (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
3186 1.1 dyoung (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
3187 1.1 dyoung (ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
3188 1.1 dyoung (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) &&
3189 1.1 dyoung (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
3190 1.1 dyoung (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
		/*
		 * Fall through to send to the stack if:
		 *  - LRO is not enabled, or
		 *  - there are no LRO resources, or
		 *  - the LRO enqueue fails.
		 */
3197 1.1 dyoung if (rxr->lro.lro_cnt != 0)
3198 1.1 dyoung if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
3199 1.1 dyoung return;
3200 1.1 dyoung }
3201 1.1 dyoung (*ifp->if_input)(ifp, m);
3202 1.1 dyoung }
3203 1.1 dyoung
3204 1.1 dyoung static __inline void
3205 1.1 dyoung ixv_rx_discard(struct rx_ring *rxr, int i)
3206 1.1 dyoung {
3207 1.1 dyoung struct adapter *adapter = rxr->adapter;
3208 1.1 dyoung struct ixv_rx_buf *rbuf;
3209 1.1 dyoung struct mbuf *mh, *mp;
3210 1.1 dyoung
	rbuf = &rxr->rx_buffers[i];
	if (rbuf->fmp != NULL) {	/* Partial chain? */
		m_freem(rbuf->fmp);
		rbuf->fmp = NULL;	/* avoid a stale pointer */
	}

	mh = rbuf->m_head;
	mp = rbuf->m_pack;

	/* Reuse the loaded DMA maps and just update the mbuf chain */
	if (mh != NULL) {	/* header mbuf exists only with header split */
		mh->m_len = MHLEN;
		mh->m_flags |= M_PKTHDR;
		mh->m_next = NULL;
	}

	mp->m_len = mp->m_pkthdr.len = adapter->rx_mbuf_sz;
	mp->m_data = mp->m_ext.ext_buf;
	mp->m_next = NULL;
3226 1.1 dyoung return;
3227 1.1 dyoung }
3228 1.1 dyoung
3229 1.1 dyoung
3230 1.1 dyoung /*********************************************************************
3231 1.1 dyoung *
 *  This routine executes in interrupt context. It replenishes
 *  the mbufs in the descriptor ring and passes data which has
 *  been DMA'd into host memory up to the stack.
3235 1.1 dyoung *
3236 1.1 dyoung * We loop at most count times if count is > 0, or until done if
3237 1.1 dyoung * count < 0.
3238 1.1 dyoung *
3239 1.1 dyoung * Return TRUE for more work, FALSE for all clean.
3240 1.1 dyoung *********************************************************************/
3241 1.1 dyoung static bool
3242 1.1 dyoung ixv_rxeof(struct ix_queue *que, int count)
3243 1.1 dyoung {
3244 1.1 dyoung struct adapter *adapter = que->adapter;
3245 1.1 dyoung struct rx_ring *rxr = que->rxr;
3246 1.1 dyoung struct ifnet *ifp = adapter->ifp;
3247 1.1 dyoung struct lro_ctrl *lro = &rxr->lro;
3248 1.1 dyoung struct lro_entry *queued;
3249 1.1 dyoung int i, nextp, processed = 0;
3250 1.1 dyoung u32 staterr = 0;
3251 1.1 dyoung union ixgbe_adv_rx_desc *cur;
3252 1.1 dyoung struct ixv_rx_buf *rbuf, *nbuf;
3253 1.1 dyoung
3254 1.1 dyoung IXV_RX_LOCK(rxr);
3255 1.1 dyoung
3256 1.1 dyoung for (i = rxr->next_to_check; count != 0;) {
3257 1.1 dyoung struct mbuf *sendmp, *mh, *mp;
3258 1.1 dyoung u32 rsc, ptype;
3259 1.1 dyoung u16 hlen, plen, hdr, vtag;
3260 1.1 dyoung bool eop;
3261 1.1 dyoung
3262 1.1 dyoung /* Sync the ring. */
3263 1.1 dyoung bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3264 1.1 dyoung BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3265 1.1 dyoung
3266 1.1 dyoung cur = &rxr->rx_base[i];
3267 1.1 dyoung staterr = le32toh(cur->wb.upper.status_error);
3268 1.1 dyoung
3269 1.1 dyoung if ((staterr & IXGBE_RXD_STAT_DD) == 0)
3270 1.1 dyoung break;
3271 1.1 dyoung if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
3272 1.1 dyoung break;
3273 1.1 dyoung
3274 1.1 dyoung count--;
3275 1.1 dyoung sendmp = NULL;
3276 1.1 dyoung nbuf = NULL;
3277 1.1 dyoung rsc = 0;
3278 1.1 dyoung cur->wb.upper.status_error = 0;
3279 1.1 dyoung rbuf = &rxr->rx_buffers[i];
3280 1.1 dyoung mh = rbuf->m_head;
3281 1.1 dyoung mp = rbuf->m_pack;
3282 1.1 dyoung
3283 1.1 dyoung plen = le16toh(cur->wb.upper.length);
3284 1.1 dyoung ptype = le32toh(cur->wb.lower.lo_dword.data) &
3285 1.1 dyoung IXGBE_RXDADV_PKTTYPE_MASK;
3286 1.1 dyoung hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
3287 1.1 dyoung vtag = le16toh(cur->wb.upper.vlan);
3288 1.1 dyoung eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
3289 1.1 dyoung
3290 1.1 dyoung /* Make sure all parts of a bad packet are discarded */
3291 1.1 dyoung if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
3292 1.1 dyoung (rxr->discard)) {
3293 1.1 dyoung ifp->if_ierrors++;
3294 1.1 dyoung rxr->rx_discarded++;
3295 1.1 dyoung if (!eop)
3296 1.1 dyoung rxr->discard = TRUE;
3297 1.1 dyoung else
3298 1.1 dyoung rxr->discard = FALSE;
3299 1.1 dyoung ixv_rx_discard(rxr, i);
3300 1.1 dyoung goto next_desc;
3301 1.1 dyoung }
3302 1.1 dyoung
3303 1.1 dyoung if (!eop) {
3304 1.1 dyoung nextp = i + 1;
3305 1.1 dyoung if (nextp == adapter->num_rx_desc)
3306 1.1 dyoung nextp = 0;
3307 1.1 dyoung nbuf = &rxr->rx_buffers[nextp];
3308 1.1 dyoung prefetch(nbuf);
3309 1.1 dyoung }
3310 1.1 dyoung /*
3311 1.1 dyoung ** The header mbuf is ONLY used when header
3312 1.1 dyoung ** split is enabled, otherwise we get normal
		** behavior, i.e., both header and payload
3314 1.1 dyoung ** are DMA'd into the payload buffer.
3315 1.1 dyoung **
3316 1.1 dyoung ** Rather than using the fmp/lmp global pointers
3317 1.1 dyoung ** we now keep the head of a packet chain in the
3318 1.1 dyoung ** buffer struct and pass this along from one
3319 1.1 dyoung ** descriptor to the next, until we get EOP.
3320 1.1 dyoung */
3321 1.1 dyoung if (rxr->hdr_split && (rbuf->fmp == NULL)) {
3322 1.1 dyoung /* This must be an initial descriptor */
3323 1.1 dyoung hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
3324 1.1 dyoung IXGBE_RXDADV_HDRBUFLEN_SHIFT;
3325 1.1 dyoung if (hlen > IXV_RX_HDR)
3326 1.1 dyoung hlen = IXV_RX_HDR;
3327 1.1 dyoung mh->m_len = hlen;
3328 1.1 dyoung mh->m_flags |= M_PKTHDR;
3329 1.1 dyoung mh->m_next = NULL;
3330 1.1 dyoung mh->m_pkthdr.len = mh->m_len;
3331 1.1 dyoung /* Null buf pointer so it is refreshed */
3332 1.1 dyoung rbuf->m_head = NULL;
			/*
			** Check the payload length; this
			** could be zero if it's a small
			** packet.
			*/
3338 1.1 dyoung if (plen > 0) {
3339 1.1 dyoung mp->m_len = plen;
3340 1.1 dyoung mp->m_next = NULL;
3341 1.1 dyoung mp->m_flags &= ~M_PKTHDR;
3342 1.1 dyoung mh->m_next = mp;
3343 1.1 dyoung mh->m_pkthdr.len += mp->m_len;
3344 1.1 dyoung /* Null buf pointer so it is refreshed */
3345 1.1 dyoung rbuf->m_pack = NULL;
3346 1.1 dyoung rxr->rx_split_packets++;
3347 1.1 dyoung }
			/*
			** Now create the forward
			** chain, so when complete
			** we won't have to.
			*/
3353 1.1 dyoung if (eop == 0) {
3354 1.1 dyoung /* stash the chain head */
3355 1.1 dyoung nbuf->fmp = mh;
3356 1.1 dyoung /* Make forward chain */
3357 1.1 dyoung if (plen)
3358 1.1 dyoung mp->m_next = nbuf->m_pack;
3359 1.1 dyoung else
3360 1.1 dyoung mh->m_next = nbuf->m_pack;
3361 1.1 dyoung } else {
3362 1.1 dyoung /* Singlet, prepare to send */
3363 1.1 dyoung sendmp = mh;
3364 1.1 dyoung if (staterr & IXGBE_RXD_STAT_VP) {
3365 1.1 dyoung sendmp->m_pkthdr.ether_vtag = vtag;
3366 1.1 dyoung sendmp->m_flags |= M_VLANTAG;
3367 1.1 dyoung }
3368 1.1 dyoung }
3369 1.1 dyoung } else {
3370 1.1 dyoung /*
3371 1.1 dyoung ** Either no header split, or a
3372 1.1 dyoung ** secondary piece of a fragmented
3373 1.1 dyoung ** split packet.
3374 1.1 dyoung */
3375 1.1 dyoung mp->m_len = plen;
			/*
			** See if there is a stored chain
			** head that tells us what this
			** buffer is a continuation of.
			*/
3380 1.1 dyoung sendmp = rbuf->fmp;
3381 1.1 dyoung rbuf->m_pack = rbuf->fmp = NULL;
3382 1.1 dyoung
3383 1.1 dyoung if (sendmp != NULL) /* secondary frag */
3384 1.1 dyoung sendmp->m_pkthdr.len += mp->m_len;
3385 1.1 dyoung else {
3386 1.1 dyoung /* first desc of a non-ps chain */
3387 1.1 dyoung sendmp = mp;
3388 1.1 dyoung sendmp->m_flags |= M_PKTHDR;
3389 1.1 dyoung sendmp->m_pkthdr.len = mp->m_len;
3390 1.1 dyoung if (staterr & IXGBE_RXD_STAT_VP) {
3391 1.1 dyoung sendmp->m_pkthdr.ether_vtag = vtag;
3392 1.1 dyoung sendmp->m_flags |= M_VLANTAG;
3393 1.1 dyoung }
3394 1.1 dyoung }
3395 1.1 dyoung /* Pass the head pointer on */
3396 1.1 dyoung if (eop == 0) {
3397 1.1 dyoung nbuf->fmp = sendmp;
3398 1.1 dyoung sendmp = NULL;
3399 1.1 dyoung mp->m_next = nbuf->m_pack;
3400 1.1 dyoung }
3401 1.1 dyoung }
3402 1.1 dyoung ++processed;
3403 1.1 dyoung /* Sending this frame? */
3404 1.1 dyoung if (eop) {
3405 1.1 dyoung sendmp->m_pkthdr.rcvif = ifp;
3406 1.1 dyoung ifp->if_ipackets++;
3407 1.1 dyoung rxr->rx_packets++;
3408 1.1 dyoung /* capture data for AIM */
3409 1.1 dyoung rxr->bytes += sendmp->m_pkthdr.len;
3410 1.1 dyoung rxr->rx_bytes += sendmp->m_pkthdr.len;
3411 1.1 dyoung if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
3412 1.1 dyoung ixv_rx_checksum(staterr, sendmp, ptype);
3413 1.1 dyoung #if __FreeBSD_version >= 800000
3414 1.1 dyoung sendmp->m_pkthdr.flowid = que->msix;
3415 1.1 dyoung sendmp->m_flags |= M_FLOWID;
3416 1.1 dyoung #endif
3417 1.1 dyoung }
3418 1.1 dyoung next_desc:
3419 1.1 dyoung bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3420 1.1 dyoung BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3421 1.1 dyoung
3422 1.1 dyoung /* Advance our pointers to the next descriptor. */
3423 1.1 dyoung if (++i == adapter->num_rx_desc)
3424 1.1 dyoung i = 0;
3425 1.1 dyoung
3426 1.1 dyoung /* Now send to the stack or do LRO */
3427 1.1 dyoung if (sendmp != NULL)
3428 1.1 dyoung ixv_rx_input(rxr, ifp, sendmp, ptype);
3429 1.1 dyoung
3430 1.1 dyoung /* Every 8 descriptors we go to refresh mbufs */
3431 1.1 dyoung if (processed == 8) {
3432 1.1 dyoung ixv_refresh_mbufs(rxr, i);
3433 1.1 dyoung processed = 0;
3434 1.1 dyoung }
3435 1.1 dyoung }
3436 1.1 dyoung
3437 1.1 dyoung /* Refresh any remaining buf structs */
3438 1.1 dyoung if (processed != 0) {
3439 1.1 dyoung ixv_refresh_mbufs(rxr, i);
3440 1.1 dyoung processed = 0;
3441 1.1 dyoung }
3442 1.1 dyoung
3443 1.1 dyoung rxr->next_to_check = i;
3444 1.1 dyoung
3445 1.1 dyoung /*
3446 1.1 dyoung * Flush any outstanding LRO work
3447 1.1 dyoung */
3448 1.1 dyoung while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
3449 1.1 dyoung SLIST_REMOVE_HEAD(&lro->lro_active, next);
3450 1.1 dyoung tcp_lro_flush(lro, queued);
3451 1.1 dyoung }
3452 1.1 dyoung
3453 1.1 dyoung IXV_RX_UNLOCK(rxr);
3454 1.1 dyoung
3455 1.1 dyoung /*
3456 1.1 dyoung ** We still have cleaning to do?
3457 1.1 dyoung ** Schedule another interrupt if so.
3458 1.1 dyoung */
3459 1.1 dyoung if ((staterr & IXGBE_RXD_STAT_DD) != 0) {
		/* compute the mask in 64 bits so high vectors are safe */
		ixv_rearm_queues(adapter, (u64)1 << que->msix);
3461 1.1 dyoung return (TRUE);
3462 1.1 dyoung }
3463 1.1 dyoung
3464 1.1 dyoung return (FALSE);
3465 1.1 dyoung }
3466 1.1 dyoung
3467 1.1 dyoung
3468 1.1 dyoung /*********************************************************************
3469 1.1 dyoung *
3470 1.1 dyoung * Verify that the hardware indicated that the checksum is valid.
3471 1.1 dyoung * Inform the stack about the status of checksum so that stack
3472 1.1 dyoung * doesn't spend time verifying the checksum.
3473 1.1 dyoung *
3474 1.1 dyoung *********************************************************************/
3475 1.1 dyoung static void
3476 1.1 dyoung ixv_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype)
3477 1.1 dyoung {
3478 1.1 dyoung u16 status = (u16) staterr;
3479 1.1 dyoung u8 errors = (u8) (staterr >> 24);
3480 1.1 dyoung bool sctp = FALSE;
3481 1.1 dyoung
3482 1.1 dyoung if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
3483 1.1 dyoung (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
3484 1.1 dyoung sctp = TRUE;
3485 1.1 dyoung
3486 1.1 dyoung if (status & IXGBE_RXD_STAT_IPCS) {
3487 1.1 dyoung if (!(errors & IXGBE_RXD_ERR_IPE)) {
3488 1.1 dyoung /* IP Checksum Good */
3489 1.1 dyoung mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
3490 1.1 dyoung mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3491 1.1 dyoung
3492 1.1 dyoung } else
3493 1.1 dyoung mp->m_pkthdr.csum_flags = 0;
3494 1.1 dyoung }
3495 1.1 dyoung if (status & IXGBE_RXD_STAT_L4CS) {
3496 1.1 dyoung u16 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3497 1.1 dyoung #if __FreeBSD_version >= 800000
3498 1.1 dyoung if (sctp)
3499 1.1 dyoung type = CSUM_SCTP_VALID;
3500 1.1 dyoung #endif
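		/*
		** csum_data = 0xffff together with CSUM_DATA_VALID and
		** CSUM_PSEUDO_HDR tells the stack the L4 checksum has
		** already been verified, so it can skip its own check.
		*/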
3501 1.1 dyoung if (!(errors & IXGBE_RXD_ERR_TCPE)) {
3502 1.1 dyoung mp->m_pkthdr.csum_flags |= type;
3503 1.1 dyoung if (!sctp)
3504 1.1 dyoung mp->m_pkthdr.csum_data = htons(0xffff);
3505 1.1 dyoung }
3506 1.1 dyoung }
3507 1.1 dyoung return;
3508 1.1 dyoung }
3509 1.1 dyoung
3510 1.1 dyoung static void
3511 1.1 dyoung ixv_setup_vlan_support(struct adapter *adapter)
3512 1.1 dyoung {
3513 1.1 dyoung struct ixgbe_hw *hw = &adapter->hw;
3514 1.1 dyoung u32 ctrl, vid, vfta, retry;
3515 1.1 dyoung
3516 1.1 dyoung
	/*
	** We get here through init_locked, meaning
	** a soft reset: that has already cleared
	** the VFTA and other state, so if no
	** VLANs have been registered, do nothing.
	*/
3523 1.1 dyoung if (adapter->num_vlans == 0)
3524 1.1 dyoung return;
3525 1.1 dyoung
3526 1.1 dyoung /* Enable the queues */
3527 1.1 dyoung for (int i = 0; i < adapter->num_queues; i++) {
3528 1.1 dyoung ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
3529 1.1 dyoung ctrl |= IXGBE_RXDCTL_VME;
3530 1.1 dyoung IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
3531 1.1 dyoung }
3532 1.1 dyoung
	/*
	** A soft reset zeroes out the VFTA, so
	** we need to repopulate it now.
	*/
3537 1.1 dyoung for (int i = 0; i < VFTA_SIZE; i++) {
3538 1.1 dyoung if (ixv_shadow_vfta[i] == 0)
3539 1.1 dyoung continue;
3540 1.1 dyoung vfta = ixv_shadow_vfta[i];
		/*
		** Reconstruct the VLAN IDs from
		** the bits set in each of the
		** shadow array words.
		*/
		for (int j = 0; j < 32; j++) {
3547 1.1 dyoung retry = 0;
3548 1.1 dyoung if ((vfta & (1 << j)) == 0)
3549 1.1 dyoung continue;
3550 1.1 dyoung vid = (i * 32) + j;
3551 1.1 dyoung /* Call the shared code mailbox routine */
3552 1.1 dyoung while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
3553 1.1 dyoung if (++retry > 5)
3554 1.1 dyoung break;
3555 1.1 dyoung }
3556 1.1 dyoung }
3557 1.1 dyoung }
3558 1.1 dyoung }
3559 1.1 dyoung
/*
** This routine is run via a VLAN config EVENT;
** it enables us to use the HW Filter table since
** we can get the VLAN ID. This just creates the
** entry in the soft version of the VFTA; init will
** repopulate the real table.
*/
3567 1.1 dyoung static void
3568 1.1 dyoung ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3569 1.1 dyoung {
3570 1.1 dyoung struct adapter *adapter = ifp->if_softc;
3571 1.1 dyoung u16 index, bit;
3572 1.1 dyoung
3573 1.1 dyoung if (ifp->if_softc != arg) /* Not our event */
3574 1.1 dyoung return;
3575 1.1 dyoung
3576 1.1 dyoung if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3577 1.1 dyoung return;
3578 1.1 dyoung
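	/*
	** Map the 12-bit VLAN ID into the 128-word shadow VFTA:
	** word index = vtag / 32, bit = vtag % 32. For example,
	** vtag 100 sets bit 4 of word 3.
	*/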
3579 1.1 dyoung index = (vtag >> 5) & 0x7F;
3580 1.1 dyoung bit = vtag & 0x1F;
3581 1.1 dyoung ixv_shadow_vfta[index] |= (1 << bit);
3582 1.1 dyoung ++adapter->num_vlans;
3583 1.1 dyoung /* Re-init to load the changes */
3584 1.1 dyoung ixv_init(adapter);
3585 1.1 dyoung }
3586 1.1 dyoung
/*
** This routine is run via a VLAN unconfig
** EVENT; remove our entry from the soft VFTA.
*/
3592 1.1 dyoung static void
3593 1.1 dyoung ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3594 1.1 dyoung {
3595 1.1 dyoung struct adapter *adapter = ifp->if_softc;
3596 1.1 dyoung u16 index, bit;
3597 1.1 dyoung
3598 1.1 dyoung if (ifp->if_softc != arg)
3599 1.1 dyoung return;
3600 1.1 dyoung
3601 1.1 dyoung if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3602 1.1 dyoung return;
3603 1.1 dyoung
3604 1.1 dyoung index = (vtag >> 5) & 0x7F;
3605 1.1 dyoung bit = vtag & 0x1F;
3606 1.1 dyoung ixv_shadow_vfta[index] &= ~(1 << bit);
3607 1.1 dyoung --adapter->num_vlans;
3608 1.1 dyoung /* Re-init to load the changes */
3609 1.1 dyoung ixv_init(adapter);
3610 1.1 dyoung }
3611 1.1 dyoung
3612 1.1 dyoung static void
3613 1.1 dyoung ixv_enable_intr(struct adapter *adapter)
3614 1.1 dyoung {
3615 1.1 dyoung struct ixgbe_hw *hw = &adapter->hw;
3616 1.1 dyoung struct ix_queue *que = adapter->queues;
3617 1.1 dyoung u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3618 1.1 dyoung
3619 1.1 dyoung
3620 1.1 dyoung IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
3621 1.1 dyoung
3622 1.1 dyoung mask = IXGBE_EIMS_ENABLE_MASK;
3623 1.1 dyoung mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
3624 1.1 dyoung IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
3625 1.1 dyoung
3626 1.1 dyoung for (int i = 0; i < adapter->num_queues; i++, que++)
3627 1.1 dyoung ixv_enable_queue(adapter, que->msix);
3628 1.1 dyoung
3629 1.1 dyoung IXGBE_WRITE_FLUSH(hw);
3630 1.1 dyoung
3631 1.1 dyoung return;
3632 1.1 dyoung }
3633 1.1 dyoung
3634 1.1 dyoung static void
3635 1.1 dyoung ixv_disable_intr(struct adapter *adapter)
3636 1.1 dyoung {
3637 1.1 dyoung IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
3638 1.1 dyoung IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
3639 1.1 dyoung IXGBE_WRITE_FLUSH(&adapter->hw);
3640 1.1 dyoung return;
3641 1.1 dyoung }
3642 1.1 dyoung
3643 1.1 dyoung /*
3644 1.1 dyoung ** Setup the correct IVAR register for a particular MSIX interrupt
3645 1.1 dyoung ** - entry is the register array entry
3646 1.1 dyoung ** - vector is the MSIX vector for this queue
3647 1.1 dyoung ** - type is RX/TX/MISC
3648 1.1 dyoung */
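/*
** Layout sketch: each 32-bit VTIVAR register carries four 8-bit
** vector entries, two queues per register. The bit offset is
** (16 * (entry & 1)) + (8 * type) within VTIVAR(entry >> 1); e.g.
** entry 3, type 1 (TX) lands in bits 31:24 of VTIVAR(1). A type
** of -1 ignores 'entry' and programs VTIVAR_MISC instead.
*/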
3649 1.1 dyoung static void
3650 1.1 dyoung ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3651 1.1 dyoung {
3652 1.1 dyoung struct ixgbe_hw *hw = &adapter->hw;
3653 1.1 dyoung u32 ivar, index;
3654 1.1 dyoung
3655 1.1 dyoung vector |= IXGBE_IVAR_ALLOC_VAL;
3656 1.1 dyoung
3657 1.1 dyoung if (type == -1) { /* MISC IVAR */
3658 1.1 dyoung ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
3659 1.1 dyoung ivar &= ~0xFF;
3660 1.1 dyoung ivar |= vector;
3661 1.1 dyoung IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
3662 1.1 dyoung } else { /* RX/TX IVARS */
3663 1.1 dyoung index = (16 * (entry & 1)) + (8 * type);
3664 1.1 dyoung ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
3665 1.1 dyoung ivar &= ~(0xFF << index);
3666 1.1 dyoung ivar |= (vector << index);
3667 1.1 dyoung IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
3668 1.1 dyoung }
3669 1.1 dyoung }
3670 1.1 dyoung
3671 1.1 dyoung static void
3672 1.1 dyoung ixv_configure_ivars(struct adapter *adapter)
3673 1.1 dyoung {
3674 1.1 dyoung struct ix_queue *que = adapter->queues;
3675 1.1 dyoung
3676 1.1 dyoung for (int i = 0; i < adapter->num_queues; i++, que++) {
3677 1.1 dyoung /* First the RX queue entry */
3678 1.1 dyoung ixv_set_ivar(adapter, i, que->msix, 0);
3679 1.1 dyoung /* ... and the TX */
3680 1.1 dyoung ixv_set_ivar(adapter, i, que->msix, 1);
3681 1.1 dyoung /* Set an initial value in EITR */
3682 1.1 dyoung IXGBE_WRITE_REG(&adapter->hw,
3683 1.1 dyoung IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
3684 1.1 dyoung }
3685 1.1 dyoung
3686 1.1 dyoung /* For the Link interrupt */
3687 1.1 dyoung ixv_set_ivar(adapter, 1, adapter->mbxvec, -1);
3688 1.1 dyoung }
3689 1.1 dyoung
3690 1.1 dyoung
/*
** Tasklet handler for MSIX MBX interrupts;
** done outside the interrupt since it might sleep.
*/
3695 1.1 dyoung static void
3696 1.1 dyoung ixv_handle_mbx(void *context)
3697 1.1 dyoung {
3698 1.1 dyoung struct adapter *adapter = context;
3699 1.1 dyoung
3700 1.1 dyoung ixgbe_check_link(&adapter->hw,
3701 1.1 dyoung &adapter->link_speed, &adapter->link_up, 0);
3702 1.1 dyoung ixv_update_link_status(adapter);
3703 1.1 dyoung }
3704 1.1 dyoung
/*
** The VF stats registers never have a truly virgin
** starting point, so this routine tries to make an
** artificial one, marking ground zero on attach, as
** it were.
*/
3711 1.1 dyoung static void
3712 1.1 dyoung ixv_save_stats(struct adapter *adapter)
3713 1.1 dyoung {
3714 1.1 dyoung if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
3715 1.1 dyoung adapter->stats.saved_reset_vfgprc +=
3716 1.1 dyoung adapter->stats.vfgprc - adapter->stats.base_vfgprc;
3717 1.1 dyoung adapter->stats.saved_reset_vfgptc +=
3718 1.1 dyoung adapter->stats.vfgptc - adapter->stats.base_vfgptc;
3719 1.1 dyoung adapter->stats.saved_reset_vfgorc +=
3720 1.1 dyoung adapter->stats.vfgorc - adapter->stats.base_vfgorc;
3721 1.1 dyoung adapter->stats.saved_reset_vfgotc +=
3722 1.1 dyoung adapter->stats.vfgotc - adapter->stats.base_vfgotc;
3723 1.1 dyoung adapter->stats.saved_reset_vfmprc +=
3724 1.1 dyoung adapter->stats.vfmprc - adapter->stats.base_vfmprc;
3725 1.1 dyoung }
3726 1.1 dyoung }
3727 1.1 dyoung
3728 1.1 dyoung static void
3729 1.1 dyoung ixv_init_stats(struct adapter *adapter)
3730 1.1 dyoung {
3731 1.1 dyoung struct ixgbe_hw *hw = &adapter->hw;
3732 1.1 dyoung
3733 1.1 dyoung adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
3734 1.1 dyoung adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
3735 1.1 dyoung adapter->stats.last_vfgorc |=
3736 1.1 dyoung (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
3737 1.1 dyoung
3738 1.1 dyoung adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
3739 1.1 dyoung adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
3740 1.1 dyoung adapter->stats.last_vfgotc |=
3741 1.1 dyoung (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
3742 1.1 dyoung
3743 1.1 dyoung adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
3744 1.1 dyoung
3745 1.1 dyoung adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
3746 1.1 dyoung adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
3747 1.1 dyoung adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
3748 1.1 dyoung adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
3749 1.1 dyoung adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
3750 1.1 dyoung }
3751 1.1 dyoung
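/*
** The VF statistics registers are rollover counters (32-bit, or
** 36-bit split across LSB/MSB register pairs). The macros below
** fold each hardware read into a monotonically growing count by
** detecting wraparound against the previously read value.
*/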
3752 1.1 dyoung #define UPDATE_STAT_32(reg, last, count) \
3753 1.1 dyoung { \
3754 1.1 dyoung u32 current = IXGBE_READ_REG(hw, reg); \
3755 1.1 dyoung if (current < last) \
3756 1.1 dyoung count += 0x100000000LL; \
3757 1.1 dyoung last = current; \
3758 1.1 dyoung count &= 0xFFFFFFFF00000000LL; \
3759 1.1 dyoung count |= current; \
3760 1.1 dyoung }
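/*
** Worked example for UPDATE_STAT_32: if last = 0xFFFFFFF0, count =
** 0xFFFFFFF0 and the register now reads 0x10, the counter wrapped:
** count becomes 0x100000010 (one full 2^32 cycle plus the new value).
*/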
3761 1.1 dyoung
3762 1.1 dyoung #define UPDATE_STAT_36(lsb, msb, last, count) \
3763 1.1 dyoung { \
3764 1.1 dyoung u64 cur_lsb = IXGBE_READ_REG(hw, lsb); \
3765 1.1 dyoung u64 cur_msb = IXGBE_READ_REG(hw, msb); \
3766 1.1 dyoung u64 current = ((cur_msb << 32) | cur_lsb); \
3767 1.1 dyoung if (current < last) \
3768 1.1 dyoung count += 0x1000000000LL; \
3769 1.1 dyoung last = current; \
3770 1.1 dyoung count &= 0xFFFFFFF000000000LL; \
3771 1.1 dyoung count |= current; \
3772 1.1 dyoung }
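/*
** UPDATE_STAT_36 works the same way, but for the 36-bit octet
** counters: a wrap adds 2^36 (0x1000000000) and only bits above
** bit 35 survive the mask.
*/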
3773 1.1 dyoung
3774 1.1 dyoung /*
3775 1.1 dyoung ** ixv_update_stats - Update the board statistics counters.
3776 1.1 dyoung */
3777 1.1 dyoung void
3778 1.1 dyoung ixv_update_stats(struct adapter *adapter)
3779 1.1 dyoung {
3780 1.1 dyoung struct ixgbe_hw *hw = &adapter->hw;
3781 1.1 dyoung
3782 1.1 dyoung UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
3783 1.1 dyoung adapter->stats.vfgprc);
3784 1.1 dyoung UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
3785 1.1 dyoung adapter->stats.vfgptc);
3786 1.1 dyoung UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
3787 1.1 dyoung adapter->stats.last_vfgorc, adapter->stats.vfgorc);
3788 1.1 dyoung UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
3789 1.1 dyoung adapter->stats.last_vfgotc, adapter->stats.vfgotc);
3790 1.1 dyoung UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
3791 1.1 dyoung adapter->stats.vfmprc);
3792 1.1 dyoung }
3793 1.1 dyoung
3794 1.1 dyoung /**********************************************************************
3795 1.1 dyoung *
 * This routine is called via the stats sysctl handler. It
 * provides a way to take a look at important statistics
 * maintained by the driver and hardware.
3799 1.1 dyoung *
3800 1.1 dyoung **********************************************************************/
3801 1.1 dyoung static void
3802 1.1 dyoung ixv_print_hw_stats(struct adapter * adapter)
3803 1.1 dyoung {
3804 1.1 dyoung device_t dev = adapter->dev;
3805 1.1 dyoung
3806 1.1 dyoung device_printf(dev,"Std Mbuf Failed = %lu\n",
3807 1.1 dyoung adapter->mbuf_defrag_failed);
3808 1.1 dyoung device_printf(dev,"Driver dropped packets = %lu\n",
3809 1.1 dyoung adapter->dropped_pkts);
3810 1.1 dyoung device_printf(dev, "watchdog timeouts = %ld\n",
3811 1.1 dyoung adapter->watchdog_events);
3812 1.1 dyoung
3813 1.1 dyoung device_printf(dev,"Good Packets Rcvd = %llu\n",
3814 1.1 dyoung (long long)adapter->stats.vfgprc);
3815 1.1 dyoung device_printf(dev,"Good Packets Xmtd = %llu\n",
3816 1.1 dyoung (long long)adapter->stats.vfgptc);
3817 1.1 dyoung device_printf(dev,"TSO Transmissions = %lu\n",
3818 1.1 dyoung adapter->tso_tx);
3819 1.1 dyoung
3820 1.1 dyoung }
3821 1.1 dyoung
3822 1.1 dyoung /**********************************************************************
3823 1.1 dyoung *
 * This routine is called via the debug sysctl handler. It
 * provides a way to take a look at important statistics
 * maintained by the driver and hardware.
3827 1.1 dyoung *
3828 1.1 dyoung **********************************************************************/
3829 1.1 dyoung static void
3830 1.1 dyoung ixv_print_debug_info(struct adapter *adapter)
3831 1.1 dyoung {
3832 1.1 dyoung device_t dev = adapter->dev;
3833 1.1 dyoung struct ixgbe_hw *hw = &adapter->hw;
3834 1.1 dyoung struct ix_queue *que = adapter->queues;
3835 1.1 dyoung struct rx_ring *rxr;
3836 1.1 dyoung struct tx_ring *txr;
3837 1.1 dyoung struct lro_ctrl *lro;
3838 1.1 dyoung
	device_printf(dev,"Error Byte Count = %u\n",
3840 1.1 dyoung IXGBE_READ_REG(hw, IXGBE_ERRBC));
3841 1.1 dyoung
3842 1.1 dyoung for (int i = 0; i < adapter->num_queues; i++, que++) {
3843 1.1 dyoung txr = que->txr;
3844 1.1 dyoung rxr = que->rxr;
3845 1.1 dyoung lro = &rxr->lro;
3846 1.1 dyoung device_printf(dev,"QUE(%d) IRQs Handled: %lu\n",
3847 1.1 dyoung que->msix, (long)que->irqs);
3848 1.1 dyoung device_printf(dev,"RX(%d) Packets Received: %lld\n",
3849 1.1 dyoung rxr->me, (long long)rxr->rx_packets);
3850 1.1 dyoung device_printf(dev,"RX(%d) Split RX Packets: %lld\n",
3851 1.1 dyoung rxr->me, (long long)rxr->rx_split_packets);
3852 1.1 dyoung device_printf(dev,"RX(%d) Bytes Received: %lu\n",
3853 1.1 dyoung rxr->me, (long)rxr->rx_bytes);
3854 1.1 dyoung device_printf(dev,"RX(%d) LRO Queued= %d\n",
3855 1.1 dyoung rxr->me, lro->lro_queued);
3856 1.1 dyoung device_printf(dev,"RX(%d) LRO Flushed= %d\n",
3857 1.1 dyoung rxr->me, lro->lro_flushed);
3858 1.1 dyoung device_printf(dev,"TX(%d) Packets Sent: %lu\n",
3859 1.1 dyoung txr->me, (long)txr->total_packets);
3860 1.1 dyoung device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
3861 1.1 dyoung txr->me, (long)txr->no_desc_avail);
3862 1.1 dyoung }
3863 1.1 dyoung
3864 1.1 dyoung device_printf(dev,"MBX IRQ Handled: %lu\n",
3865 1.1 dyoung (long)adapter->mbx_irq);
3866 1.1 dyoung return;
3867 1.1 dyoung }
3868 1.1 dyoung
3869 1.1 dyoung static int
3870 1.1 dyoung ixv_sysctl_stats(SYSCTL_HANDLER_ARGS)
3871 1.1 dyoung {
3872 1.1 dyoung int error;
3873 1.1 dyoung int result;
3874 1.1 dyoung struct adapter *adapter;
3875 1.1 dyoung
3876 1.1 dyoung result = -1;
3877 1.1 dyoung error = sysctl_handle_int(oidp, &result, 0, req);
3878 1.1 dyoung
3879 1.1 dyoung if (error || !req->newptr)
3880 1.1 dyoung return (error);
3881 1.1 dyoung
3882 1.1 dyoung if (result == 1) {
3883 1.1 dyoung adapter = (struct adapter *) arg1;
3884 1.1 dyoung ixv_print_hw_stats(adapter);
3885 1.1 dyoung }
3886 1.1 dyoung return error;
3887 1.1 dyoung }
3888 1.1 dyoung
3889 1.1 dyoung static int
3890 1.1 dyoung ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
3891 1.1 dyoung {
3892 1.1 dyoung int error, result;
3893 1.1 dyoung struct adapter *adapter;
3894 1.1 dyoung
3895 1.1 dyoung result = -1;
3896 1.1 dyoung error = sysctl_handle_int(oidp, &result, 0, req);
3897 1.1 dyoung
3898 1.1 dyoung if (error || !req->newptr)
3899 1.1 dyoung return (error);
3900 1.1 dyoung
3901 1.1 dyoung if (result == 1) {
3902 1.1 dyoung adapter = (struct adapter *) arg1;
3903 1.1 dyoung ixv_print_debug_info(adapter);
3904 1.1 dyoung }
3905 1.1 dyoung return error;
3906 1.1 dyoung }
3907 1.1 dyoung
3908 1.1 dyoung /*
3909 1.1 dyoung ** Set flow control using sysctl:
3910 1.1 dyoung ** Flow control values:
3911 1.1 dyoung ** 0 - off
3912 1.1 dyoung ** 1 - rx pause
3913 1.1 dyoung ** 2 - tx pause
3914 1.1 dyoung ** 3 - full
3915 1.1 dyoung */
3916 1.1 dyoung static int
3917 1.1 dyoung ixv_set_flowcntl(SYSCTL_HANDLER_ARGS)
3918 1.1 dyoung {
3919 1.1 dyoung int error;
3920 1.1 dyoung struct adapter *adapter;
3921 1.1 dyoung
3922 1.1 dyoung error = sysctl_handle_int(oidp, &ixv_flow_control, 0, req);
3923 1.1 dyoung
3924 1.1 dyoung if (error)
3925 1.1 dyoung return (error);
3926 1.1 dyoung
3927 1.1 dyoung adapter = (struct adapter *) arg1;
3928 1.1 dyoung switch (ixv_flow_control) {
3929 1.1 dyoung case ixgbe_fc_rx_pause:
3930 1.1 dyoung case ixgbe_fc_tx_pause:
3931 1.1 dyoung case ixgbe_fc_full:
3932 1.1 dyoung adapter->hw.fc.requested_mode = ixv_flow_control;
3933 1.1 dyoung break;
3934 1.1 dyoung case ixgbe_fc_none:
3935 1.1 dyoung default:
3936 1.1 dyoung adapter->hw.fc.requested_mode = ixgbe_fc_none;
3937 1.1 dyoung }
3938 1.1 dyoung
3939 1.1 dyoung ixgbe_fc_enable(&adapter->hw, 0);
3940 1.1 dyoung return error;
3941 1.1 dyoung }
3942 1.1 dyoung
3943 1.1 dyoung static void
3944 1.1 dyoung ixv_add_rx_process_limit(struct adapter *adapter, const char *name,
3945 1.1 dyoung const char *description, int *limit, int value)
3946 1.1 dyoung {
3947 1.1 dyoung *limit = value;
3948 1.1 dyoung SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
3949 1.1 dyoung SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
3950 1.1 dyoung OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
3951 1.1 dyoung }
3952 1.1 dyoung