ixv.c revision 1.198 1 1.198 msaitoh /* $NetBSD: ixv.c,v 1.198 2024/07/10 03:26:30 msaitoh Exp $ */
2 1.58 msaitoh
3 1.1 dyoung /******************************************************************************
4 1.1 dyoung
5 1.58 msaitoh Copyright (c) 2001-2017, Intel Corporation
6 1.1 dyoung All rights reserved.
7 1.58 msaitoh
8 1.58 msaitoh Redistribution and use in source and binary forms, with or without
9 1.1 dyoung modification, are permitted provided that the following conditions are met:
10 1.58 msaitoh
11 1.58 msaitoh 1. Redistributions of source code must retain the above copyright notice,
12 1.1 dyoung this list of conditions and the following disclaimer.
13 1.58 msaitoh
14 1.58 msaitoh 2. Redistributions in binary form must reproduce the above copyright
15 1.58 msaitoh notice, this list of conditions and the following disclaimer in the
16 1.1 dyoung documentation and/or other materials provided with the distribution.
17 1.58 msaitoh
18 1.58 msaitoh 3. Neither the name of the Intel Corporation nor the names of its
19 1.58 msaitoh contributors may be used to endorse or promote products derived from
20 1.1 dyoung this software without specific prior written permission.
21 1.58 msaitoh
22 1.1 dyoung THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 1.58 msaitoh AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 1.58 msaitoh IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 1.58 msaitoh ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 1.58 msaitoh LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 1.58 msaitoh CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 1.58 msaitoh SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 1.58 msaitoh INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 1.58 msaitoh CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 1.1 dyoung ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 1.1 dyoung POSSIBILITY OF SUCH DAMAGE.
33 1.1 dyoung
34 1.1 dyoung ******************************************************************************/
35 1.92 msaitoh /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 331224 2018-03-19 20:55:05Z erj $*/
36 1.1 dyoung
37 1.159 msaitoh #include <sys/cdefs.h>
38 1.198 msaitoh __KERNEL_RCSID(0, "$NetBSD: ixv.c,v 1.198 2024/07/10 03:26:30 msaitoh Exp $");
39 1.159 msaitoh
40 1.55 msaitoh #ifdef _KERNEL_OPT
41 1.1 dyoung #include "opt_inet.h"
42 1.4 msaitoh #include "opt_inet6.h"
43 1.55 msaitoh #endif
44 1.1 dyoung
45 1.21 msaitoh #include "ixgbe.h"
46 1.1 dyoung
47 1.58 msaitoh /************************************************************************
48 1.58 msaitoh * Driver version
49 1.58 msaitoh ************************************************************************/
50 1.103 maxv static const char ixv_driver_version[] = "2.0.1-k";
51 1.117 msaitoh /* XXX NetBSD: + 1.5.17 */
52 1.58 msaitoh
53 1.58 msaitoh /************************************************************************
54 1.58 msaitoh * PCI Device ID Table
55 1.58 msaitoh *
56 1.58 msaitoh * Used by probe to select devices to load on
57 1.58 msaitoh * Last field stores an index into ixv_strings
58 1.58 msaitoh * Last entry must be all 0s
59 1.1 dyoung *
60 1.58 msaitoh * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
61 1.58 msaitoh ************************************************************************/
62 1.103 maxv static const ixgbe_vendor_info_t ixv_vendor_info_array[] =
63 1.1 dyoung {
64 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
65 1.5 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
66 1.21 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
67 1.21 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
68 1.58 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
69 1.1 dyoung /* required last entry */
70 1.1 dyoung {0, 0, 0, 0, 0}
71 1.1 dyoung };
72 1.1 dyoung
73 1.58 msaitoh /************************************************************************
74 1.58 msaitoh * Table of branding strings
75 1.58 msaitoh ************************************************************************/
76 1.58 msaitoh static const char *ixv_strings[] = {
77 1.1 dyoung "Intel(R) PRO/10GbE Virtual Function Network Driver"
78 1.1 dyoung };
79 1.1 dyoung
80 1.1 dyoung /*********************************************************************
81 1.1 dyoung * Function prototypes
82 1.1 dyoung *********************************************************************/
83 1.114 msaitoh static int ixv_probe(device_t, cfdata_t, void *);
84 1.22 msaitoh static void ixv_attach(device_t, device_t, void *);
85 1.114 msaitoh static int ixv_detach(device_t, int);
86 1.3 msaitoh #if 0
87 1.114 msaitoh static int ixv_shutdown(device_t);
88 1.3 msaitoh #endif
89 1.57 msaitoh static int ixv_ifflags_cb(struct ethercom *);
90 1.114 msaitoh static int ixv_ioctl(struct ifnet *, u_long, void *);
91 1.3 msaitoh static int ixv_init(struct ifnet *);
92 1.186 msaitoh static void ixv_init_locked(struct ixgbe_softc *);
93 1.56 msaitoh static void ixv_ifstop(struct ifnet *, int);
94 1.153 msaitoh static void ixv_stop_locked(void *);
95 1.186 msaitoh static void ixv_init_device_features(struct ixgbe_softc *);
96 1.114 msaitoh static void ixv_media_status(struct ifnet *, struct ifmediareq *);
97 1.114 msaitoh static int ixv_media_change(struct ifnet *);
98 1.186 msaitoh static int ixv_allocate_pci_resources(struct ixgbe_softc *,
99 1.3 msaitoh const struct pci_attach_args *);
100 1.186 msaitoh static void ixv_free_deferred_handlers(struct ixgbe_softc *);
101 1.186 msaitoh static int ixv_allocate_msix(struct ixgbe_softc *,
102 1.11 msaitoh const struct pci_attach_args *);
103 1.186 msaitoh static int ixv_configure_interrupts(struct ixgbe_softc *);
104 1.186 msaitoh static void ixv_free_pci_resources(struct ixgbe_softc *);
105 1.114 msaitoh static void ixv_local_timer(void *);
106 1.151 msaitoh static void ixv_handle_timer(struct work *, void *);
107 1.186 msaitoh static int ixv_setup_interface(device_t, struct ixgbe_softc *);
108 1.186 msaitoh static void ixv_schedule_admin_tasklet(struct ixgbe_softc *);
109 1.186 msaitoh static int ixv_negotiate_api(struct ixgbe_softc *);
110 1.186 msaitoh
111 1.186 msaitoh static void ixv_initialize_transmit_units(struct ixgbe_softc *);
112 1.186 msaitoh static void ixv_initialize_receive_units(struct ixgbe_softc *);
113 1.186 msaitoh static void ixv_initialize_rss_mapping(struct ixgbe_softc *);
114 1.186 msaitoh static s32 ixv_check_link(struct ixgbe_softc *);
115 1.186 msaitoh
116 1.186 msaitoh static void ixv_enable_intr(struct ixgbe_softc *);
117 1.186 msaitoh static void ixv_disable_intr(struct ixgbe_softc *);
118 1.186 msaitoh static int ixv_set_rxfilter(struct ixgbe_softc *);
119 1.186 msaitoh static void ixv_update_link_status(struct ixgbe_softc *);
120 1.3 msaitoh static int ixv_sysctl_debug(SYSCTLFN_PROTO);
121 1.186 msaitoh static void ixv_set_ivar(struct ixgbe_softc *, u8, u8, s8);
122 1.186 msaitoh static void ixv_configure_ivars(struct ixgbe_softc *);
123 1.1 dyoung static u8 * ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
124 1.186 msaitoh static void ixv_eitr_write(struct ixgbe_softc *, uint32_t, uint32_t);
125 1.1 dyoung
126 1.186 msaitoh static void ixv_setup_vlan_tagging(struct ixgbe_softc *);
127 1.186 msaitoh static int ixv_setup_vlan_support(struct ixgbe_softc *);
128 1.120 msaitoh static int ixv_vlan_cb(struct ethercom *, uint16_t, bool);
129 1.186 msaitoh static int ixv_register_vlan(struct ixgbe_softc *, u16);
130 1.186 msaitoh static int ixv_unregister_vlan(struct ixgbe_softc *, u16);
131 1.1 dyoung
132 1.186 msaitoh static void ixv_add_device_sysctls(struct ixgbe_softc *);
133 1.186 msaitoh static void ixv_init_stats(struct ixgbe_softc *);
134 1.186 msaitoh static void ixv_update_stats(struct ixgbe_softc *);
135 1.186 msaitoh static void ixv_add_stats_sysctls(struct ixgbe_softc *);
136 1.186 msaitoh static void ixv_clear_evcnt(struct ixgbe_softc *);
137 1.83 msaitoh
138 1.83 msaitoh /* Sysctl handlers */
139 1.114 msaitoh static int ixv_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
140 1.114 msaitoh static int ixv_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
141 1.164 msaitoh static int ixv_sysctl_next_to_refresh_handler(SYSCTLFN_PROTO);
142 1.114 msaitoh static int ixv_sysctl_rdh_handler(SYSCTLFN_PROTO);
143 1.114 msaitoh static int ixv_sysctl_rdt_handler(SYSCTLFN_PROTO);
144 1.114 msaitoh static int ixv_sysctl_tdt_handler(SYSCTLFN_PROTO);
145 1.114 msaitoh static int ixv_sysctl_tdh_handler(SYSCTLFN_PROTO);
146 1.179 msaitoh static int ixv_sysctl_tx_process_limit(SYSCTLFN_PROTO);
147 1.179 msaitoh static int ixv_sysctl_rx_process_limit(SYSCTLFN_PROTO);
148 1.163 msaitoh static int ixv_sysctl_rx_copy_len(SYSCTLFN_PROTO);
149 1.1 dyoung
150 1.58 msaitoh /* The MSI-X Interrupt handlers */
151 1.11 msaitoh static int ixv_msix_que(void *);
152 1.11 msaitoh static int ixv_msix_mbx(void *);
153 1.1 dyoung
154 1.151 msaitoh /* Event handlers running on workqueue */
155 1.3 msaitoh static void ixv_handle_que(void *);
156 1.3 msaitoh
157 1.151 msaitoh /* Deferred workqueue handlers */
158 1.151 msaitoh static void ixv_handle_admin(struct work *, void *);
159 1.84 knakahar static void ixv_handle_que_work(struct work *, void *);
160 1.84 knakahar
161 1.186 msaitoh const struct sysctlnode *ixv_sysctl_instance(struct ixgbe_softc *);
162 1.103 maxv static const ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
163 1.1 dyoung
164 1.58 msaitoh /************************************************************************
165 1.150 msaitoh * NetBSD Device Interface Entry Points
166 1.58 msaitoh ************************************************************************/
167 1.186 msaitoh CFATTACH_DECL3_NEW(ixv, sizeof(struct ixgbe_softc),
168 1.3 msaitoh ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
169 1.3 msaitoh DVF_DETACH_SHUTDOWN);
170 1.3 msaitoh
171 1.1 dyoung #if 0
172 1.1 dyoung static driver_t ixv_driver = {
173 1.186 msaitoh "ixv", ixv_methods, sizeof(struct ixgbe_softc),
174 1.1 dyoung };
175 1.1 dyoung
176 1.22 msaitoh devclass_t ixv_devclass;
177 1.22 msaitoh DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
178 1.1 dyoung MODULE_DEPEND(ixv, pci, 1, 1, 1);
179 1.1 dyoung MODULE_DEPEND(ixv, ether, 1, 1, 1);
180 1.1 dyoung #endif
181 1.1 dyoung
182 1.1 dyoung /*
183 1.58 msaitoh * TUNEABLE PARAMETERS:
184 1.58 msaitoh */
185 1.1 dyoung
186 1.58 msaitoh /* Number of Queues - do not exceed MSI-X vectors - 1 */
187 1.44 msaitoh static int ixv_num_queues = 0;
188 1.23 msaitoh #define TUNABLE_INT(__x, __y)
189 1.23 msaitoh TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
190 1.23 msaitoh
191 1.1 dyoung /*
192 1.58 msaitoh * AIM: Adaptive Interrupt Moderation
193 1.58 msaitoh * which means that the interrupt rate
194 1.58 msaitoh * is varied over time based on the
195 1.58 msaitoh * traffic for that interrupt vector
196 1.58 msaitoh */
197 1.50 msaitoh static bool ixv_enable_aim = false;
198 1.1 dyoung TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
199 1.1 dyoung
200 1.83 msaitoh static int ixv_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
201 1.83 msaitoh TUNABLE_INT("hw.ixv.max_interrupt_rate", &ixv_max_interrupt_rate);
202 1.83 msaitoh
203 1.1 dyoung /* How many packets rxeof tries to clean at a time */
204 1.21 msaitoh static int ixv_rx_process_limit = 256;
205 1.1 dyoung TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
206 1.1 dyoung
207 1.21 msaitoh /* How many packets txeof tries to clean at a time */
208 1.21 msaitoh static int ixv_tx_process_limit = 256;
209 1.21 msaitoh TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
210 1.1 dyoung
211 1.112 msaitoh /* Which packet processing uses workqueue or softint */
212 1.84 knakahar static bool ixv_txrx_workqueue = false;
213 1.84 knakahar
214 1.1 dyoung /*
215 1.58 msaitoh * Number of TX descriptors per ring,
216 1.58 msaitoh * setting higher than RX as this seems
217 1.58 msaitoh * the better performing choice.
218 1.58 msaitoh */
219 1.187 msaitoh static int ixv_txd = DEFAULT_TXD;
220 1.1 dyoung TUNABLE_INT("hw.ixv.txd", &ixv_txd);
221 1.1 dyoung
222 1.1 dyoung /* Number of RX descriptors per ring */
223 1.187 msaitoh static int ixv_rxd = DEFAULT_RXD;
224 1.1 dyoung TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
225 1.1 dyoung
226 1.58 msaitoh /* Legacy Transmit (single queue) */
227 1.58 msaitoh static int ixv_enable_legacy_tx = 0;
228 1.58 msaitoh TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx);
229 1.58 msaitoh
230 1.84 knakahar #define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
231 1.55 msaitoh
232 1.58 msaitoh #if 0
233 1.58 msaitoh static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *);
234 1.58 msaitoh static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *);
235 1.58 msaitoh #endif
236 1.58 msaitoh
237 1.58 msaitoh /************************************************************************
238 1.58 msaitoh * ixv_probe - Device identification routine
239 1.1 dyoung *
240 1.58 msaitoh * Determines if the driver should be loaded on
241 1.58 msaitoh * adapter based on its PCI vendor/device ID.
242 1.1 dyoung *
243 1.58 msaitoh * return BUS_PROBE_DEFAULT on success, positive on failure
244 1.58 msaitoh ************************************************************************/
245 1.1 dyoung static int
246 1.3 msaitoh ixv_probe(device_t dev, cfdata_t cf, void *aux)
247 1.3 msaitoh {
248 1.19 knakahar #ifdef __HAVE_PCI_MSI_MSIX
249 1.3 msaitoh const struct pci_attach_args *pa = aux;
250 1.3 msaitoh
251 1.3 msaitoh return (ixv_lookup(pa) != NULL) ? 1 : 0;
252 1.18 msaitoh #else
253 1.18 msaitoh return 0;
254 1.18 msaitoh #endif
255 1.58 msaitoh } /* ixv_probe */
256 1.3 msaitoh
257 1.103 maxv static const ixgbe_vendor_info_t *
258 1.3 msaitoh ixv_lookup(const struct pci_attach_args *pa)
259 1.1 dyoung {
260 1.103 maxv const ixgbe_vendor_info_t *ent;
261 1.3 msaitoh pcireg_t subid;
262 1.1 dyoung
263 1.31 msaitoh INIT_DEBUGOUT("ixv_lookup: begin");
264 1.1 dyoung
265 1.3 msaitoh if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
266 1.3 msaitoh return NULL;
267 1.1 dyoung
268 1.3 msaitoh subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
269 1.1 dyoung
270 1.3 msaitoh for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
271 1.3 msaitoh if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
272 1.3 msaitoh (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
273 1.3 msaitoh ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
274 1.1 dyoung (ent->subvendor_id == 0)) &&
275 1.3 msaitoh ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
276 1.1 dyoung (ent->subdevice_id == 0))) {
277 1.3 msaitoh return ent;
278 1.1 dyoung }
279 1.1 dyoung }
280 1.58 msaitoh
281 1.3 msaitoh return NULL;
282 1.3 msaitoh }
283 1.3 msaitoh
284 1.58 msaitoh /************************************************************************
285 1.58 msaitoh * ixv_attach - Device initialization routine
286 1.57 msaitoh *
287 1.58 msaitoh * Called when the driver is being loaded.
288 1.58 msaitoh * Identifies the type of hardware, allocates all resources
289 1.58 msaitoh * and initializes the hardware.
290 1.57 msaitoh *
291 1.58 msaitoh * return 0 on success, positive on failure
292 1.58 msaitoh ************************************************************************/
293 1.3 msaitoh static void
294 1.3 msaitoh ixv_attach(device_t parent, device_t dev, void *aux)
295 1.1 dyoung {
296 1.186 msaitoh struct ixgbe_softc *sc;
297 1.1 dyoung struct ixgbe_hw *hw;
298 1.114 msaitoh int error = 0;
299 1.58 msaitoh pcireg_t id, subid;
300 1.103 maxv const ixgbe_vendor_info_t *ent;
301 1.3 msaitoh const struct pci_attach_args *pa = aux;
302 1.60 msaitoh const char *apivstr;
303 1.66 msaitoh const char *str;
304 1.151 msaitoh char wqname[MAXCOMLEN];
305 1.63 msaitoh char buf[256];
306 1.63 msaitoh
307 1.1 dyoung INIT_DEBUGOUT("ixv_attach: begin");
308 1.1 dyoung
309 1.58 msaitoh /*
310 1.58 msaitoh * Make sure BUSMASTER is set, on a VM under
311 1.58 msaitoh * KVM it may not be and will break things.
312 1.58 msaitoh */
313 1.58 msaitoh ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
314 1.58 msaitoh
315 1.1 dyoung /* Allocate, clear, and link in our adapter structure */
316 1.186 msaitoh sc = device_private(dev);
317 1.186 msaitoh sc->hw.back = sc;
318 1.186 msaitoh sc->dev = dev;
319 1.186 msaitoh hw = &sc->hw;
320 1.26 msaitoh
321 1.186 msaitoh sc->init_locked = ixv_init_locked;
322 1.186 msaitoh sc->stop_locked = ixv_stop_locked;
323 1.26 msaitoh
324 1.186 msaitoh sc->osdep.pc = pa->pa_pc;
325 1.186 msaitoh sc->osdep.tag = pa->pa_tag;
326 1.43 msaitoh if (pci_dma64_available(pa))
327 1.186 msaitoh sc->osdep.dmat = pa->pa_dmat64;
328 1.43 msaitoh else
329 1.186 msaitoh sc->osdep.dmat = pa->pa_dmat;
330 1.186 msaitoh sc->osdep.attached = false;
331 1.1 dyoung
332 1.3 msaitoh ent = ixv_lookup(pa);
333 1.3 msaitoh
334 1.3 msaitoh KASSERT(ent != NULL);
335 1.3 msaitoh
336 1.3 msaitoh aprint_normal(": %s, Version - %s\n",
337 1.3 msaitoh ixv_strings[ent->index], ixv_driver_version);
338 1.3 msaitoh
339 1.150 msaitoh /* Core Lock Init */
340 1.186 msaitoh IXGBE_CORE_LOCK_INIT(sc, device_xname(dev));
341 1.1 dyoung
342 1.1 dyoung /* Do base PCI setup - map BAR0 */
343 1.186 msaitoh if (ixv_allocate_pci_resources(sc, pa)) {
344 1.26 msaitoh aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
345 1.1 dyoung error = ENXIO;
346 1.1 dyoung goto err_out;
347 1.1 dyoung }
348 1.1 dyoung
349 1.58 msaitoh /* SYSCTL APIs */
350 1.186 msaitoh ixv_add_device_sysctls(sc);
351 1.25 msaitoh
352 1.151 msaitoh /* Set up the timer callout and workqueue */
353 1.198 msaitoh callout_init(&sc->timer, CALLOUT_MPSAFE);
354 1.151 msaitoh snprintf(wqname, sizeof(wqname), "%s-timer", device_xname(dev));
355 1.186 msaitoh error = workqueue_create(&sc->timer_wq, wqname,
356 1.198 msaitoh ixv_handle_timer, sc, IXGBE_WORKQUEUE_PRI, IPL_NET, WQ_MPSAFE);
357 1.151 msaitoh if (error) {
358 1.151 msaitoh aprint_error_dev(dev,
359 1.151 msaitoh "could not create timer workqueue (%d)\n", error);
360 1.151 msaitoh goto err_out;
361 1.151 msaitoh }
362 1.25 msaitoh
363 1.58 msaitoh /* Save off the information about this board */
364 1.58 msaitoh id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
365 1.58 msaitoh subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
366 1.58 msaitoh hw->vendor_id = PCI_VENDOR(id);
367 1.58 msaitoh hw->device_id = PCI_PRODUCT(id);
368 1.58 msaitoh hw->revision_id =
369 1.58 msaitoh PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
370 1.58 msaitoh hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
371 1.58 msaitoh hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
372 1.1 dyoung
373 1.58 msaitoh /* A subset of set_mac_type */
374 1.58 msaitoh switch (hw->device_id) {
375 1.58 msaitoh case IXGBE_DEV_ID_82599_VF:
376 1.58 msaitoh hw->mac.type = ixgbe_mac_82599_vf;
377 1.66 msaitoh str = "82599 VF";
378 1.58 msaitoh break;
379 1.58 msaitoh case IXGBE_DEV_ID_X540_VF:
380 1.58 msaitoh hw->mac.type = ixgbe_mac_X540_vf;
381 1.66 msaitoh str = "X540 VF";
382 1.58 msaitoh break;
383 1.58 msaitoh case IXGBE_DEV_ID_X550_VF:
384 1.58 msaitoh hw->mac.type = ixgbe_mac_X550_vf;
385 1.66 msaitoh str = "X550 VF";
386 1.58 msaitoh break;
387 1.58 msaitoh case IXGBE_DEV_ID_X550EM_X_VF:
388 1.58 msaitoh hw->mac.type = ixgbe_mac_X550EM_x_vf;
389 1.66 msaitoh str = "X550EM X VF";
390 1.58 msaitoh break;
391 1.58 msaitoh case IXGBE_DEV_ID_X550EM_A_VF:
392 1.58 msaitoh hw->mac.type = ixgbe_mac_X550EM_a_vf;
393 1.66 msaitoh str = "X550EM A VF";
394 1.58 msaitoh break;
395 1.58 msaitoh default:
396 1.58 msaitoh /* Shouldn't get here since probe succeeded */
397 1.58 msaitoh aprint_error_dev(dev, "Unknown device ID!\n");
398 1.58 msaitoh error = ENXIO;
399 1.1 dyoung goto err_out;
400 1.58 msaitoh break;
401 1.1 dyoung }
402 1.66 msaitoh aprint_normal_dev(dev, "device %s\n", str);
403 1.1 dyoung
404 1.186 msaitoh ixv_init_device_features(sc);
405 1.58 msaitoh
406 1.58 msaitoh /* Initialize the shared code */
407 1.58 msaitoh error = ixgbe_init_ops_vf(hw);
408 1.1 dyoung if (error) {
409 1.58 msaitoh aprint_error_dev(dev, "ixgbe_init_ops_vf() failed!\n");
410 1.1 dyoung error = EIO;
411 1.58 msaitoh goto err_out;
412 1.1 dyoung }
413 1.1 dyoung
414 1.1 dyoung /* Setup the mailbox */
415 1.1 dyoung ixgbe_init_mbx_params_vf(hw);
416 1.1 dyoung
417 1.58 msaitoh /* Set the right number of segments */
418 1.168 msaitoh KASSERT(IXGBE_82599_SCATTER_MAX >= IXGBE_SCATTER_DEFAULT);
419 1.186 msaitoh sc->num_segs = IXGBE_SCATTER_DEFAULT;
420 1.58 msaitoh
421 1.26 msaitoh /* Reset mbox api to 1.0 */
422 1.58 msaitoh error = hw->mac.ops.reset_hw(hw);
423 1.26 msaitoh if (error == IXGBE_ERR_RESET_FAILED)
424 1.58 msaitoh aprint_error_dev(dev, "...reset_hw() failure: Reset Failed!\n");
425 1.26 msaitoh else if (error)
426 1.58 msaitoh aprint_error_dev(dev, "...reset_hw() failed with error %d\n",
427 1.58 msaitoh error);
428 1.26 msaitoh if (error) {
429 1.26 msaitoh error = EIO;
430 1.58 msaitoh goto err_out;
431 1.26 msaitoh }
432 1.1 dyoung
433 1.58 msaitoh error = hw->mac.ops.init_hw(hw);
434 1.1 dyoung if (error) {
435 1.58 msaitoh aprint_error_dev(dev, "...init_hw() failed!\n");
436 1.1 dyoung error = EIO;
437 1.58 msaitoh goto err_out;
438 1.1 dyoung }
439 1.63 msaitoh
440 1.58 msaitoh /* Negotiate mailbox API version */
441 1.186 msaitoh error = ixv_negotiate_api(sc);
442 1.58 msaitoh if (error)
443 1.58 msaitoh aprint_normal_dev(dev,
444 1.58 msaitoh "MBX API negotiation failed during attach!\n");
445 1.60 msaitoh switch (hw->api_version) {
446 1.60 msaitoh case ixgbe_mbox_api_10:
447 1.60 msaitoh apivstr = "1.0";
448 1.60 msaitoh break;
449 1.60 msaitoh case ixgbe_mbox_api_20:
450 1.60 msaitoh apivstr = "2.0";
451 1.60 msaitoh break;
452 1.60 msaitoh case ixgbe_mbox_api_11:
453 1.60 msaitoh apivstr = "1.1";
454 1.60 msaitoh break;
455 1.60 msaitoh case ixgbe_mbox_api_12:
456 1.60 msaitoh apivstr = "1.2";
457 1.60 msaitoh break;
458 1.60 msaitoh case ixgbe_mbox_api_13:
459 1.60 msaitoh apivstr = "1.3";
460 1.60 msaitoh break;
461 1.172 msaitoh case ixgbe_mbox_api_14:
462 1.172 msaitoh apivstr = "1.4";
463 1.172 msaitoh break;
464 1.172 msaitoh case ixgbe_mbox_api_15:
465 1.172 msaitoh apivstr = "1.5";
466 1.172 msaitoh break;
467 1.60 msaitoh default:
468 1.60 msaitoh apivstr = "unknown";
469 1.60 msaitoh break;
470 1.60 msaitoh }
471 1.60 msaitoh aprint_normal_dev(dev, "Mailbox API %s\n", apivstr);
472 1.1 dyoung
473 1.21 msaitoh /* If no mac address was assigned, make a random one */
474 1.21 msaitoh if (!ixv_check_ether_addr(hw->mac.addr)) {
475 1.21 msaitoh u8 addr[ETHER_ADDR_LEN];
476 1.59 msaitoh uint64_t rndval = cprng_strong64();
477 1.21 msaitoh
478 1.21 msaitoh memcpy(addr, &rndval, sizeof(addr));
479 1.21 msaitoh addr[0] &= 0xFE;
480 1.21 msaitoh addr[0] |= 0x02;
481 1.21 msaitoh bcopy(addr, hw->mac.addr, sizeof(addr));
482 1.21 msaitoh }
483 1.21 msaitoh
484 1.58 msaitoh /* Register for VLAN events */
485 1.186 msaitoh ether_set_vlan_cb(&sc->osdep.ec, ixv_vlan_cb);
486 1.58 msaitoh
487 1.58 msaitoh /* Do descriptor calc and sanity checks */
488 1.58 msaitoh if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
489 1.58 msaitoh ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
490 1.188 msaitoh aprint_error_dev(dev, "Invalid TX ring size (%d). "
491 1.188 msaitoh "It must be between %d and %d, "
492 1.188 msaitoh "inclusive, and must be a multiple of %zu. "
493 1.188 msaitoh "Using default value of %d instead.\n",
494 1.188 msaitoh ixv_txd, MIN_TXD, MAX_TXD,
495 1.188 msaitoh DBA_ALIGN / sizeof(union ixgbe_adv_tx_desc),
496 1.188 msaitoh DEFAULT_TXD);
497 1.186 msaitoh sc->num_tx_desc = DEFAULT_TXD;
498 1.58 msaitoh } else
499 1.186 msaitoh sc->num_tx_desc = ixv_txd;
500 1.58 msaitoh
501 1.58 msaitoh if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
502 1.58 msaitoh ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
503 1.188 msaitoh aprint_error_dev(dev, "Invalid RX ring size (%d). "
504 1.188 msaitoh "It must be between %d and %d, "
505 1.188 msaitoh "inclusive, and must be a multiple of %zu. "
506 1.188 msaitoh "Using default value of %d instead.\n",
507 1.188 msaitoh ixv_rxd, MIN_RXD, MAX_RXD,
508 1.188 msaitoh DBA_ALIGN / sizeof(union ixgbe_adv_rx_desc),
509 1.188 msaitoh DEFAULT_RXD);
510 1.186 msaitoh sc->num_rx_desc = DEFAULT_RXD;
511 1.58 msaitoh } else
512 1.186 msaitoh sc->num_rx_desc = ixv_rxd;
513 1.58 msaitoh
514 1.179 msaitoh /* Sysctls for limiting the amount of work done in the taskqueues */
515 1.186 msaitoh sc->rx_process_limit
516 1.186 msaitoh = (ixv_rx_process_limit <= sc->num_rx_desc)
517 1.186 msaitoh ? ixv_rx_process_limit : sc->num_rx_desc;
518 1.186 msaitoh sc->tx_process_limit
519 1.186 msaitoh = (ixv_tx_process_limit <= sc->num_tx_desc)
520 1.186 msaitoh ? ixv_tx_process_limit : sc->num_tx_desc;
521 1.179 msaitoh
522 1.163 msaitoh /* Set default high limit of copying mbuf in rxeof */
523 1.186 msaitoh sc->rx_copy_len = IXGBE_RX_COPY_LEN_MAX;
524 1.163 msaitoh
525 1.58 msaitoh /* Setup MSI-X */
526 1.186 msaitoh error = ixv_configure_interrupts(sc);
527 1.58 msaitoh if (error)
528 1.58 msaitoh goto err_out;
529 1.58 msaitoh
530 1.58 msaitoh /* Allocate our TX/RX Queues */
531 1.186 msaitoh if (ixgbe_allocate_queues(sc)) {
532 1.58 msaitoh aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
533 1.58 msaitoh error = ENOMEM;
534 1.58 msaitoh goto err_out;
535 1.58 msaitoh }
536 1.58 msaitoh
537 1.50 msaitoh /* hw.ix defaults init */
538 1.186 msaitoh sc->enable_aim = ixv_enable_aim;
539 1.191 msaitoh sc->max_interrupt_rate = ixv_max_interrupt_rate;
540 1.50 msaitoh
541 1.186 msaitoh sc->txrx_use_workqueue = ixv_txrx_workqueue;
542 1.84 knakahar
543 1.186 msaitoh error = ixv_allocate_msix(sc, pa);
544 1.76 msaitoh if (error) {
545 1.130 msaitoh aprint_error_dev(dev, "ixv_allocate_msix() failed!\n");
546 1.76 msaitoh goto err_late;
547 1.76 msaitoh }
548 1.76 msaitoh
549 1.1 dyoung /* Setup OS specific network interface */
550 1.186 msaitoh error = ixv_setup_interface(dev, sc);
551 1.73 msaitoh if (error != 0) {
552 1.73 msaitoh aprint_error_dev(dev, "ixv_setup_interface() failed!\n");
553 1.73 msaitoh goto err_late;
554 1.73 msaitoh }
555 1.1 dyoung
556 1.170 msaitoh /* Allocate multicast array memory */
557 1.186 msaitoh sc->mta = malloc(sizeof(*sc->mta) *
558 1.170 msaitoh IXGBE_MAX_VF_MC, M_DEVBUF, M_WAITOK);
559 1.170 msaitoh
560 1.185 msaitoh /* Check if VF was disabled by PF */
561 1.186 msaitoh error = hw->mac.ops.get_link_state(hw, &sc->link_enabled);
562 1.192 msaitoh if (error) {
563 1.185 msaitoh /* PF is not capable of controlling VF state. Enable the link. */
564 1.186 msaitoh sc->link_enabled = TRUE;
565 1.185 msaitoh }
566 1.185 msaitoh
567 1.1 dyoung /* Do the stats setup */
568 1.186 msaitoh ixv_init_stats(sc);
569 1.186 msaitoh ixv_add_stats_sysctls(sc);
570 1.1 dyoung
571 1.186 msaitoh if (sc->feat_en & IXGBE_FEATURE_NETMAP)
572 1.186 msaitoh ixgbe_netmap_attach(sc);
573 1.48 msaitoh
574 1.186 msaitoh snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, sc->feat_cap);
575 1.63 msaitoh aprint_verbose_dev(dev, "feature cap %s\n", buf);
576 1.186 msaitoh snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, sc->feat_en);
577 1.63 msaitoh aprint_verbose_dev(dev, "feature ena %s\n", buf);
578 1.63 msaitoh
579 1.1 dyoung INIT_DEBUGOUT("ixv_attach: end");
580 1.186 msaitoh sc->osdep.attached = true;
581 1.57 msaitoh
582 1.3 msaitoh return;
583 1.1 dyoung
584 1.1 dyoung err_late:
585 1.186 msaitoh ixgbe_free_queues(sc);
586 1.1 dyoung err_out:
587 1.186 msaitoh ixv_free_pci_resources(sc);
588 1.186 msaitoh IXGBE_CORE_LOCK_DESTROY(sc);
589 1.58 msaitoh
590 1.3 msaitoh return;
591 1.58 msaitoh } /* ixv_attach */
592 1.1 dyoung
593 1.58 msaitoh /************************************************************************
594 1.58 msaitoh * ixv_detach - Device removal routine
595 1.1 dyoung *
596 1.58 msaitoh * Called when the driver is being removed.
597 1.58 msaitoh * Stops the adapter and deallocates all the resources
598 1.58 msaitoh * that were allocated for driver operation.
599 1.1 dyoung *
600 1.58 msaitoh * return 0 on success, positive on failure
601 1.58 msaitoh ************************************************************************/
602 1.1 dyoung static int
603 1.3 msaitoh ixv_detach(device_t dev, int flags)
604 1.1 dyoung {
605 1.186 msaitoh struct ixgbe_softc *sc = device_private(dev);
606 1.186 msaitoh struct ixgbe_hw *hw = &sc->hw;
607 1.186 msaitoh struct tx_ring *txr = sc->tx_rings;
608 1.186 msaitoh struct rx_ring *rxr = sc->rx_rings;
609 1.186 msaitoh struct ixgbevf_hw_stats *stats = &sc->stats.vf;
610 1.1 dyoung
611 1.1 dyoung INIT_DEBUGOUT("ixv_detach: begin");
612 1.186 msaitoh if (sc->osdep.attached == false)
613 1.13 msaitoh return 0;
614 1.1 dyoung
615 1.56 msaitoh /* Stop the interface. Callouts are stopped in it. */
616 1.186 msaitoh ixv_ifstop(sc->ifp, 1);
617 1.56 msaitoh
618 1.186 msaitoh if (VLAN_ATTACHED(&sc->osdep.ec) &&
619 1.169 yamaguch (flags & (DETACH_SHUTDOWN | DETACH_FORCE)) == 0) {
620 1.26 msaitoh aprint_error_dev(dev, "VLANs in use, detach first\n");
621 1.3 msaitoh return EBUSY;
622 1.1 dyoung }
623 1.1 dyoung
624 1.186 msaitoh ether_ifdetach(sc->ifp);
625 1.186 msaitoh callout_halt(&sc->timer, NULL);
626 1.186 msaitoh ixv_free_deferred_handlers(sc);
627 1.58 msaitoh
628 1.186 msaitoh if (sc->feat_en & IXGBE_FEATURE_NETMAP)
629 1.186 msaitoh netmap_detach(sc->ifp);
630 1.58 msaitoh
631 1.186 msaitoh ixv_free_pci_resources(sc);
632 1.3 msaitoh #if 0 /* XXX the NetBSD port is probably missing something here */
633 1.1 dyoung bus_generic_detach(dev);
634 1.3 msaitoh #endif
635 1.186 msaitoh if_detach(sc->ifp);
636 1.186 msaitoh ifmedia_fini(&sc->media);
637 1.186 msaitoh if_percpuq_destroy(sc->ipq);
638 1.186 msaitoh
639 1.186 msaitoh sysctl_teardown(&sc->sysctllog);
640 1.186 msaitoh evcnt_detach(&sc->efbig_tx_dma_setup);
641 1.186 msaitoh evcnt_detach(&sc->mbuf_defrag_failed);
642 1.186 msaitoh evcnt_detach(&sc->efbig2_tx_dma_setup);
643 1.186 msaitoh evcnt_detach(&sc->einval_tx_dma_setup);
644 1.186 msaitoh evcnt_detach(&sc->other_tx_dma_setup);
645 1.186 msaitoh evcnt_detach(&sc->eagain_tx_dma_setup);
646 1.186 msaitoh evcnt_detach(&sc->enomem_tx_dma_setup);
647 1.186 msaitoh evcnt_detach(&sc->watchdog_events);
648 1.186 msaitoh evcnt_detach(&sc->tso_err);
649 1.186 msaitoh evcnt_detach(&sc->admin_irqev);
650 1.186 msaitoh evcnt_detach(&sc->link_workev);
651 1.186 msaitoh
652 1.186 msaitoh txr = sc->tx_rings;
653 1.186 msaitoh for (int i = 0; i < sc->num_queues; i++, rxr++, txr++) {
654 1.186 msaitoh evcnt_detach(&sc->queues[i].irqs);
655 1.186 msaitoh evcnt_detach(&sc->queues[i].handleq);
656 1.186 msaitoh evcnt_detach(&sc->queues[i].req);
657 1.49 msaitoh evcnt_detach(&txr->total_packets);
658 1.49 msaitoh #ifndef IXGBE_LEGACY_TX
659 1.49 msaitoh evcnt_detach(&txr->pcq_drops);
660 1.49 msaitoh #endif
661 1.184 msaitoh evcnt_detach(&txr->no_desc_avail);
662 1.184 msaitoh evcnt_detach(&txr->tso_tx);
663 1.49 msaitoh
664 1.49 msaitoh evcnt_detach(&rxr->rx_packets);
665 1.49 msaitoh evcnt_detach(&rxr->rx_bytes);
666 1.49 msaitoh evcnt_detach(&rxr->rx_copies);
667 1.166 msaitoh evcnt_detach(&rxr->no_mbuf);
668 1.49 msaitoh evcnt_detach(&rxr->rx_discarded);
669 1.49 msaitoh }
670 1.49 msaitoh evcnt_detach(&stats->ipcs);
671 1.49 msaitoh evcnt_detach(&stats->l4cs);
672 1.49 msaitoh evcnt_detach(&stats->ipcs_bad);
673 1.49 msaitoh evcnt_detach(&stats->l4cs_bad);
674 1.49 msaitoh
675 1.49 msaitoh /* Packet Reception Stats */
676 1.49 msaitoh evcnt_detach(&stats->vfgorc);
677 1.49 msaitoh evcnt_detach(&stats->vfgprc);
678 1.49 msaitoh evcnt_detach(&stats->vfmprc);
679 1.49 msaitoh
680 1.49 msaitoh /* Packet Transmission Stats */
681 1.49 msaitoh evcnt_detach(&stats->vfgotc);
682 1.49 msaitoh evcnt_detach(&stats->vfgptc);
683 1.41 msaitoh
684 1.67 msaitoh /* Mailbox Stats */
685 1.67 msaitoh evcnt_detach(&hw->mbx.stats.msgs_tx);
686 1.67 msaitoh evcnt_detach(&hw->mbx.stats.msgs_rx);
687 1.67 msaitoh evcnt_detach(&hw->mbx.stats.acks);
688 1.67 msaitoh evcnt_detach(&hw->mbx.stats.reqs);
689 1.67 msaitoh evcnt_detach(&hw->mbx.stats.rsts);
690 1.67 msaitoh
691 1.186 msaitoh ixgbe_free_queues(sc);
692 1.1 dyoung
693 1.186 msaitoh IXGBE_CORE_LOCK_DESTROY(sc);
694 1.58 msaitoh
695 1.1 dyoung return (0);
696 1.58 msaitoh } /* ixv_detach */
697 1.1 dyoung
/************************************************************************
 * ixv_init_locked - Init entry point
 *
 * Used in two ways: It is used by the stack as an init entry
 * point in network interface structure. It is also used
 * by the driver as a hw/sw initialization routine to get
 * to a consistent state.
 *
 * No return value; on any setup failure the interface is stopped
 * via ixv_stop_locked().  Must be called with the core lock held.
 ************************************************************************/
static void
ixv_init_locked(struct ixgbe_softc *sc)
{
	struct ifnet	*ifp = sc->ifp;
	device_t	dev = sc->dev;
	struct ixgbe_hw *hw = &sc->hw;
	struct ix_queue	*que;
	int		error = 0;
	uint32_t	mask;
	int		i;

	INIT_DEBUGOUT("ixv_init_locked: begin");
	KASSERT(mutex_owned(&sc->core_mtx));

	/* Quiesce the adapter and the watchdog before reprogramming. */
	hw->adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);
	callout_stop(&sc->timer);

	/* Clear the per-queue nested-disable counters (see ixv_disable_queue). */
	for (i = 0, que = sc->queues; i < sc->num_queues; i++, que++)
		que->disabled_count = 0;

	sc->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(sc)) {
		aprint_error_dev(dev, "Could not setup transmit structures\n");
		ixv_stop_locked(sc);
		return;
	}

	/* Reset VF and renegotiate mailbox API version */
	hw->mac.ops.reset_hw(hw);
	hw->mac.ops.start_hw(hw);
	error = ixv_negotiate_api(sc);
	if (error)
		/* Non-fatal: continue with whatever API the PF accepted. */
		device_printf(dev,
		    "Mailbox API negotiation failed in init_locked!\n");

	ixv_initialize_transmit_units(sc);

	/* Setup Multicast table */
	ixv_set_rxfilter(sc);

	/* Use fixed buffer size, even for jumbo frames */
	sc->rx_mbuf_sz = MCLBYTES;

	/* Prepare receive descriptors and buffers */
	error = ixgbe_setup_receive_structures(sc);
	if (error) {
		device_printf(dev,
		    "Could not setup receive structures (err = %d)\n", error);
		ixv_stop_locked(sc);
		return;
	}

	/* Configure RX settings */
	ixv_initialize_receive_units(sc);

	/* Initialize variable holding task enqueue requests interrupts */
	sc->task_requests = 0;

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(sc);

	/* Set up MSI-X routing */
	ixv_configure_ivars(sc);

	/* Set up auto-mask: admin vector plus every queue vector. */
	mask = (1 << sc->vector);
	for (i = 0, que = sc->queues; i < sc->num_queues; i++, que++)
		mask |= (1 << que->msix);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, mask);

	/* Set moderation on the Link interrupt */
	ixv_eitr_write(sc, sc->vector, IXGBE_LINK_ITR);

	/* Stats init */
	ixv_init_stats(sc);

	/* Config/Enable Link */
	error = hw->mac.ops.get_link_state(hw, &sc->link_enabled);
	if (error) {
		/* PF is not capable of controlling VF state. Enable the link. */
		sc->link_enabled = TRUE;
	} else if (sc->link_enabled == FALSE)
		device_printf(dev, "VF is disabled by PF\n");

	hw->mac.get_link_status = TRUE;
	hw->mac.ops.check_link(hw, &sc->link_speed, &sc->link_up,
	    FALSE);

	/* Start watchdog */
	callout_reset(&sc->timer, hz, ixv_local_timer, sc);
	atomic_store_relaxed(&sc->timer_pending, 0);

	/* OK to schedule workqueues. */
	sc->schedule_wqs_ok = true;

	/* Update saved flags. See ixgbe_ifflags_cb() */
	sc->if_flags = ifp->if_flags;
	sc->ec_capenable = sc->osdep.ec.ec_capenable;

	/* Inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;

	/* And now turn on interrupts */
	ixv_enable_intr(sc);

	return;
} /* ixv_init_locked */
825 1.1 dyoung
826 1.88 msaitoh /************************************************************************
827 1.88 msaitoh * ixv_enable_queue
828 1.88 msaitoh ************************************************************************/
829 1.1 dyoung static inline void
830 1.186 msaitoh ixv_enable_queue(struct ixgbe_softc *sc, u32 vector)
831 1.1 dyoung {
832 1.186 msaitoh struct ixgbe_hw *hw = &sc->hw;
833 1.186 msaitoh struct ix_queue *que = &sc->queues[vector];
834 1.124 msaitoh u32 queue = 1UL << vector;
835 1.114 msaitoh u32 mask;
836 1.1 dyoung
837 1.90 knakahar mutex_enter(&que->dc_mtx);
838 1.90 knakahar if (que->disabled_count > 0 && --que->disabled_count > 0)
839 1.82 knakahar goto out;
840 1.82 knakahar
841 1.1 dyoung mask = (IXGBE_EIMS_RTX_QUEUE & queue);
842 1.1 dyoung IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
843 1.82 knakahar out:
844 1.90 knakahar mutex_exit(&que->dc_mtx);
845 1.58 msaitoh } /* ixv_enable_queue */
846 1.1 dyoung
847 1.88 msaitoh /************************************************************************
848 1.88 msaitoh * ixv_disable_queue
849 1.88 msaitoh ************************************************************************/
850 1.1 dyoung static inline void
851 1.186 msaitoh ixv_disable_queue(struct ixgbe_softc *sc, u32 vector)
852 1.1 dyoung {
853 1.186 msaitoh struct ixgbe_hw *hw = &sc->hw;
854 1.186 msaitoh struct ix_queue *que = &sc->queues[vector];
855 1.124 msaitoh u32 queue = 1UL << vector;
856 1.114 msaitoh u32 mask;
857 1.1 dyoung
858 1.90 knakahar mutex_enter(&que->dc_mtx);
859 1.90 knakahar if (que->disabled_count++ > 0)
860 1.82 knakahar goto out;
861 1.82 knakahar
862 1.1 dyoung mask = (IXGBE_EIMS_RTX_QUEUE & queue);
863 1.1 dyoung IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
864 1.82 knakahar out:
865 1.90 knakahar mutex_exit(&que->dc_mtx);
866 1.58 msaitoh } /* ixv_disable_queue */
867 1.1 dyoung
#if 0
/*
 * Force an immediate interrupt on the given queue set by writing the
 * interrupt-cause-set register (VTEICS).  Compiled out: its only
 * would-be caller is the likewise #if 0'd "force an IRQ on queues
 * with work" path in ixv_handle_timer().
 */
static inline void
ixv_rearm_queues(struct ixgbe_softc *sc, u64 queues)
{
	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
	IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEICS, mask);
} /* ixv_rearm_queues */
#endif
876 1.1 dyoung
877 1.1 dyoung
/************************************************************************
 * ixv_msix_que - MSI-X Queue Interrupt Service routine
 *
 * Masks this queue's interrupt, counts the event, reaps completed TX
 * descriptors, and (on NetBSD) defers RX processing to softint
 * context.  Optionally recomputes the interrupt moderation interval
 * (AIM) from the average packet size observed since the previous
 * interrupt.  Returns 1 to indicate the interrupt was handled.
 ************************************************************************/
static int
ixv_msix_que(void *arg)
{
	struct ix_queue	*que = arg;
	struct ixgbe_softc *sc = que->sc;
	struct tx_ring	*txr = que->txr;
	struct rx_ring	*rxr = que->rxr;
	bool		more;
	u32		newitr = 0;

	/* Mask until the deferred work re-enables us (see ixv_enable_queue). */
	ixv_disable_queue(sc, que->msix);
	IXGBE_EVC_ADD(&que->irqs, 1);

#ifdef __NetBSD__
	/* Don't run ixgbe_rxeof in interrupt context */
	more = true;
#else
	more = ixgbe_rxeof(que);
#endif

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (sc->enable_aim == false)
		goto no_calc;
	/*
	 * Do Adaptive Interrupt Moderation:
	 * - Write out last calculated setting
	 * - Calculate based on average size over
	 * the last interval.
	 */
	if (que->eitr_setting)
		ixv_eitr_write(sc, que->msix, que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	/* Average bytes per packet, taking the larger of the TX/RX sides. */
	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = uimax(newitr, (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = uimin(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/*
	 * When RSC is used, ITR interval must be larger than RSC_DELAY.
	 * Currently, we use 2us for RSC_DELAY. The minimum value is always
	 * greater than 2us on 100M (and 10M?(not documented)), but it's not
	 * on 1G and higher.
	 */
	if ((sc->link_speed != IXGBE_LINK_SPEED_100_FULL)
	    && (sc->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
			newitr = IXGBE_MIN_RSC_EITR_10G1G;
	}

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	if (more)
		/* Deferred work pending: softint will re-enable the queue. */
		softint_schedule(que->que_si);
	else /* Re-enable this interrupt */
		ixv_enable_queue(sc, que->msix);

	return 1;
} /* ixv_msix_que */
968 1.1 dyoung
969 1.58 msaitoh /************************************************************************
970 1.58 msaitoh * ixv_msix_mbx
971 1.58 msaitoh ************************************************************************/
972 1.11 msaitoh static int
973 1.1 dyoung ixv_msix_mbx(void *arg)
974 1.1 dyoung {
975 1.186 msaitoh struct ixgbe_softc *sc = arg;
976 1.186 msaitoh struct ixgbe_hw *hw = &sc->hw;
977 1.1 dyoung
978 1.186 msaitoh IXGBE_EVC_ADD(&sc->admin_irqev, 1);
979 1.69 msaitoh /* NetBSD: We use auto-clear, so it's not required to write VTEICR */
980 1.1 dyoung
981 1.1 dyoung /* Link status change */
982 1.69 msaitoh hw->mac.get_link_status = TRUE;
983 1.186 msaitoh atomic_or_32(&sc->task_requests, IXGBE_REQUEST_TASK_MBX);
984 1.186 msaitoh ixv_schedule_admin_tasklet(sc);
985 1.57 msaitoh
986 1.11 msaitoh return 1;
987 1.58 msaitoh } /* ixv_msix_mbx */
988 1.1 dyoung
989 1.80 msaitoh static void
990 1.186 msaitoh ixv_eitr_write(struct ixgbe_softc *sc, uint32_t index, uint32_t itr)
991 1.80 msaitoh {
992 1.80 msaitoh
993 1.80 msaitoh /*
994 1.80 msaitoh * Newer devices than 82598 have VF function, so this function is
995 1.80 msaitoh * simple.
996 1.80 msaitoh */
997 1.80 msaitoh itr |= IXGBE_EITR_CNT_WDIS;
998 1.80 msaitoh
999 1.186 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEITR(index), itr);
1000 1.80 msaitoh }
1001 1.80 msaitoh
1002 1.80 msaitoh
1003 1.58 msaitoh /************************************************************************
1004 1.58 msaitoh * ixv_media_status - Media Ioctl callback
1005 1.1 dyoung *
1006 1.58 msaitoh * Called whenever the user queries the status of
1007 1.58 msaitoh * the interface using ifconfig.
1008 1.58 msaitoh ************************************************************************/
1009 1.1 dyoung static void
1010 1.63 msaitoh ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1011 1.1 dyoung {
1012 1.186 msaitoh struct ixgbe_softc *sc = ifp->if_softc;
1013 1.1 dyoung
1014 1.1 dyoung INIT_DEBUGOUT("ixv_media_status: begin");
1015 1.186 msaitoh ixv_update_link_status(sc);
1016 1.1 dyoung
1017 1.1 dyoung ifmr->ifm_status = IFM_AVALID;
1018 1.1 dyoung ifmr->ifm_active = IFM_ETHER;
1019 1.1 dyoung
1020 1.186 msaitoh if (sc->link_active != LINK_STATE_UP) {
1021 1.39 msaitoh ifmr->ifm_active |= IFM_NONE;
1022 1.1 dyoung return;
1023 1.1 dyoung }
1024 1.1 dyoung
1025 1.1 dyoung ifmr->ifm_status |= IFM_ACTIVE;
1026 1.1 dyoung
1027 1.186 msaitoh switch (sc->link_speed) {
1028 1.42 msaitoh case IXGBE_LINK_SPEED_10GB_FULL:
1029 1.42 msaitoh ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
1030 1.42 msaitoh break;
1031 1.71 msaitoh case IXGBE_LINK_SPEED_5GB_FULL:
1032 1.71 msaitoh ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
1033 1.71 msaitoh break;
1034 1.71 msaitoh case IXGBE_LINK_SPEED_2_5GB_FULL:
1035 1.71 msaitoh ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
1036 1.71 msaitoh break;
1037 1.1 dyoung case IXGBE_LINK_SPEED_1GB_FULL:
1038 1.1 dyoung ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1039 1.1 dyoung break;
1040 1.42 msaitoh case IXGBE_LINK_SPEED_100_FULL:
1041 1.42 msaitoh ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1042 1.1 dyoung break;
1043 1.58 msaitoh case IXGBE_LINK_SPEED_10_FULL:
1044 1.58 msaitoh ifmr->ifm_active |= IFM_10_T | IFM_FDX;
1045 1.58 msaitoh break;
1046 1.1 dyoung }
1047 1.1 dyoung
1048 1.70 msaitoh ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
1049 1.58 msaitoh } /* ixv_media_status */
1050 1.1 dyoung
1051 1.58 msaitoh /************************************************************************
1052 1.58 msaitoh * ixv_media_change - Media Ioctl callback
1053 1.1 dyoung *
1054 1.58 msaitoh * Called when the user changes speed/duplex using
1055 1.58 msaitoh * media/mediopt option with ifconfig.
1056 1.58 msaitoh ************************************************************************/
1057 1.1 dyoung static int
1058 1.57 msaitoh ixv_media_change(struct ifnet *ifp)
1059 1.1 dyoung {
1060 1.186 msaitoh struct ixgbe_softc *sc = ifp->if_softc;
1061 1.186 msaitoh struct ifmedia *ifm = &sc->media;
1062 1.1 dyoung
1063 1.1 dyoung INIT_DEBUGOUT("ixv_media_change: begin");
1064 1.1 dyoung
1065 1.1 dyoung if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1066 1.1 dyoung return (EINVAL);
1067 1.1 dyoung
1068 1.57 msaitoh switch (IFM_SUBTYPE(ifm->ifm_media)) {
1069 1.57 msaitoh case IFM_AUTO:
1070 1.57 msaitoh break;
1071 1.57 msaitoh default:
1072 1.186 msaitoh device_printf(sc->dev, "Only auto media type\n");
1073 1.1 dyoung return (EINVAL);
1074 1.57 msaitoh }
1075 1.1 dyoung
1076 1.1 dyoung return (0);
1077 1.58 msaitoh } /* ixv_media_change */
1078 1.1 dyoung
1079 1.151 msaitoh static void
1080 1.186 msaitoh ixv_schedule_admin_tasklet(struct ixgbe_softc *sc)
1081 1.151 msaitoh {
1082 1.186 msaitoh if (sc->schedule_wqs_ok) {
1083 1.186 msaitoh if (atomic_cas_uint(&sc->admin_pending, 0, 1) == 0)
1084 1.186 msaitoh workqueue_enqueue(sc->admin_wq,
1085 1.186 msaitoh &sc->admin_wc, NULL);
1086 1.151 msaitoh }
1087 1.151 msaitoh }
1088 1.151 msaitoh
1089 1.137 msaitoh /************************************************************************
1090 1.58 msaitoh * ixv_negotiate_api
1091 1.1 dyoung *
1092 1.58 msaitoh * Negotiate the Mailbox API with the PF;
1093 1.58 msaitoh * start with the most featured API first.
1094 1.58 msaitoh ************************************************************************/
1095 1.58 msaitoh static int
1096 1.186 msaitoh ixv_negotiate_api(struct ixgbe_softc *sc)
1097 1.58 msaitoh {
1098 1.186 msaitoh struct ixgbe_hw *hw = &sc->hw;
1099 1.173 msaitoh int mbx_api[] = { ixgbe_mbox_api_15,
1100 1.173 msaitoh ixgbe_mbox_api_13,
1101 1.134 msaitoh ixgbe_mbox_api_12,
1102 1.134 msaitoh ixgbe_mbox_api_11,
1103 1.114 msaitoh ixgbe_mbox_api_10,
1104 1.114 msaitoh ixgbe_mbox_api_unknown };
1105 1.114 msaitoh int i = 0;
1106 1.58 msaitoh
1107 1.58 msaitoh while (mbx_api[i] != ixgbe_mbox_api_unknown) {
1108 1.173 msaitoh if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0) {
1109 1.173 msaitoh if (hw->api_version >= ixgbe_mbox_api_15)
1110 1.173 msaitoh ixgbe_upgrade_mbx_params_vf(hw);
1111 1.58 msaitoh return (0);
1112 1.173 msaitoh }
1113 1.58 msaitoh i++;
1114 1.58 msaitoh }
1115 1.58 msaitoh
1116 1.58 msaitoh return (EINVAL);
1117 1.58 msaitoh } /* ixv_negotiate_api */
1118 1.58 msaitoh
1119 1.58 msaitoh
/************************************************************************
 * ixv_set_rxfilter - Multicast Update
 *
 * Called whenever multicast address list is updated.
 *
 * Walks the interface's multicast list and asks the PF (via the
 * mailbox xcast-mode call) for the strongest filter mode the list
 * requires: PROMISC, ALLMULTI, or a plain multicast table.  PF
 * refusals are mapped to errnos but filtering setup still continues
 * on a best-effort basis.  Returns 0 on full success, otherwise the
 * first error encountered.  Must be called with the core lock held.
 ************************************************************************/
static int
ixv_set_rxfilter(struct ixgbe_softc *sc)
{
	struct ixgbe_mc_addr *mta;
	struct ifnet	*ifp = sc->ifp;
	struct ixgbe_hw *hw = &sc->hw;
	u8		*update_ptr;
	int		mcnt = 0;
	struct ethercom *ec = &sc->osdep.ec;
	struct ether_multi *enm;
	struct ether_multistep step;
	bool		overflow = false;
	int		error, rc = 0;	/* rc keeps the first error seen */

	KASSERT(mutex_owned(&sc->core_mtx));
	IOCTL_DEBUGOUT("ixv_set_rxfilter: begin");

	mta = sc->mta;
	bzero(mta, sizeof(*mta) * IXGBE_MAX_VF_MC);

	/* 1: For PROMISC */
	if (ifp->if_flags & IFF_PROMISC) {
		error = hw->mac.ops.update_xcast_mode(hw,
		    IXGBEVF_XCAST_MODE_PROMISC);
		/* Map mailbox error codes to errnos the stack understands. */
		if (error == IXGBE_ERR_NOT_TRUSTED) {
			device_printf(sc->dev,
			    "this interface is not trusted\n");
			error = EPERM;
		} else if (error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) {
			device_printf(sc->dev,
			    "the PF doesn't support promisc mode\n");
			error = EOPNOTSUPP;
		} else if (error == IXGBE_ERR_NOT_IN_PROMISC) {
			device_printf(sc->dev,
			    "the PF may not in promisc mode\n");
			error = EINVAL;
		} else if (error) {
			device_printf(sc->dev,
			    "failed to set promisc mode. error = %d\n",
			    error);
			error = EIO;
		} else
			return 0;	/* PROMISC granted; nothing more to do */
		rc = error;
	}

	/* 2: For ALLMULTI or normal */
	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		/*
		 * A range entry (addrlo != addrhi) or more addresses than
		 * the VF table holds both force ALLMULTI.
		 */
		if ((mcnt >= IXGBE_MAX_VF_MC) ||
		    (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			ETHER_ADDR_LEN) != 0)) {
			overflow = true;
			break;
		}
		bcopy(enm->enm_addrlo,
		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	/* 3: For ALLMULTI */
	if (overflow) {
		error = hw->mac.ops.update_xcast_mode(hw,
		    IXGBEVF_XCAST_MODE_ALLMULTI);
		if (error == IXGBE_ERR_NOT_TRUSTED) {
			device_printf(sc->dev,
			    "this interface is not trusted\n");
			error = EPERM;
		} else if (error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) {
			device_printf(sc->dev,
			    "the PF doesn't support allmulti mode\n");
			error = EOPNOTSUPP;
		} else if (error) {
			device_printf(sc->dev,
			    "number of Ethernet multicast addresses "
			    "exceeds the limit (%d). error = %d\n",
			    IXGBE_MAX_VF_MC, error);
			error = ENOSPC;
		} else {
			ETHER_LOCK(ec);
			ec->ec_flags |= ETHER_F_ALLMULTI;
			ETHER_UNLOCK(ec);
			return rc; /* Promisc might have failed */
		}

		if (rc == 0)
			rc = error;

		/* Continue to update the multicast table as many as we can */
	}

	/* 4: For normal operation */
	error = hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI);
	if ((error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) || (error == 0)) {
		/* Normal operation */
		ETHER_LOCK(ec);
		ec->ec_flags &= ~ETHER_F_ALLMULTI;
		ETHER_UNLOCK(ec);
		error = 0;
	} else if (error) {
		device_printf(sc->dev,
		    "failed to set Ethernet multicast address "
		    "operation to normal. error = %d\n", error);
	}

	/* Push the collected addresses via the shared-code iterator. */
	update_ptr = (u8 *)mta;
	error = sc->hw.mac.ops.update_mc_addr_list(&sc->hw,
	    update_ptr, mcnt, ixv_mc_array_itr, TRUE);
	if (rc == 0)
		rc = error;

	return rc;
} /* ixv_set_rxfilter */
1241 1.1 dyoung
1242 1.58 msaitoh /************************************************************************
1243 1.58 msaitoh * ixv_mc_array_itr
1244 1.58 msaitoh *
1245 1.58 msaitoh * An iterator function needed by the multicast shared code.
1246 1.58 msaitoh * It feeds the shared code routine the addresses in the
1247 1.138 msaitoh * array of ixv_set_rxfilter() one by one.
1248 1.58 msaitoh ************************************************************************/
1249 1.1 dyoung static u8 *
1250 1.1 dyoung ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1251 1.1 dyoung {
1252 1.170 msaitoh struct ixgbe_mc_addr *mta;
1253 1.170 msaitoh
1254 1.170 msaitoh mta = (struct ixgbe_mc_addr *)*update_ptr;
1255 1.88 msaitoh
1256 1.1 dyoung *vmdq = 0;
1257 1.170 msaitoh *update_ptr = (u8*)(mta + 1);
1258 1.1 dyoung
1259 1.170 msaitoh return (mta->addr);
1260 1.58 msaitoh } /* ixv_mc_array_itr */
1261 1.1 dyoung
1262 1.58 msaitoh /************************************************************************
1263 1.58 msaitoh * ixv_local_timer - Timer routine
1264 1.1 dyoung *
1265 1.58 msaitoh * Checks for link status, updates statistics,
1266 1.58 msaitoh * and runs the watchdog check.
1267 1.58 msaitoh ************************************************************************/
1268 1.1 dyoung static void
1269 1.22 msaitoh ixv_local_timer(void *arg)
1270 1.22 msaitoh {
1271 1.186 msaitoh struct ixgbe_softc *sc = arg;
1272 1.22 msaitoh
1273 1.186 msaitoh if (sc->schedule_wqs_ok) {
1274 1.186 msaitoh if (atomic_cas_uint(&sc->timer_pending, 0, 1) == 0)
1275 1.186 msaitoh workqueue_enqueue(sc->timer_wq,
1276 1.186 msaitoh &sc->timer_wc, NULL);
1277 1.151 msaitoh }
1278 1.22 msaitoh }
1279 1.22 msaitoh
/*
 * Workqueue body for the periodic timer: re-checks the link (reinitializing
 * on change), refreshes statistics, aggregates per-queue TX error counters
 * into the softc-wide event counters, and runs the TX watchdog.  If every
 * queue is hung the adapter is reset; otherwise the callout is re-armed.
 */
static void
ixv_handle_timer(struct work *wk, void *context)
{
	struct ixgbe_softc *sc = context;
	device_t	dev = sc->dev;
	struct ix_queue	*que = sc->queues;
	u64		queues = 0;	/* only consumed by the #if 0 rearm path */
	u64		v0, v1, v2, v3, v4, v5, v6, v7;
	int		hung = 0;
	int		i;

	IXGBE_CORE_LOCK(sc);

	/* Link changed: reinitialize; init_locked re-arms the callout. */
	if (ixv_check_link(sc)) {
		ixv_init_locked(sc);
		IXGBE_CORE_UNLOCK(sc);
		return;
	}

	/* Stats Update */
	ixv_update_stats(sc);

	/* Sum per-queue TX setup-error counters into the global evcnts. */
	v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
	que = sc->queues;
	for (i = 0; i < sc->num_queues; i++, que++) {
		struct tx_ring *txr = que->txr;

		v0 += txr->q_efbig_tx_dma_setup;
		v1 += txr->q_mbuf_defrag_failed;
		v2 += txr->q_efbig2_tx_dma_setup;
		v3 += txr->q_einval_tx_dma_setup;
		v4 += txr->q_other_tx_dma_setup;
		v5 += txr->q_eagain_tx_dma_setup;
		v6 += txr->q_enomem_tx_dma_setup;
		v7 += txr->q_tso_err;
	}
	IXGBE_EVC_STORE(&sc->efbig_tx_dma_setup, v0);
	IXGBE_EVC_STORE(&sc->mbuf_defrag_failed, v1);
	IXGBE_EVC_STORE(&sc->efbig2_tx_dma_setup, v2);
	IXGBE_EVC_STORE(&sc->einval_tx_dma_setup, v3);
	IXGBE_EVC_STORE(&sc->other_tx_dma_setup, v4);
	IXGBE_EVC_STORE(&sc->eagain_tx_dma_setup, v5);
	IXGBE_EVC_STORE(&sc->enomem_tx_dma_setup, v6);
	IXGBE_EVC_STORE(&sc->tso_err, v7);

	/*
	 * Check the TX queues status
	 * - mark hung queues so we don't schedule on them
	 * - watchdog only if all queues show hung
	 */
	que = sc->queues;
	for (i = 0; i < sc->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			sc->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((sc->active_queues & ((u64)1 << que->me)) == 0)
				sc->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == sc->num_queues)
		goto watchdog;
#if 0
	else if (queues != 0) { /* Force an IRQ on queues with work */
		ixv_rearm_queues(sc, queues);
	}
#endif

	/* Allow the next callout to queue us again, then re-arm it. */
	atomic_store_relaxed(&sc->timer_pending, 0);
	IXGBE_CORE_UNLOCK(sc);
	callout_reset(&sc->timer, hz, ixv_local_timer, sc);

	return;

watchdog:
	device_printf(sc->dev, "Watchdog timeout -- resetting\n");
	sc->ifp->if_flags &= ~IFF_RUNNING;
	IXGBE_EVC_ADD(&sc->watchdog_events, 1);
	/* Full reinit; init_locked restarts the callout. */
	ixv_init_locked(sc);
	IXGBE_CORE_UNLOCK(sc);
} /* ixv_handle_timer */
1381 1.1 dyoung
/************************************************************************
 * ixv_update_link_status - Update OS on link state
 *
 * Note: Only updates the OS on the cached link state.
 *	 The real check of the hardware only happens with
 *	 a link interrupt.
 *
 * Must be called with the core lock held.  Reports UP only when the
 * link is both up and enabled by the PF (see sc->link_enabled).
 ************************************************************************/
static void
ixv_update_link_status(struct ixgbe_softc *sc)
{
	struct ifnet	*ifp = sc->ifp;
	device_t	dev = sc->dev;

	KASSERT(mutex_owned(&sc->core_mtx));

	if (sc->link_up && sc->link_enabled) {
		/* Announce only on a transition to UP. */
		if (sc->link_active != LINK_STATE_UP) {
			if (bootverbose) {
				const char *bpsmsg;

				switch (sc->link_speed) {
				case IXGBE_LINK_SPEED_10GB_FULL:
					bpsmsg = "10 Gbps";
					break;
				case IXGBE_LINK_SPEED_5GB_FULL:
					bpsmsg = "5 Gbps";
					break;
				case IXGBE_LINK_SPEED_2_5GB_FULL:
					bpsmsg = "2.5 Gbps";
					break;
				case IXGBE_LINK_SPEED_1GB_FULL:
					bpsmsg = "1 Gbps";
					break;
				case IXGBE_LINK_SPEED_100_FULL:
					bpsmsg = "100 Mbps";
					break;
				case IXGBE_LINK_SPEED_10_FULL:
					bpsmsg = "10 Mbps";
					break;
				default:
					bpsmsg = "unknown speed";
					break;
				}
				device_printf(dev, "Link is up %s %s \n",
				    bpsmsg, "Full Duplex");
			}
			sc->link_active = LINK_STATE_UP;
			if_link_state_change(ifp, LINK_STATE_UP);
		}
	} else {
		/*
		 * Do it when link active changes to DOWN. i.e.
		 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
		 * b) LINK_STATE_UP	 -> LINK_STATE_DOWN
		 */
		if (sc->link_active != LINK_STATE_DOWN) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			sc->link_active = LINK_STATE_DOWN;
		}
	}
} /* ixv_update_link_status */
1445 1.1 dyoung
1446 1.1 dyoung
/************************************************************************
 * ixv_stop - Stop the hardware
 *
 * Disables all traffic on the adapter by issuing a
 * global reset on the MAC and deallocates TX/RX buffers.
 *
 * if_stop entry point: performs the actual stop under the core lock,
 * then drains any admin/timer work already scheduled.
 ************************************************************************/
static void
ixv_ifstop(struct ifnet *ifp, int disable)
{
	struct ixgbe_softc *sc = ifp->if_softc;

	IXGBE_CORE_LOCK(sc);
	ixv_stop_locked(sc);
	IXGBE_CORE_UNLOCK(sc);

	/*
	 * Wait for already-enqueued deferred work after dropping the
	 * lock, then clear the pending flags so a later start begins
	 * from a clean state.
	 */
	workqueue_wait(sc->admin_wq, &sc->admin_wc);
	atomic_store_relaxed(&sc->admin_pending, 0);
	workqueue_wait(sc->timer_wq, &sc->timer_wc);
	atomic_store_relaxed(&sc->timer_pending, 0);
}
1467 1.3 msaitoh
/*
 * Stop the adapter with the core lock held: disable interrupts,
 * mark the interface down, reset and stop the MAC, and suppress
 * further workqueue scheduling.
 */
static void
ixv_stop_locked(void *arg)
{
	struct ifnet *ifp;
	struct ixgbe_softc *sc = arg;
	struct ixgbe_hw *hw = &sc->hw;

	ifp = sc->ifp;

	KASSERT(mutex_owned(&sc->core_mtx));

	INIT_DEBUGOUT("ixv_stop_locked: begin\n");
	ixv_disable_intr(sc);

	/* Tell the stack that the interface is no longer active */
	ifp->if_flags &= ~IFF_RUNNING;

	hw->mac.ops.reset_hw(hw);
	/*
	 * Clear adapter_stopped so the stop_adapter method below really
	 * runs (it returns early when the flag is already set).
	 */
	sc->hw.adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);
	callout_stop(&sc->timer);

	/* Don't schedule workqueues. */
	sc->schedule_wqs_ok = false;

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixv_stop_locked */
1498 1.1 dyoung
1499 1.1 dyoung
/************************************************************************
 * ixv_allocate_pci_resources
 *
 *   Map BAR0 (the device register window) and record the tuneable
 *   queue count.  Returns 0 on success, ENXIO if the BAR is of an
 *   unexpected type or cannot be mapped.
 ************************************************************************/
static int
ixv_allocate_pci_resources(struct ixgbe_softc *sc,
    const struct pci_attach_args *pa)
{
	pcireg_t memtype, csr;
	device_t dev = sc->dev;
	bus_addr_t addr;
	int flags;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		sc->osdep.mem_bus_space_tag = pa->pa_memt;
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
		    memtype, &addr, &sc->osdep.mem_size, &flags) != 0)
			goto map_err;
		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
			/* Device registers must not be mapped prefetchable. */
			aprint_normal_dev(dev, "clearing prefetchable bit\n");
			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
		}
		if (bus_space_map(sc->osdep.mem_bus_space_tag, addr,
		    sc->osdep.mem_size, flags,
		    &sc->osdep.mem_bus_space_handle) != 0) {
map_err:
			/* mem_size == 0 marks "nothing to unmap" at detach */
			sc->osdep.mem_size = 0;
			aprint_error_dev(dev, "unable to map BAR0\n");
			return ENXIO;
		}
		/*
		 * Enable address decoding for memory range in case it's not
		 * set.
		 */
		csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    PCI_COMMAND_STATUS_REG);
		csr |= PCI_COMMAND_MEM_ENABLE;
		pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
		    csr);
		break;
	default:
		aprint_error_dev(dev, "unexpected type on BAR0\n");
		return ENXIO;
	}

	/* Pick up the tuneable queues */
	sc->num_queues = ixv_num_queues;

	return (0);
} /* ixv_allocate_pci_resources */
1552 1.1 dyoung
1553 1.151 msaitoh static void
1554 1.186 msaitoh ixv_free_deferred_handlers(struct ixgbe_softc *sc)
1555 1.151 msaitoh {
1556 1.186 msaitoh struct ix_queue *que = sc->queues;
1557 1.186 msaitoh struct tx_ring *txr = sc->tx_rings;
1558 1.151 msaitoh int i;
1559 1.151 msaitoh
1560 1.186 msaitoh for (i = 0; i < sc->num_queues; i++, que++, txr++) {
1561 1.186 msaitoh if (!(sc->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
1562 1.151 msaitoh if (txr->txr_si != NULL)
1563 1.151 msaitoh softint_disestablish(txr->txr_si);
1564 1.151 msaitoh }
1565 1.151 msaitoh if (que->que_si != NULL)
1566 1.151 msaitoh softint_disestablish(que->que_si);
1567 1.151 msaitoh }
1568 1.186 msaitoh if (sc->txr_wq != NULL)
1569 1.186 msaitoh workqueue_destroy(sc->txr_wq);
1570 1.186 msaitoh if (sc->txr_wq_enqueued != NULL)
1571 1.186 msaitoh percpu_free(sc->txr_wq_enqueued, sizeof(u_int));
1572 1.186 msaitoh if (sc->que_wq != NULL)
1573 1.186 msaitoh workqueue_destroy(sc->que_wq);
1574 1.151 msaitoh
1575 1.151 msaitoh /* Drain the Mailbox(link) queue */
1576 1.186 msaitoh if (sc->admin_wq != NULL) {
1577 1.186 msaitoh workqueue_destroy(sc->admin_wq);
1578 1.186 msaitoh sc->admin_wq = NULL;
1579 1.186 msaitoh }
1580 1.186 msaitoh if (sc->timer_wq != NULL) {
1581 1.186 msaitoh workqueue_destroy(sc->timer_wq);
1582 1.186 msaitoh sc->timer_wq = NULL;
1583 1.151 msaitoh }
1584 1.154 msaitoh } /* ixv_free_deferred_handlers */
1585 1.151 msaitoh
/************************************************************************
 * ixv_free_pci_resources
 *
 *   Disestablish the per-queue and mailbox interrupt handlers,
 *   release the MSI-X vectors, and unmap BAR0.
 ************************************************************************/
static void
ixv_free_pci_resources(struct ixgbe_softc *sc)
{
	struct ix_queue *que = sc->queues;
	int rid;

	/*
	 * Release all msix queue resources:
	 */
	for (int i = 0; i < sc->num_queues; i++, que++) {
		if (que->res != NULL)
			pci_intr_disestablish(sc->osdep.pc,
			    sc->osdep.ihs[i]);
	}


	/* Clean the Mailbox interrupt last */
	rid = sc->vector;

	if (sc->osdep.ihs[rid] != NULL) {
		pci_intr_disestablish(sc->osdep.pc,
		    sc->osdep.ihs[rid]);
		sc->osdep.ihs[rid] = NULL;
	}

	pci_intr_release(sc->osdep.pc, sc->osdep.intrs,
	    sc->osdep.nintrs);

	/* Unmap BAR0 only if it was successfully mapped at attach. */
	if (sc->osdep.mem_size != 0) {
		bus_space_unmap(sc->osdep.mem_bus_space_tag,
		    sc->osdep.mem_bus_space_handle,
		    sc->osdep.mem_size);
	}

	return;
} /* ixv_free_pci_resources */
1625 1.1 dyoung
/************************************************************************
 * ixv_setup_interface
 *
 * Setup networking device structure and register an interface.
 *
 *   Fills in the ifnet/ethercom callbacks and capability flags,
 *   attaches the Ethernet address, initializes ifmedia, and finally
 *   registers the interface.  Returns 0.
 ************************************************************************/
static int
ixv_setup_interface(device_t dev, struct ixgbe_softc *sc)
{
	struct ethercom *ec = &sc->osdep.ec;
	struct ifnet *ifp;

	INIT_DEBUGOUT("ixv_setup_interface: begin");

	ifp = sc->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixv_init;
	ifp->if_stop = ixv_ifstop;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_extflags = IFEF_MPSAFE;
	ifp->if_ioctl = ixv_ioctl;
	/* Multiqueue transmit only when not in legacy-TX mode. */
	if (sc->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
		ixv_start_locked = ixgbe_legacy_start_locked;
#endif
	} else {
		ifp->if_transmit = ixgbe_mq_start;
#if 0
		ixv_start_locked = ixgbe_mq_start_locked;
#endif
	}
	ifp->if_start = ixgbe_legacy_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, sc->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	if_initialize(ifp);
	sc->ipq = if_percpuq_create(&sc->osdep.ec.ec_if);
	/*
	 * We use per TX queue softint, so if_deferred_start_init() isn't
	 * used.
	 */

	sc->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags */
	ifp->if_capabilities |= IFCAP_HWCSUM
	    | IFCAP_TSOv4
	    | IFCAP_TSOv6;
	/* Interface capabilities start disabled; user enables via ifconfig */
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER
	    | ETHERCAP_VLAN_HWTAGGING
	    | ETHERCAP_VLAN_HWCSUM
	    | ETHERCAP_JUMBO_MTU
	    | ETHERCAP_VLAN_MTU;

	/* Enable the above capabilities by default */
	ec->ec_capenable = ec->ec_capabilities;

	ether_ifattach(ifp, sc->hw.mac.addr);
	aprint_normal_dev(dev, "Ethernet address %s\n",
	    ether_sprintf(sc->hw.mac.addr));
	ether_set_ifflags_cb(ec, ixv_ifflags_cb);

	/* Don't enable LRO by default */
#if 0
	/* NetBSD doesn't support LRO yet */
	ifp->if_capabilities |= IFCAP_LRO;
#endif

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ec->ec_ifmedia = &sc->media;
	ifmedia_init_with_lock(&sc->media, IFM_IMASK, ixv_media_change,
	    ixv_media_status, &sc->core_mtx);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);

	if_register(ifp);

	return 0;
} /* ixv_setup_interface */
1716 1.58 msaitoh
1717 1.58 msaitoh
/************************************************************************
 * ixv_initialize_transmit_units - Enable transmit unit.
 *
 *   Programs, for every TX queue: the write-back threshold, the
 *   head/tail indices, the descriptor ring base/length, and finally
 *   the queue-enable bit.
 ************************************************************************/
static void
ixv_initialize_transmit_units(struct ixgbe_softc *sc)
{
	struct tx_ring *txr = sc->tx_rings;
	struct ixgbe_hw *hw = &sc->hw;
	int i;

	for (i = 0; i < sc->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl, txdctl;
		int j = txr->me;

		/* Set WTHRESH to 8, burst writeback */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl &= ~IXGBE_TXDCTL_WTHRESH_MASK;
		txdctl |= IXGBE_TX_WTHRESH << IXGBE_TXDCTL_WTHRESH_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);

		/* Set the HW Tx Head and Tail indices */
		IXGBE_WRITE_REG(&sc->hw, IXGBE_VFTDH(j), 0);
		IXGBE_WRITE_REG(&sc->hw, IXGBE_VFTDT(j), 0);

		/* Set Tx Tail register */
		txr->tail = IXGBE_VFTDT(j);

		/* Ring starts with free descriptor space available. */
		txr->txr_no_space = false;

		/* Set Ring parameters */
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j),
		    sc->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
		/* Disable relaxed-ordering for descriptor write-back. */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);

		/* Now enable */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	return;
} /* ixv_initialize_transmit_units */
1766 1.58 msaitoh
1767 1.58 msaitoh
/************************************************************************
 * ixv_initialize_rss_mapping
 *
 *   Programs the VF RSS key, the 64-entry redirection table, and the
 *   hash-type selection (VFMRQC).  Hash types the hardware cannot do
 *   (the *_EX variants) are reported but not programmed.
 ************************************************************************/
static void
ixv_initialize_rss_mapping(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	u32 reta = 0, mrqc, rss_key[10];
	int queue_id;
	int i, j;
	u32 rss_hash_config;

	/* force use default RSS key. */
#ifdef __NetBSD__
	rss_getkey((uint8_t *) &rss_key);
#else
	if (sc->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *)&rss_key);
	} else {
		/* set up random bits */
		cprng_fast(&rss_key, sizeof(rss_key));
	}
#endif

	/* Now fill out hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);

	/* Set up the redirection table */
	for (i = 0, j = 0; i < 64; i++, j++) {
		/* j cycles round-robin over the queues */
		if (j == sc->num_queues)
			j = 0;

		if (sc->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % sc->num_queues;
		} else
			queue_id = j;

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta >>= 8;
		reta |= ((uint32_t)queue_id) << 24;
		if ((i & 3) == 3) {
			/* Four entries accumulated: flush one VFRETA word */
			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
			reta = 0;
		}
	}

	/* Perform hash on these packet types */
	if (sc->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		    | RSS_HASHTYPE_RSS_TCP_IPV4
		    | RSS_HASHTYPE_RSS_IPV6
		    | RSS_HASHTYPE_RSS_TCP_IPV6;
	}

	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		device_printf(sc->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX "
		    "defined, but not supported\n", __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		device_printf(sc->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX "
		    "defined, but not supported\n", __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		device_printf(sc->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX "
		    "defined, but not supported\n", __func__);
	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
} /* ixv_initialize_rss_mapping */
1864 1.58 msaitoh
1865 1.58 msaitoh
/************************************************************************
 * ixv_initialize_receive_units - Setup receive registers and features.
 *
 *   Programs buffer sizing and packet-split types, tells the PF our
 *   max frame size, then for each RX queue: disables the queue,
 *   programs ring base/length/indices and SRRCTL, re-enables the
 *   queue, and sets the tail pointer (netmap-aware).
 ************************************************************************/
static void
ixv_initialize_receive_units(struct ixgbe_softc *sc)
{
	struct rx_ring *rxr = sc->rx_rings;
	struct ixgbe_hw *hw = &sc->hw;
	struct ifnet *ifp = sc->ifp;
	u32 bufsz, psrtype;

	/* Pick 4K receive buffers for jumbo MTU, 2K otherwise. */
	if (ifp->if_mtu > ETHERMTU)
		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	else
		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	psrtype = IXGBE_PSRTYPE_TCPHDR
	    | IXGBE_PSRTYPE_UDPHDR
	    | IXGBE_PSRTYPE_IPV4HDR
	    | IXGBE_PSRTYPE_IPV6HDR
	    | IXGBE_PSRTYPE_L2HDR;

	if (sc->num_queues > 1)
		psrtype |= 1 << 29;

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);

	/* Tell PF our max_frame size */
	if (ixgbevf_rlpml_set_vf(hw, sc->max_frame_size) != 0) {
		device_printf(sc->dev, "There is a problem with the PF "
		    "setup. It is likely the receive unit for this VF will "
		    "not function correctly.\n");
	}

	for (int i = 0; i < sc->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg, rxdctl;
		int j = rxr->me;

		/* Disable the queue */
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		/* Poll up to ~10ms for the disable to take effect. */
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
			    IXGBE_RXDCTL_ENABLE)
				msec_delay(1);
			else
				break;
		}
		IXGBE_WRITE_BARRIER(hw);
		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j),
		    sc->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Reset the ring indices */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);

		/* Set up the SRRCTL register */
		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j));
		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		reg |= bufsz;
		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg);

		/* Capture Rx Tail index */
		rxr->tail = IXGBE_VFRDT(rxr->me);

		/* Do the queue enabling last */
		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		/* Poll up to ~10ms for the enable to take effect. */
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			msec_delay(1);
		}
		IXGBE_WRITE_BARRIER(hw);

		/* Set the Tail Pointer */
#ifdef DEV_NETMAP
		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
		if ((sc->feat_en & IXGBE_FEATURE_NETMAP) &&
		    (ifp->if_capenable & IFCAP_NETMAP)) {
			struct netmap_adapter *na = NA(sc->ifp);
			struct netmap_kring *kring = na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
			    sc->num_rx_desc - 1);
	}

	/* RSS registers only exist on X550 VF and newer. */
	if (sc->hw.mac.type >= ixgbe_mac_X550_vf)
		ixv_initialize_rss_mapping(sc);
} /* ixv_initialize_receive_units */
1984 1.1 dyoung
1985 1.58 msaitoh /************************************************************************
1986 1.83 msaitoh * ixv_sysctl_tdh_handler - Transmit Descriptor Head handler function
1987 1.83 msaitoh *
1988 1.83 msaitoh * Retrieves the TDH value from the hardware
1989 1.83 msaitoh ************************************************************************/
1990 1.113 msaitoh static int
1991 1.83 msaitoh ixv_sysctl_tdh_handler(SYSCTLFN_ARGS)
1992 1.83 msaitoh {
1993 1.83 msaitoh struct sysctlnode node = *rnode;
1994 1.83 msaitoh struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
1995 1.83 msaitoh uint32_t val;
1996 1.83 msaitoh
1997 1.83 msaitoh if (!txr)
1998 1.83 msaitoh return (0);
1999 1.83 msaitoh
2000 1.186 msaitoh val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_VFTDH(txr->me));
2001 1.83 msaitoh node.sysctl_data = &val;
2002 1.83 msaitoh return sysctl_lookup(SYSCTLFN_CALL(&node));
2003 1.83 msaitoh } /* ixv_sysctl_tdh_handler */
2004 1.83 msaitoh
2005 1.83 msaitoh /************************************************************************
2006 1.83 msaitoh * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
2007 1.83 msaitoh *
2008 1.83 msaitoh * Retrieves the TDT value from the hardware
2009 1.83 msaitoh ************************************************************************/
2010 1.113 msaitoh static int
2011 1.83 msaitoh ixv_sysctl_tdt_handler(SYSCTLFN_ARGS)
2012 1.83 msaitoh {
2013 1.83 msaitoh struct sysctlnode node = *rnode;
2014 1.83 msaitoh struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2015 1.83 msaitoh uint32_t val;
2016 1.83 msaitoh
2017 1.83 msaitoh if (!txr)
2018 1.83 msaitoh return (0);
2019 1.83 msaitoh
2020 1.186 msaitoh val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_VFTDT(txr->me));
2021 1.83 msaitoh node.sysctl_data = &val;
2022 1.83 msaitoh return sysctl_lookup(SYSCTLFN_CALL(&node));
2023 1.83 msaitoh } /* ixv_sysctl_tdt_handler */
2024 1.83 msaitoh
2025 1.83 msaitoh /************************************************************************
2026 1.99 msaitoh * ixv_sysctl_next_to_check_handler - Receive Descriptor next to check
2027 1.99 msaitoh * handler function
2028 1.99 msaitoh *
2029 1.99 msaitoh * Retrieves the next_to_check value
2030 1.99 msaitoh ************************************************************************/
2031 1.113 msaitoh static int
2032 1.99 msaitoh ixv_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
2033 1.99 msaitoh {
2034 1.99 msaitoh struct sysctlnode node = *rnode;
2035 1.99 msaitoh struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2036 1.99 msaitoh uint32_t val;
2037 1.99 msaitoh
2038 1.99 msaitoh if (!rxr)
2039 1.99 msaitoh return (0);
2040 1.99 msaitoh
2041 1.99 msaitoh val = rxr->next_to_check;
2042 1.99 msaitoh node.sysctl_data = &val;
2043 1.99 msaitoh return sysctl_lookup(SYSCTLFN_CALL(&node));
2044 1.99 msaitoh } /* ixv_sysctl_next_to_check_handler */
2045 1.99 msaitoh
2046 1.99 msaitoh /************************************************************************
2047 1.164 msaitoh * ixv_sysctl_next_to_refresh_handler - Receive Descriptor next to refresh
2048 1.164 msaitoh * handler function
2049 1.164 msaitoh *
2050 1.164 msaitoh * Retrieves the next_to_refresh value
2051 1.164 msaitoh ************************************************************************/
2052 1.164 msaitoh static int
2053 1.164 msaitoh ixv_sysctl_next_to_refresh_handler(SYSCTLFN_ARGS)
2054 1.164 msaitoh {
2055 1.164 msaitoh struct sysctlnode node = *rnode;
2056 1.164 msaitoh struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2057 1.186 msaitoh struct ixgbe_softc *sc;
2058 1.164 msaitoh uint32_t val;
2059 1.164 msaitoh
2060 1.164 msaitoh if (!rxr)
2061 1.164 msaitoh return (0);
2062 1.164 msaitoh
2063 1.186 msaitoh sc = rxr->sc;
2064 1.186 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
2065 1.164 msaitoh return (EPERM);
2066 1.164 msaitoh
2067 1.164 msaitoh val = rxr->next_to_refresh;
2068 1.164 msaitoh node.sysctl_data = &val;
2069 1.164 msaitoh return sysctl_lookup(SYSCTLFN_CALL(&node));
2070 1.164 msaitoh } /* ixv_sysctl_next_to_refresh_handler */
2071 1.164 msaitoh
2072 1.164 msaitoh /************************************************************************
2073 1.83 msaitoh * ixv_sysctl_rdh_handler - Receive Descriptor Head handler function
2074 1.83 msaitoh *
2075 1.83 msaitoh * Retrieves the RDH value from the hardware
2076 1.83 msaitoh ************************************************************************/
2077 1.113 msaitoh static int
2078 1.83 msaitoh ixv_sysctl_rdh_handler(SYSCTLFN_ARGS)
2079 1.83 msaitoh {
2080 1.83 msaitoh struct sysctlnode node = *rnode;
2081 1.83 msaitoh struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2082 1.83 msaitoh uint32_t val;
2083 1.83 msaitoh
2084 1.83 msaitoh if (!rxr)
2085 1.83 msaitoh return (0);
2086 1.83 msaitoh
2087 1.186 msaitoh val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_VFRDH(rxr->me));
2088 1.83 msaitoh node.sysctl_data = &val;
2089 1.83 msaitoh return sysctl_lookup(SYSCTLFN_CALL(&node));
2090 1.83 msaitoh } /* ixv_sysctl_rdh_handler */
2091 1.83 msaitoh
2092 1.83 msaitoh /************************************************************************
2093 1.83 msaitoh * ixv_sysctl_rdt_handler - Receive Descriptor Tail handler function
2094 1.83 msaitoh *
2095 1.83 msaitoh * Retrieves the RDT value from the hardware
2096 1.83 msaitoh ************************************************************************/
2097 1.113 msaitoh static int
2098 1.83 msaitoh ixv_sysctl_rdt_handler(SYSCTLFN_ARGS)
2099 1.83 msaitoh {
2100 1.83 msaitoh struct sysctlnode node = *rnode;
2101 1.83 msaitoh struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2102 1.83 msaitoh uint32_t val;
2103 1.83 msaitoh
2104 1.83 msaitoh if (!rxr)
2105 1.83 msaitoh return (0);
2106 1.83 msaitoh
2107 1.186 msaitoh val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_VFRDT(rxr->me));
2108 1.83 msaitoh node.sysctl_data = &val;
2109 1.83 msaitoh return sysctl_lookup(SYSCTLFN_CALL(&node));
2110 1.83 msaitoh } /* ixv_sysctl_rdt_handler */
2111 1.83 msaitoh
2112 1.126 msaitoh static void
2113 1.186 msaitoh ixv_setup_vlan_tagging(struct ixgbe_softc *sc)
2114 1.1 dyoung {
2115 1.186 msaitoh struct ethercom *ec = &sc->osdep.ec;
2116 1.186 msaitoh struct ixgbe_hw *hw = &sc->hw;
2117 1.114 msaitoh struct rx_ring *rxr;
2118 1.126 msaitoh u32 ctrl;
2119 1.126 msaitoh int i;
2120 1.110 msaitoh bool hwtagging;
2121 1.1 dyoung
2122 1.111 msaitoh /* Enable HW tagging only if any vlan is attached */
2123 1.110 msaitoh hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING)
2124 1.111 msaitoh && VLAN_ATTACHED(ec);
2125 1.1 dyoung
2126 1.1 dyoung /* Enable the queues */
2127 1.186 msaitoh for (i = 0; i < sc->num_queues; i++) {
2128 1.186 msaitoh rxr = &sc->rx_rings[i];
2129 1.65 msaitoh ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(rxr->me));
2130 1.110 msaitoh if (hwtagging)
2131 1.110 msaitoh ctrl |= IXGBE_RXDCTL_VME;
2132 1.110 msaitoh else
2133 1.110 msaitoh ctrl &= ~IXGBE_RXDCTL_VME;
2134 1.65 msaitoh IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(rxr->me), ctrl);
2135 1.26 msaitoh /*
2136 1.26 msaitoh * Let Rx path know that it needs to store VLAN tag
2137 1.26 msaitoh * as part of extra mbuf info.
2138 1.26 msaitoh */
2139 1.110 msaitoh rxr->vtag_strip = hwtagging ? TRUE : FALSE;
2140 1.1 dyoung }
2141 1.126 msaitoh } /* ixv_setup_vlan_tagging */
2142 1.126 msaitoh
2143 1.126 msaitoh /************************************************************************
2144 1.126 msaitoh * ixv_setup_vlan_support
2145 1.126 msaitoh ************************************************************************/
2146 1.126 msaitoh static int
2147 1.186 msaitoh ixv_setup_vlan_support(struct ixgbe_softc *sc)
2148 1.126 msaitoh {
2149 1.186 msaitoh struct ethercom *ec = &sc->osdep.ec;
2150 1.186 msaitoh struct ixgbe_hw *hw = &sc->hw;
2151 1.126 msaitoh u32 vid, vfta, retry;
2152 1.126 msaitoh struct vlanid_list *vlanidp;
2153 1.126 msaitoh int rv, error = 0;
2154 1.1 dyoung
2155 1.126 msaitoh /*
2156 1.126 msaitoh * This function is called from both if_init and ifflags_cb()
2157 1.126 msaitoh * on NetBSD.
2158 1.126 msaitoh */
2159 1.126 msaitoh
2160 1.126 msaitoh /*
2161 1.126 msaitoh * Part 1:
2162 1.126 msaitoh * Setup VLAN HW tagging
2163 1.126 msaitoh */
2164 1.186 msaitoh ixv_setup_vlan_tagging(sc);
2165 1.126 msaitoh
2166 1.126 msaitoh if (!VLAN_ATTACHED(ec))
2167 1.120 msaitoh return 0;
2168 1.120 msaitoh
2169 1.126 msaitoh /*
2170 1.126 msaitoh * Part 2:
2171 1.126 msaitoh * Setup VLAN HW filter
2172 1.126 msaitoh */
2173 1.120 msaitoh /* Cleanup shadow_vfta */
2174 1.65 msaitoh for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
2175 1.186 msaitoh sc->shadow_vfta[i] = 0;
2176 1.120 msaitoh /* Generate shadow_vfta from ec_vids */
2177 1.127 msaitoh ETHER_LOCK(ec);
2178 1.120 msaitoh SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
2179 1.120 msaitoh uint32_t idx;
2180 1.120 msaitoh
2181 1.120 msaitoh idx = vlanidp->vid / 32;
2182 1.120 msaitoh KASSERT(idx < IXGBE_VFTA_SIZE);
2183 1.186 msaitoh sc->shadow_vfta[idx] |= (u32)1 << (vlanidp->vid % 32);
2184 1.120 msaitoh }
2185 1.127 msaitoh ETHER_UNLOCK(ec);
2186 1.142 msaitoh
2187 1.1 dyoung /*
2188 1.58 msaitoh * A soft reset zero's out the VFTA, so
2189 1.58 msaitoh * we need to repopulate it now.
2190 1.58 msaitoh */
2191 1.21 msaitoh for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
2192 1.186 msaitoh if (sc->shadow_vfta[i] == 0)
2193 1.1 dyoung continue;
2194 1.186 msaitoh vfta = sc->shadow_vfta[i];
2195 1.1 dyoung /*
2196 1.58 msaitoh * Reconstruct the vlan id's
2197 1.58 msaitoh * based on the bits set in each
2198 1.58 msaitoh * of the array ints.
2199 1.58 msaitoh */
2200 1.26 msaitoh for (int j = 0; j < 32; j++) {
2201 1.1 dyoung retry = 0;
2202 1.123 msaitoh if ((vfta & ((u32)1 << j)) == 0)
2203 1.1 dyoung continue;
2204 1.1 dyoung vid = (i * 32) + j;
2205 1.142 msaitoh
2206 1.1 dyoung /* Call the shared code mailbox routine */
2207 1.120 msaitoh while ((rv = hw->mac.ops.set_vfta(hw, vid, 0, TRUE,
2208 1.120 msaitoh FALSE)) != 0) {
2209 1.120 msaitoh if (++retry > 5) {
2210 1.186 msaitoh device_printf(sc->dev,
2211 1.120 msaitoh "%s: max retry exceeded\n",
2212 1.120 msaitoh __func__);
2213 1.1 dyoung break;
2214 1.120 msaitoh }
2215 1.120 msaitoh }
2216 1.120 msaitoh if (rv != 0) {
2217 1.186 msaitoh device_printf(sc->dev,
2218 1.120 msaitoh "failed to set vlan %d\n", vid);
2219 1.120 msaitoh error = EACCES;
2220 1.1 dyoung }
2221 1.1 dyoung }
2222 1.1 dyoung }
2223 1.120 msaitoh return error;
2224 1.58 msaitoh } /* ixv_setup_vlan_support */
2225 1.1 dyoung
2226 1.120 msaitoh static int
2227 1.120 msaitoh ixv_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
2228 1.120 msaitoh {
2229 1.120 msaitoh struct ifnet *ifp = &ec->ec_if;
2230 1.186 msaitoh struct ixgbe_softc *sc = ifp->if_softc;
2231 1.120 msaitoh int rv;
2232 1.120 msaitoh
2233 1.120 msaitoh if (set)
2234 1.186 msaitoh rv = ixv_register_vlan(sc, vid);
2235 1.120 msaitoh else
2236 1.186 msaitoh rv = ixv_unregister_vlan(sc, vid);
2237 1.120 msaitoh
2238 1.126 msaitoh if (rv != 0)
2239 1.126 msaitoh return rv;
2240 1.126 msaitoh
2241 1.126 msaitoh /*
2242 1.126 msaitoh * Control VLAN HW tagging when ec_nvlan is changed from 1 to 0
2243 1.126 msaitoh * or 0 to 1.
2244 1.126 msaitoh */
2245 1.126 msaitoh if ((set && (ec->ec_nvlans == 1)) || (!set && (ec->ec_nvlans == 0)))
2246 1.186 msaitoh ixv_setup_vlan_tagging(sc);
2247 1.126 msaitoh
2248 1.120 msaitoh return rv;
2249 1.120 msaitoh }
2250 1.120 msaitoh
2251 1.58 msaitoh /************************************************************************
2252 1.58 msaitoh * ixv_register_vlan
2253 1.58 msaitoh *
2254 1.58 msaitoh * Run via a vlan config EVENT, it enables us to use the
2255 1.58 msaitoh * HW Filter table since we can get the vlan id. This just
2256 1.58 msaitoh * creates the entry in the soft version of the VFTA, init
2257 1.58 msaitoh * will repopulate the real table.
2258 1.58 msaitoh ************************************************************************/
2259 1.120 msaitoh static int
2260 1.186 msaitoh ixv_register_vlan(struct ixgbe_softc *sc, u16 vtag)
2261 1.1 dyoung {
2262 1.186 msaitoh struct ixgbe_hw *hw = &sc->hw;
2263 1.1 dyoung u16 index, bit;
2264 1.120 msaitoh int error;
2265 1.1 dyoung
2266 1.26 msaitoh if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2267 1.120 msaitoh return EINVAL;
2268 1.186 msaitoh IXGBE_CORE_LOCK(sc);
2269 1.1 dyoung index = (vtag >> 5) & 0x7F;
2270 1.1 dyoung bit = vtag & 0x1F;
2271 1.186 msaitoh sc->shadow_vfta[index] |= ((u32)1 << bit);
2272 1.120 msaitoh error = hw->mac.ops.set_vfta(hw, vtag, 0, true, false);
2273 1.186 msaitoh IXGBE_CORE_UNLOCK(sc);
2274 1.120 msaitoh
2275 1.120 msaitoh if (error != 0) {
2276 1.186 msaitoh device_printf(sc->dev, "failed to register vlan %hu\n", vtag);
2277 1.120 msaitoh error = EACCES;
2278 1.120 msaitoh }
2279 1.120 msaitoh return error;
2280 1.58 msaitoh } /* ixv_register_vlan */
2281 1.1 dyoung
2282 1.58 msaitoh /************************************************************************
2283 1.58 msaitoh * ixv_unregister_vlan
2284 1.58 msaitoh *
2285 1.58 msaitoh * Run via a vlan unconfig EVENT, remove our entry
2286 1.58 msaitoh * in the soft vfta.
2287 1.58 msaitoh ************************************************************************/
2288 1.120 msaitoh static int
2289 1.186 msaitoh ixv_unregister_vlan(struct ixgbe_softc *sc, u16 vtag)
2290 1.1 dyoung {
2291 1.186 msaitoh struct ixgbe_hw *hw = &sc->hw;
2292 1.1 dyoung u16 index, bit;
2293 1.142 msaitoh int error;
2294 1.1 dyoung
2295 1.58 msaitoh if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2296 1.120 msaitoh return EINVAL;
2297 1.1 dyoung
2298 1.186 msaitoh IXGBE_CORE_LOCK(sc);
2299 1.1 dyoung index = (vtag >> 5) & 0x7F;
2300 1.1 dyoung bit = vtag & 0x1F;
2301 1.186 msaitoh sc->shadow_vfta[index] &= ~((u32)1 << bit);
2302 1.120 msaitoh error = hw->mac.ops.set_vfta(hw, vtag, 0, false, false);
2303 1.186 msaitoh IXGBE_CORE_UNLOCK(sc);
2304 1.120 msaitoh
2305 1.120 msaitoh if (error != 0) {
2306 1.186 msaitoh device_printf(sc->dev, "failed to unregister vlan %hu\n",
2307 1.120 msaitoh vtag);
2308 1.120 msaitoh error = EIO;
2309 1.120 msaitoh }
2310 1.120 msaitoh return error;
2311 1.58 msaitoh } /* ixv_unregister_vlan */
2312 1.1 dyoung
2313 1.58 msaitoh /************************************************************************
2314 1.58 msaitoh * ixv_enable_intr
2315 1.58 msaitoh ************************************************************************/
2316 1.1 dyoung static void
2317 1.186 msaitoh ixv_enable_intr(struct ixgbe_softc *sc)
2318 1.1 dyoung {
2319 1.186 msaitoh struct ixgbe_hw *hw = &sc->hw;
2320 1.186 msaitoh struct ix_queue *que = sc->queues;
2321 1.114 msaitoh u32 mask;
2322 1.68 msaitoh int i;
2323 1.1 dyoung
2324 1.68 msaitoh /* For VTEIAC */
2325 1.186 msaitoh mask = (1 << sc->vector);
2326 1.186 msaitoh for (i = 0; i < sc->num_queues; i++, que++)
2327 1.68 msaitoh mask |= (1 << que->msix);
2328 1.1 dyoung IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
2329 1.1 dyoung
2330 1.68 msaitoh /* For VTEIMS */
2331 1.186 msaitoh IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << sc->vector));
2332 1.186 msaitoh que = sc->queues;
2333 1.186 msaitoh for (i = 0; i < sc->num_queues; i++, que++)
2334 1.186 msaitoh ixv_enable_queue(sc, que->msix);
2335 1.1 dyoung
2336 1.1 dyoung IXGBE_WRITE_FLUSH(hw);
2337 1.58 msaitoh } /* ixv_enable_intr */
2338 1.1 dyoung
2339 1.58 msaitoh /************************************************************************
2340 1.58 msaitoh * ixv_disable_intr
2341 1.58 msaitoh ************************************************************************/
2342 1.1 dyoung static void
2343 1.186 msaitoh ixv_disable_intr(struct ixgbe_softc *sc)
2344 1.1 dyoung {
2345 1.186 msaitoh struct ix_queue *que = sc->queues;
2346 1.82 knakahar
2347 1.186 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEIAC, 0);
2348 1.82 knakahar
2349 1.82 knakahar /* disable interrupts other than queues */
2350 1.186 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEIMC, sc->vector);
2351 1.82 knakahar
2352 1.186 msaitoh for (int i = 0; i < sc->num_queues; i++, que++)
2353 1.186 msaitoh ixv_disable_queue(sc, que->msix);
2354 1.82 knakahar
2355 1.186 msaitoh IXGBE_WRITE_FLUSH(&sc->hw);
2356 1.58 msaitoh } /* ixv_disable_intr */
2357 1.1 dyoung
2358 1.58 msaitoh /************************************************************************
2359 1.58 msaitoh * ixv_set_ivar
2360 1.58 msaitoh *
2361 1.58 msaitoh * Setup the correct IVAR register for a particular MSI-X interrupt
2362 1.58 msaitoh * - entry is the register array entry
2363 1.58 msaitoh * - vector is the MSI-X vector for this queue
2364 1.58 msaitoh * - type is RX/TX/MISC
2365 1.58 msaitoh ************************************************************************/
2366 1.1 dyoung static void
2367 1.186 msaitoh ixv_set_ivar(struct ixgbe_softc *sc, u8 entry, u8 vector, s8 type)
2368 1.1 dyoung {
2369 1.186 msaitoh struct ixgbe_hw *hw = &sc->hw;
2370 1.114 msaitoh u32 ivar, index;
2371 1.1 dyoung
2372 1.1 dyoung vector |= IXGBE_IVAR_ALLOC_VAL;
2373 1.1 dyoung
2374 1.1 dyoung if (type == -1) { /* MISC IVAR */
2375 1.1 dyoung ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
2376 1.1 dyoung ivar &= ~0xFF;
2377 1.1 dyoung ivar |= vector;
2378 1.1 dyoung IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
2379 1.114 msaitoh } else { /* RX/TX IVARS */
2380 1.1 dyoung index = (16 * (entry & 1)) + (8 * type);
2381 1.1 dyoung ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
2382 1.121 msaitoh ivar &= ~(0xffUL << index);
2383 1.121 msaitoh ivar |= ((u32)vector << index);
2384 1.1 dyoung IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
2385 1.1 dyoung }
2386 1.58 msaitoh } /* ixv_set_ivar */
2387 1.1 dyoung
2388 1.58 msaitoh /************************************************************************
2389 1.58 msaitoh * ixv_configure_ivars
2390 1.58 msaitoh ************************************************************************/
2391 1.1 dyoung static void
2392 1.186 msaitoh ixv_configure_ivars(struct ixgbe_softc *sc)
2393 1.1 dyoung {
2394 1.186 msaitoh struct ix_queue *que = sc->queues;
2395 1.1 dyoung
2396 1.80 msaitoh /* XXX We should sync EITR value calculation with ixgbe.c? */
2397 1.80 msaitoh
2398 1.186 msaitoh for (int i = 0; i < sc->num_queues; i++, que++) {
2399 1.1 dyoung /* First the RX queue entry */
2400 1.186 msaitoh ixv_set_ivar(sc, i, que->msix, 0);
2401 1.1 dyoung /* ... and the TX */
2402 1.186 msaitoh ixv_set_ivar(sc, i, que->msix, 1);
2403 1.1 dyoung /* Set an initial value in EITR */
2404 1.186 msaitoh ixv_eitr_write(sc, que->msix, IXGBE_EITR_DEFAULT);
2405 1.1 dyoung }
2406 1.1 dyoung
2407 1.21 msaitoh /* For the mailbox interrupt */
2408 1.186 msaitoh ixv_set_ivar(sc, 1, sc->vector, -1);
2409 1.58 msaitoh } /* ixv_configure_ivars */
2410 1.1 dyoung
2411 1.1 dyoung
2412 1.58 msaitoh /************************************************************************
2413 1.176 msaitoh * ixv_init_stats
2414 1.58 msaitoh *
2415 1.58 msaitoh * The VF stats registers never have a truly virgin
2416 1.176 msaitoh * starting point, so this routine save initial vaules to
2417 1.176 msaitoh * last_<REGNAME>.
2418 1.58 msaitoh ************************************************************************/
2419 1.1 dyoung static void
2420 1.186 msaitoh ixv_init_stats(struct ixgbe_softc *sc)
2421 1.1 dyoung {
2422 1.186 msaitoh struct ixgbe_hw *hw = &sc->hw;
2423 1.57 msaitoh
2424 1.186 msaitoh sc->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
2425 1.186 msaitoh sc->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
2426 1.186 msaitoh sc->stats.vf.last_vfgorc |=
2427 1.1 dyoung (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2428 1.1 dyoung
2429 1.186 msaitoh sc->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2430 1.186 msaitoh sc->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2431 1.186 msaitoh sc->stats.vf.last_vfgotc |=
2432 1.1 dyoung (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2433 1.1 dyoung
2434 1.186 msaitoh sc->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2435 1.58 msaitoh } /* ixv_init_stats */
2436 1.1 dyoung
2437 1.1 dyoung #define UPDATE_STAT_32(reg, last, count) \
2438 1.114 msaitoh { \
2439 1.58 msaitoh u32 current = IXGBE_READ_REG(hw, (reg)); \
2440 1.178 msaitoh IXGBE_EVC_ADD(&count, current - (last)); \
2441 1.58 msaitoh (last) = current; \
2442 1.1 dyoung }
2443 1.1 dyoung
2444 1.178 msaitoh #define UPDATE_STAT_36(lsb, msb, last, count) \
2445 1.178 msaitoh { \
2446 1.178 msaitoh u64 cur_lsb = IXGBE_READ_REG(hw, (lsb)); \
2447 1.178 msaitoh u64 cur_msb = IXGBE_READ_REG(hw, (msb)); \
2448 1.178 msaitoh u64 current = ((cur_msb << 32) | cur_lsb); \
2449 1.178 msaitoh if (current < (last)) \
2450 1.178 msaitoh IXGBE_EVC_ADD(&count, current + __BIT(36) - (last)); \
2451 1.178 msaitoh else \
2452 1.178 msaitoh IXGBE_EVC_ADD(&count, current - (last)); \
2453 1.178 msaitoh (last) = current; \
2454 1.1 dyoung }
2455 1.1 dyoung
2456 1.58 msaitoh /************************************************************************
2457 1.58 msaitoh * ixv_update_stats - Update the board statistics counters.
2458 1.58 msaitoh ************************************************************************/
2459 1.1 dyoung void
2460 1.186 msaitoh ixv_update_stats(struct ixgbe_softc *sc)
2461 1.1 dyoung {
2462 1.186 msaitoh struct ixgbe_hw *hw = &sc->hw;
2463 1.186 msaitoh struct ixgbevf_hw_stats *stats = &sc->stats.vf;
2464 1.1 dyoung
2465 1.88 msaitoh UPDATE_STAT_32(IXGBE_VFGPRC, stats->last_vfgprc, stats->vfgprc);
2466 1.88 msaitoh UPDATE_STAT_32(IXGBE_VFGPTC, stats->last_vfgptc, stats->vfgptc);
2467 1.88 msaitoh UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, stats->last_vfgorc,
2468 1.58 msaitoh stats->vfgorc);
2469 1.88 msaitoh UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, stats->last_vfgotc,
2470 1.58 msaitoh stats->vfgotc);
2471 1.88 msaitoh UPDATE_STAT_32(IXGBE_VFMPRC, stats->last_vfmprc, stats->vfmprc);
2472 1.58 msaitoh
2473 1.146 msaitoh /* VF doesn't count errors by hardware */
2474 1.146 msaitoh
2475 1.58 msaitoh } /* ixv_update_stats */
2476 1.1 dyoung
2477 1.83 msaitoh /************************************************************************
2478 1.83 msaitoh * ixv_sysctl_interrupt_rate_handler
2479 1.83 msaitoh ************************************************************************/
2480 1.83 msaitoh static int
2481 1.83 msaitoh ixv_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
2482 1.83 msaitoh {
2483 1.83 msaitoh struct sysctlnode node = *rnode;
2484 1.83 msaitoh struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
2485 1.186 msaitoh struct ixgbe_softc *sc = que->sc;
2486 1.83 msaitoh uint32_t reg, usec, rate;
2487 1.83 msaitoh int error;
2488 1.83 msaitoh
2489 1.83 msaitoh if (que == NULL)
2490 1.83 msaitoh return 0;
2491 1.186 msaitoh reg = IXGBE_READ_REG(&que->sc->hw, IXGBE_VTEITR(que->msix));
2492 1.83 msaitoh usec = ((reg & 0x0FF8) >> 3);
2493 1.83 msaitoh if (usec > 0)
2494 1.83 msaitoh rate = 500000 / usec;
2495 1.83 msaitoh else
2496 1.83 msaitoh rate = 0;
2497 1.83 msaitoh node.sysctl_data = &rate;
2498 1.83 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
2499 1.83 msaitoh if (error || newp == NULL)
2500 1.83 msaitoh return error;
2501 1.83 msaitoh reg &= ~0xfff; /* default, no limitation */
2502 1.83 msaitoh if (rate > 0 && rate < 500000) {
2503 1.83 msaitoh if (rate < 1000)
2504 1.83 msaitoh rate = 1000;
2505 1.148 msaitoh reg |= ((4000000 / rate) & 0xff8);
2506 1.83 msaitoh /*
2507 1.83 msaitoh * When RSC is used, ITR interval must be larger than
2508 1.83 msaitoh * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
2509 1.83 msaitoh * The minimum value is always greater than 2us on 100M
2510 1.83 msaitoh * (and 10M?(not documented)), but it's not on 1G and higher.
2511 1.83 msaitoh */
2512 1.186 msaitoh if ((sc->link_speed != IXGBE_LINK_SPEED_100_FULL)
2513 1.186 msaitoh && (sc->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
2514 1.186 msaitoh if ((sc->num_queues > 1)
2515 1.83 msaitoh && (reg < IXGBE_MIN_RSC_EITR_10G1G))
2516 1.83 msaitoh return EINVAL;
2517 1.83 msaitoh }
2518 1.191 msaitoh sc->max_interrupt_rate = rate;
2519 1.83 msaitoh } else
2520 1.191 msaitoh sc->max_interrupt_rate = 0;
2521 1.186 msaitoh ixv_eitr_write(sc, que->msix, reg);
2522 1.83 msaitoh
2523 1.83 msaitoh return (0);
2524 1.83 msaitoh } /* ixv_sysctl_interrupt_rate_handler */
2525 1.83 msaitoh
2526 1.3 msaitoh const struct sysctlnode *
2527 1.186 msaitoh ixv_sysctl_instance(struct ixgbe_softc *sc)
2528 1.3 msaitoh {
2529 1.3 msaitoh const char *dvname;
2530 1.3 msaitoh struct sysctllog **log;
2531 1.3 msaitoh int rc;
2532 1.3 msaitoh const struct sysctlnode *rnode;
2533 1.3 msaitoh
2534 1.186 msaitoh log = &sc->sysctllog;
2535 1.186 msaitoh dvname = device_xname(sc->dev);
2536 1.3 msaitoh
2537 1.3 msaitoh if ((rc = sysctl_createv(log, 0, NULL, &rnode,
2538 1.3 msaitoh 0, CTLTYPE_NODE, dvname,
2539 1.3 msaitoh SYSCTL_DESCR("ixv information and settings"),
2540 1.3 msaitoh NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
2541 1.3 msaitoh goto err;
2542 1.3 msaitoh
2543 1.3 msaitoh return rnode;
2544 1.3 msaitoh err:
2545 1.186 msaitoh device_printf(sc->dev,
2546 1.129 msaitoh "%s: sysctl_createv failed, rc = %d\n", __func__, rc);
2547 1.3 msaitoh return NULL;
2548 1.3 msaitoh }
2549 1.48 msaitoh
2550 1.48 msaitoh static void
2551 1.186 msaitoh ixv_add_device_sysctls(struct ixgbe_softc *sc)
2552 1.48 msaitoh {
2553 1.48 msaitoh struct sysctllog **log;
2554 1.48 msaitoh const struct sysctlnode *rnode, *cnode;
2555 1.48 msaitoh device_t dev;
2556 1.48 msaitoh
2557 1.186 msaitoh dev = sc->dev;
2558 1.186 msaitoh log = &sc->sysctllog;
2559 1.48 msaitoh
2560 1.186 msaitoh if ((rnode = ixv_sysctl_instance(sc)) == NULL) {
2561 1.48 msaitoh aprint_error_dev(dev, "could not create sysctl root\n");
2562 1.48 msaitoh return;
2563 1.48 msaitoh }
2564 1.48 msaitoh
2565 1.48 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
2566 1.158 msaitoh CTLFLAG_READWRITE, CTLTYPE_INT, "debug",
2567 1.158 msaitoh SYSCTL_DESCR("Debug Info"),
2568 1.186 msaitoh ixv_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL) != 0)
2569 1.48 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
2570 1.48 msaitoh
2571 1.48 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
2572 1.163 msaitoh CTLFLAG_READWRITE, CTLTYPE_INT,
2573 1.163 msaitoh "rx_copy_len", SYSCTL_DESCR("RX Copy Length"),
2574 1.163 msaitoh ixv_sysctl_rx_copy_len, 0,
2575 1.186 msaitoh (void *)sc, 0, CTL_CREATE, CTL_EOL) != 0)
2576 1.163 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
2577 1.163 msaitoh
2578 1.163 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
2579 1.180 msaitoh CTLFLAG_READONLY, CTLTYPE_INT,
2580 1.180 msaitoh "num_tx_desc", SYSCTL_DESCR("Number of TX descriptors"),
2581 1.186 msaitoh NULL, 0, &sc->num_tx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
2582 1.180 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
2583 1.180 msaitoh
2584 1.180 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
2585 1.180 msaitoh CTLFLAG_READONLY, CTLTYPE_INT,
2586 1.180 msaitoh "num_rx_desc", SYSCTL_DESCR("Number of RX descriptors"),
2587 1.186 msaitoh NULL, 0, &sc->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
2588 1.180 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
2589 1.180 msaitoh
2590 1.180 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
2591 1.179 msaitoh CTLFLAG_READWRITE, CTLTYPE_INT, "rx_process_limit",
2592 1.179 msaitoh SYSCTL_DESCR("max number of RX packets to process"),
2593 1.186 msaitoh ixv_sysctl_rx_process_limit, 0, (void *)sc, 0, CTL_CREATE,
2594 1.179 msaitoh CTL_EOL) != 0)
2595 1.179 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
2596 1.179 msaitoh
2597 1.179 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
2598 1.179 msaitoh CTLFLAG_READWRITE, CTLTYPE_INT, "tx_process_limit",
2599 1.179 msaitoh SYSCTL_DESCR("max number of TX packets to process"),
2600 1.186 msaitoh ixv_sysctl_tx_process_limit, 0, (void *)sc, 0, CTL_CREATE,
2601 1.179 msaitoh CTL_EOL) != 0)
2602 1.179 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
2603 1.179 msaitoh
2604 1.179 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
2605 1.158 msaitoh CTLFLAG_READWRITE, CTLTYPE_BOOL, "enable_aim",
2606 1.158 msaitoh SYSCTL_DESCR("Interrupt Moderation"),
2607 1.186 msaitoh NULL, 0, &sc->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
2608 1.48 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
2609 1.84 knakahar
2610 1.84 knakahar if (sysctl_createv(log, 0, &rnode, &cnode,
2611 1.158 msaitoh CTLFLAG_READWRITE, CTLTYPE_BOOL, "txrx_workqueue",
2612 1.158 msaitoh SYSCTL_DESCR("Use workqueue for packet processing"),
2613 1.186 msaitoh NULL, 0, &sc->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL)
2614 1.158 msaitoh != 0)
2615 1.84 knakahar aprint_error_dev(dev, "could not create sysctl\n");
2616 1.48 msaitoh }
2617 1.48 msaitoh
2618 1.58 msaitoh /************************************************************************
2619 1.58 msaitoh * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
2620 1.58 msaitoh ************************************************************************/
2621 1.48 msaitoh static void
2622 1.186 msaitoh ixv_add_stats_sysctls(struct ixgbe_softc *sc)
2623 1.48 msaitoh {
2624 1.186 msaitoh device_t dev = sc->dev;
2625 1.186 msaitoh struct tx_ring *txr = sc->tx_rings;
2626 1.186 msaitoh struct rx_ring *rxr = sc->rx_rings;
2627 1.186 msaitoh struct ixgbevf_hw_stats *stats = &sc->stats.vf;
2628 1.186 msaitoh struct ixgbe_hw *hw = &sc->hw;
2629 1.83 msaitoh const struct sysctlnode *rnode, *cnode;
2630 1.186 msaitoh struct sysctllog **log = &sc->sysctllog;
2631 1.48 msaitoh const char *xname = device_xname(dev);
2632 1.48 msaitoh
2633 1.48 msaitoh /* Driver Statistics */
2634 1.186 msaitoh evcnt_attach_dynamic(&sc->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
2635 1.49 msaitoh NULL, xname, "Driver tx dma soft fail EFBIG");
2636 1.186 msaitoh evcnt_attach_dynamic(&sc->mbuf_defrag_failed, EVCNT_TYPE_MISC,
2637 1.48 msaitoh NULL, xname, "m_defrag() failed");
2638 1.186 msaitoh evcnt_attach_dynamic(&sc->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
2639 1.49 msaitoh NULL, xname, "Driver tx dma hard fail EFBIG");
2640 1.186 msaitoh evcnt_attach_dynamic(&sc->einval_tx_dma_setup, EVCNT_TYPE_MISC,
2641 1.49 msaitoh NULL, xname, "Driver tx dma hard fail EINVAL");
2642 1.186 msaitoh evcnt_attach_dynamic(&sc->other_tx_dma_setup, EVCNT_TYPE_MISC,
2643 1.49 msaitoh NULL, xname, "Driver tx dma hard fail other");
2644 1.186 msaitoh evcnt_attach_dynamic(&sc->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
2645 1.49 msaitoh NULL, xname, "Driver tx dma soft fail EAGAIN");
2646 1.186 msaitoh evcnt_attach_dynamic(&sc->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
2647 1.49 msaitoh NULL, xname, "Driver tx dma soft fail ENOMEM");
2648 1.186 msaitoh evcnt_attach_dynamic(&sc->watchdog_events, EVCNT_TYPE_MISC,
2649 1.48 msaitoh NULL, xname, "Watchdog timeouts");
2650 1.186 msaitoh evcnt_attach_dynamic(&sc->tso_err, EVCNT_TYPE_MISC,
2651 1.49 msaitoh NULL, xname, "TSO errors");
2652 1.186 msaitoh evcnt_attach_dynamic(&sc->admin_irqev, EVCNT_TYPE_INTR,
2653 1.151 msaitoh NULL, xname, "Admin MSI-X IRQ Handled");
2654 1.186 msaitoh evcnt_attach_dynamic(&sc->link_workev, EVCNT_TYPE_INTR,
2655 1.151 msaitoh NULL, xname, "Admin event");
2656 1.49 msaitoh
2657 1.186 msaitoh for (int i = 0; i < sc->num_queues; i++, rxr++, txr++) {
2658 1.184 msaitoh #ifdef LRO
2659 1.184 msaitoh struct lro_ctrl *lro = &rxr->lro;
2660 1.184 msaitoh #endif
2661 1.184 msaitoh
2662 1.186 msaitoh snprintf(sc->queues[i].evnamebuf,
2663 1.186 msaitoh sizeof(sc->queues[i].evnamebuf), "%s q%d", xname, i);
2664 1.186 msaitoh snprintf(sc->queues[i].namebuf,
2665 1.186 msaitoh sizeof(sc->queues[i].namebuf), "q%d", i);
2666 1.49 msaitoh
2667 1.186 msaitoh if ((rnode = ixv_sysctl_instance(sc)) == NULL) {
2668 1.182 msaitoh aprint_error_dev(dev,
2669 1.182 msaitoh "could not create sysctl root\n");
2670 1.49 msaitoh break;
2671 1.49 msaitoh }
2672 1.49 msaitoh
2673 1.49 msaitoh if (sysctl_createv(log, 0, &rnode, &rnode,
2674 1.49 msaitoh 0, CTLTYPE_NODE,
2675 1.186 msaitoh sc->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
2676 1.49 msaitoh NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
2677 1.49 msaitoh break;
2678 1.49 msaitoh
2679 1.49 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
2680 1.49 msaitoh CTLFLAG_READWRITE, CTLTYPE_INT,
2681 1.49 msaitoh "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
2682 1.83 msaitoh ixv_sysctl_interrupt_rate_handler, 0,
2683 1.186 msaitoh (void *)&sc->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
2684 1.49 msaitoh break;
2685 1.49 msaitoh
2686 1.49 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
2687 1.49 msaitoh CTLFLAG_READONLY, CTLTYPE_INT,
2688 1.49 msaitoh "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
2689 1.83 msaitoh ixv_sysctl_tdh_handler, 0, (void *)txr,
2690 1.49 msaitoh 0, CTL_CREATE, CTL_EOL) != 0)
2691 1.49 msaitoh break;
2692 1.49 msaitoh
2693 1.49 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
2694 1.49 msaitoh CTLFLAG_READONLY, CTLTYPE_INT,
2695 1.49 msaitoh "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
2696 1.83 msaitoh ixv_sysctl_tdt_handler, 0, (void *)txr,
2697 1.49 msaitoh 0, CTL_CREATE, CTL_EOL) != 0)
2698 1.49 msaitoh break;
2699 1.83 msaitoh
2700 1.49 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
2701 1.158 msaitoh CTLFLAG_READONLY, CTLTYPE_INT, "rxd_nxck",
2702 1.158 msaitoh SYSCTL_DESCR("Receive Descriptor next to check"),
2703 1.158 msaitoh ixv_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
2704 1.99 msaitoh CTL_CREATE, CTL_EOL) != 0)
2705 1.99 msaitoh break;
2706 1.99 msaitoh
2707 1.99 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
2708 1.164 msaitoh CTLFLAG_READONLY, CTLTYPE_INT, "rxd_nxrf",
2709 1.164 msaitoh SYSCTL_DESCR("Receive Descriptor next to refresh"),
2710 1.164 msaitoh ixv_sysctl_next_to_refresh_handler, 0, (void *)rxr, 0,
2711 1.164 msaitoh CTL_CREATE, CTL_EOL) != 0)
2712 1.164 msaitoh break;
2713 1.164 msaitoh
2714 1.164 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
2715 1.158 msaitoh CTLFLAG_READONLY, CTLTYPE_INT, "rxd_head",
2716 1.158 msaitoh SYSCTL_DESCR("Receive Descriptor Head"),
2717 1.83 msaitoh ixv_sysctl_rdh_handler, 0, (void *)rxr, 0,
2718 1.49 msaitoh CTL_CREATE, CTL_EOL) != 0)
2719 1.49 msaitoh break;
2720 1.49 msaitoh
2721 1.49 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
2722 1.158 msaitoh CTLFLAG_READONLY, CTLTYPE_INT, "rxd_tail",
2723 1.158 msaitoh SYSCTL_DESCR("Receive Descriptor Tail"),
2724 1.83 msaitoh ixv_sysctl_rdt_handler, 0, (void *)rxr, 0,
2725 1.49 msaitoh CTL_CREATE, CTL_EOL) != 0)
2726 1.49 msaitoh break;
2727 1.49 msaitoh
2728 1.186 msaitoh evcnt_attach_dynamic(&sc->queues[i].irqs, EVCNT_TYPE_INTR,
2729 1.186 msaitoh NULL, sc->queues[i].evnamebuf, "IRQs on queue");
2730 1.186 msaitoh evcnt_attach_dynamic(&sc->queues[i].handleq,
2731 1.186 msaitoh EVCNT_TYPE_MISC, NULL, sc->queues[i].evnamebuf,
2732 1.184 msaitoh "Handled queue in softint");
2733 1.186 msaitoh evcnt_attach_dynamic(&sc->queues[i].req, EVCNT_TYPE_MISC,
2734 1.186 msaitoh NULL, sc->queues[i].evnamebuf, "Requeued in softint");
2735 1.184 msaitoh evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
2736 1.186 msaitoh NULL, sc->queues[i].evnamebuf,
2737 1.184 msaitoh "Queue Packets Transmitted");
2738 1.184 msaitoh #ifndef IXGBE_LEGACY_TX
2739 1.184 msaitoh evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
2740 1.186 msaitoh NULL, sc->queues[i].evnamebuf,
2741 1.184 msaitoh "Packets dropped in pcq");
2742 1.184 msaitoh #endif
2743 1.184 msaitoh evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
2744 1.186 msaitoh NULL, sc->queues[i].evnamebuf,
2745 1.184 msaitoh "TX Queue No Descriptor Available");
2746 1.184 msaitoh evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
2747 1.186 msaitoh NULL, sc->queues[i].evnamebuf, "TSO");
2748 1.184 msaitoh
2749 1.49 msaitoh evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
2750 1.186 msaitoh NULL, sc->queues[i].evnamebuf,
2751 1.158 msaitoh "Queue Bytes Received");
2752 1.184 msaitoh evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
2753 1.186 msaitoh NULL, sc->queues[i].evnamebuf,
2754 1.184 msaitoh "Queue Packets Received");
2755 1.166 msaitoh evcnt_attach_dynamic(&rxr->no_mbuf, EVCNT_TYPE_MISC,
2756 1.186 msaitoh NULL, sc->queues[i].evnamebuf, "Rx no mbuf");
2757 1.49 msaitoh evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
2758 1.186 msaitoh NULL, sc->queues[i].evnamebuf, "Rx discarded");
2759 1.184 msaitoh evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
2760 1.186 msaitoh NULL, sc->queues[i].evnamebuf, "Copied RX Frames");
2761 1.49 msaitoh #ifdef LRO
2762 1.49 msaitoh SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
2763 1.49 msaitoh CTLFLAG_RD, &lro->lro_queued, 0,
2764 1.49 msaitoh "LRO Queued");
2765 1.49 msaitoh SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
2766 1.49 msaitoh CTLFLAG_RD, &lro->lro_flushed, 0,
2767 1.49 msaitoh "LRO Flushed");
2768 1.49 msaitoh #endif /* LRO */
2769 1.49 msaitoh }
2770 1.49 msaitoh
2771 1.58 msaitoh /* MAC stats get their own sub node */
2772 1.49 msaitoh
2773 1.49 msaitoh snprintf(stats->namebuf,
2774 1.49 msaitoh sizeof(stats->namebuf), "%s MAC Statistics", xname);
2775 1.49 msaitoh
2776 1.49 msaitoh evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
2777 1.49 msaitoh stats->namebuf, "rx csum offload - IP");
2778 1.49 msaitoh evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
2779 1.49 msaitoh stats->namebuf, "rx csum offload - L4");
2780 1.49 msaitoh evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
2781 1.49 msaitoh stats->namebuf, "rx csum offload - IP bad");
2782 1.49 msaitoh evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
2783 1.49 msaitoh stats->namebuf, "rx csum offload - L4 bad");
2784 1.48 msaitoh
2785 1.49 msaitoh /* Packet Reception Stats */
2786 1.48 msaitoh evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
2787 1.48 msaitoh xname, "Good Packets Received");
2788 1.48 msaitoh evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
2789 1.48 msaitoh xname, "Good Octets Received");
2790 1.48 msaitoh evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
2791 1.48 msaitoh xname, "Multicast Packets Received");
2792 1.48 msaitoh evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
2793 1.48 msaitoh xname, "Good Packets Transmitted");
2794 1.48 msaitoh evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
2795 1.48 msaitoh xname, "Good Octets Transmitted");
2796 1.67 msaitoh
2797 1.67 msaitoh /* Mailbox Stats */
2798 1.67 msaitoh evcnt_attach_dynamic(&hw->mbx.stats.msgs_tx, EVCNT_TYPE_MISC, NULL,
2799 1.67 msaitoh xname, "message TXs");
2800 1.67 msaitoh evcnt_attach_dynamic(&hw->mbx.stats.msgs_rx, EVCNT_TYPE_MISC, NULL,
2801 1.67 msaitoh xname, "message RXs");
2802 1.67 msaitoh evcnt_attach_dynamic(&hw->mbx.stats.acks, EVCNT_TYPE_MISC, NULL,
2803 1.67 msaitoh xname, "ACKs");
2804 1.67 msaitoh evcnt_attach_dynamic(&hw->mbx.stats.reqs, EVCNT_TYPE_MISC, NULL,
2805 1.67 msaitoh xname, "REQs");
2806 1.67 msaitoh evcnt_attach_dynamic(&hw->mbx.stats.rsts, EVCNT_TYPE_MISC, NULL,
2807 1.67 msaitoh xname, "RSTs");
2808 1.67 msaitoh
2809 1.58 msaitoh } /* ixv_add_stats_sysctls */
2810 1.48 msaitoh
2811 1.131 msaitoh static void
2812 1.186 msaitoh ixv_clear_evcnt(struct ixgbe_softc *sc)
2813 1.131 msaitoh {
2814 1.186 msaitoh struct tx_ring *txr = sc->tx_rings;
2815 1.186 msaitoh struct rx_ring *rxr = sc->rx_rings;
2816 1.186 msaitoh struct ixgbevf_hw_stats *stats = &sc->stats.vf;
2817 1.186 msaitoh struct ixgbe_hw *hw = &sc->hw;
2818 1.131 msaitoh int i;
2819 1.131 msaitoh
2820 1.131 msaitoh /* Driver Statistics */
2821 1.186 msaitoh IXGBE_EVC_STORE(&sc->efbig_tx_dma_setup, 0);
2822 1.186 msaitoh IXGBE_EVC_STORE(&sc->mbuf_defrag_failed, 0);
2823 1.186 msaitoh IXGBE_EVC_STORE(&sc->efbig2_tx_dma_setup, 0);
2824 1.186 msaitoh IXGBE_EVC_STORE(&sc->einval_tx_dma_setup, 0);
2825 1.186 msaitoh IXGBE_EVC_STORE(&sc->other_tx_dma_setup, 0);
2826 1.186 msaitoh IXGBE_EVC_STORE(&sc->eagain_tx_dma_setup, 0);
2827 1.186 msaitoh IXGBE_EVC_STORE(&sc->enomem_tx_dma_setup, 0);
2828 1.186 msaitoh IXGBE_EVC_STORE(&sc->watchdog_events, 0);
2829 1.186 msaitoh IXGBE_EVC_STORE(&sc->tso_err, 0);
2830 1.186 msaitoh IXGBE_EVC_STORE(&sc->admin_irqev, 0);
2831 1.186 msaitoh IXGBE_EVC_STORE(&sc->link_workev, 0);
2832 1.186 msaitoh
2833 1.186 msaitoh for (i = 0; i < sc->num_queues; i++, rxr++, txr++) {
2834 1.186 msaitoh IXGBE_EVC_STORE(&sc->queues[i].irqs, 0);
2835 1.186 msaitoh IXGBE_EVC_STORE(&sc->queues[i].handleq, 0);
2836 1.186 msaitoh IXGBE_EVC_STORE(&sc->queues[i].req, 0);
2837 1.178 msaitoh IXGBE_EVC_STORE(&txr->total_packets, 0);
2838 1.131 msaitoh #ifndef IXGBE_LEGACY_TX
2839 1.178 msaitoh IXGBE_EVC_STORE(&txr->pcq_drops, 0);
2840 1.131 msaitoh #endif
2841 1.184 msaitoh IXGBE_EVC_STORE(&txr->no_desc_avail, 0);
2842 1.184 msaitoh IXGBE_EVC_STORE(&txr->tso_tx, 0);
2843 1.131 msaitoh txr->q_efbig_tx_dma_setup = 0;
2844 1.131 msaitoh txr->q_mbuf_defrag_failed = 0;
2845 1.131 msaitoh txr->q_efbig2_tx_dma_setup = 0;
2846 1.131 msaitoh txr->q_einval_tx_dma_setup = 0;
2847 1.131 msaitoh txr->q_other_tx_dma_setup = 0;
2848 1.131 msaitoh txr->q_eagain_tx_dma_setup = 0;
2849 1.131 msaitoh txr->q_enomem_tx_dma_setup = 0;
2850 1.131 msaitoh txr->q_tso_err = 0;
2851 1.131 msaitoh
2852 1.178 msaitoh IXGBE_EVC_STORE(&rxr->rx_packets, 0);
2853 1.178 msaitoh IXGBE_EVC_STORE(&rxr->rx_bytes, 0);
2854 1.178 msaitoh IXGBE_EVC_STORE(&rxr->rx_copies, 0);
2855 1.178 msaitoh IXGBE_EVC_STORE(&rxr->no_mbuf, 0);
2856 1.178 msaitoh IXGBE_EVC_STORE(&rxr->rx_discarded, 0);
2857 1.131 msaitoh }
2858 1.131 msaitoh
2859 1.131 msaitoh /* MAC stats get their own sub node */
2860 1.131 msaitoh
2861 1.178 msaitoh IXGBE_EVC_STORE(&stats->ipcs, 0);
2862 1.178 msaitoh IXGBE_EVC_STORE(&stats->l4cs, 0);
2863 1.178 msaitoh IXGBE_EVC_STORE(&stats->ipcs_bad, 0);
2864 1.178 msaitoh IXGBE_EVC_STORE(&stats->l4cs_bad, 0);
2865 1.131 msaitoh
2866 1.176 msaitoh /*
2867 1.176 msaitoh * Packet Reception Stats.
2868 1.176 msaitoh * Call ixv_init_stats() to save last VF counters' values.
2869 1.176 msaitoh */
2870 1.186 msaitoh ixv_init_stats(sc);
2871 1.178 msaitoh IXGBE_EVC_STORE(&stats->vfgprc, 0);
2872 1.178 msaitoh IXGBE_EVC_STORE(&stats->vfgorc, 0);
2873 1.178 msaitoh IXGBE_EVC_STORE(&stats->vfmprc, 0);
2874 1.178 msaitoh IXGBE_EVC_STORE(&stats->vfgptc, 0);
2875 1.178 msaitoh IXGBE_EVC_STORE(&stats->vfgotc, 0);
2876 1.131 msaitoh
2877 1.131 msaitoh /* Mailbox Stats */
2878 1.178 msaitoh IXGBE_EVC_STORE(&hw->mbx.stats.msgs_tx, 0);
2879 1.178 msaitoh IXGBE_EVC_STORE(&hw->mbx.stats.msgs_rx, 0);
2880 1.178 msaitoh IXGBE_EVC_STORE(&hw->mbx.stats.acks, 0);
2881 1.178 msaitoh IXGBE_EVC_STORE(&hw->mbx.stats.reqs, 0);
2882 1.178 msaitoh IXGBE_EVC_STORE(&hw->mbx.stats.rsts, 0);
2883 1.131 msaitoh
2884 1.131 msaitoh } /* ixv_clear_evcnt */
2885 1.131 msaitoh
2886 1.186 msaitoh #define PRINTQS(sc, regname) \
2887 1.175 msaitoh do { \
2888 1.186 msaitoh struct ixgbe_hw *_hw = &(sc)->hw; \
2889 1.175 msaitoh int _i; \
2890 1.175 msaitoh \
2891 1.186 msaitoh printf("%s: %s", device_xname((sc)->dev), #regname); \
2892 1.186 msaitoh for (_i = 0; _i < (sc)->num_queues; _i++) { \
2893 1.175 msaitoh printf((_i == 0) ? "\t" : " "); \
2894 1.175 msaitoh printf("%08x", IXGBE_READ_REG(_hw, \
2895 1.175 msaitoh IXGBE_##regname(_i))); \
2896 1.175 msaitoh } \
2897 1.175 msaitoh printf("\n"); \
2898 1.175 msaitoh } while (0)
2899 1.175 msaitoh
2900 1.58 msaitoh /************************************************************************
2901 1.58 msaitoh * ixv_print_debug_info
2902 1.57 msaitoh *
2903 1.58 msaitoh * Provides a way to take a look at important statistics
2904 1.58 msaitoh * maintained by the driver and hardware.
2905 1.58 msaitoh ************************************************************************/
2906 1.57 msaitoh static void
2907 1.186 msaitoh ixv_print_debug_info(struct ixgbe_softc *sc)
2908 1.57 msaitoh {
2909 1.186 msaitoh device_t dev = sc->dev;
2910 1.186 msaitoh struct ixgbe_hw *hw = &sc->hw;
2911 1.175 msaitoh int i;
2912 1.57 msaitoh
2913 1.175 msaitoh device_printf(dev, "queue:");
2914 1.186 msaitoh for (i = 0; i < sc->num_queues; i++) {
2915 1.175 msaitoh printf((i == 0) ? "\t" : " ");
2916 1.175 msaitoh printf("%8d", i);
2917 1.58 msaitoh }
2918 1.175 msaitoh printf("\n");
2919 1.186 msaitoh PRINTQS(sc, VFRDBAL);
2920 1.186 msaitoh PRINTQS(sc, VFRDBAH);
2921 1.186 msaitoh PRINTQS(sc, VFRDLEN);
2922 1.186 msaitoh PRINTQS(sc, VFSRRCTL);
2923 1.186 msaitoh PRINTQS(sc, VFRDH);
2924 1.186 msaitoh PRINTQS(sc, VFRDT);
2925 1.186 msaitoh PRINTQS(sc, VFRXDCTL);
2926 1.175 msaitoh
2927 1.175 msaitoh device_printf(dev, "EIMS:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_VTEIMS));
2928 1.175 msaitoh device_printf(dev, "EIAM:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_VTEIAM));
2929 1.175 msaitoh device_printf(dev, "EIAC:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_VTEIAC));
2930 1.58 msaitoh } /* ixv_print_debug_info */
2931 1.58 msaitoh
2932 1.58 msaitoh /************************************************************************
2933 1.58 msaitoh * ixv_sysctl_debug
2934 1.58 msaitoh ************************************************************************/
2935 1.57 msaitoh static int
2936 1.57 msaitoh ixv_sysctl_debug(SYSCTLFN_ARGS)
2937 1.57 msaitoh {
2938 1.97 msaitoh struct sysctlnode node = *rnode;
2939 1.186 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
2940 1.174 msaitoh int error, result = 0;
2941 1.57 msaitoh
2942 1.57 msaitoh node.sysctl_data = &result;
2943 1.57 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
2944 1.57 msaitoh
2945 1.58 msaitoh if (error || newp == NULL)
2946 1.57 msaitoh return error;
2947 1.57 msaitoh
2948 1.97 msaitoh if (result == 1)
2949 1.186 msaitoh ixv_print_debug_info(sc);
2950 1.57 msaitoh
2951 1.57 msaitoh return 0;
2952 1.58 msaitoh } /* ixv_sysctl_debug */
2953 1.58 msaitoh
2954 1.58 msaitoh /************************************************************************
2955 1.163 msaitoh * ixv_sysctl_rx_copy_len
2956 1.163 msaitoh ************************************************************************/
2957 1.163 msaitoh static int
2958 1.163 msaitoh ixv_sysctl_rx_copy_len(SYSCTLFN_ARGS)
2959 1.163 msaitoh {
2960 1.163 msaitoh struct sysctlnode node = *rnode;
2961 1.186 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
2962 1.163 msaitoh int error;
2963 1.186 msaitoh int result = sc->rx_copy_len;
2964 1.163 msaitoh
2965 1.163 msaitoh node.sysctl_data = &result;
2966 1.163 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
2967 1.163 msaitoh
2968 1.163 msaitoh if (error || newp == NULL)
2969 1.163 msaitoh return error;
2970 1.163 msaitoh
2971 1.163 msaitoh if ((result < 0) || (result > IXGBE_RX_COPY_LEN_MAX))
2972 1.163 msaitoh return EINVAL;
2973 1.163 msaitoh
2974 1.186 msaitoh sc->rx_copy_len = result;
2975 1.163 msaitoh
2976 1.163 msaitoh return 0;
2977 1.179 msaitoh } /* ixv_sysctl_rx_copy_len */
2978 1.179 msaitoh
2979 1.179 msaitoh /************************************************************************
2980 1.179 msaitoh * ixv_sysctl_tx_process_limit
2981 1.179 msaitoh ************************************************************************/
2982 1.179 msaitoh static int
2983 1.179 msaitoh ixv_sysctl_tx_process_limit(SYSCTLFN_ARGS)
2984 1.179 msaitoh {
2985 1.179 msaitoh struct sysctlnode node = *rnode;
2986 1.186 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
2987 1.179 msaitoh int error;
2988 1.186 msaitoh int result = sc->tx_process_limit;
2989 1.179 msaitoh
2990 1.179 msaitoh node.sysctl_data = &result;
2991 1.179 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
2992 1.179 msaitoh
2993 1.179 msaitoh if (error || newp == NULL)
2994 1.179 msaitoh return error;
2995 1.179 msaitoh
2996 1.186 msaitoh if ((result <= 0) || (result > sc->num_tx_desc))
2997 1.179 msaitoh return EINVAL;
2998 1.179 msaitoh
2999 1.186 msaitoh sc->tx_process_limit = result;
3000 1.179 msaitoh
3001 1.179 msaitoh return 0;
3002 1.179 msaitoh } /* ixv_sysctl_tx_process_limit */
3003 1.179 msaitoh
3004 1.179 msaitoh /************************************************************************
3005 1.179 msaitoh * ixv_sysctl_rx_process_limit
3006 1.179 msaitoh ************************************************************************/
3007 1.179 msaitoh static int
3008 1.179 msaitoh ixv_sysctl_rx_process_limit(SYSCTLFN_ARGS)
3009 1.179 msaitoh {
3010 1.179 msaitoh struct sysctlnode node = *rnode;
3011 1.186 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
3012 1.179 msaitoh int error;
3013 1.186 msaitoh int result = sc->rx_process_limit;
3014 1.179 msaitoh
3015 1.179 msaitoh node.sysctl_data = &result;
3016 1.179 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
3017 1.179 msaitoh
3018 1.179 msaitoh if (error || newp == NULL)
3019 1.179 msaitoh return error;
3020 1.179 msaitoh
3021 1.186 msaitoh if ((result <= 0) || (result > sc->num_rx_desc))
3022 1.179 msaitoh return EINVAL;
3023 1.179 msaitoh
3024 1.186 msaitoh sc->rx_process_limit = result;
3025 1.179 msaitoh
3026 1.179 msaitoh return 0;
3027 1.179 msaitoh } /* ixv_sysctl_rx_process_limit */
3028 1.163 msaitoh
3029 1.163 msaitoh /************************************************************************
3030 1.58 msaitoh * ixv_init_device_features
3031 1.58 msaitoh ************************************************************************/
3032 1.58 msaitoh static void
3033 1.186 msaitoh ixv_init_device_features(struct ixgbe_softc *sc)
3034 1.58 msaitoh {
3035 1.186 msaitoh sc->feat_cap = IXGBE_FEATURE_NETMAP
3036 1.114 msaitoh | IXGBE_FEATURE_VF
3037 1.114 msaitoh | IXGBE_FEATURE_RSS
3038 1.114 msaitoh | IXGBE_FEATURE_LEGACY_TX;
3039 1.58 msaitoh
3040 1.58 msaitoh /* A tad short on feature flags for VFs, atm. */
3041 1.186 msaitoh switch (sc->hw.mac.type) {
3042 1.58 msaitoh case ixgbe_mac_82599_vf:
3043 1.58 msaitoh break;
3044 1.58 msaitoh case ixgbe_mac_X540_vf:
3045 1.58 msaitoh break;
3046 1.58 msaitoh case ixgbe_mac_X550_vf:
3047 1.58 msaitoh case ixgbe_mac_X550EM_x_vf:
3048 1.58 msaitoh case ixgbe_mac_X550EM_a_vf:
3049 1.186 msaitoh sc->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
3050 1.58 msaitoh break;
3051 1.58 msaitoh default:
3052 1.58 msaitoh break;
3053 1.58 msaitoh }
3054 1.57 msaitoh
3055 1.58 msaitoh /* Enabled by default... */
3056 1.58 msaitoh /* Is a virtual function (VF) */
3057 1.186 msaitoh if (sc->feat_cap & IXGBE_FEATURE_VF)
3058 1.186 msaitoh sc->feat_en |= IXGBE_FEATURE_VF;
3059 1.58 msaitoh /* Netmap */
3060 1.186 msaitoh if (sc->feat_cap & IXGBE_FEATURE_NETMAP)
3061 1.186 msaitoh sc->feat_en |= IXGBE_FEATURE_NETMAP;
3062 1.58 msaitoh /* Receive-Side Scaling (RSS) */
3063 1.186 msaitoh if (sc->feat_cap & IXGBE_FEATURE_RSS)
3064 1.186 msaitoh sc->feat_en |= IXGBE_FEATURE_RSS;
3065 1.58 msaitoh /* Needs advanced context descriptor regardless of offloads req'd */
3066 1.186 msaitoh if (sc->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
3067 1.186 msaitoh sc->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
3068 1.58 msaitoh
3069 1.58 msaitoh /* Enabled via sysctl... */
3070 1.58 msaitoh /* Legacy (single queue) transmit */
3071 1.186 msaitoh if ((sc->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
3072 1.58 msaitoh ixv_enable_legacy_tx)
3073 1.186 msaitoh sc->feat_en |= IXGBE_FEATURE_LEGACY_TX;
3074 1.58 msaitoh } /* ixv_init_device_features */
3075 1.58 msaitoh
3076 1.58 msaitoh /************************************************************************
3077 1.58 msaitoh * ixv_shutdown - Shutdown entry point
3078 1.58 msaitoh ************************************************************************/
3079 1.57 msaitoh #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
3080 1.57 msaitoh static int
3081 1.57 msaitoh ixv_shutdown(device_t dev)
3082 1.57 msaitoh {
3083 1.186 msaitoh struct ixgbe_softc *sc = device_private(dev);
3084 1.186 msaitoh IXGBE_CORE_LOCK(sc);
3085 1.186 msaitoh ixv_stop_locked(sc);
3086 1.186 msaitoh IXGBE_CORE_UNLOCK(sc);
3087 1.57 msaitoh
3088 1.57 msaitoh return (0);
3089 1.58 msaitoh } /* ixv_shutdown */
3090 1.57 msaitoh #endif
3091 1.57 msaitoh
3092 1.57 msaitoh static int
3093 1.57 msaitoh ixv_ifflags_cb(struct ethercom *ec)
3094 1.57 msaitoh {
3095 1.57 msaitoh struct ifnet *ifp = &ec->ec_if;
3096 1.186 msaitoh struct ixgbe_softc *sc = ifp->if_softc;
3097 1.137 msaitoh u_short saved_flags;
3098 1.136 msaitoh u_short change;
3099 1.136 msaitoh int rv = 0;
3100 1.57 msaitoh
3101 1.186 msaitoh IXGBE_CORE_LOCK(sc);
3102 1.57 msaitoh
3103 1.186 msaitoh saved_flags = sc->if_flags;
3104 1.186 msaitoh change = ifp->if_flags ^ sc->if_flags;
3105 1.57 msaitoh if (change != 0)
3106 1.186 msaitoh sc->if_flags = ifp->if_flags;
3107 1.57 msaitoh
3108 1.118 msaitoh if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
3109 1.118 msaitoh rv = ENETRESET;
3110 1.118 msaitoh goto out;
3111 1.137 msaitoh } else if ((change & IFF_PROMISC) != 0) {
3112 1.186 msaitoh rv = ixv_set_rxfilter(sc);
3113 1.137 msaitoh if (rv != 0) {
3114 1.137 msaitoh /* Restore previous */
3115 1.186 msaitoh sc->if_flags = saved_flags;
3116 1.137 msaitoh goto out;
3117 1.137 msaitoh }
3118 1.118 msaitoh }
3119 1.57 msaitoh
3120 1.120 msaitoh /* Check for ec_capenable. */
3121 1.186 msaitoh change = ec->ec_capenable ^ sc->ec_capenable;
3122 1.186 msaitoh sc->ec_capenable = ec->ec_capenable;
3123 1.120 msaitoh if ((change & ~(ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
3124 1.120 msaitoh | ETHERCAP_VLAN_HWFILTER)) != 0) {
3125 1.120 msaitoh rv = ENETRESET;
3126 1.120 msaitoh goto out;
3127 1.120 msaitoh }
3128 1.120 msaitoh
3129 1.120 msaitoh /*
3130 1.120 msaitoh * Special handling is not required for ETHERCAP_VLAN_MTU.
3131 1.120 msaitoh * PF's MAXFRS(MHADD) does not include the 4bytes of the VLAN header.
3132 1.120 msaitoh */
3133 1.120 msaitoh
3134 1.65 msaitoh /* Set up VLAN support and filter */
3135 1.120 msaitoh if ((change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_HWFILTER)) != 0)
3136 1.186 msaitoh rv = ixv_setup_vlan_support(sc);
3137 1.65 msaitoh
3138 1.118 msaitoh out:
3139 1.186 msaitoh IXGBE_CORE_UNLOCK(sc);
3140 1.57 msaitoh
3141 1.118 msaitoh return rv;
3142 1.57 msaitoh }
3143 1.57 msaitoh
3144 1.58 msaitoh
3145 1.58 msaitoh /************************************************************************
3146 1.58 msaitoh * ixv_ioctl - Ioctl entry point
3147 1.57 msaitoh *
3148 1.58 msaitoh * Called when the user wants to configure the interface.
3149 1.57 msaitoh *
3150 1.58 msaitoh * return 0 on success, positive on failure
3151 1.58 msaitoh ************************************************************************/
3152 1.57 msaitoh static int
3153 1.58 msaitoh ixv_ioctl(struct ifnet *ifp, u_long command, void *data)
3154 1.57 msaitoh {
3155 1.186 msaitoh struct ixgbe_softc *sc = ifp->if_softc;
3156 1.186 msaitoh struct ixgbe_hw *hw = &sc->hw;
3157 1.57 msaitoh struct ifcapreq *ifcr = data;
3158 1.134 msaitoh int error;
3159 1.57 msaitoh int l4csum_en;
3160 1.113 msaitoh const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
3161 1.113 msaitoh IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
3162 1.57 msaitoh
3163 1.57 msaitoh switch (command) {
3164 1.57 msaitoh case SIOCSIFFLAGS:
3165 1.57 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
3166 1.57 msaitoh break;
3167 1.132 msaitoh case SIOCADDMULTI: {
3168 1.132 msaitoh struct ether_multi *enm;
3169 1.132 msaitoh struct ether_multistep step;
3170 1.186 msaitoh struct ethercom *ec = &sc->osdep.ec;
3171 1.134 msaitoh bool overflow = false;
3172 1.132 msaitoh int mcnt = 0;
3173 1.132 msaitoh
3174 1.132 msaitoh /*
3175 1.132 msaitoh * Check the number of multicast address. If it exceeds,
3176 1.132 msaitoh * return ENOSPC.
3177 1.132 msaitoh * Update this code when we support API 1.3.
3178 1.132 msaitoh */
3179 1.132 msaitoh ETHER_LOCK(ec);
3180 1.132 msaitoh ETHER_FIRST_MULTI(step, ec, enm);
3181 1.132 msaitoh while (enm != NULL) {
3182 1.132 msaitoh mcnt++;
3183 1.132 msaitoh
3184 1.132 msaitoh /*
3185 1.132 msaitoh * This code is before adding, so one room is required
3186 1.132 msaitoh * at least.
3187 1.132 msaitoh */
3188 1.132 msaitoh if (mcnt > (IXGBE_MAX_VF_MC - 1)) {
3189 1.134 msaitoh overflow = true;
3190 1.132 msaitoh break;
3191 1.132 msaitoh }
3192 1.132 msaitoh ETHER_NEXT_MULTI(step, enm);
3193 1.132 msaitoh }
3194 1.132 msaitoh ETHER_UNLOCK(ec);
3195 1.134 msaitoh error = 0;
3196 1.134 msaitoh if (overflow && ((ec->ec_flags & ETHER_F_ALLMULTI) == 0)) {
3197 1.134 msaitoh error = hw->mac.ops.update_xcast_mode(hw,
3198 1.134 msaitoh IXGBEVF_XCAST_MODE_ALLMULTI);
3199 1.134 msaitoh if (error == IXGBE_ERR_NOT_TRUSTED) {
3200 1.186 msaitoh device_printf(sc->dev,
3201 1.134 msaitoh "this interface is not trusted\n");
3202 1.135 msaitoh error = EPERM;
3203 1.135 msaitoh } else if (error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) {
3204 1.186 msaitoh device_printf(sc->dev,
3205 1.135 msaitoh "the PF doesn't support allmulti mode\n");
3206 1.135 msaitoh error = EOPNOTSUPP;
3207 1.134 msaitoh } else if (error) {
3208 1.186 msaitoh device_printf(sc->dev,
3209 1.134 msaitoh "number of Ethernet multicast addresses "
3210 1.134 msaitoh "exceeds the limit (%d). error = %d\n",
3211 1.134 msaitoh IXGBE_MAX_VF_MC, error);
3212 1.134 msaitoh error = ENOSPC;
3213 1.134 msaitoh } else
3214 1.134 msaitoh ec->ec_flags |= ETHER_F_ALLMULTI;
3215 1.134 msaitoh }
3216 1.132 msaitoh if (error)
3217 1.132 msaitoh return error;
3218 1.132 msaitoh }
3219 1.132 msaitoh /*FALLTHROUGH*/
3220 1.57 msaitoh case SIOCDELMULTI:
3221 1.57 msaitoh IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
3222 1.57 msaitoh break;
3223 1.57 msaitoh case SIOCSIFMEDIA:
3224 1.57 msaitoh case SIOCGIFMEDIA:
3225 1.57 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
3226 1.57 msaitoh break;
3227 1.57 msaitoh case SIOCSIFCAP:
3228 1.57 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
3229 1.57 msaitoh break;
3230 1.57 msaitoh case SIOCSIFMTU:
3231 1.57 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
3232 1.57 msaitoh break;
3233 1.131 msaitoh case SIOCZIFDATA:
3234 1.131 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
3235 1.186 msaitoh ixv_update_stats(sc);
3236 1.186 msaitoh ixv_clear_evcnt(sc);
3237 1.131 msaitoh break;
3238 1.57 msaitoh default:
3239 1.57 msaitoh IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
3240 1.57 msaitoh break;
3241 1.57 msaitoh }
3242 1.57 msaitoh
3243 1.57 msaitoh switch (command) {
3244 1.57 msaitoh case SIOCSIFCAP:
3245 1.57 msaitoh /* Layer-4 Rx checksum offload has to be turned on and
3246 1.57 msaitoh * off as a unit.
3247 1.57 msaitoh */
3248 1.57 msaitoh l4csum_en = ifcr->ifcr_capenable & l4csum;
3249 1.57 msaitoh if (l4csum_en != l4csum && l4csum_en != 0)
3250 1.57 msaitoh return EINVAL;
3251 1.57 msaitoh /*FALLTHROUGH*/
3252 1.57 msaitoh case SIOCADDMULTI:
3253 1.57 msaitoh case SIOCDELMULTI:
3254 1.57 msaitoh case SIOCSIFFLAGS:
3255 1.57 msaitoh case SIOCSIFMTU:
3256 1.57 msaitoh default:
3257 1.57 msaitoh if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
3258 1.57 msaitoh return error;
3259 1.57 msaitoh if ((ifp->if_flags & IFF_RUNNING) == 0)
3260 1.57 msaitoh ;
3261 1.57 msaitoh else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
3262 1.186 msaitoh IXGBE_CORE_LOCK(sc);
3263 1.186 msaitoh ixv_init_locked(sc);
3264 1.186 msaitoh IXGBE_CORE_UNLOCK(sc);
3265 1.57 msaitoh } else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
3266 1.57 msaitoh /*
3267 1.57 msaitoh * Multicast list has changed; set the hardware filter
3268 1.57 msaitoh * accordingly.
3269 1.57 msaitoh */
3270 1.186 msaitoh IXGBE_CORE_LOCK(sc);
3271 1.186 msaitoh ixv_disable_intr(sc);
3272 1.186 msaitoh ixv_set_rxfilter(sc);
3273 1.186 msaitoh ixv_enable_intr(sc);
3274 1.186 msaitoh IXGBE_CORE_UNLOCK(sc);
3275 1.57 msaitoh }
3276 1.57 msaitoh return 0;
3277 1.57 msaitoh }
3278 1.58 msaitoh } /* ixv_ioctl */
3279 1.57 msaitoh
3280 1.58 msaitoh /************************************************************************
3281 1.58 msaitoh * ixv_init
3282 1.58 msaitoh ************************************************************************/
3283 1.57 msaitoh static int
3284 1.57 msaitoh ixv_init(struct ifnet *ifp)
3285 1.57 msaitoh {
3286 1.186 msaitoh struct ixgbe_softc *sc = ifp->if_softc;
3287 1.57 msaitoh
3288 1.186 msaitoh IXGBE_CORE_LOCK(sc);
3289 1.186 msaitoh ixv_init_locked(sc);
3290 1.186 msaitoh IXGBE_CORE_UNLOCK(sc);
3291 1.57 msaitoh
3292 1.57 msaitoh return 0;
3293 1.58 msaitoh } /* ixv_init */
3294 1.57 msaitoh
3295 1.58 msaitoh /************************************************************************
3296 1.58 msaitoh * ixv_handle_que
3297 1.58 msaitoh ************************************************************************/
3298 1.57 msaitoh static void
3299 1.57 msaitoh ixv_handle_que(void *context)
3300 1.57 msaitoh {
3301 1.57 msaitoh struct ix_queue *que = context;
3302 1.186 msaitoh struct ixgbe_softc *sc = que->sc;
3303 1.57 msaitoh struct tx_ring *txr = que->txr;
3304 1.186 msaitoh struct ifnet *ifp = sc->ifp;
3305 1.57 msaitoh bool more;
3306 1.57 msaitoh
3307 1.178 msaitoh IXGBE_EVC_ADD(&que->handleq, 1);
3308 1.57 msaitoh
3309 1.57 msaitoh if (ifp->if_flags & IFF_RUNNING) {
3310 1.57 msaitoh IXGBE_TX_LOCK(txr);
3311 1.183 msaitoh more = ixgbe_txeof(txr);
3312 1.186 msaitoh if (!(sc->feat_en & IXGBE_FEATURE_LEGACY_TX))
3313 1.58 msaitoh if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
3314 1.58 msaitoh ixgbe_mq_start_locked(ifp, txr);
3315 1.57 msaitoh /* Only for queue 0 */
3316 1.61 msaitoh /* NetBSD still needs this for CBQ */
3317 1.186 msaitoh if ((&sc->queues[0] == que)
3318 1.58 msaitoh && (!ixgbe_legacy_ring_empty(ifp, NULL)))
3319 1.58 msaitoh ixgbe_legacy_start_locked(ifp, txr);
3320 1.57 msaitoh IXGBE_TX_UNLOCK(txr);
3321 1.183 msaitoh more |= ixgbe_rxeof(que);
3322 1.57 msaitoh if (more) {
3323 1.178 msaitoh IXGBE_EVC_ADD(&que->req, 1);
3324 1.186 msaitoh if (sc->txrx_use_workqueue) {
3325 1.84 knakahar /*
3326 1.84 knakahar * "enqueued flag" is not required here
3327 1.84 knakahar * the same as ixg(4). See ixgbe_msix_que().
3328 1.84 knakahar */
3329 1.186 msaitoh workqueue_enqueue(sc->que_wq,
3330 1.84 knakahar &que->wq_cookie, curcpu());
3331 1.84 knakahar } else
3332 1.84 knakahar softint_schedule(que->que_si);
3333 1.57 msaitoh return;
3334 1.57 msaitoh }
3335 1.57 msaitoh }
3336 1.57 msaitoh
3337 1.58 msaitoh /* Re-enable this interrupt */
3338 1.186 msaitoh ixv_enable_queue(sc, que->msix);
3339 1.57 msaitoh
3340 1.57 msaitoh return;
3341 1.58 msaitoh } /* ixv_handle_que */
3342 1.57 msaitoh
3343 1.58 msaitoh /************************************************************************
3344 1.84 knakahar * ixv_handle_que_work
3345 1.84 knakahar ************************************************************************/
3346 1.84 knakahar static void
3347 1.84 knakahar ixv_handle_que_work(struct work *wk, void *context)
3348 1.84 knakahar {
3349 1.84 knakahar struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
3350 1.84 knakahar
3351 1.84 knakahar /*
3352 1.84 knakahar * "enqueued flag" is not required here the same as ixg(4).
3353 1.84 knakahar * See ixgbe_msix_que().
3354 1.84 knakahar */
3355 1.84 knakahar ixv_handle_que(que);
3356 1.84 knakahar }
3357 1.84 knakahar
3358 1.84 knakahar /************************************************************************
3359 1.58 msaitoh * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
3360 1.58 msaitoh ************************************************************************/
3361 1.57 msaitoh static int
3362 1.186 msaitoh ixv_allocate_msix(struct ixgbe_softc *sc, const struct pci_attach_args *pa)
3363 1.57 msaitoh {
3364 1.186 msaitoh device_t dev = sc->dev;
3365 1.186 msaitoh struct ix_queue *que = sc->queues;
3366 1.190 msaitoh struct tx_ring *txr = sc->tx_rings;
3367 1.114 msaitoh int error, msix_ctrl, rid, vector = 0;
3368 1.57 msaitoh pci_chipset_tag_t pc;
3369 1.57 msaitoh pcitag_t tag;
3370 1.57 msaitoh char intrbuf[PCI_INTRSTR_LEN];
3371 1.84 knakahar char wqname[MAXCOMLEN];
3372 1.57 msaitoh char intr_xname[32];
3373 1.57 msaitoh const char *intrstr = NULL;
3374 1.57 msaitoh kcpuset_t *affinity;
3375 1.57 msaitoh int cpu_id = 0;
3376 1.57 msaitoh
3377 1.186 msaitoh pc = sc->osdep.pc;
3378 1.186 msaitoh tag = sc->osdep.tag;
3379 1.57 msaitoh
3380 1.186 msaitoh sc->osdep.nintrs = sc->num_queues + 1;
3381 1.186 msaitoh if (pci_msix_alloc_exact(pa, &sc->osdep.intrs,
3382 1.186 msaitoh sc->osdep.nintrs) != 0) {
3383 1.57 msaitoh aprint_error_dev(dev,
3384 1.57 msaitoh "failed to allocate MSI-X interrupt\n");
3385 1.57 msaitoh return (ENXIO);
3386 1.57 msaitoh }
3387 1.57 msaitoh
3388 1.57 msaitoh kcpuset_create(&affinity, false);
3389 1.186 msaitoh for (int i = 0; i < sc->num_queues; i++, vector++, que++, txr++) {
3390 1.57 msaitoh snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
3391 1.57 msaitoh device_xname(dev), i);
3392 1.186 msaitoh intrstr = pci_intr_string(pc, sc->osdep.intrs[i], intrbuf,
3393 1.57 msaitoh sizeof(intrbuf));
3394 1.186 msaitoh pci_intr_setattr(pc, &sc->osdep.intrs[i], PCI_INTR_MPSAFE,
3395 1.57 msaitoh true);
3396 1.197 msaitoh
3397 1.57 msaitoh /* Set the handler function */
3398 1.186 msaitoh que->res = sc->osdep.ihs[i] = pci_intr_establish_xname(pc,
3399 1.186 msaitoh sc->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
3400 1.57 msaitoh intr_xname);
3401 1.57 msaitoh if (que->res == NULL) {
3402 1.186 msaitoh pci_intr_release(pc, sc->osdep.intrs,
3403 1.186 msaitoh sc->osdep.nintrs);
3404 1.57 msaitoh aprint_error_dev(dev,
3405 1.57 msaitoh "Failed to register QUE handler\n");
3406 1.57 msaitoh kcpuset_destroy(affinity);
3407 1.57 msaitoh return (ENXIO);
3408 1.57 msaitoh }
3409 1.57 msaitoh que->msix = vector;
3410 1.186 msaitoh sc->active_queues |= (u64)(1 << que->msix);
3411 1.57 msaitoh
3412 1.57 msaitoh cpu_id = i;
3413 1.57 msaitoh /* Round-robin affinity */
3414 1.57 msaitoh kcpuset_zero(affinity);
3415 1.57 msaitoh kcpuset_set(affinity, cpu_id % ncpu);
3416 1.186 msaitoh error = interrupt_distribute(sc->osdep.ihs[i], affinity, NULL);
3417 1.57 msaitoh aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
3418 1.57 msaitoh intrstr);
3419 1.57 msaitoh if (error == 0)
3420 1.57 msaitoh aprint_normal(", bound queue %d to cpu %d\n",
3421 1.57 msaitoh i, cpu_id % ncpu);
3422 1.57 msaitoh else
3423 1.57 msaitoh aprint_normal("\n");
3424 1.57 msaitoh
3425 1.57 msaitoh #ifndef IXGBE_LEGACY_TX
3426 1.57 msaitoh txr->txr_si
3427 1.198 msaitoh = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
3428 1.57 msaitoh ixgbe_deferred_mq_start, txr);
3429 1.57 msaitoh #endif
3430 1.57 msaitoh que->que_si
3431 1.198 msaitoh = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
3432 1.57 msaitoh ixv_handle_que, que);
3433 1.57 msaitoh if (que->que_si == NULL) {
3434 1.57 msaitoh aprint_error_dev(dev,
3435 1.113 msaitoh "could not establish software interrupt\n");
3436 1.57 msaitoh }
3437 1.57 msaitoh }
3438 1.84 knakahar snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
3439 1.186 msaitoh error = workqueue_create(&sc->txr_wq, wqname,
3440 1.186 msaitoh ixgbe_deferred_mq_start_work, sc, IXGBE_WORKQUEUE_PRI, IPL_NET,
3441 1.198 msaitoh WQ_PERCPU | WQ_MPSAFE);
3442 1.84 knakahar if (error) {
3443 1.158 msaitoh aprint_error_dev(dev,
3444 1.158 msaitoh "couldn't create workqueue for deferred Tx\n");
3445 1.84 knakahar }
3446 1.186 msaitoh sc->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
3447 1.84 knakahar
3448 1.84 knakahar snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
3449 1.186 msaitoh error = workqueue_create(&sc->que_wq, wqname,
3450 1.186 msaitoh ixv_handle_que_work, sc, IXGBE_WORKQUEUE_PRI, IPL_NET,
3451 1.198 msaitoh WQ_PERCPU | WQ_MPSAFE);
3452 1.84 knakahar if (error) {
3453 1.158 msaitoh aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
3454 1.84 knakahar }
3455 1.57 msaitoh
3456 1.57 msaitoh /* and Mailbox */
3457 1.57 msaitoh cpu_id++;
3458 1.57 msaitoh snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
3459 1.186 msaitoh sc->vector = vector;
3460 1.186 msaitoh intrstr = pci_intr_string(pc, sc->osdep.intrs[vector], intrbuf,
3461 1.57 msaitoh sizeof(intrbuf));
3462 1.197 msaitoh pci_intr_setattr(pc, &sc->osdep.intrs[vector], PCI_INTR_MPSAFE, true);
3463 1.197 msaitoh
3464 1.57 msaitoh /* Set the mbx handler function */
3465 1.186 msaitoh sc->osdep.ihs[vector] = pci_intr_establish_xname(pc,
3466 1.186 msaitoh sc->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, sc, intr_xname);
3467 1.186 msaitoh if (sc->osdep.ihs[vector] == NULL) {
3468 1.57 msaitoh aprint_error_dev(dev, "Failed to register LINK handler\n");
3469 1.57 msaitoh kcpuset_destroy(affinity);
3470 1.57 msaitoh return (ENXIO);
3471 1.57 msaitoh }
3472 1.57 msaitoh /* Round-robin affinity */
3473 1.57 msaitoh kcpuset_zero(affinity);
3474 1.57 msaitoh kcpuset_set(affinity, cpu_id % ncpu);
3475 1.186 msaitoh error = interrupt_distribute(sc->osdep.ihs[vector], affinity,
3476 1.150 msaitoh NULL);
3477 1.57 msaitoh
3478 1.57 msaitoh aprint_normal_dev(dev,
3479 1.57 msaitoh "for link, interrupting at %s", intrstr);
3480 1.57 msaitoh if (error == 0)
3481 1.57 msaitoh aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
3482 1.57 msaitoh else
3483 1.57 msaitoh aprint_normal("\n");
3484 1.57 msaitoh
3485 1.57 msaitoh /* Tasklets for Mailbox */
3486 1.151 msaitoh snprintf(wqname, sizeof(wqname), "%s-admin", device_xname(dev));
3487 1.186 msaitoh error = workqueue_create(&sc->admin_wq, wqname,
3488 1.198 msaitoh ixv_handle_admin, sc, IXGBE_WORKQUEUE_PRI, IPL_NET, WQ_MPSAFE);
3489 1.151 msaitoh if (error) {
3490 1.151 msaitoh aprint_error_dev(dev,
3491 1.151 msaitoh "could not create admin workqueue (%d)\n", error);
3492 1.151 msaitoh goto err_out;
3493 1.151 msaitoh }
3494 1.151 msaitoh
3495 1.57 msaitoh /*
3496 1.58 msaitoh * Due to a broken design QEMU will fail to properly
3497 1.58 msaitoh * enable the guest for MSI-X unless the vectors in
3498 1.58 msaitoh * the table are all set up, so we must rewrite the
3499 1.58 msaitoh * ENABLE in the MSI-X control register again at this
3500 1.58 msaitoh * point to cause it to successfully initialize us.
3501 1.58 msaitoh */
3502 1.186 msaitoh if (sc->hw.mac.type == ixgbe_mac_82599_vf) {
3503 1.57 msaitoh pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
3504 1.57 msaitoh rid += PCI_MSIX_CTL;
3505 1.57 msaitoh msix_ctrl = pci_conf_read(pc, tag, rid);
3506 1.57 msaitoh msix_ctrl |= PCI_MSIX_CTL_ENABLE;
3507 1.57 msaitoh pci_conf_write(pc, tag, rid, msix_ctrl);
3508 1.57 msaitoh }
3509 1.57 msaitoh
3510 1.57 msaitoh kcpuset_destroy(affinity);
3511 1.57 msaitoh return (0);
3512 1.151 msaitoh err_out:
3513 1.151 msaitoh kcpuset_destroy(affinity);
3514 1.186 msaitoh ixv_free_deferred_handlers(sc);
3515 1.186 msaitoh ixv_free_pci_resources(sc);
3516 1.151 msaitoh return (error);
3517 1.58 msaitoh } /* ixv_allocate_msix */
3518 1.57 msaitoh
3519 1.58 msaitoh /************************************************************************
3520 1.58 msaitoh * ixv_configure_interrupts - Setup MSI-X resources
3521 1.58 msaitoh *
3522 1.58 msaitoh * Note: The VF device MUST use MSI-X, there is no fallback.
3523 1.58 msaitoh ************************************************************************/
3524 1.57 msaitoh static int
3525 1.186 msaitoh ixv_configure_interrupts(struct ixgbe_softc *sc)
3526 1.57 msaitoh {
3527 1.186 msaitoh device_t dev = sc->dev;
3528 1.57 msaitoh int want, queues, msgs;
3529 1.57 msaitoh
3530 1.58 msaitoh /* Must have at least 2 MSI-X vectors */
3531 1.186 msaitoh msgs = pci_msix_count(sc->osdep.pc, sc->osdep.tag);
3532 1.57 msaitoh if (msgs < 2) {
3533 1.63 msaitoh aprint_error_dev(dev, "MSIX config error\n");
3534 1.57 msaitoh return (ENXIO);
3535 1.57 msaitoh }
3536 1.57 msaitoh msgs = MIN(msgs, IXG_MAX_NINTR);
3537 1.57 msaitoh
3538 1.57 msaitoh /* Figure out a reasonable auto config value */
3539 1.57 msaitoh queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
3540 1.57 msaitoh
3541 1.57 msaitoh if (ixv_num_queues != 0)
3542 1.57 msaitoh queues = ixv_num_queues;
3543 1.57 msaitoh else if ((ixv_num_queues == 0) && (queues > IXGBE_VF_MAX_TX_QUEUES))
3544 1.57 msaitoh queues = IXGBE_VF_MAX_TX_QUEUES;
3545 1.57 msaitoh
3546 1.57 msaitoh /*
3547 1.58 msaitoh * Want vectors for the queues,
3548 1.58 msaitoh * plus an additional for mailbox.
3549 1.58 msaitoh */
3550 1.57 msaitoh want = queues + 1;
3551 1.57 msaitoh if (msgs >= want)
3552 1.57 msaitoh msgs = want;
3553 1.57 msaitoh else {
3554 1.114 msaitoh aprint_error_dev(dev,
3555 1.58 msaitoh "MSI-X Configuration Problem, "
3556 1.182 msaitoh "%d vectors but %d queues wanted!\n", msgs, want);
3557 1.57 msaitoh return -1;
3558 1.57 msaitoh }
3559 1.57 msaitoh
3560 1.57 msaitoh aprint_normal_dev(dev,
3561 1.58 msaitoh "Using MSI-X interrupts with %d vectors\n", msgs);
3562 1.186 msaitoh sc->num_queues = queues;
3563 1.57 msaitoh
3564 1.58 msaitoh return (0);
3565 1.58 msaitoh } /* ixv_configure_interrupts */
3566 1.58 msaitoh
3567 1.58 msaitoh
3568 1.58 msaitoh /************************************************************************
3569 1.151 msaitoh * ixv_handle_admin - Tasklet handler for MSI-X MBX interrupts
3570 1.58 msaitoh *
3571 1.58 msaitoh * Done outside of interrupt context since the driver might sleep
3572 1.58 msaitoh ************************************************************************/
3573 1.57 msaitoh static void
3574 1.151 msaitoh ixv_handle_admin(struct work *wk, void *context)
3575 1.57 msaitoh {
3576 1.186 msaitoh struct ixgbe_softc *sc = context;
3577 1.186 msaitoh struct ixgbe_hw *hw = &sc->hw;
3578 1.57 msaitoh
3579 1.186 msaitoh IXGBE_CORE_LOCK(sc);
3580 1.89 knakahar
	/* Query current link state and push any change to the stack. */
3581 1.186 msaitoh IXGBE_EVC_ADD(&sc->link_workev, 1);
3582 1.186 msaitoh sc->hw.mac.ops.check_link(&sc->hw, &sc->link_speed,
3583 1.186 msaitoh &sc->link_up, FALSE);
3584 1.186 msaitoh ixv_update_link_status(sc);
3585 1.89 knakahar
	/*
	 * Clear the recorded task requests and the admin_pending flag
	 * before re-enabling the interrupt below — presumably so a new
	 * mailbox event can requeue this work; confirm against the
	 * MSI-X link interrupt handler that sets admin_pending.
	 */
3586 1.186 msaitoh sc->task_requests = 0;
3587 1.186 msaitoh atomic_store_relaxed(&sc->admin_pending, 0);
3588 1.151 msaitoh
3589 1.151 msaitoh /* Re-enable interrupts */
3590 1.186 msaitoh IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << sc->vector));
3591 1.151 msaitoh
3592 1.186 msaitoh IXGBE_CORE_UNLOCK(sc);
3593 1.151 msaitoh } /* ixv_handle_admin */
3594 1.57 msaitoh
3595 1.58 msaitoh /************************************************************************
3596 1.58 msaitoh * ixv_check_link - Used in the local timer to poll for link changes
3597 1.58 msaitoh ************************************************************************/
3598 1.117 msaitoh static s32
3599 1.186 msaitoh ixv_check_link(struct ixgbe_softc *sc)
3600 1.57 msaitoh {
3601 1.117 msaitoh s32 error;
3602 1.89 knakahar
3603 1.186 msaitoh KASSERT(mutex_owned(&sc->core_mtx));
3604 1.89 knakahar
3605 1.186 msaitoh sc->hw.mac.get_link_status = TRUE;
3606 1.57 msaitoh
3607 1.186 msaitoh error = sc->hw.mac.ops.check_link(&sc->hw,
3608 1.186 msaitoh &sc->link_speed, &sc->link_up, FALSE);
3609 1.186 msaitoh ixv_update_link_status(sc);
3610 1.117 msaitoh
3611 1.117 msaitoh return error;
3612 1.58 msaitoh } /* ixv_check_link */
3613