/*	$NetBSD: if_iavf.c,v 1.20 2025/03/23 18:38:49 joe Exp $	*/
2
3 /*
4 * Copyright (c) 2013-2015, Intel Corporation
5 * All rights reserved.
6
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
12 *
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * 3. Neither the name of the Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 /*
35 * Copyright (c) 2016,2017 David Gwynne <dlg (at) openbsd.org>
36 * Copyright (c) 2019 Jonathan Matthew <jmatthew (at) openbsd.org>
37 *
38 * Permission to use, copy, modify, and distribute this software for any
39 * purpose with or without fee is hereby granted, provided that the above
40 * copyright notice and this permission notice appear in all copies.
41 *
42 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
43 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
44 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
45 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
46 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
47 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
48 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
49 */
50
51 /*
52 * Copyright (c) 2020 Internet Initiative Japan, Inc.
53 * All rights reserved.
54 *
55 * Redistribution and use in source and binary forms, with or without
56 * modification, are permitted provided that the following conditions
57 * are met:
58 * 1. Redistributions of source code must retain the above copyright
59 * notice, this list of conditions and the following disclaimer.
60 * 2. Redistributions in binary form must reproduce the above copyright
61 * notice, this list of conditions and the following disclaimer in the
62 * documentation and/or other materials provided with the distribution.
63 *
64 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
65 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
66 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
67 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
68 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
69 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
70 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
71 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
72 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
73 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
74 * POSSIBILITY OF SUCH DAMAGE.
75 */
76
77 #include <sys/cdefs.h>
78 __KERNEL_RCSID(0, "$NetBSD: if_iavf.c,v 1.20 2025/03/23 18:38:49 joe Exp $");
79
80 #include <sys/param.h>
81 #include <sys/types.h>
82
83 #include <sys/bitops.h>
84 #include <sys/bus.h>
85 #include <sys/cprng.h>
86 #include <sys/cpu.h>
87 #include <sys/device.h>
88 #include <sys/evcnt.h>
89 #include <sys/interrupt.h>
90 #include <sys/kmem.h>
91 #include <sys/module.h>
92 #include <sys/mutex.h>
93 #include <sys/pcq.h>
94 #include <sys/queue.h>
95 #include <sys/syslog.h>
96 #include <sys/workqueue.h>
97 #include <sys/xcall.h>
98
99 #include <net/bpf.h>
100 #include <net/if.h>
101 #include <net/if_dl.h>
102 #include <net/if_media.h>
103 #include <net/if_ether.h>
104 #include <net/rss_config.h>
105
106 #include <netinet/tcp.h> /* for struct tcphdr */
107 #include <netinet/udp.h> /* for struct udphdr */
108
109 #include <dev/pci/pcivar.h>
110 #include <dev/pci/pcidevs.h>
111
112 #include <dev/pci/if_ixlreg.h>
113 #include <dev/pci/if_ixlvar.h>
114 #include <dev/pci/if_iavfvar.h>
115
116 #include <prop/proplib.h>
117
/* PCI BAR and admin queue (AQ) geometry */
#define IAVF_PCIREG		PCI_MAPREG_START	/* BAR 0: device registers */
#define IAVF_AQ_NUM		256	/* descriptors per admin queue */
#define IAVF_AQ_MASK		(IAVF_AQ_NUM-1)	/* ring index mask (power of 2) */
#define IAVF_AQ_ALIGN		64
#define IAVF_AQ_BUFLEN		4096
#define I40E_AQ_LARGE_BUF	512
/* virtchnl API version implemented by this driver */
#define IAVF_VF_MAJOR		1
#define IAVF_VF_MINOR		1

/* VF reset states (low bits of I40E_VFGEN_RSTAT) */
#define IAVF_VFR_INPROGRESS	0
#define IAVF_VFR_COMPLETED	1
#define IAVF_VFR_VFACTIVE	2

#define IAVF_REG_VFR		0xdeadbeef

/* interrupt throttling rate (ITR) register indexes */
#define IAVF_ITR_RX		0x0
#define IAVF_ITR_TX		0x1
#define IAVF_ITR_MISC		0x2
#define IAVF_NOITR		0x3	/* ITR index meaning "no throttling" */

/* MTU limits: 9600 is the hardware maximum frame size */
#define IAVF_MTU_ETHERLEN	(ETHER_HDR_LEN \
				 + ETHER_CRC_LEN)
#define IAVF_MAX_MTU		(9600 - IAVF_MTU_ETHERLEN)
#define IAVF_MIN_MTU		(ETHER_MIN_LEN - ETHER_CRC_LEN)

#define IAVF_WORKQUEUE_PRI	PRI_SOFTNET

/* TX/RX ring parameters */
#define IAVF_TX_PKT_DESCS	8	/* max descriptors per TX packet */
#define IAVF_TX_QUEUE_ALIGN	128
#define IAVF_RX_QUEUE_ALIGN	128
#define IAVF_TX_PKT_MAXSIZE	(MCLBYTES * IAVF_TX_PKT_DESCS)
#define IAVF_MCLBYTES		(MCLBYTES - ETHER_ALIGN)

#define IAVF_TICK_INTERVAL	(5 * hz)	/* periodic tick: every 5s */
#define IAVF_WATCHDOG_TICKS	3
#define IAVF_WATCHDOG_STOP	0

/* per-pass descriptor processing budgets for txeof/rxeof */
#define IAVF_TXRX_PROCESS_UNLIMIT	UINT_MAX
#define IAVF_TX_PROCESS_LIMIT		256
#define IAVF_RX_PROCESS_LIMIT		256
#define IAVF_TX_INTR_PROCESS_LIMIT	256
#define IAVF_RX_INTR_PROCESS_LIMIT	0U

#define IAVF_EXEC_TIMEOUT	3000	/* admin command timeout (ms) */

/* checksum offload capability sets advertised on the ifnet */
#define IAVF_IFCAP_RXCSUM	(IFCAP_CSUM_IPv4_Rx |	\
				 IFCAP_CSUM_TCPv4_Rx |	\
				 IFCAP_CSUM_UDPv4_Rx |	\
				 IFCAP_CSUM_TCPv6_Rx |	\
				 IFCAP_CSUM_UDPv6_Rx)
#define IAVF_IFCAP_TXCSUM	(IFCAP_CSUM_IPv4_Tx |	\
				 IFCAP_CSUM_TCPv4_Tx |	\
				 IFCAP_CSUM_UDPv4_Tx |	\
				 IFCAP_CSUM_TCPv6_Tx |	\
				 IFCAP_CSUM_UDPv6_Tx)
#define IAVF_CSUM_ALL_OFFLOAD	(M_CSUM_IPv4 |			\
				 M_CSUM_TCPv4 | M_CSUM_TCPv6 |	\
				 M_CSUM_UDPv4 | M_CSUM_UDPv6)
struct iavf_softc;	/* defined below */

/* tunables copied into the softc at attach time (see iavf_params) */
struct iavf_module_params {
	int		 debug;		/* initial sc_debuglevel */
	uint32_t	 rx_itr;	/* RX interrupt throttling value */
	uint32_t	 tx_itr;	/* TX interrupt throttling value */
	unsigned int	 rx_ndescs;	/* RX descriptors per ring */
	unsigned int	 tx_ndescs;	/* TX descriptors per ring */
	int		 max_qps;	/* upper bound on queue pairs */
};

/* PCI vendor/product pair used for device matching */
struct iavf_product {
	unsigned int	 vendor_id;
	unsigned int	 product_id;
};

/* maps a PF-reported link speed index to baudrate and ifmedia word */
struct iavf_link_speed {
	uint64_t	 baudrate;
	uint64_t	 media;
};
197
/*
 * Register offsets and bit masks describing one admin-queue pair:
 * ATQ (send commands to the PF) and ARQ (receive events from the PF).
 */
struct iavf_aq_regs {
	bus_size_t		 atq_tail;
	bus_size_t		 atq_head;
	bus_size_t		 atq_len;
	bus_size_t		 atq_bal;	/* base address low */
	bus_size_t		 atq_bah;	/* base address high */

	bus_size_t		 arq_tail;
	bus_size_t		 arq_head;
	bus_size_t		 arq_len;
	bus_size_t		 arq_bal;	/* base address low */
	bus_size_t		 arq_bah;	/* base address high */

	uint32_t		 atq_len_enable;	/* enable bit in atq_len */
	uint32_t		 atq_tail_mask;
	uint32_t		 atq_head_mask;

	uint32_t		 arq_len_enable;	/* enable bit in arq_len */
	uint32_t		 arq_tail_mask;
	uint32_t		 arq_head_mask;
};
219
/* a unit of deferred work handed to our workqueues */
struct iavf_work {
	struct work	 ixw_cookie;		/* workqueue(9) handle */
	void		(*ixw_func)(void *);	/* function to run */
	void		*ixw_arg;		/* argument for ixw_func */
	unsigned int	 ixw_added;		/* nonzero while enqueued (see iavf_work_add) */
};

/* per-slot TX DMA bookkeeping */
struct iavf_tx_map {
	struct mbuf	*txm_m;		/* mbuf chain being transmitted */
	bus_dmamap_t	 txm_map;	/* DMA map loaded with txm_m */
	unsigned int	 txm_eop;	/* end-of-packet descriptor index */
};
232
/* one transmit ring; fields are protected by txr_lock */
struct iavf_tx_ring {
	unsigned int		 txr_qid;	/* hardware queue id */
	char			 txr_name[16];	/* evcnt group name */

	struct iavf_softc	*txr_sc;	/* back pointer */
	kmutex_t		 txr_lock;	/* spin mutex for this ring */
	pcq_t			*txr_intrq;	/* per-CPU queue fed by if_transmit */
	void			*txr_si;	/* softint for deferred transmit */
	unsigned int		 txr_prod;	/* producer (next free) index */
	unsigned int		 txr_cons;	/* consumer (next done) index */

	struct iavf_tx_map	*txr_maps;	/* per-descriptor DMA state */
	struct ixl_dmamem	 txr_mem;	/* descriptor ring memory */
	bus_size_t		 txr_tail;	/* tail register offset */

	int			 txr_watchdog;	/* countdown; IAVF_WATCHDOG_STOP = idle */

	/* event counters */
	struct evcnt		 txr_defragged;
	struct evcnt		 txr_defrag_failed;
	struct evcnt		 txr_pcqdrop;
	struct evcnt		 txr_transmitdef;
	struct evcnt		 txr_defer;
	struct evcnt		 txr_watchdogto;
	struct evcnt		 txr_intr;
};
258
/* per-slot RX DMA bookkeeping */
struct iavf_rx_map {
	struct mbuf	*rxm_m;		/* mbuf posted to this descriptor */
	bus_dmamap_t	 rxm_map;	/* DMA map loaded with rxm_m */
};

/* one receive ring; fields are protected by rxr_lock */
struct iavf_rx_ring {
	unsigned int		 rxr_qid;	/* hardware queue id */
	char			 rxr_name[16];	/* evcnt group name */

	struct iavf_softc	*rxr_sc;	/* back pointer */
	kmutex_t		 rxr_lock;	/* spin mutex for this ring */

	unsigned int		 rxr_prod;	/* producer (refill) index */
	unsigned int		 rxr_cons;	/* consumer (harvest) index */

	struct iavf_rx_map	*rxr_maps;	/* per-descriptor DMA state */
	struct ixl_dmamem	 rxr_mem;	/* descriptor ring memory */
	bus_size_t		 rxr_tail;	/* tail register offset */

	/* multi-descriptor packet being reassembled across rxeof calls */
	struct mbuf		*rxr_m_head;
	struct mbuf		**rxr_m_tail;

	/* event counters */
	struct evcnt		 rxr_mgethdr_failed;
	struct evcnt		 rxr_mgetcl_failed;
	struct evcnt		 rxr_mbuf_load_failed;
	struct evcnt		 rxr_defer;
	struct evcnt		 rxr_intr;
};
287
/* a TX/RX ring pair serviced by one interrupt vector */
struct iavf_queue_pair {
	struct iavf_tx_ring	*qp_txr;
	struct iavf_rx_ring	*qp_rxr;
	struct work		 qp_work;	/* deferred service via workqueue */
	void			*qp_si;		/* deferred service via softint */
	bool			 qp_workqueue;	/* true: use workqueue, else softint */
};

/* counters mirroring the VSI statistics reported by the PF */
struct iavf_stat_counters {
	struct evcnt		 isc_rx_bytes;
	struct evcnt		 isc_rx_unicast;
	struct evcnt		 isc_rx_multicast;
	struct evcnt		 isc_rx_broadcast;
	struct evcnt		 isc_rx_discards;
	struct evcnt		 isc_rx_unknown_protocol;
	struct evcnt		 isc_tx_bytes;
	struct evcnt		 isc_tx_unicast;
	struct evcnt		 isc_tx_multicast;
	struct evcnt		 isc_tx_broadcast;
	struct evcnt		 isc_tx_discards;
	struct evcnt		 isc_tx_errors;
};
310
/*
 * Locking notes:
 * + Fields in iavf_tx_ring are protected by txr_lock (a spin mutex), and
 *   fields in iavf_rx_ring are protected by rxr_lock (a spin mutex).
 *   - More than one of these ring locks must not be held at once.
 * + Fields named sc_atq_*, sc_arq_*, and sc_adminq_* are protected by
 *   sc_adminq_lock (a spin mutex).
 *   - The lock is held while accessing sc_aq_regs
 *     and is never held together with txr_lock or rxr_lock.
 * + Other fields in iavf_softc are protected by sc_cfg_lock
 *   (an adaptive mutex).
 *   - This lock must be acquired before any of the other locks.
 *
 * Locking order:
 * - IFNET_LOCK => sc_cfg_lock => sc_adminq_lock
 * - sc_cfg_lock => ETHER_LOCK => sc_adminq_lock
 * - sc_cfg_lock => txr_lock
 * - sc_cfg_lock => rxr_lock
 */
330
/*
 * Per-device private state.  See the locking notes above for which
 * lock protects which group of fields.
 */
struct iavf_softc {
	device_t		 sc_dev;
	enum i40e_mac_type	 sc_mac_type;
	int			 sc_debuglevel;
	bool			 sc_attached;	/* attach completed successfully */
	bool			 sc_dead;
	kmutex_t		 sc_cfg_lock;	/* adaptive; guards configuration */
	callout_t		 sc_tick;	/* periodic housekeeping callout */
	struct ifmedia		 sc_media;
	uint64_t		 sc_media_status;
	uint64_t		 sc_media_active;
	int			 sc_link_state;

	const struct iavf_aq_regs *
				 sc_aq_regs;	/* admin queue register map */

	struct ethercom		 sc_ec;
	uint8_t			 sc_enaddr[ETHER_ADDR_LEN];
	uint8_t			 sc_enaddr_fake[ETHER_ADDR_LEN];
	uint8_t			 sc_enaddr_added[ETHER_ADDR_LEN];
	uint8_t			 sc_enaddr_reset[ETHER_ADDR_LEN];
	struct if_percpuq	*sc_ipq;	/* per-CPU RX input queue */

	/* bus resources */
	struct pci_attach_args	 sc_pa;
	bus_dma_tag_t		 sc_dmat;
	bus_space_tag_t		 sc_memt;
	bus_space_handle_t	 sc_memh;
	bus_size_t		 sc_mems;
	pci_intr_handle_t	*sc_ihp;
	void			**sc_ihs;
	unsigned int		 sc_nintrs;

	/* negotiated virtchnl version and VF resources from the PF */
	uint32_t		 sc_major_ver;
	uint32_t		 sc_minor_ver;
	uint32_t		 sc_vf_id;
	uint32_t		 sc_vf_cap;
	uint16_t		 sc_vsi_id;
	uint16_t		 sc_qset_handle;
	uint16_t		 sc_max_mtu;
	bool			 sc_got_vf_resources;
	bool			 sc_got_irq_map;
	unsigned int		 sc_max_vectors;

	/* admin send (ATQ) / receive (ARQ) queues; sc_adminq_lock */
	kmutex_t		 sc_adminq_lock;
	kcondvar_t		 sc_adminq_cv;
	struct ixl_dmamem	 sc_atq;
	unsigned int		 sc_atq_prod;
	unsigned int		 sc_atq_cons;
	struct ixl_aq_bufs	 sc_atq_idle;
	struct ixl_aq_bufs	 sc_atq_live;
	struct ixl_dmamem	 sc_arq;
	struct ixl_aq_bufs	 sc_arq_idle;
	struct ixl_aq_bufs	 sc_arq_live;
	unsigned int		 sc_arq_prod;
	unsigned int		 sc_arq_cons;
	struct iavf_work	 sc_arq_refill;
	uint32_t		 sc_arq_opcode;	/* last completed ARQ opcode */
	uint32_t		 sc_arq_retval;	/* last completed ARQ retval */

	/* interrupt moderation and queue-pair configuration */
	uint32_t		 sc_tx_itr;
	uint32_t		 sc_rx_itr;
	unsigned int		 sc_tx_ring_ndescs;
	unsigned int		 sc_rx_ring_ndescs;
	unsigned int		 sc_nqueue_pairs;
	unsigned int		 sc_nqps_alloc;	/* queue pairs allocated */
	unsigned int		 sc_nqps_vsi;	/* queue pairs granted by PF */
	unsigned int		 sc_nqps_req;	/* queue pairs requested */
	struct iavf_queue_pair	*sc_qps;
	bool			 sc_txrx_workqueue;
	u_int			 sc_tx_intr_process_limit;
	u_int			 sc_tx_process_limit;
	u_int			 sc_rx_intr_process_limit;
	u_int			 sc_rx_process_limit;

	/* deferred work */
	struct workqueue	*sc_workq;	/* configuration work */
	struct workqueue	*sc_workq_txrx;	/* per-CPU TX/RX work */
	struct iavf_work	 sc_reset_task;
	struct iavf_work	 sc_wdto_task;
	struct iavf_work	 sc_req_queues_task;
	bool			 sc_req_queues_retried;
	bool			 sc_resetting;
	bool			 sc_reset_up;	/* interface was up at reset time */

	struct sysctllog	*sc_sysctllog;
	struct iavf_stat_counters
				 sc_stat_counters;
};
418
/*
 * IAVF_LOG(sc, level, fmt, args...): log a message for this device.
 * Before attach completes, route through aprint_*_dev() so output is
 * interleaved correctly with autoconfiguration messages; afterwards
 * use log(9) prefixed with the interface name.
 */
#define IAVF_LOG(_sc, _lvl, _fmt, _args...)				\
do {									\
	if (!(_sc)->sc_attached) {					\
		switch (_lvl) {						\
		case LOG_ERR:						\
		case LOG_WARNING:					\
			aprint_error_dev((_sc)->sc_dev, _fmt, ##_args);	\
			break;						\
		case LOG_INFO:						\
			aprint_normal_dev((_sc)->sc_dev,_fmt, ##_args);	\
			break;						\
		case LOG_DEBUG:						\
		default:						\
			aprint_debug_dev((_sc)->sc_dev, _fmt, ##_args);	\
		}							\
	} else {							\
		struct ifnet *_ifp = &(_sc)->sc_ec.ec_if;		\
		log((_lvl), "%s: " _fmt, _ifp->if_xname, ##_args);	\
	}								\
} while (0)
439
/* DMA memory and admin-queue buffer helpers */
static int	iavf_dmamem_alloc(bus_dma_tag_t, struct ixl_dmamem *,
		    bus_size_t, bus_size_t);
static void	iavf_dmamem_free(bus_dma_tag_t, struct ixl_dmamem *);
static struct ixl_aq_buf *
		iavf_aqb_get(struct iavf_softc *, struct ixl_aq_bufs *);
static struct ixl_aq_buf *
		iavf_aqb_get_locked(struct ixl_aq_bufs *);
static void	iavf_aqb_put_locked(struct ixl_aq_bufs *, struct ixl_aq_buf *);
static void	iavf_aqb_clean(struct ixl_aq_bufs *, bus_dma_tag_t);

/* attach-time setup/teardown helpers */
static const struct iavf_product *
		iavf_lookup(const struct pci_attach_args *);
static enum i40e_mac_type
		iavf_mactype(pci_product_id_t);
static void	iavf_pci_csr_setup(pci_chipset_tag_t, pcitag_t);
static int	iavf_wait_active(struct iavf_softc *);
static bool	iavf_is_etheranyaddr(const uint8_t *);
static void	iavf_prepare_fakeaddr(struct iavf_softc *);
static int	iavf_replace_lla(struct ifnet *,
		    const uint8_t *, const uint8_t *);
static void	iavf_evcnt_attach(struct evcnt *,
		    const char *, const char *);
static int	iavf_setup_interrupts(struct iavf_softc *);
static void	iavf_teardown_interrupts(struct iavf_softc *);
static int	iavf_setup_sysctls(struct iavf_softc *);
static void	iavf_teardown_sysctls(struct iavf_softc *);
static int	iavf_setup_stats(struct iavf_softc *);
static void	iavf_teardown_stats(struct iavf_softc *);
static struct workqueue *
		iavf_workq_create(const char *, pri_t, int, int);
static void	iavf_workq_destroy(struct workqueue *);
static int	iavf_work_set(struct iavf_work *, void (*)(void *), void *);
static void	iavf_work_add(struct workqueue *, struct iavf_work *);
static void	iavf_work_wait(struct workqueue *, struct iavf_work *);
static unsigned int
		iavf_calc_msix_count(struct iavf_softc *);
static unsigned int
		iavf_calc_queue_pair_size(struct iavf_softc *);
static int	iavf_queue_pairs_alloc(struct iavf_softc *);
static void	iavf_queue_pairs_free(struct iavf_softc *);

/* admin queue handling */
static int	iavf_arq_fill(struct iavf_softc *);
static void	iavf_arq_refill(void *);
static int	iavf_arq_poll(struct iavf_softc *, uint32_t, int);
static void	iavf_atq_done(struct iavf_softc *);
static int	iavf_init_admin_queue(struct iavf_softc *);
static void	iavf_cleanup_admin_queue(struct iavf_softc *);
static int	iavf_arq(struct iavf_softc *);
static int	iavf_adminq_exec(struct iavf_softc *,
		    struct ixl_aq_desc *, struct ixl_aq_buf *);
static int	iavf_adminq_poll(struct iavf_softc *,
		    struct ixl_aq_desc *, struct ixl_aq_buf *, int);
static int	iavf_adminq_poll_locked(struct iavf_softc *,
		    struct ixl_aq_desc *, struct ixl_aq_buf *, int);
static int	iavf_add_multi(struct iavf_softc *, uint8_t *, uint8_t *);
static int	iavf_del_multi(struct iavf_softc *, uint8_t *, uint8_t *);
static void	iavf_del_all_multi(struct iavf_softc *);

/* virtchnl commands to the PF and their response handlers */
static int	iavf_get_version(struct iavf_softc *, struct ixl_aq_buf *);
static int	iavf_get_vf_resources(struct iavf_softc *, struct ixl_aq_buf *);
static int	iavf_get_stats(struct iavf_softc *);
static int	iavf_config_irq_map(struct iavf_softc *, struct ixl_aq_buf *);
static int	iavf_config_vsi_queues(struct iavf_softc *);
static int	iavf_config_hena(struct iavf_softc *);
static int	iavf_config_rss_key(struct iavf_softc *);
static int	iavf_config_rss_lut(struct iavf_softc *);
static int	iavf_config_promisc_mode(struct iavf_softc *, int, int);
static int	iavf_config_vlan_stripping(struct iavf_softc *, int);
static int	iavf_config_vlan_id(struct iavf_softc *, uint16_t, uint32_t);
static int	iavf_queue_select(struct iavf_softc *, int);
static int	iavf_request_queues(struct iavf_softc *, unsigned int);
static int	iavf_reset_vf(struct iavf_softc *);
static int	iavf_eth_addr(struct iavf_softc *, const uint8_t *, uint32_t);
static void	iavf_process_version(struct iavf_softc *,
		    struct ixl_aq_desc *, struct ixl_aq_buf *);
static void	iavf_process_vf_resources(struct iavf_softc *,
		    struct ixl_aq_desc *, struct ixl_aq_buf *);
static void	iavf_process_irq_map(struct iavf_softc *,
		    struct ixl_aq_desc *);
static void	iavf_process_vc_event(struct iavf_softc *,
		    struct ixl_aq_desc *, struct ixl_aq_buf *);
static void	iavf_process_stats(struct iavf_softc *,
		    struct ixl_aq_desc *, struct ixl_aq_buf *);
static void	iavf_process_req_queues(struct iavf_softc *,
		    struct ixl_aq_desc *, struct ixl_aq_buf *);

/* interrupt handlers, reset handling, and ifnet entry points */
static int	iavf_intr(void *);
static int	iavf_queue_intr(void *);
static void	iavf_tick(void *);
static void	iavf_tick_halt(void *);
static void	iavf_reset_request(void *);
static void	iavf_reset_start(void *);
static void	iavf_reset(void *);
static void	iavf_reset_finish(struct iavf_softc *);
static int	iavf_init(struct ifnet *);
static int	iavf_init_locked(struct iavf_softc *);
static void	iavf_stop(struct ifnet *, int);
static void	iavf_stop_locked(struct iavf_softc *);
static int	iavf_ioctl(struct ifnet *, u_long, void *);
static void	iavf_start(struct ifnet *);
static int	iavf_transmit(struct ifnet *, struct mbuf*);
static int	iavf_watchdog(struct iavf_tx_ring *);
static void	iavf_watchdog_timeout(void *);
static int	iavf_media_change(struct ifnet *);
static void	iavf_media_status(struct ifnet *, struct ifmediareq *);
static int	iavf_ifflags_cb(struct ethercom *);
static int	iavf_vlan_cb(struct ethercom *, uint16_t, bool);
static void	iavf_deferred_transmit(void *);
static void	iavf_handle_queue(void *);
static void	iavf_handle_queue_wk(struct work *, void *);
static int	iavf_reinit(struct iavf_softc *);
static int	iavf_rxfill(struct iavf_softc *, struct iavf_rx_ring *);
static void	iavf_txr_clean(struct iavf_softc *, struct iavf_tx_ring *);
static void	iavf_rxr_clean(struct iavf_softc *, struct iavf_rx_ring *);
static int	iavf_txeof(struct iavf_softc *, struct iavf_tx_ring *,
		    u_int, struct evcnt *);
static int	iavf_rxeof(struct iavf_softc *, struct iavf_rx_ring *,
		    u_int, struct evcnt *);
static int	iavf_iff(struct iavf_softc *);
static int	iavf_iff_locked(struct iavf_softc *);
static void	iavf_post_request_queues(void *);
static int	iavf_sysctl_itr_handler(SYSCTLFN_PROTO);

/* autoconf glue */
static int	iavf_match(device_t, cfdata_t, void *);
static void	iavf_attach(device_t, device_t, void*);
static int	iavf_detach(device_t, int);
static int	iavf_finalize_teardown(device_t);
566
/* autoconf attachment; DVF_DETACH_SHUTDOWN allows detach at shutdown */
CFATTACH_DECL3_NEW(iavf, sizeof(struct iavf_softc),
    iavf_match, iavf_attach, iavf_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

/* PCI IDs of the supported virtual functions */
static const struct iavf_product iavf_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_VF },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_VF_HV },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X722_VF },
	/* required last entry */
	{0, 0}
};
578
/* indexed by the link-speed value the PF reports in vc events */
static const struct iavf_link_speed iavf_link_speeds[] = {
	{ 0, 0 },
	{ IF_Mbps(100),		IFM_100_TX },
	{ IF_Mbps(1000),	IFM_1000_T },
	{ IF_Gbps(10),		IFM_10G_T },
	{ IF_Gbps(40),		IFM_40G_CR4 },
	{ IF_Gbps(20),		IFM_20G_KR2 },
	{ IF_Gbps(25),		IFM_25G_CR }
};

/* VF admin queue register layout */
static const struct iavf_aq_regs iavf_aq_regs = {
	.atq_tail	= I40E_VF_ATQT1,
	.atq_tail_mask	= I40E_VF_ATQT1_ATQT_MASK,
	.atq_head	= I40E_VF_ATQH1,
	/*
	 * NOTE(review): atq_head_mask is initialized from the ARQH mask.
	 * The ATQH/ARQH field masks are presumably identical in this
	 * hardware, but confirm against if_ixlreg.h before relying on it.
	 */
	.atq_head_mask	= I40E_VF_ARQH1_ARQH_MASK,
	.atq_len	= I40E_VF_ATQLEN1,
	.atq_bal	= I40E_VF_ATQBAL1,
	.atq_bah	= I40E_VF_ATQBAH1,
	.atq_len_enable	= I40E_VF_ATQLEN1_ATQENABLE_MASK,

	.arq_tail	= I40E_VF_ARQT1,
	.arq_tail_mask	= I40E_VF_ARQT1_ARQT_MASK,
	.arq_head	= I40E_VF_ARQH1,
	.arq_head_mask	= I40E_VF_ARQH1_ARQH_MASK,
	.arq_len	= I40E_VF_ARQLEN1,
	.arq_bal	= I40E_VF_ARQBAL1,
	.arq_bah	= I40E_VF_ARQBAH1,
	.arq_len_enable	= I40E_VF_ARQLEN1_ARQENABLE_MASK,
};
608
/* module-wide defaults copied into each softc at attach time */
static struct iavf_module_params iavf_params = {
	.debug = 0,
	.rx_itr = 0x07a, /* 4K intrs/sec */
	.tx_itr = 0x07a, /* 4K intrs/sec */
	.tx_ndescs = 512,
	.rx_ndescs = 256,
	.max_qps = INT_MAX,	/* effectively "no limit" */
};

#define delaymsec(_x)	DELAY(1000 * (_x))
/* 32-bit register access helpers */
#define iavf_rd(_s, _r) \
	bus_space_read_4((_s)->sc_memt, (_s)->sc_memh, (_r))
#define iavf_wr(_s, _r, _v) \
	bus_space_write_4((_s)->sc_memt, (_s)->sc_memh, (_r), (_v))
#define iavf_barrier(_s, _r, _l, _o) \
	bus_space_barrier((_s)->sc_memt, (_s)->sc_memh, (_r), (_l), (_o))
/* read a harmless register to flush posted writes */
#define iavf_flush(_s)	(void)iavf_rd((_s), I40E_VFGEN_RSTAT)
/*
 * NOTE(review): these treat sc_nqueue_pairs as a power-of-two exponent
 * (nqueues = 2^(n-1), allqueues mask = 2^n - 1) — confirm against the
 * code that assigns sc_nqueue_pairs.
 */
#define iavf_nqueues(_sc)	(1 << ((_sc)->sc_nqueue_pairs - 1))
#define iavf_allqueues(_sc)	((1 << ((_sc)->sc_nqueue_pairs)) - 1)
628
/* wait until all currently running interrupt handlers have completed */
static inline void
iavf_intr_barrier(void)
{

	/* a zero-priority cross call drains every CPU's interrupt context */
	xc_barrier(0);
}
/*
 * Enable the misc/admin-queue interrupt (vector 0): set the enable bit,
 * clear any pending PBA bit, and select the "no throttling" ITR index.
 */
static inline void
iavf_intr_enable(struct iavf_softc *sc)
{

	iavf_wr(sc, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL0_INTENA_MASK |
	    I40E_VFINT_DYN_CTL0_CLEARPBA_MASK |
	    (IAVF_NOITR << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT));
	/* unmask only the admin queue cause on vector 0 */
	iavf_wr(sc, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);
	iavf_flush(sc);
}
646
/* disable the misc/admin-queue interrupt and mask all its causes */
static inline void
iavf_intr_disable(struct iavf_softc *sc)
{

	/* write without INTENA: interrupt disabled */
	iavf_wr(sc, I40E_VFINT_DYN_CTL01,
	    (IAVF_NOITR << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT));
	iavf_wr(sc, I40E_VFINT_ICR0_ENA1, 0);
	iavf_flush(sc);
}
656
/* enable the interrupt for queue pair "qid" (vectors 1..N) */
static inline void
iavf_queue_intr_enable(struct iavf_softc *sc, unsigned int qid)
{

	iavf_wr(sc, I40E_VFINT_DYN_CTLN1(qid),
	    I40E_VFINT_DYN_CTLN1_INTENA_MASK |
	    I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
	    (IAVF_NOITR << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT));
	iavf_flush(sc);
}
667
/* disable the interrupt for queue pair "qid" */
static inline void
iavf_queue_intr_disable(struct iavf_softc *sc, unsigned int qid)
{

	/* write without INTENA: interrupt disabled */
	iavf_wr(sc, I40E_VFINT_DYN_CTLN1(qid),
	    (IAVF_NOITR << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT));
	iavf_flush(sc);
}
676
677 static inline void
678 iavf_aq_vc_set_opcode(struct ixl_aq_desc *iaq, uint32_t opcode)
679 {
680 struct iavf_aq_vc *vc;
681
682 vc = (struct iavf_aq_vc *)&iaq->iaq_cookie;
683 vc->iaq_vc_opcode = htole32(opcode);
684 }
685
686 static inline uint32_t
687 iavf_aq_vc_get_opcode(const struct ixl_aq_desc *iaq)
688 {
689 const struct iavf_aq_vc *vc;
690
691 vc = (const struct iavf_aq_vc *)&iaq->iaq_cookie;
692 return le32toh(vc->iaq_vc_opcode);
693 }
694
695 static inline uint32_t
696 iavf_aq_vc_get_retval(const struct ixl_aq_desc *iaq)
697 {
698 const struct iavf_aq_vc *vc;
699
700 vc = (const struct iavf_aq_vc *)&iaq->iaq_cookie;
701 return le32toh(vc->iaq_vc_retval);
702 }
703
704 static int
705 iavf_match(device_t parent, cfdata_t match, void *aux)
706 {
707 const struct pci_attach_args *pa = aux;
708
709 return (iavf_lookup(pa) != NULL) ? 1 : 0;
710 }
711
712 static void
713 iavf_attach(device_t parent, device_t self, void *aux)
714 {
715 struct iavf_softc *sc;
716 struct pci_attach_args *pa = aux;
717 struct ifnet *ifp;
718 struct ixl_aq_buf *aqb;
719 pcireg_t memtype;
720 char xnamebuf[MAXCOMLEN];
721 int error, i;
722
723 sc = device_private(self);
724 sc->sc_dev = self;
725 ifp = &sc->sc_ec.ec_if;
726
727 sc->sc_pa = *pa;
728 sc->sc_dmat = (pci_dma64_available(pa)) ? pa->pa_dmat64 : pa->pa_dmat;
729 sc->sc_aq_regs = &iavf_aq_regs;
730 sc->sc_debuglevel = iavf_params.debug;
731 sc->sc_tx_ring_ndescs = iavf_params.tx_ndescs;
732 sc->sc_rx_ring_ndescs = iavf_params.rx_ndescs;
733 sc->sc_tx_itr = iavf_params.tx_itr;
734 sc->sc_rx_itr = iavf_params.rx_itr;
735 sc->sc_nqps_req = MIN(ncpu, iavf_params.max_qps);
736 iavf_prepare_fakeaddr(sc);
737
738 sc->sc_mac_type = iavf_mactype(PCI_PRODUCT(pa->pa_id));
739 iavf_pci_csr_setup(pa->pa_pc, pa->pa_tag);
740
741 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IAVF_PCIREG);
742 if (pci_mapreg_map(pa, IAVF_PCIREG, memtype, 0,
743 &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems)) {
744 aprint_error(": unable to map registers\n");
745 return;
746 }
747
748 if (iavf_wait_active(sc) != 0) {
749 aprint_error(": VF reset timed out\n");
750 goto unmap;
751 }
752
753 mutex_init(&sc->sc_cfg_lock, MUTEX_DEFAULT, IPL_SOFTNET);
754 mutex_init(&sc->sc_adminq_lock, MUTEX_DEFAULT, IPL_NET);
755 SIMPLEQ_INIT(&sc->sc_atq_idle);
756 SIMPLEQ_INIT(&sc->sc_atq_live);
757 SIMPLEQ_INIT(&sc->sc_arq_idle);
758 SIMPLEQ_INIT(&sc->sc_arq_live);
759 sc->sc_arq_cons = 0;
760 sc->sc_arq_prod = 0;
761 aqb = NULL;
762
763 if (iavf_dmamem_alloc(sc->sc_dmat, &sc->sc_atq,
764 sizeof(struct ixl_aq_desc) * IAVF_AQ_NUM, IAVF_AQ_ALIGN) != 0) {
765 aprint_error(": unable to allocate atq\n");
766 goto free_mutex;
767 }
768
769 if (iavf_dmamem_alloc(sc->sc_dmat, &sc->sc_arq,
770 sizeof(struct ixl_aq_desc) * IAVF_AQ_NUM, IAVF_AQ_ALIGN) != 0) {
771 aprint_error(": unable to allocate arq\n");
772 goto free_atq;
773 }
774
775 for (i = 0; i < IAVF_AQ_NUM; i++) {
776 aqb = iavf_aqb_get(sc, NULL);
777 if (aqb != NULL) {
778 iavf_aqb_put_locked(&sc->sc_arq_idle, aqb);
779 }
780 }
781 aqb = NULL;
782
783 if (!iavf_arq_fill(sc)) {
784 aprint_error(": unable to fill arq descriptors\n");
785 goto free_arq;
786 }
787
788 if (iavf_init_admin_queue(sc) != 0) {
789 aprint_error(": unable to initialize admin queue\n");
790 goto shutdown;
791 }
792
793 aqb = iavf_aqb_get(sc, NULL);
794 if (aqb == NULL) {
795 aprint_error(": unable to allocate buffer for ATQ\n");
796 goto shutdown;
797 }
798
799 error = iavf_get_version(sc, aqb);
800 switch (error) {
801 case 0:
802 break;
803 case ETIMEDOUT:
804 aprint_error(": timeout waiting for VF version\n");
805 goto shutdown;
806 case ENOTSUP:
807 aprint_error(": unsupported VF version %d\n", sc->sc_major_ver);
808 goto shutdown;
809 default:
810 aprint_error(":unable to get VF interface version\n");
811 goto shutdown;
812 }
813
814 if (iavf_get_vf_resources(sc, aqb) != 0) {
815 aprint_error(": timeout waiting for VF resources\n");
816 goto shutdown;
817 }
818
819 aprint_normal(", VF version %d.%d%s",
820 sc->sc_major_ver, sc->sc_minor_ver,
821 (sc->sc_minor_ver > IAVF_VF_MINOR) ? "(minor mismatch)" : "");
822 aprint_normal(", VF %d, VSI %d", sc->sc_vf_id, sc->sc_vsi_id);
823 aprint_normal("\n");
824 aprint_naive("\n");
825
826 aprint_normal_dev(self, "Ethernet address %s\n",
827 ether_sprintf(sc->sc_enaddr));
828
829 if (iavf_queue_pairs_alloc(sc) != 0) {
830 goto shutdown;
831 }
832
833 if (iavf_setup_interrupts(sc) != 0) {
834 goto free_queue_pairs;
835 }
836
837 if (iavf_config_irq_map(sc, aqb) != 0) {
838 aprint_error(", timed out waiting for IRQ map response\n");
839 goto teardown_intrs;
840 }
841
842 if (iavf_setup_sysctls(sc) != 0) {
843 goto teardown_intrs;
844 }
845
846 if (iavf_setup_stats(sc) != 0) {
847 goto teardown_sysctls;
848 }
849
850 iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
851 aqb = NULL;
852
853 snprintf(xnamebuf, sizeof(xnamebuf),
854 "%s_adminq_cv", device_xname(self));
855 cv_init(&sc->sc_adminq_cv, xnamebuf);
856
857 callout_init(&sc->sc_tick, CALLOUT_MPSAFE);
858 callout_setfunc(&sc->sc_tick, iavf_tick, sc);
859
860 iavf_work_set(&sc->sc_reset_task, iavf_reset_start, sc);
861 iavf_work_set(&sc->sc_arq_refill, iavf_arq_refill, sc);
862 iavf_work_set(&sc->sc_wdto_task, iavf_watchdog_timeout, sc);
863 iavf_work_set(&sc->sc_req_queues_task, iavf_post_request_queues, sc);
864 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_cfg", device_xname(self));
865 sc->sc_workq = iavf_workq_create(xnamebuf, IAVF_WORKQUEUE_PRI,
866 IPL_NET, WQ_MPSAFE);
867 if (sc->sc_workq == NULL)
868 goto destroy_cv;
869
870 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_txrx", device_xname(self));
871 error = workqueue_create(&sc->sc_workq_txrx, xnamebuf,
872 iavf_handle_queue_wk, sc, IAVF_WORKQUEUE_PRI, IPL_NET,
873 WQ_PERCPU|WQ_MPSAFE);
874 if (error != 0) {
875 sc->sc_workq_txrx = NULL;
876 goto teardown_wqs;
877 }
878
879 if_initialize(ifp);
880
881 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
882
883 ifp->if_softc = sc;
884 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
885 ifp->if_extflags = IFEF_MPSAFE;
886 ifp->if_ioctl = iavf_ioctl;
887 ifp->if_start = iavf_start;
888 ifp->if_transmit = iavf_transmit;
889 ifp->if_watchdog = NULL;
890 ifp->if_init = iavf_init;
891 ifp->if_stop = iavf_stop;
892
893 IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_ndescs);
894 IFQ_SET_READY(&ifp->if_snd);
895 sc->sc_ipq = if_percpuq_create(ifp);
896
897 ifp->if_capabilities |= IAVF_IFCAP_RXCSUM;
898 ifp->if_capabilities |= IAVF_IFCAP_TXCSUM;
899
900 ether_set_vlan_cb(&sc->sc_ec, iavf_vlan_cb);
901 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
902 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
903 sc->sc_ec.ec_capenable = sc->sc_ec.ec_capabilities;
904
905 ether_set_ifflags_cb(&sc->sc_ec, iavf_ifflags_cb);
906
907 sc->sc_ec.ec_ifmedia = &sc->sc_media;
908 ifmedia_init_with_lock(&sc->sc_media, IFM_IMASK, iavf_media_change,
909 iavf_media_status, &sc->sc_cfg_lock);
910
911 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
912 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
913
914 if_deferred_start_init(ifp, NULL);
915 ether_ifattach(ifp, sc->sc_enaddr);
916
917 sc->sc_txrx_workqueue = true;
918 sc->sc_tx_process_limit = IAVF_TX_PROCESS_LIMIT;
919 sc->sc_rx_process_limit = IAVF_RX_PROCESS_LIMIT;
920 sc->sc_tx_intr_process_limit = IAVF_TX_INTR_PROCESS_LIMIT;
921 sc->sc_rx_intr_process_limit = IAVF_RX_INTR_PROCESS_LIMIT;
922
923 if_register(ifp);
924 if_link_state_change(ifp, sc->sc_link_state);
925 iavf_intr_enable(sc);
926 if (sc->sc_nqps_vsi < sc->sc_nqps_req)
927 iavf_work_add(sc->sc_workq, &sc->sc_req_queues_task);
928 sc->sc_attached = true;
929 return;
930
931 teardown_wqs:
932 config_finalize_register(self, iavf_finalize_teardown);
933 destroy_cv:
934 cv_destroy(&sc->sc_adminq_cv);
935 callout_destroy(&sc->sc_tick);
936 iavf_teardown_stats(sc);
937 teardown_sysctls:
938 iavf_teardown_sysctls(sc);
939 teardown_intrs:
940 iavf_teardown_interrupts(sc);
941 free_queue_pairs:
942 iavf_queue_pairs_free(sc);
943 shutdown:
944 if (aqb != NULL)
945 iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
946 iavf_cleanup_admin_queue(sc);
947 iavf_aqb_clean(&sc->sc_atq_idle, sc->sc_dmat);
948 iavf_aqb_clean(&sc->sc_arq_idle, sc->sc_dmat);
949 free_arq:
950 iavf_dmamem_free(sc->sc_dmat, &sc->sc_arq);
951 free_atq:
952 iavf_dmamem_free(sc->sc_dmat, &sc->sc_atq);
953 free_mutex:
954 mutex_destroy(&sc->sc_cfg_lock);
955 mutex_destroy(&sc->sc_adminq_lock);
956 unmap:
957 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
958 sc->sc_mems = 0;
959 sc->sc_attached = false;
960 }
961
/*
 * iavf_detach --
 *	Detach the device.  Reverses iavf_attach(): stops the interface,
 *	quiesces deferred work and the tick callout, detaches the network
 *	interface, tears down the admin queue, interrupts, sysctls and
 *	statistics, and unmaps the registers.
 */
static int
iavf_detach(device_t self, int flags)
{
	struct iavf_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ec.ec_if;

	/* nothing to undo if attach never completed */
	if (!sc->sc_attached)
		return 0;

	iavf_stop(ifp, 1);

	/*
	 * set a dummy function to halt callout safely
	 * even if a workqueue entry calls callout_schedule()
	 */
	callout_setfunc(&sc->sc_tick, iavf_tick_halt, sc);
	iavf_work_wait(sc->sc_workq, &sc->sc_reset_task);
	iavf_work_wait(sc->sc_workq, &sc->sc_wdto_task);

	callout_halt(&sc->sc_tick, NULL);
	callout_destroy(&sc->sc_tick);

	/* detach the I/F before stop adminq due to callbacks */
	ether_ifdetach(ifp);
	if_detach(ifp);
	ifmedia_fini(&sc->sc_media);
	if_percpuq_destroy(sc->sc_ipq);

	/* quiesce interrupts and any in-flight ARQ refill work */
	iavf_intr_disable(sc);
	iavf_intr_barrier();
	iavf_work_wait(sc->sc_workq, &sc->sc_arq_refill);

	mutex_enter(&sc->sc_adminq_lock);
	iavf_cleanup_admin_queue(sc);
	mutex_exit(&sc->sc_adminq_lock);
	iavf_aqb_clean(&sc->sc_atq_idle, sc->sc_dmat);
	iavf_aqb_clean(&sc->sc_arq_idle, sc->sc_dmat);
	iavf_dmamem_free(sc->sc_dmat, &sc->sc_arq);
	iavf_dmamem_free(sc->sc_dmat, &sc->sc_atq);
	cv_destroy(&sc->sc_adminq_cv);

	iavf_workq_destroy(sc->sc_workq);
	sc->sc_workq = NULL;

	iavf_queue_pairs_free(sc);
	iavf_teardown_interrupts(sc);
	iavf_teardown_sysctls(sc);
	iavf_teardown_stats(sc);
	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);

	mutex_destroy(&sc->sc_adminq_lock);
	mutex_destroy(&sc->sc_cfg_lock);

	return 0;
}
1017
1018 static int
1019 iavf_finalize_teardown(device_t self)
1020 {
1021 struct iavf_softc *sc = device_private(self);
1022
1023 if (sc->sc_workq != NULL) {
1024 iavf_workq_destroy(sc->sc_workq);
1025 sc->sc_workq = NULL;
1026 }
1027
1028 if (sc->sc_workq_txrx != NULL) {
1029 workqueue_destroy(sc->sc_workq_txrx);
1030 sc->sc_workq_txrx = NULL;
1031 }
1032
1033 return 0;
1034 }
1035
1036 static int
1037 iavf_init(struct ifnet *ifp)
1038 {
1039 struct iavf_softc *sc;
1040 int rv;
1041
1042 sc = ifp->if_softc;
1043 mutex_enter(&sc->sc_cfg_lock);
1044 rv = iavf_init_locked(sc);
1045 mutex_exit(&sc->sc_cfg_lock);
1046
1047 return rv;
1048 }
1049
1050 static int
1051 iavf_init_locked(struct iavf_softc *sc)
1052 {
1053 struct ifnet *ifp = &sc->sc_ec.ec_if;
1054 unsigned int i;
1055 int error;
1056
1057 KASSERT(mutex_owned(&sc->sc_cfg_lock));
1058
1059 if (ISSET(ifp->if_flags, IFF_RUNNING))
1060 iavf_stop_locked(sc);
1061
1062 if (sc->sc_resetting)
1063 return ENXIO;
1064
1065 error = iavf_reinit(sc);
1066 if (error) {
1067 iavf_stop_locked(sc);
1068 return error;
1069 }
1070
1071 SET(ifp->if_flags, IFF_RUNNING);
1072 CLR(ifp->if_flags, IFF_OACTIVE);
1073
1074 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1075 iavf_wr(sc, I40E_VFINT_ITRN1(IAVF_ITR_RX, i), sc->sc_rx_itr);
1076 iavf_wr(sc, I40E_VFINT_ITRN1(IAVF_ITR_TX, i), sc->sc_tx_itr);
1077 }
1078 iavf_wr(sc, I40E_VFINT_ITR01(IAVF_ITR_RX), sc->sc_rx_itr);
1079 iavf_wr(sc, I40E_VFINT_ITR01(IAVF_ITR_TX), sc->sc_tx_itr);
1080 iavf_wr(sc, I40E_VFINT_ITR01(IAVF_ITR_MISC), 0);
1081
1082 error = iavf_iff_locked(sc);
1083 if (error) {
1084 iavf_stop_locked(sc);
1085 return error;
1086 };
1087
1088 /* ETHERCAP_VLAN_HWFILTER can not be disabled */
1089 SET(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
1090
1091 callout_schedule(&sc->sc_tick, IAVF_TICK_INTERVAL);
1092 return 0;
1093 }
1094
/*
 * iavf_reinit --
 *	(Re)program the hardware for the current configuration: fill the
 *	Rx rings, configure the VSI queues, RSS hash enable/key/LUT via
 *	the virtchnl admin queue, enable per-queue interrupts, and enable
 *	the queues.  Called with sc_cfg_lock held.  Returns 0 on success
 *	or EIO if any admin queue command fails.
 */
static int
iavf_reinit(struct iavf_softc *sc)
{
	struct iavf_rx_ring *rxr;
	struct iavf_tx_ring *txr;
	unsigned int i;
	uint32_t reg;

	KASSERT(mutex_owned(&sc->sc_cfg_lock));

	sc->sc_reset_up = true;
	/* one MSI-X vector is reserved for the misc interrupt */
	sc->sc_nqueue_pairs = MIN(sc->sc_nqps_alloc, sc->sc_nintrs - 1);

	for (i = 0; i < sc->sc_nqueue_pairs; i++) {
		rxr = sc->sc_qps[i].qp_rxr;
		txr = sc->sc_qps[i].qp_txr;

		iavf_rxfill(sc, rxr);
		txr->txr_watchdog = IAVF_WATCHDOG_STOP;
	}

	if (iavf_config_vsi_queues(sc) != 0)
		return EIO;

	if (iavf_config_hena(sc) != 0)
		return EIO;

	iavf_config_rss_key(sc);
	iavf_config_rss_lut(sc);

	for (i = 0; i < sc->sc_nqueue_pairs; i++) {
		iavf_queue_intr_enable(sc, i);
	}
	/* unmask */
	reg = iavf_rd(sc, I40E_VFINT_DYN_CTL01);
	reg |= (IAVF_NOITR << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT);
	iavf_wr(sc, I40E_VFINT_DYN_CTL01, reg);

	if (iavf_queue_select(sc, IAVF_VC_OP_ENABLE_QUEUES) != 0)
		return EIO;

	return 0;
}
1138
1139 static void
1140 iavf_stop(struct ifnet *ifp, int disable)
1141 {
1142 struct iavf_softc *sc;
1143
1144 sc = ifp->if_softc;
1145 mutex_enter(&sc->sc_cfg_lock);
1146 iavf_stop_locked(sc);
1147 mutex_exit(&sc->sc_cfg_lock);
1148 }
1149
/*
 * iavf_stop_locked --
 *	Stop the interface: disable the queues via the admin queue
 *	(skipped while a VF reset is in progress), mask interrupts, and
 *	drain the Tx/Rx rings.  Called with sc_cfg_lock held.  If the
 *	queue-disable command fails, the device is marked dead and a VF
 *	reset is requested.
 */
static void
iavf_stop_locked(struct iavf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct iavf_rx_ring *rxr;
	struct iavf_tx_ring *txr;
	uint32_t reg;
	unsigned int i;

	KASSERT(mutex_owned(&sc->sc_cfg_lock));

	CLR(ifp->if_flags, IFF_RUNNING);
	sc->sc_reset_up = false;
	callout_stop(&sc->sc_tick);

	if (!sc->sc_resetting) {
		/* disable queues*/
		if (iavf_queue_select(sc, IAVF_VC_OP_DISABLE_QUEUES) != 0) {
			goto die;
		}
	}

	for (i = 0; i < sc->sc_nqueue_pairs; i++) {
		iavf_queue_intr_disable(sc, i);
	}

	/* mask interrupts */
	reg = iavf_rd(sc, I40E_VFINT_DYN_CTL01);
	reg |= I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK |
	    (IAVF_NOITR << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT);
	iavf_wr(sc, I40E_VFINT_DYN_CTL01, reg);

	/* clean the rings and wait for in-flight deferred processing */
	for (i = 0; i < sc->sc_nqueue_pairs; i++) {
		rxr = sc->sc_qps[i].qp_rxr;
		txr = sc->sc_qps[i].qp_txr;

		mutex_enter(&rxr->rxr_lock);
		iavf_rxr_clean(sc, rxr);
		mutex_exit(&rxr->rxr_lock);

		mutex_enter(&txr->txr_lock);
		iavf_txr_clean(sc, txr);
		mutex_exit(&txr->txr_lock);

		workqueue_wait(sc->sc_workq_txrx,
		    &sc->sc_qps[i].qp_work);
	}

	return;
die:
	if (!sc->sc_dead) {
		sc->sc_dead = true;
		log(LOG_INFO, "%s: Request VF reset\n", ifp->if_xname);

		iavf_work_set(&sc->sc_reset_task, iavf_reset_request, sc);
		iavf_work_add(sc->sc_workq, &sc->sc_reset_task);
	}
	log(LOG_CRIT, "%s: failed to shut down rings\n", ifp->if_xname);
}
1209
1210 static int
1211 iavf_watchdog(struct iavf_tx_ring *txr)
1212 {
1213 struct iavf_softc *sc;
1214
1215 sc = txr->txr_sc;
1216
1217 mutex_enter(&txr->txr_lock);
1218
1219 if (txr->txr_watchdog == IAVF_WATCHDOG_STOP
1220 || --txr->txr_watchdog > 0) {
1221 mutex_exit(&txr->txr_lock);
1222 return 0;
1223 }
1224
1225 txr->txr_watchdog = IAVF_WATCHDOG_STOP;
1226 txr->txr_watchdogto.ev_count++;
1227 mutex_exit(&txr->txr_lock);
1228
1229 device_printf(sc->sc_dev, "watchdog timeout on queue %d\n",
1230 txr->txr_qid);
1231 return 1;
1232 }
1233
1234 static void
1235 iavf_watchdog_timeout(void *xsc)
1236 {
1237 struct iavf_softc *sc;
1238 struct ifnet *ifp;
1239
1240 sc = xsc;
1241 ifp = &sc->sc_ec.ec_if;
1242
1243 mutex_enter(&sc->sc_cfg_lock);
1244 if (ISSET(ifp->if_flags, IFF_RUNNING))
1245 iavf_init_locked(sc);
1246 mutex_exit(&sc->sc_cfg_lock);
1247 }
1248
1249 static int
1250 iavf_media_change(struct ifnet *ifp)
1251 {
1252 struct iavf_softc *sc;
1253 struct ifmedia *ifm;
1254
1255 sc = ifp->if_softc;
1256 ifm = &sc->sc_media;
1257
1258 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1259 return EINVAL;
1260
1261 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1262 case IFM_AUTO:
1263 break;
1264 default:
1265 return EINVAL;
1266 }
1267
1268 return 0;
1269 }
1270
1271 static void
1272 iavf_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1273 {
1274 struct iavf_softc *sc = ifp->if_softc;
1275
1276 KASSERT(mutex_owned(&sc->sc_cfg_lock));
1277
1278 ifmr->ifm_status = sc->sc_media_status;
1279 ifmr->ifm_active = sc->sc_media_active;
1280 }
1281
1282 static int
1283 iavf_ifflags_cb(struct ethercom *ec)
1284 {
1285 struct ifnet *ifp = &ec->ec_if;
1286 struct iavf_softc *sc = ifp->if_softc;
1287
1288 /* vlan hwfilter can not be disabled */
1289 SET(ec->ec_capenable, ETHERCAP_VLAN_HWFILTER);
1290
1291 return iavf_iff(sc);
1292 }
1293
1294 static int
1295 iavf_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
1296 {
1297 struct ifnet *ifp = &ec->ec_if;
1298 struct iavf_softc *sc = ifp->if_softc;
1299 int rv;
1300
1301 mutex_enter(&sc->sc_cfg_lock);
1302
1303 if (sc->sc_resetting) {
1304 mutex_exit(&sc->sc_cfg_lock);
1305
1306 /* all vlan id was already removed */
1307 if (!set)
1308 return 0;
1309
1310 return ENXIO;
1311 }
1312
1313 /* ETHERCAP_VLAN_HWFILTER can not be disabled */
1314 SET(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
1315
1316 if (set) {
1317 rv = iavf_config_vlan_id(sc, vid, IAVF_VC_OP_ADD_VLAN);
1318 if (!ISSET(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWTAGGING)) {
1319 iavf_config_vlan_stripping(sc,
1320 sc->sc_ec.ec_capenable);
1321 }
1322 } else {
1323 rv = iavf_config_vlan_id(sc, vid, IAVF_VC_OP_DEL_VLAN);
1324 }
1325
1326 mutex_exit(&sc->sc_cfg_lock);
1327
1328 if (rv != 0)
1329 return EIO;
1330
1331 return 0;
1332 }
1333
/*
 * iavf_ioctl --
 *	if_ioctl callback.  Handles MTU changes and multicast list
 *	updates directly; everything else goes to ether_ioctl().  An
 *	ENETRESET result from any path triggers reprogramming of the
 *	receive filter via iavf_iff().
 */
static int
iavf_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct ifreq *ifr = (struct ifreq *)data;
	struct iavf_softc *sc = (struct iavf_softc *)ifp->if_softc;
	const struct sockaddr *sa;
	uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
	int s, error = 0;
	unsigned int nmtu;

	switch (cmd) {
	case SIOCSIFMTU:
		nmtu = ifr->ifr_mtu;

		if (nmtu < IAVF_MIN_MTU || nmtu > IAVF_MAX_MTU) {
			error = EINVAL;
			break;
		}
		if (ifp->if_mtu != nmtu) {
			s = splnet();
			error = ether_ioctl(ifp, cmd, data);
			splx(s);
			/* reinitialize to apply the new MTU */
			if (error == ENETRESET)
				error = iavf_init(ifp);
		}
		break;
	case SIOCADDMULTI:
		sa = ifreq_getaddr(SIOCADDMULTI, ifr);
		if (ether_addmulti(sa, &sc->sc_ec) == ENETRESET) {
			error = ether_multiaddr(sa, addrlo, addrhi);
			if (error != 0)
				return error;

			error = iavf_add_multi(sc, addrlo, addrhi);
			if (error != 0 && error != ENETRESET) {
				/* roll back the software multicast list */
				ether_delmulti(sa, &sc->sc_ec);
				error = EIO;
			}
		}
		break;

	case SIOCDELMULTI:
		sa = ifreq_getaddr(SIOCDELMULTI, ifr);
		if (ether_delmulti(sa, &sc->sc_ec) == ENETRESET) {
			error = ether_multiaddr(sa, addrlo, addrhi);
			if (error != 0)
				return error;

			error = iavf_del_multi(sc, addrlo, addrhi);
		}
		break;

	default:
		s = splnet();
		error = ether_ioctl(ifp, cmd, data);
		splx(s);
	}

	if (error == ENETRESET)
		error = iavf_iff(sc);

	return error;
}
1397
1398 static int
1399 iavf_iff(struct iavf_softc *sc)
1400 {
1401 int error;
1402
1403 mutex_enter(&sc->sc_cfg_lock);
1404 error = iavf_iff_locked(sc);
1405 mutex_exit(&sc->sc_cfg_lock);
1406
1407 return error;
1408 }
1409
1410 static int
1411 iavf_iff_locked(struct iavf_softc *sc)
1412 {
1413 struct ifnet *ifp = &sc->sc_ec.ec_if;
1414 int unicast, multicast;
1415 const uint8_t *enaddr;
1416
1417 KASSERT(mutex_owned(&sc->sc_cfg_lock));
1418
1419 if (!ISSET(ifp->if_flags, IFF_RUNNING))
1420 return 0;
1421
1422 unicast = 0;
1423 multicast = 0;
1424 if (ISSET(ifp->if_flags, IFF_PROMISC)) {
1425 unicast = 1;
1426 multicast = 1;
1427 } else if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
1428 multicast = 1;
1429 }
1430
1431 iavf_config_promisc_mode(sc, unicast, multicast);
1432
1433 iavf_config_vlan_stripping(sc, sc->sc_ec.ec_capenable);
1434
1435 enaddr = CLLADDR(ifp->if_sadl);
1436 if (memcmp(enaddr, sc->sc_enaddr_added, ETHER_ADDR_LEN) != 0) {
1437 if (!iavf_is_etheranyaddr(sc->sc_enaddr_added)) {
1438 iavf_eth_addr(sc, sc->sc_enaddr_added,
1439 IAVF_VC_OP_DEL_ETH_ADDR);
1440 }
1441 memcpy(sc->sc_enaddr_added, enaddr, ETHER_ADDR_LEN);
1442 iavf_eth_addr(sc, enaddr, IAVF_VC_OP_ADD_ETH_ADDR);
1443 }
1444
1445 return 0;
1446 }
1447
1448 static const struct iavf_product *
1449 iavf_lookup(const struct pci_attach_args *pa)
1450 {
1451 const struct iavf_product *iavfp;
1452
1453 for (iavfp = iavf_products; iavfp->vendor_id != 0; iavfp++) {
1454 if (PCI_VENDOR(pa->pa_id) == iavfp->vendor_id &&
1455 PCI_PRODUCT(pa->pa_id) == iavfp->product_id)
1456 return iavfp;
1457 }
1458
1459 return NULL;
1460 }
1461
1462 static enum i40e_mac_type
1463 iavf_mactype(pci_product_id_t id)
1464 {
1465
1466 switch (id) {
1467 case PCI_PRODUCT_INTEL_XL710_VF:
1468 case PCI_PRODUCT_INTEL_XL710_VF_HV:
1469 return I40E_MAC_VF;
1470 case PCI_PRODUCT_INTEL_X722_VF:
1471 return I40E_MAC_X722_VF;
1472 }
1473
1474 return I40E_MAC_GENERIC;
1475 }
1476
1477 static const struct iavf_link_speed *
1478 iavf_find_link_speed(struct iavf_softc *sc, uint32_t link_speed)
1479 {
1480 size_t i;
1481
1482 for (i = 0; i < __arraycount(iavf_link_speeds); i++) {
1483 if (link_speed & (1 << i))
1484 return (&iavf_link_speeds[i]);
1485 }
1486
1487 return NULL;
1488 }
1489
1490 static void
1491 iavf_pci_csr_setup(pci_chipset_tag_t pc, pcitag_t tag)
1492 {
1493 pcireg_t csr;
1494
1495 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
1496 csr |= (PCI_COMMAND_MASTER_ENABLE |
1497 PCI_COMMAND_MEM_ENABLE);
1498 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
1499 }
1500
1501 static int
1502 iavf_wait_active(struct iavf_softc *sc)
1503 {
1504 int tries;
1505 uint32_t reg;
1506
1507 for (tries = 0; tries < 100; tries++) {
1508 reg = iavf_rd(sc, I40E_VFGEN_RSTAT) &
1509 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1510 if (reg == IAVF_VFR_VFACTIVE ||
1511 reg == IAVF_VFR_COMPLETED)
1512 return 0;
1513
1514 delaymsec(10);
1515 }
1516
1517 return -1;
1518 }
1519
1520 static bool
1521 iavf_is_etheranyaddr(const uint8_t *enaddr)
1522 {
1523 static const uint8_t etheranyaddr[ETHER_ADDR_LEN] = {
1524 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1525 };
1526
1527 if (memcmp(enaddr, etheranyaddr, ETHER_ADDR_LEN) != 0)
1528 return false;
1529
1530 return true;
1531 }
1532
1533 static void
1534 iavf_prepare_fakeaddr(struct iavf_softc *sc)
1535 {
1536 uint64_t rndval;
1537
1538 if (!iavf_is_etheranyaddr(sc->sc_enaddr_fake))
1539 return;
1540
1541 rndval = cprng_strong64();
1542
1543 memcpy(sc->sc_enaddr_fake, &rndval, sizeof(sc->sc_enaddr_fake));
1544 sc->sc_enaddr_fake[0] &= 0xFE;
1545 sc->sc_enaddr_fake[0] |= 0x02;
1546 }
1547
/*
 * iavf_replace_lla --
 *	Replace the interface's link-layer address "prev" with "next".
 *	Looks up (or creates) the ifaddr holding "next", activates it as
 *	the source address if "prev" was active, and removes the old
 *	ifaddr unless it is the permanent hardware address.  Returns
 *	ENETRESET if the interface is running and must be reprogrammed,
 *	0 on success or if nothing changed, or an errno on failure.
 *	Called with the ifnet lock held.
 */
static int
iavf_replace_lla(struct ifnet *ifp, const uint8_t *prev, const uint8_t *next)
{
	union {
		struct sockaddr sa;
		struct sockaddr_dl sdl;
		struct sockaddr_storage ss;
	} u;
	struct psref psref_prev, psref_next;
	struct ifaddr *ifa_prev, *ifa_next;
	const struct sockaddr_dl *nsdl;
	int s, error;

	KASSERT(IFNET_LOCKED(ifp));

	error = 0;
	ifa_prev = ifa_next = NULL;

	/* no change requested */
	if (memcmp(prev, next, ETHER_ADDR_LEN) == 0) {
		goto done;
	}

	/* build a datalink sockaddr for "prev" and find its ifaddr */
	if (sockaddr_dl_init(&u.sdl, sizeof(u.ss), ifp->if_index,
	    ifp->if_type, ifp->if_xname, strlen(ifp->if_xname),
	    prev, ETHER_ADDR_LEN) == NULL) {
		error = EINVAL;
		goto done;
	}

	s = pserialize_read_enter();
	IFADDR_READER_FOREACH(ifa_prev, ifp) {
		if (sockaddr_cmp(&u.sa, ifa_prev->ifa_addr) == 0) {
			ifa_acquire(ifa_prev, &psref_prev);
			break;
		}
	}
	pserialize_read_exit(s);

	/* build a datalink sockaddr for "next" and find its ifaddr */
	if (sockaddr_dl_init(&u.sdl, sizeof(u.ss), ifp->if_index,
	    ifp->if_type, ifp->if_xname, strlen(ifp->if_xname),
	    next, ETHER_ADDR_LEN) == NULL) {
		error = EINVAL;
		goto done;
	}

	s = pserialize_read_enter();
	IFADDR_READER_FOREACH(ifa_next, ifp) {
		if (sockaddr_cmp(&u.sa, ifa_next->ifa_addr) == 0) {
			ifa_acquire(ifa_next, &psref_next);
			break;
		}
	}
	pserialize_read_exit(s);

	if (ifa_next == NULL) {
		/* "next" not yet on the interface; create its ifaddr */
		nsdl = &u.sdl;
		ifa_next = if_dl_create(ifp, &nsdl);
		if (ifa_next == NULL) {
			error = ENOMEM;
			goto done;
		}

		s = pserialize_read_enter();
		ifa_acquire(ifa_next, &psref_next);
		pserialize_read_exit(s);

		sockaddr_copy(ifa_next->ifa_addr,
		    ifa_next->ifa_addr->sa_len, &u.sa);
		ifa_insert(ifp, ifa_next);
	} else {
		nsdl = NULL;
	}

	/* switch the active source address if "prev" was active */
	if (ifa_prev != NULL && ifa_prev == ifp->if_dl) {
		if_activate_sadl(ifp, ifa_next, nsdl);
	}

	ifa_release(ifa_next, &psref_next);
	ifa_next = NULL;

	/* drop the old ifaddr unless it is the hardware address */
	if (ifa_prev != NULL && ifa_prev != ifp->if_hwdl) {
		ifaref(ifa_prev);
		ifa_release(ifa_prev, &psref_prev);
		ifa_remove(ifp, ifa_prev);
		KASSERTMSG(ifa_prev->ifa_refcnt == 1, "ifa_refcnt=%d",
		    ifa_prev->ifa_refcnt);
		ifafree(ifa_prev);
		ifa_prev = NULL;
	}

	if (ISSET(ifp->if_flags, IFF_RUNNING))
		error = ENETRESET;

done:
	if (ifa_prev != NULL)
		ifa_release(ifa_prev, &psref_prev);
	if (ifa_next != NULL)
		ifa_release(ifa_next, &psref_next);

	return error;
}
1649 static int
1650 iavf_add_multi(struct iavf_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
1651 {
1652 struct ifnet *ifp = &sc->sc_ec.ec_if;
1653 int rv;
1654
1655 if (ISSET(ifp->if_flags, IFF_ALLMULTI))
1656 return 0;
1657
1658 if (memcmp(addrlo, addrhi, ETHER_ADDR_LEN) != 0) {
1659 iavf_del_all_multi(sc);
1660 SET(ifp->if_flags, IFF_ALLMULTI);
1661 return ENETRESET;
1662 }
1663
1664 rv = iavf_eth_addr(sc, addrlo, IAVF_VC_OP_ADD_ETH_ADDR);
1665
1666 if (rv == ENOSPC) {
1667 iavf_del_all_multi(sc);
1668 SET(ifp->if_flags, IFF_ALLMULTI);
1669 return ENETRESET;
1670 }
1671
1672 return rv;
1673 }
1674
/*
 * iavf_del_multi --
 *	Remove a multicast filter.  When not in ALLMULTI mode, delete
 *	the single address.  In ALLMULTI mode, check whether the
 *	remaining list can again be expressed as individual filters; if
 *	so, program every entry and return ENETRESET to leave ALLMULTI,
 *	rolling the additions back if the hardware runs out of space.
 */
static int
iavf_del_multi(struct iavf_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct ethercom *ec = &sc->sc_ec;
	struct ether_multi *enm, *enm_last;
	struct ether_multistep step;
	int error, rv = 0;

	if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
		/* ranges were never programmed as individual filters */
		if (memcmp(addrlo, addrhi, ETHER_ADDR_LEN) != 0)
			return 0;

		iavf_eth_addr(sc, addrlo, IAVF_VC_OP_DEL_ETH_ADDR);
		return 0;
	}

	/* any remaining range entry forces us to stay in ALLMULTI */
	ETHER_LOCK(ec);
	for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
	    ETHER_NEXT_MULTI(step, enm)) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0) {
			goto out;
		}
	}

	/* try to program every remaining address as its own filter */
	for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
	    ETHER_NEXT_MULTI(step, enm)) {
		error = iavf_eth_addr(sc, enm->enm_addrlo,
		    IAVF_VC_OP_ADD_ETH_ADDR);
		if (error != 0)
			break;
	}

	if (enm != NULL) {
		/* out of filter space; undo the filters added above */
		enm_last = enm;
		for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
		    ETHER_NEXT_MULTI(step, enm)) {
			if (enm == enm_last)
				break;

			iavf_eth_addr(sc, enm->enm_addrlo,
			    IAVF_VC_OP_DEL_ETH_ADDR);
		}
	} else {
		/* everything fit; leave ALLMULTI mode */
		CLR(ifp->if_flags, IFF_ALLMULTI);
		rv = ENETRESET;
	}

out:
	ETHER_UNLOCK(ec);
	return rv;
}
1728
1729 static void
1730 iavf_del_all_multi(struct iavf_softc *sc)
1731 {
1732 struct ethercom *ec = &sc->sc_ec;
1733 struct ether_multi *enm;
1734 struct ether_multistep step;
1735
1736 ETHER_LOCK(ec);
1737 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1738 ETHER_NEXT_MULTI(step, enm)) {
1739 iavf_eth_addr(sc, enm->enm_addrlo,
1740 IAVF_VC_OP_DEL_ETH_ADDR);
1741 }
1742 ETHER_UNLOCK(ec);
1743 }
1744
/*
 * iavf_setup_interrupts --
 *	Allocate and establish MSI-X interrupts: vector 0 for the misc
 *	(admin queue) interrupt, and one vector per queue pair, each
 *	distributed to a CPU in round-robin order (best effort).
 *	Returns 0 on success, -1 on failure after releasing anything
 *	partially set up.
 */
static int
iavf_setup_interrupts(struct iavf_softc *sc)
{
	struct pci_attach_args *pa;
	kcpuset_t *affinity = NULL;
	char intrbuf[PCI_INTRSTR_LEN], xnamebuf[32];
	char const *intrstr;
	int counts[PCI_INTR_TYPE_SIZE];
	int error, affinity_to;
	unsigned int vector, qid, num;

	/* queue pairs + misc interrupt */
	num = sc->sc_nqps_alloc + 1;

	/* NOTE(review): num is unsigned, so "<= 0" only catches 0 */
	num = MIN(num, iavf_calc_msix_count(sc));
	if (num <= 0) {
		return -1;
	}

	KASSERT(sc->sc_nqps_alloc > 0);
	num = MIN(num, sc->sc_nqps_alloc + 1);

	pa = &sc->sc_pa;
	memset(counts, 0, sizeof(counts));
	counts[PCI_INTR_TYPE_MSIX] = num;

	error = pci_intr_alloc(pa, &sc->sc_ihp, counts, PCI_INTR_TYPE_MSIX);
	if (error != 0) {
		IAVF_LOG(sc, LOG_WARNING, "couldn't allocate interrupts\n");
		return -1;
	}

	KASSERT(pci_intr_type(pa->pa_pc, sc->sc_ihp[0]) == PCI_INTR_TYPE_MSIX);

	/* accept fewer vectors than requested */
	if (counts[PCI_INTR_TYPE_MSIX] < 1) {
		IAVF_LOG(sc, LOG_ERR, "couldn't allocate interrupts\n");
	} else if (counts[PCI_INTR_TYPE_MSIX] != (int)num) {
		IAVF_LOG(sc, LOG_DEBUG,
		    "request %u interrupts, but allocate %d interrupts\n",
		    num, counts[PCI_INTR_TYPE_MSIX]);
		num = counts[PCI_INTR_TYPE_MSIX];
	}

	sc->sc_ihs = kmem_zalloc(sizeof(sc->sc_ihs[0]) * num, KM_NOSLEEP);
	if (sc->sc_ihs == NULL) {
		IAVF_LOG(sc, LOG_ERR,
		    "couldn't allocate memory for interrupts\n");
		goto fail;
	}

	/* vector #0 is Misc interrupt */
	vector = 0;
	pci_intr_setattr(pa->pa_pc, &sc->sc_ihp[vector], PCI_INTR_MPSAFE, true);
	intrstr = pci_intr_string(pa->pa_pc, sc->sc_ihp[vector],
	    intrbuf, sizeof(intrbuf));
	snprintf(xnamebuf, sizeof(xnamebuf), "%s-Misc",
	    device_xname(sc->sc_dev));

	sc->sc_ihs[vector] = pci_intr_establish_xname(pa->pa_pc,
	    sc->sc_ihp[vector], IPL_NET, iavf_intr, sc, xnamebuf);
	if (sc->sc_ihs[vector] == NULL) {
		IAVF_LOG(sc, LOG_WARNING,
		    "unable to establish interrupt at %s", intrstr);
		goto fail;
	}

	/* vectors #1.. handle the Tx/Rx queue pairs */
	kcpuset_create(&affinity, false);
	affinity_to = 0;
	qid = 0;
	for (vector = 1; vector < num; vector++) {
		pci_intr_setattr(pa->pa_pc, &sc->sc_ihp[vector],
		    PCI_INTR_MPSAFE, true);
		intrstr = pci_intr_string(pa->pa_pc, sc->sc_ihp[vector],
		    intrbuf, sizeof(intrbuf));
		snprintf(xnamebuf, sizeof(xnamebuf), "%s-TXRX%u",
		    device_xname(sc->sc_dev), qid);

		sc->sc_ihs[vector] = pci_intr_establish_xname(pa->pa_pc,
		    sc->sc_ihp[vector], IPL_NET, iavf_queue_intr,
		    (void *)&sc->sc_qps[qid], xnamebuf);
		if (sc->sc_ihs[vector] == NULL) {
			IAVF_LOG(sc, LOG_WARNING,
			    "unable to establish interrupt at %s\n", intrstr);
			goto fail;
		}

		/* spread queue interrupts over CPUs; failure is non-fatal */
		kcpuset_zero(affinity);
		kcpuset_set(affinity, affinity_to);
		error = interrupt_distribute(sc->sc_ihs[vector],
		    affinity, NULL);

		if (error == 0) {
			IAVF_LOG(sc, LOG_INFO,
			    "for TXRX%d interrupt at %s, affinity to %d\n",
			    qid, intrstr, affinity_to);
		} else {
			IAVF_LOG(sc, LOG_INFO,
			    "for TXRX%d interrupt at %s\n",
			    qid, intrstr);
		}

		qid++;
		affinity_to = (affinity_to + 1) % ncpu;
	}

	/* place the misc interrupt on the next CPU in the rotation */
	vector = 0;
	kcpuset_zero(affinity);
	kcpuset_set(affinity, affinity_to);
	intrstr = pci_intr_string(pa->pa_pc, sc->sc_ihp[vector],
	    intrbuf, sizeof(intrbuf));
	error = interrupt_distribute(sc->sc_ihs[vector], affinity, NULL);
	if (error == 0) {
		IAVF_LOG(sc, LOG_INFO,
		    "for Misc interrupt at %s, affinity to %d\n",
		    intrstr, affinity_to);
	} else {
		IAVF_LOG(sc, LOG_INFO,
		    "for MISC interrupt at %s\n", intrstr);
	}

	kcpuset_destroy(affinity);

	sc->sc_nintrs = num;
	return 0;

fail:
	if (affinity != NULL)
		kcpuset_destroy(affinity);

	if (sc->sc_ihs != NULL) {
		for (vector = 0; vector < num; vector++) {
			if (sc->sc_ihs[vector] == NULL)
				continue;
			pci_intr_disestablish(pa->pa_pc, sc->sc_ihs[vector]);
		}
		kmem_free(sc->sc_ihs, sizeof(sc->sc_ihs[0]) * num);
	}
	pci_intr_release(pa->pa_pc, sc->sc_ihp, num);

	return -1;
}
1886
1887 static void
1888 iavf_teardown_interrupts(struct iavf_softc *sc)
1889 {
1890 struct pci_attach_args *pa;
1891 unsigned int i;
1892
1893 if (sc->sc_ihs == NULL)
1894 return;
1895
1896 pa = &sc->sc_pa;
1897
1898 for (i = 0; i < sc->sc_nintrs; i++) {
1899 pci_intr_disestablish(pa->pa_pc, sc->sc_ihs[i]);
1900 }
1901
1902 kmem_free(sc->sc_ihs, sizeof(sc->sc_ihs[0]) * sc->sc_nintrs);
1903 sc->sc_ihs = NULL;
1904
1905 pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs);
1906 sc->sc_nintrs = 0;
1907 }
1908
1909 static int
1910 iavf_setup_sysctls(struct iavf_softc *sc)
1911 {
1912 const char *devname;
1913 struct sysctllog **log;
1914 const struct sysctlnode *rnode, *rxnode, *txnode;
1915 int error;
1916
1917 log = &sc->sc_sysctllog;
1918 devname = device_xname(sc->sc_dev);
1919
1920 error = sysctl_createv(log, 0, NULL, &rnode,
1921 0, CTLTYPE_NODE, devname,
1922 SYSCTL_DESCR("iavf information and settings"),
1923 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
1924 if (error)
1925 goto out;
1926
1927 error = sysctl_createv(log, 0, &rnode, NULL,
1928 CTLFLAG_READWRITE, CTLTYPE_BOOL, "txrx_workqueue",
1929 SYSCTL_DESCR("Use workqueue for packet processing"),
1930 NULL, 0, &sc->sc_txrx_workqueue, 0, CTL_CREATE, CTL_EOL);
1931 if (error)
1932 goto out;
1933
1934 error = sysctl_createv(log, 0, &rnode, NULL,
1935 CTLFLAG_READWRITE, CTLTYPE_INT, "debug_level",
1936 SYSCTL_DESCR("Debug level"),
1937 NULL, 0, &sc->sc_debuglevel, 0, CTL_CREATE, CTL_EOL);
1938 if (error)
1939 goto out;
1940
1941 error = sysctl_createv(log, 0, &rnode, &rxnode,
1942 0, CTLTYPE_NODE, "rx",
1943 SYSCTL_DESCR("iavf information and settings for Rx"),
1944 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
1945 if (error)
1946 goto out;
1947
1948 error = sysctl_createv(log, 0, &rxnode, NULL,
1949 CTLFLAG_READWRITE, CTLTYPE_INT, "itr",
1950 SYSCTL_DESCR("Interrupt Throttling"),
1951 iavf_sysctl_itr_handler, 0,
1952 (void *)sc, 0, CTL_CREATE, CTL_EOL);
1953 if (error)
1954 goto out;
1955
1956 error = sysctl_createv(log, 0, &rxnode, NULL,
1957 CTLFLAG_READONLY, CTLTYPE_INT, "descriptor_num",
1958 SYSCTL_DESCR("descriptor size"),
1959 NULL, 0, &sc->sc_rx_ring_ndescs, 0, CTL_CREATE, CTL_EOL);
1960 if (error)
1961 goto out;
1962
1963 error = sysctl_createv(log, 0, &rxnode, NULL,
1964 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
1965 SYSCTL_DESCR("max number of Rx packets"
1966 " to process for interrupt processing"),
1967 NULL, 0, &sc->sc_rx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
1968 if (error)
1969 goto out;
1970
1971 error = sysctl_createv(log, 0, &rxnode, NULL,
1972 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
1973 SYSCTL_DESCR("max number of Rx packets"
1974 " to process for deferred processing"),
1975 NULL, 0, &sc->sc_rx_process_limit, 0, CTL_CREATE, CTL_EOL);
1976 if (error)
1977 goto out;
1978
1979 error = sysctl_createv(log, 0, &rnode, &txnode,
1980 0, CTLTYPE_NODE, "tx",
1981 SYSCTL_DESCR("iavf information and settings for Tx"),
1982 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
1983 if (error)
1984 goto out;
1985
1986 error = sysctl_createv(log, 0, &txnode, NULL,
1987 CTLFLAG_READWRITE, CTLTYPE_INT, "itr",
1988 SYSCTL_DESCR("Interrupt Throttling"),
1989 iavf_sysctl_itr_handler, 0,
1990 (void *)sc, 0, CTL_CREATE, CTL_EOL);
1991 if (error)
1992 goto out;
1993
1994 error = sysctl_createv(log, 0, &txnode, NULL,
1995 CTLFLAG_READONLY, CTLTYPE_INT, "descriptor_num",
1996 SYSCTL_DESCR("the number of Tx descriptors"),
1997 NULL, 0, &sc->sc_tx_ring_ndescs, 0, CTL_CREATE, CTL_EOL);
1998 if (error)
1999 goto out;
2000
2001 error = sysctl_createv(log, 0, &txnode, NULL,
2002 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
2003 SYSCTL_DESCR("max number of Tx packets"
2004 " to process for interrupt processing"),
2005 NULL, 0, &sc->sc_tx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
2006 if (error)
2007 goto out;
2008
2009 error = sysctl_createv(log, 0, &txnode, NULL,
2010 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
2011 SYSCTL_DESCR("max number of Tx packets"
2012 " to process for deferred processing"),
2013 NULL, 0, &sc->sc_tx_process_limit, 0, CTL_CREATE, CTL_EOL);
2014 if (error)
2015 goto out;
2016 out:
2017 return error;
2018 }
2019
/*
 * iavf_teardown_sysctls --
 *	Destroy all sysctl nodes created by iavf_setup_sysctls().
 */
static void
iavf_teardown_sysctls(struct iavf_softc *sc)
{

	sysctl_teardown(&sc->sc_sysctllog);
}
2026
2027 static int
2028 iavf_setup_stats(struct iavf_softc *sc)
2029 {
2030 struct iavf_stat_counters *isc;
2031 const char *dn;
2032
2033 dn = device_xname(sc->sc_dev);
2034 isc = &sc->sc_stat_counters;
2035
2036 iavf_evcnt_attach(&isc->isc_rx_bytes, dn, "Rx bytes");
2037 iavf_evcnt_attach(&isc->isc_rx_unicast, dn, "Rx unicast");
2038 iavf_evcnt_attach(&isc->isc_rx_multicast, dn, "Rx multicast");
2039 iavf_evcnt_attach(&isc->isc_rx_broadcast, dn, "Rx broadcast");
2040 iavf_evcnt_attach(&isc->isc_rx_discards, dn, "Rx discards");
2041 iavf_evcnt_attach(&isc->isc_rx_unknown_protocol,
2042 dn, "Rx unknown protocol");
2043
2044 iavf_evcnt_attach(&isc->isc_tx_bytes, dn, "Tx bytes");
2045 iavf_evcnt_attach(&isc->isc_tx_unicast, dn, "Tx unicast");
2046 iavf_evcnt_attach(&isc->isc_tx_multicast, dn, "Tx multicast");
2047 iavf_evcnt_attach(&isc->isc_tx_broadcast, dn, "Tx broadcast");
2048 iavf_evcnt_attach(&isc->isc_tx_discards, dn, "Tx discards");
2049 iavf_evcnt_attach(&isc->isc_tx_errors, dn, "Tx errors");
2050
2051 return 0;
2052 }
2053
2054 static void
2055 iavf_teardown_stats(struct iavf_softc *sc)
2056 {
2057 struct iavf_stat_counters *isc;
2058
2059 isc = &sc->sc_stat_counters;
2060
2061 evcnt_detach(&isc->isc_rx_bytes);
2062 evcnt_detach(&isc->isc_rx_unicast);
2063 evcnt_detach(&isc->isc_rx_multicast);
2064 evcnt_detach(&isc->isc_rx_broadcast);
2065 evcnt_detach(&isc->isc_rx_discards);
2066 evcnt_detach(&isc->isc_rx_unknown_protocol);
2067
2068 evcnt_detach(&isc->isc_tx_bytes);
2069 evcnt_detach(&isc->isc_tx_unicast);
2070 evcnt_detach(&isc->isc_tx_multicast);
2071 evcnt_detach(&isc->isc_tx_broadcast);
2072 evcnt_detach(&isc->isc_tx_discards);
2073 evcnt_detach(&isc->isc_tx_errors);
2074
2075 }
2076
/*
 * iavf_init_admin_queue --
 *	Program the admin (ATQ/ARQ) descriptor ring base addresses and
 *	lengths into the VF registers and verify the writes by readback.
 *	Returns 0 on success; on mismatch returns -1 with the rings
 *	disabled again.
 */
static int
iavf_init_admin_queue(struct iavf_softc *sc)
{
	uint32_t reg;

	sc->sc_atq_cons = 0;
	sc->sc_atq_prod = 0;

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
	    0, IXL_DMA_LEN(&sc->sc_atq),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
	    0, IXL_DMA_LEN(&sc->sc_arq),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	iavf_wr(sc, sc->sc_aq_regs->atq_head, 0);
	iavf_wr(sc, sc->sc_aq_regs->arq_head, 0);
	iavf_wr(sc, sc->sc_aq_regs->atq_tail, 0);
	iavf_wr(sc, sc->sc_aq_regs->arq_tail, 0);

	/* ensure the pointer resets land before the base/len writes */
	iavf_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);

	iavf_wr(sc, sc->sc_aq_regs->atq_bal,
	    ixl_dmamem_lo(&sc->sc_atq));
	iavf_wr(sc, sc->sc_aq_regs->atq_bah,
	    ixl_dmamem_hi(&sc->sc_atq));
	iavf_wr(sc, sc->sc_aq_regs->atq_len,
	    sc->sc_aq_regs->atq_len_enable | IAVF_AQ_NUM);

	iavf_wr(sc, sc->sc_aq_regs->arq_bal,
	    ixl_dmamem_lo(&sc->sc_arq));
	iavf_wr(sc, sc->sc_aq_regs->arq_bah,
	    ixl_dmamem_hi(&sc->sc_arq));
	iavf_wr(sc, sc->sc_aq_regs->arq_len,
	    sc->sc_aq_regs->arq_len_enable | IAVF_AQ_NUM);

	iavf_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);

	/* read back the base addresses to confirm the device accepted them */
	reg = iavf_rd(sc, sc->sc_aq_regs->atq_bal);
	if (reg != ixl_dmamem_lo(&sc->sc_atq))
		goto fail;

	reg = iavf_rd(sc, sc->sc_aq_regs->arq_bal);
	if (reg != ixl_dmamem_lo(&sc->sc_arq))
		goto fail;

	sc->sc_dead = false;
	return 0;

fail:
	iavf_wr(sc, sc->sc_aq_regs->atq_len, 0);
	iavf_wr(sc, sc->sc_aq_regs->arq_len, 0);
	return -1;
}
2131
/*
 * iavf_cleanup_admin_queue --
 *	Disable the admin queue rings in hardware, reset the software
 *	producer/consumer indices, clear the ring memory, and recycle all
 *	live command/receive buffers back to their idle lists.  Caller
 *	holds sc_adminq_lock where required by the aqb list accessors.
 */
static void
iavf_cleanup_admin_queue(struct iavf_softc *sc)
{
	struct ixl_aq_buf *aqb;

	iavf_wr(sc, sc->sc_aq_regs->atq_head, 0);
	iavf_wr(sc, sc->sc_aq_regs->arq_head, 0);
	iavf_wr(sc, sc->sc_aq_regs->atq_tail, 0);
	iavf_wr(sc, sc->sc_aq_regs->arq_tail, 0);

	iavf_wr(sc, sc->sc_aq_regs->atq_bal, 0);
	iavf_wr(sc, sc->sc_aq_regs->atq_bah, 0);
	iavf_wr(sc, sc->sc_aq_regs->atq_len, 0);

	iavf_wr(sc, sc->sc_aq_regs->arq_bal, 0);
	iavf_wr(sc, sc->sc_aq_regs->arq_bah, 0);
	iavf_wr(sc, sc->sc_aq_regs->arq_len, 0);
	iavf_flush(sc);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
	    0, IXL_DMA_LEN(&sc->sc_arq),
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
	    0, IXL_DMA_LEN(&sc->sc_atq),
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	sc->sc_atq_cons = 0;
	sc->sc_atq_prod = 0;
	sc->sc_arq_cons = 0;
	sc->sc_arq_prod = 0;

	memset(IXL_DMA_KVA(&sc->sc_arq), 0, IXL_DMA_LEN(&sc->sc_arq));
	memset(IXL_DMA_KVA(&sc->sc_atq), 0, IXL_DMA_LEN(&sc->sc_atq));

	/* move every in-flight buffer back to its idle list */
	while ((aqb = iavf_aqb_get_locked(&sc->sc_arq_live)) != NULL) {
		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, aqb->aqb_size,
		    BUS_DMASYNC_POSTREAD);
		iavf_aqb_put_locked(&sc->sc_arq_idle, aqb);
	}

	while ((aqb = iavf_aqb_get_locked(&sc->sc_atq_live)) != NULL) {
		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, aqb->aqb_size,
		    BUS_DMASYNC_POSTREAD);
		iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
	}
}
2178
2179 static unsigned int
2180 iavf_calc_msix_count(struct iavf_softc *sc)
2181 {
2182 struct pci_attach_args *pa;
2183 int count;
2184
2185 pa = &sc->sc_pa;
2186 count = pci_msix_count(pa->pa_pc, pa->pa_tag);
2187 if (count < 0) {
2188 IAVF_LOG(sc, LOG_DEBUG,"MSIX config error\n");
2189 count = 0;
2190 }
2191
2192 return MIN(sc->sc_max_vectors, (unsigned int)count);
2193 }
2194
2195 static unsigned int
2196 iavf_calc_queue_pair_size(struct iavf_softc *sc)
2197 {
2198 unsigned int nqp, nvec;
2199
2200 nvec = iavf_calc_msix_count(sc);
2201 if (sc->sc_max_vectors > 1) {
2202 /* decrease the number of misc interrupt */
2203 nvec -= 1;
2204 }
2205
2206 nqp = ncpu;
2207 nqp = MIN(nqp, sc->sc_nqps_vsi);
2208 nqp = MIN(nqp, nvec);
2209 nqp = MIN(nqp, (unsigned int)iavf_params.max_qps);
2210
2211 return nqp;
2212 }
2213
static struct iavf_tx_ring *
iavf_txr_alloc(struct iavf_softc *sc, unsigned int qid)
{
	struct iavf_tx_ring *txr;
	struct iavf_tx_map *maps;
	unsigned int i;
	int error;

	/*
	 * Allocate one TX ring for queue `qid': the ring softc, one DMA
	 * map per descriptor slot, the descriptor memory, a pcq for
	 * if_transmit, a deferred-transmit softint, and the per-ring
	 * event counters.  Returns NULL on failure with every partial
	 * allocation undone.
	 */
	txr = kmem_zalloc(sizeof(*txr), KM_NOSLEEP);
	if (txr == NULL)
		return NULL;

	maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_tx_ring_ndescs,
	    KM_NOSLEEP);
	if (maps == NULL)
		goto free_txr;

	/* DMA memory for the descriptor ring itself */
	if (iavf_dmamem_alloc(sc->sc_dmat, &txr->txr_mem,
	    sizeof(struct ixl_tx_desc) * sc->sc_tx_ring_ndescs,
	    IAVF_TX_QUEUE_ALIGN) != 0) {
		goto free_maps;
	}

	/* one multi-segment DMA map per descriptor slot for mbuf chains */
	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
		error = bus_dmamap_create(sc->sc_dmat, IAVF_TX_PKT_MAXSIZE,
		    IAVF_TX_PKT_DESCS, IAVF_TX_PKT_MAXSIZE, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &maps[i].txm_map);
		if (error)
			goto destroy_maps;
	}

	txr->txr_intrq = pcq_create(sc->sc_tx_ring_ndescs, KM_NOSLEEP);
	if (txr->txr_intrq == NULL)
		goto destroy_maps;

	txr->txr_si = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE,
	    iavf_deferred_transmit, txr);
	if (txr->txr_si == NULL)
		goto destroy_pcq;

	snprintf(txr->txr_name, sizeof(txr->txr_name), "%s-tx%d",
	    device_xname(sc->sc_dev), qid);

	iavf_evcnt_attach(&txr->txr_defragged,
	    txr->txr_name, "m_defrag successed");
	iavf_evcnt_attach(&txr->txr_defrag_failed,
	    txr->txr_name, "m_defrag failed");
	iavf_evcnt_attach(&txr->txr_pcqdrop,
	    txr->txr_name, "Dropped in pcq");
	iavf_evcnt_attach(&txr->txr_transmitdef,
	    txr->txr_name, "Deferred transmit");
	iavf_evcnt_attach(&txr->txr_watchdogto,
	    txr->txr_name, "Watchdog timedout on queue");
	iavf_evcnt_attach(&txr->txr_defer,
	    txr->txr_name, "Handled queue in softint/workqueue");

	evcnt_attach_dynamic(&txr->txr_intr, EVCNT_TYPE_INTR, NULL,
	    txr->txr_name, "Interrupt on queue");

	/* initialize ring indices and the tail doorbell register offset */
	txr->txr_qid = qid;
	txr->txr_sc = sc;
	txr->txr_maps = maps;
	txr->txr_prod = txr->txr_cons = 0;
	txr->txr_tail = I40E_QTX_TAIL1(qid);
	mutex_init(&txr->txr_lock, MUTEX_DEFAULT, IPL_NET);

	return txr;
	/* error unwinding, in reverse order of allocation */
destroy_pcq:
	pcq_destroy(txr->txr_intrq);
destroy_maps:
	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
		if (maps[i].txm_map == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, maps[i].txm_map);
	}

	iavf_dmamem_free(sc->sc_dmat, &txr->txr_mem);
free_maps:
	kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs);
free_txr:
	kmem_free(txr, sizeof(*txr));
	return NULL;
}
2297
2298 static void
2299 iavf_txr_free(struct iavf_softc *sc, struct iavf_tx_ring *txr)
2300 {
2301 struct iavf_tx_map *maps;
2302 unsigned int i;
2303
2304 maps = txr->txr_maps;
2305 if (maps != NULL) {
2306 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2307 if (maps[i].txm_map == NULL)
2308 continue;
2309 bus_dmamap_destroy(sc->sc_dmat, maps[i].txm_map);
2310 }
2311 kmem_free(txr->txr_maps,
2312 sizeof(maps[0]) * sc->sc_tx_ring_ndescs);
2313 txr->txr_maps = NULL;
2314 }
2315
2316 evcnt_detach(&txr->txr_defragged);
2317 evcnt_detach(&txr->txr_defrag_failed);
2318 evcnt_detach(&txr->txr_pcqdrop);
2319 evcnt_detach(&txr->txr_transmitdef);
2320 evcnt_detach(&txr->txr_watchdogto);
2321 evcnt_detach(&txr->txr_defer);
2322 evcnt_detach(&txr->txr_intr);
2323
2324 iavf_dmamem_free(sc->sc_dmat, &txr->txr_mem);
2325 softint_disestablish(txr->txr_si);
2326 pcq_destroy(txr->txr_intrq);
2327 mutex_destroy(&txr->txr_lock);
2328 kmem_free(txr, sizeof(*txr));
2329 }
2330
static struct iavf_rx_ring *
iavf_rxr_alloc(struct iavf_softc *sc, unsigned int qid)
{
	struct iavf_rx_ring *rxr;
	struct iavf_rx_map *maps;
	unsigned int i;
	int error;

	/*
	 * Allocate one RX ring for queue `qid': the ring softc, one DMA
	 * map per descriptor slot, the descriptor memory, and the
	 * per-ring event counters.  Returns NULL on failure with every
	 * partial allocation undone.
	 */
	rxr = kmem_zalloc(sizeof(*rxr), KM_NOSLEEP);
	if (rxr == NULL)
		return NULL;

	maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_rx_ring_ndescs,
	    KM_NOSLEEP);
	if (maps == NULL)
		goto free_rxr;

	/* DMA memory for the descriptor ring itself */
	if (iavf_dmamem_alloc(sc->sc_dmat, &rxr->rxr_mem,
	    sizeof(struct ixl_rx_rd_desc_32) * sc->sc_rx_ring_ndescs,
	    IAVF_RX_QUEUE_ALIGN) != 0)
		goto free_maps;

	/* one single-segment DMA map per descriptor slot */
	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
		error = bus_dmamap_create(sc->sc_dmat, IAVF_MCLBYTES,
		    1, IAVF_MCLBYTES, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &maps[i].rxm_map);
		if (error)
			goto destroy_maps;
	}

	snprintf(rxr->rxr_name, sizeof(rxr->rxr_name), "%s-rx%d",
	    device_xname(sc->sc_dev), qid);

	iavf_evcnt_attach(&rxr->rxr_mgethdr_failed,
	    rxr->rxr_name, "MGETHDR failed");
	iavf_evcnt_attach(&rxr->rxr_mgetcl_failed,
	    rxr->rxr_name, "MCLGET failed");
	iavf_evcnt_attach(&rxr->rxr_mbuf_load_failed,
	    rxr->rxr_name, "bus_dmamap_load_mbuf failed");
	iavf_evcnt_attach(&rxr->rxr_defer,
	    rxr->rxr_name, "Handled queue in softint/workqueue");

	evcnt_attach_dynamic(&rxr->rxr_intr, EVCNT_TYPE_INTR, NULL,
	    rxr->rxr_name, "Interrupt on queue");

	/* rxr_m_head/rxr_m_tail chain fragments of a multi-desc packet */
	rxr->rxr_qid = qid;
	rxr->rxr_sc = sc;
	rxr->rxr_cons = rxr->rxr_prod = 0;
	rxr->rxr_m_head = NULL;
	rxr->rxr_m_tail = &rxr->rxr_m_head;
	rxr->rxr_maps = maps;
	rxr->rxr_tail = I40E_QRX_TAIL1(qid);
	mutex_init(&rxr->rxr_lock, MUTEX_DEFAULT, IPL_NET);

	return rxr;

	/* error unwinding, in reverse order of allocation */
destroy_maps:
	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
		if (maps[i].rxm_map == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, maps[i].rxm_map);
	}
	iavf_dmamem_free(sc->sc_dmat, &rxr->rxr_mem);
free_maps:
	kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs);
free_rxr:
	kmem_free(rxr, sizeof(*rxr));

	return NULL;
}
2401
2402 static void
2403 iavf_rxr_free(struct iavf_softc *sc, struct iavf_rx_ring *rxr)
2404 {
2405 struct iavf_rx_map *maps;
2406 unsigned int i;
2407
2408 maps = rxr->rxr_maps;
2409 if (maps != NULL) {
2410 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2411 if (maps[i].rxm_map == NULL)
2412 continue;
2413 bus_dmamap_destroy(sc->sc_dmat, maps[i].rxm_map);
2414 }
2415 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs);
2416 rxr->rxr_maps = NULL;
2417 }
2418
2419 evcnt_detach(&rxr->rxr_mgethdr_failed);
2420 evcnt_detach(&rxr->rxr_mgetcl_failed);
2421 evcnt_detach(&rxr->rxr_mbuf_load_failed);
2422 evcnt_detach(&rxr->rxr_defer);
2423 evcnt_detach(&rxr->rxr_intr);
2424
2425 iavf_dmamem_free(sc->sc_dmat, &rxr->rxr_mem);
2426 mutex_destroy(&rxr->rxr_lock);
2427 kmem_free(rxr, sizeof(*rxr));
2428 }
2429
2430 static int
2431 iavf_queue_pairs_alloc(struct iavf_softc *sc)
2432 {
2433 struct iavf_queue_pair *qp;
2434 unsigned int i, num;
2435
2436 num = iavf_calc_queue_pair_size(sc);
2437 if (num <= 0) {
2438 return -1;
2439 }
2440
2441 sc->sc_qps = kmem_zalloc(sizeof(sc->sc_qps[0]) * num, KM_NOSLEEP);
2442 if (sc->sc_qps == NULL) {
2443 return -1;
2444 }
2445
2446 for (i = 0; i < num; i++) {
2447 qp = &sc->sc_qps[i];
2448
2449 qp->qp_rxr = iavf_rxr_alloc(sc, i);
2450 qp->qp_txr = iavf_txr_alloc(sc, i);
2451
2452 if (qp->qp_rxr == NULL || qp->qp_txr == NULL)
2453 goto free;
2454
2455 qp->qp_si = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE,
2456 iavf_handle_queue, qp);
2457 if (qp->qp_si == NULL)
2458 goto free;
2459 }
2460
2461 sc->sc_nqps_alloc = num;
2462 return 0;
2463 free:
2464 for (i = 0; i < num; i++) {
2465 qp = &sc->sc_qps[i];
2466
2467 if (qp->qp_si != NULL)
2468 softint_disestablish(qp->qp_si);
2469 if (qp->qp_rxr != NULL)
2470 iavf_rxr_free(sc, qp->qp_rxr);
2471 if (qp->qp_txr != NULL)
2472 iavf_txr_free(sc, qp->qp_txr);
2473 }
2474
2475 kmem_free(sc->sc_qps, sizeof(sc->sc_qps[0]) * num);
2476 sc->sc_qps = NULL;
2477
2478 return -1;
2479 }
2480
2481 static void
2482 iavf_queue_pairs_free(struct iavf_softc *sc)
2483 {
2484 struct iavf_queue_pair *qp;
2485 unsigned int i;
2486 size_t sz;
2487
2488 if (sc->sc_qps == NULL)
2489 return;
2490
2491 for (i = 0; i < sc->sc_nqps_alloc; i++) {
2492 qp = &sc->sc_qps[i];
2493
2494 if (qp->qp_si != NULL)
2495 softint_disestablish(qp->qp_si);
2496 if (qp->qp_rxr != NULL)
2497 iavf_rxr_free(sc, qp->qp_rxr);
2498 if (qp->qp_txr != NULL)
2499 iavf_txr_free(sc, qp->qp_txr);
2500 }
2501
2502 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqps_alloc;
2503 kmem_free(sc->sc_qps, sz);
2504 sc->sc_qps = NULL;
2505 sc->sc_nqps_alloc = 0;
2506 }
2507
static int
iavf_rxfill(struct iavf_softc *sc, struct iavf_rx_ring *rxr)
{
	struct ixl_rx_rd_desc_32 *ring, *rxd;
	struct iavf_rx_map *rxm;
	bus_dmamap_t map;
	struct mbuf *m;
	unsigned int slots, prod, mask;
	int error, post;

	/*
	 * Refill the RX ring with fresh mbuf clusters, one per
	 * unrefreshed descriptor slot.  Returns 0 on success, or -1 if
	 * an mbuf/cluster/DMA-load allocation failed (the ring is then
	 * only partially refilled).
	 */
	slots = ixl_rxr_unrefreshed(rxr->rxr_prod, rxr->rxr_cons,
	    sc->sc_rx_ring_ndescs);

	if (slots == 0)
		return 0;

	post = 0;
	error = 0;
	prod = rxr->rxr_prod;

	ring = IXL_DMA_KVA(&rxr->rxr_mem);
	/* ring size is a power of two, so "& mask" wraps the index */
	mask = sc->sc_rx_ring_ndescs - 1;

	do {
		rxm = &rxr->rxr_maps[prod];

		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			rxr->rxr_mgethdr_failed.ev_count++;
			error = -1;
			break;
		}

		MCLGET(m, M_DONTWAIT);
		if (!ISSET(m->m_flags, M_EXT)) {
			rxr->rxr_mgetcl_failed.ev_count++;
			error = -1;
			m_freem(m);
			break;
		}

		/* ETHER_ALIGN keeps the IP header naturally aligned */
		m->m_len = m->m_pkthdr.len = MCLBYTES;
		m_adj(m, ETHER_ALIGN);

		map = rxm->rxm_map;

		if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
		    BUS_DMA_READ|BUS_DMA_NOWAIT) != 0) {
			rxr->rxr_mbuf_load_failed.ev_count++;
			error = -1;
			m_freem(m);
			break;
		}

		rxm->rxm_m = m;

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);

		/* hand the buffer's bus address to the hardware */
		rxd = &ring[prod];
		rxd->paddr = htole64(map->dm_segs[0].ds_addr);
		rxd->haddr = htole64(0);

		prod++;
		prod &= mask;
		post = 1;
	} while (--slots);

	if (post) {
		/* doorbell: tell the hardware about the new buffers */
		rxr->rxr_prod = prod;
		iavf_wr(sc, rxr->rxr_tail, prod);
	}

	return error;
}
2583
2584 static inline void
2585 iavf_rx_csum(struct mbuf *m, uint64_t qword)
2586 {
2587 int flags_mask;
2588
2589 if (!ISSET(qword, IXL_RX_DESC_L3L4P)) {
2590 /* No L3 or L4 checksum was calculated */
2591 return;
2592 }
2593
2594 switch (__SHIFTOUT(qword, IXL_RX_DESC_PTYPE_MASK)) {
2595 case IXL_RX_DESC_PTYPE_IPV4FRAG:
2596 case IXL_RX_DESC_PTYPE_IPV4:
2597 case IXL_RX_DESC_PTYPE_SCTPV4:
2598 case IXL_RX_DESC_PTYPE_ICMPV4:
2599 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
2600 break;
2601 case IXL_RX_DESC_PTYPE_TCPV4:
2602 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
2603 flags_mask |= M_CSUM_TCPv4 | M_CSUM_TCP_UDP_BAD;
2604 break;
2605 case IXL_RX_DESC_PTYPE_UDPV4:
2606 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
2607 flags_mask |= M_CSUM_UDPv4 | M_CSUM_TCP_UDP_BAD;
2608 break;
2609 case IXL_RX_DESC_PTYPE_TCPV6:
2610 flags_mask = M_CSUM_TCPv6 | M_CSUM_TCP_UDP_BAD;
2611 break;
2612 case IXL_RX_DESC_PTYPE_UDPV6:
2613 flags_mask = M_CSUM_UDPv6 | M_CSUM_TCP_UDP_BAD;
2614 break;
2615 default:
2616 flags_mask = 0;
2617 }
2618
2619 m->m_pkthdr.csum_flags |= (flags_mask & (M_CSUM_IPv4 |
2620 M_CSUM_TCPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv4 | M_CSUM_UDPv6));
2621
2622 if (ISSET(qword, IXL_RX_DESC_IPE)) {
2623 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_IPv4_BAD);
2624 }
2625
2626 if (ISSET(qword, IXL_RX_DESC_L4E)) {
2627 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_TCP_UDP_BAD);
2628 }
2629 }
2630
static int
iavf_rxeof(struct iavf_softc *sc, struct iavf_rx_ring *rxr, u_int rxlimit,
    struct evcnt *ecnt)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct ixl_rx_wb_desc_32 *ring, *rxd;
	struct iavf_rx_map *rxm;
	bus_dmamap_t map;
	unsigned int cons, prod;
	struct mbuf *m;
	uint64_t word, word0;
	unsigned int len;
	unsigned int mask;
	int done = 0, more = 0;

	/*
	 * Harvest completed RX descriptors, assembling multi-descriptor
	 * packets via rxr_m_head/rxr_m_tail, and pass finished packets
	 * up the stack.  Processes at most `rxlimit' descriptors;
	 * returns non-zero when more work remains so the caller can
	 * reschedule.  `ecnt' counts invocations that did work.
	 */
	KASSERT(mutex_owned(&rxr->rxr_lock));

	if (!ISSET(ifp->if_flags, IFF_RUNNING))
		return 0;

	prod = rxr->rxr_prod;
	cons = rxr->rxr_cons;

	if (cons == prod)
		return 0;

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
	    0, IXL_DMA_LEN(&rxr->rxr_mem),
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	ring = IXL_DMA_KVA(&rxr->rxr_mem);
	/* ring size is a power of two, so "& mask" wraps the index */
	mask = sc->sc_rx_ring_ndescs - 1;

	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);

	do {
		if (rxlimit-- <= 0) {
			more = 1;
			break;
		}

		rxd = &ring[cons];

		word = le64toh(rxd->qword1);

		/* DD clear: the hardware has not written this slot yet */
		if (!ISSET(word, IXL_RX_DESC_DD))
			break;

		rxm = &rxr->rxr_maps[cons];

		map = rxm->rxm_map;
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, map);

		m = rxm->rxm_m;
		rxm->rxm_m = NULL;

		KASSERT(m != NULL);

		/* append this fragment to the packet under assembly */
		len = (word & IXL_RX_DESC_PLEN_MASK) >> IXL_RX_DESC_PLEN_SHIFT;
		m->m_len = len;
		m->m_pkthdr.len = 0;

		m->m_next = NULL;
		*rxr->rxr_m_tail = m;
		rxr->rxr_m_tail = &m->m_next;

		/* accumulate the total length on the head mbuf */
		m = rxr->rxr_m_head;
		m->m_pkthdr.len += len;

		/* EOP: the packet is complete, hand it up the stack */
		if (ISSET(word, IXL_RX_DESC_EOP)) {
			word0 = le64toh(rxd->qword0);

			if (ISSET(word, IXL_RX_DESC_L2TAG1P)) {
				uint16_t vtag;
				vtag = __SHIFTOUT(word0, IXL_RX_DESC_L2TAG1_MASK);
				/*
				 * NOTE(review): word0 is already host
				 * order after le64toh, so this extra
				 * le16toh looks like a double swap on
				 * big-endian hosts -- confirm.
				 */
				vlan_set_tag(m, le16toh(vtag));
			}

			if ((ifp->if_capenable & IAVF_IFCAP_RXCSUM) != 0)
				iavf_rx_csum(m, word);

			if (!ISSET(word,
			    IXL_RX_DESC_RXE | IXL_RX_DESC_OVERSIZE)) {
				m_set_rcvif(m, ifp);
				if_statinc_ref(ifp, nsr, if_ipackets);
				if_statadd_ref(ifp, nsr, if_ibytes,
				    m->m_pkthdr.len);
				if_percpuq_enqueue(sc->sc_ipq, m);
			} else {
				/* receive or oversize error: drop it */
				if_statinc_ref(ifp, nsr, if_ierrors);
				m_freem(m);
			}

			rxr->rxr_m_head = NULL;
			rxr->rxr_m_tail = &rxr->rxr_m_head;
		}

		cons++;
		cons &= mask;

		done = 1;
	} while (cons != prod);

	if (done) {
		ecnt->ev_count++;
		rxr->rxr_cons = cons;
		/* replenish the descriptors we just consumed */
		if (iavf_rxfill(sc, rxr) == -1)
			if_statinc_ref(ifp, nsr, if_iqdrops);
	}

	IF_STAT_PUTREF(ifp);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
	    0, IXL_DMA_LEN(&rxr->rxr_mem),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	return more;
}
2751
static void
iavf_rxr_clean(struct iavf_softc *sc, struct iavf_rx_ring *rxr)
{
	struct iavf_rx_map *maps, *rxm;
	bus_dmamap_t map;
	unsigned int i;

	/*
	 * Free every mbuf currently posted to the RX ring, drop any
	 * partially assembled packet, and reset the ring to its empty
	 * state.  Called with the ring stopped and its lock held.
	 */
	KASSERT(mutex_owned(&rxr->rxr_lock));

	maps = rxr->rxr_maps;
	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
		rxm = &maps[i];

		if (rxm->rxm_m == NULL)
			continue;

		map = rxm->rxm_map;
		/*
		 * NOTE(review): POSTWRITE on an RX (device-write) map is
		 * unusual; POSTREAD would be expected -- confirm.
		 */
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, map);

		m_freem(rxm->rxm_m);
		rxm->rxm_m = NULL;
	}

	/* discard any half-assembled multi-descriptor packet */
	m_freem(rxr->rxr_m_head);
	rxr->rxr_m_head = NULL;
	rxr->rxr_m_tail = &rxr->rxr_m_head;

	memset(IXL_DMA_KVA(&rxr->rxr_mem), 0, IXL_DMA_LEN(&rxr->rxr_mem));
	rxr->rxr_prod = rxr->rxr_cons = 0;
}
2784
static int
iavf_txeof(struct iavf_softc *sc, struct iavf_tx_ring *txr, u_int txlimit,
    struct evcnt *ecnt)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct ixl_tx_desc *ring, *txd;
	struct iavf_tx_map *txm;
	struct mbuf *m;
	bus_dmamap_t map;
	unsigned int cons, prod, last;
	unsigned int mask;
	uint64_t dtype;
	int done = 0, more = 0;

	/*
	 * Reclaim completed TX descriptors: unload DMA maps, free the
	 * transmitted mbufs, and update interface statistics.  Handles
	 * at most `txlimit' packets; returns non-zero when more work
	 * remains so the caller can reschedule.
	 */
	KASSERT(mutex_owned(&txr->txr_lock));

	prod = txr->txr_prod;
	cons = txr->txr_cons;

	if (cons == prod)
		return 0;

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTREAD);

	ring = IXL_DMA_KVA(&txr->txr_mem);
	/* ring size is a power of two, so "& mask" wraps the index */
	mask = sc->sc_tx_ring_ndescs - 1;

	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);

	do {
		if (txlimit-- <= 0) {
			more = 1;
			break;
		}

		/* completion is reported on the packet's last descriptor */
		txm = &txr->txr_maps[cons];
		last = txm->txm_eop;
		txd = &ring[last];

		dtype = txd->cmd & htole64(IXL_TX_DESC_DTYPE_MASK);
		if (dtype != htole64(IXL_TX_DESC_DTYPE_DONE))
			break;

		map = txm->txm_map;

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, map);

		m = txm->txm_m;
		if (m != NULL) {
			if_statinc_ref(ifp, nsr, if_opackets);
			if_statadd_ref(ifp, nsr, if_obytes, m->m_pkthdr.len);
			if (ISSET(m->m_flags, M_MCAST))
				if_statinc_ref(ifp, nsr, if_omcasts);
			m_freem(m);
		}

		txm->txm_m = NULL;
		txm->txm_eop = -1;

		/* skip to the slot following the packet's last descriptor */
		cons = last + 1;
		cons &= mask;
		done = 1;
	} while (cons != prod);

	IF_STAT_PUTREF(ifp);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREREAD);

	txr->txr_cons = cons;

	if (done) {
		ecnt->ev_count++;
		/* kick the deferred-transmit softint to refill the ring */
		softint_schedule(txr->txr_si);
		if (txr->txr_qid == 0) {
			CLR(ifp->if_flags, IFF_OACTIVE);
			if_schedule_deferred_start(ifp);
		}
	}

	/* ring fully drained: disarm the TX watchdog */
	if (txr->txr_cons == txr->txr_prod) {
		txr->txr_watchdog = IAVF_WATCHDOG_STOP;
	}

	return more;
}
2874
2875 static inline int
2876 iavf_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf **m0,
2877 struct iavf_tx_ring *txr)
2878 {
2879 struct mbuf *m;
2880 int error;
2881
2882 KASSERT(mutex_owned(&txr->txr_lock));
2883
2884 m = *m0;
2885
2886 error = bus_dmamap_load_mbuf(dmat, map, m,
2887 BUS_DMA_STREAMING|BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2888 if (error != EFBIG)
2889 return error;
2890
2891 m = m_defrag(m, M_DONTWAIT);
2892 if (m != NULL) {
2893 *m0 = m;
2894 txr->txr_defragged.ev_count++;
2895 error = bus_dmamap_load_mbuf(dmat, map, m,
2896 BUS_DMA_STREAMING|BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2897 } else {
2898 txr->txr_defrag_failed.ev_count++;
2899 error = ENOBUFS;
2900 }
2901
2902 return error;
2903 }
2904
2905 static inline int
2906 iavf_tx_setup_offloads(struct mbuf *m, uint64_t *cmd_txd)
2907 {
2908 struct ether_header *eh;
2909 size_t len;
2910 uint64_t cmd;
2911
2912 cmd = 0;
2913
2914 eh = mtod(m, struct ether_header *);
2915 switch (htons(eh->ether_type)) {
2916 case ETHERTYPE_IP:
2917 case ETHERTYPE_IPV6:
2918 len = ETHER_HDR_LEN;
2919 break;
2920 case ETHERTYPE_VLAN:
2921 len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2922 break;
2923 default:
2924 len = 0;
2925 }
2926 cmd |= ((len >> 1) << IXL_TX_DESC_MACLEN_SHIFT);
2927
2928 if (m->m_pkthdr.csum_flags &
2929 (M_CSUM_TSOv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
2930 cmd |= IXL_TX_DESC_CMD_IIPT_IPV4;
2931 }
2932 if (m->m_pkthdr.csum_flags & M_CSUM_IPv4) {
2933 cmd |= IXL_TX_DESC_CMD_IIPT_IPV4_CSUM;
2934 }
2935
2936 if (m->m_pkthdr.csum_flags &
2937 (M_CSUM_TSOv6 | M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
2938 cmd |= IXL_TX_DESC_CMD_IIPT_IPV6;
2939 }
2940
2941 switch (cmd & IXL_TX_DESC_CMD_IIPT_MASK) {
2942 case IXL_TX_DESC_CMD_IIPT_IPV4:
2943 case IXL_TX_DESC_CMD_IIPT_IPV4_CSUM:
2944 len = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data);
2945 break;
2946 case IXL_TX_DESC_CMD_IIPT_IPV6:
2947 len = M_CSUM_DATA_IPv6_IPHL(m->m_pkthdr.csum_data);
2948 break;
2949 default:
2950 len = 0;
2951 }
2952 cmd |= ((len >> 2) << IXL_TX_DESC_IPLEN_SHIFT);
2953
2954 if (m->m_pkthdr.csum_flags &
2955 (M_CSUM_TSOv4 | M_CSUM_TSOv6 | M_CSUM_TCPv4 | M_CSUM_TCPv6)) {
2956 len = sizeof(struct tcphdr);
2957 cmd |= IXL_TX_DESC_CMD_L4T_EOFT_TCP;
2958 } else if (m->m_pkthdr.csum_flags & (M_CSUM_UDPv4 | M_CSUM_UDPv6)) {
2959 len = sizeof(struct udphdr);
2960 cmd |= IXL_TX_DESC_CMD_L4T_EOFT_UDP;
2961 } else {
2962 len = 0;
2963 }
2964 cmd |= ((len >> 2) << IXL_TX_DESC_L4LEN_SHIFT);
2965
2966 *cmd_txd |= cmd;
2967 return 0;
2968 }
2969
static void
iavf_tx_common_locked(struct ifnet *ifp, struct iavf_tx_ring *txr,
    bool is_transmit)
{
	struct iavf_softc *sc;
	struct ixl_tx_desc *ring, *txd;
	struct iavf_tx_map *txm;
	bus_dmamap_t map;
	struct mbuf *m;
	unsigned int prod, free, last, i;
	unsigned int mask;
	uint64_t cmd, cmd_txd;
	int post = 0;

	/*
	 * Common TX path for if_start (is_transmit == false, packets
	 * come from if_snd) and if_transmit (is_transmit == true,
	 * packets come from the per-ring pcq).  Encodes each mbuf chain
	 * into one descriptor per DMA segment and rings the doorbell
	 * once at the end.
	 */
	KASSERT(mutex_owned(&txr->txr_lock));

	sc = ifp->if_softc;

	if (!ISSET(ifp->if_flags, IFF_RUNNING)
	    || (!is_transmit && ISSET(ifp->if_flags, IFF_OACTIVE))) {
		if (!is_transmit)
			IFQ_PURGE(&ifp->if_snd);
		return;
	}

	/* number of free descriptor slots between cons and prod */
	prod = txr->txr_prod;
	free = txr->txr_cons;

	if (free <= prod)
		free += sc->sc_tx_ring_ndescs;
	free -= prod;

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTWRITE);

	ring = IXL_DMA_KVA(&txr->txr_mem);
	/* ring size is a power of two, so "& mask" wraps the index */
	mask = sc->sc_tx_ring_ndescs - 1;
	last = prod;
	cmd = 0;
	txd = NULL;

	for (;;) {
		/* stop while a maximum-sized packet might not fit */
		if (free < IAVF_TX_PKT_DESCS) {
			if (!is_transmit)
				SET(ifp->if_flags, IFF_OACTIVE);
			break;
		}

		if (is_transmit)
			m = pcq_get(txr->txr_intrq);
		else
			IFQ_DEQUEUE(&ifp->if_snd, m);

		if (m == NULL)
			break;

		txm = &txr->txr_maps[prod];
		map = txm->txm_map;

		if (iavf_load_mbuf(sc->sc_dmat, map, &m, txr) != 0) {
			if_statinc(ifp, if_oerrors);
			m_freem(m);
			continue;
		}

		/* per-packet offload/VLAN bits shared by all segments */
		cmd_txd = 0;
		if (m->m_pkthdr.csum_flags & IAVF_CSUM_ALL_OFFLOAD) {
			iavf_tx_setup_offloads(m, &cmd_txd);
		}
		if (vlan_has_tag(m)) {
			uint16_t vtag;
			vtag = htole16(vlan_get_tag(m));
			cmd_txd |= IXL_TX_DESC_CMD_IL2TAG1 |
			    ((uint64_t)vtag << IXL_TX_DESC_L2TAG1_SHIFT);
		}

		bus_dmamap_sync(sc->sc_dmat, map, 0,
		    map->dm_mapsize, BUS_DMASYNC_PREWRITE);

		/* one descriptor per DMA segment of the chain */
		for (i = 0; i < (unsigned int)map->dm_nsegs; i++) {
			txd = &ring[prod];

			cmd = (uint64_t)map->dm_segs[i].ds_len <<
			    IXL_TX_DESC_BSIZE_SHIFT;
			cmd |= IXL_TX_DESC_DTYPE_DATA|IXL_TX_DESC_CMD_ICRC|
			    cmd_txd;

			txd->addr = htole64(map->dm_segs[i].ds_addr);
			txd->cmd = htole64(cmd);

			last = prod;
			prod++;
			prod &= mask;
		}

		/* mark the final segment and request a completion */
		cmd |= IXL_TX_DESC_CMD_EOP|IXL_TX_DESC_CMD_RS;
		txd->cmd = htole64(cmd);
		txm->txm_m = m;
		txm->txm_eop = last;

		bpf_mtap(ifp, m, BPF_D_OUT);
		free -= i;
		post = 1;
	}

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREWRITE);

	if (post) {
		/* doorbell: hand the new descriptors to the hardware */
		txr->txr_prod = prod;
		iavf_wr(sc, txr->txr_tail, prod);
		txr->txr_watchdog = IAVF_WATCHDOG_TICKS;
	}
}
3084
3085 static inline int
3086 iavf_handle_queue_common(struct iavf_softc *sc, struct iavf_queue_pair *qp,
3087 u_int txlimit, struct evcnt *txevcnt,
3088 u_int rxlimit, struct evcnt *rxevcnt)
3089 {
3090 struct iavf_tx_ring *txr;
3091 struct iavf_rx_ring *rxr;
3092 int txmore, rxmore;
3093 int rv;
3094
3095 txr = qp->qp_txr;
3096 rxr = qp->qp_rxr;
3097
3098 mutex_enter(&txr->txr_lock);
3099 txmore = iavf_txeof(sc, txr, txlimit, txevcnt);
3100 mutex_exit(&txr->txr_lock);
3101
3102 mutex_enter(&rxr->rxr_lock);
3103 rxmore = iavf_rxeof(sc, rxr, rxlimit, rxevcnt);
3104 mutex_exit(&rxr->rxr_lock);
3105
3106 rv = txmore | (rxmore << 1);
3107
3108 return rv;
3109 }
3110
3111 static void
3112 iavf_sched_handle_queue(struct iavf_softc *sc, struct iavf_queue_pair *qp)
3113 {
3114
3115 if (qp->qp_workqueue)
3116 workqueue_enqueue(sc->sc_workq_txrx, &qp->qp_work, NULL);
3117 else
3118 softint_schedule(qp->qp_si);
3119 }
3120
3121 static void
3122 iavf_start(struct ifnet *ifp)
3123 {
3124 struct iavf_softc *sc;
3125 struct iavf_tx_ring *txr;
3126
3127 sc = ifp->if_softc;
3128 txr = sc->sc_qps[0].qp_txr;
3129
3130 mutex_enter(&txr->txr_lock);
3131 iavf_tx_common_locked(ifp, txr, false);
3132 mutex_exit(&txr->txr_lock);
3133
3134 }
3135
3136 static inline unsigned int
3137 iavf_select_txqueue(struct iavf_softc *sc, struct mbuf *m)
3138 {
3139 u_int cpuid;
3140
3141 cpuid = cpu_index(curcpu());
3142
3143 return (unsigned int)(cpuid % sc->sc_nqueue_pairs);
3144 }
3145
3146 static int
3147 iavf_transmit(struct ifnet *ifp, struct mbuf *m)
3148 {
3149 struct iavf_softc *sc;
3150 struct iavf_tx_ring *txr;
3151 unsigned int qid;
3152
3153 sc = ifp->if_softc;
3154 qid = iavf_select_txqueue(sc, m);
3155
3156 txr = sc->sc_qps[qid].qp_txr;
3157
3158 if (__predict_false(!pcq_put(txr->txr_intrq, m))) {
3159 mutex_enter(&txr->txr_lock);
3160 txr->txr_pcqdrop.ev_count++;
3161 mutex_exit(&txr->txr_lock);
3162
3163 m_freem(m);
3164 return ENOBUFS;
3165 }
3166
3167 if (mutex_tryenter(&txr->txr_lock)) {
3168 iavf_tx_common_locked(ifp, txr, true);
3169 mutex_exit(&txr->txr_lock);
3170 } else {
3171 kpreempt_disable();
3172 softint_schedule(txr->txr_si);
3173 kpreempt_enable();
3174 }
3175 return 0;
3176 }
3177
3178 static void
3179 iavf_deferred_transmit(void *xtxr)
3180 {
3181 struct iavf_tx_ring *txr;
3182 struct iavf_softc *sc;
3183 struct ifnet *ifp;
3184
3185 txr = xtxr;
3186 sc = txr->txr_sc;
3187 ifp = &sc->sc_ec.ec_if;
3188
3189 mutex_enter(&txr->txr_lock);
3190 txr->txr_transmitdef.ev_count++;
3191 if (pcq_peek(txr->txr_intrq) != NULL)
3192 iavf_tx_common_locked(ifp, txr, true);
3193 mutex_exit(&txr->txr_lock);
3194 }
3195
static void
iavf_txr_clean(struct iavf_softc *sc, struct iavf_tx_ring *txr)
{
	struct iavf_tx_map *maps, *txm;
	bus_dmamap_t map;
	unsigned int i;

	/*
	 * Free every packet still queued on the TX ring and reset the
	 * ring to its empty state.  Called with the ring stopped and
	 * its lock held.
	 */
	KASSERT(mutex_owned(&txr->txr_lock));

	maps = txr->txr_maps;
	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
		txm = &maps[i];

		if (txm->txm_m == NULL)
			continue;

		map = txm->txm_map;
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, map);

		m_freem(txm->txm_m);
		txm->txm_m = NULL;
	}

	memset(IXL_DMA_KVA(&txr->txr_mem), 0, IXL_DMA_LEN(&txr->txr_mem));
	txr->txr_prod = txr->txr_cons = 0;
}
3224
static int
iavf_intr(void *xsc)
{
	struct iavf_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct iavf_rx_ring *rxr;
	struct iavf_tx_ring *txr;
	uint32_t icr;
	unsigned int i;

	/*
	 * Single-vector interrupt handler: dispatches VF reset
	 * notification, admin queue events, and all queue pairs.
	 */
	/* read I40E_VFINT_ICR_ENA1 to clear status */
	(void)iavf_rd(sc, I40E_VFINT_ICR0_ENA1);

	iavf_intr_enable(sc);
	icr = iavf_rd(sc, I40E_VFINT_ICR01);

	/* ICR reads back as IAVF_REG_VFR while the PF resets this VF */
	if (icr == IAVF_REG_VFR) {
		log(LOG_INFO, "%s: VF reset in progress\n",
		    ifp->if_xname);
		iavf_work_set(&sc->sc_reset_task, iavf_reset_start, sc);
		iavf_work_add(sc->sc_workq, &sc->sc_reset_task);
		return 1;
	}

	if (ISSET(icr, I40E_VFINT_ICR01_ADMINQ_MASK)) {
		/* reap admin command completions and process events */
		mutex_enter(&sc->sc_adminq_lock);
		iavf_atq_done(sc);
		iavf_arq(sc);
		mutex_exit(&sc->sc_adminq_lock);
	}

	if (ISSET(icr, I40E_VFINT_ICR01_QUEUE_0_MASK)) {
		/* drain every queue pair completely (no work limits) */
		for (i = 0; i < sc->sc_nqueue_pairs; i++) {
			rxr = sc->sc_qps[i].qp_rxr;
			txr = sc->sc_qps[i].qp_txr;

			mutex_enter(&rxr->rxr_lock);
			while (iavf_rxeof(sc, rxr, UINT_MAX,
			    &rxr->rxr_intr) != 0) {
				/* do nothing */
			}
			mutex_exit(&rxr->rxr_lock);

			mutex_enter(&txr->txr_lock);
			while (iavf_txeof(sc, txr, UINT_MAX,
			    &txr->txr_intr) != 0) {
				/* do nothing */
			}
			mutex_exit(&txr->txr_lock);
		}
	}

	return 0;
}
3279
static int
iavf_queue_intr(void *xqp)
{
	struct iavf_queue_pair *qp = xqp;
	struct iavf_tx_ring *txr;
	struct iavf_rx_ring *rxr;
	struct iavf_softc *sc;
	unsigned int qid;
	u_int txlimit, rxlimit;
	int more;

	/*
	 * Per-queue-pair MSI-X handler: service TX/RX up to the
	 * interrupt-context limits, then either reschedule into
	 * softint/workqueue context (work remains) or re-enable the
	 * queue interrupt.
	 */
	txr = qp->qp_txr;
	rxr = qp->qp_rxr;
	sc = txr->txr_sc;
	qid = txr->txr_qid;

	txlimit = sc->sc_tx_intr_process_limit;
	rxlimit = sc->sc_rx_intr_process_limit;
	qp->qp_workqueue = sc->sc_txrx_workqueue;

	more = iavf_handle_queue_common(sc, qp,
	    txlimit, &txr->txr_intr, rxlimit, &rxr->rxr_intr);

	if (more != 0) {
		iavf_sched_handle_queue(sc, qp);
	} else {
		/* for ALTQ */
		if (txr->txr_qid == 0)
			if_schedule_deferred_start(&sc->sc_ec.ec_if);
		softint_schedule(txr->txr_si);

		iavf_queue_intr_enable(sc, qid);
	}

	return 0;
}
3316
3317 static void
3318 iavf_handle_queue_wk(struct work *wk, void *xsc __unused)
3319 {
3320 struct iavf_queue_pair *qp;
3321
3322 qp = container_of(wk, struct iavf_queue_pair, qp_work);
3323 iavf_handle_queue(qp);
3324 }
3325
3326 static void
3327 iavf_handle_queue(void *xqp)
3328 {
3329 struct iavf_queue_pair *qp = xqp;
3330 struct iavf_tx_ring *txr;
3331 struct iavf_rx_ring *rxr;
3332 struct iavf_softc *sc;
3333 unsigned int qid;
3334 u_int txlimit, rxlimit;
3335 int more;
3336
3337 txr = qp->qp_txr;
3338 rxr = qp->qp_rxr;
3339 sc = txr->txr_sc;
3340 qid = txr->txr_qid;
3341
3342 txlimit = sc->sc_tx_process_limit;
3343 rxlimit = sc->sc_rx_process_limit;
3344
3345 more = iavf_handle_queue_common(sc, qp,
3346 txlimit, &txr->txr_defer, rxlimit, &rxr->rxr_defer);
3347
3348 if (more != 0)
3349 iavf_sched_handle_queue(sc, qp);
3350 else
3351 iavf_queue_intr_enable(sc, qid);
3352 }
3353
static void
iavf_tick(void *xsc)
{
	struct iavf_softc *sc;
	unsigned int i;
	int timedout;

	/*
	 * Periodic callout: collect statistics and check each TX ring's
	 * watchdog.  On a timeout the watchdog task runs instead of
	 * rearming the callout; while a reset is in progress only the
	 * reset task is kicked.
	 */
	sc = xsc;
	timedout = 0;

	mutex_enter(&sc->sc_cfg_lock);

	if (sc->sc_resetting) {
		iavf_work_add(sc->sc_workq, &sc->sc_reset_task);
		mutex_exit(&sc->sc_cfg_lock);
		return;
	}

	iavf_get_stats(sc);

	for (i = 0; i < sc->sc_nqueue_pairs; i++) {
		timedout |= iavf_watchdog(sc->sc_qps[i].qp_txr);
	}

	if (timedout != 0) {
		iavf_work_add(sc->sc_workq, &sc->sc_wdto_task);
	} else {
		callout_schedule(&sc->sc_tick, IAVF_TICK_INTERVAL);
	}

	mutex_exit(&sc->sc_cfg_lock);
}
3386
3387 static void
3388 iavf_tick_halt(void *unused __unused)
3389 {
3390
3391 /* do nothing */
3392 }
3393
static void
iavf_reset_request(void *xsc)
{
	struct iavf_softc *sc = xsc;

	/* Ask the device to reset this VF, then run the reset sequence. */
	iavf_reset_vf(sc);
	iavf_reset_start(sc);
}
3402
/*
 * Begin a reset: mark the softc as resetting, report link down, and
 * quiesce the interface (remembering whether it was running so
 * iavf_reset_finish() can bring it back up).  If a reset is already
 * in flight, skip straight to re-running the reset body.
 */
static void
iavf_reset_start(void *xsc)
{
	struct iavf_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_ec.ec_if;

	mutex_enter(&sc->sc_cfg_lock);

	if (sc->sc_resetting)
		goto do_reset;

	sc->sc_resetting = true;
	if_link_state_change(ifp, LINK_STATE_DOWN);

	if (ISSET(ifp->if_flags, IFF_RUNNING)) {
		iavf_stop_locked(sc);
		/* remember to re-init the interface after the reset */
		sc->sc_reset_up = true;
	}

	/* snapshot the current MAC so a PF-side change can be detected */
	memcpy(sc->sc_enaddr_reset, sc->sc_enaddr, ETHER_ADDR_LEN);

do_reset:
	/* subsequent scheduled reset work runs iavf_reset() directly */
	iavf_work_set(&sc->sc_reset_task, iavf_reset, sc);

	mutex_exit(&sc->sc_cfg_lock);

	/* run the first reset attempt synchronously */
	iavf_reset((void *)sc);
}
3431
/*
 * Reset body: wait for the device to come back, re-create the admin
 * queue, renegotiate the virtchnl API version and VF resources, and
 * (if the resource grant grew) reallocate queue pairs and interrupts
 * before remapping IRQs.  On success iavf_reset_finish() restores the
 * interface state; on failure the tick callout is restarted so the
 * reset is retried later.
 */
static void
iavf_reset(void *xsc)
{
	struct iavf_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct ixl_aq_buf *aqb;
	bool realloc_qps, realloc_intrs;

	mutex_enter(&sc->sc_cfg_lock);

	/* drop all in-flight admin queue state before touching the HW */
	mutex_enter(&sc->sc_adminq_lock);
	iavf_cleanup_admin_queue(sc);
	mutex_exit(&sc->sc_adminq_lock);

	/* invalidate negotiated state; re-learned below */
	sc->sc_major_ver = UINT_MAX;
	sc->sc_minor_ver = UINT_MAX;
	sc->sc_got_vf_resources = 0;
	sc->sc_got_irq_map = 0;

	/* one DMA buffer reused for all the commands below */
	aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
	if (aqb == NULL)
		goto failed;

	if (iavf_wait_active(sc) != 0) {
		log(LOG_WARNING, "%s: VF reset timed out\n",
		    ifp->if_xname);
		goto failed;
	}

	if (!iavf_arq_fill(sc)) {
		log(LOG_ERR, "%s: unable to fill arq descriptors\n",
		    ifp->if_xname);
		goto failed;
	}

	if (iavf_init_admin_queue(sc) != 0) {
		log(LOG_ERR, "%s: unable to initialize admin queue\n",
		    ifp->if_xname);
		goto failed;
	}

	if (iavf_get_version(sc, aqb) != 0) {
		log(LOG_ERR, "%s: unable to get VF interface version\n",
		    ifp->if_xname);
		goto failed;
	}

	if (iavf_get_vf_resources(sc, aqb) != 0) {
		log(LOG_ERR, "%s: timed out waiting for VF resources\n",
		    ifp->if_xname);
		goto failed;
	}

	/* the PF may grant more queues/vectors than we had before */
	if (sc->sc_nqps_alloc < iavf_calc_queue_pair_size(sc)) {
		realloc_qps = true;
	} else {
		realloc_qps = false;
	}

	if (sc->sc_nintrs < iavf_calc_msix_count(sc)) {
		realloc_intrs = true;
	} else {
		realloc_intrs = false;
	}

	/* interrupts must be torn down before resizing either resource */
	if (realloc_qps || realloc_intrs)
		iavf_teardown_interrupts(sc);

	if (realloc_qps) {
		iavf_queue_pairs_free(sc);
		if (iavf_queue_pairs_alloc(sc) != 0) {
			log(LOG_ERR, "%s: failed to allocate queue pairs\n",
			    ifp->if_xname);
			goto failed;
		}
	}

	if (realloc_qps || realloc_intrs) {
		if (iavf_setup_interrupts(sc) != 0) {
			sc->sc_nintrs = 0;
			log(LOG_ERR, "%s: failed to allocate interrupts\n",
			    ifp->if_xname);
			goto failed;
		}
		log(LOG_INFO, "%s: reallocated queues\n", ifp->if_xname);
	}

	if (iavf_config_irq_map(sc, aqb) != 0) {
		log(LOG_ERR, "%s: timed out configuring IRQ map\n",
		    ifp->if_xname);
		goto failed;
	}

	/* return the command buffer to the idle pool */
	mutex_enter(&sc->sc_adminq_lock);
	iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
	mutex_exit(&sc->sc_adminq_lock);

	iavf_reset_finish(sc);

	mutex_exit(&sc->sc_cfg_lock);
	return;

failed:
	mutex_enter(&sc->sc_adminq_lock);
	iavf_cleanup_admin_queue(sc);
	if (aqb != NULL) {
		iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
	}
	mutex_exit(&sc->sc_adminq_lock);
	/* retry: the tick sees sc_resetting and re-queues the reset task */
	callout_schedule(&sc->sc_tick, IAVF_TICK_INTERVAL);
	mutex_exit(&sc->sc_cfg_lock);
}
3544
/*
 * Final phase of a successful reset: re-program the unicast address,
 * multicast filters and VLAN IDs that were configured before the
 * reset, propagate any PF-side MAC address change to the interface,
 * and re-init the interface if it was running.
 *
 * Called with sc_cfg_lock held (dropped/retaken around the lladdr
 * replacement, which needs IFNET_LOCK).
 */
static void
iavf_reset_finish(struct iavf_softc *sc)
{
	struct ethercom *ec = &sc->sc_ec;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ifnet *ifp = &ec->ec_if;
	struct vlanid_list *vlanidp;
	uint8_t enaddr_prev[ETHER_ADDR_LEN], enaddr_next[ETHER_ADDR_LEN];

	KASSERT(mutex_owned(&sc->sc_cfg_lock));

	callout_stop(&sc->sc_tick);

	iavf_intr_enable(sc);

	/* restore an administratively-set unicast address, if any */
	if (!iavf_is_etheranyaddr(sc->sc_enaddr_added)) {
		iavf_eth_addr(sc, sc->sc_enaddr_added, IAVF_VC_OP_ADD_ETH_ADDR);
	}

	ETHER_LOCK(ec);
	if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
		for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
		    ETHER_NEXT_MULTI(step, enm)) {
			iavf_add_multi(sc, enm->enm_addrlo, enm->enm_addrhi);
		}
	}

	/*
	 * ETHER_LOCK is dropped around the admin command; the list is
	 * assumed stable here — TODO confirm nothing can edit ec_vids
	 * during a reset.
	 */
	SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
		ETHER_UNLOCK(ec);
		iavf_config_vlan_id(sc, vlanidp->vid, IAVF_VC_OP_ADD_VLAN);
		ETHER_LOCK(ec);
	}
	ETHER_UNLOCK(ec);

	/* the PF may have assigned a new MAC while we were down */
	if (memcmp(sc->sc_enaddr, sc->sc_enaddr_reset, ETHER_ADDR_LEN) != 0) {
		memcpy(enaddr_prev, sc->sc_enaddr_reset, sizeof(enaddr_prev));
		memcpy(enaddr_next, sc->sc_enaddr, sizeof(enaddr_next));
		log(LOG_INFO, "%s: Ethernet address changed to %s\n",
		    ifp->if_xname, ether_sprintf(enaddr_next));

		mutex_exit(&sc->sc_cfg_lock);
		IFNET_LOCK(ifp);
		kpreempt_disable();
		/*XXX we need an API to change ethernet address. */
		iavf_replace_lla(ifp, enaddr_prev, enaddr_next);
		kpreempt_enable();
		IFNET_UNLOCK(ifp);
		mutex_enter(&sc->sc_cfg_lock);
	}

	sc->sc_resetting = false;

	/* bring the interface back up if it was running before the reset */
	if (sc->sc_reset_up) {
		iavf_init_locked(sc);
	}

	if (sc->sc_link_state != LINK_STATE_DOWN) {
		if_link_state_change(ifp, sc->sc_link_state);
	}

}
3607
/*
 * Allocate, map and load a single-segment DMA memory region of the
 * given size and alignment, zeroing it on success.
 *
 * Returns 0 on success, 1 on failure (with all partial resources
 * released via the goto-cleanup ladder).
 */
static int
iavf_dmamem_alloc(bus_dma_tag_t dmat, struct ixl_dmamem *ixm,
    bus_size_t size, bus_size_t align)
{
	ixm->ixm_size = size;

	if (bus_dmamap_create(dmat, ixm->ixm_size, 1,
	    ixm->ixm_size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
	    &ixm->ixm_map) != 0)
		return 1;
	if (bus_dmamem_alloc(dmat, ixm->ixm_size,
	    align, 0, &ixm->ixm_seg, 1, &ixm->ixm_nsegs,
	    BUS_DMA_WAITOK) != 0)
		goto destroy;
	if (bus_dmamem_map(dmat, &ixm->ixm_seg, ixm->ixm_nsegs,
	    ixm->ixm_size, &ixm->ixm_kva, BUS_DMA_WAITOK) != 0)
		goto free;
	if (bus_dmamap_load(dmat, ixm->ixm_map, ixm->ixm_kva,
	    ixm->ixm_size, NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	memset(ixm->ixm_kva, 0, ixm->ixm_size);

	return 0;
unmap:
	bus_dmamem_unmap(dmat, ixm->ixm_kva, ixm->ixm_size);
free:
	bus_dmamem_free(dmat, &ixm->ixm_seg, 1);
destroy:
	bus_dmamap_destroy(dmat, ixm->ixm_map);
	return 1;
}
3641
/*
 * Release a DMA region created by iavf_dmamem_alloc().
 * Teardown must run in the reverse order of setup:
 * unload, unmap, free, destroy.
 */
static void
iavf_dmamem_free(bus_dma_tag_t dmat, struct ixl_dmamem *ixm)
{

	bus_dmamap_unload(dmat, ixm->ixm_map);
	bus_dmamem_unmap(dmat, ixm->ixm_kva, ixm->ixm_size);
	bus_dmamem_free(dmat, &ixm->ixm_seg, 1);
	bus_dmamap_destroy(dmat, ixm->ixm_map);
}
3651
/*
 * Allocate an admin queue buffer: the tracking structure plus a
 * single-segment, IAVF_AQ_ALIGN-aligned DMA buffer of buflen bytes,
 * mapped and loaded.  Returns NULL on any failure, releasing partial
 * state through the goto-cleanup ladder.  KM_NOSLEEP: callers may
 * run from contexts that must not sleep for the kmem allocation.
 */
static struct ixl_aq_buf *
iavf_aqb_alloc(bus_dma_tag_t dmat, size_t buflen)
{
	struct ixl_aq_buf *aqb;

	aqb = kmem_alloc(sizeof(*aqb), KM_NOSLEEP);
	if (aqb == NULL)
		return NULL;

	aqb->aqb_size = buflen;

	if (bus_dmamap_create(dmat, aqb->aqb_size, 1,
	    aqb->aqb_size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &aqb->aqb_map) != 0)
		goto free;
	if (bus_dmamem_alloc(dmat, aqb->aqb_size,
	    IAVF_AQ_ALIGN, 0, &aqb->aqb_seg, 1, &aqb->aqb_nsegs,
	    BUS_DMA_WAITOK) != 0)
		goto destroy;
	if (bus_dmamem_map(dmat, &aqb->aqb_seg, aqb->aqb_nsegs,
	    aqb->aqb_size, &aqb->aqb_data, BUS_DMA_WAITOK) != 0)
		goto dma_free;
	if (bus_dmamap_load(dmat, aqb->aqb_map, aqb->aqb_data,
	    aqb->aqb_size, NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	return aqb;
unmap:
	bus_dmamem_unmap(dmat, aqb->aqb_data, aqb->aqb_size);
dma_free:
	bus_dmamem_free(dmat, &aqb->aqb_seg, 1);
destroy:
	bus_dmamap_destroy(dmat, aqb->aqb_map);
free:
	kmem_free(aqb, sizeof(*aqb));

	return NULL;
}
3690
/*
 * Free an admin queue buffer allocated by iavf_aqb_alloc(),
 * tearing down the DMA state in reverse order of setup.
 */
static void
iavf_aqb_free(bus_dma_tag_t dmat, struct ixl_aq_buf *aqb)
{

	bus_dmamap_unload(dmat, aqb->aqb_map);
	bus_dmamem_unmap(dmat, aqb->aqb_data, aqb->aqb_size);
	bus_dmamem_free(dmat, &aqb->aqb_seg, 1);
	bus_dmamap_destroy(dmat, aqb->aqb_map);
	kmem_free(aqb, sizeof(*aqb));
}
3701
3702 static struct ixl_aq_buf *
3703 iavf_aqb_get_locked(struct ixl_aq_bufs *q)
3704 {
3705 struct ixl_aq_buf *aqb;
3706
3707 aqb = SIMPLEQ_FIRST(q);
3708 if (aqb != NULL) {
3709 SIMPLEQ_REMOVE(q, aqb, ixl_aq_buf, aqb_entry);
3710 }
3711
3712 return aqb;
3713 }
3714
3715 static struct ixl_aq_buf *
3716 iavf_aqb_get(struct iavf_softc *sc, struct ixl_aq_bufs *q)
3717 {
3718 struct ixl_aq_buf *aqb;
3719
3720 if (q != NULL) {
3721 mutex_enter(&sc->sc_adminq_lock);
3722 aqb = iavf_aqb_get_locked(q);
3723 mutex_exit(&sc->sc_adminq_lock);
3724 } else {
3725 aqb = NULL;
3726 }
3727
3728 if (aqb == NULL) {
3729 aqb = iavf_aqb_alloc(sc->sc_dmat, IAVF_AQ_BUFLEN);
3730 }
3731
3732 return aqb;
3733 }
3734
/*
 * Return a buffer to an admin queue buffer list.  Caller holds the
 * lock protecting the list.
 */
static void
iavf_aqb_put_locked(struct ixl_aq_bufs *q, struct ixl_aq_buf *aqb)
{

	SIMPLEQ_INSERT_TAIL(q, aqb, aqb_entry);
}
3741
3742 static void
3743 iavf_aqb_clean(struct ixl_aq_bufs *q, bus_dma_tag_t dmat)
3744 {
3745 struct ixl_aq_buf *aqb;
3746
3747 while ((aqb = SIMPLEQ_FIRST(q)) != NULL) {
3748 SIMPLEQ_REMOVE(q, aqb, ixl_aq_buf, aqb_entry);
3749 iavf_aqb_free(dmat, aqb);
3750 }
3751 }
3752
3753 static const char *
3754 iavf_aq_vc_opcode_str(const struct ixl_aq_desc *iaq)
3755 {
3756
3757 switch (iavf_aq_vc_get_opcode(iaq)) {
3758 case IAVF_VC_OP_VERSION:
3759 return "GET_VERSION";
3760 case IAVF_VC_OP_RESET_VF:
3761 return "RESET_VF";
3762 case IAVF_VC_OP_GET_VF_RESOURCES:
3763 return "GET_VF_RESOURCES";
3764 case IAVF_VC_OP_CONFIG_TX_QUEUE:
3765 return "CONFIG_TX_QUEUE";
3766 case IAVF_VC_OP_CONFIG_RX_QUEUE:
3767 return "CONFIG_RX_QUEUE";
3768 case IAVF_VC_OP_CONFIG_VSI_QUEUES:
3769 return "CONFIG_VSI_QUEUES";
3770 case IAVF_VC_OP_CONFIG_IRQ_MAP:
3771 return "CONFIG_IRQ_MAP";
3772 case IAVF_VC_OP_ENABLE_QUEUES:
3773 return "ENABLE_QUEUES";
3774 case IAVF_VC_OP_DISABLE_QUEUES:
3775 return "DISABLE_QUEUES";
3776 case IAVF_VC_OP_ADD_ETH_ADDR:
3777 return "ADD_ETH_ADDR";
3778 case IAVF_VC_OP_DEL_ETH_ADDR:
3779 return "DEL_ETH_ADDR";
3780 case IAVF_VC_OP_CONFIG_PROMISC:
3781 return "CONFIG_PROMISC";
3782 case IAVF_VC_OP_GET_STATS:
3783 return "GET_STATS";
3784 case IAVF_VC_OP_EVENT:
3785 return "EVENT";
3786 case IAVF_VC_OP_CONFIG_RSS_KEY:
3787 return "CONFIG_RSS_KEY";
3788 case IAVF_VC_OP_CONFIG_RSS_LUT:
3789 return "CONFIG_RSS_LUT";
3790 case IAVF_VC_OP_GET_RSS_HENA_CAPS:
3791 return "GET_RS_HENA_CAPS";
3792 case IAVF_VC_OP_SET_RSS_HENA:
3793 return "SET_RSS_HENA";
3794 case IAVF_VC_OP_ENABLE_VLAN_STRIP:
3795 return "ENABLE_VLAN_STRIPPING";
3796 case IAVF_VC_OP_DISABLE_VLAN_STRIP:
3797 return "DISABLE_VLAN_STRIPPING";
3798 case IAVF_VC_OP_REQUEST_QUEUES:
3799 return "REQUEST_QUEUES";
3800 }
3801
3802 return "unknown";
3803 }
3804
/*
 * Dump an admin queue descriptor to the console for debugging:
 * flags (decoded via snprintb), opcode, data length, return values,
 * the embedded virtchnl opcode/retval, cookie and raw parameters.
 */
static void
iavf_aq_dump(const struct iavf_softc *sc, const struct ixl_aq_desc *iaq,
    const char *msg)
{
	char buf[512];
	size_t len;

	len = sizeof(buf);
	/* reserve the last byte as a terminator; pass the rest to snprintb */
	buf[--len] = '\0';

	device_printf(sc->sc_dev, "%s\n", msg);
	snprintb(buf, len, IXL_AQ_FLAGS_FMT, le16toh(iaq->iaq_flags));
	device_printf(sc->sc_dev, "flags %s opcode %04x\n",
	    buf, le16toh(iaq->iaq_opcode));
	device_printf(sc->sc_dev, "datalen %u retval %u\n",
	    le16toh(iaq->iaq_datalen), le16toh(iaq->iaq_retval));
	device_printf(sc->sc_dev, "vc-opcode %u (%s)\n",
	    iavf_aq_vc_get_opcode(iaq),
	    iavf_aq_vc_opcode_str(iaq));
	device_printf(sc->sc_dev, "vc-retval %u\n",
	    iavf_aq_vc_get_retval(iaq));
	device_printf(sc->sc_dev, "cookie %016" PRIx64 "\n", iaq->iaq_cookie);
	device_printf(sc->sc_dev, "%08x %08x %08x %08x\n",
	    le32toh(iaq->iaq_param[0]), le32toh(iaq->iaq_param[1]),
	    le32toh(iaq->iaq_param[2]), le32toh(iaq->iaq_param[3]));
}
3831
3832 static int
3833 iavf_arq_fill(struct iavf_softc *sc)
3834 {
3835 struct ixl_aq_buf *aqb;
3836 struct ixl_aq_desc *arq, *iaq;
3837 unsigned int prod = sc->sc_arq_prod;
3838 unsigned int n;
3839 int filled;
3840
3841 n = ixl_rxr_unrefreshed(sc->sc_arq_prod, sc->sc_arq_cons,
3842 IAVF_AQ_NUM);
3843
3844 if (__predict_false(n <= 0))
3845 return 0;
3846
3847 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3848 0, IXL_DMA_LEN(&sc->sc_arq),
3849 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3850
3851 arq = IXL_DMA_KVA(&sc->sc_arq);
3852
3853 do {
3854 iaq = &arq[prod];
3855
3856 if (ixl_aq_has_dva(iaq)) {
3857 /* already filled */
3858 break;
3859 }
3860
3861 aqb = iavf_aqb_get_locked(&sc->sc_arq_idle);
3862 if (aqb == NULL)
3863 break;
3864
3865 memset(aqb->aqb_data, 0, aqb->aqb_size);
3866
3867 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0,
3868 aqb->aqb_size, BUS_DMASYNC_PREREAD);
3869
3870 iaq->iaq_flags = htole16(IXL_AQ_BUF |
3871 (aqb->aqb_size > I40E_AQ_LARGE_BUF ?
3872 IXL_AQ_LB : 0));
3873 iaq->iaq_opcode = 0;
3874 iaq->iaq_datalen = htole16(aqb->aqb_size);
3875 iaq->iaq_retval = 0;
3876 iaq->iaq_cookie = 0;
3877 iaq->iaq_param[0] = 0;
3878 iaq->iaq_param[1] = 0;
3879 ixl_aq_dva(iaq, IXL_AQB_DVA(aqb));
3880 iavf_aqb_put_locked(&sc->sc_arq_live, aqb);
3881
3882 prod++;
3883 prod &= IAVF_AQ_MASK;
3884 filled = 1;
3885 } while (--n);
3886
3887 sc->sc_arq_prod = prod;
3888
3889 if (filled) {
3890 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3891 0, IXL_DMA_LEN(&sc->sc_arq),
3892 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3893 iavf_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
3894 }
3895
3896 return filled;
3897 }
3898
/*
 * Sleep until the admin receive path (iavf_arq) signals that a
 * response with the given virtchnl opcode arrived, or until the
 * timed wait fails (e.g. EWOULDBLOCK on timeout).
 *
 * Returns 0 when the opcode was observed, otherwise the
 * cv_timedwait() error.  Caller holds sc_adminq_lock.
 */
static int
iavf_arq_wait(struct iavf_softc *sc, uint32_t opcode)
{
	int error;

	KASSERT(mutex_owned(&sc->sc_adminq_lock));

	/* loop: the cv may be signalled for responses we don't care about */
	while ((error = cv_timedwait(&sc->sc_adminq_cv,
	    &sc->sc_adminq_lock, mstohz(IAVF_EXEC_TIMEOUT))) == 0) {
		if (opcode == sc->sc_arq_opcode)
			break;
	}

	if (error != 0 &&
	    atomic_load_relaxed(&sc->sc_debuglevel) >= 2)
		device_printf(sc->sc_dev, "cv_timedwait error=%d\n", error);

	return error;
}
3918
/*
 * Work task: top up the admin receive queue.  First recycle idle
 * buffers; if descriptors remain unfilled, allocate new buffers
 * outside the adminq lock (allocation may sleep), then hand them to
 * the idle pool and fill again.
 */
static void
iavf_arq_refill(void *xsc)
{
	struct iavf_softc *sc = xsc;
	struct ixl_aq_bufs aqbs;
	struct ixl_aq_buf *aqb;
	unsigned int n, i;

	mutex_enter(&sc->sc_adminq_lock);
	iavf_arq_fill(sc);
	/* how many descriptors are still without a buffer? */
	n = ixl_rxr_unrefreshed(sc->sc_arq_prod, sc->sc_arq_cons,
	    IAVF_AQ_NUM);
	mutex_exit(&sc->sc_adminq_lock);

	if (n == 0)
		return;

	if (atomic_load_relaxed(&sc->sc_debuglevel) >= 1)
		device_printf(sc->sc_dev, "Allocate %d bufs for arq\n", n);

	/* allocate unlocked, collect on a local list */
	SIMPLEQ_INIT(&aqbs);
	for (i = 0; i < n; i++) {
		aqb = iavf_aqb_get(sc, NULL);
		if (aqb == NULL)
			continue;
		SIMPLEQ_INSERT_TAIL(&aqbs, aqb, aqb_entry);
	}

	mutex_enter(&sc->sc_adminq_lock);
	while ((aqb = SIMPLEQ_FIRST(&aqbs)) != NULL) {
		SIMPLEQ_REMOVE(&aqbs, aqb, ixl_aq_buf, aqb_entry);
		iavf_aqb_put_locked(&sc->sc_arq_idle, aqb);
	}
	iavf_arq_fill(sc);
	mutex_exit(&sc->sc_adminq_lock);
}
3955
/*
 * Decode one admin receive descriptor: optionally dump/log it at
 * high debug levels, then dispatch the virtchnl message to the
 * matching handler.  Opcodes without a case here are handled (or
 * ignored) by the caller.
 *
 * Returns the descriptor's virtchnl opcode.
 */
static uint32_t
iavf_process_arq(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
    struct ixl_aq_buf *aqb)
{
	uint32_t vc_retval, vc_opcode;
	int dbg;

	dbg = atomic_load_relaxed(&sc->sc_debuglevel);
	if (dbg >= 3)
		iavf_aq_dump(sc, iaq, "arq proc");

	if (dbg >= 2) {
		vc_retval = iavf_aq_vc_get_retval(iaq);
		if (vc_retval != IAVF_VC_RC_SUCCESS) {
			device_printf(sc->sc_dev, "%s failed=%d(arq)\n",
			    iavf_aq_vc_opcode_str(iaq), vc_retval);
		}
	}

	vc_opcode = iavf_aq_vc_get_opcode(iaq);
	switch (vc_opcode) {
	case IAVF_VC_OP_VERSION:
		iavf_process_version(sc, iaq, aqb);
		break;
	case IAVF_VC_OP_GET_VF_RESOURCES:
		iavf_process_vf_resources(sc, iaq, aqb);
		break;
	case IAVF_VC_OP_CONFIG_IRQ_MAP:
		iavf_process_irq_map(sc, iaq);
		break;
	case IAVF_VC_OP_EVENT:
		iavf_process_vc_event(sc, iaq, aqb);
		break;
	case IAVF_VC_OP_GET_STATS:
		iavf_process_stats(sc, iaq, aqb);
		break;
	case IAVF_VC_OP_REQUEST_QUEUES:
		iavf_process_req_queues(sc, iaq, aqb);
		break;
	}

	return vc_opcode;
}
3999
/*
 * Busy-poll the admin receive queue for up to "retry" milliseconds,
 * consuming every pending descriptor, until a response with
 * wait_opcode is seen.  Used on paths where sleeping on the cv is
 * not possible (attach, reset).
 *
 * Returns 0 when the opcode arrived, EIO if the hardware head index
 * is out of range (device reset in progress), or ETIMEDOUT.
 */
static int
iavf_arq_poll(struct iavf_softc *sc, uint32_t wait_opcode, int retry)
{
	struct ixl_aq_desc *arq, *iaq;
	struct ixl_aq_buf *aqb;
	unsigned int cons = sc->sc_arq_cons;
	unsigned int prod;
	uint32_t vc_opcode;
	bool received;
	int i;

	for (i = 0, received = false; i < retry && !received; i++) {
		prod = iavf_rd(sc, sc->sc_aq_regs->arq_head);
		prod &= sc->sc_aq_regs->arq_head_mask;

		if (prod == cons) {
			/* nothing new yet; wait 1ms and re-read */
			delaymsec(1);
			continue;
		}

		/* bogus head value indicates the device is resetting */
		if (prod >= IAVF_AQ_NUM) {
			return EIO;
		}

		arq = IXL_DMA_KVA(&sc->sc_arq);

		bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
		    0, IXL_DMA_LEN(&sc->sc_arq),
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		/* drain all descriptors between cons and prod */
		do {
			iaq = &arq[cons];
			aqb = iavf_aqb_get_locked(&sc->sc_arq_live);
			KASSERT(aqb != NULL);

			bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0,
			    IAVF_AQ_BUFLEN, BUS_DMASYNC_POSTREAD);

			vc_opcode = iavf_process_arq(sc, iaq, aqb);

			if (vc_opcode == wait_opcode)
				received = true;

			memset(iaq, 0, sizeof(*iaq));
			iavf_aqb_put_locked(&sc->sc_arq_idle, aqb);

			cons++;
			cons &= IAVF_AQ_MASK;

		} while (cons != prod);

		bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
		    0, IXL_DMA_LEN(&sc->sc_arq),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		sc->sc_arq_cons = cons;
		/* repost the buffers we just consumed */
		iavf_arq_fill(sc);

	}

	if (!received)
		return ETIMEDOUT;

	return 0;
}
4065
/*
 * Interrupt-driven admin receive processing: consume all pending
 * descriptors, dispatch each message, and wake any thread sleeping
 * in iavf_arq_wait() when a command-response opcode arrives.
 *
 * Returns nonzero if any descriptor was processed.  Caller holds
 * sc_adminq_lock.
 */
static int
iavf_arq(struct iavf_softc *sc)
{
	struct ixl_aq_desc *arq, *iaq;
	struct ixl_aq_buf *aqb;
	unsigned int cons = sc->sc_arq_cons;
	unsigned int prod;
	uint32_t vc_opcode;

	KASSERT(mutex_owned(&sc->sc_adminq_lock));

	prod = iavf_rd(sc, sc->sc_aq_regs->arq_head);
	prod &= sc->sc_aq_regs->arq_head_mask;

	/* broken value at resetting */
	if (prod >= IAVF_AQ_NUM) {
		/* schedule a full reset instead of touching the ring */
		iavf_work_set(&sc->sc_reset_task, iavf_reset_start, sc);
		iavf_work_add(sc->sc_workq, &sc->sc_reset_task);
		return 0;
	}

	if (cons == prod)
		return 0;

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
	    0, IXL_DMA_LEN(&sc->sc_arq),
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	arq = IXL_DMA_KVA(&sc->sc_arq);

	do {
		iaq = &arq[cons];
		aqb = iavf_aqb_get_locked(&sc->sc_arq_live);

		KASSERT(aqb != NULL);

		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IAVF_AQ_BUFLEN,
		    BUS_DMASYNC_POSTREAD);

		vc_opcode = iavf_process_arq(sc, iaq, aqb);

		/*
		 * Command responses: record the opcode/retval for
		 * iavf_arq_wait() and wake the waiting thread.
		 */
		switch (vc_opcode) {
		case IAVF_VC_OP_CONFIG_TX_QUEUE:
		case IAVF_VC_OP_CONFIG_RX_QUEUE:
		case IAVF_VC_OP_CONFIG_VSI_QUEUES:
		case IAVF_VC_OP_ENABLE_QUEUES:
		case IAVF_VC_OP_DISABLE_QUEUES:
		case IAVF_VC_OP_GET_RSS_HENA_CAPS:
		case IAVF_VC_OP_SET_RSS_HENA:
		case IAVF_VC_OP_ADD_ETH_ADDR:
		case IAVF_VC_OP_DEL_ETH_ADDR:
		case IAVF_VC_OP_CONFIG_PROMISC:
		case IAVF_VC_OP_ADD_VLAN:
		case IAVF_VC_OP_DEL_VLAN:
		case IAVF_VC_OP_ENABLE_VLAN_STRIP:
		case IAVF_VC_OP_DISABLE_VLAN_STRIP:
		case IAVF_VC_OP_CONFIG_RSS_KEY:
		case IAVF_VC_OP_CONFIG_RSS_LUT:
			sc->sc_arq_retval = iavf_aq_vc_get_retval(iaq);
			sc->sc_arq_opcode = vc_opcode;
			cv_signal(&sc->sc_adminq_cv);
			break;
		}

		memset(iaq, 0, sizeof(*iaq));
		iavf_aqb_put_locked(&sc->sc_arq_idle, aqb);

		cons++;
		cons &= IAVF_AQ_MASK;
	} while (cons != prod);

	sc->sc_arq_cons = cons;
	/* refill (and possibly allocate) outside interrupt context */
	iavf_work_add(sc->sc_workq, &sc->sc_arq_refill);

	return 1;
}
4142
/*
 * Post one command descriptor (with optional DMA data buffer) to the
 * admin transmit queue and ring the ATQ tail doorbell.  A supplied
 * aqb is moved onto sc_atq_live until completion.
 *
 * Returns the new producer index.  Caller holds sc_adminq_lock (or
 * is in single-threaded attach/reset context).
 */
static int
iavf_atq_post(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
    struct ixl_aq_buf *aqb)
{
	struct ixl_aq_desc *atq, *slot;
	unsigned int prod;

	atq = IXL_DMA_KVA(&sc->sc_atq);
	prod = sc->sc_atq_prod;
	slot = &atq[prod];

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);

	*slot = *iaq;
	/* request a completion interrupt for this descriptor */
	slot->iaq_flags |= htole16(IXL_AQ_SI);
	if (aqb != NULL) {
		ixl_aq_dva(slot, IXL_AQB_DVA(aqb));
		bus_dmamap_sync(sc->sc_dmat, IXL_AQB_MAP(aqb),
		    0, IXL_AQB_LEN(aqb), BUS_DMASYNC_PREWRITE);
		iavf_aqb_put_locked(&sc->sc_atq_live, aqb);
	} else {
		ixl_aq_dva(slot, (bus_addr_t)0);
	}

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);

	if (atomic_load_relaxed(&sc->sc_debuglevel) >= 3)
		iavf_aq_dump(sc, slot, "post");

	prod++;
	prod &= IAVF_AQ_MASK;
	sc->sc_atq_prod = prod;
	/* doorbell: hand the descriptor to the hardware */
	iavf_wr(sc, sc->sc_aq_regs->atq_tail, prod);
	return prod;
}
4180
/*
 * Busy-wait (up to tm milliseconds) for the hardware to consume the
 * most recently posted ATQ descriptor, then check its completion
 * status.  The completed slot is copied out and cleared.
 *
 * Returns 0 on success, ETIMEDOUT if the head never caught up, EIO
 * if the device reported a non-OK return code.
 */
static int
iavf_atq_poll(struct iavf_softc *sc, unsigned int tm)
{
	struct ixl_aq_desc *atq, *slot;
	struct ixl_aq_desc iaq;
	unsigned int prod;
	unsigned int t;
	int dbg;

	dbg = atomic_load_relaxed(&sc->sc_debuglevel);
	atq = IXL_DMA_KVA(&sc->sc_atq);
	prod = sc->sc_atq_prod;
	slot = &atq[prod];
	t = 0;

	/* the device advances atq_head to prod once it consumed the slot */
	while (iavf_rd(sc, sc->sc_aq_regs->atq_head) != prod) {
		delaymsec(1);

		if (t++ > tm) {
			if (dbg >= 2) {
				device_printf(sc->sc_dev,
				    "atq timedout\n");
			}
			return ETIMEDOUT;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTREAD);
	iaq = *slot;
	memset(slot, 0, sizeof(*slot));
	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREREAD);

	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
		if (dbg >= 2) {
			device_printf(sc->sc_dev,
			    "atq retcode=0x%04x\n", le16toh(iaq.iaq_retval));
		}
		return EIO;
	}

	return 0;
}
4225
/*
 * Reap completed admin transmit descriptors: for every slot the
 * hardware marked done (IXL_AQ_DD), move its data buffer (if any)
 * from the live list back to the idle pool and clear the slot.
 * Stops at the first not-yet-done descriptor.
 *
 * Caller holds sc_adminq_lock.
 */
static void
iavf_atq_done(struct iavf_softc *sc)
{
	struct ixl_aq_desc *atq, *slot;
	struct ixl_aq_buf *aqb;
	unsigned int cons;
	unsigned int prod;

	KASSERT(mutex_owned(&sc->sc_adminq_lock));

	prod = sc->sc_atq_prod;
	cons = sc->sc_atq_cons;

	if (prod == cons)
		return;

	atq = IXL_DMA_KVA(&sc->sc_atq);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
	    0, IXL_DMA_LEN(&sc->sc_atq),
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	do {
		slot = &atq[cons];
		/* descriptors complete in order; stop at the first pending */
		if (!ISSET(slot->iaq_flags, htole16(IXL_AQ_DD)))
			break;

		/* recycle the command's data buffer, if it had one */
		if (ixl_aq_has_dva(slot) &&
		    (aqb = iavf_aqb_get_locked(&sc->sc_atq_live)) != NULL) {
			bus_dmamap_sync(sc->sc_dmat, IXL_AQB_MAP(aqb),
			    0, IXL_AQB_LEN(aqb), BUS_DMASYNC_POSTWRITE);
			iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
		}

		memset(slot, 0, sizeof(*slot));

		cons++;
		cons &= IAVF_AQ_MASK;
	} while (cons != prod);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
	    0, IXL_DMA_LEN(&sc->sc_atq),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	sc->sc_atq_cons = cons;
}
4272
4273 static int
4274 iavf_adminq_poll(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
4275 struct ixl_aq_buf *aqb, int retry)
4276 {
4277 int error;
4278
4279 mutex_enter(&sc->sc_adminq_lock);
4280 error = iavf_adminq_poll_locked(sc, iaq, aqb, retry);
4281 mutex_exit(&sc->sc_adminq_lock);
4282
4283 return error;
4284 }
4285
/*
 * Execute an admin command by busy-polling: post the descriptor,
 * wait for the device to consume it, then poll the receive queue for
 * the matching virtchnl response.  Used where sleeping is not
 * possible (attach before interrupts, reset).
 *
 * Returns 0 on success, or the ATQ/ARQ poll error.
 */
static int
iavf_adminq_poll_locked(struct iavf_softc *sc,
    struct ixl_aq_desc *iaq, struct ixl_aq_buf *aqb, int retry)
{
	uint32_t opcode;
	int error;

	/* pre-attach the lock may not be held; after attach it must be */
	KASSERT(!sc->sc_attached || mutex_owned(&sc->sc_adminq_lock));

	opcode = iavf_aq_vc_get_opcode(iaq);

	iavf_atq_post(sc, iaq, aqb);

	error = iavf_atq_poll(sc, retry);

	/*
	 * collect the aqb used in the current command and
	 * added to sc_atq_live at iavf_atq_post(),
	 * whether or not the command succeeded.
	 */
	if (aqb != NULL) {
		(void)iavf_aqb_get_locked(&sc->sc_atq_live);
		bus_dmamap_sync(sc->sc_dmat, IXL_AQB_MAP(aqb),
		    0, IXL_AQB_LEN(aqb), BUS_DMASYNC_POSTWRITE);
	}

	if (error)
		return error;

	error = iavf_arq_poll(sc, opcode, retry);

	if (error != 0 &&
	    atomic_load_relaxed(&sc->sc_debuglevel) >= 1) {
		device_printf(sc->sc_dev, "%s failed=%d(polling)\n",
		    iavf_aq_vc_opcode_str(iaq), error);
	}

	return error;
}
4325
/*
 * Execute an admin command from thread context: post it, then sleep
 * until the interrupt path records a matching response.  On success
 * the virtchnl return value (IAVF_VC_RC_*) is returned; otherwise
 * the wait error.
 */
static int
iavf_adminq_exec(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
    struct ixl_aq_buf *aqb)
{
	int error;
	uint32_t opcode;

	opcode = iavf_aq_vc_get_opcode(iaq);

	mutex_enter(&sc->sc_adminq_lock);
	iavf_atq_post(sc, iaq, aqb);

	error = iavf_arq_wait(sc, opcode);
	if (error == 0) {
		/* the command completed; report the virtchnl status */
		error = sc->sc_arq_retval;
		if (error != IAVF_VC_RC_SUCCESS &&
		    atomic_load_relaxed(&sc->sc_debuglevel) >= 1) {
			device_printf(sc->sc_dev, "%s failed=%d\n",
			    iavf_aq_vc_opcode_str(iaq), error);
		}
	}

	mutex_exit(&sc->sc_adminq_lock);
	return error;
}
4351
4352 static void
4353 iavf_process_version(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
4354 struct ixl_aq_buf *aqb)
4355 {
4356 struct iavf_vc_version_info *ver;
4357
4358 ver = (struct iavf_vc_version_info *)aqb->aqb_data;
4359 sc->sc_major_ver = le32toh(ver->major);
4360 sc->sc_minor_ver = le32toh(ver->minor);
4361 }
4362
/*
 * Handle a GET_VF_RESOURCES response: record the granted vector
 * count, offload capabilities, MTU limit, and first-VSI parameters
 * (VSI id, queue pair count, default MAC).  Falls back to the fake
 * MAC if the PF supplied an all-zero address.
 */
static void
iavf_process_vf_resources(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
    struct ixl_aq_buf *aqb)
{
	struct iavf_vc_vf_resource *vf_res;
	struct iavf_vc_vsi_resource *vsi_res;
	uint8_t *enaddr;
	int mtu, dbg;
	char buf[512];

	dbg = atomic_load_relaxed(&sc->sc_debuglevel);
	/* mark the response as seen even if it carries no usable VSI */
	sc->sc_got_vf_resources = 1;

	vf_res = aqb->aqb_data;
	sc->sc_max_vectors = le16toh(vf_res->max_vectors);
	if (le16toh(vf_res->num_vsis) == 0) {
		if (dbg >= 1) {
			device_printf(sc->sc_dev, "no vsi available\n");
		}
		return;
	}
	sc->sc_vf_cap = le32toh(vf_res->offload_flags);
	if (dbg >= 2) {
		snprintb(buf, sizeof(buf),
		    IAVF_VC_OFFLOAD_FMT, sc->sc_vf_cap);
		device_printf(sc->sc_dev, "VF cap=%s\n", buf);
	}

	/* only accept an MTU limit within the driver's sane range */
	mtu = le16toh(vf_res->max_mtu);
	if (IAVF_MIN_MTU < mtu && mtu < IAVF_MAX_MTU) {
		sc->sc_max_mtu = MIN(IAVF_MAX_MTU, mtu);
	}

	/* this driver only uses the first VSI */
	vsi_res = &vf_res->vsi_res[0];
	sc->sc_vsi_id = le16toh(vsi_res->vsi_id);
	sc->sc_vf_id = le32toh(iaq->iaq_param[0]);
	sc->sc_qset_handle = le16toh(vsi_res->qset_handle);
	sc->sc_nqps_vsi = le16toh(vsi_res->num_queue_pairs);
	if (!iavf_is_etheranyaddr(vsi_res->default_mac)) {
		enaddr = vsi_res->default_mac;
	} else {
		enaddr = sc->sc_enaddr_fake;
	}
	memcpy(sc->sc_enaddr, enaddr, ETHER_ADDR_LEN);
}
4408
4409 static void
4410 iavf_process_irq_map(struct iavf_softc *sc, struct ixl_aq_desc *iaq)
4411 {
4412 uint32_t retval;
4413
4414 retval = iavf_aq_vc_get_retval(iaq);
4415 if (retval != IAVF_VC_RC_SUCCESS) {
4416 return;
4417 }
4418
4419 sc->sc_got_irq_map = 1;
4420 }
4421
/*
 * Handle an unsolicited EVENT message from the PF.
 *
 * LINK_CHANGE updates the cached media status/active words, the
 * interface baudrate and (after attach) the kernel link state.
 * RESET_IMPENDING schedules a VF reset via the reset task.
 */
static void
iavf_process_vc_event(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
    struct ixl_aq_buf *aqb)
{
	struct iavf_vc_pf_event *event;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	const struct iavf_link_speed *speed;
	int link;

	event = aqb->aqb_data;
	switch (event->event) {
	case IAVF_VC_EVENT_LINK_CHANGE:
		sc->sc_media_status = IFM_AVALID;
		sc->sc_media_active = IFM_ETHER;
		link = LINK_STATE_DOWN;
		if (event->link_status) {
			link = LINK_STATE_UP;
			sc->sc_media_status |= IFM_ACTIVE;
			sc->sc_media_active |= IFM_FDX;

			/* baudrate stays 0 if the speed code is unknown */
			ifp->if_baudrate = 0;
			speed = iavf_find_link_speed(sc, event->link_speed);
			if (speed != NULL) {
				sc->sc_media_active |= speed->media;
				ifp->if_baudrate = speed->baudrate;
			}
		}

		if (sc->sc_link_state != link) {
			sc->sc_link_state = link;
			/* before attach, if_link_state_change is unsafe */
			if (sc->sc_attached) {
				if_link_state_change(ifp, link);
			}
		}
		break;
	case IAVF_VC_EVENT_RESET_IMPENDING:
		log(LOG_INFO, "%s: Reset warning received from the PF\n",
		    ifp->if_xname);
		iavf_work_set(&sc->sc_reset_task, iavf_reset_request, sc);
		iavf_work_add(sc->sc_workq, &sc->sc_reset_task);
		break;
	}
}
4465
/*
 * Handle a GET_STATS response: copy the ethernet statistics reported
 * by the PF into the driver's event counters.  Counters are set
 * (not accumulated) because the PF reports absolute totals.
 */
static void
iavf_process_stats(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
    struct ixl_aq_buf *aqb)
{
	struct iavf_stat_counters *isc;
	struct i40e_eth_stats *st;

	KASSERT(mutex_owned(&sc->sc_adminq_lock));

	st = aqb->aqb_data;
	isc = &sc->sc_stat_counters;

	isc->isc_rx_bytes.ev_count = st->rx_bytes;
	isc->isc_rx_unicast.ev_count = st->rx_unicast;
	isc->isc_rx_multicast.ev_count = st->rx_multicast;
	isc->isc_rx_broadcast.ev_count = st->rx_broadcast;
	isc->isc_rx_discards.ev_count = st->rx_discards;
	isc->isc_rx_unknown_protocol.ev_count = st->rx_unknown_protocol;

	isc->isc_tx_bytes.ev_count = st->tx_bytes;
	isc->isc_tx_unicast.ev_count = st->tx_unicast;
	isc->isc_tx_multicast.ev_count = st->tx_multicast;
	isc->isc_tx_broadcast.ev_count = st->tx_broadcast;
	isc->isc_tx_discards.ev_count = st->tx_discards;
	isc->isc_tx_errors.ev_count = st->tx_errors;
}
4492
/*
 * Handle a REQUEST_QUEUES response.  On the first response whose
 * granted maximum exceeds the current VSI queue count, record the
 * new maximum and retry the request once via the req-queues task.
 */
static void
iavf_process_req_queues(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
    struct ixl_aq_buf *aqb)
{
	struct iavf_vc_res_request *req;
	struct ifnet *ifp;
	uint32_t vc_retval;

	ifp = &sc->sc_ec.ec_if;
	req = aqb->aqb_data;

	vc_retval = iavf_aq_vc_get_retval(iaq);
	if (vc_retval != IAVF_VC_RC_SUCCESS) {
		return;
	}

	/*
	 * NOTE(review): the message prints sc_nqps_req as "requested"
	 * and the PF's num_queue_pairs as "left", yet the guard fires
	 * when the PF value is the larger one — the wording/argument
	 * order looks inverted; confirm against the virtchnl contract.
	 */
	if (sc->sc_nqps_req < req->num_queue_pairs) {
		log(LOG_INFO,
		    "%s: requested %d queues, but only %d left.\n",
		    ifp->if_xname,
		    sc->sc_nqps_req, req->num_queue_pairs);
	}

	if (sc->sc_nqps_vsi < req->num_queue_pairs) {
		if (!sc->sc_req_queues_retried) {
			/* req->num_queue_pairs indicates max qps */
			sc->sc_nqps_req = req->num_queue_pairs;

			sc->sc_req_queues_retried = true;
			iavf_work_add(sc->sc_workq, &sc->sc_req_queues_task);
		}
	}
}
4526
/*
 * Send a VERSION command advertising the driver's virtchnl version
 * and poll for the response; iavf_process_version() fills in
 * sc_major_ver/sc_minor_ver (invalidated to UINT_MAX first).
 *
 * Returns 0 on success, -1 on timeout/failure.
 */
static int
iavf_get_version(struct iavf_softc *sc, struct ixl_aq_buf *aqb)
{
	struct ixl_aq_desc iaq;
	struct iavf_vc_version_info *ver;
	int error;

	memset(&iaq, 0, sizeof(iaq));
	iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
	iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
	iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_VERSION);
	iaq.iaq_datalen = htole16(sizeof(struct iavf_vc_version_info));

	ver = IXL_AQB_KVA(aqb);
	ver->major = htole32(IAVF_VF_MAJOR);
	ver->minor = htole32(IAVF_VF_MINOR);

	/* invalidate; the response handler overwrites these */
	sc->sc_major_ver = UINT_MAX;
	sc->sc_minor_ver = UINT_MAX;

	/* before attach the adminq lock is already effectively ours */
	if (sc->sc_attached) {
		error = iavf_adminq_poll(sc, &iaq, aqb, 250);
	} else {
		error = iavf_adminq_poll_locked(sc, &iaq, aqb, 250);
	}

	if (error)
		return -1;

	return 0;
}
4558
/*
 * Send a GET_VF_RESOURCES command and poll for the response;
 * iavf_process_vf_resources() records the grant.  With virtchnl
 * major version > 0 the request carries the capability flags the
 * driver wants.
 *
 * Returns 0 on success, -1 on timeout/failure.
 */
static int
iavf_get_vf_resources(struct iavf_softc *sc, struct ixl_aq_buf *aqb)
{
	struct ixl_aq_desc iaq;
	uint32_t *cap, cap0;
	int error;

	memset(&iaq, 0, sizeof(iaq));
	iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
	iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
	iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_GET_VF_RESOURCES);

	if (sc->sc_major_ver > 0) {
		/* capabilities this driver knows how to use */
		cap0 = IAVF_VC_OFFLOAD_L2 |
		    IAVF_VC_OFFLOAD_VLAN |
		    IAVF_VC_OFFLOAD_RSS_PF |
		    IAVF_VC_OFFLOAD_REQ_QUEUES;

		cap = IXL_AQB_KVA(aqb);
		*cap = htole32(cap0);
		iaq.iaq_datalen = htole16(sizeof(*cap));
	}

	sc->sc_got_vf_resources = 0;
	/* before attach the adminq lock is already effectively ours */
	if (sc->sc_attached) {
		error = iavf_adminq_poll(sc, &iaq, aqb, 250);
	} else {
		error = iavf_adminq_poll_locked(sc, &iaq, aqb, 250);
	}

	if (error)
		return -1;
	return 0;
}
4593
4594 static int
4595 iavf_get_stats(struct iavf_softc *sc)
4596 {
4597 struct ixl_aq_desc iaq;
4598 struct ixl_aq_buf *aqb;
4599 struct iavf_vc_queue_select *qsel;
4600 int error;
4601
4602 mutex_enter(&sc->sc_adminq_lock);
4603 aqb = iavf_aqb_get_locked(&sc->sc_atq_idle);
4604 mutex_exit(&sc->sc_adminq_lock);
4605
4606 if (aqb == NULL)
4607 return ENOMEM;
4608
4609 qsel = IXL_AQB_KVA(aqb);
4610 memset(qsel, 0, sizeof(*qsel));
4611 qsel->vsi_id = htole16(sc->sc_vsi_id);
4612
4613 memset(&iaq, 0, sizeof(iaq));
4614
4615 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4616 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4617 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_GET_STATS);
4618 iaq.iaq_datalen = htole16(sizeof(*qsel));
4619
4620 if (atomic_load_relaxed(&sc->sc_debuglevel) >= 3) {
4621 device_printf(sc->sc_dev, "post GET_STATS command\n");
4622 }
4623
4624 mutex_enter(&sc->sc_adminq_lock);
4625 error = iavf_atq_post(sc, &iaq, aqb);
4626 mutex_exit(&sc->sc_adminq_lock);
4627
4628 return error;
4629 }
4630
4631 static int
4632 iavf_config_irq_map(struct iavf_softc *sc, struct ixl_aq_buf *aqb)
4633 {
4634 struct ixl_aq_desc iaq;
4635 struct iavf_vc_vector_map *vec;
4636 struct iavf_vc_irq_map_info *map;
4637 struct iavf_rx_ring *rxr;
4638 struct iavf_tx_ring *txr;
4639 unsigned int num_vec;
4640 int error;
4641
4642 map = IXL_AQB_KVA(aqb);
4643 vec = map->vecmap;
4644 num_vec = 0;
4645
4646 if (sc->sc_nintrs == 1) {
4647 vec[0].vsi_id = htole16(sc->sc_vsi_id);
4648 vec[0].vector_id = htole16(0);
4649 vec[0].rxq_map = htole16(iavf_allqueues(sc));
4650 vec[0].txq_map = htole16(iavf_allqueues(sc));
4651 vec[0].rxitr_idx = htole16(IAVF_NOITR);
4652 vec[0].rxitr_idx = htole16(IAVF_NOITR);
4653 num_vec = 1;
4654 } else if (sc->sc_nintrs > 1) {
4655 KASSERT(sc->sc_nqps_alloc >= (sc->sc_nintrs - 1));
4656 for (; num_vec < (sc->sc_nintrs - 1); num_vec++) {
4657 rxr = sc->sc_qps[num_vec].qp_rxr;
4658 txr = sc->sc_qps[num_vec].qp_txr;
4659
4660 vec[num_vec].vsi_id = htole16(sc->sc_vsi_id);
4661 vec[num_vec].vector_id = htole16(num_vec + 1);
4662 vec[num_vec].rxq_map = htole16(__BIT(rxr->rxr_qid));
4663 vec[num_vec].txq_map = htole16(__BIT(txr->txr_qid));
4664 vec[num_vec].rxitr_idx = htole16(IAVF_ITR_RX);
4665 vec[num_vec].txitr_idx = htole16(IAVF_ITR_TX);
4666 }
4667
4668 vec[num_vec].vsi_id = htole16(sc->sc_vsi_id);
4669 vec[num_vec].vector_id = htole16(0);
4670 vec[num_vec].rxq_map = htole16(0);
4671 vec[num_vec].txq_map = htole16(0);
4672 num_vec++;
4673 }
4674
4675 map->num_vectors = htole16(num_vec);
4676
4677 memset(&iaq, 0, sizeof(iaq));
4678 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4679 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4680 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_CONFIG_IRQ_MAP);
4681 iaq.iaq_datalen = htole16(sizeof(*map) + sizeof(*vec) * num_vec);
4682
4683 if (sc->sc_attached) {
4684 error = iavf_adminq_poll(sc, &iaq, aqb, 250);
4685 } else {
4686 error = iavf_adminq_poll_locked(sc, &iaq, aqb, 250);
4687 }
4688
4689 if (error)
4690 return -1;
4691
4692 return 0;
4693 }
4694
4695 static int
4696 iavf_config_vsi_queues(struct iavf_softc *sc)
4697 {
4698 struct ifnet *ifp = &sc->sc_ec.ec_if;
4699 struct ixl_aq_desc iaq;
4700 struct ixl_aq_buf *aqb;
4701 struct iavf_vc_queue_config_info *config;
4702 struct iavf_vc_txq_info *txq;
4703 struct iavf_vc_rxq_info *rxq;
4704 struct iavf_rx_ring *rxr;
4705 struct iavf_tx_ring *txr;
4706 uint32_t rxmtu_max;
4707 unsigned int i;
4708 int error;
4709
4710 rxmtu_max = ifp->if_mtu + IAVF_MTU_ETHERLEN;
4711
4712 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
4713
4714 if (aqb == NULL)
4715 return -1;
4716
4717 config = IXL_AQB_KVA(aqb);
4718 memset(config, 0, sizeof(*config));
4719 config->vsi_id = htole16(sc->sc_vsi_id);
4720 config->num_queue_pairs = htole16(sc->sc_nqueue_pairs);
4721
4722 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
4723 rxr = sc->sc_qps[i].qp_rxr;
4724 txr = sc->sc_qps[i].qp_txr;
4725
4726 txq = &config->qpair[i].txq;
4727 txq->vsi_id = htole16(sc->sc_vsi_id);
4728 txq->queue_id = htole16(txr->txr_qid);
4729 txq->ring_len = htole16(sc->sc_tx_ring_ndescs);
4730 txq->headwb_ena = 0;
4731 txq->dma_ring_addr = htole64(IXL_DMA_DVA(&txr->txr_mem));
4732 txq->dma_headwb_addr = 0;
4733
4734 rxq = &config->qpair[i].rxq;
4735 rxq->vsi_id = htole16(sc->sc_vsi_id);
4736 rxq->queue_id = htole16(rxr->rxr_qid);
4737 rxq->ring_len = htole16(sc->sc_rx_ring_ndescs);
4738 rxq->splithdr_ena = 0;
4739 rxq->databuf_size = htole32(IAVF_MCLBYTES);
4740 rxq->max_pkt_size = htole32(rxmtu_max);
4741 rxq->dma_ring_addr = htole64(IXL_DMA_DVA(&rxr->rxr_mem));
4742 rxq->rx_split_pos = 0;
4743 }
4744
4745 memset(&iaq, 0, sizeof(iaq));
4746 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4747 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4748 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_CONFIG_VSI_QUEUES);
4749 iaq.iaq_datalen = htole16(sizeof(*config) +
4750 sizeof(config->qpair[0]) * sc->sc_nqueue_pairs);
4751
4752 error = iavf_adminq_exec(sc, &iaq, aqb);
4753 if (error != IAVF_VC_RC_SUCCESS) {
4754 return -1;
4755 }
4756
4757 return 0;
4758 }
4759
4760 static int
4761 iavf_config_hena(struct iavf_softc *sc)
4762 {
4763 struct ixl_aq_desc iaq;
4764 struct ixl_aq_buf *aqb;
4765 uint64_t *caps;
4766 int error;
4767
4768 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
4769
4770 if (aqb == NULL)
4771 return -1;
4772
4773 caps = IXL_AQB_KVA(aqb);
4774 if (sc->sc_mac_type == I40E_MAC_X722_VF)
4775 *caps = IXL_RSS_HENA_DEFAULT_X722;
4776 else
4777 *caps = IXL_RSS_HENA_DEFAULT_XL710;
4778
4779 memset(&iaq, 0, sizeof(iaq));
4780 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4781 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4782 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_SET_RSS_HENA);
4783 iaq.iaq_datalen = htole16(sizeof(*caps));
4784
4785 error = iavf_adminq_exec(sc, &iaq, aqb);
4786 if (error != IAVF_VC_RC_SUCCESS) {
4787 return -1;
4788 }
4789
4790 return 0;
4791 }
4792
4793 static inline void
4794 iavf_get_default_rss_key(uint8_t *buf, size_t len)
4795 {
4796 uint8_t rss_seed[RSS_KEYSIZE];
4797 size_t cplen;
4798
4799 cplen = MIN(len, sizeof(rss_seed));
4800 rss_getkey(rss_seed);
4801
4802 memcpy(buf, rss_seed, cplen);
4803 if (cplen < len)
4804 memset(buf + cplen, 0, len - cplen);
4805 }
4806
4807 static int
4808 iavf_config_rss_key(struct iavf_softc *sc)
4809 {
4810 struct ixl_aq_desc iaq;
4811 struct ixl_aq_buf *aqb;
4812 struct iavf_vc_rss_key *rss_key;
4813 size_t key_len;
4814 int rv;
4815
4816 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
4817 if (aqb == NULL)
4818 return -1;
4819
4820 rss_key = IXL_AQB_KVA(aqb);
4821 rss_key->vsi_id = htole16(sc->sc_vsi_id);
4822 key_len = IXL_RSS_KEY_SIZE;
4823 iavf_get_default_rss_key(rss_key->key, key_len);
4824 rss_key->key_len = key_len;
4825
4826 memset(&iaq, 0, sizeof(iaq));
4827 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4828 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4829 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_CONFIG_RSS_KEY);
4830 iaq.iaq_datalen = htole16(sizeof(*rss_key) - sizeof(rss_key->pad)
4831 + (sizeof(rss_key->key[0]) * key_len));
4832
4833 rv = iavf_adminq_exec(sc, &iaq, aqb);
4834 if (rv != IAVF_VC_RC_SUCCESS) {
4835 return -1;
4836 }
4837
4838 return 0;
4839 }
4840
4841 static int
4842 iavf_config_rss_lut(struct iavf_softc *sc)
4843 {
4844 struct ixl_aq_desc iaq;
4845 struct ixl_aq_buf *aqb;
4846 struct iavf_vc_rss_lut *rss_lut;
4847 uint8_t *lut, v;
4848 int rv, i;
4849
4850 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
4851 if (aqb == NULL)
4852 return -1;
4853
4854 rss_lut = IXL_AQB_KVA(aqb);
4855 rss_lut->vsi_id = htole16(sc->sc_vsi_id);
4856 rss_lut->lut_entries = htole16(IXL_RSS_VSI_LUT_SIZE);
4857
4858 lut = rss_lut->lut;
4859 for (i = 0; i < IXL_RSS_VSI_LUT_SIZE; i++) {
4860 v = i % sc->sc_nqueue_pairs;
4861 v &= IAVF_RSS_VSI_LUT_ENTRY_MASK;
4862 lut[i] = v;
4863 }
4864
4865 memset(&iaq, 0, sizeof(iaq));
4866 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4867 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4868 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_CONFIG_RSS_LUT);
4869 iaq.iaq_datalen = htole16(sizeof(*rss_lut) - sizeof(rss_lut->pad)
4870 + (sizeof(rss_lut->lut[0]) * IXL_RSS_VSI_LUT_SIZE));
4871
4872 rv = iavf_adminq_exec(sc, &iaq, aqb);
4873 if (rv != IAVF_VC_RC_SUCCESS) {
4874 return -1;
4875 }
4876
4877 return 0;
4878 }
4879
4880 static int
4881 iavf_queue_select(struct iavf_softc *sc, int opcode)
4882 {
4883 struct ixl_aq_desc iaq;
4884 struct ixl_aq_buf *aqb;
4885 struct iavf_vc_queue_select *qsel;
4886 int error;
4887
4888 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
4889 if (aqb == NULL)
4890 return -1;
4891
4892 qsel = IXL_AQB_KVA(aqb);
4893 qsel->vsi_id = htole16(sc->sc_vsi_id);
4894 qsel->rx_queues = htole32(iavf_allqueues(sc));
4895 qsel->tx_queues = htole32(iavf_allqueues(sc));
4896
4897 memset(&iaq, 0, sizeof(iaq));
4898 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4899 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4900 iavf_aq_vc_set_opcode(&iaq, opcode);
4901 iaq.iaq_datalen = htole16(sizeof(*qsel));
4902
4903 error = iavf_adminq_exec(sc, &iaq, aqb);
4904 if (error != IAVF_VC_RC_SUCCESS) {
4905 return -1;
4906 }
4907
4908 return 0;
4909 }
4910
4911 static int
4912 iavf_request_queues(struct iavf_softc *sc, unsigned int req_num)
4913 {
4914 struct ixl_aq_desc iaq;
4915 struct ixl_aq_buf *aqb;
4916 struct iavf_vc_res_request *req;
4917 int rv;
4918
4919 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
4920 if (aqb == NULL)
4921 return ENOMEM;
4922
4923 req = IXL_AQB_KVA(aqb);
4924 req->num_queue_pairs = req_num;
4925
4926 memset(&iaq, 0, sizeof(iaq));
4927 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4928 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4929 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_REQUEST_QUEUES);
4930 iaq.iaq_datalen = htole16(sizeof(*req));
4931
4932 mutex_enter(&sc->sc_adminq_lock);
4933 rv = iavf_atq_post(sc, &iaq, aqb);
4934 mutex_exit(&sc->sc_adminq_lock);
4935
4936 return rv;
4937 }
4938
/*
 * iavf_reset_vf -- ask the PF to reset this VF (virtchnl RESET_VF).
 * The reset-status register is marked in-progress first so that
 * readers of I40E_VFGEN_RSTAT see the reset before the PF acts.
 * The message carries no payload and is posted without waiting.
 * Returns 0 or an errno from the post.
 */
static int
iavf_reset_vf(struct iavf_softc *sc)
{
	struct ixl_aq_desc iaq;
	int error;

	memset(&iaq, 0, sizeof(iaq));
	iaq.iaq_flags = htole16(IXL_AQ_RD);
	iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
	iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_RESET_VF);
	iaq.iaq_datalen = htole16(0);

	/* Flag the reset as in progress before requesting it. */
	iavf_wr(sc, I40E_VFGEN_RSTAT, IAVF_VFR_INPROGRESS);

	mutex_enter(&sc->sc_adminq_lock);
	error = iavf_atq_post(sc, &iaq, NULL);
	mutex_exit(&sc->sc_adminq_lock);

	return error;
}
4959
4960 static int
4961 iavf_eth_addr(struct iavf_softc *sc, const uint8_t *addr, uint32_t opcode)
4962 {
4963 struct ixl_aq_desc iaq;
4964 struct ixl_aq_buf *aqb;
4965 struct iavf_vc_eth_addr_list *addrs;
4966 struct iavf_vc_eth_addr *vcaddr;
4967 int rv;
4968
4969 KASSERT(sc->sc_attached);
4970 KASSERT(opcode == IAVF_VC_OP_ADD_ETH_ADDR ||
4971 opcode == IAVF_VC_OP_DEL_ETH_ADDR);
4972
4973 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
4974 if (aqb == NULL)
4975 return -1;
4976
4977 addrs = IXL_AQB_KVA(aqb);
4978 addrs->vsi_id = htole16(sc->sc_vsi_id);
4979 addrs->num_elements = htole16(1);
4980 vcaddr = addrs->list;
4981 memcpy(vcaddr->addr, addr, ETHER_ADDR_LEN);
4982
4983 memset(&iaq, 0, sizeof(iaq));
4984 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4985 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4986 iavf_aq_vc_set_opcode(&iaq, opcode);
4987 iaq.iaq_datalen = htole16(sizeof(*addrs) + sizeof(*vcaddr));
4988
4989 if (sc->sc_resetting) {
4990 mutex_enter(&sc->sc_adminq_lock);
4991 rv = iavf_adminq_poll_locked(sc, &iaq, aqb, 250);
4992 iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
4993 mutex_exit(&sc->sc_adminq_lock);
4994 } else {
4995 rv = iavf_adminq_exec(sc, &iaq, aqb);
4996 }
4997
4998 if (rv != IAVF_VC_RC_SUCCESS) {
4999 return -1;
5000 }
5001
5002 return 0;
5003 }
5004
5005 static int
5006 iavf_config_promisc_mode(struct iavf_softc *sc, int unicast, int multicast)
5007 {
5008 struct ixl_aq_desc iaq;
5009 struct ixl_aq_buf *aqb;
5010 struct iavf_vc_promisc_info *promisc;
5011 int flags;
5012
5013 KASSERT(sc->sc_attached);
5014
5015 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
5016 if (aqb == NULL)
5017 return -1;
5018
5019 flags = 0;
5020 if (unicast)
5021 flags |= IAVF_FLAG_VF_UNICAST_PROMISC;
5022 if (multicast)
5023 flags |= IAVF_FLAG_VF_MULTICAST_PROMISC;
5024
5025 promisc = IXL_AQB_KVA(aqb);
5026 promisc->vsi_id = htole16(sc->sc_vsi_id);
5027 promisc->flags = htole16(flags);
5028
5029 memset(&iaq, 0, sizeof(iaq));
5030 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
5031 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
5032 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_CONFIG_PROMISC);
5033 iaq.iaq_datalen = htole16(sizeof(*promisc));
5034
5035 if (iavf_adminq_exec(sc, &iaq, aqb) != IAVF_VC_RC_SUCCESS) {
5036 return -1;
5037 }
5038
5039 return 0;
5040 }
5041
5042 static int
5043 iavf_config_vlan_stripping(struct iavf_softc *sc, int eccap)
5044 {
5045 struct ixl_aq_desc iaq;
5046 uint32_t opcode;
5047
5048 opcode = ISSET(eccap, ETHERCAP_VLAN_HWTAGGING) ?
5049 IAVF_VC_OP_ENABLE_VLAN_STRIP : IAVF_VC_OP_DISABLE_VLAN_STRIP;
5050
5051 memset(&iaq, 0, sizeof(iaq));
5052 iaq.iaq_flags = htole16(IXL_AQ_RD);
5053 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
5054 iavf_aq_vc_set_opcode(&iaq, opcode);
5055 iaq.iaq_datalen = htole16(0);
5056
5057 if (iavf_adminq_exec(sc, &iaq, NULL) != IAVF_VC_RC_SUCCESS) {
5058 return -1;
5059 }
5060
5061 return 0;
5062 }
5063
5064 static int
5065 iavf_config_vlan_id(struct iavf_softc *sc, uint16_t vid, uint32_t opcode)
5066 {
5067 struct ixl_aq_desc iaq;
5068 struct ixl_aq_buf *aqb;
5069 struct iavf_vc_vlan_filter *vfilter;
5070 int rv;
5071
5072 KASSERT(opcode == IAVF_VC_OP_ADD_VLAN || opcode == IAVF_VC_OP_DEL_VLAN);
5073
5074 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
5075
5076 if (aqb == NULL)
5077 return -1;
5078
5079 vfilter = IXL_AQB_KVA(aqb);
5080 vfilter->vsi_id = htole16(sc->sc_vsi_id);
5081 vfilter->num_vlan_id = htole16(1);
5082 vfilter->vlan_id[0] = vid;
5083
5084 memset(&iaq, 0, sizeof(iaq));
5085 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
5086 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
5087 iavf_aq_vc_set_opcode(&iaq, opcode);
5088 iaq.iaq_datalen = htole16(sizeof(*vfilter) + sizeof(vid));
5089
5090 if (sc->sc_resetting) {
5091 mutex_enter(&sc->sc_adminq_lock);
5092 rv = iavf_adminq_poll_locked(sc, &iaq, aqb, 250);
5093 iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
5094 mutex_exit(&sc->sc_adminq_lock);
5095 } else {
5096 rv = iavf_adminq_exec(sc, &iaq, aqb);
5097 }
5098
5099 if (rv != IAVF_VC_RC_SUCCESS) {
5100 return -1;
5101 }
5102
5103 return 0;
5104 }
5105
5106 static void
5107 iavf_post_request_queues(void *xsc)
5108 {
5109 struct iavf_softc *sc;
5110 struct ifnet *ifp;
5111
5112 sc = xsc;
5113 ifp = &sc->sc_ec.ec_if;
5114
5115 if (!ISSET(sc->sc_vf_cap, IAVF_VC_OFFLOAD_REQ_QUEUES)) {
5116 log(LOG_DEBUG, "%s: the VF has no REQ_QUEUES capability\n",
5117 ifp->if_xname);
5118 return;
5119 }
5120
5121 log(LOG_INFO, "%s: try to change the number of queue pairs"
5122 " (vsi %u, %u allocated, request %u)\n",
5123 ifp->if_xname,
5124 sc->sc_nqps_vsi, sc->sc_nqps_alloc, sc->sc_nqps_req);
5125 iavf_request_queues(sc, sc->sc_nqps_req);
5126 }
5127
5128 static bool
5129 iavf_sysctlnode_is_rx(struct sysctlnode *node)
5130 {
5131
5132 if (strstr(node->sysctl_parent->sysctl_name, "rx") != NULL)
5133 return true;
5134
5135 return false;
5136 }
5137
5138 static int
5139 iavf_sysctl_itr_handler(SYSCTLFN_ARGS)
5140 {
5141 struct sysctlnode node = *rnode;
5142 struct iavf_softc *sc = (struct iavf_softc *)node.sysctl_data;
5143 uint32_t newitr, *itrptr;
5144 unsigned int i;
5145 int itr, error;
5146
5147 if (iavf_sysctlnode_is_rx(&node)) {
5148 itrptr = &sc->sc_rx_itr;
5149 itr = IAVF_ITR_RX;
5150 } else {
5151 itrptr = &sc->sc_tx_itr;
5152 itr = IAVF_ITR_TX;
5153 }
5154
5155 newitr = *itrptr;
5156 node.sysctl_data = &newitr;
5157 node.sysctl_size = sizeof(newitr);
5158
5159 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5160 if (error || newp == NULL)
5161 return error;
5162
5163 if (newitr > 0x07FF)
5164 return EINVAL;
5165
5166 *itrptr = newitr;
5167
5168 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
5169 iavf_wr(sc, I40E_VFINT_ITRN1(itr, i), *itrptr);
5170 }
5171 iavf_wr(sc, I40E_VFINT_ITR01(itr), *itrptr);
5172
5173 return 0;
5174 }
5175
5176 static void
5177 iavf_workq_work(struct work *wk, void *context)
5178 {
5179 struct iavf_work *work;
5180
5181 work = container_of(wk, struct iavf_work, ixw_cookie);
5182
5183 atomic_swap_uint(&work->ixw_added, 0);
5184 work->ixw_func(work->ixw_arg);
5185 }
5186
5187 static struct workqueue *
5188 iavf_workq_create(const char *name, pri_t prio, int ipl, int flags)
5189 {
5190 struct workqueue *wq;
5191 int error;
5192
5193 error = workqueue_create(&wq, name, iavf_workq_work, NULL,
5194 prio, ipl, flags);
5195
5196 if (error)
5197 return NULL;
5198
5199 return wq;
5200 }
5201
/* iavf_workq_destroy -- wrapper kept for symmetry with iavf_workq_create. */
static void
iavf_workq_destroy(struct workqueue *wq)
{

	workqueue_destroy(wq);
}
5208
5209 static int
5210 iavf_work_set(struct iavf_work *work, void (*func)(void *), void *arg)
5211 {
5212
5213 if (work->ixw_added != 0)
5214 return -1;
5215
5216 memset(work, 0, sizeof(*work));
5217 work->ixw_func = func;
5218 work->ixw_arg = arg;
5219
5220 return 0;
5221 }
5222
/*
 * iavf_work_add -- enqueue a work item exactly once.  The CAS on
 * ixw_added (0 -> 1) ensures that concurrent callers enqueue the item
 * only a single time until iavf_workq_work clears the flag again.
 */
static void
iavf_work_add(struct workqueue *wq, struct iavf_work *work)
{
	if (atomic_cas_uint(&work->ixw_added, 0, 1) != 0)
		return;

	/*
	 * NOTE(review): preemption is disabled around the enqueue,
	 * presumably to stay bound to the current CPU's queue -- confirm.
	 */
	kpreempt_disable();
	workqueue_enqueue(wq, &work->ixw_cookie, NULL);
	kpreempt_enable();
}
5233
/* iavf_work_wait -- block until any queued instance of work completes. */
static void
iavf_work_wait(struct workqueue *wq, struct iavf_work *work)
{

	workqueue_wait(wq, &work->ixw_cookie);
}
5240
/*
 * iavf_evcnt_attach -- convenience wrapper: all of this driver's event
 * counters are attached as EVCNT_TYPE_MISC with group n0 and name n1.
 */
static void
iavf_evcnt_attach(struct evcnt *ec,
    const char *n0, const char *n1)
{

	evcnt_attach_dynamic(ec, EVCNT_TYPE_MISC,
	    NULL, n0, n1);
}
5249
/* Loadable-kernel-module declaration; the driver requires the pci bus. */
MODULE(MODULE_CLASS_DRIVER, if_iavf, "pci");

#ifdef _MODULE
#include "ioconf.c"	/* autogenerated attach tables for the module build */
#endif
5255
#ifdef _MODULE
/*
 * iavf_parse_modprop -- apply tunables from the module property
 * dictionary.  Recognized keys: "debug_level", "max_qps", "tx_itr",
 * "rx_itr", "tx_ndescs", "rx_ndescs".  Values that fail validation
 * are reported and ignored, leaving the built-in defaults intact.
 */
static void
iavf_parse_modprop(prop_dictionary_t dict)
{
	prop_object_t obj;
	int64_t val;
	uint32_t n;

	if (dict == NULL)
		return;

	obj = prop_dictionary_get(dict, "debug_level");
	if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
		val = prop_number_signed_value((prop_number_t)obj);

		if (val > 0) {
			iavf_params.debug = val;
			printf("iavf: debug level=%d\n", iavf_params.debug);
		}
	}

	obj = prop_dictionary_get(dict, "max_qps");
	if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
		val = prop_number_signed_value((prop_number_t)obj);

		if (val < 1 || val > I40E_MAX_VF_QUEUES) {
			/* added missing trailing newline */
			printf("iavf: invalid queue size(1 <= n <= %d)\n",
			    I40E_MAX_VF_QUEUES);
		} else {
			iavf_params.max_qps = val;
			printf("iavf: request queue pair = %u\n",
			    iavf_params.max_qps);
		}
	}

	obj = prop_dictionary_get(dict, "tx_itr");
	if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
		val = prop_number_signed_value((prop_number_t)obj);
		if (val > 0x07FF) {
			printf("iavf: TX ITR too big (%" PRId64 " <= %d)\n",
			    val, 0x7FF);
		} else {
			iavf_params.tx_itr = val;
			printf("iavf: TX ITR = 0x%" PRIx32 "\n",
			    iavf_params.tx_itr);
		}
	}

	obj = prop_dictionary_get(dict, "rx_itr");
	if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
		val = prop_number_signed_value((prop_number_t)obj);
		if (val > 0x07FF) {
			printf("iavf: RX ITR too big (%" PRId64 " <= %d)\n",
			    val, 0x7FF);
		} else {
			iavf_params.rx_itr = val;
			printf("iavf: RX ITR = 0x%" PRIx32 "\n",
			    iavf_params.rx_itr);
		}
	}

	obj = prop_dictionary_get(dict, "tx_ndescs");
	if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
		val = prop_number_signed_value((prop_number_t)obj);
		/*
		 * Ring size must be a power of two.  fls32(0) is 0, so
		 * guard against shifting by -1 (undefined behavior) for
		 * non-positive values.
		 */
		n = (val > 0) ? 1U << (fls32(val) - 1) : 0;
		if (val < 1 || val != (int64_t) n) {
			printf("iavf: TX desc invalid size"
			    "(%" PRId64 " != %" PRIu32 ")\n", val, n);
		} else if (val > (8192 - 32)) {
			printf("iavf: Tx desc too big (%" PRId64 " > %d)\n",
			    val, (8192 - 32));
		} else {
			iavf_params.tx_ndescs = val;
			printf("iavf: TX descriptors = 0x%04x\n",
			    iavf_params.tx_ndescs);
		}
	}

	obj = prop_dictionary_get(dict, "rx_ndescs");
	if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
		val = prop_number_signed_value((prop_number_t)obj);
		/* Same power-of-two check and fls32(0) guard as above. */
		n = (val > 0) ? 1U << (fls32(val) - 1) : 0;
		if (val < 1 || val != (int64_t) n) {
			printf("iavf: RX desc invalid size"
			    "(%" PRId64 " != %" PRIu32 ")\n", val, n);
		} else if (val > (8192 - 32)) {
			printf("iavf: Rx desc too big (%" PRId64 " > %d)\n",
			    val, (8192 - 32));
		} else {
			iavf_params.rx_ndescs = val;
			printf("iavf: RX descriptors = 0x%04x\n",
			    iavf_params.rx_ndescs);
		}
	}
}
#endif
5352
/*
 * if_iavf_modcmd -- module(9) control entry point.  For module builds,
 * INIT parses the property dictionary and attaches the component;
 * FINI detaches it.  Built-in kernels compile this to a no-op success.
 */
static int
if_iavf_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		/* Apply module properties before attaching the driver. */
		iavf_parse_modprop((prop_dictionary_t)opaque);
		error = config_init_component(cfdriver_ioconf_if_iavf,
		    cfattach_ioconf_if_iavf, cfdata_ioconf_if_iavf);
		break;
	case MODULE_CMD_FINI:
		error = config_fini_component(cfdriver_ioconf_if_iavf,
		    cfattach_ioconf_if_iavf, cfdata_ioconf_if_iavf);
		break;
	default:
		error = ENOTTY;
		break;
	}
#endif

	return error;
}
5377