1 /* $NetBSD: if_iavf.c,v 1.9 2020/12/10 03:58:35 yamaguchi Exp $ */
2
3 /*
4 * Copyright (c) 2013-2015, Intel Corporation
5 * All rights reserved.
6
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
12 *
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * 3. Neither the name of the Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 /*
35 * Copyright (c) 2016,2017 David Gwynne <dlg (at) openbsd.org>
36 * Copyright (c) 2019 Jonathan Matthew <jmatthew (at) openbsd.org>
37 *
38 * Permission to use, copy, modify, and distribute this software for any
39 * purpose with or without fee is hereby granted, provided that the above
40 * copyright notice and this permission notice appear in all copies.
41 *
42 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
43 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
44 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
45 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
46 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
47 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
48 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
49 */
50
51 /*
52 * Copyright (c) 2020 Internet Initiative Japan, Inc.
53 * All rights reserved.
54 *
55 * Redistribution and use in source and binary forms, with or without
56 * modification, are permitted provided that the following conditions
57 * are met:
58 * 1. Redistributions of source code must retain the above copyright
59 * notice, this list of conditions and the following disclaimer.
60 * 2. Redistributions in binary form must reproduce the above copyright
61 * notice, this list of conditions and the following disclaimer in the
62 * documentation and/or other materials provided with the distribution.
63 *
64 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
65 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
66 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
67 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
68 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
69 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
70 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
71 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
72 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
73 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
74 * POSSIBILITY OF SUCH DAMAGE.
75 */
76
77 #include <sys/cdefs.h>
78 __KERNEL_RCSID(0, "$NetBSD: if_iavf.c,v 1.9 2020/12/10 03:58:35 yamaguchi Exp $");
79
80 #include <sys/param.h>
81 #include <sys/types.h>
82
83 #include <sys/bitops.h>
84 #include <sys/bus.h>
85 #include <sys/cprng.h>
86 #include <sys/cpu.h>
87 #include <sys/device.h>
88 #include <sys/evcnt.h>
89 #include <sys/interrupt.h>
90 #include <sys/kmem.h>
91 #include <sys/module.h>
92 #include <sys/mutex.h>
93 #include <sys/pcq.h>
94 #include <sys/queue.h>
95 #include <sys/syslog.h>
96 #include <sys/workqueue.h>
97 #include <sys/xcall.h>
98
99 #include <net/bpf.h>
100 #include <net/if.h>
101 #include <net/if_dl.h>
102 #include <net/if_media.h>
103 #include <net/if_ether.h>
104 #include <net/rss_config.h>
105
106 #include <netinet/tcp.h> /* for struct tcphdr */
107 #include <netinet/udp.h> /* for struct udphdr */
108
109 #include <dev/pci/pcivar.h>
110 #include <dev/pci/pcidevs.h>
111
112 #include <dev/pci/if_ixlreg.h>
113 #include <dev/pci/if_ixlvar.h>
114 #include <dev/pci/if_iavfvar.h>
115
116 #include <prop/proplib.h>
117
118 #define IAVF_PCIREG PCI_MAPREG_START
119 #define IAVF_AQ_NUM 256
120 #define IAVF_AQ_MASK (IAVF_AQ_NUM-1)
121 #define IAVF_AQ_ALIGN 64
122 #define IAVF_AQ_BUFLEN 4096
123 #define I40E_AQ_LARGE_BUF 512
124 #define IAVF_VF_MAJOR 1
125 #define IAVF_VF_MINOR 1
126
127 #define IAVF_VFR_INPROGRESS 0
128 #define IAVF_VFR_COMPLETED 1
129 #define IAVF_VFR_VFACTIVE 2
130
131 #define IAVF_REG_VFR 0xdeadbeef
132
133 #define IAVF_ITR_RX 0x0
134 #define IAVF_ITR_TX 0x1
135 #define IAVF_ITR_MISC 0x2
136 #define IAVF_NOITR 0x3
137
138 #define IAVF_MTU_ETHERLEN (ETHER_HDR_LEN \
139 + ETHER_CRC_LEN)
140 #define IAVF_MAX_MTU (9600 - IAVF_MTU_ETHERLEN)
141 #define IAVF_MIN_MTU (ETHER_MIN_LEN - ETHER_CRC_LEN)
142
143 #define IAVF_WORKQUEUE_PRI PRI_SOFTNET
144
145 #define IAVF_TX_PKT_DESCS 8
146 #define IAVF_TX_QUEUE_ALIGN 128
147 #define IAVF_RX_QUEUE_ALIGN 128
148 #define IAVF_TX_PKT_MAXSIZE (MCLBYTES * IAVF_TX_PKT_DESCS)
149 #define IAVF_MCLBYTES (MCLBYTES - ETHER_ALIGN)
150
151 #define IAVF_TICK_INTERVAL (5 * hz)
152 #define IAVF_WATCHDOG_TICKS 3
153 #define IAVF_WATCHDOG_STOP 0
154
155 #define IAVF_TXRX_PROCESS_UNLIMIT UINT_MAX
156 #define IAVF_TX_PROCESS_LIMIT 256
157 #define IAVF_RX_PROCESS_LIMIT 256
158 #define IAVF_TX_INTR_PROCESS_LIMIT 256
159 #define IAVF_RX_INTR_PROCESS_LIMIT 0U
160
161 #define IAVF_EXEC_TIMEOUT 3000
162
163 #define IAVF_IFCAP_RXCSUM (IFCAP_CSUM_IPv4_Rx | \
164 IFCAP_CSUM_TCPv4_Rx | \
165 IFCAP_CSUM_UDPv4_Rx | \
166 IFCAP_CSUM_TCPv6_Rx | \
167 IFCAP_CSUM_UDPv6_Rx)
168 #define IAVF_IFCAP_TXCSUM (IFCAP_CSUM_IPv4_Tx | \
169 IFCAP_CSUM_TCPv4_Tx | \
170 IFCAP_CSUM_UDPv4_Tx | \
171 IFCAP_CSUM_TCPv6_Tx | \
172 IFCAP_CSUM_UDPv6_Tx)
173 #define IAVF_CSUM_ALL_OFFLOAD (M_CSUM_IPv4 | \
174 M_CSUM_TCPv4 | M_CSUM_TCPv6 | \
175 M_CSUM_UDPv4 | M_CSUM_UDPv6)
176
177 struct iavf_softc; /* defined below */
178
179 struct iavf_module_params {
180 int debug;
181 uint32_t rx_itr;
182 uint32_t tx_itr;
183 unsigned int rx_ndescs;
184 unsigned int tx_ndescs;
185 int max_qps;
186 };
187
188 struct iavf_product {
189 unsigned int vendor_id;
190 unsigned int product_id;
191 };
192
193 struct iavf_link_speed {
194 uint64_t baudrate;
195 uint64_t media;
196 };
197
198 struct iavf_aq_regs {
199 bus_size_t atq_tail;
200 bus_size_t atq_head;
201 bus_size_t atq_len;
202 bus_size_t atq_bal;
203 bus_size_t atq_bah;
204
205 bus_size_t arq_tail;
206 bus_size_t arq_head;
207 bus_size_t arq_len;
208 bus_size_t arq_bal;
209 bus_size_t arq_bah;
210
211 uint32_t atq_len_enable;
212 uint32_t atq_tail_mask;
213 uint32_t atq_head_mask;
214
215 uint32_t arq_len_enable;
216 uint32_t arq_tail_mask;
217 uint32_t arq_head_mask;
218 };
219
220 struct iavf_work {
221 struct work ixw_cookie;
222 void (*ixw_func)(void *);
223 void *ixw_arg;
224 unsigned int ixw_added;
225 };
226
227 struct iavf_tx_map {
228 struct mbuf *txm_m;
229 bus_dmamap_t txm_map;
230 unsigned int txm_eop;
231 };
232
233 struct iavf_tx_ring {
234 unsigned int txr_qid;
235 char txr_name[16];
236
237 struct iavf_softc *txr_sc;
238 kmutex_t txr_lock;
239 pcq_t *txr_intrq;
240 void *txr_si;
241 unsigned int txr_prod;
242 unsigned int txr_cons;
243
244 struct iavf_tx_map *txr_maps;
245 struct ixl_dmamem txr_mem;
246 bus_size_t txr_tail;
247
248 int txr_watchdog;
249
250 struct evcnt txr_defragged;
251 struct evcnt txr_defrag_failed;
252 struct evcnt txr_pcqdrop;
253 struct evcnt txr_transmitdef;
254 struct evcnt txr_defer;
255 struct evcnt txr_watchdogto;
256 struct evcnt txr_intr;
257 };
258
259 struct iavf_rx_map {
260 struct mbuf *rxm_m;
261 bus_dmamap_t rxm_map;
262 };
263
264 struct iavf_rx_ring {
265 unsigned int rxr_qid;
266 char rxr_name[16];
267
268 struct iavf_softc *rxr_sc;
269 kmutex_t rxr_lock;
270
271 unsigned int rxr_prod;
272 unsigned int rxr_cons;
273
274 struct iavf_rx_map *rxr_maps;
275 struct ixl_dmamem rxr_mem;
276 bus_size_t rxr_tail;
277
278 struct mbuf *rxr_m_head;
279 struct mbuf **rxr_m_tail;
280
281 struct evcnt rxr_mgethdr_failed;
282 struct evcnt rxr_mgetcl_failed;
283 struct evcnt rxr_mbuf_load_failed;
284 struct evcnt rxr_defer;
285 struct evcnt rxr_intr;
286 };
287
288 struct iavf_queue_pair {
289 struct iavf_tx_ring *qp_txr;
290 struct iavf_rx_ring *qp_rxr;
291 struct work qp_work;
292 void *qp_si;
293 bool qp_workqueue;
294 };
295
296 struct iavf_stat_counters {
297 struct evcnt isc_rx_bytes;
298 struct evcnt isc_rx_unicast;
299 struct evcnt isc_rx_multicast;
300 struct evcnt isc_rx_broadcast;
301 struct evcnt isc_rx_discards;
302 struct evcnt isc_rx_unknown_protocol;
303 struct evcnt isc_tx_bytes;
304 struct evcnt isc_tx_unicast;
305 struct evcnt isc_tx_multicast;
306 struct evcnt isc_tx_broadcast;
307 struct evcnt isc_tx_discards;
308 struct evcnt isc_tx_errors;
309 };
310
311 /*
312 * Locking notes:
313 * + Fields in iavf_tx_ring are protected by txr_lock (a spin mutex), and
314 * fields in iavf_rx_ring are protected by rxr_lock (a spin mutex).
315 * - No more than one of these ring locks may be held at once.
316 * + Fields named sc_atq_*, sc_arq_*, and sc_adminq_* are protected by
317 * sc_adminq_lock (a spin mutex).
318 * - The lock is held while accessing sc_aq_regs
319 * and is never held together with txr_lock or rxr_lock.
320 * + Other fields in iavf_softc are protected by sc_cfg_lock
321 * (an adaptive mutex).
322 * - The lock must be held before acquiring any other lock.
323 *
324 * Locking order:
325 * - IFNET_LOCK => sc_cfg_lock => sc_adminq_lock
326 * - sc_cfg_lock => ETHER_LOCK => sc_adminq_lock
327 * - sc_cfg_lock => txr_lock
328 * - sc_cfg_lock => rxr_lock
329 */
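/*
 * Illustrative sketch of the ordering above (not part of the driver code);
 * "sc" stands for an iavf_softc and "txr" for one of its Tx rings:
 *
 *	mutex_enter(&sc->sc_cfg_lock);		(adaptive, taken first)
 *	mutex_enter(&txr->txr_lock);		(spin, nested under sc_cfg_lock)
 *	... access iavf_tx_ring fields ...
 *	mutex_exit(&txr->txr_lock);
 *	mutex_exit(&sc->sc_cfg_lock);
 */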
330
331 struct iavf_softc {
332 device_t sc_dev;
333 enum i40e_mac_type sc_mac_type;
334 int sc_debuglevel;
335 bool sc_attached;
336 bool sc_dead;
337 kmutex_t sc_cfg_lock;
338 callout_t sc_tick;
339 struct ifmedia sc_media;
340 uint64_t sc_media_status;
341 uint64_t sc_media_active;
342 int sc_link_state;
343
344 const struct iavf_aq_regs *
345 sc_aq_regs;
346
347 struct ethercom sc_ec;
348 uint8_t sc_enaddr[ETHER_ADDR_LEN];
349 uint8_t sc_enaddr_fake[ETHER_ADDR_LEN];
350 uint8_t sc_enaddr_added[ETHER_ADDR_LEN];
351 uint8_t sc_enaddr_reset[ETHER_ADDR_LEN];
352 struct if_percpuq *sc_ipq;
353
354 struct pci_attach_args sc_pa;
355 bus_dma_tag_t sc_dmat;
356 bus_space_tag_t sc_memt;
357 bus_space_handle_t sc_memh;
358 bus_size_t sc_mems;
359 pci_intr_handle_t *sc_ihp;
360 void **sc_ihs;
361 unsigned int sc_nintrs;
362
363 uint32_t sc_major_ver;
364 uint32_t sc_minor_ver;
365 uint32_t sc_vf_id;
366 uint32_t sc_vf_cap;
367 uint16_t sc_vsi_id;
368 uint16_t sc_qset_handle;
369 uint16_t sc_max_mtu;
370 bool sc_got_vf_resources;
371 bool sc_got_irq_map;
372 unsigned int sc_max_vectors;
373
374 kmutex_t sc_adminq_lock;
375 kcondvar_t sc_adminq_cv;
376 struct ixl_dmamem sc_atq;
377 unsigned int sc_atq_prod;
378 unsigned int sc_atq_cons;
379 struct ixl_aq_bufs sc_atq_idle;
380 struct ixl_aq_bufs sc_atq_live;
381 struct ixl_dmamem sc_arq;
382 struct ixl_aq_bufs sc_arq_idle;
383 struct ixl_aq_bufs sc_arq_live;
384 unsigned int sc_arq_prod;
385 unsigned int sc_arq_cons;
386 struct iavf_work sc_arq_refill;
387 uint32_t sc_arq_opcode;
388 uint32_t sc_arq_retval;
389
390 uint32_t sc_tx_itr;
391 uint32_t sc_rx_itr;
392 unsigned int sc_tx_ring_ndescs;
393 unsigned int sc_rx_ring_ndescs;
394 unsigned int sc_nqueue_pairs;
395 unsigned int sc_nqps_alloc;
396 unsigned int sc_nqps_vsi;
397 unsigned int sc_nqps_req;
398 struct iavf_queue_pair *sc_qps;
399 bool sc_txrx_workqueue;
400 u_int sc_tx_intr_process_limit;
401 u_int sc_tx_process_limit;
402 u_int sc_rx_intr_process_limit;
403 u_int sc_rx_process_limit;
404
405 struct workqueue *sc_workq;
406 struct workqueue *sc_workq_txrx;
407 struct iavf_work sc_reset_task;
408 struct iavf_work sc_wdto_task;
409 struct iavf_work sc_req_queues_task;
410 bool sc_req_queues_retried;
411 bool sc_resetting;
412 bool sc_reset_up;
413
414 struct sysctllog *sc_sysctllog;
415 struct iavf_stat_counters
416 sc_stat_counters;
417 };
418
419 #define IAVF_LOG(_sc, _lvl, _fmt, _args...) \
420 do { \
421 if (!(_sc)->sc_attached) { \
422 switch (_lvl) { \
423 case LOG_ERR: \
424 case LOG_WARNING: \
425 aprint_error_dev((_sc)->sc_dev, _fmt, ##_args); \
426 break; \
427 case LOG_INFO: \
428 aprint_normal_dev((_sc)->sc_dev, _fmt, ##_args); \
429 break; \
430 case LOG_DEBUG: \
431 default: \
432 aprint_debug_dev((_sc)->sc_dev, _fmt, ##_args); \
433 } \
434 } else { \
435 struct ifnet *_ifp = &(_sc)->sc_ec.ec_if; \
436 log((_lvl), "%s: " _fmt, _ifp->if_xname, ##_args); \
437 } \
438 } while (0)
439
440 static int iavf_dmamem_alloc(bus_dma_tag_t, struct ixl_dmamem *,
441 bus_size_t, bus_size_t);
442 static void iavf_dmamem_free(bus_dma_tag_t, struct ixl_dmamem *);
443 static struct ixl_aq_buf *
444 iavf_aqb_get(struct iavf_softc *, struct ixl_aq_bufs *);
445 static struct ixl_aq_buf *
446 iavf_aqb_get_locked(struct ixl_aq_bufs *);
447 static void iavf_aqb_put_locked(struct ixl_aq_bufs *, struct ixl_aq_buf *);
448 static void iavf_aqb_clean(struct ixl_aq_bufs *, bus_dma_tag_t);
449
450 static const struct iavf_product *
451 iavf_lookup(const struct pci_attach_args *);
452 static enum i40e_mac_type
453 iavf_mactype(pci_product_id_t);
454 static void iavf_pci_csr_setup(pci_chipset_tag_t, pcitag_t);
455 static int iavf_wait_active(struct iavf_softc *);
456 static bool iavf_is_etheranyaddr(const uint8_t *);
457 static void iavf_prepare_fakeaddr(struct iavf_softc *);
458 static int iavf_replace_lla(struct ifnet *,
459 const uint8_t *, const uint8_t *);
460 static void iavf_evcnt_attach(struct evcnt *,
461 const char *, const char *);
462 static int iavf_setup_interrupts(struct iavf_softc *);
463 static void iavf_teardown_interrupts(struct iavf_softc *);
464 static int iavf_setup_sysctls(struct iavf_softc *);
465 static void iavf_teardown_sysctls(struct iavf_softc *);
466 static int iavf_setup_stats(struct iavf_softc *);
467 static void iavf_teardown_stats(struct iavf_softc *);
468 static struct workqueue *
469 iavf_workq_create(const char *, pri_t, int, int);
470 static void iavf_workq_destroy(struct workqueue *);
471 static int iavf_work_set(struct iavf_work *, void (*)(void *), void *);
472 static void iavf_work_add(struct workqueue *, struct iavf_work *);
473 static void iavf_work_wait(struct workqueue *, struct iavf_work *);
474 static unsigned int
475 iavf_calc_msix_count(struct iavf_softc *);
476 static unsigned int
477 iavf_calc_queue_pair_size(struct iavf_softc *);
478 static int iavf_queue_pairs_alloc(struct iavf_softc *);
479 static void iavf_queue_pairs_free(struct iavf_softc *);
480 static int iavf_arq_fill(struct iavf_softc *);
481 static void iavf_arq_refill(void *);
482 static int iavf_arq_poll(struct iavf_softc *, uint32_t, int);
483 static void iavf_atq_done(struct iavf_softc *);
484 static int iavf_init_admin_queue(struct iavf_softc *);
485 static void iavf_cleanup_admin_queue(struct iavf_softc *);
486 static int iavf_arq(struct iavf_softc *);
487 static int iavf_adminq_exec(struct iavf_softc *,
488 struct ixl_aq_desc *, struct ixl_aq_buf *);
489 static int iavf_adminq_poll(struct iavf_softc *,
490 struct ixl_aq_desc *, struct ixl_aq_buf *, int);
491 static int iavf_adminq_poll_locked(struct iavf_softc *,
492 struct ixl_aq_desc *, struct ixl_aq_buf *, int);
493 static int iavf_add_multi(struct iavf_softc *, uint8_t *, uint8_t *);
494 static int iavf_del_multi(struct iavf_softc *, uint8_t *, uint8_t *);
495 static void iavf_del_all_multi(struct iavf_softc *);
496
497 static int iavf_get_version(struct iavf_softc *, struct ixl_aq_buf *);
498 static int iavf_get_vf_resources(struct iavf_softc *, struct ixl_aq_buf *);
499 static int iavf_get_stats(struct iavf_softc *);
500 static int iavf_config_irq_map(struct iavf_softc *, struct ixl_aq_buf *);
501 static int iavf_config_vsi_queues(struct iavf_softc *);
502 static int iavf_config_hena(struct iavf_softc *);
503 static int iavf_config_rss_key(struct iavf_softc *);
504 static int iavf_config_rss_lut(struct iavf_softc *);
505 static int iavf_config_promisc_mode(struct iavf_softc *, int, int);
506 static int iavf_config_vlan_stripping(struct iavf_softc *, int);
507 static int iavf_config_vlan_id(struct iavf_softc *, uint16_t, uint32_t);
508 static int iavf_queue_select(struct iavf_softc *, int);
509 static int iavf_request_queues(struct iavf_softc *, unsigned int);
510 static int iavf_reset_vf(struct iavf_softc *);
511 static int iavf_eth_addr(struct iavf_softc *, const uint8_t *, uint32_t);
512 static void iavf_process_version(struct iavf_softc *,
513 struct ixl_aq_desc *, struct ixl_aq_buf *);
514 static void iavf_process_vf_resources(struct iavf_softc *,
515 struct ixl_aq_desc *, struct ixl_aq_buf *);
516 static void iavf_process_irq_map(struct iavf_softc *,
517 struct ixl_aq_desc *);
518 static void iavf_process_vc_event(struct iavf_softc *,
519 struct ixl_aq_desc *, struct ixl_aq_buf *);
520 static void iavf_process_stats(struct iavf_softc *,
521 struct ixl_aq_desc *, struct ixl_aq_buf *);
522 static void iavf_process_req_queues(struct iavf_softc *,
523 struct ixl_aq_desc *, struct ixl_aq_buf *);
524
525 static int iavf_intr(void *);
526 static int iavf_queue_intr(void *);
527 static void iavf_tick(void *);
528 static void iavf_tick_halt(void *);
529 static void iavf_reset_request(void *);
530 static void iavf_reset_start(void *);
531 static void iavf_reset(void *);
532 static void iavf_reset_finish(struct iavf_softc *);
533 static int iavf_init(struct ifnet *);
534 static int iavf_init_locked(struct iavf_softc *);
535 static void iavf_stop(struct ifnet *, int);
536 static void iavf_stop_locked(struct iavf_softc *);
537 static int iavf_ioctl(struct ifnet *, u_long, void *);
538 static void iavf_start(struct ifnet *);
539 static int iavf_transmit(struct ifnet *, struct mbuf*);
540 static int iavf_watchdog(struct iavf_tx_ring *);
541 static void iavf_watchdog_timeout(void *);
542 static int iavf_media_change(struct ifnet *);
543 static void iavf_media_status(struct ifnet *, struct ifmediareq *);
544 static int iavf_ifflags_cb(struct ethercom *);
545 static int iavf_vlan_cb(struct ethercom *, uint16_t, bool);
546 static void iavf_deferred_transmit(void *);
547 static void iavf_handle_queue(void *);
548 static void iavf_handle_queue_wk(struct work *, void *);
549 static int iavf_reinit(struct iavf_softc *);
550 static int iavf_rxfill(struct iavf_softc *, struct iavf_rx_ring *);
551 static void iavf_txr_clean(struct iavf_softc *, struct iavf_tx_ring *);
552 static void iavf_rxr_clean(struct iavf_softc *, struct iavf_rx_ring *);
553 static int iavf_txeof(struct iavf_softc *, struct iavf_tx_ring *,
554 u_int, struct evcnt *);
555 static int iavf_rxeof(struct iavf_softc *, struct iavf_rx_ring *,
556 u_int, struct evcnt *);
557 static int iavf_iff(struct iavf_softc *);
558 static int iavf_iff_locked(struct iavf_softc *);
559 static void iavf_post_request_queues(void *);
560 static int iavf_sysctl_itr_handler(SYSCTLFN_PROTO);
561
562 static int iavf_match(device_t, cfdata_t, void *);
563 static void iavf_attach(device_t, device_t, void*);
564 static int iavf_detach(device_t, int);
565 static int iavf_finalize_teardown(device_t);
566
567 CFATTACH_DECL3_NEW(iavf, sizeof(struct iavf_softc),
568 iavf_match, iavf_attach, iavf_detach, NULL, NULL, NULL,
569 DVF_DETACH_SHUTDOWN);
570
571 static const struct iavf_product iavf_products[] = {
572 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_VF },
573 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_VF_HV },
574 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_VF },
575 /* required last entry */
576 {0, 0}
577 };
578
579 static const struct iavf_link_speed iavf_link_speeds[] = {
580 { 0, 0 },
581 { IF_Mbps(100), IFM_100_TX },
582 { IF_Mbps(1000), IFM_1000_T },
583 { IF_Gbps(10), IFM_10G_T },
584 { IF_Gbps(40), IFM_40G_CR4 },
585 { IF_Gbps(20), IFM_20G_KR2 },
586 { IF_Gbps(25), IFM_25G_CR }
587 };
588
589 static const struct iavf_aq_regs iavf_aq_regs = {
590 .atq_tail = I40E_VF_ATQT1,
591 .atq_tail_mask = I40E_VF_ATQT1_ATQT_MASK,
592 .atq_head = I40E_VF_ATQH1,
593 .atq_head_mask = I40E_VF_ARQH1_ARQH_MASK,
594 .atq_len = I40E_VF_ATQLEN1,
595 .atq_bal = I40E_VF_ATQBAL1,
596 .atq_bah = I40E_VF_ATQBAH1,
597 .atq_len_enable = I40E_VF_ATQLEN1_ATQENABLE_MASK,
598
599 .arq_tail = I40E_VF_ARQT1,
600 .arq_tail_mask = I40E_VF_ARQT1_ARQT_MASK,
601 .arq_head = I40E_VF_ARQH1,
602 .arq_head_mask = I40E_VF_ARQH1_ARQH_MASK,
603 .arq_len = I40E_VF_ARQLEN1,
604 .arq_bal = I40E_VF_ARQBAL1,
605 .arq_bah = I40E_VF_ARQBAH1,
606 .arq_len_enable = I40E_VF_ARQLEN1_ARQENABLE_MASK,
607 };
608
609 static struct iavf_module_params iavf_params = {
610 .debug = 0,
611 .rx_itr = 0x07a, /* 4K intrs/sec */
612 .tx_itr = 0x07a, /* 4K intrs/sec */
613 .tx_ndescs = 512,
614 .rx_ndescs = 256,
615 .max_qps = INT_MAX,
616 };
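/*
 * How the "4K intrs/sec" figures above follow from 0x07a, assuming the
 * usual 2 microsecond ITR granularity of this hardware family:
 *	0x7a = 122, and 122 * 2us = 244us between interrupts,
 *	so roughly 1 / 244us ~= 4100 interrupts/sec.
 */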
617
618 #define delaymsec(_x) DELAY(1000 * (_x))
619 #define iavf_rd(_s, _r) \
620 bus_space_read_4((_s)->sc_memt, (_s)->sc_memh, (_r))
621 #define iavf_wr(_s, _r, _v) \
622 bus_space_write_4((_s)->sc_memt, (_s)->sc_memh, (_r), (_v))
623 #define iavf_barrier(_s, _r, _l, _o) \
624 bus_space_barrier((_s)->sc_memt, (_s)->sc_memh, (_r), (_l), (_o))
625 #define iavf_flush(_s) (void)iavf_rd((_s), I40E_VFGEN_RSTAT)
626 #define iavf_nqueues(_sc) (1 << ((_sc)->sc_nqueue_pairs - 1))
627 #define iavf_allqueues(_sc) ((1 << ((_sc)->sc_nqueue_pairs)) - 1)
628
629 static inline void
630 iavf_intr_barrier(void)
631 {
632
633 /* wait until all interrupt handlers have finished */
634 xc_barrier(0);
635 }
636 static inline void
637 iavf_intr_enable(struct iavf_softc *sc)
638 {
639
640 iavf_wr(sc, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL0_INTENA_MASK |
641 I40E_VFINT_DYN_CTL0_CLEARPBA_MASK |
642 (IAVF_NOITR << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT));
643 iavf_wr(sc, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);
644 iavf_flush(sc);
645 }
646
647 static inline void
648 iavf_intr_disable(struct iavf_softc *sc)
649 {
650
651 iavf_wr(sc, I40E_VFINT_DYN_CTL01,
652 (IAVF_NOITR << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT));
653 iavf_wr(sc, I40E_VFINT_ICR0_ENA1, 0);
654 iavf_flush(sc);
655 }
656
657 static inline void
658 iavf_queue_intr_enable(struct iavf_softc *sc, unsigned int qid)
659 {
660
661 iavf_wr(sc, I40E_VFINT_DYN_CTLN1(qid),
662 I40E_VFINT_DYN_CTLN1_INTENA_MASK |
663 I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
664 (IAVF_NOITR << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT));
665 iavf_flush(sc);
666 }
667
668 static inline void
669 iavf_queue_intr_disable(struct iavf_softc *sc, unsigned int qid)
670 {
671
672 iavf_wr(sc, I40E_VFINT_DYN_CTLN1(qid),
673 (IAVF_NOITR << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT));
674 iavf_flush(sc);
675 }
676
677 static inline void
678 iavf_aq_vc_set_opcode(struct ixl_aq_desc *iaq, uint32_t opcode)
679 {
680 struct iavf_aq_vc *vc;
681
682 vc = (struct iavf_aq_vc *)&iaq->iaq_cookie;
683 vc->iaq_vc_opcode = htole32(opcode);
684 }
685
686 static inline uint32_t
687 iavf_aq_vc_get_opcode(const struct ixl_aq_desc *iaq)
688 {
689 const struct iavf_aq_vc *vc;
690
691 vc = (const struct iavf_aq_vc *)&iaq->iaq_cookie;
692 return le32toh(vc->iaq_vc_opcode);
693 }
694
695 static inline uint32_t
696 iavf_aq_vc_get_retval(const struct ixl_aq_desc *iaq)
697 {
698 const struct iavf_aq_vc *vc;
699
700 vc = (const struct iavf_aq_vc *)&iaq->iaq_cookie;
701 return le32toh(vc->iaq_vc_retval);
702 }
703
704 static int
705 iavf_match(device_t parent, cfdata_t match, void *aux)
706 {
707 const struct pci_attach_args *pa = aux;
708
709 return (iavf_lookup(pa) != NULL) ? 1 : 0;
710 }
711
712 static void
713 iavf_attach(device_t parent, device_t self, void *aux)
714 {
715 struct iavf_softc *sc;
716 struct pci_attach_args *pa = aux;
717 struct ifnet *ifp;
718 struct ixl_aq_buf *aqb;
719 pcireg_t memtype;
720 char xnamebuf[MAXCOMLEN];
721 int error, i;
722
723 sc = device_private(self);
724 sc->sc_dev = self;
725 ifp = &sc->sc_ec.ec_if;
726
727 sc->sc_pa = *pa;
728 sc->sc_dmat = (pci_dma64_available(pa)) ? pa->pa_dmat64 : pa->pa_dmat;
729 sc->sc_aq_regs = &iavf_aq_regs;
730 sc->sc_debuglevel = iavf_params.debug;
731 sc->sc_tx_ring_ndescs = iavf_params.tx_ndescs;
732 sc->sc_rx_ring_ndescs = iavf_params.rx_ndescs;
733 sc->sc_tx_itr = iavf_params.tx_itr;
734 sc->sc_rx_itr = iavf_params.rx_itr;
735 sc->sc_nqps_req = MIN(ncpu, iavf_params.max_qps);
736 iavf_prepare_fakeaddr(sc);
737
738 sc->sc_mac_type = iavf_mactype(PCI_PRODUCT(pa->pa_id));
739 iavf_pci_csr_setup(pa->pa_pc, pa->pa_tag);
740
741 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IAVF_PCIREG);
742 if (pci_mapreg_map(pa, IAVF_PCIREG, memtype, 0,
743 &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems)) {
744 aprint_error(": unable to map registers\n");
745 return;
746 }
747
748 if (iavf_wait_active(sc) != 0) {
749 aprint_error(": VF reset timed out\n");
750 goto unmap;
751 }
752
753 mutex_init(&sc->sc_cfg_lock, MUTEX_DEFAULT, IPL_SOFTNET);
754 mutex_init(&sc->sc_adminq_lock, MUTEX_DEFAULT, IPL_NET);
755 SIMPLEQ_INIT(&sc->sc_atq_idle);
756 SIMPLEQ_INIT(&sc->sc_atq_live);
757 SIMPLEQ_INIT(&sc->sc_arq_idle);
758 SIMPLEQ_INIT(&sc->sc_arq_live);
759 sc->sc_arq_cons = 0;
760 sc->sc_arq_prod = 0;
761 aqb = NULL;
762
763 if (iavf_dmamem_alloc(sc->sc_dmat, &sc->sc_atq,
764 sizeof(struct ixl_aq_desc) * IAVF_AQ_NUM, IAVF_AQ_ALIGN) != 0) {
765 aprint_error(": unable to allocate atq\n");
766 goto free_mutex;
767 }
768
769 if (iavf_dmamem_alloc(sc->sc_dmat, &sc->sc_arq,
770 sizeof(struct ixl_aq_desc) * IAVF_AQ_NUM, IAVF_AQ_ALIGN) != 0) {
771 aprint_error(": unable to allocate arq\n");
772 goto free_atq;
773 }
774
775 for (i = 0; i < IAVF_AQ_NUM; i++) {
776 aqb = iavf_aqb_get(sc, NULL);
777 if (aqb != NULL) {
778 iavf_aqb_put_locked(&sc->sc_arq_idle, aqb);
779 }
780 }
781 aqb = NULL;
782
783 if (!iavf_arq_fill(sc)) {
784 aprint_error(": unable to fill arq descriptors\n");
785 goto free_arq;
786 }
787
788 if (iavf_init_admin_queue(sc) != 0) {
789 aprint_error(": unable to initialize admin queue\n");
790 goto shutdown;
791 }
792
793 aqb = iavf_aqb_get(sc, NULL);
794 if (aqb == NULL) {
795 aprint_error(": unable to allocate buffer for ATQ\n");
796 goto shutdown;
797 }
798
799 error = iavf_get_version(sc, aqb);
800 switch (error) {
801 case 0:
802 break;
803 case ETIMEDOUT:
804 aprint_error(": timeout waiting for VF version\n");
805 goto shutdown;
806 case ENOTSUP:
807 aprint_error(": unsupported VF version %d\n", sc->sc_major_ver);
808 goto shutdown;
809 default:
810 aprint_error(": unable to get VF interface version\n");
811 goto shutdown;
812 }
813
814 if (iavf_get_vf_resources(sc, aqb) != 0) {
815 aprint_error(": timeout waiting for VF resources\n");
816 goto shutdown;
817 }
818
819 aprint_normal(", VF version %d.%d%s",
820 sc->sc_major_ver, sc->sc_minor_ver,
821 (sc->sc_minor_ver > IAVF_VF_MINOR) ? "(minor mismatch)" : "");
822 aprint_normal(", VF %d, VSI %d", sc->sc_vf_id, sc->sc_vsi_id);
823 aprint_normal("\n");
824 aprint_naive("\n");
825
826 aprint_normal_dev(self, "Ethernet address %s\n",
827 ether_sprintf(sc->sc_enaddr));
828
829 if (iavf_queue_pairs_alloc(sc) != 0) {
830 goto shutdown;
831 }
832
833 if (iavf_setup_interrupts(sc) != 0) {
834 goto free_queue_pairs;
835 }
836
837 if (iavf_config_irq_map(sc, aqb) != 0) {
838 aprint_error(", timed out waiting for IRQ map response\n");
839 goto teardown_intrs;
840 }
841
842 if (iavf_setup_sysctls(sc) != 0) {
843 goto teardown_intrs;
844 }
845
846 if (iavf_setup_stats(sc) != 0) {
847 goto teardown_sysctls;
848 }
849
850 iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
851 aqb = NULL;
852
853 snprintf(xnamebuf, sizeof(xnamebuf),
854 "%s_adminq_cv", device_xname(self));
855 cv_init(&sc->sc_adminq_cv, xnamebuf);
856
857 callout_init(&sc->sc_tick, CALLOUT_MPSAFE);
858 callout_setfunc(&sc->sc_tick, iavf_tick, sc);
859
860 iavf_work_set(&sc->sc_reset_task, iavf_reset_start, sc);
861 iavf_work_set(&sc->sc_arq_refill, iavf_arq_refill, sc);
862 iavf_work_set(&sc->sc_wdto_task, iavf_watchdog_timeout, sc);
863 iavf_work_set(&sc->sc_req_queues_task, iavf_post_request_queues, sc);
864 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_cfg", device_xname(self));
865 sc->sc_workq = iavf_workq_create(xnamebuf, IAVF_WORKQUEUE_PRI,
866 IPL_NET, WQ_MPSAFE);
867 if (sc->sc_workq == NULL)
868 goto destroy_cv;
869
870 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_txrx", device_xname(self));
871 error = workqueue_create(&sc->sc_workq_txrx, xnamebuf,
872 iavf_handle_queue_wk, sc, IAVF_WORKQUEUE_PRI, IPL_NET,
873 WQ_PERCPU|WQ_MPSAFE);
874 if (error != 0) {
875 sc->sc_workq_txrx = NULL;
876 goto teardown_wqs;
877 }
878
879 error = if_initialize(ifp);
880 if (error != 0) {
881 aprint_error_dev(self, "if_initialize failed=%d\n", error);
882 goto teardown_wqs;
883 }
884
885 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
886
887 ifp->if_softc = sc;
888 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
889 ifp->if_extflags = IFEF_MPSAFE;
890 ifp->if_ioctl = iavf_ioctl;
891 ifp->if_start = iavf_start;
892 ifp->if_transmit = iavf_transmit;
893 ifp->if_watchdog = NULL;
894 ifp->if_init = iavf_init;
895 ifp->if_stop = iavf_stop;
896
897 IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_ndescs);
898 IFQ_SET_READY(&ifp->if_snd);
899 sc->sc_ipq = if_percpuq_create(ifp);
900
901 ifp->if_capabilities |= IAVF_IFCAP_RXCSUM;
902 ifp->if_capabilities |= IAVF_IFCAP_TXCSUM;
903
904 ether_set_vlan_cb(&sc->sc_ec, iavf_vlan_cb);
905 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
906 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
907 sc->sc_ec.ec_capenable = sc->sc_ec.ec_capabilities;
908
909 ether_set_ifflags_cb(&sc->sc_ec, iavf_ifflags_cb);
910
911 sc->sc_ec.ec_ifmedia = &sc->sc_media;
912 ifmedia_init_with_lock(&sc->sc_media, IFM_IMASK, iavf_media_change,
913 iavf_media_status, &sc->sc_cfg_lock);
914
915 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
916 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
917
918 if_deferred_start_init(ifp, NULL);
919 ether_ifattach(ifp, sc->sc_enaddr);
920
921 sc->sc_txrx_workqueue = true;
922 sc->sc_tx_process_limit = IAVF_TX_PROCESS_LIMIT;
923 sc->sc_rx_process_limit = IAVF_RX_PROCESS_LIMIT;
924 sc->sc_tx_intr_process_limit = IAVF_TX_INTR_PROCESS_LIMIT;
925 sc->sc_rx_intr_process_limit = IAVF_RX_INTR_PROCESS_LIMIT;
926
927 if_register(ifp);
928 if_link_state_change(ifp, sc->sc_link_state);
929 iavf_intr_enable(sc);
930 if (sc->sc_nqps_vsi < sc->sc_nqps_req)
931 iavf_work_add(sc->sc_workq, &sc->sc_req_queues_task);
932 sc->sc_attached = true;
933 return;
934
935 teardown_wqs:
936 config_finalize_register(self, iavf_finalize_teardown);
937 destroy_cv:
938 cv_destroy(&sc->sc_adminq_cv);
939 callout_destroy(&sc->sc_tick);
940 iavf_teardown_stats(sc);
941 teardown_sysctls:
942 iavf_teardown_sysctls(sc);
943 teardown_intrs:
944 iavf_teardown_interrupts(sc);
945 free_queue_pairs:
946 iavf_queue_pairs_free(sc);
947 shutdown:
948 if (aqb != NULL)
949 iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
950 iavf_cleanup_admin_queue(sc);
951 iavf_aqb_clean(&sc->sc_atq_idle, sc->sc_dmat);
952 iavf_aqb_clean(&sc->sc_arq_idle, sc->sc_dmat);
953 free_arq:
954 iavf_dmamem_free(sc->sc_dmat, &sc->sc_arq);
955 free_atq:
956 iavf_dmamem_free(sc->sc_dmat, &sc->sc_atq);
957 free_mutex:
958 mutex_destroy(&sc->sc_cfg_lock);
959 mutex_destroy(&sc->sc_adminq_lock);
960 unmap:
961 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
962 sc->sc_mems = 0;
963 sc->sc_attached = false;
964 }
965
966 static int
967 iavf_detach(device_t self, int flags)
968 {
969 struct iavf_softc *sc = device_private(self);
970 struct ifnet *ifp = &sc->sc_ec.ec_if;
971
972 if (!sc->sc_attached)
973 return 0;
974
975 iavf_stop(ifp, 1);
976
977 /*
978 * set a dummy function to halt callout safely
979 * even if a workqueue entry calls callout_schedule()
980 */
981 callout_setfunc(&sc->sc_tick, iavf_tick_halt, sc);
982 iavf_work_wait(sc->sc_workq, &sc->sc_reset_task);
983 iavf_work_wait(sc->sc_workq, &sc->sc_wdto_task);
984
985 callout_halt(&sc->sc_tick, NULL);
986 callout_destroy(&sc->sc_tick);
987
988 /* detach the I/F before stopping the adminq due to callbacks */
989 ether_ifdetach(ifp);
990 if_detach(ifp);
991 ifmedia_fini(&sc->sc_media);
992 if_percpuq_destroy(sc->sc_ipq);
993
994 iavf_intr_disable(sc);
995 iavf_intr_barrier();
996 iavf_work_wait(sc->sc_workq, &sc->sc_arq_refill);
997
998 mutex_enter(&sc->sc_adminq_lock);
999 iavf_cleanup_admin_queue(sc);
1000 mutex_exit(&sc->sc_adminq_lock);
1001 iavf_aqb_clean(&sc->sc_atq_idle, sc->sc_dmat);
1002 iavf_aqb_clean(&sc->sc_arq_idle, sc->sc_dmat);
1003 iavf_dmamem_free(sc->sc_dmat, &sc->sc_arq);
1004 iavf_dmamem_free(sc->sc_dmat, &sc->sc_atq);
1005 cv_destroy(&sc->sc_adminq_cv);
1006
1007 iavf_workq_destroy(sc->sc_workq);
1008 sc->sc_workq = NULL;
1009
1010 iavf_queue_pairs_free(sc);
1011 iavf_teardown_interrupts(sc);
1012 iavf_teardown_sysctls(sc);
1013 iavf_teardown_stats(sc);
1014 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
1015
1016 mutex_destroy(&sc->sc_adminq_lock);
1017 mutex_destroy(&sc->sc_cfg_lock);
1018
1019 return 0;
1020 }
1021
1022 static int
1023 iavf_finalize_teardown(device_t self)
1024 {
1025 struct iavf_softc *sc = device_private(self);
1026
1027 if (sc->sc_workq != NULL) {
1028 iavf_workq_destroy(sc->sc_workq);
1029 sc->sc_workq = NULL;
1030 }
1031
1032 if (sc->sc_workq_txrx != NULL) {
1033 workqueue_destroy(sc->sc_workq_txrx);
1034 sc->sc_workq_txrx = NULL;
1035 }
1036
1037 return 0;
1038 }
1039
1040 static int
1041 iavf_init(struct ifnet *ifp)
1042 {
1043 struct iavf_softc *sc;
1044 int rv;
1045
1046 sc = ifp->if_softc;
1047 mutex_enter(&sc->sc_cfg_lock);
1048 rv = iavf_init_locked(sc);
1049 mutex_exit(&sc->sc_cfg_lock);
1050
1051 return rv;
1052 }
1053
1054 static int
1055 iavf_init_locked(struct iavf_softc *sc)
1056 {
1057 struct ifnet *ifp = &sc->sc_ec.ec_if;
1058 unsigned int i;
1059 int error;
1060
1061 KASSERT(mutex_owned(&sc->sc_cfg_lock));
1062
1063 if (ISSET(ifp->if_flags, IFF_RUNNING))
1064 iavf_stop_locked(sc);
1065
1066 if (sc->sc_resetting)
1067 return ENXIO;
1068
1069 error = iavf_reinit(sc);
1070 if (error) {
1071 iavf_stop_locked(sc);
1072 return error;
1073 }
1074
1075 SET(ifp->if_flags, IFF_RUNNING);
1076 CLR(ifp->if_flags, IFF_OACTIVE);
1077
1078 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1079 iavf_wr(sc, I40E_VFINT_ITRN1(IAVF_ITR_RX, i), sc->sc_rx_itr);
1080 iavf_wr(sc, I40E_VFINT_ITRN1(IAVF_ITR_TX, i), sc->sc_tx_itr);
1081 }
1082 iavf_wr(sc, I40E_VFINT_ITR01(IAVF_ITR_RX), sc->sc_rx_itr);
1083 iavf_wr(sc, I40E_VFINT_ITR01(IAVF_ITR_TX), sc->sc_tx_itr);
1084 iavf_wr(sc, I40E_VFINT_ITR01(IAVF_ITR_MISC), 0);
1085
1086 error = iavf_iff_locked(sc);
1087 if (error) {
1088 iavf_stop_locked(sc);
1089 return error;
1090 }
1091
1092 /* ETHERCAP_VLAN_HWFILTER can not be disabled */
1093 SET(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
1094
1095 callout_schedule(&sc->sc_tick, IAVF_TICK_INTERVAL);
1096 return 0;
1097 }
1098
1099 static int
1100 iavf_reinit(struct iavf_softc *sc)
1101 {
1102 struct iavf_rx_ring *rxr;
1103 struct iavf_tx_ring *txr;
1104 unsigned int i;
1105 uint32_t reg;
1106
1107 KASSERT(mutex_owned(&sc->sc_cfg_lock));
1108
1109 sc->sc_reset_up = true;
1110 sc->sc_nqueue_pairs = MIN(sc->sc_nqps_alloc, sc->sc_nintrs - 1);
1111
1112 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1113 rxr = sc->sc_qps[i].qp_rxr;
1114 txr = sc->sc_qps[i].qp_txr;
1115
1116 iavf_rxfill(sc, rxr);
1117 txr->txr_watchdog = IAVF_WATCHDOG_STOP;
1118 }
1119
1120 if (iavf_config_vsi_queues(sc) != 0)
1121 return EIO;
1122
1123 if (iavf_config_hena(sc) != 0)
1124 return EIO;
1125
1126 iavf_config_rss_key(sc);
1127 iavf_config_rss_lut(sc);
1128
1129 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1130 iavf_queue_intr_enable(sc, i);
1131 }
1132 /* unmask */
1133 reg = iavf_rd(sc, I40E_VFINT_DYN_CTL01);
1134 reg |= (IAVF_NOITR << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT);
1135 iavf_wr(sc, I40E_VFINT_DYN_CTL01, reg);
1136
1137 if (iavf_queue_select(sc, IAVF_VC_OP_ENABLE_QUEUES) != 0)
1138 return EIO;
1139
1140 return 0;
1141 }
1142
1143 static void
1144 iavf_stop(struct ifnet *ifp, int disable)
1145 {
1146 struct iavf_softc *sc;
1147
1148 sc = ifp->if_softc;
1149 mutex_enter(&sc->sc_cfg_lock);
1150 iavf_stop_locked(sc);
1151 mutex_exit(&sc->sc_cfg_lock);
1152 }
1153
1154 static void
1155 iavf_stop_locked(struct iavf_softc *sc)
1156 {
1157 struct ifnet *ifp = &sc->sc_ec.ec_if;
1158 struct iavf_rx_ring *rxr;
1159 struct iavf_tx_ring *txr;
1160 uint32_t reg;
1161 unsigned int i;
1162
1163 KASSERT(mutex_owned(&sc->sc_cfg_lock));
1164
1165 CLR(ifp->if_flags, IFF_RUNNING);
1166 sc->sc_reset_up = false;
1167 callout_stop(&sc->sc_tick);
1168
1169 if (!sc->sc_resetting) {
1170 /* disable queues */
1171 if (iavf_queue_select(sc, IAVF_VC_OP_DISABLE_QUEUES) != 0) {
1172 goto die;
1173 }
1174 }
1175
1176 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1177 iavf_queue_intr_disable(sc, i);
1178 }
1179
1180 /* mask interrupts */
1181 reg = iavf_rd(sc, I40E_VFINT_DYN_CTL01);
1182 reg |= I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK |
1183 (IAVF_NOITR << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT);
1184 iavf_wr(sc, I40E_VFINT_DYN_CTL01, reg);
1185
1186 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1187 rxr = sc->sc_qps[i].qp_rxr;
1188 txr = sc->sc_qps[i].qp_txr;
1189
1190 mutex_enter(&rxr->rxr_lock);
1191 iavf_rxr_clean(sc, rxr);
1192 mutex_exit(&rxr->rxr_lock);
1193
1194 mutex_enter(&txr->txr_lock);
1195 iavf_txr_clean(sc, txr);
1196 mutex_exit(&txr->txr_lock);
1197
1198 workqueue_wait(sc->sc_workq_txrx,
1199 &sc->sc_qps[i].qp_work);
1200 }
1201
1202 return;
1203 die:
1204 if (!sc->sc_dead) {
1205 sc->sc_dead = true;
1206 log(LOG_INFO, "%s: Request VF reset\n", ifp->if_xname);
1207
1208 iavf_work_set(&sc->sc_reset_task, iavf_reset_request, sc);
1209 iavf_work_add(sc->sc_workq, &sc->sc_reset_task);
1210 }
1211 log(LOG_CRIT, "%s: failed to shut down rings\n", ifp->if_xname);
1212 }
1213
1214 static int
1215 iavf_watchdog(struct iavf_tx_ring *txr)
1216 {
1217 struct iavf_softc *sc;
1218
1219 sc = txr->txr_sc;
1220
1221 mutex_enter(&txr->txr_lock);
1222
1223 if (txr->txr_watchdog == IAVF_WATCHDOG_STOP
1224 || --txr->txr_watchdog > 0) {
1225 mutex_exit(&txr->txr_lock);
1226 return 0;
1227 }
1228
1229 txr->txr_watchdog = IAVF_WATCHDOG_STOP;
1230 txr->txr_watchdogto.ev_count++;
1231 mutex_exit(&txr->txr_lock);
1232
1233 device_printf(sc->sc_dev, "watchdog timeout on queue %d\n",
1234 txr->txr_qid);
1235 return 1;
1236 }
1237
1238 static void
1239 iavf_watchdog_timeout(void *xsc)
1240 {
1241 struct iavf_softc *sc;
1242 struct ifnet *ifp;
1243
1244 sc = xsc;
1245 ifp = &sc->sc_ec.ec_if;
1246
1247 mutex_enter(&sc->sc_cfg_lock);
1248 if (ISSET(ifp->if_flags, IFF_RUNNING))
1249 iavf_init_locked(sc);
1250 mutex_exit(&sc->sc_cfg_lock);
1251 }
1252
1253 static int
1254 iavf_media_change(struct ifnet *ifp)
1255 {
1256 struct iavf_softc *sc;
1257 struct ifmedia *ifm;
1258
1259 sc = ifp->if_softc;
1260 ifm = &sc->sc_media;
1261
1262 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1263 return EINVAL;
1264
1265 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1266 case IFM_AUTO:
1267 break;
1268 default:
1269 return EINVAL;
1270 }
1271
1272 return 0;
1273 }
1274
1275 static void
1276 iavf_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1277 {
1278 struct iavf_softc *sc = ifp->if_softc;
1279
1280 KASSERT(mutex_owned(&sc->sc_cfg_lock));
1281
1282 ifmr->ifm_status = sc->sc_media_status;
1283 ifmr->ifm_active = sc->sc_media_active;
1284 }
1285
1286 static int
1287 iavf_ifflags_cb(struct ethercom *ec)
1288 {
1289 struct ifnet *ifp = &ec->ec_if;
1290 struct iavf_softc *sc = ifp->if_softc;
1291
1292 /* ETHERCAP_VLAN_HWFILTER cannot be disabled */
1293 SET(ec->ec_capenable, ETHERCAP_VLAN_HWFILTER);
1294
1295 return iavf_iff(sc);
1296 }
1297
1298 static int
1299 iavf_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
1300 {
1301 struct ifnet *ifp = &ec->ec_if;
1302 struct iavf_softc *sc = ifp->if_softc;
1303 int rv;
1304
1305 mutex_enter(&sc->sc_cfg_lock);
1306
1307 if (sc->sc_resetting) {
1308 mutex_exit(&sc->sc_cfg_lock);
1309
1310 /* all VLAN IDs have already been removed */
1311 if (!set)
1312 return 0;
1313
1314 return ENXIO;
1315 }
1316
1317 /* ETHERCAP_VLAN_HWFILTER can not be disabled */
1318 SET(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
1319
1320 if (set) {
1321 rv = iavf_config_vlan_id(sc, vid, IAVF_VC_OP_ADD_VLAN);
1322 if (!ISSET(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWTAGGING)) {
1323 iavf_config_vlan_stripping(sc,
1324 sc->sc_ec.ec_capenable);
1325 }
1326 } else {
1327 rv = iavf_config_vlan_id(sc, vid, IAVF_VC_OP_DEL_VLAN);
1328 }
1329
1330 mutex_exit(&sc->sc_cfg_lock);
1331
1332 if (rv != 0)
1333 return EIO;
1334
1335 return 0;
1336 }
1337
1338 static int
1339 iavf_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1340 {
1341 struct ifreq *ifr = (struct ifreq *)data;
1342 struct iavf_softc *sc = (struct iavf_softc *)ifp->if_softc;
1343 const struct sockaddr *sa;
1344 uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
1345 int s, error = 0;
1346 unsigned int nmtu;
1347
1348 switch (cmd) {
1349 case SIOCSIFMTU:
1350 nmtu = ifr->ifr_mtu;
1351
1352 if (nmtu < IAVF_MIN_MTU || nmtu > IAVF_MAX_MTU) {
1353 error = EINVAL;
1354 break;
1355 }
1356 if (ifp->if_mtu != nmtu) {
1357 s = splnet();
1358 error = ether_ioctl(ifp, cmd, data);
1359 splx(s);
1360 if (error == ENETRESET)
1361 error = iavf_init(ifp);
1362 }
1363 break;
1364 case SIOCADDMULTI:
1365 sa = ifreq_getaddr(SIOCADDMULTI, ifr);
1366 if (ether_addmulti(sa, &sc->sc_ec) == ENETRESET) {
1367 error = ether_multiaddr(sa, addrlo, addrhi);
1368 if (error != 0)
1369 return error;
1370
1371 error = iavf_add_multi(sc, addrlo, addrhi);
1372 if (error != 0 && error != ENETRESET) {
1373 ether_delmulti(sa, &sc->sc_ec);
1374 error = EIO;
1375 }
1376 }
1377 break;
1378
1379 case SIOCDELMULTI:
1380 sa = ifreq_getaddr(SIOCDELMULTI, ifr);
1381 if (ether_delmulti(sa, &sc->sc_ec) == ENETRESET) {
1382 error = ether_multiaddr(sa, addrlo, addrhi);
1383 if (error != 0)
1384 return error;
1385
1386 error = iavf_del_multi(sc, addrlo, addrhi);
1387 }
1388 break;
1389
1390 default:
1391 s = splnet();
1392 error = ether_ioctl(ifp, cmd, data);
1393 splx(s);
1394 }
1395
1396 if (error == ENETRESET)
1397 error = iavf_iff(sc);
1398
1399 return error;
1400 }
1401
1402 static int
1403 iavf_iff(struct iavf_softc *sc)
1404 {
1405 int error;
1406
1407 mutex_enter(&sc->sc_cfg_lock);
1408 error = iavf_iff_locked(sc);
1409 mutex_exit(&sc->sc_cfg_lock);
1410
1411 return error;
1412 }
1413
1414 static int
1415 iavf_iff_locked(struct iavf_softc *sc)
1416 {
1417 struct ifnet *ifp = &sc->sc_ec.ec_if;
1418 int unicast, multicast;
1419 const uint8_t *enaddr;
1420
1421 KASSERT(mutex_owned(&sc->sc_cfg_lock));
1422
1423 if (!ISSET(ifp->if_flags, IFF_RUNNING))
1424 return 0;
1425
1426 unicast = 0;
1427 multicast = 0;
1428 if (ISSET(ifp->if_flags, IFF_PROMISC)) {
1429 unicast = 1;
1430 multicast = 1;
1431 } else if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
1432 multicast = 1;
1433 }
1434
1435 iavf_config_promisc_mode(sc, unicast, multicast);
1436
1437 iavf_config_vlan_stripping(sc, sc->sc_ec.ec_capenable);
1438
1439 enaddr = CLLADDR(ifp->if_sadl);
1440 if (memcmp(enaddr, sc->sc_enaddr_added, ETHER_ADDR_LEN) != 0) {
1441 if (!iavf_is_etheranyaddr(sc->sc_enaddr_added)) {
1442 iavf_eth_addr(sc, sc->sc_enaddr_added,
1443 IAVF_VC_OP_DEL_ETH_ADDR);
1444 }
1445 memcpy(sc->sc_enaddr_added, enaddr, ETHER_ADDR_LEN);
1446 iavf_eth_addr(sc, enaddr, IAVF_VC_OP_ADD_ETH_ADDR);
1447 }
1448
1449 return 0;
1450 }
1451
1452 static const struct iavf_product *
1453 iavf_lookup(const struct pci_attach_args *pa)
1454 {
1455 const struct iavf_product *iavfp;
1456
1457 for (iavfp = iavf_products; iavfp->vendor_id != 0; iavfp++) {
1458 if (PCI_VENDOR(pa->pa_id) == iavfp->vendor_id &&
1459 PCI_PRODUCT(pa->pa_id) == iavfp->product_id)
1460 return iavfp;
1461 }
1462
1463 return NULL;
1464 }
1465
1466 static enum i40e_mac_type
1467 iavf_mactype(pci_product_id_t id)
1468 {
1469
1470 switch (id) {
1471 case PCI_PRODUCT_INTEL_XL710_VF:
1472 case PCI_PRODUCT_INTEL_XL710_VF_HV:
1473 return I40E_MAC_VF;
1474 case PCI_PRODUCT_INTEL_X722_VF:
1475 return I40E_MAC_X722_VF;
1476 }
1477
1478 return I40E_MAC_GENERIC;
1479 }
1480
1481 static const struct iavf_link_speed *
1482 iavf_find_link_speed(struct iavf_softc *sc, uint32_t link_speed)
1483 {
1484 size_t i;
1485
1486 for (i = 0; i < __arraycount(iavf_link_speeds); i++) {
1487 if (link_speed & (1 << i))
1488 return (&iavf_link_speeds[i]);
1489 }
1490
1491 return NULL;
1492 }
1493
1494 static void
1495 iavf_pci_csr_setup(pci_chipset_tag_t pc, pcitag_t tag)
1496 {
1497 pcireg_t csr;
1498
1499 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
1500 csr |= (PCI_COMMAND_MASTER_ENABLE |
1501 PCI_COMMAND_MEM_ENABLE);
1502 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
1503 }
1504
1505 static int
1506 iavf_wait_active(struct iavf_softc *sc)
1507 {
1508 int tries;
1509 uint32_t reg;
1510
1511 for (tries = 0; tries < 100; tries++) {
1512 reg = iavf_rd(sc, I40E_VFGEN_RSTAT) &
1513 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1514 if (reg == IAVF_VFR_VFACTIVE ||
1515 reg == IAVF_VFR_COMPLETED)
1516 return 0;
1517
1518 delaymsec(10);
1519 }
1520
1521 return -1;
1522 }
1523
1524 static bool
1525 iavf_is_etheranyaddr(const uint8_t *enaddr)
1526 {
1527 static const uint8_t etheranyaddr[ETHER_ADDR_LEN] = {
1528 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1529 };
1530
1531 if (memcmp(enaddr, etheranyaddr, ETHER_ADDR_LEN) != 0)
1532 return false;
1533
1534 return true;
1535 }
1536
1537 static void
1538 iavf_prepare_fakeaddr(struct iavf_softc *sc)
1539 {
1540 uint64_t rndval;
1541
1542 if (!iavf_is_etheranyaddr(sc->sc_enaddr_fake))
1543 return;
1544
1545 rndval = cprng_strong64();
1546
1547 memcpy(sc->sc_enaddr_fake, &rndval, sizeof(sc->sc_enaddr_fake));
1548 sc->sc_enaddr_fake[0] &= 0xFE;
1549 sc->sc_enaddr_fake[0] |= 0x02;
1550 }
1551
1552 static int
1553 iavf_replace_lla(struct ifnet *ifp, const uint8_t *prev, const uint8_t *next)
1554 {
1555 union {
1556 struct sockaddr sa;
1557 struct sockaddr_dl sdl;
1558 struct sockaddr_storage ss;
1559 } u;
1560 struct psref psref_prev, psref_next;
1561 struct ifaddr *ifa_prev, *ifa_next;
1562 const struct sockaddr_dl *nsdl;
1563 int s, error;
1564
1565 KASSERT(IFNET_LOCKED(ifp));
1566
1567 error = 0;
1568 ifa_prev = ifa_next = NULL;
1569
1570 if (memcmp(prev, next, ETHER_ADDR_LEN) == 0) {
1571 goto done;
1572 }
1573
1574 if (sockaddr_dl_init(&u.sdl, sizeof(u.ss), ifp->if_index,
1575 ifp->if_type, ifp->if_xname, strlen(ifp->if_xname),
1576 prev, ETHER_ADDR_LEN) == NULL) {
1577 error = EINVAL;
1578 goto done;
1579 }
1580
1581 s = pserialize_read_enter();
1582 IFADDR_READER_FOREACH(ifa_prev, ifp) {
1583 if (sockaddr_cmp(&u.sa, ifa_prev->ifa_addr) == 0) {
1584 ifa_acquire(ifa_prev, &psref_prev);
1585 break;
1586 }
1587 }
1588 pserialize_read_exit(s);
1589
1590 if (sockaddr_dl_init(&u.sdl, sizeof(u.ss), ifp->if_index,
1591 ifp->if_type, ifp->if_xname, strlen(ifp->if_xname),
1592 next, ETHER_ADDR_LEN) == NULL) {
1593 error = EINVAL;
1594 goto done;
1595 }
1596
1597 s = pserialize_read_enter();
1598 IFADDR_READER_FOREACH(ifa_next, ifp) {
1599 if (sockaddr_cmp(&u.sa, ifa_next->ifa_addr) == 0) {
1600 ifa_acquire(ifa_next, &psref_next);
1601 break;
1602 }
1603 }
1604 pserialize_read_exit(s);
1605
1606 if (ifa_next == NULL) {
1607 nsdl = &u.sdl;
1608 ifa_next = if_dl_create(ifp, &nsdl);
1609 if (ifa_next == NULL) {
1610 error = ENOMEM;
1611 goto done;
1612 }
1613
1614 s = pserialize_read_enter();
1615 ifa_acquire(ifa_next, &psref_next);
1616 pserialize_read_exit(s);
1617
1618 sockaddr_copy(ifa_next->ifa_addr,
1619 ifa_next->ifa_addr->sa_len, &u.sa);
1620 ifa_insert(ifp, ifa_next);
1621 } else {
1622 nsdl = NULL;
1623 }
1624
1625 if (ifa_prev != NULL && ifa_prev == ifp->if_dl) {
1626 if_activate_sadl(ifp, ifa_next, nsdl);
1627 }
1628
1629 ifa_release(ifa_next, &psref_next);
1630 ifa_next = NULL;
1631
1632 if (ifa_prev != NULL && ifa_prev != ifp->if_hwdl) {
1633 ifaref(ifa_prev);
1634 ifa_release(ifa_prev, &psref_prev);
1635 ifa_remove(ifp, ifa_prev);
1636 KASSERTMSG(ifa_prev->ifa_refcnt == 1, "ifa_refcnt=%d",
1637 ifa_prev->ifa_refcnt);
1638 ifafree(ifa_prev);
1639 ifa_prev = NULL;
1640 }
1641
1642 if (ISSET(ifp->if_flags, IFF_RUNNING))
1643 error = ENETRESET;
1644
1645 done:
1646 if (ifa_prev != NULL)
1647 ifa_release(ifa_prev, &psref_prev);
1648 if (ifa_next != NULL)
1649 ifa_release(ifa_next, &psref_next);
1650
1651 return error;
1652 }
1653 static int
1654 iavf_add_multi(struct iavf_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
1655 {
1656 struct ifnet *ifp = &sc->sc_ec.ec_if;
1657 int rv;
1658
1659 if (ISSET(ifp->if_flags, IFF_ALLMULTI))
1660 return 0;
1661
1662 if (memcmp(addrlo, addrhi, ETHER_ADDR_LEN) != 0) {
1663 iavf_del_all_multi(sc);
1664 SET(ifp->if_flags, IFF_ALLMULTI);
1665 return ENETRESET;
1666 }
1667
1668 rv = iavf_eth_addr(sc, addrlo, IAVF_VC_OP_ADD_ETH_ADDR);
1669
1670 if (rv == ENOSPC) {
1671 iavf_del_all_multi(sc);
1672 SET(ifp->if_flags, IFF_ALLMULTI);
1673 return ENETRESET;
1674 }
1675
1676 return rv;
1677 }
1678
1679 static int
1680 iavf_del_multi(struct iavf_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
1681 {
1682 struct ifnet *ifp = &sc->sc_ec.ec_if;
1683 struct ethercom *ec = &sc->sc_ec;
1684 struct ether_multi *enm, *enm_last;
1685 struct ether_multistep step;
1686 int error, rv = 0;
1687
1688 if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
1689 if (memcmp(addrlo, addrhi, ETHER_ADDR_LEN) != 0)
1690 return 0;
1691
1692 iavf_eth_addr(sc, addrlo, IAVF_VC_OP_DEL_ETH_ADDR);
1693 return 0;
1694 }
1695
1696 ETHER_LOCK(ec);
1697 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1698 ETHER_NEXT_MULTI(step, enm)) {
1699 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
1700 ETHER_ADDR_LEN) != 0) {
1701 goto out;
1702 }
1703 }
1704
1705 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1706 ETHER_NEXT_MULTI(step, enm)) {
1707 error = iavf_eth_addr(sc, enm->enm_addrlo,
1708 IAVF_VC_OP_ADD_ETH_ADDR);
1709 if (error != 0)
1710 break;
1711 }
1712
1713 if (enm != NULL) {
1714 enm_last = enm;
1715 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1716 ETHER_NEXT_MULTI(step, enm)) {
1717 if (enm == enm_last)
1718 break;
1719
1720 iavf_eth_addr(sc, enm->enm_addrlo,
1721 IAVF_VC_OP_DEL_ETH_ADDR);
1722 }
1723 } else {
1724 CLR(ifp->if_flags, IFF_ALLMULTI);
1725 rv = ENETRESET;
1726 }
1727
1728 out:
1729 ETHER_UNLOCK(ec);
1730 return rv;
1731 }
1732
1733 static void
1734 iavf_del_all_multi(struct iavf_softc *sc)
1735 {
1736 struct ethercom *ec = &sc->sc_ec;
1737 struct ether_multi *enm;
1738 struct ether_multistep step;
1739
1740 ETHER_LOCK(ec);
1741 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1742 ETHER_NEXT_MULTI(step, enm)) {
1743 iavf_eth_addr(sc, enm->enm_addrlo,
1744 IAVF_VC_OP_DEL_ETH_ADDR);
1745 }
1746 ETHER_UNLOCK(ec);
1747 }
1748
1749 static int
1750 iavf_setup_interrupts(struct iavf_softc *sc)
1751 {
1752 struct pci_attach_args *pa;
1753 kcpuset_t *affinity = NULL;
1754 char intrbuf[PCI_INTRSTR_LEN], xnamebuf[32];
1755 char const *intrstr;
1756 int counts[PCI_INTR_TYPE_SIZE];
1757 int error, affinity_to;
1758 unsigned int vector, qid, num;
1759
1760 /* queue pairs + misc interrupt */
1761 num = sc->sc_nqps_alloc + 1;
1762
1763 num = MIN(num, iavf_calc_msix_count(sc));
1764 if (num == 0) {
1765 return -1;
1766 }
1767
1768 KASSERT(sc->sc_nqps_alloc > 0);
1769 num = MIN(num, sc->sc_nqps_alloc + 1);
1770
1771 pa = &sc->sc_pa;
1772 memset(counts, 0, sizeof(counts));
1773 counts[PCI_INTR_TYPE_MSIX] = num;
1774
1775 error = pci_intr_alloc(pa, &sc->sc_ihp, counts, PCI_INTR_TYPE_MSIX);
1776 if (error != 0) {
1777 IAVF_LOG(sc, LOG_WARNING, "couldn't allocate interrupts\n");
1778 return -1;
1779 }
1780
1781 KASSERT(pci_intr_type(pa->pa_pc, sc->sc_ihp[0]) == PCI_INTR_TYPE_MSIX);
1782
1783 if (counts[PCI_INTR_TYPE_MSIX] < 1) {
1784 IAVF_LOG(sc, LOG_ERR, "couldn't allocate interrupts\n");
1785 } else if (counts[PCI_INTR_TYPE_MSIX] != (int)num) {
1786 IAVF_LOG(sc, LOG_DEBUG,
1787 "requested %u interrupts, but allocated %d interrupts\n",
1788 num, counts[PCI_INTR_TYPE_MSIX]);
1789 num = counts[PCI_INTR_TYPE_MSIX];
1790 }
1791
1792 sc->sc_ihs = kmem_alloc(sizeof(sc->sc_ihs[0]) * num, KM_NOSLEEP);
1793 if (sc->sc_ihs == NULL) {
1794 IAVF_LOG(sc, LOG_ERR,
1795 "couldn't allocate memory for interrupts\n");
1796 goto fail;
1797 }
1798
1799 /* vector #0 is Misc interrupt */
1800 vector = 0;
1801 pci_intr_setattr(pa->pa_pc, &sc->sc_ihp[vector], PCI_INTR_MPSAFE, true);
1802 intrstr = pci_intr_string(pa->pa_pc, sc->sc_ihp[vector],
1803 intrbuf, sizeof(intrbuf));
1804 snprintf(xnamebuf, sizeof(xnamebuf), "%s-Misc",
1805 device_xname(sc->sc_dev));
1806
1807 sc->sc_ihs[vector] = pci_intr_establish_xname(pa->pa_pc,
1808 sc->sc_ihp[vector], IPL_NET, iavf_intr, sc, xnamebuf);
1809 if (sc->sc_ihs[vector] == NULL) {
1810 IAVF_LOG(sc, LOG_WARNING,
1811 "unable to establish interrupt at %s\n", intrstr);
1812 goto fail;
1813 }
1814
1815 kcpuset_create(&affinity, false);
1816 affinity_to = 0;
1817 qid = 0;
1818 for (vector = 1; vector < num; vector++) {
1819 pci_intr_setattr(pa->pa_pc, &sc->sc_ihp[vector],
1820 PCI_INTR_MPSAFE, true);
1821 intrstr = pci_intr_string(pa->pa_pc, sc->sc_ihp[vector],
1822 intrbuf, sizeof(intrbuf));
1823 snprintf(xnamebuf, sizeof(xnamebuf), "%s-TXRX%u",
1824 device_xname(sc->sc_dev), qid);
1825
1826 sc->sc_ihs[vector] = pci_intr_establish_xname(pa->pa_pc,
1827 sc->sc_ihp[vector], IPL_NET, iavf_queue_intr,
1828 (void *)&sc->sc_qps[qid], xnamebuf);
1829 if (sc->sc_ihs[vector] == NULL) {
1830 IAVF_LOG(sc, LOG_WARNING,
1831 "unable to establish interrupt at %s\n", intrstr);
1832 goto fail;
1833 }
1834
1835 kcpuset_zero(affinity);
1836 kcpuset_set(affinity, affinity_to);
1837 error = interrupt_distribute(sc->sc_ihs[vector],
1838 affinity, NULL);
1839
1840 if (error == 0) {
1841 IAVF_LOG(sc, LOG_INFO,
1842 "for TXRX%d interrupt at %s, affinity to %d\n",
1843 qid, intrstr, affinity_to);
1844 } else {
1845 IAVF_LOG(sc, LOG_INFO,
1846 "for TXRX%d interrupt at %s\n",
1847 qid, intrstr);
1848 }
1849
1850 qid++;
1851 affinity_to = (affinity_to + 1) % ncpu;
1852 }
1853
1854 vector = 0;
1855 kcpuset_zero(affinity);
1856 kcpuset_set(affinity, affinity_to);
1857 intrstr = pci_intr_string(pa->pa_pc, sc->sc_ihp[vector],
1858 intrbuf, sizeof(intrbuf));
1859 error = interrupt_distribute(sc->sc_ihs[vector], affinity, NULL);
1860 if (error == 0) {
1861 IAVF_LOG(sc, LOG_INFO,
1862 "for Misc interrupt at %s, affinity to %d\n",
1863 intrstr, affinity_to);
1864 } else {
1865 IAVF_LOG(sc, LOG_INFO,
1866 "for Misc interrupt at %s\n", intrstr);
1867 }
1868
1869 kcpuset_destroy(affinity);
1870
1871 sc->sc_nintrs = num;
1872 return 0;
1873
1874 fail:
1875 if (affinity != NULL)
1876 kcpuset_destroy(affinity);
1877 for (vector = 0; vector < num; vector++) {
1878 if (sc->sc_ihs[vector] == NULL)
1879 continue;
1880 pci_intr_disestablish(pa->pa_pc, sc->sc_ihs[vector]);
1881 }
1882 kmem_free(sc->sc_ihs, sizeof(sc->sc_ihs[0]) * num);
1883 pci_intr_release(pa->pa_pc, sc->sc_ihp, num);
1884
1885 return -1;
1886 }
1887
1888 static void
1889 iavf_teardown_interrupts(struct iavf_softc *sc)
1890 {
1891 struct pci_attach_args *pa;
1892 unsigned int i;
1893
1894 if (sc->sc_ihs == NULL)
1895 return;
1896
1897 pa = &sc->sc_pa;
1898
1899 for (i = 0; i < sc->sc_nintrs; i++) {
1900 pci_intr_disestablish(pa->pa_pc, sc->sc_ihs[i]);
1901 }
1902
1903 kmem_free(sc->sc_ihs, sizeof(sc->sc_ihs[0]) * sc->sc_nintrs);
1904 sc->sc_ihs = NULL;
1905
1906 pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs);
1907 sc->sc_nintrs = 0;
1908 }
1909
1910 static int
1911 iavf_setup_sysctls(struct iavf_softc *sc)
1912 {
1913 const char *devname;
1914 struct sysctllog **log;
1915 const struct sysctlnode *rnode, *rxnode, *txnode;
1916 int error;
1917
1918 log = &sc->sc_sysctllog;
1919 devname = device_xname(sc->sc_dev);
1920
1921 error = sysctl_createv(log, 0, NULL, &rnode,
1922 0, CTLTYPE_NODE, devname,
1923 SYSCTL_DESCR("iavf information and settings"),
1924 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
1925 if (error)
1926 goto out;
1927
1928 error = sysctl_createv(log, 0, &rnode, NULL,
1929 CTLFLAG_READWRITE, CTLTYPE_BOOL, "txrx_workqueue",
1930 SYSCTL_DESCR("Use workqueue for packet processing"),
1931 NULL, 0, &sc->sc_txrx_workqueue, 0, CTL_CREATE, CTL_EOL);
1932 if (error)
1933 goto out;
1934
1935 error = sysctl_createv(log, 0, &rnode, NULL,
1936 CTLFLAG_READWRITE, CTLTYPE_INT, "debug_level",
1937 SYSCTL_DESCR("Debug level"),
1938 NULL, 0, &sc->sc_debuglevel, 0, CTL_CREATE, CTL_EOL);
1939 if (error)
1940 goto out;
1941
1942 error = sysctl_createv(log, 0, &rnode, &rxnode,
1943 0, CTLTYPE_NODE, "rx",
1944 SYSCTL_DESCR("iavf information and settings for Rx"),
1945 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
1946 if (error)
1947 goto out;
1948
1949 error = sysctl_createv(log, 0, &rxnode, NULL,
1950 CTLFLAG_READWRITE, CTLTYPE_INT, "itr",
1951 SYSCTL_DESCR("Interrupt Throttling"),
1952 iavf_sysctl_itr_handler, 0,
1953 (void *)sc, 0, CTL_CREATE, CTL_EOL);
1954 if (error)
1955 goto out;
1956
1957 error = sysctl_createv(log, 0, &rxnode, NULL,
1958 CTLFLAG_READONLY, CTLTYPE_INT, "descriptor_num",
1959 SYSCTL_DESCR("descriptor size"),
1960 NULL, 0, &sc->sc_rx_ring_ndescs, 0, CTL_CREATE, CTL_EOL);
1961 if (error)
1962 goto out;
1963
1964 error = sysctl_createv(log, 0, &rxnode, NULL,
1965 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
1966 SYSCTL_DESCR("max number of Rx packets"
1967 " to process for interrupt processing"),
1968 NULL, 0, &sc->sc_rx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
1969 if (error)
1970 goto out;
1971
1972 error = sysctl_createv(log, 0, &rxnode, NULL,
1973 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
1974 SYSCTL_DESCR("max number of Rx packets"
1975 " to process for deferred processing"),
1976 NULL, 0, &sc->sc_rx_process_limit, 0, CTL_CREATE, CTL_EOL);
1977 if (error)
1978 goto out;
1979
1980 error = sysctl_createv(log, 0, &rnode, &txnode,
1981 0, CTLTYPE_NODE, "tx",
1982 SYSCTL_DESCR("iavf information and settings for Tx"),
1983 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
1984 if (error)
1985 goto out;
1986
1987 error = sysctl_createv(log, 0, &txnode, NULL,
1988 CTLFLAG_READWRITE, CTLTYPE_INT, "itr",
1989 SYSCTL_DESCR("Interrupt Throttling"),
1990 iavf_sysctl_itr_handler, 0,
1991 (void *)sc, 0, CTL_CREATE, CTL_EOL);
1992 if (error)
1993 goto out;
1994
1995 error = sysctl_createv(log, 0, &txnode, NULL,
1996 CTLFLAG_READONLY, CTLTYPE_INT, "descriptor_num",
1997 SYSCTL_DESCR("the number of Tx descriptors"),
1998 NULL, 0, &sc->sc_tx_ring_ndescs, 0, CTL_CREATE, CTL_EOL);
1999 if (error)
2000 goto out;
2001
2002 error = sysctl_createv(log, 0, &txnode, NULL,
2003 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
2004 SYSCTL_DESCR("max number of Tx packets"
2005 " to process for interrupt processing"),
2006 NULL, 0, &sc->sc_tx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
2007 if (error)
2008 goto out;
2009
2010 error = sysctl_createv(log, 0, &txnode, NULL,
2011 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
2012 SYSCTL_DESCR("max number of Tx packets"
2013 " to process for deferred processing"),
2014 NULL, 0, &sc->sc_tx_process_limit, 0, CTL_CREATE, CTL_EOL);
2015 if (error)
2016 goto out;
2017 out:
2018 return error;
2019 }
2020
2021 static void
2022 iavf_teardown_sysctls(struct iavf_softc *sc)
2023 {
2024
2025 sysctl_teardown(&sc->sc_sysctllog);
2026 }
2027
2028 static int
2029 iavf_setup_stats(struct iavf_softc *sc)
2030 {
2031 struct iavf_stat_counters *isc;
2032 const char *dn;
2033
2034 dn = device_xname(sc->sc_dev);
2035 isc = &sc->sc_stat_counters;
2036
2037 iavf_evcnt_attach(&isc->isc_rx_bytes, dn, "Rx bytes");
2038 iavf_evcnt_attach(&isc->isc_rx_unicast, dn, "Rx unicast");
2039 iavf_evcnt_attach(&isc->isc_rx_multicast, dn, "Rx multicast");
2040 iavf_evcnt_attach(&isc->isc_rx_broadcast, dn, "Rx broadcast");
2041 iavf_evcnt_attach(&isc->isc_rx_discards, dn, "Rx discards");
2042 iavf_evcnt_attach(&isc->isc_rx_unknown_protocol,
2043 dn, "Rx unknown protocol");
2044
2045 iavf_evcnt_attach(&isc->isc_tx_bytes, dn, "Tx bytes");
2046 iavf_evcnt_attach(&isc->isc_tx_unicast, dn, "Tx unicast");
2047 iavf_evcnt_attach(&isc->isc_tx_multicast, dn, "Tx multicast");
2048 iavf_evcnt_attach(&isc->isc_tx_broadcast, dn, "Tx broadcast");
2049 iavf_evcnt_attach(&isc->isc_tx_discards, dn, "Tx discards");
2050 iavf_evcnt_attach(&isc->isc_tx_errors, dn, "Tx errors");
2051
2052 return 0;
2053 }
2054
2055 static void
2056 iavf_teardown_stats(struct iavf_softc *sc)
2057 {
2058 struct iavf_stat_counters *isc;
2059
2060 isc = &sc->sc_stat_counters;
2061
2062 evcnt_detach(&isc->isc_rx_bytes);
2063 evcnt_detach(&isc->isc_rx_unicast);
2064 evcnt_detach(&isc->isc_rx_multicast);
2065 evcnt_detach(&isc->isc_rx_broadcast);
2066 evcnt_detach(&isc->isc_rx_discards);
2067 evcnt_detach(&isc->isc_rx_unknown_protocol);
2068
2069 evcnt_detach(&isc->isc_tx_bytes);
2070 evcnt_detach(&isc->isc_tx_unicast);
2071 evcnt_detach(&isc->isc_tx_multicast);
2072 evcnt_detach(&isc->isc_tx_broadcast);
2073 evcnt_detach(&isc->isc_tx_discards);
2074 evcnt_detach(&isc->isc_tx_errors);
2075
2076 }
2077
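/*
 * Program the admin send (ATQ) and receive (ARQ) queue registers from
 * the DMA rings and verify that the base addresses read back correctly.
 * On failure both queues are disabled again and -1 is returned.
 */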
2078 static int
2079 iavf_init_admin_queue(struct iavf_softc *sc)
2080 {
2081 uint32_t reg;
2082
2083 sc->sc_atq_cons = 0;
2084 sc->sc_atq_prod = 0;
2085
2086 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
2087 0, IXL_DMA_LEN(&sc->sc_atq),
2088 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2089 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
2090 0, IXL_DMA_LEN(&sc->sc_arq),
2091 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2092
2093 iavf_wr(sc, sc->sc_aq_regs->atq_head, 0);
2094 iavf_wr(sc, sc->sc_aq_regs->arq_head, 0);
2095 iavf_wr(sc, sc->sc_aq_regs->atq_tail, 0);
2096 iavf_wr(sc, sc->sc_aq_regs->arq_tail, 0);
2097
2098 iavf_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
2099
2100 iavf_wr(sc, sc->sc_aq_regs->atq_bal,
2101 ixl_dmamem_lo(&sc->sc_atq));
2102 iavf_wr(sc, sc->sc_aq_regs->atq_bah,
2103 ixl_dmamem_hi(&sc->sc_atq));
2104 iavf_wr(sc, sc->sc_aq_regs->atq_len,
2105 sc->sc_aq_regs->atq_len_enable | IAVF_AQ_NUM);
2106
2107 iavf_wr(sc, sc->sc_aq_regs->arq_bal,
2108 ixl_dmamem_lo(&sc->sc_arq));
2109 iavf_wr(sc, sc->sc_aq_regs->arq_bah,
2110 ixl_dmamem_hi(&sc->sc_arq));
2111 iavf_wr(sc, sc->sc_aq_regs->arq_len,
2112 sc->sc_aq_regs->arq_len_enable | IAVF_AQ_NUM);
2113
2114 iavf_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
2115
2116 reg = iavf_rd(sc, sc->sc_aq_regs->atq_bal);
2117 if (reg != ixl_dmamem_lo(&sc->sc_atq))
2118 goto fail;
2119
2120 reg = iavf_rd(sc, sc->sc_aq_regs->arq_bal);
2121 if (reg != ixl_dmamem_lo(&sc->sc_arq))
2122 goto fail;
2123
2124 sc->sc_dead = false;
2125 return 0;
2126
2127 fail:
2128 iavf_wr(sc, sc->sc_aq_regs->atq_len, 0);
2129 iavf_wr(sc, sc->sc_aq_regs->arq_len, 0);
2130 return -1;
2131 }
2132
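/*
 * Disable the admin queues, reset the software producer/consumer
 * indices, zero the descriptor rings, and return any live ARQ/ATQ
 * buffers to their idle lists.
 */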
2133 static void
2134 iavf_cleanup_admin_queue(struct iavf_softc *sc)
2135 {
2136 struct ixl_aq_buf *aqb;
2137
2138 iavf_wr(sc, sc->sc_aq_regs->atq_head, 0);
2139 iavf_wr(sc, sc->sc_aq_regs->arq_head, 0);
2140 iavf_wr(sc, sc->sc_aq_regs->atq_tail, 0);
2141 iavf_wr(sc, sc->sc_aq_regs->arq_tail, 0);
2142
2143 iavf_wr(sc, sc->sc_aq_regs->atq_bal, 0);
2144 iavf_wr(sc, sc->sc_aq_regs->atq_bah, 0);
2145 iavf_wr(sc, sc->sc_aq_regs->atq_len, 0);
2146
2147 iavf_wr(sc, sc->sc_aq_regs->arq_bal, 0);
2148 iavf_wr(sc, sc->sc_aq_regs->arq_bah, 0);
2149 iavf_wr(sc, sc->sc_aq_regs->arq_len, 0);
2150 iavf_flush(sc);
2151
2152 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
2153 0, IXL_DMA_LEN(&sc->sc_arq),
2154 BUS_DMASYNC_POSTREAD);
2155 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
2156 0, IXL_DMA_LEN(&sc->sc_atq),
2157 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2158
2159 sc->sc_atq_cons = 0;
2160 sc->sc_atq_prod = 0;
2161 sc->sc_arq_cons = 0;
2162 sc->sc_arq_prod = 0;
2163
2164 memset(IXL_DMA_KVA(&sc->sc_arq), 0, IXL_DMA_LEN(&sc->sc_arq));
2165 memset(IXL_DMA_KVA(&sc->sc_atq), 0, IXL_DMA_LEN(&sc->sc_atq));
2166
2167 while ((aqb = iavf_aqb_get_locked(&sc->sc_arq_live)) != NULL) {
2168 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, aqb->aqb_size,
2169 BUS_DMASYNC_POSTREAD);
2170 iavf_aqb_put_locked(&sc->sc_arq_idle, aqb);
2171 }
2172
2173 while ((aqb = iavf_aqb_get_locked(&sc->sc_atq_live)) != NULL) {
2174 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, aqb->aqb_size,
2175 BUS_DMASYNC_POSTREAD);
2176 iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
2177 }
2178 }
2179
2180 static unsigned int
2181 iavf_calc_msix_count(struct iavf_softc *sc)
2182 {
2183 struct pci_attach_args *pa;
2184 int count;
2185
2186 pa = &sc->sc_pa;
2187 count = pci_msix_count(pa->pa_pc, pa->pa_tag);
2188 if (count < 0) {
2189 IAVF_LOG(sc, LOG_DEBUG, "MSIX config error\n");
2190 count = 0;
2191 }
2192
2193 return MIN(sc->sc_max_vectors, (unsigned int)count);
2194 }
2195
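/*
 * Compute how many queue pairs to use: bounded by the number of CPUs,
 * the queues offered by the VSI, the usable MSI-X vectors (minus one
 * reserved for the misc interrupt), and the driver limit.
 */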
2196 static unsigned int
2197 iavf_calc_queue_pair_size(struct iavf_softc *sc)
2198 {
2199 unsigned int nqp, nvec;
2200
2201 nvec = iavf_calc_msix_count(sc);
2202 if (sc->sc_max_vectors > 1) {
2203 /* reserve one vector for the misc interrupt */
2204 nvec -= 1;
2205 }
2206
2207 nqp = ncpu;
2208 nqp = MIN(nqp, sc->sc_nqps_vsi);
2209 nqp = MIN(nqp, nvec);
2210 nqp = MIN(nqp, (unsigned int)iavf_params.max_qps);
2211
2212 return nqp;
2213 }
2214
2215 static struct iavf_tx_ring *
2216 iavf_txr_alloc(struct iavf_softc *sc, unsigned int qid)
2217 {
2218 struct iavf_tx_ring *txr;
2219 struct iavf_tx_map *maps;
2220 unsigned int i;
2221 int error;
2222
2223 txr = kmem_zalloc(sizeof(*txr), KM_NOSLEEP);
2224 if (txr == NULL)
2225 return NULL;
2226
2227 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_tx_ring_ndescs,
2228 KM_NOSLEEP);
2229 if (maps == NULL)
2230 goto free_txr;
2231
2232 if (iavf_dmamem_alloc(sc->sc_dmat, &txr->txr_mem,
2233 sizeof(struct ixl_tx_desc) * sc->sc_tx_ring_ndescs,
2234 IAVF_TX_QUEUE_ALIGN) != 0) {
2235 goto free_maps;
2236 }
2237
2238 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2239 error = bus_dmamap_create(sc->sc_dmat, IAVF_TX_PKT_MAXSIZE,
2240 IAVF_TX_PKT_DESCS, IAVF_TX_PKT_MAXSIZE, 0,
2241 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &maps[i].txm_map);
2242 if (error)
2243 goto destroy_maps;
2244 }
2245
2246 txr->txr_intrq = pcq_create(sc->sc_tx_ring_ndescs, KM_NOSLEEP);
2247 if (txr->txr_intrq == NULL)
2248 goto destroy_maps;
2249
2250 txr->txr_si = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE,
2251 iavf_deferred_transmit, txr);
2252 if (txr->txr_si == NULL)
2253 goto destroy_pcq;
2254
2255 snprintf(txr->txr_name, sizeof(txr->txr_name), "%s-tx%d",
2256 device_xname(sc->sc_dev), qid);
2257
2258 iavf_evcnt_attach(&txr->txr_defragged,
2259 txr->txr_name, "m_defrag succeeded");
2260 iavf_evcnt_attach(&txr->txr_defrag_failed,
2261 txr->txr_name, "m_defrag failed");
2262 iavf_evcnt_attach(&txr->txr_pcqdrop,
2263 txr->txr_name, "Dropped in pcq");
2264 iavf_evcnt_attach(&txr->txr_transmitdef,
2265 txr->txr_name, "Deferred transmit");
2266 iavf_evcnt_attach(&txr->txr_watchdogto,
2267 txr->txr_name, "Watchdog timed out on queue");
2268 iavf_evcnt_attach(&txr->txr_defer,
2269 txr->txr_name, "Handled queue in softint/workqueue");
2270
2271 evcnt_attach_dynamic(&txr->txr_intr, EVCNT_TYPE_INTR, NULL,
2272 txr->txr_name, "Interrupt on queue");
2273
2274 txr->txr_qid = qid;
2275 txr->txr_sc = sc;
2276 txr->txr_maps = maps;
2277 txr->txr_prod = txr->txr_cons = 0;
2278 txr->txr_tail = I40E_QTX_TAIL1(qid);
2279 mutex_init(&txr->txr_lock, MUTEX_DEFAULT, IPL_NET);
2280
2281 return txr;
2282 destroy_pcq:
2283 pcq_destroy(txr->txr_intrq);
2284 destroy_maps:
2285 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2286 if (maps[i].txm_map == NULL)
2287 continue;
2288 bus_dmamap_destroy(sc->sc_dmat, maps[i].txm_map);
2289 }
2290
2291 iavf_dmamem_free(sc->sc_dmat, &txr->txr_mem);
2292 free_maps:
2293 kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs);
2294 free_txr:
2295 kmem_free(txr, sizeof(*txr));
2296 return NULL;
2297 }
2298
2299 static void
2300 iavf_txr_free(struct iavf_softc *sc, struct iavf_tx_ring *txr)
2301 {
2302 struct iavf_tx_map *maps;
2303 unsigned int i;
2304
2305 maps = txr->txr_maps;
2306 if (maps != NULL) {
2307 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2308 if (maps[i].txm_map == NULL)
2309 continue;
2310 bus_dmamap_destroy(sc->sc_dmat, maps[i].txm_map);
2311 }
2312 kmem_free(txr->txr_maps,
2313 sizeof(maps[0]) * sc->sc_tx_ring_ndescs);
2314 txr->txr_maps = NULL;
2315 }
2316
2317 evcnt_detach(&txr->txr_defragged);
2318 evcnt_detach(&txr->txr_defrag_failed);
2319 evcnt_detach(&txr->txr_pcqdrop);
2320 evcnt_detach(&txr->txr_transmitdef);
2321 evcnt_detach(&txr->txr_watchdogto);
2322 evcnt_detach(&txr->txr_defer);
2323 evcnt_detach(&txr->txr_intr);
2324
2325 iavf_dmamem_free(sc->sc_dmat, &txr->txr_mem);
2326 softint_disestablish(txr->txr_si);
2327 pcq_destroy(txr->txr_intrq);
2328 mutex_destroy(&txr->txr_lock);
2329 kmem_free(txr, sizeof(*txr));
2330 }
2331
2332 static struct iavf_rx_ring *
2333 iavf_rxr_alloc(struct iavf_softc *sc, unsigned int qid)
2334 {
2335 struct iavf_rx_ring *rxr;
2336 struct iavf_rx_map *maps;
2337 unsigned int i;
2338 int error;
2339
2340 rxr = kmem_zalloc(sizeof(*rxr), KM_NOSLEEP);
2341 if (rxr == NULL)
2342 return NULL;
2343
2344 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_rx_ring_ndescs,
2345 KM_NOSLEEP);
2346 if (maps == NULL)
2347 goto free_rxr;
2348
2349 if (iavf_dmamem_alloc(sc->sc_dmat, &rxr->rxr_mem,
2350 sizeof(struct ixl_rx_rd_desc_32) * sc->sc_rx_ring_ndescs,
2351 IAVF_RX_QUEUE_ALIGN) != 0)
2352 goto free_maps;
2353
2354 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2355 error = bus_dmamap_create(sc->sc_dmat, IAVF_MCLBYTES,
2356 1, IAVF_MCLBYTES, 0,
2357 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &maps[i].rxm_map);
2358 if (error)
2359 goto destroy_maps;
2360 }
2361
2362 snprintf(rxr->rxr_name, sizeof(rxr->rxr_name), "%s-rx%d",
2363 device_xname(sc->sc_dev), qid);
2364
2365 iavf_evcnt_attach(&rxr->rxr_mgethdr_failed,
2366 rxr->rxr_name, "MGETHDR failed");
2367 iavf_evcnt_attach(&rxr->rxr_mgetcl_failed,
2368 rxr->rxr_name, "MCLGET failed");
2369 iavf_evcnt_attach(&rxr->rxr_mbuf_load_failed,
2370 rxr->rxr_name, "bus_dmamap_load_mbuf failed");
2371 iavf_evcnt_attach(&rxr->rxr_defer,
2372 rxr->rxr_name, "Handled queue in softint/workqueue");
2373
2374 evcnt_attach_dynamic(&rxr->rxr_intr, EVCNT_TYPE_INTR, NULL,
2375 rxr->rxr_name, "Interrupt on queue");
2376
2377 rxr->rxr_qid = qid;
2378 rxr->rxr_sc = sc;
2379 rxr->rxr_cons = rxr->rxr_prod = 0;
2380 rxr->rxr_m_head = NULL;
2381 rxr->rxr_m_tail = &rxr->rxr_m_head;
2382 rxr->rxr_maps = maps;
2383 rxr->rxr_tail = I40E_QRX_TAIL1(qid);
2384 mutex_init(&rxr->rxr_lock, MUTEX_DEFAULT, IPL_NET);
2385
2386 return rxr;
2387
2388 destroy_maps:
2389 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2390 if (maps[i].rxm_map == NULL)
2391 continue;
2392 bus_dmamap_destroy(sc->sc_dmat, maps[i].rxm_map);
2393 }
2394 iavf_dmamem_free(sc->sc_dmat, &rxr->rxr_mem);
2395 free_maps:
2396 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs);
2397 free_rxr:
2398 kmem_free(rxr, sizeof(*rxr));
2399
2400 return NULL;
2401 }
2402
2403 static void
2404 iavf_rxr_free(struct iavf_softc *sc, struct iavf_rx_ring *rxr)
2405 {
2406 struct iavf_rx_map *maps;
2407 unsigned int i;
2408
2409 maps = rxr->rxr_maps;
2410 if (maps != NULL) {
2411 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2412 if (maps[i].rxm_map == NULL)
2413 continue;
2414 bus_dmamap_destroy(sc->sc_dmat, maps[i].rxm_map);
2415 }
2416 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs);
2417 rxr->rxr_maps = NULL;
2418 }
2419
2420 evcnt_detach(&rxr->rxr_mgethdr_failed);
2421 evcnt_detach(&rxr->rxr_mgetcl_failed);
2422 evcnt_detach(&rxr->rxr_mbuf_load_failed);
2423 evcnt_detach(&rxr->rxr_defer);
2424 evcnt_detach(&rxr->rxr_intr);
2425
2426 iavf_dmamem_free(sc->sc_dmat, &rxr->rxr_mem);
2427 mutex_destroy(&rxr->rxr_lock);
2428 kmem_free(rxr, sizeof(*rxr));
2429 }
2430
2431 static int
2432 iavf_queue_pairs_alloc(struct iavf_softc *sc)
2433 {
2434 struct iavf_queue_pair *qp;
2435 unsigned int i, num;
2436
2437 num = iavf_calc_queue_pair_size(sc);
2438 if (num <= 0) {
2439 return -1;
2440 }
2441
2442 sc->sc_qps = kmem_zalloc(sizeof(sc->sc_qps[0]) * num, KM_NOSLEEP);
2443 if (sc->sc_qps == NULL) {
2444 return -1;
2445 }
2446
2447 for (i = 0; i < num; i++) {
2448 qp = &sc->sc_qps[i];
2449
2450 qp->qp_rxr = iavf_rxr_alloc(sc, i);
2451 qp->qp_txr = iavf_txr_alloc(sc, i);
2452
2453 if (qp->qp_rxr == NULL || qp->qp_txr == NULL)
2454 goto free;
2455
2456 qp->qp_si = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE,
2457 iavf_handle_queue, qp);
2458 if (qp->qp_si == NULL)
2459 goto free;
2460 }
2461
2462 sc->sc_nqps_alloc = num;
2463 return 0;
2464 free:
2465 for (i = 0; i < num; i++) {
2466 qp = &sc->sc_qps[i];
2467
2468 if (qp->qp_si != NULL)
2469 softint_disestablish(qp->qp_si);
2470 if (qp->qp_rxr != NULL)
2471 iavf_rxr_free(sc, qp->qp_rxr);
2472 if (qp->qp_txr != NULL)
2473 iavf_txr_free(sc, qp->qp_txr);
2474 }
2475
2476 kmem_free(sc->sc_qps, sizeof(sc->sc_qps[0]) * num);
2477 sc->sc_qps = NULL;
2478
2479 return -1;
2480 }
2481
2482 static void
2483 iavf_queue_pairs_free(struct iavf_softc *sc)
2484 {
2485 struct iavf_queue_pair *qp;
2486 unsigned int i;
2487 size_t sz;
2488
2489 if (sc->sc_qps == NULL)
2490 return;
2491
2492 for (i = 0; i < sc->sc_nqps_alloc; i++) {
2493 qp = &sc->sc_qps[i];
2494
2495 if (qp->qp_si != NULL)
2496 softint_disestablish(qp->qp_si);
2497 if (qp->qp_rxr != NULL)
2498 iavf_rxr_free(sc, qp->qp_rxr);
2499 if (qp->qp_txr != NULL)
2500 iavf_txr_free(sc, qp->qp_txr);
2501 }
2502
2503 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqps_alloc;
2504 kmem_free(sc->sc_qps, sz);
2505 sc->sc_qps = NULL;
2506 sc->sc_nqps_alloc = 0;
2507 }
2508
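/*
 * Post fresh mbuf clusters to the unrefreshed slots of an Rx ring and
 * advance the tail register if anything was posted.  Returns -1 when an
 * mbuf allocation or DMA load fails, leaving the ring partially filled.
 */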
2509 static int
2510 iavf_rxfill(struct iavf_softc *sc, struct iavf_rx_ring *rxr)
2511 {
2512 struct ixl_rx_rd_desc_32 *ring, *rxd;
2513 struct iavf_rx_map *rxm;
2514 bus_dmamap_t map;
2515 struct mbuf *m;
2516 unsigned int slots, prod, mask;
2517 int error, post = 0;
2518
2519 slots = ixl_rxr_unrefreshed(rxr->rxr_prod, rxr->rxr_cons,
2520 sc->sc_rx_ring_ndescs);
2521
2522 if (slots == 0)
2523 return 0;
2524
2525 error = 0;
2526 prod = rxr->rxr_prod;
2527
2528 ring = IXL_DMA_KVA(&rxr->rxr_mem);
2529 mask = sc->sc_rx_ring_ndescs - 1;
2530
2531 do {
2532 rxm = &rxr->rxr_maps[prod];
2533
2534 MGETHDR(m, M_DONTWAIT, MT_DATA);
2535 if (m == NULL) {
2536 rxr->rxr_mgethdr_failed.ev_count++;
2537 error = -1;
2538 break;
2539 }
2540
2541 MCLGET(m, M_DONTWAIT);
2542 if (!ISSET(m->m_flags, M_EXT)) {
2543 rxr->rxr_mgetcl_failed.ev_count++;
2544 error = -1;
2545 m_freem(m);
2546 break;
2547 }
2548
2549 m->m_len = m->m_pkthdr.len = MCLBYTES;
2550 m_adj(m, ETHER_ALIGN);
2551
2552 map = rxm->rxm_map;
2553
2554 if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
2555 BUS_DMA_READ|BUS_DMA_NOWAIT) != 0) {
2556 rxr->rxr_mbuf_load_failed.ev_count++;
2557 error = -1;
2558 m_freem(m);
2559 break;
2560 }
2561
2562 rxm->rxm_m = m;
2563
2564 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2565 BUS_DMASYNC_PREREAD);
2566
2567 rxd = &ring[prod];
2568 rxd->paddr = htole64(map->dm_segs[0].ds_addr);
2569 rxd->haddr = htole64(0);
2570
2571 prod++;
2572 prod &= mask;
2573 post = 1;
2574 } while (--slots);
2575
2576 if (post) {
2577 rxr->rxr_prod = prod;
2578 iavf_wr(sc, rxr->rxr_tail, prod);
2579 }
2580
2581 return error;
2582 }
2583
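/*
 * Translate the Rx descriptor packet type and error bits (IPE/L4E) into
 * mbuf csum_flags.  Nothing is set unless the hardware reports that it
 * verified the L3/L4 checksums (L3L4P).
 */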
2584 static inline void
2585 iavf_rx_csum(struct mbuf *m, uint64_t qword)
2586 {
2587 int flags_mask;
2588
2589 if (!ISSET(qword, IXL_RX_DESC_L3L4P)) {
2590 /* No L3 or L4 checksum was calculated */
2591 return;
2592 }
2593
2594 switch (__SHIFTOUT(qword, IXL_RX_DESC_PTYPE_MASK)) {
2595 case IXL_RX_DESC_PTYPE_IPV4FRAG:
2596 case IXL_RX_DESC_PTYPE_IPV4:
2597 case IXL_RX_DESC_PTYPE_SCTPV4:
2598 case IXL_RX_DESC_PTYPE_ICMPV4:
2599 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
2600 break;
2601 case IXL_RX_DESC_PTYPE_TCPV4:
2602 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
2603 flags_mask |= M_CSUM_TCPv4 | M_CSUM_TCP_UDP_BAD;
2604 break;
2605 case IXL_RX_DESC_PTYPE_UDPV4:
2606 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
2607 flags_mask |= M_CSUM_UDPv4 | M_CSUM_TCP_UDP_BAD;
2608 break;
2609 case IXL_RX_DESC_PTYPE_TCPV6:
2610 flags_mask = M_CSUM_TCPv6 | M_CSUM_TCP_UDP_BAD;
2611 break;
2612 case IXL_RX_DESC_PTYPE_UDPV6:
2613 flags_mask = M_CSUM_UDPv6 | M_CSUM_TCP_UDP_BAD;
2614 break;
2615 default:
2616 flags_mask = 0;
2617 }
2618
2619 m->m_pkthdr.csum_flags |= (flags_mask & (M_CSUM_IPv4 |
2620 M_CSUM_TCPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv4 | M_CSUM_UDPv6));
2621
2622 if (ISSET(qword, IXL_RX_DESC_IPE)) {
2623 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_IPv4_BAD);
2624 }
2625
2626 if (ISSET(qword, IXL_RX_DESC_L4E)) {
2627 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_TCP_UDP_BAD);
2628 }
2629 }
2630
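/*
 * Harvest completed Rx descriptors, up to rxlimit packets per call.
 * Multi-descriptor packets are assembled via rxr_m_head/rxr_m_tail and
 * good frames are handed to if_percpuq.  Returns nonzero when the limit
 * was hit and more work remains.
 */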
2631 static int
2632 iavf_rxeof(struct iavf_softc *sc, struct iavf_rx_ring *rxr, u_int rxlimit,
2633 struct evcnt *ecnt)
2634 {
2635 struct ifnet *ifp = &sc->sc_ec.ec_if;
2636 struct ixl_rx_wb_desc_32 *ring, *rxd;
2637 struct iavf_rx_map *rxm;
2638 bus_dmamap_t map;
2639 unsigned int cons, prod;
2640 struct mbuf *m;
2641 uint64_t word, word0;
2642 unsigned int len;
2643 unsigned int mask;
2644 int done = 0, more = 0;
2645
2646 KASSERT(mutex_owned(&rxr->rxr_lock));
2647
2648 if (!ISSET(ifp->if_flags, IFF_RUNNING))
2649 return 0;
2650
2651 prod = rxr->rxr_prod;
2652 cons = rxr->rxr_cons;
2653
2654 if (cons == prod)
2655 return 0;
2656
2657 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
2658 0, IXL_DMA_LEN(&rxr->rxr_mem),
2659 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2660
2661 ring = IXL_DMA_KVA(&rxr->rxr_mem);
2662 mask = sc->sc_rx_ring_ndescs - 1;
2663
2664 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
2665
2666 do {
2667 if (rxlimit-- <= 0) {
2668 more = 1;
2669 break;
2670 }
2671
2672 rxd = &ring[cons];
2673
2674 word = le64toh(rxd->qword1);
2675
2676 if (!ISSET(word, IXL_RX_DESC_DD))
2677 break;
2678
2679 rxm = &rxr->rxr_maps[cons];
2680
2681 map = rxm->rxm_map;
2682 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2683 BUS_DMASYNC_POSTREAD);
2684 bus_dmamap_unload(sc->sc_dmat, map);
2685
2686 m = rxm->rxm_m;
2687 rxm->rxm_m = NULL;
2688
2689 KASSERT(m != NULL);
2690
2691 len = (word & IXL_RX_DESC_PLEN_MASK) >> IXL_RX_DESC_PLEN_SHIFT;
2692 m->m_len = len;
2693 m->m_pkthdr.len = 0;
2694
2695 m->m_next = NULL;
2696 *rxr->rxr_m_tail = m;
2697 rxr->rxr_m_tail = &m->m_next;
2698
2699 m = rxr->rxr_m_head;
2700 m->m_pkthdr.len += len;
2701
2702 if (ISSET(word, IXL_RX_DESC_EOP)) {
2703 word0 = le64toh(rxd->qword0);
2704
2705 if (ISSET(word, IXL_RX_DESC_L2TAG1P)) {
2706 vlan_set_tag(m,
2707 __SHIFTOUT(word0, IXL_RX_DESC_L2TAG1_MASK));
2708 }
2709
2710 if ((ifp->if_capenable & IAVF_IFCAP_RXCSUM) != 0)
2711 iavf_rx_csum(m, word);
2712
2713 if (!ISSET(word,
2714 IXL_RX_DESC_RXE | IXL_RX_DESC_OVERSIZE)) {
2715 m_set_rcvif(m, ifp);
2716 if_statinc_ref(nsr, if_ipackets);
2717 if_statadd_ref(nsr, if_ibytes,
2718 m->m_pkthdr.len);
2719 if_percpuq_enqueue(sc->sc_ipq, m);
2720 } else {
2721 if_statinc_ref(nsr, if_ierrors);
2722 m_freem(m);
2723 }
2724
2725 rxr->rxr_m_head = NULL;
2726 rxr->rxr_m_tail = &rxr->rxr_m_head;
2727 }
2728
2729 cons++;
2730 cons &= mask;
2731
2732 done = 1;
2733 } while (cons != prod);
2734
2735 if (done) {
2736 ecnt->ev_count++;
2737 rxr->rxr_cons = cons;
2738 if (iavf_rxfill(sc, rxr) == -1)
2739 if_statinc_ref(nsr, if_iqdrops);
2740 }
2741
2742 IF_STAT_PUTREF(ifp);
2743
2744 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
2745 0, IXL_DMA_LEN(&rxr->rxr_mem),
2746 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2747
2748 return more;
2749 }
2750
2751 static void
2752 iavf_rxr_clean(struct iavf_softc *sc, struct iavf_rx_ring *rxr)
2753 {
2754 struct iavf_rx_map *maps, *rxm;
2755 bus_dmamap_t map;
2756 unsigned int i;
2757
2758 KASSERT(mutex_owned(&rxr->rxr_lock));
2759
2760 maps = rxr->rxr_maps;
2761 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2762 rxm = &maps[i];
2763
2764 if (rxm->rxm_m == NULL)
2765 continue;
2766
2767 map = rxm->rxm_map;
2768 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2769 BUS_DMASYNC_POSTWRITE);
2770 bus_dmamap_unload(sc->sc_dmat, map);
2771
2772 m_freem(rxm->rxm_m);
2773 rxm->rxm_m = NULL;
2774 }
2775
2776 m_freem(rxr->rxr_m_head);
2777 rxr->rxr_m_head = NULL;
2778 rxr->rxr_m_tail = &rxr->rxr_m_head;
2779
2780 memset(IXL_DMA_KVA(&rxr->rxr_mem), 0, IXL_DMA_LEN(&rxr->rxr_mem));
2781 rxr->rxr_prod = rxr->rxr_cons = 0;
2782 }
2783
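/*
 * Reclaim descriptors for transmitted packets, up to txlimit per call.
 * Clears IFF_OACTIVE and kicks the deferred start on queue 0, and stops
 * the Tx watchdog once the ring drains.  Returns nonzero when the limit
 * was hit and more work remains.
 */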
2784 static int
2785 iavf_txeof(struct iavf_softc *sc, struct iavf_tx_ring *txr, u_int txlimit,
2786 struct evcnt *ecnt)
2787 {
2788 struct ifnet *ifp = &sc->sc_ec.ec_if;
2789 struct ixl_tx_desc *ring, *txd;
2790 struct iavf_tx_map *txm;
2791 struct mbuf *m;
2792 bus_dmamap_t map;
2793 unsigned int cons, prod, last;
2794 unsigned int mask;
2795 uint64_t dtype;
2796 int done = 0, more = 0;
2797
2798 KASSERT(mutex_owned(&txr->txr_lock));
2799
2800 prod = txr->txr_prod;
2801 cons = txr->txr_cons;
2802
2803 if (cons == prod)
2804 return 0;
2805
2806 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2807 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTREAD);
2808
2809 ring = IXL_DMA_KVA(&txr->txr_mem);
2810 mask = sc->sc_tx_ring_ndescs - 1;
2811
2812 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
2813
2814 do {
2815 if (txlimit-- <= 0) {
2816 more = 1;
2817 break;
2818 }
2819
2820 txm = &txr->txr_maps[cons];
2821 last = txm->txm_eop;
2822 txd = &ring[last];
2823
2824 dtype = txd->cmd & htole64(IXL_TX_DESC_DTYPE_MASK);
2825 if (dtype != htole64(IXL_TX_DESC_DTYPE_DONE))
2826 break;
2827
2828 map = txm->txm_map;
2829
2830 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2831 BUS_DMASYNC_POSTWRITE);
2832 bus_dmamap_unload(sc->sc_dmat, map);
2833
2834 m = txm->txm_m;
2835 if (m != NULL) {
2836 if_statinc_ref(nsr, if_opackets);
2837 if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
2838 if (ISSET(m->m_flags, M_MCAST))
2839 if_statinc_ref(nsr, if_omcasts);
2840 m_freem(m);
2841 }
2842
2843 txm->txm_m = NULL;
2844 txm->txm_eop = -1;
2845
2846 cons = last + 1;
2847 cons &= mask;
2848 done = 1;
2849 } while (cons != prod);
2850
2851 IF_STAT_PUTREF(ifp);
2852
2853 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2854 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREREAD);
2855
2856 txr->txr_cons = cons;
2857
2858 if (done) {
2859 ecnt->ev_count++;
2860 softint_schedule(txr->txr_si);
2861 if (txr->txr_qid == 0) {
2862 CLR(ifp->if_flags, IFF_OACTIVE);
2863 if_schedule_deferred_start(ifp);
2864 }
2865 }
2866
2867 if (txr->txr_cons == txr->txr_prod) {
2868 txr->txr_watchdog = IAVF_WATCHDOG_STOP;
2869 }
2870
2871 return more;
2872 }
2873
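/*
 * Load an mbuf chain for Tx DMA.  If the chain needs more segments than
 * the map allows (EFBIG), try m_defrag() once and reload, counting the
 * outcome in the ring's event counters.
 */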
2874 static inline int
2875 iavf_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf **m0,
2876 struct iavf_tx_ring *txr)
2877 {
2878 struct mbuf *m;
2879 int error;
2880
2881 KASSERT(mutex_owned(&txr->txr_lock));
2882
2883 m = *m0;
2884
2885 error = bus_dmamap_load_mbuf(dmat, map, m,
2886 BUS_DMA_STREAMING|BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2887 if (error != EFBIG)
2888 return error;
2889
2890 m = m_defrag(m, M_DONTWAIT);
2891 if (m != NULL) {
2892 *m0 = m;
2893 txr->txr_defragged.ev_count++;
2894 error = bus_dmamap_load_mbuf(dmat, map, m,
2895 BUS_DMA_STREAMING|BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2896 } else {
2897 txr->txr_defrag_failed.ev_count++;
2898 error = ENOBUFS;
2899 }
2900
2901 return error;
2902 }
2903
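/*
 * Build the checksum offload portion of the Tx descriptor command word:
 * MAC/IP/L4 header lengths and the IIPT/L4T type bits, derived from the
 * mbuf's csum_flags and header length hints.
 */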
2904 static inline int
2905 iavf_tx_setup_offloads(struct mbuf *m, uint64_t *cmd_txd)
2906 {
2907 struct ether_header *eh;
2908 size_t len;
2909 uint64_t cmd;
2910
2911 cmd = 0;
2912
2913 eh = mtod(m, struct ether_header *);
2914 switch (ntohs(eh->ether_type)) {
2915 case ETHERTYPE_IP:
2916 case ETHERTYPE_IPV6:
2917 len = ETHER_HDR_LEN;
2918 break;
2919 case ETHERTYPE_VLAN:
2920 len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2921 break;
2922 default:
2923 len = 0;
2924 }
2925 cmd |= ((len >> 1) << IXL_TX_DESC_MACLEN_SHIFT);
2926
2927 if (m->m_pkthdr.csum_flags &
2928 (M_CSUM_TSOv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
2929 cmd |= IXL_TX_DESC_CMD_IIPT_IPV4;
2930 }
2931 if (m->m_pkthdr.csum_flags & M_CSUM_IPv4) {
2932 cmd |= IXL_TX_DESC_CMD_IIPT_IPV4_CSUM;
2933 }
2934
2935 if (m->m_pkthdr.csum_flags &
2936 (M_CSUM_TSOv6 | M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
2937 cmd |= IXL_TX_DESC_CMD_IIPT_IPV6;
2938 }
2939
2940 switch (cmd & IXL_TX_DESC_CMD_IIPT_MASK) {
2941 case IXL_TX_DESC_CMD_IIPT_IPV4:
2942 case IXL_TX_DESC_CMD_IIPT_IPV4_CSUM:
2943 len = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data);
2944 break;
2945 case IXL_TX_DESC_CMD_IIPT_IPV6:
2946 len = M_CSUM_DATA_IPv6_IPHL(m->m_pkthdr.csum_data);
2947 break;
2948 default:
2949 len = 0;
2950 }
2951 cmd |= ((len >> 2) << IXL_TX_DESC_IPLEN_SHIFT);
2952
2953 if (m->m_pkthdr.csum_flags &
2954 (M_CSUM_TSOv4 | M_CSUM_TSOv6 | M_CSUM_TCPv4 | M_CSUM_TCPv6)) {
2955 len = sizeof(struct tcphdr);
2956 cmd |= IXL_TX_DESC_CMD_L4T_EOFT_TCP;
2957 } else if (m->m_pkthdr.csum_flags & (M_CSUM_UDPv4 | M_CSUM_UDPv6)) {
2958 len = sizeof(struct udphdr);
2959 cmd |= IXL_TX_DESC_CMD_L4T_EOFT_UDP;
2960 } else {
2961 len = 0;
2962 }
2963 cmd |= ((len >> 2) << IXL_TX_DESC_L4LEN_SHIFT);
2964
2965 *cmd_txd |= cmd;
2966 return 0;
2967 }
2968
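/*
 * Common transmit path shared by iavf_start and iavf_transmit: dequeue
 * packets from if_snd or the per-ring pcq, map them, fill the data
 * descriptors (EOP|RS on the last segment), and write the tail register
 * once if anything was queued.
 */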
2969 static void
2970 iavf_tx_common_locked(struct ifnet *ifp, struct iavf_tx_ring *txr,
2971 bool is_transmit)
2972 {
2973 struct iavf_softc *sc;
2974 struct ixl_tx_desc *ring, *txd;
2975 struct iavf_tx_map *txm;
2976 bus_dmamap_t map;
2977 struct mbuf *m;
2978 unsigned int prod, free, last, i;
2979 unsigned int mask;
2980 uint64_t cmd, cmd_txd;
2981 int post = 0;
2982
2983 KASSERT(mutex_owned(&txr->txr_lock));
2984
2985 sc = ifp->if_softc;
2986
2987 if (!ISSET(ifp->if_flags, IFF_RUNNING)
2988 || (!is_transmit && ISSET(ifp->if_flags, IFF_OACTIVE))) {
2989 if (!is_transmit)
2990 IFQ_PURGE(&ifp->if_snd);
2991 return;
2992 }
2993
2994 prod = txr->txr_prod;
2995 free = txr->txr_cons;
2996
2997 if (free <= prod)
2998 free += sc->sc_tx_ring_ndescs;
2999 free -= prod;
3000
3001 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
3002 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTWRITE);
3003
3004 ring = IXL_DMA_KVA(&txr->txr_mem);
3005 mask = sc->sc_tx_ring_ndescs - 1;
3006 last = prod;
3007 cmd = 0;
3008 txd = NULL;
3009
3010 for (;;) {
3011 if (free < IAVF_TX_PKT_DESCS) {
3012 if (!is_transmit)
3013 SET(ifp->if_flags, IFF_OACTIVE);
3014 break;
3015 }
3016
3017 if (is_transmit)
3018 m = pcq_get(txr->txr_intrq);
3019 else
3020 IFQ_DEQUEUE(&ifp->if_snd, m);
3021
3022 if (m == NULL)
3023 break;
3024
3025 txm = &txr->txr_maps[prod];
3026 map = txm->txm_map;
3027
3028 if (iavf_load_mbuf(sc->sc_dmat, map, &m, txr) != 0) {
3029 if_statinc(ifp, if_oerrors);
3030 m_freem(m);
3031 continue;
3032 }
3033
3034 cmd_txd = 0;
3035 if (m->m_pkthdr.csum_flags & IAVF_CSUM_ALL_OFFLOAD) {
3036 iavf_tx_setup_offloads(m, &cmd_txd);
3037 }
3038 if (vlan_has_tag(m)) {
3039 cmd_txd |= IXL_TX_DESC_CMD_IL2TAG1 |
3040 ((uint64_t)vlan_get_tag(m)
3041 << IXL_TX_DESC_L2TAG1_SHIFT);
3042 }
3043
3044 bus_dmamap_sync(sc->sc_dmat, map, 0,
3045 map->dm_mapsize, BUS_DMASYNC_PREWRITE);
3046
3047 for (i = 0; i < (unsigned int)map->dm_nsegs; i++) {
3048 txd = &ring[prod];
3049
3050 cmd = (uint64_t)map->dm_segs[i].ds_len <<
3051 IXL_TX_DESC_BSIZE_SHIFT;
3052 cmd |= IXL_TX_DESC_DTYPE_DATA|IXL_TX_DESC_CMD_ICRC|
3053 cmd_txd;
3054
3055 txd->addr = htole64(map->dm_segs[i].ds_addr);
3056 txd->cmd = htole64(cmd);
3057
3058 last = prod;
3059 prod++;
3060 prod &= mask;
3061 }
3062
3063 cmd |= IXL_TX_DESC_CMD_EOP|IXL_TX_DESC_CMD_RS;
3064 txd->cmd = htole64(cmd);
3065 txm->txm_m = m;
3066 txm->txm_eop = last;
3067
3068 bpf_mtap(ifp, m, BPF_D_OUT);
3069 free -= i;
3070 post = 1;
3071 }
3072
3073 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
3074 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREWRITE);
3075
3076 if (post) {
3077 txr->txr_prod = prod;
3078 iavf_wr(sc, txr->txr_tail, prod);
3079 txr->txr_watchdog = IAVF_WATCHDOG_TICKS;
3080 }
3081 }
3082
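/*
 * Service one queue pair's Tx and Rx rings under their locks.  Bit 0 of
 * the return value is set when Tx has more work pending, bit 1 when Rx
 * does.
 */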
3083 static inline int
3084 iavf_handle_queue_common(struct iavf_softc *sc, struct iavf_queue_pair *qp,
3085 u_int txlimit, struct evcnt *txevcnt,
3086 u_int rxlimit, struct evcnt *rxevcnt)
3087 {
3088 struct iavf_tx_ring *txr;
3089 struct iavf_rx_ring *rxr;
3090 int txmore, rxmore;
3091 int rv;
3092
3093 txr = qp->qp_txr;
3094 rxr = qp->qp_rxr;
3095
3096 mutex_enter(&txr->txr_lock);
3097 txmore = iavf_txeof(sc, txr, txlimit, txevcnt);
3098 mutex_exit(&txr->txr_lock);
3099
3100 mutex_enter(&rxr->rxr_lock);
3101 rxmore = iavf_rxeof(sc, rxr, rxlimit, rxevcnt);
3102 mutex_exit(&rxr->rxr_lock);
3103
3104 rv = txmore | (rxmore << 1);
3105
3106 return rv;
3107 }
3108
3109 static void
3110 iavf_sched_handle_queue(struct iavf_softc *sc, struct iavf_queue_pair *qp)
3111 {
3112
3113 if (qp->qp_workqueue)
3114 workqueue_enqueue(sc->sc_workq_txrx, &qp->qp_work, NULL);
3115 else
3116 softint_schedule(qp->qp_si);
3117 }
3118
3119 static void
3120 iavf_start(struct ifnet *ifp)
3121 {
3122 struct iavf_softc *sc;
3123 struct iavf_tx_ring *txr;
3124
3125 sc = ifp->if_softc;
3126 txr = sc->sc_qps[0].qp_txr;
3127
3128 mutex_enter(&txr->txr_lock);
3129 iavf_tx_common_locked(ifp, txr, false);
3130 mutex_exit(&txr->txr_lock);
3131
3132 }
3133
3134 static inline unsigned int
3135 iavf_select_txqueue(struct iavf_softc *sc, struct mbuf *m)
3136 {
3137 u_int cpuid;
3138
3139 cpuid = cpu_index(curcpu());
3140
3141 return (unsigned int)(cpuid % sc->sc_nqueue_pairs);
3142 }
3143
3144 static int
3145 iavf_transmit(struct ifnet *ifp, struct mbuf *m)
3146 {
3147 struct iavf_softc *sc;
3148 struct iavf_tx_ring *txr;
3149 unsigned int qid;
3150
3151 sc = ifp->if_softc;
3152 qid = iavf_select_txqueue(sc, m);
3153
3154 txr = sc->sc_qps[qid].qp_txr;
3155
3156 if (__predict_false(!pcq_put(txr->txr_intrq, m))) {
3157 mutex_enter(&txr->txr_lock);
3158 txr->txr_pcqdrop.ev_count++;
3159 mutex_exit(&txr->txr_lock);
3160
3161 m_freem(m);
3162 return ENOBUFS;
3163 }
3164
3165 if (mutex_tryenter(&txr->txr_lock)) {
3166 iavf_tx_common_locked(ifp, txr, true);
3167 mutex_exit(&txr->txr_lock);
3168 } else {
3169 kpreempt_disable();
3170 softint_schedule(txr->txr_si);
3171 kpreempt_enable();
3172 }
3173 return 0;
3174 }
3175
3176 static void
3177 iavf_deferred_transmit(void *xtxr)
3178 {
3179 struct iavf_tx_ring *txr;
3180 struct iavf_softc *sc;
3181 struct ifnet *ifp;
3182
3183 txr = xtxr;
3184 sc = txr->txr_sc;
3185 ifp = &sc->sc_ec.ec_if;
3186
3187 mutex_enter(&txr->txr_lock);
3188 txr->txr_transmitdef.ev_count++;
3189 if (pcq_peek(txr->txr_intrq) != NULL)
3190 iavf_tx_common_locked(ifp, txr, true);
3191 mutex_exit(&txr->txr_lock);
3192 }
3193
3194 static void
3195 iavf_txr_clean(struct iavf_softc *sc, struct iavf_tx_ring *txr)
3196 {
3197 struct iavf_tx_map *maps, *txm;
3198 bus_dmamap_t map;
3199 unsigned int i;
3200
3201 KASSERT(mutex_owned(&txr->txr_lock));
3202
3203 maps = txr->txr_maps;
3204 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
3205 txm = &maps[i];
3206
3207 if (txm->txm_m == NULL)
3208 continue;
3209
3210 map = txm->txm_map;
3211 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3212 BUS_DMASYNC_POSTWRITE);
3213 bus_dmamap_unload(sc->sc_dmat, map);
3214
3215 m_freem(txm->txm_m);
3216 txm->txm_m = NULL;
3217 }
3218
3219 memset(IXL_DMA_KVA(&txr->txr_mem), 0, IXL_DMA_LEN(&txr->txr_mem));
3220 txr->txr_prod = txr->txr_cons = 0;
3221 }
3222
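/*
 * Handler for the vector 0 ("Misc") interrupt: detects an in-progress
 * VF reset, services the admin queues, and drains every queue pair when
 * the ICR indicates queue work routed to this vector.
 */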
3223 static int
3224 iavf_intr(void *xsc)
3225 {
3226 struct iavf_softc *sc = xsc;
3227 struct ifnet *ifp = &sc->sc_ec.ec_if;
3228 struct iavf_rx_ring *rxr;
3229 struct iavf_tx_ring *txr;
3230 uint32_t icr;
3231 unsigned int i;
3232
3233 /* read I40E_VFINT_ICR0_ENA1 to clear status */
3234 (void)iavf_rd(sc, I40E_VFINT_ICR0_ENA1);
3235
3236 iavf_intr_enable(sc);
3237 icr = iavf_rd(sc, I40E_VFINT_ICR01);
3238
3239 if (icr == IAVF_REG_VFR) {
3240 log(LOG_INFO, "%s: VF reset in progress\n",
3241 ifp->if_xname);
3242 iavf_work_set(&sc->sc_reset_task, iavf_reset_start, sc);
3243 iavf_work_add(sc->sc_workq, &sc->sc_reset_task);
3244 return 1;
3245 }
3246
3247 if (ISSET(icr, I40E_VFINT_ICR01_ADMINQ_MASK)) {
3248 mutex_enter(&sc->sc_adminq_lock);
3249 iavf_atq_done(sc);
3250 iavf_arq(sc);
3251 mutex_exit(&sc->sc_adminq_lock);
3252 }
3253
3254 if (ISSET(icr, I40E_VFINT_ICR01_QUEUE_0_MASK)) {
3255 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
3256 rxr = sc->sc_qps[i].qp_rxr;
3257 txr = sc->sc_qps[i].qp_txr;
3258
3259 mutex_enter(&rxr->rxr_lock);
3260 while (iavf_rxeof(sc, rxr, UINT_MAX,
3261 &rxr->rxr_intr) != 0) {
3262 /* do nothing */
3263 }
3264 mutex_exit(&rxr->rxr_lock);
3265
3266 mutex_enter(&txr->txr_lock);
3267 while (iavf_txeof(sc, txr, UINT_MAX,
3268 &txr->txr_intr) != 0) {
3269 /* do nothing */
3270 }
3271 mutex_exit(&txr->txr_lock);
3272 }
3273 }
3274
3275 return 0;
3276 }
3277
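/*
 * Per queue pair MSI-X handler: process within the interrupt limits,
 * then either reschedule the pair (softint or workqueue) when more work
 * remains or re-enable the queue interrupt.
 */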
3278 static int
3279 iavf_queue_intr(void *xqp)
3280 {
3281 struct iavf_queue_pair *qp = xqp;
3282 struct iavf_tx_ring *txr;
3283 struct iavf_rx_ring *rxr;
3284 struct iavf_softc *sc;
3285 unsigned int qid;
3286 u_int txlimit, rxlimit;
3287 int more;
3288
3289 txr = qp->qp_txr;
3290 rxr = qp->qp_rxr;
3291 sc = txr->txr_sc;
3292 qid = txr->txr_qid;
3293
3294 txlimit = sc->sc_tx_intr_process_limit;
3295 rxlimit = sc->sc_rx_intr_process_limit;
3296 qp->qp_workqueue = sc->sc_txrx_workqueue;
3297
3298 more = iavf_handle_queue_common(sc, qp,
3299 txlimit, &txr->txr_intr, rxlimit, &rxr->rxr_intr);
3300
3301 if (more != 0) {
3302 iavf_sched_handle_queue(sc, qp);
3303 } else {
3304 /* for ALTQ */
3305 if (txr->txr_qid == 0)
3306 if_schedule_deferred_start(&sc->sc_ec.ec_if);
3307 softint_schedule(txr->txr_si);
3308
3309 iavf_queue_intr_enable(sc, qid);
3310 }
3311
3312 return 0;
3313 }
3314
3315 static void
3316 iavf_handle_queue_wk(struct work *wk, void *xsc __unused)
3317 {
3318 struct iavf_queue_pair *qp;
3319
3320 qp = container_of(wk, struct iavf_queue_pair, qp_work);
3321 iavf_handle_queue(qp);
3322 }
3323
3324 static void
3325 iavf_handle_queue(void *xqp)
3326 {
3327 struct iavf_queue_pair *qp = xqp;
3328 struct iavf_tx_ring *txr;
3329 struct iavf_rx_ring *rxr;
3330 struct iavf_softc *sc;
3331 unsigned int qid;
3332 u_int txlimit, rxlimit;
3333 int more;
3334
3335 txr = qp->qp_txr;
3336 rxr = qp->qp_rxr;
3337 sc = txr->txr_sc;
3338 qid = txr->txr_qid;
3339
3340 txlimit = sc->sc_tx_process_limit;
3341 rxlimit = sc->sc_rx_process_limit;
3342
3343 more = iavf_handle_queue_common(sc, qp,
3344 txlimit, &txr->txr_defer, rxlimit, &rxr->rxr_defer);
3345
3346 if (more != 0)
3347 iavf_sched_handle_queue(sc, qp);
3348 else
3349 iavf_queue_intr_enable(sc, qid);
3350 }
3351
3352 static void
3353 iavf_tick(void *xsc)
3354 {
3355 struct iavf_softc *sc;
3356 unsigned int i;
3357 int timedout;
3358
3359 sc = xsc;
3360 timedout = 0;
3361
3362 mutex_enter(&sc->sc_cfg_lock);
3363
3364 if (sc->sc_resetting) {
3365 iavf_work_add(sc->sc_workq, &sc->sc_reset_task);
3366 mutex_exit(&sc->sc_cfg_lock);
3367 return;
3368 }
3369
3370 iavf_get_stats(sc);
3371
3372 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
3373 timedout |= iavf_watchdog(sc->sc_qps[i].qp_txr);
3374 }
3375
3376 if (timedout != 0) {
3377 iavf_work_add(sc->sc_workq, &sc->sc_wdto_task);
3378 } else {
3379 callout_schedule(&sc->sc_tick, IAVF_TICK_INTERVAL);
3380 }
3381
3382 mutex_exit(&sc->sc_cfg_lock);
3383 }
3384
3385 static void
3386 iavf_tick_halt(void *unused __unused)
3387 {
3388
3389 /* do nothing */
3390 }
3391
3392 static void
3393 iavf_reset_request(void *xsc)
3394 {
3395 struct iavf_softc *sc = xsc;
3396
3397 iavf_reset_vf(sc);
3398 iavf_reset_start(sc);
3399 }
3400
3401 static void
3402 iavf_reset_start(void *xsc)
3403 {
3404 struct iavf_softc *sc = xsc;
3405 struct ifnet *ifp = &sc->sc_ec.ec_if;
3406
3407 mutex_enter(&sc->sc_cfg_lock);
3408
3409 if (sc->sc_resetting)
3410 goto do_reset;
3411
3412 sc->sc_resetting = true;
3413 if_link_state_change(ifp, LINK_STATE_DOWN);
3414
3415 if (ISSET(ifp->if_flags, IFF_RUNNING)) {
3416 iavf_stop_locked(sc);
3417 sc->sc_reset_up = true;
3418 }
3419
3420 memcpy(sc->sc_enaddr_reset, sc->sc_enaddr, ETHER_ADDR_LEN);
3421
3422 do_reset:
3423 iavf_work_set(&sc->sc_reset_task, iavf_reset, sc);
3424
3425 mutex_exit(&sc->sc_cfg_lock);
3426
3427 iavf_reset((void *)sc);
3428 }
3429
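/*
 * Executed from the reset work: reinitialize the admin queue, wait for
 * the VF to become active again, renegotiate the API version and VF
 * resources, reallocate queue pairs and interrupts if the PF now grants
 * more than are allocated, and reprogram the IRQ map before finishing
 * the reset.
 */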
3430 static void
3431 iavf_reset(void *xsc)
3432 {
3433 struct iavf_softc *sc = xsc;
3434 struct ifnet *ifp = &sc->sc_ec.ec_if;
3435 struct ixl_aq_buf *aqb;
3436 bool realloc_qps, realloc_intrs;
3437
3438 mutex_enter(&sc->sc_cfg_lock);
3439
3440 mutex_enter(&sc->sc_adminq_lock);
3441 iavf_cleanup_admin_queue(sc);
3442 mutex_exit(&sc->sc_adminq_lock);
3443
3444 sc->sc_major_ver = UINT_MAX;
3445 sc->sc_minor_ver = UINT_MAX;
3446 sc->sc_got_vf_resources = 0;
3447 sc->sc_got_irq_map = 0;
3448
3449 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
3450 if (aqb == NULL)
3451 goto failed;
3452
3453 if (iavf_wait_active(sc) != 0) {
3454 log(LOG_WARNING, "%s: VF reset timed out\n",
3455 ifp->if_xname);
3456 goto failed;
3457 }
3458
3459 if (!iavf_arq_fill(sc)) {
3460 log(LOG_ERR, "%s: unable to fill arq descriptors\n",
3461 ifp->if_xname);
3462 goto failed;
3463 }
3464
3465 if (iavf_init_admin_queue(sc) != 0) {
3466 log(LOG_ERR, "%s: unable to initialize admin queue\n",
3467 ifp->if_xname);
3468 goto failed;
3469 }
3470
3471 if (iavf_get_version(sc, aqb) != 0) {
3472 log(LOG_ERR, "%s: unable to get VF interface version\n",
3473 ifp->if_xname);
3474 goto failed;
3475 }
3476
3477 if (iavf_get_vf_resources(sc, aqb) != 0) {
3478 log(LOG_ERR, "%s: timed out waiting for VF resources\n",
3479 ifp->if_xname);
3480 goto failed;
3481 }
3482
3483 if (sc->sc_nqps_alloc < iavf_calc_queue_pair_size(sc)) {
3484 realloc_qps = true;
3485 } else {
3486 realloc_qps = false;
3487 }
3488
3489 if (sc->sc_nintrs < iavf_calc_msix_count(sc)) {
3490 realloc_intrs = true;
3491 } else {
3492 realloc_intrs = false;
3493 }
3494
3495 if (realloc_qps || realloc_intrs)
3496 iavf_teardown_interrupts(sc);
3497
3498 if (realloc_qps) {
3499 iavf_queue_pairs_free(sc);
3500 if (iavf_queue_pairs_alloc(sc) != 0) {
3501 log(LOG_ERR, "%s: failed to allocate queue pairs\n",
3502 ifp->if_xname);
3503 goto failed;
3504 }
3505 }
3506
3507 if (realloc_qps || realloc_intrs) {
3508 if (iavf_setup_interrupts(sc) != 0) {
3509 sc->sc_nintrs = 0;
3510 log(LOG_ERR, "%s: failed to allocate interrupts\n",
3511 ifp->if_xname);
3512 goto failed;
3513 }
3514 log(LOG_INFO, "%s: reallocated queues\n", ifp->if_xname);
3515 }
3516
3517 if (iavf_config_irq_map(sc, aqb) != 0) {
3518 log(LOG_ERR, "%s: timed out configuring IRQ map\n",
3519 ifp->if_xname);
3520 goto failed;
3521 }
3522
3523 mutex_enter(&sc->sc_adminq_lock);
3524 iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
3525 mutex_exit(&sc->sc_adminq_lock);
3526
3527 iavf_reset_finish(sc);
3528
3529 mutex_exit(&sc->sc_cfg_lock);
3530 return;
3531
3532 failed:
3533 mutex_enter(&sc->sc_adminq_lock);
3534 iavf_cleanup_admin_queue(sc);
3535 if (aqb != NULL) {
3536 iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
3537 }
3538 mutex_exit(&sc->sc_adminq_lock);
3539 callout_schedule(&sc->sc_tick, IAVF_TICK_INTERVAL);
3540 mutex_exit(&sc->sc_cfg_lock);
3541 }
3542
3543 static void
3544 iavf_reset_finish(struct iavf_softc *sc)
3545 {
3546 struct ethercom *ec = &sc->sc_ec;
3547 struct ether_multi *enm;
3548 struct ether_multistep step;
3549 struct ifnet *ifp = &ec->ec_if;
3550 struct vlanid_list *vlanidp;
3551 uint8_t enaddr_prev[ETHER_ADDR_LEN], enaddr_next[ETHER_ADDR_LEN];
3552
3553 KASSERT(mutex_owned(&sc->sc_cfg_lock));
3554
3555 callout_stop(&sc->sc_tick);
3556
3557 iavf_intr_enable(sc);
3558
3559 if (!iavf_is_etheranyaddr(sc->sc_enaddr_added)) {
3560 iavf_eth_addr(sc, sc->sc_enaddr_added, IAVF_VC_OP_ADD_ETH_ADDR);
3561 }
3562
3563 ETHER_LOCK(ec);
3564 if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
3565 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
3566 ETHER_NEXT_MULTI(step, enm)) {
3567 iavf_add_multi(sc, enm->enm_addrlo, enm->enm_addrhi);
3568 }
3569 }
3570
3571 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
3572 ETHER_UNLOCK(ec);
3573 iavf_config_vlan_id(sc, vlanidp->vid, IAVF_VC_OP_ADD_VLAN);
3574 ETHER_LOCK(ec);
3575 }
3576 ETHER_UNLOCK(ec);
3577
3578 if (memcmp(sc->sc_enaddr, sc->sc_enaddr_reset, ETHER_ADDR_LEN) != 0) {
3579 memcpy(enaddr_prev, sc->sc_enaddr_reset, sizeof(enaddr_prev));
3580 memcpy(enaddr_next, sc->sc_enaddr, sizeof(enaddr_next));
3581 log(LOG_INFO, "%s: Ethernet address changed to %s\n",
3582 ifp->if_xname, ether_sprintf(enaddr_next));
3583
3584 mutex_exit(&sc->sc_cfg_lock);
3585 IFNET_LOCK(ifp);
3586 kpreempt_disable();
3587 /*XXX we need an API to change ethernet address. */
3588 iavf_replace_lla(ifp, enaddr_prev, enaddr_next);
3589 kpreempt_enable();
3590 IFNET_UNLOCK(ifp);
3591 mutex_enter(&sc->sc_cfg_lock);
3592 }
3593
3594 sc->sc_resetting = false;
3595
3596 if (sc->sc_reset_up) {
3597 iavf_init_locked(sc);
3598 }
3599
3600 if (sc->sc_link_state != LINK_STATE_DOWN) {
3601 if_link_state_change(ifp, sc->sc_link_state);
3602 }
3603
3604 }
3605
3606 static int
3607 iavf_dmamem_alloc(bus_dma_tag_t dmat, struct ixl_dmamem *ixm,
3608 bus_size_t size, bus_size_t align)
3609 {
3610 ixm->ixm_size = size;
3611
3612 if (bus_dmamap_create(dmat, ixm->ixm_size, 1,
3613 ixm->ixm_size, 0,
3614 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
3615 &ixm->ixm_map) != 0)
3616 return 1;
3617 if (bus_dmamem_alloc(dmat, ixm->ixm_size,
3618 align, 0, &ixm->ixm_seg, 1, &ixm->ixm_nsegs,
3619 BUS_DMA_WAITOK) != 0)
3620 goto destroy;
3621 if (bus_dmamem_map(dmat, &ixm->ixm_seg, ixm->ixm_nsegs,
3622 ixm->ixm_size, &ixm->ixm_kva, BUS_DMA_WAITOK) != 0)
3623 goto free;
3624 if (bus_dmamap_load(dmat, ixm->ixm_map, ixm->ixm_kva,
3625 ixm->ixm_size, NULL, BUS_DMA_WAITOK) != 0)
3626 goto unmap;
3627
3628 memset(ixm->ixm_kva, 0, ixm->ixm_size);
3629
3630 return 0;
3631 unmap:
3632 bus_dmamem_unmap(dmat, ixm->ixm_kva, ixm->ixm_size);
3633 free:
3634 bus_dmamem_free(dmat, &ixm->ixm_seg, 1);
3635 destroy:
3636 bus_dmamap_destroy(dmat, ixm->ixm_map);
3637 return 1;
3638 }
3639
3640 static void
3641 iavf_dmamem_free(bus_dma_tag_t dmat, struct ixl_dmamem *ixm)
3642 {
3643
3644 bus_dmamap_unload(dmat, ixm->ixm_map);
3645 bus_dmamem_unmap(dmat, ixm->ixm_kva, ixm->ixm_size);
3646 bus_dmamem_free(dmat, &ixm->ixm_seg, 1);
3647 bus_dmamap_destroy(dmat, ixm->ixm_map);
3648 }
3649
3650 static struct ixl_aq_buf *
3651 iavf_aqb_alloc(bus_dma_tag_t dmat, size_t buflen)
3652 {
3653 struct ixl_aq_buf *aqb;
3654
3655 aqb = kmem_alloc(sizeof(*aqb), KM_NOSLEEP);
3656 if (aqb == NULL)
3657 return NULL;
3658
3659 aqb->aqb_size = buflen;
3660
3661 if (bus_dmamap_create(dmat, aqb->aqb_size, 1,
3662 aqb->aqb_size, 0,
3663 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &aqb->aqb_map) != 0)
3664 goto free;
3665 if (bus_dmamem_alloc(dmat, aqb->aqb_size,
3666 IAVF_AQ_ALIGN, 0, &aqb->aqb_seg, 1, &aqb->aqb_nsegs,
3667 BUS_DMA_WAITOK) != 0)
3668 goto destroy;
3669 if (bus_dmamem_map(dmat, &aqb->aqb_seg, aqb->aqb_nsegs,
3670 aqb->aqb_size, &aqb->aqb_data, BUS_DMA_WAITOK) != 0)
3671 goto dma_free;
3672 if (bus_dmamap_load(dmat, aqb->aqb_map, aqb->aqb_data,
3673 aqb->aqb_size, NULL, BUS_DMA_WAITOK) != 0)
3674 goto unmap;
3675
3676 return aqb;
3677 unmap:
3678 bus_dmamem_unmap(dmat, aqb->aqb_data, aqb->aqb_size);
3679 dma_free:
3680 bus_dmamem_free(dmat, &aqb->aqb_seg, 1);
3681 destroy:
3682 bus_dmamap_destroy(dmat, aqb->aqb_map);
3683 free:
3684 kmem_free(aqb, sizeof(*aqb));
3685
3686 return NULL;
3687 }
3688
3689 static void
3690 iavf_aqb_free(bus_dma_tag_t dmat, struct ixl_aq_buf *aqb)
3691 {
3692
3693 bus_dmamap_unload(dmat, aqb->aqb_map);
3694 bus_dmamem_unmap(dmat, aqb->aqb_data, aqb->aqb_size);
3695 bus_dmamem_free(dmat, &aqb->aqb_seg, 1);
3696 bus_dmamap_destroy(dmat, aqb->aqb_map);
3697 kmem_free(aqb, sizeof(*aqb));
3698 }
3699
3700 static struct ixl_aq_buf *
3701 iavf_aqb_get_locked(struct ixl_aq_bufs *q)
3702 {
3703 struct ixl_aq_buf *aqb;
3704
3705 aqb = SIMPLEQ_FIRST(q);
3706 if (aqb != NULL) {
3707 SIMPLEQ_REMOVE(q, aqb, ixl_aq_buf, aqb_entry);
3708 }
3709
3710 return aqb;
3711 }
3712
3713 static struct ixl_aq_buf *
3714 iavf_aqb_get(struct iavf_softc *sc, struct ixl_aq_bufs *q)
3715 {
3716 struct ixl_aq_buf *aqb;
3717
3718 if (q != NULL) {
3719 mutex_enter(&sc->sc_adminq_lock);
3720 aqb = iavf_aqb_get_locked(q);
3721 mutex_exit(&sc->sc_adminq_lock);
3722 } else {
3723 aqb = NULL;
3724 }
3725
3726 if (aqb == NULL) {
3727 aqb = iavf_aqb_alloc(sc->sc_dmat, IAVF_AQ_BUFLEN);
3728 }
3729
3730 return aqb;
3731 }
3732
3733 static void
3734 iavf_aqb_put_locked(struct ixl_aq_bufs *q, struct ixl_aq_buf *aqb)
3735 {
3736
3737 SIMPLEQ_INSERT_TAIL(q, aqb, aqb_entry);
3738 }
3739
3740 static void
3741 iavf_aqb_clean(struct ixl_aq_bufs *q, bus_dma_tag_t dmat)
3742 {
3743 struct ixl_aq_buf *aqb;
3744
3745 while ((aqb = SIMPLEQ_FIRST(q)) != NULL) {
3746 SIMPLEQ_REMOVE(q, aqb, ixl_aq_buf, aqb_entry);
3747 iavf_aqb_free(dmat, aqb);
3748 }
3749 }
3750
3751 static const char *
3752 iavf_aq_vc_opcode_str(const struct ixl_aq_desc *iaq)
3753 {
3754
3755 switch (iavf_aq_vc_get_opcode(iaq)) {
3756 case IAVF_VC_OP_VERSION:
3757 return "GET_VERSION";
3758 case IAVF_VC_OP_RESET_VF:
3759 return "RESET_VF";
3760 case IAVF_VC_OP_GET_VF_RESOURCES:
3761 return "GET_VF_RESOURCES";
3762 case IAVF_VC_OP_CONFIG_TX_QUEUE:
3763 return "CONFIG_TX_QUEUE";
3764 case IAVF_VC_OP_CONFIG_RX_QUEUE:
3765 return "CONFIG_RX_QUEUE";
3766 case IAVF_VC_OP_CONFIG_VSI_QUEUES:
3767 return "CONFIG_VSI_QUEUES";
3768 case IAVF_VC_OP_CONFIG_IRQ_MAP:
3769 return "CONFIG_IRQ_MAP";
3770 case IAVF_VC_OP_ENABLE_QUEUES:
3771 return "ENABLE_QUEUES";
3772 case IAVF_VC_OP_DISABLE_QUEUES:
3773 return "DISABLE_QUEUES";
3774 case IAVF_VC_OP_ADD_ETH_ADDR:
3775 return "ADD_ETH_ADDR";
3776 case IAVF_VC_OP_DEL_ETH_ADDR:
3777 return "DEL_ETH_ADDR";
3778 case IAVF_VC_OP_CONFIG_PROMISC:
3779 return "CONFIG_PROMISC";
3780 case IAVF_VC_OP_GET_STATS:
3781 return "GET_STATS";
3782 case IAVF_VC_OP_EVENT:
3783 return "EVENT";
3784 case IAVF_VC_OP_CONFIG_RSS_KEY:
3785 return "CONFIG_RSS_KEY";
3786 case IAVF_VC_OP_GET_RSS_HENA_CAPS:
3787 return "GET_RSS_HENA_CAPS";
3788 case IAVF_VC_OP_SET_RSS_HENA:
3789 return "SET_RSS_HENA";
3790 case IAVF_VC_OP_ENABLE_VLAN_STRIP:
3791 return "ENABLE_VLAN_STRIPPING";
3792 case IAVF_VC_OP_DISABLE_VLAN_STRIP:
3793 return "DISABLE_VLAN_STRIPPING";
3794 case IAVF_VC_OP_REQUEST_QUEUES:
3795 return "REQUEST_QUEUES";
3796 }
3797
3798 return "unknown";
3799 }
3800
3801 static void
3802 iavf_aq_dump(const struct iavf_softc *sc, const struct ixl_aq_desc *iaq,
3803 const char *msg)
3804 {
3805 char buf[512];
3806 size_t len;
3807
3808 len = sizeof(buf);
3809 buf[--len] = '\0';
3810
3811 device_printf(sc->sc_dev, "%s\n", msg);
3812 snprintb(buf, len, IXL_AQ_FLAGS_FMT, le16toh(iaq->iaq_flags));
3813 device_printf(sc->sc_dev, "flags %s opcode %04x\n",
3814 buf, le16toh(iaq->iaq_opcode));
3815 device_printf(sc->sc_dev, "datalen %u retval %u\n",
3816 le16toh(iaq->iaq_datalen), le16toh(iaq->iaq_retval));
3817 device_printf(sc->sc_dev, "vc-opcode %u (%s)\n",
3818 iavf_aq_vc_get_opcode(iaq),
3819 iavf_aq_vc_opcode_str(iaq));
3820 device_printf(sc->sc_dev, "vc-retval %u\n",
3821 iavf_aq_vc_get_retval(iaq));
3822 device_printf(sc->sc_dev, "cookie %016" PRIx64 "\n", iaq->iaq_cookie);
3823 device_printf(sc->sc_dev, "%08x %08x %08x %08x\n",
3824 le32toh(iaq->iaq_param[0]), le32toh(iaq->iaq_param[1]),
3825 le32toh(iaq->iaq_param[2]), le32toh(iaq->iaq_param[3]));
3826 }
3827
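/*
 * Post idle buffers into the empty admin receive queue slots and bump
 * the tail register.  Returns nonzero if at least one descriptor was
 * posted.
 */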
3828 static int
3829 iavf_arq_fill(struct iavf_softc *sc)
3830 {
3831 struct ixl_aq_buf *aqb;
3832 struct ixl_aq_desc *arq, *iaq;
3833 unsigned int prod = sc->sc_arq_prod;
3834 unsigned int n;
3835 int filled = 0;
3836
3837 n = ixl_rxr_unrefreshed(sc->sc_arq_prod, sc->sc_arq_cons,
3838 IAVF_AQ_NUM);
3839
3840 if (__predict_false(n <= 0))
3841 return 0;
3842
3843 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3844 0, IXL_DMA_LEN(&sc->sc_arq),
3845 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3846
3847 arq = IXL_DMA_KVA(&sc->sc_arq);
3848
3849 do {
3850 iaq = &arq[prod];
3851
3852 if (ixl_aq_has_dva(iaq)) {
3853 /* already filled */
3854 break;
3855 }
3856
3857 aqb = iavf_aqb_get_locked(&sc->sc_arq_idle);
3858 if (aqb == NULL)
3859 break;
3860
3861 memset(aqb->aqb_data, 0, aqb->aqb_size);
3862
3863 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0,
3864 aqb->aqb_size, BUS_DMASYNC_PREREAD);
3865
3866 iaq->iaq_flags = htole16(IXL_AQ_BUF |
3867 (aqb->aqb_size > I40E_AQ_LARGE_BUF ?
3868 IXL_AQ_LB : 0));
3869 iaq->iaq_opcode = 0;
3870 iaq->iaq_datalen = htole16(aqb->aqb_size);
3871 iaq->iaq_retval = 0;
3872 iaq->iaq_cookie = 0;
3873 iaq->iaq_param[0] = 0;
3874 iaq->iaq_param[1] = 0;
3875 ixl_aq_dva(iaq, IXL_AQB_DVA(aqb));
3876 iavf_aqb_put_locked(&sc->sc_arq_live, aqb);
3877
3878 prod++;
3879 prod &= IAVF_AQ_MASK;
3880 filled = 1;
3881 } while (--n);
3882
3883 sc->sc_arq_prod = prod;
3884
3885 if (filled) {
3886 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3887 0, IXL_DMA_LEN(&sc->sc_arq),
3888 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3889 iavf_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
3890 }
3891
3892 return filled;
3893 }
3894
3895 static int
3896 iavf_arq_wait(struct iavf_softc *sc, uint32_t opcode)
3897 {
3898 int error;
3899
3900 KASSERT(mutex_owned(&sc->sc_adminq_lock));
3901
3902 while ((error = cv_timedwait(&sc->sc_adminq_cv,
3903 &sc->sc_adminq_lock, mstohz(IAVF_EXEC_TIMEOUT))) == 0) {
3904 if (opcode == sc->sc_arq_opcode)
3905 break;
3906 }
3907
3908 if (error != 0 &&
3909 atomic_load_relaxed(&sc->sc_debuglevel) >= 2)
3910 device_printf(sc->sc_dev, "cv_timedwait error=%d\n", error);
3911
3912 return error;
3913 }
3914
3915 static void
3916 iavf_arq_refill(void *xsc)
3917 {
3918 struct iavf_softc *sc = xsc;
3919 struct ixl_aq_bufs aqbs;
3920 struct ixl_aq_buf *aqb;
3921 unsigned int n, i;
3922
3923 mutex_enter(&sc->sc_adminq_lock);
3924 iavf_arq_fill(sc);
3925 n = ixl_rxr_unrefreshed(sc->sc_arq_prod, sc->sc_arq_cons,
3926 IAVF_AQ_NUM);
3927 mutex_exit(&sc->sc_adminq_lock);
3928
3929 if (n == 0)
3930 return;
3931
3932 if (atomic_load_relaxed(&sc->sc_debuglevel) >= 1)
3933 device_printf(sc->sc_dev, "Allocate %d bufs for arq\n", n);
3934
3935 SIMPLEQ_INIT(&aqbs);
3936 for (i = 0; i < n; i++) {
3937 aqb = iavf_aqb_get(sc, NULL);
3938 if (aqb == NULL)
3939 continue;
3940 SIMPLEQ_INSERT_TAIL(&aqbs, aqb, aqb_entry);
3941 }
3942
3943 mutex_enter(&sc->sc_adminq_lock);
3944 while ((aqb = SIMPLEQ_FIRST(&aqbs)) != NULL) {
3945 SIMPLEQ_REMOVE(&aqbs, aqb, ixl_aq_buf, aqb_entry);
3946 iavf_aqb_put_locked(&sc->sc_arq_idle, aqb);
3947 }
3948 iavf_arq_fill(sc);
3949 mutex_exit(&sc->sc_adminq_lock);
3950 }
3951
3952 static uint32_t
3953 iavf_process_arq(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
3954 struct ixl_aq_buf *aqb)
3955 {
3956 uint32_t vc_retval, vc_opcode;
3957 int dbg;
3958
3959 dbg = atomic_load_relaxed(&sc->sc_debuglevel);
3960 if (dbg >= 3)
3961 iavf_aq_dump(sc, iaq, "arq proc");
3962
3963 if (dbg >= 2) {
3964 vc_retval = iavf_aq_vc_get_retval(iaq);
3965 if (vc_retval != IAVF_VC_RC_SUCCESS) {
3966 device_printf(sc->sc_dev, "%s failed=%d(arq)\n",
3967 iavf_aq_vc_opcode_str(iaq), vc_retval);
3968 }
3969 }
3970
3971 vc_opcode = iavf_aq_vc_get_opcode(iaq);
3972 switch (vc_opcode) {
3973 case IAVF_VC_OP_VERSION:
3974 iavf_process_version(sc, iaq, aqb);
3975 break;
3976 case IAVF_VC_OP_GET_VF_RESOURCES:
3977 iavf_process_vf_resources(sc, iaq, aqb);
3978 break;
3979 case IAVF_VC_OP_CONFIG_IRQ_MAP:
3980 iavf_process_irq_map(sc, iaq);
3981 break;
3982 case IAVF_VC_OP_EVENT:
3983 iavf_process_vc_event(sc, iaq, aqb);
3984 break;
3985 case IAVF_VC_OP_GET_STATS:
3986 iavf_process_stats(sc, iaq, aqb);
3987 break;
3988 case IAVF_VC_OP_REQUEST_QUEUES:
3989 iavf_process_req_queues(sc, iaq, aqb);
3990 break;
3991 }
3992
3993 return vc_opcode;
3994 }
3995
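/*
 * Polling-mode admin receive queue processing, used while waiting for a
 * specific virtchnl opcode before interrupts are usable.  Retries up to
 * "retry" times, delaying 1 ms while the queue is empty; returns 0 once
 * wait_opcode is seen, EIO on a bogus head register, ETIMEDOUT
 * otherwise.
 */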
3996 static int
3997 iavf_arq_poll(struct iavf_softc *sc, uint32_t wait_opcode, int retry)
3998 {
3999 struct ixl_aq_desc *arq, *iaq;
4000 struct ixl_aq_buf *aqb;
4001 unsigned int cons = sc->sc_arq_cons;
4002 unsigned int prod;
4003 uint32_t vc_opcode;
4004 bool received;
4005 int i;
4006
4007 for (i = 0, received = false; i < retry && !received; i++) {
4008 prod = iavf_rd(sc, sc->sc_aq_regs->arq_head);
4009 prod &= sc->sc_aq_regs->arq_head_mask;
4010
4011 if (prod == cons) {
4012 delaymsec(1);
4013 continue;
4014 }
4015
4016 if (prod >= IAVF_AQ_NUM) {
4017 return EIO;
4018 }
4019
4020 arq = IXL_DMA_KVA(&sc->sc_arq);
4021
4022 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
4023 0, IXL_DMA_LEN(&sc->sc_arq),
4024 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
4025
4026 do {
4027 iaq = &arq[cons];
4028 aqb = iavf_aqb_get_locked(&sc->sc_arq_live);
4029 KASSERT(aqb != NULL);
4030
4031 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0,
4032 IAVF_AQ_BUFLEN, BUS_DMASYNC_POSTREAD);
4033
4034 vc_opcode = iavf_process_arq(sc, iaq, aqb);
4035
4036 if (vc_opcode == wait_opcode)
4037 received = true;
4038
4039 memset(iaq, 0, sizeof(*iaq));
4040 iavf_aqb_put_locked(&sc->sc_arq_idle, aqb);
4041
4042 cons++;
4043 cons &= IAVF_AQ_MASK;
4044
4045 } while (cons != prod);
4046
4047 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
4048 0, IXL_DMA_LEN(&sc->sc_arq),
4049 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4050
4051 sc->sc_arq_cons = cons;
4052 iavf_arq_fill(sc);
4053
4054 }
4055
4056 if (!received)
4057 return ETIMEDOUT;
4058
4059 return 0;
4060 }
4061
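/*
 * Interrupt-path admin receive queue processing: handle each received
 * descriptor, wake iavf_arq_wait() waiters for request/response
 * opcodes, and schedule the buffer refill work.  A head value beyond
 * the ring indicates a reset in progress and triggers the reset task.
 */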
4062 static int
4063 iavf_arq(struct iavf_softc *sc)
4064 {
4065 struct ixl_aq_desc *arq, *iaq;
4066 struct ixl_aq_buf *aqb;
4067 unsigned int cons = sc->sc_arq_cons;
4068 unsigned int prod;
4069 uint32_t vc_opcode;
4070
4071 KASSERT(mutex_owned(&sc->sc_adminq_lock));
4072
4073 prod = iavf_rd(sc, sc->sc_aq_regs->arq_head);
4074 prod &= sc->sc_aq_regs->arq_head_mask;
4075
4076 	/* the head register can read back a bogus value while the VF is resetting */
4077 if (prod >= IAVF_AQ_NUM) {
4078 iavf_work_set(&sc->sc_reset_task, iavf_reset_start, sc);
4079 iavf_work_add(sc->sc_workq, &sc->sc_reset_task);
4080 return 0;
4081 }
4082
4083 if (cons == prod)
4084 return 0;
4085
4086 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
4087 0, IXL_DMA_LEN(&sc->sc_arq),
4088 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
4089
4090 arq = IXL_DMA_KVA(&sc->sc_arq);
4091
4092 do {
4093 iaq = &arq[cons];
4094 aqb = iavf_aqb_get_locked(&sc->sc_arq_live);
4095
4096 KASSERT(aqb != NULL);
4097
4098 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IAVF_AQ_BUFLEN,
4099 BUS_DMASYNC_POSTREAD);
4100
4101 vc_opcode = iavf_process_arq(sc, iaq, aqb);
4102
4103 switch (vc_opcode) {
4104 case IAVF_VC_OP_CONFIG_TX_QUEUE:
4105 case IAVF_VC_OP_CONFIG_RX_QUEUE:
4106 case IAVF_VC_OP_CONFIG_VSI_QUEUES:
4107 case IAVF_VC_OP_ENABLE_QUEUES:
4108 case IAVF_VC_OP_DISABLE_QUEUES:
4109 case IAVF_VC_OP_GET_RSS_HENA_CAPS:
4110 case IAVF_VC_OP_SET_RSS_HENA:
4111 case IAVF_VC_OP_ADD_ETH_ADDR:
4112 case IAVF_VC_OP_DEL_ETH_ADDR:
4113 case IAVF_VC_OP_CONFIG_PROMISC:
4114 case IAVF_VC_OP_ADD_VLAN:
4115 case IAVF_VC_OP_DEL_VLAN:
4116 case IAVF_VC_OP_ENABLE_VLAN_STRIP:
4117 case IAVF_VC_OP_DISABLE_VLAN_STRIP:
4118 case IAVF_VC_OP_CONFIG_RSS_KEY:
4119 case IAVF_VC_OP_CONFIG_RSS_LUT:
4120 sc->sc_arq_retval = iavf_aq_vc_get_retval(iaq);
4121 sc->sc_arq_opcode = vc_opcode;
4122 cv_signal(&sc->sc_adminq_cv);
4123 break;
4124 }
4125
4126 memset(iaq, 0, sizeof(*iaq));
4127 iavf_aqb_put_locked(&sc->sc_arq_idle, aqb);
4128
4129 cons++;
4130 cons &= IAVF_AQ_MASK;
4131 } while (cons != prod);
4132
4133 sc->sc_arq_cons = cons;
4134 iavf_work_add(sc->sc_workq, &sc->sc_arq_refill);
4135
4136 return 1;
4137 }
4138
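/*
 * Copy one command descriptor into the admin transmit queue, attach the
 * optional data buffer, and advance the hardware tail register.
 * Returns the new producer index.
 */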
4139 static int
4140 iavf_atq_post(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
4141 struct ixl_aq_buf *aqb)
4142 {
4143 struct ixl_aq_desc *atq, *slot;
4144 unsigned int prod;
4145
4146 atq = IXL_DMA_KVA(&sc->sc_atq);
4147 prod = sc->sc_atq_prod;
4148 slot = &atq[prod];
4149
4150 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
4151 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
4152
4153 *slot = *iaq;
4154 slot->iaq_flags |= htole16(IXL_AQ_SI);
4155 if (aqb != NULL) {
4156 ixl_aq_dva(slot, IXL_AQB_DVA(aqb));
4157 bus_dmamap_sync(sc->sc_dmat, IXL_AQB_MAP(aqb),
4158 0, IXL_AQB_LEN(aqb), BUS_DMASYNC_PREWRITE);
4159 iavf_aqb_put_locked(&sc->sc_atq_live, aqb);
4160 } else {
4161 ixl_aq_dva(slot, (bus_addr_t)0);
4162 }
4163
4164 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
4165 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
4166
4167 if (atomic_load_relaxed(&sc->sc_debuglevel) >= 3)
4168 iavf_aq_dump(sc, slot, "post");
4169
4170 prod++;
4171 prod &= IAVF_AQ_MASK;
4172 sc->sc_atq_prod = prod;
4173 iavf_wr(sc, sc->sc_aq_regs->atq_tail, prod);
4174 return prod;
4175 }
4176
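/*
 * Busy-wait until the hardware has consumed the most recently posted
 * ATQ descriptor, then check its return code.  "tm" is the timeout in
 * milliseconds.
 */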
4177 static int
4178 iavf_atq_poll(struct iavf_softc *sc, unsigned int tm)
4179 {
4180 struct ixl_aq_desc *atq, *slot;
4181 struct ixl_aq_desc iaq;
4182 unsigned int prod;
4183 unsigned int t;
4184 int dbg;
4185
4186 dbg = atomic_load_relaxed(&sc->sc_debuglevel);
4187 atq = IXL_DMA_KVA(&sc->sc_atq);
4188 prod = sc->sc_atq_prod;
4189 slot = &atq[prod];
4190 t = 0;
4191
4192 while (iavf_rd(sc, sc->sc_aq_regs->atq_head) != prod) {
4193 delaymsec(1);
4194
4195 if (t++ > tm) {
4196 if (dbg >= 2) {
4197 device_printf(sc->sc_dev,
4198 "atq timedout\n");
4199 }
4200 return ETIMEDOUT;
4201 }
4202 }
4203
4204 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
4205 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTREAD);
4206 iaq = *slot;
4207 memset(slot, 0, sizeof(*slot));
4208 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
4209 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREREAD);
4210
4211 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4212 if (dbg >= 2) {
4213 device_printf(sc->sc_dev,
4214 "atq retcode=0x%04x\n", le16toh(iaq.iaq_retval));
4215 }
4216 return EIO;
4217 }
4218
4219 return 0;
4220 }
4221
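/*
 * Reclaim completed admin transmit queue slots and return their data
 * buffers to the idle list.  Called with sc_adminq_lock held.
 */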
4222 static void
4223 iavf_atq_done(struct iavf_softc *sc)
4224 {
4225 struct ixl_aq_desc *atq, *slot;
4226 struct ixl_aq_buf *aqb;
4227 unsigned int cons;
4228 unsigned int prod;
4229
4230 KASSERT(mutex_owned(&sc->sc_adminq_lock));
4231
4232 prod = sc->sc_atq_prod;
4233 cons = sc->sc_atq_cons;
4234
4235 if (prod == cons)
4236 return;
4237
4238 atq = IXL_DMA_KVA(&sc->sc_atq);
4239
4240 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
4241 0, IXL_DMA_LEN(&sc->sc_atq),
4242 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
4243
4244 do {
4245 slot = &atq[cons];
4246 if (!ISSET(slot->iaq_flags, htole16(IXL_AQ_DD)))
4247 break;
4248
4249 if (ixl_aq_has_dva(slot) &&
4250 (aqb = iavf_aqb_get_locked(&sc->sc_atq_live)) != NULL) {
4251 bus_dmamap_sync(sc->sc_dmat, IXL_AQB_MAP(aqb),
4252 0, IXL_AQB_LEN(aqb), BUS_DMASYNC_POSTWRITE);
4253 iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
4254 }
4255
4256 memset(slot, 0, sizeof(*slot));
4257
4258 cons++;
4259 cons &= IAVF_AQ_MASK;
4260 } while (cons != prod);
4261
4262 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
4263 0, IXL_DMA_LEN(&sc->sc_atq),
4264 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4265
4266 sc->sc_atq_cons = cons;
4267 }
4268
4269 static int
4270 iavf_adminq_poll(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
4271 struct ixl_aq_buf *aqb, int retry)
4272 {
4273 int error;
4274
4275 mutex_enter(&sc->sc_adminq_lock);
4276 error = iavf_adminq_poll_locked(sc, iaq, aqb, retry);
4277 mutex_exit(&sc->sc_adminq_lock);
4278
4279 return error;
4280 }
4281
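/*
 * Post a command and poll both admin queues for its completion and
 * reply.  This synchronous path is used where sleeping on the admin
 * queue cv is not possible, typically during attach and reset.
 */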
4282 static int
4283 iavf_adminq_poll_locked(struct iavf_softc *sc,
4284 struct ixl_aq_desc *iaq, struct ixl_aq_buf *aqb, int retry)
4285 {
4286 uint32_t opcode;
4287 int error;
4288
4289 KASSERT(!sc->sc_attached || mutex_owned(&sc->sc_adminq_lock));
4290
4291 opcode = iavf_aq_vc_get_opcode(iaq);
4292
4293 iavf_atq_post(sc, iaq, aqb);
4294
4295 error = iavf_atq_poll(sc, retry);
4296
4297 /*
4298 * Reclaim the aqb used for this command; it was added to
4299 * sc_atq_live in iavf_atq_post() and must be collected here
4300 * whether or not the command succeeded.
4301 */
4302 if (aqb != NULL) {
4303 (void)iavf_aqb_get_locked(&sc->sc_atq_live);
4304 bus_dmamap_sync(sc->sc_dmat, IXL_AQB_MAP(aqb),
4305 0, IXL_AQB_LEN(aqb), BUS_DMASYNC_POSTWRITE);
4306 }
4307
4308 if (error)
4309 return error;
4310
4311 error = iavf_arq_poll(sc, opcode, retry);
4312
4313 if (error != 0 &&
4314 atomic_load_relaxed(&sc->sc_debuglevel) >= 1) {
4315 device_printf(sc->sc_dev, "%s failed=%d(polling)\n",
4316 iavf_aq_vc_opcode_str(iaq), error);
4317 }
4318
4319 return error;
4320 }
4321
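/*
 * Post a command and sleep on sc_adminq_cv until iavf_arq() has
 * processed the matching reply, then return the virtchnl return code.
 */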
4322 static int
4323 iavf_adminq_exec(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
4324 struct ixl_aq_buf *aqb)
4325 {
4326 int error;
4327 uint32_t opcode;
4328
4329 opcode = iavf_aq_vc_get_opcode(iaq);
4330
4331 mutex_enter(&sc->sc_adminq_lock);
4332 iavf_atq_post(sc, iaq, aqb);
4333
4334 error = iavf_arq_wait(sc, opcode);
4335 if (error == 0) {
4336 error = sc->sc_arq_retval;
4337 if (error != IAVF_VC_RC_SUCCESS &&
4338 atomic_load_relaxed(&sc->sc_debuglevel) >= 1) {
4339 device_printf(sc->sc_dev, "%s failed=%d\n",
4340 iavf_aq_vc_opcode_str(iaq), error);
4341 }
4342 }
4343
4344 mutex_exit(&sc->sc_adminq_lock);
4345 return error;
4346 }
4347
4348 static void
4349 iavf_process_version(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
4350 struct ixl_aq_buf *aqb)
4351 {
4352 struct iavf_vc_version_info *ver;
4353
4354 ver = (struct iavf_vc_version_info *)aqb->aqb_data;
4355 sc->sc_major_ver = le32toh(ver->major);
4356 sc->sc_minor_ver = le32toh(ver->minor);
4357 }
4358
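/*
 * Record the VF resources advertised by the PF: interrupt vectors,
 * offload capabilities, maximum MTU, VSI/queue identifiers and the
 * default MAC address (or a locally generated one if none is set).
 */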
4359 static void
4360 iavf_process_vf_resources(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
4361 struct ixl_aq_buf *aqb)
4362 {
4363 struct iavf_vc_vf_resource *vf_res;
4364 struct iavf_vc_vsi_resource *vsi_res;
4365 uint8_t *enaddr;
4366 int mtu, dbg;
4367 char buf[512];
4368
4369 dbg = atomic_load_relaxed(&sc->sc_debuglevel);
4370 sc->sc_got_vf_resources = 1;
4371
4372 vf_res = aqb->aqb_data;
4373 sc->sc_max_vectors = le16toh(vf_res->max_vectors);
4374 if (le16toh(vf_res->num_vsis) == 0) {
4375 if (dbg >= 1) {
4376 device_printf(sc->sc_dev, "no vsi available\n");
4377 }
4378 return;
4379 }
4380 sc->sc_vf_cap = le32toh(vf_res->offload_flags);
4381 if (dbg >= 2) {
4382 snprintb(buf, sizeof(buf),
4383 IAVF_VC_OFFLOAD_FMT, sc->sc_vf_cap);
4384 device_printf(sc->sc_dev, "VF cap=%s\n", buf);
4385 }
4386
4387 mtu = le16toh(vf_res->max_mtu);
4388 if (IAVF_MIN_MTU < mtu && mtu < IAVF_MAX_MTU) {
4389 sc->sc_max_mtu = MIN(IAVF_MAX_MTU, mtu);
4390 }
4391
4392 vsi_res = &vf_res->vsi_res[0];
4393 sc->sc_vsi_id = le16toh(vsi_res->vsi_id);
4394 sc->sc_vf_id = le32toh(iaq->iaq_param[0]);
4395 sc->sc_qset_handle = le16toh(vsi_res->qset_handle);
4396 sc->sc_nqps_vsi = le16toh(vsi_res->num_queue_pairs);
4397 if (!iavf_is_etheranyaddr(vsi_res->default_mac)) {
4398 enaddr = vsi_res->default_mac;
4399 } else {
4400 enaddr = sc->sc_enaddr_fake;
4401 }
4402 memcpy(sc->sc_enaddr, enaddr, ETHER_ADDR_LEN);
4403 }
4404
4405 static void
4406 iavf_process_irq_map(struct iavf_softc *sc, struct ixl_aq_desc *iaq)
4407 {
4408 uint32_t retval;
4409
4410 retval = iavf_aq_vc_get_retval(iaq);
4411 if (retval != IAVF_VC_RC_SUCCESS) {
4412 return;
4413 }
4414
4415 sc->sc_got_irq_map = 1;
4416 }
4417
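/*
 * Handle asynchronous PF events: update link state and speed on
 * LINK_CHANGE, and schedule a reset on RESET_IMPENDING.
 */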
4418 static void
4419 iavf_process_vc_event(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
4420 struct ixl_aq_buf *aqb)
4421 {
4422 struct iavf_vc_pf_event *event;
4423 struct ifnet *ifp = &sc->sc_ec.ec_if;
4424 const struct iavf_link_speed *speed;
4425 int link;
4426
4427 event = aqb->aqb_data;
4428 switch (event->event) {
4429 case IAVF_VC_EVENT_LINK_CHANGE:
4430 sc->sc_media_status = IFM_AVALID;
4431 sc->sc_media_active = IFM_ETHER;
4432 link = LINK_STATE_DOWN;
4433 if (event->link_status) {
4434 link = LINK_STATE_UP;
4435 sc->sc_media_status |= IFM_ACTIVE;
4436
4437 ifp->if_baudrate = 0;
4438 speed = iavf_find_link_speed(sc, event->link_speed);
4439 if (speed != NULL) {
4440 sc->sc_media_active |= speed->media;
4441 ifp->if_baudrate = speed->baudrate;
4442 }
4443 }
4444
4445 if (sc->sc_link_state != link) {
4446 sc->sc_link_state = link;
4447 if (sc->sc_attached) {
4448 if_link_state_change(ifp, link);
4449 }
4450 }
4451 break;
4452 case IAVF_VC_EVENT_RESET_IMPENDING:
4453 log(LOG_INFO, "%s: Reset warning received from the PF\n",
4454 ifp->if_xname);
4455 iavf_work_set(&sc->sc_reset_task, iavf_reset_request, sc);
4456 iavf_work_add(sc->sc_workq, &sc->sc_reset_task);
4457 break;
4458 }
4459 }
4460
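/*
 * Record the VSI statistics returned by GET_STATS in the driver's
 * event counters.  Called with sc_adminq_lock held.
 */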
4461 static void
4462 iavf_process_stats(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
4463 struct ixl_aq_buf *aqb)
4464 {
4465 struct iavf_stat_counters *isc;
4466 struct i40e_eth_stats *st;
4467
4468 KASSERT(mutex_owned(&sc->sc_adminq_lock));
4469
4470 st = aqb->aqb_data;
4471 isc = &sc->sc_stat_counters;
4472
4473 isc->isc_rx_bytes.ev_count = st->rx_bytes;
4474 isc->isc_rx_unicast.ev_count = st->rx_unicast;
4475 isc->isc_rx_multicast.ev_count = st->rx_multicast;
4476 isc->isc_rx_broadcast.ev_count = st->rx_broadcast;
4477 isc->isc_rx_discards.ev_count = st->rx_discards;
4478 isc->isc_rx_unknown_protocol.ev_count = st->rx_unknown_protocol;
4479
4480 isc->isc_tx_bytes.ev_count = st->tx_bytes;
4481 isc->isc_tx_unicast.ev_count = st->tx_unicast;
4482 isc->isc_tx_multicast.ev_count = st->tx_multicast;
4483 isc->isc_tx_broadcast.ev_count = st->tx_broadcast;
4484 isc->isc_tx_discards.ev_count = st->tx_discards;
4485 isc->isc_tx_errors.ev_count = st->tx_errors;
4486 }
4487
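/*
 * Handle the PF reply to a REQUEST_QUEUES command.  If the PF can
 * provide more queue pairs than the current VSI owns, remember the
 * offered maximum and schedule one retry of the request.
 */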
4488 static void
4489 iavf_process_req_queues(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
4490 struct ixl_aq_buf *aqb)
4491 {
4492 struct iavf_vc_res_request *req;
4493 struct ifnet *ifp;
4494 uint32_t vc_retval;
4495
4496 ifp = &sc->sc_ec.ec_if;
4497 req = aqb->aqb_data;
4498
4499 vc_retval = iavf_aq_vc_get_retval(iaq);
4500 if (vc_retval != IAVF_VC_RC_SUCCESS) {
4501 return;
4502 }
4503
4504 	if (req->num_queue_pairs < sc->sc_nqps_req) {
4505 log(LOG_INFO,
4506 "%s: requested %d queues, but only %d left.\n",
4507 ifp->if_xname,
4508 sc->sc_nqps_req, req->num_queue_pairs);
4509 }
4510
4511 if (sc->sc_nqps_vsi < req->num_queue_pairs) {
4512 if (!sc->sc_req_queues_retried) {
4513 /* req->num_queue_pairs indicates max qps */
4514 sc->sc_nqps_req = req->num_queue_pairs;
4515
4516 sc->sc_req_queues_retried = true;
4517 iavf_work_add(sc->sc_workq, &sc->sc_req_queues_task);
4518 }
4519 }
4520 }
4521
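/*
 * Exchange virtchnl API versions with the PF.  The reply is recorded
 * by iavf_process_version().
 */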
4522 static int
4523 iavf_get_version(struct iavf_softc *sc, struct ixl_aq_buf *aqb)
4524 {
4525 struct ixl_aq_desc iaq;
4526 struct iavf_vc_version_info *ver;
4527 int error;
4528
4529 memset(&iaq, 0, sizeof(iaq));
4530 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4531 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4532 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_VERSION);
4533 iaq.iaq_datalen = htole16(sizeof(struct iavf_vc_version_info));
4534
4535 ver = IXL_AQB_KVA(aqb);
4536 ver->major = htole32(IAVF_VF_MAJOR);
4537 ver->minor = htole32(IAVF_VF_MINOR);
4538
4539 sc->sc_major_ver = UINT_MAX;
4540 sc->sc_minor_ver = UINT_MAX;
4541
4542 if (sc->sc_attached) {
4543 error = iavf_adminq_poll(sc, &iaq, aqb, 250);
4544 } else {
4545 error = iavf_adminq_poll_locked(sc, &iaq, aqb, 250);
4546 }
4547
4548 if (error)
4549 return -1;
4550
4551 return 0;
4552 }
4553
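/*
 * Request the VF resource description, advertising the offload
 * capabilities this driver supports when the API version allows it.
 */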
4554 static int
4555 iavf_get_vf_resources(struct iavf_softc *sc, struct ixl_aq_buf *aqb)
4556 {
4557 struct ixl_aq_desc iaq;
4558 uint32_t *cap, cap0;
4559 int error;
4560
4561 memset(&iaq, 0, sizeof(iaq));
4562 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4563 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4564 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_GET_VF_RESOURCES);
4565
4566 if (sc->sc_major_ver > 0) {
4567 cap0 = IAVF_VC_OFFLOAD_L2 |
4568 IAVF_VC_OFFLOAD_VLAN |
4569 IAVF_VC_OFFLOAD_RSS_PF |
4570 IAVF_VC_OFFLOAD_REQ_QUEUES;
4571
4572 cap = IXL_AQB_KVA(aqb);
4573 *cap = htole32(cap0);
4574 iaq.iaq_datalen = htole16(sizeof(*cap));
4575 }
4576
4577 sc->sc_got_vf_resources = 0;
4578 if (sc->sc_attached) {
4579 error = iavf_adminq_poll(sc, &iaq, aqb, 250);
4580 } else {
4581 error = iavf_adminq_poll_locked(sc, &iaq, aqb, 250);
4582 }
4583
4584 if (error)
4585 return -1;
4586 return 0;
4587 }
4588
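/*
 * Post a GET_STATS command for this VSI without waiting; the counters
 * in the reply are recorded by iavf_process_stats() when the ARQ is
 * serviced.
 */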
4589 static int
4590 iavf_get_stats(struct iavf_softc *sc)
4591 {
4592 struct ixl_aq_desc iaq;
4593 struct ixl_aq_buf *aqb;
4594 struct iavf_vc_queue_select *qsel;
4595 int error;
4596
4597 mutex_enter(&sc->sc_adminq_lock);
4598 aqb = iavf_aqb_get_locked(&sc->sc_atq_idle);
4599 mutex_exit(&sc->sc_adminq_lock);
4600
4601 if (aqb == NULL)
4602 return ENOMEM;
4603
4604 qsel = IXL_AQB_KVA(aqb);
4605 memset(qsel, 0, sizeof(*qsel));
4606 qsel->vsi_id = htole16(sc->sc_vsi_id);
4607
4608 memset(&iaq, 0, sizeof(iaq));
4609
4610 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4611 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4612 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_GET_STATS);
4613 iaq.iaq_datalen = htole16(sizeof(*qsel));
4614
4615 if (atomic_load_relaxed(&sc->sc_debuglevel) >= 3) {
4616 device_printf(sc->sc_dev, "post GET_STATS command\n");
4617 }
4618
4619 mutex_enter(&sc->sc_adminq_lock);
4620 error = iavf_atq_post(sc, &iaq, aqb);
4621 mutex_exit(&sc->sc_adminq_lock);
4622
4623 return error;
4624 }
4625
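/*
 * Build and send the IRQ map: with a single interrupt vector every
 * queue shares vector 0; with multiple vectors each queue pair gets
 * its own vector and vector 0 is left for the admin queue.
 */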
4626 static int
4627 iavf_config_irq_map(struct iavf_softc *sc, struct ixl_aq_buf *aqb)
4628 {
4629 struct ixl_aq_desc iaq;
4630 struct iavf_vc_vector_map *vec;
4631 struct iavf_vc_irq_map_info *map;
4632 struct iavf_rx_ring *rxr;
4633 struct iavf_tx_ring *txr;
4634 unsigned int num_vec;
4635 int error;
4636
4637 map = IXL_AQB_KVA(aqb);
4638 vec = map->vecmap;
4639 num_vec = 0;
4640
4641 if (sc->sc_nintrs == 1) {
4642 vec[0].vsi_id = htole16(sc->sc_vsi_id);
4643 vec[0].vector_id = htole16(0);
4644 vec[0].rxq_map = htole16(iavf_allqueues(sc));
4645 vec[0].txq_map = htole16(iavf_allqueues(sc));
4646 vec[0].rxitr_idx = htole16(IAVF_NOITR);
4647 		vec[0].txitr_idx = htole16(IAVF_NOITR);
4648 num_vec = 1;
4649 } else if (sc->sc_nintrs > 1) {
4650 KASSERT(sc->sc_nqps_alloc >= (sc->sc_nintrs - 1));
4651 for (; num_vec < (sc->sc_nintrs - 1); num_vec++) {
4652 rxr = sc->sc_qps[num_vec].qp_rxr;
4653 txr = sc->sc_qps[num_vec].qp_txr;
4654
4655 vec[num_vec].vsi_id = htole16(sc->sc_vsi_id);
4656 vec[num_vec].vector_id = htole16(num_vec + 1);
4657 vec[num_vec].rxq_map = htole16(__BIT(rxr->rxr_qid));
4658 vec[num_vec].txq_map = htole16(__BIT(txr->txr_qid));
4659 vec[num_vec].rxitr_idx = htole16(IAVF_ITR_RX);
4660 vec[num_vec].txitr_idx = htole16(IAVF_ITR_TX);
4661 }
4662
4663 vec[num_vec].vsi_id = htole16(sc->sc_vsi_id);
4664 vec[num_vec].vector_id = htole16(0);
4665 vec[num_vec].rxq_map = htole16(0);
4666 vec[num_vec].txq_map = htole16(0);
4667 num_vec++;
4668 }
4669
4670 map->num_vectors = htole16(num_vec);
4671
4672 memset(&iaq, 0, sizeof(iaq));
4673 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4674 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4675 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_CONFIG_IRQ_MAP);
4676 iaq.iaq_datalen = htole16(sizeof(*map) + sizeof(*vec) * num_vec);
4677
4678 if (sc->sc_attached) {
4679 error = iavf_adminq_poll(sc, &iaq, aqb, 250);
4680 } else {
4681 error = iavf_adminq_poll_locked(sc, &iaq, aqb, 250);
4682 }
4683
4684 if (error)
4685 return -1;
4686
4687 return 0;
4688 }
4689
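/*
 * Describe every TX/RX ring (DMA address, ring length, buffer and
 * maximum packet sizes) to the PF with a CONFIG_VSI_QUEUES command.
 */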
4690 static int
4691 iavf_config_vsi_queues(struct iavf_softc *sc)
4692 {
4693 struct ifnet *ifp = &sc->sc_ec.ec_if;
4694 struct ixl_aq_desc iaq;
4695 struct ixl_aq_buf *aqb;
4696 struct iavf_vc_queue_config_info *config;
4697 struct iavf_vc_txq_info *txq;
4698 struct iavf_vc_rxq_info *rxq;
4699 struct iavf_rx_ring *rxr;
4700 struct iavf_tx_ring *txr;
4701 uint32_t rxmtu_max;
4702 unsigned int i;
4703 int error;
4704
4705 rxmtu_max = ifp->if_mtu + IAVF_MTU_ETHERLEN;
4706
4707 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
4708
4709 if (aqb == NULL)
4710 return -1;
4711
4712 config = IXL_AQB_KVA(aqb);
4713 memset(config, 0, sizeof(*config));
4714 config->vsi_id = htole16(sc->sc_vsi_id);
4715 config->num_queue_pairs = htole16(sc->sc_nqueue_pairs);
4716
4717 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
4718 rxr = sc->sc_qps[i].qp_rxr;
4719 txr = sc->sc_qps[i].qp_txr;
4720
4721 txq = &config->qpair[i].txq;
4722 txq->vsi_id = htole16(sc->sc_vsi_id);
4723 txq->queue_id = htole16(txr->txr_qid);
4724 txq->ring_len = htole16(sc->sc_tx_ring_ndescs);
4725 txq->headwb_ena = 0;
4726 txq->dma_ring_addr = htole64(IXL_DMA_DVA(&txr->txr_mem));
4727 txq->dma_headwb_addr = 0;
4728
4729 rxq = &config->qpair[i].rxq;
4730 rxq->vsi_id = htole16(sc->sc_vsi_id);
4731 rxq->queue_id = htole16(rxr->rxr_qid);
4732 rxq->ring_len = htole16(sc->sc_rx_ring_ndescs);
4733 rxq->splithdr_ena = 0;
4734 rxq->databuf_size = htole32(IAVF_MCLBYTES);
4735 rxq->max_pkt_size = htole32(rxmtu_max);
4736 rxq->dma_ring_addr = htole64(IXL_DMA_DVA(&rxr->rxr_mem));
4737 rxq->rx_split_pos = 0;
4738 }
4739
4740 memset(&iaq, 0, sizeof(iaq));
4741 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4742 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4743 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_CONFIG_VSI_QUEUES);
4744 iaq.iaq_datalen = htole16(sizeof(*config) +
4745 sizeof(config->qpair[0]) * sc->sc_nqueue_pairs);
4746
4747 error = iavf_adminq_exec(sc, &iaq, aqb);
4748 if (error != IAVF_VC_RC_SUCCESS) {
4749 return -1;
4750 }
4751
4752 return 0;
4753 }
4754
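/*
 * Program the default RSS hash-enable (HENA) bits appropriate for the
 * underlying MAC type.
 */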
4755 static int
4756 iavf_config_hena(struct iavf_softc *sc)
4757 {
4758 struct ixl_aq_desc iaq;
4759 struct ixl_aq_buf *aqb;
4760 uint64_t *caps;
4761 int error;
4762
4763 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
4764
4765 if (aqb == NULL)
4766 return -1;
4767
4768 caps = IXL_AQB_KVA(aqb);
4769 if (sc->sc_mac_type == I40E_MAC_X722_VF)
4770 *caps = IXL_RSS_HENA_DEFAULT_X722;
4771 else
4772 *caps = IXL_RSS_HENA_DEFAULT_XL710;
4773
4774 memset(&iaq, 0, sizeof(iaq));
4775 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4776 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4777 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_SET_RSS_HENA);
4778 iaq.iaq_datalen = htole16(sizeof(*caps));
4779
4780 error = iavf_adminq_exec(sc, &iaq, aqb);
4781 if (error != IAVF_VC_RC_SUCCESS) {
4782 return -1;
4783 }
4784
4785 return 0;
4786 }
4787
4788 static inline void
4789 iavf_get_default_rss_key(uint8_t *buf, size_t len)
4790 {
4791 uint8_t rss_seed[RSS_KEYSIZE];
4792 size_t cplen;
4793
4794 cplen = MIN(len, sizeof(rss_seed));
4795 rss_getkey(rss_seed);
4796
4797 memcpy(buf, rss_seed, cplen);
4798 if (cplen < len)
4799 memset(buf + cplen, 0, len - cplen);
4800 }
4801
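/*
 * Send the RSS hash key for this VSI, derived from the kernel's
 * rss_getkey() seed and zero-padded to the required length.
 */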
4802 static int
4803 iavf_config_rss_key(struct iavf_softc *sc)
4804 {
4805 struct ixl_aq_desc iaq;
4806 struct ixl_aq_buf *aqb;
4807 struct iavf_vc_rss_key *rss_key;
4808 size_t key_len;
4809 int rv;
4810
4811 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
4812 if (aqb == NULL)
4813 return -1;
4814
4815 rss_key = IXL_AQB_KVA(aqb);
4816 rss_key->vsi_id = htole16(sc->sc_vsi_id);
4817 key_len = IXL_RSS_KEY_SIZE;
4818 iavf_get_default_rss_key(rss_key->key, key_len);
4819 rss_key->key_len = key_len;
4820
4821 memset(&iaq, 0, sizeof(iaq));
4822 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4823 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4824 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_CONFIG_RSS_KEY);
4825 iaq.iaq_datalen = htole16(sizeof(*rss_key) - sizeof(rss_key->pad)
4826 + (sizeof(rss_key->key[0]) * key_len));
4827
4828 rv = iavf_adminq_exec(sc, &iaq, aqb);
4829 if (rv != IAVF_VC_RC_SUCCESS) {
4830 return -1;
4831 }
4832
4833 return 0;
4834 }
4835
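/*
 * Fill the RSS lookup table with a round-robin spread of the active
 * queue pairs and send it to the PF.
 */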
4836 static int
4837 iavf_config_rss_lut(struct iavf_softc *sc)
4838 {
4839 struct ixl_aq_desc iaq;
4840 struct ixl_aq_buf *aqb;
4841 struct iavf_vc_rss_lut *rss_lut;
4842 uint8_t *lut, v;
4843 int rv, i;
4844
4848 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
4849 if (aqb == NULL)
4850 return -1;
4851
4852 rss_lut = IXL_AQB_KVA(aqb);
4853 rss_lut->vsi_id = htole16(sc->sc_vsi_id);
4854 rss_lut->lut_entries = htole16(IXL_RSS_VSI_LUT_SIZE);
4855
4856 lut = rss_lut->lut;
4857 for (i = 0; i < IXL_RSS_VSI_LUT_SIZE; i++) {
4858 v = i % sc->sc_nqueue_pairs;
4859 v &= IAVF_RSS_VSI_LUT_ENTRY_MASK;
4860 lut[i] = v;
4861 }
4862
4863 memset(&iaq, 0, sizeof(iaq));
4864 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4865 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4866 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_CONFIG_RSS_LUT);
4867 iaq.iaq_datalen = htole16(sizeof(*rss_lut) - sizeof(rss_lut->pad)
4868 + (sizeof(rss_lut->lut[0]) * IXL_RSS_VSI_LUT_SIZE));
4869
4870 rv = iavf_adminq_exec(sc, &iaq, aqb);
4871 if (rv != IAVF_VC_RC_SUCCESS) {
4872 return -1;
4873 }
4874
4875 return 0;
4876 }
4877
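/*
 * Send a queue-select command (e.g. ENABLE_QUEUES or DISABLE_QUEUES)
 * covering every allocated queue on this VSI.
 */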
4878 static int
4879 iavf_queue_select(struct iavf_softc *sc, int opcode)
4880 {
4881 struct ixl_aq_desc iaq;
4882 struct ixl_aq_buf *aqb;
4883 struct iavf_vc_queue_select *qsel;
4884 int error;
4885
4886 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
4887 if (aqb == NULL)
4888 return -1;
4889
4890 qsel = IXL_AQB_KVA(aqb);
4891 qsel->vsi_id = htole16(sc->sc_vsi_id);
4892 qsel->rx_queues = htole32(iavf_allqueues(sc));
4893 qsel->tx_queues = htole32(iavf_allqueues(sc));
4894
4895 memset(&iaq, 0, sizeof(iaq));
4896 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4897 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4898 iavf_aq_vc_set_opcode(&iaq, opcode);
4899 iaq.iaq_datalen = htole16(sizeof(*qsel));
4900
4901 error = iavf_adminq_exec(sc, &iaq, aqb);
4902 if (error != IAVF_VC_RC_SUCCESS) {
4903 return -1;
4904 }
4905
4906 return 0;
4907 }
4908
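/*
 * Ask the PF for req_num queue pairs.  The reply is handled
 * asynchronously by iavf_process_req_queues().
 */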
4909 static int
4910 iavf_request_queues(struct iavf_softc *sc, unsigned int req_num)
4911 {
4912 struct ixl_aq_desc iaq;
4913 struct ixl_aq_buf *aqb;
4914 struct iavf_vc_res_request *req;
4915 int rv;
4916
4917 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
4918 if (aqb == NULL)
4919 return ENOMEM;
4920
4921 req = IXL_AQB_KVA(aqb);
4922 req->num_queue_pairs = req_num;
4923
4924 memset(&iaq, 0, sizeof(iaq));
4925 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4926 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4927 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_REQUEST_QUEUES);
4928 iaq.iaq_datalen = htole16(sizeof(*req));
4929
4930 mutex_enter(&sc->sc_adminq_lock);
4931 rv = iavf_atq_post(sc, &iaq, aqb);
4932 mutex_exit(&sc->sc_adminq_lock);
4933
4934 return rv;
4935 }
4936
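/*
 * Request a VF reset from the PF and mark the reset as in progress in
 * the VFGEN_RSTAT register.
 */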
4937 static int
4938 iavf_reset_vf(struct iavf_softc *sc)
4939 {
4940 struct ixl_aq_desc iaq;
4941 int error;
4942
4943 memset(&iaq, 0, sizeof(iaq));
4944 iaq.iaq_flags = htole16(IXL_AQ_RD);
4945 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4946 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_RESET_VF);
4947 iaq.iaq_datalen = htole16(0);
4948
4949 iavf_wr(sc, I40E_VFGEN_RSTAT, IAVF_VFR_INPROGRESS);
4950
4951 mutex_enter(&sc->sc_adminq_lock);
4952 error = iavf_atq_post(sc, &iaq, NULL);
4953 mutex_exit(&sc->sc_adminq_lock);
4954
4955 return error;
4956 }
4957
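/*
 * Add or remove a single MAC filter on this VSI.  While a reset is in
 * progress the command is polled synchronously; otherwise it waits for
 * the ARQ reply.
 */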
4958 static int
4959 iavf_eth_addr(struct iavf_softc *sc, const uint8_t *addr, uint32_t opcode)
4960 {
4961 struct ixl_aq_desc iaq;
4962 struct ixl_aq_buf *aqb;
4963 struct iavf_vc_eth_addr_list *addrs;
4964 struct iavf_vc_eth_addr *vcaddr;
4965 int rv;
4966
4967 KASSERT(sc->sc_attached);
4968 KASSERT(opcode == IAVF_VC_OP_ADD_ETH_ADDR ||
4969 opcode == IAVF_VC_OP_DEL_ETH_ADDR);
4970
4971 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
4972 if (aqb == NULL)
4973 return -1;
4974
4975 addrs = IXL_AQB_KVA(aqb);
4976 addrs->vsi_id = htole16(sc->sc_vsi_id);
4977 addrs->num_elements = htole16(1);
4978 vcaddr = addrs->list;
4979 memcpy(vcaddr->addr, addr, ETHER_ADDR_LEN);
4980
4981 memset(&iaq, 0, sizeof(iaq));
4982 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4983 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4984 iavf_aq_vc_set_opcode(&iaq, opcode);
4985 iaq.iaq_datalen = htole16(sizeof(*addrs) + sizeof(*vcaddr));
4986
4987 if (sc->sc_resetting) {
4988 mutex_enter(&sc->sc_adminq_lock);
4989 rv = iavf_adminq_poll_locked(sc, &iaq, aqb, 250);
4990 iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
4991 mutex_exit(&sc->sc_adminq_lock);
4992 } else {
4993 rv = iavf_adminq_exec(sc, &iaq, aqb);
4994 }
4995
4996 if (rv != IAVF_VC_RC_SUCCESS) {
4997 return -1;
4998 }
4999
5000 return 0;
5001 }
5002
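/*
 * Enable or disable unicast/multicast promiscuous reception on the VSI.
 */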
5003 static int
5004 iavf_config_promisc_mode(struct iavf_softc *sc, int unicast, int multicast)
5005 {
5006 struct ixl_aq_desc iaq;
5007 struct ixl_aq_buf *aqb;
5008 struct iavf_vc_promisc_info *promisc;
5009 int flags;
5010
5011 KASSERT(sc->sc_attached);
5012
5013 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
5014 if (aqb == NULL)
5015 return -1;
5016
5017 flags = 0;
5018 if (unicast)
5019 flags |= IAVF_FLAG_VF_UNICAST_PROMISC;
5020 if (multicast)
5021 flags |= IAVF_FLAG_VF_MULTICAST_PROMISC;
5022
5023 promisc = IXL_AQB_KVA(aqb);
5024 promisc->vsi_id = htole16(sc->sc_vsi_id);
5025 promisc->flags = htole16(flags);
5026
5027 memset(&iaq, 0, sizeof(iaq));
5028 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
5029 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
5030 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_CONFIG_PROMISC);
5031 iaq.iaq_datalen = htole16(sizeof(*promisc));
5032
5033 if (iavf_adminq_exec(sc, &iaq, aqb) != IAVF_VC_RC_SUCCESS) {
5034 return -1;
5035 }
5036
5037 return 0;
5038 }
5039
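/*
 * Ask the PF to enable or disable hardware VLAN tag stripping based on
 * the interface's ETHERCAP_VLAN_HWTAGGING setting.
 */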
5040 static int
5041 iavf_config_vlan_stripping(struct iavf_softc *sc, int eccap)
5042 {
5043 struct ixl_aq_desc iaq;
5044 uint32_t opcode;
5045
5046 opcode = ISSET(eccap, ETHERCAP_VLAN_HWTAGGING) ?
5047 IAVF_VC_OP_ENABLE_VLAN_STRIP : IAVF_VC_OP_DISABLE_VLAN_STRIP;
5048
5049 memset(&iaq, 0, sizeof(iaq));
5050 iaq.iaq_flags = htole16(IXL_AQ_RD);
5051 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
5052 iavf_aq_vc_set_opcode(&iaq, opcode);
5053 iaq.iaq_datalen = htole16(0);
5054
5055 if (iavf_adminq_exec(sc, &iaq, NULL) != IAVF_VC_RC_SUCCESS) {
5056 return -1;
5057 }
5058
5059 return 0;
5060 }
5061
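/*
 * Add or remove a single VLAN filter, using the same synchronous path
 * as iavf_eth_addr() while a reset is in progress.
 */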
5062 static int
5063 iavf_config_vlan_id(struct iavf_softc *sc, uint16_t vid, uint32_t opcode)
5064 {
5065 struct ixl_aq_desc iaq;
5066 struct ixl_aq_buf *aqb;
5067 struct iavf_vc_vlan_filter *vfilter;
5068 int rv;
5069
5070 KASSERT(opcode == IAVF_VC_OP_ADD_VLAN || opcode == IAVF_VC_OP_DEL_VLAN);
5071
5072 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
5073
5074 if (aqb == NULL)
5075 return -1;
5076
5077 vfilter = IXL_AQB_KVA(aqb);
5078 vfilter->vsi_id = htole16(sc->sc_vsi_id);
5079 vfilter->num_vlan_id = htole16(1);
5080 vfilter->vlan_id[0] = vid;
5081
5082 memset(&iaq, 0, sizeof(iaq));
5083 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
5084 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
5085 iavf_aq_vc_set_opcode(&iaq, opcode);
5086 iaq.iaq_datalen = htole16(sizeof(*vfilter) + sizeof(vid));
5087
5088 if (sc->sc_resetting) {
5089 mutex_enter(&sc->sc_adminq_lock);
5090 rv = iavf_adminq_poll_locked(sc, &iaq, aqb, 250);
5091 iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
5092 mutex_exit(&sc->sc_adminq_lock);
5093 } else {
5094 rv = iavf_adminq_exec(sc, &iaq, aqb);
5095 }
5096
5097 if (rv != IAVF_VC_RC_SUCCESS) {
5098 return -1;
5099 }
5100
5101 return 0;
5102 }
5103
5104 static void
5105 iavf_post_request_queues(void *xsc)
5106 {
5107 struct iavf_softc *sc;
5108 struct ifnet *ifp;
5109
5110 sc = xsc;
5111 ifp = &sc->sc_ec.ec_if;
5112
5113 if (!ISSET(sc->sc_vf_cap, IAVF_VC_OFFLOAD_REQ_QUEUES)) {
5114 log(LOG_DEBUG, "%s: the VF has no REQ_QUEUES capability\n",
5115 ifp->if_xname);
5116 return;
5117 }
5118
5119 log(LOG_INFO, "%s: try to change the number of queue pairs"
5120 " (vsi %u, %u allocated, request %u)\n",
5121 ifp->if_xname,
5122 sc->sc_nqps_vsi, sc->sc_nqps_alloc, sc->sc_nqps_req);
5123 iavf_request_queues(sc, sc->sc_nqps_req);
5124 }
5125
5126 static bool
5127 iavf_sysctlnode_is_rx(struct sysctlnode *node)
5128 {
5129
5130 if (strstr(node->sysctl_parent->sysctl_name, "rx") != NULL)
5131 return true;
5132
5133 return false;
5134 }
5135
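/*
 * Sysctl handler for the TX/RX interrupt throttling rate.  The parent
 * node name decides which direction is updated, and the new value is
 * written to every queue's ITR register immediately.
 */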
5136 static int
5137 iavf_sysctl_itr_handler(SYSCTLFN_ARGS)
5138 {
5139 struct sysctlnode node = *rnode;
5140 struct iavf_softc *sc = (struct iavf_softc *)node.sysctl_data;
5141 uint32_t newitr, *itrptr;
5142 unsigned int i;
5143 int itr, error;
5144
5145 if (iavf_sysctlnode_is_rx(&node)) {
5146 itrptr = &sc->sc_rx_itr;
5147 itr = IAVF_ITR_RX;
5148 } else {
5149 itrptr = &sc->sc_tx_itr;
5150 itr = IAVF_ITR_TX;
5151 }
5152
5153 newitr = *itrptr;
5154 node.sysctl_data = &newitr;
5155 node.sysctl_size = sizeof(newitr);
5156
5157 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5158 if (error || newp == NULL)
5159 return error;
5160
5161 if (newitr > 0x07FF)
5162 return EINVAL;
5163
5164 *itrptr = newitr;
5165
5166 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
5167 iavf_wr(sc, I40E_VFINT_ITRN1(itr, i), *itrptr);
5168 }
5169 iavf_wr(sc, I40E_VFINT_ITR01(itr), *itrptr);
5170
5171 return 0;
5172 }
5173
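/*
 * Single-shot work wrapper around workqueue(9): ixw_added guards
 * against enqueueing the same iavf_work twice and is cleared just
 * before the handler runs.
 */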
5174 static void
5175 iavf_workq_work(struct work *wk, void *context)
5176 {
5177 struct iavf_work *work;
5178
5179 work = container_of(wk, struct iavf_work, ixw_cookie);
5180
5181 atomic_swap_uint(&work->ixw_added, 0);
5182 work->ixw_func(work->ixw_arg);
5183 }
5184
5185 static struct workqueue *
5186 iavf_workq_create(const char *name, pri_t prio, int ipl, int flags)
5187 {
5188 struct workqueue *wq;
5189 int error;
5190
5191 error = workqueue_create(&wq, name, iavf_workq_work, NULL,
5192 prio, ipl, flags);
5193
5194 if (error)
5195 return NULL;
5196
5197 return wq;
5198 }
5199
5200 static void
5201 iavf_workq_destroy(struct workqueue *wq)
5202 {
5203
5204 workqueue_destroy(wq);
5205 }
5206
5207 static int
5208 iavf_work_set(struct iavf_work *work, void (*func)(void *), void *arg)
5209 {
5210
5211 if (work->ixw_added != 0)
5212 return -1;
5213
5214 memset(work, 0, sizeof(*work));
5215 work->ixw_func = func;
5216 work->ixw_arg = arg;
5217
5218 return 0;
5219 }
5220
5221 static void
5222 iavf_work_add(struct workqueue *wq, struct iavf_work *work)
5223 {
5224 if (atomic_cas_uint(&work->ixw_added, 0, 1) != 0)
5225 return;
5226
5227 kpreempt_disable();
5228 workqueue_enqueue(wq, &work->ixw_cookie, NULL);
5229 kpreempt_enable();
5230 }
5231
5232 static void
5233 iavf_work_wait(struct workqueue *wq, struct iavf_work *work)
5234 {
5235
5236 workqueue_wait(wq, &work->ixw_cookie);
5237 }
5238
5239 static void
5240 iavf_evcnt_attach(struct evcnt *ec,
5241 const char *n0, const char *n1)
5242 {
5243
5244 evcnt_attach_dynamic(ec, EVCNT_TYPE_MISC,
5245 NULL, n0, n1);
5246 }
5247
5248 MODULE(MODULE_CLASS_DRIVER, if_iavf, "pci");
5249
5250 #ifdef _MODULE
5251 #include "ioconf.c"
5252 #endif
5253
5254 #ifdef _MODULE
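/*
 * Read tunables (debug level, queue count, ITR values, descriptor ring
 * sizes) from the module property dictionary at load time.
 */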
5255 static void
5256 iavf_parse_modprop(prop_dictionary_t dict)
5257 {
5258 prop_object_t obj;
5259 int64_t val;
5260 uint32_t n;
5261
5262 if (dict == NULL)
5263 return;
5264
5265 obj = prop_dictionary_get(dict, "debug_level");
5266 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
5267 val = prop_number_signed_value((prop_number_t)obj);
5268
5269 if (val > 0) {
5270 iavf_params.debug = val;
5271 printf("iavf: debug level=%d\n", iavf_params.debug);
5272 }
5273 }
5274
5275 obj = prop_dictionary_get(dict, "max_qps");
5276 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
5277 val = prop_number_signed_value((prop_number_t)obj);
5278
5279 if (val < 1 || val > I40E_MAX_VF_QUEUES) {
5280 printf("iavf: invalid queue size(1 <= n <= %d)",
5281 I40E_MAX_VF_QUEUES);
5282 } else {
5283 iavf_params.max_qps = val;
5284 printf("iavf: request queue pair = %u\n",
5285 iavf_params.max_qps);
5286 }
5287 }
5288
5289 obj = prop_dictionary_get(dict, "tx_itr");
5290 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
5291 val = prop_number_signed_value((prop_number_t)obj);
5292 if (val > 0x07FF) {
5293 printf("iavf: TX ITR too big (%" PRId64 " <= %d)",
5294 val, 0x7FF);
5295 } else {
5296 iavf_params.tx_itr = val;
5297 printf("iavf: TX ITR = 0x%" PRIx32,
5298 iavf_params.tx_itr);
5299 }
5300 }
5301
5302 obj = prop_dictionary_get(dict, "rx_itr");
5303 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
5304 val = prop_number_signed_value((prop_number_t)obj);
5305 if (val > 0x07FF) {
5306 printf("iavf: RX ITR too big (%" PRId64 " <= %d)",
5307 val, 0x7FF);
5308 } else {
5309 iavf_params.rx_itr = val;
5310 printf("iavf: RX ITR = 0x%" PRIx32,
5311 iavf_params.rx_itr);
5312 }
5313 }
5314
5315 obj = prop_dictionary_get(dict, "tx_ndescs");
5316 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
5317 val = prop_number_signed_value((prop_number_t)obj);
5318 n = 1U << (fls32(val) - 1);
5319 if (val != (int64_t) n) {
5320 printf("iavf: TX desc invlaid size"
5321 "(%" PRId64 " != %" PRIu32 ")\n", val, n);
5322 } else if (val > (8192 - 32)) {
5323 printf("iavf: Tx desc too big (%" PRId64 " > %d)",
5324 val, (8192 - 32));
5325 } else {
5326 iavf_params.tx_ndescs = val;
5327 printf("iavf: TX descriptors = 0x%04x",
5328 iavf_params.tx_ndescs);
5329 }
5330 }
5331
5332 obj = prop_dictionary_get(dict, "rx_ndescs");
5333 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
5334 val = prop_number_signed_value((prop_number_t)obj);
5335 n = 1U << (fls32(val) - 1);
5336 if (val != (int64_t) n) {
5337 printf("iavf: RX desc invlaid size"
5338 "(%" PRId64 " != %" PRIu32 ")\n", val, n);
5339 } else if (val > (8192 - 32)) {
5340 printf("iavf: Rx desc too big (%" PRId64 " > %d)",
5341 val, (8192 - 32));
5342 } else {
5343 iavf_params.rx_ndescs = val;
5344 printf("iavf: RX descriptors = 0x%04x",
5345 iavf_params.rx_ndescs);
5346 }
5347 }
5348 }
5349 #endif
5350
5351 static int
5352 if_iavf_modcmd(modcmd_t cmd, void *opaque)
5353 {
5354 int error = 0;
5355
5356 #ifdef _MODULE
5357 switch (cmd) {
5358 case MODULE_CMD_INIT:
5359 iavf_parse_modprop((prop_dictionary_t)opaque);
5360 error = config_init_component(cfdriver_ioconf_if_iavf,
5361 cfattach_ioconf_if_iavf, cfdata_ioconf_if_iavf);
5362 break;
5363 case MODULE_CMD_FINI:
5364 error = config_fini_component(cfdriver_ioconf_if_iavf,
5365 cfattach_ioconf_if_iavf, cfdata_ioconf_if_iavf);
5366 break;
5367 default:
5368 error = ENOTTY;
5369 break;
5370 }
5371 #endif
5372
5373 return error;
5374 }
5375