if_iavf.c revision 1.6 1 /* $NetBSD: if_iavf.c,v 1.6 2020/09/17 06:34:43 yamaguchi Exp $ */
2
3 /*
4 * Copyright (c) 2013-2015, Intel Corporation
5 * All rights reserved.
6
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
12 *
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * 3. Neither the name of the Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 /*
35 * Copyright (c) 2016,2017 David Gwynne <dlg (at) openbsd.org>
36 * Copyright (c) 2019 Jonathan Matthew <jmatthew (at) openbsd.org>
37 *
38 * Permission to use, copy, modify, and distribute this software for any
39 * purpose with or without fee is hereby granted, provided that the above
40 * copyright notice and this permission notice appear in all copies.
41 *
42 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
43 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
44 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
45 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
46 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
47 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
48 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
49 */
50
51 /*
52 * Copyright (c) 2020 Internet Initiative Japan, Inc.
53 * All rights reserved.
54 *
55 * Redistribution and use in source and binary forms, with or without
56 * modification, are permitted provided that the following conditions
57 * are met:
58 * 1. Redistributions of source code must retain the above copyright
59 * notice, this list of conditions and the following disclaimer.
60 * 2. Redistributions in binary form must reproduce the above copyright
61 * notice, this list of conditions and the following disclaimer in the
62 * documentation and/or other materials provided with the distribution.
63 *
64 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
65 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
66 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
67 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
68 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
69 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
70 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
71 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
72 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
73 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
74 * POSSIBILITY OF SUCH DAMAGE.
75 */
76
77 #include <sys/cdefs.h>
78 __KERNEL_RCSID(0, "$NetBSD: if_iavf.c,v 1.6 2020/09/17 06:34:43 yamaguchi Exp $");
79
80 #include <sys/param.h>
81 #include <sys/types.h>
82
83 #include <sys/bitops.h>
84 #include <sys/bus.h>
85 #include <sys/cprng.h>
86 #include <sys/cpu.h>
87 #include <sys/device.h>
88 #include <sys/evcnt.h>
89 #include <sys/interrupt.h>
90 #include <sys/kmem.h>
91 #include <sys/module.h>
92 #include <sys/mutex.h>
93 #include <sys/pcq.h>
94 #include <sys/queue.h>
95 #include <sys/syslog.h>
96 #include <sys/workqueue.h>
97 #include <sys/xcall.h>
98
99 #include <net/bpf.h>
100 #include <net/if.h>
101 #include <net/if_dl.h>
102 #include <net/if_media.h>
103 #include <net/if_ether.h>
104 #include <net/rss_config.h>
105
106 #include <netinet/tcp.h> /* for struct tcphdr */
107 #include <netinet/udp.h> /* for struct udphdr */
108
109 #include <dev/pci/pcivar.h>
110 #include <dev/pci/pcidevs.h>
111
112 #include <dev/pci/if_ixlreg.h>
113 #include <dev/pci/if_ixlvar.h>
114 #include <dev/pci/if_iavfvar.h>
115
116 #include <prop/proplib.h>
117
/* Register window and admin-queue geometry. */
#define IAVF_PCIREG		PCI_MAPREG_START
#define IAVF_AQ_NUM		256	/* admin queue ring entries */
#define IAVF_AQ_MASK		(IAVF_AQ_NUM-1)
#define IAVF_AQ_ALIGN		64
#define IAVF_AQ_BUFLEN		4096
#define I40E_AQ_LARGE_BUF	512
/* virtchnl API version this driver negotiates (see iavf_get_version()). */
#define IAVF_VF_MAJOR		1
#define IAVF_VF_MINOR		1

/* VF reset states (compared against I40E_VFGEN_RSTAT in iavf_wait_active()). */
#define IAVF_VFR_INPROGRESS	0
#define IAVF_VFR_COMPLETED	1
#define IAVF_VFR_VFACTIVE	2

#define IAVF_REG_VFR		0xdeadbeef

/* ITR (interrupt throttling rate) register indexes. */
#define IAVF_ITR_RX		0x0
#define IAVF_ITR_TX		0x1
#define IAVF_ITR_MISC		0x2
#define IAVF_NOITR		0x3	/* index meaning "no throttling" */

/* Link-layer overhead used when computing the MTU bounds below. */
#define IAVF_MTU_ETHERLEN	(ETHER_HDR_LEN \
				+ ETHER_CRC_LEN)
#define IAVF_MAX_MTU		(9600 - IAVF_MTU_ETHERLEN)
#define IAVF_MIN_MTU		(ETHER_MIN_LEN - ETHER_CRC_LEN)

#define IAVF_WORKQUEUE_PRI	PRI_SOFTNET

/* TX/RX ring parameters. */
#define IAVF_TX_PKT_DESCS	8	/* max descriptors per TX packet */
#define IAVF_TX_QUEUE_ALIGN	128
#define IAVF_RX_QUEUE_ALIGN	128
#define IAVF_TX_PKT_MAXSIZE	(MCLBYTES * IAVF_TX_PKT_DESCS)
#define IAVF_MCLBYTES		(MCLBYTES - ETHER_ALIGN)

/* Periodic tick and TX watchdog parameters. */
#define IAVF_TICK_INTERVAL	(5 * hz)
#define IAVF_WATCHDOG_TICKS	3
#define IAVF_WATCHDOG_STOP	0

/* Per-poll packet budgets for the TX/RX completion handlers. */
#define IAVF_TXRX_PROCESS_UNLIMIT	UINT_MAX
#define IAVF_TX_PROCESS_LIMIT		256
#define IAVF_RX_PROCESS_LIMIT		256
#define IAVF_TX_INTR_PROCESS_LIMIT	256
#define IAVF_RX_INTR_PROCESS_LIMIT	0U

/* Admin-queue command timeout (presumably milliseconds — TODO confirm
 * against iavf_adminq_exec()/iavf_arq_poll()). */
#define IAVF_EXEC_TIMEOUT	3000

/* Checksum-offload capability sets advertised on the interface. */
#define IAVF_IFCAP_RXCSUM	(IFCAP_CSUM_IPv4_Rx |	\
				 IFCAP_CSUM_TCPv4_Rx |	\
				 IFCAP_CSUM_UDPv4_Rx |	\
				 IFCAP_CSUM_TCPv6_Rx |	\
				 IFCAP_CSUM_UDPv6_Rx)
#define IAVF_IFCAP_TXCSUM	(IFCAP_CSUM_IPv4_Tx |	\
				 IFCAP_CSUM_TCPv4_Tx |	\
				 IFCAP_CSUM_UDPv4_Tx |	\
				 IFCAP_CSUM_TCPv6_Tx |	\
				 IFCAP_CSUM_UDPv6_Tx)
#define IAVF_CSUM_ALL_OFFLOAD	(M_CSUM_IPv4 |	\
				 M_CSUM_TCPv4 | M_CSUM_TCPv6 |	\
				 M_CSUM_UDPv4 | M_CSUM_UDPv6)
176
struct iavf_softc; /* defined */

/* Module-level tunables copied into each softc at attach time. */
struct iavf_module_params {
	int		 debug;		/* initial sc_debuglevel */
	uint32_t	 rx_itr;	/* RX interrupt throttling value */
	uint32_t	 tx_itr;	/* TX interrupt throttling value */
	unsigned int	 rx_ndescs;	/* RX descriptors per ring */
	unsigned int	 tx_ndescs;	/* TX descriptors per ring */
	int		 max_qps;	/* cap on requested queue pairs */
};

/* PCI vendor/product pair matched by iavf_lookup(). */
struct iavf_product {
	unsigned int	 vendor_id;
	unsigned int	 product_id;
};

/* Maps a link-speed code to a baudrate and ifmedia word. */
struct iavf_link_speed {
	uint64_t	 baudrate;
	uint64_t	 media;
};
197
/*
 * Register offsets and bit masks for the admin ("mailbox") queue pair:
 * ATQ is the command queue the VF submits on, ARQ is the queue the PF
 * posts responses/events to.
 */
struct iavf_aq_regs {
	bus_size_t		 atq_tail;
	bus_size_t		 atq_head;
	bus_size_t		 atq_len;
	bus_size_t		 atq_bal;	/* ring base address, low */
	bus_size_t		 atq_bah;	/* ring base address, high */

	bus_size_t		 arq_tail;
	bus_size_t		 arq_head;
	bus_size_t		 arq_len;
	bus_size_t		 arq_bal;
	bus_size_t		 arq_bah;

	uint32_t		 atq_len_enable;	/* enable bit in *_len */
	uint32_t		 atq_tail_mask;
	uint32_t		 atq_head_mask;

	uint32_t		 arq_len_enable;
	uint32_t		 arq_tail_mask;
	uint32_t		 arq_head_mask;
};
219
/* A deferred-work entry run on one of the driver's workqueues. */
struct iavf_work {
	struct work	 ixw_cookie;
	void		(*ixw_func)(void *);
	void		*ixw_arg;
	unsigned int	 ixw_added;	/* non-zero while enqueued */
};

/* Per-slot TX bookkeeping: mbuf chain, its DMA map, and EOP index. */
struct iavf_tx_map {
	struct mbuf		*txm_m;
	bus_dmamap_t		 txm_map;
	unsigned int		 txm_eop;
};
232
/* One transmit descriptor ring; fields protected by txr_lock (spin). */
struct iavf_tx_ring {
	unsigned int		 txr_qid;	/* hardware queue id */
	char			 txr_name[16];

	struct iavf_softc	*txr_sc;
	kmutex_t		 txr_lock;
	pcq_t			*txr_intrq;	/* if_transmit staging queue */
	void			*txr_si;	/* softint for deferred transmit */
	unsigned int		 txr_prod;	/* next free descriptor */
	unsigned int		 txr_cons;	/* next descriptor to reclaim */

	struct iavf_tx_map	*txr_maps;	/* one per descriptor slot */
	struct ixl_dmamem	 txr_mem;	/* descriptor ring DMA memory */
	bus_size_t		 txr_tail;	/* tail register offset */

	int			 txr_watchdog;	/* countdown; IAVF_WATCHDOG_STOP idles it */

	/* event counters */
	struct evcnt		 txr_defragged;
	struct evcnt		 txr_defrag_failed;
	struct evcnt		 txr_pcqdrop;
	struct evcnt		 txr_transmitdef;
	struct evcnt		 txr_defer;
	struct evcnt		 txr_watchdogto;
	struct evcnt		 txr_intr;
};
258
/* Per-slot RX bookkeeping: the mapped mbuf waiting for the hardware. */
struct iavf_rx_map {
	struct mbuf	*rxm_m;
	bus_dmamap_t	 rxm_map;
};

/* One receive descriptor ring; fields protected by rxr_lock (spin). */
struct iavf_rx_ring {
	unsigned int		 rxr_qid;	/* hardware queue id */
	char			 rxr_name[16];

	struct iavf_softc	*rxr_sc;
	kmutex_t		 rxr_lock;

	unsigned int		 rxr_prod;	/* next slot to refill */
	unsigned int		 rxr_cons;	/* next slot to harvest */

	struct iavf_rx_map	*rxr_maps;	/* one per descriptor slot */
	struct ixl_dmamem	 rxr_mem;	/* descriptor ring DMA memory */
	bus_size_t		 rxr_tail;	/* tail register offset */

	/* partially assembled multi-descriptor packet */
	struct mbuf		*rxr_m_head;
	struct mbuf		**rxr_m_tail;

	/* event counters */
	struct evcnt		 rxr_mgethdr_failed;
	struct evcnt		 rxr_mgetcl_failed;
	struct evcnt		 rxr_mbuf_load_failed;
	struct evcnt		 rxr_defer;
	struct evcnt		 rxr_intr;
};
287
/* A TX/RX ring pair serviced by one interrupt/softint/work entry. */
struct iavf_queue_pair {
	struct iavf_tx_ring	*qp_txr;
	struct iavf_rx_ring	*qp_rxr;
	struct work		 qp_work;
	void			*qp_si;
	bool			 qp_workqueue;	/* defer to workqueue vs softint */
};

/* VF statistics exported as evcnt(9) counters. */
struct iavf_stat_counters {
	struct evcnt	 isc_rx_bytes;
	struct evcnt	 isc_rx_unicast;
	struct evcnt	 isc_rx_multicast;
	struct evcnt	 isc_rx_broadcast;
	struct evcnt	 isc_rx_discards;
	struct evcnt	 isc_rx_unknown_protocol;
	struct evcnt	 isc_tx_bytes;
	struct evcnt	 isc_tx_unicast;
	struct evcnt	 isc_tx_multicast;
	struct evcnt	 isc_tx_broadcast;
	struct evcnt	 isc_tx_discards;
	struct evcnt	 isc_tx_errors;
};
310
311 /*
312 * Locking notes:
313 * + A field in iavf_tx_ring is protected by txr_lock (a spin mutex), and
314 * A field in iavf_rx_ring is protected by rxr_lock (a spin mutex).
315 * - more than one lock must not be held at once.
316 * + fields named sc_atq_*, sc_arq_*, and sc_adminq_* are protected by
317 * sc_adminq_lock(a spin mutex).
318 * - The lock is held while accessing sc_aq_regs
319 * and is not held with txr_lock and rxr_lock together.
 * + Other fields in iavf_softc are protected by sc_cfg_lock
 *       (an adaptive mutex).
322 * - The lock must be held before acquiring another lock.
323 *
324 * Locking order:
325 * - IFNET_LOCK => sc_cfg_lock => sc_adminq_lock
326 * - sc_cfg_lock => ETHER_LOCK => sc_adminq_lock
327 * - sc_cfg_lock => txr_lock
328 * - sc_cfg_lock => rxr_lock
329 */
330
/* Per-device software state; see the locking notes above. */
struct iavf_softc {
	device_t		 sc_dev;
	enum i40e_mac_type	 sc_mac_type;
	int			 sc_debuglevel;
	bool			 sc_attached;	/* attach completed; IAVF_LOG uses log(9) */
	bool			 sc_dead;
	kmutex_t		 sc_cfg_lock;	/* adaptive; see locking notes */
	callout_t		 sc_tick;
	struct ifmedia		 sc_media;
	uint64_t		 sc_media_status;
	uint64_t		 sc_media_active;
	int			 sc_link_state;

	const struct iavf_aq_regs *
				 sc_aq_regs;	/* admin queue register layout */

	struct ethercom		 sc_ec;
	uint8_t			 sc_enaddr[ETHER_ADDR_LEN];
	uint8_t			 sc_enaddr_fake[ETHER_ADDR_LEN];
	uint8_t			 sc_enaddr_added[ETHER_ADDR_LEN];
	uint8_t			 sc_enaddr_reset[ETHER_ADDR_LEN];
	struct if_percpuq	*sc_ipq;

	/* bus resources */
	struct pci_attach_args	 sc_pa;
	bus_dma_tag_t		 sc_dmat;
	bus_space_tag_t		 sc_memt;
	bus_space_handle_t	 sc_memh;
	bus_size_t		 sc_mems;
	pci_intr_handle_t	*sc_ihp;
	void			**sc_ihs;
	unsigned int		 sc_nintrs;

	/* identity/capabilities negotiated with the PF over virtchnl */
	uint32_t		 sc_major_ver;
	uint32_t		 sc_minor_ver;
	uint32_t		 sc_vf_id;
	uint32_t		 sc_vf_cap;
	uint16_t		 sc_vsi_id;
	uint16_t		 sc_qset_handle;
	uint16_t		 sc_max_mtu;
	bool			 sc_got_vf_resources;
	bool			 sc_got_irq_map;
	unsigned int		 sc_max_vectors;

	/* admin queues; protected by sc_adminq_lock */
	kmutex_t		 sc_adminq_lock;
	kcondvar_t		 sc_adminq_cv;
	struct ixl_dmamem	 sc_atq;	/* command (send) descriptor ring */
	unsigned int		 sc_atq_prod;
	unsigned int		 sc_atq_cons;
	struct ixl_aq_bufs	 sc_atq_idle;
	struct ixl_aq_bufs	 sc_atq_live;
	struct ixl_dmamem	 sc_arq;	/* response/event descriptor ring */
	struct ixl_aq_bufs	 sc_arq_idle;
	struct ixl_aq_bufs	 sc_arq_live;
	unsigned int		 sc_arq_prod;
	unsigned int		 sc_arq_cons;
	struct iavf_work	 sc_arq_refill;
	uint32_t		 sc_arq_opcode;	/* last completed ARQ opcode */
	uint32_t		 sc_arq_retval;	/* and its virtchnl return value */

	/* TX/RX configuration */
	uint32_t		 sc_tx_itr;
	uint32_t		 sc_rx_itr;
	unsigned int		 sc_tx_ring_ndescs;
	unsigned int		 sc_rx_ring_ndescs;
	unsigned int		 sc_nqueue_pairs;	/* currently in use */
	unsigned int		 sc_nqps_alloc;		/* allocated in sc_qps */
	unsigned int		 sc_nqps_vsi;		/* granted by the PF */
	unsigned int		 sc_nqps_req;		/* requested from the PF */
	struct iavf_queue_pair	*sc_qps;
	bool			 sc_txrx_workqueue;
	u_int			 sc_tx_intr_process_limit;
	u_int			 sc_tx_process_limit;
	u_int			 sc_rx_intr_process_limit;
	u_int			 sc_rx_process_limit;

	/* deferred work */
	struct workqueue	*sc_workq;
	struct workqueue	*sc_workq_txrx;
	struct iavf_work	 sc_reset_task;
	struct iavf_work	 sc_wdto_task;
	struct iavf_work	 sc_req_queues_task;
	bool			 sc_req_queues_retried;
	bool			 sc_resetting;
	bool			 sc_reset_up;

	struct sysctllog	*sc_sysctllog;
	struct iavf_stat_counters
				 sc_stat_counters;
};
418
/*
 * IAVF_LOG(sc, level, fmt, ...): route driver messages to the autoconf
 * aprint_*() family while the device is still attaching (sc_attached not
 * yet set) and to log(9), prefixed with the interface name, afterwards.
 */
#define IAVF_LOG(_sc, _lvl, _fmt, _args...)				\
do {									\
	if (!(_sc)->sc_attached) {					\
		switch (_lvl) {						\
		case LOG_ERR:						\
		case LOG_WARNING:					\
			aprint_error_dev((_sc)->sc_dev, _fmt, ##_args);	\
			break;						\
		case LOG_INFO:						\
			aprint_normal_dev((_sc)->sc_dev,_fmt, ##_args);	\
			break;						\
		case LOG_DEBUG:						\
		default:						\
			aprint_debug_dev((_sc)->sc_dev, _fmt, ##_args);	\
		}							\
	} else {							\
		struct ifnet *_ifp = &(_sc)->sc_ec.ec_if;		\
		log((_lvl), "%s: " _fmt, _ifp->if_xname, ##_args);	\
	}								\
} while (0)
439
440 static int iavf_dmamem_alloc(bus_dma_tag_t, struct ixl_dmamem *,
441 bus_size_t, bus_size_t);
442 static void iavf_dmamem_free(bus_dma_tag_t, struct ixl_dmamem *);
443 static struct ixl_aq_buf *
444 iavf_aqb_get(struct iavf_softc *, struct ixl_aq_bufs *);
445 static struct ixl_aq_buf *
446 iavf_aqb_get_locked(struct ixl_aq_bufs *);
447 static void iavf_aqb_put_locked(struct ixl_aq_bufs *, struct ixl_aq_buf *);
448 static void iavf_aqb_clean(struct ixl_aq_bufs *, bus_dma_tag_t);
449
450 static const struct iavf_product *
451 iavf_lookup(const struct pci_attach_args *);
452 static enum i40e_mac_type
453 iavf_mactype(pci_product_id_t);
454 static void iavf_pci_csr_setup(pci_chipset_tag_t, pcitag_t);
455 static int iavf_wait_active(struct iavf_softc *);
456 static bool iavf_is_etheranyaddr(const uint8_t *);
457 static void iavf_prepare_fakeaddr(struct iavf_softc *);
458 static int iavf_replace_lla(struct ifnet *,
459 const uint8_t *, const uint8_t *);
460 static void iavf_evcnt_attach(struct evcnt *,
461 const char *, const char *);
462 static int iavf_setup_interrupts(struct iavf_softc *);
463 static void iavf_teardown_interrupts(struct iavf_softc *);
464 static int iavf_setup_sysctls(struct iavf_softc *);
465 static void iavf_teardown_sysctls(struct iavf_softc *);
466 static int iavf_setup_stats(struct iavf_softc *);
467 static void iavf_teardown_stats(struct iavf_softc *);
468 static struct workqueue *
469 iavf_workq_create(const char *, pri_t, int, int);
470 static void iavf_workq_destroy(struct workqueue *);
471 static int iavf_work_set(struct iavf_work *, void (*)(void *), void *);
472 static void iavf_work_add(struct workqueue *, struct iavf_work *);
473 static void iavf_work_wait(struct workqueue *, struct iavf_work *);
474 static unsigned int
475 iavf_calc_msix_count(struct iavf_softc *);
476 static unsigned int
477 iavf_calc_queue_pair_size(struct iavf_softc *);
478 static int iavf_queue_pairs_alloc(struct iavf_softc *);
479 static void iavf_queue_pairs_free(struct iavf_softc *);
480 static int iavf_arq_fill(struct iavf_softc *);
481 static void iavf_arq_refill(void *);
482 static int iavf_arq_poll(struct iavf_softc *, uint32_t, int);
483 static void iavf_atq_done(struct iavf_softc *);
484 static int iavf_init_admin_queue(struct iavf_softc *);
485 static void iavf_cleanup_admin_queue(struct iavf_softc *);
486 static int iavf_arq(struct iavf_softc *);
487 static int iavf_adminq_exec(struct iavf_softc *,
488 struct ixl_aq_desc *, struct ixl_aq_buf *);
489 static int iavf_adminq_poll(struct iavf_softc *,
490 struct ixl_aq_desc *, struct ixl_aq_buf *, int);
491 static int iavf_adminq_poll_locked(struct iavf_softc *,
492 struct ixl_aq_desc *, struct ixl_aq_buf *, int);
493 static int iavf_add_multi(struct iavf_softc *, uint8_t *, uint8_t *);
494 static int iavf_del_multi(struct iavf_softc *, uint8_t *, uint8_t *);
495 static void iavf_del_all_multi(struct iavf_softc *);
496
497 static int iavf_get_version(struct iavf_softc *, struct ixl_aq_buf *);
498 static int iavf_get_vf_resources(struct iavf_softc *, struct ixl_aq_buf *);
499 static int iavf_get_stats(struct iavf_softc *);
500 static int iavf_config_irq_map(struct iavf_softc *, struct ixl_aq_buf *);
501 static int iavf_config_vsi_queues(struct iavf_softc *);
502 static int iavf_config_hena(struct iavf_softc *);
503 static int iavf_config_rss_key(struct iavf_softc *);
504 static int iavf_config_rss_lut(struct iavf_softc *);
505 static int iavf_config_promisc_mode(struct iavf_softc *, int, int);
506 static int iavf_config_vlan_stripping(struct iavf_softc *, int);
507 static int iavf_config_vlan_id(struct iavf_softc *, uint16_t, uint32_t);
508 static int iavf_queue_select(struct iavf_softc *, int);
509 static int iavf_request_queues(struct iavf_softc *, unsigned int);
510 static int iavf_reset_vf(struct iavf_softc *);
511 static int iavf_eth_addr(struct iavf_softc *, const uint8_t *, uint32_t);
512 static void iavf_process_version(struct iavf_softc *,
513 struct ixl_aq_desc *, struct ixl_aq_buf *);
514 static void iavf_process_vf_resources(struct iavf_softc *,
515 struct ixl_aq_desc *, struct ixl_aq_buf *);
516 static void iavf_process_irq_map(struct iavf_softc *,
517 struct ixl_aq_desc *);
518 static void iavf_process_vc_event(struct iavf_softc *,
519 struct ixl_aq_desc *, struct ixl_aq_buf *);
520 static void iavf_process_stats(struct iavf_softc *,
521 struct ixl_aq_desc *, struct ixl_aq_buf *);
522 static void iavf_process_req_queues(struct iavf_softc *,
523 struct ixl_aq_desc *, struct ixl_aq_buf *);
524
525 static int iavf_intr(void *);
526 static int iavf_queue_intr(void *);
527 static void iavf_tick(void *);
528 static void iavf_tick_halt(void *);
529 static void iavf_reset_request(void *);
530 static void iavf_reset_start(void *);
531 static void iavf_reset(void *);
532 static void iavf_reset_finish(struct iavf_softc *);
533 static int iavf_init(struct ifnet *);
534 static int iavf_init_locked(struct iavf_softc *);
535 static void iavf_stop(struct ifnet *, int);
536 static void iavf_stop_locked(struct iavf_softc *);
537 static int iavf_ioctl(struct ifnet *, u_long, void *);
538 static void iavf_start(struct ifnet *);
539 static int iavf_transmit(struct ifnet *, struct mbuf*);
540 static int iavf_watchdog(struct iavf_tx_ring *);
541 static void iavf_watchdog_timeout(void *);
542 static int iavf_media_change(struct ifnet *);
543 static void iavf_media_status(struct ifnet *, struct ifmediareq *);
544 static int iavf_ifflags_cb(struct ethercom *);
545 static int iavf_vlan_cb(struct ethercom *, uint16_t, bool);
546 static void iavf_deferred_transmit(void *);
547 static void iavf_handle_queue(void *);
548 static void iavf_handle_queue_wk(struct work *, void *);
549 static int iavf_reinit(struct iavf_softc *);
550 static int iavf_rxfill(struct iavf_softc *, struct iavf_rx_ring *);
551 static void iavf_txr_clean(struct iavf_softc *, struct iavf_tx_ring *);
552 static void iavf_rxr_clean(struct iavf_softc *, struct iavf_rx_ring *);
553 static int iavf_txeof(struct iavf_softc *, struct iavf_tx_ring *,
554 u_int, struct evcnt *);
555 static int iavf_rxeof(struct iavf_softc *, struct iavf_rx_ring *,
556 u_int, struct evcnt *);
557 static int iavf_iff(struct iavf_softc *);
558 static int iavf_iff_locked(struct iavf_softc *);
559 static void iavf_post_request_queues(void *);
560 static int iavf_sysctl_itr_handler(SYSCTLFN_PROTO);
561
562 static int iavf_match(device_t, cfdata_t, void *);
563 static void iavf_attach(device_t, device_t, void*);
564 static int iavf_detach(device_t, int);
565 static int iavf_finalize_teardown(device_t);
566
CFATTACH_DECL3_NEW(iavf, sizeof(struct iavf_softc),
    iavf_match, iavf_attach, iavf_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

/* PCI devices this driver attaches to. */
static const struct iavf_product iavf_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_VF },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_VF_HV },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X722_VF },
	/* required last entry */
	{0, 0}
};

/* Link-speed table; presumably indexed by the virtchnl link-speed
 * code reported by the PF — confirm against the users of this table. */
static const struct iavf_link_speed iavf_link_speeds[] = {
	{ 0, 0 },
	{ IF_Mbps(100), IFM_100_TX },
	{ IF_Mbps(1000), IFM_1000_T },
	{ IF_Gbps(10), IFM_10G_T },
	{ IF_Gbps(40), IFM_40G_CR4 },
	{ IF_Gbps(20), IFM_20G_KR2 },
	{ IF_Gbps(25), IFM_25G_CR }
};
588
/*
 * VF admin queue register assignment.
 * NOTE(review): atq_head_mask is initialized from the ARQ constant
 * (I40E_VF_ARQH1_ARQH_MASK) rather than an ATQ-specific one.  The two
 * head masks have the same value on this hardware, so behavior is
 * unaffected, but verify whether if_ixlreg.h provides
 * I40E_VF_ATQH1_ATQH_MASK and use it here if so.
 */
static const struct iavf_aq_regs iavf_aq_regs = {
	.atq_tail	= I40E_VF_ATQT1,
	.atq_tail_mask	= I40E_VF_ATQT1_ATQT_MASK,
	.atq_head	= I40E_VF_ATQH1,
	.atq_head_mask	= I40E_VF_ARQH1_ARQH_MASK,
	.atq_len	= I40E_VF_ATQLEN1,
	.atq_bal	= I40E_VF_ATQBAL1,
	.atq_bah	= I40E_VF_ATQBAH1,
	.atq_len_enable	= I40E_VF_ATQLEN1_ATQENABLE_MASK,

	.arq_tail	= I40E_VF_ARQT1,
	.arq_tail_mask	= I40E_VF_ARQT1_ARQT_MASK,
	.arq_head	= I40E_VF_ARQH1,
	.arq_head_mask	= I40E_VF_ARQH1_ARQH_MASK,
	.arq_len	= I40E_VF_ARQLEN1,
	.arq_bal	= I40E_VF_ARQBAL1,
	.arq_bah	= I40E_VF_ARQBAH1,
	.arq_len_enable	= I40E_VF_ARQLEN1_ARQENABLE_MASK,
};
608
/* Default module tunables; copied into the softc in iavf_attach(). */
static struct iavf_module_params iavf_params = {
	.debug = 0,
	.rx_itr = 0x07a, /* 4K intrs/sec */
	.tx_itr = 0x07a, /* 4K intrs/sec */
	.tx_ndescs = 512,
	.rx_ndescs = 256,
	.max_qps = INT_MAX,	/* effectively "no limit" */
};
617
/* Busy-wait for _x milliseconds. */
#define delaymsec(_x)	DELAY(1000 * (_x))
/* 32-bit register read/write over the mapped BAR. */
#define iavf_rd(_s, _r) \
	bus_space_read_4((_s)->sc_memt, (_s)->sc_memh, (_r))
#define iavf_wr(_s, _r, _v) \
	bus_space_write_4((_s)->sc_memt, (_s)->sc_memh, (_r), (_v))
#define iavf_barrier(_s, _r, _l, _o) \
	bus_space_barrier((_s)->sc_memt, (_s)->sc_memh, (_r), (_l), (_o))
/* Read back a harmless register to flush posted writes. */
#define iavf_flush(_s)	(void)iavf_rd((_s), I40E_VFGEN_RSTAT)
/*
 * NOTE(review): iavf_nqueues() evaluates to 1 << (sc_nqueue_pairs - 1),
 * which equals the pair count only if sc_nqueue_pairs holds a power-of-two
 * exponent; verify against the callers before relying on it.
 */
#define iavf_nqueues(_sc)	(1 << ((_sc)->sc_nqueue_pairs - 1))
#define iavf_allqueues(_sc)	((1 << ((_sc)->sc_nqueue_pairs)) - 1)
628
/* Wait until every in-flight interrupt handler has finished. */
static inline void
iavf_intr_barrier(void)
{

	/* cross-call to all CPUs; returns after running handlers drain */
	xc_barrier(0);
}
/*
 * Enable the misc/admin-queue interrupt (vector 0) and unmask the
 * admin-queue cause, then flush the posted writes.
 */
static inline void
iavf_intr_enable(struct iavf_softc *sc)
{

	iavf_wr(sc, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL0_INTENA_MASK |
	    I40E_VFINT_DYN_CTL0_CLEARPBA_MASK |
	    (IAVF_NOITR << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT));
	iavf_wr(sc, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);
	iavf_flush(sc);
}
646
/* Disable the misc/admin-queue interrupt and mask all ICR0 causes. */
static inline void
iavf_intr_disable(struct iavf_softc *sc)
{

	iavf_wr(sc, I40E_VFINT_DYN_CTL01,
	    (IAVF_NOITR << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT));
	iavf_wr(sc, I40E_VFINT_ICR0_ENA1, 0);
	iavf_flush(sc);
}
656
/* Enable the interrupt for one TX/RX queue pair. */
static inline void
iavf_queue_intr_enable(struct iavf_softc *sc, unsigned int qid)
{

	iavf_wr(sc, I40E_VFINT_DYN_CTLN1(qid),
	    I40E_VFINT_DYN_CTLN1_INTENA_MASK |
	    I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
	    (IAVF_NOITR << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT));
	iavf_flush(sc);
}
667
/* Disable the interrupt for one TX/RX queue pair. */
static inline void
iavf_queue_intr_disable(struct iavf_softc *sc, unsigned int qid)
{

	iavf_wr(sc, I40E_VFINT_DYN_CTLN1(qid),
	    (IAVF_NOITR << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT));
	iavf_flush(sc);
}
676
677 static inline void
678 iavf_aq_vc_set_opcode(struct ixl_aq_desc *iaq, uint32_t opcode)
679 {
680 struct iavf_aq_vc *vc;
681
682 vc = (struct iavf_aq_vc *)&iaq->iaq_cookie;
683 vc->iaq_vc_opcode = htole32(opcode);
684 }
685
686 static inline uint32_t
687 iavf_aq_vc_get_opcode(const struct ixl_aq_desc *iaq)
688 {
689 const struct iavf_aq_vc *vc;
690
691 vc = (const struct iavf_aq_vc *)&iaq->iaq_cookie;
692 return le32toh(vc->iaq_vc_opcode);
693 }
694
695 static inline uint32_t
696 iavf_aq_vc_get_retval(const struct ixl_aq_desc *iaq)
697 {
698 const struct iavf_aq_vc *vc;
699
700 vc = (const struct iavf_aq_vc *)&iaq->iaq_cookie;
701 return le32toh(vc->iaq_vc_retval);
702 }
703
704 static int
705 iavf_match(device_t parent, cfdata_t match, void *aux)
706 {
707 const struct pci_attach_args *pa = aux;
708
709 return (iavf_lookup(pa) != NULL) ? 1 : 0;
710 }
711
712 static void
713 iavf_attach(device_t parent, device_t self, void *aux)
714 {
715 struct iavf_softc *sc;
716 struct pci_attach_args *pa = aux;
717 struct ifnet *ifp;
718 struct ixl_aq_buf *aqb;
719 pcireg_t memtype;
720 char xnamebuf[MAXCOMLEN];
721 int error, i;
722
723 sc = device_private(self);
724 sc->sc_dev = self;
725 ifp = &sc->sc_ec.ec_if;
726
727 sc->sc_pa = *pa;
728 sc->sc_dmat = (pci_dma64_available(pa)) ? pa->pa_dmat64 : pa->pa_dmat;
729 sc->sc_aq_regs = &iavf_aq_regs;
730 sc->sc_debuglevel = iavf_params.debug;
731 sc->sc_tx_ring_ndescs = iavf_params.tx_ndescs;
732 sc->sc_rx_ring_ndescs = iavf_params.rx_ndescs;
733 sc->sc_tx_itr = iavf_params.tx_itr;
734 sc->sc_rx_itr = iavf_params.rx_itr;
735 sc->sc_nqps_req = MIN(ncpu, iavf_params.max_qps);
736 iavf_prepare_fakeaddr(sc);
737
738 sc->sc_mac_type = iavf_mactype(PCI_PRODUCT(pa->pa_id));
739 iavf_pci_csr_setup(pa->pa_pc, pa->pa_tag);
740
741 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IAVF_PCIREG);
742 if (pci_mapreg_map(pa, IAVF_PCIREG, memtype, 0,
743 &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems)) {
744 aprint_error(": unable to map registers\n");
745 return;
746 }
747
748 if (iavf_wait_active(sc) != 0) {
749 aprint_error(": VF reset timed out\n");
750 goto unmap;
751 }
752
753 mutex_init(&sc->sc_cfg_lock, MUTEX_DEFAULT, IPL_SOFTNET);
754 mutex_init(&sc->sc_adminq_lock, MUTEX_DEFAULT, IPL_NET);
755 SIMPLEQ_INIT(&sc->sc_atq_idle);
756 SIMPLEQ_INIT(&sc->sc_atq_live);
757 SIMPLEQ_INIT(&sc->sc_arq_idle);
758 SIMPLEQ_INIT(&sc->sc_arq_live);
759 sc->sc_arq_cons = 0;
760 sc->sc_arq_prod = 0;
761 aqb = NULL;
762
763 if (iavf_dmamem_alloc(sc->sc_dmat, &sc->sc_atq,
764 sizeof(struct ixl_aq_desc) * IAVF_AQ_NUM, IAVF_AQ_ALIGN) != 0) {
765 aprint_error(": unable to allocate atq\n");
766 goto free_mutex;
767 }
768
769 if (iavf_dmamem_alloc(sc->sc_dmat, &sc->sc_arq,
770 sizeof(struct ixl_aq_desc) * IAVF_AQ_NUM, IAVF_AQ_ALIGN) != 0) {
771 aprint_error(": unable to allocate arq\n");
772 goto free_atq;
773 }
774
775 for (i = 0; i < IAVF_AQ_NUM; i++) {
776 aqb = iavf_aqb_get(sc, NULL);
777 if (aqb != NULL) {
778 iavf_aqb_put_locked(&sc->sc_arq_idle, aqb);
779 }
780 }
781 aqb = NULL;
782
783 if (!iavf_arq_fill(sc)) {
784 aprint_error(": unable to fill arq descriptors\n");
785 goto free_arq;
786 }
787
788 if (iavf_init_admin_queue(sc) != 0) {
789 aprint_error(": unable to initialize admin queue\n");
790 goto shutdown;
791 }
792
793 aqb = iavf_aqb_get(sc, NULL);
794 if (aqb == NULL) {
795 aprint_error(": unable to allocate buffer for ATQ\n");
796 goto shutdown;
797 }
798
799 error = iavf_get_version(sc, aqb);
800 switch (error) {
801 case 0:
802 break;
803 case ETIMEDOUT:
804 aprint_error(": timeout waiting for VF version\n");
805 goto shutdown;
806 case ENOTSUP:
807 aprint_error(": unsupported VF version %d\n", sc->sc_major_ver);
808 goto shutdown;
809 default:
810 aprint_error(":unable to get VF interface version\n");
811 goto shutdown;
812 }
813
814 if (iavf_get_vf_resources(sc, aqb) != 0) {
815 aprint_error(": timeout waiting for VF resources\n");
816 goto shutdown;
817 }
818
819 aprint_normal(", VF version %d.%d%s",
820 sc->sc_major_ver, sc->sc_minor_ver,
821 (sc->sc_minor_ver > IAVF_VF_MINOR) ? "(minor mismatch)" : "");
822 aprint_normal(", VF %d, VSI %d", sc->sc_vf_id, sc->sc_vsi_id);
823 aprint_normal("\n");
824 aprint_naive("\n");
825
826 aprint_normal_dev(self, "Ethernet address %s\n",
827 ether_sprintf(sc->sc_enaddr));
828
829 if (iavf_queue_pairs_alloc(sc) != 0) {
830 goto shutdown;
831 }
832
833 if (iavf_setup_interrupts(sc) != 0) {
834 goto free_queue_pairs;
835 }
836
837 if (iavf_config_irq_map(sc, aqb) != 0) {
838 aprint_error(", timed out waiting for IRQ map response\n");
839 goto teardown_intrs;
840 }
841
842 if (iavf_setup_sysctls(sc) != 0) {
843 goto teardown_intrs;
844 }
845
846 if (iavf_setup_stats(sc) != 0) {
847 goto teardown_sysctls;
848 }
849
850 iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
851 aqb = NULL;
852
853 snprintf(xnamebuf, sizeof(xnamebuf),
854 "%s_adminq_cv", device_xname(self));
855 cv_init(&sc->sc_adminq_cv, xnamebuf);
856
857 callout_init(&sc->sc_tick, CALLOUT_MPSAFE);
858 callout_setfunc(&sc->sc_tick, iavf_tick, sc);
859
860 iavf_work_set(&sc->sc_reset_task, iavf_reset_start, sc);
861 iavf_work_set(&sc->sc_arq_refill, iavf_arq_refill, sc);
862 iavf_work_set(&sc->sc_wdto_task, iavf_watchdog_timeout, sc);
863 iavf_work_set(&sc->sc_req_queues_task, iavf_post_request_queues, sc);
864 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_cfg", device_xname(self));
865 sc->sc_workq = iavf_workq_create(xnamebuf, IAVF_WORKQUEUE_PRI,
866 IPL_NET, WQ_MPSAFE);
867 if (sc->sc_workq == NULL)
868 goto destroy_cv;
869
870 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_txrx", device_xname(self));
871 error = workqueue_create(&sc->sc_workq_txrx, xnamebuf,
872 iavf_handle_queue_wk, sc, IAVF_WORKQUEUE_PRI, IPL_NET,
873 WQ_PERCPU|WQ_MPSAFE);
874 if (error != 0) {
875 sc->sc_workq_txrx = NULL;
876 goto teardown_wqs;
877 }
878
879 error = if_initialize(ifp);
880 if (error != 0) {
881 aprint_error_dev(self, "if_initialize failed=%d\n", error);
882 goto teardown_wqs;
883 }
884
885 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
886
887 ifp->if_softc = sc;
888 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
889 ifp->if_extflags = IFEF_MPSAFE;
890 ifp->if_ioctl = iavf_ioctl;
891 ifp->if_start = iavf_start;
892 ifp->if_transmit = iavf_transmit;
893 ifp->if_watchdog = NULL;
894 ifp->if_init = iavf_init;
895 ifp->if_stop = iavf_stop;
896
897 IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_ndescs);
898 IFQ_SET_READY(&ifp->if_snd);
899 sc->sc_ipq = if_percpuq_create(ifp);
900
901 ifp->if_capabilities |= IAVF_IFCAP_RXCSUM;
902 ifp->if_capabilities |= IAVF_IFCAP_TXCSUM;
903
904 ether_set_vlan_cb(&sc->sc_ec, iavf_vlan_cb);
905 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
906 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
907 sc->sc_ec.ec_capenable = sc->sc_ec.ec_capabilities;
908
909 ether_set_ifflags_cb(&sc->sc_ec, iavf_ifflags_cb);
910
911 sc->sc_ec.ec_ifmedia = &sc->sc_media;
912 ifmedia_init_with_lock(&sc->sc_media, IFM_IMASK, iavf_media_change,
913 iavf_media_status, &sc->sc_cfg_lock);
914
915 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
916 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
917
918 if_deferred_start_init(ifp, NULL);
919 ether_ifattach(ifp, sc->sc_enaddr);
920
921 sc->sc_txrx_workqueue = true;
922 sc->sc_tx_process_limit = IAVF_TX_PROCESS_LIMIT;
923 sc->sc_rx_process_limit = IAVF_RX_PROCESS_LIMIT;
924 sc->sc_tx_intr_process_limit = IAVF_TX_INTR_PROCESS_LIMIT;
925 sc->sc_rx_intr_process_limit = IAVF_RX_INTR_PROCESS_LIMIT;
926
927 if_register(ifp);
928 if_link_state_change(ifp, sc->sc_link_state);
929 iavf_intr_enable(sc);
930 if (sc->sc_nqps_vsi < sc->sc_nqps_req)
931 iavf_work_add(sc->sc_workq, &sc->sc_req_queues_task);
932 sc->sc_attached = true;
933 return;
934
935 teardown_wqs:
936 config_finalize_register(self, iavf_finalize_teardown);
937 destroy_cv:
938 cv_destroy(&sc->sc_adminq_cv);
939 callout_destroy(&sc->sc_tick);
940 iavf_teardown_stats(sc);
941 teardown_sysctls:
942 iavf_teardown_sysctls(sc);
943 teardown_intrs:
944 iavf_teardown_interrupts(sc);
945 free_queue_pairs:
946 iavf_queue_pairs_free(sc);
947 shutdown:
948 if (aqb != NULL)
949 iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
950 iavf_cleanup_admin_queue(sc);
951 iavf_aqb_clean(&sc->sc_atq_idle, sc->sc_dmat);
952 iavf_aqb_clean(&sc->sc_arq_idle, sc->sc_dmat);
953 free_arq:
954 iavf_dmamem_free(sc->sc_dmat, &sc->sc_arq);
955 free_atq:
956 iavf_dmamem_free(sc->sc_dmat, &sc->sc_atq);
957 free_mutex:
958 mutex_destroy(&sc->sc_cfg_lock);
959 mutex_destroy(&sc->sc_adminq_lock);
960 unmap:
961 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
962 sc->sc_mems = 0;
963 sc->sc_attached = false;
964 }
965
static int
iavf_detach(device_t self, int flags)
{
	struct iavf_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ec.ec_if;

	/* Nothing to undo if attach never completed. */
	if (!sc->sc_attached)
		return 0;

	iavf_stop(ifp, 1);

	/*
	 * set a dummy function to halt callout safely
	 * even if a workqueue entry calls callout_schedule()
	 */
	callout_setfunc(&sc->sc_tick, iavf_tick_halt, sc);
	iavf_work_wait(sc->sc_workq, &sc->sc_reset_task);
	iavf_work_wait(sc->sc_workq, &sc->sc_wdto_task);

	callout_halt(&sc->sc_tick, NULL);
	callout_destroy(&sc->sc_tick);

	/* detach the I/F before stop adminq due to callbacks */
	ether_ifdetach(ifp);
	if_detach(ifp);
	ifmedia_fini(&sc->sc_media);
	if_percpuq_destroy(sc->sc_ipq);

	/* Quiesce interrupts, then wait for the ARQ refill work to drain. */
	iavf_intr_disable(sc);
	iavf_intr_barrier();
	iavf_work_wait(sc->sc_workq, &sc->sc_arq_refill);

	/* Shut the admin queue down and release its DMA resources. */
	mutex_enter(&sc->sc_adminq_lock);
	iavf_cleanup_admin_queue(sc);
	mutex_exit(&sc->sc_adminq_lock);
	iavf_aqb_clean(&sc->sc_atq_idle, sc->sc_dmat);
	iavf_aqb_clean(&sc->sc_arq_idle, sc->sc_dmat);
	iavf_dmamem_free(sc->sc_dmat, &sc->sc_arq);
	iavf_dmamem_free(sc->sc_dmat, &sc->sc_atq);
	cv_destroy(&sc->sc_adminq_cv);

	iavf_workq_destroy(sc->sc_workq);
	sc->sc_workq = NULL;

	/* Release queue pairs, interrupts, sysctls, counters and the BAR. */
	iavf_queue_pairs_free(sc);
	iavf_teardown_interrupts(sc);
	iavf_teardown_sysctls(sc);
	iavf_teardown_stats(sc);
	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);

	/* Locks go last; nothing above may still be using them. */
	mutex_destroy(&sc->sc_adminq_lock);
	mutex_destroy(&sc->sc_cfg_lock);

	return 0;
}
1021
1022 static int
1023 iavf_finalize_teardown(device_t self)
1024 {
1025 struct iavf_softc *sc = device_private(self);
1026
1027 if (sc->sc_workq != NULL) {
1028 iavf_workq_destroy(sc->sc_workq);
1029 sc->sc_workq = NULL;
1030 }
1031
1032 if (sc->sc_workq_txrx != NULL) {
1033 workqueue_destroy(sc->sc_workq_txrx);
1034 sc->sc_workq_txrx = NULL;
1035 }
1036
1037 return 0;
1038 }
1039
1040 static int
1041 iavf_init(struct ifnet *ifp)
1042 {
1043 struct iavf_softc *sc;
1044 int rv;
1045
1046 sc = ifp->if_softc;
1047 mutex_enter(&sc->sc_cfg_lock);
1048 rv = iavf_init_locked(sc);
1049 mutex_exit(&sc->sc_cfg_lock);
1050
1051 return rv;
1052 }
1053
1054 static int
1055 iavf_init_locked(struct iavf_softc *sc)
1056 {
1057 struct ifnet *ifp = &sc->sc_ec.ec_if;
1058 unsigned int i;
1059 int error;
1060
1061 KASSERT(mutex_owned(&sc->sc_cfg_lock));
1062
1063 if (ISSET(ifp->if_flags, IFF_RUNNING))
1064 iavf_stop_locked(sc);
1065
1066 if (sc->sc_resetting)
1067 return ENXIO;
1068
1069 error = iavf_reinit(sc);
1070 if (error) {
1071 iavf_stop_locked(sc);
1072 return error;
1073 }
1074
1075 SET(ifp->if_flags, IFF_RUNNING);
1076 CLR(ifp->if_flags, IFF_OACTIVE);
1077
1078 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1079 iavf_wr(sc, I40E_VFINT_ITRN1(IAVF_ITR_RX, i), sc->sc_rx_itr);
1080 iavf_wr(sc, I40E_VFINT_ITRN1(IAVF_ITR_TX, i), sc->sc_tx_itr);
1081 }
1082 iavf_wr(sc, I40E_VFINT_ITR01(IAVF_ITR_RX), sc->sc_rx_itr);
1083 iavf_wr(sc, I40E_VFINT_ITR01(IAVF_ITR_TX), sc->sc_tx_itr);
1084 iavf_wr(sc, I40E_VFINT_ITR01(IAVF_ITR_MISC), 0);
1085
1086 error = iavf_iff_locked(sc);
1087 if (error) {
1088 iavf_stop_locked(sc);
1089 return error;
1090 };
1091
1092 /* ETHERCAP_VLAN_HWFILTER can not be disabled */
1093 SET(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
1094
1095 callout_schedule(&sc->sc_tick, IAVF_TICK_INTERVAL);
1096 return 0;
1097 }
1098
static int
iavf_reinit(struct iavf_softc *sc)
{
	struct iavf_rx_ring *rxr;
	struct iavf_tx_ring *txr;
	unsigned int i;
	uint32_t reg;

	KASSERT(mutex_owned(&sc->sc_cfg_lock));

	sc->sc_reset_up = true;
	/* One of the sc_nintrs vectors is the misc interrupt. */
	sc->sc_nqueue_pairs = MIN(sc->sc_nqps_alloc, sc->sc_nintrs - 1);

	for (i = 0; i < sc->sc_nqueue_pairs; i++) {
		rxr = sc->sc_qps[i].qp_rxr;
		txr = sc->sc_qps[i].qp_txr;

		/* Pre-fill the Rx ring and disarm the Tx watchdog. */
		iavf_rxfill(sc, rxr);
		txr->txr_watchdog = IAVF_WATCHDOG_STOP;
	}

	/* Program the queue configuration through the admin queue. */
	if (iavf_config_vsi_queues(sc) != 0)
		return EIO;

	if (iavf_config_hena(sc) != 0)
		return EIO;

	/* RSS hash key and lookup table. */
	iavf_config_rss_key(sc);
	iavf_config_rss_lut(sc);

	for (i = 0; i < sc->sc_nqueue_pairs; i++) {
		iavf_queue_intr_enable(sc, i);
	}
	/* unmask */
	reg = iavf_rd(sc, I40E_VFINT_DYN_CTL01);
	reg |= (IAVF_NOITR << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT);
	iavf_wr(sc, I40E_VFINT_DYN_CTL01, reg);

	/* Finally ask the PF to start the queues. */
	if (iavf_queue_select(sc, IAVF_VC_OP_ENABLE_QUEUES) != 0)
		return EIO;

	return 0;
}
1142
1143 static void
1144 iavf_stop(struct ifnet *ifp, int disable)
1145 {
1146 struct iavf_softc *sc;
1147
1148 sc = ifp->if_softc;
1149 mutex_enter(&sc->sc_cfg_lock);
1150 iavf_stop_locked(sc);
1151 mutex_exit(&sc->sc_cfg_lock);
1152 }
1153
static void
iavf_stop_locked(struct iavf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct iavf_rx_ring *rxr;
	struct iavf_tx_ring *txr;
	uint32_t reg;
	unsigned int i;

	KASSERT(mutex_owned(&sc->sc_cfg_lock));

	CLR(ifp->if_flags, IFF_RUNNING);
	sc->sc_reset_up = false;
	callout_stop(&sc->sc_tick);

	/* Skip the admin-queue request while a VF reset is in progress. */
	if (!sc->sc_resetting) {
		/* disable queues*/
		if (iavf_queue_select(sc, IAVF_VC_OP_DISABLE_QUEUES) != 0) {
			goto die;
		}
	}

	for (i = 0; i < sc->sc_nqueue_pairs; i++) {
		iavf_queue_intr_disable(sc, i);
	}

	/* mask interrupts */
	reg = iavf_rd(sc, I40E_VFINT_DYN_CTL01);
	reg |= I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK |
	    (IAVF_NOITR << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT);
	iavf_wr(sc, I40E_VFINT_DYN_CTL01, reg);

	/* Drain the rings and wait for any in-flight per-queue work. */
	for (i = 0; i < sc->sc_nqueue_pairs; i++) {
		rxr = sc->sc_qps[i].qp_rxr;
		txr = sc->sc_qps[i].qp_txr;

		mutex_enter(&rxr->rxr_lock);
		iavf_rxr_clean(sc, rxr);
		mutex_exit(&rxr->rxr_lock);

		mutex_enter(&txr->txr_lock);
		iavf_txr_clean(sc, txr);
		mutex_exit(&txr->txr_lock);

		workqueue_wait(sc->sc_workq_txrx,
		    &sc->sc_qps[i].qp_work);
	}

	return;
die:
	/* Disabling the queues failed; schedule a VF reset to recover. */
	if (!sc->sc_dead) {
		sc->sc_dead = true;
		log(LOG_INFO, "%s: Request VF reset\n", ifp->if_xname);

		iavf_work_set(&sc->sc_reset_task, iavf_reset_request, sc);
		iavf_work_add(sc->sc_workq, &sc->sc_reset_task);
	}
	log(LOG_CRIT, "%s: failed to shut down rings\n", ifp->if_xname);
}
1213
1214 static int
1215 iavf_watchdog(struct iavf_tx_ring *txr)
1216 {
1217 struct iavf_softc *sc;
1218
1219 sc = txr->txr_sc;
1220
1221 mutex_enter(&txr->txr_lock);
1222
1223 if (txr->txr_watchdog == IAVF_WATCHDOG_STOP
1224 || --txr->txr_watchdog > 0) {
1225 mutex_exit(&txr->txr_lock);
1226 return 0;
1227 }
1228
1229 txr->txr_watchdog = IAVF_WATCHDOG_STOP;
1230 txr->txr_watchdogto.ev_count++;
1231 mutex_exit(&txr->txr_lock);
1232
1233 device_printf(sc->sc_dev, "watchdog timeout on queue %d\n",
1234 txr->txr_qid);
1235 return 1;
1236 }
1237
1238 static void
1239 iavf_watchdog_timeout(void *xsc)
1240 {
1241 struct iavf_softc *sc;
1242 struct ifnet *ifp;
1243
1244 sc = xsc;
1245 ifp = &sc->sc_ec.ec_if;
1246
1247 mutex_enter(&sc->sc_cfg_lock);
1248 if (ISSET(ifp->if_flags, IFF_RUNNING))
1249 iavf_init_locked(sc);
1250 mutex_exit(&sc->sc_cfg_lock);
1251 }
1252
1253 static int
1254 iavf_media_change(struct ifnet *ifp)
1255 {
1256 struct iavf_softc *sc;
1257 struct ifmedia *ifm;
1258
1259 sc = ifp->if_softc;
1260 ifm = &sc->sc_media;
1261
1262 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1263 return EINVAL;
1264
1265 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1266 case IFM_AUTO:
1267 break;
1268 default:
1269 return EINVAL;
1270 }
1271
1272 return 0;
1273 }
1274
1275 static void
1276 iavf_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1277 {
1278 struct iavf_softc *sc = ifp->if_softc;
1279
1280 KASSERT(mutex_owned(&sc->sc_cfg_lock));
1281
1282 ifmr->ifm_status = sc->sc_media_status;
1283 ifmr->ifm_active = sc->sc_media_active;
1284 }
1285
1286 static int
1287 iavf_ifflags_cb(struct ethercom *ec)
1288 {
1289 struct ifnet *ifp = &ec->ec_if;
1290 struct iavf_softc *sc = ifp->if_softc;
1291
1292 /* vlan hwfilter can not be disabled */
1293 SET(ec->ec_capenable, ETHERCAP_VLAN_HWFILTER);
1294
1295 return iavf_iff(sc);
1296 }
1297
/*
 * Ethercom callback: add or remove a VLAN id from the hardware filter.
 * Returns 0, EIO on an admin-queue failure, or ENXIO while resetting.
 */
static int
iavf_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
{
	struct ifnet *ifp = &ec->ec_if;
	struct iavf_softc *sc = ifp->if_softc;
	int rv;

	mutex_enter(&sc->sc_cfg_lock);

	if (sc->sc_resetting) {
		mutex_exit(&sc->sc_cfg_lock);

		/* all vlan id was already removed */
		if (!set)
			return 0;

		return ENXIO;
	}

	/* ETHERCAP_VLAN_HWFILTER can not be disabled */
	SET(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);

	if (set) {
		rv = iavf_config_vlan_id(sc, vid, IAVF_VC_OP_ADD_VLAN);
		/* keep stripping in sync when HWTAGGING is off */
		if (!ISSET(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWTAGGING)) {
			iavf_config_vlan_stripping(sc,
			    sc->sc_ec.ec_capenable);
		}
	} else {
		rv = iavf_config_vlan_id(sc, vid, IAVF_VC_OP_DEL_VLAN);
	}

	mutex_exit(&sc->sc_cfg_lock);

	if (rv != 0)
		return EIO;

	return 0;
}
1337
/*
 * Interface ioctl handler.  Handles MTU changes and multicast list
 * updates directly; everything else is delegated to ether_ioctl().
 * A resulting ENETRESET is converted into a filter reprogram.
 */
static int
iavf_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct ifreq *ifr = (struct ifreq *)data;
	struct iavf_softc *sc = (struct iavf_softc *)ifp->if_softc;
	const struct sockaddr *sa;
	uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
	int s, error = 0;
	unsigned int nmtu;

	switch (cmd) {
	case SIOCSIFMTU:
		nmtu = ifr->ifr_mtu;

		if (nmtu < IAVF_MIN_MTU || nmtu > IAVF_MAX_MTU) {
			error = EINVAL;
			break;
		}
		if (ifp->if_mtu != nmtu) {
			s = splnet();
			error = ether_ioctl(ifp, cmd, data);
			splx(s);
			/* MTU change requires a ring reconfiguration. */
			if (error == ENETRESET)
				error = iavf_init(ifp);
		}
		break;
	case SIOCADDMULTI:
		sa = ifreq_getaddr(SIOCADDMULTI, ifr);
		if (ether_addmulti(sa, &sc->sc_ec) == ENETRESET) {
			error = ether_multiaddr(sa, addrlo, addrhi);
			if (error != 0)
				return error;

			error = iavf_add_multi(sc, addrlo, addrhi);
			if (error != 0 && error != ENETRESET) {
				/* roll back the ethercom entry on failure */
				ether_delmulti(sa, &sc->sc_ec);
				error = EIO;
			}
		}
		break;

	case SIOCDELMULTI:
		sa = ifreq_getaddr(SIOCDELMULTI, ifr);
		if (ether_delmulti(sa, &sc->sc_ec) == ENETRESET) {
			error = ether_multiaddr(sa, addrlo, addrhi);
			if (error != 0)
				return error;

			error = iavf_del_multi(sc, addrlo, addrhi);
		}
		break;

	default:
		s = splnet();
		error = ether_ioctl(ifp, cmd, data);
		splx(s);
	}

	/* ENETRESET from the cases above means "reprogram filters". */
	if (error == ENETRESET)
		error = iavf_iff(sc);

	return error;
}
1401
1402 static int
1403 iavf_iff(struct iavf_softc *sc)
1404 {
1405 int error;
1406
1407 mutex_enter(&sc->sc_cfg_lock);
1408 error = iavf_iff_locked(sc);
1409 mutex_exit(&sc->sc_cfg_lock);
1410
1411 return error;
1412 }
1413
/*
 * Program the interface filter state: promiscuous/allmulti mode,
 * VLAN stripping, and the unicast MAC address.  Called with
 * sc_cfg_lock held; a no-op while the interface is down.
 */
static int
iavf_iff_locked(struct iavf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	int unicast, multicast;
	const uint8_t *enaddr;

	KASSERT(mutex_owned(&sc->sc_cfg_lock));

	if (!ISSET(ifp->if_flags, IFF_RUNNING))
		return 0;

	unicast = 0;
	multicast = 0;
	if (ISSET(ifp->if_flags, IFF_PROMISC)) {
		/* IFF_PROMISC implies receiving all multicast as well. */
		unicast = 1;
		multicast = 1;
	} else if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
		multicast = 1;
	}

	iavf_config_promisc_mode(sc, unicast, multicast);

	iavf_config_vlan_stripping(sc, sc->sc_ec.ec_capenable);

	/*
	 * If the lladdr changed, remove the previously programmed
	 * address (unless none was ever added) and add the new one.
	 */
	enaddr = CLLADDR(ifp->if_sadl);
	if (memcmp(enaddr, sc->sc_enaddr_added, ETHER_ADDR_LEN) != 0) {
		if (!iavf_is_etheranyaddr(sc->sc_enaddr_added)) {
			iavf_eth_addr(sc, sc->sc_enaddr_added,
			    IAVF_VC_OP_DEL_ETH_ADDR);
		}
		memcpy(sc->sc_enaddr_added, enaddr, ETHER_ADDR_LEN);
		iavf_eth_addr(sc, enaddr, IAVF_VC_OP_ADD_ETH_ADDR);
	}

	return 0;
}
1451
1452 static const struct iavf_product *
1453 iavf_lookup(const struct pci_attach_args *pa)
1454 {
1455 const struct iavf_product *iavfp;
1456
1457 for (iavfp = iavf_products; iavfp->vendor_id != 0; iavfp++) {
1458 if (PCI_VENDOR(pa->pa_id) == iavfp->vendor_id &&
1459 PCI_PRODUCT(pa->pa_id) == iavfp->product_id)
1460 return iavfp;
1461 }
1462
1463 return NULL;
1464 }
1465
1466 static enum i40e_mac_type
1467 iavf_mactype(pci_product_id_t id)
1468 {
1469
1470 switch (id) {
1471 case PCI_PRODUCT_INTEL_XL710_VF:
1472 case PCI_PRODUCT_INTEL_XL710_VF_HV:
1473 return I40E_MAC_VF;
1474 case PCI_PRODUCT_INTEL_X722_VF:
1475 return I40E_MAC_X722_VF;
1476 }
1477
1478 return I40E_MAC_GENERIC;
1479 }
1480
1481 static const struct iavf_link_speed *
1482 iavf_find_link_speed(struct iavf_softc *sc, uint32_t link_speed)
1483 {
1484 size_t i;
1485
1486 for (i = 0; i < __arraycount(iavf_link_speeds); i++) {
1487 if (link_speed & (1 << i))
1488 return (&iavf_link_speeds[i]);
1489 }
1490
1491 return NULL;
1492 }
1493
1494 static void
1495 iavf_pci_csr_setup(pci_chipset_tag_t pc, pcitag_t tag)
1496 {
1497 pcireg_t csr;
1498
1499 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
1500 csr |= (PCI_COMMAND_MASTER_ENABLE |
1501 PCI_COMMAND_MEM_ENABLE);
1502 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
1503 }
1504
1505 static int
1506 iavf_wait_active(struct iavf_softc *sc)
1507 {
1508 int tries;
1509 uint32_t reg;
1510
1511 for (tries = 0; tries < 100; tries++) {
1512 reg = iavf_rd(sc, I40E_VFGEN_RSTAT) &
1513 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1514 if (reg == IAVF_VFR_VFACTIVE ||
1515 reg == IAVF_VFR_COMPLETED)
1516 return 0;
1517
1518 delaymsec(10);
1519 }
1520
1521 return -1;
1522 }
1523
1524 static bool
1525 iavf_is_etheranyaddr(const uint8_t *enaddr)
1526 {
1527 static const uint8_t etheranyaddr[ETHER_ADDR_LEN] = {
1528 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1529 };
1530
1531 if (memcmp(enaddr, etheranyaddr, ETHER_ADDR_LEN) != 0)
1532 return false;
1533
1534 return true;
1535 }
1536
1537 static void
1538 iavf_prepare_fakeaddr(struct iavf_softc *sc)
1539 {
1540 uint64_t rndval;
1541
1542 if (!iavf_is_etheranyaddr(sc->sc_enaddr_fake))
1543 return;
1544
1545 rndval = cprng_strong64();
1546
1547 memcpy(sc->sc_enaddr_fake, &rndval, sizeof(sc->sc_enaddr_fake));
1548 sc->sc_enaddr_fake[0] &= 0xFE;
1549 sc->sc_enaddr_fake[0] |= 0x02;
1550 }
1551
/*
 * Replace the interface's link-level address "prev" with "next".
 * Returns 0 on success, EINVAL/ENOMEM on failure, or ENETRESET when
 * the interface is running and the caller must reprogram the hardware.
 * Called with the ifnet lock held.
 */
static int
iavf_replace_lla(struct ifnet *ifp, const uint8_t *prev, const uint8_t *next)
{
	/* Scratch storage large enough for a link-level sockaddr. */
	union {
		struct sockaddr sa;
		struct sockaddr_dl sdl;
		struct sockaddr_storage ss;
	} u;
	struct psref psref_prev, psref_next;
	struct ifaddr *ifa_prev, *ifa_next;
	const struct sockaddr_dl *nsdl;
	int s, error;

	KASSERT(IFNET_LOCKED(ifp));

	error = 0;
	ifa_prev = ifa_next = NULL;

	/* Nothing to do if the address is unchanged. */
	if (memcmp(prev, next, ETHER_ADDR_LEN) == 0) {
		goto done;
	}

	/* Look up the ifaddr holding the previous address. */
	if (sockaddr_dl_init(&u.sdl, sizeof(u.ss), ifp->if_index,
	    ifp->if_type, ifp->if_xname, strlen(ifp->if_xname),
	    prev, ETHER_ADDR_LEN) == NULL) {
		error = EINVAL;
		goto done;
	}

	s = pserialize_read_enter();
	IFADDR_READER_FOREACH(ifa_prev, ifp) {
		if (sockaddr_cmp(&u.sa, ifa_prev->ifa_addr) == 0) {
			ifa_acquire(ifa_prev, &psref_prev);
			break;
		}
	}
	pserialize_read_exit(s);

	/* Look up (or create, below) the ifaddr for the new address. */
	if (sockaddr_dl_init(&u.sdl, sizeof(u.ss), ifp->if_index,
	    ifp->if_type, ifp->if_xname, strlen(ifp->if_xname),
	    next, ETHER_ADDR_LEN) == NULL) {
		error = EINVAL;
		goto done;
	}

	s = pserialize_read_enter();
	IFADDR_READER_FOREACH(ifa_next, ifp) {
		if (sockaddr_cmp(&u.sa, ifa_next->ifa_addr) == 0) {
			ifa_acquire(ifa_next, &psref_next);
			break;
		}
	}
	pserialize_read_exit(s);

	if (ifa_next == NULL) {
		/* No existing entry for the new address: create one. */
		nsdl = &u.sdl;
		ifa_next = if_dl_create(ifp, &nsdl);
		if (ifa_next == NULL) {
			error = ENOMEM;
			goto done;
		}

		s = pserialize_read_enter();
		ifa_acquire(ifa_next, &psref_next);
		pserialize_read_exit(s);

		sockaddr_copy(ifa_next->ifa_addr,
		    ifa_next->ifa_addr->sa_len, &u.sa);
		ifa_insert(ifp, ifa_next);
	} else {
		nsdl = NULL;
	}

	/* If the old address was the active one, switch to the new one. */
	if (ifa_prev != NULL && ifa_prev == ifp->if_dl) {
		if_activate_sadl(ifp, ifa_next, nsdl);
	}

	ifa_release(ifa_next, &psref_next);
	ifa_next = NULL;

	/* Remove the old entry unless it is the hardware address. */
	if (ifa_prev != NULL && ifa_prev != ifp->if_hwdl) {
		ifaref(ifa_prev);
		ifa_release(ifa_prev, &psref_prev);
		ifa_remove(ifp, ifa_prev);
		KASSERTMSG(ifa_prev->ifa_refcnt == 1, "ifa_refcnt=%d",
		    ifa_prev->ifa_refcnt);
		ifafree(ifa_prev);
		ifa_prev = NULL;
	}

	if (ISSET(ifp->if_flags, IFF_RUNNING))
		error = ENETRESET;

done:
	/* Drop any references still held on the error paths. */
	if (ifa_prev != NULL)
		ifa_release(ifa_prev, &psref_prev);
	if (ifa_next != NULL)
		ifa_release(ifa_next, &psref_next);

	return error;
}
1653 static int
1654 iavf_add_multi(struct iavf_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
1655 {
1656 struct ifnet *ifp = &sc->sc_ec.ec_if;
1657 int rv;
1658
1659 if (ISSET(ifp->if_flags, IFF_ALLMULTI))
1660 return 0;
1661
1662 if (memcmp(addrlo, addrhi, ETHER_ADDR_LEN) != 0) {
1663 iavf_del_all_multi(sc);
1664 SET(ifp->if_flags, IFF_ALLMULTI);
1665 return ENETRESET;
1666 }
1667
1668 rv = iavf_eth_addr(sc, addrlo, IAVF_VC_OP_ADD_ETH_ADDR);
1669
1670 if (rv == ENOSPC) {
1671 iavf_del_all_multi(sc);
1672 SET(ifp->if_flags, IFF_ALLMULTI);
1673 return ENETRESET;
1674 }
1675
1676 return rv;
1677 }
1678
/*
 * Remove a multicast address.  When not in ALLMULTI, simply delete
 * the filter.  When in ALLMULTI, try to leave it by re-adding every
 * remaining address to the HW filter; stay in ALLMULTI (and roll
 * back) if any range entry exists or the filter table fills up.
 */
static int
iavf_del_multi(struct iavf_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct ethercom *ec = &sc->sc_ec;
	struct ether_multi *enm, *enm_last;
	struct ether_multistep step;
	int error, rv = 0;

	if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
		/* Ranges were never programmed individually. */
		if (memcmp(addrlo, addrhi, ETHER_ADDR_LEN) != 0)
			return 0;

		iavf_eth_addr(sc, addrlo, IAVF_VC_OP_DEL_ETH_ADDR);
		return 0;
	}

	ETHER_LOCK(ec);
	/* Any remaining range entry forces us to stay in ALLMULTI. */
	for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
	    ETHER_NEXT_MULTI(step, enm)) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0) {
			goto out;
		}
	}

	/* Try to program every remaining address individually. */
	for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
	    ETHER_NEXT_MULTI(step, enm)) {
		error = iavf_eth_addr(sc, enm->enm_addrlo,
		    IAVF_VC_OP_ADD_ETH_ADDR);
		if (error != 0)
			break;
	}

	if (enm != NULL) {
		/* Adding failed part-way: roll back what was added. */
		enm_last = enm;
		for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
		    ETHER_NEXT_MULTI(step, enm)) {
			if (enm == enm_last)
				break;

			iavf_eth_addr(sc, enm->enm_addrlo,
			    IAVF_VC_OP_DEL_ETH_ADDR);
		}
	} else {
		/* All addresses programmed; ALLMULTI no longer needed. */
		CLR(ifp->if_flags, IFF_ALLMULTI);
		rv = ENETRESET;
	}

out:
	ETHER_UNLOCK(ec);
	return rv;
}
1732
/*
 * Remove every multicast address from the hardware filter.  The
 * ethercom list itself is left untouched.
 */
static void
iavf_del_all_multi(struct iavf_softc *sc)
{
	struct ethercom *ec = &sc->sc_ec;
	struct ether_multi *enm;
	struct ether_multistep step;

	ETHER_LOCK(ec);
	for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
	    ETHER_NEXT_MULTI(step, enm)) {
		iavf_eth_addr(sc, enm->enm_addrlo,
		    IAVF_VC_OP_DEL_ETH_ADDR);
	}
	ETHER_UNLOCK(ec);
}
1748
1749 static int
1750 iavf_setup_interrupts(struct iavf_softc *sc)
1751 {
1752 struct pci_attach_args *pa;
1753 kcpuset_t *affinity = NULL;
1754 char intrbuf[PCI_INTRSTR_LEN], xnamebuf[32];
1755 char const *intrstr;
1756 int counts[PCI_INTR_TYPE_SIZE];
1757 int error, affinity_to;
1758 unsigned int vector, qid, num;
1759
1760 /* queue pairs + misc interrupt */
1761 num = sc->sc_nqps_alloc + 1;
1762
1763 num = MIN(num, iavf_calc_msix_count(sc));
1764 if (num <= 0) {
1765 return -1;
1766 }
1767
1768 KASSERT(sc->sc_nqps_alloc > 0);
1769 num = MIN(num, sc->sc_nqps_alloc + 1);
1770
1771 pa = &sc->sc_pa;
1772 memset(counts, 0, sizeof(counts));
1773 counts[PCI_INTR_TYPE_MSIX] = num;
1774
1775 error = pci_intr_alloc(pa, &sc->sc_ihp, counts, PCI_INTR_TYPE_MSIX);
1776 if (error != 0) {
1777 IAVF_LOG(sc, LOG_WARNING, "couldn't allocate interrupts\n");
1778 return -1;
1779 }
1780
1781 KASSERT(pci_intr_type(pa->pa_pc, sc->sc_ihp[0]) == PCI_INTR_TYPE_MSIX);
1782
1783 if (counts[PCI_INTR_TYPE_MSIX] < 1) {
1784 IAVF_LOG(sc, LOG_ERR, "couldn't allocate interrupts\n");
1785 } else if (counts[PCI_INTR_TYPE_MSIX] != (int)num) {
1786 IAVF_LOG(sc, LOG_DEBUG,
1787 "request %u intruppts, but allocate %d interrupts\n",
1788 num, counts[PCI_INTR_TYPE_MSIX]);
1789 num = counts[PCI_INTR_TYPE_MSIX];
1790 }
1791
1792 sc->sc_ihs = kmem_alloc(sizeof(sc->sc_ihs[0]) * num, KM_NOSLEEP);
1793 if (sc->sc_ihs == NULL) {
1794 IAVF_LOG(sc, LOG_ERR,
1795 "couldn't allocate memory for interrupts\n");
1796 goto fail;
1797 }
1798
1799 /* vector #0 is Misc interrupt */
1800 vector = 0;
1801 pci_intr_setattr(pa->pa_pc, &sc->sc_ihp[vector], PCI_INTR_MPSAFE, true);
1802 intrstr = pci_intr_string(pa->pa_pc, sc->sc_ihp[vector],
1803 intrbuf, sizeof(intrbuf));
1804 snprintf(xnamebuf, sizeof(xnamebuf), "%s-Misc",
1805 device_xname(sc->sc_dev));
1806
1807 sc->sc_ihs[vector] = pci_intr_establish_xname(pa->pa_pc,
1808 sc->sc_ihp[vector], IPL_NET, iavf_intr, sc, xnamebuf);
1809 if (sc->sc_ihs[vector] == NULL) {
1810 IAVF_LOG(sc, LOG_WARNING,
1811 "unable to establish interrupt at %s", intrstr);
1812 goto fail;
1813 }
1814
1815 kcpuset_create(&affinity, false);
1816 affinity_to = ((int)num <= ncpu) ? 1 : 0;
1817 qid = 0;
1818 for (vector = 1; vector < num; vector++) {
1819 pci_intr_setattr(pa->pa_pc, &sc->sc_ihp[vector],
1820 PCI_INTR_MPSAFE, true);
1821 intrstr = pci_intr_string(pa->pa_pc, sc->sc_ihp[vector],
1822 intrbuf, sizeof(intrbuf));
1823 snprintf(xnamebuf, sizeof(xnamebuf), "%s-TXRX%u",
1824 device_xname(sc->sc_dev), qid);
1825
1826 sc->sc_ihs[vector] = pci_intr_establish_xname(pa->pa_pc,
1827 sc->sc_ihp[vector], IPL_NET, iavf_queue_intr,
1828 (void *)&sc->sc_qps[qid], xnamebuf);
1829 if (sc->sc_ihs[vector] == NULL) {
1830 IAVF_LOG(sc, LOG_WARNING,
1831 "unable to establish interrupt at %s\n", intrstr);
1832 goto fail;
1833 }
1834
1835 kcpuset_zero(affinity);
1836 kcpuset_set(affinity, affinity_to);
1837 error = interrupt_distribute(sc->sc_ihs[vector],
1838 affinity, NULL);
1839
1840 if (error == 0) {
1841 IAVF_LOG(sc, LOG_INFO,
1842 "for TXRX%d interrupt at %s, affinity to %d\n",
1843 qid, intrstr, affinity_to);
1844 } else {
1845 IAVF_LOG(sc, LOG_INFO,
1846 "for TXRX%d interrupt at %s\n",
1847 qid, intrstr);
1848 }
1849
1850 qid++;
1851 affinity_to = (affinity_to + 1) % ncpu;
1852 }
1853
1854 kcpuset_destroy(affinity);
1855
1856 sc->sc_nintrs = num;
1857 return 0;
1858
1859 fail:
1860 if (affinity != NULL)
1861 kcpuset_destroy(affinity);
1862 for (vector = 0; vector < num; vector++) {
1863 if (sc->sc_ihs[vector] == NULL)
1864 continue;
1865 pci_intr_disestablish(pa->pa_pc, sc->sc_ihs[vector]);
1866 }
1867 kmem_free(sc->sc_ihs, sizeof(sc->sc_ihs[0]) * num);
1868 pci_intr_release(pa->pa_pc, sc->sc_ihp, num);
1869
1870 return -1;
1871 }
1872
1873 static void
1874 iavf_teardown_interrupts(struct iavf_softc *sc)
1875 {
1876 struct pci_attach_args *pa;
1877 unsigned int i;
1878
1879 if (sc->sc_ihs == NULL)
1880 return;
1881
1882 pa = &sc->sc_pa;
1883
1884 for (i = 0; i < sc->sc_nintrs; i++) {
1885 pci_intr_disestablish(pa->pa_pc, sc->sc_ihs[i]);
1886 }
1887
1888 kmem_free(sc->sc_ihs, sizeof(sc->sc_ihs[0]) * sc->sc_nintrs);
1889 sc->sc_ihs = NULL;
1890
1891 pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs);
1892 sc->sc_nintrs = 0;
1893 }
1894
1895 static int
1896 iavf_setup_sysctls(struct iavf_softc *sc)
1897 {
1898 const char *devname;
1899 struct sysctllog **log;
1900 const struct sysctlnode *rnode, *rxnode, *txnode;
1901 int error;
1902
1903 log = &sc->sc_sysctllog;
1904 devname = device_xname(sc->sc_dev);
1905
1906 error = sysctl_createv(log, 0, NULL, &rnode,
1907 0, CTLTYPE_NODE, devname,
1908 SYSCTL_DESCR("iavf information and settings"),
1909 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
1910 if (error)
1911 goto out;
1912
1913 error = sysctl_createv(log, 0, &rnode, NULL,
1914 CTLFLAG_READWRITE, CTLTYPE_BOOL, "txrx_workqueue",
1915 SYSCTL_DESCR("Use workqueue for packet processing"),
1916 NULL, 0, &sc->sc_txrx_workqueue, 0, CTL_CREATE, CTL_EOL);
1917 if (error)
1918 goto out;
1919
1920 error = sysctl_createv(log, 0, &rnode, NULL,
1921 CTLFLAG_READWRITE, CTLTYPE_INT, "debug_level",
1922 SYSCTL_DESCR("Debug level"),
1923 NULL, 0, &sc->sc_debuglevel, 0, CTL_CREATE, CTL_EOL);
1924 if (error)
1925 goto out;
1926
1927 error = sysctl_createv(log, 0, &rnode, &rxnode,
1928 0, CTLTYPE_NODE, "rx",
1929 SYSCTL_DESCR("iavf information and settings for Rx"),
1930 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
1931 if (error)
1932 goto out;
1933
1934 error = sysctl_createv(log, 0, &rxnode, NULL,
1935 CTLFLAG_READWRITE, CTLTYPE_INT, "itr",
1936 SYSCTL_DESCR("Interrupt Throttling"),
1937 iavf_sysctl_itr_handler, 0,
1938 (void *)sc, 0, CTL_CREATE, CTL_EOL);
1939 if (error)
1940 goto out;
1941
1942 error = sysctl_createv(log, 0, &rxnode, NULL,
1943 CTLFLAG_READONLY, CTLTYPE_INT, "descriptor_num",
1944 SYSCTL_DESCR("descriptor size"),
1945 NULL, 0, &sc->sc_rx_ring_ndescs, 0, CTL_CREATE, CTL_EOL);
1946 if (error)
1947 goto out;
1948
1949 error = sysctl_createv(log, 0, &rxnode, NULL,
1950 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
1951 SYSCTL_DESCR("max number of Rx packets"
1952 " to process for interrupt processing"),
1953 NULL, 0, &sc->sc_rx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
1954 if (error)
1955 goto out;
1956
1957 error = sysctl_createv(log, 0, &rxnode, NULL,
1958 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
1959 SYSCTL_DESCR("max number of Rx packets"
1960 " to process for deferred processing"),
1961 NULL, 0, &sc->sc_rx_process_limit, 0, CTL_CREATE, CTL_EOL);
1962 if (error)
1963 goto out;
1964
1965 error = sysctl_createv(log, 0, &rnode, &txnode,
1966 0, CTLTYPE_NODE, "tx",
1967 SYSCTL_DESCR("iavf information and settings for Tx"),
1968 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
1969 if (error)
1970 goto out;
1971
1972 error = sysctl_createv(log, 0, &txnode, NULL,
1973 CTLFLAG_READWRITE, CTLTYPE_INT, "itr",
1974 SYSCTL_DESCR("Interrupt Throttling"),
1975 iavf_sysctl_itr_handler, 0,
1976 (void *)sc, 0, CTL_CREATE, CTL_EOL);
1977 if (error)
1978 goto out;
1979
1980 error = sysctl_createv(log, 0, &txnode, NULL,
1981 CTLFLAG_READONLY, CTLTYPE_INT, "descriptor_num",
1982 SYSCTL_DESCR("the number of Tx descriptors"),
1983 NULL, 0, &sc->sc_tx_ring_ndescs, 0, CTL_CREATE, CTL_EOL);
1984 if (error)
1985 goto out;
1986
1987 error = sysctl_createv(log, 0, &txnode, NULL,
1988 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
1989 SYSCTL_DESCR("max number of Tx packets"
1990 " to process for interrupt processing"),
1991 NULL, 0, &sc->sc_tx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
1992 if (error)
1993 goto out;
1994
1995 error = sysctl_createv(log, 0, &txnode, NULL,
1996 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
1997 SYSCTL_DESCR("max number of Tx packets"
1998 " to process for deferred processing"),
1999 NULL, 0, &sc->sc_tx_process_limit, 0, CTL_CREATE, CTL_EOL);
2000 if (error)
2001 goto out;
2002 out:
2003 return error;
2004 }
2005
/* Destroy every sysctl node recorded in sc_sysctllog. */
static void
iavf_teardown_sysctls(struct iavf_softc *sc)
{

	sysctl_teardown(&sc->sc_sysctllog);
}
2012
2013 static int
2014 iavf_setup_stats(struct iavf_softc *sc)
2015 {
2016 struct iavf_stat_counters *isc;
2017 const char *dn;
2018
2019 dn = device_xname(sc->sc_dev);
2020 isc = &sc->sc_stat_counters;
2021
2022 iavf_evcnt_attach(&isc->isc_rx_bytes, dn, "Rx bytes");
2023 iavf_evcnt_attach(&isc->isc_rx_unicast, dn, "Rx unicast");
2024 iavf_evcnt_attach(&isc->isc_rx_multicast, dn, "Rx multicast");
2025 iavf_evcnt_attach(&isc->isc_rx_broadcast, dn, "Rx broadcast");
2026 iavf_evcnt_attach(&isc->isc_rx_discards, dn, "Rx discards");
2027 iavf_evcnt_attach(&isc->isc_rx_unknown_protocol,
2028 dn, "Rx unknown protocol");
2029
2030 iavf_evcnt_attach(&isc->isc_tx_bytes, dn, "Tx bytes");
2031 iavf_evcnt_attach(&isc->isc_tx_unicast, dn, "Tx unicast");
2032 iavf_evcnt_attach(&isc->isc_tx_multicast, dn, "Tx multicast");
2033 iavf_evcnt_attach(&isc->isc_tx_broadcast, dn, "Tx broadcast");
2034 iavf_evcnt_attach(&isc->isc_tx_discards, dn, "Tx discards");
2035 iavf_evcnt_attach(&isc->isc_tx_errors, dn, "Tx errors");
2036
2037 return 0;
2038 }
2039
2040 static void
2041 iavf_teardown_stats(struct iavf_softc *sc)
2042 {
2043 struct iavf_stat_counters *isc;
2044
2045 isc = &sc->sc_stat_counters;
2046
2047 evcnt_detach(&isc->isc_rx_bytes);
2048 evcnt_detach(&isc->isc_rx_unicast);
2049 evcnt_detach(&isc->isc_rx_multicast);
2050 evcnt_detach(&isc->isc_rx_broadcast);
2051 evcnt_detach(&isc->isc_rx_discards);
2052 evcnt_detach(&isc->isc_rx_unknown_protocol);
2053
2054 evcnt_detach(&isc->isc_tx_bytes);
2055 evcnt_detach(&isc->isc_tx_unicast);
2056 evcnt_detach(&isc->isc_tx_multicast);
2057 evcnt_detach(&isc->isc_tx_broadcast);
2058 evcnt_detach(&isc->isc_tx_discards);
2059 evcnt_detach(&isc->isc_tx_errors);
2060
2061 }
2062
/*
 * Program the admin (ATQ/ARQ) ring registers and verify that the
 * device accepted the base addresses.  Returns 0 on success, -1 if
 * the read-back check fails (the rings are disabled again in that
 * case).
 */
static int
iavf_init_admin_queue(struct iavf_softc *sc)
{
	uint32_t reg;

	sc->sc_atq_cons = 0;
	sc->sc_atq_prod = 0;

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
	    0, IXL_DMA_LEN(&sc->sc_atq),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
	    0, IXL_DMA_LEN(&sc->sc_arq),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* Reset head/tail pointers before enabling the rings. */
	iavf_wr(sc, sc->sc_aq_regs->atq_head, 0);
	iavf_wr(sc, sc->sc_aq_regs->arq_head, 0);
	iavf_wr(sc, sc->sc_aq_regs->atq_tail, 0);
	iavf_wr(sc, sc->sc_aq_regs->arq_tail, 0);

	iavf_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);

	/* Program the ATQ base address and enable it with its length. */
	iavf_wr(sc, sc->sc_aq_regs->atq_bal,
	    ixl_dmamem_lo(&sc->sc_atq));
	iavf_wr(sc, sc->sc_aq_regs->atq_bah,
	    ixl_dmamem_hi(&sc->sc_atq));
	iavf_wr(sc, sc->sc_aq_regs->atq_len,
	    sc->sc_aq_regs->atq_len_enable | IAVF_AQ_NUM);

	/* Likewise for the ARQ. */
	iavf_wr(sc, sc->sc_aq_regs->arq_bal,
	    ixl_dmamem_lo(&sc->sc_arq));
	iavf_wr(sc, sc->sc_aq_regs->arq_bah,
	    ixl_dmamem_hi(&sc->sc_arq));
	iavf_wr(sc, sc->sc_aq_regs->arq_len,
	    sc->sc_aq_regs->arq_len_enable | IAVF_AQ_NUM);

	iavf_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);

	/* Read back the base addresses to confirm the writes stuck. */
	reg = iavf_rd(sc, sc->sc_aq_regs->atq_bal);
	if (reg != ixl_dmamem_lo(&sc->sc_atq))
		goto fail;

	reg = iavf_rd(sc, sc->sc_aq_regs->arq_bal);
	if (reg != ixl_dmamem_lo(&sc->sc_arq))
		goto fail;

	sc->sc_dead = false;
	return 0;

fail:
	/* Disable both rings again on verification failure. */
	iavf_wr(sc, sc->sc_aq_regs->atq_len, 0);
	iavf_wr(sc, sc->sc_aq_regs->arq_len, 0);
	return -1;
}
2117
/*
 * Disable the admin queue rings, reset the software state, and move
 * all in-flight buffers back to their idle lists.  Caller holds
 * sc_adminq_lock (or is single-threaded during attach failure).
 */
static void
iavf_cleanup_admin_queue(struct iavf_softc *sc)
{
	struct ixl_aq_buf *aqb;

	/* Zero head/tail and base/length registers to disable the rings. */
	iavf_wr(sc, sc->sc_aq_regs->atq_head, 0);
	iavf_wr(sc, sc->sc_aq_regs->arq_head, 0);
	iavf_wr(sc, sc->sc_aq_regs->atq_tail, 0);
	iavf_wr(sc, sc->sc_aq_regs->arq_tail, 0);

	iavf_wr(sc, sc->sc_aq_regs->atq_bal, 0);
	iavf_wr(sc, sc->sc_aq_regs->atq_bah, 0);
	iavf_wr(sc, sc->sc_aq_regs->atq_len, 0);

	iavf_wr(sc, sc->sc_aq_regs->arq_bal, 0);
	iavf_wr(sc, sc->sc_aq_regs->arq_bah, 0);
	iavf_wr(sc, sc->sc_aq_regs->arq_len, 0);
	iavf_flush(sc);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
	    0, IXL_DMA_LEN(&sc->sc_arq),
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
	    0, IXL_DMA_LEN(&sc->sc_atq),
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	/* Reset the ring indices and clear the descriptor memory. */
	sc->sc_atq_cons = 0;
	sc->sc_atq_prod = 0;
	sc->sc_arq_cons = 0;
	sc->sc_arq_prod = 0;

	memset(IXL_DMA_KVA(&sc->sc_arq), 0, IXL_DMA_LEN(&sc->sc_arq));
	memset(IXL_DMA_KVA(&sc->sc_atq), 0, IXL_DMA_LEN(&sc->sc_atq));

	/* Return every live buffer to its idle list. */
	while ((aqb = iavf_aqb_get_locked(&sc->sc_arq_live)) != NULL) {
		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, aqb->aqb_size,
		    BUS_DMASYNC_POSTREAD);
		iavf_aqb_put_locked(&sc->sc_arq_idle, aqb);
	}

	while ((aqb = iavf_aqb_get_locked(&sc->sc_atq_live)) != NULL) {
		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, aqb->aqb_size,
		    BUS_DMASYNC_POSTREAD);
		iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
	}
}
2164
2165 static unsigned int
2166 iavf_calc_msix_count(struct iavf_softc *sc)
2167 {
2168 struct pci_attach_args *pa;
2169 int count;
2170
2171 pa = &sc->sc_pa;
2172 count = pci_msix_count(pa->pa_pc, pa->pa_tag);
2173 if (count < 0) {
2174 IAVF_LOG(sc, LOG_DEBUG,"MSIX config error\n");
2175 count = 0;
2176 }
2177
2178 return MIN(sc->sc_max_vectors, (unsigned int)count);
2179 }
2180
2181 static unsigned int
2182 iavf_calc_queue_pair_size(struct iavf_softc *sc)
2183 {
2184 unsigned int nqp, nvec;
2185
2186 nvec = iavf_calc_msix_count(sc);
2187 if (sc->sc_max_vectors > 1) {
2188 /* decrease the number of misc interrupt */
2189 nvec -= 1;
2190 }
2191
2192 nqp = ncpu;
2193 nqp = MIN(nqp, sc->sc_nqps_vsi);
2194 nqp = MIN(nqp, nvec);
2195 nqp = MIN(nqp, (unsigned int)iavf_params.max_qps);
2196
2197 return nqp;
2198 }
2199
/*
 * Allocate and initialize one Tx ring for queue "qid": descriptor
 * DMA memory, per-slot mbuf DMA maps, the if_transmit intermediate
 * pcq, the deferred-transmit softint, and event counters.  Returns
 * NULL on any failure; everything acquired so far is unwound via
 * the goto chain at the bottom.
 */
static struct iavf_tx_ring *
iavf_txr_alloc(struct iavf_softc *sc, unsigned int qid)
{
	struct iavf_tx_ring *txr;
	struct iavf_tx_map *maps;
	unsigned int i;
	int error;

	txr = kmem_zalloc(sizeof(*txr), KM_NOSLEEP);
	if (txr == NULL)
		return NULL;

	/* one map per descriptor slot */
	maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_tx_ring_ndescs,
	    KM_NOSLEEP);
	if (maps == NULL)
		goto free_txr;

	if (iavf_dmamem_alloc(sc->sc_dmat, &txr->txr_mem,
	    sizeof(struct ixl_tx_desc) * sc->sc_tx_ring_ndescs,
	    IAVF_TX_QUEUE_ALIGN) != 0) {
		goto free_maps;
	}

	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
		error = bus_dmamap_create(sc->sc_dmat, IAVF_TX_PKT_MAXSIZE,
		    IAVF_TX_PKT_DESCS, IAVF_TX_PKT_MAXSIZE, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &maps[i].txm_map);
		if (error)
			goto destroy_maps;
	}

	/* staging queue used by iavf_transmit() before the ring lock */
	txr->txr_intrq = pcq_create(sc->sc_tx_ring_ndescs, KM_NOSLEEP);
	if (txr->txr_intrq == NULL)
		goto destroy_maps;

	txr->txr_si = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE,
	    iavf_deferred_transmit, txr);
	if (txr->txr_si == NULL)
		goto destroy_pcq;

	snprintf(txr->txr_name, sizeof(txr->txr_name), "%s-tx%d",
	    device_xname(sc->sc_dev), qid);

	iavf_evcnt_attach(&txr->txr_defragged,
	    txr->txr_name, "m_defrag successed");
	iavf_evcnt_attach(&txr->txr_defrag_failed,
	    txr->txr_name, "m_defrag failed");
	iavf_evcnt_attach(&txr->txr_pcqdrop,
	    txr->txr_name, "Dropped in pcq");
	iavf_evcnt_attach(&txr->txr_transmitdef,
	    txr->txr_name, "Deferred transmit");
	iavf_evcnt_attach(&txr->txr_watchdogto,
	    txr->txr_name, "Watchdog timedout on queue");
	iavf_evcnt_attach(&txr->txr_defer,
	    txr->txr_name, "Handled queue in softint/workqueue");

	evcnt_attach_dynamic(&txr->txr_intr, EVCNT_TYPE_INTR, NULL,
	    txr->txr_name, "Interrupt on queue");

	txr->txr_qid = qid;
	txr->txr_sc = sc;
	txr->txr_maps = maps;
	txr->txr_prod = txr->txr_cons = 0;
	txr->txr_tail = I40E_QTX_TAIL1(qid);
	mutex_init(&txr->txr_lock, MUTEX_DEFAULT, IPL_NET);

	return txr;
destroy_pcq:
	pcq_destroy(txr->txr_intrq);
destroy_maps:
	/* maps[] was zero-filled, so never-created entries are NULL */
	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
		if (maps[i].txm_map == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, maps[i].txm_map);
	}

	iavf_dmamem_free(sc->sc_dmat, &txr->txr_mem);
free_maps:
	kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs);
free_txr:
	kmem_free(txr, sizeof(*txr));
	return NULL;
}
2283
/*
 * Tear down a Tx ring created by iavf_txr_alloc(): per-slot DMA
 * maps, event counters, descriptor DMA memory, softint, pcq, lock,
 * and the ring structure itself.
 */
static void
iavf_txr_free(struct iavf_softc *sc, struct iavf_tx_ring *txr)
{
	struct iavf_tx_map *maps;
	unsigned int i;

	maps = txr->txr_maps;
	if (maps != NULL) {
		for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
			if (maps[i].txm_map == NULL)
				continue;
			bus_dmamap_destroy(sc->sc_dmat, maps[i].txm_map);
		}
		kmem_free(txr->txr_maps,
		    sizeof(maps[0]) * sc->sc_tx_ring_ndescs);
		txr->txr_maps = NULL;
	}

	evcnt_detach(&txr->txr_defragged);
	evcnt_detach(&txr->txr_defrag_failed);
	evcnt_detach(&txr->txr_pcqdrop);
	evcnt_detach(&txr->txr_transmitdef);
	evcnt_detach(&txr->txr_watchdogto);
	evcnt_detach(&txr->txr_defer);
	evcnt_detach(&txr->txr_intr);

	iavf_dmamem_free(sc->sc_dmat, &txr->txr_mem);
	softint_disestablish(txr->txr_si);
	pcq_destroy(txr->txr_intrq);
	mutex_destroy(&txr->txr_lock);
	kmem_free(txr, sizeof(*txr));
}
2316
/*
 * Allocate and initialize one Rx ring for queue "qid": descriptor
 * DMA memory, per-slot mbuf DMA maps (one segment each), and event
 * counters.  Returns NULL on any failure with everything acquired
 * so far released via the goto chain.
 */
static struct iavf_rx_ring *
iavf_rxr_alloc(struct iavf_softc *sc, unsigned int qid)
{
	struct iavf_rx_ring *rxr;
	struct iavf_rx_map *maps;
	unsigned int i;
	int error;

	rxr = kmem_zalloc(sizeof(*rxr), KM_NOSLEEP);
	if (rxr == NULL)
		return NULL;

	/* one DMA map per descriptor slot */
	maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_rx_ring_ndescs,
	    KM_NOSLEEP);
	if (maps == NULL)
		goto free_rxr;

	if (iavf_dmamem_alloc(sc->sc_dmat, &rxr->rxr_mem,
	    sizeof(struct ixl_rx_rd_desc_32) * sc->sc_rx_ring_ndescs,
	    IAVF_RX_QUEUE_ALIGN) != 0)
		goto free_maps;

	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
		error = bus_dmamap_create(sc->sc_dmat, IAVF_MCLBYTES,
		    1, IAVF_MCLBYTES, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &maps[i].rxm_map);
		if (error)
			goto destroy_maps;
	}

	snprintf(rxr->rxr_name, sizeof(rxr->rxr_name), "%s-rx%d",
	    device_xname(sc->sc_dev), qid);

	iavf_evcnt_attach(&rxr->rxr_mgethdr_failed,
	    rxr->rxr_name, "MGETHDR failed");
	iavf_evcnt_attach(&rxr->rxr_mgetcl_failed,
	    rxr->rxr_name, "MCLGET failed");
	iavf_evcnt_attach(&rxr->rxr_mbuf_load_failed,
	    rxr->rxr_name, "bus_dmamap_load_mbuf failed");
	iavf_evcnt_attach(&rxr->rxr_defer,
	    rxr->rxr_name, "Handled queue in softint/workqueue");

	evcnt_attach_dynamic(&rxr->rxr_intr, EVCNT_TYPE_INTR, NULL,
	    rxr->rxr_name, "Interrupt on queue");

	rxr->rxr_qid = qid;
	rxr->rxr_sc = sc;
	rxr->rxr_cons = rxr->rxr_prod = 0;
	/* chain used while assembling multi-descriptor packets */
	rxr->rxr_m_head = NULL;
	rxr->rxr_m_tail = &rxr->rxr_m_head;
	rxr->rxr_maps = maps;
	rxr->rxr_tail = I40E_QRX_TAIL1(qid);
	mutex_init(&rxr->rxr_lock, MUTEX_DEFAULT, IPL_NET);

	return rxr;

destroy_maps:
	/* maps[] was zero-filled, so never-created entries are NULL */
	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
		if (maps[i].rxm_map == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, maps[i].rxm_map);
	}
	iavf_dmamem_free(sc->sc_dmat, &rxr->rxr_mem);
free_maps:
	kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs);
free_rxr:
	kmem_free(rxr, sizeof(*rxr));

	return NULL;
}
2387
/*
 * Tear down an Rx ring created by iavf_rxr_alloc(): per-slot DMA
 * maps, event counters, descriptor DMA memory, lock, and the ring
 * structure itself.
 */
static void
iavf_rxr_free(struct iavf_softc *sc, struct iavf_rx_ring *rxr)
{
	struct iavf_rx_map *maps;
	unsigned int i;

	maps = rxr->rxr_maps;
	if (maps != NULL) {
		for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
			if (maps[i].rxm_map == NULL)
				continue;
			bus_dmamap_destroy(sc->sc_dmat, maps[i].rxm_map);
		}
		kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs);
		rxr->rxr_maps = NULL;
	}

	evcnt_detach(&rxr->rxr_mgethdr_failed);
	evcnt_detach(&rxr->rxr_mgetcl_failed);
	evcnt_detach(&rxr->rxr_mbuf_load_failed);
	evcnt_detach(&rxr->rxr_defer);
	evcnt_detach(&rxr->rxr_intr);

	iavf_dmamem_free(sc->sc_dmat, &rxr->rxr_mem);
	mutex_destroy(&rxr->rxr_lock);
	kmem_free(rxr, sizeof(*rxr));
}
2415
2416 static int
2417 iavf_queue_pairs_alloc(struct iavf_softc *sc)
2418 {
2419 struct iavf_queue_pair *qp;
2420 unsigned int i, num;
2421
2422 num = iavf_calc_queue_pair_size(sc);
2423 if (num <= 0) {
2424 return -1;
2425 }
2426
2427 sc->sc_qps = kmem_zalloc(sizeof(sc->sc_qps[0]) * num, KM_NOSLEEP);
2428 if (sc->sc_qps == NULL) {
2429 return -1;
2430 }
2431
2432 for (i = 0; i < num; i++) {
2433 qp = &sc->sc_qps[i];
2434
2435 qp->qp_rxr = iavf_rxr_alloc(sc, i);
2436 qp->qp_txr = iavf_txr_alloc(sc, i);
2437
2438 if (qp->qp_rxr == NULL || qp->qp_txr == NULL)
2439 goto free;
2440
2441 qp->qp_si = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE,
2442 iavf_handle_queue, qp);
2443 if (qp->qp_si == NULL)
2444 goto free;
2445 }
2446
2447 sc->sc_nqps_alloc = num;
2448 return 0;
2449 free:
2450 for (i = 0; i < num; i++) {
2451 qp = &sc->sc_qps[i];
2452
2453 if (qp->qp_si != NULL)
2454 softint_disestablish(qp->qp_si);
2455 if (qp->qp_rxr != NULL)
2456 iavf_rxr_free(sc, qp->qp_rxr);
2457 if (qp->qp_txr != NULL)
2458 iavf_txr_free(sc, qp->qp_txr);
2459 }
2460
2461 kmem_free(sc->sc_qps, sizeof(sc->sc_qps[0]) * num);
2462 sc->sc_qps = NULL;
2463
2464 return -1;
2465 }
2466
2467 static void
2468 iavf_queue_pairs_free(struct iavf_softc *sc)
2469 {
2470 struct iavf_queue_pair *qp;
2471 unsigned int i;
2472 size_t sz;
2473
2474 if (sc->sc_qps == NULL)
2475 return;
2476
2477 for (i = 0; i < sc->sc_nqps_alloc; i++) {
2478 qp = &sc->sc_qps[i];
2479
2480 if (qp->qp_si != NULL)
2481 softint_disestablish(qp->qp_si);
2482 if (qp->qp_rxr != NULL)
2483 iavf_rxr_free(sc, qp->qp_rxr);
2484 if (qp->qp_txr != NULL)
2485 iavf_txr_free(sc, qp->qp_txr);
2486 }
2487
2488 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqps_alloc;
2489 kmem_free(sc->sc_qps, sz);
2490 sc->sc_qps = NULL;
2491 sc->sc_nqps_alloc = 0;
2492 }
2493
2494 static int
2495 iavf_rxfill(struct iavf_softc *sc, struct iavf_rx_ring *rxr)
2496 {
2497 struct ixl_rx_rd_desc_32 *ring, *rxd;
2498 struct iavf_rx_map *rxm;
2499 bus_dmamap_t map;
2500 struct mbuf *m;
2501 unsigned int slots, prod, mask;
2502 int error, post;
2503
2504 slots = ixl_rxr_unrefreshed(rxr->rxr_prod, rxr->rxr_cons,
2505 sc->sc_rx_ring_ndescs);
2506
2507 if (slots == 0)
2508 return 0;
2509
2510 error = 0;
2511 prod = rxr->rxr_prod;
2512
2513 ring = IXL_DMA_KVA(&rxr->rxr_mem);
2514 mask = sc->sc_rx_ring_ndescs - 1;
2515
2516 do {
2517 rxm = &rxr->rxr_maps[prod];
2518
2519 MGETHDR(m, M_DONTWAIT, MT_DATA);
2520 if (m == NULL) {
2521 rxr->rxr_mgethdr_failed.ev_count++;
2522 error = -1;
2523 break;
2524 }
2525
2526 MCLGET(m, M_DONTWAIT);
2527 if (!ISSET(m->m_flags, M_EXT)) {
2528 rxr->rxr_mgetcl_failed.ev_count++;
2529 error = -1;
2530 m_freem(m);
2531 break;
2532 }
2533
2534 m->m_len = m->m_pkthdr.len = MCLBYTES;
2535 m_adj(m, ETHER_ALIGN);
2536
2537 map = rxm->rxm_map;
2538
2539 if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
2540 BUS_DMA_READ|BUS_DMA_NOWAIT) != 0) {
2541 rxr->rxr_mbuf_load_failed.ev_count++;
2542 error = -1;
2543 m_freem(m);
2544 break;
2545 }
2546
2547 rxm->rxm_m = m;
2548
2549 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2550 BUS_DMASYNC_PREREAD);
2551
2552 rxd = &ring[prod];
2553 rxd->paddr = htole64(map->dm_segs[0].ds_addr);
2554 rxd->haddr = htole64(0);
2555
2556 prod++;
2557 prod &= mask;
2558 post = 1;
2559 } while (--slots);
2560
2561 if (post) {
2562 rxr->rxr_prod = prod;
2563 iavf_wr(sc, rxr->rxr_tail, prod);
2564 }
2565
2566 return error;
2567 }
2568
2569 static inline void
2570 iavf_rx_csum(struct mbuf *m, uint64_t qword)
2571 {
2572 int flags_mask;
2573
2574 if (!ISSET(qword, IXL_RX_DESC_L3L4P)) {
2575 /* No L3 or L4 checksum was calculated */
2576 return;
2577 }
2578
2579 switch (__SHIFTOUT(qword, IXL_RX_DESC_PTYPE_MASK)) {
2580 case IXL_RX_DESC_PTYPE_IPV4FRAG:
2581 case IXL_RX_DESC_PTYPE_IPV4:
2582 case IXL_RX_DESC_PTYPE_SCTPV4:
2583 case IXL_RX_DESC_PTYPE_ICMPV4:
2584 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
2585 break;
2586 case IXL_RX_DESC_PTYPE_TCPV4:
2587 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
2588 flags_mask |= M_CSUM_TCPv4 | M_CSUM_TCP_UDP_BAD;
2589 break;
2590 case IXL_RX_DESC_PTYPE_UDPV4:
2591 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
2592 flags_mask |= M_CSUM_UDPv4 | M_CSUM_TCP_UDP_BAD;
2593 break;
2594 case IXL_RX_DESC_PTYPE_TCPV6:
2595 flags_mask = M_CSUM_TCPv6 | M_CSUM_TCP_UDP_BAD;
2596 break;
2597 case IXL_RX_DESC_PTYPE_UDPV6:
2598 flags_mask = M_CSUM_UDPv6 | M_CSUM_TCP_UDP_BAD;
2599 break;
2600 default:
2601 flags_mask = 0;
2602 }
2603
2604 m->m_pkthdr.csum_flags |= (flags_mask & (M_CSUM_IPv4 |
2605 M_CSUM_TCPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv4 | M_CSUM_UDPv6));
2606
2607 if (ISSET(qword, IXL_RX_DESC_IPE)) {
2608 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_IPv4_BAD);
2609 }
2610
2611 if (ISSET(qword, IXL_RX_DESC_L4E)) {
2612 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_TCP_UDP_BAD);
2613 }
2614 }
2615
/*
 * Harvest completed receive descriptors (up to "rxlimit" of them).
 * Fragments are chained via rxr_m_head/rxr_m_tail until a
 * descriptor with EOP completes the packet, which is then handed
 * to the stack (or counted as an error and dropped).  Consumed
 * slots are refilled before returning.  Returns nonzero when the
 * limit was hit with work remaining.  Called with rxr_lock held.
 */
static int
iavf_rxeof(struct iavf_softc *sc, struct iavf_rx_ring *rxr, u_int rxlimit,
    struct evcnt *ecnt)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct ixl_rx_wb_desc_32 *ring, *rxd;
	struct iavf_rx_map *rxm;
	bus_dmamap_t map;
	unsigned int cons, prod;
	struct mbuf *m;
	uint64_t word, word0;
	unsigned int len;
	unsigned int mask;
	int done = 0, more = 0;

	KASSERT(mutex_owned(&rxr->rxr_lock));

	if (!ISSET(ifp->if_flags, IFF_RUNNING))
		return 0;

	prod = rxr->rxr_prod;
	cons = rxr->rxr_cons;

	/* nothing posted to the hardware, so nothing to harvest */
	if (cons == prod)
		return 0;

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
	    0, IXL_DMA_LEN(&rxr->rxr_mem),
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	ring = IXL_DMA_KVA(&rxr->rxr_mem);
	mask = sc->sc_rx_ring_ndescs - 1;

	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);

	do {
		if (rxlimit-- <= 0) {
			/* budget exhausted; caller will reschedule us */
			more = 1;
			break;
		}

		rxd = &ring[cons];

		word = le64toh(rxd->qword1);

		/* stop at the first descriptor the device hasn't written */
		if (!ISSET(word, IXL_RX_DESC_DD))
			break;

		rxm = &rxr->rxr_maps[cons];

		map = rxm->rxm_map;
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, map);

		m = rxm->rxm_m;
		rxm->rxm_m = NULL;

		KASSERT(m != NULL);

		/* append this fragment to the packet under assembly */
		len = (word & IXL_RX_DESC_PLEN_MASK) >> IXL_RX_DESC_PLEN_SHIFT;
		m->m_len = len;
		m->m_pkthdr.len = 0;

		m->m_next = NULL;
		*rxr->rxr_m_tail = m;
		rxr->rxr_m_tail = &m->m_next;

		m = rxr->rxr_m_head;
		m->m_pkthdr.len += len;

		if (ISSET(word, IXL_RX_DESC_EOP)) {
			/* final fragment: the packet is complete */
			word0 = le64toh(rxd->qword0);

			if (ISSET(word, IXL_RX_DESC_L2TAG1P)) {
				vlan_set_tag(m,
				    __SHIFTOUT(word0, IXL_RX_DESC_L2TAG1_MASK));
			}

			if ((ifp->if_capenable & IAVF_IFCAP_RXCSUM) != 0)
				iavf_rx_csum(m, word);

			if (!ISSET(word,
			    IXL_RX_DESC_RXE | IXL_RX_DESC_OVERSIZE)) {
				m_set_rcvif(m, ifp);
				if_statinc_ref(nsr, if_ipackets);
				if_statadd_ref(nsr, if_ibytes,
				    m->m_pkthdr.len);
				if_percpuq_enqueue(sc->sc_ipq, m);
			} else {
				/* receive/oversize error: drop the packet */
				if_statinc_ref(nsr, if_ierrors);
				m_freem(m);
			}

			rxr->rxr_m_head = NULL;
			rxr->rxr_m_tail = &rxr->rxr_m_head;
		}

		cons++;
		cons &= mask;

		done = 1;
	} while (cons != prod);

	if (done) {
		ecnt->ev_count++;
		rxr->rxr_cons = cons;
		/* replace the buffers just consumed */
		if (iavf_rxfill(sc, rxr) == -1)
			if_statinc_ref(nsr, if_iqdrops);
	}

	IF_STAT_PUTREF(ifp);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
	    0, IXL_DMA_LEN(&rxr->rxr_mem),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	return more;
}
2735
/*
 * Free every mbuf posted to the Rx ring plus any partially
 * assembled packet, then reset the descriptor memory and indexes.
 * Called with rxr_lock held.  NOTE(review): assumes the device is
 * no longer DMAing into these buffers — confirm callers stop the
 * queue first.
 */
static void
iavf_rxr_clean(struct iavf_softc *sc, struct iavf_rx_ring *rxr)
{
	struct iavf_rx_map *maps, *rxm;
	bus_dmamap_t map;
	unsigned int i;

	KASSERT(mutex_owned(&rxr->rxr_lock));

	maps = rxr->rxr_maps;
	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
		rxm = &maps[i];

		if (rxm->rxm_m == NULL)
			continue;

		map = rxm->rxm_map;
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, map);

		m_freem(rxm->rxm_m);
		rxm->rxm_m = NULL;
	}

	/* drop a packet that was only partially received */
	m_freem(rxr->rxr_m_head);
	rxr->rxr_m_head = NULL;
	rxr->rxr_m_tail = &rxr->rxr_m_head;

	memset(IXL_DMA_KVA(&rxr->rxr_mem), 0, IXL_DMA_LEN(&rxr->rxr_mem));
	rxr->rxr_prod = rxr->rxr_cons = 0;
}
2768
/*
 * Reclaim descriptors for packets the hardware has finished
 * sending (up to "txlimit" packets).  Each packet is tracked by
 * its last descriptor slot (txm_eop); the slot's DTYPE field turns
 * to DONE once the device has consumed every descriptor of the
 * packet.  Updates output statistics, clears IFF_OACTIVE on queue
 * 0, and stops the watchdog when the ring drains.  Returns nonzero
 * when the limit was hit with work remaining.  Called with
 * txr_lock held.
 */
static int
iavf_txeof(struct iavf_softc *sc, struct iavf_tx_ring *txr, u_int txlimit,
    struct evcnt *ecnt)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct ixl_tx_desc *ring, *txd;
	struct iavf_tx_map *txm;
	struct mbuf *m;
	bus_dmamap_t map;
	unsigned int cons, prod, last;
	unsigned int mask;
	uint64_t dtype;
	int done = 0, more = 0;

	KASSERT(mutex_owned(&txr->txr_lock));

	prod = txr->txr_prod;
	cons = txr->txr_cons;

	/* nothing outstanding */
	if (cons == prod)
		return 0;

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTREAD);

	ring = IXL_DMA_KVA(&txr->txr_mem);
	mask = sc->sc_tx_ring_ndescs - 1;

	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);

	do {
		if (txlimit-- <= 0) {
			/* budget exhausted; caller will reschedule us */
			more = 1;
			break;
		}

		txm = &txr->txr_maps[cons];
		last = txm->txm_eop;
		txd = &ring[last];

		/* a packet is done only when its final descriptor is */
		dtype = txd->cmd & htole64(IXL_TX_DESC_DTYPE_MASK);
		if (dtype != htole64(IXL_TX_DESC_DTYPE_DONE))
			break;

		map = txm->txm_map;

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, map);

		m = txm->txm_m;
		if (m != NULL) {
			if_statinc_ref(nsr, if_opackets);
			if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
			if (ISSET(m->m_flags, M_MCAST))
				if_statinc_ref(nsr, if_omcasts);
			m_freem(m);
		}

		txm->txm_m = NULL;
		txm->txm_eop = -1;

		/* jump past all descriptors of this packet */
		cons = last + 1;
		cons &= mask;
		done = 1;
	} while (cons != prod);

	IF_STAT_PUTREF(ifp);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREREAD);

	txr->txr_cons = cons;

	if (done) {
		ecnt->ev_count++;
		/* kick the deferred-transmit softint to refill the ring */
		softint_schedule(txr->txr_si);
		if (txr->txr_qid == 0) {
			CLR(ifp->if_flags, IFF_OACTIVE);
			if_schedule_deferred_start(ifp);
		}
	}

	if (txr->txr_cons == txr->txr_prod) {
		/* ring fully drained: disarm the watchdog */
		txr->txr_watchdog = IAVF_WATCHDOG_STOP;
	}

	return more;
}
2858
2859 static inline int
2860 iavf_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf **m0,
2861 struct iavf_tx_ring *txr)
2862 {
2863 struct mbuf *m;
2864 int error;
2865
2866 KASSERT(mutex_owned(&txr->txr_lock));
2867
2868 m = *m0;
2869
2870 error = bus_dmamap_load_mbuf(dmat, map, m,
2871 BUS_DMA_STREAMING|BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2872 if (error != EFBIG)
2873 return error;
2874
2875 m = m_defrag(m, M_DONTWAIT);
2876 if (m != NULL) {
2877 *m0 = m;
2878 txr->txr_defragged.ev_count++;
2879 error = bus_dmamap_load_mbuf(dmat, map, m,
2880 BUS_DMA_STREAMING|BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2881 } else {
2882 txr->txr_defrag_failed.ev_count++;
2883 error = ENOBUFS;
2884 }
2885
2886 return error;
2887 }
2888
2889 static inline int
2890 iavf_tx_setup_offloads(struct mbuf *m, uint64_t *cmd_txd)
2891 {
2892 struct ether_header *eh;
2893 size_t len;
2894 uint64_t cmd;
2895
2896 cmd = 0;
2897
2898 eh = mtod(m, struct ether_header *);
2899 switch (htons(eh->ether_type)) {
2900 case ETHERTYPE_IP:
2901 case ETHERTYPE_IPV6:
2902 len = ETHER_HDR_LEN;
2903 break;
2904 case ETHERTYPE_VLAN:
2905 len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2906 break;
2907 default:
2908 len = 0;
2909 }
2910 cmd |= ((len >> 1) << IXL_TX_DESC_MACLEN_SHIFT);
2911
2912 if (m->m_pkthdr.csum_flags &
2913 (M_CSUM_TSOv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
2914 cmd |= IXL_TX_DESC_CMD_IIPT_IPV4;
2915 }
2916 if (m->m_pkthdr.csum_flags & M_CSUM_IPv4) {
2917 cmd |= IXL_TX_DESC_CMD_IIPT_IPV4_CSUM;
2918 }
2919
2920 if (m->m_pkthdr.csum_flags &
2921 (M_CSUM_TSOv6 | M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
2922 cmd |= IXL_TX_DESC_CMD_IIPT_IPV6;
2923 }
2924
2925 switch (cmd & IXL_TX_DESC_CMD_IIPT_MASK) {
2926 case IXL_TX_DESC_CMD_IIPT_IPV4:
2927 case IXL_TX_DESC_CMD_IIPT_IPV4_CSUM:
2928 len = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data);
2929 break;
2930 case IXL_TX_DESC_CMD_IIPT_IPV6:
2931 len = M_CSUM_DATA_IPv6_IPHL(m->m_pkthdr.csum_data);
2932 break;
2933 default:
2934 len = 0;
2935 }
2936 cmd |= ((len >> 2) << IXL_TX_DESC_IPLEN_SHIFT);
2937
2938 if (m->m_pkthdr.csum_flags &
2939 (M_CSUM_TSOv4 | M_CSUM_TSOv6 | M_CSUM_TCPv4 | M_CSUM_TCPv6)) {
2940 len = sizeof(struct tcphdr);
2941 cmd |= IXL_TX_DESC_CMD_L4T_EOFT_TCP;
2942 } else if (m->m_pkthdr.csum_flags & (M_CSUM_UDPv4 | M_CSUM_UDPv6)) {
2943 len = sizeof(struct udphdr);
2944 cmd |= IXL_TX_DESC_CMD_L4T_EOFT_UDP;
2945 } else {
2946 len = 0;
2947 }
2948 cmd |= ((len >> 2) << IXL_TX_DESC_L4LEN_SHIFT);
2949
2950 *cmd_txd |= cmd;
2951 return 0;
2952 }
2953
/*
 * Core transmit path shared by if_start (is_transmit == false,
 * consuming if_snd) and if_transmit/deferred transmit
 * (is_transmit == true, consuming the per-ring pcq).  Each packet
 * is DMA-loaded, encoded into one descriptor per segment, and the
 * hardware tail register is bumped once at the end.  Called with
 * txr_lock held.
 */
static void
iavf_tx_common_locked(struct ifnet *ifp, struct iavf_tx_ring *txr,
    bool is_transmit)
{
	struct iavf_softc *sc;
	struct ixl_tx_desc *ring, *txd;
	struct iavf_tx_map *txm;
	bus_dmamap_t map;
	struct mbuf *m;
	unsigned int prod, free, last, i;
	unsigned int mask;
	uint64_t cmd, cmd_txd;
	int post = 0;

	KASSERT(mutex_owned(&txr->txr_lock));

	sc = ifp->if_softc;

	if (!ISSET(ifp->if_flags, IFF_RUNNING)
	    || (!is_transmit && ISSET(ifp->if_flags, IFF_OACTIVE))) {
		if (!is_transmit)
			IFQ_PURGE(&ifp->if_snd);
		return;
	}

	/* free descriptor slots between consumer and producer */
	prod = txr->txr_prod;
	free = txr->txr_cons;

	if (free <= prod)
		free += sc->sc_tx_ring_ndescs;
	free -= prod;

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTWRITE);

	ring = IXL_DMA_KVA(&txr->txr_mem);
	mask = sc->sc_tx_ring_ndescs - 1;
	last = prod;
	cmd = 0;
	txd = NULL;

	for (;;) {
		/* keep room for a maximally fragmented packet */
		if (free < IAVF_TX_PKT_DESCS) {
			if (!is_transmit)
				SET(ifp->if_flags, IFF_OACTIVE);
			break;
		}

		if (is_transmit)
			m = pcq_get(txr->txr_intrq);
		else
			IFQ_DEQUEUE(&ifp->if_snd, m);

		if (m == NULL)
			break;

		txm = &txr->txr_maps[prod];
		map = txm->txm_map;

		if (iavf_load_mbuf(sc->sc_dmat, map, &m, txr) != 0) {
			if_statinc(ifp, if_oerrors);
			m_freem(m);
			continue;
		}

		/* per-packet offload/VLAN bits shared by all segments */
		cmd_txd = 0;
		if (m->m_pkthdr.csum_flags & IAVF_CSUM_ALL_OFFLOAD) {
			iavf_tx_setup_offloads(m, &cmd_txd);
		}
		if (vlan_has_tag(m)) {
			cmd_txd |= IXL_TX_DESC_CMD_IL2TAG1 |
			    ((uint64_t)vlan_get_tag(m)
			    << IXL_TX_DESC_L2TAG1_SHIFT);
		}

		bus_dmamap_sync(sc->sc_dmat, map, 0,
		    map->dm_mapsize, BUS_DMASYNC_PREWRITE);

		/* one descriptor per DMA segment */
		for (i = 0; i < (unsigned int)map->dm_nsegs; i++) {
			txd = &ring[prod];

			cmd = (uint64_t)map->dm_segs[i].ds_len <<
			    IXL_TX_DESC_BSIZE_SHIFT;
			cmd |= IXL_TX_DESC_DTYPE_DATA|IXL_TX_DESC_CMD_ICRC|
			    cmd_txd;

			txd->addr = htole64(map->dm_segs[i].ds_addr);
			txd->cmd = htole64(cmd);

			last = prod;
			prod++;
			prod &= mask;
		}

		/* mark the final segment; RS makes the device report it */
		cmd |= IXL_TX_DESC_CMD_EOP|IXL_TX_DESC_CMD_RS;
		txd->cmd = htole64(cmd);
		txm->txm_m = m;
		txm->txm_eop = last;

		bpf_mtap(ifp, m, BPF_D_OUT);
		free -= i;
		post = 1;
	}

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREWRITE);

	if (post) {
		/* publish the producer index and arm the watchdog */
		txr->txr_prod = prod;
		iavf_wr(sc, txr->txr_tail, prod);
		txr->txr_watchdog = IAVF_WATCHDOG_TICKS;
	}
}
3067
3068 static inline int
3069 iavf_handle_queue_common(struct iavf_softc *sc, struct iavf_queue_pair *qp,
3070 u_int txlimit, struct evcnt *txevcnt,
3071 u_int rxlimit, struct evcnt *rxevcnt)
3072 {
3073 struct iavf_tx_ring *txr;
3074 struct iavf_rx_ring *rxr;
3075 int txmore, rxmore;
3076 int rv;
3077
3078 txr = qp->qp_txr;
3079 rxr = qp->qp_rxr;
3080
3081 mutex_enter(&txr->txr_lock);
3082 txmore = iavf_txeof(sc, txr, txlimit, txevcnt);
3083 mutex_exit(&txr->txr_lock);
3084
3085 mutex_enter(&rxr->rxr_lock);
3086 rxmore = iavf_rxeof(sc, rxr, rxlimit, rxevcnt);
3087 mutex_exit(&rxr->rxr_lock);
3088
3089 rv = txmore | (rxmore << 1);
3090
3091 return rv;
3092 }
3093
3094 static void
3095 iavf_sched_handle_queue(struct iavf_softc *sc, struct iavf_queue_pair *qp)
3096 {
3097
3098 if (qp->qp_workqueue)
3099 workqueue_enqueue(sc->sc_workq_txrx, &qp->qp_work, NULL);
3100 else
3101 softint_schedule(qp->qp_si);
3102 }
3103
3104 static void
3105 iavf_start(struct ifnet *ifp)
3106 {
3107 struct iavf_softc *sc;
3108 struct iavf_tx_ring *txr;
3109
3110 sc = ifp->if_softc;
3111 txr = sc->sc_qps[0].qp_txr;
3112
3113 mutex_enter(&txr->txr_lock);
3114 iavf_tx_common_locked(ifp, txr, false);
3115 mutex_exit(&txr->txr_lock);
3116
3117 }
3118
3119 static inline unsigned int
3120 iavf_select_txqueue(struct iavf_softc *sc, struct mbuf *m)
3121 {
3122 u_int cpuid;
3123
3124 cpuid = cpu_index(curcpu());
3125
3126 return (unsigned int)(cpuid % sc->sc_nqueue_pairs);
3127 }
3128
3129 static int
3130 iavf_transmit(struct ifnet *ifp, struct mbuf *m)
3131 {
3132 struct iavf_softc *sc;
3133 struct iavf_tx_ring *txr;
3134 unsigned int qid;
3135
3136 sc = ifp->if_softc;
3137 qid = iavf_select_txqueue(sc, m);
3138
3139 txr = sc->sc_qps[qid].qp_txr;
3140
3141 if (__predict_false(!pcq_put(txr->txr_intrq, m))) {
3142 mutex_enter(&txr->txr_lock);
3143 txr->txr_pcqdrop.ev_count++;
3144 mutex_exit(&txr->txr_lock);
3145
3146 m_freem(m);
3147 return ENOBUFS;
3148 }
3149
3150 if (mutex_tryenter(&txr->txr_lock)) {
3151 iavf_tx_common_locked(ifp, txr, true);
3152 mutex_exit(&txr->txr_lock);
3153 } else {
3154 kpreempt_disable();
3155 softint_schedule(txr->txr_si);
3156 kpreempt_enable();
3157 }
3158 return 0;
3159 }
3160
3161 static void
3162 iavf_deferred_transmit(void *xtxr)
3163 {
3164 struct iavf_tx_ring *txr;
3165 struct iavf_softc *sc;
3166 struct ifnet *ifp;
3167
3168 txr = xtxr;
3169 sc = txr->txr_sc;
3170 ifp = &sc->sc_ec.ec_if;
3171
3172 mutex_enter(&txr->txr_lock);
3173 txr->txr_transmitdef.ev_count++;
3174 if (pcq_peek(txr->txr_intrq) != NULL)
3175 iavf_tx_common_locked(ifp, txr, true);
3176 mutex_exit(&txr->txr_lock);
3177 }
3178
/*
 * Free every mbuf still attached to the Tx ring, then clear the
 * descriptor memory and reset the producer/consumer indexes.
 * Called with txr_lock held.
 */
static void
iavf_txr_clean(struct iavf_softc *sc, struct iavf_tx_ring *txr)
{
	struct iavf_tx_map *maps, *txm;
	bus_dmamap_t map;
	unsigned int i;

	KASSERT(mutex_owned(&txr->txr_lock));

	maps = txr->txr_maps;
	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
		txm = &maps[i];

		if (txm->txm_m == NULL)
			continue;

		map = txm->txm_map;
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, map);

		m_freem(txm->txm_m);
		txm->txm_m = NULL;
	}

	memset(IXL_DMA_KVA(&txr->txr_mem), 0, IXL_DMA_LEN(&txr->txr_mem));
	txr->txr_prod = txr->txr_cons = 0;
}
3207
/*
 * Handler for the non-queue ("misc") interrupt vector.  Detects an
 * in-progress VF reset (ICR reads back as the special IAVF_REG_VFR
 * value) and queues the reset task; otherwise services the admin
 * queues and, when a queue interrupt is flagged on this vector,
 * drains every queue pair without a work limit.
 */
static int
iavf_intr(void *xsc)
{
	struct iavf_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct iavf_rx_ring *rxr;
	struct iavf_tx_ring *txr;
	uint32_t icr;
	unsigned int i;

	/* read I40E_VFINT_ICR_ENA1 to clear status */
	(void)iavf_rd(sc, I40E_VFINT_ICR0_ENA1);

	iavf_intr_enable(sc);
	icr = iavf_rd(sc, I40E_VFINT_ICR01);

	if (icr == IAVF_REG_VFR) {
		/* the PF is resetting us; hand off to the reset task */
		log(LOG_INFO, "%s: VF reset in progress\n",
		    ifp->if_xname);
		iavf_work_set(&sc->sc_reset_task, iavf_reset_start, sc);
		iavf_work_add(sc->sc_workq, &sc->sc_reset_task);
		return 1;
	}

	if (ISSET(icr, I40E_VFINT_ICR01_ADMINQ_MASK)) {
		/* admin command completions and incoming PF messages */
		mutex_enter(&sc->sc_adminq_lock);
		iavf_atq_done(sc);
		iavf_arq(sc);
		mutex_exit(&sc->sc_adminq_lock);
	}

	if (ISSET(icr, I40E_VFINT_ICR01_QUEUE_0_MASK)) {
		/* drain every ring completely (no budget on this vector) */
		for (i = 0; i < sc->sc_nqueue_pairs; i++) {
			rxr = sc->sc_qps[i].qp_rxr;
			txr = sc->sc_qps[i].qp_txr;

			mutex_enter(&rxr->rxr_lock);
			while (iavf_rxeof(sc, rxr, UINT_MAX,
			    &rxr->rxr_intr) != 0) {
				/* do nothing */
			}
			mutex_exit(&rxr->rxr_lock);

			mutex_enter(&txr->txr_lock);
			while (iavf_txeof(sc, txr, UINT_MAX,
			    &txr->txr_intr) != 0) {
				/* do nothing */
			}
			mutex_exit(&txr->txr_lock);
		}
	}

	return 0;
}
3262
3263 static int
3264 iavf_queue_intr(void *xqp)
3265 {
3266 struct iavf_queue_pair *qp = xqp;
3267 struct iavf_tx_ring *txr;
3268 struct iavf_rx_ring *rxr;
3269 struct iavf_softc *sc;
3270 unsigned int qid;
3271 u_int txlimit, rxlimit;
3272 int more;
3273
3274 txr = qp->qp_txr;
3275 rxr = qp->qp_rxr;
3276 sc = txr->txr_sc;
3277 qid = txr->txr_qid;
3278
3279 txlimit = sc->sc_tx_intr_process_limit;
3280 rxlimit = sc->sc_rx_intr_process_limit;
3281 qp->qp_workqueue = sc->sc_txrx_workqueue;
3282
3283 more = iavf_handle_queue_common(sc, qp,
3284 txlimit, &txr->txr_intr, rxlimit, &rxr->rxr_intr);
3285
3286 if (more != 0) {
3287 iavf_sched_handle_queue(sc, qp);
3288 } else {
3289 /* for ALTQ */
3290 if (txr->txr_qid == 0)
3291 if_schedule_deferred_start(&sc->sc_ec.ec_if);
3292 softint_schedule(txr->txr_si);
3293
3294 iavf_queue_intr_enable(sc, qid);
3295 }
3296
3297 return 0;
3298 }
3299
3300 static void
3301 iavf_handle_queue_wk(struct work *wk, void *xsc __unused)
3302 {
3303 struct iavf_queue_pair *qp;
3304
3305 qp = container_of(wk, struct iavf_queue_pair, qp_work);
3306 iavf_handle_queue(qp);
3307 }
3308
3309 static void
3310 iavf_handle_queue(void *xqp)
3311 {
3312 struct iavf_queue_pair *qp = xqp;
3313 struct iavf_tx_ring *txr;
3314 struct iavf_rx_ring *rxr;
3315 struct iavf_softc *sc;
3316 unsigned int qid;
3317 u_int txlimit, rxlimit;
3318 int more;
3319
3320 txr = qp->qp_txr;
3321 rxr = qp->qp_rxr;
3322 sc = txr->txr_sc;
3323 qid = txr->txr_qid;
3324
3325 txlimit = sc->sc_tx_process_limit;
3326 rxlimit = sc->sc_rx_process_limit;
3327
3328 more = iavf_handle_queue_common(sc, qp,
3329 txlimit, &txr->txr_defer, rxlimit, &rxr->rxr_defer);
3330
3331 if (more != 0)
3332 iavf_sched_handle_queue(sc, qp);
3333 else
3334 iavf_queue_intr_enable(sc, qid);
3335 }
3336
3337 static void
3338 iavf_tick(void *xsc)
3339 {
3340 struct iavf_softc *sc;
3341 unsigned int i;
3342 int timedout;
3343
3344 sc = xsc;
3345 timedout = 0;
3346
3347 mutex_enter(&sc->sc_cfg_lock);
3348
3349 if (sc->sc_resetting) {
3350 iavf_work_add(sc->sc_workq, &sc->sc_reset_task);
3351 mutex_exit(&sc->sc_cfg_lock);
3352 return;
3353 }
3354
3355 iavf_get_stats(sc);
3356
3357 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
3358 timedout |= iavf_watchdog(sc->sc_qps[i].qp_txr);
3359 }
3360
3361 if (timedout != 0) {
3362 iavf_work_add(sc->sc_workq, &sc->sc_wdto_task);
3363 } else {
3364 callout_schedule(&sc->sc_tick, IAVF_TICK_INTERVAL);
3365 }
3366
3367 mutex_exit(&sc->sc_cfg_lock);
3368 }
3369
3370 static void
3371 iavf_tick_halt(void *unused __unused)
3372 {
3373
3374 /* do nothing */
3375 }
3376
/*
 * Ask the device for a fresh VF reset, then run the reset recovery
 * state machine.
 */
static void
iavf_reset_request(void *xsc)
{
	struct iavf_softc *sc = xsc;

	iavf_reset_vf(sc);
	iavf_reset_start(sc);
}
3385
/*
 * Begin VF reset recovery.  Marks the softc as resetting (skipping
 * the teardown if a reset is already in progress), takes the link
 * down, stops a running interface while remembering via
 * sc_reset_up that it must be brought back up, saves the current
 * MAC address, and then runs iavf_reset() directly.  sc_reset_task
 * is also pointed at iavf_reset so the tick/workqueue can re-run
 * it (see iavf_tick).
 */
static void
iavf_reset_start(void *xsc)
{
	struct iavf_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_ec.ec_if;

	mutex_enter(&sc->sc_cfg_lock);

	if (sc->sc_resetting)
		goto do_reset;

	sc->sc_resetting = true;
	if_link_state_change(ifp, LINK_STATE_DOWN);

	if (ISSET(ifp->if_flags, IFF_RUNNING)) {
		iavf_stop_locked(sc);
		/* restart the interface once the reset completes */
		sc->sc_reset_up = true;
	}

	/*
	 * NOTE(review): presumably compared after reset to detect a
	 * PF-assigned MAC change — confirm in the rest of the file.
	 */
	memcpy(sc->sc_enaddr_reset, sc->sc_enaddr, ETHER_ADDR_LEN);

do_reset:
	iavf_work_set(&sc->sc_reset_task, iavf_reset, sc);

	mutex_exit(&sc->sc_cfg_lock);

	iavf_reset((void *)sc);
}
3414
/*
 * Re-establish communication with the PF after a VF reset:
 * tear down and rebuild the admin queue, renegotiate the virtchnl
 * version and VF resources, reallocate queue pairs/interrupts if the
 * PF granted a different amount, and reprogram the IRQ map.  On
 * success control passes to iavf_reset_finish; on failure the tick
 * callout is re-armed so the reset will be retried later.
 */
static void
iavf_reset(void *xsc)
{
	struct iavf_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct ixl_aq_buf *aqb;
	bool realloc_qps, realloc_intrs;

	mutex_enter(&sc->sc_cfg_lock);

	/* Drop all in-flight admin queue state before touching the HW. */
	mutex_enter(&sc->sc_adminq_lock);
	iavf_cleanup_admin_queue(sc);
	mutex_exit(&sc->sc_adminq_lock);

	/* Invalidate negotiated state; it must be re-learned from the PF. */
	sc->sc_major_ver = UINT_MAX;
	sc->sc_minor_ver = UINT_MAX;
	sc->sc_got_vf_resources = 0;
	sc->sc_got_irq_map = 0;

	/* Scratch buffer reused for all the commands issued below. */
	aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
	if (aqb == NULL)
		goto failed;

	if (iavf_wait_active(sc) != 0) {
		log(LOG_WARNING, "%s: VF reset timed out\n",
		    ifp->if_xname);
		goto failed;
	}

	if (!iavf_arq_fill(sc)) {
		log(LOG_ERR, "%s: unable to fill arq descriptors\n",
		    ifp->if_xname);
		goto failed;
	}

	if (iavf_init_admin_queue(sc) != 0) {
		log(LOG_ERR, "%s: unable to initialize admin queue\n",
		    ifp->if_xname);
		goto failed;
	}

	if (iavf_get_version(sc, aqb) != 0) {
		log(LOG_ERR, "%s: unable to get VF interface version\n",
		    ifp->if_xname);
		goto failed;
	}

	if (iavf_get_vf_resources(sc, aqb) != 0) {
		log(LOG_ERR, "%s: timed out waiting for VF resources\n",
		    ifp->if_xname);
		goto failed;
	}

	/* The PF may grant more queues/vectors than previously allocated. */
	if (sc->sc_nqps_alloc < iavf_calc_queue_pair_size(sc)) {
		realloc_qps = true;
	} else {
		realloc_qps = false;
	}

	if (sc->sc_nintrs < iavf_calc_msix_count(sc)) {
		realloc_intrs = true;
	} else {
		realloc_intrs = false;
	}

	/* Interrupts must be torn down before resizing either resource. */
	if (realloc_qps || realloc_intrs)
		iavf_teardown_interrupts(sc);

	if (realloc_qps) {
		iavf_queue_pairs_free(sc);
		if (iavf_queue_pairs_alloc(sc) != 0) {
			log(LOG_ERR, "%s: failed to allocate queue pairs\n",
			    ifp->if_xname);
			goto failed;
		}
	}

	if (realloc_qps || realloc_intrs) {
		if (iavf_setup_interrupts(sc) != 0) {
			sc->sc_nintrs = 0;
			log(LOG_ERR, "%s: failed to allocate interrupts\n",
			    ifp->if_xname);
			goto failed;
		}
		log(LOG_INFO, "%s: reallocated queues\n", ifp->if_xname);
	}

	if (iavf_config_irq_map(sc, aqb) != 0) {
		log(LOG_ERR, "%s: timed out configuring IRQ map\n",
		    ifp->if_xname);
		goto failed;
	}

	/* Return the scratch buffer to the idle pool. */
	mutex_enter(&sc->sc_adminq_lock);
	iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
	mutex_exit(&sc->sc_adminq_lock);

	iavf_reset_finish(sc);

	mutex_exit(&sc->sc_cfg_lock);
	return;

failed:
	mutex_enter(&sc->sc_adminq_lock);
	iavf_cleanup_admin_queue(sc);
	if (aqb != NULL) {
		iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
	}
	mutex_exit(&sc->sc_adminq_lock);
	/* Retry via iavf_tick, which re-queues the reset task. */
	callout_schedule(&sc->sc_tick, IAVF_TICK_INTERVAL);
	mutex_exit(&sc->sc_cfg_lock);
}
3527
/*
 * Complete a successful VF reset: re-enable interrupts, replay the
 * unicast/multicast/VLAN filter state that the PF lost across the
 * reset, handle a PF-imposed MAC address change, and bring the
 * interface back up if it was running before the reset.
 *
 * Called with sc_cfg_lock held (asserted below); temporarily drops
 * it around IFNET_LOCK to respect lock ordering.
 */
static void
iavf_reset_finish(struct iavf_softc *sc)
{
	struct ethercom *ec = &sc->sc_ec;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ifnet *ifp = &ec->ec_if;
	struct vlanid_list *vlanidp;
	uint8_t enaddr_prev[ETHER_ADDR_LEN], enaddr_next[ETHER_ADDR_LEN];

	KASSERT(mutex_owned(&sc->sc_cfg_lock));

	callout_stop(&sc->sc_tick);

	iavf_intr_enable(sc);

	/* Re-add any administratively configured unicast address. */
	if (!iavf_is_etheranyaddr(sc->sc_enaddr_added)) {
		iavf_eth_addr(sc, sc->sc_enaddr_added, IAVF_VC_OP_ADD_ETH_ADDR);
	}

	/* Replay the multicast filter list unless we are in allmulti. */
	ETHER_LOCK(ec);
	if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
		for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
		    ETHER_NEXT_MULTI(step, enm)) {
			iavf_add_multi(sc, enm->enm_addrlo, enm->enm_addrhi);
		}
	}

	/*
	 * Replay VLAN filters.  ETHER_LOCK is dropped around the admin
	 * queue call (which may sleep); the list is re-read afterwards.
	 */
	SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
		ETHER_UNLOCK(ec);
		iavf_config_vlan_id(sc, vlanidp->vid, IAVF_VC_OP_ADD_VLAN);
		ETHER_LOCK(ec);
	}
	ETHER_UNLOCK(ec);

	/* The PF may have assigned a new MAC address during the reset. */
	if (memcmp(sc->sc_enaddr, sc->sc_enaddr_reset, ETHER_ADDR_LEN) != 0) {
		memcpy(enaddr_prev, sc->sc_enaddr_reset, sizeof(enaddr_prev));
		memcpy(enaddr_next, sc->sc_enaddr, sizeof(enaddr_next));
		log(LOG_INFO, "%s: Ethernet address changed to %s\n",
		    ifp->if_xname, ether_sprintf(enaddr_next));

		/* IFNET_LOCK must be taken without sc_cfg_lock held. */
		mutex_exit(&sc->sc_cfg_lock);
		IFNET_LOCK(ifp);
		kpreempt_disable();
		/*XXX we need an API to change ethernet address. */
		iavf_replace_lla(ifp, enaddr_prev, enaddr_next);
		kpreempt_enable();
		IFNET_UNLOCK(ifp);
		mutex_enter(&sc->sc_cfg_lock);
	}

	sc->sc_resetting = false;

	if (sc->sc_reset_up) {
		iavf_init_locked(sc);
	}

	/* Re-announce the last known link state, lost with the reset. */
	if (sc->sc_link_state != LINK_STATE_DOWN) {
		if_link_state_change(ifp, sc->sc_link_state);
	}

}
3590
/*
 * Allocate, map, and load a single-segment DMA memory region of the
 * given size and alignment, and zero it.  The create/alloc/map/load
 * steps unwind in reverse order via the goto chain on failure.
 *
 * Returns 0 on success, 1 on failure (no errno detail is preserved).
 */
static int
iavf_dmamem_alloc(bus_dma_tag_t dmat, struct ixl_dmamem *ixm,
    bus_size_t size, bus_size_t align)
{
	ixm->ixm_size = size;

	if (bus_dmamap_create(dmat, ixm->ixm_size, 1,
	    ixm->ixm_size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
	    &ixm->ixm_map) != 0)
		return 1;
	if (bus_dmamem_alloc(dmat, ixm->ixm_size,
	    align, 0, &ixm->ixm_seg, 1, &ixm->ixm_nsegs,
	    BUS_DMA_WAITOK) != 0)
		goto destroy;
	if (bus_dmamem_map(dmat, &ixm->ixm_seg, ixm->ixm_nsegs,
	    ixm->ixm_size, &ixm->ixm_kva, BUS_DMA_WAITOK) != 0)
		goto free;
	if (bus_dmamap_load(dmat, ixm->ixm_map, ixm->ixm_kva,
	    ixm->ixm_size, NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	memset(ixm->ixm_kva, 0, ixm->ixm_size);

	return 0;
unmap:
	bus_dmamem_unmap(dmat, ixm->ixm_kva, ixm->ixm_size);
free:
	bus_dmamem_free(dmat, &ixm->ixm_seg, 1);
destroy:
	bus_dmamap_destroy(dmat, ixm->ixm_map);
	return 1;
}
3624
3625 static void
3626 iavf_dmamem_free(bus_dma_tag_t dmat, struct ixl_dmamem *ixm)
3627 {
3628
3629 bus_dmamap_unload(dmat, ixm->ixm_map);
3630 bus_dmamem_unmap(dmat, ixm->ixm_kva, ixm->ixm_size);
3631 bus_dmamem_free(dmat, &ixm->ixm_seg, 1);
3632 bus_dmamap_destroy(dmat, ixm->ixm_map);
3633 }
3634
/*
 * Allocate an admin-queue buffer: the tracking structure itself plus
 * a single-segment, IAVF_AQ_ALIGN-aligned DMA region of buflen bytes,
 * mapped and loaded.  Failure unwinds via the goto chain in reverse
 * order of setup.  Returns NULL on any failure.
 */
static struct ixl_aq_buf *
iavf_aqb_alloc(bus_dma_tag_t dmat, size_t buflen)
{
	struct ixl_aq_buf *aqb;

	aqb = kmem_alloc(sizeof(*aqb), KM_NOSLEEP);
	if (aqb == NULL)
		return NULL;

	aqb->aqb_size = buflen;

	if (bus_dmamap_create(dmat, aqb->aqb_size, 1,
	    aqb->aqb_size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &aqb->aqb_map) != 0)
		goto free;
	if (bus_dmamem_alloc(dmat, aqb->aqb_size,
	    IAVF_AQ_ALIGN, 0, &aqb->aqb_seg, 1, &aqb->aqb_nsegs,
	    BUS_DMA_WAITOK) != 0)
		goto destroy;
	if (bus_dmamem_map(dmat, &aqb->aqb_seg, aqb->aqb_nsegs,
	    aqb->aqb_size, &aqb->aqb_data, BUS_DMA_WAITOK) != 0)
		goto dma_free;
	if (bus_dmamap_load(dmat, aqb->aqb_map, aqb->aqb_data,
	    aqb->aqb_size, NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	return aqb;
unmap:
	bus_dmamem_unmap(dmat, aqb->aqb_data, aqb->aqb_size);
dma_free:
	bus_dmamem_free(dmat, &aqb->aqb_seg, 1);
destroy:
	bus_dmamap_destroy(dmat, aqb->aqb_map);
free:
	kmem_free(aqb, sizeof(*aqb));

	return NULL;
}
3673
3674 static void
3675 iavf_aqb_free(bus_dma_tag_t dmat, struct ixl_aq_buf *aqb)
3676 {
3677
3678 bus_dmamap_unload(dmat, aqb->aqb_map);
3679 bus_dmamem_unmap(dmat, aqb->aqb_data, aqb->aqb_size);
3680 bus_dmamem_free(dmat, &aqb->aqb_seg, 1);
3681 bus_dmamap_destroy(dmat, aqb->aqb_map);
3682 kmem_free(aqb, sizeof(*aqb));
3683 }
3684
3685 static struct ixl_aq_buf *
3686 iavf_aqb_get_locked(struct ixl_aq_bufs *q)
3687 {
3688 struct ixl_aq_buf *aqb;
3689
3690 aqb = SIMPLEQ_FIRST(q);
3691 if (aqb != NULL) {
3692 SIMPLEQ_REMOVE(q, aqb, ixl_aq_buf, aqb_entry);
3693 }
3694
3695 return aqb;
3696 }
3697
3698 static struct ixl_aq_buf *
3699 iavf_aqb_get(struct iavf_softc *sc, struct ixl_aq_bufs *q)
3700 {
3701 struct ixl_aq_buf *aqb;
3702
3703 if (q != NULL) {
3704 mutex_enter(&sc->sc_adminq_lock);
3705 aqb = iavf_aqb_get_locked(q);
3706 mutex_exit(&sc->sc_adminq_lock);
3707 } else {
3708 aqb = NULL;
3709 }
3710
3711 if (aqb == NULL) {
3712 aqb = iavf_aqb_alloc(sc->sc_dmat, IAVF_AQ_BUFLEN);
3713 }
3714
3715 return aqb;
3716 }
3717
/*
 * Append a buffer to an admin-queue buffer list.  The caller must
 * hold the lock protecting the list (sc_adminq_lock at most call
 * sites in this file).
 */
static void
iavf_aqb_put_locked(struct ixl_aq_bufs *q, struct ixl_aq_buf *aqb)
{

	SIMPLEQ_INSERT_TAIL(q, aqb, aqb_entry);
}
3724
3725 static void
3726 iavf_aqb_clean(struct ixl_aq_bufs *q, bus_dma_tag_t dmat)
3727 {
3728 struct ixl_aq_buf *aqb;
3729
3730 while ((aqb = SIMPLEQ_FIRST(q)) != NULL) {
3731 SIMPLEQ_REMOVE(q, aqb, ixl_aq_buf, aqb_entry);
3732 iavf_aqb_free(dmat, aqb);
3733 }
3734 }
3735
3736 static const char *
3737 iavf_aq_vc_opcode_str(const struct ixl_aq_desc *iaq)
3738 {
3739
3740 switch (iavf_aq_vc_get_opcode(iaq)) {
3741 case IAVF_VC_OP_VERSION:
3742 return "GET_VERSION";
3743 case IAVF_VC_OP_RESET_VF:
3744 return "RESET_VF";
3745 case IAVF_VC_OP_GET_VF_RESOURCES:
3746 return "GET_VF_RESOURCES";
3747 case IAVF_VC_OP_CONFIG_TX_QUEUE:
3748 return "CONFIG_TX_QUEUE";
3749 case IAVF_VC_OP_CONFIG_RX_QUEUE:
3750 return "CONFIG_RX_QUEUE";
3751 case IAVF_VC_OP_CONFIG_VSI_QUEUES:
3752 return "CONFIG_VSI_QUEUES";
3753 case IAVF_VC_OP_CONFIG_IRQ_MAP:
3754 return "CONFIG_IRQ_MAP";
3755 case IAVF_VC_OP_ENABLE_QUEUES:
3756 return "ENABLE_QUEUES";
3757 case IAVF_VC_OP_DISABLE_QUEUES:
3758 return "DISABLE_QUEUES";
3759 case IAVF_VC_OP_ADD_ETH_ADDR:
3760 return "ADD_ETH_ADDR";
3761 case IAVF_VC_OP_DEL_ETH_ADDR:
3762 return "DEL_ETH_ADDR";
3763 case IAVF_VC_OP_CONFIG_PROMISC:
3764 return "CONFIG_PROMISC";
3765 case IAVF_VC_OP_GET_STATS:
3766 return "GET_STATS";
3767 case IAVF_VC_OP_EVENT:
3768 return "EVENT";
3769 case IAVF_VC_OP_CONFIG_RSS_KEY:
3770 return "CONFIG_RSS_KEY";
3771 case IAVF_VC_OP_GET_RSS_HENA_CAPS:
3772 return "GET_RS_HENA_CAPS";
3773 case IAVF_VC_OP_SET_RSS_HENA:
3774 return "SET_RSS_HENA";
3775 case IAVF_VC_OP_ENABLE_VLAN_STRIP:
3776 return "ENABLE_VLAN_STRIPPING";
3777 case IAVF_VC_OP_DISABLE_VLAN_STRIP:
3778 return "DISABLE_VLAN_STRIPPING";
3779 case IAVF_VC_OP_REQUEST_QUEUES:
3780 return "REQUEST_QUEUES";
3781 }
3782
3783 return "unknown";
3784 }
3785
3786 static void
3787 iavf_aq_dump(const struct iavf_softc *sc, const struct ixl_aq_desc *iaq,
3788 const char *msg)
3789 {
3790 char buf[512];
3791 size_t len;
3792
3793 len = sizeof(buf);
3794 buf[--len] = '\0';
3795
3796 device_printf(sc->sc_dev, "%s\n", msg);
3797 snprintb(buf, len, IXL_AQ_FLAGS_FMT, le16toh(iaq->iaq_flags));
3798 device_printf(sc->sc_dev, "flags %s opcode %04x\n",
3799 buf, le16toh(iaq->iaq_opcode));
3800 device_printf(sc->sc_dev, "datalen %u retval %u\n",
3801 le16toh(iaq->iaq_datalen), le16toh(iaq->iaq_retval));
3802 device_printf(sc->sc_dev, "vc-opcode %u (%s)\n",
3803 iavf_aq_vc_get_opcode(iaq),
3804 iavf_aq_vc_opcode_str(iaq));
3805 device_printf(sc->sc_dev, "vc-retval %u\n",
3806 iavf_aq_vc_get_retval(iaq));
3807 device_printf(sc->sc_dev, "cookie %016" PRIx64 "\n", iaq->iaq_cookie);
3808 device_printf(sc->sc_dev, "%08x %08x %08x %08x\n",
3809 le32toh(iaq->iaq_param[0]), le32toh(iaq->iaq_param[1]),
3810 le32toh(iaq->iaq_param[2]), le32toh(iaq->iaq_param[3]));
3811 }
3812
3813 static int
3814 iavf_arq_fill(struct iavf_softc *sc)
3815 {
3816 struct ixl_aq_buf *aqb;
3817 struct ixl_aq_desc *arq, *iaq;
3818 unsigned int prod = sc->sc_arq_prod;
3819 unsigned int n;
3820 int filled;
3821
3822 n = ixl_rxr_unrefreshed(sc->sc_arq_prod, sc->sc_arq_cons,
3823 IAVF_AQ_NUM);
3824
3825 if (__predict_false(n <= 0))
3826 return 0;
3827
3828 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3829 0, IXL_DMA_LEN(&sc->sc_arq),
3830 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3831
3832 arq = IXL_DMA_KVA(&sc->sc_arq);
3833
3834 do {
3835 iaq = &arq[prod];
3836
3837 if (ixl_aq_has_dva(iaq)) {
3838 /* already filled */
3839 break;
3840 }
3841
3842 aqb = iavf_aqb_get_locked(&sc->sc_arq_idle);
3843 if (aqb == NULL)
3844 break;
3845
3846 memset(aqb->aqb_data, 0, aqb->aqb_size);
3847
3848 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0,
3849 aqb->aqb_size, BUS_DMASYNC_PREREAD);
3850
3851 iaq->iaq_flags = htole16(IXL_AQ_BUF |
3852 (aqb->aqb_size > I40E_AQ_LARGE_BUF ?
3853 IXL_AQ_LB : 0));
3854 iaq->iaq_opcode = 0;
3855 iaq->iaq_datalen = htole16(aqb->aqb_size);
3856 iaq->iaq_retval = 0;
3857 iaq->iaq_cookie = 0;
3858 iaq->iaq_param[0] = 0;
3859 iaq->iaq_param[1] = 0;
3860 ixl_aq_dva(iaq, IXL_AQB_DVA(aqb));
3861 iavf_aqb_put_locked(&sc->sc_arq_live, aqb);
3862
3863 prod++;
3864 prod &= IAVF_AQ_MASK;
3865 filled = 1;
3866 } while (--n);
3867
3868 sc->sc_arq_prod = prod;
3869
3870 if (filled) {
3871 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3872 0, IXL_DMA_LEN(&sc->sc_arq),
3873 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3874 iavf_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
3875 }
3876
3877 return filled;
3878 }
3879
/*
 * Sleep on the admin-queue condvar until a reply with the given
 * virtchnl opcode has been processed by iavf_arq() (which records the
 * opcode in sc_arq_opcode and signals the cv), or until the
 * IAVF_EXEC_TIMEOUT expires.
 *
 * Returns 0 on success, or the cv_timedwait error (typically
 * EWOULDBLOCK on timeout).  Must be called with sc_adminq_lock held.
 */
static int
iavf_arq_wait(struct iavf_softc *sc, uint32_t opcode)
{
	int error;

	KASSERT(mutex_owned(&sc->sc_adminq_lock));

	/* Loop: a wakeup may be for a different opcode's reply. */
	while ((error = cv_timedwait(&sc->sc_adminq_cv,
	    &sc->sc_adminq_lock, mstohz(IAVF_EXEC_TIMEOUT))) == 0) {
		if (opcode == sc->sc_arq_opcode)
			break;
	}

	if (error != 0 &&
	    atomic_load_relaxed(&sc->sc_debuglevel) >= 2)
		device_printf(sc->sc_dev, "cv_timedwait error=%d\n", error);

	return error;
}
3899
/*
 * Work handler that replenishes the ARQ's idle buffer pool.  First
 * retries the fill with whatever idle buffers exist; if slots remain
 * unrefreshed, allocates fresh buffers outside the lock (allocation
 * may sleep), queues them as idle, and fills again.
 */
static void
iavf_arq_refill(void *xsc)
{
	struct iavf_softc *sc = xsc;
	struct ixl_aq_bufs aqbs;
	struct ixl_aq_buf *aqb;
	unsigned int n, i;

	mutex_enter(&sc->sc_adminq_lock);
	iavf_arq_fill(sc);
	/* Count the slots still lacking a buffer after the fill. */
	n = ixl_rxr_unrefreshed(sc->sc_arq_prod, sc->sc_arq_cons,
	    IAVF_AQ_NUM);
	mutex_exit(&sc->sc_adminq_lock);

	if (n == 0)
		return;

	if (atomic_load_relaxed(&sc->sc_debuglevel) >= 1)
		device_printf(sc->sc_dev, "Allocate %d bufs for arq\n", n);

	/* Allocate unlocked; a failed allocation just means fewer bufs. */
	SIMPLEQ_INIT(&aqbs);
	for (i = 0; i < n; i++) {
		aqb = iavf_aqb_get(sc, NULL);
		if (aqb == NULL)
			continue;
		SIMPLEQ_INSERT_TAIL(&aqbs, aqb, aqb_entry);
	}

	/* Move the new buffers to the idle pool and fill once more. */
	mutex_enter(&sc->sc_adminq_lock);
	while ((aqb = SIMPLEQ_FIRST(&aqbs)) != NULL) {
		SIMPLEQ_REMOVE(&aqbs, aqb, ixl_aq_buf, aqb_entry);
		iavf_aqb_put_locked(&sc->sc_arq_idle, aqb);
	}
	iavf_arq_fill(sc);
	mutex_exit(&sc->sc_adminq_lock);
}
3936
3937 static uint32_t
3938 iavf_process_arq(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
3939 struct ixl_aq_buf *aqb)
3940 {
3941 uint32_t vc_retval, vc_opcode;
3942 int dbg;
3943
3944 dbg = atomic_load_relaxed(&sc->sc_debuglevel);
3945 if (dbg >= 3)
3946 iavf_aq_dump(sc, iaq, "arq proc");
3947
3948 if (dbg >= 2) {
3949 vc_retval = iavf_aq_vc_get_retval(iaq);
3950 if (vc_retval != IAVF_VC_RC_SUCCESS) {
3951 device_printf(sc->sc_dev, "%s failed=%d(arq)\n",
3952 iavf_aq_vc_opcode_str(iaq), vc_retval);
3953 }
3954 }
3955
3956 vc_opcode = iavf_aq_vc_get_opcode(iaq);
3957 switch (vc_opcode) {
3958 case IAVF_VC_OP_VERSION:
3959 iavf_process_version(sc, iaq, aqb);
3960 break;
3961 case IAVF_VC_OP_GET_VF_RESOURCES:
3962 iavf_process_vf_resources(sc, iaq, aqb);
3963 break;
3964 case IAVF_VC_OP_CONFIG_IRQ_MAP:
3965 iavf_process_irq_map(sc, iaq);
3966 break;
3967 case IAVF_VC_OP_EVENT:
3968 iavf_process_vc_event(sc, iaq, aqb);
3969 break;
3970 case IAVF_VC_OP_GET_STATS:
3971 iavf_process_stats(sc, iaq, aqb);
3972 break;
3973 case IAVF_VC_OP_REQUEST_QUEUES:
3974 iavf_process_req_queues(sc, iaq, aqb);
3975 break;
3976 }
3977
3978 return vc_opcode;
3979 }
3980
/*
 * Busy-poll the ARQ for a reply carrying wait_opcode, for at most
 * "retry" milliseconds.  Every descriptor that arrives in the
 * meantime is processed (so other replies are not lost), its buffer
 * recycled, and the ring refilled.
 *
 * Returns 0 when the awaited opcode was seen, ETIMEDOUT when the
 * retry budget expires, or EIO if the hardware head register reads
 * out of range (e.g. across a reset).
 */
static int
iavf_arq_poll(struct iavf_softc *sc, uint32_t wait_opcode, int retry)
{
	struct ixl_aq_desc *arq, *iaq;
	struct ixl_aq_buf *aqb;
	unsigned int cons = sc->sc_arq_cons;
	unsigned int prod;
	uint32_t vc_opcode;
	bool received;
	int i;

	for (i = 0, received = false; i < retry && !received; i++) {
		/* Hardware head == how far the PF has written replies. */
		prod = iavf_rd(sc, sc->sc_aq_regs->arq_head);
		prod &= sc->sc_aq_regs->arq_head_mask;

		if (prod == cons) {
			/* Nothing new yet; burn one tick of the budget. */
			delaymsec(1);
			continue;
		}

		/* Out-of-range head: register contents are bogus. */
		if (prod >= IAVF_AQ_NUM) {
			return EIO;
		}

		arq = IXL_DMA_KVA(&sc->sc_arq);

		bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
		    0, IXL_DMA_LEN(&sc->sc_arq),
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		/* Consume every descriptor between cons and prod. */
		do {
			iaq = &arq[cons];
			aqb = iavf_aqb_get_locked(&sc->sc_arq_live);
			KASSERT(aqb != NULL);

			bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0,
			    IAVF_AQ_BUFLEN, BUS_DMASYNC_POSTREAD);

			vc_opcode = iavf_process_arq(sc, iaq, aqb);

			if (vc_opcode == wait_opcode)
				received = true;

			memset(iaq, 0, sizeof(*iaq));
			iavf_aqb_put_locked(&sc->sc_arq_idle, aqb);

			cons++;
			cons &= IAVF_AQ_MASK;

		} while (cons != prod);

		bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
		    0, IXL_DMA_LEN(&sc->sc_arq),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		sc->sc_arq_cons = cons;
		/* Re-arm the consumed slots with idle buffers. */
		iavf_arq_fill(sc);

	}

	if (!received)
		return ETIMEDOUT;

	return 0;
}
4046
/*
 * Interrupt-path ARQ processing: drain all descriptors the PF has
 * produced, dispatch each reply, and wake any thread sleeping in
 * iavf_arq_wait() for the synchronous command opcodes.  Buffer
 * refill is deferred to the sc_arq_refill work (allocation may be
 * needed, which cannot happen here).
 *
 * Returns non-zero if any descriptor was processed.  Must be called
 * with sc_adminq_lock held.
 */
static int
iavf_arq(struct iavf_softc *sc)
{
	struct ixl_aq_desc *arq, *iaq;
	struct ixl_aq_buf *aqb;
	unsigned int cons = sc->sc_arq_cons;
	unsigned int prod;
	uint32_t vc_opcode;

	KASSERT(mutex_owned(&sc->sc_adminq_lock));

	prod = iavf_rd(sc, sc->sc_aq_regs->arq_head);
	prod &= sc->sc_aq_regs->arq_head_mask;

	/* broken value at resetting */
	if (prod >= IAVF_AQ_NUM) {
		/* Schedule a full reset; register state is unusable. */
		iavf_work_set(&sc->sc_reset_task, iavf_reset_start, sc);
		iavf_work_add(sc->sc_workq, &sc->sc_reset_task);
		return 0;
	}

	if (cons == prod)
		return 0;

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
	    0, IXL_DMA_LEN(&sc->sc_arq),
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	arq = IXL_DMA_KVA(&sc->sc_arq);

	do {
		iaq = &arq[cons];
		aqb = iavf_aqb_get_locked(&sc->sc_arq_live);

		KASSERT(aqb != NULL);

		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IAVF_AQ_BUFLEN,
		    BUS_DMASYNC_POSTREAD);

		vc_opcode = iavf_process_arq(sc, iaq, aqb);

		/*
		 * Replies to synchronous commands: record the result
		 * and wake the thread blocked in iavf_arq_wait().
		 */
		switch (vc_opcode) {
		case IAVF_VC_OP_CONFIG_TX_QUEUE:
		case IAVF_VC_OP_CONFIG_RX_QUEUE:
		case IAVF_VC_OP_CONFIG_VSI_QUEUES:
		case IAVF_VC_OP_ENABLE_QUEUES:
		case IAVF_VC_OP_DISABLE_QUEUES:
		case IAVF_VC_OP_GET_RSS_HENA_CAPS:
		case IAVF_VC_OP_SET_RSS_HENA:
		case IAVF_VC_OP_ADD_ETH_ADDR:
		case IAVF_VC_OP_DEL_ETH_ADDR:
		case IAVF_VC_OP_CONFIG_PROMISC:
		case IAVF_VC_OP_ADD_VLAN:
		case IAVF_VC_OP_DEL_VLAN:
		case IAVF_VC_OP_ENABLE_VLAN_STRIP:
		case IAVF_VC_OP_DISABLE_VLAN_STRIP:
		case IAVF_VC_OP_CONFIG_RSS_KEY:
		case IAVF_VC_OP_CONFIG_RSS_LUT:
			sc->sc_arq_retval = iavf_aq_vc_get_retval(iaq);
			sc->sc_arq_opcode = vc_opcode;
			cv_signal(&sc->sc_adminq_cv);
			break;
		}

		memset(iaq, 0, sizeof(*iaq));
		iavf_aqb_put_locked(&sc->sc_arq_idle, aqb);

		cons++;
		cons &= IAVF_AQ_MASK;
	} while (cons != prod);

	sc->sc_arq_cons = cons;
	/* Refill needs allocation; punt to the workqueue. */
	iavf_work_add(sc->sc_workq, &sc->sc_arq_refill);

	return 1;
}
4123
/*
 * Place one command descriptor (with optional data buffer) on the
 * admin transmit queue and ring the tail doorbell.
 *
 * NOTE(review): the return value is the NEW producer index, not an
 * errno -- it is 0 only when the ring wraps.  Callers that assign it
 * to an "error" variable (e.g. iavf_get_stats) should not interpret
 * it as a failure code -- confirm intent with callers.
 *
 * Caller must hold sc_adminq_lock (all users in this file do).
 */
static int
iavf_atq_post(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
    struct ixl_aq_buf *aqb)
{
	struct ixl_aq_desc *atq, *slot;
	unsigned int prod;

	atq = IXL_DMA_KVA(&sc->sc_atq);
	prod = sc->sc_atq_prod;
	slot = &atq[prod];

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);

	*slot = *iaq;
	/* Ask for an interrupt on completion. */
	slot->iaq_flags |= htole16(IXL_AQ_SI);
	if (aqb != NULL) {
		/* Attach the data buffer and track it as live. */
		ixl_aq_dva(slot, IXL_AQB_DVA(aqb));
		bus_dmamap_sync(sc->sc_dmat, IXL_AQB_MAP(aqb),
		    0, IXL_AQB_LEN(aqb), BUS_DMASYNC_PREWRITE);
		iavf_aqb_put_locked(&sc->sc_atq_live, aqb);
	} else {
		ixl_aq_dva(slot, (bus_addr_t)0);
	}

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);

	if (atomic_load_relaxed(&sc->sc_debuglevel) >= 3)
		iavf_aq_dump(sc, slot, "post");

	prod++;
	prod &= IAVF_AQ_MASK;
	sc->sc_atq_prod = prod;
	/* Doorbell: tell the hardware a new command is available. */
	iavf_wr(sc, sc->sc_aq_regs->atq_tail, prod);
	return prod;
}
4161
/*
 * Busy-wait (up to "tm" milliseconds) for the hardware to consume
 * the most recently posted ATQ command, then reclaim the descriptor
 * slot and its buffer and check the hardware return code.
 *
 * Returns 0 on success, ETIMEDOUT if the command was not consumed in
 * time, or EIO if the hardware reported a non-OK retval.
 */
static int
iavf_atq_poll(struct iavf_softc *sc, unsigned int tm)
{
	struct ixl_aq_desc *atq, *slot;
	struct ixl_aq_desc iaq;
	struct ixl_aq_buf *aqb;
	unsigned int prod;
	unsigned int t;
	int dbg;

	dbg = atomic_load_relaxed(&sc->sc_debuglevel);
	atq = IXL_DMA_KVA(&sc->sc_atq);
	prod = sc->sc_atq_prod;
	slot = &atq[prod];
	t = 0;

	/* Head catches up to prod once the command has been consumed. */
	while (iavf_rd(sc, sc->sc_aq_regs->atq_head) != prod) {
		delaymsec(1);

		if (t++ > tm) {
			if (dbg >= 2) {
				device_printf(sc->sc_dev,
				    "atq timedout\n");
			}
			return ETIMEDOUT;
		}
	}

	/* Copy out the completed descriptor and clear the slot. */
	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTREAD);
	iaq = *slot;
	memset(slot, 0, sizeof(*slot));
	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREREAD);

	aqb = iavf_aqb_get_locked(&sc->sc_atq_live);
	if (aqb != NULL) {
		bus_dmamap_sync(sc->sc_dmat, IXL_AQB_MAP(aqb),
		    0, IXL_AQB_LEN(aqb), BUS_DMASYNC_POSTWRITE);
		/* no need to do iavf_aqb_put(&sc->sc_atq_idle, aqb) */
	}

	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
		if (dbg >= 2) {
			device_printf(sc->sc_dev,
			    "atq retcode=0x%04x\n", le16toh(iaq.iaq_retval));
		}
		return EIO;
	}

	return 0;
}
4214
/*
 * Reclaim completed ATQ descriptors (those with the DD bit set),
 * returning their data buffers to the idle pool and clearing the
 * slots.  Stops at the first descriptor the hardware has not yet
 * marked done.  Must be called with sc_adminq_lock held.
 */
static void
iavf_atq_done(struct iavf_softc *sc)
{
	struct ixl_aq_desc *atq, *slot;
	struct ixl_aq_buf *aqb;
	unsigned int cons;
	unsigned int prod;

	KASSERT(mutex_owned(&sc->sc_adminq_lock));

	prod = sc->sc_atq_prod;
	cons = sc->sc_atq_cons;

	if (prod == cons)
		return;

	atq = IXL_DMA_KVA(&sc->sc_atq);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
	    0, IXL_DMA_LEN(&sc->sc_atq),
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	do {
		slot = &atq[cons];
		/* DD not set: hardware has not finished this one yet. */
		if (!ISSET(slot->iaq_flags, htole16(IXL_AQ_DD)))
			break;

		/* Recycle the attached buffer, if the command had one. */
		if (ixl_aq_has_dva(slot) &&
		    (aqb = iavf_aqb_get_locked(&sc->sc_atq_live)) != NULL) {
			bus_dmamap_sync(sc->sc_dmat, IXL_AQB_MAP(aqb),
			    0, IXL_AQB_LEN(aqb), BUS_DMASYNC_POSTWRITE);
			iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
		}

		memset(slot, 0, sizeof(*slot));

		cons++;
		cons &= IAVF_AQ_MASK;
	} while (cons != prod);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
	    0, IXL_DMA_LEN(&sc->sc_atq),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	sc->sc_atq_cons = cons;
}
4261
4262 static int
4263 iavf_adminq_poll(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
4264 struct ixl_aq_buf *aqb, int retry)
4265 {
4266 int error;
4267
4268 mutex_enter(&sc->sc_adminq_lock);
4269 error = iavf_adminq_poll_locked(sc, iaq, aqb, retry);
4270 mutex_exit(&sc->sc_adminq_lock);
4271
4272 return error;
4273 }
4274
4275 static int
4276 iavf_adminq_poll_locked(struct iavf_softc *sc,
4277 struct ixl_aq_desc *iaq, struct ixl_aq_buf *aqb, int retry)
4278 {
4279 uint32_t opcode;
4280 int error;
4281
4282 KASSERT(!sc->sc_attached || mutex_owned(&sc->sc_adminq_lock));
4283
4284 opcode = iavf_aq_vc_get_opcode(iaq);
4285
4286 iavf_atq_post(sc, iaq, aqb);
4287
4288 error = iavf_atq_poll(sc, retry);
4289 if (error)
4290 return error;
4291
4292 error = iavf_arq_poll(sc, opcode, retry);
4293
4294 if (error != 0 &&
4295 atomic_load_relaxed(&sc->sc_debuglevel) >= 1) {
4296 device_printf(sc->sc_dev, "%s failed=%d(polling)\n",
4297 iavf_aq_vc_opcode_str(iaq), error);
4298 }
4299
4300 return error;
4301 }
4302
/*
 * Execute a synchronous admin-queue command: post it and sleep until
 * the matching reply is processed by the interrupt path (iavf_arq),
 * which stores the virtchnl return code in sc_arq_retval.
 *
 * Returns the cv wait error on timeout, otherwise the virtchnl
 * return code (IAVF_VC_RC_SUCCESS == 0 on success).  Note the two
 * error namespaces are overloaded in the single return value.
 */
static int
iavf_adminq_exec(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
    struct ixl_aq_buf *aqb)
{
	int error;
	uint32_t opcode;

	opcode = iavf_aq_vc_get_opcode(iaq);

	mutex_enter(&sc->sc_adminq_lock);
	iavf_atq_post(sc, iaq, aqb);

	error = iavf_arq_wait(sc, opcode);
	if (error == 0) {
		/* Reply arrived; report the PF's virtchnl status. */
		error = sc->sc_arq_retval;
		if (error != IAVF_VC_RC_SUCCESS &&
		    atomic_load_relaxed(&sc->sc_debuglevel) >= 1) {
			device_printf(sc->sc_dev, "%s failed=%d\n",
			    iavf_aq_vc_opcode_str(iaq), error);
		}
	}

	mutex_exit(&sc->sc_adminq_lock);
	return error;
}
4328
4329 static void
4330 iavf_process_version(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
4331 struct ixl_aq_buf *aqb)
4332 {
4333 struct iavf_vc_version_info *ver;
4334
4335 ver = (struct iavf_vc_version_info *)aqb->aqb_data;
4336 sc->sc_major_ver = le32toh(ver->major);
4337 sc->sc_minor_ver = le32toh(ver->minor);
4338 }
4339
/*
 * Handle a GET_VF_RESOURCES reply: record the PF-granted capabilities
 * (vector count, offload flags, MTU limit) and the first VSI's
 * parameters (VSI id, queue-set handle, queue pair count, MAC).
 * If the PF supplied no MAC, fall back to the locally generated one.
 */
static void
iavf_process_vf_resources(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
    struct ixl_aq_buf *aqb)
{
	struct iavf_vc_vf_resource *vf_res;
	struct iavf_vc_vsi_resource *vsi_res;
	uint8_t *enaddr;
	int mtu, dbg;
	char buf[512];

	dbg = atomic_load_relaxed(&sc->sc_debuglevel);
	/* Mark receipt even if the reply turns out to carry no VSI. */
	sc->sc_got_vf_resources = 1;

	vf_res = aqb->aqb_data;
	sc->sc_max_vectors = le16toh(vf_res->max_vectors);
	if (le16toh(vf_res->num_vsis) == 0) {
		if (dbg >= 1) {
			device_printf(sc->sc_dev, "no vsi available\n");
		}
		return;
	}
	sc->sc_vf_cap = le32toh(vf_res->offload_flags);
	if (dbg >= 2) {
		snprintb(buf, sizeof(buf),
		    IAVF_VC_OFFLOAD_FMT, sc->sc_vf_cap);
		device_printf(sc->sc_dev, "VF cap=%s\n", buf);
	}

	/* Only accept an MTU limit inside the driver's sane range. */
	mtu = le16toh(vf_res->max_mtu);
	if (IAVF_MIN_MTU < mtu && mtu < IAVF_MAX_MTU) {
		sc->sc_max_mtu = MIN(IAVF_MAX_MTU, mtu);
	}

	/* Only the first VSI is used by this driver. */
	vsi_res = &vf_res->vsi_res[0];
	sc->sc_vsi_id = le16toh(vsi_res->vsi_id);
	sc->sc_vf_id = le32toh(iaq->iaq_param[0]);
	sc->sc_qset_handle = le16toh(vsi_res->qset_handle);
	sc->sc_nqps_vsi = le16toh(vsi_res->num_queue_pairs);
	if (!iavf_is_etheranyaddr(vsi_res->default_mac)) {
		enaddr = vsi_res->default_mac;
	} else {
		/* PF gave no MAC; use the locally generated fake one. */
		enaddr = sc->sc_enaddr_fake;
	}
	memcpy(sc->sc_enaddr, enaddr, ETHER_ADDR_LEN);
}
4385
4386 static void
4387 iavf_process_irq_map(struct iavf_softc *sc, struct ixl_aq_desc *iaq)
4388 {
4389 uint32_t retval;
4390
4391 retval = iavf_aq_vc_get_retval(iaq);
4392 if (retval != IAVF_VC_RC_SUCCESS) {
4393 return;
4394 }
4395
4396 sc->sc_got_irq_map = 1;
4397 }
4398
/*
 * Handle an asynchronous EVENT message from the PF: either a link
 * state change (update media status/baudrate and notify the stack)
 * or an impending reset warning (schedule the reset-request task).
 */
static void
iavf_process_vc_event(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
    struct ixl_aq_buf *aqb)
{
	struct iavf_vc_pf_event *event;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	const struct iavf_link_speed *speed;
	int link;

	event = aqb->aqb_data;
	switch (event->event) {
	case IAVF_VC_EVENT_LINK_CHANGE:
		sc->sc_media_status = IFM_AVALID;
		sc->sc_media_active = IFM_ETHER;
		link = LINK_STATE_DOWN;
		if (event->link_status) {
			link = LINK_STATE_UP;
			sc->sc_media_status |= IFM_ACTIVE;

			/* Unknown speed codes leave baudrate at 0. */
			ifp->if_baudrate = 0;
			speed = iavf_find_link_speed(sc, event->link_speed);
			if (speed != NULL) {
				sc->sc_media_active |= speed->media;
				ifp->if_baudrate = speed->baudrate;
			}
		}

		if (sc->sc_link_state != link) {
			sc->sc_link_state = link;
			/* Before attach completes, just cache the state. */
			if (sc->sc_attached) {
				if_link_state_change(ifp, link);
			}
		}
		break;
	case IAVF_VC_EVENT_RESET_IMPENDING:
		log(LOG_INFO, "%s: Reset warning received from the PF\n",
		    ifp->if_xname);
		iavf_work_set(&sc->sc_reset_task, iavf_reset_request, sc);
		iavf_work_add(sc->sc_workq, &sc->sc_reset_task);
		break;
	}
}
4441
4442 static void
4443 iavf_process_stats(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
4444 struct ixl_aq_buf *aqb)
4445 {
4446 struct iavf_stat_counters *isc;
4447 struct i40e_eth_stats *st;
4448
4449 KASSERT(mutex_owned(&sc->sc_adminq_lock));
4450
4451 st = aqb->aqb_data;
4452 isc = &sc->sc_stat_counters;
4453
4454 isc->isc_rx_bytes.ev_count = st->rx_bytes;
4455 isc->isc_rx_unicast.ev_count = st->rx_unicast;
4456 isc->isc_rx_multicast.ev_count = st->rx_multicast;
4457 isc->isc_rx_broadcast.ev_count = st->rx_broadcast;
4458 isc->isc_rx_discards.ev_count = st->rx_discards;
4459 isc->isc_rx_unknown_protocol.ev_count = st->rx_unknown_protocol;
4460
4461 isc->isc_tx_bytes.ev_count = st->tx_bytes;
4462 isc->isc_tx_unicast.ev_count = st->tx_unicast;
4463 isc->isc_tx_multicast.ev_count = st->tx_multicast;
4464 isc->isc_tx_broadcast.ev_count = st->tx_broadcast;
4465 isc->isc_tx_discards.ev_count = st->tx_discards;
4466 isc->isc_tx_errors.ev_count = st->tx_errors;
4467 }
4468
/*
 * Handle a REQUEST_QUEUES reply.  The PF reports the number of queue
 * pairs it can provide; if that exceeds what the current VSI has,
 * retry the request once (the PF typically resets the VF to apply a
 * queue-count change).
 */
static void
iavf_process_req_queues(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
    struct ixl_aq_buf *aqb)
{
	struct iavf_vc_res_request *req;
	struct ifnet *ifp;
	uint32_t vc_retval;

	ifp = &sc->sc_ec.ec_if;
	req = aqb->aqb_data;

	vc_retval = iavf_aq_vc_get_retval(iaq);
	if (vc_retval != IAVF_VC_RC_SUCCESS) {
		return;
	}

	/*
	 * NOTE(review): this logs when the reply offers MORE pairs than
	 * were requested, yet the message reads "only %d left" -- the
	 * comparison direction or the wording looks inverted; confirm
	 * against the virtchnl REQUEST_QUEUES semantics.  Also
	 * req->num_queue_pairs is used without byte-order conversion
	 * here -- verify the field's wire format.
	 */
	if (sc->sc_nqps_req < req->num_queue_pairs) {
		log(LOG_INFO,
		    "%s: requested %d queues, but only %d left.\n",
		    ifp->if_xname,
		    sc->sc_nqps_req, req->num_queue_pairs);
	}

	if (sc->sc_nqps_vsi < req->num_queue_pairs) {
		if (!sc->sc_req_queues_retried) {
			/* req->num_queue_pairs indicates max qps */
			sc->sc_nqps_req = req->num_queue_pairs;

			sc->sc_req_queues_retried = true;
			iavf_work_add(sc->sc_workq, &sc->sc_req_queues_task);
		}
	}
}
4502
/*
 * Send a VERSION command advertising the driver's supported virtchnl
 * version and poll for the PF's reply (which fills sc_major_ver /
 * sc_minor_ver via iavf_process_version).  Uses the locked poll
 * variant before attach completes, when no other context can hold
 * sc_adminq_lock.  Returns 0 on success, -1 on failure.
 */
static int
iavf_get_version(struct iavf_softc *sc, struct ixl_aq_buf *aqb)
{
	struct ixl_aq_desc iaq;
	struct iavf_vc_version_info *ver;
	int error;

	memset(&iaq, 0, sizeof(iaq));
	iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
	iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
	iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_VERSION);
	iaq.iaq_datalen = htole16(sizeof(struct iavf_vc_version_info));

	ver = IXL_AQB_KVA(aqb);
	ver->major = htole32(IAVF_VF_MAJOR);
	ver->minor = htole32(IAVF_VF_MINOR);

	/* Sentinel values; overwritten when the reply is processed. */
	sc->sc_major_ver = UINT_MAX;
	sc->sc_minor_ver = UINT_MAX;

	if (sc->sc_attached) {
		error = iavf_adminq_poll(sc, &iaq, aqb, 250);
	} else {
		error = iavf_adminq_poll_locked(sc, &iaq, aqb, 250);
	}

	if (error)
		return -1;

	return 0;
}
4534
/*
 * Send a GET_VF_RESOURCES command and poll for the reply (processed
 * by iavf_process_vf_resources).  For virtchnl major version > 0 the
 * request carries the capability flags this driver wants.  Returns 0
 * on success, -1 on failure.
 */
static int
iavf_get_vf_resources(struct iavf_softc *sc, struct ixl_aq_buf *aqb)
{
	struct ixl_aq_desc iaq;
	uint32_t *cap, cap0;
	int error;

	memset(&iaq, 0, sizeof(iaq));
	iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
	iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
	iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_GET_VF_RESOURCES);

	if (sc->sc_major_ver > 0) {
		/* Capabilities requested from the PF. */
		cap0 = IAVF_VC_OFFLOAD_L2 |
		    IAVF_VC_OFFLOAD_VLAN |
		    IAVF_VC_OFFLOAD_RSS_PF |
		    IAVF_VC_OFFLOAD_REQ_QUEUES;

		cap = IXL_AQB_KVA(aqb);
		*cap = htole32(cap0);
		iaq.iaq_datalen = htole16(sizeof(*cap));
	}

	sc->sc_got_vf_resources = 0;
	if (sc->sc_attached) {
		error = iavf_adminq_poll(sc, &iaq, aqb, 250);
	} else {
		error = iavf_adminq_poll_locked(sc, &iaq, aqb, 250);
	}

	if (error)
		return -1;
	return 0;
}
4569
/*
 * Post an asynchronous GET_STATS command for this VSI.  The reply is
 * handled later by iavf_process_stats() on the ARQ path; this
 * function does not wait for it.
 *
 * Returns ENOMEM if no idle command buffer is available; otherwise
 * passes through iavf_atq_post()'s return value -- NOTE(review): that
 * is the new producer index, not an errno, so the only caller
 * (iavf_tick) rightly ignores it; confirm before relying on it.
 */
static int
iavf_get_stats(struct iavf_softc *sc)
{
	struct ixl_aq_desc iaq;
	struct ixl_aq_buf *aqb;
	struct iavf_vc_queue_select *qsel;
	int error;

	mutex_enter(&sc->sc_adminq_lock);
	aqb = iavf_aqb_get_locked(&sc->sc_atq_idle);
	mutex_exit(&sc->sc_adminq_lock);

	if (aqb == NULL)
		return ENOMEM;

	qsel = IXL_AQB_KVA(aqb);
	memset(qsel, 0, sizeof(*qsel));
	qsel->vsi_id = htole16(sc->sc_vsi_id);

	memset(&iaq, 0, sizeof(iaq));

	iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
	iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
	iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_GET_STATS);
	iaq.iaq_datalen = htole16(sizeof(*qsel));

	if (atomic_load_relaxed(&sc->sc_debuglevel) >= 3) {
		device_printf(sc->sc_dev, "post GET_STATS command\n");
	}

	mutex_enter(&sc->sc_adminq_lock);
	error = iavf_atq_post(sc, &iaq, aqb);
	mutex_exit(&sc->sc_adminq_lock);

	return error;
}
4606
4607 static int
4608 iavf_config_irq_map(struct iavf_softc *sc, struct ixl_aq_buf *aqb)
4609 {
4610 struct ixl_aq_desc iaq;
4611 struct iavf_vc_vector_map *vec;
4612 struct iavf_vc_irq_map_info *map;
4613 struct iavf_rx_ring *rxr;
4614 struct iavf_tx_ring *txr;
4615 unsigned int num_vec;
4616 int error;
4617
4618 map = IXL_AQB_KVA(aqb);
4619 vec = map->vecmap;
4620 num_vec = 0;
4621
4622 if (sc->sc_nintrs == 1) {
4623 vec[0].vsi_id = htole16(sc->sc_vsi_id);
4624 vec[0].vector_id = htole16(0);
4625 vec[0].rxq_map = htole16(iavf_allqueues(sc));
4626 vec[0].txq_map = htole16(iavf_allqueues(sc));
4627 vec[0].rxitr_idx = htole16(IAVF_NOITR);
4628 vec[0].rxitr_idx = htole16(IAVF_NOITR);
4629 num_vec = 1;
4630 } else if (sc->sc_nintrs > 1) {
4631 KASSERT(sc->sc_nqps_alloc >= (sc->sc_nintrs - 1));
4632 for (; num_vec < (sc->sc_nintrs - 1); num_vec++) {
4633 rxr = sc->sc_qps[num_vec].qp_rxr;
4634 txr = sc->sc_qps[num_vec].qp_txr;
4635
4636 vec[num_vec].vsi_id = htole16(sc->sc_vsi_id);
4637 vec[num_vec].vector_id = htole16(num_vec + 1);
4638 vec[num_vec].rxq_map = htole16(__BIT(rxr->rxr_qid));
4639 vec[num_vec].txq_map = htole16(__BIT(txr->txr_qid));
4640 vec[num_vec].rxitr_idx = htole16(IAVF_ITR_RX);
4641 vec[num_vec].txitr_idx = htole16(IAVF_ITR_TX);
4642 }
4643
4644 vec[num_vec].vsi_id = htole16(sc->sc_vsi_id);
4645 vec[num_vec].vector_id = htole16(0);
4646 vec[num_vec].rxq_map = htole16(0);
4647 vec[num_vec].txq_map = htole16(0);
4648 num_vec++;
4649 }
4650
4651 map->num_vectors = htole16(num_vec);
4652
4653 memset(&iaq, 0, sizeof(iaq));
4654 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4655 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4656 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_CONFIG_IRQ_MAP);
4657 iaq.iaq_datalen = htole16(sizeof(*map) + sizeof(*vec) * num_vec);
4658
4659 if (sc->sc_attached) {
4660 error = iavf_adminq_poll(sc, &iaq, aqb, 250);
4661 } else {
4662 error = iavf_adminq_poll_locked(sc, &iaq, aqb, 250);
4663 }
4664
4665 if (error)
4666 return -1;
4667
4668 return 0;
4669 }
4670
/*
 * Describe every queue pair of the VSI to the PF (ring lengths, DMA
 * addresses, RX buffer sizing) via IAVF_VC_OP_CONFIG_VSI_QUEUES.
 * Returns 0 on success, -1 on failure.
 */
static int
iavf_config_vsi_queues(struct iavf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct ixl_aq_desc iaq;
	struct ixl_aq_buf *aqb;
	struct iavf_vc_queue_config_info *config;
	struct iavf_vc_txq_info *txq;
	struct iavf_vc_rxq_info *rxq;
	struct iavf_rx_ring *rxr;
	struct iavf_tx_ring *txr;
	uint32_t rxmtu_max;
	unsigned int i;
	int error;

	/* Largest frame RX must accept: MTU plus link-layer overhead. */
	rxmtu_max = ifp->if_mtu + IAVF_MTU_ETHERLEN;

	aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);

	if (aqb == NULL)
		return -1;

	config = IXL_AQB_KVA(aqb);
	memset(config, 0, sizeof(*config));
	config->vsi_id = htole16(sc->sc_vsi_id);
	config->num_queue_pairs = htole16(sc->sc_nqueue_pairs);

	for (i = 0; i < sc->sc_nqueue_pairs; i++) {
		rxr = sc->sc_qps[i].qp_rxr;
		txr = sc->sc_qps[i].qp_txr;

		/* TX ring: head writeback disabled, descriptors only. */
		txq = &config->qpair[i].txq;
		txq->vsi_id = htole16(sc->sc_vsi_id);
		txq->queue_id = htole16(txr->txr_qid);
		txq->ring_len = htole16(sc->sc_tx_ring_ndescs);
		txq->headwb_ena = 0;
		txq->dma_ring_addr = htole64(IXL_DMA_DVA(&txr->txr_mem));
		txq->dma_headwb_addr = 0;

		/* RX ring: single-buffer receive, no header split. */
		rxq = &config->qpair[i].rxq;
		rxq->vsi_id = htole16(sc->sc_vsi_id);
		rxq->queue_id = htole16(rxr->rxr_qid);
		rxq->ring_len = htole16(sc->sc_rx_ring_ndescs);
		rxq->splithdr_ena = 0;
		rxq->databuf_size = htole32(IAVF_MCLBYTES);
		rxq->max_pkt_size = htole32(rxmtu_max);
		rxq->dma_ring_addr = htole64(IXL_DMA_DVA(&rxr->rxr_mem));
		rxq->rx_split_pos = 0;
	}

	memset(&iaq, 0, sizeof(iaq));
	iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
	iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
	iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_CONFIG_VSI_QUEUES);
	/* Variable-length payload: header plus one entry per queue pair. */
	iaq.iaq_datalen = htole16(sizeof(*config) +
	    sizeof(config->qpair[0]) * sc->sc_nqueue_pairs);

	/*
	 * NOTE(review): the aqb is not visibly released on the error path;
	 * presumably iavf_adminq_exec() recycles it -- confirm against its
	 * definition.
	 */
	error = iavf_adminq_exec(sc, &iaq, aqb);
	if (error != IAVF_VC_RC_SUCCESS) {
		return -1;
	}

	return 0;
}
4735
4736 static int
4737 iavf_config_hena(struct iavf_softc *sc)
4738 {
4739 struct ixl_aq_desc iaq;
4740 struct ixl_aq_buf *aqb;
4741 uint64_t *caps;
4742 int error;
4743
4744 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
4745
4746 if (aqb == NULL)
4747 return -1;
4748
4749 caps = IXL_AQB_KVA(aqb);
4750 if (sc->sc_mac_type == I40E_MAC_X722_VF)
4751 *caps = IXL_RSS_HENA_DEFAULT_XL710;
4752 else
4753 *caps = IXL_RSS_HENA_DEFAULT_X722;
4754
4755 memset(&iaq, 0, sizeof(iaq));
4756 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4757 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4758 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_SET_RSS_HENA);
4759 iaq.iaq_datalen = htole16(sizeof(*caps));
4760
4761 error = iavf_adminq_exec(sc, &iaq, aqb);
4762 if (error != IAVF_VC_RC_SUCCESS) {
4763 return -1;
4764 }
4765
4766 return 0;
4767 }
4768
4769 static inline void
4770 iavf_get_default_rss_key(uint8_t *buf, size_t len)
4771 {
4772 uint8_t rss_seed[RSS_KEYSIZE];
4773 size_t cplen;
4774
4775 cplen = MIN(len, sizeof(rss_seed));
4776 rss_getkey(rss_seed);
4777
4778 memcpy(buf, rss_seed, cplen);
4779 if (cplen < len)
4780 memset(buf + cplen, 0, len - cplen);
4781 }
4782
4783 static int
4784 iavf_config_rss_key(struct iavf_softc *sc)
4785 {
4786 struct ixl_aq_desc iaq;
4787 struct ixl_aq_buf *aqb;
4788 struct iavf_vc_rss_key *rss_key;
4789 size_t key_len;
4790 int rv;
4791
4792 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
4793 if (aqb == NULL)
4794 return -1;
4795
4796 rss_key = IXL_AQB_KVA(aqb);
4797 rss_key->vsi_id = htole16(sc->sc_vsi_id);
4798 key_len = IXL_RSS_KEY_SIZE;
4799 iavf_get_default_rss_key(rss_key->key, key_len);
4800 rss_key->key_len = key_len;
4801
4802 memset(&iaq, 0, sizeof(iaq));
4803 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4804 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4805 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_CONFIG_RSS_KEY);
4806 iaq.iaq_datalen = htole16(sizeof(*rss_key) - sizeof(rss_key->pad)
4807 + (sizeof(rss_key->key[0]) * key_len));
4808
4809 rv = iavf_adminq_exec(sc, &iaq, aqb);
4810 if (rv != IAVF_VC_RC_SUCCESS) {
4811 return -1;
4812 }
4813
4814 return 0;
4815 }
4816
4817 static int
4818 iavf_config_rss_lut(struct iavf_softc *sc)
4819 {
4820 struct ixl_aq_desc iaq;
4821 struct ixl_aq_buf *aqb;
4822 struct iavf_vc_rss_lut *rss_lut;
4823 uint8_t *lut, v;
4824 int rv, i;
4825
4826 mutex_enter(&sc->sc_adminq_lock);
4827 mutex_exit(&sc->sc_adminq_lock);
4828
4829 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
4830 if (aqb == NULL)
4831 return -1;
4832
4833 rss_lut = IXL_AQB_KVA(aqb);
4834 rss_lut->vsi_id = htole16(sc->sc_vsi_id);
4835 rss_lut->lut_entries = htole16(IXL_RSS_VSI_LUT_SIZE);
4836
4837 lut = rss_lut->lut;
4838 for (i = 0; i < IXL_RSS_VSI_LUT_SIZE; i++) {
4839 v = i % sc->sc_nqueue_pairs;
4840 v &= IAVF_RSS_VSI_LUT_ENTRY_MASK;
4841 lut[i] = v;
4842 }
4843
4844 memset(&iaq, 0, sizeof(iaq));
4845 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4846 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4847 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_CONFIG_RSS_LUT);
4848 iaq.iaq_datalen = htole16(sizeof(*rss_lut) - sizeof(rss_lut->pad)
4849 + (sizeof(rss_lut->lut[0]) * IXL_RSS_VSI_LUT_SIZE));
4850
4851 rv = iavf_adminq_exec(sc, &iaq, aqb);
4852 if (rv != IAVF_VC_RC_SUCCESS) {
4853 return -1;
4854 }
4855
4856 return 0;
4857 }
4858
4859 static int
4860 iavf_queue_select(struct iavf_softc *sc, int opcode)
4861 {
4862 struct ixl_aq_desc iaq;
4863 struct ixl_aq_buf *aqb;
4864 struct iavf_vc_queue_select *qsel;
4865 int error;
4866
4867 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
4868 if (aqb == NULL)
4869 return -1;
4870
4871 qsel = IXL_AQB_KVA(aqb);
4872 qsel->vsi_id = htole16(sc->sc_vsi_id);
4873 qsel->rx_queues = htole32(iavf_allqueues(sc));
4874 qsel->tx_queues = htole32(iavf_allqueues(sc));
4875
4876 memset(&iaq, 0, sizeof(iaq));
4877 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4878 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4879 iavf_aq_vc_set_opcode(&iaq, opcode);
4880 iaq.iaq_datalen = htole16(sizeof(*qsel));
4881
4882 error = iavf_adminq_exec(sc, &iaq, aqb);
4883 if (error != IAVF_VC_RC_SUCCESS) {
4884 return -1;
4885 }
4886
4887 return 0;
4888 }
4889
4890 static int
4891 iavf_request_queues(struct iavf_softc *sc, unsigned int req_num)
4892 {
4893 struct ixl_aq_desc iaq;
4894 struct ixl_aq_buf *aqb;
4895 struct iavf_vc_res_request *req;
4896 int rv;
4897
4898 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
4899 if (aqb == NULL)
4900 return ENOMEM;
4901
4902 req = IXL_AQB_KVA(aqb);
4903 req->num_queue_pairs = req_num;
4904
4905 memset(&iaq, 0, sizeof(iaq));
4906 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4907 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4908 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_REQUEST_QUEUES);
4909 iaq.iaq_datalen = htole16(sizeof(*req));
4910
4911 mutex_enter(&sc->sc_adminq_lock);
4912 rv = iavf_atq_post(sc, &iaq, aqb);
4913 mutex_exit(&sc->sc_adminq_lock);
4914
4915 return rv;
4916 }
4917
/*
 * Ask the PF to reset this VF (IAVF_VC_OP_RESET_VF).  The command is
 * posted asynchronously; VFGEN_RSTAT is pre-marked IN PROGRESS before
 * the post so reset completion can be detected via that register.
 * Returns 0 or an errno from the post.
 */
static int
iavf_reset_vf(struct iavf_softc *sc)
{
	struct ixl_aq_desc iaq;
	int error;

	/* No buffer: this command carries no payload (datalen 0). */
	memset(&iaq, 0, sizeof(iaq));
	iaq.iaq_flags = htole16(IXL_AQ_RD);
	iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
	iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_RESET_VF);
	iaq.iaq_datalen = htole16(0);

	/* Mark the reset as in progress before the PF acts on it. */
	iavf_wr(sc, I40E_VFGEN_RSTAT, IAVF_VFR_INPROGRESS);

	mutex_enter(&sc->sc_adminq_lock);
	error = iavf_atq_post(sc, &iaq, NULL);
	mutex_exit(&sc->sc_adminq_lock);

	return error;
}
4938
/*
 * Add or delete a single MAC address filter on the VSI.  opcode must be
 * IAVF_VC_OP_ADD_ETH_ADDR or IAVF_VC_OP_DEL_ETH_ADDR.
 * Returns 0 on success, -1 on failure.
 */
static int
iavf_eth_addr(struct iavf_softc *sc, const uint8_t *addr, uint32_t opcode)
{
	struct ixl_aq_desc iaq;
	struct ixl_aq_buf *aqb;
	struct iavf_vc_eth_addr_list *addrs;
	struct iavf_vc_eth_addr *vcaddr;
	int rv;

	KASSERT(sc->sc_attached);
	KASSERT(opcode == IAVF_VC_OP_ADD_ETH_ADDR ||
	    opcode == IAVF_VC_OP_DEL_ETH_ADDR);

	aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
	if (aqb == NULL)
		return -1;

	/* Single-entry address list for this VSI. */
	addrs = IXL_AQB_KVA(aqb);
	addrs->vsi_id = htole16(sc->sc_vsi_id);
	addrs->num_elements = htole16(1);
	vcaddr = addrs->list;
	memcpy(vcaddr->addr, addr, ETHER_ADDR_LEN);

	memset(&iaq, 0, sizeof(iaq));
	iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
	iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
	iavf_aq_vc_set_opcode(&iaq, opcode);
	iaq.iaq_datalen = htole16(sizeof(*addrs) + sizeof(*vcaddr));

	/*
	 * During a VF reset the normal exec path cannot be used, so fall
	 * back to locked polling and return the buffer by hand.
	 */
	if (sc->sc_resetting) {
		mutex_enter(&sc->sc_adminq_lock);
		rv = iavf_adminq_poll_locked(sc, &iaq, aqb, 250);
		iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
		mutex_exit(&sc->sc_adminq_lock);
	} else {
		rv = iavf_adminq_exec(sc, &iaq, aqb);
	}

	if (rv != IAVF_VC_RC_SUCCESS) {
		return -1;
	}

	return 0;
}
4983
4984 static int
4985 iavf_config_promisc_mode(struct iavf_softc *sc, int unicast, int multicast)
4986 {
4987 struct ixl_aq_desc iaq;
4988 struct ixl_aq_buf *aqb;
4989 struct iavf_vc_promisc_info *promisc;
4990 int flags;
4991
4992 KASSERT(sc->sc_attached);
4993
4994 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
4995 if (aqb == NULL)
4996 return -1;
4997
4998 flags = 0;
4999 if (unicast)
5000 flags |= IAVF_FLAG_VF_UNICAST_PROMISC;
5001 if (multicast)
5002 flags |= IAVF_FLAG_VF_MULTICAST_PROMISC;
5003
5004 promisc = IXL_AQB_KVA(aqb);
5005 promisc->vsi_id = htole16(sc->sc_vsi_id);
5006 promisc->flags = htole16(flags);
5007
5008 memset(&iaq, 0, sizeof(iaq));
5009 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
5010 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
5011 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_CONFIG_PROMISC);
5012 iaq.iaq_datalen = htole16(sizeof(*promisc));
5013
5014 if (iavf_adminq_exec(sc, &iaq, aqb) != IAVF_VC_RC_SUCCESS) {
5015 return -1;
5016 }
5017
5018 return 0;
5019 }
5020
5021 static int
5022 iavf_config_vlan_stripping(struct iavf_softc *sc, int eccap)
5023 {
5024 struct ixl_aq_desc iaq;
5025 uint32_t opcode;
5026
5027 opcode = ISSET(eccap, ETHERCAP_VLAN_HWTAGGING) ?
5028 IAVF_VC_OP_ENABLE_VLAN_STRIP : IAVF_VC_OP_DISABLE_VLAN_STRIP;
5029
5030 memset(&iaq, 0, sizeof(iaq));
5031 iaq.iaq_flags = htole16(IXL_AQ_RD);
5032 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
5033 iavf_aq_vc_set_opcode(&iaq, opcode);
5034 iaq.iaq_datalen = htole16(0);
5035
5036 if (iavf_adminq_exec(sc, &iaq, NULL) != IAVF_VC_RC_SUCCESS) {
5037 return -1;
5038 }
5039
5040 return 0;
5041 }
5042
5043 static int
5044 iavf_config_vlan_id(struct iavf_softc *sc, uint16_t vid, uint32_t opcode)
5045 {
5046 struct ixl_aq_desc iaq;
5047 struct ixl_aq_buf *aqb;
5048 struct iavf_vc_vlan_filter *vfilter;
5049 int rv;
5050
5051 KASSERT(opcode == IAVF_VC_OP_ADD_VLAN || opcode == IAVF_VC_OP_DEL_VLAN);
5052
5053 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
5054
5055 if (aqb == NULL)
5056 return -1;
5057
5058 vfilter = IXL_AQB_KVA(aqb);
5059 vfilter->vsi_id = htole16(sc->sc_vsi_id);
5060 vfilter->num_vlan_id = htole16(1);
5061 vfilter->vlan_id[0] = vid;
5062
5063 memset(&iaq, 0, sizeof(iaq));
5064 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
5065 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
5066 iavf_aq_vc_set_opcode(&iaq, opcode);
5067 iaq.iaq_datalen = htole16(sizeof(*vfilter) + sizeof(vid));
5068
5069 if (sc->sc_resetting) {
5070 mutex_enter(&sc->sc_adminq_lock);
5071 rv = iavf_adminq_poll_locked(sc, &iaq, aqb, 250);
5072 iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
5073 mutex_exit(&sc->sc_adminq_lock);
5074 } else {
5075 rv = iavf_adminq_exec(sc, &iaq, aqb);
5076 }
5077
5078 if (rv != IAVF_VC_RC_SUCCESS) {
5079 return -1;
5080 }
5081
5082 return 0;
5083 }
5084
5085 static void
5086 iavf_post_request_queues(void *xsc)
5087 {
5088 struct iavf_softc *sc;
5089 struct ifnet *ifp;
5090
5091 sc = xsc;
5092 ifp = &sc->sc_ec.ec_if;
5093
5094 if (!ISSET(sc->sc_vf_cap, IAVF_VC_OFFLOAD_REQ_QUEUES)) {
5095 log(LOG_DEBUG, "%s: the VF has no REQ_QUEUES capability\n",
5096 ifp->if_xname);
5097 return;
5098 }
5099
5100 log(LOG_INFO, "%s: try to change the number of queue pairs"
5101 " (vsi %u, %u allocated, request %u)\n",
5102 ifp->if_xname,
5103 sc->sc_nqps_vsi, sc->sc_nqps_alloc, sc->sc_nqps_req);
5104 iavf_request_queues(sc, sc->sc_nqps_req);
5105 }
5106
5107 static bool
5108 iavf_sysctlnode_is_rx(struct sysctlnode *node)
5109 {
5110
5111 if (strstr(node->sysctl_parent->sysctl_name, "rx") != NULL)
5112 return true;
5113
5114 return false;
5115 }
5116
/*
 * sysctl handler for the RX/TX interrupt throttling rate (ITR).
 * Whether the RX or TX value is targeted is derived from the sysctl
 * node's parent name.  A successful write updates the softc copy and
 * pushes the new value into every per-queue ITR register plus the
 * misc-vector register.
 */
static int
iavf_sysctl_itr_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct iavf_softc *sc = (struct iavf_softc *)node.sysctl_data;
	uint32_t newitr, *itrptr;
	unsigned int i;
	int itr, error;

	if (iavf_sysctlnode_is_rx(&node)) {
		itrptr = &sc->sc_rx_itr;
		itr = IAVF_ITR_RX;
	} else {
		itrptr = &sc->sc_tx_itr;
		itr = IAVF_ITR_TX;
	}

	/* Let sysctl_lookup() read/write a local copy first. */
	newitr = *itrptr;
	node.sysctl_data = &newitr;
	node.sysctl_size = sizeof(newitr);

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	/* 0x07FF: presumably the hardware ITR field width -- verify. */
	if (newitr > 0x07FF)
		return EINVAL;

	*itrptr = newitr;

	/* Apply to each queue vector, then to the misc vector (ITR0). */
	for (i = 0; i < sc->sc_nqueue_pairs; i++) {
		iavf_wr(sc, I40E_VFINT_ITRN1(itr, i), *itrptr);
	}
	iavf_wr(sc, I40E_VFINT_ITR01(itr), *itrptr);

	return 0;
}
5154
5155 static void
5156 iavf_workq_work(struct work *wk, void *context)
5157 {
5158 struct iavf_work *work;
5159
5160 work = container_of(wk, struct iavf_work, ixw_cookie);
5161
5162 atomic_swap_uint(&work->ixw_added, 0);
5163 work->ixw_func(work->ixw_arg);
5164 }
5165
5166 static struct workqueue *
5167 iavf_workq_create(const char *name, pri_t prio, int ipl, int flags)
5168 {
5169 struct workqueue *wq;
5170 int error;
5171
5172 error = workqueue_create(&wq, name, iavf_workq_work, NULL,
5173 prio, ipl, flags);
5174
5175 if (error)
5176 return NULL;
5177
5178 return wq;
5179 }
5180
/* Tear down a workqueue created by iavf_workq_create(). */
static void
iavf_workq_destroy(struct workqueue *wq)
{

	workqueue_destroy(wq);
}
5187
5188 static int
5189 iavf_work_set(struct iavf_work *work, void (*func)(void *), void *arg)
5190 {
5191
5192 if (work->ixw_added != 0)
5193 return -1;
5194
5195 memset(work, 0, sizeof(*work));
5196 work->ixw_func = func;
5197 work->ixw_arg = arg;
5198
5199 return 0;
5200 }
5201
/*
 * Enqueue a work item at most once: only the caller that wins the
 * 0 -> 1 transition of ixw_added actually enqueues it.
 */
static void
iavf_work_add(struct workqueue *wq, struct iavf_work *work)
{
	if (atomic_cas_uint(&work->ixw_added, 0, 1) != 0)
		return;

	kpreempt_disable();
	workqueue_enqueue(wq, &work->ixw_cookie, NULL);
	kpreempt_enable();
}
5212
/* Wait until the given work item has finished running on wq. */
static void
iavf_work_wait(struct workqueue *wq, struct iavf_work *work)
{

	workqueue_wait(wq, &work->ixw_cookie);
}
5219
/*
 * Convenience wrapper: attach a MISC-type event counter named n1 in
 * group n0, with no parent counter.
 */
static void
iavf_evcnt_attach(struct evcnt *ec,
    const char *n0, const char *n1)
{

	evcnt_attach_dynamic(ec, EVCNT_TYPE_MISC,
	    NULL, n0, n1);
}
5228
5229 MODULE(MODULE_CLASS_DRIVER, if_iavf, "pci");
5230
5231 #ifdef _MODULE
5232 #include "ioconf.c"
5233 #endif
5234
5235 #ifdef _MODULE
5236 static void
5237 iavf_parse_modprop(prop_dictionary_t dict)
5238 {
5239 prop_object_t obj;
5240 int64_t val;
5241 uint32_t n;
5242
5243 if (dict == NULL)
5244 return;
5245
5246 obj = prop_dictionary_get(dict, "debug_level");
5247 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
5248 val = prop_number_signed_value((prop_number_t)obj);
5249
5250 if (val > 0) {
5251 iavf_params.debug = val;
5252 printf("iavf: debug level=%d\n", iavf_params.debug);
5253 }
5254 }
5255
5256 obj = prop_dictionary_get(dict, "max_qps");
5257 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
5258 val = prop_number_signed_value((prop_number_t)obj);
5259
5260 if (val < 1 || val > I40E_MAX_VF_QUEUES) {
5261 printf("iavf: invalid queue size(1 <= n <= %d)",
5262 I40E_MAX_VF_QUEUES);
5263 } else {
5264 iavf_params.max_qps = val;
5265 printf("iavf: request queue pair = %u\n",
5266 iavf_params.max_qps);
5267 }
5268 }
5269
5270 obj = prop_dictionary_get(dict, "tx_itr");
5271 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
5272 val = prop_number_signed_value((prop_number_t)obj);
5273 if (val > 0x07FF) {
5274 printf("iavf: TX ITR too big (%" PRId64 " <= %d)",
5275 val, 0x7FF);
5276 } else {
5277 iavf_params.tx_itr = val;
5278 printf("iavf: TX ITR = 0x%" PRIx32,
5279 iavf_params.tx_itr);
5280 }
5281 }
5282
5283 obj = prop_dictionary_get(dict, "rx_itr");
5284 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
5285 val = prop_number_signed_value((prop_number_t)obj);
5286 if (val > 0x07FF) {
5287 printf("iavf: RX ITR too big (%" PRId64 " <= %d)",
5288 val, 0x7FF);
5289 } else {
5290 iavf_params.rx_itr = val;
5291 printf("iavf: RX ITR = 0x%" PRIx32,
5292 iavf_params.rx_itr);
5293 }
5294 }
5295
5296 obj = prop_dictionary_get(dict, "tx_ndescs");
5297 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
5298 val = prop_number_signed_value((prop_number_t)obj);
5299 n = 1U << (fls32(val) - 1);
5300 if (val != (int64_t) n) {
5301 printf("iavf: TX desc invlaid size"
5302 "(%" PRId64 " != %" PRIu32 ")\n", val, n);
5303 } else if (val > (8192 - 32)) {
5304 printf("iavf: Tx desc too big (%" PRId64 " > %d)",
5305 val, (8192 - 32));
5306 } else {
5307 iavf_params.tx_ndescs = val;
5308 printf("iavf: TX descriptors = 0x%04x",
5309 iavf_params.tx_ndescs);
5310 }
5311 }
5312
5313 obj = prop_dictionary_get(dict, "rx_ndescs");
5314 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
5315 val = prop_number_signed_value((prop_number_t)obj);
5316 n = 1U << (fls32(val) - 1);
5317 if (val != (int64_t) n) {
5318 printf("iavf: RX desc invlaid size"
5319 "(%" PRId64 " != %" PRIu32 ")\n", val, n);
5320 } else if (val > (8192 - 32)) {
5321 printf("iavf: Rx desc too big (%" PRId64 " > %d)",
5322 val, (8192 - 32));
5323 } else {
5324 iavf_params.rx_ndescs = val;
5325 printf("iavf: RX descriptors = 0x%04x",
5326 iavf_params.rx_ndescs);
5327 }
5328 }
5329 }
5330 #endif
5331
/*
 * Module control entry point.  When built as a module, INIT parses the
 * module properties and attaches the driver via autoconf; FINI detaches
 * it.  When built into the kernel this is effectively a no-op returning 0.
 */
static int
if_iavf_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		/* Apply tunables before the devices attach. */
		iavf_parse_modprop((prop_dictionary_t)opaque);
		error = config_init_component(cfdriver_ioconf_if_iavf,
		    cfattach_ioconf_if_iavf, cfdata_ioconf_if_iavf);
		break;
	case MODULE_CMD_FINI:
		error = config_fini_component(cfdriver_ioconf_if_iavf,
		    cfattach_ioconf_if_iavf, cfdata_ioconf_if_iavf);
		break;
	default:
		error = ENOTTY;
		break;
	}
#endif

	return error;
}
5356