if_iavf.c revision 1.5 1 /* $NetBSD: if_iavf.c,v 1.5 2020/09/10 03:20:08 yamaguchi Exp $ */
2
3 /*
4 * Copyright (c) 2013-2015, Intel Corporation
5 * All rights reserved.
6
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
12 *
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * 3. Neither the name of the Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 /*
35 * Copyright (c) 2016,2017 David Gwynne <dlg (at) openbsd.org>
36 * Copyright (c) 2019 Jonathan Matthew <jmatthew (at) openbsd.org>
37 *
38 * Permission to use, copy, modify, and distribute this software for any
39 * purpose with or without fee is hereby granted, provided that the above
40 * copyright notice and this permission notice appear in all copies.
41 *
42 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
43 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
44 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
45 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
46 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
47 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
48 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
49 */
50
51 /*
52 * Copyright (c) 2020 Internet Initiative Japan, Inc.
53 * All rights reserved.
54 *
55 * Redistribution and use in source and binary forms, with or without
56 * modification, are permitted provided that the following conditions
57 * are met:
58 * 1. Redistributions of source code must retain the above copyright
59 * notice, this list of conditions and the following disclaimer.
60 * 2. Redistributions in binary form must reproduce the above copyright
61 * notice, this list of conditions and the following disclaimer in the
62 * documentation and/or other materials provided with the distribution.
63 *
64 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
65 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
66 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
67 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
68 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
69 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
70 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
71 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
72 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
73 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
74 * POSSIBILITY OF SUCH DAMAGE.
75 */
76
77 #include <sys/cdefs.h>
78 __KERNEL_RCSID(0, "$NetBSD: if_iavf.c,v 1.5 2020/09/10 03:20:08 yamaguchi Exp $");
79
80 #include <sys/param.h>
81 #include <sys/types.h>
82
83 #include <sys/bitops.h>
84 #include <sys/bus.h>
85 #include <sys/cprng.h>
86 #include <sys/cpu.h>
87 #include <sys/device.h>
88 #include <sys/evcnt.h>
89 #include <sys/interrupt.h>
90 #include <sys/kmem.h>
91 #include <sys/module.h>
92 #include <sys/mutex.h>
93 #include <sys/pcq.h>
94 #include <sys/queue.h>
95 #include <sys/syslog.h>
96 #include <sys/workqueue.h>
97
98 #include <net/bpf.h>
99 #include <net/if.h>
100 #include <net/if_dl.h>
101 #include <net/if_media.h>
102 #include <net/if_ether.h>
103 #include <net/rss_config.h>
104
105 #include <netinet/tcp.h> /* for struct tcphdr */
106 #include <netinet/udp.h> /* for struct udphdr */
107
108 #include <dev/pci/pcivar.h>
109 #include <dev/pci/pcidevs.h>
110
111 #include <dev/pci/if_ixlreg.h>
112 #include <dev/pci/if_ixlvar.h>
113 #include <dev/pci/if_iavfvar.h>
114
115 #include <prop/proplib.h>
116
117 #define IAVF_PCIREG PCI_MAPREG_START
118 #define IAVF_AQ_NUM 256
119 #define IAVF_AQ_MASK (IAVF_AQ_NUM-1)
120 #define IAVF_AQ_ALIGN 64
121 #define IAVF_AQ_BUFLEN 4096
122 #define I40E_AQ_LARGE_BUF 512
123 #define IAVF_VF_MAJOR 1
124 #define IAVF_VF_MINOR 1
125
126 #define IAVF_VFR_INPROGRESS 0
127 #define IAVF_VFR_COMPLETED 1
128 #define IAVF_VFR_VFACTIVE 2
129
130 #define IAVF_REG_VFR 0xdeadbeef
131
132 #define IAVF_ITR_RX 0x0
133 #define IAVF_ITR_TX 0x1
134 #define IAVF_ITR_MISC 0x2
135 #define IAVF_NOITR 0x3
136
137 #define IAVF_MTU_ETHERLEN (ETHER_HDR_LEN \
138 + ETHER_CRC_LEN)
139 #define IAVF_MAX_MTU (9600 - IAVF_MTU_ETHERLEN)
140 #define IAVF_MIN_MTU (ETHER_MIN_LEN - ETHER_CRC_LEN)
141
142 #define IAVF_WORKQUEUE_PRI PRI_SOFTNET
143
144 #define IAVF_TX_PKT_DESCS 8
145 #define IAVF_TX_QUEUE_ALIGN 128
146 #define IAVF_RX_QUEUE_ALIGN 128
147 #define IAVF_TX_PKT_MAXSIZE (MCLBYTES * IAVF_TX_PKT_DESCS)
148 #define IAVF_MCLBYTES (MCLBYTES - ETHER_ALIGN)
149
150 #define IAVF_TICK_INTERVAL (5 * hz)
151 #define IAVF_WATCHDOG_TICKS 3
152 #define IAVF_WATCHDOG_STOP 0
153
154 #define IAVF_TXRX_PROCESS_UNLIMIT UINT_MAX
155 #define IAVF_TX_PROCESS_LIMIT 256
156 #define IAVF_RX_PROCESS_LIMIT 256
157 #define IAVF_TX_INTR_PROCESS_LIMIT 256
158 #define IAVF_RX_INTR_PROCESS_LIMIT 0U
159
160 #define IAVF_EXEC_TIMEOUT 3000
161
162 #define IAVF_IFCAP_RXCSUM (IFCAP_CSUM_IPv4_Rx | \
163 IFCAP_CSUM_TCPv4_Rx | \
164 IFCAP_CSUM_UDPv4_Rx | \
165 IFCAP_CSUM_TCPv6_Rx | \
166 IFCAP_CSUM_UDPv6_Rx)
167 #define IAVF_IFCAP_TXCSUM (IFCAP_CSUM_IPv4_Tx | \
168 IFCAP_CSUM_TCPv4_Tx | \
169 IFCAP_CSUM_UDPv4_Tx | \
170 IFCAP_CSUM_TCPv6_Tx | \
171 IFCAP_CSUM_UDPv6_Tx)
172 #define IAVF_CSUM_ALL_OFFLOAD (M_CSUM_IPv4 | \
173 M_CSUM_TCPv4 | M_CSUM_TCPv6 | \
174 M_CSUM_UDPv4 | M_CSUM_UDPv6)
175
176 struct iavf_softc; /* defined */
177
178 struct iavf_module_params {
179 int debug;
180 uint32_t rx_itr;
181 uint32_t tx_itr;
182 unsigned int rx_ndescs;
183 unsigned int tx_ndescs;
184 int max_qps;
185 };
186
187 struct iavf_product {
188 unsigned int vendor_id;
189 unsigned int product_id;
190 };
191
192 struct iavf_link_speed {
193 uint64_t baudrate;
194 uint64_t media;
195 };
196
197 struct iavf_aq_regs {
198 bus_size_t atq_tail;
199 bus_size_t atq_head;
200 bus_size_t atq_len;
201 bus_size_t atq_bal;
202 bus_size_t atq_bah;
203
204 bus_size_t arq_tail;
205 bus_size_t arq_head;
206 bus_size_t arq_len;
207 bus_size_t arq_bal;
208 bus_size_t arq_bah;
209
210 uint32_t atq_len_enable;
211 uint32_t atq_tail_mask;
212 uint32_t atq_head_mask;
213
214 uint32_t arq_len_enable;
215 uint32_t arq_tail_mask;
216 uint32_t arq_head_mask;
217 };
218
219 struct iavf_work {
220 struct work ixw_cookie;
221 void (*ixw_func)(void *);
222 void *ixw_arg;
223 unsigned int ixw_added;
224 };
225
226 struct iavf_tx_map {
227 struct mbuf *txm_m;
228 bus_dmamap_t txm_map;
229 unsigned int txm_eop;
230 };
231
232 struct iavf_tx_ring {
233 unsigned int txr_qid;
234 char txr_name[16];
235
236 struct iavf_softc *txr_sc;
237 kmutex_t txr_lock;
238 pcq_t *txr_intrq;
239 void *txr_si;
240 unsigned int txr_prod;
241 unsigned int txr_cons;
242
243 struct iavf_tx_map *txr_maps;
244 struct ixl_dmamem txr_mem;
245 bus_size_t txr_tail;
246
247 int txr_watchdog;
248
249 struct evcnt txr_defragged;
250 struct evcnt txr_defrag_failed;
251 struct evcnt txr_pcqdrop;
252 struct evcnt txr_transmitdef;
253 struct evcnt txr_defer;
254 struct evcnt txr_watchdogto;
255 struct evcnt txr_intr;
256 };
257
258 struct iavf_rx_map {
259 struct mbuf *rxm_m;
260 bus_dmamap_t rxm_map;
261 };
262
263 struct iavf_rx_ring {
264 unsigned int rxr_qid;
265 char rxr_name[16];
266
267 struct iavf_softc *rxr_sc;
268 kmutex_t rxr_lock;
269
270 unsigned int rxr_prod;
271 unsigned int rxr_cons;
272
273 struct iavf_rx_map *rxr_maps;
274 struct ixl_dmamem rxr_mem;
275 bus_size_t rxr_tail;
276
277 struct mbuf *rxr_m_head;
278 struct mbuf **rxr_m_tail;
279
280 struct evcnt rxr_mgethdr_failed;
281 struct evcnt rxr_mgetcl_failed;
282 struct evcnt rxr_mbuf_load_failed;
283 struct evcnt rxr_defer;
284 struct evcnt rxr_intr;
285 };
286
287 struct iavf_queue_pair {
288 struct iavf_tx_ring *qp_txr;
289 struct iavf_rx_ring *qp_rxr;
290 struct work qp_work;
291 void *qp_si;
292 bool qp_workqueue;
293 };
294
295 struct iavf_stat_counters {
296 struct evcnt isc_rx_bytes;
297 struct evcnt isc_rx_unicast;
298 struct evcnt isc_rx_multicast;
299 struct evcnt isc_rx_broadcast;
300 struct evcnt isc_rx_discards;
301 struct evcnt isc_rx_unknown_protocol;
302 struct evcnt isc_tx_bytes;
303 struct evcnt isc_tx_unicast;
304 struct evcnt isc_tx_multicast;
305 struct evcnt isc_tx_broadcast;
306 struct evcnt isc_tx_discards;
307 struct evcnt isc_tx_errors;
308 };
309
310 /*
311 * Locking notes:
312 * + A field in iavf_tx_ring is protected by txr_lock (a spin mutex), and
313 * A field in iavf_rx_ring is protected by rxr_lock (a spin mutex).
314 * - more than one lock must not be held at once.
315 * + fields named sc_atq_*, sc_arq_*, and sc_adminq_* are protected by
316 * sc_adminq_lock(a spin mutex).
317 * - The lock is held while accessing sc_aq_regs
318 * and is not held with txr_lock and rxr_lock together.
319 * + Other fields in iavf_softc is protected by sc_cfg_lock
320 * (an adaptive mutex).
321 * - The lock must be held before acquiring another lock.
322 *
323 * Locking order:
324 * - IFNET_LOCK => sc_cfg_lock => sc_adminq_lock
325 * - sc_cfg_lock => ETHER_LOCK => sc_adminq_lock
326 * - sc_cfg_lock => txr_lock
327 * - sc_cfg_lock => rxr_lock
328 */
329
330 struct iavf_softc {
331 device_t sc_dev;
332 enum i40e_mac_type sc_mac_type;
333 int sc_debuglevel;
334 bool sc_attached;
335 bool sc_dead;
336 kmutex_t sc_cfg_lock;
337 callout_t sc_tick;
338 struct ifmedia sc_media;
339 uint64_t sc_media_status;
340 uint64_t sc_media_active;
341 int sc_link_state;
342
343 const struct iavf_aq_regs *
344 sc_aq_regs;
345
346 struct ethercom sc_ec;
347 uint8_t sc_enaddr[ETHER_ADDR_LEN];
348 uint8_t sc_enaddr_fake[ETHER_ADDR_LEN];
349 uint8_t sc_enaddr_added[ETHER_ADDR_LEN];
350 uint8_t sc_enaddr_reset[ETHER_ADDR_LEN];
351 struct if_percpuq *sc_ipq;
352
353 struct pci_attach_args sc_pa;
354 bus_dma_tag_t sc_dmat;
355 bus_space_tag_t sc_memt;
356 bus_space_handle_t sc_memh;
357 bus_size_t sc_mems;
358 pci_intr_handle_t *sc_ihp;
359 void **sc_ihs;
360 unsigned int sc_nintrs;
361
362 uint32_t sc_major_ver;
363 uint32_t sc_minor_ver;
364 uint32_t sc_vf_id;
365 uint32_t sc_vf_cap;
366 uint16_t sc_vsi_id;
367 uint16_t sc_qset_handle;
368 uint16_t sc_max_mtu;
369 bool sc_got_vf_resources;
370 bool sc_got_irq_map;
371 unsigned int sc_max_vectors;
372
373 kmutex_t sc_adminq_lock;
374 kcondvar_t sc_adminq_cv;
375 struct ixl_dmamem sc_atq;
376 unsigned int sc_atq_prod;
377 unsigned int sc_atq_cons;
378 struct ixl_aq_bufs sc_atq_idle;
379 struct ixl_aq_bufs sc_atq_live;
380 struct ixl_dmamem sc_arq;
381 struct ixl_aq_bufs sc_arq_idle;
382 struct ixl_aq_bufs sc_arq_live;
383 unsigned int sc_arq_prod;
384 unsigned int sc_arq_cons;
385 struct iavf_work sc_arq_refill;
386 uint32_t sc_arq_opcode;
387 uint32_t sc_arq_retval;
388
389 uint32_t sc_tx_itr;
390 uint32_t sc_rx_itr;
391 unsigned int sc_tx_ring_ndescs;
392 unsigned int sc_rx_ring_ndescs;
393 unsigned int sc_nqueue_pairs;
394 unsigned int sc_nqps_alloc;
395 unsigned int sc_nqps_vsi;
396 unsigned int sc_nqps_req;
397 struct iavf_queue_pair *sc_qps;
398 bool sc_txrx_workqueue;
399 u_int sc_tx_intr_process_limit;
400 u_int sc_tx_process_limit;
401 u_int sc_rx_intr_process_limit;
402 u_int sc_rx_process_limit;
403
404 struct workqueue *sc_workq;
405 struct workqueue *sc_workq_txrx;
406 struct iavf_work sc_reset_task;
407 struct iavf_work sc_wdto_task;
408 struct iavf_work sc_req_queues_task;
409 bool sc_req_queues_retried;
410 bool sc_resetting;
411 bool sc_reset_up;
412
413 struct sysctllog *sc_sysctllog;
414 struct iavf_stat_counters
415 sc_stat_counters;
416 };
417
418 #define IAVF_LOG(_sc, _lvl, _fmt, _args...) \
419 do { \
420 if (!(_sc)->sc_attached) { \
421 switch (_lvl) { \
422 case LOG_ERR: \
423 case LOG_WARNING: \
424 aprint_error_dev((_sc)->sc_dev, _fmt, ##_args); \
425 break; \
426 case LOG_INFO: \
427 aprint_normal_dev((_sc)->sc_dev,_fmt, ##_args); \
428 break; \
429 case LOG_DEBUG: \
430 default: \
431 aprint_debug_dev((_sc)->sc_dev, _fmt, ##_args); \
432 } \
433 } else { \
434 struct ifnet *_ifp = &(_sc)->sc_ec.ec_if; \
435 log((_lvl), "%s: " _fmt, _ifp->if_xname, ##_args); \
436 } \
437 } while (0)
438
439 static int iavf_dmamem_alloc(bus_dma_tag_t, struct ixl_dmamem *,
440 bus_size_t, bus_size_t);
441 static void iavf_dmamem_free(bus_dma_tag_t, struct ixl_dmamem *);
442 static struct ixl_aq_buf *
443 iavf_aqb_get(struct iavf_softc *, struct ixl_aq_bufs *);
444 static struct ixl_aq_buf *
445 iavf_aqb_get_locked(struct ixl_aq_bufs *);
446 static void iavf_aqb_put_locked(struct ixl_aq_bufs *, struct ixl_aq_buf *);
447 static void iavf_aqb_clean(struct ixl_aq_bufs *, bus_dma_tag_t);
448
449 static const struct iavf_product *
450 iavf_lookup(const struct pci_attach_args *);
451 static enum i40e_mac_type
452 iavf_mactype(pci_product_id_t);
453 static void iavf_pci_csr_setup(pci_chipset_tag_t, pcitag_t);
454 static int iavf_wait_active(struct iavf_softc *);
455 static bool iavf_is_etheranyaddr(const uint8_t *);
456 static void iavf_prepare_fakeaddr(struct iavf_softc *);
457 static int iavf_replace_lla(struct ifnet *,
458 const uint8_t *, const uint8_t *);
459 static void iavf_evcnt_attach(struct evcnt *,
460 const char *, const char *);
461 static int iavf_setup_interrupts(struct iavf_softc *);
462 static void iavf_teardown_interrupts(struct iavf_softc *);
463 static int iavf_setup_sysctls(struct iavf_softc *);
464 static void iavf_teardown_sysctls(struct iavf_softc *);
465 static int iavf_setup_stats(struct iavf_softc *);
466 static void iavf_teardown_stats(struct iavf_softc *);
467 static struct workqueue *
468 iavf_workq_create(const char *, pri_t, int, int);
469 static void iavf_workq_destroy(struct workqueue *);
470 static int iavf_work_set(struct iavf_work *, void (*)(void *), void *);
471 static void iavf_work_add(struct workqueue *, struct iavf_work *);
472 static void iavf_work_wait(struct workqueue *, struct iavf_work *);
473 static unsigned int
474 iavf_calc_msix_count(struct iavf_softc *);
475 static unsigned int
476 iavf_calc_queue_pair_size(struct iavf_softc *);
477 static int iavf_queue_pairs_alloc(struct iavf_softc *);
478 static void iavf_queue_pairs_free(struct iavf_softc *);
479 static int iavf_arq_fill(struct iavf_softc *);
480 static void iavf_arq_refill(void *);
481 static int iavf_arq_poll(struct iavf_softc *, uint32_t, int);
482 static void iavf_atq_done(struct iavf_softc *);
483 static int iavf_init_admin_queue(struct iavf_softc *);
484 static void iavf_cleanup_admin_queue(struct iavf_softc *);
485 static int iavf_arq(struct iavf_softc *);
486 static int iavf_adminq_exec(struct iavf_softc *,
487 struct ixl_aq_desc *, struct ixl_aq_buf *);
488 static int iavf_adminq_poll(struct iavf_softc *,
489 struct ixl_aq_desc *, struct ixl_aq_buf *, int);
490 static int iavf_adminq_poll_locked(struct iavf_softc *,
491 struct ixl_aq_desc *, struct ixl_aq_buf *, int);
492 static int iavf_add_multi(struct iavf_softc *, uint8_t *, uint8_t *);
493 static int iavf_del_multi(struct iavf_softc *, uint8_t *, uint8_t *);
494 static void iavf_del_all_multi(struct iavf_softc *);
495
496 static int iavf_get_version(struct iavf_softc *, struct ixl_aq_buf *);
497 static int iavf_get_vf_resources(struct iavf_softc *, struct ixl_aq_buf *);
498 static int iavf_get_stats(struct iavf_softc *);
499 static int iavf_config_irq_map(struct iavf_softc *, struct ixl_aq_buf *);
500 static int iavf_config_vsi_queues(struct iavf_softc *);
501 static int iavf_config_hena(struct iavf_softc *);
502 static int iavf_config_rss_key(struct iavf_softc *);
503 static int iavf_config_rss_lut(struct iavf_softc *);
504 static int iavf_config_promisc_mode(struct iavf_softc *, int, int);
505 static int iavf_config_vlan_stripping(struct iavf_softc *, int);
506 static int iavf_config_vlan_id(struct iavf_softc *, uint16_t, uint32_t);
507 static int iavf_queue_select(struct iavf_softc *, int);
508 static int iavf_request_queues(struct iavf_softc *, unsigned int);
509 static int iavf_reset_vf(struct iavf_softc *);
510 static int iavf_eth_addr(struct iavf_softc *, const uint8_t *, uint32_t);
511 static void iavf_process_version(struct iavf_softc *,
512 struct ixl_aq_desc *, struct ixl_aq_buf *);
513 static void iavf_process_vf_resources(struct iavf_softc *,
514 struct ixl_aq_desc *, struct ixl_aq_buf *);
515 static void iavf_process_irq_map(struct iavf_softc *,
516 struct ixl_aq_desc *);
517 static void iavf_process_vc_event(struct iavf_softc *,
518 struct ixl_aq_desc *, struct ixl_aq_buf *);
519 static void iavf_process_stats(struct iavf_softc *,
520 struct ixl_aq_desc *, struct ixl_aq_buf *);
521 static void iavf_process_req_queues(struct iavf_softc *,
522 struct ixl_aq_desc *, struct ixl_aq_buf *);
523
524 static int iavf_intr(void *);
525 static int iavf_queue_intr(void *);
526 static void iavf_tick(void *);
527 static void iavf_tick_halt(void *);
528 static void iavf_reset_request(void *);
529 static void iavf_reset_start(void *);
530 static void iavf_reset(void *);
531 static void iavf_reset_finish(struct iavf_softc *);
532 static int iavf_init(struct ifnet *);
533 static int iavf_init_locked(struct iavf_softc *);
534 static void iavf_stop(struct ifnet *, int);
535 static void iavf_stop_locked(struct iavf_softc *);
536 static int iavf_ioctl(struct ifnet *, u_long, void *);
537 static void iavf_start(struct ifnet *);
538 static int iavf_transmit(struct ifnet *, struct mbuf*);
539 static int iavf_watchdog(struct iavf_tx_ring *);
540 static void iavf_watchdog_timeout(void *);
541 static int iavf_media_change(struct ifnet *);
542 static void iavf_media_status(struct ifnet *, struct ifmediareq *);
543 static int iavf_ifflags_cb(struct ethercom *);
544 static int iavf_vlan_cb(struct ethercom *, uint16_t, bool);
545 static void iavf_deferred_transmit(void *);
546 static void iavf_handle_queue(void *);
547 static void iavf_handle_queue_wk(struct work *, void *);
548 static int iavf_reinit(struct iavf_softc *);
549 static int iavf_rxfill(struct iavf_softc *, struct iavf_rx_ring *);
550 static void iavf_txr_clean(struct iavf_softc *, struct iavf_tx_ring *);
551 static void iavf_rxr_clean(struct iavf_softc *, struct iavf_rx_ring *);
552 static int iavf_txeof(struct iavf_softc *, struct iavf_tx_ring *,
553 u_int, struct evcnt *);
554 static int iavf_rxeof(struct iavf_softc *, struct iavf_rx_ring *,
555 u_int, struct evcnt *);
556 static int iavf_iff(struct iavf_softc *);
557 static int iavf_iff_locked(struct iavf_softc *);
558 static void iavf_post_request_queues(void *);
559 static int iavf_sysctl_itr_handler(SYSCTLFN_PROTO);
560
561 static int iavf_match(device_t, cfdata_t, void *);
562 static void iavf_attach(device_t, device_t, void*);
563 static int iavf_detach(device_t, int);
564 static int iavf_finalize_teardown(device_t);
565
566 CFATTACH_DECL3_NEW(iavf, sizeof(struct iavf_softc),
567 iavf_match, iavf_attach, iavf_detach, NULL, NULL, NULL,
568 DVF_DETACH_SHUTDOWN);
569
570 static const struct iavf_product iavf_products[] = {
571 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_VF },
572 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_VF_HV },
573 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_VF },
574 /* required last entry */
575 {0, 0}
576 };
577
578 static const struct iavf_link_speed iavf_link_speeds[] = {
579 { 0, 0 },
580 { IF_Mbps(100), IFM_100_TX },
581 { IF_Mbps(1000), IFM_1000_T },
582 { IF_Gbps(10), IFM_10G_T },
583 { IF_Gbps(40), IFM_40G_CR4 },
584 { IF_Gbps(20), IFM_20G_KR2 },
585 { IF_Gbps(25), IFM_25G_CR }
586 };
587
588 static const struct iavf_aq_regs iavf_aq_regs = {
589 .atq_tail = I40E_VF_ATQT1,
590 .atq_tail_mask = I40E_VF_ATQT1_ATQT_MASK,
591 .atq_head = I40E_VF_ATQH1,
592 .atq_head_mask = I40E_VF_ARQH1_ARQH_MASK,
593 .atq_len = I40E_VF_ATQLEN1,
594 .atq_bal = I40E_VF_ATQBAL1,
595 .atq_bah = I40E_VF_ATQBAH1,
596 .atq_len_enable = I40E_VF_ATQLEN1_ATQENABLE_MASK,
597
598 .arq_tail = I40E_VF_ARQT1,
599 .arq_tail_mask = I40E_VF_ARQT1_ARQT_MASK,
600 .arq_head = I40E_VF_ARQH1,
601 .arq_head_mask = I40E_VF_ARQH1_ARQH_MASK,
602 .arq_len = I40E_VF_ARQLEN1,
603 .arq_bal = I40E_VF_ARQBAL1,
604 .arq_bah = I40E_VF_ARQBAH1,
605 .arq_len_enable = I40E_VF_ARQLEN1_ARQENABLE_MASK,
606 };
607
608 static struct iavf_module_params iavf_params = {
609 .debug = 0,
610 .rx_itr = 0x07a, /* 4K intrs/sec */
611 .tx_itr = 0x07a, /* 4K intrs/sec */
612 .tx_ndescs = 512,
613 .rx_ndescs = 256,
614 .max_qps = INT_MAX,
615 };
616
617 #define delaymsec(_x) DELAY(1000 * (_x))
618 #define iavf_rd(_s, _r) \
619 bus_space_read_4((_s)->sc_memt, (_s)->sc_memh, (_r))
620 #define iavf_wr(_s, _r, _v) \
621 bus_space_write_4((_s)->sc_memt, (_s)->sc_memh, (_r), (_v))
622 #define iavf_barrier(_s, _r, _l, _o) \
623 bus_space_barrier((_s)->sc_memt, (_s)->sc_memh, (_r), (_l), (_o))
624 #define iavf_flush(_s) (void)iavf_rd((_s), I40E_VFGEN_RSTAT)
625 #define iavf_nqueues(_sc) (1 << ((_sc)->sc_nqueue_pairs - 1))
626 #define iavf_allqueues(_sc) ((1 << ((_sc)->sc_nqueue_pairs)) - 1)
627
628 static inline void
629 iavf_intr_enable(struct iavf_softc *sc)
630 {
631
632 iavf_wr(sc, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL0_INTENA_MASK |
633 I40E_VFINT_DYN_CTL0_CLEARPBA_MASK |
634 (IAVF_NOITR << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT));
635 iavf_wr(sc, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);
636 iavf_flush(sc);
637 }
638
639 static inline void
640 iavf_intr_disable(struct iavf_softc *sc)
641 {
642
643 iavf_wr(sc, I40E_VFINT_DYN_CTL01,
644 (IAVF_NOITR << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT));
645 iavf_wr(sc, I40E_VFINT_ICR0_ENA1, 0);
646 iavf_flush(sc);
647 }
648
649 static inline void
650 iavf_queue_intr_enable(struct iavf_softc *sc, unsigned int qid)
651 {
652
653 iavf_wr(sc, I40E_VFINT_DYN_CTLN1(qid),
654 I40E_VFINT_DYN_CTLN1_INTENA_MASK |
655 I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
656 (IAVF_NOITR << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT));
657 iavf_flush(sc);
658 }
659
660 static inline void
661 iavf_queue_intr_disable(struct iavf_softc *sc, unsigned int qid)
662 {
663
664 iavf_wr(sc, I40E_VFINT_DYN_CTLN1(qid),
665 (IAVF_NOITR << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT));
666 iavf_flush(sc);
667 }
668
669 static inline void
670 iavf_aq_vc_set_opcode(struct ixl_aq_desc *iaq, uint32_t opcode)
671 {
672 struct iavf_aq_vc *vc;
673
674 vc = (struct iavf_aq_vc *)&iaq->iaq_cookie;
675 vc->iaq_vc_opcode = htole32(opcode);
676 }
677
678 static inline uint32_t
679 iavf_aq_vc_get_opcode(const struct ixl_aq_desc *iaq)
680 {
681 const struct iavf_aq_vc *vc;
682
683 vc = (const struct iavf_aq_vc *)&iaq->iaq_cookie;
684 return le32toh(vc->iaq_vc_opcode);
685 }
686
687 static inline uint32_t
688 iavf_aq_vc_get_retval(const struct ixl_aq_desc *iaq)
689 {
690 const struct iavf_aq_vc *vc;
691
692 vc = (const struct iavf_aq_vc *)&iaq->iaq_cookie;
693 return le32toh(vc->iaq_vc_retval);
694 }
695
696 static int
697 iavf_match(device_t parent, cfdata_t match, void *aux)
698 {
699 const struct pci_attach_args *pa = aux;
700
701 return (iavf_lookup(pa) != NULL) ? 1 : 0;
702 }
703
704 static void
705 iavf_attach(device_t parent, device_t self, void *aux)
706 {
707 struct iavf_softc *sc;
708 struct pci_attach_args *pa = aux;
709 struct ifnet *ifp;
710 struct ixl_aq_buf *aqb;
711 pcireg_t memtype;
712 char xnamebuf[MAXCOMLEN];
713 int error, i;
714
715 sc = device_private(self);
716 sc->sc_dev = self;
717 ifp = &sc->sc_ec.ec_if;
718
719 sc->sc_pa = *pa;
720 sc->sc_dmat = (pci_dma64_available(pa)) ? pa->pa_dmat64 : pa->pa_dmat;
721 sc->sc_aq_regs = &iavf_aq_regs;
722 sc->sc_debuglevel = iavf_params.debug;
723 sc->sc_tx_ring_ndescs = iavf_params.tx_ndescs;
724 sc->sc_rx_ring_ndescs = iavf_params.rx_ndescs;
725 sc->sc_tx_itr = iavf_params.tx_itr;
726 sc->sc_rx_itr = iavf_params.rx_itr;
727 sc->sc_nqps_req = MIN(ncpu, iavf_params.max_qps);
728 iavf_prepare_fakeaddr(sc);
729
730 sc->sc_mac_type = iavf_mactype(PCI_PRODUCT(pa->pa_id));
731 iavf_pci_csr_setup(pa->pa_pc, pa->pa_tag);
732
733 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IAVF_PCIREG);
734 if (pci_mapreg_map(pa, IAVF_PCIREG, memtype, 0,
735 &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems)) {
736 aprint_error(": unable to map registers\n");
737 return;
738 }
739
740 if (iavf_wait_active(sc) != 0) {
741 aprint_error(": VF reset timed out\n");
742 goto unmap;
743 }
744
745 mutex_init(&sc->sc_cfg_lock, MUTEX_DEFAULT, IPL_SOFTNET);
746 mutex_init(&sc->sc_adminq_lock, MUTEX_DEFAULT, IPL_NET);
747 SIMPLEQ_INIT(&sc->sc_atq_idle);
748 SIMPLEQ_INIT(&sc->sc_atq_live);
749 SIMPLEQ_INIT(&sc->sc_arq_idle);
750 SIMPLEQ_INIT(&sc->sc_arq_live);
751 sc->sc_arq_cons = 0;
752 sc->sc_arq_prod = 0;
753 aqb = NULL;
754
755 if (iavf_dmamem_alloc(sc->sc_dmat, &sc->sc_atq,
756 sizeof(struct ixl_aq_desc) * IAVF_AQ_NUM, IAVF_AQ_ALIGN) != 0) {
757 aprint_error(": unable to allocate atq\n");
758 goto free_mutex;
759 }
760
761 if (iavf_dmamem_alloc(sc->sc_dmat, &sc->sc_arq,
762 sizeof(struct ixl_aq_desc) * IAVF_AQ_NUM, IAVF_AQ_ALIGN) != 0) {
763 aprint_error(": unable to allocate arq\n");
764 goto free_atq;
765 }
766
767 for (i = 0; i < IAVF_AQ_NUM; i++) {
768 aqb = iavf_aqb_get(sc, NULL);
769 if (aqb != NULL) {
770 iavf_aqb_put_locked(&sc->sc_arq_idle, aqb);
771 }
772 }
773 aqb = NULL;
774
775 if (!iavf_arq_fill(sc)) {
776 aprint_error(": unable to fill arq descriptors\n");
777 goto free_arq;
778 }
779
780 if (iavf_init_admin_queue(sc) != 0) {
781 aprint_error(": unable to initialize admin queue\n");
782 goto shutdown;
783 }
784
785 aqb = iavf_aqb_get(sc, NULL);
786 if (aqb == NULL) {
787 aprint_error(": unable to allocate buffer for ATQ\n");
788 goto shutdown;
789 }
790
791 error = iavf_get_version(sc, aqb);
792 switch (error) {
793 case 0:
794 break;
795 case ETIMEDOUT:
796 aprint_error(": timeout waiting for VF version\n");
797 goto shutdown;
798 case ENOTSUP:
799 aprint_error(": unsupported VF version %d\n", sc->sc_major_ver);
800 goto shutdown;
801 default:
802 aprint_error(":unable to get VF interface version\n");
803 goto shutdown;
804 }
805
806 if (iavf_get_vf_resources(sc, aqb) != 0) {
807 aprint_error(": timeout waiting for VF resources\n");
808 goto shutdown;
809 }
810
811 aprint_normal(", VF version %d.%d%s",
812 sc->sc_major_ver, sc->sc_minor_ver,
813 (sc->sc_minor_ver > IAVF_VF_MINOR) ? "(minor mismatch)" : "");
814 aprint_normal(", VF %d, VSI %d", sc->sc_vf_id, sc->sc_vsi_id);
815 aprint_normal("\n");
816 aprint_naive("\n");
817
818 aprint_normal_dev(self, "Ethernet address %s\n",
819 ether_sprintf(sc->sc_enaddr));
820
821 if (iavf_queue_pairs_alloc(sc) != 0) {
822 goto shutdown;
823 }
824
825 if (iavf_setup_interrupts(sc) != 0) {
826 goto free_queue_pairs;
827 }
828
829 if (iavf_config_irq_map(sc, aqb) != 0) {
830 aprint_error(", timed out waiting for IRQ map response\n");
831 goto teardown_intrs;
832 }
833
834 if (iavf_setup_sysctls(sc) != 0) {
835 goto teardown_intrs;
836 }
837
838 if (iavf_setup_stats(sc) != 0) {
839 goto teardown_sysctls;
840 }
841
842 iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
843 aqb = NULL;
844
845 snprintf(xnamebuf, sizeof(xnamebuf),
846 "%s_adminq_cv", device_xname(self));
847 cv_init(&sc->sc_adminq_cv, xnamebuf);
848
849 callout_init(&sc->sc_tick, CALLOUT_MPSAFE);
850 callout_setfunc(&sc->sc_tick, iavf_tick, sc);
851
852 iavf_work_set(&sc->sc_reset_task, iavf_reset_start, sc);
853 iavf_work_set(&sc->sc_arq_refill, iavf_arq_refill, sc);
854 iavf_work_set(&sc->sc_wdto_task, iavf_watchdog_timeout, sc);
855 iavf_work_set(&sc->sc_req_queues_task, iavf_post_request_queues, sc);
856 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_cfg", device_xname(self));
857 sc->sc_workq = iavf_workq_create(xnamebuf, IAVF_WORKQUEUE_PRI,
858 IPL_NET, WQ_MPSAFE);
859 if (sc->sc_workq == NULL)
860 goto destroy_cv;
861
862 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_txrx", device_xname(self));
863 error = workqueue_create(&sc->sc_workq_txrx, xnamebuf,
864 iavf_handle_queue_wk, sc, IAVF_WORKQUEUE_PRI, IPL_NET,
865 WQ_PERCPU|WQ_MPSAFE);
866 if (error != 0) {
867 sc->sc_workq_txrx = NULL;
868 goto teardown_wqs;
869 }
870
871 error = if_initialize(ifp);
872 if (error != 0) {
873 aprint_error_dev(self, "if_initialize failed=%d\n", error);
874 goto teardown_wqs;
875 }
876
877 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
878
879 ifp->if_softc = sc;
880 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
881 ifp->if_extflags = IFEF_MPSAFE;
882 ifp->if_ioctl = iavf_ioctl;
883 ifp->if_start = iavf_start;
884 ifp->if_transmit = iavf_transmit;
885 ifp->if_watchdog = NULL;
886 ifp->if_init = iavf_init;
887 ifp->if_stop = iavf_stop;
888
889 IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_ndescs);
890 IFQ_SET_READY(&ifp->if_snd);
891 sc->sc_ipq = if_percpuq_create(ifp);
892
893 ifp->if_capabilities |= IAVF_IFCAP_RXCSUM;
894 ifp->if_capabilities |= IAVF_IFCAP_TXCSUM;
895
896 ether_set_vlan_cb(&sc->sc_ec, iavf_vlan_cb);
897 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
898 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
899 sc->sc_ec.ec_capenable = sc->sc_ec.ec_capabilities;
900
901 ether_set_ifflags_cb(&sc->sc_ec, iavf_ifflags_cb);
902
903 sc->sc_ec.ec_ifmedia = &sc->sc_media;
904 ifmedia_init_with_lock(&sc->sc_media, IFM_IMASK, iavf_media_change,
905 iavf_media_status, &sc->sc_cfg_lock);
906
907 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
908 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
909
910 if_deferred_start_init(ifp, NULL);
911 ether_ifattach(ifp, sc->sc_enaddr);
912
913 sc->sc_txrx_workqueue = true;
914 sc->sc_tx_process_limit = IAVF_TX_PROCESS_LIMIT;
915 sc->sc_rx_process_limit = IAVF_RX_PROCESS_LIMIT;
916 sc->sc_tx_intr_process_limit = IAVF_TX_INTR_PROCESS_LIMIT;
917 sc->sc_rx_intr_process_limit = IAVF_RX_INTR_PROCESS_LIMIT;
918
919 if_register(ifp);
920 if_link_state_change(ifp, sc->sc_link_state);
921 iavf_intr_enable(sc);
922 if (sc->sc_nqps_vsi < sc->sc_nqps_req)
923 iavf_work_add(sc->sc_workq, &sc->sc_req_queues_task);
924 sc->sc_attached = true;
925 return;
926
927 teardown_wqs:
928 config_finalize_register(self, iavf_finalize_teardown);
929 destroy_cv:
930 cv_destroy(&sc->sc_adminq_cv);
931 callout_destroy(&sc->sc_tick);
932 iavf_teardown_stats(sc);
933 teardown_sysctls:
934 iavf_teardown_sysctls(sc);
935 teardown_intrs:
936 iavf_teardown_interrupts(sc);
937 free_queue_pairs:
938 iavf_queue_pairs_free(sc);
939 shutdown:
940 if (aqb != NULL)
941 iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
942 iavf_cleanup_admin_queue(sc);
943 iavf_aqb_clean(&sc->sc_atq_idle, sc->sc_dmat);
944 iavf_aqb_clean(&sc->sc_arq_idle, sc->sc_dmat);
945 free_arq:
946 iavf_dmamem_free(sc->sc_dmat, &sc->sc_arq);
947 free_atq:
948 iavf_dmamem_free(sc->sc_dmat, &sc->sc_atq);
949 free_mutex:
950 mutex_destroy(&sc->sc_cfg_lock);
951 mutex_destroy(&sc->sc_adminq_lock);
952 unmap:
953 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
954 sc->sc_mems = 0;
955 sc->sc_attached = false;
956 }
957
958 static int
959 iavf_detach(device_t self, int flags)
960 {
961 struct iavf_softc *sc = device_private(self);
962 struct ifnet *ifp = &sc->sc_ec.ec_if;
963
964 if (!sc->sc_attached)
965 return 0;
966
967 iavf_stop(ifp, 1);
968 ether_ifdetach(ifp);
969 if_detach(ifp);
970 ifmedia_fini(&sc->sc_media);
971 if_percpuq_destroy(sc->sc_ipq);
972
973 iavf_intr_disable(sc);
974
975 mutex_enter(&sc->sc_adminq_lock);
976 mutex_exit(&sc->sc_adminq_lock);
977
978 /*
979 * set a dummy function to halt callout safely
980 * even if a workqueue entry calls callout_schedule()
981 */
982 callout_setfunc(&sc->sc_tick, iavf_tick_halt, sc);
983
984 iavf_work_wait(sc->sc_workq, &sc->sc_reset_task);
985 iavf_work_wait(sc->sc_workq, &sc->sc_arq_refill);
986 iavf_work_wait(sc->sc_workq, &sc->sc_wdto_task);
987 iavf_workq_destroy(sc->sc_workq);
988 sc->sc_workq = NULL;
989
990 callout_halt(&sc->sc_tick, NULL);
991 callout_destroy(&sc->sc_tick);
992
993 iavf_cleanup_admin_queue(sc);
994 iavf_aqb_clean(&sc->sc_atq_idle, sc->sc_dmat);
995 iavf_aqb_clean(&sc->sc_arq_idle, sc->sc_dmat);
996 iavf_dmamem_free(sc->sc_dmat, &sc->sc_arq);
997 iavf_dmamem_free(sc->sc_dmat, &sc->sc_atq);
998 cv_destroy(&sc->sc_adminq_cv);
999
1000 iavf_queue_pairs_free(sc);
1001 iavf_teardown_interrupts(sc);
1002 iavf_teardown_sysctls(sc);
1003 iavf_teardown_stats(sc);
1004 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
1005
1006 mutex_destroy(&sc->sc_adminq_lock);
1007 mutex_destroy(&sc->sc_cfg_lock);
1008
1009 return 0;
1010 }
1011
1012 static int
1013 iavf_finalize_teardown(device_t self)
1014 {
1015 struct iavf_softc *sc = device_private(self);
1016
1017 if (sc->sc_workq != NULL) {
1018 iavf_workq_destroy(sc->sc_workq);
1019 sc->sc_workq = NULL;
1020 }
1021
1022 if (sc->sc_workq_txrx != NULL) {
1023 workqueue_destroy(sc->sc_workq_txrx);
1024 sc->sc_workq_txrx = NULL;
1025 }
1026
1027 return 0;
1028 }
1029
1030 static int
1031 iavf_init(struct ifnet *ifp)
1032 {
1033 struct iavf_softc *sc;
1034 int rv;
1035
1036 sc = ifp->if_softc;
1037 mutex_enter(&sc->sc_cfg_lock);
1038 rv = iavf_init_locked(sc);
1039 mutex_exit(&sc->sc_cfg_lock);
1040
1041 return rv;
1042 }
1043
1044 static int
1045 iavf_init_locked(struct iavf_softc *sc)
1046 {
1047 struct ifnet *ifp = &sc->sc_ec.ec_if;
1048 unsigned int i;
1049 int error;
1050
1051 KASSERT(mutex_owned(&sc->sc_cfg_lock));
1052
1053 if (ISSET(ifp->if_flags, IFF_RUNNING))
1054 iavf_stop_locked(sc);
1055
1056 if (sc->sc_resetting)
1057 return ENXIO;
1058
1059 error = iavf_reinit(sc);
1060 if (error) {
1061 iavf_stop_locked(sc);
1062 return error;
1063 }
1064
1065 SET(ifp->if_flags, IFF_RUNNING);
1066 CLR(ifp->if_flags, IFF_OACTIVE);
1067
1068 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1069 iavf_wr(sc, I40E_VFINT_ITRN1(IAVF_ITR_RX, i), sc->sc_rx_itr);
1070 iavf_wr(sc, I40E_VFINT_ITRN1(IAVF_ITR_TX, i), sc->sc_tx_itr);
1071 }
1072 iavf_wr(sc, I40E_VFINT_ITR01(IAVF_ITR_RX), sc->sc_rx_itr);
1073 iavf_wr(sc, I40E_VFINT_ITR01(IAVF_ITR_TX), sc->sc_tx_itr);
1074 iavf_wr(sc, I40E_VFINT_ITR01(IAVF_ITR_MISC), 0);
1075
1076 error = iavf_iff_locked(sc);
1077 if (error) {
1078 iavf_stop_locked(sc);
1079 return error;
1080 };
1081
1082 /* ETHERCAP_VLAN_HWFILTER can not be disabled */
1083 SET(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
1084
1085 callout_schedule(&sc->sc_tick, IAVF_TICK_INTERVAL);
1086 return 0;
1087 }
1088
1089 static int
1090 iavf_reinit(struct iavf_softc *sc)
1091 {
1092 struct iavf_rx_ring *rxr;
1093 struct iavf_tx_ring *txr;
1094 unsigned int i;
1095 uint32_t reg;
1096
1097 KASSERT(mutex_owned(&sc->sc_cfg_lock));
1098
1099 sc->sc_reset_up = true;
1100 sc->sc_nqueue_pairs = MIN(sc->sc_nqps_alloc, sc->sc_nintrs - 1);
1101
1102 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1103 rxr = sc->sc_qps[i].qp_rxr;
1104 txr = sc->sc_qps[i].qp_txr;
1105
1106 iavf_rxfill(sc, rxr);
1107 txr->txr_watchdog = IAVF_WATCHDOG_STOP;
1108 }
1109
1110 if (iavf_config_vsi_queues(sc) != 0)
1111 return EIO;
1112
1113 if (iavf_config_hena(sc) != 0)
1114 return EIO;
1115
1116 iavf_config_rss_key(sc);
1117 iavf_config_rss_lut(sc);
1118
1119 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1120 iavf_queue_intr_enable(sc, i);
1121 }
1122 /* unmask */
1123 reg = iavf_rd(sc, I40E_VFINT_DYN_CTL01);
1124 reg |= (IAVF_NOITR << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT);
1125 iavf_wr(sc, I40E_VFINT_DYN_CTL01, reg);
1126
1127 if (iavf_queue_select(sc, IAVF_VC_OP_ENABLE_QUEUES) != 0)
1128 return EIO;
1129
1130 return 0;
1131 }
1132
1133 static void
1134 iavf_stop(struct ifnet *ifp, int disable)
1135 {
1136 struct iavf_softc *sc;
1137
1138 sc = ifp->if_softc;
1139 mutex_enter(&sc->sc_cfg_lock);
1140 iavf_stop_locked(sc);
1141 mutex_exit(&sc->sc_cfg_lock);
1142 }
1143
1144 static void
1145 iavf_stop_locked(struct iavf_softc *sc)
1146 {
1147 struct ifnet *ifp = &sc->sc_ec.ec_if;
1148 struct iavf_rx_ring *rxr;
1149 struct iavf_tx_ring *txr;
1150 uint32_t reg;
1151 unsigned int i;
1152
1153 KASSERT(mutex_owned(&sc->sc_cfg_lock));
1154
1155 CLR(ifp->if_flags, IFF_RUNNING);
1156 sc->sc_reset_up = false;
1157 callout_stop(&sc->sc_tick);
1158
1159 if (!sc->sc_resetting) {
1160 /* disable queues*/
1161 if (iavf_queue_select(sc, IAVF_VC_OP_DISABLE_QUEUES) != 0) {
1162 goto die;
1163 }
1164 }
1165
1166 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1167 iavf_queue_intr_disable(sc, i);
1168 }
1169
1170 /* mask interrupts */
1171 reg = iavf_rd(sc, I40E_VFINT_DYN_CTL01);
1172 reg |= I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK |
1173 (IAVF_NOITR << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT);
1174 iavf_wr(sc, I40E_VFINT_DYN_CTL01, reg);
1175
1176 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1177 rxr = sc->sc_qps[i].qp_rxr;
1178 txr = sc->sc_qps[i].qp_txr;
1179
1180 mutex_enter(&rxr->rxr_lock);
1181 iavf_rxr_clean(sc, rxr);
1182 mutex_exit(&rxr->rxr_lock);
1183
1184 mutex_enter(&txr->txr_lock);
1185 iavf_txr_clean(sc, txr);
1186 mutex_exit(&txr->txr_lock);
1187
1188 workqueue_wait(sc->sc_workq_txrx,
1189 &sc->sc_qps[i].qp_work);
1190 }
1191
1192 return;
1193 die:
1194 if (!sc->sc_dead) {
1195 sc->sc_dead = true;
1196 log(LOG_INFO, "%s: Request VF reset\n", ifp->if_xname);
1197
1198 iavf_work_set(&sc->sc_reset_task, iavf_reset_request, sc);
1199 iavf_work_add(sc->sc_workq, &sc->sc_reset_task);
1200 }
1201 log(LOG_CRIT, "%s: failed to shut down rings\n", ifp->if_xname);
1202 }
1203
1204 static int
1205 iavf_watchdog(struct iavf_tx_ring *txr)
1206 {
1207 struct iavf_softc *sc;
1208
1209 sc = txr->txr_sc;
1210
1211 mutex_enter(&txr->txr_lock);
1212
1213 if (txr->txr_watchdog == IAVF_WATCHDOG_STOP
1214 || --txr->txr_watchdog > 0) {
1215 mutex_exit(&txr->txr_lock);
1216 return 0;
1217 }
1218
1219 txr->txr_watchdog = IAVF_WATCHDOG_STOP;
1220 txr->txr_watchdogto.ev_count++;
1221 mutex_exit(&txr->txr_lock);
1222
1223 device_printf(sc->sc_dev, "watchdog timeout on queue %d\n",
1224 txr->txr_qid);
1225 return 1;
1226 }
1227
1228 static void
1229 iavf_watchdog_timeout(void *xsc)
1230 {
1231 struct iavf_softc *sc;
1232 struct ifnet *ifp;
1233
1234 sc = xsc;
1235 ifp = &sc->sc_ec.ec_if;
1236
1237 mutex_enter(&sc->sc_cfg_lock);
1238 if (ISSET(ifp->if_flags, IFF_RUNNING))
1239 iavf_init_locked(sc);
1240 mutex_exit(&sc->sc_cfg_lock);
1241 }
1242
1243 static int
1244 iavf_media_change(struct ifnet *ifp)
1245 {
1246 struct iavf_softc *sc;
1247 struct ifmedia *ifm;
1248
1249 sc = ifp->if_softc;
1250 ifm = &sc->sc_media;
1251
1252 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1253 return EINVAL;
1254
1255 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1256 case IFM_AUTO:
1257 break;
1258 default:
1259 return EINVAL;
1260 }
1261
1262 return 0;
1263 }
1264
1265 static void
1266 iavf_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1267 {
1268 struct iavf_softc *sc = ifp->if_softc;
1269
1270 KASSERT(mutex_owned(&sc->sc_cfg_lock));
1271
1272 ifmr->ifm_status = sc->sc_media_status;
1273 ifmr->ifm_active = sc->sc_media_active;
1274 }
1275
1276 static int
1277 iavf_ifflags_cb(struct ethercom *ec)
1278 {
1279 struct ifnet *ifp = &ec->ec_if;
1280 struct iavf_softc *sc = ifp->if_softc;
1281
1282 /* vlan hwfilter can not be disabled */
1283 SET(ec->ec_capenable, ETHERCAP_VLAN_HWFILTER);
1284
1285 return iavf_iff(sc);
1286 }
1287
1288 static int
1289 iavf_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
1290 {
1291 struct ifnet *ifp = &ec->ec_if;
1292 struct iavf_softc *sc = ifp->if_softc;
1293 int rv;
1294
1295 mutex_enter(&sc->sc_cfg_lock);
1296
1297 if (sc->sc_resetting) {
1298 mutex_exit(&sc->sc_cfg_lock);
1299
1300 /* all vlan id was already removed */
1301 if (!set)
1302 return 0;
1303
1304 return ENXIO;
1305 }
1306
1307 /* ETHERCAP_VLAN_HWFILTER can not be disabled */
1308 SET(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
1309
1310 if (set) {
1311 rv = iavf_config_vlan_id(sc, vid, IAVF_VC_OP_ADD_VLAN);
1312 if (!ISSET(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWTAGGING)) {
1313 iavf_config_vlan_stripping(sc,
1314 sc->sc_ec.ec_capenable);
1315 }
1316 } else {
1317 rv = iavf_config_vlan_id(sc, vid, IAVF_VC_OP_DEL_VLAN);
1318 }
1319
1320 mutex_exit(&sc->sc_cfg_lock);
1321
1322 if (rv != 0)
1323 return EIO;
1324
1325 return 0;
1326 }
1327
1328 static int
1329 iavf_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1330 {
1331 struct ifreq *ifr = (struct ifreq *)data;
1332 struct iavf_softc *sc = (struct iavf_softc *)ifp->if_softc;
1333 const struct sockaddr *sa;
1334 uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
1335 int s, error = 0;
1336 unsigned int nmtu;
1337
1338 switch (cmd) {
1339 case SIOCSIFMTU:
1340 nmtu = ifr->ifr_mtu;
1341
1342 if (nmtu < IAVF_MIN_MTU || nmtu > IAVF_MAX_MTU) {
1343 error = EINVAL;
1344 break;
1345 }
1346 if (ifp->if_mtu != nmtu) {
1347 s = splnet();
1348 error = ether_ioctl(ifp, cmd, data);
1349 splx(s);
1350 if (error == ENETRESET)
1351 error = iavf_init(ifp);
1352 }
1353 break;
1354 case SIOCADDMULTI:
1355 sa = ifreq_getaddr(SIOCADDMULTI, ifr);
1356 if (ether_addmulti(sa, &sc->sc_ec) == ENETRESET) {
1357 error = ether_multiaddr(sa, addrlo, addrhi);
1358 if (error != 0)
1359 return error;
1360
1361 error = iavf_add_multi(sc, addrlo, addrhi);
1362 if (error != 0 && error != ENETRESET) {
1363 ether_delmulti(sa, &sc->sc_ec);
1364 error = EIO;
1365 }
1366 }
1367 break;
1368
1369 case SIOCDELMULTI:
1370 sa = ifreq_getaddr(SIOCDELMULTI, ifr);
1371 if (ether_delmulti(sa, &sc->sc_ec) == ENETRESET) {
1372 error = ether_multiaddr(sa, addrlo, addrhi);
1373 if (error != 0)
1374 return error;
1375
1376 error = iavf_del_multi(sc, addrlo, addrhi);
1377 }
1378 break;
1379
1380 default:
1381 s = splnet();
1382 error = ether_ioctl(ifp, cmd, data);
1383 splx(s);
1384 }
1385
1386 if (error == ENETRESET)
1387 error = iavf_iff(sc);
1388
1389 return error;
1390 }
1391
1392 static int
1393 iavf_iff(struct iavf_softc *sc)
1394 {
1395 int error;
1396
1397 mutex_enter(&sc->sc_cfg_lock);
1398 error = iavf_iff_locked(sc);
1399 mutex_exit(&sc->sc_cfg_lock);
1400
1401 return error;
1402 }
1403
1404 static int
1405 iavf_iff_locked(struct iavf_softc *sc)
1406 {
1407 struct ifnet *ifp = &sc->sc_ec.ec_if;
1408 int unicast, multicast;
1409 const uint8_t *enaddr;
1410
1411 KASSERT(mutex_owned(&sc->sc_cfg_lock));
1412
1413 if (!ISSET(ifp->if_flags, IFF_RUNNING))
1414 return 0;
1415
1416 unicast = 0;
1417 multicast = 0;
1418 if (ISSET(ifp->if_flags, IFF_PROMISC)) {
1419 unicast = 1;
1420 multicast = 1;
1421 } else if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
1422 multicast = 1;
1423 }
1424
1425 iavf_config_promisc_mode(sc, unicast, multicast);
1426
1427 iavf_config_vlan_stripping(sc, sc->sc_ec.ec_capenable);
1428
1429 enaddr = CLLADDR(ifp->if_sadl);
1430 if (memcmp(enaddr, sc->sc_enaddr_added, ETHER_ADDR_LEN) != 0) {
1431 if (!iavf_is_etheranyaddr(sc->sc_enaddr_added)) {
1432 iavf_eth_addr(sc, sc->sc_enaddr_added,
1433 IAVF_VC_OP_DEL_ETH_ADDR);
1434 }
1435 memcpy(sc->sc_enaddr_added, enaddr, ETHER_ADDR_LEN);
1436 iavf_eth_addr(sc, enaddr, IAVF_VC_OP_ADD_ETH_ADDR);
1437 }
1438
1439 return 0;
1440 }
1441
1442 static const struct iavf_product *
1443 iavf_lookup(const struct pci_attach_args *pa)
1444 {
1445 const struct iavf_product *iavfp;
1446
1447 for (iavfp = iavf_products; iavfp->vendor_id != 0; iavfp++) {
1448 if (PCI_VENDOR(pa->pa_id) == iavfp->vendor_id &&
1449 PCI_PRODUCT(pa->pa_id) == iavfp->product_id)
1450 return iavfp;
1451 }
1452
1453 return NULL;
1454 }
1455
1456 static enum i40e_mac_type
1457 iavf_mactype(pci_product_id_t id)
1458 {
1459
1460 switch (id) {
1461 case PCI_PRODUCT_INTEL_XL710_VF:
1462 case PCI_PRODUCT_INTEL_XL710_VF_HV:
1463 return I40E_MAC_VF;
1464 case PCI_PRODUCT_INTEL_X722_VF:
1465 return I40E_MAC_X722_VF;
1466 }
1467
1468 return I40E_MAC_GENERIC;
1469 }
1470
1471 static const struct iavf_link_speed *
1472 iavf_find_link_speed(struct iavf_softc *sc, uint32_t link_speed)
1473 {
1474 size_t i;
1475
1476 for (i = 0; i < __arraycount(iavf_link_speeds); i++) {
1477 if (link_speed & (1 << i))
1478 return (&iavf_link_speeds[i]);
1479 }
1480
1481 return NULL;
1482 }
1483
1484 static void
1485 iavf_pci_csr_setup(pci_chipset_tag_t pc, pcitag_t tag)
1486 {
1487 pcireg_t csr;
1488
1489 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
1490 csr |= (PCI_COMMAND_MASTER_ENABLE |
1491 PCI_COMMAND_MEM_ENABLE);
1492 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
1493 }
1494
1495 static int
1496 iavf_wait_active(struct iavf_softc *sc)
1497 {
1498 int tries;
1499 uint32_t reg;
1500
1501 for (tries = 0; tries < 100; tries++) {
1502 reg = iavf_rd(sc, I40E_VFGEN_RSTAT) &
1503 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1504 if (reg == IAVF_VFR_VFACTIVE ||
1505 reg == IAVF_VFR_COMPLETED)
1506 return 0;
1507
1508 delaymsec(10);
1509 }
1510
1511 return -1;
1512 }
1513
1514 static bool
1515 iavf_is_etheranyaddr(const uint8_t *enaddr)
1516 {
1517 static const uint8_t etheranyaddr[ETHER_ADDR_LEN] = {
1518 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1519 };
1520
1521 if (memcmp(enaddr, etheranyaddr, ETHER_ADDR_LEN) != 0)
1522 return false;
1523
1524 return true;
1525 }
1526
1527 static void
1528 iavf_prepare_fakeaddr(struct iavf_softc *sc)
1529 {
1530 uint64_t rndval;
1531
1532 if (!iavf_is_etheranyaddr(sc->sc_enaddr_fake))
1533 return;
1534
1535 rndval = cprng_strong64();
1536
1537 memcpy(sc->sc_enaddr_fake, &rndval, sizeof(sc->sc_enaddr_fake));
1538 sc->sc_enaddr_fake[0] &= 0xFE;
1539 sc->sc_enaddr_fake[0] |= 0x02;
1540 }
1541
1542 static int
1543 iavf_replace_lla(struct ifnet *ifp, const uint8_t *prev, const uint8_t *next)
1544 {
1545 union {
1546 struct sockaddr sa;
1547 struct sockaddr_dl sdl;
1548 struct sockaddr_storage ss;
1549 } u;
1550 struct psref psref_prev, psref_next;
1551 struct ifaddr *ifa_prev, *ifa_next;
1552 const struct sockaddr_dl *nsdl;
1553 int s, error;
1554
1555 KASSERT(IFNET_LOCKED(ifp));
1556
1557 error = 0;
1558 ifa_prev = ifa_next = NULL;
1559
1560 if (memcmp(prev, next, ETHER_ADDR_LEN) == 0) {
1561 goto done;
1562 }
1563
1564 if (sockaddr_dl_init(&u.sdl, sizeof(u.ss), ifp->if_index,
1565 ifp->if_type, ifp->if_xname, strlen(ifp->if_xname),
1566 prev, ETHER_ADDR_LEN) == NULL) {
1567 error = EINVAL;
1568 goto done;
1569 }
1570
1571 s = pserialize_read_enter();
1572 IFADDR_READER_FOREACH(ifa_prev, ifp) {
1573 if (sockaddr_cmp(&u.sa, ifa_prev->ifa_addr) == 0) {
1574 ifa_acquire(ifa_prev, &psref_prev);
1575 break;
1576 }
1577 }
1578 pserialize_read_exit(s);
1579
1580 if (sockaddr_dl_init(&u.sdl, sizeof(u.ss), ifp->if_index,
1581 ifp->if_type, ifp->if_xname, strlen(ifp->if_xname),
1582 next, ETHER_ADDR_LEN) == NULL) {
1583 error = EINVAL;
1584 goto done;
1585 }
1586
1587 s = pserialize_read_enter();
1588 IFADDR_READER_FOREACH(ifa_next, ifp) {
1589 if (sockaddr_cmp(&u.sa, ifa_next->ifa_addr) == 0) {
1590 ifa_acquire(ifa_next, &psref_next);
1591 break;
1592 }
1593 }
1594 pserialize_read_exit(s);
1595
1596 if (ifa_next == NULL) {
1597 nsdl = &u.sdl;
1598 ifa_next = if_dl_create(ifp, &nsdl);
1599 if (ifa_next == NULL) {
1600 error = ENOMEM;
1601 goto done;
1602 }
1603
1604 s = pserialize_read_enter();
1605 ifa_acquire(ifa_next, &psref_next);
1606 pserialize_read_exit(s);
1607
1608 sockaddr_copy(ifa_next->ifa_addr,
1609 ifa_next->ifa_addr->sa_len, &u.sa);
1610 ifa_insert(ifp, ifa_next);
1611 } else {
1612 nsdl = NULL;
1613 }
1614
1615 if (ifa_prev != NULL && ifa_prev == ifp->if_dl) {
1616 if_activate_sadl(ifp, ifa_next, nsdl);
1617 }
1618
1619 ifa_release(ifa_next, &psref_next);
1620 ifa_next = NULL;
1621
1622 if (ifa_prev != NULL && ifa_prev != ifp->if_hwdl) {
1623 ifaref(ifa_prev);
1624 ifa_release(ifa_prev, &psref_prev);
1625 ifa_remove(ifp, ifa_prev);
1626 KASSERTMSG(ifa_prev->ifa_refcnt == 1, "ifa_refcnt=%d",
1627 ifa_prev->ifa_refcnt);
1628 ifafree(ifa_prev);
1629 ifa_prev = NULL;
1630 }
1631
1632 if (ISSET(ifp->if_flags, IFF_RUNNING))
1633 error = ENETRESET;
1634
1635 done:
1636 if (ifa_prev != NULL)
1637 ifa_release(ifa_prev, &psref_prev);
1638 if (ifa_next != NULL)
1639 ifa_release(ifa_next, &psref_next);
1640
1641 return error;
1642 }
1643 static int
1644 iavf_add_multi(struct iavf_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
1645 {
1646 struct ifnet *ifp = &sc->sc_ec.ec_if;
1647 int rv;
1648
1649 if (ISSET(ifp->if_flags, IFF_ALLMULTI))
1650 return 0;
1651
1652 if (memcmp(addrlo, addrhi, ETHER_ADDR_LEN) != 0) {
1653 iavf_del_all_multi(sc);
1654 SET(ifp->if_flags, IFF_ALLMULTI);
1655 return ENETRESET;
1656 }
1657
1658 rv = iavf_eth_addr(sc, addrlo, IAVF_VC_OP_ADD_ETH_ADDR);
1659
1660 if (rv == ENOSPC) {
1661 iavf_del_all_multi(sc);
1662 SET(ifp->if_flags, IFF_ALLMULTI);
1663 return ENETRESET;
1664 }
1665
1666 return rv;
1667 }
1668
1669 static int
1670 iavf_del_multi(struct iavf_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
1671 {
1672 struct ifnet *ifp = &sc->sc_ec.ec_if;
1673 struct ethercom *ec = &sc->sc_ec;
1674 struct ether_multi *enm, *enm_last;
1675 struct ether_multistep step;
1676 int error, rv = 0;
1677
1678 if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
1679 if (memcmp(addrlo, addrhi, ETHER_ADDR_LEN) != 0)
1680 return 0;
1681
1682 iavf_eth_addr(sc, addrlo, IAVF_VC_OP_DEL_ETH_ADDR);
1683 return 0;
1684 }
1685
1686 ETHER_LOCK(ec);
1687 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1688 ETHER_NEXT_MULTI(step, enm)) {
1689 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
1690 ETHER_ADDR_LEN) != 0) {
1691 goto out;
1692 }
1693 }
1694
1695 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1696 ETHER_NEXT_MULTI(step, enm)) {
1697 error = iavf_eth_addr(sc, enm->enm_addrlo,
1698 IAVF_VC_OP_ADD_ETH_ADDR);
1699 if (error != 0)
1700 break;
1701 }
1702
1703 if (enm != NULL) {
1704 enm_last = enm;
1705 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1706 ETHER_NEXT_MULTI(step, enm)) {
1707 if (enm == enm_last)
1708 break;
1709
1710 iavf_eth_addr(sc, enm->enm_addrlo,
1711 IAVF_VC_OP_DEL_ETH_ADDR);
1712 }
1713 } else {
1714 CLR(ifp->if_flags, IFF_ALLMULTI);
1715 rv = ENETRESET;
1716 }
1717
1718 out:
1719 ETHER_UNLOCK(ec);
1720 return rv;
1721 }
1722
1723 static void
1724 iavf_del_all_multi(struct iavf_softc *sc)
1725 {
1726 struct ethercom *ec = &sc->sc_ec;
1727 struct ether_multi *enm;
1728 struct ether_multistep step;
1729
1730 ETHER_LOCK(ec);
1731 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1732 ETHER_NEXT_MULTI(step, enm)) {
1733 iavf_eth_addr(sc, enm->enm_addrlo,
1734 IAVF_VC_OP_DEL_ETH_ADDR);
1735 }
1736 ETHER_UNLOCK(ec);
1737 }
1738
1739 static int
1740 iavf_setup_interrupts(struct iavf_softc *sc)
1741 {
1742 struct pci_attach_args *pa;
1743 kcpuset_t *affinity = NULL;
1744 char intrbuf[PCI_INTRSTR_LEN], xnamebuf[32];
1745 char const *intrstr;
1746 int counts[PCI_INTR_TYPE_SIZE];
1747 int error, affinity_to;
1748 unsigned int vector, qid, num;
1749
1750 /* queue pairs + misc interrupt */
1751 num = sc->sc_nqps_alloc + 1;
1752
1753 num = MIN(num, iavf_calc_msix_count(sc));
1754 if (num <= 0) {
1755 return -1;
1756 }
1757
1758 KASSERT(sc->sc_nqps_alloc > 0);
1759 num = MIN(num, sc->sc_nqps_alloc + 1);
1760
1761 pa = &sc->sc_pa;
1762 memset(counts, 0, sizeof(counts));
1763 counts[PCI_INTR_TYPE_MSIX] = num;
1764
1765 error = pci_intr_alloc(pa, &sc->sc_ihp, counts, PCI_INTR_TYPE_MSIX);
1766 if (error != 0) {
1767 IAVF_LOG(sc, LOG_WARNING, "couldn't allocate interrupts\n");
1768 return -1;
1769 }
1770
1771 KASSERT(pci_intr_type(pa->pa_pc, sc->sc_ihp[0]) == PCI_INTR_TYPE_MSIX);
1772
1773 if (counts[PCI_INTR_TYPE_MSIX] < 1) {
1774 IAVF_LOG(sc, LOG_ERR, "couldn't allocate interrupts\n");
1775 } else if (counts[PCI_INTR_TYPE_MSIX] != (int)num) {
1776 IAVF_LOG(sc, LOG_DEBUG,
1777 "request %u intruppts, but allocate %d interrupts\n",
1778 num, counts[PCI_INTR_TYPE_MSIX]);
1779 num = counts[PCI_INTR_TYPE_MSIX];
1780 }
1781
1782 sc->sc_ihs = kmem_alloc(sizeof(sc->sc_ihs[0]) * num, KM_NOSLEEP);
1783 if (sc->sc_ihs == NULL) {
1784 IAVF_LOG(sc, LOG_ERR,
1785 "couldn't allocate memory for interrupts\n");
1786 goto fail;
1787 }
1788
1789 /* vector #0 is Misc interrupt */
1790 vector = 0;
1791 pci_intr_setattr(pa->pa_pc, &sc->sc_ihp[vector], PCI_INTR_MPSAFE, true);
1792 intrstr = pci_intr_string(pa->pa_pc, sc->sc_ihp[vector],
1793 intrbuf, sizeof(intrbuf));
1794 snprintf(xnamebuf, sizeof(xnamebuf), "%s-Misc",
1795 device_xname(sc->sc_dev));
1796
1797 sc->sc_ihs[vector] = pci_intr_establish_xname(pa->pa_pc,
1798 sc->sc_ihp[vector], IPL_NET, iavf_intr, sc, xnamebuf);
1799 if (sc->sc_ihs[vector] == NULL) {
1800 IAVF_LOG(sc, LOG_WARNING,
1801 "unable to establish interrupt at %s", intrstr);
1802 goto fail;
1803 }
1804
1805 kcpuset_create(&affinity, false);
1806 affinity_to = ((int)num <= ncpu) ? 1 : 0;
1807 qid = 0;
1808 for (vector = 1; vector < num; vector++) {
1809 pci_intr_setattr(pa->pa_pc, &sc->sc_ihp[vector],
1810 PCI_INTR_MPSAFE, true);
1811 intrstr = pci_intr_string(pa->pa_pc, sc->sc_ihp[vector],
1812 intrbuf, sizeof(intrbuf));
1813 snprintf(xnamebuf, sizeof(xnamebuf), "%s-TXRX%u",
1814 device_xname(sc->sc_dev), qid);
1815
1816 sc->sc_ihs[vector] = pci_intr_establish_xname(pa->pa_pc,
1817 sc->sc_ihp[vector], IPL_NET, iavf_queue_intr,
1818 (void *)&sc->sc_qps[qid], xnamebuf);
1819 if (sc->sc_ihs[vector] == NULL) {
1820 IAVF_LOG(sc, LOG_WARNING,
1821 "unable to establish interrupt at %s\n", intrstr);
1822 goto fail;
1823 }
1824
1825 kcpuset_zero(affinity);
1826 kcpuset_set(affinity, affinity_to);
1827 error = interrupt_distribute(sc->sc_ihs[vector],
1828 affinity, NULL);
1829
1830 if (error == 0) {
1831 IAVF_LOG(sc, LOG_INFO,
1832 "for TXRX%d interrupt at %s, affinity to %d\n",
1833 qid, intrstr, affinity_to);
1834 } else {
1835 IAVF_LOG(sc, LOG_INFO,
1836 "for TXRX%d interrupt at %s\n",
1837 qid, intrstr);
1838 }
1839
1840 qid++;
1841 affinity_to = (affinity_to + 1) % ncpu;
1842 }
1843
1844 kcpuset_destroy(affinity);
1845
1846 sc->sc_nintrs = num;
1847 return 0;
1848
1849 fail:
1850 if (affinity != NULL)
1851 kcpuset_destroy(affinity);
1852 for (vector = 0; vector < num; vector++) {
1853 if (sc->sc_ihs[vector] == NULL)
1854 continue;
1855 pci_intr_disestablish(pa->pa_pc, sc->sc_ihs[vector]);
1856 }
1857 kmem_free(sc->sc_ihs, sizeof(sc->sc_ihs[0]) * num);
1858 pci_intr_release(pa->pa_pc, sc->sc_ihp, num);
1859
1860 return -1;
1861 }
1862
1863 static void
1864 iavf_teardown_interrupts(struct iavf_softc *sc)
1865 {
1866 struct pci_attach_args *pa;
1867 unsigned int i;
1868
1869 if (sc->sc_ihs == NULL)
1870 return;
1871
1872 pa = &sc->sc_pa;
1873
1874 for (i = 0; i < sc->sc_nintrs; i++) {
1875 pci_intr_disestablish(pa->pa_pc, sc->sc_ihs[i]);
1876 }
1877
1878 kmem_free(sc->sc_ihs, sizeof(sc->sc_ihs[0]) * sc->sc_nintrs);
1879 sc->sc_ihs = NULL;
1880
1881 pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs);
1882 sc->sc_nintrs = 0;
1883 }
1884
1885 static int
1886 iavf_setup_sysctls(struct iavf_softc *sc)
1887 {
1888 const char *devname;
1889 struct sysctllog **log;
1890 const struct sysctlnode *rnode, *rxnode, *txnode;
1891 int error;
1892
1893 log = &sc->sc_sysctllog;
1894 devname = device_xname(sc->sc_dev);
1895
1896 error = sysctl_createv(log, 0, NULL, &rnode,
1897 0, CTLTYPE_NODE, devname,
1898 SYSCTL_DESCR("iavf information and settings"),
1899 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
1900 if (error)
1901 goto out;
1902
1903 error = sysctl_createv(log, 0, &rnode, NULL,
1904 CTLFLAG_READWRITE, CTLTYPE_BOOL, "txrx_workqueue",
1905 SYSCTL_DESCR("Use workqueue for packet processing"),
1906 NULL, 0, &sc->sc_txrx_workqueue, 0, CTL_CREATE, CTL_EOL);
1907 if (error)
1908 goto out;
1909
1910 error = sysctl_createv(log, 0, &rnode, NULL,
1911 CTLFLAG_READWRITE, CTLTYPE_INT, "debug_level",
1912 SYSCTL_DESCR("Debug level"),
1913 NULL, 0, &sc->sc_debuglevel, 0, CTL_CREATE, CTL_EOL);
1914 if (error)
1915 goto out;
1916
1917 error = sysctl_createv(log, 0, &rnode, &rxnode,
1918 0, CTLTYPE_NODE, "rx",
1919 SYSCTL_DESCR("iavf information and settings for Rx"),
1920 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
1921 if (error)
1922 goto out;
1923
1924 error = sysctl_createv(log, 0, &rxnode, NULL,
1925 CTLFLAG_READWRITE, CTLTYPE_INT, "itr",
1926 SYSCTL_DESCR("Interrupt Throttling"),
1927 iavf_sysctl_itr_handler, 0,
1928 (void *)sc, 0, CTL_CREATE, CTL_EOL);
1929 if (error)
1930 goto out;
1931
1932 error = sysctl_createv(log, 0, &rxnode, NULL,
1933 CTLFLAG_READONLY, CTLTYPE_INT, "descriptor_num",
1934 	    SYSCTL_DESCR("the number of Rx descriptors"),
1935 NULL, 0, &sc->sc_rx_ring_ndescs, 0, CTL_CREATE, CTL_EOL);
1936 if (error)
1937 goto out;
1938
1939 error = sysctl_createv(log, 0, &rxnode, NULL,
1940 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
1941 SYSCTL_DESCR("max number of Rx packets"
1942 " to process for interrupt processing"),
1943 NULL, 0, &sc->sc_rx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
1944 if (error)
1945 goto out;
1946
1947 error = sysctl_createv(log, 0, &rxnode, NULL,
1948 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
1949 SYSCTL_DESCR("max number of Rx packets"
1950 " to process for deferred processing"),
1951 NULL, 0, &sc->sc_rx_process_limit, 0, CTL_CREATE, CTL_EOL);
1952 if (error)
1953 goto out;
1954
1955 error = sysctl_createv(log, 0, &rnode, &txnode,
1956 0, CTLTYPE_NODE, "tx",
1957 SYSCTL_DESCR("iavf information and settings for Tx"),
1958 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
1959 if (error)
1960 goto out;
1961
1962 error = sysctl_createv(log, 0, &txnode, NULL,
1963 CTLFLAG_READWRITE, CTLTYPE_INT, "itr",
1964 SYSCTL_DESCR("Interrupt Throttling"),
1965 iavf_sysctl_itr_handler, 0,
1966 (void *)sc, 0, CTL_CREATE, CTL_EOL);
1967 if (error)
1968 goto out;
1969
1970 error = sysctl_createv(log, 0, &txnode, NULL,
1971 CTLFLAG_READONLY, CTLTYPE_INT, "descriptor_num",
1972 SYSCTL_DESCR("the number of Tx descriptors"),
1973 NULL, 0, &sc->sc_tx_ring_ndescs, 0, CTL_CREATE, CTL_EOL);
1974 if (error)
1975 goto out;
1976
1977 error = sysctl_createv(log, 0, &txnode, NULL,
1978 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
1979 SYSCTL_DESCR("max number of Tx packets"
1980 " to process for interrupt processing"),
1981 NULL, 0, &sc->sc_tx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
1982 if (error)
1983 goto out;
1984
1985 error = sysctl_createv(log, 0, &txnode, NULL,
1986 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
1987 SYSCTL_DESCR("max number of Tx packets"
1988 " to process for deferred processing"),
1989 NULL, 0, &sc->sc_tx_process_limit, 0, CTL_CREATE, CTL_EOL);
1990 if (error)
1991 goto out;
1992 out:
1993 return error;
1994 }
1995
1996 static void
1997 iavf_teardown_sysctls(struct iavf_softc *sc)
1998 {
1999
2000 sysctl_teardown(&sc->sc_sysctllog);
2001 }
2002
2003 static int
2004 iavf_setup_stats(struct iavf_softc *sc)
2005 {
2006 struct iavf_stat_counters *isc;
2007 const char *dn;
2008
2009 dn = device_xname(sc->sc_dev);
2010 isc = &sc->sc_stat_counters;
2011
2012 iavf_evcnt_attach(&isc->isc_rx_bytes, dn, "Rx bytes");
2013 iavf_evcnt_attach(&isc->isc_rx_unicast, dn, "Rx unicast");
2014 iavf_evcnt_attach(&isc->isc_rx_multicast, dn, "Rx multicast");
2015 iavf_evcnt_attach(&isc->isc_rx_broadcast, dn, "Rx broadcast");
2016 iavf_evcnt_attach(&isc->isc_rx_discards, dn, "Rx discards");
2017 iavf_evcnt_attach(&isc->isc_rx_unknown_protocol,
2018 dn, "Rx unknown protocol");
2019
2020 iavf_evcnt_attach(&isc->isc_tx_bytes, dn, "Tx bytes");
2021 iavf_evcnt_attach(&isc->isc_tx_unicast, dn, "Tx unicast");
2022 iavf_evcnt_attach(&isc->isc_tx_multicast, dn, "Tx multicast");
2023 iavf_evcnt_attach(&isc->isc_tx_broadcast, dn, "Tx broadcast");
2024 iavf_evcnt_attach(&isc->isc_tx_discards, dn, "Tx discards");
2025 iavf_evcnt_attach(&isc->isc_tx_errors, dn, "Tx errors");
2026
2027 return 0;
2028 }
2029
2030 static void
2031 iavf_teardown_stats(struct iavf_softc *sc)
2032 {
2033 struct iavf_stat_counters *isc;
2034
2035 isc = &sc->sc_stat_counters;
2036
2037 evcnt_detach(&isc->isc_rx_bytes);
2038 evcnt_detach(&isc->isc_rx_unicast);
2039 evcnt_detach(&isc->isc_rx_multicast);
2040 evcnt_detach(&isc->isc_rx_broadcast);
2041 evcnt_detach(&isc->isc_rx_discards);
2042 evcnt_detach(&isc->isc_rx_unknown_protocol);
2043
2044 evcnt_detach(&isc->isc_tx_bytes);
2045 evcnt_detach(&isc->isc_tx_unicast);
2046 evcnt_detach(&isc->isc_tx_multicast);
2047 evcnt_detach(&isc->isc_tx_broadcast);
2048 evcnt_detach(&isc->isc_tx_discards);
2049 evcnt_detach(&isc->isc_tx_errors);
2050
2051 }
2052
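/*
 * iavf_init_admin_queue:
 *	Reset the admin send/receive queue indices, program the base
 *	address and length registers, and verify that the base address
 *	registers read back correctly before marking the device alive.
 */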
2053 static int
2054 iavf_init_admin_queue(struct iavf_softc *sc)
2055 {
2056 uint32_t reg;
2057
2058 sc->sc_atq_cons = 0;
2059 sc->sc_atq_prod = 0;
2060
2061 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
2062 0, IXL_DMA_LEN(&sc->sc_atq),
2063 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2064 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
2065 0, IXL_DMA_LEN(&sc->sc_arq),
2066 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2067
2068 iavf_wr(sc, sc->sc_aq_regs->atq_head, 0);
2069 iavf_wr(sc, sc->sc_aq_regs->arq_head, 0);
2070 iavf_wr(sc, sc->sc_aq_regs->atq_tail, 0);
2071 iavf_wr(sc, sc->sc_aq_regs->arq_tail, 0);
2072
2073 iavf_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
2074
2075 iavf_wr(sc, sc->sc_aq_regs->atq_bal,
2076 ixl_dmamem_lo(&sc->sc_atq));
2077 iavf_wr(sc, sc->sc_aq_regs->atq_bah,
2078 ixl_dmamem_hi(&sc->sc_atq));
2079 iavf_wr(sc, sc->sc_aq_regs->atq_len,
2080 sc->sc_aq_regs->atq_len_enable | IAVF_AQ_NUM);
2081
2082 iavf_wr(sc, sc->sc_aq_regs->arq_bal,
2083 ixl_dmamem_lo(&sc->sc_arq));
2084 iavf_wr(sc, sc->sc_aq_regs->arq_bah,
2085 ixl_dmamem_hi(&sc->sc_arq));
2086 iavf_wr(sc, sc->sc_aq_regs->arq_len,
2087 sc->sc_aq_regs->arq_len_enable | IAVF_AQ_NUM);
2088
2089 iavf_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
2090
2091 reg = iavf_rd(sc, sc->sc_aq_regs->atq_bal);
2092 if (reg != ixl_dmamem_lo(&sc->sc_atq))
2093 goto fail;
2094
2095 reg = iavf_rd(sc, sc->sc_aq_regs->arq_bal);
2096 if (reg != ixl_dmamem_lo(&sc->sc_arq))
2097 goto fail;
2098
2099 sc->sc_dead = false;
2100 return 0;
2101
2102 fail:
2103 iavf_wr(sc, sc->sc_aq_regs->atq_len, 0);
2104 iavf_wr(sc, sc->sc_aq_regs->arq_len, 0);
2105 return -1;
2106 }
2107
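/*
 * iavf_cleanup_admin_queue:
 *	Disable the admin queues in hardware, reset the software
 *	indices, zero the descriptor rings and move any live buffers
 *	back onto the idle lists.
 */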
2108 static void
2109 iavf_cleanup_admin_queue(struct iavf_softc *sc)
2110 {
2111 struct ixl_aq_buf *aqb;
2112
2113 iavf_wr(sc, sc->sc_aq_regs->atq_head, 0);
2114 iavf_wr(sc, sc->sc_aq_regs->arq_head, 0);
2115 iavf_wr(sc, sc->sc_aq_regs->atq_tail, 0);
2116 iavf_wr(sc, sc->sc_aq_regs->arq_tail, 0);
2117
2118 iavf_wr(sc, sc->sc_aq_regs->atq_bal, 0);
2119 iavf_wr(sc, sc->sc_aq_regs->atq_bah, 0);
2120 iavf_wr(sc, sc->sc_aq_regs->atq_len, 0);
2121
2122 iavf_wr(sc, sc->sc_aq_regs->arq_bal, 0);
2123 iavf_wr(sc, sc->sc_aq_regs->arq_bah, 0);
2124 iavf_wr(sc, sc->sc_aq_regs->arq_len, 0);
2125 iavf_flush(sc);
2126
2127 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
2128 0, IXL_DMA_LEN(&sc->sc_arq),
2129 BUS_DMASYNC_POSTREAD);
2130 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
2131 0, IXL_DMA_LEN(&sc->sc_atq),
2132 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2133
2134 sc->sc_atq_cons = 0;
2135 sc->sc_atq_prod = 0;
2136 sc->sc_arq_cons = 0;
2137 sc->sc_arq_prod = 0;
2138
2139 memset(IXL_DMA_KVA(&sc->sc_arq), 0, IXL_DMA_LEN(&sc->sc_arq));
2140 memset(IXL_DMA_KVA(&sc->sc_atq), 0, IXL_DMA_LEN(&sc->sc_atq));
2141
2142 while ((aqb = iavf_aqb_get_locked(&sc->sc_arq_live)) != NULL) {
2143 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, aqb->aqb_size,
2144 BUS_DMASYNC_POSTREAD);
2145 iavf_aqb_put_locked(&sc->sc_arq_idle, aqb);
2146 }
2147
2148 while ((aqb = iavf_aqb_get_locked(&sc->sc_atq_live)) != NULL) {
2149 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, aqb->aqb_size,
2150 BUS_DMASYNC_POSTREAD);
2151 iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
2152 }
2153 }
2154
2155 static unsigned int
2156 iavf_calc_msix_count(struct iavf_softc *sc)
2157 {
2158 struct pci_attach_args *pa;
2159 int count;
2160
2161 pa = &sc->sc_pa;
2162 count = pci_msix_count(pa->pa_pc, pa->pa_tag);
2163 if (count < 0) {
2164 		IAVF_LOG(sc, LOG_DEBUG, "MSIX config error\n");
2165 count = 0;
2166 }
2167
2168 return MIN(sc->sc_max_vectors, (unsigned int)count);
2169 }
2170
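/*
 * iavf_calc_queue_pair_size:
 *	The number of queue pairs is limited by the number of CPUs,
 *	the queues provided for the VSI, the usable MSI-X vectors
 *	(one is reserved for the misc interrupt) and iavf_params.max_qps.
 */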
2171 static unsigned int
2172 iavf_calc_queue_pair_size(struct iavf_softc *sc)
2173 {
2174 unsigned int nqp, nvec;
2175
2176 nvec = iavf_calc_msix_count(sc);
2177 if (sc->sc_max_vectors > 1) {
2178 		/* reserve one vector for the misc (admin queue) interrupt */
2179 nvec -= 1;
2180 }
2181
2182 nqp = ncpu;
2183 nqp = MIN(nqp, sc->sc_nqps_vsi);
2184 nqp = MIN(nqp, nvec);
2185 nqp = MIN(nqp, (unsigned int)iavf_params.max_qps);
2186
2187 return nqp;
2188 }
2189
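/*
 * iavf_txr_alloc:
 *	Allocate a Tx ring: descriptor DMA memory, per-slot DMA maps,
 *	the pcq used by if_transmit, a softint for deferred transmit
 *	and the event counters attached to the ring.
 */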
2190 static struct iavf_tx_ring *
2191 iavf_txr_alloc(struct iavf_softc *sc, unsigned int qid)
2192 {
2193 struct iavf_tx_ring *txr;
2194 struct iavf_tx_map *maps;
2195 unsigned int i;
2196 int error;
2197
2198 txr = kmem_zalloc(sizeof(*txr), KM_NOSLEEP);
2199 if (txr == NULL)
2200 return NULL;
2201
2202 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_tx_ring_ndescs,
2203 KM_NOSLEEP);
2204 if (maps == NULL)
2205 goto free_txr;
2206
2207 if (iavf_dmamem_alloc(sc->sc_dmat, &txr->txr_mem,
2208 sizeof(struct ixl_tx_desc) * sc->sc_tx_ring_ndescs,
2209 IAVF_TX_QUEUE_ALIGN) != 0) {
2210 goto free_maps;
2211 }
2212
2213 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2214 error = bus_dmamap_create(sc->sc_dmat, IAVF_TX_PKT_MAXSIZE,
2215 IAVF_TX_PKT_DESCS, IAVF_TX_PKT_MAXSIZE, 0,
2216 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &maps[i].txm_map);
2217 if (error)
2218 goto destroy_maps;
2219 }
2220
2221 txr->txr_intrq = pcq_create(sc->sc_tx_ring_ndescs, KM_NOSLEEP);
2222 if (txr->txr_intrq == NULL)
2223 goto destroy_maps;
2224
2225 txr->txr_si = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE,
2226 iavf_deferred_transmit, txr);
2227 if (txr->txr_si == NULL)
2228 goto destroy_pcq;
2229
2230 snprintf(txr->txr_name, sizeof(txr->txr_name), "%s-tx%d",
2231 device_xname(sc->sc_dev), qid);
2232
2233 iavf_evcnt_attach(&txr->txr_defragged,
2234 	    txr->txr_name, "m_defrag succeeded");
2235 iavf_evcnt_attach(&txr->txr_defrag_failed,
2236 txr->txr_name, "m_defrag failed");
2237 iavf_evcnt_attach(&txr->txr_pcqdrop,
2238 txr->txr_name, "Dropped in pcq");
2239 iavf_evcnt_attach(&txr->txr_transmitdef,
2240 txr->txr_name, "Deferred transmit");
2241 iavf_evcnt_attach(&txr->txr_watchdogto,
2242 	    txr->txr_name, "Watchdog timeout on queue");
2243 iavf_evcnt_attach(&txr->txr_defer,
2244 txr->txr_name, "Handled queue in softint/workqueue");
2245
2246 evcnt_attach_dynamic(&txr->txr_intr, EVCNT_TYPE_INTR, NULL,
2247 txr->txr_name, "Interrupt on queue");
2248
2249 txr->txr_qid = qid;
2250 txr->txr_sc = sc;
2251 txr->txr_maps = maps;
2252 txr->txr_prod = txr->txr_cons = 0;
2253 txr->txr_tail = I40E_QTX_TAIL1(qid);
2254 mutex_init(&txr->txr_lock, MUTEX_DEFAULT, IPL_NET);
2255
2256 return txr;
2257 destroy_pcq:
2258 pcq_destroy(txr->txr_intrq);
2259 destroy_maps:
2260 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2261 if (maps[i].txm_map == NULL)
2262 continue;
2263 bus_dmamap_destroy(sc->sc_dmat, maps[i].txm_map);
2264 }
2265
2266 iavf_dmamem_free(sc->sc_dmat, &txr->txr_mem);
2267 free_maps:
2268 kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs);
2269 free_txr:
2270 kmem_free(txr, sizeof(*txr));
2271 return NULL;
2272 }
2273
2274 static void
2275 iavf_txr_free(struct iavf_softc *sc, struct iavf_tx_ring *txr)
2276 {
2277 struct iavf_tx_map *maps;
2278 unsigned int i;
2279
2280 maps = txr->txr_maps;
2281 if (maps != NULL) {
2282 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2283 if (maps[i].txm_map == NULL)
2284 continue;
2285 bus_dmamap_destroy(sc->sc_dmat, maps[i].txm_map);
2286 }
2287 kmem_free(txr->txr_maps,
2288 sizeof(maps[0]) * sc->sc_tx_ring_ndescs);
2289 txr->txr_maps = NULL;
2290 }
2291
2292 evcnt_detach(&txr->txr_defragged);
2293 evcnt_detach(&txr->txr_defrag_failed);
2294 evcnt_detach(&txr->txr_pcqdrop);
2295 evcnt_detach(&txr->txr_transmitdef);
2296 evcnt_detach(&txr->txr_watchdogto);
2297 evcnt_detach(&txr->txr_defer);
2298 evcnt_detach(&txr->txr_intr);
2299
2300 iavf_dmamem_free(sc->sc_dmat, &txr->txr_mem);
2301 softint_disestablish(txr->txr_si);
2302 pcq_destroy(txr->txr_intrq);
2303 mutex_destroy(&txr->txr_lock);
2304 kmem_free(txr, sizeof(*txr));
2305 }
2306
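/*
 * iavf_rxr_alloc:
 *	Allocate an Rx ring: descriptor DMA memory, per-slot DMA maps
 *	and the event counters attached to the ring.
 */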
2307 static struct iavf_rx_ring *
2308 iavf_rxr_alloc(struct iavf_softc *sc, unsigned int qid)
2309 {
2310 struct iavf_rx_ring *rxr;
2311 struct iavf_rx_map *maps;
2312 unsigned int i;
2313 int error;
2314
2315 rxr = kmem_zalloc(sizeof(*rxr), KM_NOSLEEP);
2316 if (rxr == NULL)
2317 return NULL;
2318
2319 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_rx_ring_ndescs,
2320 KM_NOSLEEP);
2321 if (maps == NULL)
2322 goto free_rxr;
2323
2324 if (iavf_dmamem_alloc(sc->sc_dmat, &rxr->rxr_mem,
2325 sizeof(struct ixl_rx_rd_desc_32) * sc->sc_rx_ring_ndescs,
2326 IAVF_RX_QUEUE_ALIGN) != 0)
2327 goto free_maps;
2328
2329 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2330 error = bus_dmamap_create(sc->sc_dmat, IAVF_MCLBYTES,
2331 1, IAVF_MCLBYTES, 0,
2332 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &maps[i].rxm_map);
2333 if (error)
2334 goto destroy_maps;
2335 }
2336
2337 snprintf(rxr->rxr_name, sizeof(rxr->rxr_name), "%s-rx%d",
2338 device_xname(sc->sc_dev), qid);
2339
2340 iavf_evcnt_attach(&rxr->rxr_mgethdr_failed,
2341 rxr->rxr_name, "MGETHDR failed");
2342 iavf_evcnt_attach(&rxr->rxr_mgetcl_failed,
2343 rxr->rxr_name, "MCLGET failed");
2344 iavf_evcnt_attach(&rxr->rxr_mbuf_load_failed,
2345 rxr->rxr_name, "bus_dmamap_load_mbuf failed");
2346 iavf_evcnt_attach(&rxr->rxr_defer,
2347 rxr->rxr_name, "Handled queue in softint/workqueue");
2348
2349 evcnt_attach_dynamic(&rxr->rxr_intr, EVCNT_TYPE_INTR, NULL,
2350 rxr->rxr_name, "Interrupt on queue");
2351
2352 rxr->rxr_qid = qid;
2353 rxr->rxr_sc = sc;
2354 rxr->rxr_cons = rxr->rxr_prod = 0;
2355 rxr->rxr_m_head = NULL;
2356 rxr->rxr_m_tail = &rxr->rxr_m_head;
2357 rxr->rxr_maps = maps;
2358 rxr->rxr_tail = I40E_QRX_TAIL1(qid);
2359 mutex_init(&rxr->rxr_lock, MUTEX_DEFAULT, IPL_NET);
2360
2361 return rxr;
2362
2363 destroy_maps:
2364 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2365 if (maps[i].rxm_map == NULL)
2366 continue;
2367 bus_dmamap_destroy(sc->sc_dmat, maps[i].rxm_map);
2368 }
2369 iavf_dmamem_free(sc->sc_dmat, &rxr->rxr_mem);
2370 free_maps:
2371 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs);
2372 free_rxr:
2373 kmem_free(rxr, sizeof(*rxr));
2374
2375 return NULL;
2376 }
2377
2378 static void
2379 iavf_rxr_free(struct iavf_softc *sc, struct iavf_rx_ring *rxr)
2380 {
2381 struct iavf_rx_map *maps;
2382 unsigned int i;
2383
2384 maps = rxr->rxr_maps;
2385 if (maps != NULL) {
2386 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2387 if (maps[i].rxm_map == NULL)
2388 continue;
2389 bus_dmamap_destroy(sc->sc_dmat, maps[i].rxm_map);
2390 }
2391 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs);
2392 rxr->rxr_maps = NULL;
2393 }
2394
2395 evcnt_detach(&rxr->rxr_mgethdr_failed);
2396 evcnt_detach(&rxr->rxr_mgetcl_failed);
2397 evcnt_detach(&rxr->rxr_mbuf_load_failed);
2398 evcnt_detach(&rxr->rxr_defer);
2399 evcnt_detach(&rxr->rxr_intr);
2400
2401 iavf_dmamem_free(sc->sc_dmat, &rxr->rxr_mem);
2402 mutex_destroy(&rxr->rxr_lock);
2403 kmem_free(rxr, sizeof(*rxr));
2404 }
2405
2406 static int
2407 iavf_queue_pairs_alloc(struct iavf_softc *sc)
2408 {
2409 struct iavf_queue_pair *qp;
2410 unsigned int i, num;
2411
2412 num = iavf_calc_queue_pair_size(sc);
2413 	if (num == 0) {
2414 return -1;
2415 }
2416
2417 sc->sc_qps = kmem_zalloc(sizeof(sc->sc_qps[0]) * num, KM_NOSLEEP);
2418 if (sc->sc_qps == NULL) {
2419 return -1;
2420 }
2421
2422 for (i = 0; i < num; i++) {
2423 qp = &sc->sc_qps[i];
2424
2425 qp->qp_rxr = iavf_rxr_alloc(sc, i);
2426 qp->qp_txr = iavf_txr_alloc(sc, i);
2427
2428 if (qp->qp_rxr == NULL || qp->qp_txr == NULL)
2429 goto free;
2430
2431 qp->qp_si = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE,
2432 iavf_handle_queue, qp);
2433 if (qp->qp_si == NULL)
2434 goto free;
2435 }
2436
2437 sc->sc_nqps_alloc = num;
2438 return 0;
2439 free:
2440 for (i = 0; i < num; i++) {
2441 qp = &sc->sc_qps[i];
2442
2443 if (qp->qp_si != NULL)
2444 softint_disestablish(qp->qp_si);
2445 if (qp->qp_rxr != NULL)
2446 iavf_rxr_free(sc, qp->qp_rxr);
2447 if (qp->qp_txr != NULL)
2448 iavf_txr_free(sc, qp->qp_txr);
2449 }
2450
2451 kmem_free(sc->sc_qps, sizeof(sc->sc_qps[0]) * num);
2452 sc->sc_qps = NULL;
2453
2454 return -1;
2455 }
2456
2457 static void
2458 iavf_queue_pairs_free(struct iavf_softc *sc)
2459 {
2460 struct iavf_queue_pair *qp;
2461 unsigned int i;
2462 size_t sz;
2463
2464 if (sc->sc_qps == NULL)
2465 return;
2466
2467 for (i = 0; i < sc->sc_nqps_alloc; i++) {
2468 qp = &sc->sc_qps[i];
2469
2470 if (qp->qp_si != NULL)
2471 softint_disestablish(qp->qp_si);
2472 if (qp->qp_rxr != NULL)
2473 iavf_rxr_free(sc, qp->qp_rxr);
2474 if (qp->qp_txr != NULL)
2475 iavf_txr_free(sc, qp->qp_txr);
2476 }
2477
2478 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqps_alloc;
2479 kmem_free(sc->sc_qps, sz);
2480 sc->sc_qps = NULL;
2481 sc->sc_nqps_alloc = 0;
2482 }
2483
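/*
 * iavf_rxfill:
 *	Refill the unrefreshed Rx descriptors with fresh mbuf clusters
 *	and advance the ring tail register.  Returns -1 if an mbuf or
 *	a DMA load could not be obtained.
 */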
2484 static int
2485 iavf_rxfill(struct iavf_softc *sc, struct iavf_rx_ring *rxr)
2486 {
2487 struct ixl_rx_rd_desc_32 *ring, *rxd;
2488 struct iavf_rx_map *rxm;
2489 bus_dmamap_t map;
2490 struct mbuf *m;
2491 unsigned int slots, prod, mask;
2492 	int error, post = 0;
2493
2494 slots = ixl_rxr_unrefreshed(rxr->rxr_prod, rxr->rxr_cons,
2495 sc->sc_rx_ring_ndescs);
2496
2497 if (slots == 0)
2498 return 0;
2499
2500 error = 0;
2501 prod = rxr->rxr_prod;
2502
2503 ring = IXL_DMA_KVA(&rxr->rxr_mem);
2504 mask = sc->sc_rx_ring_ndescs - 1;
2505
2506 do {
2507 rxm = &rxr->rxr_maps[prod];
2508
2509 MGETHDR(m, M_DONTWAIT, MT_DATA);
2510 if (m == NULL) {
2511 rxr->rxr_mgethdr_failed.ev_count++;
2512 error = -1;
2513 break;
2514 }
2515
2516 MCLGET(m, M_DONTWAIT);
2517 if (!ISSET(m->m_flags, M_EXT)) {
2518 rxr->rxr_mgetcl_failed.ev_count++;
2519 error = -1;
2520 m_freem(m);
2521 break;
2522 }
2523
2524 m->m_len = m->m_pkthdr.len = MCLBYTES;
2525 m_adj(m, ETHER_ALIGN);
2526
2527 map = rxm->rxm_map;
2528
2529 if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
2530 BUS_DMA_READ|BUS_DMA_NOWAIT) != 0) {
2531 rxr->rxr_mbuf_load_failed.ev_count++;
2532 error = -1;
2533 m_freem(m);
2534 break;
2535 }
2536
2537 rxm->rxm_m = m;
2538
2539 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2540 BUS_DMASYNC_PREREAD);
2541
2542 rxd = &ring[prod];
2543 rxd->paddr = htole64(map->dm_segs[0].ds_addr);
2544 rxd->haddr = htole64(0);
2545
2546 prod++;
2547 prod &= mask;
2548 post = 1;
2549 } while (--slots);
2550
2551 if (post) {
2552 rxr->rxr_prod = prod;
2553 iavf_wr(sc, rxr->rxr_tail, prod);
2554 }
2555
2556 return error;
2557 }
2558
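/*
 * iavf_rx_csum:
 *	Translate the packet type and checksum error bits of an Rx
 *	descriptor into the corresponding mbuf csum_flags.
 */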
2559 static inline void
2560 iavf_rx_csum(struct mbuf *m, uint64_t qword)
2561 {
2562 int flags_mask;
2563
2564 if (!ISSET(qword, IXL_RX_DESC_L3L4P)) {
2565 /* No L3 or L4 checksum was calculated */
2566 return;
2567 }
2568
2569 switch (__SHIFTOUT(qword, IXL_RX_DESC_PTYPE_MASK)) {
2570 case IXL_RX_DESC_PTYPE_IPV4FRAG:
2571 case IXL_RX_DESC_PTYPE_IPV4:
2572 case IXL_RX_DESC_PTYPE_SCTPV4:
2573 case IXL_RX_DESC_PTYPE_ICMPV4:
2574 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
2575 break;
2576 case IXL_RX_DESC_PTYPE_TCPV4:
2577 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
2578 flags_mask |= M_CSUM_TCPv4 | M_CSUM_TCP_UDP_BAD;
2579 break;
2580 case IXL_RX_DESC_PTYPE_UDPV4:
2581 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
2582 flags_mask |= M_CSUM_UDPv4 | M_CSUM_TCP_UDP_BAD;
2583 break;
2584 case IXL_RX_DESC_PTYPE_TCPV6:
2585 flags_mask = M_CSUM_TCPv6 | M_CSUM_TCP_UDP_BAD;
2586 break;
2587 case IXL_RX_DESC_PTYPE_UDPV6:
2588 flags_mask = M_CSUM_UDPv6 | M_CSUM_TCP_UDP_BAD;
2589 break;
2590 default:
2591 flags_mask = 0;
2592 }
2593
2594 m->m_pkthdr.csum_flags |= (flags_mask & (M_CSUM_IPv4 |
2595 M_CSUM_TCPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv4 | M_CSUM_UDPv6));
2596
2597 if (ISSET(qword, IXL_RX_DESC_IPE)) {
2598 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_IPv4_BAD);
2599 }
2600
2601 if (ISSET(qword, IXL_RX_DESC_L4E)) {
2602 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_TCP_UDP_BAD);
2603 }
2604 }
2605
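/*
 * iavf_rxeof:
 *	Process up to rxlimit completed Rx descriptors, reassembling
 *	multi-descriptor packets and passing them to if_percpuq.
 *	Returns nonzero if descriptors remain to be processed.
 */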
2606 static int
2607 iavf_rxeof(struct iavf_softc *sc, struct iavf_rx_ring *rxr, u_int rxlimit,
2608 struct evcnt *ecnt)
2609 {
2610 struct ifnet *ifp = &sc->sc_ec.ec_if;
2611 struct ixl_rx_wb_desc_32 *ring, *rxd;
2612 struct iavf_rx_map *rxm;
2613 bus_dmamap_t map;
2614 unsigned int cons, prod;
2615 struct mbuf *m;
2616 uint64_t word, word0;
2617 unsigned int len;
2618 unsigned int mask;
2619 int done = 0, more = 0;
2620
2621 KASSERT(mutex_owned(&rxr->rxr_lock));
2622
2623 if (!ISSET(ifp->if_flags, IFF_RUNNING))
2624 return 0;
2625
2626 prod = rxr->rxr_prod;
2627 cons = rxr->rxr_cons;
2628
2629 if (cons == prod)
2630 return 0;
2631
2632 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
2633 0, IXL_DMA_LEN(&rxr->rxr_mem),
2634 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2635
2636 ring = IXL_DMA_KVA(&rxr->rxr_mem);
2637 mask = sc->sc_rx_ring_ndescs - 1;
2638
2639 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
2640
2641 do {
2642 if (rxlimit-- <= 0) {
2643 more = 1;
2644 break;
2645 }
2646
2647 rxd = &ring[cons];
2648
2649 word = le64toh(rxd->qword1);
2650
2651 if (!ISSET(word, IXL_RX_DESC_DD))
2652 break;
2653
2654 rxm = &rxr->rxr_maps[cons];
2655
2656 map = rxm->rxm_map;
2657 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2658 BUS_DMASYNC_POSTREAD);
2659 bus_dmamap_unload(sc->sc_dmat, map);
2660
2661 m = rxm->rxm_m;
2662 rxm->rxm_m = NULL;
2663
2664 KASSERT(m != NULL);
2665
2666 len = (word & IXL_RX_DESC_PLEN_MASK) >> IXL_RX_DESC_PLEN_SHIFT;
2667 m->m_len = len;
2668 m->m_pkthdr.len = 0;
2669
2670 m->m_next = NULL;
2671 *rxr->rxr_m_tail = m;
2672 rxr->rxr_m_tail = &m->m_next;
2673
2674 m = rxr->rxr_m_head;
2675 m->m_pkthdr.len += len;
2676
2677 if (ISSET(word, IXL_RX_DESC_EOP)) {
2678 word0 = le64toh(rxd->qword0);
2679
2680 if (ISSET(word, IXL_RX_DESC_L2TAG1P)) {
2681 vlan_set_tag(m,
2682 __SHIFTOUT(word0, IXL_RX_DESC_L2TAG1_MASK));
2683 }
2684
2685 if ((ifp->if_capenable & IAVF_IFCAP_RXCSUM) != 0)
2686 iavf_rx_csum(m, word);
2687
2688 if (!ISSET(word,
2689 IXL_RX_DESC_RXE | IXL_RX_DESC_OVERSIZE)) {
2690 m_set_rcvif(m, ifp);
2691 if_statinc_ref(nsr, if_ipackets);
2692 if_statadd_ref(nsr, if_ibytes,
2693 m->m_pkthdr.len);
2694 if_percpuq_enqueue(sc->sc_ipq, m);
2695 } else {
2696 if_statinc_ref(nsr, if_ierrors);
2697 m_freem(m);
2698 }
2699
2700 rxr->rxr_m_head = NULL;
2701 rxr->rxr_m_tail = &rxr->rxr_m_head;
2702 }
2703
2704 cons++;
2705 cons &= mask;
2706
2707 done = 1;
2708 } while (cons != prod);
2709
2710 if (done) {
2711 ecnt->ev_count++;
2712 rxr->rxr_cons = cons;
2713 if (iavf_rxfill(sc, rxr) == -1)
2714 if_statinc_ref(nsr, if_iqdrops);
2715 }
2716
2717 IF_STAT_PUTREF(ifp);
2718
2719 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
2720 0, IXL_DMA_LEN(&rxr->rxr_mem),
2721 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2722
2723 return more;
2724 }
2725
2726 static void
2727 iavf_rxr_clean(struct iavf_softc *sc, struct iavf_rx_ring *rxr)
2728 {
2729 struct iavf_rx_map *maps, *rxm;
2730 bus_dmamap_t map;
2731 unsigned int i;
2732
2733 KASSERT(mutex_owned(&rxr->rxr_lock));
2734
2735 maps = rxr->rxr_maps;
2736 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2737 rxm = &maps[i];
2738
2739 if (rxm->rxm_m == NULL)
2740 continue;
2741
2742 map = rxm->rxm_map;
2743 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2744 BUS_DMASYNC_POSTWRITE);
2745 bus_dmamap_unload(sc->sc_dmat, map);
2746
2747 m_freem(rxm->rxm_m);
2748 rxm->rxm_m = NULL;
2749 }
2750
2751 m_freem(rxr->rxr_m_head);
2752 rxr->rxr_m_head = NULL;
2753 rxr->rxr_m_tail = &rxr->rxr_m_head;
2754
2755 memset(IXL_DMA_KVA(&rxr->rxr_mem), 0, IXL_DMA_LEN(&rxr->rxr_mem));
2756 rxr->rxr_prod = rxr->rxr_cons = 0;
2757 }
2758
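/*
 * iavf_txeof:
 *	Reclaim up to txlimit completed Tx descriptors, freeing the
 *	transmitted mbufs and updating the interface statistics.
 *	Returns nonzero if descriptors remain to be processed.
 */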
2759 static int
2760 iavf_txeof(struct iavf_softc *sc, struct iavf_tx_ring *txr, u_int txlimit,
2761 struct evcnt *ecnt)
2762 {
2763 struct ifnet *ifp = &sc->sc_ec.ec_if;
2764 struct ixl_tx_desc *ring, *txd;
2765 struct iavf_tx_map *txm;
2766 struct mbuf *m;
2767 bus_dmamap_t map;
2768 unsigned int cons, prod, last;
2769 unsigned int mask;
2770 uint64_t dtype;
2771 int done = 0, more = 0;
2772
2773 KASSERT(mutex_owned(&txr->txr_lock));
2774
2775 prod = txr->txr_prod;
2776 cons = txr->txr_cons;
2777
2778 if (cons == prod)
2779 return 0;
2780
2781 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2782 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTREAD);
2783
2784 ring = IXL_DMA_KVA(&txr->txr_mem);
2785 mask = sc->sc_tx_ring_ndescs - 1;
2786
2787 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
2788
2789 do {
2790 if (txlimit-- <= 0) {
2791 more = 1;
2792 break;
2793 }
2794
2795 txm = &txr->txr_maps[cons];
2796 last = txm->txm_eop;
2797 txd = &ring[last];
2798
2799 dtype = txd->cmd & htole64(IXL_TX_DESC_DTYPE_MASK);
2800 if (dtype != htole64(IXL_TX_DESC_DTYPE_DONE))
2801 break;
2802
2803 map = txm->txm_map;
2804
2805 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2806 BUS_DMASYNC_POSTWRITE);
2807 bus_dmamap_unload(sc->sc_dmat, map);
2808
2809 m = txm->txm_m;
2810 if (m != NULL) {
2811 if_statinc_ref(nsr, if_opackets);
2812 if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
2813 if (ISSET(m->m_flags, M_MCAST))
2814 if_statinc_ref(nsr, if_omcasts);
2815 m_freem(m);
2816 }
2817
2818 txm->txm_m = NULL;
2819 txm->txm_eop = -1;
2820
2821 cons = last + 1;
2822 cons &= mask;
2823 done = 1;
2824 } while (cons != prod);
2825
2826 IF_STAT_PUTREF(ifp);
2827
2828 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2829 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREREAD);
2830
2831 txr->txr_cons = cons;
2832
2833 if (done) {
2834 ecnt->ev_count++;
2835 softint_schedule(txr->txr_si);
2836 if (txr->txr_qid == 0) {
2837 CLR(ifp->if_flags, IFF_OACTIVE);
2838 if_schedule_deferred_start(ifp);
2839 }
2840 }
2841
2842 if (txr->txr_cons == txr->txr_prod) {
2843 txr->txr_watchdog = IAVF_WATCHDOG_STOP;
2844 }
2845
2846 return more;
2847 }
2848
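/*
 * iavf_load_mbuf:
 *	Load an mbuf chain for DMA; if it has too many segments,
 *	try m_defrag() once and reload.
 */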
2849 static inline int
2850 iavf_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf **m0,
2851 struct iavf_tx_ring *txr)
2852 {
2853 struct mbuf *m;
2854 int error;
2855
2856 KASSERT(mutex_owned(&txr->txr_lock));
2857
2858 m = *m0;
2859
2860 error = bus_dmamap_load_mbuf(dmat, map, m,
2861 BUS_DMA_STREAMING|BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2862 if (error != EFBIG)
2863 return error;
2864
2865 m = m_defrag(m, M_DONTWAIT);
2866 if (m != NULL) {
2867 *m0 = m;
2868 txr->txr_defragged.ev_count++;
2869 error = bus_dmamap_load_mbuf(dmat, map, m,
2870 BUS_DMA_STREAMING|BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2871 } else {
2872 txr->txr_defrag_failed.ev_count++;
2873 error = ENOBUFS;
2874 }
2875
2876 return error;
2877 }
2878
2879 static inline int
2880 iavf_tx_setup_offloads(struct mbuf *m, uint64_t *cmd_txd)
2881 {
2882 struct ether_header *eh;
2883 size_t len;
2884 uint64_t cmd;
2885
2886 cmd = 0;
2887
2888 eh = mtod(m, struct ether_header *);
2889 switch (htons(eh->ether_type)) {
2890 case ETHERTYPE_IP:
2891 case ETHERTYPE_IPV6:
2892 len = ETHER_HDR_LEN;
2893 break;
2894 case ETHERTYPE_VLAN:
2895 len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2896 break;
2897 default:
2898 len = 0;
2899 }
2900 cmd |= ((len >> 1) << IXL_TX_DESC_MACLEN_SHIFT);
2901
2902 if (m->m_pkthdr.csum_flags &
2903 (M_CSUM_TSOv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
2904 cmd |= IXL_TX_DESC_CMD_IIPT_IPV4;
2905 }
2906 if (m->m_pkthdr.csum_flags & M_CSUM_IPv4) {
2907 cmd |= IXL_TX_DESC_CMD_IIPT_IPV4_CSUM;
2908 }
2909
2910 if (m->m_pkthdr.csum_flags &
2911 (M_CSUM_TSOv6 | M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
2912 cmd |= IXL_TX_DESC_CMD_IIPT_IPV6;
2913 }
2914
2915 switch (cmd & IXL_TX_DESC_CMD_IIPT_MASK) {
2916 case IXL_TX_DESC_CMD_IIPT_IPV4:
2917 case IXL_TX_DESC_CMD_IIPT_IPV4_CSUM:
2918 len = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data);
2919 break;
2920 case IXL_TX_DESC_CMD_IIPT_IPV6:
2921 len = M_CSUM_DATA_IPv6_IPHL(m->m_pkthdr.csum_data);
2922 break;
2923 default:
2924 len = 0;
2925 }
2926 cmd |= ((len >> 2) << IXL_TX_DESC_IPLEN_SHIFT);
2927
2928 if (m->m_pkthdr.csum_flags &
2929 (M_CSUM_TSOv4 | M_CSUM_TSOv6 | M_CSUM_TCPv4 | M_CSUM_TCPv6)) {
2930 len = sizeof(struct tcphdr);
2931 cmd |= IXL_TX_DESC_CMD_L4T_EOFT_TCP;
2932 } else if (m->m_pkthdr.csum_flags & (M_CSUM_UDPv4 | M_CSUM_UDPv6)) {
2933 len = sizeof(struct udphdr);
2934 cmd |= IXL_TX_DESC_CMD_L4T_EOFT_UDP;
2935 } else {
2936 len = 0;
2937 }
2938 cmd |= ((len >> 2) << IXL_TX_DESC_L4LEN_SHIFT);
2939
2940 *cmd_txd |= cmd;
2941 return 0;
2942 }
2943
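/*
 * iavf_tx_common_locked:
 *	Common transmit path shared by iavf_start() and iavf_transmit():
 *	dequeue packets, build data descriptors with the requested
 *	offloads and VLAN tag, and kick the ring tail register.
 */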
2944 static void
2945 iavf_tx_common_locked(struct ifnet *ifp, struct iavf_tx_ring *txr,
2946 bool is_transmit)
2947 {
2948 struct iavf_softc *sc;
2949 struct ixl_tx_desc *ring, *txd;
2950 struct iavf_tx_map *txm;
2951 bus_dmamap_t map;
2952 struct mbuf *m;
2953 unsigned int prod, free, last, i;
2954 unsigned int mask;
2955 uint64_t cmd, cmd_txd;
2956 int post = 0;
2957
2958 KASSERT(mutex_owned(&txr->txr_lock));
2959
2960 sc = ifp->if_softc;
2961
2962 if (!ISSET(ifp->if_flags, IFF_RUNNING)
2963 || (!is_transmit && ISSET(ifp->if_flags, IFF_OACTIVE))) {
2964 if (!is_transmit)
2965 IFQ_PURGE(&ifp->if_snd);
2966 return;
2967 }
2968
2969 prod = txr->txr_prod;
2970 free = txr->txr_cons;
2971
2972 if (free <= prod)
2973 free += sc->sc_tx_ring_ndescs;
2974 free -= prod;
2975
2976 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2977 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTWRITE);
2978
2979 ring = IXL_DMA_KVA(&txr->txr_mem);
2980 mask = sc->sc_tx_ring_ndescs - 1;
2981 last = prod;
2982 cmd = 0;
2983 txd = NULL;
2984
2985 for (;;) {
2986 if (free < IAVF_TX_PKT_DESCS) {
2987 if (!is_transmit)
2988 SET(ifp->if_flags, IFF_OACTIVE);
2989 break;
2990 }
2991
2992 if (is_transmit)
2993 m = pcq_get(txr->txr_intrq);
2994 else
2995 IFQ_DEQUEUE(&ifp->if_snd, m);
2996
2997 if (m == NULL)
2998 break;
2999
3000 txm = &txr->txr_maps[prod];
3001 map = txm->txm_map;
3002
3003 if (iavf_load_mbuf(sc->sc_dmat, map, &m, txr) != 0) {
3004 if_statinc(ifp, if_oerrors);
3005 m_freem(m);
3006 continue;
3007 }
3008
3009 cmd_txd = 0;
3010 if (m->m_pkthdr.csum_flags & IAVF_CSUM_ALL_OFFLOAD) {
3011 iavf_tx_setup_offloads(m, &cmd_txd);
3012 }
3013 if (vlan_has_tag(m)) {
3014 cmd_txd |= IXL_TX_DESC_CMD_IL2TAG1 |
3015 ((uint64_t)vlan_get_tag(m)
3016 << IXL_TX_DESC_L2TAG1_SHIFT);
3017 }
3018
3019 bus_dmamap_sync(sc->sc_dmat, map, 0,
3020 map->dm_mapsize, BUS_DMASYNC_PREWRITE);
3021
3022 for (i = 0; i < (unsigned int)map->dm_nsegs; i++) {
3023 txd = &ring[prod];
3024
3025 cmd = (uint64_t)map->dm_segs[i].ds_len <<
3026 IXL_TX_DESC_BSIZE_SHIFT;
3027 cmd |= IXL_TX_DESC_DTYPE_DATA|IXL_TX_DESC_CMD_ICRC|
3028 cmd_txd;
3029
3030 txd->addr = htole64(map->dm_segs[i].ds_addr);
3031 txd->cmd = htole64(cmd);
3032
3033 last = prod;
3034 prod++;
3035 prod &= mask;
3036 }
3037
3038 cmd |= IXL_TX_DESC_CMD_EOP|IXL_TX_DESC_CMD_RS;
3039 txd->cmd = htole64(cmd);
3040 txm->txm_m = m;
3041 txm->txm_eop = last;
3042
3043 bpf_mtap(ifp, m, BPF_D_OUT);
3044 free -= i;
3045 post = 1;
3046 }
3047
3048 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
3049 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREWRITE);
3050
3051 if (post) {
3052 txr->txr_prod = prod;
3053 iavf_wr(sc, txr->txr_tail, prod);
3054 txr->txr_watchdog = IAVF_WATCHDOG_TICKS;
3055 }
3056 }
3057
3058 static inline int
3059 iavf_handle_queue_common(struct iavf_softc *sc, struct iavf_queue_pair *qp,
3060 u_int txlimit, struct evcnt *txevcnt,
3061 u_int rxlimit, struct evcnt *rxevcnt)
3062 {
3063 struct iavf_tx_ring *txr;
3064 struct iavf_rx_ring *rxr;
3065 int txmore, rxmore;
3066 int rv;
3067
3068 txr = qp->qp_txr;
3069 rxr = qp->qp_rxr;
3070
3071 mutex_enter(&txr->txr_lock);
3072 txmore = iavf_txeof(sc, txr, txlimit, txevcnt);
3073 mutex_exit(&txr->txr_lock);
3074
3075 mutex_enter(&rxr->rxr_lock);
3076 rxmore = iavf_rxeof(sc, rxr, rxlimit, rxevcnt);
3077 mutex_exit(&rxr->rxr_lock);
3078
3079 rv = txmore | (rxmore << 1);
3080
3081 return rv;
3082 }
3083
3084 static void
3085 iavf_sched_handle_queue(struct iavf_softc *sc, struct iavf_queue_pair *qp)
3086 {
3087
3088 if (qp->qp_workqueue)
3089 workqueue_enqueue(sc->sc_workq_txrx, &qp->qp_work, NULL);
3090 else
3091 softint_schedule(qp->qp_si);
3092 }
3093
3094 static void
3095 iavf_start(struct ifnet *ifp)
3096 {
3097 struct iavf_softc *sc;
3098 struct iavf_tx_ring *txr;
3099
3100 sc = ifp->if_softc;
3101 txr = sc->sc_qps[0].qp_txr;
3102
3103 mutex_enter(&txr->txr_lock);
3104 iavf_tx_common_locked(ifp, txr, false);
3105 mutex_exit(&txr->txr_lock);
3106
3107 }
3108
3109 static inline unsigned int
3110 iavf_select_txqueue(struct iavf_softc *sc, struct mbuf *m)
3111 {
3112 u_int cpuid;
3113
3114 cpuid = cpu_index(curcpu());
3115
3116 return (unsigned int)(cpuid % sc->sc_nqueue_pairs);
3117 }
3118
3119 static int
3120 iavf_transmit(struct ifnet *ifp, struct mbuf *m)
3121 {
3122 struct iavf_softc *sc;
3123 struct iavf_tx_ring *txr;
3124 unsigned int qid;
3125
3126 sc = ifp->if_softc;
3127 qid = iavf_select_txqueue(sc, m);
3128
3129 txr = sc->sc_qps[qid].qp_txr;
3130
3131 if (__predict_false(!pcq_put(txr->txr_intrq, m))) {
3132 mutex_enter(&txr->txr_lock);
3133 txr->txr_pcqdrop.ev_count++;
3134 mutex_exit(&txr->txr_lock);
3135
3136 m_freem(m);
3137 return ENOBUFS;
3138 }
3139
3140 if (mutex_tryenter(&txr->txr_lock)) {
3141 iavf_tx_common_locked(ifp, txr, true);
3142 mutex_exit(&txr->txr_lock);
3143 } else {
3144 kpreempt_disable();
3145 softint_schedule(txr->txr_si);
3146 kpreempt_enable();
3147 }
3148 return 0;
3149 }
3150
3151 static void
3152 iavf_deferred_transmit(void *xtxr)
3153 {
3154 struct iavf_tx_ring *txr;
3155 struct iavf_softc *sc;
3156 struct ifnet *ifp;
3157
3158 txr = xtxr;
3159 sc = txr->txr_sc;
3160 ifp = &sc->sc_ec.ec_if;
3161
3162 mutex_enter(&txr->txr_lock);
3163 txr->txr_transmitdef.ev_count++;
3164 if (pcq_peek(txr->txr_intrq) != NULL)
3165 iavf_tx_common_locked(ifp, txr, true);
3166 mutex_exit(&txr->txr_lock);
3167 }
3168
3169 static void
3170 iavf_txr_clean(struct iavf_softc *sc, struct iavf_tx_ring *txr)
3171 {
3172 struct iavf_tx_map *maps, *txm;
3173 bus_dmamap_t map;
3174 unsigned int i;
3175
3176 KASSERT(mutex_owned(&txr->txr_lock));
3177
3178 maps = txr->txr_maps;
3179 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
3180 txm = &maps[i];
3181
3182 if (txm->txm_m == NULL)
3183 continue;
3184
3185 map = txm->txm_map;
3186 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3187 BUS_DMASYNC_POSTWRITE);
3188 bus_dmamap_unload(sc->sc_dmat, map);
3189
3190 m_freem(txm->txm_m);
3191 txm->txm_m = NULL;
3192 }
3193
3194 memset(IXL_DMA_KVA(&txr->txr_mem), 0, IXL_DMA_LEN(&txr->txr_mem));
3195 txr->txr_prod = txr->txr_cons = 0;
3196 }
3197
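/*
 * iavf_intr:
 *	Interrupt handler for the misc vector: detects an in-progress
 *	VF reset, services the admin queues, and drains all queue
 *	pairs when queue interrupts are routed to this vector.
 */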
3198 static int
3199 iavf_intr(void *xsc)
3200 {
3201 struct iavf_softc *sc = xsc;
3202 struct ifnet *ifp = &sc->sc_ec.ec_if;
3203 struct iavf_rx_ring *rxr;
3204 struct iavf_tx_ring *txr;
3205 uint32_t icr;
3206 unsigned int i;
3207
3208 	/* read I40E_VFINT_ICR0_ENA1 to clear status */
3209 (void)iavf_rd(sc, I40E_VFINT_ICR0_ENA1);
3210
3211 iavf_intr_enable(sc);
3212 icr = iavf_rd(sc, I40E_VFINT_ICR01);
3213
3214 if (icr == IAVF_REG_VFR) {
3215 log(LOG_INFO, "%s: VF reset in progress\n",
3216 ifp->if_xname);
3217 iavf_work_set(&sc->sc_reset_task, iavf_reset_start, sc);
3218 iavf_work_add(sc->sc_workq, &sc->sc_reset_task);
3219 return 1;
3220 }
3221
3222 if (ISSET(icr, I40E_VFINT_ICR01_ADMINQ_MASK)) {
3223 mutex_enter(&sc->sc_adminq_lock);
3224 iavf_atq_done(sc);
3225 iavf_arq(sc);
3226 mutex_exit(&sc->sc_adminq_lock);
3227 }
3228
3229 if (ISSET(icr, I40E_VFINT_ICR01_QUEUE_0_MASK)) {
3230 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
3231 rxr = sc->sc_qps[i].qp_rxr;
3232 txr = sc->sc_qps[i].qp_txr;
3233
3234 mutex_enter(&rxr->rxr_lock);
3235 while (iavf_rxeof(sc, rxr, UINT_MAX,
3236 &rxr->rxr_intr) != 0) {
3237 /* do nothing */
3238 }
3239 mutex_exit(&rxr->rxr_lock);
3240
3241 mutex_enter(&txr->txr_lock);
3242 while (iavf_txeof(sc, txr, UINT_MAX,
3243 &txr->txr_intr) != 0) {
3244 /* do nothing */
3245 }
3246 mutex_exit(&txr->txr_lock);
3247 }
3248 }
3249
3250 return 0;
3251 }
3252
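/*
 * iavf_queue_intr:
 *	MSI-X handler for a queue pair: process Tx and Rx within the
 *	interrupt limits, then either re-enable the interrupt or defer
 *	the remaining work to the softint/workqueue.
 */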
3253 static int
3254 iavf_queue_intr(void *xqp)
3255 {
3256 struct iavf_queue_pair *qp = xqp;
3257 struct iavf_tx_ring *txr;
3258 struct iavf_rx_ring *rxr;
3259 struct iavf_softc *sc;
3260 unsigned int qid;
3261 u_int txlimit, rxlimit;
3262 int more;
3263
3264 txr = qp->qp_txr;
3265 rxr = qp->qp_rxr;
3266 sc = txr->txr_sc;
3267 qid = txr->txr_qid;
3268
3269 txlimit = sc->sc_tx_intr_process_limit;
3270 rxlimit = sc->sc_rx_intr_process_limit;
3271 qp->qp_workqueue = sc->sc_txrx_workqueue;
3272
3273 more = iavf_handle_queue_common(sc, qp,
3274 txlimit, &txr->txr_intr, rxlimit, &rxr->rxr_intr);
3275
3276 if (more != 0) {
3277 iavf_sched_handle_queue(sc, qp);
3278 } else {
3279 /* for ALTQ */
3280 if (txr->txr_qid == 0)
3281 if_schedule_deferred_start(&sc->sc_ec.ec_if);
3282 softint_schedule(txr->txr_si);
3283
3284 iavf_queue_intr_enable(sc, qid);
3285 }
3286
3287 return 0;
3288 }
3289
3290 static void
3291 iavf_handle_queue_wk(struct work *wk, void *xsc __unused)
3292 {
3293 struct iavf_queue_pair *qp;
3294
3295 qp = container_of(wk, struct iavf_queue_pair, qp_work);
3296 iavf_handle_queue(qp);
3297 }
3298
3299 static void
3300 iavf_handle_queue(void *xqp)
3301 {
3302 struct iavf_queue_pair *qp = xqp;
3303 struct iavf_tx_ring *txr;
3304 struct iavf_rx_ring *rxr;
3305 struct iavf_softc *sc;
3306 unsigned int qid;
3307 u_int txlimit, rxlimit;
3308 int more;
3309
3310 txr = qp->qp_txr;
3311 rxr = qp->qp_rxr;
3312 sc = txr->txr_sc;
3313 qid = txr->txr_qid;
3314
3315 txlimit = sc->sc_tx_process_limit;
3316 rxlimit = sc->sc_rx_process_limit;
3317
3318 more = iavf_handle_queue_common(sc, qp,
3319 txlimit, &txr->txr_defer, rxlimit, &rxr->rxr_defer);
3320
3321 if (more != 0)
3322 iavf_sched_handle_queue(sc, qp);
3323 else
3324 iavf_queue_intr_enable(sc, qid);
3325 }
3326
3327 static void
3328 iavf_tick(void *xsc)
3329 {
3330 struct iavf_softc *sc;
3331 unsigned int i;
3332 int timedout;
3333
3334 sc = xsc;
3335 timedout = 0;
3336
3337 mutex_enter(&sc->sc_cfg_lock);
3338
3339 if (sc->sc_resetting) {
3340 iavf_work_add(sc->sc_workq, &sc->sc_reset_task);
3341 mutex_exit(&sc->sc_cfg_lock);
3342 return;
3343 }
3344
3345 iavf_get_stats(sc);
3346
3347 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
3348 timedout |= iavf_watchdog(sc->sc_qps[i].qp_txr);
3349 }
3350
3351 if (timedout != 0) {
3352 iavf_work_add(sc->sc_workq, &sc->sc_wdto_task);
3353 } else {
3354 callout_schedule(&sc->sc_tick, IAVF_TICK_INTERVAL);
3355 }
3356
3357 mutex_exit(&sc->sc_cfg_lock);
3358 }
3359
3360 static void
3361 iavf_tick_halt(void *unused __unused)
3362 {
3363
3364 /* do nothing */
3365 }
3366
3367 static void
3368 iavf_reset_request(void *xsc)
3369 {
3370 struct iavf_softc *sc = xsc;
3371
3372 iavf_reset_vf(sc);
3373 iavf_reset_start(sc);
3374 }
3375
3376 static void
3377 iavf_reset_start(void *xsc)
3378 {
3379 struct iavf_softc *sc = xsc;
3380 struct ifnet *ifp = &sc->sc_ec.ec_if;
3381
3382 mutex_enter(&sc->sc_cfg_lock);
3383
3384 if (sc->sc_resetting)
3385 goto do_reset;
3386
3387 sc->sc_resetting = true;
3388 if_link_state_change(ifp, LINK_STATE_DOWN);
3389
3390 if (ISSET(ifp->if_flags, IFF_RUNNING)) {
3391 iavf_stop_locked(sc);
3392 sc->sc_reset_up = true;
3393 }
3394
3395 memcpy(sc->sc_enaddr_reset, sc->sc_enaddr, ETHER_ADDR_LEN);
3396
3397 do_reset:
3398 iavf_work_set(&sc->sc_reset_task, iavf_reset, sc);
3399
3400 mutex_exit(&sc->sc_cfg_lock);
3401
3402 iavf_reset((void *)sc);
3403 }
3404
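/*
 * iavf_reset:
 *	Bring the device back up after a VF reset: rebuild the admin
 *	queue, renegotiate the API version and VF resources, reallocate
 *	queue pairs and interrupts if the resource counts changed, and
 *	reprogram the IRQ map before finishing the reset.
 */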
3405 static void
3406 iavf_reset(void *xsc)
3407 {
3408 struct iavf_softc *sc = xsc;
3409 struct ifnet *ifp = &sc->sc_ec.ec_if;
3410 struct ixl_aq_buf *aqb;
3411 bool realloc_qps, realloc_intrs;
3412
3413 mutex_enter(&sc->sc_cfg_lock);
3414
3415 mutex_enter(&sc->sc_adminq_lock);
3416 iavf_cleanup_admin_queue(sc);
3417 mutex_exit(&sc->sc_adminq_lock);
3418
3419 sc->sc_major_ver = UINT_MAX;
3420 sc->sc_minor_ver = UINT_MAX;
3421 sc->sc_got_vf_resources = 0;
3422 sc->sc_got_irq_map = 0;
3423
3424 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
3425 if (aqb == NULL)
3426 goto failed;
3427
3428 if (iavf_wait_active(sc) != 0) {
3429 log(LOG_WARNING, "%s: VF reset timed out\n",
3430 ifp->if_xname);
3431 goto failed;
3432 }
3433
3434 if (!iavf_arq_fill(sc)) {
3435 log(LOG_ERR, "%s: unable to fill arq descriptors\n",
3436 ifp->if_xname);
3437 goto failed;
3438 }
3439
3440 if (iavf_init_admin_queue(sc) != 0) {
3441 log(LOG_ERR, "%s: unable to initialize admin queue\n",
3442 ifp->if_xname);
3443 goto failed;
3444 }
3445
3446 if (iavf_get_version(sc, aqb) != 0) {
3447 log(LOG_ERR, "%s: unable to get VF interface version\n",
3448 ifp->if_xname);
3449 goto failed;
3450 }
3451
3452 if (iavf_get_vf_resources(sc, aqb) != 0) {
3453 log(LOG_ERR, "%s: timed out waiting for VF resources\n",
3454 ifp->if_xname);
3455 goto failed;
3456 }
3457
3458 if (sc->sc_nqps_alloc < iavf_calc_queue_pair_size(sc)) {
3459 realloc_qps = true;
3460 } else {
3461 realloc_qps = false;
3462 }
3463
3464 if (sc->sc_nintrs < iavf_calc_msix_count(sc)) {
3465 realloc_intrs = true;
3466 } else {
3467 realloc_intrs = false;
3468 }
3469
3470 if (realloc_qps || realloc_intrs)
3471 iavf_teardown_interrupts(sc);
3472
3473 if (realloc_qps) {
3474 iavf_queue_pairs_free(sc);
3475 if (iavf_queue_pairs_alloc(sc) != 0) {
3476 log(LOG_ERR, "%s: failed to allocate queue pairs\n",
3477 ifp->if_xname);
3478 goto failed;
3479 }
3480 }
3481
3482 if (realloc_qps || realloc_intrs) {
3483 if (iavf_setup_interrupts(sc) != 0) {
3484 sc->sc_nintrs = 0;
3485 log(LOG_ERR, "%s: failed to allocate interrupts\n",
3486 ifp->if_xname);
3487 goto failed;
3488 }
3489 log(LOG_INFO, "%s: reallocated queues\n", ifp->if_xname);
3490 }
3491
3492 if (iavf_config_irq_map(sc, aqb) != 0) {
3493 log(LOG_ERR, "%s: timed out configuring IRQ map\n",
3494 ifp->if_xname);
3495 goto failed;
3496 }
3497
3498 mutex_enter(&sc->sc_adminq_lock);
3499 iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
3500 mutex_exit(&sc->sc_adminq_lock);
3501
3502 iavf_reset_finish(sc);
3503
3504 mutex_exit(&sc->sc_cfg_lock);
3505 return;
3506
3507 failed:
3508 mutex_enter(&sc->sc_adminq_lock);
3509 iavf_cleanup_admin_queue(sc);
3510 if (aqb != NULL) {
3511 iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
3512 }
3513 mutex_exit(&sc->sc_adminq_lock);
3514 callout_schedule(&sc->sc_tick, IAVF_TICK_INTERVAL);
3515 mutex_exit(&sc->sc_cfg_lock);
3516 }
3517
3518 static void
3519 iavf_reset_finish(struct iavf_softc *sc)
3520 {
3521 struct ethercom *ec = &sc->sc_ec;
3522 struct ether_multi *enm;
3523 struct ether_multistep step;
3524 struct ifnet *ifp = &ec->ec_if;
3525 struct vlanid_list *vlanidp;
3526 uint8_t enaddr_prev[ETHER_ADDR_LEN], enaddr_next[ETHER_ADDR_LEN];
3527
3528 KASSERT(mutex_owned(&sc->sc_cfg_lock));
3529
3530 callout_stop(&sc->sc_tick);
3531
3532 iavf_intr_enable(sc);
3533
3534 if (!iavf_is_etheranyaddr(sc->sc_enaddr_added)) {
3535 iavf_eth_addr(sc, sc->sc_enaddr_added, IAVF_VC_OP_ADD_ETH_ADDR);
3536 }
3537
3538 ETHER_LOCK(ec);
3539 if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
3540 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
3541 ETHER_NEXT_MULTI(step, enm)) {
3542 iavf_add_multi(sc, enm->enm_addrlo, enm->enm_addrhi);
3543 }
3544 }
3545
3546 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
3547 ETHER_UNLOCK(ec);
3548 iavf_config_vlan_id(sc, vlanidp->vid, IAVF_VC_OP_ADD_VLAN);
3549 ETHER_LOCK(ec);
3550 }
3551 ETHER_UNLOCK(ec);
3552
3553 if (memcmp(sc->sc_enaddr, sc->sc_enaddr_reset, ETHER_ADDR_LEN) != 0) {
3554 memcpy(enaddr_prev, sc->sc_enaddr_reset, sizeof(enaddr_prev));
3555 memcpy(enaddr_next, sc->sc_enaddr, sizeof(enaddr_next));
3556 log(LOG_INFO, "%s: Ethernet address changed to %s\n",
3557 ifp->if_xname, ether_sprintf(enaddr_next));
3558
3559 mutex_exit(&sc->sc_cfg_lock);
3560 IFNET_LOCK(ifp);
3561 kpreempt_disable();
3562 /*XXX we need an API to change ethernet address. */
3563 iavf_replace_lla(ifp, enaddr_prev, enaddr_next);
3564 kpreempt_enable();
3565 IFNET_UNLOCK(ifp);
3566 mutex_enter(&sc->sc_cfg_lock);
3567 }
3568
3569 sc->sc_resetting = false;
3570
3571 if (sc->sc_reset_up) {
3572 iavf_init_locked(sc);
3573 }
3574
3575 if (sc->sc_link_state != LINK_STATE_DOWN) {
3576 if_link_state_change(ifp, sc->sc_link_state);
3577 }
3578
3579 }
3580
3581 static int
3582 iavf_dmamem_alloc(bus_dma_tag_t dmat, struct ixl_dmamem *ixm,
3583 bus_size_t size, bus_size_t align)
3584 {
3585 ixm->ixm_size = size;
3586
3587 if (bus_dmamap_create(dmat, ixm->ixm_size, 1,
3588 ixm->ixm_size, 0,
3589 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
3590 &ixm->ixm_map) != 0)
3591 return 1;
3592 if (bus_dmamem_alloc(dmat, ixm->ixm_size,
3593 align, 0, &ixm->ixm_seg, 1, &ixm->ixm_nsegs,
3594 BUS_DMA_WAITOK) != 0)
3595 goto destroy;
3596 if (bus_dmamem_map(dmat, &ixm->ixm_seg, ixm->ixm_nsegs,
3597 ixm->ixm_size, &ixm->ixm_kva, BUS_DMA_WAITOK) != 0)
3598 goto free;
3599 if (bus_dmamap_load(dmat, ixm->ixm_map, ixm->ixm_kva,
3600 ixm->ixm_size, NULL, BUS_DMA_WAITOK) != 0)
3601 goto unmap;
3602
3603 memset(ixm->ixm_kva, 0, ixm->ixm_size);
3604
3605 return 0;
3606 unmap:
3607 bus_dmamem_unmap(dmat, ixm->ixm_kva, ixm->ixm_size);
3608 free:
3609 bus_dmamem_free(dmat, &ixm->ixm_seg, 1);
3610 destroy:
3611 bus_dmamap_destroy(dmat, ixm->ixm_map);
3612 return 1;
3613 }
3614
3615 static void
3616 iavf_dmamem_free(bus_dma_tag_t dmat, struct ixl_dmamem *ixm)
3617 {
3618
3619 bus_dmamap_unload(dmat, ixm->ixm_map);
3620 bus_dmamem_unmap(dmat, ixm->ixm_kva, ixm->ixm_size);
3621 bus_dmamem_free(dmat, &ixm->ixm_seg, 1);
3622 bus_dmamap_destroy(dmat, ixm->ixm_map);
3623 }
3624
3625 static struct ixl_aq_buf *
3626 iavf_aqb_alloc(bus_dma_tag_t dmat, size_t buflen)
3627 {
3628 struct ixl_aq_buf *aqb;
3629
3630 aqb = kmem_alloc(sizeof(*aqb), KM_NOSLEEP);
3631 if (aqb == NULL)
3632 return NULL;
3633
3634 aqb->aqb_size = buflen;
3635
3636 if (bus_dmamap_create(dmat, aqb->aqb_size, 1,
3637 aqb->aqb_size, 0,
3638 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &aqb->aqb_map) != 0)
3639 goto free;
3640 if (bus_dmamem_alloc(dmat, aqb->aqb_size,
3641 IAVF_AQ_ALIGN, 0, &aqb->aqb_seg, 1, &aqb->aqb_nsegs,
3642 BUS_DMA_WAITOK) != 0)
3643 goto destroy;
3644 if (bus_dmamem_map(dmat, &aqb->aqb_seg, aqb->aqb_nsegs,
3645 aqb->aqb_size, &aqb->aqb_data, BUS_DMA_WAITOK) != 0)
3646 goto dma_free;
3647 if (bus_dmamap_load(dmat, aqb->aqb_map, aqb->aqb_data,
3648 aqb->aqb_size, NULL, BUS_DMA_WAITOK) != 0)
3649 goto unmap;
3650
3651 return aqb;
3652 unmap:
3653 bus_dmamem_unmap(dmat, aqb->aqb_data, aqb->aqb_size);
3654 dma_free:
3655 bus_dmamem_free(dmat, &aqb->aqb_seg, 1);
3656 destroy:
3657 bus_dmamap_destroy(dmat, aqb->aqb_map);
3658 free:
3659 kmem_free(aqb, sizeof(*aqb));
3660
3661 return NULL;
3662 }
3663
3664 static void
3665 iavf_aqb_free(bus_dma_tag_t dmat, struct ixl_aq_buf *aqb)
3666 {
3667
3668 bus_dmamap_unload(dmat, aqb->aqb_map);
3669 bus_dmamem_unmap(dmat, aqb->aqb_data, aqb->aqb_size);
3670 bus_dmamem_free(dmat, &aqb->aqb_seg, 1);
3671 bus_dmamap_destroy(dmat, aqb->aqb_map);
3672 kmem_free(aqb, sizeof(*aqb));
3673 }
3674
3675 static struct ixl_aq_buf *
3676 iavf_aqb_get_locked(struct ixl_aq_bufs *q)
3677 {
3678 struct ixl_aq_buf *aqb;
3679
3680 aqb = SIMPLEQ_FIRST(q);
3681 if (aqb != NULL) {
3682 SIMPLEQ_REMOVE(q, aqb, ixl_aq_buf, aqb_entry);
3683 }
3684
3685 return aqb;
3686 }
3687
3688 static struct ixl_aq_buf *
3689 iavf_aqb_get(struct iavf_softc *sc, struct ixl_aq_bufs *q)
3690 {
3691 struct ixl_aq_buf *aqb;
3692
3693 if (q != NULL) {
3694 mutex_enter(&sc->sc_adminq_lock);
3695 aqb = iavf_aqb_get_locked(q);
3696 mutex_exit(&sc->sc_adminq_lock);
3697 } else {
3698 aqb = NULL;
3699 }
3700
3701 if (aqb == NULL) {
3702 aqb = iavf_aqb_alloc(sc->sc_dmat, IAVF_AQ_BUFLEN);
3703 }
3704
3705 return aqb;
3706 }
3707
3708 static void
3709 iavf_aqb_put_locked(struct ixl_aq_bufs *q, struct ixl_aq_buf *aqb)
3710 {
3711
3712 SIMPLEQ_INSERT_TAIL(q, aqb, aqb_entry);
3713 }
3714
3715 static void
3716 iavf_aqb_clean(struct ixl_aq_bufs *q, bus_dma_tag_t dmat)
3717 {
3718 struct ixl_aq_buf *aqb;
3719
3720 while ((aqb = SIMPLEQ_FIRST(q)) != NULL) {
3721 SIMPLEQ_REMOVE(q, aqb, ixl_aq_buf, aqb_entry);
3722 iavf_aqb_free(dmat, aqb);
3723 }
3724 }
3725
3726 static const char *
3727 iavf_aq_vc_opcode_str(const struct ixl_aq_desc *iaq)
3728 {
3729
3730 switch (iavf_aq_vc_get_opcode(iaq)) {
3731 case IAVF_VC_OP_VERSION:
3732 return "GET_VERSION";
3733 case IAVF_VC_OP_RESET_VF:
3734 return "RESET_VF";
3735 case IAVF_VC_OP_GET_VF_RESOURCES:
3736 return "GET_VF_RESOURCES";
3737 case IAVF_VC_OP_CONFIG_TX_QUEUE:
3738 return "CONFIG_TX_QUEUE";
3739 case IAVF_VC_OP_CONFIG_RX_QUEUE:
3740 return "CONFIG_RX_QUEUE";
3741 case IAVF_VC_OP_CONFIG_VSI_QUEUES:
3742 return "CONFIG_VSI_QUEUES";
3743 case IAVF_VC_OP_CONFIG_IRQ_MAP:
3744 return "CONFIG_IRQ_MAP";
3745 case IAVF_VC_OP_ENABLE_QUEUES:
3746 return "ENABLE_QUEUES";
3747 case IAVF_VC_OP_DISABLE_QUEUES:
3748 return "DISABLE_QUEUES";
3749 case IAVF_VC_OP_ADD_ETH_ADDR:
3750 return "ADD_ETH_ADDR";
3751 case IAVF_VC_OP_DEL_ETH_ADDR:
3752 return "DEL_ETH_ADDR";
3753 case IAVF_VC_OP_CONFIG_PROMISC:
3754 return "CONFIG_PROMISC";
3755 case IAVF_VC_OP_GET_STATS:
3756 return "GET_STATS";
3757 case IAVF_VC_OP_EVENT:
3758 return "EVENT";
3759 case IAVF_VC_OP_CONFIG_RSS_KEY:
3760 return "CONFIG_RSS_KEY";
3761 case IAVF_VC_OP_GET_RSS_HENA_CAPS:
3762 		return "GET_RSS_HENA_CAPS";
3763 case IAVF_VC_OP_SET_RSS_HENA:
3764 return "SET_RSS_HENA";
3765 case IAVF_VC_OP_ENABLE_VLAN_STRIP:
3766 return "ENABLE_VLAN_STRIPPING";
3767 case IAVF_VC_OP_DISABLE_VLAN_STRIP:
3768 return "DISABLE_VLAN_STRIPPING";
3769 case IAVF_VC_OP_REQUEST_QUEUES:
3770 return "REQUEST_QUEUES";
3771 }
3772
3773 return "unknown";
3774 }
3775
3776 static void
3777 iavf_aq_dump(const struct iavf_softc *sc, const struct ixl_aq_desc *iaq,
3778 const char *msg)
3779 {
3780 char buf[512];
3781 size_t len;
3782
3783 len = sizeof(buf);
3784 buf[--len] = '\0';
3785
3786 device_printf(sc->sc_dev, "%s\n", msg);
3787 snprintb(buf, len, IXL_AQ_FLAGS_FMT, le16toh(iaq->iaq_flags));
3788 device_printf(sc->sc_dev, "flags %s opcode %04x\n",
3789 buf, le16toh(iaq->iaq_opcode));
3790 device_printf(sc->sc_dev, "datalen %u retval %u\n",
3791 le16toh(iaq->iaq_datalen), le16toh(iaq->iaq_retval));
3792 device_printf(sc->sc_dev, "vc-opcode %u (%s)\n",
3793 iavf_aq_vc_get_opcode(iaq),
3794 iavf_aq_vc_opcode_str(iaq));
3795 device_printf(sc->sc_dev, "vc-retval %u\n",
3796 iavf_aq_vc_get_retval(iaq));
3797 device_printf(sc->sc_dev, "cookie %016" PRIx64 "\n", iaq->iaq_cookie);
3798 device_printf(sc->sc_dev, "%08x %08x %08x %08x\n",
3799 le32toh(iaq->iaq_param[0]), le32toh(iaq->iaq_param[1]),
3800 le32toh(iaq->iaq_param[2]), le32toh(iaq->iaq_param[3]));
3801 }
3802
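/*
 * iavf_arq_fill:
 *	Attach idle buffers to the empty admin receive queue
 *	descriptors and, if any were filled, update the ARQ tail.
 */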
3803 static int
3804 iavf_arq_fill(struct iavf_softc *sc)
3805 {
3806 struct ixl_aq_buf *aqb;
3807 struct ixl_aq_desc *arq, *iaq;
3808 unsigned int prod = sc->sc_arq_prod;
3809 unsigned int n;
3810 	int filled = 0;
3811
3812 n = ixl_rxr_unrefreshed(sc->sc_arq_prod, sc->sc_arq_cons,
3813 IAVF_AQ_NUM);
3814
3815 	if (__predict_false(n == 0))
3816 return 0;
3817
3818 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3819 0, IXL_DMA_LEN(&sc->sc_arq),
3820 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3821
3822 arq = IXL_DMA_KVA(&sc->sc_arq);
3823
3824 do {
3825 iaq = &arq[prod];
3826
3827 if (ixl_aq_has_dva(iaq)) {
3828 /* already filled */
3829 break;
3830 }
3831
3832 aqb = iavf_aqb_get_locked(&sc->sc_arq_idle);
3833 if (aqb == NULL)
3834 break;
3835
3836 memset(aqb->aqb_data, 0, aqb->aqb_size);
3837
3838 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0,
3839 aqb->aqb_size, BUS_DMASYNC_PREREAD);
3840
3841 iaq->iaq_flags = htole16(IXL_AQ_BUF |
3842 (aqb->aqb_size > I40E_AQ_LARGE_BUF ?
3843 IXL_AQ_LB : 0));
3844 iaq->iaq_opcode = 0;
3845 iaq->iaq_datalen = htole16(aqb->aqb_size);
3846 iaq->iaq_retval = 0;
3847 iaq->iaq_cookie = 0;
3848 iaq->iaq_param[0] = 0;
3849 iaq->iaq_param[1] = 0;
3850 ixl_aq_dva(iaq, IXL_AQB_DVA(aqb));
3851 iavf_aqb_put_locked(&sc->sc_arq_live, aqb);
3852
3853 prod++;
3854 prod &= IAVF_AQ_MASK;
3855 filled = 1;
3856 } while (--n);
3857
3858 sc->sc_arq_prod = prod;
3859
3860 if (filled) {
3861 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3862 0, IXL_DMA_LEN(&sc->sc_arq),
3863 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3864 iavf_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
3865 }
3866
3867 return filled;
3868 }
3869
3870 static int
3871 iavf_arq_wait(struct iavf_softc *sc, uint32_t opcode)
3872 {
3873 int error;
3874
3875 KASSERT(mutex_owned(&sc->sc_adminq_lock));
3876
3877 while ((error = cv_timedwait(&sc->sc_adminq_cv,
3878 &sc->sc_adminq_lock, mstohz(IAVF_EXEC_TIMEOUT))) == 0) {
3879 if (opcode == sc->sc_arq_opcode)
3880 break;
3881 }
3882
3883 if (error != 0 &&
3884 atomic_load_relaxed(&sc->sc_debuglevel) >= 2)
3885 device_printf(sc->sc_dev, "cv_timedwait error=%d\n", error);
3886
3887 return error;
3888 }
3889
3890 static void
3891 iavf_arq_refill(void *xsc)
3892 {
3893 struct iavf_softc *sc = xsc;
3894 struct ixl_aq_bufs aqbs;
3895 struct ixl_aq_buf *aqb;
3896 unsigned int n, i;
3897
3898 mutex_enter(&sc->sc_adminq_lock);
3899 iavf_arq_fill(sc);
3900 n = ixl_rxr_unrefreshed(sc->sc_arq_prod, sc->sc_arq_cons,
3901 IAVF_AQ_NUM);
3902 mutex_exit(&sc->sc_adminq_lock);
3903
3904 if (n == 0)
3905 return;
3906
3907 if (atomic_load_relaxed(&sc->sc_debuglevel) >= 1)
3908 device_printf(sc->sc_dev, "Allocate %d bufs for arq\n", n);
3909
3910 SIMPLEQ_INIT(&aqbs);
3911 for (i = 0; i < n; i++) {
3912 aqb = iavf_aqb_get(sc, NULL);
3913 if (aqb == NULL)
3914 continue;
3915 SIMPLEQ_INSERT_TAIL(&aqbs, aqb, aqb_entry);
3916 }
3917
3918 mutex_enter(&sc->sc_adminq_lock);
3919 while ((aqb = SIMPLEQ_FIRST(&aqbs)) != NULL) {
3920 SIMPLEQ_REMOVE(&aqbs, aqb, ixl_aq_buf, aqb_entry);
3921 iavf_aqb_put_locked(&sc->sc_arq_idle, aqb);
3922 }
3923 iavf_arq_fill(sc);
3924 mutex_exit(&sc->sc_adminq_lock);
3925 }
3926
3927 static uint32_t
3928 iavf_process_arq(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
3929 struct ixl_aq_buf *aqb)
3930 {
3931 uint32_t vc_retval, vc_opcode;
3932 int dbg;
3933
3934 dbg = atomic_load_relaxed(&sc->sc_debuglevel);
3935 if (dbg >= 3)
3936 iavf_aq_dump(sc, iaq, "arq proc");
3937
3938 if (dbg >= 2) {
3939 vc_retval = iavf_aq_vc_get_retval(iaq);
3940 if (vc_retval != IAVF_VC_RC_SUCCESS) {
3941 device_printf(sc->sc_dev, "%s failed=%d(arq)\n",
3942 iavf_aq_vc_opcode_str(iaq), vc_retval);
3943 }
3944 }
3945
3946 vc_opcode = iavf_aq_vc_get_opcode(iaq);
3947 switch (vc_opcode) {
3948 case IAVF_VC_OP_VERSION:
3949 iavf_process_version(sc, iaq, aqb);
3950 break;
3951 case IAVF_VC_OP_GET_VF_RESOURCES:
3952 iavf_process_vf_resources(sc, iaq, aqb);
3953 break;
3954 case IAVF_VC_OP_CONFIG_IRQ_MAP:
3955 iavf_process_irq_map(sc, iaq);
3956 break;
3957 case IAVF_VC_OP_EVENT:
3958 iavf_process_vc_event(sc, iaq, aqb);
3959 break;
3960 case IAVF_VC_OP_GET_STATS:
3961 iavf_process_stats(sc, iaq, aqb);
3962 break;
3963 case IAVF_VC_OP_REQUEST_QUEUES:
3964 iavf_process_req_queues(sc, iaq, aqb);
3965 break;
3966 }
3967
3968 return vc_opcode;
3969 }
3970
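/*
 * iavf_arq_poll:
 *	Poll the admin receive queue for up to "retry" milliseconds,
 *	processing responses until one carrying wait_opcode is seen.
 */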
3971 static int
3972 iavf_arq_poll(struct iavf_softc *sc, uint32_t wait_opcode, int retry)
3973 {
3974 struct ixl_aq_desc *arq, *iaq;
3975 struct ixl_aq_buf *aqb;
3976 unsigned int cons = sc->sc_arq_cons;
3977 unsigned int prod;
3978 uint32_t vc_opcode;
3979 bool received;
3980 int i;
3981
3982 for (i = 0, received = false; i < retry && !received; i++) {
3983 prod = iavf_rd(sc, sc->sc_aq_regs->arq_head);
3984 prod &= sc->sc_aq_regs->arq_head_mask;
3985
3986 if (prod == cons) {
3987 delaymsec(1);
3988 continue;
3989 }
3990
3991 if (prod >= IAVF_AQ_NUM) {
3992 return EIO;
3993 }
3994
3995 arq = IXL_DMA_KVA(&sc->sc_arq);
3996
3997 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3998 0, IXL_DMA_LEN(&sc->sc_arq),
3999 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
4000
4001 do {
4002 iaq = &arq[cons];
4003 aqb = iavf_aqb_get_locked(&sc->sc_arq_live);
4004 KASSERT(aqb != NULL);
4005
4006 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0,
4007 IAVF_AQ_BUFLEN, BUS_DMASYNC_POSTREAD);
4008
4009 vc_opcode = iavf_process_arq(sc, iaq, aqb);
4010
4011 if (vc_opcode == wait_opcode)
4012 received = true;
4013
4014 memset(iaq, 0, sizeof(*iaq));
4015 iavf_aqb_put_locked(&sc->sc_arq_idle, aqb);
4016
4017 cons++;
4018 cons &= IAVF_AQ_MASK;
4019
4020 } while (cons != prod);
4021
4022 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
4023 0, IXL_DMA_LEN(&sc->sc_arq),
4024 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4025
4026 sc->sc_arq_cons = cons;
4027 iavf_arq_fill(sc);
4028
4029 }
4030
4031 if (!received)
4032 return ETIMEDOUT;
4033
4034 return 0;
4035 }
4036
4037 static int
4038 iavf_arq(struct iavf_softc *sc)
4039 {
4040 struct ixl_aq_desc *arq, *iaq;
4041 struct ixl_aq_buf *aqb;
4042 unsigned int cons = sc->sc_arq_cons;
4043 unsigned int prod;
4044 uint32_t vc_opcode;
4045
4046 KASSERT(mutex_owned(&sc->sc_adminq_lock));
4047
4048 prod = iavf_rd(sc, sc->sc_aq_regs->arq_head);
4049 prod &= sc->sc_aq_regs->arq_head_mask;
4050
4051 	/* the head register holds a bogus value while a reset is in progress */
4052 if (prod >= IAVF_AQ_NUM) {
4053 iavf_work_set(&sc->sc_reset_task, iavf_reset_start, sc);
4054 iavf_work_add(sc->sc_workq, &sc->sc_reset_task);
4055 return 0;
4056 }
4057
4058 if (cons == prod)
4059 return 0;
4060
4061 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
4062 0, IXL_DMA_LEN(&sc->sc_arq),
4063 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
4064
4065 arq = IXL_DMA_KVA(&sc->sc_arq);
4066
4067 do {
4068 iaq = &arq[cons];
4069 aqb = iavf_aqb_get_locked(&sc->sc_arq_live);
4070
4071 KASSERT(aqb != NULL);
4072
4073 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IAVF_AQ_BUFLEN,
4074 BUS_DMASYNC_POSTREAD);
4075
4076 vc_opcode = iavf_process_arq(sc, iaq, aqb);
4077
4078 switch (vc_opcode) {
4079 case IAVF_VC_OP_CONFIG_TX_QUEUE:
4080 case IAVF_VC_OP_CONFIG_RX_QUEUE:
4081 case IAVF_VC_OP_CONFIG_VSI_QUEUES:
4082 case IAVF_VC_OP_ENABLE_QUEUES:
4083 case IAVF_VC_OP_DISABLE_QUEUES:
4084 case IAVF_VC_OP_GET_RSS_HENA_CAPS:
4085 case IAVF_VC_OP_SET_RSS_HENA:
4086 case IAVF_VC_OP_ADD_ETH_ADDR:
4087 case IAVF_VC_OP_DEL_ETH_ADDR:
4088 case IAVF_VC_OP_CONFIG_PROMISC:
4089 case IAVF_VC_OP_ADD_VLAN:
4090 case IAVF_VC_OP_DEL_VLAN:
4091 case IAVF_VC_OP_ENABLE_VLAN_STRIP:
4092 case IAVF_VC_OP_DISABLE_VLAN_STRIP:
4093 case IAVF_VC_OP_CONFIG_RSS_KEY:
4094 case IAVF_VC_OP_CONFIG_RSS_LUT:
4095 sc->sc_arq_retval = iavf_aq_vc_get_retval(iaq);
4096 sc->sc_arq_opcode = vc_opcode;
4097 cv_signal(&sc->sc_adminq_cv);
4098 break;
4099 }
4100
4101 memset(iaq, 0, sizeof(*iaq));
4102 iavf_aqb_put_locked(&sc->sc_arq_idle, aqb);
4103
4104 cons++;
4105 cons &= IAVF_AQ_MASK;
4106 } while (cons != prod);
4107
4108 sc->sc_arq_cons = cons;
4109 iavf_work_add(sc->sc_workq, &sc->sc_arq_refill);
4110
4111 return 1;
4112 }
4113
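/*
 * iavf_atq_post:
 *	Copy the descriptor (and the DMA address of an optional buffer)
 *	into the next free admin transmit queue slot and ring the tail
 *	register to hand it to the device.
 */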
4114 static int
4115 iavf_atq_post(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
4116 struct ixl_aq_buf *aqb)
4117 {
4118 struct ixl_aq_desc *atq, *slot;
4119 unsigned int prod;
4120
4121 atq = IXL_DMA_KVA(&sc->sc_atq);
4122 prod = sc->sc_atq_prod;
4123 slot = &atq[prod];
4124
4125 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
4126 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
4127
4128 *slot = *iaq;
4129 slot->iaq_flags |= htole16(IXL_AQ_SI);
4130 if (aqb != NULL) {
4131 ixl_aq_dva(slot, IXL_AQB_DVA(aqb));
4132 bus_dmamap_sync(sc->sc_dmat, IXL_AQB_MAP(aqb),
4133 0, IXL_AQB_LEN(aqb), BUS_DMASYNC_PREWRITE);
4134 iavf_aqb_put_locked(&sc->sc_atq_live, aqb);
4135 } else {
4136 ixl_aq_dva(slot, (bus_addr_t)0);
4137 }
4138
4139 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
4140 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
4141
4142 if (atomic_load_relaxed(&sc->sc_debuglevel) >= 3)
4143 iavf_aq_dump(sc, slot, "post");
4144
4145 prod++;
4146 prod &= IAVF_AQ_MASK;
4147 sc->sc_atq_prod = prod;
4148 iavf_wr(sc, sc->sc_aq_regs->atq_tail, prod);
4149 	return 0;
4150 }
4151
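/*
 * iavf_atq_poll:
 *	Busy-wait for up to tm milliseconds until the device's head
 *	pointer catches up with the driver's producer index, then check
 *	the completion status.  Returns 0 on IXL_AQ_RC_OK, otherwise
 *	ETIMEDOUT or EIO.
 */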
4152 static int
4153 iavf_atq_poll(struct iavf_softc *sc, unsigned int tm)
4154 {
4155 struct ixl_aq_desc *atq, *slot;
4156 struct ixl_aq_desc iaq;
4157 struct ixl_aq_buf *aqb;
4158 unsigned int prod;
4159 unsigned int t;
4160 int dbg;
4161
4162 dbg = atomic_load_relaxed(&sc->sc_debuglevel);
4163 atq = IXL_DMA_KVA(&sc->sc_atq);
4164 prod = sc->sc_atq_prod;
4165 slot = &atq[prod];
4166 t = 0;
4167
4168 while (iavf_rd(sc, sc->sc_aq_regs->atq_head) != prod) {
4169 delaymsec(1);
4170
4171 if (t++ > tm) {
4172 if (dbg >= 2) {
4173 device_printf(sc->sc_dev,
4174 "atq timedout\n");
4175 }
4176 return ETIMEDOUT;
4177 }
4178 }
4179
4180 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
4181 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTREAD);
4182 iaq = *slot;
4183 memset(slot, 0, sizeof(*slot));
4184 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
4185 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREREAD);
4186
4187 aqb = iavf_aqb_get_locked(&sc->sc_atq_live);
4188 if (aqb != NULL) {
4189 bus_dmamap_sync(sc->sc_dmat, IXL_AQB_MAP(aqb),
4190 0, IXL_AQB_LEN(aqb), BUS_DMASYNC_POSTWRITE);
4191 		/* no iavf_aqb_put() here; the caller still owns aqb and returns it to the idle list */
4192 }
4193
4194 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4195 if (dbg >= 2) {
4196 device_printf(sc->sc_dev,
4197 "atq retcode=0x%04x\n", le16toh(iaq.iaq_retval));
4198 }
4199 return EIO;
4200 }
4201
4202 return 0;
4203 }
4204
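/*
 * iavf_atq_done:
 *	Reclaim admin transmit queue descriptors the device has marked
 *	done (IXL_AQ_DD) and return their buffers to the idle list.
 *	Called with sc_adminq_lock held.
 */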
4205 static void
4206 iavf_atq_done(struct iavf_softc *sc)
4207 {
4208 struct ixl_aq_desc *atq, *slot;
4209 struct ixl_aq_buf *aqb;
4210 unsigned int cons;
4211 unsigned int prod;
4212
4213 KASSERT(mutex_owned(&sc->sc_adminq_lock));
4214
4215 prod = sc->sc_atq_prod;
4216 cons = sc->sc_atq_cons;
4217
4218 if (prod == cons)
4219 return;
4220
4221 atq = IXL_DMA_KVA(&sc->sc_atq);
4222
4223 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
4224 0, IXL_DMA_LEN(&sc->sc_atq),
4225 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
4226
4227 do {
4228 slot = &atq[cons];
4229 if (!ISSET(slot->iaq_flags, htole16(IXL_AQ_DD)))
4230 break;
4231
4232 if (ixl_aq_has_dva(slot) &&
4233 (aqb = iavf_aqb_get_locked(&sc->sc_atq_live)) != NULL) {
4234 bus_dmamap_sync(sc->sc_dmat, IXL_AQB_MAP(aqb),
4235 0, IXL_AQB_LEN(aqb), BUS_DMASYNC_POSTWRITE);
4236 iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
4237 }
4238
4239 memset(slot, 0, sizeof(*slot));
4240
4241 cons++;
4242 cons &= IAVF_AQ_MASK;
4243 } while (cons != prod);
4244
4245 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
4246 0, IXL_DMA_LEN(&sc->sc_atq),
4247 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4248
4249 sc->sc_atq_cons = cons;
4250 }
4251
4252 static int
4253 iavf_adminq_poll(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
4254 struct ixl_aq_buf *aqb, int retry)
4255 {
4256 int error;
4257
4258 mutex_enter(&sc->sc_adminq_lock);
4259 error = iavf_adminq_poll_locked(sc, iaq, aqb, retry);
4260 mutex_exit(&sc->sc_adminq_lock);
4261
4262 return error;
4263 }
4264
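/*
 * iavf_adminq_poll_locked:
 *	Post a virtchnl command and busy-wait for both the send
 *	completion and the matching reply.  Used before the interface
 *	is attached or with sc_adminq_lock already held.
 */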
4265 static int
4266 iavf_adminq_poll_locked(struct iavf_softc *sc,
4267 struct ixl_aq_desc *iaq, struct ixl_aq_buf *aqb, int retry)
4268 {
4269 uint32_t opcode;
4270 int error;
4271
4272 KASSERT(!sc->sc_attached || mutex_owned(&sc->sc_adminq_lock));
4273
4274 opcode = iavf_aq_vc_get_opcode(iaq);
4275
4276 iavf_atq_post(sc, iaq, aqb);
4277
4278 error = iavf_atq_poll(sc, retry);
4279 if (error)
4280 return error;
4281
4282 error = iavf_arq_poll(sc, opcode, retry);
4283
4284 if (error != 0 &&
4285 atomic_load_relaxed(&sc->sc_debuglevel) >= 1) {
4286 device_printf(sc->sc_dev, "%s failed=%d(polling)\n",
4287 iavf_aq_vc_opcode_str(iaq), error);
4288 }
4289
4290 return error;
4291 }
4292
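/*
 * iavf_adminq_exec:
 *	Post a virtchnl command and sleep on sc_adminq_cv until
 *	iavf_arq() processes the matching reply.  Returns the virtchnl
 *	return code, or an error from the wait.
 */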
4293 static int
4294 iavf_adminq_exec(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
4295 struct ixl_aq_buf *aqb)
4296 {
4297 int error;
4298 uint32_t opcode;
4299
4300 opcode = iavf_aq_vc_get_opcode(iaq);
4301
4302 mutex_enter(&sc->sc_adminq_lock);
4303 iavf_atq_post(sc, iaq, aqb);
4304
4305 error = iavf_arq_wait(sc, opcode);
4306 if (error == 0) {
4307 error = sc->sc_arq_retval;
4308 if (error != IAVF_VC_RC_SUCCESS &&
4309 atomic_load_relaxed(&sc->sc_debuglevel) >= 1) {
4310 device_printf(sc->sc_dev, "%s failed=%d\n",
4311 iavf_aq_vc_opcode_str(iaq), error);
4312 }
4313 }
4314
4315 mutex_exit(&sc->sc_adminq_lock);
4316 return error;
4317 }
4318
4319 static void
4320 iavf_process_version(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
4321 struct ixl_aq_buf *aqb)
4322 {
4323 struct iavf_vc_version_info *ver;
4324
4325 ver = (struct iavf_vc_version_info *)aqb->aqb_data;
4326 sc->sc_major_ver = le32toh(ver->major);
4327 sc->sc_minor_ver = le32toh(ver->minor);
4328 }
4329
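/*
 * iavf_process_vf_resources:
 *	Record what the PF granted in its GET_VF_RESOURCES reply:
 *	offload capabilities, maximum MTU, VSI and queue identifiers,
 *	and the MAC address (falling back to the fake one if the PF
 *	left it unset).
 */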
4330 static void
4331 iavf_process_vf_resources(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
4332 struct ixl_aq_buf *aqb)
4333 {
4334 struct iavf_vc_vf_resource *vf_res;
4335 struct iavf_vc_vsi_resource *vsi_res;
4336 uint8_t *enaddr;
4337 int mtu, dbg;
4338 char buf[512];
4339
4340 dbg = atomic_load_relaxed(&sc->sc_debuglevel);
4341 sc->sc_got_vf_resources = 1;
4342
4343 vf_res = aqb->aqb_data;
4344 sc->sc_max_vectors = le16toh(vf_res->max_vectors);
4345 if (le16toh(vf_res->num_vsis) == 0) {
4346 if (dbg >= 1) {
4347 device_printf(sc->sc_dev, "no vsi available\n");
4348 }
4349 return;
4350 }
4351 sc->sc_vf_cap = le32toh(vf_res->offload_flags);
4352 if (dbg >= 2) {
4353 snprintb(buf, sizeof(buf),
4354 IAVF_VC_OFFLOAD_FMT, sc->sc_vf_cap);
4355 device_printf(sc->sc_dev, "VF cap=%s\n", buf);
4356 }
4357
4358 mtu = le16toh(vf_res->max_mtu);
4359 if (IAVF_MIN_MTU < mtu && mtu < IAVF_MAX_MTU) {
4360 sc->sc_max_mtu = MIN(IAVF_MAX_MTU, mtu);
4361 }
4362
4363 vsi_res = &vf_res->vsi_res[0];
4364 sc->sc_vsi_id = le16toh(vsi_res->vsi_id);
4365 sc->sc_vf_id = le32toh(iaq->iaq_param[0]);
4366 sc->sc_qset_handle = le16toh(vsi_res->qset_handle);
4367 sc->sc_nqps_vsi = le16toh(vsi_res->num_queue_pairs);
4368 if (!iavf_is_etheranyaddr(vsi_res->default_mac)) {
4369 enaddr = vsi_res->default_mac;
4370 } else {
4371 enaddr = sc->sc_enaddr_fake;
4372 }
4373 memcpy(sc->sc_enaddr, enaddr, ETHER_ADDR_LEN);
4374 }
4375
4376 static void
4377 iavf_process_irq_map(struct iavf_softc *sc, struct ixl_aq_desc *iaq)
4378 {
4379 uint32_t retval;
4380
4381 retval = iavf_aq_vc_get_retval(iaq);
4382 if (retval != IAVF_VC_RC_SUCCESS) {
4383 return;
4384 }
4385
4386 sc->sc_got_irq_map = 1;
4387 }
4388
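/*
 * iavf_process_vc_event:
 *	Handle asynchronous PF events: update link state and media on
 *	LINK_CHANGE, schedule the reset task on RESET_IMPENDING.
 */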
4389 static void
4390 iavf_process_vc_event(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
4391 struct ixl_aq_buf *aqb)
4392 {
4393 struct iavf_vc_pf_event *event;
4394 struct ifnet *ifp = &sc->sc_ec.ec_if;
4395 const struct iavf_link_speed *speed;
4396 int link;
4397
4398 event = aqb->aqb_data;
4399 switch (event->event) {
4400 case IAVF_VC_EVENT_LINK_CHANGE:
4401 sc->sc_media_status = IFM_AVALID;
4402 sc->sc_media_active = IFM_ETHER;
4403 link = LINK_STATE_DOWN;
4404 if (event->link_status) {
4405 link = LINK_STATE_UP;
4406 sc->sc_media_status |= IFM_ACTIVE;
4407
4408 ifp->if_baudrate = 0;
4409 speed = iavf_find_link_speed(sc, event->link_speed);
4410 if (speed != NULL) {
4411 sc->sc_media_active |= speed->media;
4412 ifp->if_baudrate = speed->baudrate;
4413 }
4414 }
4415
4416 if (sc->sc_link_state != link) {
4417 sc->sc_link_state = link;
4418 if (sc->sc_attached) {
4419 if_link_state_change(ifp, link);
4420 }
4421 }
4422 break;
4423 case IAVF_VC_EVENT_RESET_IMPENDING:
4424 log(LOG_INFO, "%s: Reset warning received from the PF\n",
4425 ifp->if_xname);
4426 iavf_work_set(&sc->sc_reset_task, iavf_reset_request, sc);
4427 iavf_work_add(sc->sc_workq, &sc->sc_reset_task);
4428 break;
4429 }
4430 }
4431
4432 static void
4433 iavf_process_stats(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
4434 struct ixl_aq_buf *aqb)
4435 {
4436 struct iavf_stat_counters *isc;
4437 struct i40e_eth_stats *st;
4438
4439 KASSERT(mutex_owned(&sc->sc_adminq_lock));
4440
4441 st = aqb->aqb_data;
4442 isc = &sc->sc_stat_counters;
4443
4444 isc->isc_rx_bytes.ev_count = st->rx_bytes;
4445 isc->isc_rx_unicast.ev_count = st->rx_unicast;
4446 isc->isc_rx_multicast.ev_count = st->rx_multicast;
4447 isc->isc_rx_broadcast.ev_count = st->rx_broadcast;
4448 isc->isc_rx_discards.ev_count = st->rx_discards;
4449 isc->isc_rx_unknown_protocol.ev_count = st->rx_unknown_protocol;
4450
4451 isc->isc_tx_bytes.ev_count = st->tx_bytes;
4452 isc->isc_tx_unicast.ev_count = st->tx_unicast;
4453 isc->isc_tx_multicast.ev_count = st->tx_multicast;
4454 isc->isc_tx_broadcast.ev_count = st->tx_broadcast;
4455 isc->isc_tx_discards.ev_count = st->tx_discards;
4456 isc->isc_tx_errors.ev_count = st->tx_errors;
4457 }
4458
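/*
 * iavf_process_req_queues:
 *	Handle the reply to a REQUEST_QUEUES command.  If the PF
 *	reports more queue pairs than the VSI currently provides,
 *	retry the request once with the advertised maximum.
 */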
4459 static void
4460 iavf_process_req_queues(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
4461 struct ixl_aq_buf *aqb)
4462 {
4463 struct iavf_vc_res_request *req;
4464 struct ifnet *ifp;
4465 uint32_t vc_retval;
4466
4467 ifp = &sc->sc_ec.ec_if;
4468 req = aqb->aqb_data;
4469
4470 vc_retval = iavf_aq_vc_get_retval(iaq);
4471 if (vc_retval != IAVF_VC_RC_SUCCESS) {
4472 return;
4473 }
4474
4475 	if (req->num_queue_pairs < sc->sc_nqps_req) {
4476 log(LOG_INFO,
4477 "%s: requested %d queues, but only %d left.\n",
4478 ifp->if_xname,
4479 sc->sc_nqps_req, req->num_queue_pairs);
4480 }
4481
4482 if (sc->sc_nqps_vsi < req->num_queue_pairs) {
4483 if (!sc->sc_req_queues_retried) {
4484 /* req->num_queue_pairs indicates max qps */
4485 sc->sc_nqps_req = req->num_queue_pairs;
4486
4487 sc->sc_req_queues_retried = true;
4488 iavf_work_add(sc->sc_workq, &sc->sc_req_queues_task);
4489 }
4490 }
4491 }
4492
4493 static int
4494 iavf_get_version(struct iavf_softc *sc, struct ixl_aq_buf *aqb)
4495 {
4496 struct ixl_aq_desc iaq;
4497 struct iavf_vc_version_info *ver;
4498 int error;
4499
4500 memset(&iaq, 0, sizeof(iaq));
4501 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4502 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4503 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_VERSION);
4504 iaq.iaq_datalen = htole16(sizeof(struct iavf_vc_version_info));
4505
4506 ver = IXL_AQB_KVA(aqb);
4507 ver->major = htole32(IAVF_VF_MAJOR);
4508 ver->minor = htole32(IAVF_VF_MINOR);
4509
4510 sc->sc_major_ver = UINT_MAX;
4511 sc->sc_minor_ver = UINT_MAX;
4512
4513 if (sc->sc_attached) {
4514 error = iavf_adminq_poll(sc, &iaq, aqb, 250);
4515 } else {
4516 error = iavf_adminq_poll_locked(sc, &iaq, aqb, 250);
4517 }
4518
4519 if (error)
4520 return -1;
4521
4522 return 0;
4523 }
4524
4525 static int
4526 iavf_get_vf_resources(struct iavf_softc *sc, struct ixl_aq_buf *aqb)
4527 {
4528 struct ixl_aq_desc iaq;
4529 uint32_t *cap, cap0;
4530 int error;
4531
4532 memset(&iaq, 0, sizeof(iaq));
4533 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4534 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4535 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_GET_VF_RESOURCES);
4536
4537 if (sc->sc_major_ver > 0) {
4538 cap0 = IAVF_VC_OFFLOAD_L2 |
4539 IAVF_VC_OFFLOAD_VLAN |
4540 IAVF_VC_OFFLOAD_RSS_PF |
4541 IAVF_VC_OFFLOAD_REQ_QUEUES;
4542
4543 cap = IXL_AQB_KVA(aqb);
4544 *cap = htole32(cap0);
4545 iaq.iaq_datalen = htole16(sizeof(*cap));
4546 }
4547
4548 sc->sc_got_vf_resources = 0;
4549 if (sc->sc_attached) {
4550 error = iavf_adminq_poll(sc, &iaq, aqb, 250);
4551 } else {
4552 error = iavf_adminq_poll_locked(sc, &iaq, aqb, 250);
4553 }
4554
4555 if (error)
4556 return -1;
4557 return 0;
4558 }
4559
4560 static int
4561 iavf_get_stats(struct iavf_softc *sc)
4562 {
4563 struct ixl_aq_desc iaq;
4564 struct ixl_aq_buf *aqb;
4565 struct iavf_vc_queue_select *qsel;
4566 int error;
4567
4568 mutex_enter(&sc->sc_adminq_lock);
4569 aqb = iavf_aqb_get_locked(&sc->sc_atq_idle);
4570 mutex_exit(&sc->sc_adminq_lock);
4571
4572 if (aqb == NULL)
4573 return ENOMEM;
4574
4575 qsel = IXL_AQB_KVA(aqb);
4576 memset(qsel, 0, sizeof(*qsel));
4577 qsel->vsi_id = htole16(sc->sc_vsi_id);
4578
4579 memset(&iaq, 0, sizeof(iaq));
4580
4581 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4582 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4583 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_GET_STATS);
4584 iaq.iaq_datalen = htole16(sizeof(*qsel));
4585
4586 if (atomic_load_relaxed(&sc->sc_debuglevel) >= 3) {
4587 device_printf(sc->sc_dev, "post GET_STATS command\n");
4588 }
4589
4590 mutex_enter(&sc->sc_adminq_lock);
4591 error = iavf_atq_post(sc, &iaq, aqb);
4592 mutex_exit(&sc->sc_adminq_lock);
4593
4594 return error;
4595 }
4596
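/*
 * iavf_config_irq_map:
 *	Tell the PF how queues map onto interrupt vectors: with a
 *	single vector all queues share vector 0, otherwise each queue
 *	pair gets its own vector (1..n) and a final entry maps no
 *	queues to vector 0.
 */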
4597 static int
4598 iavf_config_irq_map(struct iavf_softc *sc, struct ixl_aq_buf *aqb)
4599 {
4600 struct ixl_aq_desc iaq;
4601 struct iavf_vc_vector_map *vec;
4602 struct iavf_vc_irq_map_info *map;
4603 struct iavf_rx_ring *rxr;
4604 struct iavf_tx_ring *txr;
4605 unsigned int num_vec;
4606 int error;
4607
4608 map = IXL_AQB_KVA(aqb);
4609 vec = map->vecmap;
4610 num_vec = 0;
4611
4612 if (sc->sc_nintrs == 1) {
4613 vec[0].vsi_id = htole16(sc->sc_vsi_id);
4614 vec[0].vector_id = htole16(0);
4615 vec[0].rxq_map = htole16(iavf_allqueues(sc));
4616 vec[0].txq_map = htole16(iavf_allqueues(sc));
4617 vec[0].rxitr_idx = htole16(IAVF_NOITR);
4618 		vec[0].txitr_idx = htole16(IAVF_NOITR);
4619 num_vec = 1;
4620 } else if (sc->sc_nintrs > 1) {
4621 KASSERT(sc->sc_nqps_alloc >= (sc->sc_nintrs - 1));
4622 for (; num_vec < (sc->sc_nintrs - 1); num_vec++) {
4623 rxr = sc->sc_qps[num_vec].qp_rxr;
4624 txr = sc->sc_qps[num_vec].qp_txr;
4625
4626 vec[num_vec].vsi_id = htole16(sc->sc_vsi_id);
4627 vec[num_vec].vector_id = htole16(num_vec + 1);
4628 vec[num_vec].rxq_map = htole16(__BIT(rxr->rxr_qid));
4629 vec[num_vec].txq_map = htole16(__BIT(txr->txr_qid));
4630 vec[num_vec].rxitr_idx = htole16(IAVF_ITR_RX);
4631 vec[num_vec].txitr_idx = htole16(IAVF_ITR_TX);
4632 }
4633
4634 vec[num_vec].vsi_id = htole16(sc->sc_vsi_id);
4635 vec[num_vec].vector_id = htole16(0);
4636 vec[num_vec].rxq_map = htole16(0);
4637 vec[num_vec].txq_map = htole16(0);
4638 num_vec++;
4639 }
4640
4641 map->num_vectors = htole16(num_vec);
4642
4643 memset(&iaq, 0, sizeof(iaq));
4644 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4645 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4646 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_CONFIG_IRQ_MAP);
4647 iaq.iaq_datalen = htole16(sizeof(*map) + sizeof(*vec) * num_vec);
4648
4649 if (sc->sc_attached) {
4650 error = iavf_adminq_poll(sc, &iaq, aqb, 250);
4651 } else {
4652 error = iavf_adminq_poll_locked(sc, &iaq, aqb, 250);
4653 }
4654
4655 if (error)
4656 return -1;
4657
4658 return 0;
4659 }
4660
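/*
 * iavf_config_vsi_queues:
 *	Describe every active TX/RX ring to the PF: ring DMA addresses,
 *	ring sizes, receive buffer size and maximum packet size.
 */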
4661 static int
4662 iavf_config_vsi_queues(struct iavf_softc *sc)
4663 {
4664 struct ifnet *ifp = &sc->sc_ec.ec_if;
4665 struct ixl_aq_desc iaq;
4666 struct ixl_aq_buf *aqb;
4667 struct iavf_vc_queue_config_info *config;
4668 struct iavf_vc_txq_info *txq;
4669 struct iavf_vc_rxq_info *rxq;
4670 struct iavf_rx_ring *rxr;
4671 struct iavf_tx_ring *txr;
4672 uint32_t rxmtu_max;
4673 unsigned int i;
4674 int error;
4675
4676 rxmtu_max = ifp->if_mtu + IAVF_MTU_ETHERLEN;
4677
4678 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
4679
4680 if (aqb == NULL)
4681 return -1;
4682
4683 config = IXL_AQB_KVA(aqb);
4684 memset(config, 0, sizeof(*config));
4685 config->vsi_id = htole16(sc->sc_vsi_id);
4686 config->num_queue_pairs = htole16(sc->sc_nqueue_pairs);
4687
4688 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
4689 rxr = sc->sc_qps[i].qp_rxr;
4690 txr = sc->sc_qps[i].qp_txr;
4691
4692 txq = &config->qpair[i].txq;
4693 txq->vsi_id = htole16(sc->sc_vsi_id);
4694 txq->queue_id = htole16(txr->txr_qid);
4695 txq->ring_len = htole16(sc->sc_tx_ring_ndescs);
4696 txq->headwb_ena = 0;
4697 txq->dma_ring_addr = htole64(IXL_DMA_DVA(&txr->txr_mem));
4698 txq->dma_headwb_addr = 0;
4699
4700 rxq = &config->qpair[i].rxq;
4701 rxq->vsi_id = htole16(sc->sc_vsi_id);
4702 rxq->queue_id = htole16(rxr->rxr_qid);
4703 rxq->ring_len = htole16(sc->sc_rx_ring_ndescs);
4704 rxq->splithdr_ena = 0;
4705 rxq->databuf_size = htole32(IAVF_MCLBYTES);
4706 rxq->max_pkt_size = htole32(rxmtu_max);
4707 rxq->dma_ring_addr = htole64(IXL_DMA_DVA(&rxr->rxr_mem));
4708 rxq->rx_split_pos = 0;
4709 }
4710
4711 memset(&iaq, 0, sizeof(iaq));
4712 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4713 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4714 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_CONFIG_VSI_QUEUES);
4715 iaq.iaq_datalen = htole16(sizeof(*config) +
4716 sizeof(config->qpair[0]) * sc->sc_nqueue_pairs);
4717
4718 error = iavf_adminq_exec(sc, &iaq, aqb);
4719 if (error != IAVF_VC_RC_SUCCESS) {
4720 return -1;
4721 }
4722
4723 return 0;
4724 }
4725
4726 static int
4727 iavf_config_hena(struct iavf_softc *sc)
4728 {
4729 struct ixl_aq_desc iaq;
4730 struct ixl_aq_buf *aqb;
4731 uint64_t *caps;
4732 int error;
4733
4734 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
4735
4736 if (aqb == NULL)
4737 return -1;
4738
4739 caps = IXL_AQB_KVA(aqb);
4740 	if (sc->sc_mac_type == I40E_MAC_X722_VF)
4741 		*caps = IXL_RSS_HENA_DEFAULT_X722;
4742 	else
4743 		*caps = IXL_RSS_HENA_DEFAULT_XL710;
4744
4745 memset(&iaq, 0, sizeof(iaq));
4746 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4747 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4748 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_SET_RSS_HENA);
4749 iaq.iaq_datalen = htole16(sizeof(*caps));
4750
4751 error = iavf_adminq_exec(sc, &iaq, aqb);
4752 if (error != IAVF_VC_RC_SUCCESS) {
4753 return -1;
4754 }
4755
4756 return 0;
4757 }
4758
4759 static inline void
4760 iavf_get_default_rss_key(uint8_t *buf, size_t len)
4761 {
4762 uint8_t rss_seed[RSS_KEYSIZE];
4763 size_t cplen;
4764
4765 cplen = MIN(len, sizeof(rss_seed));
4766 rss_getkey(rss_seed);
4767
4768 memcpy(buf, rss_seed, cplen);
4769 if (cplen < len)
4770 memset(buf + cplen, 0, len - cplen);
4771 }
4772
4773 static int
4774 iavf_config_rss_key(struct iavf_softc *sc)
4775 {
4776 struct ixl_aq_desc iaq;
4777 struct ixl_aq_buf *aqb;
4778 struct iavf_vc_rss_key *rss_key;
4779 size_t key_len;
4780 int rv;
4781
4782 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
4783 if (aqb == NULL)
4784 return -1;
4785
4786 rss_key = IXL_AQB_KVA(aqb);
4787 rss_key->vsi_id = htole16(sc->sc_vsi_id);
4788 key_len = IXL_RSS_KEY_SIZE;
4789 iavf_get_default_rss_key(rss_key->key, key_len);
4790 rss_key->key_len = key_len;
4791
4792 memset(&iaq, 0, sizeof(iaq));
4793 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4794 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4795 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_CONFIG_RSS_KEY);
4796 iaq.iaq_datalen = htole16(sizeof(*rss_key) - sizeof(rss_key->pad)
4797 + (sizeof(rss_key->key[0]) * key_len));
4798
4799 rv = iavf_adminq_exec(sc, &iaq, aqb);
4800 if (rv != IAVF_VC_RC_SUCCESS) {
4801 return -1;
4802 }
4803
4804 return 0;
4805 }
4806
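/*
 * iavf_config_rss_lut:
 *	Build the RSS lookup table by spreading its entries round-robin
 *	over the active queue pairs and hand it to the PF.
 */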
4807 static int
4808 iavf_config_rss_lut(struct iavf_softc *sc)
4809 {
4810 struct ixl_aq_desc iaq;
4811 struct ixl_aq_buf *aqb;
4812 struct iavf_vc_rss_lut *rss_lut;
4813 uint8_t *lut, v;
4814 int rv, i;
4815
4819 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
4820 if (aqb == NULL)
4821 return -1;
4822
4823 rss_lut = IXL_AQB_KVA(aqb);
4824 rss_lut->vsi_id = htole16(sc->sc_vsi_id);
4825 rss_lut->lut_entries = htole16(IXL_RSS_VSI_LUT_SIZE);
4826
4827 lut = rss_lut->lut;
4828 for (i = 0; i < IXL_RSS_VSI_LUT_SIZE; i++) {
4829 v = i % sc->sc_nqueue_pairs;
4830 v &= IAVF_RSS_VSI_LUT_ENTRY_MASK;
4831 lut[i] = v;
4832 }
4833
4834 memset(&iaq, 0, sizeof(iaq));
4835 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4836 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4837 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_CONFIG_RSS_LUT);
4838 iaq.iaq_datalen = htole16(sizeof(*rss_lut) - sizeof(rss_lut->pad)
4839 + (sizeof(rss_lut->lut[0]) * IXL_RSS_VSI_LUT_SIZE));
4840
4841 rv = iavf_adminq_exec(sc, &iaq, aqb);
4842 if (rv != IAVF_VC_RC_SUCCESS) {
4843 return -1;
4844 }
4845
4846 return 0;
4847 }
4848
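/*
 * iavf_queue_select:
 *	Send a queue_select covering all configured queues with the
 *	given virtchnl opcode, typically IAVF_VC_OP_ENABLE_QUEUES or
 *	IAVF_VC_OP_DISABLE_QUEUES.
 */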
4849 static int
4850 iavf_queue_select(struct iavf_softc *sc, int opcode)
4851 {
4852 struct ixl_aq_desc iaq;
4853 struct ixl_aq_buf *aqb;
4854 struct iavf_vc_queue_select *qsel;
4855 int error;
4856
4857 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
4858 if (aqb == NULL)
4859 return -1;
4860
4861 qsel = IXL_AQB_KVA(aqb);
4862 qsel->vsi_id = htole16(sc->sc_vsi_id);
4863 qsel->rx_queues = htole32(iavf_allqueues(sc));
4864 qsel->tx_queues = htole32(iavf_allqueues(sc));
4865
4866 memset(&iaq, 0, sizeof(iaq));
4867 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4868 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4869 iavf_aq_vc_set_opcode(&iaq, opcode);
4870 iaq.iaq_datalen = htole16(sizeof(*qsel));
4871
4872 error = iavf_adminq_exec(sc, &iaq, aqb);
4873 if (error != IAVF_VC_RC_SUCCESS) {
4874 return -1;
4875 }
4876
4877 return 0;
4878 }
4879
4880 static int
4881 iavf_request_queues(struct iavf_softc *sc, unsigned int req_num)
4882 {
4883 struct ixl_aq_desc iaq;
4884 struct ixl_aq_buf *aqb;
4885 struct iavf_vc_res_request *req;
4886 int rv;
4887
4888 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
4889 if (aqb == NULL)
4890 return ENOMEM;
4891
4892 req = IXL_AQB_KVA(aqb);
4893 req->num_queue_pairs = req_num;
4894
4895 memset(&iaq, 0, sizeof(iaq));
4896 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4897 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4898 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_REQUEST_QUEUES);
4899 iaq.iaq_datalen = htole16(sizeof(*req));
4900
4901 mutex_enter(&sc->sc_adminq_lock);
4902 rv = iavf_atq_post(sc, &iaq, aqb);
4903 mutex_exit(&sc->sc_adminq_lock);
4904
4905 return rv;
4906 }
4907
4908 static int
4909 iavf_reset_vf(struct iavf_softc *sc)
4910 {
4911 struct ixl_aq_desc iaq;
4912 int error;
4913
4914 memset(&iaq, 0, sizeof(iaq));
4915 iaq.iaq_flags = htole16(IXL_AQ_RD);
4916 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4917 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_RESET_VF);
4918 iaq.iaq_datalen = htole16(0);
4919
4920 iavf_wr(sc, I40E_VFGEN_RSTAT, IAVF_VFR_INPROGRESS);
4921
4922 mutex_enter(&sc->sc_adminq_lock);
4923 error = iavf_atq_post(sc, &iaq, NULL);
4924 mutex_exit(&sc->sc_adminq_lock);
4925
4926 return error;
4927 }
4928
4929 static int
4930 iavf_eth_addr(struct iavf_softc *sc, const uint8_t *addr, uint32_t opcode)
4931 {
4932 struct ixl_aq_desc iaq;
4933 struct ixl_aq_buf *aqb;
4934 struct iavf_vc_eth_addr_list *addrs;
4935 struct iavf_vc_eth_addr *vcaddr;
4936 int rv;
4937
4938 KASSERT(sc->sc_attached);
4939 KASSERT(opcode == IAVF_VC_OP_ADD_ETH_ADDR ||
4940 opcode == IAVF_VC_OP_DEL_ETH_ADDR);
4941
4942 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
4943 if (aqb == NULL)
4944 return -1;
4945
4946 addrs = IXL_AQB_KVA(aqb);
4947 addrs->vsi_id = htole16(sc->sc_vsi_id);
4948 addrs->num_elements = htole16(1);
4949 vcaddr = addrs->list;
4950 memcpy(vcaddr->addr, addr, ETHER_ADDR_LEN);
4951
4952 memset(&iaq, 0, sizeof(iaq));
4953 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4954 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4955 iavf_aq_vc_set_opcode(&iaq, opcode);
4956 iaq.iaq_datalen = htole16(sizeof(*addrs) + sizeof(*vcaddr));
4957
4958 if (sc->sc_resetting) {
4959 mutex_enter(&sc->sc_adminq_lock);
4960 rv = iavf_adminq_poll_locked(sc, &iaq, aqb, 250);
4961 iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
4962 mutex_exit(&sc->sc_adminq_lock);
4963 } else {
4964 rv = iavf_adminq_exec(sc, &iaq, aqb);
4965 }
4966
4967 if (rv != IAVF_VC_RC_SUCCESS) {
4968 return -1;
4969 }
4970
4971 return 0;
4972 }
4973
4974 static int
4975 iavf_config_promisc_mode(struct iavf_softc *sc, int unicast, int multicast)
4976 {
4977 struct ixl_aq_desc iaq;
4978 struct ixl_aq_buf *aqb;
4979 struct iavf_vc_promisc_info *promisc;
4980 int flags;
4981
4982 KASSERT(sc->sc_attached);
4983
4984 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
4985 if (aqb == NULL)
4986 return -1;
4987
4988 flags = 0;
4989 if (unicast)
4990 flags |= IAVF_FLAG_VF_UNICAST_PROMISC;
4991 if (multicast)
4992 flags |= IAVF_FLAG_VF_MULTICAST_PROMISC;
4993
4994 promisc = IXL_AQB_KVA(aqb);
4995 promisc->vsi_id = htole16(sc->sc_vsi_id);
4996 promisc->flags = htole16(flags);
4997
4998 memset(&iaq, 0, sizeof(iaq));
4999 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
5000 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
5001 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_CONFIG_PROMISC);
5002 iaq.iaq_datalen = htole16(sizeof(*promisc));
5003
5004 if (iavf_adminq_exec(sc, &iaq, aqb) != IAVF_VC_RC_SUCCESS) {
5005 return -1;
5006 }
5007
5008 return 0;
5009 }
5010
5011 static int
5012 iavf_config_vlan_stripping(struct iavf_softc *sc, int eccap)
5013 {
5014 struct ixl_aq_desc iaq;
5015 uint32_t opcode;
5016
5017 opcode = ISSET(eccap, ETHERCAP_VLAN_HWTAGGING) ?
5018 IAVF_VC_OP_ENABLE_VLAN_STRIP : IAVF_VC_OP_DISABLE_VLAN_STRIP;
5019
5020 memset(&iaq, 0, sizeof(iaq));
5021 iaq.iaq_flags = htole16(IXL_AQ_RD);
5022 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
5023 iavf_aq_vc_set_opcode(&iaq, opcode);
5024 iaq.iaq_datalen = htole16(0);
5025
5026 if (iavf_adminq_exec(sc, &iaq, NULL) != IAVF_VC_RC_SUCCESS) {
5027 return -1;
5028 }
5029
5030 return 0;
5031 }
5032
5033 static int
5034 iavf_config_vlan_id(struct iavf_softc *sc, uint16_t vid, uint32_t opcode)
5035 {
5036 struct ixl_aq_desc iaq;
5037 struct ixl_aq_buf *aqb;
5038 struct iavf_vc_vlan_filter *vfilter;
5039 int rv;
5040
5041 KASSERT(opcode == IAVF_VC_OP_ADD_VLAN || opcode == IAVF_VC_OP_DEL_VLAN);
5042
5043 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
5044
5045 if (aqb == NULL)
5046 return -1;
5047
5048 vfilter = IXL_AQB_KVA(aqb);
5049 vfilter->vsi_id = htole16(sc->sc_vsi_id);
5050 vfilter->num_vlan_id = htole16(1);
5051 vfilter->vlan_id[0] = vid;
5052
5053 memset(&iaq, 0, sizeof(iaq));
5054 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
5055 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
5056 iavf_aq_vc_set_opcode(&iaq, opcode);
5057 iaq.iaq_datalen = htole16(sizeof(*vfilter) + sizeof(vid));
5058
5059 if (sc->sc_resetting) {
5060 mutex_enter(&sc->sc_adminq_lock);
5061 rv = iavf_adminq_poll_locked(sc, &iaq, aqb, 250);
5062 iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
5063 mutex_exit(&sc->sc_adminq_lock);
5064 } else {
5065 rv = iavf_adminq_exec(sc, &iaq, aqb);
5066 }
5067
5068 if (rv != IAVF_VC_RC_SUCCESS) {
5069 return -1;
5070 }
5071
5072 return 0;
5073 }
5074
5075 static void
5076 iavf_post_request_queues(void *xsc)
5077 {
5078 struct iavf_softc *sc;
5079 struct ifnet *ifp;
5080
5081 sc = xsc;
5082 ifp = &sc->sc_ec.ec_if;
5083
5084 if (!ISSET(sc->sc_vf_cap, IAVF_VC_OFFLOAD_REQ_QUEUES)) {
5085 log(LOG_DEBUG, "%s: the VF has no REQ_QUEUES capability\n",
5086 ifp->if_xname);
5087 return;
5088 }
5089
5090 log(LOG_INFO, "%s: try to change the number of queue pairs"
5091 " (vsi %u, %u allocated, request %u)\n",
5092 ifp->if_xname,
5093 sc->sc_nqps_vsi, sc->sc_nqps_alloc, sc->sc_nqps_req);
5094 iavf_request_queues(sc, sc->sc_nqps_req);
5095 }
5096
5097 static bool
5098 iavf_sysctlnode_is_rx(struct sysctlnode *node)
5099 {
5100
5101 if (strstr(node->sysctl_parent->sysctl_name, "rx") != NULL)
5102 return true;
5103
5104 return false;
5105 }
5106
5107 static int
5108 iavf_sysctl_itr_handler(SYSCTLFN_ARGS)
5109 {
5110 struct sysctlnode node = *rnode;
5111 struct iavf_softc *sc = (struct iavf_softc *)node.sysctl_data;
5112 uint32_t newitr, *itrptr;
5113 unsigned int i;
5114 int itr, error;
5115
5116 if (iavf_sysctlnode_is_rx(&node)) {
5117 itrptr = &sc->sc_rx_itr;
5118 itr = IAVF_ITR_RX;
5119 } else {
5120 itrptr = &sc->sc_tx_itr;
5121 itr = IAVF_ITR_TX;
5122 }
5123
5124 newitr = *itrptr;
5125 node.sysctl_data = &newitr;
5126 node.sysctl_size = sizeof(newitr);
5127
5128 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5129 if (error || newp == NULL)
5130 return error;
5131
5132 if (newitr > 0x07FF)
5133 return EINVAL;
5134
5135 *itrptr = newitr;
5136
5137 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
5138 iavf_wr(sc, I40E_VFINT_ITRN1(itr, i), *itrptr);
5139 }
5140 iavf_wr(sc, I40E_VFINT_ITR01(itr), *itrptr);
5141
5142 return 0;
5143 }
5144
5145 static void
5146 iavf_workq_work(struct work *wk, void *context)
5147 {
5148 struct iavf_work *work;
5149
5150 work = container_of(wk, struct iavf_work, ixw_cookie);
5151
5152 atomic_swap_uint(&work->ixw_added, 0);
5153 work->ixw_func(work->ixw_arg);
5154 }
5155
5156 static struct workqueue *
5157 iavf_workq_create(const char *name, pri_t prio, int ipl, int flags)
5158 {
5159 struct workqueue *wq;
5160 int error;
5161
5162 error = workqueue_create(&wq, name, iavf_workq_work, NULL,
5163 prio, ipl, flags);
5164
5165 if (error)
5166 return NULL;
5167
5168 return wq;
5169 }
5170
5171 static void
5172 iavf_workq_destroy(struct workqueue *wq)
5173 {
5174
5175 workqueue_destroy(wq);
5176 }
5177
5178 static int
5179 iavf_work_set(struct iavf_work *work, void (*func)(void *), void *arg)
5180 {
5181
5182 if (work->ixw_added != 0)
5183 return -1;
5184
5185 memset(work, 0, sizeof(*work));
5186 work->ixw_func = func;
5187 work->ixw_arg = arg;
5188
5189 return 0;
5190 }
5191
5192 static void
5193 iavf_work_add(struct workqueue *wq, struct iavf_work *work)
5194 {
5195 if (atomic_cas_uint(&work->ixw_added, 0, 1) != 0)
5196 return;
5197
5198 kpreempt_disable();
5199 workqueue_enqueue(wq, &work->ixw_cookie, NULL);
5200 kpreempt_enable();
5201 }
5202
5203 static void
5204 iavf_work_wait(struct workqueue *wq, struct iavf_work *work)
5205 {
5206
5207 workqueue_wait(wq, &work->ixw_cookie);
5208 }
5209
5210 static void
5211 iavf_evcnt_attach(struct evcnt *ec,
5212 const char *n0, const char *n1)
5213 {
5214
5215 evcnt_attach_dynamic(ec, EVCNT_TYPE_MISC,
5216 NULL, n0, n1);
5217 }
5218
5219 MODULE(MODULE_CLASS_DRIVER, if_iavf, "pci");
5220
5221 #ifdef _MODULE
5222 #include "ioconf.c"
5223 #endif
5224
5225 #ifdef _MODULE
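/*
 * iavf_parse_modprop:
 *	Read optional module properties (debug_level, max_qps, tx/rx
 *	ITR values, ring sizes), validate their ranges and override
 *	the driver defaults in iavf_params.
 */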
5226 static void
5227 iavf_parse_modprop(prop_dictionary_t dict)
5228 {
5229 prop_object_t obj;
5230 int64_t val;
5231 uint32_t n;
5232
5233 if (dict == NULL)
5234 return;
5235
5236 obj = prop_dictionary_get(dict, "debug_level");
5237 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
5238 val = prop_number_signed_value((prop_number_t)obj);
5239
5240 if (val > 0) {
5241 iavf_params.debug = val;
5242 printf("iavf: debug level=%d\n", iavf_params.debug);
5243 }
5244 }
5245
5246 obj = prop_dictionary_get(dict, "max_qps");
5247 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
5248 val = prop_number_signed_value((prop_number_t)obj);
5249
5250 if (val < 1 || val > I40E_MAX_VF_QUEUES) {
5251 printf("iavf: invalid queue size(1 <= n <= %d)",
5252 I40E_MAX_VF_QUEUES);
5253 } else {
5254 iavf_params.max_qps = val;
5255 printf("iavf: request queue pair = %u\n",
5256 iavf_params.max_qps);
5257 }
5258 }
5259
5260 obj = prop_dictionary_get(dict, "tx_itr");
5261 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
5262 val = prop_number_signed_value((prop_number_t)obj);
5263 if (val > 0x07FF) {
5264 printf("iavf: TX ITR too big (%" PRId64 " <= %d)",
5265 val, 0x7FF);
5266 } else {
5267 iavf_params.tx_itr = val;
5268 printf("iavf: TX ITR = 0x%" PRIx32,
5269 iavf_params.tx_itr);
5270 }
5271 }
5272
5273 obj = prop_dictionary_get(dict, "rx_itr");
5274 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
5275 val = prop_number_signed_value((prop_number_t)obj);
5276 if (val > 0x07FF) {
5277 printf("iavf: RX ITR too big (%" PRId64 " <= %d)",
5278 val, 0x7FF);
5279 } else {
5280 iavf_params.rx_itr = val;
5281 printf("iavf: RX ITR = 0x%" PRIx32,
5282 iavf_params.rx_itr);
5283 }
5284 }
5285
5286 obj = prop_dictionary_get(dict, "tx_ndescs");
5287 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
5288 val = prop_number_signed_value((prop_number_t)obj);
5289 n = 1U << (fls32(val) - 1);
5290 if (val != (int64_t) n) {
5291 printf("iavf: TX desc invlaid size"
5292 "(%" PRId64 " != %" PRIu32 ")\n", val, n);
5293 } else if (val > (8192 - 32)) {
5294 printf("iavf: Tx desc too big (%" PRId64 " > %d)",
5295 val, (8192 - 32));
5296 } else {
5297 iavf_params.tx_ndescs = val;
5298 printf("iavf: TX descriptors = 0x%04x",
5299 iavf_params.tx_ndescs);
5300 }
5301 }
5302
5303 obj = prop_dictionary_get(dict, "rx_ndescs");
5304 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
5305 val = prop_number_signed_value((prop_number_t)obj);
5306 n = 1U << (fls32(val) - 1);
5307 if (val != (int64_t) n) {
5308 printf("iavf: RX desc invlaid size"
5309 "(%" PRId64 " != %" PRIu32 ")\n", val, n);
5310 } else if (val > (8192 - 32)) {
5311 printf("iavf: Rx desc too big (%" PRId64 " > %d)",
5312 val, (8192 - 32));
5313 } else {
5314 iavf_params.rx_ndescs = val;
5315 printf("iavf: RX descriptors = 0x%04x",
5316 iavf_params.rx_ndescs);
5317 }
5318 }
5319 }
5320 #endif
5321
5322 static int
5323 if_iavf_modcmd(modcmd_t cmd, void *opaque)
5324 {
5325 int error = 0;
5326
5327 #ifdef _MODULE
5328 switch (cmd) {
5329 case MODULE_CMD_INIT:
5330 iavf_parse_modprop((prop_dictionary_t)opaque);
5331 error = config_init_component(cfdriver_ioconf_if_iavf,
5332 cfattach_ioconf_if_iavf, cfdata_ioconf_if_iavf);
5333 break;
5334 case MODULE_CMD_FINI:
5335 error = config_fini_component(cfdriver_ioconf_if_iavf,
5336 cfattach_ioconf_if_iavf, cfdata_ioconf_if_iavf);
5337 break;
5338 default:
5339 error = ENOTTY;
5340 break;
5341 }
5342 #endif
5343
5344 return error;
5345 }
5346