1 /* $NetBSD: if_iavf.c,v 1.2 2020/09/08 13:28:51 jakllsch Exp $ */
2
3 /*
4 * Copyright (c) 2013-2015, Intel Corporation
5 * All rights reserved.
6
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
12 *
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * 3. Neither the name of the Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 /*
35 * Copyright (c) 2016,2017 David Gwynne <dlg (at) openbsd.org>
36 * Copyright (c) 2019 Jonathan Matthew <jmatthew (at) openbsd.org>
37 *
38 * Permission to use, copy, modify, and distribute this software for any
39 * purpose with or without fee is hereby granted, provided that the above
40 * copyright notice and this permission notice appear in all copies.
41 *
42 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
43 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
44 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
45 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
46 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
47 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
48 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
49 */
50
51 /*
52 * Copyright (c) 2020 Internet Initiative Japan, Inc.
53 * All rights reserved.
54 *
55 * Redistribution and use in source and binary forms, with or without
56 * modification, are permitted provided that the following conditions
57 * are met:
58 * 1. Redistributions of source code must retain the above copyright
59 * notice, this list of conditions and the following disclaimer.
60 * 2. Redistributions in binary form must reproduce the above copyright
61 * notice, this list of conditions and the following disclaimer in the
62 * documentation and/or other materials provided with the distribution.
63 *
64 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
65 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
66 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
67 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
68 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
69 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
70 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
71 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
72 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
73 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
74 * POSSIBILITY OF SUCH DAMAGE.
75 */
76
77 #include <sys/cdefs.h>
78 __KERNEL_RCSID(0, "$NetBSD: if_iavf.c,v 1.2 2020/09/08 13:28:51 jakllsch Exp $");
79
80 #include <sys/param.h>
81 #include <sys/types.h>
82
83 #include <sys/bitops.h>
84 #include <sys/bus.h>
85 #include <sys/cprng.h>
86 #include <sys/cpu.h>
87 #include <sys/device.h>
88 #include <sys/evcnt.h>
89 #include <sys/interrupt.h>
90 #include <sys/kmem.h>
91 #include <sys/module.h>
92 #include <sys/mutex.h>
93 #include <sys/pcq.h>
94 #include <sys/queue.h>
95 #include <sys/syslog.h>
96 #include <sys/workqueue.h>
97
98 #include <net/bpf.h>
99 #include <net/if.h>
100 #include <net/if_dl.h>
101 #include <net/if_media.h>
102 #include <net/if_ether.h>
103 #include <net/rss_config.h>
104
105 #include <netinet/tcp.h> /* for struct tcphdr */
106 #include <netinet/udp.h> /* for struct udphdr */
107
108 #include <dev/pci/pcivar.h>
109 #include <dev/pci/pcidevs.h>
110
111 #include <dev/pci/if_ixlreg.h>
112 #include <dev/pci/if_ixlvar.h>
113 #include <dev/pci/if_iavfvar.h>
114
115 #include <prop/proplib.h>
116
117 #define IAVF_PCIREG PCI_MAPREG_START
118 #define IAVF_AQ_NUM 256
119 #define IAVF_AQ_MASK (IAVF_AQ_NUM-1)
120 #define IAVF_AQ_ALIGN 64
121 #define IAVF_AQ_BUFLEN 4096
122 #define I40E_AQ_LARGE_BUF 512
123 #define IAVF_VF_MAJOR 1
124 #define IAVF_VF_MINOR 1
125
126 #define IAVF_VFR_INPROGRESS 0
127 #define IAVF_VFR_COMPLETED 1
128 #define IAVF_VFR_VFACTIVE 2
129
130 #define IAVF_REG_VFR 0xdeadbeef
131
132 #define IAVF_ITR_RX 0x0
133 #define IAVF_ITR_TX 0x1
134 #define IAVF_ITR_MISC 0x2
135 #define IAVF_NOITR 0x3
136
137 #define IAVF_MTU_ETHERLEN (ETHER_HDR_LEN \
138 + ETHER_CRC_LEN)
139 #define IAVF_MAX_MTU (9600 - IAVF_MTU_ETHERLEN)
140 #define IAVF_MIN_MTU (ETHER_MIN_LEN - ETHER_CRC_LEN)
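/*
 * Editorial note (not in the original source): with the usual values
 * ETHER_HDR_LEN == 14 and ETHER_CRC_LEN == 4, IAVF_MTU_ETHERLEN is 18,
 * so IAVF_MAX_MTU works out to 9600 - 18 = 9582 bytes, and with
 * ETHER_MIN_LEN == 64 the minimum MTU is 64 - 4 = 60 bytes.
 */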
141
142 #define IAVF_WORKQUEUE_PRI PRI_SOFTNET
143
144 #define IAVF_TX_PKT_DESCS 8
145 #define IAVF_TX_QUEUE_ALIGN 128
146 #define IAVF_RX_QUEUE_ALIGN 128
147 #define IAVF_TX_PKT_MAXSIZE (MCLBYTES * IAVF_TX_PKT_DESCS)
148 #define IAVF_MCLBYTES (MCLBYTES - ETHER_ALIGN)
149
150 #define IAVF_TICK_INTERVAL (5 * hz)
151 #define IAVF_WATCHDOG_TICKS 3
152 #define IAVF_WATCHDOG_STOP 0
153
154 #define IAVF_TXRX_PROCESS_UNLIMIT UINT_MAX
155 #define IAVF_TX_PROCESS_LIMIT 256
156 #define IAVF_RX_PROCESS_LIMIT 256
157 #define IAVF_TX_INTR_PROCESS_LIMIT 256
158 #define IAVF_RX_INTR_PROCESS_LIMIT 0U
159
160 #define IAVF_EXEC_TIMEOUT 3000
161
162 #define IAVF_IFCAP_RXCSUM (IFCAP_CSUM_IPv4_Rx | \
163 IFCAP_CSUM_TCPv4_Rx | \
164 IFCAP_CSUM_UDPv4_Rx | \
165 IFCAP_CSUM_TCPv6_Rx | \
166 IFCAP_CSUM_UDPv6_Rx)
167 #define IAVF_IFCAP_TXCSUM (IFCAP_CSUM_IPv4_Tx | \
168 IFCAP_CSUM_TCPv4_Tx | \
169 IFCAP_CSUM_UDPv4_Tx | \
170 IFCAP_CSUM_TCPv6_Tx | \
171 IFCAP_CSUM_UDPv6_Tx)
172 #define IAVF_CSUM_ALL_OFFLOAD (M_CSUM_IPv4 | \
173 M_CSUM_TCPv4 | M_CSUM_TCPv6 | \
174 M_CSUM_UDPv4 | M_CSUM_UDPv6)
175
176 struct iavf_softc; /* defined below */
177
178 struct iavf_module_params {
179 int debug;
180 uint32_t rx_itr;
181 uint32_t tx_itr;
182 unsigned int rx_ndescs;
183 unsigned int tx_ndescs;
184 int max_qps;
185 };
186
187 struct iavf_product {
188 unsigned int vendor_id;
189 unsigned int product_id;
190 };
191
192 struct iavf_link_speed {
193 uint64_t baudrate;
194 uint64_t media;
195 };
196
197 struct iavf_aq_regs {
198 bus_size_t atq_tail;
199 bus_size_t atq_head;
200 bus_size_t atq_len;
201 bus_size_t atq_bal;
202 bus_size_t atq_bah;
203
204 bus_size_t arq_tail;
205 bus_size_t arq_head;
206 bus_size_t arq_len;
207 bus_size_t arq_bal;
208 bus_size_t arq_bah;
209
210 uint32_t atq_len_enable;
211 uint32_t atq_tail_mask;
212 uint32_t atq_head_mask;
213
214 uint32_t arq_len_enable;
215 uint32_t arq_tail_mask;
216 uint32_t arq_head_mask;
217 };
218
219 struct iavf_work {
220 struct work ixw_cookie;
221 void (*ixw_func)(void *);
222 void *ixw_arg;
223 unsigned int ixw_added;
224 };
225
226 struct iavf_tx_map {
227 struct mbuf *txm_m;
228 bus_dmamap_t txm_map;
229 unsigned int txm_eop;
230 };
231
232 struct iavf_tx_ring {
233 unsigned int txr_qid;
234 char txr_name[16];
235
236 struct iavf_softc *txr_sc;
237 kmutex_t txr_lock;
238 pcq_t *txr_intrq;
239 void *txr_si;
240 unsigned int txr_prod;
241 unsigned int txr_cons;
242
243 struct iavf_tx_map *txr_maps;
244 struct ixl_dmamem txr_mem;
245 bus_size_t txr_tail;
246
247 int txr_watchdog;
248
249 struct evcnt txr_defragged;
250 struct evcnt txr_defrag_failed;
251 struct evcnt txr_pcqdrop;
252 struct evcnt txr_transmitdef;
253 struct evcnt txr_defer;
254 struct evcnt txr_watchdogto;
255 struct evcnt txr_intr;
256 };
257
258 struct iavf_rx_map {
259 struct mbuf *rxm_m;
260 bus_dmamap_t rxm_map;
261 };
262
263 struct iavf_rx_ring {
264 unsigned int rxr_qid;
265 char rxr_name[16];
266
267 struct iavf_softc *rxr_sc;
268 kmutex_t rxr_lock;
269
270 unsigned int rxr_prod;
271 unsigned int rxr_cons;
272
273 struct iavf_rx_map *rxr_maps;
274 struct ixl_dmamem rxr_mem;
275 bus_size_t rxr_tail;
276
277 struct mbuf *rxr_m_head;
278 struct mbuf **rxr_m_tail;
279
280 struct evcnt rxr_mgethdr_failed;
281 struct evcnt rxr_mgetcl_failed;
282 struct evcnt rxr_mbuf_load_failed;
283 struct evcnt rxr_defer;
284 struct evcnt rxr_intr;
285 };
286
287 struct iavf_queue_pair {
288 struct iavf_tx_ring *qp_txr;
289 struct iavf_rx_ring *qp_rxr;
290 struct work qp_work;
291 void *qp_si;
292 bool qp_workqueue;
293 };
294
295 struct iavf_stat_counters {
296 struct evcnt isc_rx_bytes;
297 struct evcnt isc_rx_unicast;
298 struct evcnt isc_rx_multicast;
299 struct evcnt isc_rx_broadcast;
300 struct evcnt isc_rx_discards;
301 struct evcnt isc_rx_unknown_protocol;
302 struct evcnt isc_tx_bytes;
303 struct evcnt isc_tx_unicast;
304 struct evcnt isc_tx_multicast;
305 struct evcnt isc_tx_broadcast;
306 struct evcnt isc_tx_discards;
307 struct evcnt isc_tx_errors;
308 };
309
310 /*
311 * Locking notes:
312 * + Fields in iavf_tx_ring are protected by txr_lock (a spin mutex), and
313 * fields in iavf_rx_ring are protected by rxr_lock (a spin mutex).
314 * - txr_lock and rxr_lock must not be held at the same time.
315 * + Fields named sc_atq_*, sc_arq_*, and sc_adminq_* are protected by
316 * sc_adminq_lock (a spin mutex).
317 * - The lock is held while accessing sc_aq_regs
318 * and is never held together with txr_lock or rxr_lock.
319 * + Other fields in iavf_softc are protected by sc_cfg_lock
320 * (an adaptive mutex).
321 * - sc_cfg_lock must be taken before acquiring any other lock.
322 */
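/*
 * Editorial sketch (not part of the original source): under the rules
 * above, a path that needs both the configuration lock and a ring lock
 * takes them in this order and never holds two leaf locks at once:
 *
 *	mutex_enter(&sc->sc_cfg_lock);		adaptive, taken first
 *	mutex_enter(&txr->txr_lock);		spin, leaf lock
 *	... manipulate the Tx ring ...
 *	mutex_exit(&txr->txr_lock);
 *	mutex_exit(&sc->sc_cfg_lock);
 *
 * iavf_stop_locked() below follows this pattern, taking the Rx and Tx
 * ring locks one after another rather than together.
 */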
323
324 struct iavf_softc {
325 device_t sc_dev;
326 enum i40e_mac_type sc_mac_type;
327 int sc_debuglevel;
328 bool sc_attached;
329 bool sc_dead;
330 kmutex_t sc_cfg_lock;
331 callout_t sc_tick;
332 struct ifmedia sc_media;
333 uint64_t sc_media_status;
334 uint64_t sc_media_active;
335 int sc_link_state;
336
337 const struct iavf_aq_regs *
338 sc_aq_regs;
339
340 struct ethercom sc_ec;
341 uint8_t sc_enaddr[ETHER_ADDR_LEN];
342 uint8_t sc_enaddr_fake[ETHER_ADDR_LEN];
343 uint8_t sc_enaddr_added[ETHER_ADDR_LEN];
344 uint8_t sc_enaddr_reset[ETHER_ADDR_LEN];
345 struct if_percpuq *sc_ipq;
346
347 struct pci_attach_args sc_pa;
348 bus_dma_tag_t sc_dmat;
349 bus_space_tag_t sc_memt;
350 bus_space_handle_t sc_memh;
351 bus_size_t sc_mems;
352 pci_intr_handle_t *sc_ihp;
353 void **sc_ihs;
354 unsigned int sc_nintrs;
355
356 uint32_t sc_major_ver;
357 uint32_t sc_minor_ver;
358 uint32_t sc_vf_id;
359 uint32_t sc_vf_cap;
360 uint16_t sc_vsi_id;
361 uint16_t sc_qset_handle;
362 uint16_t sc_max_mtu;
363 bool sc_got_vf_resources;
364 bool sc_got_irq_map;
365 unsigned int sc_max_vectors;
366
367 kmutex_t sc_adminq_lock;
368 kcondvar_t sc_adminq_cv;
369 struct ixl_dmamem sc_atq;
370 unsigned int sc_atq_prod;
371 unsigned int sc_atq_cons;
372 struct ixl_aq_bufs sc_atq_idle;
373 struct ixl_aq_bufs sc_atq_live;
374 struct ixl_dmamem sc_arq;
375 struct ixl_aq_bufs sc_arq_idle;
376 struct ixl_aq_bufs sc_arq_live;
377 unsigned int sc_arq_prod;
378 unsigned int sc_arq_cons;
379 struct iavf_work sc_arq_refill;
380 uint32_t sc_arq_opcode;
381 uint32_t sc_arq_retval;
382
383 uint32_t sc_tx_itr;
384 uint32_t sc_rx_itr;
385 unsigned int sc_tx_ring_ndescs;
386 unsigned int sc_rx_ring_ndescs;
387 unsigned int sc_nqueue_pairs;
388 unsigned int sc_nqps_alloc;
389 unsigned int sc_nqps_vsi;
390 unsigned int sc_nqps_req;
391 struct iavf_queue_pair *sc_qps;
392 bool sc_txrx_workqueue;
393 u_int sc_tx_intr_process_limit;
394 u_int sc_tx_process_limit;
395 u_int sc_rx_intr_process_limit;
396 u_int sc_rx_process_limit;
397
398 struct workqueue *sc_workq;
399 struct workqueue *sc_workq_txrx;
400 struct iavf_work sc_reset_task;
401 struct iavf_work sc_wdto_task;
402 struct iavf_work sc_req_queues_task;
403 bool sc_req_queues_retried;
404 bool sc_resetting;
405 bool sc_reset_up;
406
407 struct sysctllog *sc_sysctllog;
408 struct iavf_stat_counters
409 sc_stat_counters;
410 };
411
412 #define IAVF_LOG(_sc, _lvl, _fmt, _args...) \
413 do { \
414 if (!(_sc)->sc_attached) { \
415 switch (_lvl) { \
416 case LOG_ERR: \
417 case LOG_WARNING: \
418 aprint_error_dev((_sc)->sc_dev, _fmt, ##_args); \
419 break; \
420 case LOG_INFO: \
421 aprint_normal_dev((_sc)->sc_dev,_fmt, ##_args); \
422 break; \
423 case LOG_DEBUG: \
424 default: \
425 aprint_debug_dev((_sc)->sc_dev, _fmt, ##_args); \
426 } \
427 } else { \
428 struct ifnet *_ifp = &(_sc)->sc_ec.ec_if; \
429 log((_lvl), "%s: " _fmt, _ifp->if_xname, ##_args); \
430 } \
431 } while (0)
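/*
 * Editorial note (not in the original source): IAVF_LOG() routes
 * messages through aprint_*(9) while the device is still attaching
 * (sc_attached is false) and through log(9) prefixed with the
 * interface name once the interface exists, e.g. (arguments purely
 * illustrative):
 *
 *	IAVF_LOG(sc, LOG_DEBUG, "arq opcode %u\n", opcode);
 */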
432
433 static int iavf_dmamem_alloc(bus_dma_tag_t, struct ixl_dmamem *,
434 bus_size_t, bus_size_t);
435 static void iavf_dmamem_free(bus_dma_tag_t, struct ixl_dmamem *);
436 static struct ixl_aq_buf *
437 iavf_aqb_get(struct iavf_softc *, struct ixl_aq_bufs *);
438 static struct ixl_aq_buf *
439 iavf_aqb_get_locked(struct ixl_aq_bufs *);
440 static void iavf_aqb_put_locked(struct ixl_aq_bufs *, struct ixl_aq_buf *);
441 static void iavf_aqb_clean(struct ixl_aq_bufs *, bus_dma_tag_t);
442
443 static const struct iavf_product *
444 iavf_lookup(const struct pci_attach_args *);
445 static enum i40e_mac_type
446 iavf_mactype(pci_product_id_t);
447 static void iavf_pci_csr_setup(pci_chipset_tag_t, pcitag_t);
448 static int iavf_wait_active(struct iavf_softc *);
449 static bool iavf_is_etheranyaddr(const uint8_t *);
450 static void iavf_prepare_fakeaddr(struct iavf_softc *);
451 static int iavf_replace_lla(struct ifnet *,
452 const uint8_t *, const uint8_t *);
453 static void iavf_evcnt_attach(struct evcnt *,
454 const char *, const char *);
455 static int iavf_setup_interrupts(struct iavf_softc *);
456 static void iavf_teardown_interrupts(struct iavf_softc *);
457 static int iavf_setup_sysctls(struct iavf_softc *);
458 static void iavf_teardown_sysctls(struct iavf_softc *);
459 static int iavf_setup_stats(struct iavf_softc *);
460 static void iavf_teardown_stats(struct iavf_softc *);
461 static struct workqueue *
462 iavf_workq_create(const char *, pri_t, int, int);
463 static void iavf_workq_destroy(struct workqueue *);
464 static int iavf_work_set(struct iavf_work *, void (*)(void *), void *);
465 static void iavf_work_add(struct workqueue *, struct iavf_work *);
466 static void iavf_work_wait(struct workqueue *, struct iavf_work *);
467 static unsigned int
468 iavf_calc_msix_count(struct iavf_softc *);
469 static unsigned int
470 iavf_calc_queue_pair_size(struct iavf_softc *);
471 static int iavf_queue_pairs_alloc(struct iavf_softc *);
472 static void iavf_queue_pairs_free(struct iavf_softc *);
473 static int iavf_arq_fill(struct iavf_softc *);
474 static void iavf_arq_refill(void *);
475 static int iavf_arq_poll(struct iavf_softc *, uint32_t, int);
476 static void iavf_atq_done(struct iavf_softc *);
477 static int iavf_init_admin_queue(struct iavf_softc *);
478 static void iavf_cleanup_admin_queue(struct iavf_softc *);
479 static int iavf_arq(struct iavf_softc *);
480 static int iavf_adminq_exec(struct iavf_softc *,
481 struct ixl_aq_desc *, struct ixl_aq_buf *);
482 static int iavf_adminq_poll(struct iavf_softc *,
483 struct ixl_aq_desc *, struct ixl_aq_buf *, int);
484 static int iavf_adminq_poll_locked(struct iavf_softc *,
485 struct ixl_aq_desc *, struct ixl_aq_buf *, int);
486 static int iavf_add_multi(struct iavf_softc *, uint8_t *, uint8_t *);
487 static int iavf_del_multi(struct iavf_softc *, uint8_t *, uint8_t *);
488 static void iavf_del_all_multi(struct iavf_softc *);
489
490 static int iavf_get_version(struct iavf_softc *, struct ixl_aq_buf *);
491 static int iavf_get_vf_resources(struct iavf_softc *, struct ixl_aq_buf *);
492 static int iavf_get_stats(struct iavf_softc *);
493 static int iavf_config_irq_map(struct iavf_softc *, struct ixl_aq_buf *);
494 static int iavf_config_vsi_queues(struct iavf_softc *);
495 static int iavf_config_hena(struct iavf_softc *);
496 static int iavf_config_rss_key(struct iavf_softc *);
497 static int iavf_config_rss_lut(struct iavf_softc *);
498 static int iavf_config_promisc_mode(struct iavf_softc *, int, int);
499 static int iavf_config_vlan_stripping(struct iavf_softc *, int);
500 static int iavf_config_vlan_id(struct iavf_softc *, uint16_t, uint32_t);
501 static int iavf_queue_select(struct iavf_softc *, int);
502 static int iavf_request_queues(struct iavf_softc *, unsigned int);
503 static int iavf_reset_vf(struct iavf_softc *);
504 static int iavf_eth_addr(struct iavf_softc *, const uint8_t *, uint32_t);
505 static void iavf_process_version(struct iavf_softc *,
506 struct ixl_aq_desc *, struct ixl_aq_buf *);
507 static void iavf_process_vf_resources(struct iavf_softc *,
508 struct ixl_aq_desc *, struct ixl_aq_buf *);
509 static void iavf_process_irq_map(struct iavf_softc *,
510 struct ixl_aq_desc *);
511 static void iavf_process_vc_event(struct iavf_softc *,
512 struct ixl_aq_desc *, struct ixl_aq_buf *);
513 static void iavf_process_stats(struct iavf_softc *,
514 struct ixl_aq_desc *, struct ixl_aq_buf *);
515 static void iavf_process_req_queues(struct iavf_softc *,
516 struct ixl_aq_desc *, struct ixl_aq_buf *);
517
518 static int iavf_intr(void *);
519 static int iavf_queue_intr(void *);
520 static void iavf_tick(void *);
521 static void iavf_tick_halt(void *);
522 static void iavf_reset_request(void *);
523 static void iavf_reset_start(void *);
524 static void iavf_reset(void *);
525 static void iavf_reset_finish(struct iavf_softc *);
526 static int iavf_init(struct ifnet *);
527 static int iavf_init_locked(struct iavf_softc *);
528 static void iavf_stop(struct ifnet *, int);
529 static void iavf_stop_locked(struct iavf_softc *);
530 static int iavf_ioctl(struct ifnet *, u_long, void *);
531 static void iavf_start(struct ifnet *);
532 static int iavf_transmit(struct ifnet *, struct mbuf*);
533 static int iavf_watchdog(struct iavf_tx_ring *);
534 static void iavf_watchdog_timeout(void *);
535 static int iavf_media_change(struct ifnet *);
536 static void iavf_media_status(struct ifnet *, struct ifmediareq *);
537 static int iavf_ifflags_cb(struct ethercom *);
538 static int iavf_vlan_cb(struct ethercom *, uint16_t, bool);
539 static void iavf_deferred_transmit(void *);
540 static void iavf_handle_queue(void *);
541 static void iavf_handle_queue_wk(struct work *, void *);
542 static int iavf_reinit(struct iavf_softc *);
543 static int iavf_rxfill(struct iavf_softc *, struct iavf_rx_ring *);
544 static void iavf_txr_clean(struct iavf_softc *, struct iavf_tx_ring *);
545 static void iavf_rxr_clean(struct iavf_softc *, struct iavf_rx_ring *);
546 static int iavf_txeof(struct iavf_softc *, struct iavf_tx_ring *,
547 u_int, struct evcnt *);
548 static int iavf_rxeof(struct iavf_softc *, struct iavf_rx_ring *,
549 u_int, struct evcnt *);
550 static int iavf_iff(struct iavf_softc *);
551 static int iavf_iff_locked(struct iavf_softc *);
552 static void iavf_post_request_queues(void *);
553 static int iavf_sysctl_itr_handler(SYSCTLFN_PROTO);
554
555 static int iavf_match(device_t, cfdata_t, void *);
556 static void iavf_attach(device_t, device_t, void*);
557 static int iavf_detach(device_t, int);
558 static int iavf_finalize_teardown(device_t);
559
560 CFATTACH_DECL3_NEW(iavf, sizeof(struct iavf_softc),
561 iavf_match, iavf_attach, iavf_detach, NULL, NULL, NULL,
562 DVF_DETACH_SHUTDOWN);
563
564 static const struct iavf_product iavf_products[] = {
565 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_VF },
566 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_VF_HV },
567 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_VF },
568 /* required last entry */
569 {0, 0}
570 };
571
572 static const struct iavf_link_speed iavf_link_speeds[] = {
573 { 0, 0 },
574 { IF_Mbps(100), IFM_100_TX },
575 { IF_Mbps(1000), IFM_1000_T },
576 { IF_Gbps(10), IFM_10G_T },
577 { IF_Gbps(40), IFM_40G_CR4 },
578 { IF_Gbps(20), IFM_20G_KR2 },
579 { IF_Gbps(25), IFM_25G_CR }
580 };
581
582 static const struct iavf_aq_regs iavf_aq_regs = {
583 .atq_tail = I40E_VF_ATQT1,
584 .atq_tail_mask = I40E_VF_ATQT1_ATQT_MASK,
585 .atq_head = I40E_VF_ATQH1,
586 .atq_head_mask = I40E_VF_ARQH1_ARQH_MASK,
587 .atq_len = I40E_VF_ATQLEN1,
588 .atq_bal = I40E_VF_ATQBAL1,
589 .atq_bah = I40E_VF_ATQBAH1,
590 .atq_len_enable = I40E_VF_ATQLEN1_ATQENABLE_MASK,
591
592 .arq_tail = I40E_VF_ARQT1,
593 .arq_tail_mask = I40E_VF_ARQT1_ARQT_MASK,
594 .arq_head = I40E_VF_ARQH1,
595 .arq_head_mask = I40E_VF_ARQH1_ARQH_MASK,
596 .arq_len = I40E_VF_ARQLEN1,
597 .arq_bal = I40E_VF_ARQBAL1,
598 .arq_bah = I40E_VF_ARQBAH1,
599 .arq_len_enable = I40E_VF_ARQLEN1_ARQENABLE_MASK,
600 };
601
602 static struct iavf_module_params iavf_params = {
603 .debug = 0,
604 .rx_itr = 0x07a, /* 4K intrs/sec */
605 .tx_itr = 0x07a, /* 4K intrs/sec */
606 .tx_ndescs = 512,
607 .rx_ndescs = 256,
608 .max_qps = INT_MAX,
609 };
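/*
 * Editorial note (not in the original source): assuming the usual
 * 2 microsecond ITR granularity of this hardware family, 0x07a (122)
 * corresponds to an interrupt interval of roughly 244 us, i.e. the
 * ~4K interrupts/sec mentioned in the comments above.
 */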
610
611 #define delaymsec(_x) DELAY(1000 * (_x))
612 #define iavf_rd(_s, _r) \
613 bus_space_read_4((_s)->sc_memt, (_s)->sc_memh, (_r))
614 #define iavf_wr(_s, _r, _v) \
615 bus_space_write_4((_s)->sc_memt, (_s)->sc_memh, (_r), (_v))
616 #define iavf_barrier(_s, _r, _l, _o) \
617 bus_space_barrier((_s)->sc_memt, (_s)->sc_memh, (_r), (_l), (_o))
618 #define iavf_flush(_s) (void)iavf_rd((_s), I40E_VFGEN_RSTAT)
619 #define iavf_nqueues(_sc) (1 << ((_sc)->sc_nqueue_pairs - 1))
620 #define iavf_allqueues(_sc) ((1 << ((_sc)->sc_nqueue_pairs)) - 1)
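/*
 * Editorial note (not in the original source): iavf_allqueues() builds
 * a bitmask with one bit per queue pair, e.g. sc_nqueue_pairs == 4
 * yields (1 << 4) - 1 == 0xf; presumably this is the mask handed to
 * the PF when enabling or disabling queues via iavf_queue_select().
 */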
621
622 static inline void
623 iavf_intr_enable(struct iavf_softc *sc)
624 {
625
626 iavf_wr(sc, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL0_INTENA_MASK |
627 I40E_VFINT_DYN_CTL0_CLEARPBA_MASK |
628 (IAVF_NOITR << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT));
629 iavf_wr(sc, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);
630 iavf_flush(sc);
631 }
632
633 static inline void
634 iavf_intr_disable(struct iavf_softc *sc)
635 {
636
637 iavf_wr(sc, I40E_VFINT_DYN_CTL01,
638 (IAVF_NOITR << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT));
639 iavf_wr(sc, I40E_VFINT_ICR0_ENA1, 0);
640 iavf_flush(sc);
641 }
642
643 static inline void
644 iavf_queue_intr_enable(struct iavf_softc *sc, unsigned int qid)
645 {
646
647 iavf_wr(sc, I40E_VFINT_DYN_CTLN1(qid),
648 I40E_VFINT_DYN_CTLN1_INTENA_MASK |
649 I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
650 (IAVF_NOITR << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT));
651 iavf_flush(sc);
652 }
653
654 static inline void
655 iavf_queue_intr_disable(struct iavf_softc *sc, unsigned int qid)
656 {
657
658 iavf_wr(sc, I40E_VFINT_DYN_CTLN1(qid),
659 (IAVF_NOITR << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT));
660 iavf_flush(sc);
661 }
662
663 static inline void
664 iavf_aq_vc_set_opcode(struct ixl_aq_desc *iaq, uint32_t opcode)
665 {
666 struct iavf_aq_vc *vc;
667
668 vc = (struct iavf_aq_vc *)&iaq->iaq_cookie;
669 vc->iaq_vc_opcode = htole32(opcode);
670 }
671
672 static inline uint32_t
673 iavf_aq_vc_get_opcode(const struct ixl_aq_desc *iaq)
674 {
675 const struct iavf_aq_vc *vc;
676
677 vc = (const struct iavf_aq_vc *)&iaq->iaq_cookie;
678 return le32toh(vc->iaq_vc_opcode);
679 }
680
681 static inline uint32_t
682 iavf_aq_vc_get_retval(const struct ixl_aq_desc *iaq)
683 {
684 const struct iavf_aq_vc *vc;
685
686 vc = (const struct iavf_aq_vc *)&iaq->iaq_cookie;
687 return le32toh(vc->iaq_vc_retval);
688 }
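/*
 * Editorial note (not in the original source): the virtchnl opcode and
 * return value exchanged with the PF are carried in the admin queue
 * descriptor's cookie field; the helpers above simply overlay struct
 * iavf_aq_vc on iaq_cookie and convert to and from little-endian.
 */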
689
690 static int
691 iavf_match(device_t parent, cfdata_t match, void *aux)
692 {
693 const struct pci_attach_args *pa = aux;
694
695 return (iavf_lookup(pa) != NULL) ? 1 : 0;
696 }
697
698 static void
699 iavf_attach(device_t parent, device_t self, void *aux)
700 {
701 struct iavf_softc *sc;
702 struct pci_attach_args *pa = aux;
703 struct ifnet *ifp;
704 struct ixl_aq_buf *aqb;
705 pcireg_t memtype;
706 char xnamebuf[MAXCOMLEN];
707 int error, i;
708
709 sc = device_private(self);
710 sc->sc_dev = self;
711 ifp = &sc->sc_ec.ec_if;
712
713 sc->sc_pa = *pa;
714 sc->sc_dmat = (pci_dma64_available(pa)) ? pa->pa_dmat64 : pa->pa_dmat;
715 sc->sc_aq_regs = &iavf_aq_regs;
716 sc->sc_debuglevel = iavf_params.debug;
717 sc->sc_tx_ring_ndescs = iavf_params.tx_ndescs;
718 sc->sc_rx_ring_ndescs = iavf_params.rx_ndescs;
719 sc->sc_tx_itr = iavf_params.tx_itr;
720 sc->sc_rx_itr = iavf_params.rx_itr;
721 sc->sc_nqps_req = MIN(ncpu, iavf_params.max_qps);
722 iavf_prepare_fakeaddr(sc);
723
724 sc->sc_mac_type = iavf_mactype(PCI_PRODUCT(pa->pa_id));
725 iavf_pci_csr_setup(pa->pa_pc, pa->pa_tag);
726
727 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IAVF_PCIREG);
728 if (pci_mapreg_map(pa, IAVF_PCIREG, memtype, 0,
729 &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems)) {
730 aprint_error(": unable to map registers\n");
731 return;
732 }
733
734 if (iavf_wait_active(sc) != 0) {
735 aprint_error(": VF reset timed out\n");
736 goto unmap;
737 }
738
739 mutex_init(&sc->sc_cfg_lock, MUTEX_DEFAULT, IPL_SOFTNET);
740 mutex_init(&sc->sc_adminq_lock, MUTEX_DEFAULT, IPL_NET);
741 SIMPLEQ_INIT(&sc->sc_atq_idle);
742 SIMPLEQ_INIT(&sc->sc_atq_live);
743 SIMPLEQ_INIT(&sc->sc_arq_idle);
744 SIMPLEQ_INIT(&sc->sc_arq_live);
745 sc->sc_arq_cons = 0;
746 sc->sc_arq_prod = 0;
747 aqb = NULL;
748
749 if (iavf_dmamem_alloc(sc->sc_dmat, &sc->sc_atq,
750 sizeof(struct ixl_aq_desc) * IAVF_AQ_NUM, IAVF_AQ_ALIGN) != 0) {
751 aprint_error(": unable to allocate atq\n");
752 goto free_mutex;
753 }
754
755 if (iavf_dmamem_alloc(sc->sc_dmat, &sc->sc_arq,
756 sizeof(struct ixl_aq_desc) * IAVF_AQ_NUM, IAVF_AQ_ALIGN) != 0) {
757 aprint_error(": unable to allocate arq\n");
758 goto free_atq;
759 }
760
761 for (i = 0; i < IAVF_AQ_NUM; i++) {
762 aqb = iavf_aqb_get(sc, NULL);
763 if (aqb != NULL) {
764 iavf_aqb_put_locked(&sc->sc_arq_idle, aqb);
765 }
766 }
767 aqb = NULL;
768
769 if (!iavf_arq_fill(sc)) {
770 aprint_error(": unable to fill arq descriptors\n");
771 goto free_arq;
772 }
773
774 if (iavf_init_admin_queue(sc) != 0) {
775 aprint_error(": unable to initialize admin queue\n");
776 goto shutdown;
777 }
778
779 aqb = iavf_aqb_get(sc, NULL);
780 if (aqb == NULL) {
781 aprint_error(": unable to allocate buffer for ATQ\n");
782 goto shutdown;
783 }
784
785 error = iavf_get_version(sc, aqb);
786 switch (error) {
787 case 0:
788 break;
789 case ETIMEDOUT:
790 aprint_error(": timeout waiting for VF version\n");
791 goto shutdown;
792 case ENOTSUP:
793 aprint_error(": unsupported VF version %d\n", sc->sc_major_ver);
794 goto shutdown;
795 default:
796 aprint_error(": unable to get VF interface version\n");
797 goto shutdown;
798 }
799
800 if (iavf_get_vf_resources(sc, aqb) != 0) {
801 aprint_error(": timeout waiting for VF resources\n");
802 goto shutdown;
803 }
804
805 aprint_normal(", VF version %d.%d%s",
806 sc->sc_major_ver, sc->sc_minor_ver,
807 (sc->sc_minor_ver > IAVF_VF_MINOR) ? "(minor mismatch)" : "");
808 aprint_normal(", VF %d, VSI %d", sc->sc_vf_id, sc->sc_vsi_id);
809 aprint_normal("\n");
810 aprint_naive("\n");
811
812 aprint_normal_dev(self, "Ethernet address %s\n",
813 ether_sprintf(sc->sc_enaddr));
814
815 if (iavf_queue_pairs_alloc(sc) != 0) {
816 goto shutdown;
817 }
818
819 if (iavf_setup_interrupts(sc) != 0) {
820 goto free_queue_pairs;
821 }
822
823 if (iavf_config_irq_map(sc, aqb) != 0) {
824 aprint_error(", timed out waiting for IRQ map response\n");
825 goto teardown_intrs;
826 }
827
828 if (iavf_setup_sysctls(sc) != 0) {
829 goto teardown_intrs;
830 }
831
832 if (iavf_setup_stats(sc) != 0) {
833 goto teardown_sysctls;
834 }
835
836 iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
837 aqb = NULL;
838
839 snprintf(xnamebuf, sizeof(xnamebuf),
840 "%s_adminq_cv", device_xname(self));
841 cv_init(&sc->sc_adminq_cv, xnamebuf);
842
843 callout_init(&sc->sc_tick, CALLOUT_MPSAFE);
844 callout_setfunc(&sc->sc_tick, iavf_tick, sc);
845
846 iavf_work_set(&sc->sc_reset_task, iavf_reset_start, sc);
847 iavf_work_set(&sc->sc_arq_refill, iavf_arq_refill, sc);
848 iavf_work_set(&sc->sc_wdto_task, iavf_watchdog_timeout, sc);
849 iavf_work_set(&sc->sc_req_queues_task, iavf_post_request_queues, sc);
850 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_cfg", device_xname(self));
851 sc->sc_workq = iavf_workq_create(xnamebuf, IAVF_WORKQUEUE_PRI,
852 IPL_NET, WQ_MPSAFE);
853 if (sc->sc_workq == NULL)
854 goto destroy_cv;
855
856 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_txrx", device_xname(self));
857 error = workqueue_create(&sc->sc_workq_txrx, xnamebuf,
858 iavf_handle_queue_wk, sc, IAVF_WORKQUEUE_PRI, IPL_NET,
859 WQ_PERCPU|WQ_MPSAFE);
860 if (error != 0) {
861 sc->sc_workq_txrx = NULL;
862 goto teardown_wqs;
863 }
864
865 error = if_initialize(ifp);
866 if (error != 0) {
867 aprint_error_dev(self, "if_initialize failed=%d\n", error);
868 goto teardown_wqs;
869 }
870
871 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
872
873 ifp->if_softc = sc;
874 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
875 ifp->if_extflags = IFEF_MPSAFE;
876 ifp->if_ioctl = iavf_ioctl;
877 ifp->if_start = iavf_start;
878 ifp->if_transmit = iavf_transmit;
879 ifp->if_watchdog = NULL;
880 ifp->if_init = iavf_init;
881 ifp->if_stop = iavf_stop;
882
883 IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_ndescs);
884 IFQ_SET_READY(&ifp->if_snd);
885 sc->sc_ipq = if_percpuq_create(ifp);
886
887 ifp->if_capabilities |= IAVF_IFCAP_RXCSUM;
888 ifp->if_capabilities |= IAVF_IFCAP_TXCSUM;
889
890 ether_set_vlan_cb(&sc->sc_ec, iavf_vlan_cb);
891 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
892 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
893 sc->sc_ec.ec_capenable = sc->sc_ec.ec_capabilities;
894
895 ether_set_ifflags_cb(&sc->sc_ec, iavf_ifflags_cb);
896
897 sc->sc_ec.ec_ifmedia = &sc->sc_media;
898 ifmedia_init_with_lock(&sc->sc_media, IFM_IMASK, iavf_media_change,
899 iavf_media_status, &sc->sc_cfg_lock);
900
901 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
902 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
903
904 if_deferred_start_init(ifp, NULL);
905 ether_ifattach(ifp, sc->sc_enaddr);
906
907 sc->sc_txrx_workqueue = true;
908 sc->sc_tx_process_limit = IAVF_TX_PROCESS_LIMIT;
909 sc->sc_rx_process_limit = IAVF_RX_PROCESS_LIMIT;
910 sc->sc_tx_intr_process_limit = IAVF_TX_INTR_PROCESS_LIMIT;
911 sc->sc_rx_intr_process_limit = IAVF_RX_INTR_PROCESS_LIMIT;
912
913 if_register(ifp);
914 if_link_state_change(ifp, sc->sc_link_state);
915 iavf_intr_enable(sc);
916 if (sc->sc_nqps_vsi < sc->sc_nqps_req)
917 iavf_work_add(sc->sc_workq, &sc->sc_req_queues_task);
918 sc->sc_attached = true;
919 return;
920
921 teardown_wqs:
922 config_finalize_register(self, iavf_finalize_teardown);
923 destroy_cv:
924 cv_destroy(&sc->sc_adminq_cv);
925 callout_destroy(&sc->sc_tick);
926 iavf_teardown_stats(sc);
927 teardown_sysctls:
928 iavf_teardown_sysctls(sc);
929 teardown_intrs:
930 iavf_teardown_interrupts(sc);
931 free_queue_pairs:
932 iavf_queue_pairs_free(sc);
933 shutdown:
934 if (aqb != NULL)
935 iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
936 iavf_cleanup_admin_queue(sc);
937 iavf_aqb_clean(&sc->sc_atq_idle, sc->sc_dmat);
938 iavf_aqb_clean(&sc->sc_arq_idle, sc->sc_dmat);
939 free_arq:
940 iavf_dmamem_free(sc->sc_dmat, &sc->sc_arq);
941 free_atq:
942 iavf_dmamem_free(sc->sc_dmat, &sc->sc_atq);
943 free_mutex:
944 mutex_destroy(&sc->sc_cfg_lock);
945 mutex_destroy(&sc->sc_adminq_lock);
946 unmap:
947 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
948 sc->sc_mems = 0;
949 sc->sc_attached = false;
950 }
951
952 static int
953 iavf_detach(device_t self, int flags)
954 {
955 struct iavf_softc *sc = device_private(self);
956 struct ifnet *ifp = &sc->sc_ec.ec_if;
957
958 if (!sc->sc_attached)
959 return 0;
960
961 iavf_stop(ifp, 1);
962 ether_ifdetach(ifp);
963 if_detach(ifp);
964 ifmedia_fini(&sc->sc_media);
965 if_percpuq_destroy(sc->sc_ipq);
966
967 iavf_intr_disable(sc);
968
969 mutex_enter(&sc->sc_adminq_lock);
970 mutex_exit(&sc->sc_adminq_lock);
971
972 /*
973 * set a dummy function so that the callout can be halted safely
974 * even if a workqueue entry calls callout_schedule()
975 */
976 callout_setfunc(&sc->sc_tick, iavf_tick_halt, sc);
977
978 iavf_work_wait(sc->sc_workq, &sc->sc_reset_task);
979 iavf_work_wait(sc->sc_workq, &sc->sc_arq_refill);
980 iavf_work_wait(sc->sc_workq, &sc->sc_wdto_task);
981 iavf_workq_destroy(sc->sc_workq);
982 sc->sc_workq = NULL;
983
984 callout_halt(&sc->sc_tick, NULL);
985 callout_destroy(&sc->sc_tick);
986
987 iavf_cleanup_admin_queue(sc);
988 iavf_aqb_clean(&sc->sc_atq_idle, sc->sc_dmat);
989 iavf_aqb_clean(&sc->sc_arq_idle, sc->sc_dmat);
990 iavf_dmamem_free(sc->sc_dmat, &sc->sc_arq);
991 iavf_dmamem_free(sc->sc_dmat, &sc->sc_atq);
992 cv_destroy(&sc->sc_adminq_cv);
993
994 iavf_queue_pairs_free(sc);
995 iavf_teardown_interrupts(sc);
996 iavf_teardown_sysctls(sc);
997 iavf_teardown_stats(sc);
998 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
999
1000 mutex_destroy(&sc->sc_adminq_lock);
1001 mutex_destroy(&sc->sc_cfg_lock);
1002
1003 return 0;
1004 }
1005
1006 static int
1007 iavf_finalize_teardown(device_t self)
1008 {
1009 struct iavf_softc *sc = device_private(self);
1010
1011 if (sc->sc_workq != NULL) {
1012 iavf_workq_destroy(sc->sc_workq);
1013 sc->sc_workq = NULL;
1014 }
1015
1016 if (sc->sc_workq_txrx != NULL) {
1017 workqueue_destroy(sc->sc_workq_txrx);
1018 sc->sc_workq_txrx = NULL;
1019 }
1020
1021 return 0;
1022 }
1023
1024 static int
1025 iavf_init(struct ifnet *ifp)
1026 {
1027 struct iavf_softc *sc;
1028 int rv;
1029
1030 sc = ifp->if_softc;
1031 mutex_enter(&sc->sc_cfg_lock);
1032 rv = iavf_init_locked(sc);
1033 mutex_exit(&sc->sc_cfg_lock);
1034
1035 return rv;
1036 }
1037
1038 static int
1039 iavf_init_locked(struct iavf_softc *sc)
1040 {
1041 struct ifnet *ifp = &sc->sc_ec.ec_if;
1042 unsigned int i;
1043 int error;
1044
1045 KASSERT(mutex_owned(&sc->sc_cfg_lock));
1046
1047 if (ISSET(ifp->if_flags, IFF_RUNNING))
1048 iavf_stop_locked(sc);
1049
1050 if (sc->sc_resetting)
1051 return ENXIO;
1052
1053 error = iavf_reinit(sc);
1054 if (error) {
1055 iavf_stop_locked(sc);
1056 return error;
1057 }
1058
1059 SET(ifp->if_flags, IFF_RUNNING);
1060 CLR(ifp->if_flags, IFF_OACTIVE);
1061
1062 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1063 iavf_wr(sc, I40E_VFINT_ITRN1(IAVF_ITR_RX, i), sc->sc_rx_itr);
1064 iavf_wr(sc, I40E_VFINT_ITRN1(IAVF_ITR_TX, i), sc->sc_tx_itr);
1065 }
1066 iavf_wr(sc, I40E_VFINT_ITR01(IAVF_ITR_RX), sc->sc_rx_itr);
1067 iavf_wr(sc, I40E_VFINT_ITR01(IAVF_ITR_TX), sc->sc_tx_itr);
1068 iavf_wr(sc, I40E_VFINT_ITR01(IAVF_ITR_MISC), 0);
1069
1070 error = iavf_iff_locked(sc);
1071 if (error) {
1072 iavf_stop_locked(sc);
1073 return error;
1074 };
1075
1076 /* ETHERCAP_VLAN_HWFILTER can not be disabled */
1077 SET(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
1078
1079 callout_schedule(&sc->sc_tick, IAVF_TICK_INTERVAL);
1080 return 0;
1081 }
1082
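/*
 * Editorial summary (not part of the original source): iavf_reinit()
 * refills the Rx rings, pushes the VSI queue configuration, the RSS
 * hash set (hena), key and LUT to the PF over the admin queue, unmasks
 * the per-queue interrupts and finally asks the PF to enable the
 * queues.  It is called with sc_cfg_lock held (see iavf_init_locked()).
 */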
1083 static int
1084 iavf_reinit(struct iavf_softc *sc)
1085 {
1086 struct iavf_rx_ring *rxr;
1087 struct iavf_tx_ring *txr;
1088 unsigned int i;
1089 uint32_t reg;
1090
1091 KASSERT(mutex_owned(&sc->sc_cfg_lock));
1092
1093 sc->sc_reset_up = true;
1094 sc->sc_nqueue_pairs = MIN(sc->sc_nqps_alloc, sc->sc_nintrs - 1);
1095
1096 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1097 rxr = sc->sc_qps[i].qp_rxr;
1098 txr = sc->sc_qps[i].qp_txr;
1099
1100 iavf_rxfill(sc, rxr);
1101 txr->txr_watchdog = IAVF_WATCHDOG_STOP;
1102 }
1103
1104 if (iavf_config_vsi_queues(sc) != 0)
1105 return EIO;
1106
1107 if (iavf_config_hena(sc) != 0)
1108 return EIO;
1109
1110 iavf_config_rss_key(sc);
1111 iavf_config_rss_lut(sc);
1112
1113 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1114 iavf_queue_intr_enable(sc, i);
1115 }
1116 /* unmask */
1117 reg = iavf_rd(sc, I40E_VFINT_DYN_CTL01);
1118 reg |= (IAVF_NOITR << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT);
1119 iavf_wr(sc, I40E_VFINT_DYN_CTL01, reg);
1120
1121 if (iavf_queue_select(sc, IAVF_VC_OP_ENABLE_QUEUES) != 0)
1122 return EIO;
1123
1124 return 0;
1125 }
1126
1127 static void
1128 iavf_stop(struct ifnet *ifp, int disable)
1129 {
1130 struct iavf_softc *sc;
1131
1132 sc = ifp->if_softc;
1133 mutex_enter(&sc->sc_cfg_lock);
1134 iavf_stop_locked(sc);
1135 mutex_exit(&sc->sc_cfg_lock);
1136 }
1137
1138 static void
1139 iavf_stop_locked(struct iavf_softc *sc)
1140 {
1141 struct ifnet *ifp = &sc->sc_ec.ec_if;
1142 struct iavf_rx_ring *rxr;
1143 struct iavf_tx_ring *txr;
1144 uint32_t reg;
1145 unsigned int i;
1146
1147 KASSERT(mutex_owned(&sc->sc_cfg_lock));
1148
1149 CLR(ifp->if_flags, IFF_RUNNING);
1150 sc->sc_reset_up = false;
1151 callout_stop(&sc->sc_tick);
1152
1153 if (!sc->sc_resetting) {
1154 /* disable queues */
1155 if (iavf_queue_select(sc, IAVF_VC_OP_DISABLE_QUEUES) != 0) {
1156 goto die;
1157 }
1158 }
1159
1160 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1161 iavf_queue_intr_disable(sc, i);
1162 }
1163
1164 /* mask interrupts */
1165 reg = iavf_rd(sc, I40E_VFINT_DYN_CTL01);
1166 reg |= I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK |
1167 (IAVF_NOITR << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT);
1168 iavf_wr(sc, I40E_VFINT_DYN_CTL01, reg);
1169
1170 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1171 rxr = sc->sc_qps[i].qp_rxr;
1172 txr = sc->sc_qps[i].qp_txr;
1173
1174 mutex_enter(&rxr->rxr_lock);
1175 iavf_rxr_clean(sc, rxr);
1176 mutex_exit(&rxr->rxr_lock);
1177
1178 mutex_enter(&txr->txr_lock);
1179 iavf_txr_clean(sc, txr);
1180 mutex_exit(&txr->txr_lock);
1181
1182 workqueue_wait(sc->sc_workq_txrx,
1183 &sc->sc_qps[i].qp_work);
1184 }
1185
1186 return;
1187 die:
1188 if (!sc->sc_dead) {
1189 sc->sc_dead = true;
1190 log(LOG_INFO, "%s: Request VF reset\n", ifp->if_xname);
1191
1192 iavf_work_set(&sc->sc_reset_task, iavf_reset_request, sc);
1193 iavf_work_add(sc->sc_workq, &sc->sc_reset_task);
1194 }
1195 log(LOG_CRIT, "%s: failed to shut down rings\n", ifp->if_xname);
1196 }
1197
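/*
 * Editorial note (not part of the original source): txr_watchdog is a
 * per-ring countdown, armed elsewhere on the transmit path;
 * iavf_watchdog() below decrements it and reports a timeout once it
 * reaches zero, and iavf_watchdog_timeout(), scheduled as sc_wdto_task,
 * recovers by reinitializing a running interface.
 */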
1198 static int
1199 iavf_watchdog(struct iavf_tx_ring *txr)
1200 {
1201 struct iavf_softc *sc;
1202
1203 sc = txr->txr_sc;
1204
1205 mutex_enter(&txr->txr_lock);
1206
1207 if (txr->txr_watchdog == IAVF_WATCHDOG_STOP
1208 || --txr->txr_watchdog > 0) {
1209 mutex_exit(&txr->txr_lock);
1210 return 0;
1211 }
1212
1213 txr->txr_watchdog = IAVF_WATCHDOG_STOP;
1214 txr->txr_watchdogto.ev_count++;
1215 mutex_exit(&txr->txr_lock);
1216
1217 device_printf(sc->sc_dev, "watchdog timeout on queue %d\n",
1218 txr->txr_qid);
1219 return 1;
1220 }
1221
1222 static void
1223 iavf_watchdog_timeout(void *xsc)
1224 {
1225 struct iavf_softc *sc;
1226 struct ifnet *ifp;
1227
1228 sc = xsc;
1229 ifp = &sc->sc_ec.ec_if;
1230
1231 mutex_enter(&sc->sc_cfg_lock);
1232 if (ISSET(ifp->if_flags, IFF_RUNNING))
1233 iavf_init_locked(sc);
1234 mutex_exit(&sc->sc_cfg_lock);
1235 }
1236
1237 static int
1238 iavf_media_change(struct ifnet *ifp)
1239 {
1240 struct iavf_softc *sc;
1241 struct ifmedia *ifm;
1242
1243 sc = ifp->if_softc;
1244 ifm = &sc->sc_media;
1245
1246 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1247 return EINVAL;
1248
1249 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1250 case IFM_AUTO:
1251 break;
1252 default:
1253 return EINVAL;
1254 }
1255
1256 return 0;
1257 }
1258
1259 static void
1260 iavf_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1261 {
1262 struct iavf_softc *sc = ifp->if_softc;
1263
1264 KASSERT(mutex_owned(&sc->sc_cfg_lock));
1265
1266 ifmr->ifm_status = sc->sc_media_status;
1267 ifmr->ifm_active = sc->sc_media_active;
1268 }
1269
1270 static int
1271 iavf_ifflags_cb(struct ethercom *ec)
1272 {
1273 struct ifnet *ifp = &ec->ec_if;
1274 struct iavf_softc *sc = ifp->if_softc;
1275
1276 /* vlan hwfilter can not be disabled */
1277 SET(ec->ec_capenable, ETHERCAP_VLAN_HWFILTER);
1278
1279 return iavf_iff(sc);
1280 }
1281
1282 static int
1283 iavf_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
1284 {
1285 struct ifnet *ifp = &ec->ec_if;
1286 struct iavf_softc *sc = ifp->if_softc;
1287 int rv;
1288
1289 mutex_enter(&sc->sc_cfg_lock);
1290
1291 if (sc->sc_resetting) {
1292 mutex_exit(&sc->sc_cfg_lock);
1293
1294 /* all vlan ids were already removed */
1295 if (!set)
1296 return 0;
1297
1298 return ENXIO;
1299 }
1300
1301 /* ETHERCAP_VLAN_HWFILTER can not be disabled */
1302 SET(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
1303
1304 if (set) {
1305 rv = iavf_config_vlan_id(sc, vid, IAVF_VC_OP_ADD_VLAN);
1306 if (!ISSET(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWTAGGING)) {
1307 iavf_config_vlan_stripping(sc,
1308 sc->sc_ec.ec_capenable);
1309 }
1310 } else {
1311 rv = iavf_config_vlan_id(sc, vid, IAVF_VC_OP_DEL_VLAN);
1312 }
1313
1314 mutex_exit(&sc->sc_cfg_lock);
1315
1316 if (rv != 0)
1317 return EIO;
1318
1319 return 0;
1320 }
1321
1322 static int
1323 iavf_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1324 {
1325 struct ifreq *ifr = (struct ifreq *)data;
1326 struct iavf_softc *sc = (struct iavf_softc *)ifp->if_softc;
1327 const struct sockaddr *sa;
1328 uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
1329 int s, error = 0;
1330 unsigned int nmtu;
1331
1332 switch (cmd) {
1333 case SIOCSIFMTU:
1334 nmtu = ifr->ifr_mtu;
1335
1336 if (nmtu < IAVF_MIN_MTU || nmtu > IAVF_MAX_MTU) {
1337 error = EINVAL;
1338 break;
1339 }
1340 if (ifp->if_mtu != nmtu) {
1341 s = splnet();
1342 error = ether_ioctl(ifp, cmd, data);
1343 splx(s);
1344 if (error == ENETRESET)
1345 error = iavf_init(ifp);
1346 }
1347 break;
1348 case SIOCADDMULTI:
1349 sa = ifreq_getaddr(SIOCADDMULTI, ifr);
1350 if (ether_addmulti(sa, &sc->sc_ec) == ENETRESET) {
1351 error = ether_multiaddr(sa, addrlo, addrhi);
1352 if (error != 0)
1353 return error;
1354
1355 error = iavf_add_multi(sc, addrlo, addrhi);
1356 if (error != 0 && error != ENETRESET) {
1357 ether_delmulti(sa, &sc->sc_ec);
1358 error = EIO;
1359 }
1360 }
1361 break;
1362
1363 case SIOCDELMULTI:
1364 sa = ifreq_getaddr(SIOCDELMULTI, ifr);
1365 if (ether_delmulti(sa, &sc->sc_ec) == ENETRESET) {
1366 error = ether_multiaddr(sa, addrlo, addrhi);
1367 if (error != 0)
1368 return error;
1369
1370 error = iavf_del_multi(sc, addrlo, addrhi);
1371 }
1372 break;
1373
1374 default:
1375 s = splnet();
1376 error = ether_ioctl(ifp, cmd, data);
1377 splx(s);
1378 }
1379
1380 if (error == ENETRESET)
1381 error = iavf_iff(sc);
1382
1383 return error;
1384 }
1385
1386 static int
1387 iavf_iff(struct iavf_softc *sc)
1388 {
1389 int error;
1390
1391 mutex_enter(&sc->sc_cfg_lock);
1392 error = iavf_iff_locked(sc);
1393 mutex_exit(&sc->sc_cfg_lock);
1394
1395 return error;
1396 }
1397
1398 static int
1399 iavf_iff_locked(struct iavf_softc *sc)
1400 {
1401 struct ifnet *ifp = &sc->sc_ec.ec_if;
1402 int unicast, multicast;
1403 const uint8_t *enaddr;
1404
1405 KASSERT(mutex_owned(&sc->sc_cfg_lock));
1406
1407 if (!ISSET(ifp->if_flags, IFF_RUNNING))
1408 return 0;
1409
1410 unicast = 0;
1411 multicast = 0;
1412 if (ISSET(ifp->if_flags, IFF_PROMISC)) {
1413 unicast = 1;
1414 multicast = 1;
1415 } else if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
1416 multicast = 1;
1417 }
1418
1419 iavf_config_promisc_mode(sc, unicast, multicast);
1420
1421 iavf_config_vlan_stripping(sc, sc->sc_ec.ec_capenable);
1422
1423 enaddr = CLLADDR(ifp->if_sadl);
1424 if (memcmp(enaddr, sc->sc_enaddr_added, ETHER_ADDR_LEN) != 0) {
1425 if (!iavf_is_etheranyaddr(sc->sc_enaddr_added)) {
1426 iavf_eth_addr(sc, sc->sc_enaddr_added,
1427 IAVF_VC_OP_DEL_ETH_ADDR);
1428 }
1429 memcpy(sc->sc_enaddr_added, enaddr, ETHER_ADDR_LEN);
1430 iavf_eth_addr(sc, enaddr, IAVF_VC_OP_ADD_ETH_ADDR);
1431 }
1432
1433 return 0;
1434 }
1435
1436 static const struct iavf_product *
1437 iavf_lookup(const struct pci_attach_args *pa)
1438 {
1439 const struct iavf_product *iavfp;
1440
1441 for (iavfp = iavf_products; iavfp->vendor_id != 0; iavfp++) {
1442 if (PCI_VENDOR(pa->pa_id) == iavfp->vendor_id &&
1443 PCI_PRODUCT(pa->pa_id) == iavfp->product_id)
1444 return iavfp;
1445 }
1446
1447 return NULL;
1448 }
1449
1450 static enum i40e_mac_type
1451 iavf_mactype(pci_product_id_t id)
1452 {
1453
1454 switch (id) {
1455 case PCI_PRODUCT_INTEL_XL710_VF:
1456 case PCI_PRODUCT_INTEL_XL710_VF_HV:
1457 return I40E_MAC_VF;
1458 case PCI_PRODUCT_INTEL_X722_VF:
1459 return I40E_MAC_X722_VF;
1460 }
1461
1462 return I40E_MAC_GENERIC;
1463 }
1464
1465 static const struct iavf_link_speed *
1466 iavf_find_link_speed(struct iavf_softc *sc, uint32_t link_speed)
1467 {
1468 size_t i;
1469
1470 for (i = 0; i < __arraycount(iavf_link_speeds); i++) {
1471 if (link_speed & (1 << i))
1472 return (&iavf_link_speeds[i]);
1473 }
1474
1475 return NULL;
1476 }
1477
1478 static void
1479 iavf_pci_csr_setup(pci_chipset_tag_t pc, pcitag_t tag)
1480 {
1481 pcireg_t csr;
1482
1483 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
1484 csr |= (PCI_COMMAND_MASTER_ENABLE |
1485 PCI_COMMAND_MEM_ENABLE);
1486 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
1487 }
1488
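/*
 * Editorial note (not part of the original source): iavf_wait_active()
 * polls VFGEN_RSTAT until the PF reports the VF reset as completed or
 * the VF as active; 100 tries at 10 ms each bounds the wait at roughly
 * one second.
 */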
1489 static int
1490 iavf_wait_active(struct iavf_softc *sc)
1491 {
1492 int tries;
1493 uint32_t reg;
1494
1495 for (tries = 0; tries < 100; tries++) {
1496 reg = iavf_rd(sc, I40E_VFGEN_RSTAT) &
1497 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1498 if (reg == IAVF_VFR_VFACTIVE ||
1499 reg == IAVF_VFR_COMPLETED)
1500 return 0;
1501
1502 delaymsec(10);
1503 }
1504
1505 return -1;
1506 }
1507
1508 static bool
1509 iavf_is_etheranyaddr(const uint8_t *enaddr)
1510 {
1511 static const uint8_t etheranyaddr[ETHER_ADDR_LEN] = {
1512 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1513 };
1514
1515 if (memcmp(enaddr, etheranyaddr, ETHER_ADDR_LEN) != 0)
1516 return false;
1517
1518 return true;
1519 }
1520
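/*
 * Editorial note (not part of the original source): prepare a random
 * fallback MAC address (presumably used when the PF does not hand one
 * out); clearing bit 0 of the first octet keeps it unicast and setting
 * bit 1 marks it as locally administered.
 */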
1521 static void
1522 iavf_prepare_fakeaddr(struct iavf_softc *sc)
1523 {
1524 uint64_t rndval;
1525
1526 if (!iavf_is_etheranyaddr(sc->sc_enaddr_fake))
1527 return;
1528
1529 rndval = cprng_strong64();
1530
1531 memcpy(sc->sc_enaddr_fake, &rndval, sizeof(sc->sc_enaddr_fake));
1532 sc->sc_enaddr_fake[0] &= 0xFE;
1533 sc->sc_enaddr_fake[0] |= 0x02;
1534 }
1535
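/*
 * Editorial summary (not part of the original source): swap the
 * interface's active link-layer address from "prev" to "next": look up
 * (or create) the sockaddr_dl ifaddr for the new address, activate it
 * if the old one was the active SADL, then drop the old entry unless it
 * is the permanent hardware address (if_hwdl).  Returns ENETRESET when
 * the interface is running so the caller can reprogram the hardware.
 */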
1536 static int
1537 iavf_replace_lla(struct ifnet *ifp, const uint8_t *prev, const uint8_t *next)
1538 {
1539 union {
1540 struct sockaddr sa;
1541 struct sockaddr_dl sdl;
1542 struct sockaddr_storage ss;
1543 } u;
1544 struct psref psref_prev, psref_next;
1545 struct ifaddr *ifa_prev, *ifa_next;
1546 const struct sockaddr_dl *nsdl;
1547 int s, error;
1548
1549 KASSERT(IFNET_LOCKED(ifp));
1550
1551 error = 0;
1552 ifa_prev = ifa_next = NULL;
1553
1554 if (memcmp(prev, next, ETHER_ADDR_LEN) == 0) {
1555 goto done;
1556 }
1557
1558 if (sockaddr_dl_init(&u.sdl, sizeof(u.ss), ifp->if_index,
1559 ifp->if_type, ifp->if_xname, strlen(ifp->if_xname),
1560 prev, ETHER_ADDR_LEN) == NULL) {
1561 error = EINVAL;
1562 goto done;
1563 }
1564
1565 s = pserialize_read_enter();
1566 IFADDR_READER_FOREACH(ifa_prev, ifp) {
1567 if (sockaddr_cmp(&u.sa, ifa_prev->ifa_addr) == 0) {
1568 ifa_acquire(ifa_prev, &psref_prev);
1569 break;
1570 }
1571 }
1572 pserialize_read_exit(s);
1573
1574 if (sockaddr_dl_init(&u.sdl, sizeof(u.ss), ifp->if_index,
1575 ifp->if_type, ifp->if_xname, strlen(ifp->if_xname),
1576 next, ETHER_ADDR_LEN) == NULL) {
1577 error = EINVAL;
1578 goto done;
1579 }
1580
1581 s = pserialize_read_enter();
1582 IFADDR_READER_FOREACH(ifa_next, ifp) {
1583 if (sockaddr_cmp(&u.sa, ifa_next->ifa_addr) == 0) {
1584 ifa_acquire(ifa_next, &psref_next);
1585 break;
1586 }
1587 }
1588 pserialize_read_exit(s);
1589
1590 if (ifa_next == NULL) {
1591 nsdl = &u.sdl;
1592 ifa_next = if_dl_create(ifp, &nsdl);
1593 if (ifa_next == NULL) {
1594 error = ENOMEM;
1595 goto done;
1596 }
1597
1598 s = pserialize_read_enter();
1599 ifa_acquire(ifa_next, &psref_next);
1600 pserialize_read_exit(s);
1601
1602 sockaddr_copy(ifa_next->ifa_addr,
1603 ifa_next->ifa_addr->sa_len, &u.sa);
1604 ifa_insert(ifp, ifa_next);
1605 } else {
1606 nsdl = NULL;
1607 }
1608
1609 if (ifa_prev != NULL && ifa_prev == ifp->if_dl) {
1610 if_activate_sadl(ifp, ifa_next, nsdl);
1611 }
1612
1613 ifa_release(ifa_next, &psref_next);
1614 ifa_next = NULL;
1615
1616 if (ifa_prev != NULL && ifa_prev != ifp->if_hwdl) {
1617 ifaref(ifa_prev);
1618 ifa_release(ifa_prev, &psref_prev);
1619 ifa_remove(ifp, ifa_prev);
1620 KASSERTMSG(ifa_prev->ifa_refcnt == 1, "ifa_refcnt=%d",
1621 ifa_prev->ifa_refcnt);
1622 ifafree(ifa_prev);
1623 ifa_prev = NULL;
1624 }
1625
1626 if (ISSET(ifp->if_flags, IFF_RUNNING))
1627 error = ENETRESET;
1628
1629 done:
1630 if (ifa_prev != NULL)
1631 ifa_release(ifa_prev, &psref_prev);
1632 if (ifa_next != NULL)
1633 ifa_release(ifa_next, &psref_next);
1634
1635 return error;
1636 }
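
/*
 * Editorial note (not part of the original source): multicast filter
 * strategy of the two helpers below: a range request (addrlo != addrhi)
 * or an ENOSPC reply from the PF switches the interface to ALLMULTI and
 * removes the individually programmed addresses; iavf_del_multi() only
 * leaves ALLMULTI again once every remaining ether_multi entry could be
 * re-added as an exact filter.
 */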
1637 static int
1638 iavf_add_multi(struct iavf_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
1639 {
1640 struct ifnet *ifp = &sc->sc_ec.ec_if;
1641 int rv;
1642
1643 if (ISSET(ifp->if_flags, IFF_ALLMULTI))
1644 return 0;
1645
1646 if (memcmp(addrlo, addrhi, ETHER_ADDR_LEN) != 0) {
1647 iavf_del_all_multi(sc);
1648 SET(ifp->if_flags, IFF_ALLMULTI);
1649 return ENETRESET;
1650 }
1651
1652 rv = iavf_eth_addr(sc, addrlo, IAVF_VC_OP_ADD_ETH_ADDR);
1653
1654 if (rv == ENOSPC) {
1655 iavf_del_all_multi(sc);
1656 SET(ifp->if_flags, IFF_ALLMULTI);
1657 return ENETRESET;
1658 }
1659
1660 return rv;
1661 }
1662
1663 static int
1664 iavf_del_multi(struct iavf_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
1665 {
1666 struct ifnet *ifp = &sc->sc_ec.ec_if;
1667 struct ethercom *ec = &sc->sc_ec;
1668 struct ether_multi *enm, *enm_last;
1669 struct ether_multistep step;
1670 int error, rv = 0;
1671
1672 if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
1673 if (memcmp(addrlo, addrhi, ETHER_ADDR_LEN) != 0)
1674 return 0;
1675
1676 iavf_eth_addr(sc, addrlo, IAVF_VC_OP_DEL_ETH_ADDR);
1677 return 0;
1678 }
1679
1680 ETHER_LOCK(ec);
1681 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1682 ETHER_NEXT_MULTI(step, enm)) {
1683 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
1684 ETHER_ADDR_LEN) != 0) {
1685 goto out;
1686 }
1687 }
1688
1689 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1690 ETHER_NEXT_MULTI(step, enm)) {
1691 error = iavf_eth_addr(sc, enm->enm_addrlo,
1692 IAVF_VC_OP_ADD_ETH_ADDR);
1693 if (error != 0)
1694 break;
1695 }
1696
1697 if (enm != NULL) {
1698 enm_last = enm;
1699 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1700 ETHER_NEXT_MULTI(step, enm)) {
1701 if (enm == enm_last)
1702 break;
1703
1704 iavf_eth_addr(sc, enm->enm_addrlo,
1705 IAVF_VC_OP_DEL_ETH_ADDR);
1706 }
1707 } else {
1708 CLR(ifp->if_flags, IFF_ALLMULTI);
1709 rv = ENETRESET;
1710 }
1711
1712 out:
1713 ETHER_UNLOCK(ec);
1714 return rv;
1715 }
1716
1717 static void
1718 iavf_del_all_multi(struct iavf_softc *sc)
1719 {
1720 struct ethercom *ec = &sc->sc_ec;
1721 struct ether_multi *enm;
1722 struct ether_multistep step;
1723
1724 ETHER_LOCK(ec);
1725 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1726 ETHER_NEXT_MULTI(step, enm)) {
1727 iavf_eth_addr(sc, enm->enm_addrlo,
1728 IAVF_VC_OP_DEL_ETH_ADDR);
1729 }
1730 ETHER_UNLOCK(ec);
1731 }
1732
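/*
 * Editorial summary (not part of the original source): MSI-X layout
 * established below: vector 0 handles the admin queue and other
 * miscellaneous events (iavf_intr), vectors 1..n are bound one per
 * queue pair (iavf_queue_intr) and are distributed round-robin across
 * CPUs, starting at CPU 1 when enough vectors are available.
 */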
1733 static int
1734 iavf_setup_interrupts(struct iavf_softc *sc)
1735 {
1736 struct pci_attach_args *pa;
1737 kcpuset_t *affinity = NULL;
1738 char intrbuf[PCI_INTRSTR_LEN], xnamebuf[32];
1739 char const *intrstr;
1740 int counts[PCI_INTR_TYPE_SIZE];
1741 int error, affinity_to;
1742 unsigned int vector, qid, num;
1743
1744 /* queue pairs + misc interrupt */
1745 num = sc->sc_nqps_alloc + 1;
1746
1747 num = MIN(num, iavf_calc_msix_count(sc));
1748 if (num <= 0) {
1749 return -1;
1750 }
1751
1752 KASSERT(sc->sc_nqps_alloc > 0);
1753 num = MIN(num, sc->sc_nqps_alloc + 1);
1754
1755 pa = &sc->sc_pa;
1756 memset(counts, 0, sizeof(counts));
1757 counts[PCI_INTR_TYPE_MSIX] = num;
1758
1759 error = pci_intr_alloc(pa, &sc->sc_ihp, counts, PCI_INTR_TYPE_MSIX);
1760 if (error != 0) {
1761 IAVF_LOG(sc, LOG_WARNING, "couldn't allocate interrupts\n");
1762 return -1;
1763 }
1764
1765 KASSERT(pci_intr_type(pa->pa_pc, sc->sc_ihp[0]) == PCI_INTR_TYPE_MSIX);
1766
1767 if (counts[PCI_INTR_TYPE_MSIX] < 1) {
1768 IAVF_LOG(sc, LOG_ERR, "couldn't allocate interrupts\n");
1769 } else if (counts[PCI_INTR_TYPE_MSIX] != (int)num) {
1770 IAVF_LOG(sc, LOG_DEBUG,
1771 "requested %u interrupts, but allocated %d interrupts\n",
1772 num, counts[PCI_INTR_TYPE_MSIX]);
1773 num = counts[PCI_INTR_TYPE_MSIX];
1774 }
1775
1776 sc->sc_ihs = kmem_alloc(sizeof(sc->sc_ihs[0]) * num, KM_NOSLEEP);
1777 if (sc->sc_ihs == NULL) {
1778 IAVF_LOG(sc, LOG_ERR,
1779 "couldn't allocate memory for interrupts\n");
1780 goto fail;
1781 }
1782
1783 /* vector #0 is Misc interrupt */
1784 vector = 0;
1785 pci_intr_setattr(pa->pa_pc, &sc->sc_ihp[vector], PCI_INTR_MPSAFE, true);
1786 intrstr = pci_intr_string(pa->pa_pc, sc->sc_ihp[vector],
1787 intrbuf, sizeof(intrbuf));
1788 snprintf(xnamebuf, sizeof(xnamebuf), "%s-Misc",
1789 device_xname(sc->sc_dev));
1790
1791 sc->sc_ihs[vector] = pci_intr_establish_xname(pa->pa_pc,
1792 sc->sc_ihp[vector], IPL_NET, iavf_intr, sc, xnamebuf);
1793 if (sc->sc_ihs[vector] == NULL) {
1794 IAVF_LOG(sc, LOG_WARNING,
1795 "unable to establish interrupt at %s", intrstr);
1796 goto fail;
1797 }
1798
1799 kcpuset_create(&affinity, false);
1800 affinity_to = ((int)num <= ncpu) ? 1 : 0;
1801 qid = 0;
1802 for (vector = 1; vector < num; vector++) {
1803 pci_intr_setattr(pa->pa_pc, &sc->sc_ihp[vector],
1804 PCI_INTR_MPSAFE, true);
1805 intrstr = pci_intr_string(pa->pa_pc, sc->sc_ihp[vector],
1806 intrbuf, sizeof(intrbuf));
1807 snprintf(xnamebuf, sizeof(xnamebuf), "%s-TXRX%u",
1808 device_xname(sc->sc_dev), qid);
1809
1810 sc->sc_ihs[vector] = pci_intr_establish_xname(pa->pa_pc,
1811 sc->sc_ihp[vector], IPL_NET, iavf_queue_intr,
1812 (void *)&sc->sc_qps[qid], xnamebuf);
1813 if (sc->sc_ihs[vector] == NULL) {
1814 IAVF_LOG(sc, LOG_WARNING,
1815 "unable to establish interrupt at %s\n", intrstr);
1816 goto fail;
1817 }
1818
1819 kcpuset_zero(affinity);
1820 kcpuset_set(affinity, affinity_to);
1821 error = interrupt_distribute(sc->sc_ihs[vector],
1822 affinity, NULL);
1823
1824 if (error == 0) {
1825 IAVF_LOG(sc, LOG_INFO,
1826 "for TXRX%d interrupt at %s, affinity to %d\n",
1827 qid, intrstr, affinity_to);
1828 } else {
1829 IAVF_LOG(sc, LOG_INFO,
1830 "for TXRX%d interrupt at %s\n",
1831 qid, intrstr);
1832 }
1833
1834 qid++;
1835 affinity_to = (affinity_to + 1) % ncpu;
1836 }
1837
1838 kcpuset_destroy(affinity);
1839
1840 sc->sc_nintrs = num;
1841 return 0;
1842
1843 fail:
1844 if (affinity != NULL)
1845 kcpuset_destroy(affinity);
1846 for (vector = 0; vector < num; vector++) {
1847 if (sc->sc_ihs[vector] == NULL)
1848 continue;
1849 pci_intr_disestablish(pa->pa_pc, sc->sc_ihs[vector]);
1850 }
1851 kmem_free(sc->sc_ihs, sizeof(sc->sc_ihs[0]) * num);
1852 pci_intr_release(pa->pa_pc, sc->sc_ihp, num);
1853
1854 return -1;
1855 }
1856
1857 static void
1858 iavf_teardown_interrupts(struct iavf_softc *sc)
1859 {
1860 struct pci_attach_args *pa;
1861 unsigned int i;
1862
1863 if (sc->sc_ihs == NULL)
1864 return;
1865
1866 pa = &sc->sc_pa;
1867
1868 for (i = 0; i < sc->sc_nintrs; i++) {
1869 pci_intr_disestablish(pa->pa_pc, sc->sc_ihs[i]);
1870 }
1871
1872 kmem_free(sc->sc_ihs, sizeof(sc->sc_ihs[0]) * sc->sc_nintrs);
1873 sc->sc_ihs = NULL;
1874
1875 pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs);
1876 sc->sc_nintrs = 0;
1877 }
1878
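/*
 * Editorial note (not part of the original source): the sysctl tree
 * created below lives under hw.<device>, e.g. (assuming a device named
 * iavf0) hw.iavf0.txrx_workqueue, hw.iavf0.debug_level,
 * hw.iavf0.rx.{itr,descriptor_num,intr_process_limit,process_limit}
 * and the corresponding hw.iavf0.tx.* nodes.
 */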
1879 static int
1880 iavf_setup_sysctls(struct iavf_softc *sc)
1881 {
1882 const char *devname;
1883 struct sysctllog **log;
1884 const struct sysctlnode *rnode, *rxnode, *txnode;
1885 int error;
1886
1887 log = &sc->sc_sysctllog;
1888 devname = device_xname(sc->sc_dev);
1889
1890 error = sysctl_createv(log, 0, NULL, &rnode,
1891 0, CTLTYPE_NODE, devname,
1892 SYSCTL_DESCR("iavf information and settings"),
1893 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
1894 if (error)
1895 goto out;
1896
1897 error = sysctl_createv(log, 0, &rnode, NULL,
1898 CTLFLAG_READWRITE, CTLTYPE_BOOL, "txrx_workqueue",
1899 SYSCTL_DESCR("Use workqueue for packet processing"),
1900 NULL, 0, &sc->sc_txrx_workqueue, 0, CTL_CREATE, CTL_EOL);
1901 if (error)
1902 goto out;
1903
1904 error = sysctl_createv(log, 0, &rnode, NULL,
1905 CTLFLAG_READWRITE, CTLTYPE_INT, "debug_level",
1906 SYSCTL_DESCR("Debug level"),
1907 NULL, 0, &sc->sc_debuglevel, 0, CTL_CREATE, CTL_EOL);
1908 if (error)
1909 goto out;
1910
1911 error = sysctl_createv(log, 0, &rnode, &rxnode,
1912 0, CTLTYPE_NODE, "rx",
1913 SYSCTL_DESCR("iavf information and settings for Rx"),
1914 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
1915 if (error)
1916 goto out;
1917
1918 error = sysctl_createv(log, 0, &rxnode, NULL,
1919 CTLFLAG_READWRITE, CTLTYPE_INT, "itr",
1920 SYSCTL_DESCR("Interrupt Throttling"),
1921 iavf_sysctl_itr_handler, 0,
1922 (void *)sc, 0, CTL_CREATE, CTL_EOL);
1923 if (error)
1924 goto out;
1925
1926 error = sysctl_createv(log, 0, &rxnode, NULL,
1927 CTLFLAG_READONLY, CTLTYPE_INT, "descriptor_num",
1928 	    SYSCTL_DESCR("the number of Rx descriptors"),
1929 NULL, 0, &sc->sc_rx_ring_ndescs, 0, CTL_CREATE, CTL_EOL);
1930 if (error)
1931 goto out;
1932
1933 error = sysctl_createv(log, 0, &rxnode, NULL,
1934 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
1935 SYSCTL_DESCR("max number of Rx packets"
1936 " to process for interrupt processing"),
1937 NULL, 0, &sc->sc_rx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
1938 if (error)
1939 goto out;
1940
1941 error = sysctl_createv(log, 0, &rxnode, NULL,
1942 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
1943 SYSCTL_DESCR("max number of Rx packets"
1944 " to process for deferred processing"),
1945 NULL, 0, &sc->sc_rx_process_limit, 0, CTL_CREATE, CTL_EOL);
1946 if (error)
1947 goto out;
1948
1949 error = sysctl_createv(log, 0, &rnode, &txnode,
1950 0, CTLTYPE_NODE, "tx",
1951 SYSCTL_DESCR("iavf information and settings for Tx"),
1952 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
1953 if (error)
1954 goto out;
1955
1956 error = sysctl_createv(log, 0, &txnode, NULL,
1957 CTLFLAG_READWRITE, CTLTYPE_INT, "itr",
1958 SYSCTL_DESCR("Interrupt Throttling"),
1959 iavf_sysctl_itr_handler, 0,
1960 (void *)sc, 0, CTL_CREATE, CTL_EOL);
1961 if (error)
1962 goto out;
1963
1964 error = sysctl_createv(log, 0, &txnode, NULL,
1965 CTLFLAG_READONLY, CTLTYPE_INT, "descriptor_num",
1966 SYSCTL_DESCR("the number of Tx descriptors"),
1967 NULL, 0, &sc->sc_tx_ring_ndescs, 0, CTL_CREATE, CTL_EOL);
1968 if (error)
1969 goto out;
1970
1971 error = sysctl_createv(log, 0, &txnode, NULL,
1972 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
1973 SYSCTL_DESCR("max number of Tx packets"
1974 " to process for interrupt processing"),
1975 NULL, 0, &sc->sc_tx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
1976 if (error)
1977 goto out;
1978
1979 error = sysctl_createv(log, 0, &txnode, NULL,
1980 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
1981 SYSCTL_DESCR("max number of Tx packets"
1982 " to process for deferred processing"),
1983 NULL, 0, &sc->sc_tx_process_limit, 0, CTL_CREATE, CTL_EOL);
1984 if (error)
1985 goto out;
1986 out:
1987 return error;
1988 }
1989
1990 static void
1991 iavf_teardown_sysctls(struct iavf_softc *sc)
1992 {
1993
1994 sysctl_teardown(&sc->sc_sysctllog);
1995 }
1996
1997 static int
1998 iavf_setup_stats(struct iavf_softc *sc)
1999 {
2000 struct iavf_stat_counters *isc;
2001 const char *dn;
2002
2003 dn = device_xname(sc->sc_dev);
2004 isc = &sc->sc_stat_counters;
2005
2006 iavf_evcnt_attach(&isc->isc_rx_bytes, dn, "Rx bytes");
2007 iavf_evcnt_attach(&isc->isc_rx_unicast, dn, "Rx unicast");
2008 iavf_evcnt_attach(&isc->isc_rx_multicast, dn, "Rx multicast");
2009 iavf_evcnt_attach(&isc->isc_rx_broadcast, dn, "Rx broadcast");
2010 iavf_evcnt_attach(&isc->isc_rx_discards, dn, "Rx discards");
2011 iavf_evcnt_attach(&isc->isc_rx_unknown_protocol,
2012 dn, "Rx unknown protocol");
2013
2014 iavf_evcnt_attach(&isc->isc_tx_bytes, dn, "Tx bytes");
2015 iavf_evcnt_attach(&isc->isc_tx_unicast, dn, "Tx unicast");
2016 iavf_evcnt_attach(&isc->isc_tx_multicast, dn, "Tx multicast");
2017 iavf_evcnt_attach(&isc->isc_tx_broadcast, dn, "Tx broadcast");
2018 iavf_evcnt_attach(&isc->isc_tx_discards, dn, "Tx discards");
2019 iavf_evcnt_attach(&isc->isc_tx_errors, dn, "Tx errors");
2020
2021 return 0;
2022 }
2023
2024 static void
2025 iavf_teardown_stats(struct iavf_softc *sc)
2026 {
2027 struct iavf_stat_counters *isc;
2028
2029 isc = &sc->sc_stat_counters;
2030
2031 evcnt_detach(&isc->isc_rx_bytes);
2032 evcnt_detach(&isc->isc_rx_unicast);
2033 evcnt_detach(&isc->isc_rx_multicast);
2034 evcnt_detach(&isc->isc_rx_broadcast);
2035 evcnt_detach(&isc->isc_rx_discards);
2036 evcnt_detach(&isc->isc_rx_unknown_protocol);
2037
2038 evcnt_detach(&isc->isc_tx_bytes);
2039 evcnt_detach(&isc->isc_tx_unicast);
2040 evcnt_detach(&isc->isc_tx_multicast);
2041 evcnt_detach(&isc->isc_tx_broadcast);
2042 evcnt_detach(&isc->isc_tx_discards);
2043 evcnt_detach(&isc->isc_tx_errors);
2044
2045 }
2046
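/*
 * Program the admin queue registers: reset the head/tail pointers,
 * load the ATQ/ARQ base addresses and ring lengths (with the enable
 * bit set), and read one base address back to confirm the device
 * accepted the setup before declaring the queues usable.
 */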
2047 static int
2048 iavf_init_admin_queue(struct iavf_softc *sc)
2049 {
2050 uint32_t reg;
2051
2052 sc->sc_atq_cons = 0;
2053 sc->sc_atq_prod = 0;
2054
2055 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
2056 0, IXL_DMA_LEN(&sc->sc_atq),
2057 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2058 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
2059 0, IXL_DMA_LEN(&sc->sc_arq),
2060 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2061
2062 iavf_wr(sc, sc->sc_aq_regs->atq_head, 0);
2063 iavf_wr(sc, sc->sc_aq_regs->arq_head, 0);
2064 iavf_wr(sc, sc->sc_aq_regs->atq_tail, 0);
2065 iavf_wr(sc, sc->sc_aq_regs->arq_tail, 0);
2066
2067 iavf_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
2068
2069 iavf_wr(sc, sc->sc_aq_regs->atq_bal,
2070 ixl_dmamem_lo(&sc->sc_atq));
2071 iavf_wr(sc, sc->sc_aq_regs->atq_bah,
2072 ixl_dmamem_hi(&sc->sc_atq));
2073 iavf_wr(sc, sc->sc_aq_regs->atq_len,
2074 sc->sc_aq_regs->atq_len_enable | IAVF_AQ_NUM);
2075
2076 iavf_wr(sc, sc->sc_aq_regs->arq_bal,
2077 ixl_dmamem_lo(&sc->sc_arq));
2078 iavf_wr(sc, sc->sc_aq_regs->arq_bah,
2079 ixl_dmamem_hi(&sc->sc_arq));
2080 iavf_wr(sc, sc->sc_aq_regs->arq_len,
2081 sc->sc_aq_regs->arq_len_enable | IAVF_AQ_NUM);
2082
2083 iavf_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
2084
2085 reg = iavf_rd(sc, sc->sc_aq_regs->atq_bal);
2086 if (reg != ixl_dmamem_lo(&sc->sc_atq))
2087 goto fail;
2088
2089 reg = iavf_rd(sc, sc->sc_aq_regs->arq_bal);
2090 if (reg != ixl_dmamem_lo(&sc->sc_arq))
2091 goto fail;
2092
2093 sc->sc_dead = false;
2094 return 0;
2095
2096 fail:
2097 iavf_wr(sc, sc->sc_aq_regs->atq_len, 0);
2098 iavf_wr(sc, sc->sc_aq_regs->arq_len, 0);
2099 return -1;
2100 }
2101
2102 static void
2103 iavf_cleanup_admin_queue(struct iavf_softc *sc)
2104 {
2105 struct ixl_aq_buf *aqb;
2106
2107 iavf_wr(sc, sc->sc_aq_regs->atq_head, 0);
2108 iavf_wr(sc, sc->sc_aq_regs->arq_head, 0);
2109 iavf_wr(sc, sc->sc_aq_regs->atq_tail, 0);
2110 iavf_wr(sc, sc->sc_aq_regs->arq_tail, 0);
2111
2112 iavf_wr(sc, sc->sc_aq_regs->atq_bal, 0);
2113 iavf_wr(sc, sc->sc_aq_regs->atq_bah, 0);
2114 iavf_wr(sc, sc->sc_aq_regs->atq_len, 0);
2115
2116 iavf_wr(sc, sc->sc_aq_regs->arq_bal, 0);
2117 iavf_wr(sc, sc->sc_aq_regs->arq_bah, 0);
2118 iavf_wr(sc, sc->sc_aq_regs->arq_len, 0);
2119 iavf_flush(sc);
2120
2121 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
2122 0, IXL_DMA_LEN(&sc->sc_arq),
2123 BUS_DMASYNC_POSTREAD);
2124 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
2125 0, IXL_DMA_LEN(&sc->sc_atq),
2126 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2127
2128 sc->sc_atq_cons = 0;
2129 sc->sc_atq_prod = 0;
2130 sc->sc_arq_cons = 0;
2131 sc->sc_arq_prod = 0;
2132
2133 memset(IXL_DMA_KVA(&sc->sc_arq), 0, IXL_DMA_LEN(&sc->sc_arq));
2134 memset(IXL_DMA_KVA(&sc->sc_atq), 0, IXL_DMA_LEN(&sc->sc_atq));
2135
2136 while ((aqb = iavf_aqb_get_locked(&sc->sc_arq_live)) != NULL) {
2137 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, aqb->aqb_size,
2138 BUS_DMASYNC_POSTREAD);
2139 iavf_aqb_put_locked(&sc->sc_arq_idle, aqb);
2140 }
2141
2142 while ((aqb = iavf_aqb_get_locked(&sc->sc_atq_live)) != NULL) {
2143 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, aqb->aqb_size,
2144 BUS_DMASYNC_POSTREAD);
2145 iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
2146 }
2147 }
2148
2149 static unsigned int
2150 iavf_calc_msix_count(struct iavf_softc *sc)
2151 {
2152 struct pci_attach_args *pa;
2153 int count;
2154
2155 pa = &sc->sc_pa;
2156 count = pci_msix_count(pa->pa_pc, pa->pa_tag);
2157 if (count < 0) {
2158 		IAVF_LOG(sc, LOG_DEBUG, "MSIX config error\n");
2159 count = 0;
2160 }
2161
2162 return MIN(sc->sc_max_vectors, (unsigned int)count);
2163 }
2164
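/*
 * The number of queue pairs to use is the minimum of the CPU count,
 * the queues granted to the VSI, the usable MSI-X vectors (one is
 * reserved for the misc interrupt when more than one is available),
 * and the driver's max_qps parameter.
 */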
2165 static unsigned int
2166 iavf_calc_queue_pair_size(struct iavf_softc *sc)
2167 {
2168 unsigned int nqp, nvec;
2169
2170 nvec = iavf_calc_msix_count(sc);
2171 if (sc->sc_max_vectors > 1) {
2172 		/* reserve one vector for the misc (admin queue) interrupt */
2173 nvec -= 1;
2174 }
2175
2176 nqp = ncpu;
2177 nqp = MIN(nqp, sc->sc_nqps_vsi);
2178 nqp = MIN(nqp, nvec);
2179 nqp = MIN(nqp, (unsigned int)iavf_params.max_qps);
2180
2181 return nqp;
2182 }
2183
2184 static struct iavf_tx_ring *
2185 iavf_txr_alloc(struct iavf_softc *sc, unsigned int qid)
2186 {
2187 struct iavf_tx_ring *txr;
2188 struct iavf_tx_map *maps;
2189 unsigned int i;
2190 int error;
2191
2192 txr = kmem_zalloc(sizeof(*txr), KM_NOSLEEP);
2193 if (txr == NULL)
2194 return NULL;
2195
2196 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_tx_ring_ndescs,
2197 KM_NOSLEEP);
2198 if (maps == NULL)
2199 goto free_txr;
2200
2201 if (iavf_dmamem_alloc(sc->sc_dmat, &txr->txr_mem,
2202 sizeof(struct ixl_tx_desc) * sc->sc_tx_ring_ndescs,
2203 IAVF_TX_QUEUE_ALIGN) != 0) {
2204 goto free_maps;
2205 }
2206
2207 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2208 error = bus_dmamap_create(sc->sc_dmat, IAVF_TX_PKT_MAXSIZE,
2209 IAVF_TX_PKT_DESCS, IAVF_TX_PKT_MAXSIZE, 0,
2210 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &maps[i].txm_map);
2211 if (error)
2212 goto destroy_maps;
2213 }
2214
2215 txr->txr_intrq = pcq_create(sc->sc_tx_ring_ndescs, KM_NOSLEEP);
2216 if (txr->txr_intrq == NULL)
2217 goto destroy_maps;
2218
2219 txr->txr_si = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE,
2220 iavf_deferred_transmit, txr);
2221 if (txr->txr_si == NULL)
2222 goto destroy_pcq;
2223
2224 snprintf(txr->txr_name, sizeof(txr->txr_name), "%s-tx%d",
2225 device_xname(sc->sc_dev), qid);
2226
2227 iavf_evcnt_attach(&txr->txr_defragged,
2228 	    txr->txr_name, "m_defrag succeeded");
2229 iavf_evcnt_attach(&txr->txr_defrag_failed,
2230 txr->txr_name, "m_defrag failed");
2231 iavf_evcnt_attach(&txr->txr_pcqdrop,
2232 txr->txr_name, "Dropped in pcq");
2233 iavf_evcnt_attach(&txr->txr_transmitdef,
2234 txr->txr_name, "Deferred transmit");
2235 iavf_evcnt_attach(&txr->txr_watchdogto,
2236 	    txr->txr_name, "Watchdog timeout on queue");
2237 iavf_evcnt_attach(&txr->txr_defer,
2238 txr->txr_name, "Handled queue in softint/workqueue");
2239
2240 evcnt_attach_dynamic(&txr->txr_intr, EVCNT_TYPE_INTR, NULL,
2241 txr->txr_name, "Interrupt on queue");
2242
2243 txr->txr_qid = qid;
2244 txr->txr_sc = sc;
2245 txr->txr_maps = maps;
2246 txr->txr_prod = txr->txr_cons = 0;
2247 txr->txr_tail = I40E_QTX_TAIL1(qid);
2248 mutex_init(&txr->txr_lock, MUTEX_DEFAULT, IPL_NET);
2249
2250 return txr;
2251 destroy_pcq:
2252 pcq_destroy(txr->txr_intrq);
2253 destroy_maps:
2254 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2255 if (maps[i].txm_map == NULL)
2256 continue;
2257 bus_dmamap_destroy(sc->sc_dmat, maps[i].txm_map);
2258 }
2259
2260 iavf_dmamem_free(sc->sc_dmat, &txr->txr_mem);
2261 free_maps:
2262 kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs);
2263 free_txr:
2264 kmem_free(txr, sizeof(*txr));
2265 return NULL;
2266 }
2267
2268 static void
2269 iavf_txr_free(struct iavf_softc *sc, struct iavf_tx_ring *txr)
2270 {
2271 struct iavf_tx_map *maps;
2272 unsigned int i;
2273
2274 maps = txr->txr_maps;
2275 if (maps != NULL) {
2276 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2277 if (maps[i].txm_map == NULL)
2278 continue;
2279 bus_dmamap_destroy(sc->sc_dmat, maps[i].txm_map);
2280 }
2281 kmem_free(txr->txr_maps,
2282 sizeof(maps[0]) * sc->sc_tx_ring_ndescs);
2283 txr->txr_maps = NULL;
2284 }
2285
2286 evcnt_detach(&txr->txr_defragged);
2287 evcnt_detach(&txr->txr_defrag_failed);
2288 evcnt_detach(&txr->txr_pcqdrop);
2289 evcnt_detach(&txr->txr_transmitdef);
2290 evcnt_detach(&txr->txr_watchdogto);
2291 evcnt_detach(&txr->txr_defer);
2292 evcnt_detach(&txr->txr_intr);
2293
2294 iavf_dmamem_free(sc->sc_dmat, &txr->txr_mem);
2295 softint_disestablish(txr->txr_si);
2296 pcq_destroy(txr->txr_intrq);
2297 mutex_destroy(&txr->txr_lock);
2298 kmem_free(txr, sizeof(*txr));
2299 }
2300
2301 static struct iavf_rx_ring *
2302 iavf_rxr_alloc(struct iavf_softc *sc, unsigned int qid)
2303 {
2304 struct iavf_rx_ring *rxr;
2305 struct iavf_rx_map *maps;
2306 unsigned int i;
2307 int error;
2308
2309 rxr = kmem_zalloc(sizeof(*rxr), KM_NOSLEEP);
2310 if (rxr == NULL)
2311 return NULL;
2312
2313 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_rx_ring_ndescs,
2314 KM_NOSLEEP);
2315 if (maps == NULL)
2316 goto free_rxr;
2317
2318 if (iavf_dmamem_alloc(sc->sc_dmat, &rxr->rxr_mem,
2319 sizeof(struct ixl_rx_rd_desc_32) * sc->sc_rx_ring_ndescs,
2320 IAVF_RX_QUEUE_ALIGN) != 0)
2321 goto free_maps;
2322
2323 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2324 error = bus_dmamap_create(sc->sc_dmat, IAVF_MCLBYTES,
2325 1, IAVF_MCLBYTES, 0,
2326 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &maps[i].rxm_map);
2327 if (error)
2328 goto destroy_maps;
2329 }
2330
2331 snprintf(rxr->rxr_name, sizeof(rxr->rxr_name), "%s-rx%d",
2332 device_xname(sc->sc_dev), qid);
2333
2334 iavf_evcnt_attach(&rxr->rxr_mgethdr_failed,
2335 rxr->rxr_name, "MGETHDR failed");
2336 iavf_evcnt_attach(&rxr->rxr_mgetcl_failed,
2337 rxr->rxr_name, "MCLGET failed");
2338 iavf_evcnt_attach(&rxr->rxr_mbuf_load_failed,
2339 rxr->rxr_name, "bus_dmamap_load_mbuf failed");
2340 iavf_evcnt_attach(&rxr->rxr_defer,
2341 rxr->rxr_name, "Handled queue in softint/workqueue");
2342
2343 evcnt_attach_dynamic(&rxr->rxr_intr, EVCNT_TYPE_INTR, NULL,
2344 rxr->rxr_name, "Interrupt on queue");
2345
2346 rxr->rxr_qid = qid;
2347 rxr->rxr_sc = sc;
2348 rxr->rxr_cons = rxr->rxr_prod = 0;
2349 rxr->rxr_m_head = NULL;
2350 rxr->rxr_m_tail = &rxr->rxr_m_head;
2351 rxr->rxr_maps = maps;
2352 rxr->rxr_tail = I40E_QRX_TAIL1(qid);
2353 mutex_init(&rxr->rxr_lock, MUTEX_DEFAULT, IPL_NET);
2354
2355 return rxr;
2356
2357 destroy_maps:
2358 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2359 if (maps[i].rxm_map == NULL)
2360 continue;
2361 bus_dmamap_destroy(sc->sc_dmat, maps[i].rxm_map);
2362 }
2363 iavf_dmamem_free(sc->sc_dmat, &rxr->rxr_mem);
2364 free_maps:
2365 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs);
2366 free_rxr:
2367 kmem_free(rxr, sizeof(*rxr));
2368
2369 return NULL;
2370 }
2371
2372 static void
2373 iavf_rxr_free(struct iavf_softc *sc, struct iavf_rx_ring *rxr)
2374 {
2375 struct iavf_rx_map *maps;
2376 unsigned int i;
2377
2378 maps = rxr->rxr_maps;
2379 if (maps != NULL) {
2380 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2381 if (maps[i].rxm_map == NULL)
2382 continue;
2383 bus_dmamap_destroy(sc->sc_dmat, maps[i].rxm_map);
2384 }
2385 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs);
2386 rxr->rxr_maps = NULL;
2387 }
2388
2389 evcnt_detach(&rxr->rxr_mgethdr_failed);
2390 evcnt_detach(&rxr->rxr_mgetcl_failed);
2391 evcnt_detach(&rxr->rxr_mbuf_load_failed);
2392 evcnt_detach(&rxr->rxr_defer);
2393 evcnt_detach(&rxr->rxr_intr);
2394
2395 iavf_dmamem_free(sc->sc_dmat, &rxr->rxr_mem);
2396 mutex_destroy(&rxr->rxr_lock);
2397 kmem_free(rxr, sizeof(*rxr));
2398 }
2399
2400 static int
2401 iavf_queue_pairs_alloc(struct iavf_softc *sc)
2402 {
2403 struct iavf_queue_pair *qp;
2404 unsigned int i, num;
2405
2406 num = iavf_calc_queue_pair_size(sc);
2407 	if (num == 0) {
2408 return -1;
2409 }
2410
2411 sc->sc_qps = kmem_zalloc(sizeof(sc->sc_qps[0]) * num, KM_NOSLEEP);
2412 if (sc->sc_qps == NULL) {
2413 return -1;
2414 }
2415
2416 for (i = 0; i < num; i++) {
2417 qp = &sc->sc_qps[i];
2418
2419 qp->qp_rxr = iavf_rxr_alloc(sc, i);
2420 qp->qp_txr = iavf_txr_alloc(sc, i);
2421
2422 if (qp->qp_rxr == NULL || qp->qp_txr == NULL)
2423 goto free;
2424
2425 qp->qp_si = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE,
2426 iavf_handle_queue, qp);
2427 if (qp->qp_si == NULL)
2428 goto free;
2429 }
2430
2431 sc->sc_nqps_alloc = num;
2432 return 0;
2433 free:
2434 for (i = 0; i < num; i++) {
2435 qp = &sc->sc_qps[i];
2436
2437 if (qp->qp_si != NULL)
2438 softint_disestablish(qp->qp_si);
2439 if (qp->qp_rxr != NULL)
2440 iavf_rxr_free(sc, qp->qp_rxr);
2441 if (qp->qp_txr != NULL)
2442 iavf_txr_free(sc, qp->qp_txr);
2443 }
2444
2445 kmem_free(sc->sc_qps, sizeof(sc->sc_qps[0]) * num);
2446 sc->sc_qps = NULL;
2447
2448 return -1;
2449 }
2450
2451 static void
2452 iavf_queue_pairs_free(struct iavf_softc *sc)
2453 {
2454 struct iavf_queue_pair *qp;
2455 unsigned int i;
2456 size_t sz;
2457
2458 if (sc->sc_qps == NULL)
2459 return;
2460
2461 for (i = 0; i < sc->sc_nqps_alloc; i++) {
2462 qp = &sc->sc_qps[i];
2463
2464 if (qp->qp_si != NULL)
2465 softint_disestablish(qp->qp_si);
2466 if (qp->qp_rxr != NULL)
2467 iavf_rxr_free(sc, qp->qp_rxr);
2468 if (qp->qp_txr != NULL)
2469 iavf_txr_free(sc, qp->qp_txr);
2470 }
2471
2472 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqps_alloc;
2473 kmem_free(sc->sc_qps, sz);
2474 sc->sc_qps = NULL;
2475 sc->sc_nqps_alloc = 0;
2476 }
2477
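/*
 * Refill the Rx ring: allocate an mbuf cluster for each unrefreshed
 * slot, load it for DMA and store its address in the descriptor, then
 * advance the hardware tail register.  Returns -1 if an allocation or
 * DMA load failed so the caller can account an input-queue drop.
 */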
2478 static int
2479 iavf_rxfill(struct iavf_softc *sc, struct iavf_rx_ring *rxr)
2480 {
2481 struct ixl_rx_rd_desc_32 *ring, *rxd;
2482 struct iavf_rx_map *rxm;
2483 bus_dmamap_t map;
2484 struct mbuf *m;
2485 unsigned int slots, prod, mask;
2486 	int error, post = 0;
2487
2488 slots = ixl_rxr_unrefreshed(rxr->rxr_prod, rxr->rxr_cons,
2489 sc->sc_rx_ring_ndescs);
2490
2491 if (slots == 0)
2492 return 0;
2493
2494 error = 0;
2495 prod = rxr->rxr_prod;
2496
2497 ring = IXL_DMA_KVA(&rxr->rxr_mem);
2498 mask = sc->sc_rx_ring_ndescs - 1;
2499
2500 do {
2501 rxm = &rxr->rxr_maps[prod];
2502
2503 MGETHDR(m, M_DONTWAIT, MT_DATA);
2504 if (m == NULL) {
2505 rxr->rxr_mgethdr_failed.ev_count++;
2506 error = -1;
2507 break;
2508 }
2509
2510 MCLGET(m, M_DONTWAIT);
2511 if (!ISSET(m->m_flags, M_EXT)) {
2512 rxr->rxr_mgetcl_failed.ev_count++;
2513 error = -1;
2514 m_freem(m);
2515 break;
2516 }
2517
2518 m->m_len = m->m_pkthdr.len = MCLBYTES;
2519 m_adj(m, ETHER_ALIGN);
2520
2521 map = rxm->rxm_map;
2522
2523 if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
2524 BUS_DMA_READ|BUS_DMA_NOWAIT) != 0) {
2525 rxr->rxr_mbuf_load_failed.ev_count++;
2526 error = -1;
2527 m_freem(m);
2528 break;
2529 }
2530
2531 rxm->rxm_m = m;
2532
2533 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2534 BUS_DMASYNC_PREREAD);
2535
2536 rxd = &ring[prod];
2537 rxd->paddr = htole64(map->dm_segs[0].ds_addr);
2538 rxd->haddr = htole64(0);
2539
2540 prod++;
2541 prod &= mask;
2542 post = 1;
2543 } while (--slots);
2544
2545 if (post) {
2546 rxr->rxr_prod = prod;
2547 iavf_wr(sc, rxr->rxr_tail, prod);
2548 }
2549
2550 return error;
2551 }
2552
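/*
 * Translate the Rx descriptor checksum status into mbuf csum_flags.
 * Flags are set only for packet types the hardware verified; the
 * *_BAD flags are added when the descriptor reports an IP (IPE) or
 * L4 (L4E) checksum error.
 */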
2553 static inline void
2554 iavf_rx_csum(struct mbuf *m, uint64_t qword)
2555 {
2556 int flags_mask;
2557
2558 if (!ISSET(qword, IXL_RX_DESC_L3L4P)) {
2559 /* No L3 or L4 checksum was calculated */
2560 return;
2561 }
2562
2563 switch (__SHIFTOUT(qword, IXL_RX_DESC_PTYPE_MASK)) {
2564 case IXL_RX_DESC_PTYPE_IPV4FRAG:
2565 case IXL_RX_DESC_PTYPE_IPV4:
2566 case IXL_RX_DESC_PTYPE_SCTPV4:
2567 case IXL_RX_DESC_PTYPE_ICMPV4:
2568 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
2569 break;
2570 case IXL_RX_DESC_PTYPE_TCPV4:
2571 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
2572 flags_mask |= M_CSUM_TCPv4 | M_CSUM_TCP_UDP_BAD;
2573 break;
2574 case IXL_RX_DESC_PTYPE_UDPV4:
2575 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
2576 flags_mask |= M_CSUM_UDPv4 | M_CSUM_TCP_UDP_BAD;
2577 break;
2578 case IXL_RX_DESC_PTYPE_TCPV6:
2579 flags_mask = M_CSUM_TCPv6 | M_CSUM_TCP_UDP_BAD;
2580 break;
2581 case IXL_RX_DESC_PTYPE_UDPV6:
2582 flags_mask = M_CSUM_UDPv6 | M_CSUM_TCP_UDP_BAD;
2583 break;
2584 default:
2585 flags_mask = 0;
2586 }
2587
2588 m->m_pkthdr.csum_flags |= (flags_mask & (M_CSUM_IPv4 |
2589 M_CSUM_TCPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv4 | M_CSUM_UDPv6));
2590
2591 if (ISSET(qword, IXL_RX_DESC_IPE)) {
2592 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_IPv4_BAD);
2593 }
2594
2595 if (ISSET(qword, IXL_RX_DESC_L4E)) {
2596 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_TCP_UDP_BAD);
2597 }
2598 }
2599
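/*
 * Harvest completed Rx descriptors, at most rxlimit per call.
 * Fragments are chained until the EOP descriptor arrives; finished
 * packets are pushed to the per-CPU input queue.  Returns nonzero if
 * the limit was hit while more descriptors were still pending.
 */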
2600 static int
2601 iavf_rxeof(struct iavf_softc *sc, struct iavf_rx_ring *rxr, u_int rxlimit,
2602 struct evcnt *ecnt)
2603 {
2604 struct ifnet *ifp = &sc->sc_ec.ec_if;
2605 struct ixl_rx_wb_desc_32 *ring, *rxd;
2606 struct iavf_rx_map *rxm;
2607 bus_dmamap_t map;
2608 unsigned int cons, prod;
2609 struct mbuf *m;
2610 uint64_t word, word0;
2611 unsigned int len;
2612 unsigned int mask;
2613 int done = 0, more = 0;
2614
2615 KASSERT(mutex_owned(&rxr->rxr_lock));
2616
2617 if (!ISSET(ifp->if_flags, IFF_RUNNING))
2618 return 0;
2619
2620 prod = rxr->rxr_prod;
2621 cons = rxr->rxr_cons;
2622
2623 if (cons == prod)
2624 return 0;
2625
2626 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
2627 0, IXL_DMA_LEN(&rxr->rxr_mem),
2628 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2629
2630 ring = IXL_DMA_KVA(&rxr->rxr_mem);
2631 mask = sc->sc_rx_ring_ndescs - 1;
2632
2633 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
2634
2635 do {
2636 if (rxlimit-- <= 0) {
2637 more = 1;
2638 break;
2639 }
2640
2641 rxd = &ring[cons];
2642
2643 word = le64toh(rxd->qword1);
2644
2645 if (!ISSET(word, IXL_RX_DESC_DD))
2646 break;
2647
2648 rxm = &rxr->rxr_maps[cons];
2649
2650 map = rxm->rxm_map;
2651 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2652 BUS_DMASYNC_POSTREAD);
2653 bus_dmamap_unload(sc->sc_dmat, map);
2654
2655 m = rxm->rxm_m;
2656 rxm->rxm_m = NULL;
2657
2658 KASSERT(m != NULL);
2659
2660 len = (word & IXL_RX_DESC_PLEN_MASK) >> IXL_RX_DESC_PLEN_SHIFT;
2661 m->m_len = len;
2662 m->m_pkthdr.len = 0;
2663
2664 m->m_next = NULL;
2665 *rxr->rxr_m_tail = m;
2666 rxr->rxr_m_tail = &m->m_next;
2667
2668 m = rxr->rxr_m_head;
2669 m->m_pkthdr.len += len;
2670
2671 if (ISSET(word, IXL_RX_DESC_EOP)) {
2672 word0 = le64toh(rxd->qword0);
2673
2674 if (ISSET(word, IXL_RX_DESC_L2TAG1P)) {
2675 vlan_set_tag(m,
2676 __SHIFTOUT(word0, IXL_RX_DESC_L2TAG1_MASK));
2677 }
2678
2679 if ((ifp->if_capenable & IAVF_IFCAP_RXCSUM) != 0)
2680 iavf_rx_csum(m, word);
2681
2682 if (!ISSET(word,
2683 IXL_RX_DESC_RXE | IXL_RX_DESC_OVERSIZE)) {
2684 m_set_rcvif(m, ifp);
2685 if_statinc_ref(nsr, if_ipackets);
2686 if_statadd_ref(nsr, if_ibytes,
2687 m->m_pkthdr.len);
2688 if_percpuq_enqueue(sc->sc_ipq, m);
2689 } else {
2690 if_statinc_ref(nsr, if_ierrors);
2691 m_freem(m);
2692 }
2693
2694 rxr->rxr_m_head = NULL;
2695 rxr->rxr_m_tail = &rxr->rxr_m_head;
2696 }
2697
2698 cons++;
2699 cons &= mask;
2700
2701 done = 1;
2702 } while (cons != prod);
2703
2704 if (done) {
2705 ecnt->ev_count++;
2706 rxr->rxr_cons = cons;
2707 if (iavf_rxfill(sc, rxr) == -1)
2708 if_statinc_ref(nsr, if_iqdrops);
2709 }
2710
2711 IF_STAT_PUTREF(ifp);
2712
2713 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
2714 0, IXL_DMA_LEN(&rxr->rxr_mem),
2715 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2716
2717 return more;
2718 }
2719
2720 static void
2721 iavf_rxr_clean(struct iavf_softc *sc, struct iavf_rx_ring *rxr)
2722 {
2723 struct iavf_rx_map *maps, *rxm;
2724 bus_dmamap_t map;
2725 unsigned int i;
2726
2727 KASSERT(mutex_owned(&rxr->rxr_lock));
2728
2729 maps = rxr->rxr_maps;
2730 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2731 rxm = &maps[i];
2732
2733 if (rxm->rxm_m == NULL)
2734 continue;
2735
2736 map = rxm->rxm_map;
2737 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2738 BUS_DMASYNC_POSTWRITE);
2739 bus_dmamap_unload(sc->sc_dmat, map);
2740
2741 m_freem(rxm->rxm_m);
2742 rxm->rxm_m = NULL;
2743 }
2744
2745 m_freem(rxr->rxr_m_head);
2746 rxr->rxr_m_head = NULL;
2747 rxr->rxr_m_tail = &rxr->rxr_m_head;
2748
2749 memset(IXL_DMA_KVA(&rxr->rxr_mem), 0, IXL_DMA_LEN(&rxr->rxr_mem));
2750 rxr->rxr_prod = rxr->rxr_cons = 0;
2751 }
2752
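/*
 * Reclaim transmitted packets, at most txlimit per call: free mbufs
 * whose EOP descriptor reports DTYPE_DONE, update the interface
 * counters, and restart transmission on queue 0.  Returns nonzero if
 * the limit was reached before the ring drained.
 */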
2753 static int
2754 iavf_txeof(struct iavf_softc *sc, struct iavf_tx_ring *txr, u_int txlimit,
2755 struct evcnt *ecnt)
2756 {
2757 struct ifnet *ifp = &sc->sc_ec.ec_if;
2758 struct ixl_tx_desc *ring, *txd;
2759 struct iavf_tx_map *txm;
2760 struct mbuf *m;
2761 bus_dmamap_t map;
2762 unsigned int cons, prod, last;
2763 unsigned int mask;
2764 uint64_t dtype;
2765 int done = 0, more = 0;
2766
2767 KASSERT(mutex_owned(&txr->txr_lock));
2768
2769 prod = txr->txr_prod;
2770 cons = txr->txr_cons;
2771
2772 if (cons == prod)
2773 return 0;
2774
2775 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2776 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTREAD);
2777
2778 ring = IXL_DMA_KVA(&txr->txr_mem);
2779 mask = sc->sc_tx_ring_ndescs - 1;
2780
2781 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
2782
2783 do {
2784 if (txlimit-- <= 0) {
2785 more = 1;
2786 break;
2787 }
2788
2789 txm = &txr->txr_maps[cons];
2790 last = txm->txm_eop;
2791 txd = &ring[last];
2792
2793 dtype = txd->cmd & htole64(IXL_TX_DESC_DTYPE_MASK);
2794 if (dtype != htole64(IXL_TX_DESC_DTYPE_DONE))
2795 break;
2796
2797 map = txm->txm_map;
2798
2799 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2800 BUS_DMASYNC_POSTWRITE);
2801 bus_dmamap_unload(sc->sc_dmat, map);
2802
2803 m = txm->txm_m;
2804 if (m != NULL) {
2805 if_statinc_ref(nsr, if_opackets);
2806 if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
2807 if (ISSET(m->m_flags, M_MCAST))
2808 if_statinc_ref(nsr, if_omcasts);
2809 m_freem(m);
2810 }
2811
2812 txm->txm_m = NULL;
2813 txm->txm_eop = -1;
2814
2815 cons = last + 1;
2816 cons &= mask;
2817 done = 1;
2818 } while (cons != prod);
2819
2820 IF_STAT_PUTREF(ifp);
2821
2822 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2823 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREREAD);
2824
2825 txr->txr_cons = cons;
2826
2827 if (done) {
2828 ecnt->ev_count++;
2829 softint_schedule(txr->txr_si);
2830 if (txr->txr_qid == 0) {
2831 CLR(ifp->if_flags, IFF_OACTIVE);
2832 if_schedule_deferred_start(ifp);
2833 }
2834 }
2835
2836 if (txr->txr_cons == txr->txr_prod) {
2837 txr->txr_watchdog = IAVF_WATCHDOG_STOP;
2838 }
2839
2840 return more;
2841 }
2842
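/*
 * DMA-load an outgoing mbuf chain.  If the chain spans too many
 * segments (EFBIG) it is defragmented once with m_defrag() and
 * reloaded; both outcomes are recorded in the ring's event counters.
 */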
2843 static inline int
2844 iavf_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf **m0,
2845 struct iavf_tx_ring *txr)
2846 {
2847 struct mbuf *m;
2848 int error;
2849
2850 KASSERT(mutex_owned(&txr->txr_lock));
2851
2852 m = *m0;
2853
2854 error = bus_dmamap_load_mbuf(dmat, map, m,
2855 BUS_DMA_STREAMING|BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2856 if (error != EFBIG)
2857 return error;
2858
2859 m = m_defrag(m, M_DONTWAIT);
2860 if (m != NULL) {
2861 *m0 = m;
2862 txr->txr_defragged.ev_count++;
2863 error = bus_dmamap_load_mbuf(dmat, map, m,
2864 BUS_DMA_STREAMING|BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2865 } else {
2866 txr->txr_defrag_failed.ev_count++;
2867 error = ENOBUFS;
2868 }
2869
2870 return error;
2871 }
2872
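/*
 * Build the Tx descriptor offload bits from the mbuf csum_flags:
 * the MAC header length in 2-byte words, the IIPT selection (IPv4
 * with or without header checksum, or IPv6), the IP header length in
 * 4-byte words, and the L4 type and header length for TCP/UDP
 * checksum offload.
 */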
2873 static inline int
2874 iavf_tx_setup_offloads(struct mbuf *m, uint64_t *cmd_txd)
2875 {
2876 struct ether_header *eh;
2877 size_t len;
2878 uint64_t cmd;
2879
2880 cmd = 0;
2881
2882 eh = mtod(m, struct ether_header *);
2883 switch (htons(eh->ether_type)) {
2884 case ETHERTYPE_IP:
2885 case ETHERTYPE_IPV6:
2886 len = ETHER_HDR_LEN;
2887 break;
2888 case ETHERTYPE_VLAN:
2889 len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2890 break;
2891 default:
2892 len = 0;
2893 }
2894 cmd |= ((len >> 1) << IXL_TX_DESC_MACLEN_SHIFT);
2895
2896 if (m->m_pkthdr.csum_flags &
2897 (M_CSUM_TSOv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
2898 cmd |= IXL_TX_DESC_CMD_IIPT_IPV4;
2899 }
2900 if (m->m_pkthdr.csum_flags & M_CSUM_IPv4) {
2901 cmd |= IXL_TX_DESC_CMD_IIPT_IPV4_CSUM;
2902 }
2903
2904 if (m->m_pkthdr.csum_flags &
2905 (M_CSUM_TSOv6 | M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
2906 cmd |= IXL_TX_DESC_CMD_IIPT_IPV6;
2907 }
2908
2909 switch (cmd & IXL_TX_DESC_CMD_IIPT_MASK) {
2910 case IXL_TX_DESC_CMD_IIPT_IPV4:
2911 case IXL_TX_DESC_CMD_IIPT_IPV4_CSUM:
2912 len = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data);
2913 break;
2914 case IXL_TX_DESC_CMD_IIPT_IPV6:
2915 len = M_CSUM_DATA_IPv6_IPHL(m->m_pkthdr.csum_data);
2916 break;
2917 default:
2918 len = 0;
2919 }
2920 cmd |= ((len >> 2) << IXL_TX_DESC_IPLEN_SHIFT);
2921
2922 if (m->m_pkthdr.csum_flags &
2923 (M_CSUM_TSOv4 | M_CSUM_TSOv6 | M_CSUM_TCPv4 | M_CSUM_TCPv6)) {
2924 len = sizeof(struct tcphdr);
2925 cmd |= IXL_TX_DESC_CMD_L4T_EOFT_TCP;
2926 } else if (m->m_pkthdr.csum_flags & (M_CSUM_UDPv4 | M_CSUM_UDPv6)) {
2927 len = sizeof(struct udphdr);
2928 cmd |= IXL_TX_DESC_CMD_L4T_EOFT_UDP;
2929 } else {
2930 len = 0;
2931 }
2932 cmd |= ((len >> 2) << IXL_TX_DESC_L4LEN_SHIFT);
2933
2934 *cmd_txd |= cmd;
2935 return 0;
2936 }
2937
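/*
 * Common transmit path for if_start (is_transmit == false) and
 * if_transmit (true): dequeue packets, DMA-load them, emit one data
 * descriptor per segment with EOP|RS set on the last one, and bump
 * the hardware tail register once after the batch.
 */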
2938 static void
2939 iavf_tx_common_locked(struct ifnet *ifp, struct iavf_tx_ring *txr,
2940 bool is_transmit)
2941 {
2942 struct iavf_softc *sc;
2943 struct ixl_tx_desc *ring, *txd;
2944 struct iavf_tx_map *txm;
2945 bus_dmamap_t map;
2946 struct mbuf *m;
2947 unsigned int prod, free, last, i;
2948 unsigned int mask;
2949 uint64_t cmd, cmd_txd;
2950 int post = 0;
2951
2952 KASSERT(mutex_owned(&txr->txr_lock));
2953
2954 sc = ifp->if_softc;
2955
2956 if (!ISSET(ifp->if_flags, IFF_RUNNING)
2957 || (!is_transmit && ISSET(ifp->if_flags, IFF_OACTIVE))) {
2958 if (!is_transmit)
2959 IFQ_PURGE(&ifp->if_snd);
2960 return;
2961 }
2962
2963 prod = txr->txr_prod;
2964 free = txr->txr_cons;
2965
2966 if (free <= prod)
2967 free += sc->sc_tx_ring_ndescs;
2968 free -= prod;
2969
2970 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2971 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTWRITE);
2972
2973 ring = IXL_DMA_KVA(&txr->txr_mem);
2974 mask = sc->sc_tx_ring_ndescs - 1;
2975 last = prod;
2976 cmd = 0;
2977 txd = NULL;
2978
2979 for (;;) {
2980 if (free < IAVF_TX_PKT_DESCS) {
2981 if (!is_transmit)
2982 SET(ifp->if_flags, IFF_OACTIVE);
2983 break;
2984 }
2985
2986 if (is_transmit)
2987 m = pcq_get(txr->txr_intrq);
2988 else
2989 IFQ_DEQUEUE(&ifp->if_snd, m);
2990
2991 if (m == NULL)
2992 break;
2993
2994 txm = &txr->txr_maps[prod];
2995 map = txm->txm_map;
2996
2997 if (iavf_load_mbuf(sc->sc_dmat, map, &m, txr) != 0) {
2998 if_statinc(ifp, if_oerrors);
2999 m_freem(m);
3000 continue;
3001 }
3002
3003 cmd_txd = 0;
3004 if (m->m_pkthdr.csum_flags & IAVF_CSUM_ALL_OFFLOAD) {
3005 iavf_tx_setup_offloads(m, &cmd_txd);
3006 }
3007 if (vlan_has_tag(m)) {
3008 cmd_txd |= IXL_TX_DESC_CMD_IL2TAG1 |
3009 ((uint64_t)vlan_get_tag(m)
3010 << IXL_TX_DESC_L2TAG1_SHIFT);
3011 }
3012
3013 bus_dmamap_sync(sc->sc_dmat, map, 0,
3014 map->dm_mapsize, BUS_DMASYNC_PREWRITE);
3015
3016 for (i = 0; i < (unsigned int)map->dm_nsegs; i++) {
3017 txd = &ring[prod];
3018
3019 cmd = (uint64_t)map->dm_segs[i].ds_len <<
3020 IXL_TX_DESC_BSIZE_SHIFT;
3021 cmd |= IXL_TX_DESC_DTYPE_DATA|IXL_TX_DESC_CMD_ICRC|
3022 cmd_txd;
3023
3024 txd->addr = htole64(map->dm_segs[i].ds_addr);
3025 txd->cmd = htole64(cmd);
3026
3027 last = prod;
3028 prod++;
3029 prod &= mask;
3030 }
3031
3032 cmd |= IXL_TX_DESC_CMD_EOP|IXL_TX_DESC_CMD_RS;
3033 txd->cmd = htole64(cmd);
3034 txm->txm_m = m;
3035 txm->txm_eop = last;
3036
3037 bpf_mtap(ifp, m, BPF_D_OUT);
3038 free -= i;
3039 post = 1;
3040 }
3041
3042 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
3043 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREWRITE);
3044
3045 if (post) {
3046 txr->txr_prod = prod;
3047 iavf_wr(sc, txr->txr_tail, prod);
3048 txr->txr_watchdog = IAVF_WATCHDOG_TICKS;
3049 }
3050 }
3051
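/*
 * Run Tx and Rx completion for one queue pair under the respective
 * ring locks.  The return value encodes the remaining work:
 * bit 0 for Tx, bit 1 for Rx.
 */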
3052 static inline int
3053 iavf_handle_queue_common(struct iavf_softc *sc, struct iavf_queue_pair *qp,
3054 u_int txlimit, struct evcnt *txevcnt,
3055 u_int rxlimit, struct evcnt *rxevcnt)
3056 {
3057 struct iavf_tx_ring *txr;
3058 struct iavf_rx_ring *rxr;
3059 int txmore, rxmore;
3060 int rv;
3061
3062 txr = qp->qp_txr;
3063 rxr = qp->qp_rxr;
3064
3065 mutex_enter(&txr->txr_lock);
3066 txmore = iavf_txeof(sc, txr, txlimit, txevcnt);
3067 mutex_exit(&txr->txr_lock);
3068
3069 mutex_enter(&rxr->rxr_lock);
3070 rxmore = iavf_rxeof(sc, rxr, rxlimit, rxevcnt);
3071 mutex_exit(&rxr->rxr_lock);
3072
3073 rv = txmore | (rxmore << 1);
3074
3075 return rv;
3076 }
3077
3078 static void
3079 iavf_sched_handle_queue(struct iavf_softc *sc, struct iavf_queue_pair *qp)
3080 {
3081
3082 if (qp->qp_workqueue)
3083 workqueue_enqueue(sc->sc_workq_txrx, &qp->qp_work, NULL);
3084 else
3085 softint_schedule(qp->qp_si);
3086 }
3087
3088 static void
3089 iavf_start(struct ifnet *ifp)
3090 {
3091 struct iavf_softc *sc;
3092 struct iavf_tx_ring *txr;
3093
3094 sc = ifp->if_softc;
3095 txr = sc->sc_qps[0].qp_txr;
3096
3097 mutex_enter(&txr->txr_lock);
3098 iavf_tx_common_locked(ifp, txr, false);
3099 mutex_exit(&txr->txr_lock);
3100
3101 }
3102
3103 static inline unsigned int
3104 iavf_select_txqueue(struct iavf_softc *sc, struct mbuf *m)
3105 {
3106 u_int cpuid;
3107
3108 cpuid = cpu_index(curcpu());
3109
3110 return (unsigned int)(cpuid % sc->sc_nqueue_pairs);
3111 }
3112
3113 static int
3114 iavf_transmit(struct ifnet *ifp, struct mbuf *m)
3115 {
3116 struct iavf_softc *sc;
3117 struct iavf_tx_ring *txr;
3118 unsigned int qid;
3119
3120 sc = ifp->if_softc;
3121 qid = iavf_select_txqueue(sc, m);
3122
3123 txr = sc->sc_qps[qid].qp_txr;
3124
3125 if (__predict_false(!pcq_put(txr->txr_intrq, m))) {
3126 mutex_enter(&txr->txr_lock);
3127 txr->txr_pcqdrop.ev_count++;
3128 mutex_exit(&txr->txr_lock);
3129
3130 m_freem(m);
3131 return ENOBUFS;
3132 }
3133
3134 if (mutex_tryenter(&txr->txr_lock)) {
3135 iavf_tx_common_locked(ifp, txr, true);
3136 mutex_exit(&txr->txr_lock);
3137 } else {
3138 kpreempt_disable();
3139 softint_schedule(txr->txr_si);
3140 kpreempt_enable();
3141 }
3142 return 0;
3143 }
3144
3145 static void
3146 iavf_deferred_transmit(void *xtxr)
3147 {
3148 struct iavf_tx_ring *txr;
3149 struct iavf_softc *sc;
3150 struct ifnet *ifp;
3151
3152 txr = xtxr;
3153 sc = txr->txr_sc;
3154 ifp = &sc->sc_ec.ec_if;
3155
3156 mutex_enter(&txr->txr_lock);
3157 txr->txr_transmitdef.ev_count++;
3158 if (pcq_peek(txr->txr_intrq) != NULL)
3159 iavf_tx_common_locked(ifp, txr, true);
3160 mutex_exit(&txr->txr_lock);
3161 }
3162
3163 static void
3164 iavf_txr_clean(struct iavf_softc *sc, struct iavf_tx_ring *txr)
3165 {
3166 struct iavf_tx_map *maps, *txm;
3167 bus_dmamap_t map;
3168 unsigned int i;
3169
3170 KASSERT(mutex_owned(&txr->txr_lock));
3171
3172 maps = txr->txr_maps;
3173 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
3174 txm = &maps[i];
3175
3176 if (txm->txm_m == NULL)
3177 continue;
3178
3179 map = txm->txm_map;
3180 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3181 BUS_DMASYNC_POSTWRITE);
3182 bus_dmamap_unload(sc->sc_dmat, map);
3183
3184 m_freem(txm->txm_m);
3185 txm->txm_m = NULL;
3186 }
3187
3188 memset(IXL_DMA_KVA(&txr->txr_mem), 0, IXL_DMA_LEN(&txr->txr_mem));
3189 txr->txr_prod = txr->txr_cons = 0;
3190 }
3191
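/*
 * Interrupt handler covering the admin queue and, when the queue
 * status bit is set, all queue pairs (no work limit).  A pending VF
 * reset is handed off to the reset task on the workqueue.
 */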
3192 static int
3193 iavf_intr(void *xsc)
3194 {
3195 struct iavf_softc *sc = xsc;
3196 struct ifnet *ifp = &sc->sc_ec.ec_if;
3197 struct iavf_rx_ring *rxr;
3198 struct iavf_tx_ring *txr;
3199 uint32_t icr;
3200 unsigned int i;
3201
3202 	/* read I40E_VFINT_ICR0_ENA1 to clear status */
3203 (void)iavf_rd(sc, I40E_VFINT_ICR0_ENA1);
3204
3205 iavf_intr_enable(sc);
3206 icr = iavf_rd(sc, I40E_VFINT_ICR01);
3207
3208 if (icr == IAVF_REG_VFR) {
3209 log(LOG_INFO, "%s: VF reset in progress\n",
3210 ifp->if_xname);
3211 iavf_work_set(&sc->sc_reset_task, iavf_reset_start, sc);
3212 iavf_work_add(sc->sc_workq, &sc->sc_reset_task);
3213 return 1;
3214 }
3215
3216 if (ISSET(icr, I40E_VFINT_ICR01_ADMINQ_MASK)) {
3217 mutex_enter(&sc->sc_adminq_lock);
3218 iavf_atq_done(sc);
3219 iavf_arq(sc);
3220 mutex_exit(&sc->sc_adminq_lock);
3221 }
3222
3223 if (ISSET(icr, I40E_VFINT_ICR01_QUEUE_0_MASK)) {
3224 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
3225 rxr = sc->sc_qps[i].qp_rxr;
3226 txr = sc->sc_qps[i].qp_txr;
3227
3228 mutex_enter(&rxr->rxr_lock);
3229 while (iavf_rxeof(sc, rxr, UINT_MAX,
3230 &rxr->rxr_intr) != 0) {
3231 /* do nothing */
3232 }
3233 mutex_exit(&rxr->rxr_lock);
3234
3235 mutex_enter(&txr->txr_lock);
3236 while (iavf_txeof(sc, txr, UINT_MAX,
3237 &txr->txr_intr) != 0) {
3238 /* do nothing */
3239 }
3240 mutex_exit(&txr->txr_lock);
3241 }
3242 }
3243
3244 return 0;
3245 }
3246
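/*
 * Per-queue-pair MSI-X handler: process completions up to the
 * interrupt limits, then either reschedule the remaining work
 * (softint or workqueue) or re-enable the queue interrupt.
 */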
3247 static int
3248 iavf_queue_intr(void *xqp)
3249 {
3250 struct iavf_queue_pair *qp = xqp;
3251 struct iavf_tx_ring *txr;
3252 struct iavf_rx_ring *rxr;
3253 struct iavf_softc *sc;
3254 unsigned int qid;
3255 u_int txlimit, rxlimit;
3256 int more;
3257
3258 txr = qp->qp_txr;
3259 rxr = qp->qp_rxr;
3260 sc = txr->txr_sc;
3261 qid = txr->txr_qid;
3262
3263 txlimit = sc->sc_tx_intr_process_limit;
3264 rxlimit = sc->sc_rx_intr_process_limit;
3265 qp->qp_workqueue = sc->sc_txrx_workqueue;
3266
3267 more = iavf_handle_queue_common(sc, qp,
3268 txlimit, &txr->txr_intr, rxlimit, &rxr->rxr_intr);
3269
3270 if (more != 0) {
3271 iavf_sched_handle_queue(sc, qp);
3272 } else {
3273 /* for ALTQ */
3274 if (txr->txr_qid == 0)
3275 if_schedule_deferred_start(&sc->sc_ec.ec_if);
3276 softint_schedule(txr->txr_si);
3277
3278 iavf_queue_intr_enable(sc, qid);
3279 }
3280
3281 return 0;
3282 }
3283
3284 static void
3285 iavf_handle_queue_wk(struct work *wk, void *xsc __unused)
3286 {
3287 struct iavf_queue_pair *qp;
3288
3289 qp = container_of(wk, struct iavf_queue_pair, qp_work);
3290 iavf_handle_queue(qp);
3291 }
3292
3293 static void
3294 iavf_handle_queue(void *xqp)
3295 {
3296 struct iavf_queue_pair *qp = xqp;
3297 struct iavf_tx_ring *txr;
3298 struct iavf_rx_ring *rxr;
3299 struct iavf_softc *sc;
3300 unsigned int qid;
3301 u_int txlimit, rxlimit;
3302 int more;
3303
3304 txr = qp->qp_txr;
3305 rxr = qp->qp_rxr;
3306 sc = txr->txr_sc;
3307 qid = txr->txr_qid;
3308
3309 txlimit = sc->sc_tx_process_limit;
3310 rxlimit = sc->sc_rx_process_limit;
3311
3312 more = iavf_handle_queue_common(sc, qp,
3313 txlimit, &txr->txr_defer, rxlimit, &rxr->rxr_defer);
3314
3315 if (more != 0)
3316 iavf_sched_handle_queue(sc, qp);
3317 else
3318 iavf_queue_intr_enable(sc, qid);
3319 }
3320
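/*
 * Periodic callout: fetch statistics and check each Tx ring's
 * watchdog, scheduling the watchdog task on a timeout and rearming
 * the callout otherwise.  While a reset is in progress it only
 * rekicks the reset task.
 */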
3321 static void
3322 iavf_tick(void *xsc)
3323 {
3324 struct iavf_softc *sc;
3325 unsigned int i;
3326 int timedout;
3327
3328 sc = xsc;
3329 timedout = 0;
3330
3331 mutex_enter(&sc->sc_cfg_lock);
3332
3333 if (sc->sc_resetting) {
3334 iavf_work_add(sc->sc_workq, &sc->sc_reset_task);
3335 mutex_exit(&sc->sc_cfg_lock);
3336 return;
3337 }
3338
3339 iavf_get_stats(sc);
3340
3341 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
3342 timedout |= iavf_watchdog(sc->sc_qps[i].qp_txr);
3343 }
3344
3345 if (timedout != 0) {
3346 iavf_work_add(sc->sc_workq, &sc->sc_wdto_task);
3347 } else {
3348 callout_schedule(&sc->sc_tick, IAVF_TICK_INTERVAL);
3349 }
3350
3351 mutex_exit(&sc->sc_cfg_lock);
3352 }
3353
3354 static void
3355 iavf_tick_halt(void *unused __unused)
3356 {
3357
3358 /* do nothing */
3359 }
3360
3361 static void
3362 iavf_reset_request(void *xsc)
3363 {
3364 struct iavf_softc *sc = xsc;
3365
3366 iavf_reset_vf(sc);
3367 iavf_reset_start(sc);
3368 }
3369
3370 static void
3371 iavf_reset_start(void *xsc)
3372 {
3373 struct iavf_softc *sc = xsc;
3374 struct ifnet *ifp = &sc->sc_ec.ec_if;
3375
3376 mutex_enter(&sc->sc_cfg_lock);
3377
3378 if (sc->sc_resetting)
3379 goto do_reset;
3380
3381 sc->sc_resetting = true;
3382 if_link_state_change(ifp, LINK_STATE_DOWN);
3383
3384 if (ISSET(ifp->if_flags, IFF_RUNNING)) {
3385 iavf_stop_locked(sc);
3386 sc->sc_reset_up = true;
3387 }
3388
3389 memcpy(sc->sc_enaddr_reset, sc->sc_enaddr, ETHER_ADDR_LEN);
3390
3391 do_reset:
3392 iavf_work_set(&sc->sc_reset_task, iavf_reset, sc);
3393
3394 mutex_exit(&sc->sc_cfg_lock);
3395
3396 iavf_reset((void *)sc);
3397 }
3398
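/*
 * Recover from a VF reset: wait for the VF to become active again,
 * reinitialize the admin queue, renegotiate the API version and VF
 * resources, reallocate queue pairs and interrupts if the PF granted
 * a different amount, reprogram the IRQ map, and finally restore the
 * interface state in iavf_reset_finish().
 */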
3399 static void
3400 iavf_reset(void *xsc)
3401 {
3402 struct iavf_softc *sc = xsc;
3403 struct ifnet *ifp = &sc->sc_ec.ec_if;
3404 struct ixl_aq_buf *aqb;
3405 bool realloc_qps, realloc_intrs;
3406
3407 mutex_enter(&sc->sc_cfg_lock);
3408
3409 mutex_enter(&sc->sc_adminq_lock);
3410 iavf_cleanup_admin_queue(sc);
3411 mutex_exit(&sc->sc_adminq_lock);
3412
3413 sc->sc_major_ver = UINT_MAX;
3414 sc->sc_minor_ver = UINT_MAX;
3415 sc->sc_got_vf_resources = 0;
3416 sc->sc_got_irq_map = 0;
3417
3418 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
3419 if (aqb == NULL)
3420 goto failed;
3421
3422 if (iavf_wait_active(sc) != 0) {
3423 log(LOG_WARNING, "%s: VF reset timed out\n",
3424 ifp->if_xname);
3425 goto failed;
3426 }
3427
3428 if (!iavf_arq_fill(sc)) {
3429 log(LOG_ERR, "%s: unable to fill arq descriptors\n",
3430 ifp->if_xname);
3431 goto failed;
3432 }
3433
3434 if (iavf_init_admin_queue(sc) != 0) {
3435 log(LOG_ERR, "%s: unable to initialize admin queue\n",
3436 ifp->if_xname);
3437 goto failed;
3438 }
3439
3440 if (iavf_get_version(sc, aqb) != 0) {
3441 log(LOG_ERR, "%s: unable to get VF interface version\n",
3442 ifp->if_xname);
3443 goto failed;
3444 }
3445
3446 if (iavf_get_vf_resources(sc, aqb) != 0) {
3447 log(LOG_ERR, "%s: timed out waiting for VF resources\n",
3448 ifp->if_xname);
3449 goto failed;
3450 }
3451
3452 if (sc->sc_nqps_alloc < iavf_calc_queue_pair_size(sc)) {
3453 realloc_qps = true;
3454 } else {
3455 realloc_qps = false;
3456 }
3457
3458 if (sc->sc_nintrs < iavf_calc_msix_count(sc)) {
3459 realloc_intrs = true;
3460 } else {
3461 realloc_intrs = false;
3462 }
3463
3464 if (realloc_qps || realloc_intrs)
3465 iavf_teardown_interrupts(sc);
3466
3467 if (realloc_qps) {
3468 iavf_queue_pairs_free(sc);
3469 if (iavf_queue_pairs_alloc(sc) != 0) {
3470 log(LOG_ERR, "%s: failed to allocate queue pairs\n",
3471 ifp->if_xname);
3472 goto failed;
3473 }
3474 }
3475
3476 if (realloc_qps || realloc_intrs) {
3477 if (iavf_setup_interrupts(sc) != 0) {
3478 sc->sc_nintrs = 0;
3479 log(LOG_ERR, "%s: failed to allocate interrupts\n",
3480 ifp->if_xname);
3481 goto failed;
3482 }
3483 log(LOG_INFO, "%s: reallocated queues\n", ifp->if_xname);
3484 }
3485
3486 if (iavf_config_irq_map(sc, aqb) != 0) {
3487 log(LOG_ERR, "%s: timed out configuring IRQ map\n",
3488 ifp->if_xname);
3489 goto failed;
3490 }
3491
3492 mutex_enter(&sc->sc_adminq_lock);
3493 iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
3494 mutex_exit(&sc->sc_adminq_lock);
3495
3496 iavf_reset_finish(sc);
3497
3498 mutex_exit(&sc->sc_cfg_lock);
3499 return;
3500
3501 failed:
3502 mutex_enter(&sc->sc_adminq_lock);
3503 iavf_cleanup_admin_queue(sc);
3504 if (aqb != NULL) {
3505 iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
3506 }
3507 mutex_exit(&sc->sc_adminq_lock);
3508 callout_schedule(&sc->sc_tick, IAVF_TICK_INTERVAL);
3509 mutex_exit(&sc->sc_cfg_lock);
3510 }
3511
3512 static void
3513 iavf_reset_finish(struct iavf_softc *sc)
3514 {
3515 struct ethercom *ec = &sc->sc_ec;
3516 struct ether_multi *enm;
3517 struct ether_multistep step;
3518 struct ifnet *ifp = &ec->ec_if;
3519 struct vlanid_list *vlanidp;
3520
3521 KASSERT(mutex_owned(&sc->sc_cfg_lock));
3522
3523 callout_stop(&sc->sc_tick);
3524
3525 iavf_intr_enable(sc);
3526
3527 if (!iavf_is_etheranyaddr(sc->sc_enaddr_added)) {
3528 iavf_eth_addr(sc, sc->sc_enaddr_added, IAVF_VC_OP_ADD_ETH_ADDR);
3529 }
3530
3531 ETHER_LOCK(ec);
3532 if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
3533 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
3534 ETHER_NEXT_MULTI(step, enm)) {
3535 iavf_add_multi(sc, enm->enm_addrlo, enm->enm_addrhi);
3536 }
3537 }
3538
3539 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
3540 ETHER_UNLOCK(ec);
3541 iavf_config_vlan_id(sc, vlanidp->vid, IAVF_VC_OP_ADD_VLAN);
3542 ETHER_LOCK(ec);
3543 }
3544 ETHER_UNLOCK(ec);
3545
3546 if (memcmp(sc->sc_enaddr, sc->sc_enaddr_reset, ETHER_ADDR_LEN) != 0) {
3547 log(LOG_INFO, "%s: Ethernet address changed to %s\n",
3548 ifp->if_xname, ether_sprintf(sc->sc_enaddr));
3549 IFNET_LOCK(ifp);
3550 kpreempt_disable();
3551 /*XXX we need an API to change ethernet address. */
3552 iavf_replace_lla(ifp, sc->sc_enaddr_reset, sc->sc_enaddr);
3553 kpreempt_enable();
3554 IFNET_UNLOCK(ifp);
3555 }
3556
3557 sc->sc_resetting = false;
3558
3559 if (sc->sc_reset_up) {
3560 iavf_init_locked(sc);
3561 }
3562
3563 if (sc->sc_link_state != LINK_STATE_DOWN) {
3564 if_link_state_change(ifp, sc->sc_link_state);
3565 }
3566
3567 }
3568
3569 static int
3570 iavf_dmamem_alloc(bus_dma_tag_t dmat, struct ixl_dmamem *ixm,
3571 bus_size_t size, bus_size_t align)
3572 {
3573 ixm->ixm_size = size;
3574
3575 if (bus_dmamap_create(dmat, ixm->ixm_size, 1,
3576 ixm->ixm_size, 0,
3577 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
3578 &ixm->ixm_map) != 0)
3579 return 1;
3580 if (bus_dmamem_alloc(dmat, ixm->ixm_size,
3581 align, 0, &ixm->ixm_seg, 1, &ixm->ixm_nsegs,
3582 BUS_DMA_WAITOK) != 0)
3583 goto destroy;
3584 if (bus_dmamem_map(dmat, &ixm->ixm_seg, ixm->ixm_nsegs,
3585 ixm->ixm_size, &ixm->ixm_kva, BUS_DMA_WAITOK) != 0)
3586 goto free;
3587 if (bus_dmamap_load(dmat, ixm->ixm_map, ixm->ixm_kva,
3588 ixm->ixm_size, NULL, BUS_DMA_WAITOK) != 0)
3589 goto unmap;
3590
3591 memset(ixm->ixm_kva, 0, ixm->ixm_size);
3592
3593 return 0;
3594 unmap:
3595 bus_dmamem_unmap(dmat, ixm->ixm_kva, ixm->ixm_size);
3596 free:
3597 bus_dmamem_free(dmat, &ixm->ixm_seg, 1);
3598 destroy:
3599 bus_dmamap_destroy(dmat, ixm->ixm_map);
3600 return 1;
3601 }
3602
3603 static void
3604 iavf_dmamem_free(bus_dma_tag_t dmat, struct ixl_dmamem *ixm)
3605 {
3606
3607 bus_dmamap_unload(dmat, ixm->ixm_map);
3608 bus_dmamem_unmap(dmat, ixm->ixm_kva, ixm->ixm_size);
3609 bus_dmamem_free(dmat, &ixm->ixm_seg, 1);
3610 bus_dmamap_destroy(dmat, ixm->ixm_map);
3611 }
3612
3613 static struct ixl_aq_buf *
3614 iavf_aqb_alloc(bus_dma_tag_t dmat, size_t buflen)
3615 {
3616 struct ixl_aq_buf *aqb;
3617
3618 aqb = kmem_alloc(sizeof(*aqb), KM_NOSLEEP);
3619 if (aqb == NULL)
3620 return NULL;
3621
3622 aqb->aqb_size = buflen;
3623
3624 if (bus_dmamap_create(dmat, aqb->aqb_size, 1,
3625 aqb->aqb_size, 0,
3626 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &aqb->aqb_map) != 0)
3627 goto free;
3628 if (bus_dmamem_alloc(dmat, aqb->aqb_size,
3629 IAVF_AQ_ALIGN, 0, &aqb->aqb_seg, 1, &aqb->aqb_nsegs,
3630 BUS_DMA_WAITOK) != 0)
3631 goto destroy;
3632 if (bus_dmamem_map(dmat, &aqb->aqb_seg, aqb->aqb_nsegs,
3633 aqb->aqb_size, &aqb->aqb_data, BUS_DMA_WAITOK) != 0)
3634 goto dma_free;
3635 if (bus_dmamap_load(dmat, aqb->aqb_map, aqb->aqb_data,
3636 aqb->aqb_size, NULL, BUS_DMA_WAITOK) != 0)
3637 goto unmap;
3638
3639 return aqb;
3640 unmap:
3641 bus_dmamem_unmap(dmat, aqb->aqb_data, aqb->aqb_size);
3642 dma_free:
3643 bus_dmamem_free(dmat, &aqb->aqb_seg, 1);
3644 destroy:
3645 bus_dmamap_destroy(dmat, aqb->aqb_map);
3646 free:
3647 kmem_free(aqb, sizeof(*aqb));
3648
3649 return NULL;
3650 }
3651
3652 static void
3653 iavf_aqb_free(bus_dma_tag_t dmat, struct ixl_aq_buf *aqb)
3654 {
3655
3656 bus_dmamap_unload(dmat, aqb->aqb_map);
3657 bus_dmamem_unmap(dmat, aqb->aqb_data, aqb->aqb_size);
3658 bus_dmamem_free(dmat, &aqb->aqb_seg, 1);
3659 bus_dmamap_destroy(dmat, aqb->aqb_map);
3660 kmem_free(aqb, sizeof(*aqb));
3661 }
3662
3663 static struct ixl_aq_buf *
3664 iavf_aqb_get_locked(struct ixl_aq_bufs *q)
3665 {
3666 struct ixl_aq_buf *aqb;
3667
3668 aqb = SIMPLEQ_FIRST(q);
3669 if (aqb != NULL) {
3670 SIMPLEQ_REMOVE(q, aqb, ixl_aq_buf, aqb_entry);
3671 }
3672
3673 return aqb;
3674 }
3675
3676 static struct ixl_aq_buf *
3677 iavf_aqb_get(struct iavf_softc *sc, struct ixl_aq_bufs *q)
3678 {
3679 struct ixl_aq_buf *aqb;
3680
3681 if (q != NULL) {
3682 mutex_enter(&sc->sc_adminq_lock);
3683 aqb = iavf_aqb_get_locked(q);
3684 mutex_exit(&sc->sc_adminq_lock);
3685 } else {
3686 aqb = NULL;
3687 }
3688
3689 if (aqb == NULL) {
3690 aqb = iavf_aqb_alloc(sc->sc_dmat, IAVF_AQ_BUFLEN);
3691 }
3692
3693 return aqb;
3694 }
3695
3696 static void
3697 iavf_aqb_put_locked(struct ixl_aq_bufs *q, struct ixl_aq_buf *aqb)
3698 {
3699
3700 SIMPLEQ_INSERT_TAIL(q, aqb, aqb_entry);
3701 }
3702
3703 static void
3704 iavf_aqb_clean(struct ixl_aq_bufs *q, bus_dma_tag_t dmat)
3705 {
3706 struct ixl_aq_buf *aqb;
3707
3708 while ((aqb = SIMPLEQ_FIRST(q)) != NULL) {
3709 SIMPLEQ_REMOVE(q, aqb, ixl_aq_buf, aqb_entry);
3710 iavf_aqb_free(dmat, aqb);
3711 }
3712 }
3713
3714 static const char *
3715 iavf_aq_vc_opcode_str(const struct ixl_aq_desc *iaq)
3716 {
3717
3718 switch (iavf_aq_vc_get_opcode(iaq)) {
3719 case IAVF_VC_OP_VERSION:
3720 return "GET_VERSION";
3721 case IAVF_VC_OP_RESET_VF:
3722 return "RESET_VF";
3723 case IAVF_VC_OP_GET_VF_RESOURCES:
3724 return "GET_VF_RESOURCES";
3725 case IAVF_VC_OP_CONFIG_TX_QUEUE:
3726 return "CONFIG_TX_QUEUE";
3727 case IAVF_VC_OP_CONFIG_RX_QUEUE:
3728 return "CONFIG_RX_QUEUE";
3729 case IAVF_VC_OP_CONFIG_VSI_QUEUES:
3730 return "CONFIG_VSI_QUEUES";
3731 case IAVF_VC_OP_CONFIG_IRQ_MAP:
3732 return "CONFIG_IRQ_MAP";
3733 case IAVF_VC_OP_ENABLE_QUEUES:
3734 return "ENABLE_QUEUES";
3735 case IAVF_VC_OP_DISABLE_QUEUES:
3736 return "DISABLE_QUEUES";
3737 case IAVF_VC_OP_ADD_ETH_ADDR:
3738 return "ADD_ETH_ADDR";
3739 case IAVF_VC_OP_DEL_ETH_ADDR:
3740 return "DEL_ETH_ADDR";
3741 case IAVF_VC_OP_CONFIG_PROMISC:
3742 return "CONFIG_PROMISC";
3743 case IAVF_VC_OP_GET_STATS:
3744 return "GET_STATS";
3745 case IAVF_VC_OP_EVENT:
3746 return "EVENT";
3747 case IAVF_VC_OP_CONFIG_RSS_KEY:
3748 return "CONFIG_RSS_KEY";
3749 case IAVF_VC_OP_GET_RSS_HENA_CAPS:
3750 		return "GET_RSS_HENA_CAPS";
3751 case IAVF_VC_OP_SET_RSS_HENA:
3752 return "SET_RSS_HENA";
3753 case IAVF_VC_OP_ENABLE_VLAN_STRIP:
3754 return "ENABLE_VLAN_STRIPPING";
3755 case IAVF_VC_OP_DISABLE_VLAN_STRIP:
3756 return "DISABLE_VLAN_STRIPPING";
3757 case IAVF_VC_OP_REQUEST_QUEUES:
3758 return "REQUEST_QUEUES";
3759 }
3760
3761 return "unknown";
3762 }
3763
3764 static void
3765 iavf_aq_dump(const struct iavf_softc *sc, const struct ixl_aq_desc *iaq,
3766 const char *msg)
3767 {
3768 char buf[512];
3769 size_t len;
3770
3771 len = sizeof(buf);
3772 buf[--len] = '\0';
3773
3774 device_printf(sc->sc_dev, "%s\n", msg);
3775 snprintb(buf, len, IXL_AQ_FLAGS_FMT, le16toh(iaq->iaq_flags));
3776 device_printf(sc->sc_dev, "flags %s opcode %04x\n",
3777 buf, le16toh(iaq->iaq_opcode));
3778 device_printf(sc->sc_dev, "datalen %u retval %u\n",
3779 le16toh(iaq->iaq_datalen), le16toh(iaq->iaq_retval));
3780 device_printf(sc->sc_dev, "vc-opcode %u (%s)\n",
3781 iavf_aq_vc_get_opcode(iaq),
3782 iavf_aq_vc_opcode_str(iaq));
3783 device_printf(sc->sc_dev, "vc-retval %u\n",
3784 iavf_aq_vc_get_retval(iaq));
3785 device_printf(sc->sc_dev, "cookie %016" PRIx64 "\n", iaq->iaq_cookie);
3786 device_printf(sc->sc_dev, "%08x %08x %08x %08x\n",
3787 le32toh(iaq->iaq_param[0]), le32toh(iaq->iaq_param[1]),
3788 le32toh(iaq->iaq_param[2]), le32toh(iaq->iaq_param[3]));
3789 }
3790
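/*
 * Post buffers to the admin receive queue: attach an idle aqb to each
 * unrefreshed ARQ descriptor and advance the tail register once at
 * least one slot has been filled.
 */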
3791 static int
3792 iavf_arq_fill(struct iavf_softc *sc)
3793 {
3794 struct ixl_aq_buf *aqb;
3795 struct ixl_aq_desc *arq, *iaq;
3796 unsigned int prod = sc->sc_arq_prod;
3797 unsigned int n;
3798 	int filled = 0;
3799
3800 n = ixl_rxr_unrefreshed(sc->sc_arq_prod, sc->sc_arq_cons,
3801 IAVF_AQ_NUM);
3802
3803 	if (__predict_false(n == 0))
3804 return 0;
3805
3806 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3807 0, IXL_DMA_LEN(&sc->sc_arq),
3808 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3809
3810 arq = IXL_DMA_KVA(&sc->sc_arq);
3811
3812 do {
3813 iaq = &arq[prod];
3814
3815 if (ixl_aq_has_dva(iaq)) {
3816 /* already filled */
3817 break;
3818 }
3819
3820 aqb = iavf_aqb_get_locked(&sc->sc_arq_idle);
3821 if (aqb == NULL)
3822 break;
3823
3824 memset(aqb->aqb_data, 0, aqb->aqb_size);
3825
3826 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0,
3827 aqb->aqb_size, BUS_DMASYNC_PREREAD);
3828
3829 iaq->iaq_flags = htole16(IXL_AQ_BUF |
3830 (aqb->aqb_size > I40E_AQ_LARGE_BUF ?
3831 IXL_AQ_LB : 0));
3832 iaq->iaq_opcode = 0;
3833 iaq->iaq_datalen = htole16(aqb->aqb_size);
3834 iaq->iaq_retval = 0;
3835 iaq->iaq_cookie = 0;
3836 iaq->iaq_param[0] = 0;
3837 iaq->iaq_param[1] = 0;
3838 ixl_aq_dva(iaq, IXL_AQB_DVA(aqb));
3839 iavf_aqb_put_locked(&sc->sc_arq_live, aqb);
3840
3841 prod++;
3842 prod &= IAVF_AQ_MASK;
3843 filled = 1;
3844 } while (--n);
3845
3846 sc->sc_arq_prod = prod;
3847
3848 if (filled) {
3849 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3850 0, IXL_DMA_LEN(&sc->sc_arq),
3851 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3852 iavf_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
3853 }
3854
3855 return filled;
3856 }
3857
3858 static int
3859 iavf_arq_wait(struct iavf_softc *sc, uint32_t opcode)
3860 {
3861 int error;
3862
3863 KASSERT(mutex_owned(&sc->sc_adminq_lock));
3864
3865 while ((error = cv_timedwait(&sc->sc_adminq_cv,
3866 &sc->sc_adminq_lock, mstohz(IAVF_EXEC_TIMEOUT))) == 0) {
3867 if (opcode == sc->sc_arq_opcode)
3868 break;
3869 }
3870
3871 if (error != 0 &&
3872 atomic_load_relaxed(&sc->sc_debuglevel) >= 2)
3873 device_printf(sc->sc_dev, "cv_timedwait error=%d\n", error);
3874
3875 return error;
3876 }
3877
3878 static void
3879 iavf_arq_refill(void *xsc)
3880 {
3881 struct iavf_softc *sc = xsc;
3882 struct ixl_aq_bufs aqbs;
3883 struct ixl_aq_buf *aqb;
3884 unsigned int n, i;
3885
3886 mutex_enter(&sc->sc_adminq_lock);
3887 iavf_arq_fill(sc);
3888 n = ixl_rxr_unrefreshed(sc->sc_arq_prod, sc->sc_arq_cons,
3889 IAVF_AQ_NUM);
3890 mutex_exit(&sc->sc_adminq_lock);
3891
3892 if (n == 0)
3893 return;
3894
3895 if (atomic_load_relaxed(&sc->sc_debuglevel) >= 1)
3896 device_printf(sc->sc_dev, "Allocate %d bufs for arq\n", n);
3897
3898 SIMPLEQ_INIT(&aqbs);
3899 for (i = 0; i < n; i++) {
3900 aqb = iavf_aqb_get(sc, NULL);
3901 if (aqb == NULL)
3902 continue;
3903 SIMPLEQ_INSERT_TAIL(&aqbs, aqb, aqb_entry);
3904 }
3905
3906 mutex_enter(&sc->sc_adminq_lock);
3907 while ((aqb = SIMPLEQ_FIRST(&aqbs)) != NULL) {
3908 SIMPLEQ_REMOVE(&aqbs, aqb, ixl_aq_buf, aqb_entry);
3909 iavf_aqb_put_locked(&sc->sc_arq_idle, aqb);
3910 }
3911 iavf_arq_fill(sc);
3912 mutex_exit(&sc->sc_adminq_lock);
3913 }
3914
3915 static uint32_t
3916 iavf_process_arq(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
3917 struct ixl_aq_buf *aqb)
3918 {
3919 uint32_t vc_retval, vc_opcode;
3920 int dbg;
3921
3922 dbg = atomic_load_relaxed(&sc->sc_debuglevel);
3923 if (dbg >= 3)
3924 iavf_aq_dump(sc, iaq, "arq proc");
3925
3926 if (dbg >= 2) {
3927 vc_retval = iavf_aq_vc_get_retval(iaq);
3928 if (vc_retval != IAVF_VC_RC_SUCCESS) {
3929 device_printf(sc->sc_dev, "%s failed=%d(arq)\n",
3930 iavf_aq_vc_opcode_str(iaq), vc_retval);
3931 }
3932 }
3933
3934 vc_opcode = iavf_aq_vc_get_opcode(iaq);
3935 switch (vc_opcode) {
3936 case IAVF_VC_OP_VERSION:
3937 iavf_process_version(sc, iaq, aqb);
3938 break;
3939 case IAVF_VC_OP_GET_VF_RESOURCES:
3940 iavf_process_vf_resources(sc, iaq, aqb);
3941 break;
3942 case IAVF_VC_OP_CONFIG_IRQ_MAP:
3943 iavf_process_irq_map(sc, iaq);
3944 break;
3945 case IAVF_VC_OP_EVENT:
3946 iavf_process_vc_event(sc, iaq, aqb);
3947 break;
3948 case IAVF_VC_OP_GET_STATS:
3949 iavf_process_stats(sc, iaq, aqb);
3950 break;
3951 case IAVF_VC_OP_REQUEST_QUEUES:
3952 iavf_process_req_queues(sc, iaq, aqb);
3953 break;
3954 }
3955
3956 return vc_opcode;
3957 }
3958
3959 static int
3960 iavf_arq_poll(struct iavf_softc *sc, uint32_t wait_opcode, int retry)
3961 {
3962 struct ixl_aq_desc *arq, *iaq;
3963 struct ixl_aq_buf *aqb;
3964 unsigned int cons = sc->sc_arq_cons;
3965 unsigned int prod;
3966 uint32_t vc_opcode;
3967 bool received;
3968 int i;
3969
3970 for (i = 0, received = false; i < retry && !received; i++) {
3971 prod = iavf_rd(sc, sc->sc_aq_regs->arq_head);
3972 prod &= sc->sc_aq_regs->arq_head_mask;
3973
3974 if (prod == cons) {
3975 delaymsec(1);
3976 continue;
3977 }
3978
3979 if (prod >= IAVF_AQ_NUM) {
3980 return EIO;
3981 }
3982
3983 arq = IXL_DMA_KVA(&sc->sc_arq);
3984
3985 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3986 0, IXL_DMA_LEN(&sc->sc_arq),
3987 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3988
3989 do {
3990 iaq = &arq[cons];
3991 aqb = iavf_aqb_get_locked(&sc->sc_arq_live);
3992 KASSERT(aqb != NULL);
3993
3994 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0,
3995 IAVF_AQ_BUFLEN, BUS_DMASYNC_POSTREAD);
3996
3997 vc_opcode = iavf_process_arq(sc, iaq, aqb);
3998
3999 if (vc_opcode == wait_opcode)
4000 received = true;
4001
4002 memset(iaq, 0, sizeof(*iaq));
4003 iavf_aqb_put_locked(&sc->sc_arq_idle, aqb);
4004
4005 cons++;
4006 cons &= IAVF_AQ_MASK;
4007
4008 } while (cons != prod);
4009
4010 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
4011 0, IXL_DMA_LEN(&sc->sc_arq),
4012 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4013
4014 sc->sc_arq_cons = cons;
4015 iavf_arq_fill(sc);
4016
4017 }
4018
4019 if (!received)
4020 return ETIMEDOUT;
4021
4022 return 0;
4023 }
4024
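/*
 * Process admin receive queue completions from interrupt context.
 * Responses that a synchronous command is waiting for wake the
 * sleeper on sc_adminq_cv; a bogus head pointer (seen while the VF is
 * resetting) reschedules the reset task instead.  Buffer refill is
 * deferred to the workqueue.
 */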
4025 static int
4026 iavf_arq(struct iavf_softc *sc)
4027 {
4028 struct ixl_aq_desc *arq, *iaq;
4029 struct ixl_aq_buf *aqb;
4030 unsigned int cons = sc->sc_arq_cons;
4031 unsigned int prod;
4032 uint32_t vc_opcode;
4033
4034 KASSERT(mutex_owned(&sc->sc_adminq_lock));
4035
4036 prod = iavf_rd(sc, sc->sc_aq_regs->arq_head);
4037 prod &= sc->sc_aq_regs->arq_head_mask;
4038
4039 	/* the head register reads back a bogus value while the VF is resetting */
4040 if (prod >= IAVF_AQ_NUM) {
4041 iavf_work_set(&sc->sc_reset_task, iavf_reset_start, sc);
4042 iavf_work_add(sc->sc_workq, &sc->sc_reset_task);
4043 return 0;
4044 }
4045
4046 if (cons == prod)
4047 return 0;
4048
4049 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
4050 0, IXL_DMA_LEN(&sc->sc_arq),
4051 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
4052
4053 arq = IXL_DMA_KVA(&sc->sc_arq);
4054
4055 do {
4056 iaq = &arq[cons];
4057 aqb = iavf_aqb_get_locked(&sc->sc_arq_live);
4058
4059 KASSERT(aqb != NULL);
4060
4061 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IAVF_AQ_BUFLEN,
4062 BUS_DMASYNC_POSTREAD);
4063
4064 vc_opcode = iavf_process_arq(sc, iaq, aqb);
4065
4066 switch (vc_opcode) {
4067 case IAVF_VC_OP_CONFIG_TX_QUEUE:
4068 case IAVF_VC_OP_CONFIG_RX_QUEUE:
4069 case IAVF_VC_OP_CONFIG_VSI_QUEUES:
4070 case IAVF_VC_OP_ENABLE_QUEUES:
4071 case IAVF_VC_OP_DISABLE_QUEUES:
4072 case IAVF_VC_OP_GET_RSS_HENA_CAPS:
4073 case IAVF_VC_OP_SET_RSS_HENA:
4074 case IAVF_VC_OP_ADD_ETH_ADDR:
4075 case IAVF_VC_OP_DEL_ETH_ADDR:
4076 case IAVF_VC_OP_CONFIG_PROMISC:
4077 case IAVF_VC_OP_ADD_VLAN:
4078 case IAVF_VC_OP_DEL_VLAN:
4079 case IAVF_VC_OP_ENABLE_VLAN_STRIP:
4080 case IAVF_VC_OP_DISABLE_VLAN_STRIP:
4081 case IAVF_VC_OP_CONFIG_RSS_KEY:
4082 case IAVF_VC_OP_CONFIG_RSS_LUT:
4083 sc->sc_arq_retval = iavf_aq_vc_get_retval(iaq);
4084 sc->sc_arq_opcode = vc_opcode;
4085 cv_signal(&sc->sc_adminq_cv);
4086 break;
4087 }
4088
4089 memset(iaq, 0, sizeof(*iaq));
4090 iavf_aqb_put_locked(&sc->sc_arq_idle, aqb);
4091
4092 cons++;
4093 cons &= IAVF_AQ_MASK;
4094 } while (cons != prod);
4095
4096 sc->sc_arq_cons = cons;
4097 iavf_work_add(sc->sc_workq, &sc->sc_arq_refill);
4098
4099 return 1;
4100 }
4101
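/*
 * iavf_atq_post --
 *	Copy a command descriptor into the next admin send queue slot,
 *	attach the optional DMA buffer, and ring the tail doorbell.
 *	Returns the new producer index.
 */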
4102 static int
4103 iavf_atq_post(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
4104 struct ixl_aq_buf *aqb)
4105 {
4106 struct ixl_aq_desc *atq, *slot;
4107 unsigned int prod;
4108
4109 atq = IXL_DMA_KVA(&sc->sc_atq);
4110 prod = sc->sc_atq_prod;
4111 slot = &atq[prod];
4112
4113 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
4114 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
4115
4116 *slot = *iaq;
4117 slot->iaq_flags |= htole16(IXL_AQ_SI);
4118 if (aqb != NULL) {
4119 ixl_aq_dva(slot, IXL_AQB_DVA(aqb));
4120 bus_dmamap_sync(sc->sc_dmat, IXL_AQB_MAP(aqb),
4121 0, IXL_AQB_LEN(aqb), BUS_DMASYNC_PREWRITE);
4122 iavf_aqb_put_locked(&sc->sc_atq_live, aqb);
4123 } else {
4124 ixl_aq_dva(slot, (bus_addr_t)0);
4125 }
4126
4127 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
4128 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
4129
4130 if (atomic_load_relaxed(&sc->sc_debuglevel) >= 3)
4131 iavf_aq_dump(sc, slot, "post");
4132
4133 prod++;
4134 prod &= IAVF_AQ_MASK;
4135 sc->sc_atq_prod = prod;
4136 iavf_wr(sc, sc->sc_aq_regs->atq_tail, prod);
4137 return prod;
4138 }
4139
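/*
 * iavf_atq_poll --
 *	Busy-wait (up to "tm" milliseconds) until the hardware head pointer
 *	catches up with the producer index, i.e. the last posted command has
 *	been consumed, then check the completion status in the descriptor.
 */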
4140 static int
4141 iavf_atq_poll(struct iavf_softc *sc, unsigned int tm)
4142 {
4143 struct ixl_aq_desc *atq, *slot;
4144 struct ixl_aq_desc iaq;
4145 struct ixl_aq_buf *aqb;
4146 unsigned int prod;
4147 unsigned int t;
4148 int dbg;
4149
4150 dbg = atomic_load_relaxed(&sc->sc_debuglevel);
4151 atq = IXL_DMA_KVA(&sc->sc_atq);
4152 prod = sc->sc_atq_prod;
4153 slot = &atq[prod];
4154 t = 0;
4155
4156 while (iavf_rd(sc, sc->sc_aq_regs->atq_head) != prod) {
4157 delaymsec(1);
4158
4159 if (t++ > tm) {
4160 if (dbg >= 2) {
4161 device_printf(sc->sc_dev,
4162 "atq timedout\n");
4163 }
4164 return ETIMEDOUT;
4165 }
4166 }
4167
4168 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
4169 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTREAD);
4170 iaq = *slot;
4171 memset(slot, 0, sizeof(*slot));
4172 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
4173 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREREAD);
4174
4175 aqb = iavf_aqb_get_locked(&sc->sc_atq_live);
4176 if (aqb != NULL) {
4177 bus_dmamap_sync(sc->sc_dmat, IXL_AQB_MAP(aqb),
4178 0, IXL_AQB_LEN(aqb), BUS_DMASYNC_POSTWRITE);
4179 		/* no iavf_aqb_put() here: the caller still owns aqb */
4180 }
4181
4182 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4183 if (dbg >= 2) {
4184 device_printf(sc->sc_dev,
4185 "atq retcode=0x%04x\n", le16toh(iaq.iaq_retval));
4186 }
4187 return EIO;
4188 }
4189
4190 return 0;
4191 }
4192
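/*
 * iavf_atq_done --
 *	Reclaim admin send queue descriptors that the hardware has marked
 *	done (IXL_AQ_DD) and return their buffers to the idle list.
 */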
4193 static void
4194 iavf_atq_done(struct iavf_softc *sc)
4195 {
4196 struct ixl_aq_desc *atq, *slot;
4197 struct ixl_aq_buf *aqb;
4198 unsigned int cons;
4199 unsigned int prod;
4200
4201 KASSERT(mutex_owned(&sc->sc_adminq_lock));
4202
4203 prod = sc->sc_atq_prod;
4204 cons = sc->sc_atq_cons;
4205
4206 if (prod == cons)
4207 return;
4208
4209 atq = IXL_DMA_KVA(&sc->sc_atq);
4210
4211 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
4212 0, IXL_DMA_LEN(&sc->sc_atq),
4213 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
4214
4215 do {
4216 slot = &atq[cons];
4217 if (!ISSET(slot->iaq_flags, htole16(IXL_AQ_DD)))
4218 break;
4219
4220 if (ixl_aq_has_dva(slot) &&
4221 (aqb = iavf_aqb_get_locked(&sc->sc_atq_live)) != NULL) {
4222 bus_dmamap_sync(sc->sc_dmat, IXL_AQB_MAP(aqb),
4223 0, IXL_AQB_LEN(aqb), BUS_DMASYNC_POSTWRITE);
4224 iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
4225 }
4226
4227 memset(slot, 0, sizeof(*slot));
4228
4229 cons++;
4230 cons &= IAVF_AQ_MASK;
4231 } while (cons != prod);
4232
4233 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
4234 0, IXL_DMA_LEN(&sc->sc_atq),
4235 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4236
4237 sc->sc_atq_cons = cons;
4238 }
4239
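/*
 * iavf_adminq_poll / iavf_adminq_poll_locked --
 *	Post a command and busy-wait for both its send-queue completion and
 *	the matching virtchnl reply.  Once the device is attached the
 *	_locked variant expects the caller to hold sc_adminq_lock; during
 *	attach it may be called without the lock.
 */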
4240 static int
4241 iavf_adminq_poll(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
4242 struct ixl_aq_buf *aqb, int retry)
4243 {
4244 int error;
4245
4246 mutex_enter(&sc->sc_adminq_lock);
4247 error = iavf_adminq_poll_locked(sc, iaq, aqb, retry);
4248 mutex_exit(&sc->sc_adminq_lock);
4249
4250 return error;
4251 }
4252
4253 static int
4254 iavf_adminq_poll_locked(struct iavf_softc *sc,
4255 struct ixl_aq_desc *iaq, struct ixl_aq_buf *aqb, int retry)
4256 {
4257 uint32_t opcode;
4258 int error;
4259
4260 KASSERT(!sc->sc_attached || mutex_owned(&sc->sc_adminq_lock));
4261
4262 opcode = iavf_aq_vc_get_opcode(iaq);
4263
4264 iavf_atq_post(sc, iaq, aqb);
4265
4266 error = iavf_atq_poll(sc, retry);
4267 if (error)
4268 return error;
4269
4270 error = iavf_arq_poll(sc, opcode, retry);
4271
4272 if (error != 0 &&
4273 atomic_load_relaxed(&sc->sc_debuglevel) >= 1) {
4274 device_printf(sc->sc_dev, "%s failed=%d(polling)\n",
4275 iavf_aq_vc_opcode_str(iaq), error);
4276 }
4277
4278 return error;
4279 }
4280
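/*
 * iavf_adminq_exec --
 *	Post a command and wait (via iavf_arq_wait()) for iavf_arq() to
 *	signal that the matching reply has arrived, then return the virtchnl
 *	return code saved in sc_arq_retval, or the wait error.
 */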
4281 static int
4282 iavf_adminq_exec(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
4283 struct ixl_aq_buf *aqb)
4284 {
4285 int error;
4286 uint32_t opcode;
4287
4288 opcode = iavf_aq_vc_get_opcode(iaq);
4289
4290 mutex_enter(&sc->sc_adminq_lock);
4291 iavf_atq_post(sc, iaq, aqb);
4292
4293 error = iavf_arq_wait(sc, opcode);
4294 if (error == 0) {
4295 error = sc->sc_arq_retval;
4296 if (error != IAVF_VC_RC_SUCCESS &&
4297 atomic_load_relaxed(&sc->sc_debuglevel) >= 1) {
4298 device_printf(sc->sc_dev, "%s failed=%d\n",
4299 iavf_aq_vc_opcode_str(iaq), error);
4300 }
4301 }
4302
4303 mutex_exit(&sc->sc_adminq_lock);
4304 return error;
4305 }
4306
4307 static void
4308 iavf_process_version(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
4309 struct ixl_aq_buf *aqb)
4310 {
4311 struct iavf_vc_version_info *ver;
4312
4313 ver = (struct iavf_vc_version_info *)aqb->aqb_data;
4314 sc->sc_major_ver = le32toh(ver->major);
4315 sc->sc_minor_ver = le32toh(ver->minor);
4316 }
4317
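/*
 * iavf_process_vf_resources --
 *	Record the VF resources advertised by the PF: offload capabilities,
 *	maximum MTU, VSI and queue identifiers, and the default MAC address
 *	(the pre-generated sc_enaddr_fake is kept if the PF supplies none).
 */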
4318 static void
4319 iavf_process_vf_resources(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
4320 struct ixl_aq_buf *aqb)
4321 {
4322 struct iavf_vc_vf_resource *vf_res;
4323 struct iavf_vc_vsi_resource *vsi_res;
4324 uint8_t *enaddr;
4325 int mtu, dbg;
4326 char buf[512];
4327
4328 dbg = atomic_load_relaxed(&sc->sc_debuglevel);
4329 sc->sc_got_vf_resources = 1;
4330
4331 vf_res = aqb->aqb_data;
4332 sc->sc_max_vectors = le16toh(vf_res->max_vectors);
4333 if (le16toh(vf_res->num_vsis) == 0) {
4334 if (dbg >= 1) {
4335 device_printf(sc->sc_dev, "no vsi available\n");
4336 }
4337 return;
4338 }
4339 sc->sc_vf_cap = le32toh(vf_res->offload_flags);
4340 if (dbg >= 2) {
4341 snprintb(buf, sizeof(buf),
4342 IAVF_VC_OFFLOAD_FMT, sc->sc_vf_cap);
4343 device_printf(sc->sc_dev, "VF cap=%s\n", buf);
4344 }
4345
4346 mtu = le16toh(vf_res->max_mtu);
4347 if (IAVF_MIN_MTU < mtu && mtu < IAVF_MAX_MTU) {
4348 sc->sc_max_mtu = MIN(IAVF_MAX_MTU, mtu);
4349 }
4350
4351 vsi_res = &vf_res->vsi_res[0];
4352 sc->sc_vsi_id = le16toh(vsi_res->vsi_id);
4353 sc->sc_vf_id = le32toh(iaq->iaq_param[0]);
4354 sc->sc_qset_handle = le16toh(vsi_res->qset_handle);
4355 sc->sc_nqps_vsi = le16toh(vsi_res->num_queue_pairs);
4356 if (!iavf_is_etheranyaddr(vsi_res->default_mac)) {
4357 enaddr = vsi_res->default_mac;
4358 } else {
4359 enaddr = sc->sc_enaddr_fake;
4360 }
4361 memcpy(sc->sc_enaddr, enaddr, ETHER_ADDR_LEN);
4362 }
4363
4364 static void
4365 iavf_process_irq_map(struct iavf_softc *sc, struct ixl_aq_desc *iaq)
4366 {
4367 uint32_t retval;
4368
4369 retval = iavf_aq_vc_get_retval(iaq);
4370 if (retval != IAVF_VC_RC_SUCCESS) {
4371 return;
4372 }
4373
4374 sc->sc_got_irq_map = 1;
4375 }
4376
4377 static void
4378 iavf_process_vc_event(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
4379 struct ixl_aq_buf *aqb)
4380 {
4381 struct iavf_vc_pf_event *event;
4382 struct ifnet *ifp = &sc->sc_ec.ec_if;
4383 const struct iavf_link_speed *speed;
4384 int link;
4385
4386 event = aqb->aqb_data;
4387 switch (event->event) {
4388 case IAVF_VC_EVENT_LINK_CHANGE:
4389 sc->sc_media_status = IFM_AVALID;
4390 sc->sc_media_active = IFM_ETHER;
4391 link = LINK_STATE_DOWN;
4392 if (event->link_status) {
4393 link = LINK_STATE_UP;
4394 sc->sc_media_status |= IFM_ACTIVE;
4395
4396 ifp->if_baudrate = 0;
4397 speed = iavf_find_link_speed(sc, event->link_speed);
4398 if (speed != NULL) {
4399 sc->sc_media_active |= speed->media;
4400 ifp->if_baudrate = speed->baudrate;
4401 }
4402 }
4403
4404 if (sc->sc_link_state != link) {
4405 sc->sc_link_state = link;
4406 if (sc->sc_attached) {
4407 if_link_state_change(ifp, link);
4408 }
4409 }
4410 break;
4411 case IAVF_VC_EVENT_RESET_IMPENDING:
4412 log(LOG_INFO, "%s: Reset warning received from the PF\n",
4413 ifp->if_xname);
4414 iavf_work_set(&sc->sc_reset_task, iavf_reset_request, sc);
4415 iavf_work_add(sc->sc_workq, &sc->sc_reset_task);
4416 break;
4417 }
4418 }
4419
4420 static void
4421 iavf_process_stats(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
4422 struct ixl_aq_buf *aqb)
4423 {
4424 struct iavf_stat_counters *isc;
4425 struct i40e_eth_stats *st;
4426
4427 KASSERT(mutex_owned(&sc->sc_adminq_lock));
4428
4429 st = aqb->aqb_data;
4430 isc = &sc->sc_stat_counters;
4431
4432 isc->isc_rx_bytes.ev_count = st->rx_bytes;
4433 isc->isc_rx_unicast.ev_count = st->rx_unicast;
4434 isc->isc_rx_multicast.ev_count = st->rx_multicast;
4435 isc->isc_rx_broadcast.ev_count = st->rx_broadcast;
4436 isc->isc_rx_discards.ev_count = st->rx_discards;
4437 isc->isc_rx_unknown_protocol.ev_count = st->rx_unknown_protocol;
4438
4439 isc->isc_tx_bytes.ev_count = st->tx_bytes;
4440 isc->isc_tx_unicast.ev_count = st->tx_unicast;
4441 isc->isc_tx_multicast.ev_count = st->tx_multicast;
4442 isc->isc_tx_broadcast.ev_count = st->tx_broadcast;
4443 isc->isc_tx_discards.ev_count = st->tx_discards;
4444 isc->isc_tx_errors.ev_count = st->tx_errors;
4445 }
4446
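/*
 * iavf_process_req_queues --
 *	Handle the PF's reply to IAVF_VC_OP_REQUEST_QUEUES.  When the PF
 *	reports more queue pairs than the current VSI owns, remember that
 *	maximum in sc_nqps_req and schedule a single retry of the request.
 */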
4447 static void
4448 iavf_process_req_queues(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
4449 struct ixl_aq_buf *aqb)
4450 {
4451 struct iavf_vc_res_request *req;
4452 struct ifnet *ifp;
4453 uint32_t vc_retval;
4454
4455 ifp = &sc->sc_ec.ec_if;
4456 req = aqb->aqb_data;
4457
4458 vc_retval = iavf_aq_vc_get_retval(iaq);
4459 if (vc_retval != IAVF_VC_RC_SUCCESS) {
4460 return;
4461 }
4462
4463 	if (sc->sc_nqps_req > req->num_queue_pairs) {
4464 log(LOG_INFO,
4465 "%s: requested %d queues, but only %d left.\n",
4466 ifp->if_xname,
4467 sc->sc_nqps_req, req->num_queue_pairs);
4468 }
4469
4470 if (sc->sc_nqps_vsi < req->num_queue_pairs) {
4471 if (!sc->sc_req_queues_retried) {
4472 /* req->num_queue_pairs indicates max qps */
4473 sc->sc_nqps_req = req->num_queue_pairs;
4474
4475 sc->sc_req_queues_retried = true;
4476 iavf_work_add(sc->sc_workq, &sc->sc_req_queues_task);
4477 }
4478 }
4479 }
4480
4481 static int
4482 iavf_get_version(struct iavf_softc *sc, struct ixl_aq_buf *aqb)
4483 {
4484 struct ixl_aq_desc iaq;
4485 struct iavf_vc_version_info *ver;
4486 int error;
4487
4488 memset(&iaq, 0, sizeof(iaq));
4489 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4490 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4491 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_VERSION);
4492 iaq.iaq_datalen = htole16(sizeof(struct iavf_vc_version_info));
4493
4494 ver = IXL_AQB_KVA(aqb);
4495 ver->major = htole32(IAVF_VF_MAJOR);
4496 ver->minor = htole32(IAVF_VF_MINOR);
4497
4498 sc->sc_major_ver = UINT_MAX;
4499 sc->sc_minor_ver = UINT_MAX;
4500
4501 if (sc->sc_attached) {
4502 error = iavf_adminq_poll(sc, &iaq, aqb, 250);
4503 } else {
4504 error = iavf_adminq_poll_locked(sc, &iaq, aqb, 250);
4505 }
4506
4507 if (error)
4508 return -1;
4509
4510 return 0;
4511 }
4512
4513 static int
4514 iavf_get_vf_resources(struct iavf_softc *sc, struct ixl_aq_buf *aqb)
4515 {
4516 struct ixl_aq_desc iaq;
4517 uint32_t *cap, cap0;
4518 int error;
4519
4520 memset(&iaq, 0, sizeof(iaq));
4521 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4522 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4523 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_GET_VF_RESOURCES);
4524
4525 if (sc->sc_major_ver > 0) {
4526 cap0 = IAVF_VC_OFFLOAD_L2 |
4527 IAVF_VC_OFFLOAD_VLAN |
4528 IAVF_VC_OFFLOAD_RSS_PF |
4529 IAVF_VC_OFFLOAD_REQ_QUEUES;
4530
4531 cap = IXL_AQB_KVA(aqb);
4532 *cap = htole32(cap0);
4533 iaq.iaq_datalen = htole16(sizeof(*cap));
4534 }
4535
4536 sc->sc_got_vf_resources = 0;
4537 if (sc->sc_attached) {
4538 error = iavf_adminq_poll(sc, &iaq, aqb, 250);
4539 } else {
4540 error = iavf_adminq_poll_locked(sc, &iaq, aqb, 250);
4541 }
4542
4543 if (error)
4544 return -1;
4545 return 0;
4546 }
4547
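/*
 * iavf_get_stats --
 *	Post an IAVF_VC_OP_GET_STATS query for this VSI.  The reply is
 *	consumed asynchronously by iavf_process_stats().
 */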
4548 static int
4549 iavf_get_stats(struct iavf_softc *sc)
4550 {
4551 struct ixl_aq_desc iaq;
4552 struct ixl_aq_buf *aqb;
4553 struct iavf_vc_queue_select *qsel;
4554 int error;
4555
4556 mutex_enter(&sc->sc_adminq_lock);
4557 aqb = iavf_aqb_get_locked(&sc->sc_atq_idle);
4558 mutex_exit(&sc->sc_adminq_lock);
4559
4560 if (aqb == NULL)
4561 return ENOMEM;
4562
4563 qsel = IXL_AQB_KVA(aqb);
4564 memset(qsel, 0, sizeof(*qsel));
4565 qsel->vsi_id = htole16(sc->sc_vsi_id);
4566
4567 memset(&iaq, 0, sizeof(iaq));
4568
4569 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4570 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4571 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_GET_STATS);
4572 iaq.iaq_datalen = htole16(sizeof(*qsel));
4573
4574 if (atomic_load_relaxed(&sc->sc_debuglevel) >= 3) {
4575 device_printf(sc->sc_dev, "post GET_STATS command\n");
4576 }
4577
4578 mutex_enter(&sc->sc_adminq_lock);
4579 error = iavf_atq_post(sc, &iaq, aqb);
4580 mutex_exit(&sc->sc_adminq_lock);
4581
4582 return error;
4583 }
4584
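/*
 * iavf_config_irq_map --
 *	Tell the PF how queues map onto MSI-X vectors.  With a single
 *	interrupt every queue shares vector 0; otherwise each queue pair
 *	gets its own vector (1..n-1) and vector 0 is left for the admin
 *	queue and other events.
 */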
4585 static int
4586 iavf_config_irq_map(struct iavf_softc *sc, struct ixl_aq_buf *aqb)
4587 {
4588 struct ixl_aq_desc iaq;
4589 struct iavf_vc_vector_map *vec;
4590 struct iavf_vc_irq_map_info *map;
4591 struct iavf_rx_ring *rxr;
4592 struct iavf_tx_ring *txr;
4593 unsigned int num_vec;
4594 int error;
4595
4596 map = IXL_AQB_KVA(aqb);
4597 vec = map->vecmap;
4598 num_vec = 0;
4599
4600 if (sc->sc_nintrs == 1) {
4601 vec[0].vsi_id = htole16(sc->sc_vsi_id);
4602 vec[0].vector_id = htole16(0);
4603 vec[0].rxq_map = htole16(iavf_allqueues(sc));
4604 vec[0].txq_map = htole16(iavf_allqueues(sc));
4605 vec[0].rxitr_idx = htole16(IAVF_NOITR);
4606 		vec[0].txitr_idx = htole16(IAVF_NOITR);
4607 num_vec = 1;
4608 } else if (sc->sc_nintrs > 1) {
4609 KASSERT(sc->sc_nqps_alloc >= (sc->sc_nintrs - 1));
4610 for (; num_vec < (sc->sc_nintrs - 1); num_vec++) {
4611 rxr = sc->sc_qps[num_vec].qp_rxr;
4612 txr = sc->sc_qps[num_vec].qp_txr;
4613
4614 vec[num_vec].vsi_id = htole16(sc->sc_vsi_id);
4615 vec[num_vec].vector_id = htole16(num_vec + 1);
4616 vec[num_vec].rxq_map = htole16(__BIT(rxr->rxr_qid));
4617 vec[num_vec].txq_map = htole16(__BIT(txr->txr_qid));
4618 vec[num_vec].rxitr_idx = htole16(IAVF_ITR_RX);
4619 vec[num_vec].txitr_idx = htole16(IAVF_ITR_TX);
4620 }
4621
4622 vec[num_vec].vsi_id = htole16(sc->sc_vsi_id);
4623 vec[num_vec].vector_id = htole16(0);
4624 vec[num_vec].rxq_map = htole16(0);
4625 vec[num_vec].txq_map = htole16(0);
4626 num_vec++;
4627 }
4628
4629 map->num_vectors = htole16(num_vec);
4630
4631 memset(&iaq, 0, sizeof(iaq));
4632 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4633 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4634 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_CONFIG_IRQ_MAP);
4635 iaq.iaq_datalen = htole16(sizeof(*map) + sizeof(*vec) * num_vec);
4636
4637 if (sc->sc_attached) {
4638 error = iavf_adminq_poll(sc, &iaq, aqb, 250);
4639 } else {
4640 error = iavf_adminq_poll_locked(sc, &iaq, aqb, 250);
4641 }
4642
4643 if (error)
4644 return -1;
4645
4646 return 0;
4647 }
4648
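/*
 * iavf_config_vsi_queues --
 *	Describe every TX/RX ring (base DMA address, ring length, buffer
 *	size and maximum packet size) to the PF with
 *	IAVF_VC_OP_CONFIG_VSI_QUEUES.
 */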
4649 static int
4650 iavf_config_vsi_queues(struct iavf_softc *sc)
4651 {
4652 struct ifnet *ifp = &sc->sc_ec.ec_if;
4653 struct ixl_aq_desc iaq;
4654 struct ixl_aq_buf *aqb;
4655 struct iavf_vc_queue_config_info *config;
4656 struct iavf_vc_txq_info *txq;
4657 struct iavf_vc_rxq_info *rxq;
4658 struct iavf_rx_ring *rxr;
4659 struct iavf_tx_ring *txr;
4660 uint32_t rxmtu_max;
4661 unsigned int i;
4662 int error;
4663
4664 rxmtu_max = ifp->if_mtu + IAVF_MTU_ETHERLEN;
4665
4666 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
4667
4668 if (aqb == NULL)
4669 return -1;
4670
4671 config = IXL_AQB_KVA(aqb);
4672 memset(config, 0, sizeof(*config));
4673 config->vsi_id = htole16(sc->sc_vsi_id);
4674 config->num_queue_pairs = htole16(sc->sc_nqueue_pairs);
4675
4676 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
4677 rxr = sc->sc_qps[i].qp_rxr;
4678 txr = sc->sc_qps[i].qp_txr;
4679
4680 txq = &config->qpair[i].txq;
4681 txq->vsi_id = htole16(sc->sc_vsi_id);
4682 txq->queue_id = htole16(txr->txr_qid);
4683 txq->ring_len = htole16(sc->sc_tx_ring_ndescs);
4684 txq->headwb_ena = 0;
4685 txq->dma_ring_addr = htole64(IXL_DMA_DVA(&txr->txr_mem));
4686 txq->dma_headwb_addr = 0;
4687
4688 rxq = &config->qpair[i].rxq;
4689 rxq->vsi_id = htole16(sc->sc_vsi_id);
4690 rxq->queue_id = htole16(rxr->rxr_qid);
4691 rxq->ring_len = htole16(sc->sc_rx_ring_ndescs);
4692 rxq->splithdr_ena = 0;
4693 rxq->databuf_size = htole32(IAVF_MCLBYTES);
4694 rxq->max_pkt_size = htole32(rxmtu_max);
4695 rxq->dma_ring_addr = htole64(IXL_DMA_DVA(&rxr->rxr_mem));
4696 rxq->rx_split_pos = 0;
4697 }
4698
4699 memset(&iaq, 0, sizeof(iaq));
4700 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4701 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4702 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_CONFIG_VSI_QUEUES);
4703 iaq.iaq_datalen = htole16(sizeof(*config) +
4704 sizeof(config->qpair[0]) * sc->sc_nqueue_pairs);
4705
4706 error = iavf_adminq_exec(sc, &iaq, aqb);
4707 if (error != IAVF_VC_RC_SUCCESS) {
4708 return -1;
4709 }
4710
4711 return 0;
4712 }
4713
4714 static int
4715 iavf_config_hena(struct iavf_softc *sc)
4716 {
4717 struct ixl_aq_desc iaq;
4718 struct ixl_aq_buf *aqb;
4719 uint64_t *caps;
4720 int error;
4721
4722 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
4723
4724 if (aqb == NULL)
4725 return -1;
4726
4727 caps = IXL_AQB_KVA(aqb);
4728 	if (sc->sc_mac_type == I40E_MAC_X722_VF)
4729 		*caps = IXL_RSS_HENA_DEFAULT_X722;
4730 	else
4731 		*caps = IXL_RSS_HENA_DEFAULT_XL710;
4732
4733 memset(&iaq, 0, sizeof(iaq));
4734 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4735 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4736 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_SET_RSS_HENA);
4737 iaq.iaq_datalen = htole16(sizeof(*caps));
4738
4739 error = iavf_adminq_exec(sc, &iaq, aqb);
4740 if (error != IAVF_VC_RC_SUCCESS) {
4741 return -1;
4742 }
4743
4744 return 0;
4745 }
4746
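/*
 * iavf_get_default_rss_key --
 *	Fill "buf" with the kernel's global RSS key, zero-padding when the
 *	requested length exceeds the key size.
 */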
4747 static inline void
4748 iavf_get_default_rss_key(uint8_t *buf, size_t len)
4749 {
4750 uint8_t rss_seed[RSS_KEYSIZE];
4751 size_t cplen;
4752
4753 cplen = MIN(len, sizeof(rss_seed));
4754 rss_getkey(rss_seed);
4755
4756 memcpy(buf, rss_seed, cplen);
4757 if (cplen < len)
4758 memset(buf + cplen, 0, len - cplen);
4759 }
4760
4761 static int
4762 iavf_config_rss_key(struct iavf_softc *sc)
4763 {
4764 struct ixl_aq_desc iaq;
4765 struct ixl_aq_buf *aqb;
4766 struct iavf_vc_rss_key *rss_key;
4767 size_t key_len;
4768 int rv;
4769
4770 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
4771 if (aqb == NULL)
4772 return -1;
4773
4774 rss_key = IXL_AQB_KVA(aqb);
4775 rss_key->vsi_id = htole16(sc->sc_vsi_id);
4776 key_len = IXL_RSS_KEY_SIZE;
4777 iavf_get_default_rss_key(rss_key->key, key_len);
4778 rss_key->key_len = key_len;
4779
4780 memset(&iaq, 0, sizeof(iaq));
4781 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4782 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4783 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_CONFIG_RSS_KEY);
4784 iaq.iaq_datalen = htole16(sizeof(*rss_key) - sizeof(rss_key->pad)
4785 + (sizeof(rss_key->key[0]) * key_len));
4786
4787 rv = iavf_adminq_exec(sc, &iaq, aqb);
4788 if (rv != IAVF_VC_RC_SUCCESS) {
4789 return -1;
4790 }
4791
4792 return 0;
4793 }
4794
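/*
 * iavf_config_rss_lut --
 *	Program the RSS lookup table so that its IXL_RSS_VSI_LUT_SIZE
 *	entries are spread round-robin across the active queue pairs.
 */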
4795 static int
4796 iavf_config_rss_lut(struct iavf_softc *sc)
4797 {
4798 struct ixl_aq_desc iaq;
4799 struct ixl_aq_buf *aqb;
4800 struct iavf_vc_rss_lut *rss_lut;
4801 uint8_t *lut, v;
4802 int rv, i;
4803
4804 mutex_enter(&sc->sc_adminq_lock);
4805 mutex_exit(&sc->sc_adminq_lock);
4806
4807 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
4808 if (aqb == NULL)
4809 return -1;
4810
4811 rss_lut = IXL_AQB_KVA(aqb);
4812 rss_lut->vsi_id = htole16(sc->sc_vsi_id);
4813 rss_lut->lut_entries = htole16(IXL_RSS_VSI_LUT_SIZE);
4814
4815 lut = rss_lut->lut;
4816 for (i = 0; i < IXL_RSS_VSI_LUT_SIZE; i++) {
4817 v = i % sc->sc_nqueue_pairs;
4818 v &= IAVF_RSS_VSI_LUT_ENTRY_MASK;
4819 lut[i] = v;
4820 }
4821
4822 memset(&iaq, 0, sizeof(iaq));
4823 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4824 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4825 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_CONFIG_RSS_LUT);
4826 iaq.iaq_datalen = htole16(sizeof(*rss_lut) - sizeof(rss_lut->pad)
4827 + (sizeof(rss_lut->lut[0]) * IXL_RSS_VSI_LUT_SIZE));
4828
4829 rv = iavf_adminq_exec(sc, &iaq, aqb);
4830 if (rv != IAVF_VC_RC_SUCCESS) {
4831 return -1;
4832 }
4833
4834 return 0;
4835 }
4836
4837 static int
4838 iavf_queue_select(struct iavf_softc *sc, int opcode)
4839 {
4840 struct ixl_aq_desc iaq;
4841 struct ixl_aq_buf *aqb;
4842 struct iavf_vc_queue_select *qsel;
4843 int error;
4844
4845 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
4846 if (aqb == NULL)
4847 return -1;
4848
4849 qsel = IXL_AQB_KVA(aqb);
4850 qsel->vsi_id = htole16(sc->sc_vsi_id);
4851 qsel->rx_queues = htole32(iavf_allqueues(sc));
4852 qsel->tx_queues = htole32(iavf_allqueues(sc));
4853
4854 memset(&iaq, 0, sizeof(iaq));
4855 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4856 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4857 iavf_aq_vc_set_opcode(&iaq, opcode);
4858 iaq.iaq_datalen = htole16(sizeof(*qsel));
4859
4860 error = iavf_adminq_exec(sc, &iaq, aqb);
4861 if (error != IAVF_VC_RC_SUCCESS) {
4862 return -1;
4863 }
4864
4865 return 0;
4866 }
4867
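/*
 * iavf_request_queues --
 *	Ask the PF for "req_num" queue pairs.  The command is only posted
 *	here; the reply is handled asynchronously by
 *	iavf_process_req_queues().
 */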
4868 static int
4869 iavf_request_queues(struct iavf_softc *sc, unsigned int req_num)
4870 {
4871 struct ixl_aq_desc iaq;
4872 struct ixl_aq_buf *aqb;
4873 struct iavf_vc_res_request *req;
4874 int rv;
4875
4876 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
4877 if (aqb == NULL)
4878 return ENOMEM;
4879
4880 req = IXL_AQB_KVA(aqb);
4881 req->num_queue_pairs = req_num;
4882
4883 memset(&iaq, 0, sizeof(iaq));
4884 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4885 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4886 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_REQUEST_QUEUES);
4887 iaq.iaq_datalen = htole16(sizeof(*req));
4888
4889 mutex_enter(&sc->sc_adminq_lock);
4890 rv = iavf_atq_post(sc, &iaq, aqb);
4891 mutex_exit(&sc->sc_adminq_lock);
4892
4893 return rv;
4894 }
4895
4896 static int
4897 iavf_reset_vf(struct iavf_softc *sc)
4898 {
4899 struct ixl_aq_desc iaq;
4900 int error;
4901
4902 memset(&iaq, 0, sizeof(iaq));
4903 iaq.iaq_flags = htole16(IXL_AQ_RD);
4904 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4905 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_RESET_VF);
4906 iaq.iaq_datalen = htole16(0);
4907
4908 iavf_wr(sc, I40E_VFGEN_RSTAT, IAVF_VFR_INPROGRESS);
4909
4910 mutex_enter(&sc->sc_adminq_lock);
4911 error = iavf_atq_post(sc, &iaq, NULL);
4912 mutex_exit(&sc->sc_adminq_lock);
4913
4914 return error;
4915 }
4916
4917 static int
4918 iavf_eth_addr(struct iavf_softc *sc, const uint8_t *addr, uint32_t opcode)
4919 {
4920 struct ixl_aq_desc iaq;
4921 struct ixl_aq_buf *aqb;
4922 struct iavf_vc_eth_addr_list *addrs;
4923 struct iavf_vc_eth_addr *vcaddr;
4924 int rv;
4925
4926 KASSERT(sc->sc_attached);
4927 KASSERT(opcode == IAVF_VC_OP_ADD_ETH_ADDR ||
4928 opcode == IAVF_VC_OP_DEL_ETH_ADDR);
4929
4930 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
4931 if (aqb == NULL)
4932 return -1;
4933
4934 addrs = IXL_AQB_KVA(aqb);
4935 addrs->vsi_id = htole16(sc->sc_vsi_id);
4936 addrs->num_elements = htole16(1);
4937 vcaddr = addrs->list;
4938 memcpy(vcaddr->addr, addr, ETHER_ADDR_LEN);
4939
4940 memset(&iaq, 0, sizeof(iaq));
4941 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4942 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4943 iavf_aq_vc_set_opcode(&iaq, opcode);
4944 iaq.iaq_datalen = htole16(sizeof(*addrs) + sizeof(*vcaddr));
4945
4946 if (sc->sc_resetting) {
4947 mutex_enter(&sc->sc_adminq_lock);
4948 rv = iavf_adminq_poll_locked(sc, &iaq, aqb, 250);
4949 iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
4950 mutex_exit(&sc->sc_adminq_lock);
4951 } else {
4952 rv = iavf_adminq_exec(sc, &iaq, aqb);
4953 }
4954
4955 if (rv != IAVF_VC_RC_SUCCESS) {
4956 return -1;
4957 }
4958
4959 return 0;
4960 }
4961
4962 static int
4963 iavf_config_promisc_mode(struct iavf_softc *sc, int unicast, int multicast)
4964 {
4965 struct ixl_aq_desc iaq;
4966 struct ixl_aq_buf *aqb;
4967 struct iavf_vc_promisc_info *promisc;
4968 int flags;
4969
4970 KASSERT(sc->sc_attached);
4971
4972 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
4973 if (aqb == NULL)
4974 return -1;
4975
4976 flags = 0;
4977 if (unicast)
4978 flags |= IAVF_FLAG_VF_UNICAST_PROMISC;
4979 if (multicast)
4980 flags |= IAVF_FLAG_VF_MULTICAST_PROMISC;
4981
4982 promisc = IXL_AQB_KVA(aqb);
4983 promisc->vsi_id = htole16(sc->sc_vsi_id);
4984 promisc->flags = htole16(flags);
4985
4986 memset(&iaq, 0, sizeof(iaq));
4987 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4988 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4989 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_CONFIG_PROMISC);
4990 iaq.iaq_datalen = htole16(sizeof(*promisc));
4991
4992 if (iavf_adminq_exec(sc, &iaq, aqb) != IAVF_VC_RC_SUCCESS) {
4993 return -1;
4994 }
4995
4996 return 0;
4997 }
4998
4999 static int
5000 iavf_config_vlan_stripping(struct iavf_softc *sc, int eccap)
5001 {
5002 struct ixl_aq_desc iaq;
5003 uint32_t opcode;
5004
5005 opcode = ISSET(eccap, ETHERCAP_VLAN_HWTAGGING) ?
5006 IAVF_VC_OP_ENABLE_VLAN_STRIP : IAVF_VC_OP_DISABLE_VLAN_STRIP;
5007
5008 memset(&iaq, 0, sizeof(iaq));
5009 iaq.iaq_flags = htole16(IXL_AQ_RD);
5010 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
5011 iavf_aq_vc_set_opcode(&iaq, opcode);
5012 iaq.iaq_datalen = htole16(0);
5013
5014 if (iavf_adminq_exec(sc, &iaq, NULL) != IAVF_VC_RC_SUCCESS) {
5015 return -1;
5016 }
5017
5018 return 0;
5019 }
5020
5021 static int
5022 iavf_config_vlan_id(struct iavf_softc *sc, uint16_t vid, uint32_t opcode)
5023 {
5024 struct ixl_aq_desc iaq;
5025 struct ixl_aq_buf *aqb;
5026 struct iavf_vc_vlan_filter *vfilter;
5027 int rv;
5028
5029 KASSERT(opcode == IAVF_VC_OP_ADD_VLAN || opcode == IAVF_VC_OP_DEL_VLAN);
5030
5031 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
5032
5033 if (aqb == NULL)
5034 return -1;
5035
5036 vfilter = IXL_AQB_KVA(aqb);
5037 vfilter->vsi_id = htole16(sc->sc_vsi_id);
5038 vfilter->num_vlan_id = htole16(1);
5039 vfilter->vlan_id[0] = vid;
5040
5041 memset(&iaq, 0, sizeof(iaq));
5042 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
5043 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
5044 iavf_aq_vc_set_opcode(&iaq, opcode);
5045 iaq.iaq_datalen = htole16(sizeof(*vfilter) + sizeof(vid));
5046
5047 if (sc->sc_resetting) {
5048 mutex_enter(&sc->sc_adminq_lock);
5049 rv = iavf_adminq_poll_locked(sc, &iaq, aqb, 250);
5050 iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
5051 mutex_exit(&sc->sc_adminq_lock);
5052 } else {
5053 rv = iavf_adminq_exec(sc, &iaq, aqb);
5054 }
5055
5056 if (rv != IAVF_VC_RC_SUCCESS) {
5057 return -1;
5058 }
5059
5060 return 0;
5061 }
5062
5063 static void
5064 iavf_post_request_queues(void *xsc)
5065 {
5066 struct iavf_softc *sc;
5067 struct ifnet *ifp;
5068
5069 sc = xsc;
5070 ifp = &sc->sc_ec.ec_if;
5071
5072 if (!ISSET(sc->sc_vf_cap, IAVF_VC_OFFLOAD_REQ_QUEUES)) {
5073 log(LOG_DEBUG, "%s: the VF has no REQ_QUEUES capability\n",
5074 ifp->if_xname);
5075 return;
5076 }
5077
5078 log(LOG_INFO, "%s: try to change the number of queue pairs"
5079 " (vsi %u, %u allocated, request %u)\n",
5080 ifp->if_xname,
5081 sc->sc_nqps_vsi, sc->sc_nqps_alloc, sc->sc_nqps_req);
5082 iavf_request_queues(sc, sc->sc_nqps_req);
5083 }
5084
5085 static bool
5086 iavf_sysctlnode_is_rx(struct sysctlnode *node)
5087 {
5088
5089 if (strstr(node->sysctl_parent->sysctl_name, "rx") != NULL)
5090 return true;
5091
5092 return false;
5093 }
5094
5095 static int
5096 iavf_sysctl_itr_handler(SYSCTLFN_ARGS)
5097 {
5098 struct sysctlnode node = *rnode;
5099 struct iavf_softc *sc = (struct iavf_softc *)node.sysctl_data;
5100 uint32_t newitr, *itrptr;
5101 unsigned int i;
5102 int itr, error;
5103
5104 if (iavf_sysctlnode_is_rx(&node)) {
5105 itrptr = &sc->sc_rx_itr;
5106 itr = IAVF_ITR_RX;
5107 } else {
5108 itrptr = &sc->sc_tx_itr;
5109 itr = IAVF_ITR_TX;
5110 }
5111
5112 newitr = *itrptr;
5113 node.sysctl_data = &newitr;
5114 node.sysctl_size = sizeof(newitr);
5115
5116 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5117 if (error || newp == NULL)
5118 return error;
5119
5120 if (newitr > 0x07FF)
5121 return EINVAL;
5122
5123 *itrptr = newitr;
5124
5125 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
5126 iavf_wr(sc, I40E_VFINT_ITRN1(itr, i), *itrptr);
5127 }
5128 iavf_wr(sc, I40E_VFINT_ITR01(itr), *itrptr);
5129
5130 return 0;
5131 }
5132
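/*
 * Deferred-work helpers: an iavf_work is enqueued on the driver workqueue
 * at most once at a time.  iavf_work_set() refuses to re-initialize a work
 * that is still pending, iavf_work_add() sets the ixw_added flag atomically
 * before enqueueing, and iavf_workq_work() clears it just before running
 * the handler, so duplicate adds are no-ops.
 */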
5133 static void
5134 iavf_workq_work(struct work *wk, void *context)
5135 {
5136 struct iavf_work *work;
5137
5138 work = container_of(wk, struct iavf_work, ixw_cookie);
5139
5140 atomic_swap_uint(&work->ixw_added, 0);
5141 work->ixw_func(work->ixw_arg);
5142 }
5143
5144 static struct workqueue *
5145 iavf_workq_create(const char *name, pri_t prio, int ipl, int flags)
5146 {
5147 struct workqueue *wq;
5148 int error;
5149
5150 error = workqueue_create(&wq, name, iavf_workq_work, NULL,
5151 prio, ipl, flags);
5152
5153 if (error)
5154 return NULL;
5155
5156 return wq;
5157 }
5158
5159 static void
5160 iavf_workq_destroy(struct workqueue *wq)
5161 {
5162
5163 workqueue_destroy(wq);
5164 }
5165
5166 static int
5167 iavf_work_set(struct iavf_work *work, void (*func)(void *), void *arg)
5168 {
5169
5170 if (work->ixw_added != 0)
5171 return -1;
5172
5173 memset(work, 0, sizeof(*work));
5174 work->ixw_func = func;
5175 work->ixw_arg = arg;
5176
5177 return 0;
5178 }
5179
5180 static void
5181 iavf_work_add(struct workqueue *wq, struct iavf_work *work)
5182 {
5183 if (atomic_cas_uint(&work->ixw_added, 0, 1) != 0)
5184 return;
5185
5186 kpreempt_disable();
5187 workqueue_enqueue(wq, &work->ixw_cookie, NULL);
5188 kpreempt_enable();
5189 }
5190
5191 static void
5192 iavf_work_wait(struct workqueue *wq, struct iavf_work *work)
5193 {
5194
5195 workqueue_wait(wq, &work->ixw_cookie);
5196 }
5197
5198 static void
5199 iavf_evcnt_attach(struct evcnt *ec,
5200 const char *n0, const char *n1)
5201 {
5202
5203 evcnt_attach_dynamic(ec, EVCNT_TYPE_MISC,
5204 NULL, n0, n1);
5205 }
5206
5207 MODULE(MODULE_CLASS_DRIVER, if_iavf, "pci");
5208
5209 #ifdef _MODULE
5210 #include "ioconf.c"
5211 #endif
5212
5213 #ifdef _MODULE
5214 static void
5215 iavf_parse_modprop(prop_dictionary_t dict)
5216 {
5217 prop_object_t obj;
5218 int64_t val;
5219 uint32_t n;
5220
5221 if (dict == NULL)
5222 return;
5223
5224 obj = prop_dictionary_get(dict, "debug_level");
5225 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
5226 val = prop_number_signed_value((prop_number_t)obj);
5227
5228 if (val > 0) {
5229 iavf_params.debug = val;
5230 printf("iavf: debug level=%d\n", iavf_params.debug);
5231 }
5232 }
5233
5234 obj = prop_dictionary_get(dict, "max_qps");
5235 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
5236 val = prop_number_signed_value((prop_number_t)obj);
5237
5238 		if (val < 1 || val > I40E_MAX_VF_QUEUES) {
5239 			printf("iavf: invalid queue size (1 <= n <= %d)\n",
5240 			    I40E_MAX_VF_QUEUES);
5241 } else {
5242 iavf_params.max_qps = val;
5243 printf("iavf: request queue pair = %u\n",
5244 iavf_params.max_qps);
5245 }
5246 }
5247
5248 obj = prop_dictionary_get(dict, "tx_itr");
5249 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
5250 val = prop_number_signed_value((prop_number_t)obj);
5251 if (val > 0x07FF) {
5252 printf("iavf: TX ITR too big (%" PRId64 " <= %d)",
5253 val, 0x7FF);
5254 } else {
5255 iavf_params.tx_itr = val;
5256 printf("iavf: TX ITR = 0x%" PRIx32,
5257 iavf_params.tx_itr);
5258 }
5259 }
5260
5261 obj = prop_dictionary_get(dict, "rx_itr");
5262 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
5263 val = prop_number_signed_value((prop_number_t)obj);
5264 if (val > 0x07FF) {
5265 printf("iavf: RX ITR too big (%" PRId64 " <= %d)",
5266 val, 0x7FF);
5267 } else {
5268 iavf_params.rx_itr = val;
5269 printf("iavf: RX ITR = 0x%" PRIx32,
5270 iavf_params.rx_itr);
5271 }
5272 }
5273
5274 obj = prop_dictionary_get(dict, "tx_ndescs");
5275 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
5276 val = prop_number_signed_value((prop_number_t)obj);
5277 n = 1U << (fls32(val) - 1);
5278 if (val != (int64_t) n) {
5279 printf("iavf: TX desc invlaid size"
5280 "(%" PRId64 " != %" PRIu32 ")\n", val, n);
5281 } else if (val > (8192 - 32)) {
5282 printf("iavf: Tx desc too big (%" PRId64 " > %d)",
5283 val, (8192 - 32));
5284 } else {
5285 iavf_params.tx_ndescs = val;
5286 printf("iavf: TX descriptors = 0x%04x",
5287 iavf_params.tx_ndescs);
5288 }
5289 }
5290
5291 obj = prop_dictionary_get(dict, "rx_ndescs");
5292 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
5293 val = prop_number_signed_value((prop_number_t)obj);
5294 n = 1U << (fls32(val) - 1);
5295 if (val != (int64_t) n) {
5296 printf("iavf: RX desc invlaid size"
5297 "(%" PRId64 " != %" PRIu32 ")\n", val, n);
5298 } else if (val > (8192 - 32)) {
5299 printf("iavf: Rx desc too big (%" PRId64 " > %d)",
5300 val, (8192 - 32));
5301 } else {
5302 iavf_params.rx_ndescs = val;
5303 printf("iavf: RX descriptors = 0x%04x",
5304 iavf_params.rx_ndescs);
5305 }
5306 }
5307 }
5308 #endif
5309
5310 static int
5311 if_iavf_modcmd(modcmd_t cmd, void *opaque)
5312 {
5313 int error = 0;
5314
5315 #ifdef _MODULE
5316 switch (cmd) {
5317 case MODULE_CMD_INIT:
5318 iavf_parse_modprop((prop_dictionary_t)opaque);
5319 error = config_init_component(cfdriver_ioconf_if_iavf,
5320 cfattach_ioconf_if_iavf, cfdata_ioconf_if_iavf);
5321 break;
5322 case MODULE_CMD_FINI:
5323 error = config_fini_component(cfdriver_ioconf_if_iavf,
5324 cfattach_ioconf_if_iavf, cfdata_ioconf_if_iavf);
5325 break;
5326 default:
5327 error = ENOTTY;
5328 break;
5329 }
5330 #endif
5331
5332 return error;
5333 }
5334