/*	$NetBSD: if_ixl.c,v 1.88 2022/09/16 03:12:03 knakahara Exp $	*/
2
3 /*
4 * Copyright (c) 2013-2015, Intel Corporation
5 * All rights reserved.
6
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
12 *
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * 3. Neither the name of the Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 /*
35 * Copyright (c) 2016,2017 David Gwynne <dlg (at) openbsd.org>
36 *
37 * Permission to use, copy, modify, and distribute this software for any
38 * purpose with or without fee is hereby granted, provided that the above
39 * copyright notice and this permission notice appear in all copies.
40 *
41 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
42 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
43 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
44 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
45 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
46 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
47 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
48 */
49
50 /*
51 * Copyright (c) 2019 Internet Initiative Japan, Inc.
52 * All rights reserved.
53 *
54 * Redistribution and use in source and binary forms, with or without
55 * modification, are permitted provided that the following conditions
56 * are met:
57 * 1. Redistributions of source code must retain the above copyright
58 * notice, this list of conditions and the following disclaimer.
59 * 2. Redistributions in binary form must reproduce the above copyright
60 * notice, this list of conditions and the following disclaimer in the
61 * documentation and/or other materials provided with the distribution.
62 *
63 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
64 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
65 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
66 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
67 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
68 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
69 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
70 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
71 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
72 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
73 * POSSIBILITY OF SUCH DAMAGE.
74 */
75
76 #include <sys/cdefs.h>
77 __KERNEL_RCSID(0, "$NetBSD: if_ixl.c,v 1.88 2022/09/16 03:12:03 knakahara Exp $");
78
79 #ifdef _KERNEL_OPT
80 #include "opt_net_mpsafe.h"
81 #include "opt_if_ixl.h"
82 #endif
83
84 #include <sys/param.h>
85 #include <sys/types.h>
86
87 #include <sys/bitops.h>
88 #include <sys/cpu.h>
89 #include <sys/device.h>
90 #include <sys/evcnt.h>
91 #include <sys/interrupt.h>
92 #include <sys/kmem.h>
93 #include <sys/module.h>
94 #include <sys/mutex.h>
95 #include <sys/pcq.h>
96 #include <sys/syslog.h>
97 #include <sys/workqueue.h>
98 #include <sys/xcall.h>
99
100 #include <sys/bus.h>
101
102 #include <net/bpf.h>
103 #include <net/if.h>
104 #include <net/if_dl.h>
105 #include <net/if_media.h>
106 #include <net/if_ether.h>
107 #include <net/rss_config.h>
108
109 #include <netinet/tcp.h> /* for struct tcphdr */
110 #include <netinet/udp.h> /* for struct udphdr */
111
112 #include <dev/pci/pcivar.h>
113 #include <dev/pci/pcidevs.h>
114
115 #include <dev/pci/if_ixlreg.h>
116 #include <dev/pci/if_ixlvar.h>
117
118 #include <prop/proplib.h>
119
120 struct ixl_softc; /* defined */
121
122 #define I40E_PF_RESET_WAIT_COUNT 200
123 #define I40E_AQ_LARGE_BUF 512
124
125 /* bitfields for Tx queue mapping in QTX_CTL */
126 #define I40E_QTX_CTL_VF_QUEUE 0x0
127 #define I40E_QTX_CTL_VM_QUEUE 0x1
128 #define I40E_QTX_CTL_PF_QUEUE 0x2
129
130 #define I40E_QUEUE_TYPE_EOL 0x7ff
131 #define I40E_INTR_NOTX_QUEUE 0
132
133 #define I40E_QUEUE_TYPE_RX 0x0
134 #define I40E_QUEUE_TYPE_TX 0x1
135 #define I40E_QUEUE_TYPE_PE_CEQ 0x2
136 #define I40E_QUEUE_TYPE_UNKNOWN 0x3
137
138 #define I40E_ITR_INDEX_RX 0x0
139 #define I40E_ITR_INDEX_TX 0x1
140 #define I40E_ITR_INDEX_OTHER 0x2
141 #define I40E_ITR_INDEX_NONE 0x3
142 #define IXL_ITR_RX 0x7a /* 4K intrs/sec */
143 #define IXL_ITR_TX 0x7a /* 4K intrs/sec */
144
145 #define I40E_INTR_NOTX_QUEUE 0
146 #define I40E_INTR_NOTX_INTR 0
147 #define I40E_INTR_NOTX_RX_QUEUE 0
148 #define I40E_INTR_NOTX_TX_QUEUE 1
149 #define I40E_INTR_NOTX_RX_MASK I40E_PFINT_ICR0_QUEUE_0_MASK
150 #define I40E_INTR_NOTX_TX_MASK I40E_PFINT_ICR0_QUEUE_1_MASK
151
152 #define I40E_HASH_LUT_SIZE_128 0
153
154 #define IXL_ICR0_CRIT_ERR_MASK \
155 (I40E_PFINT_ICR0_PCI_EXCEPTION_MASK | \
156 I40E_PFINT_ICR0_ECC_ERR_MASK | \
157 I40E_PFINT_ICR0_PE_CRITERR_MASK)
158
159 #define IXL_QUEUE_MAX_XL710 64
160 #define IXL_QUEUE_MAX_X722 128
161
162 #define IXL_TX_PKT_DESCS 8
163 #define IXL_TX_PKT_MAXSIZE (MCLBYTES * IXL_TX_PKT_DESCS)
164 #define IXL_TX_QUEUE_ALIGN 128
165 #define IXL_RX_QUEUE_ALIGN 128
166
167 #define IXL_MCLBYTES (MCLBYTES - ETHER_ALIGN)
168 #define IXL_MTU_ETHERLEN ETHER_HDR_LEN \
169 + ETHER_CRC_LEN
170 #if 0
171 #define IXL_MAX_MTU (9728 - IXL_MTU_ETHERLEN)
172 #else
173 /* (dbuff * 5) - ETHER_HDR_LEN - ETHER_CRC_LEN */
174 #define IXL_MAX_MTU (9600 - IXL_MTU_ETHERLEN)
175 #endif
176 #define IXL_MIN_MTU (ETHER_MIN_LEN - ETHER_CRC_LEN)
177
178 #define IXL_PCIREG PCI_MAPREG_START
179
180 #define IXL_ITR0 0x0
181 #define IXL_ITR1 0x1
182 #define IXL_ITR2 0x2
183 #define IXL_NOITR 0x3
184
185 #define IXL_AQ_NUM 256
186 #define IXL_AQ_MASK (IXL_AQ_NUM - 1)
187 #define IXL_AQ_ALIGN 64 /* lol */
188 #define IXL_AQ_BUFLEN 4096
189
190 #define IXL_HMC_ROUNDUP 512
191 #define IXL_HMC_PGSIZE 4096
192 #define IXL_HMC_DVASZ sizeof(uint64_t)
193 #define IXL_HMC_PGS (IXL_HMC_PGSIZE / IXL_HMC_DVASZ)
194 #define IXL_HMC_L2SZ (IXL_HMC_PGSIZE * IXL_HMC_PGS)
195 #define IXL_HMC_PDVALID 1ULL
196
197 #define IXL_ATQ_EXEC_TIMEOUT (10 * hz)
198
199 #define IXL_SRRD_SRCTL_ATTEMPTS 100000
200
201 struct ixl_aq_regs {
202 bus_size_t atq_tail;
203 bus_size_t atq_head;
204 bus_size_t atq_len;
205 bus_size_t atq_bal;
206 bus_size_t atq_bah;
207
208 bus_size_t arq_tail;
209 bus_size_t arq_head;
210 bus_size_t arq_len;
211 bus_size_t arq_bal;
212 bus_size_t arq_bah;
213
214 uint32_t atq_len_enable;
215 uint32_t atq_tail_mask;
216 uint32_t atq_head_mask;
217
218 uint32_t arq_len_enable;
219 uint32_t arq_tail_mask;
220 uint32_t arq_head_mask;
221 };
222
223 struct ixl_phy_type {
224 uint64_t phy_type;
225 uint64_t ifm_type;
226 };
227
228 struct ixl_speed_type {
229 uint8_t dev_speed;
230 uint64_t net_speed;
231 };
232
233 struct ixl_hmc_entry {
234 uint64_t hmc_base;
235 uint32_t hmc_count;
236 uint64_t hmc_size;
237 };
238
239 enum ixl_hmc_types {
240 IXL_HMC_LAN_TX = 0,
241 IXL_HMC_LAN_RX,
242 IXL_HMC_FCOE_CTX,
243 IXL_HMC_FCOE_FILTER,
244 IXL_HMC_COUNT
245 };
246
247 struct ixl_hmc_pack {
248 uint16_t offset;
249 uint16_t width;
250 uint16_t lsb;
251 };
252
/*
 * these hmc objects have weird sizes and alignments, so these are abstract
 * representations of them that are nice for C to populate.
 *
 * the packing code relies on little-endian values being stored in the fields,
 * no high bits in the fields being set, and the fields must be packed in the
 * same order as they are in the ctx structure (see the illustrative packing
 * sketch after the rxq pack table below).
 */
261
262 struct ixl_hmc_rxq {
263 uint16_t head;
264 uint8_t cpuid;
265 uint64_t base;
266 #define IXL_HMC_RXQ_BASE_UNIT 128
267 uint16_t qlen;
268 uint16_t dbuff;
269 #define IXL_HMC_RXQ_DBUFF_UNIT 128
270 uint8_t hbuff;
271 #define IXL_HMC_RXQ_HBUFF_UNIT 64
272 uint8_t dtype;
273 #define IXL_HMC_RXQ_DTYPE_NOSPLIT 0x0
274 #define IXL_HMC_RXQ_DTYPE_HSPLIT 0x1
275 #define IXL_HMC_RXQ_DTYPE_SPLIT_ALWAYS 0x2
276 uint8_t dsize;
277 #define IXL_HMC_RXQ_DSIZE_16 0
278 #define IXL_HMC_RXQ_DSIZE_32 1
279 uint8_t crcstrip;
280 uint8_t fc_ena;
281 uint8_t l2sel;
282 uint8_t hsplit_0;
283 uint8_t hsplit_1;
284 uint8_t showiv;
285 uint16_t rxmax;
286 uint8_t tphrdesc_ena;
287 uint8_t tphwdesc_ena;
288 uint8_t tphdata_ena;
289 uint8_t tphhead_ena;
290 uint8_t lrxqthresh;
291 uint8_t prefena;
292 };
293
294 static const struct ixl_hmc_pack ixl_hmc_pack_rxq[] = {
295 { offsetof(struct ixl_hmc_rxq, head), 13, 0 },
296 { offsetof(struct ixl_hmc_rxq, cpuid), 8, 13 },
297 { offsetof(struct ixl_hmc_rxq, base), 57, 32 },
298 { offsetof(struct ixl_hmc_rxq, qlen), 13, 89 },
299 { offsetof(struct ixl_hmc_rxq, dbuff), 7, 102 },
300 { offsetof(struct ixl_hmc_rxq, hbuff), 5, 109 },
301 { offsetof(struct ixl_hmc_rxq, dtype), 2, 114 },
302 { offsetof(struct ixl_hmc_rxq, dsize), 1, 116 },
303 { offsetof(struct ixl_hmc_rxq, crcstrip), 1, 117 },
304 { offsetof(struct ixl_hmc_rxq, fc_ena), 1, 118 },
305 { offsetof(struct ixl_hmc_rxq, l2sel), 1, 119 },
306 { offsetof(struct ixl_hmc_rxq, hsplit_0), 4, 120 },
307 { offsetof(struct ixl_hmc_rxq, hsplit_1), 2, 124 },
308 { offsetof(struct ixl_hmc_rxq, showiv), 1, 127 },
309 { offsetof(struct ixl_hmc_rxq, rxmax), 14, 174 },
310 { offsetof(struct ixl_hmc_rxq, tphrdesc_ena), 1, 193 },
311 { offsetof(struct ixl_hmc_rxq, tphwdesc_ena), 1, 194 },
312 { offsetof(struct ixl_hmc_rxq, tphdata_ena), 1, 195 },
313 { offsetof(struct ixl_hmc_rxq, tphhead_ena), 1, 196 },
314 { offsetof(struct ixl_hmc_rxq, lrxqthresh), 3, 198 },
315 { offsetof(struct ixl_hmc_rxq, prefena), 1, 201 },
316 };
317
318 #define IXL_HMC_RXQ_MINSIZE (201 + 1)
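
#if 0
/*
 * Illustrative sketch only, not part of the driver: how one entry of
 * ixl_hmc_pack_rxq[] above is meant to be consumed.  For example
 * { offsetof(struct ixl_hmc_rxq, qlen), 13, 89 } says "take the
 * little-endian value stored in the qlen field, keep its low 13 bits,
 * and OR them into the HMC context image starting at bit 89".  A naive
 * bit-by-bit version might look like the function below; the real
 * packing is done by ixl_hmc_pack() later in this file.
 */
static void
ixl_hmc_pack_bit_sketch(uint8_t *ctx, uint64_t value, uint16_t width,
    uint16_t lsb)
{
	uint16_t i, bit;

	for (i = 0; i < width; i++) {
		if ((value & (1ULL << i)) == 0)
			continue;
		bit = lsb + i;
		ctx[bit / NBBY] |= 1U << (bit % NBBY);
	}
}
#endif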
319
320 struct ixl_hmc_txq {
321 uint16_t head;
322 uint8_t new_context;
323 uint64_t base;
324 #define IXL_HMC_TXQ_BASE_UNIT 128
325 uint8_t fc_ena;
326 uint8_t timesync_ena;
327 uint8_t fd_ena;
328 uint8_t alt_vlan_ena;
329 uint8_t cpuid;
330 uint16_t thead_wb;
331 uint8_t head_wb_ena;
332 #define IXL_HMC_TXQ_DESC_WB 0
333 #define IXL_HMC_TXQ_HEAD_WB 1
334 uint16_t qlen;
335 uint8_t tphrdesc_ena;
336 uint8_t tphrpacket_ena;
337 uint8_t tphwdesc_ena;
338 uint64_t head_wb_addr;
339 uint32_t crc;
340 uint16_t rdylist;
341 uint8_t rdylist_act;
342 };
343
344 static const struct ixl_hmc_pack ixl_hmc_pack_txq[] = {
345 { offsetof(struct ixl_hmc_txq, head), 13, 0 },
346 { offsetof(struct ixl_hmc_txq, new_context), 1, 30 },
347 { offsetof(struct ixl_hmc_txq, base), 57, 32 },
348 { offsetof(struct ixl_hmc_txq, fc_ena), 1, 89 },
349 { offsetof(struct ixl_hmc_txq, timesync_ena), 1, 90 },
350 { offsetof(struct ixl_hmc_txq, fd_ena), 1, 91 },
351 { offsetof(struct ixl_hmc_txq, alt_vlan_ena), 1, 92 },
352 { offsetof(struct ixl_hmc_txq, cpuid), 8, 96 },
353 /* line 1 */
354 { offsetof(struct ixl_hmc_txq, thead_wb), 13, 0 + 128 },
355 { offsetof(struct ixl_hmc_txq, head_wb_ena), 1, 32 + 128 },
356 { offsetof(struct ixl_hmc_txq, qlen), 13, 33 + 128 },
357 { offsetof(struct ixl_hmc_txq, tphrdesc_ena), 1, 46 + 128 },
358 { offsetof(struct ixl_hmc_txq, tphrpacket_ena), 1, 47 + 128 },
359 { offsetof(struct ixl_hmc_txq, tphwdesc_ena), 1, 48 + 128 },
360 { offsetof(struct ixl_hmc_txq, head_wb_addr), 64, 64 + 128 },
361 /* line 7 */
362 { offsetof(struct ixl_hmc_txq, crc), 32, 0 + (7*128) },
363 { offsetof(struct ixl_hmc_txq, rdylist), 10, 84 + (7*128) },
364 { offsetof(struct ixl_hmc_txq, rdylist_act), 1, 94 + (7*128) },
365 };
366
367 #define IXL_HMC_TXQ_MINSIZE (94 + (7*128) + 1)
368
369 struct ixl_work {
370 struct work ixw_cookie;
371 void (*ixw_func)(void *);
372 void *ixw_arg;
373 unsigned int ixw_added;
374 };
375 #define IXL_WORKQUEUE_PRI PRI_SOFTNET
376
377 struct ixl_tx_map {
378 struct mbuf *txm_m;
379 bus_dmamap_t txm_map;
380 unsigned int txm_eop;
381 };
382
383 struct ixl_tx_ring {
384 kmutex_t txr_lock;
385 struct ixl_softc *txr_sc;
386
387 unsigned int txr_prod;
388 unsigned int txr_cons;
389
390 struct ixl_tx_map *txr_maps;
391 struct ixl_dmamem txr_mem;
392
393 bus_size_t txr_tail;
394 unsigned int txr_qid;
395 pcq_t *txr_intrq;
396 void *txr_si;
397
398 struct evcnt txr_defragged;
399 struct evcnt txr_defrag_failed;
400 struct evcnt txr_pcqdrop;
401 struct evcnt txr_transmitdef;
402 struct evcnt txr_intr;
403 struct evcnt txr_defer;
404 };
405
406 struct ixl_rx_map {
407 struct mbuf *rxm_m;
408 bus_dmamap_t rxm_map;
409 };
410
411 struct ixl_rx_ring {
412 kmutex_t rxr_lock;
413
414 unsigned int rxr_prod;
415 unsigned int rxr_cons;
416
417 struct ixl_rx_map *rxr_maps;
418 struct ixl_dmamem rxr_mem;
419
420 struct mbuf *rxr_m_head;
421 struct mbuf **rxr_m_tail;
422
423 bus_size_t rxr_tail;
424 unsigned int rxr_qid;
425
426 struct evcnt rxr_mgethdr_failed;
427 struct evcnt rxr_mgetcl_failed;
428 struct evcnt rxr_mbuf_load_failed;
429 struct evcnt rxr_intr;
430 struct evcnt rxr_defer;
431 };
432
433 struct ixl_queue_pair {
434 struct ixl_softc *qp_sc;
435 struct ixl_tx_ring *qp_txr;
436 struct ixl_rx_ring *qp_rxr;
437
438 char qp_name[16];
439
440 void *qp_si;
441 struct work qp_work;
442 bool qp_workqueue;
443 };
444
445 struct ixl_atq {
446 struct ixl_aq_desc iatq_desc;
447 void (*iatq_fn)(struct ixl_softc *,
448 const struct ixl_aq_desc *);
449 };
450 SIMPLEQ_HEAD(ixl_atq_list, ixl_atq);
451
452 struct ixl_product {
453 unsigned int vendor_id;
454 unsigned int product_id;
455 };
456
457 struct ixl_stats_counters {
458 bool isc_has_offset;
459 struct evcnt isc_crc_errors;
460 uint64_t isc_crc_errors_offset;
461 struct evcnt isc_illegal_bytes;
462 uint64_t isc_illegal_bytes_offset;
463 struct evcnt isc_rx_bytes;
464 uint64_t isc_rx_bytes_offset;
465 struct evcnt isc_rx_discards;
466 uint64_t isc_rx_discards_offset;
467 struct evcnt isc_rx_unicast;
468 uint64_t isc_rx_unicast_offset;
469 struct evcnt isc_rx_multicast;
470 uint64_t isc_rx_multicast_offset;
471 struct evcnt isc_rx_broadcast;
472 uint64_t isc_rx_broadcast_offset;
473 struct evcnt isc_rx_size_64;
474 uint64_t isc_rx_size_64_offset;
475 struct evcnt isc_rx_size_127;
476 uint64_t isc_rx_size_127_offset;
477 struct evcnt isc_rx_size_255;
478 uint64_t isc_rx_size_255_offset;
479 struct evcnt isc_rx_size_511;
480 uint64_t isc_rx_size_511_offset;
481 struct evcnt isc_rx_size_1023;
482 uint64_t isc_rx_size_1023_offset;
483 struct evcnt isc_rx_size_1522;
484 uint64_t isc_rx_size_1522_offset;
485 struct evcnt isc_rx_size_big;
486 uint64_t isc_rx_size_big_offset;
487 struct evcnt isc_rx_undersize;
488 uint64_t isc_rx_undersize_offset;
489 struct evcnt isc_rx_oversize;
490 uint64_t isc_rx_oversize_offset;
491 struct evcnt isc_rx_fragments;
492 uint64_t isc_rx_fragments_offset;
493 struct evcnt isc_rx_jabber;
494 uint64_t isc_rx_jabber_offset;
495 struct evcnt isc_tx_bytes;
496 uint64_t isc_tx_bytes_offset;
497 struct evcnt isc_tx_dropped_link_down;
498 uint64_t isc_tx_dropped_link_down_offset;
499 struct evcnt isc_tx_unicast;
500 uint64_t isc_tx_unicast_offset;
501 struct evcnt isc_tx_multicast;
502 uint64_t isc_tx_multicast_offset;
503 struct evcnt isc_tx_broadcast;
504 uint64_t isc_tx_broadcast_offset;
505 struct evcnt isc_tx_size_64;
506 uint64_t isc_tx_size_64_offset;
507 struct evcnt isc_tx_size_127;
508 uint64_t isc_tx_size_127_offset;
509 struct evcnt isc_tx_size_255;
510 uint64_t isc_tx_size_255_offset;
511 struct evcnt isc_tx_size_511;
512 uint64_t isc_tx_size_511_offset;
513 struct evcnt isc_tx_size_1023;
514 uint64_t isc_tx_size_1023_offset;
515 struct evcnt isc_tx_size_1522;
516 uint64_t isc_tx_size_1522_offset;
517 struct evcnt isc_tx_size_big;
518 uint64_t isc_tx_size_big_offset;
519 struct evcnt isc_mac_local_faults;
520 uint64_t isc_mac_local_faults_offset;
521 struct evcnt isc_mac_remote_faults;
522 uint64_t isc_mac_remote_faults_offset;
523 struct evcnt isc_link_xon_rx;
524 uint64_t isc_link_xon_rx_offset;
525 struct evcnt isc_link_xon_tx;
526 uint64_t isc_link_xon_tx_offset;
527 struct evcnt isc_link_xoff_rx;
528 uint64_t isc_link_xoff_rx_offset;
529 struct evcnt isc_link_xoff_tx;
530 uint64_t isc_link_xoff_tx_offset;
531 struct evcnt isc_vsi_rx_discards;
532 uint64_t isc_vsi_rx_discards_offset;
533 struct evcnt isc_vsi_rx_bytes;
534 uint64_t isc_vsi_rx_bytes_offset;
535 struct evcnt isc_vsi_rx_unicast;
536 uint64_t isc_vsi_rx_unicast_offset;
537 struct evcnt isc_vsi_rx_multicast;
538 uint64_t isc_vsi_rx_multicast_offset;
539 struct evcnt isc_vsi_rx_broadcast;
540 uint64_t isc_vsi_rx_broadcast_offset;
541 struct evcnt isc_vsi_tx_errors;
542 uint64_t isc_vsi_tx_errors_offset;
543 struct evcnt isc_vsi_tx_bytes;
544 uint64_t isc_vsi_tx_bytes_offset;
545 struct evcnt isc_vsi_tx_unicast;
546 uint64_t isc_vsi_tx_unicast_offset;
547 struct evcnt isc_vsi_tx_multicast;
548 uint64_t isc_vsi_tx_multicast_offset;
549 struct evcnt isc_vsi_tx_broadcast;
550 uint64_t isc_vsi_tx_broadcast_offset;
551 };
552
/*
 * Locking notes:
 * + fields in ixl_tx_ring are protected by txr_lock (a spin mutex), and
 *   fields in ixl_rx_ring are protected by rxr_lock (a spin mutex).
 *     - no more than one of these ring locks may be held at a time.
 * + fields named sc_atq_* in ixl_softc are protected by sc_atq_lock
 *   (a spin mutex).
 *     - this lock must not be held together with txr_lock or rxr_lock.
 * + fields named sc_arq_* are not protected by any lock.
 *     - operations on sc_arq_* are performed in a single context,
 *       driven by sc_arq_task.
 * + other fields in ixl_softc are protected by sc_cfg_lock
 *   (an adaptive mutex).
 *     - it must be acquired before any other lock is taken, and is
 *       released after the other lock has been released.
 */
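
#if 0
/*
 * Illustrative sketch only, not part of the driver: the lock ordering
 * described in the notes above.  When sc_cfg_lock and a ring lock such
 * as txr_lock are both needed, sc_cfg_lock is taken first and released
 * last, and no two of the ring/adminq spin mutexes are held at once.
 */
static void
ixl_lock_order_sketch(struct ixl_softc *sc, struct ixl_tx_ring *txr)
{

	mutex_enter(&sc->sc_cfg_lock);

	mutex_enter(&txr->txr_lock);
	/* ... update per-ring state ... */
	mutex_exit(&txr->txr_lock);

	/* ... update other ixl_softc fields ... */
	mutex_exit(&sc->sc_cfg_lock);
}
#endif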
569
570 struct ixl_softc {
571 device_t sc_dev;
572 struct ethercom sc_ec;
573 bool sc_attached;
574 bool sc_dead;
575 uint32_t sc_port;
576 struct sysctllog *sc_sysctllog;
577 struct workqueue *sc_workq;
578 struct workqueue *sc_workq_txrx;
579 int sc_stats_intval;
580 callout_t sc_stats_callout;
581 struct ixl_work sc_stats_task;
582 struct ixl_stats_counters
583 sc_stats_counters;
584 uint8_t sc_enaddr[ETHER_ADDR_LEN];
585 struct ifmedia sc_media;
586 uint64_t sc_media_status;
587 uint64_t sc_media_active;
588 uint64_t sc_phy_types;
589 uint8_t sc_phy_abilities;
590 uint8_t sc_phy_linkspeed;
591 uint8_t sc_phy_fec_cfg;
592 uint16_t sc_eee_cap;
593 uint32_t sc_eeer_val;
594 uint8_t sc_d3_lpan;
595 kmutex_t sc_cfg_lock;
596 enum i40e_mac_type sc_mac_type;
597 uint32_t sc_rss_table_size;
598 uint32_t sc_rss_table_entry_width;
599 bool sc_txrx_workqueue;
600 u_int sc_tx_process_limit;
601 u_int sc_rx_process_limit;
602 u_int sc_tx_intr_process_limit;
603 u_int sc_rx_intr_process_limit;
604
605 int sc_cur_ec_capenable;
606
607 struct pci_attach_args sc_pa;
608 pci_intr_handle_t *sc_ihp;
609 void **sc_ihs;
610 unsigned int sc_nintrs;
611
612 bus_dma_tag_t sc_dmat;
613 bus_space_tag_t sc_memt;
614 bus_space_handle_t sc_memh;
615 bus_size_t sc_mems;
616
617 uint8_t sc_pf_id;
618 uint16_t sc_uplink_seid; /* le */
619 uint16_t sc_downlink_seid; /* le */
620 uint16_t sc_vsi_number;
621 uint16_t sc_vsi_stat_counter_idx;
622 uint16_t sc_seid;
623 unsigned int sc_base_queue;
624
625 pci_intr_type_t sc_intrtype;
626 unsigned int sc_msix_vector_queue;
627
628 struct ixl_dmamem sc_scratch;
629 struct ixl_dmamem sc_aqbuf;
630
631 const struct ixl_aq_regs *
632 sc_aq_regs;
633 uint32_t sc_aq_flags;
634 #define IXL_SC_AQ_FLAG_RXCTL __BIT(0)
635 #define IXL_SC_AQ_FLAG_NVMLOCK __BIT(1)
636 #define IXL_SC_AQ_FLAG_NVMREAD __BIT(2)
637 #define IXL_SC_AQ_FLAG_RSS __BIT(3)
638
639 kmutex_t sc_atq_lock;
640 kcondvar_t sc_atq_cv;
641 struct ixl_dmamem sc_atq;
642 unsigned int sc_atq_prod;
643 unsigned int sc_atq_cons;
644
645 struct ixl_dmamem sc_arq;
646 struct ixl_work sc_arq_task;
647 struct ixl_aq_bufs sc_arq_idle;
648 struct ixl_aq_buf *sc_arq_live[IXL_AQ_NUM];
649 unsigned int sc_arq_prod;
650 unsigned int sc_arq_cons;
651
652 struct ixl_work sc_link_state_task;
653 struct ixl_atq sc_link_state_atq;
654
655 struct ixl_dmamem sc_hmc_sd;
656 struct ixl_dmamem sc_hmc_pd;
657 struct ixl_hmc_entry sc_hmc_entries[IXL_HMC_COUNT];
658
659 struct if_percpuq *sc_ipq;
660 unsigned int sc_tx_ring_ndescs;
661 unsigned int sc_rx_ring_ndescs;
662 unsigned int sc_nqueue_pairs;
663 unsigned int sc_nqueue_pairs_max;
664 unsigned int sc_nqueue_pairs_device;
665 struct ixl_queue_pair *sc_qps;
666 uint32_t sc_itr_rx;
667 uint32_t sc_itr_tx;
668
669 struct evcnt sc_event_atq;
670 struct evcnt sc_event_link;
671 struct evcnt sc_event_ecc_err;
672 struct evcnt sc_event_pci_exception;
673 struct evcnt sc_event_crit_err;
674 };
675
676 #define IXL_TXRX_PROCESS_UNLIMIT UINT_MAX
677 #define IXL_TX_PROCESS_LIMIT 256
678 #define IXL_RX_PROCESS_LIMIT 256
679 #define IXL_TX_INTR_PROCESS_LIMIT 256
680 #define IXL_RX_INTR_PROCESS_LIMIT 0U
681
682 #define IXL_IFCAP_RXCSUM (IFCAP_CSUM_IPv4_Rx | \
683 IFCAP_CSUM_TCPv4_Rx | \
684 IFCAP_CSUM_UDPv4_Rx | \
685 IFCAP_CSUM_TCPv6_Rx | \
686 IFCAP_CSUM_UDPv6_Rx)
687 #define IXL_IFCAP_TXCSUM (IFCAP_CSUM_IPv4_Tx | \
688 IFCAP_CSUM_TCPv4_Tx | \
689 IFCAP_CSUM_UDPv4_Tx | \
690 IFCAP_CSUM_TCPv6_Tx | \
691 IFCAP_CSUM_UDPv6_Tx)
692 #define IXL_CSUM_ALL_OFFLOAD (M_CSUM_IPv4 | \
693 M_CSUM_TCPv4 | M_CSUM_TCPv6 | \
694 M_CSUM_UDPv4 | M_CSUM_UDPv6)
695
696 #define delaymsec(_x) DELAY(1000 * (_x))
697 #ifdef IXL_DEBUG
698 #define DDPRINTF(sc, fmt, args...) \
699 do { \
700 if ((sc) != NULL) { \
701 device_printf( \
702 ((struct ixl_softc *)(sc))->sc_dev, \
703 ""); \
704 } \
705 printf("%s:\t" fmt, __func__, ##args); \
706 } while (0)
707 #else
708 #define DDPRINTF(sc, fmt, args...) __nothing
709 #endif
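
/*
 * Illustrative usage (the queue id "qid" is hypothetical):
 * DDPRINTF(sc, "qid %u\n", qid) prints something like
 * "ixl0: ixl_rxfill:	qid 3" when built with IXL_DEBUG defined, and
 * expands to nothing otherwise.
 */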
710 #ifndef IXL_STATS_INTERVAL_MSEC
711 #define IXL_STATS_INTERVAL_MSEC 10000
712 #endif
713 #ifndef IXL_QUEUE_NUM
714 #define IXL_QUEUE_NUM 0
715 #endif
716
717 static bool ixl_param_nomsix = false;
718 static int ixl_param_stats_interval = IXL_STATS_INTERVAL_MSEC;
719 static int ixl_param_nqps_limit = IXL_QUEUE_NUM;
720 static unsigned int ixl_param_tx_ndescs = 512;
721 static unsigned int ixl_param_rx_ndescs = 512;
722
723 static enum i40e_mac_type
724 ixl_mactype(pci_product_id_t);
725 static void ixl_pci_csr_setup(pci_chipset_tag_t, pcitag_t);
726 static void ixl_clear_hw(struct ixl_softc *);
727 static int ixl_pf_reset(struct ixl_softc *);
728
729 static int ixl_dmamem_alloc(struct ixl_softc *, struct ixl_dmamem *,
730 bus_size_t, bus_size_t);
731 static void ixl_dmamem_free(struct ixl_softc *, struct ixl_dmamem *);
732
733 static int ixl_arq_fill(struct ixl_softc *);
734 static void ixl_arq_unfill(struct ixl_softc *);
735
736 static int ixl_atq_poll(struct ixl_softc *, struct ixl_aq_desc *,
737 unsigned int);
738 static void ixl_atq_set(struct ixl_atq *,
739 void (*)(struct ixl_softc *, const struct ixl_aq_desc *));
740 static int ixl_atq_post_locked(struct ixl_softc *, struct ixl_atq *);
741 static void ixl_atq_done(struct ixl_softc *);
742 static int ixl_atq_exec(struct ixl_softc *, struct ixl_atq *);
743 static int ixl_atq_exec_locked(struct ixl_softc *, struct ixl_atq *);
744 static int ixl_get_version(struct ixl_softc *);
745 static int ixl_get_nvm_version(struct ixl_softc *);
746 static int ixl_get_hw_capabilities(struct ixl_softc *);
747 static int ixl_pxe_clear(struct ixl_softc *);
748 static int ixl_lldp_shut(struct ixl_softc *);
749 static int ixl_get_mac(struct ixl_softc *);
750 static int ixl_get_switch_config(struct ixl_softc *);
751 static int ixl_phy_mask_ints(struct ixl_softc *);
752 static int ixl_get_phy_info(struct ixl_softc *);
753 static int ixl_set_phy_config(struct ixl_softc *, uint8_t, uint8_t, bool);
754 static int ixl_set_phy_autoselect(struct ixl_softc *);
755 static int ixl_restart_an(struct ixl_softc *);
756 static int ixl_hmc(struct ixl_softc *);
757 static void ixl_hmc_free(struct ixl_softc *);
758 static int ixl_get_vsi(struct ixl_softc *);
759 static int ixl_set_vsi(struct ixl_softc *);
760 static void ixl_set_filter_control(struct ixl_softc *);
761 static void ixl_get_link_status(void *);
762 static int ixl_get_link_status_poll(struct ixl_softc *, int *);
763 static void ixl_get_link_status_done(struct ixl_softc *,
764 const struct ixl_aq_desc *);
765 static int ixl_set_link_status_locked(struct ixl_softc *,
766 const struct ixl_aq_desc *);
767 static uint64_t ixl_search_link_speed(uint8_t);
768 static uint8_t ixl_search_baudrate(uint64_t);
769 static void ixl_config_rss(struct ixl_softc *);
770 static int ixl_add_macvlan(struct ixl_softc *, const uint8_t *,
771 uint16_t, uint16_t);
772 static int ixl_remove_macvlan(struct ixl_softc *, const uint8_t *,
773 uint16_t, uint16_t);
774 static void ixl_arq(void *);
775 static void ixl_hmc_pack(void *, const void *,
776 const struct ixl_hmc_pack *, unsigned int);
777 static uint32_t ixl_rd_rx_csr(struct ixl_softc *, uint32_t);
778 static void ixl_wr_rx_csr(struct ixl_softc *, uint32_t, uint32_t);
779 static int ixl_rd16_nvm(struct ixl_softc *, uint16_t, uint16_t *);
780
781 static int ixl_match(device_t, cfdata_t, void *);
782 static void ixl_attach(device_t, device_t, void *);
783 static int ixl_detach(device_t, int);
784
785 static void ixl_media_add(struct ixl_softc *);
786 static int ixl_media_change(struct ifnet *);
787 static void ixl_media_status(struct ifnet *, struct ifmediareq *);
788 static int ixl_ioctl(struct ifnet *, u_long, void *);
789 static void ixl_start(struct ifnet *);
790 static int ixl_transmit(struct ifnet *, struct mbuf *);
791 static void ixl_deferred_transmit(void *);
792 static int ixl_intr(void *);
793 static int ixl_queue_intr(void *);
794 static int ixl_other_intr(void *);
795 static void ixl_handle_queue(void *);
796 static void ixl_handle_queue_wk(struct work *, void *);
797 static void ixl_sched_handle_queue(struct ixl_softc *,
798 struct ixl_queue_pair *);
799 static int ixl_init(struct ifnet *);
800 static int ixl_init_locked(struct ixl_softc *);
801 static void ixl_stop(struct ifnet *, int);
802 static void ixl_stop_locked(struct ixl_softc *);
803 static int ixl_iff(struct ixl_softc *);
804 static int ixl_ifflags_cb(struct ethercom *);
805 static int ixl_setup_interrupts(struct ixl_softc *);
806 static int ixl_establish_intx(struct ixl_softc *);
807 static int ixl_establish_msix(struct ixl_softc *);
808 static void ixl_enable_queue_intr(struct ixl_softc *,
809 struct ixl_queue_pair *);
810 static void ixl_disable_queue_intr(struct ixl_softc *,
811 struct ixl_queue_pair *);
812 static void ixl_enable_other_intr(struct ixl_softc *);
813 static void ixl_disable_other_intr(struct ixl_softc *);
814 static void ixl_config_queue_intr(struct ixl_softc *);
815 static void ixl_config_other_intr(struct ixl_softc *);
816
817 static struct ixl_tx_ring *
818 ixl_txr_alloc(struct ixl_softc *, unsigned int);
819 static void ixl_txr_qdis(struct ixl_softc *, struct ixl_tx_ring *, int);
820 static void ixl_txr_config(struct ixl_softc *, struct ixl_tx_ring *);
821 static int ixl_txr_enabled(struct ixl_softc *, struct ixl_tx_ring *);
822 static int ixl_txr_disabled(struct ixl_softc *, struct ixl_tx_ring *);
823 static void ixl_txr_unconfig(struct ixl_softc *, struct ixl_tx_ring *);
824 static void ixl_txr_clean(struct ixl_softc *, struct ixl_tx_ring *);
825 static void ixl_txr_free(struct ixl_softc *, struct ixl_tx_ring *);
826 static int ixl_txeof(struct ixl_softc *, struct ixl_tx_ring *, u_int);
827
828 static struct ixl_rx_ring *
829 ixl_rxr_alloc(struct ixl_softc *, unsigned int);
830 static void ixl_rxr_config(struct ixl_softc *, struct ixl_rx_ring *);
831 static int ixl_rxr_enabled(struct ixl_softc *, struct ixl_rx_ring *);
832 static int ixl_rxr_disabled(struct ixl_softc *, struct ixl_rx_ring *);
833 static void ixl_rxr_unconfig(struct ixl_softc *, struct ixl_rx_ring *);
834 static void ixl_rxr_clean(struct ixl_softc *, struct ixl_rx_ring *);
835 static void ixl_rxr_free(struct ixl_softc *, struct ixl_rx_ring *);
836 static int ixl_rxeof(struct ixl_softc *, struct ixl_rx_ring *, u_int);
837 static int ixl_rxfill(struct ixl_softc *, struct ixl_rx_ring *);
838
839 static struct workqueue *
840 ixl_workq_create(const char *, pri_t, int, int);
841 static void ixl_workq_destroy(struct workqueue *);
842 static int ixl_workqs_teardown(device_t);
843 static void ixl_work_set(struct ixl_work *, void (*)(void *), void *);
844 static void ixl_work_add(struct workqueue *, struct ixl_work *);
845 static void ixl_work_wait(struct workqueue *, struct ixl_work *);
846 static void ixl_workq_work(struct work *, void *);
847 static const struct ixl_product *
848 ixl_lookup(const struct pci_attach_args *pa);
849 static void ixl_link_state_update(struct ixl_softc *,
850 const struct ixl_aq_desc *);
851 static int ixl_vlan_cb(struct ethercom *, uint16_t, bool);
852 static int ixl_setup_vlan_hwfilter(struct ixl_softc *);
853 static void ixl_teardown_vlan_hwfilter(struct ixl_softc *);
854 static int ixl_update_macvlan(struct ixl_softc *);
855 static int ixl_setup_interrupts(struct ixl_softc *);
856 static void ixl_teardown_interrupts(struct ixl_softc *);
857 static int ixl_setup_stats(struct ixl_softc *);
858 static void ixl_teardown_stats(struct ixl_softc *);
859 static void ixl_stats_callout(void *);
860 static void ixl_stats_update(void *);
861 static int ixl_setup_sysctls(struct ixl_softc *);
862 static void ixl_teardown_sysctls(struct ixl_softc *);
863 static int ixl_sysctl_itr_handler(SYSCTLFN_PROTO);
864 static int ixl_queue_pairs_alloc(struct ixl_softc *);
865 static void ixl_queue_pairs_free(struct ixl_softc *);
866
867 static const struct ixl_phy_type ixl_phy_type_map[] = {
868 { 1ULL << IXL_PHY_TYPE_SGMII, IFM_1000_SGMII },
869 { 1ULL << IXL_PHY_TYPE_1000BASE_KX, IFM_1000_KX },
870 { 1ULL << IXL_PHY_TYPE_10GBASE_KX4, IFM_10G_KX4 },
871 { 1ULL << IXL_PHY_TYPE_10GBASE_KR, IFM_10G_KR },
872 { 1ULL << IXL_PHY_TYPE_40GBASE_KR4, IFM_40G_KR4 },
873 { 1ULL << IXL_PHY_TYPE_XAUI |
874 1ULL << IXL_PHY_TYPE_XFI, IFM_10G_CX4 },
875 { 1ULL << IXL_PHY_TYPE_SFI, IFM_10G_SFI },
876 { 1ULL << IXL_PHY_TYPE_XLAUI |
877 1ULL << IXL_PHY_TYPE_XLPPI, IFM_40G_XLPPI },
878 { 1ULL << IXL_PHY_TYPE_40GBASE_CR4_CU |
879 1ULL << IXL_PHY_TYPE_40GBASE_CR4, IFM_40G_CR4 },
880 { 1ULL << IXL_PHY_TYPE_10GBASE_CR1_CU |
881 1ULL << IXL_PHY_TYPE_10GBASE_CR1, IFM_10G_CR1 },
882 { 1ULL << IXL_PHY_TYPE_10GBASE_AOC, IFM_10G_AOC },
883 { 1ULL << IXL_PHY_TYPE_40GBASE_AOC, IFM_40G_AOC },
884 { 1ULL << IXL_PHY_TYPE_100BASE_TX, IFM_100_TX },
885 { 1ULL << IXL_PHY_TYPE_1000BASE_T_OPTICAL |
886 1ULL << IXL_PHY_TYPE_1000BASE_T, IFM_1000_T },
887 { 1ULL << IXL_PHY_TYPE_10GBASE_T, IFM_10G_T },
888 { 1ULL << IXL_PHY_TYPE_10GBASE_SR, IFM_10G_SR },
889 { 1ULL << IXL_PHY_TYPE_10GBASE_LR, IFM_10G_LR },
890 { 1ULL << IXL_PHY_TYPE_10GBASE_SFPP_CU, IFM_10G_TWINAX },
891 { 1ULL << IXL_PHY_TYPE_40GBASE_SR4, IFM_40G_SR4 },
892 { 1ULL << IXL_PHY_TYPE_40GBASE_LR4, IFM_40G_LR4 },
893 { 1ULL << IXL_PHY_TYPE_1000BASE_SX, IFM_1000_SX },
894 { 1ULL << IXL_PHY_TYPE_1000BASE_LX, IFM_1000_LX },
895 { 1ULL << IXL_PHY_TYPE_20GBASE_KR2, IFM_20G_KR2 },
896 { 1ULL << IXL_PHY_TYPE_25GBASE_KR, IFM_25G_KR },
897 { 1ULL << IXL_PHY_TYPE_25GBASE_CR, IFM_25G_CR },
898 { 1ULL << IXL_PHY_TYPE_25GBASE_SR, IFM_25G_SR },
899 { 1ULL << IXL_PHY_TYPE_25GBASE_LR, IFM_25G_LR },
900 { 1ULL << IXL_PHY_TYPE_25GBASE_AOC, IFM_25G_AOC },
901 { 1ULL << IXL_PHY_TYPE_25GBASE_ACC, IFM_25G_ACC },
902 { 1ULL << IXL_PHY_TYPE_2500BASE_T_1, IFM_2500_T },
903 { 1ULL << IXL_PHY_TYPE_5000BASE_T_1, IFM_5000_T },
904 { 1ULL << IXL_PHY_TYPE_2500BASE_T_2, IFM_2500_T },
905 { 1ULL << IXL_PHY_TYPE_5000BASE_T_2, IFM_5000_T },
906 };
907
908 static const struct ixl_speed_type ixl_speed_type_map[] = {
909 { IXL_AQ_LINK_SPEED_40GB, IF_Gbps(40) },
910 { IXL_AQ_LINK_SPEED_25GB, IF_Gbps(25) },
911 { IXL_AQ_LINK_SPEED_10GB, IF_Gbps(10) },
912 { IXL_AQ_LINK_SPEED_5000MB, IF_Mbps(5000) },
913 { IXL_AQ_LINK_SPEED_2500MB, IF_Mbps(2500) },
914 { IXL_AQ_LINK_SPEED_1000MB, IF_Mbps(1000) },
915 { IXL_AQ_LINK_SPEED_100MB, IF_Mbps(100)},
916 };
917
918 static const struct ixl_aq_regs ixl_pf_aq_regs = {
919 .atq_tail = I40E_PF_ATQT,
920 .atq_tail_mask = I40E_PF_ATQT_ATQT_MASK,
921 .atq_head = I40E_PF_ATQH,
922 .atq_head_mask = I40E_PF_ATQH_ATQH_MASK,
923 .atq_len = I40E_PF_ATQLEN,
924 .atq_bal = I40E_PF_ATQBAL,
925 .atq_bah = I40E_PF_ATQBAH,
926 .atq_len_enable = I40E_PF_ATQLEN_ATQENABLE_MASK,
927
928 .arq_tail = I40E_PF_ARQT,
929 .arq_tail_mask = I40E_PF_ARQT_ARQT_MASK,
930 .arq_head = I40E_PF_ARQH,
931 .arq_head_mask = I40E_PF_ARQH_ARQH_MASK,
932 .arq_len = I40E_PF_ARQLEN,
933 .arq_bal = I40E_PF_ARQBAL,
934 .arq_bah = I40E_PF_ARQBAH,
935 .arq_len_enable = I40E_PF_ARQLEN_ARQENABLE_MASK,
936 };
937
938 #define ixl_rd(_s, _r) \
939 bus_space_read_4((_s)->sc_memt, (_s)->sc_memh, (_r))
940 #define ixl_wr(_s, _r, _v) \
941 bus_space_write_4((_s)->sc_memt, (_s)->sc_memh, (_r), (_v))
942 #define ixl_barrier(_s, _r, _l, _o) \
943 bus_space_barrier((_s)->sc_memt, (_s)->sc_memh, (_r), (_l), (_o))
944 #define ixl_flush(_s) (void)ixl_rd((_s), I40E_GLGEN_STAT)
945 #define ixl_nqueues(_sc) (1 << ((_sc)->sc_nqueue_pairs - 1))
946
947 CFATTACH_DECL3_NEW(ixl, sizeof(struct ixl_softc),
948 ixl_match, ixl_attach, ixl_detach, NULL, NULL, NULL,
949 DVF_DETACH_SHUTDOWN);
950
951 static const struct ixl_product ixl_products[] = {
952 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_SFP },
953 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_KX_B },
954 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_KX_C },
955 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_A },
956 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_B },
957 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_C },
958 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_T_1 },
959 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_T_2 },
960 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_1 },
961 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_2 },
962 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_T4_10G },
963 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_BP },
964 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_SFP28 },
965 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_KX },
966 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_QSFP },
967 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_SFP },
968 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_1G_BASET },
969 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_10G_BASET },
970 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_I_SFP },
971 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_SFP },
972 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_BP },
973 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_V710_5G_T},
974 /* required last entry */
975 {0, 0}
976 };
977
978 static const struct ixl_product *
979 ixl_lookup(const struct pci_attach_args *pa)
980 {
981 const struct ixl_product *ixlp;
982
983 for (ixlp = ixl_products; ixlp->vendor_id != 0; ixlp++) {
984 if (PCI_VENDOR(pa->pa_id) == ixlp->vendor_id &&
985 PCI_PRODUCT(pa->pa_id) == ixlp->product_id)
986 return ixlp;
987 }
988
989 return NULL;
990 }
991
992 static void
993 ixl_intr_barrier(void)
994 {
995
	/* wait for all handlers to finish */
997 xc_barrier(0);
998 }
999
1000 static int
1001 ixl_match(device_t parent, cfdata_t match, void *aux)
1002 {
1003 const struct pci_attach_args *pa = aux;
1004
1005 return (ixl_lookup(pa) != NULL) ? 1 : 0;
1006 }
1007
1008 static void
1009 ixl_attach(device_t parent, device_t self, void *aux)
1010 {
1011 struct ixl_softc *sc;
1012 struct pci_attach_args *pa = aux;
1013 struct ifnet *ifp;
1014 pcireg_t memtype;
1015 uint32_t firstq, port, ari, func;
1016 char xnamebuf[32];
1017 int tries, rv, link;
1018
1019 sc = device_private(self);
1020 sc->sc_dev = self;
1021 ifp = &sc->sc_ec.ec_if;
1022
1023 sc->sc_pa = *pa;
1024 sc->sc_dmat = (pci_dma64_available(pa)) ?
1025 pa->pa_dmat64 : pa->pa_dmat;
1026 sc->sc_aq_regs = &ixl_pf_aq_regs;
1027
1028 sc->sc_mac_type = ixl_mactype(PCI_PRODUCT(pa->pa_id));
1029
1030 ixl_pci_csr_setup(pa->pa_pc, pa->pa_tag);
1031
1032 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IXL_PCIREG);
1033 if (pci_mapreg_map(pa, IXL_PCIREG, memtype, 0,
1034 &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems)) {
1035 aprint_error(": unable to map registers\n");
1036 return;
1037 }
1038
1039 mutex_init(&sc->sc_cfg_lock, MUTEX_DEFAULT, IPL_SOFTNET);
1040
1041 firstq = ixl_rd(sc, I40E_PFLAN_QALLOC);
1042 firstq &= I40E_PFLAN_QALLOC_FIRSTQ_MASK;
1043 firstq >>= I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
1044 sc->sc_base_queue = firstq;
1045
1046 ixl_clear_hw(sc);
1047 if (ixl_pf_reset(sc) == -1) {
1048 /* error printed by ixl pf_reset */
1049 goto unmap;
1050 }
1051
1052 port = ixl_rd(sc, I40E_PFGEN_PORTNUM);
1053 port &= I40E_PFGEN_PORTNUM_PORT_NUM_MASK;
1054 port >>= I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
1055 sc->sc_port = port;
1056 aprint_normal(": port %u", sc->sc_port);
1057
1058 ari = ixl_rd(sc, I40E_GLPCI_CAPSUP);
1059 ari &= I40E_GLPCI_CAPSUP_ARI_EN_MASK;
1060 ari >>= I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
1061
1062 func = ixl_rd(sc, I40E_PF_FUNC_RID);
1063 sc->sc_pf_id = func & (ari ? 0xff : 0x7);
1064
1065 /* initialise the adminq */
1066
1067 mutex_init(&sc->sc_atq_lock, MUTEX_DEFAULT, IPL_NET);
1068
1069 if (ixl_dmamem_alloc(sc, &sc->sc_atq,
1070 sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
1071 aprint_error("\n" "%s: unable to allocate atq\n",
1072 device_xname(self));
1073 goto unmap;
1074 }
1075
1076 SIMPLEQ_INIT(&sc->sc_arq_idle);
1077 ixl_work_set(&sc->sc_arq_task, ixl_arq, sc);
1078 sc->sc_arq_cons = 0;
1079 sc->sc_arq_prod = 0;
1080
1081 if (ixl_dmamem_alloc(sc, &sc->sc_arq,
1082 sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
1083 aprint_error("\n" "%s: unable to allocate arq\n",
1084 device_xname(self));
1085 goto free_atq;
1086 }
1087
1088 if (!ixl_arq_fill(sc)) {
1089 aprint_error("\n" "%s: unable to fill arq descriptors\n",
1090 device_xname(self));
1091 goto free_arq;
1092 }
1093
1094 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1095 0, IXL_DMA_LEN(&sc->sc_atq),
1096 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1097
1098 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1099 0, IXL_DMA_LEN(&sc->sc_arq),
1100 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1101
1102 for (tries = 0; tries < 10; tries++) {
1103 sc->sc_atq_cons = 0;
1104 sc->sc_atq_prod = 0;
1105
1106 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1107 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1108 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1109 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1110
1111 ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
1112
1113 ixl_wr(sc, sc->sc_aq_regs->atq_bal,
1114 ixl_dmamem_lo(&sc->sc_atq));
1115 ixl_wr(sc, sc->sc_aq_regs->atq_bah,
1116 ixl_dmamem_hi(&sc->sc_atq));
1117 ixl_wr(sc, sc->sc_aq_regs->atq_len,
1118 sc->sc_aq_regs->atq_len_enable | IXL_AQ_NUM);
1119
1120 ixl_wr(sc, sc->sc_aq_regs->arq_bal,
1121 ixl_dmamem_lo(&sc->sc_arq));
1122 ixl_wr(sc, sc->sc_aq_regs->arq_bah,
1123 ixl_dmamem_hi(&sc->sc_arq));
1124 ixl_wr(sc, sc->sc_aq_regs->arq_len,
1125 sc->sc_aq_regs->arq_len_enable | IXL_AQ_NUM);
1126
1127 rv = ixl_get_version(sc);
1128 if (rv == 0)
1129 break;
1130 if (rv != ETIMEDOUT) {
1131 aprint_error(", unable to get firmware version\n");
1132 goto shutdown;
1133 }
1134
1135 delaymsec(100);
1136 }
1137
1138 ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
1139
1140 if (ixl_dmamem_alloc(sc, &sc->sc_aqbuf, IXL_AQ_BUFLEN, 0) != 0) {
1141 aprint_error_dev(self, ", unable to allocate nvm buffer\n");
1142 goto shutdown;
1143 }
1144
1145 ixl_get_nvm_version(sc);
1146
1147 if (sc->sc_mac_type == I40E_MAC_X722)
1148 sc->sc_nqueue_pairs_device = IXL_QUEUE_MAX_X722;
1149 else
1150 sc->sc_nqueue_pairs_device = IXL_QUEUE_MAX_XL710;
1151
1152 rv = ixl_get_hw_capabilities(sc);
1153 if (rv != 0) {
1154 aprint_error(", GET HW CAPABILITIES %s\n",
1155 rv == ETIMEDOUT ? "timeout" : "error");
1156 goto free_aqbuf;
1157 }
1158
1159 sc->sc_nqueue_pairs_max = MIN((int)sc->sc_nqueue_pairs_device, ncpu);
1160 if (ixl_param_nqps_limit > 0) {
1161 sc->sc_nqueue_pairs_max = MIN((int)sc->sc_nqueue_pairs_max,
1162 ixl_param_nqps_limit);
1163 }
1164
1165 sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max;
1166 sc->sc_tx_ring_ndescs = ixl_param_tx_ndescs;
1167 sc->sc_rx_ring_ndescs = ixl_param_rx_ndescs;
1168
1169 KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_rx_ring_ndescs);
1170 KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_tx_ring_ndescs);
1171 KASSERT(sc->sc_rx_ring_ndescs ==
1172 (1U << (fls32(sc->sc_rx_ring_ndescs) - 1)));
1173 KASSERT(sc->sc_tx_ring_ndescs ==
1174 (1U << (fls32(sc->sc_tx_ring_ndescs) - 1)));
1175
1176 if (ixl_get_mac(sc) != 0) {
1177 /* error printed by ixl_get_mac */
1178 goto free_aqbuf;
1179 }
1180
1181 aprint_normal("\n");
1182 aprint_naive("\n");
1183
1184 aprint_normal_dev(self, "Ethernet address %s\n",
1185 ether_sprintf(sc->sc_enaddr));
1186
1187 rv = ixl_pxe_clear(sc);
1188 if (rv != 0) {
1189 aprint_debug_dev(self, "CLEAR PXE MODE %s\n",
1190 rv == ETIMEDOUT ? "timeout" : "error");
1191 }
1192
1193 ixl_set_filter_control(sc);
1194
1195 if (ixl_hmc(sc) != 0) {
1196 /* error printed by ixl_hmc */
1197 goto free_aqbuf;
1198 }
1199
1200 if (ixl_lldp_shut(sc) != 0) {
1201 /* error printed by ixl_lldp_shut */
1202 goto free_hmc;
1203 }
1204
1205 if (ixl_phy_mask_ints(sc) != 0) {
1206 /* error printed by ixl_phy_mask_ints */
1207 goto free_hmc;
1208 }
1209
1210 if (ixl_restart_an(sc) != 0) {
1211 /* error printed by ixl_restart_an */
1212 goto free_hmc;
1213 }
1214
1215 if (ixl_get_switch_config(sc) != 0) {
1216 /* error printed by ixl_get_switch_config */
1217 goto free_hmc;
1218 }
1219
1220 rv = ixl_get_link_status_poll(sc, NULL);
1221 if (rv != 0) {
1222 aprint_error_dev(self, "GET LINK STATUS %s\n",
1223 rv == ETIMEDOUT ? "timeout" : "error");
1224 goto free_hmc;
1225 }
1226
	/*
	 * The firmware often returns EIO for the "Get PHY Abilities"
	 * command if it is issued without a short delay beforehand.
	 */
1231 DELAY(500);
1232 if (ixl_get_phy_info(sc) != 0) {
1233 /* error printed by ixl_get_phy_info */
1234 goto free_hmc;
1235 }
1236
1237 if (ixl_dmamem_alloc(sc, &sc->sc_scratch,
1238 sizeof(struct ixl_aq_vsi_data), 8) != 0) {
1239 aprint_error_dev(self, "unable to allocate scratch buffer\n");
1240 goto free_hmc;
1241 }
1242
1243 rv = ixl_get_vsi(sc);
1244 if (rv != 0) {
1245 aprint_error_dev(self, "GET VSI %s %d\n",
1246 rv == ETIMEDOUT ? "timeout" : "error", rv);
1247 goto free_scratch;
1248 }
1249
1250 rv = ixl_set_vsi(sc);
1251 if (rv != 0) {
1252 aprint_error_dev(self, "UPDATE VSI error %s %d\n",
1253 rv == ETIMEDOUT ? "timeout" : "error", rv);
1254 goto free_scratch;
1255 }
1256
1257 if (ixl_queue_pairs_alloc(sc) != 0) {
1258 /* error printed by ixl_queue_pairs_alloc */
1259 goto free_scratch;
1260 }
1261
1262 if (ixl_setup_interrupts(sc) != 0) {
1263 /* error printed by ixl_setup_interrupts */
1264 goto free_queue_pairs;
1265 }
1266
1267 if (ixl_setup_stats(sc) != 0) {
1268 aprint_error_dev(self, "failed to setup event counters\n");
1269 goto teardown_intrs;
1270 }
1271
1272 if (ixl_setup_sysctls(sc) != 0) {
1273 /* error printed by ixl_setup_sysctls */
1274 goto teardown_stats;
1275 }
1276
1277 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_cfg", device_xname(self));
1278 sc->sc_workq = ixl_workq_create(xnamebuf, IXL_WORKQUEUE_PRI,
1279 IPL_NET, WQ_MPSAFE);
1280 if (sc->sc_workq == NULL)
1281 goto teardown_sysctls;
1282
1283 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_txrx", device_xname(self));
1284 rv = workqueue_create(&sc->sc_workq_txrx, xnamebuf, ixl_handle_queue_wk,
1285 sc, IXL_WORKQUEUE_PRI, IPL_NET, WQ_PERCPU | WQ_MPSAFE);
1286 if (rv != 0) {
1287 sc->sc_workq_txrx = NULL;
1288 goto teardown_wqs;
1289 }
1290
1291 snprintf(xnamebuf, sizeof(xnamebuf), "%s_atq_cv", device_xname(self));
1292 cv_init(&sc->sc_atq_cv, xnamebuf);
1293
1294 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
1295
1296 ifp->if_softc = sc;
1297 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1298 ifp->if_extflags = IFEF_MPSAFE;
1299 ifp->if_ioctl = ixl_ioctl;
1300 ifp->if_start = ixl_start;
1301 ifp->if_transmit = ixl_transmit;
1302 ifp->if_init = ixl_init;
1303 ifp->if_stop = ixl_stop;
1304 IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_ndescs);
1305 IFQ_SET_READY(&ifp->if_snd);
1306 ifp->if_capabilities |= IXL_IFCAP_RXCSUM;
1307 ifp->if_capabilities |= IXL_IFCAP_TXCSUM;
1308 #if 0
1309 ifp->if_capabilities |= IFCAP_TSOv4 | IFCAP_TSOv6;
1310 #endif
1311 ether_set_vlan_cb(&sc->sc_ec, ixl_vlan_cb);
1312 sc->sc_ec.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1313 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
1314 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
1315
1316 sc->sc_ec.ec_capenable = sc->sc_ec.ec_capabilities;
1317 /* Disable VLAN_HWFILTER by default */
1318 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
1319
1320 sc->sc_cur_ec_capenable = sc->sc_ec.ec_capenable;
1321
1322 sc->sc_ec.ec_ifmedia = &sc->sc_media;
1323 ifmedia_init_with_lock(&sc->sc_media, IFM_IMASK, ixl_media_change,
1324 ixl_media_status, &sc->sc_cfg_lock);
1325
1326 ixl_media_add(sc);
1327 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1328 if (ISSET(sc->sc_phy_abilities,
1329 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX))) {
1330 ifmedia_add(&sc->sc_media,
1331 IFM_ETHER | IFM_AUTO | IFM_FLOW, 0, NULL);
1332 }
1333 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_NONE, 0, NULL);
1334 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
1335
1336 if_initialize(ifp);
1337
1338 sc->sc_ipq = if_percpuq_create(ifp);
1339 if_deferred_start_init(ifp, NULL);
1340 ether_ifattach(ifp, sc->sc_enaddr);
1341 ether_set_ifflags_cb(&sc->sc_ec, ixl_ifflags_cb);
1342
1343 rv = ixl_get_link_status_poll(sc, &link);
1344 if (rv != 0)
1345 link = LINK_STATE_UNKNOWN;
1346 if_link_state_change(ifp, link);
1347
1348 ixl_atq_set(&sc->sc_link_state_atq, ixl_get_link_status_done);
1349 ixl_work_set(&sc->sc_link_state_task, ixl_get_link_status, sc);
1350
1351 ixl_config_other_intr(sc);
1352 ixl_enable_other_intr(sc);
1353
1354 ixl_set_phy_autoselect(sc);
1355
1356 /* remove default mac filter and replace it so we can see vlans */
1357 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, 0, 0);
1358 if (rv != ENOENT) {
1359 aprint_debug_dev(self,
1360 "unable to remove macvlan %u\n", rv);
1361 }
1362 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
1363 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1364 if (rv != ENOENT) {
1365 aprint_debug_dev(self,
1366 "unable to remove macvlan, ignore vlan %u\n", rv);
1367 }
1368
1369 if (ixl_update_macvlan(sc) != 0) {
1370 aprint_debug_dev(self,
1371 "couldn't enable vlan hardware filter\n");
1372 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
1373 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER);
1374 }
1375
1376 sc->sc_txrx_workqueue = true;
1377 sc->sc_tx_process_limit = IXL_TX_PROCESS_LIMIT;
1378 sc->sc_rx_process_limit = IXL_RX_PROCESS_LIMIT;
1379 sc->sc_tx_intr_process_limit = IXL_TX_INTR_PROCESS_LIMIT;
1380 sc->sc_rx_intr_process_limit = IXL_RX_INTR_PROCESS_LIMIT;
1381
1382 ixl_stats_update(sc);
1383 sc->sc_stats_counters.isc_has_offset = true;
1384
1385 if (pmf_device_register(self, NULL, NULL) != true)
1386 aprint_debug_dev(self, "couldn't establish power handler\n");
1387 sc->sc_itr_rx = IXL_ITR_RX;
1388 sc->sc_itr_tx = IXL_ITR_TX;
1389 sc->sc_attached = true;
1390 if_register(ifp);
1391
1392 return;
1393
1394 teardown_wqs:
1395 config_finalize_register(self, ixl_workqs_teardown);
1396 teardown_sysctls:
1397 ixl_teardown_sysctls(sc);
1398 teardown_stats:
1399 ixl_teardown_stats(sc);
1400 teardown_intrs:
1401 ixl_teardown_interrupts(sc);
1402 free_queue_pairs:
1403 ixl_queue_pairs_free(sc);
1404 free_scratch:
1405 ixl_dmamem_free(sc, &sc->sc_scratch);
1406 free_hmc:
1407 ixl_hmc_free(sc);
1408 free_aqbuf:
1409 ixl_dmamem_free(sc, &sc->sc_aqbuf);
1410 shutdown:
1411 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1412 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1413 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1414 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1415
1416 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0);
1417 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0);
1418 ixl_wr(sc, sc->sc_aq_regs->atq_len, 0);
1419
1420 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0);
1421 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0);
1422 ixl_wr(sc, sc->sc_aq_regs->arq_len, 0);
1423
1424 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1425 0, IXL_DMA_LEN(&sc->sc_arq),
1426 BUS_DMASYNC_POSTREAD);
1427 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1428 0, IXL_DMA_LEN(&sc->sc_atq),
1429 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1430
1431 ixl_arq_unfill(sc);
1432 free_arq:
1433 ixl_dmamem_free(sc, &sc->sc_arq);
1434 free_atq:
1435 ixl_dmamem_free(sc, &sc->sc_atq);
1436 unmap:
1437 mutex_destroy(&sc->sc_atq_lock);
1438 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
1439 mutex_destroy(&sc->sc_cfg_lock);
1440 sc->sc_mems = 0;
1441
1442 sc->sc_attached = false;
1443 }
1444
1445 static int
1446 ixl_detach(device_t self, int flags)
1447 {
1448 struct ixl_softc *sc = device_private(self);
1449 struct ifnet *ifp = &sc->sc_ec.ec_if;
1450
1451 if (!sc->sc_attached)
1452 return 0;
1453
1454 ixl_stop(ifp, 1);
1455
1456 callout_halt(&sc->sc_stats_callout, NULL);
1457 ixl_work_wait(sc->sc_workq, &sc->sc_stats_task);
1458
	/* detach the interface before stopping the adminq due to callbacks */
1460 ether_ifdetach(ifp);
1461 if_detach(ifp);
1462 ifmedia_fini(&sc->sc_media);
1463 if_percpuq_destroy(sc->sc_ipq);
1464
1465 ixl_disable_other_intr(sc);
1466 ixl_intr_barrier();
1467 ixl_work_wait(sc->sc_workq, &sc->sc_arq_task);
1468 ixl_work_wait(sc->sc_workq, &sc->sc_link_state_task);
1469
1470 if (sc->sc_workq != NULL) {
1471 ixl_workq_destroy(sc->sc_workq);
1472 sc->sc_workq = NULL;
1473 }
1474
1475 if (sc->sc_workq_txrx != NULL) {
1476 workqueue_destroy(sc->sc_workq_txrx);
1477 sc->sc_workq_txrx = NULL;
1478 }
1479
1480 ixl_teardown_interrupts(sc);
1481 ixl_teardown_stats(sc);
1482 ixl_teardown_sysctls(sc);
1483
1484 ixl_queue_pairs_free(sc);
1485
1486 ixl_dmamem_free(sc, &sc->sc_scratch);
1487 ixl_hmc_free(sc);
1488
1489 /* shutdown */
1490 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1491 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1492 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1493 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1494
1495 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0);
1496 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0);
1497 ixl_wr(sc, sc->sc_aq_regs->atq_len, 0);
1498
1499 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0);
1500 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0);
1501 ixl_wr(sc, sc->sc_aq_regs->arq_len, 0);
1502
1503 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1504 0, IXL_DMA_LEN(&sc->sc_arq),
1505 BUS_DMASYNC_POSTREAD);
1506 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1507 0, IXL_DMA_LEN(&sc->sc_atq),
1508 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1509
1510 ixl_arq_unfill(sc);
1511
1512 ixl_dmamem_free(sc, &sc->sc_arq);
1513 ixl_dmamem_free(sc, &sc->sc_atq);
1514 ixl_dmamem_free(sc, &sc->sc_aqbuf);
1515
1516 cv_destroy(&sc->sc_atq_cv);
1517 mutex_destroy(&sc->sc_atq_lock);
1518
1519 if (sc->sc_mems != 0) {
1520 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
1521 sc->sc_mems = 0;
1522 }
1523
1524 mutex_destroy(&sc->sc_cfg_lock);
1525
1526 return 0;
1527 }
1528
1529 static int
1530 ixl_workqs_teardown(device_t self)
1531 {
1532 struct ixl_softc *sc = device_private(self);
1533
1534 if (sc->sc_workq != NULL) {
1535 ixl_workq_destroy(sc->sc_workq);
1536 sc->sc_workq = NULL;
1537 }
1538
1539 if (sc->sc_workq_txrx != NULL) {
1540 workqueue_destroy(sc->sc_workq_txrx);
1541 sc->sc_workq_txrx = NULL;
1542 }
1543
1544 return 0;
1545 }
1546
1547 static int
1548 ixl_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
1549 {
1550 struct ifnet *ifp = &ec->ec_if;
1551 struct ixl_softc *sc = ifp->if_softc;
1552 int rv;
1553
1554 if (!ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)) {
1555 return 0;
1556 }
1557
1558 if (set) {
1559 rv = ixl_add_macvlan(sc, sc->sc_enaddr, vid,
1560 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
1561 if (rv == 0) {
1562 rv = ixl_add_macvlan(sc, etherbroadcastaddr,
1563 vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
1564 }
1565 } else {
1566 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, vid,
1567 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
1568 (void)ixl_remove_macvlan(sc, etherbroadcastaddr, vid,
1569 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
1570 }
1571
1572 return rv;
1573 }
1574
1575 static void
1576 ixl_media_add(struct ixl_softc *sc)
1577 {
1578 struct ifmedia *ifm = &sc->sc_media;
1579 const struct ixl_phy_type *itype;
1580 unsigned int i;
1581 bool flow;
1582
1583 if (ISSET(sc->sc_phy_abilities,
1584 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX))) {
1585 flow = true;
1586 } else {
1587 flow = false;
1588 }
1589
1590 for (i = 0; i < __arraycount(ixl_phy_type_map); i++) {
1591 itype = &ixl_phy_type_map[i];
1592
1593 if (ISSET(sc->sc_phy_types, itype->phy_type)) {
1594 ifmedia_add(ifm,
1595 IFM_ETHER | IFM_FDX | itype->ifm_type, 0, NULL);
1596
1597 if (flow) {
1598 ifmedia_add(ifm,
1599 IFM_ETHER | IFM_FDX | IFM_FLOW |
1600 itype->ifm_type, 0, NULL);
1601 }
1602
1603 if (itype->ifm_type != IFM_100_TX)
1604 continue;
1605
1606 ifmedia_add(ifm, IFM_ETHER | itype->ifm_type,
1607 0, NULL);
1608 if (flow) {
1609 ifmedia_add(ifm,
1610 IFM_ETHER | IFM_FLOW | itype->ifm_type,
1611 0, NULL);
1612 }
1613 }
1614 }
1615 }
1616
1617 static void
1618 ixl_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1619 {
1620 struct ixl_softc *sc = ifp->if_softc;
1621
1622 KASSERT(mutex_owned(&sc->sc_cfg_lock));
1623
1624 ifmr->ifm_status = sc->sc_media_status;
1625 ifmr->ifm_active = sc->sc_media_active;
1626 }
1627
1628 static int
1629 ixl_media_change(struct ifnet *ifp)
1630 {
1631 struct ixl_softc *sc = ifp->if_softc;
1632 struct ifmedia *ifm = &sc->sc_media;
1633 uint64_t ifm_active = sc->sc_media_active;
1634 uint8_t link_speed, abilities;
1635
1636 switch (IFM_SUBTYPE(ifm_active)) {
1637 case IFM_1000_SGMII:
1638 case IFM_1000_KX:
1639 case IFM_10G_KX4:
1640 case IFM_10G_KR:
1641 case IFM_40G_KR4:
1642 case IFM_20G_KR2:
1643 case IFM_25G_KR:
1644 /* backplanes */
1645 return EINVAL;
1646 }
1647
1648 abilities = IXL_PHY_ABILITY_AUTONEGO | IXL_PHY_ABILITY_LINKUP;
1649
1650 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1651 case IFM_AUTO:
1652 link_speed = sc->sc_phy_linkspeed;
1653 break;
1654 case IFM_NONE:
1655 link_speed = 0;
1656 CLR(abilities, IXL_PHY_ABILITY_LINKUP);
1657 break;
1658 default:
1659 link_speed = ixl_search_baudrate(
1660 ifmedia_baudrate(ifm->ifm_media));
1661 }
1662
1663 if (ISSET(abilities, IXL_PHY_ABILITY_LINKUP)) {
1664 if (ISSET(link_speed, sc->sc_phy_linkspeed) == 0)
1665 return EINVAL;
1666 }
1667
1668 if (ifm->ifm_media & IFM_FLOW) {
1669 abilities |= sc->sc_phy_abilities &
1670 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX);
1671 }
1672
1673 return ixl_set_phy_config(sc, link_speed, abilities, false);
1674 }
1675
1676
1677 static void
1678 ixl_del_all_multiaddr(struct ixl_softc *sc)
1679 {
1680 struct ethercom *ec = &sc->sc_ec;
1681 struct ether_multi *enm;
1682 struct ether_multistep step;
1683
1684 ETHER_LOCK(ec);
1685 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1686 ETHER_NEXT_MULTI(step, enm)) {
1687 ixl_remove_macvlan(sc, enm->enm_addrlo, 0,
1688 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1689 }
1690 ETHER_UNLOCK(ec);
1691 }
1692
1693 static int
1694 ixl_add_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
1695 {
1696 struct ifnet *ifp = &sc->sc_ec.ec_if;
1697 int rv;
1698
1699 if (ISSET(ifp->if_flags, IFF_ALLMULTI))
1700 return 0;
1701
1702 if (memcmp(addrlo, addrhi, ETHER_ADDR_LEN) != 0) {
1703 ixl_del_all_multiaddr(sc);
1704 SET(ifp->if_flags, IFF_ALLMULTI);
1705 return ENETRESET;
1706 }
1707
	/* multicast addresses cannot use the VLAN hardware filter */
1709 rv = ixl_add_macvlan(sc, addrlo, 0,
1710 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
1711
1712 if (rv == ENOSPC) {
1713 ixl_del_all_multiaddr(sc);
1714 SET(ifp->if_flags, IFF_ALLMULTI);
1715 return ENETRESET;
1716 }
1717
1718 return rv;
1719 }
1720
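/*
 * When the address forcing IFF_ALLMULTI is removed, try to re-install
 * an individual filter for every remaining multicast address.  If any
 * of them fails to program, roll the additions back and stay in
 * ALLMULTI mode.
 */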
1721 static int
1722 ixl_del_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
1723 {
1724 struct ifnet *ifp = &sc->sc_ec.ec_if;
1725 struct ethercom *ec = &sc->sc_ec;
1726 struct ether_multi *enm, *enm_last;
1727 struct ether_multistep step;
1728 int error, rv = 0;
1729
1730 if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
1731 ixl_remove_macvlan(sc, addrlo, 0,
1732 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1733 return 0;
1734 }
1735
1736 ETHER_LOCK(ec);
1737 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1738 ETHER_NEXT_MULTI(step, enm)) {
1739 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
1740 ETHER_ADDR_LEN) != 0) {
1741 goto out;
1742 }
1743 }
1744
1745 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1746 ETHER_NEXT_MULTI(step, enm)) {
1747 error = ixl_add_macvlan(sc, enm->enm_addrlo, 0,
1748 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
1749 if (error != 0)
1750 break;
1751 }
1752
1753 if (enm != NULL) {
1754 enm_last = enm;
1755 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1756 ETHER_NEXT_MULTI(step, enm)) {
1757 if (enm == enm_last)
1758 break;
1759
1760 ixl_remove_macvlan(sc, enm->enm_addrlo, 0,
1761 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1762 }
1763 } else {
1764 CLR(ifp->if_flags, IFF_ALLMULTI);
1765 rv = ENETRESET;
1766 }
1767
1768 out:
1769 ETHER_UNLOCK(ec);
1770 return rv;
1771 }
1772
1773 static int
1774 ixl_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1775 {
1776 struct ifreq *ifr = (struct ifreq *)data;
1777 struct ixl_softc *sc = (struct ixl_softc *)ifp->if_softc;
1778 const struct sockaddr *sa;
1779 uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
1780 int s, error = 0;
1781 unsigned int nmtu;
1782
1783 switch (cmd) {
1784 case SIOCSIFMTU:
1785 nmtu = ifr->ifr_mtu;
1786
1787 if (nmtu < IXL_MIN_MTU || nmtu > IXL_MAX_MTU) {
1788 error = EINVAL;
1789 break;
1790 }
1791 if (ifp->if_mtu != nmtu) {
1792 s = splnet();
1793 error = ether_ioctl(ifp, cmd, data);
1794 splx(s);
1795 if (error == ENETRESET)
1796 error = ixl_init(ifp);
1797 }
1798 break;
1799 case SIOCADDMULTI:
1800 sa = ifreq_getaddr(SIOCADDMULTI, ifr);
1801 if (ether_addmulti(sa, &sc->sc_ec) == ENETRESET) {
1802 error = ether_multiaddr(sa, addrlo, addrhi);
1803 if (error != 0)
1804 return error;
1805
1806 error = ixl_add_multi(sc, addrlo, addrhi);
1807 if (error != 0 && error != ENETRESET) {
1808 ether_delmulti(sa, &sc->sc_ec);
1809 error = EIO;
1810 }
1811 }
1812 break;
1813
1814 case SIOCDELMULTI:
1815 sa = ifreq_getaddr(SIOCDELMULTI, ifr);
1816 if (ether_delmulti(sa, &sc->sc_ec) == ENETRESET) {
1817 error = ether_multiaddr(sa, addrlo, addrhi);
1818 if (error != 0)
1819 return error;
1820
1821 error = ixl_del_multi(sc, addrlo, addrhi);
1822 }
1823 break;
1824
1825 default:
1826 s = splnet();
1827 error = ether_ioctl(ifp, cmd, data);
1828 splx(s);
1829 }
1830
1831 if (error == ENETRESET)
1832 error = ixl_iff(sc);
1833
1834 return error;
1835 }
1836
1837 static enum i40e_mac_type
1838 ixl_mactype(pci_product_id_t id)
1839 {
1840
1841 switch (id) {
1842 case PCI_PRODUCT_INTEL_XL710_SFP:
1843 case PCI_PRODUCT_INTEL_XL710_KX_B:
1844 case PCI_PRODUCT_INTEL_XL710_KX_C:
1845 case PCI_PRODUCT_INTEL_XL710_QSFP_A:
1846 case PCI_PRODUCT_INTEL_XL710_QSFP_B:
1847 case PCI_PRODUCT_INTEL_XL710_QSFP_C:
1848 case PCI_PRODUCT_INTEL_X710_10G_T_1:
1849 case PCI_PRODUCT_INTEL_X710_10G_T_2:
1850 case PCI_PRODUCT_INTEL_XL710_20G_BP_1:
1851 case PCI_PRODUCT_INTEL_XL710_20G_BP_2:
1852 case PCI_PRODUCT_INTEL_X710_T4_10G:
1853 case PCI_PRODUCT_INTEL_XXV710_25G_BP:
1854 case PCI_PRODUCT_INTEL_XXV710_25G_SFP28:
1855 case PCI_PRODUCT_INTEL_X710_10G_SFP:
1856 case PCI_PRODUCT_INTEL_X710_10G_BP:
1857 return I40E_MAC_XL710;
1858
1859 case PCI_PRODUCT_INTEL_X722_KX:
1860 case PCI_PRODUCT_INTEL_X722_QSFP:
1861 case PCI_PRODUCT_INTEL_X722_SFP:
1862 case PCI_PRODUCT_INTEL_X722_1G_BASET:
1863 case PCI_PRODUCT_INTEL_X722_10G_BASET:
1864 case PCI_PRODUCT_INTEL_X722_I_SFP:
1865 return I40E_MAC_X722;
1866 }
1867
1868 return I40E_MAC_GENERIC;
1869 }
1870
1871 static void
1872 ixl_pci_csr_setup(pci_chipset_tag_t pc, pcitag_t tag)
1873 {
1874 pcireg_t csr;
1875
1876 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
1877 csr |= (PCI_COMMAND_MASTER_ENABLE |
1878 PCI_COMMAND_MEM_ENABLE);
1879 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
1880 }
1881
1882 static inline void *
1883 ixl_hmc_kva(struct ixl_softc *sc, enum ixl_hmc_types type, unsigned int i)
1884 {
1885 uint8_t *kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
1886 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
1887
1888 if (i >= e->hmc_count)
1889 return NULL;
1890
1891 kva += e->hmc_base;
1892 kva += i * e->hmc_size;
1893
1894 return kva;
1895 }
1896
1897 static inline size_t
1898 ixl_hmc_len(struct ixl_softc *sc, enum ixl_hmc_types type)
1899 {
1900 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
1901
1902 return e->hmc_size;
1903 }
1904
1905 static void
1906 ixl_enable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp)
1907 {
1908 struct ixl_rx_ring *rxr = qp->qp_rxr;
1909
1910 ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid),
1911 I40E_PFINT_DYN_CTLN_INTENA_MASK |
1912 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1913 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
1914 ixl_flush(sc);
1915 }
1916
1917 static void
1918 ixl_disable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp)
1919 {
1920 struct ixl_rx_ring *rxr = qp->qp_rxr;
1921
1922 ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid),
1923 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
1924 ixl_flush(sc);
1925 }
1926
1927 static void
1928 ixl_enable_other_intr(struct ixl_softc *sc)
1929 {
1930
1931 ixl_wr(sc, I40E_PFINT_DYN_CTL0,
1932 I40E_PFINT_DYN_CTL0_INTENA_MASK |
1933 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
1934 (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
1935 ixl_flush(sc);
1936 }
1937
1938 static void
1939 ixl_disable_other_intr(struct ixl_softc *sc)
1940 {
1941
1942 ixl_wr(sc, I40E_PFINT_DYN_CTL0,
1943 (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
1944 ixl_flush(sc);
1945 }
1946
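/*
 * Re-program the VSI and rebuild the HMC contexts for every queue
 * pair, then request RX/TX queue enable and wait for the hardware to
 * report each queue as enabled.
 */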
1947 static int
1948 ixl_reinit(struct ixl_softc *sc)
1949 {
1950 struct ixl_rx_ring *rxr;
1951 struct ixl_tx_ring *txr;
1952 unsigned int i;
1953 uint32_t reg;
1954
1955 KASSERT(mutex_owned(&sc->sc_cfg_lock));
1956
1957 if (ixl_get_vsi(sc) != 0)
1958 return EIO;
1959
1960 if (ixl_set_vsi(sc) != 0)
1961 return EIO;
1962
1963 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1964 txr = sc->sc_qps[i].qp_txr;
1965 rxr = sc->sc_qps[i].qp_rxr;
1966
1967 ixl_txr_config(sc, txr);
1968 ixl_rxr_config(sc, rxr);
1969 }
1970
1971 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
1972 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_PREWRITE);
1973
1974 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1975 txr = sc->sc_qps[i].qp_txr;
1976 rxr = sc->sc_qps[i].qp_rxr;
1977
1978 ixl_wr(sc, I40E_QTX_CTL(i), I40E_QTX_CTL_PF_QUEUE |
1979 (sc->sc_pf_id << I40E_QTX_CTL_PF_INDX_SHIFT));
1980 ixl_flush(sc);
1981
1982 ixl_wr(sc, txr->txr_tail, txr->txr_prod);
1983 ixl_wr(sc, rxr->rxr_tail, rxr->rxr_prod);
1984
1985 /* ixl_rxfill() needs the ring lock held */
1986 mutex_enter(&rxr->rxr_lock);
1987 ixl_rxfill(sc, rxr);
1988 mutex_exit(&rxr->rxr_lock);
1989
1990 reg = ixl_rd(sc, I40E_QRX_ENA(i));
1991 SET(reg, I40E_QRX_ENA_QENA_REQ_MASK);
1992 ixl_wr(sc, I40E_QRX_ENA(i), reg);
1993 if (ixl_rxr_enabled(sc, rxr) != 0)
1994 goto stop;
1995
1996 ixl_txr_qdis(sc, txr, 1);
1997
1998 reg = ixl_rd(sc, I40E_QTX_ENA(i));
1999 SET(reg, I40E_QTX_ENA_QENA_REQ_MASK);
2000 ixl_wr(sc, I40E_QTX_ENA(i), reg);
2001
2002 if (ixl_txr_enabled(sc, txr) != 0)
2003 goto stop;
2004 }
2005
2006 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
2007 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE);
2008
2009 return 0;
2010
2011 stop:
2012 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
2013 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE);
2014
2015 return ETIMEDOUT;
2016 }
2017
2018 static int
2019 ixl_init_locked(struct ixl_softc *sc)
2020 {
2021 struct ifnet *ifp = &sc->sc_ec.ec_if;
2022 unsigned int i;
2023 int error, eccap_change;
2024
2025 KASSERT(mutex_owned(&sc->sc_cfg_lock));
2026
2027 if (ISSET(ifp->if_flags, IFF_RUNNING))
2028 ixl_stop_locked(sc);
2029
2030 if (sc->sc_dead) {
2031 return ENXIO;
2032 }
2033
2034 eccap_change = sc->sc_ec.ec_capenable ^ sc->sc_cur_ec_capenable;
2035 if (ISSET(eccap_change, ETHERCAP_VLAN_HWTAGGING))
2036 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWTAGGING;
2037
2038 if (ISSET(eccap_change, ETHERCAP_VLAN_HWFILTER)) {
2039 if (ixl_update_macvlan(sc) == 0) {
2040 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWFILTER;
2041 } else {
2042 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
2043 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER);
2044 }
2045 }
2046
2047 if (sc->sc_intrtype != PCI_INTR_TYPE_MSIX)
2048 sc->sc_nqueue_pairs = 1;
2049 else
2050 sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max;
2051
2052 error = ixl_reinit(sc);
2053 if (error) {
2054 ixl_stop_locked(sc);
2055 return error;
2056 }
2057
2058 SET(ifp->if_flags, IFF_RUNNING);
2059 CLR(ifp->if_flags, IFF_OACTIVE);
2060
2061 ixl_config_rss(sc);
2062 ixl_config_queue_intr(sc);
2063
2064 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2065 ixl_enable_queue_intr(sc, &sc->sc_qps[i]);
2066 }
2067
2068 error = ixl_iff(sc);
2069 if (error) {
2070 ixl_stop_locked(sc);
2071 return error;
2072 }
2073
2074 callout_schedule(&sc->sc_stats_callout, mstohz(sc->sc_stats_intval));
2075
2076 return 0;
2077 }
2078
2079 static int
2080 ixl_init(struct ifnet *ifp)
2081 {
2082 struct ixl_softc *sc = ifp->if_softc;
2083 int error;
2084
2085 mutex_enter(&sc->sc_cfg_lock);
2086 error = ixl_init_locked(sc);
2087 mutex_exit(&sc->sc_cfg_lock);
2088
2089 if (error == 0)
2090 (void)ixl_get_link_status(sc);
2091
2092 return error;
2093 }
2094
2095 static int
2096 ixl_iff(struct ixl_softc *sc)
2097 {
2098 struct ifnet *ifp = &sc->sc_ec.ec_if;
2099 struct ixl_atq iatq;
2100 struct ixl_aq_desc *iaq;
2101 struct ixl_aq_vsi_promisc_param *param;
2102 uint16_t flag_add, flag_del;
2103 int error;
2104
2105 if (!ISSET(ifp->if_flags, IFF_RUNNING))
2106 return 0;
2107
2108 memset(&iatq, 0, sizeof(iatq));
2109
2110 iaq = &iatq.iatq_desc;
2111 iaq->iaq_opcode = htole16(IXL_AQ_OP_SET_VSI_PROMISC);
2112
2113 param = (struct ixl_aq_vsi_promisc_param *)&iaq->iaq_param;
2114 param->flags = htole16(0);
2115
2116 if (!ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)
2117 || ISSET(ifp->if_flags, IFF_PROMISC)) {
2118 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_BCAST |
2119 IXL_AQ_VSI_PROMISC_FLAG_VLAN);
2120 }
2121
2122 if (ISSET(ifp->if_flags, IFF_PROMISC)) {
2123 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
2124 IXL_AQ_VSI_PROMISC_FLAG_MCAST);
2125 } else if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
2126 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_MCAST);
2127 }
2128 param->valid_flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
2129 IXL_AQ_VSI_PROMISC_FLAG_MCAST | IXL_AQ_VSI_PROMISC_FLAG_BCAST |
2130 IXL_AQ_VSI_PROMISC_FLAG_VLAN);
2131 param->seid = sc->sc_seid;
2132
2133 error = ixl_atq_exec(sc, &iatq);
2134 if (error)
2135 return error;
2136
2137 if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK))
2138 return EIO;
2139
2140 if (memcmp(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN) != 0) {
2141 if (ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)) {
2142 flag_add = IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH;
2143 flag_del = IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH;
2144 } else {
2145 flag_add = IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN;
2146 flag_del = IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN;
2147 }
2148
2149 ixl_remove_macvlan(sc, sc->sc_enaddr, 0, flag_del);
2150
2151 memcpy(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN);
2152 ixl_add_macvlan(sc, sc->sc_enaddr, 0, flag_add);
2153 }
2154 return 0;
2155 }
2156
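/*
 * Shutdown order: mask the per-queue interrupts, request TX queue
 * disable via GLLAN_TXPRE_QDIS, clear the queue enable bits, wait for
 * the disables to take effect, drain the deferred handlers, and only
 * then tear down the HMC contexts and free the pending mbufs.
 */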
2157 static void
2158 ixl_stop_locked(struct ixl_softc *sc)
2159 {
2160 struct ifnet *ifp = &sc->sc_ec.ec_if;
2161 struct ixl_rx_ring *rxr;
2162 struct ixl_tx_ring *txr;
2163 unsigned int i;
2164 uint32_t reg;
2165
2166 KASSERT(mutex_owned(&sc->sc_cfg_lock));
2167
2168 CLR(ifp->if_flags, IFF_RUNNING | IFF_OACTIVE);
2169 callout_stop(&sc->sc_stats_callout);
2170
2171 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2172 txr = sc->sc_qps[i].qp_txr;
2173 rxr = sc->sc_qps[i].qp_rxr;
2174
2175 ixl_disable_queue_intr(sc, &sc->sc_qps[i]);
2176
2177 mutex_enter(&txr->txr_lock);
2178 ixl_txr_qdis(sc, txr, 0);
2179 mutex_exit(&txr->txr_lock);
2180 }
2181
2182 /* XXX wait at least 400 usec for all tx queues in one go */
2183 ixl_flush(sc);
2184 DELAY(500);
2185
2186 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2187 txr = sc->sc_qps[i].qp_txr;
2188 rxr = sc->sc_qps[i].qp_rxr;
2189
2190 mutex_enter(&txr->txr_lock);
2191 reg = ixl_rd(sc, I40E_QTX_ENA(i));
2192 CLR(reg, I40E_QTX_ENA_QENA_REQ_MASK);
2193 ixl_wr(sc, I40E_QTX_ENA(i), reg);
2194 mutex_exit(&txr->txr_lock);
2195
2196 mutex_enter(&rxr->rxr_lock);
2197 reg = ixl_rd(sc, I40E_QRX_ENA(i));
2198 CLR(reg, I40E_QRX_ENA_QENA_REQ_MASK);
2199 ixl_wr(sc, I40E_QRX_ENA(i), reg);
2200 mutex_exit(&rxr->rxr_lock);
2201 }
2202
2203 /* XXX short wait for all queue disables to settle */
2204 ixl_flush(sc);
2205 DELAY(50);
2206
2207 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2208 txr = sc->sc_qps[i].qp_txr;
2209 rxr = sc->sc_qps[i].qp_rxr;
2210
2211 mutex_enter(&txr->txr_lock);
2212 if (ixl_txr_disabled(sc, txr) != 0) {
2213 mutex_exit(&txr->txr_lock);
2214 goto die;
2215 }
2216 mutex_exit(&txr->txr_lock);
2217
2218 mutex_enter(&rxr->rxr_lock);
2219 if (ixl_rxr_disabled(sc, rxr) != 0) {
2220 mutex_exit(&rxr->rxr_lock);
2221 goto die;
2222 }
2223 mutex_exit(&rxr->rxr_lock);
2224 }
2225
2226 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2227 sc->sc_qps[i].qp_workqueue = false;
2228 workqueue_wait(sc->sc_workq_txrx,
2229 &sc->sc_qps[i].qp_work);
2230 }
2231
2232 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2233 txr = sc->sc_qps[i].qp_txr;
2234 rxr = sc->sc_qps[i].qp_rxr;
2235
2236 mutex_enter(&txr->txr_lock);
2237 ixl_txr_unconfig(sc, txr);
2238 mutex_exit(&txr->txr_lock);
2239
2240 mutex_enter(&rxr->rxr_lock);
2241 ixl_rxr_unconfig(sc, rxr);
2242 mutex_exit(&rxr->rxr_lock);
2243
2244 ixl_txr_clean(sc, txr);
2245 ixl_rxr_clean(sc, rxr);
2246 }
2247
2248 return;
2249 die:
2250 sc->sc_dead = true;
2251 log(LOG_CRIT, "%s: failed to shut down rings\n",
2252 device_xname(sc->sc_dev));
2253 return;
2254 }
2255
2256 static void
2257 ixl_stop(struct ifnet *ifp, int disable)
2258 {
2259 struct ixl_softc *sc = ifp->if_softc;
2260
2261 mutex_enter(&sc->sc_cfg_lock);
2262 ixl_stop_locked(sc);
2263 mutex_exit(&sc->sc_cfg_lock);
2264 }
2265
2266 static int
2267 ixl_queue_pairs_alloc(struct ixl_softc *sc)
2268 {
2269 struct ixl_queue_pair *qp;
2270 unsigned int i;
2271 size_t sz;
2272
2273 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2274 sc->sc_qps = kmem_zalloc(sz, KM_SLEEP);
2275
2276 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2277 qp = &sc->sc_qps[i];
2278
2279 qp->qp_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
2280 ixl_handle_queue, qp);
2281 if (qp->qp_si == NULL)
2282 goto free;
2283
2284 qp->qp_txr = ixl_txr_alloc(sc, i);
2285 if (qp->qp_txr == NULL)
2286 goto free;
2287
2288 qp->qp_rxr = ixl_rxr_alloc(sc, i);
2289 if (qp->qp_rxr == NULL)
2290 goto free;
2291
2292 qp->qp_sc = sc;
2293 snprintf(qp->qp_name, sizeof(qp->qp_name),
2294 "%s-TXRX%d", device_xname(sc->sc_dev), i);
2295 }
2296
2297 return 0;
2298 free:
2299 if (sc->sc_qps != NULL) {
2300 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2301 qp = &sc->sc_qps[i];
2302
2303 if (qp->qp_txr != NULL)
2304 ixl_txr_free(sc, qp->qp_txr);
2305 if (qp->qp_rxr != NULL)
2306 ixl_rxr_free(sc, qp->qp_rxr);
2307 if (qp->qp_si != NULL)
2308 softint_disestablish(qp->qp_si);
2309 }
2310
2311 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2312 kmem_free(sc->sc_qps, sz);
2313 sc->sc_qps = NULL;
2314 }
2315
2316 return -1;
2317 }
2318
2319 static void
2320 ixl_queue_pairs_free(struct ixl_softc *sc)
2321 {
2322 struct ixl_queue_pair *qp;
2323 unsigned int i;
2324 size_t sz;
2325
2326 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2327 qp = &sc->sc_qps[i];
2328 ixl_txr_free(sc, qp->qp_txr);
2329 ixl_rxr_free(sc, qp->qp_rxr);
2330 softint_disestablish(qp->qp_si);
2331 }
2332
2333 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2334 kmem_free(sc->sc_qps, sz);
2335 sc->sc_qps = NULL;
2336 }
2337
2338 static struct ixl_tx_ring *
2339 ixl_txr_alloc(struct ixl_softc *sc, unsigned int qid)
2340 {
2341 struct ixl_tx_ring *txr = NULL;
2342 struct ixl_tx_map *maps = NULL, *txm;
2343 unsigned int i;
2344
2345 txr = kmem_zalloc(sizeof(*txr), KM_SLEEP);
2346 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_tx_ring_ndescs,
2347 KM_SLEEP);
2348
2349 if (ixl_dmamem_alloc(sc, &txr->txr_mem,
2350 sizeof(struct ixl_tx_desc) * sc->sc_tx_ring_ndescs,
2351 IXL_TX_QUEUE_ALIGN) != 0)
2352 goto free;
2353
2354 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2355 txm = &maps[i];
2356
2357 if (bus_dmamap_create(sc->sc_dmat, IXL_TX_PKT_MAXSIZE,
2358 IXL_TX_PKT_DESCS, IXL_TX_PKT_MAXSIZE, 0,
2359 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &txm->txm_map) != 0)
2360 goto uncreate;
2361
2362 txm->txm_eop = -1;
2363 txm->txm_m = NULL;
2364 }
2365
2366 txr->txr_cons = txr->txr_prod = 0;
2367 txr->txr_maps = maps;
2368
2369 txr->txr_intrq = pcq_create(sc->sc_tx_ring_ndescs, KM_NOSLEEP);
2370 if (txr->txr_intrq == NULL)
2371 goto uncreate;
2372
2373 txr->txr_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
2374 ixl_deferred_transmit, txr);
2375 if (txr->txr_si == NULL)
2376 goto destroy_pcq;
2377
2378 txr->txr_tail = I40E_QTX_TAIL(qid);
2379 txr->txr_qid = qid;
2380 txr->txr_sc = sc;
2381 mutex_init(&txr->txr_lock, MUTEX_DEFAULT, IPL_NET);
2382
2383 return txr;
2384
2385 destroy_pcq:
2386 pcq_destroy(txr->txr_intrq);
2387 uncreate:
2388 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2389 txm = &maps[i];
2390
2391 if (txm->txm_map == NULL)
2392 continue;
2393
2394 bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2395 }
2396
2397 ixl_dmamem_free(sc, &txr->txr_mem);
2398 free:
2399 kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs);
2400 kmem_free(txr, sizeof(*txr));
2401
2402 return NULL;
2403 }
2404
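/*
 * The GLLAN_TXPRE_QDIS registers are global and indexed by absolute
 * queue number, 128 queues per register, so the ring's queue id is
 * offset by sc_base_queue before being split into a register index
 * and a queue index.
 */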
2405 static void
2406 ixl_txr_qdis(struct ixl_softc *sc, struct ixl_tx_ring *txr, int enable)
2407 {
2408 unsigned int qid;
2409 bus_size_t reg;
2410 uint32_t r;
2411
2412 qid = txr->txr_qid + sc->sc_base_queue;
2413 reg = I40E_GLLAN_TXPRE_QDIS(qid / 128);
2414 qid %= 128;
2415
2416 r = ixl_rd(sc, reg);
2417 CLR(r, I40E_GLLAN_TXPRE_QDIS_QINDX_MASK);
2418 SET(r, qid << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
2419 SET(r, enable ? I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK :
2420 I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK);
2421 ixl_wr(sc, reg, r);
2422 }
2423
2424 static void
2425 ixl_txr_config(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2426 {
2427 struct ixl_hmc_txq txq;
2428 struct ixl_aq_vsi_data *data = IXL_DMA_KVA(&sc->sc_scratch);
2429 void *hmc;
2430
2431 memset(&txq, 0, sizeof(txq));
2432 txq.head = htole16(txr->txr_cons);
2433 txq.new_context = 1;
2434 txq.base = htole64(IXL_DMA_DVA(&txr->txr_mem) / IXL_HMC_TXQ_BASE_UNIT);
2435 txq.head_wb_ena = IXL_HMC_TXQ_DESC_WB;
2436 txq.qlen = htole16(sc->sc_tx_ring_ndescs);
2437 txq.tphrdesc_ena = 0;
2438 txq.tphrpacket_ena = 0;
2439 txq.tphwdesc_ena = 0;
2440 txq.rdylist = data->qs_handle[0];
2441
2442 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2443 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2444 ixl_hmc_pack(hmc, &txq, ixl_hmc_pack_txq,
2445 __arraycount(ixl_hmc_pack_txq));
2446 }
2447
2448 static void
2449 ixl_txr_unconfig(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2450 {
2451 void *hmc;
2452
2453 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2454 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2455 txr->txr_cons = txr->txr_prod = 0;
2456 }
2457
2458 static void
2459 ixl_txr_clean(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2460 {
2461 struct ixl_tx_map *maps, *txm;
2462 bus_dmamap_t map;
2463 unsigned int i;
2464
2465 maps = txr->txr_maps;
2466 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2467 txm = &maps[i];
2468
2469 if (txm->txm_m == NULL)
2470 continue;
2471
2472 map = txm->txm_map;
2473 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2474 BUS_DMASYNC_POSTWRITE);
2475 bus_dmamap_unload(sc->sc_dmat, map);
2476
2477 m_freem(txm->txm_m);
2478 txm->txm_m = NULL;
2479 }
2480 }
2481
2482 static int
2483 ixl_txr_enabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2484 {
2485 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2486 uint32_t reg;
2487 int i;
2488
2489 for (i = 0; i < 10; i++) {
2490 reg = ixl_rd(sc, ena);
2491 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK))
2492 return 0;
2493
2494 delaymsec(10);
2495 }
2496
2497 return ETIMEDOUT;
2498 }
2499
2500 static int
2501 ixl_txr_disabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2502 {
2503 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2504 uint32_t reg;
2505 int i;
2506
2507 KASSERT(mutex_owned(&txr->txr_lock));
2508
2509 for (i = 0; i < 10; i++) {
2510 reg = ixl_rd(sc, ena);
2511 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK) == 0)
2512 return 0;
2513
2514 delaymsec(10);
2515 }
2516
2517 return ETIMEDOUT;
2518 }
2519
2520 static void
2521 ixl_txr_free(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2522 {
2523 struct ixl_tx_map *maps, *txm;
2524 struct mbuf *m;
2525 unsigned int i;
2526
2527 softint_disestablish(txr->txr_si);
2528
2529 maps = txr->txr_maps;
2530 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2531 txm = &maps[i];
2532
2533 bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2534 }
2535
2536 while ((m = pcq_get(txr->txr_intrq)) != NULL)
2537 m_freem(m);
2538 pcq_destroy(txr->txr_intrq);
2539
2540 ixl_dmamem_free(sc, &txr->txr_mem);
2541 mutex_destroy(&txr->txr_lock);
2542 kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs);
2543 kmem_free(txr, sizeof(*txr));
2544 }
2545
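/*
 * Load an mbuf chain for transmission.  If the chain has too many
 * segments for the DMA map (EFBIG), defragment it into a single
 * cluster and retry the load once.
 */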
2546 static inline int
2547 ixl_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf **m0,
2548 struct ixl_tx_ring *txr)
2549 {
2550 struct mbuf *m;
2551 int error;
2552
2553 KASSERT(mutex_owned(&txr->txr_lock));
2554
2555 m = *m0;
2556
2557 error = bus_dmamap_load_mbuf(dmat, map, m,
2558 BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT);
2559 if (error != EFBIG)
2560 return error;
2561
2562 m = m_defrag(m, M_DONTWAIT);
2563 if (m != NULL) {
2564 *m0 = m;
2565 txr->txr_defragged.ev_count++;
2566
2567 error = bus_dmamap_load_mbuf(dmat, map, m,
2568 BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT);
2569 } else {
2570 txr->txr_defrag_failed.ev_count++;
2571 error = ENOBUFS;
2572 }
2573
2574 return error;
2575 }
2576
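/*
 * Build the checksum/TSO offload bits for a data descriptor.  The
 * hardware expects the MAC header length in 2-byte words and the IP
 * and L4 header lengths in 4-byte words, hence the >> 1 and >> 2
 * shifts below.
 */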
2577 static inline int
2578 ixl_tx_setup_offloads(struct mbuf *m, uint64_t *cmd_txd)
2579 {
2580 struct ether_header *eh;
2581 size_t len;
2582 uint64_t cmd;
2583
2584 cmd = 0;
2585
2586 eh = mtod(m, struct ether_header *);
2587 switch (htons(eh->ether_type)) {
2588 case ETHERTYPE_IP:
2589 case ETHERTYPE_IPV6:
2590 len = ETHER_HDR_LEN;
2591 break;
2592 case ETHERTYPE_VLAN:
2593 len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2594 break;
2595 default:
2596 len = 0;
2597 }
2598 cmd |= ((len >> 1) << IXL_TX_DESC_MACLEN_SHIFT);
2599
2600 if (m->m_pkthdr.csum_flags &
2601 (M_CSUM_TSOv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
2602 cmd |= IXL_TX_DESC_CMD_IIPT_IPV4;
2603 }
2604 if (m->m_pkthdr.csum_flags & M_CSUM_IPv4) {
2605 cmd |= IXL_TX_DESC_CMD_IIPT_IPV4_CSUM;
2606 }
2607
2608 if (m->m_pkthdr.csum_flags &
2609 (M_CSUM_TSOv6 | M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
2610 cmd |= IXL_TX_DESC_CMD_IIPT_IPV6;
2611 }
2612
2613 switch (cmd & IXL_TX_DESC_CMD_IIPT_MASK) {
2614 case IXL_TX_DESC_CMD_IIPT_IPV4:
2615 case IXL_TX_DESC_CMD_IIPT_IPV4_CSUM:
2616 len = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data);
2617 break;
2618 case IXL_TX_DESC_CMD_IIPT_IPV6:
2619 len = M_CSUM_DATA_IPv6_IPHL(m->m_pkthdr.csum_data);
2620 break;
2621 default:
2622 len = 0;
2623 }
2624 cmd |= ((len >> 2) << IXL_TX_DESC_IPLEN_SHIFT);
2625
2626 if (m->m_pkthdr.csum_flags &
2627 (M_CSUM_TSOv4 | M_CSUM_TSOv6 | M_CSUM_TCPv4 | M_CSUM_TCPv6)) {
2628 len = sizeof(struct tcphdr);
2629 cmd |= IXL_TX_DESC_CMD_L4T_EOFT_TCP;
2630 } else if (m->m_pkthdr.csum_flags & (M_CSUM_UDPv4 | M_CSUM_UDPv6)) {
2631 len = sizeof(struct udphdr);
2632 cmd |= IXL_TX_DESC_CMD_L4T_EOFT_UDP;
2633 } else {
2634 len = 0;
2635 }
2636 cmd |= ((len >> 2) << IXL_TX_DESC_L4LEN_SHIFT);
2637
2638 *cmd_txd |= cmd;
2639 return 0;
2640 }
2641
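/*
 * Common transmit path shared by if_start (is_transmit == false) and
 * if_transmit (is_transmit == true).  Packets are pulled from if_snd
 * or from the per-ring pcq, loaded into DMA maps and turned into data
 * descriptors; the tail register is written once after the loop.
 */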
2642 static void
2643 ixl_tx_common_locked(struct ifnet *ifp, struct ixl_tx_ring *txr,
2644 bool is_transmit)
2645 {
2646 struct ixl_softc *sc = ifp->if_softc;
2647 struct ixl_tx_desc *ring, *txd;
2648 struct ixl_tx_map *txm;
2649 bus_dmamap_t map;
2650 struct mbuf *m;
2651 uint64_t cmd, cmd_txd;
2652 unsigned int prod, free, last, i;
2653 unsigned int mask;
2654 int post = 0;
2655
2656 KASSERT(mutex_owned(&txr->txr_lock));
2657
2658 if (!ISSET(ifp->if_flags, IFF_RUNNING)
2659 || (!is_transmit && ISSET(ifp->if_flags, IFF_OACTIVE))) {
2660 if (!is_transmit)
2661 IFQ_PURGE(&ifp->if_snd);
2662 return;
2663 }
2664
2665 prod = txr->txr_prod;
2666 free = txr->txr_cons;
2667 if (free <= prod)
2668 free += sc->sc_tx_ring_ndescs;
2669 free -= prod;
2670
2671 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2672 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTWRITE);
2673
2674 ring = IXL_DMA_KVA(&txr->txr_mem);
2675 mask = sc->sc_tx_ring_ndescs - 1;
2676 last = prod;
2677 cmd = 0;
2678 txd = NULL;
2679
2680 for (;;) {
2681 if (free <= IXL_TX_PKT_DESCS) {
2682 if (!is_transmit)
2683 SET(ifp->if_flags, IFF_OACTIVE);
2684 break;
2685 }
2686
2687 if (is_transmit)
2688 m = pcq_get(txr->txr_intrq);
2689 else
2690 IFQ_DEQUEUE(&ifp->if_snd, m);
2691
2692 if (m == NULL)
2693 break;
2694
2695 txm = &txr->txr_maps[prod];
2696 map = txm->txm_map;
2697
2698 if (ixl_load_mbuf(sc->sc_dmat, map, &m, txr) != 0) {
2699 if_statinc(ifp, if_oerrors);
2700 m_freem(m);
2701 continue;
2702 }
2703
2704 cmd_txd = 0;
2705 if (m->m_pkthdr.csum_flags & IXL_CSUM_ALL_OFFLOAD) {
2706 ixl_tx_setup_offloads(m, &cmd_txd);
2707 }
2708
2709 if (vlan_has_tag(m)) {
2710 uint16_t vtag;
2711 vtag = htole16(vlan_get_tag(m));
2712 cmd_txd |= (uint64_t)vtag <<
2713 IXL_TX_DESC_L2TAG1_SHIFT;
2714 cmd_txd |= IXL_TX_DESC_CMD_IL2TAG1;
2715 }
2716
2717 bus_dmamap_sync(sc->sc_dmat, map, 0,
2718 map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2719
2720 for (i = 0; i < (unsigned int)map->dm_nsegs; i++) {
2721 txd = &ring[prod];
2722
2723 cmd = (uint64_t)map->dm_segs[i].ds_len <<
2724 IXL_TX_DESC_BSIZE_SHIFT;
2725 cmd |= IXL_TX_DESC_DTYPE_DATA | IXL_TX_DESC_CMD_ICRC;
2726 cmd |= cmd_txd;
2727
2728 txd->addr = htole64(map->dm_segs[i].ds_addr);
2729 txd->cmd = htole64(cmd);
2730
2731 last = prod;
2732
2733 prod++;
2734 prod &= mask;
2735 }
2736 cmd |= IXL_TX_DESC_CMD_EOP | IXL_TX_DESC_CMD_RS;
2737 txd->cmd = htole64(cmd);
2738
2739 txm->txm_m = m;
2740 txm->txm_eop = last;
2741
2742 bpf_mtap(ifp, m, BPF_D_OUT);
2743
2744 free -= i;
2745 post = 1;
2746 }
2747
2748 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2749 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREWRITE);
2750
2751 if (post) {
2752 txr->txr_prod = prod;
2753 ixl_wr(sc, txr->txr_tail, prod);
2754 }
2755 }
2756
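/*
 * Reclaim completed transmit descriptors.  Completion is detected by
 * the descriptor type field having been rewritten to DONE, so walk
 * from txr_cons until a descriptor that is still pending, or until
 * txlimit is exhausted, in which case "more" is returned so the
 * caller reschedules the handler.
 */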
2757 static int
2758 ixl_txeof(struct ixl_softc *sc, struct ixl_tx_ring *txr, u_int txlimit)
2759 {
2760 struct ifnet *ifp = &sc->sc_ec.ec_if;
2761 struct ixl_tx_desc *ring, *txd;
2762 struct ixl_tx_map *txm;
2763 struct mbuf *m;
2764 bus_dmamap_t map;
2765 unsigned int cons, prod, last;
2766 unsigned int mask;
2767 uint64_t dtype;
2768 int done = 0, more = 0;
2769
2770 KASSERT(mutex_owned(&txr->txr_lock));
2771
2772 prod = txr->txr_prod;
2773 cons = txr->txr_cons;
2774
2775 if (cons == prod)
2776 return 0;
2777
2778 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2779 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTREAD);
2780
2781 ring = IXL_DMA_KVA(&txr->txr_mem);
2782 mask = sc->sc_tx_ring_ndescs - 1;
2783
2784 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
2785
2786 do {
2787 if (txlimit-- <= 0) {
2788 more = 1;
2789 break;
2790 }
2791
2792 txm = &txr->txr_maps[cons];
2793 last = txm->txm_eop;
2794 txd = &ring[last];
2795
2796 dtype = txd->cmd & htole64(IXL_TX_DESC_DTYPE_MASK);
2797 if (dtype != htole64(IXL_TX_DESC_DTYPE_DONE))
2798 break;
2799
2800 map = txm->txm_map;
2801
2802 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2803 BUS_DMASYNC_POSTWRITE);
2804 bus_dmamap_unload(sc->sc_dmat, map);
2805
2806 m = txm->txm_m;
2807 if (m != NULL) {
2808 if_statinc_ref(nsr, if_opackets);
2809 if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
2810 if (ISSET(m->m_flags, M_MCAST))
2811 if_statinc_ref(nsr, if_omcasts);
2812 m_freem(m);
2813 }
2814
2815 txm->txm_m = NULL;
2816 txm->txm_eop = -1;
2817
2818 cons = last + 1;
2819 cons &= mask;
2820 done = 1;
2821 } while (cons != prod);
2822
2823 IF_STAT_PUTREF(ifp);
2824
2825 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2826 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREREAD);
2827
2828 txr->txr_cons = cons;
2829
2830 if (done) {
2831 softint_schedule(txr->txr_si);
2832 if (txr->txr_qid == 0) {
2833 CLR(ifp->if_flags, IFF_OACTIVE);
2834 if_schedule_deferred_start(ifp);
2835 }
2836 }
2837
2838 return more;
2839 }
2840
2841 static void
2842 ixl_start(struct ifnet *ifp)
2843 {
2844 struct ixl_softc *sc;
2845 struct ixl_tx_ring *txr;
2846
2847 sc = ifp->if_softc;
2848 txr = sc->sc_qps[0].qp_txr;
2849
2850 mutex_enter(&txr->txr_lock);
2851 ixl_tx_common_locked(ifp, txr, false);
2852 mutex_exit(&txr->txr_lock);
2853 }
2854
2855 static inline unsigned int
2856 ixl_select_txqueue(struct ixl_softc *sc, struct mbuf *m)
2857 {
2858 u_int cpuid;
2859
2860 cpuid = cpu_index(curcpu());
2861
2862 return (unsigned int)(cpuid % sc->sc_nqueue_pairs);
2863 }
2864
2865 static int
2866 ixl_transmit(struct ifnet *ifp, struct mbuf *m)
2867 {
2868 struct ixl_softc *sc;
2869 struct ixl_tx_ring *txr;
2870 unsigned int qid;
2871
2872 sc = ifp->if_softc;
2873 qid = ixl_select_txqueue(sc, m);
2874
2875 txr = sc->sc_qps[qid].qp_txr;
2876
2877 if (__predict_false(!pcq_put(txr->txr_intrq, m))) {
2878 mutex_enter(&txr->txr_lock);
2879 txr->txr_pcqdrop.ev_count++;
2880 mutex_exit(&txr->txr_lock);
2881
2882 m_freem(m);
2883 return ENOBUFS;
2884 }
2885
2886 #ifdef IXL_ALWAYS_TXDEFER
2887 kpreempt_disable();
2888 softint_schedule(txr->txr_si);
2889 kpreempt_enable();
2890 #else
2891 if (mutex_tryenter(&txr->txr_lock)) {
2892 ixl_tx_common_locked(ifp, txr, true);
2893 mutex_exit(&txr->txr_lock);
2894 } else {
2895 kpreempt_disable();
2896 softint_schedule(txr->txr_si);
2897 kpreempt_enable();
2898 }
2899 #endif
2900
2901 return 0;
2902 }
2903
2904 static void
2905 ixl_deferred_transmit(void *xtxr)
2906 {
2907 struct ixl_tx_ring *txr = xtxr;
2908 struct ixl_softc *sc = txr->txr_sc;
2909 struct ifnet *ifp = &sc->sc_ec.ec_if;
2910
2911 mutex_enter(&txr->txr_lock);
2912 txr->txr_transmitdef.ev_count++;
2913 if (pcq_peek(txr->txr_intrq) != NULL)
2914 ixl_tx_common_locked(ifp, txr, true);
2915 mutex_exit(&txr->txr_lock);
2916 }
2917
2918 static struct ixl_rx_ring *
2919 ixl_rxr_alloc(struct ixl_softc *sc, unsigned int qid)
2920 {
2921 struct ixl_rx_ring *rxr = NULL;
2922 struct ixl_rx_map *maps = NULL, *rxm;
2923 unsigned int i;
2924
2925 rxr = kmem_zalloc(sizeof(*rxr), KM_SLEEP);
2926 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_rx_ring_ndescs,
2927 KM_SLEEP);
2928
2929 if (ixl_dmamem_alloc(sc, &rxr->rxr_mem,
2930 sizeof(struct ixl_rx_rd_desc_32) * sc->sc_rx_ring_ndescs,
2931 IXL_RX_QUEUE_ALIGN) != 0)
2932 goto free;
2933
2934 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2935 rxm = &maps[i];
2936
2937 if (bus_dmamap_create(sc->sc_dmat,
2938 IXL_MCLBYTES, 1, IXL_MCLBYTES, 0,
2939 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &rxm->rxm_map) != 0)
2940 goto uncreate;
2941
2942 rxm->rxm_m = NULL;
2943 }
2944
2945 rxr->rxr_cons = rxr->rxr_prod = 0;
2946 rxr->rxr_m_head = NULL;
2947 rxr->rxr_m_tail = &rxr->rxr_m_head;
2948 rxr->rxr_maps = maps;
2949
2950 rxr->rxr_tail = I40E_QRX_TAIL(qid);
2951 rxr->rxr_qid = qid;
2952 mutex_init(&rxr->rxr_lock, MUTEX_DEFAULT, IPL_NET);
2953
2954 return rxr;
2955
2956 uncreate:
2957 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2958 rxm = &maps[i];
2959
2960 if (rxm->rxm_map == NULL)
2961 continue;
2962
2963 bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
2964 }
2965
2966 ixl_dmamem_free(sc, &rxr->rxr_mem);
2967 free:
2968 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs);
2969 kmem_free(rxr, sizeof(*rxr));
2970
2971 return NULL;
2972 }
2973
2974 static void
2975 ixl_rxr_clean(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2976 {
2977 struct ixl_rx_map *maps, *rxm;
2978 bus_dmamap_t map;
2979 unsigned int i;
2980
2981 maps = rxr->rxr_maps;
2982 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2983 rxm = &maps[i];
2984
2985 if (rxm->rxm_m == NULL)
2986 continue;
2987
2988 map = rxm->rxm_map;
2989 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2990 BUS_DMASYNC_POSTWRITE);
2991 bus_dmamap_unload(sc->sc_dmat, map);
2992
2993 m_freem(rxm->rxm_m);
2994 rxm->rxm_m = NULL;
2995 }
2996
2997 m_freem(rxr->rxr_m_head);
2998 rxr->rxr_m_head = NULL;
2999 rxr->rxr_m_tail = &rxr->rxr_m_head;
3000
3001 rxr->rxr_prod = rxr->rxr_cons = 0;
3002 }
3003
3004 static int
3005 ixl_rxr_enabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3006 {
3007 bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
3008 uint32_t reg;
3009 int i;
3010
3011 for (i = 0; i < 10; i++) {
3012 reg = ixl_rd(sc, ena);
3013 if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK))
3014 return 0;
3015
3016 delaymsec(10);
3017 }
3018
3019 return ETIMEDOUT;
3020 }
3021
3022 static int
3023 ixl_rxr_disabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3024 {
3025 bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
3026 uint32_t reg;
3027 int i;
3028
3029 KASSERT(mutex_owned(&rxr->rxr_lock));
3030
3031 for (i = 0; i < 10; i++) {
3032 reg = ixl_rd(sc, ena);
3033 if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK) == 0)
3034 return 0;
3035
3036 delaymsec(10);
3037 }
3038
3039 return ETIMEDOUT;
3040 }
3041
3042 static void
3043 ixl_rxr_config(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3044 {
3045 struct ixl_hmc_rxq rxq;
3046 struct ifnet *ifp = &sc->sc_ec.ec_if;
3047 uint16_t rxmax;
3048 void *hmc;
3049
3050 memset(&rxq, 0, sizeof(rxq));
3051 rxmax = ifp->if_mtu + IXL_MTU_ETHERLEN;
3052
3053 rxq.head = htole16(rxr->rxr_cons);
3054 rxq.base = htole64(IXL_DMA_DVA(&rxr->rxr_mem) / IXL_HMC_RXQ_BASE_UNIT);
3055 rxq.qlen = htole16(sc->sc_rx_ring_ndescs);
3056 rxq.dbuff = htole16(IXL_MCLBYTES / IXL_HMC_RXQ_DBUFF_UNIT);
3057 rxq.hbuff = 0;
3058 rxq.dtype = IXL_HMC_RXQ_DTYPE_NOSPLIT;
3059 rxq.dsize = IXL_HMC_RXQ_DSIZE_32;
3060 rxq.crcstrip = 1;
3061 rxq.l2sel = 1;
3062 rxq.showiv = 1;
3063 rxq.rxmax = htole16(rxmax);
3064 rxq.tphrdesc_ena = 0;
3065 rxq.tphwdesc_ena = 0;
3066 rxq.tphdata_ena = 0;
3067 rxq.tphhead_ena = 0;
3068 rxq.lrxqthresh = 0;
3069 rxq.prefena = 1;
3070
3071 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
3072 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
3073 ixl_hmc_pack(hmc, &rxq, ixl_hmc_pack_rxq,
3074 __arraycount(ixl_hmc_pack_rxq));
3075 }
3076
3077 static void
3078 ixl_rxr_unconfig(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3079 {
3080 void *hmc;
3081
3082 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
3083 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
3084 rxr->rxr_cons = rxr->rxr_prod = 0;
3085 }
3086
3087 static void
3088 ixl_rxr_free(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3089 {
3090 struct ixl_rx_map *maps, *rxm;
3091 unsigned int i;
3092
3093 maps = rxr->rxr_maps;
3094 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
3095 rxm = &maps[i];
3096
3097 bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
3098 }
3099
3100 ixl_dmamem_free(sc, &rxr->rxr_mem);
3101 mutex_destroy(&rxr->rxr_lock);
3102 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs);
3103 kmem_free(rxr, sizeof(*rxr));
3104 }
3105
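/*
 * Translate the receive descriptor packet type and error bits into
 * M_CSUM_* flags, marking the checksums as checked and flagging them
 * as bad when the IPE/L4E error bits are set.
 */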
3106 static inline void
3107 ixl_rx_csum(struct mbuf *m, uint64_t qword)
3108 {
3109 int flags_mask;
3110
3111 if (!ISSET(qword, IXL_RX_DESC_L3L4P)) {
3112 /* No L3 or L4 checksum was calculated */
3113 return;
3114 }
3115
3116 switch (__SHIFTOUT(qword, IXL_RX_DESC_PTYPE_MASK)) {
3117 case IXL_RX_DESC_PTYPE_IPV4FRAG:
3118 case IXL_RX_DESC_PTYPE_IPV4:
3119 case IXL_RX_DESC_PTYPE_SCTPV4:
3120 case IXL_RX_DESC_PTYPE_ICMPV4:
3121 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
3122 break;
3123 case IXL_RX_DESC_PTYPE_TCPV4:
3124 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
3125 flags_mask |= M_CSUM_TCPv4 | M_CSUM_TCP_UDP_BAD;
3126 break;
3127 case IXL_RX_DESC_PTYPE_UDPV4:
3128 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
3129 flags_mask |= M_CSUM_UDPv4 | M_CSUM_TCP_UDP_BAD;
3130 break;
3131 case IXL_RX_DESC_PTYPE_TCPV6:
3132 flags_mask = M_CSUM_TCPv6 | M_CSUM_TCP_UDP_BAD;
3133 break;
3134 case IXL_RX_DESC_PTYPE_UDPV6:
3135 flags_mask = M_CSUM_UDPv6 | M_CSUM_TCP_UDP_BAD;
3136 break;
3137 default:
3138 flags_mask = 0;
3139 }
3140
3141 m->m_pkthdr.csum_flags |= (flags_mask & (M_CSUM_IPv4 |
3142 M_CSUM_TCPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv4 | M_CSUM_UDPv6));
3143
3144 if (ISSET(qword, IXL_RX_DESC_IPE)) {
3145 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_IPv4_BAD);
3146 }
3147
3148 if (ISSET(qword, IXL_RX_DESC_L4E)) {
3149 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_TCP_UDP_BAD);
3150 }
3151 }
3152
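/*
 * Receive completion.  Each write-back descriptor carries one mbuf;
 * fragments are chained onto rxr_m_head until the EOP descriptor, at
 * which point the VLAN tag and checksum state are recovered from the
 * descriptor words and the packet is handed to if_percpuq.
 */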
3153 static int
3154 ixl_rxeof(struct ixl_softc *sc, struct ixl_rx_ring *rxr, u_int rxlimit)
3155 {
3156 struct ifnet *ifp = &sc->sc_ec.ec_if;
3157 struct ixl_rx_wb_desc_32 *ring, *rxd;
3158 struct ixl_rx_map *rxm;
3159 bus_dmamap_t map;
3160 unsigned int cons, prod;
3161 struct mbuf *m;
3162 uint64_t word, word0;
3163 unsigned int len;
3164 unsigned int mask;
3165 int done = 0, more = 0;
3166
3167 KASSERT(mutex_owned(&rxr->rxr_lock));
3168
3169 if (!ISSET(ifp->if_flags, IFF_RUNNING))
3170 return 0;
3171
3172 prod = rxr->rxr_prod;
3173 cons = rxr->rxr_cons;
3174
3175 if (cons == prod)
3176 return 0;
3177
3178 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
3179 0, IXL_DMA_LEN(&rxr->rxr_mem),
3180 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3181
3182 ring = IXL_DMA_KVA(&rxr->rxr_mem);
3183 mask = sc->sc_rx_ring_ndescs - 1;
3184
3185 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
3186
3187 do {
3188 if (rxlimit-- <= 0) {
3189 more = 1;
3190 break;
3191 }
3192
3193 rxd = &ring[cons];
3194
3195 word = le64toh(rxd->qword1);
3196
3197 if (!ISSET(word, IXL_RX_DESC_DD))
3198 break;
3199
3200 rxm = &rxr->rxr_maps[cons];
3201
3202 map = rxm->rxm_map;
3203 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3204 BUS_DMASYNC_POSTREAD);
3205 bus_dmamap_unload(sc->sc_dmat, map);
3206
3207 m = rxm->rxm_m;
3208 rxm->rxm_m = NULL;
3209
3210 KASSERT(m != NULL);
3211
3212 len = (word & IXL_RX_DESC_PLEN_MASK) >> IXL_RX_DESC_PLEN_SHIFT;
3213 m->m_len = len;
3214 m->m_pkthdr.len = 0;
3215
3216 m->m_next = NULL;
3217 *rxr->rxr_m_tail = m;
3218 rxr->rxr_m_tail = &m->m_next;
3219
3220 m = rxr->rxr_m_head;
3221 m->m_pkthdr.len += len;
3222
3223 if (ISSET(word, IXL_RX_DESC_EOP)) {
3224 word0 = le64toh(rxd->qword0);
3225
3226 if (ISSET(word, IXL_RX_DESC_L2TAG1P)) {
3227 uint16_t vtag;
3228 vtag = __SHIFTOUT(word0, IXL_RX_DESC_L2TAG1_MASK);
3229 vlan_set_tag(m, le16toh(vtag));
3230 }
3231
3232 if ((ifp->if_capenable & IXL_IFCAP_RXCSUM) != 0)
3233 ixl_rx_csum(m, word);
3234
3235 if (!ISSET(word,
3236 IXL_RX_DESC_RXE | IXL_RX_DESC_OVERSIZE)) {
3237 m_set_rcvif(m, ifp);
3238 if_statinc_ref(nsr, if_ipackets);
3239 if_statadd_ref(nsr, if_ibytes,
3240 m->m_pkthdr.len);
3241 if_percpuq_enqueue(sc->sc_ipq, m);
3242 } else {
3243 if_statinc_ref(nsr, if_ierrors);
3244 m_freem(m);
3245 }
3246
3247 rxr->rxr_m_head = NULL;
3248 rxr->rxr_m_tail = &rxr->rxr_m_head;
3249 }
3250
3251 cons++;
3252 cons &= mask;
3253
3254 done = 1;
3255 } while (cons != prod);
3256
3257 if (done) {
3258 rxr->rxr_cons = cons;
3259 if (ixl_rxfill(sc, rxr) == -1)
3260 if_statinc_ref(nsr, if_iqdrops);
3261 }
3262
3263 IF_STAT_PUTREF(ifp);
3264
3265 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
3266 0, IXL_DMA_LEN(&rxr->rxr_mem),
3267 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3268
3269 return more;
3270 }
3271
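/*
 * Refill the receive ring with fresh mbuf clusters between rxr_prod
 * and rxr_cons.  The tail register is only written when at least one
 * descriptor was posted; -1 is returned if allocation or DMA loading
 * fails so that the caller can account an input queue drop.
 */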
3272 static int
3273 ixl_rxfill(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3274 {
3275 struct ixl_rx_rd_desc_32 *ring, *rxd;
3276 struct ixl_rx_map *rxm;
3277 bus_dmamap_t map;
3278 struct mbuf *m;
3279 unsigned int prod;
3280 unsigned int slots;
3281 unsigned int mask;
3282 int post = 0, error = 0;
3283
3284 KASSERT(mutex_owned(&rxr->rxr_lock));
3285
3286 prod = rxr->rxr_prod;
3287 slots = ixl_rxr_unrefreshed(rxr->rxr_prod, rxr->rxr_cons,
3288 sc->sc_rx_ring_ndescs);
3289
3290 ring = IXL_DMA_KVA(&rxr->rxr_mem);
3291 mask = sc->sc_rx_ring_ndescs - 1;
3292
3293 if (__predict_false(slots <= 0))
3294 return -1;
3295
3296 do {
3297 rxm = &rxr->rxr_maps[prod];
3298
3299 MGETHDR(m, M_DONTWAIT, MT_DATA);
3300 if (m == NULL) {
3301 rxr->rxr_mgethdr_failed.ev_count++;
3302 error = -1;
3303 break;
3304 }
3305
3306 MCLGET(m, M_DONTWAIT);
3307 if (!ISSET(m->m_flags, M_EXT)) {
3308 rxr->rxr_mgetcl_failed.ev_count++;
3309 error = -1;
3310 m_freem(m);
3311 break;
3312 }
3313
3314 m->m_len = m->m_pkthdr.len = MCLBYTES;
3315 m_adj(m, ETHER_ALIGN);
3316
3317 map = rxm->rxm_map;
3318
3319 if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
3320 BUS_DMA_READ | BUS_DMA_NOWAIT) != 0) {
3321 rxr->rxr_mbuf_load_failed.ev_count++;
3322 error = -1;
3323 m_freem(m);
3324 break;
3325 }
3326
3327 rxm->rxm_m = m;
3328
3329 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3330 BUS_DMASYNC_PREREAD);
3331
3332 rxd = &ring[prod];
3333
3334 rxd->paddr = htole64(map->dm_segs[0].ds_addr);
3335 rxd->haddr = htole64(0);
3336
3337 prod++;
3338 prod &= mask;
3339
3340 post = 1;
3341
3342 } while (--slots);
3343
3344 if (post) {
3345 rxr->rxr_prod = prod;
3346 ixl_wr(sc, rxr->rxr_tail, prod);
3347 }
3348
3349 return error;
3350 }
3351
3352 static inline int
3353 ixl_handle_queue_common(struct ixl_softc *sc, struct ixl_queue_pair *qp,
3354 u_int txlimit, struct evcnt *txevcnt,
3355 u_int rxlimit, struct evcnt *rxevcnt)
3356 {
3357 struct ixl_tx_ring *txr = qp->qp_txr;
3358 struct ixl_rx_ring *rxr = qp->qp_rxr;
3359 int txmore, rxmore;
3360 int rv;
3361
3362 mutex_enter(&txr->txr_lock);
3363 txevcnt->ev_count++;
3364 txmore = ixl_txeof(sc, txr, txlimit);
3365 mutex_exit(&txr->txr_lock);
3366
3367 mutex_enter(&rxr->rxr_lock);
3368 rxevcnt->ev_count++;
3369 rxmore = ixl_rxeof(sc, rxr, rxlimit);
3370 mutex_exit(&rxr->rxr_lock);
3371
3372 rv = txmore | (rxmore << 1);
3373
3374 return rv;
3375 }
3376
3377 static void
3378 ixl_sched_handle_queue(struct ixl_softc *sc, struct ixl_queue_pair *qp)
3379 {
3380
3381 if (qp->qp_workqueue)
3382 workqueue_enqueue(sc->sc_workq_txrx, &qp->qp_work, NULL);
3383 else
3384 softint_schedule(qp->qp_si);
3385 }
3386
3387 static int
3388 ixl_intr(void *xsc)
3389 {
3390 struct ixl_softc *sc = xsc;
3391 struct ixl_tx_ring *txr;
3392 struct ixl_rx_ring *rxr;
3393 uint32_t icr, rxintr, txintr;
3394 int rv = 0;
3395 unsigned int i;
3396
3397 KASSERT(sc != NULL);
3398
3399 ixl_enable_other_intr(sc);
3400 icr = ixl_rd(sc, I40E_PFINT_ICR0);
3401
3402 if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) {
3403 atomic_inc_64(&sc->sc_event_atq.ev_count);
3404 ixl_atq_done(sc);
3405 ixl_work_add(sc->sc_workq, &sc->sc_arq_task);
3406 rv = 1;
3407 }
3408
3409 if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) {
3410 atomic_inc_64(&sc->sc_event_link.ev_count);
3411 ixl_work_add(sc->sc_workq, &sc->sc_link_state_task);
3412 rv = 1;
3413 }
3414
3415 rxintr = icr & I40E_INTR_NOTX_RX_MASK;
3416 txintr = icr & I40E_INTR_NOTX_TX_MASK;
3417
3418 if (txintr || rxintr) {
3419 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
3420 txr = sc->sc_qps[i].qp_txr;
3421 rxr = sc->sc_qps[i].qp_rxr;
3422
3423 ixl_handle_queue_common(sc, &sc->sc_qps[i],
3424 IXL_TXRX_PROCESS_UNLIMIT, &txr->txr_intr,
3425 IXL_TXRX_PROCESS_UNLIMIT, &rxr->rxr_intr);
3426 }
3427 rv = 1;
3428 }
3429
3430 return rv;
3431 }
3432
3433 static int
3434 ixl_queue_intr(void *xqp)
3435 {
3436 struct ixl_queue_pair *qp = xqp;
3437 struct ixl_tx_ring *txr = qp->qp_txr;
3438 struct ixl_rx_ring *rxr = qp->qp_rxr;
3439 struct ixl_softc *sc = qp->qp_sc;
3440 u_int txlimit, rxlimit;
3441 int more;
3442
3443 txlimit = sc->sc_tx_intr_process_limit;
3444 rxlimit = sc->sc_rx_intr_process_limit;
3445 qp->qp_workqueue = sc->sc_txrx_workqueue;
3446
3447 more = ixl_handle_queue_common(sc, qp,
3448 txlimit, &txr->txr_intr, rxlimit, &rxr->rxr_intr);
3449
3450 if (more != 0) {
3451 ixl_sched_handle_queue(sc, qp);
3452 } else {
3453 /* for ALTQ */
3454 if (txr->txr_qid == 0)
3455 if_schedule_deferred_start(&sc->sc_ec.ec_if);
3456 softint_schedule(txr->txr_si);
3457
3458 ixl_enable_queue_intr(sc, qp);
3459 }
3460
3461 return 1;
3462 }
3463
3464 static void
3465 ixl_handle_queue_wk(struct work *wk, void *xsc)
3466 {
3467 struct ixl_queue_pair *qp;
3468
3469 qp = container_of(wk, struct ixl_queue_pair, qp_work);
3470 ixl_handle_queue(qp);
3471 }
3472
3473 static void
3474 ixl_handle_queue(void *xqp)
3475 {
3476 struct ixl_queue_pair *qp = xqp;
3477 struct ixl_softc *sc = qp->qp_sc;
3478 struct ixl_tx_ring *txr = qp->qp_txr;
3479 struct ixl_rx_ring *rxr = qp->qp_rxr;
3480 u_int txlimit, rxlimit;
3481 int more;
3482
3483 txlimit = sc->sc_tx_process_limit;
3484 rxlimit = sc->sc_rx_process_limit;
3485
3486 more = ixl_handle_queue_common(sc, qp,
3487 txlimit, &txr->txr_defer, rxlimit, &rxr->rxr_defer);
3488
3489 if (more != 0)
3490 ixl_sched_handle_queue(sc, qp);
3491 else
3492 ixl_enable_queue_intr(sc, qp);
3493 }
3494
3495 static inline void
3496 ixl_print_hmc_error(struct ixl_softc *sc, uint32_t reg)
3497 {
3498 uint32_t hmc_idx, hmc_isvf;
3499 uint32_t hmc_errtype, hmc_objtype, hmc_data;
3500
3501 hmc_idx = reg & I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK;
3502 hmc_idx = hmc_idx >> I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT;
3503 hmc_isvf = reg & I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK;
3504 hmc_isvf = hmc_isvf >> I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT;
3505 hmc_errtype = reg & I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK;
3506 hmc_errtype = hmc_errtype >> I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT;
3507 hmc_objtype = reg & I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK;
3508 hmc_objtype = hmc_objtype >> I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT;
3509 hmc_data = ixl_rd(sc, I40E_PFHMC_ERRORDATA);
3510
3511 device_printf(sc->sc_dev,
3512 "HMC Error (idx=0x%x, isvf=0x%x, err=0x%x, obj=0x%x, data=0x%x)\n",
3513 hmc_idx, hmc_isvf, hmc_errtype, hmc_objtype, hmc_data);
3514 }
3515
3516 static int
3517 ixl_other_intr(void *xsc)
3518 {
3519 struct ixl_softc *sc = xsc;
3520 uint32_t icr, mask, reg;
3521 int rv = 0;
3522
3523 icr = ixl_rd(sc, I40E_PFINT_ICR0);
3524 mask = ixl_rd(sc, I40E_PFINT_ICR0_ENA);
3525
3526 if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) {
3527 atomic_inc_64(&sc->sc_event_atq.ev_count);
3528 ixl_atq_done(sc);
3529 ixl_work_add(sc->sc_workq, &sc->sc_arq_task);
3530 rv = 1;
3531 }
3532
3533 if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) {
3534 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3535 device_printf(sc->sc_dev, "link stat changed\n");
3536
3537 atomic_inc_64(&sc->sc_event_link.ev_count);
3538 ixl_work_add(sc->sc_workq, &sc->sc_link_state_task);
3539 rv = 1;
3540 }
3541
3542 if (ISSET(icr, I40E_PFINT_ICR0_GRST_MASK)) {
3543 CLR(mask, I40E_PFINT_ICR0_ENA_GRST_MASK);
3544 reg = ixl_rd(sc, I40E_GLGEN_RSTAT);
3545 reg = reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK;
3546 reg = reg >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
3547
3548 device_printf(sc->sc_dev, "GRST: %s\n",
3549 reg == I40E_RESET_CORER ? "CORER" :
3550 reg == I40E_RESET_GLOBR ? "GLOBR" :
3551 reg == I40E_RESET_EMPR ? "EMPR" :
3552 "POR");
3553 }
3554
3555 if (ISSET(icr, I40E_PFINT_ICR0_ECC_ERR_MASK))
3556 atomic_inc_64(&sc->sc_event_ecc_err.ev_count);
3557 if (ISSET(icr, I40E_PFINT_ICR0_PCI_EXCEPTION_MASK))
3558 atomic_inc_64(&sc->sc_event_pci_exception.ev_count);
3559 if (ISSET(icr, I40E_PFINT_ICR0_PE_CRITERR_MASK))
3560 atomic_inc_64(&sc->sc_event_crit_err.ev_count);
3561
3562 if (ISSET(icr, IXL_ICR0_CRIT_ERR_MASK)) {
3563 CLR(mask, IXL_ICR0_CRIT_ERR_MASK);
3564 device_printf(sc->sc_dev, "critical error\n");
3565 }
3566
3567 if (ISSET(icr, I40E_PFINT_ICR0_HMC_ERR_MASK)) {
3568 reg = ixl_rd(sc, I40E_PFHMC_ERRORINFO);
3569 if (ISSET(reg, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK))
3570 ixl_print_hmc_error(sc, reg);
3571 ixl_wr(sc, I40E_PFHMC_ERRORINFO, 0);
3572 }
3573
3574 ixl_wr(sc, I40E_PFINT_ICR0_ENA, mask);
3575 ixl_flush(sc);
3576 ixl_enable_other_intr(sc);
3577 return rv;
3578 }
3579
3580 static void
3581 ixl_get_link_status_done(struct ixl_softc *sc,
3582 const struct ixl_aq_desc *iaq)
3583 {
3584 struct ixl_aq_desc iaq_buf;
3585
3586 memcpy(&iaq_buf, iaq, sizeof(iaq_buf));
3587
3588 /*
3589 * The lock can be released here because there is no
3590 * further ATQ post-processing to be done.
3591 */
3592 mutex_exit(&sc->sc_atq_lock);
3593 ixl_link_state_update(sc, &iaq_buf);
3594 mutex_enter(&sc->sc_atq_lock);
3595 }
3596
3597 static void
3598 ixl_get_link_status(void *xsc)
3599 {
3600 struct ixl_softc *sc = xsc;
3601 struct ixl_aq_desc *iaq;
3602 struct ixl_aq_link_param *param;
3603 int error;
3604
3605 mutex_enter(&sc->sc_atq_lock);
3606
3607 iaq = &sc->sc_link_state_atq.iatq_desc;
3608 memset(iaq, 0, sizeof(*iaq));
3609 iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
3610 param = (struct ixl_aq_link_param *)iaq->iaq_param;
3611 param->notify = IXL_AQ_LINK_NOTIFY;
3612
3613 error = ixl_atq_exec_locked(sc, &sc->sc_link_state_atq);
3614 ixl_atq_set(&sc->sc_link_state_atq, ixl_get_link_status_done);
3615
3616 if (error == 0) {
3617 ixl_get_link_status_done(sc, iaq);
3618 }
3619
3620 mutex_exit(&sc->sc_atq_lock);
3621 }
3622
3623 static void
3624 ixl_link_state_update(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
3625 {
3626 struct ifnet *ifp = &sc->sc_ec.ec_if;
3627 int link_state;
3628
3629 mutex_enter(&sc->sc_cfg_lock);
3630 link_state = ixl_set_link_status_locked(sc, iaq);
3631 mutex_exit(&sc->sc_cfg_lock);
3632
3633 if (ifp->if_link_state != link_state)
3634 if_link_state_change(ifp, link_state);
3635
3636 if (link_state != LINK_STATE_DOWN) {
3637 kpreempt_disable();
3638 if_schedule_deferred_start(ifp);
3639 kpreempt_enable();
3640 }
3641 }
3642
3643 static void
3644 ixl_aq_dump(const struct ixl_softc *sc, const struct ixl_aq_desc *iaq,
3645 const char *msg)
3646 {
3647 char buf[512];
3648 size_t len;
3649
3650 len = sizeof(buf);
3651 buf[--len] = '\0';
3652
3653 device_printf(sc->sc_dev, "%s\n", msg);
3654 snprintb(buf, len, IXL_AQ_FLAGS_FMT, le16toh(iaq->iaq_flags));
3655 device_printf(sc->sc_dev, "flags %s opcode %04x\n",
3656 buf, le16toh(iaq->iaq_opcode));
3657 device_printf(sc->sc_dev, "datalen %u retval %u\n",
3658 le16toh(iaq->iaq_datalen), le16toh(iaq->iaq_retval));
3659 device_printf(sc->sc_dev, "cookie %016" PRIx64 "\n", iaq->iaq_cookie);
3660 device_printf(sc->sc_dev, "%08x %08x %08x %08x\n",
3661 le32toh(iaq->iaq_param[0]), le32toh(iaq->iaq_param[1]),
3662 le32toh(iaq->iaq_param[2]), le32toh(iaq->iaq_param[3]));
3663 }
3664
3665 static void
3666 ixl_arq(void *xsc)
3667 {
3668 struct ixl_softc *sc = xsc;
3669 struct ixl_aq_desc *arq, *iaq;
3670 struct ixl_aq_buf *aqb;
3671 unsigned int cons = sc->sc_arq_cons;
3672 unsigned int prod;
3673 int done = 0;
3674
3675 prod = ixl_rd(sc, sc->sc_aq_regs->arq_head) &
3676 sc->sc_aq_regs->arq_head_mask;
3677
3678 if (cons == prod)
3679 goto done;
3680
3681 arq = IXL_DMA_KVA(&sc->sc_arq);
3682
3683 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3684 0, IXL_DMA_LEN(&sc->sc_arq),
3685 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3686
3687 do {
3688 iaq = &arq[cons];
3689 aqb = sc->sc_arq_live[cons];
3690
3691 KASSERT(aqb != NULL);
3692
3693 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,
3694 BUS_DMASYNC_POSTREAD);
3695
3696 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3697 ixl_aq_dump(sc, iaq, "arq event");
3698
3699 switch (iaq->iaq_opcode) {
3700 case htole16(IXL_AQ_OP_PHY_LINK_STATUS):
3701 ixl_link_state_update(sc, iaq);
3702 break;
3703 }
3704
3705 memset(iaq, 0, sizeof(*iaq));
3706 sc->sc_arq_live[cons] = NULL;
3707 SIMPLEQ_INSERT_TAIL(&sc->sc_arq_idle, aqb, aqb_entry);
3708
3709 cons++;
3710 cons &= IXL_AQ_MASK;
3711
3712 done = 1;
3713 } while (cons != prod);
3714
3715 if (done) {
3716 sc->sc_arq_cons = cons;
3717 ixl_arq_fill(sc);
3718 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3719 0, IXL_DMA_LEN(&sc->sc_arq),
3720 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3721 }
3722
3723 done:
3724 ixl_enable_other_intr(sc);
3725 }
3726
3727 static void
3728 ixl_atq_set(struct ixl_atq *iatq,
3729 void (*fn)(struct ixl_softc *, const struct ixl_aq_desc *))
3730 {
3731
3732 iatq->iatq_fn = fn;
3733 }
3734
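/*
 * Post a command on the admin transmit queue (ATQ).  The ixl_atq
 * pointer is stored in the descriptor cookie so that the completion
 * path can find the request and invoke its iatq_fn callback.
 */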
3735 static int
3736 ixl_atq_post_locked(struct ixl_softc *sc, struct ixl_atq *iatq)
3737 {
3738 struct ixl_aq_desc *atq, *slot;
3739 unsigned int prod, cons, prod_next;
3740
3741 /* assert locked */
3742 KASSERT(mutex_owned(&sc->sc_atq_lock));
3743
3744 atq = IXL_DMA_KVA(&sc->sc_atq);
3745 prod = sc->sc_atq_prod;
3746 cons = sc->sc_atq_cons;
3747 prod_next = (prod + 1) & IXL_AQ_MASK;
3748
3749 if (cons == prod_next)
3750 return ENOMEM;
3751
3752 slot = &atq[prod];
3753
3754 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3755 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
3756
3757 KASSERT(iatq->iatq_fn != NULL);
3758 *slot = iatq->iatq_desc;
3759 slot->iaq_cookie = (uint64_t)((intptr_t)iatq);
3760
3761 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3762 ixl_aq_dump(sc, slot, "atq command");
3763
3764 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3765 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
3766
3767 sc->sc_atq_prod = prod_next;
3768 ixl_wr(sc, sc->sc_aq_regs->atq_tail, sc->sc_atq_prod);
3769
3770 return 0;
3771 }
3772
3773 static void
3774 ixl_atq_done_locked(struct ixl_softc *sc)
3775 {
3776 struct ixl_aq_desc *atq, *slot;
3777 struct ixl_atq *iatq;
3778 unsigned int cons;
3779 unsigned int prod;
3780
3781 KASSERT(mutex_owned(&sc->sc_atq_lock));
3782
3783 prod = sc->sc_atq_prod;
3784 cons = sc->sc_atq_cons;
3785
3786 if (prod == cons)
3787 return;
3788
3789 atq = IXL_DMA_KVA(&sc->sc_atq);
3790
3791 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3792 0, IXL_DMA_LEN(&sc->sc_atq),
3793 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3794
3795 do {
3796 slot = &atq[cons];
3797 if (!ISSET(slot->iaq_flags, htole16(IXL_AQ_DD)))
3798 break;
3799
3800 iatq = (struct ixl_atq *)((intptr_t)slot->iaq_cookie);
3801 iatq->iatq_desc = *slot;
3802
3803 memset(slot, 0, sizeof(*slot));
3804
3805 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3806 ixl_aq_dump(sc, &iatq->iatq_desc, "atq response");
3807
3808 (*iatq->iatq_fn)(sc, &iatq->iatq_desc);
3809
3810 cons++;
3811 cons &= IXL_AQ_MASK;
3812 } while (cons != prod);
3813
3814 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3815 0, IXL_DMA_LEN(&sc->sc_atq),
3816 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3817
3818 sc->sc_atq_cons = cons;
3819 }
3820
3821 static void
3822 ixl_atq_done(struct ixl_softc *sc)
3823 {
3824
3825 mutex_enter(&sc->sc_atq_lock);
3826 ixl_atq_done_locked(sc);
3827 mutex_exit(&sc->sc_atq_lock);
3828 }
3829
3830 static void
3831 ixl_wakeup(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
3832 {
3833
3834 KASSERT(mutex_owned(&sc->sc_atq_lock));
3835
3836 cv_signal(&sc->sc_atq_cv);
3837 }
3838
3839 static int
3840 ixl_atq_exec(struct ixl_softc *sc, struct ixl_atq *iatq)
3841 {
3842 int error;
3843
3844 mutex_enter(&sc->sc_atq_lock);
3845 error = ixl_atq_exec_locked(sc, iatq);
3846 mutex_exit(&sc->sc_atq_lock);
3847
3848 return error;
3849 }
3850
3851 static int
3852 ixl_atq_exec_locked(struct ixl_softc *sc, struct ixl_atq *iatq)
3853 {
3854 int error;
3855
3856 KASSERT(mutex_owned(&sc->sc_atq_lock));
3857 KASSERT(iatq->iatq_desc.iaq_cookie == 0);
3858
3859 ixl_atq_set(iatq, ixl_wakeup);
3860
3861 error = ixl_atq_post_locked(sc, iatq);
3862 if (error)
3863 return error;
3864
3865 error = cv_timedwait(&sc->sc_atq_cv, &sc->sc_atq_lock,
3866 IXL_ATQ_EXEC_TIMEOUT);
3867
3868 return error;
3869 }
3870
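/*
 * Polled admin queue command, used when interrupts are not being
 * serviced (e.g. during attach).  The descriptor is posted and
 * atq_head is busy-waited, in 1ms steps, for up to tm milliseconds.
 */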
3871 static int
3872 ixl_atq_poll(struct ixl_softc *sc, struct ixl_aq_desc *iaq, unsigned int tm)
3873 {
3874 struct ixl_aq_desc *atq, *slot;
3875 unsigned int prod;
3876 unsigned int t = 0;
3877
3878 mutex_enter(&sc->sc_atq_lock);
3879
3880 atq = IXL_DMA_KVA(&sc->sc_atq);
3881 prod = sc->sc_atq_prod;
3882 slot = atq + prod;
3883
3884 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3885 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
3886
3887 *slot = *iaq;
3888 slot->iaq_flags |= htole16(IXL_AQ_SI);
3889
3890 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3891 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
3892
3893 prod++;
3894 prod &= IXL_AQ_MASK;
3895 sc->sc_atq_prod = prod;
3896 ixl_wr(sc, sc->sc_aq_regs->atq_tail, prod);
3897
3898 while (ixl_rd(sc, sc->sc_aq_regs->atq_head) != prod) {
3899 delaymsec(1);
3900
3901 if (t++ > tm) {
3902 mutex_exit(&sc->sc_atq_lock);
3903 return ETIMEDOUT;
3904 }
3905 }
3906
3907 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3908 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTREAD);
3909 *iaq = *slot;
3910 memset(slot, 0, sizeof(*slot));
3911 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3912 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREREAD);
3913
3914 sc->sc_atq_cons = prod;
3915
3916 mutex_exit(&sc->sc_atq_lock);
3917
3918 return 0;
3919 }
3920
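/*
 * Read the firmware and admin queue API versions.  The X722 MAC and
 * newer API revisions support extra admin queue features (NVM locking
 * and reads, RX control register access, RSS configuration), which
 * are recorded in sc_aq_flags for the rest of the driver.
 */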
3921 static int
3922 ixl_get_version(struct ixl_softc *sc)
3923 {
3924 struct ixl_aq_desc iaq;
3925 uint32_t fwbuild, fwver, apiver;
3926 uint16_t api_maj_ver, api_min_ver;
3927
3928 memset(&iaq, 0, sizeof(iaq));
3929 iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VERSION);
3930
3933 if (ixl_atq_poll(sc, &iaq, 2000) != 0)
3934 return ETIMEDOUT;
3935 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK))
3936 return EIO;
3937
3938 fwbuild = le32toh(iaq.iaq_param[1]);
3939 fwver = le32toh(iaq.iaq_param[2]);
3940 apiver = le32toh(iaq.iaq_param[3]);
3941
3942 api_maj_ver = (uint16_t)apiver;
3943 api_min_ver = (uint16_t)(apiver >> 16);
3944
3945 aprint_normal(", FW %hu.%hu.%05u API %hu.%hu", (uint16_t)fwver,
3946 (uint16_t)(fwver >> 16), fwbuild, api_maj_ver, api_min_ver);
3947
3948 if (sc->sc_mac_type == I40E_MAC_X722) {
3949 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK |
3950 IXL_SC_AQ_FLAG_NVMREAD);
3951 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL);
3952 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RSS);
3953 }
3954
3955 #define IXL_API_VER(maj, min) (((uint32_t)(maj) << 16) | (min))
3956 if (IXL_API_VER(api_maj_ver, api_min_ver) >= IXL_API_VER(1, 5)) {
3957 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL);
3958 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK);
3959 }
3960 #undef IXL_API_VER
3961
3962 return 0;
3963 }
3964
3965 static int
3966 ixl_get_nvm_version(struct ixl_softc *sc)
3967 {
3968 uint16_t nvmver, cfg_ptr, eetrack_hi, eetrack_lo, oem_hi, oem_lo;
3969 uint32_t eetrack, oem;
3970 uint16_t nvm_maj_ver, nvm_min_ver, oem_build;
3971 uint8_t oem_ver, oem_patch;
3972
3973 nvmver = cfg_ptr = eetrack_hi = eetrack_lo = oem_hi = oem_lo = 0;
3974 ixl_rd16_nvm(sc, I40E_SR_NVM_DEV_STARTER_VERSION, &nvmver);
3975 ixl_rd16_nvm(sc, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
3976 ixl_rd16_nvm(sc, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
3977 ixl_rd16_nvm(sc, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
3978 ixl_rd16_nvm(sc, cfg_ptr + I40E_NVM_OEM_VER_OFF, &oem_hi);
3979 ixl_rd16_nvm(sc, cfg_ptr + I40E_NVM_OEM_VER_OFF + 1, &oem_lo);
3980
3981 nvm_maj_ver = (uint16_t)__SHIFTOUT(nvmver, IXL_NVM_VERSION_HI_MASK);
3982 nvm_min_ver = (uint16_t)__SHIFTOUT(nvmver, IXL_NVM_VERSION_LO_MASK);
3983 eetrack = ((uint32_t)eetrack_hi << 16) | eetrack_lo;
3984 oem = ((uint32_t)oem_hi << 16) | oem_lo;
3985 oem_ver = __SHIFTOUT(oem, IXL_NVM_OEMVERSION_MASK);
3986 oem_build = __SHIFTOUT(oem, IXL_NVM_OEMBUILD_MASK);
3987 oem_patch = __SHIFTOUT(oem, IXL_NVM_OEMPATCH_MASK);
3988
3989 aprint_normal(" nvm %x.%02x etid %08x oem %d.%d.%d",
3990 nvm_maj_ver, nvm_min_ver, eetrack,
3991 oem_ver, oem_build, oem_patch);
3992
3993 return 0;
3994 }
3995
3996 static int
3997 ixl_pxe_clear(struct ixl_softc *sc)
3998 {
3999 struct ixl_aq_desc iaq;
4000 int rv;
4001
4002 memset(&iaq, 0, sizeof(iaq));
4003 iaq.iaq_opcode = htole16(IXL_AQ_OP_CLEAR_PXE_MODE);
4004 iaq.iaq_param[0] = htole32(0x2);
4005
4006 rv = ixl_atq_poll(sc, &iaq, 250);
4007
4008 ixl_wr(sc, I40E_GLLAN_RCTL_0, 0x1);
4009
4010 if (rv != 0)
4011 return ETIMEDOUT;
4012
4013 switch (iaq.iaq_retval) {
4014 case htole16(IXL_AQ_RC_OK):
4015 case htole16(IXL_AQ_RC_EEXIST):
4016 break;
4017 default:
4018 return EIO;
4019 }
4020
4021 return 0;
4022 }
4023
4024 static int
4025 ixl_lldp_shut(struct ixl_softc *sc)
4026 {
4027 struct ixl_aq_desc iaq;
4028
4029 memset(&iaq, 0, sizeof(iaq));
4030 iaq.iaq_opcode = htole16(IXL_AQ_OP_LLDP_STOP_AGENT);
4031 iaq.iaq_param[0] = htole32(IXL_LLDP_SHUTDOWN);
4032
4033 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4034 aprint_error_dev(sc->sc_dev, "STOP LLDP AGENT timeout\n");
4035 return -1;
4036 }
4037
4038 switch (iaq.iaq_retval) {
4039 case htole16(IXL_AQ_RC_EMODE):
4040 case htole16(IXL_AQ_RC_EPERM):
4041 /* ignore silently */
4042 default:
4043 break;
4044 }
4045
4046 return 0;
4047 }
4048
4049 static void
4050 ixl_parse_hw_capability(struct ixl_softc *sc, struct ixl_aq_capability *cap)
4051 {
4052 uint16_t id;
4053 uint32_t number, logical_id;
4054
4055 id = le16toh(cap->cap_id);
4056 number = le32toh(cap->number);
4057 logical_id = le32toh(cap->logical_id);
4058
4059 switch (id) {
4060 case IXL_AQ_CAP_RSS:
4061 sc->sc_rss_table_size = number;
4062 sc->sc_rss_table_entry_width = logical_id;
4063 break;
4064 case IXL_AQ_CAP_RXQ:
4065 case IXL_AQ_CAP_TXQ:
4066 sc->sc_nqueue_pairs_device = MIN(number,
4067 sc->sc_nqueue_pairs_device);
4068 break;
4069 }
4070 }
4071
4072 static int
4073 ixl_get_hw_capabilities(struct ixl_softc *sc)
4074 {
4075 struct ixl_dmamem idm;
4076 struct ixl_aq_desc iaq;
4077 struct ixl_aq_capability *caps;
4078 size_t i, ncaps;
4079 bus_size_t caps_size;
4080 uint16_t status;
4081 int rv;
4082
4083 caps_size = sizeof(caps[0]) * 40;
4084 memset(&iaq, 0, sizeof(iaq));
4085 iaq.iaq_opcode = htole16(IXL_AQ_OP_LIST_FUNC_CAP);
4086
4087 do {
4088 if (ixl_dmamem_alloc(sc, &idm, caps_size, 0) != 0) {
4089 return -1;
4090 }
4091
4092 iaq.iaq_flags = htole16(IXL_AQ_BUF |
4093 (caps_size > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4094 iaq.iaq_datalen = htole16(caps_size);
4095 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
4096
4097 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0,
4098 IXL_DMA_LEN(&idm), BUS_DMASYNC_PREREAD);
4099
4100 rv = ixl_atq_poll(sc, &iaq, 250);
4101
4102 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0,
4103 IXL_DMA_LEN(&idm), BUS_DMASYNC_POSTREAD);
4104
4105 if (rv != 0) {
4106 aprint_error(", HW capabilities timeout\n");
4107 goto done;
4108 }
4109
4110 status = le16toh(iaq.iaq_retval);
4111
4112 if (status == IXL_AQ_RC_ENOMEM) {
4113 caps_size = le16toh(iaq.iaq_datalen);
4114 ixl_dmamem_free(sc, &idm);
4115 }
4116 } while (status == IXL_AQ_RC_ENOMEM);
4117
4118 if (status != IXL_AQ_RC_OK) {
4119 aprint_error(", HW capabilities error\n");
4120 goto done;
4121 }
4122
4123 caps = IXL_DMA_KVA(&idm);
4124 ncaps = le16toh(iaq.iaq_param[1]);
4125
4126 for (i = 0; i < ncaps; i++) {
4127 ixl_parse_hw_capability(sc, &caps[i]);
4128 }
4129
4130 done:
4131 ixl_dmamem_free(sc, &idm);
4132 return rv;
4133 }
4134
4135 static int
4136 ixl_get_mac(struct ixl_softc *sc)
4137 {
4138 struct ixl_dmamem idm;
4139 struct ixl_aq_desc iaq;
4140 struct ixl_aq_mac_addresses *addrs;
4141 int rv;
4142
4143 if (ixl_dmamem_alloc(sc, &idm, sizeof(*addrs), 0) != 0) {
4144 aprint_error(", unable to allocate mac addresses\n");
4145 return -1;
4146 }
4147
4148 memset(&iaq, 0, sizeof(iaq));
4149 iaq.iaq_flags = htole16(IXL_AQ_BUF);
4150 iaq.iaq_opcode = htole16(IXL_AQ_OP_MAC_ADDRESS_READ);
4151 iaq.iaq_datalen = htole16(sizeof(*addrs));
4152 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
4153
4154 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4155 BUS_DMASYNC_PREREAD);
4156
4157 rv = ixl_atq_poll(sc, &iaq, 250);
4158
4159 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4160 BUS_DMASYNC_POSTREAD);
4161
4162 if (rv != 0) {
4163 aprint_error(", MAC ADDRESS READ timeout\n");
4164 rv = -1;
4165 goto done;
4166 }
4167 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4168 aprint_error(", MAC ADDRESS READ error\n");
4169 rv = -1;
4170 goto done;
4171 }
4172
4173 addrs = IXL_DMA_KVA(&idm);
4174 if (!ISSET(iaq.iaq_param[0], htole32(IXL_AQ_MAC_PORT_VALID))) {
4175 printf(", port address is not valid\n");
4176 goto done;
4177 }
4178
4179 memcpy(sc->sc_enaddr, addrs->port, ETHER_ADDR_LEN);
4180 rv = 0;
4181
4182 done:
4183 ixl_dmamem_free(sc, &idm);
4184 return rv;
4185 }
4186
4187 static int
4188 ixl_get_switch_config(struct ixl_softc *sc)
4189 {
4190 struct ixl_dmamem idm;
4191 struct ixl_aq_desc iaq;
4192 struct ixl_aq_switch_config *hdr;
4193 struct ixl_aq_switch_config_element *elms, *elm;
4194 unsigned int nelm, i;
4195 int rv;
4196
4197 if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
4198 aprint_error_dev(sc->sc_dev,
4199 "unable to allocate switch config buffer\n");
4200 return -1;
4201 }
4202
4203 memset(&iaq, 0, sizeof(iaq));
4204 iaq.iaq_flags = htole16(IXL_AQ_BUF |
4205 (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4206 iaq.iaq_opcode = htole16(IXL_AQ_OP_SWITCH_GET_CONFIG);
4207 iaq.iaq_datalen = htole16(IXL_AQ_BUFLEN);
4208 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
4209
4210 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4211 BUS_DMASYNC_PREREAD);
4212
4213 rv = ixl_atq_poll(sc, &iaq, 250);
4214
4215 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4216 BUS_DMASYNC_POSTREAD);
4217
4218 if (rv != 0) {
4219 aprint_error_dev(sc->sc_dev, "GET SWITCH CONFIG timeout\n");
4220 rv = -1;
4221 goto done;
4222 }
4223 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4224 aprint_error_dev(sc->sc_dev, "GET SWITCH CONFIG error\n");
4225 rv = -1;
4226 goto done;
4227 }
4228
4229 hdr = IXL_DMA_KVA(&idm);
4230 elms = (struct ixl_aq_switch_config_element *)(hdr + 1);
4231
4232 nelm = le16toh(hdr->num_reported);
4233 if (nelm < 1) {
4234 aprint_error_dev(sc->sc_dev, "no switch config available\n");
4235 rv = -1;
4236 goto done;
4237 }
4238
4239 for (i = 0; i < nelm; i++) {
4240 elm = &elms[i];
4241
4242 aprint_debug_dev(sc->sc_dev,
4243 "type %x revision %u seid %04x\n",
4244 elm->type, elm->revision, le16toh(elm->seid));
4245 aprint_debug_dev(sc->sc_dev,
4246 "uplink %04x downlink %04x\n",
4247 le16toh(elm->uplink_seid),
4248 le16toh(elm->downlink_seid));
4249 aprint_debug_dev(sc->sc_dev,
4250 "conntype %x scheduler %04x extra %04x\n",
4251 elm->connection_type,
4252 le16toh(elm->scheduler_id),
4253 le16toh(elm->element_info));
4254 }
4255
4256 elm = &elms[0];
4257
4258 sc->sc_uplink_seid = elm->uplink_seid;
4259 sc->sc_downlink_seid = elm->downlink_seid;
4260 sc->sc_seid = elm->seid;
4261
4262 if ((sc->sc_uplink_seid == htole16(0)) !=
4263 (sc->sc_downlink_seid == htole16(0))) {
4264 aprint_error_dev(sc->sc_dev, "SEIDs are misconfigured\n");
4265 rv = -1;
4266 goto done;
4267 }
4268
4269 done:
4270 ixl_dmamem_free(sc, &idm);
4271 return rv;
4272 }
4273
4274 static int
4275 ixl_phy_mask_ints(struct ixl_softc *sc)
4276 {
4277 struct ixl_aq_desc iaq;
4278
4279 memset(&iaq, 0, sizeof(iaq));
4280 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_EVENT_MASK);
4281 iaq.iaq_param[2] = htole32(IXL_AQ_PHY_EV_MASK &
4282 ~(IXL_AQ_PHY_EV_LINK_UPDOWN | IXL_AQ_PHY_EV_MODULE_QUAL_FAIL |
4283 IXL_AQ_PHY_EV_MEDIA_NA));
4284
4285 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4286 aprint_error_dev(sc->sc_dev, "SET PHY EVENT MASK timeout\n");
4287 return -1;
4288 }
4289 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4290 aprint_error_dev(sc->sc_dev, "SET PHY EVENT MASK error\n");
4291 return -1;
4292 }
4293
4294 return 0;
4295 }
4296
4297 static int
4298 ixl_get_phy_abilities(struct ixl_softc *sc, struct ixl_dmamem *idm)
4299 {
4300 struct ixl_aq_desc iaq;
4301 int rv;
4302
4303 memset(&iaq, 0, sizeof(iaq));
4304 iaq.iaq_flags = htole16(IXL_AQ_BUF |
4305 (IXL_DMA_LEN(idm) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4306 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_GET_ABILITIES);
4307 iaq.iaq_datalen = htole16(IXL_DMA_LEN(idm));
4308 iaq.iaq_param[0] = htole32(IXL_AQ_PHY_REPORT_INIT);
4309 ixl_aq_dva(&iaq, IXL_DMA_DVA(idm));
4310
4311 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
4312 BUS_DMASYNC_PREREAD);
4313
4314 rv = ixl_atq_poll(sc, &iaq, 250);
4315
4316 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
4317 BUS_DMASYNC_POSTREAD);
4318
4319 if (rv != 0)
4320 return -1;
4321
4322 return le16toh(iaq.iaq_retval);
4323 }
4324
4325 static int
4326 ixl_get_phy_info(struct ixl_softc *sc)
4327 {
4328 struct ixl_dmamem idm;
4329 struct ixl_aq_phy_abilities *phy;
4330 int rv;
4331
4332 if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
4333 aprint_error_dev(sc->sc_dev,
4334 "unable to allocate phy abilities buffer\n");
4335 return -1;
4336 }
4337
4338 rv = ixl_get_phy_abilities(sc, &idm);
4339 switch (rv) {
4340 case -1:
4341 aprint_error_dev(sc->sc_dev, "GET PHY ABILITIES timeout\n");
4342 goto done;
4343 case IXL_AQ_RC_OK:
4344 break;
4345 case IXL_AQ_RC_EIO:
4346 		aprint_error_dev(sc->sc_dev, "unable to query phy types\n");
4347 goto done;
4348 default:
4349 aprint_error_dev(sc->sc_dev,
4350 		    "GET PHY ABILITIES error %u\n", rv);
4351 goto done;
4352 }
4353
4354 phy = IXL_DMA_KVA(&idm);
4355
4356 sc->sc_phy_types = le32toh(phy->phy_type);
4357 sc->sc_phy_types |= (uint64_t)le32toh(phy->phy_type_ext) << 32;
4358
4359 sc->sc_phy_abilities = phy->abilities;
4360 sc->sc_phy_linkspeed = phy->link_speed;
4361 sc->sc_phy_fec_cfg = phy->fec_cfg_curr_mod_ext_info &
4362 (IXL_AQ_ENABLE_FEC_KR | IXL_AQ_ENABLE_FEC_RS |
4363 IXL_AQ_REQUEST_FEC_KR | IXL_AQ_REQUEST_FEC_RS);
4364 sc->sc_eee_cap = phy->eee_capability;
4365 sc->sc_eeer_val = phy->eeer_val;
4366 sc->sc_d3_lpan = phy->d3_lpan;
4367
4368 rv = 0;
4369
4370 done:
4371 ixl_dmamem_free(sc, &idm);
4372 return rv;
4373 }
4374
4375 static int
4376 ixl_set_phy_config(struct ixl_softc *sc,
4377 uint8_t link_speed, uint8_t abilities, bool polling)
4378 {
4379 struct ixl_aq_phy_param *param;
4380 struct ixl_atq iatq;
4381 struct ixl_aq_desc *iaq;
4382 int error;
4383
4384 memset(&iatq, 0, sizeof(iatq));
4385
4386 iaq = &iatq.iatq_desc;
4387 iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_CONFIG);
4388 param = (struct ixl_aq_phy_param *)&iaq->iaq_param;
4389 param->phy_types = htole32((uint32_t)sc->sc_phy_types);
4390 param->phy_type_ext = (uint8_t)(sc->sc_phy_types >> 32);
4391 param->link_speed = link_speed;
4392 param->abilities = abilities | IXL_AQ_PHY_ABILITY_AUTO_LINK;
4393 param->fec_cfg = sc->sc_phy_fec_cfg;
4394 param->eee_capability = sc->sc_eee_cap;
4395 param->eeer_val = sc->sc_eeer_val;
4396 param->d3_lpan = sc->sc_d3_lpan;
4397
4398 if (polling)
4399 error = ixl_atq_poll(sc, iaq, 250);
4400 else
4401 error = ixl_atq_exec(sc, &iatq);
4402
4403 if (error != 0)
4404 return error;
4405
4406 switch (le16toh(iaq->iaq_retval)) {
4407 case IXL_AQ_RC_OK:
4408 break;
4409 case IXL_AQ_RC_EPERM:
4410 return EPERM;
4411 default:
4412 return EIO;
4413 }
4414
4415 return 0;
4416 }
4417
4418 static int
4419 ixl_set_phy_autoselect(struct ixl_softc *sc)
4420 {
4421 uint8_t link_speed, abilities;
4422
4423 link_speed = sc->sc_phy_linkspeed;
4424 abilities = IXL_PHY_ABILITY_LINKUP | IXL_PHY_ABILITY_AUTONEGO;
4425
4426 return ixl_set_phy_config(sc, link_speed, abilities, true);
4427 }
4428
4429 static int
4430 ixl_get_link_status_poll(struct ixl_softc *sc, int *l)
4431 {
4432 struct ixl_aq_desc iaq;
4433 struct ixl_aq_link_param *param;
4434 int link;
4435
4436 memset(&iaq, 0, sizeof(iaq));
4437 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
4438 param = (struct ixl_aq_link_param *)iaq.iaq_param;
4439 param->notify = IXL_AQ_LINK_NOTIFY;
4440
4441 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4442 return ETIMEDOUT;
4443 }
4444 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4445 return EIO;
4446 }
4447
4448 	/* It is unnecessary to hold the lock here */
4449 link = ixl_set_link_status_locked(sc, &iaq);
4450
4451 if (l != NULL)
4452 *l = link;
4453
4454 return 0;
4455 }
4456
4457 static int
4458 ixl_get_vsi(struct ixl_softc *sc)
4459 {
4460 struct ixl_dmamem *vsi = &sc->sc_scratch;
4461 struct ixl_aq_desc iaq;
4462 struct ixl_aq_vsi_param *param;
4463 struct ixl_aq_vsi_reply *reply;
4464 struct ixl_aq_vsi_data *data;
4465 int rv;
4466
4467 /* grumble, vsi info isn't "known" at compile time */
4468
4469 memset(&iaq, 0, sizeof(iaq));
4470 iaq.iaq_flags = htole16(IXL_AQ_BUF |
4471 (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4472 iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VSI_PARAMS);
4473 iaq.iaq_datalen = htole16(IXL_DMA_LEN(vsi));
4474 ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
4475
4476 param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
4477 param->uplink_seid = sc->sc_seid;
4478
4479 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4480 BUS_DMASYNC_PREREAD);
4481
4482 rv = ixl_atq_poll(sc, &iaq, 250);
4483
4484 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4485 BUS_DMASYNC_POSTREAD);
4486
4487 if (rv != 0) {
4488 return ETIMEDOUT;
4489 }
4490
4491 switch (le16toh(iaq.iaq_retval)) {
4492 case IXL_AQ_RC_OK:
4493 break;
4494 case IXL_AQ_RC_ENOENT:
4495 return ENOENT;
4496 case IXL_AQ_RC_EACCES:
4497 return EACCES;
4498 default:
4499 return EIO;
4500 }
4501
4502 reply = (struct ixl_aq_vsi_reply *)iaq.iaq_param;
4503 sc->sc_vsi_number = le16toh(reply->vsi_number);
4504 data = IXL_DMA_KVA(vsi);
4505 sc->sc_vsi_stat_counter_idx = le16toh(data->stat_counter_idx);
4506
4507 return 0;
4508 }
4509
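/*
 * Write the updated VSI context back to the firmware: a contiguous
 * queue mapping covering sc_nqueue_pairs queue pairs, plus the port
 * VLAN mode (hardware tag stripping when ETHERCAP_VLAN_HWTAGGING is
 * enabled, tags left in the frame otherwise).
 */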
4510 static int
4511 ixl_set_vsi(struct ixl_softc *sc)
4512 {
4513 struct ixl_dmamem *vsi = &sc->sc_scratch;
4514 struct ixl_aq_desc iaq;
4515 struct ixl_aq_vsi_param *param;
4516 struct ixl_aq_vsi_data *data = IXL_DMA_KVA(vsi);
4517 unsigned int qnum;
4518 uint16_t val;
4519 int rv;
4520
4521 qnum = sc->sc_nqueue_pairs - 1;
4522
4523 data->valid_sections = htole16(IXL_AQ_VSI_VALID_QUEUE_MAP |
4524 IXL_AQ_VSI_VALID_VLAN);
4525
4526 CLR(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_MASK));
4527 SET(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_CONTIG));
4528 data->queue_mapping[0] = htole16(0);
4529 data->tc_mapping[0] = htole16((0 << IXL_AQ_VSI_TC_Q_OFFSET_SHIFT) |
4530 (qnum << IXL_AQ_VSI_TC_Q_NUMBER_SHIFT));
4531
4532 val = le16toh(data->port_vlan_flags);
4533 CLR(val, IXL_AQ_VSI_PVLAN_MODE_MASK | IXL_AQ_VSI_PVLAN_EMOD_MASK);
4534 SET(val, IXL_AQ_VSI_PVLAN_MODE_ALL);
4535
4536 if (ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWTAGGING)) {
4537 SET(val, IXL_AQ_VSI_PVLAN_EMOD_STR_BOTH);
4538 } else {
4539 SET(val, IXL_AQ_VSI_PVLAN_EMOD_NOTHING);
4540 }
4541
4542 data->port_vlan_flags = htole16(val);
4543
4544 /* grumble, vsi info isn't "known" at compile time */
4545
4546 memset(&iaq, 0, sizeof(iaq));
4547 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD |
4548 (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4549 iaq.iaq_opcode = htole16(IXL_AQ_OP_UPD_VSI_PARAMS);
4550 iaq.iaq_datalen = htole16(IXL_DMA_LEN(vsi));
4551 ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
4552
4553 param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
4554 param->uplink_seid = sc->sc_seid;
4555
4556 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4557 BUS_DMASYNC_PREWRITE);
4558
4559 rv = ixl_atq_poll(sc, &iaq, 250);
4560
4561 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4562 BUS_DMASYNC_POSTWRITE);
4563
4564 if (rv != 0) {
4565 return ETIMEDOUT;
4566 }
4567
4568 switch (le16toh(iaq.iaq_retval)) {
4569 case IXL_AQ_RC_OK:
4570 break;
4571 case IXL_AQ_RC_ENOENT:
4572 return ENOENT;
4573 case IXL_AQ_RC_EACCES:
4574 return EACCES;
4575 default:
4576 return EIO;
4577 }
4578
4579 return 0;
4580 }
4581
4582 static void
4583 ixl_set_filter_control(struct ixl_softc *sc)
4584 {
4585 uint32_t reg;
4586
4587 reg = ixl_rd_rx_csr(sc, I40E_PFQF_CTL_0);
4588
4589 CLR(reg, I40E_PFQF_CTL_0_HASHLUTSIZE_MASK);
4590 SET(reg, I40E_HASH_LUT_SIZE_128 << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT);
4591
4592 SET(reg, I40E_PFQF_CTL_0_FD_ENA_MASK);
4593 SET(reg, I40E_PFQF_CTL_0_ETYPE_ENA_MASK);
4594 SET(reg, I40E_PFQF_CTL_0_MACVLAN_ENA_MASK);
4595
4596 ixl_wr_rx_csr(sc, I40E_PFQF_CTL_0, reg);
4597 }
4598
4599 static inline void
4600 ixl_get_default_rss_key(uint32_t *buf, size_t len)
4601 {
4602 size_t cplen;
4603 uint8_t rss_seed[RSS_KEYSIZE];
4604
4605 rss_getkey(rss_seed);
4606 memset(buf, 0, len);
4607
4608 cplen = MIN(len, sizeof(rss_seed));
4609 memcpy(buf, rss_seed, cplen);
4610 }
4611
4612 static int
4613 ixl_set_rss_key(struct ixl_softc *sc, uint8_t *key, size_t keylen)
4614 {
4615 struct ixl_dmamem *idm;
4616 struct ixl_atq iatq;
4617 struct ixl_aq_desc *iaq;
4618 struct ixl_aq_rss_key_param *param;
4619 struct ixl_aq_rss_key_data *data;
4620 size_t len, datalen, stdlen, extlen;
4621 uint16_t vsi_id;
4622 int rv;
4623
4624 memset(&iatq, 0, sizeof(iatq));
4625 iaq = &iatq.iatq_desc;
4626 idm = &sc->sc_aqbuf;
4627
4628 datalen = sizeof(*data);
4629
4630 	/* XXX the buffer must not be larger than the RSS key registers */
4631 datalen = MIN(IXL_RSS_KEY_SIZE_REG * sizeof(uint32_t), datalen);
4632
4633 iaq->iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD |
4634 (datalen > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4635 iaq->iaq_opcode = htole16(IXL_AQ_OP_RSS_SET_KEY);
4636 iaq->iaq_datalen = htole16(datalen);
4637
4638 param = (struct ixl_aq_rss_key_param *)iaq->iaq_param;
4639 vsi_id = (sc->sc_vsi_number << IXL_AQ_RSSKEY_VSI_ID_SHIFT) |
4640 IXL_AQ_RSSKEY_VSI_VALID;
4641 param->vsi_id = htole16(vsi_id);
4642
4643 memset(IXL_DMA_KVA(idm), 0, IXL_DMA_LEN(idm));
4644 data = IXL_DMA_KVA(idm);
4645
4646 len = MIN(keylen, datalen);
4647 stdlen = MIN(sizeof(data->standard_rss_key), len);
4648 memcpy(data->standard_rss_key, key, stdlen);
4649 len = (len > stdlen) ? (len - stdlen) : 0;
4650
4651 extlen = MIN(sizeof(data->extended_hash_key), len);
4653 memcpy(data->extended_hash_key, key + stdlen, extlen);
4654
4655 ixl_aq_dva(iaq, IXL_DMA_DVA(idm));
4656
4657 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
4658 IXL_DMA_LEN(idm), BUS_DMASYNC_PREWRITE);
4659
4660 rv = ixl_atq_exec(sc, &iatq);
4661
4662 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
4663 IXL_DMA_LEN(idm), BUS_DMASYNC_POSTWRITE);
4664
4665 if (rv != 0) {
4666 return ETIMEDOUT;
4667 }
4668
4669 if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK)) {
4670 return EIO;
4671 }
4672
4673 return 0;
4674 }
4675
4676 static int
4677 ixl_set_rss_lut(struct ixl_softc *sc, uint8_t *lut, size_t lutlen)
4678 {
4679 struct ixl_dmamem *idm;
4680 struct ixl_atq iatq;
4681 struct ixl_aq_desc *iaq;
4682 struct ixl_aq_rss_lut_param *param;
4683 uint16_t vsi_id;
4684 uint8_t *data;
4685 size_t dmalen;
4686 int rv;
4687
4688 memset(&iatq, 0, sizeof(iatq));
4689 iaq = &iatq.iatq_desc;
4690 idm = &sc->sc_aqbuf;
4691
4692 dmalen = MIN(lutlen, IXL_DMA_LEN(idm));
4693
4694 iaq->iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD |
4695 (dmalen > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4696 iaq->iaq_opcode = htole16(IXL_AQ_OP_RSS_SET_LUT);
4697 iaq->iaq_datalen = htole16(dmalen);
4698
4699 memset(IXL_DMA_KVA(idm), 0, IXL_DMA_LEN(idm));
4700 data = IXL_DMA_KVA(idm);
4701 memcpy(data, lut, dmalen);
4702 ixl_aq_dva(iaq, IXL_DMA_DVA(idm));
4703
4704 param = (struct ixl_aq_rss_lut_param *)iaq->iaq_param;
4705 vsi_id = (sc->sc_vsi_number << IXL_AQ_RSSLUT_VSI_ID_SHIFT) |
4706 IXL_AQ_RSSLUT_VSI_VALID;
4707 param->vsi_id = htole16(vsi_id);
4708 param->flags = htole16(IXL_AQ_RSSLUT_TABLE_TYPE_PF <<
4709 IXL_AQ_RSSLUT_TABLE_TYPE_SHIFT);
4710
4711 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
4712 IXL_DMA_LEN(idm), BUS_DMASYNC_PREWRITE);
4713
4714 rv = ixl_atq_exec(sc, &iatq);
4715
4716 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
4717 IXL_DMA_LEN(idm), BUS_DMASYNC_POSTWRITE);
4718
4719 if (rv != 0) {
4720 return ETIMEDOUT;
4721 }
4722
4723 if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK)) {
4724 return EIO;
4725 }
4726
4727 return 0;
4728 }
4729
4730 static int
4731 ixl_register_rss_key(struct ixl_softc *sc)
4732 {
4733 uint32_t rss_seed[IXL_RSS_KEY_SIZE_REG];
4734 int rv;
4735 size_t i;
4736
4737 ixl_get_default_rss_key(rss_seed, sizeof(rss_seed));
4738
4739 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RSS)) {
4740 rv = ixl_set_rss_key(sc, (uint8_t*)rss_seed,
4741 sizeof(rss_seed));
4742 } else {
4743 rv = 0;
4744 for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
4745 ixl_wr_rx_csr(sc, I40E_PFQF_HKEY(i), rss_seed[i]);
4746 }
4747 }
4748
4749 return rv;
4750 }
4751
4752 static void
4753 ixl_register_rss_pctype(struct ixl_softc *sc)
4754 {
4755 uint64_t set_hena = 0;
4756 uint32_t hena0, hena1;
4757
4758 /*
4759 * We use TCP/UDP with IPv4/IPv6 by default.
4760 	 * Note: the device cannot compute the RSS hash from the IP
4761 	 * header alone for TCP/UDP packets.
4762 */
4763 if (sc->sc_mac_type == I40E_MAC_X722)
4764 set_hena = IXL_RSS_HENA_DEFAULT_X722;
4765 else
4766 set_hena = IXL_RSS_HENA_DEFAULT_XL710;
4767
4768 hena0 = ixl_rd_rx_csr(sc, I40E_PFQF_HENA(0));
4769 hena1 = ixl_rd_rx_csr(sc, I40E_PFQF_HENA(1));
4770
4771 SET(hena0, set_hena);
4772 SET(hena1, set_hena >> 32);
4773
4774 ixl_wr_rx_csr(sc, I40E_PFQF_HENA(0), hena0);
4775 ixl_wr_rx_csr(sc, I40E_PFQF_HENA(1), hena1);
4776 }
4777
4778 static int
4779 ixl_register_rss_hlut(struct ixl_softc *sc)
4780 {
4781 unsigned int qid;
4782 uint8_t hlut_buf[512], lut_mask;
4783 uint32_t *hluts;
4784 size_t i, hluts_num;
4785 int rv;
4786
4787 lut_mask = (0x01 << sc->sc_rss_table_entry_width) - 1;
4788
4789 for (i = 0; i < sc->sc_rss_table_size; i++) {
4790 qid = i % sc->sc_nqueue_pairs;
4791 hlut_buf[i] = qid & lut_mask;
4792 }
4793
4794 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RSS)) {
4795 rv = ixl_set_rss_lut(sc, hlut_buf, sizeof(hlut_buf));
4796 } else {
4797 rv = 0;
4798 hluts = (uint32_t *)hlut_buf;
4799 hluts_num = sc->sc_rss_table_size >> 2;
4800 for (i = 0; i < hluts_num; i++) {
4801 ixl_wr(sc, I40E_PFQF_HLUT(i), hluts[i]);
4802 }
4803 ixl_flush(sc);
4804 }
4805
4806 return rv;
4807 }
4808
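/*
 * Program the RSS configuration: the hash key and lookup table go
 * through the admin queue when IXL_SC_AQ_FLAG_RSS is set and are
 * written to the PF registers directly otherwise; the HENA packet
 * classifier enables are always set via register writes.
 */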
4809 static void
4810 ixl_config_rss(struct ixl_softc *sc)
4811 {
4812
4813 KASSERT(mutex_owned(&sc->sc_cfg_lock));
4814
4815 ixl_register_rss_key(sc);
4816 ixl_register_rss_pctype(sc);
4817 ixl_register_rss_hlut(sc);
4818 }
4819
4820 static const struct ixl_phy_type *
4821 ixl_search_phy_type(uint8_t phy_type)
4822 {
4823 const struct ixl_phy_type *itype;
4824 uint64_t mask;
4825 unsigned int i;
4826
4827 if (phy_type >= 64)
4828 return NULL;
4829
4830 mask = 1ULL << phy_type;
4831
4832 for (i = 0; i < __arraycount(ixl_phy_type_map); i++) {
4833 itype = &ixl_phy_type_map[i];
4834
4835 if (ISSET(itype->phy_type, mask))
4836 return itype;
4837 }
4838
4839 return NULL;
4840 }
4841
4842 static uint64_t
4843 ixl_search_link_speed(uint8_t link_speed)
4844 {
4845 const struct ixl_speed_type *type;
4846 unsigned int i;
4847
4848 for (i = 0; i < __arraycount(ixl_speed_type_map); i++) {
4849 type = &ixl_speed_type_map[i];
4850
4851 if (ISSET(type->dev_speed, link_speed))
4852 return type->net_speed;
4853 }
4854
4855 return 0;
4856 }
4857
4858 static uint8_t
4859 ixl_search_baudrate(uint64_t baudrate)
4860 {
4861 const struct ixl_speed_type *type;
4862 unsigned int i;
4863
4864 for (i = 0; i < __arraycount(ixl_speed_type_map); i++) {
4865 type = &ixl_speed_type_map[i];
4866
4867 if (type->net_speed == baudrate) {
4868 return type->dev_speed;
4869 }
4870 }
4871
4872 return 0;
4873 }
4874
4875 static int
4876 ixl_restart_an(struct ixl_softc *sc)
4877 {
4878 struct ixl_aq_desc iaq;
4879
4880 memset(&iaq, 0, sizeof(iaq));
4881 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_RESTART_AN);
4882 iaq.iaq_param[0] =
4883 htole32(IXL_AQ_PHY_RESTART_AN | IXL_AQ_PHY_LINK_ENABLE);
4884
4885 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4886 aprint_error_dev(sc->sc_dev, "RESTART AN timeout\n");
4887 return -1;
4888 }
4889 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4890 aprint_error_dev(sc->sc_dev, "RESTART AN error\n");
4891 return -1;
4892 }
4893
4894 return 0;
4895 }
4896
4897 static int
4898 ixl_add_macvlan(struct ixl_softc *sc, const uint8_t *macaddr,
4899 uint16_t vlan, uint16_t flags)
4900 {
4901 struct ixl_aq_desc iaq;
4902 struct ixl_aq_add_macvlan *param;
4903 struct ixl_aq_add_macvlan_elem *elem;
4904
4905 memset(&iaq, 0, sizeof(iaq));
4906 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4907 iaq.iaq_opcode = htole16(IXL_AQ_OP_ADD_MACVLAN);
4908 iaq.iaq_datalen = htole16(sizeof(*elem));
4909 ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));
4910
4911 param = (struct ixl_aq_add_macvlan *)&iaq.iaq_param;
4912 param->num_addrs = htole16(1);
4913 param->seid0 = htole16(0x8000) | sc->sc_seid;
4914 param->seid1 = 0;
4915 param->seid2 = 0;
4916
4917 elem = IXL_DMA_KVA(&sc->sc_scratch);
4918 memset(elem, 0, sizeof(*elem));
4919 memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
4920 elem->flags = htole16(IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH | flags);
4921 elem->vlan = htole16(vlan);
4922
4923 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4924 return IXL_AQ_RC_EINVAL;
4925 }
4926
4927 switch (le16toh(iaq.iaq_retval)) {
4928 case IXL_AQ_RC_OK:
4929 break;
4930 case IXL_AQ_RC_ENOSPC:
4931 return ENOSPC;
4932 case IXL_AQ_RC_ENOENT:
4933 return ENOENT;
4934 case IXL_AQ_RC_EACCES:
4935 return EACCES;
4936 case IXL_AQ_RC_EEXIST:
4937 return EEXIST;
4938 case IXL_AQ_RC_EINVAL:
4939 return EINVAL;
4940 default:
4941 return EIO;
4942 }
4943
4944 return 0;
4945 }
4946
4947 static int
4948 ixl_remove_macvlan(struct ixl_softc *sc, const uint8_t *macaddr,
4949 uint16_t vlan, uint16_t flags)
4950 {
4951 struct ixl_aq_desc iaq;
4952 struct ixl_aq_remove_macvlan *param;
4953 struct ixl_aq_remove_macvlan_elem *elem;
4954
4955 memset(&iaq, 0, sizeof(iaq));
4956 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4957 iaq.iaq_opcode = htole16(IXL_AQ_OP_REMOVE_MACVLAN);
4958 iaq.iaq_datalen = htole16(sizeof(*elem));
4959 ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));
4960
4961 param = (struct ixl_aq_remove_macvlan *)&iaq.iaq_param;
4962 param->num_addrs = htole16(1);
4963 param->seid0 = htole16(0x8000) | sc->sc_seid;
4964 param->seid1 = 0;
4965 param->seid2 = 0;
4966
4967 elem = IXL_DMA_KVA(&sc->sc_scratch);
4968 memset(elem, 0, sizeof(*elem));
4969 memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
4970 elem->flags = htole16(IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH | flags);
4971 elem->vlan = htole16(vlan);
4972
4973 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4974 return EINVAL;
4975 }
4976
4977 switch (le16toh(iaq.iaq_retval)) {
4978 case IXL_AQ_RC_OK:
4979 break;
4980 case IXL_AQ_RC_ENOENT:
4981 return ENOENT;
4982 case IXL_AQ_RC_EACCES:
4983 return EACCES;
4984 case IXL_AQ_RC_EINVAL:
4985 return EINVAL;
4986 default:
4987 return EIO;
4988 }
4989
4990 return 0;
4991 }
4992
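/*
 * Set up the Host Memory Cache (HMC) backing store.  Object sizes and
 * counts for the LAN Tx/Rx (and FCoE) context types are read from the
 * hardware, a DMA region large enough to hold them is allocated, and
 * the page and segment descriptor tables that let the hardware reach
 * that region are programmed through the PFHMC registers.
 */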
4993 static int
4994 ixl_hmc(struct ixl_softc *sc)
4995 {
4996 struct {
4997 uint32_t count;
4998 uint32_t minsize;
4999 bus_size_t objsiz;
5000 bus_size_t setoff;
5001 bus_size_t setcnt;
5002 } regs[] = {
5003 {
5004 0,
5005 IXL_HMC_TXQ_MINSIZE,
5006 I40E_GLHMC_LANTXOBJSZ,
5007 I40E_GLHMC_LANTXBASE(sc->sc_pf_id),
5008 I40E_GLHMC_LANTXCNT(sc->sc_pf_id),
5009 },
5010 {
5011 0,
5012 IXL_HMC_RXQ_MINSIZE,
5013 I40E_GLHMC_LANRXOBJSZ,
5014 I40E_GLHMC_LANRXBASE(sc->sc_pf_id),
5015 I40E_GLHMC_LANRXCNT(sc->sc_pf_id),
5016 },
5017 {
5018 0,
5019 0,
5020 I40E_GLHMC_FCOEDDPOBJSZ,
5021 I40E_GLHMC_FCOEDDPBASE(sc->sc_pf_id),
5022 I40E_GLHMC_FCOEDDPCNT(sc->sc_pf_id),
5023 },
5024 {
5025 0,
5026 0,
5027 I40E_GLHMC_FCOEFOBJSZ,
5028 I40E_GLHMC_FCOEFBASE(sc->sc_pf_id),
5029 I40E_GLHMC_FCOEFCNT(sc->sc_pf_id),
5030 },
5031 };
5032 struct ixl_hmc_entry *e;
5033 uint64_t size, dva;
5034 uint8_t *kva;
5035 uint64_t *sdpage;
5036 unsigned int i;
5037 int npages, tables;
5038 uint32_t reg;
5039
5040 CTASSERT(__arraycount(regs) <= __arraycount(sc->sc_hmc_entries));
5041
5042 regs[IXL_HMC_LAN_TX].count = regs[IXL_HMC_LAN_RX].count =
5043 ixl_rd(sc, I40E_GLHMC_LANQMAX);
5044
5045 size = 0;
5046 for (i = 0; i < __arraycount(regs); i++) {
5047 e = &sc->sc_hmc_entries[i];
5048
5049 e->hmc_count = regs[i].count;
5050 reg = ixl_rd(sc, regs[i].objsiz);
5051 e->hmc_size = IXL_BIT_ULL(0x3F & reg);
5052 e->hmc_base = size;
5053
5054 if ((e->hmc_size * 8) < regs[i].minsize) {
5055 aprint_error_dev(sc->sc_dev,
5056 "kernel hmc entry is too big\n");
5057 return -1;
5058 }
5059
5060 size += roundup(e->hmc_size * e->hmc_count, IXL_HMC_ROUNDUP);
5061 }
5062 size = roundup(size, IXL_HMC_PGSIZE);
5063 npages = size / IXL_HMC_PGSIZE;
5064
5065 tables = roundup(size, IXL_HMC_L2SZ) / IXL_HMC_L2SZ;
5066
5067 if (ixl_dmamem_alloc(sc, &sc->sc_hmc_pd, size, IXL_HMC_PGSIZE) != 0) {
5068 aprint_error_dev(sc->sc_dev,
5069 "unable to allocate hmc pd memory\n");
5070 return -1;
5071 }
5072
5073 if (ixl_dmamem_alloc(sc, &sc->sc_hmc_sd, tables * IXL_HMC_PGSIZE,
5074 IXL_HMC_PGSIZE) != 0) {
5075 aprint_error_dev(sc->sc_dev,
5076 "unable to allocate hmc sd memory\n");
5077 ixl_dmamem_free(sc, &sc->sc_hmc_pd);
5078 return -1;
5079 }
5080
5081 kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
5082 memset(kva, 0, IXL_DMA_LEN(&sc->sc_hmc_pd));
5083
5084 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
5085 0, IXL_DMA_LEN(&sc->sc_hmc_pd),
5086 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5087
5088 dva = IXL_DMA_DVA(&sc->sc_hmc_pd);
5089 sdpage = IXL_DMA_KVA(&sc->sc_hmc_sd);
5090 memset(sdpage, 0, IXL_DMA_LEN(&sc->sc_hmc_sd));
5091
5092 for (i = 0; (int)i < npages; i++) {
5093 *sdpage = htole64(dva | IXL_HMC_PDVALID);
5094 sdpage++;
5095
5096 dva += IXL_HMC_PGSIZE;
5097 }
5098
5099 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_sd),
5100 0, IXL_DMA_LEN(&sc->sc_hmc_sd),
5101 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5102
5103 dva = IXL_DMA_DVA(&sc->sc_hmc_sd);
5104 for (i = 0; (int)i < tables; i++) {
5105 uint32_t count;
5106
5107 KASSERT(npages >= 0);
5108
5109 count = ((unsigned int)npages > IXL_HMC_PGS) ?
5110 IXL_HMC_PGS : (unsigned int)npages;
5111
5112 ixl_wr(sc, I40E_PFHMC_SDDATAHIGH, dva >> 32);
5113 ixl_wr(sc, I40E_PFHMC_SDDATALOW, dva |
5114 (count << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
5115 (1U << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT));
5116 ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
5117 ixl_wr(sc, I40E_PFHMC_SDCMD,
5118 (1U << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | i);
5119
5120 npages -= IXL_HMC_PGS;
5121 dva += IXL_HMC_PGSIZE;
5122 }
5123
5124 for (i = 0; i < __arraycount(regs); i++) {
5125 e = &sc->sc_hmc_entries[i];
5126
5127 ixl_wr(sc, regs[i].setoff, e->hmc_base / IXL_HMC_ROUNDUP);
5128 ixl_wr(sc, regs[i].setcnt, e->hmc_count);
5129 }
5130
5131 return 0;
5132 }
5133
5134 static void
5135 ixl_hmc_free(struct ixl_softc *sc)
5136 {
5137 ixl_dmamem_free(sc, &sc->sc_hmc_sd);
5138 ixl_dmamem_free(sc, &sc->sc_hmc_pd);
5139 }
5140
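/*
 * Pack a host structure into the hardware's HMC context layout.  Each
 * ixl_hmc_pack entry gives a source byte offset, a destination bit
 * position (lsb) and a width; fields are shifted into place a byte at
 * a time so arbitrarily aligned and sized fields can be ORed into the
 * destination buffer.
 */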
5141 static void
5142 ixl_hmc_pack(void *d, const void *s, const struct ixl_hmc_pack *packing,
5143 unsigned int npacking)
5144 {
5145 uint8_t *dst = d;
5146 const uint8_t *src = s;
5147 unsigned int i;
5148
5149 for (i = 0; i < npacking; i++) {
5150 const struct ixl_hmc_pack *pack = &packing[i];
5151 unsigned int offset = pack->lsb / 8;
5152 unsigned int align = pack->lsb % 8;
5153 const uint8_t *in = src + pack->offset;
5154 uint8_t *out = dst + offset;
5155 int width = pack->width;
5156 unsigned int inbits = 0;
5157
5158 if (align) {
5159 inbits = (*in++) << align;
5160 *out++ |= (inbits & 0xff);
5161 inbits >>= 8;
5162
5163 width -= 8 - align;
5164 }
5165
5166 while (width >= 8) {
5167 inbits |= (*in++) << align;
5168 *out++ = (inbits & 0xff);
5169 inbits >>= 8;
5170
5171 width -= 8;
5172 }
5173
5174 if (width > 0) {
5175 inbits |= (*in) << align;
5176 *out |= (inbits & ((1 << width) - 1));
5177 }
5178 }
5179 }
5180
5181 static struct ixl_aq_buf *
5182 ixl_aqb_alloc(struct ixl_softc *sc)
5183 {
5184 struct ixl_aq_buf *aqb;
5185
5186 aqb = kmem_alloc(sizeof(*aqb), KM_SLEEP);
5187
5188 aqb->aqb_size = IXL_AQ_BUFLEN;
5189
5190 if (bus_dmamap_create(sc->sc_dmat, aqb->aqb_size, 1,
5191 aqb->aqb_size, 0,
5192 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &aqb->aqb_map) != 0)
5193 goto free;
5194 if (bus_dmamem_alloc(sc->sc_dmat, aqb->aqb_size,
5195 IXL_AQ_ALIGN, 0, &aqb->aqb_seg, 1, &aqb->aqb_nsegs,
5196 BUS_DMA_WAITOK) != 0)
5197 goto destroy;
5198 if (bus_dmamem_map(sc->sc_dmat, &aqb->aqb_seg, aqb->aqb_nsegs,
5199 aqb->aqb_size, &aqb->aqb_data, BUS_DMA_WAITOK) != 0)
5200 goto dma_free;
5201 if (bus_dmamap_load(sc->sc_dmat, aqb->aqb_map, aqb->aqb_data,
5202 aqb->aqb_size, NULL, BUS_DMA_WAITOK) != 0)
5203 goto unmap;
5204
5205 return aqb;
5206 unmap:
5207 bus_dmamem_unmap(sc->sc_dmat, aqb->aqb_data, aqb->aqb_size);
5208 dma_free:
5209 bus_dmamem_free(sc->sc_dmat, &aqb->aqb_seg, 1);
5210 destroy:
5211 bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
5212 free:
5213 kmem_free(aqb, sizeof(*aqb));
5214
5215 return NULL;
5216 }
5217
5218 static void
5219 ixl_aqb_free(struct ixl_softc *sc, struct ixl_aq_buf *aqb)
5220 {
5221
5222 bus_dmamap_unload(sc->sc_dmat, aqb->aqb_map);
5223 bus_dmamem_unmap(sc->sc_dmat, aqb->aqb_data, aqb->aqb_size);
5224 bus_dmamem_free(sc->sc_dmat, &aqb->aqb_seg, 1);
5225 bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
5226 kmem_free(aqb, sizeof(*aqb));
5227 }
5228
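/*
 * Refill the admin receive queue (ARQ).  Each unrefreshed slot gets a
 * buffer taken from the idle list (or freshly allocated) and a
 * descriptor pointing at it; the tail register is only written when at
 * least one new slot has been posted.
 */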
5229 static int
5230 ixl_arq_fill(struct ixl_softc *sc)
5231 {
5232 struct ixl_aq_buf *aqb;
5233 struct ixl_aq_desc *arq, *iaq;
5234 unsigned int prod = sc->sc_arq_prod;
5235 unsigned int n;
5236 int post = 0;
5237
5238 n = ixl_rxr_unrefreshed(sc->sc_arq_prod, sc->sc_arq_cons,
5239 IXL_AQ_NUM);
5240 arq = IXL_DMA_KVA(&sc->sc_arq);
5241
5242 if (__predict_false(n <= 0))
5243 return 0;
5244
5245 do {
5246 aqb = sc->sc_arq_live[prod];
5247 iaq = &arq[prod];
5248
5249 if (aqb == NULL) {
5250 aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle);
5251 if (aqb != NULL) {
5252 SIMPLEQ_REMOVE(&sc->sc_arq_idle, aqb,
5253 ixl_aq_buf, aqb_entry);
5254 } else if ((aqb = ixl_aqb_alloc(sc)) == NULL) {
5255 break;
5256 }
5257
5258 sc->sc_arq_live[prod] = aqb;
5259 memset(aqb->aqb_data, 0, aqb->aqb_size);
5260
5261 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0,
5262 aqb->aqb_size, BUS_DMASYNC_PREREAD);
5263
5264 iaq->iaq_flags = htole16(IXL_AQ_BUF |
5265 (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ?
5266 IXL_AQ_LB : 0));
5267 iaq->iaq_opcode = 0;
5268 iaq->iaq_datalen = htole16(aqb->aqb_size);
5269 iaq->iaq_retval = 0;
5270 iaq->iaq_cookie = 0;
5271 iaq->iaq_param[0] = 0;
5272 iaq->iaq_param[1] = 0;
5273 ixl_aq_dva(iaq, aqb->aqb_map->dm_segs[0].ds_addr);
5274 }
5275
5276 prod++;
5277 prod &= IXL_AQ_MASK;
5278
5279 post = 1;
5280
5281 } while (--n);
5282
5283 if (post) {
5284 sc->sc_arq_prod = prod;
5285 ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
5286 }
5287
5288 return post;
5289 }
5290
5291 static void
5292 ixl_arq_unfill(struct ixl_softc *sc)
5293 {
5294 struct ixl_aq_buf *aqb;
5295 unsigned int i;
5296
5297 for (i = 0; i < __arraycount(sc->sc_arq_live); i++) {
5298 aqb = sc->sc_arq_live[i];
5299 if (aqb == NULL)
5300 continue;
5301
5302 sc->sc_arq_live[i] = NULL;
5303 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, aqb->aqb_size,
5304 BUS_DMASYNC_POSTREAD);
5305 ixl_aqb_free(sc, aqb);
5306 }
5307
5308 while ((aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle)) != NULL) {
5309 SIMPLEQ_REMOVE(&sc->sc_arq_idle, aqb,
5310 ixl_aq_buf, aqb_entry);
5311 ixl_aqb_free(sc, aqb);
5312 }
5313 }
5314
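/*
 * Quiesce the function before reset: mask all PF and VF interrupts,
 * terminate the interrupt linked lists, pre-announce the upcoming Tx
 * queue disables and then disable every Tx/Rx queue assigned to this
 * PF.
 */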
5315 static void
5316 ixl_clear_hw(struct ixl_softc *sc)
5317 {
5318 uint32_t num_queues, base_queue;
5319 uint32_t num_pf_int;
5320 uint32_t num_vf_int;
5321 uint32_t num_vfs;
5322 uint32_t i, j;
5323 uint32_t val;
5324 uint32_t eol = 0x7ff;
5325
5326 /* get number of interrupts, queues, and vfs */
5327 val = ixl_rd(sc, I40E_GLPCI_CNF2);
5328 num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
5329 I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
5330 num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
5331 I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
5332
5333 val = ixl_rd(sc, I40E_PFLAN_QALLOC);
5334 base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
5335 I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
5336 j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
5337 I40E_PFLAN_QALLOC_LASTQ_SHIFT;
5338 if (val & I40E_PFLAN_QALLOC_VALID_MASK)
5339 num_queues = (j - base_queue) + 1;
5340 else
5341 num_queues = 0;
5342
5343 val = ixl_rd(sc, I40E_PF_VT_PFALLOC);
5344 i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
5345 I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
5346 j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
5347 I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
5348 if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
5349 num_vfs = (j - i) + 1;
5350 else
5351 num_vfs = 0;
5352
5353 /* stop all the interrupts */
5354 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0);
5355 ixl_flush(sc);
5356 val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
5357 for (i = 0; i < num_pf_int - 2; i++)
5358 ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), val);
5359 ixl_flush(sc);
5360
5361 /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
5362 val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
5363 ixl_wr(sc, I40E_PFINT_LNKLST0, val);
5364 for (i = 0; i < num_pf_int - 2; i++)
5365 ixl_wr(sc, I40E_PFINT_LNKLSTN(i), val);
5366 val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
5367 for (i = 0; i < num_vfs; i++)
5368 ixl_wr(sc, I40E_VPINT_LNKLST0(i), val);
5369 for (i = 0; i < num_vf_int - 2; i++)
5370 ixl_wr(sc, I40E_VPINT_LNKLSTN(i), val);
5371
5372 /* warn the HW of the coming Tx disables */
5373 for (i = 0; i < num_queues; i++) {
5374 uint32_t abs_queue_idx = base_queue + i;
5375 uint32_t reg_block = 0;
5376
5377 if (abs_queue_idx >= 128) {
5378 reg_block = abs_queue_idx / 128;
5379 abs_queue_idx %= 128;
5380 }
5381
5382 val = ixl_rd(sc, I40E_GLLAN_TXPRE_QDIS(reg_block));
5383 val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
5384 val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
5385 val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
5386
5387 ixl_wr(sc, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
5388 }
5389 delaymsec(400);
5390
5391 /* stop all the queues */
5392 for (i = 0; i < num_queues; i++) {
5393 ixl_wr(sc, I40E_QINT_TQCTL(i), 0);
5394 ixl_wr(sc, I40E_QTX_ENA(i), 0);
5395 ixl_wr(sc, I40E_QINT_RQCTL(i), 0);
5396 ixl_wr(sc, I40E_QRX_ENA(i), 0);
5397 }
5398
5399 /* short wait for all queue disables to settle */
5400 delaymsec(50);
5401 }
5402
5403 static int
5404 ixl_pf_reset(struct ixl_softc *sc)
5405 {
5406 uint32_t cnt = 0;
5407 uint32_t cnt1 = 0;
5408 uint32_t reg = 0, reg0 = 0;
5409 uint32_t grst_del;
5410
5411 /*
5412 * Poll for Global Reset steady state in case of recent GRST.
5413 * The grst delay value is in 100ms units, and we'll wait a
5414 * couple counts longer to be sure we don't just miss the end.
5415 */
5416 grst_del = ixl_rd(sc, I40E_GLGEN_RSTCTL);
5417 grst_del &= I40E_GLGEN_RSTCTL_GRSTDEL_MASK;
5418 grst_del >>= I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
5419
5420 grst_del = grst_del * 20;
5421
5422 for (cnt = 0; cnt < grst_del; cnt++) {
5423 reg = ixl_rd(sc, I40E_GLGEN_RSTAT);
5424 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
5425 break;
5426 delaymsec(100);
5427 }
5428 if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
5429 aprint_error(", Global reset polling failed to complete\n");
5430 return -1;
5431 }
5432
5433 /* Now Wait for the FW to be ready */
5434 for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
5435 reg = ixl_rd(sc, I40E_GLNVM_ULD);
5436 reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5437 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
5438 if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5439 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))
5440 break;
5441
5442 delaymsec(10);
5443 }
5444 if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5445 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
5446 aprint_error(", wait for FW Reset complete timed out "
5447 "(I40E_GLNVM_ULD = 0x%x)\n", reg);
5448 return -1;
5449 }
5450
5451 /*
5452 * If there was a Global Reset in progress when we got here,
5453 * we don't need to do the PF Reset
5454 */
5455 if (cnt == 0) {
5456 reg = ixl_rd(sc, I40E_PFGEN_CTRL);
5457 ixl_wr(sc, I40E_PFGEN_CTRL, reg | I40E_PFGEN_CTRL_PFSWR_MASK);
5458 for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) {
5459 reg = ixl_rd(sc, I40E_PFGEN_CTRL);
5460 if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
5461 break;
5462 delaymsec(1);
5463
5464 reg0 = ixl_rd(sc, I40E_GLGEN_RSTAT);
5465 if (reg0 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
5466 aprint_error(", Core reset upcoming."
5467 				    " Skipping PF reset request\n");
5468 return -1;
5469 }
5470 }
5471 if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
5472 			aprint_error(", PF reset polling failed to complete "
5473 			    "(I40E_PFGEN_CTRL = 0x%x)\n", reg);
5474 return -1;
5475 }
5476 }
5477
5478 return 0;
5479 }
5480
5481 static int
5482 ixl_dmamem_alloc(struct ixl_softc *sc, struct ixl_dmamem *ixm,
5483 bus_size_t size, bus_size_t align)
5484 {
5485 ixm->ixm_size = size;
5486
5487 if (bus_dmamap_create(sc->sc_dmat, ixm->ixm_size, 1,
5488 ixm->ixm_size, 0,
5489 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
5490 &ixm->ixm_map) != 0)
5491 return 1;
5492 if (bus_dmamem_alloc(sc->sc_dmat, ixm->ixm_size,
5493 align, 0, &ixm->ixm_seg, 1, &ixm->ixm_nsegs,
5494 BUS_DMA_WAITOK) != 0)
5495 goto destroy;
5496 if (bus_dmamem_map(sc->sc_dmat, &ixm->ixm_seg, ixm->ixm_nsegs,
5497 ixm->ixm_size, &ixm->ixm_kva, BUS_DMA_WAITOK) != 0)
5498 goto free;
5499 if (bus_dmamap_load(sc->sc_dmat, ixm->ixm_map, ixm->ixm_kva,
5500 ixm->ixm_size, NULL, BUS_DMA_WAITOK) != 0)
5501 goto unmap;
5502
5503 memset(ixm->ixm_kva, 0, ixm->ixm_size);
5504
5505 return 0;
5506 unmap:
5507 bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
5508 free:
5509 bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
5510 destroy:
5511 bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
5512 return 1;
5513 }
5514
5515 static void
5516 ixl_dmamem_free(struct ixl_softc *sc, struct ixl_dmamem *ixm)
5517 {
5518 bus_dmamap_unload(sc->sc_dmat, ixm->ixm_map);
5519 bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
5520 bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
5521 bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
5522 }
5523
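/*
 * Switch from the catch-all "ignore VLAN" MAC filters to per-VLAN
 * filtering: remove the wildcard entries for the unicast and broadcast
 * addresses and install perfect-match entries for VLAN 0 and for every
 * VLAN ID currently registered on the interface.
 */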
5524 static int
5525 ixl_setup_vlan_hwfilter(struct ixl_softc *sc)
5526 {
5527 struct ethercom *ec = &sc->sc_ec;
5528 struct vlanid_list *vlanidp;
5529 int rv;
5530
5531 ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
5532 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
5533 ixl_remove_macvlan(sc, etherbroadcastaddr, 0,
5534 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
5535
5536 rv = ixl_add_macvlan(sc, sc->sc_enaddr, 0,
5537 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5538 if (rv != 0)
5539 return rv;
5540 rv = ixl_add_macvlan(sc, etherbroadcastaddr, 0,
5541 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5542 if (rv != 0)
5543 return rv;
5544
5545 ETHER_LOCK(ec);
5546 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
5547 rv = ixl_add_macvlan(sc, sc->sc_enaddr,
5548 vlanidp->vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5549 if (rv != 0)
5550 break;
5551 rv = ixl_add_macvlan(sc, etherbroadcastaddr,
5552 vlanidp->vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5553 if (rv != 0)
5554 break;
5555 }
5556 ETHER_UNLOCK(ec);
5557
5558 return rv;
5559 }
5560
5561 static void
5562 ixl_teardown_vlan_hwfilter(struct ixl_softc *sc)
5563 {
5564 struct vlanid_list *vlanidp;
5565 struct ethercom *ec = &sc->sc_ec;
5566
5567 ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
5568 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5569 ixl_remove_macvlan(sc, etherbroadcastaddr, 0,
5570 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5571
5572 ETHER_LOCK(ec);
5573 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
5574 ixl_remove_macvlan(sc, sc->sc_enaddr,
5575 vlanidp->vid, IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5576 ixl_remove_macvlan(sc, etherbroadcastaddr,
5577 vlanidp->vid, IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5578 }
5579 ETHER_UNLOCK(ec);
5580
5581 ixl_add_macvlan(sc, sc->sc_enaddr, 0,
5582 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
5583 ixl_add_macvlan(sc, etherbroadcastaddr, 0,
5584 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
5585 }
5586
5587 static int
5588 ixl_update_macvlan(struct ixl_softc *sc)
5589 {
5590 int rv = 0;
5591 int next_ec_capenable = sc->sc_ec.ec_capenable;
5592
5593 if (ISSET(next_ec_capenable, ETHERCAP_VLAN_HWFILTER)) {
5594 rv = ixl_setup_vlan_hwfilter(sc);
5595 if (rv != 0)
5596 ixl_teardown_vlan_hwfilter(sc);
5597 } else {
5598 ixl_teardown_vlan_hwfilter(sc);
5599 }
5600
5601 return rv;
5602 }
5603
5604 static int
5605 ixl_ifflags_cb(struct ethercom *ec)
5606 {
5607 struct ifnet *ifp = &ec->ec_if;
5608 struct ixl_softc *sc = ifp->if_softc;
5609 int rv, change;
5610
5611 mutex_enter(&sc->sc_cfg_lock);
5612
5613 change = ec->ec_capenable ^ sc->sc_cur_ec_capenable;
5614
5615 if (ISSET(change, ETHERCAP_VLAN_HWTAGGING)) {
5616 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWTAGGING;
5617 rv = ENETRESET;
5618 goto out;
5619 }
5620
5621 if (ISSET(change, ETHERCAP_VLAN_HWFILTER)) {
5622 rv = ixl_update_macvlan(sc);
5623 if (rv == 0) {
5624 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWFILTER;
5625 } else {
5626 CLR(ec->ec_capenable, ETHERCAP_VLAN_HWFILTER);
5627 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER);
5628 }
5629 }
5630
5631 rv = ixl_iff(sc);
5632 out:
5633 mutex_exit(&sc->sc_cfg_lock);
5634
5635 return rv;
5636 }
5637
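/*
 * Translate a firmware link status response into ifmedia state: the
 * media type comes from the PHY type table, pause flags from the
 * autonegotiation info and if_baudrate from the reported link speed.
 * Returns the corresponding LINK_STATE_* value.
 */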
5638 static int
5639 ixl_set_link_status_locked(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
5640 {
5641 const struct ixl_aq_link_status *status;
5642 const struct ixl_phy_type *itype;
5643
5644 uint64_t ifm_active = IFM_ETHER;
5645 uint64_t ifm_status = IFM_AVALID;
5646 int link_state = LINK_STATE_DOWN;
5647 uint64_t baudrate = 0;
5648
5649 status = (const struct ixl_aq_link_status *)iaq->iaq_param;
5650 if (!ISSET(status->link_info, IXL_AQ_LINK_UP_FUNCTION)) {
5651 ifm_active |= IFM_NONE;
5652 goto done;
5653 }
5654
5655 ifm_active |= IFM_FDX;
5656 ifm_status |= IFM_ACTIVE;
5657 link_state = LINK_STATE_UP;
5658
5659 itype = ixl_search_phy_type(status->phy_type);
5660 if (itype != NULL)
5661 ifm_active |= itype->ifm_type;
5662
5663 if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_TX))
5664 ifm_active |= IFM_ETH_TXPAUSE;
5665 if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_RX))
5666 ifm_active |= IFM_ETH_RXPAUSE;
5667
5668 baudrate = ixl_search_link_speed(status->link_speed);
5669
5670 done:
5671 	/* sc->sc_cfg_lock held except during attach */
5672 sc->sc_media_active = ifm_active;
5673 sc->sc_media_status = ifm_status;
5674
5675 sc->sc_ec.ec_if.if_baudrate = baudrate;
5676
5677 return link_state;
5678 }
5679
5680 static int
5681 ixl_establish_intx(struct ixl_softc *sc)
5682 {
5683 pci_chipset_tag_t pc = sc->sc_pa.pa_pc;
5684 pci_intr_handle_t *intr;
5685 char xnamebuf[32];
5686 char intrbuf[PCI_INTRSTR_LEN];
5687 char const *intrstr;
5688
5689 KASSERT(sc->sc_nintrs == 1);
5690
5691 intr = &sc->sc_ihp[0];
5692
5693 intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf));
5694 snprintf(xnamebuf, sizeof(xnamebuf), "%s:legacy",
5695 device_xname(sc->sc_dev));
5696
5697 sc->sc_ihs[0] = pci_intr_establish_xname(pc, *intr, IPL_NET, ixl_intr,
5698 sc, xnamebuf);
5699
5700 if (sc->sc_ihs[0] == NULL) {
5701 aprint_error_dev(sc->sc_dev,
5702 "unable to establish interrupt at %s\n", intrstr);
5703 return -1;
5704 }
5705
5706 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
5707 return 0;
5708 }
5709
5710 static int
5711 ixl_establish_msix(struct ixl_softc *sc)
5712 {
5713 pci_chipset_tag_t pc = sc->sc_pa.pa_pc;
5714 kcpuset_t *affinity;
5715 unsigned int vector = 0;
5716 unsigned int i;
5717 int affinity_to, r;
5718 char xnamebuf[32];
5719 char intrbuf[PCI_INTRSTR_LEN];
5720 char const *intrstr;
5721
5722 kcpuset_create(&affinity, false);
5723
5724 /* the "other" intr is mapped to vector 0 */
5725 vector = 0;
5726 intrstr = pci_intr_string(pc, sc->sc_ihp[vector],
5727 intrbuf, sizeof(intrbuf));
5728 snprintf(xnamebuf, sizeof(xnamebuf), "%s others",
5729 device_xname(sc->sc_dev));
5730 sc->sc_ihs[vector] = pci_intr_establish_xname(pc,
5731 sc->sc_ihp[vector], IPL_NET, ixl_other_intr,
5732 sc, xnamebuf);
5733 if (sc->sc_ihs[vector] == NULL) {
5734 aprint_error_dev(sc->sc_dev,
5735 "unable to establish interrupt at %s\n", intrstr);
5736 goto fail;
5737 }
5738
5739 aprint_normal_dev(sc->sc_dev, "other interrupt at %s", intrstr);
5740
5741 affinity_to = ncpu > (int)sc->sc_nqueue_pairs_max ? 1 : 0;
5742 affinity_to = (affinity_to + sc->sc_nqueue_pairs_max) % ncpu;
5743
5744 kcpuset_zero(affinity);
5745 kcpuset_set(affinity, affinity_to);
5746 r = interrupt_distribute(sc->sc_ihs[vector], affinity, NULL);
5747 if (r == 0) {
5748 aprint_normal(", affinity to %u", affinity_to);
5749 }
5750 aprint_normal("\n");
5751 vector++;
5752
5753 sc->sc_msix_vector_queue = vector;
5754 affinity_to = ncpu > (int)sc->sc_nqueue_pairs_max ? 1 : 0;
5755
5756 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
5757 intrstr = pci_intr_string(pc, sc->sc_ihp[vector],
5758 intrbuf, sizeof(intrbuf));
5759 snprintf(xnamebuf, sizeof(xnamebuf), "%s TXRX%d",
5760 device_xname(sc->sc_dev), i);
5761
5762 sc->sc_ihs[vector] = pci_intr_establish_xname(pc,
5763 sc->sc_ihp[vector], IPL_NET, ixl_queue_intr,
5764 (void *)&sc->sc_qps[i], xnamebuf);
5765
5766 if (sc->sc_ihs[vector] == NULL) {
5767 aprint_error_dev(sc->sc_dev,
5768 "unable to establish interrupt at %s\n", intrstr);
5769 goto fail;
5770 }
5771
5772 aprint_normal_dev(sc->sc_dev,
5773 "for TXRX%d interrupt at %s", i, intrstr);
5774
5775 kcpuset_zero(affinity);
5776 kcpuset_set(affinity, affinity_to);
5777 r = interrupt_distribute(sc->sc_ihs[vector], affinity, NULL);
5778 if (r == 0) {
5779 aprint_normal(", affinity to %u", affinity_to);
5780 affinity_to = (affinity_to + 1) % ncpu;
5781 }
5782 aprint_normal("\n");
5783 vector++;
5784 }
5785
5786 kcpuset_destroy(affinity);
5787
5788 return 0;
5789 fail:
5790 for (i = 0; i < vector; i++) {
5791 pci_intr_disestablish(pc, sc->sc_ihs[i]);
5792 }
5793
5794 sc->sc_msix_vector_queue = 0;
5796 kcpuset_destroy(affinity);
5797
5798 return -1;
5799 }
5800
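/*
 * Bind each queue pair to an interrupt vector.  With MSI-X, every pair
 * gets its own vector (starting at sc_msix_vector_queue) and its own
 * Rx/Tx ITR settings; otherwise all queues are chained onto the single
 * INTx/MSI vector through the PFINT_LNKLST registers.
 */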
5801 static void
5802 ixl_config_queue_intr(struct ixl_softc *sc)
5803 {
5804 unsigned int i, vector;
5805
5806 if (sc->sc_intrtype == PCI_INTR_TYPE_MSIX) {
5807 vector = sc->sc_msix_vector_queue;
5808 } else {
5809 vector = I40E_INTR_NOTX_INTR;
5810
5811 ixl_wr(sc, I40E_PFINT_LNKLST0,
5812 (I40E_INTR_NOTX_QUEUE <<
5813 I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
5814 (I40E_QUEUE_TYPE_RX <<
5815 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
5816 }
5817
5818 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
5819 ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), 0);
5820 ixl_flush(sc);
5821
5822 ixl_wr(sc, I40E_PFINT_LNKLSTN(i),
5823 ((i) << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
5824 (I40E_QUEUE_TYPE_RX <<
5825 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
5826
5827 ixl_wr(sc, I40E_QINT_RQCTL(i),
5828 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
5829 (I40E_ITR_INDEX_RX <<
5830 I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
5831 (I40E_INTR_NOTX_RX_QUEUE <<
5832 I40E_QINT_RQCTL_MSIX0_INDX_SHIFT) |
5833 (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
5834 (I40E_QUEUE_TYPE_TX <<
5835 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
5836 I40E_QINT_RQCTL_CAUSE_ENA_MASK);
5837
5838 ixl_wr(sc, I40E_QINT_TQCTL(i),
5839 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
5840 (I40E_ITR_INDEX_TX <<
5841 I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
5842 (I40E_INTR_NOTX_TX_QUEUE <<
5843 I40E_QINT_TQCTL_MSIX0_INDX_SHIFT) |
5844 (I40E_QUEUE_TYPE_EOL <<
5845 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
5846 (I40E_QUEUE_TYPE_RX <<
5847 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) |
5848 I40E_QINT_TQCTL_CAUSE_ENA_MASK);
5849
5850 if (sc->sc_intrtype == PCI_INTR_TYPE_MSIX) {
5851 ixl_wr(sc, I40E_PFINT_ITRN(I40E_ITR_INDEX_RX, i),
5852 sc->sc_itr_rx);
5853 ixl_wr(sc, I40E_PFINT_ITRN(I40E_ITR_INDEX_TX, i),
5854 sc->sc_itr_tx);
5855 vector++;
5856 }
5857 }
5858 ixl_flush(sc);
5859
5860 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_RX), sc->sc_itr_rx);
5861 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_TX), sc->sc_itr_tx);
5862 ixl_flush(sc);
5863 }
5864
5865 static void
5866 ixl_config_other_intr(struct ixl_softc *sc)
5867 {
5868 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0);
5869 (void)ixl_rd(sc, I40E_PFINT_ICR0);
5870
5871 ixl_wr(sc, I40E_PFINT_ICR0_ENA,
5872 I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
5873 I40E_PFINT_ICR0_ENA_GRST_MASK |
5874 I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
5875 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
5876 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
5877 I40E_PFINT_ICR0_ENA_VFLR_MASK |
5878 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
5879 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
5880 I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK);
5881
5882 ixl_wr(sc, I40E_PFINT_LNKLST0, 0x7FF);
5883 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_OTHER), 0);
5884 ixl_wr(sc, I40E_PFINT_STAT_CTL0,
5885 (I40E_ITR_INDEX_OTHER <<
5886 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT));
5887 ixl_flush(sc);
5888 }
5889
5890 static int
5891 ixl_setup_interrupts(struct ixl_softc *sc)
5892 {
5893 struct pci_attach_args *pa = &sc->sc_pa;
5894 pci_intr_type_t max_type, intr_type;
5895 int counts[PCI_INTR_TYPE_SIZE];
5896 int error;
5897 unsigned int i;
5898 bool retry;
5899
5900 memset(counts, 0, sizeof(counts));
5901 max_type = PCI_INTR_TYPE_MSIX;
5902 /* QPs + other interrupt */
5903 counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueue_pairs_max + 1;
5904 counts[PCI_INTR_TYPE_INTX] = 1;
5905
5906 if (ixl_param_nomsix)
5907 counts[PCI_INTR_TYPE_MSIX] = 0;
5908
5909 do {
5910 retry = false;
5911 error = pci_intr_alloc(pa, &sc->sc_ihp, counts, max_type);
5912 if (error != 0) {
5913 aprint_error_dev(sc->sc_dev,
5914 "couldn't map interrupt\n");
5915 break;
5916 }
5917
5918 intr_type = pci_intr_type(pa->pa_pc, sc->sc_ihp[0]);
5919 sc->sc_nintrs = counts[intr_type];
5920 KASSERT(sc->sc_nintrs > 0);
5921
5922 for (i = 0; i < sc->sc_nintrs; i++) {
5923 pci_intr_setattr(pa->pa_pc, &sc->sc_ihp[i],
5924 PCI_INTR_MPSAFE, true);
5925 }
5926
5927 sc->sc_ihs = kmem_zalloc(sizeof(sc->sc_ihs[0]) * sc->sc_nintrs,
5928 KM_SLEEP);
5929
5930 if (intr_type == PCI_INTR_TYPE_MSIX) {
5931 error = ixl_establish_msix(sc);
5932 if (error) {
5933 counts[PCI_INTR_TYPE_MSIX] = 0;
5934 retry = true;
5935 }
5936 } else if (intr_type == PCI_INTR_TYPE_INTX) {
5937 error = ixl_establish_intx(sc);
5938 } else {
5939 error = -1;
5940 }
5941
5942 if (error) {
5943 kmem_free(sc->sc_ihs,
5944 sizeof(sc->sc_ihs[0]) * sc->sc_nintrs);
5945 pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs);
5946 } else {
5947 sc->sc_intrtype = intr_type;
5948 }
5949 } while (retry);
5950
5951 return error;
5952 }
5953
5954 static void
5955 ixl_teardown_interrupts(struct ixl_softc *sc)
5956 {
5957 struct pci_attach_args *pa = &sc->sc_pa;
5958 unsigned int i;
5959
5960 for (i = 0; i < sc->sc_nintrs; i++) {
5961 pci_intr_disestablish(pa->pa_pc, sc->sc_ihs[i]);
5962 }
5963
5964 pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs);
5965
5966 kmem_free(sc->sc_ihs, sizeof(sc->sc_ihs[0]) * sc->sc_nintrs);
5967 sc->sc_ihs = NULL;
5968 sc->sc_nintrs = 0;
5969 }
5970
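/*
 * Attach event counters: per-queue-pair software Tx/Rx counters,
 * per-device admin and error event counters, and the hardware
 * statistics (per-port GLPRT_* and per-VSI GLV_* registers) that are
 * harvested periodically by ixl_stats_update().
 */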
5971 static int
5972 ixl_setup_stats(struct ixl_softc *sc)
5973 {
5974 struct ixl_queue_pair *qp;
5975 struct ixl_tx_ring *txr;
5976 struct ixl_rx_ring *rxr;
5977 struct ixl_stats_counters *isc;
5978 unsigned int i;
5979
5980 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
5981 qp = &sc->sc_qps[i];
5982 txr = qp->qp_txr;
5983 rxr = qp->qp_rxr;
5984
5985 evcnt_attach_dynamic(&txr->txr_defragged, EVCNT_TYPE_MISC,
5986 NULL, qp->qp_name, "m_defrag succeeded");
5987 evcnt_attach_dynamic(&txr->txr_defrag_failed, EVCNT_TYPE_MISC,
5988 NULL, qp->qp_name, "m_defrag failed");
5989 evcnt_attach_dynamic(&txr->txr_pcqdrop, EVCNT_TYPE_MISC,
5990 NULL, qp->qp_name, "Dropped in pcq");
5991 evcnt_attach_dynamic(&txr->txr_transmitdef, EVCNT_TYPE_MISC,
5992 NULL, qp->qp_name, "Deferred transmit");
5993 evcnt_attach_dynamic(&txr->txr_intr, EVCNT_TYPE_INTR,
5994 NULL, qp->qp_name, "Interrupt on queue");
5995 evcnt_attach_dynamic(&txr->txr_defer, EVCNT_TYPE_MISC,
5996 NULL, qp->qp_name, "Handled queue in softint/workqueue");
5997
5998 evcnt_attach_dynamic(&rxr->rxr_mgethdr_failed, EVCNT_TYPE_MISC,
5999 NULL, qp->qp_name, "MGETHDR failed");
6000 evcnt_attach_dynamic(&rxr->rxr_mgetcl_failed, EVCNT_TYPE_MISC,
6001 NULL, qp->qp_name, "MCLGET failed");
6002 evcnt_attach_dynamic(&rxr->rxr_mbuf_load_failed,
6003 EVCNT_TYPE_MISC, NULL, qp->qp_name,
6004 "bus_dmamap_load_mbuf failed");
6005 evcnt_attach_dynamic(&rxr->rxr_intr, EVCNT_TYPE_INTR,
6006 NULL, qp->qp_name, "Interrupt on queue");
6007 evcnt_attach_dynamic(&rxr->rxr_defer, EVCNT_TYPE_MISC,
6008 NULL, qp->qp_name, "Handled queue in softint/workqueue");
6009 }
6010
6011 evcnt_attach_dynamic(&sc->sc_event_atq, EVCNT_TYPE_INTR,
6012 NULL, device_xname(sc->sc_dev), "Interrupt for other events");
6013 evcnt_attach_dynamic(&sc->sc_event_link, EVCNT_TYPE_MISC,
6014 NULL, device_xname(sc->sc_dev), "Link status event");
6015 evcnt_attach_dynamic(&sc->sc_event_ecc_err, EVCNT_TYPE_MISC,
6016 NULL, device_xname(sc->sc_dev), "ECC error");
6017 evcnt_attach_dynamic(&sc->sc_event_pci_exception, EVCNT_TYPE_MISC,
6018 NULL, device_xname(sc->sc_dev), "PCI exception");
6019 evcnt_attach_dynamic(&sc->sc_event_crit_err, EVCNT_TYPE_MISC,
6020 NULL, device_xname(sc->sc_dev), "Critical error");
6021
6022 isc = &sc->sc_stats_counters;
6023 evcnt_attach_dynamic(&isc->isc_crc_errors, EVCNT_TYPE_MISC,
6024 NULL, device_xname(sc->sc_dev), "CRC errors");
6025 evcnt_attach_dynamic(&isc->isc_illegal_bytes, EVCNT_TYPE_MISC,
6026 NULL, device_xname(sc->sc_dev), "Illegal bytes");
6027 evcnt_attach_dynamic(&isc->isc_mac_local_faults, EVCNT_TYPE_MISC,
6028 NULL, device_xname(sc->sc_dev), "MAC local faults");
6029 evcnt_attach_dynamic(&isc->isc_mac_remote_faults, EVCNT_TYPE_MISC,
6030 NULL, device_xname(sc->sc_dev), "MAC remote faults");
6031 evcnt_attach_dynamic(&isc->isc_link_xon_rx, EVCNT_TYPE_MISC,
6032 NULL, device_xname(sc->sc_dev), "Rx xon");
6033 evcnt_attach_dynamic(&isc->isc_link_xon_tx, EVCNT_TYPE_MISC,
6034 NULL, device_xname(sc->sc_dev), "Tx xon");
6035 evcnt_attach_dynamic(&isc->isc_link_xoff_rx, EVCNT_TYPE_MISC,
6036 NULL, device_xname(sc->sc_dev), "Rx xoff");
6037 evcnt_attach_dynamic(&isc->isc_link_xoff_tx, EVCNT_TYPE_MISC,
6038 NULL, device_xname(sc->sc_dev), "Tx xoff");
6039 evcnt_attach_dynamic(&isc->isc_rx_fragments, EVCNT_TYPE_MISC,
6040 NULL, device_xname(sc->sc_dev), "Rx fragments");
6041 evcnt_attach_dynamic(&isc->isc_rx_jabber, EVCNT_TYPE_MISC,
6042 NULL, device_xname(sc->sc_dev), "Rx jabber");
6043
6044 evcnt_attach_dynamic(&isc->isc_rx_size_64, EVCNT_TYPE_MISC,
6045 NULL, device_xname(sc->sc_dev), "Rx size 64");
6046 evcnt_attach_dynamic(&isc->isc_rx_size_127, EVCNT_TYPE_MISC,
6047 NULL, device_xname(sc->sc_dev), "Rx size 127");
6048 evcnt_attach_dynamic(&isc->isc_rx_size_255, EVCNT_TYPE_MISC,
6049 NULL, device_xname(sc->sc_dev), "Rx size 255");
6050 evcnt_attach_dynamic(&isc->isc_rx_size_511, EVCNT_TYPE_MISC,
6051 NULL, device_xname(sc->sc_dev), "Rx size 511");
6052 evcnt_attach_dynamic(&isc->isc_rx_size_1023, EVCNT_TYPE_MISC,
6053 NULL, device_xname(sc->sc_dev), "Rx size 1023");
6054 evcnt_attach_dynamic(&isc->isc_rx_size_1522, EVCNT_TYPE_MISC,
6055 NULL, device_xname(sc->sc_dev), "Rx size 1522");
6056 evcnt_attach_dynamic(&isc->isc_rx_size_big, EVCNT_TYPE_MISC,
6057 NULL, device_xname(sc->sc_dev), "Rx jumbo packets");
6058 evcnt_attach_dynamic(&isc->isc_rx_undersize, EVCNT_TYPE_MISC,
6059 NULL, device_xname(sc->sc_dev), "Rx under size");
6060 evcnt_attach_dynamic(&isc->isc_rx_oversize, EVCNT_TYPE_MISC,
6061 NULL, device_xname(sc->sc_dev), "Rx over size");
6062
6063 evcnt_attach_dynamic(&isc->isc_rx_bytes, EVCNT_TYPE_MISC,
6064 NULL, device_xname(sc->sc_dev), "Rx bytes / port");
6065 evcnt_attach_dynamic(&isc->isc_rx_discards, EVCNT_TYPE_MISC,
6066 NULL, device_xname(sc->sc_dev), "Rx discards / port");
6067 evcnt_attach_dynamic(&isc->isc_rx_unicast, EVCNT_TYPE_MISC,
6068 NULL, device_xname(sc->sc_dev), "Rx unicast / port");
6069 evcnt_attach_dynamic(&isc->isc_rx_multicast, EVCNT_TYPE_MISC,
6070 NULL, device_xname(sc->sc_dev), "Rx multicast / port");
6071 evcnt_attach_dynamic(&isc->isc_rx_broadcast, EVCNT_TYPE_MISC,
6072 NULL, device_xname(sc->sc_dev), "Rx broadcast / port");
6073
6074 evcnt_attach_dynamic(&isc->isc_vsi_rx_bytes, EVCNT_TYPE_MISC,
6075 NULL, device_xname(sc->sc_dev), "Rx bytes / vsi");
6076 evcnt_attach_dynamic(&isc->isc_vsi_rx_discards, EVCNT_TYPE_MISC,
6077 NULL, device_xname(sc->sc_dev), "Rx discards / vsi");
6078 evcnt_attach_dynamic(&isc->isc_vsi_rx_unicast, EVCNT_TYPE_MISC,
6079 NULL, device_xname(sc->sc_dev), "Rx unicast / vsi");
6080 evcnt_attach_dynamic(&isc->isc_vsi_rx_multicast, EVCNT_TYPE_MISC,
6081 NULL, device_xname(sc->sc_dev), "Rx multicast / vsi");
6082 evcnt_attach_dynamic(&isc->isc_vsi_rx_broadcast, EVCNT_TYPE_MISC,
6083 NULL, device_xname(sc->sc_dev), "Rx broadcast / vsi");
6084
6085 evcnt_attach_dynamic(&isc->isc_tx_size_64, EVCNT_TYPE_MISC,
6086 NULL, device_xname(sc->sc_dev), "Tx size 64");
6087 evcnt_attach_dynamic(&isc->isc_tx_size_127, EVCNT_TYPE_MISC,
6088 NULL, device_xname(sc->sc_dev), "Tx size 127");
6089 evcnt_attach_dynamic(&isc->isc_tx_size_255, EVCNT_TYPE_MISC,
6090 NULL, device_xname(sc->sc_dev), "Tx size 255");
6091 evcnt_attach_dynamic(&isc->isc_tx_size_511, EVCNT_TYPE_MISC,
6092 NULL, device_xname(sc->sc_dev), "Tx size 511");
6093 evcnt_attach_dynamic(&isc->isc_tx_size_1023, EVCNT_TYPE_MISC,
6094 NULL, device_xname(sc->sc_dev), "Tx size 1023");
6095 evcnt_attach_dynamic(&isc->isc_tx_size_1522, EVCNT_TYPE_MISC,
6096 NULL, device_xname(sc->sc_dev), "Tx size 1522");
6097 evcnt_attach_dynamic(&isc->isc_tx_size_big, EVCNT_TYPE_MISC,
6098 NULL, device_xname(sc->sc_dev), "Tx jumbo packets");
6099
6100 evcnt_attach_dynamic(&isc->isc_tx_bytes, EVCNT_TYPE_MISC,
6101 NULL, device_xname(sc->sc_dev), "Tx bytes / port");
6102 evcnt_attach_dynamic(&isc->isc_tx_dropped_link_down, EVCNT_TYPE_MISC,
6103 NULL, device_xname(sc->sc_dev),
6104 "Tx dropped due to link down / port");
6105 evcnt_attach_dynamic(&isc->isc_tx_unicast, EVCNT_TYPE_MISC,
6106 NULL, device_xname(sc->sc_dev), "Tx unicast / port");
6107 evcnt_attach_dynamic(&isc->isc_tx_multicast, EVCNT_TYPE_MISC,
6108 NULL, device_xname(sc->sc_dev), "Tx multicast / port");
6109 evcnt_attach_dynamic(&isc->isc_tx_broadcast, EVCNT_TYPE_MISC,
6110 NULL, device_xname(sc->sc_dev), "Tx broadcast / port");
6111
6112 evcnt_attach_dynamic(&isc->isc_vsi_tx_bytes, EVCNT_TYPE_MISC,
6113 NULL, device_xname(sc->sc_dev), "Tx bytes / vsi");
6114 evcnt_attach_dynamic(&isc->isc_vsi_tx_errors, EVCNT_TYPE_MISC,
6115 NULL, device_xname(sc->sc_dev), "Tx errors / vsi");
6116 evcnt_attach_dynamic(&isc->isc_vsi_tx_unicast, EVCNT_TYPE_MISC,
6117 NULL, device_xname(sc->sc_dev), "Tx unicast / vsi");
6118 evcnt_attach_dynamic(&isc->isc_vsi_tx_multicast, EVCNT_TYPE_MISC,
6119 NULL, device_xname(sc->sc_dev), "Tx multicast / vsi");
6120 evcnt_attach_dynamic(&isc->isc_vsi_tx_broadcast, EVCNT_TYPE_MISC,
6121 NULL, device_xname(sc->sc_dev), "Tx broadcast / vsi");
6122
6123 sc->sc_stats_intval = ixl_param_stats_interval;
6124 callout_init(&sc->sc_stats_callout, CALLOUT_MPSAFE);
6125 callout_setfunc(&sc->sc_stats_callout, ixl_stats_callout, sc);
6126 ixl_work_set(&sc->sc_stats_task, ixl_stats_update, sc);
6127
6128 return 0;
6129 }
6130
6131 static void
6132 ixl_teardown_stats(struct ixl_softc *sc)
6133 {
6134 struct ixl_tx_ring *txr;
6135 struct ixl_rx_ring *rxr;
6136 struct ixl_stats_counters *isc;
6137 unsigned int i;
6138
6139 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
6140 txr = sc->sc_qps[i].qp_txr;
6141 rxr = sc->sc_qps[i].qp_rxr;
6142
6143 evcnt_detach(&txr->txr_defragged);
6144 evcnt_detach(&txr->txr_defrag_failed);
6145 evcnt_detach(&txr->txr_pcqdrop);
6146 evcnt_detach(&txr->txr_transmitdef);
6147 evcnt_detach(&txr->txr_intr);
6148 evcnt_detach(&txr->txr_defer);
6149
6150 evcnt_detach(&rxr->rxr_mgethdr_failed);
6151 evcnt_detach(&rxr->rxr_mgetcl_failed);
6152 evcnt_detach(&rxr->rxr_mbuf_load_failed);
6153 evcnt_detach(&rxr->rxr_intr);
6154 evcnt_detach(&rxr->rxr_defer);
6155 }
6156
6157 isc = &sc->sc_stats_counters;
6158 evcnt_detach(&isc->isc_crc_errors);
6159 evcnt_detach(&isc->isc_illegal_bytes);
6160 evcnt_detach(&isc->isc_mac_local_faults);
6161 evcnt_detach(&isc->isc_mac_remote_faults);
6162 evcnt_detach(&isc->isc_link_xon_rx);
6163 evcnt_detach(&isc->isc_link_xon_tx);
6164 evcnt_detach(&isc->isc_link_xoff_rx);
6165 evcnt_detach(&isc->isc_link_xoff_tx);
6166 evcnt_detach(&isc->isc_rx_fragments);
6167 evcnt_detach(&isc->isc_rx_jabber);
6168 evcnt_detach(&isc->isc_rx_bytes);
6169 evcnt_detach(&isc->isc_rx_discards);
6170 evcnt_detach(&isc->isc_rx_unicast);
6171 evcnt_detach(&isc->isc_rx_multicast);
6172 evcnt_detach(&isc->isc_rx_broadcast);
6173 evcnt_detach(&isc->isc_rx_size_64);
6174 evcnt_detach(&isc->isc_rx_size_127);
6175 evcnt_detach(&isc->isc_rx_size_255);
6176 evcnt_detach(&isc->isc_rx_size_511);
6177 evcnt_detach(&isc->isc_rx_size_1023);
6178 evcnt_detach(&isc->isc_rx_size_1522);
6179 evcnt_detach(&isc->isc_rx_size_big);
6180 evcnt_detach(&isc->isc_rx_undersize);
6181 evcnt_detach(&isc->isc_rx_oversize);
6182 evcnt_detach(&isc->isc_tx_bytes);
6183 evcnt_detach(&isc->isc_tx_dropped_link_down);
6184 evcnt_detach(&isc->isc_tx_unicast);
6185 evcnt_detach(&isc->isc_tx_multicast);
6186 evcnt_detach(&isc->isc_tx_broadcast);
6187 evcnt_detach(&isc->isc_tx_size_64);
6188 evcnt_detach(&isc->isc_tx_size_127);
6189 evcnt_detach(&isc->isc_tx_size_255);
6190 evcnt_detach(&isc->isc_tx_size_511);
6191 evcnt_detach(&isc->isc_tx_size_1023);
6192 evcnt_detach(&isc->isc_tx_size_1522);
6193 evcnt_detach(&isc->isc_tx_size_big);
6194 evcnt_detach(&isc->isc_vsi_rx_discards);
6195 evcnt_detach(&isc->isc_vsi_rx_bytes);
6196 evcnt_detach(&isc->isc_vsi_rx_unicast);
6197 evcnt_detach(&isc->isc_vsi_rx_multicast);
6198 evcnt_detach(&isc->isc_vsi_rx_broadcast);
6199 evcnt_detach(&isc->isc_vsi_tx_errors);
6200 evcnt_detach(&isc->isc_vsi_tx_bytes);
6201 evcnt_detach(&isc->isc_vsi_tx_unicast);
6202 evcnt_detach(&isc->isc_vsi_tx_multicast);
6203 evcnt_detach(&isc->isc_vsi_tx_broadcast);
6204
6205 evcnt_detach(&sc->sc_event_atq);
6206 evcnt_detach(&sc->sc_event_link);
6207 evcnt_detach(&sc->sc_event_ecc_err);
6208 evcnt_detach(&sc->sc_event_pci_exception);
6209 evcnt_detach(&sc->sc_event_crit_err);
6210
6211 callout_destroy(&sc->sc_stats_callout);
6212 }
6213
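/*
 * Periodic statistics tick.  This runs in callout context, so it only
 * queues the register harvesting onto the workqueue and reschedules
 * itself.
 */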
6214 static void
6215 ixl_stats_callout(void *xsc)
6216 {
6217 struct ixl_softc *sc = xsc;
6218
6219 ixl_work_add(sc->sc_workq, &sc->sc_stats_task);
6220 callout_schedule(&sc->sc_stats_callout, mstohz(sc->sc_stats_intval));
6221 }
6222
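/*
 * Read a free-running hardware statistics counter and return how far
 * it advanced since the previous read.  Counters that have a high-word
 * register are 48 bits wide, the others 32 bits; the previous raw
 * value is kept in *offset so wraparound can be compensated.  For
 * example, a 32-bit counter that previously read 0xfffffff0 and now
 * reads 0x00000010 yields a delta of 0x10 + 2^32 - 0xfffffff0 = 0x20.
 */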
6223 static uint64_t
6224 ixl_stat_delta(struct ixl_softc *sc, uint32_t reg_hi, uint32_t reg_lo,
6225 uint64_t *offset, bool has_offset)
6226 {
6227 uint64_t value, delta;
6228 int bitwidth;
6229
6230 bitwidth = reg_hi == 0 ? 32 : 48;
6231
6232 value = ixl_rd(sc, reg_lo);
6233
6234 if (bitwidth > 32) {
6235 value |= ((uint64_t)ixl_rd(sc, reg_hi) << 32);
6236 }
6237
6238 if (__predict_true(has_offset)) {
6239 delta = value;
6240 if (value < *offset)
6241 delta += ((uint64_t)1 << bitwidth);
6242 delta -= *offset;
6243 } else {
6244 delta = 0;
6245 }
6246 atomic_swap_64(offset, value);
6247
6248 return delta;
6249 }
6250
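/*
 * Harvest all hardware statistics registers (workqueue context).
 * While isc_has_offset is still false only the baseline values are
 * recorded; afterwards each read is accumulated as a delta into the
 * corresponding event counter.
 */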
6251 static void
6252 ixl_stats_update(void *xsc)
6253 {
6254 struct ixl_softc *sc = xsc;
6255 struct ixl_stats_counters *isc;
6256 uint64_t delta;
6257
6258 isc = &sc->sc_stats_counters;
6259
6260 /* errors */
6261 delta = ixl_stat_delta(sc,
6262 0, I40E_GLPRT_CRCERRS(sc->sc_port),
6263 &isc->isc_crc_errors_offset, isc->isc_has_offset);
6264 atomic_add_64(&isc->isc_crc_errors.ev_count, delta);
6265
6266 delta = ixl_stat_delta(sc,
6267 0, I40E_GLPRT_ILLERRC(sc->sc_port),
6268 &isc->isc_illegal_bytes_offset, isc->isc_has_offset);
6269 atomic_add_64(&isc->isc_illegal_bytes.ev_count, delta);
6270
6271 /* rx */
6272 delta = ixl_stat_delta(sc,
6273 I40E_GLPRT_GORCH(sc->sc_port), I40E_GLPRT_GORCL(sc->sc_port),
6274 &isc->isc_rx_bytes_offset, isc->isc_has_offset);
6275 atomic_add_64(&isc->isc_rx_bytes.ev_count, delta);
6276
6277 delta = ixl_stat_delta(sc,
6278 0, I40E_GLPRT_RDPC(sc->sc_port),
6279 &isc->isc_rx_discards_offset, isc->isc_has_offset);
6280 atomic_add_64(&isc->isc_rx_discards.ev_count, delta);
6281
6282 delta = ixl_stat_delta(sc,
6283 I40E_GLPRT_UPRCH(sc->sc_port), I40E_GLPRT_UPRCL(sc->sc_port),
6284 &isc->isc_rx_unicast_offset, isc->isc_has_offset);
6285 atomic_add_64(&isc->isc_rx_unicast.ev_count, delta);
6286
6287 delta = ixl_stat_delta(sc,
6288 I40E_GLPRT_MPRCH(sc->sc_port), I40E_GLPRT_MPRCL(sc->sc_port),
6289 &isc->isc_rx_multicast_offset, isc->isc_has_offset);
6290 atomic_add_64(&isc->isc_rx_multicast.ev_count, delta);
6291
6292 delta = ixl_stat_delta(sc,
6293 I40E_GLPRT_BPRCH(sc->sc_port), I40E_GLPRT_BPRCL(sc->sc_port),
6294 &isc->isc_rx_broadcast_offset, isc->isc_has_offset);
6295 atomic_add_64(&isc->isc_rx_broadcast.ev_count, delta);
6296
6297 /* Packet size stats rx */
6298 delta = ixl_stat_delta(sc,
6299 I40E_GLPRT_PRC64H(sc->sc_port), I40E_GLPRT_PRC64L(sc->sc_port),
6300 &isc->isc_rx_size_64_offset, isc->isc_has_offset);
6301 atomic_add_64(&isc->isc_rx_size_64.ev_count, delta);
6302
6303 delta = ixl_stat_delta(sc,
6304 I40E_GLPRT_PRC127H(sc->sc_port), I40E_GLPRT_PRC127L(sc->sc_port),
6305 &isc->isc_rx_size_127_offset, isc->isc_has_offset);
6306 atomic_add_64(&isc->isc_rx_size_127.ev_count, delta);
6307
6308 delta = ixl_stat_delta(sc,
6309 I40E_GLPRT_PRC255H(sc->sc_port), I40E_GLPRT_PRC255L(sc->sc_port),
6310 &isc->isc_rx_size_255_offset, isc->isc_has_offset);
6311 atomic_add_64(&isc->isc_rx_size_255.ev_count, delta);
6312
6313 delta = ixl_stat_delta(sc,
6314 I40E_GLPRT_PRC511H(sc->sc_port), I40E_GLPRT_PRC511L(sc->sc_port),
6315 &isc->isc_rx_size_511_offset, isc->isc_has_offset);
6316 atomic_add_64(&isc->isc_rx_size_511.ev_count, delta);
6317
6318 delta = ixl_stat_delta(sc,
6319 I40E_GLPRT_PRC1023H(sc->sc_port), I40E_GLPRT_PRC1023L(sc->sc_port),
6320 &isc->isc_rx_size_1023_offset, isc->isc_has_offset);
6321 atomic_add_64(&isc->isc_rx_size_1023.ev_count, delta);
6322
6323 delta = ixl_stat_delta(sc,
6324 I40E_GLPRT_PRC1522H(sc->sc_port), I40E_GLPRT_PRC1522L(sc->sc_port),
6325 &isc->isc_rx_size_1522_offset, isc->isc_has_offset);
6326 atomic_add_64(&isc->isc_rx_size_1522.ev_count, delta);
6327
6328 delta = ixl_stat_delta(sc,
6329 I40E_GLPRT_PRC9522H(sc->sc_port), I40E_GLPRT_PRC9522L(sc->sc_port),
6330 &isc->isc_rx_size_big_offset, isc->isc_has_offset);
6331 atomic_add_64(&isc->isc_rx_size_big.ev_count, delta);
6332
6333 delta = ixl_stat_delta(sc,
6334 0, I40E_GLPRT_RUC(sc->sc_port),
6335 &isc->isc_rx_undersize_offset, isc->isc_has_offset);
6336 atomic_add_64(&isc->isc_rx_undersize.ev_count, delta);
6337
6338 delta = ixl_stat_delta(sc,
6339 0, I40E_GLPRT_ROC(sc->sc_port),
6340 &isc->isc_rx_oversize_offset, isc->isc_has_offset);
6341 atomic_add_64(&isc->isc_rx_oversize.ev_count, delta);
6342
6343 /* tx */
6344 delta = ixl_stat_delta(sc,
6345 I40E_GLPRT_GOTCH(sc->sc_port), I40E_GLPRT_GOTCL(sc->sc_port),
6346 &isc->isc_tx_bytes_offset, isc->isc_has_offset);
6347 atomic_add_64(&isc->isc_tx_bytes.ev_count, delta);
6348
6349 delta = ixl_stat_delta(sc,
6350 0, I40E_GLPRT_TDOLD(sc->sc_port),
6351 &isc->isc_tx_dropped_link_down_offset, isc->isc_has_offset);
6352 atomic_add_64(&isc->isc_tx_dropped_link_down.ev_count, delta);
6353
6354 delta = ixl_stat_delta(sc,
6355 I40E_GLPRT_UPTCH(sc->sc_port), I40E_GLPRT_UPTCL(sc->sc_port),
6356 &isc->isc_tx_unicast_offset, isc->isc_has_offset);
6357 atomic_add_64(&isc->isc_tx_unicast.ev_count, delta);
6358
6359 delta = ixl_stat_delta(sc,
6360 I40E_GLPRT_MPTCH(sc->sc_port), I40E_GLPRT_MPTCL(sc->sc_port),
6361 &isc->isc_tx_multicast_offset, isc->isc_has_offset);
6362 atomic_add_64(&isc->isc_tx_multicast.ev_count, delta);
6363
6364 delta = ixl_stat_delta(sc,
6365 I40E_GLPRT_BPTCH(sc->sc_port), I40E_GLPRT_BPTCL(sc->sc_port),
6366 &isc->isc_tx_broadcast_offset, isc->isc_has_offset);
6367 atomic_add_64(&isc->isc_tx_broadcast.ev_count, delta);
6368
6369 /* Packet size stats tx */
6370 delta = ixl_stat_delta(sc,
6371 I40E_GLPRT_PTC64H(sc->sc_port), I40E_GLPRT_PTC64L(sc->sc_port),
6372 &isc->isc_tx_size_64_offset, isc->isc_has_offset);
6373 atomic_add_64(&isc->isc_tx_size_64.ev_count, delta);
6374
6375 delta = ixl_stat_delta(sc,
6376 I40E_GLPRT_PTC127H(sc->sc_port), I40E_GLPRT_PTC127L(sc->sc_port),
6377 &isc->isc_tx_size_127_offset, isc->isc_has_offset);
6378 atomic_add_64(&isc->isc_tx_size_127.ev_count, delta);
6379
6380 delta = ixl_stat_delta(sc,
6381 I40E_GLPRT_PTC255H(sc->sc_port), I40E_GLPRT_PTC255L(sc->sc_port),
6382 &isc->isc_tx_size_255_offset, isc->isc_has_offset);
6383 atomic_add_64(&isc->isc_tx_size_255.ev_count, delta);
6384
6385 delta = ixl_stat_delta(sc,
6386 I40E_GLPRT_PTC511H(sc->sc_port), I40E_GLPRT_PTC511L(sc->sc_port),
6387 &isc->isc_tx_size_511_offset, isc->isc_has_offset);
6388 atomic_add_64(&isc->isc_tx_size_511.ev_count, delta);
6389
6390 delta = ixl_stat_delta(sc,
6391 I40E_GLPRT_PTC1023H(sc->sc_port), I40E_GLPRT_PTC1023L(sc->sc_port),
6392 &isc->isc_tx_size_1023_offset, isc->isc_has_offset);
6393 atomic_add_64(&isc->isc_tx_size_1023.ev_count, delta);
6394
6395 delta = ixl_stat_delta(sc,
6396 I40E_GLPRT_PTC1522H(sc->sc_port), I40E_GLPRT_PTC1522L(sc->sc_port),
6397 &isc->isc_tx_size_1522_offset, isc->isc_has_offset);
6398 atomic_add_64(&isc->isc_tx_size_1522.ev_count, delta);
6399
6400 delta = ixl_stat_delta(sc,
6401 I40E_GLPRT_PTC9522H(sc->sc_port), I40E_GLPRT_PTC9522L(sc->sc_port),
6402 &isc->isc_tx_size_big_offset, isc->isc_has_offset);
6403 atomic_add_64(&isc->isc_tx_size_big.ev_count, delta);
6404
6405 /* mac faults */
6406 delta = ixl_stat_delta(sc,
6407 0, I40E_GLPRT_MLFC(sc->sc_port),
6408 &isc->isc_mac_local_faults_offset, isc->isc_has_offset);
6409 atomic_add_64(&isc->isc_mac_local_faults.ev_count, delta);
6410
6411 delta = ixl_stat_delta(sc,
6412 0, I40E_GLPRT_MRFC(sc->sc_port),
6413 &isc->isc_mac_remote_faults_offset, isc->isc_has_offset);
6414 atomic_add_64(&isc->isc_mac_remote_faults.ev_count, delta);
6415
6416 /* Flow control (LFC) stats */
6417 delta = ixl_stat_delta(sc,
6418 0, I40E_GLPRT_LXONRXC(sc->sc_port),
6419 &isc->isc_link_xon_rx_offset, isc->isc_has_offset);
6420 atomic_add_64(&isc->isc_link_xon_rx.ev_count, delta);
6421
6422 delta = ixl_stat_delta(sc,
6423 0, I40E_GLPRT_LXONTXC(sc->sc_port),
6424 &isc->isc_link_xon_tx_offset, isc->isc_has_offset);
6425 atomic_add_64(&isc->isc_link_xon_tx.ev_count, delta);
6426
6427 delta = ixl_stat_delta(sc,
6428 0, I40E_GLPRT_LXOFFRXC(sc->sc_port),
6429 &isc->isc_link_xoff_rx_offset, isc->isc_has_offset);
6430 atomic_add_64(&isc->isc_link_xoff_rx.ev_count, delta);
6431
6432 delta = ixl_stat_delta(sc,
6433 0, I40E_GLPRT_LXOFFTXC(sc->sc_port),
6434 &isc->isc_link_xoff_tx_offset, isc->isc_has_offset);
6435 atomic_add_64(&isc->isc_link_xoff_tx.ev_count, delta);
6436
6437 /* Rx fragments and jabber */
6438 delta = ixl_stat_delta(sc,
6439 0, I40E_GLPRT_RFC(sc->sc_port),
6440 &isc->isc_rx_fragments_offset, isc->isc_has_offset);
6441 atomic_add_64(&isc->isc_rx_fragments.ev_count, delta);
6442
6443 delta = ixl_stat_delta(sc,
6444 0, I40E_GLPRT_RJC(sc->sc_port),
6445 &isc->isc_rx_jabber_offset, isc->isc_has_offset);
6446 atomic_add_64(&isc->isc_rx_jabber.ev_count, delta);
6447
6448 /* VSI rx counters */
6449 delta = ixl_stat_delta(sc,
6450 0, I40E_GLV_RDPC(sc->sc_vsi_stat_counter_idx),
6451 &isc->isc_vsi_rx_discards_offset, isc->isc_has_offset);
6452 atomic_add_64(&isc->isc_vsi_rx_discards.ev_count, delta);
6453
6454 delta = ixl_stat_delta(sc,
6455 I40E_GLV_GORCH(sc->sc_vsi_stat_counter_idx),
6456 I40E_GLV_GORCL(sc->sc_vsi_stat_counter_idx),
6457 &isc->isc_vsi_rx_bytes_offset, isc->isc_has_offset);
6458 atomic_add_64(&isc->isc_vsi_rx_bytes.ev_count, delta);
6459
6460 delta = ixl_stat_delta(sc,
6461 I40E_GLV_UPRCH(sc->sc_vsi_stat_counter_idx),
6462 I40E_GLV_UPRCL(sc->sc_vsi_stat_counter_idx),
6463 &isc->isc_vsi_rx_unicast_offset, isc->isc_has_offset);
6464 atomic_add_64(&isc->isc_vsi_rx_unicast.ev_count, delta);
6465
6466 delta = ixl_stat_delta(sc,
6467 I40E_GLV_MPRCH(sc->sc_vsi_stat_counter_idx),
6468 I40E_GLV_MPRCL(sc->sc_vsi_stat_counter_idx),
6469 &isc->isc_vsi_rx_multicast_offset, isc->isc_has_offset);
6470 atomic_add_64(&isc->isc_vsi_rx_multicast.ev_count, delta);
6471
6472 delta = ixl_stat_delta(sc,
6473 I40E_GLV_BPRCH(sc->sc_vsi_stat_counter_idx),
6474 I40E_GLV_BPRCL(sc->sc_vsi_stat_counter_idx),
6475 &isc->isc_vsi_rx_broadcast_offset, isc->isc_has_offset);
6476 atomic_add_64(&isc->isc_vsi_rx_broadcast.ev_count, delta);
6477
6478 /* VSI tx counters */
6479 delta = ixl_stat_delta(sc,
6480 0, I40E_GLV_TEPC(sc->sc_vsi_stat_counter_idx),
6481 &isc->isc_vsi_tx_errors_offset, isc->isc_has_offset);
6482 atomic_add_64(&isc->isc_vsi_tx_errors.ev_count, delta);
6483
6484 delta = ixl_stat_delta(sc,
6485 I40E_GLV_GOTCH(sc->sc_vsi_stat_counter_idx),
6486 I40E_GLV_GOTCL(sc->sc_vsi_stat_counter_idx),
6487 &isc->isc_vsi_tx_bytes_offset, isc->isc_has_offset);
6488 atomic_add_64(&isc->isc_vsi_tx_bytes.ev_count, delta);
6489
6490 delta = ixl_stat_delta(sc,
6491 I40E_GLV_UPTCH(sc->sc_vsi_stat_counter_idx),
6492 I40E_GLV_UPTCL(sc->sc_vsi_stat_counter_idx),
6493 &isc->isc_vsi_tx_unicast_offset, isc->isc_has_offset);
6494 atomic_add_64(&isc->isc_vsi_tx_unicast.ev_count, delta);
6495
6496 delta = ixl_stat_delta(sc,
6497 I40E_GLV_MPTCH(sc->sc_vsi_stat_counter_idx),
6498 I40E_GLV_MPTCL(sc->sc_vsi_stat_counter_idx),
6499 &isc->isc_vsi_tx_multicast_offset, isc->isc_has_offset);
6500 atomic_add_64(&isc->isc_vsi_tx_multicast.ev_count, delta);
6501
6502 delta = ixl_stat_delta(sc,
6503 I40E_GLV_BPTCH(sc->sc_vsi_stat_counter_idx),
6504 I40E_GLV_BPTCL(sc->sc_vsi_stat_counter_idx),
6505 &isc->isc_vsi_tx_broadcast_offset, isc->isc_has_offset);
6506 atomic_add_64(&isc->isc_vsi_tx_broadcast.ev_count, delta);
6507 }
6508
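/*
 * Create the per-device sysctl tree under hw.<devname>:
 * txrx_workqueue and stats_interval at the top level, plus "rx" and
 * "tx" subtrees holding the ITR, descriptor count and packet
 * processing limit knobs.
 */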
6509 static int
6510 ixl_setup_sysctls(struct ixl_softc *sc)
6511 {
6512 const char *devname;
6513 struct sysctllog **log;
6514 const struct sysctlnode *rnode, *rxnode, *txnode;
6515 int error;
6516
6517 log = &sc->sc_sysctllog;
6518 devname = device_xname(sc->sc_dev);
6519
6520 error = sysctl_createv(log, 0, NULL, &rnode,
6521 0, CTLTYPE_NODE, devname,
6522 SYSCTL_DESCR("ixl information and settings"),
6523 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
6524 if (error)
6525 goto out;
6526
6527 error = sysctl_createv(log, 0, &rnode, NULL,
6528 CTLFLAG_READWRITE, CTLTYPE_BOOL, "txrx_workqueue",
6529 SYSCTL_DESCR("Use workqueue for packet processing"),
6530 NULL, 0, &sc->sc_txrx_workqueue, 0, CTL_CREATE, CTL_EOL);
6531 if (error)
6532 goto out;
6533
6534 error = sysctl_createv(log, 0, &rnode, NULL,
6535 CTLFLAG_READONLY, CTLTYPE_INT, "stats_interval",
6536 SYSCTL_DESCR("Statistics collection interval in milliseconds"),
6537 NULL, 0, &sc->sc_stats_intval, 0, CTL_CREATE, CTL_EOL);
	if (error)
		goto out;

6539 error = sysctl_createv(log, 0, &rnode, &rxnode,
6540 0, CTLTYPE_NODE, "rx",
6541 SYSCTL_DESCR("ixl information and settings for Rx"),
6542 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
6543 if (error)
6544 goto out;
6545
6546 error = sysctl_createv(log, 0, &rxnode, NULL,
6547 CTLFLAG_READWRITE, CTLTYPE_INT, "itr",
6548 SYSCTL_DESCR("Interrupt Throttling"),
6549 ixl_sysctl_itr_handler, 0,
6550 (void *)sc, 0, CTL_CREATE, CTL_EOL);
6551 if (error)
6552 goto out;
6553
6554 error = sysctl_createv(log, 0, &rxnode, NULL,
6555 CTLFLAG_READONLY, CTLTYPE_INT, "descriptor_num",
6556 SYSCTL_DESCR("the number of rx descriptors"),
6557 NULL, 0, &sc->sc_rx_ring_ndescs, 0, CTL_CREATE, CTL_EOL);
6558 if (error)
6559 goto out;
6560
6561 error = sysctl_createv(log, 0, &rxnode, NULL,
6562 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
6563 SYSCTL_DESCR("max number of Rx packets"
6564 " to process for interrupt processing"),
6565 NULL, 0, &sc->sc_rx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
6566 if (error)
6567 goto out;
6568
6569 error = sysctl_createv(log, 0, &rxnode, NULL,
6570 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
6571 SYSCTL_DESCR("max number of Rx packets"
6572 " to process for deferred processing"),
6573 NULL, 0, &sc->sc_rx_process_limit, 0, CTL_CREATE, CTL_EOL);
6574 if (error)
6575 goto out;
6576
6577 error = sysctl_createv(log, 0, &rnode, &txnode,
6578 0, CTLTYPE_NODE, "tx",
6579 SYSCTL_DESCR("ixl information and settings for Tx"),
6580 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
6581 if (error)
6582 goto out;
6583
6584 error = sysctl_createv(log, 0, &txnode, NULL,
6585 CTLFLAG_READWRITE, CTLTYPE_INT, "itr",
6586 SYSCTL_DESCR("Interrupt Throttling"),
6587 ixl_sysctl_itr_handler, 0,
6588 (void *)sc, 0, CTL_CREATE, CTL_EOL);
6589 if (error)
6590 goto out;
6591
6592 error = sysctl_createv(log, 0, &txnode, NULL,
6593 CTLFLAG_READONLY, CTLTYPE_INT, "descriptor_num",
6594 SYSCTL_DESCR("the number of tx descriptors"),
6595 NULL, 0, &sc->sc_tx_ring_ndescs, 0, CTL_CREATE, CTL_EOL);
6596 if (error)
6597 goto out;
6598
6599 error = sysctl_createv(log, 0, &txnode, NULL,
6600 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
6601 SYSCTL_DESCR("max number of Tx packets"
6602 " to process for interrupt processing"),
6603 NULL, 0, &sc->sc_tx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
6604 if (error)
6605 goto out;
6606
6607 error = sysctl_createv(log, 0, &txnode, NULL,
6608 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
6609 SYSCTL_DESCR("max number of Tx packets"
6610 " to process for deferred processing"),
6611 NULL, 0, &sc->sc_tx_process_limit, 0, CTL_CREATE, CTL_EOL);
6612 if (error)
6613 goto out;
6614
6615 out:
6616 if (error) {
6617 aprint_error_dev(sc->sc_dev,
6618 "unable to create sysctl node\n");
6619 sysctl_teardown(log);
6620 }
6621
6622 return error;
6623 }
6624
6625 static void
6626 ixl_teardown_sysctls(struct ixl_softc *sc)
6627 {
6628
6629 sysctl_teardown(&sc->sc_sysctllog);
6630 }
6631
6632 static bool
6633 ixl_sysctlnode_is_rx(struct sysctlnode *node)
6634 {
6635
6636 if (strstr(node->sysctl_parent->sysctl_name, "rx") != NULL)
6637 return true;
6638
6639 return false;
6640 }
6641
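/*
 * Sysctl handler shared by the rx.itr and tx.itr nodes.  The new value
 * is only validated and stored here; it takes effect at the next
 * ixl_init(), so changes are rejected while the interface is running.
 */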
6642 static int
6643 ixl_sysctl_itr_handler(SYSCTLFN_ARGS)
6644 {
6645 struct sysctlnode node = *rnode;
6646 struct ixl_softc *sc = (struct ixl_softc *)node.sysctl_data;
6647 struct ifnet *ifp = &sc->sc_ec.ec_if;
6648 uint32_t newitr, *itrptr;
6649 int error;
6650
6651 if (ixl_sysctlnode_is_rx(&node)) {
6652 itrptr = &sc->sc_itr_rx;
6653 } else {
6654 itrptr = &sc->sc_itr_tx;
6655 }
6656
6657 newitr = *itrptr;
6658 node.sysctl_data = &newitr;
6659 node.sysctl_size = sizeof(newitr);
6660
6661 error = sysctl_lookup(SYSCTLFN_CALL(&node));
6662
6663 if (error || newp == NULL)
6664 return error;
6665
6666 /* ITRs are applied in ixl_init() to keep the implementation simple */
6667 if (ISSET(ifp->if_flags, IFF_RUNNING))
6668 return EBUSY;
6669
6670 if (newitr > 0x07ff)
6671 return EINVAL;
6672
6673 *itrptr = newitr;
6674
6675 return 0;
6676 }
6677
6678 static struct workqueue *
6679 ixl_workq_create(const char *name, pri_t prio, int ipl, int flags)
6680 {
6681 struct workqueue *wq;
6682 int error;
6683
6684 error = workqueue_create(&wq, name, ixl_workq_work, NULL,
6685 prio, ipl, flags);
6686
6687 if (error)
6688 return NULL;
6689
6690 return wq;
6691 }
6692
6693 static void
6694 ixl_workq_destroy(struct workqueue *wq)
6695 {
6696
6697 workqueue_destroy(wq);
6698 }
6699
6700 static void
6701 ixl_work_set(struct ixl_work *work, void (*func)(void *), void *arg)
6702 {
6703
6704 memset(work, 0, sizeof(*work));
6705 work->ixw_func = func;
6706 work->ixw_arg = arg;
6707 }
6708
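/*
 * Enqueue a work item at most once: ixw_added is set atomically here
 * and cleared again in ixl_workq_work() just before the handler runs,
 * so concurrent callers cannot queue the same item twice.
 */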
6709 static void
6710 ixl_work_add(struct workqueue *wq, struct ixl_work *work)
6711 {
6712 if (atomic_cas_uint(&work->ixw_added, 0, 1) != 0)
6713 return;
6714
6715 kpreempt_disable();
6716 workqueue_enqueue(wq, &work->ixw_cookie, NULL);
6717 kpreempt_enable();
6718 }
6719
6720 static void
6721 ixl_work_wait(struct workqueue *wq, struct ixl_work *work)
6722 {
6723
6724 workqueue_wait(wq, &work->ixw_cookie);
6725 }
6726
6727 static void
6728 ixl_workq_work(struct work *wk, void *context)
6729 {
6730 struct ixl_work *work;
6731
6732 work = container_of(wk, struct ixl_work, ixw_cookie);
6733
6734 atomic_swap_uint(&work->ixw_added, 0);
6735 work->ixw_func(work->ixw_arg);
6736 }
6737
6738 static int
6739 ixl_rx_ctl_read(struct ixl_softc *sc, uint32_t reg, uint32_t *rv)
6740 {
6741 struct ixl_aq_desc iaq;
6742
6743 memset(&iaq, 0, sizeof(iaq));
6744 iaq.iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_REG_READ);
6745 iaq.iaq_param[1] = htole32(reg);
6746
6747 if (ixl_atq_poll(sc, &iaq, 250) != 0)
6748 return ETIMEDOUT;
6749
6750 switch (le16toh(iaq.iaq_retval)) {
6751 case IXL_AQ_RC_OK:
6752 /* success */
6753 break;
6754 case IXL_AQ_RC_EACCES:
6755 return EPERM;
6756 case IXL_AQ_RC_EAGAIN:
6757 return EAGAIN;
6758 default:
6759 return EIO;
6760 }
6761
6762 *rv = le32toh(iaq.iaq_param[3]);
6763 return 0;
6764 }
6765
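/*
 * Read an Rx control register.  If the firmware supports the RX_CTL
 * admin queue commands (IXL_SC_AQ_FLAG_RXCTL), use the admin queue and
 * retry briefly on EAGAIN; otherwise, or if that fails, fall back to a
 * direct register read.
 */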
6766 static uint32_t
6767 ixl_rd_rx_csr(struct ixl_softc *sc, uint32_t reg)
6768 {
6769 uint32_t val;
6770 int rv, retry, retry_limit;
6771
6772 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL)) {
6773 retry_limit = 5;
6774 } else {
6775 retry_limit = 0;
6776 }
6777
6778 for (retry = 0; retry < retry_limit; retry++) {
6779 rv = ixl_rx_ctl_read(sc, reg, &val);
6780 if (rv == 0)
6781 return val;
6782 else if (rv == EAGAIN)
6783 delaymsec(1);
6784 else
6785 break;
6786 }
6787
6788 val = ixl_rd(sc, reg);
6789
6790 return val;
6791 }
6792
6793 static int
6794 ixl_rx_ctl_write(struct ixl_softc *sc, uint32_t reg, uint32_t value)
6795 {
6796 struct ixl_aq_desc iaq;
6797
6798 memset(&iaq, 0, sizeof(iaq));
6799 iaq.iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_REG_WRITE);
6800 iaq.iaq_param[1] = htole32(reg);
6801 iaq.iaq_param[3] = htole32(value);
6802
6803 if (ixl_atq_poll(sc, &iaq, 250) != 0)
6804 return ETIMEDOUT;
6805
6806 switch (le16toh(iaq.iaq_retval)) {
6807 case IXL_AQ_RC_OK:
6808 /* success */
6809 break;
6810 case IXL_AQ_RC_EACCES:
6811 return EPERM;
6812 case IXL_AQ_RC_EAGAIN:
6813 return EAGAIN;
6814 default:
6815 return EIO;
6816 }
6817
6818 return 0;
6819 }
6820
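/*
 * Write counterpart of ixl_rd_rx_csr(): prefer the RX_CTL admin queue
 * write, falling back to a direct register write.
 */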
6821 static void
6822 ixl_wr_rx_csr(struct ixl_softc *sc, uint32_t reg, uint32_t value)
6823 {
6824 int rv, retry, retry_limit;
6825
6826 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL)) {
6827 retry_limit = 5;
6828 } else {
6829 retry_limit = 0;
6830 }
6831
6832 for (retry = 0; retry < retry_limit; retry++) {
6833 rv = ixl_rx_ctl_write(sc, reg, value);
6834 if (rv == 0)
6835 return;
6836 else if (rv == EAGAIN)
6837 delaymsec(1);
6838 else
6839 break;
6840 }
6841
6842 ixl_wr(sc, reg, value);
6843 }
6844
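/*
 * Acquire ownership of the NVM resource via the admin queue before
 * accessing shadow RAM, when the firmware requires it
 * (IXL_SC_AQ_FLAG_NVMLOCK).  'R' requests read access, anything else
 * write access.
 */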
6845 static int
6846 ixl_nvm_lock(struct ixl_softc *sc, char rw)
6847 {
6848 struct ixl_aq_desc iaq;
6849 struct ixl_aq_req_resource_param *param;
6850 int rv;
6851
6852 if (!ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK))
6853 return 0;
6854
6855 memset(&iaq, 0, sizeof(iaq));
6856 iaq.iaq_opcode = htole16(IXL_AQ_OP_REQUEST_RESOURCE);
6857
6858 param = (struct ixl_aq_req_resource_param *)&iaq.iaq_param;
6859 param->resource_id = htole16(IXL_AQ_RESOURCE_ID_NVM);
6860 if (rw == 'R') {
6861 param->access_type = htole16(IXL_AQ_RESOURCE_ACCES_READ);
6862 } else {
6863 param->access_type = htole16(IXL_AQ_RESOURCE_ACCES_WRITE);
6864 }
6865
6866 rv = ixl_atq_poll(sc, &iaq, 250);
6867
6868 if (rv != 0)
6869 return ETIMEDOUT;
6870
6871 switch (le16toh(iaq.iaq_retval)) {
6872 case IXL_AQ_RC_OK:
6873 break;
6874 case IXL_AQ_RC_EACCES:
6875 return EACCES;
6876 case IXL_AQ_RC_EBUSY:
6877 return EBUSY;
6878 case IXL_AQ_RC_EPERM:
6879 return EPERM;
6880 }
6881
6882 return 0;
6883 }
6884
6885 static int
6886 ixl_nvm_unlock(struct ixl_softc *sc)
6887 {
6888 struct ixl_aq_desc iaq;
6889 struct ixl_aq_rel_resource_param *param;
6890 int rv;
6891
6892 if (!ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK))
6893 return 0;
6894
6895 memset(&iaq, 0, sizeof(iaq));
6896 iaq.iaq_opcode = htole16(IXL_AQ_OP_RELEASE_RESOURCE);
6897
6898 param = (struct ixl_aq_rel_resource_param *)&iaq.iaq_param;
6899 param->resource_id = htole16(IXL_AQ_RESOURCE_ID_NVM);
6900
6901 rv = ixl_atq_poll(sc, &iaq, 250);
6902
6903 if (rv != 0)
6904 return ETIMEDOUT;
6905
6906 switch (le16toh(iaq.iaq_retval)) {
6907 case IXL_AQ_RC_OK:
6908 break;
6909 default:
6910 return EIO;
6911 }
6912 return 0;
6913 }
6914
6915 static int
6916 ixl_srdone_poll(struct ixl_softc *sc)
6917 {
6918 int wait_count;
6919 uint32_t reg;
6920
6921 for (wait_count = 0; wait_count < IXL_SRRD_SRCTL_ATTEMPTS;
6922 wait_count++) {
6923 reg = ixl_rd(sc, I40E_GLNVM_SRCTL);
6924 if (ISSET(reg, I40E_GLNVM_SRCTL_DONE_MASK))
6925 break;
6926
6927 delaymsec(5);
6928 }
6929
6930 if (wait_count == IXL_SRRD_SRCTL_ATTEMPTS)
6931 return -1;
6932
6933 return 0;
6934 }
6935
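/*
 * Read one 16-bit shadow RAM word through the legacy GLNVM_SRCTL /
 * GLNVM_SRDATA register interface, polling the DONE bit before and
 * after issuing the read.
 */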
6936 static int
6937 ixl_nvm_read_srctl(struct ixl_softc *sc, uint16_t offset, uint16_t *data)
6938 {
6939 uint32_t reg;
6940
6941 if (ixl_srdone_poll(sc) != 0)
6942 return ETIMEDOUT;
6943
6944 reg = ((uint32_t)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
6945 __BIT(I40E_GLNVM_SRCTL_START_SHIFT);
6946 ixl_wr(sc, I40E_GLNVM_SRCTL, reg);
6947
6948 if (ixl_srdone_poll(sc) != 0) {
6949 aprint_debug("NVM read error: couldn't access "
6950 "Shadow RAM address: 0x%x\n", offset);
6951 return ETIMEDOUT;
6952 }
6953
6954 reg = ixl_rd(sc, I40E_GLNVM_SRDATA);
6955 *data = (uint16_t)__SHIFTOUT(reg, I40E_GLNVM_SRDATA_RDDATA_MASK);
6956
6957 return 0;
6958 }
6959
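/*
 * Read from the NVM via the admin queue into the shared AQ buffer.
 * The offset argument is in 16-bit words (as in the SRCTL interface)
 * and is converted to a 24-bit byte offset for the command.
 */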
6960 static int
6961 ixl_nvm_read_aq(struct ixl_softc *sc, uint16_t offset_word,
6962 void *data, size_t len)
6963 {
6964 struct ixl_dmamem *idm;
6965 struct ixl_aq_desc iaq;
6966 struct ixl_aq_nvm_param *param;
6967 uint32_t offset_bytes;
6968 int rv;
6969
6970 idm = &sc->sc_aqbuf;
6971 if (len > IXL_DMA_LEN(idm))
6972 return ENOMEM;
6973
6974 memset(IXL_DMA_KVA(idm), 0, IXL_DMA_LEN(idm));
6975 memset(&iaq, 0, sizeof(iaq));
6976 iaq.iaq_opcode = htole16(IXL_AQ_OP_NVM_READ);
6977 iaq.iaq_flags = htole16(IXL_AQ_BUF |
6978 ((len > I40E_AQ_LARGE_BUF) ? IXL_AQ_LB : 0));
6979 iaq.iaq_datalen = htole16(len);
6980 ixl_aq_dva(&iaq, IXL_DMA_DVA(idm));
6981
6982 param = (struct ixl_aq_nvm_param *)iaq.iaq_param;
6983 param->command_flags = IXL_AQ_NVM_LAST_CMD;
6984 param->module_pointer = 0;
6985 param->length = htole16(len);
6986 offset_bytes = (uint32_t)offset_word * 2;
6987 offset_bytes &= 0x00FFFFFF;
6988 param->offset = htole32(offset_bytes);
6989
6990 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
6991 BUS_DMASYNC_PREREAD);
6992
6993 rv = ixl_atq_poll(sc, &iaq, 250);
6994
6995 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
6996 BUS_DMASYNC_POSTREAD);
6997
6998 if (rv != 0) {
6999 return ETIMEDOUT;
7000 }
7001
7002 switch (le16toh(iaq.iaq_retval)) {
7003 case IXL_AQ_RC_OK:
7004 break;
7005 case IXL_AQ_RC_EPERM:
7006 return EPERM;
7007 case IXL_AQ_RC_EINVAL:
7008 return EINVAL;
7009 case IXL_AQ_RC_EBUSY:
7010 return EBUSY;
7011 case IXL_AQ_RC_EIO:
7012 default:
7013 return EIO;
7014 }
7015
7016 memcpy(data, IXL_DMA_KVA(idm), len);
7017
7018 return 0;
7019 }
7020
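/*
 * Read one 16-bit word from the NVM, taking and releasing the NVM
 * lock around the access and choosing the admin queue or SRCTL read
 * path based on the firmware capabilities.
 */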
7021 static int
7022 ixl_rd16_nvm(struct ixl_softc *sc, uint16_t offset, uint16_t *data)
7023 {
7024 int error;
7025 uint16_t buf;
7026
7027 error = ixl_nvm_lock(sc, 'R');
7028 if (error)
7029 return error;
7030
7031 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMREAD)) {
7032 error = ixl_nvm_read_aq(sc, offset,
7033 &buf, sizeof(buf));
7034 if (error == 0)
7035 *data = le16toh(buf);
7036 } else {
7037 error = ixl_nvm_read_srctl(sc, offset, &buf);
7038 if (error == 0)
7039 *data = buf;
7040 }
7041
7042 ixl_nvm_unlock(sc);
7043
7044 return error;
7045 }
7046
7047 MODULE(MODULE_CLASS_DRIVER, if_ixl, "pci");
7048
7049 #ifdef _MODULE
7050 #include "ioconf.c"
7051 #endif
7052
7053 #ifdef _MODULE
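/*
 * When built as a module, pick up tunables ("nomsix", "stats_interval",
 * "nqps_limit", "rx_ndescs", "tx_ndescs") from the module property
 * dictionary before the devices are attached.
 */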
7054 static void
7055 ixl_parse_modprop(prop_dictionary_t dict)
7056 {
7057 prop_object_t obj;
7058 int64_t val;
7059 uint64_t uval;
7060
7061 if (dict == NULL)
7062 return;
7063
7064 obj = prop_dictionary_get(dict, "nomsix");
7065 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_BOOL) {
7066 ixl_param_nomsix = prop_bool_true((prop_bool_t)obj);
7067 }
7068
7069 obj = prop_dictionary_get(dict, "stats_interval");
7070 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
7071 val = prop_number_signed_value((prop_number_t)obj);
7072
7073 /* the bounds are arbitrary sanity limits */
7074 if (100 < val && val < 180000) {
7075 ixl_param_stats_interval = val;
7076 }
7077 }
7078
7079 obj = prop_dictionary_get(dict, "nqps_limit");
7080 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
7081 val = prop_number_signed_value((prop_number_t)obj);
7082
7083 if (val <= INT32_MAX)
7084 ixl_param_nqps_limit = val;
7085 }
7086
7087 obj = prop_dictionary_get(dict, "rx_ndescs");
7088 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
7089 uval = prop_number_unsigned_integer_value((prop_number_t)obj);
7090
7091 if (uval > 8)
7092 ixl_param_rx_ndescs = uval;
7093 }
7094
7095 obj = prop_dictionary_get(dict, "tx_ndescs");
7096 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
7097 uval = prop_number_unsigned_integer_value((prop_number_t)obj);
7098
7099 if (uval > IXL_TX_PKT_DESCS)
7100 ixl_param_tx_ndescs = uval;
7101 }
7103 }
7104 #endif
7105
7106 static int
7107 if_ixl_modcmd(modcmd_t cmd, void *opaque)
7108 {
7109 int error = 0;
7110
7111 #ifdef _MODULE
7112 switch (cmd) {
7113 case MODULE_CMD_INIT:
7114 ixl_parse_modprop((prop_dictionary_t)opaque);
7115 error = config_init_component(cfdriver_ioconf_if_ixl,
7116 cfattach_ioconf_if_ixl, cfdata_ioconf_if_ixl);
7117 break;
7118 case MODULE_CMD_FINI:
7119 error = config_fini_component(cfdriver_ioconf_if_ixl,
7120 cfattach_ioconf_if_ixl, cfdata_ioconf_if_ixl);
7121 break;
7122 default:
7123 error = ENOTTY;
7124 break;
7125 }
7126 #endif
7127
7128 return error;
7129 }
7130