/*	$NetBSD: if_ixl.c,v 1.40 2020/02/12 06:26:02 yamaguchi Exp $	*/
2
3 /*
4 * Copyright (c) 2013-2015, Intel Corporation
5 * All rights reserved.
6
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
12 *
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * 3. Neither the name of the Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 /*
35 * Copyright (c) 2016,2017 David Gwynne <dlg (at) openbsd.org>
36 *
37 * Permission to use, copy, modify, and distribute this software for any
38 * purpose with or without fee is hereby granted, provided that the above
39 * copyright notice and this permission notice appear in all copies.
40 *
41 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
42 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
43 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
44 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
45 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
46 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
47 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
48 */
49
50 /*
51 * Copyright (c) 2019 Internet Initiative Japan, Inc.
52 * All rights reserved.
53 *
54 * Redistribution and use in source and binary forms, with or without
55 * modification, are permitted provided that the following conditions
56 * are met:
57 * 1. Redistributions of source code must retain the above copyright
58 * notice, this list of conditions and the following disclaimer.
59 * 2. Redistributions in binary form must reproduce the above copyright
60 * notice, this list of conditions and the following disclaimer in the
61 * documentation and/or other materials provided with the distribution.
62 *
63 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
64 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
65 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
66 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
67 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
68 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
69 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
70 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
71 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
72 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
73 * POSSIBILITY OF SUCH DAMAGE.
74 */
75
76 #include <sys/cdefs.h>
77
78 #ifdef _KERNEL_OPT
79 #include "opt_net_mpsafe.h"
80 #include "opt_if_ixl.h"
81 #endif
82
83 #include <sys/param.h>
84 #include <sys/types.h>
85
86 #include <sys/cpu.h>
87 #include <sys/device.h>
88 #include <sys/evcnt.h>
89 #include <sys/interrupt.h>
90 #include <sys/kmem.h>
91 #include <sys/malloc.h>
92 #include <sys/module.h>
93 #include <sys/mutex.h>
94 #include <sys/pcq.h>
95 #include <sys/syslog.h>
96 #include <sys/workqueue.h>
97
98 #include <sys/bus.h>
99
100 #include <net/bpf.h>
101 #include <net/if.h>
102 #include <net/if_dl.h>
103 #include <net/if_media.h>
104 #include <net/if_ether.h>
105 #include <net/rss_config.h>
106
107 #include <netinet/tcp.h> /* for struct tcphdr */
108 #include <netinet/udp.h> /* for struct udphdr */
109
110 #include <dev/pci/pcivar.h>
111 #include <dev/pci/pcidevs.h>
112
113 #include <dev/pci/if_ixlreg.h>
114 #include <dev/pci/if_ixlvar.h>
115
116 #include <prop/proplib.h>
117
struct ixl_softc; /* defined below */
119
120 #define I40E_PF_RESET_WAIT_COUNT 200
121 #define I40E_AQ_LARGE_BUF 512
122
123 /* bitfields for Tx queue mapping in QTX_CTL */
124 #define I40E_QTX_CTL_VF_QUEUE 0x0
125 #define I40E_QTX_CTL_VM_QUEUE 0x1
126 #define I40E_QTX_CTL_PF_QUEUE 0x2
127
128 #define I40E_QUEUE_TYPE_EOL 0x7ff
129 #define I40E_INTR_NOTX_QUEUE 0
130
131 #define I40E_QUEUE_TYPE_RX 0x0
132 #define I40E_QUEUE_TYPE_TX 0x1
133 #define I40E_QUEUE_TYPE_PE_CEQ 0x2
134 #define I40E_QUEUE_TYPE_UNKNOWN 0x3
135
136 #define I40E_ITR_INDEX_RX 0x0
137 #define I40E_ITR_INDEX_TX 0x1
138 #define I40E_ITR_INDEX_OTHER 0x2
139 #define I40E_ITR_INDEX_NONE 0x3
140
141 #define I40E_INTR_NOTX_QUEUE 0
142 #define I40E_INTR_NOTX_INTR 0
143 #define I40E_INTR_NOTX_RX_QUEUE 0
144 #define I40E_INTR_NOTX_TX_QUEUE 1
145 #define I40E_INTR_NOTX_RX_MASK I40E_PFINT_ICR0_QUEUE_0_MASK
146 #define I40E_INTR_NOTX_TX_MASK I40E_PFINT_ICR0_QUEUE_1_MASK
147
148 #define BIT_ULL(a) (1ULL << (a))
149 #define IXL_RSS_HENA_DEFAULT_BASE \
150 (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
151 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
152 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
153 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
154 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
155 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
156 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
157 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
158 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
159 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
160 BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
161 #define IXL_RSS_HENA_DEFAULT_XL710 IXL_RSS_HENA_DEFAULT_BASE
162 #define IXL_RSS_HENA_DEFAULT_X722 (IXL_RSS_HENA_DEFAULT_XL710 | \
163 BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
164 BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
165 BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
166 BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
167 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
168 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK))
169 #define I40E_HASH_LUT_SIZE_128 0
170 #define IXL_RSS_KEY_SIZE_REG 13
171
172 #define IXL_ICR0_CRIT_ERR_MASK \
173 (I40E_PFINT_ICR0_PCI_EXCEPTION_MASK | \
174 I40E_PFINT_ICR0_ECC_ERR_MASK | \
175 I40E_PFINT_ICR0_PE_CRITERR_MASK)
176
177 #define IXL_TX_PKT_DESCS 8
178 #define IXL_TX_PKT_MAXSIZE (MCLBYTES * IXL_TX_PKT_DESCS)
179 #define IXL_TX_QUEUE_ALIGN 128
180 #define IXL_RX_QUEUE_ALIGN 128
181
182 #define IXL_MCLBYTES (MCLBYTES - ETHER_ALIGN)
#define IXL_MTU_ETHERLEN	(ETHER_HDR_LEN + ETHER_CRC_LEN)
185 #if 0
186 #define IXL_MAX_MTU (9728 - IXL_MTU_ETHERLEN)
187 #else
188 /* (dbuff * 5) - ETHER_HDR_LEN - ETHER_CRC_LEN */
189 #define IXL_MAX_MTU (9600 - IXL_MTU_ETHERLEN)
190 #endif
191 #define IXL_MIN_MTU (ETHER_MIN_LEN - ETHER_CRC_LEN)
192
193 #define IXL_PCIREG PCI_MAPREG_START
194
195 #define IXL_ITR0 0x0
196 #define IXL_ITR1 0x1
197 #define IXL_ITR2 0x2
198 #define IXL_NOITR 0x3
199
200 #define IXL_AQ_NUM 256
201 #define IXL_AQ_MASK (IXL_AQ_NUM - 1)
#define IXL_AQ_ALIGN		64	/* alignment for the AQ descriptor rings */
203 #define IXL_AQ_BUFLEN 4096
204
205 #define IXL_HMC_ROUNDUP 512
206 #define IXL_HMC_PGSIZE 4096
207 #define IXL_HMC_DVASZ sizeof(uint64_t)
208 #define IXL_HMC_PGS (IXL_HMC_PGSIZE / IXL_HMC_DVASZ)
209 #define IXL_HMC_L2SZ (IXL_HMC_PGSIZE * IXL_HMC_PGS)
210 #define IXL_HMC_PDVALID 1ULL
211
212 #define IXL_ATQ_EXEC_TIMEOUT (10 * hz)
213
214 #define IXL_SRRD_SRCTL_ATTEMPTS 100000
215
216 struct ixl_aq_regs {
217 bus_size_t atq_tail;
218 bus_size_t atq_head;
219 bus_size_t atq_len;
220 bus_size_t atq_bal;
221 bus_size_t atq_bah;
222
223 bus_size_t arq_tail;
224 bus_size_t arq_head;
225 bus_size_t arq_len;
226 bus_size_t arq_bal;
227 bus_size_t arq_bah;
228
229 uint32_t atq_len_enable;
230 uint32_t atq_tail_mask;
231 uint32_t atq_head_mask;
232
233 uint32_t arq_len_enable;
234 uint32_t arq_tail_mask;
235 uint32_t arq_head_mask;
236 };
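/*
 * The admin queue register offsets and masks are kept in this table so the
 * code below can stay independent of the concrete register set; only the
 * PF layout (ixl_pf_aq_regs, defined further down) is provided in this file.
 */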
237
238 struct ixl_phy_type {
239 uint64_t phy_type;
240 uint64_t ifm_type;
241 };
242
243 struct ixl_speed_type {
244 uint8_t dev_speed;
245 uint64_t net_speed;
246 };
247
248 struct ixl_aq_buf {
249 SIMPLEQ_ENTRY(ixl_aq_buf)
250 aqb_entry;
251 void *aqb_data;
252 bus_dmamap_t aqb_map;
253 bus_dma_segment_t aqb_seg;
254 size_t aqb_size;
255 int aqb_nsegs;
256 };
257 SIMPLEQ_HEAD(ixl_aq_bufs, ixl_aq_buf);
258
259 struct ixl_dmamem {
260 bus_dmamap_t ixm_map;
261 bus_dma_segment_t ixm_seg;
262 int ixm_nsegs;
263 size_t ixm_size;
264 void *ixm_kva;
265 };
266
267 #define IXL_DMA_MAP(_ixm) ((_ixm)->ixm_map)
268 #define IXL_DMA_DVA(_ixm) ((_ixm)->ixm_map->dm_segs[0].ds_addr)
269 #define IXL_DMA_KVA(_ixm) ((void *)(_ixm)->ixm_kva)
270 #define IXL_DMA_LEN(_ixm) ((_ixm)->ixm_size)
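/*
 * Typical use of the accessors above (illustrative): the KVA is what the
 * CPU dereferences, the DVA is what gets programmed into the device, and
 * the map/length pair is what bus_dmamap_sync(9) operates on, e.g.
 *
 *	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
 *	    0, IXL_DMA_LEN(&sc->sc_atq),
 *	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
 */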
271
272 struct ixl_hmc_entry {
273 uint64_t hmc_base;
274 uint32_t hmc_count;
275 uint64_t hmc_size;
276 };
277
278 enum ixl_hmc_types {
279 IXL_HMC_LAN_TX = 0,
280 IXL_HMC_LAN_RX,
281 IXL_HMC_FCOE_CTX,
282 IXL_HMC_FCOE_FILTER,
283 IXL_HMC_COUNT
284 };
285
286 struct ixl_hmc_pack {
287 uint16_t offset;
288 uint16_t width;
289 uint16_t lsb;
290 };
291
/*
 * these HMC objects have weird sizes and alignments, so these are abstract
 * representations of them that are nice for C code to populate.
 *
 * the packing code relies on little-endian values being stored in the
 * fields, on no bits being set above each field's width, and on the fields
 * being listed in the same order as they appear in the context structure.
 */
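/*
 * For example, the entry { offsetof(struct ixl_hmc_rxq, qlen), 13, 89 }
 * in ixl_hmc_pack_rxq below tells ixl_hmc_pack() to take the low 13 bits
 * of the little-endian value stored in the qlen field and place them at
 * bit offset 89 of the packed context.
 */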
300
301 struct ixl_hmc_rxq {
302 uint16_t head;
303 uint8_t cpuid;
304 uint64_t base;
305 #define IXL_HMC_RXQ_BASE_UNIT 128
306 uint16_t qlen;
307 uint16_t dbuff;
308 #define IXL_HMC_RXQ_DBUFF_UNIT 128
309 uint8_t hbuff;
310 #define IXL_HMC_RXQ_HBUFF_UNIT 64
311 uint8_t dtype;
312 #define IXL_HMC_RXQ_DTYPE_NOSPLIT 0x0
313 #define IXL_HMC_RXQ_DTYPE_HSPLIT 0x1
314 #define IXL_HMC_RXQ_DTYPE_SPLIT_ALWAYS 0x2
315 uint8_t dsize;
316 #define IXL_HMC_RXQ_DSIZE_16 0
317 #define IXL_HMC_RXQ_DSIZE_32 1
318 uint8_t crcstrip;
319 uint8_t fc_ena;
320 uint8_t l2sel;
321 uint8_t hsplit_0;
322 uint8_t hsplit_1;
323 uint8_t showiv;
324 uint16_t rxmax;
325 uint8_t tphrdesc_ena;
326 uint8_t tphwdesc_ena;
327 uint8_t tphdata_ena;
328 uint8_t tphhead_ena;
329 uint8_t lrxqthresh;
330 uint8_t prefena;
331 };
332
333 static const struct ixl_hmc_pack ixl_hmc_pack_rxq[] = {
334 { offsetof(struct ixl_hmc_rxq, head), 13, 0 },
335 { offsetof(struct ixl_hmc_rxq, cpuid), 8, 13 },
336 { offsetof(struct ixl_hmc_rxq, base), 57, 32 },
337 { offsetof(struct ixl_hmc_rxq, qlen), 13, 89 },
338 { offsetof(struct ixl_hmc_rxq, dbuff), 7, 102 },
339 { offsetof(struct ixl_hmc_rxq, hbuff), 5, 109 },
340 { offsetof(struct ixl_hmc_rxq, dtype), 2, 114 },
341 { offsetof(struct ixl_hmc_rxq, dsize), 1, 116 },
342 { offsetof(struct ixl_hmc_rxq, crcstrip), 1, 117 },
343 { offsetof(struct ixl_hmc_rxq, fc_ena), 1, 118 },
344 { offsetof(struct ixl_hmc_rxq, l2sel), 1, 119 },
345 { offsetof(struct ixl_hmc_rxq, hsplit_0), 4, 120 },
346 { offsetof(struct ixl_hmc_rxq, hsplit_1), 2, 124 },
347 { offsetof(struct ixl_hmc_rxq, showiv), 1, 127 },
348 { offsetof(struct ixl_hmc_rxq, rxmax), 14, 174 },
349 { offsetof(struct ixl_hmc_rxq, tphrdesc_ena), 1, 193 },
350 { offsetof(struct ixl_hmc_rxq, tphwdesc_ena), 1, 194 },
351 { offsetof(struct ixl_hmc_rxq, tphdata_ena), 1, 195 },
352 { offsetof(struct ixl_hmc_rxq, tphhead_ena), 1, 196 },
353 { offsetof(struct ixl_hmc_rxq, lrxqthresh), 3, 198 },
354 { offsetof(struct ixl_hmc_rxq, prefena), 1, 201 },
355 };
356
357 #define IXL_HMC_RXQ_MINSIZE (201 + 1)
358
359 struct ixl_hmc_txq {
360 uint16_t head;
361 uint8_t new_context;
362 uint64_t base;
363 #define IXL_HMC_TXQ_BASE_UNIT 128
364 uint8_t fc_ena;
365 uint8_t timesync_ena;
366 uint8_t fd_ena;
367 uint8_t alt_vlan_ena;
368 uint16_t thead_wb;
369 uint8_t cpuid;
370 uint8_t head_wb_ena;
371 #define IXL_HMC_TXQ_DESC_WB 0
372 #define IXL_HMC_TXQ_HEAD_WB 1
373 uint16_t qlen;
374 uint8_t tphrdesc_ena;
375 uint8_t tphrpacket_ena;
376 uint8_t tphwdesc_ena;
377 uint64_t head_wb_addr;
378 uint32_t crc;
379 uint16_t rdylist;
380 uint8_t rdylist_act;
381 };
382
383 static const struct ixl_hmc_pack ixl_hmc_pack_txq[] = {
384 { offsetof(struct ixl_hmc_txq, head), 13, 0 },
385 { offsetof(struct ixl_hmc_txq, new_context), 1, 30 },
386 { offsetof(struct ixl_hmc_txq, base), 57, 32 },
387 { offsetof(struct ixl_hmc_txq, fc_ena), 1, 89 },
388 { offsetof(struct ixl_hmc_txq, timesync_ena), 1, 90 },
389 { offsetof(struct ixl_hmc_txq, fd_ena), 1, 91 },
390 { offsetof(struct ixl_hmc_txq, alt_vlan_ena), 1, 92 },
391 { offsetof(struct ixl_hmc_txq, cpuid), 8, 96 },
392 /* line 1 */
393 { offsetof(struct ixl_hmc_txq, thead_wb), 13, 0 + 128 },
394 { offsetof(struct ixl_hmc_txq, head_wb_ena), 1, 32 + 128 },
395 { offsetof(struct ixl_hmc_txq, qlen), 13, 33 + 128 },
396 { offsetof(struct ixl_hmc_txq, tphrdesc_ena), 1, 46 + 128 },
397 { offsetof(struct ixl_hmc_txq, tphrpacket_ena), 1, 47 + 128 },
398 { offsetof(struct ixl_hmc_txq, tphwdesc_ena), 1, 48 + 128 },
399 { offsetof(struct ixl_hmc_txq, head_wb_addr), 64, 64 + 128 },
400 /* line 7 */
401 { offsetof(struct ixl_hmc_txq, crc), 32, 0 + (7*128) },
402 { offsetof(struct ixl_hmc_txq, rdylist), 10, 84 + (7*128) },
403 { offsetof(struct ixl_hmc_txq, rdylist_act), 1, 94 + (7*128) },
404 };
405
406 #define IXL_HMC_TXQ_MINSIZE (94 + (7*128) + 1)
407
408 struct ixl_work {
409 struct work ixw_cookie;
410 void (*ixw_func)(void *);
411 void *ixw_arg;
412 unsigned int ixw_added;
413 };
414 #define IXL_WORKQUEUE_PRI PRI_SOFTNET
415
416 struct ixl_tx_map {
417 struct mbuf *txm_m;
418 bus_dmamap_t txm_map;
419 unsigned int txm_eop;
420 };
421
422 struct ixl_tx_ring {
423 kmutex_t txr_lock;
424 struct ixl_softc *txr_sc;
425
426 unsigned int txr_prod;
427 unsigned int txr_cons;
428
429 struct ixl_tx_map *txr_maps;
430 struct ixl_dmamem txr_mem;
431
432 bus_size_t txr_tail;
433 unsigned int txr_qid;
434 pcq_t *txr_intrq;
435 void *txr_si;
436
437 struct evcnt txr_defragged;
438 struct evcnt txr_defrag_failed;
439 struct evcnt txr_pcqdrop;
440 struct evcnt txr_transmitdef;
441 struct evcnt txr_intr;
442 struct evcnt txr_defer;
443 };
444
445 struct ixl_rx_map {
446 struct mbuf *rxm_m;
447 bus_dmamap_t rxm_map;
448 };
449
450 struct ixl_rx_ring {
451 kmutex_t rxr_lock;
452
453 unsigned int rxr_prod;
454 unsigned int rxr_cons;
455
456 struct ixl_rx_map *rxr_maps;
457 struct ixl_dmamem rxr_mem;
458
459 struct mbuf *rxr_m_head;
460 struct mbuf **rxr_m_tail;
461
462 bus_size_t rxr_tail;
463 unsigned int rxr_qid;
464
465 struct evcnt rxr_mgethdr_failed;
466 struct evcnt rxr_mgetcl_failed;
467 struct evcnt rxr_mbuf_load_failed;
468 struct evcnt rxr_intr;
469 struct evcnt rxr_defer;
470 };
471
472 struct ixl_queue_pair {
473 struct ixl_softc *qp_sc;
474 struct ixl_tx_ring *qp_txr;
475 struct ixl_rx_ring *qp_rxr;
476
477 char qp_name[16];
478
479 void *qp_si;
480 struct ixl_work qp_task;
481 bool qp_workqueue;
482 };
483
484 struct ixl_atq {
485 struct ixl_aq_desc iatq_desc;
486 void (*iatq_fn)(struct ixl_softc *,
487 const struct ixl_aq_desc *);
488 };
489 SIMPLEQ_HEAD(ixl_atq_list, ixl_atq);
490
491 struct ixl_product {
492 unsigned int vendor_id;
493 unsigned int product_id;
494 };
495
496 struct ixl_stats_counters {
497 bool isc_has_offset;
498 struct evcnt isc_crc_errors;
499 uint64_t isc_crc_errors_offset;
500 struct evcnt isc_illegal_bytes;
501 uint64_t isc_illegal_bytes_offset;
502 struct evcnt isc_rx_bytes;
503 uint64_t isc_rx_bytes_offset;
504 struct evcnt isc_rx_discards;
505 uint64_t isc_rx_discards_offset;
506 struct evcnt isc_rx_unicast;
507 uint64_t isc_rx_unicast_offset;
508 struct evcnt isc_rx_multicast;
509 uint64_t isc_rx_multicast_offset;
510 struct evcnt isc_rx_broadcast;
511 uint64_t isc_rx_broadcast_offset;
512 struct evcnt isc_rx_size_64;
513 uint64_t isc_rx_size_64_offset;
514 struct evcnt isc_rx_size_127;
515 uint64_t isc_rx_size_127_offset;
516 struct evcnt isc_rx_size_255;
517 uint64_t isc_rx_size_255_offset;
518 struct evcnt isc_rx_size_511;
519 uint64_t isc_rx_size_511_offset;
520 struct evcnt isc_rx_size_1023;
521 uint64_t isc_rx_size_1023_offset;
522 struct evcnt isc_rx_size_1522;
523 uint64_t isc_rx_size_1522_offset;
524 struct evcnt isc_rx_size_big;
525 uint64_t isc_rx_size_big_offset;
526 struct evcnt isc_rx_undersize;
527 uint64_t isc_rx_undersize_offset;
528 struct evcnt isc_rx_oversize;
529 uint64_t isc_rx_oversize_offset;
530 struct evcnt isc_rx_fragments;
531 uint64_t isc_rx_fragments_offset;
532 struct evcnt isc_rx_jabber;
533 uint64_t isc_rx_jabber_offset;
534 struct evcnt isc_tx_bytes;
535 uint64_t isc_tx_bytes_offset;
536 struct evcnt isc_tx_dropped_link_down;
537 uint64_t isc_tx_dropped_link_down_offset;
538 struct evcnt isc_tx_unicast;
539 uint64_t isc_tx_unicast_offset;
540 struct evcnt isc_tx_multicast;
541 uint64_t isc_tx_multicast_offset;
542 struct evcnt isc_tx_broadcast;
543 uint64_t isc_tx_broadcast_offset;
544 struct evcnt isc_tx_size_64;
545 uint64_t isc_tx_size_64_offset;
546 struct evcnt isc_tx_size_127;
547 uint64_t isc_tx_size_127_offset;
548 struct evcnt isc_tx_size_255;
549 uint64_t isc_tx_size_255_offset;
550 struct evcnt isc_tx_size_511;
551 uint64_t isc_tx_size_511_offset;
552 struct evcnt isc_tx_size_1023;
553 uint64_t isc_tx_size_1023_offset;
554 struct evcnt isc_tx_size_1522;
555 uint64_t isc_tx_size_1522_offset;
556 struct evcnt isc_tx_size_big;
557 uint64_t isc_tx_size_big_offset;
558 struct evcnt isc_mac_local_faults;
559 uint64_t isc_mac_local_faults_offset;
560 struct evcnt isc_mac_remote_faults;
561 uint64_t isc_mac_remote_faults_offset;
562 struct evcnt isc_link_xon_rx;
563 uint64_t isc_link_xon_rx_offset;
564 struct evcnt isc_link_xon_tx;
565 uint64_t isc_link_xon_tx_offset;
566 struct evcnt isc_link_xoff_rx;
567 uint64_t isc_link_xoff_rx_offset;
568 struct evcnt isc_link_xoff_tx;
569 uint64_t isc_link_xoff_tx_offset;
570 struct evcnt isc_vsi_rx_discards;
571 uint64_t isc_vsi_rx_discards_offset;
572 struct evcnt isc_vsi_rx_bytes;
573 uint64_t isc_vsi_rx_bytes_offset;
574 struct evcnt isc_vsi_rx_unicast;
575 uint64_t isc_vsi_rx_unicast_offset;
576 struct evcnt isc_vsi_rx_multicast;
577 uint64_t isc_vsi_rx_multicast_offset;
578 struct evcnt isc_vsi_rx_broadcast;
579 uint64_t isc_vsi_rx_broadcast_offset;
580 struct evcnt isc_vsi_tx_errors;
581 uint64_t isc_vsi_tx_errors_offset;
582 struct evcnt isc_vsi_tx_bytes;
583 uint64_t isc_vsi_tx_bytes_offset;
584 struct evcnt isc_vsi_tx_unicast;
585 uint64_t isc_vsi_tx_unicast_offset;
586 struct evcnt isc_vsi_tx_multicast;
587 uint64_t isc_vsi_tx_multicast_offset;
588 struct evcnt isc_vsi_tx_broadcast;
589 uint64_t isc_vsi_tx_broadcast_offset;
590 };
591
/*
 * Locking notes:
 * + a field in ixl_tx_ring is protected by txr_lock (a spin mutex), and
 *   a field in ixl_rx_ring is protected by rxr_lock (a spin mutex).
 *    - only one of those locks may be held at a time.
 * + a field named sc_atq_* in ixl_softc is protected by sc_atq_lock
 *   (a spin mutex).
 *    - this lock cannot be held together with txr_lock or rxr_lock.
 * + a field named sc_arq_* is not protected by any lock.
 *    - operations on sc_arq_* are done in a single context related to
 *      sc_arq_task.
 * + other fields in ixl_softc are protected by sc_cfg_lock
 *   (an adaptive mutex).
 *    - it must be acquired before any other lock is taken, and it can
 *      be released after the other lock is released.
 */
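/*
 * Illustrative ordering implied by the notes above (not a literal code
 * path in this driver):
 *
 *	mutex_enter(&sc->sc_cfg_lock);			ok: taken first
 *	mutex_enter(&sc->sc_qps[i].qp_txr->txr_lock);	ok: leaf lock
 *	...
 *	mutex_exit(&sc->sc_qps[i].qp_txr->txr_lock);
 *	mutex_exit(&sc->sc_cfg_lock);
 *
 * txr_lock and rxr_lock must never be held simultaneously, and
 * sc_atq_lock must not be taken while txr_lock or rxr_lock is held.
 */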
608
609 struct ixl_softc {
610 device_t sc_dev;
611 struct ethercom sc_ec;
612 bool sc_attached;
613 bool sc_dead;
614 uint32_t sc_port;
615 struct sysctllog *sc_sysctllog;
616 struct workqueue *sc_workq;
617 struct workqueue *sc_workq_txrx;
618 int sc_stats_intval;
619 callout_t sc_stats_callout;
620 struct ixl_work sc_stats_task;
621 struct ixl_stats_counters
622 sc_stats_counters;
623 uint8_t sc_enaddr[ETHER_ADDR_LEN];
624 struct ifmedia sc_media;
625 uint64_t sc_media_status;
626 uint64_t sc_media_active;
627 uint64_t sc_phy_types;
628 uint8_t sc_phy_abilities;
629 uint8_t sc_phy_linkspeed;
630 uint8_t sc_phy_fec_cfg;
631 uint16_t sc_eee_cap;
632 uint32_t sc_eeer_val;
633 uint8_t sc_d3_lpan;
634 kmutex_t sc_cfg_lock;
635 enum i40e_mac_type sc_mac_type;
636 uint32_t sc_rss_table_size;
637 uint32_t sc_rss_table_entry_width;
638 bool sc_txrx_workqueue;
639 u_int sc_tx_process_limit;
640 u_int sc_rx_process_limit;
641 u_int sc_tx_intr_process_limit;
642 u_int sc_rx_intr_process_limit;
643
644 int sc_cur_ec_capenable;
645
646 struct pci_attach_args sc_pa;
647 pci_intr_handle_t *sc_ihp;
648 void **sc_ihs;
649 unsigned int sc_nintrs;
650
651 bus_dma_tag_t sc_dmat;
652 bus_space_tag_t sc_memt;
653 bus_space_handle_t sc_memh;
654 bus_size_t sc_mems;
655
656 uint8_t sc_pf_id;
657 uint16_t sc_uplink_seid; /* le */
658 uint16_t sc_downlink_seid; /* le */
659 uint16_t sc_vsi_number; /* le */
660 uint16_t sc_vsi_stat_counter_idx;
661 uint16_t sc_seid;
662 unsigned int sc_base_queue;
663
664 pci_intr_type_t sc_intrtype;
665 unsigned int sc_msix_vector_queue;
666
667 struct ixl_dmamem sc_scratch;
668 struct ixl_dmamem sc_aqbuf;
669
670 const struct ixl_aq_regs *
671 sc_aq_regs;
672 uint32_t sc_aq_flags;
673 #define IXL_SC_AQ_FLAG_RXCTL __BIT(0)
674 #define IXL_SC_AQ_FLAG_NVMLOCK __BIT(1)
675 #define IXL_SC_AQ_FLAG_NVMREAD __BIT(2)
676
677 kmutex_t sc_atq_lock;
678 kcondvar_t sc_atq_cv;
679 struct ixl_dmamem sc_atq;
680 unsigned int sc_atq_prod;
681 unsigned int sc_atq_cons;
682
683 struct ixl_dmamem sc_arq;
684 struct ixl_work sc_arq_task;
685 struct ixl_aq_bufs sc_arq_idle;
686 struct ixl_aq_buf *sc_arq_live[IXL_AQ_NUM];
687 unsigned int sc_arq_prod;
688 unsigned int sc_arq_cons;
689
690 struct ixl_work sc_link_state_task;
691 struct ixl_atq sc_link_state_atq;
692
693 struct ixl_dmamem sc_hmc_sd;
694 struct ixl_dmamem sc_hmc_pd;
695 struct ixl_hmc_entry sc_hmc_entries[IXL_HMC_COUNT];
696
697 unsigned int sc_tx_ring_ndescs;
698 unsigned int sc_rx_ring_ndescs;
699 unsigned int sc_nqueue_pairs;
700 unsigned int sc_nqueue_pairs_max;
701 unsigned int sc_nqueue_pairs_device;
702 struct ixl_queue_pair *sc_qps;
703
704 struct evcnt sc_event_atq;
705 struct evcnt sc_event_link;
706 struct evcnt sc_event_ecc_err;
707 struct evcnt sc_event_pci_exception;
708 struct evcnt sc_event_crit_err;
709 };
710
711 #define IXL_TXRX_PROCESS_UNLIMIT UINT_MAX
712 #define IXL_TX_PROCESS_LIMIT 256
713 #define IXL_RX_PROCESS_LIMIT 256
714 #define IXL_TX_INTR_PROCESS_LIMIT 256
715 #define IXL_RX_INTR_PROCESS_LIMIT 0U
716
717 #define IXL_IFCAP_RXCSUM (IFCAP_CSUM_IPv4_Rx | \
718 IFCAP_CSUM_TCPv4_Rx | \
719 IFCAP_CSUM_UDPv4_Rx | \
720 IFCAP_CSUM_TCPv6_Rx | \
721 IFCAP_CSUM_UDPv6_Rx)
722 #define IXL_IFCAP_TXCSUM (IFCAP_CSUM_IPv4_Tx | \
723 IFCAP_CSUM_TCPv4_Tx | \
724 IFCAP_CSUM_UDPv4_Tx | \
725 IFCAP_CSUM_TCPv6_Tx | \
726 IFCAP_CSUM_UDPv6_Tx)
727 #define IXL_CSUM_ALL_OFFLOAD (M_CSUM_IPv4 | \
728 M_CSUM_TCPv4 | M_CSUM_TCPv6 | \
729 M_CSUM_UDPv4 | M_CSUM_UDPv6)
730
731 #define delaymsec(_x) DELAY(1000 * (_x))
732 #ifdef IXL_DEBUG
733 #define DDPRINTF(sc, fmt, args...) \
734 do { \
735 if ((sc) != NULL) { \
736 device_printf( \
737 ((struct ixl_softc *)(sc))->sc_dev, \
738 ""); \
739 } \
740 printf("%s:\t" fmt, __func__, ##args); \
741 } while (0)
742 #else
743 #define DDPRINTF(sc, fmt, args...) __nothing
744 #endif
745 #ifndef IXL_STATS_INTERVAL_MSEC
746 #define IXL_STATS_INTERVAL_MSEC 10000
747 #endif
748 #ifndef IXL_QUEUE_NUM
749 #define IXL_QUEUE_NUM 0
750 #endif
751
752 static bool ixl_param_nomsix = false;
753 static int ixl_param_stats_interval = IXL_STATS_INTERVAL_MSEC;
754 static int ixl_param_nqps_limit = IXL_QUEUE_NUM;
755 static unsigned int ixl_param_tx_ndescs = 1024;
756 static unsigned int ixl_param_rx_ndescs = 1024;
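/*
 * The defaults above come from IXL_STATS_INTERVAL_MSEC and IXL_QUEUE_NUM,
 * which appear to be intended as build-time overrides via opt_if_ixl.h
 * (see the #ifndef guards above).  A queue-pair limit of 0 means the
 * number of queue pairs is bounded only by ncpu and the device maximum.
 */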
757
758 static enum i40e_mac_type
759 ixl_mactype(pci_product_id_t);
760 static void ixl_clear_hw(struct ixl_softc *);
761 static int ixl_pf_reset(struct ixl_softc *);
762
763 static int ixl_dmamem_alloc(struct ixl_softc *, struct ixl_dmamem *,
764 bus_size_t, bus_size_t);
765 static void ixl_dmamem_free(struct ixl_softc *, struct ixl_dmamem *);
766
767 static int ixl_arq_fill(struct ixl_softc *);
768 static void ixl_arq_unfill(struct ixl_softc *);
769
770 static int ixl_atq_poll(struct ixl_softc *, struct ixl_aq_desc *,
771 unsigned int);
772 static void ixl_atq_set(struct ixl_atq *,
773 void (*)(struct ixl_softc *, const struct ixl_aq_desc *));
774 static int ixl_atq_post(struct ixl_softc *, struct ixl_atq *);
775 static int ixl_atq_post_locked(struct ixl_softc *, struct ixl_atq *);
776 static void ixl_atq_done(struct ixl_softc *);
777 static int ixl_atq_exec(struct ixl_softc *, struct ixl_atq *);
778 static int ixl_get_version(struct ixl_softc *);
779 static int ixl_get_nvm_version(struct ixl_softc *);
780 static int ixl_get_hw_capabilities(struct ixl_softc *);
781 static int ixl_pxe_clear(struct ixl_softc *);
782 static int ixl_lldp_shut(struct ixl_softc *);
783 static int ixl_get_mac(struct ixl_softc *);
784 static int ixl_get_switch_config(struct ixl_softc *);
785 static int ixl_phy_mask_ints(struct ixl_softc *);
786 static int ixl_get_phy_info(struct ixl_softc *);
787 static int ixl_set_phy_config(struct ixl_softc *, uint8_t, uint8_t, bool);
788 static int ixl_set_phy_autoselect(struct ixl_softc *);
789 static int ixl_restart_an(struct ixl_softc *);
790 static int ixl_hmc(struct ixl_softc *);
791 static void ixl_hmc_free(struct ixl_softc *);
792 static int ixl_get_vsi(struct ixl_softc *);
793 static int ixl_set_vsi(struct ixl_softc *);
794 static void ixl_set_filter_control(struct ixl_softc *);
795 static void ixl_get_link_status(void *);
796 static int ixl_get_link_status_poll(struct ixl_softc *, int *);
797 static int ixl_set_link_status(struct ixl_softc *,
798 const struct ixl_aq_desc *);
799 static uint64_t ixl_search_link_speed(uint8_t);
800 static uint8_t ixl_search_baudrate(uint64_t);
801 static void ixl_config_rss(struct ixl_softc *);
802 static int ixl_add_macvlan(struct ixl_softc *, const uint8_t *,
803 uint16_t, uint16_t);
804 static int ixl_remove_macvlan(struct ixl_softc *, const uint8_t *,
805 uint16_t, uint16_t);
806 static void ixl_arq(void *);
807 static void ixl_hmc_pack(void *, const void *,
808 const struct ixl_hmc_pack *, unsigned int);
809 static uint32_t ixl_rd_rx_csr(struct ixl_softc *, uint32_t);
810 static void ixl_wr_rx_csr(struct ixl_softc *, uint32_t, uint32_t);
811 static int ixl_rd16_nvm(struct ixl_softc *, uint16_t, uint16_t *);
812
813 static int ixl_match(device_t, cfdata_t, void *);
814 static void ixl_attach(device_t, device_t, void *);
815 static int ixl_detach(device_t, int);
816
817 static void ixl_media_add(struct ixl_softc *);
818 static int ixl_media_change(struct ifnet *);
819 static void ixl_media_status(struct ifnet *, struct ifmediareq *);
820 static void ixl_watchdog(struct ifnet *);
821 static int ixl_ioctl(struct ifnet *, u_long, void *);
822 static void ixl_start(struct ifnet *);
823 static int ixl_transmit(struct ifnet *, struct mbuf *);
824 static void ixl_deferred_transmit(void *);
825 static int ixl_intr(void *);
826 static int ixl_queue_intr(void *);
827 static int ixl_other_intr(void *);
828 static void ixl_handle_queue(void *);
829 static void ixl_sched_handle_queue(struct ixl_softc *,
830 struct ixl_queue_pair *);
831 static int ixl_init(struct ifnet *);
832 static int ixl_init_locked(struct ixl_softc *);
833 static void ixl_stop(struct ifnet *, int);
834 static void ixl_stop_locked(struct ixl_softc *);
835 static int ixl_iff(struct ixl_softc *);
836 static int ixl_ifflags_cb(struct ethercom *);
837 static int ixl_setup_interrupts(struct ixl_softc *);
838 static int ixl_establish_intx(struct ixl_softc *);
839 static int ixl_establish_msix(struct ixl_softc *);
840 static void ixl_enable_queue_intr(struct ixl_softc *,
841 struct ixl_queue_pair *);
842 static void ixl_disable_queue_intr(struct ixl_softc *,
843 struct ixl_queue_pair *);
844 static void ixl_enable_other_intr(struct ixl_softc *);
845 static void ixl_disable_other_intr(struct ixl_softc *);
846 static void ixl_config_queue_intr(struct ixl_softc *);
847 static void ixl_config_other_intr(struct ixl_softc *);
848
849 static struct ixl_tx_ring *
850 ixl_txr_alloc(struct ixl_softc *, unsigned int);
851 static void ixl_txr_qdis(struct ixl_softc *, struct ixl_tx_ring *, int);
852 static void ixl_txr_config(struct ixl_softc *, struct ixl_tx_ring *);
853 static int ixl_txr_enabled(struct ixl_softc *, struct ixl_tx_ring *);
854 static int ixl_txr_disabled(struct ixl_softc *, struct ixl_tx_ring *);
855 static void ixl_txr_unconfig(struct ixl_softc *, struct ixl_tx_ring *);
856 static void ixl_txr_clean(struct ixl_softc *, struct ixl_tx_ring *);
857 static void ixl_txr_free(struct ixl_softc *, struct ixl_tx_ring *);
858 static int ixl_txeof(struct ixl_softc *, struct ixl_tx_ring *, u_int);
859
860 static struct ixl_rx_ring *
861 ixl_rxr_alloc(struct ixl_softc *, unsigned int);
862 static void ixl_rxr_config(struct ixl_softc *, struct ixl_rx_ring *);
863 static int ixl_rxr_enabled(struct ixl_softc *, struct ixl_rx_ring *);
864 static int ixl_rxr_disabled(struct ixl_softc *, struct ixl_rx_ring *);
865 static void ixl_rxr_unconfig(struct ixl_softc *, struct ixl_rx_ring *);
866 static void ixl_rxr_clean(struct ixl_softc *, struct ixl_rx_ring *);
867 static void ixl_rxr_free(struct ixl_softc *, struct ixl_rx_ring *);
868 static int ixl_rxeof(struct ixl_softc *, struct ixl_rx_ring *, u_int);
869 static int ixl_rxfill(struct ixl_softc *, struct ixl_rx_ring *);
870
871 static struct workqueue *
872 ixl_workq_create(const char *, pri_t, int, int);
873 static void ixl_workq_destroy(struct workqueue *);
874 static int ixl_workqs_teardown(device_t);
875 static void ixl_work_set(struct ixl_work *, void (*)(void *), void *);
876 static void ixl_work_add(struct workqueue *, struct ixl_work *);
877 static void ixl_work_wait(struct workqueue *, struct ixl_work *);
878 static void ixl_workq_work(struct work *, void *);
879 static const struct ixl_product *
880 ixl_lookup(const struct pci_attach_args *pa);
881 static void ixl_link_state_update(struct ixl_softc *,
882 const struct ixl_aq_desc *);
883 static int ixl_vlan_cb(struct ethercom *, uint16_t, bool);
884 static int ixl_setup_vlan_hwfilter(struct ixl_softc *);
885 static void ixl_teardown_vlan_hwfilter(struct ixl_softc *);
886 static int ixl_update_macvlan(struct ixl_softc *);
888 static void ixl_teardown_interrupts(struct ixl_softc *);
889 static int ixl_setup_stats(struct ixl_softc *);
890 static void ixl_teardown_stats(struct ixl_softc *);
891 static void ixl_stats_callout(void *);
892 static void ixl_stats_update(void *);
893 static int ixl_setup_sysctls(struct ixl_softc *);
894 static void ixl_teardown_sysctls(struct ixl_softc *);
895 static int ixl_queue_pairs_alloc(struct ixl_softc *);
896 static void ixl_queue_pairs_free(struct ixl_softc *);
897
898 static const struct ixl_phy_type ixl_phy_type_map[] = {
899 { 1ULL << IXL_PHY_TYPE_SGMII, IFM_1000_SGMII },
900 { 1ULL << IXL_PHY_TYPE_1000BASE_KX, IFM_1000_KX },
901 { 1ULL << IXL_PHY_TYPE_10GBASE_KX4, IFM_10G_KX4 },
902 { 1ULL << IXL_PHY_TYPE_10GBASE_KR, IFM_10G_KR },
903 { 1ULL << IXL_PHY_TYPE_40GBASE_KR4, IFM_40G_KR4 },
904 { 1ULL << IXL_PHY_TYPE_XAUI |
905 1ULL << IXL_PHY_TYPE_XFI, IFM_10G_CX4 },
906 { 1ULL << IXL_PHY_TYPE_SFI, IFM_10G_SFI },
907 { 1ULL << IXL_PHY_TYPE_XLAUI |
908 1ULL << IXL_PHY_TYPE_XLPPI, IFM_40G_XLPPI },
909 { 1ULL << IXL_PHY_TYPE_40GBASE_CR4_CU |
910 1ULL << IXL_PHY_TYPE_40GBASE_CR4, IFM_40G_CR4 },
911 { 1ULL << IXL_PHY_TYPE_10GBASE_CR1_CU |
912 1ULL << IXL_PHY_TYPE_10GBASE_CR1, IFM_10G_CR1 },
913 { 1ULL << IXL_PHY_TYPE_10GBASE_AOC, IFM_10G_AOC },
914 { 1ULL << IXL_PHY_TYPE_40GBASE_AOC, IFM_40G_AOC },
915 { 1ULL << IXL_PHY_TYPE_100BASE_TX, IFM_100_TX },
916 { 1ULL << IXL_PHY_TYPE_1000BASE_T_OPTICAL |
917 1ULL << IXL_PHY_TYPE_1000BASE_T, IFM_1000_T },
918 { 1ULL << IXL_PHY_TYPE_10GBASE_T, IFM_10G_T },
919 { 1ULL << IXL_PHY_TYPE_10GBASE_SR, IFM_10G_SR },
920 { 1ULL << IXL_PHY_TYPE_10GBASE_LR, IFM_10G_LR },
921 { 1ULL << IXL_PHY_TYPE_10GBASE_SFPP_CU, IFM_10G_TWINAX },
922 { 1ULL << IXL_PHY_TYPE_40GBASE_SR4, IFM_40G_SR4 },
923 { 1ULL << IXL_PHY_TYPE_40GBASE_LR4, IFM_40G_LR4 },
924 { 1ULL << IXL_PHY_TYPE_1000BASE_SX, IFM_1000_SX },
925 { 1ULL << IXL_PHY_TYPE_1000BASE_LX, IFM_1000_LX },
926 { 1ULL << IXL_PHY_TYPE_20GBASE_KR2, IFM_20G_KR2 },
927 { 1ULL << IXL_PHY_TYPE_25GBASE_KR, IFM_25G_KR },
928 { 1ULL << IXL_PHY_TYPE_25GBASE_CR, IFM_25G_CR },
929 { 1ULL << IXL_PHY_TYPE_25GBASE_SR, IFM_25G_SR },
930 { 1ULL << IXL_PHY_TYPE_25GBASE_LR, IFM_25G_LR },
931 { 1ULL << IXL_PHY_TYPE_25GBASE_AOC, IFM_25G_AOC },
932 { 1ULL << IXL_PHY_TYPE_25GBASE_ACC, IFM_25G_CR },
933 };
934
935 static const struct ixl_speed_type ixl_speed_type_map[] = {
936 { IXL_AQ_LINK_SPEED_40GB, IF_Gbps(40) },
937 { IXL_AQ_LINK_SPEED_25GB, IF_Gbps(25) },
938 { IXL_AQ_LINK_SPEED_10GB, IF_Gbps(10) },
939 { IXL_AQ_LINK_SPEED_1000MB, IF_Mbps(1000) },
940 { IXL_AQ_LINK_SPEED_100MB, IF_Mbps(100)},
941 };
942
943 static const struct ixl_aq_regs ixl_pf_aq_regs = {
944 .atq_tail = I40E_PF_ATQT,
945 .atq_tail_mask = I40E_PF_ATQT_ATQT_MASK,
946 .atq_head = I40E_PF_ATQH,
947 .atq_head_mask = I40E_PF_ATQH_ATQH_MASK,
948 .atq_len = I40E_PF_ATQLEN,
949 .atq_bal = I40E_PF_ATQBAL,
950 .atq_bah = I40E_PF_ATQBAH,
951 .atq_len_enable = I40E_PF_ATQLEN_ATQENABLE_MASK,
952
953 .arq_tail = I40E_PF_ARQT,
954 .arq_tail_mask = I40E_PF_ARQT_ARQT_MASK,
955 .arq_head = I40E_PF_ARQH,
956 .arq_head_mask = I40E_PF_ARQH_ARQH_MASK,
957 .arq_len = I40E_PF_ARQLEN,
958 .arq_bal = I40E_PF_ARQBAL,
959 .arq_bah = I40E_PF_ARQBAH,
960 .arq_len_enable = I40E_PF_ARQLEN_ARQENABLE_MASK,
961 };
962
963 #define ixl_rd(_s, _r) \
964 bus_space_read_4((_s)->sc_memt, (_s)->sc_memh, (_r))
965 #define ixl_wr(_s, _r, _v) \
966 bus_space_write_4((_s)->sc_memt, (_s)->sc_memh, (_r), (_v))
967 #define ixl_barrier(_s, _r, _l, _o) \
968 bus_space_barrier((_s)->sc_memt, (_s)->sc_memh, (_r), (_l), (_o))
969 #define ixl_flush(_s) (void)ixl_rd((_s), I40E_GLGEN_STAT)
970 #define ixl_nqueues(_sc) (1 << ((_sc)->sc_nqueue_pairs - 1))
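/*
 * ixl_flush() reads a register and discards the result; the read-back is
 * the usual way to make sure preceding register writes have been posted
 * to the device.
 */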
971
972 static inline uint32_t
973 ixl_dmamem_hi(struct ixl_dmamem *ixm)
974 {
975 uint32_t retval;
976 uint64_t val;
977
978 if (sizeof(IXL_DMA_DVA(ixm)) > 4) {
979 val = (intptr_t)IXL_DMA_DVA(ixm);
980 retval = (uint32_t)(val >> 32);
981 } else {
982 retval = 0;
983 }
984
985 return retval;
986 }
987
988 static inline uint32_t
989 ixl_dmamem_lo(struct ixl_dmamem *ixm)
990 {
991
992 return (uint32_t)IXL_DMA_DVA(ixm);
993 }
994
995 static inline void
996 ixl_aq_dva(struct ixl_aq_desc *iaq, bus_addr_t addr)
997 {
998 uint64_t val;
999
1000 if (sizeof(addr) > 4) {
1001 val = (intptr_t)addr;
1002 iaq->iaq_param[2] = htole32(val >> 32);
1003 } else {
1004 iaq->iaq_param[2] = htole32(0);
1005 }
1006
1007 iaq->iaq_param[3] = htole32(addr);
1008 }
1009
1010 static inline unsigned int
1011 ixl_rxr_unrefreshed(unsigned int prod, unsigned int cons, unsigned int ndescs)
1012 {
1013 unsigned int num;
1014
1015 if (prod < cons)
1016 num = cons - prod;
1017 else
1018 num = (ndescs - prod) + cons;
1019
1020 if (__predict_true(num > 0)) {
		/* the device cannot receive packets if all descriptors are filled */
1022 num -= 1;
1023 }
1024
1025 return num;
1026 }
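/*
 * Worked example for ixl_rxr_unrefreshed(): with ndescs = 8, prod = 6 and
 * cons = 2 the computation yields (8 - 6) + 2 = 4, and one slot is then
 * reserved so the producer never catches up with the consumer, giving a
 * result of 3.
 */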
1027
1028 CFATTACH_DECL3_NEW(ixl, sizeof(struct ixl_softc),
1029 ixl_match, ixl_attach, ixl_detach, NULL, NULL, NULL,
1030 DVF_DETACH_SHUTDOWN);
1031
1032 static const struct ixl_product ixl_products[] = {
1033 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_SFP },
1034 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_KX_B },
1035 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_KX_C },
1036 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_A },
1037 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_B },
1038 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_C },
1039 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_T },
1040 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_1 },
1041 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_2 },
1042 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_T4_10G },
1043 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_BP },
1044 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_SFP28 },
1045 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_KX },
1046 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_QSFP },
1047 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_SFP },
1048 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_1G_BASET },
1049 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_10G_BASET },
1050 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_I_SFP },
1051 /* required last entry */
1052 {0, 0}
1053 };
1054
1055 static const struct ixl_product *
1056 ixl_lookup(const struct pci_attach_args *pa)
1057 {
1058 const struct ixl_product *ixlp;
1059
1060 for (ixlp = ixl_products; ixlp->vendor_id != 0; ixlp++) {
1061 if (PCI_VENDOR(pa->pa_id) == ixlp->vendor_id &&
1062 PCI_PRODUCT(pa->pa_id) == ixlp->product_id)
1063 return ixlp;
1064 }
1065
1066 return NULL;
1067 }
1068
1069 static int
1070 ixl_match(device_t parent, cfdata_t match, void *aux)
1071 {
1072 const struct pci_attach_args *pa = aux;
1073
1074 return (ixl_lookup(pa) != NULL) ? 1 : 0;
1075 }
1076
1077 static void
1078 ixl_attach(device_t parent, device_t self, void *aux)
1079 {
1080 struct ixl_softc *sc;
1081 struct pci_attach_args *pa = aux;
1082 struct ifnet *ifp;
1083 pcireg_t memtype;
1084 uint32_t firstq, port, ari, func;
1085 char xnamebuf[32];
1086 int tries, rv, link;
1087
1088 sc = device_private(self);
1089 sc->sc_dev = self;
1090 ifp = &sc->sc_ec.ec_if;
1091
1092 sc->sc_pa = *pa;
1093 sc->sc_dmat = (pci_dma64_available(pa)) ?
1094 pa->pa_dmat64 : pa->pa_dmat;
1095 sc->sc_aq_regs = &ixl_pf_aq_regs;
1096
1097 sc->sc_mac_type = ixl_mactype(PCI_PRODUCT(pa->pa_id));
1098
1099 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IXL_PCIREG);
1100 if (pci_mapreg_map(pa, IXL_PCIREG, memtype, 0,
1101 &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems)) {
1102 aprint_error(": unable to map registers\n");
1103 return;
1104 }
1105
1106 mutex_init(&sc->sc_cfg_lock, MUTEX_DEFAULT, IPL_SOFTNET);
1107
1108 firstq = ixl_rd(sc, I40E_PFLAN_QALLOC);
1109 firstq &= I40E_PFLAN_QALLOC_FIRSTQ_MASK;
1110 firstq >>= I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
1111 sc->sc_base_queue = firstq;
1112
1113 ixl_clear_hw(sc);
1114 if (ixl_pf_reset(sc) == -1) {
		/* error printed by ixl_pf_reset */
1116 goto unmap;
1117 }
1118
1119 port = ixl_rd(sc, I40E_PFGEN_PORTNUM);
1120 port &= I40E_PFGEN_PORTNUM_PORT_NUM_MASK;
1121 port >>= I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
1122 sc->sc_port = port;
1123 aprint_normal(": port %u", sc->sc_port);
1124
1125 ari = ixl_rd(sc, I40E_GLPCI_CAPSUP);
1126 ari &= I40E_GLPCI_CAPSUP_ARI_EN_MASK;
1127 ari >>= I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
1128
1129 func = ixl_rd(sc, I40E_PF_FUNC_RID);
1130 sc->sc_pf_id = func & (ari ? 0xff : 0x7);
1131
1132 /* initialise the adminq */
1133
1134 mutex_init(&sc->sc_atq_lock, MUTEX_DEFAULT, IPL_NET);
1135
1136 if (ixl_dmamem_alloc(sc, &sc->sc_atq,
1137 sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
1138 aprint_error("\n" "%s: unable to allocate atq\n",
1139 device_xname(self));
1140 goto unmap;
1141 }
1142
1143 SIMPLEQ_INIT(&sc->sc_arq_idle);
1144 ixl_work_set(&sc->sc_arq_task, ixl_arq, sc);
1145 sc->sc_arq_cons = 0;
1146 sc->sc_arq_prod = 0;
1147
1148 if (ixl_dmamem_alloc(sc, &sc->sc_arq,
1149 sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
1150 aprint_error("\n" "%s: unable to allocate arq\n",
1151 device_xname(self));
1152 goto free_atq;
1153 }
1154
1155 if (!ixl_arq_fill(sc)) {
1156 aprint_error("\n" "%s: unable to fill arq descriptors\n",
1157 device_xname(self));
1158 goto free_arq;
1159 }
1160
1161 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1162 0, IXL_DMA_LEN(&sc->sc_atq),
1163 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1164
1165 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1166 0, IXL_DMA_LEN(&sc->sc_arq),
1167 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1168
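	/*
	 * Program the admin queue registers and query the firmware
	 * version; retry up to 10 times, 100 ms apart, as long as the
	 * query keeps timing out.
	 */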
1169 for (tries = 0; tries < 10; tries++) {
1170 sc->sc_atq_cons = 0;
1171 sc->sc_atq_prod = 0;
1172
1173 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1174 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1175 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1176 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1177
1178 ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
1179
1180 ixl_wr(sc, sc->sc_aq_regs->atq_bal,
1181 ixl_dmamem_lo(&sc->sc_atq));
1182 ixl_wr(sc, sc->sc_aq_regs->atq_bah,
1183 ixl_dmamem_hi(&sc->sc_atq));
1184 ixl_wr(sc, sc->sc_aq_regs->atq_len,
1185 sc->sc_aq_regs->atq_len_enable | IXL_AQ_NUM);
1186
1187 ixl_wr(sc, sc->sc_aq_regs->arq_bal,
1188 ixl_dmamem_lo(&sc->sc_arq));
1189 ixl_wr(sc, sc->sc_aq_regs->arq_bah,
1190 ixl_dmamem_hi(&sc->sc_arq));
1191 ixl_wr(sc, sc->sc_aq_regs->arq_len,
1192 sc->sc_aq_regs->arq_len_enable | IXL_AQ_NUM);
1193
1194 rv = ixl_get_version(sc);
1195 if (rv == 0)
1196 break;
1197 if (rv != ETIMEDOUT) {
1198 aprint_error(", unable to get firmware version\n");
1199 goto shutdown;
1200 }
1201
1202 delaymsec(100);
1203 }
1204
1205 ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
1206
1207 if (ixl_dmamem_alloc(sc, &sc->sc_aqbuf, IXL_AQ_BUFLEN, 0) != 0) {
1208 aprint_error_dev(self, ", unable to allocate nvm buffer\n");
1209 goto shutdown;
1210 }
1211
1212 ixl_get_nvm_version(sc);
1213
1214 if (sc->sc_mac_type == I40E_MAC_X722)
1215 sc->sc_nqueue_pairs_device = 128;
1216 else
1217 sc->sc_nqueue_pairs_device = 64;
1218
1219 rv = ixl_get_hw_capabilities(sc);
1220 if (rv != 0) {
1221 aprint_error(", GET HW CAPABILITIES %s\n",
1222 rv == ETIMEDOUT ? "timeout" : "error");
1223 goto free_aqbuf;
1224 }
1225
1226 sc->sc_nqueue_pairs_max = MIN((int)sc->sc_nqueue_pairs_device, ncpu);
1227 if (ixl_param_nqps_limit > 0) {
1228 sc->sc_nqueue_pairs_max = MIN((int)sc->sc_nqueue_pairs_max,
1229 ixl_param_nqps_limit);
1230 }
1231
1232 sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max;
1233 sc->sc_tx_ring_ndescs = ixl_param_tx_ndescs;
1234 sc->sc_rx_ring_ndescs = ixl_param_rx_ndescs;
1235
1236 KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_rx_ring_ndescs);
1237 KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_tx_ring_ndescs);
1238
1239 if (ixl_get_mac(sc) != 0) {
1240 /* error printed by ixl_get_mac */
1241 goto free_aqbuf;
1242 }
1243
1244 aprint_normal("\n");
1245 aprint_naive("\n");
1246
1247 aprint_normal_dev(self, "Ethernet address %s\n",
1248 ether_sprintf(sc->sc_enaddr));
1249
1250 rv = ixl_pxe_clear(sc);
1251 if (rv != 0) {
1252 aprint_debug_dev(self, "CLEAR PXE MODE %s\n",
1253 rv == ETIMEDOUT ? "timeout" : "error");
1254 }
1255
1256 ixl_set_filter_control(sc);
1257
1258 if (ixl_hmc(sc) != 0) {
1259 /* error printed by ixl_hmc */
1260 goto free_aqbuf;
1261 }
1262
1263 if (ixl_lldp_shut(sc) != 0) {
1264 /* error printed by ixl_lldp_shut */
1265 goto free_hmc;
1266 }
1267
1268 if (ixl_phy_mask_ints(sc) != 0) {
1269 /* error printed by ixl_phy_mask_ints */
1270 goto free_hmc;
1271 }
1272
1273 if (ixl_restart_an(sc) != 0) {
1274 /* error printed by ixl_restart_an */
1275 goto free_hmc;
1276 }
1277
1278 if (ixl_get_switch_config(sc) != 0) {
1279 /* error printed by ixl_get_switch_config */
1280 goto free_hmc;
1281 }
1282
1283 rv = ixl_get_link_status_poll(sc, NULL);
1284 if (rv != 0) {
1285 aprint_error_dev(self, "GET LINK STATUS %s\n",
1286 rv == ETIMEDOUT ? "timeout" : "error");
1287 goto free_hmc;
1288 }
1289
	/*
	 * the firmware often returns EIO for the "Get PHY Abilities"
	 * command if it is issued without a short delay first.
	 */
1294 DELAY(500);
1295 if (ixl_get_phy_info(sc) != 0) {
1296 /* error printed by ixl_get_phy_info */
1297 goto free_hmc;
1298 }
1299
1300 if (ixl_dmamem_alloc(sc, &sc->sc_scratch,
1301 sizeof(struct ixl_aq_vsi_data), 8) != 0) {
1302 aprint_error_dev(self, "unable to allocate scratch buffer\n");
1303 goto free_hmc;
1304 }
1305
1306 rv = ixl_get_vsi(sc);
1307 if (rv != 0) {
1308 aprint_error_dev(self, "GET VSI %s %d\n",
1309 rv == ETIMEDOUT ? "timeout" : "error", rv);
1310 goto free_scratch;
1311 }
1312
1313 rv = ixl_set_vsi(sc);
1314 if (rv != 0) {
1315 aprint_error_dev(self, "UPDATE VSI error %s %d\n",
1316 rv == ETIMEDOUT ? "timeout" : "error", rv);
1317 goto free_scratch;
1318 }
1319
1320 if (ixl_queue_pairs_alloc(sc) != 0) {
1321 /* error printed by ixl_queue_pairs_alloc */
1322 goto free_scratch;
1323 }
1324
1325 if (ixl_setup_interrupts(sc) != 0) {
1326 /* error printed by ixl_setup_interrupts */
1327 goto free_queue_pairs;
1328 }
1329
1330 if (ixl_setup_stats(sc) != 0) {
1331 aprint_error_dev(self, "failed to setup event counters\n");
1332 goto teardown_intrs;
1333 }
1334
1335 if (ixl_setup_sysctls(sc) != 0) {
1336 /* error printed by ixl_setup_sysctls */
1337 goto teardown_stats;
1338 }
1339
1340 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_cfg", device_xname(self));
1341 sc->sc_workq = ixl_workq_create(xnamebuf, IXL_WORKQUEUE_PRI,
1342 IPL_NET, WQ_PERCPU | WQ_MPSAFE);
1343 if (sc->sc_workq == NULL)
1344 goto teardown_sysctls;
1345
1346 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_txrx", device_xname(self));
1347 sc->sc_workq_txrx = ixl_workq_create(xnamebuf, IXL_WORKQUEUE_PRI,
1348 IPL_NET, WQ_PERCPU | WQ_MPSAFE);
1349 if (sc->sc_workq_txrx == NULL)
1350 goto teardown_wqs;
1351
1352 snprintf(xnamebuf, sizeof(xnamebuf), "%s_atq_cv", device_xname(self));
1353 cv_init(&sc->sc_atq_cv, xnamebuf);
1354
1355 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
1356
1357 ifp->if_softc = sc;
1358 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1359 ifp->if_extflags = IFEF_MPSAFE;
1360 ifp->if_ioctl = ixl_ioctl;
1361 ifp->if_start = ixl_start;
1362 ifp->if_transmit = ixl_transmit;
1363 ifp->if_watchdog = ixl_watchdog;
1364 ifp->if_init = ixl_init;
1365 ifp->if_stop = ixl_stop;
1366 IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_ndescs);
1367 IFQ_SET_READY(&ifp->if_snd);
1368 ifp->if_capabilities |= IXL_IFCAP_RXCSUM;
1369 ifp->if_capabilities |= IXL_IFCAP_TXCSUM;
1370 #if 0
1371 ifp->if_capabilities |= IFCAP_TSOv4 | IFCAP_TSOv6;
1372 #endif
1373 ether_set_vlan_cb(&sc->sc_ec, ixl_vlan_cb);
1374 sc->sc_ec.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1375 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
1376 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
1377
1378 sc->sc_ec.ec_capenable = sc->sc_ec.ec_capabilities;
1379 /* Disable VLAN_HWFILTER by default */
1380 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
1381
1382 sc->sc_cur_ec_capenable = sc->sc_ec.ec_capenable;
1383
1384 sc->sc_ec.ec_ifmedia = &sc->sc_media;
1385 ifmedia_init(&sc->sc_media, IFM_IMASK, ixl_media_change,
1386 ixl_media_status);
1387
1388 ixl_media_add(sc);
1389 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1390 if (ISSET(sc->sc_phy_abilities,
1391 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX))) {
1392 ifmedia_add(&sc->sc_media,
1393 IFM_ETHER | IFM_AUTO | IFM_FLOW, 0, NULL);
1394 }
1395 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_NONE, 0, NULL);
1396 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
1397
1398 if_attach(ifp);
1399 if_deferred_start_init(ifp, NULL);
1400 ether_ifattach(ifp, sc->sc_enaddr);
1401 ether_set_ifflags_cb(&sc->sc_ec, ixl_ifflags_cb);
1402
1403 rv = ixl_get_link_status_poll(sc, &link);
1404 if (rv != 0)
1405 link = LINK_STATE_UNKNOWN;
1406 if_link_state_change(ifp, link);
1407
1408 ixl_work_set(&sc->sc_link_state_task, ixl_get_link_status, sc);
1409
1410 ixl_config_other_intr(sc);
1411 ixl_enable_other_intr(sc);
1412
1413 ixl_set_phy_autoselect(sc);
1414
1415 /* remove default mac filter and replace it so we can see vlans */
1416 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, 0, 0);
1417 if (rv != ENOENT) {
1418 aprint_debug_dev(self,
1419 "unable to remove macvlan %u\n", rv);
1420 }
1421 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
1422 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1423 if (rv != ENOENT) {
1424 aprint_debug_dev(self,
1425 "unable to remove macvlan, ignore vlan %u\n", rv);
1426 }
1427
1428 if (ixl_update_macvlan(sc) != 0) {
1429 aprint_debug_dev(self,
1430 "couldn't enable vlan hardware filter\n");
1431 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
1432 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER);
1433 }
1434
1435 sc->sc_txrx_workqueue = true;
1436 sc->sc_tx_process_limit = IXL_TX_PROCESS_LIMIT;
1437 sc->sc_rx_process_limit = IXL_RX_PROCESS_LIMIT;
1438 sc->sc_tx_intr_process_limit = IXL_TX_INTR_PROCESS_LIMIT;
1439 sc->sc_rx_intr_process_limit = IXL_RX_INTR_PROCESS_LIMIT;
1440
1441 ixl_stats_update(sc);
1442 sc->sc_stats_counters.isc_has_offset = true;
1443 callout_schedule(&sc->sc_stats_callout, mstohz(sc->sc_stats_intval));
1444
1445 if (pmf_device_register(self, NULL, NULL) != true)
1446 aprint_debug_dev(self, "couldn't establish power handler\n");
1447 sc->sc_attached = true;
1448 return;
1449
1450 teardown_wqs:
1451 config_finalize_register(self, ixl_workqs_teardown);
1452 teardown_sysctls:
1453 ixl_teardown_sysctls(sc);
1454 teardown_stats:
1455 ixl_teardown_stats(sc);
1456 teardown_intrs:
1457 ixl_teardown_interrupts(sc);
1458 free_queue_pairs:
1459 ixl_queue_pairs_free(sc);
1460 free_scratch:
1461 ixl_dmamem_free(sc, &sc->sc_scratch);
1462 free_hmc:
1463 ixl_hmc_free(sc);
1464 free_aqbuf:
1465 ixl_dmamem_free(sc, &sc->sc_aqbuf);
1466 shutdown:
1467 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1468 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1469 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1470 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1471
1472 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0);
1473 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0);
1474 ixl_wr(sc, sc->sc_aq_regs->atq_len, 0);
1475
1476 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0);
1477 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0);
1478 ixl_wr(sc, sc->sc_aq_regs->arq_len, 0);
1479
1480 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1481 0, IXL_DMA_LEN(&sc->sc_arq),
1482 BUS_DMASYNC_POSTREAD);
1483 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1484 0, IXL_DMA_LEN(&sc->sc_atq),
1485 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1486
1487 ixl_arq_unfill(sc);
1488 free_arq:
1489 ixl_dmamem_free(sc, &sc->sc_arq);
1490 free_atq:
1491 ixl_dmamem_free(sc, &sc->sc_atq);
1492 unmap:
1493 mutex_destroy(&sc->sc_atq_lock);
1494 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
1495 mutex_destroy(&sc->sc_cfg_lock);
1496 sc->sc_mems = 0;
1497
1498 sc->sc_attached = false;
1499 }
1500
1501 static int
1502 ixl_detach(device_t self, int flags)
1503 {
1504 struct ixl_softc *sc = device_private(self);
1505 struct ifnet *ifp = &sc->sc_ec.ec_if;
1506
1507 if (!sc->sc_attached)
1508 return 0;
1509
1510 ixl_stop(ifp, 1);
1511
1512 ixl_disable_other_intr(sc);
1513
1514 callout_stop(&sc->sc_stats_callout);
1515 ixl_work_wait(sc->sc_workq, &sc->sc_stats_task);
1516
1517 /* wait for ATQ handler */
1518 mutex_enter(&sc->sc_atq_lock);
1519 mutex_exit(&sc->sc_atq_lock);
1520
1521 ixl_work_wait(sc->sc_workq, &sc->sc_arq_task);
1522 ixl_work_wait(sc->sc_workq, &sc->sc_link_state_task);
1523
1524 if (sc->sc_workq != NULL) {
1525 ixl_workq_destroy(sc->sc_workq);
1526 sc->sc_workq = NULL;
1527 }
1528
1529 if (sc->sc_workq_txrx != NULL) {
1530 ixl_workq_destroy(sc->sc_workq_txrx);
1531 sc->sc_workq_txrx = NULL;
1532 }
1533
1534 ether_ifdetach(ifp);
1535 if_detach(ifp);
1536 ifmedia_fini(&sc->sc_media);
1537
1538 ixl_teardown_interrupts(sc);
1539 ixl_teardown_stats(sc);
1540 ixl_teardown_sysctls(sc);
1541
1542 ixl_queue_pairs_free(sc);
1543
1544 ixl_dmamem_free(sc, &sc->sc_scratch);
1545 ixl_hmc_free(sc);
1546
1547 /* shutdown */
1548 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1549 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1550 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1551 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1552
1553 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0);
1554 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0);
1555 ixl_wr(sc, sc->sc_aq_regs->atq_len, 0);
1556
1557 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0);
1558 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0);
1559 ixl_wr(sc, sc->sc_aq_regs->arq_len, 0);
1560
1561 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1562 0, IXL_DMA_LEN(&sc->sc_arq),
1563 BUS_DMASYNC_POSTREAD);
1564 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1565 0, IXL_DMA_LEN(&sc->sc_atq),
1566 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1567
1568 ixl_arq_unfill(sc);
1569
1570 ixl_dmamem_free(sc, &sc->sc_arq);
1571 ixl_dmamem_free(sc, &sc->sc_atq);
1572 ixl_dmamem_free(sc, &sc->sc_aqbuf);
1573
1574 cv_destroy(&sc->sc_atq_cv);
1575 mutex_destroy(&sc->sc_atq_lock);
1576
1577 if (sc->sc_mems != 0) {
1578 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
1579 sc->sc_mems = 0;
1580 }
1581
1582 mutex_destroy(&sc->sc_cfg_lock);
1583
1584 return 0;
1585 }
1586
1587 static int
1588 ixl_workqs_teardown(device_t self)
1589 {
1590 struct ixl_softc *sc = device_private(self);
1591
1592 if (sc->sc_workq != NULL) {
1593 ixl_workq_destroy(sc->sc_workq);
1594 sc->sc_workq = NULL;
1595 }
1596
1597 if (sc->sc_workq_txrx != NULL) {
1598 ixl_workq_destroy(sc->sc_workq_txrx);
1599 sc->sc_workq_txrx = NULL;
1600 }
1601
1602 return 0;
1603 }
1604
1605 static int
1606 ixl_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
1607 {
1608 struct ifnet *ifp = &ec->ec_if;
1609 struct ixl_softc *sc = ifp->if_softc;
1610 int rv;
1611
1612 if (!ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)) {
1613 return 0;
1614 }
1615
1616 if (set) {
1617 rv = ixl_add_macvlan(sc, sc->sc_enaddr, vid,
1618 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
1619 if (rv == 0) {
1620 rv = ixl_add_macvlan(sc, etherbroadcastaddr,
1621 vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
1622 }
1623 } else {
1624 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, vid,
1625 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
1626 (void)ixl_remove_macvlan(sc, etherbroadcastaddr, vid,
1627 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
1628 }
1629
1630 return rv;
1631 }
1632
1633 static void
1634 ixl_media_add(struct ixl_softc *sc)
1635 {
1636 struct ifmedia *ifm = &sc->sc_media;
1637 const struct ixl_phy_type *itype;
1638 unsigned int i;
1639 bool flow;
1640
1641 if (ISSET(sc->sc_phy_abilities,
1642 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX))) {
1643 flow = true;
1644 } else {
1645 flow = false;
1646 }
1647
1648 for (i = 0; i < __arraycount(ixl_phy_type_map); i++) {
1649 itype = &ixl_phy_type_map[i];
1650
1651 if (ISSET(sc->sc_phy_types, itype->phy_type)) {
1652 ifmedia_add(ifm,
1653 IFM_ETHER | IFM_FDX | itype->ifm_type, 0, NULL);
1654
1655 if (flow) {
1656 ifmedia_add(ifm,
1657 IFM_ETHER | IFM_FDX | IFM_FLOW |
1658 itype->ifm_type, 0, NULL);
1659 }
1660
1661 if (itype->ifm_type != IFM_100_TX)
1662 continue;
1663
1664 ifmedia_add(ifm, IFM_ETHER | itype->ifm_type,
1665 0, NULL);
1666 if (flow) {
1667 ifmedia_add(ifm,
1668 IFM_ETHER | IFM_FLOW | itype->ifm_type,
1669 0, NULL);
1670 }
1671 }
1672 }
1673 }
1674
1675 static void
1676 ixl_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1677 {
1678 struct ixl_softc *sc = ifp->if_softc;
1679
1680 ifmr->ifm_status = sc->sc_media_status;
1681 ifmr->ifm_active = sc->sc_media_active;
1682
1683 mutex_enter(&sc->sc_cfg_lock);
1684 if (ifp->if_link_state == LINK_STATE_UP)
1685 SET(ifmr->ifm_status, IFM_ACTIVE);
1686 mutex_exit(&sc->sc_cfg_lock);
1687 }
1688
1689 static int
1690 ixl_media_change(struct ifnet *ifp)
1691 {
1692 struct ixl_softc *sc = ifp->if_softc;
1693 struct ifmedia *ifm = &sc->sc_media;
1694 uint64_t ifm_active = sc->sc_media_active;
1695 uint8_t link_speed, abilities;
1696
1697 switch (IFM_SUBTYPE(ifm_active)) {
1698 case IFM_1000_SGMII:
1699 case IFM_1000_KX:
1700 case IFM_10G_KX4:
1701 case IFM_10G_KR:
1702 case IFM_40G_KR4:
1703 case IFM_20G_KR2:
1704 case IFM_25G_KR:
1705 /* backplanes */
1706 return EINVAL;
1707 }
1708
1709 abilities = IXL_PHY_ABILITY_AUTONEGO | IXL_PHY_ABILITY_LINKUP;
1710
1711 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1712 case IFM_AUTO:
1713 link_speed = sc->sc_phy_linkspeed;
1714 break;
1715 case IFM_NONE:
1716 link_speed = 0;
1717 CLR(abilities, IXL_PHY_ABILITY_LINKUP);
1718 break;
1719 default:
1720 link_speed = ixl_search_baudrate(
1721 ifmedia_baudrate(ifm->ifm_media));
1722 }
1723
1724 if (ISSET(abilities, IXL_PHY_ABILITY_LINKUP)) {
1725 if (ISSET(link_speed, sc->sc_phy_linkspeed) == 0)
1726 return EINVAL;
1727 }
1728
1729 if (ifm->ifm_media & IFM_FLOW) {
1730 abilities |= sc->sc_phy_abilities &
1731 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX);
1732 }
1733
1734 return ixl_set_phy_config(sc, link_speed, abilities, false);
1735 }
1736
1737 static void
1738 ixl_watchdog(struct ifnet *ifp)
1739 {
1740
1741 }
1742
1743 static void
1744 ixl_del_all_multiaddr(struct ixl_softc *sc)
1745 {
1746 struct ethercom *ec = &sc->sc_ec;
1747 struct ether_multi *enm;
1748 struct ether_multistep step;
1749
1750 ETHER_LOCK(ec);
1751 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1752 ETHER_NEXT_MULTI(step, enm)) {
1753 ixl_remove_macvlan(sc, enm->enm_addrlo, 0,
1754 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1755 }
1756 ETHER_UNLOCK(ec);
1757 }
1758
1759 static int
1760 ixl_add_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
1761 {
1762 struct ifnet *ifp = &sc->sc_ec.ec_if;
1763 int rv;
1764
1765 if (ISSET(ifp->if_flags, IFF_ALLMULTI))
1766 return 0;
1767
1768 if (memcmp(addrlo, addrhi, ETHER_ADDR_LEN) != 0) {
1769 ixl_del_all_multiaddr(sc);
1770 SET(ifp->if_flags, IFF_ALLMULTI);
1771 return ENETRESET;
1772 }
1773
1774 /* multicast addresses cannot use VLAN HWFILTER */
1775 rv = ixl_add_macvlan(sc, addrlo, 0,
1776 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
1777
1778 if (rv == ENOSPC) {
1779 ixl_del_all_multiaddr(sc);
1780 SET(ifp->if_flags, IFF_ALLMULTI);
1781 return ENETRESET;
1782 }
1783
1784 return rv;
1785 }
1786
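/*
 * Remove a multicast entry.  If the interface is not in ALLMULTI, the
 * single filter is simply removed.  Otherwise, try to leave ALLMULTI:
 * if every remaining multicast entry is an exact address, re-install
 * the individual filters and clear IFF_ALLMULTI; on failure, back out
 * the filters that were added and stay in ALLMULTI.
 */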
1787 static int
1788 ixl_del_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
1789 {
1790 struct ifnet *ifp = &sc->sc_ec.ec_if;
1791 struct ethercom *ec = &sc->sc_ec;
1792 struct ether_multi *enm, *enm_last;
1793 struct ether_multistep step;
1794 int error, rv = 0;
1795
1796 if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
1797 ixl_remove_macvlan(sc, addrlo, 0,
1798 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1799 return 0;
1800 }
1801
1802 ETHER_LOCK(ec);
1803 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1804 ETHER_NEXT_MULTI(step, enm)) {
1805 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
1806 ETHER_ADDR_LEN) != 0) {
1807 goto out;
1808 }
1809 }
1810
1811 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1812 ETHER_NEXT_MULTI(step, enm)) {
1813 error = ixl_add_macvlan(sc, enm->enm_addrlo, 0,
1814 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
1815 if (error != 0)
1816 break;
1817 }
1818
1819 if (enm != NULL) {
1820 enm_last = enm;
1821 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1822 ETHER_NEXT_MULTI(step, enm)) {
1823 if (enm == enm_last)
1824 break;
1825
1826 ixl_remove_macvlan(sc, enm->enm_addrlo, 0,
1827 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1828 }
1829 } else {
1830 CLR(ifp->if_flags, IFF_ALLMULTI);
1831 rv = ENETRESET;
1832 }
1833
1834 out:
1835 ETHER_UNLOCK(ec);
1836 return rv;
1837 }
1838
1839 static int
1840 ixl_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1841 {
1842 struct ifreq *ifr = (struct ifreq *)data;
1843 struct ixl_softc *sc = (struct ixl_softc *)ifp->if_softc;
1844 const struct sockaddr *sa;
1845 uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
1846 int s, error = 0;
1847 unsigned int nmtu;
1848
1849 switch (cmd) {
1850 case SIOCSIFMTU:
1851 nmtu = ifr->ifr_mtu;
1852
1853 if (nmtu < IXL_MIN_MTU || nmtu > IXL_MAX_MTU) {
1854 error = EINVAL;
1855 break;
1856 }
1857 if (ifp->if_mtu != nmtu) {
1858 s = splnet();
1859 error = ether_ioctl(ifp, cmd, data);
1860 splx(s);
1861 if (error == ENETRESET)
1862 error = ixl_init(ifp);
1863 }
1864 break;
1865 case SIOCADDMULTI:
1866 sa = ifreq_getaddr(SIOCADDMULTI, ifr);
1867 if (ether_addmulti(sa, &sc->sc_ec) == ENETRESET) {
1868 error = ether_multiaddr(sa, addrlo, addrhi);
1869 if (error != 0)
1870 return error;
1871
1872 error = ixl_add_multi(sc, addrlo, addrhi);
1873 if (error != 0 && error != ENETRESET) {
1874 ether_delmulti(sa, &sc->sc_ec);
1875 error = EIO;
1876 }
1877 }
1878 break;
1879
1880 case SIOCDELMULTI:
1881 sa = ifreq_getaddr(SIOCDELMULTI, ifr);
1882 if (ether_delmulti(sa, &sc->sc_ec) == ENETRESET) {
1883 error = ether_multiaddr(sa, addrlo, addrhi);
1884 if (error != 0)
1885 return error;
1886
1887 error = ixl_del_multi(sc, addrlo, addrhi);
1888 }
1889 break;
1890
1891 default:
1892 s = splnet();
1893 error = ether_ioctl(ifp, cmd, data);
1894 splx(s);
1895 }
1896
1897 if (error == ENETRESET)
1898 error = ixl_iff(sc);
1899
1900 return error;
1901 }
1902
1903 static enum i40e_mac_type
1904 ixl_mactype(pci_product_id_t id)
1905 {
1906
1907 switch (id) {
1908 case PCI_PRODUCT_INTEL_XL710_SFP:
1909 case PCI_PRODUCT_INTEL_XL710_KX_B:
1910 case PCI_PRODUCT_INTEL_XL710_KX_C:
1911 case PCI_PRODUCT_INTEL_XL710_QSFP_A:
1912 case PCI_PRODUCT_INTEL_XL710_QSFP_B:
1913 case PCI_PRODUCT_INTEL_XL710_QSFP_C:
1914 case PCI_PRODUCT_INTEL_X710_10G_T:
1915 case PCI_PRODUCT_INTEL_XL710_20G_BP_1:
1916 case PCI_PRODUCT_INTEL_XL710_20G_BP_2:
1917 case PCI_PRODUCT_INTEL_X710_T4_10G:
1918 case PCI_PRODUCT_INTEL_XXV710_25G_BP:
1919 case PCI_PRODUCT_INTEL_XXV710_25G_SFP28:
1920 return I40E_MAC_XL710;
1921
1922 case PCI_PRODUCT_INTEL_X722_KX:
1923 case PCI_PRODUCT_INTEL_X722_QSFP:
1924 case PCI_PRODUCT_INTEL_X722_SFP:
1925 case PCI_PRODUCT_INTEL_X722_1G_BASET:
1926 case PCI_PRODUCT_INTEL_X722_10G_BASET:
1927 case PCI_PRODUCT_INTEL_X722_I_SFP:
1928 return I40E_MAC_X722;
1929 }
1930
1931 return I40E_MAC_GENERIC;
1932 }
1933
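/*
 * Return the kernel virtual address of object `i' of the given HMC
 * object type within the host memory cache backing store, or NULL if
 * the index is beyond the number of objects of that type.
 */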
1934 static inline void *
1935 ixl_hmc_kva(struct ixl_softc *sc, enum ixl_hmc_types type, unsigned int i)
1936 {
1937 uint8_t *kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
1938 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
1939
1940 if (i >= e->hmc_count)
1941 return NULL;
1942
1943 kva += e->hmc_base;
1944 kva += i * e->hmc_size;
1945
1946 return kva;
1947 }
1948
1949 static inline size_t
1950 ixl_hmc_len(struct ixl_softc *sc, enum ixl_hmc_types type)
1951 {
1952 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
1953
1954 return e->hmc_size;
1955 }
1956
1957 static void
1958 ixl_enable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp)
1959 {
1960 struct ixl_rx_ring *rxr = qp->qp_rxr;
1961
1962 ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid),
1963 I40E_PFINT_DYN_CTLN_INTENA_MASK |
1964 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1965 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
1966 ixl_flush(sc);
1967 }
1968
1969 static void
1970 ixl_disable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp)
1971 {
1972 struct ixl_rx_ring *rxr = qp->qp_rxr;
1973
1974 ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid),
1975 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
1976 ixl_flush(sc);
1977 }
1978
1979 static void
1980 ixl_enable_other_intr(struct ixl_softc *sc)
1981 {
1982
1983 ixl_wr(sc, I40E_PFINT_DYN_CTL0,
1984 I40E_PFINT_DYN_CTL0_INTENA_MASK |
1985 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
1986 (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
1987 ixl_flush(sc);
1988 }
1989
1990 static void
1991 ixl_disable_other_intr(struct ixl_softc *sc)
1992 {
1993
1994 ixl_wr(sc, I40E_PFINT_DYN_CTL0,
1995 (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
1996 ixl_flush(sc);
1997 }
1998
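/*
 * Reprogram the hardware after a (re)configuration: refresh the VSI
 * parameters, reset and rewrite each TX/RX queue context in the HMC,
 * refill the RX rings and re-enable the queues.  Returns EIO if the
 * VSI cannot be refreshed, or ETIMEDOUT if a queue fails to enable.
 */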
1999 static int
2000 ixl_reinit(struct ixl_softc *sc)
2001 {
2002 struct ixl_rx_ring *rxr;
2003 struct ixl_tx_ring *txr;
2004 unsigned int i;
2005 uint32_t reg;
2006
2007 KASSERT(mutex_owned(&sc->sc_cfg_lock));
2008
2009 if (ixl_get_vsi(sc) != 0)
2010 return EIO;
2011
2012 if (ixl_set_vsi(sc) != 0)
2013 return EIO;
2014
2015 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2016 txr = sc->sc_qps[i].qp_txr;
2017 rxr = sc->sc_qps[i].qp_rxr;
2018
2019 txr->txr_cons = txr->txr_prod = 0;
2020 rxr->rxr_cons = rxr->rxr_prod = 0;
2021
2022 ixl_txr_config(sc, txr);
2023 ixl_rxr_config(sc, rxr);
2024 }
2025
2026 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
2027 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_PREWRITE);
2028
2029 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2030 txr = sc->sc_qps[i].qp_txr;
2031 rxr = sc->sc_qps[i].qp_rxr;
2032
2033 ixl_wr(sc, I40E_QTX_CTL(i), I40E_QTX_CTL_PF_QUEUE |
2034 (sc->sc_pf_id << I40E_QTX_CTL_PF_INDX_SHIFT));
2035 ixl_flush(sc);
2036
2037 ixl_wr(sc, txr->txr_tail, txr->txr_prod);
2038 ixl_wr(sc, rxr->rxr_tail, rxr->rxr_prod);
2039
2040 /* ixl_rxfill() needs lock held */
2041 mutex_enter(&rxr->rxr_lock);
2042 ixl_rxfill(sc, rxr);
2043 mutex_exit(&rxr->rxr_lock);
2044
2045 reg = ixl_rd(sc, I40E_QRX_ENA(i));
2046 SET(reg, I40E_QRX_ENA_QENA_REQ_MASK);
2047 ixl_wr(sc, I40E_QRX_ENA(i), reg);
2048 if (ixl_rxr_enabled(sc, rxr) != 0)
2049 goto stop;
2050
2051 ixl_txr_qdis(sc, txr, 1);
2052
2053 reg = ixl_rd(sc, I40E_QTX_ENA(i));
2054 SET(reg, I40E_QTX_ENA_QENA_REQ_MASK);
2055 ixl_wr(sc, I40E_QTX_ENA(i), reg);
2056
2057 if (ixl_txr_enabled(sc, txr) != 0)
2058 goto stop;
2059 }
2060
2061 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
2062 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE);
2063
2064 return 0;
2065
2066 stop:
2067 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
2068 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE);
2069
2070 return ETIMEDOUT;
2071 }
2072
2073 static int
2074 ixl_init_locked(struct ixl_softc *sc)
2075 {
2076 struct ifnet *ifp = &sc->sc_ec.ec_if;
2077 unsigned int i;
2078 int error, eccap_change;
2079
2080 KASSERT(mutex_owned(&sc->sc_cfg_lock));
2081
2082 if (ISSET(ifp->if_flags, IFF_RUNNING))
2083 ixl_stop_locked(sc);
2084
2085 if (sc->sc_dead) {
2086 return ENXIO;
2087 }
2088
2089 eccap_change = sc->sc_ec.ec_capenable ^ sc->sc_cur_ec_capenable;
2090 if (ISSET(eccap_change, ETHERCAP_VLAN_HWTAGGING))
2091 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWTAGGING;
2092
2093 if (ISSET(eccap_change, ETHERCAP_VLAN_HWFILTER)) {
2094 if (ixl_update_macvlan(sc) == 0) {
2095 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWFILTER;
2096 } else {
2097 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
2098 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER);
2099 }
2100 }
2101
2102 if (sc->sc_intrtype != PCI_INTR_TYPE_MSIX)
2103 sc->sc_nqueue_pairs = 1;
2104 else
2105 sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max;
2106
2107 error = ixl_reinit(sc);
2108 if (error) {
2109 ixl_stop_locked(sc);
2110 return error;
2111 }
2112
2113 SET(ifp->if_flags, IFF_RUNNING);
2114 CLR(ifp->if_flags, IFF_OACTIVE);
2115
2116 (void)ixl_get_link_status(sc);
2117
2118 ixl_config_rss(sc);
2119 ixl_config_queue_intr(sc);
2120
2121 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2122 ixl_enable_queue_intr(sc, &sc->sc_qps[i]);
2123 }
2124
2125 error = ixl_iff(sc);
2126 if (error) {
2127 ixl_stop_locked(sc);
2128 return error;
2129 }
2130
2131 return 0;
2132 }
2133
2134 static int
2135 ixl_init(struct ifnet *ifp)
2136 {
2137 struct ixl_softc *sc = ifp->if_softc;
2138 int error;
2139
2140 mutex_enter(&sc->sc_cfg_lock);
2141 error = ixl_init_locked(sc);
2142 mutex_exit(&sc->sc_cfg_lock);
2143
2144 return error;
2145 }
2146
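/*
 * Program the VSI promiscuous flags (unicast/multicast/broadcast/VLAN)
 * from the interface flags via the admin queue, and re-install the
 * primary MAC filter if the link-layer address has changed.
 */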
2147 static int
2148 ixl_iff(struct ixl_softc *sc)
2149 {
2150 struct ifnet *ifp = &sc->sc_ec.ec_if;
2151 struct ixl_atq iatq;
2152 struct ixl_aq_desc *iaq;
2153 struct ixl_aq_vsi_promisc_param *param;
2154 uint16_t flag_add, flag_del;
2155 int error;
2156
2157 if (!ISSET(ifp->if_flags, IFF_RUNNING))
2158 return 0;
2159
2160 memset(&iatq, 0, sizeof(iatq));
2161
2162 iaq = &iatq.iatq_desc;
2163 iaq->iaq_opcode = htole16(IXL_AQ_OP_SET_VSI_PROMISC);
2164
2165 param = (struct ixl_aq_vsi_promisc_param *)&iaq->iaq_param;
2166 param->flags = htole16(0);
2167
2168 if (!ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)
2169 || ISSET(ifp->if_flags, IFF_PROMISC)) {
2170 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_BCAST |
2171 IXL_AQ_VSI_PROMISC_FLAG_VLAN);
2172 }
2173
2174 if (ISSET(ifp->if_flags, IFF_PROMISC)) {
2175 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
2176 IXL_AQ_VSI_PROMISC_FLAG_MCAST);
2177 } else if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
2178 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_MCAST);
2179 }
2180 param->valid_flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
2181 IXL_AQ_VSI_PROMISC_FLAG_MCAST | IXL_AQ_VSI_PROMISC_FLAG_BCAST |
2182 IXL_AQ_VSI_PROMISC_FLAG_VLAN);
2183 param->seid = sc->sc_seid;
2184
2185 error = ixl_atq_exec(sc, &iatq);
2186 if (error)
2187 return error;
2188
2189 if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK))
2190 return EIO;
2191
2192 if (memcmp(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN) != 0) {
2193 if (ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)) {
2194 flag_add = IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH;
2195 flag_del = IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH;
2196 } else {
2197 flag_add = IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN;
2198 flag_del = IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN;
2199 }
2200
2201 ixl_remove_macvlan(sc, sc->sc_enaddr, 0, flag_del);
2202
2203 memcpy(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN);
2204 ixl_add_macvlan(sc, sc->sc_enaddr, 0, flag_add);
2205 }
2206 return 0;
2207 }
2208
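/*
 * Wait for any TX/RX processing still in flight to finish: take and
 * release each ring lock so that current holders drain, then wait for
 * the per-queue deferred work to complete.
 */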
2209 static void
2210 ixl_stop_rendezvous(struct ixl_softc *sc)
2211 {
2212 struct ixl_tx_ring *txr;
2213 struct ixl_rx_ring *rxr;
2214 unsigned int i;
2215
2216 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2217 txr = sc->sc_qps[i].qp_txr;
2218 rxr = sc->sc_qps[i].qp_rxr;
2219
2220 mutex_enter(&txr->txr_lock);
2221 mutex_exit(&txr->txr_lock);
2222
2223 mutex_enter(&rxr->rxr_lock);
2224 mutex_exit(&rxr->rxr_lock);
2225
2226 ixl_work_wait(sc->sc_workq_txrx,
2227 &sc->sc_qps[i].qp_task);
2228 }
2229 }
2230
2231 static void
2232 ixl_stop_locked(struct ixl_softc *sc)
2233 {
2234 struct ifnet *ifp = &sc->sc_ec.ec_if;
2235 struct ixl_rx_ring *rxr;
2236 struct ixl_tx_ring *txr;
2237 unsigned int i;
2238 uint32_t reg;
2239
2240 KASSERT(mutex_owned(&sc->sc_cfg_lock));
2241
2242 CLR(ifp->if_flags, IFF_RUNNING | IFF_OACTIVE);
2243
2244 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2245 txr = sc->sc_qps[i].qp_txr;
2246 rxr = sc->sc_qps[i].qp_rxr;
2247
2248 ixl_disable_queue_intr(sc, &sc->sc_qps[i]);
2249
2250 mutex_enter(&txr->txr_lock);
2251 ixl_txr_qdis(sc, txr, 0);
2252 /* XXX wait at least 400 usec for all tx queues in one go */
2253 ixl_flush(sc);
2254 DELAY(500);
2255
2256 reg = ixl_rd(sc, I40E_QTX_ENA(i));
2257 CLR(reg, I40E_QTX_ENA_QENA_REQ_MASK);
2258 ixl_wr(sc, I40E_QTX_ENA(i), reg);
2259 /* XXX wait 50ms from completion of the TX queue disable */
2260 ixl_flush(sc);
2261 DELAY(50);
2262
2263 if (ixl_txr_disabled(sc, txr) != 0) {
2264 mutex_exit(&txr->txr_lock);
2265 goto die;
2266 }
2267 mutex_exit(&txr->txr_lock);
2268
2269 mutex_enter(&rxr->rxr_lock);
2270 reg = ixl_rd(sc, I40E_QRX_ENA(i));
2271 CLR(reg, I40E_QRX_ENA_QENA_REQ_MASK);
2272 ixl_wr(sc, I40E_QRX_ENA(i), reg);
2273 /* XXX wait 50ms from completion of the RX queue disable */
2274 ixl_flush(sc);
2275 DELAY(50);
2276
2277 if (ixl_rxr_disabled(sc, rxr) != 0) {
2278 mutex_exit(&rxr->rxr_lock);
2279 goto die;
2280 }
2281 mutex_exit(&rxr->rxr_lock);
2282 }
2283
2284 ixl_stop_rendezvous(sc);
2285
2286 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2287 txr = sc->sc_qps[i].qp_txr;
2288 rxr = sc->sc_qps[i].qp_rxr;
2289
2290 ixl_txr_unconfig(sc, txr);
2291 ixl_rxr_unconfig(sc, rxr);
2292
2293 ixl_txr_clean(sc, txr);
2294 ixl_rxr_clean(sc, rxr);
2295 }
2296
2297 return;
2298 die:
2299 sc->sc_dead = true;
2300 log(LOG_CRIT, "%s: failed to shut down rings",
2301 device_xname(sc->sc_dev));
2302 return;
2303 }
2304
2305 static void
2306 ixl_stop(struct ifnet *ifp, int disable)
2307 {
2308 struct ixl_softc *sc = ifp->if_softc;
2309
2310 mutex_enter(&sc->sc_cfg_lock);
2311 ixl_stop_locked(sc);
2312 mutex_exit(&sc->sc_cfg_lock);
2313 }
2314
2315 static int
2316 ixl_queue_pairs_alloc(struct ixl_softc *sc)
2317 {
2318 struct ixl_queue_pair *qp;
2319 unsigned int i;
2320 size_t sz;
2321
2322 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2323 sc->sc_qps = kmem_zalloc(sz, KM_SLEEP);
2324
2325 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2326 qp = &sc->sc_qps[i];
2327
2328 qp->qp_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
2329 ixl_handle_queue, qp);
2330 if (qp->qp_si == NULL)
2331 goto free;
2332
2333 qp->qp_txr = ixl_txr_alloc(sc, i);
2334 if (qp->qp_txr == NULL)
2335 goto free;
2336
2337 qp->qp_rxr = ixl_rxr_alloc(sc, i);
2338 if (qp->qp_rxr == NULL)
2339 goto free;
2340
2341 qp->qp_sc = sc;
2342 ixl_work_set(&qp->qp_task, ixl_handle_queue, qp);
2343 snprintf(qp->qp_name, sizeof(qp->qp_name),
2344 "%s-TXRX%d", device_xname(sc->sc_dev), i);
2345 }
2346
2347 return 0;
2348 free:
2349 if (sc->sc_qps != NULL) {
2350 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2351 qp = &sc->sc_qps[i];
2352
2353 if (qp->qp_txr != NULL)
2354 ixl_txr_free(sc, qp->qp_txr);
2355 if (qp->qp_rxr != NULL)
2356 ixl_rxr_free(sc, qp->qp_rxr);
2357 if (qp->qp_si != NULL)
2358 softint_disestablish(qp->qp_si);
2359 }
2360
2361 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2362 kmem_free(sc->sc_qps, sz);
2363 sc->sc_qps = NULL;
2364 }
2365
2366 return -1;
2367 }
2368
2369 static void
2370 ixl_queue_pairs_free(struct ixl_softc *sc)
2371 {
2372 struct ixl_queue_pair *qp;
2373 unsigned int i;
2374 size_t sz;
2375
2376 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2377 qp = &sc->sc_qps[i];
2378 ixl_txr_free(sc, qp->qp_txr);
2379 ixl_rxr_free(sc, qp->qp_rxr);
2380 softint_disestablish(qp->qp_si);
2381 }
2382
2383 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2384 kmem_free(sc->sc_qps, sz);
2385 sc->sc_qps = NULL;
2386 }
2387
2388 static struct ixl_tx_ring *
2389 ixl_txr_alloc(struct ixl_softc *sc, unsigned int qid)
2390 {
2391 struct ixl_tx_ring *txr = NULL;
2392 struct ixl_tx_map *maps = NULL, *txm;
2393 unsigned int i;
2394
2395 txr = kmem_zalloc(sizeof(*txr), KM_SLEEP);
2396 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_tx_ring_ndescs,
2397 KM_SLEEP);
2398
2399 if (ixl_dmamem_alloc(sc, &txr->txr_mem,
2400 sizeof(struct ixl_tx_desc) * sc->sc_tx_ring_ndescs,
2401 IXL_TX_QUEUE_ALIGN) != 0)
2402 goto free;
2403
2404 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2405 txm = &maps[i];
2406
2407 if (bus_dmamap_create(sc->sc_dmat, IXL_TX_PKT_MAXSIZE,
2408 IXL_TX_PKT_DESCS, IXL_TX_PKT_MAXSIZE, 0,
2409 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &txm->txm_map) != 0)
2410 goto uncreate;
2411
2412 txm->txm_eop = -1;
2413 txm->txm_m = NULL;
2414 }
2415
2416 txr->txr_cons = txr->txr_prod = 0;
2417 txr->txr_maps = maps;
2418
2419 txr->txr_intrq = pcq_create(sc->sc_tx_ring_ndescs, KM_NOSLEEP);
2420 if (txr->txr_intrq == NULL)
2421 goto uncreate;
2422
2423 txr->txr_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
2424 ixl_deferred_transmit, txr);
2425 if (txr->txr_si == NULL)
2426 goto destroy_pcq;
2427
2428 txr->txr_tail = I40E_QTX_TAIL(qid);
2429 txr->txr_qid = qid;
2430 txr->txr_sc = sc;
2431 mutex_init(&txr->txr_lock, MUTEX_DEFAULT, IPL_NET);
2432
2433 return txr;
2434
2435 destroy_pcq:
2436 pcq_destroy(txr->txr_intrq);
2437 uncreate:
2438 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2439 txm = &maps[i];
2440
2441 if (txm->txm_map == NULL)
2442 continue;
2443
2444 bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2445 }
2446
2447 ixl_dmamem_free(sc, &txr->txr_mem);
2448 free:
2449 kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs);
2450 kmem_free(txr, sizeof(*txr));
2451
2452 return NULL;
2453 }
2454
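/*
 * Set or clear the pre-queue-disable request for a TX queue in the
 * GLLAN_TXPRE_QDIS register bank (128 queues per register, indexed
 * relative to the PF's base queue).
 */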
2455 static void
2456 ixl_txr_qdis(struct ixl_softc *sc, struct ixl_tx_ring *txr, int enable)
2457 {
2458 unsigned int qid;
2459 bus_size_t reg;
2460 uint32_t r;
2461
2462 qid = txr->txr_qid + sc->sc_base_queue;
2463 reg = I40E_GLLAN_TXPRE_QDIS(qid / 128);
2464 qid %= 128;
2465
2466 r = ixl_rd(sc, reg);
2467 CLR(r, I40E_GLLAN_TXPRE_QDIS_QINDX_MASK);
2468 SET(r, qid << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
2469 SET(r, enable ? I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK :
2470 I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK);
2471 ixl_wr(sc, reg, r);
2472 }
2473
2474 static void
2475 ixl_txr_config(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2476 {
2477 struct ixl_hmc_txq txq;
2478 struct ixl_aq_vsi_data *data = IXL_DMA_KVA(&sc->sc_scratch);
2479 void *hmc;
2480
2481 memset(&txq, 0, sizeof(txq));
2482 txq.head = htole16(txr->txr_cons);
2483 txq.new_context = 1;
2484 txq.base = htole64(IXL_DMA_DVA(&txr->txr_mem) / IXL_HMC_TXQ_BASE_UNIT);
2485 txq.head_wb_ena = IXL_HMC_TXQ_DESC_WB;
2486 txq.qlen = htole16(sc->sc_tx_ring_ndescs);
2487 txq.tphrdesc_ena = 0;
2488 txq.tphrpacket_ena = 0;
2489 txq.tphwdesc_ena = 0;
2490 txq.rdylist = data->qs_handle[0];
2491
2492 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2493 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2494 ixl_hmc_pack(hmc, &txq, ixl_hmc_pack_txq,
2495 __arraycount(ixl_hmc_pack_txq));
2496 }
2497
2498 static void
2499 ixl_txr_unconfig(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2500 {
2501 void *hmc;
2502
2503 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2504 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2505 }
2506
2507 static void
2508 ixl_txr_clean(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2509 {
2510 struct ixl_tx_map *maps, *txm;
2511 bus_dmamap_t map;
2512 unsigned int i;
2513
2514 maps = txr->txr_maps;
2515 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2516 txm = &maps[i];
2517
2518 if (txm->txm_m == NULL)
2519 continue;
2520
2521 map = txm->txm_map;
2522 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2523 BUS_DMASYNC_POSTWRITE);
2524 bus_dmamap_unload(sc->sc_dmat, map);
2525
2526 m_freem(txm->txm_m);
2527 txm->txm_m = NULL;
2528 }
2529 }
2530
2531 static int
2532 ixl_txr_enabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2533 {
2534 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2535 uint32_t reg;
2536 int i;
2537
2538 for (i = 0; i < 10; i++) {
2539 reg = ixl_rd(sc, ena);
2540 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK))
2541 return 0;
2542
2543 delaymsec(10);
2544 }
2545
2546 return ETIMEDOUT;
2547 }
2548
2549 static int
2550 ixl_txr_disabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2551 {
2552 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2553 uint32_t reg;
2554 int i;
2555
2556 KASSERT(mutex_owned(&txr->txr_lock));
2557
2558 for (i = 0; i < 20; i++) {
2559 reg = ixl_rd(sc, ena);
2560 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK) == 0)
2561 return 0;
2562
2563 delaymsec(10);
2564 }
2565
2566 return ETIMEDOUT;
2567 }
2568
2569 static void
2570 ixl_txr_free(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2571 {
2572 struct ixl_tx_map *maps, *txm;
2573 struct mbuf *m;
2574 unsigned int i;
2575
2576 softint_disestablish(txr->txr_si);
2577 while ((m = pcq_get(txr->txr_intrq)) != NULL)
2578 m_freem(m);
2579 pcq_destroy(txr->txr_intrq);
2580
2581 maps = txr->txr_maps;
2582 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2583 txm = &maps[i];
2584
2585 bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2586 }
2587
2588 ixl_dmamem_free(sc, &txr->txr_mem);
2589 mutex_destroy(&txr->txr_lock);
2590 kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs);
2591 kmem_free(txr, sizeof(*txr));
2592 }
2593
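/*
 * DMA-load an outgoing mbuf chain.  If the chain has too many segments
 * (EFBIG), defragment it once and retry; defragmentations and failures
 * are counted in the per-ring event counters.
 */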
2594 static inline int
2595 ixl_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf **m0,
2596 struct ixl_tx_ring *txr)
2597 {
2598 struct mbuf *m;
2599 int error;
2600
2601 KASSERT(mutex_owned(&txr->txr_lock));
2602
2603 m = *m0;
2604
2605 error = bus_dmamap_load_mbuf(dmat, map, m,
2606 BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT);
2607 if (error != EFBIG)
2608 return error;
2609
2610 m = m_defrag(m, M_DONTWAIT);
2611 if (m != NULL) {
2612 *m0 = m;
2613 txr->txr_defragged.ev_count++;
2614
2615 error = bus_dmamap_load_mbuf(dmat, map, m,
2616 BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT);
2617 } else {
2618 txr->txr_defrag_failed.ev_count++;
2619 error = ENOBUFS;
2620 }
2621
2622 return error;
2623 }
2624
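/*
 * Translate the packet's checksum offload flags into TX descriptor
 * command bits: MAC header length (2-byte units), IP header type and
 * length (4-byte units), and L4 type and header length (4-byte units).
 */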
2625 static inline int
2626 ixl_tx_setup_offloads(struct mbuf *m, uint64_t *cmd_txd)
2627 {
2628 struct ether_header *eh;
2629 size_t len;
2630 uint64_t cmd;
2631
2632 cmd = 0;
2633
2634 eh = mtod(m, struct ether_header *);
2635 switch (htons(eh->ether_type)) {
2636 case ETHERTYPE_IP:
2637 case ETHERTYPE_IPV6:
2638 len = ETHER_HDR_LEN;
2639 break;
2640 case ETHERTYPE_VLAN:
2641 len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2642 break;
2643 default:
2644 len = 0;
2645 }
2646 cmd |= ((len >> 1) << IXL_TX_DESC_MACLEN_SHIFT);
2647
2648 if (m->m_pkthdr.csum_flags &
2649 (M_CSUM_TSOv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
2650 cmd |= IXL_TX_DESC_CMD_IIPT_IPV4;
2651 }
2652 if (m->m_pkthdr.csum_flags & M_CSUM_IPv4) {
2653 cmd |= IXL_TX_DESC_CMD_IIPT_IPV4_CSUM;
2654 }
2655
2656 if (m->m_pkthdr.csum_flags &
2657 (M_CSUM_TSOv6 | M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
2658 cmd |= IXL_TX_DESC_CMD_IIPT_IPV6;
2659 }
2660
2661 switch (cmd & IXL_TX_DESC_CMD_IIPT_MASK) {
2662 case IXL_TX_DESC_CMD_IIPT_IPV4:
2663 case IXL_TX_DESC_CMD_IIPT_IPV4_CSUM:
2664 len = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data);
2665 break;
2666 case IXL_TX_DESC_CMD_IIPT_IPV6:
2667 len = M_CSUM_DATA_IPv6_IPHL(m->m_pkthdr.csum_data);
2668 break;
2669 default:
2670 len = 0;
2671 }
2672 cmd |= ((len >> 2) << IXL_TX_DESC_IPLEN_SHIFT);
2673
2674 if (m->m_pkthdr.csum_flags &
2675 (M_CSUM_TSOv4 | M_CSUM_TSOv6 | M_CSUM_TCPv4 | M_CSUM_TCPv6)) {
2676 len = sizeof(struct tcphdr);
2677 cmd |= IXL_TX_DESC_CMD_L4T_EOFT_TCP;
2678 } else if (m->m_pkthdr.csum_flags & (M_CSUM_UDPv4 | M_CSUM_UDPv6)) {
2679 len = sizeof(struct udphdr);
2680 cmd |= IXL_TX_DESC_CMD_L4T_EOFT_UDP;
2681 } else {
2682 len = 0;
2683 }
2684 cmd |= ((len >> 2) << IXL_TX_DESC_L4LEN_SHIFT);
2685
2686 *cmd_txd |= cmd;
2687 return 0;
2688 }
2689
2690 static void
2691 ixl_tx_common_locked(struct ifnet *ifp, struct ixl_tx_ring *txr,
2692 bool is_transmit)
2693 {
2694 struct ixl_softc *sc = ifp->if_softc;
2695 struct ixl_tx_desc *ring, *txd;
2696 struct ixl_tx_map *txm;
2697 bus_dmamap_t map;
2698 struct mbuf *m;
2699 uint64_t cmd, cmd_txd;
2700 unsigned int prod, free, last, i;
2701 unsigned int mask;
2702 int post = 0;
2703
2704 KASSERT(mutex_owned(&txr->txr_lock));
2705
2706 if (ifp->if_link_state != LINK_STATE_UP
2707 || !ISSET(ifp->if_flags, IFF_RUNNING)
2708 || (!is_transmit && ISSET(ifp->if_flags, IFF_OACTIVE))) {
2709 if (!is_transmit)
2710 IFQ_PURGE(&ifp->if_snd);
2711 return;
2712 }
2713
2714 prod = txr->txr_prod;
2715 free = txr->txr_cons;
2716 if (free <= prod)
2717 free += sc->sc_tx_ring_ndescs;
2718 free -= prod;
2719
2720 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2721 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTWRITE);
2722
2723 ring = IXL_DMA_KVA(&txr->txr_mem);
2724 mask = sc->sc_tx_ring_ndescs - 1;
2725 last = prod;
2726 cmd = 0;
2727 txd = NULL;
2728
2729 for (;;) {
2730 if (free <= IXL_TX_PKT_DESCS) {
2731 if (!is_transmit)
2732 SET(ifp->if_flags, IFF_OACTIVE);
2733 break;
2734 }
2735
2736 if (is_transmit)
2737 m = pcq_get(txr->txr_intrq);
2738 else
2739 IFQ_DEQUEUE(&ifp->if_snd, m);
2740
2741 if (m == NULL)
2742 break;
2743
2744 txm = &txr->txr_maps[prod];
2745 map = txm->txm_map;
2746
2747 if (ixl_load_mbuf(sc->sc_dmat, map, &m, txr) != 0) {
2748 if_statinc(ifp, if_oerrors);
2749 m_freem(m);
2750 continue;
2751 }
2752
2753 cmd_txd = 0;
2754 if (m->m_pkthdr.csum_flags & IXL_CSUM_ALL_OFFLOAD) {
2755 ixl_tx_setup_offloads(m, &cmd_txd);
2756 }
2757
2758 if (vlan_has_tag(m)) {
2759 cmd_txd |= (uint64_t)vlan_get_tag(m) <<
2760 IXL_TX_DESC_L2TAG1_SHIFT;
2761 cmd_txd |= IXL_TX_DESC_CMD_IL2TAG1;
2762 }
2763
2764 bus_dmamap_sync(sc->sc_dmat, map, 0,
2765 map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2766
2767 for (i = 0; i < (unsigned int)map->dm_nsegs; i++) {
2768 txd = &ring[prod];
2769
2770 cmd = (uint64_t)map->dm_segs[i].ds_len <<
2771 IXL_TX_DESC_BSIZE_SHIFT;
2772 cmd |= IXL_TX_DESC_DTYPE_DATA | IXL_TX_DESC_CMD_ICRC;
2773 cmd |= cmd_txd;
2774
2775 txd->addr = htole64(map->dm_segs[i].ds_addr);
2776 txd->cmd = htole64(cmd);
2777
2778 last = prod;
2779
2780 prod++;
2781 prod &= mask;
2782 }
2783 cmd |= IXL_TX_DESC_CMD_EOP | IXL_TX_DESC_CMD_RS;
2784 txd->cmd = htole64(cmd);
2785
2786 txm->txm_m = m;
2787 txm->txm_eop = last;
2788
2789 bpf_mtap(ifp, m, BPF_D_OUT);
2790
2791 free -= i;
2792 post = 1;
2793 }
2794
2795 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2796 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREWRITE);
2797
2798 if (post) {
2799 txr->txr_prod = prod;
2800 ixl_wr(sc, txr->txr_tail, prod);
2801 }
2802 }
2803
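/*
 * Process completed TX descriptors, up to txlimit packets: unload and
 * free the transmitted mbufs, update interface statistics and advance
 * the consumer index.  Returns nonzero if descriptors remain to be
 * processed.
 */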
2804 static int
2805 ixl_txeof(struct ixl_softc *sc, struct ixl_tx_ring *txr, u_int txlimit)
2806 {
2807 struct ifnet *ifp = &sc->sc_ec.ec_if;
2808 struct ixl_tx_desc *ring, *txd;
2809 struct ixl_tx_map *txm;
2810 struct mbuf *m;
2811 bus_dmamap_t map;
2812 unsigned int cons, prod, last;
2813 unsigned int mask;
2814 uint64_t dtype;
2815 int done = 0, more = 0;
2816
2817 KASSERT(mutex_owned(&txr->txr_lock));
2818
2819 prod = txr->txr_prod;
2820 cons = txr->txr_cons;
2821
2822 if (cons == prod)
2823 return 0;
2824
2825 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2826 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTREAD);
2827
2828 ring = IXL_DMA_KVA(&txr->txr_mem);
2829 mask = sc->sc_tx_ring_ndescs - 1;
2830
2831 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
2832
2833 do {
2834 if (txlimit-- <= 0) {
2835 more = 1;
2836 break;
2837 }
2838
2839 txm = &txr->txr_maps[cons];
2840 last = txm->txm_eop;
2841 txd = &ring[last];
2842
2843 dtype = txd->cmd & htole64(IXL_TX_DESC_DTYPE_MASK);
2844 if (dtype != htole64(IXL_TX_DESC_DTYPE_DONE))
2845 break;
2846
2847 map = txm->txm_map;
2848
2849 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2850 BUS_DMASYNC_POSTWRITE);
2851 bus_dmamap_unload(sc->sc_dmat, map);
2852
2853 m = txm->txm_m;
2854 if (m != NULL) {
2855 if_statinc_ref(nsr, if_opackets);
2856 if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
2857 if (ISSET(m->m_flags, M_MCAST))
2858 if_statinc_ref(nsr, if_omcasts);
2859 m_freem(m);
2860 }
2861
2862 txm->txm_m = NULL;
2863 txm->txm_eop = -1;
2864
2865 cons = last + 1;
2866 cons &= mask;
2867 done = 1;
2868 } while (cons != prod);
2869
2870 IF_STAT_PUTREF(ifp);
2871
2872 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2873 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREREAD);
2874
2875 txr->txr_cons = cons;
2876
2877 if (done) {
2878 softint_schedule(txr->txr_si);
2879 if (txr->txr_qid == 0) {
2880 CLR(ifp->if_flags, IFF_OACTIVE);
2881 if_schedule_deferred_start(ifp);
2882 }
2883 }
2884
2885 return more;
2886 }
2887
2888 static void
2889 ixl_start(struct ifnet *ifp)
2890 {
2891 struct ixl_softc *sc;
2892 struct ixl_tx_ring *txr;
2893
2894 sc = ifp->if_softc;
2895 txr = sc->sc_qps[0].qp_txr;
2896
2897 mutex_enter(&txr->txr_lock);
2898 ixl_tx_common_locked(ifp, txr, false);
2899 mutex_exit(&txr->txr_lock);
2900 }
2901
2902 static inline unsigned int
2903 ixl_select_txqueue(struct ixl_softc *sc, struct mbuf *m)
2904 {
2905 u_int cpuid;
2906
2907 cpuid = cpu_index(curcpu());
2908
2909 return (unsigned int)(cpuid % sc->sc_nqueue_pairs);
2910 }
2911
2912 static int
2913 ixl_transmit(struct ifnet *ifp, struct mbuf *m)
2914 {
2915 struct ixl_softc *sc;
2916 struct ixl_tx_ring *txr;
2917 unsigned int qid;
2918
2919 sc = ifp->if_softc;
2920 qid = ixl_select_txqueue(sc, m);
2921
2922 txr = sc->sc_qps[qid].qp_txr;
2923
2924 if (__predict_false(!pcq_put(txr->txr_intrq, m))) {
2925 mutex_enter(&txr->txr_lock);
2926 txr->txr_pcqdrop.ev_count++;
2927 mutex_exit(&txr->txr_lock);
2928
2929 m_freem(m);
2930 return ENOBUFS;
2931 }
2932
2933 if (mutex_tryenter(&txr->txr_lock)) {
2934 ixl_tx_common_locked(ifp, txr, true);
2935 mutex_exit(&txr->txr_lock);
2936 } else {
2937 kpreempt_disable();
2938 softint_schedule(txr->txr_si);
2939 kpreempt_enable();
2940 }
2941
2942 return 0;
2943 }
2944
2945 static void
2946 ixl_deferred_transmit(void *xtxr)
2947 {
2948 struct ixl_tx_ring *txr = xtxr;
2949 struct ixl_softc *sc = txr->txr_sc;
2950 struct ifnet *ifp = &sc->sc_ec.ec_if;
2951
2952 mutex_enter(&txr->txr_lock);
2953 txr->txr_transmitdef.ev_count++;
2954 if (pcq_peek(txr->txr_intrq) != NULL)
2955 ixl_tx_common_locked(ifp, txr, true);
2956 mutex_exit(&txr->txr_lock);
2957 }
2958
2959 static struct ixl_rx_ring *
2960 ixl_rxr_alloc(struct ixl_softc *sc, unsigned int qid)
2961 {
2962 struct ixl_rx_ring *rxr = NULL;
2963 struct ixl_rx_map *maps = NULL, *rxm;
2964 unsigned int i;
2965
2966 rxr = kmem_zalloc(sizeof(*rxr), KM_SLEEP);
2967 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_rx_ring_ndescs,
2968 KM_SLEEP);
2969
2970 if (ixl_dmamem_alloc(sc, &rxr->rxr_mem,
2971 sizeof(struct ixl_rx_rd_desc_32) * sc->sc_rx_ring_ndescs,
2972 IXL_RX_QUEUE_ALIGN) != 0)
2973 goto free;
2974
2975 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2976 rxm = &maps[i];
2977
2978 if (bus_dmamap_create(sc->sc_dmat,
2979 IXL_MCLBYTES, 1, IXL_MCLBYTES, 0,
2980 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &rxm->rxm_map) != 0)
2981 goto uncreate;
2982
2983 rxm->rxm_m = NULL;
2984 }
2985
2986 rxr->rxr_cons = rxr->rxr_prod = 0;
2987 rxr->rxr_m_head = NULL;
2988 rxr->rxr_m_tail = &rxr->rxr_m_head;
2989 rxr->rxr_maps = maps;
2990
2991 rxr->rxr_tail = I40E_QRX_TAIL(qid);
2992 rxr->rxr_qid = qid;
2993 mutex_init(&rxr->rxr_lock, MUTEX_DEFAULT, IPL_NET);
2994
2995 return rxr;
2996
2997 uncreate:
2998 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2999 rxm = &maps[i];
3000
3001 if (rxm->rxm_map == NULL)
3002 continue;
3003
3004 bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
3005 }
3006
3007 ixl_dmamem_free(sc, &rxr->rxr_mem);
3008 free:
3009 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs);
3010 kmem_free(rxr, sizeof(*rxr));
3011
3012 return NULL;
3013 }
3014
3015 static void
3016 ixl_rxr_clean(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3017 {
3018 struct ixl_rx_map *maps, *rxm;
3019 bus_dmamap_t map;
3020 unsigned int i;
3021
3022 maps = rxr->rxr_maps;
3023 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
3024 rxm = &maps[i];
3025
3026 if (rxm->rxm_m == NULL)
3027 continue;
3028
3029 map = rxm->rxm_map;
3030 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3031 BUS_DMASYNC_POSTWRITE);
3032 bus_dmamap_unload(sc->sc_dmat, map);
3033
3034 m_freem(rxm->rxm_m);
3035 rxm->rxm_m = NULL;
3036 }
3037
3038 m_freem(rxr->rxr_m_head);
3039 rxr->rxr_m_head = NULL;
3040 rxr->rxr_m_tail = &rxr->rxr_m_head;
3041
3042 rxr->rxr_prod = rxr->rxr_cons = 0;
3043 }
3044
3045 static int
3046 ixl_rxr_enabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3047 {
3048 bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
3049 uint32_t reg;
3050 int i;
3051
3052 for (i = 0; i < 10; i++) {
3053 reg = ixl_rd(sc, ena);
3054 if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK))
3055 return 0;
3056
3057 delaymsec(10);
3058 }
3059
3060 return ETIMEDOUT;
3061 }
3062
3063 static int
3064 ixl_rxr_disabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3065 {
3066 bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
3067 uint32_t reg;
3068 int i;
3069
3070 KASSERT(mutex_owned(&rxr->rxr_lock));
3071
3072 for (i = 0; i < 20; i++) {
3073 reg = ixl_rd(sc, ena);
3074 if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK) == 0)
3075 return 0;
3076
3077 delaymsec(10);
3078 }
3079
3080 return ETIMEDOUT;
3081 }
3082
3083 static void
3084 ixl_rxr_config(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3085 {
3086 struct ixl_hmc_rxq rxq;
3087 struct ifnet *ifp = &sc->sc_ec.ec_if;
3088 uint16_t rxmax;
3089 void *hmc;
3090
3091 memset(&rxq, 0, sizeof(rxq));
3092 rxmax = ifp->if_mtu + IXL_MTU_ETHERLEN;
3093
3094 rxq.head = htole16(rxr->rxr_cons);
3095 rxq.base = htole64(IXL_DMA_DVA(&rxr->rxr_mem) / IXL_HMC_RXQ_BASE_UNIT);
3096 rxq.qlen = htole16(sc->sc_rx_ring_ndescs);
3097 rxq.dbuff = htole16(IXL_MCLBYTES / IXL_HMC_RXQ_DBUFF_UNIT);
3098 rxq.hbuff = 0;
3099 rxq.dtype = IXL_HMC_RXQ_DTYPE_NOSPLIT;
3100 rxq.dsize = IXL_HMC_RXQ_DSIZE_32;
3101 rxq.crcstrip = 1;
3102 rxq.l2sel = 1;
3103 rxq.showiv = 1;
3104 rxq.rxmax = htole16(rxmax);
3105 rxq.tphrdesc_ena = 0;
3106 rxq.tphwdesc_ena = 0;
3107 rxq.tphdata_ena = 0;
3108 rxq.tphhead_ena = 0;
3109 rxq.lrxqthresh = 0;
3110 rxq.prefena = 1;
3111
3112 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
3113 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
3114 ixl_hmc_pack(hmc, &rxq, ixl_hmc_pack_rxq,
3115 __arraycount(ixl_hmc_pack_rxq));
3116 }
3117
3118 static void
3119 ixl_rxr_unconfig(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3120 {
3121 void *hmc;
3122
3123 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
3124 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
3125 }
3126
3127 static void
3128 ixl_rxr_free(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3129 {
3130 struct ixl_rx_map *maps, *rxm;
3131 unsigned int i;
3132
3133 maps = rxr->rxr_maps;
3134 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
3135 rxm = &maps[i];
3136
3137 bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
3138 }
3139
3140 ixl_dmamem_free(sc, &rxr->rxr_mem);
3141 mutex_destroy(&rxr->rxr_lock);
3142 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs);
3143 kmem_free(rxr, sizeof(*rxr));
3144 }
3145
3146 static inline void
3147 ixl_rx_csum(struct mbuf *m, uint64_t qword)
3148 {
3149 int flags_mask;
3150
3151 if (!ISSET(qword, IXL_RX_DESC_L3L4P)) {
3152 /* No L3 or L4 checksum was calculated */
3153 return;
3154 }
3155
3156 switch (__SHIFTOUT(qword, IXL_RX_DESC_PTYPE_MASK)) {
3157 case IXL_RX_DESC_PTYPE_IPV4FRAG:
3158 case IXL_RX_DESC_PTYPE_IPV4:
3159 case IXL_RX_DESC_PTYPE_SCTPV4:
3160 case IXL_RX_DESC_PTYPE_ICMPV4:
3161 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
3162 break;
3163 case IXL_RX_DESC_PTYPE_TCPV4:
3164 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
3165 flags_mask |= M_CSUM_TCPv4 | M_CSUM_TCP_UDP_BAD;
3166 break;
3167 case IXL_RX_DESC_PTYPE_UDPV4:
3168 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
3169 flags_mask |= M_CSUM_UDPv4 | M_CSUM_TCP_UDP_BAD;
3170 break;
3171 case IXL_RX_DESC_PTYPE_TCPV6:
3172 flags_mask = M_CSUM_TCPv6 | M_CSUM_TCP_UDP_BAD;
3173 break;
3174 case IXL_RX_DESC_PTYPE_UDPV6:
3175 flags_mask = M_CSUM_UDPv6 | M_CSUM_TCP_UDP_BAD;
3176 break;
3177 default:
3178 flags_mask = 0;
3179 }
3180
3181 m->m_pkthdr.csum_flags |= (flags_mask & (M_CSUM_IPv4 |
3182 M_CSUM_TCPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv4 | M_CSUM_UDPv6));
3183
3184 if (ISSET(qword, IXL_RX_DESC_IPE)) {
3185 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_IPv4_BAD);
3186 }
3187
3188 if (ISSET(qword, IXL_RX_DESC_L4E)) {
3189 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_TCP_UDP_BAD);
3190 }
3191 }
3192
3193 static int
3194 ixl_rxeof(struct ixl_softc *sc, struct ixl_rx_ring *rxr, u_int rxlimit)
3195 {
3196 struct ifnet *ifp = &sc->sc_ec.ec_if;
3197 struct ixl_rx_wb_desc_32 *ring, *rxd;
3198 struct ixl_rx_map *rxm;
3199 bus_dmamap_t map;
3200 unsigned int cons, prod;
3201 struct mbuf *m;
3202 uint64_t word, word0;
3203 unsigned int len;
3204 unsigned int mask;
3205 int done = 0, more = 0;
3206
3207 KASSERT(mutex_owned(&rxr->rxr_lock));
3208
3209 if (!ISSET(ifp->if_flags, IFF_RUNNING))
3210 return 0;
3211
3212 prod = rxr->rxr_prod;
3213 cons = rxr->rxr_cons;
3214
3215 if (cons == prod)
3216 return 0;
3217
3218 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
3219 0, IXL_DMA_LEN(&rxr->rxr_mem),
3220 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3221
3222 ring = IXL_DMA_KVA(&rxr->rxr_mem);
3223 mask = sc->sc_rx_ring_ndescs - 1;
3224
3225 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
3226
3227 do {
3228 if (rxlimit-- <= 0) {
3229 more = 1;
3230 break;
3231 }
3232
3233 rxd = &ring[cons];
3234
3235 word = le64toh(rxd->qword1);
3236
3237 if (!ISSET(word, IXL_RX_DESC_DD))
3238 break;
3239
3240 rxm = &rxr->rxr_maps[cons];
3241
3242 map = rxm->rxm_map;
3243 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3244 BUS_DMASYNC_POSTREAD);
3245 bus_dmamap_unload(sc->sc_dmat, map);
3246
3247 m = rxm->rxm_m;
3248 rxm->rxm_m = NULL;
3249
3250 KASSERT(m != NULL);
3251
3252 len = (word & IXL_RX_DESC_PLEN_MASK) >> IXL_RX_DESC_PLEN_SHIFT;
3253 m->m_len = len;
3254 m->m_pkthdr.len = 0;
3255
3256 m->m_next = NULL;
3257 *rxr->rxr_m_tail = m;
3258 rxr->rxr_m_tail = &m->m_next;
3259
3260 m = rxr->rxr_m_head;
3261 m->m_pkthdr.len += len;
3262
3263 if (ISSET(word, IXL_RX_DESC_EOP)) {
3264 word0 = le64toh(rxd->qword0);
3265
3266 if (ISSET(word, IXL_RX_DESC_L2TAG1P)) {
3267 vlan_set_tag(m,
3268 __SHIFTOUT(word0, IXL_RX_DESC_L2TAG1_MASK));
3269 }
3270
3271 if ((ifp->if_capenable & IXL_IFCAP_RXCSUM) != 0)
3272 ixl_rx_csum(m, word);
3273
3274 if (!ISSET(word,
3275 IXL_RX_DESC_RXE | IXL_RX_DESC_OVERSIZE)) {
3276 m_set_rcvif(m, ifp);
3277 if_statinc_ref(nsr, if_ipackets);
3278 if_statadd_ref(nsr, if_ibytes,
3279 m->m_pkthdr.len);
3280 if_percpuq_enqueue(ifp->if_percpuq, m);
3281 } else {
3282 if_statinc_ref(nsr, if_ierrors);
3283 m_freem(m);
3284 }
3285
3286 rxr->rxr_m_head = NULL;
3287 rxr->rxr_m_tail = &rxr->rxr_m_head;
3288 }
3289
3290 cons++;
3291 cons &= mask;
3292
3293 done = 1;
3294 } while (cons != prod);
3295
3296 if (done) {
3297 rxr->rxr_cons = cons;
3298 if (ixl_rxfill(sc, rxr) == -1)
3299 if_statinc_ref(nsr, if_iqdrops);
3300 }
3301
3302 IF_STAT_PUTREF(ifp);
3303
3304 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
3305 0, IXL_DMA_LEN(&rxr->rxr_mem),
3306 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3307
3308 return more;
3309 }
3310
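/*
 * Refill the RX ring with fresh mbuf clusters for every unrefreshed
 * slot and advance the tail register.  Returns -1 if the ring is
 * already full or if an mbuf could not be allocated or DMA-loaded,
 * leaving the remaining slots empty.
 */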
3311 static int
3312 ixl_rxfill(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3313 {
3314 struct ixl_rx_rd_desc_32 *ring, *rxd;
3315 struct ixl_rx_map *rxm;
3316 bus_dmamap_t map;
3317 struct mbuf *m;
3318 unsigned int prod;
3319 unsigned int slots;
3320 unsigned int mask;
3321 int post = 0, error = 0;
3322
3323 KASSERT(mutex_owned(&rxr->rxr_lock));
3324
3325 prod = rxr->rxr_prod;
3326 slots = ixl_rxr_unrefreshed(rxr->rxr_prod, rxr->rxr_cons,
3327 sc->sc_rx_ring_ndescs);
3328
3329 ring = IXL_DMA_KVA(&rxr->rxr_mem);
3330 mask = sc->sc_rx_ring_ndescs - 1;
3331
3332 if (__predict_false(slots <= 0))
3333 return -1;
3334
3335 do {
3336 rxm = &rxr->rxr_maps[prod];
3337
3338 MGETHDR(m, M_DONTWAIT, MT_DATA);
3339 if (m == NULL) {
3340 rxr->rxr_mgethdr_failed.ev_count++;
3341 error = -1;
3342 break;
3343 }
3344
3345 MCLGET(m, M_DONTWAIT);
3346 if (!ISSET(m->m_flags, M_EXT)) {
3347 rxr->rxr_mgetcl_failed.ev_count++;
3348 error = -1;
3349 m_freem(m);
3350 break;
3351 }
3352
3353 m->m_len = m->m_pkthdr.len = MCLBYTES;
3354 m_adj(m, ETHER_ALIGN);
3355
3356 map = rxm->rxm_map;
3357
3358 if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
3359 BUS_DMA_READ | BUS_DMA_NOWAIT) != 0) {
3360 rxr->rxr_mbuf_load_failed.ev_count++;
3361 error = -1;
3362 m_freem(m);
3363 break;
3364 }
3365
3366 rxm->rxm_m = m;
3367
3368 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3369 BUS_DMASYNC_PREREAD);
3370
3371 rxd = &ring[prod];
3372
3373 rxd->paddr = htole64(map->dm_segs[0].ds_addr);
3374 rxd->haddr = htole64(0);
3375
3376 prod++;
3377 prod &= mask;
3378
3379 post = 1;
3380
3381 } while (--slots);
3382
3383 if (post) {
3384 rxr->rxr_prod = prod;
3385 ixl_wr(sc, rxr->rxr_tail, prod);
3386 }
3387
3388 return error;
3389 }
3390
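/*
 * Run TX completion and then RX processing for one queue pair, each
 * under its ring lock and bounded by the given limits.  The return
 * value has bit 0 set if TX work remains and bit 1 set if RX work
 * remains.
 */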
3391 static inline int
3392 ixl_handle_queue_common(struct ixl_softc *sc, struct ixl_queue_pair *qp,
3393 u_int txlimit, struct evcnt *txevcnt,
3394 u_int rxlimit, struct evcnt *rxevcnt)
3395 {
3396 struct ixl_tx_ring *txr = qp->qp_txr;
3397 struct ixl_rx_ring *rxr = qp->qp_rxr;
3398 int txmore, rxmore;
3399 int rv;
3400
3401 KASSERT(!mutex_owned(&txr->txr_lock));
3402 KASSERT(!mutex_owned(&rxr->rxr_lock));
3403
3404 mutex_enter(&txr->txr_lock);
3405 txevcnt->ev_count++;
3406 txmore = ixl_txeof(sc, txr, txlimit);
3407 mutex_exit(&txr->txr_lock);
3408
3409 mutex_enter(&rxr->rxr_lock);
3410 rxevcnt->ev_count++;
3411 rxmore = ixl_rxeof(sc, rxr, rxlimit);
3412 mutex_exit(&rxr->rxr_lock);
3413
3414 rv = txmore | (rxmore << 1);
3415
3416 return rv;
3417 }
3418
3419 static void
3420 ixl_sched_handle_queue(struct ixl_softc *sc, struct ixl_queue_pair *qp)
3421 {
3422
3423 if (qp->qp_workqueue)
3424 ixl_work_add(sc->sc_workq_txrx, &qp->qp_task);
3425 else
3426 softint_schedule(qp->qp_si);
3427 }
3428
3429 static int
3430 ixl_intr(void *xsc)
3431 {
3432 struct ixl_softc *sc = xsc;
3433 struct ixl_tx_ring *txr;
3434 struct ixl_rx_ring *rxr;
3435 uint32_t icr, rxintr, txintr;
3436 int rv = 0;
3437 unsigned int i;
3438
3439 KASSERT(sc != NULL);
3440
3441 ixl_enable_other_intr(sc);
3442 icr = ixl_rd(sc, I40E_PFINT_ICR0);
3443
3444 if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) {
3445 atomic_inc_64(&sc->sc_event_atq.ev_count);
3446 ixl_atq_done(sc);
3447 ixl_work_add(sc->sc_workq, &sc->sc_arq_task);
3448 rv = 1;
3449 }
3450
3451 if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) {
3452 atomic_inc_64(&sc->sc_event_link.ev_count);
3453 ixl_work_add(sc->sc_workq, &sc->sc_link_state_task);
3454 rv = 1;
3455 }
3456
3457 rxintr = icr & I40E_INTR_NOTX_RX_MASK;
3458 txintr = icr & I40E_INTR_NOTX_TX_MASK;
3459
3460 if (txintr || rxintr) {
3461 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
3462 txr = sc->sc_qps[i].qp_txr;
3463 rxr = sc->sc_qps[i].qp_rxr;
3464
3465 ixl_handle_queue_common(sc, &sc->sc_qps[i],
3466 IXL_TXRX_PROCESS_UNLIMIT, &txr->txr_intr,
3467 IXL_TXRX_PROCESS_UNLIMIT, &rxr->rxr_intr);
3468 }
3469 rv = 1;
3470 }
3471
3472 return rv;
3473 }
3474
3475 static int
3476 ixl_queue_intr(void *xqp)
3477 {
3478 struct ixl_queue_pair *qp = xqp;
3479 struct ixl_tx_ring *txr = qp->qp_txr;
3480 struct ixl_rx_ring *rxr = qp->qp_rxr;
3481 struct ixl_softc *sc = qp->qp_sc;
3482 u_int txlimit, rxlimit;
3483 int more;
3484
3485 txlimit = sc->sc_tx_intr_process_limit;
3486 rxlimit = sc->sc_rx_intr_process_limit;
3487 qp->qp_workqueue = sc->sc_txrx_workqueue;
3488
3489 more = ixl_handle_queue_common(sc, qp,
3490 txlimit, &txr->txr_intr, rxlimit, &rxr->rxr_intr);
3491
3492 if (more != 0) {
3493 ixl_sched_handle_queue(sc, qp);
3494 } else {
3495 /* for ALTQ */
3496 if (txr->txr_qid == 0)
3497 if_schedule_deferred_start(&sc->sc_ec.ec_if);
3498 softint_schedule(txr->txr_si);
3499
3500 ixl_enable_queue_intr(sc, qp);
3501 }
3502
3503 return 1;
3504 }
3505
3506 static void
3507 ixl_handle_queue(void *xqp)
3508 {
3509 struct ixl_queue_pair *qp = xqp;
3510 struct ixl_softc *sc = qp->qp_sc;
3511 struct ixl_tx_ring *txr = qp->qp_txr;
3512 struct ixl_rx_ring *rxr = qp->qp_rxr;
3513 u_int txlimit, rxlimit;
3514 int more;
3515
3516 txlimit = sc->sc_tx_process_limit;
3517 rxlimit = sc->sc_rx_process_limit;
3518
3519 more = ixl_handle_queue_common(sc, qp,
3520 txlimit, &txr->txr_defer, rxlimit, &rxr->rxr_defer);
3521
3522 if (more != 0)
3523 ixl_sched_handle_queue(sc, qp);
3524 else
3525 ixl_enable_queue_intr(sc, qp);
3526 }
3527
3528 static inline void
3529 ixl_print_hmc_error(struct ixl_softc *sc, uint32_t reg)
3530 {
3531 uint32_t hmc_idx, hmc_isvf;
3532 uint32_t hmc_errtype, hmc_objtype, hmc_data;
3533
3534 hmc_idx = reg & I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK;
3535 hmc_idx = hmc_idx >> I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT;
3536 hmc_isvf = reg & I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK;
3537 hmc_isvf = hmc_isvf >> I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT;
3538 hmc_errtype = reg & I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK;
3539 hmc_errtype = hmc_errtype >> I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT;
3540 hmc_objtype = reg & I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK;
3541 hmc_objtype = hmc_objtype >> I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT;
3542 hmc_data = ixl_rd(sc, I40E_PFHMC_ERRORDATA);
3543
3544 device_printf(sc->sc_dev,
3545 "HMC Error (idx=0x%x, isvf=0x%x, err=0x%x, obj=0x%x, data=0x%x)\n",
3546 hmc_idx, hmc_isvf, hmc_errtype, hmc_objtype, hmc_data);
3547 }
3548
3549 static int
3550 ixl_other_intr(void *xsc)
3551 {
3552 struct ixl_softc *sc = xsc;
3553 uint32_t icr, mask, reg;
3554 int rv = 0;
3555
3556 icr = ixl_rd(sc, I40E_PFINT_ICR0);
3557 mask = ixl_rd(sc, I40E_PFINT_ICR0_ENA);
3558
3559 if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) {
3560 atomic_inc_64(&sc->sc_event_atq.ev_count);
3561 ixl_atq_done(sc);
3562 ixl_work_add(sc->sc_workq, &sc->sc_arq_task);
3563 rv = 1;
3564 }
3565
3566 if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) {
3567 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3568 device_printf(sc->sc_dev, "link stat changed\n");
3569
3570 atomic_inc_64(&sc->sc_event_link.ev_count);
3571 ixl_work_add(sc->sc_workq, &sc->sc_link_state_task);
3572 rv = 1;
3573 }
3574
3575 if (ISSET(icr, I40E_PFINT_ICR0_GRST_MASK)) {
3576 CLR(mask, I40E_PFINT_ICR0_ENA_GRST_MASK);
3577 reg = ixl_rd(sc, I40E_GLGEN_RSTAT);
3578 reg = reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK;
3579 reg = reg >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
3580
3581 device_printf(sc->sc_dev, "GRST: %s\n",
3582 reg == I40E_RESET_CORER ? "CORER" :
3583 reg == I40E_RESET_GLOBR ? "GLOBR" :
3584 reg == I40E_RESET_EMPR ? "EMPR" :
3585 "POR");
3586 }
3587
3588 if (ISSET(icr, I40E_PFINT_ICR0_ECC_ERR_MASK))
3589 atomic_inc_64(&sc->sc_event_ecc_err.ev_count);
3590 if (ISSET(icr, I40E_PFINT_ICR0_PCI_EXCEPTION_MASK))
3591 atomic_inc_64(&sc->sc_event_pci_exception.ev_count);
3592 if (ISSET(icr, I40E_PFINT_ICR0_PE_CRITERR_MASK))
3593 atomic_inc_64(&sc->sc_event_crit_err.ev_count);
3594
3595 if (ISSET(icr, IXL_ICR0_CRIT_ERR_MASK)) {
3596 CLR(mask, IXL_ICR0_CRIT_ERR_MASK);
3597 device_printf(sc->sc_dev, "critical error\n");
3598 }
3599
3600 if (ISSET(icr, I40E_PFINT_ICR0_HMC_ERR_MASK)) {
3601 reg = ixl_rd(sc, I40E_PFHMC_ERRORINFO);
3602 if (ISSET(reg, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK))
3603 ixl_print_hmc_error(sc, reg);
3604 ixl_wr(sc, I40E_PFHMC_ERRORINFO, 0);
3605 }
3606
3607 ixl_wr(sc, I40E_PFINT_ICR0_ENA, mask);
3608 ixl_flush(sc);
3609 ixl_enable_other_intr(sc);
3610 return rv;
3611 }
3612
3613 static void
3614 ixl_get_link_status_done(struct ixl_softc *sc,
3615 const struct ixl_aq_desc *iaq)
3616 {
3617
3618 ixl_link_state_update(sc, iaq);
3619 }
3620
3621 static void
3622 ixl_get_link_status(void *xsc)
3623 {
3624 struct ixl_softc *sc = xsc;
3625 struct ixl_aq_desc *iaq;
3626 struct ixl_aq_link_param *param;
3627
3628 memset(&sc->sc_link_state_atq, 0, sizeof(sc->sc_link_state_atq));
3629 iaq = &sc->sc_link_state_atq.iatq_desc;
3630 iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
3631 param = (struct ixl_aq_link_param *)iaq->iaq_param;
3632 param->notify = IXL_AQ_LINK_NOTIFY;
3633
3634 ixl_atq_set(&sc->sc_link_state_atq, ixl_get_link_status_done);
3635 (void)ixl_atq_post(sc, &sc->sc_link_state_atq);
3636 }
3637
3638 static void
3639 ixl_link_state_update(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
3640 {
3641 struct ifnet *ifp = &sc->sc_ec.ec_if;
3642 int link_state;
3643
3644 KASSERT(kpreempt_disabled());
3645
3646 link_state = ixl_set_link_status(sc, iaq);
3647
3648 if (ifp->if_link_state != link_state)
3649 if_link_state_change(ifp, link_state);
3650
3651 if (link_state != LINK_STATE_DOWN) {
3652 if_schedule_deferred_start(ifp);
3653 }
3654 }
3655
3656 static void
3657 ixl_aq_dump(const struct ixl_softc *sc, const struct ixl_aq_desc *iaq,
3658 const char *msg)
3659 {
3660 char buf[512];
3661 size_t len;
3662
3663 len = sizeof(buf);
3664 buf[--len] = '\0';
3665
3666 device_printf(sc->sc_dev, "%s\n", msg);
3667 snprintb(buf, len, IXL_AQ_FLAGS_FMT, le16toh(iaq->iaq_flags));
3668 device_printf(sc->sc_dev, "flags %s opcode %04x\n",
3669 buf, le16toh(iaq->iaq_opcode));
3670 device_printf(sc->sc_dev, "datalen %u retval %u\n",
3671 le16toh(iaq->iaq_datalen), le16toh(iaq->iaq_retval));
3672 device_printf(sc->sc_dev, "cookie %016" PRIx64 "\n", iaq->iaq_cookie);
3673 device_printf(sc->sc_dev, "%08x %08x %08x %08x\n",
3674 le32toh(iaq->iaq_param[0]), le32toh(iaq->iaq_param[1]),
3675 le32toh(iaq->iaq_param[2]), le32toh(iaq->iaq_param[3]));
3676 }
3677
3678 static void
3679 ixl_arq(void *xsc)
3680 {
3681 struct ixl_softc *sc = xsc;
3682 struct ixl_aq_desc *arq, *iaq;
3683 struct ixl_aq_buf *aqb;
3684 unsigned int cons = sc->sc_arq_cons;
3685 unsigned int prod;
3686 int done = 0;
3687
3688 prod = ixl_rd(sc, sc->sc_aq_regs->arq_head) &
3689 sc->sc_aq_regs->arq_head_mask;
3690
3691 if (cons == prod)
3692 goto done;
3693
3694 arq = IXL_DMA_KVA(&sc->sc_arq);
3695
3696 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3697 0, IXL_DMA_LEN(&sc->sc_arq),
3698 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3699
3700 do {
3701 iaq = &arq[cons];
3702 aqb = sc->sc_arq_live[cons];
3703
3704 KASSERT(aqb != NULL);
3705
3706 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,
3707 BUS_DMASYNC_POSTREAD);
3708
3709 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3710 ixl_aq_dump(sc, iaq, "arq event");
3711
3712 switch (iaq->iaq_opcode) {
3713 case htole16(IXL_AQ_OP_PHY_LINK_STATUS):
3714 kpreempt_disable();
3715 ixl_link_state_update(sc, iaq);
3716 kpreempt_enable();
3717 break;
3718 }
3719
3720 memset(iaq, 0, sizeof(*iaq));
3721 sc->sc_arq_live[cons] = NULL;
3722 SIMPLEQ_INSERT_TAIL(&sc->sc_arq_idle, aqb, aqb_entry);
3723
3724 cons++;
3725 cons &= IXL_AQ_MASK;
3726
3727 done = 1;
3728 } while (cons != prod);
3729
3730 if (done) {
3731 sc->sc_arq_cons = cons;
3732 ixl_arq_fill(sc);
3733 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3734 0, IXL_DMA_LEN(&sc->sc_arq),
3735 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3736 }
3737
3738 done:
3739 ixl_enable_other_intr(sc);
3740 }
3741
3742 static void
3743 ixl_atq_set(struct ixl_atq *iatq,
3744 void (*fn)(struct ixl_softc *, const struct ixl_aq_desc *))
3745 {
3746
3747 iatq->iatq_fn = fn;
3748 }
3749
3750 static int
3751 ixl_atq_post_locked(struct ixl_softc *sc, struct ixl_atq *iatq)
3752 {
3753 struct ixl_aq_desc *atq, *slot;
3754 unsigned int prod, cons, prod_next;
3755
3756 /* assert locked */
3757 KASSERT(mutex_owned(&sc->sc_atq_lock));
3758
3759 atq = IXL_DMA_KVA(&sc->sc_atq);
3760 prod = sc->sc_atq_prod;
3761 cons = sc->sc_atq_cons;
3762 prod_next = (prod + 1) & IXL_AQ_MASK;
3763
3764 if (cons == prod_next)
3765 return ENOMEM;
3766
3767 slot = &atq[prod];
3768
3769 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3770 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
3771
3772 *slot = iatq->iatq_desc;
3773 slot->iaq_cookie = (uint64_t)((intptr_t)iatq);
3774
3775 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3776 ixl_aq_dump(sc, slot, "atq command");
3777
3778 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3779 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
3780
3781 sc->sc_atq_prod = prod_next;
3782 ixl_wr(sc, sc->sc_aq_regs->atq_tail, sc->sc_atq_prod);
3783
3784 return 0;
3785 }
3786
3787 static int
3788 ixl_atq_post(struct ixl_softc *sc, struct ixl_atq *iatq)
3789 {
3790 int rv;
3791
3792 mutex_enter(&sc->sc_atq_lock);
3793 rv = ixl_atq_post_locked(sc, iatq);
3794 mutex_exit(&sc->sc_atq_lock);
3795
3796 return rv;
3797 }
3798
3799 static void
3800 ixl_atq_done_locked(struct ixl_softc *sc)
3801 {
3802 struct ixl_aq_desc *atq, *slot;
3803 struct ixl_atq *iatq;
3804 unsigned int cons;
3805 unsigned int prod;
3806
3807 KASSERT(mutex_owned(&sc->sc_atq_lock));
3808
3809 prod = sc->sc_atq_prod;
3810 cons = sc->sc_atq_cons;
3811
3812 if (prod == cons)
3813 return;
3814
3815 atq = IXL_DMA_KVA(&sc->sc_atq);
3816
3817 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3818 0, IXL_DMA_LEN(&sc->sc_atq),
3819 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3820
3821 do {
3822 slot = &atq[cons];
3823 if (!ISSET(slot->iaq_flags, htole16(IXL_AQ_DD)))
3824 break;
3825
3826 iatq = (struct ixl_atq *)((intptr_t)slot->iaq_cookie);
3827 iatq->iatq_desc = *slot;
3828
3829 memset(slot, 0, sizeof(*slot));
3830
3831 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3832 ixl_aq_dump(sc, &iatq->iatq_desc, "atq response");
3833
3834 (*iatq->iatq_fn)(sc, &iatq->iatq_desc);
3835
3836 cons++;
3837 cons &= IXL_AQ_MASK;
3838 } while (cons != prod);
3839
3840 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3841 0, IXL_DMA_LEN(&sc->sc_atq),
3842 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3843
3844 sc->sc_atq_cons = cons;
3845 }
3846
3847 static void
3848 ixl_atq_done(struct ixl_softc *sc)
3849 {
3850
3851 mutex_enter(&sc->sc_atq_lock);
3852 ixl_atq_done_locked(sc);
3853 mutex_exit(&sc->sc_atq_lock);
3854 }
3855
3856 static void
3857 ixl_wakeup(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
3858 {
3859
3860 KASSERT(mutex_owned(&sc->sc_atq_lock));
3861
3862 cv_signal(&sc->sc_atq_cv);
3863 }
3864
3865 static int
3866 ixl_atq_exec(struct ixl_softc *sc, struct ixl_atq *iatq)
3867 {
3868 int error;
3869
3870 KASSERT(iatq->iatq_desc.iaq_cookie == 0);
3871
3872 ixl_atq_set(iatq, ixl_wakeup);
3873
3874 mutex_enter(&sc->sc_atq_lock);
3875 error = ixl_atq_post_locked(sc, iatq);
3876 if (error) {
3877 mutex_exit(&sc->sc_atq_lock);
3878 return error;
3879 }
3880
3881 error = cv_timedwait(&sc->sc_atq_cv, &sc->sc_atq_lock,
3882 IXL_ATQ_EXEC_TIMEOUT);
3883 mutex_exit(&sc->sc_atq_lock);
3884
3885 return error;
3886 }
3887
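/*
 * Post a single admin queue command and poll (1ms per step, up to `tm'
 * milliseconds) until the firmware consumes it, then copy the
 * completed descriptor back into *iaq.
 */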
3888 static int
3889 ixl_atq_poll(struct ixl_softc *sc, struct ixl_aq_desc *iaq, unsigned int tm)
3890 {
3891 struct ixl_aq_desc *atq, *slot;
3892 unsigned int prod;
3893 unsigned int t = 0;
3894
3895 mutex_enter(&sc->sc_atq_lock);
3896
3897 atq = IXL_DMA_KVA(&sc->sc_atq);
3898 prod = sc->sc_atq_prod;
3899 slot = atq + prod;
3900
3901 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3902 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
3903
3904 *slot = *iaq;
3905 slot->iaq_flags |= htole16(IXL_AQ_SI);
3906
3907 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3908 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
3909
3910 prod++;
3911 prod &= IXL_AQ_MASK;
3912 sc->sc_atq_prod = prod;
3913 ixl_wr(sc, sc->sc_aq_regs->atq_tail, prod);
3914
3915 while (ixl_rd(sc, sc->sc_aq_regs->atq_head) != prod) {
3916 delaymsec(1);
3917
3918 if (t++ > tm) {
3919 mutex_exit(&sc->sc_atq_lock);
3920 return ETIMEDOUT;
3921 }
3922 }
3923
3924 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3925 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTREAD);
3926 *iaq = *slot;
3927 memset(slot, 0, sizeof(*slot));
3928 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3929 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREREAD);
3930
3931 sc->sc_atq_cons = prod;
3932
3933 mutex_exit(&sc->sc_atq_lock);
3934
3935 return 0;
3936 }
3937
3938 static int
3939 ixl_get_version(struct ixl_softc *sc)
3940 {
3941 struct ixl_aq_desc iaq;
3942 uint32_t fwbuild, fwver, apiver;
3943 uint16_t api_maj_ver, api_min_ver;
3944
3945 memset(&iaq, 0, sizeof(iaq));
3946 iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VERSION);
3947
3950 if (ixl_atq_poll(sc, &iaq, 2000) != 0)
3951 return ETIMEDOUT;
3952 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK))
3953 return EIO;
3954
3955 fwbuild = le32toh(iaq.iaq_param[1]);
3956 fwver = le32toh(iaq.iaq_param[2]);
3957 apiver = le32toh(iaq.iaq_param[3]);
3958
3959 api_maj_ver = (uint16_t)apiver;
3960 api_min_ver = (uint16_t)(apiver >> 16);
3961
3962 aprint_normal(", FW %hu.%hu.%05u API %hu.%hu", (uint16_t)fwver,
3963 (uint16_t)(fwver >> 16), fwbuild, api_maj_ver, api_min_ver);
3964
3965 if (sc->sc_mac_type == I40E_MAC_X722) {
3966 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK |
3967 IXL_SC_AQ_FLAG_NVMREAD);
3968 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL);
3969 }
3970
3971 #define IXL_API_VER(maj, min) (((uint32_t)(maj) << 16) | (min))
3972 if (IXL_API_VER(api_maj_ver, api_min_ver) >= IXL_API_VER(1, 5)) {
3973 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL);
3974 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK);
3975 }
3976 #undef IXL_API_VER
3977
3978 return 0;
3979 }
3980
3981 static int
3982 ixl_get_nvm_version(struct ixl_softc *sc)
3983 {
3984 uint16_t nvmver, cfg_ptr, eetrack_hi, eetrack_lo, oem_hi, oem_lo;
3985 uint32_t eetrack, oem;
3986 uint16_t nvm_maj_ver, nvm_min_ver, oem_build;
3987 uint8_t oem_ver, oem_patch;
3988
3989 nvmver = cfg_ptr = eetrack_hi = eetrack_lo = oem_hi = oem_lo = 0;
3990 ixl_rd16_nvm(sc, I40E_SR_NVM_DEV_STARTER_VERSION, &nvmver);
3991 ixl_rd16_nvm(sc, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
3992 ixl_rd16_nvm(sc, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
3993 ixl_rd16_nvm(sc, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
3994 ixl_rd16_nvm(sc, cfg_ptr + I40E_NVM_OEM_VER_OFF, &oem_hi);
3995 ixl_rd16_nvm(sc, cfg_ptr + I40E_NVM_OEM_VER_OFF + 1, &oem_lo);
3996
3997 nvm_maj_ver = (uint16_t)__SHIFTOUT(nvmver, IXL_NVM_VERSION_HI_MASK);
3998 nvm_min_ver = (uint16_t)__SHIFTOUT(nvmver, IXL_NVM_VERSION_LO_MASK);
3999 eetrack = ((uint32_t)eetrack_hi << 16) | eetrack_lo;
4000 oem = ((uint32_t)oem_hi << 16) | oem_lo;
4001 oem_ver = __SHIFTOUT(oem, IXL_NVM_OEMVERSION_MASK);
4002 oem_build = __SHIFTOUT(oem, IXL_NVM_OEMBUILD_MASK);
4003 oem_patch = __SHIFTOUT(oem, IXL_NVM_OEMPATCH_MASK);
4004
4005 aprint_normal(" nvm %x.%02x etid %08x oem %d.%d.%d",
4006 nvm_maj_ver, nvm_min_ver, eetrack,
4007 oem_ver, oem_build, oem_patch);
4008
4009 return 0;
4010 }
4011
4012 static int
4013 ixl_pxe_clear(struct ixl_softc *sc)
4014 {
4015 struct ixl_aq_desc iaq;
4016 int rv;
4017
4018 memset(&iaq, 0, sizeof(iaq));
4019 iaq.iaq_opcode = htole16(IXL_AQ_OP_CLEAR_PXE_MODE);
4020 iaq.iaq_param[0] = htole32(0x2);
4021
4022 rv = ixl_atq_poll(sc, &iaq, 250);
4023
4024 ixl_wr(sc, I40E_GLLAN_RCTL_0, 0x1);
4025
4026 if (rv != 0)
4027 return ETIMEDOUT;
4028
4029 switch (iaq.iaq_retval) {
4030 case htole16(IXL_AQ_RC_OK):
4031 case htole16(IXL_AQ_RC_EEXIST):
4032 break;
4033 default:
4034 return EIO;
4035 }
4036
4037 return 0;
4038 }
4039
4040 static int
4041 ixl_lldp_shut(struct ixl_softc *sc)
4042 {
4043 struct ixl_aq_desc iaq;
4044
4045 memset(&iaq, 0, sizeof(iaq));
4046 iaq.iaq_opcode = htole16(IXL_AQ_OP_LLDP_STOP_AGENT);
4047 iaq.iaq_param[0] = htole32(IXL_LLDP_SHUTDOWN);
4048
4049 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4050 aprint_error_dev(sc->sc_dev, "STOP LLDP AGENT timeout\n");
4051 return -1;
4052 }
4053
4054 switch (iaq.iaq_retval) {
4055 case htole16(IXL_AQ_RC_EMODE):
4056 case htole16(IXL_AQ_RC_EPERM):
4057 /* ignore silently */
4058 default:
4059 break;
4060 }
4061
4062 return 0;
4063 }
4064
4065 static void
4066 ixl_parse_hw_capability(struct ixl_softc *sc, struct ixl_aq_capability *cap)
4067 {
4068 uint16_t id;
4069 uint32_t number, logical_id;
4070
4071 id = le16toh(cap->cap_id);
4072 number = le32toh(cap->number);
4073 logical_id = le32toh(cap->logical_id);
4074
4075 switch (id) {
4076 case IXL_AQ_CAP_RSS:
4077 sc->sc_rss_table_size = number;
4078 sc->sc_rss_table_entry_width = logical_id;
4079 break;
4080 case IXL_AQ_CAP_RXQ:
4081 case IXL_AQ_CAP_TXQ:
4082 sc->sc_nqueue_pairs_device = MIN(number,
4083 sc->sc_nqueue_pairs_device);
4084 break;
4085 }
4086 }
4087
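/*
 * ixl_get_hw_capabilities:
 *	Fetch the function capability list from firmware and feed each
 *	entry to ixl_parse_hw_capability().  If firmware answers with
 *	IXL_AQ_RC_ENOMEM, it reports the required buffer length in
 *	iaq_datalen and the request is retried with a larger buffer.
 */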
4088 static int
4089 ixl_get_hw_capabilities(struct ixl_softc *sc)
4090 {
4091 struct ixl_dmamem idm;
4092 struct ixl_aq_desc iaq;
4093 struct ixl_aq_capability *caps;
4094 size_t i, ncaps;
4095 bus_size_t caps_size;
4096 uint16_t status;
4097 int rv;
4098
4099 caps_size = sizeof(caps[0]) * 40;
4100 memset(&iaq, 0, sizeof(iaq));
4101 iaq.iaq_opcode = htole16(IXL_AQ_OP_LIST_FUNC_CAP);
4102
4103 do {
4104 if (ixl_dmamem_alloc(sc, &idm, caps_size, 0) != 0) {
4105 return -1;
4106 }
4107
4108 iaq.iaq_flags = htole16(IXL_AQ_BUF |
4109 (caps_size > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4110 iaq.iaq_datalen = htole16(caps_size);
4111 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
4112
4113 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0,
4114 IXL_DMA_LEN(&idm), BUS_DMASYNC_PREREAD);
4115
4116 rv = ixl_atq_poll(sc, &iaq, 250);
4117
4118 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0,
4119 IXL_DMA_LEN(&idm), BUS_DMASYNC_POSTREAD);
4120
4121 if (rv != 0) {
4122 aprint_error(", HW capabilities timeout\n");
4123 goto done;
4124 }
4125
4126 status = le16toh(iaq.iaq_retval);
4127
4128 if (status == IXL_AQ_RC_ENOMEM) {
4129 caps_size = le16toh(iaq.iaq_datalen);
4130 ixl_dmamem_free(sc, &idm);
4131 }
4132 } while (status == IXL_AQ_RC_ENOMEM);
4133
4134 if (status != IXL_AQ_RC_OK) {
4135 aprint_error(", HW capabilities error\n");
4136 goto done;
4137 }
4138
4139 caps = IXL_DMA_KVA(&idm);
4140 ncaps = le16toh(iaq.iaq_param[1]);
4141
4142 for (i = 0; i < ncaps; i++) {
4143 ixl_parse_hw_capability(sc, &caps[i]);
4144 }
4145
4146 done:
4147 ixl_dmamem_free(sc, &idm);
4148 return rv;
4149 }
4150
4151 static int
4152 ixl_get_mac(struct ixl_softc *sc)
4153 {
4154 struct ixl_dmamem idm;
4155 struct ixl_aq_desc iaq;
4156 struct ixl_aq_mac_addresses *addrs;
4157 int rv;
4158
4159 if (ixl_dmamem_alloc(sc, &idm, sizeof(*addrs), 0) != 0) {
4160 aprint_error(", unable to allocate mac addresses\n");
4161 return -1;
4162 }
4163
4164 memset(&iaq, 0, sizeof(iaq));
4165 iaq.iaq_flags = htole16(IXL_AQ_BUF);
4166 iaq.iaq_opcode = htole16(IXL_AQ_OP_MAC_ADDRESS_READ);
4167 iaq.iaq_datalen = htole16(sizeof(*addrs));
4168 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
4169
4170 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4171 BUS_DMASYNC_PREREAD);
4172
4173 rv = ixl_atq_poll(sc, &iaq, 250);
4174
4175 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4176 BUS_DMASYNC_POSTREAD);
4177
4178 if (rv != 0) {
4179 aprint_error(", MAC ADDRESS READ timeout\n");
4180 rv = -1;
4181 goto done;
4182 }
4183 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4184 aprint_error(", MAC ADDRESS READ error\n");
4185 rv = -1;
4186 goto done;
4187 }
4188
4189 addrs = IXL_DMA_KVA(&idm);
4190 if (!ISSET(iaq.iaq_param[0], htole32(IXL_AQ_MAC_PORT_VALID))) {
4191 printf(", port address is not valid\n");
4192 goto done;
4193 }
4194
4195 memcpy(sc->sc_enaddr, addrs->port, ETHER_ADDR_LEN);
4196 rv = 0;
4197
4198 done:
4199 ixl_dmamem_free(sc, &idm);
4200 return rv;
4201 }
4202
4203 static int
4204 ixl_get_switch_config(struct ixl_softc *sc)
4205 {
4206 struct ixl_dmamem idm;
4207 struct ixl_aq_desc iaq;
4208 struct ixl_aq_switch_config *hdr;
4209 struct ixl_aq_switch_config_element *elms, *elm;
4210 unsigned int nelm, i;
4211 int rv;
4212
4213 if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
4214 aprint_error_dev(sc->sc_dev,
4215 "unable to allocate switch config buffer\n");
4216 return -1;
4217 }
4218
4219 memset(&iaq, 0, sizeof(iaq));
4220 iaq.iaq_flags = htole16(IXL_AQ_BUF |
4221 (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4222 iaq.iaq_opcode = htole16(IXL_AQ_OP_SWITCH_GET_CONFIG);
4223 iaq.iaq_datalen = htole16(IXL_AQ_BUFLEN);
4224 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
4225
4226 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4227 BUS_DMASYNC_PREREAD);
4228
4229 rv = ixl_atq_poll(sc, &iaq, 250);
4230
4231 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4232 BUS_DMASYNC_POSTREAD);
4233
4234 if (rv != 0) {
4235 aprint_error_dev(sc->sc_dev, "GET SWITCH CONFIG timeout\n");
4236 rv = -1;
4237 goto done;
4238 }
4239 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4240 aprint_error_dev(sc->sc_dev, "GET SWITCH CONFIG error\n");
4241 rv = -1;
4242 goto done;
4243 }
4244
4245 hdr = IXL_DMA_KVA(&idm);
4246 elms = (struct ixl_aq_switch_config_element *)(hdr + 1);
4247
4248 nelm = le16toh(hdr->num_reported);
4249 if (nelm < 1) {
4250 aprint_error_dev(sc->sc_dev, "no switch config available\n");
4251 rv = -1;
4252 goto done;
4253 }
4254
4255 for (i = 0; i < nelm; i++) {
4256 elm = &elms[i];
4257
4258 aprint_debug_dev(sc->sc_dev,
4259 "type %x revision %u seid %04x\n",
4260 elm->type, elm->revision, le16toh(elm->seid));
4261 aprint_debug_dev(sc->sc_dev,
4262 "uplink %04x downlink %04x\n",
4263 le16toh(elm->uplink_seid),
4264 le16toh(elm->downlink_seid));
4265 aprint_debug_dev(sc->sc_dev,
4266 "conntype %x scheduler %04x extra %04x\n",
4267 elm->connection_type,
4268 le16toh(elm->scheduler_id),
4269 le16toh(elm->element_info));
4270 }
4271
4272 elm = &elms[0];
4273
4274 sc->sc_uplink_seid = elm->uplink_seid;
4275 sc->sc_downlink_seid = elm->downlink_seid;
4276 sc->sc_seid = elm->seid;
4277
4278 if ((sc->sc_uplink_seid == htole16(0)) !=
4279 (sc->sc_downlink_seid == htole16(0))) {
4280 aprint_error_dev(sc->sc_dev, "SEIDs are misconfigured\n");
4281 rv = -1;
4282 goto done;
4283 }
4284
4285 done:
4286 ixl_dmamem_free(sc, &idm);
4287 return rv;
4288 }
4289
4290 static int
4291 ixl_phy_mask_ints(struct ixl_softc *sc)
4292 {
4293 struct ixl_aq_desc iaq;
4294
4295 memset(&iaq, 0, sizeof(iaq));
4296 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_EVENT_MASK);
4297 iaq.iaq_param[2] = htole32(IXL_AQ_PHY_EV_MASK &
4298 ~(IXL_AQ_PHY_EV_LINK_UPDOWN | IXL_AQ_PHY_EV_MODULE_QUAL_FAIL |
4299 IXL_AQ_PHY_EV_MEDIA_NA));
4300
4301 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4302 aprint_error_dev(sc->sc_dev, "SET PHY EVENT MASK timeout\n");
4303 return -1;
4304 }
4305 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4306 aprint_error_dev(sc->sc_dev, "SET PHY EVENT MASK error\n");
4307 return -1;
4308 }
4309
4310 return 0;
4311 }
4312
4313 static int
4314 ixl_get_phy_abilities(struct ixl_softc *sc, struct ixl_dmamem *idm)
4315 {
4316 struct ixl_aq_desc iaq;
4317 int rv;
4318
4319 memset(&iaq, 0, sizeof(iaq));
4320 iaq.iaq_flags = htole16(IXL_AQ_BUF |
4321 (IXL_DMA_LEN(idm) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4322 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_GET_ABILITIES);
4323 iaq.iaq_datalen = htole16(IXL_DMA_LEN(idm));
4324 iaq.iaq_param[0] = htole32(IXL_AQ_PHY_REPORT_INIT);
4325 ixl_aq_dva(&iaq, IXL_DMA_DVA(idm));
4326
4327 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
4328 BUS_DMASYNC_PREREAD);
4329
4330 rv = ixl_atq_poll(sc, &iaq, 250);
4331
4332 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
4333 BUS_DMASYNC_POSTREAD);
4334
4335 if (rv != 0)
4336 return -1;
4337
4338 return le16toh(iaq.iaq_retval);
4339 }
4340
4341 static int
4342 ixl_get_phy_info(struct ixl_softc *sc)
4343 {
4344 struct ixl_dmamem idm;
4345 struct ixl_aq_phy_abilities *phy;
4346 int rv;
4347
4348 if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
4349 aprint_error_dev(sc->sc_dev,
4350 "unable to allocate phy abilities buffer\n");
4351 return -1;
4352 }
4353
4354 rv = ixl_get_phy_abilities(sc, &idm);
4355 switch (rv) {
4356 case -1:
4357 aprint_error_dev(sc->sc_dev, "GET PHY ABILITIES timeout\n");
4358 goto done;
4359 case IXL_AQ_RC_OK:
4360 break;
4361 case IXL_AQ_RC_EIO:
4362 		aprint_error_dev(sc->sc_dev, "unable to query phy types\n");
4363 goto done;
4364 default:
4365 aprint_error_dev(sc->sc_dev,
4366 		    "GET PHY ABILITIES error %u\n", rv);
4367 goto done;
4368 }
4369
4370 phy = IXL_DMA_KVA(&idm);
4371
4372 sc->sc_phy_types = le32toh(phy->phy_type);
4373 sc->sc_phy_types |= (uint64_t)le32toh(phy->phy_type_ext) << 32;
4374
4375 sc->sc_phy_abilities = phy->abilities;
4376 sc->sc_phy_linkspeed = phy->link_speed;
4377 sc->sc_phy_fec_cfg = phy->fec_cfg_curr_mod_ext_info &
4378 (IXL_AQ_ENABLE_FEC_KR | IXL_AQ_ENABLE_FEC_RS |
4379 IXL_AQ_REQUEST_FEC_KR | IXL_AQ_REQUEST_FEC_RS);
4380 sc->sc_eee_cap = phy->eee_capability;
4381 sc->sc_eeer_val = phy->eeer_val;
4382 sc->sc_d3_lpan = phy->d3_lpan;
4383
4384 rv = 0;
4385
4386 done:
4387 ixl_dmamem_free(sc, &idm);
4388 return rv;
4389 }
4390
4391 static int
4392 ixl_set_phy_config(struct ixl_softc *sc,
4393 uint8_t link_speed, uint8_t abilities, bool polling)
4394 {
4395 struct ixl_aq_phy_param *param;
4396 struct ixl_atq iatq;
4397 struct ixl_aq_desc *iaq;
4398 int error;
4399
4400 memset(&iatq, 0, sizeof(iatq));
4401
4402 iaq = &iatq.iatq_desc;
4403 iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_CONFIG);
4404 param = (struct ixl_aq_phy_param *)&iaq->iaq_param;
4405 param->phy_types = htole32((uint32_t)sc->sc_phy_types);
4406 param->phy_type_ext = (uint8_t)(sc->sc_phy_types >> 32);
4407 param->link_speed = link_speed;
4408 param->abilities = abilities | IXL_AQ_PHY_ABILITY_AUTO_LINK;
4409 param->fec_cfg = sc->sc_phy_fec_cfg;
4410 param->eee_capability = sc->sc_eee_cap;
4411 param->eeer_val = sc->sc_eeer_val;
4412 param->d3_lpan = sc->sc_d3_lpan;
4413
4414 if (polling)
4415 error = ixl_atq_poll(sc, iaq, 250);
4416 else
4417 error = ixl_atq_exec(sc, &iatq);
4418
4419 if (error != 0)
4420 return error;
4421
4422 switch (le16toh(iaq->iaq_retval)) {
4423 case IXL_AQ_RC_OK:
4424 break;
4425 case IXL_AQ_RC_EPERM:
4426 return EPERM;
4427 default:
4428 return EIO;
4429 }
4430
4431 return 0;
4432 }
4433
4434 static int
4435 ixl_set_phy_autoselect(struct ixl_softc *sc)
4436 {
4437 uint8_t link_speed, abilities;
4438
4439 link_speed = sc->sc_phy_linkspeed;
4440 abilities = IXL_PHY_ABILITY_LINKUP | IXL_PHY_ABILITY_AUTONEGO;
4441
4442 return ixl_set_phy_config(sc, link_speed, abilities, true);
4443 }
4444
4445 static int
4446 ixl_get_link_status_poll(struct ixl_softc *sc, int *l)
4447 {
4448 struct ixl_aq_desc iaq;
4449 struct ixl_aq_link_param *param;
4450 int link;
4451
4452 memset(&iaq, 0, sizeof(iaq));
4453 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
4454 param = (struct ixl_aq_link_param *)iaq.iaq_param;
4455 param->notify = IXL_AQ_LINK_NOTIFY;
4456
4457 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4458 return ETIMEDOUT;
4459 }
4460 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4461 return EIO;
4462 }
4463
4464 link = ixl_set_link_status(sc, &iaq);
4465
4466 if (l != NULL)
4467 *l = link;
4468
4469 return 0;
4470 }
4471
4472 static int
4473 ixl_get_vsi(struct ixl_softc *sc)
4474 {
4475 struct ixl_dmamem *vsi = &sc->sc_scratch;
4476 struct ixl_aq_desc iaq;
4477 struct ixl_aq_vsi_param *param;
4478 struct ixl_aq_vsi_reply *reply;
4479 struct ixl_aq_vsi_data *data;
4480 int rv;
4481
4482 /* grumble, vsi info isn't "known" at compile time */
4483
4484 memset(&iaq, 0, sizeof(iaq));
4485 iaq.iaq_flags = htole16(IXL_AQ_BUF |
4486 (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4487 iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VSI_PARAMS);
4488 iaq.iaq_datalen = htole16(IXL_DMA_LEN(vsi));
4489 ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
4490
4491 param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
4492 param->uplink_seid = sc->sc_seid;
4493
4494 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4495 BUS_DMASYNC_PREREAD);
4496
4497 rv = ixl_atq_poll(sc, &iaq, 250);
4498
4499 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4500 BUS_DMASYNC_POSTREAD);
4501
4502 if (rv != 0) {
4503 return ETIMEDOUT;
4504 }
4505
4506 switch (le16toh(iaq.iaq_retval)) {
4507 case IXL_AQ_RC_OK:
4508 break;
4509 case IXL_AQ_RC_ENOENT:
4510 return ENOENT;
4511 case IXL_AQ_RC_EACCES:
4512 return EACCES;
4513 default:
4514 return EIO;
4515 }
4516
4517 reply = (struct ixl_aq_vsi_reply *)iaq.iaq_param;
4518 sc->sc_vsi_number = reply->vsi_number;
4519 data = IXL_DMA_KVA(vsi);
4520 sc->sc_vsi_stat_counter_idx = le16toh(data->stat_counter_idx);
4521
4522 return 0;
4523 }
4524
4525 static int
4526 ixl_set_vsi(struct ixl_softc *sc)
4527 {
4528 struct ixl_dmamem *vsi = &sc->sc_scratch;
4529 struct ixl_aq_desc iaq;
4530 struct ixl_aq_vsi_param *param;
4531 struct ixl_aq_vsi_data *data = IXL_DMA_KVA(vsi);
4532 unsigned int qnum;
4533 uint16_t val;
4534 int rv;
4535
4536 qnum = sc->sc_nqueue_pairs - 1;
4537
4538 data->valid_sections = htole16(IXL_AQ_VSI_VALID_QUEUE_MAP |
4539 IXL_AQ_VSI_VALID_VLAN);
4540
4541 CLR(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_MASK));
4542 SET(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_CONTIG));
4543 data->queue_mapping[0] = htole16(0);
4544 data->tc_mapping[0] = htole16((0 << IXL_AQ_VSI_TC_Q_OFFSET_SHIFT) |
4545 (qnum << IXL_AQ_VSI_TC_Q_NUMBER_SHIFT));
4546
4547 val = le16toh(data->port_vlan_flags);
4548 CLR(val, IXL_AQ_VSI_PVLAN_MODE_MASK | IXL_AQ_VSI_PVLAN_EMOD_MASK);
4549 SET(val, IXL_AQ_VSI_PVLAN_MODE_ALL);
4550
4551 if (ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWTAGGING)) {
4552 SET(val, IXL_AQ_VSI_PVLAN_EMOD_STR_BOTH);
4553 } else {
4554 SET(val, IXL_AQ_VSI_PVLAN_EMOD_NOTHING);
4555 }
4556
4557 data->port_vlan_flags = htole16(val);
4558
4559 /* grumble, vsi info isn't "known" at compile time */
4560
4561 memset(&iaq, 0, sizeof(iaq));
4562 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD |
4563 (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4564 iaq.iaq_opcode = htole16(IXL_AQ_OP_UPD_VSI_PARAMS);
4565 iaq.iaq_datalen = htole16(IXL_DMA_LEN(vsi));
4566 ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
4567
4568 param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
4569 param->uplink_seid = sc->sc_seid;
4570
4571 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4572 BUS_DMASYNC_PREWRITE);
4573
4574 rv = ixl_atq_poll(sc, &iaq, 250);
4575
4576 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4577 BUS_DMASYNC_POSTWRITE);
4578
4579 if (rv != 0) {
4580 return ETIMEDOUT;
4581 }
4582
4583 switch (le16toh(iaq.iaq_retval)) {
4584 case IXL_AQ_RC_OK:
4585 break;
4586 case IXL_AQ_RC_ENOENT:
4587 return ENOENT;
4588 case IXL_AQ_RC_EACCES:
4589 return EACCES;
4590 default:
4591 return EIO;
4592 }
4593
4594 return 0;
4595 }
4596
4597 static void
4598 ixl_set_filter_control(struct ixl_softc *sc)
4599 {
4600 uint32_t reg;
4601
4602 reg = ixl_rd_rx_csr(sc, I40E_PFQF_CTL_0);
4603
4604 CLR(reg, I40E_PFQF_CTL_0_HASHLUTSIZE_MASK);
4605 SET(reg, I40E_HASH_LUT_SIZE_128 << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT);
4606
4607 SET(reg, I40E_PFQF_CTL_0_FD_ENA_MASK);
4608 SET(reg, I40E_PFQF_CTL_0_ETYPE_ENA_MASK);
4609 SET(reg, I40E_PFQF_CTL_0_MACVLAN_ENA_MASK);
4610
4611 ixl_wr_rx_csr(sc, I40E_PFQF_CTL_0, reg);
4612 }
4613
4614 static inline void
4615 ixl_get_default_rss_key(uint32_t *buf, size_t len)
4616 {
4617 size_t cplen;
4618 uint8_t rss_seed[RSS_KEYSIZE];
4619
4620 rss_getkey(rss_seed);
4621 memset(buf, 0, len);
4622
4623 cplen = MIN(len, sizeof(rss_seed));
4624 memcpy(buf, rss_seed, cplen);
4625 }
4626
4627 static void
4628 ixl_set_rss_key(struct ixl_softc *sc)
4629 {
4630 uint32_t rss_seed[IXL_RSS_KEY_SIZE_REG];
4631 size_t i;
4632
4633 ixl_get_default_rss_key(rss_seed, sizeof(rss_seed));
4634
4635 for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
4636 ixl_wr_rx_csr(sc, I40E_PFQF_HKEY(i), rss_seed[i]);
4637 }
4638 }
4639
4640 static void
4641 ixl_set_rss_pctype(struct ixl_softc *sc)
4642 {
4643 uint64_t set_hena = 0;
4644 uint32_t hena0, hena1;
4645
4646 if (sc->sc_mac_type == I40E_MAC_X722)
4647 set_hena = IXL_RSS_HENA_DEFAULT_X722;
4648 else
4649 set_hena = IXL_RSS_HENA_DEFAULT_XL710;
4650
4651 hena0 = ixl_rd_rx_csr(sc, I40E_PFQF_HENA(0));
4652 hena1 = ixl_rd_rx_csr(sc, I40E_PFQF_HENA(1));
4653
4654 SET(hena0, set_hena);
4655 SET(hena1, set_hena >> 32);
4656
4657 ixl_wr_rx_csr(sc, I40E_PFQF_HENA(0), hena0);
4658 ixl_wr_rx_csr(sc, I40E_PFQF_HENA(1), hena1);
4659 }
4660
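/*
 * ixl_set_rss_hlut:
 *	Program the RSS hash lookup table.  Each byte-wide entry maps a
 *	hash bucket to a queue pair; queues are assigned round-robin over
 *	sc_nqueue_pairs and the table is written four entries at a time
 *	through the 32-bit I40E_PFQF_HLUT registers.
 */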
4661 static void
4662 ixl_set_rss_hlut(struct ixl_softc *sc)
4663 {
4664 unsigned int qid;
4665 uint8_t hlut_buf[512], lut_mask;
4666 uint32_t *hluts;
4667 size_t i, hluts_num;
4668
4669 lut_mask = (0x01 << sc->sc_rss_table_entry_width) - 1;
4670
4671 for (i = 0; i < sc->sc_rss_table_size; i++) {
4672 qid = i % sc->sc_nqueue_pairs;
4673 hlut_buf[i] = qid & lut_mask;
4674 }
4675
4676 hluts = (uint32_t *)hlut_buf;
4677 hluts_num = sc->sc_rss_table_size >> 2;
4678 for (i = 0; i < hluts_num; i++) {
4679 ixl_wr(sc, I40E_PFQF_HLUT(i), hluts[i]);
4680 }
4681 ixl_flush(sc);
4682 }
4683
4684 static void
4685 ixl_config_rss(struct ixl_softc *sc)
4686 {
4687
4688 KASSERT(mutex_owned(&sc->sc_cfg_lock));
4689
4690 ixl_set_rss_key(sc);
4691 ixl_set_rss_pctype(sc);
4692 ixl_set_rss_hlut(sc);
4693 }
4694
4695 static const struct ixl_phy_type *
4696 ixl_search_phy_type(uint8_t phy_type)
4697 {
4698 const struct ixl_phy_type *itype;
4699 uint64_t mask;
4700 unsigned int i;
4701
4702 if (phy_type >= 64)
4703 return NULL;
4704
4705 mask = 1ULL << phy_type;
4706
4707 for (i = 0; i < __arraycount(ixl_phy_type_map); i++) {
4708 itype = &ixl_phy_type_map[i];
4709
4710 if (ISSET(itype->phy_type, mask))
4711 return itype;
4712 }
4713
4714 return NULL;
4715 }
4716
4717 static uint64_t
4718 ixl_search_link_speed(uint8_t link_speed)
4719 {
4720 const struct ixl_speed_type *type;
4721 unsigned int i;
4722
4723 for (i = 0; i < __arraycount(ixl_speed_type_map); i++) {
4724 type = &ixl_speed_type_map[i];
4725
4726 if (ISSET(type->dev_speed, link_speed))
4727 return type->net_speed;
4728 }
4729
4730 return 0;
4731 }
4732
4733 static uint8_t
4734 ixl_search_baudrate(uint64_t baudrate)
4735 {
4736 const struct ixl_speed_type *type;
4737 unsigned int i;
4738
4739 for (i = 0; i < __arraycount(ixl_speed_type_map); i++) {
4740 type = &ixl_speed_type_map[i];
4741
4742 if (type->net_speed == baudrate) {
4743 return type->dev_speed;
4744 }
4745 }
4746
4747 return 0;
4748 }
4749
4750 static int
4751 ixl_restart_an(struct ixl_softc *sc)
4752 {
4753 struct ixl_aq_desc iaq;
4754
4755 memset(&iaq, 0, sizeof(iaq));
4756 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_RESTART_AN);
4757 iaq.iaq_param[0] =
4758 htole32(IXL_AQ_PHY_RESTART_AN | IXL_AQ_PHY_LINK_ENABLE);
4759
4760 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4761 aprint_error_dev(sc->sc_dev, "RESTART AN timeout\n");
4762 return -1;
4763 }
4764 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4765 aprint_error_dev(sc->sc_dev, "RESTART AN error\n");
4766 return -1;
4767 }
4768
4769 return 0;
4770 }
4771
4772 static int
4773 ixl_add_macvlan(struct ixl_softc *sc, const uint8_t *macaddr,
4774 uint16_t vlan, uint16_t flags)
4775 {
4776 struct ixl_aq_desc iaq;
4777 struct ixl_aq_add_macvlan *param;
4778 struct ixl_aq_add_macvlan_elem *elem;
4779
4780 memset(&iaq, 0, sizeof(iaq));
4781 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4782 iaq.iaq_opcode = htole16(IXL_AQ_OP_ADD_MACVLAN);
4783 iaq.iaq_datalen = htole16(sizeof(*elem));
4784 ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));
4785
4786 param = (struct ixl_aq_add_macvlan *)&iaq.iaq_param;
4787 param->num_addrs = htole16(1);
4788 param->seid0 = htole16(0x8000) | sc->sc_seid;
4789 param->seid1 = 0;
4790 param->seid2 = 0;
4791
4792 elem = IXL_DMA_KVA(&sc->sc_scratch);
4793 memset(elem, 0, sizeof(*elem));
4794 memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
4795 elem->flags = htole16(IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH | flags);
4796 elem->vlan = htole16(vlan);
4797
4798 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4799 return IXL_AQ_RC_EINVAL;
4800 }
4801
4802 switch (le16toh(iaq.iaq_retval)) {
4803 case IXL_AQ_RC_OK:
4804 break;
4805 case IXL_AQ_RC_ENOSPC:
4806 return ENOSPC;
4807 case IXL_AQ_RC_ENOENT:
4808 return ENOENT;
4809 case IXL_AQ_RC_EACCES:
4810 return EACCES;
4811 case IXL_AQ_RC_EEXIST:
4812 return EEXIST;
4813 case IXL_AQ_RC_EINVAL:
4814 return EINVAL;
4815 default:
4816 return EIO;
4817 }
4818
4819 return 0;
4820 }
4821
4822 static int
4823 ixl_remove_macvlan(struct ixl_softc *sc, const uint8_t *macaddr,
4824 uint16_t vlan, uint16_t flags)
4825 {
4826 struct ixl_aq_desc iaq;
4827 struct ixl_aq_remove_macvlan *param;
4828 struct ixl_aq_remove_macvlan_elem *elem;
4829
4830 memset(&iaq, 0, sizeof(iaq));
4831 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4832 iaq.iaq_opcode = htole16(IXL_AQ_OP_REMOVE_MACVLAN);
4833 iaq.iaq_datalen = htole16(sizeof(*elem));
4834 ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));
4835
4836 param = (struct ixl_aq_remove_macvlan *)&iaq.iaq_param;
4837 param->num_addrs = htole16(1);
4838 param->seid0 = htole16(0x8000) | sc->sc_seid;
4839 param->seid1 = 0;
4840 param->seid2 = 0;
4841
4842 elem = IXL_DMA_KVA(&sc->sc_scratch);
4843 memset(elem, 0, sizeof(*elem));
4844 memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
4845 elem->flags = htole16(IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH | flags);
4846 elem->vlan = htole16(vlan);
4847
4848 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4849 return EINVAL;
4850 }
4851
4852 switch (le16toh(iaq.iaq_retval)) {
4853 case IXL_AQ_RC_OK:
4854 break;
4855 case IXL_AQ_RC_ENOENT:
4856 return ENOENT;
4857 case IXL_AQ_RC_EACCES:
4858 return EACCES;
4859 case IXL_AQ_RC_EINVAL:
4860 return EINVAL;
4861 default:
4862 return EIO;
4863 }
4864
4865 return 0;
4866 }
4867
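/*
 * ixl_hmc:
 *	Set up the Host Memory Cache backing store for the LAN TX/RX (and
 *	unused FCoE) context objects.  The per-type object count and size
 *	are read from the device, page-aligned backing memory and a table
 *	of segment descriptors pointing at those pages are allocated and
 *	programmed through the PFHMC_SD* registers, and finally the
 *	per-type base/count registers are written.
 */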
4868 static int
4869 ixl_hmc(struct ixl_softc *sc)
4870 {
4871 struct {
4872 uint32_t count;
4873 uint32_t minsize;
4874 bus_size_t objsiz;
4875 bus_size_t setoff;
4876 bus_size_t setcnt;
4877 } regs[] = {
4878 {
4879 0,
4880 IXL_HMC_TXQ_MINSIZE,
4881 I40E_GLHMC_LANTXOBJSZ,
4882 I40E_GLHMC_LANTXBASE(sc->sc_pf_id),
4883 I40E_GLHMC_LANTXCNT(sc->sc_pf_id),
4884 },
4885 {
4886 0,
4887 IXL_HMC_RXQ_MINSIZE,
4888 I40E_GLHMC_LANRXOBJSZ,
4889 I40E_GLHMC_LANRXBASE(sc->sc_pf_id),
4890 I40E_GLHMC_LANRXCNT(sc->sc_pf_id),
4891 },
4892 {
4893 0,
4894 0,
4895 I40E_GLHMC_FCOEDDPOBJSZ,
4896 I40E_GLHMC_FCOEDDPBASE(sc->sc_pf_id),
4897 I40E_GLHMC_FCOEDDPCNT(sc->sc_pf_id),
4898 },
4899 {
4900 0,
4901 0,
4902 I40E_GLHMC_FCOEFOBJSZ,
4903 I40E_GLHMC_FCOEFBASE(sc->sc_pf_id),
4904 I40E_GLHMC_FCOEFCNT(sc->sc_pf_id),
4905 },
4906 };
4907 struct ixl_hmc_entry *e;
4908 uint64_t size, dva;
4909 uint8_t *kva;
4910 uint64_t *sdpage;
4911 unsigned int i;
4912 int npages, tables;
4913 uint32_t reg;
4914
4915 CTASSERT(__arraycount(regs) <= __arraycount(sc->sc_hmc_entries));
4916
4917 regs[IXL_HMC_LAN_TX].count = regs[IXL_HMC_LAN_RX].count =
4918 ixl_rd(sc, I40E_GLHMC_LANQMAX);
4919
4920 size = 0;
4921 for (i = 0; i < __arraycount(regs); i++) {
4922 e = &sc->sc_hmc_entries[i];
4923
4924 e->hmc_count = regs[i].count;
4925 reg = ixl_rd(sc, regs[i].objsiz);
4926 e->hmc_size = BIT_ULL(0x3F & reg);
4927 e->hmc_base = size;
4928
4929 if ((e->hmc_size * 8) < regs[i].minsize) {
4930 aprint_error_dev(sc->sc_dev,
4931 "kernel hmc entry is too big\n");
4932 return -1;
4933 }
4934
4935 size += roundup(e->hmc_size * e->hmc_count, IXL_HMC_ROUNDUP);
4936 }
4937 size = roundup(size, IXL_HMC_PGSIZE);
4938 npages = size / IXL_HMC_PGSIZE;
4939
4940 tables = roundup(size, IXL_HMC_L2SZ) / IXL_HMC_L2SZ;
4941
4942 if (ixl_dmamem_alloc(sc, &sc->sc_hmc_pd, size, IXL_HMC_PGSIZE) != 0) {
4943 aprint_error_dev(sc->sc_dev,
4944 "unable to allocate hmc pd memory\n");
4945 return -1;
4946 }
4947
4948 if (ixl_dmamem_alloc(sc, &sc->sc_hmc_sd, tables * IXL_HMC_PGSIZE,
4949 IXL_HMC_PGSIZE) != 0) {
4950 aprint_error_dev(sc->sc_dev,
4951 "unable to allocate hmc sd memory\n");
4952 ixl_dmamem_free(sc, &sc->sc_hmc_pd);
4953 return -1;
4954 }
4955
4956 kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
4957 memset(kva, 0, IXL_DMA_LEN(&sc->sc_hmc_pd));
4958
4959 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
4960 0, IXL_DMA_LEN(&sc->sc_hmc_pd),
4961 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4962
4963 dva = IXL_DMA_DVA(&sc->sc_hmc_pd);
4964 sdpage = IXL_DMA_KVA(&sc->sc_hmc_sd);
4965 memset(sdpage, 0, IXL_DMA_LEN(&sc->sc_hmc_sd));
4966
4967 for (i = 0; (int)i < npages; i++) {
4968 *sdpage = htole64(dva | IXL_HMC_PDVALID);
4969 sdpage++;
4970
4971 dva += IXL_HMC_PGSIZE;
4972 }
4973
4974 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_sd),
4975 0, IXL_DMA_LEN(&sc->sc_hmc_sd),
4976 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4977
4978 dva = IXL_DMA_DVA(&sc->sc_hmc_sd);
4979 for (i = 0; (int)i < tables; i++) {
4980 uint32_t count;
4981
4982 KASSERT(npages >= 0);
4983
4984 count = ((unsigned int)npages > IXL_HMC_PGS) ?
4985 IXL_HMC_PGS : (unsigned int)npages;
4986
4987 ixl_wr(sc, I40E_PFHMC_SDDATAHIGH, dva >> 32);
4988 ixl_wr(sc, I40E_PFHMC_SDDATALOW, dva |
4989 (count << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
4990 (1U << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT));
4991 ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
4992 ixl_wr(sc, I40E_PFHMC_SDCMD,
4993 (1U << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | i);
4994
4995 npages -= IXL_HMC_PGS;
4996 dva += IXL_HMC_PGSIZE;
4997 }
4998
4999 for (i = 0; i < __arraycount(regs); i++) {
5000 e = &sc->sc_hmc_entries[i];
5001
5002 ixl_wr(sc, regs[i].setoff, e->hmc_base / IXL_HMC_ROUNDUP);
5003 ixl_wr(sc, regs[i].setcnt, e->hmc_count);
5004 }
5005
5006 return 0;
5007 }
5008
5009 static void
5010 ixl_hmc_free(struct ixl_softc *sc)
5011 {
5012 ixl_dmamem_free(sc, &sc->sc_hmc_sd);
5013 ixl_dmamem_free(sc, &sc->sc_hmc_pd);
5014 }
5015
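/*
 * ixl_hmc_pack:
 *	Pack a host-order structure into the bit-level layout described by
 *	an array of ixl_hmc_pack entries (source byte offset, destination
 *	LSB and width in bits).  For example, a field with lsb 11 and
 *	width 13 starts at bit 3 of destination byte 1 and continues into
 *	byte 2.
 */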
5016 static void
5017 ixl_hmc_pack(void *d, const void *s, const struct ixl_hmc_pack *packing,
5018 unsigned int npacking)
5019 {
5020 uint8_t *dst = d;
5021 const uint8_t *src = s;
5022 unsigned int i;
5023
5024 for (i = 0; i < npacking; i++) {
5025 const struct ixl_hmc_pack *pack = &packing[i];
5026 unsigned int offset = pack->lsb / 8;
5027 unsigned int align = pack->lsb % 8;
5028 const uint8_t *in = src + pack->offset;
5029 uint8_t *out = dst + offset;
5030 int width = pack->width;
5031 unsigned int inbits = 0;
5032
5033 if (align) {
5034 inbits = (*in++) << align;
5035 *out++ |= (inbits & 0xff);
5036 inbits >>= 8;
5037
5038 width -= 8 - align;
5039 }
5040
5041 while (width >= 8) {
5042 inbits |= (*in++) << align;
5043 *out++ = (inbits & 0xff);
5044 inbits >>= 8;
5045
5046 width -= 8;
5047 }
5048
5049 if (width > 0) {
5050 inbits |= (*in) << align;
5051 *out |= (inbits & ((1 << width) - 1));
5052 }
5053 }
5054 }
5055
5056 static struct ixl_aq_buf *
5057 ixl_aqb_alloc(struct ixl_softc *sc)
5058 {
5059 struct ixl_aq_buf *aqb;
5060
5061 aqb = malloc(sizeof(*aqb), M_DEVBUF, M_WAITOK);
5062 if (aqb == NULL)
5063 return NULL;
5064
5065 aqb->aqb_size = IXL_AQ_BUFLEN;
5066
5067 if (bus_dmamap_create(sc->sc_dmat, aqb->aqb_size, 1,
5068 aqb->aqb_size, 0,
5069 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &aqb->aqb_map) != 0)
5070 goto free;
5071 if (bus_dmamem_alloc(sc->sc_dmat, aqb->aqb_size,
5072 IXL_AQ_ALIGN, 0, &aqb->aqb_seg, 1, &aqb->aqb_nsegs,
5073 BUS_DMA_WAITOK) != 0)
5074 goto destroy;
5075 if (bus_dmamem_map(sc->sc_dmat, &aqb->aqb_seg, aqb->aqb_nsegs,
5076 aqb->aqb_size, &aqb->aqb_data, BUS_DMA_WAITOK) != 0)
5077 goto dma_free;
5078 if (bus_dmamap_load(sc->sc_dmat, aqb->aqb_map, aqb->aqb_data,
5079 aqb->aqb_size, NULL, BUS_DMA_WAITOK) != 0)
5080 goto unmap;
5081
5082 return aqb;
5083 unmap:
5084 bus_dmamem_unmap(sc->sc_dmat, aqb->aqb_data, aqb->aqb_size);
5085 dma_free:
5086 bus_dmamem_free(sc->sc_dmat, &aqb->aqb_seg, 1);
5087 destroy:
5088 bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
5089 free:
5090 free(aqb, M_DEVBUF);
5091
5092 return NULL;
5093 }
5094
5095 static void
5096 ixl_aqb_free(struct ixl_softc *sc, struct ixl_aq_buf *aqb)
5097 {
5098 bus_dmamap_unload(sc->sc_dmat, aqb->aqb_map);
5099 bus_dmamem_unmap(sc->sc_dmat, aqb->aqb_data, aqb->aqb_size);
5100 bus_dmamem_free(sc->sc_dmat, &aqb->aqb_seg, 1);
5101 bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
5102 free(aqb, M_DEVBUF);
5103 }
5104
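/*
 * ixl_arq_fill:
 *	Refill the admin receive queue (ARQ) with DMA buffers so firmware
 *	has descriptors to post events into.  Buffers are taken from the
 *	sc_arq_idle list when possible and only allocated when that list
 *	is empty; the tail register is bumped only if at least one
 *	descriptor was (re)armed.
 */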
5105 static int
5106 ixl_arq_fill(struct ixl_softc *sc)
5107 {
5108 struct ixl_aq_buf *aqb;
5109 struct ixl_aq_desc *arq, *iaq;
5110 unsigned int prod = sc->sc_arq_prod;
5111 unsigned int n;
5112 int post = 0;
5113
5114 n = ixl_rxr_unrefreshed(sc->sc_arq_prod, sc->sc_arq_cons,
5115 IXL_AQ_NUM);
5116 arq = IXL_DMA_KVA(&sc->sc_arq);
5117
5118 if (__predict_false(n <= 0))
5119 return 0;
5120
5121 do {
5122 aqb = sc->sc_arq_live[prod];
5123 iaq = &arq[prod];
5124
5125 if (aqb == NULL) {
5126 aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle);
5127 if (aqb != NULL) {
5128 SIMPLEQ_REMOVE(&sc->sc_arq_idle, aqb,
5129 ixl_aq_buf, aqb_entry);
5130 } else if ((aqb = ixl_aqb_alloc(sc)) == NULL) {
5131 break;
5132 }
5133
5134 sc->sc_arq_live[prod] = aqb;
5135 memset(aqb->aqb_data, 0, aqb->aqb_size);
5136
5137 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0,
5138 aqb->aqb_size, BUS_DMASYNC_PREREAD);
5139
5140 iaq->iaq_flags = htole16(IXL_AQ_BUF |
5141 (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ?
5142 IXL_AQ_LB : 0));
5143 iaq->iaq_opcode = 0;
5144 iaq->iaq_datalen = htole16(aqb->aqb_size);
5145 iaq->iaq_retval = 0;
5146 iaq->iaq_cookie = 0;
5147 iaq->iaq_param[0] = 0;
5148 iaq->iaq_param[1] = 0;
5149 ixl_aq_dva(iaq, aqb->aqb_map->dm_segs[0].ds_addr);
5150 }
5151
5152 prod++;
5153 prod &= IXL_AQ_MASK;
5154
5155 post = 1;
5156
5157 } while (--n);
5158
5159 if (post) {
5160 sc->sc_arq_prod = prod;
5161 ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
5162 }
5163
5164 return post;
5165 }
5166
5167 static void
5168 ixl_arq_unfill(struct ixl_softc *sc)
5169 {
5170 struct ixl_aq_buf *aqb;
5171 unsigned int i;
5172
5173 for (i = 0; i < __arraycount(sc->sc_arq_live); i++) {
5174 aqb = sc->sc_arq_live[i];
5175 if (aqb == NULL)
5176 continue;
5177
5178 sc->sc_arq_live[i] = NULL;
5179 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, aqb->aqb_size,
5180 BUS_DMASYNC_POSTREAD);
5181 ixl_aqb_free(sc, aqb);
5182 }
5183
5184 while ((aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle)) != NULL) {
5185 SIMPLEQ_REMOVE(&sc->sc_arq_idle, aqb,
5186 ixl_aq_buf, aqb_entry);
5187 ixl_aqb_free(sc, aqb);
5188 }
5189 }
5190
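/*
 * ixl_clear_hw:
 *	Quiesce the function before reset: mask all PF/VF interrupts,
 *	terminate the interrupt linked lists, warn the LAN engine of the
 *	coming transmit queue disables via GLLAN_TXPRE_QDIS, then disable
 *	every transmit and receive queue assigned to this PF.
 */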
5191 static void
5192 ixl_clear_hw(struct ixl_softc *sc)
5193 {
5194 uint32_t num_queues, base_queue;
5195 uint32_t num_pf_int;
5196 uint32_t num_vf_int;
5197 uint32_t num_vfs;
5198 uint32_t i, j;
5199 uint32_t val;
5200 uint32_t eol = 0x7ff;
5201
5202 /* get number of interrupts, queues, and vfs */
5203 val = ixl_rd(sc, I40E_GLPCI_CNF2);
5204 num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
5205 I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
5206 num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
5207 I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
5208
5209 val = ixl_rd(sc, I40E_PFLAN_QALLOC);
5210 base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
5211 I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
5212 j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
5213 I40E_PFLAN_QALLOC_LASTQ_SHIFT;
5214 if (val & I40E_PFLAN_QALLOC_VALID_MASK)
5215 num_queues = (j - base_queue) + 1;
5216 else
5217 num_queues = 0;
5218
5219 val = ixl_rd(sc, I40E_PF_VT_PFALLOC);
5220 i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
5221 I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
5222 j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
5223 I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
5224 if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
5225 num_vfs = (j - i) + 1;
5226 else
5227 num_vfs = 0;
5228
5229 /* stop all the interrupts */
5230 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0);
5231 ixl_flush(sc);
5232 val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
5233 for (i = 0; i < num_pf_int - 2; i++)
5234 ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), val);
5235 ixl_flush(sc);
5236
5237 /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
5238 val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
5239 ixl_wr(sc, I40E_PFINT_LNKLST0, val);
5240 for (i = 0; i < num_pf_int - 2; i++)
5241 ixl_wr(sc, I40E_PFINT_LNKLSTN(i), val);
5242 val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
5243 for (i = 0; i < num_vfs; i++)
5244 ixl_wr(sc, I40E_VPINT_LNKLST0(i), val);
5245 for (i = 0; i < num_vf_int - 2; i++)
5246 ixl_wr(sc, I40E_VPINT_LNKLSTN(i), val);
5247
5248 /* warn the HW of the coming Tx disables */
5249 for (i = 0; i < num_queues; i++) {
5250 uint32_t abs_queue_idx = base_queue + i;
5251 uint32_t reg_block = 0;
5252
5253 if (abs_queue_idx >= 128) {
5254 reg_block = abs_queue_idx / 128;
5255 abs_queue_idx %= 128;
5256 }
5257
5258 val = ixl_rd(sc, I40E_GLLAN_TXPRE_QDIS(reg_block));
5259 val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
5260 val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
5261 val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
5262
5263 ixl_wr(sc, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
5264 }
5265 delaymsec(400);
5266
5267 /* stop all the queues */
5268 for (i = 0; i < num_queues; i++) {
5269 ixl_wr(sc, I40E_QINT_TQCTL(i), 0);
5270 ixl_wr(sc, I40E_QTX_ENA(i), 0);
5271 ixl_wr(sc, I40E_QINT_RQCTL(i), 0);
5272 ixl_wr(sc, I40E_QRX_ENA(i), 0);
5273 }
5274
5275 /* short wait for all queue disables to settle */
5276 delaymsec(50);
5277 }
5278
5279 static int
5280 ixl_pf_reset(struct ixl_softc *sc)
5281 {
5282 uint32_t cnt = 0;
5283 uint32_t cnt1 = 0;
5284 uint32_t reg = 0, reg0 = 0;
5285 uint32_t grst_del;
5286
5287 /*
5288 * Poll for Global Reset steady state in case of recent GRST.
5289 * The grst delay value is in 100ms units, and we'll wait a
5290 * couple counts longer to be sure we don't just miss the end.
5291 */
5292 grst_del = ixl_rd(sc, I40E_GLGEN_RSTCTL);
5293 grst_del &= I40E_GLGEN_RSTCTL_GRSTDEL_MASK;
5294 grst_del >>= I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
5295
5296 grst_del = grst_del * 20;
5297
5298 for (cnt = 0; cnt < grst_del; cnt++) {
5299 reg = ixl_rd(sc, I40E_GLGEN_RSTAT);
5300 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
5301 break;
5302 delaymsec(100);
5303 }
5304 if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
5305 aprint_error(", Global reset polling failed to complete\n");
5306 return -1;
5307 }
5308
5309 /* Now Wait for the FW to be ready */
5310 for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
5311 reg = ixl_rd(sc, I40E_GLNVM_ULD);
5312 reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5313 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
5314 if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5315 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))
5316 break;
5317
5318 delaymsec(10);
5319 }
5320 if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5321 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
5322 aprint_error(", wait for FW Reset complete timed out "
5323 "(I40E_GLNVM_ULD = 0x%x)\n", reg);
5324 return -1;
5325 }
5326
5327 /*
5328 * If there was a Global Reset in progress when we got here,
5329 * we don't need to do the PF Reset
5330 */
5331 if (cnt == 0) {
5332 reg = ixl_rd(sc, I40E_PFGEN_CTRL);
5333 ixl_wr(sc, I40E_PFGEN_CTRL, reg | I40E_PFGEN_CTRL_PFSWR_MASK);
5334 for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) {
5335 reg = ixl_rd(sc, I40E_PFGEN_CTRL);
5336 if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
5337 break;
5338 delaymsec(1);
5339
5340 reg0 = ixl_rd(sc, I40E_GLGEN_RSTAT);
5341 if (reg0 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
5342 aprint_error(", Core reset upcoming."
5343 				    " Skipping PF reset request\n");
5344 return -1;
5345 }
5346 }
5347 if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
5348 			aprint_error(", PF reset polling failed to complete "
5349 			    "(I40E_PFGEN_CTRL = 0x%x)\n", reg);
5350 return -1;
5351 }
5352 }
5353
5354 return 0;
5355 }
5356
5357 static int
5358 ixl_dmamem_alloc(struct ixl_softc *sc, struct ixl_dmamem *ixm,
5359 bus_size_t size, bus_size_t align)
5360 {
5361 ixm->ixm_size = size;
5362
5363 if (bus_dmamap_create(sc->sc_dmat, ixm->ixm_size, 1,
5364 ixm->ixm_size, 0,
5365 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
5366 &ixm->ixm_map) != 0)
5367 return 1;
5368 if (bus_dmamem_alloc(sc->sc_dmat, ixm->ixm_size,
5369 align, 0, &ixm->ixm_seg, 1, &ixm->ixm_nsegs,
5370 BUS_DMA_WAITOK) != 0)
5371 goto destroy;
5372 if (bus_dmamem_map(sc->sc_dmat, &ixm->ixm_seg, ixm->ixm_nsegs,
5373 ixm->ixm_size, &ixm->ixm_kva, BUS_DMA_WAITOK) != 0)
5374 goto free;
5375 if (bus_dmamap_load(sc->sc_dmat, ixm->ixm_map, ixm->ixm_kva,
5376 ixm->ixm_size, NULL, BUS_DMA_WAITOK) != 0)
5377 goto unmap;
5378
5379 memset(ixm->ixm_kva, 0, ixm->ixm_size);
5380
5381 return 0;
5382 unmap:
5383 bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
5384 free:
5385 bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
5386 destroy:
5387 bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
5388 return 1;
5389 }
5390
5391 static void
5392 ixl_dmamem_free(struct ixl_softc *sc, struct ixl_dmamem *ixm)
5393 {
5394 bus_dmamap_unload(sc->sc_dmat, ixm->ixm_map);
5395 bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
5396 bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
5397 bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
5398 }
5399
5400 static int
5401 ixl_setup_vlan_hwfilter(struct ixl_softc *sc)
5402 {
5403 struct ethercom *ec = &sc->sc_ec;
5404 struct vlanid_list *vlanidp;
5405 int rv;
5406
5407 ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
5408 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
5409 ixl_remove_macvlan(sc, etherbroadcastaddr, 0,
5410 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
5411
5412 rv = ixl_add_macvlan(sc, sc->sc_enaddr, 0,
5413 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5414 if (rv != 0)
5415 return rv;
5416 rv = ixl_add_macvlan(sc, etherbroadcastaddr, 0,
5417 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5418 if (rv != 0)
5419 return rv;
5420
5421 ETHER_LOCK(ec);
5422 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
5423 rv = ixl_add_macvlan(sc, sc->sc_enaddr,
5424 vlanidp->vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5425 if (rv != 0)
5426 break;
5427 rv = ixl_add_macvlan(sc, etherbroadcastaddr,
5428 vlanidp->vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5429 if (rv != 0)
5430 break;
5431 }
5432 ETHER_UNLOCK(ec);
5433
5434 return rv;
5435 }
5436
5437 static void
5438 ixl_teardown_vlan_hwfilter(struct ixl_softc *sc)
5439 {
5440 struct vlanid_list *vlanidp;
5441 struct ethercom *ec = &sc->sc_ec;
5442
5443 ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
5444 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5445 ixl_remove_macvlan(sc, etherbroadcastaddr, 0,
5446 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5447
5448 ETHER_LOCK(ec);
5449 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
5450 ixl_remove_macvlan(sc, sc->sc_enaddr,
5451 vlanidp->vid, IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5452 ixl_remove_macvlan(sc, etherbroadcastaddr,
5453 vlanidp->vid, IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5454 }
5455 ETHER_UNLOCK(ec);
5456
5457 ixl_add_macvlan(sc, sc->sc_enaddr, 0,
5458 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
5459 ixl_add_macvlan(sc, etherbroadcastaddr, 0,
5460 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
5461 }
5462
5463 static int
5464 ixl_update_macvlan(struct ixl_softc *sc)
5465 {
5466 int rv = 0;
5467 int next_ec_capenable = sc->sc_ec.ec_capenable;
5468
5469 if (ISSET(next_ec_capenable, ETHERCAP_VLAN_HWFILTER)) {
5470 rv = ixl_setup_vlan_hwfilter(sc);
5471 if (rv != 0)
5472 ixl_teardown_vlan_hwfilter(sc);
5473 } else {
5474 ixl_teardown_vlan_hwfilter(sc);
5475 }
5476
5477 return rv;
5478 }
5479
5480 static int
5481 ixl_ifflags_cb(struct ethercom *ec)
5482 {
5483 struct ifnet *ifp = &ec->ec_if;
5484 struct ixl_softc *sc = ifp->if_softc;
5485 int rv, change;
5486
5487 mutex_enter(&sc->sc_cfg_lock);
5488
5489 change = ec->ec_capenable ^ sc->sc_cur_ec_capenable;
5490
5491 if (ISSET(change, ETHERCAP_VLAN_HWTAGGING)) {
5492 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWTAGGING;
5493 rv = ENETRESET;
5494 goto out;
5495 }
5496
5497 if (ISSET(change, ETHERCAP_VLAN_HWFILTER)) {
5498 rv = ixl_update_macvlan(sc);
5499 if (rv == 0) {
5500 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWFILTER;
5501 } else {
5502 CLR(ec->ec_capenable, ETHERCAP_VLAN_HWFILTER);
5503 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER);
5504 }
5505 }
5506
5507 rv = ixl_iff(sc);
5508 out:
5509 mutex_exit(&sc->sc_cfg_lock);
5510
5511 return rv;
5512 }
5513
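/*
 * ixl_set_link_status:
 *	Translate a PHY LINK STATUS admin reply into ifmedia active/status
 *	words and the interface baudrate, and return the corresponding
 *	LINK_STATE_* value.
 */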
5514 static int
5515 ixl_set_link_status(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
5516 {
5517 const struct ixl_aq_link_status *status;
5518 const struct ixl_phy_type *itype;
5519
5520 uint64_t ifm_active = IFM_ETHER;
5521 uint64_t ifm_status = IFM_AVALID;
5522 int link_state = LINK_STATE_DOWN;
5523 uint64_t baudrate = 0;
5524
5525 status = (const struct ixl_aq_link_status *)iaq->iaq_param;
5526 if (!ISSET(status->link_info, IXL_AQ_LINK_UP_FUNCTION)) {
5527 ifm_active |= IFM_NONE;
5528 goto done;
5529 }
5530
5531 ifm_active |= IFM_FDX;
5532 ifm_status |= IFM_ACTIVE;
5533 link_state = LINK_STATE_UP;
5534
5535 itype = ixl_search_phy_type(status->phy_type);
5536 if (itype != NULL)
5537 ifm_active |= itype->ifm_type;
5538
5539 if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_TX))
5540 ifm_active |= IFM_ETH_TXPAUSE;
5541 if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_RX))
5542 ifm_active |= IFM_ETH_RXPAUSE;
5543
5544 baudrate = ixl_search_link_speed(status->link_speed);
5545
5546 done:
5547 /* NET_ASSERT_LOCKED() except during attach */
5548 sc->sc_media_active = ifm_active;
5549 sc->sc_media_status = ifm_status;
5550
5551 sc->sc_ec.ec_if.if_baudrate = baudrate;
5552
5553 return link_state;
5554 }
5555
5556 static int
5557 ixl_establish_intx(struct ixl_softc *sc)
5558 {
5559 pci_chipset_tag_t pc = sc->sc_pa.pa_pc;
5560 pci_intr_handle_t *intr;
5561 char xnamebuf[32];
5562 char intrbuf[PCI_INTRSTR_LEN];
5563 char const *intrstr;
5564
5565 KASSERT(sc->sc_nintrs == 1);
5566
5567 intr = &sc->sc_ihp[0];
5568
5569 intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf));
5570 snprintf(xnamebuf, sizeof(xnamebuf), "%s:legacy",
5571 device_xname(sc->sc_dev));
5572
5573 sc->sc_ihs[0] = pci_intr_establish_xname(pc, *intr, IPL_NET, ixl_intr,
5574 sc, xnamebuf);
5575
5576 if (sc->sc_ihs[0] == NULL) {
5577 aprint_error_dev(sc->sc_dev,
5578 "unable to establish interrupt at %s\n", intrstr);
5579 return -1;
5580 }
5581
5582 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
5583 return 0;
5584 }
5585
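/*
 * ixl_establish_msix:
 *	Establish the MSI-X handlers: vector 0 services the admin queues
 *	and other events, and one vector per queue pair services its TX/RX
 *	interrupts.  Interrupt affinity is spread over the available CPUs
 *	where interrupt_distribute() permits.
 */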
5586 static int
5587 ixl_establish_msix(struct ixl_softc *sc)
5588 {
5589 pci_chipset_tag_t pc = sc->sc_pa.pa_pc;
5590 kcpuset_t *affinity;
5591 unsigned int vector = 0;
5592 unsigned int i;
5593 int affinity_to, r;
5594 char xnamebuf[32];
5595 char intrbuf[PCI_INTRSTR_LEN];
5596 char const *intrstr;
5597
5598 kcpuset_create(&affinity, false);
5599
5600 /* the "other" intr is mapped to vector 0 */
5601 vector = 0;
5602 intrstr = pci_intr_string(pc, sc->sc_ihp[vector],
5603 intrbuf, sizeof(intrbuf));
5604 snprintf(xnamebuf, sizeof(xnamebuf), "%s others",
5605 device_xname(sc->sc_dev));
5606 sc->sc_ihs[vector] = pci_intr_establish_xname(pc,
5607 sc->sc_ihp[vector], IPL_NET, ixl_other_intr,
5608 sc, xnamebuf);
5609 if (sc->sc_ihs[vector] == NULL) {
5610 aprint_error_dev(sc->sc_dev,
5611 "unable to establish interrupt at %s\n", intrstr);
5612 goto fail;
5613 }
5614
5615 aprint_normal_dev(sc->sc_dev, "other interrupt at %s", intrstr);
5616
5617 affinity_to = ncpu > (int)sc->sc_nqueue_pairs_max ? 1 : 0;
5618 affinity_to = (affinity_to + sc->sc_nqueue_pairs_max) % ncpu;
5619
5620 kcpuset_zero(affinity);
5621 kcpuset_set(affinity, affinity_to);
5622 r = interrupt_distribute(sc->sc_ihs[vector], affinity, NULL);
5623 if (r == 0) {
5624 aprint_normal(", affinity to %u", affinity_to);
5625 }
5626 aprint_normal("\n");
5627 vector++;
5628
5629 sc->sc_msix_vector_queue = vector;
5630 affinity_to = ncpu > (int)sc->sc_nqueue_pairs_max ? 1 : 0;
5631
5632 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
5633 intrstr = pci_intr_string(pc, sc->sc_ihp[vector],
5634 intrbuf, sizeof(intrbuf));
5635 snprintf(xnamebuf, sizeof(xnamebuf), "%s TXRX%d",
5636 device_xname(sc->sc_dev), i);
5637
5638 sc->sc_ihs[vector] = pci_intr_establish_xname(pc,
5639 sc->sc_ihp[vector], IPL_NET, ixl_queue_intr,
5640 (void *)&sc->sc_qps[i], xnamebuf);
5641
5642 if (sc->sc_ihs[vector] == NULL) {
5643 aprint_error_dev(sc->sc_dev,
5644 "unable to establish interrupt at %s\n", intrstr);
5645 goto fail;
5646 }
5647
5648 aprint_normal_dev(sc->sc_dev,
5649 		    "for TXRX%d interrupt at %s", i, intrstr);
5650
5651 kcpuset_zero(affinity);
5652 kcpuset_set(affinity, affinity_to);
5653 r = interrupt_distribute(sc->sc_ihs[vector], affinity, NULL);
5654 if (r == 0) {
5655 aprint_normal(", affinity to %u", affinity_to);
5656 affinity_to = (affinity_to + 1) % ncpu;
5657 }
5658 aprint_normal("\n");
5659 vector++;
5660 }
5661
5662 kcpuset_destroy(affinity);
5663
5664 return 0;
5665 fail:
5666 for (i = 0; i < vector; i++) {
5667 pci_intr_disestablish(pc, sc->sc_ihs[i]);
5668 }
5669
5670 sc->sc_msix_vector_queue = 0;
5672 kcpuset_destroy(affinity);
5673
5674 return -1;
5675 }
5676
5677 static void
5678 ixl_config_queue_intr(struct ixl_softc *sc)
5679 {
5680 unsigned int i, vector;
5681
5682 if (sc->sc_intrtype == PCI_INTR_TYPE_MSIX) {
5683 vector = sc->sc_msix_vector_queue;
5684 } else {
5685 vector = I40E_INTR_NOTX_INTR;
5686
5687 ixl_wr(sc, I40E_PFINT_LNKLST0,
5688 (I40E_INTR_NOTX_QUEUE <<
5689 I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
5690 (I40E_QUEUE_TYPE_RX <<
5691 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
5692 }
5693
5694 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
5695 ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), 0);
5696 ixl_flush(sc);
5697
5698 ixl_wr(sc, I40E_PFINT_LNKLSTN(i),
5699 ((i) << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
5700 (I40E_QUEUE_TYPE_RX <<
5701 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
5702
5703 ixl_wr(sc, I40E_QINT_RQCTL(i),
5704 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
5705 (I40E_ITR_INDEX_RX <<
5706 I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
5707 (I40E_INTR_NOTX_RX_QUEUE <<
5708 I40E_QINT_RQCTL_MSIX0_INDX_SHIFT) |
5709 (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
5710 (I40E_QUEUE_TYPE_TX <<
5711 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
5712 I40E_QINT_RQCTL_CAUSE_ENA_MASK);
5713
5714 ixl_wr(sc, I40E_QINT_TQCTL(i),
5715 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
5716 (I40E_ITR_INDEX_TX <<
5717 I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
5718 (I40E_INTR_NOTX_TX_QUEUE <<
5719 I40E_QINT_TQCTL_MSIX0_INDX_SHIFT) |
5720 (I40E_QUEUE_TYPE_EOL <<
5721 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
5722 (I40E_QUEUE_TYPE_RX <<
5723 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) |
5724 I40E_QINT_TQCTL_CAUSE_ENA_MASK);
5725
5726 if (sc->sc_intrtype == PCI_INTR_TYPE_MSIX)
5727 vector++;
5728 }
5729 ixl_flush(sc);
5730
5731 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_RX), 0x7a);
5732 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_TX), 0x7a);
5733 ixl_flush(sc);
5734 }
5735
5736 static void
5737 ixl_config_other_intr(struct ixl_softc *sc)
5738 {
5739 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0);
5740 (void)ixl_rd(sc, I40E_PFINT_ICR0);
5741
5742 ixl_wr(sc, I40E_PFINT_ICR0_ENA,
5743 I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
5744 I40E_PFINT_ICR0_ENA_GRST_MASK |
5745 I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
5746 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
5747 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
5748 I40E_PFINT_ICR0_ENA_VFLR_MASK |
5749 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
5750 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
5751 I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK);
5752
5753 ixl_wr(sc, I40E_PFINT_LNKLST0, 0x7FF);
5754 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_OTHER), 0);
5755 ixl_wr(sc, I40E_PFINT_STAT_CTL0,
5756 (I40E_ITR_INDEX_OTHER <<
5757 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT));
5758 ixl_flush(sc);
5759 }
5760
5761 static int
5762 ixl_setup_interrupts(struct ixl_softc *sc)
5763 {
5764 struct pci_attach_args *pa = &sc->sc_pa;
5765 pci_intr_type_t max_type, intr_type;
5766 int counts[PCI_INTR_TYPE_SIZE];
5767 int error;
5768 unsigned int i;
5769 bool retry;
5770
5771 memset(counts, 0, sizeof(counts));
5772 max_type = PCI_INTR_TYPE_MSIX;
5773 /* QPs + other interrupt */
5774 counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueue_pairs_max + 1;
5775 counts[PCI_INTR_TYPE_INTX] = 1;
5776
5777 if (ixl_param_nomsix)
5778 counts[PCI_INTR_TYPE_MSIX] = 0;
5779
5780 do {
5781 retry = false;
5782 error = pci_intr_alloc(pa, &sc->sc_ihp, counts, max_type);
5783 if (error != 0) {
5784 aprint_error_dev(sc->sc_dev,
5785 "couldn't map interrupt\n");
5786 break;
5787 }
5788
5789 intr_type = pci_intr_type(pa->pa_pc, sc->sc_ihp[0]);
5790 sc->sc_nintrs = counts[intr_type];
5791 KASSERT(sc->sc_nintrs > 0);
5792
5793 for (i = 0; i < sc->sc_nintrs; i++) {
5794 pci_intr_setattr(pa->pa_pc, &sc->sc_ihp[i],
5795 PCI_INTR_MPSAFE, true);
5796 }
5797
5798 sc->sc_ihs = kmem_alloc(sizeof(sc->sc_ihs[0]) * sc->sc_nintrs,
5799 KM_SLEEP);
5800
5801 if (intr_type == PCI_INTR_TYPE_MSIX) {
5802 error = ixl_establish_msix(sc);
5803 if (error) {
5804 counts[PCI_INTR_TYPE_MSIX] = 0;
5805 retry = true;
5806 }
5807 } else if (intr_type == PCI_INTR_TYPE_INTX) {
5808 error = ixl_establish_intx(sc);
5809 } else {
5810 error = -1;
5811 }
5812
5813 if (error) {
5814 kmem_free(sc->sc_ihs,
5815 sizeof(sc->sc_ihs[0]) * sc->sc_nintrs);
5816 pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs);
5817 } else {
5818 sc->sc_intrtype = intr_type;
5819 }
5820 } while (retry);
5821
5822 return error;
5823 }
5824
5825 static void
5826 ixl_teardown_interrupts(struct ixl_softc *sc)
5827 {
5828 struct pci_attach_args *pa = &sc->sc_pa;
5829 unsigned int i;
5830
5831 for (i = 0; i < sc->sc_nintrs; i++) {
5832 pci_intr_disestablish(pa->pa_pc, sc->sc_ihs[i]);
5833 }
5834
5835 pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs);
5836
5837 kmem_free(sc->sc_ihs, sizeof(sc->sc_ihs[0]) * sc->sc_nintrs);
5838 sc->sc_ihs = NULL;
5839 sc->sc_nintrs = 0;
5840 }
5841
5842 static int
5843 ixl_setup_stats(struct ixl_softc *sc)
5844 {
5845 struct ixl_queue_pair *qp;
5846 struct ixl_tx_ring *txr;
5847 struct ixl_rx_ring *rxr;
5848 struct ixl_stats_counters *isc;
5849 unsigned int i;
5850
5851 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
5852 qp = &sc->sc_qps[i];
5853 txr = qp->qp_txr;
5854 rxr = qp->qp_rxr;
5855
5856 evcnt_attach_dynamic(&txr->txr_defragged, EVCNT_TYPE_MISC,
5857 		    NULL, qp->qp_name, "m_defrag succeeded");
5858 		evcnt_attach_dynamic(&txr->txr_defrag_failed, EVCNT_TYPE_MISC,
5859 		    NULL, qp->qp_name, "m_defrag failed");
5860 evcnt_attach_dynamic(&txr->txr_pcqdrop, EVCNT_TYPE_MISC,
5861 NULL, qp->qp_name, "Dropped in pcq");
5862 evcnt_attach_dynamic(&txr->txr_transmitdef, EVCNT_TYPE_MISC,
5863 NULL, qp->qp_name, "Deferred transmit");
5864 evcnt_attach_dynamic(&txr->txr_intr, EVCNT_TYPE_INTR,
5865 NULL, qp->qp_name, "Interrupt on queue");
5866 evcnt_attach_dynamic(&txr->txr_defer, EVCNT_TYPE_MISC,
5867 NULL, qp->qp_name, "Handled queue in softint/workqueue");
5868
5869 evcnt_attach_dynamic(&rxr->rxr_mgethdr_failed, EVCNT_TYPE_MISC,
5870 NULL, qp->qp_name, "MGETHDR failed");
5871 evcnt_attach_dynamic(&rxr->rxr_mgetcl_failed, EVCNT_TYPE_MISC,
5872 NULL, qp->qp_name, "MCLGET failed");
5873 evcnt_attach_dynamic(&rxr->rxr_mbuf_load_failed,
5874 EVCNT_TYPE_MISC, NULL, qp->qp_name,
5875 "bus_dmamap_load_mbuf failed");
5876 evcnt_attach_dynamic(&rxr->rxr_intr, EVCNT_TYPE_INTR,
5877 NULL, qp->qp_name, "Interrupt on queue");
5878 evcnt_attach_dynamic(&rxr->rxr_defer, EVCNT_TYPE_MISC,
5879 NULL, qp->qp_name, "Handled queue in softint/workqueue");
5880 }
5881
5882 evcnt_attach_dynamic(&sc->sc_event_atq, EVCNT_TYPE_INTR,
5883 NULL, device_xname(sc->sc_dev), "Interrupt for other events");
5884 evcnt_attach_dynamic(&sc->sc_event_link, EVCNT_TYPE_MISC,
5885 NULL, device_xname(sc->sc_dev), "Link status event");
5886 evcnt_attach_dynamic(&sc->sc_event_ecc_err, EVCNT_TYPE_MISC,
5887 NULL, device_xname(sc->sc_dev), "ECC error");
5888 evcnt_attach_dynamic(&sc->sc_event_pci_exception, EVCNT_TYPE_MISC,
5889 NULL, device_xname(sc->sc_dev), "PCI exception");
5890 evcnt_attach_dynamic(&sc->sc_event_crit_err, EVCNT_TYPE_MISC,
5891 NULL, device_xname(sc->sc_dev), "Critical error");
5892
5893 isc = &sc->sc_stats_counters;
5894 evcnt_attach_dynamic(&isc->isc_crc_errors, EVCNT_TYPE_MISC,
5895 NULL, device_xname(sc->sc_dev), "CRC errors");
5896 evcnt_attach_dynamic(&isc->isc_illegal_bytes, EVCNT_TYPE_MISC,
5897 NULL, device_xname(sc->sc_dev), "Illegal bytes");
5898 evcnt_attach_dynamic(&isc->isc_mac_local_faults, EVCNT_TYPE_MISC,
5899 NULL, device_xname(sc->sc_dev), "Mac local faults");
5900 evcnt_attach_dynamic(&isc->isc_mac_remote_faults, EVCNT_TYPE_MISC,
5901 NULL, device_xname(sc->sc_dev), "Mac remote faults");
5902 evcnt_attach_dynamic(&isc->isc_link_xon_rx, EVCNT_TYPE_MISC,
5903 NULL, device_xname(sc->sc_dev), "Rx xon");
5904 evcnt_attach_dynamic(&isc->isc_link_xon_tx, EVCNT_TYPE_MISC,
5905 NULL, device_xname(sc->sc_dev), "Tx xon");
5906 evcnt_attach_dynamic(&isc->isc_link_xoff_rx, EVCNT_TYPE_MISC,
5907 NULL, device_xname(sc->sc_dev), "Rx xoff");
5908 evcnt_attach_dynamic(&isc->isc_link_xoff_tx, EVCNT_TYPE_MISC,
5909 NULL, device_xname(sc->sc_dev), "Tx xoff");
5910 evcnt_attach_dynamic(&isc->isc_rx_fragments, EVCNT_TYPE_MISC,
5911 NULL, device_xname(sc->sc_dev), "Rx fragments");
5912 evcnt_attach_dynamic(&isc->isc_rx_jabber, EVCNT_TYPE_MISC,
5913 NULL, device_xname(sc->sc_dev), "Rx jabber");
5914
5915 evcnt_attach_dynamic(&isc->isc_rx_size_64, EVCNT_TYPE_MISC,
5916 NULL, device_xname(sc->sc_dev), "Rx size 64");
5917 evcnt_attach_dynamic(&isc->isc_rx_size_127, EVCNT_TYPE_MISC,
5918 NULL, device_xname(sc->sc_dev), "Rx size 127");
5919 evcnt_attach_dynamic(&isc->isc_rx_size_255, EVCNT_TYPE_MISC,
5920 NULL, device_xname(sc->sc_dev), "Rx size 255");
5921 evcnt_attach_dynamic(&isc->isc_rx_size_511, EVCNT_TYPE_MISC,
5922 NULL, device_xname(sc->sc_dev), "Rx size 511");
5923 evcnt_attach_dynamic(&isc->isc_rx_size_1023, EVCNT_TYPE_MISC,
5924 NULL, device_xname(sc->sc_dev), "Rx size 1023");
5925 evcnt_attach_dynamic(&isc->isc_rx_size_1522, EVCNT_TYPE_MISC,
5926 NULL, device_xname(sc->sc_dev), "Rx size 1522");
5927 evcnt_attach_dynamic(&isc->isc_rx_size_big, EVCNT_TYPE_MISC,
5928 NULL, device_xname(sc->sc_dev), "Rx jumbo packets");
5929 evcnt_attach_dynamic(&isc->isc_rx_undersize, EVCNT_TYPE_MISC,
5930 NULL, device_xname(sc->sc_dev), "Rx under size");
5931 evcnt_attach_dynamic(&isc->isc_rx_oversize, EVCNT_TYPE_MISC,
5932 NULL, device_xname(sc->sc_dev), "Rx over size");
5933
5934 evcnt_attach_dynamic(&isc->isc_rx_bytes, EVCNT_TYPE_MISC,
5935 NULL, device_xname(sc->sc_dev), "Rx bytes / port");
5936 evcnt_attach_dynamic(&isc->isc_rx_discards, EVCNT_TYPE_MISC,
5937 NULL, device_xname(sc->sc_dev), "Rx discards / port");
5938 evcnt_attach_dynamic(&isc->isc_rx_unicast, EVCNT_TYPE_MISC,
5939 NULL, device_xname(sc->sc_dev), "Rx unicast / port");
5940 evcnt_attach_dynamic(&isc->isc_rx_multicast, EVCNT_TYPE_MISC,
5941 NULL, device_xname(sc->sc_dev), "Rx multicast / port");
5942 evcnt_attach_dynamic(&isc->isc_rx_broadcast, EVCNT_TYPE_MISC,
5943 NULL, device_xname(sc->sc_dev), "Rx broadcast / port");
5944
5945 evcnt_attach_dynamic(&isc->isc_vsi_rx_bytes, EVCNT_TYPE_MISC,
5946 NULL, device_xname(sc->sc_dev), "Rx bytes / vsi");
5947 evcnt_attach_dynamic(&isc->isc_vsi_rx_discards, EVCNT_TYPE_MISC,
5948 NULL, device_xname(sc->sc_dev), "Rx discard / vsi");
5949 evcnt_attach_dynamic(&isc->isc_vsi_rx_unicast, EVCNT_TYPE_MISC,
5950 NULL, device_xname(sc->sc_dev), "Rx unicast / vsi");
5951 evcnt_attach_dynamic(&isc->isc_vsi_rx_multicast, EVCNT_TYPE_MISC,
5952 NULL, device_xname(sc->sc_dev), "Rx multicast / vsi");
5953 evcnt_attach_dynamic(&isc->isc_vsi_rx_broadcast, EVCNT_TYPE_MISC,
5954 NULL, device_xname(sc->sc_dev), "Rx broadcast / vsi");
5955
5956 evcnt_attach_dynamic(&isc->isc_tx_size_64, EVCNT_TYPE_MISC,
5957 NULL, device_xname(sc->sc_dev), "Tx size 64");
5958 evcnt_attach_dynamic(&isc->isc_tx_size_127, EVCNT_TYPE_MISC,
5959 NULL, device_xname(sc->sc_dev), "Tx size 127");
5960 evcnt_attach_dynamic(&isc->isc_tx_size_255, EVCNT_TYPE_MISC,
5961 NULL, device_xname(sc->sc_dev), "Tx size 255");
5962 evcnt_attach_dynamic(&isc->isc_tx_size_511, EVCNT_TYPE_MISC,
5963 NULL, device_xname(sc->sc_dev), "Tx size 511");
5964 evcnt_attach_dynamic(&isc->isc_tx_size_1023, EVCNT_TYPE_MISC,
5965 NULL, device_xname(sc->sc_dev), "Tx size 1023");
5966 evcnt_attach_dynamic(&isc->isc_tx_size_1522, EVCNT_TYPE_MISC,
5967 NULL, device_xname(sc->sc_dev), "Tx size 1522");
5968 evcnt_attach_dynamic(&isc->isc_tx_size_big, EVCNT_TYPE_MISC,
5969 NULL, device_xname(sc->sc_dev), "Tx jumbo packets");
5970
5971 evcnt_attach_dynamic(&isc->isc_tx_bytes, EVCNT_TYPE_MISC,
5972 NULL, device_xname(sc->sc_dev), "Tx bytes / port");
5973 evcnt_attach_dynamic(&isc->isc_tx_dropped_link_down, EVCNT_TYPE_MISC,
5974 NULL, device_xname(sc->sc_dev),
5975 "Tx dropped due to link down / port");
5976 evcnt_attach_dynamic(&isc->isc_tx_unicast, EVCNT_TYPE_MISC,
5977 NULL, device_xname(sc->sc_dev), "Tx unicast / port");
5978 evcnt_attach_dynamic(&isc->isc_tx_multicast, EVCNT_TYPE_MISC,
5979 NULL, device_xname(sc->sc_dev), "Tx multicast / port");
5980 evcnt_attach_dynamic(&isc->isc_tx_broadcast, EVCNT_TYPE_MISC,
5981 NULL, device_xname(sc->sc_dev), "Tx broadcast / port");
5982
5983 evcnt_attach_dynamic(&isc->isc_vsi_tx_bytes, EVCNT_TYPE_MISC,
5984 NULL, device_xname(sc->sc_dev), "Tx bytes / vsi");
5985 evcnt_attach_dynamic(&isc->isc_vsi_tx_errors, EVCNT_TYPE_MISC,
5986 NULL, device_xname(sc->sc_dev), "Tx errors / vsi");
5987 evcnt_attach_dynamic(&isc->isc_vsi_tx_unicast, EVCNT_TYPE_MISC,
5988 NULL, device_xname(sc->sc_dev), "Tx unicast / vsi");
5989 evcnt_attach_dynamic(&isc->isc_vsi_tx_multicast, EVCNT_TYPE_MISC,
5990 NULL, device_xname(sc->sc_dev), "Tx multicast / vsi");
5991 evcnt_attach_dynamic(&isc->isc_vsi_tx_broadcast, EVCNT_TYPE_MISC,
5992 NULL, device_xname(sc->sc_dev), "Tx broadcast / vsi");
5993
5994 sc->sc_stats_intval = ixl_param_stats_interval;
5995 callout_init(&sc->sc_stats_callout, CALLOUT_MPSAFE);
5996 callout_setfunc(&sc->sc_stats_callout, ixl_stats_callout, sc);
5997 ixl_work_set(&sc->sc_stats_task, ixl_stats_update, sc);
5998
5999 return 0;
6000 }
6001
6002 static void
6003 ixl_teardown_stats(struct ixl_softc *sc)
6004 {
6005 struct ixl_tx_ring *txr;
6006 struct ixl_rx_ring *rxr;
6007 struct ixl_stats_counters *isc;
6008 unsigned int i;
6009
6010 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
6011 txr = sc->sc_qps[i].qp_txr;
6012 rxr = sc->sc_qps[i].qp_rxr;
6013
6014 evcnt_detach(&txr->txr_defragged);
6015 evcnt_detach(&txr->txr_defrag_failed);
6016 evcnt_detach(&txr->txr_pcqdrop);
6017 evcnt_detach(&txr->txr_transmitdef);
6018 evcnt_detach(&txr->txr_intr);
6019 evcnt_detach(&txr->txr_defer);
6020
6021 evcnt_detach(&rxr->rxr_mgethdr_failed);
6022 evcnt_detach(&rxr->rxr_mgetcl_failed);
6023 evcnt_detach(&rxr->rxr_mbuf_load_failed);
6024 evcnt_detach(&rxr->rxr_intr);
6025 evcnt_detach(&rxr->rxr_defer);
6026 }
6027
6028 isc = &sc->sc_stats_counters;
6029 evcnt_detach(&isc->isc_crc_errors);
6030 evcnt_detach(&isc->isc_illegal_bytes);
6031 evcnt_detach(&isc->isc_mac_local_faults);
6032 evcnt_detach(&isc->isc_mac_remote_faults);
6033 evcnt_detach(&isc->isc_link_xon_rx);
6034 evcnt_detach(&isc->isc_link_xon_tx);
6035 evcnt_detach(&isc->isc_link_xoff_rx);
6036 evcnt_detach(&isc->isc_link_xoff_tx);
6037 evcnt_detach(&isc->isc_rx_fragments);
6038 evcnt_detach(&isc->isc_rx_jabber);
6039 evcnt_detach(&isc->isc_rx_bytes);
6040 evcnt_detach(&isc->isc_rx_discards);
6041 evcnt_detach(&isc->isc_rx_unicast);
6042 evcnt_detach(&isc->isc_rx_multicast);
6043 evcnt_detach(&isc->isc_rx_broadcast);
6044 evcnt_detach(&isc->isc_rx_size_64);
6045 evcnt_detach(&isc->isc_rx_size_127);
6046 evcnt_detach(&isc->isc_rx_size_255);
6047 evcnt_detach(&isc->isc_rx_size_511);
6048 evcnt_detach(&isc->isc_rx_size_1023);
6049 evcnt_detach(&isc->isc_rx_size_1522);
6050 evcnt_detach(&isc->isc_rx_size_big);
6051 evcnt_detach(&isc->isc_rx_undersize);
6052 evcnt_detach(&isc->isc_rx_oversize);
6053 evcnt_detach(&isc->isc_tx_bytes);
6054 evcnt_detach(&isc->isc_tx_dropped_link_down);
6055 evcnt_detach(&isc->isc_tx_unicast);
6056 evcnt_detach(&isc->isc_tx_multicast);
6057 evcnt_detach(&isc->isc_tx_broadcast);
6058 evcnt_detach(&isc->isc_tx_size_64);
6059 evcnt_detach(&isc->isc_tx_size_127);
6060 evcnt_detach(&isc->isc_tx_size_255);
6061 evcnt_detach(&isc->isc_tx_size_511);
6062 evcnt_detach(&isc->isc_tx_size_1023);
6063 evcnt_detach(&isc->isc_tx_size_1522);
6064 evcnt_detach(&isc->isc_tx_size_big);
6065 evcnt_detach(&isc->isc_vsi_rx_discards);
6066 evcnt_detach(&isc->isc_vsi_rx_bytes);
6067 evcnt_detach(&isc->isc_vsi_rx_unicast);
6068 evcnt_detach(&isc->isc_vsi_rx_multicast);
6069 evcnt_detach(&isc->isc_vsi_rx_broadcast);
6070 evcnt_detach(&isc->isc_vsi_tx_errors);
6071 evcnt_detach(&isc->isc_vsi_tx_bytes);
6072 evcnt_detach(&isc->isc_vsi_tx_unicast);
6073 evcnt_detach(&isc->isc_vsi_tx_multicast);
6074 evcnt_detach(&isc->isc_vsi_tx_broadcast);
6075
6076 evcnt_detach(&sc->sc_event_atq);
6077 evcnt_detach(&sc->sc_event_link);
6078 evcnt_detach(&sc->sc_event_ecc_err);
6079 evcnt_detach(&sc->sc_event_pci_exception);
6080 evcnt_detach(&sc->sc_event_crit_err);
6081
6082 callout_destroy(&sc->sc_stats_callout);
6083 }
6084
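/*
 * Periodic trigger for statistics collection.  The callout only
 * schedules sc_stats_task on the driver workqueue and then re-arms
 * itself for the next sc_stats_intval milliseconds; the actual
 * register reads happen in ixl_stats_update().
 */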
6085 static void
6086 ixl_stats_callout(void *xsc)
6087 {
6088 struct ixl_softc *sc = xsc;
6089
6090 ixl_work_add(sc->sc_workq, &sc->sc_stats_task);
6091 callout_schedule(&sc->sc_stats_callout, mstohz(sc->sc_stats_intval));
6092 }
6093
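/*
 * Read a free-running hardware counter and return how far it has
 * advanced since the previous snapshot stored in *offset.  A zero
 * reg_hi means the counter is 32 bits wide, otherwise the high half
 * extends it to 48 bits.  Wraparound is handled by adding 2^bitwidth
 * before subtracting the old snapshot; e.g. for a 32-bit counter an
 * old snapshot of 0xfffffff0 and a new value of 0x10 yields a delta
 * of 0x20.  On the very first read (has_offset == false) there is no
 * baseline yet, so only the snapshot is recorded and 0 is returned.
 */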
6094 static uint64_t
6095 ixl_stat_delta(struct ixl_softc *sc, uint32_t reg_hi, uint32_t reg_lo,
6096 uint64_t *offset, bool has_offset)
6097 {
6098 uint64_t value, delta;
6099 int bitwidth;
6100
6101 bitwidth = reg_hi == 0 ? 32 : 48;
6102
6103 value = ixl_rd(sc, reg_lo);
6104
6105 if (bitwidth > 32) {
6106 value |= ((uint64_t)ixl_rd(sc, reg_hi) << 32);
6107 }
6108
6109 if (__predict_true(has_offset)) {
6110 delta = value;
6111 if (value < *offset)
6112 delta += ((uint64_t)1 << bitwidth);
6113 delta -= *offset;
6114 } else {
6115 delta = 0;
6116 }
6117 atomic_swap_64(offset, value);
6118
6119 return delta;
6120 }
6121
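/*
 * Workqueue body for statistics collection: sample every per-port
 * (GLPRT_*) and per-VSI (GLV_*) counter through ixl_stat_delta() and
 * fold the delta into the matching event counter.  The evcnt updates
 * use atomic 64-bit adds because the counters may be inspected
 * concurrently while this task is running.
 */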
6122 static void
6123 ixl_stats_update(void *xsc)
6124 {
6125 struct ixl_softc *sc = xsc;
6126 struct ixl_stats_counters *isc;
6127 uint64_t delta;
6128
6129 isc = &sc->sc_stats_counters;
6130
6131 /* errors */
6132 delta = ixl_stat_delta(sc,
6133 0, I40E_GLPRT_CRCERRS(sc->sc_port),
6134 &isc->isc_crc_errors_offset, isc->isc_has_offset);
6135 atomic_add_64(&isc->isc_crc_errors.ev_count, delta);
6136
6137 delta = ixl_stat_delta(sc,
6138 0, I40E_GLPRT_ILLERRC(sc->sc_port),
6139 &isc->isc_illegal_bytes_offset, isc->isc_has_offset);
6140 atomic_add_64(&isc->isc_illegal_bytes.ev_count, delta);
6141
6142 /* rx */
6143 delta = ixl_stat_delta(sc,
6144 I40E_GLPRT_GORCH(sc->sc_port), I40E_GLPRT_GORCL(sc->sc_port),
6145 &isc->isc_rx_bytes_offset, isc->isc_has_offset);
6146 atomic_add_64(&isc->isc_rx_bytes.ev_count, delta);
6147
6148 delta = ixl_stat_delta(sc,
6149 0, I40E_GLPRT_RDPC(sc->sc_port),
6150 &isc->isc_rx_discards_offset, isc->isc_has_offset);
6151 atomic_add_64(&isc->isc_rx_discards.ev_count, delta);
6152
6153 delta = ixl_stat_delta(sc,
6154 I40E_GLPRT_UPRCH(sc->sc_port), I40E_GLPRT_UPRCL(sc->sc_port),
6155 &isc->isc_rx_unicast_offset, isc->isc_has_offset);
6156 atomic_add_64(&isc->isc_rx_unicast.ev_count, delta);
6157
6158 delta = ixl_stat_delta(sc,
6159 I40E_GLPRT_MPRCH(sc->sc_port), I40E_GLPRT_MPRCL(sc->sc_port),
6160 &isc->isc_rx_multicast_offset, isc->isc_has_offset);
6161 atomic_add_64(&isc->isc_rx_multicast.ev_count, delta);
6162
6163 delta = ixl_stat_delta(sc,
6164 I40E_GLPRT_BPRCH(sc->sc_port), I40E_GLPRT_BPRCL(sc->sc_port),
6165 &isc->isc_rx_broadcast_offset, isc->isc_has_offset);
6166 atomic_add_64(&isc->isc_rx_broadcast.ev_count, delta);
6167
6168 /* Packet size stats rx */
6169 delta = ixl_stat_delta(sc,
6170 I40E_GLPRT_PRC64H(sc->sc_port), I40E_GLPRT_PRC64L(sc->sc_port),
6171 &isc->isc_rx_size_64_offset, isc->isc_has_offset);
6172 atomic_add_64(&isc->isc_rx_size_64.ev_count, delta);
6173
6174 delta = ixl_stat_delta(sc,
6175 I40E_GLPRT_PRC127H(sc->sc_port), I40E_GLPRT_PRC127L(sc->sc_port),
6176 &isc->isc_rx_size_127_offset, isc->isc_has_offset);
6177 atomic_add_64(&isc->isc_rx_size_127.ev_count, delta);
6178
6179 delta = ixl_stat_delta(sc,
6180 I40E_GLPRT_PRC255H(sc->sc_port), I40E_GLPRT_PRC255L(sc->sc_port),
6181 &isc->isc_rx_size_255_offset, isc->isc_has_offset);
6182 atomic_add_64(&isc->isc_rx_size_255.ev_count, delta);
6183
6184 delta = ixl_stat_delta(sc,
6185 I40E_GLPRT_PRC511H(sc->sc_port), I40E_GLPRT_PRC511L(sc->sc_port),
6186 &isc->isc_rx_size_511_offset, isc->isc_has_offset);
6187 atomic_add_64(&isc->isc_rx_size_511.ev_count, delta);
6188
6189 delta = ixl_stat_delta(sc,
6190 I40E_GLPRT_PRC1023H(sc->sc_port), I40E_GLPRT_PRC1023L(sc->sc_port),
6191 &isc->isc_rx_size_1023_offset, isc->isc_has_offset);
6192 atomic_add_64(&isc->isc_rx_size_1023.ev_count, delta);
6193
6194 delta = ixl_stat_delta(sc,
6195 I40E_GLPRT_PRC1522H(sc->sc_port), I40E_GLPRT_PRC1522L(sc->sc_port),
6196 &isc->isc_rx_size_1522_offset, isc->isc_has_offset);
6197 atomic_add_64(&isc->isc_rx_size_1522.ev_count, delta);
6198
6199 delta = ixl_stat_delta(sc,
6200 I40E_GLPRT_PRC9522H(sc->sc_port), I40E_GLPRT_PRC9522L(sc->sc_port),
6201 &isc->isc_rx_size_big_offset, isc->isc_has_offset);
6202 atomic_add_64(&isc->isc_rx_size_big.ev_count, delta);
6203
6204 delta = ixl_stat_delta(sc,
6205 0, I40E_GLPRT_RUC(sc->sc_port),
6206 &isc->isc_rx_undersize_offset, isc->isc_has_offset);
6207 atomic_add_64(&isc->isc_rx_undersize.ev_count, delta);
6208
6209 delta = ixl_stat_delta(sc,
6210 0, I40E_GLPRT_ROC(sc->sc_port),
6211 &isc->isc_rx_oversize_offset, isc->isc_has_offset);
6212 atomic_add_64(&isc->isc_rx_oversize.ev_count, delta);
6213
6214 /* tx */
6215 delta = ixl_stat_delta(sc,
6216 I40E_GLPRT_GOTCH(sc->sc_port), I40E_GLPRT_GOTCL(sc->sc_port),
6217 &isc->isc_tx_bytes_offset, isc->isc_has_offset);
6218 atomic_add_64(&isc->isc_tx_bytes.ev_count, delta);
6219
6220 delta = ixl_stat_delta(sc,
6221 0, I40E_GLPRT_TDOLD(sc->sc_port),
6222 &isc->isc_tx_dropped_link_down_offset, isc->isc_has_offset);
6223 atomic_add_64(&isc->isc_tx_dropped_link_down.ev_count, delta);
6224
6225 delta = ixl_stat_delta(sc,
6226 I40E_GLPRT_UPTCH(sc->sc_port), I40E_GLPRT_UPTCL(sc->sc_port),
6227 &isc->isc_tx_unicast_offset, isc->isc_has_offset);
6228 atomic_add_64(&isc->isc_tx_unicast.ev_count, delta);
6229
6230 delta = ixl_stat_delta(sc,
6231 I40E_GLPRT_MPTCH(sc->sc_port), I40E_GLPRT_MPTCL(sc->sc_port),
6232 &isc->isc_tx_multicast_offset, isc->isc_has_offset);
6233 atomic_add_64(&isc->isc_tx_multicast.ev_count, delta);
6234
6235 delta = ixl_stat_delta(sc,
6236 I40E_GLPRT_BPTCH(sc->sc_port), I40E_GLPRT_BPTCL(sc->sc_port),
6237 &isc->isc_tx_broadcast_offset, isc->isc_has_offset);
6238 atomic_add_64(&isc->isc_tx_broadcast.ev_count, delta);
6239
6240 /* Packet size stats tx */
6241 delta = ixl_stat_delta(sc,
6242 	    I40E_GLPRT_PTC64H(sc->sc_port), I40E_GLPRT_PTC64L(sc->sc_port),
6243 &isc->isc_tx_size_64_offset, isc->isc_has_offset);
6244 atomic_add_64(&isc->isc_tx_size_64.ev_count, delta);
6245
6246 delta = ixl_stat_delta(sc,
6247 I40E_GLPRT_PTC127H(sc->sc_port), I40E_GLPRT_PTC127L(sc->sc_port),
6248 &isc->isc_tx_size_127_offset, isc->isc_has_offset);
6249 atomic_add_64(&isc->isc_tx_size_127.ev_count, delta);
6250
6251 delta = ixl_stat_delta(sc,
6252 I40E_GLPRT_PTC255H(sc->sc_port), I40E_GLPRT_PTC255L(sc->sc_port),
6253 &isc->isc_tx_size_255_offset, isc->isc_has_offset);
6254 atomic_add_64(&isc->isc_tx_size_255.ev_count, delta);
6255
6256 delta = ixl_stat_delta(sc,
6257 I40E_GLPRT_PTC511H(sc->sc_port), I40E_GLPRT_PTC511L(sc->sc_port),
6258 &isc->isc_tx_size_511_offset, isc->isc_has_offset);
6259 atomic_add_64(&isc->isc_tx_size_511.ev_count, delta);
6260
6261 delta = ixl_stat_delta(sc,
6262 I40E_GLPRT_PTC1023H(sc->sc_port), I40E_GLPRT_PTC1023L(sc->sc_port),
6263 &isc->isc_tx_size_1023_offset, isc->isc_has_offset);
6264 atomic_add_64(&isc->isc_tx_size_1023.ev_count, delta);
6265
6266 delta = ixl_stat_delta(sc,
6267 I40E_GLPRT_PTC1522H(sc->sc_port), I40E_GLPRT_PTC1522L(sc->sc_port),
6268 &isc->isc_tx_size_1522_offset, isc->isc_has_offset);
6269 atomic_add_64(&isc->isc_tx_size_1522.ev_count, delta);
6270
6271 delta = ixl_stat_delta(sc,
6272 I40E_GLPRT_PTC9522H(sc->sc_port), I40E_GLPRT_PTC9522L(sc->sc_port),
6273 &isc->isc_tx_size_big_offset, isc->isc_has_offset);
6274 atomic_add_64(&isc->isc_tx_size_big.ev_count, delta);
6275
6276 /* mac faults */
6277 delta = ixl_stat_delta(sc,
6278 0, I40E_GLPRT_MLFC(sc->sc_port),
6279 &isc->isc_mac_local_faults_offset, isc->isc_has_offset);
6280 atomic_add_64(&isc->isc_mac_local_faults.ev_count, delta);
6281
6282 delta = ixl_stat_delta(sc,
6283 0, I40E_GLPRT_MRFC(sc->sc_port),
6284 &isc->isc_mac_remote_faults_offset, isc->isc_has_offset);
6285 atomic_add_64(&isc->isc_mac_remote_faults.ev_count, delta);
6286
6287 /* Flow control (LFC) stats */
6288 delta = ixl_stat_delta(sc,
6289 0, I40E_GLPRT_LXONRXC(sc->sc_port),
6290 &isc->isc_link_xon_rx_offset, isc->isc_has_offset);
6291 atomic_add_64(&isc->isc_link_xon_rx.ev_count, delta);
6292
6293 delta = ixl_stat_delta(sc,
6294 0, I40E_GLPRT_LXONTXC(sc->sc_port),
6295 &isc->isc_link_xon_tx_offset, isc->isc_has_offset);
6296 atomic_add_64(&isc->isc_link_xon_tx.ev_count, delta);
6297
6298 delta = ixl_stat_delta(sc,
6299 0, I40E_GLPRT_LXOFFRXC(sc->sc_port),
6300 &isc->isc_link_xoff_rx_offset, isc->isc_has_offset);
6301 atomic_add_64(&isc->isc_link_xoff_rx.ev_count, delta);
6302
6303 delta = ixl_stat_delta(sc,
6304 0, I40E_GLPRT_LXOFFTXC(sc->sc_port),
6305 &isc->isc_link_xoff_tx_offset, isc->isc_has_offset);
6306 atomic_add_64(&isc->isc_link_xoff_tx.ev_count, delta);
6307
6308 /* fragments */
6309 delta = ixl_stat_delta(sc,
6310 0, I40E_GLPRT_RFC(sc->sc_port),
6311 &isc->isc_rx_fragments_offset, isc->isc_has_offset);
6312 atomic_add_64(&isc->isc_rx_fragments.ev_count, delta);
6313
6314 delta = ixl_stat_delta(sc,
6315 0, I40E_GLPRT_RJC(sc->sc_port),
6316 &isc->isc_rx_jabber_offset, isc->isc_has_offset);
6317 atomic_add_64(&isc->isc_rx_jabber.ev_count, delta);
6318
6319 /* VSI rx counters */
6320 delta = ixl_stat_delta(sc,
6321 0, I40E_GLV_RDPC(sc->sc_vsi_stat_counter_idx),
6322 &isc->isc_vsi_rx_discards_offset, isc->isc_has_offset);
6323 atomic_add_64(&isc->isc_vsi_rx_discards.ev_count, delta);
6324
6325 delta = ixl_stat_delta(sc,
6326 I40E_GLV_GORCH(sc->sc_vsi_stat_counter_idx),
6327 I40E_GLV_GORCL(sc->sc_vsi_stat_counter_idx),
6328 &isc->isc_vsi_rx_bytes_offset, isc->isc_has_offset);
6329 atomic_add_64(&isc->isc_vsi_rx_bytes.ev_count, delta);
6330
6331 delta = ixl_stat_delta(sc,
6332 I40E_GLV_UPRCH(sc->sc_vsi_stat_counter_idx),
6333 I40E_GLV_UPRCL(sc->sc_vsi_stat_counter_idx),
6334 &isc->isc_vsi_rx_unicast_offset, isc->isc_has_offset);
6335 atomic_add_64(&isc->isc_vsi_rx_unicast.ev_count, delta);
6336
6337 delta = ixl_stat_delta(sc,
6338 I40E_GLV_MPRCH(sc->sc_vsi_stat_counter_idx),
6339 I40E_GLV_MPRCL(sc->sc_vsi_stat_counter_idx),
6340 &isc->isc_vsi_rx_multicast_offset, isc->isc_has_offset);
6341 atomic_add_64(&isc->isc_vsi_rx_multicast.ev_count, delta);
6342
6343 delta = ixl_stat_delta(sc,
6344 I40E_GLV_BPRCH(sc->sc_vsi_stat_counter_idx),
6345 I40E_GLV_BPRCL(sc->sc_vsi_stat_counter_idx),
6346 &isc->isc_vsi_rx_broadcast_offset, isc->isc_has_offset);
6347 atomic_add_64(&isc->isc_vsi_rx_broadcast.ev_count, delta);
6348
6349 /* VSI tx counters */
6350 delta = ixl_stat_delta(sc,
6351 0, I40E_GLV_TEPC(sc->sc_vsi_stat_counter_idx),
6352 &isc->isc_vsi_tx_errors_offset, isc->isc_has_offset);
6353 atomic_add_64(&isc->isc_vsi_tx_errors.ev_count, delta);
6354
6355 delta = ixl_stat_delta(sc,
6356 I40E_GLV_GOTCH(sc->sc_vsi_stat_counter_idx),
6357 I40E_GLV_GOTCL(sc->sc_vsi_stat_counter_idx),
6358 &isc->isc_vsi_tx_bytes_offset, isc->isc_has_offset);
6359 atomic_add_64(&isc->isc_vsi_tx_bytes.ev_count, delta);
6360
6361 delta = ixl_stat_delta(sc,
6362 I40E_GLV_UPTCH(sc->sc_vsi_stat_counter_idx),
6363 I40E_GLV_UPTCL(sc->sc_vsi_stat_counter_idx),
6364 &isc->isc_vsi_tx_unicast_offset, isc->isc_has_offset);
6365 atomic_add_64(&isc->isc_vsi_tx_unicast.ev_count, delta);
6366
6367 delta = ixl_stat_delta(sc,
6368 I40E_GLV_MPTCH(sc->sc_vsi_stat_counter_idx),
6369 I40E_GLV_MPTCL(sc->sc_vsi_stat_counter_idx),
6370 &isc->isc_vsi_tx_multicast_offset, isc->isc_has_offset);
6371 atomic_add_64(&isc->isc_vsi_tx_multicast.ev_count, delta);
6372
6373 delta = ixl_stat_delta(sc,
6374 I40E_GLV_BPTCH(sc->sc_vsi_stat_counter_idx),
6375 I40E_GLV_BPTCL(sc->sc_vsi_stat_counter_idx),
6376 &isc->isc_vsi_tx_broadcast_offset, isc->isc_has_offset);
6377 atomic_add_64(&isc->isc_vsi_tx_broadcast.ev_count, delta);
6378 }
6379
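/*
 * Create the per-device sysctl tree.  The nodes end up under
 * hw.<devname> (for example hw.ixl0 for the first device) and expose
 * txrx_workqueue, stats_interval and the Rx/Tx processing limits.
 * An illustrative invocation from userland might look like
 * "sysctl -w hw.ixl0.rx.intr_process_limit=512"; the value 512 is
 * only an example, not a recommended setting.
 */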
6380 static int
6381 ixl_setup_sysctls(struct ixl_softc *sc)
6382 {
6383 const char *devname;
6384 struct sysctllog **log;
6385 const struct sysctlnode *rnode, *rxnode, *txnode;
6386 int error;
6387
6388 log = &sc->sc_sysctllog;
6389 devname = device_xname(sc->sc_dev);
6390
6391 error = sysctl_createv(log, 0, NULL, &rnode,
6392 0, CTLTYPE_NODE, devname,
6393 SYSCTL_DESCR("ixl information and settings"),
6394 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
6395 if (error)
6396 goto out;
6397
6398 error = sysctl_createv(log, 0, &rnode, NULL,
6399 CTLFLAG_READWRITE, CTLTYPE_BOOL, "txrx_workqueue",
6400 SYSCTL_DESCR("Use workqueue for packet processing"),
6401 NULL, 0, &sc->sc_txrx_workqueue, 0, CTL_CREATE, CTL_EOL);
6402 if (error)
6403 goto out;
6404
6405 error = sysctl_createv(log, 0, &rnode, NULL,
6406 CTLFLAG_READONLY, CTLTYPE_INT, "stats_interval",
6407 SYSCTL_DESCR("Statistics collection interval in milliseconds"),
6408 	    NULL, 0, &sc->sc_stats_intval, 0, CTL_CREATE, CTL_EOL);
	if (error)
		goto out;
6409
6410 error = sysctl_createv(log, 0, &rnode, &rxnode,
6411 0, CTLTYPE_NODE, "rx",
6412 SYSCTL_DESCR("ixl information and settings for Rx"),
6413 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
6414 if (error)
6415 goto out;
6416
6417 error = sysctl_createv(log, 0, &rxnode, NULL,
6418 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
6419 SYSCTL_DESCR("max number of Rx packets"
6420 " to process for interrupt processing"),
6421 NULL, 0, &sc->sc_rx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
6422 if (error)
6423 goto out;
6424
6425 error = sysctl_createv(log, 0, &rxnode, NULL,
6426 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
6427 SYSCTL_DESCR("max number of Rx packets"
6428 " to process for deferred processing"),
6429 NULL, 0, &sc->sc_rx_process_limit, 0, CTL_CREATE, CTL_EOL);
6430 if (error)
6431 goto out;
6432
6433 error = sysctl_createv(log, 0, &rnode, &txnode,
6434 0, CTLTYPE_NODE, "tx",
6435 SYSCTL_DESCR("ixl information and settings for Tx"),
6436 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
6437 if (error)
6438 goto out;
6439
6440 error = sysctl_createv(log, 0, &txnode, NULL,
6441 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
6442 SYSCTL_DESCR("max number of Tx packets"
6443 " to process for interrupt processing"),
6444 NULL, 0, &sc->sc_tx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
6445 if (error)
6446 goto out;
6447
6448 error = sysctl_createv(log, 0, &txnode, NULL,
6449 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
6450 SYSCTL_DESCR("max number of Tx packets"
6451 " to process for deferred processing"),
6452 NULL, 0, &sc->sc_tx_process_limit, 0, CTL_CREATE, CTL_EOL);
6453 if (error)
6454 goto out;
6455
6456 out:
6457 if (error) {
6458 aprint_error_dev(sc->sc_dev,
6459 "unable to create sysctl node\n");
6460 sysctl_teardown(log);
6461 }
6462
6463 return error;
6464 }
6465
6466 static void
6467 ixl_teardown_sysctls(struct ixl_softc *sc)
6468 {
6469
6470 sysctl_teardown(&sc->sc_sysctllog);
6471 }
6472
6473 static struct workqueue *
6474 ixl_workq_create(const char *name, pri_t prio, int ipl, int flags)
6475 {
6476 struct workqueue *wq;
6477 int error;
6478
6479 error = workqueue_create(&wq, name, ixl_workq_work, NULL,
6480 prio, ipl, flags);
6481
6482 if (error)
6483 return NULL;
6484
6485 return wq;
6486 }
6487
6488 static void
6489 ixl_workq_destroy(struct workqueue *wq)
6490 {
6491
6492 workqueue_destroy(wq);
6493 }
6494
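/*
 * Small wrapper API around workqueue(9).  An ixl_work is initialized
 * once with ixl_work_set() and can then be queued repeatedly with
 * ixl_work_add(); the ixw_added flag makes sure a work item is never
 * enqueued twice, since workqueue(9) does not allow that.  The flag
 * is cleared in ixl_workq_work() before the callback runs, so the
 * task may be re-queued while it is still executing.  Typical use in
 * this driver:
 *
 *	ixl_work_set(&sc->sc_stats_task, ixl_stats_update, sc);
 *	ixl_work_add(sc->sc_workq, &sc->sc_stats_task);
 */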
6495 static void
6496 ixl_work_set(struct ixl_work *work, void (*func)(void *), void *arg)
6497 {
6498
6499 memset(work, 0, sizeof(*work));
6500 work->ixw_func = func;
6501 work->ixw_arg = arg;
6502 }
6503
6504 static void
6505 ixl_work_add(struct workqueue *wq, struct ixl_work *work)
6506 {
6507 if (atomic_cas_uint(&work->ixw_added, 0, 1) != 0)
6508 return;
6509
6510 workqueue_enqueue(wq, &work->ixw_cookie, NULL);
6511 }
6512
6513 static void
6514 ixl_work_wait(struct workqueue *wq, struct ixl_work *work)
6515 {
6516
6517 workqueue_wait(wq, &work->ixw_cookie);
6518 }
6519
6520 static void
6521 ixl_workq_work(struct work *wk, void *context)
6522 {
6523 struct ixl_work *work;
6524
6525 work = container_of(wk, struct ixl_work, ixw_cookie);
6526
6527 atomic_swap_uint(&work->ixw_added, 0);
6528 work->ixw_func(work->ixw_arg);
6529 }
6530
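/*
 * Read an Rx control register through the admin queue
 * (IXL_AQ_OP_RX_CTL_REG_READ) instead of a direct BAR access.
 * Returns 0 with the value in *rv, or an errno derived from the
 * admin queue return code.
 */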
6531 static int
6532 ixl_rx_ctl_read(struct ixl_softc *sc, uint32_t reg, uint32_t *rv)
6533 {
6534 struct ixl_aq_desc iaq;
6535
6536 memset(&iaq, 0, sizeof(iaq));
6537 iaq.iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_REG_READ);
6538 iaq.iaq_param[1] = htole32(reg);
6539
6540 if (ixl_atq_poll(sc, &iaq, 250) != 0)
6541 return ETIMEDOUT;
6542
6543 	switch (le16toh(iaq.iaq_retval)) {
6544 case IXL_AQ_RC_OK:
6545 /* success */
6546 break;
6547 case IXL_AQ_RC_EACCES:
6548 return EPERM;
6549 case IXL_AQ_RC_EAGAIN:
6550 return EAGAIN;
6551 default:
6552 return EIO;
6553 }
6554
6555 	*rv = le32toh(iaq.iaq_param[3]);
6556 return 0;
6557 }
6558
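/*
 * Front end for Rx control register reads.  When the firmware
 * advertises the Rx control AQ commands (IXL_SC_AQ_FLAG_RXCTL), try
 * the admin queue path first, retrying up to five times with a 1 ms
 * pause on EAGAIN.  If that is unavailable or keeps failing, fall
 * back to a plain register read.
 */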
6559 static uint32_t
6560 ixl_rd_rx_csr(struct ixl_softc *sc, uint32_t reg)
6561 {
6562 uint32_t val;
6563 int rv, retry, retry_limit;
6564
6565 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL)) {
6566 retry_limit = 5;
6567 } else {
6568 retry_limit = 0;
6569 }
6570
6571 for (retry = 0; retry < retry_limit; retry++) {
6572 rv = ixl_rx_ctl_read(sc, reg, &val);
6573 if (rv == 0)
6574 return val;
6575 else if (rv == EAGAIN)
6576 delaymsec(1);
6577 else
6578 break;
6579 }
6580
6581 val = ixl_rd(sc, reg);
6582
6583 return val;
6584 }
6585
6586 static int
6587 ixl_rx_ctl_write(struct ixl_softc *sc, uint32_t reg, uint32_t value)
6588 {
6589 struct ixl_aq_desc iaq;
6590
6591 memset(&iaq, 0, sizeof(iaq));
6592 iaq.iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_REG_WRITE);
6593 iaq.iaq_param[1] = htole32(reg);
6594 iaq.iaq_param[3] = htole32(value);
6595
6596 if (ixl_atq_poll(sc, &iaq, 250) != 0)
6597 return ETIMEDOUT;
6598
6599 	switch (le16toh(iaq.iaq_retval)) {
6600 case IXL_AQ_RC_OK:
6601 /* success */
6602 break;
6603 case IXL_AQ_RC_EACCES:
6604 return EPERM;
6605 case IXL_AQ_RC_EAGAIN:
6606 return EAGAIN;
6607 default:
6608 return EIO;
6609 }
6610
6611 return 0;
6612 }
6613
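/*
 * Write counterpart of ixl_rd_rx_csr(): prefer the admin queue
 * command when available, otherwise (or after repeated failures)
 * write the register directly.
 */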
6614 static void
6615 ixl_wr_rx_csr(struct ixl_softc *sc, uint32_t reg, uint32_t value)
6616 {
6617 int rv, retry, retry_limit;
6618
6619 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL)) {
6620 retry_limit = 5;
6621 } else {
6622 retry_limit = 0;
6623 }
6624
6625 for (retry = 0; retry < retry_limit; retry++) {
6626 rv = ixl_rx_ctl_write(sc, reg, value);
6627 if (rv == 0)
6628 return;
6629 else if (rv == EAGAIN)
6630 delaymsec(1);
6631 else
6632 break;
6633 }
6634
6635 ixl_wr(sc, reg, value);
6636 }
6637
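/*
 * Acquire ownership of the NVM resource via the REQUEST_RESOURCE
 * admin command.  'R' asks for read access, anything else for write
 * access.  Firmware that does not require the lock
 * (IXL_SC_AQ_FLAG_NVMLOCK unset) makes this a no-op.
 */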
6638 static int
6639 ixl_nvm_lock(struct ixl_softc *sc, char rw)
6640 {
6641 struct ixl_aq_desc iaq;
6642 struct ixl_aq_req_resource_param *param;
6643 int rv;
6644
6645 if (!ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK))
6646 return 0;
6647
6648 memset(&iaq, 0, sizeof(iaq));
6649 iaq.iaq_opcode = htole16(IXL_AQ_OP_REQUEST_RESOURCE);
6650
6651 param = (struct ixl_aq_req_resource_param *)&iaq.iaq_param;
6652 param->resource_id = htole16(IXL_AQ_RESOURCE_ID_NVM);
6653 if (rw == 'R') {
6654 param->access_type = htole16(IXL_AQ_RESOURCE_ACCES_READ);
6655 } else {
6656 param->access_type = htole16(IXL_AQ_RESOURCE_ACCES_WRITE);
6657 }
6658
6659 rv = ixl_atq_poll(sc, &iaq, 250);
6660
6661 if (rv != 0)
6662 return ETIMEDOUT;
6663
6664 switch (le16toh(iaq.iaq_retval)) {
6665 case IXL_AQ_RC_OK:
6666 break;
6667 case IXL_AQ_RC_EACCES:
6668 return EACCES;
6669 case IXL_AQ_RC_EBUSY:
6670 return EBUSY;
6671 case IXL_AQ_RC_EPERM:
6672 return EPERM;
	default:
		return EIO;
6673 	}
6674
6675 return 0;
6676 }
6677
6678 static int
6679 ixl_nvm_unlock(struct ixl_softc *sc)
6680 {
6681 struct ixl_aq_desc iaq;
6682 struct ixl_aq_rel_resource_param *param;
6683 int rv;
6684
6685 if (!ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK))
6686 return 0;
6687
6688 memset(&iaq, 0, sizeof(iaq));
6689 iaq.iaq_opcode = htole16(IXL_AQ_OP_RELEASE_RESOURCE);
6690
6691 param = (struct ixl_aq_rel_resource_param *)&iaq.iaq_param;
6692 param->resource_id = htole16(IXL_AQ_RESOURCE_ID_NVM);
6693
6694 rv = ixl_atq_poll(sc, &iaq, 250);
6695
6696 if (rv != 0)
6697 return ETIMEDOUT;
6698
6699 switch (le16toh(iaq.iaq_retval)) {
6700 case IXL_AQ_RC_OK:
6701 break;
6702 default:
6703 return EIO;
6704 }
6705 return 0;
6706 }
6707
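/*
 * Wait for the Shadow RAM interface to become idle by polling the
 * DONE bit in GLNVM_SRCTL, up to IXL_SRRD_SRCTL_ATTEMPTS times with a
 * 5 ms delay between attempts.  Returns 0 on success, -1 on timeout.
 */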
6708 static int
6709 ixl_srdone_poll(struct ixl_softc *sc)
6710 {
6711 int wait_count;
6712 uint32_t reg;
6713
6714 for (wait_count = 0; wait_count < IXL_SRRD_SRCTL_ATTEMPTS;
6715 wait_count++) {
6716 reg = ixl_rd(sc, I40E_GLNVM_SRCTL);
6717 if (ISSET(reg, I40E_GLNVM_SRCTL_DONE_MASK))
6718 break;
6719
6720 delaymsec(5);
6721 }
6722
6723 if (wait_count == IXL_SRRD_SRCTL_ATTEMPTS)
6724 return -1;
6725
6726 return 0;
6727 }
6728
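/*
 * Read one 16-bit word from the NVM Shadow RAM using the legacy
 * GLNVM_SRCTL/GLNVM_SRDATA register interface.  The offset is a word
 * address; the result is returned in host byte order.
 */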
6729 static int
6730 ixl_nvm_read_srctl(struct ixl_softc *sc, uint16_t offset, uint16_t *data)
6731 {
6732 uint32_t reg;
6733
6734 if (ixl_srdone_poll(sc) != 0)
6735 return ETIMEDOUT;
6736
6737 reg = ((uint32_t)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
6738 __BIT(I40E_GLNVM_SRCTL_START_SHIFT);
6739 ixl_wr(sc, I40E_GLNVM_SRCTL, reg);
6740
6741 if (ixl_srdone_poll(sc) != 0) {
6742 aprint_debug("NVM read error: couldn't access "
6743 "Shadow RAM address: 0x%x\n", offset);
6744 return ETIMEDOUT;
6745 }
6746
6747 reg = ixl_rd(sc, I40E_GLNVM_SRDATA);
6748 *data = (uint16_t)__SHIFTOUT(reg, I40E_GLNVM_SRDATA_RDDATA_MASK);
6749
6750 return 0;
6751 }
6752
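/*
 * Read NVM contents through the NVM_READ admin command, using the
 * shared sc_aqbuf DMA buffer as the destination.  The caller passes a
 * word offset, which is converted to the 24-bit byte offset the
 * command expects; the data comes back little-endian.
 */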
6753 static int
6754 ixl_nvm_read_aq(struct ixl_softc *sc, uint16_t offset_word,
6755 void *data, size_t len)
6756 {
6757 struct ixl_dmamem *idm;
6758 struct ixl_aq_desc iaq;
6759 struct ixl_aq_nvm_param *param;
6760 uint32_t offset_bytes;
6761 int rv;
6762
6763 idm = &sc->sc_aqbuf;
6764 if (len > IXL_DMA_LEN(idm))
6765 return ENOMEM;
6766
6767 memset(IXL_DMA_KVA(idm), 0, IXL_DMA_LEN(idm));
6768 memset(&iaq, 0, sizeof(iaq));
6769 iaq.iaq_opcode = htole16(IXL_AQ_OP_NVM_READ);
6770 iaq.iaq_flags = htole16(IXL_AQ_BUF |
6771 ((len > I40E_AQ_LARGE_BUF) ? IXL_AQ_LB : 0));
6772 iaq.iaq_datalen = htole16(len);
6773 ixl_aq_dva(&iaq, IXL_DMA_DVA(idm));
6774
6775 param = (struct ixl_aq_nvm_param *)iaq.iaq_param;
6776 param->command_flags = IXL_AQ_NVM_LAST_CMD;
6777 param->module_pointer = 0;
6778 param->length = htole16(len);
6779 offset_bytes = (uint32_t)offset_word * 2;
6780 offset_bytes &= 0x00FFFFFF;
6781 param->offset = htole32(offset_bytes);
6782
6783 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
6784 BUS_DMASYNC_PREREAD);
6785
6786 rv = ixl_atq_poll(sc, &iaq, 250);
6787
6788 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
6789 BUS_DMASYNC_POSTREAD);
6790
6791 if (rv != 0) {
6792 return ETIMEDOUT;
6793 }
6794
6795 switch (le16toh(iaq.iaq_retval)) {
6796 case IXL_AQ_RC_OK:
6797 break;
6798 case IXL_AQ_RC_EPERM:
6799 return EPERM;
6800 case IXL_AQ_RC_EINVAL:
6801 return EINVAL;
6802 case IXL_AQ_RC_EBUSY:
6803 return EBUSY;
6804 case IXL_AQ_RC_EIO:
6805 default:
6806 return EIO;
6807 }
6808
6809 memcpy(data, IXL_DMA_KVA(idm), len);
6810
6811 return 0;
6812 }
6813
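/*
 * Read a single 16-bit NVM word.  Takes the NVM lock, then uses the
 * admin queue read if the firmware supports it (converting the
 * little-endian result with le16toh), otherwise the Shadow RAM
 * register interface.
 */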
6814 static int
6815 ixl_rd16_nvm(struct ixl_softc *sc, uint16_t offset, uint16_t *data)
6816 {
6817 int error;
6818 uint16_t buf;
6819
6820 error = ixl_nvm_lock(sc, 'R');
6821 if (error)
6822 return error;
6823
6824 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMREAD)) {
6825 error = ixl_nvm_read_aq(sc, offset,
6826 &buf, sizeof(buf));
6827 if (error == 0)
6828 *data = le16toh(buf);
6829 } else {
6830 error = ixl_nvm_read_srctl(sc, offset, &buf);
6831 if (error == 0)
6832 *data = buf;
6833 }
6834
6835 ixl_nvm_unlock(sc);
6836
6837 return error;
6838 }
6839
6840 MODULE(MODULE_CLASS_DRIVER, if_ixl, "pci");
6841
6842 #ifdef _MODULE
6843 #include "ioconf.c"
6844 #endif
6845
6846 #ifdef _MODULE
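/*
 * Pick up tunables passed as module properties when the driver is
 * loaded as a module.  Recognized keys are "nomsix", "stats_interval",
 * "nqps_limit", "rx_ndescs" and "tx_ndescs".  As a hypothetical
 * example (exact flags depend on modload(8) usage), something like
 * "modload -b nomsix=true -i nqps_limit=4 if_ixl" would set the
 * corresponding properties.
 */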
6847 static void
6848 ixl_parse_modprop(prop_dictionary_t dict)
6849 {
6850 prop_object_t obj;
6851 int64_t val;
6852 uint64_t uval;
6853
6854 if (dict == NULL)
6855 return;
6856
6857 obj = prop_dictionary_get(dict, "nomsix");
6858 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_BOOL) {
6859 ixl_param_nomsix = prop_bool_true((prop_bool_t)obj);
6860 }
6861
6862 obj = prop_dictionary_get(dict, "stats_interval");
6863 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
6864 val = prop_number_integer_value((prop_number_t)obj);
6865
6866 		/* arbitrary bounds, just to keep the interval sane */
6867 if (100 < val && val < 180000) {
6868 ixl_param_stats_interval = val;
6869 }
6870 }
6871
6872 obj = prop_dictionary_get(dict, "nqps_limit");
6873 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
6874 val = prop_number_integer_value((prop_number_t)obj);
6875
6876 if (val <= INT32_MAX)
6877 ixl_param_nqps_limit = val;
6878 }
6879
6880 obj = prop_dictionary_get(dict, "rx_ndescs");
6881 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
6882 uval = prop_number_unsigned_integer_value((prop_number_t)obj);
6883
6884 if (uval > 8)
6885 ixl_param_rx_ndescs = uval;
6886 }
6887
6888 obj = prop_dictionary_get(dict, "tx_ndescs");
6889 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
6890 uval = prop_number_unsigned_integer_value((prop_number_t)obj);
6891
6892 if (uval > IXL_TX_PKT_DESCS)
6893 ixl_param_tx_ndescs = uval;
6894 }
6895
6896 }
6897 #endif
6898
6899 static int
6900 if_ixl_modcmd(modcmd_t cmd, void *opaque)
6901 {
6902 int error = 0;
6903
6904 #ifdef _MODULE
6905 switch (cmd) {
6906 case MODULE_CMD_INIT:
6907 ixl_parse_modprop((prop_dictionary_t)opaque);
6908 error = config_init_component(cfdriver_ioconf_if_ixl,
6909 cfattach_ioconf_if_ixl, cfdata_ioconf_if_ixl);
6910 break;
6911 case MODULE_CMD_FINI:
6912 error = config_fini_component(cfdriver_ioconf_if_ixl,
6913 cfattach_ioconf_if_ixl, cfdata_ioconf_if_ixl);
6914 break;
6915 default:
6916 error = ENOTTY;
6917 break;
6918 }
6919 #endif
6920
6921 return error;
6922 }
6923