1 /* $NetBSD: if_ixl.c,v 1.24 2020/01/17 09:04:04 yamaguchi Exp $ */
2
3 /*
4 * Copyright (c) 2013-2015, Intel Corporation
5 * All rights reserved.
6
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
12 *
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * 3. Neither the name of the Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 /*
35 * Copyright (c) 2016,2017 David Gwynne <dlg (at) openbsd.org>
36 *
37 * Permission to use, copy, modify, and distribute this software for any
38 * purpose with or without fee is hereby granted, provided that the above
39 * copyright notice and this permission notice appear in all copies.
40 *
41 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
42 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
43 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
44 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
45 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
46 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
47 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
48 */
49
50 /*
51 * Copyright (c) 2019 Internet Initiative Japan, Inc.
52 * All rights reserved.
53 *
54 * Redistribution and use in source and binary forms, with or without
55 * modification, are permitted provided that the following conditions
56 * are met:
57 * 1. Redistributions of source code must retain the above copyright
58 * notice, this list of conditions and the following disclaimer.
59 * 2. Redistributions in binary form must reproduce the above copyright
60 * notice, this list of conditions and the following disclaimer in the
61 * documentation and/or other materials provided with the distribution.
62 *
63 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
64 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
65 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
66 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
67 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
68 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
69 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
70 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
71 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
72 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
73 * POSSIBILITY OF SUCH DAMAGE.
74 */
75
76 #include <sys/cdefs.h>
77
78 #ifdef _KERNEL_OPT
79 #include "opt_net_mpsafe.h"
80 #include "opt_if_ixl.h"
81 #endif
82
83 #include <sys/param.h>
84 #include <sys/types.h>
85
86 #include <sys/cpu.h>
87 #include <sys/device.h>
88 #include <sys/evcnt.h>
89 #include <sys/interrupt.h>
90 #include <sys/kmem.h>
91 #include <sys/malloc.h>
92 #include <sys/module.h>
93 #include <sys/mutex.h>
94 #include <sys/pcq.h>
95 #include <sys/syslog.h>
96 #include <sys/workqueue.h>
97
98 #include <sys/bus.h>
99
100 #include <net/bpf.h>
101 #include <net/if.h>
102 #include <net/if_dl.h>
103 #include <net/if_media.h>
104 #include <net/if_ether.h>
105 #include <net/rss_config.h>
106
107 #include <dev/pci/pcivar.h>
108 #include <dev/pci/pcidevs.h>
109
110 #include <dev/pci/if_ixlreg.h>
111 #include <dev/pci/if_ixlvar.h>
112
113 #include <prop/proplib.h>
114
115 struct ixl_softc; /* defined */
116
117 #define I40E_PF_RESET_WAIT_COUNT 200
118 #define I40E_AQ_LARGE_BUF 512
119
120 /* bitfields for Tx queue mapping in QTX_CTL */
121 #define I40E_QTX_CTL_VF_QUEUE 0x0
122 #define I40E_QTX_CTL_VM_QUEUE 0x1
123 #define I40E_QTX_CTL_PF_QUEUE 0x2
124
125 #define I40E_QUEUE_TYPE_EOL 0x7ff
126 #define I40E_INTR_NOTX_QUEUE 0
127
128 #define I40E_QUEUE_TYPE_RX 0x0
129 #define I40E_QUEUE_TYPE_TX 0x1
130 #define I40E_QUEUE_TYPE_PE_CEQ 0x2
131 #define I40E_QUEUE_TYPE_UNKNOWN 0x3
132
133 #define I40E_ITR_INDEX_RX 0x0
134 #define I40E_ITR_INDEX_TX 0x1
135 #define I40E_ITR_INDEX_OTHER 0x2
136 #define I40E_ITR_INDEX_NONE 0x3
137
139 #define I40E_INTR_NOTX_INTR 0
140 #define I40E_INTR_NOTX_RX_QUEUE 0
141 #define I40E_INTR_NOTX_TX_QUEUE 1
142 #define I40E_INTR_NOTX_RX_MASK I40E_PFINT_ICR0_QUEUE_0_MASK
143 #define I40E_INTR_NOTX_TX_MASK I40E_PFINT_ICR0_QUEUE_1_MASK
144
145 #define BIT_ULL(a) (1ULL << (a))
146 #define IXL_RSS_HENA_DEFAULT_BASE \
147 (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
148 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
149 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
150 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
151 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
152 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
153 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
154 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
155 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
156 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
157 BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
158 #define IXL_RSS_HENA_DEFAULT_XL710 IXL_RSS_HENA_DEFAULT_BASE
159 #define IXL_RSS_HENA_DEFAULT_X722 (IXL_RSS_HENA_DEFAULT_XL710 | \
160 BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
161 BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
162 BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
163 BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
164 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
165 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK))
166 #define I40E_HASH_LUT_SIZE_128 0
167 #define IXL_RSS_KEY_SIZE_REG 13
168
169 #define IXL_ICR0_CRIT_ERR_MASK \
170 (I40E_PFINT_ICR0_PCI_EXCEPTION_MASK | \
171 I40E_PFINT_ICR0_ECC_ERR_MASK | \
172 I40E_PFINT_ICR0_PE_CRITERR_MASK)
173
174 #define IXL_TX_PKT_DESCS 8
175 #define IXL_TX_QUEUE_ALIGN 128
176 #define IXL_RX_QUEUE_ALIGN 128
177
178 #define IXL_HARDMTU 9712 /* 9726 - ETHER_HDR_LEN */
179
180 #define IXL_PCIREG PCI_MAPREG_START
181
182 #define IXL_ITR0 0x0
183 #define IXL_ITR1 0x1
184 #define IXL_ITR2 0x2
185 #define IXL_NOITR 0x3
186
187 #define IXL_AQ_NUM 256
188 #define IXL_AQ_MASK (IXL_AQ_NUM - 1)
189 #define IXL_AQ_ALIGN 64 /* lol */
190 #define IXL_AQ_BUFLEN 4096
191
192 #define IXL_HMC_ROUNDUP 512
193 #define IXL_HMC_PGSIZE 4096
194 #define IXL_HMC_DVASZ sizeof(uint64_t)
195 #define IXL_HMC_PGS (IXL_HMC_PGSIZE / IXL_HMC_DVASZ)
196 #define IXL_HMC_L2SZ (IXL_HMC_PGSIZE * IXL_HMC_PGS)
197 #define IXL_HMC_PDVALID 1ULL
198
199 #define IXL_ATQ_EXEC_TIMEOUT (10 * hz)
200
201 struct ixl_aq_regs {
202 bus_size_t atq_tail;
203 bus_size_t atq_head;
204 bus_size_t atq_len;
205 bus_size_t atq_bal;
206 bus_size_t atq_bah;
207
208 bus_size_t arq_tail;
209 bus_size_t arq_head;
210 bus_size_t arq_len;
211 bus_size_t arq_bal;
212 bus_size_t arq_bah;
213
214 uint32_t atq_len_enable;
215 uint32_t atq_tail_mask;
216 uint32_t atq_head_mask;
217
218 uint32_t arq_len_enable;
219 uint32_t arq_tail_mask;
220 uint32_t arq_head_mask;
221 };
222
223 struct ixl_phy_type {
224 uint64_t phy_type;
225 uint64_t ifm_type;
226 };
227
228 struct ixl_speed_type {
229 uint8_t dev_speed;
230 uint64_t net_speed;
231 };
232
233 struct ixl_aq_buf {
234 SIMPLEQ_ENTRY(ixl_aq_buf)
235 aqb_entry;
236 void *aqb_data;
237 bus_dmamap_t aqb_map;
238 bus_dma_segment_t aqb_seg;
239 size_t aqb_size;
240 int aqb_nsegs;
241 };
242 SIMPLEQ_HEAD(ixl_aq_bufs, ixl_aq_buf);
243
244 struct ixl_dmamem {
245 bus_dmamap_t ixm_map;
246 bus_dma_segment_t ixm_seg;
247 int ixm_nsegs;
248 size_t ixm_size;
249 void *ixm_kva;
250 };
251
252 #define IXL_DMA_MAP(_ixm) ((_ixm)->ixm_map)
253 #define IXL_DMA_DVA(_ixm) ((_ixm)->ixm_map->dm_segs[0].ds_addr)
254 #define IXL_DMA_KVA(_ixm) ((void *)(_ixm)->ixm_kva)
255 #define IXL_DMA_LEN(_ixm) ((_ixm)->ixm_size)
256
257 struct ixl_hmc_entry {
258 uint64_t hmc_base;
259 uint32_t hmc_count;
260 uint64_t hmc_size;
261 };
262
263 enum ixl_hmc_types {
264 IXL_HMC_LAN_TX = 0,
265 IXL_HMC_LAN_RX,
266 IXL_HMC_FCOE_CTX,
267 IXL_HMC_FCOE_FILTER,
268 IXL_HMC_COUNT
269 };
270
271 struct ixl_hmc_pack {
272 uint16_t offset;
273 uint16_t width;
274 uint16_t lsb;
275 };
276
277 /*
278  * these HMC objects have weird sizes and alignments, so these are abstract
279  * representations of them that are nice for C to populate.
280 *
281 * the packing code relies on little-endian values being stored in the fields,
282 * no high bits in the fields being set, and the fields must be packed in the
283 * same order as they are in the ctx structure.
284 */
285
286 struct ixl_hmc_rxq {
287 uint16_t head;
288 uint8_t cpuid;
289 uint64_t base;
290 #define IXL_HMC_RXQ_BASE_UNIT 128
291 uint16_t qlen;
292 uint16_t dbuff;
293 #define IXL_HMC_RXQ_DBUFF_UNIT 128
294 uint8_t hbuff;
295 #define IXL_HMC_RXQ_HBUFF_UNIT 64
296 uint8_t dtype;
297 #define IXL_HMC_RXQ_DTYPE_NOSPLIT 0x0
298 #define IXL_HMC_RXQ_DTYPE_HSPLIT 0x1
299 #define IXL_HMC_RXQ_DTYPE_SPLIT_ALWAYS 0x2
300 uint8_t dsize;
301 #define IXL_HMC_RXQ_DSIZE_16 0
302 #define IXL_HMC_RXQ_DSIZE_32 1
303 uint8_t crcstrip;
304 uint8_t fc_ena;
305 uint8_t l2sel;
306 uint8_t hsplit_0;
307 uint8_t hsplit_1;
308 uint8_t showiv;
309 uint16_t rxmax;
310 uint8_t tphrdesc_ena;
311 uint8_t tphwdesc_ena;
312 uint8_t tphdata_ena;
313 uint8_t tphhead_ena;
314 uint8_t lrxqthresh;
315 uint8_t prefena;
316 };
317
318 static const struct ixl_hmc_pack ixl_hmc_pack_rxq[] = {
319 { offsetof(struct ixl_hmc_rxq, head), 13, 0 },
320 { offsetof(struct ixl_hmc_rxq, cpuid), 8, 13 },
321 { offsetof(struct ixl_hmc_rxq, base), 57, 32 },
322 { offsetof(struct ixl_hmc_rxq, qlen), 13, 89 },
323 { offsetof(struct ixl_hmc_rxq, dbuff), 7, 102 },
324 { offsetof(struct ixl_hmc_rxq, hbuff), 5, 109 },
325 { offsetof(struct ixl_hmc_rxq, dtype), 2, 114 },
326 { offsetof(struct ixl_hmc_rxq, dsize), 1, 116 },
327 { offsetof(struct ixl_hmc_rxq, crcstrip), 1, 117 },
328 { offsetof(struct ixl_hmc_rxq, fc_ena), 1, 118 },
329 { offsetof(struct ixl_hmc_rxq, l2sel), 1, 119 },
330 { offsetof(struct ixl_hmc_rxq, hsplit_0), 4, 120 },
331 { offsetof(struct ixl_hmc_rxq, hsplit_1), 2, 124 },
332 { offsetof(struct ixl_hmc_rxq, showiv), 1, 127 },
333 { offsetof(struct ixl_hmc_rxq, rxmax), 14, 174 },
334 { offsetof(struct ixl_hmc_rxq, tphrdesc_ena), 1, 193 },
335 { offsetof(struct ixl_hmc_rxq, tphwdesc_ena), 1, 194 },
336 { offsetof(struct ixl_hmc_rxq, tphdata_ena), 1, 195 },
337 { offsetof(struct ixl_hmc_rxq, tphhead_ena), 1, 196 },
338 { offsetof(struct ixl_hmc_rxq, lrxqthresh), 3, 198 },
339 { offsetof(struct ixl_hmc_rxq, prefena), 1, 201 },
340 };
341
342 #define IXL_HMC_RXQ_MINSIZE (201 + 1)
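/*
 * Illustrative sketch (not part of the driver): how a pack table entry is
 * consumed.  The { offsetof(struct ixl_hmc_rxq, qlen), 13, 89 } entry above
 * says that the low 13 bits of rxq->qlen end up in bits 89..101 of the
 * little-endian HMC context image, i.e. starting at bit 1 of byte 11.
 * A minimal bit-copy loop in that spirit (hmc_pack_bits is hypothetical):
 *
 *	static void
 *	hmc_pack_bits(uint8_t *ctx, uint64_t val, uint16_t lsb, uint16_t width)
 *	{
 *		uint16_t bit;
 *
 *		for (bit = 0; bit < width; bit++) {
 *			if (val & (1ULL << bit))
 *				ctx[(lsb + bit) / 8] |= 1U << ((lsb + bit) % 8);
 *		}
 *	}
 *
 * ixl_hmc_pack() below is expected to perform the equivalent copy, reading
 * each source field at its struct offset and honouring the same lsb/width.
 */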
343
344 struct ixl_hmc_txq {
345 uint16_t head;
346 uint8_t new_context;
347 uint64_t base;
348 #define IXL_HMC_TXQ_BASE_UNIT 128
349 uint8_t fc_ena;
350 uint8_t timesync_ena;
351 uint8_t fd_ena;
352 uint8_t alt_vlan_ena;
353 uint16_t thead_wb;
354 uint8_t cpuid;
355 uint8_t head_wb_ena;
356 #define IXL_HMC_TXQ_DESC_WB 0
357 #define IXL_HMC_TXQ_HEAD_WB 1
358 uint16_t qlen;
359 uint8_t tphrdesc_ena;
360 uint8_t tphrpacket_ena;
361 uint8_t tphwdesc_ena;
362 uint64_t head_wb_addr;
363 uint32_t crc;
364 uint16_t rdylist;
365 uint8_t rdylist_act;
366 };
367
368 static const struct ixl_hmc_pack ixl_hmc_pack_txq[] = {
369 { offsetof(struct ixl_hmc_txq, head), 13, 0 },
370 { offsetof(struct ixl_hmc_txq, new_context), 1, 30 },
371 { offsetof(struct ixl_hmc_txq, base), 57, 32 },
372 { offsetof(struct ixl_hmc_txq, fc_ena), 1, 89 },
373 { offsetof(struct ixl_hmc_txq, timesync_ena), 1, 90 },
374 { offsetof(struct ixl_hmc_txq, fd_ena), 1, 91 },
375 { offsetof(struct ixl_hmc_txq, alt_vlan_ena), 1, 92 },
376 { offsetof(struct ixl_hmc_txq, cpuid), 8, 96 },
377 /* line 1 */
378 { offsetof(struct ixl_hmc_txq, thead_wb), 13, 0 + 128 },
379 { offsetof(struct ixl_hmc_txq, head_wb_ena), 1, 32 + 128 },
380 { offsetof(struct ixl_hmc_txq, qlen), 13, 33 + 128 },
381 { offsetof(struct ixl_hmc_txq, tphrdesc_ena), 1, 46 + 128 },
382 { offsetof(struct ixl_hmc_txq, tphrpacket_ena), 1, 47 + 128 },
383 { offsetof(struct ixl_hmc_txq, tphwdesc_ena), 1, 48 + 128 },
384 { offsetof(struct ixl_hmc_txq, head_wb_addr), 64, 64 + 128 },
385 /* line 7 */
386 { offsetof(struct ixl_hmc_txq, crc), 32, 0 + (7*128) },
387 { offsetof(struct ixl_hmc_txq, rdylist), 10, 84 + (7*128) },
388 { offsetof(struct ixl_hmc_txq, rdylist_act), 1, 94 + (7*128) },
389 };
390
391 #define IXL_HMC_TXQ_MINSIZE (94 + (7*128) + 1)
392
393 struct ixl_work {
394 struct work ixw_cookie;
395 void (*ixw_func)(void *);
396 void *ixw_arg;
397 unsigned int ixw_added;
398 };
399 #define IXL_WORKQUEUE_PRI PRI_SOFTNET
400
401 struct ixl_tx_map {
402 struct mbuf *txm_m;
403 bus_dmamap_t txm_map;
404 unsigned int txm_eop;
405 };
406
407 struct ixl_tx_ring {
408 kmutex_t txr_lock;
409 struct ixl_softc *txr_sc;
410
411 unsigned int txr_prod;
412 unsigned int txr_cons;
413
414 struct ixl_tx_map *txr_maps;
415 struct ixl_dmamem txr_mem;
416
417 bus_size_t txr_tail;
418 unsigned int txr_qid;
419 pcq_t *txr_intrq;
420 void *txr_si;
421
422 uint64_t txr_oerrors; /* if_oerrors */
423 uint64_t txr_opackets; /* if_opackets */
424 uint64_t txr_obytes; /* if_obytes */
425 uint64_t txr_omcasts; /* if_omcasts */
426
427 struct evcnt txr_defragged;
428 struct evcnt txr_defrag_failed;
429 struct evcnt txr_pcqdrop;
430 struct evcnt txr_transmitdef;
431 struct evcnt txr_intr;
432 struct evcnt txr_defer;
433 };
434
435 struct ixl_rx_map {
436 struct mbuf *rxm_m;
437 bus_dmamap_t rxm_map;
438 };
439
440 struct ixl_rx_ring {
441 kmutex_t rxr_lock;
442
443 unsigned int rxr_prod;
444 unsigned int rxr_cons;
445
446 struct ixl_rx_map *rxr_maps;
447 struct ixl_dmamem rxr_mem;
448
449 struct mbuf *rxr_m_head;
450 struct mbuf **rxr_m_tail;
451
452 bus_size_t rxr_tail;
453 unsigned int rxr_qid;
454
455 uint64_t rxr_ipackets; /* if_ipackets */
456 uint64_t rxr_ibytes; /* if_ibytes */
457 uint64_t rxr_iqdrops; /* iqdrops */
458 uint64_t rxr_ierrors; /* if_ierrors */
459
460 struct evcnt rxr_mgethdr_failed;
461 struct evcnt rxr_mgetcl_failed;
462 struct evcnt rxr_mbuf_load_failed;
463 struct evcnt rxr_intr;
464 struct evcnt rxr_defer;
465 };
466
467 struct ixl_queue_pair {
468 struct ixl_softc *qp_sc;
469 struct ixl_tx_ring *qp_txr;
470 struct ixl_rx_ring *qp_rxr;
471
472 char qp_name[16];
473
474 void *qp_si;
475 struct ixl_work qp_task;
476 bool qp_workqueue;
477 };
478
479 struct ixl_atq {
480 struct ixl_aq_desc iatq_desc;
481 void (*iatq_fn)(struct ixl_softc *,
482 const struct ixl_aq_desc *);
483 };
484 SIMPLEQ_HEAD(ixl_atq_list, ixl_atq);
485
486 struct ixl_product {
487 unsigned int vendor_id;
488 unsigned int product_id;
489 };
490
491 struct ixl_stats_counters {
492 bool isc_has_offset;
493 struct evcnt isc_crc_errors;
494 uint64_t isc_crc_errors_offset;
495 struct evcnt isc_illegal_bytes;
496 uint64_t isc_illegal_bytes_offset;
497 struct evcnt isc_rx_bytes;
498 uint64_t isc_rx_bytes_offset;
499 struct evcnt isc_rx_discards;
500 uint64_t isc_rx_discards_offset;
501 struct evcnt isc_rx_unicast;
502 uint64_t isc_rx_unicast_offset;
503 struct evcnt isc_rx_multicast;
504 uint64_t isc_rx_multicast_offset;
505 struct evcnt isc_rx_broadcast;
506 uint64_t isc_rx_broadcast_offset;
507 struct evcnt isc_rx_size_64;
508 uint64_t isc_rx_size_64_offset;
509 struct evcnt isc_rx_size_127;
510 uint64_t isc_rx_size_127_offset;
511 struct evcnt isc_rx_size_255;
512 uint64_t isc_rx_size_255_offset;
513 struct evcnt isc_rx_size_511;
514 uint64_t isc_rx_size_511_offset;
515 struct evcnt isc_rx_size_1023;
516 uint64_t isc_rx_size_1023_offset;
517 struct evcnt isc_rx_size_1522;
518 uint64_t isc_rx_size_1522_offset;
519 struct evcnt isc_rx_size_big;
520 uint64_t isc_rx_size_big_offset;
521 struct evcnt isc_rx_undersize;
522 uint64_t isc_rx_undersize_offset;
523 struct evcnt isc_rx_oversize;
524 uint64_t isc_rx_oversize_offset;
525 struct evcnt isc_rx_fragments;
526 uint64_t isc_rx_fragments_offset;
527 struct evcnt isc_rx_jabber;
528 uint64_t isc_rx_jabber_offset;
529 struct evcnt isc_tx_bytes;
530 uint64_t isc_tx_bytes_offset;
531 struct evcnt isc_tx_dropped_link_down;
532 uint64_t isc_tx_dropped_link_down_offset;
533 struct evcnt isc_tx_unicast;
534 uint64_t isc_tx_unicast_offset;
535 struct evcnt isc_tx_multicast;
536 uint64_t isc_tx_multicast_offset;
537 struct evcnt isc_tx_broadcast;
538 uint64_t isc_tx_broadcast_offset;
539 struct evcnt isc_tx_size_64;
540 uint64_t isc_tx_size_64_offset;
541 struct evcnt isc_tx_size_127;
542 uint64_t isc_tx_size_127_offset;
543 struct evcnt isc_tx_size_255;
544 uint64_t isc_tx_size_255_offset;
545 struct evcnt isc_tx_size_511;
546 uint64_t isc_tx_size_511_offset;
547 struct evcnt isc_tx_size_1023;
548 uint64_t isc_tx_size_1023_offset;
549 struct evcnt isc_tx_size_1522;
550 uint64_t isc_tx_size_1522_offset;
551 struct evcnt isc_tx_size_big;
552 uint64_t isc_tx_size_big_offset;
553 struct evcnt isc_mac_local_faults;
554 uint64_t isc_mac_local_faults_offset;
555 struct evcnt isc_mac_remote_faults;
556 uint64_t isc_mac_remote_faults_offset;
557 struct evcnt isc_link_xon_rx;
558 uint64_t isc_link_xon_rx_offset;
559 struct evcnt isc_link_xon_tx;
560 uint64_t isc_link_xon_tx_offset;
561 struct evcnt isc_link_xoff_rx;
562 uint64_t isc_link_xoff_rx_offset;
563 struct evcnt isc_link_xoff_tx;
564 uint64_t isc_link_xoff_tx_offset;
565 struct evcnt isc_vsi_rx_discards;
566 uint64_t isc_vsi_rx_discards_offset;
567 struct evcnt isc_vsi_rx_bytes;
568 uint64_t isc_vsi_rx_bytes_offset;
569 struct evcnt isc_vsi_rx_unicast;
570 uint64_t isc_vsi_rx_unicast_offset;
571 struct evcnt isc_vsi_rx_multicast;
572 uint64_t isc_vsi_rx_multicast_offset;
573 struct evcnt isc_vsi_rx_broadcast;
574 uint64_t isc_vsi_rx_broadcast_offset;
575 struct evcnt isc_vsi_tx_errors;
576 uint64_t isc_vsi_tx_errors_offset;
577 struct evcnt isc_vsi_tx_bytes;
578 uint64_t isc_vsi_tx_bytes_offset;
579 struct evcnt isc_vsi_tx_unicast;
580 uint64_t isc_vsi_tx_unicast_offset;
581 struct evcnt isc_vsi_tx_multicast;
582 uint64_t isc_vsi_tx_multicast_offset;
583 struct evcnt isc_vsi_tx_broadcast;
584 uint64_t isc_vsi_tx_broadcast_offset;
585 };
586
587 /*
588 * Locking notes:
589  * + a field in ixl_tx_ring is protected by txr_lock (a spin mutex), and
590  *   a field in ixl_rx_ring is protected by rxr_lock (a spin mutex).
591  *     - no more than one of these locks may be held at once.
592  * + a field named sc_atq_* in ixl_softc is protected by sc_atq_lock
593  *   (a spin mutex).
594  *     - the lock cannot be held together with txr_lock or rxr_lock.
595  * + a field named sc_arq_* is not protected by any lock.
596  *     - operations on sc_arq_* are done in a single context related to
597  *       sc_arq_task.
598  * + other fields in ixl_softc are protected by sc_cfg_lock
599  *   (an adaptive mutex)
600  *     - it must be held before another lock is held, and it can be
601  *       released after the other lock is released.
602  */
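/*
 * Illustrative sketch (not part of the driver): the ordering the notes above
 * describe.  sc_cfg_lock is acquired first and released last; a single ring
 * lock may be taken while it is held, and txr_lock/rxr_lock are never held
 * at the same time.
 *
 *	mutex_enter(&sc->sc_cfg_lock);
 *	mutex_enter(&txr->txr_lock);
 *	(modify txr_* fields here)
 *	mutex_exit(&txr->txr_lock);
 *	mutex_exit(&sc->sc_cfg_lock);
 */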
603
604 struct ixl_softc {
605 device_t sc_dev;
606 struct ethercom sc_ec;
607 bool sc_attached;
608 bool sc_dead;
609 bool sc_rxctl_atq;
610 uint32_t sc_port;
611 struct sysctllog *sc_sysctllog;
612 struct workqueue *sc_workq;
613 struct workqueue *sc_workq_txrx;
614 int sc_stats_intval;
615 callout_t sc_stats_callout;
616 struct ixl_work sc_stats_task;
617 struct ixl_stats_counters
618 sc_stats_counters;
619 uint8_t sc_enaddr[ETHER_ADDR_LEN];
620 struct ifmedia sc_media;
621 uint64_t sc_media_status;
622 uint64_t sc_media_active;
623 kmutex_t sc_cfg_lock;
624 enum i40e_mac_type sc_mac_type;
625 uint32_t sc_rss_table_size;
626 uint32_t sc_rss_table_entry_width;
627 bool sc_txrx_workqueue;
628 u_int sc_tx_process_limit;
629 u_int sc_rx_process_limit;
630 u_int sc_tx_intr_process_limit;
631 u_int sc_rx_intr_process_limit;
632
633 int sc_cur_ec_capenable;
634
635 struct pci_attach_args sc_pa;
636 pci_intr_handle_t *sc_ihp;
637 void **sc_ihs;
638 unsigned int sc_nintrs;
639
640 bus_dma_tag_t sc_dmat;
641 bus_space_tag_t sc_memt;
642 bus_space_handle_t sc_memh;
643 bus_size_t sc_mems;
644
645 uint8_t sc_pf_id;
646 uint16_t sc_uplink_seid; /* le */
647 uint16_t sc_downlink_seid; /* le */
648 uint16_t sc_vsi_number; /* le */
649 uint16_t sc_vsi_stat_counter_idx;
650 uint16_t sc_seid;
651 unsigned int sc_base_queue;
652
653 pci_intr_type_t sc_intrtype;
654 unsigned int sc_msix_vector_queue;
655
656 struct ixl_dmamem sc_scratch;
657
658 const struct ixl_aq_regs *
659 sc_aq_regs;
660
661 kmutex_t sc_atq_lock;
662 kcondvar_t sc_atq_cv;
663 struct ixl_dmamem sc_atq;
664 unsigned int sc_atq_prod;
665 unsigned int sc_atq_cons;
666
667 struct ixl_dmamem sc_arq;
668 struct ixl_work sc_arq_task;
669 struct ixl_aq_bufs sc_arq_idle;
670 struct ixl_aq_buf *sc_arq_live[IXL_AQ_NUM];
671 unsigned int sc_arq_prod;
672 unsigned int sc_arq_cons;
673
674 struct ixl_work sc_link_state_task;
675 struct ixl_atq sc_link_state_atq;
676
677 struct ixl_dmamem sc_hmc_sd;
678 struct ixl_dmamem sc_hmc_pd;
679 struct ixl_hmc_entry sc_hmc_entries[IXL_HMC_COUNT];
680
681 unsigned int sc_tx_ring_ndescs;
682 unsigned int sc_rx_ring_ndescs;
683 unsigned int sc_nqueue_pairs;
684 unsigned int sc_nqueue_pairs_max;
685 unsigned int sc_nqueue_pairs_device;
686 struct ixl_queue_pair *sc_qps;
687
688 struct evcnt sc_event_atq;
689 struct evcnt sc_event_link;
690 struct evcnt sc_event_ecc_err;
691 struct evcnt sc_event_pci_exception;
692 struct evcnt sc_event_crit_err;
693 };
694
695 #define IXL_TXRX_PROCESS_UNLIMIT UINT_MAX
696 #define IXL_TX_PROCESS_LIMIT 256
697 #define IXL_RX_PROCESS_LIMIT 256
698 #define IXL_TX_INTR_PROCESS_LIMIT 256
699 #define IXL_RX_INTR_PROCESS_LIMIT 0U
700
701 #define IXL_IFCAP_RXCSUM (IFCAP_CSUM_IPv4_Rx| \
702 IFCAP_CSUM_TCPv4_Rx| \
703 IFCAP_CSUM_UDPv4_Rx| \
704 IFCAP_CSUM_TCPv6_Rx| \
705 IFCAP_CSUM_UDPv6_Rx)
706
707 #define delaymsec(_x) DELAY(1000 * (_x))
708 #ifdef IXL_DEBUG
709 #define DDPRINTF(sc, fmt, args...) \
710 do { \
711 if ((sc) != NULL) { \
712 device_printf( \
713 ((struct ixl_softc *)(sc))->sc_dev, \
714 ""); \
715 } \
716 printf("%s:\t" fmt, __func__, ##args); \
717 } while (0)
718 #else
719 #define DDPRINTF(sc, fmt, args...) __nothing
720 #endif
721 #ifndef IXL_STATS_INTERVAL_MSEC
722 #define IXL_STATS_INTERVAL_MSEC 10000
723 #endif
724 #ifndef IXL_QUEUE_NUM
725 #define IXL_QUEUE_NUM 0
726 #endif
727
728 static bool ixl_param_nomsix = false;
729 static int ixl_param_stats_interval = IXL_STATS_INTERVAL_MSEC;
730 static int ixl_param_nqps_limit = IXL_QUEUE_NUM;
731 static unsigned int ixl_param_tx_ndescs = 1024;
732 static unsigned int ixl_param_rx_ndescs = 1024;
733
734 static enum i40e_mac_type
735 ixl_mactype(pci_product_id_t);
736 static void ixl_clear_hw(struct ixl_softc *);
737 static int ixl_pf_reset(struct ixl_softc *);
738
739 static int ixl_dmamem_alloc(struct ixl_softc *, struct ixl_dmamem *,
740 bus_size_t, bus_size_t);
741 static void ixl_dmamem_free(struct ixl_softc *, struct ixl_dmamem *);
742
743 static int ixl_arq_fill(struct ixl_softc *);
744 static void ixl_arq_unfill(struct ixl_softc *);
745
746 static int ixl_atq_poll(struct ixl_softc *, struct ixl_aq_desc *,
747 unsigned int);
748 static void ixl_atq_set(struct ixl_atq *,
749 void (*)(struct ixl_softc *, const struct ixl_aq_desc *));
750 static int ixl_atq_post(struct ixl_softc *, struct ixl_atq *);
751 static int ixl_atq_post_locked(struct ixl_softc *, struct ixl_atq *);
752 static void ixl_atq_done(struct ixl_softc *);
753 static int ixl_atq_exec(struct ixl_softc *, struct ixl_atq *);
754 static int ixl_get_version(struct ixl_softc *);
755 static int ixl_get_hw_capabilities(struct ixl_softc *);
756 static int ixl_pxe_clear(struct ixl_softc *);
757 static int ixl_lldp_shut(struct ixl_softc *);
758 static int ixl_get_mac(struct ixl_softc *);
759 static int ixl_get_switch_config(struct ixl_softc *);
760 static int ixl_phy_mask_ints(struct ixl_softc *);
761 static int ixl_get_phy_types(struct ixl_softc *, uint64_t *);
762 static int ixl_restart_an(struct ixl_softc *);
763 static int ixl_hmc(struct ixl_softc *);
764 static void ixl_hmc_free(struct ixl_softc *);
765 static int ixl_get_vsi(struct ixl_softc *);
766 static int ixl_set_vsi(struct ixl_softc *);
767 static void ixl_set_filter_control(struct ixl_softc *);
768 static void ixl_get_link_status(void *);
769 static int ixl_get_link_status_poll(struct ixl_softc *);
770 static int ixl_set_link_status(struct ixl_softc *,
771 const struct ixl_aq_desc *);
772 static void ixl_config_rss(struct ixl_softc *);
773 static int ixl_add_macvlan(struct ixl_softc *, const uint8_t *,
774 uint16_t, uint16_t);
775 static int ixl_remove_macvlan(struct ixl_softc *, const uint8_t *,
776 uint16_t, uint16_t);
777 static void ixl_arq(void *);
778 static void ixl_hmc_pack(void *, const void *,
779 const struct ixl_hmc_pack *, unsigned int);
780 static uint32_t ixl_rd_rx_csr(struct ixl_softc *, uint32_t);
781 static void ixl_wr_rx_csr(struct ixl_softc *, uint32_t, uint32_t);
782
783 static int ixl_match(device_t, cfdata_t, void *);
784 static void ixl_attach(device_t, device_t, void *);
785 static int ixl_detach(device_t, int);
786
787 static void ixl_media_add(struct ixl_softc *, uint64_t);
788 static int ixl_media_change(struct ifnet *);
789 static void ixl_media_status(struct ifnet *, struct ifmediareq *);
790 static void ixl_watchdog(struct ifnet *);
791 static int ixl_ioctl(struct ifnet *, u_long, void *);
792 static void ixl_start(struct ifnet *);
793 static int ixl_transmit(struct ifnet *, struct mbuf *);
794 static void ixl_deferred_transmit(void *);
795 static int ixl_intr(void *);
796 static int ixl_queue_intr(void *);
797 static int ixl_other_intr(void *);
798 static void ixl_handle_queue(void *);
799 static void ixl_sched_handle_queue(struct ixl_softc *,
800 struct ixl_queue_pair *);
801 static int ixl_init(struct ifnet *);
802 static int ixl_init_locked(struct ixl_softc *);
803 static void ixl_stop(struct ifnet *, int);
804 static void ixl_stop_locked(struct ixl_softc *);
805 static int ixl_iff(struct ixl_softc *);
806 static int ixl_ifflags_cb(struct ethercom *);
807 static int ixl_setup_interrupts(struct ixl_softc *);
808 static int ixl_establish_intx(struct ixl_softc *);
809 static int ixl_establish_msix(struct ixl_softc *);
810 static void ixl_enable_queue_intr(struct ixl_softc *,
811 struct ixl_queue_pair *);
812 static void ixl_disable_queue_intr(struct ixl_softc *,
813 struct ixl_queue_pair *);
814 static void ixl_enable_other_intr(struct ixl_softc *);
815 static void ixl_disable_other_intr(struct ixl_softc *);
816 static void ixl_config_queue_intr(struct ixl_softc *);
817 static void ixl_config_other_intr(struct ixl_softc *);
818
819 static struct ixl_tx_ring *
820 ixl_txr_alloc(struct ixl_softc *, unsigned int);
821 static void ixl_txr_qdis(struct ixl_softc *, struct ixl_tx_ring *, int);
822 static void ixl_txr_config(struct ixl_softc *, struct ixl_tx_ring *);
823 static int ixl_txr_enabled(struct ixl_softc *, struct ixl_tx_ring *);
824 static int ixl_txr_disabled(struct ixl_softc *, struct ixl_tx_ring *);
825 static void ixl_txr_unconfig(struct ixl_softc *, struct ixl_tx_ring *);
826 static void ixl_txr_clean(struct ixl_softc *, struct ixl_tx_ring *);
827 static void ixl_txr_free(struct ixl_softc *, struct ixl_tx_ring *);
828 static int ixl_txeof(struct ixl_softc *, struct ixl_tx_ring *, u_int);
829
830 static struct ixl_rx_ring *
831 ixl_rxr_alloc(struct ixl_softc *, unsigned int);
832 static void ixl_rxr_config(struct ixl_softc *, struct ixl_rx_ring *);
833 static int ixl_rxr_enabled(struct ixl_softc *, struct ixl_rx_ring *);
834 static int ixl_rxr_disabled(struct ixl_softc *, struct ixl_rx_ring *);
835 static void ixl_rxr_unconfig(struct ixl_softc *, struct ixl_rx_ring *);
836 static void ixl_rxr_clean(struct ixl_softc *, struct ixl_rx_ring *);
837 static void ixl_rxr_free(struct ixl_softc *, struct ixl_rx_ring *);
838 static int ixl_rxeof(struct ixl_softc *, struct ixl_rx_ring *, u_int);
839 static int ixl_rxfill(struct ixl_softc *, struct ixl_rx_ring *);
840
841 static struct workqueue *
842 ixl_workq_create(const char *, pri_t, int, int);
843 static void ixl_workq_destroy(struct workqueue *);
844 static int ixl_workqs_teardown(device_t);
845 static void ixl_work_set(struct ixl_work *, void (*)(void *), void *);
846 static void ixl_work_add(struct workqueue *, struct ixl_work *);
847 static void ixl_work_wait(struct workqueue *, struct ixl_work *);
848 static void ixl_workq_work(struct work *, void *);
849 static const struct ixl_product *
850 ixl_lookup(const struct pci_attach_args *pa);
851 static void ixl_link_state_update(struct ixl_softc *,
852 const struct ixl_aq_desc *);
853 static int ixl_vlan_cb(struct ethercom *, uint16_t, bool);
854 static int ixl_setup_vlan_hwfilter(struct ixl_softc *);
855 static void ixl_teardown_vlan_hwfilter(struct ixl_softc *);
856 static int ixl_update_macvlan(struct ixl_softc *);
858 static void ixl_teardown_interrupts(struct ixl_softc *);
859 static int ixl_setup_stats(struct ixl_softc *);
860 static void ixl_teardown_stats(struct ixl_softc *);
861 static void ixl_stats_callout(void *);
862 static void ixl_stats_update(void *);
863 static int ixl_setup_sysctls(struct ixl_softc *);
864 static void ixl_teardown_sysctls(struct ixl_softc *);
865 static int ixl_queue_pairs_alloc(struct ixl_softc *);
866 static void ixl_queue_pairs_free(struct ixl_softc *);
867
868 static const struct ixl_phy_type ixl_phy_type_map[] = {
869 { 1ULL << IXL_PHY_TYPE_SGMII, IFM_1000_SGMII },
870 { 1ULL << IXL_PHY_TYPE_1000BASE_KX, IFM_1000_KX },
871 { 1ULL << IXL_PHY_TYPE_10GBASE_KX4, IFM_10G_KX4 },
872 { 1ULL << IXL_PHY_TYPE_10GBASE_KR, IFM_10G_KR },
873 { 1ULL << IXL_PHY_TYPE_40GBASE_KR4, IFM_40G_KR4 },
874 { 1ULL << IXL_PHY_TYPE_XAUI |
875 1ULL << IXL_PHY_TYPE_XFI, IFM_10G_CX4 },
876 { 1ULL << IXL_PHY_TYPE_SFI, IFM_10G_SFI },
877 { 1ULL << IXL_PHY_TYPE_XLAUI |
878 1ULL << IXL_PHY_TYPE_XLPPI, IFM_40G_XLPPI },
879 { 1ULL << IXL_PHY_TYPE_40GBASE_CR4_CU |
880 1ULL << IXL_PHY_TYPE_40GBASE_CR4, IFM_40G_CR4 },
881 { 1ULL << IXL_PHY_TYPE_10GBASE_CR1_CU |
882 1ULL << IXL_PHY_TYPE_10GBASE_CR1, IFM_10G_CR1 },
883 { 1ULL << IXL_PHY_TYPE_10GBASE_AOC, IFM_10G_AOC },
884 { 1ULL << IXL_PHY_TYPE_40GBASE_AOC, IFM_40G_AOC },
885 { 1ULL << IXL_PHY_TYPE_100BASE_TX, IFM_100_TX },
886 { 1ULL << IXL_PHY_TYPE_1000BASE_T_OPTICAL |
887 1ULL << IXL_PHY_TYPE_1000BASE_T, IFM_1000_T },
888 { 1ULL << IXL_PHY_TYPE_10GBASE_T, IFM_10G_T },
889 { 1ULL << IXL_PHY_TYPE_10GBASE_SR, IFM_10G_SR },
890 { 1ULL << IXL_PHY_TYPE_10GBASE_LR, IFM_10G_LR },
891 { 1ULL << IXL_PHY_TYPE_10GBASE_SFPP_CU, IFM_10G_TWINAX },
892 { 1ULL << IXL_PHY_TYPE_40GBASE_SR4, IFM_40G_SR4 },
893 { 1ULL << IXL_PHY_TYPE_40GBASE_LR4, IFM_40G_LR4 },
894 { 1ULL << IXL_PHY_TYPE_1000BASE_SX, IFM_1000_SX },
895 { 1ULL << IXL_PHY_TYPE_1000BASE_LX, IFM_1000_LX },
896 { 1ULL << IXL_PHY_TYPE_20GBASE_KR2, IFM_20G_KR2 },
897 { 1ULL << IXL_PHY_TYPE_25GBASE_KR, IFM_25G_KR },
898 { 1ULL << IXL_PHY_TYPE_25GBASE_CR, IFM_25G_CR },
899 { 1ULL << IXL_PHY_TYPE_25GBASE_SR, IFM_25G_SR },
900 { 1ULL << IXL_PHY_TYPE_25GBASE_LR, IFM_25G_LR },
901 { 1ULL << IXL_PHY_TYPE_25GBASE_AOC, IFM_25G_AOC },
902 { 1ULL << IXL_PHY_TYPE_25GBASE_ACC, IFM_25G_CR },
903 };
904
905 static const struct ixl_speed_type ixl_speed_type_map[] = {
906 { IXL_AQ_LINK_SPEED_40GB, IF_Gbps(40) },
907 { IXL_AQ_LINK_SPEED_25GB, IF_Gbps(25) },
908 { IXL_AQ_LINK_SPEED_10GB, IF_Gbps(10) },
909 { IXL_AQ_LINK_SPEED_1000MB, IF_Mbps(1000) },
910 { IXL_AQ_LINK_SPEED_100MB, IF_Mbps(100)},
911 };
912
913 static const struct ixl_aq_regs ixl_pf_aq_regs = {
914 .atq_tail = I40E_PF_ATQT,
915 .atq_tail_mask = I40E_PF_ATQT_ATQT_MASK,
916 .atq_head = I40E_PF_ATQH,
917 .atq_head_mask = I40E_PF_ATQH_ATQH_MASK,
918 .atq_len = I40E_PF_ATQLEN,
919 .atq_bal = I40E_PF_ATQBAL,
920 .atq_bah = I40E_PF_ATQBAH,
921 .atq_len_enable = I40E_PF_ATQLEN_ATQENABLE_MASK,
922
923 .arq_tail = I40E_PF_ARQT,
924 .arq_tail_mask = I40E_PF_ARQT_ARQT_MASK,
925 .arq_head = I40E_PF_ARQH,
926 .arq_head_mask = I40E_PF_ARQH_ARQH_MASK,
927 .arq_len = I40E_PF_ARQLEN,
928 .arq_bal = I40E_PF_ARQBAL,
929 .arq_bah = I40E_PF_ARQBAH,
930 .arq_len_enable = I40E_PF_ARQLEN_ARQENABLE_MASK,
931 };
932
933 #define ixl_rd(_s, _r) \
934 bus_space_read_4((_s)->sc_memt, (_s)->sc_memh, (_r))
935 #define ixl_wr(_s, _r, _v) \
936 bus_space_write_4((_s)->sc_memt, (_s)->sc_memh, (_r), (_v))
937 #define ixl_barrier(_s, _r, _l, _o) \
938 bus_space_barrier((_s)->sc_memt, (_s)->sc_memh, (_r), (_l), (_o))
939 #define ixl_flush(_s) (void)ixl_rd((_s), I40E_GLGEN_STAT)
940 #define ixl_nqueues(_sc) (1 << ((_sc)->sc_nqueue_pairs - 1))
941
942 static inline uint32_t
943 ixl_dmamem_hi(struct ixl_dmamem *ixm)
944 {
945 uint32_t retval;
946 uint64_t val;
947
948 if (sizeof(IXL_DMA_DVA(ixm)) > 4) {
949 val = (intptr_t)IXL_DMA_DVA(ixm);
950 retval = (uint32_t)(val >> 32);
951 } else {
952 retval = 0;
953 }
954
955 return retval;
956 }
957
958 static inline uint32_t
959 ixl_dmamem_lo(struct ixl_dmamem *ixm)
960 {
961
962 return (uint32_t)IXL_DMA_DVA(ixm);
963 }
964
965 static inline void
966 ixl_aq_dva(struct ixl_aq_desc *iaq, bus_addr_t addr)
967 {
968 uint64_t val;
969
970 if (sizeof(addr) > 4) {
971 val = (intptr_t)addr;
972 iaq->iaq_param[2] = htole32(val >> 32);
973 } else {
974 iaq->iaq_param[2] = htole32(0);
975 }
976
977 iaq->iaq_param[3] = htole32(addr);
978 }
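/*
 * Worked example (illustrative): for a 64-bit bus address such as
 * 0x0000000123456780, ixl_aq_dva() above stores htole32(0x1) in iaq_param[2]
 * (the high half) and htole32(0x23456780) in iaq_param[3] (the low half);
 * with a 32-bit bus_addr_t the high half is simply written as zero.
 */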
979
980 static inline unsigned int
981 ixl_rxr_unrefreshed(unsigned int prod, unsigned int cons, unsigned int ndescs)
982 {
983 unsigned int num;
984
985 if (prod < cons)
986 num = cons - prod;
987 else
988 num = (ndescs - prod) + cons;
989
990 if (__predict_true(num > 0)) {
991 		/* the device cannot receive packets if all descriptors are filled */
992 num -= 1;
993 }
994
995 return num;
996 }
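/*
 * Worked example (illustrative): with ndescs = 1024, prod = 10 and cons = 8
 * the ring has wrapped, so num = (1024 - 10) + 8 = 1022; one slot is then
 * subtracted, giving 1021, since the device cannot receive packets once
 * every descriptor is filled (see the comment above).
 */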
997
998 CFATTACH_DECL3_NEW(ixl, sizeof(struct ixl_softc),
999 ixl_match, ixl_attach, ixl_detach, NULL, NULL, NULL,
1000 DVF_DETACH_SHUTDOWN);
1001
1002 static const struct ixl_product ixl_products[] = {
1003 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_SFP },
1004 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_KX_B },
1005 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_KX_C },
1006 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_A },
1007 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_B },
1008 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_C },
1009 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_T },
1010 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_1 },
1011 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_2 },
1012 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_T4_10G },
1013 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_BP },
1014 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_SFP28 },
1015 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_KX },
1016 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_QSFP },
1017 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_SFP },
1018 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_1G_BASET },
1019 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_10G_BASET },
1020 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_I_SFP },
1021 /* required last entry */
1022 {0, 0}
1023 };
1024
1025 static const struct ixl_product *
1026 ixl_lookup(const struct pci_attach_args *pa)
1027 {
1028 const struct ixl_product *ixlp;
1029
1030 for (ixlp = ixl_products; ixlp->vendor_id != 0; ixlp++) {
1031 if (PCI_VENDOR(pa->pa_id) == ixlp->vendor_id &&
1032 PCI_PRODUCT(pa->pa_id) == ixlp->product_id)
1033 return ixlp;
1034 }
1035
1036 return NULL;
1037 }
1038
1039 static int
1040 ixl_match(device_t parent, cfdata_t match, void *aux)
1041 {
1042 const struct pci_attach_args *pa = aux;
1043
1044 return (ixl_lookup(pa) != NULL) ? 1 : 0;
1045 }
1046
1047 static void
1048 ixl_attach(device_t parent, device_t self, void *aux)
1049 {
1050 struct ixl_softc *sc;
1051 struct pci_attach_args *pa = aux;
1052 struct ifnet *ifp;
1053 pcireg_t memtype;
1054 uint32_t firstq, port, ari, func;
1055 uint64_t phy_types = 0;
1056 char xnamebuf[32];
1057 int tries, rv;
1058
1059 sc = device_private(self);
1060 sc->sc_dev = self;
1061 ifp = &sc->sc_ec.ec_if;
1062
1063 sc->sc_pa = *pa;
1064 sc->sc_dmat = (pci_dma64_available(pa)) ?
1065 pa->pa_dmat64 : pa->pa_dmat;
1066 sc->sc_aq_regs = &ixl_pf_aq_regs;
1067
1068 sc->sc_mac_type = ixl_mactype(PCI_PRODUCT(pa->pa_id));
1069
1070 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IXL_PCIREG);
1071 if (pci_mapreg_map(pa, IXL_PCIREG, memtype, 0,
1072 &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems)) {
1073 aprint_error(": unable to map registers\n");
1074 return;
1075 }
1076
1077 mutex_init(&sc->sc_cfg_lock, MUTEX_DEFAULT, IPL_SOFTNET);
1078
1079 firstq = ixl_rd(sc, I40E_PFLAN_QALLOC);
1080 firstq &= I40E_PFLAN_QALLOC_FIRSTQ_MASK;
1081 firstq >>= I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
1082 sc->sc_base_queue = firstq;
1083
1084 ixl_clear_hw(sc);
1085 if (ixl_pf_reset(sc) == -1) {
1086 		/* error printed by ixl_pf_reset */
1087 goto unmap;
1088 }
1089
1090 port = ixl_rd(sc, I40E_PFGEN_PORTNUM);
1091 port &= I40E_PFGEN_PORTNUM_PORT_NUM_MASK;
1092 port >>= I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
1093 sc->sc_port = port;
1094 aprint_normal(": port %u", sc->sc_port);
1095
1096 ari = ixl_rd(sc, I40E_GLPCI_CAPSUP);
1097 ari &= I40E_GLPCI_CAPSUP_ARI_EN_MASK;
1098 ari >>= I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
1099
1100 func = ixl_rd(sc, I40E_PF_FUNC_RID);
1101 sc->sc_pf_id = func & (ari ? 0xff : 0x7);
1102
1103 /* initialise the adminq */
1104
1105 mutex_init(&sc->sc_atq_lock, MUTEX_DEFAULT, IPL_NET);
1106
1107 if (ixl_dmamem_alloc(sc, &sc->sc_atq,
1108 sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
1109 aprint_error("\n" "%s: unable to allocate atq\n",
1110 device_xname(self));
1111 goto unmap;
1112 }
1113
1114 SIMPLEQ_INIT(&sc->sc_arq_idle);
1115 ixl_work_set(&sc->sc_arq_task, ixl_arq, sc);
1116 sc->sc_arq_cons = 0;
1117 sc->sc_arq_prod = 0;
1118
1119 if (ixl_dmamem_alloc(sc, &sc->sc_arq,
1120 sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
1121 aprint_error("\n" "%s: unable to allocate arq\n",
1122 device_xname(self));
1123 goto free_atq;
1124 }
1125
1126 if (!ixl_arq_fill(sc)) {
1127 aprint_error("\n" "%s: unable to fill arq descriptors\n",
1128 device_xname(self));
1129 goto free_arq;
1130 }
1131
1132 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1133 0, IXL_DMA_LEN(&sc->sc_atq),
1134 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1135
1136 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1137 0, IXL_DMA_LEN(&sc->sc_arq),
1138 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1139
1140 for (tries = 0; tries < 10; tries++) {
1141 sc->sc_atq_cons = 0;
1142 sc->sc_atq_prod = 0;
1143
1144 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1145 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1146 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1147 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1148
1149 ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
1150
1151 ixl_wr(sc, sc->sc_aq_regs->atq_bal,
1152 ixl_dmamem_lo(&sc->sc_atq));
1153 ixl_wr(sc, sc->sc_aq_regs->atq_bah,
1154 ixl_dmamem_hi(&sc->sc_atq));
1155 ixl_wr(sc, sc->sc_aq_regs->atq_len,
1156 sc->sc_aq_regs->atq_len_enable | IXL_AQ_NUM);
1157
1158 ixl_wr(sc, sc->sc_aq_regs->arq_bal,
1159 ixl_dmamem_lo(&sc->sc_arq));
1160 ixl_wr(sc, sc->sc_aq_regs->arq_bah,
1161 ixl_dmamem_hi(&sc->sc_arq));
1162 ixl_wr(sc, sc->sc_aq_regs->arq_len,
1163 sc->sc_aq_regs->arq_len_enable | IXL_AQ_NUM);
1164
1165 rv = ixl_get_version(sc);
1166 if (rv == 0)
1167 break;
1168 if (rv != ETIMEDOUT) {
1169 aprint_error(", unable to get firmware version\n");
1170 goto shutdown;
1171 }
1172
1173 delaymsec(100);
1174 }
1175
1176 ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
1177
1178 if (sc->sc_mac_type == I40E_MAC_X722)
1179 sc->sc_nqueue_pairs_device = 128;
1180 else
1181 sc->sc_nqueue_pairs_device = 64;
1182
1183 rv = ixl_get_hw_capabilities(sc);
1184 if (rv != 0) {
1185 aprint_error(", GET HW CAPABILITIES %s\n",
1186 rv == ETIMEDOUT ? "timeout" : "error");
1187 goto shutdown;
1188 }
1189
1190 sc->sc_nqueue_pairs_max = MIN((int)sc->sc_nqueue_pairs_device, ncpu);
1191 if (ixl_param_nqps_limit > 0) {
1192 sc->sc_nqueue_pairs_max = MIN((int)sc->sc_nqueue_pairs_max,
1193 ixl_param_nqps_limit);
1194 }
1195
1196 sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max;
1197 sc->sc_tx_ring_ndescs = ixl_param_tx_ndescs;
1198 sc->sc_rx_ring_ndescs = ixl_param_rx_ndescs;
1199
1200 KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_rx_ring_ndescs);
1201 KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_tx_ring_ndescs);
1202
1203 if (ixl_get_mac(sc) != 0) {
1204 /* error printed by ixl_get_mac */
1205 goto shutdown;
1206 }
1207
1208 aprint_normal("\n");
1209 aprint_naive("\n");
1210
1211 aprint_normal_dev(self, "Ethernet address %s\n",
1212 ether_sprintf(sc->sc_enaddr));
1213
1214 rv = ixl_pxe_clear(sc);
1215 if (rv != 0) {
1216 aprint_debug_dev(self, "CLEAR PXE MODE %s\n",
1217 rv == ETIMEDOUT ? "timeout" : "error");
1218 }
1219
1220 ixl_set_filter_control(sc);
1221
1222 if (ixl_hmc(sc) != 0) {
1223 /* error printed by ixl_hmc */
1224 goto shutdown;
1225 }
1226
1227 if (ixl_lldp_shut(sc) != 0) {
1228 /* error printed by ixl_lldp_shut */
1229 goto free_hmc;
1230 }
1231
1232 if (ixl_phy_mask_ints(sc) != 0) {
1233 /* error printed by ixl_phy_mask_ints */
1234 goto free_hmc;
1235 }
1236
1237 if (ixl_restart_an(sc) != 0) {
1238 /* error printed by ixl_restart_an */
1239 goto free_hmc;
1240 }
1241
1242 if (ixl_get_switch_config(sc) != 0) {
1243 /* error printed by ixl_get_switch_config */
1244 goto free_hmc;
1245 }
1246
1247 if (ixl_get_phy_types(sc, &phy_types) != 0) {
1248 /* error printed by ixl_get_phy_abilities */
1249 goto free_hmc;
1250 }
1251
1252 rv = ixl_get_link_status_poll(sc);
1253 if (rv != 0) {
1254 aprint_error_dev(self, "GET LINK STATUS %s\n",
1255 rv == ETIMEDOUT ? "timeout" : "error");
1256 goto free_hmc;
1257 }
1258
1259 if (ixl_dmamem_alloc(sc, &sc->sc_scratch,
1260 sizeof(struct ixl_aq_vsi_data), 8) != 0) {
1261 aprint_error_dev(self, "unable to allocate scratch buffer\n");
1262 goto free_hmc;
1263 }
1264
1265 rv = ixl_get_vsi(sc);
1266 if (rv != 0) {
1267 aprint_error_dev(self, "GET VSI %s %d\n",
1268 rv == ETIMEDOUT ? "timeout" : "error", rv);
1269 goto free_scratch;
1270 }
1271
1272 rv = ixl_set_vsi(sc);
1273 if (rv != 0) {
1274 		aprint_error_dev(self, "UPDATE VSI %s %d\n",
1275 rv == ETIMEDOUT ? "timeout" : "error", rv);
1276 goto free_scratch;
1277 }
1278
1279 if (ixl_queue_pairs_alloc(sc) != 0) {
1280 /* error printed by ixl_queue_pairs_alloc */
1281 goto free_scratch;
1282 }
1283
1284 if (ixl_setup_interrupts(sc) != 0) {
1285 /* error printed by ixl_setup_interrupts */
1286 goto free_queue_pairs;
1287 }
1288
1289 if (ixl_setup_stats(sc) != 0) {
1290 aprint_error_dev(self, "failed to setup event counters\n");
1291 goto teardown_intrs;
1292 }
1293
1294 if (ixl_setup_sysctls(sc) != 0) {
1295 /* error printed by ixl_setup_sysctls */
1296 goto teardown_stats;
1297 }
1298
1299 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_cfg", device_xname(self));
1300 sc->sc_workq = ixl_workq_create(xnamebuf, IXL_WORKQUEUE_PRI,
1301 IPL_NET, WQ_PERCPU | WQ_MPSAFE);
1302 if (sc->sc_workq == NULL)
1303 goto teardown_sysctls;
1304
1305 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_txrx", device_xname(self));
1306 sc->sc_workq_txrx = ixl_workq_create(xnamebuf, IXL_WORKQUEUE_PRI,
1307 IPL_NET, WQ_PERCPU | WQ_MPSAFE);
1308 if (sc->sc_workq_txrx == NULL)
1309 goto teardown_wqs;
1310
1311 snprintf(xnamebuf, sizeof(xnamebuf), "%s_atq_cv", device_xname(self));
1312 cv_init(&sc->sc_atq_cv, xnamebuf);
1313
1314 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
1315
1316 ifp->if_softc = sc;
1317 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1318 ifp->if_extflags = IFEF_MPSAFE;
1319 ifp->if_ioctl = ixl_ioctl;
1320 ifp->if_start = ixl_start;
1321 ifp->if_transmit = ixl_transmit;
1322 ifp->if_watchdog = ixl_watchdog;
1323 ifp->if_init = ixl_init;
1324 ifp->if_stop = ixl_stop;
1325 IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_ndescs);
1326 IFQ_SET_READY(&ifp->if_snd);
1327 ifp->if_capabilities |= IXL_IFCAP_RXCSUM;
1328 #if 0
1329 ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_UDPv4_Tx;
1330 #endif
1331 ether_set_vlan_cb(&sc->sc_ec, ixl_vlan_cb);
1332 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
1333 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
1334 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
1335
1336 sc->sc_ec.ec_capenable = sc->sc_ec.ec_capabilities;
1337 /* Disable VLAN_HWFILTER by default */
1338 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
1339
1340 sc->sc_cur_ec_capenable = sc->sc_ec.ec_capenable;
1341
1342 sc->sc_ec.ec_ifmedia = &sc->sc_media;
1343 ifmedia_init(&sc->sc_media, IFM_IMASK, ixl_media_change,
1344 ixl_media_status);
1345
1346 ixl_media_add(sc, phy_types);
1347 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1348 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
1349
1350 if_attach(ifp);
1351 if_deferred_start_init(ifp, NULL);
1352 ether_ifattach(ifp, sc->sc_enaddr);
1353 ether_set_ifflags_cb(&sc->sc_ec, ixl_ifflags_cb);
1354
1355 (void)ixl_get_link_status_poll(sc);
1356 ixl_work_set(&sc->sc_link_state_task, ixl_get_link_status, sc);
1357
1358 ixl_config_other_intr(sc);
1359 ixl_enable_other_intr(sc);
1360
1361 /* remove default mac filter and replace it so we can see vlans */
1362 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, 0, 0);
1363 if (rv != ENOENT) {
1364 aprint_debug_dev(self,
1365 "unable to remove macvlan %u\n", rv);
1366 }
1367 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
1368 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1369 if (rv != ENOENT) {
1370 aprint_debug_dev(self,
1371 "unable to remove macvlan, ignore vlan %u\n", rv);
1372 }
1373
1374 if (ixl_update_macvlan(sc) != 0) {
1375 aprint_debug_dev(self,
1376 "couldn't enable vlan hardware filter\n");
1377 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
1378 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER);
1379 }
1380
1381 sc->sc_txrx_workqueue = true;
1382 sc->sc_tx_process_limit = IXL_TX_PROCESS_LIMIT;
1383 sc->sc_rx_process_limit = IXL_RX_PROCESS_LIMIT;
1384 sc->sc_tx_intr_process_limit = IXL_TX_INTR_PROCESS_LIMIT;
1385 sc->sc_rx_intr_process_limit = IXL_RX_INTR_PROCESS_LIMIT;
1386
1387 ixl_stats_update(sc);
1388 sc->sc_stats_counters.isc_has_offset = true;
1389 callout_schedule(&sc->sc_stats_callout, mstohz(sc->sc_stats_intval));
1390
1391 if (pmf_device_register(self, NULL, NULL) != true)
1392 aprint_debug_dev(self, "couldn't establish power handler\n");
1393 sc->sc_attached = true;
1394 return;
1395
1396 teardown_wqs:
1397 config_finalize_register(self, ixl_workqs_teardown);
1398 teardown_sysctls:
1399 ixl_teardown_sysctls(sc);
1400 teardown_stats:
1401 ixl_teardown_stats(sc);
1402 teardown_intrs:
1403 ixl_teardown_interrupts(sc);
1404 free_queue_pairs:
1405 ixl_queue_pairs_free(sc);
1406 free_scratch:
1407 ixl_dmamem_free(sc, &sc->sc_scratch);
1408 free_hmc:
1409 ixl_hmc_free(sc);
1410 shutdown:
1411 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1412 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1413 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1414 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1415
1416 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0);
1417 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0);
1418 ixl_wr(sc, sc->sc_aq_regs->atq_len, 0);
1419
1420 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0);
1421 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0);
1422 ixl_wr(sc, sc->sc_aq_regs->arq_len, 0);
1423
1424 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1425 0, IXL_DMA_LEN(&sc->sc_arq),
1426 BUS_DMASYNC_POSTREAD);
1427 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1428 0, IXL_DMA_LEN(&sc->sc_atq),
1429 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1430
1431 ixl_arq_unfill(sc);
1432 free_arq:
1433 ixl_dmamem_free(sc, &sc->sc_arq);
1434 free_atq:
1435 ixl_dmamem_free(sc, &sc->sc_atq);
1436 unmap:
1437 mutex_destroy(&sc->sc_atq_lock);
1438 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
1439 mutex_destroy(&sc->sc_cfg_lock);
1440 sc->sc_mems = 0;
1441
1442 sc->sc_attached = false;
1443 }
1444
1445 static int
1446 ixl_detach(device_t self, int flags)
1447 {
1448 struct ixl_softc *sc = device_private(self);
1449 struct ifnet *ifp = &sc->sc_ec.ec_if;
1450
1451 if (!sc->sc_attached)
1452 return 0;
1453
1454 ixl_stop(ifp, 1);
1455
1456 ixl_disable_other_intr(sc);
1457
1458 callout_stop(&sc->sc_stats_callout);
1459 ixl_work_wait(sc->sc_workq, &sc->sc_stats_task);
1460
1461 /* wait for ATQ handler */
1462 mutex_enter(&sc->sc_atq_lock);
1463 mutex_exit(&sc->sc_atq_lock);
1464
1465 ixl_work_wait(sc->sc_workq, &sc->sc_arq_task);
1466 ixl_work_wait(sc->sc_workq, &sc->sc_link_state_task);
1467
1468 if (sc->sc_workq != NULL) {
1469 ixl_workq_destroy(sc->sc_workq);
1470 sc->sc_workq = NULL;
1471 }
1472
1473 if (sc->sc_workq_txrx != NULL) {
1474 ixl_workq_destroy(sc->sc_workq_txrx);
1475 sc->sc_workq_txrx = NULL;
1476 }
1477
1478 ifmedia_delete_instance(&sc->sc_media, IFM_INST_ANY);
1479 ether_ifdetach(ifp);
1480 if_detach(ifp);
1481
1482 ixl_teardown_interrupts(sc);
1483 ixl_teardown_stats(sc);
1484 ixl_teardown_sysctls(sc);
1485
1486 ixl_queue_pairs_free(sc);
1487
1488 ixl_dmamem_free(sc, &sc->sc_scratch);
1489 ixl_hmc_free(sc);
1490
1491 /* shutdown */
1492 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1493 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1494 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1495 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1496
1497 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0);
1498 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0);
1499 ixl_wr(sc, sc->sc_aq_regs->atq_len, 0);
1500
1501 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0);
1502 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0);
1503 ixl_wr(sc, sc->sc_aq_regs->arq_len, 0);
1504
1505 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1506 0, IXL_DMA_LEN(&sc->sc_arq),
1507 BUS_DMASYNC_POSTREAD);
1508 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1509 0, IXL_DMA_LEN(&sc->sc_atq),
1510 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1511
1512 ixl_arq_unfill(sc);
1513
1514 ixl_dmamem_free(sc, &sc->sc_arq);
1515 ixl_dmamem_free(sc, &sc->sc_atq);
1516
1517 cv_destroy(&sc->sc_atq_cv);
1518 mutex_destroy(&sc->sc_atq_lock);
1519
1520 if (sc->sc_mems != 0) {
1521 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
1522 sc->sc_mems = 0;
1523 }
1524
1525 mutex_destroy(&sc->sc_cfg_lock);
1526
1527 return 0;
1528 }
1529
1530 static int
1531 ixl_workqs_teardown(device_t self)
1532 {
1533 struct ixl_softc *sc = device_private(self);
1534
1535 if (sc->sc_workq != NULL) {
1536 ixl_workq_destroy(sc->sc_workq);
1537 sc->sc_workq = NULL;
1538 }
1539
1540 if (sc->sc_workq_txrx != NULL) {
1541 ixl_workq_destroy(sc->sc_workq_txrx);
1542 sc->sc_workq_txrx = NULL;
1543 }
1544
1545 return 0;
1546 }
1547
1548 static int
1549 ixl_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
1550 {
1551 struct ifnet *ifp = &ec->ec_if;
1552 struct ixl_softc *sc = ifp->if_softc;
1553 int rv;
1554
1555 if (!ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)) {
1556 return 0;
1557 }
1558
1559 if (set) {
1560 rv = ixl_add_macvlan(sc, sc->sc_enaddr, vid,
1561 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
1562 if (rv == 0) {
1563 rv = ixl_add_macvlan(sc, etherbroadcastaddr,
1564 vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
1565 }
1566 } else {
1567 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, vid,
1568 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
1569 (void)ixl_remove_macvlan(sc, etherbroadcastaddr, vid,
1570 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
1571 }
1572
1573 return rv;
1574 }
1575
1576 static void
1577 ixl_media_add(struct ixl_softc *sc, uint64_t phy_types)
1578 {
1579 struct ifmedia *ifm = &sc->sc_media;
1580 const struct ixl_phy_type *itype;
1581 unsigned int i;
1582
1583 for (i = 0; i < __arraycount(ixl_phy_type_map); i++) {
1584 itype = &ixl_phy_type_map[i];
1585
1586 if (ISSET(phy_types, itype->phy_type)) {
1587 ifmedia_add(ifm,
1588 IFM_ETHER | IFM_FDX | itype->ifm_type, 0, NULL);
1589
1590 if (itype->ifm_type == IFM_100_TX) {
1591 ifmedia_add(ifm, IFM_ETHER | itype->ifm_type,
1592 0, NULL);
1593 }
1594 }
1595 }
1596 }
1597
1598 static void
1599 ixl_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1600 {
1601 struct ixl_softc *sc = ifp->if_softc;
1602
1603 ifmr->ifm_status = sc->sc_media_status;
1604 ifmr->ifm_active = sc->sc_media_active;
1605
1606 mutex_enter(&sc->sc_cfg_lock);
1607 if (ifp->if_link_state == LINK_STATE_UP)
1608 SET(ifmr->ifm_status, IFM_ACTIVE);
1609 mutex_exit(&sc->sc_cfg_lock);
1610 }
1611
1612 static int
1613 ixl_media_change(struct ifnet *ifp)
1614 {
1615
1616 return 0;
1617 }
1618
1619 static void
1620 ixl_watchdog(struct ifnet *ifp)
1621 {
1622
1623 }
1624
1625 static void
1626 ixl_del_all_multiaddr(struct ixl_softc *sc)
1627 {
1628 struct ethercom *ec = &sc->sc_ec;
1629 struct ether_multi *enm;
1630 struct ether_multistep step;
1631
1632 ETHER_LOCK(ec);
1633 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1634 ETHER_NEXT_MULTI(step, enm)) {
1635 ixl_remove_macvlan(sc, enm->enm_addrlo, 0,
1636 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1637 }
1638 ETHER_UNLOCK(ec);
1639 }
1640
1641 static int
1642 ixl_add_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
1643 {
1644 struct ifnet *ifp = &sc->sc_ec.ec_if;
1645 int rv;
1646
1647 if (ISSET(ifp->if_flags, IFF_ALLMULTI))
1648 return 0;
1649
1650 if (memcmp(addrlo, addrhi, ETHER_ADDR_LEN) != 0) {
1651 ixl_del_all_multiaddr(sc);
1652 SET(ifp->if_flags, IFF_ALLMULTI);
1653 return ENETRESET;
1654 }
1655
1656 	/* multicast addresses cannot use VLAN HWFILTER */
1657 rv = ixl_add_macvlan(sc, addrlo, 0,
1658 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
1659
1660 if (rv == ENOSPC) {
1661 ixl_del_all_multiaddr(sc);
1662 SET(ifp->if_flags, IFF_ALLMULTI);
1663 return ENETRESET;
1664 }
1665
1666 return rv;
1667 }
1668
1669 static int
1670 ixl_del_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
1671 {
1672 struct ifnet *ifp = &sc->sc_ec.ec_if;
1673 struct ethercom *ec = &sc->sc_ec;
1674 struct ether_multi *enm, *enm_last;
1675 struct ether_multistep step;
1676 int error, rv = 0;
1677
1678 if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
1679 ixl_remove_macvlan(sc, addrlo, 0,
1680 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1681 return 0;
1682 }
1683
1684 ETHER_LOCK(ec);
1685 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1686 ETHER_NEXT_MULTI(step, enm)) {
1687 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
1688 ETHER_ADDR_LEN) != 0) {
1689 goto out;
1690 }
1691 }
1692
1693 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1694 ETHER_NEXT_MULTI(step, enm)) {
1695 error = ixl_add_macvlan(sc, enm->enm_addrlo, 0,
1696 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
1697 if (error != 0)
1698 break;
1699 }
1700
1701 if (enm != NULL) {
1702 enm_last = enm;
1703 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1704 ETHER_NEXT_MULTI(step, enm)) {
1705 if (enm == enm_last)
1706 break;
1707
1708 ixl_remove_macvlan(sc, enm->enm_addrlo, 0,
1709 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1710 }
1711 } else {
1712 CLR(ifp->if_flags, IFF_ALLMULTI);
1713 rv = ENETRESET;
1714 }
1715
1716 out:
1717 ETHER_UNLOCK(ec);
1718 return rv;
1719 }
1720
1721 static int
1722 ixl_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1723 {
1724 struct ifreq *ifr = (struct ifreq *)data;
1725 struct ixl_softc *sc = (struct ixl_softc *)ifp->if_softc;
1726 struct ixl_tx_ring *txr;
1727 struct ixl_rx_ring *rxr;
1728 const struct sockaddr *sa;
1729 uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
1730 int s, error = 0;
1731 unsigned int i;
1732
1733 switch (cmd) {
1734 case SIOCADDMULTI:
1735 sa = ifreq_getaddr(SIOCADDMULTI, ifr);
1736 if (ether_addmulti(sa, &sc->sc_ec) == ENETRESET) {
1737 error = ether_multiaddr(sa, addrlo, addrhi);
1738 if (error != 0)
1739 return error;
1740
1741 error = ixl_add_multi(sc, addrlo, addrhi);
1742 if (error != 0 && error != ENETRESET) {
1743 ether_delmulti(sa, &sc->sc_ec);
1744 error = EIO;
1745 }
1746 }
1747 break;
1748
1749 case SIOCDELMULTI:
1750 sa = ifreq_getaddr(SIOCDELMULTI, ifr);
1751 if (ether_delmulti(sa, &sc->sc_ec) == ENETRESET) {
1752 error = ether_multiaddr(sa, addrlo, addrhi);
1753 if (error != 0)
1754 return error;
1755
1756 error = ixl_del_multi(sc, addrlo, addrhi);
1757 }
1758 break;
1759
1760 case SIOCGIFDATA:
1761 case SIOCZIFDATA:
1762 ifp->if_ipackets = 0;
1763 ifp->if_ibytes = 0;
1764 ifp->if_iqdrops = 0;
1765 ifp->if_ierrors = 0;
1766 ifp->if_opackets = 0;
1767 ifp->if_obytes = 0;
1768 ifp->if_omcasts = 0;
1769
1770 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
1771 txr = sc->sc_qps[i].qp_txr;
1772 rxr = sc->sc_qps[i].qp_rxr;
1773
1774 mutex_enter(&rxr->rxr_lock);
1775 ifp->if_ipackets += rxr->rxr_ipackets;
1776 ifp->if_ibytes += rxr->rxr_ibytes;
1777 ifp->if_iqdrops += rxr->rxr_iqdrops;
1778 ifp->if_ierrors += rxr->rxr_ierrors;
1779 if (cmd == SIOCZIFDATA) {
1780 rxr->rxr_ipackets = 0;
1781 rxr->rxr_ibytes = 0;
1782 rxr->rxr_iqdrops = 0;
1783 rxr->rxr_ierrors = 0;
1784 }
1785 mutex_exit(&rxr->rxr_lock);
1786
1787 mutex_enter(&txr->txr_lock);
1788 ifp->if_opackets += txr->txr_opackets;
1789 ifp->if_obytes += txr->txr_obytes;
1790 ifp->if_omcasts += txr->txr_omcasts;
1791 if (cmd == SIOCZIFDATA) {
1792 txr->txr_opackets = 0;
1793 txr->txr_obytes = 0;
1794 txr->txr_omcasts = 0;
1795 }
1796 mutex_exit(&txr->txr_lock);
1797 }
1798 /* FALLTHROUGH */
1799 default:
1800 s = splnet();
1801 error = ether_ioctl(ifp, cmd, data);
1802 splx(s);
1803 }
1804
1805 if (error == ENETRESET)
1806 error = ixl_iff(sc);
1807
1808 return error;
1809 }
1810
1811 static enum i40e_mac_type
1812 ixl_mactype(pci_product_id_t id)
1813 {
1814
1815 switch (id) {
1816 case PCI_PRODUCT_INTEL_XL710_SFP:
1817 case PCI_PRODUCT_INTEL_XL710_KX_B:
1818 case PCI_PRODUCT_INTEL_XL710_KX_C:
1819 case PCI_PRODUCT_INTEL_XL710_QSFP_A:
1820 case PCI_PRODUCT_INTEL_XL710_QSFP_B:
1821 case PCI_PRODUCT_INTEL_XL710_QSFP_C:
1822 case PCI_PRODUCT_INTEL_X710_10G_T:
1823 case PCI_PRODUCT_INTEL_XL710_20G_BP_1:
1824 case PCI_PRODUCT_INTEL_XL710_20G_BP_2:
1825 case PCI_PRODUCT_INTEL_X710_T4_10G:
1826 case PCI_PRODUCT_INTEL_XXV710_25G_BP:
1827 case PCI_PRODUCT_INTEL_XXV710_25G_SFP28:
1828 return I40E_MAC_XL710;
1829
1830 case PCI_PRODUCT_INTEL_X722_KX:
1831 case PCI_PRODUCT_INTEL_X722_QSFP:
1832 case PCI_PRODUCT_INTEL_X722_SFP:
1833 case PCI_PRODUCT_INTEL_X722_1G_BASET:
1834 case PCI_PRODUCT_INTEL_X722_10G_BASET:
1835 case PCI_PRODUCT_INTEL_X722_I_SFP:
1836 return I40E_MAC_X722;
1837 }
1838
1839 return I40E_MAC_GENERIC;
1840 }
1841
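/*
 * ixl_hmc_kva: return the kernel virtual address of the i'th object of
 * the given HMC (Host Memory Cache) type, or NULL if i is out of range.
 */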
1842 static inline void *
1843 ixl_hmc_kva(struct ixl_softc *sc, enum ixl_hmc_types type, unsigned int i)
1844 {
1845 uint8_t *kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
1846 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
1847
1848 if (i >= e->hmc_count)
1849 return NULL;
1850
1851 kva += e->hmc_base;
1852 kva += i * e->hmc_size;
1853
1854 return kva;
1855 }
1856
1857 static inline size_t
1858 ixl_hmc_len(struct ixl_softc *sc, enum ixl_hmc_types type)
1859 {
1860 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
1861
1862 return e->hmc_size;
1863 }
1864
1865 static void
1866 ixl_enable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp)
1867 {
1868 struct ixl_rx_ring *rxr = qp->qp_rxr;
1869
1870 ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid),
1871 I40E_PFINT_DYN_CTLN_INTENA_MASK |
1872 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1873 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
1874 ixl_flush(sc);
1875 }
1876
1877 static void
1878 ixl_disable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp)
1879 {
1880 struct ixl_rx_ring *rxr = qp->qp_rxr;
1881
1882 ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid),
1883 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
1884 ixl_flush(sc);
1885 }
1886
1887 static void
1888 ixl_enable_other_intr(struct ixl_softc *sc)
1889 {
1890
1891 ixl_wr(sc, I40E_PFINT_DYN_CTL0,
1892 I40E_PFINT_DYN_CTL0_INTENA_MASK |
1893 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
1894 (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
1895 ixl_flush(sc);
1896 }
1897
1898 static void
1899 ixl_disable_other_intr(struct ixl_softc *sc)
1900 {
1901
1902 ixl_wr(sc, I40E_PFINT_DYN_CTL0,
1903 (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
1904 ixl_flush(sc);
1905 }
1906
1907 static int
1908 ixl_reinit(struct ixl_softc *sc)
1909 {
1910 struct ixl_rx_ring *rxr;
1911 struct ixl_tx_ring *txr;
1912 unsigned int i;
1913 uint32_t reg;
1914
1915 KASSERT(mutex_owned(&sc->sc_cfg_lock));
1916
1917 if (ixl_get_vsi(sc) != 0)
1918 return EIO;
1919
1920 if (ixl_set_vsi(sc) != 0)
1921 return EIO;
1922
1923 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1924 txr = sc->sc_qps[i].qp_txr;
1925 rxr = sc->sc_qps[i].qp_rxr;
1926
1927 txr->txr_cons = txr->txr_prod = 0;
1928 rxr->rxr_cons = rxr->rxr_prod = 0;
1929
1930 ixl_txr_config(sc, txr);
1931 ixl_rxr_config(sc, rxr);
1932 }
1933
1934 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
1935 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_PREWRITE);
1936
1937 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1938 txr = sc->sc_qps[i].qp_txr;
1939 rxr = sc->sc_qps[i].qp_rxr;
1940
1941 ixl_wr(sc, I40E_QTX_CTL(i), I40E_QTX_CTL_PF_QUEUE |
1942 (sc->sc_pf_id << I40E_QTX_CTL_PF_INDX_SHIFT));
1943 ixl_flush(sc);
1944
1945 ixl_wr(sc, txr->txr_tail, txr->txr_prod);
1946 ixl_wr(sc, rxr->rxr_tail, rxr->rxr_prod);
1947
1948 /* ixl_rxfill() needs lock held */
1949 mutex_enter(&rxr->rxr_lock);
1950 ixl_rxfill(sc, rxr);
1951 mutex_exit(&rxr->rxr_lock);
1952
1953 reg = ixl_rd(sc, I40E_QRX_ENA(i));
1954 SET(reg, I40E_QRX_ENA_QENA_REQ_MASK);
1955 ixl_wr(sc, I40E_QRX_ENA(i), reg);
1956 if (ixl_rxr_enabled(sc, rxr) != 0)
1957 goto stop;
1958
1959 ixl_txr_qdis(sc, txr, 1);
1960
1961 reg = ixl_rd(sc, I40E_QTX_ENA(i));
1962 SET(reg, I40E_QTX_ENA_QENA_REQ_MASK);
1963 ixl_wr(sc, I40E_QTX_ENA(i), reg);
1964
1965 if (ixl_txr_enabled(sc, txr) != 0)
1966 goto stop;
1967 }
1968
1969 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
1970 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE);
1971
1972 return 0;
1973
1974 stop:
1975 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
1976 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE);
1977
1978 return ETIMEDOUT;
1979 }
1980
1981 static int
1982 ixl_init_locked(struct ixl_softc *sc)
1983 {
1984 struct ifnet *ifp = &sc->sc_ec.ec_if;
1985 unsigned int i;
1986 int error, eccap_change;
1987
1988 KASSERT(mutex_owned(&sc->sc_cfg_lock));
1989
1990 if (ISSET(ifp->if_flags, IFF_RUNNING))
1991 ixl_stop_locked(sc);
1992
1993 if (sc->sc_dead) {
1994 return ENXIO;
1995 }
1996
1997 eccap_change = sc->sc_ec.ec_capenable ^ sc->sc_cur_ec_capenable;
1998 if (ISSET(eccap_change, ETHERCAP_VLAN_HWTAGGING))
1999 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWTAGGING;
2000
2001 if (ISSET(eccap_change, ETHERCAP_VLAN_HWFILTER)) {
2002 if (ixl_update_macvlan(sc) == 0) {
2003 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWFILTER;
2004 } else {
2005 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
2006 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER);
2007 }
2008 }
2009
2010 if (sc->sc_intrtype != PCI_INTR_TYPE_MSIX)
2011 sc->sc_nqueue_pairs = 1;
2012 else
2013 sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max;
2014
2015 error = ixl_reinit(sc);
2016 if (error) {
2017 ixl_stop_locked(sc);
2018 return error;
2019 }
2020
2021 SET(ifp->if_flags, IFF_RUNNING);
2022 CLR(ifp->if_flags, IFF_OACTIVE);
2023
2024 (void)ixl_get_link_status(sc);
2025
2026 ixl_config_rss(sc);
2027 ixl_config_queue_intr(sc);
2028
2029 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2030 ixl_enable_queue_intr(sc, &sc->sc_qps[i]);
2031 }
2032
2033 error = ixl_iff(sc);
2034 if (error) {
2035 ixl_stop_locked(sc);
2036 return error;
2037 }
2038
2039 return 0;
2040 }
2041
2042 static int
2043 ixl_init(struct ifnet *ifp)
2044 {
2045 struct ixl_softc *sc = ifp->if_softc;
2046 int error;
2047
2048 mutex_enter(&sc->sc_cfg_lock);
2049 error = ixl_init_locked(sc);
2050 mutex_exit(&sc->sc_cfg_lock);
2051
2052 return error;
2053 }
2054
2055 static int
2056 ixl_iff(struct ixl_softc *sc)
2057 {
2058 struct ifnet *ifp = &sc->sc_ec.ec_if;
2059 struct ixl_atq iatq;
2060 struct ixl_aq_desc *iaq;
2061 struct ixl_aq_vsi_promisc_param *param;
2062 uint16_t flag_add, flag_del;
2063 int error;
2064
2065 if (!ISSET(ifp->if_flags, IFF_RUNNING))
2066 return 0;
2067
2068 memset(&iatq, 0, sizeof(iatq));
2069
2070 iaq = &iatq.iatq_desc;
2071 iaq->iaq_opcode = htole16(IXL_AQ_OP_SET_VSI_PROMISC);
2072
2073 param = (struct ixl_aq_vsi_promisc_param *)&iaq->iaq_param;
2074 param->flags = htole16(0);
2075
2076 if (!ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)
2077 || ISSET(ifp->if_flags, IFF_PROMISC)) {
2078 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_BCAST |
2079 IXL_AQ_VSI_PROMISC_FLAG_VLAN);
2080 }
2081
2082 if (ISSET(ifp->if_flags, IFF_PROMISC)) {
2083 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
2084 IXL_AQ_VSI_PROMISC_FLAG_MCAST);
2085 } else if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
2086 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_MCAST);
2087 }
2088 param->valid_flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
2089 IXL_AQ_VSI_PROMISC_FLAG_MCAST | IXL_AQ_VSI_PROMISC_FLAG_BCAST |
2090 IXL_AQ_VSI_PROMISC_FLAG_VLAN);
2091 param->seid = sc->sc_seid;
2092
2093 error = ixl_atq_exec(sc, &iatq);
2094 if (error)
2095 return error;
2096
2097 if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK))
2098 return EIO;
2099
2100 if (memcmp(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN) != 0) {
2101 if (ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)) {
2102 flag_add = IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH;
2103 flag_del = IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH;
2104 } else {
2105 flag_add = IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN;
2106 flag_del = IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN;
2107 }
2108
2109 ixl_remove_macvlan(sc, sc->sc_enaddr, 0, flag_del);
2110
2111 memcpy(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN);
2112 ixl_add_macvlan(sc, sc->sc_enaddr, 0, flag_add);
2113 }
2114 return 0;
2115 }
2116
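/*
 * ixl_stop_rendezvous: take and drop each ring lock so any interrupt
 * handler or softint still running against the rings has finished, then
 * wait for the per-queue workqueue tasks to drain.
 */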
2117 static void
2118 ixl_stop_rendezvous(struct ixl_softc *sc)
2119 {
2120 struct ixl_tx_ring *txr;
2121 struct ixl_rx_ring *rxr;
2122 unsigned int i;
2123
2124 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2125 txr = sc->sc_qps[i].qp_txr;
2126 rxr = sc->sc_qps[i].qp_rxr;
2127
2128 mutex_enter(&txr->txr_lock);
2129 mutex_exit(&txr->txr_lock);
2130
2131 mutex_enter(&rxr->rxr_lock);
2132 mutex_exit(&rxr->rxr_lock);
2133
2134 ixl_work_wait(sc->sc_workq_txrx,
2135 &sc->sc_qps[i].qp_task);
2136 }
2137 }
2138
2139 static void
2140 ixl_stop_locked(struct ixl_softc *sc)
2141 {
2142 struct ifnet *ifp = &sc->sc_ec.ec_if;
2143 struct ixl_rx_ring *rxr;
2144 struct ixl_tx_ring *txr;
2145 unsigned int i;
2146 uint32_t reg;
2147
2148 KASSERT(mutex_owned(&sc->sc_cfg_lock));
2149
2150 CLR(ifp->if_flags, IFF_RUNNING | IFF_OACTIVE);
2151
2152 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2153 txr = sc->sc_qps[i].qp_txr;
2154 rxr = sc->sc_qps[i].qp_rxr;
2155
2156 ixl_disable_queue_intr(sc, &sc->sc_qps[i]);
2157
2158 mutex_enter(&txr->txr_lock);
2159 ixl_txr_qdis(sc, txr, 0);
2160 /* XXX wait at least 400 usec for all tx queues in one go */
2161 ixl_flush(sc);
2162 DELAY(500);
2163
2164 reg = ixl_rd(sc, I40E_QTX_ENA(i));
2165 CLR(reg, I40E_QTX_ENA_QENA_REQ_MASK);
2166 ixl_wr(sc, I40E_QTX_ENA(i), reg);
2167 		/* XXX wait 50ms from completion of the TX queue disable */
2168 ixl_flush(sc);
2169 DELAY(50);
2170
2171 if (ixl_txr_disabled(sc, txr) != 0) {
2172 mutex_exit(&txr->txr_lock);
2173 goto die;
2174 }
2175 mutex_exit(&txr->txr_lock);
2176
2177 mutex_enter(&rxr->rxr_lock);
2178 reg = ixl_rd(sc, I40E_QRX_ENA(i));
2179 CLR(reg, I40E_QRX_ENA_QENA_REQ_MASK);
2180 ixl_wr(sc, I40E_QRX_ENA(i), reg);
2181 /* XXX wait 50ms from completion of the RX queue disable */
2182 ixl_flush(sc);
2183 DELAY(50);
2184
2185 if (ixl_rxr_disabled(sc, rxr) != 0) {
2186 mutex_exit(&rxr->rxr_lock);
2187 goto die;
2188 }
2189 mutex_exit(&rxr->rxr_lock);
2190 }
2191
2192 ixl_stop_rendezvous(sc);
2193
2194 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2195 txr = sc->sc_qps[i].qp_txr;
2196 rxr = sc->sc_qps[i].qp_rxr;
2197
2198 ixl_txr_unconfig(sc, txr);
2199 ixl_rxr_unconfig(sc, rxr);
2200
2201 ixl_txr_clean(sc, txr);
2202 ixl_rxr_clean(sc, rxr);
2203 }
2204
2205 return;
2206 die:
2207 sc->sc_dead = true;
2208 	log(LOG_CRIT, "%s: failed to shut down rings\n",
2209 device_xname(sc->sc_dev));
2210 return;
2211 }
2212
2213 static void
2214 ixl_stop(struct ifnet *ifp, int disable)
2215 {
2216 struct ixl_softc *sc = ifp->if_softc;
2217
2218 mutex_enter(&sc->sc_cfg_lock);
2219 ixl_stop_locked(sc);
2220 mutex_exit(&sc->sc_cfg_lock);
2221 }
2222
2223 static int
2224 ixl_queue_pairs_alloc(struct ixl_softc *sc)
2225 {
2226 struct ixl_queue_pair *qp;
2227 unsigned int i;
2228 size_t sz;
2229
2230 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2231 sc->sc_qps = kmem_zalloc(sz, KM_SLEEP);
2232
2233 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2234 qp = &sc->sc_qps[i];
2235
2236 qp->qp_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
2237 ixl_handle_queue, qp);
2238 if (qp->qp_si == NULL)
2239 goto free;
2240
2241 qp->qp_txr = ixl_txr_alloc(sc, i);
2242 if (qp->qp_txr == NULL)
2243 goto free;
2244
2245 qp->qp_rxr = ixl_rxr_alloc(sc, i);
2246 if (qp->qp_rxr == NULL)
2247 goto free;
2248
2249 qp->qp_sc = sc;
2250 ixl_work_set(&qp->qp_task, ixl_handle_queue, qp);
2251 snprintf(qp->qp_name, sizeof(qp->qp_name),
2252 "%s-TXRX%d", device_xname(sc->sc_dev), i);
2253 }
2254
2255 return 0;
2256 free:
2257 if (sc->sc_qps != NULL) {
2258 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2259 qp = &sc->sc_qps[i];
2260
2261 if (qp->qp_txr != NULL)
2262 ixl_txr_free(sc, qp->qp_txr);
2263 if (qp->qp_rxr != NULL)
2264 ixl_rxr_free(sc, qp->qp_rxr);
2265 if (qp->qp_si != NULL)
2266 softint_disestablish(qp->qp_si);
2267 }
2268
2269 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2270 kmem_free(sc->sc_qps, sz);
2271 sc->sc_qps = NULL;
2272 }
2273
2274 return -1;
2275 }
2276
2277 static void
2278 ixl_queue_pairs_free(struct ixl_softc *sc)
2279 {
2280 struct ixl_queue_pair *qp;
2281 unsigned int i;
2282 size_t sz;
2283
2284 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2285 qp = &sc->sc_qps[i];
2286 ixl_txr_free(sc, qp->qp_txr);
2287 ixl_rxr_free(sc, qp->qp_rxr);
2288 softint_disestablish(qp->qp_si);
2289 }
2290
2291 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2292 kmem_free(sc->sc_qps, sz);
2293 sc->sc_qps = NULL;
2294 }
2295
2296 static struct ixl_tx_ring *
2297 ixl_txr_alloc(struct ixl_softc *sc, unsigned int qid)
2298 {
2299 struct ixl_tx_ring *txr = NULL;
2300 struct ixl_tx_map *maps = NULL, *txm;
2301 unsigned int i;
2302
2303 txr = kmem_zalloc(sizeof(*txr), KM_SLEEP);
2304 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_tx_ring_ndescs,
2305 KM_SLEEP);
2306
2307 if (ixl_dmamem_alloc(sc, &txr->txr_mem,
2308 sizeof(struct ixl_tx_desc) * sc->sc_tx_ring_ndescs,
2309 IXL_TX_QUEUE_ALIGN) != 0)
2310 goto free;
2311
2312 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2313 txm = &maps[i];
2314
2315 if (bus_dmamap_create(sc->sc_dmat,
2316 IXL_HARDMTU, IXL_TX_PKT_DESCS, IXL_HARDMTU, 0,
2317 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &txm->txm_map) != 0)
2318 goto uncreate;
2319
2320 txm->txm_eop = -1;
2321 txm->txm_m = NULL;
2322 }
2323
2324 txr->txr_cons = txr->txr_prod = 0;
2325 txr->txr_maps = maps;
2326
2327 txr->txr_intrq = pcq_create(sc->sc_tx_ring_ndescs, KM_NOSLEEP);
2328 if (txr->txr_intrq == NULL)
2329 goto uncreate;
2330
2331 txr->txr_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
2332 ixl_deferred_transmit, txr);
2333 if (txr->txr_si == NULL)
2334 goto destroy_pcq;
2335
2336 txr->txr_tail = I40E_QTX_TAIL(qid);
2337 txr->txr_qid = qid;
2338 txr->txr_sc = sc;
2339 mutex_init(&txr->txr_lock, MUTEX_DEFAULT, IPL_NET);
2340
2341 return txr;
2342
2343 destroy_pcq:
2344 pcq_destroy(txr->txr_intrq);
2345 uncreate:
2346 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2347 txm = &maps[i];
2348
2349 if (txm->txm_map == NULL)
2350 continue;
2351
2352 bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2353 }
2354
2355 ixl_dmamem_free(sc, &txr->txr_mem);
2356 free:
2357 kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs);
2358 kmem_free(txr, sizeof(*txr));
2359
2360 return NULL;
2361 }
2362
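/*
 * ixl_txr_qdis: set or clear the pre-queue-disable flag for a TX queue.
 * Each GLLAN_TXPRE_QDIS register covers 128 queues, so the global queue
 * id selects both the register and the index within it.
 */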
2363 static void
2364 ixl_txr_qdis(struct ixl_softc *sc, struct ixl_tx_ring *txr, int enable)
2365 {
2366 unsigned int qid;
2367 bus_size_t reg;
2368 uint32_t r;
2369
2370 qid = txr->txr_qid + sc->sc_base_queue;
2371 reg = I40E_GLLAN_TXPRE_QDIS(qid / 128);
2372 qid %= 128;
2373
2374 r = ixl_rd(sc, reg);
2375 CLR(r, I40E_GLLAN_TXPRE_QDIS_QINDX_MASK);
2376 SET(r, qid << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
2377 SET(r, enable ? I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK :
2378 I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK);
2379 ixl_wr(sc, reg, r);
2380 }
2381
2382 static void
2383 ixl_txr_config(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2384 {
2385 struct ixl_hmc_txq txq;
2386 struct ixl_aq_vsi_data *data = IXL_DMA_KVA(&sc->sc_scratch);
2387 void *hmc;
2388
2389 memset(&txq, 0, sizeof(txq));
2390 txq.head = htole16(txr->txr_cons);
2391 txq.new_context = 1;
2392 txq.base = htole64(IXL_DMA_DVA(&txr->txr_mem) / IXL_HMC_TXQ_BASE_UNIT);
2393 txq.head_wb_ena = IXL_HMC_TXQ_DESC_WB;
2394 txq.qlen = htole16(sc->sc_tx_ring_ndescs);
2395 txq.tphrdesc_ena = 0;
2396 txq.tphrpacket_ena = 0;
2397 txq.tphwdesc_ena = 0;
2398 txq.rdylist = data->qs_handle[0];
2399
2400 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2401 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2402 ixl_hmc_pack(hmc, &txq, ixl_hmc_pack_txq,
2403 __arraycount(ixl_hmc_pack_txq));
2404 }
2405
2406 static void
2407 ixl_txr_unconfig(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2408 {
2409 void *hmc;
2410
2411 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2412 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2413 }
2414
2415 static void
2416 ixl_txr_clean(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2417 {
2418 struct ixl_tx_map *maps, *txm;
2419 bus_dmamap_t map;
2420 unsigned int i;
2421
2422 maps = txr->txr_maps;
2423 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2424 txm = &maps[i];
2425
2426 if (txm->txm_m == NULL)
2427 continue;
2428
2429 map = txm->txm_map;
2430 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2431 BUS_DMASYNC_POSTWRITE);
2432 bus_dmamap_unload(sc->sc_dmat, map);
2433
2434 m_freem(txm->txm_m);
2435 txm->txm_m = NULL;
2436 }
2437 }
2438
2439 static int
2440 ixl_txr_enabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2441 {
2442 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2443 uint32_t reg;
2444 int i;
2445
2446 for (i = 0; i < 10; i++) {
2447 reg = ixl_rd(sc, ena);
2448 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK))
2449 return 0;
2450
2451 delaymsec(10);
2452 }
2453
2454 return ETIMEDOUT;
2455 }
2456
2457 static int
2458 ixl_txr_disabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2459 {
2460 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2461 uint32_t reg;
2462 int i;
2463
2464 KASSERT(mutex_owned(&txr->txr_lock));
2465
2466 for (i = 0; i < 20; i++) {
2467 reg = ixl_rd(sc, ena);
2468 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK) == 0)
2469 return 0;
2470
2471 delaymsec(10);
2472 }
2473
2474 return ETIMEDOUT;
2475 }
2476
2477 static void
2478 ixl_txr_free(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2479 {
2480 struct ixl_tx_map *maps, *txm;
2481 struct mbuf *m;
2482 unsigned int i;
2483
2484 softint_disestablish(txr->txr_si);
2485 while ((m = pcq_get(txr->txr_intrq)) != NULL)
2486 m_freem(m);
2487 pcq_destroy(txr->txr_intrq);
2488
2489 maps = txr->txr_maps;
2490 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2491 txm = &maps[i];
2492
2493 bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2494 }
2495
2496 ixl_dmamem_free(sc, &txr->txr_mem);
2497 mutex_destroy(&txr->txr_lock);
2498 kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs);
2499 kmem_free(txr, sizeof(*txr));
2500 }
2501
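/*
 * ixl_load_mbuf: DMA-load an mbuf chain for transmit.  If the chain has
 * too many segments (EFBIG), defragment it into a new chain and retry
 * the load once.
 */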
2502 static inline int
2503 ixl_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf **m0,
2504 struct ixl_tx_ring *txr)
2505 {
2506 struct mbuf *m;
2507 int error;
2508
2509 KASSERT(mutex_owned(&txr->txr_lock));
2510
2511 m = *m0;
2512
2513 error = bus_dmamap_load_mbuf(dmat, map, m,
2514 BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT);
2515 if (error != EFBIG)
2516 return error;
2517
2518 m = m_defrag(m, M_DONTWAIT);
2519 if (m != NULL) {
2520 *m0 = m;
2521 txr->txr_defragged.ev_count++;
2522
2523 error = bus_dmamap_load_mbuf(dmat, map, m,
2524 BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT);
2525 } else {
2526 txr->txr_defrag_failed.ev_count++;
2527 error = ENOBUFS;
2528 }
2529
2530 return error;
2531 }
2532
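/*
 * ixl_tx_common_locked: move packets from either the interface send
 * queue (if_start path) or the per-ring pcq (if_transmit path) onto the
 * TX descriptor ring.  Called with the ring lock held.
 */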
2533 static void
2534 ixl_tx_common_locked(struct ifnet *ifp, struct ixl_tx_ring *txr,
2535 bool is_transmit)
2536 {
2537 struct ixl_softc *sc = ifp->if_softc;
2538 struct ixl_tx_desc *ring, *txd;
2539 struct ixl_tx_map *txm;
2540 bus_dmamap_t map;
2541 struct mbuf *m;
2542 uint64_t cmd, cmd_vlan;
2543 unsigned int prod, free, last, i;
2544 unsigned int mask;
2545 int post = 0;
2546
2547 KASSERT(mutex_owned(&txr->txr_lock));
2548
2549 if (ifp->if_link_state != LINK_STATE_UP
2550 || !ISSET(ifp->if_flags, IFF_RUNNING)
2551 || (!is_transmit && ISSET(ifp->if_flags, IFF_OACTIVE))) {
2552 if (!is_transmit)
2553 IFQ_PURGE(&ifp->if_snd);
2554 return;
2555 }
2556
2557 prod = txr->txr_prod;
2558 free = txr->txr_cons;
2559 if (free <= prod)
2560 free += sc->sc_tx_ring_ndescs;
2561 free -= prod;
2562
2563 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2564 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTWRITE);
2565
2566 ring = IXL_DMA_KVA(&txr->txr_mem);
2567 mask = sc->sc_tx_ring_ndescs - 1;
2568 last = prod;
2569 cmd = 0;
2570 txd = NULL;
2571
2572 for (;;) {
2573 if (free <= IXL_TX_PKT_DESCS) {
2574 if (!is_transmit)
2575 SET(ifp->if_flags, IFF_OACTIVE);
2576 break;
2577 }
2578
2579 if (is_transmit)
2580 m = pcq_get(txr->txr_intrq);
2581 else
2582 IFQ_DEQUEUE(&ifp->if_snd, m);
2583
2584 if (m == NULL)
2585 break;
2586
2587 txm = &txr->txr_maps[prod];
2588 map = txm->txm_map;
2589
2590 if (ixl_load_mbuf(sc->sc_dmat, map, &m, txr) != 0) {
2591 txr->txr_oerrors++;
2592 m_freem(m);
2593 continue;
2594 }
2595
2596 if (vlan_has_tag(m)) {
2597 cmd_vlan = (uint64_t)vlan_get_tag(m) <<
2598 IXL_TX_DESC_L2TAG1_SHIFT;
2599 cmd_vlan |= IXL_TX_DESC_CMD_IL2TAG1;
2600 } else {
2601 cmd_vlan = 0;
2602 }
2603
2604 bus_dmamap_sync(sc->sc_dmat, map, 0,
2605 map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2606
2607 for (i = 0; i < (unsigned int)map->dm_nsegs; i++) {
2608 txd = &ring[prod];
2609
2610 cmd = (uint64_t)map->dm_segs[i].ds_len <<
2611 IXL_TX_DESC_BSIZE_SHIFT;
2612 cmd |= IXL_TX_DESC_DTYPE_DATA | IXL_TX_DESC_CMD_ICRC;
2613 cmd |= cmd_vlan;
2614
2615 txd->addr = htole64(map->dm_segs[i].ds_addr);
2616 txd->cmd = htole64(cmd);
2617
2618 last = prod;
2619
2620 prod++;
2621 prod &= mask;
2622 }
2623 cmd |= IXL_TX_DESC_CMD_EOP | IXL_TX_DESC_CMD_RS;
2624 txd->cmd = htole64(cmd);
2625
2626 txm->txm_m = m;
2627 txm->txm_eop = last;
2628
2629 bpf_mtap(ifp, m, BPF_D_OUT);
2630
2631 free -= i;
2632 post = 1;
2633 }
2634
2635 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2636 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREWRITE);
2637
2638 if (post) {
2639 txr->txr_prod = prod;
2640 ixl_wr(sc, txr->txr_tail, prod);
2641 }
2642 }
2643
2644 static int
2645 ixl_txeof(struct ixl_softc *sc, struct ixl_tx_ring *txr, u_int txlimit)
2646 {
2647 struct ifnet *ifp = &sc->sc_ec.ec_if;
2648 struct ixl_tx_desc *ring, *txd;
2649 struct ixl_tx_map *txm;
2650 struct mbuf *m;
2651 bus_dmamap_t map;
2652 unsigned int cons, prod, last;
2653 unsigned int mask;
2654 uint64_t dtype;
2655 int done = 0, more = 0;
2656
2657 KASSERT(mutex_owned(&txr->txr_lock));
2658
2659 prod = txr->txr_prod;
2660 cons = txr->txr_cons;
2661
2662 if (cons == prod)
2663 return 0;
2664
2665 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2666 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTREAD);
2667
2668 ring = IXL_DMA_KVA(&txr->txr_mem);
2669 mask = sc->sc_tx_ring_ndescs - 1;
2670
2671 do {
2672 if (txlimit-- <= 0) {
2673 more = 1;
2674 break;
2675 }
2676
2677 txm = &txr->txr_maps[cons];
2678 last = txm->txm_eop;
2679 txd = &ring[last];
2680
2681 dtype = txd->cmd & htole64(IXL_TX_DESC_DTYPE_MASK);
2682 if (dtype != htole64(IXL_TX_DESC_DTYPE_DONE))
2683 break;
2684
2685 map = txm->txm_map;
2686
2687 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2688 BUS_DMASYNC_POSTWRITE);
2689 bus_dmamap_unload(sc->sc_dmat, map);
2690
2691 m = txm->txm_m;
2692 if (m != NULL) {
2693 txr->txr_opackets++;
2694 txr->txr_obytes += m->m_pkthdr.len;
2695 if (ISSET(m->m_flags, M_MCAST))
2696 txr->txr_omcasts++;
2697 m_freem(m);
2698 }
2699
2700 txm->txm_m = NULL;
2701 txm->txm_eop = -1;
2702
2703 cons = last + 1;
2704 cons &= mask;
2705 done = 1;
2706 } while (cons != prod);
2707
2708 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2709 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREREAD);
2710
2711 txr->txr_cons = cons;
2712
2713 if (done) {
2714 softint_schedule(txr->txr_si);
2715 if (txr->txr_qid == 0) {
2716 CLR(ifp->if_flags, IFF_OACTIVE);
2717 if_schedule_deferred_start(ifp);
2718 }
2719 }
2720
2721 return more;
2722 }
2723
2724 static void
2725 ixl_start(struct ifnet *ifp)
2726 {
2727 struct ixl_softc *sc;
2728 struct ixl_tx_ring *txr;
2729
2730 sc = ifp->if_softc;
2731 txr = sc->sc_qps[0].qp_txr;
2732
2733 mutex_enter(&txr->txr_lock);
2734 ixl_tx_common_locked(ifp, txr, false);
2735 mutex_exit(&txr->txr_lock);
2736 }
2737
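/*
 * ixl_select_txqueue: spread if_transmit traffic across the queue pairs
 * by taking the current CPU index modulo the number of active pairs.
 */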
2738 static inline unsigned int
2739 ixl_select_txqueue(struct ixl_softc *sc, struct mbuf *m)
2740 {
2741 u_int cpuid;
2742
2743 cpuid = cpu_index(curcpu());
2744
2745 return (unsigned int)(cpuid % sc->sc_nqueue_pairs);
2746 }
2747
2748 static int
2749 ixl_transmit(struct ifnet *ifp, struct mbuf *m)
2750 {
2751 struct ixl_softc *sc;
2752 struct ixl_tx_ring *txr;
2753 unsigned int qid;
2754
2755 sc = ifp->if_softc;
2756 qid = ixl_select_txqueue(sc, m);
2757
2758 txr = sc->sc_qps[qid].qp_txr;
2759
2760 if (__predict_false(!pcq_put(txr->txr_intrq, m))) {
2761 mutex_enter(&txr->txr_lock);
2762 txr->txr_pcqdrop.ev_count++;
2763 mutex_exit(&txr->txr_lock);
2764
2765 m_freem(m);
2766 return ENOBUFS;
2767 }
2768
2769 if (mutex_tryenter(&txr->txr_lock)) {
2770 ixl_tx_common_locked(ifp, txr, true);
2771 mutex_exit(&txr->txr_lock);
2772 } else {
2773 softint_schedule(txr->txr_si);
2774 }
2775
2776 return 0;
2777 }
2778
2779 static void
2780 ixl_deferred_transmit(void *xtxr)
2781 {
2782 struct ixl_tx_ring *txr = xtxr;
2783 struct ixl_softc *sc = txr->txr_sc;
2784 struct ifnet *ifp = &sc->sc_ec.ec_if;
2785
2786 mutex_enter(&txr->txr_lock);
2787 txr->txr_transmitdef.ev_count++;
2788 if (pcq_peek(txr->txr_intrq) != NULL)
2789 ixl_tx_common_locked(ifp, txr, true);
2790 mutex_exit(&txr->txr_lock);
2791 }
2792
2793 static struct ixl_rx_ring *
2794 ixl_rxr_alloc(struct ixl_softc *sc, unsigned int qid)
2795 {
2796 struct ixl_rx_ring *rxr = NULL;
2797 struct ixl_rx_map *maps = NULL, *rxm;
2798 unsigned int i;
2799
2800 rxr = kmem_zalloc(sizeof(*rxr), KM_SLEEP);
2801 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_rx_ring_ndescs,
2802 KM_SLEEP);
2803
2804 if (ixl_dmamem_alloc(sc, &rxr->rxr_mem,
2805 sizeof(struct ixl_rx_rd_desc_32) * sc->sc_rx_ring_ndescs,
2806 IXL_RX_QUEUE_ALIGN) != 0)
2807 goto free;
2808
2809 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2810 rxm = &maps[i];
2811
2812 if (bus_dmamap_create(sc->sc_dmat,
2813 IXL_HARDMTU, 1, IXL_HARDMTU, 0,
2814 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &rxm->rxm_map) != 0)
2815 goto uncreate;
2816
2817 rxm->rxm_m = NULL;
2818 }
2819
2820 rxr->rxr_cons = rxr->rxr_prod = 0;
2821 rxr->rxr_m_head = NULL;
2822 rxr->rxr_m_tail = &rxr->rxr_m_head;
2823 rxr->rxr_maps = maps;
2824
2825 rxr->rxr_tail = I40E_QRX_TAIL(qid);
2826 rxr->rxr_qid = qid;
2827 mutex_init(&rxr->rxr_lock, MUTEX_DEFAULT, IPL_NET);
2828
2829 return rxr;
2830
2831 uncreate:
2832 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2833 rxm = &maps[i];
2834
2835 if (rxm->rxm_map == NULL)
2836 continue;
2837
2838 bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
2839 }
2840
2841 ixl_dmamem_free(sc, &rxr->rxr_mem);
2842 free:
2843 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs);
2844 kmem_free(rxr, sizeof(*rxr));
2845
2846 return NULL;
2847 }
2848
2849 static void
2850 ixl_rxr_clean(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2851 {
2852 struct ixl_rx_map *maps, *rxm;
2853 bus_dmamap_t map;
2854 unsigned int i;
2855
2856 maps = rxr->rxr_maps;
2857 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2858 rxm = &maps[i];
2859
2860 if (rxm->rxm_m == NULL)
2861 continue;
2862
2863 map = rxm->rxm_map;
2864 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2865 BUS_DMASYNC_POSTWRITE);
2866 bus_dmamap_unload(sc->sc_dmat, map);
2867
2868 m_freem(rxm->rxm_m);
2869 rxm->rxm_m = NULL;
2870 }
2871
2872 m_freem(rxr->rxr_m_head);
2873 rxr->rxr_m_head = NULL;
2874 rxr->rxr_m_tail = &rxr->rxr_m_head;
2875
2876 rxr->rxr_prod = rxr->rxr_cons = 0;
2877 }
2878
2879 static int
2880 ixl_rxr_enabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2881 {
2882 bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
2883 uint32_t reg;
2884 int i;
2885
2886 for (i = 0; i < 10; i++) {
2887 reg = ixl_rd(sc, ena);
2888 if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK))
2889 return 0;
2890
2891 delaymsec(10);
2892 }
2893
2894 return ETIMEDOUT;
2895 }
2896
2897 static int
2898 ixl_rxr_disabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2899 {
2900 bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
2901 uint32_t reg;
2902 int i;
2903
2904 KASSERT(mutex_owned(&rxr->rxr_lock));
2905
2906 for (i = 0; i < 20; i++) {
2907 reg = ixl_rd(sc, ena);
2908 if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK) == 0)
2909 return 0;
2910
2911 delaymsec(10);
2912 }
2913
2914 return ETIMEDOUT;
2915 }
2916
2917 static void
2918 ixl_rxr_config(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2919 {
2920 struct ixl_hmc_rxq rxq;
2921 void *hmc;
2922
2923 memset(&rxq, 0, sizeof(rxq));
2924
2925 rxq.head = htole16(rxr->rxr_cons);
2926 rxq.base = htole64(IXL_DMA_DVA(&rxr->rxr_mem) / IXL_HMC_RXQ_BASE_UNIT);
2927 rxq.qlen = htole16(sc->sc_rx_ring_ndescs);
2928 rxq.dbuff = htole16(MCLBYTES / IXL_HMC_RXQ_DBUFF_UNIT);
2929 rxq.hbuff = 0;
2930 rxq.dtype = IXL_HMC_RXQ_DTYPE_NOSPLIT;
2931 rxq.dsize = IXL_HMC_RXQ_DSIZE_32;
2932 rxq.crcstrip = 1;
2933 rxq.l2sel = 1;
2934 rxq.showiv = 1;
2935 rxq.rxmax = htole16(IXL_HARDMTU);
2936 rxq.tphrdesc_ena = 0;
2937 rxq.tphwdesc_ena = 0;
2938 rxq.tphdata_ena = 0;
2939 rxq.tphhead_ena = 0;
2940 rxq.lrxqthresh = 0;
2941 rxq.prefena = 1;
2942
2943 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
2944 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
2945 ixl_hmc_pack(hmc, &rxq, ixl_hmc_pack_rxq,
2946 __arraycount(ixl_hmc_pack_rxq));
2947 }
2948
2949 static void
2950 ixl_rxr_unconfig(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2951 {
2952 void *hmc;
2953
2954 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
2955 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
2956 }
2957
2958 static void
2959 ixl_rxr_free(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2960 {
2961 struct ixl_rx_map *maps, *rxm;
2962 unsigned int i;
2963
2964 maps = rxr->rxr_maps;
2965 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2966 rxm = &maps[i];
2967
2968 bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
2969 }
2970
2971 ixl_dmamem_free(sc, &rxr->rxr_mem);
2972 mutex_destroy(&rxr->rxr_lock);
2973 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs);
2974 kmem_free(rxr, sizeof(*rxr));
2975 }
2976
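/*
 * ixl_rx_csum: translate the RX descriptor packet type and error bits
 * into mbuf checksum flags, marking IPv4/TCP/UDP checksums as checked
 * by hardware and flagging them bad when an error was reported.
 */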
2977 static inline void
2978 ixl_rx_csum(struct mbuf *m, uint64_t qword)
2979 {
2980 int flags_mask;
2981
2982 if (!ISSET(qword, IXL_RX_DESC_L3L4P)) {
2983 /* No L3 or L4 checksum was calculated */
2984 return;
2985 }
2986
2987 switch (__SHIFTOUT(qword, IXL_RX_DESC_PTYPE_MASK)) {
2988 case IXL_RX_DESC_PTYPE_IPV4FRAG:
2989 case IXL_RX_DESC_PTYPE_IPV4:
2990 case IXL_RX_DESC_PTYPE_SCTPV4:
2991 case IXL_RX_DESC_PTYPE_ICMPV4:
2992 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
2993 break;
2994 case IXL_RX_DESC_PTYPE_TCPV4:
2995 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
2996 flags_mask |= M_CSUM_TCPv4 | M_CSUM_TCP_UDP_BAD;
2997 break;
2998 case IXL_RX_DESC_PTYPE_UDPV4:
2999 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
3000 flags_mask |= M_CSUM_UDPv4 | M_CSUM_TCP_UDP_BAD;
3001 break;
3002 case IXL_RX_DESC_PTYPE_TCPV6:
3003 flags_mask = M_CSUM_TCPv6 | M_CSUM_TCP_UDP_BAD;
3004 break;
3005 case IXL_RX_DESC_PTYPE_UDPV6:
3006 flags_mask = M_CSUM_UDPv6 | M_CSUM_TCP_UDP_BAD;
3007 break;
3008 default:
3009 flags_mask = 0;
3010 }
3011
3012 m->m_pkthdr.csum_flags |= (flags_mask & (M_CSUM_IPv4 |
3013 M_CSUM_TCPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv4 | M_CSUM_UDPv6));
3014
3015 if (ISSET(qword, IXL_RX_DESC_IPE)) {
3016 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_IPv4_BAD);
3017 }
3018
3019 if (ISSET(qword, IXL_RX_DESC_L4E)) {
3020 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_TCP_UDP_BAD);
3021 }
3022 }
3023
3024 static int
3025 ixl_rxeof(struct ixl_softc *sc, struct ixl_rx_ring *rxr, u_int rxlimit)
3026 {
3027 struct ifnet *ifp = &sc->sc_ec.ec_if;
3028 struct ixl_rx_wb_desc_32 *ring, *rxd;
3029 struct ixl_rx_map *rxm;
3030 bus_dmamap_t map;
3031 unsigned int cons, prod;
3032 struct mbuf *m;
3033 uint64_t word, word0;
3034 unsigned int len;
3035 unsigned int mask;
3036 int done = 0, more = 0;
3037
3038 KASSERT(mutex_owned(&rxr->rxr_lock));
3039
3040 if (!ISSET(ifp->if_flags, IFF_RUNNING))
3041 return 0;
3042
3043 prod = rxr->rxr_prod;
3044 cons = rxr->rxr_cons;
3045
3046 if (cons == prod)
3047 return 0;
3048
3049 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
3050 0, IXL_DMA_LEN(&rxr->rxr_mem),
3051 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3052
3053 ring = IXL_DMA_KVA(&rxr->rxr_mem);
3054 mask = sc->sc_rx_ring_ndescs - 1;
3055
3056 do {
3057 if (rxlimit-- <= 0) {
3058 more = 1;
3059 break;
3060 }
3061
3062 rxd = &ring[cons];
3063
3064 word = le64toh(rxd->qword1);
3065
3066 if (!ISSET(word, IXL_RX_DESC_DD))
3067 break;
3068
3069 rxm = &rxr->rxr_maps[cons];
3070
3071 map = rxm->rxm_map;
3072 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3073 BUS_DMASYNC_POSTREAD);
3074 bus_dmamap_unload(sc->sc_dmat, map);
3075
3076 m = rxm->rxm_m;
3077 rxm->rxm_m = NULL;
3078
3079 KASSERT(m != NULL);
3080
3081 len = (word & IXL_RX_DESC_PLEN_MASK) >> IXL_RX_DESC_PLEN_SHIFT;
3082 m->m_len = len;
3083 m->m_pkthdr.len = 0;
3084
3085 m->m_next = NULL;
3086 *rxr->rxr_m_tail = m;
3087 rxr->rxr_m_tail = &m->m_next;
3088
3089 m = rxr->rxr_m_head;
3090 m->m_pkthdr.len += len;
3091
3092 if (ISSET(word, IXL_RX_DESC_EOP)) {
3093 word0 = le64toh(rxd->qword0);
3094
3095 if (ISSET(word, IXL_RX_DESC_L2TAG1P)) {
3096 vlan_set_tag(m,
3097 __SHIFTOUT(word0, IXL_RX_DESC_L2TAG1_MASK));
3098 }
3099
3100 if ((ifp->if_capenable & IXL_IFCAP_RXCSUM) != 0)
3101 ixl_rx_csum(m, word);
3102
3103 if (!ISSET(word,
3104 IXL_RX_DESC_RXE | IXL_RX_DESC_OVERSIZE)) {
3105 m_set_rcvif(m, ifp);
3106 rxr->rxr_ipackets++;
3107 rxr->rxr_ibytes += m->m_pkthdr.len;
3108 if_percpuq_enqueue(ifp->if_percpuq, m);
3109 } else {
3110 rxr->rxr_ierrors++;
3111 m_freem(m);
3112 }
3113
3114 rxr->rxr_m_head = NULL;
3115 rxr->rxr_m_tail = &rxr->rxr_m_head;
3116 }
3117
3118 cons++;
3119 cons &= mask;
3120
3121 done = 1;
3122 } while (cons != prod);
3123
3124 if (done) {
3125 rxr->rxr_cons = cons;
3126 if (ixl_rxfill(sc, rxr) == -1)
3127 rxr->rxr_iqdrops++;
3128 }
3129
3130 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
3131 0, IXL_DMA_LEN(&rxr->rxr_mem),
3132 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3133
3134 return more;
3135 }
3136
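/*
 * ixl_rxfill: post fresh mbuf clusters to the unrefreshed RX
 * descriptors and update the tail register.  Returns 0 on success or
 * -1 if the ring could not be (fully) refilled.
 */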
3137 static int
3138 ixl_rxfill(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3139 {
3140 struct ixl_rx_rd_desc_32 *ring, *rxd;
3141 struct ixl_rx_map *rxm;
3142 bus_dmamap_t map;
3143 struct mbuf *m;
3144 unsigned int prod;
3145 unsigned int slots;
3146 unsigned int mask;
3147 int post = 0, error = 0;
3148
3149 KASSERT(mutex_owned(&rxr->rxr_lock));
3150
3151 prod = rxr->rxr_prod;
3152 slots = ixl_rxr_unrefreshed(rxr->rxr_prod, rxr->rxr_cons,
3153 sc->sc_rx_ring_ndescs);
3154
3155 ring = IXL_DMA_KVA(&rxr->rxr_mem);
3156 mask = sc->sc_rx_ring_ndescs - 1;
3157
3158 	if (__predict_false(slots == 0))
3159 return -1;
3160
3161 do {
3162 rxm = &rxr->rxr_maps[prod];
3163
3164 MGETHDR(m, M_DONTWAIT, MT_DATA);
3165 if (m == NULL) {
3166 rxr->rxr_mgethdr_failed.ev_count++;
3167 error = -1;
3168 break;
3169 }
3170
3171 MCLGET(m, M_DONTWAIT);
3172 if (!ISSET(m->m_flags, M_EXT)) {
3173 rxr->rxr_mgetcl_failed.ev_count++;
3174 error = -1;
3175 m_freem(m);
3176 break;
3177 }
3178
3179 m->m_len = m->m_pkthdr.len = MCLBYTES + ETHER_ALIGN;
3180 m_adj(m, ETHER_ALIGN);
3181
3182 map = rxm->rxm_map;
3183
3184 if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
3185 BUS_DMA_READ | BUS_DMA_NOWAIT) != 0) {
3186 rxr->rxr_mbuf_load_failed.ev_count++;
3187 error = -1;
3188 m_freem(m);
3189 break;
3190 }
3191
3192 rxm->rxm_m = m;
3193
3194 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3195 BUS_DMASYNC_PREREAD);
3196
3197 rxd = &ring[prod];
3198
3199 rxd->paddr = htole64(map->dm_segs[0].ds_addr);
3200 rxd->haddr = htole64(0);
3201
3202 prod++;
3203 prod &= mask;
3204
3205 post = 1;
3206
3207 } while (--slots);
3208
3209 if (post) {
3210 rxr->rxr_prod = prod;
3211 ixl_wr(sc, rxr->rxr_tail, prod);
3212 }
3213
3214 return error;
3215 }
3216
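/*
 * ixl_handle_queue_common: reap completed TX descriptors and receive
 * packets for one queue pair, bounded by the given limits.  The return
 * value has bit 0 set if more TX work remains and bit 1 set if more RX
 * work remains.
 */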
3217 static inline int
3218 ixl_handle_queue_common(struct ixl_softc *sc, struct ixl_queue_pair *qp,
3219 u_int txlimit, struct evcnt *txevcnt,
3220 u_int rxlimit, struct evcnt *rxevcnt)
3221 {
3222 struct ixl_tx_ring *txr = qp->qp_txr;
3223 struct ixl_rx_ring *rxr = qp->qp_rxr;
3224 int txmore, rxmore;
3225 int rv;
3226
3227 KASSERT(!mutex_owned(&txr->txr_lock));
3228 KASSERT(!mutex_owned(&rxr->rxr_lock));
3229
3230 mutex_enter(&txr->txr_lock);
3231 txevcnt->ev_count++;
3232 txmore = ixl_txeof(sc, txr, txlimit);
3233 mutex_exit(&txr->txr_lock);
3234
3235 mutex_enter(&rxr->rxr_lock);
3236 rxevcnt->ev_count++;
3237 rxmore = ixl_rxeof(sc, rxr, rxlimit);
3238 mutex_exit(&rxr->rxr_lock);
3239
3240 rv = txmore | (rxmore << 1);
3241
3242 return rv;
3243 }
3244
3245 static void
3246 ixl_sched_handle_queue(struct ixl_softc *sc, struct ixl_queue_pair *qp)
3247 {
3248
3249 if (qp->qp_workqueue)
3250 ixl_work_add(sc->sc_workq_txrx, &qp->qp_task);
3251 else
3252 softint_schedule(qp->qp_si);
3253 }
3254
3255 static int
3256 ixl_intr(void *xsc)
3257 {
3258 struct ixl_softc *sc = xsc;
3259 struct ixl_tx_ring *txr;
3260 struct ixl_rx_ring *rxr;
3261 uint32_t icr, rxintr, txintr;
3262 int rv = 0;
3263 unsigned int i;
3264
3265 KASSERT(sc != NULL);
3266
3267 ixl_enable_other_intr(sc);
3268 icr = ixl_rd(sc, I40E_PFINT_ICR0);
3269
3270 if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) {
3271 atomic_inc_64(&sc->sc_event_atq.ev_count);
3272 ixl_atq_done(sc);
3273 ixl_work_add(sc->sc_workq, &sc->sc_arq_task);
3274 rv = 1;
3275 }
3276
3277 if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) {
3278 atomic_inc_64(&sc->sc_event_link.ev_count);
3279 ixl_work_add(sc->sc_workq, &sc->sc_link_state_task);
3280 rv = 1;
3281 }
3282
3283 rxintr = icr & I40E_INTR_NOTX_RX_MASK;
3284 txintr = icr & I40E_INTR_NOTX_TX_MASK;
3285
3286 if (txintr || rxintr) {
3287 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
3288 txr = sc->sc_qps[i].qp_txr;
3289 rxr = sc->sc_qps[i].qp_rxr;
3290
3291 ixl_handle_queue_common(sc, &sc->sc_qps[i],
3292 IXL_TXRX_PROCESS_UNLIMIT, &txr->txr_intr,
3293 IXL_TXRX_PROCESS_UNLIMIT, &rxr->rxr_intr);
3294 }
3295 rv = 1;
3296 }
3297
3298 return rv;
3299 }
3300
3301 static int
3302 ixl_queue_intr(void *xqp)
3303 {
3304 struct ixl_queue_pair *qp = xqp;
3305 struct ixl_tx_ring *txr = qp->qp_txr;
3306 struct ixl_rx_ring *rxr = qp->qp_rxr;
3307 struct ixl_softc *sc = qp->qp_sc;
3308 u_int txlimit, rxlimit;
3309 int more;
3310
3311 txlimit = sc->sc_tx_intr_process_limit;
3312 rxlimit = sc->sc_rx_intr_process_limit;
3313 qp->qp_workqueue = sc->sc_txrx_workqueue;
3314
3315 more = ixl_handle_queue_common(sc, qp,
3316 txlimit, &txr->txr_intr, rxlimit, &rxr->rxr_intr);
3317
3318 if (more != 0) {
3319 ixl_sched_handle_queue(sc, qp);
3320 } else {
3321 /* for ALTQ */
3322 if (txr->txr_qid == 0)
3323 if_schedule_deferred_start(&sc->sc_ec.ec_if);
3324 softint_schedule(txr->txr_si);
3325
3326 ixl_enable_queue_intr(sc, qp);
3327 }
3328
3329 return 1;
3330 }
3331
3332 static void
3333 ixl_handle_queue(void *xqp)
3334 {
3335 struct ixl_queue_pair *qp = xqp;
3336 struct ixl_softc *sc = qp->qp_sc;
3337 struct ixl_tx_ring *txr = qp->qp_txr;
3338 struct ixl_rx_ring *rxr = qp->qp_rxr;
3339 u_int txlimit, rxlimit;
3340 int more;
3341
3342 txlimit = sc->sc_tx_process_limit;
3343 rxlimit = sc->sc_rx_process_limit;
3344
3345 more = ixl_handle_queue_common(sc, qp,
3346 txlimit, &txr->txr_defer, rxlimit, &rxr->rxr_defer);
3347
3348 if (more != 0)
3349 ixl_sched_handle_queue(sc, qp);
3350 else
3351 ixl_enable_queue_intr(sc, qp);
3352 }
3353
3354 static inline void
3355 ixl_print_hmc_error(struct ixl_softc *sc, uint32_t reg)
3356 {
3357 uint32_t hmc_idx, hmc_isvf;
3358 uint32_t hmc_errtype, hmc_objtype, hmc_data;
3359
3360 hmc_idx = reg & I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK;
3361 hmc_idx = hmc_idx >> I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT;
3362 hmc_isvf = reg & I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK;
3363 hmc_isvf = hmc_isvf >> I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT;
3364 hmc_errtype = reg & I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK;
3365 hmc_errtype = hmc_errtype >> I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT;
3366 hmc_objtype = reg & I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK;
3367 hmc_objtype = hmc_objtype >> I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT;
3368 hmc_data = ixl_rd(sc, I40E_PFHMC_ERRORDATA);
3369
3370 device_printf(sc->sc_dev,
3371 "HMC Error (idx=0x%x, isvf=0x%x, err=0x%x, obj=0x%x, data=0x%x)\n",
3372 hmc_idx, hmc_isvf, hmc_errtype, hmc_objtype, hmc_data);
3373 }
3374
3375 static int
3376 ixl_other_intr(void *xsc)
3377 {
3378 struct ixl_softc *sc = xsc;
3379 uint32_t icr, mask, reg;
3380 	int rv = 0;
3381
3382 icr = ixl_rd(sc, I40E_PFINT_ICR0);
3383 mask = ixl_rd(sc, I40E_PFINT_ICR0_ENA);
3384
3385 if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) {
3386 atomic_inc_64(&sc->sc_event_atq.ev_count);
3387 ixl_atq_done(sc);
3388 ixl_work_add(sc->sc_workq, &sc->sc_arq_task);
3389 rv = 1;
3390 }
3391
3392 if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) {
3393 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3394 device_printf(sc->sc_dev, "link stat changed\n");
3395
3396 atomic_inc_64(&sc->sc_event_link.ev_count);
3397 ixl_work_add(sc->sc_workq, &sc->sc_link_state_task);
3398 rv = 1;
3399 }
3400
3401 if (ISSET(icr, I40E_PFINT_ICR0_GRST_MASK)) {
3402 CLR(mask, I40E_PFINT_ICR0_ENA_GRST_MASK);
3403 reg = ixl_rd(sc, I40E_GLGEN_RSTAT);
3404 reg = reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK;
3405 reg = reg >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
3406
3407 device_printf(sc->sc_dev, "GRST: %s\n",
3408 reg == I40E_RESET_CORER ? "CORER" :
3409 reg == I40E_RESET_GLOBR ? "GLOBR" :
3410 reg == I40E_RESET_EMPR ? "EMPR" :
3411 "POR");
3412 }
3413
3414 if (ISSET(icr, I40E_PFINT_ICR0_ECC_ERR_MASK))
3415 atomic_inc_64(&sc->sc_event_ecc_err.ev_count);
3416 if (ISSET(icr, I40E_PFINT_ICR0_PCI_EXCEPTION_MASK))
3417 atomic_inc_64(&sc->sc_event_pci_exception.ev_count);
3418 if (ISSET(icr, I40E_PFINT_ICR0_PE_CRITERR_MASK))
3419 atomic_inc_64(&sc->sc_event_crit_err.ev_count);
3420
3421 if (ISSET(icr, IXL_ICR0_CRIT_ERR_MASK)) {
3422 CLR(mask, IXL_ICR0_CRIT_ERR_MASK);
3423 device_printf(sc->sc_dev, "critical error\n");
3424 }
3425
3426 if (ISSET(icr, I40E_PFINT_ICR0_HMC_ERR_MASK)) {
3427 reg = ixl_rd(sc, I40E_PFHMC_ERRORINFO);
3428 if (ISSET(reg, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK))
3429 ixl_print_hmc_error(sc, reg);
3430 ixl_wr(sc, I40E_PFHMC_ERRORINFO, 0);
3431 }
3432
3433 ixl_wr(sc, I40E_PFINT_ICR0_ENA, mask);
3434 ixl_flush(sc);
3435 ixl_enable_other_intr(sc);
3436 return rv;
3437 }
3438
3439 static void
3440 ixl_get_link_status_done(struct ixl_softc *sc,
3441 const struct ixl_aq_desc *iaq)
3442 {
3443
3444 ixl_link_state_update(sc, iaq);
3445 }
3446
3447 static void
3448 ixl_get_link_status(void *xsc)
3449 {
3450 struct ixl_softc *sc = xsc;
3451 struct ixl_aq_desc *iaq;
3452 struct ixl_aq_link_param *param;
3453
3454 memset(&sc->sc_link_state_atq, 0, sizeof(sc->sc_link_state_atq));
3455 iaq = &sc->sc_link_state_atq.iatq_desc;
3456 iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
3457 param = (struct ixl_aq_link_param *)iaq->iaq_param;
3458 param->notify = IXL_AQ_LINK_NOTIFY;
3459
3460 ixl_atq_set(&sc->sc_link_state_atq, ixl_get_link_status_done);
3461 (void)ixl_atq_post(sc, &sc->sc_link_state_atq);
3462 }
3463
3464 static void
3465 ixl_link_state_update(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
3466 {
3467 struct ifnet *ifp = &sc->sc_ec.ec_if;
3468 int link_state;
3469
3470 KASSERT(kpreempt_disabled());
3471
3472 link_state = ixl_set_link_status(sc, iaq);
3473
3474 if (ifp->if_link_state != link_state)
3475 if_link_state_change(ifp, link_state);
3476
3477 if (link_state != LINK_STATE_DOWN) {
3478 if_schedule_deferred_start(ifp);
3479 }
3480 }
3481
3482 static void
3483 ixl_aq_dump(const struct ixl_softc *sc, const struct ixl_aq_desc *iaq,
3484 const char *msg)
3485 {
3486 char buf[512];
3487 size_t len;
3488
3489 len = sizeof(buf);
3490 buf[--len] = '\0';
3491
3492 device_printf(sc->sc_dev, "%s\n", msg);
3493 snprintb(buf, len, IXL_AQ_FLAGS_FMT, le16toh(iaq->iaq_flags));
3494 device_printf(sc->sc_dev, "flags %s opcode %04x\n",
3495 buf, le16toh(iaq->iaq_opcode));
3496 device_printf(sc->sc_dev, "datalen %u retval %u\n",
3497 le16toh(iaq->iaq_datalen), le16toh(iaq->iaq_retval));
3498 device_printf(sc->sc_dev, "cookie %016" PRIx64 "\n", iaq->iaq_cookie);
3499 device_printf(sc->sc_dev, "%08x %08x %08x %08x\n",
3500 le32toh(iaq->iaq_param[0]), le32toh(iaq->iaq_param[1]),
3501 le32toh(iaq->iaq_param[2]), le32toh(iaq->iaq_param[3]));
3502 }
3503
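/*
 * ixl_arq: drain the admin receive queue of firmware-initiated events
 * (such as link status changes), recycle the event buffers and
 * re-enable the misc interrupt.
 */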
3504 static void
3505 ixl_arq(void *xsc)
3506 {
3507 struct ixl_softc *sc = xsc;
3508 struct ixl_aq_desc *arq, *iaq;
3509 struct ixl_aq_buf *aqb;
3510 unsigned int cons = sc->sc_arq_cons;
3511 unsigned int prod;
3512 int done = 0;
3513
3514 prod = ixl_rd(sc, sc->sc_aq_regs->arq_head) &
3515 sc->sc_aq_regs->arq_head_mask;
3516
3517 if (cons == prod)
3518 goto done;
3519
3520 arq = IXL_DMA_KVA(&sc->sc_arq);
3521
3522 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3523 0, IXL_DMA_LEN(&sc->sc_arq),
3524 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3525
3526 do {
3527 iaq = &arq[cons];
3528 aqb = sc->sc_arq_live[cons];
3529
3530 KASSERT(aqb != NULL);
3531
3532 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,
3533 BUS_DMASYNC_POSTREAD);
3534
3535 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3536 ixl_aq_dump(sc, iaq, "arq event");
3537
3538 switch (iaq->iaq_opcode) {
3539 case htole16(IXL_AQ_OP_PHY_LINK_STATUS):
3540 kpreempt_disable();
3541 ixl_link_state_update(sc, iaq);
3542 kpreempt_enable();
3543 break;
3544 }
3545
3546 memset(iaq, 0, sizeof(*iaq));
3547 sc->sc_arq_live[cons] = NULL;
3548 SIMPLEQ_INSERT_TAIL(&sc->sc_arq_idle, aqb, aqb_entry);
3549
3550 cons++;
3551 cons &= IXL_AQ_MASK;
3552
3553 done = 1;
3554 } while (cons != prod);
3555
3556 if (done) {
3557 sc->sc_arq_cons = cons;
3558 ixl_arq_fill(sc);
3559 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3560 0, IXL_DMA_LEN(&sc->sc_arq),
3561 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3562 }
3563
3564 done:
3565 ixl_enable_other_intr(sc);
3566 }
3567
3568 static void
3569 ixl_atq_set(struct ixl_atq *iatq,
3570 void (*fn)(struct ixl_softc *, const struct ixl_aq_desc *))
3571 {
3572
3573 iatq->iatq_fn = fn;
3574 }
3575
3576 static int
3577 ixl_atq_post_locked(struct ixl_softc *sc, struct ixl_atq *iatq)
3578 {
3579 struct ixl_aq_desc *atq, *slot;
3580 unsigned int prod, cons, prod_next;
3581
3582 /* assert locked */
3583 KASSERT(mutex_owned(&sc->sc_atq_lock));
3584
3585 atq = IXL_DMA_KVA(&sc->sc_atq);
3586 prod = sc->sc_atq_prod;
3587 cons = sc->sc_atq_cons;
3588 	prod_next = (prod + 1) & IXL_AQ_MASK;
3589
3590 if (cons == prod_next)
3591 return ENOMEM;
3592
3593 slot = &atq[prod];
3594
3595 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3596 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
3597
3598 *slot = iatq->iatq_desc;
3599 slot->iaq_cookie = (uint64_t)((intptr_t)iatq);
3600
3601 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3602 ixl_aq_dump(sc, slot, "atq command");
3603
3604 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3605 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
3606
3607 sc->sc_atq_prod = prod_next;
3608 ixl_wr(sc, sc->sc_aq_regs->atq_tail, sc->sc_atq_prod);
3609
3610 return 0;
3611 }
3612
3613 static int
3614 ixl_atq_post(struct ixl_softc *sc, struct ixl_atq *iatq)
3615 {
3616 int rv;
3617
3618 mutex_enter(&sc->sc_atq_lock);
3619 rv = ixl_atq_post_locked(sc, iatq);
3620 mutex_exit(&sc->sc_atq_lock);
3621
3622 return rv;
3623 }
3624
3625 static void
3626 ixl_atq_done_locked(struct ixl_softc *sc)
3627 {
3628 struct ixl_aq_desc *atq, *slot;
3629 struct ixl_atq *iatq;
3630 unsigned int cons;
3631 unsigned int prod;
3632
3633 KASSERT(mutex_owned(&sc->sc_atq_lock));
3634
3635 prod = sc->sc_atq_prod;
3636 cons = sc->sc_atq_cons;
3637
3638 if (prod == cons)
3639 return;
3640
3641 atq = IXL_DMA_KVA(&sc->sc_atq);
3642
3643 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3644 0, IXL_DMA_LEN(&sc->sc_atq),
3645 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3646
3647 do {
3648 slot = &atq[cons];
3649 if (!ISSET(slot->iaq_flags, htole16(IXL_AQ_DD)))
3650 break;
3651
3652 iatq = (struct ixl_atq *)((intptr_t)slot->iaq_cookie);
3653 iatq->iatq_desc = *slot;
3654
3655 memset(slot, 0, sizeof(*slot));
3656
3657 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3658 ixl_aq_dump(sc, &iatq->iatq_desc, "atq response");
3659
3660 (*iatq->iatq_fn)(sc, &iatq->iatq_desc);
3661
3662 cons++;
3663 cons &= IXL_AQ_MASK;
3664 } while (cons != prod);
3665
3666 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3667 0, IXL_DMA_LEN(&sc->sc_atq),
3668 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3669
3670 sc->sc_atq_cons = cons;
3671 }
3672
3673 static void
3674 ixl_atq_done(struct ixl_softc *sc)
3675 {
3676
3677 mutex_enter(&sc->sc_atq_lock);
3678 ixl_atq_done_locked(sc);
3679 mutex_exit(&sc->sc_atq_lock);
3680 }
3681
3682 static void
3683 ixl_wakeup(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
3684 {
3685
3686 KASSERT(mutex_owned(&sc->sc_atq_lock));
3687
3688 cv_signal(&sc->sc_atq_cv);
3689 }
3690
3691 static int
3692 ixl_atq_exec(struct ixl_softc *sc, struct ixl_atq *iatq)
3693 {
3694 int error;
3695
3696 KASSERT(iatq->iatq_desc.iaq_cookie == 0);
3697
3698 ixl_atq_set(iatq, ixl_wakeup);
3699
3700 mutex_enter(&sc->sc_atq_lock);
3701 error = ixl_atq_post_locked(sc, iatq);
3702 if (error) {
3703 mutex_exit(&sc->sc_atq_lock);
3704 return error;
3705 }
3706
3707 error = cv_timedwait(&sc->sc_atq_cv, &sc->sc_atq_lock,
3708 IXL_ATQ_EXEC_TIMEOUT);
3709 mutex_exit(&sc->sc_atq_lock);
3710
3711 return error;
3712 }
3713
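/*
 * ixl_atq_poll: post a single admin queue command and busy-wait up to
 * tm milliseconds for the firmware to consume it, for use on paths
 * where the admin queue interrupt cannot be relied on.
 */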
3714 static int
3715 ixl_atq_poll(struct ixl_softc *sc, struct ixl_aq_desc *iaq, unsigned int tm)
3716 {
3717 struct ixl_aq_desc *atq, *slot;
3718 unsigned int prod;
3719 unsigned int t = 0;
3720
3721 mutex_enter(&sc->sc_atq_lock);
3722
3723 atq = IXL_DMA_KVA(&sc->sc_atq);
3724 prod = sc->sc_atq_prod;
3725 slot = atq + prod;
3726
3727 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3728 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
3729
3730 *slot = *iaq;
3731 slot->iaq_flags |= htole16(IXL_AQ_SI);
3732
3733 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3734 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
3735
3736 prod++;
3737 prod &= IXL_AQ_MASK;
3738 sc->sc_atq_prod = prod;
3739 ixl_wr(sc, sc->sc_aq_regs->atq_tail, prod);
3740
3741 while (ixl_rd(sc, sc->sc_aq_regs->atq_head) != prod) {
3742 delaymsec(1);
3743
3744 if (t++ > tm) {
3745 mutex_exit(&sc->sc_atq_lock);
3746 return ETIMEDOUT;
3747 }
3748 }
3749
3750 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3751 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTREAD);
3752 *iaq = *slot;
3753 memset(slot, 0, sizeof(*slot));
3754 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3755 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREREAD);
3756
3757 sc->sc_atq_cons = prod;
3758
3759 mutex_exit(&sc->sc_atq_lock);
3760
3761 return 0;
3762 }
3763
3764 static int
3765 ixl_get_version(struct ixl_softc *sc)
3766 {
3767 struct ixl_aq_desc iaq;
3768 uint32_t fwbuild, fwver, apiver;
3769 uint16_t api_maj_ver, api_min_ver;
3770
3771 memset(&iaq, 0, sizeof(iaq));
3772 iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VERSION);
3773
3774 iaq.iaq_retval = le16toh(23);
3775
3776 if (ixl_atq_poll(sc, &iaq, 2000) != 0)
3777 return ETIMEDOUT;
3778 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK))
3779 return EIO;
3780
3781 fwbuild = le32toh(iaq.iaq_param[1]);
3782 fwver = le32toh(iaq.iaq_param[2]);
3783 apiver = le32toh(iaq.iaq_param[3]);
3784
3785 api_maj_ver = (uint16_t)apiver;
3786 api_min_ver = (uint16_t)(apiver >> 16);
3787
3788 aprint_normal(", FW %hu.%hu.%05u API %hu.%hu", (uint16_t)fwver,
3789 (uint16_t)(fwver >> 16), fwbuild, api_maj_ver, api_min_ver);
3790
3791 sc->sc_rxctl_atq = true;
3792 if (sc->sc_mac_type == I40E_MAC_X722) {
3793 if (api_maj_ver == 1 && api_min_ver < 5) {
3794 sc->sc_rxctl_atq = false;
3795 }
3796 }
3797
3798 return 0;
3799 }
3800
3801 static int
3802 ixl_pxe_clear(struct ixl_softc *sc)
3803 {
3804 struct ixl_aq_desc iaq;
3805 int rv;
3806
3807 memset(&iaq, 0, sizeof(iaq));
3808 iaq.iaq_opcode = htole16(IXL_AQ_OP_CLEAR_PXE_MODE);
3809 iaq.iaq_param[0] = htole32(0x2);
3810
3811 rv = ixl_atq_poll(sc, &iaq, 250);
3812
3813 ixl_wr(sc, I40E_GLLAN_RCTL_0, 0x1);
3814
3815 if (rv != 0)
3816 return ETIMEDOUT;
3817
3818 switch (iaq.iaq_retval) {
3819 case htole16(IXL_AQ_RC_OK):
3820 case htole16(IXL_AQ_RC_EEXIST):
3821 break;
3822 default:
3823 return EIO;
3824 }
3825
3826 return 0;
3827 }
3828
3829 static int
3830 ixl_lldp_shut(struct ixl_softc *sc)
3831 {
3832 struct ixl_aq_desc iaq;
3833
3834 memset(&iaq, 0, sizeof(iaq));
3835 iaq.iaq_opcode = htole16(IXL_AQ_OP_LLDP_STOP_AGENT);
3836 iaq.iaq_param[0] = htole32(IXL_LLDP_SHUTDOWN);
3837
3838 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
3839 aprint_error_dev(sc->sc_dev, "STOP LLDP AGENT timeout\n");
3840 return -1;
3841 }
3842
3843 switch (iaq.iaq_retval) {
3844 case htole16(IXL_AQ_RC_EMODE):
3845 case htole16(IXL_AQ_RC_EPERM):
3846 /* ignore silently */
3847 default:
3848 break;
3849 }
3850
3851 return 0;
3852 }
3853
3854 static void
3855 ixl_parse_hw_capability(struct ixl_softc *sc, struct ixl_aq_capability *cap)
3856 {
3857 uint16_t id;
3858 uint32_t number, logical_id;
3859
3860 id = le16toh(cap->cap_id);
3861 number = le32toh(cap->number);
3862 logical_id = le32toh(cap->logical_id);
3863
3864 switch (id) {
3865 case IXL_AQ_CAP_RSS:
3866 sc->sc_rss_table_size = number;
3867 sc->sc_rss_table_entry_width = logical_id;
3868 break;
3869 case IXL_AQ_CAP_RXQ:
3870 case IXL_AQ_CAP_TXQ:
3871 sc->sc_nqueue_pairs_device = MIN(number,
3872 sc->sc_nqueue_pairs_device);
3873 break;
3874 }
3875 }
3876
3877 static int
3878 ixl_get_hw_capabilities(struct ixl_softc *sc)
3879 {
3880 struct ixl_dmamem idm;
3881 struct ixl_aq_desc iaq;
3882 struct ixl_aq_capability *caps;
3883 size_t i, ncaps;
3884 bus_size_t caps_size;
3885 uint16_t status;
3886 int rv;
3887
3888 caps_size = sizeof(caps[0]) * 40;
3889 memset(&iaq, 0, sizeof(iaq));
3890 iaq.iaq_opcode = htole16(IXL_AQ_OP_LIST_FUNC_CAP);
3891
3892 do {
3893 if (ixl_dmamem_alloc(sc, &idm, caps_size, 0) != 0) {
3894 return -1;
3895 }
3896
3897 iaq.iaq_flags = htole16(IXL_AQ_BUF |
3898 (caps_size > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
3899 iaq.iaq_datalen = htole16(caps_size);
3900 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
3901
3902 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0,
3903 IXL_DMA_LEN(&idm), BUS_DMASYNC_PREREAD);
3904
3905 rv = ixl_atq_poll(sc, &iaq, 250);
3906
3907 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0,
3908 IXL_DMA_LEN(&idm), BUS_DMASYNC_POSTREAD);
3909
3910 if (rv != 0) {
3911 aprint_error(", HW capabilities timeout\n");
3912 goto done;
3913 }
3914
3915 status = le16toh(iaq.iaq_retval);
3916
3917 if (status == IXL_AQ_RC_ENOMEM) {
3918 caps_size = le16toh(iaq.iaq_datalen);
3919 ixl_dmamem_free(sc, &idm);
3920 }
3921 } while (status == IXL_AQ_RC_ENOMEM);
3922
3923 if (status != IXL_AQ_RC_OK) {
3924 aprint_error(", HW capabilities error\n");
3925 goto done;
3926 }
3927
3928 caps = IXL_DMA_KVA(&idm);
3929 ncaps = le16toh(iaq.iaq_param[1]);
3930
3931 for (i = 0; i < ncaps; i++) {
3932 ixl_parse_hw_capability(sc, &caps[i]);
3933 }
3934
3935 done:
3936 ixl_dmamem_free(sc, &idm);
3937 return rv;
3938 }
3939
3940 static int
3941 ixl_get_mac(struct ixl_softc *sc)
3942 {
3943 struct ixl_dmamem idm;
3944 struct ixl_aq_desc iaq;
3945 struct ixl_aq_mac_addresses *addrs;
3946 int rv;
3947
3948 if (ixl_dmamem_alloc(sc, &idm, sizeof(*addrs), 0) != 0) {
3949 aprint_error(", unable to allocate mac addresses\n");
3950 return -1;
3951 }
3952
3953 memset(&iaq, 0, sizeof(iaq));
3954 iaq.iaq_flags = htole16(IXL_AQ_BUF);
3955 iaq.iaq_opcode = htole16(IXL_AQ_OP_MAC_ADDRESS_READ);
3956 iaq.iaq_datalen = htole16(sizeof(*addrs));
3957 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
3958
3959 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3960 BUS_DMASYNC_PREREAD);
3961
3962 rv = ixl_atq_poll(sc, &iaq, 250);
3963
3964 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3965 BUS_DMASYNC_POSTREAD);
3966
3967 if (rv != 0) {
3968 aprint_error(", MAC ADDRESS READ timeout\n");
3969 rv = -1;
3970 goto done;
3971 }
3972 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3973 aprint_error(", MAC ADDRESS READ error\n");
3974 rv = -1;
3975 goto done;
3976 }
3977
3978 addrs = IXL_DMA_KVA(&idm);
3979 if (!ISSET(iaq.iaq_param[0], htole32(IXL_AQ_MAC_PORT_VALID))) {
3980 printf(", port address is not valid\n");
3981 goto done;
3982 }
3983
3984 memcpy(sc->sc_enaddr, addrs->port, ETHER_ADDR_LEN);
3985 rv = 0;
3986
3987 done:
3988 ixl_dmamem_free(sc, &idm);
3989 return rv;
3990 }
3991
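/*
 * ixl_get_switch_config: fetch the switch configuration elements and
 * record the SEID, uplink SEID and downlink SEID of the first element
 * reported.  Exactly one of the uplink/downlink SEIDs being zero is
 * treated as a misconfiguration.
 */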
3992 static int
3993 ixl_get_switch_config(struct ixl_softc *sc)
3994 {
3995 struct ixl_dmamem idm;
3996 struct ixl_aq_desc iaq;
3997 struct ixl_aq_switch_config *hdr;
3998 struct ixl_aq_switch_config_element *elms, *elm;
3999 unsigned int nelm, i;
4000 int rv;
4001
4002 if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
4003 aprint_error_dev(sc->sc_dev,
4004 "unable to allocate switch config buffer\n");
4005 return -1;
4006 }
4007
4008 memset(&iaq, 0, sizeof(iaq));
4009 iaq.iaq_flags = htole16(IXL_AQ_BUF |
4010 (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4011 iaq.iaq_opcode = htole16(IXL_AQ_OP_SWITCH_GET_CONFIG);
4012 iaq.iaq_datalen = htole16(IXL_AQ_BUFLEN);
4013 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
4014
4015 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4016 BUS_DMASYNC_PREREAD);
4017
4018 rv = ixl_atq_poll(sc, &iaq, 250);
4019
4020 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4021 BUS_DMASYNC_POSTREAD);
4022
4023 if (rv != 0) {
4024 aprint_error_dev(sc->sc_dev, "GET SWITCH CONFIG timeout\n");
4025 rv = -1;
4026 goto done;
4027 }
4028 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4029 aprint_error_dev(sc->sc_dev, "GET SWITCH CONFIG error\n");
4030 rv = -1;
4031 goto done;
4032 }
4033
4034 hdr = IXL_DMA_KVA(&idm);
4035 elms = (struct ixl_aq_switch_config_element *)(hdr + 1);
4036
4037 nelm = le16toh(hdr->num_reported);
4038 if (nelm < 1) {
4039 aprint_error_dev(sc->sc_dev, "no switch config available\n");
4040 rv = -1;
4041 goto done;
4042 }
4043
4044 for (i = 0; i < nelm; i++) {
4045 elm = &elms[i];
4046
4047 aprint_debug_dev(sc->sc_dev,
4048 "type %x revision %u seid %04x\n",
4049 elm->type, elm->revision, le16toh(elm->seid));
4050 aprint_debug_dev(sc->sc_dev,
4051 "uplink %04x downlink %04x\n",
4052 le16toh(elm->uplink_seid),
4053 le16toh(elm->downlink_seid));
4054 aprint_debug_dev(sc->sc_dev,
4055 "conntype %x scheduler %04x extra %04x\n",
4056 elm->connection_type,
4057 le16toh(elm->scheduler_id),
4058 le16toh(elm->element_info));
4059 }
4060
4061 elm = &elms[0];
4062
4063 sc->sc_uplink_seid = elm->uplink_seid;
4064 sc->sc_downlink_seid = elm->downlink_seid;
4065 sc->sc_seid = elm->seid;
4066
4067 if ((sc->sc_uplink_seid == htole16(0)) !=
4068 (sc->sc_downlink_seid == htole16(0))) {
4069 aprint_error_dev(sc->sc_dev, "SEIDs are misconfigured\n");
4070 rv = -1;
4071 goto done;
4072 }
4073
4074 done:
4075 ixl_dmamem_free(sc, &idm);
4076 return rv;
4077 }
4078
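/*
 * ixl_phy_mask_ints: program the PHY event mask with every event bit
 * set except link up/down, module qualification failure and media not
 * available, so those events are left unmasked.
 */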
4079 static int
4080 ixl_phy_mask_ints(struct ixl_softc *sc)
4081 {
4082 struct ixl_aq_desc iaq;
4083
4084 memset(&iaq, 0, sizeof(iaq));
4085 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_EVENT_MASK);
4086 iaq.iaq_param[2] = htole32(IXL_AQ_PHY_EV_MASK &
4087 ~(IXL_AQ_PHY_EV_LINK_UPDOWN | IXL_AQ_PHY_EV_MODULE_QUAL_FAIL |
4088 IXL_AQ_PHY_EV_MEDIA_NA));
4089
4090 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4091 aprint_error_dev(sc->sc_dev, "SET PHY EVENT MASK timeout\n");
4092 return -1;
4093 }
4094 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4095 aprint_error_dev(sc->sc_dev, "SET PHY EVENT MASK error\n");
4096 return -1;
4097 }
4098
4099 return 0;
4100 }
4101
4102 static int
4103 ixl_get_phy_abilities(struct ixl_softc *sc, struct ixl_dmamem *idm)
4104 {
4105 struct ixl_aq_desc iaq;
4106 int rv;
4107
4108 memset(&iaq, 0, sizeof(iaq));
4109 iaq.iaq_flags = htole16(IXL_AQ_BUF |
4110 (IXL_DMA_LEN(idm) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4111 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_GET_ABILITIES);
4112 iaq.iaq_datalen = htole16(IXL_DMA_LEN(idm));
4113 iaq.iaq_param[0] = htole32(IXL_AQ_PHY_REPORT_INIT);
4114 ixl_aq_dva(&iaq, IXL_DMA_DVA(idm));
4115
4116 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
4117 BUS_DMASYNC_PREREAD);
4118
4119 rv = ixl_atq_poll(sc, &iaq, 250);
4120
4121 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
4122 BUS_DMASYNC_POSTREAD);
4123
4124 if (rv != 0)
4125 return -1;
4126
4127 return le16toh(iaq.iaq_retval);
4128 }
4129
4130 static int
4131 ixl_get_phy_types(struct ixl_softc *sc, uint64_t *phy_types_ptr)
4132 {
4133 struct ixl_dmamem idm;
4134 struct ixl_aq_phy_abilities *phy;
4135 uint64_t phy_types;
4136 int rv;
4137
4138 if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
4139 aprint_error_dev(sc->sc_dev,
4140 "unable to allocate switch config buffer\n");
4141 return -1;
4142 }
4143
4144 rv = ixl_get_phy_abilities(sc, &idm);
4145 switch (rv) {
4146 case -1:
4147 aprint_error_dev(sc->sc_dev, "GET PHY ABILITIES timeout\n");
4148 goto done;
4149 case IXL_AQ_RC_OK:
4150 break;
4151 case IXL_AQ_RC_EIO:
4152 aprint_error_dev(sc->sc_dev, "unable to query phy types\n");
4153 break;
4154 default:
4155 aprint_error_dev(sc->sc_dev,
4156 "GET PHY ABILITIIES error %u\n", rv);
4157 goto done;
4158 }
4159
4160 phy = IXL_DMA_KVA(&idm);
4161
4162 phy_types = le32toh(phy->phy_type);
4163 phy_types |= (uint64_t)le32toh(phy->phy_type_ext) << 32;
4164
4165 *phy_types_ptr = phy_types;
4166
4167 rv = 0;
4168
4169 done:
4170 ixl_dmamem_free(sc, &idm);
4171 return rv;
4172 }
4173
4174 static int
4175 ixl_get_link_status_poll(struct ixl_softc *sc)
4176 {
4177 struct ixl_aq_desc iaq;
4178 struct ixl_aq_link_param *param;
4179 int link;
4180
4181 memset(&iaq, 0, sizeof(iaq));
4182 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
4183 param = (struct ixl_aq_link_param *)iaq.iaq_param;
4184 param->notify = IXL_AQ_LINK_NOTIFY;
4185
4186 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4187 return ETIMEDOUT;
4188 }
4189 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4190 return EIO;
4191 }
4192
4193 link = ixl_set_link_status(sc, &iaq);
4194 sc->sc_ec.ec_if.if_link_state = link;
4195
4196 return 0;
4197 }
4198
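/*
 * ixl_get_vsi: read the VSI parameters into the scratch DMA buffer
 * with GET VSI PARAMETERS and record the VSI number and statistics
 * counter index.
 */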
4199 static int
4200 ixl_get_vsi(struct ixl_softc *sc)
4201 {
4202 struct ixl_dmamem *vsi = &sc->sc_scratch;
4203 struct ixl_aq_desc iaq;
4204 struct ixl_aq_vsi_param *param;
4205 struct ixl_aq_vsi_reply *reply;
4206 struct ixl_aq_vsi_data *data;
4207 int rv;
4208
4209 /* grumble, vsi info isn't "known" at compile time */
4210
4211 memset(&iaq, 0, sizeof(iaq));
4212 iaq.iaq_flags = htole16(IXL_AQ_BUF |
4213 (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4214 iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VSI_PARAMS);
4215 iaq.iaq_datalen = htole16(IXL_DMA_LEN(vsi));
4216 ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
4217
4218 param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
4219 param->uplink_seid = sc->sc_seid;
4220
4221 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4222 BUS_DMASYNC_PREREAD);
4223
4224 rv = ixl_atq_poll(sc, &iaq, 250);
4225
4226 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4227 BUS_DMASYNC_POSTREAD);
4228
4229 if (rv != 0) {
4230 return ETIMEDOUT;
4231 }
4232
4233 switch (le16toh(iaq.iaq_retval)) {
4234 case IXL_AQ_RC_OK:
4235 break;
4236 case IXL_AQ_RC_ENOENT:
4237 return ENOENT;
4238 case IXL_AQ_RC_EACCES:
4239 return EACCES;
4240 default:
4241 return EIO;
4242 }
4243
4244 reply = (struct ixl_aq_vsi_reply *)iaq.iaq_param;
4245 sc->sc_vsi_number = reply->vsi_number;
4246 data = IXL_DMA_KVA(vsi);
4247 sc->sc_vsi_stat_counter_idx = le16toh(data->stat_counter_idx);
4248
4249 return 0;
4250 }
4251
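/*
 * ixl_set_vsi: update the VSI data fetched by ixl_get_vsi() with a
 * contiguous queue mapping covering the active queue pairs and VLAN
 * stripping settings matching the current HWTAGGING capability, then
 * write it back with UPDATE VSI PARAMETERS.
 */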
4252 static int
4253 ixl_set_vsi(struct ixl_softc *sc)
4254 {
4255 struct ixl_dmamem *vsi = &sc->sc_scratch;
4256 struct ixl_aq_desc iaq;
4257 struct ixl_aq_vsi_param *param;
4258 struct ixl_aq_vsi_data *data = IXL_DMA_KVA(vsi);
4259 unsigned int qnum;
4260 uint16_t val;
4261 int rv;
4262
4263 qnum = sc->sc_nqueue_pairs - 1;
4264
4265 data->valid_sections = htole16(IXL_AQ_VSI_VALID_QUEUE_MAP |
4266 IXL_AQ_VSI_VALID_VLAN);
4267
4268 CLR(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_MASK));
4269 SET(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_CONTIG));
4270 data->queue_mapping[0] = htole16(0);
4271 data->tc_mapping[0] = htole16((0 << IXL_AQ_VSI_TC_Q_OFFSET_SHIFT) |
4272 (qnum << IXL_AQ_VSI_TC_Q_NUMBER_SHIFT));
4273
4274 val = le16toh(data->port_vlan_flags);
4275 CLR(val, IXL_AQ_VSI_PVLAN_MODE_MASK | IXL_AQ_VSI_PVLAN_EMOD_MASK);
4276 SET(val, IXL_AQ_VSI_PVLAN_MODE_ALL);
4277
4278 if (ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWTAGGING)) {
4279 SET(val, IXL_AQ_VSI_PVLAN_EMOD_STR_BOTH);
4280 } else {
4281 SET(val, IXL_AQ_VSI_PVLAN_EMOD_NOTHING);
4282 }
4283
4284 data->port_vlan_flags = htole16(val);
4285
4286 /* grumble, vsi info isn't "known" at compile time */
4287
4288 memset(&iaq, 0, sizeof(iaq));
4289 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD |
4290 (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4291 iaq.iaq_opcode = htole16(IXL_AQ_OP_UPD_VSI_PARAMS);
4292 iaq.iaq_datalen = htole16(IXL_DMA_LEN(vsi));
4293 ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
4294
4295 param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
4296 param->uplink_seid = sc->sc_seid;
4297
4298 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4299 BUS_DMASYNC_PREWRITE);
4300
4301 rv = ixl_atq_poll(sc, &iaq, 250);
4302
4303 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4304 BUS_DMASYNC_POSTWRITE);
4305
4306 if (rv != 0) {
4307 return ETIMEDOUT;
4308 }
4309
4310 switch (le16toh(iaq.iaq_retval)) {
4311 case IXL_AQ_RC_OK:
4312 break;
4313 case IXL_AQ_RC_ENOENT:
4314 return ENOENT;
4315 case IXL_AQ_RC_EACCES:
4316 return EACCES;
4317 default:
4318 return EIO;
4319 }
4320
4321 return 0;
4322 }
4323
4324 static void
4325 ixl_set_filter_control(struct ixl_softc *sc)
4326 {
4327 uint32_t reg;
4328
4329 reg = ixl_rd_rx_csr(sc, I40E_PFQF_CTL_0);
4330
4331 CLR(reg, I40E_PFQF_CTL_0_HASHLUTSIZE_MASK);
4332 SET(reg, I40E_HASH_LUT_SIZE_128 << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT);
4333
4334 SET(reg, I40E_PFQF_CTL_0_FD_ENA_MASK);
4335 SET(reg, I40E_PFQF_CTL_0_ETYPE_ENA_MASK);
4336 SET(reg, I40E_PFQF_CTL_0_MACVLAN_ENA_MASK);
4337
4338 ixl_wr_rx_csr(sc, I40E_PFQF_CTL_0, reg);
4339 }
4340
4341 static inline void
4342 ixl_get_default_rss_key(uint32_t *buf, size_t len)
4343 {
4344 size_t cplen;
4345 uint8_t rss_seed[RSS_KEYSIZE];
4346
4347 rss_getkey(rss_seed);
4348 memset(buf, 0, len);
4349
4350 cplen = MIN(len, sizeof(rss_seed));
4351 memcpy(buf, rss_seed, cplen);
4352 }
4353
4354 static void
4355 ixl_set_rss_key(struct ixl_softc *sc)
4356 {
4357 uint32_t rss_seed[IXL_RSS_KEY_SIZE_REG];
4358 size_t i;
4359
4360 ixl_get_default_rss_key(rss_seed, sizeof(rss_seed));
4361
4362 for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
4363 ixl_wr_rx_csr(sc, I40E_PFQF_HKEY(i), rss_seed[i]);
4364 }
4365 }
4366
4367 static void
4368 ixl_set_rss_pctype(struct ixl_softc *sc)
4369 {
4370 uint64_t set_hena = 0;
4371 uint32_t hena0, hena1;
4372
4373 if (sc->sc_mac_type == I40E_MAC_X722)
4374 set_hena = IXL_RSS_HENA_DEFAULT_X722;
4375 else
4376 set_hena = IXL_RSS_HENA_DEFAULT_XL710;
4377
4378 hena0 = ixl_rd_rx_csr(sc, I40E_PFQF_HENA(0));
4379 hena1 = ixl_rd_rx_csr(sc, I40E_PFQF_HENA(1));
4380
4381 SET(hena0, set_hena);
4382 SET(hena1, set_hena >> 32);
4383
4384 ixl_wr_rx_csr(sc, I40E_PFQF_HENA(0), hena0);
4385 ixl_wr_rx_csr(sc, I40E_PFQF_HENA(1), hena1);
4386 }
4387
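/*
 * ixl_set_rss_hlut: fill the RSS hash lookup table by assigning its
 * entries to queue pairs round-robin, then write the table out four
 * entries (one 32-bit register) at a time.
 */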
4388 static void
4389 ixl_set_rss_hlut(struct ixl_softc *sc)
4390 {
4391 unsigned int qid;
4392 uint8_t hlut_buf[512], lut_mask;
4393 uint32_t *hluts;
4394 size_t i, hluts_num;
4395
4396 lut_mask = (0x01 << sc->sc_rss_table_entry_width) - 1;
4397
4398 for (i = 0; i < sc->sc_rss_table_size; i++) {
4399 qid = i % sc->sc_nqueue_pairs;
4400 hlut_buf[i] = qid & lut_mask;
4401 }
4402
4403 hluts = (uint32_t *)hlut_buf;
4404 hluts_num = sc->sc_rss_table_size >> 2;
4405 for (i = 0; i < hluts_num; i++) {
4406 ixl_wr(sc, I40E_PFQF_HLUT(i), hluts[i]);
4407 }
4408 ixl_flush(sc);
4409 }
4410
4411 static void
4412 ixl_config_rss(struct ixl_softc *sc)
4413 {
4414
4415 KASSERT(mutex_owned(&sc->sc_cfg_lock));
4416
4417 ixl_set_rss_key(sc);
4418 ixl_set_rss_pctype(sc);
4419 ixl_set_rss_hlut(sc);
4420 }
4421
4422 static const struct ixl_phy_type *
4423 ixl_search_phy_type(uint8_t phy_type)
4424 {
4425 const struct ixl_phy_type *itype;
4426 uint64_t mask;
4427 unsigned int i;
4428
4429 if (phy_type >= 64)
4430 return NULL;
4431
4432 mask = 1ULL << phy_type;
4433
4434 for (i = 0; i < __arraycount(ixl_phy_type_map); i++) {
4435 itype = &ixl_phy_type_map[i];
4436
4437 if (ISSET(itype->phy_type, mask))
4438 return itype;
4439 }
4440
4441 return NULL;
4442 }
4443
4444 static uint64_t
4445 ixl_search_link_speed(uint8_t link_speed)
4446 {
4447 const struct ixl_speed_type *type;
4448 unsigned int i;
4449
4450 for (i = 0; i < __arraycount(ixl_speed_type_map); i++) {
4451 type = &ixl_speed_type_map[i];
4452
4453 if (ISSET(type->dev_speed, link_speed))
4454 return type->net_speed;
4455 }
4456
4457 return 0;
4458 }
4459
4460 static int
4461 ixl_restart_an(struct ixl_softc *sc)
4462 {
4463 struct ixl_aq_desc iaq;
4464
4465 memset(&iaq, 0, sizeof(iaq));
4466 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_RESTART_AN);
4467 iaq.iaq_param[0] =
4468 htole32(IXL_AQ_PHY_RESTART_AN | IXL_AQ_PHY_LINK_ENABLE);
4469
4470 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4471 aprint_error_dev(sc->sc_dev, "RESTART AN timeout\n");
4472 return -1;
4473 }
4474 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4475 aprint_error_dev(sc->sc_dev, "RESTART AN error\n");
4476 return -1;
4477 }
4478
4479 return 0;
4480 }
4481
4482 static int
4483 ixl_add_macvlan(struct ixl_softc *sc, const uint8_t *macaddr,
4484 uint16_t vlan, uint16_t flags)
4485 {
4486 struct ixl_aq_desc iaq;
4487 struct ixl_aq_add_macvlan *param;
4488 struct ixl_aq_add_macvlan_elem *elem;
4489
4490 memset(&iaq, 0, sizeof(iaq));
4491 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4492 iaq.iaq_opcode = htole16(IXL_AQ_OP_ADD_MACVLAN);
4493 iaq.iaq_datalen = htole16(sizeof(*elem));
4494 ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));
4495
4496 param = (struct ixl_aq_add_macvlan *)&iaq.iaq_param;
4497 param->num_addrs = htole16(1);
4498 param->seid0 = htole16(0x8000) | sc->sc_seid;
4499 param->seid1 = 0;
4500 param->seid2 = 0;
4501
4502 elem = IXL_DMA_KVA(&sc->sc_scratch);
4503 memset(elem, 0, sizeof(*elem));
4504 memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
4505 elem->flags = htole16(IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH | flags);
4506 elem->vlan = htole16(vlan);
4507
4508 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4509 return IXL_AQ_RC_EINVAL;
4510 }
4511
4512 switch (le16toh(iaq.iaq_retval)) {
4513 case IXL_AQ_RC_OK:
4514 break;
4515 case IXL_AQ_RC_ENOSPC:
4516 return ENOSPC;
4517 case IXL_AQ_RC_ENOENT:
4518 return ENOENT;
4519 case IXL_AQ_RC_EACCES:
4520 return EACCES;
4521 case IXL_AQ_RC_EEXIST:
4522 return EEXIST;
4523 case IXL_AQ_RC_EINVAL:
4524 return EINVAL;
4525 default:
4526 return EIO;
4527 }
4528
4529 return 0;
4530 }
4531
4532 static int
4533 ixl_remove_macvlan(struct ixl_softc *sc, const uint8_t *macaddr,
4534 uint16_t vlan, uint16_t flags)
4535 {
4536 struct ixl_aq_desc iaq;
4537 struct ixl_aq_remove_macvlan *param;
4538 struct ixl_aq_remove_macvlan_elem *elem;
4539
4540 memset(&iaq, 0, sizeof(iaq));
4541 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4542 iaq.iaq_opcode = htole16(IXL_AQ_OP_REMOVE_MACVLAN);
4543 iaq.iaq_datalen = htole16(sizeof(*elem));
4544 ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));
4545
4546 param = (struct ixl_aq_remove_macvlan *)&iaq.iaq_param;
4547 param->num_addrs = htole16(1);
4548 param->seid0 = htole16(0x8000) | sc->sc_seid;
4549 param->seid1 = 0;
4550 param->seid2 = 0;
4551
4552 elem = IXL_DMA_KVA(&sc->sc_scratch);
4553 memset(elem, 0, sizeof(*elem));
4554 memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
4555 elem->flags = htole16(IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH | flags);
4556 elem->vlan = htole16(vlan);
4557
4558 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4559 return EINVAL;
4560 }
4561
4562 switch (le16toh(iaq.iaq_retval)) {
4563 case IXL_AQ_RC_OK:
4564 break;
4565 case IXL_AQ_RC_ENOENT:
4566 return ENOENT;
4567 case IXL_AQ_RC_EACCES:
4568 return EACCES;
4569 case IXL_AQ_RC_EINVAL:
4570 return EINVAL;
4571 default:
4572 return EIO;
4573 }
4574
4575 return 0;
4576 }
4577
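/*
 * ixl_hmc: set up the host memory cache backing store.  Object sizes
 * and counts are read from the GLHMC registers, backing pages are
 * allocated and their addresses written into the segment descriptor
 * pages, the descriptors are handed to the hardware through
 * PFHMC_SDCMD, and the per-object base/count registers are programmed.
 */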
4578 static int
4579 ixl_hmc(struct ixl_softc *sc)
4580 {
4581 struct {
4582 uint32_t count;
4583 uint32_t minsize;
4584 bus_size_t objsiz;
4585 bus_size_t setoff;
4586 bus_size_t setcnt;
4587 } regs[] = {
4588 {
4589 0,
4590 IXL_HMC_TXQ_MINSIZE,
4591 I40E_GLHMC_LANTXOBJSZ,
4592 I40E_GLHMC_LANTXBASE(sc->sc_pf_id),
4593 I40E_GLHMC_LANTXCNT(sc->sc_pf_id),
4594 },
4595 {
4596 0,
4597 IXL_HMC_RXQ_MINSIZE,
4598 I40E_GLHMC_LANRXOBJSZ,
4599 I40E_GLHMC_LANRXBASE(sc->sc_pf_id),
4600 I40E_GLHMC_LANRXCNT(sc->sc_pf_id),
4601 },
4602 {
4603 0,
4604 0,
4605 I40E_GLHMC_FCOEDDPOBJSZ,
4606 I40E_GLHMC_FCOEDDPBASE(sc->sc_pf_id),
4607 I40E_GLHMC_FCOEDDPCNT(sc->sc_pf_id),
4608 },
4609 {
4610 0,
4611 0,
4612 I40E_GLHMC_FCOEFOBJSZ,
4613 I40E_GLHMC_FCOEFBASE(sc->sc_pf_id),
4614 I40E_GLHMC_FCOEFCNT(sc->sc_pf_id),
4615 },
4616 };
4617 struct ixl_hmc_entry *e;
4618 uint64_t size, dva;
4619 uint8_t *kva;
4620 uint64_t *sdpage;
4621 unsigned int i;
4622 int npages, tables;
4623 uint32_t reg;
4624
4625 CTASSERT(__arraycount(regs) <= __arraycount(sc->sc_hmc_entries));
4626
4627 regs[IXL_HMC_LAN_TX].count = regs[IXL_HMC_LAN_RX].count =
4628 ixl_rd(sc, I40E_GLHMC_LANQMAX);
4629
4630 size = 0;
4631 for (i = 0; i < __arraycount(regs); i++) {
4632 e = &sc->sc_hmc_entries[i];
4633
4634 e->hmc_count = regs[i].count;
4635 reg = ixl_rd(sc, regs[i].objsiz);
4636 e->hmc_size = BIT_ULL(0x3F & reg);
4637 e->hmc_base = size;
4638
4639 if ((e->hmc_size * 8) < regs[i].minsize) {
4640 aprint_error_dev(sc->sc_dev,
4641 "kernel hmc entry is too big\n");
4642 return -1;
4643 }
4644
4645 size += roundup(e->hmc_size * e->hmc_count, IXL_HMC_ROUNDUP);
4646 }
4647 size = roundup(size, IXL_HMC_PGSIZE);
4648 npages = size / IXL_HMC_PGSIZE;
4649
4650 tables = roundup(size, IXL_HMC_L2SZ) / IXL_HMC_L2SZ;
4651
4652 if (ixl_dmamem_alloc(sc, &sc->sc_hmc_pd, size, IXL_HMC_PGSIZE) != 0) {
4653 aprint_error_dev(sc->sc_dev,
4654 "unable to allocate hmc pd memory\n");
4655 return -1;
4656 }
4657
4658 if (ixl_dmamem_alloc(sc, &sc->sc_hmc_sd, tables * IXL_HMC_PGSIZE,
4659 IXL_HMC_PGSIZE) != 0) {
4660 aprint_error_dev(sc->sc_dev,
4661 "unable to allocate hmc sd memory\n");
4662 ixl_dmamem_free(sc, &sc->sc_hmc_pd);
4663 return -1;
4664 }
4665
4666 kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
4667 memset(kva, 0, IXL_DMA_LEN(&sc->sc_hmc_pd));
4668
4669 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
4670 0, IXL_DMA_LEN(&sc->sc_hmc_pd),
4671 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4672
4673 dva = IXL_DMA_DVA(&sc->sc_hmc_pd);
4674 sdpage = IXL_DMA_KVA(&sc->sc_hmc_sd);
4675 memset(sdpage, 0, IXL_DMA_LEN(&sc->sc_hmc_sd));
4676
4677 for (i = 0; (int)i < npages; i++) {
4678 *sdpage = htole64(dva | IXL_HMC_PDVALID);
4679 sdpage++;
4680
4681 dva += IXL_HMC_PGSIZE;
4682 }
4683
4684 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_sd),
4685 0, IXL_DMA_LEN(&sc->sc_hmc_sd),
4686 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4687
4688 dva = IXL_DMA_DVA(&sc->sc_hmc_sd);
4689 for (i = 0; (int)i < tables; i++) {
4690 uint32_t count;
4691
4692 KASSERT(npages >= 0);
4693
4694 count = ((unsigned int)npages > IXL_HMC_PGS) ?
4695 IXL_HMC_PGS : (unsigned int)npages;
4696
4697 ixl_wr(sc, I40E_PFHMC_SDDATAHIGH, dva >> 32);
4698 ixl_wr(sc, I40E_PFHMC_SDDATALOW, dva |
4699 (count << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
4700 (1U << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT));
4701 ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
4702 ixl_wr(sc, I40E_PFHMC_SDCMD,
4703 (1U << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | i);
4704
4705 npages -= IXL_HMC_PGS;
4706 dva += IXL_HMC_PGSIZE;
4707 }
4708
4709 for (i = 0; i < __arraycount(regs); i++) {
4710 e = &sc->sc_hmc_entries[i];
4711
4712 ixl_wr(sc, regs[i].setoff, e->hmc_base / IXL_HMC_ROUNDUP);
4713 ixl_wr(sc, regs[i].setcnt, e->hmc_count);
4714 }
4715
4716 return 0;
4717 }
4718
4719 static void
4720 ixl_hmc_free(struct ixl_softc *sc)
4721 {
4722 ixl_dmamem_free(sc, &sc->sc_hmc_sd);
4723 ixl_dmamem_free(sc, &sc->sc_hmc_pd);
4724 }
4725
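/*
 * ixl_hmc_pack: bit-pack a host structure into the layout the hardware
 * expects.  Each ixl_hmc_pack entry gives a source byte offset and a
 * destination bit position and width; fields are copied a byte at a
 * time with shifts to handle destinations that are not byte aligned.
 */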
4726 static void
4727 ixl_hmc_pack(void *d, const void *s, const struct ixl_hmc_pack *packing,
4728 unsigned int npacking)
4729 {
4730 uint8_t *dst = d;
4731 const uint8_t *src = s;
4732 unsigned int i;
4733
4734 for (i = 0; i < npacking; i++) {
4735 const struct ixl_hmc_pack *pack = &packing[i];
4736 unsigned int offset = pack->lsb / 8;
4737 unsigned int align = pack->lsb % 8;
4738 const uint8_t *in = src + pack->offset;
4739 uint8_t *out = dst + offset;
4740 int width = pack->width;
4741 unsigned int inbits = 0;
4742
4743 if (align) {
4744 inbits = (*in++) << align;
4745 *out++ |= (inbits & 0xff);
4746 inbits >>= 8;
4747
4748 width -= 8 - align;
4749 }
4750
4751 while (width >= 8) {
4752 inbits |= (*in++) << align;
4753 *out++ = (inbits & 0xff);
4754 inbits >>= 8;
4755
4756 width -= 8;
4757 }
4758
4759 if (width > 0) {
4760 inbits |= (*in) << align;
4761 *out |= (inbits & ((1 << width) - 1));
4762 }
4763 }
4764 }
4765
4766 static struct ixl_aq_buf *
4767 ixl_aqb_alloc(struct ixl_softc *sc)
4768 {
4769 struct ixl_aq_buf *aqb;
4770
4771 aqb = malloc(sizeof(*aqb), M_DEVBUF, M_WAITOK);
4772 if (aqb == NULL)
4773 return NULL;
4774
4775 aqb->aqb_size = IXL_AQ_BUFLEN;
4776
4777 if (bus_dmamap_create(sc->sc_dmat, aqb->aqb_size, 1,
4778 aqb->aqb_size, 0,
4779 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &aqb->aqb_map) != 0)
4780 goto free;
4781 if (bus_dmamem_alloc(sc->sc_dmat, aqb->aqb_size,
4782 IXL_AQ_ALIGN, 0, &aqb->aqb_seg, 1, &aqb->aqb_nsegs,
4783 BUS_DMA_WAITOK) != 0)
4784 goto destroy;
4785 if (bus_dmamem_map(sc->sc_dmat, &aqb->aqb_seg, aqb->aqb_nsegs,
4786 aqb->aqb_size, &aqb->aqb_data, BUS_DMA_WAITOK) != 0)
4787 goto dma_free;
4788 if (bus_dmamap_load(sc->sc_dmat, aqb->aqb_map, aqb->aqb_data,
4789 aqb->aqb_size, NULL, BUS_DMA_WAITOK) != 0)
4790 goto unmap;
4791
4792 return aqb;
4793 unmap:
4794 bus_dmamem_unmap(sc->sc_dmat, aqb->aqb_data, aqb->aqb_size);
4795 dma_free:
4796 bus_dmamem_free(sc->sc_dmat, &aqb->aqb_seg, 1);
4797 destroy:
4798 bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
4799 free:
4800 free(aqb, M_DEVBUF);
4801
4802 return NULL;
4803 }
4804
4805 static void
4806 ixl_aqb_free(struct ixl_softc *sc, struct ixl_aq_buf *aqb)
4807 {
4808 bus_dmamap_unload(sc->sc_dmat, aqb->aqb_map);
4809 bus_dmamem_unmap(sc->sc_dmat, aqb->aqb_data, aqb->aqb_size);
4810 bus_dmamem_free(sc->sc_dmat, &aqb->aqb_seg, 1);
4811 bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
4812 free(aqb, M_DEVBUF);
4813 }
4814
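/*
 * ixl_arq_fill: post receive buffers on the admin (ARQ) ring.  Any
 * descriptor slot without a live buffer gets one from the idle list,
 * or a freshly allocated one, and is reinitialised; the ARQ tail
 * register is updated if anything was posted.
 */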
4815 static int
4816 ixl_arq_fill(struct ixl_softc *sc)
4817 {
4818 struct ixl_aq_buf *aqb;
4819 struct ixl_aq_desc *arq, *iaq;
4820 unsigned int prod = sc->sc_arq_prod;
4821 unsigned int n;
4822 int post = 0;
4823
4824 n = ixl_rxr_unrefreshed(sc->sc_arq_prod, sc->sc_arq_cons,
4825 IXL_AQ_NUM);
4826 arq = IXL_DMA_KVA(&sc->sc_arq);
4827
4828 if (__predict_false(n <= 0))
4829 return 0;
4830
4831 do {
4832 aqb = sc->sc_arq_live[prod];
4833 iaq = &arq[prod];
4834
4835 if (aqb == NULL) {
4836 aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle);
4837 if (aqb != NULL) {
4838 SIMPLEQ_REMOVE(&sc->sc_arq_idle, aqb,
4839 ixl_aq_buf, aqb_entry);
4840 } else if ((aqb = ixl_aqb_alloc(sc)) == NULL) {
4841 break;
4842 }
4843
4844 sc->sc_arq_live[prod] = aqb;
4845 memset(aqb->aqb_data, 0, aqb->aqb_size);
4846
4847 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0,
4848 aqb->aqb_size, BUS_DMASYNC_PREREAD);
4849
4850 iaq->iaq_flags = htole16(IXL_AQ_BUF |
4851 (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ?
4852 IXL_AQ_LB : 0));
4853 iaq->iaq_opcode = 0;
4854 iaq->iaq_datalen = htole16(aqb->aqb_size);
4855 iaq->iaq_retval = 0;
4856 iaq->iaq_cookie = 0;
4857 iaq->iaq_param[0] = 0;
4858 iaq->iaq_param[1] = 0;
4859 ixl_aq_dva(iaq, aqb->aqb_map->dm_segs[0].ds_addr);
4860 }
4861
4862 prod++;
4863 prod &= IXL_AQ_MASK;
4864
4865 post = 1;
4866
4867 } while (--n);
4868
4869 if (post) {
4870 sc->sc_arq_prod = prod;
4871 ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
4872 }
4873
4874 return post;
4875 }
4876
4877 static void
4878 ixl_arq_unfill(struct ixl_softc *sc)
4879 {
4880 struct ixl_aq_buf *aqb;
4881 unsigned int i;
4882
4883 for (i = 0; i < __arraycount(sc->sc_arq_live); i++) {
4884 aqb = sc->sc_arq_live[i];
4885 if (aqb == NULL)
4886 continue;
4887
4888 sc->sc_arq_live[i] = NULL;
4889 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, aqb->aqb_size,
4890 BUS_DMASYNC_POSTREAD);
4891 ixl_aqb_free(sc, aqb);
4892 }
4893
4894 while ((aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle)) != NULL) {
4895 SIMPLEQ_REMOVE(&sc->sc_arq_idle, aqb,
4896 ixl_aq_buf, aqb_entry);
4897 ixl_aqb_free(sc, aqb);
4898 }
4899 }
4900
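/*
 * ixl_clear_hw: quiesce the function before reset.  All PF and VF
 * interrupts are disabled, the interrupt linked lists are terminated,
 * the hardware is warned of the coming transmit queue disables, and
 * every queue is then disabled.
 */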
4901 static void
4902 ixl_clear_hw(struct ixl_softc *sc)
4903 {
4904 uint32_t num_queues, base_queue;
4905 uint32_t num_pf_int;
4906 uint32_t num_vf_int;
4907 uint32_t num_vfs;
4908 uint32_t i, j;
4909 uint32_t val;
4910 uint32_t eol = 0x7ff;
4911
4912 /* get number of interrupts, queues, and vfs */
4913 val = ixl_rd(sc, I40E_GLPCI_CNF2);
4914 num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
4915 I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
4916 num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
4917 I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
4918
4919 val = ixl_rd(sc, I40E_PFLAN_QALLOC);
4920 base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
4921 I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
4922 j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
4923 I40E_PFLAN_QALLOC_LASTQ_SHIFT;
4924 if (val & I40E_PFLAN_QALLOC_VALID_MASK)
4925 num_queues = (j - base_queue) + 1;
4926 else
4927 num_queues = 0;
4928
4929 val = ixl_rd(sc, I40E_PF_VT_PFALLOC);
4930 i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
4931 I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
4932 j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
4933 I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
4934 if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
4935 num_vfs = (j - i) + 1;
4936 else
4937 num_vfs = 0;
4938
4939 /* stop all the interrupts */
4940 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0);
4941 ixl_flush(sc);
4942 val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
4943 for (i = 0; i < num_pf_int - 2; i++)
4944 ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), val);
4945 ixl_flush(sc);
4946
4947 /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
4948 val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4949 ixl_wr(sc, I40E_PFINT_LNKLST0, val);
4950 for (i = 0; i < num_pf_int - 2; i++)
4951 ixl_wr(sc, I40E_PFINT_LNKLSTN(i), val);
4952 val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4953 for (i = 0; i < num_vfs; i++)
4954 ixl_wr(sc, I40E_VPINT_LNKLST0(i), val);
4955 for (i = 0; i < num_vf_int - 2; i++)
4956 ixl_wr(sc, I40E_VPINT_LNKLSTN(i), val);
4957
4958 /* warn the HW of the coming Tx disables */
4959 for (i = 0; i < num_queues; i++) {
4960 uint32_t abs_queue_idx = base_queue + i;
4961 uint32_t reg_block = 0;
4962
4963 if (abs_queue_idx >= 128) {
4964 reg_block = abs_queue_idx / 128;
4965 abs_queue_idx %= 128;
4966 }
4967
4968 val = ixl_rd(sc, I40E_GLLAN_TXPRE_QDIS(reg_block));
4969 val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
4970 val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
4971 val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
4972
4973 ixl_wr(sc, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
4974 }
4975 delaymsec(400);
4976
4977 /* stop all the queues */
4978 for (i = 0; i < num_queues; i++) {
4979 ixl_wr(sc, I40E_QINT_TQCTL(i), 0);
4980 ixl_wr(sc, I40E_QTX_ENA(i), 0);
4981 ixl_wr(sc, I40E_QINT_RQCTL(i), 0);
4982 ixl_wr(sc, I40E_QRX_ENA(i), 0);
4983 }
4984
4985 /* short wait for all queue disables to settle */
4986 delaymsec(50);
4987 }
4988
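/*
 * ixl_pf_reset: wait for any global reset in progress to settle and
 * for the firmware to report ready, then, unless a global reset was
 * just observed, trigger a PF software reset and poll for completion.
 */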
4989 static int
4990 ixl_pf_reset(struct ixl_softc *sc)
4991 {
4992 uint32_t cnt = 0;
4993 uint32_t cnt1 = 0;
4994 uint32_t reg = 0, reg0 = 0;
4995 uint32_t grst_del;
4996
4997 /*
4998 * Poll for Global Reset steady state in case of recent GRST.
4999 * The grst delay value is in 100ms units, and we'll wait a
5000 * couple counts longer to be sure we don't just miss the end.
5001 */
5002 grst_del = ixl_rd(sc, I40E_GLGEN_RSTCTL);
5003 grst_del &= I40E_GLGEN_RSTCTL_GRSTDEL_MASK;
5004 grst_del >>= I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
5005
5006 grst_del = grst_del * 20;
5007
5008 for (cnt = 0; cnt < grst_del; cnt++) {
5009 reg = ixl_rd(sc, I40E_GLGEN_RSTAT);
5010 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
5011 break;
5012 delaymsec(100);
5013 }
5014 if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
5015 aprint_error(", Global reset polling failed to complete\n");
5016 return -1;
5017 }
5018
5019 /* Now Wait for the FW to be ready */
5020 for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
5021 reg = ixl_rd(sc, I40E_GLNVM_ULD);
5022 reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5023 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
5024 if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5025 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))
5026 break;
5027
5028 delaymsec(10);
5029 }
5030 if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5031 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
5032 aprint_error(", wait for FW Reset complete timed out "
5033 "(I40E_GLNVM_ULD = 0x%x)\n", reg);
5034 return -1;
5035 }
5036
5037 /*
5038 * If there was a Global Reset in progress when we got here,
5039 * we don't need to do the PF Reset
5040 */
5041 if (cnt == 0) {
5042 reg = ixl_rd(sc, I40E_PFGEN_CTRL);
5043 ixl_wr(sc, I40E_PFGEN_CTRL, reg | I40E_PFGEN_CTRL_PFSWR_MASK);
5044 for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) {
5045 reg = ixl_rd(sc, I40E_PFGEN_CTRL);
5046 if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
5047 break;
5048 delaymsec(1);
5049
5050 reg0 = ixl_rd(sc, I40E_GLGEN_RSTAT);
5051 if (reg0 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
5052 aprint_error(", Core reset upcoming."
5053 " Skipping PF reset reset request\n");
5054 return -1;
5055 }
5056 }
5057 if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
5058 aprint_error(", PF reset polling failed to complete"
5059 "(I40E_PFGEN_CTRL= 0x%x)\n", reg);
5060 return -1;
5061 }
5062 }
5063
5064 return 0;
5065 }
5066
5067 static int
5068 ixl_dmamem_alloc(struct ixl_softc *sc, struct ixl_dmamem *ixm,
5069 bus_size_t size, bus_size_t align)
5070 {
5071 ixm->ixm_size = size;
5072
5073 if (bus_dmamap_create(sc->sc_dmat, ixm->ixm_size, 1,
5074 ixm->ixm_size, 0,
5075 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
5076 &ixm->ixm_map) != 0)
5077 return 1;
5078 if (bus_dmamem_alloc(sc->sc_dmat, ixm->ixm_size,
5079 align, 0, &ixm->ixm_seg, 1, &ixm->ixm_nsegs,
5080 BUS_DMA_WAITOK) != 0)
5081 goto destroy;
5082 if (bus_dmamem_map(sc->sc_dmat, &ixm->ixm_seg, ixm->ixm_nsegs,
5083 ixm->ixm_size, &ixm->ixm_kva, BUS_DMA_WAITOK) != 0)
5084 goto free;
5085 if (bus_dmamap_load(sc->sc_dmat, ixm->ixm_map, ixm->ixm_kva,
5086 ixm->ixm_size, NULL, BUS_DMA_WAITOK) != 0)
5087 goto unmap;
5088
5089 memset(ixm->ixm_kva, 0, ixm->ixm_size);
5090
5091 return 0;
5092 unmap:
5093 bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
5094 free:
5095 bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
5096 destroy:
5097 bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
5098 return 1;
5099 }
5100
5101 static void
5102 ixl_dmamem_free(struct ixl_softc *sc, struct ixl_dmamem *ixm)
5103 {
5104 bus_dmamap_unload(sc->sc_dmat, ixm->ixm_map);
5105 bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
5106 bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
5107 bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
5108 }
5109
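/*
 * ixl_setup_vlan_hwfilter: replace the "ignore VLAN" MAC filters for
 * the unicast and broadcast addresses with perfect-match filters, one
 * pair per VLAN ID currently configured on the interface (plus VLAN 0).
 */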
5110 static int
5111 ixl_setup_vlan_hwfilter(struct ixl_softc *sc)
5112 {
5113 struct ethercom *ec = &sc->sc_ec;
5114 struct vlanid_list *vlanidp;
5115 int rv;
5116
5117 ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
5118 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
5119 ixl_remove_macvlan(sc, etherbroadcastaddr, 0,
5120 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
5121
5122 rv = ixl_add_macvlan(sc, sc->sc_enaddr, 0,
5123 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5124 if (rv != 0)
5125 return rv;
5126 rv = ixl_add_macvlan(sc, etherbroadcastaddr, 0,
5127 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5128 if (rv != 0)
5129 return rv;
5130
5131 ETHER_LOCK(ec);
5132 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
5133 rv = ixl_add_macvlan(sc, sc->sc_enaddr,
5134 vlanidp->vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5135 if (rv != 0)
5136 break;
5137 rv = ixl_add_macvlan(sc, etherbroadcastaddr,
5138 vlanidp->vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5139 if (rv != 0)
5140 break;
5141 }
5142 ETHER_UNLOCK(ec);
5143
5144 return rv;
5145 }
5146
5147 static void
5148 ixl_teardown_vlan_hwfilter(struct ixl_softc *sc)
5149 {
5150 struct vlanid_list *vlanidp;
5151 struct ethercom *ec = &sc->sc_ec;
5152
5153 ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
5154 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5155 ixl_remove_macvlan(sc, etherbroadcastaddr, 0,
5156 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5157
5158 ETHER_LOCK(ec);
5159 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
5160 ixl_remove_macvlan(sc, sc->sc_enaddr,
5161 vlanidp->vid, IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5162 ixl_remove_macvlan(sc, etherbroadcastaddr,
5163 vlanidp->vid, IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5164 }
5165 ETHER_UNLOCK(ec);
5166
5167 ixl_add_macvlan(sc, sc->sc_enaddr, 0,
5168 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
5169 ixl_add_macvlan(sc, etherbroadcastaddr, 0,
5170 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
5171 }
5172
5173 static int
5174 ixl_update_macvlan(struct ixl_softc *sc)
5175 {
5176 int rv = 0;
5177 int next_ec_capenable = sc->sc_ec.ec_capenable;
5178
5179 if (ISSET(next_ec_capenable, ETHERCAP_VLAN_HWFILTER)) {
5180 rv = ixl_setup_vlan_hwfilter(sc);
5181 if (rv != 0)
5182 ixl_teardown_vlan_hwfilter(sc);
5183 } else {
5184 ixl_teardown_vlan_hwfilter(sc);
5185 }
5186
5187 return rv;
5188 }
5189
5190 static int
5191 ixl_ifflags_cb(struct ethercom *ec)
5192 {
5193 struct ifnet *ifp = &ec->ec_if;
5194 struct ixl_softc *sc = ifp->if_softc;
5195 int rv, change;
5196
5197 mutex_enter(&sc->sc_cfg_lock);
5198
5199 change = ec->ec_capenable ^ sc->sc_cur_ec_capenable;
5200
5201 if (ISSET(change, ETHERCAP_VLAN_HWTAGGING)) {
5202 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWTAGGING;
5203 rv = ENETRESET;
5204 goto out;
5205 }
5206
5207 if (ISSET(change, ETHERCAP_VLAN_HWFILTER)) {
5208 rv = ixl_update_macvlan(sc);
5209 if (rv == 0) {
5210 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWFILTER;
5211 } else {
5212 CLR(ec->ec_capenable, ETHERCAP_VLAN_HWFILTER);
5213 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER);
5214 }
5215 }
5216
5217 rv = ixl_iff(sc);
5218 out:
5219 mutex_exit(&sc->sc_cfg_lock);
5220
5221 return rv;
5222 }
5223
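/*
 * ixl_set_link_status: translate a LINK STATUS admin queue reply into
 * ifmedia active/status words and a baudrate, and return the
 * corresponding LINK_STATE_* value.
 */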
5224 static int
5225 ixl_set_link_status(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
5226 {
5227 const struct ixl_aq_link_status *status;
5228 const struct ixl_phy_type *itype;
5229
5230 uint64_t ifm_active = IFM_ETHER;
5231 uint64_t ifm_status = IFM_AVALID;
5232 int link_state = LINK_STATE_DOWN;
5233 uint64_t baudrate = 0;
5234
5235 status = (const struct ixl_aq_link_status *)iaq->iaq_param;
5236 if (!ISSET(status->link_info, IXL_AQ_LINK_UP_FUNCTION))
5237 goto done;
5238
5239 ifm_active |= IFM_FDX;
5240 ifm_status |= IFM_ACTIVE;
5241 link_state = LINK_STATE_UP;
5242
5243 itype = ixl_search_phy_type(status->phy_type);
5244 if (itype != NULL)
5245 ifm_active |= itype->ifm_type;
5246
5247 if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_TX))
5248 ifm_active |= IFM_ETH_TXPAUSE;
5249 if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_RX))
5250 ifm_active |= IFM_ETH_RXPAUSE;
5251
5252 baudrate = ixl_search_link_speed(status->link_speed);
5253
5254 done:
5255 /* NET_ASSERT_LOCKED() except during attach */
5256 sc->sc_media_active = ifm_active;
5257 sc->sc_media_status = ifm_status;
5258
5259 sc->sc_ec.ec_if.if_baudrate = baudrate;
5260
5261 return link_state;
5262 }
5263
5264 static int
5265 ixl_establish_intx(struct ixl_softc *sc)
5266 {
5267 pci_chipset_tag_t pc = sc->sc_pa.pa_pc;
5268 pci_intr_handle_t *intr;
5269 char xnamebuf[32];
5270 char intrbuf[PCI_INTRSTR_LEN];
5271 char const *intrstr;
5272
5273 KASSERT(sc->sc_nintrs == 1);
5274
5275 intr = &sc->sc_ihp[0];
5276
5277 intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf));
5278 snprintf(xnamebuf, sizeof(xnamebuf), "%s:legacy",
5279 device_xname(sc->sc_dev));
5280
5281 sc->sc_ihs[0] = pci_intr_establish_xname(pc, *intr, IPL_NET, ixl_intr,
5282 sc, xnamebuf);
5283
5284 if (sc->sc_ihs[0] == NULL) {
5285 aprint_error_dev(sc->sc_dev,
5286 "unable to establish interrupt at %s\n", intrstr);
5287 return -1;
5288 }
5289
5290 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
5291 return 0;
5292 }
5293
5294 static int
5295 ixl_establish_msix(struct ixl_softc *sc)
5296 {
5297 pci_chipset_tag_t pc = sc->sc_pa.pa_pc;
5298 kcpuset_t *affinity;
5299 unsigned int vector = 0;
5300 unsigned int i;
5301 int affinity_to, r;
5302 char xnamebuf[32];
5303 char intrbuf[PCI_INTRSTR_LEN];
5304 char const *intrstr;
5305
5306 kcpuset_create(&affinity, false);
5307
5308 /* the "other" intr is mapped to vector 0 */
5309 vector = 0;
5310 intrstr = pci_intr_string(pc, sc->sc_ihp[vector],
5311 intrbuf, sizeof(intrbuf));
5312 snprintf(xnamebuf, sizeof(xnamebuf), "%s others",
5313 device_xname(sc->sc_dev));
5314 sc->sc_ihs[vector] = pci_intr_establish_xname(pc,
5315 sc->sc_ihp[vector], IPL_NET, ixl_other_intr,
5316 sc, xnamebuf);
5317 if (sc->sc_ihs[vector] == NULL) {
5318 aprint_error_dev(sc->sc_dev,
5319 "unable to establish interrupt at %s\n", intrstr);
5320 goto fail;
5321 }
5322
5323 aprint_normal_dev(sc->sc_dev, "other interrupt at %s", intrstr);
5324
5325 affinity_to = ncpu > (int)sc->sc_nqueue_pairs_max ? 1 : 0;
5326 affinity_to = (affinity_to + sc->sc_nqueue_pairs_max) % ncpu;
5327
5328 kcpuset_zero(affinity);
5329 kcpuset_set(affinity, affinity_to);
5330 r = interrupt_distribute(sc->sc_ihs[vector], affinity, NULL);
5331 if (r == 0) {
5332 aprint_normal(", affinity to %u", affinity_to);
5333 }
5334 aprint_normal("\n");
5335 vector++;
5336
5337 sc->sc_msix_vector_queue = vector;
5338 affinity_to = ncpu > (int)sc->sc_nqueue_pairs_max ? 1 : 0;
5339
5340 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
5341 intrstr = pci_intr_string(pc, sc->sc_ihp[vector],
5342 intrbuf, sizeof(intrbuf));
5343 snprintf(xnamebuf, sizeof(xnamebuf), "%s TXRX%d",
5344 device_xname(sc->sc_dev), i);
5345
5346 sc->sc_ihs[vector] = pci_intr_establish_xname(pc,
5347 sc->sc_ihp[vector], IPL_NET, ixl_queue_intr,
5348 (void *)&sc->sc_qps[i], xnamebuf);
5349
5350 if (sc->sc_ihs[vector] == NULL) {
5351 aprint_error_dev(sc->sc_dev,
5352 "unable to establish interrupt at %s\n", intrstr);
5353 goto fail;
5354 }
5355
5356 aprint_normal_dev(sc->sc_dev,
5357 "for TXRX%d interrupt at %s",i , intrstr);
5358
5359 kcpuset_zero(affinity);
5360 kcpuset_set(affinity, affinity_to);
5361 r = interrupt_distribute(sc->sc_ihs[vector], affinity, NULL);
5362 if (r == 0) {
5363 aprint_normal(", affinity to %u", affinity_to);
5364 affinity_to = (affinity_to + 1) % ncpu;
5365 }
5366 aprint_normal("\n");
5367 vector++;
5368 }
5369
5370 kcpuset_destroy(affinity);
5371
5372 return 0;
5373 fail:
5374 for (i = 0; i < vector; i++) {
5375 pci_intr_disestablish(pc, sc->sc_ihs[i]);
5376 }
5377
5378 sc->sc_msix_vector_queue = 0;
5380 kcpuset_destroy(affinity);
5381
5382 return -1;
5383 }
5384
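/*
 * ixl_config_queue_intr: bind each queue pair to an interrupt vector.
 * With MSI-X every pair gets its own vector starting at
 * sc_msix_vector_queue; otherwise all pairs share the legacy vector.
 * The RQCTL/TQCTL linked lists chain each pair's RX cause to its TX
 * cause and then terminate.
 */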
5385 static void
5386 ixl_config_queue_intr(struct ixl_softc *sc)
5387 {
5388 unsigned int i, vector;
5389
5390 if (sc->sc_intrtype == PCI_INTR_TYPE_MSIX) {
5391 vector = sc->sc_msix_vector_queue;
5392 } else {
5393 vector = I40E_INTR_NOTX_INTR;
5394
5395 ixl_wr(sc, I40E_PFINT_LNKLST0,
5396 (I40E_INTR_NOTX_QUEUE <<
5397 I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
5398 (I40E_QUEUE_TYPE_RX <<
5399 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
5400 }
5401
5402 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
5403 ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), 0);
5404 ixl_flush(sc);
5405
5406 ixl_wr(sc, I40E_PFINT_LNKLSTN(i),
5407 ((i) << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
5408 (I40E_QUEUE_TYPE_RX <<
5409 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
5410
5411 ixl_wr(sc, I40E_QINT_RQCTL(i),
5412 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
5413 (I40E_ITR_INDEX_RX <<
5414 I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
5415 (I40E_INTR_NOTX_RX_QUEUE <<
5416 I40E_QINT_RQCTL_MSIX0_INDX_SHIFT) |
5417 (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
5418 (I40E_QUEUE_TYPE_TX <<
5419 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
5420 I40E_QINT_RQCTL_CAUSE_ENA_MASK);
5421
5422 ixl_wr(sc, I40E_QINT_TQCTL(i),
5423 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
5424 (I40E_ITR_INDEX_TX <<
5425 I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
5426 (I40E_INTR_NOTX_TX_QUEUE <<
5427 I40E_QINT_TQCTL_MSIX0_INDX_SHIFT) |
5428 (I40E_QUEUE_TYPE_EOL <<
5429 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
5430 (I40E_QUEUE_TYPE_RX <<
5431 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) |
5432 I40E_QINT_TQCTL_CAUSE_ENA_MASK);
5433
5434 if (sc->sc_intrtype == PCI_INTR_TYPE_MSIX)
5435 vector++;
5436 }
5437 ixl_flush(sc);
5438
5439 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_RX), 0x7a);
5440 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_TX), 0x7a);
5441 ixl_flush(sc);
5442 }
5443
5444 static void
5445 ixl_config_other_intr(struct ixl_softc *sc)
5446 {
5447 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0);
5448 (void)ixl_rd(sc, I40E_PFINT_ICR0);
5449
5450 ixl_wr(sc, I40E_PFINT_ICR0_ENA,
5451 I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
5452 I40E_PFINT_ICR0_ENA_GRST_MASK |
5453 I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
5454 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
5455 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
5456 I40E_PFINT_ICR0_ENA_VFLR_MASK |
5457 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
5458 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
5459 I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK);
5460
5461 ixl_wr(sc, I40E_PFINT_LNKLST0, 0x7FF);
5462 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_OTHER), 0);
5463 ixl_wr(sc, I40E_PFINT_STAT_CTL0,
5464 (I40E_ITR_INDEX_OTHER <<
5465 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT));
5466 ixl_flush(sc);
5467 }
5468
5469 static int
5470 ixl_setup_interrupts(struct ixl_softc *sc)
5471 {
5472 struct pci_attach_args *pa = &sc->sc_pa;
5473 pci_intr_type_t max_type, intr_type;
5474 int counts[PCI_INTR_TYPE_SIZE];
5475 int error;
5476 unsigned int i;
5477 bool retry;
5478
5479 memset(counts, 0, sizeof(counts));
5480 max_type = PCI_INTR_TYPE_MSIX;
5481 /* QPs + other interrupt */
5482 counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueue_pairs_max + 1;
5483 counts[PCI_INTR_TYPE_INTX] = 1;
5484
5485 if (ixl_param_nomsix)
5486 counts[PCI_INTR_TYPE_MSIX] = 0;
5487
5488 do {
5489 retry = false;
5490 error = pci_intr_alloc(pa, &sc->sc_ihp, counts, max_type);
5491 if (error != 0) {
5492 aprint_error_dev(sc->sc_dev,
5493 "couldn't map interrupt\n");
5494 break;
5495 }
5496 for (i = 0; i < sc->sc_nintrs; i++) {
5497 pci_intr_setattr(pa->pa_pc, &sc->sc_ihp[i],
5498 PCI_INTR_MPSAFE, true);
5499 }
5500
5501 intr_type = pci_intr_type(pa->pa_pc, sc->sc_ihp[0]);
5502 sc->sc_nintrs = counts[intr_type];
5503 KASSERT(sc->sc_nintrs > 0);
5504
5505 sc->sc_ihs = kmem_alloc(sizeof(sc->sc_ihs[0]) * sc->sc_nintrs,
5506 KM_SLEEP);
5507
5508 if (intr_type == PCI_INTR_TYPE_MSIX) {
5509 error = ixl_establish_msix(sc);
5510 if (error) {
5511 counts[PCI_INTR_TYPE_MSIX] = 0;
5512 retry = true;
5513 }
5514 } else if (intr_type == PCI_INTR_TYPE_INTX) {
5515 error = ixl_establish_intx(sc);
5516 } else {
5517 error = -1;
5518 }
5519
5520 if (error) {
5521 kmem_free(sc->sc_ihs,
5522 sizeof(sc->sc_ihs[0]) * sc->sc_nintrs);
5523 pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs);
5524 } else {
5525 sc->sc_intrtype = intr_type;
5526 }
5527 } while (retry);
5528
5529 return error;
5530 }
5531
5532 static void
5533 ixl_teardown_interrupts(struct ixl_softc *sc)
5534 {
5535 struct pci_attach_args *pa = &sc->sc_pa;
5536 unsigned int i;
5537
5538 for (i = 0; i < sc->sc_nintrs; i++) {
5539 pci_intr_disestablish(pa->pa_pc, sc->sc_ihs[i]);
5540 }
5541
5542 pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs);
5543
5544 kmem_free(sc->sc_ihs, sizeof(sc->sc_ihs[0]) * sc->sc_nintrs);
5545 sc->sc_ihs = NULL;
5546 sc->sc_nintrs = 0;
5547 }
5548
5549 static int
5550 ixl_setup_stats(struct ixl_softc *sc)
5551 {
5552 struct ixl_queue_pair *qp;
5553 struct ixl_tx_ring *txr;
5554 struct ixl_rx_ring *rxr;
5555 struct ixl_stats_counters *isc;
5556 unsigned int i;
5557
5558 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
5559 qp = &sc->sc_qps[i];
5560 txr = qp->qp_txr;
5561 rxr = qp->qp_rxr;
5562
5563 evcnt_attach_dynamic(&txr->txr_defragged, EVCNT_TYPE_MISC,
5564 NULL, qp->qp_name, "m_defrag succeeded");
5565 evcnt_attach_dynamic(&txr->txr_defrag_failed, EVCNT_TYPE_MISC,
5566 NULL, qp->qp_name, "m_defrag failed");
5567 evcnt_attach_dynamic(&txr->txr_pcqdrop, EVCNT_TYPE_MISC,
5568 NULL, qp->qp_name, "Dropped in pcq");
5569 evcnt_attach_dynamic(&txr->txr_transmitdef, EVCNT_TYPE_MISC,
5570 NULL, qp->qp_name, "Deferred transmit");
5571 evcnt_attach_dynamic(&txr->txr_intr, EVCNT_TYPE_INTR,
5572 NULL, qp->qp_name, "Interrupt on queue");
5573 evcnt_attach_dynamic(&txr->txr_defer, EVCNT_TYPE_MISC,
5574 NULL, qp->qp_name, "Handled queue in softint/workqueue");
5575
5576 evcnt_attach_dynamic(&rxr->rxr_mgethdr_failed, EVCNT_TYPE_MISC,
5577 NULL, qp->qp_name, "MGETHDR failed");
5578 evcnt_attach_dynamic(&rxr->rxr_mgetcl_failed, EVCNT_TYPE_MISC,
5579 NULL, qp->qp_name, "MCLGET failed");
5580 evcnt_attach_dynamic(&rxr->rxr_mbuf_load_failed,
5581 EVCNT_TYPE_MISC, NULL, qp->qp_name,
5582 "bus_dmamap_load_mbuf failed");
5583 evcnt_attach_dynamic(&rxr->rxr_intr, EVCNT_TYPE_INTR,
5584 NULL, qp->qp_name, "Interrupt on queue");
5585 evcnt_attach_dynamic(&rxr->rxr_defer, EVCNT_TYPE_MISC,
5586 NULL, qp->qp_name, "Handled queue in softint/workqueue");
5587 }
5588
5589 evcnt_attach_dynamic(&sc->sc_event_atq, EVCNT_TYPE_INTR,
5590 NULL, device_xname(sc->sc_dev), "Interrupt for other events");
5591 evcnt_attach_dynamic(&sc->sc_event_link, EVCNT_TYPE_MISC,
5592 NULL, device_xname(sc->sc_dev), "Link status event");
5593 evcnt_attach_dynamic(&sc->sc_event_ecc_err, EVCNT_TYPE_MISC,
5594 NULL, device_xname(sc->sc_dev), "ECC error");
5595 evcnt_attach_dynamic(&sc->sc_event_pci_exception, EVCNT_TYPE_MISC,
5596 NULL, device_xname(sc->sc_dev), "PCI exception");
5597 evcnt_attach_dynamic(&sc->sc_event_crit_err, EVCNT_TYPE_MISC,
5598 NULL, device_xname(sc->sc_dev), "Critical error");
5599
5600 isc = &sc->sc_stats_counters;
5601 evcnt_attach_dynamic(&isc->isc_crc_errors, EVCNT_TYPE_MISC,
5602 NULL, device_xname(sc->sc_dev), "CRC errors");
5603 evcnt_attach_dynamic(&isc->isc_illegal_bytes, EVCNT_TYPE_MISC,
5604 NULL, device_xname(sc->sc_dev), "Illegal bytes");
5605 evcnt_attach_dynamic(&isc->isc_mac_local_faults, EVCNT_TYPE_MISC,
5606 NULL, device_xname(sc->sc_dev), "Mac local faults");
5607 evcnt_attach_dynamic(&isc->isc_mac_remote_faults, EVCNT_TYPE_MISC,
5608 NULL, device_xname(sc->sc_dev), "Mac remote faults");
5609 evcnt_attach_dynamic(&isc->isc_link_xon_rx, EVCNT_TYPE_MISC,
5610 NULL, device_xname(sc->sc_dev), "Rx xon");
5611 evcnt_attach_dynamic(&isc->isc_link_xon_tx, EVCNT_TYPE_MISC,
5612 NULL, device_xname(sc->sc_dev), "Tx xon");
5613 evcnt_attach_dynamic(&isc->isc_link_xoff_rx, EVCNT_TYPE_MISC,
5614 NULL, device_xname(sc->sc_dev), "Rx xoff");
5615 evcnt_attach_dynamic(&isc->isc_link_xoff_tx, EVCNT_TYPE_MISC,
5616 NULL, device_xname(sc->sc_dev), "Tx xoff");
5617 evcnt_attach_dynamic(&isc->isc_rx_fragments, EVCNT_TYPE_MISC,
5618 NULL, device_xname(sc->sc_dev), "Rx fragments");
5619 evcnt_attach_dynamic(&isc->isc_rx_jabber, EVCNT_TYPE_MISC,
5620 NULL, device_xname(sc->sc_dev), "Rx jabber");
5621
5622 evcnt_attach_dynamic(&isc->isc_rx_size_64, EVCNT_TYPE_MISC,
5623 NULL, device_xname(sc->sc_dev), "Rx size 64");
5624 evcnt_attach_dynamic(&isc->isc_rx_size_127, EVCNT_TYPE_MISC,
5625 NULL, device_xname(sc->sc_dev), "Rx size 127");
5626 evcnt_attach_dynamic(&isc->isc_rx_size_255, EVCNT_TYPE_MISC,
5627 NULL, device_xname(sc->sc_dev), "Rx size 255");
5628 evcnt_attach_dynamic(&isc->isc_rx_size_511, EVCNT_TYPE_MISC,
5629 NULL, device_xname(sc->sc_dev), "Rx size 511");
5630 evcnt_attach_dynamic(&isc->isc_rx_size_1023, EVCNT_TYPE_MISC,
5631 NULL, device_xname(sc->sc_dev), "Rx size 1023");
5632 evcnt_attach_dynamic(&isc->isc_rx_size_1522, EVCNT_TYPE_MISC,
5633 NULL, device_xname(sc->sc_dev), "Rx size 1522");
5634 evcnt_attach_dynamic(&isc->isc_rx_size_big, EVCNT_TYPE_MISC,
5635 NULL, device_xname(sc->sc_dev), "Rx jumbo packets");
5636 evcnt_attach_dynamic(&isc->isc_rx_undersize, EVCNT_TYPE_MISC,
5637 NULL, device_xname(sc->sc_dev), "Rx under size");
5638 evcnt_attach_dynamic(&isc->isc_rx_oversize, EVCNT_TYPE_MISC,
5639 NULL, device_xname(sc->sc_dev), "Rx over size");
5640
5641 evcnt_attach_dynamic(&isc->isc_rx_bytes, EVCNT_TYPE_MISC,
5642 NULL, device_xname(sc->sc_dev), "Rx bytes / port");
5643 evcnt_attach_dynamic(&isc->isc_rx_discards, EVCNT_TYPE_MISC,
5644 NULL, device_xname(sc->sc_dev), "Rx discards / port");
5645 evcnt_attach_dynamic(&isc->isc_rx_unicast, EVCNT_TYPE_MISC,
5646 NULL, device_xname(sc->sc_dev), "Rx unicast / port");
5647 evcnt_attach_dynamic(&isc->isc_rx_multicast, EVCNT_TYPE_MISC,
5648 NULL, device_xname(sc->sc_dev), "Rx multicast / port");
5649 evcnt_attach_dynamic(&isc->isc_rx_broadcast, EVCNT_TYPE_MISC,
5650 NULL, device_xname(sc->sc_dev), "Rx broadcast / port");
5651
5652 evcnt_attach_dynamic(&isc->isc_vsi_rx_bytes, EVCNT_TYPE_MISC,
5653 NULL, device_xname(sc->sc_dev), "Rx bytes / vsi");
5654 evcnt_attach_dynamic(&isc->isc_vsi_rx_discards, EVCNT_TYPE_MISC,
5655 NULL, device_xname(sc->sc_dev), "Rx discard / vsi");
5656 evcnt_attach_dynamic(&isc->isc_vsi_rx_unicast, EVCNT_TYPE_MISC,
5657 NULL, device_xname(sc->sc_dev), "Rx unicast / vsi");
5658 evcnt_attach_dynamic(&isc->isc_vsi_rx_multicast, EVCNT_TYPE_MISC,
5659 NULL, device_xname(sc->sc_dev), "Rx multicast / vsi");
5660 evcnt_attach_dynamic(&isc->isc_vsi_rx_broadcast, EVCNT_TYPE_MISC,
5661 NULL, device_xname(sc->sc_dev), "Rx broadcast / vsi");
5662
5663 evcnt_attach_dynamic(&isc->isc_tx_size_64, EVCNT_TYPE_MISC,
5664 NULL, device_xname(sc->sc_dev), "Tx size 64");
5665 evcnt_attach_dynamic(&isc->isc_tx_size_127, EVCNT_TYPE_MISC,
5666 NULL, device_xname(sc->sc_dev), "Tx size 127");
5667 evcnt_attach_dynamic(&isc->isc_tx_size_255, EVCNT_TYPE_MISC,
5668 NULL, device_xname(sc->sc_dev), "Tx size 255");
5669 evcnt_attach_dynamic(&isc->isc_tx_size_511, EVCNT_TYPE_MISC,
5670 NULL, device_xname(sc->sc_dev), "Tx size 511");
5671 evcnt_attach_dynamic(&isc->isc_tx_size_1023, EVCNT_TYPE_MISC,
5672 NULL, device_xname(sc->sc_dev), "Tx size 1023");
5673 evcnt_attach_dynamic(&isc->isc_tx_size_1522, EVCNT_TYPE_MISC,
5674 NULL, device_xname(sc->sc_dev), "Tx size 1522");
5675 evcnt_attach_dynamic(&isc->isc_tx_size_big, EVCNT_TYPE_MISC,
5676 NULL, device_xname(sc->sc_dev), "Tx jumbo packets");
5677
5678 evcnt_attach_dynamic(&isc->isc_tx_bytes, EVCNT_TYPE_MISC,
5679 NULL, device_xname(sc->sc_dev), "Tx bytes / port");
5680 evcnt_attach_dynamic(&isc->isc_tx_dropped_link_down, EVCNT_TYPE_MISC,
5681 NULL, device_xname(sc->sc_dev),
5682 "Tx dropped due to link down / port");
5683 evcnt_attach_dynamic(&isc->isc_tx_unicast, EVCNT_TYPE_MISC,
5684 NULL, device_xname(sc->sc_dev), "Tx unicast / port");
5685 evcnt_attach_dynamic(&isc->isc_tx_multicast, EVCNT_TYPE_MISC,
5686 NULL, device_xname(sc->sc_dev), "Tx multicast / port");
5687 evcnt_attach_dynamic(&isc->isc_tx_broadcast, EVCNT_TYPE_MISC,
5688 NULL, device_xname(sc->sc_dev), "Tx broadcast / port");
5689
5690 evcnt_attach_dynamic(&isc->isc_vsi_tx_bytes, EVCNT_TYPE_MISC,
5691 NULL, device_xname(sc->sc_dev), "Tx bytes / vsi");
5692 evcnt_attach_dynamic(&isc->isc_vsi_tx_errors, EVCNT_TYPE_MISC,
5693 NULL, device_xname(sc->sc_dev), "Tx errors / vsi");
5694 evcnt_attach_dynamic(&isc->isc_vsi_tx_unicast, EVCNT_TYPE_MISC,
5695 NULL, device_xname(sc->sc_dev), "Tx unicast / vsi");
5696 evcnt_attach_dynamic(&isc->isc_vsi_tx_multicast, EVCNT_TYPE_MISC,
5697 NULL, device_xname(sc->sc_dev), "Tx multicast / vsi");
5698 evcnt_attach_dynamic(&isc->isc_vsi_tx_broadcast, EVCNT_TYPE_MISC,
5699 NULL, device_xname(sc->sc_dev), "Tx broadcast / vsi");
5700
5701 sc->sc_stats_intval = ixl_param_stats_interval;
5702 callout_init(&sc->sc_stats_callout, CALLOUT_MPSAFE);
5703 callout_setfunc(&sc->sc_stats_callout, ixl_stats_callout, sc);
5704 ixl_work_set(&sc->sc_stats_task, ixl_stats_update, sc);
5705
5706 return 0;
5707 }
5708
5709 static void
5710 ixl_teardown_stats(struct ixl_softc *sc)
5711 {
5712 struct ixl_tx_ring *txr;
5713 struct ixl_rx_ring *rxr;
5714 struct ixl_stats_counters *isc;
5715 unsigned int i;
5716
5717 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
5718 txr = sc->sc_qps[i].qp_txr;
5719 rxr = sc->sc_qps[i].qp_rxr;
5720
5721 evcnt_detach(&txr->txr_defragged);
5722 evcnt_detach(&txr->txr_defrag_failed);
5723 evcnt_detach(&txr->txr_pcqdrop);
5724 evcnt_detach(&txr->txr_transmitdef);
5725 evcnt_detach(&txr->txr_intr);
5726 evcnt_detach(&txr->txr_defer);
5727
5728 evcnt_detach(&rxr->rxr_mgethdr_failed);
5729 evcnt_detach(&rxr->rxr_mgetcl_failed);
5730 evcnt_detach(&rxr->rxr_mbuf_load_failed);
5731 evcnt_detach(&rxr->rxr_intr);
5732 evcnt_detach(&rxr->rxr_defer);
5733 }
5734
5735 isc = &sc->sc_stats_counters;
5736 evcnt_detach(&isc->isc_crc_errors);
5737 evcnt_detach(&isc->isc_illegal_bytes);
5738 evcnt_detach(&isc->isc_mac_local_faults);
5739 evcnt_detach(&isc->isc_mac_remote_faults);
5740 evcnt_detach(&isc->isc_link_xon_rx);
5741 evcnt_detach(&isc->isc_link_xon_tx);
5742 evcnt_detach(&isc->isc_link_xoff_rx);
5743 evcnt_detach(&isc->isc_link_xoff_tx);
5744 evcnt_detach(&isc->isc_rx_fragments);
5745 evcnt_detach(&isc->isc_rx_jabber);
5746 evcnt_detach(&isc->isc_rx_bytes);
5747 evcnt_detach(&isc->isc_rx_discards);
5748 evcnt_detach(&isc->isc_rx_unicast);
5749 evcnt_detach(&isc->isc_rx_multicast);
5750 evcnt_detach(&isc->isc_rx_broadcast);
5751 evcnt_detach(&isc->isc_rx_size_64);
5752 evcnt_detach(&isc->isc_rx_size_127);
5753 evcnt_detach(&isc->isc_rx_size_255);
5754 evcnt_detach(&isc->isc_rx_size_511);
5755 evcnt_detach(&isc->isc_rx_size_1023);
5756 evcnt_detach(&isc->isc_rx_size_1522);
5757 evcnt_detach(&isc->isc_rx_size_big);
5758 evcnt_detach(&isc->isc_rx_undersize);
5759 evcnt_detach(&isc->isc_rx_oversize);
5760 evcnt_detach(&isc->isc_tx_bytes);
5761 evcnt_detach(&isc->isc_tx_dropped_link_down);
5762 evcnt_detach(&isc->isc_tx_unicast);
5763 evcnt_detach(&isc->isc_tx_multicast);
5764 evcnt_detach(&isc->isc_tx_broadcast);
5765 evcnt_detach(&isc->isc_tx_size_64);
5766 evcnt_detach(&isc->isc_tx_size_127);
5767 evcnt_detach(&isc->isc_tx_size_255);
5768 evcnt_detach(&isc->isc_tx_size_511);
5769 evcnt_detach(&isc->isc_tx_size_1023);
5770 evcnt_detach(&isc->isc_tx_size_1522);
5771 evcnt_detach(&isc->isc_tx_size_big);
5772 evcnt_detach(&isc->isc_vsi_rx_discards);
5773 evcnt_detach(&isc->isc_vsi_rx_bytes);
5774 evcnt_detach(&isc->isc_vsi_rx_unicast);
5775 evcnt_detach(&isc->isc_vsi_rx_multicast);
5776 evcnt_detach(&isc->isc_vsi_rx_broadcast);
5777 evcnt_detach(&isc->isc_vsi_tx_errors);
5778 evcnt_detach(&isc->isc_vsi_tx_bytes);
5779 evcnt_detach(&isc->isc_vsi_tx_unicast);
5780 evcnt_detach(&isc->isc_vsi_tx_multicast);
5781 evcnt_detach(&isc->isc_vsi_tx_broadcast);
5782
5783 evcnt_detach(&sc->sc_event_atq);
5784 evcnt_detach(&sc->sc_event_link);
5785 evcnt_detach(&sc->sc_event_ecc_err);
5786 evcnt_detach(&sc->sc_event_pci_exception);
5787 evcnt_detach(&sc->sc_event_crit_err);
5788
5789 callout_destroy(&sc->sc_stats_callout);
5790 }
5791
5792 static void
5793 ixl_stats_callout(void *xsc)
5794 {
5795 struct ixl_softc *sc = xsc;
5796
5797 ixl_work_add(sc->sc_workq, &sc->sc_stats_task);
5798 callout_schedule(&sc->sc_stats_callout, mstohz(sc->sc_stats_intval));
5799 }
5800
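/*
 * ixl_stat_delta: read a 32-bit or 48-bit statistics counter (48-bit
 * counters are split over a high and a low register) and return the
 * wrap-aware difference from the stored offset, which is updated as a
 * side effect.  The first read (no offset yet) returns 0.
 */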
5801 static uint64_t
5802 ixl_stat_delta(struct ixl_softc *sc, uint32_t reg_hi, uint32_t reg_lo,
5803 uint64_t *offset, bool has_offset)
5804 {
5805 uint64_t value, delta;
5806 int bitwidth;
5807
5808 bitwidth = reg_hi == 0 ? 32 : 48;
5809
5810 value = ixl_rd(sc, reg_lo);
5811
5812 if (bitwidth > 32) {
5813 value |= ((uint64_t)ixl_rd(sc, reg_hi) << 32);
5814 }
5815
5816 if (__predict_true(has_offset)) {
5817 delta = value;
5818 if (value < *offset)
5819 delta += ((uint64_t)1 << bitwidth);
5820 delta -= *offset;
5821 } else {
5822 delta = 0;
5823 }
5824 atomic_swap_64(offset, value);
5825
5826 return delta;
5827 }
5828
5829 static void
5830 ixl_stats_update(void *xsc)
5831 {
5832 struct ixl_softc *sc = xsc;
5833 struct ixl_stats_counters *isc;
5834 uint64_t delta;
5835
5836 isc = &sc->sc_stats_counters;
5837
5838 /* errors */
5839 delta = ixl_stat_delta(sc,
5840 0, I40E_GLPRT_CRCERRS(sc->sc_port),
5841 &isc->isc_crc_errors_offset, isc->isc_has_offset);
5842 atomic_add_64(&isc->isc_crc_errors.ev_count, delta);
5843
5844 delta = ixl_stat_delta(sc,
5845 0, I40E_GLPRT_ILLERRC(sc->sc_port),
5846 &isc->isc_illegal_bytes_offset, isc->isc_has_offset);
5847 atomic_add_64(&isc->isc_illegal_bytes.ev_count, delta);
5848
5849 /* rx */
5850 delta = ixl_stat_delta(sc,
5851 I40E_GLPRT_GORCH(sc->sc_port), I40E_GLPRT_GORCL(sc->sc_port),
5852 &isc->isc_rx_bytes_offset, isc->isc_has_offset);
5853 atomic_add_64(&isc->isc_rx_bytes.ev_count, delta);
5854
5855 delta = ixl_stat_delta(sc,
5856 0, I40E_GLPRT_RDPC(sc->sc_port),
5857 &isc->isc_rx_discards_offset, isc->isc_has_offset);
5858 atomic_add_64(&isc->isc_rx_discards.ev_count, delta);
5859
5860 delta = ixl_stat_delta(sc,
5861 I40E_GLPRT_UPRCH(sc->sc_port), I40E_GLPRT_UPRCL(sc->sc_port),
5862 &isc->isc_rx_unicast_offset, isc->isc_has_offset);
5863 atomic_add_64(&isc->isc_rx_unicast.ev_count, delta);
5864
5865 delta = ixl_stat_delta(sc,
5866 I40E_GLPRT_MPRCH(sc->sc_port), I40E_GLPRT_MPRCL(sc->sc_port),
5867 &isc->isc_rx_multicast_offset, isc->isc_has_offset);
5868 atomic_add_64(&isc->isc_rx_multicast.ev_count, delta);
5869
5870 delta = ixl_stat_delta(sc,
5871 I40E_GLPRT_BPRCH(sc->sc_port), I40E_GLPRT_BPRCL(sc->sc_port),
5872 &isc->isc_rx_broadcast_offset, isc->isc_has_offset);
5873 atomic_add_64(&isc->isc_rx_broadcast.ev_count, delta);
5874
5875 /* Packet size stats rx */
5876 delta = ixl_stat_delta(sc,
5877 I40E_GLPRT_PRC64H(sc->sc_port), I40E_GLPRT_PRC64L(sc->sc_port),
5878 &isc->isc_rx_size_64_offset, isc->isc_has_offset);
5879 atomic_add_64(&isc->isc_rx_size_64.ev_count, delta);
5880
5881 delta = ixl_stat_delta(sc,
5882 I40E_GLPRT_PRC127H(sc->sc_port), I40E_GLPRT_PRC127L(sc->sc_port),
5883 &isc->isc_rx_size_127_offset, isc->isc_has_offset);
5884 atomic_add_64(&isc->isc_rx_size_127.ev_count, delta);
5885
5886 delta = ixl_stat_delta(sc,
5887 I40E_GLPRT_PRC255H(sc->sc_port), I40E_GLPRT_PRC255L(sc->sc_port),
5888 &isc->isc_rx_size_255_offset, isc->isc_has_offset);
5889 atomic_add_64(&isc->isc_rx_size_255.ev_count, delta);
5890
5891 delta = ixl_stat_delta(sc,
5892 I40E_GLPRT_PRC511H(sc->sc_port), I40E_GLPRT_PRC511L(sc->sc_port),
5893 &isc->isc_rx_size_511_offset, isc->isc_has_offset);
5894 atomic_add_64(&isc->isc_rx_size_511.ev_count, delta);
5895
5896 delta = ixl_stat_delta(sc,
5897 I40E_GLPRT_PRC1023H(sc->sc_port), I40E_GLPRT_PRC1023L(sc->sc_port),
5898 &isc->isc_rx_size_1023_offset, isc->isc_has_offset);
5899 atomic_add_64(&isc->isc_rx_size_1023.ev_count, delta);
5900
5901 delta = ixl_stat_delta(sc,
5902 I40E_GLPRT_PRC1522H(sc->sc_port), I40E_GLPRT_PRC1522L(sc->sc_port),
5903 &isc->isc_rx_size_1522_offset, isc->isc_has_offset);
5904 atomic_add_64(&isc->isc_rx_size_1522.ev_count, delta);
5905
5906 delta = ixl_stat_delta(sc,
5907 I40E_GLPRT_PRC9522H(sc->sc_port), I40E_GLPRT_PRC9522L(sc->sc_port),
5908 &isc->isc_rx_size_big_offset, isc->isc_has_offset);
5909 atomic_add_64(&isc->isc_rx_size_big.ev_count, delta);
5910
5911 delta = ixl_stat_delta(sc,
5912 0, I40E_GLPRT_RUC(sc->sc_port),
5913 &isc->isc_rx_undersize_offset, isc->isc_has_offset);
5914 atomic_add_64(&isc->isc_rx_undersize.ev_count, delta);
5915
5916 delta = ixl_stat_delta(sc,
5917 0, I40E_GLPRT_ROC(sc->sc_port),
5918 &isc->isc_rx_oversize_offset, isc->isc_has_offset);
5919 atomic_add_64(&isc->isc_rx_oversize.ev_count, delta);
5920
5921 /* tx */
5922 delta = ixl_stat_delta(sc,
5923 I40E_GLPRT_GOTCH(sc->sc_port), I40E_GLPRT_GOTCL(sc->sc_port),
5924 &isc->isc_tx_bytes_offset, isc->isc_has_offset);
5925 atomic_add_64(&isc->isc_tx_bytes.ev_count, delta);
5926
5927 delta = ixl_stat_delta(sc,
5928 0, I40E_GLPRT_TDOLD(sc->sc_port),
5929 &isc->isc_tx_dropped_link_down_offset, isc->isc_has_offset);
5930 atomic_add_64(&isc->isc_tx_dropped_link_down.ev_count, delta);
5931
5932 delta = ixl_stat_delta(sc,
5933 I40E_GLPRT_UPTCH(sc->sc_port), I40E_GLPRT_UPTCL(sc->sc_port),
5934 &isc->isc_tx_unicast_offset, isc->isc_has_offset);
5935 atomic_add_64(&isc->isc_tx_unicast.ev_count, delta);
5936
5937 delta = ixl_stat_delta(sc,
5938 I40E_GLPRT_MPTCH(sc->sc_port), I40E_GLPRT_MPTCL(sc->sc_port),
5939 &isc->isc_tx_multicast_offset, isc->isc_has_offset);
5940 atomic_add_64(&isc->isc_tx_multicast.ev_count, delta);
5941
5942 delta = ixl_stat_delta(sc,
5943 I40E_GLPRT_BPTCH(sc->sc_port), I40E_GLPRT_BPTCL(sc->sc_port),
5944 &isc->isc_tx_broadcast_offset, isc->isc_has_offset);
5945 atomic_add_64(&isc->isc_tx_broadcast.ev_count, delta);
5946
5947 /* Packet size stats tx */
5948 delta = ixl_stat_delta(sc,
 5949 	    I40E_GLPRT_PTC64H(sc->sc_port), I40E_GLPRT_PTC64L(sc->sc_port),
5950 &isc->isc_tx_size_64_offset, isc->isc_has_offset);
5951 atomic_add_64(&isc->isc_tx_size_64.ev_count, delta);
5952
5953 delta = ixl_stat_delta(sc,
5954 I40E_GLPRT_PTC127H(sc->sc_port), I40E_GLPRT_PTC127L(sc->sc_port),
5955 &isc->isc_tx_size_127_offset, isc->isc_has_offset);
5956 atomic_add_64(&isc->isc_tx_size_127.ev_count, delta);
5957
5958 delta = ixl_stat_delta(sc,
5959 I40E_GLPRT_PTC255H(sc->sc_port), I40E_GLPRT_PTC255L(sc->sc_port),
5960 &isc->isc_tx_size_255_offset, isc->isc_has_offset);
5961 atomic_add_64(&isc->isc_tx_size_255.ev_count, delta);
5962
5963 delta = ixl_stat_delta(sc,
5964 I40E_GLPRT_PTC511H(sc->sc_port), I40E_GLPRT_PTC511L(sc->sc_port),
5965 &isc->isc_tx_size_511_offset, isc->isc_has_offset);
5966 atomic_add_64(&isc->isc_tx_size_511.ev_count, delta);
5967
5968 delta = ixl_stat_delta(sc,
5969 I40E_GLPRT_PTC1023H(sc->sc_port), I40E_GLPRT_PTC1023L(sc->sc_port),
5970 &isc->isc_tx_size_1023_offset, isc->isc_has_offset);
5971 atomic_add_64(&isc->isc_tx_size_1023.ev_count, delta);
5972
5973 delta = ixl_stat_delta(sc,
5974 I40E_GLPRT_PTC1522H(sc->sc_port), I40E_GLPRT_PTC1522L(sc->sc_port),
5975 &isc->isc_tx_size_1522_offset, isc->isc_has_offset);
5976 atomic_add_64(&isc->isc_tx_size_1522.ev_count, delta);
5977
5978 delta = ixl_stat_delta(sc,
5979 I40E_GLPRT_PTC9522H(sc->sc_port), I40E_GLPRT_PTC9522L(sc->sc_port),
5980 &isc->isc_tx_size_big_offset, isc->isc_has_offset);
5981 atomic_add_64(&isc->isc_tx_size_big.ev_count, delta);
5982
5983 /* mac faults */
5984 delta = ixl_stat_delta(sc,
5985 0, I40E_GLPRT_MLFC(sc->sc_port),
5986 &isc->isc_mac_local_faults_offset, isc->isc_has_offset);
5987 atomic_add_64(&isc->isc_mac_local_faults.ev_count, delta);
5988
5989 delta = ixl_stat_delta(sc,
5990 0, I40E_GLPRT_MRFC(sc->sc_port),
5991 &isc->isc_mac_remote_faults_offset, isc->isc_has_offset);
5992 atomic_add_64(&isc->isc_mac_remote_faults.ev_count, delta);
5993
5994 /* Flow control (LFC) stats */
5995 delta = ixl_stat_delta(sc,
5996 0, I40E_GLPRT_LXONRXC(sc->sc_port),
5997 &isc->isc_link_xon_rx_offset, isc->isc_has_offset);
5998 atomic_add_64(&isc->isc_link_xon_rx.ev_count, delta);
5999
6000 delta = ixl_stat_delta(sc,
6001 0, I40E_GLPRT_LXONTXC(sc->sc_port),
6002 &isc->isc_link_xon_tx_offset, isc->isc_has_offset);
6003 atomic_add_64(&isc->isc_link_xon_tx.ev_count, delta);
6004
6005 delta = ixl_stat_delta(sc,
6006 0, I40E_GLPRT_LXOFFRXC(sc->sc_port),
6007 &isc->isc_link_xoff_rx_offset, isc->isc_has_offset);
6008 atomic_add_64(&isc->isc_link_xoff_rx.ev_count, delta);
6009
6010 delta = ixl_stat_delta(sc,
6011 0, I40E_GLPRT_LXOFFTXC(sc->sc_port),
6012 &isc->isc_link_xoff_tx_offset, isc->isc_has_offset);
6013 atomic_add_64(&isc->isc_link_xoff_tx.ev_count, delta);
6014
 6015 	/* rx fragments / jabber */
6016 delta = ixl_stat_delta(sc,
6017 0, I40E_GLPRT_RFC(sc->sc_port),
6018 &isc->isc_rx_fragments_offset, isc->isc_has_offset);
6019 atomic_add_64(&isc->isc_rx_fragments.ev_count, delta);
6020
6021 delta = ixl_stat_delta(sc,
6022 0, I40E_GLPRT_RJC(sc->sc_port),
6023 &isc->isc_rx_jabber_offset, isc->isc_has_offset);
6024 atomic_add_64(&isc->isc_rx_jabber.ev_count, delta);
6025
6026 /* VSI rx counters */
6027 delta = ixl_stat_delta(sc,
6028 0, I40E_GLV_RDPC(sc->sc_vsi_stat_counter_idx),
6029 &isc->isc_vsi_rx_discards_offset, isc->isc_has_offset);
6030 atomic_add_64(&isc->isc_vsi_rx_discards.ev_count, delta);
6031
6032 delta = ixl_stat_delta(sc,
6033 I40E_GLV_GORCH(sc->sc_vsi_stat_counter_idx),
6034 I40E_GLV_GORCL(sc->sc_vsi_stat_counter_idx),
6035 &isc->isc_vsi_rx_bytes_offset, isc->isc_has_offset);
6036 atomic_add_64(&isc->isc_vsi_rx_bytes.ev_count, delta);
6037
6038 delta = ixl_stat_delta(sc,
6039 I40E_GLV_UPRCH(sc->sc_vsi_stat_counter_idx),
6040 I40E_GLV_UPRCL(sc->sc_vsi_stat_counter_idx),
6041 &isc->isc_vsi_rx_unicast_offset, isc->isc_has_offset);
6042 atomic_add_64(&isc->isc_vsi_rx_unicast.ev_count, delta);
6043
6044 delta = ixl_stat_delta(sc,
6045 I40E_GLV_MPRCH(sc->sc_vsi_stat_counter_idx),
6046 I40E_GLV_MPRCL(sc->sc_vsi_stat_counter_idx),
6047 &isc->isc_vsi_rx_multicast_offset, isc->isc_has_offset);
6048 atomic_add_64(&isc->isc_vsi_rx_multicast.ev_count, delta);
6049
6050 delta = ixl_stat_delta(sc,
6051 I40E_GLV_BPRCH(sc->sc_vsi_stat_counter_idx),
6052 I40E_GLV_BPRCL(sc->sc_vsi_stat_counter_idx),
6053 &isc->isc_vsi_rx_broadcast_offset, isc->isc_has_offset);
6054 atomic_add_64(&isc->isc_vsi_rx_broadcast.ev_count, delta);
6055
6056 /* VSI tx counters */
6057 delta = ixl_stat_delta(sc,
6058 0, I40E_GLV_TEPC(sc->sc_vsi_stat_counter_idx),
6059 &isc->isc_vsi_tx_errors_offset, isc->isc_has_offset);
6060 atomic_add_64(&isc->isc_vsi_tx_errors.ev_count, delta);
6061
6062 delta = ixl_stat_delta(sc,
6063 I40E_GLV_GOTCH(sc->sc_vsi_stat_counter_idx),
6064 I40E_GLV_GOTCL(sc->sc_vsi_stat_counter_idx),
6065 &isc->isc_vsi_tx_bytes_offset, isc->isc_has_offset);
6066 atomic_add_64(&isc->isc_vsi_tx_bytes.ev_count, delta);
6067
6068 delta = ixl_stat_delta(sc,
6069 I40E_GLV_UPTCH(sc->sc_vsi_stat_counter_idx),
6070 I40E_GLV_UPTCL(sc->sc_vsi_stat_counter_idx),
6071 &isc->isc_vsi_tx_unicast_offset, isc->isc_has_offset);
6072 atomic_add_64(&isc->isc_vsi_tx_unicast.ev_count, delta);
6073
6074 delta = ixl_stat_delta(sc,
6075 I40E_GLV_MPTCH(sc->sc_vsi_stat_counter_idx),
6076 I40E_GLV_MPTCL(sc->sc_vsi_stat_counter_idx),
6077 &isc->isc_vsi_tx_multicast_offset, isc->isc_has_offset);
6078 atomic_add_64(&isc->isc_vsi_tx_multicast.ev_count, delta);
6079
6080 delta = ixl_stat_delta(sc,
6081 I40E_GLV_BPTCH(sc->sc_vsi_stat_counter_idx),
6082 I40E_GLV_BPTCL(sc->sc_vsi_stat_counter_idx),
6083 &isc->isc_vsi_tx_broadcast_offset, isc->isc_has_offset);
6084 atomic_add_64(&isc->isc_vsi_tx_broadcast.ev_count, delta);
6085 }
6086
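/*
 * ixl_setup_sysctls:
 *
 *	Create the per-device sysctl tree under hw.<devname>:
 *	txrx_workqueue, stats_interval, and the rx/tx sub-nodes with
 *	their interrupt-time and deferred processing limits.  On error
 *	the partially created tree is torn down again.
 */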
6087 static int
6088 ixl_setup_sysctls(struct ixl_softc *sc)
6089 {
6090 const char *devname;
6091 struct sysctllog **log;
6092 const struct sysctlnode *rnode, *rxnode, *txnode;
6093 int error;
6094
6095 log = &sc->sc_sysctllog;
6096 devname = device_xname(sc->sc_dev);
6097
6098 error = sysctl_createv(log, 0, NULL, &rnode,
6099 0, CTLTYPE_NODE, devname,
6100 SYSCTL_DESCR("ixl information and settings"),
6101 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
6102 if (error)
6103 goto out;
6104
6105 error = sysctl_createv(log, 0, &rnode, NULL,
6106 CTLFLAG_READWRITE, CTLTYPE_BOOL, "txrx_workqueue",
6107 SYSCTL_DESCR("Use workqueue for packet processing"),
6108 NULL, 0, &sc->sc_txrx_workqueue, 0, CTL_CREATE, CTL_EOL);
6109 if (error)
6110 goto out;
6111
6112 error = sysctl_createv(log, 0, &rnode, NULL,
6113 CTLFLAG_READONLY, CTLTYPE_INT, "stats_interval",
6114 SYSCTL_DESCR("Statistics collection interval in milliseconds"),
 6115 	    NULL, 0, &sc->sc_stats_intval, 0, CTL_CREATE, CTL_EOL);
	if (error)
		goto out;
 6116 
6117 error = sysctl_createv(log, 0, &rnode, &rxnode,
6118 0, CTLTYPE_NODE, "rx",
6119 SYSCTL_DESCR("ixl information and settings for Rx"),
6120 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
6121 if (error)
6122 goto out;
6123
6124 error = sysctl_createv(log, 0, &rxnode, NULL,
6125 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
6126 SYSCTL_DESCR("max number of Rx packets"
6127 " to process for interrupt processing"),
6128 NULL, 0, &sc->sc_rx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
6129 if (error)
6130 goto out;
6131
6132 error = sysctl_createv(log, 0, &rxnode, NULL,
6133 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
6134 SYSCTL_DESCR("max number of Rx packets"
6135 " to process for deferred processing"),
6136 NULL, 0, &sc->sc_rx_process_limit, 0, CTL_CREATE, CTL_EOL);
6137 if (error)
6138 goto out;
6139
6140 error = sysctl_createv(log, 0, &rnode, &txnode,
6141 0, CTLTYPE_NODE, "tx",
6142 SYSCTL_DESCR("ixl information and settings for Tx"),
6143 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
6144 if (error)
6145 goto out;
6146
6147 error = sysctl_createv(log, 0, &txnode, NULL,
6148 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
6149 SYSCTL_DESCR("max number of Tx packets"
6150 " to process for interrupt processing"),
6151 NULL, 0, &sc->sc_tx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
6152 if (error)
6153 goto out;
6154
6155 error = sysctl_createv(log, 0, &txnode, NULL,
6156 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
6157 SYSCTL_DESCR("max number of Tx packets"
6158 " to process for deferred processing"),
6159 NULL, 0, &sc->sc_tx_process_limit, 0, CTL_CREATE, CTL_EOL);
6160 if (error)
6161 goto out;
6162
6163 out:
6164 if (error) {
6165 aprint_error_dev(sc->sc_dev,
6166 "unable to create sysctl node\n");
6167 sysctl_teardown(log);
6168 }
6169
6170 return error;
6171 }
6172
6173 static void
6174 ixl_teardown_sysctls(struct ixl_softc *sc)
6175 {
6176
6177 sysctl_teardown(&sc->sc_sysctllog);
6178 }
6179
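/*
 * Thin wrappers around workqueue(9).  All ixl work items share the
 * single entry point ixl_workq_work(), which dispatches through the
 * function pointer recorded by ixl_work_set().
 */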
6180 static struct workqueue *
6181 ixl_workq_create(const char *name, pri_t prio, int ipl, int flags)
6182 {
6183 struct workqueue *wq;
6184 int error;
6185
6186 error = workqueue_create(&wq, name, ixl_workq_work, NULL,
6187 prio, ipl, flags);
6188
6189 if (error)
6190 return NULL;
6191
6192 return wq;
6193 }
6194
6195 static void
6196 ixl_workq_destroy(struct workqueue *wq)
6197 {
6198
6199 workqueue_destroy(wq);
6200 }
6201
6202 static void
6203 ixl_work_set(struct ixl_work *work, void (*func)(void *), void *arg)
6204 {
6205
6206 memset(work, 0, sizeof(*work));
6207 work->ixw_func = func;
6208 work->ixw_arg = arg;
6209 }
6210
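/*
 * ixl_work_add:
 *
 *	Enqueue a piece of work unless it is already pending: the
 *	ixw_added flag is set with a compare-and-swap so a work item
 *	sits on the workqueue at most once.  ixl_workq_work() clears
 *	the flag before calling the handler, so the work may be
 *	re-added while it is still running.
 */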
6211 static void
6212 ixl_work_add(struct workqueue *wq, struct ixl_work *work)
6213 {
6214 if (atomic_cas_uint(&work->ixw_added, 0, 1) != 0)
6215 return;
6216
6217 workqueue_enqueue(wq, &work->ixw_cookie, NULL);
6218 }
6219
6220 static void
6221 ixl_work_wait(struct workqueue *wq, struct ixl_work *work)
6222 {
6223
6224 workqueue_wait(wq, &work->ixw_cookie);
6225 }
6226
6227 static void
6228 ixl_workq_work(struct work *wk, void *context)
6229 {
6230 struct ixl_work *work;
6231
6232 work = container_of(wk, struct ixl_work, ixw_cookie);
6233
6234 atomic_swap_uint(&work->ixw_added, 0);
6235 work->ixw_func(work->ixw_arg);
6236 }
6237
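/*
 * Access helpers for RX control registers.  When sc_rxctl_atq is set
 * the register is read or written through the admin queue, retrying a
 * few times on EAGAIN; if that fails, or when admin-queue access is
 * not required, the register is accessed directly with ixl_rd()/
 * ixl_wr().
 */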
6238 static int
6239 ixl_rx_ctl_read(struct ixl_softc *sc, uint32_t reg, uint32_t *rv)
6240 {
6241 struct ixl_aq_desc iaq;
6242
6243 memset(&iaq, 0, sizeof(iaq));
6244 iaq.iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_REG_READ);
6245 iaq.iaq_param[1] = htole32(reg);
6246
6247 if (ixl_atq_poll(sc, &iaq, 250) != 0)
6248 return ETIMEDOUT;
6249
 6250 	switch (le16toh(iaq.iaq_retval)) {
6251 case IXL_AQ_RC_OK:
6252 /* success */
6253 break;
6254 case IXL_AQ_RC_EACCES:
6255 return EPERM;
6256 case IXL_AQ_RC_EAGAIN:
6257 return EAGAIN;
6258 default:
6259 return EIO;
6260 }
6261
 6262 	*rv = le32toh(iaq.iaq_param[3]);
6263 return 0;
6264 }
6265
6266 static uint32_t
6267 ixl_rd_rx_csr(struct ixl_softc *sc, uint32_t reg)
6268 {
6269 uint32_t val;
6270 int rv, retry, retry_limit;
6271
6272 retry_limit = sc->sc_rxctl_atq ? 5 : 0;
6273
6274 for (retry = 0; retry < retry_limit; retry++) {
6275 rv = ixl_rx_ctl_read(sc, reg, &val);
6276 if (rv == 0)
6277 return val;
6278 else if (rv == EAGAIN)
6279 delaymsec(1);
6280 else
6281 break;
6282 }
6283
6284 val = ixl_rd(sc, reg);
6285
6286 return val;
6287 }
6288
6289 static int
6290 ixl_rx_ctl_write(struct ixl_softc *sc, uint32_t reg, uint32_t value)
6291 {
6292 struct ixl_aq_desc iaq;
6293
6294 memset(&iaq, 0, sizeof(iaq));
6295 iaq.iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_REG_WRITE);
6296 iaq.iaq_param[1] = htole32(reg);
6297 iaq.iaq_param[3] = htole32(value);
6298
6299 if (ixl_atq_poll(sc, &iaq, 250) != 0)
6300 return ETIMEDOUT;
6301
 6302 	switch (le16toh(iaq.iaq_retval)) {
6303 case IXL_AQ_RC_OK:
6304 /* success */
6305 break;
6306 case IXL_AQ_RC_EACCES:
6307 return EPERM;
6308 case IXL_AQ_RC_EAGAIN:
6309 return EAGAIN;
6310 default:
6311 return EIO;
6312 }
6313
6314 return 0;
6315 }
6316
6317 static void
6318 ixl_wr_rx_csr(struct ixl_softc *sc, uint32_t reg, uint32_t value)
6319 {
6320 int rv, retry, retry_limit;
6321
6322 retry_limit = sc->sc_rxctl_atq ? 5 : 0;
6323
6324 for (retry = 0; retry < retry_limit; retry++) {
6325 rv = ixl_rx_ctl_write(sc, reg, value);
6326 if (rv == 0)
6327 return;
6328 else if (rv == EAGAIN)
6329 delaymsec(1);
6330 else
6331 break;
6332 }
6333
6334 ixl_wr(sc, reg, value);
6335 }
6336
6337 MODULE(MODULE_CLASS_DRIVER, if_ixl, "pci");
6338
6339 #ifdef _MODULE
6340 #include "ioconf.c"
6341 #endif
6342
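/*
 * ixl_parse_modprop:
 *
 *	Pick up optional module properties from the dictionary handed
 *	to MODULE_CMD_INIT: "nomsix", "stats_interval", "nqps_limit",
 *	"rx_ndescs" and "tx_ndescs", applying basic range checks before
 *	overriding the corresponding ixl_param_* defaults.
 */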
6343 #ifdef _MODULE
6344 static void
6345 ixl_parse_modprop(prop_dictionary_t dict)
6346 {
6347 prop_object_t obj;
6348 int64_t val;
6349 uint64_t uval;
6350
6351 if (dict == NULL)
6352 return;
6353
6354 obj = prop_dictionary_get(dict, "nomsix");
6355 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_BOOL) {
6356 ixl_param_nomsix = prop_bool_true((prop_bool_t)obj);
6357 }
6358
6359 obj = prop_dictionary_get(dict, "stats_interval");
6360 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
6361 val = prop_number_integer_value((prop_number_t)obj);
6362
 6363 		/* the bounds of this range are arbitrary */
6364 if (100 < val && val < 180000) {
6365 ixl_param_stats_interval = val;
6366 }
6367 }
6368
6369 obj = prop_dictionary_get(dict, "nqps_limit");
6370 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
6371 val = prop_number_integer_value((prop_number_t)obj);
6372
6373 if (val <= INT32_MAX)
6374 ixl_param_nqps_limit = val;
6375 }
6376
6377 obj = prop_dictionary_get(dict, "rx_ndescs");
6378 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
6379 uval = prop_number_unsigned_integer_value((prop_number_t)obj);
6380
6381 if (uval > 8)
6382 ixl_param_rx_ndescs = uval;
6383 }
6384
6385 obj = prop_dictionary_get(dict, "tx_ndescs");
6386 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
6387 uval = prop_number_unsigned_integer_value((prop_number_t)obj);
6388
6389 if (uval > IXL_TX_PKT_DESCS)
6390 ixl_param_tx_ndescs = uval;
6391 }
6392
6393 }
6394 #endif
6395
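/*
 * if_ixl_modcmd:
 *
 *	module(9) command handler.  For modular kernels this attaches
 *	or detaches the driver through the generated ioconf tables; in
 *	a built-in configuration it is a no-op that returns 0.
 */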
6396 static int
6397 if_ixl_modcmd(modcmd_t cmd, void *opaque)
6398 {
6399 int error = 0;
6400
6401 #ifdef _MODULE
6402 switch (cmd) {
6403 case MODULE_CMD_INIT:
6404 ixl_parse_modprop((prop_dictionary_t)opaque);
6405 error = config_init_component(cfdriver_ioconf_if_ixl,
6406 cfattach_ioconf_if_ixl, cfdata_ioconf_if_ixl);
6407 break;
6408 case MODULE_CMD_FINI:
6409 error = config_fini_component(cfdriver_ioconf_if_ixl,
6410 cfattach_ioconf_if_ixl, cfdata_ioconf_if_ixl);
6411 break;
6412 default:
6413 error = ENOTTY;
6414 break;
6415 }
6416 #endif
6417
6418 return error;
6419 }
6420