1 /*	$NetBSD: if_ixl.c,v 1.21 2020/01/16 07:11:50 yamaguchi Exp $	*/
2
3 /*
4 * Copyright (c) 2013-2015, Intel Corporation
5 * All rights reserved.
6  *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
12 *
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * 3. Neither the name of the Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 /*
35 * Copyright (c) 2016,2017 David Gwynne <dlg (at) openbsd.org>
36 *
37 * Permission to use, copy, modify, and distribute this software for any
38 * purpose with or without fee is hereby granted, provided that the above
39 * copyright notice and this permission notice appear in all copies.
40 *
41 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
42 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
43 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
44 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
45 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
46 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
47 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
48 */
49
50 /*
51 * Copyright (c) 2019 Internet Initiative Japan, Inc.
52 * All rights reserved.
53 *
54 * Redistribution and use in source and binary forms, with or without
55 * modification, are permitted provided that the following conditions
56 * are met:
57 * 1. Redistributions of source code must retain the above copyright
58 * notice, this list of conditions and the following disclaimer.
59 * 2. Redistributions in binary form must reproduce the above copyright
60 * notice, this list of conditions and the following disclaimer in the
61 * documentation and/or other materials provided with the distribution.
62 *
63 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
64 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
65 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
66 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
67 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
68 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
69 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
70 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
71 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
72 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
73 * POSSIBILITY OF SUCH DAMAGE.
74 */
75
76 #include <sys/cdefs.h>
77
78 #ifdef _KERNEL_OPT
79 #include "opt_net_mpsafe.h"
80 #include "opt_if_ixl.h"
81 #endif
82
83 #include <sys/param.h>
84 #include <sys/types.h>
85
86 #include <sys/cpu.h>
87 #include <sys/device.h>
88 #include <sys/evcnt.h>
89 #include <sys/interrupt.h>
90 #include <sys/kmem.h>
91 #include <sys/malloc.h>
92 #include <sys/module.h>
93 #include <sys/mutex.h>
94 #include <sys/pcq.h>
95 #include <sys/syslog.h>
96 #include <sys/workqueue.h>
97
98 #include <sys/bus.h>
99
100 #include <net/bpf.h>
101 #include <net/if.h>
102 #include <net/if_dl.h>
103 #include <net/if_media.h>
104 #include <net/if_ether.h>
105 #include <net/rss_config.h>
106
107 #include <dev/pci/pcivar.h>
108 #include <dev/pci/pcidevs.h>
109
110 #include <dev/pci/if_ixlreg.h>
111 #include <dev/pci/if_ixlvar.h>
112
113 #include <prop/proplib.h>
114
115 struct ixl_softc; /* defined */
116
117 #define I40E_PF_RESET_WAIT_COUNT 200
118 #define I40E_AQ_LARGE_BUF 512
119
120 /* bitfields for Tx queue mapping in QTX_CTL */
121 #define I40E_QTX_CTL_VF_QUEUE 0x0
122 #define I40E_QTX_CTL_VM_QUEUE 0x1
123 #define I40E_QTX_CTL_PF_QUEUE 0x2
124
125 #define I40E_QUEUE_TYPE_EOL 0x7ff
127
128 #define I40E_QUEUE_TYPE_RX 0x0
129 #define I40E_QUEUE_TYPE_TX 0x1
130 #define I40E_QUEUE_TYPE_PE_CEQ 0x2
131 #define I40E_QUEUE_TYPE_UNKNOWN 0x3
132
133 #define I40E_ITR_INDEX_RX 0x0
134 #define I40E_ITR_INDEX_TX 0x1
135 #define I40E_ITR_INDEX_OTHER 0x2
136 #define I40E_ITR_INDEX_NONE 0x3
137
138 #define I40E_INTR_NOTX_QUEUE 0
139 #define I40E_INTR_NOTX_INTR 0
140 #define I40E_INTR_NOTX_RX_QUEUE 0
141 #define I40E_INTR_NOTX_TX_QUEUE 1
142 #define I40E_INTR_NOTX_RX_MASK I40E_PFINT_ICR0_QUEUE_0_MASK
143 #define I40E_INTR_NOTX_TX_MASK I40E_PFINT_ICR0_QUEUE_1_MASK
144
145 #define BIT_ULL(a) (1ULL << (a))
146 #define IXL_RSS_HENA_DEFAULT_BASE \
147 (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
148 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
149 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
150 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
151 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
152 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
153 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
154 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
155 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
156 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
157 BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
158 #define IXL_RSS_HENA_DEFAULT_XL710 IXL_RSS_HENA_DEFAULT_BASE
159 #define IXL_RSS_HENA_DEFAULT_X722 (IXL_RSS_HENA_DEFAULT_XL710 | \
160 BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
161 BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
162 BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
163 BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
164 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
165 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK))
166 #define I40E_HASH_LUT_SIZE_128 0
167 #define IXL_RSS_KEY_SIZE_REG 13
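
/*
 * The HENA bitmap above selects which packet types feed the RSS hash.  It
 * is a 64-bit value programmed as two 32-bit halves, and the hash key is
 * spread over IXL_RSS_KEY_SIZE_REG 32-bit registers.  A rough sketch only;
 * the exact accessor used by ixl_config_rss() may differ:
 *
 *	uint64_t hena = IXL_RSS_HENA_DEFAULT_XL710;
 *	ixl_wr(sc, I40E_PFQF_HENA(0), (uint32_t)hena);
 *	ixl_wr(sc, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
 */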
168
169 #define IXL_ICR0_CRIT_ERR_MASK \
170 (I40E_PFINT_ICR0_PCI_EXCEPTION_MASK | \
171 I40E_PFINT_ICR0_ECC_ERR_MASK | \
172 I40E_PFINT_ICR0_PE_CRITERR_MASK)
173
174 #define IXL_TX_PKT_DESCS 8
175 #define IXL_TX_QUEUE_ALIGN 128
176 #define IXL_RX_QUEUE_ALIGN 128
177
178 #define IXL_HARDMTU 9712 /* 9726 - ETHER_HDR_LEN */
179
180 #define IXL_PCIREG PCI_MAPREG_START
181
182 #define IXL_ITR0 0x0
183 #define IXL_ITR1 0x1
184 #define IXL_ITR2 0x2
185 #define IXL_NOITR 0x3
186
187 #define IXL_AQ_NUM 256
188 #define IXL_AQ_MASK (IXL_AQ_NUM - 1)
189 #define IXL_AQ_ALIGN 64 /* lol */
190 #define IXL_AQ_BUFLEN 4096
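
/*
 * IXL_AQ_NUM is kept a power of two so that IXL_AQ_MASK can wrap admin
 * queue ring indices cheaply, e.g. (illustrative only):
 *
 *	prod = (prod + 1) & IXL_AQ_MASK;	(255 wraps back to 0)
 */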
191
192 #define IXL_HMC_ROUNDUP 512
193 #define IXL_HMC_PGSIZE 4096
194 #define IXL_HMC_DVASZ sizeof(uint64_t)
195 #define IXL_HMC_PGS (IXL_HMC_PGSIZE / IXL_HMC_DVASZ)
196 #define IXL_HMC_L2SZ (IXL_HMC_PGSIZE * IXL_HMC_PGS)
197 #define IXL_HMC_PDVALID 1ULL
198
199 #define IXL_ATQ_EXEC_TIMEOUT (10 * hz)
200
201 struct ixl_aq_regs {
202 bus_size_t atq_tail;
203 bus_size_t atq_head;
204 bus_size_t atq_len;
205 bus_size_t atq_bal;
206 bus_size_t atq_bah;
207
208 bus_size_t arq_tail;
209 bus_size_t arq_head;
210 bus_size_t arq_len;
211 bus_size_t arq_bal;
212 bus_size_t arq_bah;
213
214 uint32_t atq_len_enable;
215 uint32_t atq_tail_mask;
216 uint32_t atq_head_mask;
217
218 uint32_t arq_len_enable;
219 uint32_t arq_tail_mask;
220 uint32_t arq_head_mask;
221 };
222
223 struct ixl_phy_type {
224 uint64_t phy_type;
225 uint64_t ifm_type;
226 };
227
228 struct ixl_speed_type {
229 uint8_t dev_speed;
230 uint64_t net_speed;
231 };
232
233 struct ixl_aq_buf {
234 SIMPLEQ_ENTRY(ixl_aq_buf)
235 aqb_entry;
236 void *aqb_data;
237 bus_dmamap_t aqb_map;
238 bus_dma_segment_t aqb_seg;
239 size_t aqb_size;
240 int aqb_nsegs;
241 };
242 SIMPLEQ_HEAD(ixl_aq_bufs, ixl_aq_buf);
243
244 struct ixl_dmamem {
245 bus_dmamap_t ixm_map;
246 bus_dma_segment_t ixm_seg;
247 int ixm_nsegs;
248 size_t ixm_size;
249 void *ixm_kva;
250 };
251
252 #define IXL_DMA_MAP(_ixm) ((_ixm)->ixm_map)
253 #define IXL_DMA_DVA(_ixm) ((_ixm)->ixm_map->dm_segs[0].ds_addr)
254 #define IXL_DMA_KVA(_ixm) ((void *)(_ixm)->ixm_kva)
255 #define IXL_DMA_LEN(_ixm) ((_ixm)->ixm_size)
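
/*
 * Typical use of the accessors above: hand IXL_DMA_DVA() to the device and
 * bracket CPU access with bus_dmamap_sync() on IXL_DMA_MAP(), as
 * ixl_attach() does below, e.g.:
 *
 *	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
 *	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
 */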
256
257 struct ixl_hmc_entry {
258 uint64_t hmc_base;
259 uint32_t hmc_count;
260 uint64_t hmc_size;
261 };
262
263 enum ixl_hmc_types {
264 IXL_HMC_LAN_TX = 0,
265 IXL_HMC_LAN_RX,
266 IXL_HMC_FCOE_CTX,
267 IXL_HMC_FCOE_FILTER,
268 IXL_HMC_COUNT
269 };
270
271 struct ixl_hmc_pack {
272 uint16_t offset;
273 uint16_t width;
274 uint16_t lsb;
275 };
276
277 /*
278  * these hmc objects have weird sizes and alignments, so these are abstract
279  * representations of them that are convenient for C to populate.
280  *
281  * the packing code relies on little-endian values being stored in the
282  * fields, on no high bits being set in the fields, and on the entries being
283  * packed in the same order as the fields appear in the ctx structure.
284 */
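
/*
 * As an illustration (not the exact packing code): the rxq "qlen" field,
 * which is 13 bits wide at lsb 89 in the table below, ends up with the low
 * 13 bits of rxq->qlen copied into bits 89..101 of the packed context,
 * roughly:
 *
 *	packed[89 / 8] |= (rxq->qlen & 0x1fff) << (89 % 8);
 *	...continuing into the following bytes for the bits that do not fit.
 */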
285
286 struct ixl_hmc_rxq {
287 uint16_t head;
288 uint8_t cpuid;
289 uint64_t base;
290 #define IXL_HMC_RXQ_BASE_UNIT 128
291 uint16_t qlen;
292 uint16_t dbuff;
293 #define IXL_HMC_RXQ_DBUFF_UNIT 128
294 uint8_t hbuff;
295 #define IXL_HMC_RXQ_HBUFF_UNIT 64
296 uint8_t dtype;
297 #define IXL_HMC_RXQ_DTYPE_NOSPLIT 0x0
298 #define IXL_HMC_RXQ_DTYPE_HSPLIT 0x1
299 #define IXL_HMC_RXQ_DTYPE_SPLIT_ALWAYS 0x2
300 uint8_t dsize;
301 #define IXL_HMC_RXQ_DSIZE_16 0
302 #define IXL_HMC_RXQ_DSIZE_32 1
303 uint8_t crcstrip;
304 uint8_t fc_ena;
305 uint8_t l2sel;
306 uint8_t hsplit_0;
307 uint8_t hsplit_1;
308 uint8_t showiv;
309 uint16_t rxmax;
310 uint8_t tphrdesc_ena;
311 uint8_t tphwdesc_ena;
312 uint8_t tphdata_ena;
313 uint8_t tphhead_ena;
314 uint8_t lrxqthresh;
315 uint8_t prefena;
316 };
317
318 static const struct ixl_hmc_pack ixl_hmc_pack_rxq[] = {
319 { offsetof(struct ixl_hmc_rxq, head), 13, 0 },
320 { offsetof(struct ixl_hmc_rxq, cpuid), 8, 13 },
321 { offsetof(struct ixl_hmc_rxq, base), 57, 32 },
322 { offsetof(struct ixl_hmc_rxq, qlen), 13, 89 },
323 { offsetof(struct ixl_hmc_rxq, dbuff), 7, 102 },
324 { offsetof(struct ixl_hmc_rxq, hbuff), 5, 109 },
325 { offsetof(struct ixl_hmc_rxq, dtype), 2, 114 },
326 { offsetof(struct ixl_hmc_rxq, dsize), 1, 116 },
327 { offsetof(struct ixl_hmc_rxq, crcstrip), 1, 117 },
328 { offsetof(struct ixl_hmc_rxq, fc_ena), 1, 118 },
329 { offsetof(struct ixl_hmc_rxq, l2sel), 1, 119 },
330 { offsetof(struct ixl_hmc_rxq, hsplit_0), 4, 120 },
331 { offsetof(struct ixl_hmc_rxq, hsplit_1), 2, 124 },
332 { offsetof(struct ixl_hmc_rxq, showiv), 1, 127 },
333 { offsetof(struct ixl_hmc_rxq, rxmax), 14, 174 },
334 { offsetof(struct ixl_hmc_rxq, tphrdesc_ena), 1, 193 },
335 { offsetof(struct ixl_hmc_rxq, tphwdesc_ena), 1, 194 },
336 { offsetof(struct ixl_hmc_rxq, tphdata_ena), 1, 195 },
337 { offsetof(struct ixl_hmc_rxq, tphhead_ena), 1, 196 },
338 { offsetof(struct ixl_hmc_rxq, lrxqthresh), 3, 198 },
339 { offsetof(struct ixl_hmc_rxq, prefena), 1, 201 },
340 };
341
342 #define IXL_HMC_RXQ_MINSIZE (201 + 1)
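
/*
 * The minimum size follows from the last entry in the pack table above:
 * prefena is a 1-bit field at lsb 201, so a valid rx queue context must
 * cover at least bits 0..201, i.e. (201 + 1) bits.
 */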
343
344 struct ixl_hmc_txq {
345 uint16_t head;
346 uint8_t new_context;
347 uint64_t base;
348 #define IXL_HMC_TXQ_BASE_UNIT 128
349 uint8_t fc_ena;
350 uint8_t timesync_ena;
351 uint8_t fd_ena;
352 uint8_t alt_vlan_ena;
353 uint16_t thead_wb;
354 uint8_t cpuid;
355 uint8_t head_wb_ena;
356 #define IXL_HMC_TXQ_DESC_WB 0
357 #define IXL_HMC_TXQ_HEAD_WB 1
358 uint16_t qlen;
359 uint8_t tphrdesc_ena;
360 uint8_t tphrpacket_ena;
361 uint8_t tphwdesc_ena;
362 uint64_t head_wb_addr;
363 uint32_t crc;
364 uint16_t rdylist;
365 uint8_t rdylist_act;
366 };
367
368 static const struct ixl_hmc_pack ixl_hmc_pack_txq[] = {
369 { offsetof(struct ixl_hmc_txq, head), 13, 0 },
370 { offsetof(struct ixl_hmc_txq, new_context), 1, 30 },
371 { offsetof(struct ixl_hmc_txq, base), 57, 32 },
372 { offsetof(struct ixl_hmc_txq, fc_ena), 1, 89 },
373 { offsetof(struct ixl_hmc_txq, timesync_ena), 1, 90 },
374 { offsetof(struct ixl_hmc_txq, fd_ena), 1, 91 },
375 { offsetof(struct ixl_hmc_txq, alt_vlan_ena), 1, 92 },
376 { offsetof(struct ixl_hmc_txq, cpuid), 8, 96 },
377 /* line 1 */
378 { offsetof(struct ixl_hmc_txq, thead_wb), 13, 0 + 128 },
379 { offsetof(struct ixl_hmc_txq, head_wb_ena), 1, 32 + 128 },
380 { offsetof(struct ixl_hmc_txq, qlen), 13, 33 + 128 },
381 { offsetof(struct ixl_hmc_txq, tphrdesc_ena), 1, 46 + 128 },
382 { offsetof(struct ixl_hmc_txq, tphrpacket_ena), 1, 47 + 128 },
383 { offsetof(struct ixl_hmc_txq, tphwdesc_ena), 1, 48 + 128 },
384 { offsetof(struct ixl_hmc_txq, head_wb_addr), 64, 64 + 128 },
385 /* line 7 */
386 { offsetof(struct ixl_hmc_txq, crc), 32, 0 + (7*128) },
387 { offsetof(struct ixl_hmc_txq, rdylist), 10, 84 + (7*128) },
388 { offsetof(struct ixl_hmc_txq, rdylist_act), 1, 94 + (7*128) },
389 };
390
391 #define IXL_HMC_TXQ_MINSIZE (94 + (7*128) + 1)
392
393 struct ixl_work {
394 struct work ixw_cookie;
395 void (*ixw_func)(void *);
396 void *ixw_arg;
397 unsigned int ixw_added;
398 };
399 #define IXL_WORKQUEUE_PRI PRI_SOFTNET
400
401 struct ixl_tx_map {
402 struct mbuf *txm_m;
403 bus_dmamap_t txm_map;
404 unsigned int txm_eop;
405 };
406
407 struct ixl_tx_ring {
408 kmutex_t txr_lock;
409 struct ixl_softc *txr_sc;
410
411 unsigned int txr_prod;
412 unsigned int txr_cons;
413
414 struct ixl_tx_map *txr_maps;
415 struct ixl_dmamem txr_mem;
416
417 bus_size_t txr_tail;
418 unsigned int txr_qid;
419 pcq_t *txr_intrq;
420 void *txr_si;
421
422 uint64_t txr_oerrors; /* if_oerrors */
423 uint64_t txr_opackets; /* if_opackets */
424 uint64_t txr_obytes; /* if_obytes */
425 uint64_t txr_omcasts; /* if_omcasts */
426
427 struct evcnt txr_defragged;
428 struct evcnt txr_defrag_failed;
429 struct evcnt txr_pcqdrop;
430 struct evcnt txr_transmitdef;
431 struct evcnt txr_intr;
432 struct evcnt txr_defer;
433 };
434
435 struct ixl_rx_map {
436 struct mbuf *rxm_m;
437 bus_dmamap_t rxm_map;
438 };
439
440 struct ixl_rx_ring {
441 kmutex_t rxr_lock;
442
443 unsigned int rxr_prod;
444 unsigned int rxr_cons;
445
446 struct ixl_rx_map *rxr_maps;
447 struct ixl_dmamem rxr_mem;
448
449 struct mbuf *rxr_m_head;
450 struct mbuf **rxr_m_tail;
451
452 bus_size_t rxr_tail;
453 unsigned int rxr_qid;
454
455 uint64_t rxr_ipackets; /* if_ipackets */
456 uint64_t rxr_ibytes; /* if_ibytes */
457 uint64_t rxr_iqdrops; /* iqdrops */
458 uint64_t rxr_ierrors; /* if_ierrors */
459
460 struct evcnt rxr_mgethdr_failed;
461 struct evcnt rxr_mgetcl_failed;
462 struct evcnt rxr_mbuf_load_failed;
463 struct evcnt rxr_intr;
464 struct evcnt rxr_defer;
465 };
466
467 struct ixl_queue_pair {
468 struct ixl_softc *qp_sc;
469 struct ixl_tx_ring *qp_txr;
470 struct ixl_rx_ring *qp_rxr;
471
472 char qp_name[16];
473
474 void *qp_si;
475 struct ixl_work qp_task;
476 bool qp_workqueue;
477 };
478
479 struct ixl_atq {
480 struct ixl_aq_desc iatq_desc;
481 void (*iatq_fn)(struct ixl_softc *,
482 const struct ixl_aq_desc *);
483 };
484 SIMPLEQ_HEAD(ixl_atq_list, ixl_atq);
485
486 struct ixl_product {
487 unsigned int vendor_id;
488 unsigned int product_id;
489 };
490
491 struct ixl_stats_counters {
492 bool isc_has_offset;
493 struct evcnt isc_crc_errors;
494 uint64_t isc_crc_errors_offset;
495 struct evcnt isc_illegal_bytes;
496 uint64_t isc_illegal_bytes_offset;
497 struct evcnt isc_rx_bytes;
498 uint64_t isc_rx_bytes_offset;
499 struct evcnt isc_rx_discards;
500 uint64_t isc_rx_discards_offset;
501 struct evcnt isc_rx_unicast;
502 uint64_t isc_rx_unicast_offset;
503 struct evcnt isc_rx_multicast;
504 uint64_t isc_rx_multicast_offset;
505 struct evcnt isc_rx_broadcast;
506 uint64_t isc_rx_broadcast_offset;
507 struct evcnt isc_rx_size_64;
508 uint64_t isc_rx_size_64_offset;
509 struct evcnt isc_rx_size_127;
510 uint64_t isc_rx_size_127_offset;
511 struct evcnt isc_rx_size_255;
512 uint64_t isc_rx_size_255_offset;
513 struct evcnt isc_rx_size_511;
514 uint64_t isc_rx_size_511_offset;
515 struct evcnt isc_rx_size_1023;
516 uint64_t isc_rx_size_1023_offset;
517 struct evcnt isc_rx_size_1522;
518 uint64_t isc_rx_size_1522_offset;
519 struct evcnt isc_rx_size_big;
520 uint64_t isc_rx_size_big_offset;
521 struct evcnt isc_rx_undersize;
522 uint64_t isc_rx_undersize_offset;
523 struct evcnt isc_rx_oversize;
524 uint64_t isc_rx_oversize_offset;
525 struct evcnt isc_rx_fragments;
526 uint64_t isc_rx_fragments_offset;
527 struct evcnt isc_rx_jabber;
528 uint64_t isc_rx_jabber_offset;
529 struct evcnt isc_tx_bytes;
530 uint64_t isc_tx_bytes_offset;
531 struct evcnt isc_tx_dropped_link_down;
532 uint64_t isc_tx_dropped_link_down_offset;
533 struct evcnt isc_tx_unicast;
534 uint64_t isc_tx_unicast_offset;
535 struct evcnt isc_tx_multicast;
536 uint64_t isc_tx_multicast_offset;
537 struct evcnt isc_tx_broadcast;
538 uint64_t isc_tx_broadcast_offset;
539 struct evcnt isc_tx_size_64;
540 uint64_t isc_tx_size_64_offset;
541 struct evcnt isc_tx_size_127;
542 uint64_t isc_tx_size_127_offset;
543 struct evcnt isc_tx_size_255;
544 uint64_t isc_tx_size_255_offset;
545 struct evcnt isc_tx_size_511;
546 uint64_t isc_tx_size_511_offset;
547 struct evcnt isc_tx_size_1023;
548 uint64_t isc_tx_size_1023_offset;
549 struct evcnt isc_tx_size_1522;
550 uint64_t isc_tx_size_1522_offset;
551 struct evcnt isc_tx_size_big;
552 uint64_t isc_tx_size_big_offset;
553 struct evcnt isc_mac_local_faults;
554 uint64_t isc_mac_local_faults_offset;
555 struct evcnt isc_mac_remote_faults;
556 uint64_t isc_mac_remote_faults_offset;
557 struct evcnt isc_link_xon_rx;
558 uint64_t isc_link_xon_rx_offset;
559 struct evcnt isc_link_xon_tx;
560 uint64_t isc_link_xon_tx_offset;
561 struct evcnt isc_link_xoff_rx;
562 uint64_t isc_link_xoff_rx_offset;
563 struct evcnt isc_link_xoff_tx;
564 uint64_t isc_link_xoff_tx_offset;
565 struct evcnt isc_vsi_rx_discards;
566 uint64_t isc_vsi_rx_discards_offset;
567 struct evcnt isc_vsi_rx_bytes;
568 uint64_t isc_vsi_rx_bytes_offset;
569 struct evcnt isc_vsi_rx_unicast;
570 uint64_t isc_vsi_rx_unicast_offset;
571 struct evcnt isc_vsi_rx_multicast;
572 uint64_t isc_vsi_rx_multicast_offset;
573 struct evcnt isc_vsi_rx_broadcast;
574 uint64_t isc_vsi_rx_broadcast_offset;
575 struct evcnt isc_vsi_tx_errors;
576 uint64_t isc_vsi_tx_errors_offset;
577 struct evcnt isc_vsi_tx_bytes;
578 uint64_t isc_vsi_tx_bytes_offset;
579 struct evcnt isc_vsi_tx_unicast;
580 uint64_t isc_vsi_tx_unicast_offset;
581 struct evcnt isc_vsi_tx_multicast;
582 uint64_t isc_vsi_tx_multicast_offset;
583 struct evcnt isc_vsi_tx_broadcast;
584 uint64_t isc_vsi_tx_broadcast_offset;
585 };
586
587 /*
588 * Locking notes:
589  * + fields in ixl_tx_ring are protected by txr_lock (a spin mutex), and
590  *   fields in ixl_rx_ring are protected by rxr_lock (a spin mutex).
591  *    - no more than one of these locks may be held at once.
592  * + fields named sc_atq_* in ixl_softc are protected by sc_atq_lock
593  *   (a spin mutex).
594  *    - the lock cannot be held together with txr_lock or rxr_lock.
595  * + fields named sc_arq_* are not protected by any lock.
596  *    - operations on sc_arq_* are done in a single context related to
597  *      sc_arq_task.
598  * + other fields in ixl_softc are protected by sc_cfg_lock
599  *   (an adaptive mutex).
600  *    - it must be acquired before any other lock and may be
601  *      released after the other lock is released.
602  */
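
/*
 * The only nesting that follows from the rules above is, roughly:
 *
 *	mutex_enter(&sc->sc_cfg_lock);		taken first (adaptive)
 *	mutex_enter(&txr->txr_lock);		or rxr_lock, never both
 *	...
 *	mutex_exit(&txr->txr_lock);
 *	mutex_exit(&sc->sc_cfg_lock);
 *
 * and sc_atq_lock is never held together with txr_lock or rxr_lock.
 */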
603
604 struct ixl_softc {
605 device_t sc_dev;
606 struct ethercom sc_ec;
607 bool sc_attached;
608 bool sc_dead;
609 bool sc_rxctl_atq;
610 uint32_t sc_port;
611 struct sysctllog *sc_sysctllog;
612 struct workqueue *sc_workq;
613 struct workqueue *sc_workq_txrx;
614 int sc_stats_intval;
615 callout_t sc_stats_callout;
616 struct ixl_work sc_stats_task;
617 struct ixl_stats_counters
618 sc_stats_counters;
619 uint8_t sc_enaddr[ETHER_ADDR_LEN];
620 struct ifmedia sc_media;
621 uint64_t sc_media_status;
622 uint64_t sc_media_active;
623 kmutex_t sc_cfg_lock;
624 enum i40e_mac_type sc_mac_type;
625 uint32_t sc_rss_table_size;
626 uint32_t sc_rss_table_entry_width;
627 bool sc_txrx_workqueue;
628 u_int sc_tx_process_limit;
629 u_int sc_rx_process_limit;
630 u_int sc_tx_intr_process_limit;
631 u_int sc_rx_intr_process_limit;
632
633 int sc_cur_ec_capenable;
634
635 struct pci_attach_args sc_pa;
636 pci_intr_handle_t *sc_ihp;
637 void **sc_ihs;
638 unsigned int sc_nintrs;
639
640 bus_dma_tag_t sc_dmat;
641 bus_space_tag_t sc_memt;
642 bus_space_handle_t sc_memh;
643 bus_size_t sc_mems;
644
645 uint8_t sc_pf_id;
646 uint16_t sc_uplink_seid; /* le */
647 uint16_t sc_downlink_seid; /* le */
648 uint16_t sc_vsi_number; /* le */
649 uint16_t sc_vsi_stat_counter_idx;
650 uint16_t sc_seid;
651 unsigned int sc_base_queue;
652
653 pci_intr_type_t sc_intrtype;
654 unsigned int sc_msix_vector_queue;
655
656 struct ixl_dmamem sc_scratch;
657
658 const struct ixl_aq_regs *
659 sc_aq_regs;
660
661 kmutex_t sc_atq_lock;
662 kcondvar_t sc_atq_cv;
663 struct ixl_dmamem sc_atq;
664 unsigned int sc_atq_prod;
665 unsigned int sc_atq_cons;
666
667 struct ixl_dmamem sc_arq;
668 struct ixl_work sc_arq_task;
669 struct ixl_aq_bufs sc_arq_idle;
670 struct ixl_aq_buf *sc_arq_live[IXL_AQ_NUM];
671 unsigned int sc_arq_prod;
672 unsigned int sc_arq_cons;
673
674 struct ixl_work sc_link_state_task;
675 struct ixl_atq sc_link_state_atq;
676
677 struct ixl_dmamem sc_hmc_sd;
678 struct ixl_dmamem sc_hmc_pd;
679 struct ixl_hmc_entry sc_hmc_entries[IXL_HMC_COUNT];
680
681 unsigned int sc_tx_ring_ndescs;
682 unsigned int sc_rx_ring_ndescs;
683 unsigned int sc_nqueue_pairs;
684 unsigned int sc_nqueue_pairs_max;
685 unsigned int sc_nqueue_pairs_device;
686 struct ixl_queue_pair *sc_qps;
687
688 struct evcnt sc_event_atq;
689 struct evcnt sc_event_link;
690 struct evcnt sc_event_ecc_err;
691 struct evcnt sc_event_pci_exception;
692 struct evcnt sc_event_crit_err;
693 };
694
695 #define IXL_TXRX_PROCESS_UNLIMIT UINT_MAX
696 #define IXL_TX_PROCESS_LIMIT 256
697 #define IXL_RX_PROCESS_LIMIT 256
698 #define IXL_TX_INTR_PROCESS_LIMIT 256
699 #define IXL_RX_INTR_PROCESS_LIMIT 0U
700
701 #define IXL_IFCAP_RXCSUM (IFCAP_CSUM_IPv4_Rx| \
702 IFCAP_CSUM_TCPv4_Rx| \
703 IFCAP_CSUM_UDPv4_Rx| \
704 IFCAP_CSUM_TCPv6_Rx| \
705 IFCAP_CSUM_UDPv6_Rx)
706
707 #define delaymsec(_x) DELAY(1000 * (_x))
708 #ifdef IXL_DEBUG
709 #define DDPRINTF(sc, fmt, args...) \
710 do { \
711 if ((sc) != NULL) { \
712 device_printf( \
713 ((struct ixl_softc *)(sc))->sc_dev, \
714 ""); \
715 } \
716 printf("%s:\t" fmt, __func__, ##args); \
717 } while (0)
718 #else
719 #define DDPRINTF(sc, fmt, args...) __nothing
720 #endif
721 #ifndef IXL_STATS_INTERVAL_MSEC
722 #define IXL_STATS_INTERVAL_MSEC 10000
723 #endif
724 #ifndef IXL_QUEUE_NUM
725 #define IXL_QUEUE_NUM 0
726 #endif
727
728 static bool ixl_param_nomsix = false;
729 static int ixl_param_stats_interval = IXL_STATS_INTERVAL_MSEC;
730 static int ixl_param_nqps_limit = IXL_QUEUE_NUM;
731 static unsigned int ixl_param_tx_ndescs = 1024;
732 static unsigned int ixl_param_rx_ndescs = 1024;
733
734 static enum i40e_mac_type
735 ixl_mactype(pci_product_id_t);
736 static void ixl_clear_hw(struct ixl_softc *);
737 static int ixl_pf_reset(struct ixl_softc *);
738
739 static int ixl_dmamem_alloc(struct ixl_softc *, struct ixl_dmamem *,
740 bus_size_t, bus_size_t);
741 static void ixl_dmamem_free(struct ixl_softc *, struct ixl_dmamem *);
742
743 static int ixl_arq_fill(struct ixl_softc *);
744 static void ixl_arq_unfill(struct ixl_softc *);
745
746 static int ixl_atq_poll(struct ixl_softc *, struct ixl_aq_desc *,
747 unsigned int);
748 static void ixl_atq_set(struct ixl_atq *,
749 void (*)(struct ixl_softc *, const struct ixl_aq_desc *));
750 static int ixl_atq_post(struct ixl_softc *, struct ixl_atq *);
751 static int ixl_atq_post_locked(struct ixl_softc *, struct ixl_atq *);
752 static void ixl_atq_done(struct ixl_softc *);
753 static int ixl_atq_exec(struct ixl_softc *, struct ixl_atq *);
754 static int ixl_get_version(struct ixl_softc *);
755 static int ixl_get_hw_capabilities(struct ixl_softc *);
756 static int ixl_pxe_clear(struct ixl_softc *);
757 static int ixl_lldp_shut(struct ixl_softc *);
758 static int ixl_get_mac(struct ixl_softc *);
759 static int ixl_get_switch_config(struct ixl_softc *);
760 static int ixl_phy_mask_ints(struct ixl_softc *);
761 static int ixl_get_phy_types(struct ixl_softc *, uint64_t *);
762 static int ixl_restart_an(struct ixl_softc *);
763 static int ixl_hmc(struct ixl_softc *);
764 static void ixl_hmc_free(struct ixl_softc *);
765 static int ixl_get_vsi(struct ixl_softc *);
766 static int ixl_set_vsi(struct ixl_softc *);
767 static void ixl_set_filter_control(struct ixl_softc *);
768 static void ixl_get_link_status(void *);
769 static int ixl_get_link_status_poll(struct ixl_softc *);
770 static int ixl_set_link_status(struct ixl_softc *,
771 const struct ixl_aq_desc *);
772 static void ixl_config_rss(struct ixl_softc *);
773 static int ixl_add_macvlan(struct ixl_softc *, const uint8_t *,
774 uint16_t, uint16_t);
775 static int ixl_remove_macvlan(struct ixl_softc *, const uint8_t *,
776 uint16_t, uint16_t);
777 static void ixl_arq(void *);
778 static void ixl_hmc_pack(void *, const void *,
779 const struct ixl_hmc_pack *, unsigned int);
780 static uint32_t ixl_rd_rx_csr(struct ixl_softc *, uint32_t);
781 static void ixl_wr_rx_csr(struct ixl_softc *, uint32_t, uint32_t);
782
783 static int ixl_match(device_t, cfdata_t, void *);
784 static void ixl_attach(device_t, device_t, void *);
785 static int ixl_detach(device_t, int);
786
787 static void ixl_media_add(struct ixl_softc *, uint64_t);
788 static int ixl_media_change(struct ifnet *);
789 static void ixl_media_status(struct ifnet *, struct ifmediareq *);
790 static void ixl_watchdog(struct ifnet *);
791 static int ixl_ioctl(struct ifnet *, u_long, void *);
792 static void ixl_start(struct ifnet *);
793 static int ixl_transmit(struct ifnet *, struct mbuf *);
794 static void ixl_deferred_transmit(void *);
795 static int ixl_intr(void *);
796 static int ixl_queue_intr(void *);
797 static int ixl_other_intr(void *);
798 static void ixl_handle_queue(void *);
799 static void ixl_sched_handle_queue(struct ixl_softc *,
800 struct ixl_queue_pair *);
801 static int ixl_init(struct ifnet *);
802 static int ixl_init_locked(struct ixl_softc *);
803 static void ixl_stop(struct ifnet *, int);
804 static void ixl_stop_locked(struct ixl_softc *);
805 static int ixl_iff(struct ixl_softc *);
806 static int ixl_ifflags_cb(struct ethercom *);
807 static int ixl_setup_interrupts(struct ixl_softc *);
808 static int ixl_establish_intx(struct ixl_softc *);
809 static int ixl_establish_msix(struct ixl_softc *);
810 static void ixl_set_affinity_msix(struct ixl_softc *);
811 static void ixl_enable_queue_intr(struct ixl_softc *,
812 struct ixl_queue_pair *);
813 static void ixl_disable_queue_intr(struct ixl_softc *,
814 struct ixl_queue_pair *);
815 static void ixl_enable_other_intr(struct ixl_softc *);
816 static void ixl_disable_other_intr(struct ixl_softc *);
817 static void ixl_config_queue_intr(struct ixl_softc *);
818 static void ixl_config_other_intr(struct ixl_softc *);
819
820 static struct ixl_tx_ring *
821 ixl_txr_alloc(struct ixl_softc *, unsigned int);
822 static void ixl_txr_qdis(struct ixl_softc *, struct ixl_tx_ring *, int);
823 static void ixl_txr_config(struct ixl_softc *, struct ixl_tx_ring *);
824 static int ixl_txr_enabled(struct ixl_softc *, struct ixl_tx_ring *);
825 static int ixl_txr_disabled(struct ixl_softc *, struct ixl_tx_ring *);
826 static void ixl_txr_unconfig(struct ixl_softc *, struct ixl_tx_ring *);
827 static void ixl_txr_clean(struct ixl_softc *, struct ixl_tx_ring *);
828 static void ixl_txr_free(struct ixl_softc *, struct ixl_tx_ring *);
829 static int ixl_txeof(struct ixl_softc *, struct ixl_tx_ring *, u_int);
830
831 static struct ixl_rx_ring *
832 ixl_rxr_alloc(struct ixl_softc *, unsigned int);
833 static void ixl_rxr_config(struct ixl_softc *, struct ixl_rx_ring *);
834 static int ixl_rxr_enabled(struct ixl_softc *, struct ixl_rx_ring *);
835 static int ixl_rxr_disabled(struct ixl_softc *, struct ixl_rx_ring *);
836 static void ixl_rxr_unconfig(struct ixl_softc *, struct ixl_rx_ring *);
837 static void ixl_rxr_clean(struct ixl_softc *, struct ixl_rx_ring *);
838 static void ixl_rxr_free(struct ixl_softc *, struct ixl_rx_ring *);
839 static int ixl_rxeof(struct ixl_softc *, struct ixl_rx_ring *, u_int);
840 static int ixl_rxfill(struct ixl_softc *, struct ixl_rx_ring *);
841
842 static struct workqueue *
843 ixl_workq_create(const char *, pri_t, int, int);
844 static void ixl_workq_destroy(struct workqueue *);
845 static int ixl_workqs_teardown(device_t);
846 static void ixl_work_set(struct ixl_work *, void (*)(void *), void *);
847 static void ixl_work_add(struct workqueue *, struct ixl_work *);
848 static void ixl_work_wait(struct workqueue *, struct ixl_work *);
849 static void ixl_workq_work(struct work *, void *);
850 static const struct ixl_product *
851 ixl_lookup(const struct pci_attach_args *pa);
852 static void ixl_link_state_update(struct ixl_softc *,
853 const struct ixl_aq_desc *);
854 static int ixl_vlan_cb(struct ethercom *, uint16_t, bool);
855 static int ixl_setup_vlan_hwfilter(struct ixl_softc *);
856 static void ixl_teardown_vlan_hwfilter(struct ixl_softc *);
857 static int ixl_update_macvlan(struct ixl_softc *);
859 static void ixl_teardown_interrupts(struct ixl_softc *);
860 static int ixl_setup_stats(struct ixl_softc *);
861 static void ixl_teardown_stats(struct ixl_softc *);
862 static void ixl_stats_callout(void *);
863 static void ixl_stats_update(void *);
864 static int ixl_setup_sysctls(struct ixl_softc *);
865 static void ixl_teardown_sysctls(struct ixl_softc *);
866 static int ixl_queue_pairs_alloc(struct ixl_softc *);
867 static void ixl_queue_pairs_free(struct ixl_softc *);
868
869 static const struct ixl_phy_type ixl_phy_type_map[] = {
870 { 1ULL << IXL_PHY_TYPE_SGMII, IFM_1000_SGMII },
871 { 1ULL << IXL_PHY_TYPE_1000BASE_KX, IFM_1000_KX },
872 { 1ULL << IXL_PHY_TYPE_10GBASE_KX4, IFM_10G_KX4 },
873 { 1ULL << IXL_PHY_TYPE_10GBASE_KR, IFM_10G_KR },
874 { 1ULL << IXL_PHY_TYPE_40GBASE_KR4, IFM_40G_KR4 },
875 { 1ULL << IXL_PHY_TYPE_XAUI |
876 1ULL << IXL_PHY_TYPE_XFI, IFM_10G_CX4 },
877 { 1ULL << IXL_PHY_TYPE_SFI, IFM_10G_SFI },
878 { 1ULL << IXL_PHY_TYPE_XLAUI |
879 1ULL << IXL_PHY_TYPE_XLPPI, IFM_40G_XLPPI },
880 { 1ULL << IXL_PHY_TYPE_40GBASE_CR4_CU |
881 1ULL << IXL_PHY_TYPE_40GBASE_CR4, IFM_40G_CR4 },
882 { 1ULL << IXL_PHY_TYPE_10GBASE_CR1_CU |
883 1ULL << IXL_PHY_TYPE_10GBASE_CR1, IFM_10G_CR1 },
884 { 1ULL << IXL_PHY_TYPE_10GBASE_AOC, IFM_10G_AOC },
885 { 1ULL << IXL_PHY_TYPE_40GBASE_AOC, IFM_40G_AOC },
886 { 1ULL << IXL_PHY_TYPE_100BASE_TX, IFM_100_TX },
887 { 1ULL << IXL_PHY_TYPE_1000BASE_T_OPTICAL |
888 1ULL << IXL_PHY_TYPE_1000BASE_T, IFM_1000_T },
889 { 1ULL << IXL_PHY_TYPE_10GBASE_T, IFM_10G_T },
890 { 1ULL << IXL_PHY_TYPE_10GBASE_SR, IFM_10G_SR },
891 { 1ULL << IXL_PHY_TYPE_10GBASE_LR, IFM_10G_LR },
892 { 1ULL << IXL_PHY_TYPE_10GBASE_SFPP_CU, IFM_10G_TWINAX },
893 { 1ULL << IXL_PHY_TYPE_40GBASE_SR4, IFM_40G_SR4 },
894 { 1ULL << IXL_PHY_TYPE_40GBASE_LR4, IFM_40G_LR4 },
895 { 1ULL << IXL_PHY_TYPE_1000BASE_SX, IFM_1000_SX },
896 { 1ULL << IXL_PHY_TYPE_1000BASE_LX, IFM_1000_LX },
897 { 1ULL << IXL_PHY_TYPE_20GBASE_KR2, IFM_20G_KR2 },
898 { 1ULL << IXL_PHY_TYPE_25GBASE_KR, IFM_25G_KR },
899 { 1ULL << IXL_PHY_TYPE_25GBASE_CR, IFM_25G_CR },
900 { 1ULL << IXL_PHY_TYPE_25GBASE_SR, IFM_25G_SR },
901 { 1ULL << IXL_PHY_TYPE_25GBASE_LR, IFM_25G_LR },
902 { 1ULL << IXL_PHY_TYPE_25GBASE_AOC, IFM_25G_AOC },
903 { 1ULL << IXL_PHY_TYPE_25GBASE_ACC, IFM_25G_CR },
904 };
905
906 static const struct ixl_speed_type ixl_speed_type_map[] = {
907 { IXL_AQ_LINK_SPEED_40GB, IF_Gbps(40) },
908 { IXL_AQ_LINK_SPEED_25GB, IF_Gbps(25) },
909 { IXL_AQ_LINK_SPEED_10GB, IF_Gbps(10) },
910 { IXL_AQ_LINK_SPEED_1000MB, IF_Mbps(1000) },
911 { IXL_AQ_LINK_SPEED_100MB, IF_Mbps(100)},
912 };
913
914 static const struct ixl_aq_regs ixl_pf_aq_regs = {
915 .atq_tail = I40E_PF_ATQT,
916 .atq_tail_mask = I40E_PF_ATQT_ATQT_MASK,
917 .atq_head = I40E_PF_ATQH,
918 .atq_head_mask = I40E_PF_ATQH_ATQH_MASK,
919 .atq_len = I40E_PF_ATQLEN,
920 .atq_bal = I40E_PF_ATQBAL,
921 .atq_bah = I40E_PF_ATQBAH,
922 .atq_len_enable = I40E_PF_ATQLEN_ATQENABLE_MASK,
923
924 .arq_tail = I40E_PF_ARQT,
925 .arq_tail_mask = I40E_PF_ARQT_ARQT_MASK,
926 .arq_head = I40E_PF_ARQH,
927 .arq_head_mask = I40E_PF_ARQH_ARQH_MASK,
928 .arq_len = I40E_PF_ARQLEN,
929 .arq_bal = I40E_PF_ARQBAL,
930 .arq_bah = I40E_PF_ARQBAH,
931 .arq_len_enable = I40E_PF_ARQLEN_ARQENABLE_MASK,
932 };
933
934 #define ixl_rd(_s, _r) \
935 bus_space_read_4((_s)->sc_memt, (_s)->sc_memh, (_r))
936 #define ixl_wr(_s, _r, _v) \
937 bus_space_write_4((_s)->sc_memt, (_s)->sc_memh, (_r), (_v))
938 #define ixl_barrier(_s, _r, _l, _o) \
939 bus_space_barrier((_s)->sc_memt, (_s)->sc_memh, (_r), (_l), (_o))
940 #define ixl_flush(_s) (void)ixl_rd((_s), I40E_GLGEN_STAT)
941 #define ixl_nqueues(_sc) (1 << ((_sc)->sc_nqueue_pairs - 1))
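
/*
 * Register access goes through the macros above; a typical read-modify
 * sequence (taken from ixl_attach() below) looks like:
 *
 *	port = ixl_rd(sc, I40E_PFGEN_PORTNUM);
 *	port &= I40E_PFGEN_PORTNUM_PORT_NUM_MASK;
 *	port >>= I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
 */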
942
943 static inline uint32_t
944 ixl_dmamem_hi(struct ixl_dmamem *ixm)
945 {
946 uint32_t retval;
947 uint64_t val;
948
949 if (sizeof(IXL_DMA_DVA(ixm)) > 4) {
950 val = (intptr_t)IXL_DMA_DVA(ixm);
951 retval = (uint32_t)(val >> 32);
952 } else {
953 retval = 0;
954 }
955
956 return retval;
957 }
958
959 static inline uint32_t
960 ixl_dmamem_lo(struct ixl_dmamem *ixm)
961 {
962
963 return (uint32_t)IXL_DMA_DVA(ixm);
964 }
965
966 static inline void
967 ixl_aq_dva(struct ixl_aq_desc *iaq, bus_addr_t addr)
968 {
969 uint64_t val;
970
971 if (sizeof(addr) > 4) {
972 val = (intptr_t)addr;
973 iaq->iaq_param[2] = htole32(val >> 32);
974 } else {
975 iaq->iaq_param[2] = htole32(0);
976 }
977
978 iaq->iaq_param[3] = htole32(addr);
979 }
980
981 static inline unsigned int
982 ixl_rxr_unrefreshed(unsigned int prod, unsigned int cons, unsigned int ndescs)
983 {
984 unsigned int num;
985
986 if (prod < cons)
987 num = cons - prod;
988 else
989 num = (ndescs - prod) + cons;
990
991 if (__predict_true(num > 0)) {
992 		/* the device cannot receive packets if all descriptors are filled */
993 num -= 1;
994 }
995
996 return num;
997 }
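
/*
 * Worked example for ixl_rxr_unrefreshed(): with ndescs = 8, prod = 6 and
 * cons = 2, num = (8 - 6) + 2 = 4; one slot is then reserved because the
 * device cannot receive when every descriptor is filled, leaving 3
 * descriptors available to refill.
 */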
998
999 CFATTACH_DECL3_NEW(ixl, sizeof(struct ixl_softc),
1000 ixl_match, ixl_attach, ixl_detach, NULL, NULL, NULL,
1001 DVF_DETACH_SHUTDOWN);
1002
1003 static const struct ixl_product ixl_products[] = {
1004 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_SFP },
1005 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_KX_B },
1006 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_KX_C },
1007 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_A },
1008 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_B },
1009 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_C },
1010 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_T },
1011 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_1 },
1012 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_2 },
1013 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_T4_10G },
1014 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_BP },
1015 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_SFP28 },
1016 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_KX },
1017 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_QSFP },
1018 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_SFP },
1019 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_1G_BASET },
1020 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_10G_BASET },
1021 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_I_SFP },
1022 /* required last entry */
1023 {0, 0}
1024 };
1025
1026 static const struct ixl_product *
1027 ixl_lookup(const struct pci_attach_args *pa)
1028 {
1029 const struct ixl_product *ixlp;
1030
1031 for (ixlp = ixl_products; ixlp->vendor_id != 0; ixlp++) {
1032 if (PCI_VENDOR(pa->pa_id) == ixlp->vendor_id &&
1033 PCI_PRODUCT(pa->pa_id) == ixlp->product_id)
1034 return ixlp;
1035 }
1036
1037 return NULL;
1038 }
1039
1040 static int
1041 ixl_match(device_t parent, cfdata_t match, void *aux)
1042 {
1043 const struct pci_attach_args *pa = aux;
1044
1045 return (ixl_lookup(pa) != NULL) ? 1 : 0;
1046 }
1047
1048 static void
1049 ixl_attach(device_t parent, device_t self, void *aux)
1050 {
1051 struct ixl_softc *sc;
1052 struct pci_attach_args *pa = aux;
1053 struct ifnet *ifp;
1054 pcireg_t memtype, reg;
1055 uint32_t firstq, port, ari, func;
1056 uint64_t phy_types = 0;
1057 char xnamebuf[32];
1058 int tries, rv;
1059
1060 sc = device_private(self);
1061 sc->sc_dev = self;
1062 ifp = &sc->sc_ec.ec_if;
1063
1064 sc->sc_pa = *pa;
1065 sc->sc_dmat = (pci_dma64_available(pa)) ?
1066 pa->pa_dmat64 : pa->pa_dmat;
1067 sc->sc_aq_regs = &ixl_pf_aq_regs;
1068
1069 reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
1070 sc->sc_mac_type = ixl_mactype(PCI_PRODUCT(reg));
1071
1072 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IXL_PCIREG);
1073 if (pci_mapreg_map(pa, IXL_PCIREG, memtype, 0,
1074 &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems)) {
1075 aprint_error(": unable to map registers\n");
1076 return;
1077 }
1078
1079 mutex_init(&sc->sc_cfg_lock, MUTEX_DEFAULT, IPL_SOFTNET);
1080
1081 firstq = ixl_rd(sc, I40E_PFLAN_QALLOC);
1082 firstq &= I40E_PFLAN_QALLOC_FIRSTQ_MASK;
1083 firstq >>= I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
1084 sc->sc_base_queue = firstq;
1085
1086 ixl_clear_hw(sc);
1087 if (ixl_pf_reset(sc) == -1) {
1088 		/* error printed by ixl_pf_reset */
1089 goto unmap;
1090 }
1091
1092 port = ixl_rd(sc, I40E_PFGEN_PORTNUM);
1093 port &= I40E_PFGEN_PORTNUM_PORT_NUM_MASK;
1094 port >>= I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
1095 sc->sc_port = port;
1096 aprint_normal(": port %u", sc->sc_port);
1097
1098 ari = ixl_rd(sc, I40E_GLPCI_CAPSUP);
1099 ari &= I40E_GLPCI_CAPSUP_ARI_EN_MASK;
1100 ari >>= I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
1101
1102 func = ixl_rd(sc, I40E_PF_FUNC_RID);
1103 sc->sc_pf_id = func & (ari ? 0xff : 0x7);
1104
1105 /* initialise the adminq */
1106
1107 mutex_init(&sc->sc_atq_lock, MUTEX_DEFAULT, IPL_NET);
1108
1109 if (ixl_dmamem_alloc(sc, &sc->sc_atq,
1110 sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
1111 aprint_error("\n" "%s: unable to allocate atq\n",
1112 device_xname(self));
1113 goto unmap;
1114 }
1115
1116 SIMPLEQ_INIT(&sc->sc_arq_idle);
1117 ixl_work_set(&sc->sc_arq_task, ixl_arq, sc);
1118 sc->sc_arq_cons = 0;
1119 sc->sc_arq_prod = 0;
1120
1121 if (ixl_dmamem_alloc(sc, &sc->sc_arq,
1122 sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
1123 aprint_error("\n" "%s: unable to allocate arq\n",
1124 device_xname(self));
1125 goto free_atq;
1126 }
1127
1128 if (!ixl_arq_fill(sc)) {
1129 aprint_error("\n" "%s: unable to fill arq descriptors\n",
1130 device_xname(self));
1131 goto free_arq;
1132 }
1133
1134 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1135 0, IXL_DMA_LEN(&sc->sc_atq),
1136 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1137
1138 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1139 0, IXL_DMA_LEN(&sc->sc_arq),
1140 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1141
1142 for (tries = 0; tries < 10; tries++) {
1143 sc->sc_atq_cons = 0;
1144 sc->sc_atq_prod = 0;
1145
1146 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1147 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1148 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1149 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1150
1151 ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
1152
1153 ixl_wr(sc, sc->sc_aq_regs->atq_bal,
1154 ixl_dmamem_lo(&sc->sc_atq));
1155 ixl_wr(sc, sc->sc_aq_regs->atq_bah,
1156 ixl_dmamem_hi(&sc->sc_atq));
1157 ixl_wr(sc, sc->sc_aq_regs->atq_len,
1158 sc->sc_aq_regs->atq_len_enable | IXL_AQ_NUM);
1159
1160 ixl_wr(sc, sc->sc_aq_regs->arq_bal,
1161 ixl_dmamem_lo(&sc->sc_arq));
1162 ixl_wr(sc, sc->sc_aq_regs->arq_bah,
1163 ixl_dmamem_hi(&sc->sc_arq));
1164 ixl_wr(sc, sc->sc_aq_regs->arq_len,
1165 sc->sc_aq_regs->arq_len_enable | IXL_AQ_NUM);
1166
1167 rv = ixl_get_version(sc);
1168 if (rv == 0)
1169 break;
1170 if (rv != ETIMEDOUT) {
1171 aprint_error(", unable to get firmware version\n");
1172 goto shutdown;
1173 }
1174
1175 delaymsec(100);
1176 }
1177
1178 ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
1179
1180 if (sc->sc_mac_type == I40E_MAC_X722)
1181 sc->sc_nqueue_pairs_device = 128;
1182 else
1183 sc->sc_nqueue_pairs_device = 64;
1184
1185 rv = ixl_get_hw_capabilities(sc);
1186 if (rv != 0) {
1187 aprint_error(", GET HW CAPABILITIES %s\n",
1188 rv == ETIMEDOUT ? "timeout" : "error");
1189 goto shutdown;
1190 }
1191
1192 sc->sc_nqueue_pairs_max = MIN((int)sc->sc_nqueue_pairs_device, ncpu);
1193 if (ixl_param_nqps_limit > 0) {
1194 sc->sc_nqueue_pairs_max = MIN((int)sc->sc_nqueue_pairs_max,
1195 ixl_param_nqps_limit);
1196 }
1197
1198 sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max;
1199 sc->sc_tx_ring_ndescs = ixl_param_tx_ndescs;
1200 sc->sc_rx_ring_ndescs = ixl_param_rx_ndescs;
1201
1202 KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_rx_ring_ndescs);
1203 KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_tx_ring_ndescs);
1204
1205 if (ixl_get_mac(sc) != 0) {
1206 /* error printed by ixl_get_mac */
1207 goto shutdown;
1208 }
1209
1210 aprint_normal("\n");
1211 aprint_naive("\n");
1212
1213 aprint_normal_dev(self, "Ethernet address %s\n",
1214 ether_sprintf(sc->sc_enaddr));
1215
1216 rv = ixl_pxe_clear(sc);
1217 if (rv != 0) {
1218 aprint_debug_dev(self, "CLEAR PXE MODE %s\n",
1219 rv == ETIMEDOUT ? "timeout" : "error");
1220 }
1221
1222 ixl_set_filter_control(sc);
1223
1224 if (ixl_hmc(sc) != 0) {
1225 /* error printed by ixl_hmc */
1226 goto shutdown;
1227 }
1228
1229 if (ixl_lldp_shut(sc) != 0) {
1230 /* error printed by ixl_lldp_shut */
1231 goto free_hmc;
1232 }
1233
1234 if (ixl_phy_mask_ints(sc) != 0) {
1235 /* error printed by ixl_phy_mask_ints */
1236 goto free_hmc;
1237 }
1238
1239 if (ixl_restart_an(sc) != 0) {
1240 /* error printed by ixl_restart_an */
1241 goto free_hmc;
1242 }
1243
1244 if (ixl_get_switch_config(sc) != 0) {
1245 /* error printed by ixl_get_switch_config */
1246 goto free_hmc;
1247 }
1248
1249 if (ixl_get_phy_types(sc, &phy_types) != 0) {
1250 /* error printed by ixl_get_phy_abilities */
1251 goto free_hmc;
1252 }
1253
1254 rv = ixl_get_link_status_poll(sc);
1255 if (rv != 0) {
1256 aprint_error_dev(self, "GET LINK STATUS %s\n",
1257 rv == ETIMEDOUT ? "timeout" : "error");
1258 goto free_hmc;
1259 }
1260
1261 if (ixl_dmamem_alloc(sc, &sc->sc_scratch,
1262 sizeof(struct ixl_aq_vsi_data), 8) != 0) {
1263 aprint_error_dev(self, "unable to allocate scratch buffer\n");
1264 goto free_hmc;
1265 }
1266
1267 rv = ixl_get_vsi(sc);
1268 if (rv != 0) {
1269 aprint_error_dev(self, "GET VSI %s %d\n",
1270 rv == ETIMEDOUT ? "timeout" : "error", rv);
1271 goto free_scratch;
1272 }
1273
1274 rv = ixl_set_vsi(sc);
1275 if (rv != 0) {
1276 aprint_error_dev(self, "UPDATE VSI error %s %d\n",
1277 rv == ETIMEDOUT ? "timeout" : "error", rv);
1278 goto free_scratch;
1279 }
1280
1281 if (ixl_queue_pairs_alloc(sc) != 0) {
1282 /* error printed by ixl_queue_pairs_alloc */
1283 goto free_scratch;
1284 }
1285
1286 if (ixl_setup_interrupts(sc) != 0) {
1287 /* error printed by ixl_setup_interrupts */
1288 goto free_queue_pairs;
1289 }
1290
1291 if (ixl_setup_stats(sc) != 0) {
1292 aprint_error_dev(self, "failed to setup event counters\n");
1293 goto teardown_intrs;
1294 }
1295
1296 if (ixl_setup_sysctls(sc) != 0) {
1297 /* error printed by ixl_setup_sysctls */
1298 goto teardown_stats;
1299 }
1300
1301 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_cfg", device_xname(self));
1302 sc->sc_workq = ixl_workq_create(xnamebuf, IXL_WORKQUEUE_PRI,
1303 IPL_NET, WQ_PERCPU | WQ_MPSAFE);
1304 if (sc->sc_workq == NULL)
1305 goto teardown_sysctls;
1306
1307 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_txrx", device_xname(self));
1308 sc->sc_workq_txrx = ixl_workq_create(xnamebuf, IXL_WORKQUEUE_PRI,
1309 IPL_NET, WQ_PERCPU | WQ_MPSAFE);
1310 if (sc->sc_workq_txrx == NULL)
1311 goto teardown_wqs;
1312
1313 snprintf(xnamebuf, sizeof(xnamebuf), "%s_atq_cv", device_xname(self));
1314 cv_init(&sc->sc_atq_cv, xnamebuf);
1315
1316 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
1317
1318 ifp->if_softc = sc;
1319 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1320 ifp->if_extflags = IFEF_MPSAFE;
1321 ifp->if_ioctl = ixl_ioctl;
1322 ifp->if_start = ixl_start;
1323 ifp->if_transmit = ixl_transmit;
1324 ifp->if_watchdog = ixl_watchdog;
1325 ifp->if_init = ixl_init;
1326 ifp->if_stop = ixl_stop;
1327 IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_ndescs);
1328 IFQ_SET_READY(&ifp->if_snd);
1329 ifp->if_capabilities |= IXL_IFCAP_RXCSUM;
1330 #if 0
1331 ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_UDPv4_Tx;
1332 #endif
1333 ether_set_vlan_cb(&sc->sc_ec, ixl_vlan_cb);
1334 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
1335 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
1336 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
1337
1338 sc->sc_ec.ec_capenable = sc->sc_ec.ec_capabilities;
1339 /* Disable VLAN_HWFILTER by default */
1340 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
1341
1342 sc->sc_cur_ec_capenable = sc->sc_ec.ec_capenable;
1343
1344 sc->sc_ec.ec_ifmedia = &sc->sc_media;
1345 ifmedia_init(&sc->sc_media, IFM_IMASK, ixl_media_change,
1346 ixl_media_status);
1347
1348 ixl_media_add(sc, phy_types);
1349 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1350 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
1351
1352 if_attach(ifp);
1353 if_deferred_start_init(ifp, NULL);
1354 ether_ifattach(ifp, sc->sc_enaddr);
1355 ether_set_ifflags_cb(&sc->sc_ec, ixl_ifflags_cb);
1356
1357 (void)ixl_get_link_status_poll(sc);
1358 ixl_work_set(&sc->sc_link_state_task, ixl_get_link_status, sc);
1359
1360 ixl_config_other_intr(sc);
1361 ixl_enable_other_intr(sc);
1362
1363 /* remove default mac filter and replace it so we can see vlans */
1364 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, 0, 0);
1365 if (rv != ENOENT) {
1366 aprint_debug_dev(self,
1367 "unable to remove macvlan %u\n", rv);
1368 }
1369 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
1370 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1371 if (rv != ENOENT) {
1372 aprint_debug_dev(self,
1373 "unable to remove macvlan, ignore vlan %u\n", rv);
1374 }
1375
1376 if (ixl_update_macvlan(sc) != 0) {
1377 aprint_debug_dev(self,
1378 "couldn't enable vlan hardware filter\n");
1379 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
1380 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER);
1381 }
1382
1383 sc->sc_txrx_workqueue = true;
1384 sc->sc_tx_process_limit = IXL_TX_PROCESS_LIMIT;
1385 sc->sc_rx_process_limit = IXL_RX_PROCESS_LIMIT;
1386 sc->sc_tx_intr_process_limit = IXL_TX_INTR_PROCESS_LIMIT;
1387 sc->sc_rx_intr_process_limit = IXL_RX_INTR_PROCESS_LIMIT;
1388
1389 ixl_stats_update(sc);
1390 sc->sc_stats_counters.isc_has_offset = true;
1391 callout_schedule(&sc->sc_stats_callout, mstohz(sc->sc_stats_intval));
1392
1393 if (pmf_device_register(self, NULL, NULL) != true)
1394 aprint_debug_dev(self, "couldn't establish power handler\n");
1395 sc->sc_attached = true;
1396 return;
1397
1398 teardown_wqs:
1399 config_finalize_register(self, ixl_workqs_teardown);
1400 teardown_sysctls:
1401 ixl_teardown_sysctls(sc);
1402 teardown_stats:
1403 ixl_teardown_stats(sc);
1404 teardown_intrs:
1405 ixl_teardown_interrupts(sc);
1406 free_queue_pairs:
1407 ixl_queue_pairs_free(sc);
1408 free_scratch:
1409 ixl_dmamem_free(sc, &sc->sc_scratch);
1410 free_hmc:
1411 ixl_hmc_free(sc);
1412 shutdown:
1413 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1414 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1415 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1416 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1417
1418 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0);
1419 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0);
1420 ixl_wr(sc, sc->sc_aq_regs->atq_len, 0);
1421
1422 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0);
1423 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0);
1424 ixl_wr(sc, sc->sc_aq_regs->arq_len, 0);
1425
1426 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1427 0, IXL_DMA_LEN(&sc->sc_arq),
1428 BUS_DMASYNC_POSTREAD);
1429 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1430 0, IXL_DMA_LEN(&sc->sc_atq),
1431 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1432
1433 ixl_arq_unfill(sc);
1434 free_arq:
1435 ixl_dmamem_free(sc, &sc->sc_arq);
1436 free_atq:
1437 ixl_dmamem_free(sc, &sc->sc_atq);
1438 unmap:
1439 mutex_destroy(&sc->sc_atq_lock);
1440 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
1441 mutex_destroy(&sc->sc_cfg_lock);
1442 sc->sc_mems = 0;
1443
1444 sc->sc_attached = false;
1445 }
1446
1447 static int
1448 ixl_detach(device_t self, int flags)
1449 {
1450 struct ixl_softc *sc = device_private(self);
1451 struct ifnet *ifp = &sc->sc_ec.ec_if;
1452
1453 if (!sc->sc_attached)
1454 return 0;
1455
1456 ixl_stop(ifp, 1);
1457
1458 ixl_disable_other_intr(sc);
1459
1460 callout_stop(&sc->sc_stats_callout);
1461 ixl_work_wait(sc->sc_workq, &sc->sc_stats_task);
1462
1463 /* wait for ATQ handler */
1464 mutex_enter(&sc->sc_atq_lock);
1465 mutex_exit(&sc->sc_atq_lock);
1466
1467 ixl_work_wait(sc->sc_workq, &sc->sc_arq_task);
1468 ixl_work_wait(sc->sc_workq, &sc->sc_link_state_task);
1469
1470 if (sc->sc_workq != NULL) {
1471 ixl_workq_destroy(sc->sc_workq);
1472 sc->sc_workq = NULL;
1473 }
1474
1475 if (sc->sc_workq_txrx != NULL) {
1476 ixl_workq_destroy(sc->sc_workq_txrx);
1477 sc->sc_workq_txrx = NULL;
1478 }
1479
1480 ifmedia_delete_instance(&sc->sc_media, IFM_INST_ANY);
1481 ether_ifdetach(ifp);
1482 if_detach(ifp);
1483
1484 ixl_teardown_interrupts(sc);
1485 ixl_teardown_stats(sc);
1486 ixl_teardown_sysctls(sc);
1487
1488 ixl_queue_pairs_free(sc);
1489
1490 ixl_dmamem_free(sc, &sc->sc_scratch);
1491 ixl_hmc_free(sc);
1492
1493 /* shutdown */
1494 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1495 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1496 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1497 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1498
1499 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0);
1500 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0);
1501 ixl_wr(sc, sc->sc_aq_regs->atq_len, 0);
1502
1503 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0);
1504 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0);
1505 ixl_wr(sc, sc->sc_aq_regs->arq_len, 0);
1506
1507 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1508 0, IXL_DMA_LEN(&sc->sc_arq),
1509 BUS_DMASYNC_POSTREAD);
1510 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1511 0, IXL_DMA_LEN(&sc->sc_atq),
1512 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1513
1514 ixl_arq_unfill(sc);
1515
1516 ixl_dmamem_free(sc, &sc->sc_arq);
1517 ixl_dmamem_free(sc, &sc->sc_atq);
1518
1519 cv_destroy(&sc->sc_atq_cv);
1520 mutex_destroy(&sc->sc_atq_lock);
1521
1522 if (sc->sc_mems != 0) {
1523 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
1524 sc->sc_mems = 0;
1525 }
1526
1527 mutex_destroy(&sc->sc_cfg_lock);
1528
1529 return 0;
1530 }
1531
1532 static int
1533 ixl_workqs_teardown(device_t self)
1534 {
1535 struct ixl_softc *sc = device_private(self);
1536
1537 if (sc->sc_workq != NULL) {
1538 ixl_workq_destroy(sc->sc_workq);
1539 sc->sc_workq = NULL;
1540 }
1541
1542 if (sc->sc_workq_txrx != NULL) {
1543 ixl_workq_destroy(sc->sc_workq_txrx);
1544 sc->sc_workq_txrx = NULL;
1545 }
1546
1547 return 0;
1548 }
1549
1550 static int
1551 ixl_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
1552 {
1553 struct ifnet *ifp = &ec->ec_if;
1554 struct ixl_softc *sc = ifp->if_softc;
1555 int rv;
1556
1557 if (!ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)) {
1558 return 0;
1559 }
1560
1561 if (set) {
1562 rv = ixl_add_macvlan(sc, sc->sc_enaddr, vid,
1563 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
1564 if (rv == 0) {
1565 rv = ixl_add_macvlan(sc, etherbroadcastaddr,
1566 vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
1567 }
1568 } else {
1569 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, vid,
1570 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
1571 (void)ixl_remove_macvlan(sc, etherbroadcastaddr, vid,
1572 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
1573 }
1574
1575 return rv;
1576 }
1577
1578 static void
1579 ixl_media_add(struct ixl_softc *sc, uint64_t phy_types)
1580 {
1581 struct ifmedia *ifm = &sc->sc_media;
1582 const struct ixl_phy_type *itype;
1583 unsigned int i;
1584
1585 for (i = 0; i < __arraycount(ixl_phy_type_map); i++) {
1586 itype = &ixl_phy_type_map[i];
1587
1588 if (ISSET(phy_types, itype->phy_type)) {
1589 ifmedia_add(ifm,
1590 IFM_ETHER | IFM_FDX | itype->ifm_type, 0, NULL);
1591
1592 if (itype->ifm_type == IFM_100_TX) {
1593 ifmedia_add(ifm, IFM_ETHER | itype->ifm_type,
1594 0, NULL);
1595 }
1596 }
1597 }
1598 }
1599
1600 static void
1601 ixl_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1602 {
1603 struct ixl_softc *sc = ifp->if_softc;
1604
1605 ifmr->ifm_status = sc->sc_media_status;
1606 ifmr->ifm_active = sc->sc_media_active;
1607
1608 mutex_enter(&sc->sc_cfg_lock);
1609 if (ifp->if_link_state == LINK_STATE_UP)
1610 SET(ifmr->ifm_status, IFM_ACTIVE);
1611 mutex_exit(&sc->sc_cfg_lock);
1612 }
1613
1614 static int
1615 ixl_media_change(struct ifnet *ifp)
1616 {
1617
1618 return 0;
1619 }
1620
1621 static void
1622 ixl_watchdog(struct ifnet *ifp)
1623 {
1624
1625 }
1626
1627 static void
1628 ixl_del_all_multiaddr(struct ixl_softc *sc)
1629 {
1630 struct ethercom *ec = &sc->sc_ec;
1631 struct ether_multi *enm;
1632 struct ether_multistep step;
1633
1634 ETHER_LOCK(ec);
1635 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1636 ETHER_NEXT_MULTI(step, enm)) {
1637 ixl_remove_macvlan(sc, enm->enm_addrlo, 0,
1638 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1639 }
1640 ETHER_UNLOCK(ec);
1641 }
1642
1643 static int
1644 ixl_add_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
1645 {
1646 struct ifnet *ifp = &sc->sc_ec.ec_if;
1647 int rv;
1648
1649 if (ISSET(ifp->if_flags, IFF_ALLMULTI))
1650 return 0;
1651
1652 if (memcmp(addrlo, addrhi, ETHER_ADDR_LEN) != 0) {
1653 ixl_del_all_multiaddr(sc);
1654 SET(ifp->if_flags, IFF_ALLMULTI);
1655 return ENETRESET;
1656 }
1657
1658 	/* multicast addresses cannot use the VLAN HWFILTER */
1659 rv = ixl_add_macvlan(sc, addrlo, 0,
1660 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
1661
1662 if (rv == ENOSPC) {
1663 ixl_del_all_multiaddr(sc);
1664 SET(ifp->if_flags, IFF_ALLMULTI);
1665 return ENETRESET;
1666 }
1667
1668 return rv;
1669 }
1670
1671 static int
1672 ixl_del_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
1673 {
1674 struct ifnet *ifp = &sc->sc_ec.ec_if;
1675 struct ethercom *ec = &sc->sc_ec;
1676 struct ether_multi *enm, *enm_last;
1677 struct ether_multistep step;
1678 int error, rv = 0;
1679
1680 if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
1681 ixl_remove_macvlan(sc, addrlo, 0,
1682 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1683 return 0;
1684 }
1685
1686 ETHER_LOCK(ec);
1687 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1688 ETHER_NEXT_MULTI(step, enm)) {
1689 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
1690 ETHER_ADDR_LEN) != 0) {
1691 goto out;
1692 }
1693 }
1694
1695 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1696 ETHER_NEXT_MULTI(step, enm)) {
1697 error = ixl_add_macvlan(sc, enm->enm_addrlo, 0,
1698 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
1699 if (error != 0)
1700 break;
1701 }
1702
1703 if (enm != NULL) {
1704 enm_last = enm;
1705 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1706 ETHER_NEXT_MULTI(step, enm)) {
1707 if (enm == enm_last)
1708 break;
1709
1710 ixl_remove_macvlan(sc, enm->enm_addrlo, 0,
1711 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1712 }
1713 } else {
1714 CLR(ifp->if_flags, IFF_ALLMULTI);
1715 rv = ENETRESET;
1716 }
1717
1718 out:
1719 ETHER_UNLOCK(ec);
1720 return rv;
1721 }
1722
1723 static int
1724 ixl_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1725 {
1726 struct ifreq *ifr = (struct ifreq *)data;
1727 struct ixl_softc *sc = (struct ixl_softc *)ifp->if_softc;
1728 struct ixl_tx_ring *txr;
1729 struct ixl_rx_ring *rxr;
1730 const struct sockaddr *sa;
1731 uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
1732 int s, error = 0;
1733 unsigned int i;
1734
1735 switch (cmd) {
1736 case SIOCADDMULTI:
1737 sa = ifreq_getaddr(SIOCADDMULTI, ifr);
1738 if (ether_addmulti(sa, &sc->sc_ec) == ENETRESET) {
1739 error = ether_multiaddr(sa, addrlo, addrhi);
1740 if (error != 0)
1741 return error;
1742
1743 error = ixl_add_multi(sc, addrlo, addrhi);
1744 if (error != 0 && error != ENETRESET) {
1745 ether_delmulti(sa, &sc->sc_ec);
1746 error = EIO;
1747 }
1748 }
1749 break;
1750
1751 case SIOCDELMULTI:
1752 sa = ifreq_getaddr(SIOCDELMULTI, ifr);
1753 if (ether_delmulti(sa, &sc->sc_ec) == ENETRESET) {
1754 error = ether_multiaddr(sa, addrlo, addrhi);
1755 if (error != 0)
1756 return error;
1757
1758 error = ixl_del_multi(sc, addrlo, addrhi);
1759 }
1760 break;
1761
1762 case SIOCGIFDATA:
1763 case SIOCZIFDATA:
1764 ifp->if_ipackets = 0;
1765 ifp->if_ibytes = 0;
1766 ifp->if_iqdrops = 0;
1767 ifp->if_ierrors = 0;
1768 ifp->if_opackets = 0;
1769 ifp->if_obytes = 0;
1770 ifp->if_omcasts = 0;
1771
1772 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
1773 txr = sc->sc_qps[i].qp_txr;
1774 rxr = sc->sc_qps[i].qp_rxr;
1775
1776 mutex_enter(&rxr->rxr_lock);
1777 ifp->if_ipackets += rxr->rxr_ipackets;
1778 ifp->if_ibytes += rxr->rxr_ibytes;
1779 ifp->if_iqdrops += rxr->rxr_iqdrops;
1780 ifp->if_ierrors += rxr->rxr_ierrors;
1781 if (cmd == SIOCZIFDATA) {
1782 rxr->rxr_ipackets = 0;
1783 rxr->rxr_ibytes = 0;
1784 rxr->rxr_iqdrops = 0;
1785 rxr->rxr_ierrors = 0;
1786 }
1787 mutex_exit(&rxr->rxr_lock);
1788
1789 mutex_enter(&txr->txr_lock);
1790 ifp->if_opackets += txr->txr_opackets;
1791 ifp->if_obytes += txr->txr_obytes;
1792 ifp->if_omcasts += txr->txr_omcasts;
1793 if (cmd == SIOCZIFDATA) {
1794 txr->txr_opackets = 0;
1795 txr->txr_obytes = 0;
1796 txr->txr_omcasts = 0;
1797 }
1798 mutex_exit(&txr->txr_lock);
1799 }
1800 /* FALLTHROUGH */
1801 default:
1802 s = splnet();
1803 error = ether_ioctl(ifp, cmd, data);
1804 splx(s);
1805 }
1806
1807 if (error == ENETRESET)
1808 error = ixl_iff(sc);
1809
1810 return error;
1811 }
1812
1813 static enum i40e_mac_type
1814 ixl_mactype(pci_product_id_t id)
1815 {
1816
1817 switch (id) {
1818 case PCI_PRODUCT_INTEL_XL710_SFP:
1819 case PCI_PRODUCT_INTEL_XL710_KX_B:
1820 case PCI_PRODUCT_INTEL_XL710_KX_C:
1821 case PCI_PRODUCT_INTEL_XL710_QSFP_A:
1822 case PCI_PRODUCT_INTEL_XL710_QSFP_B:
1823 case PCI_PRODUCT_INTEL_XL710_QSFP_C:
1824 case PCI_PRODUCT_INTEL_X710_10G_T:
1825 case PCI_PRODUCT_INTEL_XL710_20G_BP_1:
1826 case PCI_PRODUCT_INTEL_XL710_20G_BP_2:
1827 case PCI_PRODUCT_INTEL_X710_T4_10G:
1828 case PCI_PRODUCT_INTEL_XXV710_25G_BP:
1829 case PCI_PRODUCT_INTEL_XXV710_25G_SFP28:
1830 return I40E_MAC_XL710;
1831
1832 case PCI_PRODUCT_INTEL_X722_KX:
1833 case PCI_PRODUCT_INTEL_X722_QSFP:
1834 case PCI_PRODUCT_INTEL_X722_SFP:
1835 case PCI_PRODUCT_INTEL_X722_1G_BASET:
1836 case PCI_PRODUCT_INTEL_X722_10G_BASET:
1837 case PCI_PRODUCT_INTEL_X722_I_SFP:
1838 return I40E_MAC_X722;
1839 }
1840
1841 return I40E_MAC_GENERIC;
1842 }
1843
1844 static inline void *
1845 ixl_hmc_kva(struct ixl_softc *sc, enum ixl_hmc_types type, unsigned int i)
1846 {
1847 uint8_t *kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
1848 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
1849
1850 if (i >= e->hmc_count)
1851 return NULL;
1852
1853 kva += e->hmc_base;
1854 kva += i * e->hmc_size;
1855
1856 return kva;
1857 }
1858
1859 static inline size_t
1860 ixl_hmc_len(struct ixl_softc *sc, enum ixl_hmc_types type)
1861 {
1862 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
1863
1864 return e->hmc_size;
1865 }
1866
1867 static void
1868 ixl_enable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp)
1869 {
1870 struct ixl_rx_ring *rxr = qp->qp_rxr;
1871
1872 ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid),
1873 I40E_PFINT_DYN_CTLN_INTENA_MASK |
1874 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1875 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
1876 ixl_flush(sc);
1877 }
1878
1879 static void
1880 ixl_disable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp)
1881 {
1882 struct ixl_rx_ring *rxr = qp->qp_rxr;
1883
1884 ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid),
1885 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
1886 ixl_flush(sc);
1887 }
1888
1889 static void
1890 ixl_enable_other_intr(struct ixl_softc *sc)
1891 {
1892
1893 ixl_wr(sc, I40E_PFINT_DYN_CTL0,
1894 I40E_PFINT_DYN_CTL0_INTENA_MASK |
1895 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
1896 (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
1897 ixl_flush(sc);
1898 }
1899
1900 static void
1901 ixl_disable_other_intr(struct ixl_softc *sc)
1902 {
1903
1904 ixl_wr(sc, I40E_PFINT_DYN_CTL0,
1905 (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
1906 ixl_flush(sc);
1907 }
1908
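/*
 * (Re)program the VSI and the TX/RX queue contexts in the HMC, reset
 * the ring indexes, bind the queues to the PF, prefill the RX rings
 * and enable each queue pair.  Returns EIO if the VSI cannot be read
 * back or updated, and ETIMEDOUT if a queue does not report itself
 * enabled.
 */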
1909 static int
1910 ixl_reinit(struct ixl_softc *sc)
1911 {
1912 struct ixl_rx_ring *rxr;
1913 struct ixl_tx_ring *txr;
1914 unsigned int i;
1915 uint32_t reg;
1916
1917 KASSERT(mutex_owned(&sc->sc_cfg_lock));
1918
1919 if (ixl_get_vsi(sc) != 0)
1920 return EIO;
1921
1922 if (ixl_set_vsi(sc) != 0)
1923 return EIO;
1924
1925 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1926 txr = sc->sc_qps[i].qp_txr;
1927 rxr = sc->sc_qps[i].qp_rxr;
1928
1929 txr->txr_cons = txr->txr_prod = 0;
1930 rxr->rxr_cons = rxr->rxr_prod = 0;
1931
1932 ixl_txr_config(sc, txr);
1933 ixl_rxr_config(sc, rxr);
1934 }
1935
1936 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
1937 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_PREWRITE);
1938
1939 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1940 txr = sc->sc_qps[i].qp_txr;
1941 rxr = sc->sc_qps[i].qp_rxr;
1942
1943 ixl_wr(sc, I40E_QTX_CTL(i), I40E_QTX_CTL_PF_QUEUE |
1944 (sc->sc_pf_id << I40E_QTX_CTL_PF_INDX_SHIFT));
1945 ixl_flush(sc);
1946
1947 ixl_wr(sc, txr->txr_tail, txr->txr_prod);
1948 ixl_wr(sc, rxr->rxr_tail, rxr->rxr_prod);
1949
1950 /* ixl_rxfill() needs lock held */
1951 mutex_enter(&rxr->rxr_lock);
1952 ixl_rxfill(sc, rxr);
1953 mutex_exit(&rxr->rxr_lock);
1954
1955 reg = ixl_rd(sc, I40E_QRX_ENA(i));
1956 SET(reg, I40E_QRX_ENA_QENA_REQ_MASK);
1957 ixl_wr(sc, I40E_QRX_ENA(i), reg);
1958 if (ixl_rxr_enabled(sc, rxr) != 0)
1959 goto stop;
1960
1961 ixl_txr_qdis(sc, txr, 1);
1962
1963 reg = ixl_rd(sc, I40E_QTX_ENA(i));
1964 SET(reg, I40E_QTX_ENA_QENA_REQ_MASK);
1965 ixl_wr(sc, I40E_QTX_ENA(i), reg);
1966
1967 if (ixl_txr_enabled(sc, txr) != 0)
1968 goto stop;
1969 }
1970
1971 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
1972 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE);
1973
1974 return 0;
1975
1976 stop:
1977 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
1978 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE);
1979
1980 return ETIMEDOUT;
1981 }
1982
1983 static int
1984 ixl_init_locked(struct ixl_softc *sc)
1985 {
1986 struct ifnet *ifp = &sc->sc_ec.ec_if;
1987 unsigned int i;
1988 int error, eccap_change;
1989
1990 KASSERT(mutex_owned(&sc->sc_cfg_lock));
1991
1992 if (ISSET(ifp->if_flags, IFF_RUNNING))
1993 ixl_stop_locked(sc);
1994
1995 if (sc->sc_dead) {
1996 return ENXIO;
1997 }
1998
1999 eccap_change = sc->sc_ec.ec_capenable ^ sc->sc_cur_ec_capenable;
2000 if (ISSET(eccap_change, ETHERCAP_VLAN_HWTAGGING))
2001 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWTAGGING;
2002
2003 if (ISSET(eccap_change, ETHERCAP_VLAN_HWFILTER)) {
2004 if (ixl_update_macvlan(sc) == 0) {
2005 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWFILTER;
2006 } else {
2007 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
2008 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER);
2009 }
2010 }
2011
2012 if (sc->sc_intrtype != PCI_INTR_TYPE_MSIX)
2013 sc->sc_nqueue_pairs = 1;
2014 else
2015 sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max;
2016
2017 error = ixl_reinit(sc);
2018 if (error) {
2019 ixl_stop_locked(sc);
2020 return error;
2021 }
2022
2023 SET(ifp->if_flags, IFF_RUNNING);
2024 CLR(ifp->if_flags, IFF_OACTIVE);
2025
2026 (void)ixl_get_link_status(sc);
2027
2028 ixl_config_rss(sc);
2029 ixl_config_queue_intr(sc);
2030
2031 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2032 ixl_enable_queue_intr(sc, &sc->sc_qps[i]);
2033 }
2034
2035 error = ixl_iff(sc);
2036 if (error) {
2037 ixl_stop_locked(sc);
2038 return error;
2039 }
2040
2041 return 0;
2042 }
2043
2044 static int
2045 ixl_init(struct ifnet *ifp)
2046 {
2047 struct ixl_softc *sc = ifp->if_softc;
2048 int error;
2049
2050 mutex_enter(&sc->sc_cfg_lock);
2051 error = ixl_init_locked(sc);
2052 mutex_exit(&sc->sc_cfg_lock);
2053
2054 return error;
2055 }
2056
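/*
 * Sync the interface flags with the hardware: program the VSI
 * unicast/multicast/broadcast/VLAN promiscuous bits from IFF_PROMISC,
 * IFF_ALLMULTI and the VLAN HWFILTER capability via an admin queue
 * command, and reprogram the unicast MAC filter if the interface
 * address has changed.
 */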
2057 static int
2058 ixl_iff(struct ixl_softc *sc)
2059 {
2060 struct ifnet *ifp = &sc->sc_ec.ec_if;
2061 struct ixl_atq iatq;
2062 struct ixl_aq_desc *iaq;
2063 struct ixl_aq_vsi_promisc_param *param;
2064 uint16_t flag_add, flag_del;
2065 int error;
2066
2067 if (!ISSET(ifp->if_flags, IFF_RUNNING))
2068 return 0;
2069
2070 memset(&iatq, 0, sizeof(iatq));
2071
2072 iaq = &iatq.iatq_desc;
2073 iaq->iaq_opcode = htole16(IXL_AQ_OP_SET_VSI_PROMISC);
2074
2075 param = (struct ixl_aq_vsi_promisc_param *)&iaq->iaq_param;
2076 param->flags = htole16(0);
2077
2078 if (!ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)
2079 || ISSET(ifp->if_flags, IFF_PROMISC)) {
2080 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_BCAST |
2081 IXL_AQ_VSI_PROMISC_FLAG_VLAN);
2082 }
2083
2084 if (ISSET(ifp->if_flags, IFF_PROMISC)) {
2085 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
2086 IXL_AQ_VSI_PROMISC_FLAG_MCAST);
2087 } else if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
2088 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_MCAST);
2089 }
2090 param->valid_flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
2091 IXL_AQ_VSI_PROMISC_FLAG_MCAST | IXL_AQ_VSI_PROMISC_FLAG_BCAST |
2092 IXL_AQ_VSI_PROMISC_FLAG_VLAN);
2093 param->seid = sc->sc_seid;
2094
2095 error = ixl_atq_exec(sc, &iatq);
2096 if (error)
2097 return error;
2098
2099 if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK))
2100 return EIO;
2101
2102 if (memcmp(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN) != 0) {
2103 if (ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)) {
2104 flag_add = IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH;
2105 flag_del = IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH;
2106 } else {
2107 flag_add = IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN;
2108 flag_del = IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN;
2109 }
2110
2111 ixl_remove_macvlan(sc, sc->sc_enaddr, 0, flag_del);
2112
2113 memcpy(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN);
2114 ixl_add_macvlan(sc, sc->sc_enaddr, 0, flag_add);
2115 }
2116 return 0;
2117 }
2118
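/*
 * Wait out any TX/RX processing that is still in flight: taking and
 * releasing each ring lock guarantees that current lock holders have
 * finished, and ixl_work_wait() drains the per-queue deferred work.
 */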
2119 static void
2120 ixl_stop_rendezvous(struct ixl_softc *sc)
2121 {
2122 struct ixl_tx_ring *txr;
2123 struct ixl_rx_ring *rxr;
2124 unsigned int i;
2125
2126 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2127 txr = sc->sc_qps[i].qp_txr;
2128 rxr = sc->sc_qps[i].qp_rxr;
2129
2130 mutex_enter(&txr->txr_lock);
2131 mutex_exit(&txr->txr_lock);
2132
2133 mutex_enter(&rxr->rxr_lock);
2134 mutex_exit(&rxr->rxr_lock);
2135
2136 ixl_work_wait(sc->sc_workq_txrx,
2137 &sc->sc_qps[i].qp_task);
2138 }
2139 }
2140
2141 static void
2142 ixl_stop_locked(struct ixl_softc *sc)
2143 {
2144 struct ifnet *ifp = &sc->sc_ec.ec_if;
2145 struct ixl_rx_ring *rxr;
2146 struct ixl_tx_ring *txr;
2147 unsigned int i;
2148 uint32_t reg;
2149
2150 KASSERT(mutex_owned(&sc->sc_cfg_lock));
2151
2152 CLR(ifp->if_flags, IFF_RUNNING | IFF_OACTIVE);
2153
2154 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2155 txr = sc->sc_qps[i].qp_txr;
2156 rxr = sc->sc_qps[i].qp_rxr;
2157
2158 ixl_disable_queue_intr(sc, &sc->sc_qps[i]);
2159
2160 mutex_enter(&txr->txr_lock);
2161 ixl_txr_qdis(sc, txr, 0);
2162 /* XXX wait at least 400 usec for all tx queues in one go */
2163 ixl_flush(sc);
2164 DELAY(500);
2165
2166 reg = ixl_rd(sc, I40E_QTX_ENA(i));
2167 CLR(reg, I40E_QTX_ENA_QENA_REQ_MASK);
2168 ixl_wr(sc, I40E_QTX_ENA(i), reg);
2169 		/* XXX wait 50ms from completion of the TX queue disable */
2170 ixl_flush(sc);
2171 DELAY(50);
2172
2173 if (ixl_txr_disabled(sc, txr) != 0) {
2174 mutex_exit(&txr->txr_lock);
2175 goto die;
2176 }
2177 mutex_exit(&txr->txr_lock);
2178
2179 mutex_enter(&rxr->rxr_lock);
2180 reg = ixl_rd(sc, I40E_QRX_ENA(i));
2181 CLR(reg, I40E_QRX_ENA_QENA_REQ_MASK);
2182 ixl_wr(sc, I40E_QRX_ENA(i), reg);
2183 /* XXX wait 50ms from completion of the RX queue disable */
2184 ixl_flush(sc);
2185 DELAY(50);
2186
2187 if (ixl_rxr_disabled(sc, rxr) != 0) {
2188 mutex_exit(&rxr->rxr_lock);
2189 goto die;
2190 }
2191 mutex_exit(&rxr->rxr_lock);
2192 }
2193
2194 ixl_stop_rendezvous(sc);
2195
2196 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2197 txr = sc->sc_qps[i].qp_txr;
2198 rxr = sc->sc_qps[i].qp_rxr;
2199
2200 ixl_txr_unconfig(sc, txr);
2201 ixl_rxr_unconfig(sc, rxr);
2202
2203 ixl_txr_clean(sc, txr);
2204 ixl_rxr_clean(sc, rxr);
2205 }
2206
2207 return;
2208 die:
2209 sc->sc_dead = true;
2210 	log(LOG_CRIT, "%s: failed to shut down rings\n",
2211 device_xname(sc->sc_dev));
2212 return;
2213 }
2214
2215 static void
2216 ixl_stop(struct ifnet *ifp, int disable)
2217 {
2218 struct ixl_softc *sc = ifp->if_softc;
2219
2220 mutex_enter(&sc->sc_cfg_lock);
2221 ixl_stop_locked(sc);
2222 mutex_exit(&sc->sc_cfg_lock);
2223 }
2224
2225 static int
2226 ixl_queue_pairs_alloc(struct ixl_softc *sc)
2227 {
2228 struct ixl_queue_pair *qp;
2229 unsigned int i;
2230 size_t sz;
2231
2232 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2233 sc->sc_qps = kmem_zalloc(sz, KM_SLEEP);
2234
2235 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2236 qp = &sc->sc_qps[i];
2237
2238 qp->qp_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
2239 ixl_handle_queue, qp);
2240 if (qp->qp_si == NULL)
2241 goto free;
2242
2243 qp->qp_txr = ixl_txr_alloc(sc, i);
2244 if (qp->qp_txr == NULL)
2245 goto free;
2246
2247 qp->qp_rxr = ixl_rxr_alloc(sc, i);
2248 if (qp->qp_rxr == NULL)
2249 goto free;
2250
2251 qp->qp_sc = sc;
2252 ixl_work_set(&qp->qp_task, ixl_handle_queue, qp);
2253 snprintf(qp->qp_name, sizeof(qp->qp_name),
2254 "%s-TXRX%d", device_xname(sc->sc_dev), i);
2255 }
2256
2257 return 0;
2258 free:
2259 if (sc->sc_qps != NULL) {
2260 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2261 qp = &sc->sc_qps[i];
2262
2263 if (qp->qp_txr != NULL)
2264 ixl_txr_free(sc, qp->qp_txr);
2265 if (qp->qp_rxr != NULL)
2266 ixl_rxr_free(sc, qp->qp_rxr);
2267 if (qp->qp_si != NULL)
2268 softint_disestablish(qp->qp_si);
2269 }
2270
2271 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2272 kmem_free(sc->sc_qps, sz);
2273 sc->sc_qps = NULL;
2274 }
2275
2276 return -1;
2277 }
2278
2279 static void
2280 ixl_queue_pairs_free(struct ixl_softc *sc)
2281 {
2282 struct ixl_queue_pair *qp;
2283 unsigned int i;
2284 size_t sz;
2285
2286 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2287 qp = &sc->sc_qps[i];
2288 ixl_txr_free(sc, qp->qp_txr);
2289 ixl_rxr_free(sc, qp->qp_rxr);
2290 softint_disestablish(qp->qp_si);
2291 }
2292
2293 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2294 kmem_free(sc->sc_qps, sz);
2295 sc->sc_qps = NULL;
2296 }
2297
2298 static struct ixl_tx_ring *
2299 ixl_txr_alloc(struct ixl_softc *sc, unsigned int qid)
2300 {
2301 struct ixl_tx_ring *txr = NULL;
2302 struct ixl_tx_map *maps = NULL, *txm;
2303 unsigned int i;
2304
2305 txr = kmem_zalloc(sizeof(*txr), KM_SLEEP);
2306 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_tx_ring_ndescs,
2307 KM_SLEEP);
2308
2309 if (ixl_dmamem_alloc(sc, &txr->txr_mem,
2310 sizeof(struct ixl_tx_desc) * sc->sc_tx_ring_ndescs,
2311 IXL_TX_QUEUE_ALIGN) != 0)
2312 goto free;
2313
2314 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2315 txm = &maps[i];
2316
2317 if (bus_dmamap_create(sc->sc_dmat,
2318 IXL_HARDMTU, IXL_TX_PKT_DESCS, IXL_HARDMTU, 0,
2319 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &txm->txm_map) != 0)
2320 goto uncreate;
2321
2322 txm->txm_eop = -1;
2323 txm->txm_m = NULL;
2324 }
2325
2326 txr->txr_cons = txr->txr_prod = 0;
2327 txr->txr_maps = maps;
2328
2329 txr->txr_intrq = pcq_create(sc->sc_tx_ring_ndescs, KM_NOSLEEP);
2330 if (txr->txr_intrq == NULL)
2331 goto uncreate;
2332
2333 txr->txr_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
2334 ixl_deferred_transmit, txr);
2335 if (txr->txr_si == NULL)
2336 goto destroy_pcq;
2337
2338 txr->txr_tail = I40E_QTX_TAIL(qid);
2339 txr->txr_qid = qid;
2340 txr->txr_sc = sc;
2341 mutex_init(&txr->txr_lock, MUTEX_DEFAULT, IPL_NET);
2342
2343 return txr;
2344
2345 destroy_pcq:
2346 pcq_destroy(txr->txr_intrq);
2347 uncreate:
2348 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2349 txm = &maps[i];
2350
2351 if (txm->txm_map == NULL)
2352 continue;
2353
2354 bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2355 }
2356
2357 ixl_dmamem_free(sc, &txr->txr_mem);
2358 free:
2359 kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs);
2360 kmem_free(txr, sizeof(*txr));
2361
2362 return NULL;
2363 }
2364
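/*
 * Request TX queue disable (or, with enable set, clear a pending
 * disable) through the GLLAN_TXPRE_QDIS registers; the queues are
 * grouped 128 per register.
 */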
2365 static void
2366 ixl_txr_qdis(struct ixl_softc *sc, struct ixl_tx_ring *txr, int enable)
2367 {
2368 unsigned int qid;
2369 bus_size_t reg;
2370 uint32_t r;
2371
2372 qid = txr->txr_qid + sc->sc_base_queue;
2373 reg = I40E_GLLAN_TXPRE_QDIS(qid / 128);
2374 qid %= 128;
2375
2376 r = ixl_rd(sc, reg);
2377 CLR(r, I40E_GLLAN_TXPRE_QDIS_QINDX_MASK);
2378 SET(r, qid << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
2379 SET(r, enable ? I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK :
2380 I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK);
2381 ixl_wr(sc, reg, r);
2382 }
2383
2384 static void
2385 ixl_txr_config(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2386 {
2387 struct ixl_hmc_txq txq;
2388 struct ixl_aq_vsi_data *data = IXL_DMA_KVA(&sc->sc_scratch);
2389 void *hmc;
2390
2391 memset(&txq, 0, sizeof(txq));
2392 txq.head = htole16(txr->txr_cons);
2393 txq.new_context = 1;
2394 txq.base = htole64(IXL_DMA_DVA(&txr->txr_mem) / IXL_HMC_TXQ_BASE_UNIT);
2395 txq.head_wb_ena = IXL_HMC_TXQ_DESC_WB;
2396 txq.qlen = htole16(sc->sc_tx_ring_ndescs);
2397 txq.tphrdesc_ena = 0;
2398 txq.tphrpacket_ena = 0;
2399 txq.tphwdesc_ena = 0;
2400 txq.rdylist = data->qs_handle[0];
2401
2402 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2403 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2404 ixl_hmc_pack(hmc, &txq, ixl_hmc_pack_txq,
2405 __arraycount(ixl_hmc_pack_txq));
2406 }
2407
2408 static void
2409 ixl_txr_unconfig(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2410 {
2411 void *hmc;
2412
2413 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2414 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2415 }
2416
2417 static void
2418 ixl_txr_clean(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2419 {
2420 struct ixl_tx_map *maps, *txm;
2421 bus_dmamap_t map;
2422 unsigned int i;
2423
2424 maps = txr->txr_maps;
2425 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2426 txm = &maps[i];
2427
2428 if (txm->txm_m == NULL)
2429 continue;
2430
2431 map = txm->txm_map;
2432 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2433 BUS_DMASYNC_POSTWRITE);
2434 bus_dmamap_unload(sc->sc_dmat, map);
2435
2436 m_freem(txm->txm_m);
2437 txm->txm_m = NULL;
2438 }
2439 }
2440
2441 static int
2442 ixl_txr_enabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2443 {
2444 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2445 uint32_t reg;
2446 int i;
2447
2448 for (i = 0; i < 10; i++) {
2449 reg = ixl_rd(sc, ena);
2450 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK))
2451 return 0;
2452
2453 delaymsec(10);
2454 }
2455
2456 return ETIMEDOUT;
2457 }
2458
2459 static int
2460 ixl_txr_disabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2461 {
2462 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2463 uint32_t reg;
2464 int i;
2465
2466 KASSERT(mutex_owned(&txr->txr_lock));
2467
2468 for (i = 0; i < 20; i++) {
2469 reg = ixl_rd(sc, ena);
2470 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK) == 0)
2471 return 0;
2472
2473 delaymsec(10);
2474 }
2475
2476 return ETIMEDOUT;
2477 }
2478
2479 static void
2480 ixl_txr_free(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2481 {
2482 struct ixl_tx_map *maps, *txm;
2483 struct mbuf *m;
2484 unsigned int i;
2485
2486 softint_disestablish(txr->txr_si);
2487 while ((m = pcq_get(txr->txr_intrq)) != NULL)
2488 m_freem(m);
2489 pcq_destroy(txr->txr_intrq);
2490
2491 maps = txr->txr_maps;
2492 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2493 txm = &maps[i];
2494
2495 bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2496 }
2497
2498 ixl_dmamem_free(sc, &txr->txr_mem);
2499 mutex_destroy(&txr->txr_lock);
2500 kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs);
2501 kmem_free(txr, sizeof(*txr));
2502 }
2503
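/*
 * Load an mbuf chain for transmission.  If it has too many DMA
 * segments (EFBIG), defragment it once and retry, accounting the
 * outcome in the ring's event counters.
 */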
2504 static inline int
2505 ixl_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf **m0,
2506 struct ixl_tx_ring *txr)
2507 {
2508 struct mbuf *m;
2509 int error;
2510
2511 KASSERT(mutex_owned(&txr->txr_lock));
2512
2513 m = *m0;
2514
2515 error = bus_dmamap_load_mbuf(dmat, map, m,
2516 BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT);
2517 if (error != EFBIG)
2518 return error;
2519
2520 m = m_defrag(m, M_DONTWAIT);
2521 if (m != NULL) {
2522 *m0 = m;
2523 txr->txr_defragged.ev_count++;
2524
2525 error = bus_dmamap_load_mbuf(dmat, map, m,
2526 BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT);
2527 } else {
2528 txr->txr_defrag_failed.ev_count++;
2529 error = ENOBUFS;
2530 }
2531
2532 return error;
2533 }
2534
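/*
 * Common transmit path for if_start (is_transmit == false) and
 * if_transmit (is_transmit == true).  Dequeue packets from if_snd or
 * the per-queue pcq, build one data descriptor per DMA segment, mark
 * the last descriptor of each packet with EOP|RS, and finally bump
 * the queue tail register.
 */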
2535 static void
2536 ixl_tx_common_locked(struct ifnet *ifp, struct ixl_tx_ring *txr,
2537 bool is_transmit)
2538 {
2539 struct ixl_softc *sc = ifp->if_softc;
2540 struct ixl_tx_desc *ring, *txd;
2541 struct ixl_tx_map *txm;
2542 bus_dmamap_t map;
2543 struct mbuf *m;
2544 uint64_t cmd, cmd_vlan;
2545 unsigned int prod, free, last, i;
2546 unsigned int mask;
2547 int post = 0;
2548
2549 KASSERT(mutex_owned(&txr->txr_lock));
2550
2551 if (ifp->if_link_state != LINK_STATE_UP
2552 || !ISSET(ifp->if_flags, IFF_RUNNING)
2553 || (!is_transmit && ISSET(ifp->if_flags, IFF_OACTIVE))) {
2554 if (!is_transmit)
2555 IFQ_PURGE(&ifp->if_snd);
2556 return;
2557 }
2558
2559 prod = txr->txr_prod;
2560 free = txr->txr_cons;
2561 if (free <= prod)
2562 free += sc->sc_tx_ring_ndescs;
2563 free -= prod;
2564
2565 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2566 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTWRITE);
2567
2568 ring = IXL_DMA_KVA(&txr->txr_mem);
2569 mask = sc->sc_tx_ring_ndescs - 1;
2570 last = prod;
2571 cmd = 0;
2572 txd = NULL;
2573
2574 for (;;) {
2575 if (free <= IXL_TX_PKT_DESCS) {
2576 if (!is_transmit)
2577 SET(ifp->if_flags, IFF_OACTIVE);
2578 break;
2579 }
2580
2581 if (is_transmit)
2582 m = pcq_get(txr->txr_intrq);
2583 else
2584 IFQ_DEQUEUE(&ifp->if_snd, m);
2585
2586 if (m == NULL)
2587 break;
2588
2589 txm = &txr->txr_maps[prod];
2590 map = txm->txm_map;
2591
2592 if (ixl_load_mbuf(sc->sc_dmat, map, &m, txr) != 0) {
2593 txr->txr_oerrors++;
2594 m_freem(m);
2595 continue;
2596 }
2597
2598 if (vlan_has_tag(m)) {
2599 cmd_vlan = (uint64_t)vlan_get_tag(m) <<
2600 IXL_TX_DESC_L2TAG1_SHIFT;
2601 cmd_vlan |= IXL_TX_DESC_CMD_IL2TAG1;
2602 } else {
2603 cmd_vlan = 0;
2604 }
2605
2606 bus_dmamap_sync(sc->sc_dmat, map, 0,
2607 map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2608
2609 for (i = 0; i < (unsigned int)map->dm_nsegs; i++) {
2610 txd = &ring[prod];
2611
2612 cmd = (uint64_t)map->dm_segs[i].ds_len <<
2613 IXL_TX_DESC_BSIZE_SHIFT;
2614 cmd |= IXL_TX_DESC_DTYPE_DATA | IXL_TX_DESC_CMD_ICRC;
2615 cmd |= cmd_vlan;
2616
2617 txd->addr = htole64(map->dm_segs[i].ds_addr);
2618 txd->cmd = htole64(cmd);
2619
2620 last = prod;
2621
2622 prod++;
2623 prod &= mask;
2624 }
2625 cmd |= IXL_TX_DESC_CMD_EOP | IXL_TX_DESC_CMD_RS;
2626 txd->cmd = htole64(cmd);
2627
2628 txm->txm_m = m;
2629 txm->txm_eop = last;
2630
2631 bpf_mtap(ifp, m, BPF_D_OUT);
2632
2633 free -= i;
2634 post = 1;
2635 }
2636
2637 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2638 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREWRITE);
2639
2640 if (post) {
2641 txr->txr_prod = prod;
2642 ixl_wr(sc, txr->txr_tail, prod);
2643 }
2644 }
2645
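/*
 * Reclaim transmitted packets, processing at most txlimit packets.
 * Returns nonzero if more work is left for a later pass.
 */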
2646 static int
2647 ixl_txeof(struct ixl_softc *sc, struct ixl_tx_ring *txr, u_int txlimit)
2648 {
2649 struct ifnet *ifp = &sc->sc_ec.ec_if;
2650 struct ixl_tx_desc *ring, *txd;
2651 struct ixl_tx_map *txm;
2652 struct mbuf *m;
2653 bus_dmamap_t map;
2654 unsigned int cons, prod, last;
2655 unsigned int mask;
2656 uint64_t dtype;
2657 int done = 0, more = 0;
2658
2659 KASSERT(mutex_owned(&txr->txr_lock));
2660
2661 prod = txr->txr_prod;
2662 cons = txr->txr_cons;
2663
2664 if (cons == prod)
2665 return 0;
2666
2667 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2668 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTREAD);
2669
2670 ring = IXL_DMA_KVA(&txr->txr_mem);
2671 mask = sc->sc_tx_ring_ndescs - 1;
2672
2673 do {
2674 if (txlimit-- <= 0) {
2675 more = 1;
2676 break;
2677 }
2678
2679 txm = &txr->txr_maps[cons];
2680 last = txm->txm_eop;
2681 txd = &ring[last];
2682
2683 dtype = txd->cmd & htole64(IXL_TX_DESC_DTYPE_MASK);
2684 if (dtype != htole64(IXL_TX_DESC_DTYPE_DONE))
2685 break;
2686
2687 map = txm->txm_map;
2688
2689 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2690 BUS_DMASYNC_POSTWRITE);
2691 bus_dmamap_unload(sc->sc_dmat, map);
2692
2693 m = txm->txm_m;
2694 if (m != NULL) {
2695 txr->txr_opackets++;
2696 txr->txr_obytes += m->m_pkthdr.len;
2697 if (ISSET(m->m_flags, M_MCAST))
2698 txr->txr_omcasts++;
2699 m_freem(m);
2700 }
2701
2702 txm->txm_m = NULL;
2703 txm->txm_eop = -1;
2704
2705 cons = last + 1;
2706 cons &= mask;
2707 done = 1;
2708 } while (cons != prod);
2709
2710 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2711 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREREAD);
2712
2713 txr->txr_cons = cons;
2714
2715 if (done) {
2716 softint_schedule(txr->txr_si);
2717 if (txr->txr_qid == 0) {
2718 CLR(ifp->if_flags, IFF_OACTIVE);
2719 if_schedule_deferred_start(ifp);
2720 }
2721 }
2722
2723 return more;
2724 }
2725
2726 static void
2727 ixl_start(struct ifnet *ifp)
2728 {
2729 struct ixl_softc *sc;
2730 struct ixl_tx_ring *txr;
2731
2732 sc = ifp->if_softc;
2733 txr = sc->sc_qps[0].qp_txr;
2734
2735 mutex_enter(&txr->txr_lock);
2736 ixl_tx_common_locked(ifp, txr, false);
2737 mutex_exit(&txr->txr_lock);
2738 }
2739
2740 static inline unsigned int
2741 ixl_select_txqueue(struct ixl_softc *sc, struct mbuf *m)
2742 {
2743 u_int cpuid;
2744
2745 cpuid = cpu_index(curcpu());
2746
2747 return (unsigned int)(cpuid % sc->sc_nqueue_pairs);
2748 }
2749
2750 static int
2751 ixl_transmit(struct ifnet *ifp, struct mbuf *m)
2752 {
2753 struct ixl_softc *sc;
2754 struct ixl_tx_ring *txr;
2755 unsigned int qid;
2756
2757 sc = ifp->if_softc;
2758 qid = ixl_select_txqueue(sc, m);
2759
2760 txr = sc->sc_qps[qid].qp_txr;
2761
2762 if (__predict_false(!pcq_put(txr->txr_intrq, m))) {
2763 mutex_enter(&txr->txr_lock);
2764 txr->txr_pcqdrop.ev_count++;
2765 mutex_exit(&txr->txr_lock);
2766
2767 m_freem(m);
2768 return ENOBUFS;
2769 }
2770
2771 if (mutex_tryenter(&txr->txr_lock)) {
2772 ixl_tx_common_locked(ifp, txr, true);
2773 mutex_exit(&txr->txr_lock);
2774 } else {
2775 softint_schedule(txr->txr_si);
2776 }
2777
2778 return 0;
2779 }
2780
2781 static void
2782 ixl_deferred_transmit(void *xtxr)
2783 {
2784 struct ixl_tx_ring *txr = xtxr;
2785 struct ixl_softc *sc = txr->txr_sc;
2786 struct ifnet *ifp = &sc->sc_ec.ec_if;
2787
2788 mutex_enter(&txr->txr_lock);
2789 txr->txr_transmitdef.ev_count++;
2790 if (pcq_peek(txr->txr_intrq) != NULL)
2791 ixl_tx_common_locked(ifp, txr, true);
2792 mutex_exit(&txr->txr_lock);
2793 }
2794
2795 static struct ixl_rx_ring *
2796 ixl_rxr_alloc(struct ixl_softc *sc, unsigned int qid)
2797 {
2798 struct ixl_rx_ring *rxr = NULL;
2799 struct ixl_rx_map *maps = NULL, *rxm;
2800 unsigned int i;
2801
2802 rxr = kmem_zalloc(sizeof(*rxr), KM_SLEEP);
2803 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_rx_ring_ndescs,
2804 KM_SLEEP);
2805
2806 if (ixl_dmamem_alloc(sc, &rxr->rxr_mem,
2807 sizeof(struct ixl_rx_rd_desc_32) * sc->sc_rx_ring_ndescs,
2808 IXL_RX_QUEUE_ALIGN) != 0)
2809 goto free;
2810
2811 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2812 rxm = &maps[i];
2813
2814 if (bus_dmamap_create(sc->sc_dmat,
2815 IXL_HARDMTU, 1, IXL_HARDMTU, 0,
2816 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &rxm->rxm_map) != 0)
2817 goto uncreate;
2818
2819 rxm->rxm_m = NULL;
2820 }
2821
2822 rxr->rxr_cons = rxr->rxr_prod = 0;
2823 rxr->rxr_m_head = NULL;
2824 rxr->rxr_m_tail = &rxr->rxr_m_head;
2825 rxr->rxr_maps = maps;
2826
2827 rxr->rxr_tail = I40E_QRX_TAIL(qid);
2828 rxr->rxr_qid = qid;
2829 mutex_init(&rxr->rxr_lock, MUTEX_DEFAULT, IPL_NET);
2830
2831 return rxr;
2832
2833 uncreate:
2834 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2835 rxm = &maps[i];
2836
2837 if (rxm->rxm_map == NULL)
2838 continue;
2839
2840 bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
2841 }
2842
2843 ixl_dmamem_free(sc, &rxr->rxr_mem);
2844 free:
2845 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs);
2846 kmem_free(rxr, sizeof(*rxr));
2847
2848 return NULL;
2849 }
2850
2851 static void
2852 ixl_rxr_clean(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2853 {
2854 struct ixl_rx_map *maps, *rxm;
2855 bus_dmamap_t map;
2856 unsigned int i;
2857
2858 maps = rxr->rxr_maps;
2859 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2860 rxm = &maps[i];
2861
2862 if (rxm->rxm_m == NULL)
2863 continue;
2864
2865 map = rxm->rxm_map;
2866 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2867 BUS_DMASYNC_POSTWRITE);
2868 bus_dmamap_unload(sc->sc_dmat, map);
2869
2870 m_freem(rxm->rxm_m);
2871 rxm->rxm_m = NULL;
2872 }
2873
2874 m_freem(rxr->rxr_m_head);
2875 rxr->rxr_m_head = NULL;
2876 rxr->rxr_m_tail = &rxr->rxr_m_head;
2877
2878 rxr->rxr_prod = rxr->rxr_cons = 0;
2879 }
2880
2881 static int
2882 ixl_rxr_enabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2883 {
2884 bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
2885 uint32_t reg;
2886 int i;
2887
2888 for (i = 0; i < 10; i++) {
2889 reg = ixl_rd(sc, ena);
2890 if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK))
2891 return 0;
2892
2893 delaymsec(10);
2894 }
2895
2896 return ETIMEDOUT;
2897 }
2898
2899 static int
2900 ixl_rxr_disabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2901 {
2902 bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
2903 uint32_t reg;
2904 int i;
2905
2906 KASSERT(mutex_owned(&rxr->rxr_lock));
2907
2908 for (i = 0; i < 20; i++) {
2909 reg = ixl_rd(sc, ena);
2910 if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK) == 0)
2911 return 0;
2912
2913 delaymsec(10);
2914 }
2915
2916 return ETIMEDOUT;
2917 }
2918
2919 static void
2920 ixl_rxr_config(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2921 {
2922 struct ixl_hmc_rxq rxq;
2923 void *hmc;
2924
2925 memset(&rxq, 0, sizeof(rxq));
2926
2927 rxq.head = htole16(rxr->rxr_cons);
2928 rxq.base = htole64(IXL_DMA_DVA(&rxr->rxr_mem) / IXL_HMC_RXQ_BASE_UNIT);
2929 rxq.qlen = htole16(sc->sc_rx_ring_ndescs);
2930 rxq.dbuff = htole16(MCLBYTES / IXL_HMC_RXQ_DBUFF_UNIT);
2931 rxq.hbuff = 0;
2932 rxq.dtype = IXL_HMC_RXQ_DTYPE_NOSPLIT;
2933 rxq.dsize = IXL_HMC_RXQ_DSIZE_32;
2934 rxq.crcstrip = 1;
2935 rxq.l2sel = 1;
2936 rxq.showiv = 1;
2937 rxq.rxmax = htole16(IXL_HARDMTU);
2938 rxq.tphrdesc_ena = 0;
2939 rxq.tphwdesc_ena = 0;
2940 rxq.tphdata_ena = 0;
2941 rxq.tphhead_ena = 0;
2942 rxq.lrxqthresh = 0;
2943 rxq.prefena = 1;
2944
2945 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
2946 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
2947 ixl_hmc_pack(hmc, &rxq, ixl_hmc_pack_rxq,
2948 __arraycount(ixl_hmc_pack_rxq));
2949 }
2950
2951 static void
2952 ixl_rxr_unconfig(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2953 {
2954 void *hmc;
2955
2956 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
2957 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
2958 }
2959
2960 static void
2961 ixl_rxr_free(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2962 {
2963 struct ixl_rx_map *maps, *rxm;
2964 unsigned int i;
2965
2966 maps = rxr->rxr_maps;
2967 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2968 rxm = &maps[i];
2969
2970 bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
2971 }
2972
2973 ixl_dmamem_free(sc, &rxr->rxr_mem);
2974 mutex_destroy(&rxr->rxr_lock);
2975 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs);
2976 kmem_free(rxr, sizeof(*rxr));
2977 }
2978
2979 static inline void
2980 ixl_rx_csum(struct mbuf *m, uint64_t qword)
2981 {
2982 int flags_mask;
2983
2984 if (!ISSET(qword, IXL_RX_DESC_L3L4P)) {
2985 /* No L3 or L4 checksum was calculated */
2986 return;
2987 }
2988
2989 switch (__SHIFTOUT(qword, IXL_RX_DESC_PTYPE_MASK)) {
2990 case IXL_RX_DESC_PTYPE_IPV4FRAG:
2991 case IXL_RX_DESC_PTYPE_IPV4:
2992 case IXL_RX_DESC_PTYPE_SCTPV4:
2993 case IXL_RX_DESC_PTYPE_ICMPV4:
2994 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
2995 break;
2996 case IXL_RX_DESC_PTYPE_TCPV4:
2997 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
2998 flags_mask |= M_CSUM_TCPv4 | M_CSUM_TCP_UDP_BAD;
2999 break;
3000 case IXL_RX_DESC_PTYPE_UDPV4:
3001 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
3002 flags_mask |= M_CSUM_UDPv4 | M_CSUM_TCP_UDP_BAD;
3003 break;
3004 case IXL_RX_DESC_PTYPE_TCPV6:
3005 flags_mask = M_CSUM_TCPv6 | M_CSUM_TCP_UDP_BAD;
3006 break;
3007 case IXL_RX_DESC_PTYPE_UDPV6:
3008 flags_mask = M_CSUM_UDPv6 | M_CSUM_TCP_UDP_BAD;
3009 break;
3010 default:
3011 flags_mask = 0;
3012 }
3013
3014 m->m_pkthdr.csum_flags |= (flags_mask & (M_CSUM_IPv4 |
3015 M_CSUM_TCPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv4 | M_CSUM_UDPv6));
3016
3017 if (ISSET(qword, IXL_RX_DESC_IPE)) {
3018 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_IPv4_BAD);
3019 }
3020
3021 if (ISSET(qword, IXL_RX_DESC_L4E)) {
3022 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_TCP_UDP_BAD);
3023 }
3024 }
3025
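/*
 * Harvest received packets, processing at most rxlimit descriptors.
 * Fragments of a multi-descriptor packet are chained through
 * rxr_m_head/rxr_m_tail until an EOP descriptor completes the packet.
 * Returns nonzero if more work is left for a later pass.
 */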
3026 static int
3027 ixl_rxeof(struct ixl_softc *sc, struct ixl_rx_ring *rxr, u_int rxlimit)
3028 {
3029 struct ifnet *ifp = &sc->sc_ec.ec_if;
3030 struct ixl_rx_wb_desc_32 *ring, *rxd;
3031 struct ixl_rx_map *rxm;
3032 bus_dmamap_t map;
3033 unsigned int cons, prod;
3034 struct mbuf *m;
3035 uint64_t word, word0;
3036 unsigned int len;
3037 unsigned int mask;
3038 int done = 0, more = 0;
3039
3040 KASSERT(mutex_owned(&rxr->rxr_lock));
3041
3042 if (!ISSET(ifp->if_flags, IFF_RUNNING))
3043 return 0;
3044
3045 prod = rxr->rxr_prod;
3046 cons = rxr->rxr_cons;
3047
3048 if (cons == prod)
3049 return 0;
3050
3051 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
3052 0, IXL_DMA_LEN(&rxr->rxr_mem),
3053 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3054
3055 ring = IXL_DMA_KVA(&rxr->rxr_mem);
3056 mask = sc->sc_rx_ring_ndescs - 1;
3057
3058 do {
3059 if (rxlimit-- <= 0) {
3060 more = 1;
3061 break;
3062 }
3063
3064 rxd = &ring[cons];
3065
3066 word = le64toh(rxd->qword1);
3067
3068 if (!ISSET(word, IXL_RX_DESC_DD))
3069 break;
3070
3071 rxm = &rxr->rxr_maps[cons];
3072
3073 map = rxm->rxm_map;
3074 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3075 BUS_DMASYNC_POSTREAD);
3076 bus_dmamap_unload(sc->sc_dmat, map);
3077
3078 m = rxm->rxm_m;
3079 rxm->rxm_m = NULL;
3080
3081 KASSERT(m != NULL);
3082
3083 len = (word & IXL_RX_DESC_PLEN_MASK) >> IXL_RX_DESC_PLEN_SHIFT;
3084 m->m_len = len;
3085 m->m_pkthdr.len = 0;
3086
3087 m->m_next = NULL;
3088 *rxr->rxr_m_tail = m;
3089 rxr->rxr_m_tail = &m->m_next;
3090
3091 m = rxr->rxr_m_head;
3092 m->m_pkthdr.len += len;
3093
3094 if (ISSET(word, IXL_RX_DESC_EOP)) {
3095 word0 = le64toh(rxd->qword0);
3096
3097 if (ISSET(word, IXL_RX_DESC_L2TAG1P)) {
3098 vlan_set_tag(m,
3099 __SHIFTOUT(word0, IXL_RX_DESC_L2TAG1_MASK));
3100 }
3101
3102 if ((ifp->if_capenable & IXL_IFCAP_RXCSUM) != 0)
3103 ixl_rx_csum(m, word);
3104
3105 if (!ISSET(word,
3106 IXL_RX_DESC_RXE | IXL_RX_DESC_OVERSIZE)) {
3107 m_set_rcvif(m, ifp);
3108 rxr->rxr_ipackets++;
3109 rxr->rxr_ibytes += m->m_pkthdr.len;
3110 if_percpuq_enqueue(ifp->if_percpuq, m);
3111 } else {
3112 rxr->rxr_ierrors++;
3113 m_freem(m);
3114 }
3115
3116 rxr->rxr_m_head = NULL;
3117 rxr->rxr_m_tail = &rxr->rxr_m_head;
3118 }
3119
3120 cons++;
3121 cons &= mask;
3122
3123 done = 1;
3124 } while (cons != prod);
3125
3126 if (done) {
3127 rxr->rxr_cons = cons;
3128 if (ixl_rxfill(sc, rxr) == -1)
3129 rxr->rxr_iqdrops++;
3130 }
3131
3132 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
3133 0, IXL_DMA_LEN(&rxr->rxr_mem),
3134 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3135
3136 return more;
3137 }
3138
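/*
 * Refill the RX ring with cluster mbufs for every unrefreshed slot.
 * Returns -1 if any slot could not be filled; the caller counts that
 * as an input queue drop.
 */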
3139 static int
3140 ixl_rxfill(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3141 {
3142 struct ixl_rx_rd_desc_32 *ring, *rxd;
3143 struct ixl_rx_map *rxm;
3144 bus_dmamap_t map;
3145 struct mbuf *m;
3146 unsigned int prod;
3147 unsigned int slots;
3148 unsigned int mask;
3149 int post = 0, error = 0;
3150
3151 KASSERT(mutex_owned(&rxr->rxr_lock));
3152
3153 prod = rxr->rxr_prod;
3154 slots = ixl_rxr_unrefreshed(rxr->rxr_prod, rxr->rxr_cons,
3155 sc->sc_rx_ring_ndescs);
3156
3157 ring = IXL_DMA_KVA(&rxr->rxr_mem);
3158 mask = sc->sc_rx_ring_ndescs - 1;
3159
3160 	if (__predict_false(slots == 0))
3161 return -1;
3162
3163 do {
3164 rxm = &rxr->rxr_maps[prod];
3165
3166 MGETHDR(m, M_DONTWAIT, MT_DATA);
3167 if (m == NULL) {
3168 rxr->rxr_mgethdr_failed.ev_count++;
3169 error = -1;
3170 break;
3171 }
3172
3173 MCLGET(m, M_DONTWAIT);
3174 if (!ISSET(m->m_flags, M_EXT)) {
3175 rxr->rxr_mgetcl_failed.ev_count++;
3176 error = -1;
3177 m_freem(m);
3178 break;
3179 }
3180
3181 m->m_len = m->m_pkthdr.len = MCLBYTES + ETHER_ALIGN;
3182 m_adj(m, ETHER_ALIGN);
3183
3184 map = rxm->rxm_map;
3185
3186 if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
3187 BUS_DMA_READ | BUS_DMA_NOWAIT) != 0) {
3188 rxr->rxr_mbuf_load_failed.ev_count++;
3189 error = -1;
3190 m_freem(m);
3191 break;
3192 }
3193
3194 rxm->rxm_m = m;
3195
3196 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3197 BUS_DMASYNC_PREREAD);
3198
3199 rxd = &ring[prod];
3200
3201 rxd->paddr = htole64(map->dm_segs[0].ds_addr);
3202 rxd->haddr = htole64(0);
3203
3204 prod++;
3205 prod &= mask;
3206
3207 post = 1;
3208
3209 } while (--slots);
3210
3211 if (post) {
3212 rxr->rxr_prod = prod;
3213 ixl_wr(sc, rxr->rxr_tail, prod);
3214 }
3215
3216 return error;
3217 }
3218
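/*
 * Run TX and RX completion for a queue pair under the respective ring
 * locks, with the given work limits.  The return value has bit 0 set
 * if TX work remains and bit 1 set if RX work remains.
 */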
3219 static inline int
3220 ixl_handle_queue_common(struct ixl_softc *sc, struct ixl_queue_pair *qp,
3221 u_int txlimit, struct evcnt *txevcnt,
3222 u_int rxlimit, struct evcnt *rxevcnt)
3223 {
3224 struct ixl_tx_ring *txr = qp->qp_txr;
3225 struct ixl_rx_ring *rxr = qp->qp_rxr;
3226 int txmore, rxmore;
3227 int rv;
3228
3229 KASSERT(!mutex_owned(&txr->txr_lock));
3230 KASSERT(!mutex_owned(&rxr->rxr_lock));
3231
3232 mutex_enter(&txr->txr_lock);
3233 txevcnt->ev_count++;
3234 txmore = ixl_txeof(sc, txr, txlimit);
3235 mutex_exit(&txr->txr_lock);
3236
3237 mutex_enter(&rxr->rxr_lock);
3238 rxevcnt->ev_count++;
3239 rxmore = ixl_rxeof(sc, rxr, rxlimit);
3240 mutex_exit(&rxr->rxr_lock);
3241
3242 rv = txmore | (rxmore << 1);
3243
3244 return rv;
3245 }
3246
3247 static void
3248 ixl_sched_handle_queue(struct ixl_softc *sc, struct ixl_queue_pair *qp)
3249 {
3250
3251 if (qp->qp_workqueue)
3252 ixl_work_add(sc->sc_workq_txrx, &qp->qp_task);
3253 else
3254 softint_schedule(qp->qp_si);
3255 }
3256
3257 static int
3258 ixl_intr(void *xsc)
3259 {
3260 struct ixl_softc *sc = xsc;
3261 struct ixl_tx_ring *txr;
3262 struct ixl_rx_ring *rxr;
3263 uint32_t icr, rxintr, txintr;
3264 int rv = 0;
3265 unsigned int i;
3266
3267 KASSERT(sc != NULL);
3268
3269 ixl_enable_other_intr(sc);
3270 icr = ixl_rd(sc, I40E_PFINT_ICR0);
3271
3272 if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) {
3273 atomic_inc_64(&sc->sc_event_atq.ev_count);
3274 ixl_atq_done(sc);
3275 ixl_work_add(sc->sc_workq, &sc->sc_arq_task);
3276 rv = 1;
3277 }
3278
3279 if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) {
3280 atomic_inc_64(&sc->sc_event_link.ev_count);
3281 ixl_work_add(sc->sc_workq, &sc->sc_link_state_task);
3282 rv = 1;
3283 }
3284
3285 rxintr = icr & I40E_INTR_NOTX_RX_MASK;
3286 txintr = icr & I40E_INTR_NOTX_TX_MASK;
3287
3288 if (txintr || rxintr) {
3289 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
3290 txr = sc->sc_qps[i].qp_txr;
3291 rxr = sc->sc_qps[i].qp_rxr;
3292
3293 ixl_handle_queue_common(sc, &sc->sc_qps[i],
3294 IXL_TXRX_PROCESS_UNLIMIT, &txr->txr_intr,
3295 IXL_TXRX_PROCESS_UNLIMIT, &rxr->rxr_intr);
3296 }
3297 rv = 1;
3298 }
3299
3300 return rv;
3301 }
3302
3303 static int
3304 ixl_queue_intr(void *xqp)
3305 {
3306 struct ixl_queue_pair *qp = xqp;
3307 struct ixl_tx_ring *txr = qp->qp_txr;
3308 struct ixl_rx_ring *rxr = qp->qp_rxr;
3309 struct ixl_softc *sc = qp->qp_sc;
3310 u_int txlimit, rxlimit;
3311 int more;
3312
3313 txlimit = sc->sc_tx_intr_process_limit;
3314 rxlimit = sc->sc_rx_intr_process_limit;
3315 qp->qp_workqueue = sc->sc_txrx_workqueue;
3316
3317 more = ixl_handle_queue_common(sc, qp,
3318 txlimit, &txr->txr_intr, rxlimit, &rxr->rxr_intr);
3319
3320 if (more != 0) {
3321 ixl_sched_handle_queue(sc, qp);
3322 } else {
3323 /* for ALTQ */
3324 if (txr->txr_qid == 0)
3325 if_schedule_deferred_start(&sc->sc_ec.ec_if);
3326 softint_schedule(txr->txr_si);
3327
3328 ixl_enable_queue_intr(sc, qp);
3329 }
3330
3331 return 1;
3332 }
3333
3334 static void
3335 ixl_handle_queue(void *xqp)
3336 {
3337 struct ixl_queue_pair *qp = xqp;
3338 struct ixl_softc *sc = qp->qp_sc;
3339 struct ixl_tx_ring *txr = qp->qp_txr;
3340 struct ixl_rx_ring *rxr = qp->qp_rxr;
3341 u_int txlimit, rxlimit;
3342 int more;
3343
3344 txlimit = sc->sc_tx_process_limit;
3345 rxlimit = sc->sc_rx_process_limit;
3346
3347 more = ixl_handle_queue_common(sc, qp,
3348 txlimit, &txr->txr_defer, rxlimit, &rxr->rxr_defer);
3349
3350 if (more != 0)
3351 ixl_sched_handle_queue(sc, qp);
3352 else
3353 ixl_enable_queue_intr(sc, qp);
3354 }
3355
3356 static inline void
3357 ixl_print_hmc_error(struct ixl_softc *sc, uint32_t reg)
3358 {
3359 uint32_t hmc_idx, hmc_isvf;
3360 uint32_t hmc_errtype, hmc_objtype, hmc_data;
3361
3362 hmc_idx = reg & I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK;
3363 hmc_idx = hmc_idx >> I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT;
3364 hmc_isvf = reg & I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK;
3365 hmc_isvf = hmc_isvf >> I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT;
3366 hmc_errtype = reg & I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK;
3367 hmc_errtype = hmc_errtype >> I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT;
3368 hmc_objtype = reg & I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK;
3369 hmc_objtype = hmc_objtype >> I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT;
3370 hmc_data = ixl_rd(sc, I40E_PFHMC_ERRORDATA);
3371
3372 device_printf(sc->sc_dev,
3373 "HMC Error (idx=0x%x, isvf=0x%x, err=0x%x, obj=0x%x, data=0x%x)\n",
3374 hmc_idx, hmc_isvf, hmc_errtype, hmc_objtype, hmc_data);
3375 }
3376
3377 static int
3378 ixl_other_intr(void *xsc)
3379 {
3380 struct ixl_softc *sc = xsc;
3381 uint32_t icr, mask, reg;
3382 	int rv = 0;
3383
3384 icr = ixl_rd(sc, I40E_PFINT_ICR0);
3385 mask = ixl_rd(sc, I40E_PFINT_ICR0_ENA);
3386
3387 if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) {
3388 atomic_inc_64(&sc->sc_event_atq.ev_count);
3389 ixl_atq_done(sc);
3390 ixl_work_add(sc->sc_workq, &sc->sc_arq_task);
3391 rv = 1;
3392 }
3393
3394 if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) {
3395 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3396 device_printf(sc->sc_dev, "link stat changed\n");
3397
3398 atomic_inc_64(&sc->sc_event_link.ev_count);
3399 ixl_work_add(sc->sc_workq, &sc->sc_link_state_task);
3400 rv = 1;
3401 }
3402
3403 if (ISSET(icr, I40E_PFINT_ICR0_GRST_MASK)) {
3404 CLR(mask, I40E_PFINT_ICR0_ENA_GRST_MASK);
3405 reg = ixl_rd(sc, I40E_GLGEN_RSTAT);
3406 reg = reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK;
3407 reg = reg >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
3408
3409 device_printf(sc->sc_dev, "GRST: %s\n",
3410 reg == I40E_RESET_CORER ? "CORER" :
3411 reg == I40E_RESET_GLOBR ? "GLOBR" :
3412 reg == I40E_RESET_EMPR ? "EMPR" :
3413 "POR");
3414 }
3415
3416 if (ISSET(icr, I40E_PFINT_ICR0_ECC_ERR_MASK))
3417 atomic_inc_64(&sc->sc_event_ecc_err.ev_count);
3418 if (ISSET(icr, I40E_PFINT_ICR0_PCI_EXCEPTION_MASK))
3419 atomic_inc_64(&sc->sc_event_pci_exception.ev_count);
3420 if (ISSET(icr, I40E_PFINT_ICR0_PE_CRITERR_MASK))
3421 atomic_inc_64(&sc->sc_event_crit_err.ev_count);
3422
3423 if (ISSET(icr, IXL_ICR0_CRIT_ERR_MASK)) {
3424 CLR(mask, IXL_ICR0_CRIT_ERR_MASK);
3425 device_printf(sc->sc_dev, "critical error\n");
3426 }
3427
3428 if (ISSET(icr, I40E_PFINT_ICR0_HMC_ERR_MASK)) {
3429 reg = ixl_rd(sc, I40E_PFHMC_ERRORINFO);
3430 if (ISSET(reg, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK))
3431 ixl_print_hmc_error(sc, reg);
3432 ixl_wr(sc, I40E_PFHMC_ERRORINFO, 0);
3433 }
3434
3435 ixl_wr(sc, I40E_PFINT_ICR0_ENA, mask);
3436 ixl_flush(sc);
3437 ixl_enable_other_intr(sc);
3438 return rv;
3439 }
3440
3441 static void
3442 ixl_get_link_status_done(struct ixl_softc *sc,
3443 const struct ixl_aq_desc *iaq)
3444 {
3445
3446 ixl_link_state_update(sc, iaq);
3447 }
3448
3449 static void
3450 ixl_get_link_status(void *xsc)
3451 {
3452 struct ixl_softc *sc = xsc;
3453 struct ixl_aq_desc *iaq;
3454 struct ixl_aq_link_param *param;
3455
3456 memset(&sc->sc_link_state_atq, 0, sizeof(sc->sc_link_state_atq));
3457 iaq = &sc->sc_link_state_atq.iatq_desc;
3458 iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
3459 param = (struct ixl_aq_link_param *)iaq->iaq_param;
3460 param->notify = IXL_AQ_LINK_NOTIFY;
3461
3462 ixl_atq_set(&sc->sc_link_state_atq, ixl_get_link_status_done);
3463 (void)ixl_atq_post(sc, &sc->sc_link_state_atq);
3464 }
3465
3466 static void
3467 ixl_link_state_update(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
3468 {
3469 struct ifnet *ifp = &sc->sc_ec.ec_if;
3470 int link_state;
3471
3472 KASSERT(kpreempt_disabled());
3473
3474 link_state = ixl_set_link_status(sc, iaq);
3475
3476 if (ifp->if_link_state != link_state)
3477 if_link_state_change(ifp, link_state);
3478
3479 if (link_state != LINK_STATE_DOWN) {
3480 if_schedule_deferred_start(ifp);
3481 }
3482 }
3483
3484 static void
3485 ixl_aq_dump(const struct ixl_softc *sc, const struct ixl_aq_desc *iaq,
3486 const char *msg)
3487 {
3488 char buf[512];
3489 size_t len;
3490
3491 len = sizeof(buf);
3492 buf[--len] = '\0';
3493
3494 device_printf(sc->sc_dev, "%s\n", msg);
3495 snprintb(buf, len, IXL_AQ_FLAGS_FMT, le16toh(iaq->iaq_flags));
3496 device_printf(sc->sc_dev, "flags %s opcode %04x\n",
3497 buf, le16toh(iaq->iaq_opcode));
3498 device_printf(sc->sc_dev, "datalen %u retval %u\n",
3499 le16toh(iaq->iaq_datalen), le16toh(iaq->iaq_retval));
3500 device_printf(sc->sc_dev, "cookie %016" PRIx64 "\n", iaq->iaq_cookie);
3501 device_printf(sc->sc_dev, "%08x %08x %08x %08x\n",
3502 le32toh(iaq->iaq_param[0]), le32toh(iaq->iaq_param[1]),
3503 le32toh(iaq->iaq_param[2]), le32toh(iaq->iaq_param[3]));
3504 }
3505
3506 static void
3507 ixl_arq(void *xsc)
3508 {
3509 struct ixl_softc *sc = xsc;
3510 struct ixl_aq_desc *arq, *iaq;
3511 struct ixl_aq_buf *aqb;
3512 unsigned int cons = sc->sc_arq_cons;
3513 unsigned int prod;
3514 int done = 0;
3515
3516 prod = ixl_rd(sc, sc->sc_aq_regs->arq_head) &
3517 sc->sc_aq_regs->arq_head_mask;
3518
3519 if (cons == prod)
3520 goto done;
3521
3522 arq = IXL_DMA_KVA(&sc->sc_arq);
3523
3524 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3525 0, IXL_DMA_LEN(&sc->sc_arq),
3526 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3527
3528 do {
3529 iaq = &arq[cons];
3530 aqb = sc->sc_arq_live[cons];
3531
3532 KASSERT(aqb != NULL);
3533
3534 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,
3535 BUS_DMASYNC_POSTREAD);
3536
3537 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3538 ixl_aq_dump(sc, iaq, "arq event");
3539
3540 switch (iaq->iaq_opcode) {
3541 case htole16(IXL_AQ_OP_PHY_LINK_STATUS):
3542 kpreempt_disable();
3543 ixl_link_state_update(sc, iaq);
3544 kpreempt_enable();
3545 break;
3546 }
3547
3548 memset(iaq, 0, sizeof(*iaq));
3549 sc->sc_arq_live[cons] = NULL;
3550 SIMPLEQ_INSERT_TAIL(&sc->sc_arq_idle, aqb, aqb_entry);
3551
3552 cons++;
3553 cons &= IXL_AQ_MASK;
3554
3555 done = 1;
3556 } while (cons != prod);
3557
3558 if (done) {
3559 sc->sc_arq_cons = cons;
3560 ixl_arq_fill(sc);
3561 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3562 0, IXL_DMA_LEN(&sc->sc_arq),
3563 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3564 }
3565
3566 done:
3567 ixl_enable_other_intr(sc);
3568 }
3569
3570 static void
3571 ixl_atq_set(struct ixl_atq *iatq,
3572 void (*fn)(struct ixl_softc *, const struct ixl_aq_desc *))
3573 {
3574
3575 iatq->iatq_fn = fn;
3576 }
3577
3578 static int
3579 ixl_atq_post_locked(struct ixl_softc *sc, struct ixl_atq *iatq)
3580 {
3581 struct ixl_aq_desc *atq, *slot;
3582 unsigned int prod, cons, prod_next;
3583
3584 /* assert locked */
3585 KASSERT(mutex_owned(&sc->sc_atq_lock));
3586
3587 atq = IXL_DMA_KVA(&sc->sc_atq);
3588 prod = sc->sc_atq_prod;
3589 cons = sc->sc_atq_cons;
3590 	prod_next = (prod + 1) & IXL_AQ_MASK;
3591
3592 if (cons == prod_next)
3593 return ENOMEM;
3594
3595 slot = &atq[prod];
3596
3597 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3598 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
3599
3600 *slot = iatq->iatq_desc;
3601 slot->iaq_cookie = (uint64_t)((intptr_t)iatq);
3602
3603 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3604 ixl_aq_dump(sc, slot, "atq command");
3605
3606 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3607 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
3608
3609 sc->sc_atq_prod = prod_next;
3610 ixl_wr(sc, sc->sc_aq_regs->atq_tail, sc->sc_atq_prod);
3611
3612 return 0;
3613 }
3614
3615 static int
3616 ixl_atq_post(struct ixl_softc *sc, struct ixl_atq *iatq)
3617 {
3618 int rv;
3619
3620 mutex_enter(&sc->sc_atq_lock);
3621 rv = ixl_atq_post_locked(sc, iatq);
3622 mutex_exit(&sc->sc_atq_lock);
3623
3624 return rv;
3625 }
3626
3627 static void
3628 ixl_atq_done_locked(struct ixl_softc *sc)
3629 {
3630 struct ixl_aq_desc *atq, *slot;
3631 struct ixl_atq *iatq;
3632 unsigned int cons;
3633 unsigned int prod;
3634
3635 KASSERT(mutex_owned(&sc->sc_atq_lock));
3636
3637 prod = sc->sc_atq_prod;
3638 cons = sc->sc_atq_cons;
3639
3640 if (prod == cons)
3641 return;
3642
3643 atq = IXL_DMA_KVA(&sc->sc_atq);
3644
3645 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3646 0, IXL_DMA_LEN(&sc->sc_atq),
3647 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3648
3649 do {
3650 slot = &atq[cons];
3651 if (!ISSET(slot->iaq_flags, htole16(IXL_AQ_DD)))
3652 break;
3653
3654 iatq = (struct ixl_atq *)((intptr_t)slot->iaq_cookie);
3655 iatq->iatq_desc = *slot;
3656
3657 memset(slot, 0, sizeof(*slot));
3658
3659 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3660 ixl_aq_dump(sc, &iatq->iatq_desc, "atq response");
3661
3662 (*iatq->iatq_fn)(sc, &iatq->iatq_desc);
3663
3664 cons++;
3665 cons &= IXL_AQ_MASK;
3666 } while (cons != prod);
3667
3668 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3669 0, IXL_DMA_LEN(&sc->sc_atq),
3670 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3671
3672 sc->sc_atq_cons = cons;
3673 }
3674
3675 static void
3676 ixl_atq_done(struct ixl_softc *sc)
3677 {
3678
3679 mutex_enter(&sc->sc_atq_lock);
3680 ixl_atq_done_locked(sc);
3681 mutex_exit(&sc->sc_atq_lock);
3682 }
3683
3684 static void
3685 ixl_wakeup(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
3686 {
3687
3688 KASSERT(mutex_owned(&sc->sc_atq_lock));
3689
3690 cv_signal(&sc->sc_atq_cv);
3691 }
3692
3693 static int
3694 ixl_atq_exec(struct ixl_softc *sc, struct ixl_atq *iatq)
3695 {
3696 int error;
3697
3698 KASSERT(iatq->iatq_desc.iaq_cookie == 0);
3699
3700 ixl_atq_set(iatq, ixl_wakeup);
3701
3702 mutex_enter(&sc->sc_atq_lock);
3703 error = ixl_atq_post_locked(sc, iatq);
3704 if (error) {
3705 mutex_exit(&sc->sc_atq_lock);
3706 return error;
3707 }
3708
3709 error = cv_timedwait(&sc->sc_atq_cv, &sc->sc_atq_lock,
3710 IXL_ATQ_EXEC_TIMEOUT);
3711 mutex_exit(&sc->sc_atq_lock);
3712
3713 return error;
3714 }
3715
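/*
 * Issue a single admin queue command and busy-wait for its completion,
 * polling up to tm milliseconds.  Used for the attach-time setup
 * commands in this file, where sleeping on the admin queue interrupt
 * is not yet an option.
 */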
3716 static int
3717 ixl_atq_poll(struct ixl_softc *sc, struct ixl_aq_desc *iaq, unsigned int tm)
3718 {
3719 struct ixl_aq_desc *atq, *slot;
3720 unsigned int prod;
3721 unsigned int t = 0;
3722
3723 mutex_enter(&sc->sc_atq_lock);
3724
3725 atq = IXL_DMA_KVA(&sc->sc_atq);
3726 prod = sc->sc_atq_prod;
3727 slot = atq + prod;
3728
3729 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3730 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
3731
3732 *slot = *iaq;
3733 slot->iaq_flags |= htole16(IXL_AQ_SI);
3734
3735 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3736 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
3737
3738 prod++;
3739 prod &= IXL_AQ_MASK;
3740 sc->sc_atq_prod = prod;
3741 ixl_wr(sc, sc->sc_aq_regs->atq_tail, prod);
3742
3743 while (ixl_rd(sc, sc->sc_aq_regs->atq_head) != prod) {
3744 delaymsec(1);
3745
3746 if (t++ > tm) {
3747 mutex_exit(&sc->sc_atq_lock);
3748 return ETIMEDOUT;
3749 }
3750 }
3751
3752 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3753 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTREAD);
3754 *iaq = *slot;
3755 memset(slot, 0, sizeof(*slot));
3756 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3757 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREREAD);
3758
3759 sc->sc_atq_cons = prod;
3760
3761 mutex_exit(&sc->sc_atq_lock);
3762
3763 return 0;
3764 }
3765
3766 static int
3767 ixl_get_version(struct ixl_softc *sc)
3768 {
3769 struct ixl_aq_desc iaq;
3770 uint32_t fwbuild, fwver, apiver;
3771 uint16_t api_maj_ver, api_min_ver;
3772
3773 memset(&iaq, 0, sizeof(iaq));
3774 iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VERSION);
3775
3776 	/* poison retval so an unanswered command is not taken as IXL_AQ_RC_OK */
	iaq.iaq_retval = htole16(23);
3777
3778 if (ixl_atq_poll(sc, &iaq, 2000) != 0)
3779 return ETIMEDOUT;
3780 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK))
3781 return EIO;
3782
3783 fwbuild = le32toh(iaq.iaq_param[1]);
3784 fwver = le32toh(iaq.iaq_param[2]);
3785 apiver = le32toh(iaq.iaq_param[3]);
3786
3787 api_maj_ver = (uint16_t)apiver;
3788 api_min_ver = (uint16_t)(apiver >> 16);
3789
3790 aprint_normal(", FW %hu.%hu.%05u API %hu.%hu", (uint16_t)fwver,
3791 (uint16_t)(fwver >> 16), fwbuild, api_maj_ver, api_min_ver);
3792
3793 sc->sc_rxctl_atq = true;
3794 if (sc->sc_mac_type == I40E_MAC_X722) {
3795 if (api_maj_ver == 1 && api_min_ver < 5) {
3796 sc->sc_rxctl_atq = false;
3797 }
3798 }
3799
3800 return 0;
3801 }
3802
3803 static int
3804 ixl_pxe_clear(struct ixl_softc *sc)
3805 {
3806 struct ixl_aq_desc iaq;
3807 int rv;
3808
3809 memset(&iaq, 0, sizeof(iaq));
3810 iaq.iaq_opcode = htole16(IXL_AQ_OP_CLEAR_PXE_MODE);
3811 iaq.iaq_param[0] = htole32(0x2);
3812
3813 rv = ixl_atq_poll(sc, &iaq, 250);
3814
3815 ixl_wr(sc, I40E_GLLAN_RCTL_0, 0x1);
3816
3817 if (rv != 0)
3818 return ETIMEDOUT;
3819
3820 switch (iaq.iaq_retval) {
3821 case htole16(IXL_AQ_RC_OK):
3822 case htole16(IXL_AQ_RC_EEXIST):
3823 break;
3824 default:
3825 return EIO;
3826 }
3827
3828 return 0;
3829 }
3830
3831 static int
3832 ixl_lldp_shut(struct ixl_softc *sc)
3833 {
3834 struct ixl_aq_desc iaq;
3835
3836 memset(&iaq, 0, sizeof(iaq));
3837 iaq.iaq_opcode = htole16(IXL_AQ_OP_LLDP_STOP_AGENT);
3838 iaq.iaq_param[0] = htole32(IXL_LLDP_SHUTDOWN);
3839
3840 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
3841 aprint_error_dev(sc->sc_dev, "STOP LLDP AGENT timeout\n");
3842 return -1;
3843 }
3844
3845 switch (iaq.iaq_retval) {
3846 case htole16(IXL_AQ_RC_EMODE):
3847 case htole16(IXL_AQ_RC_EPERM):
3848 		/* ignore silently */
		/* FALLTHROUGH */
3849 default:
3850 break;
3851 }
3852
3853 return 0;
3854 }
3855
3856 static void
3857 ixl_parse_hw_capability(struct ixl_softc *sc, struct ixl_aq_capability *cap)
3858 {
3859 uint16_t id;
3860 uint32_t number, logical_id;
3861
3862 id = le16toh(cap->cap_id);
3863 number = le32toh(cap->number);
3864 logical_id = le32toh(cap->logical_id);
3865
3866 switch (id) {
3867 case IXL_AQ_CAP_RSS:
3868 sc->sc_rss_table_size = number;
3869 sc->sc_rss_table_entry_width = logical_id;
3870 break;
3871 case IXL_AQ_CAP_RXQ:
3872 case IXL_AQ_CAP_TXQ:
3873 sc->sc_nqueue_pairs_device = MIN(number,
3874 sc->sc_nqueue_pairs_device);
3875 break;
3876 }
3877 }
3878
3879 static int
3880 ixl_get_hw_capabilities(struct ixl_softc *sc)
3881 {
3882 struct ixl_dmamem idm;
3883 struct ixl_aq_desc iaq;
3884 struct ixl_aq_capability *caps;
3885 size_t i, ncaps;
3886 bus_size_t caps_size;
3887 uint16_t status;
3888 int rv;
3889
3890 caps_size = sizeof(caps[0]) * 40;
3891 memset(&iaq, 0, sizeof(iaq));
3892 iaq.iaq_opcode = htole16(IXL_AQ_OP_LIST_FUNC_CAP);
3893
3894 do {
3895 if (ixl_dmamem_alloc(sc, &idm, caps_size, 0) != 0) {
3896 return -1;
3897 }
3898
3899 iaq.iaq_flags = htole16(IXL_AQ_BUF |
3900 (caps_size > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
3901 iaq.iaq_datalen = htole16(caps_size);
3902 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
3903
3904 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0,
3905 IXL_DMA_LEN(&idm), BUS_DMASYNC_PREREAD);
3906
3907 rv = ixl_atq_poll(sc, &iaq, 250);
3908
3909 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0,
3910 IXL_DMA_LEN(&idm), BUS_DMASYNC_POSTREAD);
3911
3912 if (rv != 0) {
3913 aprint_error(", HW capabilities timeout\n");
3914 goto done;
3915 }
3916
3917 status = le16toh(iaq.iaq_retval);
3918
3919 if (status == IXL_AQ_RC_ENOMEM) {
3920 caps_size = le16toh(iaq.iaq_datalen);
3921 ixl_dmamem_free(sc, &idm);
3922 }
3923 } while (status == IXL_AQ_RC_ENOMEM);
3924
3925 if (status != IXL_AQ_RC_OK) {
3926 aprint_error(", HW capabilities error\n");
3927 goto done;
3928 }
3929
3930 caps = IXL_DMA_KVA(&idm);
3931 ncaps = le16toh(iaq.iaq_param[1]);
3932
3933 for (i = 0; i < ncaps; i++) {
3934 ixl_parse_hw_capability(sc, &caps[i]);
3935 }
3936
3937 done:
3938 ixl_dmamem_free(sc, &idm);
3939 return rv;
3940 }
3941
3942 static int
3943 ixl_get_mac(struct ixl_softc *sc)
3944 {
3945 struct ixl_dmamem idm;
3946 struct ixl_aq_desc iaq;
3947 struct ixl_aq_mac_addresses *addrs;
3948 int rv;
3949
3950 if (ixl_dmamem_alloc(sc, &idm, sizeof(*addrs), 0) != 0) {
3951 aprint_error(", unable to allocate mac addresses\n");
3952 return -1;
3953 }
3954
3955 memset(&iaq, 0, sizeof(iaq));
3956 iaq.iaq_flags = htole16(IXL_AQ_BUF);
3957 iaq.iaq_opcode = htole16(IXL_AQ_OP_MAC_ADDRESS_READ);
3958 iaq.iaq_datalen = htole16(sizeof(*addrs));
3959 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
3960
3961 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3962 BUS_DMASYNC_PREREAD);
3963
3964 rv = ixl_atq_poll(sc, &iaq, 250);
3965
3966 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3967 BUS_DMASYNC_POSTREAD);
3968
3969 if (rv != 0) {
3970 aprint_error(", MAC ADDRESS READ timeout\n");
3971 rv = -1;
3972 goto done;
3973 }
3974 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3975 aprint_error(", MAC ADDRESS READ error\n");
3976 rv = -1;
3977 goto done;
3978 }
3979
3980 addrs = IXL_DMA_KVA(&idm);
3981 if (!ISSET(iaq.iaq_param[0], htole32(IXL_AQ_MAC_PORT_VALID))) {
3982 printf(", port address is not valid\n");
3983 goto done;
3984 }
3985
3986 memcpy(sc->sc_enaddr, addrs->port, ETHER_ADDR_LEN);
3987 rv = 0;
3988
3989 done:
3990 ixl_dmamem_free(sc, &idm);
3991 return rv;
3992 }
3993
3994 static int
3995 ixl_get_switch_config(struct ixl_softc *sc)
3996 {
3997 struct ixl_dmamem idm;
3998 struct ixl_aq_desc iaq;
3999 struct ixl_aq_switch_config *hdr;
4000 struct ixl_aq_switch_config_element *elms, *elm;
4001 unsigned int nelm, i;
4002 int rv;
4003
4004 if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
4005 aprint_error_dev(sc->sc_dev,
4006 "unable to allocate switch config buffer\n");
4007 return -1;
4008 }
4009
4010 memset(&iaq, 0, sizeof(iaq));
4011 iaq.iaq_flags = htole16(IXL_AQ_BUF |
4012 (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4013 iaq.iaq_opcode = htole16(IXL_AQ_OP_SWITCH_GET_CONFIG);
4014 iaq.iaq_datalen = htole16(IXL_AQ_BUFLEN);
4015 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
4016
4017 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4018 BUS_DMASYNC_PREREAD);
4019
4020 rv = ixl_atq_poll(sc, &iaq, 250);
4021
4022 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4023 BUS_DMASYNC_POSTREAD);
4024
4025 if (rv != 0) {
4026 aprint_error_dev(sc->sc_dev, "GET SWITCH CONFIG timeout\n");
4027 rv = -1;
4028 goto done;
4029 }
4030 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4031 aprint_error_dev(sc->sc_dev, "GET SWITCH CONFIG error\n");
4032 rv = -1;
4033 goto done;
4034 }
4035
4036 hdr = IXL_DMA_KVA(&idm);
4037 elms = (struct ixl_aq_switch_config_element *)(hdr + 1);
4038
4039 nelm = le16toh(hdr->num_reported);
4040 if (nelm < 1) {
4041 aprint_error_dev(sc->sc_dev, "no switch config available\n");
4042 rv = -1;
4043 goto done;
4044 }
4045
4046 for (i = 0; i < nelm; i++) {
4047 elm = &elms[i];
4048
4049 aprint_debug_dev(sc->sc_dev,
4050 "type %x revision %u seid %04x\n",
4051 elm->type, elm->revision, le16toh(elm->seid));
4052 aprint_debug_dev(sc->sc_dev,
4053 "uplink %04x downlink %04x\n",
4054 le16toh(elm->uplink_seid),
4055 le16toh(elm->downlink_seid));
4056 aprint_debug_dev(sc->sc_dev,
4057 "conntype %x scheduler %04x extra %04x\n",
4058 elm->connection_type,
4059 le16toh(elm->scheduler_id),
4060 le16toh(elm->element_info));
4061 }
4062
4063 elm = &elms[0];
4064
4065 sc->sc_uplink_seid = elm->uplink_seid;
4066 sc->sc_downlink_seid = elm->downlink_seid;
4067 sc->sc_seid = elm->seid;
4068
4069 if ((sc->sc_uplink_seid == htole16(0)) !=
4070 (sc->sc_downlink_seid == htole16(0))) {
4071 aprint_error_dev(sc->sc_dev, "SEIDs are misconfigured\n");
4072 rv = -1;
4073 goto done;
4074 }
4075
4076 done:
4077 ixl_dmamem_free(sc, &idm);
4078 return rv;
4079 }
4080
4081 static int
4082 ixl_phy_mask_ints(struct ixl_softc *sc)
4083 {
4084 struct ixl_aq_desc iaq;
4085
4086 memset(&iaq, 0, sizeof(iaq));
4087 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_EVENT_MASK);
4088 iaq.iaq_param[2] = htole32(IXL_AQ_PHY_EV_MASK &
4089 ~(IXL_AQ_PHY_EV_LINK_UPDOWN | IXL_AQ_PHY_EV_MODULE_QUAL_FAIL |
4090 IXL_AQ_PHY_EV_MEDIA_NA));
4091
4092 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4093 aprint_error_dev(sc->sc_dev, "SET PHY EVENT MASK timeout\n");
4094 return -1;
4095 }
4096 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4097 aprint_error_dev(sc->sc_dev, "SET PHY EVENT MASK error\n");
4098 return -1;
4099 }
4100
4101 return 0;
4102 }
4103
4104 static int
4105 ixl_get_phy_abilities(struct ixl_softc *sc, struct ixl_dmamem *idm)
4106 {
4107 struct ixl_aq_desc iaq;
4108 int rv;
4109
4110 memset(&iaq, 0, sizeof(iaq));
4111 iaq.iaq_flags = htole16(IXL_AQ_BUF |
4112 (IXL_DMA_LEN(idm) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4113 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_GET_ABILITIES);
4114 iaq.iaq_datalen = htole16(IXL_DMA_LEN(idm));
4115 iaq.iaq_param[0] = htole32(IXL_AQ_PHY_REPORT_INIT);
4116 ixl_aq_dva(&iaq, IXL_DMA_DVA(idm));
4117
4118 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
4119 BUS_DMASYNC_PREREAD);
4120
4121 rv = ixl_atq_poll(sc, &iaq, 250);
4122
4123 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
4124 BUS_DMASYNC_POSTREAD);
4125
4126 if (rv != 0)
4127 return -1;
4128
4129 return le16toh(iaq.iaq_retval);
4130 }
4131
4132 static int
4133 ixl_get_phy_types(struct ixl_softc *sc, uint64_t *phy_types_ptr)
4134 {
4135 struct ixl_dmamem idm;
4136 struct ixl_aq_phy_abilities *phy;
4137 uint64_t phy_types;
4138 int rv;
4139
4140 if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
4141 aprint_error_dev(sc->sc_dev,
4142 "unable to allocate phy abilities buffer\n");
4143 return -1;
4144 }
4145
4146 rv = ixl_get_phy_abilities(sc, &idm);
4147 switch (rv) {
4148 case -1:
4149 aprint_error_dev(sc->sc_dev, "GET PHY ABILITIES timeout\n");
4150 goto done;
4151 case IXL_AQ_RC_OK:
4152 break;
4153 case IXL_AQ_RC_EIO:
4154 aprint_error_dev(sc->sc_dev, "unable to query phy types\n");
4155 break;
4156 default:
4157 aprint_error_dev(sc->sc_dev,
4158 "GET PHY ABILITIES error %u\n", rv);
4159 goto done;
4160 }
4161
4162 phy = IXL_DMA_KVA(&idm);
4163
4164 phy_types = le32toh(phy->phy_type);
4165 phy_types |= (uint64_t)le32toh(phy->phy_type_ext) << 32;
4166
4167 *phy_types_ptr = phy_types;
4168
4169 rv = 0;
4170
4171 done:
4172 ixl_dmamem_free(sc, &idm);
4173 return rv;
4174 }
4175
4176 static int
4177 ixl_get_link_status_poll(struct ixl_softc *sc)
4178 {
4179 struct ixl_aq_desc iaq;
4180 struct ixl_aq_link_param *param;
4181 int link;
4182
4183 memset(&iaq, 0, sizeof(iaq));
4184 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
4185 param = (struct ixl_aq_link_param *)iaq.iaq_param;
4186 param->notify = IXL_AQ_LINK_NOTIFY;
4187
4188 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4189 return ETIMEDOUT;
4190 }
4191 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4192 return EIO;
4193 }
4194
4195 link = ixl_set_link_status(sc, &iaq);
4196 sc->sc_ec.ec_if.if_link_state = link;
4197
4198 return 0;
4199 }
4200
4201 static int
4202 ixl_get_vsi(struct ixl_softc *sc)
4203 {
4204 struct ixl_dmamem *vsi = &sc->sc_scratch;
4205 struct ixl_aq_desc iaq;
4206 struct ixl_aq_vsi_param *param;
4207 struct ixl_aq_vsi_reply *reply;
4208 struct ixl_aq_vsi_data *data;
4209 int rv;
4210
4211 /* grumble, vsi info isn't "known" at compile time */
4212
4213 memset(&iaq, 0, sizeof(iaq));
4214 iaq.iaq_flags = htole16(IXL_AQ_BUF |
4215 (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4216 iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VSI_PARAMS);
4217 iaq.iaq_datalen = htole16(IXL_DMA_LEN(vsi));
4218 ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
4219
4220 param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
4221 param->uplink_seid = sc->sc_seid;
4222
4223 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4224 BUS_DMASYNC_PREREAD);
4225
4226 rv = ixl_atq_poll(sc, &iaq, 250);
4227
4228 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4229 BUS_DMASYNC_POSTREAD);
4230
4231 if (rv != 0) {
4232 return ETIMEDOUT;
4233 }
4234
4235 switch (le16toh(iaq.iaq_retval)) {
4236 case IXL_AQ_RC_OK:
4237 break;
4238 case IXL_AQ_RC_ENOENT:
4239 return ENOENT;
4240 case IXL_AQ_RC_EACCES:
4241 return EACCES;
4242 default:
4243 return EIO;
4244 }
4245
4246 reply = (struct ixl_aq_vsi_reply *)iaq.iaq_param;
4247 sc->sc_vsi_number = reply->vsi_number;
4248 data = IXL_DMA_KVA(vsi);
4249 sc->sc_vsi_stat_counter_idx = le16toh(data->stat_counter_idx);
4250
4251 return 0;
4252 }
4253
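/*
 * Update the VSI parameters: program a contiguous queue mapping covering
 * sc_nqueue_pairs queues and set the port VLAN mode according to the
 * current ETHERCAP_VLAN_HWTAGGING setting, then push the scratch buffer
 * to the firmware with an UPDATE VSI PARAMETERS command.
 */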
4254 static int
4255 ixl_set_vsi(struct ixl_softc *sc)
4256 {
4257 struct ixl_dmamem *vsi = &sc->sc_scratch;
4258 struct ixl_aq_desc iaq;
4259 struct ixl_aq_vsi_param *param;
4260 struct ixl_aq_vsi_data *data = IXL_DMA_KVA(vsi);
4261 unsigned int qnum;
4262 uint16_t val;
4263 int rv;
4264
4265 qnum = sc->sc_nqueue_pairs - 1;
4266
4267 data->valid_sections = htole16(IXL_AQ_VSI_VALID_QUEUE_MAP |
4268 IXL_AQ_VSI_VALID_VLAN);
4269
4270 CLR(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_MASK));
4271 SET(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_CONTIG));
4272 data->queue_mapping[0] = htole16(0);
4273 data->tc_mapping[0] = htole16((0 << IXL_AQ_VSI_TC_Q_OFFSET_SHIFT) |
4274 (qnum << IXL_AQ_VSI_TC_Q_NUMBER_SHIFT));
4275
4276 val = le16toh(data->port_vlan_flags);
4277 CLR(val, IXL_AQ_VSI_PVLAN_MODE_MASK | IXL_AQ_VSI_PVLAN_EMOD_MASK);
4278 SET(val, IXL_AQ_VSI_PVLAN_MODE_ALL);
4279
4280 if (ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWTAGGING)) {
4281 SET(val, IXL_AQ_VSI_PVLAN_EMOD_STR_BOTH);
4282 } else {
4283 SET(val, IXL_AQ_VSI_PVLAN_EMOD_NOTHING);
4284 }
4285
4286 data->port_vlan_flags = htole16(val);
4287
4288 /* grumble, vsi info isn't "known" at compile time */
4289
4290 memset(&iaq, 0, sizeof(iaq));
4291 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD |
4292 (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4293 iaq.iaq_opcode = htole16(IXL_AQ_OP_UPD_VSI_PARAMS);
4294 iaq.iaq_datalen = htole16(IXL_DMA_LEN(vsi));
4295 ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
4296
4297 param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
4298 param->uplink_seid = sc->sc_seid;
4299
4300 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4301 BUS_DMASYNC_PREWRITE);
4302
4303 rv = ixl_atq_poll(sc, &iaq, 250);
4304
4305 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4306 BUS_DMASYNC_POSTWRITE);
4307
4308 if (rv != 0) {
4309 return ETIMEDOUT;
4310 }
4311
4312 switch (le16toh(iaq.iaq_retval)) {
4313 case IXL_AQ_RC_OK:
4314 break;
4315 case IXL_AQ_RC_ENOENT:
4316 return ENOENT;
4317 case IXL_AQ_RC_EACCES:
4318 return EACCES;
4319 default:
4320 return EIO;
4321 }
4322
4323 return 0;
4324 }
4325
4326 static void
4327 ixl_set_filter_control(struct ixl_softc *sc)
4328 {
4329 uint32_t reg;
4330
4331 reg = ixl_rd_rx_csr(sc, I40E_PFQF_CTL_0);
4332
4333 CLR(reg, I40E_PFQF_CTL_0_HASHLUTSIZE_MASK);
4334 SET(reg, I40E_HASH_LUT_SIZE_128 << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT);
4335
4336 SET(reg, I40E_PFQF_CTL_0_FD_ENA_MASK);
4337 SET(reg, I40E_PFQF_CTL_0_ETYPE_ENA_MASK);
4338 SET(reg, I40E_PFQF_CTL_0_MACVLAN_ENA_MASK);
4339
4340 ixl_wr_rx_csr(sc, I40E_PFQF_CTL_0, reg);
4341 }
4342
4343 static inline void
4344 ixl_get_default_rss_key(uint32_t *buf, size_t len)
4345 {
4346 size_t cplen;
4347 uint8_t rss_seed[RSS_KEYSIZE];
4348
4349 rss_getkey(rss_seed);
4350 memset(buf, 0, len);
4351
4352 cplen = MIN(len, sizeof(rss_seed));
4353 memcpy(buf, rss_seed, cplen);
4354 }
4355
4356 static void
4357 ixl_set_rss_key(struct ixl_softc *sc)
4358 {
4359 uint32_t rss_seed[IXL_RSS_KEY_SIZE_REG];
4360 size_t i;
4361
4362 ixl_get_default_rss_key(rss_seed, sizeof(rss_seed));
4363
4364 for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
4365 ixl_wr_rx_csr(sc, I40E_PFQF_HKEY(i), rss_seed[i]);
4366 }
4367 }
4368
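/*
 * Enable the default set of RSS packet classification types for this
 * MAC (X722 vs. XL710) by OR-ing the corresponding bits into the 64-bit
 * hash enable register pair PFQF_HENA(0)/(1).
 */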
4369 static void
4370 ixl_set_rss_pctype(struct ixl_softc *sc)
4371 {
4372 uint64_t set_hena = 0;
4373 uint32_t hena0, hena1;
4374
4375 if (sc->sc_mac_type == I40E_MAC_X722)
4376 set_hena = IXL_RSS_HENA_DEFAULT_X722;
4377 else
4378 set_hena = IXL_RSS_HENA_DEFAULT_XL710;
4379
4380 hena0 = ixl_rd_rx_csr(sc, I40E_PFQF_HENA(0));
4381 hena1 = ixl_rd_rx_csr(sc, I40E_PFQF_HENA(1));
4382
4383 SET(hena0, set_hena);
4384 SET(hena1, set_hena >> 32);
4385
4386 ixl_wr_rx_csr(sc, I40E_PFQF_HENA(0), hena0);
4387 ixl_wr_rx_csr(sc, I40E_PFQF_HENA(1), hena1);
4388 }
4389
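/*
 * Fill the RSS hash lookup table (HLUT) by striping the enabled queue
 * pairs across the table size reported by the RSS capability, then write
 * the table out to the PFQF_HLUT registers 32 bits at a time.
 */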
4390 static void
4391 ixl_set_rss_hlut(struct ixl_softc *sc)
4392 {
4393 unsigned int qid;
4394 uint8_t hlut_buf[512], lut_mask;
4395 uint32_t *hluts;
4396 size_t i, hluts_num;
4397
4398 lut_mask = (0x01 << sc->sc_rss_table_entry_width) - 1;
4399
4400 for (i = 0; i < sc->sc_rss_table_size; i++) {
4401 qid = i % sc->sc_nqueue_pairs;
4402 hlut_buf[i] = qid & lut_mask;
4403 }
4404
4405 hluts = (uint32_t *)hlut_buf;
4406 hluts_num = sc->sc_rss_table_size >> 2;
4407 for (i = 0; i < hluts_num; i++) {
4408 ixl_wr(sc, I40E_PFQF_HLUT(i), hluts[i]);
4409 }
4410 ixl_flush(sc);
4411 }
4412
4413 static void
4414 ixl_config_rss(struct ixl_softc *sc)
4415 {
4416
4417 KASSERT(mutex_owned(&sc->sc_cfg_lock));
4418
4419 ixl_set_rss_key(sc);
4420 ixl_set_rss_pctype(sc);
4421 ixl_set_rss_hlut(sc);
4422 }
4423
4424 static const struct ixl_phy_type *
4425 ixl_search_phy_type(uint8_t phy_type)
4426 {
4427 const struct ixl_phy_type *itype;
4428 uint64_t mask;
4429 unsigned int i;
4430
4431 if (phy_type >= 64)
4432 return NULL;
4433
4434 mask = 1ULL << phy_type;
4435
4436 for (i = 0; i < __arraycount(ixl_phy_type_map); i++) {
4437 itype = &ixl_phy_type_map[i];
4438
4439 if (ISSET(itype->phy_type, mask))
4440 return itype;
4441 }
4442
4443 return NULL;
4444 }
4445
4446 static uint64_t
4447 ixl_search_link_speed(uint8_t link_speed)
4448 {
4449 const struct ixl_speed_type *type;
4450 unsigned int i;
4451
4452 for (i = 0; i < __arraycount(ixl_speed_type_map); i++) {
4453 type = &ixl_speed_type_map[i];
4454
4455 if (ISSET(type->dev_speed, link_speed))
4456 return type->net_speed;
4457 }
4458
4459 return 0;
4460 }
4461
4462 static int
4463 ixl_restart_an(struct ixl_softc *sc)
4464 {
4465 struct ixl_aq_desc iaq;
4466
4467 memset(&iaq, 0, sizeof(iaq));
4468 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_RESTART_AN);
4469 iaq.iaq_param[0] =
4470 htole32(IXL_AQ_PHY_RESTART_AN | IXL_AQ_PHY_LINK_ENABLE);
4471
4472 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4473 aprint_error_dev(sc->sc_dev, "RESTART AN timeout\n");
4474 return -1;
4475 }
4476 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4477 aprint_error_dev(sc->sc_dev, "RESTART AN error\n");
4478 return -1;
4479 }
4480
4481 return 0;
4482 }
4483
4484 static int
4485 ixl_add_macvlan(struct ixl_softc *sc, const uint8_t *macaddr,
4486 uint16_t vlan, uint16_t flags)
4487 {
4488 struct ixl_aq_desc iaq;
4489 struct ixl_aq_add_macvlan *param;
4490 struct ixl_aq_add_macvlan_elem *elem;
4491
4492 memset(&iaq, 0, sizeof(iaq));
4493 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4494 iaq.iaq_opcode = htole16(IXL_AQ_OP_ADD_MACVLAN);
4495 iaq.iaq_datalen = htole16(sizeof(*elem));
4496 ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));
4497
4498 param = (struct ixl_aq_add_macvlan *)&iaq.iaq_param;
4499 param->num_addrs = htole16(1);
4500 param->seid0 = htole16(0x8000) | sc->sc_seid;
4501 param->seid1 = 0;
4502 param->seid2 = 0;
4503
4504 elem = IXL_DMA_KVA(&sc->sc_scratch);
4505 memset(elem, 0, sizeof(*elem));
4506 memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
4507 elem->flags = htole16(IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH | flags);
4508 elem->vlan = htole16(vlan);
4509
4510 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4511 return IXL_AQ_RC_EINVAL;
4512 }
4513
4514 switch (le16toh(iaq.iaq_retval)) {
4515 case IXL_AQ_RC_OK:
4516 break;
4517 case IXL_AQ_RC_ENOSPC:
4518 return ENOSPC;
4519 case IXL_AQ_RC_ENOENT:
4520 return ENOENT;
4521 case IXL_AQ_RC_EACCES:
4522 return EACCES;
4523 case IXL_AQ_RC_EEXIST:
4524 return EEXIST;
4525 case IXL_AQ_RC_EINVAL:
4526 return EINVAL;
4527 default:
4528 return EIO;
4529 }
4530
4531 return 0;
4532 }
4533
4534 static int
4535 ixl_remove_macvlan(struct ixl_softc *sc, const uint8_t *macaddr,
4536 uint16_t vlan, uint16_t flags)
4537 {
4538 struct ixl_aq_desc iaq;
4539 struct ixl_aq_remove_macvlan *param;
4540 struct ixl_aq_remove_macvlan_elem *elem;
4541
4542 memset(&iaq, 0, sizeof(iaq));
4543 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4544 iaq.iaq_opcode = htole16(IXL_AQ_OP_REMOVE_MACVLAN);
4545 iaq.iaq_datalen = htole16(sizeof(*elem));
4546 ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));
4547
4548 param = (struct ixl_aq_remove_macvlan *)&iaq.iaq_param;
4549 param->num_addrs = htole16(1);
4550 param->seid0 = htole16(0x8000) | sc->sc_seid;
4551 param->seid1 = 0;
4552 param->seid2 = 0;
4553
4554 elem = IXL_DMA_KVA(&sc->sc_scratch);
4555 memset(elem, 0, sizeof(*elem));
4556 memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
4557 elem->flags = htole16(IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH | flags);
4558 elem->vlan = htole16(vlan);
4559
4560 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4561 return EINVAL;
4562 }
4563
4564 switch (le16toh(iaq.iaq_retval)) {
4565 case IXL_AQ_RC_OK:
4566 break;
4567 case IXL_AQ_RC_ENOENT:
4568 return ENOENT;
4569 case IXL_AQ_RC_EACCES:
4570 return EACCES;
4571 case IXL_AQ_RC_EINVAL:
4572 return EINVAL;
4573 default:
4574 return EIO;
4575 }
4576
4577 return 0;
4578 }
4579
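/*
 * Set up the Host Memory Cache (HMC) backing store: read the per-object
 * counts and sizes for the LAN TX/RX (and FCoE) contexts, allocate the
 * page descriptor and segment descriptor DMA areas, point every segment
 * descriptor at its backing pages, and program the base/count registers
 * for each object type.
 */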
4580 static int
4581 ixl_hmc(struct ixl_softc *sc)
4582 {
4583 struct {
4584 uint32_t count;
4585 uint32_t minsize;
4586 bus_size_t objsiz;
4587 bus_size_t setoff;
4588 bus_size_t setcnt;
4589 } regs[] = {
4590 {
4591 0,
4592 IXL_HMC_TXQ_MINSIZE,
4593 I40E_GLHMC_LANTXOBJSZ,
4594 I40E_GLHMC_LANTXBASE(sc->sc_pf_id),
4595 I40E_GLHMC_LANTXCNT(sc->sc_pf_id),
4596 },
4597 {
4598 0,
4599 IXL_HMC_RXQ_MINSIZE,
4600 I40E_GLHMC_LANRXOBJSZ,
4601 I40E_GLHMC_LANRXBASE(sc->sc_pf_id),
4602 I40E_GLHMC_LANRXCNT(sc->sc_pf_id),
4603 },
4604 {
4605 0,
4606 0,
4607 I40E_GLHMC_FCOEDDPOBJSZ,
4608 I40E_GLHMC_FCOEDDPBASE(sc->sc_pf_id),
4609 I40E_GLHMC_FCOEDDPCNT(sc->sc_pf_id),
4610 },
4611 {
4612 0,
4613 0,
4614 I40E_GLHMC_FCOEFOBJSZ,
4615 I40E_GLHMC_FCOEFBASE(sc->sc_pf_id),
4616 I40E_GLHMC_FCOEFCNT(sc->sc_pf_id),
4617 },
4618 };
4619 struct ixl_hmc_entry *e;
4620 uint64_t size, dva;
4621 uint8_t *kva;
4622 uint64_t *sdpage;
4623 unsigned int i;
4624 int npages, tables;
4625 uint32_t reg;
4626
4627 CTASSERT(__arraycount(regs) <= __arraycount(sc->sc_hmc_entries));
4628
4629 regs[IXL_HMC_LAN_TX].count = regs[IXL_HMC_LAN_RX].count =
4630 ixl_rd(sc, I40E_GLHMC_LANQMAX);
4631
4632 size = 0;
4633 for (i = 0; i < __arraycount(regs); i++) {
4634 e = &sc->sc_hmc_entries[i];
4635
4636 e->hmc_count = regs[i].count;
4637 reg = ixl_rd(sc, regs[i].objsiz);
4638 e->hmc_size = BIT_ULL(0x3F & reg);
4639 e->hmc_base = size;
4640
4641 if ((e->hmc_size * 8) < regs[i].minsize) {
4642 aprint_error_dev(sc->sc_dev,
4643 "kernel hmc entry is too big\n");
4644 return -1;
4645 }
4646
4647 size += roundup(e->hmc_size * e->hmc_count, IXL_HMC_ROUNDUP);
4648 }
4649 size = roundup(size, IXL_HMC_PGSIZE);
4650 npages = size / IXL_HMC_PGSIZE;
4651
4652 tables = roundup(size, IXL_HMC_L2SZ) / IXL_HMC_L2SZ;
4653
4654 if (ixl_dmamem_alloc(sc, &sc->sc_hmc_pd, size, IXL_HMC_PGSIZE) != 0) {
4655 aprint_error_dev(sc->sc_dev,
4656 "unable to allocate hmc pd memory\n");
4657 return -1;
4658 }
4659
4660 if (ixl_dmamem_alloc(sc, &sc->sc_hmc_sd, tables * IXL_HMC_PGSIZE,
4661 IXL_HMC_PGSIZE) != 0) {
4662 aprint_error_dev(sc->sc_dev,
4663 "unable to allocate hmc sd memory\n");
4664 ixl_dmamem_free(sc, &sc->sc_hmc_pd);
4665 return -1;
4666 }
4667
4668 kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
4669 memset(kva, 0, IXL_DMA_LEN(&sc->sc_hmc_pd));
4670
4671 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
4672 0, IXL_DMA_LEN(&sc->sc_hmc_pd),
4673 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4674
4675 dva = IXL_DMA_DVA(&sc->sc_hmc_pd);
4676 sdpage = IXL_DMA_KVA(&sc->sc_hmc_sd);
4677 memset(sdpage, 0, IXL_DMA_LEN(&sc->sc_hmc_sd));
4678
4679 for (i = 0; (int)i < npages; i++) {
4680 *sdpage = htole64(dva | IXL_HMC_PDVALID);
4681 sdpage++;
4682
4683 dva += IXL_HMC_PGSIZE;
4684 }
4685
4686 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_sd),
4687 0, IXL_DMA_LEN(&sc->sc_hmc_sd),
4688 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4689
4690 dva = IXL_DMA_DVA(&sc->sc_hmc_sd);
4691 for (i = 0; (int)i < tables; i++) {
4692 uint32_t count;
4693
4694 KASSERT(npages >= 0);
4695
4696 count = ((unsigned int)npages > IXL_HMC_PGS) ?
4697 IXL_HMC_PGS : (unsigned int)npages;
4698
4699 ixl_wr(sc, I40E_PFHMC_SDDATAHIGH, dva >> 32);
4700 ixl_wr(sc, I40E_PFHMC_SDDATALOW, dva |
4701 (count << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
4702 (1U << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT));
4703 ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
4704 ixl_wr(sc, I40E_PFHMC_SDCMD,
4705 (1U << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | i);
4706
4707 npages -= IXL_HMC_PGS;
4708 dva += IXL_HMC_PGSIZE;
4709 }
4710
4711 for (i = 0; i < __arraycount(regs); i++) {
4712 e = &sc->sc_hmc_entries[i];
4713
4714 ixl_wr(sc, regs[i].setoff, e->hmc_base / IXL_HMC_ROUNDUP);
4715 ixl_wr(sc, regs[i].setcnt, e->hmc_count);
4716 }
4717
4718 return 0;
4719 }
4720
4721 static void
4722 ixl_hmc_free(struct ixl_softc *sc)
4723 {
4724 ixl_dmamem_free(sc, &sc->sc_hmc_sd);
4725 ixl_dmamem_free(sc, &sc->sc_hmc_pd);
4726 }
4727
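/*
 * Pack a host structure into the bit-level layout expected by the HMC
 * context objects: each entry in "packing" gives a source field (byte
 * offset and width in bits) and the destination bit position (lsb), and
 * the field is copied 8 bits at a time with the shifting needed to
 * honour non-byte-aligned destinations.
 */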
4728 static void
4729 ixl_hmc_pack(void *d, const void *s, const struct ixl_hmc_pack *packing,
4730 unsigned int npacking)
4731 {
4732 uint8_t *dst = d;
4733 const uint8_t *src = s;
4734 unsigned int i;
4735
4736 for (i = 0; i < npacking; i++) {
4737 const struct ixl_hmc_pack *pack = &packing[i];
4738 unsigned int offset = pack->lsb / 8;
4739 unsigned int align = pack->lsb % 8;
4740 const uint8_t *in = src + pack->offset;
4741 uint8_t *out = dst + offset;
4742 int width = pack->width;
4743 unsigned int inbits = 0;
4744
4745 if (align) {
4746 inbits = (*in++) << align;
4747 *out++ |= (inbits & 0xff);
4748 inbits >>= 8;
4749
4750 width -= 8 - align;
4751 }
4752
4753 while (width >= 8) {
4754 inbits |= (*in++) << align;
4755 *out++ = (inbits & 0xff);
4756 inbits >>= 8;
4757
4758 width -= 8;
4759 }
4760
4761 if (width > 0) {
4762 inbits |= (*in) << align;
4763 *out |= (inbits & ((1 << width) - 1));
4764 }
4765 }
4766 }
4767
4768 static struct ixl_aq_buf *
4769 ixl_aqb_alloc(struct ixl_softc *sc)
4770 {
4771 struct ixl_aq_buf *aqb;
4772
4773 aqb = malloc(sizeof(*aqb), M_DEVBUF, M_WAITOK);
4774 if (aqb == NULL)
4775 return NULL;
4776
4777 aqb->aqb_size = IXL_AQ_BUFLEN;
4778
4779 if (bus_dmamap_create(sc->sc_dmat, aqb->aqb_size, 1,
4780 aqb->aqb_size, 0,
4781 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &aqb->aqb_map) != 0)
4782 goto free;
4783 if (bus_dmamem_alloc(sc->sc_dmat, aqb->aqb_size,
4784 IXL_AQ_ALIGN, 0, &aqb->aqb_seg, 1, &aqb->aqb_nsegs,
4785 BUS_DMA_WAITOK) != 0)
4786 goto destroy;
4787 if (bus_dmamem_map(sc->sc_dmat, &aqb->aqb_seg, aqb->aqb_nsegs,
4788 aqb->aqb_size, &aqb->aqb_data, BUS_DMA_WAITOK) != 0)
4789 goto dma_free;
4790 if (bus_dmamap_load(sc->sc_dmat, aqb->aqb_map, aqb->aqb_data,
4791 aqb->aqb_size, NULL, BUS_DMA_WAITOK) != 0)
4792 goto unmap;
4793
4794 return aqb;
4795 unmap:
4796 bus_dmamem_unmap(sc->sc_dmat, aqb->aqb_data, aqb->aqb_size);
4797 dma_free:
4798 bus_dmamem_free(sc->sc_dmat, &aqb->aqb_seg, 1);
4799 destroy:
4800 bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
4801 free:
4802 free(aqb, M_DEVBUF);
4803
4804 return NULL;
4805 }
4806
4807 static void
4808 ixl_aqb_free(struct ixl_softc *sc, struct ixl_aq_buf *aqb)
4809 {
4810 bus_dmamap_unload(sc->sc_dmat, aqb->aqb_map);
4811 bus_dmamem_unmap(sc->sc_dmat, aqb->aqb_data, aqb->aqb_size);
4812 bus_dmamem_free(sc->sc_dmat, &aqb->aqb_seg, 1);
4813 bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
4814 free(aqb, M_DEVBUF);
4815 }
4816
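/*
 * Post receive buffers on the admin receive queue (ARQ): for each
 * unfilled slot reuse an idle buffer or allocate a new one, initialise
 * the descriptor to point at it, and finally advance the hardware tail
 * register if anything was posted.
 */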
4817 static int
4818 ixl_arq_fill(struct ixl_softc *sc)
4819 {
4820 struct ixl_aq_buf *aqb;
4821 struct ixl_aq_desc *arq, *iaq;
4822 unsigned int prod = sc->sc_arq_prod;
4823 unsigned int n;
4824 int post = 0;
4825
4826 n = ixl_rxr_unrefreshed(sc->sc_arq_prod, sc->sc_arq_cons,
4827 IXL_AQ_NUM);
4828 arq = IXL_DMA_KVA(&sc->sc_arq);
4829
4830 if (__predict_false(n == 0))
4831 return 0;
4832
4833 do {
4834 aqb = sc->sc_arq_live[prod];
4835 iaq = &arq[prod];
4836
4837 if (aqb == NULL) {
4838 aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle);
4839 if (aqb != NULL) {
4840 SIMPLEQ_REMOVE(&sc->sc_arq_idle, aqb,
4841 ixl_aq_buf, aqb_entry);
4842 } else if ((aqb = ixl_aqb_alloc(sc)) == NULL) {
4843 break;
4844 }
4845
4846 sc->sc_arq_live[prod] = aqb;
4847 memset(aqb->aqb_data, 0, aqb->aqb_size);
4848
4849 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0,
4850 aqb->aqb_size, BUS_DMASYNC_PREREAD);
4851
4852 iaq->iaq_flags = htole16(IXL_AQ_BUF |
4853 (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ?
4854 IXL_AQ_LB : 0));
4855 iaq->iaq_opcode = 0;
4856 iaq->iaq_datalen = htole16(aqb->aqb_size);
4857 iaq->iaq_retval = 0;
4858 iaq->iaq_cookie = 0;
4859 iaq->iaq_param[0] = 0;
4860 iaq->iaq_param[1] = 0;
4861 ixl_aq_dva(iaq, aqb->aqb_map->dm_segs[0].ds_addr);
4862 }
4863
4864 prod++;
4865 prod &= IXL_AQ_MASK;
4866
4867 post = 1;
4868
4869 } while (--n);
4870
4871 if (post) {
4872 sc->sc_arq_prod = prod;
4873 ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
4874 }
4875
4876 return post;
4877 }
4878
4879 static void
4880 ixl_arq_unfill(struct ixl_softc *sc)
4881 {
4882 struct ixl_aq_buf *aqb;
4883 unsigned int i;
4884
4885 for (i = 0; i < __arraycount(sc->sc_arq_live); i++) {
4886 aqb = sc->sc_arq_live[i];
4887 if (aqb == NULL)
4888 continue;
4889
4890 sc->sc_arq_live[i] = NULL;
4891 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, aqb->aqb_size,
4892 BUS_DMASYNC_POSTREAD);
4893 ixl_aqb_free(sc, aqb);
4894 }
4895
4896 while ((aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle)) != NULL) {
4897 SIMPLEQ_REMOVE(&sc->sc_arq_idle, aqb,
4898 ixl_aq_buf, aqb_entry);
4899 ixl_aqb_free(sc, aqb);
4900 }
4901 }
4902
4903 static void
4904 ixl_clear_hw(struct ixl_softc *sc)
4905 {
4906 uint32_t num_queues, base_queue;
4907 uint32_t num_pf_int;
4908 uint32_t num_vf_int;
4909 uint32_t num_vfs;
4910 uint32_t i, j;
4911 uint32_t val;
4912 uint32_t eol = 0x7ff;
4913
4914 /* get number of interrupts, queues, and vfs */
4915 val = ixl_rd(sc, I40E_GLPCI_CNF2);
4916 num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
4917 I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
4918 num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
4919 I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
4920
4921 val = ixl_rd(sc, I40E_PFLAN_QALLOC);
4922 base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
4923 I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
4924 j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
4925 I40E_PFLAN_QALLOC_LASTQ_SHIFT;
4926 if (val & I40E_PFLAN_QALLOC_VALID_MASK)
4927 num_queues = (j - base_queue) + 1;
4928 else
4929 num_queues = 0;
4930
4931 val = ixl_rd(sc, I40E_PF_VT_PFALLOC);
4932 i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
4933 I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
4934 j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
4935 I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
4936 if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
4937 num_vfs = (j - i) + 1;
4938 else
4939 num_vfs = 0;
4940
4941 /* stop all the interrupts */
4942 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0);
4943 ixl_flush(sc);
4944 val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
4945 for (i = 0; i < num_pf_int - 2; i++)
4946 ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), val);
4947 ixl_flush(sc);
4948
4949 /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
4950 val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4951 ixl_wr(sc, I40E_PFINT_LNKLST0, val);
4952 for (i = 0; i < num_pf_int - 2; i++)
4953 ixl_wr(sc, I40E_PFINT_LNKLSTN(i), val);
4954 val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4955 for (i = 0; i < num_vfs; i++)
4956 ixl_wr(sc, I40E_VPINT_LNKLST0(i), val);
4957 for (i = 0; i < num_vf_int - 2; i++)
4958 ixl_wr(sc, I40E_VPINT_LNKLSTN(i), val);
4959
4960 /* warn the HW of the coming Tx disables */
4961 for (i = 0; i < num_queues; i++) {
4962 uint32_t abs_queue_idx = base_queue + i;
4963 uint32_t reg_block = 0;
4964
4965 if (abs_queue_idx >= 128) {
4966 reg_block = abs_queue_idx / 128;
4967 abs_queue_idx %= 128;
4968 }
4969
4970 val = ixl_rd(sc, I40E_GLLAN_TXPRE_QDIS(reg_block));
4971 val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
4972 val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
4973 val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
4974
4975 ixl_wr(sc, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
4976 }
4977 delaymsec(400);
4978
4979 /* stop all the queues */
4980 for (i = 0; i < num_queues; i++) {
4981 ixl_wr(sc, I40E_QINT_TQCTL(i), 0);
4982 ixl_wr(sc, I40E_QTX_ENA(i), 0);
4983 ixl_wr(sc, I40E_QINT_RQCTL(i), 0);
4984 ixl_wr(sc, I40E_QRX_ENA(i), 0);
4985 }
4986
4987 /* short wait for all queue disables to settle */
4988 delaymsec(50);
4989 }
4990
4991 static int
4992 ixl_pf_reset(struct ixl_softc *sc)
4993 {
4994 uint32_t cnt = 0;
4995 uint32_t cnt1 = 0;
4996 uint32_t reg = 0, reg0 = 0;
4997 uint32_t grst_del;
4998
4999 /*
5000 * Poll for Global Reset steady state in case of recent GRST.
5001 * The grst delay value is in 100ms units, and we'll wait a
5002 * couple counts longer to be sure we don't just miss the end.
5003 */
5004 grst_del = ixl_rd(sc, I40E_GLGEN_RSTCTL);
5005 grst_del &= I40E_GLGEN_RSTCTL_GRSTDEL_MASK;
5006 grst_del >>= I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
5007
5008 grst_del = grst_del * 20;
5009
5010 for (cnt = 0; cnt < grst_del; cnt++) {
5011 reg = ixl_rd(sc, I40E_GLGEN_RSTAT);
5012 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
5013 break;
5014 delaymsec(100);
5015 }
5016 if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
5017 aprint_error(", Global reset polling failed to complete\n");
5018 return -1;
5019 }
5020
5021 /* Now Wait for the FW to be ready */
5022 for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
5023 reg = ixl_rd(sc, I40E_GLNVM_ULD);
5024 reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5025 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
5026 if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5027 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))
5028 break;
5029
5030 delaymsec(10);
5031 }
5032 if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5033 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
5034 aprint_error(", wait for FW Reset complete timed out "
5035 "(I40E_GLNVM_ULD = 0x%x)\n", reg);
5036 return -1;
5037 }
5038
5039 /*
5040 * If there was a Global Reset in progress when we got here,
5041 * we don't need to do the PF Reset
5042 */
5043 if (cnt == 0) {
5044 reg = ixl_rd(sc, I40E_PFGEN_CTRL);
5045 ixl_wr(sc, I40E_PFGEN_CTRL, reg | I40E_PFGEN_CTRL_PFSWR_MASK);
5046 for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) {
5047 reg = ixl_rd(sc, I40E_PFGEN_CTRL);
5048 if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
5049 break;
5050 delaymsec(1);
5051
5052 reg0 = ixl_rd(sc, I40E_GLGEN_RSTAT);
5053 if (reg0 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
5054 aprint_error(", Core reset upcoming."
5055 " Skipping PF reset request\n");
5056 return -1;
5057 }
5058 }
5059 if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
5060 aprint_error(", PF reset polling failed to complete"
5061 " (I40E_PFGEN_CTRL = 0x%x)\n", reg);
5062 return -1;
5063 }
5064 }
5065
5066 return 0;
5067 }
5068
5069 static int
5070 ixl_dmamem_alloc(struct ixl_softc *sc, struct ixl_dmamem *ixm,
5071 bus_size_t size, bus_size_t align)
5072 {
5073 ixm->ixm_size = size;
5074
5075 if (bus_dmamap_create(sc->sc_dmat, ixm->ixm_size, 1,
5076 ixm->ixm_size, 0,
5077 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
5078 &ixm->ixm_map) != 0)
5079 return 1;
5080 if (bus_dmamem_alloc(sc->sc_dmat, ixm->ixm_size,
5081 align, 0, &ixm->ixm_seg, 1, &ixm->ixm_nsegs,
5082 BUS_DMA_WAITOK) != 0)
5083 goto destroy;
5084 if (bus_dmamem_map(sc->sc_dmat, &ixm->ixm_seg, ixm->ixm_nsegs,
5085 ixm->ixm_size, &ixm->ixm_kva, BUS_DMA_WAITOK) != 0)
5086 goto free;
5087 if (bus_dmamap_load(sc->sc_dmat, ixm->ixm_map, ixm->ixm_kva,
5088 ixm->ixm_size, NULL, BUS_DMA_WAITOK) != 0)
5089 goto unmap;
5090
5091 memset(ixm->ixm_kva, 0, ixm->ixm_size);
5092
5093 return 0;
5094 unmap:
5095 bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
5096 free:
5097 bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
5098 destroy:
5099 bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
5100 return 1;
5101 }
5102
5103 static void
5104 ixl_dmamem_free(struct ixl_softc *sc, struct ixl_dmamem *ixm)
5105 {
5106 bus_dmamap_unload(sc->sc_dmat, ixm->ixm_map);
5107 bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
5108 bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
5109 bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
5110 }
5111
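/*
 * Switch the MAC/VLAN filters to per-VLAN perfect matches: drop the
 * "ignore VLAN" entries for our address and broadcast, then add a
 * perfect-match entry for VLAN 0 plus one for every VLAN id currently
 * registered on the ethercom.
 */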
5112 static int
5113 ixl_setup_vlan_hwfilter(struct ixl_softc *sc)
5114 {
5115 struct ethercom *ec = &sc->sc_ec;
5116 struct vlanid_list *vlanidp;
5117 int rv;
5118
5119 ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
5120 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
5121 ixl_remove_macvlan(sc, etherbroadcastaddr, 0,
5122 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
5123
5124 rv = ixl_add_macvlan(sc, sc->sc_enaddr, 0,
5125 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5126 if (rv != 0)
5127 return rv;
5128 rv = ixl_add_macvlan(sc, etherbroadcastaddr, 0,
5129 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5130 if (rv != 0)
5131 return rv;
5132
5133 ETHER_LOCK(ec);
5134 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
5135 rv = ixl_add_macvlan(sc, sc->sc_enaddr,
5136 vlanidp->vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5137 if (rv != 0)
5138 break;
5139 rv = ixl_add_macvlan(sc, etherbroadcastaddr,
5140 vlanidp->vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5141 if (rv != 0)
5142 break;
5143 }
5144 ETHER_UNLOCK(ec);
5145
5146 return rv;
5147 }
5148
5149 static void
5150 ixl_teardown_vlan_hwfilter(struct ixl_softc *sc)
5151 {
5152 struct vlanid_list *vlanidp;
5153 struct ethercom *ec = &sc->sc_ec;
5154
5155 ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
5156 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5157 ixl_remove_macvlan(sc, etherbroadcastaddr, 0,
5158 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5159
5160 ETHER_LOCK(ec);
5161 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
5162 ixl_remove_macvlan(sc, sc->sc_enaddr,
5163 vlanidp->vid, IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5164 ixl_remove_macvlan(sc, etherbroadcastaddr,
5165 vlanidp->vid, IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5166 }
5167 ETHER_UNLOCK(ec);
5168
5169 ixl_add_macvlan(sc, sc->sc_enaddr, 0,
5170 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
5171 ixl_add_macvlan(sc, etherbroadcastaddr, 0,
5172 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
5173 }
5174
5175 static int
5176 ixl_update_macvlan(struct ixl_softc *sc)
5177 {
5178 int rv = 0;
5179 int next_ec_capenable = sc->sc_ec.ec_capenable;
5180
5181 if (ISSET(next_ec_capenable, ETHERCAP_VLAN_HWFILTER)) {
5182 rv = ixl_setup_vlan_hwfilter(sc);
5183 if (rv != 0)
5184 ixl_teardown_vlan_hwfilter(sc);
5185 } else {
5186 ixl_teardown_vlan_hwfilter(sc);
5187 }
5188
5189 return rv;
5190 }
5191
5192 static int
5193 ixl_ifflags_cb(struct ethercom *ec)
5194 {
5195 struct ifnet *ifp = &ec->ec_if;
5196 struct ixl_softc *sc = ifp->if_softc;
5197 int rv, change;
5198
5199 mutex_enter(&sc->sc_cfg_lock);
5200
5201 change = ec->ec_capenable ^ sc->sc_cur_ec_capenable;
5202
5203 if (ISSET(change, ETHERCAP_VLAN_HWTAGGING)) {
5204 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWTAGGING;
5205 rv = ENETRESET;
5206 goto out;
5207 }
5208
5209 if (ISSET(change, ETHERCAP_VLAN_HWFILTER)) {
5210 rv = ixl_update_macvlan(sc);
5211 if (rv == 0) {
5212 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWFILTER;
5213 } else {
5214 CLR(ec->ec_capenable, ETHERCAP_VLAN_HWFILTER);
5215 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER);
5216 }
5217 }
5218
5219 rv = ixl_iff(sc);
5220 out:
5221 mutex_exit(&sc->sc_cfg_lock);
5222
5223 return rv;
5224 }
5225
5226 static int
5227 ixl_set_link_status(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
5228 {
5229 const struct ixl_aq_link_status *status;
5230 const struct ixl_phy_type *itype;
5231
5232 uint64_t ifm_active = IFM_ETHER;
5233 uint64_t ifm_status = IFM_AVALID;
5234 int link_state = LINK_STATE_DOWN;
5235 uint64_t baudrate = 0;
5236
5237 status = (const struct ixl_aq_link_status *)iaq->iaq_param;
5238 if (!ISSET(status->link_info, IXL_AQ_LINK_UP_FUNCTION))
5239 goto done;
5240
5241 ifm_active |= IFM_FDX;
5242 ifm_status |= IFM_ACTIVE;
5243 link_state = LINK_STATE_UP;
5244
5245 itype = ixl_search_phy_type(status->phy_type);
5246 if (itype != NULL)
5247 ifm_active |= itype->ifm_type;
5248
5249 if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_TX))
5250 ifm_active |= IFM_ETH_TXPAUSE;
5251 if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_RX))
5252 ifm_active |= IFM_ETH_RXPAUSE;
5253
5254 baudrate = ixl_search_link_speed(status->link_speed);
5255
5256 done:
5257 /* NET_ASSERT_LOCKED() except during attach */
5258 sc->sc_media_active = ifm_active;
5259 sc->sc_media_status = ifm_status;
5260
5261 sc->sc_ec.ec_if.if_baudrate = baudrate;
5262
5263 return link_state;
5264 }
5265
5266 static int
5267 ixl_establish_intx(struct ixl_softc *sc)
5268 {
5269 pci_chipset_tag_t pc = sc->sc_pa.pa_pc;
5270 pci_intr_handle_t *intr;
5271 char xnamebuf[32];
5272 char intrbuf[PCI_INTRSTR_LEN];
5273 char const *intrstr;
5274
5275 KASSERT(sc->sc_nintrs == 1);
5276
5277 intr = &sc->sc_ihp[0];
5278
5279 intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf));
5280 snprintf(xnamebuf, sizeof(xnamebuf), "%s:legacy",
5281 device_xname(sc->sc_dev));
5282
5283 sc->sc_ihs[0] = pci_intr_establish_xname(pc, *intr, IPL_NET, ixl_intr,
5284 sc, xnamebuf);
5285
5286 if (sc->sc_ihs[0] == NULL) {
5287 aprint_error_dev(sc->sc_dev,
5288 "unable to establish interrupt at %s\n", intrstr);
5289 return -1;
5290 }
5291
5292 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
5293 return 0;
5294 }
5295
5296 static int
5297 ixl_establish_msix(struct ixl_softc *sc)
5298 {
5299 pci_chipset_tag_t pc = sc->sc_pa.pa_pc;
5300 unsigned int vector = 0;
5301 unsigned int i;
5302 char xnamebuf[32];
5303 char intrbuf[PCI_INTRSTR_LEN];
5304 char const *intrstr;
5305
5306 /* the "other" intr is mapped to vector 0 */
5307 vector = 0;
5308 intrstr = pci_intr_string(pc, sc->sc_ihp[vector],
5309 intrbuf, sizeof(intrbuf));
5310 snprintf(xnamebuf, sizeof(xnamebuf), "%s others",
5311 device_xname(sc->sc_dev));
5312 sc->sc_ihs[vector] = pci_intr_establish_xname(pc,
5313 sc->sc_ihp[vector], IPL_NET, ixl_other_intr,
5314 sc, xnamebuf);
5315 if (sc->sc_ihs[vector] == NULL) {
5316 aprint_error_dev(sc->sc_dev,
5317 "unable to establish interrupt at %s\n", intrstr);
5318 goto fail;
5319 }
5320 vector++;
5321 aprint_normal_dev(sc->sc_dev, "interrupt at %s\n", intrstr);
5322
5323 sc->sc_msix_vector_queue = vector;
5324
5325 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
5326 intrstr = pci_intr_string(pc, sc->sc_ihp[vector],
5327 intrbuf, sizeof(intrbuf));
5328 snprintf(xnamebuf, sizeof(xnamebuf), "%s TXRX%d",
5329 device_xname(sc->sc_dev), i);
5330
5331 sc->sc_ihs[vector] = pci_intr_establish_xname(pc,
5332 sc->sc_ihp[vector], IPL_NET, ixl_queue_intr,
5333 (void *)&sc->sc_qps[i], xnamebuf);
5334
5335 if (sc->sc_ihs[vector] == NULL) {
5336 aprint_error_dev(sc->sc_dev,
5337 "unable to establish interrupt at %s\n", intrstr);
5338 goto fail;
5339 }
5340 vector++;
5341 aprint_normal_dev(sc->sc_dev,
5342 "interrupt at %s\n", intrstr);
5343 }
5344
5345 return 0;
5346 fail:
5347 for (i = 0; i < vector; i++) {
5348 pci_intr_disestablish(pc, sc->sc_ihs[i]);
5349 }
5350
5351 sc->sc_msix_vector_queue = 0;
5353
5354 return -1;
5355 }
5356
5357 static void
5358 ixl_set_affinity_msix(struct ixl_softc *sc)
5359 {
5360 kcpuset_t *affinity;
5361 pci_chipset_tag_t pc = sc->sc_pa.pa_pc;
5362 int affinity_to, r;
5363 unsigned int i, vector;
5364 char intrbuf[PCI_INTRSTR_LEN];
5365 char const *intrstr;
5366
5367 affinity_to = 0;
5368 kcpuset_create(&affinity, false);
5369
5370 vector = sc->sc_msix_vector_queue;
5371
5372 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
5373 affinity_to = i % ncpu;
5374
5375 kcpuset_zero(affinity);
5376 kcpuset_set(affinity, affinity_to);
5377
5378 intrstr = pci_intr_string(pc, sc->sc_ihp[vector + i],
5379 intrbuf, sizeof(intrbuf));
5380 r = interrupt_distribute(sc->sc_ihs[vector + i],
5381 affinity, NULL);
5382 if (r == 0) {
5383 aprint_normal_dev(sc->sc_dev,
5384 "for TXRX%u interrupting at %s affinity to %u\n",
5385 i, intrstr, affinity_to);
5386 } else {
5387 aprint_normal_dev(sc->sc_dev,
5388 "for TXRX%u interrupting at %s\n",
5389 i, intrstr);
5390 }
5391 }
5392
5393 vector = 0; /* vector 0 means "other" interrupt */
5394 affinity_to = (affinity_to + 1) % ncpu;
5395 kcpuset_zero(affinity);
5396 kcpuset_set(affinity, affinity_to);
5397
5398 intrstr = pci_intr_string(pc, sc->sc_ihp[vector],
5399 intrbuf, sizeof(intrbuf));
5400 r = interrupt_distribute(sc->sc_ihs[vector], affinity, NULL);
5401 if (r == 0) {
5402 aprint_normal_dev(sc->sc_dev,
5403 "for other interrupting at %s affinity to %u\n",
5404 intrstr, affinity_to);
5405 } else {
5406 aprint_normal_dev(sc->sc_dev,
5407 "for other interrupting at %s\n", intrstr);
5408 }
5409
5410 kcpuset_destroy(affinity);
5411 }
5412
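/*
 * Program the per-queue interrupt cause registers: each RX queue is
 * linked to its TX queue and both are bound to the queue's MSI-X vector
 * (or to the single shared vector when not using MSI-X), with the RX/TX
 * ITR indices selected for interrupt moderation.
 */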
5413 static void
5414 ixl_config_queue_intr(struct ixl_softc *sc)
5415 {
5416 unsigned int i, vector;
5417
5418 if (sc->sc_intrtype == PCI_INTR_TYPE_MSIX) {
5419 vector = sc->sc_msix_vector_queue;
5420 } else {
5421 vector = I40E_INTR_NOTX_INTR;
5422
5423 ixl_wr(sc, I40E_PFINT_LNKLST0,
5424 (I40E_INTR_NOTX_QUEUE <<
5425 I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
5426 (I40E_QUEUE_TYPE_RX <<
5427 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
5428 }
5429
5430 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
5431 ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), 0);
5432 ixl_flush(sc);
5433
5434 ixl_wr(sc, I40E_PFINT_LNKLSTN(i),
5435 ((i) << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
5436 (I40E_QUEUE_TYPE_RX <<
5437 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
5438
5439 ixl_wr(sc, I40E_QINT_RQCTL(i),
5440 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
5441 (I40E_ITR_INDEX_RX <<
5442 I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
5443 (I40E_INTR_NOTX_RX_QUEUE <<
5444 I40E_QINT_RQCTL_MSIX0_INDX_SHIFT) |
5445 (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
5446 (I40E_QUEUE_TYPE_TX <<
5447 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
5448 I40E_QINT_RQCTL_CAUSE_ENA_MASK);
5449
5450 ixl_wr(sc, I40E_QINT_TQCTL(i),
5451 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
5452 (I40E_ITR_INDEX_TX <<
5453 I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
5454 (I40E_INTR_NOTX_TX_QUEUE <<
5455 I40E_QINT_TQCTL_MSIX0_INDX_SHIFT) |
5456 (I40E_QUEUE_TYPE_EOL <<
5457 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
5458 (I40E_QUEUE_TYPE_RX <<
5459 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) |
5460 I40E_QINT_TQCTL_CAUSE_ENA_MASK);
5461
5462 if (sc->sc_intrtype == PCI_INTR_TYPE_MSIX)
5463 vector++;
5464 }
5465 ixl_flush(sc);
5466
5467 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_RX), 0x7a);
5468 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_TX), 0x7a);
5469 ixl_flush(sc);
5470 }
5471
5472 static void
5473 ixl_config_other_intr(struct ixl_softc *sc)
5474 {
5475 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0);
5476 (void)ixl_rd(sc, I40E_PFINT_ICR0);
5477
5478 ixl_wr(sc, I40E_PFINT_ICR0_ENA,
5479 I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
5480 I40E_PFINT_ICR0_ENA_GRST_MASK |
5481 I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
5482 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
5483 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
5484 I40E_PFINT_ICR0_ENA_VFLR_MASK |
5485 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
5486 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
5487 I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK);
5488
5489 ixl_wr(sc, I40E_PFINT_LNKLST0, 0x7FF);
5490 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_OTHER), 0);
5491 ixl_wr(sc, I40E_PFINT_STAT_CTL0,
5492 (I40E_ITR_INDEX_OTHER <<
5493 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT));
5494 ixl_flush(sc);
5495 }
5496
5497 static int
5498 ixl_setup_interrupts(struct ixl_softc *sc)
5499 {
5500 struct pci_attach_args *pa = &sc->sc_pa;
5501 pci_intr_type_t max_type, intr_type;
5502 int counts[PCI_INTR_TYPE_SIZE];
5503 int error;
5504 unsigned int i;
5505 bool retry;
5506
5507 memset(counts, 0, sizeof(counts));
5508 max_type = PCI_INTR_TYPE_MSIX;
5509 /* QPs + other interrupt */
5510 counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueue_pairs_max + 1;
5511 counts[PCI_INTR_TYPE_INTX] = 1;
5512
5513 if (ixl_param_nomsix)
5514 counts[PCI_INTR_TYPE_MSIX] = 0;
5515
5516 do {
5517 retry = false;
5518 error = pci_intr_alloc(pa, &sc->sc_ihp, counts, max_type);
5519 if (error != 0) {
5520 aprint_error_dev(sc->sc_dev,
5521 "couldn't map interrupt\n");
5522 break;
5523 }
5524 for (i = 0; i < sc->sc_nintrs; i++) {
5525 pci_intr_setattr(pa->pa_pc, &sc->sc_ihp[i],
5526 PCI_INTR_MPSAFE, true);
5527 }
5528
5529 intr_type = pci_intr_type(pa->pa_pc, sc->sc_ihp[0]);
5530 sc->sc_nintrs = counts[intr_type];
5531 KASSERT(sc->sc_nintrs > 0);
5532
5533 sc->sc_ihs = kmem_alloc(sizeof(sc->sc_ihs[0]) * sc->sc_nintrs,
5534 KM_SLEEP);
5535
5536 if (intr_type == PCI_INTR_TYPE_MSIX) {
5537 error = ixl_establish_msix(sc);
5538 if (error) {
5539 counts[PCI_INTR_TYPE_MSIX] = 0;
5540 retry = true;
5541 } else {
5542 ixl_set_affinity_msix(sc);
5543 }
5544 } else if (intr_type == PCI_INTR_TYPE_INTX) {
5545 error = ixl_establish_intx(sc);
5546 } else {
5547 error = -1;
5548 }
5549
5550 if (error) {
5551 kmem_free(sc->sc_ihs,
5552 sizeof(sc->sc_ihs[0]) * sc->sc_nintrs);
5553 pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs);
5554 } else {
5555 sc->sc_intrtype = intr_type;
5556 }
5557 } while (retry);
5558
5559 return error;
5560 }
5561
5562 static void
5563 ixl_teardown_interrupts(struct ixl_softc *sc)
5564 {
5565 struct pci_attach_args *pa = &sc->sc_pa;
5566 unsigned int i;
5567
5568 for (i = 0; i < sc->sc_nintrs; i++) {
5569 pci_intr_disestablish(pa->pa_pc, sc->sc_ihs[i]);
5570 }
5571
5572 pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs);
5573
5574 kmem_free(sc->sc_ihs, sizeof(sc->sc_ihs[0]) * sc->sc_nintrs);
5575 sc->sc_ihs = NULL;
5576 sc->sc_nintrs = 0;
5577 }
5578
5579 static int
5580 ixl_setup_stats(struct ixl_softc *sc)
5581 {
5582 struct ixl_queue_pair *qp;
5583 struct ixl_tx_ring *txr;
5584 struct ixl_rx_ring *rxr;
5585 struct ixl_stats_counters *isc;
5586 unsigned int i;
5587
5588 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
5589 qp = &sc->sc_qps[i];
5590 txr = qp->qp_txr;
5591 rxr = qp->qp_rxr;
5592
5593 evcnt_attach_dynamic(&txr->txr_defragged, EVCNT_TYPE_MISC,
5594 NULL, qp->qp_name, "m_defrag succeeded");
5595 evcnt_attach_dynamic(&txr->txr_defrag_failed, EVCNT_TYPE_MISC,
5596 NULL, qp->qp_name, "m_defrag_failed");
5597 evcnt_attach_dynamic(&txr->txr_pcqdrop, EVCNT_TYPE_MISC,
5598 NULL, qp->qp_name, "Dropped in pcq");
5599 evcnt_attach_dynamic(&txr->txr_transmitdef, EVCNT_TYPE_MISC,
5600 NULL, qp->qp_name, "Deferred transmit");
5601 evcnt_attach_dynamic(&txr->txr_intr, EVCNT_TYPE_INTR,
5602 NULL, qp->qp_name, "Interrupt on queue");
5603 evcnt_attach_dynamic(&txr->txr_defer, EVCNT_TYPE_MISC,
5604 NULL, qp->qp_name, "Handled queue in softint/workqueue");
5605
5606 evcnt_attach_dynamic(&rxr->rxr_mgethdr_failed, EVCNT_TYPE_MISC,
5607 NULL, qp->qp_name, "MGETHDR failed");
5608 evcnt_attach_dynamic(&rxr->rxr_mgetcl_failed, EVCNT_TYPE_MISC,
5609 NULL, qp->qp_name, "MCLGET failed");
5610 evcnt_attach_dynamic(&rxr->rxr_mbuf_load_failed,
5611 EVCNT_TYPE_MISC, NULL, qp->qp_name,
5612 "bus_dmamap_load_mbuf failed");
5613 evcnt_attach_dynamic(&rxr->rxr_intr, EVCNT_TYPE_INTR,
5614 NULL, qp->qp_name, "Interrupt on queue");
5615 evcnt_attach_dynamic(&rxr->rxr_defer, EVCNT_TYPE_MISC,
5616 NULL, qp->qp_name, "Handled queue in softint/workqueue");
5617 }
5618
5619 evcnt_attach_dynamic(&sc->sc_event_atq, EVCNT_TYPE_INTR,
5620 NULL, device_xname(sc->sc_dev), "Interrupt for other events");
5621 evcnt_attach_dynamic(&sc->sc_event_link, EVCNT_TYPE_MISC,
5622 NULL, device_xname(sc->sc_dev), "Link status event");
5623 evcnt_attach_dynamic(&sc->sc_event_ecc_err, EVCNT_TYPE_MISC,
5624 NULL, device_xname(sc->sc_dev), "ECC error");
5625 evcnt_attach_dynamic(&sc->sc_event_pci_exception, EVCNT_TYPE_MISC,
5626 NULL, device_xname(sc->sc_dev), "PCI exception");
5627 evcnt_attach_dynamic(&sc->sc_event_crit_err, EVCNT_TYPE_MISC,
5628 NULL, device_xname(sc->sc_dev), "Critical error");
5629
5630 isc = &sc->sc_stats_counters;
5631 evcnt_attach_dynamic(&isc->isc_crc_errors, EVCNT_TYPE_MISC,
5632 NULL, device_xname(sc->sc_dev), "CRC errors");
5633 evcnt_attach_dynamic(&isc->isc_illegal_bytes, EVCNT_TYPE_MISC,
5634 NULL, device_xname(sc->sc_dev), "Illegal bytes");
5635 evcnt_attach_dynamic(&isc->isc_mac_local_faults, EVCNT_TYPE_MISC,
5636 NULL, device_xname(sc->sc_dev), "Mac local faults");
5637 evcnt_attach_dynamic(&isc->isc_mac_remote_faults, EVCNT_TYPE_MISC,
5638 NULL, device_xname(sc->sc_dev), "Mac remote faults");
5639 evcnt_attach_dynamic(&isc->isc_link_xon_rx, EVCNT_TYPE_MISC,
5640 NULL, device_xname(sc->sc_dev), "Rx xon");
5641 evcnt_attach_dynamic(&isc->isc_link_xon_tx, EVCNT_TYPE_MISC,
5642 NULL, device_xname(sc->sc_dev), "Tx xon");
5643 evcnt_attach_dynamic(&isc->isc_link_xoff_rx, EVCNT_TYPE_MISC,
5644 NULL, device_xname(sc->sc_dev), "Rx xoff");
5645 evcnt_attach_dynamic(&isc->isc_link_xoff_tx, EVCNT_TYPE_MISC,
5646 NULL, device_xname(sc->sc_dev), "Tx xoff");
5647 evcnt_attach_dynamic(&isc->isc_rx_fragments, EVCNT_TYPE_MISC,
5648 NULL, device_xname(sc->sc_dev), "Rx fragments");
5649 evcnt_attach_dynamic(&isc->isc_rx_jabber, EVCNT_TYPE_MISC,
5650 NULL, device_xname(sc->sc_dev), "Rx jabber");
5651
5652 evcnt_attach_dynamic(&isc->isc_rx_size_64, EVCNT_TYPE_MISC,
5653 NULL, device_xname(sc->sc_dev), "Rx size 64");
5654 evcnt_attach_dynamic(&isc->isc_rx_size_127, EVCNT_TYPE_MISC,
5655 NULL, device_xname(sc->sc_dev), "Rx size 127");
5656 evcnt_attach_dynamic(&isc->isc_rx_size_255, EVCNT_TYPE_MISC,
5657 NULL, device_xname(sc->sc_dev), "Rx size 255");
5658 evcnt_attach_dynamic(&isc->isc_rx_size_511, EVCNT_TYPE_MISC,
5659 NULL, device_xname(sc->sc_dev), "Rx size 511");
5660 evcnt_attach_dynamic(&isc->isc_rx_size_1023, EVCNT_TYPE_MISC,
5661 NULL, device_xname(sc->sc_dev), "Rx size 1023");
5662 evcnt_attach_dynamic(&isc->isc_rx_size_1522, EVCNT_TYPE_MISC,
5663 NULL, device_xname(sc->sc_dev), "Rx size 1522");
5664 evcnt_attach_dynamic(&isc->isc_rx_size_big, EVCNT_TYPE_MISC,
5665 NULL, device_xname(sc->sc_dev), "Rx jumbo packets");
5666 evcnt_attach_dynamic(&isc->isc_rx_undersize, EVCNT_TYPE_MISC,
5667 NULL, device_xname(sc->sc_dev), "Rx under size");
5668 evcnt_attach_dynamic(&isc->isc_rx_oversize, EVCNT_TYPE_MISC,
5669 NULL, device_xname(sc->sc_dev), "Rx over size");
5670
5671 evcnt_attach_dynamic(&isc->isc_rx_bytes, EVCNT_TYPE_MISC,
5672 NULL, device_xname(sc->sc_dev), "Rx bytes / port");
5673 evcnt_attach_dynamic(&isc->isc_rx_discards, EVCNT_TYPE_MISC,
5674 NULL, device_xname(sc->sc_dev), "Rx discards / port");
5675 evcnt_attach_dynamic(&isc->isc_rx_unicast, EVCNT_TYPE_MISC,
5676 NULL, device_xname(sc->sc_dev), "Rx unicast / port");
5677 evcnt_attach_dynamic(&isc->isc_rx_multicast, EVCNT_TYPE_MISC,
5678 NULL, device_xname(sc->sc_dev), "Rx multicast / port");
5679 evcnt_attach_dynamic(&isc->isc_rx_broadcast, EVCNT_TYPE_MISC,
5680 NULL, device_xname(sc->sc_dev), "Rx broadcast / port");
5681
5682 evcnt_attach_dynamic(&isc->isc_vsi_rx_bytes, EVCNT_TYPE_MISC,
5683 NULL, device_xname(sc->sc_dev), "Rx bytes / vsi");
5684 evcnt_attach_dynamic(&isc->isc_vsi_rx_discards, EVCNT_TYPE_MISC,
5685 NULL, device_xname(sc->sc_dev), "Rx discard / vsi");
5686 evcnt_attach_dynamic(&isc->isc_vsi_rx_unicast, EVCNT_TYPE_MISC,
5687 NULL, device_xname(sc->sc_dev), "Rx unicast / vsi");
5688 evcnt_attach_dynamic(&isc->isc_vsi_rx_multicast, EVCNT_TYPE_MISC,
5689 NULL, device_xname(sc->sc_dev), "Rx multicast / vsi");
5690 evcnt_attach_dynamic(&isc->isc_vsi_rx_broadcast, EVCNT_TYPE_MISC,
5691 NULL, device_xname(sc->sc_dev), "Rx broadcast / vsi");
5692
5693 evcnt_attach_dynamic(&isc->isc_tx_size_64, EVCNT_TYPE_MISC,
5694 NULL, device_xname(sc->sc_dev), "Tx size 64");
5695 evcnt_attach_dynamic(&isc->isc_tx_size_127, EVCNT_TYPE_MISC,
5696 NULL, device_xname(sc->sc_dev), "Tx size 127");
5697 evcnt_attach_dynamic(&isc->isc_tx_size_255, EVCNT_TYPE_MISC,
5698 NULL, device_xname(sc->sc_dev), "Tx size 255");
5699 evcnt_attach_dynamic(&isc->isc_tx_size_511, EVCNT_TYPE_MISC,
5700 NULL, device_xname(sc->sc_dev), "Tx size 511");
5701 evcnt_attach_dynamic(&isc->isc_tx_size_1023, EVCNT_TYPE_MISC,
5702 NULL, device_xname(sc->sc_dev), "Tx size 1023");
5703 evcnt_attach_dynamic(&isc->isc_tx_size_1522, EVCNT_TYPE_MISC,
5704 NULL, device_xname(sc->sc_dev), "Tx size 1522");
5705 evcnt_attach_dynamic(&isc->isc_tx_size_big, EVCNT_TYPE_MISC,
5706 NULL, device_xname(sc->sc_dev), "Tx jumbo packets");
5707
5708 evcnt_attach_dynamic(&isc->isc_tx_bytes, EVCNT_TYPE_MISC,
5709 NULL, device_xname(sc->sc_dev), "Tx bytes / port");
5710 evcnt_attach_dynamic(&isc->isc_tx_dropped_link_down, EVCNT_TYPE_MISC,
5711 NULL, device_xname(sc->sc_dev),
5712 "Tx dropped due to link down / port");
5713 evcnt_attach_dynamic(&isc->isc_tx_unicast, EVCNT_TYPE_MISC,
5714 NULL, device_xname(sc->sc_dev), "Tx unicast / port");
5715 evcnt_attach_dynamic(&isc->isc_tx_multicast, EVCNT_TYPE_MISC,
5716 NULL, device_xname(sc->sc_dev), "Tx multicast / port");
5717 evcnt_attach_dynamic(&isc->isc_tx_broadcast, EVCNT_TYPE_MISC,
5718 NULL, device_xname(sc->sc_dev), "Tx broadcast / port");
5719
5720 evcnt_attach_dynamic(&isc->isc_vsi_tx_bytes, EVCNT_TYPE_MISC,
5721 NULL, device_xname(sc->sc_dev), "Tx bytes / vsi");
5722 evcnt_attach_dynamic(&isc->isc_vsi_tx_errors, EVCNT_TYPE_MISC,
5723 NULL, device_xname(sc->sc_dev), "Tx errors / vsi");
5724 evcnt_attach_dynamic(&isc->isc_vsi_tx_unicast, EVCNT_TYPE_MISC,
5725 NULL, device_xname(sc->sc_dev), "Tx unicast / vsi");
5726 evcnt_attach_dynamic(&isc->isc_vsi_tx_multicast, EVCNT_TYPE_MISC,
5727 NULL, device_xname(sc->sc_dev), "Tx multicast / vsi");
5728 evcnt_attach_dynamic(&isc->isc_vsi_tx_broadcast, EVCNT_TYPE_MISC,
5729 NULL, device_xname(sc->sc_dev), "Tx broadcast / vsi");
5730
5731 sc->sc_stats_intval = ixl_param_stats_interval;
5732 callout_init(&sc->sc_stats_callout, CALLOUT_MPSAFE);
5733 callout_setfunc(&sc->sc_stats_callout, ixl_stats_callout, sc);
5734 ixl_work_set(&sc->sc_stats_task, ixl_stats_update, sc);
5735
5736 return 0;
5737 }
5738
5739 static void
5740 ixl_teardown_stats(struct ixl_softc *sc)
5741 {
5742 struct ixl_tx_ring *txr;
5743 struct ixl_rx_ring *rxr;
5744 struct ixl_stats_counters *isc;
5745 unsigned int i;
5746
5747 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
5748 txr = sc->sc_qps[i].qp_txr;
5749 rxr = sc->sc_qps[i].qp_rxr;
5750
5751 evcnt_detach(&txr->txr_defragged);
5752 evcnt_detach(&txr->txr_defrag_failed);
5753 evcnt_detach(&txr->txr_pcqdrop);
5754 evcnt_detach(&txr->txr_transmitdef);
5755 evcnt_detach(&txr->txr_intr);
5756 evcnt_detach(&txr->txr_defer);
5757
5758 evcnt_detach(&rxr->rxr_mgethdr_failed);
5759 evcnt_detach(&rxr->rxr_mgetcl_failed);
5760 evcnt_detach(&rxr->rxr_mbuf_load_failed);
5761 evcnt_detach(&rxr->rxr_intr);
5762 evcnt_detach(&rxr->rxr_defer);
5763 }
5764
5765 isc = &sc->sc_stats_counters;
5766 evcnt_detach(&isc->isc_crc_errors);
5767 evcnt_detach(&isc->isc_illegal_bytes);
5768 evcnt_detach(&isc->isc_mac_local_faults);
5769 evcnt_detach(&isc->isc_mac_remote_faults);
5770 evcnt_detach(&isc->isc_link_xon_rx);
5771 evcnt_detach(&isc->isc_link_xon_tx);
5772 evcnt_detach(&isc->isc_link_xoff_rx);
5773 evcnt_detach(&isc->isc_link_xoff_tx);
5774 evcnt_detach(&isc->isc_rx_fragments);
5775 evcnt_detach(&isc->isc_rx_jabber);
5776 evcnt_detach(&isc->isc_rx_bytes);
5777 evcnt_detach(&isc->isc_rx_discards);
5778 evcnt_detach(&isc->isc_rx_unicast);
5779 evcnt_detach(&isc->isc_rx_multicast);
5780 evcnt_detach(&isc->isc_rx_broadcast);
5781 evcnt_detach(&isc->isc_rx_size_64);
5782 evcnt_detach(&isc->isc_rx_size_127);
5783 evcnt_detach(&isc->isc_rx_size_255);
5784 evcnt_detach(&isc->isc_rx_size_511);
5785 evcnt_detach(&isc->isc_rx_size_1023);
5786 evcnt_detach(&isc->isc_rx_size_1522);
5787 evcnt_detach(&isc->isc_rx_size_big);
5788 evcnt_detach(&isc->isc_rx_undersize);
5789 evcnt_detach(&isc->isc_rx_oversize);
5790 evcnt_detach(&isc->isc_tx_bytes);
5791 evcnt_detach(&isc->isc_tx_dropped_link_down);
5792 evcnt_detach(&isc->isc_tx_unicast);
5793 evcnt_detach(&isc->isc_tx_multicast);
5794 evcnt_detach(&isc->isc_tx_broadcast);
5795 evcnt_detach(&isc->isc_tx_size_64);
5796 evcnt_detach(&isc->isc_tx_size_127);
5797 evcnt_detach(&isc->isc_tx_size_255);
5798 evcnt_detach(&isc->isc_tx_size_511);
5799 evcnt_detach(&isc->isc_tx_size_1023);
5800 evcnt_detach(&isc->isc_tx_size_1522);
5801 evcnt_detach(&isc->isc_tx_size_big);
5802 evcnt_detach(&isc->isc_vsi_rx_discards);
5803 evcnt_detach(&isc->isc_vsi_rx_bytes);
5804 evcnt_detach(&isc->isc_vsi_rx_unicast);
5805 evcnt_detach(&isc->isc_vsi_rx_multicast);
5806 evcnt_detach(&isc->isc_vsi_rx_broadcast);
5807 evcnt_detach(&isc->isc_vsi_tx_errors);
5808 evcnt_detach(&isc->isc_vsi_tx_bytes);
5809 evcnt_detach(&isc->isc_vsi_tx_unicast);
5810 evcnt_detach(&isc->isc_vsi_tx_multicast);
5811 evcnt_detach(&isc->isc_vsi_tx_broadcast);
5812
5813 evcnt_detach(&sc->sc_event_atq);
5814 evcnt_detach(&sc->sc_event_link);
5815 evcnt_detach(&sc->sc_event_ecc_err);
5816 evcnt_detach(&sc->sc_event_pci_exception);
5817 evcnt_detach(&sc->sc_event_crit_err);
5818
5819 callout_destroy(&sc->sc_stats_callout);
5820 }
5821
5822 static void
5823 ixl_stats_callout(void *xsc)
5824 {
5825 struct ixl_softc *sc = xsc;
5826
5827 ixl_work_add(sc->sc_workq, &sc->sc_stats_task);
5828 callout_schedule(&sc->sc_stats_callout, mstohz(sc->sc_stats_intval));
5829 }
5830
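/*
 * Read a free-running 32- or 48-bit hardware statistics counter
 * (reg_hi == 0 selects the 32-bit layout) and return the increment since
 * the previously saved snapshot, compensating for counter wrap-around;
 * the snapshot in *offset is updated as a side effect.
 */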
5831 static uint64_t
5832 ixl_stat_delta(struct ixl_softc *sc, uint32_t reg_hi, uint32_t reg_lo,
5833 uint64_t *offset, bool has_offset)
5834 {
5835 uint64_t value, delta;
5836 int bitwidth;
5837
5838 bitwidth = reg_hi == 0 ? 32 : 48;
5839
5840 value = ixl_rd(sc, reg_lo);
5841
5842 if (bitwidth > 32) {
5843 value |= ((uint64_t)ixl_rd(sc, reg_hi) << 32);
5844 }
5845
5846 if (__predict_true(has_offset)) {
5847 delta = value;
5848 if (value < *offset)
5849 delta += ((uint64_t)1 << bitwidth);
5850 delta -= *offset;
5851 } else {
5852 delta = 0;
5853 }
5854 atomic_swap_64(offset, value);
5855
5856 return delta;
5857 }
5858
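/*
 * Descriptive comment (added): workqueue task that walks the port
 * (GLPRT_*) and VSI (GLV_*) statistics registers, converts each raw
 * counter into a delta via ixl_stat_delta() and accumulates the delta
 * into the matching event counter.
 */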
5859 static void
5860 ixl_stats_update(void *xsc)
5861 {
5862 struct ixl_softc *sc = xsc;
5863 struct ixl_stats_counters *isc;
5864 uint64_t delta;
5865
5866 isc = &sc->sc_stats_counters;
5867
5868 /* errors */
5869 delta = ixl_stat_delta(sc,
5870 0, I40E_GLPRT_CRCERRS(sc->sc_port),
5871 &isc->isc_crc_errors_offset, isc->isc_has_offset);
5872 atomic_add_64(&isc->isc_crc_errors.ev_count, delta);
5873
5874 delta = ixl_stat_delta(sc,
5875 0, I40E_GLPRT_ILLERRC(sc->sc_port),
5876 &isc->isc_illegal_bytes_offset, isc->isc_has_offset);
5877 atomic_add_64(&isc->isc_illegal_bytes.ev_count, delta);
5878
5879 /* rx */
5880 delta = ixl_stat_delta(sc,
5881 I40E_GLPRT_GORCH(sc->sc_port), I40E_GLPRT_GORCL(sc->sc_port),
5882 &isc->isc_rx_bytes_offset, isc->isc_has_offset);
5883 atomic_add_64(&isc->isc_rx_bytes.ev_count, delta);
5884
5885 delta = ixl_stat_delta(sc,
5886 0, I40E_GLPRT_RDPC(sc->sc_port),
5887 &isc->isc_rx_discards_offset, isc->isc_has_offset);
5888 atomic_add_64(&isc->isc_rx_discards.ev_count, delta);
5889
5890 delta = ixl_stat_delta(sc,
5891 I40E_GLPRT_UPRCH(sc->sc_port), I40E_GLPRT_UPRCL(sc->sc_port),
5892 &isc->isc_rx_unicast_offset, isc->isc_has_offset);
5893 atomic_add_64(&isc->isc_rx_unicast.ev_count, delta);
5894
5895 delta = ixl_stat_delta(sc,
5896 I40E_GLPRT_MPRCH(sc->sc_port), I40E_GLPRT_MPRCL(sc->sc_port),
5897 &isc->isc_rx_multicast_offset, isc->isc_has_offset);
5898 atomic_add_64(&isc->isc_rx_multicast.ev_count, delta);
5899
5900 delta = ixl_stat_delta(sc,
5901 I40E_GLPRT_BPRCH(sc->sc_port), I40E_GLPRT_BPRCL(sc->sc_port),
5902 &isc->isc_rx_broadcast_offset, isc->isc_has_offset);
5903 atomic_add_64(&isc->isc_rx_broadcast.ev_count, delta);
5904
5905 /* Packet size stats rx */
5906 delta = ixl_stat_delta(sc,
5907 I40E_GLPRT_PRC64H(sc->sc_port), I40E_GLPRT_PRC64L(sc->sc_port),
5908 &isc->isc_rx_size_64_offset, isc->isc_has_offset);
5909 atomic_add_64(&isc->isc_rx_size_64.ev_count, delta);
5910
5911 delta = ixl_stat_delta(sc,
5912 I40E_GLPRT_PRC127H(sc->sc_port), I40E_GLPRT_PRC127L(sc->sc_port),
5913 &isc->isc_rx_size_127_offset, isc->isc_has_offset);
5914 atomic_add_64(&isc->isc_rx_size_127.ev_count, delta);
5915
5916 delta = ixl_stat_delta(sc,
5917 I40E_GLPRT_PRC255H(sc->sc_port), I40E_GLPRT_PRC255L(sc->sc_port),
5918 &isc->isc_rx_size_255_offset, isc->isc_has_offset);
5919 atomic_add_64(&isc->isc_rx_size_255.ev_count, delta);
5920
5921 delta = ixl_stat_delta(sc,
5922 I40E_GLPRT_PRC511H(sc->sc_port), I40E_GLPRT_PRC511L(sc->sc_port),
5923 &isc->isc_rx_size_511_offset, isc->isc_has_offset);
5924 atomic_add_64(&isc->isc_rx_size_511.ev_count, delta);
5925
5926 delta = ixl_stat_delta(sc,
5927 I40E_GLPRT_PRC1023H(sc->sc_port), I40E_GLPRT_PRC1023L(sc->sc_port),
5928 &isc->isc_rx_size_1023_offset, isc->isc_has_offset);
5929 atomic_add_64(&isc->isc_rx_size_1023.ev_count, delta);
5930
5931 delta = ixl_stat_delta(sc,
5932 I40E_GLPRT_PRC1522H(sc->sc_port), I40E_GLPRT_PRC1522L(sc->sc_port),
5933 &isc->isc_rx_size_1522_offset, isc->isc_has_offset);
5934 atomic_add_64(&isc->isc_rx_size_1522.ev_count, delta);
5935
5936 delta = ixl_stat_delta(sc,
5937 I40E_GLPRT_PRC9522H(sc->sc_port), I40E_GLPRT_PRC9522L(sc->sc_port),
5938 &isc->isc_rx_size_big_offset, isc->isc_has_offset);
5939 atomic_add_64(&isc->isc_rx_size_big.ev_count, delta);
5940
5941 delta = ixl_stat_delta(sc,
5942 0, I40E_GLPRT_RUC(sc->sc_port),
5943 &isc->isc_rx_undersize_offset, isc->isc_has_offset);
5944 atomic_add_64(&isc->isc_rx_undersize.ev_count, delta);
5945
5946 delta = ixl_stat_delta(sc,
5947 0, I40E_GLPRT_ROC(sc->sc_port),
5948 &isc->isc_rx_oversize_offset, isc->isc_has_offset);
5949 atomic_add_64(&isc->isc_rx_oversize.ev_count, delta);
5950
5951 /* tx */
5952 delta = ixl_stat_delta(sc,
5953 I40E_GLPRT_GOTCH(sc->sc_port), I40E_GLPRT_GOTCL(sc->sc_port),
5954 &isc->isc_tx_bytes_offset, isc->isc_has_offset);
5955 atomic_add_64(&isc->isc_tx_bytes.ev_count, delta);
5956
5957 delta = ixl_stat_delta(sc,
5958 0, I40E_GLPRT_TDOLD(sc->sc_port),
5959 &isc->isc_tx_dropped_link_down_offset, isc->isc_has_offset);
5960 atomic_add_64(&isc->isc_tx_dropped_link_down.ev_count, delta);
5961
5962 delta = ixl_stat_delta(sc,
5963 I40E_GLPRT_UPTCH(sc->sc_port), I40E_GLPRT_UPTCL(sc->sc_port),
5964 &isc->isc_tx_unicast_offset, isc->isc_has_offset);
5965 atomic_add_64(&isc->isc_tx_unicast.ev_count, delta);
5966
5967 delta = ixl_stat_delta(sc,
5968 I40E_GLPRT_MPTCH(sc->sc_port), I40E_GLPRT_MPTCL(sc->sc_port),
5969 &isc->isc_tx_multicast_offset, isc->isc_has_offset);
5970 atomic_add_64(&isc->isc_tx_multicast.ev_count, delta);
5971
5972 delta = ixl_stat_delta(sc,
5973 I40E_GLPRT_BPTCH(sc->sc_port), I40E_GLPRT_BPTCL(sc->sc_port),
5974 &isc->isc_tx_broadcast_offset, isc->isc_has_offset);
5975 atomic_add_64(&isc->isc_tx_broadcast.ev_count, delta);
5976
5977 /* Packet size stats tx */
5978 delta = ixl_stat_delta(sc,
5979 	    I40E_GLPRT_PTC64H(sc->sc_port), I40E_GLPRT_PTC64L(sc->sc_port),
5980 &isc->isc_tx_size_64_offset, isc->isc_has_offset);
5981 atomic_add_64(&isc->isc_tx_size_64.ev_count, delta);
5982
5983 delta = ixl_stat_delta(sc,
5984 I40E_GLPRT_PTC127H(sc->sc_port), I40E_GLPRT_PTC127L(sc->sc_port),
5985 &isc->isc_tx_size_127_offset, isc->isc_has_offset);
5986 atomic_add_64(&isc->isc_tx_size_127.ev_count, delta);
5987
5988 delta = ixl_stat_delta(sc,
5989 I40E_GLPRT_PTC255H(sc->sc_port), I40E_GLPRT_PTC255L(sc->sc_port),
5990 &isc->isc_tx_size_255_offset, isc->isc_has_offset);
5991 atomic_add_64(&isc->isc_tx_size_255.ev_count, delta);
5992
5993 delta = ixl_stat_delta(sc,
5994 I40E_GLPRT_PTC511H(sc->sc_port), I40E_GLPRT_PTC511L(sc->sc_port),
5995 &isc->isc_tx_size_511_offset, isc->isc_has_offset);
5996 atomic_add_64(&isc->isc_tx_size_511.ev_count, delta);
5997
5998 delta = ixl_stat_delta(sc,
5999 I40E_GLPRT_PTC1023H(sc->sc_port), I40E_GLPRT_PTC1023L(sc->sc_port),
6000 &isc->isc_tx_size_1023_offset, isc->isc_has_offset);
6001 atomic_add_64(&isc->isc_tx_size_1023.ev_count, delta);
6002
6003 delta = ixl_stat_delta(sc,
6004 I40E_GLPRT_PTC1522H(sc->sc_port), I40E_GLPRT_PTC1522L(sc->sc_port),
6005 &isc->isc_tx_size_1522_offset, isc->isc_has_offset);
6006 atomic_add_64(&isc->isc_tx_size_1522.ev_count, delta);
6007
6008 delta = ixl_stat_delta(sc,
6009 I40E_GLPRT_PTC9522H(sc->sc_port), I40E_GLPRT_PTC9522L(sc->sc_port),
6010 &isc->isc_tx_size_big_offset, isc->isc_has_offset);
6011 atomic_add_64(&isc->isc_tx_size_big.ev_count, delta);
6012
6013 /* mac faults */
6014 delta = ixl_stat_delta(sc,
6015 0, I40E_GLPRT_MLFC(sc->sc_port),
6016 &isc->isc_mac_local_faults_offset, isc->isc_has_offset);
6017 atomic_add_64(&isc->isc_mac_local_faults.ev_count, delta);
6018
6019 delta = ixl_stat_delta(sc,
6020 0, I40E_GLPRT_MRFC(sc->sc_port),
6021 &isc->isc_mac_remote_faults_offset, isc->isc_has_offset);
6022 atomic_add_64(&isc->isc_mac_remote_faults.ev_count, delta);
6023
6024 /* Flow control (LFC) stats */
6025 delta = ixl_stat_delta(sc,
6026 0, I40E_GLPRT_LXONRXC(sc->sc_port),
6027 &isc->isc_link_xon_rx_offset, isc->isc_has_offset);
6028 atomic_add_64(&isc->isc_link_xon_rx.ev_count, delta);
6029
6030 delta = ixl_stat_delta(sc,
6031 0, I40E_GLPRT_LXONTXC(sc->sc_port),
6032 &isc->isc_link_xon_tx_offset, isc->isc_has_offset);
6033 atomic_add_64(&isc->isc_link_xon_tx.ev_count, delta);
6034
6035 delta = ixl_stat_delta(sc,
6036 0, I40E_GLPRT_LXOFFRXC(sc->sc_port),
6037 &isc->isc_link_xoff_rx_offset, isc->isc_has_offset);
6038 atomic_add_64(&isc->isc_link_xoff_rx.ev_count, delta);
6039
6040 delta = ixl_stat_delta(sc,
6041 0, I40E_GLPRT_LXOFFTXC(sc->sc_port),
6042 &isc->isc_link_xoff_tx_offset, isc->isc_has_offset);
6043 atomic_add_64(&isc->isc_link_xoff_tx.ev_count, delta);
6044
6045 /* fragments */
6046 delta = ixl_stat_delta(sc,
6047 0, I40E_GLPRT_RFC(sc->sc_port),
6048 &isc->isc_rx_fragments_offset, isc->isc_has_offset);
6049 atomic_add_64(&isc->isc_rx_fragments.ev_count, delta);
6050
6051 delta = ixl_stat_delta(sc,
6052 0, I40E_GLPRT_RJC(sc->sc_port),
6053 &isc->isc_rx_jabber_offset, isc->isc_has_offset);
6054 atomic_add_64(&isc->isc_rx_jabber.ev_count, delta);
6055
6056 /* VSI rx counters */
6057 delta = ixl_stat_delta(sc,
6058 0, I40E_GLV_RDPC(sc->sc_vsi_stat_counter_idx),
6059 &isc->isc_vsi_rx_discards_offset, isc->isc_has_offset);
6060 atomic_add_64(&isc->isc_vsi_rx_discards.ev_count, delta);
6061
6062 delta = ixl_stat_delta(sc,
6063 I40E_GLV_GORCH(sc->sc_vsi_stat_counter_idx),
6064 I40E_GLV_GORCL(sc->sc_vsi_stat_counter_idx),
6065 &isc->isc_vsi_rx_bytes_offset, isc->isc_has_offset);
6066 atomic_add_64(&isc->isc_vsi_rx_bytes.ev_count, delta);
6067
6068 delta = ixl_stat_delta(sc,
6069 I40E_GLV_UPRCH(sc->sc_vsi_stat_counter_idx),
6070 I40E_GLV_UPRCL(sc->sc_vsi_stat_counter_idx),
6071 &isc->isc_vsi_rx_unicast_offset, isc->isc_has_offset);
6072 atomic_add_64(&isc->isc_vsi_rx_unicast.ev_count, delta);
6073
6074 delta = ixl_stat_delta(sc,
6075 I40E_GLV_MPRCH(sc->sc_vsi_stat_counter_idx),
6076 I40E_GLV_MPRCL(sc->sc_vsi_stat_counter_idx),
6077 &isc->isc_vsi_rx_multicast_offset, isc->isc_has_offset);
6078 atomic_add_64(&isc->isc_vsi_rx_multicast.ev_count, delta);
6079
6080 delta = ixl_stat_delta(sc,
6081 I40E_GLV_BPRCH(sc->sc_vsi_stat_counter_idx),
6082 I40E_GLV_BPRCL(sc->sc_vsi_stat_counter_idx),
6083 &isc->isc_vsi_rx_broadcast_offset, isc->isc_has_offset);
6084 atomic_add_64(&isc->isc_vsi_rx_broadcast.ev_count, delta);
6085
6086 /* VSI tx counters */
6087 delta = ixl_stat_delta(sc,
6088 0, I40E_GLV_TEPC(sc->sc_vsi_stat_counter_idx),
6089 &isc->isc_vsi_tx_errors_offset, isc->isc_has_offset);
6090 atomic_add_64(&isc->isc_vsi_tx_errors.ev_count, delta);
6091
6092 delta = ixl_stat_delta(sc,
6093 I40E_GLV_GOTCH(sc->sc_vsi_stat_counter_idx),
6094 I40E_GLV_GOTCL(sc->sc_vsi_stat_counter_idx),
6095 &isc->isc_vsi_tx_bytes_offset, isc->isc_has_offset);
6096 atomic_add_64(&isc->isc_vsi_tx_bytes.ev_count, delta);
6097
6098 delta = ixl_stat_delta(sc,
6099 I40E_GLV_UPTCH(sc->sc_vsi_stat_counter_idx),
6100 I40E_GLV_UPTCL(sc->sc_vsi_stat_counter_idx),
6101 &isc->isc_vsi_tx_unicast_offset, isc->isc_has_offset);
6102 atomic_add_64(&isc->isc_vsi_tx_unicast.ev_count, delta);
6103
6104 delta = ixl_stat_delta(sc,
6105 I40E_GLV_MPTCH(sc->sc_vsi_stat_counter_idx),
6106 I40E_GLV_MPTCL(sc->sc_vsi_stat_counter_idx),
6107 &isc->isc_vsi_tx_multicast_offset, isc->isc_has_offset);
6108 atomic_add_64(&isc->isc_vsi_tx_multicast.ev_count, delta);
6109
6110 delta = ixl_stat_delta(sc,
6111 I40E_GLV_BPTCH(sc->sc_vsi_stat_counter_idx),
6112 I40E_GLV_BPTCL(sc->sc_vsi_stat_counter_idx),
6113 &isc->isc_vsi_tx_broadcast_offset, isc->isc_has_offset);
6114 atomic_add_64(&isc->isc_vsi_tx_broadcast.ev_count, delta);
6115 }
6116
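/*
 * Descriptive comment (added): create the per-device sysctl subtree under
 * CTL_HW: txrx_workqueue, stats_interval, and the rx/tx nodes carrying the
 * interrupt and deferred processing limits.
 */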
6117 static int
6118 ixl_setup_sysctls(struct ixl_softc *sc)
6119 {
6120 const char *devname;
6121 struct sysctllog **log;
6122 const struct sysctlnode *rnode, *rxnode, *txnode;
6123 int error;
6124
6125 log = &sc->sc_sysctllog;
6126 devname = device_xname(sc->sc_dev);
6127
6128 error = sysctl_createv(log, 0, NULL, &rnode,
6129 0, CTLTYPE_NODE, devname,
6130 SYSCTL_DESCR("ixl information and settings"),
6131 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
6132 if (error)
6133 goto out;
6134
6135 error = sysctl_createv(log, 0, &rnode, NULL,
6136 CTLFLAG_READWRITE, CTLTYPE_BOOL, "txrx_workqueue",
6137 SYSCTL_DESCR("Use workqueue for packet processing"),
6138 NULL, 0, &sc->sc_txrx_workqueue, 0, CTL_CREATE, CTL_EOL);
6139 if (error)
6140 goto out;
6141
6142 error = sysctl_createv(log, 0, &rnode, NULL,
6143 CTLFLAG_READONLY, CTLTYPE_INT, "stats_interval",
6144 SYSCTL_DESCR("Statistics collection interval in milliseconds"),
6145 	    NULL, 0, &sc->sc_stats_intval, 0, CTL_CREATE, CTL_EOL);
	if (error)
		goto out;
6146
6147 error = sysctl_createv(log, 0, &rnode, &rxnode,
6148 0, CTLTYPE_NODE, "rx",
6149 SYSCTL_DESCR("ixl information and settings for Rx"),
6150 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
6151 if (error)
6152 goto out;
6153
6154 error = sysctl_createv(log, 0, &rxnode, NULL,
6155 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
6156 SYSCTL_DESCR("max number of Rx packets"
6157 	    " to process per interrupt"),
6158 NULL, 0, &sc->sc_rx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
6159 if (error)
6160 goto out;
6161
6162 error = sysctl_createv(log, 0, &rxnode, NULL,
6163 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
6164 SYSCTL_DESCR("max number of Rx packets"
6165 	    " to process in deferred context"),
6166 NULL, 0, &sc->sc_rx_process_limit, 0, CTL_CREATE, CTL_EOL);
6167 if (error)
6168 goto out;
6169
6170 error = sysctl_createv(log, 0, &rnode, &txnode,
6171 0, CTLTYPE_NODE, "tx",
6172 SYSCTL_DESCR("ixl information and settings for Tx"),
6173 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
6174 if (error)
6175 goto out;
6176
6177 error = sysctl_createv(log, 0, &txnode, NULL,
6178 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
6179 SYSCTL_DESCR("max number of Tx packets"
6180 	    " to process per interrupt"),
6181 NULL, 0, &sc->sc_tx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
6182 if (error)
6183 goto out;
6184
6185 error = sysctl_createv(log, 0, &txnode, NULL,
6186 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
6187 SYSCTL_DESCR("max number of Tx packets"
6188 	    " to process in deferred context"),
6189 NULL, 0, &sc->sc_tx_process_limit, 0, CTL_CREATE, CTL_EOL);
6190 if (error)
6191 goto out;
6192
6193 out:
6194 if (error) {
6195 aprint_error_dev(sc->sc_dev,
6196 "unable to create sysctl node\n");
6197 sysctl_teardown(log);
6198 }
6199
6200 return error;
6201 }
6202
6203 static void
6204 ixl_teardown_sysctls(struct ixl_softc *sc)
6205 {
6206
6207 sysctl_teardown(&sc->sc_sysctllog);
6208 }
6209
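/*
 * Descriptive comment (added): thin wrapper around workqueue_create(9)
 * that installs ixl_workq_work() as the common entry point and returns
 * the queue pointer directly, or NULL on failure.
 */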
6210 static struct workqueue *
6211 ixl_workq_create(const char *name, pri_t prio, int ipl, int flags)
6212 {
6213 struct workqueue *wq;
6214 int error;
6215
6216 error = workqueue_create(&wq, name, ixl_workq_work, NULL,
6217 prio, ipl, flags);
6218
6219 if (error)
6220 return NULL;
6221
6222 return wq;
6223 }
6224
6225 static void
6226 ixl_workq_destroy(struct workqueue *wq)
6227 {
6228
6229 workqueue_destroy(wq);
6230 }
6231
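/*
 * Descriptive comment (added): initialize a work item, recording its
 * handler and argument.
 */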
6232 static void
6233 ixl_work_set(struct ixl_work *work, void (*func)(void *), void *arg)
6234 {
6235
6236 memset(work, 0, sizeof(*work));
6237 work->ixw_func = func;
6238 work->ixw_arg = arg;
6239 }
6240
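/*
 * Descriptive comment (added): enqueue a work item.  The atomic
 * compare-and-swap on ixw_added ensures an item that is already pending
 * is not enqueued a second time.
 */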
6241 static void
6242 ixl_work_add(struct workqueue *wq, struct ixl_work *work)
6243 {
6244 if (atomic_cas_uint(&work->ixw_added, 0, 1) != 0)
6245 return;
6246
6247 workqueue_enqueue(wq, &work->ixw_cookie, NULL);
6248 }
6249
6250 static void
6251 ixl_work_wait(struct workqueue *wq, struct ixl_work *work)
6252 {
6253
6254 workqueue_wait(wq, &work->ixw_cookie);
6255 }
6256
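/*
 * Descriptive comment (added): common workqueue trampoline.  ixw_added is
 * cleared before the handler runs, so the item may be re-queued while it
 * is still executing.
 */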
6257 static void
6258 ixl_workq_work(struct work *wk, void *context)
6259 {
6260 struct ixl_work *work;
6261
6262 work = container_of(wk, struct ixl_work, ixw_cookie);
6263
6264 atomic_swap_uint(&work->ixw_added, 0);
6265 work->ixw_func(work->ixw_arg);
6266 }
6267
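/*
 * Descriptive comment (added): read an RX control register through the
 * admin queue (RX_CTL_REG_READ).  Returns 0 with the value in *rv on
 * success, ETIMEDOUT if the command times out, or EPERM/EAGAIN/EIO
 * depending on the admin queue return code.
 */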
6268 static int
6269 ixl_rx_ctl_read(struct ixl_softc *sc, uint32_t reg, uint32_t *rv)
6270 {
6271 struct ixl_aq_desc iaq;
6272
6273 memset(&iaq, 0, sizeof(iaq));
6274 iaq.iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_REG_READ);
6275 iaq.iaq_param[1] = htole32(reg);
6276
6277 if (ixl_atq_poll(sc, &iaq, 250) != 0)
6278 return ETIMEDOUT;
6279
6280 	switch (le16toh(iaq.iaq_retval)) {
6281 case IXL_AQ_RC_OK:
6282 /* success */
6283 break;
6284 case IXL_AQ_RC_EACCES:
6285 return EPERM;
6286 case IXL_AQ_RC_EAGAIN:
6287 return EAGAIN;
6288 default:
6289 return EIO;
6290 }
6291
6292 	*rv = le32toh(iaq.iaq_param[3]);
6293 return 0;
6294 }
6295
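/*
 * Descriptive comment (added): read an RX control register.  When
 * sc_rxctl_atq is set the admin queue interface is tried first, retrying
 * briefly on EAGAIN; otherwise, or if that fails, fall back to a direct
 * register read.
 */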
6296 static uint32_t
6297 ixl_rd_rx_csr(struct ixl_softc *sc, uint32_t reg)
6298 {
6299 uint32_t val;
6300 int rv, retry, retry_limit;
6301
6302 retry_limit = sc->sc_rxctl_atq ? 5 : 0;
6303
6304 for (retry = 0; retry < retry_limit; retry++) {
6305 rv = ixl_rx_ctl_read(sc, reg, &val);
6306 if (rv == 0)
6307 return val;
6308 else if (rv == EAGAIN)
6309 delaymsec(1);
6310 else
6311 break;
6312 }
6313
6314 val = ixl_rd(sc, reg);
6315
6316 return val;
6317 }
6318
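/*
 * Descriptive comment (added): write an RX control register through the
 * admin queue (RX_CTL_REG_WRITE), with the same error mapping as
 * ixl_rx_ctl_read().
 */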
6319 static int
6320 ixl_rx_ctl_write(struct ixl_softc *sc, uint32_t reg, uint32_t value)
6321 {
6322 struct ixl_aq_desc iaq;
6323
6324 memset(&iaq, 0, sizeof(iaq));
6325 iaq.iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_REG_WRITE);
6326 iaq.iaq_param[1] = htole32(reg);
6327 iaq.iaq_param[3] = htole32(value);
6328
6329 if (ixl_atq_poll(sc, &iaq, 250) != 0)
6330 return ETIMEDOUT;
6331
6332 	switch (le16toh(iaq.iaq_retval)) {
6333 case IXL_AQ_RC_OK:
6334 /* success */
6335 break;
6336 case IXL_AQ_RC_EACCES:
6337 return EPERM;
6338 case IXL_AQ_RC_EAGAIN:
6339 return EAGAIN;
6340 default:
6341 return EIO;
6342 }
6343
6344 return 0;
6345 }
6346
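/*
 * Descriptive comment (added): write an RX control register, preferring
 * the admin queue when sc_rxctl_atq is set and falling back to a direct
 * register write.
 */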
6347 static void
6348 ixl_wr_rx_csr(struct ixl_softc *sc, uint32_t reg, uint32_t value)
6349 {
6350 int rv, retry, retry_limit;
6351
6352 retry_limit = sc->sc_rxctl_atq ? 5 : 0;
6353
6354 for (retry = 0; retry < retry_limit; retry++) {
6355 rv = ixl_rx_ctl_write(sc, reg, value);
6356 if (rv == 0)
6357 return;
6358 else if (rv == EAGAIN)
6359 delaymsec(1);
6360 else
6361 break;
6362 }
6363
6364 ixl_wr(sc, reg, value);
6365 }
6366
6367 MODULE(MODULE_CLASS_DRIVER, if_ixl, "pci");
6368
6369 #ifdef _MODULE
6370 #include "ioconf.c"
6371 #endif
6372
6373 #ifdef _MODULE
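/*
 * Descriptive comment (added): when loaded as a module, pick up tunables
 * (nomsix, stats_interval, nqps_limit, rx_ndescs, tx_ndescs) from the
 * module property dictionary and override the corresponding ixl_param_*
 * defaults.
 */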
6374 static void
6375 ixl_parse_modprop(prop_dictionary_t dict)
6376 {
6377 prop_object_t obj;
6378 int64_t val;
6379 uint64_t uval;
6380
6381 if (dict == NULL)
6382 return;
6383
6384 obj = prop_dictionary_get(dict, "nomsix");
6385 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_BOOL) {
6386 ixl_param_nomsix = prop_bool_true((prop_bool_t)obj);
6387 }
6388
6389 obj = prop_dictionary_get(dict, "stats_interval");
6390 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
6391 val = prop_number_integer_value((prop_number_t)obj);
6392
6393 		/* the range is arbitrary */
6394 		if (100 < val && val < 180000) {
6395 ixl_param_stats_interval = val;
6396 }
6397 }
6398
6399 obj = prop_dictionary_get(dict, "nqps_limit");
6400 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
6401 val = prop_number_integer_value((prop_number_t)obj);
6402
6403 if (val <= INT32_MAX)
6404 ixl_param_nqps_limit = val;
6405 }
6406
6407 obj = prop_dictionary_get(dict, "rx_ndescs");
6408 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
6409 uval = prop_number_unsigned_integer_value((prop_number_t)obj);
6410
6411 if (uval > 8)
6412 ixl_param_rx_ndescs = uval;
6413 }
6414
6415 obj = prop_dictionary_get(dict, "tx_ndescs");
6416 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
6417 uval = prop_number_unsigned_integer_value((prop_number_t)obj);
6418
6419 if (uval > IXL_TX_PKT_DESCS)
6420 ixl_param_tx_ndescs = uval;
6421 }
6422
6423 }
6424 #endif
6425
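/*
 * Descriptive comment (added): module control entry point; attaches or
 * detaches the driver's autoconfiguration data when built as a loadable
 * module, and returns ENOTTY for unknown commands.
 */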
6426 static int
6427 if_ixl_modcmd(modcmd_t cmd, void *opaque)
6428 {
6429 int error = 0;
6430
6431 #ifdef _MODULE
6432 switch (cmd) {
6433 case MODULE_CMD_INIT:
6434 ixl_parse_modprop((prop_dictionary_t)opaque);
6435 error = config_init_component(cfdriver_ioconf_if_ixl,
6436 cfattach_ioconf_if_ixl, cfdata_ioconf_if_ixl);
6437 break;
6438 case MODULE_CMD_FINI:
6439 error = config_fini_component(cfdriver_ioconf_if_ixl,
6440 cfattach_ioconf_if_ixl, cfdata_ioconf_if_ixl);
6441 break;
6442 default:
6443 error = ENOTTY;
6444 break;
6445 }
6446 #endif
6447
6448 return error;
6449 }
6450