1 /*	$NetBSD: if_ixl.c,v 1.26 2020/01/17 09:42:05 yamaguchi Exp $	*/
2
3 /*
4 * Copyright (c) 2013-2015, Intel Corporation
5 * All rights reserved.
6
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
12 *
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * 3. Neither the name of the Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 /*
35 * Copyright (c) 2016,2017 David Gwynne <dlg (at) openbsd.org>
36 *
37 * Permission to use, copy, modify, and distribute this software for any
38 * purpose with or without fee is hereby granted, provided that the above
39 * copyright notice and this permission notice appear in all copies.
40 *
41 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
42 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
43 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
44 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
45 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
46 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
47 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
48 */
49
50 /*
51 * Copyright (c) 2019 Internet Initiative Japan, Inc.
52 * All rights reserved.
53 *
54 * Redistribution and use in source and binary forms, with or without
55 * modification, are permitted provided that the following conditions
56 * are met:
57 * 1. Redistributions of source code must retain the above copyright
58 * notice, this list of conditions and the following disclaimer.
59 * 2. Redistributions in binary form must reproduce the above copyright
60 * notice, this list of conditions and the following disclaimer in the
61 * documentation and/or other materials provided with the distribution.
62 *
63 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
64 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
65 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
66 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
67 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
68 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
69 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
70 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
71 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
72 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
73 * POSSIBILITY OF SUCH DAMAGE.
74 */
75
76 #include <sys/cdefs.h>
77
78 #ifdef _KERNEL_OPT
79 #include "opt_net_mpsafe.h"
80 #include "opt_if_ixl.h"
81 #endif
82
83 #include <sys/param.h>
84 #include <sys/types.h>
85
86 #include <sys/cpu.h>
87 #include <sys/device.h>
88 #include <sys/evcnt.h>
89 #include <sys/interrupt.h>
90 #include <sys/kmem.h>
91 #include <sys/malloc.h>
92 #include <sys/module.h>
93 #include <sys/mutex.h>
94 #include <sys/pcq.h>
95 #include <sys/syslog.h>
96 #include <sys/workqueue.h>
97
98 #include <sys/bus.h>
99
100 #include <net/bpf.h>
101 #include <net/if.h>
102 #include <net/if_dl.h>
103 #include <net/if_media.h>
104 #include <net/if_ether.h>
105 #include <net/rss_config.h>
106
107 #include <dev/pci/pcivar.h>
108 #include <dev/pci/pcidevs.h>
109
110 #include <dev/pci/if_ixlreg.h>
111 #include <dev/pci/if_ixlvar.h>
112
113 #include <prop/proplib.h>
114
115 struct ixl_softc; /* defined */
116
117 #define I40E_PF_RESET_WAIT_COUNT 200
118 #define I40E_AQ_LARGE_BUF 512
119
120 /* bitfields for Tx queue mapping in QTX_CTL */
121 #define I40E_QTX_CTL_VF_QUEUE 0x0
122 #define I40E_QTX_CTL_VM_QUEUE 0x1
123 #define I40E_QTX_CTL_PF_QUEUE 0x2
124
125 #define I40E_QUEUE_TYPE_EOL 0x7ff
126 #define I40E_INTR_NOTX_QUEUE 0
127
128 #define I40E_QUEUE_TYPE_RX 0x0
129 #define I40E_QUEUE_TYPE_TX 0x1
130 #define I40E_QUEUE_TYPE_PE_CEQ 0x2
131 #define I40E_QUEUE_TYPE_UNKNOWN 0x3
132
133 #define I40E_ITR_INDEX_RX 0x0
134 #define I40E_ITR_INDEX_TX 0x1
135 #define I40E_ITR_INDEX_OTHER 0x2
136 #define I40E_ITR_INDEX_NONE 0x3
137
138 #define I40E_INTR_NOTX_QUEUE 0
139 #define I40E_INTR_NOTX_INTR 0
140 #define I40E_INTR_NOTX_RX_QUEUE 0
141 #define I40E_INTR_NOTX_TX_QUEUE 1
142 #define I40E_INTR_NOTX_RX_MASK I40E_PFINT_ICR0_QUEUE_0_MASK
143 #define I40E_INTR_NOTX_TX_MASK I40E_PFINT_ICR0_QUEUE_1_MASK
144
145 #define BIT_ULL(a) (1ULL << (a))
146 #define IXL_RSS_HENA_DEFAULT_BASE \
147 (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
148 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
149 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
150 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
151 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
152 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
153 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
154 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
155 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
156 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
157 BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
158 #define IXL_RSS_HENA_DEFAULT_XL710 IXL_RSS_HENA_DEFAULT_BASE
159 #define IXL_RSS_HENA_DEFAULT_X722 (IXL_RSS_HENA_DEFAULT_XL710 | \
160 BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
161 BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
162 BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
163 BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
164 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
165 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK))
166 #define I40E_HASH_LUT_SIZE_128 0
167 #define IXL_RSS_KEY_SIZE_REG 13
168
169 #define IXL_ICR0_CRIT_ERR_MASK \
170 (I40E_PFINT_ICR0_PCI_EXCEPTION_MASK | \
171 I40E_PFINT_ICR0_ECC_ERR_MASK | \
172 I40E_PFINT_ICR0_PE_CRITERR_MASK)
173
174 #define IXL_TX_PKT_DESCS 8
175 #define IXL_TX_QUEUE_ALIGN 128
176 #define IXL_RX_QUEUE_ALIGN 128
177
178 #define IXL_HARDMTU 9712 /* 9726 - ETHER_HDR_LEN */
179
180 #define IXL_PCIREG PCI_MAPREG_START
181
182 #define IXL_ITR0 0x0
183 #define IXL_ITR1 0x1
184 #define IXL_ITR2 0x2
185 #define IXL_NOITR 0x3
186
187 #define IXL_AQ_NUM 256
188 #define IXL_AQ_MASK (IXL_AQ_NUM - 1)
189 #define IXL_AQ_ALIGN 64 /* lol */
190 #define IXL_AQ_BUFLEN 4096
191
192 #define IXL_HMC_ROUNDUP 512
193 #define IXL_HMC_PGSIZE 4096
194 #define IXL_HMC_DVASZ sizeof(uint64_t)
195 #define IXL_HMC_PGS (IXL_HMC_PGSIZE / IXL_HMC_DVASZ)
196 #define IXL_HMC_L2SZ (IXL_HMC_PGSIZE * IXL_HMC_PGS)
197 #define IXL_HMC_PDVALID 1ULL
198
199 #define IXL_ATQ_EXEC_TIMEOUT (10 * hz)
200
201 #define IXL_SRRD_SRCTL_ATTEMPTS 100000
202
203 struct ixl_aq_regs {
204 bus_size_t atq_tail;
205 bus_size_t atq_head;
206 bus_size_t atq_len;
207 bus_size_t atq_bal;
208 bus_size_t atq_bah;
209
210 bus_size_t arq_tail;
211 bus_size_t arq_head;
212 bus_size_t arq_len;
213 bus_size_t arq_bal;
214 bus_size_t arq_bah;
215
216 uint32_t atq_len_enable;
217 uint32_t atq_tail_mask;
218 uint32_t atq_head_mask;
219
220 uint32_t arq_len_enable;
221 uint32_t arq_tail_mask;
222 uint32_t arq_head_mask;
223 };
224
225 struct ixl_phy_type {
226 uint64_t phy_type;
227 uint64_t ifm_type;
228 };
229
230 struct ixl_speed_type {
231 uint8_t dev_speed;
232 uint64_t net_speed;
233 };
234
235 struct ixl_aq_buf {
236 SIMPLEQ_ENTRY(ixl_aq_buf)
237 aqb_entry;
238 void *aqb_data;
239 bus_dmamap_t aqb_map;
240 bus_dma_segment_t aqb_seg;
241 size_t aqb_size;
242 int aqb_nsegs;
243 };
244 SIMPLEQ_HEAD(ixl_aq_bufs, ixl_aq_buf);
245
246 struct ixl_dmamem {
247 bus_dmamap_t ixm_map;
248 bus_dma_segment_t ixm_seg;
249 int ixm_nsegs;
250 size_t ixm_size;
251 void *ixm_kva;
252 };
253
254 #define IXL_DMA_MAP(_ixm) ((_ixm)->ixm_map)
255 #define IXL_DMA_DVA(_ixm) ((_ixm)->ixm_map->dm_segs[0].ds_addr)
256 #define IXL_DMA_KVA(_ixm) ((void *)(_ixm)->ixm_kva)
257 #define IXL_DMA_LEN(_ixm) ((_ixm)->ixm_size)
258
259 struct ixl_hmc_entry {
260 uint64_t hmc_base;
261 uint32_t hmc_count;
262 uint64_t hmc_size;
263 };
264
265 enum ixl_hmc_types {
266 IXL_HMC_LAN_TX = 0,
267 IXL_HMC_LAN_RX,
268 IXL_HMC_FCOE_CTX,
269 IXL_HMC_FCOE_FILTER,
270 IXL_HMC_COUNT
271 };
272
273 struct ixl_hmc_pack {
274 uint16_t offset;
275 uint16_t width;
276 uint16_t lsb;
277 };
278
279 /*
280  * These HMC objects have awkward sizes and alignments, so these are
281  * abstract representations that are convenient for C code to populate.
282  *
283  * The packing code relies on little-endian values being stored in the
284  * fields, on no high bits being set beyond each field's width, and on the
285  * fields being packed in the same order as they appear in the ctx structure.
286 */
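
/*
 * Illustrative sketch (not part of the driver): how one { offset, width,
 * lsb } entry of a pack table could be applied to a single field.  The
 * driver's real packer is ixl_hmc_pack() below; the helper name here is
 * hypothetical, and dst is assumed to be zeroed beforehand.
 */
static inline void
example_hmc_pack_field(uint8_t *dst, const void *ctx,
    const struct ixl_hmc_pack *p)
{
	const uint8_t *src = (const uint8_t *)ctx + p->offset;
	unsigned int i, srcbit, dstbit;

	/* copy "width" bits of the little-endian field into dst at bit "lsb" */
	for (i = 0; i < p->width; i++) {
		srcbit = i;
		dstbit = p->lsb + i;

		if (src[srcbit / 8] & (1 << (srcbit % 8)))
			dst[dstbit / 8] |= (1 << (dstbit % 8));
	}
}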
287
288 struct ixl_hmc_rxq {
289 uint16_t head;
290 uint8_t cpuid;
291 uint64_t base;
292 #define IXL_HMC_RXQ_BASE_UNIT 128
293 uint16_t qlen;
294 uint16_t dbuff;
295 #define IXL_HMC_RXQ_DBUFF_UNIT 128
296 uint8_t hbuff;
297 #define IXL_HMC_RXQ_HBUFF_UNIT 64
298 uint8_t dtype;
299 #define IXL_HMC_RXQ_DTYPE_NOSPLIT 0x0
300 #define IXL_HMC_RXQ_DTYPE_HSPLIT 0x1
301 #define IXL_HMC_RXQ_DTYPE_SPLIT_ALWAYS 0x2
302 uint8_t dsize;
303 #define IXL_HMC_RXQ_DSIZE_16 0
304 #define IXL_HMC_RXQ_DSIZE_32 1
305 uint8_t crcstrip;
306 uint8_t fc_ena;
307 uint8_t l2sel;
308 uint8_t hsplit_0;
309 uint8_t hsplit_1;
310 uint8_t showiv;
311 uint16_t rxmax;
312 uint8_t tphrdesc_ena;
313 uint8_t tphwdesc_ena;
314 uint8_t tphdata_ena;
315 uint8_t tphhead_ena;
316 uint8_t lrxqthresh;
317 uint8_t prefena;
318 };
319
320 static const struct ixl_hmc_pack ixl_hmc_pack_rxq[] = {
321 { offsetof(struct ixl_hmc_rxq, head), 13, 0 },
322 { offsetof(struct ixl_hmc_rxq, cpuid), 8, 13 },
323 { offsetof(struct ixl_hmc_rxq, base), 57, 32 },
324 { offsetof(struct ixl_hmc_rxq, qlen), 13, 89 },
325 { offsetof(struct ixl_hmc_rxq, dbuff), 7, 102 },
326 { offsetof(struct ixl_hmc_rxq, hbuff), 5, 109 },
327 { offsetof(struct ixl_hmc_rxq, dtype), 2, 114 },
328 { offsetof(struct ixl_hmc_rxq, dsize), 1, 116 },
329 { offsetof(struct ixl_hmc_rxq, crcstrip), 1, 117 },
330 { offsetof(struct ixl_hmc_rxq, fc_ena), 1, 118 },
331 { offsetof(struct ixl_hmc_rxq, l2sel), 1, 119 },
332 { offsetof(struct ixl_hmc_rxq, hsplit_0), 4, 120 },
333 { offsetof(struct ixl_hmc_rxq, hsplit_1), 2, 124 },
334 { offsetof(struct ixl_hmc_rxq, showiv), 1, 127 },
335 { offsetof(struct ixl_hmc_rxq, rxmax), 14, 174 },
336 { offsetof(struct ixl_hmc_rxq, tphrdesc_ena), 1, 193 },
337 { offsetof(struct ixl_hmc_rxq, tphwdesc_ena), 1, 194 },
338 { offsetof(struct ixl_hmc_rxq, tphdata_ena), 1, 195 },
339 { offsetof(struct ixl_hmc_rxq, tphhead_ena), 1, 196 },
340 { offsetof(struct ixl_hmc_rxq, lrxqthresh), 3, 198 },
341 { offsetof(struct ixl_hmc_rxq, prefena), 1, 201 },
342 };
343
344 #define IXL_HMC_RXQ_MINSIZE (201 + 1)
345
346 struct ixl_hmc_txq {
347 uint16_t head;
348 uint8_t new_context;
349 uint64_t base;
350 #define IXL_HMC_TXQ_BASE_UNIT 128
351 uint8_t fc_ena;
352 uint8_t timesync_ena;
353 uint8_t fd_ena;
354 uint8_t alt_vlan_ena;
355 uint16_t thead_wb;
356 uint8_t cpuid;
357 uint8_t head_wb_ena;
358 #define IXL_HMC_TXQ_DESC_WB 0
359 #define IXL_HMC_TXQ_HEAD_WB 1
360 uint16_t qlen;
361 uint8_t tphrdesc_ena;
362 uint8_t tphrpacket_ena;
363 uint8_t tphwdesc_ena;
364 uint64_t head_wb_addr;
365 uint32_t crc;
366 uint16_t rdylist;
367 uint8_t rdylist_act;
368 };
369
370 static const struct ixl_hmc_pack ixl_hmc_pack_txq[] = {
371 { offsetof(struct ixl_hmc_txq, head), 13, 0 },
372 { offsetof(struct ixl_hmc_txq, new_context), 1, 30 },
373 { offsetof(struct ixl_hmc_txq, base), 57, 32 },
374 { offsetof(struct ixl_hmc_txq, fc_ena), 1, 89 },
375 { offsetof(struct ixl_hmc_txq, timesync_ena), 1, 90 },
376 { offsetof(struct ixl_hmc_txq, fd_ena), 1, 91 },
377 { offsetof(struct ixl_hmc_txq, alt_vlan_ena), 1, 92 },
378 { offsetof(struct ixl_hmc_txq, cpuid), 8, 96 },
379 /* line 1 */
380 { offsetof(struct ixl_hmc_txq, thead_wb), 13, 0 + 128 },
381 { offsetof(struct ixl_hmc_txq, head_wb_ena), 1, 32 + 128 },
382 { offsetof(struct ixl_hmc_txq, qlen), 13, 33 + 128 },
383 { offsetof(struct ixl_hmc_txq, tphrdesc_ena), 1, 46 + 128 },
384 { offsetof(struct ixl_hmc_txq, tphrpacket_ena), 1, 47 + 128 },
385 { offsetof(struct ixl_hmc_txq, tphwdesc_ena), 1, 48 + 128 },
386 { offsetof(struct ixl_hmc_txq, head_wb_addr), 64, 64 + 128 },
387 /* line 7 */
388 { offsetof(struct ixl_hmc_txq, crc), 32, 0 + (7*128) },
389 { offsetof(struct ixl_hmc_txq, rdylist), 10, 84 + (7*128) },
390 { offsetof(struct ixl_hmc_txq, rdylist_act), 1, 94 + (7*128) },
391 };
392
393 #define IXL_HMC_TXQ_MINSIZE (94 + (7*128) + 1)
394
395 struct ixl_work {
396 struct work ixw_cookie;
397 void (*ixw_func)(void *);
398 void *ixw_arg;
399 unsigned int ixw_added;
400 };
401 #define IXL_WORKQUEUE_PRI PRI_SOFTNET
402
403 struct ixl_tx_map {
404 struct mbuf *txm_m;
405 bus_dmamap_t txm_map;
406 unsigned int txm_eop;
407 };
408
409 struct ixl_tx_ring {
410 kmutex_t txr_lock;
411 struct ixl_softc *txr_sc;
412
413 unsigned int txr_prod;
414 unsigned int txr_cons;
415
416 struct ixl_tx_map *txr_maps;
417 struct ixl_dmamem txr_mem;
418
419 bus_size_t txr_tail;
420 unsigned int txr_qid;
421 pcq_t *txr_intrq;
422 void *txr_si;
423
424 uint64_t txr_oerrors; /* if_oerrors */
425 uint64_t txr_opackets; /* if_opackets */
426 uint64_t txr_obytes; /* if_obytes */
427 uint64_t txr_omcasts; /* if_omcasts */
428
429 struct evcnt txr_defragged;
430 struct evcnt txr_defrag_failed;
431 struct evcnt txr_pcqdrop;
432 struct evcnt txr_transmitdef;
433 struct evcnt txr_intr;
434 struct evcnt txr_defer;
435 };
436
437 struct ixl_rx_map {
438 struct mbuf *rxm_m;
439 bus_dmamap_t rxm_map;
440 };
441
442 struct ixl_rx_ring {
443 kmutex_t rxr_lock;
444
445 unsigned int rxr_prod;
446 unsigned int rxr_cons;
447
448 struct ixl_rx_map *rxr_maps;
449 struct ixl_dmamem rxr_mem;
450
451 struct mbuf *rxr_m_head;
452 struct mbuf **rxr_m_tail;
453
454 bus_size_t rxr_tail;
455 unsigned int rxr_qid;
456
457 uint64_t rxr_ipackets; /* if_ipackets */
458 uint64_t rxr_ibytes; /* if_ibytes */
459 uint64_t rxr_iqdrops; /* iqdrops */
460 uint64_t rxr_ierrors; /* if_ierrors */
461
462 struct evcnt rxr_mgethdr_failed;
463 struct evcnt rxr_mgetcl_failed;
464 struct evcnt rxr_mbuf_load_failed;
465 struct evcnt rxr_intr;
466 struct evcnt rxr_defer;
467 };
468
469 struct ixl_queue_pair {
470 struct ixl_softc *qp_sc;
471 struct ixl_tx_ring *qp_txr;
472 struct ixl_rx_ring *qp_rxr;
473
474 char qp_name[16];
475
476 void *qp_si;
477 struct ixl_work qp_task;
478 bool qp_workqueue;
479 };
480
481 struct ixl_atq {
482 struct ixl_aq_desc iatq_desc;
483 void (*iatq_fn)(struct ixl_softc *,
484 const struct ixl_aq_desc *);
485 };
486 SIMPLEQ_HEAD(ixl_atq_list, ixl_atq);
487
488 struct ixl_product {
489 unsigned int vendor_id;
490 unsigned int product_id;
491 };
492
493 struct ixl_stats_counters {
494 bool isc_has_offset;
495 struct evcnt isc_crc_errors;
496 uint64_t isc_crc_errors_offset;
497 struct evcnt isc_illegal_bytes;
498 uint64_t isc_illegal_bytes_offset;
499 struct evcnt isc_rx_bytes;
500 uint64_t isc_rx_bytes_offset;
501 struct evcnt isc_rx_discards;
502 uint64_t isc_rx_discards_offset;
503 struct evcnt isc_rx_unicast;
504 uint64_t isc_rx_unicast_offset;
505 struct evcnt isc_rx_multicast;
506 uint64_t isc_rx_multicast_offset;
507 struct evcnt isc_rx_broadcast;
508 uint64_t isc_rx_broadcast_offset;
509 struct evcnt isc_rx_size_64;
510 uint64_t isc_rx_size_64_offset;
511 struct evcnt isc_rx_size_127;
512 uint64_t isc_rx_size_127_offset;
513 struct evcnt isc_rx_size_255;
514 uint64_t isc_rx_size_255_offset;
515 struct evcnt isc_rx_size_511;
516 uint64_t isc_rx_size_511_offset;
517 struct evcnt isc_rx_size_1023;
518 uint64_t isc_rx_size_1023_offset;
519 struct evcnt isc_rx_size_1522;
520 uint64_t isc_rx_size_1522_offset;
521 struct evcnt isc_rx_size_big;
522 uint64_t isc_rx_size_big_offset;
523 struct evcnt isc_rx_undersize;
524 uint64_t isc_rx_undersize_offset;
525 struct evcnt isc_rx_oversize;
526 uint64_t isc_rx_oversize_offset;
527 struct evcnt isc_rx_fragments;
528 uint64_t isc_rx_fragments_offset;
529 struct evcnt isc_rx_jabber;
530 uint64_t isc_rx_jabber_offset;
531 struct evcnt isc_tx_bytes;
532 uint64_t isc_tx_bytes_offset;
533 struct evcnt isc_tx_dropped_link_down;
534 uint64_t isc_tx_dropped_link_down_offset;
535 struct evcnt isc_tx_unicast;
536 uint64_t isc_tx_unicast_offset;
537 struct evcnt isc_tx_multicast;
538 uint64_t isc_tx_multicast_offset;
539 struct evcnt isc_tx_broadcast;
540 uint64_t isc_tx_broadcast_offset;
541 struct evcnt isc_tx_size_64;
542 uint64_t isc_tx_size_64_offset;
543 struct evcnt isc_tx_size_127;
544 uint64_t isc_tx_size_127_offset;
545 struct evcnt isc_tx_size_255;
546 uint64_t isc_tx_size_255_offset;
547 struct evcnt isc_tx_size_511;
548 uint64_t isc_tx_size_511_offset;
549 struct evcnt isc_tx_size_1023;
550 uint64_t isc_tx_size_1023_offset;
551 struct evcnt isc_tx_size_1522;
552 uint64_t isc_tx_size_1522_offset;
553 struct evcnt isc_tx_size_big;
554 uint64_t isc_tx_size_big_offset;
555 struct evcnt isc_mac_local_faults;
556 uint64_t isc_mac_local_faults_offset;
557 struct evcnt isc_mac_remote_faults;
558 uint64_t isc_mac_remote_faults_offset;
559 struct evcnt isc_link_xon_rx;
560 uint64_t isc_link_xon_rx_offset;
561 struct evcnt isc_link_xon_tx;
562 uint64_t isc_link_xon_tx_offset;
563 struct evcnt isc_link_xoff_rx;
564 uint64_t isc_link_xoff_rx_offset;
565 struct evcnt isc_link_xoff_tx;
566 uint64_t isc_link_xoff_tx_offset;
567 struct evcnt isc_vsi_rx_discards;
568 uint64_t isc_vsi_rx_discards_offset;
569 struct evcnt isc_vsi_rx_bytes;
570 uint64_t isc_vsi_rx_bytes_offset;
571 struct evcnt isc_vsi_rx_unicast;
572 uint64_t isc_vsi_rx_unicast_offset;
573 struct evcnt isc_vsi_rx_multicast;
574 uint64_t isc_vsi_rx_multicast_offset;
575 struct evcnt isc_vsi_rx_broadcast;
576 uint64_t isc_vsi_rx_broadcast_offset;
577 struct evcnt isc_vsi_tx_errors;
578 uint64_t isc_vsi_tx_errors_offset;
579 struct evcnt isc_vsi_tx_bytes;
580 uint64_t isc_vsi_tx_bytes_offset;
581 struct evcnt isc_vsi_tx_unicast;
582 uint64_t isc_vsi_tx_unicast_offset;
583 struct evcnt isc_vsi_tx_multicast;
584 uint64_t isc_vsi_tx_multicast_offset;
585 struct evcnt isc_vsi_tx_broadcast;
586 uint64_t isc_vsi_tx_broadcast_offset;
587 };
588
589 /*
590 * Locking notes:
591  * + fields in ixl_tx_ring are protected by txr_lock (a spin mutex), and
592  *   fields in ixl_rx_ring are protected by rxr_lock (a spin mutex).
593  *   - only one of these ring locks may be held at a time.
594  * + fields named sc_atq_* in ixl_softc are protected by sc_atq_lock
595  *   (a spin mutex).
596  *   - this lock cannot be held together with txr_lock or rxr_lock.
597  * + fields named sc_arq_* are not protected by any lock.
598  *   - operations on sc_arq_* are done in a single context related to
599  *     sc_arq_task.
600  * + other fields in ixl_softc are protected by sc_cfg_lock
601  *   (an adaptive mutex).
602  *   - it must be taken before any other lock and is released after the
603  *     other lock is released (see the illustrative sketch below).
604  */
605
606 struct ixl_softc {
607 device_t sc_dev;
608 struct ethercom sc_ec;
609 bool sc_attached;
610 bool sc_dead;
611 uint32_t sc_port;
612 struct sysctllog *sc_sysctllog;
613 struct workqueue *sc_workq;
614 struct workqueue *sc_workq_txrx;
615 int sc_stats_intval;
616 callout_t sc_stats_callout;
617 struct ixl_work sc_stats_task;
618 struct ixl_stats_counters
619 sc_stats_counters;
620 uint8_t sc_enaddr[ETHER_ADDR_LEN];
621 struct ifmedia sc_media;
622 uint64_t sc_media_status;
623 uint64_t sc_media_active;
624 kmutex_t sc_cfg_lock;
625 enum i40e_mac_type sc_mac_type;
626 uint32_t sc_rss_table_size;
627 uint32_t sc_rss_table_entry_width;
628 bool sc_txrx_workqueue;
629 u_int sc_tx_process_limit;
630 u_int sc_rx_process_limit;
631 u_int sc_tx_intr_process_limit;
632 u_int sc_rx_intr_process_limit;
633
634 int sc_cur_ec_capenable;
635
636 struct pci_attach_args sc_pa;
637 pci_intr_handle_t *sc_ihp;
638 void **sc_ihs;
639 unsigned int sc_nintrs;
640
641 bus_dma_tag_t sc_dmat;
642 bus_space_tag_t sc_memt;
643 bus_space_handle_t sc_memh;
644 bus_size_t sc_mems;
645
646 uint8_t sc_pf_id;
647 uint16_t sc_uplink_seid; /* le */
648 uint16_t sc_downlink_seid; /* le */
649 uint16_t sc_vsi_number; /* le */
650 uint16_t sc_vsi_stat_counter_idx;
651 uint16_t sc_seid;
652 unsigned int sc_base_queue;
653
654 pci_intr_type_t sc_intrtype;
655 unsigned int sc_msix_vector_queue;
656
657 struct ixl_dmamem sc_scratch;
658 struct ixl_dmamem sc_aqbuf;
659
660 const struct ixl_aq_regs *
661 sc_aq_regs;
662 uint32_t sc_aq_flags;
663 #define IXL_SC_AQ_FLAG_RXCTL __BIT(0)
664 #define IXL_SC_AQ_FLAG_NVMLOCK __BIT(1)
665 #define IXL_SC_AQ_FLAG_NVMREAD __BIT(2)
666
667 kmutex_t sc_atq_lock;
668 kcondvar_t sc_atq_cv;
669 struct ixl_dmamem sc_atq;
670 unsigned int sc_atq_prod;
671 unsigned int sc_atq_cons;
672
673 struct ixl_dmamem sc_arq;
674 struct ixl_work sc_arq_task;
675 struct ixl_aq_bufs sc_arq_idle;
676 struct ixl_aq_buf *sc_arq_live[IXL_AQ_NUM];
677 unsigned int sc_arq_prod;
678 unsigned int sc_arq_cons;
679
680 struct ixl_work sc_link_state_task;
681 struct ixl_atq sc_link_state_atq;
682
683 struct ixl_dmamem sc_hmc_sd;
684 struct ixl_dmamem sc_hmc_pd;
685 struct ixl_hmc_entry sc_hmc_entries[IXL_HMC_COUNT];
686
687 unsigned int sc_tx_ring_ndescs;
688 unsigned int sc_rx_ring_ndescs;
689 unsigned int sc_nqueue_pairs;
690 unsigned int sc_nqueue_pairs_max;
691 unsigned int sc_nqueue_pairs_device;
692 struct ixl_queue_pair *sc_qps;
693
694 struct evcnt sc_event_atq;
695 struct evcnt sc_event_link;
696 struct evcnt sc_event_ecc_err;
697 struct evcnt sc_event_pci_exception;
698 struct evcnt sc_event_crit_err;
699 };
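
/*
 * Illustrative sketch of the lock ordering described in the locking notes
 * above; not part of the driver and the helper name is hypothetical.
 * sc_cfg_lock (adaptive) is taken first and released last; only one of
 * txr_lock/rxr_lock (spin) is held at a time, and sc_atq_lock is never
 * held together with a ring lock.
 */
static inline void
example_lock_order(struct ixl_softc *sc, struct ixl_tx_ring *txr)
{

	mutex_enter(&sc->sc_cfg_lock);	/* adaptive mutex, taken first */
	mutex_enter(&txr->txr_lock);	/* spin mutex, one ring lock at a time */
	/* ... access txr_* fields ... */
	mutex_exit(&txr->txr_lock);
	mutex_exit(&sc->sc_cfg_lock);	/* released after the ring lock */
}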
700
701 #define IXL_TXRX_PROCESS_UNLIMIT UINT_MAX
702 #define IXL_TX_PROCESS_LIMIT 256
703 #define IXL_RX_PROCESS_LIMIT 256
704 #define IXL_TX_INTR_PROCESS_LIMIT 256
705 #define IXL_RX_INTR_PROCESS_LIMIT 0U
706
707 #define IXL_IFCAP_RXCSUM (IFCAP_CSUM_IPv4_Rx| \
708 IFCAP_CSUM_TCPv4_Rx| \
709 IFCAP_CSUM_UDPv4_Rx| \
710 IFCAP_CSUM_TCPv6_Rx| \
711 IFCAP_CSUM_UDPv6_Rx)
712
713 #define delaymsec(_x) DELAY(1000 * (_x))
714 #ifdef IXL_DEBUG
715 #define DDPRINTF(sc, fmt, args...) \
716 do { \
717 if ((sc) != NULL) { \
718 device_printf( \
719 ((struct ixl_softc *)(sc))->sc_dev, \
720 ""); \
721 } \
722 printf("%s:\t" fmt, __func__, ##args); \
723 } while (0)
724 #else
725 #define DDPRINTF(sc, fmt, args...) __nothing
726 #endif
727 #ifndef IXL_STATS_INTERVAL_MSEC
728 #define IXL_STATS_INTERVAL_MSEC 10000
729 #endif
730 #ifndef IXL_QUEUE_NUM
731 #define IXL_QUEUE_NUM 0
732 #endif
733
734 static bool ixl_param_nomsix = false;
735 static int ixl_param_stats_interval = IXL_STATS_INTERVAL_MSEC;
736 static int ixl_param_nqps_limit = IXL_QUEUE_NUM;
737 static unsigned int ixl_param_tx_ndescs = 1024;
738 static unsigned int ixl_param_rx_ndescs = 1024;
739
740 static enum i40e_mac_type
741 ixl_mactype(pci_product_id_t);
742 static void ixl_clear_hw(struct ixl_softc *);
743 static int ixl_pf_reset(struct ixl_softc *);
744
745 static int ixl_dmamem_alloc(struct ixl_softc *, struct ixl_dmamem *,
746 bus_size_t, bus_size_t);
747 static void ixl_dmamem_free(struct ixl_softc *, struct ixl_dmamem *);
748
749 static int ixl_arq_fill(struct ixl_softc *);
750 static void ixl_arq_unfill(struct ixl_softc *);
751
752 static int ixl_atq_poll(struct ixl_softc *, struct ixl_aq_desc *,
753 unsigned int);
754 static void ixl_atq_set(struct ixl_atq *,
755 void (*)(struct ixl_softc *, const struct ixl_aq_desc *));
756 static int ixl_atq_post(struct ixl_softc *, struct ixl_atq *);
757 static int ixl_atq_post_locked(struct ixl_softc *, struct ixl_atq *);
758 static void ixl_atq_done(struct ixl_softc *);
759 static int ixl_atq_exec(struct ixl_softc *, struct ixl_atq *);
760 static int ixl_get_version(struct ixl_softc *);
761 static int ixl_get_nvm_version(struct ixl_softc *);
762 static int ixl_get_hw_capabilities(struct ixl_softc *);
763 static int ixl_pxe_clear(struct ixl_softc *);
764 static int ixl_lldp_shut(struct ixl_softc *);
765 static int ixl_get_mac(struct ixl_softc *);
766 static int ixl_get_switch_config(struct ixl_softc *);
767 static int ixl_phy_mask_ints(struct ixl_softc *);
768 static int ixl_get_phy_types(struct ixl_softc *, uint64_t *);
769 static int ixl_restart_an(struct ixl_softc *);
770 static int ixl_hmc(struct ixl_softc *);
771 static void ixl_hmc_free(struct ixl_softc *);
772 static int ixl_get_vsi(struct ixl_softc *);
773 static int ixl_set_vsi(struct ixl_softc *);
774 static void ixl_set_filter_control(struct ixl_softc *);
775 static void ixl_get_link_status(void *);
776 static int ixl_get_link_status_poll(struct ixl_softc *);
777 static int ixl_set_link_status(struct ixl_softc *,
778 const struct ixl_aq_desc *);
779 static void ixl_config_rss(struct ixl_softc *);
780 static int ixl_add_macvlan(struct ixl_softc *, const uint8_t *,
781 uint16_t, uint16_t);
782 static int ixl_remove_macvlan(struct ixl_softc *, const uint8_t *,
783 uint16_t, uint16_t);
784 static void ixl_arq(void *);
785 static void ixl_hmc_pack(void *, const void *,
786 const struct ixl_hmc_pack *, unsigned int);
787 static uint32_t ixl_rd_rx_csr(struct ixl_softc *, uint32_t);
788 static void ixl_wr_rx_csr(struct ixl_softc *, uint32_t, uint32_t);
789 static int ixl_rd16_nvm(struct ixl_softc *, uint16_t, uint16_t *);
790
791 static int ixl_match(device_t, cfdata_t, void *);
792 static void ixl_attach(device_t, device_t, void *);
793 static int ixl_detach(device_t, int);
794
795 static void ixl_media_add(struct ixl_softc *, uint64_t);
796 static int ixl_media_change(struct ifnet *);
797 static void ixl_media_status(struct ifnet *, struct ifmediareq *);
798 static void ixl_watchdog(struct ifnet *);
799 static int ixl_ioctl(struct ifnet *, u_long, void *);
800 static void ixl_start(struct ifnet *);
801 static int ixl_transmit(struct ifnet *, struct mbuf *);
802 static void ixl_deferred_transmit(void *);
803 static int ixl_intr(void *);
804 static int ixl_queue_intr(void *);
805 static int ixl_other_intr(void *);
806 static void ixl_handle_queue(void *);
807 static void ixl_sched_handle_queue(struct ixl_softc *,
808 struct ixl_queue_pair *);
809 static int ixl_init(struct ifnet *);
810 static int ixl_init_locked(struct ixl_softc *);
811 static void ixl_stop(struct ifnet *, int);
812 static void ixl_stop_locked(struct ixl_softc *);
813 static int ixl_iff(struct ixl_softc *);
814 static int ixl_ifflags_cb(struct ethercom *);
815 static int ixl_setup_interrupts(struct ixl_softc *);
816 static int ixl_establish_intx(struct ixl_softc *);
817 static int ixl_establish_msix(struct ixl_softc *);
818 static void ixl_enable_queue_intr(struct ixl_softc *,
819 struct ixl_queue_pair *);
820 static void ixl_disable_queue_intr(struct ixl_softc *,
821 struct ixl_queue_pair *);
822 static void ixl_enable_other_intr(struct ixl_softc *);
823 static void ixl_disable_other_intr(struct ixl_softc *);
824 static void ixl_config_queue_intr(struct ixl_softc *);
825 static void ixl_config_other_intr(struct ixl_softc *);
826
827 static struct ixl_tx_ring *
828 ixl_txr_alloc(struct ixl_softc *, unsigned int);
829 static void ixl_txr_qdis(struct ixl_softc *, struct ixl_tx_ring *, int);
830 static void ixl_txr_config(struct ixl_softc *, struct ixl_tx_ring *);
831 static int ixl_txr_enabled(struct ixl_softc *, struct ixl_tx_ring *);
832 static int ixl_txr_disabled(struct ixl_softc *, struct ixl_tx_ring *);
833 static void ixl_txr_unconfig(struct ixl_softc *, struct ixl_tx_ring *);
834 static void ixl_txr_clean(struct ixl_softc *, struct ixl_tx_ring *);
835 static void ixl_txr_free(struct ixl_softc *, struct ixl_tx_ring *);
836 static int ixl_txeof(struct ixl_softc *, struct ixl_tx_ring *, u_int);
837
838 static struct ixl_rx_ring *
839 ixl_rxr_alloc(struct ixl_softc *, unsigned int);
840 static void ixl_rxr_config(struct ixl_softc *, struct ixl_rx_ring *);
841 static int ixl_rxr_enabled(struct ixl_softc *, struct ixl_rx_ring *);
842 static int ixl_rxr_disabled(struct ixl_softc *, struct ixl_rx_ring *);
843 static void ixl_rxr_unconfig(struct ixl_softc *, struct ixl_rx_ring *);
844 static void ixl_rxr_clean(struct ixl_softc *, struct ixl_rx_ring *);
845 static void ixl_rxr_free(struct ixl_softc *, struct ixl_rx_ring *);
846 static int ixl_rxeof(struct ixl_softc *, struct ixl_rx_ring *, u_int);
847 static int ixl_rxfill(struct ixl_softc *, struct ixl_rx_ring *);
848
849 static struct workqueue *
850 ixl_workq_create(const char *, pri_t, int, int);
851 static void ixl_workq_destroy(struct workqueue *);
852 static int ixl_workqs_teardown(device_t);
853 static void ixl_work_set(struct ixl_work *, void (*)(void *), void *);
854 static void ixl_work_add(struct workqueue *, struct ixl_work *);
855 static void ixl_work_wait(struct workqueue *, struct ixl_work *);
856 static void ixl_workq_work(struct work *, void *);
857 static const struct ixl_product *
858 ixl_lookup(const struct pci_attach_args *pa);
859 static void ixl_link_state_update(struct ixl_softc *,
860 const struct ixl_aq_desc *);
861 static int ixl_vlan_cb(struct ethercom *, uint16_t, bool);
862 static int ixl_setup_vlan_hwfilter(struct ixl_softc *);
863 static void ixl_teardown_vlan_hwfilter(struct ixl_softc *);
864 static int ixl_update_macvlan(struct ixl_softc *);
865 static int	ixl_setup_interrupts(struct ixl_softc *);
866 static void ixl_teardown_interrupts(struct ixl_softc *);
867 static int ixl_setup_stats(struct ixl_softc *);
868 static void ixl_teardown_stats(struct ixl_softc *);
869 static void ixl_stats_callout(void *);
870 static void ixl_stats_update(void *);
871 static int ixl_setup_sysctls(struct ixl_softc *);
872 static void ixl_teardown_sysctls(struct ixl_softc *);
873 static int ixl_queue_pairs_alloc(struct ixl_softc *);
874 static void ixl_queue_pairs_free(struct ixl_softc *);
875
876 static const struct ixl_phy_type ixl_phy_type_map[] = {
877 { 1ULL << IXL_PHY_TYPE_SGMII, IFM_1000_SGMII },
878 { 1ULL << IXL_PHY_TYPE_1000BASE_KX, IFM_1000_KX },
879 { 1ULL << IXL_PHY_TYPE_10GBASE_KX4, IFM_10G_KX4 },
880 { 1ULL << IXL_PHY_TYPE_10GBASE_KR, IFM_10G_KR },
881 { 1ULL << IXL_PHY_TYPE_40GBASE_KR4, IFM_40G_KR4 },
882 { 1ULL << IXL_PHY_TYPE_XAUI |
883 1ULL << IXL_PHY_TYPE_XFI, IFM_10G_CX4 },
884 { 1ULL << IXL_PHY_TYPE_SFI, IFM_10G_SFI },
885 { 1ULL << IXL_PHY_TYPE_XLAUI |
886 1ULL << IXL_PHY_TYPE_XLPPI, IFM_40G_XLPPI },
887 { 1ULL << IXL_PHY_TYPE_40GBASE_CR4_CU |
888 1ULL << IXL_PHY_TYPE_40GBASE_CR4, IFM_40G_CR4 },
889 { 1ULL << IXL_PHY_TYPE_10GBASE_CR1_CU |
890 1ULL << IXL_PHY_TYPE_10GBASE_CR1, IFM_10G_CR1 },
891 { 1ULL << IXL_PHY_TYPE_10GBASE_AOC, IFM_10G_AOC },
892 { 1ULL << IXL_PHY_TYPE_40GBASE_AOC, IFM_40G_AOC },
893 { 1ULL << IXL_PHY_TYPE_100BASE_TX, IFM_100_TX },
894 { 1ULL << IXL_PHY_TYPE_1000BASE_T_OPTICAL |
895 1ULL << IXL_PHY_TYPE_1000BASE_T, IFM_1000_T },
896 { 1ULL << IXL_PHY_TYPE_10GBASE_T, IFM_10G_T },
897 { 1ULL << IXL_PHY_TYPE_10GBASE_SR, IFM_10G_SR },
898 { 1ULL << IXL_PHY_TYPE_10GBASE_LR, IFM_10G_LR },
899 { 1ULL << IXL_PHY_TYPE_10GBASE_SFPP_CU, IFM_10G_TWINAX },
900 { 1ULL << IXL_PHY_TYPE_40GBASE_SR4, IFM_40G_SR4 },
901 { 1ULL << IXL_PHY_TYPE_40GBASE_LR4, IFM_40G_LR4 },
902 { 1ULL << IXL_PHY_TYPE_1000BASE_SX, IFM_1000_SX },
903 { 1ULL << IXL_PHY_TYPE_1000BASE_LX, IFM_1000_LX },
904 { 1ULL << IXL_PHY_TYPE_20GBASE_KR2, IFM_20G_KR2 },
905 { 1ULL << IXL_PHY_TYPE_25GBASE_KR, IFM_25G_KR },
906 { 1ULL << IXL_PHY_TYPE_25GBASE_CR, IFM_25G_CR },
907 { 1ULL << IXL_PHY_TYPE_25GBASE_SR, IFM_25G_SR },
908 { 1ULL << IXL_PHY_TYPE_25GBASE_LR, IFM_25G_LR },
909 { 1ULL << IXL_PHY_TYPE_25GBASE_AOC, IFM_25G_AOC },
910 { 1ULL << IXL_PHY_TYPE_25GBASE_ACC, IFM_25G_CR },
911 };
912
913 static const struct ixl_speed_type ixl_speed_type_map[] = {
914 { IXL_AQ_LINK_SPEED_40GB, IF_Gbps(40) },
915 { IXL_AQ_LINK_SPEED_25GB, IF_Gbps(25) },
916 { IXL_AQ_LINK_SPEED_10GB, IF_Gbps(10) },
917 { IXL_AQ_LINK_SPEED_1000MB, IF_Mbps(1000) },
918 { IXL_AQ_LINK_SPEED_100MB, IF_Mbps(100)},
919 };
920
921 static const struct ixl_aq_regs ixl_pf_aq_regs = {
922 .atq_tail = I40E_PF_ATQT,
923 .atq_tail_mask = I40E_PF_ATQT_ATQT_MASK,
924 .atq_head = I40E_PF_ATQH,
925 .atq_head_mask = I40E_PF_ATQH_ATQH_MASK,
926 .atq_len = I40E_PF_ATQLEN,
927 .atq_bal = I40E_PF_ATQBAL,
928 .atq_bah = I40E_PF_ATQBAH,
929 .atq_len_enable = I40E_PF_ATQLEN_ATQENABLE_MASK,
930
931 .arq_tail = I40E_PF_ARQT,
932 .arq_tail_mask = I40E_PF_ARQT_ARQT_MASK,
933 .arq_head = I40E_PF_ARQH,
934 .arq_head_mask = I40E_PF_ARQH_ARQH_MASK,
935 .arq_len = I40E_PF_ARQLEN,
936 .arq_bal = I40E_PF_ARQBAL,
937 .arq_bah = I40E_PF_ARQBAH,
938 .arq_len_enable = I40E_PF_ARQLEN_ARQENABLE_MASK,
939 };
940
941 #define ixl_rd(_s, _r) \
942 bus_space_read_4((_s)->sc_memt, (_s)->sc_memh, (_r))
943 #define ixl_wr(_s, _r, _v) \
944 bus_space_write_4((_s)->sc_memt, (_s)->sc_memh, (_r), (_v))
945 #define ixl_barrier(_s, _r, _l, _o) \
946 bus_space_barrier((_s)->sc_memt, (_s)->sc_memh, (_r), (_l), (_o))
947 #define ixl_flush(_s) (void)ixl_rd((_s), I40E_GLGEN_STAT)
948 #define ixl_nqueues(_sc) (1 << ((_sc)->sc_nqueue_pairs - 1))
949
950 static inline uint32_t
951 ixl_dmamem_hi(struct ixl_dmamem *ixm)
952 {
953 uint32_t retval;
954 uint64_t val;
955
956 if (sizeof(IXL_DMA_DVA(ixm)) > 4) {
957 val = (intptr_t)IXL_DMA_DVA(ixm);
958 retval = (uint32_t)(val >> 32);
959 } else {
960 retval = 0;
961 }
962
963 return retval;
964 }
965
966 static inline uint32_t
967 ixl_dmamem_lo(struct ixl_dmamem *ixm)
968 {
969
970 return (uint32_t)IXL_DMA_DVA(ixm);
971 }
972
973 static inline void
974 ixl_aq_dva(struct ixl_aq_desc *iaq, bus_addr_t addr)
975 {
976 uint64_t val;
977
978 if (sizeof(addr) > 4) {
979 val = (intptr_t)addr;
980 iaq->iaq_param[2] = htole32(val >> 32);
981 } else {
982 iaq->iaq_param[2] = htole32(0);
983 }
984
985 iaq->iaq_param[3] = htole32(addr);
986 }
987
988 static inline unsigned int
989 ixl_rxr_unrefreshed(unsigned int prod, unsigned int cons, unsigned int ndescs)
990 {
991 unsigned int num;
992
993 if (prod < cons)
994 num = cons - prod;
995 else
996 num = (ndescs - prod) + cons;
997
998 if (__predict_true(num > 0)) {
999 		/* the device cannot receive packets if all descriptors are filled */
1000 num -= 1;
1001 }
1002
1003 return num;
1004 }
1005
1006 CFATTACH_DECL3_NEW(ixl, sizeof(struct ixl_softc),
1007 ixl_match, ixl_attach, ixl_detach, NULL, NULL, NULL,
1008 DVF_DETACH_SHUTDOWN);
1009
1010 static const struct ixl_product ixl_products[] = {
1011 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_SFP },
1012 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_KX_B },
1013 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_KX_C },
1014 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_A },
1015 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_B },
1016 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_C },
1017 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_T },
1018 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_1 },
1019 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_2 },
1020 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_T4_10G },
1021 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_BP },
1022 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_SFP28 },
1023 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_KX },
1024 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_QSFP },
1025 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_SFP },
1026 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_1G_BASET },
1027 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_10G_BASET },
1028 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_I_SFP },
1029 /* required last entry */
1030 {0, 0}
1031 };
1032
1033 static const struct ixl_product *
1034 ixl_lookup(const struct pci_attach_args *pa)
1035 {
1036 const struct ixl_product *ixlp;
1037
1038 for (ixlp = ixl_products; ixlp->vendor_id != 0; ixlp++) {
1039 if (PCI_VENDOR(pa->pa_id) == ixlp->vendor_id &&
1040 PCI_PRODUCT(pa->pa_id) == ixlp->product_id)
1041 return ixlp;
1042 }
1043
1044 return NULL;
1045 }
1046
1047 static int
1048 ixl_match(device_t parent, cfdata_t match, void *aux)
1049 {
1050 const struct pci_attach_args *pa = aux;
1051
1052 return (ixl_lookup(pa) != NULL) ? 1 : 0;
1053 }
1054
1055 static void
1056 ixl_attach(device_t parent, device_t self, void *aux)
1057 {
1058 struct ixl_softc *sc;
1059 struct pci_attach_args *pa = aux;
1060 struct ifnet *ifp;
1061 pcireg_t memtype;
1062 uint32_t firstq, port, ari, func;
1063 uint64_t phy_types = 0;
1064 char xnamebuf[32];
1065 int tries, rv;
1066
1067 sc = device_private(self);
1068 sc->sc_dev = self;
1069 ifp = &sc->sc_ec.ec_if;
1070
1071 sc->sc_pa = *pa;
1072 sc->sc_dmat = (pci_dma64_available(pa)) ?
1073 pa->pa_dmat64 : pa->pa_dmat;
1074 sc->sc_aq_regs = &ixl_pf_aq_regs;
1075
1076 sc->sc_mac_type = ixl_mactype(PCI_PRODUCT(pa->pa_id));
1077
1078 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IXL_PCIREG);
1079 if (pci_mapreg_map(pa, IXL_PCIREG, memtype, 0,
1080 &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems)) {
1081 aprint_error(": unable to map registers\n");
1082 return;
1083 }
1084
1085 mutex_init(&sc->sc_cfg_lock, MUTEX_DEFAULT, IPL_SOFTNET);
1086
1087 firstq = ixl_rd(sc, I40E_PFLAN_QALLOC);
1088 firstq &= I40E_PFLAN_QALLOC_FIRSTQ_MASK;
1089 firstq >>= I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
1090 sc->sc_base_queue = firstq;
1091
1092 ixl_clear_hw(sc);
1093 if (ixl_pf_reset(sc) == -1) {
1094 		/* error printed by ixl_pf_reset */
1095 goto unmap;
1096 }
1097
1098 port = ixl_rd(sc, I40E_PFGEN_PORTNUM);
1099 port &= I40E_PFGEN_PORTNUM_PORT_NUM_MASK;
1100 port >>= I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
1101 sc->sc_port = port;
1102 aprint_normal(": port %u", sc->sc_port);
1103
1104 ari = ixl_rd(sc, I40E_GLPCI_CAPSUP);
1105 ari &= I40E_GLPCI_CAPSUP_ARI_EN_MASK;
1106 ari >>= I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
1107
1108 func = ixl_rd(sc, I40E_PF_FUNC_RID);
1109 sc->sc_pf_id = func & (ari ? 0xff : 0x7);
1110
1111 /* initialise the adminq */
1112
1113 mutex_init(&sc->sc_atq_lock, MUTEX_DEFAULT, IPL_NET);
1114
1115 if (ixl_dmamem_alloc(sc, &sc->sc_atq,
1116 sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
1117 aprint_error("\n" "%s: unable to allocate atq\n",
1118 device_xname(self));
1119 goto unmap;
1120 }
1121
1122 SIMPLEQ_INIT(&sc->sc_arq_idle);
1123 ixl_work_set(&sc->sc_arq_task, ixl_arq, sc);
1124 sc->sc_arq_cons = 0;
1125 sc->sc_arq_prod = 0;
1126
1127 if (ixl_dmamem_alloc(sc, &sc->sc_arq,
1128 sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
1129 aprint_error("\n" "%s: unable to allocate arq\n",
1130 device_xname(self));
1131 goto free_atq;
1132 }
1133
1134 if (!ixl_arq_fill(sc)) {
1135 aprint_error("\n" "%s: unable to fill arq descriptors\n",
1136 device_xname(self));
1137 goto free_arq;
1138 }
1139
1140 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1141 0, IXL_DMA_LEN(&sc->sc_atq),
1142 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1143
1144 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1145 0, IXL_DMA_LEN(&sc->sc_arq),
1146 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1147
1148 for (tries = 0; tries < 10; tries++) {
1149 sc->sc_atq_cons = 0;
1150 sc->sc_atq_prod = 0;
1151
1152 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1153 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1154 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1155 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1156
1157 ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
1158
1159 ixl_wr(sc, sc->sc_aq_regs->atq_bal,
1160 ixl_dmamem_lo(&sc->sc_atq));
1161 ixl_wr(sc, sc->sc_aq_regs->atq_bah,
1162 ixl_dmamem_hi(&sc->sc_atq));
1163 ixl_wr(sc, sc->sc_aq_regs->atq_len,
1164 sc->sc_aq_regs->atq_len_enable | IXL_AQ_NUM);
1165
1166 ixl_wr(sc, sc->sc_aq_regs->arq_bal,
1167 ixl_dmamem_lo(&sc->sc_arq));
1168 ixl_wr(sc, sc->sc_aq_regs->arq_bah,
1169 ixl_dmamem_hi(&sc->sc_arq));
1170 ixl_wr(sc, sc->sc_aq_regs->arq_len,
1171 sc->sc_aq_regs->arq_len_enable | IXL_AQ_NUM);
1172
1173 rv = ixl_get_version(sc);
1174 if (rv == 0)
1175 break;
1176 if (rv != ETIMEDOUT) {
1177 aprint_error(", unable to get firmware version\n");
1178 goto shutdown;
1179 }
1180
1181 delaymsec(100);
1182 }
1183
1184 ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
1185
1186 if (ixl_dmamem_alloc(sc, &sc->sc_aqbuf, IXL_AQ_BUFLEN, 0) != 0) {
1187 aprint_error_dev(self, ", unable to allocate nvm buffer\n");
1188 goto shutdown;
1189 }
1190
1191 ixl_get_nvm_version(sc);
1192
1193 if (sc->sc_mac_type == I40E_MAC_X722)
1194 sc->sc_nqueue_pairs_device = 128;
1195 else
1196 sc->sc_nqueue_pairs_device = 64;
1197
1198 rv = ixl_get_hw_capabilities(sc);
1199 if (rv != 0) {
1200 aprint_error(", GET HW CAPABILITIES %s\n",
1201 rv == ETIMEDOUT ? "timeout" : "error");
1202 goto free_aqbuf;
1203 }
1204
1205 sc->sc_nqueue_pairs_max = MIN((int)sc->sc_nqueue_pairs_device, ncpu);
1206 if (ixl_param_nqps_limit > 0) {
1207 sc->sc_nqueue_pairs_max = MIN((int)sc->sc_nqueue_pairs_max,
1208 ixl_param_nqps_limit);
1209 }
1210
1211 sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max;
1212 sc->sc_tx_ring_ndescs = ixl_param_tx_ndescs;
1213 sc->sc_rx_ring_ndescs = ixl_param_rx_ndescs;
1214
1215 KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_rx_ring_ndescs);
1216 KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_tx_ring_ndescs);
1217
1218 if (ixl_get_mac(sc) != 0) {
1219 /* error printed by ixl_get_mac */
1220 goto free_aqbuf;
1221 }
1222
1223 aprint_normal("\n");
1224 aprint_naive("\n");
1225
1226 aprint_normal_dev(self, "Ethernet address %s\n",
1227 ether_sprintf(sc->sc_enaddr));
1228
1229 rv = ixl_pxe_clear(sc);
1230 if (rv != 0) {
1231 aprint_debug_dev(self, "CLEAR PXE MODE %s\n",
1232 rv == ETIMEDOUT ? "timeout" : "error");
1233 }
1234
1235 ixl_set_filter_control(sc);
1236
1237 if (ixl_hmc(sc) != 0) {
1238 /* error printed by ixl_hmc */
1239 goto free_aqbuf;
1240 }
1241
1242 if (ixl_lldp_shut(sc) != 0) {
1243 /* error printed by ixl_lldp_shut */
1244 goto free_hmc;
1245 }
1246
1247 if (ixl_phy_mask_ints(sc) != 0) {
1248 /* error printed by ixl_phy_mask_ints */
1249 goto free_hmc;
1250 }
1251
1252 if (ixl_restart_an(sc) != 0) {
1253 /* error printed by ixl_restart_an */
1254 goto free_hmc;
1255 }
1256
1257 if (ixl_get_switch_config(sc) != 0) {
1258 /* error printed by ixl_get_switch_config */
1259 goto free_hmc;
1260 }
1261
1262 if (ixl_get_phy_types(sc, &phy_types) != 0) {
1263 /* error printed by ixl_get_phy_abilities */
1264 goto free_hmc;
1265 }
1266
1267 rv = ixl_get_link_status_poll(sc);
1268 if (rv != 0) {
1269 aprint_error_dev(self, "GET LINK STATUS %s\n",
1270 rv == ETIMEDOUT ? "timeout" : "error");
1271 goto free_hmc;
1272 }
1273
1274 if (ixl_dmamem_alloc(sc, &sc->sc_scratch,
1275 sizeof(struct ixl_aq_vsi_data), 8) != 0) {
1276 aprint_error_dev(self, "unable to allocate scratch buffer\n");
1277 goto free_hmc;
1278 }
1279
1280 rv = ixl_get_vsi(sc);
1281 if (rv != 0) {
1282 aprint_error_dev(self, "GET VSI %s %d\n",
1283 rv == ETIMEDOUT ? "timeout" : "error", rv);
1284 goto free_scratch;
1285 }
1286
1287 rv = ixl_set_vsi(sc);
1288 if (rv != 0) {
1289 aprint_error_dev(self, "UPDATE VSI error %s %d\n",
1290 rv == ETIMEDOUT ? "timeout" : "error", rv);
1291 goto free_scratch;
1292 }
1293
1294 if (ixl_queue_pairs_alloc(sc) != 0) {
1295 /* error printed by ixl_queue_pairs_alloc */
1296 goto free_scratch;
1297 }
1298
1299 if (ixl_setup_interrupts(sc) != 0) {
1300 /* error printed by ixl_setup_interrupts */
1301 goto free_queue_pairs;
1302 }
1303
1304 if (ixl_setup_stats(sc) != 0) {
1305 aprint_error_dev(self, "failed to setup event counters\n");
1306 goto teardown_intrs;
1307 }
1308
1309 if (ixl_setup_sysctls(sc) != 0) {
1310 /* error printed by ixl_setup_sysctls */
1311 goto teardown_stats;
1312 }
1313
1314 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_cfg", device_xname(self));
1315 sc->sc_workq = ixl_workq_create(xnamebuf, IXL_WORKQUEUE_PRI,
1316 IPL_NET, WQ_PERCPU | WQ_MPSAFE);
1317 if (sc->sc_workq == NULL)
1318 goto teardown_sysctls;
1319
1320 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_txrx", device_xname(self));
1321 sc->sc_workq_txrx = ixl_workq_create(xnamebuf, IXL_WORKQUEUE_PRI,
1322 IPL_NET, WQ_PERCPU | WQ_MPSAFE);
1323 if (sc->sc_workq_txrx == NULL)
1324 goto teardown_wqs;
1325
1326 snprintf(xnamebuf, sizeof(xnamebuf), "%s_atq_cv", device_xname(self));
1327 cv_init(&sc->sc_atq_cv, xnamebuf);
1328
1329 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
1330
1331 ifp->if_softc = sc;
1332 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1333 ifp->if_extflags = IFEF_MPSAFE;
1334 ifp->if_ioctl = ixl_ioctl;
1335 ifp->if_start = ixl_start;
1336 ifp->if_transmit = ixl_transmit;
1337 ifp->if_watchdog = ixl_watchdog;
1338 ifp->if_init = ixl_init;
1339 ifp->if_stop = ixl_stop;
1340 IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_ndescs);
1341 IFQ_SET_READY(&ifp->if_snd);
1342 ifp->if_capabilities |= IXL_IFCAP_RXCSUM;
1343 #if 0
1344 ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_UDPv4_Tx;
1345 #endif
1346 ether_set_vlan_cb(&sc->sc_ec, ixl_vlan_cb);
1347 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
1348 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
1349 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
1350
1351 sc->sc_ec.ec_capenable = sc->sc_ec.ec_capabilities;
1352 /* Disable VLAN_HWFILTER by default */
1353 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
1354
1355 sc->sc_cur_ec_capenable = sc->sc_ec.ec_capenable;
1356
1357 sc->sc_ec.ec_ifmedia = &sc->sc_media;
1358 ifmedia_init(&sc->sc_media, IFM_IMASK, ixl_media_change,
1359 ixl_media_status);
1360
1361 ixl_media_add(sc, phy_types);
1362 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1363 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
1364
1365 if_attach(ifp);
1366 if_deferred_start_init(ifp, NULL);
1367 ether_ifattach(ifp, sc->sc_enaddr);
1368 ether_set_ifflags_cb(&sc->sc_ec, ixl_ifflags_cb);
1369
1370 (void)ixl_get_link_status_poll(sc);
1371 ixl_work_set(&sc->sc_link_state_task, ixl_get_link_status, sc);
1372
1373 ixl_config_other_intr(sc);
1374 ixl_enable_other_intr(sc);
1375
1376 /* remove default mac filter and replace it so we can see vlans */
1377 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, 0, 0);
1378 if (rv != ENOENT) {
1379 aprint_debug_dev(self,
1380 "unable to remove macvlan %u\n", rv);
1381 }
1382 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
1383 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1384 if (rv != ENOENT) {
1385 aprint_debug_dev(self,
1386 "unable to remove macvlan, ignore vlan %u\n", rv);
1387 }
1388
1389 if (ixl_update_macvlan(sc) != 0) {
1390 aprint_debug_dev(self,
1391 "couldn't enable vlan hardware filter\n");
1392 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
1393 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER);
1394 }
1395
1396 sc->sc_txrx_workqueue = true;
1397 sc->sc_tx_process_limit = IXL_TX_PROCESS_LIMIT;
1398 sc->sc_rx_process_limit = IXL_RX_PROCESS_LIMIT;
1399 sc->sc_tx_intr_process_limit = IXL_TX_INTR_PROCESS_LIMIT;
1400 sc->sc_rx_intr_process_limit = IXL_RX_INTR_PROCESS_LIMIT;
1401
1402 ixl_stats_update(sc);
1403 sc->sc_stats_counters.isc_has_offset = true;
1404 callout_schedule(&sc->sc_stats_callout, mstohz(sc->sc_stats_intval));
1405
1406 if (pmf_device_register(self, NULL, NULL) != true)
1407 aprint_debug_dev(self, "couldn't establish power handler\n");
1408 sc->sc_attached = true;
1409 return;
1410
1411 teardown_wqs:
1412 config_finalize_register(self, ixl_workqs_teardown);
1413 teardown_sysctls:
1414 ixl_teardown_sysctls(sc);
1415 teardown_stats:
1416 ixl_teardown_stats(sc);
1417 teardown_intrs:
1418 ixl_teardown_interrupts(sc);
1419 free_queue_pairs:
1420 ixl_queue_pairs_free(sc);
1421 free_scratch:
1422 ixl_dmamem_free(sc, &sc->sc_scratch);
1423 free_hmc:
1424 ixl_hmc_free(sc);
1425 free_aqbuf:
1426 ixl_dmamem_free(sc, &sc->sc_aqbuf);
1427 shutdown:
1428 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1429 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1430 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1431 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1432
1433 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0);
1434 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0);
1435 ixl_wr(sc, sc->sc_aq_regs->atq_len, 0);
1436
1437 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0);
1438 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0);
1439 ixl_wr(sc, sc->sc_aq_regs->arq_len, 0);
1440
1441 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1442 0, IXL_DMA_LEN(&sc->sc_arq),
1443 BUS_DMASYNC_POSTREAD);
1444 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1445 0, IXL_DMA_LEN(&sc->sc_atq),
1446 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1447
1448 ixl_arq_unfill(sc);
1449 free_arq:
1450 ixl_dmamem_free(sc, &sc->sc_arq);
1451 free_atq:
1452 ixl_dmamem_free(sc, &sc->sc_atq);
1453 unmap:
1454 mutex_destroy(&sc->sc_atq_lock);
1455 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
1456 mutex_destroy(&sc->sc_cfg_lock);
1457 sc->sc_mems = 0;
1458
1459 sc->sc_attached = false;
1460 }
1461
1462 static int
1463 ixl_detach(device_t self, int flags)
1464 {
1465 struct ixl_softc *sc = device_private(self);
1466 struct ifnet *ifp = &sc->sc_ec.ec_if;
1467
1468 if (!sc->sc_attached)
1469 return 0;
1470
1471 ixl_stop(ifp, 1);
1472
1473 ixl_disable_other_intr(sc);
1474
1475 callout_stop(&sc->sc_stats_callout);
1476 ixl_work_wait(sc->sc_workq, &sc->sc_stats_task);
1477
1478 /* wait for ATQ handler */
1479 mutex_enter(&sc->sc_atq_lock);
1480 mutex_exit(&sc->sc_atq_lock);
1481
1482 ixl_work_wait(sc->sc_workq, &sc->sc_arq_task);
1483 ixl_work_wait(sc->sc_workq, &sc->sc_link_state_task);
1484
1485 if (sc->sc_workq != NULL) {
1486 ixl_workq_destroy(sc->sc_workq);
1487 sc->sc_workq = NULL;
1488 }
1489
1490 if (sc->sc_workq_txrx != NULL) {
1491 ixl_workq_destroy(sc->sc_workq_txrx);
1492 sc->sc_workq_txrx = NULL;
1493 }
1494
1495 ifmedia_delete_instance(&sc->sc_media, IFM_INST_ANY);
1496 ether_ifdetach(ifp);
1497 if_detach(ifp);
1498
1499 ixl_teardown_interrupts(sc);
1500 ixl_teardown_stats(sc);
1501 ixl_teardown_sysctls(sc);
1502
1503 ixl_queue_pairs_free(sc);
1504
1505 ixl_dmamem_free(sc, &sc->sc_scratch);
1506 ixl_hmc_free(sc);
1507
1508 /* shutdown */
1509 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1510 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1511 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1512 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1513
1514 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0);
1515 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0);
1516 ixl_wr(sc, sc->sc_aq_regs->atq_len, 0);
1517
1518 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0);
1519 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0);
1520 ixl_wr(sc, sc->sc_aq_regs->arq_len, 0);
1521
1522 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1523 0, IXL_DMA_LEN(&sc->sc_arq),
1524 BUS_DMASYNC_POSTREAD);
1525 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1526 0, IXL_DMA_LEN(&sc->sc_atq),
1527 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1528
1529 ixl_arq_unfill(sc);
1530
1531 ixl_dmamem_free(sc, &sc->sc_arq);
1532 ixl_dmamem_free(sc, &sc->sc_atq);
1533 ixl_dmamem_free(sc, &sc->sc_aqbuf);
1534
1535 cv_destroy(&sc->sc_atq_cv);
1536 mutex_destroy(&sc->sc_atq_lock);
1537
1538 if (sc->sc_mems != 0) {
1539 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
1540 sc->sc_mems = 0;
1541 }
1542
1543 mutex_destroy(&sc->sc_cfg_lock);
1544
1545 return 0;
1546 }
1547
1548 static int
1549 ixl_workqs_teardown(device_t self)
1550 {
1551 struct ixl_softc *sc = device_private(self);
1552
1553 if (sc->sc_workq != NULL) {
1554 ixl_workq_destroy(sc->sc_workq);
1555 sc->sc_workq = NULL;
1556 }
1557
1558 if (sc->sc_workq_txrx != NULL) {
1559 ixl_workq_destroy(sc->sc_workq_txrx);
1560 sc->sc_workq_txrx = NULL;
1561 }
1562
1563 return 0;
1564 }
1565
1566 static int
1567 ixl_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
1568 {
1569 struct ifnet *ifp = &ec->ec_if;
1570 struct ixl_softc *sc = ifp->if_softc;
1571 int rv;
1572
1573 if (!ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)) {
1574 return 0;
1575 }
1576
1577 if (set) {
1578 rv = ixl_add_macvlan(sc, sc->sc_enaddr, vid,
1579 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
1580 if (rv == 0) {
1581 rv = ixl_add_macvlan(sc, etherbroadcastaddr,
1582 vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
1583 }
1584 } else {
1585 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, vid,
1586 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
1587 (void)ixl_remove_macvlan(sc, etherbroadcastaddr, vid,
1588 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
1589 }
1590
1591 return rv;
1592 }
1593
1594 static void
1595 ixl_media_add(struct ixl_softc *sc, uint64_t phy_types)
1596 {
1597 struct ifmedia *ifm = &sc->sc_media;
1598 const struct ixl_phy_type *itype;
1599 unsigned int i;
1600
1601 for (i = 0; i < __arraycount(ixl_phy_type_map); i++) {
1602 itype = &ixl_phy_type_map[i];
1603
1604 if (ISSET(phy_types, itype->phy_type)) {
1605 ifmedia_add(ifm,
1606 IFM_ETHER | IFM_FDX | itype->ifm_type, 0, NULL);
1607
1608 if (itype->ifm_type == IFM_100_TX) {
1609 ifmedia_add(ifm, IFM_ETHER | itype->ifm_type,
1610 0, NULL);
1611 }
1612 }
1613 }
1614 }
1615
1616 static void
1617 ixl_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1618 {
1619 struct ixl_softc *sc = ifp->if_softc;
1620
1621 ifmr->ifm_status = sc->sc_media_status;
1622 ifmr->ifm_active = sc->sc_media_active;
1623
1624 mutex_enter(&sc->sc_cfg_lock);
1625 if (ifp->if_link_state == LINK_STATE_UP)
1626 SET(ifmr->ifm_status, IFM_ACTIVE);
1627 mutex_exit(&sc->sc_cfg_lock);
1628 }
1629
1630 static int
1631 ixl_media_change(struct ifnet *ifp)
1632 {
1633
1634 return 0;
1635 }
1636
1637 static void
1638 ixl_watchdog(struct ifnet *ifp)
1639 {
1640
1641 }
1642
1643 static void
1644 ixl_del_all_multiaddr(struct ixl_softc *sc)
1645 {
1646 struct ethercom *ec = &sc->sc_ec;
1647 struct ether_multi *enm;
1648 struct ether_multistep step;
1649
1650 ETHER_LOCK(ec);
1651 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1652 ETHER_NEXT_MULTI(step, enm)) {
1653 ixl_remove_macvlan(sc, enm->enm_addrlo, 0,
1654 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1655 }
1656 ETHER_UNLOCK(ec);
1657 }
1658
1659 static int
1660 ixl_add_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
1661 {
1662 struct ifnet *ifp = &sc->sc_ec.ec_if;
1663 int rv;
1664
1665 if (ISSET(ifp->if_flags, IFF_ALLMULTI))
1666 return 0;
1667
1668 if (memcmp(addrlo, addrhi, ETHER_ADDR_LEN) != 0) {
1669 ixl_del_all_multiaddr(sc);
1670 SET(ifp->if_flags, IFF_ALLMULTI);
1671 return ENETRESET;
1672 }
1673
1674 	/* multicast addresses cannot be used with the VLAN HWFILTER */
1675 rv = ixl_add_macvlan(sc, addrlo, 0,
1676 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
1677
1678 if (rv == ENOSPC) {
1679 ixl_del_all_multiaddr(sc);
1680 SET(ifp->if_flags, IFF_ALLMULTI);
1681 return ENETRESET;
1682 }
1683
1684 return rv;
1685 }
1686
1687 static int
1688 ixl_del_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
1689 {
1690 struct ifnet *ifp = &sc->sc_ec.ec_if;
1691 struct ethercom *ec = &sc->sc_ec;
1692 struct ether_multi *enm, *enm_last;
1693 struct ether_multistep step;
1694 int error, rv = 0;
1695
1696 if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
1697 ixl_remove_macvlan(sc, addrlo, 0,
1698 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1699 return 0;
1700 }
1701
1702 ETHER_LOCK(ec);
1703 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1704 ETHER_NEXT_MULTI(step, enm)) {
1705 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
1706 ETHER_ADDR_LEN) != 0) {
1707 goto out;
1708 }
1709 }
1710
1711 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1712 ETHER_NEXT_MULTI(step, enm)) {
1713 error = ixl_add_macvlan(sc, enm->enm_addrlo, 0,
1714 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
1715 if (error != 0)
1716 break;
1717 }
1718
1719 if (enm != NULL) {
1720 enm_last = enm;
1721 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1722 ETHER_NEXT_MULTI(step, enm)) {
1723 if (enm == enm_last)
1724 break;
1725
1726 ixl_remove_macvlan(sc, enm->enm_addrlo, 0,
1727 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1728 }
1729 } else {
1730 CLR(ifp->if_flags, IFF_ALLMULTI);
1731 rv = ENETRESET;
1732 }
1733
1734 out:
1735 ETHER_UNLOCK(ec);
1736 return rv;
1737 }
1738
1739 static int
1740 ixl_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1741 {
1742 struct ifreq *ifr = (struct ifreq *)data;
1743 struct ixl_softc *sc = (struct ixl_softc *)ifp->if_softc;
1744 struct ixl_tx_ring *txr;
1745 struct ixl_rx_ring *rxr;
1746 const struct sockaddr *sa;
1747 uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
1748 int s, error = 0;
1749 unsigned int i;
1750
1751 switch (cmd) {
1752 case SIOCADDMULTI:
1753 sa = ifreq_getaddr(SIOCADDMULTI, ifr);
1754 if (ether_addmulti(sa, &sc->sc_ec) == ENETRESET) {
1755 error = ether_multiaddr(sa, addrlo, addrhi);
1756 if (error != 0)
1757 return error;
1758
1759 error = ixl_add_multi(sc, addrlo, addrhi);
1760 if (error != 0 && error != ENETRESET) {
1761 ether_delmulti(sa, &sc->sc_ec);
1762 error = EIO;
1763 }
1764 }
1765 break;
1766
1767 case SIOCDELMULTI:
1768 sa = ifreq_getaddr(SIOCDELMULTI, ifr);
1769 if (ether_delmulti(sa, &sc->sc_ec) == ENETRESET) {
1770 error = ether_multiaddr(sa, addrlo, addrhi);
1771 if (error != 0)
1772 return error;
1773
1774 error = ixl_del_multi(sc, addrlo, addrhi);
1775 }
1776 break;
1777
1778 case SIOCGIFDATA:
1779 case SIOCZIFDATA:
1780 ifp->if_ipackets = 0;
1781 ifp->if_ibytes = 0;
1782 ifp->if_iqdrops = 0;
1783 ifp->if_ierrors = 0;
1784 ifp->if_opackets = 0;
1785 ifp->if_obytes = 0;
1786 ifp->if_omcasts = 0;
1787
1788 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
1789 txr = sc->sc_qps[i].qp_txr;
1790 rxr = sc->sc_qps[i].qp_rxr;
1791
1792 mutex_enter(&rxr->rxr_lock);
1793 ifp->if_ipackets += rxr->rxr_ipackets;
1794 ifp->if_ibytes += rxr->rxr_ibytes;
1795 ifp->if_iqdrops += rxr->rxr_iqdrops;
1796 ifp->if_ierrors += rxr->rxr_ierrors;
1797 if (cmd == SIOCZIFDATA) {
1798 rxr->rxr_ipackets = 0;
1799 rxr->rxr_ibytes = 0;
1800 rxr->rxr_iqdrops = 0;
1801 rxr->rxr_ierrors = 0;
1802 }
1803 mutex_exit(&rxr->rxr_lock);
1804
1805 mutex_enter(&txr->txr_lock);
1806 ifp->if_opackets += txr->txr_opackets;
1807 ifp->if_obytes += txr->txr_obytes;
1808 ifp->if_omcasts += txr->txr_omcasts;
1809 if (cmd == SIOCZIFDATA) {
1810 txr->txr_opackets = 0;
1811 txr->txr_obytes = 0;
1812 txr->txr_omcasts = 0;
1813 }
1814 mutex_exit(&txr->txr_lock);
1815 }
1816 /* FALLTHROUGH */
1817 default:
1818 s = splnet();
1819 error = ether_ioctl(ifp, cmd, data);
1820 splx(s);
1821 }
1822
1823 if (error == ENETRESET)
1824 error = ixl_iff(sc);
1825
1826 return error;
1827 }
1828
1829 static enum i40e_mac_type
1830 ixl_mactype(pci_product_id_t id)
1831 {
1832
1833 switch (id) {
1834 case PCI_PRODUCT_INTEL_XL710_SFP:
1835 case PCI_PRODUCT_INTEL_XL710_KX_B:
1836 case PCI_PRODUCT_INTEL_XL710_KX_C:
1837 case PCI_PRODUCT_INTEL_XL710_QSFP_A:
1838 case PCI_PRODUCT_INTEL_XL710_QSFP_B:
1839 case PCI_PRODUCT_INTEL_XL710_QSFP_C:
1840 case PCI_PRODUCT_INTEL_X710_10G_T:
1841 case PCI_PRODUCT_INTEL_XL710_20G_BP_1:
1842 case PCI_PRODUCT_INTEL_XL710_20G_BP_2:
1843 case PCI_PRODUCT_INTEL_X710_T4_10G:
1844 case PCI_PRODUCT_INTEL_XXV710_25G_BP:
1845 case PCI_PRODUCT_INTEL_XXV710_25G_SFP28:
1846 return I40E_MAC_XL710;
1847
1848 case PCI_PRODUCT_INTEL_X722_KX:
1849 case PCI_PRODUCT_INTEL_X722_QSFP:
1850 case PCI_PRODUCT_INTEL_X722_SFP:
1851 case PCI_PRODUCT_INTEL_X722_1G_BASET:
1852 case PCI_PRODUCT_INTEL_X722_10G_BASET:
1853 case PCI_PRODUCT_INTEL_X722_I_SFP:
1854 return I40E_MAC_X722;
1855 }
1856
1857 return I40E_MAC_GENERIC;
1858 }
1859
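/*
 * Return the kernel VA of object "i" of the given HMC type within the
 * backing store, or NULL if the index is out of range.
 */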
1860 static inline void *
1861 ixl_hmc_kva(struct ixl_softc *sc, enum ixl_hmc_types type, unsigned int i)
1862 {
1863 uint8_t *kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
1864 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
1865
1866 if (i >= e->hmc_count)
1867 return NULL;
1868
1869 kva += e->hmc_base;
1870 kva += i * e->hmc_size;
1871
1872 return kva;
1873 }
1874
1875 static inline size_t
1876 ixl_hmc_len(struct ixl_softc *sc, enum ixl_hmc_types type)
1877 {
1878 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
1879
1880 return e->hmc_size;
1881 }
1882
1883 static void
1884 ixl_enable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp)
1885 {
1886 struct ixl_rx_ring *rxr = qp->qp_rxr;
1887
1888 ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid),
1889 I40E_PFINT_DYN_CTLN_INTENA_MASK |
1890 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1891 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
1892 ixl_flush(sc);
1893 }
1894
1895 static void
1896 ixl_disable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp)
1897 {
1898 struct ixl_rx_ring *rxr = qp->qp_rxr;
1899
1900 ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid),
1901 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
1902 ixl_flush(sc);
1903 }
1904
1905 static void
1906 ixl_enable_other_intr(struct ixl_softc *sc)
1907 {
1908
1909 ixl_wr(sc, I40E_PFINT_DYN_CTL0,
1910 I40E_PFINT_DYN_CTL0_INTENA_MASK |
1911 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
1912 (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
1913 ixl_flush(sc);
1914 }
1915
1916 static void
1917 ixl_disable_other_intr(struct ixl_softc *sc)
1918 {
1919
1920 ixl_wr(sc, I40E_PFINT_DYN_CTL0,
1921 (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
1922 ixl_flush(sc);
1923 }
1924
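/*
 * Reprogram the VSI and every queue pair: write the TX/RX HMC
 * contexts, associate the TX queues with this PF, refill the RX rings
 * and enable the RX and TX queues, waiting for each to report that it
 * is running.  Returns EIO if the VSI cannot be fetched or updated,
 * ETIMEDOUT if a queue fails to enable.
 */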
1925 static int
1926 ixl_reinit(struct ixl_softc *sc)
1927 {
1928 struct ixl_rx_ring *rxr;
1929 struct ixl_tx_ring *txr;
1930 unsigned int i;
1931 uint32_t reg;
1932
1933 KASSERT(mutex_owned(&sc->sc_cfg_lock));
1934
1935 if (ixl_get_vsi(sc) != 0)
1936 return EIO;
1937
1938 if (ixl_set_vsi(sc) != 0)
1939 return EIO;
1940
1941 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1942 txr = sc->sc_qps[i].qp_txr;
1943 rxr = sc->sc_qps[i].qp_rxr;
1944
1945 txr->txr_cons = txr->txr_prod = 0;
1946 rxr->rxr_cons = rxr->rxr_prod = 0;
1947
1948 ixl_txr_config(sc, txr);
1949 ixl_rxr_config(sc, rxr);
1950 }
1951
1952 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
1953 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_PREWRITE);
1954
1955 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1956 txr = sc->sc_qps[i].qp_txr;
1957 rxr = sc->sc_qps[i].qp_rxr;
1958
1959 ixl_wr(sc, I40E_QTX_CTL(i), I40E_QTX_CTL_PF_QUEUE |
1960 (sc->sc_pf_id << I40E_QTX_CTL_PF_INDX_SHIFT));
1961 ixl_flush(sc);
1962
1963 ixl_wr(sc, txr->txr_tail, txr->txr_prod);
1964 ixl_wr(sc, rxr->rxr_tail, rxr->rxr_prod);
1965
1966 /* ixl_rxfill() needs lock held */
1967 mutex_enter(&rxr->rxr_lock);
1968 ixl_rxfill(sc, rxr);
1969 mutex_exit(&rxr->rxr_lock);
1970
1971 reg = ixl_rd(sc, I40E_QRX_ENA(i));
1972 SET(reg, I40E_QRX_ENA_QENA_REQ_MASK);
1973 ixl_wr(sc, I40E_QRX_ENA(i), reg);
1974 if (ixl_rxr_enabled(sc, rxr) != 0)
1975 goto stop;
1976
1977 ixl_txr_qdis(sc, txr, 1);
1978
1979 reg = ixl_rd(sc, I40E_QTX_ENA(i));
1980 SET(reg, I40E_QTX_ENA_QENA_REQ_MASK);
1981 ixl_wr(sc, I40E_QTX_ENA(i), reg);
1982
1983 if (ixl_txr_enabled(sc, txr) != 0)
1984 goto stop;
1985 }
1986
1987 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
1988 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE);
1989
1990 return 0;
1991
1992 stop:
1993 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
1994 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE);
1995
1996 return ETIMEDOUT;
1997 }
1998
1999 static int
2000 ixl_init_locked(struct ixl_softc *sc)
2001 {
2002 struct ifnet *ifp = &sc->sc_ec.ec_if;
2003 unsigned int i;
2004 int error, eccap_change;
2005
2006 KASSERT(mutex_owned(&sc->sc_cfg_lock));
2007
2008 if (ISSET(ifp->if_flags, IFF_RUNNING))
2009 ixl_stop_locked(sc);
2010
2011 if (sc->sc_dead) {
2012 return ENXIO;
2013 }
2014
2015 eccap_change = sc->sc_ec.ec_capenable ^ sc->sc_cur_ec_capenable;
2016 if (ISSET(eccap_change, ETHERCAP_VLAN_HWTAGGING))
2017 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWTAGGING;
2018
2019 if (ISSET(eccap_change, ETHERCAP_VLAN_HWFILTER)) {
2020 if (ixl_update_macvlan(sc) == 0) {
2021 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWFILTER;
2022 } else {
2023 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
2024 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER);
2025 }
2026 }
2027
2028 if (sc->sc_intrtype != PCI_INTR_TYPE_MSIX)
2029 sc->sc_nqueue_pairs = 1;
2030 else
2031 sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max;
2032
2033 error = ixl_reinit(sc);
2034 if (error) {
2035 ixl_stop_locked(sc);
2036 return error;
2037 }
2038
2039 SET(ifp->if_flags, IFF_RUNNING);
2040 CLR(ifp->if_flags, IFF_OACTIVE);
2041
2042 (void)ixl_get_link_status(sc);
2043
2044 ixl_config_rss(sc);
2045 ixl_config_queue_intr(sc);
2046
2047 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2048 ixl_enable_queue_intr(sc, &sc->sc_qps[i]);
2049 }
2050
2051 error = ixl_iff(sc);
2052 if (error) {
2053 ixl_stop_locked(sc);
2054 return error;
2055 }
2056
2057 return 0;
2058 }
2059
2060 static int
2061 ixl_init(struct ifnet *ifp)
2062 {
2063 struct ixl_softc *sc = ifp->if_softc;
2064 int error;
2065
2066 mutex_enter(&sc->sc_cfg_lock);
2067 error = ixl_init_locked(sc);
2068 mutex_exit(&sc->sc_cfg_lock);
2069
2070 return error;
2071 }
2072
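/*
 * Push the current RX filter state to the firmware: set the VSI
 * promiscuous flags (unicast/multicast promiscuity, broadcast, VLAN)
 * from IFF_PROMISC/IFF_ALLMULTI and the VLAN_HWFILTER capability, and
 * move the unicast MAC filter if the link-level address has changed.
 */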
2073 static int
2074 ixl_iff(struct ixl_softc *sc)
2075 {
2076 struct ifnet *ifp = &sc->sc_ec.ec_if;
2077 struct ixl_atq iatq;
2078 struct ixl_aq_desc *iaq;
2079 struct ixl_aq_vsi_promisc_param *param;
2080 uint16_t flag_add, flag_del;
2081 int error;
2082
2083 if (!ISSET(ifp->if_flags, IFF_RUNNING))
2084 return 0;
2085
2086 memset(&iatq, 0, sizeof(iatq));
2087
2088 iaq = &iatq.iatq_desc;
2089 iaq->iaq_opcode = htole16(IXL_AQ_OP_SET_VSI_PROMISC);
2090
2091 param = (struct ixl_aq_vsi_promisc_param *)&iaq->iaq_param;
2092 param->flags = htole16(0);
2093
2094 if (!ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)
2095 || ISSET(ifp->if_flags, IFF_PROMISC)) {
2096 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_BCAST |
2097 IXL_AQ_VSI_PROMISC_FLAG_VLAN);
2098 }
2099
2100 if (ISSET(ifp->if_flags, IFF_PROMISC)) {
2101 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
2102 IXL_AQ_VSI_PROMISC_FLAG_MCAST);
2103 } else if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
2104 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_MCAST);
2105 }
2106 param->valid_flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
2107 IXL_AQ_VSI_PROMISC_FLAG_MCAST | IXL_AQ_VSI_PROMISC_FLAG_BCAST |
2108 IXL_AQ_VSI_PROMISC_FLAG_VLAN);
2109 param->seid = sc->sc_seid;
2110
2111 error = ixl_atq_exec(sc, &iatq);
2112 if (error)
2113 return error;
2114
2115 if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK))
2116 return EIO;
2117
2118 if (memcmp(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN) != 0) {
2119 if (ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)) {
2120 flag_add = IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH;
2121 flag_del = IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH;
2122 } else {
2123 flag_add = IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN;
2124 flag_del = IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN;
2125 }
2126
2127 ixl_remove_macvlan(sc, sc->sc_enaddr, 0, flag_del);
2128
2129 memcpy(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN);
2130 ixl_add_macvlan(sc, sc->sc_enaddr, 0, flag_add);
2131 }
2132 return 0;
2133 }
2134
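/*
 * Wait for datapath activity to drain: take and release each ring
 * lock so that any handler currently inside it has finished, and wait
 * for the per-queue workqueue task to complete.
 */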
2135 static void
2136 ixl_stop_rendezvous(struct ixl_softc *sc)
2137 {
2138 struct ixl_tx_ring *txr;
2139 struct ixl_rx_ring *rxr;
2140 unsigned int i;
2141
2142 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2143 txr = sc->sc_qps[i].qp_txr;
2144 rxr = sc->sc_qps[i].qp_rxr;
2145
2146 mutex_enter(&txr->txr_lock);
2147 mutex_exit(&txr->txr_lock);
2148
2149 mutex_enter(&rxr->rxr_lock);
2150 mutex_exit(&rxr->rxr_lock);
2151
2152 ixl_work_wait(sc->sc_workq_txrx,
2153 &sc->sc_qps[i].qp_task);
2154 }
2155 }
2156
2157 static void
2158 ixl_stop_locked(struct ixl_softc *sc)
2159 {
2160 struct ifnet *ifp = &sc->sc_ec.ec_if;
2161 struct ixl_rx_ring *rxr;
2162 struct ixl_tx_ring *txr;
2163 unsigned int i;
2164 uint32_t reg;
2165
2166 KASSERT(mutex_owned(&sc->sc_cfg_lock));
2167
2168 CLR(ifp->if_flags, IFF_RUNNING | IFF_OACTIVE);
2169
2170 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2171 txr = sc->sc_qps[i].qp_txr;
2172 rxr = sc->sc_qps[i].qp_rxr;
2173
2174 ixl_disable_queue_intr(sc, &sc->sc_qps[i]);
2175
2176 mutex_enter(&txr->txr_lock);
2177 ixl_txr_qdis(sc, txr, 0);
2178 /* XXX wait at least 400 usec for all tx queues in one go */
2179 ixl_flush(sc);
2180 DELAY(500);
2181
2182 reg = ixl_rd(sc, I40E_QTX_ENA(i));
2183 CLR(reg, I40E_QTX_ENA_QENA_REQ_MASK);
2184 ixl_wr(sc, I40E_QTX_ENA(i), reg);
2185 		/* XXX wait 50ms from completion of the TX queue disable */
2186 ixl_flush(sc);
2187 DELAY(50);
2188
2189 if (ixl_txr_disabled(sc, txr) != 0) {
2190 mutex_exit(&txr->txr_lock);
2191 goto die;
2192 }
2193 mutex_exit(&txr->txr_lock);
2194
2195 mutex_enter(&rxr->rxr_lock);
2196 reg = ixl_rd(sc, I40E_QRX_ENA(i));
2197 CLR(reg, I40E_QRX_ENA_QENA_REQ_MASK);
2198 ixl_wr(sc, I40E_QRX_ENA(i), reg);
2199 /* XXX wait 50ms from completion of the RX queue disable */
2200 ixl_flush(sc);
2201 DELAY(50);
2202
2203 if (ixl_rxr_disabled(sc, rxr) != 0) {
2204 mutex_exit(&rxr->rxr_lock);
2205 goto die;
2206 }
2207 mutex_exit(&rxr->rxr_lock);
2208 }
2209
2210 ixl_stop_rendezvous(sc);
2211
2212 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2213 txr = sc->sc_qps[i].qp_txr;
2214 rxr = sc->sc_qps[i].qp_rxr;
2215
2216 ixl_txr_unconfig(sc, txr);
2217 ixl_rxr_unconfig(sc, rxr);
2218
2219 ixl_txr_clean(sc, txr);
2220 ixl_rxr_clean(sc, rxr);
2221 }
2222
2223 return;
2224 die:
2225 sc->sc_dead = true;
2226 	log(LOG_CRIT, "%s: failed to shut down rings\n",
2227 device_xname(sc->sc_dev));
2228 return;
2229 }
2230
2231 static void
2232 ixl_stop(struct ifnet *ifp, int disable)
2233 {
2234 struct ixl_softc *sc = ifp->if_softc;
2235
2236 mutex_enter(&sc->sc_cfg_lock);
2237 ixl_stop_locked(sc);
2238 mutex_exit(&sc->sc_cfg_lock);
2239 }
2240
2241 static int
2242 ixl_queue_pairs_alloc(struct ixl_softc *sc)
2243 {
2244 struct ixl_queue_pair *qp;
2245 unsigned int i;
2246 size_t sz;
2247
2248 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2249 sc->sc_qps = kmem_zalloc(sz, KM_SLEEP);
2250
2251 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2252 qp = &sc->sc_qps[i];
2253
2254 qp->qp_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
2255 ixl_handle_queue, qp);
2256 if (qp->qp_si == NULL)
2257 goto free;
2258
2259 qp->qp_txr = ixl_txr_alloc(sc, i);
2260 if (qp->qp_txr == NULL)
2261 goto free;
2262
2263 qp->qp_rxr = ixl_rxr_alloc(sc, i);
2264 if (qp->qp_rxr == NULL)
2265 goto free;
2266
2267 qp->qp_sc = sc;
2268 ixl_work_set(&qp->qp_task, ixl_handle_queue, qp);
2269 snprintf(qp->qp_name, sizeof(qp->qp_name),
2270 "%s-TXRX%d", device_xname(sc->sc_dev), i);
2271 }
2272
2273 return 0;
2274 free:
2275 if (sc->sc_qps != NULL) {
2276 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2277 qp = &sc->sc_qps[i];
2278
2279 if (qp->qp_txr != NULL)
2280 ixl_txr_free(sc, qp->qp_txr);
2281 if (qp->qp_rxr != NULL)
2282 ixl_rxr_free(sc, qp->qp_rxr);
2283 if (qp->qp_si != NULL)
2284 softint_disestablish(qp->qp_si);
2285 }
2286
2287 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2288 kmem_free(sc->sc_qps, sz);
2289 sc->sc_qps = NULL;
2290 }
2291
2292 return -1;
2293 }
2294
2295 static void
2296 ixl_queue_pairs_free(struct ixl_softc *sc)
2297 {
2298 struct ixl_queue_pair *qp;
2299 unsigned int i;
2300 size_t sz;
2301
2302 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2303 qp = &sc->sc_qps[i];
2304 ixl_txr_free(sc, qp->qp_txr);
2305 ixl_rxr_free(sc, qp->qp_rxr);
2306 softint_disestablish(qp->qp_si);
2307 }
2308
2309 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2310 kmem_free(sc->sc_qps, sz);
2311 sc->sc_qps = NULL;
2312 }
2313
2314 static struct ixl_tx_ring *
2315 ixl_txr_alloc(struct ixl_softc *sc, unsigned int qid)
2316 {
2317 struct ixl_tx_ring *txr = NULL;
2318 struct ixl_tx_map *maps = NULL, *txm;
2319 unsigned int i;
2320
2321 txr = kmem_zalloc(sizeof(*txr), KM_SLEEP);
2322 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_tx_ring_ndescs,
2323 KM_SLEEP);
2324
2325 if (ixl_dmamem_alloc(sc, &txr->txr_mem,
2326 sizeof(struct ixl_tx_desc) * sc->sc_tx_ring_ndescs,
2327 IXL_TX_QUEUE_ALIGN) != 0)
2328 goto free;
2329
2330 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2331 txm = &maps[i];
2332
2333 if (bus_dmamap_create(sc->sc_dmat,
2334 IXL_HARDMTU, IXL_TX_PKT_DESCS, IXL_HARDMTU, 0,
2335 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &txm->txm_map) != 0)
2336 goto uncreate;
2337
2338 txm->txm_eop = -1;
2339 txm->txm_m = NULL;
2340 }
2341
2342 txr->txr_cons = txr->txr_prod = 0;
2343 txr->txr_maps = maps;
2344
2345 txr->txr_intrq = pcq_create(sc->sc_tx_ring_ndescs, KM_NOSLEEP);
2346 if (txr->txr_intrq == NULL)
2347 goto uncreate;
2348
2349 txr->txr_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
2350 ixl_deferred_transmit, txr);
2351 if (txr->txr_si == NULL)
2352 goto destroy_pcq;
2353
2354 txr->txr_tail = I40E_QTX_TAIL(qid);
2355 txr->txr_qid = qid;
2356 txr->txr_sc = sc;
2357 mutex_init(&txr->txr_lock, MUTEX_DEFAULT, IPL_NET);
2358
2359 return txr;
2360
2361 destroy_pcq:
2362 pcq_destroy(txr->txr_intrq);
2363 uncreate:
2364 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2365 txm = &maps[i];
2366
2367 if (txm->txm_map == NULL)
2368 continue;
2369
2370 bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2371 }
2372
2373 ixl_dmamem_free(sc, &txr->txr_mem);
2374 free:
2375 kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs);
2376 kmem_free(txr, sizeof(*txr));
2377
2378 return NULL;
2379 }
2380
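/*
 * Set or clear the pre-work queue disable bit for a TX queue.  Each
 * I40E_GLLAN_TXPRE_QDIS register covers 128 queues: qid / 128 selects
 * the register and qid % 128 goes into the QINDX field.
 */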
2381 static void
2382 ixl_txr_qdis(struct ixl_softc *sc, struct ixl_tx_ring *txr, int enable)
2383 {
2384 unsigned int qid;
2385 bus_size_t reg;
2386 uint32_t r;
2387
2388 qid = txr->txr_qid + sc->sc_base_queue;
2389 reg = I40E_GLLAN_TXPRE_QDIS(qid / 128);
2390 qid %= 128;
2391
2392 r = ixl_rd(sc, reg);
2393 CLR(r, I40E_GLLAN_TXPRE_QDIS_QINDX_MASK);
2394 SET(r, qid << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
2395 SET(r, enable ? I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK :
2396 I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK);
2397 ixl_wr(sc, reg, r);
2398 }
2399
2400 static void
2401 ixl_txr_config(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2402 {
2403 struct ixl_hmc_txq txq;
2404 struct ixl_aq_vsi_data *data = IXL_DMA_KVA(&sc->sc_scratch);
2405 void *hmc;
2406
2407 memset(&txq, 0, sizeof(txq));
2408 txq.head = htole16(txr->txr_cons);
2409 txq.new_context = 1;
2410 txq.base = htole64(IXL_DMA_DVA(&txr->txr_mem) / IXL_HMC_TXQ_BASE_UNIT);
2411 txq.head_wb_ena = IXL_HMC_TXQ_DESC_WB;
2412 txq.qlen = htole16(sc->sc_tx_ring_ndescs);
2413 txq.tphrdesc_ena = 0;
2414 txq.tphrpacket_ena = 0;
2415 txq.tphwdesc_ena = 0;
2416 txq.rdylist = data->qs_handle[0];
2417
2418 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2419 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2420 ixl_hmc_pack(hmc, &txq, ixl_hmc_pack_txq,
2421 __arraycount(ixl_hmc_pack_txq));
2422 }
2423
2424 static void
2425 ixl_txr_unconfig(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2426 {
2427 void *hmc;
2428
2429 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2430 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2431 }
2432
2433 static void
2434 ixl_txr_clean(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2435 {
2436 struct ixl_tx_map *maps, *txm;
2437 bus_dmamap_t map;
2438 unsigned int i;
2439
2440 maps = txr->txr_maps;
2441 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2442 txm = &maps[i];
2443
2444 if (txm->txm_m == NULL)
2445 continue;
2446
2447 map = txm->txm_map;
2448 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2449 BUS_DMASYNC_POSTWRITE);
2450 bus_dmamap_unload(sc->sc_dmat, map);
2451
2452 m_freem(txm->txm_m);
2453 txm->txm_m = NULL;
2454 }
2455 }
2456
2457 static int
2458 ixl_txr_enabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2459 {
2460 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2461 uint32_t reg;
2462 int i;
2463
2464 for (i = 0; i < 10; i++) {
2465 reg = ixl_rd(sc, ena);
2466 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK))
2467 return 0;
2468
2469 delaymsec(10);
2470 }
2471
2472 return ETIMEDOUT;
2473 }
2474
2475 static int
2476 ixl_txr_disabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2477 {
2478 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2479 uint32_t reg;
2480 int i;
2481
2482 KASSERT(mutex_owned(&txr->txr_lock));
2483
2484 for (i = 0; i < 20; i++) {
2485 reg = ixl_rd(sc, ena);
2486 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK) == 0)
2487 return 0;
2488
2489 delaymsec(10);
2490 }
2491
2492 return ETIMEDOUT;
2493 }
2494
2495 static void
2496 ixl_txr_free(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2497 {
2498 struct ixl_tx_map *maps, *txm;
2499 struct mbuf *m;
2500 unsigned int i;
2501
2502 softint_disestablish(txr->txr_si);
2503 while ((m = pcq_get(txr->txr_intrq)) != NULL)
2504 m_freem(m);
2505 pcq_destroy(txr->txr_intrq);
2506
2507 maps = txr->txr_maps;
2508 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2509 txm = &maps[i];
2510
2511 bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2512 }
2513
2514 ixl_dmamem_free(sc, &txr->txr_mem);
2515 mutex_destroy(&txr->txr_lock);
2516 kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs);
2517 kmem_free(txr, sizeof(*txr));
2518 }
2519
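/*
 * DMA-load an mbuf chain for transmit.  If it needs more segments
 * than the map allows (EFBIG), defragment it with m_defrag() and
 * retry once, counting the outcome in the ring's event counters.
 */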
2520 static inline int
2521 ixl_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf **m0,
2522 struct ixl_tx_ring *txr)
2523 {
2524 struct mbuf *m;
2525 int error;
2526
2527 KASSERT(mutex_owned(&txr->txr_lock));
2528
2529 m = *m0;
2530
2531 error = bus_dmamap_load_mbuf(dmat, map, m,
2532 BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT);
2533 if (error != EFBIG)
2534 return error;
2535
2536 m = m_defrag(m, M_DONTWAIT);
2537 if (m != NULL) {
2538 *m0 = m;
2539 txr->txr_defragged.ev_count++;
2540
2541 error = bus_dmamap_load_mbuf(dmat, map, m,
2542 BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT);
2543 } else {
2544 txr->txr_defrag_failed.ev_count++;
2545 error = ENOBUFS;
2546 }
2547
2548 return error;
2549 }
2550
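/*
 * Common transmit path for if_start (ifp->if_snd) and if_transmit
 * (per-ring pcq), selected by is_transmit.  Each packet uses one data
 * descriptor per DMA segment; the last descriptor gets EOP|RS, and
 * the tail register is written once if anything was queued.
 */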
2551 static void
2552 ixl_tx_common_locked(struct ifnet *ifp, struct ixl_tx_ring *txr,
2553 bool is_transmit)
2554 {
2555 struct ixl_softc *sc = ifp->if_softc;
2556 struct ixl_tx_desc *ring, *txd;
2557 struct ixl_tx_map *txm;
2558 bus_dmamap_t map;
2559 struct mbuf *m;
2560 uint64_t cmd, cmd_vlan;
2561 unsigned int prod, free, last, i;
2562 unsigned int mask;
2563 int post = 0;
2564
2565 KASSERT(mutex_owned(&txr->txr_lock));
2566
2567 if (ifp->if_link_state != LINK_STATE_UP
2568 || !ISSET(ifp->if_flags, IFF_RUNNING)
2569 || (!is_transmit && ISSET(ifp->if_flags, IFF_OACTIVE))) {
2570 if (!is_transmit)
2571 IFQ_PURGE(&ifp->if_snd);
2572 return;
2573 }
2574
2575 prod = txr->txr_prod;
2576 free = txr->txr_cons;
2577 if (free <= prod)
2578 free += sc->sc_tx_ring_ndescs;
2579 free -= prod;
2580
2581 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2582 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTWRITE);
2583
2584 ring = IXL_DMA_KVA(&txr->txr_mem);
2585 mask = sc->sc_tx_ring_ndescs - 1;
2586 last = prod;
2587 cmd = 0;
2588 txd = NULL;
2589
2590 for (;;) {
2591 if (free <= IXL_TX_PKT_DESCS) {
2592 if (!is_transmit)
2593 SET(ifp->if_flags, IFF_OACTIVE);
2594 break;
2595 }
2596
2597 if (is_transmit)
2598 m = pcq_get(txr->txr_intrq);
2599 else
2600 IFQ_DEQUEUE(&ifp->if_snd, m);
2601
2602 if (m == NULL)
2603 break;
2604
2605 txm = &txr->txr_maps[prod];
2606 map = txm->txm_map;
2607
2608 if (ixl_load_mbuf(sc->sc_dmat, map, &m, txr) != 0) {
2609 txr->txr_oerrors++;
2610 m_freem(m);
2611 continue;
2612 }
2613
2614 if (vlan_has_tag(m)) {
2615 cmd_vlan = (uint64_t)vlan_get_tag(m) <<
2616 IXL_TX_DESC_L2TAG1_SHIFT;
2617 cmd_vlan |= IXL_TX_DESC_CMD_IL2TAG1;
2618 } else {
2619 cmd_vlan = 0;
2620 }
2621
2622 bus_dmamap_sync(sc->sc_dmat, map, 0,
2623 map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2624
2625 for (i = 0; i < (unsigned int)map->dm_nsegs; i++) {
2626 txd = &ring[prod];
2627
2628 cmd = (uint64_t)map->dm_segs[i].ds_len <<
2629 IXL_TX_DESC_BSIZE_SHIFT;
2630 cmd |= IXL_TX_DESC_DTYPE_DATA | IXL_TX_DESC_CMD_ICRC;
2631 cmd |= cmd_vlan;
2632
2633 txd->addr = htole64(map->dm_segs[i].ds_addr);
2634 txd->cmd = htole64(cmd);
2635
2636 last = prod;
2637
2638 prod++;
2639 prod &= mask;
2640 }
2641 cmd |= IXL_TX_DESC_CMD_EOP | IXL_TX_DESC_CMD_RS;
2642 txd->cmd = htole64(cmd);
2643
2644 txm->txm_m = m;
2645 txm->txm_eop = last;
2646
2647 bpf_mtap(ifp, m, BPF_D_OUT);
2648
2649 free -= i;
2650 post = 1;
2651 }
2652
2653 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2654 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREWRITE);
2655
2656 if (post) {
2657 txr->txr_prod = prod;
2658 ixl_wr(sc, txr->txr_tail, prod);
2659 }
2660 }
2661
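/*
 * Reclaim completed TX descriptors.  A packet is done when the
 * descriptor recorded in txm_eop reads back with DTYPE == DONE.
 * Returns nonzero if txlimit stopped the scan before the ring was
 * fully drained.
 */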
2662 static int
2663 ixl_txeof(struct ixl_softc *sc, struct ixl_tx_ring *txr, u_int txlimit)
2664 {
2665 struct ifnet *ifp = &sc->sc_ec.ec_if;
2666 struct ixl_tx_desc *ring, *txd;
2667 struct ixl_tx_map *txm;
2668 struct mbuf *m;
2669 bus_dmamap_t map;
2670 unsigned int cons, prod, last;
2671 unsigned int mask;
2672 uint64_t dtype;
2673 int done = 0, more = 0;
2674
2675 KASSERT(mutex_owned(&txr->txr_lock));
2676
2677 prod = txr->txr_prod;
2678 cons = txr->txr_cons;
2679
2680 if (cons == prod)
2681 return 0;
2682
2683 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2684 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTREAD);
2685
2686 ring = IXL_DMA_KVA(&txr->txr_mem);
2687 mask = sc->sc_tx_ring_ndescs - 1;
2688
2689 do {
2690 if (txlimit-- <= 0) {
2691 more = 1;
2692 break;
2693 }
2694
2695 txm = &txr->txr_maps[cons];
2696 last = txm->txm_eop;
2697 txd = &ring[last];
2698
2699 dtype = txd->cmd & htole64(IXL_TX_DESC_DTYPE_MASK);
2700 if (dtype != htole64(IXL_TX_DESC_DTYPE_DONE))
2701 break;
2702
2703 map = txm->txm_map;
2704
2705 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2706 BUS_DMASYNC_POSTWRITE);
2707 bus_dmamap_unload(sc->sc_dmat, map);
2708
2709 m = txm->txm_m;
2710 if (m != NULL) {
2711 txr->txr_opackets++;
2712 txr->txr_obytes += m->m_pkthdr.len;
2713 if (ISSET(m->m_flags, M_MCAST))
2714 txr->txr_omcasts++;
2715 m_freem(m);
2716 }
2717
2718 txm->txm_m = NULL;
2719 txm->txm_eop = -1;
2720
2721 cons = last + 1;
2722 cons &= mask;
2723 done = 1;
2724 } while (cons != prod);
2725
2726 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2727 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREREAD);
2728
2729 txr->txr_cons = cons;
2730
2731 if (done) {
2732 softint_schedule(txr->txr_si);
2733 if (txr->txr_qid == 0) {
2734 CLR(ifp->if_flags, IFF_OACTIVE);
2735 if_schedule_deferred_start(ifp);
2736 }
2737 }
2738
2739 return more;
2740 }
2741
2742 static void
2743 ixl_start(struct ifnet *ifp)
2744 {
2745 struct ixl_softc *sc;
2746 struct ixl_tx_ring *txr;
2747
2748 sc = ifp->if_softc;
2749 txr = sc->sc_qps[0].qp_txr;
2750
2751 mutex_enter(&txr->txr_lock);
2752 ixl_tx_common_locked(ifp, txr, false);
2753 mutex_exit(&txr->txr_lock);
2754 }
2755
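/* Map the transmitting CPU onto a TX queue: cpu index modulo the
 * number of active queue pairs. */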
2756 static inline unsigned int
2757 ixl_select_txqueue(struct ixl_softc *sc, struct mbuf *m)
2758 {
2759 u_int cpuid;
2760
2761 cpuid = cpu_index(curcpu());
2762
2763 return (unsigned int)(cpuid % sc->sc_nqueue_pairs);
2764 }
2765
2766 static int
2767 ixl_transmit(struct ifnet *ifp, struct mbuf *m)
2768 {
2769 struct ixl_softc *sc;
2770 struct ixl_tx_ring *txr;
2771 unsigned int qid;
2772
2773 sc = ifp->if_softc;
2774 qid = ixl_select_txqueue(sc, m);
2775
2776 txr = sc->sc_qps[qid].qp_txr;
2777
2778 if (__predict_false(!pcq_put(txr->txr_intrq, m))) {
2779 mutex_enter(&txr->txr_lock);
2780 txr->txr_pcqdrop.ev_count++;
2781 mutex_exit(&txr->txr_lock);
2782
2783 m_freem(m);
2784 return ENOBUFS;
2785 }
2786
2787 if (mutex_tryenter(&txr->txr_lock)) {
2788 ixl_tx_common_locked(ifp, txr, true);
2789 mutex_exit(&txr->txr_lock);
2790 } else {
2791 softint_schedule(txr->txr_si);
2792 }
2793
2794 return 0;
2795 }
2796
2797 static void
2798 ixl_deferred_transmit(void *xtxr)
2799 {
2800 struct ixl_tx_ring *txr = xtxr;
2801 struct ixl_softc *sc = txr->txr_sc;
2802 struct ifnet *ifp = &sc->sc_ec.ec_if;
2803
2804 mutex_enter(&txr->txr_lock);
2805 txr->txr_transmitdef.ev_count++;
2806 if (pcq_peek(txr->txr_intrq) != NULL)
2807 ixl_tx_common_locked(ifp, txr, true);
2808 mutex_exit(&txr->txr_lock);
2809 }
2810
2811 static struct ixl_rx_ring *
2812 ixl_rxr_alloc(struct ixl_softc *sc, unsigned int qid)
2813 {
2814 struct ixl_rx_ring *rxr = NULL;
2815 struct ixl_rx_map *maps = NULL, *rxm;
2816 unsigned int i;
2817
2818 rxr = kmem_zalloc(sizeof(*rxr), KM_SLEEP);
2819 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_rx_ring_ndescs,
2820 KM_SLEEP);
2821
2822 if (ixl_dmamem_alloc(sc, &rxr->rxr_mem,
2823 sizeof(struct ixl_rx_rd_desc_32) * sc->sc_rx_ring_ndescs,
2824 IXL_RX_QUEUE_ALIGN) != 0)
2825 goto free;
2826
2827 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2828 rxm = &maps[i];
2829
2830 if (bus_dmamap_create(sc->sc_dmat,
2831 IXL_HARDMTU, 1, IXL_HARDMTU, 0,
2832 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &rxm->rxm_map) != 0)
2833 goto uncreate;
2834
2835 rxm->rxm_m = NULL;
2836 }
2837
2838 rxr->rxr_cons = rxr->rxr_prod = 0;
2839 rxr->rxr_m_head = NULL;
2840 rxr->rxr_m_tail = &rxr->rxr_m_head;
2841 rxr->rxr_maps = maps;
2842
2843 rxr->rxr_tail = I40E_QRX_TAIL(qid);
2844 rxr->rxr_qid = qid;
2845 mutex_init(&rxr->rxr_lock, MUTEX_DEFAULT, IPL_NET);
2846
2847 return rxr;
2848
2849 uncreate:
2850 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2851 rxm = &maps[i];
2852
2853 if (rxm->rxm_map == NULL)
2854 continue;
2855
2856 bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
2857 }
2858
2859 ixl_dmamem_free(sc, &rxr->rxr_mem);
2860 free:
2861 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs);
2862 kmem_free(rxr, sizeof(*rxr));
2863
2864 return NULL;
2865 }
2866
2867 static void
2868 ixl_rxr_clean(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2869 {
2870 struct ixl_rx_map *maps, *rxm;
2871 bus_dmamap_t map;
2872 unsigned int i;
2873
2874 maps = rxr->rxr_maps;
2875 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2876 rxm = &maps[i];
2877
2878 if (rxm->rxm_m == NULL)
2879 continue;
2880
2881 map = rxm->rxm_map;
2882 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2883 BUS_DMASYNC_POSTWRITE);
2884 bus_dmamap_unload(sc->sc_dmat, map);
2885
2886 m_freem(rxm->rxm_m);
2887 rxm->rxm_m = NULL;
2888 }
2889
2890 m_freem(rxr->rxr_m_head);
2891 rxr->rxr_m_head = NULL;
2892 rxr->rxr_m_tail = &rxr->rxr_m_head;
2893
2894 rxr->rxr_prod = rxr->rxr_cons = 0;
2895 }
2896
2897 static int
2898 ixl_rxr_enabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2899 {
2900 bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
2901 uint32_t reg;
2902 int i;
2903
2904 for (i = 0; i < 10; i++) {
2905 reg = ixl_rd(sc, ena);
2906 if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK))
2907 return 0;
2908
2909 delaymsec(10);
2910 }
2911
2912 return ETIMEDOUT;
2913 }
2914
2915 static int
2916 ixl_rxr_disabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2917 {
2918 bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
2919 uint32_t reg;
2920 int i;
2921
2922 KASSERT(mutex_owned(&rxr->rxr_lock));
2923
2924 for (i = 0; i < 20; i++) {
2925 reg = ixl_rd(sc, ena);
2926 if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK) == 0)
2927 return 0;
2928
2929 delaymsec(10);
2930 }
2931
2932 return ETIMEDOUT;
2933 }
2934
2935 static void
2936 ixl_rxr_config(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2937 {
2938 struct ixl_hmc_rxq rxq;
2939 void *hmc;
2940
2941 memset(&rxq, 0, sizeof(rxq));
2942
2943 rxq.head = htole16(rxr->rxr_cons);
2944 rxq.base = htole64(IXL_DMA_DVA(&rxr->rxr_mem) / IXL_HMC_RXQ_BASE_UNIT);
2945 rxq.qlen = htole16(sc->sc_rx_ring_ndescs);
2946 rxq.dbuff = htole16(MCLBYTES / IXL_HMC_RXQ_DBUFF_UNIT);
2947 rxq.hbuff = 0;
2948 rxq.dtype = IXL_HMC_RXQ_DTYPE_NOSPLIT;
2949 rxq.dsize = IXL_HMC_RXQ_DSIZE_32;
2950 rxq.crcstrip = 1;
2951 rxq.l2sel = 1;
2952 rxq.showiv = 1;
2953 rxq.rxmax = htole16(IXL_HARDMTU);
2954 rxq.tphrdesc_ena = 0;
2955 rxq.tphwdesc_ena = 0;
2956 rxq.tphdata_ena = 0;
2957 rxq.tphhead_ena = 0;
2958 rxq.lrxqthresh = 0;
2959 rxq.prefena = 1;
2960
2961 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
2962 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
2963 ixl_hmc_pack(hmc, &rxq, ixl_hmc_pack_rxq,
2964 __arraycount(ixl_hmc_pack_rxq));
2965 }
2966
2967 static void
2968 ixl_rxr_unconfig(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2969 {
2970 void *hmc;
2971
2972 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
2973 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
2974 }
2975
2976 static void
2977 ixl_rxr_free(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2978 {
2979 struct ixl_rx_map *maps, *rxm;
2980 unsigned int i;
2981
2982 maps = rxr->rxr_maps;
2983 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2984 rxm = &maps[i];
2985
2986 bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
2987 }
2988
2989 ixl_dmamem_free(sc, &rxr->rxr_mem);
2990 mutex_destroy(&rxr->rxr_lock);
2991 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs);
2992 kmem_free(rxr, sizeof(*rxr));
2993 }
2994
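/*
 * Translate the RX descriptor packet type and error bits into mbuf
 * checksum flags: the flags the hardware could verify are always set,
 * and the matching _BAD flags are added only when IPE/L4E report an
 * error.
 */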
2995 static inline void
2996 ixl_rx_csum(struct mbuf *m, uint64_t qword)
2997 {
2998 int flags_mask;
2999
3000 if (!ISSET(qword, IXL_RX_DESC_L3L4P)) {
3001 /* No L3 or L4 checksum was calculated */
3002 return;
3003 }
3004
3005 switch (__SHIFTOUT(qword, IXL_RX_DESC_PTYPE_MASK)) {
3006 case IXL_RX_DESC_PTYPE_IPV4FRAG:
3007 case IXL_RX_DESC_PTYPE_IPV4:
3008 case IXL_RX_DESC_PTYPE_SCTPV4:
3009 case IXL_RX_DESC_PTYPE_ICMPV4:
3010 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
3011 break;
3012 case IXL_RX_DESC_PTYPE_TCPV4:
3013 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
3014 flags_mask |= M_CSUM_TCPv4 | M_CSUM_TCP_UDP_BAD;
3015 break;
3016 case IXL_RX_DESC_PTYPE_UDPV4:
3017 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
3018 flags_mask |= M_CSUM_UDPv4 | M_CSUM_TCP_UDP_BAD;
3019 break;
3020 case IXL_RX_DESC_PTYPE_TCPV6:
3021 flags_mask = M_CSUM_TCPv6 | M_CSUM_TCP_UDP_BAD;
3022 break;
3023 case IXL_RX_DESC_PTYPE_UDPV6:
3024 flags_mask = M_CSUM_UDPv6 | M_CSUM_TCP_UDP_BAD;
3025 break;
3026 default:
3027 flags_mask = 0;
3028 }
3029
3030 m->m_pkthdr.csum_flags |= (flags_mask & (M_CSUM_IPv4 |
3031 M_CSUM_TCPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv4 | M_CSUM_UDPv6));
3032
3033 if (ISSET(qword, IXL_RX_DESC_IPE)) {
3034 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_IPv4_BAD);
3035 }
3036
3037 if (ISSET(qword, IXL_RX_DESC_L4E)) {
3038 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_TCP_UDP_BAD);
3039 }
3040 }
3041
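/*
 * Harvest received packets.  Fragments are chained through
 * rxr_m_head/rxr_m_tail until a descriptor with EOP is seen, at which
 * point the packet is handed to the stack unless RXE or OVERSIZE is
 * set.  The ring is refilled afterwards; a refill failure is counted
 * as an input queue drop.
 */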
3042 static int
3043 ixl_rxeof(struct ixl_softc *sc, struct ixl_rx_ring *rxr, u_int rxlimit)
3044 {
3045 struct ifnet *ifp = &sc->sc_ec.ec_if;
3046 struct ixl_rx_wb_desc_32 *ring, *rxd;
3047 struct ixl_rx_map *rxm;
3048 bus_dmamap_t map;
3049 unsigned int cons, prod;
3050 struct mbuf *m;
3051 uint64_t word, word0;
3052 unsigned int len;
3053 unsigned int mask;
3054 int done = 0, more = 0;
3055
3056 KASSERT(mutex_owned(&rxr->rxr_lock));
3057
3058 if (!ISSET(ifp->if_flags, IFF_RUNNING))
3059 return 0;
3060
3061 prod = rxr->rxr_prod;
3062 cons = rxr->rxr_cons;
3063
3064 if (cons == prod)
3065 return 0;
3066
3067 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
3068 0, IXL_DMA_LEN(&rxr->rxr_mem),
3069 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3070
3071 ring = IXL_DMA_KVA(&rxr->rxr_mem);
3072 mask = sc->sc_rx_ring_ndescs - 1;
3073
3074 do {
3075 if (rxlimit-- <= 0) {
3076 more = 1;
3077 break;
3078 }
3079
3080 rxd = &ring[cons];
3081
3082 word = le64toh(rxd->qword1);
3083
3084 if (!ISSET(word, IXL_RX_DESC_DD))
3085 break;
3086
3087 rxm = &rxr->rxr_maps[cons];
3088
3089 map = rxm->rxm_map;
3090 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3091 BUS_DMASYNC_POSTREAD);
3092 bus_dmamap_unload(sc->sc_dmat, map);
3093
3094 m = rxm->rxm_m;
3095 rxm->rxm_m = NULL;
3096
3097 KASSERT(m != NULL);
3098
3099 len = (word & IXL_RX_DESC_PLEN_MASK) >> IXL_RX_DESC_PLEN_SHIFT;
3100 m->m_len = len;
3101 m->m_pkthdr.len = 0;
3102
3103 m->m_next = NULL;
3104 *rxr->rxr_m_tail = m;
3105 rxr->rxr_m_tail = &m->m_next;
3106
3107 m = rxr->rxr_m_head;
3108 m->m_pkthdr.len += len;
3109
3110 if (ISSET(word, IXL_RX_DESC_EOP)) {
3111 word0 = le64toh(rxd->qword0);
3112
3113 if (ISSET(word, IXL_RX_DESC_L2TAG1P)) {
3114 vlan_set_tag(m,
3115 __SHIFTOUT(word0, IXL_RX_DESC_L2TAG1_MASK));
3116 }
3117
3118 if ((ifp->if_capenable & IXL_IFCAP_RXCSUM) != 0)
3119 ixl_rx_csum(m, word);
3120
3121 if (!ISSET(word,
3122 IXL_RX_DESC_RXE | IXL_RX_DESC_OVERSIZE)) {
3123 m_set_rcvif(m, ifp);
3124 rxr->rxr_ipackets++;
3125 rxr->rxr_ibytes += m->m_pkthdr.len;
3126 if_percpuq_enqueue(ifp->if_percpuq, m);
3127 } else {
3128 rxr->rxr_ierrors++;
3129 m_freem(m);
3130 }
3131
3132 rxr->rxr_m_head = NULL;
3133 rxr->rxr_m_tail = &rxr->rxr_m_head;
3134 }
3135
3136 cons++;
3137 cons &= mask;
3138
3139 done = 1;
3140 } while (cons != prod);
3141
3142 if (done) {
3143 rxr->rxr_cons = cons;
3144 if (ixl_rxfill(sc, rxr) == -1)
3145 rxr->rxr_iqdrops++;
3146 }
3147
3148 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
3149 0, IXL_DMA_LEN(&rxr->rxr_mem),
3150 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3151
3152 return more;
3153 }
3154
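/*
 * Refill the RX ring: allocate a cluster for each unrefreshed slot,
 * ETHER_ALIGN it, DMA-load it and write its address into the receive
 * descriptor.  The tail register is only written if at least one
 * descriptor was posted; returns -1 when no free slots were available
 * or an allocation or mapping failed.
 */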
3155 static int
3156 ixl_rxfill(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3157 {
3158 struct ixl_rx_rd_desc_32 *ring, *rxd;
3159 struct ixl_rx_map *rxm;
3160 bus_dmamap_t map;
3161 struct mbuf *m;
3162 unsigned int prod;
3163 unsigned int slots;
3164 unsigned int mask;
3165 int post = 0, error = 0;
3166
3167 KASSERT(mutex_owned(&rxr->rxr_lock));
3168
3169 prod = rxr->rxr_prod;
3170 slots = ixl_rxr_unrefreshed(rxr->rxr_prod, rxr->rxr_cons,
3171 sc->sc_rx_ring_ndescs);
3172
3173 ring = IXL_DMA_KVA(&rxr->rxr_mem);
3174 mask = sc->sc_rx_ring_ndescs - 1;
3175
3176 if (__predict_false(slots <= 0))
3177 return -1;
3178
3179 do {
3180 rxm = &rxr->rxr_maps[prod];
3181
3182 MGETHDR(m, M_DONTWAIT, MT_DATA);
3183 if (m == NULL) {
3184 rxr->rxr_mgethdr_failed.ev_count++;
3185 error = -1;
3186 break;
3187 }
3188
3189 MCLGET(m, M_DONTWAIT);
3190 if (!ISSET(m->m_flags, M_EXT)) {
3191 rxr->rxr_mgetcl_failed.ev_count++;
3192 error = -1;
3193 m_freem(m);
3194 break;
3195 }
3196
3197 m->m_len = m->m_pkthdr.len = MCLBYTES + ETHER_ALIGN;
3198 m_adj(m, ETHER_ALIGN);
3199
3200 map = rxm->rxm_map;
3201
3202 if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
3203 BUS_DMA_READ | BUS_DMA_NOWAIT) != 0) {
3204 rxr->rxr_mbuf_load_failed.ev_count++;
3205 error = -1;
3206 m_freem(m);
3207 break;
3208 }
3209
3210 rxm->rxm_m = m;
3211
3212 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3213 BUS_DMASYNC_PREREAD);
3214
3215 rxd = &ring[prod];
3216
3217 rxd->paddr = htole64(map->dm_segs[0].ds_addr);
3218 rxd->haddr = htole64(0);
3219
3220 prod++;
3221 prod &= mask;
3222
3223 post = 1;
3224
3225 } while (--slots);
3226
3227 if (post) {
3228 rxr->rxr_prod = prod;
3229 ixl_wr(sc, rxr->rxr_tail, prod);
3230 }
3231
3232 return error;
3233 }
3234
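/*
 * Run TX then RX completion for a queue pair under the respective
 * ring locks, bumping the supplied event counters.  The return value
 * packs the "more work pending" flags: bit 0 for TX, bit 1 for RX.
 */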
3235 static inline int
3236 ixl_handle_queue_common(struct ixl_softc *sc, struct ixl_queue_pair *qp,
3237 u_int txlimit, struct evcnt *txevcnt,
3238 u_int rxlimit, struct evcnt *rxevcnt)
3239 {
3240 struct ixl_tx_ring *txr = qp->qp_txr;
3241 struct ixl_rx_ring *rxr = qp->qp_rxr;
3242 int txmore, rxmore;
3243 int rv;
3244
3245 KASSERT(!mutex_owned(&txr->txr_lock));
3246 KASSERT(!mutex_owned(&rxr->rxr_lock));
3247
3248 mutex_enter(&txr->txr_lock);
3249 txevcnt->ev_count++;
3250 txmore = ixl_txeof(sc, txr, txlimit);
3251 mutex_exit(&txr->txr_lock);
3252
3253 mutex_enter(&rxr->rxr_lock);
3254 rxevcnt->ev_count++;
3255 rxmore = ixl_rxeof(sc, rxr, rxlimit);
3256 mutex_exit(&rxr->rxr_lock);
3257
3258 rv = txmore | (rxmore << 1);
3259
3260 return rv;
3261 }
3262
3263 static void
3264 ixl_sched_handle_queue(struct ixl_softc *sc, struct ixl_queue_pair *qp)
3265 {
3266
3267 if (qp->qp_workqueue)
3268 ixl_work_add(sc->sc_workq_txrx, &qp->qp_task);
3269 else
3270 softint_schedule(qp->qp_si);
3271 }
3272
3273 static int
3274 ixl_intr(void *xsc)
3275 {
3276 struct ixl_softc *sc = xsc;
3277 struct ixl_tx_ring *txr;
3278 struct ixl_rx_ring *rxr;
3279 uint32_t icr, rxintr, txintr;
3280 int rv = 0;
3281 unsigned int i;
3282
3283 KASSERT(sc != NULL);
3284
3285 ixl_enable_other_intr(sc);
3286 icr = ixl_rd(sc, I40E_PFINT_ICR0);
3287
3288 if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) {
3289 atomic_inc_64(&sc->sc_event_atq.ev_count);
3290 ixl_atq_done(sc);
3291 ixl_work_add(sc->sc_workq, &sc->sc_arq_task);
3292 rv = 1;
3293 }
3294
3295 if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) {
3296 atomic_inc_64(&sc->sc_event_link.ev_count);
3297 ixl_work_add(sc->sc_workq, &sc->sc_link_state_task);
3298 rv = 1;
3299 }
3300
3301 rxintr = icr & I40E_INTR_NOTX_RX_MASK;
3302 txintr = icr & I40E_INTR_NOTX_TX_MASK;
3303
3304 if (txintr || rxintr) {
3305 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
3306 txr = sc->sc_qps[i].qp_txr;
3307 rxr = sc->sc_qps[i].qp_rxr;
3308
3309 ixl_handle_queue_common(sc, &sc->sc_qps[i],
3310 IXL_TXRX_PROCESS_UNLIMIT, &txr->txr_intr,
3311 IXL_TXRX_PROCESS_UNLIMIT, &rxr->rxr_intr);
3312 }
3313 rv = 1;
3314 }
3315
3316 return rv;
3317 }
3318
3319 static int
3320 ixl_queue_intr(void *xqp)
3321 {
3322 struct ixl_queue_pair *qp = xqp;
3323 struct ixl_tx_ring *txr = qp->qp_txr;
3324 struct ixl_rx_ring *rxr = qp->qp_rxr;
3325 struct ixl_softc *sc = qp->qp_sc;
3326 u_int txlimit, rxlimit;
3327 int more;
3328
3329 txlimit = sc->sc_tx_intr_process_limit;
3330 rxlimit = sc->sc_rx_intr_process_limit;
3331 qp->qp_workqueue = sc->sc_txrx_workqueue;
3332
3333 more = ixl_handle_queue_common(sc, qp,
3334 txlimit, &txr->txr_intr, rxlimit, &rxr->rxr_intr);
3335
3336 if (more != 0) {
3337 ixl_sched_handle_queue(sc, qp);
3338 } else {
3339 /* for ALTQ */
3340 if (txr->txr_qid == 0)
3341 if_schedule_deferred_start(&sc->sc_ec.ec_if);
3342 softint_schedule(txr->txr_si);
3343
3344 ixl_enable_queue_intr(sc, qp);
3345 }
3346
3347 return 1;
3348 }
3349
3350 static void
3351 ixl_handle_queue(void *xqp)
3352 {
3353 struct ixl_queue_pair *qp = xqp;
3354 struct ixl_softc *sc = qp->qp_sc;
3355 struct ixl_tx_ring *txr = qp->qp_txr;
3356 struct ixl_rx_ring *rxr = qp->qp_rxr;
3357 u_int txlimit, rxlimit;
3358 int more;
3359
3360 txlimit = sc->sc_tx_process_limit;
3361 rxlimit = sc->sc_rx_process_limit;
3362
3363 more = ixl_handle_queue_common(sc, qp,
3364 txlimit, &txr->txr_defer, rxlimit, &rxr->rxr_defer);
3365
3366 if (more != 0)
3367 ixl_sched_handle_queue(sc, qp);
3368 else
3369 ixl_enable_queue_intr(sc, qp);
3370 }
3371
3372 static inline void
3373 ixl_print_hmc_error(struct ixl_softc *sc, uint32_t reg)
3374 {
3375 uint32_t hmc_idx, hmc_isvf;
3376 uint32_t hmc_errtype, hmc_objtype, hmc_data;
3377
3378 hmc_idx = reg & I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK;
3379 hmc_idx = hmc_idx >> I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT;
3380 hmc_isvf = reg & I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK;
3381 hmc_isvf = hmc_isvf >> I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT;
3382 hmc_errtype = reg & I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK;
3383 hmc_errtype = hmc_errtype >> I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT;
3384 hmc_objtype = reg & I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK;
3385 hmc_objtype = hmc_objtype >> I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT;
3386 hmc_data = ixl_rd(sc, I40E_PFHMC_ERRORDATA);
3387
3388 device_printf(sc->sc_dev,
3389 "HMC Error (idx=0x%x, isvf=0x%x, err=0x%x, obj=0x%x, data=0x%x)\n",
3390 hmc_idx, hmc_isvf, hmc_errtype, hmc_objtype, hmc_data);
3391 }
3392
3393 static int
3394 ixl_other_intr(void *xsc)
3395 {
3396 struct ixl_softc *sc = xsc;
3397 uint32_t icr, mask, reg;
3398 	int rv = 0;
3399
3400 icr = ixl_rd(sc, I40E_PFINT_ICR0);
3401 mask = ixl_rd(sc, I40E_PFINT_ICR0_ENA);
3402
3403 if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) {
3404 atomic_inc_64(&sc->sc_event_atq.ev_count);
3405 ixl_atq_done(sc);
3406 ixl_work_add(sc->sc_workq, &sc->sc_arq_task);
3407 rv = 1;
3408 }
3409
3410 if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) {
3411 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3412 device_printf(sc->sc_dev, "link stat changed\n");
3413
3414 atomic_inc_64(&sc->sc_event_link.ev_count);
3415 ixl_work_add(sc->sc_workq, &sc->sc_link_state_task);
3416 rv = 1;
3417 }
3418
3419 if (ISSET(icr, I40E_PFINT_ICR0_GRST_MASK)) {
3420 CLR(mask, I40E_PFINT_ICR0_ENA_GRST_MASK);
3421 reg = ixl_rd(sc, I40E_GLGEN_RSTAT);
3422 reg = reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK;
3423 reg = reg >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
3424
3425 device_printf(sc->sc_dev, "GRST: %s\n",
3426 reg == I40E_RESET_CORER ? "CORER" :
3427 reg == I40E_RESET_GLOBR ? "GLOBR" :
3428 reg == I40E_RESET_EMPR ? "EMPR" :
3429 "POR");
3430 }
3431
3432 if (ISSET(icr, I40E_PFINT_ICR0_ECC_ERR_MASK))
3433 atomic_inc_64(&sc->sc_event_ecc_err.ev_count);
3434 if (ISSET(icr, I40E_PFINT_ICR0_PCI_EXCEPTION_MASK))
3435 atomic_inc_64(&sc->sc_event_pci_exception.ev_count);
3436 if (ISSET(icr, I40E_PFINT_ICR0_PE_CRITERR_MASK))
3437 atomic_inc_64(&sc->sc_event_crit_err.ev_count);
3438
3439 if (ISSET(icr, IXL_ICR0_CRIT_ERR_MASK)) {
3440 CLR(mask, IXL_ICR0_CRIT_ERR_MASK);
3441 device_printf(sc->sc_dev, "critical error\n");
3442 }
3443
3444 if (ISSET(icr, I40E_PFINT_ICR0_HMC_ERR_MASK)) {
3445 reg = ixl_rd(sc, I40E_PFHMC_ERRORINFO);
3446 if (ISSET(reg, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK))
3447 ixl_print_hmc_error(sc, reg);
3448 ixl_wr(sc, I40E_PFHMC_ERRORINFO, 0);
3449 }
3450
3451 ixl_wr(sc, I40E_PFINT_ICR0_ENA, mask);
3452 ixl_flush(sc);
3453 ixl_enable_other_intr(sc);
3454 return rv;
3455 }
3456
3457 static void
3458 ixl_get_link_status_done(struct ixl_softc *sc,
3459 const struct ixl_aq_desc *iaq)
3460 {
3461
3462 ixl_link_state_update(sc, iaq);
3463 }
3464
3465 static void
3466 ixl_get_link_status(void *xsc)
3467 {
3468 struct ixl_softc *sc = xsc;
3469 struct ixl_aq_desc *iaq;
3470 struct ixl_aq_link_param *param;
3471
3472 memset(&sc->sc_link_state_atq, 0, sizeof(sc->sc_link_state_atq));
3473 iaq = &sc->sc_link_state_atq.iatq_desc;
3474 iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
3475 param = (struct ixl_aq_link_param *)iaq->iaq_param;
3476 param->notify = IXL_AQ_LINK_NOTIFY;
3477
3478 ixl_atq_set(&sc->sc_link_state_atq, ixl_get_link_status_done);
3479 (void)ixl_atq_post(sc, &sc->sc_link_state_atq);
3480 }
3481
3482 static void
3483 ixl_link_state_update(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
3484 {
3485 struct ifnet *ifp = &sc->sc_ec.ec_if;
3486 int link_state;
3487
3488 KASSERT(kpreempt_disabled());
3489
3490 link_state = ixl_set_link_status(sc, iaq);
3491
3492 if (ifp->if_link_state != link_state)
3493 if_link_state_change(ifp, link_state);
3494
3495 if (link_state != LINK_STATE_DOWN) {
3496 if_schedule_deferred_start(ifp);
3497 }
3498 }
3499
3500 static void
3501 ixl_aq_dump(const struct ixl_softc *sc, const struct ixl_aq_desc *iaq,
3502 const char *msg)
3503 {
3504 char buf[512];
3505 size_t len;
3506
3507 len = sizeof(buf);
3508 buf[--len] = '\0';
3509
3510 device_printf(sc->sc_dev, "%s\n", msg);
3511 snprintb(buf, len, IXL_AQ_FLAGS_FMT, le16toh(iaq->iaq_flags));
3512 device_printf(sc->sc_dev, "flags %s opcode %04x\n",
3513 buf, le16toh(iaq->iaq_opcode));
3514 device_printf(sc->sc_dev, "datalen %u retval %u\n",
3515 le16toh(iaq->iaq_datalen), le16toh(iaq->iaq_retval));
3516 device_printf(sc->sc_dev, "cookie %016" PRIx64 "\n", iaq->iaq_cookie);
3517 device_printf(sc->sc_dev, "%08x %08x %08x %08x\n",
3518 le32toh(iaq->iaq_param[0]), le32toh(iaq->iaq_param[1]),
3519 le32toh(iaq->iaq_param[2]), le32toh(iaq->iaq_param[3]));
3520 }
3521
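/*
 * Process admin receive queue events between the driver's consumer
 * index and the hardware head pointer.  Link status events update the
 * link state; each buffer is recycled onto the idle list and the ring
 * is refilled before the "other" interrupt is re-enabled.
 */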
3522 static void
3523 ixl_arq(void *xsc)
3524 {
3525 struct ixl_softc *sc = xsc;
3526 struct ixl_aq_desc *arq, *iaq;
3527 struct ixl_aq_buf *aqb;
3528 unsigned int cons = sc->sc_arq_cons;
3529 unsigned int prod;
3530 int done = 0;
3531
3532 prod = ixl_rd(sc, sc->sc_aq_regs->arq_head) &
3533 sc->sc_aq_regs->arq_head_mask;
3534
3535 if (cons == prod)
3536 goto done;
3537
3538 arq = IXL_DMA_KVA(&sc->sc_arq);
3539
3540 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3541 0, IXL_DMA_LEN(&sc->sc_arq),
3542 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3543
3544 do {
3545 iaq = &arq[cons];
3546 aqb = sc->sc_arq_live[cons];
3547
3548 KASSERT(aqb != NULL);
3549
3550 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,
3551 BUS_DMASYNC_POSTREAD);
3552
3553 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3554 ixl_aq_dump(sc, iaq, "arq event");
3555
3556 switch (iaq->iaq_opcode) {
3557 case htole16(IXL_AQ_OP_PHY_LINK_STATUS):
3558 kpreempt_disable();
3559 ixl_link_state_update(sc, iaq);
3560 kpreempt_enable();
3561 break;
3562 }
3563
3564 memset(iaq, 0, sizeof(*iaq));
3565 sc->sc_arq_live[cons] = NULL;
3566 SIMPLEQ_INSERT_TAIL(&sc->sc_arq_idle, aqb, aqb_entry);
3567
3568 cons++;
3569 cons &= IXL_AQ_MASK;
3570
3571 done = 1;
3572 } while (cons != prod);
3573
3574 if (done) {
3575 sc->sc_arq_cons = cons;
3576 ixl_arq_fill(sc);
3577 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3578 0, IXL_DMA_LEN(&sc->sc_arq),
3579 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3580 }
3581
3582 done:
3583 ixl_enable_other_intr(sc);
3584 }
3585
3586 static void
3587 ixl_atq_set(struct ixl_atq *iatq,
3588 void (*fn)(struct ixl_softc *, const struct ixl_aq_desc *))
3589 {
3590
3591 iatq->iatq_fn = fn;
3592 }
3593
3594 static int
3595 ixl_atq_post_locked(struct ixl_softc *sc, struct ixl_atq *iatq)
3596 {
3597 struct ixl_aq_desc *atq, *slot;
3598 unsigned int prod, cons, prod_next;
3599
3600 /* assert locked */
3601 KASSERT(mutex_owned(&sc->sc_atq_lock));
3602
3603 atq = IXL_DMA_KVA(&sc->sc_atq);
3604 prod = sc->sc_atq_prod;
3605 cons = sc->sc_atq_cons;
3606 	prod_next = (prod + 1) & IXL_AQ_MASK;
3607
3608 if (cons == prod_next)
3609 return ENOMEM;
3610
3611 slot = &atq[prod];
3612
3613 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3614 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
3615
3616 *slot = iatq->iatq_desc;
3617 slot->iaq_cookie = (uint64_t)((intptr_t)iatq);
3618
3619 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3620 ixl_aq_dump(sc, slot, "atq command");
3621
3622 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3623 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
3624
3625 sc->sc_atq_prod = prod_next;
3626 ixl_wr(sc, sc->sc_aq_regs->atq_tail, sc->sc_atq_prod);
3627
3628 return 0;
3629 }
3630
3631 static int
3632 ixl_atq_post(struct ixl_softc *sc, struct ixl_atq *iatq)
3633 {
3634 int rv;
3635
3636 mutex_enter(&sc->sc_atq_lock);
3637 rv = ixl_atq_post_locked(sc, iatq);
3638 mutex_exit(&sc->sc_atq_lock);
3639
3640 return rv;
3641 }
3642
3643 static void
3644 ixl_atq_done_locked(struct ixl_softc *sc)
3645 {
3646 struct ixl_aq_desc *atq, *slot;
3647 struct ixl_atq *iatq;
3648 unsigned int cons;
3649 unsigned int prod;
3650
3651 KASSERT(mutex_owned(&sc->sc_atq_lock));
3652
3653 prod = sc->sc_atq_prod;
3654 cons = sc->sc_atq_cons;
3655
3656 if (prod == cons)
3657 return;
3658
3659 atq = IXL_DMA_KVA(&sc->sc_atq);
3660
3661 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3662 0, IXL_DMA_LEN(&sc->sc_atq),
3663 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3664
3665 do {
3666 slot = &atq[cons];
3667 if (!ISSET(slot->iaq_flags, htole16(IXL_AQ_DD)))
3668 break;
3669
3670 iatq = (struct ixl_atq *)((intptr_t)slot->iaq_cookie);
3671 iatq->iatq_desc = *slot;
3672
3673 memset(slot, 0, sizeof(*slot));
3674
3675 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3676 ixl_aq_dump(sc, &iatq->iatq_desc, "atq response");
3677
3678 (*iatq->iatq_fn)(sc, &iatq->iatq_desc);
3679
3680 cons++;
3681 cons &= IXL_AQ_MASK;
3682 } while (cons != prod);
3683
3684 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3685 0, IXL_DMA_LEN(&sc->sc_atq),
3686 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3687
3688 sc->sc_atq_cons = cons;
3689 }
3690
3691 static void
3692 ixl_atq_done(struct ixl_softc *sc)
3693 {
3694
3695 mutex_enter(&sc->sc_atq_lock);
3696 ixl_atq_done_locked(sc);
3697 mutex_exit(&sc->sc_atq_lock);
3698 }
3699
3700 static void
3701 ixl_wakeup(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
3702 {
3703
3704 KASSERT(mutex_owned(&sc->sc_atq_lock));
3705
3706 cv_signal(&sc->sc_atq_cv);
3707 }
3708
3709 static int
3710 ixl_atq_exec(struct ixl_softc *sc, struct ixl_atq *iatq)
3711 {
3712 int error;
3713
3714 KASSERT(iatq->iatq_desc.iaq_cookie == 0);
3715
3716 ixl_atq_set(iatq, ixl_wakeup);
3717
3718 mutex_enter(&sc->sc_atq_lock);
3719 error = ixl_atq_post_locked(sc, iatq);
3720 if (error) {
3721 mutex_exit(&sc->sc_atq_lock);
3722 return error;
3723 }
3724
3725 error = cv_timedwait(&sc->sc_atq_cv, &sc->sc_atq_lock,
3726 IXL_ATQ_EXEC_TIMEOUT);
3727 mutex_exit(&sc->sc_atq_lock);
3728
3729 return error;
3730 }
3731
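/*
 * Issue an admin command synchronously: copy it into the next ATQ
 * slot, ring the tail, and poll in 1ms steps until the firmware
 * advances the head pointer or "tm" milliseconds pass.  The completed
 * descriptor, including the return value, is copied back into *iaq.
 */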
3732 static int
3733 ixl_atq_poll(struct ixl_softc *sc, struct ixl_aq_desc *iaq, unsigned int tm)
3734 {
3735 struct ixl_aq_desc *atq, *slot;
3736 unsigned int prod;
3737 unsigned int t = 0;
3738
3739 mutex_enter(&sc->sc_atq_lock);
3740
3741 atq = IXL_DMA_KVA(&sc->sc_atq);
3742 prod = sc->sc_atq_prod;
3743 slot = atq + prod;
3744
3745 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3746 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
3747
3748 *slot = *iaq;
3749 slot->iaq_flags |= htole16(IXL_AQ_SI);
3750
3751 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3752 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
3753
3754 prod++;
3755 prod &= IXL_AQ_MASK;
3756 sc->sc_atq_prod = prod;
3757 ixl_wr(sc, sc->sc_aq_regs->atq_tail, prod);
3758
3759 while (ixl_rd(sc, sc->sc_aq_regs->atq_head) != prod) {
3760 delaymsec(1);
3761
3762 if (t++ > tm) {
3763 mutex_exit(&sc->sc_atq_lock);
3764 return ETIMEDOUT;
3765 }
3766 }
3767
3768 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3769 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTREAD);
3770 *iaq = *slot;
3771 memset(slot, 0, sizeof(*slot));
3772 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3773 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREREAD);
3774
3775 sc->sc_atq_cons = prod;
3776
3777 mutex_exit(&sc->sc_atq_lock);
3778
3779 return 0;
3780 }
3781
3782 static int
3783 ixl_get_version(struct ixl_softc *sc)
3784 {
3785 struct ixl_aq_desc iaq;
3786 uint32_t fwbuild, fwver, apiver;
3787 uint16_t api_maj_ver, api_min_ver;
3788
3789 memset(&iaq, 0, sizeof(iaq));
3790 iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VERSION);
3791
3794 if (ixl_atq_poll(sc, &iaq, 2000) != 0)
3795 return ETIMEDOUT;
3796 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK))
3797 return EIO;
3798
3799 fwbuild = le32toh(iaq.iaq_param[1]);
3800 fwver = le32toh(iaq.iaq_param[2]);
3801 apiver = le32toh(iaq.iaq_param[3]);
3802
3803 api_maj_ver = (uint16_t)apiver;
3804 api_min_ver = (uint16_t)(apiver >> 16);
3805
3806 aprint_normal(", FW %hu.%hu.%05u API %hu.%hu", (uint16_t)fwver,
3807 (uint16_t)(fwver >> 16), fwbuild, api_maj_ver, api_min_ver);
3808
3809 if (sc->sc_mac_type == I40E_MAC_X722) {
3810 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK |
3811 IXL_SC_AQ_FLAG_NVMREAD);
3812 }
3813
3814 #define IXL_API_VER(maj, min) (((uint32_t)(maj) << 16) | (min))
3815 if (IXL_API_VER(api_maj_ver, api_min_ver) >= IXL_API_VER(1, 5)) {
3816 if (sc->sc_mac_type == I40E_MAC_X722) {
3817 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL);
3818 }
3819 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK);
3820 }
3821 #undef IXL_API_VER
3822
3823 return 0;
3824 }
3825
3826 static int
3827 ixl_get_nvm_version(struct ixl_softc *sc)
3828 {
3829 uint16_t nvmver, cfg_ptr, eetrack_hi, eetrack_lo, oem_hi, oem_lo;
3830 uint32_t eetrack, oem;
3831 uint16_t nvm_maj_ver, nvm_min_ver, oem_build;
3832 uint8_t oem_ver, oem_patch;
3833
3834 nvmver = cfg_ptr = eetrack_hi = eetrack_lo = oem_hi = oem_lo = 0;
3835 ixl_rd16_nvm(sc, I40E_SR_NVM_DEV_STARTER_VERSION, &nvmver);
3836 ixl_rd16_nvm(sc, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
3837 ixl_rd16_nvm(sc, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
3838 ixl_rd16_nvm(sc, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
3839 ixl_rd16_nvm(sc, cfg_ptr + I40E_NVM_OEM_VER_OFF, &oem_hi);
3840 ixl_rd16_nvm(sc, cfg_ptr + I40E_NVM_OEM_VER_OFF + 1, &oem_lo);
3841
3842 nvm_maj_ver = (uint16_t)__SHIFTOUT(nvmver, IXL_NVM_VERSION_HI_MASK);
3843 nvm_min_ver = (uint16_t)__SHIFTOUT(nvmver, IXL_NVM_VERSION_LO_MASK);
3844 eetrack = ((uint32_t)eetrack_hi << 16) | eetrack_lo;
3845 oem = ((uint32_t)oem_hi << 16) | oem_lo;
3846 oem_ver = __SHIFTOUT(oem, IXL_NVM_OEMVERSION_MASK);
3847 oem_build = __SHIFTOUT(oem, IXL_NVM_OEMBUILD_MASK);
3848 oem_patch = __SHIFTOUT(oem, IXL_NVM_OEMPATCH_MASK);
3849
3850 aprint_normal(" nvm %x.%02x etid %08x oem %d.%d.%d",
3851 nvm_maj_ver, nvm_min_ver, eetrack,
3852 oem_ver, oem_build, oem_patch);
3853
3854 return 0;
3855 }
3856
3857 static int
3858 ixl_pxe_clear(struct ixl_softc *sc)
3859 {
3860 struct ixl_aq_desc iaq;
3861 int rv;
3862
3863 memset(&iaq, 0, sizeof(iaq));
3864 iaq.iaq_opcode = htole16(IXL_AQ_OP_CLEAR_PXE_MODE);
3865 iaq.iaq_param[0] = htole32(0x2);
3866
3867 rv = ixl_atq_poll(sc, &iaq, 250);
3868
3869 ixl_wr(sc, I40E_GLLAN_RCTL_0, 0x1);
3870
3871 if (rv != 0)
3872 return ETIMEDOUT;
3873
3874 switch (iaq.iaq_retval) {
3875 case htole16(IXL_AQ_RC_OK):
3876 case htole16(IXL_AQ_RC_EEXIST):
3877 break;
3878 default:
3879 return EIO;
3880 }
3881
3882 return 0;
3883 }
3884
3885 static int
3886 ixl_lldp_shut(struct ixl_softc *sc)
3887 {
3888 struct ixl_aq_desc iaq;
3889
3890 memset(&iaq, 0, sizeof(iaq));
3891 iaq.iaq_opcode = htole16(IXL_AQ_OP_LLDP_STOP_AGENT);
3892 iaq.iaq_param[0] = htole32(IXL_LLDP_SHUTDOWN);
3893
3894 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
3895 aprint_error_dev(sc->sc_dev, "STOP LLDP AGENT timeout\n");
3896 return -1;
3897 }
3898
3899 switch (iaq.iaq_retval) {
3900 case htole16(IXL_AQ_RC_EMODE):
3901 case htole16(IXL_AQ_RC_EPERM):
3902 /* ignore silently */
3903 default:
3904 break;
3905 }
3906
3907 return 0;
3908 }
3909
3910 static void
3911 ixl_parse_hw_capability(struct ixl_softc *sc, struct ixl_aq_capability *cap)
3912 {
3913 uint16_t id;
3914 uint32_t number, logical_id;
3915
3916 id = le16toh(cap->cap_id);
3917 number = le32toh(cap->number);
3918 logical_id = le32toh(cap->logical_id);
3919
3920 switch (id) {
3921 case IXL_AQ_CAP_RSS:
3922 sc->sc_rss_table_size = number;
3923 sc->sc_rss_table_entry_width = logical_id;
3924 break;
3925 case IXL_AQ_CAP_RXQ:
3926 case IXL_AQ_CAP_TXQ:
3927 sc->sc_nqueue_pairs_device = MIN(number,
3928 sc->sc_nqueue_pairs_device);
3929 break;
3930 }
3931 }
3932
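/*
 * Query the function capability list through the admin queue.  If the
 * firmware answers ENOMEM, the required buffer length is reported back
 * in iaq_datalen and the request is retried with a buffer of that size.
 */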
3933 static int
3934 ixl_get_hw_capabilities(struct ixl_softc *sc)
3935 {
3936 struct ixl_dmamem idm;
3937 struct ixl_aq_desc iaq;
3938 struct ixl_aq_capability *caps;
3939 size_t i, ncaps;
3940 bus_size_t caps_size;
3941 uint16_t status;
3942 int rv;
3943
3944 caps_size = sizeof(caps[0]) * 40;
3945 memset(&iaq, 0, sizeof(iaq));
3946 iaq.iaq_opcode = htole16(IXL_AQ_OP_LIST_FUNC_CAP);
3947
3948 do {
3949 if (ixl_dmamem_alloc(sc, &idm, caps_size, 0) != 0) {
3950 return -1;
3951 }
3952
3953 iaq.iaq_flags = htole16(IXL_AQ_BUF |
3954 (caps_size > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
3955 iaq.iaq_datalen = htole16(caps_size);
3956 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
3957
3958 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0,
3959 IXL_DMA_LEN(&idm), BUS_DMASYNC_PREREAD);
3960
3961 rv = ixl_atq_poll(sc, &iaq, 250);
3962
3963 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0,
3964 IXL_DMA_LEN(&idm), BUS_DMASYNC_POSTREAD);
3965
3966 if (rv != 0) {
3967 aprint_error(", HW capabilities timeout\n");
3968 goto done;
3969 }
3970
3971 status = le16toh(iaq.iaq_retval);
3972
3973 if (status == IXL_AQ_RC_ENOMEM) {
3974 caps_size = le16toh(iaq.iaq_datalen);
3975 ixl_dmamem_free(sc, &idm);
3976 }
3977 } while (status == IXL_AQ_RC_ENOMEM);
3978
3979 if (status != IXL_AQ_RC_OK) {
3980 aprint_error(", HW capabilities error\n");
3981 goto done;
3982 }
3983
3984 caps = IXL_DMA_KVA(&idm);
3985 ncaps = le16toh(iaq.iaq_param[1]);
3986
3987 for (i = 0; i < ncaps; i++) {
3988 ixl_parse_hw_capability(sc, &caps[i]);
3989 }
3990
3991 done:
3992 ixl_dmamem_free(sc, &idm);
3993 return rv;
3994 }
3995
3996 static int
3997 ixl_get_mac(struct ixl_softc *sc)
3998 {
3999 struct ixl_dmamem idm;
4000 struct ixl_aq_desc iaq;
4001 struct ixl_aq_mac_addresses *addrs;
4002 int rv;
4003
4004 if (ixl_dmamem_alloc(sc, &idm, sizeof(*addrs), 0) != 0) {
4005 aprint_error(", unable to allocate mac addresses\n");
4006 return -1;
4007 }
4008
4009 memset(&iaq, 0, sizeof(iaq));
4010 iaq.iaq_flags = htole16(IXL_AQ_BUF);
4011 iaq.iaq_opcode = htole16(IXL_AQ_OP_MAC_ADDRESS_READ);
4012 iaq.iaq_datalen = htole16(sizeof(*addrs));
4013 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
4014
4015 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4016 BUS_DMASYNC_PREREAD);
4017
4018 rv = ixl_atq_poll(sc, &iaq, 250);
4019
4020 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4021 BUS_DMASYNC_POSTREAD);
4022
4023 if (rv != 0) {
4024 aprint_error(", MAC ADDRESS READ timeout\n");
4025 rv = -1;
4026 goto done;
4027 }
4028 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4029 aprint_error(", MAC ADDRESS READ error\n");
4030 rv = -1;
4031 goto done;
4032 }
4033
4034 addrs = IXL_DMA_KVA(&idm);
4035 if (!ISSET(iaq.iaq_param[0], htole32(IXL_AQ_MAC_PORT_VALID))) {
4036 		aprint_error(", port address is not valid\n");
4037 goto done;
4038 }
4039
4040 memcpy(sc->sc_enaddr, addrs->port, ETHER_ADDR_LEN);
4041 rv = 0;
4042
4043 done:
4044 ixl_dmamem_free(sc, &idm);
4045 return rv;
4046 }
4047
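/*
 * Fetch the switch configuration and remember the SEIDs of the first
 * reported element; they identify this VSI and its uplink/downlink for
 * later admin queue commands.
 */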
4048 static int
4049 ixl_get_switch_config(struct ixl_softc *sc)
4050 {
4051 struct ixl_dmamem idm;
4052 struct ixl_aq_desc iaq;
4053 struct ixl_aq_switch_config *hdr;
4054 struct ixl_aq_switch_config_element *elms, *elm;
4055 unsigned int nelm, i;
4056 int rv;
4057
4058 if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
4059 aprint_error_dev(sc->sc_dev,
4060 "unable to allocate switch config buffer\n");
4061 return -1;
4062 }
4063
4064 memset(&iaq, 0, sizeof(iaq));
4065 iaq.iaq_flags = htole16(IXL_AQ_BUF |
4066 (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4067 iaq.iaq_opcode = htole16(IXL_AQ_OP_SWITCH_GET_CONFIG);
4068 iaq.iaq_datalen = htole16(IXL_AQ_BUFLEN);
4069 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
4070
4071 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4072 BUS_DMASYNC_PREREAD);
4073
4074 rv = ixl_atq_poll(sc, &iaq, 250);
4075
4076 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4077 BUS_DMASYNC_POSTREAD);
4078
4079 if (rv != 0) {
4080 aprint_error_dev(sc->sc_dev, "GET SWITCH CONFIG timeout\n");
4081 rv = -1;
4082 goto done;
4083 }
4084 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4085 aprint_error_dev(sc->sc_dev, "GET SWITCH CONFIG error\n");
4086 rv = -1;
4087 goto done;
4088 }
4089
4090 hdr = IXL_DMA_KVA(&idm);
4091 elms = (struct ixl_aq_switch_config_element *)(hdr + 1);
4092
4093 nelm = le16toh(hdr->num_reported);
4094 if (nelm < 1) {
4095 aprint_error_dev(sc->sc_dev, "no switch config available\n");
4096 rv = -1;
4097 goto done;
4098 }
4099
4100 for (i = 0; i < nelm; i++) {
4101 elm = &elms[i];
4102
4103 aprint_debug_dev(sc->sc_dev,
4104 "type %x revision %u seid %04x\n",
4105 elm->type, elm->revision, le16toh(elm->seid));
4106 aprint_debug_dev(sc->sc_dev,
4107 "uplink %04x downlink %04x\n",
4108 le16toh(elm->uplink_seid),
4109 le16toh(elm->downlink_seid));
4110 aprint_debug_dev(sc->sc_dev,
4111 "conntype %x scheduler %04x extra %04x\n",
4112 elm->connection_type,
4113 le16toh(elm->scheduler_id),
4114 le16toh(elm->element_info));
4115 }
4116
4117 elm = &elms[0];
4118
4119 sc->sc_uplink_seid = elm->uplink_seid;
4120 sc->sc_downlink_seid = elm->downlink_seid;
4121 sc->sc_seid = elm->seid;
4122
4123 if ((sc->sc_uplink_seid == htole16(0)) !=
4124 (sc->sc_downlink_seid == htole16(0))) {
4125 aprint_error_dev(sc->sc_dev, "SEIDs are misconfigured\n");
4126 rv = -1;
4127 goto done;
4128 }
4129
4130 done:
4131 ixl_dmamem_free(sc, &idm);
4132 return rv;
4133 }
4134
4135 static int
4136 ixl_phy_mask_ints(struct ixl_softc *sc)
4137 {
4138 struct ixl_aq_desc iaq;
4139
4140 memset(&iaq, 0, sizeof(iaq));
4141 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_EVENT_MASK);
4142 iaq.iaq_param[2] = htole32(IXL_AQ_PHY_EV_MASK &
4143 ~(IXL_AQ_PHY_EV_LINK_UPDOWN | IXL_AQ_PHY_EV_MODULE_QUAL_FAIL |
4144 IXL_AQ_PHY_EV_MEDIA_NA));
4145
4146 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4147 aprint_error_dev(sc->sc_dev, "SET PHY EVENT MASK timeout\n");
4148 return -1;
4149 }
4150 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4151 aprint_error_dev(sc->sc_dev, "SET PHY EVENT MASK error\n");
4152 return -1;
4153 }
4154
4155 return 0;
4156 }
4157
4158 static int
4159 ixl_get_phy_abilities(struct ixl_softc *sc, struct ixl_dmamem *idm)
4160 {
4161 struct ixl_aq_desc iaq;
4162 int rv;
4163
4164 memset(&iaq, 0, sizeof(iaq));
4165 iaq.iaq_flags = htole16(IXL_AQ_BUF |
4166 (IXL_DMA_LEN(idm) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4167 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_GET_ABILITIES);
4168 iaq.iaq_datalen = htole16(IXL_DMA_LEN(idm));
4169 iaq.iaq_param[0] = htole32(IXL_AQ_PHY_REPORT_INIT);
4170 ixl_aq_dva(&iaq, IXL_DMA_DVA(idm));
4171
4172 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
4173 BUS_DMASYNC_PREREAD);
4174
4175 rv = ixl_atq_poll(sc, &iaq, 250);
4176
4177 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
4178 BUS_DMASYNC_POSTREAD);
4179
4180 if (rv != 0)
4181 return -1;
4182
4183 return le16toh(iaq.iaq_retval);
4184 }
4185
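/*
 * Collect the 64-bit mask of PHY types supported by the port: the low
 * 32 bits come from phy_type and the upper bits from phy_type_ext.
 */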
4186 static int
4187 ixl_get_phy_types(struct ixl_softc *sc, uint64_t *phy_types_ptr)
4188 {
4189 struct ixl_dmamem idm;
4190 struct ixl_aq_phy_abilities *phy;
4191 uint64_t phy_types;
4192 int rv;
4193
4194 if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
4195 aprint_error_dev(sc->sc_dev,
4196 		    "unable to allocate phy abilities buffer\n");
4197 return -1;
4198 }
4199
4200 rv = ixl_get_phy_abilities(sc, &idm);
4201 switch (rv) {
4202 case -1:
4203 aprint_error_dev(sc->sc_dev, "GET PHY ABILITIES timeout\n");
4204 goto done;
4205 case IXL_AQ_RC_OK:
4206 break;
4207 case IXL_AQ_RC_EIO:
4208 		aprint_error_dev(sc->sc_dev, "unable to query phy types\n");
4209 		break;
4210 	default:
4211 		aprint_error_dev(sc->sc_dev,
4212 		    "GET PHY ABILITIES error %u\n", rv);
4213 goto done;
4214 }
4215
4216 phy = IXL_DMA_KVA(&idm);
4217
4218 phy_types = le32toh(phy->phy_type);
4219 phy_types |= (uint64_t)le32toh(phy->phy_type_ext) << 32;
4220
4221 *phy_types_ptr = phy_types;
4222
4223 rv = 0;
4224
4225 done:
4226 ixl_dmamem_free(sc, &idm);
4227 return rv;
4228 }
4229
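/*
 * Poll the current link status through the admin queue and push the
 * result into the interface; the NOTIFY flag also asks the firmware to
 * report subsequent link changes.
 */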
4230 static int
4231 ixl_get_link_status_poll(struct ixl_softc *sc)
4232 {
4233 struct ixl_aq_desc iaq;
4234 struct ixl_aq_link_param *param;
4235 int link;
4236
4237 memset(&iaq, 0, sizeof(iaq));
4238 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
4239 param = (struct ixl_aq_link_param *)iaq.iaq_param;
4240 param->notify = IXL_AQ_LINK_NOTIFY;
4241
4242 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4243 return ETIMEDOUT;
4244 }
4245 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4246 return EIO;
4247 }
4248
4249 link = ixl_set_link_status(sc, &iaq);
4250 sc->sc_ec.ec_if.if_link_state = link;
4251
4252 return 0;
4253 }
4254
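/*
 * Read the VSI parameters into the scratch buffer so they can be
 * modified in place and written back by ixl_set_vsi().
 */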
4255 static int
4256 ixl_get_vsi(struct ixl_softc *sc)
4257 {
4258 struct ixl_dmamem *vsi = &sc->sc_scratch;
4259 struct ixl_aq_desc iaq;
4260 struct ixl_aq_vsi_param *param;
4261 struct ixl_aq_vsi_reply *reply;
4262 struct ixl_aq_vsi_data *data;
4263 int rv;
4264
4265 /* grumble, vsi info isn't "known" at compile time */
4266
4267 memset(&iaq, 0, sizeof(iaq));
4268 iaq.iaq_flags = htole16(IXL_AQ_BUF |
4269 (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4270 iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VSI_PARAMS);
4271 iaq.iaq_datalen = htole16(IXL_DMA_LEN(vsi));
4272 ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
4273
4274 param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
4275 param->uplink_seid = sc->sc_seid;
4276
4277 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4278 BUS_DMASYNC_PREREAD);
4279
4280 rv = ixl_atq_poll(sc, &iaq, 250);
4281
4282 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4283 BUS_DMASYNC_POSTREAD);
4284
4285 if (rv != 0) {
4286 return ETIMEDOUT;
4287 }
4288
4289 switch (le16toh(iaq.iaq_retval)) {
4290 case IXL_AQ_RC_OK:
4291 break;
4292 case IXL_AQ_RC_ENOENT:
4293 return ENOENT;
4294 case IXL_AQ_RC_EACCES:
4295 return EACCES;
4296 default:
4297 return EIO;
4298 }
4299
4300 reply = (struct ixl_aq_vsi_reply *)iaq.iaq_param;
4301 sc->sc_vsi_number = reply->vsi_number;
4302 data = IXL_DMA_KVA(vsi);
4303 sc->sc_vsi_stat_counter_idx = le16toh(data->stat_counter_idx);
4304
4305 return 0;
4306 }
4307
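/*
 * Update the VSI: map all queue pairs contiguously onto TC0 and set the
 * port VLAN stripping mode according to the current hardware tagging
 * setting, then write the parameters back with UPDATE VSI PARAMETERS.
 */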
4308 static int
4309 ixl_set_vsi(struct ixl_softc *sc)
4310 {
4311 struct ixl_dmamem *vsi = &sc->sc_scratch;
4312 struct ixl_aq_desc iaq;
4313 struct ixl_aq_vsi_param *param;
4314 struct ixl_aq_vsi_data *data = IXL_DMA_KVA(vsi);
4315 unsigned int qnum;
4316 uint16_t val;
4317 int rv;
4318
4319 qnum = sc->sc_nqueue_pairs - 1;
4320
4321 data->valid_sections = htole16(IXL_AQ_VSI_VALID_QUEUE_MAP |
4322 IXL_AQ_VSI_VALID_VLAN);
4323
4324 CLR(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_MASK));
4325 SET(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_CONTIG));
4326 data->queue_mapping[0] = htole16(0);
4327 data->tc_mapping[0] = htole16((0 << IXL_AQ_VSI_TC_Q_OFFSET_SHIFT) |
4328 (qnum << IXL_AQ_VSI_TC_Q_NUMBER_SHIFT));
4329
4330 val = le16toh(data->port_vlan_flags);
4331 CLR(val, IXL_AQ_VSI_PVLAN_MODE_MASK | IXL_AQ_VSI_PVLAN_EMOD_MASK);
4332 SET(val, IXL_AQ_VSI_PVLAN_MODE_ALL);
4333
4334 if (ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWTAGGING)) {
4335 SET(val, IXL_AQ_VSI_PVLAN_EMOD_STR_BOTH);
4336 } else {
4337 SET(val, IXL_AQ_VSI_PVLAN_EMOD_NOTHING);
4338 }
4339
4340 data->port_vlan_flags = htole16(val);
4341
4342 /* grumble, vsi info isn't "known" at compile time */
4343
4344 memset(&iaq, 0, sizeof(iaq));
4345 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD |
4346 (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4347 iaq.iaq_opcode = htole16(IXL_AQ_OP_UPD_VSI_PARAMS);
4348 iaq.iaq_datalen = htole16(IXL_DMA_LEN(vsi));
4349 ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
4350
4351 param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
4352 param->uplink_seid = sc->sc_seid;
4353
4354 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4355 BUS_DMASYNC_PREWRITE);
4356
4357 rv = ixl_atq_poll(sc, &iaq, 250);
4358
4359 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4360 BUS_DMASYNC_POSTWRITE);
4361
4362 if (rv != 0) {
4363 return ETIMEDOUT;
4364 }
4365
4366 switch (le16toh(iaq.iaq_retval)) {
4367 case IXL_AQ_RC_OK:
4368 break;
4369 case IXL_AQ_RC_ENOENT:
4370 return ENOENT;
4371 case IXL_AQ_RC_EACCES:
4372 return EACCES;
4373 default:
4374 return EIO;
4375 }
4376
4377 return 0;
4378 }
4379
4380 static void
4381 ixl_set_filter_control(struct ixl_softc *sc)
4382 {
4383 uint32_t reg;
4384
4385 reg = ixl_rd_rx_csr(sc, I40E_PFQF_CTL_0);
4386
4387 CLR(reg, I40E_PFQF_CTL_0_HASHLUTSIZE_MASK);
4388 SET(reg, I40E_HASH_LUT_SIZE_128 << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT);
4389
4390 SET(reg, I40E_PFQF_CTL_0_FD_ENA_MASK);
4391 SET(reg, I40E_PFQF_CTL_0_ETYPE_ENA_MASK);
4392 SET(reg, I40E_PFQF_CTL_0_MACVLAN_ENA_MASK);
4393
4394 ixl_wr_rx_csr(sc, I40E_PFQF_CTL_0, reg);
4395 }
4396
4397 static inline void
4398 ixl_get_default_rss_key(uint32_t *buf, size_t len)
4399 {
4400 size_t cplen;
4401 uint8_t rss_seed[RSS_KEYSIZE];
4402
4403 rss_getkey(rss_seed);
4404 memset(buf, 0, len);
4405
4406 cplen = MIN(len, sizeof(rss_seed));
4407 memcpy(buf, rss_seed, cplen);
4408 }
4409
4410 static void
4411 ixl_set_rss_key(struct ixl_softc *sc)
4412 {
4413 uint32_t rss_seed[IXL_RSS_KEY_SIZE_REG];
4414 size_t i;
4415
4416 ixl_get_default_rss_key(rss_seed, sizeof(rss_seed));
4417
4418 for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
4419 ixl_wr_rx_csr(sc, I40E_PFQF_HKEY(i), rss_seed[i]);
4420 }
4421 }
4422
4423 static void
4424 ixl_set_rss_pctype(struct ixl_softc *sc)
4425 {
4426 uint64_t set_hena = 0;
4427 uint32_t hena0, hena1;
4428
4429 if (sc->sc_mac_type == I40E_MAC_X722)
4430 set_hena = IXL_RSS_HENA_DEFAULT_X722;
4431 else
4432 set_hena = IXL_RSS_HENA_DEFAULT_XL710;
4433
4434 hena0 = ixl_rd_rx_csr(sc, I40E_PFQF_HENA(0));
4435 hena1 = ixl_rd_rx_csr(sc, I40E_PFQF_HENA(1));
4436
4437 SET(hena0, set_hena);
4438 SET(hena1, set_hena >> 32);
4439
4440 ixl_wr_rx_csr(sc, I40E_PFQF_HENA(0), hena0);
4441 ixl_wr_rx_csr(sc, I40E_PFQF_HENA(1), hena1);
4442 }
4443
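/*
 * Fill the RSS lookup table by distributing its entries across the
 * active queue pairs in round-robin order, then write it to hardware in
 * 32-bit register chunks.
 */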
4444 static void
4445 ixl_set_rss_hlut(struct ixl_softc *sc)
4446 {
4447 unsigned int qid;
4448 uint8_t hlut_buf[512], lut_mask;
4449 uint32_t *hluts;
4450 size_t i, hluts_num;
4451
4452 lut_mask = (0x01 << sc->sc_rss_table_entry_width) - 1;
4453
4454 for (i = 0; i < sc->sc_rss_table_size; i++) {
4455 qid = i % sc->sc_nqueue_pairs;
4456 hlut_buf[i] = qid & lut_mask;
4457 }
4458
4459 hluts = (uint32_t *)hlut_buf;
4460 hluts_num = sc->sc_rss_table_size >> 2;
4461 for (i = 0; i < hluts_num; i++) {
4462 ixl_wr(sc, I40E_PFQF_HLUT(i), hluts[i]);
4463 }
4464 ixl_flush(sc);
4465 }
4466
4467 static void
4468 ixl_config_rss(struct ixl_softc *sc)
4469 {
4470
4471 KASSERT(mutex_owned(&sc->sc_cfg_lock));
4472
4473 ixl_set_rss_key(sc);
4474 ixl_set_rss_pctype(sc);
4475 ixl_set_rss_hlut(sc);
4476 }
4477
4478 static const struct ixl_phy_type *
4479 ixl_search_phy_type(uint8_t phy_type)
4480 {
4481 const struct ixl_phy_type *itype;
4482 uint64_t mask;
4483 unsigned int i;
4484
4485 if (phy_type >= 64)
4486 return NULL;
4487
4488 mask = 1ULL << phy_type;
4489
4490 for (i = 0; i < __arraycount(ixl_phy_type_map); i++) {
4491 itype = &ixl_phy_type_map[i];
4492
4493 if (ISSET(itype->phy_type, mask))
4494 return itype;
4495 }
4496
4497 return NULL;
4498 }
4499
4500 static uint64_t
4501 ixl_search_link_speed(uint8_t link_speed)
4502 {
4503 const struct ixl_speed_type *type;
4504 unsigned int i;
4505
4506 for (i = 0; i < __arraycount(ixl_speed_type_map); i++) {
4507 type = &ixl_speed_type_map[i];
4508
4509 if (ISSET(type->dev_speed, link_speed))
4510 return type->net_speed;
4511 }
4512
4513 return 0;
4514 }
4515
4516 static int
4517 ixl_restart_an(struct ixl_softc *sc)
4518 {
4519 struct ixl_aq_desc iaq;
4520
4521 memset(&iaq, 0, sizeof(iaq));
4522 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_RESTART_AN);
4523 iaq.iaq_param[0] =
4524 htole32(IXL_AQ_PHY_RESTART_AN | IXL_AQ_PHY_LINK_ENABLE);
4525
4526 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4527 aprint_error_dev(sc->sc_dev, "RESTART AN timeout\n");
4528 return -1;
4529 }
4530 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4531 aprint_error_dev(sc->sc_dev, "RESTART AN error\n");
4532 return -1;
4533 }
4534
4535 return 0;
4536 }
4537
4538 static int
4539 ixl_add_macvlan(struct ixl_softc *sc, const uint8_t *macaddr,
4540 uint16_t vlan, uint16_t flags)
4541 {
4542 struct ixl_aq_desc iaq;
4543 struct ixl_aq_add_macvlan *param;
4544 struct ixl_aq_add_macvlan_elem *elem;
4545
4546 memset(&iaq, 0, sizeof(iaq));
4547 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4548 iaq.iaq_opcode = htole16(IXL_AQ_OP_ADD_MACVLAN);
4549 iaq.iaq_datalen = htole16(sizeof(*elem));
4550 ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));
4551
4552 param = (struct ixl_aq_add_macvlan *)&iaq.iaq_param;
4553 param->num_addrs = htole16(1);
4554 param->seid0 = htole16(0x8000) | sc->sc_seid;
4555 param->seid1 = 0;
4556 param->seid2 = 0;
4557
4558 elem = IXL_DMA_KVA(&sc->sc_scratch);
4559 memset(elem, 0, sizeof(*elem));
4560 memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
4561 elem->flags = htole16(IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH | flags);
4562 elem->vlan = htole16(vlan);
4563
4564 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4565 return IXL_AQ_RC_EINVAL;
4566 }
4567
4568 switch (le16toh(iaq.iaq_retval)) {
4569 case IXL_AQ_RC_OK:
4570 break;
4571 case IXL_AQ_RC_ENOSPC:
4572 return ENOSPC;
4573 case IXL_AQ_RC_ENOENT:
4574 return ENOENT;
4575 case IXL_AQ_RC_EACCES:
4576 return EACCES;
4577 case IXL_AQ_RC_EEXIST:
4578 return EEXIST;
4579 case IXL_AQ_RC_EINVAL:
4580 return EINVAL;
4581 default:
4582 return EIO;
4583 }
4584
4585 return 0;
4586 }
4587
4588 static int
4589 ixl_remove_macvlan(struct ixl_softc *sc, const uint8_t *macaddr,
4590 uint16_t vlan, uint16_t flags)
4591 {
4592 struct ixl_aq_desc iaq;
4593 struct ixl_aq_remove_macvlan *param;
4594 struct ixl_aq_remove_macvlan_elem *elem;
4595
4596 memset(&iaq, 0, sizeof(iaq));
4597 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4598 iaq.iaq_opcode = htole16(IXL_AQ_OP_REMOVE_MACVLAN);
4599 iaq.iaq_datalen = htole16(sizeof(*elem));
4600 ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));
4601
4602 param = (struct ixl_aq_remove_macvlan *)&iaq.iaq_param;
4603 param->num_addrs = htole16(1);
4604 param->seid0 = htole16(0x8000) | sc->sc_seid;
4605 param->seid1 = 0;
4606 param->seid2 = 0;
4607
4608 elem = IXL_DMA_KVA(&sc->sc_scratch);
4609 memset(elem, 0, sizeof(*elem));
4610 memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
4611 elem->flags = htole16(IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH | flags);
4612 elem->vlan = htole16(vlan);
4613
4614 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4615 return EINVAL;
4616 }
4617
4618 switch (le16toh(iaq.iaq_retval)) {
4619 case IXL_AQ_RC_OK:
4620 break;
4621 case IXL_AQ_RC_ENOENT:
4622 return ENOENT;
4623 case IXL_AQ_RC_EACCES:
4624 return EACCES;
4625 case IXL_AQ_RC_EINVAL:
4626 return EINVAL;
4627 default:
4628 return EIO;
4629 }
4630
4631 return 0;
4632 }
4633
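/*
 * Set up the Host Memory Cache: size the LAN TX/RX (and unused FCoE)
 * object areas from the hardware limits, back them with DMA pages,
 * build the page and segment descriptor tables, and program the
 * base/count registers for each object type.
 */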
4634 static int
4635 ixl_hmc(struct ixl_softc *sc)
4636 {
4637 struct {
4638 uint32_t count;
4639 uint32_t minsize;
4640 bus_size_t objsiz;
4641 bus_size_t setoff;
4642 bus_size_t setcnt;
4643 } regs[] = {
4644 {
4645 0,
4646 IXL_HMC_TXQ_MINSIZE,
4647 I40E_GLHMC_LANTXOBJSZ,
4648 I40E_GLHMC_LANTXBASE(sc->sc_pf_id),
4649 I40E_GLHMC_LANTXCNT(sc->sc_pf_id),
4650 },
4651 {
4652 0,
4653 IXL_HMC_RXQ_MINSIZE,
4654 I40E_GLHMC_LANRXOBJSZ,
4655 I40E_GLHMC_LANRXBASE(sc->sc_pf_id),
4656 I40E_GLHMC_LANRXCNT(sc->sc_pf_id),
4657 },
4658 {
4659 0,
4660 0,
4661 I40E_GLHMC_FCOEDDPOBJSZ,
4662 I40E_GLHMC_FCOEDDPBASE(sc->sc_pf_id),
4663 I40E_GLHMC_FCOEDDPCNT(sc->sc_pf_id),
4664 },
4665 {
4666 0,
4667 0,
4668 I40E_GLHMC_FCOEFOBJSZ,
4669 I40E_GLHMC_FCOEFBASE(sc->sc_pf_id),
4670 I40E_GLHMC_FCOEFCNT(sc->sc_pf_id),
4671 },
4672 };
4673 struct ixl_hmc_entry *e;
4674 uint64_t size, dva;
4675 uint8_t *kva;
4676 uint64_t *sdpage;
4677 unsigned int i;
4678 int npages, tables;
4679 uint32_t reg;
4680
4681 CTASSERT(__arraycount(regs) <= __arraycount(sc->sc_hmc_entries));
4682
4683 regs[IXL_HMC_LAN_TX].count = regs[IXL_HMC_LAN_RX].count =
4684 ixl_rd(sc, I40E_GLHMC_LANQMAX);
4685
4686 size = 0;
4687 for (i = 0; i < __arraycount(regs); i++) {
4688 e = &sc->sc_hmc_entries[i];
4689
4690 e->hmc_count = regs[i].count;
4691 reg = ixl_rd(sc, regs[i].objsiz);
4692 e->hmc_size = BIT_ULL(0x3F & reg);
4693 e->hmc_base = size;
4694
4695 if ((e->hmc_size * 8) < regs[i].minsize) {
4696 aprint_error_dev(sc->sc_dev,
4697 "kernel hmc entry is too big\n");
4698 return -1;
4699 }
4700
4701 size += roundup(e->hmc_size * e->hmc_count, IXL_HMC_ROUNDUP);
4702 }
4703 size = roundup(size, IXL_HMC_PGSIZE);
4704 npages = size / IXL_HMC_PGSIZE;
4705
4706 tables = roundup(size, IXL_HMC_L2SZ) / IXL_HMC_L2SZ;
4707
4708 if (ixl_dmamem_alloc(sc, &sc->sc_hmc_pd, size, IXL_HMC_PGSIZE) != 0) {
4709 aprint_error_dev(sc->sc_dev,
4710 "unable to allocate hmc pd memory\n");
4711 return -1;
4712 }
4713
4714 if (ixl_dmamem_alloc(sc, &sc->sc_hmc_sd, tables * IXL_HMC_PGSIZE,
4715 IXL_HMC_PGSIZE) != 0) {
4716 aprint_error_dev(sc->sc_dev,
4717 "unable to allocate hmc sd memory\n");
4718 ixl_dmamem_free(sc, &sc->sc_hmc_pd);
4719 return -1;
4720 }
4721
4722 kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
4723 memset(kva, 0, IXL_DMA_LEN(&sc->sc_hmc_pd));
4724
4725 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
4726 0, IXL_DMA_LEN(&sc->sc_hmc_pd),
4727 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4728
4729 dva = IXL_DMA_DVA(&sc->sc_hmc_pd);
4730 sdpage = IXL_DMA_KVA(&sc->sc_hmc_sd);
4731 memset(sdpage, 0, IXL_DMA_LEN(&sc->sc_hmc_sd));
4732
4733 for (i = 0; (int)i < npages; i++) {
4734 *sdpage = htole64(dva | IXL_HMC_PDVALID);
4735 sdpage++;
4736
4737 dva += IXL_HMC_PGSIZE;
4738 }
4739
4740 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_sd),
4741 0, IXL_DMA_LEN(&sc->sc_hmc_sd),
4742 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4743
4744 dva = IXL_DMA_DVA(&sc->sc_hmc_sd);
4745 for (i = 0; (int)i < tables; i++) {
4746 uint32_t count;
4747
4748 KASSERT(npages >= 0);
4749
4750 count = ((unsigned int)npages > IXL_HMC_PGS) ?
4751 IXL_HMC_PGS : (unsigned int)npages;
4752
4753 ixl_wr(sc, I40E_PFHMC_SDDATAHIGH, dva >> 32);
4754 ixl_wr(sc, I40E_PFHMC_SDDATALOW, dva |
4755 (count << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
4756 (1U << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT));
4757 ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
4758 ixl_wr(sc, I40E_PFHMC_SDCMD,
4759 (1U << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | i);
4760
4761 npages -= IXL_HMC_PGS;
4762 dva += IXL_HMC_PGSIZE;
4763 }
4764
4765 for (i = 0; i < __arraycount(regs); i++) {
4766 e = &sc->sc_hmc_entries[i];
4767
4768 ixl_wr(sc, regs[i].setoff, e->hmc_base / IXL_HMC_ROUNDUP);
4769 ixl_wr(sc, regs[i].setcnt, e->hmc_count);
4770 }
4771
4772 return 0;
4773 }
4774
4775 static void
4776 ixl_hmc_free(struct ixl_softc *sc)
4777 {
4778 ixl_dmamem_free(sc, &sc->sc_hmc_sd);
4779 ixl_dmamem_free(sc, &sc->sc_hmc_pd);
4780 }
4781
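/*
 * Pack a host-order context structure into the bit layout the hardware
 * expects: each field is copied from its byte offset in the source to
 * the bit position given by the packing table, one byte at a time.
 */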
4782 static void
4783 ixl_hmc_pack(void *d, const void *s, const struct ixl_hmc_pack *packing,
4784 unsigned int npacking)
4785 {
4786 uint8_t *dst = d;
4787 const uint8_t *src = s;
4788 unsigned int i;
4789
4790 for (i = 0; i < npacking; i++) {
4791 const struct ixl_hmc_pack *pack = &packing[i];
4792 unsigned int offset = pack->lsb / 8;
4793 unsigned int align = pack->lsb % 8;
4794 const uint8_t *in = src + pack->offset;
4795 uint8_t *out = dst + offset;
4796 int width = pack->width;
4797 unsigned int inbits = 0;
4798
4799 if (align) {
4800 inbits = (*in++) << align;
4801 *out++ |= (inbits & 0xff);
4802 inbits >>= 8;
4803
4804 width -= 8 - align;
4805 }
4806
4807 while (width >= 8) {
4808 inbits |= (*in++) << align;
4809 *out++ = (inbits & 0xff);
4810 inbits >>= 8;
4811
4812 width -= 8;
4813 }
4814
4815 if (width > 0) {
4816 inbits |= (*in) << align;
4817 *out |= (inbits & ((1 << width) - 1));
4818 }
4819 }
4820 }
4821
4822 static struct ixl_aq_buf *
4823 ixl_aqb_alloc(struct ixl_softc *sc)
4824 {
4825 struct ixl_aq_buf *aqb;
4826
4827 aqb = malloc(sizeof(*aqb), M_DEVBUF, M_WAITOK);
4828 if (aqb == NULL)
4829 return NULL;
4830
4831 aqb->aqb_size = IXL_AQ_BUFLEN;
4832
4833 if (bus_dmamap_create(sc->sc_dmat, aqb->aqb_size, 1,
4834 aqb->aqb_size, 0,
4835 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &aqb->aqb_map) != 0)
4836 goto free;
4837 if (bus_dmamem_alloc(sc->sc_dmat, aqb->aqb_size,
4838 IXL_AQ_ALIGN, 0, &aqb->aqb_seg, 1, &aqb->aqb_nsegs,
4839 BUS_DMA_WAITOK) != 0)
4840 goto destroy;
4841 if (bus_dmamem_map(sc->sc_dmat, &aqb->aqb_seg, aqb->aqb_nsegs,
4842 aqb->aqb_size, &aqb->aqb_data, BUS_DMA_WAITOK) != 0)
4843 goto dma_free;
4844 if (bus_dmamap_load(sc->sc_dmat, aqb->aqb_map, aqb->aqb_data,
4845 aqb->aqb_size, NULL, BUS_DMA_WAITOK) != 0)
4846 goto unmap;
4847
4848 return aqb;
4849 unmap:
4850 bus_dmamem_unmap(sc->sc_dmat, aqb->aqb_data, aqb->aqb_size);
4851 dma_free:
4852 bus_dmamem_free(sc->sc_dmat, &aqb->aqb_seg, 1);
4853 destroy:
4854 bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
4855 free:
4856 free(aqb, M_DEVBUF);
4857
4858 return NULL;
4859 }
4860
4861 static void
4862 ixl_aqb_free(struct ixl_softc *sc, struct ixl_aq_buf *aqb)
4863 {
4864 bus_dmamap_unload(sc->sc_dmat, aqb->aqb_map);
4865 bus_dmamem_unmap(sc->sc_dmat, aqb->aqb_data, aqb->aqb_size);
4866 bus_dmamem_free(sc->sc_dmat, &aqb->aqb_seg, 1);
4867 bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
4868 free(aqb, M_DEVBUF);
4869 }
4870
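/*
 * Refill the admin receive queue with DMA buffers, reusing idle buffers
 * where possible, and bump the tail register if any new descriptors
 * were posted.
 */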
4871 static int
4872 ixl_arq_fill(struct ixl_softc *sc)
4873 {
4874 struct ixl_aq_buf *aqb;
4875 struct ixl_aq_desc *arq, *iaq;
4876 unsigned int prod = sc->sc_arq_prod;
4877 unsigned int n;
4878 int post = 0;
4879
4880 n = ixl_rxr_unrefreshed(sc->sc_arq_prod, sc->sc_arq_cons,
4881 IXL_AQ_NUM);
4882 arq = IXL_DMA_KVA(&sc->sc_arq);
4883
4884 	if (__predict_false(n == 0))
4885 return 0;
4886
4887 do {
4888 aqb = sc->sc_arq_live[prod];
4889 iaq = &arq[prod];
4890
4891 if (aqb == NULL) {
4892 aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle);
4893 if (aqb != NULL) {
4894 SIMPLEQ_REMOVE(&sc->sc_arq_idle, aqb,
4895 ixl_aq_buf, aqb_entry);
4896 } else if ((aqb = ixl_aqb_alloc(sc)) == NULL) {
4897 break;
4898 }
4899
4900 sc->sc_arq_live[prod] = aqb;
4901 memset(aqb->aqb_data, 0, aqb->aqb_size);
4902
4903 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0,
4904 aqb->aqb_size, BUS_DMASYNC_PREREAD);
4905
4906 iaq->iaq_flags = htole16(IXL_AQ_BUF |
4907 (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ?
4908 IXL_AQ_LB : 0));
4909 iaq->iaq_opcode = 0;
4910 iaq->iaq_datalen = htole16(aqb->aqb_size);
4911 iaq->iaq_retval = 0;
4912 iaq->iaq_cookie = 0;
4913 iaq->iaq_param[0] = 0;
4914 iaq->iaq_param[1] = 0;
4915 ixl_aq_dva(iaq, aqb->aqb_map->dm_segs[0].ds_addr);
4916 }
4917
4918 prod++;
4919 prod &= IXL_AQ_MASK;
4920
4921 post = 1;
4922
4923 } while (--n);
4924
4925 if (post) {
4926 sc->sc_arq_prod = prod;
4927 ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
4928 }
4929
4930 return post;
4931 }
4932
4933 static void
4934 ixl_arq_unfill(struct ixl_softc *sc)
4935 {
4936 struct ixl_aq_buf *aqb;
4937 unsigned int i;
4938
4939 for (i = 0; i < __arraycount(sc->sc_arq_live); i++) {
4940 aqb = sc->sc_arq_live[i];
4941 if (aqb == NULL)
4942 continue;
4943
4944 sc->sc_arq_live[i] = NULL;
4945 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, aqb->aqb_size,
4946 BUS_DMASYNC_POSTREAD);
4947 ixl_aqb_free(sc, aqb);
4948 }
4949
4950 while ((aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle)) != NULL) {
4951 SIMPLEQ_REMOVE(&sc->sc_arq_idle, aqb,
4952 ixl_aq_buf, aqb_entry);
4953 ixl_aqb_free(sc, aqb);
4954 }
4955 }
4956
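/*
 * Quiesce the device before reset: mask all interrupts, terminate the
 * interrupt linked lists, warn the hardware about the pending transmit
 * queue disables, then disable every queue assigned to this function.
 */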
4957 static void
4958 ixl_clear_hw(struct ixl_softc *sc)
4959 {
4960 uint32_t num_queues, base_queue;
4961 uint32_t num_pf_int;
4962 uint32_t num_vf_int;
4963 uint32_t num_vfs;
4964 uint32_t i, j;
4965 uint32_t val;
4966 uint32_t eol = 0x7ff;
4967
4968 /* get number of interrupts, queues, and vfs */
4969 val = ixl_rd(sc, I40E_GLPCI_CNF2);
4970 num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
4971 I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
4972 num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
4973 I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
4974
4975 val = ixl_rd(sc, I40E_PFLAN_QALLOC);
4976 base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
4977 I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
4978 j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
4979 I40E_PFLAN_QALLOC_LASTQ_SHIFT;
4980 if (val & I40E_PFLAN_QALLOC_VALID_MASK)
4981 num_queues = (j - base_queue) + 1;
4982 else
4983 num_queues = 0;
4984
4985 val = ixl_rd(sc, I40E_PF_VT_PFALLOC);
4986 i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
4987 I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
4988 j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
4989 I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
4990 if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
4991 num_vfs = (j - i) + 1;
4992 else
4993 num_vfs = 0;
4994
4995 /* stop all the interrupts */
4996 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0);
4997 ixl_flush(sc);
4998 val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
4999 for (i = 0; i < num_pf_int - 2; i++)
5000 ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), val);
5001 ixl_flush(sc);
5002
5003 /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
5004 val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
5005 ixl_wr(sc, I40E_PFINT_LNKLST0, val);
5006 for (i = 0; i < num_pf_int - 2; i++)
5007 ixl_wr(sc, I40E_PFINT_LNKLSTN(i), val);
5008 val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
5009 for (i = 0; i < num_vfs; i++)
5010 ixl_wr(sc, I40E_VPINT_LNKLST0(i), val);
5011 for (i = 0; i < num_vf_int - 2; i++)
5012 ixl_wr(sc, I40E_VPINT_LNKLSTN(i), val);
5013
5014 /* warn the HW of the coming Tx disables */
5015 for (i = 0; i < num_queues; i++) {
5016 uint32_t abs_queue_idx = base_queue + i;
5017 uint32_t reg_block = 0;
5018
5019 if (abs_queue_idx >= 128) {
5020 reg_block = abs_queue_idx / 128;
5021 abs_queue_idx %= 128;
5022 }
5023
5024 val = ixl_rd(sc, I40E_GLLAN_TXPRE_QDIS(reg_block));
5025 val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
5026 val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
5027 val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
5028
5029 ixl_wr(sc, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
5030 }
5031 delaymsec(400);
5032
5033 /* stop all the queues */
5034 for (i = 0; i < num_queues; i++) {
5035 ixl_wr(sc, I40E_QINT_TQCTL(i), 0);
5036 ixl_wr(sc, I40E_QTX_ENA(i), 0);
5037 ixl_wr(sc, I40E_QINT_RQCTL(i), 0);
5038 ixl_wr(sc, I40E_QRX_ENA(i), 0);
5039 }
5040
5041 /* short wait for all queue disables to settle */
5042 delaymsec(50);
5043 }
5044
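/*
 * Reset the physical function.  Wait for any global reset to settle and
 * for the firmware to become ready; if no global reset was in progress,
 * trigger a PF software reset and poll for its completion.
 */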
5045 static int
5046 ixl_pf_reset(struct ixl_softc *sc)
5047 {
5048 uint32_t cnt = 0;
5049 uint32_t cnt1 = 0;
5050 uint32_t reg = 0, reg0 = 0;
5051 uint32_t grst_del;
5052
5053 /*
5054 * Poll for Global Reset steady state in case of recent GRST.
5055 * The grst delay value is in 100ms units, and we'll wait a
5056 * couple counts longer to be sure we don't just miss the end.
5057 */
5058 grst_del = ixl_rd(sc, I40E_GLGEN_RSTCTL);
5059 grst_del &= I40E_GLGEN_RSTCTL_GRSTDEL_MASK;
5060 grst_del >>= I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
5061
5062 grst_del = grst_del * 20;
5063
5064 for (cnt = 0; cnt < grst_del; cnt++) {
5065 reg = ixl_rd(sc, I40E_GLGEN_RSTAT);
5066 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
5067 break;
5068 delaymsec(100);
5069 }
5070 if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
5071 aprint_error(", Global reset polling failed to complete\n");
5072 return -1;
5073 }
5074
5075 /* Now Wait for the FW to be ready */
5076 for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
5077 reg = ixl_rd(sc, I40E_GLNVM_ULD);
5078 reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5079 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
5080 if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5081 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))
5082 break;
5083
5084 delaymsec(10);
5085 }
5086 if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5087 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
5088 aprint_error(", wait for FW Reset complete timed out "
5089 "(I40E_GLNVM_ULD = 0x%x)\n", reg);
5090 return -1;
5091 }
5092
5093 /*
5094 * If there was a Global Reset in progress when we got here,
5095 * we don't need to do the PF Reset
5096 */
5097 if (cnt == 0) {
5098 reg = ixl_rd(sc, I40E_PFGEN_CTRL);
5099 ixl_wr(sc, I40E_PFGEN_CTRL, reg | I40E_PFGEN_CTRL_PFSWR_MASK);
5100 for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) {
5101 reg = ixl_rd(sc, I40E_PFGEN_CTRL);
5102 if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
5103 break;
5104 delaymsec(1);
5105
5106 reg0 = ixl_rd(sc, I40E_GLGEN_RSTAT);
5107 if (reg0 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
5108 aprint_error(", Core reset upcoming."
5109 				    " Skipping PF reset request\n");
5110 return -1;
5111 }
5112 }
5113 if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
5114 aprint_error(", PF reset polling failed to complete"
5115 			    " (I40E_PFGEN_CTRL = 0x%x)\n", reg);
5116 return -1;
5117 }
5118 }
5119
5120 return 0;
5121 }
5122
5123 static int
5124 ixl_dmamem_alloc(struct ixl_softc *sc, struct ixl_dmamem *ixm,
5125 bus_size_t size, bus_size_t align)
5126 {
5127 ixm->ixm_size = size;
5128
5129 if (bus_dmamap_create(sc->sc_dmat, ixm->ixm_size, 1,
5130 ixm->ixm_size, 0,
5131 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
5132 &ixm->ixm_map) != 0)
5133 return 1;
5134 if (bus_dmamem_alloc(sc->sc_dmat, ixm->ixm_size,
5135 align, 0, &ixm->ixm_seg, 1, &ixm->ixm_nsegs,
5136 BUS_DMA_WAITOK) != 0)
5137 goto destroy;
5138 if (bus_dmamem_map(sc->sc_dmat, &ixm->ixm_seg, ixm->ixm_nsegs,
5139 ixm->ixm_size, &ixm->ixm_kva, BUS_DMA_WAITOK) != 0)
5140 goto free;
5141 if (bus_dmamap_load(sc->sc_dmat, ixm->ixm_map, ixm->ixm_kva,
5142 ixm->ixm_size, NULL, BUS_DMA_WAITOK) != 0)
5143 goto unmap;
5144
5145 memset(ixm->ixm_kva, 0, ixm->ixm_size);
5146
5147 return 0;
5148 unmap:
5149 bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
5150 free:
5151 bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
5152 destroy:
5153 bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
5154 return 1;
5155 }
5156
5157 static void
5158 ixl_dmamem_free(struct ixl_softc *sc, struct ixl_dmamem *ixm)
5159 {
5160 bus_dmamap_unload(sc->sc_dmat, ixm->ixm_map);
5161 bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
5162 bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
5163 bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
5164 }
5165
5166 static int
5167 ixl_setup_vlan_hwfilter(struct ixl_softc *sc)
5168 {
5169 struct ethercom *ec = &sc->sc_ec;
5170 struct vlanid_list *vlanidp;
5171 int rv;
5172
5173 ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
5174 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
5175 ixl_remove_macvlan(sc, etherbroadcastaddr, 0,
5176 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
5177
5178 rv = ixl_add_macvlan(sc, sc->sc_enaddr, 0,
5179 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5180 if (rv != 0)
5181 return rv;
5182 rv = ixl_add_macvlan(sc, etherbroadcastaddr, 0,
5183 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5184 if (rv != 0)
5185 return rv;
5186
5187 ETHER_LOCK(ec);
5188 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
5189 rv = ixl_add_macvlan(sc, sc->sc_enaddr,
5190 vlanidp->vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5191 if (rv != 0)
5192 break;
5193 rv = ixl_add_macvlan(sc, etherbroadcastaddr,
5194 vlanidp->vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5195 if (rv != 0)
5196 break;
5197 }
5198 ETHER_UNLOCK(ec);
5199
5200 return rv;
5201 }
5202
5203 static void
5204 ixl_teardown_vlan_hwfilter(struct ixl_softc *sc)
5205 {
5206 struct vlanid_list *vlanidp;
5207 struct ethercom *ec = &sc->sc_ec;
5208
5209 ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
5210 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5211 ixl_remove_macvlan(sc, etherbroadcastaddr, 0,
5212 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5213
5214 ETHER_LOCK(ec);
5215 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
5216 ixl_remove_macvlan(sc, sc->sc_enaddr,
5217 vlanidp->vid, IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5218 ixl_remove_macvlan(sc, etherbroadcastaddr,
5219 vlanidp->vid, IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5220 }
5221 ETHER_UNLOCK(ec);
5222
5223 ixl_add_macvlan(sc, sc->sc_enaddr, 0,
5224 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
5225 ixl_add_macvlan(sc, etherbroadcastaddr, 0,
5226 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
5227 }
5228
5229 static int
5230 ixl_update_macvlan(struct ixl_softc *sc)
5231 {
5232 int rv = 0;
5233 int next_ec_capenable = sc->sc_ec.ec_capenable;
5234
5235 if (ISSET(next_ec_capenable, ETHERCAP_VLAN_HWFILTER)) {
5236 rv = ixl_setup_vlan_hwfilter(sc);
5237 if (rv != 0)
5238 ixl_teardown_vlan_hwfilter(sc);
5239 } else {
5240 ixl_teardown_vlan_hwfilter(sc);
5241 }
5242
5243 return rv;
5244 }
5245
5246 static int
5247 ixl_ifflags_cb(struct ethercom *ec)
5248 {
5249 struct ifnet *ifp = &ec->ec_if;
5250 struct ixl_softc *sc = ifp->if_softc;
5251 int rv, change;
5252
5253 mutex_enter(&sc->sc_cfg_lock);
5254
5255 change = ec->ec_capenable ^ sc->sc_cur_ec_capenable;
5256
5257 if (ISSET(change, ETHERCAP_VLAN_HWTAGGING)) {
5258 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWTAGGING;
5259 rv = ENETRESET;
5260 goto out;
5261 }
5262
5263 if (ISSET(change, ETHERCAP_VLAN_HWFILTER)) {
5264 rv = ixl_update_macvlan(sc);
5265 if (rv == 0) {
5266 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWFILTER;
5267 } else {
5268 CLR(ec->ec_capenable, ETHERCAP_VLAN_HWFILTER);
5269 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER);
5270 }
5271 }
5272
5273 rv = ixl_iff(sc);
5274 out:
5275 mutex_exit(&sc->sc_cfg_lock);
5276
5277 return rv;
5278 }
5279
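/*
 * Translate an admin queue link status reply into ifmedia flags, pause
 * settings and baudrate, and return the resulting link state.
 */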
5280 static int
5281 ixl_set_link_status(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
5282 {
5283 const struct ixl_aq_link_status *status;
5284 const struct ixl_phy_type *itype;
5285
5286 uint64_t ifm_active = IFM_ETHER;
5287 uint64_t ifm_status = IFM_AVALID;
5288 int link_state = LINK_STATE_DOWN;
5289 uint64_t baudrate = 0;
5290
5291 status = (const struct ixl_aq_link_status *)iaq->iaq_param;
5292 if (!ISSET(status->link_info, IXL_AQ_LINK_UP_FUNCTION))
5293 goto done;
5294
5295 ifm_active |= IFM_FDX;
5296 ifm_status |= IFM_ACTIVE;
5297 link_state = LINK_STATE_UP;
5298
5299 itype = ixl_search_phy_type(status->phy_type);
5300 if (itype != NULL)
5301 ifm_active |= itype->ifm_type;
5302
5303 if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_TX))
5304 ifm_active |= IFM_ETH_TXPAUSE;
5305 if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_RX))
5306 ifm_active |= IFM_ETH_RXPAUSE;
5307
5308 baudrate = ixl_search_link_speed(status->link_speed);
5309
5310 done:
5311 /* NET_ASSERT_LOCKED() except during attach */
5312 sc->sc_media_active = ifm_active;
5313 sc->sc_media_status = ifm_status;
5314
5315 sc->sc_ec.ec_if.if_baudrate = baudrate;
5316
5317 return link_state;
5318 }
5319
5320 static int
5321 ixl_establish_intx(struct ixl_softc *sc)
5322 {
5323 pci_chipset_tag_t pc = sc->sc_pa.pa_pc;
5324 pci_intr_handle_t *intr;
5325 char xnamebuf[32];
5326 char intrbuf[PCI_INTRSTR_LEN];
5327 char const *intrstr;
5328
5329 KASSERT(sc->sc_nintrs == 1);
5330
5331 intr = &sc->sc_ihp[0];
5332
5333 intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf));
5334 snprintf(xnamebuf, sizeof(xnamebuf), "%s:legacy",
5335 device_xname(sc->sc_dev));
5336
5337 sc->sc_ihs[0] = pci_intr_establish_xname(pc, *intr, IPL_NET, ixl_intr,
5338 sc, xnamebuf);
5339
5340 if (sc->sc_ihs[0] == NULL) {
5341 aprint_error_dev(sc->sc_dev,
5342 "unable to establish interrupt at %s\n", intrstr);
5343 return -1;
5344 }
5345
5346 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
5347 return 0;
5348 }
5349
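/*
 * Establish the MSI-X handlers: vector 0 services the admin queue and
 * other events, the remaining vectors service one TX/RX queue pair
 * each, and every handler is bound to a CPU when
 * interrupt_distribute() allows it.
 */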
5350 static int
5351 ixl_establish_msix(struct ixl_softc *sc)
5352 {
5353 pci_chipset_tag_t pc = sc->sc_pa.pa_pc;
5354 kcpuset_t *affinity;
5355 unsigned int vector = 0;
5356 unsigned int i;
5357 int affinity_to, r;
5358 char xnamebuf[32];
5359 char intrbuf[PCI_INTRSTR_LEN];
5360 char const *intrstr;
5361
5362 kcpuset_create(&affinity, false);
5363
5364 /* the "other" intr is mapped to vector 0 */
5365 vector = 0;
5366 intrstr = pci_intr_string(pc, sc->sc_ihp[vector],
5367 intrbuf, sizeof(intrbuf));
5368 snprintf(xnamebuf, sizeof(xnamebuf), "%s others",
5369 device_xname(sc->sc_dev));
5370 sc->sc_ihs[vector] = pci_intr_establish_xname(pc,
5371 sc->sc_ihp[vector], IPL_NET, ixl_other_intr,
5372 sc, xnamebuf);
5373 if (sc->sc_ihs[vector] == NULL) {
5374 aprint_error_dev(sc->sc_dev,
5375 "unable to establish interrupt at %s\n", intrstr);
5376 goto fail;
5377 }
5378
5379 aprint_normal_dev(sc->sc_dev, "other interrupt at %s", intrstr);
5380
5381 affinity_to = ncpu > (int)sc->sc_nqueue_pairs_max ? 1 : 0;
5382 affinity_to = (affinity_to + sc->sc_nqueue_pairs_max) % ncpu;
5383
5384 kcpuset_zero(affinity);
5385 kcpuset_set(affinity, affinity_to);
5386 r = interrupt_distribute(sc->sc_ihs[vector], affinity, NULL);
5387 if (r == 0) {
5388 aprint_normal(", affinity to %u", affinity_to);
5389 }
5390 aprint_normal("\n");
5391 vector++;
5392
5393 sc->sc_msix_vector_queue = vector;
5394 affinity_to = ncpu > (int)sc->sc_nqueue_pairs_max ? 1 : 0;
5395
5396 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
5397 intrstr = pci_intr_string(pc, sc->sc_ihp[vector],
5398 intrbuf, sizeof(intrbuf));
5399 snprintf(xnamebuf, sizeof(xnamebuf), "%s TXRX%d",
5400 device_xname(sc->sc_dev), i);
5401
5402 sc->sc_ihs[vector] = pci_intr_establish_xname(pc,
5403 sc->sc_ihp[vector], IPL_NET, ixl_queue_intr,
5404 (void *)&sc->sc_qps[i], xnamebuf);
5405
5406 if (sc->sc_ihs[vector] == NULL) {
5407 aprint_error_dev(sc->sc_dev,
5408 "unable to establish interrupt at %s\n", intrstr);
5409 goto fail;
5410 }
5411
5412 aprint_normal_dev(sc->sc_dev,
5413 		    "for TXRX%d interrupt at %s", i, intrstr);
5414
5415 kcpuset_zero(affinity);
5416 kcpuset_set(affinity, affinity_to);
5417 r = interrupt_distribute(sc->sc_ihs[vector], affinity, NULL);
5418 if (r == 0) {
5419 aprint_normal(", affinity to %u", affinity_to);
5420 affinity_to = (affinity_to + 1) % ncpu;
5421 }
5422 aprint_normal("\n");
5423 vector++;
5424 }
5425
5426 kcpuset_destroy(affinity);
5427
5428 return 0;
5429 fail:
5430 for (i = 0; i < vector; i++) {
5431 pci_intr_disestablish(pc, sc->sc_ihs[i]);
5432 }
5433
5434 	sc->sc_msix_vector_queue = 0;
5436 kcpuset_destroy(affinity);
5437
5438 return -1;
5439 }
5440
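/*
 * Program the per-queue interrupt cause registers: with MSI-X each
 * queue pair gets its own vector, otherwise all queues are chained onto
 * the single legacy/MSI vector via the interrupt linked list registers.
 */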
5441 static void
5442 ixl_config_queue_intr(struct ixl_softc *sc)
5443 {
5444 unsigned int i, vector;
5445
5446 if (sc->sc_intrtype == PCI_INTR_TYPE_MSIX) {
5447 vector = sc->sc_msix_vector_queue;
5448 } else {
5449 vector = I40E_INTR_NOTX_INTR;
5450
5451 ixl_wr(sc, I40E_PFINT_LNKLST0,
5452 (I40E_INTR_NOTX_QUEUE <<
5453 I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
5454 (I40E_QUEUE_TYPE_RX <<
5455 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
5456 }
5457
5458 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
5459 ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), 0);
5460 ixl_flush(sc);
5461
5462 ixl_wr(sc, I40E_PFINT_LNKLSTN(i),
5463 ((i) << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
5464 (I40E_QUEUE_TYPE_RX <<
5465 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
5466
5467 ixl_wr(sc, I40E_QINT_RQCTL(i),
5468 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
5469 (I40E_ITR_INDEX_RX <<
5470 I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
5471 (I40E_INTR_NOTX_RX_QUEUE <<
5472 I40E_QINT_RQCTL_MSIX0_INDX_SHIFT) |
5473 (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
5474 (I40E_QUEUE_TYPE_TX <<
5475 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
5476 I40E_QINT_RQCTL_CAUSE_ENA_MASK);
5477
5478 ixl_wr(sc, I40E_QINT_TQCTL(i),
5479 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
5480 (I40E_ITR_INDEX_TX <<
5481 I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
5482 (I40E_INTR_NOTX_TX_QUEUE <<
5483 I40E_QINT_TQCTL_MSIX0_INDX_SHIFT) |
5484 (I40E_QUEUE_TYPE_EOL <<
5485 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
5486 (I40E_QUEUE_TYPE_RX <<
5487 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) |
5488 I40E_QINT_TQCTL_CAUSE_ENA_MASK);
5489
5490 if (sc->sc_intrtype == PCI_INTR_TYPE_MSIX)
5491 vector++;
5492 }
5493 ixl_flush(sc);
5494
5495 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_RX), 0x7a);
5496 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_TX), 0x7a);
5497 ixl_flush(sc);
5498 }
5499
5500 static void
5501 ixl_config_other_intr(struct ixl_softc *sc)
5502 {
5503 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0);
5504 (void)ixl_rd(sc, I40E_PFINT_ICR0);
5505
5506 ixl_wr(sc, I40E_PFINT_ICR0_ENA,
5507 I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
5508 I40E_PFINT_ICR0_ENA_GRST_MASK |
5509 I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
5510 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
5511 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
5512 I40E_PFINT_ICR0_ENA_VFLR_MASK |
5513 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
5514 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
5515 I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK);
5516
5517 ixl_wr(sc, I40E_PFINT_LNKLST0, 0x7FF);
5518 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_OTHER), 0);
5519 ixl_wr(sc, I40E_PFINT_STAT_CTL0,
5520 (I40E_ITR_INDEX_OTHER <<
5521 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT));
5522 ixl_flush(sc);
5523 }
5524
5525 static int
5526 ixl_setup_interrupts(struct ixl_softc *sc)
5527 {
5528 struct pci_attach_args *pa = &sc->sc_pa;
5529 pci_intr_type_t max_type, intr_type;
5530 int counts[PCI_INTR_TYPE_SIZE];
5531 int error;
5532 unsigned int i;
5533 bool retry;
5534
5535 memset(counts, 0, sizeof(counts));
5536 max_type = PCI_INTR_TYPE_MSIX;
5537 /* QPs + other interrupt */
5538 counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueue_pairs_max + 1;
5539 counts[PCI_INTR_TYPE_INTX] = 1;
5540
5541 if (ixl_param_nomsix)
5542 counts[PCI_INTR_TYPE_MSIX] = 0;
5543
5544 do {
5545 retry = false;
5546 error = pci_intr_alloc(pa, &sc->sc_ihp, counts, max_type);
5547 if (error != 0) {
5548 aprint_error_dev(sc->sc_dev,
5549 "couldn't map interrupt\n");
5550 break;
5551 }
5552 		intr_type = pci_intr_type(pa->pa_pc, sc->sc_ihp[0]);
5553 		sc->sc_nintrs = counts[intr_type];
5554 		KASSERT(sc->sc_nintrs > 0);
5555 
5556 		for (i = 0; i < sc->sc_nintrs; i++) {
5557 			pci_intr_setattr(pa->pa_pc, &sc->sc_ihp[i],
5558 			    PCI_INTR_MPSAFE, true);
5559 		}
5560
5561 sc->sc_ihs = kmem_alloc(sizeof(sc->sc_ihs[0]) * sc->sc_nintrs,
5562 KM_SLEEP);
5563
5564 if (intr_type == PCI_INTR_TYPE_MSIX) {
5565 error = ixl_establish_msix(sc);
5566 if (error) {
5567 counts[PCI_INTR_TYPE_MSIX] = 0;
5568 retry = true;
5569 }
5570 } else if (intr_type == PCI_INTR_TYPE_INTX) {
5571 error = ixl_establish_intx(sc);
5572 } else {
5573 error = -1;
5574 }
5575
5576 if (error) {
5577 kmem_free(sc->sc_ihs,
5578 sizeof(sc->sc_ihs[0]) * sc->sc_nintrs);
5579 pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs);
5580 } else {
5581 sc->sc_intrtype = intr_type;
5582 }
5583 } while (retry);
5584
5585 return error;
5586 }
5587
5588 static void
5589 ixl_teardown_interrupts(struct ixl_softc *sc)
5590 {
5591 struct pci_attach_args *pa = &sc->sc_pa;
5592 unsigned int i;
5593
5594 for (i = 0; i < sc->sc_nintrs; i++) {
5595 pci_intr_disestablish(pa->pa_pc, sc->sc_ihs[i]);
5596 }
5597
5598 pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs);
5599
5600 kmem_free(sc->sc_ihs, sizeof(sc->sc_ihs[0]) * sc->sc_nintrs);
5601 sc->sc_ihs = NULL;
5602 sc->sc_nintrs = 0;
5603 }
5604
5605 static int
5606 ixl_setup_stats(struct ixl_softc *sc)
5607 {
5608 struct ixl_queue_pair *qp;
5609 struct ixl_tx_ring *txr;
5610 struct ixl_rx_ring *rxr;
5611 struct ixl_stats_counters *isc;
5612 unsigned int i;
5613
5614 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
5615 qp = &sc->sc_qps[i];
5616 txr = qp->qp_txr;
5617 rxr = qp->qp_rxr;
5618
5619 evcnt_attach_dynamic(&txr->txr_defragged, EVCNT_TYPE_MISC,
5620 		    NULL, qp->qp_name, "m_defrag succeeded");
5621 		evcnt_attach_dynamic(&txr->txr_defrag_failed, EVCNT_TYPE_MISC,
5622 		    NULL, qp->qp_name, "m_defrag failed");
5623 evcnt_attach_dynamic(&txr->txr_pcqdrop, EVCNT_TYPE_MISC,
5624 NULL, qp->qp_name, "Dropped in pcq");
5625 evcnt_attach_dynamic(&txr->txr_transmitdef, EVCNT_TYPE_MISC,
5626 NULL, qp->qp_name, "Deferred transmit");
5627 evcnt_attach_dynamic(&txr->txr_intr, EVCNT_TYPE_INTR,
5628 NULL, qp->qp_name, "Interrupt on queue");
5629 evcnt_attach_dynamic(&txr->txr_defer, EVCNT_TYPE_MISC,
5630 NULL, qp->qp_name, "Handled queue in softint/workqueue");
5631
5632 evcnt_attach_dynamic(&rxr->rxr_mgethdr_failed, EVCNT_TYPE_MISC,
5633 NULL, qp->qp_name, "MGETHDR failed");
5634 evcnt_attach_dynamic(&rxr->rxr_mgetcl_failed, EVCNT_TYPE_MISC,
5635 NULL, qp->qp_name, "MCLGET failed");
5636 evcnt_attach_dynamic(&rxr->rxr_mbuf_load_failed,
5637 EVCNT_TYPE_MISC, NULL, qp->qp_name,
5638 "bus_dmamap_load_mbuf failed");
5639 evcnt_attach_dynamic(&rxr->rxr_intr, EVCNT_TYPE_INTR,
5640 NULL, qp->qp_name, "Interrupt on queue");
5641 evcnt_attach_dynamic(&rxr->rxr_defer, EVCNT_TYPE_MISC,
5642 NULL, qp->qp_name, "Handled queue in softint/workqueue");
5643 }
5644
5645 evcnt_attach_dynamic(&sc->sc_event_atq, EVCNT_TYPE_INTR,
5646 NULL, device_xname(sc->sc_dev), "Interrupt for other events");
5647 evcnt_attach_dynamic(&sc->sc_event_link, EVCNT_TYPE_MISC,
5648 NULL, device_xname(sc->sc_dev), "Link status event");
5649 evcnt_attach_dynamic(&sc->sc_event_ecc_err, EVCNT_TYPE_MISC,
5650 NULL, device_xname(sc->sc_dev), "ECC error");
5651 evcnt_attach_dynamic(&sc->sc_event_pci_exception, EVCNT_TYPE_MISC,
5652 NULL, device_xname(sc->sc_dev), "PCI exception");
5653 evcnt_attach_dynamic(&sc->sc_event_crit_err, EVCNT_TYPE_MISC,
5654 NULL, device_xname(sc->sc_dev), "Critical error");
5655
5656 isc = &sc->sc_stats_counters;
5657 evcnt_attach_dynamic(&isc->isc_crc_errors, EVCNT_TYPE_MISC,
5658 NULL, device_xname(sc->sc_dev), "CRC errors");
5659 evcnt_attach_dynamic(&isc->isc_illegal_bytes, EVCNT_TYPE_MISC,
5660 NULL, device_xname(sc->sc_dev), "Illegal bytes");
5661 evcnt_attach_dynamic(&isc->isc_mac_local_faults, EVCNT_TYPE_MISC,
5662 NULL, device_xname(sc->sc_dev), "Mac local faults");
5663 evcnt_attach_dynamic(&isc->isc_mac_remote_faults, EVCNT_TYPE_MISC,
5664 NULL, device_xname(sc->sc_dev), "Mac remote faults");
5665 evcnt_attach_dynamic(&isc->isc_link_xon_rx, EVCNT_TYPE_MISC,
5666 NULL, device_xname(sc->sc_dev), "Rx xon");
5667 evcnt_attach_dynamic(&isc->isc_link_xon_tx, EVCNT_TYPE_MISC,
5668 NULL, device_xname(sc->sc_dev), "Tx xon");
5669 evcnt_attach_dynamic(&isc->isc_link_xoff_rx, EVCNT_TYPE_MISC,
5670 NULL, device_xname(sc->sc_dev), "Rx xoff");
5671 evcnt_attach_dynamic(&isc->isc_link_xoff_tx, EVCNT_TYPE_MISC,
5672 NULL, device_xname(sc->sc_dev), "Tx xoff");
5673 evcnt_attach_dynamic(&isc->isc_rx_fragments, EVCNT_TYPE_MISC,
5674 NULL, device_xname(sc->sc_dev), "Rx fragments");
5675 evcnt_attach_dynamic(&isc->isc_rx_jabber, EVCNT_TYPE_MISC,
5676 NULL, device_xname(sc->sc_dev), "Rx jabber");
5677
5678 evcnt_attach_dynamic(&isc->isc_rx_size_64, EVCNT_TYPE_MISC,
5679 NULL, device_xname(sc->sc_dev), "Rx size 64");
5680 evcnt_attach_dynamic(&isc->isc_rx_size_127, EVCNT_TYPE_MISC,
5681 NULL, device_xname(sc->sc_dev), "Rx size 127");
5682 evcnt_attach_dynamic(&isc->isc_rx_size_255, EVCNT_TYPE_MISC,
5683 NULL, device_xname(sc->sc_dev), "Rx size 255");
5684 evcnt_attach_dynamic(&isc->isc_rx_size_511, EVCNT_TYPE_MISC,
5685 NULL, device_xname(sc->sc_dev), "Rx size 511");
5686 evcnt_attach_dynamic(&isc->isc_rx_size_1023, EVCNT_TYPE_MISC,
5687 NULL, device_xname(sc->sc_dev), "Rx size 1023");
5688 evcnt_attach_dynamic(&isc->isc_rx_size_1522, EVCNT_TYPE_MISC,
5689 NULL, device_xname(sc->sc_dev), "Rx size 1522");
5690 evcnt_attach_dynamic(&isc->isc_rx_size_big, EVCNT_TYPE_MISC,
5691 NULL, device_xname(sc->sc_dev), "Rx jumbo packets");
5692 evcnt_attach_dynamic(&isc->isc_rx_undersize, EVCNT_TYPE_MISC,
5693 NULL, device_xname(sc->sc_dev), "Rx under size");
5694 evcnt_attach_dynamic(&isc->isc_rx_oversize, EVCNT_TYPE_MISC,
5695 NULL, device_xname(sc->sc_dev), "Rx over size");
5696
5697 evcnt_attach_dynamic(&isc->isc_rx_bytes, EVCNT_TYPE_MISC,
5698 NULL, device_xname(sc->sc_dev), "Rx bytes / port");
5699 evcnt_attach_dynamic(&isc->isc_rx_discards, EVCNT_TYPE_MISC,
5700 NULL, device_xname(sc->sc_dev), "Rx discards / port");
5701 evcnt_attach_dynamic(&isc->isc_rx_unicast, EVCNT_TYPE_MISC,
5702 NULL, device_xname(sc->sc_dev), "Rx unicast / port");
5703 evcnt_attach_dynamic(&isc->isc_rx_multicast, EVCNT_TYPE_MISC,
5704 NULL, device_xname(sc->sc_dev), "Rx multicast / port");
5705 evcnt_attach_dynamic(&isc->isc_rx_broadcast, EVCNT_TYPE_MISC,
5706 NULL, device_xname(sc->sc_dev), "Rx broadcast / port");
5707
5708 evcnt_attach_dynamic(&isc->isc_vsi_rx_bytes, EVCNT_TYPE_MISC,
5709 NULL, device_xname(sc->sc_dev), "Rx bytes / vsi");
5710 evcnt_attach_dynamic(&isc->isc_vsi_rx_discards, EVCNT_TYPE_MISC,
5711 	    NULL, device_xname(sc->sc_dev), "Rx discards / vsi");
5712 evcnt_attach_dynamic(&isc->isc_vsi_rx_unicast, EVCNT_TYPE_MISC,
5713 NULL, device_xname(sc->sc_dev), "Rx unicast / vsi");
5714 evcnt_attach_dynamic(&isc->isc_vsi_rx_multicast, EVCNT_TYPE_MISC,
5715 NULL, device_xname(sc->sc_dev), "Rx multicast / vsi");
5716 evcnt_attach_dynamic(&isc->isc_vsi_rx_broadcast, EVCNT_TYPE_MISC,
5717 NULL, device_xname(sc->sc_dev), "Rx broadcast / vsi");
5718
5719 evcnt_attach_dynamic(&isc->isc_tx_size_64, EVCNT_TYPE_MISC,
5720 NULL, device_xname(sc->sc_dev), "Tx size 64");
5721 evcnt_attach_dynamic(&isc->isc_tx_size_127, EVCNT_TYPE_MISC,
5722 NULL, device_xname(sc->sc_dev), "Tx size 127");
5723 evcnt_attach_dynamic(&isc->isc_tx_size_255, EVCNT_TYPE_MISC,
5724 NULL, device_xname(sc->sc_dev), "Tx size 255");
5725 evcnt_attach_dynamic(&isc->isc_tx_size_511, EVCNT_TYPE_MISC,
5726 NULL, device_xname(sc->sc_dev), "Tx size 511");
5727 evcnt_attach_dynamic(&isc->isc_tx_size_1023, EVCNT_TYPE_MISC,
5728 NULL, device_xname(sc->sc_dev), "Tx size 1023");
5729 evcnt_attach_dynamic(&isc->isc_tx_size_1522, EVCNT_TYPE_MISC,
5730 NULL, device_xname(sc->sc_dev), "Tx size 1522");
5731 evcnt_attach_dynamic(&isc->isc_tx_size_big, EVCNT_TYPE_MISC,
5732 NULL, device_xname(sc->sc_dev), "Tx jumbo packets");
5733
5734 evcnt_attach_dynamic(&isc->isc_tx_bytes, EVCNT_TYPE_MISC,
5735 NULL, device_xname(sc->sc_dev), "Tx bytes / port");
5736 evcnt_attach_dynamic(&isc->isc_tx_dropped_link_down, EVCNT_TYPE_MISC,
5737 NULL, device_xname(sc->sc_dev),
5738 "Tx dropped due to link down / port");
5739 evcnt_attach_dynamic(&isc->isc_tx_unicast, EVCNT_TYPE_MISC,
5740 NULL, device_xname(sc->sc_dev), "Tx unicast / port");
5741 evcnt_attach_dynamic(&isc->isc_tx_multicast, EVCNT_TYPE_MISC,
5742 NULL, device_xname(sc->sc_dev), "Tx multicast / port");
5743 evcnt_attach_dynamic(&isc->isc_tx_broadcast, EVCNT_TYPE_MISC,
5744 NULL, device_xname(sc->sc_dev), "Tx broadcast / port");
5745
5746 evcnt_attach_dynamic(&isc->isc_vsi_tx_bytes, EVCNT_TYPE_MISC,
5747 NULL, device_xname(sc->sc_dev), "Tx bytes / vsi");
5748 evcnt_attach_dynamic(&isc->isc_vsi_tx_errors, EVCNT_TYPE_MISC,
5749 NULL, device_xname(sc->sc_dev), "Tx errors / vsi");
5750 evcnt_attach_dynamic(&isc->isc_vsi_tx_unicast, EVCNT_TYPE_MISC,
5751 NULL, device_xname(sc->sc_dev), "Tx unicast / vsi");
5752 evcnt_attach_dynamic(&isc->isc_vsi_tx_multicast, EVCNT_TYPE_MISC,
5753 NULL, device_xname(sc->sc_dev), "Tx multicast / vsi");
5754 evcnt_attach_dynamic(&isc->isc_vsi_tx_broadcast, EVCNT_TYPE_MISC,
5755 NULL, device_xname(sc->sc_dev), "Tx broadcast / vsi");
5756
5757 sc->sc_stats_intval = ixl_param_stats_interval;
5758 callout_init(&sc->sc_stats_callout, CALLOUT_MPSAFE);
5759 callout_setfunc(&sc->sc_stats_callout, ixl_stats_callout, sc);
5760 ixl_work_set(&sc->sc_stats_task, ixl_stats_update, sc);
5761
5762 return 0;
5763 }
5764
5765 static void
5766 ixl_teardown_stats(struct ixl_softc *sc)
5767 {
5768 struct ixl_tx_ring *txr;
5769 struct ixl_rx_ring *rxr;
5770 struct ixl_stats_counters *isc;
5771 unsigned int i;
5772
5773 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
5774 txr = sc->sc_qps[i].qp_txr;
5775 rxr = sc->sc_qps[i].qp_rxr;
5776
5777 evcnt_detach(&txr->txr_defragged);
5778 evcnt_detach(&txr->txr_defrag_failed);
5779 evcnt_detach(&txr->txr_pcqdrop);
5780 evcnt_detach(&txr->txr_transmitdef);
5781 evcnt_detach(&txr->txr_intr);
5782 evcnt_detach(&txr->txr_defer);
5783
5784 evcnt_detach(&rxr->rxr_mgethdr_failed);
5785 evcnt_detach(&rxr->rxr_mgetcl_failed);
5786 evcnt_detach(&rxr->rxr_mbuf_load_failed);
5787 evcnt_detach(&rxr->rxr_intr);
5788 evcnt_detach(&rxr->rxr_defer);
5789 }
5790
5791 isc = &sc->sc_stats_counters;
5792 evcnt_detach(&isc->isc_crc_errors);
5793 evcnt_detach(&isc->isc_illegal_bytes);
5794 evcnt_detach(&isc->isc_mac_local_faults);
5795 evcnt_detach(&isc->isc_mac_remote_faults);
5796 evcnt_detach(&isc->isc_link_xon_rx);
5797 evcnt_detach(&isc->isc_link_xon_tx);
5798 evcnt_detach(&isc->isc_link_xoff_rx);
5799 evcnt_detach(&isc->isc_link_xoff_tx);
5800 evcnt_detach(&isc->isc_rx_fragments);
5801 evcnt_detach(&isc->isc_rx_jabber);
5802 evcnt_detach(&isc->isc_rx_bytes);
5803 evcnt_detach(&isc->isc_rx_discards);
5804 evcnt_detach(&isc->isc_rx_unicast);
5805 evcnt_detach(&isc->isc_rx_multicast);
5806 evcnt_detach(&isc->isc_rx_broadcast);
5807 evcnt_detach(&isc->isc_rx_size_64);
5808 evcnt_detach(&isc->isc_rx_size_127);
5809 evcnt_detach(&isc->isc_rx_size_255);
5810 evcnt_detach(&isc->isc_rx_size_511);
5811 evcnt_detach(&isc->isc_rx_size_1023);
5812 evcnt_detach(&isc->isc_rx_size_1522);
5813 evcnt_detach(&isc->isc_rx_size_big);
5814 evcnt_detach(&isc->isc_rx_undersize);
5815 evcnt_detach(&isc->isc_rx_oversize);
5816 evcnt_detach(&isc->isc_tx_bytes);
5817 evcnt_detach(&isc->isc_tx_dropped_link_down);
5818 evcnt_detach(&isc->isc_tx_unicast);
5819 evcnt_detach(&isc->isc_tx_multicast);
5820 evcnt_detach(&isc->isc_tx_broadcast);
5821 evcnt_detach(&isc->isc_tx_size_64);
5822 evcnt_detach(&isc->isc_tx_size_127);
5823 evcnt_detach(&isc->isc_tx_size_255);
5824 evcnt_detach(&isc->isc_tx_size_511);
5825 evcnt_detach(&isc->isc_tx_size_1023);
5826 evcnt_detach(&isc->isc_tx_size_1522);
5827 evcnt_detach(&isc->isc_tx_size_big);
5828 evcnt_detach(&isc->isc_vsi_rx_discards);
5829 evcnt_detach(&isc->isc_vsi_rx_bytes);
5830 evcnt_detach(&isc->isc_vsi_rx_unicast);
5831 evcnt_detach(&isc->isc_vsi_rx_multicast);
5832 evcnt_detach(&isc->isc_vsi_rx_broadcast);
5833 evcnt_detach(&isc->isc_vsi_tx_errors);
5834 evcnt_detach(&isc->isc_vsi_tx_bytes);
5835 evcnt_detach(&isc->isc_vsi_tx_unicast);
5836 evcnt_detach(&isc->isc_vsi_tx_multicast);
5837 evcnt_detach(&isc->isc_vsi_tx_broadcast);
5838
5839 evcnt_detach(&sc->sc_event_atq);
5840 evcnt_detach(&sc->sc_event_link);
5841 evcnt_detach(&sc->sc_event_ecc_err);
5842 evcnt_detach(&sc->sc_event_pci_exception);
5843 evcnt_detach(&sc->sc_event_crit_err);
5844
5845 callout_destroy(&sc->sc_stats_callout);
5846 }
5847
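/*
 * ixl_stats_callout --
 *	Periodic statistics timer.  It only schedules the statistics task
 *	on the workqueue and re-arms itself; the register reads happen in
 *	ixl_stats_update().
 */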
5848 static void
5849 ixl_stats_callout(void *xsc)
5850 {
5851 struct ixl_softc *sc = xsc;
5852
5853 ixl_work_add(sc->sc_workq, &sc->sc_stats_task);
5854 callout_schedule(&sc->sc_stats_callout, mstohz(sc->sc_stats_intval));
5855 }
5856
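/*
 * ixl_stat_delta --
 *	Read a hardware statistics counter (32 bits wide, or 48 bits when
 *	a high register is supplied) and return the wrap-corrected delta
 *	against the value saved in *offset.  The freshly read raw value is
 *	stored back into *offset.  When has_offset is false, only the
 *	baseline is recorded and 0 is returned.
 */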
5857 static uint64_t
5858 ixl_stat_delta(struct ixl_softc *sc, uint32_t reg_hi, uint32_t reg_lo,
5859 uint64_t *offset, bool has_offset)
5860 {
5861 uint64_t value, delta;
5862 int bitwidth;
5863
5864 bitwidth = reg_hi == 0 ? 32 : 48;
5865
5866 value = ixl_rd(sc, reg_lo);
5867
5868 if (bitwidth > 32) {
5869 value |= ((uint64_t)ixl_rd(sc, reg_hi) << 32);
5870 }
5871
5872 if (__predict_true(has_offset)) {
5873 delta = value;
5874 if (value < *offset)
5875 delta += ((uint64_t)1 << bitwidth);
5876 delta -= *offset;
5877 } else {
5878 delta = 0;
5879 }
5880 atomic_swap_64(offset, value);
5881
5882 return delta;
5883 }
5884
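/*
 * ixl_stats_update --
 *	Statistics worker.  Fold the deltas of the per-port (GLPRT) and
 *	per-VSI (GLV) hardware counters into the driver's event counters.
 */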
5885 static void
5886 ixl_stats_update(void *xsc)
5887 {
5888 struct ixl_softc *sc = xsc;
5889 struct ixl_stats_counters *isc;
5890 uint64_t delta;
5891
5892 isc = &sc->sc_stats_counters;
5893
5894 /* errors */
5895 delta = ixl_stat_delta(sc,
5896 0, I40E_GLPRT_CRCERRS(sc->sc_port),
5897 &isc->isc_crc_errors_offset, isc->isc_has_offset);
5898 atomic_add_64(&isc->isc_crc_errors.ev_count, delta);
5899
5900 delta = ixl_stat_delta(sc,
5901 0, I40E_GLPRT_ILLERRC(sc->sc_port),
5902 &isc->isc_illegal_bytes_offset, isc->isc_has_offset);
5903 atomic_add_64(&isc->isc_illegal_bytes.ev_count, delta);
5904
5905 /* rx */
5906 delta = ixl_stat_delta(sc,
5907 I40E_GLPRT_GORCH(sc->sc_port), I40E_GLPRT_GORCL(sc->sc_port),
5908 &isc->isc_rx_bytes_offset, isc->isc_has_offset);
5909 atomic_add_64(&isc->isc_rx_bytes.ev_count, delta);
5910
5911 delta = ixl_stat_delta(sc,
5912 0, I40E_GLPRT_RDPC(sc->sc_port),
5913 &isc->isc_rx_discards_offset, isc->isc_has_offset);
5914 atomic_add_64(&isc->isc_rx_discards.ev_count, delta);
5915
5916 delta = ixl_stat_delta(sc,
5917 I40E_GLPRT_UPRCH(sc->sc_port), I40E_GLPRT_UPRCL(sc->sc_port),
5918 &isc->isc_rx_unicast_offset, isc->isc_has_offset);
5919 atomic_add_64(&isc->isc_rx_unicast.ev_count, delta);
5920
5921 delta = ixl_stat_delta(sc,
5922 I40E_GLPRT_MPRCH(sc->sc_port), I40E_GLPRT_MPRCL(sc->sc_port),
5923 &isc->isc_rx_multicast_offset, isc->isc_has_offset);
5924 atomic_add_64(&isc->isc_rx_multicast.ev_count, delta);
5925
5926 delta = ixl_stat_delta(sc,
5927 I40E_GLPRT_BPRCH(sc->sc_port), I40E_GLPRT_BPRCL(sc->sc_port),
5928 &isc->isc_rx_broadcast_offset, isc->isc_has_offset);
5929 atomic_add_64(&isc->isc_rx_broadcast.ev_count, delta);
5930
5931 /* Packet size stats rx */
5932 delta = ixl_stat_delta(sc,
5933 I40E_GLPRT_PRC64H(sc->sc_port), I40E_GLPRT_PRC64L(sc->sc_port),
5934 &isc->isc_rx_size_64_offset, isc->isc_has_offset);
5935 atomic_add_64(&isc->isc_rx_size_64.ev_count, delta);
5936
5937 delta = ixl_stat_delta(sc,
5938 I40E_GLPRT_PRC127H(sc->sc_port), I40E_GLPRT_PRC127L(sc->sc_port),
5939 &isc->isc_rx_size_127_offset, isc->isc_has_offset);
5940 atomic_add_64(&isc->isc_rx_size_127.ev_count, delta);
5941
5942 delta = ixl_stat_delta(sc,
5943 I40E_GLPRT_PRC255H(sc->sc_port), I40E_GLPRT_PRC255L(sc->sc_port),
5944 &isc->isc_rx_size_255_offset, isc->isc_has_offset);
5945 atomic_add_64(&isc->isc_rx_size_255.ev_count, delta);
5946
5947 delta = ixl_stat_delta(sc,
5948 I40E_GLPRT_PRC511H(sc->sc_port), I40E_GLPRT_PRC511L(sc->sc_port),
5949 &isc->isc_rx_size_511_offset, isc->isc_has_offset);
5950 atomic_add_64(&isc->isc_rx_size_511.ev_count, delta);
5951
5952 delta = ixl_stat_delta(sc,
5953 I40E_GLPRT_PRC1023H(sc->sc_port), I40E_GLPRT_PRC1023L(sc->sc_port),
5954 &isc->isc_rx_size_1023_offset, isc->isc_has_offset);
5955 atomic_add_64(&isc->isc_rx_size_1023.ev_count, delta);
5956
5957 delta = ixl_stat_delta(sc,
5958 I40E_GLPRT_PRC1522H(sc->sc_port), I40E_GLPRT_PRC1522L(sc->sc_port),
5959 &isc->isc_rx_size_1522_offset, isc->isc_has_offset);
5960 atomic_add_64(&isc->isc_rx_size_1522.ev_count, delta);
5961
5962 delta = ixl_stat_delta(sc,
5963 I40E_GLPRT_PRC9522H(sc->sc_port), I40E_GLPRT_PRC9522L(sc->sc_port),
5964 &isc->isc_rx_size_big_offset, isc->isc_has_offset);
5965 atomic_add_64(&isc->isc_rx_size_big.ev_count, delta);
5966
5967 delta = ixl_stat_delta(sc,
5968 0, I40E_GLPRT_RUC(sc->sc_port),
5969 &isc->isc_rx_undersize_offset, isc->isc_has_offset);
5970 atomic_add_64(&isc->isc_rx_undersize.ev_count, delta);
5971
5972 delta = ixl_stat_delta(sc,
5973 0, I40E_GLPRT_ROC(sc->sc_port),
5974 &isc->isc_rx_oversize_offset, isc->isc_has_offset);
5975 atomic_add_64(&isc->isc_rx_oversize.ev_count, delta);
5976
5977 /* tx */
5978 delta = ixl_stat_delta(sc,
5979 I40E_GLPRT_GOTCH(sc->sc_port), I40E_GLPRT_GOTCL(sc->sc_port),
5980 &isc->isc_tx_bytes_offset, isc->isc_has_offset);
5981 atomic_add_64(&isc->isc_tx_bytes.ev_count, delta);
5982
5983 delta = ixl_stat_delta(sc,
5984 0, I40E_GLPRT_TDOLD(sc->sc_port),
5985 &isc->isc_tx_dropped_link_down_offset, isc->isc_has_offset);
5986 atomic_add_64(&isc->isc_tx_dropped_link_down.ev_count, delta);
5987
5988 delta = ixl_stat_delta(sc,
5989 I40E_GLPRT_UPTCH(sc->sc_port), I40E_GLPRT_UPTCL(sc->sc_port),
5990 &isc->isc_tx_unicast_offset, isc->isc_has_offset);
5991 atomic_add_64(&isc->isc_tx_unicast.ev_count, delta);
5992
5993 delta = ixl_stat_delta(sc,
5994 I40E_GLPRT_MPTCH(sc->sc_port), I40E_GLPRT_MPTCL(sc->sc_port),
5995 &isc->isc_tx_multicast_offset, isc->isc_has_offset);
5996 atomic_add_64(&isc->isc_tx_multicast.ev_count, delta);
5997
5998 delta = ixl_stat_delta(sc,
5999 I40E_GLPRT_BPTCH(sc->sc_port), I40E_GLPRT_BPTCL(sc->sc_port),
6000 &isc->isc_tx_broadcast_offset, isc->isc_has_offset);
6001 atomic_add_64(&isc->isc_tx_broadcast.ev_count, delta);
6002
6003 /* Packet size stats tx */
6004 delta = ixl_stat_delta(sc,
6005 I40E_GLPRT_PTC64H(sc->sc_port), I40E_GLPRT_PTC64L(sc->sc_port),
6006 &isc->isc_tx_size_64_offset, isc->isc_has_offset);
6007 atomic_add_64(&isc->isc_tx_size_64.ev_count, delta);
6008
6009 delta = ixl_stat_delta(sc,
6010 I40E_GLPRT_PTC127H(sc->sc_port), I40E_GLPRT_PTC127L(sc->sc_port),
6011 &isc->isc_tx_size_127_offset, isc->isc_has_offset);
6012 atomic_add_64(&isc->isc_tx_size_127.ev_count, delta);
6013
6014 delta = ixl_stat_delta(sc,
6015 I40E_GLPRT_PTC255H(sc->sc_port), I40E_GLPRT_PTC255L(sc->sc_port),
6016 &isc->isc_tx_size_255_offset, isc->isc_has_offset);
6017 atomic_add_64(&isc->isc_tx_size_255.ev_count, delta);
6018
6019 delta = ixl_stat_delta(sc,
6020 I40E_GLPRT_PTC511H(sc->sc_port), I40E_GLPRT_PTC511L(sc->sc_port),
6021 &isc->isc_tx_size_511_offset, isc->isc_has_offset);
6022 atomic_add_64(&isc->isc_tx_size_511.ev_count, delta);
6023
6024 delta = ixl_stat_delta(sc,
6025 I40E_GLPRT_PTC1023H(sc->sc_port), I40E_GLPRT_PTC1023L(sc->sc_port),
6026 &isc->isc_tx_size_1023_offset, isc->isc_has_offset);
6027 atomic_add_64(&isc->isc_tx_size_1023.ev_count, delta);
6028
6029 delta = ixl_stat_delta(sc,
6030 I40E_GLPRT_PTC1522H(sc->sc_port), I40E_GLPRT_PTC1522L(sc->sc_port),
6031 &isc->isc_tx_size_1522_offset, isc->isc_has_offset);
6032 atomic_add_64(&isc->isc_tx_size_1522.ev_count, delta);
6033
6034 delta = ixl_stat_delta(sc,
6035 I40E_GLPRT_PTC9522H(sc->sc_port), I40E_GLPRT_PTC9522L(sc->sc_port),
6036 &isc->isc_tx_size_big_offset, isc->isc_has_offset);
6037 atomic_add_64(&isc->isc_tx_size_big.ev_count, delta);
6038
6039 /* mac faults */
6040 delta = ixl_stat_delta(sc,
6041 0, I40E_GLPRT_MLFC(sc->sc_port),
6042 &isc->isc_mac_local_faults_offset, isc->isc_has_offset);
6043 atomic_add_64(&isc->isc_mac_local_faults.ev_count, delta);
6044
6045 delta = ixl_stat_delta(sc,
6046 0, I40E_GLPRT_MRFC(sc->sc_port),
6047 &isc->isc_mac_remote_faults_offset, isc->isc_has_offset);
6048 atomic_add_64(&isc->isc_mac_remote_faults.ev_count, delta);
6049
6050 /* Flow control (LFC) stats */
6051 delta = ixl_stat_delta(sc,
6052 0, I40E_GLPRT_LXONRXC(sc->sc_port),
6053 &isc->isc_link_xon_rx_offset, isc->isc_has_offset);
6054 atomic_add_64(&isc->isc_link_xon_rx.ev_count, delta);
6055
6056 delta = ixl_stat_delta(sc,
6057 0, I40E_GLPRT_LXONTXC(sc->sc_port),
6058 &isc->isc_link_xon_tx_offset, isc->isc_has_offset);
6059 atomic_add_64(&isc->isc_link_xon_tx.ev_count, delta);
6060
6061 delta = ixl_stat_delta(sc,
6062 0, I40E_GLPRT_LXOFFRXC(sc->sc_port),
6063 &isc->isc_link_xoff_rx_offset, isc->isc_has_offset);
6064 atomic_add_64(&isc->isc_link_xoff_rx.ev_count, delta);
6065
6066 delta = ixl_stat_delta(sc,
6067 0, I40E_GLPRT_LXOFFTXC(sc->sc_port),
6068 &isc->isc_link_xoff_tx_offset, isc->isc_has_offset);
6069 atomic_add_64(&isc->isc_link_xoff_tx.ev_count, delta);
6070
6071 /* fragments */
6072 delta = ixl_stat_delta(sc,
6073 0, I40E_GLPRT_RFC(sc->sc_port),
6074 &isc->isc_rx_fragments_offset, isc->isc_has_offset);
6075 atomic_add_64(&isc->isc_rx_fragments.ev_count, delta);
6076
6077 delta = ixl_stat_delta(sc,
6078 0, I40E_GLPRT_RJC(sc->sc_port),
6079 &isc->isc_rx_jabber_offset, isc->isc_has_offset);
6080 atomic_add_64(&isc->isc_rx_jabber.ev_count, delta);
6081
6082 /* VSI rx counters */
6083 delta = ixl_stat_delta(sc,
6084 0, I40E_GLV_RDPC(sc->sc_vsi_stat_counter_idx),
6085 &isc->isc_vsi_rx_discards_offset, isc->isc_has_offset);
6086 atomic_add_64(&isc->isc_vsi_rx_discards.ev_count, delta);
6087
6088 delta = ixl_stat_delta(sc,
6089 I40E_GLV_GORCH(sc->sc_vsi_stat_counter_idx),
6090 I40E_GLV_GORCL(sc->sc_vsi_stat_counter_idx),
6091 &isc->isc_vsi_rx_bytes_offset, isc->isc_has_offset);
6092 atomic_add_64(&isc->isc_vsi_rx_bytes.ev_count, delta);
6093
6094 delta = ixl_stat_delta(sc,
6095 I40E_GLV_UPRCH(sc->sc_vsi_stat_counter_idx),
6096 I40E_GLV_UPRCL(sc->sc_vsi_stat_counter_idx),
6097 &isc->isc_vsi_rx_unicast_offset, isc->isc_has_offset);
6098 atomic_add_64(&isc->isc_vsi_rx_unicast.ev_count, delta);
6099
6100 delta = ixl_stat_delta(sc,
6101 I40E_GLV_MPRCH(sc->sc_vsi_stat_counter_idx),
6102 I40E_GLV_MPRCL(sc->sc_vsi_stat_counter_idx),
6103 &isc->isc_vsi_rx_multicast_offset, isc->isc_has_offset);
6104 atomic_add_64(&isc->isc_vsi_rx_multicast.ev_count, delta);
6105
6106 delta = ixl_stat_delta(sc,
6107 I40E_GLV_BPRCH(sc->sc_vsi_stat_counter_idx),
6108 I40E_GLV_BPRCL(sc->sc_vsi_stat_counter_idx),
6109 &isc->isc_vsi_rx_broadcast_offset, isc->isc_has_offset);
6110 atomic_add_64(&isc->isc_vsi_rx_broadcast.ev_count, delta);
6111
6112 /* VSI tx counters */
6113 delta = ixl_stat_delta(sc,
6114 0, I40E_GLV_TEPC(sc->sc_vsi_stat_counter_idx),
6115 &isc->isc_vsi_tx_errors_offset, isc->isc_has_offset);
6116 atomic_add_64(&isc->isc_vsi_tx_errors.ev_count, delta);
6117
6118 delta = ixl_stat_delta(sc,
6119 I40E_GLV_GOTCH(sc->sc_vsi_stat_counter_idx),
6120 I40E_GLV_GOTCL(sc->sc_vsi_stat_counter_idx),
6121 &isc->isc_vsi_tx_bytes_offset, isc->isc_has_offset);
6122 atomic_add_64(&isc->isc_vsi_tx_bytes.ev_count, delta);
6123
6124 delta = ixl_stat_delta(sc,
6125 I40E_GLV_UPTCH(sc->sc_vsi_stat_counter_idx),
6126 I40E_GLV_UPTCL(sc->sc_vsi_stat_counter_idx),
6127 &isc->isc_vsi_tx_unicast_offset, isc->isc_has_offset);
6128 atomic_add_64(&isc->isc_vsi_tx_unicast.ev_count, delta);
6129
6130 delta = ixl_stat_delta(sc,
6131 I40E_GLV_MPTCH(sc->sc_vsi_stat_counter_idx),
6132 I40E_GLV_MPTCL(sc->sc_vsi_stat_counter_idx),
6133 &isc->isc_vsi_tx_multicast_offset, isc->isc_has_offset);
6134 atomic_add_64(&isc->isc_vsi_tx_multicast.ev_count, delta);
6135
6136 delta = ixl_stat_delta(sc,
6137 I40E_GLV_BPTCH(sc->sc_vsi_stat_counter_idx),
6138 I40E_GLV_BPTCL(sc->sc_vsi_stat_counter_idx),
6139 &isc->isc_vsi_tx_broadcast_offset, isc->isc_has_offset);
6140 atomic_add_64(&isc->isc_vsi_tx_broadcast.ev_count, delta);
6141 }
6142
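/*
 * ixl_setup_sysctls --
 *	Create the per-device sysctl subtree under hw.<devname> with the
 *	txrx_workqueue, stats_interval and per-direction Rx/Tx processing
 *	limit knobs.
 */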
6143 static int
6144 ixl_setup_sysctls(struct ixl_softc *sc)
6145 {
6146 const char *devname;
6147 struct sysctllog **log;
6148 const struct sysctlnode *rnode, *rxnode, *txnode;
6149 int error;
6150
6151 log = &sc->sc_sysctllog;
6152 devname = device_xname(sc->sc_dev);
6153
6154 error = sysctl_createv(log, 0, NULL, &rnode,
6155 0, CTLTYPE_NODE, devname,
6156 SYSCTL_DESCR("ixl information and settings"),
6157 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
6158 if (error)
6159 goto out;
6160
6161 error = sysctl_createv(log, 0, &rnode, NULL,
6162 CTLFLAG_READWRITE, CTLTYPE_BOOL, "txrx_workqueue",
6163 SYSCTL_DESCR("Use workqueue for packet processing"),
6164 NULL, 0, &sc->sc_txrx_workqueue, 0, CTL_CREATE, CTL_EOL);
6165 if (error)
6166 goto out;
6167
6168 error = sysctl_createv(log, 0, &rnode, NULL,
6169 CTLFLAG_READONLY, CTLTYPE_INT, "stats_interval",
6170 SYSCTL_DESCR("Statistics collection interval in milliseconds"),
6171 NULL, 0, &sc->sc_stats_intval, 0, CTL_CREATE, CTL_EOL);
if (error)
goto out;
6172
6173 error = sysctl_createv(log, 0, &rnode, &rxnode,
6174 0, CTLTYPE_NODE, "rx",
6175 SYSCTL_DESCR("ixl information and settings for Rx"),
6176 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
6177 if (error)
6178 goto out;
6179
6180 error = sysctl_createv(log, 0, &rxnode, NULL,
6181 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
6182 SYSCTL_DESCR("max number of Rx packets"
6183 " to process for interrupt processing"),
6184 NULL, 0, &sc->sc_rx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
6185 if (error)
6186 goto out;
6187
6188 error = sysctl_createv(log, 0, &rxnode, NULL,
6189 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
6190 SYSCTL_DESCR("max number of Rx packets"
6191 " to process for deferred processing"),
6192 NULL, 0, &sc->sc_rx_process_limit, 0, CTL_CREATE, CTL_EOL);
6193 if (error)
6194 goto out;
6195
6196 error = sysctl_createv(log, 0, &rnode, &txnode,
6197 0, CTLTYPE_NODE, "tx",
6198 SYSCTL_DESCR("ixl information and settings for Tx"),
6199 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
6200 if (error)
6201 goto out;
6202
6203 error = sysctl_createv(log, 0, &txnode, NULL,
6204 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
6205 SYSCTL_DESCR("max number of Tx packets"
6206 " to process for interrupt processing"),
6207 NULL, 0, &sc->sc_tx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
6208 if (error)
6209 goto out;
6210
6211 error = sysctl_createv(log, 0, &txnode, NULL,
6212 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
6213 SYSCTL_DESCR("max number of Tx packets"
6214 " to process for deferred processing"),
6215 NULL, 0, &sc->sc_tx_process_limit, 0, CTL_CREATE, CTL_EOL);
6216 if (error)
6217 goto out;
6218
6219 out:
6220 if (error) {
6221 aprint_error_dev(sc->sc_dev,
6222 "unable to create sysctl node\n");
6223 sysctl_teardown(log);
6224 }
6225
6226 return error;
6227 }
6228
6229 static void
6230 ixl_teardown_sysctls(struct ixl_softc *sc)
6231 {
6232
6233 sysctl_teardown(&sc->sc_sysctllog);
6234 }
6235
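/*
 * ixl_workq_create --
 *	Thin wrapper around workqueue_create() that returns the new
 *	workqueue, or NULL on failure.
 */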
6236 static struct workqueue *
6237 ixl_workq_create(const char *name, pri_t prio, int ipl, int flags)
6238 {
6239 struct workqueue *wq;
6240 int error;
6241
6242 error = workqueue_create(&wq, name, ixl_workq_work, NULL,
6243 prio, ipl, flags);
6244
6245 if (error)
6246 return NULL;
6247
6248 return wq;
6249 }
6250
6251 static void
6252 ixl_workq_destroy(struct workqueue *wq)
6253 {
6254
6255 workqueue_destroy(wq);
6256 }
6257
6258 static void
6259 ixl_work_set(struct ixl_work *work, void (*func)(void *), void *arg)
6260 {
6261
6262 memset(work, 0, sizeof(*work));
6263 work->ixw_func = func;
6264 work->ixw_arg = arg;
6265 }
6266
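/*
 * ixl_work_add --
 *	Enqueue a work item unless it is already pending; the ixw_added
 *	flag guards against enqueueing the same item twice.
 */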
6267 static void
6268 ixl_work_add(struct workqueue *wq, struct ixl_work *work)
6269 {
6270 if (atomic_cas_uint(&work->ixw_added, 0, 1) != 0)
6271 return;
6272
6273 workqueue_enqueue(wq, &work->ixw_cookie, NULL);
6274 }
6275
6276 static void
6277 ixl_work_wait(struct workqueue *wq, struct ixl_work *work)
6278 {
6279
6280 workqueue_wait(wq, &work->ixw_cookie);
6281 }
6282
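/*
 * ixl_workq_work --
 *	Workqueue trampoline.  Clear the pending flag before invoking the
 *	handler so that the item may be re-queued while it is running.
 */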
6283 static void
6284 ixl_workq_work(struct work *wk, void *context)
6285 {
6286 struct ixl_work *work;
6287
6288 work = container_of(wk, struct ixl_work, ixw_cookie);
6289
6290 atomic_swap_uint(&work->ixw_added, 0);
6291 work->ixw_func(work->ixw_arg);
6292 }
6293
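/*
 * ixl_rx_ctl_read --
 *	Read an Rx control register through the admin queue
 *	(IXL_AQ_OP_RX_CTL_REG_READ) and return its value in *rv.
 */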
6294 static int
6295 ixl_rx_ctl_read(struct ixl_softc *sc, uint32_t reg, uint32_t *rv)
6296 {
6297 struct ixl_aq_desc iaq;
6298
6299 memset(&iaq, 0, sizeof(iaq));
6300 iaq.iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_REG_READ);
6301 iaq.iaq_param[1] = htole32(reg);
6302
6303 if (ixl_atq_poll(sc, &iaq, 250) != 0)
6304 return ETIMEDOUT;
6305
6306 switch (le16toh(iaq.iaq_retval)) {
6307 case IXL_AQ_RC_OK:
6308 /* success */
6309 break;
6310 case IXL_AQ_RC_EACCES:
6311 return EPERM;
6312 case IXL_AQ_RC_EAGAIN:
6313 return EAGAIN;
6314 default:
6315 return EIO;
6316 }
6317
6318 *rv = le32toh(iaq.iaq_param[3]);
6319 return 0;
6320 }
6321
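/*
 * ixl_rd_rx_csr --
 *	Read an Rx control register.  If the firmware requires admin queue
 *	access (IXL_SC_AQ_FLAG_RXCTL), retry the AQ read a few times on
 *	EAGAIN; otherwise, or if the AQ read keeps failing, fall back to a
 *	direct register read.
 */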
6322 static uint32_t
6323 ixl_rd_rx_csr(struct ixl_softc *sc, uint32_t reg)
6324 {
6325 uint32_t val;
6326 int rv, retry, retry_limit;
6327
6328 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL)) {
6329 retry_limit = 5;
6330 } else {
6331 retry_limit = 0;
6332 }
6333
6334 for (retry = 0; retry < retry_limit; retry++) {
6335 rv = ixl_rx_ctl_read(sc, reg, &val);
6336 if (rv == 0)
6337 return val;
6338 else if (rv == EAGAIN)
6339 delaymsec(1);
6340 else
6341 break;
6342 }
6343
6344 val = ixl_rd(sc, reg);
6345
6346 return val;
6347 }
6348
6349 static int
6350 ixl_rx_ctl_write(struct ixl_softc *sc, uint32_t reg, uint32_t value)
6351 {
6352 struct ixl_aq_desc iaq;
6353
6354 memset(&iaq, 0, sizeof(iaq));
6355 iaq.iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_REG_WRITE);
6356 iaq.iaq_param[1] = htole32(reg);
6357 iaq.iaq_param[3] = htole32(value);
6358
6359 if (ixl_atq_poll(sc, &iaq, 250) != 0)
6360 return ETIMEDOUT;
6361
6362 switch (le16toh(iaq.iaq_retval)) {
6363 case IXL_AQ_RC_OK:
6364 /* success */
6365 break;
6366 case IXL_AQ_RC_EACCES:
6367 return EPERM;
6368 case IXL_AQ_RC_EAGAIN:
6369 return EAGAIN;
6370 default:
6371 return EIO;
6372 }
6373
6374 return 0;
6375 }
6376
6377 static void
6378 ixl_wr_rx_csr(struct ixl_softc *sc, uint32_t reg, uint32_t value)
6379 {
6380 int rv, retry, retry_limit;
6381
6382 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL)) {
6383 retry_limit = 5;
6384 } else {
6385 retry_limit = 0;
6386 }
6387
6388 for (retry = 0; retry < retry_limit; retry++) {
6389 rv = ixl_rx_ctl_write(sc, reg, value);
6390 if (rv == 0)
6391 return;
6392 else if (rv == EAGAIN)
6393 delaymsec(1);
6394 else
6395 break;
6396 }
6397
6398 ixl_wr(sc, reg, value);
6399 }
6400
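/*
 * ixl_nvm_lock --
 *	Acquire NVM ownership with the Request Resource admin command when
 *	the firmware requires locking (IXL_SC_AQ_FLAG_NVMLOCK).  'R'
 *	requests read access, anything else write access.
 */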
6401 static int
6402 ixl_nvm_lock(struct ixl_softc *sc, char rw)
6403 {
6404 struct ixl_aq_desc iaq;
6405 struct ixl_aq_req_resource_param *param;
6406 int rv;
6407
6408 if (!ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK))
6409 return 0;
6410
6411 memset(&iaq, 0, sizeof(iaq));
6412 iaq.iaq_opcode = htole16(IXL_AQ_OP_REQUEST_RESOURCE);
6413
6414 param = (struct ixl_aq_req_resource_param *)&iaq.iaq_param;
6415 param->resource_id = htole16(IXL_AQ_RESOURCE_ID_NVM);
6416 if (rw == 'R') {
6417 param->access_type = htole16(IXL_AQ_RESOURCE_ACCES_READ);
6418 } else {
6419 param->access_type = htole16(IXL_AQ_RESOURCE_ACCES_WRITE);
6420 }
6421
6422 rv = ixl_atq_poll(sc, &iaq, 250);
6423
6424 if (rv != 0)
6425 return ETIMEDOUT;
6426
6427 switch (le16toh(iaq.iaq_retval)) {
6428 case IXL_AQ_RC_OK:
6429 break;
6430 case IXL_AQ_RC_EACCES:
6431 return EACCES;
6432 case IXL_AQ_RC_EBUSY:
6433 return EBUSY;
6434 case IXL_AQ_RC_EPERM:
6435 return EPERM;
default:
return EIO;
6436 }
6437
6438 return 0;
6439 }
6440
6441 static int
6442 ixl_nvm_unlock(struct ixl_softc *sc)
6443 {
6444 struct ixl_aq_desc iaq;
6445 struct ixl_aq_rel_resource_param *param;
6446 int rv;
6447
6448 if (!ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK))
6449 return 0;
6450
6451 memset(&iaq, 0, sizeof(iaq));
6452 iaq.iaq_opcode = htole16(IXL_AQ_OP_RELEASE_RESOURCE);
6453
6454 param = (struct ixl_aq_rel_resource_param *)&iaq.iaq_param;
6455 param->resource_id = htole16(IXL_AQ_RESOURCE_ID_NVM);
6456
6457 rv = ixl_atq_poll(sc, &iaq, 250);
6458
6459 if (rv != 0)
6460 return ETIMEDOUT;
6461
6462 switch (le16toh(iaq.iaq_retval)) {
6463 case IXL_AQ_RC_OK:
6464 break;
6465 default:
6466 return EIO;
6467 }
6468 return 0;
6469 }
6470
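/*
 * ixl_srdone_poll --
 *	Wait for the Shadow RAM interface to report completion (the DONE
 *	bit in GLNVM_SRCTL), polling up to IXL_SRRD_SRCTL_ATTEMPTS times.
 *	Returns 0 on completion, -1 on timeout.
 */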
6471 static int
6472 ixl_srdone_poll(struct ixl_softc *sc)
6473 {
6474 int wait_count;
6475 uint32_t reg;
6476
6477 for (wait_count = 0; wait_count < IXL_SRRD_SRCTL_ATTEMPTS;
6478 wait_count++) {
6479 reg = ixl_rd(sc, I40E_GLNVM_SRCTL);
6480 if (ISSET(reg, I40E_GLNVM_SRCTL_DONE_MASK))
6481 break;
6482
6483 delaymsec(5);
6484 }
6485
6486 if (wait_count == IXL_SRRD_SRCTL_ATTEMPTS)
6487 return -1;
6488
6489 return 0;
6490 }
6491
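/*
 * ixl_nvm_read_srctl --
 *	Read one 16-bit word from Shadow RAM through the
 *	GLNVM_SRCTL/GLNVM_SRDATA register interface.
 */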
6492 static int
6493 ixl_nvm_read_srctl(struct ixl_softc *sc, uint16_t offset, uint16_t *data)
6494 {
6495 uint32_t reg;
6496
6497 if (ixl_srdone_poll(sc) != 0)
6498 return ETIMEDOUT;
6499
6500 reg = ((uint32_t)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
6501 __BIT(I40E_GLNVM_SRCTL_START_SHIFT);
6502 ixl_wr(sc, I40E_GLNVM_SRCTL, reg);
6503
6504 if (ixl_srdone_poll(sc) != 0) {
6505 aprint_debug("NVM read error: couldn't access "
6506 "Shadow RAM address: 0x%x\n", offset);
6507 return ETIMEDOUT;
6508 }
6509
6510 reg = ixl_rd(sc, I40E_GLNVM_SRDATA);
6511 *data = (uint16_t)__SHIFTOUT(reg, I40E_GLNVM_SRDATA_RDDATA_MASK);
6512
6513 return 0;
6514 }
6515
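/*
 * ixl_nvm_read_aq --
 *	Read len bytes of NVM starting at word offset offset_word with the
 *	NVM Read admin command, using the shared admin queue buffer as a
 *	bounce buffer.
 */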
6516 static int
6517 ixl_nvm_read_aq(struct ixl_softc *sc, uint16_t offset_word,
6518 void *data, size_t len)
6519 {
6520 struct ixl_dmamem *idm;
6521 struct ixl_aq_desc iaq;
6522 struct ixl_aq_nvm_param *param;
6523 uint32_t offset_bytes;
6524 int rv;
6525
6526 idm = &sc->sc_aqbuf;
6527 if (len > IXL_DMA_LEN(idm))
6528 return ENOMEM;
6529
6530 memset(IXL_DMA_KVA(idm), 0, IXL_DMA_LEN(idm));
6531 memset(&iaq, 0, sizeof(iaq));
6532 iaq.iaq_opcode = htole16(IXL_AQ_OP_NVM_READ);
6533 iaq.iaq_flags = htole16(IXL_AQ_BUF |
6534 ((len > I40E_AQ_LARGE_BUF) ? IXL_AQ_LB : 0));
6535 iaq.iaq_datalen = htole16(len);
6536 ixl_aq_dva(&iaq, IXL_DMA_DVA(idm));
6537
6538 param = (struct ixl_aq_nvm_param *)iaq.iaq_param;
6539 param->command_flags = IXL_AQ_NVM_LAST_CMD;
6540 param->module_pointer = 0;
6541 param->length = htole16(len);
6542 offset_bytes = (uint32_t)offset_word * 2;
6543 offset_bytes &= 0x00FFFFFF;
6544 param->offset = htole32(offset_bytes);
6545
6546 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
6547 BUS_DMASYNC_PREREAD);
6548
6549 rv = ixl_atq_poll(sc, &iaq, 250);
6550
6551 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
6552 BUS_DMASYNC_POSTREAD);
6553
6554 if (rv != 0) {
6555 return ETIMEDOUT;
6556 }
6557
6558 switch (le16toh(iaq.iaq_retval)) {
6559 case IXL_AQ_RC_OK:
6560 break;
6561 case IXL_AQ_RC_EPERM:
6562 return EPERM;
6563 case IXL_AQ_RC_EINVAL:
6564 return EINVAL;
6565 case IXL_AQ_RC_EBUSY:
6566 return EBUSY;
6567 case IXL_AQ_RC_EIO:
6568 default:
6569 return EIO;
6570 }
6571
6572 memcpy(data, IXL_DMA_KVA(idm), len);
6573
6574 return 0;
6575 }
6576
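/*
 * ixl_rd16_nvm --
 *	Read one 16-bit NVM word while holding the NVM lock, using the
 *	admin queue read (which returns little-endian data) when the
 *	firmware supports it (IXL_SC_AQ_FLAG_NVMREAD) and the SRCTL
 *	register interface otherwise.
 */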
6577 static int
6578 ixl_rd16_nvm(struct ixl_softc *sc, uint16_t offset, uint16_t *data)
6579 {
6580 int error;
6581 uint16_t buf;
6582
6583 error = ixl_nvm_lock(sc, 'R');
6584 if (error)
6585 return error;
6586
6587 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMREAD)) {
6588 error = ixl_nvm_read_aq(sc, offset,
6589 &buf, sizeof(buf));
6590 if (error == 0)
6591 *data = le16toh(buf);
6592 } else {
6593 error = ixl_nvm_read_srctl(sc, offset, &buf);
6594 if (error == 0)
6595 *data = buf;
6596 }
6597
6598 ixl_nvm_unlock(sc);
6599
6600 return error;
6601 }
6602
6603 MODULE(MODULE_CLASS_DRIVER, if_ixl, "pci");
6604
6605 #ifdef _MODULE
6606 #include "ioconf.c"
6607 #endif
6608
6609 #ifdef _MODULE
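/*
 * ixl_parse_modprop --
 *	Apply module properties (nomsix, stats_interval, nqps_limit,
 *	rx_ndescs, tx_ndescs) to the driver tunables before the component
 *	is configured.
 */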
6610 static void
6611 ixl_parse_modprop(prop_dictionary_t dict)
6612 {
6613 prop_object_t obj;
6614 int64_t val;
6615 uint64_t uval;
6616
6617 if (dict == NULL)
6618 return;
6619
6620 obj = prop_dictionary_get(dict, "nomsix");
6621 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_BOOL) {
6622 ixl_param_nomsix = prop_bool_true((prop_bool_t)obj);
6623 }
6624
6625 obj = prop_dictionary_get(dict, "stats_interval");
6626 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
6627 val = prop_number_integer_value((prop_number_t)obj);
6628
6629 /* sanity range; the exact bounds are arbitrary */
6630 if (100 < val && val < 180000) {
6631 ixl_param_stats_interval = val;
6632 }
6633 }
6634
6635 obj = prop_dictionary_get(dict, "nqps_limit");
6636 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
6637 val = prop_number_integer_value((prop_number_t)obj);
6638
6639 if (val <= INT32_MAX)
6640 ixl_param_nqps_limit = val;
6641 }
6642
6643 obj = prop_dictionary_get(dict, "rx_ndescs");
6644 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
6645 uval = prop_number_unsigned_integer_value((prop_number_t)obj);
6646
6647 if (uval > 8)
6648 ixl_param_rx_ndescs = uval;
6649 }
6650
6651 obj = prop_dictionary_get(dict, "tx_ndescs");
6652 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
6653 uval = prop_number_unsigned_integer_value((prop_number_t)obj);
6654
6655 if (uval > IXL_TX_PKT_DESCS)
6656 ixl_param_tx_ndescs = uval;
6657 }
6658
6659 }
6660 #endif
6661
6662 static int
6663 if_ixl_modcmd(modcmd_t cmd, void *opaque)
6664 {
6665 int error = 0;
6666
6667 #ifdef _MODULE
6668 switch (cmd) {
6669 case MODULE_CMD_INIT:
6670 ixl_parse_modprop((prop_dictionary_t)opaque);
6671 error = config_init_component(cfdriver_ioconf_if_ixl,
6672 cfattach_ioconf_if_ixl, cfdata_ioconf_if_ixl);
6673 break;
6674 case MODULE_CMD_FINI:
6675 error = config_fini_component(cfdriver_ioconf_if_ixl,
6676 cfattach_ioconf_if_ixl, cfdata_ioconf_if_ixl);
6677 break;
6678 default:
6679 error = ENOTTY;
6680 break;
6681 }
6682 #endif
6683
6684 return error;
6685 }
6686