1 /*	$NetBSD: if_ixl.c,v 1.70 2020/07/31 09:25:42 yamaguchi Exp $	*/
2
3 /*
4 * Copyright (c) 2013-2015, Intel Corporation
5 * All rights reserved.
6
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
12 *
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * 3. Neither the name of the Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 /*
35 * Copyright (c) 2016,2017 David Gwynne <dlg (at) openbsd.org>
36 *
37 * Permission to use, copy, modify, and distribute this software for any
38 * purpose with or without fee is hereby granted, provided that the above
39 * copyright notice and this permission notice appear in all copies.
40 *
41 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
42 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
43 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
44 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
45 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
46 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
47 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
48 */
49
50 /*
51 * Copyright (c) 2019 Internet Initiative Japan, Inc.
52 * All rights reserved.
53 *
54 * Redistribution and use in source and binary forms, with or without
55 * modification, are permitted provided that the following conditions
56 * are met:
57 * 1. Redistributions of source code must retain the above copyright
58 * notice, this list of conditions and the following disclaimer.
59 * 2. Redistributions in binary form must reproduce the above copyright
60 * notice, this list of conditions and the following disclaimer in the
61 * documentation and/or other materials provided with the distribution.
62 *
63 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
64 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
65 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
66 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
67 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
68 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
69 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
70 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
71 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
72 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
73 * POSSIBILITY OF SUCH DAMAGE.
74 */
75
76 #include <sys/cdefs.h>
77 __KERNEL_RCSID(0, "$NetBSD: if_ixl.c,v 1.70 2020/07/31 09:25:42 yamaguchi Exp $");
78
79 #ifdef _KERNEL_OPT
80 #include "opt_net_mpsafe.h"
81 #include "opt_if_ixl.h"
82 #endif
83
84 #include <sys/param.h>
85 #include <sys/types.h>
86
87 #include <sys/bitops.h>
88 #include <sys/cpu.h>
89 #include <sys/device.h>
90 #include <sys/evcnt.h>
91 #include <sys/interrupt.h>
92 #include <sys/kmem.h>
93 #include <sys/module.h>
94 #include <sys/mutex.h>
95 #include <sys/pcq.h>
96 #include <sys/syslog.h>
97 #include <sys/workqueue.h>
98
99 #include <sys/bus.h>
100
101 #include <net/bpf.h>
102 #include <net/if.h>
103 #include <net/if_dl.h>
104 #include <net/if_media.h>
105 #include <net/if_ether.h>
106 #include <net/rss_config.h>
107
108 #include <netinet/tcp.h> /* for struct tcphdr */
109 #include <netinet/udp.h> /* for struct udphdr */
110
111 #include <dev/pci/pcivar.h>
112 #include <dev/pci/pcidevs.h>
113
114 #include <dev/pci/if_ixlreg.h>
115 #include <dev/pci/if_ixlvar.h>
116
117 #include <prop/proplib.h>
118
119 struct ixl_softc; /* defined below */
120
121 #define I40E_PF_RESET_WAIT_COUNT 200
122 #define I40E_AQ_LARGE_BUF 512
123
124 /* bitfields for Tx queue mapping in QTX_CTL */
125 #define I40E_QTX_CTL_VF_QUEUE 0x0
126 #define I40E_QTX_CTL_VM_QUEUE 0x1
127 #define I40E_QTX_CTL_PF_QUEUE 0x2
128
129 #define I40E_QUEUE_TYPE_EOL 0x7ff
131
132 #define I40E_QUEUE_TYPE_RX 0x0
133 #define I40E_QUEUE_TYPE_TX 0x1
134 #define I40E_QUEUE_TYPE_PE_CEQ 0x2
135 #define I40E_QUEUE_TYPE_UNKNOWN 0x3
136
137 #define I40E_ITR_INDEX_RX 0x0
138 #define I40E_ITR_INDEX_TX 0x1
139 #define I40E_ITR_INDEX_OTHER 0x2
140 #define I40E_ITR_INDEX_NONE 0x3
141 #define IXL_ITR_RX 0x7a /* 4K intrs/sec */
142 #define IXL_ITR_TX 0x7a /* 4K intrs/sec */
143
144 #define I40E_INTR_NOTX_QUEUE 0
145 #define I40E_INTR_NOTX_INTR 0
146 #define I40E_INTR_NOTX_RX_QUEUE 0
147 #define I40E_INTR_NOTX_TX_QUEUE 1
148 #define I40E_INTR_NOTX_RX_MASK I40E_PFINT_ICR0_QUEUE_0_MASK
149 #define I40E_INTR_NOTX_TX_MASK I40E_PFINT_ICR0_QUEUE_1_MASK
150
151 #define BIT_ULL(a) (1ULL << (a))
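/*
 * Default set of packet classifier types (HENA bits) hashed for RSS;
 * the X722 variant adds several UDP/TCP flow types on top of the
 * XL710 default set.
 */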
152 #define IXL_RSS_HENA_DEFAULT_BASE \
153 (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
154 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
155 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
156 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
157 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
158 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
159 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
160 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
161 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
162 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
163 BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
164 #define IXL_RSS_HENA_DEFAULT_XL710 IXL_RSS_HENA_DEFAULT_BASE
165 #define IXL_RSS_HENA_DEFAULT_X722 (IXL_RSS_HENA_DEFAULT_XL710 | \
166 BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
167 BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
168 BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
169 BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
170 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
171 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK))
172 #define I40E_HASH_LUT_SIZE_128 0
173 #define IXL_RSS_KEY_SIZE_REG 13
174
175 #define IXL_ICR0_CRIT_ERR_MASK \
176 (I40E_PFINT_ICR0_PCI_EXCEPTION_MASK | \
177 I40E_PFINT_ICR0_ECC_ERR_MASK | \
178 I40E_PFINT_ICR0_PE_CRITERR_MASK)
179
180 #define IXL_QUEUE_MAX_XL710 64
181 #define IXL_QUEUE_MAX_X722 128
182
183 #define IXL_TX_PKT_DESCS 8
184 #define IXL_TX_PKT_MAXSIZE (MCLBYTES * IXL_TX_PKT_DESCS)
185 #define IXL_TX_QUEUE_ALIGN 128
186 #define IXL_RX_QUEUE_ALIGN 128
187
188 #define IXL_MCLBYTES (MCLBYTES - ETHER_ALIGN)
189 #define IXL_MTU_ETHERLEN ETHER_HDR_LEN \
190 + ETHER_CRC_LEN
191 #if 0
192 #define IXL_MAX_MTU (9728 - IXL_MTU_ETHERLEN)
193 #else
194 /* (dbuff * 5) - ETHER_HDR_LEN - ETHER_CRC_LEN */
195 #define IXL_MAX_MTU (9600 - IXL_MTU_ETHERLEN)
196 #endif
197 #define IXL_MIN_MTU (ETHER_MIN_LEN - ETHER_CRC_LEN)
198
199 #define IXL_PCIREG PCI_MAPREG_START
200
201 #define IXL_ITR0 0x0
202 #define IXL_ITR1 0x1
203 #define IXL_ITR2 0x2
204 #define IXL_NOITR 0x3
205
206 #define IXL_AQ_NUM 256
207 #define IXL_AQ_MASK (IXL_AQ_NUM - 1)
208 #define IXL_AQ_ALIGN 64 /* lol */
209 #define IXL_AQ_BUFLEN 4096
210
211 #define IXL_HMC_ROUNDUP 512
212 #define IXL_HMC_PGSIZE 4096
213 #define IXL_HMC_DVASZ sizeof(uint64_t)
214 #define IXL_HMC_PGS (IXL_HMC_PGSIZE / IXL_HMC_DVASZ)
215 #define IXL_HMC_L2SZ (IXL_HMC_PGSIZE * IXL_HMC_PGS)
216 #define IXL_HMC_PDVALID 1ULL
217
218 #define IXL_ATQ_EXEC_TIMEOUT (10 * hz)
219
220 #define IXL_SRRD_SRCTL_ATTEMPTS 100000
221
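/*
 * Register offsets and masks for the admin transmit (ATQ) and admin
 * receive (ARQ) queues, kept in a table (see ixl_pf_aq_regs below) so
 * the admin queue code is not tied to the PF register layout.
 */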
222 struct ixl_aq_regs {
223 bus_size_t atq_tail;
224 bus_size_t atq_head;
225 bus_size_t atq_len;
226 bus_size_t atq_bal;
227 bus_size_t atq_bah;
228
229 bus_size_t arq_tail;
230 bus_size_t arq_head;
231 bus_size_t arq_len;
232 bus_size_t arq_bal;
233 bus_size_t arq_bah;
234
235 uint32_t atq_len_enable;
236 uint32_t atq_tail_mask;
237 uint32_t atq_head_mask;
238
239 uint32_t arq_len_enable;
240 uint32_t arq_tail_mask;
241 uint32_t arq_head_mask;
242 };
243
244 struct ixl_phy_type {
245 uint64_t phy_type;
246 uint64_t ifm_type;
247 };
248
249 struct ixl_speed_type {
250 uint8_t dev_speed;
251 uint64_t net_speed;
252 };
253
254 struct ixl_aq_buf {
255 SIMPLEQ_ENTRY(ixl_aq_buf)
256 aqb_entry;
257 void *aqb_data;
258 bus_dmamap_t aqb_map;
259 bus_dma_segment_t aqb_seg;
260 size_t aqb_size;
261 int aqb_nsegs;
262 };
263 SIMPLEQ_HEAD(ixl_aq_bufs, ixl_aq_buf);
264
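/*
 * A single contiguous bus_dma(9) allocation: the map, backing segment,
 * size and kernel mapping, accessed through the IXL_DMA_*() macros below.
 */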
265 struct ixl_dmamem {
266 bus_dmamap_t ixm_map;
267 bus_dma_segment_t ixm_seg;
268 int ixm_nsegs;
269 size_t ixm_size;
270 void *ixm_kva;
271 };
272
273 #define IXL_DMA_MAP(_ixm) ((_ixm)->ixm_map)
274 #define IXL_DMA_DVA(_ixm) ((_ixm)->ixm_map->dm_segs[0].ds_addr)
275 #define IXL_DMA_KVA(_ixm) ((void *)(_ixm)->ixm_kva)
276 #define IXL_DMA_LEN(_ixm) ((_ixm)->ixm_size)
277
278 struct ixl_hmc_entry {
279 uint64_t hmc_base;
280 uint32_t hmc_count;
281 uint64_t hmc_size;
282 };
283
284 enum ixl_hmc_types {
285 IXL_HMC_LAN_TX = 0,
286 IXL_HMC_LAN_RX,
287 IXL_HMC_FCOE_CTX,
288 IXL_HMC_FCOE_FILTER,
289 IXL_HMC_COUNT
290 };
291
292 struct ixl_hmc_pack {
293 uint16_t offset;
294 uint16_t width;
295 uint16_t lsb;
296 };
297
298 /*
299  * these hmc objects have weird sizes and alignments, so these are abstract
300  * representations of them that are convenient for C code to populate.
301  *
302  * the packing code relies on little-endian values being stored in the fields,
303  * on no high bits being set in the fields, and on the fields being packed in
304  * the same order as they appear in the ctx structure.
305  */
306
307 struct ixl_hmc_rxq {
308 uint16_t head;
309 uint8_t cpuid;
310 uint64_t base;
311 #define IXL_HMC_RXQ_BASE_UNIT 128
312 uint16_t qlen;
313 uint16_t dbuff;
314 #define IXL_HMC_RXQ_DBUFF_UNIT 128
315 uint8_t hbuff;
316 #define IXL_HMC_RXQ_HBUFF_UNIT 64
317 uint8_t dtype;
318 #define IXL_HMC_RXQ_DTYPE_NOSPLIT 0x0
319 #define IXL_HMC_RXQ_DTYPE_HSPLIT 0x1
320 #define IXL_HMC_RXQ_DTYPE_SPLIT_ALWAYS 0x2
321 uint8_t dsize;
322 #define IXL_HMC_RXQ_DSIZE_16 0
323 #define IXL_HMC_RXQ_DSIZE_32 1
324 uint8_t crcstrip;
325 uint8_t fc_ena;
326 uint8_t l2sel;
327 uint8_t hsplit_0;
328 uint8_t hsplit_1;
329 uint8_t showiv;
330 uint16_t rxmax;
331 uint8_t tphrdesc_ena;
332 uint8_t tphwdesc_ena;
333 uint8_t tphdata_ena;
334 uint8_t tphhead_ena;
335 uint8_t lrxqthresh;
336 uint8_t prefena;
337 };
338
339 static const struct ixl_hmc_pack ixl_hmc_pack_rxq[] = {
340 { offsetof(struct ixl_hmc_rxq, head), 13, 0 },
341 { offsetof(struct ixl_hmc_rxq, cpuid), 8, 13 },
342 { offsetof(struct ixl_hmc_rxq, base), 57, 32 },
343 { offsetof(struct ixl_hmc_rxq, qlen), 13, 89 },
344 { offsetof(struct ixl_hmc_rxq, dbuff), 7, 102 },
345 { offsetof(struct ixl_hmc_rxq, hbuff), 5, 109 },
346 { offsetof(struct ixl_hmc_rxq, dtype), 2, 114 },
347 { offsetof(struct ixl_hmc_rxq, dsize), 1, 116 },
348 { offsetof(struct ixl_hmc_rxq, crcstrip), 1, 117 },
349 { offsetof(struct ixl_hmc_rxq, fc_ena), 1, 118 },
350 { offsetof(struct ixl_hmc_rxq, l2sel), 1, 119 },
351 { offsetof(struct ixl_hmc_rxq, hsplit_0), 4, 120 },
352 { offsetof(struct ixl_hmc_rxq, hsplit_1), 2, 124 },
353 { offsetof(struct ixl_hmc_rxq, showiv), 1, 127 },
354 { offsetof(struct ixl_hmc_rxq, rxmax), 14, 174 },
355 { offsetof(struct ixl_hmc_rxq, tphrdesc_ena), 1, 193 },
356 { offsetof(struct ixl_hmc_rxq, tphwdesc_ena), 1, 194 },
357 { offsetof(struct ixl_hmc_rxq, tphdata_ena), 1, 195 },
358 { offsetof(struct ixl_hmc_rxq, tphhead_ena), 1, 196 },
359 { offsetof(struct ixl_hmc_rxq, lrxqthresh), 3, 198 },
360 { offsetof(struct ixl_hmc_rxq, prefena), 1, 201 },
361 };
362
363 #define IXL_HMC_RXQ_MINSIZE (201 + 1)
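/*
 * For example, the rxmax field above (width 14, lsb 174) is packed into
 * bits 174..187 of the receive queue context, i.e. starting at bit 6 of
 * byte 21.  IXL_HMC_RXQ_MINSIZE is the highest bit used (prefena, bit 201)
 * plus one.
 */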
364
365 struct ixl_hmc_txq {
366 uint16_t head;
367 uint8_t new_context;
368 uint64_t base;
369 #define IXL_HMC_TXQ_BASE_UNIT 128
370 uint8_t fc_ena;
371 uint8_t timesync_ena;
372 uint8_t fd_ena;
373 uint8_t alt_vlan_ena;
374 uint8_t cpuid;
375 uint16_t thead_wb;
376 uint8_t head_wb_ena;
377 #define IXL_HMC_TXQ_DESC_WB 0
378 #define IXL_HMC_TXQ_HEAD_WB 1
379 uint16_t qlen;
380 uint8_t tphrdesc_ena;
381 uint8_t tphrpacket_ena;
382 uint8_t tphwdesc_ena;
383 uint64_t head_wb_addr;
384 uint32_t crc;
385 uint16_t rdylist;
386 uint8_t rdylist_act;
387 };
388
389 static const struct ixl_hmc_pack ixl_hmc_pack_txq[] = {
390 { offsetof(struct ixl_hmc_txq, head), 13, 0 },
391 { offsetof(struct ixl_hmc_txq, new_context), 1, 30 },
392 { offsetof(struct ixl_hmc_txq, base), 57, 32 },
393 { offsetof(struct ixl_hmc_txq, fc_ena), 1, 89 },
394 { offsetof(struct ixl_hmc_txq, timesync_ena), 1, 90 },
395 { offsetof(struct ixl_hmc_txq, fd_ena), 1, 91 },
396 { offsetof(struct ixl_hmc_txq, alt_vlan_ena), 1, 92 },
397 { offsetof(struct ixl_hmc_txq, cpuid), 8, 96 },
398 /* line 1 */
399 { offsetof(struct ixl_hmc_txq, thead_wb), 13, 0 + 128 },
400 { offsetof(struct ixl_hmc_txq, head_wb_ena), 1, 32 + 128 },
401 { offsetof(struct ixl_hmc_txq, qlen), 13, 33 + 128 },
402 { offsetof(struct ixl_hmc_txq, tphrdesc_ena), 1, 46 + 128 },
403 { offsetof(struct ixl_hmc_txq, tphrpacket_ena), 1, 47 + 128 },
404 { offsetof(struct ixl_hmc_txq, tphwdesc_ena), 1, 48 + 128 },
405 { offsetof(struct ixl_hmc_txq, head_wb_addr), 64, 64 + 128 },
406 /* line 7 */
407 { offsetof(struct ixl_hmc_txq, crc), 32, 0 + (7*128) },
408 { offsetof(struct ixl_hmc_txq, rdylist), 10, 84 + (7*128) },
409 { offsetof(struct ixl_hmc_txq, rdylist_act), 1, 94 + (7*128) },
410 };
411
412 #define IXL_HMC_TXQ_MINSIZE (94 + (7*128) + 1)
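/*
 * The transmit queue context spans several 128-bit HMC lines, hence the
 * "+ 128" and "+ (7*128)" bit offsets in the table above.
 */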
413
414 struct ixl_work {
415 struct work ixw_cookie;
416 void (*ixw_func)(void *);
417 void *ixw_arg;
418 unsigned int ixw_added;
419 };
420 #define IXL_WORKQUEUE_PRI PRI_SOFTNET
421
422 struct ixl_tx_map {
423 struct mbuf *txm_m;
424 bus_dmamap_t txm_map;
425 unsigned int txm_eop;
426 };
427
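/*
 * Per-queue transmit state: producer/consumer indices into the descriptor
 * ring in txr_mem, per-slot DMA maps in txr_maps, and a pcq plus softint
 * used to defer packets queued via if_transmit.
 */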
428 struct ixl_tx_ring {
429 kmutex_t txr_lock;
430 struct ixl_softc *txr_sc;
431
432 unsigned int txr_prod;
433 unsigned int txr_cons;
434
435 struct ixl_tx_map *txr_maps;
436 struct ixl_dmamem txr_mem;
437
438 bus_size_t txr_tail;
439 unsigned int txr_qid;
440 pcq_t *txr_intrq;
441 void *txr_si;
442
443 struct evcnt txr_defragged;
444 struct evcnt txr_defrag_failed;
445 struct evcnt txr_pcqdrop;
446 struct evcnt txr_transmitdef;
447 struct evcnt txr_intr;
448 struct evcnt txr_defer;
449 };
450
451 struct ixl_rx_map {
452 struct mbuf *rxm_m;
453 bus_dmamap_t rxm_map;
454 };
455
456 struct ixl_rx_ring {
457 kmutex_t rxr_lock;
458
459 unsigned int rxr_prod;
460 unsigned int rxr_cons;
461
462 struct ixl_rx_map *rxr_maps;
463 struct ixl_dmamem rxr_mem;
464
465 struct mbuf *rxr_m_head;
466 struct mbuf **rxr_m_tail;
467
468 bus_size_t rxr_tail;
469 unsigned int rxr_qid;
470
471 struct evcnt rxr_mgethdr_failed;
472 struct evcnt rxr_mgetcl_failed;
473 struct evcnt rxr_mbuf_load_failed;
474 struct evcnt rxr_intr;
475 struct evcnt rxr_defer;
476 };
477
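/*
 * A queue pair bundles one tx and one rx ring serviced by the same
 * interrupt; qp_si and qp_work defer processing to a softint or to the
 * per-CPU workqueue, depending on qp_workqueue.
 */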
478 struct ixl_queue_pair {
479 struct ixl_softc *qp_sc;
480 struct ixl_tx_ring *qp_txr;
481 struct ixl_rx_ring *qp_rxr;
482
483 char qp_name[16];
484
485 void *qp_si;
486 struct work qp_work;
487 bool qp_workqueue;
488 };
489
490 struct ixl_atq {
491 struct ixl_aq_desc iatq_desc;
492 void (*iatq_fn)(struct ixl_softc *,
493 const struct ixl_aq_desc *);
494 };
495 SIMPLEQ_HEAD(ixl_atq_list, ixl_atq);
496
497 struct ixl_product {
498 unsigned int vendor_id;
499 unsigned int product_id;
500 };
501
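/*
 * Each hardware statistic pairs an evcnt with an *_offset snapshot field.
 * A baseline is sampled at attach time (see isc_has_offset and
 * ixl_stats_update()) so that the free-running hardware counters can be
 * reported relative to that baseline.
 */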
502 struct ixl_stats_counters {
503 bool isc_has_offset;
504 struct evcnt isc_crc_errors;
505 uint64_t isc_crc_errors_offset;
506 struct evcnt isc_illegal_bytes;
507 uint64_t isc_illegal_bytes_offset;
508 struct evcnt isc_rx_bytes;
509 uint64_t isc_rx_bytes_offset;
510 struct evcnt isc_rx_discards;
511 uint64_t isc_rx_discards_offset;
512 struct evcnt isc_rx_unicast;
513 uint64_t isc_rx_unicast_offset;
514 struct evcnt isc_rx_multicast;
515 uint64_t isc_rx_multicast_offset;
516 struct evcnt isc_rx_broadcast;
517 uint64_t isc_rx_broadcast_offset;
518 struct evcnt isc_rx_size_64;
519 uint64_t isc_rx_size_64_offset;
520 struct evcnt isc_rx_size_127;
521 uint64_t isc_rx_size_127_offset;
522 struct evcnt isc_rx_size_255;
523 uint64_t isc_rx_size_255_offset;
524 struct evcnt isc_rx_size_511;
525 uint64_t isc_rx_size_511_offset;
526 struct evcnt isc_rx_size_1023;
527 uint64_t isc_rx_size_1023_offset;
528 struct evcnt isc_rx_size_1522;
529 uint64_t isc_rx_size_1522_offset;
530 struct evcnt isc_rx_size_big;
531 uint64_t isc_rx_size_big_offset;
532 struct evcnt isc_rx_undersize;
533 uint64_t isc_rx_undersize_offset;
534 struct evcnt isc_rx_oversize;
535 uint64_t isc_rx_oversize_offset;
536 struct evcnt isc_rx_fragments;
537 uint64_t isc_rx_fragments_offset;
538 struct evcnt isc_rx_jabber;
539 uint64_t isc_rx_jabber_offset;
540 struct evcnt isc_tx_bytes;
541 uint64_t isc_tx_bytes_offset;
542 struct evcnt isc_tx_dropped_link_down;
543 uint64_t isc_tx_dropped_link_down_offset;
544 struct evcnt isc_tx_unicast;
545 uint64_t isc_tx_unicast_offset;
546 struct evcnt isc_tx_multicast;
547 uint64_t isc_tx_multicast_offset;
548 struct evcnt isc_tx_broadcast;
549 uint64_t isc_tx_broadcast_offset;
550 struct evcnt isc_tx_size_64;
551 uint64_t isc_tx_size_64_offset;
552 struct evcnt isc_tx_size_127;
553 uint64_t isc_tx_size_127_offset;
554 struct evcnt isc_tx_size_255;
555 uint64_t isc_tx_size_255_offset;
556 struct evcnt isc_tx_size_511;
557 uint64_t isc_tx_size_511_offset;
558 struct evcnt isc_tx_size_1023;
559 uint64_t isc_tx_size_1023_offset;
560 struct evcnt isc_tx_size_1522;
561 uint64_t isc_tx_size_1522_offset;
562 struct evcnt isc_tx_size_big;
563 uint64_t isc_tx_size_big_offset;
564 struct evcnt isc_mac_local_faults;
565 uint64_t isc_mac_local_faults_offset;
566 struct evcnt isc_mac_remote_faults;
567 uint64_t isc_mac_remote_faults_offset;
568 struct evcnt isc_link_xon_rx;
569 uint64_t isc_link_xon_rx_offset;
570 struct evcnt isc_link_xon_tx;
571 uint64_t isc_link_xon_tx_offset;
572 struct evcnt isc_link_xoff_rx;
573 uint64_t isc_link_xoff_rx_offset;
574 struct evcnt isc_link_xoff_tx;
575 uint64_t isc_link_xoff_tx_offset;
576 struct evcnt isc_vsi_rx_discards;
577 uint64_t isc_vsi_rx_discards_offset;
578 struct evcnt isc_vsi_rx_bytes;
579 uint64_t isc_vsi_rx_bytes_offset;
580 struct evcnt isc_vsi_rx_unicast;
581 uint64_t isc_vsi_rx_unicast_offset;
582 struct evcnt isc_vsi_rx_multicast;
583 uint64_t isc_vsi_rx_multicast_offset;
584 struct evcnt isc_vsi_rx_broadcast;
585 uint64_t isc_vsi_rx_broadcast_offset;
586 struct evcnt isc_vsi_tx_errors;
587 uint64_t isc_vsi_tx_errors_offset;
588 struct evcnt isc_vsi_tx_bytes;
589 uint64_t isc_vsi_tx_bytes_offset;
590 struct evcnt isc_vsi_tx_unicast;
591 uint64_t isc_vsi_tx_unicast_offset;
592 struct evcnt isc_vsi_tx_multicast;
593 uint64_t isc_vsi_tx_multicast_offset;
594 struct evcnt isc_vsi_tx_broadcast;
595 uint64_t isc_vsi_tx_broadcast_offset;
596 };
597
598 /*
599  * Locking notes:
600  * + fields in ixl_tx_ring are protected by txr_lock (a spin mutex), and
601  *    fields in ixl_rx_ring are protected by rxr_lock (a spin mutex).
602  *    - no more than one of these locks may be held at a time.
603  * + fields named sc_atq_* in ixl_softc are protected by sc_atq_lock
604  *    (a spin mutex).
605  *    - this lock cannot be held together with txr_lock or rxr_lock.
606  * + fields named sc_arq_* are not protected by any lock.
607  *    - operations on sc_arq_* are done in a single context related to
608  *      sc_arq_task.
609  * + the other fields in ixl_softc are protected by sc_cfg_lock
610  *    (an adaptive mutex).
611  *    - sc_cfg_lock must be taken before any of the other locks and may
612  *      be released after the other lock is released.
613  */
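/*
 * For example, acquiring sc_cfg_lock and then txr_lock is a valid order;
 * holding txr_lock and rxr_lock at the same time, or txr_lock together
 * with sc_atq_lock, is not.
 */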
614
615 struct ixl_softc {
616 device_t sc_dev;
617 struct ethercom sc_ec;
618 bool sc_attached;
619 bool sc_dead;
620 uint32_t sc_port;
621 struct sysctllog *sc_sysctllog;
622 struct workqueue *sc_workq;
623 struct workqueue *sc_workq_txrx;
624 int sc_stats_intval;
625 callout_t sc_stats_callout;
626 struct ixl_work sc_stats_task;
627 struct ixl_stats_counters
628 sc_stats_counters;
629 uint8_t sc_enaddr[ETHER_ADDR_LEN];
630 struct ifmedia sc_media;
631 uint64_t sc_media_status;
632 uint64_t sc_media_active;
633 uint64_t sc_phy_types;
634 uint8_t sc_phy_abilities;
635 uint8_t sc_phy_linkspeed;
636 uint8_t sc_phy_fec_cfg;
637 uint16_t sc_eee_cap;
638 uint32_t sc_eeer_val;
639 uint8_t sc_d3_lpan;
640 kmutex_t sc_cfg_lock;
641 enum i40e_mac_type sc_mac_type;
642 uint32_t sc_rss_table_size;
643 uint32_t sc_rss_table_entry_width;
644 bool sc_txrx_workqueue;
645 u_int sc_tx_process_limit;
646 u_int sc_rx_process_limit;
647 u_int sc_tx_intr_process_limit;
648 u_int sc_rx_intr_process_limit;
649
650 int sc_cur_ec_capenable;
651
652 struct pci_attach_args sc_pa;
653 pci_intr_handle_t *sc_ihp;
654 void **sc_ihs;
655 unsigned int sc_nintrs;
656
657 bus_dma_tag_t sc_dmat;
658 bus_space_tag_t sc_memt;
659 bus_space_handle_t sc_memh;
660 bus_size_t sc_mems;
661
662 uint8_t sc_pf_id;
663 uint16_t sc_uplink_seid; /* le */
664 uint16_t sc_downlink_seid; /* le */
665 uint16_t sc_vsi_number;
666 uint16_t sc_vsi_stat_counter_idx;
667 uint16_t sc_seid;
668 unsigned int sc_base_queue;
669
670 pci_intr_type_t sc_intrtype;
671 unsigned int sc_msix_vector_queue;
672
673 struct ixl_dmamem sc_scratch;
674 struct ixl_dmamem sc_aqbuf;
675
676 const struct ixl_aq_regs *
677 sc_aq_regs;
678 uint32_t sc_aq_flags;
679 #define IXL_SC_AQ_FLAG_RXCTL __BIT(0)
680 #define IXL_SC_AQ_FLAG_NVMLOCK __BIT(1)
681 #define IXL_SC_AQ_FLAG_NVMREAD __BIT(2)
682 #define IXL_SC_AQ_FLAG_RSS __BIT(3)
683
684 kmutex_t sc_atq_lock;
685 kcondvar_t sc_atq_cv;
686 struct ixl_dmamem sc_atq;
687 unsigned int sc_atq_prod;
688 unsigned int sc_atq_cons;
689
690 struct ixl_dmamem sc_arq;
691 struct ixl_work sc_arq_task;
692 struct ixl_aq_bufs sc_arq_idle;
693 struct ixl_aq_buf *sc_arq_live[IXL_AQ_NUM];
694 unsigned int sc_arq_prod;
695 unsigned int sc_arq_cons;
696
697 struct ixl_work sc_link_state_task;
698 struct ixl_atq sc_link_state_atq;
699
700 struct ixl_dmamem sc_hmc_sd;
701 struct ixl_dmamem sc_hmc_pd;
702 struct ixl_hmc_entry sc_hmc_entries[IXL_HMC_COUNT];
703
704 unsigned int sc_tx_ring_ndescs;
705 unsigned int sc_rx_ring_ndescs;
706 unsigned int sc_nqueue_pairs;
707 unsigned int sc_nqueue_pairs_max;
708 unsigned int sc_nqueue_pairs_device;
709 struct ixl_queue_pair *sc_qps;
710 uint32_t sc_itr_rx;
711 uint32_t sc_itr_tx;
712
713 struct evcnt sc_event_atq;
714 struct evcnt sc_event_link;
715 struct evcnt sc_event_ecc_err;
716 struct evcnt sc_event_pci_exception;
717 struct evcnt sc_event_crit_err;
718 };
719
720 #define IXL_TXRX_PROCESS_UNLIMIT UINT_MAX
721 #define IXL_TX_PROCESS_LIMIT 256
722 #define IXL_RX_PROCESS_LIMIT 256
723 #define IXL_TX_INTR_PROCESS_LIMIT 256
724 #define IXL_RX_INTR_PROCESS_LIMIT 0U
725
726 #define IXL_IFCAP_RXCSUM (IFCAP_CSUM_IPv4_Rx | \
727 IFCAP_CSUM_TCPv4_Rx | \
728 IFCAP_CSUM_UDPv4_Rx | \
729 IFCAP_CSUM_TCPv6_Rx | \
730 IFCAP_CSUM_UDPv6_Rx)
731 #define IXL_IFCAP_TXCSUM (IFCAP_CSUM_IPv4_Tx | \
732 IFCAP_CSUM_TCPv4_Tx | \
733 IFCAP_CSUM_UDPv4_Tx | \
734 IFCAP_CSUM_TCPv6_Tx | \
735 IFCAP_CSUM_UDPv6_Tx)
736 #define IXL_CSUM_ALL_OFFLOAD (M_CSUM_IPv4 | \
737 M_CSUM_TCPv4 | M_CSUM_TCPv6 | \
738 M_CSUM_UDPv4 | M_CSUM_UDPv6)
739
740 #define delaymsec(_x) DELAY(1000 * (_x))
741 #ifdef IXL_DEBUG
742 #define DDPRINTF(sc, fmt, args...) \
743 do { \
744 if ((sc) != NULL) { \
745 device_printf( \
746 ((struct ixl_softc *)(sc))->sc_dev, \
747 ""); \
748 } \
749 printf("%s:\t" fmt, __func__, ##args); \
750 } while (0)
751 #else
752 #define DDPRINTF(sc, fmt, args...) __nothing
753 #endif
754 #ifndef IXL_STATS_INTERVAL_MSEC
755 #define IXL_STATS_INTERVAL_MSEC 10000
756 #endif
757 #ifndef IXL_QUEUE_NUM
758 #define IXL_QUEUE_NUM 0
759 #endif
760
761 static bool ixl_param_nomsix = false;
762 static int ixl_param_stats_interval = IXL_STATS_INTERVAL_MSEC;
763 static int ixl_param_nqps_limit = IXL_QUEUE_NUM;
764 static unsigned int ixl_param_tx_ndescs = 1024;
765 static unsigned int ixl_param_rx_ndescs = 1024;
766
767 static enum i40e_mac_type
768 ixl_mactype(pci_product_id_t);
769 static void ixl_pci_csr_setup(pci_chipset_tag_t, pcitag_t);
770 static void ixl_clear_hw(struct ixl_softc *);
771 static int ixl_pf_reset(struct ixl_softc *);
772
773 static int ixl_dmamem_alloc(struct ixl_softc *, struct ixl_dmamem *,
774 bus_size_t, bus_size_t);
775 static void ixl_dmamem_free(struct ixl_softc *, struct ixl_dmamem *);
776
777 static int ixl_arq_fill(struct ixl_softc *);
778 static void ixl_arq_unfill(struct ixl_softc *);
779
780 static int ixl_atq_poll(struct ixl_softc *, struct ixl_aq_desc *,
781 unsigned int);
782 static void ixl_atq_set(struct ixl_atq *,
783 void (*)(struct ixl_softc *, const struct ixl_aq_desc *));
784 static int ixl_atq_post_locked(struct ixl_softc *, struct ixl_atq *);
785 static void ixl_atq_done(struct ixl_softc *);
786 static int ixl_atq_exec(struct ixl_softc *, struct ixl_atq *);
787 static int ixl_atq_exec_locked(struct ixl_softc *, struct ixl_atq *);
788 static int ixl_get_version(struct ixl_softc *);
789 static int ixl_get_nvm_version(struct ixl_softc *);
790 static int ixl_get_hw_capabilities(struct ixl_softc *);
791 static int ixl_pxe_clear(struct ixl_softc *);
792 static int ixl_lldp_shut(struct ixl_softc *);
793 static int ixl_get_mac(struct ixl_softc *);
794 static int ixl_get_switch_config(struct ixl_softc *);
795 static int ixl_phy_mask_ints(struct ixl_softc *);
796 static int ixl_get_phy_info(struct ixl_softc *);
797 static int ixl_set_phy_config(struct ixl_softc *, uint8_t, uint8_t, bool);
798 static int ixl_set_phy_autoselect(struct ixl_softc *);
799 static int ixl_restart_an(struct ixl_softc *);
800 static int ixl_hmc(struct ixl_softc *);
801 static void ixl_hmc_free(struct ixl_softc *);
802 static int ixl_get_vsi(struct ixl_softc *);
803 static int ixl_set_vsi(struct ixl_softc *);
804 static void ixl_set_filter_control(struct ixl_softc *);
805 static void ixl_get_link_status(void *);
806 static int ixl_get_link_status_poll(struct ixl_softc *, int *);
807 static void ixl_get_link_status_done(struct ixl_softc *,
808 const struct ixl_aq_desc *);
809 static int ixl_set_link_status_locked(struct ixl_softc *,
810 const struct ixl_aq_desc *);
811 static uint64_t ixl_search_link_speed(uint8_t);
812 static uint8_t ixl_search_baudrate(uint64_t);
813 static void ixl_config_rss(struct ixl_softc *);
814 static int ixl_add_macvlan(struct ixl_softc *, const uint8_t *,
815 uint16_t, uint16_t);
816 static int ixl_remove_macvlan(struct ixl_softc *, const uint8_t *,
817 uint16_t, uint16_t);
818 static void ixl_arq(void *);
819 static void ixl_hmc_pack(void *, const void *,
820 const struct ixl_hmc_pack *, unsigned int);
821 static uint32_t ixl_rd_rx_csr(struct ixl_softc *, uint32_t);
822 static void ixl_wr_rx_csr(struct ixl_softc *, uint32_t, uint32_t);
823 static int ixl_rd16_nvm(struct ixl_softc *, uint16_t, uint16_t *);
824
825 static int ixl_match(device_t, cfdata_t, void *);
826 static void ixl_attach(device_t, device_t, void *);
827 static int ixl_detach(device_t, int);
828
829 static void ixl_media_add(struct ixl_softc *);
830 static int ixl_media_change(struct ifnet *);
831 static void ixl_media_status(struct ifnet *, struct ifmediareq *);
832 static void ixl_watchdog(struct ifnet *);
833 static int ixl_ioctl(struct ifnet *, u_long, void *);
834 static void ixl_start(struct ifnet *);
835 static int ixl_transmit(struct ifnet *, struct mbuf *);
836 static void ixl_deferred_transmit(void *);
837 static int ixl_intr(void *);
838 static int ixl_queue_intr(void *);
839 static int ixl_other_intr(void *);
840 static void ixl_handle_queue(void *);
841 static void ixl_handle_queue_wk(struct work *, void *);
842 static void ixl_sched_handle_queue(struct ixl_softc *,
843 struct ixl_queue_pair *);
844 static int ixl_init(struct ifnet *);
845 static int ixl_init_locked(struct ixl_softc *);
846 static void ixl_stop(struct ifnet *, int);
847 static void ixl_stop_locked(struct ixl_softc *);
848 static int ixl_iff(struct ixl_softc *);
849 static int ixl_ifflags_cb(struct ethercom *);
850 static int ixl_setup_interrupts(struct ixl_softc *);
851 static int ixl_establish_intx(struct ixl_softc *);
852 static int ixl_establish_msix(struct ixl_softc *);
853 static void ixl_enable_queue_intr(struct ixl_softc *,
854 struct ixl_queue_pair *);
855 static void ixl_disable_queue_intr(struct ixl_softc *,
856 struct ixl_queue_pair *);
857 static void ixl_enable_other_intr(struct ixl_softc *);
858 static void ixl_disable_other_intr(struct ixl_softc *);
859 static void ixl_config_queue_intr(struct ixl_softc *);
860 static void ixl_config_other_intr(struct ixl_softc *);
861
862 static struct ixl_tx_ring *
863 ixl_txr_alloc(struct ixl_softc *, unsigned int);
864 static void ixl_txr_qdis(struct ixl_softc *, struct ixl_tx_ring *, int);
865 static void ixl_txr_config(struct ixl_softc *, struct ixl_tx_ring *);
866 static int ixl_txr_enabled(struct ixl_softc *, struct ixl_tx_ring *);
867 static int ixl_txr_disabled(struct ixl_softc *, struct ixl_tx_ring *);
868 static void ixl_txr_unconfig(struct ixl_softc *, struct ixl_tx_ring *);
869 static void ixl_txr_clean(struct ixl_softc *, struct ixl_tx_ring *);
870 static void ixl_txr_free(struct ixl_softc *, struct ixl_tx_ring *);
871 static int ixl_txeof(struct ixl_softc *, struct ixl_tx_ring *, u_int);
872
873 static struct ixl_rx_ring *
874 ixl_rxr_alloc(struct ixl_softc *, unsigned int);
875 static void ixl_rxr_config(struct ixl_softc *, struct ixl_rx_ring *);
876 static int ixl_rxr_enabled(struct ixl_softc *, struct ixl_rx_ring *);
877 static int ixl_rxr_disabled(struct ixl_softc *, struct ixl_rx_ring *);
878 static void ixl_rxr_unconfig(struct ixl_softc *, struct ixl_rx_ring *);
879 static void ixl_rxr_clean(struct ixl_softc *, struct ixl_rx_ring *);
880 static void ixl_rxr_free(struct ixl_softc *, struct ixl_rx_ring *);
881 static int ixl_rxeof(struct ixl_softc *, struct ixl_rx_ring *, u_int);
882 static int ixl_rxfill(struct ixl_softc *, struct ixl_rx_ring *);
883
884 static struct workqueue *
885 ixl_workq_create(const char *, pri_t, int, int);
886 static void ixl_workq_destroy(struct workqueue *);
887 static int ixl_workqs_teardown(device_t);
888 static void ixl_work_set(struct ixl_work *, void (*)(void *), void *);
889 static void ixl_work_add(struct workqueue *, struct ixl_work *);
890 static void ixl_work_wait(struct workqueue *, struct ixl_work *);
891 static void ixl_workq_work(struct work *, void *);
892 static const struct ixl_product *
893 ixl_lookup(const struct pci_attach_args *pa);
894 static void ixl_link_state_update(struct ixl_softc *,
895 const struct ixl_aq_desc *);
896 static int ixl_vlan_cb(struct ethercom *, uint16_t, bool);
897 static int ixl_setup_vlan_hwfilter(struct ixl_softc *);
898 static void ixl_teardown_vlan_hwfilter(struct ixl_softc *);
899 static int ixl_update_macvlan(struct ixl_softc *);
901 static void ixl_teardown_interrupts(struct ixl_softc *);
902 static int ixl_setup_stats(struct ixl_softc *);
903 static void ixl_teardown_stats(struct ixl_softc *);
904 static void ixl_stats_callout(void *);
905 static void ixl_stats_update(void *);
906 static int ixl_setup_sysctls(struct ixl_softc *);
907 static void ixl_teardown_sysctls(struct ixl_softc *);
908 static int ixl_sysctl_itr_handler(SYSCTLFN_PROTO);
909 static int ixl_sysctl_ndescs_handler(SYSCTLFN_PROTO);
910 static int ixl_queue_pairs_alloc(struct ixl_softc *);
911 static void ixl_queue_pairs_free(struct ixl_softc *);
912
913 static const struct ixl_phy_type ixl_phy_type_map[] = {
914 { 1ULL << IXL_PHY_TYPE_SGMII, IFM_1000_SGMII },
915 { 1ULL << IXL_PHY_TYPE_1000BASE_KX, IFM_1000_KX },
916 { 1ULL << IXL_PHY_TYPE_10GBASE_KX4, IFM_10G_KX4 },
917 { 1ULL << IXL_PHY_TYPE_10GBASE_KR, IFM_10G_KR },
918 { 1ULL << IXL_PHY_TYPE_40GBASE_KR4, IFM_40G_KR4 },
919 { 1ULL << IXL_PHY_TYPE_XAUI |
920 1ULL << IXL_PHY_TYPE_XFI, IFM_10G_CX4 },
921 { 1ULL << IXL_PHY_TYPE_SFI, IFM_10G_SFI },
922 { 1ULL << IXL_PHY_TYPE_XLAUI |
923 1ULL << IXL_PHY_TYPE_XLPPI, IFM_40G_XLPPI },
924 { 1ULL << IXL_PHY_TYPE_40GBASE_CR4_CU |
925 1ULL << IXL_PHY_TYPE_40GBASE_CR4, IFM_40G_CR4 },
926 { 1ULL << IXL_PHY_TYPE_10GBASE_CR1_CU |
927 1ULL << IXL_PHY_TYPE_10GBASE_CR1, IFM_10G_CR1 },
928 { 1ULL << IXL_PHY_TYPE_10GBASE_AOC, IFM_10G_AOC },
929 { 1ULL << IXL_PHY_TYPE_40GBASE_AOC, IFM_40G_AOC },
930 { 1ULL << IXL_PHY_TYPE_100BASE_TX, IFM_100_TX },
931 { 1ULL << IXL_PHY_TYPE_1000BASE_T_OPTICAL |
932 1ULL << IXL_PHY_TYPE_1000BASE_T, IFM_1000_T },
933 { 1ULL << IXL_PHY_TYPE_10GBASE_T, IFM_10G_T },
934 { 1ULL << IXL_PHY_TYPE_10GBASE_SR, IFM_10G_SR },
935 { 1ULL << IXL_PHY_TYPE_10GBASE_LR, IFM_10G_LR },
936 { 1ULL << IXL_PHY_TYPE_10GBASE_SFPP_CU, IFM_10G_TWINAX },
937 { 1ULL << IXL_PHY_TYPE_40GBASE_SR4, IFM_40G_SR4 },
938 { 1ULL << IXL_PHY_TYPE_40GBASE_LR4, IFM_40G_LR4 },
939 { 1ULL << IXL_PHY_TYPE_1000BASE_SX, IFM_1000_SX },
940 { 1ULL << IXL_PHY_TYPE_1000BASE_LX, IFM_1000_LX },
941 { 1ULL << IXL_PHY_TYPE_20GBASE_KR2, IFM_20G_KR2 },
942 { 1ULL << IXL_PHY_TYPE_25GBASE_KR, IFM_25G_KR },
943 { 1ULL << IXL_PHY_TYPE_25GBASE_CR, IFM_25G_CR },
944 { 1ULL << IXL_PHY_TYPE_25GBASE_SR, IFM_25G_SR },
945 { 1ULL << IXL_PHY_TYPE_25GBASE_LR, IFM_25G_LR },
946 { 1ULL << IXL_PHY_TYPE_25GBASE_AOC, IFM_25G_AOC },
947 { 1ULL << IXL_PHY_TYPE_25GBASE_ACC, IFM_25G_ACC },
948 };
949
950 static const struct ixl_speed_type ixl_speed_type_map[] = {
951 { IXL_AQ_LINK_SPEED_40GB, IF_Gbps(40) },
952 { IXL_AQ_LINK_SPEED_25GB, IF_Gbps(25) },
953 { IXL_AQ_LINK_SPEED_10GB, IF_Gbps(10) },
954 { IXL_AQ_LINK_SPEED_1000MB, IF_Mbps(1000) },
955 	{ IXL_AQ_LINK_SPEED_100MB,	IF_Mbps(100) },
956 };
957
958 static const struct ixl_aq_regs ixl_pf_aq_regs = {
959 .atq_tail = I40E_PF_ATQT,
960 .atq_tail_mask = I40E_PF_ATQT_ATQT_MASK,
961 .atq_head = I40E_PF_ATQH,
962 .atq_head_mask = I40E_PF_ATQH_ATQH_MASK,
963 .atq_len = I40E_PF_ATQLEN,
964 .atq_bal = I40E_PF_ATQBAL,
965 .atq_bah = I40E_PF_ATQBAH,
966 .atq_len_enable = I40E_PF_ATQLEN_ATQENABLE_MASK,
967
968 .arq_tail = I40E_PF_ARQT,
969 .arq_tail_mask = I40E_PF_ARQT_ARQT_MASK,
970 .arq_head = I40E_PF_ARQH,
971 .arq_head_mask = I40E_PF_ARQH_ARQH_MASK,
972 .arq_len = I40E_PF_ARQLEN,
973 .arq_bal = I40E_PF_ARQBAL,
974 .arq_bah = I40E_PF_ARQBAH,
975 .arq_len_enable = I40E_PF_ARQLEN_ARQENABLE_MASK,
976 };
977
978 #define ixl_rd(_s, _r) \
979 bus_space_read_4((_s)->sc_memt, (_s)->sc_memh, (_r))
980 #define ixl_wr(_s, _r, _v) \
981 bus_space_write_4((_s)->sc_memt, (_s)->sc_memh, (_r), (_v))
982 #define ixl_barrier(_s, _r, _l, _o) \
983 bus_space_barrier((_s)->sc_memt, (_s)->sc_memh, (_r), (_l), (_o))
984 #define ixl_flush(_s) (void)ixl_rd((_s), I40E_GLGEN_STAT)
985 #define ixl_nqueues(_sc) (1 << ((_sc)->sc_nqueue_pairs - 1))
986
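/*
 * Split a 64-bit DMA address into the 32-bit halves expected by the
 * BAH/BAL style registers; when bus addresses are only 32 bits wide the
 * high half is zero.
 */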
987 static inline uint32_t
988 ixl_dmamem_hi(struct ixl_dmamem *ixm)
989 {
990 uint32_t retval;
991 uint64_t val;
992
993 if (sizeof(IXL_DMA_DVA(ixm)) > 4) {
994 val = (intptr_t)IXL_DMA_DVA(ixm);
995 retval = (uint32_t)(val >> 32);
996 } else {
997 retval = 0;
998 }
999
1000 return retval;
1001 }
1002
1003 static inline uint32_t
1004 ixl_dmamem_lo(struct ixl_dmamem *ixm)
1005 {
1006
1007 return (uint32_t)IXL_DMA_DVA(ixm);
1008 }
1009
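/*
 * Store a buffer address into an admin queue descriptor: iaq_param[2]
 * holds the high 32 bits and iaq_param[3] the low 32 bits, little-endian.
 */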
1010 static inline void
1011 ixl_aq_dva(struct ixl_aq_desc *iaq, bus_addr_t addr)
1012 {
1013 uint64_t val;
1014
1015 if (sizeof(addr) > 4) {
1016 val = (intptr_t)addr;
1017 iaq->iaq_param[2] = htole32(val >> 32);
1018 } else {
1019 iaq->iaq_param[2] = htole32(0);
1020 }
1021
1022 iaq->iaq_param[3] = htole32(addr);
1023 }
1024
1025 static inline unsigned int
1026 ixl_rxr_unrefreshed(unsigned int prod, unsigned int cons, unsigned int ndescs)
1027 {
1028 unsigned int num;
1029
1030 if (prod < cons)
1031 num = cons - prod;
1032 else
1033 num = (ndescs - prod) + cons;
1034
1035 if (__predict_true(num > 0)) {
1036 		/* device cannot receive packets if all descriptors are filled */
1037 num -= 1;
1038 }
1039
1040 return num;
1041 }
1042
1043 CFATTACH_DECL3_NEW(ixl, sizeof(struct ixl_softc),
1044 ixl_match, ixl_attach, ixl_detach, NULL, NULL, NULL,
1045 DVF_DETACH_SHUTDOWN);
1046
1047 static const struct ixl_product ixl_products[] = {
1048 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_SFP },
1049 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_KX_B },
1050 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_KX_C },
1051 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_A },
1052 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_B },
1053 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_C },
1054 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_T },
1055 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_1 },
1056 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_2 },
1057 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_T4_10G },
1058 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_BP },
1059 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_SFP28 },
1060 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_KX },
1061 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_QSFP },
1062 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_SFP },
1063 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_1G_BASET },
1064 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_10G_BASET },
1065 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_I_SFP },
1066 /* required last entry */
1067 {0, 0}
1068 };
1069
1070 static const struct ixl_product *
1071 ixl_lookup(const struct pci_attach_args *pa)
1072 {
1073 const struct ixl_product *ixlp;
1074
1075 for (ixlp = ixl_products; ixlp->vendor_id != 0; ixlp++) {
1076 if (PCI_VENDOR(pa->pa_id) == ixlp->vendor_id &&
1077 PCI_PRODUCT(pa->pa_id) == ixlp->product_id)
1078 return ixlp;
1079 }
1080
1081 return NULL;
1082 }
1083
1084 static int
1085 ixl_match(device_t parent, cfdata_t match, void *aux)
1086 {
1087 const struct pci_attach_args *pa = aux;
1088
1089 return (ixl_lookup(pa) != NULL) ? 1 : 0;
1090 }
1091
1092 static void
1093 ixl_attach(device_t parent, device_t self, void *aux)
1094 {
1095 struct ixl_softc *sc;
1096 struct pci_attach_args *pa = aux;
1097 struct ifnet *ifp;
1098 pcireg_t memtype;
1099 uint32_t firstq, port, ari, func;
1100 char xnamebuf[32];
1101 int tries, rv, link;
1102
1103 sc = device_private(self);
1104 sc->sc_dev = self;
1105 ifp = &sc->sc_ec.ec_if;
1106
1107 sc->sc_pa = *pa;
1108 sc->sc_dmat = (pci_dma64_available(pa)) ?
1109 pa->pa_dmat64 : pa->pa_dmat;
1110 sc->sc_aq_regs = &ixl_pf_aq_regs;
1111
1112 sc->sc_mac_type = ixl_mactype(PCI_PRODUCT(pa->pa_id));
1113
1114 ixl_pci_csr_setup(pa->pa_pc, pa->pa_tag);
1115
1116 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IXL_PCIREG);
1117 if (pci_mapreg_map(pa, IXL_PCIREG, memtype, 0,
1118 &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems)) {
1119 aprint_error(": unable to map registers\n");
1120 return;
1121 }
1122
1123 mutex_init(&sc->sc_cfg_lock, MUTEX_DEFAULT, IPL_SOFTNET);
1124
1125 firstq = ixl_rd(sc, I40E_PFLAN_QALLOC);
1126 firstq &= I40E_PFLAN_QALLOC_FIRSTQ_MASK;
1127 firstq >>= I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
1128 sc->sc_base_queue = firstq;
1129
1130 ixl_clear_hw(sc);
1131 if (ixl_pf_reset(sc) == -1) {
1132 		/* error printed by ixl_pf_reset */
1133 goto unmap;
1134 }
1135
1136 port = ixl_rd(sc, I40E_PFGEN_PORTNUM);
1137 port &= I40E_PFGEN_PORTNUM_PORT_NUM_MASK;
1138 port >>= I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
1139 sc->sc_port = port;
1140 aprint_normal(": port %u", sc->sc_port);
1141
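	/*
	 * With ARI (alternative routing-ID interpretation) the PCI function
	 * number is 8 bits wide instead of 3, so mask the PF id accordingly.
	 */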
1142 ari = ixl_rd(sc, I40E_GLPCI_CAPSUP);
1143 ari &= I40E_GLPCI_CAPSUP_ARI_EN_MASK;
1144 ari >>= I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
1145
1146 func = ixl_rd(sc, I40E_PF_FUNC_RID);
1147 sc->sc_pf_id = func & (ari ? 0xff : 0x7);
1148
1149 /* initialise the adminq */
1150
1151 mutex_init(&sc->sc_atq_lock, MUTEX_DEFAULT, IPL_NET);
1152
1153 if (ixl_dmamem_alloc(sc, &sc->sc_atq,
1154 sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
1155 aprint_error("\n" "%s: unable to allocate atq\n",
1156 device_xname(self));
1157 goto unmap;
1158 }
1159
1160 SIMPLEQ_INIT(&sc->sc_arq_idle);
1161 ixl_work_set(&sc->sc_arq_task, ixl_arq, sc);
1162 sc->sc_arq_cons = 0;
1163 sc->sc_arq_prod = 0;
1164
1165 if (ixl_dmamem_alloc(sc, &sc->sc_arq,
1166 sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
1167 aprint_error("\n" "%s: unable to allocate arq\n",
1168 device_xname(self));
1169 goto free_atq;
1170 }
1171
1172 if (!ixl_arq_fill(sc)) {
1173 aprint_error("\n" "%s: unable to fill arq descriptors\n",
1174 device_xname(self));
1175 goto free_arq;
1176 }
1177
1178 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1179 0, IXL_DMA_LEN(&sc->sc_atq),
1180 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1181
1182 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1183 0, IXL_DMA_LEN(&sc->sc_arq),
1184 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1185
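	/*
	 * Program the admin queue base/length registers and check that the
	 * firmware answers a get-version command; retried up to 10 times,
	 * with a 100ms pause after a timeout.
	 */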
1186 for (tries = 0; tries < 10; tries++) {
1187 sc->sc_atq_cons = 0;
1188 sc->sc_atq_prod = 0;
1189
1190 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1191 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1192 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1193 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1194
1195 ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
1196
1197 ixl_wr(sc, sc->sc_aq_regs->atq_bal,
1198 ixl_dmamem_lo(&sc->sc_atq));
1199 ixl_wr(sc, sc->sc_aq_regs->atq_bah,
1200 ixl_dmamem_hi(&sc->sc_atq));
1201 ixl_wr(sc, sc->sc_aq_regs->atq_len,
1202 sc->sc_aq_regs->atq_len_enable | IXL_AQ_NUM);
1203
1204 ixl_wr(sc, sc->sc_aq_regs->arq_bal,
1205 ixl_dmamem_lo(&sc->sc_arq));
1206 ixl_wr(sc, sc->sc_aq_regs->arq_bah,
1207 ixl_dmamem_hi(&sc->sc_arq));
1208 ixl_wr(sc, sc->sc_aq_regs->arq_len,
1209 sc->sc_aq_regs->arq_len_enable | IXL_AQ_NUM);
1210
1211 rv = ixl_get_version(sc);
1212 if (rv == 0)
1213 break;
1214 if (rv != ETIMEDOUT) {
1215 aprint_error(", unable to get firmware version\n");
1216 goto shutdown;
1217 }
1218
1219 delaymsec(100);
1220 }
1221
1222 ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
1223
1224 if (ixl_dmamem_alloc(sc, &sc->sc_aqbuf, IXL_AQ_BUFLEN, 0) != 0) {
1225 		aprint_error_dev(self, "unable to allocate nvm buffer\n");
1226 goto shutdown;
1227 }
1228
1229 ixl_get_nvm_version(sc);
1230
1231 if (sc->sc_mac_type == I40E_MAC_X722)
1232 sc->sc_nqueue_pairs_device = IXL_QUEUE_MAX_X722;
1233 else
1234 sc->sc_nqueue_pairs_device = IXL_QUEUE_MAX_XL710;
1235
1236 rv = ixl_get_hw_capabilities(sc);
1237 if (rv != 0) {
1238 aprint_error(", GET HW CAPABILITIES %s\n",
1239 rv == ETIMEDOUT ? "timeout" : "error");
1240 goto free_aqbuf;
1241 }
1242
1243 sc->sc_nqueue_pairs_max = MIN((int)sc->sc_nqueue_pairs_device, ncpu);
1244 if (ixl_param_nqps_limit > 0) {
1245 sc->sc_nqueue_pairs_max = MIN((int)sc->sc_nqueue_pairs_max,
1246 ixl_param_nqps_limit);
1247 }
1248
1249 sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max;
1250 sc->sc_tx_ring_ndescs = ixl_param_tx_ndescs;
1251 sc->sc_rx_ring_ndescs = ixl_param_rx_ndescs;
1252
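	/* the ring sizes must be powers of two and below the process-limit sentinel */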
1253 KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_rx_ring_ndescs);
1254 KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_tx_ring_ndescs);
1255 KASSERT(sc->sc_rx_ring_ndescs ==
1256 (1U << (fls32(sc->sc_rx_ring_ndescs) - 1)));
1257 KASSERT(sc->sc_tx_ring_ndescs ==
1258 (1U << (fls32(sc->sc_tx_ring_ndescs) - 1)));
1259
1260 if (ixl_get_mac(sc) != 0) {
1261 /* error printed by ixl_get_mac */
1262 goto free_aqbuf;
1263 }
1264
1265 aprint_normal("\n");
1266 aprint_naive("\n");
1267
1268 aprint_normal_dev(self, "Ethernet address %s\n",
1269 ether_sprintf(sc->sc_enaddr));
1270
1271 rv = ixl_pxe_clear(sc);
1272 if (rv != 0) {
1273 aprint_debug_dev(self, "CLEAR PXE MODE %s\n",
1274 rv == ETIMEDOUT ? "timeout" : "error");
1275 }
1276
1277 ixl_set_filter_control(sc);
1278
1279 if (ixl_hmc(sc) != 0) {
1280 /* error printed by ixl_hmc */
1281 goto free_aqbuf;
1282 }
1283
1284 if (ixl_lldp_shut(sc) != 0) {
1285 /* error printed by ixl_lldp_shut */
1286 goto free_hmc;
1287 }
1288
1289 if (ixl_phy_mask_ints(sc) != 0) {
1290 /* error printed by ixl_phy_mask_ints */
1291 goto free_hmc;
1292 }
1293
1294 if (ixl_restart_an(sc) != 0) {
1295 /* error printed by ixl_restart_an */
1296 goto free_hmc;
1297 }
1298
1299 if (ixl_get_switch_config(sc) != 0) {
1300 /* error printed by ixl_get_switch_config */
1301 goto free_hmc;
1302 }
1303
1304 rv = ixl_get_link_status_poll(sc, NULL);
1305 if (rv != 0) {
1306 aprint_error_dev(self, "GET LINK STATUS %s\n",
1307 rv == ETIMEDOUT ? "timeout" : "error");
1308 goto free_hmc;
1309 }
1310
1311 	/*
1312 	 * The FW often returns EIO for the "Get PHY Abilities" command
1313 	 * if it is issued without a delay.
1314 	 */
1315 DELAY(500);
1316 if (ixl_get_phy_info(sc) != 0) {
1317 /* error printed by ixl_get_phy_info */
1318 goto free_hmc;
1319 }
1320
1321 if (ixl_dmamem_alloc(sc, &sc->sc_scratch,
1322 sizeof(struct ixl_aq_vsi_data), 8) != 0) {
1323 aprint_error_dev(self, "unable to allocate scratch buffer\n");
1324 goto free_hmc;
1325 }
1326
1327 rv = ixl_get_vsi(sc);
1328 if (rv != 0) {
1329 aprint_error_dev(self, "GET VSI %s %d\n",
1330 rv == ETIMEDOUT ? "timeout" : "error", rv);
1331 goto free_scratch;
1332 }
1333
1334 rv = ixl_set_vsi(sc);
1335 if (rv != 0) {
1336 aprint_error_dev(self, "UPDATE VSI error %s %d\n",
1337 rv == ETIMEDOUT ? "timeout" : "error", rv);
1338 goto free_scratch;
1339 }
1340
1341 if (ixl_queue_pairs_alloc(sc) != 0) {
1342 /* error printed by ixl_queue_pairs_alloc */
1343 goto free_scratch;
1344 }
1345
1346 if (ixl_setup_interrupts(sc) != 0) {
1347 /* error printed by ixl_setup_interrupts */
1348 goto free_queue_pairs;
1349 }
1350
1351 if (ixl_setup_stats(sc) != 0) {
1352 aprint_error_dev(self, "failed to setup event counters\n");
1353 goto teardown_intrs;
1354 }
1355
1356 if (ixl_setup_sysctls(sc) != 0) {
1357 /* error printed by ixl_setup_sysctls */
1358 goto teardown_stats;
1359 }
1360
1361 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_cfg", device_xname(self));
1362 sc->sc_workq = ixl_workq_create(xnamebuf, IXL_WORKQUEUE_PRI,
1363 IPL_NET, WQ_MPSAFE);
1364 if (sc->sc_workq == NULL)
1365 goto teardown_sysctls;
1366
1367 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_txrx", device_xname(self));
1368 rv = workqueue_create(&sc->sc_workq_txrx, xnamebuf, ixl_handle_queue_wk,
1369 sc, IXL_WORKQUEUE_PRI, IPL_NET, WQ_PERCPU | WQ_MPSAFE);
1370 if (rv != 0) {
1371 sc->sc_workq_txrx = NULL;
1372 goto teardown_wqs;
1373 }
1374
1375 snprintf(xnamebuf, sizeof(xnamebuf), "%s_atq_cv", device_xname(self));
1376 cv_init(&sc->sc_atq_cv, xnamebuf);
1377
1378 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
1379
1380 ifp->if_softc = sc;
1381 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1382 ifp->if_extflags = IFEF_MPSAFE;
1383 ifp->if_ioctl = ixl_ioctl;
1384 ifp->if_start = ixl_start;
1385 ifp->if_transmit = ixl_transmit;
1386 ifp->if_watchdog = ixl_watchdog;
1387 ifp->if_init = ixl_init;
1388 ifp->if_stop = ixl_stop;
1389 IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_ndescs);
1390 IFQ_SET_READY(&ifp->if_snd);
1391 ifp->if_capabilities |= IXL_IFCAP_RXCSUM;
1392 ifp->if_capabilities |= IXL_IFCAP_TXCSUM;
1393 #if 0
1394 ifp->if_capabilities |= IFCAP_TSOv4 | IFCAP_TSOv6;
1395 #endif
1396 ether_set_vlan_cb(&sc->sc_ec, ixl_vlan_cb);
1397 sc->sc_ec.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1398 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
1399 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
1400
1401 sc->sc_ec.ec_capenable = sc->sc_ec.ec_capabilities;
1402 /* Disable VLAN_HWFILTER by default */
1403 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
1404
1405 sc->sc_cur_ec_capenable = sc->sc_ec.ec_capenable;
1406
1407 sc->sc_ec.ec_ifmedia = &sc->sc_media;
1408 ifmedia_init_with_lock(&sc->sc_media, IFM_IMASK, ixl_media_change,
1409 ixl_media_status, &sc->sc_cfg_lock);
1410
1411 ixl_media_add(sc);
1412 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1413 if (ISSET(sc->sc_phy_abilities,
1414 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX))) {
1415 ifmedia_add(&sc->sc_media,
1416 IFM_ETHER | IFM_AUTO | IFM_FLOW, 0, NULL);
1417 }
1418 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_NONE, 0, NULL);
1419 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
1420
1421 if_attach(ifp);
1422 if_deferred_start_init(ifp, NULL);
1423 ether_ifattach(ifp, sc->sc_enaddr);
1424 ether_set_ifflags_cb(&sc->sc_ec, ixl_ifflags_cb);
1425
1426 rv = ixl_get_link_status_poll(sc, &link);
1427 if (rv != 0)
1428 link = LINK_STATE_UNKNOWN;
1429 if_link_state_change(ifp, link);
1430
1431 ixl_atq_set(&sc->sc_link_state_atq, ixl_get_link_status_done);
1432 ixl_work_set(&sc->sc_link_state_task, ixl_get_link_status, sc);
1433
1434 ixl_config_other_intr(sc);
1435 ixl_enable_other_intr(sc);
1436
1437 ixl_set_phy_autoselect(sc);
1438
1439 /* remove default mac filter and replace it so we can see vlans */
1440 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, 0, 0);
1441 if (rv != ENOENT) {
1442 aprint_debug_dev(self,
1443 "unable to remove macvlan %u\n", rv);
1444 }
1445 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
1446 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1447 if (rv != ENOENT) {
1448 aprint_debug_dev(self,
1449 "unable to remove macvlan, ignore vlan %u\n", rv);
1450 }
1451
1452 if (ixl_update_macvlan(sc) != 0) {
1453 aprint_debug_dev(self,
1454 "couldn't enable vlan hardware filter\n");
1455 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
1456 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER);
1457 }
1458
1459 sc->sc_txrx_workqueue = true;
1460 sc->sc_tx_process_limit = IXL_TX_PROCESS_LIMIT;
1461 sc->sc_rx_process_limit = IXL_RX_PROCESS_LIMIT;
1462 sc->sc_tx_intr_process_limit = IXL_TX_INTR_PROCESS_LIMIT;
1463 sc->sc_rx_intr_process_limit = IXL_RX_INTR_PROCESS_LIMIT;
1464
1465 ixl_stats_update(sc);
1466 sc->sc_stats_counters.isc_has_offset = true;
1467
1468 if (pmf_device_register(self, NULL, NULL) != true)
1469 aprint_debug_dev(self, "couldn't establish power handler\n");
1470 sc->sc_itr_rx = IXL_ITR_RX;
1471 sc->sc_itr_tx = IXL_ITR_TX;
1472 sc->sc_attached = true;
1473 return;
1474
1475 teardown_wqs:
1476 config_finalize_register(self, ixl_workqs_teardown);
1477 teardown_sysctls:
1478 ixl_teardown_sysctls(sc);
1479 teardown_stats:
1480 ixl_teardown_stats(sc);
1481 teardown_intrs:
1482 ixl_teardown_interrupts(sc);
1483 free_queue_pairs:
1484 ixl_queue_pairs_free(sc);
1485 free_scratch:
1486 ixl_dmamem_free(sc, &sc->sc_scratch);
1487 free_hmc:
1488 ixl_hmc_free(sc);
1489 free_aqbuf:
1490 ixl_dmamem_free(sc, &sc->sc_aqbuf);
1491 shutdown:
1492 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1493 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1494 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1495 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1496
1497 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0);
1498 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0);
1499 ixl_wr(sc, sc->sc_aq_regs->atq_len, 0);
1500
1501 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0);
1502 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0);
1503 ixl_wr(sc, sc->sc_aq_regs->arq_len, 0);
1504
1505 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1506 0, IXL_DMA_LEN(&sc->sc_arq),
1507 BUS_DMASYNC_POSTREAD);
1508 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1509 0, IXL_DMA_LEN(&sc->sc_atq),
1510 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1511
1512 ixl_arq_unfill(sc);
1513 free_arq:
1514 ixl_dmamem_free(sc, &sc->sc_arq);
1515 free_atq:
1516 ixl_dmamem_free(sc, &sc->sc_atq);
1517 unmap:
1518 mutex_destroy(&sc->sc_atq_lock);
1519 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
1520 mutex_destroy(&sc->sc_cfg_lock);
1521 sc->sc_mems = 0;
1522
1523 sc->sc_attached = false;
1524 }
1525
1526 static int
1527 ixl_detach(device_t self, int flags)
1528 {
1529 struct ixl_softc *sc = device_private(self);
1530 struct ifnet *ifp = &sc->sc_ec.ec_if;
1531
1532 if (!sc->sc_attached)
1533 return 0;
1534
1535 ixl_stop(ifp, 1);
1536
1537 ixl_disable_other_intr(sc);
1538
1539 callout_halt(&sc->sc_stats_callout, NULL);
1540 ixl_work_wait(sc->sc_workq, &sc->sc_stats_task);
1541
1542 /* wait for ATQ handler */
1543 mutex_enter(&sc->sc_atq_lock);
1544 mutex_exit(&sc->sc_atq_lock);
1545
1546 ixl_work_wait(sc->sc_workq, &sc->sc_arq_task);
1547 ixl_work_wait(sc->sc_workq, &sc->sc_link_state_task);
1548
1549 if (sc->sc_workq != NULL) {
1550 ixl_workq_destroy(sc->sc_workq);
1551 sc->sc_workq = NULL;
1552 }
1553
1554 if (sc->sc_workq_txrx != NULL) {
1555 workqueue_destroy(sc->sc_workq_txrx);
1556 sc->sc_workq_txrx = NULL;
1557 }
1558
1559 ether_ifdetach(ifp);
1560 if_detach(ifp);
1561 ifmedia_fini(&sc->sc_media);
1562
1563 ixl_teardown_interrupts(sc);
1564 ixl_teardown_stats(sc);
1565 ixl_teardown_sysctls(sc);
1566
1567 ixl_queue_pairs_free(sc);
1568
1569 ixl_dmamem_free(sc, &sc->sc_scratch);
1570 ixl_hmc_free(sc);
1571
1572 /* shutdown */
1573 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1574 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1575 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1576 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1577
1578 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0);
1579 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0);
1580 ixl_wr(sc, sc->sc_aq_regs->atq_len, 0);
1581
1582 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0);
1583 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0);
1584 ixl_wr(sc, sc->sc_aq_regs->arq_len, 0);
1585
1586 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1587 0, IXL_DMA_LEN(&sc->sc_arq),
1588 BUS_DMASYNC_POSTREAD);
1589 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1590 0, IXL_DMA_LEN(&sc->sc_atq),
1591 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1592
1593 ixl_arq_unfill(sc);
1594
1595 ixl_dmamem_free(sc, &sc->sc_arq);
1596 ixl_dmamem_free(sc, &sc->sc_atq);
1597 ixl_dmamem_free(sc, &sc->sc_aqbuf);
1598
1599 cv_destroy(&sc->sc_atq_cv);
1600 mutex_destroy(&sc->sc_atq_lock);
1601
1602 if (sc->sc_mems != 0) {
1603 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
1604 sc->sc_mems = 0;
1605 }
1606
1607 mutex_destroy(&sc->sc_cfg_lock);
1608
1609 return 0;
1610 }
1611
1612 static int
1613 ixl_workqs_teardown(device_t self)
1614 {
1615 struct ixl_softc *sc = device_private(self);
1616
1617 if (sc->sc_workq != NULL) {
1618 ixl_workq_destroy(sc->sc_workq);
1619 sc->sc_workq = NULL;
1620 }
1621
1622 if (sc->sc_workq_txrx != NULL) {
1623 workqueue_destroy(sc->sc_workq_txrx);
1624 sc->sc_workq_txrx = NULL;
1625 }
1626
1627 return 0;
1628 }
1629
1630 static int
1631 ixl_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
1632 {
1633 struct ifnet *ifp = &ec->ec_if;
1634 struct ixl_softc *sc = ifp->if_softc;
1635 int rv;
1636
1637 if (!ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)) {
1638 return 0;
1639 }
1640
1641 if (set) {
1642 rv = ixl_add_macvlan(sc, sc->sc_enaddr, vid,
1643 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
1644 if (rv == 0) {
1645 rv = ixl_add_macvlan(sc, etherbroadcastaddr,
1646 vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
1647 }
1648 } else {
1649 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, vid,
1650 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
1651 (void)ixl_remove_macvlan(sc, etherbroadcastaddr, vid,
1652 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
1653 }
1654
1655 return rv;
1656 }
1657
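/*
 * Populate the ifmedia list from the PHY types reported by the firmware.
 * Every supported type gets a full-duplex entry (plus a flow control
 * variant when pause is supported); 100BASE-TX additionally gets entries
 * without the duplex flag.
 */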
1658 static void
1659 ixl_media_add(struct ixl_softc *sc)
1660 {
1661 struct ifmedia *ifm = &sc->sc_media;
1662 const struct ixl_phy_type *itype;
1663 unsigned int i;
1664 bool flow;
1665
1666 if (ISSET(sc->sc_phy_abilities,
1667 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX))) {
1668 flow = true;
1669 } else {
1670 flow = false;
1671 }
1672
1673 for (i = 0; i < __arraycount(ixl_phy_type_map); i++) {
1674 itype = &ixl_phy_type_map[i];
1675
1676 if (ISSET(sc->sc_phy_types, itype->phy_type)) {
1677 ifmedia_add(ifm,
1678 IFM_ETHER | IFM_FDX | itype->ifm_type, 0, NULL);
1679
1680 if (flow) {
1681 ifmedia_add(ifm,
1682 IFM_ETHER | IFM_FDX | IFM_FLOW |
1683 itype->ifm_type, 0, NULL);
1684 }
1685
1686 if (itype->ifm_type != IFM_100_TX)
1687 continue;
1688
1689 ifmedia_add(ifm, IFM_ETHER | itype->ifm_type,
1690 0, NULL);
1691 if (flow) {
1692 ifmedia_add(ifm,
1693 IFM_ETHER | IFM_FLOW | itype->ifm_type,
1694 0, NULL);
1695 }
1696 }
1697 }
1698 }
1699
1700 static void
1701 ixl_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1702 {
1703 struct ixl_softc *sc = ifp->if_softc;
1704
1705 KASSERT(mutex_owned(&sc->sc_cfg_lock));
1706
1707 ifmr->ifm_status = sc->sc_media_status;
1708 ifmr->ifm_active = sc->sc_media_active;
1709 }
1710
1711 static int
1712 ixl_media_change(struct ifnet *ifp)
1713 {
1714 struct ixl_softc *sc = ifp->if_softc;
1715 struct ifmedia *ifm = &sc->sc_media;
1716 uint64_t ifm_active = sc->sc_media_active;
1717 uint8_t link_speed, abilities;
1718
1719 switch (IFM_SUBTYPE(ifm_active)) {
1720 case IFM_1000_SGMII:
1721 case IFM_1000_KX:
1722 case IFM_10G_KX4:
1723 case IFM_10G_KR:
1724 case IFM_40G_KR4:
1725 case IFM_20G_KR2:
1726 case IFM_25G_KR:
1727 /* backplanes */
1728 return EINVAL;
1729 }
1730
1731 abilities = IXL_PHY_ABILITY_AUTONEGO | IXL_PHY_ABILITY_LINKUP;
1732
1733 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1734 case IFM_AUTO:
1735 link_speed = sc->sc_phy_linkspeed;
1736 break;
1737 case IFM_NONE:
1738 link_speed = 0;
1739 CLR(abilities, IXL_PHY_ABILITY_LINKUP);
1740 break;
1741 default:
1742 link_speed = ixl_search_baudrate(
1743 ifmedia_baudrate(ifm->ifm_media));
1744 }
1745
1746 if (ISSET(abilities, IXL_PHY_ABILITY_LINKUP)) {
1747 if (ISSET(link_speed, sc->sc_phy_linkspeed) == 0)
1748 return EINVAL;
1749 }
1750
1751 if (ifm->ifm_media & IFM_FLOW) {
1752 abilities |= sc->sc_phy_abilities &
1753 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX);
1754 }
1755
1756 return ixl_set_phy_config(sc, link_speed, abilities, false);
1757 }
1758
1759 static void
1760 ixl_watchdog(struct ifnet *ifp)
1761 {
1762
1763 }
1764
1765 static void
1766 ixl_del_all_multiaddr(struct ixl_softc *sc)
1767 {
1768 struct ethercom *ec = &sc->sc_ec;
1769 struct ether_multi *enm;
1770 struct ether_multistep step;
1771
1772 ETHER_LOCK(ec);
1773 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1774 ETHER_NEXT_MULTI(step, enm)) {
1775 ixl_remove_macvlan(sc, enm->enm_addrlo, 0,
1776 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1777 }
1778 ETHER_UNLOCK(ec);
1779 }
1780
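/*
 * Add a hardware multicast filter.  A range request (addrlo != addrhi)
 * cannot be expressed as individual filters, and a full filter table
 * (ENOSPC) cannot take another entry; in either case all individual
 * multicast filters are dropped, IFF_ALLMULTI is set, and ENETRESET is
 * returned so the caller reprograms the receive mode via ixl_iff().
 */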
1781 static int
1782 ixl_add_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
1783 {
1784 struct ifnet *ifp = &sc->sc_ec.ec_if;
1785 int rv;
1786
1787 if (ISSET(ifp->if_flags, IFF_ALLMULTI))
1788 return 0;
1789
1790 if (memcmp(addrlo, addrhi, ETHER_ADDR_LEN) != 0) {
1791 ixl_del_all_multiaddr(sc);
1792 SET(ifp->if_flags, IFF_ALLMULTI);
1793 return ENETRESET;
1794 }
1795
1796 	/* multicast addresses cannot use the VLAN HWFILTER */
1797 rv = ixl_add_macvlan(sc, addrlo, 0,
1798 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
1799
1800 if (rv == ENOSPC) {
1801 ixl_del_all_multiaddr(sc);
1802 SET(ifp->if_flags, IFF_ALLMULTI);
1803 return ENETRESET;
1804 }
1805
1806 return rv;
1807 }
1808
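/*
 * Remove a hardware multicast filter.  If IFF_ALLMULTI is not set, simply
 * delete the matching filter.  Otherwise try to leave ALLMULTI mode: when
 * every remaining multicast entry is a single address, re-add them all as
 * individual filters and clear IFF_ALLMULTI (returning ENETRESET).  If
 * the filter table fills up part way through, roll back the additions and
 * stay in ALLMULTI mode.
 */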
1809 static int
1810 ixl_del_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
1811 {
1812 struct ifnet *ifp = &sc->sc_ec.ec_if;
1813 struct ethercom *ec = &sc->sc_ec;
1814 struct ether_multi *enm, *enm_last;
1815 struct ether_multistep step;
1816 int error, rv = 0;
1817
1818 if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
1819 ixl_remove_macvlan(sc, addrlo, 0,
1820 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1821 return 0;
1822 }
1823
1824 ETHER_LOCK(ec);
1825 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1826 ETHER_NEXT_MULTI(step, enm)) {
1827 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
1828 ETHER_ADDR_LEN) != 0) {
1829 goto out;
1830 }
1831 }
1832
1833 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1834 ETHER_NEXT_MULTI(step, enm)) {
1835 error = ixl_add_macvlan(sc, enm->enm_addrlo, 0,
1836 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
1837 if (error != 0)
1838 break;
1839 }
1840
1841 if (enm != NULL) {
1842 enm_last = enm;
1843 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1844 ETHER_NEXT_MULTI(step, enm)) {
1845 if (enm == enm_last)
1846 break;
1847
1848 ixl_remove_macvlan(sc, enm->enm_addrlo, 0,
1849 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1850 }
1851 } else {
1852 CLR(ifp->if_flags, IFF_ALLMULTI);
1853 rv = ENETRESET;
1854 }
1855
1856 out:
1857 ETHER_UNLOCK(ec);
1858 return rv;
1859 }
1860
1861 static int
1862 ixl_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1863 {
1864 struct ifreq *ifr = (struct ifreq *)data;
1865 struct ixl_softc *sc = (struct ixl_softc *)ifp->if_softc;
1866 const struct sockaddr *sa;
1867 uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
1868 int s, error = 0;
1869 unsigned int nmtu;
1870
1871 switch (cmd) {
1872 case SIOCSIFMTU:
1873 nmtu = ifr->ifr_mtu;
1874
1875 if (nmtu < IXL_MIN_MTU || nmtu > IXL_MAX_MTU) {
1876 error = EINVAL;
1877 break;
1878 }
1879 if (ifp->if_mtu != nmtu) {
1880 s = splnet();
1881 error = ether_ioctl(ifp, cmd, data);
1882 splx(s);
1883 if (error == ENETRESET)
1884 error = ixl_init(ifp);
1885 }
1886 break;
1887 case SIOCADDMULTI:
1888 sa = ifreq_getaddr(SIOCADDMULTI, ifr);
1889 if (ether_addmulti(sa, &sc->sc_ec) == ENETRESET) {
1890 error = ether_multiaddr(sa, addrlo, addrhi);
1891 if (error != 0)
1892 return error;
1893
1894 error = ixl_add_multi(sc, addrlo, addrhi);
1895 if (error != 0 && error != ENETRESET) {
1896 ether_delmulti(sa, &sc->sc_ec);
1897 error = EIO;
1898 }
1899 }
1900 break;
1901
1902 case SIOCDELMULTI:
1903 sa = ifreq_getaddr(SIOCDELMULTI, ifr);
1904 if (ether_delmulti(sa, &sc->sc_ec) == ENETRESET) {
1905 error = ether_multiaddr(sa, addrlo, addrhi);
1906 if (error != 0)
1907 return error;
1908
1909 error = ixl_del_multi(sc, addrlo, addrhi);
1910 }
1911 break;
1912
1913 default:
1914 s = splnet();
1915 error = ether_ioctl(ifp, cmd, data);
1916 splx(s);
1917 }
1918
1919 if (error == ENETRESET)
1920 error = ixl_iff(sc);
1921
1922 return error;
1923 }
1924
1925 static enum i40e_mac_type
1926 ixl_mactype(pci_product_id_t id)
1927 {
1928
1929 switch (id) {
1930 case PCI_PRODUCT_INTEL_XL710_SFP:
1931 case PCI_PRODUCT_INTEL_XL710_KX_B:
1932 case PCI_PRODUCT_INTEL_XL710_KX_C:
1933 case PCI_PRODUCT_INTEL_XL710_QSFP_A:
1934 case PCI_PRODUCT_INTEL_XL710_QSFP_B:
1935 case PCI_PRODUCT_INTEL_XL710_QSFP_C:
1936 case PCI_PRODUCT_INTEL_X710_10G_T:
1937 case PCI_PRODUCT_INTEL_XL710_20G_BP_1:
1938 case PCI_PRODUCT_INTEL_XL710_20G_BP_2:
1939 case PCI_PRODUCT_INTEL_X710_T4_10G:
1940 case PCI_PRODUCT_INTEL_XXV710_25G_BP:
1941 case PCI_PRODUCT_INTEL_XXV710_25G_SFP28:
1942 return I40E_MAC_XL710;
1943
1944 case PCI_PRODUCT_INTEL_X722_KX:
1945 case PCI_PRODUCT_INTEL_X722_QSFP:
1946 case PCI_PRODUCT_INTEL_X722_SFP:
1947 case PCI_PRODUCT_INTEL_X722_1G_BASET:
1948 case PCI_PRODUCT_INTEL_X722_10G_BASET:
1949 case PCI_PRODUCT_INTEL_X722_I_SFP:
1950 return I40E_MAC_X722;
1951 }
1952
1953 return I40E_MAC_GENERIC;
1954 }
1955
1956 static void
1957 ixl_pci_csr_setup(pci_chipset_tag_t pc, pcitag_t tag)
1958 {
1959 pcireg_t csr;
1960
1961 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
1962 csr |= (PCI_COMMAND_MASTER_ENABLE |
1963 PCI_COMMAND_MEM_ENABLE);
1964 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
1965 }
1966
1967 static inline void *
1968 ixl_hmc_kva(struct ixl_softc *sc, enum ixl_hmc_types type, unsigned int i)
1969 {
1970 uint8_t *kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
1971 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
1972
1973 if (i >= e->hmc_count)
1974 return NULL;
1975
1976 kva += e->hmc_base;
1977 kva += i * e->hmc_size;
1978
1979 return kva;
1980 }
1981
1982 static inline size_t
1983 ixl_hmc_len(struct ixl_softc *sc, enum ixl_hmc_types type)
1984 {
1985 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
1986
1987 return e->hmc_size;
1988 }
1989
1990 static void
1991 ixl_enable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp)
1992 {
1993 struct ixl_rx_ring *rxr = qp->qp_rxr;
1994
1995 ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid),
1996 I40E_PFINT_DYN_CTLN_INTENA_MASK |
1997 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1998 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
1999 ixl_flush(sc);
2000 }
2001
2002 static void
2003 ixl_disable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp)
2004 {
2005 struct ixl_rx_ring *rxr = qp->qp_rxr;
2006
2007 ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid),
2008 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
2009 ixl_flush(sc);
2010 }
2011
2012 static void
2013 ixl_enable_other_intr(struct ixl_softc *sc)
2014 {
2015
2016 ixl_wr(sc, I40E_PFINT_DYN_CTL0,
2017 I40E_PFINT_DYN_CTL0_INTENA_MASK |
2018 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2019 (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
2020 ixl_flush(sc);
2021 }
2022
2023 static void
2024 ixl_disable_other_intr(struct ixl_softc *sc)
2025 {
2026
2027 ixl_wr(sc, I40E_PFINT_DYN_CTL0,
2028 (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
2029 ixl_flush(sc);
2030 }
2031
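/*
 * Reprogram the VSI and the Tx/Rx queue contexts in the HMC, refill the
 * Rx rings and enable every queue pair.  Called with the configuration
 * lock held from ixl_init_locked(); returns ETIMEDOUT if a queue fails
 * to report itself enabled.
 */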
2032 static int
2033 ixl_reinit(struct ixl_softc *sc)
2034 {
2035 struct ixl_rx_ring *rxr;
2036 struct ixl_tx_ring *txr;
2037 unsigned int i;
2038 uint32_t reg;
2039
2040 KASSERT(mutex_owned(&sc->sc_cfg_lock));
2041
2042 if (ixl_get_vsi(sc) != 0)
2043 return EIO;
2044
2045 if (ixl_set_vsi(sc) != 0)
2046 return EIO;
2047
2048 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2049 txr = sc->sc_qps[i].qp_txr;
2050 rxr = sc->sc_qps[i].qp_rxr;
2051
2052 ixl_txr_config(sc, txr);
2053 ixl_rxr_config(sc, rxr);
2054 }
2055
2056 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
2057 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_PREWRITE);
2058
2059 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2060 txr = sc->sc_qps[i].qp_txr;
2061 rxr = sc->sc_qps[i].qp_rxr;
2062
2063 ixl_wr(sc, I40E_QTX_CTL(i), I40E_QTX_CTL_PF_QUEUE |
2064 (sc->sc_pf_id << I40E_QTX_CTL_PF_INDX_SHIFT));
2065 ixl_flush(sc);
2066
2067 ixl_wr(sc, txr->txr_tail, txr->txr_prod);
2068 ixl_wr(sc, rxr->rxr_tail, rxr->rxr_prod);
2069
2070 /* ixl_rxfill() needs lock held */
2071 mutex_enter(&rxr->rxr_lock);
2072 ixl_rxfill(sc, rxr);
2073 mutex_exit(&rxr->rxr_lock);
2074
2075 reg = ixl_rd(sc, I40E_QRX_ENA(i));
2076 SET(reg, I40E_QRX_ENA_QENA_REQ_MASK);
2077 ixl_wr(sc, I40E_QRX_ENA(i), reg);
2078 if (ixl_rxr_enabled(sc, rxr) != 0)
2079 goto stop;
2080
2081 ixl_txr_qdis(sc, txr, 1);
2082
2083 reg = ixl_rd(sc, I40E_QTX_ENA(i));
2084 SET(reg, I40E_QTX_ENA_QENA_REQ_MASK);
2085 ixl_wr(sc, I40E_QTX_ENA(i), reg);
2086
2087 if (ixl_txr_enabled(sc, txr) != 0)
2088 goto stop;
2089 }
2090
2091 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
2092 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE);
2093
2094 return 0;
2095
2096 stop:
2097 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
2098 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE);
2099
2100 return ETIMEDOUT;
2101 }
2102
2103 static int
2104 ixl_init_locked(struct ixl_softc *sc)
2105 {
2106 struct ifnet *ifp = &sc->sc_ec.ec_if;
2107 unsigned int i;
2108 int error, eccap_change;
2109
2110 KASSERT(mutex_owned(&sc->sc_cfg_lock));
2111
2112 if (ISSET(ifp->if_flags, IFF_RUNNING))
2113 ixl_stop_locked(sc);
2114
2115 if (sc->sc_dead) {
2116 return ENXIO;
2117 }
2118
2119 eccap_change = sc->sc_ec.ec_capenable ^ sc->sc_cur_ec_capenable;
2120 if (ISSET(eccap_change, ETHERCAP_VLAN_HWTAGGING))
2121 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWTAGGING;
2122
2123 if (ISSET(eccap_change, ETHERCAP_VLAN_HWFILTER)) {
2124 if (ixl_update_macvlan(sc) == 0) {
2125 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWFILTER;
2126 } else {
2127 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
2128 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER);
2129 }
2130 }
2131
2132 if (sc->sc_intrtype != PCI_INTR_TYPE_MSIX)
2133 sc->sc_nqueue_pairs = 1;
2134 else
2135 sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max;
2136
2137 error = ixl_reinit(sc);
2138 if (error) {
2139 ixl_stop_locked(sc);
2140 return error;
2141 }
2142
2143 SET(ifp->if_flags, IFF_RUNNING);
2144 CLR(ifp->if_flags, IFF_OACTIVE);
2145
2146 ixl_config_rss(sc);
2147 ixl_config_queue_intr(sc);
2148
2149 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2150 ixl_enable_queue_intr(sc, &sc->sc_qps[i]);
2151 }
2152
2153 error = ixl_iff(sc);
2154 if (error) {
2155 ixl_stop_locked(sc);
2156 return error;
2157 }
2158
2159 callout_schedule(&sc->sc_stats_callout, mstohz(sc->sc_stats_intval));
2160
2161 return 0;
2162 }
2163
2164 static int
2165 ixl_init(struct ifnet *ifp)
2166 {
2167 struct ixl_softc *sc = ifp->if_softc;
2168 int error;
2169
2170 mutex_enter(&sc->sc_cfg_lock);
2171 error = ixl_init_locked(sc);
2172 mutex_exit(&sc->sc_cfg_lock);
2173
2174 if (error == 0)
2175 (void)ixl_get_link_status(sc);
2176
2177 return error;
2178 }
2179
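/*
 * Synchronize the VSI's unicast/multicast/broadcast promiscuous settings
 * with the current interface flags, and move the primary MAC filter if
 * the link-layer address has changed since the last call.
 */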
2180 static int
2181 ixl_iff(struct ixl_softc *sc)
2182 {
2183 struct ifnet *ifp = &sc->sc_ec.ec_if;
2184 struct ixl_atq iatq;
2185 struct ixl_aq_desc *iaq;
2186 struct ixl_aq_vsi_promisc_param *param;
2187 uint16_t flag_add, flag_del;
2188 int error;
2189
2190 if (!ISSET(ifp->if_flags, IFF_RUNNING))
2191 return 0;
2192
2193 memset(&iatq, 0, sizeof(iatq));
2194
2195 iaq = &iatq.iatq_desc;
2196 iaq->iaq_opcode = htole16(IXL_AQ_OP_SET_VSI_PROMISC);
2197
2198 param = (struct ixl_aq_vsi_promisc_param *)&iaq->iaq_param;
2199 param->flags = htole16(0);
2200
2201 if (!ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)
2202 || ISSET(ifp->if_flags, IFF_PROMISC)) {
2203 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_BCAST |
2204 IXL_AQ_VSI_PROMISC_FLAG_VLAN);
2205 }
2206
2207 if (ISSET(ifp->if_flags, IFF_PROMISC)) {
2208 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
2209 IXL_AQ_VSI_PROMISC_FLAG_MCAST);
2210 } else if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
2211 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_MCAST);
2212 }
2213 param->valid_flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
2214 IXL_AQ_VSI_PROMISC_FLAG_MCAST | IXL_AQ_VSI_PROMISC_FLAG_BCAST |
2215 IXL_AQ_VSI_PROMISC_FLAG_VLAN);
2216 param->seid = sc->sc_seid;
2217
2218 error = ixl_atq_exec(sc, &iatq);
2219 if (error)
2220 return error;
2221
2222 if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK))
2223 return EIO;
2224
2225 if (memcmp(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN) != 0) {
2226 if (ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)) {
2227 flag_add = IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH;
2228 flag_del = IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH;
2229 } else {
2230 flag_add = IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN;
2231 flag_del = IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN;
2232 }
2233
2234 ixl_remove_macvlan(sc, sc->sc_enaddr, 0, flag_del);
2235
2236 memcpy(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN);
2237 ixl_add_macvlan(sc, sc->sc_enaddr, 0, flag_add);
2238 }
2239 return 0;
2240 }
2241
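/*
 * Rendezvous with the Tx/Rx handlers before the rings are torn down:
 * taking and releasing each ring lock lets any handler that is already
 * running drain out, and waiting on the per-queue work item ensures no
 * deferred handler is still pending on the workqueue.
 */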
2242 static void
2243 ixl_stop_rendezvous(struct ixl_softc *sc)
2244 {
2245 struct ixl_tx_ring *txr;
2246 struct ixl_rx_ring *rxr;
2247 unsigned int i;
2248
2249 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2250 txr = sc->sc_qps[i].qp_txr;
2251 rxr = sc->sc_qps[i].qp_rxr;
2252
2253 mutex_enter(&txr->txr_lock);
2254 mutex_exit(&txr->txr_lock);
2255
2256 mutex_enter(&rxr->rxr_lock);
2257 mutex_exit(&rxr->rxr_lock);
2258
2259 sc->sc_qps[i].qp_workqueue = false;
2260 workqueue_wait(sc->sc_workq_txrx,
2261 &sc->sc_qps[i].qp_work);
2262 }
2263 }
2264
2265 static void
2266 ixl_stop_locked(struct ixl_softc *sc)
2267 {
2268 struct ifnet *ifp = &sc->sc_ec.ec_if;
2269 struct ixl_rx_ring *rxr;
2270 struct ixl_tx_ring *txr;
2271 unsigned int i;
2272 uint32_t reg;
2273
2274 KASSERT(mutex_owned(&sc->sc_cfg_lock));
2275
2276 CLR(ifp->if_flags, IFF_RUNNING | IFF_OACTIVE);
2277 callout_stop(&sc->sc_stats_callout);
2278
2279 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2280 txr = sc->sc_qps[i].qp_txr;
2281 rxr = sc->sc_qps[i].qp_rxr;
2282
2283 ixl_disable_queue_intr(sc, &sc->sc_qps[i]);
2284
2285 mutex_enter(&txr->txr_lock);
2286 ixl_txr_qdis(sc, txr, 0);
2287 mutex_exit(&txr->txr_lock);
2288 }
2289
2290 /* XXX wait at least 400 usec for all tx queues in one go */
2291 ixl_flush(sc);
2292 DELAY(500);
2293
2294 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2295 txr = sc->sc_qps[i].qp_txr;
2296 rxr = sc->sc_qps[i].qp_rxr;
2297
2298 mutex_enter(&txr->txr_lock);
2299 reg = ixl_rd(sc, I40E_QTX_ENA(i));
2300 CLR(reg, I40E_QTX_ENA_QENA_REQ_MASK);
2301 ixl_wr(sc, I40E_QTX_ENA(i), reg);
2302 mutex_exit(&txr->txr_lock);
2303
2304 mutex_enter(&rxr->rxr_lock);
2305 reg = ixl_rd(sc, I40E_QRX_ENA(i));
2306 CLR(reg, I40E_QRX_ENA_QENA_REQ_MASK);
2307 ixl_wr(sc, I40E_QRX_ENA(i), reg);
2308 mutex_exit(&rxr->rxr_lock);
2309 }
2310
2311 /* XXX short wait for all queue disables to settle */
2312 ixl_flush(sc);
2313 DELAY(50);
2314
2315 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2316 txr = sc->sc_qps[i].qp_txr;
2317 rxr = sc->sc_qps[i].qp_rxr;
2318
2319 mutex_enter(&txr->txr_lock);
2320 if (ixl_txr_disabled(sc, txr) != 0) {
2321 mutex_exit(&txr->txr_lock);
2322 goto die;
2323 }
2324 mutex_exit(&txr->txr_lock);
2325
2326 mutex_enter(&rxr->rxr_lock);
2327 if (ixl_rxr_disabled(sc, rxr) != 0) {
2328 mutex_exit(&rxr->rxr_lock);
2329 goto die;
2330 }
2331 mutex_exit(&rxr->rxr_lock);
2332 }
2333
2334 ixl_stop_rendezvous(sc);
2335
2336 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2337 txr = sc->sc_qps[i].qp_txr;
2338 rxr = sc->sc_qps[i].qp_rxr;
2339
2340 mutex_enter(&txr->txr_lock);
2341 ixl_txr_unconfig(sc, txr);
2342 mutex_exit(&txr->txr_lock);
2343
2344 mutex_enter(&rxr->rxr_lock);
2345 ixl_rxr_unconfig(sc, rxr);
2346 mutex_exit(&rxr->rxr_lock);
2347
2348 ixl_txr_clean(sc, txr);
2349 ixl_rxr_clean(sc, rxr);
2350 }
2351
2352 return;
2353 die:
2354 sc->sc_dead = true;
2355 	log(LOG_CRIT, "%s: failed to shut down rings\n",
2356 device_xname(sc->sc_dev));
2357 return;
2358 }
2359
2360 static void
2361 ixl_stop(struct ifnet *ifp, int disable)
2362 {
2363 struct ixl_softc *sc = ifp->if_softc;
2364
2365 mutex_enter(&sc->sc_cfg_lock);
2366 ixl_stop_locked(sc);
2367 mutex_exit(&sc->sc_cfg_lock);
2368 }
2369
2370 static int
2371 ixl_queue_pairs_alloc(struct ixl_softc *sc)
2372 {
2373 struct ixl_queue_pair *qp;
2374 unsigned int i;
2375 size_t sz;
2376
2377 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2378 sc->sc_qps = kmem_zalloc(sz, KM_SLEEP);
2379
2380 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2381 qp = &sc->sc_qps[i];
2382
2383 qp->qp_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
2384 ixl_handle_queue, qp);
2385 if (qp->qp_si == NULL)
2386 goto free;
2387
2388 qp->qp_txr = ixl_txr_alloc(sc, i);
2389 if (qp->qp_txr == NULL)
2390 goto free;
2391
2392 qp->qp_rxr = ixl_rxr_alloc(sc, i);
2393 if (qp->qp_rxr == NULL)
2394 goto free;
2395
2396 qp->qp_sc = sc;
2397 snprintf(qp->qp_name, sizeof(qp->qp_name),
2398 "%s-TXRX%d", device_xname(sc->sc_dev), i);
2399 }
2400
2401 return 0;
2402 free:
2403 if (sc->sc_qps != NULL) {
2404 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2405 qp = &sc->sc_qps[i];
2406
2407 if (qp->qp_txr != NULL)
2408 ixl_txr_free(sc, qp->qp_txr);
2409 if (qp->qp_rxr != NULL)
2410 ixl_rxr_free(sc, qp->qp_rxr);
2411 if (qp->qp_si != NULL)
2412 softint_disestablish(qp->qp_si);
2413 }
2414
2415 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2416 kmem_free(sc->sc_qps, sz);
2417 sc->sc_qps = NULL;
2418 }
2419
2420 return -1;
2421 }
2422
2423 static void
2424 ixl_queue_pairs_free(struct ixl_softc *sc)
2425 {
2426 struct ixl_queue_pair *qp;
2427 unsigned int i;
2428 size_t sz;
2429
2430 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2431 qp = &sc->sc_qps[i];
2432 ixl_txr_free(sc, qp->qp_txr);
2433 ixl_rxr_free(sc, qp->qp_rxr);
2434 softint_disestablish(qp->qp_si);
2435 }
2436
2437 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2438 kmem_free(sc->sc_qps, sz);
2439 sc->sc_qps = NULL;
2440 }
2441
2442 static struct ixl_tx_ring *
2443 ixl_txr_alloc(struct ixl_softc *sc, unsigned int qid)
2444 {
2445 struct ixl_tx_ring *txr = NULL;
2446 struct ixl_tx_map *maps = NULL, *txm;
2447 unsigned int i;
2448
2449 txr = kmem_zalloc(sizeof(*txr), KM_SLEEP);
2450 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_tx_ring_ndescs,
2451 KM_SLEEP);
2452
2453 if (ixl_dmamem_alloc(sc, &txr->txr_mem,
2454 sizeof(struct ixl_tx_desc) * sc->sc_tx_ring_ndescs,
2455 IXL_TX_QUEUE_ALIGN) != 0)
2456 goto free;
2457
2458 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2459 txm = &maps[i];
2460
2461 if (bus_dmamap_create(sc->sc_dmat, IXL_TX_PKT_MAXSIZE,
2462 IXL_TX_PKT_DESCS, IXL_TX_PKT_MAXSIZE, 0,
2463 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &txm->txm_map) != 0)
2464 goto uncreate;
2465
2466 txm->txm_eop = -1;
2467 txm->txm_m = NULL;
2468 }
2469
2470 txr->txr_cons = txr->txr_prod = 0;
2471 txr->txr_maps = maps;
2472
2473 txr->txr_intrq = pcq_create(sc->sc_tx_ring_ndescs, KM_NOSLEEP);
2474 if (txr->txr_intrq == NULL)
2475 goto uncreate;
2476
2477 txr->txr_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
2478 ixl_deferred_transmit, txr);
2479 if (txr->txr_si == NULL)
2480 goto destroy_pcq;
2481
2482 txr->txr_tail = I40E_QTX_TAIL(qid);
2483 txr->txr_qid = qid;
2484 txr->txr_sc = sc;
2485 mutex_init(&txr->txr_lock, MUTEX_DEFAULT, IPL_NET);
2486
2487 return txr;
2488
2489 destroy_pcq:
2490 pcq_destroy(txr->txr_intrq);
2491 uncreate:
2492 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2493 txm = &maps[i];
2494
2495 if (txm->txm_map == NULL)
2496 continue;
2497
2498 bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2499 }
2500
2501 ixl_dmamem_free(sc, &txr->txr_mem);
2502 free:
2503 kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs);
2504 kmem_free(txr, sizeof(*txr));
2505
2506 return NULL;
2507 }
2508
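/*
 * Set or clear the pre-disable request for a Tx queue via
 * GLLAN_TXPRE_QDIS.  The registers are banked in groups of 128 queues,
 * so the global queue number selects the register and the remainder is
 * written into the queue index field.
 */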
2509 static void
2510 ixl_txr_qdis(struct ixl_softc *sc, struct ixl_tx_ring *txr, int enable)
2511 {
2512 unsigned int qid;
2513 bus_size_t reg;
2514 uint32_t r;
2515
2516 qid = txr->txr_qid + sc->sc_base_queue;
2517 reg = I40E_GLLAN_TXPRE_QDIS(qid / 128);
2518 qid %= 128;
2519
2520 r = ixl_rd(sc, reg);
2521 CLR(r, I40E_GLLAN_TXPRE_QDIS_QINDX_MASK);
2522 SET(r, qid << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
2523 SET(r, enable ? I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK :
2524 I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK);
2525 ixl_wr(sc, reg, r);
2526 }
2527
2528 static void
2529 ixl_txr_config(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2530 {
2531 struct ixl_hmc_txq txq;
2532 struct ixl_aq_vsi_data *data = IXL_DMA_KVA(&sc->sc_scratch);
2533 void *hmc;
2534
2535 memset(&txq, 0, sizeof(txq));
2536 txq.head = htole16(txr->txr_cons);
2537 txq.new_context = 1;
2538 txq.base = htole64(IXL_DMA_DVA(&txr->txr_mem) / IXL_HMC_TXQ_BASE_UNIT);
2539 txq.head_wb_ena = IXL_HMC_TXQ_DESC_WB;
2540 txq.qlen = htole16(sc->sc_tx_ring_ndescs);
2541 txq.tphrdesc_ena = 0;
2542 txq.tphrpacket_ena = 0;
2543 txq.tphwdesc_ena = 0;
2544 txq.rdylist = data->qs_handle[0];
2545
2546 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2547 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2548 ixl_hmc_pack(hmc, &txq, ixl_hmc_pack_txq,
2549 __arraycount(ixl_hmc_pack_txq));
2550 }
2551
2552 static void
2553 ixl_txr_unconfig(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2554 {
2555 void *hmc;
2556
2557 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2558 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2559 txr->txr_cons = txr->txr_prod = 0;
2560 }
2561
2562 static void
2563 ixl_txr_clean(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2564 {
2565 struct ixl_tx_map *maps, *txm;
2566 bus_dmamap_t map;
2567 unsigned int i;
2568
2569 maps = txr->txr_maps;
2570 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2571 txm = &maps[i];
2572
2573 if (txm->txm_m == NULL)
2574 continue;
2575
2576 map = txm->txm_map;
2577 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2578 BUS_DMASYNC_POSTWRITE);
2579 bus_dmamap_unload(sc->sc_dmat, map);
2580
2581 m_freem(txm->txm_m);
2582 txm->txm_m = NULL;
2583 }
2584 }
2585
2586 static int
2587 ixl_txr_enabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2588 {
2589 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2590 uint32_t reg;
2591 int i;
2592
2593 for (i = 0; i < 10; i++) {
2594 reg = ixl_rd(sc, ena);
2595 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK))
2596 return 0;
2597
2598 delaymsec(10);
2599 }
2600
2601 return ETIMEDOUT;
2602 }
2603
2604 static int
2605 ixl_txr_disabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2606 {
2607 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2608 uint32_t reg;
2609 int i;
2610
2611 KASSERT(mutex_owned(&txr->txr_lock));
2612
2613 for (i = 0; i < 10; i++) {
2614 reg = ixl_rd(sc, ena);
2615 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK) == 0)
2616 return 0;
2617
2618 delaymsec(10);
2619 }
2620
2621 return ETIMEDOUT;
2622 }
2623
2624 static void
2625 ixl_txr_free(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2626 {
2627 struct ixl_tx_map *maps, *txm;
2628 struct mbuf *m;
2629 unsigned int i;
2630
2631 softint_disestablish(txr->txr_si);
2632 while ((m = pcq_get(txr->txr_intrq)) != NULL)
2633 m_freem(m);
2634 pcq_destroy(txr->txr_intrq);
2635
2636 maps = txr->txr_maps;
2637 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2638 txm = &maps[i];
2639
2640 bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2641 }
2642
2643 ixl_dmamem_free(sc, &txr->txr_mem);
2644 mutex_destroy(&txr->txr_lock);
2645 kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs);
2646 kmem_free(txr, sizeof(*txr));
2647 }
2648
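/*
 * DMA-load an outgoing mbuf chain.  If the chain has too many segments
 * for the map (EFBIG), compact it with m_defrag() and retry once,
 * accounting the result in the ring's defrag event counters.
 */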
2649 static inline int
2650 ixl_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf **m0,
2651 struct ixl_tx_ring *txr)
2652 {
2653 struct mbuf *m;
2654 int error;
2655
2656 KASSERT(mutex_owned(&txr->txr_lock));
2657
2658 m = *m0;
2659
2660 error = bus_dmamap_load_mbuf(dmat, map, m,
2661 BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT);
2662 if (error != EFBIG)
2663 return error;
2664
2665 m = m_defrag(m, M_DONTWAIT);
2666 if (m != NULL) {
2667 *m0 = m;
2668 txr->txr_defragged.ev_count++;
2669
2670 error = bus_dmamap_load_mbuf(dmat, map, m,
2671 BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT);
2672 } else {
2673 txr->txr_defrag_failed.ev_count++;
2674 error = ENOBUFS;
2675 }
2676
2677 return error;
2678 }
2679
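/*
 * Derive the checksum-offload bits of the Tx data descriptor from the
 * mbuf's csum_flags.  The MAC header length is encoded in 2-byte units
 * and the IP and L4 header lengths in 4-byte units, hence the >> 1 and
 * >> 2 conversions below.
 */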
2680 static inline int
2681 ixl_tx_setup_offloads(struct mbuf *m, uint64_t *cmd_txd)
2682 {
2683 struct ether_header *eh;
2684 size_t len;
2685 uint64_t cmd;
2686
2687 cmd = 0;
2688
2689 eh = mtod(m, struct ether_header *);
2690 switch (htons(eh->ether_type)) {
2691 case ETHERTYPE_IP:
2692 case ETHERTYPE_IPV6:
2693 len = ETHER_HDR_LEN;
2694 break;
2695 case ETHERTYPE_VLAN:
2696 len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2697 break;
2698 default:
2699 len = 0;
2700 }
2701 cmd |= ((len >> 1) << IXL_TX_DESC_MACLEN_SHIFT);
2702
2703 if (m->m_pkthdr.csum_flags &
2704 (M_CSUM_TSOv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
2705 cmd |= IXL_TX_DESC_CMD_IIPT_IPV4;
2706 }
2707 if (m->m_pkthdr.csum_flags & M_CSUM_IPv4) {
2708 cmd |= IXL_TX_DESC_CMD_IIPT_IPV4_CSUM;
2709 }
2710
2711 if (m->m_pkthdr.csum_flags &
2712 (M_CSUM_TSOv6 | M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
2713 cmd |= IXL_TX_DESC_CMD_IIPT_IPV6;
2714 }
2715
2716 switch (cmd & IXL_TX_DESC_CMD_IIPT_MASK) {
2717 case IXL_TX_DESC_CMD_IIPT_IPV4:
2718 case IXL_TX_DESC_CMD_IIPT_IPV4_CSUM:
2719 len = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data);
2720 break;
2721 case IXL_TX_DESC_CMD_IIPT_IPV6:
2722 len = M_CSUM_DATA_IPv6_IPHL(m->m_pkthdr.csum_data);
2723 break;
2724 default:
2725 len = 0;
2726 }
2727 cmd |= ((len >> 2) << IXL_TX_DESC_IPLEN_SHIFT);
2728
2729 if (m->m_pkthdr.csum_flags &
2730 (M_CSUM_TSOv4 | M_CSUM_TSOv6 | M_CSUM_TCPv4 | M_CSUM_TCPv6)) {
2731 len = sizeof(struct tcphdr);
2732 cmd |= IXL_TX_DESC_CMD_L4T_EOFT_TCP;
2733 } else if (m->m_pkthdr.csum_flags & (M_CSUM_UDPv4 | M_CSUM_UDPv6)) {
2734 len = sizeof(struct udphdr);
2735 cmd |= IXL_TX_DESC_CMD_L4T_EOFT_UDP;
2736 } else {
2737 len = 0;
2738 }
2739 cmd |= ((len >> 2) << IXL_TX_DESC_L4LEN_SHIFT);
2740
2741 *cmd_txd |= cmd;
2742 return 0;
2743 }
2744
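/*
 * Common transmit path shared by if_start (is_transmit == false, pulling
 * from if_snd) and if_transmit (is_transmit == true, pulling from the
 * per-queue pcq).  Each packet is written as one data descriptor per DMA
 * segment with EOP|RS set on the last one, and the tail register is only
 * bumped once at the end if anything was queued.
 */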
2745 static void
2746 ixl_tx_common_locked(struct ifnet *ifp, struct ixl_tx_ring *txr,
2747 bool is_transmit)
2748 {
2749 struct ixl_softc *sc = ifp->if_softc;
2750 struct ixl_tx_desc *ring, *txd;
2751 struct ixl_tx_map *txm;
2752 bus_dmamap_t map;
2753 struct mbuf *m;
2754 uint64_t cmd, cmd_txd;
2755 unsigned int prod, free, last, i;
2756 unsigned int mask;
2757 int post = 0;
2758
2759 KASSERT(mutex_owned(&txr->txr_lock));
2760
2761 if (!ISSET(ifp->if_flags, IFF_RUNNING)
2762 || (!is_transmit && ISSET(ifp->if_flags, IFF_OACTIVE))) {
2763 if (!is_transmit)
2764 IFQ_PURGE(&ifp->if_snd);
2765 return;
2766 }
2767
2768 prod = txr->txr_prod;
2769 free = txr->txr_cons;
2770 if (free <= prod)
2771 free += sc->sc_tx_ring_ndescs;
2772 free -= prod;
2773
2774 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2775 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTWRITE);
2776
2777 ring = IXL_DMA_KVA(&txr->txr_mem);
2778 mask = sc->sc_tx_ring_ndescs - 1;
2779 last = prod;
2780 cmd = 0;
2781 txd = NULL;
2782
2783 for (;;) {
2784 if (free <= IXL_TX_PKT_DESCS) {
2785 if (!is_transmit)
2786 SET(ifp->if_flags, IFF_OACTIVE);
2787 break;
2788 }
2789
2790 if (is_transmit)
2791 m = pcq_get(txr->txr_intrq);
2792 else
2793 IFQ_DEQUEUE(&ifp->if_snd, m);
2794
2795 if (m == NULL)
2796 break;
2797
2798 txm = &txr->txr_maps[prod];
2799 map = txm->txm_map;
2800
2801 if (ixl_load_mbuf(sc->sc_dmat, map, &m, txr) != 0) {
2802 if_statinc(ifp, if_oerrors);
2803 m_freem(m);
2804 continue;
2805 }
2806
2807 cmd_txd = 0;
2808 if (m->m_pkthdr.csum_flags & IXL_CSUM_ALL_OFFLOAD) {
2809 ixl_tx_setup_offloads(m, &cmd_txd);
2810 }
2811
2812 if (vlan_has_tag(m)) {
2813 cmd_txd |= (uint64_t)vlan_get_tag(m) <<
2814 IXL_TX_DESC_L2TAG1_SHIFT;
2815 cmd_txd |= IXL_TX_DESC_CMD_IL2TAG1;
2816 }
2817
2818 bus_dmamap_sync(sc->sc_dmat, map, 0,
2819 map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2820
2821 for (i = 0; i < (unsigned int)map->dm_nsegs; i++) {
2822 txd = &ring[prod];
2823
2824 cmd = (uint64_t)map->dm_segs[i].ds_len <<
2825 IXL_TX_DESC_BSIZE_SHIFT;
2826 cmd |= IXL_TX_DESC_DTYPE_DATA | IXL_TX_DESC_CMD_ICRC;
2827 cmd |= cmd_txd;
2828
2829 txd->addr = htole64(map->dm_segs[i].ds_addr);
2830 txd->cmd = htole64(cmd);
2831
2832 last = prod;
2833
2834 prod++;
2835 prod &= mask;
2836 }
2837 cmd |= IXL_TX_DESC_CMD_EOP | IXL_TX_DESC_CMD_RS;
2838 txd->cmd = htole64(cmd);
2839
2840 txm->txm_m = m;
2841 txm->txm_eop = last;
2842
2843 bpf_mtap(ifp, m, BPF_D_OUT);
2844
2845 free -= i;
2846 post = 1;
2847 }
2848
2849 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2850 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREWRITE);
2851
2852 if (post) {
2853 txr->txr_prod = prod;
2854 ixl_wr(sc, txr->txr_tail, prod);
2855 }
2856 }
2857
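/*
 * Reclaim completed Tx descriptors, processing at most txlimit packets
 * per call.  Returns nonzero when the limit was hit and more work
 * remains, so the caller reschedules itself instead of re-enabling the
 * queue interrupt.
 */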
2858 static int
2859 ixl_txeof(struct ixl_softc *sc, struct ixl_tx_ring *txr, u_int txlimit)
2860 {
2861 struct ifnet *ifp = &sc->sc_ec.ec_if;
2862 struct ixl_tx_desc *ring, *txd;
2863 struct ixl_tx_map *txm;
2864 struct mbuf *m;
2865 bus_dmamap_t map;
2866 unsigned int cons, prod, last;
2867 unsigned int mask;
2868 uint64_t dtype;
2869 int done = 0, more = 0;
2870
2871 KASSERT(mutex_owned(&txr->txr_lock));
2872
2873 prod = txr->txr_prod;
2874 cons = txr->txr_cons;
2875
2876 if (cons == prod)
2877 return 0;
2878
2879 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2880 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTREAD);
2881
2882 ring = IXL_DMA_KVA(&txr->txr_mem);
2883 mask = sc->sc_tx_ring_ndescs - 1;
2884
2885 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
2886
2887 do {
2888 if (txlimit-- <= 0) {
2889 more = 1;
2890 break;
2891 }
2892
2893 txm = &txr->txr_maps[cons];
2894 last = txm->txm_eop;
2895 txd = &ring[last];
2896
2897 dtype = txd->cmd & htole64(IXL_TX_DESC_DTYPE_MASK);
2898 if (dtype != htole64(IXL_TX_DESC_DTYPE_DONE))
2899 break;
2900
2901 map = txm->txm_map;
2902
2903 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2904 BUS_DMASYNC_POSTWRITE);
2905 bus_dmamap_unload(sc->sc_dmat, map);
2906
2907 m = txm->txm_m;
2908 if (m != NULL) {
2909 if_statinc_ref(nsr, if_opackets);
2910 if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
2911 if (ISSET(m->m_flags, M_MCAST))
2912 if_statinc_ref(nsr, if_omcasts);
2913 m_freem(m);
2914 }
2915
2916 txm->txm_m = NULL;
2917 txm->txm_eop = -1;
2918
2919 cons = last + 1;
2920 cons &= mask;
2921 done = 1;
2922 } while (cons != prod);
2923
2924 IF_STAT_PUTREF(ifp);
2925
2926 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2927 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREREAD);
2928
2929 txr->txr_cons = cons;
2930
2931 if (done) {
2932 softint_schedule(txr->txr_si);
2933 if (txr->txr_qid == 0) {
2934 CLR(ifp->if_flags, IFF_OACTIVE);
2935 if_schedule_deferred_start(ifp);
2936 }
2937 }
2938
2939 return more;
2940 }
2941
2942 static void
2943 ixl_start(struct ifnet *ifp)
2944 {
2945 struct ixl_softc *sc;
2946 struct ixl_tx_ring *txr;
2947
2948 sc = ifp->if_softc;
2949 txr = sc->sc_qps[0].qp_txr;
2950
2951 mutex_enter(&txr->txr_lock);
2952 ixl_tx_common_locked(ifp, txr, false);
2953 mutex_exit(&txr->txr_lock);
2954 }
2955
2956 static inline unsigned int
2957 ixl_select_txqueue(struct ixl_softc *sc, struct mbuf *m)
2958 {
2959 u_int cpuid;
2960
2961 cpuid = cpu_index(curcpu());
2962
2963 return (unsigned int)(cpuid % sc->sc_nqueue_pairs);
2964 }
2965
2966 static int
2967 ixl_transmit(struct ifnet *ifp, struct mbuf *m)
2968 {
2969 struct ixl_softc *sc;
2970 struct ixl_tx_ring *txr;
2971 unsigned int qid;
2972
2973 sc = ifp->if_softc;
2974 qid = ixl_select_txqueue(sc, m);
2975
2976 txr = sc->sc_qps[qid].qp_txr;
2977
2978 if (__predict_false(!pcq_put(txr->txr_intrq, m))) {
2979 mutex_enter(&txr->txr_lock);
2980 txr->txr_pcqdrop.ev_count++;
2981 mutex_exit(&txr->txr_lock);
2982
2983 m_freem(m);
2984 return ENOBUFS;
2985 }
2986
2987 if (mutex_tryenter(&txr->txr_lock)) {
2988 ixl_tx_common_locked(ifp, txr, true);
2989 mutex_exit(&txr->txr_lock);
2990 } else {
2991 kpreempt_disable();
2992 softint_schedule(txr->txr_si);
2993 kpreempt_enable();
2994 }
2995
2996 return 0;
2997 }
2998
2999 static void
3000 ixl_deferred_transmit(void *xtxr)
3001 {
3002 struct ixl_tx_ring *txr = xtxr;
3003 struct ixl_softc *sc = txr->txr_sc;
3004 struct ifnet *ifp = &sc->sc_ec.ec_if;
3005
3006 mutex_enter(&txr->txr_lock);
3007 txr->txr_transmitdef.ev_count++;
3008 if (pcq_peek(txr->txr_intrq) != NULL)
3009 ixl_tx_common_locked(ifp, txr, true);
3010 mutex_exit(&txr->txr_lock);
3011 }
3012
3013 static struct ixl_rx_ring *
3014 ixl_rxr_alloc(struct ixl_softc *sc, unsigned int qid)
3015 {
3016 struct ixl_rx_ring *rxr = NULL;
3017 struct ixl_rx_map *maps = NULL, *rxm;
3018 unsigned int i;
3019
3020 rxr = kmem_zalloc(sizeof(*rxr), KM_SLEEP);
3021 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_rx_ring_ndescs,
3022 KM_SLEEP);
3023
3024 if (ixl_dmamem_alloc(sc, &rxr->rxr_mem,
3025 sizeof(struct ixl_rx_rd_desc_32) * sc->sc_rx_ring_ndescs,
3026 IXL_RX_QUEUE_ALIGN) != 0)
3027 goto free;
3028
3029 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
3030 rxm = &maps[i];
3031
3032 if (bus_dmamap_create(sc->sc_dmat,
3033 IXL_MCLBYTES, 1, IXL_MCLBYTES, 0,
3034 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &rxm->rxm_map) != 0)
3035 goto uncreate;
3036
3037 rxm->rxm_m = NULL;
3038 }
3039
3040 rxr->rxr_cons = rxr->rxr_prod = 0;
3041 rxr->rxr_m_head = NULL;
3042 rxr->rxr_m_tail = &rxr->rxr_m_head;
3043 rxr->rxr_maps = maps;
3044
3045 rxr->rxr_tail = I40E_QRX_TAIL(qid);
3046 rxr->rxr_qid = qid;
3047 mutex_init(&rxr->rxr_lock, MUTEX_DEFAULT, IPL_NET);
3048
3049 return rxr;
3050
3051 uncreate:
3052 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
3053 rxm = &maps[i];
3054
3055 if (rxm->rxm_map == NULL)
3056 continue;
3057
3058 bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
3059 }
3060
3061 ixl_dmamem_free(sc, &rxr->rxr_mem);
3062 free:
3063 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs);
3064 kmem_free(rxr, sizeof(*rxr));
3065
3066 return NULL;
3067 }
3068
3069 static void
3070 ixl_rxr_clean(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3071 {
3072 struct ixl_rx_map *maps, *rxm;
3073 bus_dmamap_t map;
3074 unsigned int i;
3075
3076 maps = rxr->rxr_maps;
3077 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
3078 rxm = &maps[i];
3079
3080 if (rxm->rxm_m == NULL)
3081 continue;
3082
3083 map = rxm->rxm_map;
3084 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3085 BUS_DMASYNC_POSTWRITE);
3086 bus_dmamap_unload(sc->sc_dmat, map);
3087
3088 m_freem(rxm->rxm_m);
3089 rxm->rxm_m = NULL;
3090 }
3091
3092 m_freem(rxr->rxr_m_head);
3093 rxr->rxr_m_head = NULL;
3094 rxr->rxr_m_tail = &rxr->rxr_m_head;
3095
3096 rxr->rxr_prod = rxr->rxr_cons = 0;
3097 }
3098
3099 static int
3100 ixl_rxr_enabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3101 {
3102 bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
3103 uint32_t reg;
3104 int i;
3105
3106 for (i = 0; i < 10; i++) {
3107 reg = ixl_rd(sc, ena);
3108 if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK))
3109 return 0;
3110
3111 delaymsec(10);
3112 }
3113
3114 return ETIMEDOUT;
3115 }
3116
3117 static int
3118 ixl_rxr_disabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3119 {
3120 bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
3121 uint32_t reg;
3122 int i;
3123
3124 KASSERT(mutex_owned(&rxr->rxr_lock));
3125
3126 for (i = 0; i < 10; i++) {
3127 reg = ixl_rd(sc, ena);
3128 if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK) == 0)
3129 return 0;
3130
3131 delaymsec(10);
3132 }
3133
3134 return ETIMEDOUT;
3135 }
3136
3137 static void
3138 ixl_rxr_config(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3139 {
3140 struct ixl_hmc_rxq rxq;
3141 struct ifnet *ifp = &sc->sc_ec.ec_if;
3142 uint16_t rxmax;
3143 void *hmc;
3144
3145 memset(&rxq, 0, sizeof(rxq));
3146 rxmax = ifp->if_mtu + IXL_MTU_ETHERLEN;
3147
3148 rxq.head = htole16(rxr->rxr_cons);
3149 rxq.base = htole64(IXL_DMA_DVA(&rxr->rxr_mem) / IXL_HMC_RXQ_BASE_UNIT);
3150 rxq.qlen = htole16(sc->sc_rx_ring_ndescs);
3151 rxq.dbuff = htole16(IXL_MCLBYTES / IXL_HMC_RXQ_DBUFF_UNIT);
3152 rxq.hbuff = 0;
3153 rxq.dtype = IXL_HMC_RXQ_DTYPE_NOSPLIT;
3154 rxq.dsize = IXL_HMC_RXQ_DSIZE_32;
3155 rxq.crcstrip = 1;
3156 rxq.l2sel = 1;
3157 rxq.showiv = 1;
3158 rxq.rxmax = htole16(rxmax);
3159 rxq.tphrdesc_ena = 0;
3160 rxq.tphwdesc_ena = 0;
3161 rxq.tphdata_ena = 0;
3162 rxq.tphhead_ena = 0;
3163 rxq.lrxqthresh = 0;
3164 rxq.prefena = 1;
3165
3166 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
3167 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
3168 ixl_hmc_pack(hmc, &rxq, ixl_hmc_pack_rxq,
3169 __arraycount(ixl_hmc_pack_rxq));
3170 }
3171
3172 static void
3173 ixl_rxr_unconfig(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3174 {
3175 void *hmc;
3176
3177 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
3178 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
3179 rxr->rxr_cons = rxr->rxr_prod = 0;
3180 }
3181
3182 static void
3183 ixl_rxr_free(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3184 {
3185 struct ixl_rx_map *maps, *rxm;
3186 unsigned int i;
3187
3188 maps = rxr->rxr_maps;
3189 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
3190 rxm = &maps[i];
3191
3192 bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
3193 }
3194
3195 ixl_dmamem_free(sc, &rxr->rxr_mem);
3196 mutex_destroy(&rxr->rxr_lock);
3197 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs);
3198 kmem_free(rxr, sizeof(*rxr));
3199 }
3200
3201 static inline void
3202 ixl_rx_csum(struct mbuf *m, uint64_t qword)
3203 {
3204 int flags_mask;
3205
3206 if (!ISSET(qword, IXL_RX_DESC_L3L4P)) {
3207 /* No L3 or L4 checksum was calculated */
3208 return;
3209 }
3210
3211 switch (__SHIFTOUT(qword, IXL_RX_DESC_PTYPE_MASK)) {
3212 case IXL_RX_DESC_PTYPE_IPV4FRAG:
3213 case IXL_RX_DESC_PTYPE_IPV4:
3214 case IXL_RX_DESC_PTYPE_SCTPV4:
3215 case IXL_RX_DESC_PTYPE_ICMPV4:
3216 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
3217 break;
3218 case IXL_RX_DESC_PTYPE_TCPV4:
3219 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
3220 flags_mask |= M_CSUM_TCPv4 | M_CSUM_TCP_UDP_BAD;
3221 break;
3222 case IXL_RX_DESC_PTYPE_UDPV4:
3223 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
3224 flags_mask |= M_CSUM_UDPv4 | M_CSUM_TCP_UDP_BAD;
3225 break;
3226 case IXL_RX_DESC_PTYPE_TCPV6:
3227 flags_mask = M_CSUM_TCPv6 | M_CSUM_TCP_UDP_BAD;
3228 break;
3229 case IXL_RX_DESC_PTYPE_UDPV6:
3230 flags_mask = M_CSUM_UDPv6 | M_CSUM_TCP_UDP_BAD;
3231 break;
3232 default:
3233 flags_mask = 0;
3234 }
3235
3236 m->m_pkthdr.csum_flags |= (flags_mask & (M_CSUM_IPv4 |
3237 M_CSUM_TCPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv4 | M_CSUM_UDPv6));
3238
3239 if (ISSET(qword, IXL_RX_DESC_IPE)) {
3240 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_IPv4_BAD);
3241 }
3242
3243 if (ISSET(qword, IXL_RX_DESC_L4E)) {
3244 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_TCP_UDP_BAD);
3245 }
3246 }
3247
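/*
 * Harvest received packets, processing at most rxlimit descriptors per
 * call.  Fragments without EOP are chained onto rxr_m_head until the
 * final descriptor arrives; completed packets pick up their VLAN tag and
 * checksum state from the write-back descriptor before being passed to
 * the stack.  Returns nonzero when rxlimit stopped the loop early.
 */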
3248 static int
3249 ixl_rxeof(struct ixl_softc *sc, struct ixl_rx_ring *rxr, u_int rxlimit)
3250 {
3251 struct ifnet *ifp = &sc->sc_ec.ec_if;
3252 struct ixl_rx_wb_desc_32 *ring, *rxd;
3253 struct ixl_rx_map *rxm;
3254 bus_dmamap_t map;
3255 unsigned int cons, prod;
3256 struct mbuf *m;
3257 uint64_t word, word0;
3258 unsigned int len;
3259 unsigned int mask;
3260 int done = 0, more = 0;
3261
3262 KASSERT(mutex_owned(&rxr->rxr_lock));
3263
3264 if (!ISSET(ifp->if_flags, IFF_RUNNING))
3265 return 0;
3266
3267 prod = rxr->rxr_prod;
3268 cons = rxr->rxr_cons;
3269
3270 if (cons == prod)
3271 return 0;
3272
3273 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
3274 0, IXL_DMA_LEN(&rxr->rxr_mem),
3275 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3276
3277 ring = IXL_DMA_KVA(&rxr->rxr_mem);
3278 mask = sc->sc_rx_ring_ndescs - 1;
3279
3280 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
3281
3282 do {
3283 if (rxlimit-- <= 0) {
3284 more = 1;
3285 break;
3286 }
3287
3288 rxd = &ring[cons];
3289
3290 word = le64toh(rxd->qword1);
3291
3292 if (!ISSET(word, IXL_RX_DESC_DD))
3293 break;
3294
3295 rxm = &rxr->rxr_maps[cons];
3296
3297 map = rxm->rxm_map;
3298 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3299 BUS_DMASYNC_POSTREAD);
3300 bus_dmamap_unload(sc->sc_dmat, map);
3301
3302 m = rxm->rxm_m;
3303 rxm->rxm_m = NULL;
3304
3305 KASSERT(m != NULL);
3306
3307 len = (word & IXL_RX_DESC_PLEN_MASK) >> IXL_RX_DESC_PLEN_SHIFT;
3308 m->m_len = len;
3309 m->m_pkthdr.len = 0;
3310
3311 m->m_next = NULL;
3312 *rxr->rxr_m_tail = m;
3313 rxr->rxr_m_tail = &m->m_next;
3314
3315 m = rxr->rxr_m_head;
3316 m->m_pkthdr.len += len;
3317
3318 if (ISSET(word, IXL_RX_DESC_EOP)) {
3319 word0 = le64toh(rxd->qword0);
3320
3321 if (ISSET(word, IXL_RX_DESC_L2TAG1P)) {
3322 vlan_set_tag(m,
3323 __SHIFTOUT(word0, IXL_RX_DESC_L2TAG1_MASK));
3324 }
3325
3326 if ((ifp->if_capenable & IXL_IFCAP_RXCSUM) != 0)
3327 ixl_rx_csum(m, word);
3328
3329 if (!ISSET(word,
3330 IXL_RX_DESC_RXE | IXL_RX_DESC_OVERSIZE)) {
3331 m_set_rcvif(m, ifp);
3332 if_statinc_ref(nsr, if_ipackets);
3333 if_statadd_ref(nsr, if_ibytes,
3334 m->m_pkthdr.len);
3335 if_percpuq_enqueue(ifp->if_percpuq, m);
3336 } else {
3337 if_statinc_ref(nsr, if_ierrors);
3338 m_freem(m);
3339 }
3340
3341 rxr->rxr_m_head = NULL;
3342 rxr->rxr_m_tail = &rxr->rxr_m_head;
3343 }
3344
3345 cons++;
3346 cons &= mask;
3347
3348 done = 1;
3349 } while (cons != prod);
3350
3351 if (done) {
3352 rxr->rxr_cons = cons;
3353 if (ixl_rxfill(sc, rxr) == -1)
3354 if_statinc_ref(nsr, if_iqdrops);
3355 }
3356
3357 IF_STAT_PUTREF(ifp);
3358
3359 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
3360 0, IXL_DMA_LEN(&rxr->rxr_mem),
3361 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3362
3363 return more;
3364 }
3365
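/*
 * Refill every unrefreshed Rx slot with a fresh mbuf cluster and, if at
 * least one descriptor was posted, advance the tail register.  Returns
 * -1 when there were no free slots or an mbuf could not be allocated or
 * DMA-loaded.
 */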
3366 static int
3367 ixl_rxfill(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3368 {
3369 struct ixl_rx_rd_desc_32 *ring, *rxd;
3370 struct ixl_rx_map *rxm;
3371 bus_dmamap_t map;
3372 struct mbuf *m;
3373 unsigned int prod;
3374 unsigned int slots;
3375 unsigned int mask;
3376 int post = 0, error = 0;
3377
3378 KASSERT(mutex_owned(&rxr->rxr_lock));
3379
3380 prod = rxr->rxr_prod;
3381 slots = ixl_rxr_unrefreshed(rxr->rxr_prod, rxr->rxr_cons,
3382 sc->sc_rx_ring_ndescs);
3383
3384 ring = IXL_DMA_KVA(&rxr->rxr_mem);
3385 mask = sc->sc_rx_ring_ndescs - 1;
3386
3387 if (__predict_false(slots <= 0))
3388 return -1;
3389
3390 do {
3391 rxm = &rxr->rxr_maps[prod];
3392
3393 MGETHDR(m, M_DONTWAIT, MT_DATA);
3394 if (m == NULL) {
3395 rxr->rxr_mgethdr_failed.ev_count++;
3396 error = -1;
3397 break;
3398 }
3399
3400 MCLGET(m, M_DONTWAIT);
3401 if (!ISSET(m->m_flags, M_EXT)) {
3402 rxr->rxr_mgetcl_failed.ev_count++;
3403 error = -1;
3404 m_freem(m);
3405 break;
3406 }
3407
3408 m->m_len = m->m_pkthdr.len = MCLBYTES;
3409 m_adj(m, ETHER_ALIGN);
3410
3411 map = rxm->rxm_map;
3412
3413 if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
3414 BUS_DMA_READ | BUS_DMA_NOWAIT) != 0) {
3415 rxr->rxr_mbuf_load_failed.ev_count++;
3416 error = -1;
3417 m_freem(m);
3418 break;
3419 }
3420
3421 rxm->rxm_m = m;
3422
3423 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3424 BUS_DMASYNC_PREREAD);
3425
3426 rxd = &ring[prod];
3427
3428 rxd->paddr = htole64(map->dm_segs[0].ds_addr);
3429 rxd->haddr = htole64(0);
3430
3431 prod++;
3432 prod &= mask;
3433
3434 post = 1;
3435
3436 } while (--slots);
3437
3438 if (post) {
3439 rxr->rxr_prod = prod;
3440 ixl_wr(sc, rxr->rxr_tail, prod);
3441 }
3442
3443 return error;
3444 }
3445
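/*
 * Run one pass of Tx completion and Rx processing for a queue pair under
 * the respective ring locks.  The return value packs "Tx has more work"
 * into bit 0 and "Rx has more work" into bit 1 so callers can decide
 * whether to reschedule or re-enable the interrupt.
 */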
3446 static inline int
3447 ixl_handle_queue_common(struct ixl_softc *sc, struct ixl_queue_pair *qp,
3448 u_int txlimit, struct evcnt *txevcnt,
3449 u_int rxlimit, struct evcnt *rxevcnt)
3450 {
3451 struct ixl_tx_ring *txr = qp->qp_txr;
3452 struct ixl_rx_ring *rxr = qp->qp_rxr;
3453 int txmore, rxmore;
3454 int rv;
3455
3456 mutex_enter(&txr->txr_lock);
3457 txevcnt->ev_count++;
3458 txmore = ixl_txeof(sc, txr, txlimit);
3459 mutex_exit(&txr->txr_lock);
3460
3461 mutex_enter(&rxr->rxr_lock);
3462 rxevcnt->ev_count++;
3463 rxmore = ixl_rxeof(sc, rxr, rxlimit);
3464 mutex_exit(&rxr->rxr_lock);
3465
3466 rv = txmore | (rxmore << 1);
3467
3468 return rv;
3469 }
3470
3471 static void
3472 ixl_sched_handle_queue(struct ixl_softc *sc, struct ixl_queue_pair *qp)
3473 {
3474
3475 if (qp->qp_workqueue)
3476 workqueue_enqueue(sc->sc_workq_txrx, &qp->qp_work, NULL);
3477 else
3478 softint_schedule(qp->qp_si);
3479 }
3480
3481 static int
3482 ixl_intr(void *xsc)
3483 {
3484 struct ixl_softc *sc = xsc;
3485 struct ixl_tx_ring *txr;
3486 struct ixl_rx_ring *rxr;
3487 uint32_t icr, rxintr, txintr;
3488 int rv = 0;
3489 unsigned int i;
3490
3491 KASSERT(sc != NULL);
3492
3493 ixl_enable_other_intr(sc);
3494 icr = ixl_rd(sc, I40E_PFINT_ICR0);
3495
3496 if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) {
3497 atomic_inc_64(&sc->sc_event_atq.ev_count);
3498 ixl_atq_done(sc);
3499 ixl_work_add(sc->sc_workq, &sc->sc_arq_task);
3500 rv = 1;
3501 }
3502
3503 if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) {
3504 atomic_inc_64(&sc->sc_event_link.ev_count);
3505 ixl_work_add(sc->sc_workq, &sc->sc_link_state_task);
3506 rv = 1;
3507 }
3508
3509 rxintr = icr & I40E_INTR_NOTX_RX_MASK;
3510 txintr = icr & I40E_INTR_NOTX_TX_MASK;
3511
3512 if (txintr || rxintr) {
3513 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
3514 txr = sc->sc_qps[i].qp_txr;
3515 rxr = sc->sc_qps[i].qp_rxr;
3516
3517 ixl_handle_queue_common(sc, &sc->sc_qps[i],
3518 IXL_TXRX_PROCESS_UNLIMIT, &txr->txr_intr,
3519 IXL_TXRX_PROCESS_UNLIMIT, &rxr->rxr_intr);
3520 }
3521 rv = 1;
3522 }
3523
3524 return rv;
3525 }
3526
3527 static int
3528 ixl_queue_intr(void *xqp)
3529 {
3530 struct ixl_queue_pair *qp = xqp;
3531 struct ixl_tx_ring *txr = qp->qp_txr;
3532 struct ixl_rx_ring *rxr = qp->qp_rxr;
3533 struct ixl_softc *sc = qp->qp_sc;
3534 u_int txlimit, rxlimit;
3535 int more;
3536
3537 txlimit = sc->sc_tx_intr_process_limit;
3538 rxlimit = sc->sc_rx_intr_process_limit;
3539 qp->qp_workqueue = sc->sc_txrx_workqueue;
3540
3541 more = ixl_handle_queue_common(sc, qp,
3542 txlimit, &txr->txr_intr, rxlimit, &rxr->rxr_intr);
3543
3544 if (more != 0) {
3545 ixl_sched_handle_queue(sc, qp);
3546 } else {
3547 /* for ALTQ */
3548 if (txr->txr_qid == 0)
3549 if_schedule_deferred_start(&sc->sc_ec.ec_if);
3550 softint_schedule(txr->txr_si);
3551
3552 ixl_enable_queue_intr(sc, qp);
3553 }
3554
3555 return 1;
3556 }
3557
3558 static void
3559 ixl_handle_queue_wk(struct work *wk, void *xsc)
3560 {
3561 struct ixl_queue_pair *qp;
3562
3563 qp = container_of(wk, struct ixl_queue_pair, qp_work);
3564 ixl_handle_queue(qp);
3565 }
3566
3567 static void
3568 ixl_handle_queue(void *xqp)
3569 {
3570 struct ixl_queue_pair *qp = xqp;
3571 struct ixl_softc *sc = qp->qp_sc;
3572 struct ixl_tx_ring *txr = qp->qp_txr;
3573 struct ixl_rx_ring *rxr = qp->qp_rxr;
3574 u_int txlimit, rxlimit;
3575 int more;
3576
3577 txlimit = sc->sc_tx_process_limit;
3578 rxlimit = sc->sc_rx_process_limit;
3579
3580 more = ixl_handle_queue_common(sc, qp,
3581 txlimit, &txr->txr_defer, rxlimit, &rxr->rxr_defer);
3582
3583 if (more != 0)
3584 ixl_sched_handle_queue(sc, qp);
3585 else
3586 ixl_enable_queue_intr(sc, qp);
3587 }
3588
3589 static inline void
3590 ixl_print_hmc_error(struct ixl_softc *sc, uint32_t reg)
3591 {
3592 uint32_t hmc_idx, hmc_isvf;
3593 uint32_t hmc_errtype, hmc_objtype, hmc_data;
3594
3595 hmc_idx = reg & I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK;
3596 hmc_idx = hmc_idx >> I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT;
3597 hmc_isvf = reg & I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK;
3598 hmc_isvf = hmc_isvf >> I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT;
3599 hmc_errtype = reg & I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK;
3600 hmc_errtype = hmc_errtype >> I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT;
3601 hmc_objtype = reg & I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK;
3602 hmc_objtype = hmc_objtype >> I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT;
3603 hmc_data = ixl_rd(sc, I40E_PFHMC_ERRORDATA);
3604
3605 device_printf(sc->sc_dev,
3606 "HMC Error (idx=0x%x, isvf=0x%x, err=0x%x, obj=0x%x, data=0x%x)\n",
3607 hmc_idx, hmc_isvf, hmc_errtype, hmc_objtype, hmc_data);
3608 }
3609
3610 static int
3611 ixl_other_intr(void *xsc)
3612 {
3613 struct ixl_softc *sc = xsc;
3614 uint32_t icr, mask, reg;
3615 	int rv = 0;
3616
3617 icr = ixl_rd(sc, I40E_PFINT_ICR0);
3618 mask = ixl_rd(sc, I40E_PFINT_ICR0_ENA);
3619
3620 if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) {
3621 atomic_inc_64(&sc->sc_event_atq.ev_count);
3622 ixl_atq_done(sc);
3623 ixl_work_add(sc->sc_workq, &sc->sc_arq_task);
3624 rv = 1;
3625 }
3626
3627 if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) {
3628 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3629 device_printf(sc->sc_dev, "link stat changed\n");
3630
3631 atomic_inc_64(&sc->sc_event_link.ev_count);
3632 ixl_work_add(sc->sc_workq, &sc->sc_link_state_task);
3633 rv = 1;
3634 }
3635
3636 if (ISSET(icr, I40E_PFINT_ICR0_GRST_MASK)) {
3637 CLR(mask, I40E_PFINT_ICR0_ENA_GRST_MASK);
3638 reg = ixl_rd(sc, I40E_GLGEN_RSTAT);
3639 reg = reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK;
3640 reg = reg >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
3641
3642 device_printf(sc->sc_dev, "GRST: %s\n",
3643 reg == I40E_RESET_CORER ? "CORER" :
3644 reg == I40E_RESET_GLOBR ? "GLOBR" :
3645 reg == I40E_RESET_EMPR ? "EMPR" :
3646 "POR");
3647 }
3648
3649 if (ISSET(icr, I40E_PFINT_ICR0_ECC_ERR_MASK))
3650 atomic_inc_64(&sc->sc_event_ecc_err.ev_count);
3651 if (ISSET(icr, I40E_PFINT_ICR0_PCI_EXCEPTION_MASK))
3652 atomic_inc_64(&sc->sc_event_pci_exception.ev_count);
3653 if (ISSET(icr, I40E_PFINT_ICR0_PE_CRITERR_MASK))
3654 atomic_inc_64(&sc->sc_event_crit_err.ev_count);
3655
3656 if (ISSET(icr, IXL_ICR0_CRIT_ERR_MASK)) {
3657 CLR(mask, IXL_ICR0_CRIT_ERR_MASK);
3658 device_printf(sc->sc_dev, "critical error\n");
3659 }
3660
3661 if (ISSET(icr, I40E_PFINT_ICR0_HMC_ERR_MASK)) {
3662 reg = ixl_rd(sc, I40E_PFHMC_ERRORINFO);
3663 if (ISSET(reg, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK))
3664 ixl_print_hmc_error(sc, reg);
3665 ixl_wr(sc, I40E_PFHMC_ERRORINFO, 0);
3666 }
3667
3668 ixl_wr(sc, I40E_PFINT_ICR0_ENA, mask);
3669 ixl_flush(sc);
3670 ixl_enable_other_intr(sc);
3671 return rv;
3672 }
3673
3674 static void
3675 ixl_get_link_status_done(struct ixl_softc *sc,
3676 const struct ixl_aq_desc *iaq)
3677 {
3678 struct ixl_aq_desc iaq_buf;
3679
3680 memcpy(&iaq_buf, iaq, sizeof(iaq_buf));
3681
3682 	/*
3683 	 * The ATQ lock can be released here because there is no further
3684 	 * post-processing to do for this ATQ command.
3685 	 */
3686 mutex_exit(&sc->sc_atq_lock);
3687 ixl_link_state_update(sc, &iaq_buf);
3688 mutex_enter(&sc->sc_atq_lock);
3689 }
3690
3691 static void
3692 ixl_get_link_status(void *xsc)
3693 {
3694 struct ixl_softc *sc = xsc;
3695 struct ixl_aq_desc *iaq;
3696 struct ixl_aq_link_param *param;
3697 int error;
3698
3699 mutex_enter(&sc->sc_atq_lock);
3700
3701 iaq = &sc->sc_link_state_atq.iatq_desc;
3702 memset(iaq, 0, sizeof(*iaq));
3703 iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
3704 param = (struct ixl_aq_link_param *)iaq->iaq_param;
3705 param->notify = IXL_AQ_LINK_NOTIFY;
3706
3707 error = ixl_atq_exec_locked(sc, &sc->sc_link_state_atq);
3708 ixl_atq_set(&sc->sc_link_state_atq, ixl_get_link_status_done);
3709
3710 if (error == 0) {
3711 ixl_get_link_status_done(sc, iaq);
3712 }
3713
3714 mutex_exit(&sc->sc_atq_lock);
3715 }
3716
3717 static void
3718 ixl_link_state_update(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
3719 {
3720 struct ifnet *ifp = &sc->sc_ec.ec_if;
3721 int link_state;
3722
3723 mutex_enter(&sc->sc_cfg_lock);
3724 link_state = ixl_set_link_status_locked(sc, iaq);
3725 mutex_exit(&sc->sc_cfg_lock);
3726
3727 if (ifp->if_link_state != link_state)
3728 if_link_state_change(ifp, link_state);
3729
3730 if (link_state != LINK_STATE_DOWN) {
3731 kpreempt_disable();
3732 if_schedule_deferred_start(ifp);
3733 kpreempt_enable();
3734 }
3735 }
3736
3737 static void
3738 ixl_aq_dump(const struct ixl_softc *sc, const struct ixl_aq_desc *iaq,
3739 const char *msg)
3740 {
3741 char buf[512];
3742 size_t len;
3743
3744 len = sizeof(buf);
3745 buf[--len] = '\0';
3746
3747 device_printf(sc->sc_dev, "%s\n", msg);
3748 snprintb(buf, len, IXL_AQ_FLAGS_FMT, le16toh(iaq->iaq_flags));
3749 device_printf(sc->sc_dev, "flags %s opcode %04x\n",
3750 buf, le16toh(iaq->iaq_opcode));
3751 device_printf(sc->sc_dev, "datalen %u retval %u\n",
3752 le16toh(iaq->iaq_datalen), le16toh(iaq->iaq_retval));
3753 device_printf(sc->sc_dev, "cookie %016" PRIx64 "\n", iaq->iaq_cookie);
3754 device_printf(sc->sc_dev, "%08x %08x %08x %08x\n",
3755 le32toh(iaq->iaq_param[0]), le32toh(iaq->iaq_param[1]),
3756 le32toh(iaq->iaq_param[2]), le32toh(iaq->iaq_param[3]));
3757 }
3758
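/*
 * Drain the admin receive queue of firmware events (for example link
 * status change notifications).  Consumed buffers are recycled onto the
 * idle list and the ring is refilled before the "other" interrupt is
 * re-enabled.
 */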
3759 static void
3760 ixl_arq(void *xsc)
3761 {
3762 struct ixl_softc *sc = xsc;
3763 struct ixl_aq_desc *arq, *iaq;
3764 struct ixl_aq_buf *aqb;
3765 unsigned int cons = sc->sc_arq_cons;
3766 unsigned int prod;
3767 int done = 0;
3768
3769 prod = ixl_rd(sc, sc->sc_aq_regs->arq_head) &
3770 sc->sc_aq_regs->arq_head_mask;
3771
3772 if (cons == prod)
3773 goto done;
3774
3775 arq = IXL_DMA_KVA(&sc->sc_arq);
3776
3777 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3778 0, IXL_DMA_LEN(&sc->sc_arq),
3779 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3780
3781 do {
3782 iaq = &arq[cons];
3783 aqb = sc->sc_arq_live[cons];
3784
3785 KASSERT(aqb != NULL);
3786
3787 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,
3788 BUS_DMASYNC_POSTREAD);
3789
3790 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3791 ixl_aq_dump(sc, iaq, "arq event");
3792
3793 switch (iaq->iaq_opcode) {
3794 case htole16(IXL_AQ_OP_PHY_LINK_STATUS):
3795 ixl_link_state_update(sc, iaq);
3796 break;
3797 }
3798
3799 memset(iaq, 0, sizeof(*iaq));
3800 sc->sc_arq_live[cons] = NULL;
3801 SIMPLEQ_INSERT_TAIL(&sc->sc_arq_idle, aqb, aqb_entry);
3802
3803 cons++;
3804 cons &= IXL_AQ_MASK;
3805
3806 done = 1;
3807 } while (cons != prod);
3808
3809 if (done) {
3810 sc->sc_arq_cons = cons;
3811 ixl_arq_fill(sc);
3812 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3813 0, IXL_DMA_LEN(&sc->sc_arq),
3814 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3815 }
3816
3817 done:
3818 ixl_enable_other_intr(sc);
3819 }
3820
3821 static void
3822 ixl_atq_set(struct ixl_atq *iatq,
3823 void (*fn)(struct ixl_softc *, const struct ixl_aq_desc *))
3824 {
3825
3826 iatq->iatq_fn = fn;
3827 }
3828
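/*
 * Post one admin command on the ATQ ring.  The ixl_atq pointer is stored
 * as the descriptor cookie so that ixl_atq_done_locked() can find the
 * request and run its completion callback once the firmware sets the DD
 * flag.
 */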
3829 static int
3830 ixl_atq_post_locked(struct ixl_softc *sc, struct ixl_atq *iatq)
3831 {
3832 struct ixl_aq_desc *atq, *slot;
3833 unsigned int prod, cons, prod_next;
3834
3835 /* assert locked */
3836 KASSERT(mutex_owned(&sc->sc_atq_lock));
3837
3838 atq = IXL_DMA_KVA(&sc->sc_atq);
3839 prod = sc->sc_atq_prod;
3840 cons = sc->sc_atq_cons;
3841 	prod_next = (prod + 1) & IXL_AQ_MASK;
3842
3843 if (cons == prod_next)
3844 return ENOMEM;
3845
3846 slot = &atq[prod];
3847
3848 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3849 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
3850
3851 KASSERT(iatq->iatq_fn != NULL);
3852 *slot = iatq->iatq_desc;
3853 slot->iaq_cookie = (uint64_t)((intptr_t)iatq);
3854
3855 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3856 ixl_aq_dump(sc, slot, "atq command");
3857
3858 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3859 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
3860
3861 sc->sc_atq_prod = prod_next;
3862 ixl_wr(sc, sc->sc_aq_regs->atq_tail, sc->sc_atq_prod);
3863
3864 return 0;
3865 }
3866
3867 static void
3868 ixl_atq_done_locked(struct ixl_softc *sc)
3869 {
3870 struct ixl_aq_desc *atq, *slot;
3871 struct ixl_atq *iatq;
3872 unsigned int cons;
3873 unsigned int prod;
3874
3875 KASSERT(mutex_owned(&sc->sc_atq_lock));
3876
3877 prod = sc->sc_atq_prod;
3878 cons = sc->sc_atq_cons;
3879
3880 if (prod == cons)
3881 return;
3882
3883 atq = IXL_DMA_KVA(&sc->sc_atq);
3884
3885 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3886 0, IXL_DMA_LEN(&sc->sc_atq),
3887 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3888
3889 do {
3890 slot = &atq[cons];
3891 if (!ISSET(slot->iaq_flags, htole16(IXL_AQ_DD)))
3892 break;
3893
3894 iatq = (struct ixl_atq *)((intptr_t)slot->iaq_cookie);
3895 iatq->iatq_desc = *slot;
3896
3897 memset(slot, 0, sizeof(*slot));
3898
3899 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3900 ixl_aq_dump(sc, &iatq->iatq_desc, "atq response");
3901
3902 (*iatq->iatq_fn)(sc, &iatq->iatq_desc);
3903
3904 cons++;
3905 cons &= IXL_AQ_MASK;
3906 } while (cons != prod);
3907
3908 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3909 0, IXL_DMA_LEN(&sc->sc_atq),
3910 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3911
3912 sc->sc_atq_cons = cons;
3913 }
3914
3915 static void
3916 ixl_atq_done(struct ixl_softc *sc)
3917 {
3918
3919 mutex_enter(&sc->sc_atq_lock);
3920 ixl_atq_done_locked(sc);
3921 mutex_exit(&sc->sc_atq_lock);
3922 }
3923
3924 static void
3925 ixl_wakeup(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
3926 {
3927
3928 KASSERT(mutex_owned(&sc->sc_atq_lock));
3929
3930 cv_signal(&sc->sc_atq_cv);
3931 }
3932
3933 static int
3934 ixl_atq_exec(struct ixl_softc *sc, struct ixl_atq *iatq)
3935 {
3936 int error;
3937
3938 mutex_enter(&sc->sc_atq_lock);
3939 error = ixl_atq_exec_locked(sc, iatq);
3940 mutex_exit(&sc->sc_atq_lock);
3941
3942 return error;
3943 }
3944
3945 static int
3946 ixl_atq_exec_locked(struct ixl_softc *sc, struct ixl_atq *iatq)
3947 {
3948 int error;
3949
3950 KASSERT(mutex_owned(&sc->sc_atq_lock));
3951 KASSERT(iatq->iatq_desc.iaq_cookie == 0);
3952
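	/*
	 * Post the command and sleep on sc_atq_cv until
	 * ixl_atq_done_locked() runs the ixl_wakeup() callback for it,
	 * or IXL_ATQ_EXEC_TIMEOUT expires.
	 */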
3953 ixl_atq_set(iatq, ixl_wakeup);
3954
3955 error = ixl_atq_post_locked(sc, iatq);
3956 if (error)
3957 return error;
3958
3959 error = cv_timedwait(&sc->sc_atq_cv, &sc->sc_atq_lock,
3960 IXL_ATQ_EXEC_TIMEOUT);
3961
3962 return error;
3963 }
3964
3965 static int
3966 ixl_atq_poll(struct ixl_softc *sc, struct ixl_aq_desc *iaq, unsigned int tm)
3967 {
3968 struct ixl_aq_desc *atq, *slot;
3969 unsigned int prod;
3970 unsigned int t = 0;
3971
3972 mutex_enter(&sc->sc_atq_lock);
3973
3974 atq = IXL_DMA_KVA(&sc->sc_atq);
3975 prod = sc->sc_atq_prod;
3976 slot = atq + prod;
3977
3978 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3979 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
3980
3981 *slot = *iaq;
3982 slot->iaq_flags |= htole16(IXL_AQ_SI);
3983
3984 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3985 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
3986
3987 prod++;
3988 prod &= IXL_AQ_MASK;
3989 sc->sc_atq_prod = prod;
3990 ixl_wr(sc, sc->sc_aq_regs->atq_tail, prod);
3991
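	/*
	 * Busy-wait in 1ms steps, up to roughly "tm" milliseconds, for the
	 * firmware to advance the head past the descriptor just posted.
	 */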
3992 while (ixl_rd(sc, sc->sc_aq_regs->atq_head) != prod) {
3993 delaymsec(1);
3994
3995 if (t++ > tm) {
3996 mutex_exit(&sc->sc_atq_lock);
3997 return ETIMEDOUT;
3998 }
3999 }
4000
4001 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
4002 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTREAD);
4003 *iaq = *slot;
4004 memset(slot, 0, sizeof(*slot));
4005 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
4006 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREREAD);
4007
4008 sc->sc_atq_cons = prod;
4009
4010 mutex_exit(&sc->sc_atq_lock);
4011
4012 return 0;
4013 }
4014
4015 static int
4016 ixl_get_version(struct ixl_softc *sc)
4017 {
4018 struct ixl_aq_desc iaq;
4019 uint32_t fwbuild, fwver, apiver;
4020 uint16_t api_maj_ver, api_min_ver;
4021
4022 memset(&iaq, 0, sizeof(iaq));
4023 iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VERSION);
4024
4027 if (ixl_atq_poll(sc, &iaq, 2000) != 0)
4028 return ETIMEDOUT;
4029 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK))
4030 return EIO;
4031
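	/*
	 * GET VERSION reply layout: param[1] is the firmware build number,
	 * param[2] holds the firmware major/minor version and param[3] the
	 * AQ API major/minor version, each packed as two 16-bit fields.
	 */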
4032 fwbuild = le32toh(iaq.iaq_param[1]);
4033 fwver = le32toh(iaq.iaq_param[2]);
4034 apiver = le32toh(iaq.iaq_param[3]);
4035
4036 api_maj_ver = (uint16_t)apiver;
4037 api_min_ver = (uint16_t)(apiver >> 16);
4038
4039 aprint_normal(", FW %hu.%hu.%05u API %hu.%hu", (uint16_t)fwver,
4040 (uint16_t)(fwver >> 16), fwbuild, api_maj_ver, api_min_ver);
4041
4042 if (sc->sc_mac_type == I40E_MAC_X722) {
4043 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK |
4044 IXL_SC_AQ_FLAG_NVMREAD);
4045 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL);
4046 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RSS);
4047 }
4048
4049 #define IXL_API_VER(maj, min) (((uint32_t)(maj) << 16) | (min))
4050 if (IXL_API_VER(api_maj_ver, api_min_ver) >= IXL_API_VER(1, 5)) {
4051 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL);
4052 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK);
4053 }
4054 #undef IXL_API_VER
4055
4056 return 0;
4057 }
4058
4059 static int
4060 ixl_get_nvm_version(struct ixl_softc *sc)
4061 {
4062 uint16_t nvmver, cfg_ptr, eetrack_hi, eetrack_lo, oem_hi, oem_lo;
4063 uint32_t eetrack, oem;
4064 uint16_t nvm_maj_ver, nvm_min_ver, oem_build;
4065 uint8_t oem_ver, oem_patch;
4066
4067 nvmver = cfg_ptr = eetrack_hi = eetrack_lo = oem_hi = oem_lo = 0;
4068 ixl_rd16_nvm(sc, I40E_SR_NVM_DEV_STARTER_VERSION, &nvmver);
4069 ixl_rd16_nvm(sc, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
4070 ixl_rd16_nvm(sc, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
4071 ixl_rd16_nvm(sc, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
4072 ixl_rd16_nvm(sc, cfg_ptr + I40E_NVM_OEM_VER_OFF, &oem_hi);
4073 ixl_rd16_nvm(sc, cfg_ptr + I40E_NVM_OEM_VER_OFF + 1, &oem_lo);
4074
4075 nvm_maj_ver = (uint16_t)__SHIFTOUT(nvmver, IXL_NVM_VERSION_HI_MASK);
4076 nvm_min_ver = (uint16_t)__SHIFTOUT(nvmver, IXL_NVM_VERSION_LO_MASK);
4077 eetrack = ((uint32_t)eetrack_hi << 16) | eetrack_lo;
4078 oem = ((uint32_t)oem_hi << 16) | oem_lo;
4079 oem_ver = __SHIFTOUT(oem, IXL_NVM_OEMVERSION_MASK);
4080 oem_build = __SHIFTOUT(oem, IXL_NVM_OEMBUILD_MASK);
4081 oem_patch = __SHIFTOUT(oem, IXL_NVM_OEMPATCH_MASK);
4082
4083 aprint_normal(" nvm %x.%02x etid %08x oem %d.%d.%d",
4084 nvm_maj_ver, nvm_min_ver, eetrack,
4085 oem_ver, oem_build, oem_patch);
4086
4087 return 0;
4088 }
4089
4090 static int
4091 ixl_pxe_clear(struct ixl_softc *sc)
4092 {
4093 struct ixl_aq_desc iaq;
4094 int rv;
4095
4096 memset(&iaq, 0, sizeof(iaq));
4097 iaq.iaq_opcode = htole16(IXL_AQ_OP_CLEAR_PXE_MODE);
4098 iaq.iaq_param[0] = htole32(0x2);
4099
4100 rv = ixl_atq_poll(sc, &iaq, 250);
4101
4102 ixl_wr(sc, I40E_GLLAN_RCTL_0, 0x1);
4103
4104 if (rv != 0)
4105 return ETIMEDOUT;
4106
4107 switch (iaq.iaq_retval) {
4108 case htole16(IXL_AQ_RC_OK):
4109 case htole16(IXL_AQ_RC_EEXIST):
4110 break;
4111 default:
4112 return EIO;
4113 }
4114
4115 return 0;
4116 }
4117
4118 static int
4119 ixl_lldp_shut(struct ixl_softc *sc)
4120 {
4121 struct ixl_aq_desc iaq;
4122
4123 memset(&iaq, 0, sizeof(iaq));
4124 iaq.iaq_opcode = htole16(IXL_AQ_OP_LLDP_STOP_AGENT);
4125 iaq.iaq_param[0] = htole32(IXL_LLDP_SHUTDOWN);
4126
4127 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4128 aprint_error_dev(sc->sc_dev, "STOP LLDP AGENT timeout\n");
4129 return -1;
4130 }
4131
4132 switch (iaq.iaq_retval) {
4133 case htole16(IXL_AQ_RC_EMODE):
4134 case htole16(IXL_AQ_RC_EPERM):
4135 /* ignore silently */
4136 default:
4137 break;
4138 }
4139
4140 return 0;
4141 }
4142
4143 static void
4144 ixl_parse_hw_capability(struct ixl_softc *sc, struct ixl_aq_capability *cap)
4145 {
4146 uint16_t id;
4147 uint32_t number, logical_id;
4148
4149 id = le16toh(cap->cap_id);
4150 number = le32toh(cap->number);
4151 logical_id = le32toh(cap->logical_id);
4152
4153 switch (id) {
4154 case IXL_AQ_CAP_RSS:
4155 sc->sc_rss_table_size = number;
4156 sc->sc_rss_table_entry_width = logical_id;
4157 break;
4158 case IXL_AQ_CAP_RXQ:
4159 case IXL_AQ_CAP_TXQ:
4160 sc->sc_nqueue_pairs_device = MIN(number,
4161 sc->sc_nqueue_pairs_device);
4162 break;
4163 }
4164 }
4165
4166 static int
4167 ixl_get_hw_capabilities(struct ixl_softc *sc)
4168 {
4169 struct ixl_dmamem idm;
4170 struct ixl_aq_desc iaq;
4171 struct ixl_aq_capability *caps;
4172 size_t i, ncaps;
4173 bus_size_t caps_size;
4174 uint16_t status;
4175 int rv;
4176
4177 caps_size = sizeof(caps[0]) * 40;
4178 memset(&iaq, 0, sizeof(iaq));
4179 iaq.iaq_opcode = htole16(IXL_AQ_OP_LIST_FUNC_CAP);
4180
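	/*
	 * If the capability list does not fit, the firmware answers with
	 * ENOMEM and reports the required length in iaq_datalen, so retry
	 * with a larger buffer until it fits.
	 */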
4181 do {
4182 if (ixl_dmamem_alloc(sc, &idm, caps_size, 0) != 0) {
4183 return -1;
4184 }
4185
4186 iaq.iaq_flags = htole16(IXL_AQ_BUF |
4187 (caps_size > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4188 iaq.iaq_datalen = htole16(caps_size);
4189 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
4190
4191 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0,
4192 IXL_DMA_LEN(&idm), BUS_DMASYNC_PREREAD);
4193
4194 rv = ixl_atq_poll(sc, &iaq, 250);
4195
4196 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0,
4197 IXL_DMA_LEN(&idm), BUS_DMASYNC_POSTREAD);
4198
4199 if (rv != 0) {
4200 aprint_error(", HW capabilities timeout\n");
4201 goto done;
4202 }
4203
4204 status = le16toh(iaq.iaq_retval);
4205
4206 if (status == IXL_AQ_RC_ENOMEM) {
4207 caps_size = le16toh(iaq.iaq_datalen);
4208 ixl_dmamem_free(sc, &idm);
4209 }
4210 } while (status == IXL_AQ_RC_ENOMEM);
4211
4212 if (status != IXL_AQ_RC_OK) {
4213 aprint_error(", HW capabilities error\n");
4214 goto done;
4215 }
4216
4217 caps = IXL_DMA_KVA(&idm);
4218 ncaps = le16toh(iaq.iaq_param[1]);
4219
4220 for (i = 0; i < ncaps; i++) {
4221 ixl_parse_hw_capability(sc, &caps[i]);
4222 }
4223
4224 done:
4225 ixl_dmamem_free(sc, &idm);
4226 return rv;
4227 }
4228
4229 static int
4230 ixl_get_mac(struct ixl_softc *sc)
4231 {
4232 struct ixl_dmamem idm;
4233 struct ixl_aq_desc iaq;
4234 struct ixl_aq_mac_addresses *addrs;
4235 int rv;
4236
4237 if (ixl_dmamem_alloc(sc, &idm, sizeof(*addrs), 0) != 0) {
4238 aprint_error(", unable to allocate mac addresses\n");
4239 return -1;
4240 }
4241
4242 memset(&iaq, 0, sizeof(iaq));
4243 iaq.iaq_flags = htole16(IXL_AQ_BUF);
4244 iaq.iaq_opcode = htole16(IXL_AQ_OP_MAC_ADDRESS_READ);
4245 iaq.iaq_datalen = htole16(sizeof(*addrs));
4246 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
4247
4248 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4249 BUS_DMASYNC_PREREAD);
4250
4251 rv = ixl_atq_poll(sc, &iaq, 250);
4252
4253 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4254 BUS_DMASYNC_POSTREAD);
4255
4256 if (rv != 0) {
4257 aprint_error(", MAC ADDRESS READ timeout\n");
4258 rv = -1;
4259 goto done;
4260 }
4261 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4262 aprint_error(", MAC ADDRESS READ error\n");
4263 rv = -1;
4264 goto done;
4265 }
4266
4267 addrs = IXL_DMA_KVA(&idm);
4268 if (!ISSET(iaq.iaq_param[0], htole32(IXL_AQ_MAC_PORT_VALID))) {
4269 printf(", port address is not valid\n");
4270 goto done;
4271 }
4272
4273 memcpy(sc->sc_enaddr, addrs->port, ETHER_ADDR_LEN);
4274 rv = 0;
4275
4276 done:
4277 ixl_dmamem_free(sc, &idm);
4278 return rv;
4279 }
4280
4281 static int
4282 ixl_get_switch_config(struct ixl_softc *sc)
4283 {
4284 struct ixl_dmamem idm;
4285 struct ixl_aq_desc iaq;
4286 struct ixl_aq_switch_config *hdr;
4287 struct ixl_aq_switch_config_element *elms, *elm;
4288 unsigned int nelm, i;
4289 int rv;
4290
4291 if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
4292 aprint_error_dev(sc->sc_dev,
4293 "unable to allocate switch config buffer\n");
4294 return -1;
4295 }
4296
4297 memset(&iaq, 0, sizeof(iaq));
4298 iaq.iaq_flags = htole16(IXL_AQ_BUF |
4299 (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4300 iaq.iaq_opcode = htole16(IXL_AQ_OP_SWITCH_GET_CONFIG);
4301 iaq.iaq_datalen = htole16(IXL_AQ_BUFLEN);
4302 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
4303
4304 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4305 BUS_DMASYNC_PREREAD);
4306
4307 rv = ixl_atq_poll(sc, &iaq, 250);
4308
4309 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4310 BUS_DMASYNC_POSTREAD);
4311
4312 if (rv != 0) {
4313 aprint_error_dev(sc->sc_dev, "GET SWITCH CONFIG timeout\n");
4314 rv = -1;
4315 goto done;
4316 }
4317 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4318 aprint_error_dev(sc->sc_dev, "GET SWITCH CONFIG error\n");
4319 rv = -1;
4320 goto done;
4321 }
4322
4323 hdr = IXL_DMA_KVA(&idm);
4324 elms = (struct ixl_aq_switch_config_element *)(hdr + 1);
4325
4326 nelm = le16toh(hdr->num_reported);
4327 if (nelm < 1) {
4328 aprint_error_dev(sc->sc_dev, "no switch config available\n");
4329 rv = -1;
4330 goto done;
4331 }
4332
4333 for (i = 0; i < nelm; i++) {
4334 elm = &elms[i];
4335
4336 aprint_debug_dev(sc->sc_dev,
4337 "type %x revision %u seid %04x\n",
4338 elm->type, elm->revision, le16toh(elm->seid));
4339 aprint_debug_dev(sc->sc_dev,
4340 "uplink %04x downlink %04x\n",
4341 le16toh(elm->uplink_seid),
4342 le16toh(elm->downlink_seid));
4343 aprint_debug_dev(sc->sc_dev,
4344 "conntype %x scheduler %04x extra %04x\n",
4345 elm->connection_type,
4346 le16toh(elm->scheduler_id),
4347 le16toh(elm->element_info));
4348 }
4349
4350 elm = &elms[0];
4351
4352 sc->sc_uplink_seid = elm->uplink_seid;
4353 sc->sc_downlink_seid = elm->downlink_seid;
4354 sc->sc_seid = elm->seid;
4355
4356 if ((sc->sc_uplink_seid == htole16(0)) !=
4357 (sc->sc_downlink_seid == htole16(0))) {
4358 aprint_error_dev(sc->sc_dev, "SEIDs are misconfigured\n");
4359 rv = -1;
4360 goto done;
4361 }
4362
4363 done:
4364 ixl_dmamem_free(sc, &idm);
4365 return rv;
4366 }
4367
4368 static int
4369 ixl_phy_mask_ints(struct ixl_softc *sc)
4370 {
4371 struct ixl_aq_desc iaq;
4372
4373 memset(&iaq, 0, sizeof(iaq));
4374 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_EVENT_MASK);
4375 iaq.iaq_param[2] = htole32(IXL_AQ_PHY_EV_MASK &
4376 ~(IXL_AQ_PHY_EV_LINK_UPDOWN | IXL_AQ_PHY_EV_MODULE_QUAL_FAIL |
4377 IXL_AQ_PHY_EV_MEDIA_NA));
4378
4379 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4380 aprint_error_dev(sc->sc_dev, "SET PHY EVENT MASK timeout\n");
4381 return -1;
4382 }
4383 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4384 aprint_error_dev(sc->sc_dev, "SET PHY EVENT MASK error\n");
4385 return -1;
4386 }
4387
4388 return 0;
4389 }
4390
4391 static int
4392 ixl_get_phy_abilities(struct ixl_softc *sc, struct ixl_dmamem *idm)
4393 {
4394 struct ixl_aq_desc iaq;
4395 int rv;
4396
4397 memset(&iaq, 0, sizeof(iaq));
4398 iaq.iaq_flags = htole16(IXL_AQ_BUF |
4399 (IXL_DMA_LEN(idm) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4400 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_GET_ABILITIES);
4401 iaq.iaq_datalen = htole16(IXL_DMA_LEN(idm));
4402 iaq.iaq_param[0] = htole32(IXL_AQ_PHY_REPORT_INIT);
4403 ixl_aq_dva(&iaq, IXL_DMA_DVA(idm));
4404
4405 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
4406 BUS_DMASYNC_PREREAD);
4407
4408 rv = ixl_atq_poll(sc, &iaq, 250);
4409
4410 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
4411 BUS_DMASYNC_POSTREAD);
4412
4413 if (rv != 0)
4414 return -1;
4415
4416 return le16toh(iaq.iaq_retval);
4417 }
4418
4419 static int
4420 ixl_get_phy_info(struct ixl_softc *sc)
4421 {
4422 struct ixl_dmamem idm;
4423 struct ixl_aq_phy_abilities *phy;
4424 int rv;
4425
4426 if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
4427 aprint_error_dev(sc->sc_dev,
4428 "unable to allocate phy abilities buffer\n");
4429 return -1;
4430 }
4431
4432 rv = ixl_get_phy_abilities(sc, &idm);
4433 switch (rv) {
4434 case -1:
4435 aprint_error_dev(sc->sc_dev, "GET PHY ABILITIES timeout\n");
4436 goto done;
4437 case IXL_AQ_RC_OK:
4438 break;
4439 case IXL_AQ_RC_EIO:
4440 aprint_error_dev(sc->sc_dev, "unable to query phy types\n");
4441 goto done;
4442 default:
4443 aprint_error_dev(sc->sc_dev,
4444 "GET PHY ABILITIES error %u\n", rv);
4445 goto done;
4446 }
4447
4448 phy = IXL_DMA_KVA(&idm);
4449
4450 sc->sc_phy_types = le32toh(phy->phy_type);
4451 sc->sc_phy_types |= (uint64_t)le32toh(phy->phy_type_ext) << 32;
4452
4453 sc->sc_phy_abilities = phy->abilities;
4454 sc->sc_phy_linkspeed = phy->link_speed;
4455 sc->sc_phy_fec_cfg = phy->fec_cfg_curr_mod_ext_info &
4456 (IXL_AQ_ENABLE_FEC_KR | IXL_AQ_ENABLE_FEC_RS |
4457 IXL_AQ_REQUEST_FEC_KR | IXL_AQ_REQUEST_FEC_RS);
4458 sc->sc_eee_cap = phy->eee_capability;
4459 sc->sc_eeer_val = phy->eeer_val;
4460 sc->sc_d3_lpan = phy->d3_lpan;
4461
4462 rv = 0;
4463
4464 done:
4465 ixl_dmamem_free(sc, &idm);
4466 return rv;
4467 }
4468
4469 static int
4470 ixl_set_phy_config(struct ixl_softc *sc,
4471 uint8_t link_speed, uint8_t abilities, bool polling)
4472 {
4473 struct ixl_aq_phy_param *param;
4474 struct ixl_atq iatq;
4475 struct ixl_aq_desc *iaq;
4476 int error;
4477
4478 memset(&iatq, 0, sizeof(iatq));
4479
4480 iaq = &iatq.iatq_desc;
4481 iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_CONFIG);
4482 param = (struct ixl_aq_phy_param *)&iaq->iaq_param;
4483 param->phy_types = htole32((uint32_t)sc->sc_phy_types);
4484 param->phy_type_ext = (uint8_t)(sc->sc_phy_types >> 32);
4485 param->link_speed = link_speed;
4486 param->abilities = abilities | IXL_AQ_PHY_ABILITY_AUTO_LINK;
4487 param->fec_cfg = sc->sc_phy_fec_cfg;
4488 param->eee_capability = sc->sc_eee_cap;
4489 param->eeer_val = sc->sc_eeer_val;
4490 param->d3_lpan = sc->sc_d3_lpan;
4491
4492 if (polling)
4493 error = ixl_atq_poll(sc, iaq, 250);
4494 else
4495 error = ixl_atq_exec(sc, &iatq);
4496
4497 if (error != 0)
4498 return error;
4499
4500 switch (le16toh(iaq->iaq_retval)) {
4501 case IXL_AQ_RC_OK:
4502 break;
4503 case IXL_AQ_RC_EPERM:
4504 return EPERM;
4505 default:
4506 return EIO;
4507 }
4508
4509 return 0;
4510 }
4511
4512 static int
4513 ixl_set_phy_autoselect(struct ixl_softc *sc)
4514 {
4515 uint8_t link_speed, abilities;
4516
4517 link_speed = sc->sc_phy_linkspeed;
4518 abilities = IXL_PHY_ABILITY_LINKUP | IXL_PHY_ABILITY_AUTONEGO;
4519
4520 return ixl_set_phy_config(sc, link_speed, abilities, true);
4521 }
4522
4523 static int
4524 ixl_get_link_status_poll(struct ixl_softc *sc, int *l)
4525 {
4526 struct ixl_aq_desc iaq;
4527 struct ixl_aq_link_param *param;
4528 int link;
4529
4530 memset(&iaq, 0, sizeof(iaq));
4531 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
4532 param = (struct ixl_aq_link_param *)iaq.iaq_param;
4533 param->notify = IXL_AQ_LINK_NOTIFY;
4534
4535 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4536 return ETIMEDOUT;
4537 }
4538 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4539 return EIO;
4540 }
4541
4542 /* It is unnecessary to hold the lock here */
4543 link = ixl_set_link_status_locked(sc, &iaq);
4544
4545 if (l != NULL)
4546 *l = link;
4547
4548 return 0;
4549 }
4550
4551 static int
4552 ixl_get_vsi(struct ixl_softc *sc)
4553 {
4554 struct ixl_dmamem *vsi = &sc->sc_scratch;
4555 struct ixl_aq_desc iaq;
4556 struct ixl_aq_vsi_param *param;
4557 struct ixl_aq_vsi_reply *reply;
4558 struct ixl_aq_vsi_data *data;
4559 int rv;
4560
4561 /* grumble, vsi info isn't "known" at compile time */
4562
4563 memset(&iaq, 0, sizeof(iaq));
4564 iaq.iaq_flags = htole16(IXL_AQ_BUF |
4565 (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4566 iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VSI_PARAMS);
4567 iaq.iaq_datalen = htole16(IXL_DMA_LEN(vsi));
4568 ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
4569
4570 param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
4571 param->uplink_seid = sc->sc_seid;
4572
4573 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4574 BUS_DMASYNC_PREREAD);
4575
4576 rv = ixl_atq_poll(sc, &iaq, 250);
4577
4578 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4579 BUS_DMASYNC_POSTREAD);
4580
4581 if (rv != 0) {
4582 return ETIMEDOUT;
4583 }
4584
4585 switch (le16toh(iaq.iaq_retval)) {
4586 case IXL_AQ_RC_OK:
4587 break;
4588 case IXL_AQ_RC_ENOENT:
4589 return ENOENT;
4590 case IXL_AQ_RC_EACCES:
4591 return EACCES;
4592 default:
4593 return EIO;
4594 }
4595
4596 reply = (struct ixl_aq_vsi_reply *)iaq.iaq_param;
4597 sc->sc_vsi_number = le16toh(reply->vsi_number);
4598 data = IXL_DMA_KVA(vsi);
4599 sc->sc_vsi_stat_counter_idx = le16toh(data->stat_counter_idx);
4600
4601 return 0;
4602 }
4603
4604 static int
4605 ixl_set_vsi(struct ixl_softc *sc)
4606 {
4607 struct ixl_dmamem *vsi = &sc->sc_scratch;
4608 struct ixl_aq_desc iaq;
4609 struct ixl_aq_vsi_param *param;
4610 struct ixl_aq_vsi_data *data = IXL_DMA_KVA(vsi);
4611 unsigned int qnum;
4612 uint16_t val;
4613 int rv;
4614
4615 qnum = sc->sc_nqueue_pairs - 1;
4616
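	/*
	 * Update only the queue mapping and VLAN sections: map all queue
	 * pairs contiguously into TC 0 and pick the VLAN stripping mode
	 * from the current ETHERCAP_VLAN_HWTAGGING setting.
	 */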
4617 data->valid_sections = htole16(IXL_AQ_VSI_VALID_QUEUE_MAP |
4618 IXL_AQ_VSI_VALID_VLAN);
4619
4620 CLR(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_MASK));
4621 SET(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_CONTIG));
4622 data->queue_mapping[0] = htole16(0);
4623 data->tc_mapping[0] = htole16((0 << IXL_AQ_VSI_TC_Q_OFFSET_SHIFT) |
4624 (qnum << IXL_AQ_VSI_TC_Q_NUMBER_SHIFT));
4625
4626 val = le16toh(data->port_vlan_flags);
4627 CLR(val, IXL_AQ_VSI_PVLAN_MODE_MASK | IXL_AQ_VSI_PVLAN_EMOD_MASK);
4628 SET(val, IXL_AQ_VSI_PVLAN_MODE_ALL);
4629
4630 if (ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWTAGGING)) {
4631 SET(val, IXL_AQ_VSI_PVLAN_EMOD_STR_BOTH);
4632 } else {
4633 SET(val, IXL_AQ_VSI_PVLAN_EMOD_NOTHING);
4634 }
4635
4636 data->port_vlan_flags = htole16(val);
4637
4638 /* grumble, vsi info isn't "known" at compile time */
4639
4640 memset(&iaq, 0, sizeof(iaq));
4641 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD |
4642 (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4643 iaq.iaq_opcode = htole16(IXL_AQ_OP_UPD_VSI_PARAMS);
4644 iaq.iaq_datalen = htole16(IXL_DMA_LEN(vsi));
4645 ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
4646
4647 param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
4648 param->uplink_seid = sc->sc_seid;
4649
4650 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4651 BUS_DMASYNC_PREWRITE);
4652
4653 rv = ixl_atq_poll(sc, &iaq, 250);
4654
4655 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4656 BUS_DMASYNC_POSTWRITE);
4657
4658 if (rv != 0) {
4659 return ETIMEDOUT;
4660 }
4661
4662 switch (le16toh(iaq.iaq_retval)) {
4663 case IXL_AQ_RC_OK:
4664 break;
4665 case IXL_AQ_RC_ENOENT:
4666 return ENOENT;
4667 case IXL_AQ_RC_EACCES:
4668 return EACCES;
4669 default:
4670 return EIO;
4671 }
4672
4673 return 0;
4674 }
4675
4676 static void
4677 ixl_set_filter_control(struct ixl_softc *sc)
4678 {
4679 uint32_t reg;
4680
4681 reg = ixl_rd_rx_csr(sc, I40E_PFQF_CTL_0);
4682
4683 CLR(reg, I40E_PFQF_CTL_0_HASHLUTSIZE_MASK);
4684 SET(reg, I40E_HASH_LUT_SIZE_128 << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT);
4685
4686 SET(reg, I40E_PFQF_CTL_0_FD_ENA_MASK);
4687 SET(reg, I40E_PFQF_CTL_0_ETYPE_ENA_MASK);
4688 SET(reg, I40E_PFQF_CTL_0_MACVLAN_ENA_MASK);
4689
4690 ixl_wr_rx_csr(sc, I40E_PFQF_CTL_0, reg);
4691 }
4692
4693 static inline void
4694 ixl_get_default_rss_key(uint32_t *buf, size_t len)
4695 {
4696 size_t cplen;
4697 uint8_t rss_seed[RSS_KEYSIZE];
4698
4699 rss_getkey(rss_seed);
4700 memset(buf, 0, len);
4701
4702 cplen = MIN(len, sizeof(rss_seed));
4703 memcpy(buf, rss_seed, cplen);
4704 }
4705
4706 static int
4707 ixl_set_rss_key(struct ixl_softc *sc, uint8_t *key, size_t keylen)
4708 {
4709 struct ixl_dmamem *idm;
4710 struct ixl_atq iatq;
4711 struct ixl_aq_desc *iaq;
4712 struct ixl_aq_rss_key_param *param;
4713 struct ixl_aq_rss_key_data *data;
4714 size_t len, datalen, stdlen, extlen;
4715 uint16_t vsi_id;
4716 int rv;
4717
4718 memset(&iatq, 0, sizeof(iatq));
4719 iaq = &iatq.iatq_desc;
4720 idm = &sc->sc_aqbuf;
4721
4722 datalen = sizeof(*data);
4723
4724 /*XXX The buf size has to be less than the size of the register */
4725 datalen = MIN(IXL_RSS_KEY_SIZE_REG * sizeof(uint32_t), datalen);
4726
4727 iaq->iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD |
4728 (datalen > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4729 iaq->iaq_opcode = htole16(IXL_AQ_OP_RSS_SET_KEY);
4730 iaq->iaq_datalen = htole16(datalen);
4731
4732 param = (struct ixl_aq_rss_key_param *)iaq->iaq_param;
4733 vsi_id = (sc->sc_vsi_number << IXL_AQ_RSSKEY_VSI_ID_SHIFT) |
4734 IXL_AQ_RSSKEY_VSI_VALID;
4735 param->vsi_id = htole16(vsi_id);
4736
4737 memset(IXL_DMA_KVA(idm), 0, IXL_DMA_LEN(idm));
4738 data = IXL_DMA_KVA(idm);
4739
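	/*
	 * Copy the key into the buffer: the first bytes fill the standard
	 * RSS key field and any remainder goes into the extended hash key.
	 */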
4740 len = MIN(keylen, datalen);
4741 stdlen = MIN(sizeof(data->standard_rss_key), len);
4742 memcpy(data->standard_rss_key, key, stdlen);
4743 len = (len > stdlen) ? (len - stdlen) : 0;
4744
4745 extlen = MIN(sizeof(data->extended_hash_key), len);
4747 memcpy(data->extended_hash_key, key + stdlen, extlen);
4748
4749 ixl_aq_dva(iaq, IXL_DMA_DVA(idm));
4750
4751 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
4752 IXL_DMA_LEN(idm), BUS_DMASYNC_PREWRITE);
4753
4754 rv = ixl_atq_exec(sc, &iatq);
4755
4756 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
4757 IXL_DMA_LEN(idm), BUS_DMASYNC_POSTWRITE);
4758
4759 if (rv != 0) {
4760 return ETIMEDOUT;
4761 }
4762
4763 if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK)) {
4764 return EIO;
4765 }
4766
4767 return 0;
4768 }
4769
4770 static int
4771 ixl_set_rss_lut(struct ixl_softc *sc, uint8_t *lut, size_t lutlen)
4772 {
4773 struct ixl_dmamem *idm;
4774 struct ixl_atq iatq;
4775 struct ixl_aq_desc *iaq;
4776 struct ixl_aq_rss_lut_param *param;
4777 uint16_t vsi_id;
4778 uint8_t *data;
4779 size_t dmalen;
4780 int rv;
4781
4782 memset(&iatq, 0, sizeof(iatq));
4783 iaq = &iatq.iatq_desc;
4784 idm = &sc->sc_aqbuf;
4785
4786 dmalen = MIN(lutlen, IXL_DMA_LEN(idm));
4787
4788 iaq->iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD |
4789 (dmalen > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4790 iaq->iaq_opcode = htole16(IXL_AQ_OP_RSS_SET_LUT);
4791 iaq->iaq_datalen = htole16(dmalen);
4792
4793 memset(IXL_DMA_KVA(idm), 0, IXL_DMA_LEN(idm));
4794 data = IXL_DMA_KVA(idm);
4795 memcpy(data, lut, dmalen);
4796 ixl_aq_dva(iaq, IXL_DMA_DVA(idm));
4797
4798 param = (struct ixl_aq_rss_lut_param *)iaq->iaq_param;
4799 vsi_id = (sc->sc_vsi_number << IXL_AQ_RSSLUT_VSI_ID_SHIFT) |
4800 IXL_AQ_RSSLUT_VSI_VALID;
4801 param->vsi_id = htole16(vsi_id);
4802 param->flags = htole16(IXL_AQ_RSSLUT_TABLE_TYPE_PF <<
4803 IXL_AQ_RSSLUT_TABLE_TYPE_SHIFT);
4804
4805 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
4806 IXL_DMA_LEN(idm), BUS_DMASYNC_PREWRITE);
4807
4808 rv = ixl_atq_exec(sc, &iatq);
4809
4810 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
4811 IXL_DMA_LEN(idm), BUS_DMASYNC_POSTWRITE);
4812
4813 if (rv != 0) {
4814 return ETIMEDOUT;
4815 }
4816
4817 if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK)) {
4818 return EIO;
4819 }
4820
4821 return 0;
4822 }
4823
4824 static int
4825 ixl_register_rss_key(struct ixl_softc *sc)
4826 {
4827 uint32_t rss_seed[IXL_RSS_KEY_SIZE_REG];
4828 int rv;
4829 size_t i;
4830
4831 ixl_get_default_rss_key(rss_seed, sizeof(rss_seed));
4832
4833 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RSS)) {
4834 rv = ixl_set_rss_key(sc, (uint8_t*)rss_seed,
4835 sizeof(rss_seed));
4836 } else {
4837 rv = 0;
4838 for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
4839 ixl_wr_rx_csr(sc, I40E_PFQF_HKEY(i), rss_seed[i]);
4840 }
4841 }
4842
4843 return rv;
4844 }
4845
4846 static void
4847 ixl_register_rss_pctype(struct ixl_softc *sc)
4848 {
4849 uint64_t set_hena = 0;
4850 uint32_t hena0, hena1;
4851
4852 /*
4853 * We use TCP/UDP with IPv4/IPv6 by default.
4854 * Note: the device cannot use just the IP header of each
4855 * TCP/UDP packet for the RSS hash calculation.
4856 */
4857 if (sc->sc_mac_type == I40E_MAC_X722)
4858 set_hena = IXL_RSS_HENA_DEFAULT_X722;
4859 else
4860 set_hena = IXL_RSS_HENA_DEFAULT_XL710;
4861
4862 hena0 = ixl_rd_rx_csr(sc, I40E_PFQF_HENA(0));
4863 hena1 = ixl_rd_rx_csr(sc, I40E_PFQF_HENA(1));
4864
4865 SET(hena0, set_hena);
4866 SET(hena1, set_hena >> 32);
4867
4868 ixl_wr_rx_csr(sc, I40E_PFQF_HENA(0), hena0);
4869 ixl_wr_rx_csr(sc, I40E_PFQF_HENA(1), hena1);
4870 }
4871
4872 static int
4873 ixl_register_rss_hlut(struct ixl_softc *sc)
4874 {
4875 unsigned int qid;
4876 uint8_t hlut_buf[512], lut_mask;
4877 uint32_t *hluts;
4878 size_t i, hluts_num;
4879 int rv;
4880
4881 lut_mask = (0x01 << sc->sc_rss_table_entry_width) - 1;
4882
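	/* Spread the queue pairs round-robin across the RSS LUT entries. */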
4883 for (i = 0; i < sc->sc_rss_table_size; i++) {
4884 qid = i % sc->sc_nqueue_pairs;
4885 hlut_buf[i] = qid & lut_mask;
4886 }
4887
4888 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RSS)) {
4889 rv = ixl_set_rss_lut(sc, hlut_buf, sizeof(hlut_buf));
4890 } else {
4891 rv = 0;
4892 hluts = (uint32_t *)hlut_buf;
4893 hluts_num = sc->sc_rss_table_size >> 2;
4894 for (i = 0; i < hluts_num; i++) {
4895 ixl_wr(sc, I40E_PFQF_HLUT(i), hluts[i]);
4896 }
4897 ixl_flush(sc);
4898 }
4899
4900 return rv;
4901 }
4902
4903 static void
4904 ixl_config_rss(struct ixl_softc *sc)
4905 {
4906
4907 KASSERT(mutex_owned(&sc->sc_cfg_lock));
4908
4909 ixl_register_rss_key(sc);
4910 ixl_register_rss_pctype(sc);
4911 ixl_register_rss_hlut(sc);
4912 }
4913
4914 static const struct ixl_phy_type *
4915 ixl_search_phy_type(uint8_t phy_type)
4916 {
4917 const struct ixl_phy_type *itype;
4918 uint64_t mask;
4919 unsigned int i;
4920
4921 if (phy_type >= 64)
4922 return NULL;
4923
4924 mask = 1ULL << phy_type;
4925
4926 for (i = 0; i < __arraycount(ixl_phy_type_map); i++) {
4927 itype = &ixl_phy_type_map[i];
4928
4929 if (ISSET(itype->phy_type, mask))
4930 return itype;
4931 }
4932
4933 return NULL;
4934 }
4935
4936 static uint64_t
4937 ixl_search_link_speed(uint8_t link_speed)
4938 {
4939 const struct ixl_speed_type *type;
4940 unsigned int i;
4941
4942 for (i = 0; i < __arraycount(ixl_speed_type_map); i++) {
4943 type = &ixl_speed_type_map[i];
4944
4945 if (ISSET(type->dev_speed, link_speed))
4946 return type->net_speed;
4947 }
4948
4949 return 0;
4950 }
4951
4952 static uint8_t
4953 ixl_search_baudrate(uint64_t baudrate)
4954 {
4955 const struct ixl_speed_type *type;
4956 unsigned int i;
4957
4958 for (i = 0; i < __arraycount(ixl_speed_type_map); i++) {
4959 type = &ixl_speed_type_map[i];
4960
4961 if (type->net_speed == baudrate) {
4962 return type->dev_speed;
4963 }
4964 }
4965
4966 return 0;
4967 }
4968
4969 static int
4970 ixl_restart_an(struct ixl_softc *sc)
4971 {
4972 struct ixl_aq_desc iaq;
4973
4974 memset(&iaq, 0, sizeof(iaq));
4975 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_RESTART_AN);
4976 iaq.iaq_param[0] =
4977 htole32(IXL_AQ_PHY_RESTART_AN | IXL_AQ_PHY_LINK_ENABLE);
4978
4979 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4980 aprint_error_dev(sc->sc_dev, "RESTART AN timeout\n");
4981 return -1;
4982 }
4983 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4984 aprint_error_dev(sc->sc_dev, "RESTART AN error\n");
4985 return -1;
4986 }
4987
4988 return 0;
4989 }
4990
4991 static int
4992 ixl_add_macvlan(struct ixl_softc *sc, const uint8_t *macaddr,
4993 uint16_t vlan, uint16_t flags)
4994 {
4995 struct ixl_aq_desc iaq;
4996 struct ixl_aq_add_macvlan *param;
4997 struct ixl_aq_add_macvlan_elem *elem;
4998
4999 memset(&iaq, 0, sizeof(iaq));
5000 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
5001 iaq.iaq_opcode = htole16(IXL_AQ_OP_ADD_MACVLAN);
5002 iaq.iaq_datalen = htole16(sizeof(*elem));
5003 ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));
5004
5005 param = (struct ixl_aq_add_macvlan *)&iaq.iaq_param;
5006 param->num_addrs = htole16(1);
5007 param->seid0 = htole16(0x8000) | sc->sc_seid;
5008 param->seid1 = 0;
5009 param->seid2 = 0;
5010
5011 elem = IXL_DMA_KVA(&sc->sc_scratch);
5012 memset(elem, 0, sizeof(*elem));
5013 memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
5014 elem->flags = htole16(IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH | flags);
5015 elem->vlan = htole16(vlan);
5016
5017 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
5018 return IXL_AQ_RC_EINVAL;
5019 }
5020
5021 switch (le16toh(iaq.iaq_retval)) {
5022 case IXL_AQ_RC_OK:
5023 break;
5024 case IXL_AQ_RC_ENOSPC:
5025 return ENOSPC;
5026 case IXL_AQ_RC_ENOENT:
5027 return ENOENT;
5028 case IXL_AQ_RC_EACCES:
5029 return EACCES;
5030 case IXL_AQ_RC_EEXIST:
5031 return EEXIST;
5032 case IXL_AQ_RC_EINVAL:
5033 return EINVAL;
5034 default:
5035 return EIO;
5036 }
5037
5038 return 0;
5039 }
5040
5041 static int
5042 ixl_remove_macvlan(struct ixl_softc *sc, const uint8_t *macaddr,
5043 uint16_t vlan, uint16_t flags)
5044 {
5045 struct ixl_aq_desc iaq;
5046 struct ixl_aq_remove_macvlan *param;
5047 struct ixl_aq_remove_macvlan_elem *elem;
5048
5049 memset(&iaq, 0, sizeof(iaq));
5050 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
5051 iaq.iaq_opcode = htole16(IXL_AQ_OP_REMOVE_MACVLAN);
5052 iaq.iaq_datalen = htole16(sizeof(*elem));
5053 ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));
5054
5055 param = (struct ixl_aq_remove_macvlan *)&iaq.iaq_param;
5056 param->num_addrs = htole16(1);
5057 param->seid0 = htole16(0x8000) | sc->sc_seid;
5058 param->seid1 = 0;
5059 param->seid2 = 0;
5060
5061 elem = IXL_DMA_KVA(&sc->sc_scratch);
5062 memset(elem, 0, sizeof(*elem));
5063 memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
5064 elem->flags = htole16(IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH | flags);
5065 elem->vlan = htole16(vlan);
5066
5067 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
5068 return EINVAL;
5069 }
5070
5071 switch (le16toh(iaq.iaq_retval)) {
5072 case IXL_AQ_RC_OK:
5073 break;
5074 case IXL_AQ_RC_ENOENT:
5075 return ENOENT;
5076 case IXL_AQ_RC_EACCES:
5077 return EACCES;
5078 case IXL_AQ_RC_EINVAL:
5079 return EINVAL;
5080 default:
5081 return EIO;
5082 }
5083
5084 return 0;
5085 }
5086
5087 static int
5088 ixl_hmc(struct ixl_softc *sc)
5089 {
5090 struct {
5091 uint32_t count;
5092 uint32_t minsize;
5093 bus_size_t objsiz;
5094 bus_size_t setoff;
5095 bus_size_t setcnt;
5096 } regs[] = {
5097 {
5098 0,
5099 IXL_HMC_TXQ_MINSIZE,
5100 I40E_GLHMC_LANTXOBJSZ,
5101 I40E_GLHMC_LANTXBASE(sc->sc_pf_id),
5102 I40E_GLHMC_LANTXCNT(sc->sc_pf_id),
5103 },
5104 {
5105 0,
5106 IXL_HMC_RXQ_MINSIZE,
5107 I40E_GLHMC_LANRXOBJSZ,
5108 I40E_GLHMC_LANRXBASE(sc->sc_pf_id),
5109 I40E_GLHMC_LANRXCNT(sc->sc_pf_id),
5110 },
5111 {
5112 0,
5113 0,
5114 I40E_GLHMC_FCOEDDPOBJSZ,
5115 I40E_GLHMC_FCOEDDPBASE(sc->sc_pf_id),
5116 I40E_GLHMC_FCOEDDPCNT(sc->sc_pf_id),
5117 },
5118 {
5119 0,
5120 0,
5121 I40E_GLHMC_FCOEFOBJSZ,
5122 I40E_GLHMC_FCOEFBASE(sc->sc_pf_id),
5123 I40E_GLHMC_FCOEFCNT(sc->sc_pf_id),
5124 },
5125 };
5126 struct ixl_hmc_entry *e;
5127 uint64_t size, dva;
5128 uint8_t *kva;
5129 uint64_t *sdpage;
5130 unsigned int i;
5131 int npages, tables;
5132 uint32_t reg;
5133
5134 CTASSERT(__arraycount(regs) <= __arraycount(sc->sc_hmc_entries));
5135
5136 regs[IXL_HMC_LAN_TX].count = regs[IXL_HMC_LAN_RX].count =
5137 ixl_rd(sc, I40E_GLHMC_LANQMAX);
5138
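	/*
	 * Read each object type's size from hardware, check it against the
	 * driver's minimum, and lay the types out back to back to compute
	 * the total amount of backing memory needed.
	 */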
5139 size = 0;
5140 for (i = 0; i < __arraycount(regs); i++) {
5141 e = &sc->sc_hmc_entries[i];
5142
5143 e->hmc_count = regs[i].count;
5144 reg = ixl_rd(sc, regs[i].objsiz);
5145 e->hmc_size = BIT_ULL(0x3F & reg);
5146 e->hmc_base = size;
5147
5148 if ((e->hmc_size * 8) < regs[i].minsize) {
5149 aprint_error_dev(sc->sc_dev,
5150 "kernel hmc entry is too big\n");
5151 return -1;
5152 }
5153
5154 size += roundup(e->hmc_size * e->hmc_count, IXL_HMC_ROUNDUP);
5155 }
5156 size = roundup(size, IXL_HMC_PGSIZE);
5157 npages = size / IXL_HMC_PGSIZE;
5158
5159 tables = roundup(size, IXL_HMC_L2SZ) / IXL_HMC_L2SZ;
5160
5161 if (ixl_dmamem_alloc(sc, &sc->sc_hmc_pd, size, IXL_HMC_PGSIZE) != 0) {
5162 aprint_error_dev(sc->sc_dev,
5163 "unable to allocate hmc pd memory\n");
5164 return -1;
5165 }
5166
5167 if (ixl_dmamem_alloc(sc, &sc->sc_hmc_sd, tables * IXL_HMC_PGSIZE,
5168 IXL_HMC_PGSIZE) != 0) {
5169 aprint_error_dev(sc->sc_dev,
5170 "unable to allocate hmc sd memory\n");
5171 ixl_dmamem_free(sc, &sc->sc_hmc_pd);
5172 return -1;
5173 }
5174
5175 kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
5176 memset(kva, 0, IXL_DMA_LEN(&sc->sc_hmc_pd));
5177
5178 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
5179 0, IXL_DMA_LEN(&sc->sc_hmc_pd),
5180 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5181
5182 dva = IXL_DMA_DVA(&sc->sc_hmc_pd);
5183 sdpage = IXL_DMA_KVA(&sc->sc_hmc_sd);
5184 memset(sdpage, 0, IXL_DMA_LEN(&sc->sc_hmc_sd));
5185
5186 for (i = 0; (int)i < npages; i++) {
5187 *sdpage = htole64(dva | IXL_HMC_PDVALID);
5188 sdpage++;
5189
5190 dva += IXL_HMC_PGSIZE;
5191 }
5192
5193 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_sd),
5194 0, IXL_DMA_LEN(&sc->sc_hmc_sd),
5195 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5196
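	/*
	 * Program one segment descriptor per table: its DMA address, the
	 * number of backing pages it covers and the valid bit, committed
	 * through the SDCMD register at index i.
	 */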
5197 dva = IXL_DMA_DVA(&sc->sc_hmc_sd);
5198 for (i = 0; (int)i < tables; i++) {
5199 uint32_t count;
5200
5201 KASSERT(npages >= 0);
5202
5203 count = ((unsigned int)npages > IXL_HMC_PGS) ?
5204 IXL_HMC_PGS : (unsigned int)npages;
5205
5206 ixl_wr(sc, I40E_PFHMC_SDDATAHIGH, dva >> 32);
5207 ixl_wr(sc, I40E_PFHMC_SDDATALOW, dva |
5208 (count << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
5209 (1U << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT));
5210 ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
5211 ixl_wr(sc, I40E_PFHMC_SDCMD,
5212 (1U << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | i);
5213
5214 npages -= IXL_HMC_PGS;
5215 dva += IXL_HMC_PGSIZE;
5216 }
5217
5218 for (i = 0; i < __arraycount(regs); i++) {
5219 e = &sc->sc_hmc_entries[i];
5220
5221 ixl_wr(sc, regs[i].setoff, e->hmc_base / IXL_HMC_ROUNDUP);
5222 ixl_wr(sc, regs[i].setcnt, e->hmc_count);
5223 }
5224
5225 return 0;
5226 }
5227
5228 static void
5229 ixl_hmc_free(struct ixl_softc *sc)
5230 {
5231 ixl_dmamem_free(sc, &sc->sc_hmc_sd);
5232 ixl_dmamem_free(sc, &sc->sc_hmc_pd);
5233 }
5234
5235 static void
5236 ixl_hmc_pack(void *d, const void *s, const struct ixl_hmc_pack *packing,
5237 unsigned int npacking)
5238 {
5239 uint8_t *dst = d;
5240 const uint8_t *src = s;
5241 unsigned int i;
5242
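	/*
	 * Copy each field bit by bit from the host structure into its
	 * position (lsb/width) in the little-endian hardware context,
	 * handling fields that are not byte aligned.
	 */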
5243 for (i = 0; i < npacking; i++) {
5244 const struct ixl_hmc_pack *pack = &packing[i];
5245 unsigned int offset = pack->lsb / 8;
5246 unsigned int align = pack->lsb % 8;
5247 const uint8_t *in = src + pack->offset;
5248 uint8_t *out = dst + offset;
5249 int width = pack->width;
5250 unsigned int inbits = 0;
5251
5252 if (align) {
5253 inbits = (*in++) << align;
5254 *out++ |= (inbits & 0xff);
5255 inbits >>= 8;
5256
5257 width -= 8 - align;
5258 }
5259
5260 while (width >= 8) {
5261 inbits |= (*in++) << align;
5262 *out++ = (inbits & 0xff);
5263 inbits >>= 8;
5264
5265 width -= 8;
5266 }
5267
5268 if (width > 0) {
5269 inbits |= (*in) << align;
5270 *out |= (inbits & ((1 << width) - 1));
5271 }
5272 }
5273 }
5274
5275 static struct ixl_aq_buf *
5276 ixl_aqb_alloc(struct ixl_softc *sc)
5277 {
5278 struct ixl_aq_buf *aqb;
5279
5280 aqb = kmem_alloc(sizeof(*aqb), KM_SLEEP);
5281
5282 aqb->aqb_size = IXL_AQ_BUFLEN;
5283
5284 if (bus_dmamap_create(sc->sc_dmat, aqb->aqb_size, 1,
5285 aqb->aqb_size, 0,
5286 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &aqb->aqb_map) != 0)
5287 goto free;
5288 if (bus_dmamem_alloc(sc->sc_dmat, aqb->aqb_size,
5289 IXL_AQ_ALIGN, 0, &aqb->aqb_seg, 1, &aqb->aqb_nsegs,
5290 BUS_DMA_WAITOK) != 0)
5291 goto destroy;
5292 if (bus_dmamem_map(sc->sc_dmat, &aqb->aqb_seg, aqb->aqb_nsegs,
5293 aqb->aqb_size, &aqb->aqb_data, BUS_DMA_WAITOK) != 0)
5294 goto dma_free;
5295 if (bus_dmamap_load(sc->sc_dmat, aqb->aqb_map, aqb->aqb_data,
5296 aqb->aqb_size, NULL, BUS_DMA_WAITOK) != 0)
5297 goto unmap;
5298
5299 return aqb;
5300 unmap:
5301 bus_dmamem_unmap(sc->sc_dmat, aqb->aqb_data, aqb->aqb_size);
5302 dma_free:
5303 bus_dmamem_free(sc->sc_dmat, &aqb->aqb_seg, 1);
5304 destroy:
5305 bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
5306 free:
5307 kmem_free(aqb, sizeof(*aqb));
5308
5309 return NULL;
5310 }
5311
5312 static void
5313 ixl_aqb_free(struct ixl_softc *sc, struct ixl_aq_buf *aqb)
5314 {
5315
5316 bus_dmamap_unload(sc->sc_dmat, aqb->aqb_map);
5317 bus_dmamem_unmap(sc->sc_dmat, aqb->aqb_data, aqb->aqb_size);
5318 bus_dmamem_free(sc->sc_dmat, &aqb->aqb_seg, 1);
5319 bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
5320 kmem_free(aqb, sizeof(*aqb));
5321 }
5322
5323 static int
5324 ixl_arq_fill(struct ixl_softc *sc)
5325 {
5326 struct ixl_aq_buf *aqb;
5327 struct ixl_aq_desc *arq, *iaq;
5328 unsigned int prod = sc->sc_arq_prod;
5329 unsigned int n;
5330 int post = 0;
5331
5332 n = ixl_rxr_unrefreshed(sc->sc_arq_prod, sc->sc_arq_cons,
5333 IXL_AQ_NUM);
5334 arq = IXL_DMA_KVA(&sc->sc_arq);
5335
5336 if (__predict_false(n <= 0))
5337 return 0;
5338
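	/*
	 * Attach a DMA buffer to every empty ARQ slot, reusing buffers from
	 * the idle list (or allocating new ones), then move the tail
	 * register so the firmware can post events into them.
	 */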
5339 do {
5340 aqb = sc->sc_arq_live[prod];
5341 iaq = &arq[prod];
5342
5343 if (aqb == NULL) {
5344 aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle);
5345 if (aqb != NULL) {
5346 SIMPLEQ_REMOVE(&sc->sc_arq_idle, aqb,
5347 ixl_aq_buf, aqb_entry);
5348 } else if ((aqb = ixl_aqb_alloc(sc)) == NULL) {
5349 break;
5350 }
5351
5352 sc->sc_arq_live[prod] = aqb;
5353 memset(aqb->aqb_data, 0, aqb->aqb_size);
5354
5355 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0,
5356 aqb->aqb_size, BUS_DMASYNC_PREREAD);
5357
5358 iaq->iaq_flags = htole16(IXL_AQ_BUF |
5359 (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ?
5360 IXL_AQ_LB : 0));
5361 iaq->iaq_opcode = 0;
5362 iaq->iaq_datalen = htole16(aqb->aqb_size);
5363 iaq->iaq_retval = 0;
5364 iaq->iaq_cookie = 0;
5365 iaq->iaq_param[0] = 0;
5366 iaq->iaq_param[1] = 0;
5367 ixl_aq_dva(iaq, aqb->aqb_map->dm_segs[0].ds_addr);
5368 }
5369
5370 prod++;
5371 prod &= IXL_AQ_MASK;
5372
5373 post = 1;
5374
5375 } while (--n);
5376
5377 if (post) {
5378 sc->sc_arq_prod = prod;
5379 ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
5380 }
5381
5382 return post;
5383 }
5384
5385 static void
5386 ixl_arq_unfill(struct ixl_softc *sc)
5387 {
5388 struct ixl_aq_buf *aqb;
5389 unsigned int i;
5390
5391 for (i = 0; i < __arraycount(sc->sc_arq_live); i++) {
5392 aqb = sc->sc_arq_live[i];
5393 if (aqb == NULL)
5394 continue;
5395
5396 sc->sc_arq_live[i] = NULL;
5397 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, aqb->aqb_size,
5398 BUS_DMASYNC_POSTREAD);
5399 ixl_aqb_free(sc, aqb);
5400 }
5401
5402 while ((aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle)) != NULL) {
5403 SIMPLEQ_REMOVE(&sc->sc_arq_idle, aqb,
5404 ixl_aq_buf, aqb_entry);
5405 ixl_aqb_free(sc, aqb);
5406 }
5407 }
5408
5409 static void
5410 ixl_clear_hw(struct ixl_softc *sc)
5411 {
5412 uint32_t num_queues, base_queue;
5413 uint32_t num_pf_int;
5414 uint32_t num_vf_int;
5415 uint32_t num_vfs;
5416 uint32_t i, j;
5417 uint32_t val;
5418 uint32_t eol = 0x7ff;
5419
5420 /* get number of interrupts, queues, and vfs */
5421 val = ixl_rd(sc, I40E_GLPCI_CNF2);
5422 num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
5423 I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
5424 num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
5425 I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
5426
5427 val = ixl_rd(sc, I40E_PFLAN_QALLOC);
5428 base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
5429 I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
5430 j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
5431 I40E_PFLAN_QALLOC_LASTQ_SHIFT;
5432 if (val & I40E_PFLAN_QALLOC_VALID_MASK)
5433 num_queues = (j - base_queue) + 1;
5434 else
5435 num_queues = 0;
5436
5437 val = ixl_rd(sc, I40E_PF_VT_PFALLOC);
5438 i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
5439 I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
5440 j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
5441 I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
5442 if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
5443 num_vfs = (j - i) + 1;
5444 else
5445 num_vfs = 0;
5446
5447 /* stop all the interrupts */
5448 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0);
5449 ixl_flush(sc);
5450 val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
5451 for (i = 0; i < num_pf_int - 2; i++)
5452 ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), val);
5453 ixl_flush(sc);
5454
5455 /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
5456 val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
5457 ixl_wr(sc, I40E_PFINT_LNKLST0, val);
5458 for (i = 0; i < num_pf_int - 2; i++)
5459 ixl_wr(sc, I40E_PFINT_LNKLSTN(i), val);
5460 val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
5461 for (i = 0; i < num_vfs; i++)
5462 ixl_wr(sc, I40E_VPINT_LNKLST0(i), val);
5463 for (i = 0; i < num_vf_int - 2; i++)
5464 ixl_wr(sc, I40E_VPINT_LNKLSTN(i), val);
5465
5466 /* warn the HW of the coming Tx disables */
5467 for (i = 0; i < num_queues; i++) {
5468 uint32_t abs_queue_idx = base_queue + i;
5469 uint32_t reg_block = 0;
5470
5471 if (abs_queue_idx >= 128) {
5472 reg_block = abs_queue_idx / 128;
5473 abs_queue_idx %= 128;
5474 }
5475
5476 val = ixl_rd(sc, I40E_GLLAN_TXPRE_QDIS(reg_block));
5477 val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
5478 val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
5479 val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
5480
5481 ixl_wr(sc, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
5482 }
5483 delaymsec(400);
5484
5485 /* stop all the queues */
5486 for (i = 0; i < num_queues; i++) {
5487 ixl_wr(sc, I40E_QINT_TQCTL(i), 0);
5488 ixl_wr(sc, I40E_QTX_ENA(i), 0);
5489 ixl_wr(sc, I40E_QINT_RQCTL(i), 0);
5490 ixl_wr(sc, I40E_QRX_ENA(i), 0);
5491 }
5492
5493 /* short wait for all queue disables to settle */
5494 delaymsec(50);
5495 }
5496
5497 static int
5498 ixl_pf_reset(struct ixl_softc *sc)
5499 {
5500 uint32_t cnt = 0;
5501 uint32_t cnt1 = 0;
5502 uint32_t reg = 0, reg0 = 0;
5503 uint32_t grst_del;
5504
5505 /*
5506 * Poll for Global Reset steady state in case of recent GRST.
5507 * The grst delay value is in 100ms units, and we'll wait a
5508 * couple counts longer to be sure we don't just miss the end.
5509 */
5510 grst_del = ixl_rd(sc, I40E_GLGEN_RSTCTL);
5511 grst_del &= I40E_GLGEN_RSTCTL_GRSTDEL_MASK;
5512 grst_del >>= I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
5513
5514 grst_del = grst_del * 20;
5515
5516 for (cnt = 0; cnt < grst_del; cnt++) {
5517 reg = ixl_rd(sc, I40E_GLGEN_RSTAT);
5518 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
5519 break;
5520 delaymsec(100);
5521 }
5522 if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
5523 aprint_error(", Global reset polling failed to complete\n");
5524 return -1;
5525 }
5526
5527 /* Now Wait for the FW to be ready */
5528 for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
5529 reg = ixl_rd(sc, I40E_GLNVM_ULD);
5530 reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5531 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
5532 if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5533 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))
5534 break;
5535
5536 delaymsec(10);
5537 }
5538 if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5539 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
5540 aprint_error(", wait for FW Reset complete timed out "
5541 "(I40E_GLNVM_ULD = 0x%x)\n", reg);
5542 return -1;
5543 }
5544
5545 /*
5546 * If there was a Global Reset in progress when we got here,
5547 * we don't need to do the PF Reset
5548 */
5549 if (cnt == 0) {
5550 reg = ixl_rd(sc, I40E_PFGEN_CTRL);
5551 ixl_wr(sc, I40E_PFGEN_CTRL, reg | I40E_PFGEN_CTRL_PFSWR_MASK);
5552 for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) {
5553 reg = ixl_rd(sc, I40E_PFGEN_CTRL);
5554 if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
5555 break;
5556 delaymsec(1);
5557
5558 reg0 = ixl_rd(sc, I40E_GLGEN_RSTAT);
5559 if (reg0 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
5560 aprint_error(", Core reset upcoming."
5561 " Skipping PF reset request\n");
5562 return -1;
5563 }
5564 }
5565 if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
5566 aprint_error(", PF reset polling failed to complete "
5567 "(I40E_PFGEN_CTRL = 0x%x)\n", reg);
5568 return -1;
5569 }
5570 }
5571
5572 return 0;
5573 }
5574
5575 static int
5576 ixl_dmamem_alloc(struct ixl_softc *sc, struct ixl_dmamem *ixm,
5577 bus_size_t size, bus_size_t align)
5578 {
5579 ixm->ixm_size = size;
5580
5581 if (bus_dmamap_create(sc->sc_dmat, ixm->ixm_size, 1,
5582 ixm->ixm_size, 0,
5583 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
5584 &ixm->ixm_map) != 0)
5585 return 1;
5586 if (bus_dmamem_alloc(sc->sc_dmat, ixm->ixm_size,
5587 align, 0, &ixm->ixm_seg, 1, &ixm->ixm_nsegs,
5588 BUS_DMA_WAITOK) != 0)
5589 goto destroy;
5590 if (bus_dmamem_map(sc->sc_dmat, &ixm->ixm_seg, ixm->ixm_nsegs,
5591 ixm->ixm_size, &ixm->ixm_kva, BUS_DMA_WAITOK) != 0)
5592 goto free;
5593 if (bus_dmamap_load(sc->sc_dmat, ixm->ixm_map, ixm->ixm_kva,
5594 ixm->ixm_size, NULL, BUS_DMA_WAITOK) != 0)
5595 goto unmap;
5596
5597 memset(ixm->ixm_kva, 0, ixm->ixm_size);
5598
5599 return 0;
5600 unmap:
5601 bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
5602 free:
5603 bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
5604 destroy:
5605 bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
5606 return 1;
5607 }
5608
5609 static void
5610 ixl_dmamem_free(struct ixl_softc *sc, struct ixl_dmamem *ixm)
5611 {
5612 bus_dmamap_unload(sc->sc_dmat, ixm->ixm_map);
5613 bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
5614 bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
5615 bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
5616 }
5617
5618 static int
5619 ixl_setup_vlan_hwfilter(struct ixl_softc *sc)
5620 {
5621 struct ethercom *ec = &sc->sc_ec;
5622 struct vlanid_list *vlanidp;
5623 int rv;
5624
5625 ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
5626 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
5627 ixl_remove_macvlan(sc, etherbroadcastaddr, 0,
5628 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
5629
5630 rv = ixl_add_macvlan(sc, sc->sc_enaddr, 0,
5631 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5632 if (rv != 0)
5633 return rv;
5634 rv = ixl_add_macvlan(sc, etherbroadcastaddr, 0,
5635 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5636 if (rv != 0)
5637 return rv;
5638
5639 ETHER_LOCK(ec);
5640 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
5641 rv = ixl_add_macvlan(sc, sc->sc_enaddr,
5642 vlanidp->vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5643 if (rv != 0)
5644 break;
5645 rv = ixl_add_macvlan(sc, etherbroadcastaddr,
5646 vlanidp->vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5647 if (rv != 0)
5648 break;
5649 }
5650 ETHER_UNLOCK(ec);
5651
5652 return rv;
5653 }
5654
5655 static void
5656 ixl_teardown_vlan_hwfilter(struct ixl_softc *sc)
5657 {
5658 struct vlanid_list *vlanidp;
5659 struct ethercom *ec = &sc->sc_ec;
5660
5661 ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
5662 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5663 ixl_remove_macvlan(sc, etherbroadcastaddr, 0,
5664 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5665
5666 ETHER_LOCK(ec);
5667 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
5668 ixl_remove_macvlan(sc, sc->sc_enaddr,
5669 vlanidp->vid, IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5670 ixl_remove_macvlan(sc, etherbroadcastaddr,
5671 vlanidp->vid, IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5672 }
5673 ETHER_UNLOCK(ec);
5674
5675 ixl_add_macvlan(sc, sc->sc_enaddr, 0,
5676 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
5677 ixl_add_macvlan(sc, etherbroadcastaddr, 0,
5678 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
5679 }
5680
5681 static int
5682 ixl_update_macvlan(struct ixl_softc *sc)
5683 {
5684 int rv = 0;
5685 int next_ec_capenable = sc->sc_ec.ec_capenable;
5686
5687 if (ISSET(next_ec_capenable, ETHERCAP_VLAN_HWFILTER)) {
5688 rv = ixl_setup_vlan_hwfilter(sc);
5689 if (rv != 0)
5690 ixl_teardown_vlan_hwfilter(sc);
5691 } else {
5692 ixl_teardown_vlan_hwfilter(sc);
5693 }
5694
5695 return rv;
5696 }
5697
5698 static int
5699 ixl_ifflags_cb(struct ethercom *ec)
5700 {
5701 struct ifnet *ifp = &ec->ec_if;
5702 struct ixl_softc *sc = ifp->if_softc;
5703 int rv, change;
5704
5705 mutex_enter(&sc->sc_cfg_lock);
5706
5707 change = ec->ec_capenable ^ sc->sc_cur_ec_capenable;
5708
5709 if (ISSET(change, ETHERCAP_VLAN_HWTAGGING)) {
5710 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWTAGGING;
5711 rv = ENETRESET;
5712 goto out;
5713 }
5714
5715 if (ISSET(change, ETHERCAP_VLAN_HWFILTER)) {
5716 rv = ixl_update_macvlan(sc);
5717 if (rv == 0) {
5718 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWFILTER;
5719 } else {
5720 CLR(ec->ec_capenable, ETHERCAP_VLAN_HWFILTER);
5721 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER);
5722 }
5723 }
5724
5725 rv = ixl_iff(sc);
5726 out:
5727 mutex_exit(&sc->sc_cfg_lock);
5728
5729 return rv;
5730 }
5731
5732 static int
5733 ixl_set_link_status_locked(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
5734 {
5735 const struct ixl_aq_link_status *status;
5736 const struct ixl_phy_type *itype;
5737
5738 uint64_t ifm_active = IFM_ETHER;
5739 uint64_t ifm_status = IFM_AVALID;
5740 int link_state = LINK_STATE_DOWN;
5741 uint64_t baudrate = 0;
5742
5743 status = (const struct ixl_aq_link_status *)iaq->iaq_param;
5744 if (!ISSET(status->link_info, IXL_AQ_LINK_UP_FUNCTION)) {
5745 ifm_active |= IFM_NONE;
5746 goto done;
5747 }
5748
5749 ifm_active |= IFM_FDX;
5750 ifm_status |= IFM_ACTIVE;
5751 link_state = LINK_STATE_UP;
5752
5753 itype = ixl_search_phy_type(status->phy_type);
5754 if (itype != NULL)
5755 ifm_active |= itype->ifm_type;
5756
5757 if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_TX))
5758 ifm_active |= IFM_ETH_TXPAUSE;
5759 if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_RX))
5760 ifm_active |= IFM_ETH_RXPAUSE;
5761
5762 baudrate = ixl_search_link_speed(status->link_speed);
5763
5764 done:
5765 /* sc->sc_cfg_lock held except during attach */
5766 sc->sc_media_active = ifm_active;
5767 sc->sc_media_status = ifm_status;
5768
5769 sc->sc_ec.ec_if.if_baudrate = baudrate;
5770
5771 return link_state;
5772 }
5773
5774 static int
5775 ixl_establish_intx(struct ixl_softc *sc)
5776 {
5777 pci_chipset_tag_t pc = sc->sc_pa.pa_pc;
5778 pci_intr_handle_t *intr;
5779 char xnamebuf[32];
5780 char intrbuf[PCI_INTRSTR_LEN];
5781 char const *intrstr;
5782
5783 KASSERT(sc->sc_nintrs == 1);
5784
5785 intr = &sc->sc_ihp[0];
5786
5787 intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf));
5788 snprintf(xnamebuf, sizeof(xnamebuf), "%s:legacy",
5789 device_xname(sc->sc_dev));
5790
5791 sc->sc_ihs[0] = pci_intr_establish_xname(pc, *intr, IPL_NET, ixl_intr,
5792 sc, xnamebuf);
5793
5794 if (sc->sc_ihs[0] == NULL) {
5795 aprint_error_dev(sc->sc_dev,
5796 "unable to establish interrupt at %s\n", intrstr);
5797 return -1;
5798 }
5799
5800 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
5801 return 0;
5802 }
5803
5804 static int
5805 ixl_establish_msix(struct ixl_softc *sc)
5806 {
5807 pci_chipset_tag_t pc = sc->sc_pa.pa_pc;
5808 kcpuset_t *affinity;
5809 unsigned int vector = 0;
5810 unsigned int i;
5811 int affinity_to, r;
5812 char xnamebuf[32];
5813 char intrbuf[PCI_INTRSTR_LEN];
5814 char const *intrstr;
5815
5816 kcpuset_create(&affinity, false);
5817
5818 /* the "other" intr is mapped to vector 0 */
5819 vector = 0;
5820 intrstr = pci_intr_string(pc, sc->sc_ihp[vector],
5821 intrbuf, sizeof(intrbuf));
5822 snprintf(xnamebuf, sizeof(xnamebuf), "%s others",
5823 device_xname(sc->sc_dev));
5824 sc->sc_ihs[vector] = pci_intr_establish_xname(pc,
5825 sc->sc_ihp[vector], IPL_NET, ixl_other_intr,
5826 sc, xnamebuf);
5827 if (sc->sc_ihs[vector] == NULL) {
5828 aprint_error_dev(sc->sc_dev,
5829 "unable to establish interrupt at %s\n", intrstr);
5830 goto fail;
5831 }
5832
5833 aprint_normal_dev(sc->sc_dev, "other interrupt at %s", intrstr);
5834
5835 affinity_to = ncpu > (int)sc->sc_nqueue_pairs_max ? 1 : 0;
5836 affinity_to = (affinity_to + sc->sc_nqueue_pairs_max) % ncpu;
5837
5838 kcpuset_zero(affinity);
5839 kcpuset_set(affinity, affinity_to);
5840 r = interrupt_distribute(sc->sc_ihs[vector], affinity, NULL);
5841 if (r == 0) {
5842 aprint_normal(", affinity to %u", affinity_to);
5843 }
5844 aprint_normal("\n");
5845 vector++;
5846
5847 sc->sc_msix_vector_queue = vector;
5848 affinity_to = ncpu > (int)sc->sc_nqueue_pairs_max ? 1 : 0;
5849
5850 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
5851 intrstr = pci_intr_string(pc, sc->sc_ihp[vector],
5852 intrbuf, sizeof(intrbuf));
5853 snprintf(xnamebuf, sizeof(xnamebuf), "%s TXRX%d",
5854 device_xname(sc->sc_dev), i);
5855
5856 sc->sc_ihs[vector] = pci_intr_establish_xname(pc,
5857 sc->sc_ihp[vector], IPL_NET, ixl_queue_intr,
5858 (void *)&sc->sc_qps[i], xnamebuf);
5859
5860 if (sc->sc_ihs[vector] == NULL) {
5861 aprint_error_dev(sc->sc_dev,
5862 "unable to establish interrupt at %s\n", intrstr);
5863 goto fail;
5864 }
5865
5866 aprint_normal_dev(sc->sc_dev,
5867 "for TXRX%d interrupt at %s", i, intrstr);
5868
5869 kcpuset_zero(affinity);
5870 kcpuset_set(affinity, affinity_to);
5871 r = interrupt_distribute(sc->sc_ihs[vector], affinity, NULL);
5872 if (r == 0) {
5873 aprint_normal(", affinity to %u", affinity_to);
5874 affinity_to = (affinity_to + 1) % ncpu;
5875 }
5876 aprint_normal("\n");
5877 vector++;
5878 }
5879
5880 kcpuset_destroy(affinity);
5881
5882 return 0;
5883 fail:
5884 for (i = 0; i < vector; i++) {
5885 pci_intr_disestablish(pc, sc->sc_ihs[i]);
5886 }
5887
5888 sc->sc_msix_vector_queue = 0;
5890 kcpuset_destroy(affinity);
5891
5892 return -1;
5893 }
5894
5895 static void
5896 ixl_config_queue_intr(struct ixl_softc *sc)
5897 {
5898 unsigned int i, vector;
5899
5900 if (sc->sc_intrtype == PCI_INTR_TYPE_MSIX) {
5901 vector = sc->sc_msix_vector_queue;
5902 } else {
5903 vector = I40E_INTR_NOTX_INTR;
5904
5905 ixl_wr(sc, I40E_PFINT_LNKLST0,
5906 (I40E_INTR_NOTX_QUEUE <<
5907 I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
5908 (I40E_QUEUE_TYPE_RX <<
5909 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
5910 }
5911
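	/*
	 * For each queue pair, chain its RX and TX causes into the
	 * interrupt linked list and point both at their vector: a
	 * per-queue MSI-X vector, or the single shared vector otherwise.
	 */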
5912 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
5913 ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), 0);
5914 ixl_flush(sc);
5915
5916 ixl_wr(sc, I40E_PFINT_LNKLSTN(i),
5917 ((i) << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
5918 (I40E_QUEUE_TYPE_RX <<
5919 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
5920
5921 ixl_wr(sc, I40E_QINT_RQCTL(i),
5922 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
5923 (I40E_ITR_INDEX_RX <<
5924 I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
5925 (I40E_INTR_NOTX_RX_QUEUE <<
5926 I40E_QINT_RQCTL_MSIX0_INDX_SHIFT) |
5927 (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
5928 (I40E_QUEUE_TYPE_TX <<
5929 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
5930 I40E_QINT_RQCTL_CAUSE_ENA_MASK);
5931
5932 ixl_wr(sc, I40E_QINT_TQCTL(i),
5933 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
5934 (I40E_ITR_INDEX_TX <<
5935 I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
5936 (I40E_INTR_NOTX_TX_QUEUE <<
5937 I40E_QINT_TQCTL_MSIX0_INDX_SHIFT) |
5938 (I40E_QUEUE_TYPE_EOL <<
5939 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
5940 (I40E_QUEUE_TYPE_RX <<
5941 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) |
5942 I40E_QINT_TQCTL_CAUSE_ENA_MASK);
5943
5944 if (sc->sc_intrtype == PCI_INTR_TYPE_MSIX) {
5945 ixl_wr(sc, I40E_PFINT_ITRN(I40E_ITR_INDEX_RX, i),
5946 sc->sc_itr_rx);
5947 ixl_wr(sc, I40E_PFINT_ITRN(I40E_ITR_INDEX_TX, i),
5948 sc->sc_itr_tx);
5949 vector++;
5950 }
5951 }
5952 ixl_flush(sc);
5953
5954 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_RX), sc->sc_itr_rx);
5955 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_TX), sc->sc_itr_tx);
5956 ixl_flush(sc);
5957 }
5958
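/*
 * Enable the miscellaneous (non-queue) interrupt causes in
 * PFINT_ICR0_ENA: admin queue, link state changes, ECC/HMC errors,
 * PCI exceptions, VFLR, malicious driver detection and critical
 * errors.  No queue list is attached to this vector (LNKLST0 = 0x7FF)
 * and its ITR is cleared.
 */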
5959 static void
5960 ixl_config_other_intr(struct ixl_softc *sc)
5961 {
5962 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0);
5963 (void)ixl_rd(sc, I40E_PFINT_ICR0);
5964
5965 ixl_wr(sc, I40E_PFINT_ICR0_ENA,
5966 I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
5967 I40E_PFINT_ICR0_ENA_GRST_MASK |
5968 I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
5969 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
5970 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
5971 I40E_PFINT_ICR0_ENA_VFLR_MASK |
5972 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
5973 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
5974 I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK);
5975
5976 ixl_wr(sc, I40E_PFINT_LNKLST0, 0x7FF);
5977 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_OTHER), 0);
5978 ixl_wr(sc, I40E_PFINT_STAT_CTL0,
5979 (I40E_ITR_INDEX_OTHER <<
5980 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT));
5981 ixl_flush(sc);
5982 }
5983
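/*
 * Allocate and establish interrupts.  MSI-X is preferred, with one
 * vector per queue pair plus one vector for admin/other events; if
 * establishing the MSI-X handlers fails, the allocation is retried
 * with INTx.
 */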
5984 static int
5985 ixl_setup_interrupts(struct ixl_softc *sc)
5986 {
5987 struct pci_attach_args *pa = &sc->sc_pa;
5988 pci_intr_type_t max_type, intr_type;
5989 int counts[PCI_INTR_TYPE_SIZE];
5990 int error;
5991 unsigned int i;
5992 bool retry;
5993
5994 memset(counts, 0, sizeof(counts));
5995 max_type = PCI_INTR_TYPE_MSIX;
5996 /* QPs + other interrupt */
5997 counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueue_pairs_max + 1;
5998 counts[PCI_INTR_TYPE_INTX] = 1;
5999
6000 if (ixl_param_nomsix)
6001 counts[PCI_INTR_TYPE_MSIX] = 0;
6002
6003 do {
6004 retry = false;
6005 error = pci_intr_alloc(pa, &sc->sc_ihp, counts, max_type);
6006 if (error != 0) {
6007 aprint_error_dev(sc->sc_dev,
6008 "couldn't map interrupt\n");
6009 break;
6010 }
6011
6012 intr_type = pci_intr_type(pa->pa_pc, sc->sc_ihp[0]);
6013 sc->sc_nintrs = counts[intr_type];
6014 KASSERT(sc->sc_nintrs > 0);
6015
6016 for (i = 0; i < sc->sc_nintrs; i++) {
6017 pci_intr_setattr(pa->pa_pc, &sc->sc_ihp[i],
6018 PCI_INTR_MPSAFE, true);
6019 }
6020
6021 sc->sc_ihs = kmem_alloc(sizeof(sc->sc_ihs[0]) * sc->sc_nintrs,
6022 KM_SLEEP);
6023
6024 if (intr_type == PCI_INTR_TYPE_MSIX) {
6025 error = ixl_establish_msix(sc);
6026 if (error) {
6027 counts[PCI_INTR_TYPE_MSIX] = 0;
6028 retry = true;
6029 }
6030 } else if (intr_type == PCI_INTR_TYPE_INTX) {
6031 error = ixl_establish_intx(sc);
6032 } else {
6033 error = -1;
6034 }
6035
6036 if (error) {
6037 kmem_free(sc->sc_ihs,
6038 sizeof(sc->sc_ihs[0]) * sc->sc_nintrs);
6039 pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs);
6040 } else {
6041 sc->sc_intrtype = intr_type;
6042 }
6043 } while (retry);
6044
6045 return error;
6046 }
6047
6048 static void
6049 ixl_teardown_interrupts(struct ixl_softc *sc)
6050 {
6051 struct pci_attach_args *pa = &sc->sc_pa;
6052 unsigned int i;
6053
6054 for (i = 0; i < sc->sc_nintrs; i++) {
6055 pci_intr_disestablish(pa->pa_pc, sc->sc_ihs[i]);
6056 }
6057
6058 pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs);
6059
6060 kmem_free(sc->sc_ihs, sizeof(sc->sc_ihs[0]) * sc->sc_nintrs);
6061 sc->sc_ihs = NULL;
6062 sc->sc_nintrs = 0;
6063 }
6064
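/*
 * Attach event counters: per-queue Tx/Rx counters, admin/other event
 * counters, and the per-port and per-VSI hardware statistics that
 * ixl_stats_update() mirrors.  The periodic statistics callout and
 * work item are also initialized here.
 */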
6065 static int
6066 ixl_setup_stats(struct ixl_softc *sc)
6067 {
6068 struct ixl_queue_pair *qp;
6069 struct ixl_tx_ring *txr;
6070 struct ixl_rx_ring *rxr;
6071 struct ixl_stats_counters *isc;
6072 unsigned int i;
6073
6074 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
6075 qp = &sc->sc_qps[i];
6076 txr = qp->qp_txr;
6077 rxr = qp->qp_rxr;
6078
6079 evcnt_attach_dynamic(&txr->txr_defragged, EVCNT_TYPE_MISC,
6080 NULL, qp->qp_name, "m_defrag successed");
6081 evcnt_attach_dynamic(&txr->txr_defrag_failed, EVCNT_TYPE_MISC,
6082 		    NULL, qp->qp_name, "m_defrag failed");
6083 evcnt_attach_dynamic(&txr->txr_pcqdrop, EVCNT_TYPE_MISC,
6084 NULL, qp->qp_name, "Dropped in pcq");
6085 evcnt_attach_dynamic(&txr->txr_transmitdef, EVCNT_TYPE_MISC,
6086 NULL, qp->qp_name, "Deferred transmit");
6087 evcnt_attach_dynamic(&txr->txr_intr, EVCNT_TYPE_INTR,
6088 NULL, qp->qp_name, "Interrupt on queue");
6089 evcnt_attach_dynamic(&txr->txr_defer, EVCNT_TYPE_MISC,
6090 NULL, qp->qp_name, "Handled queue in softint/workqueue");
6091
6092 evcnt_attach_dynamic(&rxr->rxr_mgethdr_failed, EVCNT_TYPE_MISC,
6093 NULL, qp->qp_name, "MGETHDR failed");
6094 evcnt_attach_dynamic(&rxr->rxr_mgetcl_failed, EVCNT_TYPE_MISC,
6095 NULL, qp->qp_name, "MCLGET failed");
6096 evcnt_attach_dynamic(&rxr->rxr_mbuf_load_failed,
6097 EVCNT_TYPE_MISC, NULL, qp->qp_name,
6098 "bus_dmamap_load_mbuf failed");
6099 evcnt_attach_dynamic(&rxr->rxr_intr, EVCNT_TYPE_INTR,
6100 NULL, qp->qp_name, "Interrupt on queue");
6101 evcnt_attach_dynamic(&rxr->rxr_defer, EVCNT_TYPE_MISC,
6102 NULL, qp->qp_name, "Handled queue in softint/workqueue");
6103 }
6104
6105 evcnt_attach_dynamic(&sc->sc_event_atq, EVCNT_TYPE_INTR,
6106 NULL, device_xname(sc->sc_dev), "Interrupt for other events");
6107 evcnt_attach_dynamic(&sc->sc_event_link, EVCNT_TYPE_MISC,
6108 NULL, device_xname(sc->sc_dev), "Link status event");
6109 evcnt_attach_dynamic(&sc->sc_event_ecc_err, EVCNT_TYPE_MISC,
6110 NULL, device_xname(sc->sc_dev), "ECC error");
6111 evcnt_attach_dynamic(&sc->sc_event_pci_exception, EVCNT_TYPE_MISC,
6112 NULL, device_xname(sc->sc_dev), "PCI exception");
6113 evcnt_attach_dynamic(&sc->sc_event_crit_err, EVCNT_TYPE_MISC,
6114 NULL, device_xname(sc->sc_dev), "Critical error");
6115
6116 isc = &sc->sc_stats_counters;
6117 evcnt_attach_dynamic(&isc->isc_crc_errors, EVCNT_TYPE_MISC,
6118 NULL, device_xname(sc->sc_dev), "CRC errors");
6119 evcnt_attach_dynamic(&isc->isc_illegal_bytes, EVCNT_TYPE_MISC,
6120 NULL, device_xname(sc->sc_dev), "Illegal bytes");
6121 evcnt_attach_dynamic(&isc->isc_mac_local_faults, EVCNT_TYPE_MISC,
6122 NULL, device_xname(sc->sc_dev), "Mac local faults");
6123 evcnt_attach_dynamic(&isc->isc_mac_remote_faults, EVCNT_TYPE_MISC,
6124 NULL, device_xname(sc->sc_dev), "Mac remote faults");
6125 evcnt_attach_dynamic(&isc->isc_link_xon_rx, EVCNT_TYPE_MISC,
6126 NULL, device_xname(sc->sc_dev), "Rx xon");
6127 evcnt_attach_dynamic(&isc->isc_link_xon_tx, EVCNT_TYPE_MISC,
6128 NULL, device_xname(sc->sc_dev), "Tx xon");
6129 evcnt_attach_dynamic(&isc->isc_link_xoff_rx, EVCNT_TYPE_MISC,
6130 NULL, device_xname(sc->sc_dev), "Rx xoff");
6131 evcnt_attach_dynamic(&isc->isc_link_xoff_tx, EVCNT_TYPE_MISC,
6132 NULL, device_xname(sc->sc_dev), "Tx xoff");
6133 evcnt_attach_dynamic(&isc->isc_rx_fragments, EVCNT_TYPE_MISC,
6134 NULL, device_xname(sc->sc_dev), "Rx fragments");
6135 evcnt_attach_dynamic(&isc->isc_rx_jabber, EVCNT_TYPE_MISC,
6136 NULL, device_xname(sc->sc_dev), "Rx jabber");
6137
6138 evcnt_attach_dynamic(&isc->isc_rx_size_64, EVCNT_TYPE_MISC,
6139 NULL, device_xname(sc->sc_dev), "Rx size 64");
6140 evcnt_attach_dynamic(&isc->isc_rx_size_127, EVCNT_TYPE_MISC,
6141 NULL, device_xname(sc->sc_dev), "Rx size 127");
6142 evcnt_attach_dynamic(&isc->isc_rx_size_255, EVCNT_TYPE_MISC,
6143 NULL, device_xname(sc->sc_dev), "Rx size 255");
6144 evcnt_attach_dynamic(&isc->isc_rx_size_511, EVCNT_TYPE_MISC,
6145 NULL, device_xname(sc->sc_dev), "Rx size 511");
6146 evcnt_attach_dynamic(&isc->isc_rx_size_1023, EVCNT_TYPE_MISC,
6147 NULL, device_xname(sc->sc_dev), "Rx size 1023");
6148 evcnt_attach_dynamic(&isc->isc_rx_size_1522, EVCNT_TYPE_MISC,
6149 NULL, device_xname(sc->sc_dev), "Rx size 1522");
6150 evcnt_attach_dynamic(&isc->isc_rx_size_big, EVCNT_TYPE_MISC,
6151 NULL, device_xname(sc->sc_dev), "Rx jumbo packets");
6152 evcnt_attach_dynamic(&isc->isc_rx_undersize, EVCNT_TYPE_MISC,
6153 NULL, device_xname(sc->sc_dev), "Rx under size");
6154 evcnt_attach_dynamic(&isc->isc_rx_oversize, EVCNT_TYPE_MISC,
6155 NULL, device_xname(sc->sc_dev), "Rx over size");
6156
6157 evcnt_attach_dynamic(&isc->isc_rx_bytes, EVCNT_TYPE_MISC,
6158 NULL, device_xname(sc->sc_dev), "Rx bytes / port");
6159 evcnt_attach_dynamic(&isc->isc_rx_discards, EVCNT_TYPE_MISC,
6160 NULL, device_xname(sc->sc_dev), "Rx discards / port");
6161 evcnt_attach_dynamic(&isc->isc_rx_unicast, EVCNT_TYPE_MISC,
6162 NULL, device_xname(sc->sc_dev), "Rx unicast / port");
6163 evcnt_attach_dynamic(&isc->isc_rx_multicast, EVCNT_TYPE_MISC,
6164 NULL, device_xname(sc->sc_dev), "Rx multicast / port");
6165 evcnt_attach_dynamic(&isc->isc_rx_broadcast, EVCNT_TYPE_MISC,
6166 NULL, device_xname(sc->sc_dev), "Rx broadcast / port");
6167
6168 evcnt_attach_dynamic(&isc->isc_vsi_rx_bytes, EVCNT_TYPE_MISC,
6169 NULL, device_xname(sc->sc_dev), "Rx bytes / vsi");
6170 evcnt_attach_dynamic(&isc->isc_vsi_rx_discards, EVCNT_TYPE_MISC,
6171 NULL, device_xname(sc->sc_dev), "Rx discard / vsi");
6172 evcnt_attach_dynamic(&isc->isc_vsi_rx_unicast, EVCNT_TYPE_MISC,
6173 NULL, device_xname(sc->sc_dev), "Rx unicast / vsi");
6174 evcnt_attach_dynamic(&isc->isc_vsi_rx_multicast, EVCNT_TYPE_MISC,
6175 NULL, device_xname(sc->sc_dev), "Rx multicast / vsi");
6176 evcnt_attach_dynamic(&isc->isc_vsi_rx_broadcast, EVCNT_TYPE_MISC,
6177 NULL, device_xname(sc->sc_dev), "Rx broadcast / vsi");
6178
6179 evcnt_attach_dynamic(&isc->isc_tx_size_64, EVCNT_TYPE_MISC,
6180 NULL, device_xname(sc->sc_dev), "Tx size 64");
6181 evcnt_attach_dynamic(&isc->isc_tx_size_127, EVCNT_TYPE_MISC,
6182 NULL, device_xname(sc->sc_dev), "Tx size 127");
6183 evcnt_attach_dynamic(&isc->isc_tx_size_255, EVCNT_TYPE_MISC,
6184 NULL, device_xname(sc->sc_dev), "Tx size 255");
6185 evcnt_attach_dynamic(&isc->isc_tx_size_511, EVCNT_TYPE_MISC,
6186 NULL, device_xname(sc->sc_dev), "Tx size 511");
6187 evcnt_attach_dynamic(&isc->isc_tx_size_1023, EVCNT_TYPE_MISC,
6188 NULL, device_xname(sc->sc_dev), "Tx size 1023");
6189 evcnt_attach_dynamic(&isc->isc_tx_size_1522, EVCNT_TYPE_MISC,
6190 NULL, device_xname(sc->sc_dev), "Tx size 1522");
6191 evcnt_attach_dynamic(&isc->isc_tx_size_big, EVCNT_TYPE_MISC,
6192 NULL, device_xname(sc->sc_dev), "Tx jumbo packets");
6193
6194 evcnt_attach_dynamic(&isc->isc_tx_bytes, EVCNT_TYPE_MISC,
6195 NULL, device_xname(sc->sc_dev), "Tx bytes / port");
6196 evcnt_attach_dynamic(&isc->isc_tx_dropped_link_down, EVCNT_TYPE_MISC,
6197 NULL, device_xname(sc->sc_dev),
6198 "Tx dropped due to link down / port");
6199 evcnt_attach_dynamic(&isc->isc_tx_unicast, EVCNT_TYPE_MISC,
6200 NULL, device_xname(sc->sc_dev), "Tx unicast / port");
6201 evcnt_attach_dynamic(&isc->isc_tx_multicast, EVCNT_TYPE_MISC,
6202 NULL, device_xname(sc->sc_dev), "Tx multicast / port");
6203 evcnt_attach_dynamic(&isc->isc_tx_broadcast, EVCNT_TYPE_MISC,
6204 NULL, device_xname(sc->sc_dev), "Tx broadcast / port");
6205
6206 evcnt_attach_dynamic(&isc->isc_vsi_tx_bytes, EVCNT_TYPE_MISC,
6207 NULL, device_xname(sc->sc_dev), "Tx bytes / vsi");
6208 evcnt_attach_dynamic(&isc->isc_vsi_tx_errors, EVCNT_TYPE_MISC,
6209 NULL, device_xname(sc->sc_dev), "Tx errors / vsi");
6210 evcnt_attach_dynamic(&isc->isc_vsi_tx_unicast, EVCNT_TYPE_MISC,
6211 NULL, device_xname(sc->sc_dev), "Tx unicast / vsi");
6212 evcnt_attach_dynamic(&isc->isc_vsi_tx_multicast, EVCNT_TYPE_MISC,
6213 NULL, device_xname(sc->sc_dev), "Tx multicast / vsi");
6214 evcnt_attach_dynamic(&isc->isc_vsi_tx_broadcast, EVCNT_TYPE_MISC,
6215 NULL, device_xname(sc->sc_dev), "Tx broadcast / vsi");
6216
6217 sc->sc_stats_intval = ixl_param_stats_interval;
6218 callout_init(&sc->sc_stats_callout, CALLOUT_MPSAFE);
6219 callout_setfunc(&sc->sc_stats_callout, ixl_stats_callout, sc);
6220 ixl_work_set(&sc->sc_stats_task, ixl_stats_update, sc);
6221
6222 return 0;
6223 }
6224
6225 static void
6226 ixl_teardown_stats(struct ixl_softc *sc)
6227 {
6228 struct ixl_tx_ring *txr;
6229 struct ixl_rx_ring *rxr;
6230 struct ixl_stats_counters *isc;
6231 unsigned int i;
6232
6233 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
6234 txr = sc->sc_qps[i].qp_txr;
6235 rxr = sc->sc_qps[i].qp_rxr;
6236
6237 evcnt_detach(&txr->txr_defragged);
6238 evcnt_detach(&txr->txr_defrag_failed);
6239 evcnt_detach(&txr->txr_pcqdrop);
6240 evcnt_detach(&txr->txr_transmitdef);
6241 evcnt_detach(&txr->txr_intr);
6242 evcnt_detach(&txr->txr_defer);
6243
6244 evcnt_detach(&rxr->rxr_mgethdr_failed);
6245 evcnt_detach(&rxr->rxr_mgetcl_failed);
6246 evcnt_detach(&rxr->rxr_mbuf_load_failed);
6247 evcnt_detach(&rxr->rxr_intr);
6248 evcnt_detach(&rxr->rxr_defer);
6249 }
6250
6251 isc = &sc->sc_stats_counters;
6252 evcnt_detach(&isc->isc_crc_errors);
6253 evcnt_detach(&isc->isc_illegal_bytes);
6254 evcnt_detach(&isc->isc_mac_local_faults);
6255 evcnt_detach(&isc->isc_mac_remote_faults);
6256 evcnt_detach(&isc->isc_link_xon_rx);
6257 evcnt_detach(&isc->isc_link_xon_tx);
6258 evcnt_detach(&isc->isc_link_xoff_rx);
6259 evcnt_detach(&isc->isc_link_xoff_tx);
6260 evcnt_detach(&isc->isc_rx_fragments);
6261 evcnt_detach(&isc->isc_rx_jabber);
6262 evcnt_detach(&isc->isc_rx_bytes);
6263 evcnt_detach(&isc->isc_rx_discards);
6264 evcnt_detach(&isc->isc_rx_unicast);
6265 evcnt_detach(&isc->isc_rx_multicast);
6266 evcnt_detach(&isc->isc_rx_broadcast);
6267 evcnt_detach(&isc->isc_rx_size_64);
6268 evcnt_detach(&isc->isc_rx_size_127);
6269 evcnt_detach(&isc->isc_rx_size_255);
6270 evcnt_detach(&isc->isc_rx_size_511);
6271 evcnt_detach(&isc->isc_rx_size_1023);
6272 evcnt_detach(&isc->isc_rx_size_1522);
6273 evcnt_detach(&isc->isc_rx_size_big);
6274 evcnt_detach(&isc->isc_rx_undersize);
6275 evcnt_detach(&isc->isc_rx_oversize);
6276 evcnt_detach(&isc->isc_tx_bytes);
6277 evcnt_detach(&isc->isc_tx_dropped_link_down);
6278 evcnt_detach(&isc->isc_tx_unicast);
6279 evcnt_detach(&isc->isc_tx_multicast);
6280 evcnt_detach(&isc->isc_tx_broadcast);
6281 evcnt_detach(&isc->isc_tx_size_64);
6282 evcnt_detach(&isc->isc_tx_size_127);
6283 evcnt_detach(&isc->isc_tx_size_255);
6284 evcnt_detach(&isc->isc_tx_size_511);
6285 evcnt_detach(&isc->isc_tx_size_1023);
6286 evcnt_detach(&isc->isc_tx_size_1522);
6287 evcnt_detach(&isc->isc_tx_size_big);
6288 evcnt_detach(&isc->isc_vsi_rx_discards);
6289 evcnt_detach(&isc->isc_vsi_rx_bytes);
6290 evcnt_detach(&isc->isc_vsi_rx_unicast);
6291 evcnt_detach(&isc->isc_vsi_rx_multicast);
6292 evcnt_detach(&isc->isc_vsi_rx_broadcast);
6293 evcnt_detach(&isc->isc_vsi_tx_errors);
6294 evcnt_detach(&isc->isc_vsi_tx_bytes);
6295 evcnt_detach(&isc->isc_vsi_tx_unicast);
6296 evcnt_detach(&isc->isc_vsi_tx_multicast);
6297 evcnt_detach(&isc->isc_vsi_tx_broadcast);
6298
6299 evcnt_detach(&sc->sc_event_atq);
6300 evcnt_detach(&sc->sc_event_link);
6301 evcnt_detach(&sc->sc_event_ecc_err);
6302 evcnt_detach(&sc->sc_event_pci_exception);
6303 evcnt_detach(&sc->sc_event_crit_err);
6304
6305 callout_destroy(&sc->sc_stats_callout);
6306 }
6307
6308 static void
6309 ixl_stats_callout(void *xsc)
6310 {
6311 struct ixl_softc *sc = xsc;
6312
6313 ixl_work_add(sc->sc_workq, &sc->sc_stats_task);
6314 callout_schedule(&sc->sc_stats_callout, mstohz(sc->sc_stats_intval));
6315 }
6316
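/*
 * Read a 32-bit or 48-bit hardware statistics counter and return the
 * increment since the previously recorded value, compensating for
 * counter wrap-around.  A call with has_offset == false only latches
 * the current value as the offset and reports a delta of zero.
 */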
6317 static uint64_t
6318 ixl_stat_delta(struct ixl_softc *sc, uint32_t reg_hi, uint32_t reg_lo,
6319 uint64_t *offset, bool has_offset)
6320 {
6321 uint64_t value, delta;
6322 int bitwidth;
6323
6324 bitwidth = reg_hi == 0 ? 32 : 48;
6325
6326 value = ixl_rd(sc, reg_lo);
6327
6328 if (bitwidth > 32) {
6329 value |= ((uint64_t)ixl_rd(sc, reg_hi) << 32);
6330 }
6331
6332 if (__predict_true(has_offset)) {
6333 delta = value;
6334 if (value < *offset)
6335 delta += ((uint64_t)1 << bitwidth);
6336 delta -= *offset;
6337 } else {
6338 delta = 0;
6339 }
6340 atomic_swap_64(offset, value);
6341
6342 return delta;
6343 }
6344
6345 static void
6346 ixl_stats_update(void *xsc)
6347 {
6348 struct ixl_softc *sc = xsc;
6349 struct ixl_stats_counters *isc;
6350 uint64_t delta;
6351
6352 isc = &sc->sc_stats_counters;
6353
6354 /* errors */
6355 delta = ixl_stat_delta(sc,
6356 0, I40E_GLPRT_CRCERRS(sc->sc_port),
6357 &isc->isc_crc_errors_offset, isc->isc_has_offset);
6358 atomic_add_64(&isc->isc_crc_errors.ev_count, delta);
6359
6360 delta = ixl_stat_delta(sc,
6361 0, I40E_GLPRT_ILLERRC(sc->sc_port),
6362 &isc->isc_illegal_bytes_offset, isc->isc_has_offset);
6363 atomic_add_64(&isc->isc_illegal_bytes.ev_count, delta);
6364
6365 /* rx */
6366 delta = ixl_stat_delta(sc,
6367 I40E_GLPRT_GORCH(sc->sc_port), I40E_GLPRT_GORCL(sc->sc_port),
6368 &isc->isc_rx_bytes_offset, isc->isc_has_offset);
6369 atomic_add_64(&isc->isc_rx_bytes.ev_count, delta);
6370
6371 delta = ixl_stat_delta(sc,
6372 0, I40E_GLPRT_RDPC(sc->sc_port),
6373 &isc->isc_rx_discards_offset, isc->isc_has_offset);
6374 atomic_add_64(&isc->isc_rx_discards.ev_count, delta);
6375
6376 delta = ixl_stat_delta(sc,
6377 I40E_GLPRT_UPRCH(sc->sc_port), I40E_GLPRT_UPRCL(sc->sc_port),
6378 &isc->isc_rx_unicast_offset, isc->isc_has_offset);
6379 atomic_add_64(&isc->isc_rx_unicast.ev_count, delta);
6380
6381 delta = ixl_stat_delta(sc,
6382 I40E_GLPRT_MPRCH(sc->sc_port), I40E_GLPRT_MPRCL(sc->sc_port),
6383 &isc->isc_rx_multicast_offset, isc->isc_has_offset);
6384 atomic_add_64(&isc->isc_rx_multicast.ev_count, delta);
6385
6386 delta = ixl_stat_delta(sc,
6387 I40E_GLPRT_BPRCH(sc->sc_port), I40E_GLPRT_BPRCL(sc->sc_port),
6388 &isc->isc_rx_broadcast_offset, isc->isc_has_offset);
6389 atomic_add_64(&isc->isc_rx_broadcast.ev_count, delta);
6390
6391 /* Packet size stats rx */
6392 delta = ixl_stat_delta(sc,
6393 I40E_GLPRT_PRC64H(sc->sc_port), I40E_GLPRT_PRC64L(sc->sc_port),
6394 &isc->isc_rx_size_64_offset, isc->isc_has_offset);
6395 atomic_add_64(&isc->isc_rx_size_64.ev_count, delta);
6396
6397 delta = ixl_stat_delta(sc,
6398 I40E_GLPRT_PRC127H(sc->sc_port), I40E_GLPRT_PRC127L(sc->sc_port),
6399 &isc->isc_rx_size_127_offset, isc->isc_has_offset);
6400 atomic_add_64(&isc->isc_rx_size_127.ev_count, delta);
6401
6402 delta = ixl_stat_delta(sc,
6403 I40E_GLPRT_PRC255H(sc->sc_port), I40E_GLPRT_PRC255L(sc->sc_port),
6404 &isc->isc_rx_size_255_offset, isc->isc_has_offset);
6405 atomic_add_64(&isc->isc_rx_size_255.ev_count, delta);
6406
6407 delta = ixl_stat_delta(sc,
6408 I40E_GLPRT_PRC511H(sc->sc_port), I40E_GLPRT_PRC511L(sc->sc_port),
6409 &isc->isc_rx_size_511_offset, isc->isc_has_offset);
6410 atomic_add_64(&isc->isc_rx_size_511.ev_count, delta);
6411
6412 delta = ixl_stat_delta(sc,
6413 I40E_GLPRT_PRC1023H(sc->sc_port), I40E_GLPRT_PRC1023L(sc->sc_port),
6414 &isc->isc_rx_size_1023_offset, isc->isc_has_offset);
6415 atomic_add_64(&isc->isc_rx_size_1023.ev_count, delta);
6416
6417 delta = ixl_stat_delta(sc,
6418 I40E_GLPRT_PRC1522H(sc->sc_port), I40E_GLPRT_PRC1522L(sc->sc_port),
6419 &isc->isc_rx_size_1522_offset, isc->isc_has_offset);
6420 atomic_add_64(&isc->isc_rx_size_1522.ev_count, delta);
6421
6422 delta = ixl_stat_delta(sc,
6423 I40E_GLPRT_PRC9522H(sc->sc_port), I40E_GLPRT_PRC9522L(sc->sc_port),
6424 &isc->isc_rx_size_big_offset, isc->isc_has_offset);
6425 atomic_add_64(&isc->isc_rx_size_big.ev_count, delta);
6426
6427 delta = ixl_stat_delta(sc,
6428 0, I40E_GLPRT_RUC(sc->sc_port),
6429 &isc->isc_rx_undersize_offset, isc->isc_has_offset);
6430 atomic_add_64(&isc->isc_rx_undersize.ev_count, delta);
6431
6432 delta = ixl_stat_delta(sc,
6433 0, I40E_GLPRT_ROC(sc->sc_port),
6434 &isc->isc_rx_oversize_offset, isc->isc_has_offset);
6435 atomic_add_64(&isc->isc_rx_oversize.ev_count, delta);
6436
6437 /* tx */
6438 delta = ixl_stat_delta(sc,
6439 I40E_GLPRT_GOTCH(sc->sc_port), I40E_GLPRT_GOTCL(sc->sc_port),
6440 &isc->isc_tx_bytes_offset, isc->isc_has_offset);
6441 atomic_add_64(&isc->isc_tx_bytes.ev_count, delta);
6442
6443 delta = ixl_stat_delta(sc,
6444 0, I40E_GLPRT_TDOLD(sc->sc_port),
6445 &isc->isc_tx_dropped_link_down_offset, isc->isc_has_offset);
6446 atomic_add_64(&isc->isc_tx_dropped_link_down.ev_count, delta);
6447
6448 delta = ixl_stat_delta(sc,
6449 I40E_GLPRT_UPTCH(sc->sc_port), I40E_GLPRT_UPTCL(sc->sc_port),
6450 &isc->isc_tx_unicast_offset, isc->isc_has_offset);
6451 atomic_add_64(&isc->isc_tx_unicast.ev_count, delta);
6452
6453 delta = ixl_stat_delta(sc,
6454 I40E_GLPRT_MPTCH(sc->sc_port), I40E_GLPRT_MPTCL(sc->sc_port),
6455 &isc->isc_tx_multicast_offset, isc->isc_has_offset);
6456 atomic_add_64(&isc->isc_tx_multicast.ev_count, delta);
6457
6458 delta = ixl_stat_delta(sc,
6459 I40E_GLPRT_BPTCH(sc->sc_port), I40E_GLPRT_BPTCL(sc->sc_port),
6460 &isc->isc_tx_broadcast_offset, isc->isc_has_offset);
6461 atomic_add_64(&isc->isc_tx_broadcast.ev_count, delta);
6462
6463 /* Packet size stats tx */
6464 delta = ixl_stat_delta(sc,
6465 	    I40E_GLPRT_PTC64H(sc->sc_port), I40E_GLPRT_PTC64L(sc->sc_port),
6466 &isc->isc_tx_size_64_offset, isc->isc_has_offset);
6467 atomic_add_64(&isc->isc_tx_size_64.ev_count, delta);
6468
6469 delta = ixl_stat_delta(sc,
6470 I40E_GLPRT_PTC127H(sc->sc_port), I40E_GLPRT_PTC127L(sc->sc_port),
6471 &isc->isc_tx_size_127_offset, isc->isc_has_offset);
6472 atomic_add_64(&isc->isc_tx_size_127.ev_count, delta);
6473
6474 delta = ixl_stat_delta(sc,
6475 I40E_GLPRT_PTC255H(sc->sc_port), I40E_GLPRT_PTC255L(sc->sc_port),
6476 &isc->isc_tx_size_255_offset, isc->isc_has_offset);
6477 atomic_add_64(&isc->isc_tx_size_255.ev_count, delta);
6478
6479 delta = ixl_stat_delta(sc,
6480 I40E_GLPRT_PTC511H(sc->sc_port), I40E_GLPRT_PTC511L(sc->sc_port),
6481 &isc->isc_tx_size_511_offset, isc->isc_has_offset);
6482 atomic_add_64(&isc->isc_tx_size_511.ev_count, delta);
6483
6484 delta = ixl_stat_delta(sc,
6485 I40E_GLPRT_PTC1023H(sc->sc_port), I40E_GLPRT_PTC1023L(sc->sc_port),
6486 &isc->isc_tx_size_1023_offset, isc->isc_has_offset);
6487 atomic_add_64(&isc->isc_tx_size_1023.ev_count, delta);
6488
6489 delta = ixl_stat_delta(sc,
6490 I40E_GLPRT_PTC1522H(sc->sc_port), I40E_GLPRT_PTC1522L(sc->sc_port),
6491 &isc->isc_tx_size_1522_offset, isc->isc_has_offset);
6492 atomic_add_64(&isc->isc_tx_size_1522.ev_count, delta);
6493
6494 delta = ixl_stat_delta(sc,
6495 I40E_GLPRT_PTC9522H(sc->sc_port), I40E_GLPRT_PTC9522L(sc->sc_port),
6496 &isc->isc_tx_size_big_offset, isc->isc_has_offset);
6497 atomic_add_64(&isc->isc_tx_size_big.ev_count, delta);
6498
6499 /* mac faults */
6500 delta = ixl_stat_delta(sc,
6501 0, I40E_GLPRT_MLFC(sc->sc_port),
6502 &isc->isc_mac_local_faults_offset, isc->isc_has_offset);
6503 atomic_add_64(&isc->isc_mac_local_faults.ev_count, delta);
6504
6505 delta = ixl_stat_delta(sc,
6506 0, I40E_GLPRT_MRFC(sc->sc_port),
6507 &isc->isc_mac_remote_faults_offset, isc->isc_has_offset);
6508 atomic_add_64(&isc->isc_mac_remote_faults.ev_count, delta);
6509
6510 /* Flow control (LFC) stats */
6511 delta = ixl_stat_delta(sc,
6512 0, I40E_GLPRT_LXONRXC(sc->sc_port),
6513 &isc->isc_link_xon_rx_offset, isc->isc_has_offset);
6514 atomic_add_64(&isc->isc_link_xon_rx.ev_count, delta);
6515
6516 delta = ixl_stat_delta(sc,
6517 0, I40E_GLPRT_LXONTXC(sc->sc_port),
6518 &isc->isc_link_xon_tx_offset, isc->isc_has_offset);
6519 atomic_add_64(&isc->isc_link_xon_tx.ev_count, delta);
6520
6521 delta = ixl_stat_delta(sc,
6522 0, I40E_GLPRT_LXOFFRXC(sc->sc_port),
6523 &isc->isc_link_xoff_rx_offset, isc->isc_has_offset);
6524 atomic_add_64(&isc->isc_link_xoff_rx.ev_count, delta);
6525
6526 delta = ixl_stat_delta(sc,
6527 0, I40E_GLPRT_LXOFFTXC(sc->sc_port),
6528 &isc->isc_link_xoff_tx_offset, isc->isc_has_offset);
6529 atomic_add_64(&isc->isc_link_xoff_tx.ev_count, delta);
6530
6531 /* fragments */
6532 delta = ixl_stat_delta(sc,
6533 0, I40E_GLPRT_RFC(sc->sc_port),
6534 &isc->isc_rx_fragments_offset, isc->isc_has_offset);
6535 atomic_add_64(&isc->isc_rx_fragments.ev_count, delta);
6536
6537 delta = ixl_stat_delta(sc,
6538 0, I40E_GLPRT_RJC(sc->sc_port),
6539 &isc->isc_rx_jabber_offset, isc->isc_has_offset);
6540 atomic_add_64(&isc->isc_rx_jabber.ev_count, delta);
6541
6542 /* VSI rx counters */
6543 delta = ixl_stat_delta(sc,
6544 0, I40E_GLV_RDPC(sc->sc_vsi_stat_counter_idx),
6545 &isc->isc_vsi_rx_discards_offset, isc->isc_has_offset);
6546 atomic_add_64(&isc->isc_vsi_rx_discards.ev_count, delta);
6547
6548 delta = ixl_stat_delta(sc,
6549 I40E_GLV_GORCH(sc->sc_vsi_stat_counter_idx),
6550 I40E_GLV_GORCL(sc->sc_vsi_stat_counter_idx),
6551 &isc->isc_vsi_rx_bytes_offset, isc->isc_has_offset);
6552 atomic_add_64(&isc->isc_vsi_rx_bytes.ev_count, delta);
6553
6554 delta = ixl_stat_delta(sc,
6555 I40E_GLV_UPRCH(sc->sc_vsi_stat_counter_idx),
6556 I40E_GLV_UPRCL(sc->sc_vsi_stat_counter_idx),
6557 &isc->isc_vsi_rx_unicast_offset, isc->isc_has_offset);
6558 atomic_add_64(&isc->isc_vsi_rx_unicast.ev_count, delta);
6559
6560 delta = ixl_stat_delta(sc,
6561 I40E_GLV_MPRCH(sc->sc_vsi_stat_counter_idx),
6562 I40E_GLV_MPRCL(sc->sc_vsi_stat_counter_idx),
6563 &isc->isc_vsi_rx_multicast_offset, isc->isc_has_offset);
6564 atomic_add_64(&isc->isc_vsi_rx_multicast.ev_count, delta);
6565
6566 delta = ixl_stat_delta(sc,
6567 I40E_GLV_BPRCH(sc->sc_vsi_stat_counter_idx),
6568 I40E_GLV_BPRCL(sc->sc_vsi_stat_counter_idx),
6569 &isc->isc_vsi_rx_broadcast_offset, isc->isc_has_offset);
6570 atomic_add_64(&isc->isc_vsi_rx_broadcast.ev_count, delta);
6571
6572 /* VSI tx counters */
6573 delta = ixl_stat_delta(sc,
6574 0, I40E_GLV_TEPC(sc->sc_vsi_stat_counter_idx),
6575 &isc->isc_vsi_tx_errors_offset, isc->isc_has_offset);
6576 atomic_add_64(&isc->isc_vsi_tx_errors.ev_count, delta);
6577
6578 delta = ixl_stat_delta(sc,
6579 I40E_GLV_GOTCH(sc->sc_vsi_stat_counter_idx),
6580 I40E_GLV_GOTCL(sc->sc_vsi_stat_counter_idx),
6581 &isc->isc_vsi_tx_bytes_offset, isc->isc_has_offset);
6582 atomic_add_64(&isc->isc_vsi_tx_bytes.ev_count, delta);
6583
6584 delta = ixl_stat_delta(sc,
6585 I40E_GLV_UPTCH(sc->sc_vsi_stat_counter_idx),
6586 I40E_GLV_UPTCL(sc->sc_vsi_stat_counter_idx),
6587 &isc->isc_vsi_tx_unicast_offset, isc->isc_has_offset);
6588 atomic_add_64(&isc->isc_vsi_tx_unicast.ev_count, delta);
6589
6590 delta = ixl_stat_delta(sc,
6591 I40E_GLV_MPTCH(sc->sc_vsi_stat_counter_idx),
6592 I40E_GLV_MPTCL(sc->sc_vsi_stat_counter_idx),
6593 &isc->isc_vsi_tx_multicast_offset, isc->isc_has_offset);
6594 atomic_add_64(&isc->isc_vsi_tx_multicast.ev_count, delta);
6595
6596 delta = ixl_stat_delta(sc,
6597 I40E_GLV_BPTCH(sc->sc_vsi_stat_counter_idx),
6598 I40E_GLV_BPTCL(sc->sc_vsi_stat_counter_idx),
6599 &isc->isc_vsi_tx_broadcast_offset, isc->isc_has_offset);
6600 atomic_add_64(&isc->isc_vsi_tx_broadcast.ev_count, delta);
6601 }
6602
6603 static int
6604 ixl_setup_sysctls(struct ixl_softc *sc)
6605 {
6606 const char *devname;
6607 struct sysctllog **log;
6608 const struct sysctlnode *rnode, *rxnode, *txnode;
6609 int error;
6610
6611 log = &sc->sc_sysctllog;
6612 devname = device_xname(sc->sc_dev);
6613
6614 error = sysctl_createv(log, 0, NULL, &rnode,
6615 0, CTLTYPE_NODE, devname,
6616 SYSCTL_DESCR("ixl information and settings"),
6617 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
6618 if (error)
6619 goto out;
6620
6621 error = sysctl_createv(log, 0, &rnode, NULL,
6622 CTLFLAG_READWRITE, CTLTYPE_BOOL, "txrx_workqueue",
6623 SYSCTL_DESCR("Use workqueue for packet processing"),
6624 NULL, 0, &sc->sc_txrx_workqueue, 0, CTL_CREATE, CTL_EOL);
6625 if (error)
6626 goto out;
6627
6628 error = sysctl_createv(log, 0, &rnode, NULL,
6629 CTLFLAG_READONLY, CTLTYPE_INT, "stats_interval",
6630 SYSCTL_DESCR("Statistics collection interval in milliseconds"),
6631 NULL, 0, &sc->sc_stats_intval, 0, CTL_CREATE, CTL_EOL);
6632
6633 error = sysctl_createv(log, 0, &rnode, &rxnode,
6634 0, CTLTYPE_NODE, "rx",
6635 SYSCTL_DESCR("ixl information and settings for Rx"),
6636 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
6637 if (error)
6638 goto out;
6639
6640 error = sysctl_createv(log, 0, &rxnode, NULL,
6641 CTLFLAG_READWRITE, CTLTYPE_INT, "itr",
6642 SYSCTL_DESCR("Interrupt Throttling"),
6643 ixl_sysctl_itr_handler, 0,
6644 (void *)sc, 0, CTL_CREATE, CTL_EOL);
6645 if (error)
6646 goto out;
6647
6648 error = sysctl_createv(log, 0, &rxnode, NULL,
6649 CTLFLAG_READWRITE, CTLTYPE_INT, "descriptor_num",
6650 SYSCTL_DESCR("the number of rx descriptors"),
6651 ixl_sysctl_ndescs_handler, 0,
6652 (void *)sc, 0, CTL_CREATE, CTL_EOL);
6653 if (error)
6654 goto out;
6655
6656 error = sysctl_createv(log, 0, &rxnode, NULL,
6657 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
6658 SYSCTL_DESCR("max number of Rx packets"
6659 " to process for interrupt processing"),
6660 NULL, 0, &sc->sc_rx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
6661 if (error)
6662 goto out;
6663
6664 error = sysctl_createv(log, 0, &rxnode, NULL,
6665 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
6666 SYSCTL_DESCR("max number of Rx packets"
6667 " to process for deferred processing"),
6668 NULL, 0, &sc->sc_rx_process_limit, 0, CTL_CREATE, CTL_EOL);
6669 if (error)
6670 goto out;
6671
6672 error = sysctl_createv(log, 0, &rnode, &txnode,
6673 0, CTLTYPE_NODE, "tx",
6674 SYSCTL_DESCR("ixl information and settings for Tx"),
6675 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
6676 if (error)
6677 goto out;
6678
6679 error = sysctl_createv(log, 0, &txnode, NULL,
6680 CTLFLAG_READWRITE, CTLTYPE_INT, "itr",
6681 SYSCTL_DESCR("Interrupt Throttling"),
6682 ixl_sysctl_itr_handler, 0,
6683 (void *)sc, 0, CTL_CREATE, CTL_EOL);
6684 if (error)
6685 goto out;
6686
6687 error = sysctl_createv(log, 0, &txnode, NULL,
6688 CTLFLAG_READWRITE, CTLTYPE_INT, "descriptor_num",
6689 SYSCTL_DESCR("the number of tx descriptors"),
6690 ixl_sysctl_ndescs_handler, 0,
6691 (void *)sc, 0, CTL_CREATE, CTL_EOL);
6692 if (error)
6693 goto out;
6694
6695 error = sysctl_createv(log, 0, &txnode, NULL,
6696 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
6697 SYSCTL_DESCR("max number of Tx packets"
6698 " to process for interrupt processing"),
6699 NULL, 0, &sc->sc_tx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
6700 if (error)
6701 goto out;
6702
6703 error = sysctl_createv(log, 0, &txnode, NULL,
6704 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
6705 SYSCTL_DESCR("max number of Tx packets"
6706 " to process for deferred processing"),
6707 NULL, 0, &sc->sc_tx_process_limit, 0, CTL_CREATE, CTL_EOL);
6708 if (error)
6709 goto out;
6710
6711 out:
6712 if (error) {
6713 aprint_error_dev(sc->sc_dev,
6714 "unable to create sysctl node\n");
6715 sysctl_teardown(log);
6716 }
6717
6718 return error;
6719 }
6720
6721 static void
6722 ixl_teardown_sysctls(struct ixl_softc *sc)
6723 {
6724
6725 sysctl_teardown(&sc->sc_sysctllog);
6726 }
6727
6728 static bool
6729 ixl_sysctlnode_is_rx(struct sysctlnode *node)
6730 {
6731
6732 if (strstr(node->sysctl_parent->sysctl_name, "rx") != NULL)
6733 return true;
6734
6735 return false;
6736 }
6737
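/*
 * Sysctl handler for the Rx/Tx interrupt throttling rate.  New values
 * are rejected while the interface is running (EBUSY) and take effect
 * at the next ixl_init(); the hardware limit is 0x07ff.
 */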
6738 static int
6739 ixl_sysctl_itr_handler(SYSCTLFN_ARGS)
6740 {
6741 struct sysctlnode node = *rnode;
6742 struct ixl_softc *sc = (struct ixl_softc *)node.sysctl_data;
6743 struct ifnet *ifp = &sc->sc_ec.ec_if;
6744 uint32_t newitr, *itrptr;
6745 int error;
6746
6747 if (ixl_sysctlnode_is_rx(&node)) {
6748 itrptr = &sc->sc_itr_rx;
6749 } else {
6750 itrptr = &sc->sc_itr_tx;
6751 }
6752
6753 newitr = *itrptr;
6754 node.sysctl_data = &newitr;
6755 node.sysctl_size = sizeof(newitr);
6756
6757 error = sysctl_lookup(SYSCTLFN_CALL(&node));
6758
6759 if (error || newp == NULL)
6760 return error;
6761
6762 	/* for simplicity, ITR values are applied in ixl_init() */
6763 if (ISSET(ifp->if_flags, IFF_RUNNING))
6764 return EBUSY;
6765
6766 if (newitr > 0x07ff)
6767 return EINVAL;
6768
6769 *itrptr = newitr;
6770
6771 return 0;
6772 }
6773
6774 static int
6775 ixl_sysctl_ndescs_handler(SYSCTLFN_ARGS)
6776 {
6777 struct sysctlnode node = *rnode;
6778 struct ixl_softc *sc = (struct ixl_softc *)node.sysctl_data;
6779 struct ifnet *ifp = &sc->sc_ec.ec_if;
6780 unsigned int *ndescs_ptr, ndescs, n;
6781 int error;
6782
6783 if (ixl_sysctlnode_is_rx(&node)) {
6784 ndescs_ptr = &sc->sc_rx_ring_ndescs;
6785 } else {
6786 ndescs_ptr = &sc->sc_tx_ring_ndescs;
6787 }
6788
6789 ndescs = *ndescs_ptr;
6790 node.sysctl_data = &ndescs;
6791 node.sysctl_size = sizeof(ndescs);
6792
6793 error = sysctl_lookup(SYSCTLFN_CALL(&node));
6794
6795 if (error || newp == NULL)
6796 return error;
6797
6798 if (ISSET(ifp->if_flags, IFF_RUNNING))
6799 return EBUSY;
6800
6801 if (ndescs < 8 || 0xffff < ndescs)
6802 return EINVAL;
6803
6804 n = 1U << (fls32(ndescs) - 1);
6805 if (n != ndescs)
6806 return EINVAL;
6807
6808 *ndescs_ptr = ndescs;
6809
6810 return 0;
6811 }
6812
6813 static struct workqueue *
6814 ixl_workq_create(const char *name, pri_t prio, int ipl, int flags)
6815 {
6816 struct workqueue *wq;
6817 int error;
6818
6819 error = workqueue_create(&wq, name, ixl_workq_work, NULL,
6820 prio, ipl, flags);
6821
6822 if (error)
6823 return NULL;
6824
6825 return wq;
6826 }
6827
6828 static void
6829 ixl_workq_destroy(struct workqueue *wq)
6830 {
6831
6832 workqueue_destroy(wq);
6833 }
6834
6835 static void
6836 ixl_work_set(struct ixl_work *work, void (*func)(void *), void *arg)
6837 {
6838
6839 memset(work, 0, sizeof(*work));
6840 work->ixw_func = func;
6841 work->ixw_arg = arg;
6842 }
6843
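/*
 * Enqueue a work item unless it is already pending.  The ixw_added
 * flag is updated with atomic compare-and-swap so the same item is
 * never put on the workqueue twice.
 */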
6844 static void
6845 ixl_work_add(struct workqueue *wq, struct ixl_work *work)
6846 {
6847 if (atomic_cas_uint(&work->ixw_added, 0, 1) != 0)
6848 return;
6849
6850 kpreempt_disable();
6851 workqueue_enqueue(wq, &work->ixw_cookie, NULL);
6852 kpreempt_enable();
6853 }
6854
6855 static void
6856 ixl_work_wait(struct workqueue *wq, struct ixl_work *work)
6857 {
6858
6859 workqueue_wait(wq, &work->ixw_cookie);
6860 }
6861
6862 static void
6863 ixl_workq_work(struct work *wk, void *context)
6864 {
6865 struct ixl_work *work;
6866
6867 work = container_of(wk, struct ixl_work, ixw_cookie);
6868
6869 atomic_swap_uint(&work->ixw_added, 0);
6870 work->ixw_func(work->ixw_arg);
6871 }
6872
6873 static int
6874 ixl_rx_ctl_read(struct ixl_softc *sc, uint32_t reg, uint32_t *rv)
6875 {
6876 struct ixl_aq_desc iaq;
6877
6878 memset(&iaq, 0, sizeof(iaq));
6879 iaq.iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_REG_READ);
6880 iaq.iaq_param[1] = htole32(reg);
6881
6882 if (ixl_atq_poll(sc, &iaq, 250) != 0)
6883 return ETIMEDOUT;
6884
6885 	switch (le16toh(iaq.iaq_retval)) {
6886 case IXL_AQ_RC_OK:
6887 /* success */
6888 break;
6889 case IXL_AQ_RC_EACCES:
6890 return EPERM;
6891 case IXL_AQ_RC_EAGAIN:
6892 return EAGAIN;
6893 default:
6894 return EIO;
6895 }
6896
6897 	*rv = le32toh(iaq.iaq_param[3]);
6898 return 0;
6899 }
6900
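/*
 * Read an Rx control register.  If the firmware supports the RX_CTL
 * admin queue commands, go through the admin queue (retrying on
 * EAGAIN); otherwise, or if the command keeps failing, fall back to a
 * direct register read.
 */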
6901 static uint32_t
6902 ixl_rd_rx_csr(struct ixl_softc *sc, uint32_t reg)
6903 {
6904 uint32_t val;
6905 int rv, retry, retry_limit;
6906
6907 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL)) {
6908 retry_limit = 5;
6909 } else {
6910 retry_limit = 0;
6911 }
6912
6913 for (retry = 0; retry < retry_limit; retry++) {
6914 rv = ixl_rx_ctl_read(sc, reg, &val);
6915 if (rv == 0)
6916 return val;
6917 else if (rv == EAGAIN)
6918 delaymsec(1);
6919 else
6920 break;
6921 }
6922
6923 val = ixl_rd(sc, reg);
6924
6925 return val;
6926 }
6927
6928 static int
6929 ixl_rx_ctl_write(struct ixl_softc *sc, uint32_t reg, uint32_t value)
6930 {
6931 struct ixl_aq_desc iaq;
6932
6933 memset(&iaq, 0, sizeof(iaq));
6934 iaq.iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_REG_WRITE);
6935 iaq.iaq_param[1] = htole32(reg);
6936 iaq.iaq_param[3] = htole32(value);
6937
6938 if (ixl_atq_poll(sc, &iaq, 250) != 0)
6939 return ETIMEDOUT;
6940
6941 	switch (le16toh(iaq.iaq_retval)) {
6942 case IXL_AQ_RC_OK:
6943 /* success */
6944 break;
6945 case IXL_AQ_RC_EACCES:
6946 return EPERM;
6947 case IXL_AQ_RC_EAGAIN:
6948 return EAGAIN;
6949 default:
6950 return EIO;
6951 }
6952
6953 return 0;
6954 }
6955
6956 static void
6957 ixl_wr_rx_csr(struct ixl_softc *sc, uint32_t reg, uint32_t value)
6958 {
6959 int rv, retry, retry_limit;
6960
6961 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL)) {
6962 retry_limit = 5;
6963 } else {
6964 retry_limit = 0;
6965 }
6966
6967 for (retry = 0; retry < retry_limit; retry++) {
6968 rv = ixl_rx_ctl_write(sc, reg, value);
6969 if (rv == 0)
6970 return;
6971 else if (rv == EAGAIN)
6972 delaymsec(1);
6973 else
6974 break;
6975 }
6976
6977 ixl_wr(sc, reg, value);
6978 }
6979
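/*
 * Acquire ownership of the NVM resource through the admin queue ('R'
 * requests read access, anything else write access).  Firmware that
 * does not require NVM arbitration skips the request entirely.
 */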
6980 static int
6981 ixl_nvm_lock(struct ixl_softc *sc, char rw)
6982 {
6983 struct ixl_aq_desc iaq;
6984 struct ixl_aq_req_resource_param *param;
6985 int rv;
6986
6987 if (!ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK))
6988 return 0;
6989
6990 memset(&iaq, 0, sizeof(iaq));
6991 iaq.iaq_opcode = htole16(IXL_AQ_OP_REQUEST_RESOURCE);
6992
6993 param = (struct ixl_aq_req_resource_param *)&iaq.iaq_param;
6994 param->resource_id = htole16(IXL_AQ_RESOURCE_ID_NVM);
6995 if (rw == 'R') {
6996 param->access_type = htole16(IXL_AQ_RESOURCE_ACCES_READ);
6997 } else {
6998 param->access_type = htole16(IXL_AQ_RESOURCE_ACCES_WRITE);
6999 }
7000
7001 rv = ixl_atq_poll(sc, &iaq, 250);
7002
7003 if (rv != 0)
7004 return ETIMEDOUT;
7005
7006 switch (le16toh(iaq.iaq_retval)) {
7007 case IXL_AQ_RC_OK:
7008 break;
7009 case IXL_AQ_RC_EACCES:
7010 return EACCES;
7011 case IXL_AQ_RC_EBUSY:
7012 return EBUSY;
7013 case IXL_AQ_RC_EPERM:
7014 return EPERM;
7015 }
7016
7017 return 0;
7018 }
7019
7020 static int
7021 ixl_nvm_unlock(struct ixl_softc *sc)
7022 {
7023 struct ixl_aq_desc iaq;
7024 struct ixl_aq_rel_resource_param *param;
7025 int rv;
7026
7027 if (!ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK))
7028 return 0;
7029
7030 memset(&iaq, 0, sizeof(iaq));
7031 iaq.iaq_opcode = htole16(IXL_AQ_OP_RELEASE_RESOURCE);
7032
7033 param = (struct ixl_aq_rel_resource_param *)&iaq.iaq_param;
7034 param->resource_id = htole16(IXL_AQ_RESOURCE_ID_NVM);
7035
7036 rv = ixl_atq_poll(sc, &iaq, 250);
7037
7038 if (rv != 0)
7039 return ETIMEDOUT;
7040
7041 switch (le16toh(iaq.iaq_retval)) {
7042 case IXL_AQ_RC_OK:
7043 break;
7044 default:
7045 return EIO;
7046 }
7047 return 0;
7048 }
7049
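/*
 * Shadow RAM read helpers: ixl_srdone_poll() waits for GLNVM_SRCTL to
 * report completion, and ixl_nvm_read_srctl() reads one 16-bit word
 * through the GLNVM_SRCTL/GLNVM_SRDATA register pair.
 */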
7050 static int
7051 ixl_srdone_poll(struct ixl_softc *sc)
7052 {
7053 int wait_count;
7054 uint32_t reg;
7055
7056 for (wait_count = 0; wait_count < IXL_SRRD_SRCTL_ATTEMPTS;
7057 wait_count++) {
7058 reg = ixl_rd(sc, I40E_GLNVM_SRCTL);
7059 if (ISSET(reg, I40E_GLNVM_SRCTL_DONE_MASK))
7060 break;
7061
7062 delaymsec(5);
7063 }
7064
7065 if (wait_count == IXL_SRRD_SRCTL_ATTEMPTS)
7066 return -1;
7067
7068 return 0;
7069 }
7070
7071 static int
7072 ixl_nvm_read_srctl(struct ixl_softc *sc, uint16_t offset, uint16_t *data)
7073 {
7074 uint32_t reg;
7075
7076 if (ixl_srdone_poll(sc) != 0)
7077 return ETIMEDOUT;
7078
7079 reg = ((uint32_t)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
7080 __BIT(I40E_GLNVM_SRCTL_START_SHIFT);
7081 ixl_wr(sc, I40E_GLNVM_SRCTL, reg);
7082
7083 if (ixl_srdone_poll(sc) != 0) {
7084 aprint_debug("NVM read error: couldn't access "
7085 "Shadow RAM address: 0x%x\n", offset);
7086 return ETIMEDOUT;
7087 }
7088
7089 reg = ixl_rd(sc, I40E_GLNVM_SRDATA);
7090 *data = (uint16_t)__SHIFTOUT(reg, I40E_GLNVM_SRDATA_RDDATA_MASK);
7091
7092 return 0;
7093 }
7094
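/*
 * Read NVM contents through the admin queue into the shared AQ buffer
 * and copy the result to the caller.  The offset is given in 16-bit
 * words and is converted to a 24-bit byte offset for the command.
 */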
7095 static int
7096 ixl_nvm_read_aq(struct ixl_softc *sc, uint16_t offset_word,
7097 void *data, size_t len)
7098 {
7099 struct ixl_dmamem *idm;
7100 struct ixl_aq_desc iaq;
7101 struct ixl_aq_nvm_param *param;
7102 uint32_t offset_bytes;
7103 int rv;
7104
7105 idm = &sc->sc_aqbuf;
7106 if (len > IXL_DMA_LEN(idm))
7107 return ENOMEM;
7108
7109 memset(IXL_DMA_KVA(idm), 0, IXL_DMA_LEN(idm));
7110 memset(&iaq, 0, sizeof(iaq));
7111 iaq.iaq_opcode = htole16(IXL_AQ_OP_NVM_READ);
7112 iaq.iaq_flags = htole16(IXL_AQ_BUF |
7113 ((len > I40E_AQ_LARGE_BUF) ? IXL_AQ_LB : 0));
7114 iaq.iaq_datalen = htole16(len);
7115 ixl_aq_dva(&iaq, IXL_DMA_DVA(idm));
7116
7117 param = (struct ixl_aq_nvm_param *)iaq.iaq_param;
7118 param->command_flags = IXL_AQ_NVM_LAST_CMD;
7119 param->module_pointer = 0;
7120 param->length = htole16(len);
7121 offset_bytes = (uint32_t)offset_word * 2;
7122 offset_bytes &= 0x00FFFFFF;
7123 param->offset = htole32(offset_bytes);
7124
7125 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
7126 BUS_DMASYNC_PREREAD);
7127
7128 rv = ixl_atq_poll(sc, &iaq, 250);
7129
7130 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
7131 BUS_DMASYNC_POSTREAD);
7132
7133 if (rv != 0) {
7134 return ETIMEDOUT;
7135 }
7136
7137 switch (le16toh(iaq.iaq_retval)) {
7138 case IXL_AQ_RC_OK:
7139 break;
7140 case IXL_AQ_RC_EPERM:
7141 return EPERM;
7142 case IXL_AQ_RC_EINVAL:
7143 return EINVAL;
7144 case IXL_AQ_RC_EBUSY:
7145 return EBUSY;
7146 case IXL_AQ_RC_EIO:
7147 default:
7148 return EIO;
7149 }
7150
7151 memcpy(data, IXL_DMA_KVA(idm), len);
7152
7153 return 0;
7154 }
7155
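/*
 * Read one 16-bit word from the NVM, taking the NVM lock and using
 * either the admin queue or the Shadow RAM registers depending on the
 * firmware capability flags.
 */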
7156 static int
7157 ixl_rd16_nvm(struct ixl_softc *sc, uint16_t offset, uint16_t *data)
7158 {
7159 int error;
7160 uint16_t buf;
7161
7162 error = ixl_nvm_lock(sc, 'R');
7163 if (error)
7164 return error;
7165
7166 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMREAD)) {
7167 error = ixl_nvm_read_aq(sc, offset,
7168 &buf, sizeof(buf));
7169 if (error == 0)
7170 *data = le16toh(buf);
7171 } else {
7172 error = ixl_nvm_read_srctl(sc, offset, &buf);
7173 if (error == 0)
7174 *data = buf;
7175 }
7176
7177 ixl_nvm_unlock(sc);
7178
7179 return error;
7180 }
7181
7182 MODULE(MODULE_CLASS_DRIVER, if_ixl, "pci");
7183
7184 #ifdef _MODULE
7185 #include "ioconf.c"
7186 #endif
7187
7188 #ifdef _MODULE
7189 static void
7190 ixl_parse_modprop(prop_dictionary_t dict)
7191 {
7192 prop_object_t obj;
7193 int64_t val;
7194 uint64_t uval;
7195
7196 if (dict == NULL)
7197 return;
7198
7199 obj = prop_dictionary_get(dict, "nomsix");
7200 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_BOOL) {
7201 ixl_param_nomsix = prop_bool_true((prop_bool_t)obj);
7202 }
7203
7204 obj = prop_dictionary_get(dict, "stats_interval");
7205 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
7206 val = prop_number_signed_value((prop_number_t)obj);
7207
7208 		/* arbitrary sanity bounds on the interval */
7209 if (100 < val && val < 180000) {
7210 ixl_param_stats_interval = val;
7211 }
7212 }
7213
7214 obj = prop_dictionary_get(dict, "nqps_limit");
7215 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
7216 val = prop_number_signed_value((prop_number_t)obj);
7217
7218 if (val <= INT32_MAX)
7219 ixl_param_nqps_limit = val;
7220 }
7221
7222 obj = prop_dictionary_get(dict, "rx_ndescs");
7223 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
7224 uval = prop_number_unsigned_integer_value((prop_number_t)obj);
7225
7226 if (uval > 8)
7227 ixl_param_rx_ndescs = uval;
7228 }
7229
7230 obj = prop_dictionary_get(dict, "tx_ndescs");
7231 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
7232 uval = prop_number_unsigned_integer_value((prop_number_t)obj);
7233
7234 if (uval > IXL_TX_PKT_DESCS)
7235 ixl_param_tx_ndescs = uval;
7236 }
7237
7238 }
7239 #endif
7240
7241 static int
7242 if_ixl_modcmd(modcmd_t cmd, void *opaque)
7243 {
7244 int error = 0;
7245
7246 #ifdef _MODULE
7247 switch (cmd) {
7248 case MODULE_CMD_INIT:
7249 ixl_parse_modprop((prop_dictionary_t)opaque);
7250 error = config_init_component(cfdriver_ioconf_if_ixl,
7251 cfattach_ioconf_if_ixl, cfdata_ioconf_if_ixl);
7252 break;
7253 case MODULE_CMD_FINI:
7254 error = config_fini_component(cfdriver_ioconf_if_ixl,
7255 cfattach_ioconf_if_ixl, cfdata_ioconf_if_ixl);
7256 break;
7257 default:
7258 error = ENOTTY;
7259 break;
7260 }
7261 #endif
7262
7263 return error;
7264 }
7265