1 /* $NetBSD: if_ixl.c,v 1.41 2020/02/12 06:37:21 yamaguchi Exp $ */
2
3 /*
4 * Copyright (c) 2013-2015, Intel Corporation
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
12 *
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * 3. Neither the name of the Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 /*
35 * Copyright (c) 2016,2017 David Gwynne <dlg (at) openbsd.org>
36 *
37 * Permission to use, copy, modify, and distribute this software for any
38 * purpose with or without fee is hereby granted, provided that the above
39 * copyright notice and this permission notice appear in all copies.
40 *
41 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
42 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
43 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
44 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
45 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
46 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
47 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
48 */
49
50 /*
51 * Copyright (c) 2019 Internet Initiative Japan, Inc.
52 * All rights reserved.
53 *
54 * Redistribution and use in source and binary forms, with or without
55 * modification, are permitted provided that the following conditions
56 * are met:
57 * 1. Redistributions of source code must retain the above copyright
58 * notice, this list of conditions and the following disclaimer.
59 * 2. Redistributions in binary form must reproduce the above copyright
60 * notice, this list of conditions and the following disclaimer in the
61 * documentation and/or other materials provided with the distribution.
62 *
63 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
64 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
65 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
66 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
67 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
68 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
69 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
70 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
71 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
72 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
73 * POSSIBILITY OF SUCH DAMAGE.
74 */
75
76 #include <sys/cdefs.h>
77
78 #ifdef _KERNEL_OPT
79 #include "opt_net_mpsafe.h"
80 #include "opt_if_ixl.h"
81 #endif
82
83 #include <sys/param.h>
84 #include <sys/types.h>
85
86 #include <sys/cpu.h>
87 #include <sys/device.h>
88 #include <sys/evcnt.h>
89 #include <sys/interrupt.h>
90 #include <sys/kmem.h>
91 #include <sys/malloc.h>
92 #include <sys/module.h>
93 #include <sys/mutex.h>
94 #include <sys/pcq.h>
95 #include <sys/syslog.h>
96 #include <sys/workqueue.h>
97
98 #include <sys/bus.h>
99
100 #include <net/bpf.h>
101 #include <net/if.h>
102 #include <net/if_dl.h>
103 #include <net/if_media.h>
104 #include <net/if_ether.h>
105 #include <net/rss_config.h>
106
107 #include <netinet/tcp.h> /* for struct tcphdr */
108 #include <netinet/udp.h> /* for struct udphdr */
109
110 #include <dev/pci/pcivar.h>
111 #include <dev/pci/pcidevs.h>
112
113 #include <dev/pci/if_ixlreg.h>
114 #include <dev/pci/if_ixlvar.h>
115
116 #include <prop/proplib.h>
117
118 struct ixl_softc; /* defined */
119
120 #define I40E_PF_RESET_WAIT_COUNT 200
121 #define I40E_AQ_LARGE_BUF 512
122
123 /* bitfields for Tx queue mapping in QTX_CTL */
124 #define I40E_QTX_CTL_VF_QUEUE 0x0
125 #define I40E_QTX_CTL_VM_QUEUE 0x1
126 #define I40E_QTX_CTL_PF_QUEUE 0x2
127
128 #define I40E_QUEUE_TYPE_EOL 0x7ff
130
131 #define I40E_QUEUE_TYPE_RX 0x0
132 #define I40E_QUEUE_TYPE_TX 0x1
133 #define I40E_QUEUE_TYPE_PE_CEQ 0x2
134 #define I40E_QUEUE_TYPE_UNKNOWN 0x3
135
136 #define I40E_ITR_INDEX_RX 0x0
137 #define I40E_ITR_INDEX_TX 0x1
138 #define I40E_ITR_INDEX_OTHER 0x2
139 #define I40E_ITR_INDEX_NONE 0x3
140
141 #define I40E_INTR_NOTX_QUEUE 0
142 #define I40E_INTR_NOTX_INTR 0
143 #define I40E_INTR_NOTX_RX_QUEUE 0
144 #define I40E_INTR_NOTX_TX_QUEUE 1
145 #define I40E_INTR_NOTX_RX_MASK I40E_PFINT_ICR0_QUEUE_0_MASK
146 #define I40E_INTR_NOTX_TX_MASK I40E_PFINT_ICR0_QUEUE_1_MASK
147
148 #define BIT_ULL(a) (1ULL << (a))
149 #define IXL_RSS_HENA_DEFAULT_BASE \
150 (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
151 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
152 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
153 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
154 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
155 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
156 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
157 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
158 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
159 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
160 BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
161 #define IXL_RSS_HENA_DEFAULT_XL710 IXL_RSS_HENA_DEFAULT_BASE
162 #define IXL_RSS_HENA_DEFAULT_X722 (IXL_RSS_HENA_DEFAULT_XL710 | \
163 BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
164 BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
165 BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
166 BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
167 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
168 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK))
169 #define I40E_HASH_LUT_SIZE_128 0
170 #define IXL_RSS_KEY_SIZE_REG 13
171
172 #define IXL_ICR0_CRIT_ERR_MASK \
173 (I40E_PFINT_ICR0_PCI_EXCEPTION_MASK | \
174 I40E_PFINT_ICR0_ECC_ERR_MASK | \
175 I40E_PFINT_ICR0_PE_CRITERR_MASK)
176
177 #define IXL_TX_PKT_DESCS 8
178 #define IXL_TX_PKT_MAXSIZE (MCLBYTES * IXL_TX_PKT_DESCS)
179 #define IXL_TX_QUEUE_ALIGN 128
180 #define IXL_RX_QUEUE_ALIGN 128
181
182 #define IXL_MCLBYTES (MCLBYTES - ETHER_ALIGN)
183 #define IXL_MTU_ETHERLEN (ETHER_HDR_LEN \
184 + ETHER_CRC_LEN)
185 #if 0
186 #define IXL_MAX_MTU (9728 - IXL_MTU_ETHERLEN)
187 #else
188 /* (dbuff * 5) - ETHER_HDR_LEN - ETHER_CRC_LEN */
189 #define IXL_MAX_MTU (9600 - IXL_MTU_ETHERLEN)
190 #endif
191 #define IXL_MIN_MTU (ETHER_MIN_LEN - ETHER_CRC_LEN)
192
193 #define IXL_PCIREG PCI_MAPREG_START
194
195 #define IXL_ITR0 0x0
196 #define IXL_ITR1 0x1
197 #define IXL_ITR2 0x2
198 #define IXL_NOITR 0x3
199
200 #define IXL_AQ_NUM 256
201 #define IXL_AQ_MASK (IXL_AQ_NUM - 1)
202 #define IXL_AQ_ALIGN 64 /* alignment used for adminq ring allocations */
203 #define IXL_AQ_BUFLEN 4096
204
205 #define IXL_HMC_ROUNDUP 512
206 #define IXL_HMC_PGSIZE 4096
207 #define IXL_HMC_DVASZ sizeof(uint64_t)
208 #define IXL_HMC_PGS (IXL_HMC_PGSIZE / IXL_HMC_DVASZ)
209 #define IXL_HMC_L2SZ (IXL_HMC_PGSIZE * IXL_HMC_PGS)
210 #define IXL_HMC_PDVALID 1ULL
211
212 #define IXL_ATQ_EXEC_TIMEOUT (10 * hz)
213
214 #define IXL_SRRD_SRCTL_ATTEMPTS 100000
215
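/*
 * Register offsets and masks for the two admin queues: the ATQ carries
 * commands from the driver to the firmware, and the ARQ carries events and
 * responses from the firmware back to the driver.  ixl_attach() below
 * programs the ring base addresses and lengths through these offsets.
 */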
216 struct ixl_aq_regs {
217 bus_size_t atq_tail;
218 bus_size_t atq_head;
219 bus_size_t atq_len;
220 bus_size_t atq_bal;
221 bus_size_t atq_bah;
222
223 bus_size_t arq_tail;
224 bus_size_t arq_head;
225 bus_size_t arq_len;
226 bus_size_t arq_bal;
227 bus_size_t arq_bah;
228
229 uint32_t atq_len_enable;
230 uint32_t atq_tail_mask;
231 uint32_t atq_head_mask;
232
233 uint32_t arq_len_enable;
234 uint32_t arq_tail_mask;
235 uint32_t arq_head_mask;
236 };
237
238 struct ixl_phy_type {
239 uint64_t phy_type;
240 uint64_t ifm_type;
241 };
242
243 struct ixl_speed_type {
244 uint8_t dev_speed;
245 uint64_t net_speed;
246 };
247
248 struct ixl_aq_buf {
249 SIMPLEQ_ENTRY(ixl_aq_buf)
250 aqb_entry;
251 void *aqb_data;
252 bus_dmamap_t aqb_map;
253 bus_dma_segment_t aqb_seg;
254 size_t aqb_size;
255 int aqb_nsegs;
256 };
257 SIMPLEQ_HEAD(ixl_aq_bufs, ixl_aq_buf);
258
259 struct ixl_dmamem {
260 bus_dmamap_t ixm_map;
261 bus_dma_segment_t ixm_seg;
262 int ixm_nsegs;
263 size_t ixm_size;
264 void *ixm_kva;
265 };
266
267 #define IXL_DMA_MAP(_ixm) ((_ixm)->ixm_map)
268 #define IXL_DMA_DVA(_ixm) ((_ixm)->ixm_map->dm_segs[0].ds_addr)
269 #define IXL_DMA_KVA(_ixm) ((void *)(_ixm)->ixm_kva)
270 #define IXL_DMA_LEN(_ixm) ((_ixm)->ixm_size)
271
272 struct ixl_hmc_entry {
273 uint64_t hmc_base;
274 uint32_t hmc_count;
275 uint64_t hmc_size;
276 };
277
278 enum ixl_hmc_types {
279 IXL_HMC_LAN_TX = 0,
280 IXL_HMC_LAN_RX,
281 IXL_HMC_FCOE_CTX,
282 IXL_HMC_FCOE_FILTER,
283 IXL_HMC_COUNT
284 };
285
286 struct ixl_hmc_pack {
287 uint16_t offset;
288 uint16_t width;
289 uint16_t lsb;
290 };
291
292 /*
293 * these hmc objects have weird sizes and alignments, so these are abstract
294 * representations of them that are nice for c to populate.
295 *
296 * the packing code relies on little-endian values being stored in the fields,
297 * on no high bits in the fields being set, and on the fields being packed in
298 * the same order as they are in the ctx structure.
299 */
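/*
 * Illustrative example: ixl_hmc_pack() walks a table of ixl_hmc_pack entries
 * and copies each field from the C struct into the packed context image,
 * placing "width" bits at bit position "lsb".  For instance, the rxq "qlen"
 * field (13 bits wide, lsb 89) ends up occupying bits 89..101 of the packed
 * rx queue context.
 */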
300
301 struct ixl_hmc_rxq {
302 uint16_t head;
303 uint8_t cpuid;
304 uint64_t base;
305 #define IXL_HMC_RXQ_BASE_UNIT 128
306 uint16_t qlen;
307 uint16_t dbuff;
308 #define IXL_HMC_RXQ_DBUFF_UNIT 128
309 uint8_t hbuff;
310 #define IXL_HMC_RXQ_HBUFF_UNIT 64
311 uint8_t dtype;
312 #define IXL_HMC_RXQ_DTYPE_NOSPLIT 0x0
313 #define IXL_HMC_RXQ_DTYPE_HSPLIT 0x1
314 #define IXL_HMC_RXQ_DTYPE_SPLIT_ALWAYS 0x2
315 uint8_t dsize;
316 #define IXL_HMC_RXQ_DSIZE_16 0
317 #define IXL_HMC_RXQ_DSIZE_32 1
318 uint8_t crcstrip;
319 uint8_t fc_ena;
320 uint8_t l2sel;
321 uint8_t hsplit_0;
322 uint8_t hsplit_1;
323 uint8_t showiv;
324 uint16_t rxmax;
325 uint8_t tphrdesc_ena;
326 uint8_t tphwdesc_ena;
327 uint8_t tphdata_ena;
328 uint8_t tphhead_ena;
329 uint8_t lrxqthresh;
330 uint8_t prefena;
331 };
332
333 static const struct ixl_hmc_pack ixl_hmc_pack_rxq[] = {
334 { offsetof(struct ixl_hmc_rxq, head), 13, 0 },
335 { offsetof(struct ixl_hmc_rxq, cpuid), 8, 13 },
336 { offsetof(struct ixl_hmc_rxq, base), 57, 32 },
337 { offsetof(struct ixl_hmc_rxq, qlen), 13, 89 },
338 { offsetof(struct ixl_hmc_rxq, dbuff), 7, 102 },
339 { offsetof(struct ixl_hmc_rxq, hbuff), 5, 109 },
340 { offsetof(struct ixl_hmc_rxq, dtype), 2, 114 },
341 { offsetof(struct ixl_hmc_rxq, dsize), 1, 116 },
342 { offsetof(struct ixl_hmc_rxq, crcstrip), 1, 117 },
343 { offsetof(struct ixl_hmc_rxq, fc_ena), 1, 118 },
344 { offsetof(struct ixl_hmc_rxq, l2sel), 1, 119 },
345 { offsetof(struct ixl_hmc_rxq, hsplit_0), 4, 120 },
346 { offsetof(struct ixl_hmc_rxq, hsplit_1), 2, 124 },
347 { offsetof(struct ixl_hmc_rxq, showiv), 1, 127 },
348 { offsetof(struct ixl_hmc_rxq, rxmax), 14, 174 },
349 { offsetof(struct ixl_hmc_rxq, tphrdesc_ena), 1, 193 },
350 { offsetof(struct ixl_hmc_rxq, tphwdesc_ena), 1, 194 },
351 { offsetof(struct ixl_hmc_rxq, tphdata_ena), 1, 195 },
352 { offsetof(struct ixl_hmc_rxq, tphhead_ena), 1, 196 },
353 { offsetof(struct ixl_hmc_rxq, lrxqthresh), 3, 198 },
354 { offsetof(struct ixl_hmc_rxq, prefena), 1, 201 },
355 };
356
357 #define IXL_HMC_RXQ_MINSIZE (201 + 1)
358
359 struct ixl_hmc_txq {
360 uint16_t head;
361 uint8_t new_context;
362 uint64_t base;
363 #define IXL_HMC_TXQ_BASE_UNIT 128
364 uint8_t fc_ena;
365 uint8_t timesync_ena;
366 uint8_t fd_ena;
367 uint8_t alt_vlan_ena;
368 uint16_t thead_wb;
369 uint8_t cpuid;
370 uint8_t head_wb_ena;
371 #define IXL_HMC_TXQ_DESC_WB 0
372 #define IXL_HMC_TXQ_HEAD_WB 1
373 uint16_t qlen;
374 uint8_t tphrdesc_ena;
375 uint8_t tphrpacket_ena;
376 uint8_t tphwdesc_ena;
377 uint64_t head_wb_addr;
378 uint32_t crc;
379 uint16_t rdylist;
380 uint8_t rdylist_act;
381 };
382
383 static const struct ixl_hmc_pack ixl_hmc_pack_txq[] = {
384 { offsetof(struct ixl_hmc_txq, head), 13, 0 },
385 { offsetof(struct ixl_hmc_txq, new_context), 1, 30 },
386 { offsetof(struct ixl_hmc_txq, base), 57, 32 },
387 { offsetof(struct ixl_hmc_txq, fc_ena), 1, 89 },
388 { offsetof(struct ixl_hmc_txq, timesync_ena), 1, 90 },
389 { offsetof(struct ixl_hmc_txq, fd_ena), 1, 91 },
390 { offsetof(struct ixl_hmc_txq, alt_vlan_ena), 1, 92 },
391 { offsetof(struct ixl_hmc_txq, cpuid), 8, 96 },
392 /* line 1 */
393 { offsetof(struct ixl_hmc_txq, thead_wb), 13, 0 + 128 },
394 { offsetof(struct ixl_hmc_txq, head_wb_ena), 1, 32 + 128 },
395 { offsetof(struct ixl_hmc_txq, qlen), 13, 33 + 128 },
396 { offsetof(struct ixl_hmc_txq, tphrdesc_ena), 1, 46 + 128 },
397 { offsetof(struct ixl_hmc_txq, tphrpacket_ena), 1, 47 + 128 },
398 { offsetof(struct ixl_hmc_txq, tphwdesc_ena), 1, 48 + 128 },
399 { offsetof(struct ixl_hmc_txq, head_wb_addr), 64, 64 + 128 },
400 /* line 7 */
401 { offsetof(struct ixl_hmc_txq, crc), 32, 0 + (7*128) },
402 { offsetof(struct ixl_hmc_txq, rdylist), 10, 84 + (7*128) },
403 { offsetof(struct ixl_hmc_txq, rdylist_act), 1, 94 + (7*128) },
404 };
405
406 #define IXL_HMC_TXQ_MINSIZE (94 + (7*128) + 1)
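/*
 * Note: the *_MINSIZE values are derived from the highest packed bit plus
 * one, i.e. the "lsb" of the last table entry plus its width (prefena at
 * bit 201 for the rx queue context, rdylist_act at bit 94 + (7*128) for
 * the tx queue context).
 */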
407
408 struct ixl_work {
409 struct work ixw_cookie;
410 void (*ixw_func)(void *);
411 void *ixw_arg;
412 unsigned int ixw_added;
413 };
414 #define IXL_WORKQUEUE_PRI PRI_SOFTNET
415
416 struct ixl_tx_map {
417 struct mbuf *txm_m;
418 bus_dmamap_t txm_map;
419 unsigned int txm_eop;
420 };
421
422 struct ixl_tx_ring {
423 kmutex_t txr_lock;
424 struct ixl_softc *txr_sc;
425
426 unsigned int txr_prod;
427 unsigned int txr_cons;
428
429 struct ixl_tx_map *txr_maps;
430 struct ixl_dmamem txr_mem;
431
432 bus_size_t txr_tail;
433 unsigned int txr_qid;
434 pcq_t *txr_intrq;
435 void *txr_si;
436
437 struct evcnt txr_defragged;
438 struct evcnt txr_defrag_failed;
439 struct evcnt txr_pcqdrop;
440 struct evcnt txr_transmitdef;
441 struct evcnt txr_intr;
442 struct evcnt txr_defer;
443 };
444
445 struct ixl_rx_map {
446 struct mbuf *rxm_m;
447 bus_dmamap_t rxm_map;
448 };
449
450 struct ixl_rx_ring {
451 kmutex_t rxr_lock;
452
453 unsigned int rxr_prod;
454 unsigned int rxr_cons;
455
456 struct ixl_rx_map *rxr_maps;
457 struct ixl_dmamem rxr_mem;
458
459 struct mbuf *rxr_m_head;
460 struct mbuf **rxr_m_tail;
461
462 bus_size_t rxr_tail;
463 unsigned int rxr_qid;
464
465 struct evcnt rxr_mgethdr_failed;
466 struct evcnt rxr_mgetcl_failed;
467 struct evcnt rxr_mbuf_load_failed;
468 struct evcnt rxr_intr;
469 struct evcnt rxr_defer;
470 };
471
472 struct ixl_queue_pair {
473 struct ixl_softc *qp_sc;
474 struct ixl_tx_ring *qp_txr;
475 struct ixl_rx_ring *qp_rxr;
476
477 char qp_name[16];
478
479 void *qp_si;
480 struct ixl_work qp_task;
481 bool qp_workqueue;
482 };
483
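/*
 * An ixl_atq pairs an admin queue command descriptor with a completion
 * callback; commands are posted with ixl_atq_post() and the callback is
 * presumably invoked from ixl_atq_done() once the firmware has processed
 * the descriptor.
 */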
484 struct ixl_atq {
485 struct ixl_aq_desc iatq_desc;
486 void (*iatq_fn)(struct ixl_softc *,
487 const struct ixl_aq_desc *);
488 };
489 SIMPLEQ_HEAD(ixl_atq_list, ixl_atq);
490
491 struct ixl_product {
492 unsigned int vendor_id;
493 unsigned int product_id;
494 };
495
496 struct ixl_stats_counters {
497 bool isc_has_offset;
498 struct evcnt isc_crc_errors;
499 uint64_t isc_crc_errors_offset;
500 struct evcnt isc_illegal_bytes;
501 uint64_t isc_illegal_bytes_offset;
502 struct evcnt isc_rx_bytes;
503 uint64_t isc_rx_bytes_offset;
504 struct evcnt isc_rx_discards;
505 uint64_t isc_rx_discards_offset;
506 struct evcnt isc_rx_unicast;
507 uint64_t isc_rx_unicast_offset;
508 struct evcnt isc_rx_multicast;
509 uint64_t isc_rx_multicast_offset;
510 struct evcnt isc_rx_broadcast;
511 uint64_t isc_rx_broadcast_offset;
512 struct evcnt isc_rx_size_64;
513 uint64_t isc_rx_size_64_offset;
514 struct evcnt isc_rx_size_127;
515 uint64_t isc_rx_size_127_offset;
516 struct evcnt isc_rx_size_255;
517 uint64_t isc_rx_size_255_offset;
518 struct evcnt isc_rx_size_511;
519 uint64_t isc_rx_size_511_offset;
520 struct evcnt isc_rx_size_1023;
521 uint64_t isc_rx_size_1023_offset;
522 struct evcnt isc_rx_size_1522;
523 uint64_t isc_rx_size_1522_offset;
524 struct evcnt isc_rx_size_big;
525 uint64_t isc_rx_size_big_offset;
526 struct evcnt isc_rx_undersize;
527 uint64_t isc_rx_undersize_offset;
528 struct evcnt isc_rx_oversize;
529 uint64_t isc_rx_oversize_offset;
530 struct evcnt isc_rx_fragments;
531 uint64_t isc_rx_fragments_offset;
532 struct evcnt isc_rx_jabber;
533 uint64_t isc_rx_jabber_offset;
534 struct evcnt isc_tx_bytes;
535 uint64_t isc_tx_bytes_offset;
536 struct evcnt isc_tx_dropped_link_down;
537 uint64_t isc_tx_dropped_link_down_offset;
538 struct evcnt isc_tx_unicast;
539 uint64_t isc_tx_unicast_offset;
540 struct evcnt isc_tx_multicast;
541 uint64_t isc_tx_multicast_offset;
542 struct evcnt isc_tx_broadcast;
543 uint64_t isc_tx_broadcast_offset;
544 struct evcnt isc_tx_size_64;
545 uint64_t isc_tx_size_64_offset;
546 struct evcnt isc_tx_size_127;
547 uint64_t isc_tx_size_127_offset;
548 struct evcnt isc_tx_size_255;
549 uint64_t isc_tx_size_255_offset;
550 struct evcnt isc_tx_size_511;
551 uint64_t isc_tx_size_511_offset;
552 struct evcnt isc_tx_size_1023;
553 uint64_t isc_tx_size_1023_offset;
554 struct evcnt isc_tx_size_1522;
555 uint64_t isc_tx_size_1522_offset;
556 struct evcnt isc_tx_size_big;
557 uint64_t isc_tx_size_big_offset;
558 struct evcnt isc_mac_local_faults;
559 uint64_t isc_mac_local_faults_offset;
560 struct evcnt isc_mac_remote_faults;
561 uint64_t isc_mac_remote_faults_offset;
562 struct evcnt isc_link_xon_rx;
563 uint64_t isc_link_xon_rx_offset;
564 struct evcnt isc_link_xon_tx;
565 uint64_t isc_link_xon_tx_offset;
566 struct evcnt isc_link_xoff_rx;
567 uint64_t isc_link_xoff_rx_offset;
568 struct evcnt isc_link_xoff_tx;
569 uint64_t isc_link_xoff_tx_offset;
570 struct evcnt isc_vsi_rx_discards;
571 uint64_t isc_vsi_rx_discards_offset;
572 struct evcnt isc_vsi_rx_bytes;
573 uint64_t isc_vsi_rx_bytes_offset;
574 struct evcnt isc_vsi_rx_unicast;
575 uint64_t isc_vsi_rx_unicast_offset;
576 struct evcnt isc_vsi_rx_multicast;
577 uint64_t isc_vsi_rx_multicast_offset;
578 struct evcnt isc_vsi_rx_broadcast;
579 uint64_t isc_vsi_rx_broadcast_offset;
580 struct evcnt isc_vsi_tx_errors;
581 uint64_t isc_vsi_tx_errors_offset;
582 struct evcnt isc_vsi_tx_bytes;
583 uint64_t isc_vsi_tx_bytes_offset;
584 struct evcnt isc_vsi_tx_unicast;
585 uint64_t isc_vsi_tx_unicast_offset;
586 struct evcnt isc_vsi_tx_multicast;
587 uint64_t isc_vsi_tx_multicast_offset;
588 struct evcnt isc_vsi_tx_broadcast;
589 uint64_t isc_vsi_tx_broadcast_offset;
590 };
591
592 /*
593 * Locking notes:
594 * + fields in ixl_tx_ring are protected by txr_lock (a spin mutex), and
595 * fields in ixl_rx_ring are protected by rxr_lock (a spin mutex).
596 * - no more than one of these two locks may be held at once.
597 * + fields named sc_atq_* in ixl_softc are protected by sc_atq_lock
598 * (a spin mutex).
599 * - this lock must not be held together with txr_lock or rxr_lock.
600 * + fields named sc_arq_* are not protected by any lock.
601 * - all operations on sc_arq_* are done in a single context related to
602 * sc_arq_task.
603 * + other fields in ixl_softc are protected by sc_cfg_lock
604 * (an adaptive mutex).
605 * - it must be taken before any other lock is taken, and it can be
606 * released after the other lock is released.
607 */
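/*
 * Lock ordering example (illustrative): sc_cfg_lock may be taken first and
 * txr_lock or rxr_lock acquired while it is held, but txr_lock and rxr_lock
 * are never held at the same time, and sc_atq_lock is never combined with a
 * ring lock.
 */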
608
609 struct ixl_softc {
610 device_t sc_dev;
611 struct ethercom sc_ec;
612 bool sc_attached;
613 bool sc_dead;
614 uint32_t sc_port;
615 struct sysctllog *sc_sysctllog;
616 struct workqueue *sc_workq;
617 struct workqueue *sc_workq_txrx;
618 int sc_stats_intval;
619 callout_t sc_stats_callout;
620 struct ixl_work sc_stats_task;
621 struct ixl_stats_counters
622 sc_stats_counters;
623 uint8_t sc_enaddr[ETHER_ADDR_LEN];
624 struct ifmedia sc_media;
625 uint64_t sc_media_status;
626 uint64_t sc_media_active;
627 uint64_t sc_phy_types;
628 uint8_t sc_phy_abilities;
629 uint8_t sc_phy_linkspeed;
630 uint8_t sc_phy_fec_cfg;
631 uint16_t sc_eee_cap;
632 uint32_t sc_eeer_val;
633 uint8_t sc_d3_lpan;
634 kmutex_t sc_cfg_lock;
635 enum i40e_mac_type sc_mac_type;
636 uint32_t sc_rss_table_size;
637 uint32_t sc_rss_table_entry_width;
638 bool sc_txrx_workqueue;
639 u_int sc_tx_process_limit;
640 u_int sc_rx_process_limit;
641 u_int sc_tx_intr_process_limit;
642 u_int sc_rx_intr_process_limit;
643
644 int sc_cur_ec_capenable;
645
646 struct pci_attach_args sc_pa;
647 pci_intr_handle_t *sc_ihp;
648 void **sc_ihs;
649 unsigned int sc_nintrs;
650
651 bus_dma_tag_t sc_dmat;
652 bus_space_tag_t sc_memt;
653 bus_space_handle_t sc_memh;
654 bus_size_t sc_mems;
655
656 uint8_t sc_pf_id;
657 uint16_t sc_uplink_seid; /* le */
658 uint16_t sc_downlink_seid; /* le */
659 uint16_t sc_vsi_number;
660 uint16_t sc_vsi_stat_counter_idx;
661 uint16_t sc_seid;
662 unsigned int sc_base_queue;
663
664 pci_intr_type_t sc_intrtype;
665 unsigned int sc_msix_vector_queue;
666
667 struct ixl_dmamem sc_scratch;
668 struct ixl_dmamem sc_aqbuf;
669
670 const struct ixl_aq_regs *
671 sc_aq_regs;
672 uint32_t sc_aq_flags;
673 #define IXL_SC_AQ_FLAG_RXCTL __BIT(0)
674 #define IXL_SC_AQ_FLAG_NVMLOCK __BIT(1)
675 #define IXL_SC_AQ_FLAG_NVMREAD __BIT(2)
676 #define IXL_SC_AQ_FLAG_RSS __BIT(3)
677
678 kmutex_t sc_atq_lock;
679 kcondvar_t sc_atq_cv;
680 struct ixl_dmamem sc_atq;
681 unsigned int sc_atq_prod;
682 unsigned int sc_atq_cons;
683
684 struct ixl_dmamem sc_arq;
685 struct ixl_work sc_arq_task;
686 struct ixl_aq_bufs sc_arq_idle;
687 struct ixl_aq_buf *sc_arq_live[IXL_AQ_NUM];
688 unsigned int sc_arq_prod;
689 unsigned int sc_arq_cons;
690
691 struct ixl_work sc_link_state_task;
692 struct ixl_atq sc_link_state_atq;
693
694 struct ixl_dmamem sc_hmc_sd;
695 struct ixl_dmamem sc_hmc_pd;
696 struct ixl_hmc_entry sc_hmc_entries[IXL_HMC_COUNT];
697
698 unsigned int sc_tx_ring_ndescs;
699 unsigned int sc_rx_ring_ndescs;
700 unsigned int sc_nqueue_pairs;
701 unsigned int sc_nqueue_pairs_max;
702 unsigned int sc_nqueue_pairs_device;
703 struct ixl_queue_pair *sc_qps;
704
705 struct evcnt sc_event_atq;
706 struct evcnt sc_event_link;
707 struct evcnt sc_event_ecc_err;
708 struct evcnt sc_event_pci_exception;
709 struct evcnt sc_event_crit_err;
710 };
711
712 #define IXL_TXRX_PROCESS_UNLIMIT UINT_MAX
713 #define IXL_TX_PROCESS_LIMIT 256
714 #define IXL_RX_PROCESS_LIMIT 256
715 #define IXL_TX_INTR_PROCESS_LIMIT 256
716 #define IXL_RX_INTR_PROCESS_LIMIT 0U
717
718 #define IXL_IFCAP_RXCSUM (IFCAP_CSUM_IPv4_Rx | \
719 IFCAP_CSUM_TCPv4_Rx | \
720 IFCAP_CSUM_UDPv4_Rx | \
721 IFCAP_CSUM_TCPv6_Rx | \
722 IFCAP_CSUM_UDPv6_Rx)
723 #define IXL_IFCAP_TXCSUM (IFCAP_CSUM_IPv4_Tx | \
724 IFCAP_CSUM_TCPv4_Tx | \
725 IFCAP_CSUM_UDPv4_Tx | \
726 IFCAP_CSUM_TCPv6_Tx | \
727 IFCAP_CSUM_UDPv6_Tx)
728 #define IXL_CSUM_ALL_OFFLOAD (M_CSUM_IPv4 | \
729 M_CSUM_TCPv4 | M_CSUM_TCPv6 | \
730 M_CSUM_UDPv4 | M_CSUM_UDPv6)
731
732 #define delaymsec(_x) DELAY(1000 * (_x))
733 #ifdef IXL_DEBUG
734 #define DDPRINTF(sc, fmt, args...) \
735 do { \
736 if ((sc) != NULL) { \
737 device_printf( \
738 ((struct ixl_softc *)(sc))->sc_dev, \
739 ""); \
740 } \
741 printf("%s:\t" fmt, __func__, ##args); \
742 } while (0)
743 #else
744 #define DDPRINTF(sc, fmt, args...) __nothing
745 #endif
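/*
 * Illustrative usage (hypothetical call site): DDPRINTF(sc, "arq prod %u\n",
 * prod) prints the device name followed by the calling function's name and
 * the formatted message when built with IXL_DEBUG, and expands to nothing
 * otherwise.
 */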
746 #ifndef IXL_STATS_INTERVAL_MSEC
747 #define IXL_STATS_INTERVAL_MSEC 10000
748 #endif
749 #ifndef IXL_QUEUE_NUM
750 #define IXL_QUEUE_NUM 0
751 #endif
752
753 static bool ixl_param_nomsix = false;
754 static int ixl_param_stats_interval = IXL_STATS_INTERVAL_MSEC;
755 static int ixl_param_nqps_limit = IXL_QUEUE_NUM;
756 static unsigned int ixl_param_tx_ndescs = 1024;
757 static unsigned int ixl_param_rx_ndescs = 1024;
758
759 static enum i40e_mac_type
760 ixl_mactype(pci_product_id_t);
761 static void ixl_clear_hw(struct ixl_softc *);
762 static int ixl_pf_reset(struct ixl_softc *);
763
764 static int ixl_dmamem_alloc(struct ixl_softc *, struct ixl_dmamem *,
765 bus_size_t, bus_size_t);
766 static void ixl_dmamem_free(struct ixl_softc *, struct ixl_dmamem *);
767
768 static int ixl_arq_fill(struct ixl_softc *);
769 static void ixl_arq_unfill(struct ixl_softc *);
770
771 static int ixl_atq_poll(struct ixl_softc *, struct ixl_aq_desc *,
772 unsigned int);
773 static void ixl_atq_set(struct ixl_atq *,
774 void (*)(struct ixl_softc *, const struct ixl_aq_desc *));
775 static int ixl_atq_post(struct ixl_softc *, struct ixl_atq *);
776 static int ixl_atq_post_locked(struct ixl_softc *, struct ixl_atq *);
777 static void ixl_atq_done(struct ixl_softc *);
778 static int ixl_atq_exec(struct ixl_softc *, struct ixl_atq *);
779 static int ixl_get_version(struct ixl_softc *);
780 static int ixl_get_nvm_version(struct ixl_softc *);
781 static int ixl_get_hw_capabilities(struct ixl_softc *);
782 static int ixl_pxe_clear(struct ixl_softc *);
783 static int ixl_lldp_shut(struct ixl_softc *);
784 static int ixl_get_mac(struct ixl_softc *);
785 static int ixl_get_switch_config(struct ixl_softc *);
786 static int ixl_phy_mask_ints(struct ixl_softc *);
787 static int ixl_get_phy_info(struct ixl_softc *);
788 static int ixl_set_phy_config(struct ixl_softc *, uint8_t, uint8_t, bool);
789 static int ixl_set_phy_autoselect(struct ixl_softc *);
790 static int ixl_restart_an(struct ixl_softc *);
791 static int ixl_hmc(struct ixl_softc *);
792 static void ixl_hmc_free(struct ixl_softc *);
793 static int ixl_get_vsi(struct ixl_softc *);
794 static int ixl_set_vsi(struct ixl_softc *);
795 static void ixl_set_filter_control(struct ixl_softc *);
796 static void ixl_get_link_status(void *);
797 static int ixl_get_link_status_poll(struct ixl_softc *, int *);
798 static int ixl_set_link_status(struct ixl_softc *,
799 const struct ixl_aq_desc *);
800 static uint64_t ixl_search_link_speed(uint8_t);
801 static uint8_t ixl_search_baudrate(uint64_t);
802 static void ixl_config_rss(struct ixl_softc *);
803 static int ixl_add_macvlan(struct ixl_softc *, const uint8_t *,
804 uint16_t, uint16_t);
805 static int ixl_remove_macvlan(struct ixl_softc *, const uint8_t *,
806 uint16_t, uint16_t);
807 static void ixl_arq(void *);
808 static void ixl_hmc_pack(void *, const void *,
809 const struct ixl_hmc_pack *, unsigned int);
810 static uint32_t ixl_rd_rx_csr(struct ixl_softc *, uint32_t);
811 static void ixl_wr_rx_csr(struct ixl_softc *, uint32_t, uint32_t);
812 static int ixl_rd16_nvm(struct ixl_softc *, uint16_t, uint16_t *);
813
814 static int ixl_match(device_t, cfdata_t, void *);
815 static void ixl_attach(device_t, device_t, void *);
816 static int ixl_detach(device_t, int);
817
818 static void ixl_media_add(struct ixl_softc *);
819 static int ixl_media_change(struct ifnet *);
820 static void ixl_media_status(struct ifnet *, struct ifmediareq *);
821 static void ixl_watchdog(struct ifnet *);
822 static int ixl_ioctl(struct ifnet *, u_long, void *);
823 static void ixl_start(struct ifnet *);
824 static int ixl_transmit(struct ifnet *, struct mbuf *);
825 static void ixl_deferred_transmit(void *);
826 static int ixl_intr(void *);
827 static int ixl_queue_intr(void *);
828 static int ixl_other_intr(void *);
829 static void ixl_handle_queue(void *);
830 static void ixl_sched_handle_queue(struct ixl_softc *,
831 struct ixl_queue_pair *);
832 static int ixl_init(struct ifnet *);
833 static int ixl_init_locked(struct ixl_softc *);
834 static void ixl_stop(struct ifnet *, int);
835 static void ixl_stop_locked(struct ixl_softc *);
836 static int ixl_iff(struct ixl_softc *);
837 static int ixl_ifflags_cb(struct ethercom *);
838 static int ixl_setup_interrupts(struct ixl_softc *);
839 static int ixl_establish_intx(struct ixl_softc *);
840 static int ixl_establish_msix(struct ixl_softc *);
841 static void ixl_enable_queue_intr(struct ixl_softc *,
842 struct ixl_queue_pair *);
843 static void ixl_disable_queue_intr(struct ixl_softc *,
844 struct ixl_queue_pair *);
845 static void ixl_enable_other_intr(struct ixl_softc *);
846 static void ixl_disable_other_intr(struct ixl_softc *);
847 static void ixl_config_queue_intr(struct ixl_softc *);
848 static void ixl_config_other_intr(struct ixl_softc *);
849
850 static struct ixl_tx_ring *
851 ixl_txr_alloc(struct ixl_softc *, unsigned int);
852 static void ixl_txr_qdis(struct ixl_softc *, struct ixl_tx_ring *, int);
853 static void ixl_txr_config(struct ixl_softc *, struct ixl_tx_ring *);
854 static int ixl_txr_enabled(struct ixl_softc *, struct ixl_tx_ring *);
855 static int ixl_txr_disabled(struct ixl_softc *, struct ixl_tx_ring *);
856 static void ixl_txr_unconfig(struct ixl_softc *, struct ixl_tx_ring *);
857 static void ixl_txr_clean(struct ixl_softc *, struct ixl_tx_ring *);
858 static void ixl_txr_free(struct ixl_softc *, struct ixl_tx_ring *);
859 static int ixl_txeof(struct ixl_softc *, struct ixl_tx_ring *, u_int);
860
861 static struct ixl_rx_ring *
862 ixl_rxr_alloc(struct ixl_softc *, unsigned int);
863 static void ixl_rxr_config(struct ixl_softc *, struct ixl_rx_ring *);
864 static int ixl_rxr_enabled(struct ixl_softc *, struct ixl_rx_ring *);
865 static int ixl_rxr_disabled(struct ixl_softc *, struct ixl_rx_ring *);
866 static void ixl_rxr_unconfig(struct ixl_softc *, struct ixl_rx_ring *);
867 static void ixl_rxr_clean(struct ixl_softc *, struct ixl_rx_ring *);
868 static void ixl_rxr_free(struct ixl_softc *, struct ixl_rx_ring *);
869 static int ixl_rxeof(struct ixl_softc *, struct ixl_rx_ring *, u_int);
870 static int ixl_rxfill(struct ixl_softc *, struct ixl_rx_ring *);
871
872 static struct workqueue *
873 ixl_workq_create(const char *, pri_t, int, int);
874 static void ixl_workq_destroy(struct workqueue *);
875 static int ixl_workqs_teardown(device_t);
876 static void ixl_work_set(struct ixl_work *, void (*)(void *), void *);
877 static void ixl_work_add(struct workqueue *, struct ixl_work *);
878 static void ixl_work_wait(struct workqueue *, struct ixl_work *);
879 static void ixl_workq_work(struct work *, void *);
880 static const struct ixl_product *
881 ixl_lookup(const struct pci_attach_args *pa);
882 static void ixl_link_state_update(struct ixl_softc *,
883 const struct ixl_aq_desc *);
884 static int ixl_vlan_cb(struct ethercom *, uint16_t, bool);
885 static int ixl_setup_vlan_hwfilter(struct ixl_softc *);
886 static void ixl_teardown_vlan_hwfilter(struct ixl_softc *);
887 static int ixl_update_macvlan(struct ixl_softc *);
889 static void ixl_teardown_interrupts(struct ixl_softc *);
890 static int ixl_setup_stats(struct ixl_softc *);
891 static void ixl_teardown_stats(struct ixl_softc *);
892 static void ixl_stats_callout(void *);
893 static void ixl_stats_update(void *);
894 static int ixl_setup_sysctls(struct ixl_softc *);
895 static void ixl_teardown_sysctls(struct ixl_softc *);
896 static int ixl_queue_pairs_alloc(struct ixl_softc *);
897 static void ixl_queue_pairs_free(struct ixl_softc *);
898
899 static const struct ixl_phy_type ixl_phy_type_map[] = {
900 { 1ULL << IXL_PHY_TYPE_SGMII, IFM_1000_SGMII },
901 { 1ULL << IXL_PHY_TYPE_1000BASE_KX, IFM_1000_KX },
902 { 1ULL << IXL_PHY_TYPE_10GBASE_KX4, IFM_10G_KX4 },
903 { 1ULL << IXL_PHY_TYPE_10GBASE_KR, IFM_10G_KR },
904 { 1ULL << IXL_PHY_TYPE_40GBASE_KR4, IFM_40G_KR4 },
905 { 1ULL << IXL_PHY_TYPE_XAUI |
906 1ULL << IXL_PHY_TYPE_XFI, IFM_10G_CX4 },
907 { 1ULL << IXL_PHY_TYPE_SFI, IFM_10G_SFI },
908 { 1ULL << IXL_PHY_TYPE_XLAUI |
909 1ULL << IXL_PHY_TYPE_XLPPI, IFM_40G_XLPPI },
910 { 1ULL << IXL_PHY_TYPE_40GBASE_CR4_CU |
911 1ULL << IXL_PHY_TYPE_40GBASE_CR4, IFM_40G_CR4 },
912 { 1ULL << IXL_PHY_TYPE_10GBASE_CR1_CU |
913 1ULL << IXL_PHY_TYPE_10GBASE_CR1, IFM_10G_CR1 },
914 { 1ULL << IXL_PHY_TYPE_10GBASE_AOC, IFM_10G_AOC },
915 { 1ULL << IXL_PHY_TYPE_40GBASE_AOC, IFM_40G_AOC },
916 { 1ULL << IXL_PHY_TYPE_100BASE_TX, IFM_100_TX },
917 { 1ULL << IXL_PHY_TYPE_1000BASE_T_OPTICAL |
918 1ULL << IXL_PHY_TYPE_1000BASE_T, IFM_1000_T },
919 { 1ULL << IXL_PHY_TYPE_10GBASE_T, IFM_10G_T },
920 { 1ULL << IXL_PHY_TYPE_10GBASE_SR, IFM_10G_SR },
921 { 1ULL << IXL_PHY_TYPE_10GBASE_LR, IFM_10G_LR },
922 { 1ULL << IXL_PHY_TYPE_10GBASE_SFPP_CU, IFM_10G_TWINAX },
923 { 1ULL << IXL_PHY_TYPE_40GBASE_SR4, IFM_40G_SR4 },
924 { 1ULL << IXL_PHY_TYPE_40GBASE_LR4, IFM_40G_LR4 },
925 { 1ULL << IXL_PHY_TYPE_1000BASE_SX, IFM_1000_SX },
926 { 1ULL << IXL_PHY_TYPE_1000BASE_LX, IFM_1000_LX },
927 { 1ULL << IXL_PHY_TYPE_20GBASE_KR2, IFM_20G_KR2 },
928 { 1ULL << IXL_PHY_TYPE_25GBASE_KR, IFM_25G_KR },
929 { 1ULL << IXL_PHY_TYPE_25GBASE_CR, IFM_25G_CR },
930 { 1ULL << IXL_PHY_TYPE_25GBASE_SR, IFM_25G_SR },
931 { 1ULL << IXL_PHY_TYPE_25GBASE_LR, IFM_25G_LR },
932 { 1ULL << IXL_PHY_TYPE_25GBASE_AOC, IFM_25G_AOC },
933 { 1ULL << IXL_PHY_TYPE_25GBASE_ACC, IFM_25G_CR },
934 };
935
936 static const struct ixl_speed_type ixl_speed_type_map[] = {
937 { IXL_AQ_LINK_SPEED_40GB, IF_Gbps(40) },
938 { IXL_AQ_LINK_SPEED_25GB, IF_Gbps(25) },
939 { IXL_AQ_LINK_SPEED_10GB, IF_Gbps(10) },
940 { IXL_AQ_LINK_SPEED_1000MB, IF_Mbps(1000) },
941 { IXL_AQ_LINK_SPEED_100MB, IF_Mbps(100)},
942 };
943
944 static const struct ixl_aq_regs ixl_pf_aq_regs = {
945 .atq_tail = I40E_PF_ATQT,
946 .atq_tail_mask = I40E_PF_ATQT_ATQT_MASK,
947 .atq_head = I40E_PF_ATQH,
948 .atq_head_mask = I40E_PF_ATQH_ATQH_MASK,
949 .atq_len = I40E_PF_ATQLEN,
950 .atq_bal = I40E_PF_ATQBAL,
951 .atq_bah = I40E_PF_ATQBAH,
952 .atq_len_enable = I40E_PF_ATQLEN_ATQENABLE_MASK,
953
954 .arq_tail = I40E_PF_ARQT,
955 .arq_tail_mask = I40E_PF_ARQT_ARQT_MASK,
956 .arq_head = I40E_PF_ARQH,
957 .arq_head_mask = I40E_PF_ARQH_ARQH_MASK,
958 .arq_len = I40E_PF_ARQLEN,
959 .arq_bal = I40E_PF_ARQBAL,
960 .arq_bah = I40E_PF_ARQBAH,
961 .arq_len_enable = I40E_PF_ARQLEN_ARQENABLE_MASK,
962 };
963
964 #define ixl_rd(_s, _r) \
965 bus_space_read_4((_s)->sc_memt, (_s)->sc_memh, (_r))
966 #define ixl_wr(_s, _r, _v) \
967 bus_space_write_4((_s)->sc_memt, (_s)->sc_memh, (_r), (_v))
968 #define ixl_barrier(_s, _r, _l, _o) \
969 bus_space_barrier((_s)->sc_memt, (_s)->sc_memh, (_r), (_l), (_o))
970 #define ixl_flush(_s) (void)ixl_rd((_s), I40E_GLGEN_STAT)
971 #define ixl_nqueues(_sc) (1 << ((_sc)->sc_nqueue_pairs - 1))
972
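/*
 * ixl_dmamem_hi()/ixl_dmamem_lo() split the 64-bit bus address of a DMA
 * region into the 32-bit halves written to the *_bah/*_bal (base address
 * high/low) registers, e.g. when the admin queue rings are programmed in
 * ixl_attach().
 */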
973 static inline uint32_t
974 ixl_dmamem_hi(struct ixl_dmamem *ixm)
975 {
976 uint32_t retval;
977 uint64_t val;
978
979 if (sizeof(IXL_DMA_DVA(ixm)) > 4) {
980 val = (intptr_t)IXL_DMA_DVA(ixm);
981 retval = (uint32_t)(val >> 32);
982 } else {
983 retval = 0;
984 }
985
986 return retval;
987 }
988
989 static inline uint32_t
990 ixl_dmamem_lo(struct ixl_dmamem *ixm)
991 {
992
993 return (uint32_t)IXL_DMA_DVA(ixm);
994 }
995
996 static inline void
997 ixl_aq_dva(struct ixl_aq_desc *iaq, bus_addr_t addr)
998 {
999 uint64_t val;
1000
1001 if (sizeof(addr) > 4) {
1002 val = (intptr_t)addr;
1003 iaq->iaq_param[2] = htole32(val >> 32);
1004 } else {
1005 iaq->iaq_param[2] = htole32(0);
1006 }
1007
1008 iaq->iaq_param[3] = htole32(addr);
1009 }
1010
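/*
 * Number of rx descriptors that may still be refilled between the producer
 * and consumer indices.  One slot is always left unused so the producer
 * never catches up with the consumer.  Worked example: with ndescs = 8,
 * prod = 2 and cons = 6, four slots lie between them and three of those may
 * be refilled.
 */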
1011 static inline unsigned int
1012 ixl_rxr_unrefreshed(unsigned int prod, unsigned int cons, unsigned int ndescs)
1013 {
1014 unsigned int num;
1015
1016 if (prod < cons)
1017 num = cons - prod;
1018 else
1019 num = (ndescs - prod) + cons;
1020
1021 if (__predict_true(num > 0)) {
1022 /* the device cannot receive packets if all descriptors are filled */
1023 num -= 1;
1024 }
1025
1026 return num;
1027 }
1028
1029 CFATTACH_DECL3_NEW(ixl, sizeof(struct ixl_softc),
1030 ixl_match, ixl_attach, ixl_detach, NULL, NULL, NULL,
1031 DVF_DETACH_SHUTDOWN);
1032
1033 static const struct ixl_product ixl_products[] = {
1034 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_SFP },
1035 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_KX_B },
1036 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_KX_C },
1037 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_A },
1038 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_B },
1039 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_C },
1040 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_T },
1041 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_1 },
1042 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_2 },
1043 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_T4_10G },
1044 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_BP },
1045 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_SFP28 },
1046 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_KX },
1047 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_QSFP },
1048 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_SFP },
1049 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_1G_BASET },
1050 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_10G_BASET },
1051 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_I_SFP },
1052 /* required last entry */
1053 {0, 0}
1054 };
1055
1056 static const struct ixl_product *
1057 ixl_lookup(const struct pci_attach_args *pa)
1058 {
1059 const struct ixl_product *ixlp;
1060
1061 for (ixlp = ixl_products; ixlp->vendor_id != 0; ixlp++) {
1062 if (PCI_VENDOR(pa->pa_id) == ixlp->vendor_id &&
1063 PCI_PRODUCT(pa->pa_id) == ixlp->product_id)
1064 return ixlp;
1065 }
1066
1067 return NULL;
1068 }
1069
1070 static int
1071 ixl_match(device_t parent, cfdata_t match, void *aux)
1072 {
1073 const struct pci_attach_args *pa = aux;
1074
1075 return (ixl_lookup(pa) != NULL) ? 1 : 0;
1076 }
1077
1078 static void
1079 ixl_attach(device_t parent, device_t self, void *aux)
1080 {
1081 struct ixl_softc *sc;
1082 struct pci_attach_args *pa = aux;
1083 struct ifnet *ifp;
1084 pcireg_t memtype;
1085 uint32_t firstq, port, ari, func;
1086 char xnamebuf[32];
1087 int tries, rv, link;
1088
1089 sc = device_private(self);
1090 sc->sc_dev = self;
1091 ifp = &sc->sc_ec.ec_if;
1092
1093 sc->sc_pa = *pa;
1094 sc->sc_dmat = (pci_dma64_available(pa)) ?
1095 pa->pa_dmat64 : pa->pa_dmat;
1096 sc->sc_aq_regs = &ixl_pf_aq_regs;
1097
1098 sc->sc_mac_type = ixl_mactype(PCI_PRODUCT(pa->pa_id));
1099
1100 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IXL_PCIREG);
1101 if (pci_mapreg_map(pa, IXL_PCIREG, memtype, 0,
1102 &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems)) {
1103 aprint_error(": unable to map registers\n");
1104 return;
1105 }
1106
1107 mutex_init(&sc->sc_cfg_lock, MUTEX_DEFAULT, IPL_SOFTNET);
1108
1109 firstq = ixl_rd(sc, I40E_PFLAN_QALLOC);
1110 firstq &= I40E_PFLAN_QALLOC_FIRSTQ_MASK;
1111 firstq >>= I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
1112 sc->sc_base_queue = firstq;
1113
1114 ixl_clear_hw(sc);
1115 if (ixl_pf_reset(sc) == -1) {
1116 /* error printed by ixl pf_reset */
1117 goto unmap;
1118 }
1119
1120 port = ixl_rd(sc, I40E_PFGEN_PORTNUM);
1121 port &= I40E_PFGEN_PORTNUM_PORT_NUM_MASK;
1122 port >>= I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
1123 sc->sc_port = port;
1124 aprint_normal(": port %u", sc->sc_port);
1125
1126 ari = ixl_rd(sc, I40E_GLPCI_CAPSUP);
1127 ari &= I40E_GLPCI_CAPSUP_ARI_EN_MASK;
1128 ari >>= I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
1129
1130 func = ixl_rd(sc, I40E_PF_FUNC_RID);
1131 sc->sc_pf_id = func & (ari ? 0xff : 0x7);
1132
1133 /* initialise the adminq */
1134
1135 mutex_init(&sc->sc_atq_lock, MUTEX_DEFAULT, IPL_NET);
1136
1137 if (ixl_dmamem_alloc(sc, &sc->sc_atq,
1138 sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
1139 aprint_error("\n" "%s: unable to allocate atq\n",
1140 device_xname(self));
1141 goto unmap;
1142 }
1143
1144 SIMPLEQ_INIT(&sc->sc_arq_idle);
1145 ixl_work_set(&sc->sc_arq_task, ixl_arq, sc);
1146 sc->sc_arq_cons = 0;
1147 sc->sc_arq_prod = 0;
1148
1149 if (ixl_dmamem_alloc(sc, &sc->sc_arq,
1150 sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
1151 aprint_error("\n" "%s: unable to allocate arq\n",
1152 device_xname(self));
1153 goto free_atq;
1154 }
1155
1156 if (!ixl_arq_fill(sc)) {
1157 aprint_error("\n" "%s: unable to fill arq descriptors\n",
1158 device_xname(self));
1159 goto free_arq;
1160 }
1161
1162 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1163 0, IXL_DMA_LEN(&sc->sc_atq),
1164 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1165
1166 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1167 0, IXL_DMA_LEN(&sc->sc_arq),
1168 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1169
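/*
 * Program the admin queue ring addresses and lengths, then poll the
 * firmware version to confirm the adminq is responding; retry up to 10
 * times with a 100 ms delay between attempts.
 */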
1170 for (tries = 0; tries < 10; tries++) {
1171 sc->sc_atq_cons = 0;
1172 sc->sc_atq_prod = 0;
1173
1174 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1175 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1176 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1177 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1178
1179 ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
1180
1181 ixl_wr(sc, sc->sc_aq_regs->atq_bal,
1182 ixl_dmamem_lo(&sc->sc_atq));
1183 ixl_wr(sc, sc->sc_aq_regs->atq_bah,
1184 ixl_dmamem_hi(&sc->sc_atq));
1185 ixl_wr(sc, sc->sc_aq_regs->atq_len,
1186 sc->sc_aq_regs->atq_len_enable | IXL_AQ_NUM);
1187
1188 ixl_wr(sc, sc->sc_aq_regs->arq_bal,
1189 ixl_dmamem_lo(&sc->sc_arq));
1190 ixl_wr(sc, sc->sc_aq_regs->arq_bah,
1191 ixl_dmamem_hi(&sc->sc_arq));
1192 ixl_wr(sc, sc->sc_aq_regs->arq_len,
1193 sc->sc_aq_regs->arq_len_enable | IXL_AQ_NUM);
1194
1195 rv = ixl_get_version(sc);
1196 if (rv == 0)
1197 break;
1198 if (rv != ETIMEDOUT) {
1199 aprint_error(", unable to get firmware version\n");
1200 goto shutdown;
1201 }
1202
1203 delaymsec(100);
1204 }
1205
1206 ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
1207
1208 if (ixl_dmamem_alloc(sc, &sc->sc_aqbuf, IXL_AQ_BUFLEN, 0) != 0) {
1209 aprint_error_dev(self, ", unable to allocate nvm buffer\n");
1210 goto shutdown;
1211 }
1212
1213 ixl_get_nvm_version(sc);
1214
1215 if (sc->sc_mac_type == I40E_MAC_X722)
1216 sc->sc_nqueue_pairs_device = 128;
1217 else
1218 sc->sc_nqueue_pairs_device = 64;
1219
1220 rv = ixl_get_hw_capabilities(sc);
1221 if (rv != 0) {
1222 aprint_error(", GET HW CAPABILITIES %s\n",
1223 rv == ETIMEDOUT ? "timeout" : "error");
1224 goto free_aqbuf;
1225 }
1226
1227 sc->sc_nqueue_pairs_max = MIN((int)sc->sc_nqueue_pairs_device, ncpu);
1228 if (ixl_param_nqps_limit > 0) {
1229 sc->sc_nqueue_pairs_max = MIN((int)sc->sc_nqueue_pairs_max,
1230 ixl_param_nqps_limit);
1231 }
1232
1233 sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max;
1234 sc->sc_tx_ring_ndescs = ixl_param_tx_ndescs;
1235 sc->sc_rx_ring_ndescs = ixl_param_rx_ndescs;
1236
1237 KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_rx_ring_ndescs);
1238 KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_tx_ring_ndescs);
1239
1240 if (ixl_get_mac(sc) != 0) {
1241 /* error printed by ixl_get_mac */
1242 goto free_aqbuf;
1243 }
1244
1245 aprint_normal("\n");
1246 aprint_naive("\n");
1247
1248 aprint_normal_dev(self, "Ethernet address %s\n",
1249 ether_sprintf(sc->sc_enaddr));
1250
1251 rv = ixl_pxe_clear(sc);
1252 if (rv != 0) {
1253 aprint_debug_dev(self, "CLEAR PXE MODE %s\n",
1254 rv == ETIMEDOUT ? "timeout" : "error");
1255 }
1256
1257 ixl_set_filter_control(sc);
1258
1259 if (ixl_hmc(sc) != 0) {
1260 /* error printed by ixl_hmc */
1261 goto free_aqbuf;
1262 }
1263
1264 if (ixl_lldp_shut(sc) != 0) {
1265 /* error printed by ixl_lldp_shut */
1266 goto free_hmc;
1267 }
1268
1269 if (ixl_phy_mask_ints(sc) != 0) {
1270 /* error printed by ixl_phy_mask_ints */
1271 goto free_hmc;
1272 }
1273
1274 if (ixl_restart_an(sc) != 0) {
1275 /* error printed by ixl_restart_an */
1276 goto free_hmc;
1277 }
1278
1279 if (ixl_get_switch_config(sc) != 0) {
1280 /* error printed by ixl_get_switch_config */
1281 goto free_hmc;
1282 }
1283
1284 rv = ixl_get_link_status_poll(sc, NULL);
1285 if (rv != 0) {
1286 aprint_error_dev(self, "GET LINK STATUS %s\n",
1287 rv == ETIMEDOUT ? "timeout" : "error");
1288 goto free_hmc;
1289 }
1290
1291 /*
1292 * The FW often returns EIO for the "Get PHY Abilities" command
1293 * if it is issued without a delay
1294 */
1295 DELAY(500);
1296 if (ixl_get_phy_info(sc) != 0) {
1297 /* error printed by ixl_get_phy_info */
1298 goto free_hmc;
1299 }
1300
1301 if (ixl_dmamem_alloc(sc, &sc->sc_scratch,
1302 sizeof(struct ixl_aq_vsi_data), 8) != 0) {
1303 aprint_error_dev(self, "unable to allocate scratch buffer\n");
1304 goto free_hmc;
1305 }
1306
1307 rv = ixl_get_vsi(sc);
1308 if (rv != 0) {
1309 aprint_error_dev(self, "GET VSI %s %d\n",
1310 rv == ETIMEDOUT ? "timeout" : "error", rv);
1311 goto free_scratch;
1312 }
1313
1314 rv = ixl_set_vsi(sc);
1315 if (rv != 0) {
1316 aprint_error_dev(self, "UPDATE VSI error %s %d\n",
1317 rv == ETIMEDOUT ? "timeout" : "error", rv);
1318 goto free_scratch;
1319 }
1320
1321 if (ixl_queue_pairs_alloc(sc) != 0) {
1322 /* error printed by ixl_queue_pairs_alloc */
1323 goto free_scratch;
1324 }
1325
1326 if (ixl_setup_interrupts(sc) != 0) {
1327 /* error printed by ixl_setup_interrupts */
1328 goto free_queue_pairs;
1329 }
1330
1331 if (ixl_setup_stats(sc) != 0) {
1332 aprint_error_dev(self, "failed to setup event counters\n");
1333 goto teardown_intrs;
1334 }
1335
1336 if (ixl_setup_sysctls(sc) != 0) {
1337 /* error printed by ixl_setup_sysctls */
1338 goto teardown_stats;
1339 }
1340
1341 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_cfg", device_xname(self));
1342 sc->sc_workq = ixl_workq_create(xnamebuf, IXL_WORKQUEUE_PRI,
1343 IPL_NET, WQ_PERCPU | WQ_MPSAFE);
1344 if (sc->sc_workq == NULL)
1345 goto teardown_sysctls;
1346
1347 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_txrx", device_xname(self));
1348 sc->sc_workq_txrx = ixl_workq_create(xnamebuf, IXL_WORKQUEUE_PRI,
1349 IPL_NET, WQ_PERCPU | WQ_MPSAFE);
1350 if (sc->sc_workq_txrx == NULL)
1351 goto teardown_wqs;
1352
1353 snprintf(xnamebuf, sizeof(xnamebuf), "%s_atq_cv", device_xname(self));
1354 cv_init(&sc->sc_atq_cv, xnamebuf);
1355
1356 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
1357
1358 ifp->if_softc = sc;
1359 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1360 ifp->if_extflags = IFEF_MPSAFE;
1361 ifp->if_ioctl = ixl_ioctl;
1362 ifp->if_start = ixl_start;
1363 ifp->if_transmit = ixl_transmit;
1364 ifp->if_watchdog = ixl_watchdog;
1365 ifp->if_init = ixl_init;
1366 ifp->if_stop = ixl_stop;
1367 IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_ndescs);
1368 IFQ_SET_READY(&ifp->if_snd);
1369 ifp->if_capabilities |= IXL_IFCAP_RXCSUM;
1370 ifp->if_capabilities |= IXL_IFCAP_TXCSUM;
1371 #if 0
1372 ifp->if_capabilities |= IFCAP_TSOv4 | IFCAP_TSOv6;
1373 #endif
1374 ether_set_vlan_cb(&sc->sc_ec, ixl_vlan_cb);
1375 sc->sc_ec.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1376 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
1377 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
1378
1379 sc->sc_ec.ec_capenable = sc->sc_ec.ec_capabilities;
1380 /* Disable VLAN_HWFILTER by default */
1381 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
1382
1383 sc->sc_cur_ec_capenable = sc->sc_ec.ec_capenable;
1384
1385 sc->sc_ec.ec_ifmedia = &sc->sc_media;
1386 ifmedia_init(&sc->sc_media, IFM_IMASK, ixl_media_change,
1387 ixl_media_status);
1388
1389 ixl_media_add(sc);
1390 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1391 if (ISSET(sc->sc_phy_abilities,
1392 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX))) {
1393 ifmedia_add(&sc->sc_media,
1394 IFM_ETHER | IFM_AUTO | IFM_FLOW, 0, NULL);
1395 }
1396 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_NONE, 0, NULL);
1397 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
1398
1399 if_attach(ifp);
1400 if_deferred_start_init(ifp, NULL);
1401 ether_ifattach(ifp, sc->sc_enaddr);
1402 ether_set_ifflags_cb(&sc->sc_ec, ixl_ifflags_cb);
1403
1404 rv = ixl_get_link_status_poll(sc, &link);
1405 if (rv != 0)
1406 link = LINK_STATE_UNKNOWN;
1407 if_link_state_change(ifp, link);
1408
1409 ixl_work_set(&sc->sc_link_state_task, ixl_get_link_status, sc);
1410
1411 ixl_config_other_intr(sc);
1412 ixl_enable_other_intr(sc);
1413
1414 ixl_set_phy_autoselect(sc);
1415
1416 /* remove default mac filter and replace it so we can see vlans */
1417 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, 0, 0);
1418 if (rv != ENOENT) {
1419 aprint_debug_dev(self,
1420 "unable to remove macvlan %u\n", rv);
1421 }
1422 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
1423 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1424 if (rv != ENOENT) {
1425 aprint_debug_dev(self,
1426 "unable to remove macvlan, ignore vlan %u\n", rv);
1427 }
1428
1429 if (ixl_update_macvlan(sc) != 0) {
1430 aprint_debug_dev(self,
1431 "couldn't enable vlan hardware filter\n");
1432 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
1433 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER);
1434 }
1435
1436 sc->sc_txrx_workqueue = true;
1437 sc->sc_tx_process_limit = IXL_TX_PROCESS_LIMIT;
1438 sc->sc_rx_process_limit = IXL_RX_PROCESS_LIMIT;
1439 sc->sc_tx_intr_process_limit = IXL_TX_INTR_PROCESS_LIMIT;
1440 sc->sc_rx_intr_process_limit = IXL_RX_INTR_PROCESS_LIMIT;
1441
1442 ixl_stats_update(sc);
1443 sc->sc_stats_counters.isc_has_offset = true;
1444 callout_schedule(&sc->sc_stats_callout, mstohz(sc->sc_stats_intval));
1445
1446 if (pmf_device_register(self, NULL, NULL) != true)
1447 aprint_debug_dev(self, "couldn't establish power handler\n");
1448 sc->sc_attached = true;
1449 return;
1450
1451 teardown_wqs:
1452 config_finalize_register(self, ixl_workqs_teardown);
1453 teardown_sysctls:
1454 ixl_teardown_sysctls(sc);
1455 teardown_stats:
1456 ixl_teardown_stats(sc);
1457 teardown_intrs:
1458 ixl_teardown_interrupts(sc);
1459 free_queue_pairs:
1460 ixl_queue_pairs_free(sc);
1461 free_scratch:
1462 ixl_dmamem_free(sc, &sc->sc_scratch);
1463 free_hmc:
1464 ixl_hmc_free(sc);
1465 free_aqbuf:
1466 ixl_dmamem_free(sc, &sc->sc_aqbuf);
1467 shutdown:
1468 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1469 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1470 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1471 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1472
1473 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0);
1474 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0);
1475 ixl_wr(sc, sc->sc_aq_regs->atq_len, 0);
1476
1477 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0);
1478 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0);
1479 ixl_wr(sc, sc->sc_aq_regs->arq_len, 0);
1480
1481 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1482 0, IXL_DMA_LEN(&sc->sc_arq),
1483 BUS_DMASYNC_POSTREAD);
1484 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1485 0, IXL_DMA_LEN(&sc->sc_atq),
1486 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1487
1488 ixl_arq_unfill(sc);
1489 free_arq:
1490 ixl_dmamem_free(sc, &sc->sc_arq);
1491 free_atq:
1492 ixl_dmamem_free(sc, &sc->sc_atq);
1493 unmap:
1494 mutex_destroy(&sc->sc_atq_lock);
1495 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
1496 mutex_destroy(&sc->sc_cfg_lock);
1497 sc->sc_mems = 0;
1498
1499 sc->sc_attached = false;
1500 }
1501
1502 static int
1503 ixl_detach(device_t self, int flags)
1504 {
1505 struct ixl_softc *sc = device_private(self);
1506 struct ifnet *ifp = &sc->sc_ec.ec_if;
1507
1508 if (!sc->sc_attached)
1509 return 0;
1510
1511 ixl_stop(ifp, 1);
1512
1513 ixl_disable_other_intr(sc);
1514
1515 callout_stop(&sc->sc_stats_callout);
1516 ixl_work_wait(sc->sc_workq, &sc->sc_stats_task);
1517
1518 /* wait for ATQ handler */
1519 mutex_enter(&sc->sc_atq_lock);
1520 mutex_exit(&sc->sc_atq_lock);
1521
1522 ixl_work_wait(sc->sc_workq, &sc->sc_arq_task);
1523 ixl_work_wait(sc->sc_workq, &sc->sc_link_state_task);
1524
1525 if (sc->sc_workq != NULL) {
1526 ixl_workq_destroy(sc->sc_workq);
1527 sc->sc_workq = NULL;
1528 }
1529
1530 if (sc->sc_workq_txrx != NULL) {
1531 ixl_workq_destroy(sc->sc_workq_txrx);
1532 sc->sc_workq_txrx = NULL;
1533 }
1534
1535 ether_ifdetach(ifp);
1536 if_detach(ifp);
1537 ifmedia_fini(&sc->sc_media);
1538
1539 ixl_teardown_interrupts(sc);
1540 ixl_teardown_stats(sc);
1541 ixl_teardown_sysctls(sc);
1542
1543 ixl_queue_pairs_free(sc);
1544
1545 ixl_dmamem_free(sc, &sc->sc_scratch);
1546 ixl_hmc_free(sc);
1547
1548 /* shutdown */
1549 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1550 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1551 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1552 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1553
1554 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0);
1555 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0);
1556 ixl_wr(sc, sc->sc_aq_regs->atq_len, 0);
1557
1558 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0);
1559 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0);
1560 ixl_wr(sc, sc->sc_aq_regs->arq_len, 0);
1561
1562 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1563 0, IXL_DMA_LEN(&sc->sc_arq),
1564 BUS_DMASYNC_POSTREAD);
1565 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1566 0, IXL_DMA_LEN(&sc->sc_atq),
1567 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1568
1569 ixl_arq_unfill(sc);
1570
1571 ixl_dmamem_free(sc, &sc->sc_arq);
1572 ixl_dmamem_free(sc, &sc->sc_atq);
1573 ixl_dmamem_free(sc, &sc->sc_aqbuf);
1574
1575 cv_destroy(&sc->sc_atq_cv);
1576 mutex_destroy(&sc->sc_atq_lock);
1577
1578 if (sc->sc_mems != 0) {
1579 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
1580 sc->sc_mems = 0;
1581 }
1582
1583 mutex_destroy(&sc->sc_cfg_lock);
1584
1585 return 0;
1586 }
1587
1588 static int
1589 ixl_workqs_teardown(device_t self)
1590 {
1591 struct ixl_softc *sc = device_private(self);
1592
1593 if (sc->sc_workq != NULL) {
1594 ixl_workq_destroy(sc->sc_workq);
1595 sc->sc_workq = NULL;
1596 }
1597
1598 if (sc->sc_workq_txrx != NULL) {
1599 ixl_workq_destroy(sc->sc_workq_txrx);
1600 sc->sc_workq_txrx = NULL;
1601 }
1602
1603 return 0;
1604 }
1605
1606 static int
1607 ixl_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
1608 {
1609 struct ifnet *ifp = &ec->ec_if;
1610 struct ixl_softc *sc = ifp->if_softc;
1611 int rv;
1612
1613 if (!ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)) {
1614 return 0;
1615 }
1616
1617 if (set) {
1618 rv = ixl_add_macvlan(sc, sc->sc_enaddr, vid,
1619 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
1620 if (rv == 0) {
1621 rv = ixl_add_macvlan(sc, etherbroadcastaddr,
1622 vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
1623 }
1624 } else {
1625 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, vid,
1626 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
1627 (void)ixl_remove_macvlan(sc, etherbroadcastaddr, vid,
1628 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
1629 }
1630
1631 return rv;
1632 }
1633
1634 static void
1635 ixl_media_add(struct ixl_softc *sc)
1636 {
1637 struct ifmedia *ifm = &sc->sc_media;
1638 const struct ixl_phy_type *itype;
1639 unsigned int i;
1640 bool flow;
1641
1642 if (ISSET(sc->sc_phy_abilities,
1643 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX))) {
1644 flow = true;
1645 } else {
1646 flow = false;
1647 }
1648
1649 for (i = 0; i < __arraycount(ixl_phy_type_map); i++) {
1650 itype = &ixl_phy_type_map[i];
1651
1652 if (ISSET(sc->sc_phy_types, itype->phy_type)) {
1653 ifmedia_add(ifm,
1654 IFM_ETHER | IFM_FDX | itype->ifm_type, 0, NULL);
1655
1656 if (flow) {
1657 ifmedia_add(ifm,
1658 IFM_ETHER | IFM_FDX | IFM_FLOW |
1659 itype->ifm_type, 0, NULL);
1660 }
1661
1662 if (itype->ifm_type != IFM_100_TX)
1663 continue;
1664
1665 ifmedia_add(ifm, IFM_ETHER | itype->ifm_type,
1666 0, NULL);
1667 if (flow) {
1668 ifmedia_add(ifm,
1669 IFM_ETHER | IFM_FLOW | itype->ifm_type,
1670 0, NULL);
1671 }
1672 }
1673 }
1674 }
1675
1676 static void
1677 ixl_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1678 {
1679 struct ixl_softc *sc = ifp->if_softc;
1680
1681 ifmr->ifm_status = sc->sc_media_status;
1682 ifmr->ifm_active = sc->sc_media_active;
1683
1684 mutex_enter(&sc->sc_cfg_lock);
1685 if (ifp->if_link_state == LINK_STATE_UP)
1686 SET(ifmr->ifm_status, IFM_ACTIVE);
1687 mutex_exit(&sc->sc_cfg_lock);
1688 }
1689
1690 static int
1691 ixl_media_change(struct ifnet *ifp)
1692 {
1693 struct ixl_softc *sc = ifp->if_softc;
1694 struct ifmedia *ifm = &sc->sc_media;
1695 uint64_t ifm_active = sc->sc_media_active;
1696 uint8_t link_speed, abilities;
1697
1698 switch (IFM_SUBTYPE(ifm_active)) {
1699 case IFM_1000_SGMII:
1700 case IFM_1000_KX:
1701 case IFM_10G_KX4:
1702 case IFM_10G_KR:
1703 case IFM_40G_KR4:
1704 case IFM_20G_KR2:
1705 case IFM_25G_KR:
1706 /* backplanes */
1707 return EINVAL;
1708 }
1709
1710 abilities = IXL_PHY_ABILITY_AUTONEGO | IXL_PHY_ABILITY_LINKUP;
1711
1712 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1713 case IFM_AUTO:
1714 link_speed = sc->sc_phy_linkspeed;
1715 break;
1716 case IFM_NONE:
1717 link_speed = 0;
1718 CLR(abilities, IXL_PHY_ABILITY_LINKUP);
1719 break;
1720 default:
1721 link_speed = ixl_search_baudrate(
1722 ifmedia_baudrate(ifm->ifm_media));
1723 }
1724
1725 if (ISSET(abilities, IXL_PHY_ABILITY_LINKUP)) {
1726 if (ISSET(link_speed, sc->sc_phy_linkspeed) == 0)
1727 return EINVAL;
1728 }
1729
1730 if (ifm->ifm_media & IFM_FLOW) {
1731 abilities |= sc->sc_phy_abilities &
1732 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX);
1733 }
1734
1735 return ixl_set_phy_config(sc, link_speed, abilities, false);
1736 }
1737
1738 static void
1739 ixl_watchdog(struct ifnet *ifp)
1740 {
1741
1742 }
1743
1744 static void
1745 ixl_del_all_multiaddr(struct ixl_softc *sc)
1746 {
1747 struct ethercom *ec = &sc->sc_ec;
1748 struct ether_multi *enm;
1749 struct ether_multistep step;
1750
1751 ETHER_LOCK(ec);
1752 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1753 ETHER_NEXT_MULTI(step, enm)) {
1754 ixl_remove_macvlan(sc, enm->enm_addrlo, 0,
1755 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1756 }
1757 ETHER_UNLOCK(ec);
1758 }
1759
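/*
 * Install a multicast filter.  Address ranges and filter table
 * exhaustion fall back to IFF_ALLMULTI; ENETRESET tells the caller to
 * reprogram the receive filters via ixl_iff().
 */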
1760 static int
1761 ixl_add_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
1762 {
1763 struct ifnet *ifp = &sc->sc_ec.ec_if;
1764 int rv;
1765
1766 if (ISSET(ifp->if_flags, IFF_ALLMULTI))
1767 return 0;
1768
1769 if (memcmp(addrlo, addrhi, ETHER_ADDR_LEN) != 0) {
1770 ixl_del_all_multiaddr(sc);
1771 SET(ifp->if_flags, IFF_ALLMULTI);
1772 return ENETRESET;
1773 }
1774
1775 	/* multicast addresses cannot use the VLAN HWFILTER */
1776 rv = ixl_add_macvlan(sc, addrlo, 0,
1777 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
1778
1779 if (rv == ENOSPC) {
1780 ixl_del_all_multiaddr(sc);
1781 SET(ifp->if_flags, IFF_ALLMULTI);
1782 return ENETRESET;
1783 }
1784
1785 return rv;
1786 }
1787
1788 static int
1789 ixl_del_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
1790 {
1791 struct ifnet *ifp = &sc->sc_ec.ec_if;
1792 struct ethercom *ec = &sc->sc_ec;
1793 struct ether_multi *enm, *enm_last;
1794 struct ether_multistep step;
1795 int error, rv = 0;
1796
1797 if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
1798 ixl_remove_macvlan(sc, addrlo, 0,
1799 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1800 return 0;
1801 }
1802
1803 ETHER_LOCK(ec);
1804 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1805 ETHER_NEXT_MULTI(step, enm)) {
1806 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
1807 ETHER_ADDR_LEN) != 0) {
1808 goto out;
1809 }
1810 }
1811
1812 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1813 ETHER_NEXT_MULTI(step, enm)) {
1814 error = ixl_add_macvlan(sc, enm->enm_addrlo, 0,
1815 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
1816 if (error != 0)
1817 break;
1818 }
1819
1820 if (enm != NULL) {
1821 enm_last = enm;
1822 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1823 ETHER_NEXT_MULTI(step, enm)) {
1824 if (enm == enm_last)
1825 break;
1826
1827 ixl_remove_macvlan(sc, enm->enm_addrlo, 0,
1828 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1829 }
1830 } else {
1831 CLR(ifp->if_flags, IFF_ALLMULTI);
1832 rv = ENETRESET;
1833 }
1834
1835 out:
1836 ETHER_UNLOCK(ec);
1837 return rv;
1838 }
1839
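/*
 * Interface ioctl handler.  MTU changes and multicast list updates are
 * handled here; everything else is passed to ether_ioctl().  An
 * ENETRESET result causes ixl_iff() to resynchronize the hardware
 * receive filters.
 */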
1840 static int
1841 ixl_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1842 {
1843 struct ifreq *ifr = (struct ifreq *)data;
1844 struct ixl_softc *sc = (struct ixl_softc *)ifp->if_softc;
1845 const struct sockaddr *sa;
1846 uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
1847 int s, error = 0;
1848 unsigned int nmtu;
1849
1850 switch (cmd) {
1851 case SIOCSIFMTU:
1852 nmtu = ifr->ifr_mtu;
1853
1854 if (nmtu < IXL_MIN_MTU || nmtu > IXL_MAX_MTU) {
1855 error = EINVAL;
1856 break;
1857 }
1858 if (ifp->if_mtu != nmtu) {
1859 s = splnet();
1860 error = ether_ioctl(ifp, cmd, data);
1861 splx(s);
1862 if (error == ENETRESET)
1863 error = ixl_init(ifp);
1864 }
1865 break;
1866 case SIOCADDMULTI:
1867 sa = ifreq_getaddr(SIOCADDMULTI, ifr);
1868 if (ether_addmulti(sa, &sc->sc_ec) == ENETRESET) {
1869 error = ether_multiaddr(sa, addrlo, addrhi);
1870 if (error != 0)
1871 return error;
1872
1873 error = ixl_add_multi(sc, addrlo, addrhi);
1874 if (error != 0 && error != ENETRESET) {
1875 ether_delmulti(sa, &sc->sc_ec);
1876 error = EIO;
1877 }
1878 }
1879 break;
1880
1881 case SIOCDELMULTI:
1882 sa = ifreq_getaddr(SIOCDELMULTI, ifr);
1883 if (ether_delmulti(sa, &sc->sc_ec) == ENETRESET) {
1884 error = ether_multiaddr(sa, addrlo, addrhi);
1885 if (error != 0)
1886 return error;
1887
1888 error = ixl_del_multi(sc, addrlo, addrhi);
1889 }
1890 break;
1891
1892 default:
1893 s = splnet();
1894 error = ether_ioctl(ifp, cmd, data);
1895 splx(s);
1896 }
1897
1898 if (error == ENETRESET)
1899 error = ixl_iff(sc);
1900
1901 return error;
1902 }
1903
1904 static enum i40e_mac_type
1905 ixl_mactype(pci_product_id_t id)
1906 {
1907
1908 switch (id) {
1909 case PCI_PRODUCT_INTEL_XL710_SFP:
1910 case PCI_PRODUCT_INTEL_XL710_KX_B:
1911 case PCI_PRODUCT_INTEL_XL710_KX_C:
1912 case PCI_PRODUCT_INTEL_XL710_QSFP_A:
1913 case PCI_PRODUCT_INTEL_XL710_QSFP_B:
1914 case PCI_PRODUCT_INTEL_XL710_QSFP_C:
1915 case PCI_PRODUCT_INTEL_X710_10G_T:
1916 case PCI_PRODUCT_INTEL_XL710_20G_BP_1:
1917 case PCI_PRODUCT_INTEL_XL710_20G_BP_2:
1918 case PCI_PRODUCT_INTEL_X710_T4_10G:
1919 case PCI_PRODUCT_INTEL_XXV710_25G_BP:
1920 case PCI_PRODUCT_INTEL_XXV710_25G_SFP28:
1921 return I40E_MAC_XL710;
1922
1923 case PCI_PRODUCT_INTEL_X722_KX:
1924 case PCI_PRODUCT_INTEL_X722_QSFP:
1925 case PCI_PRODUCT_INTEL_X722_SFP:
1926 case PCI_PRODUCT_INTEL_X722_1G_BASET:
1927 case PCI_PRODUCT_INTEL_X722_10G_BASET:
1928 case PCI_PRODUCT_INTEL_X722_I_SFP:
1929 return I40E_MAC_X722;
1930 }
1931
1932 return I40E_MAC_GENERIC;
1933 }
1934
1935 static inline void *
1936 ixl_hmc_kva(struct ixl_softc *sc, enum ixl_hmc_types type, unsigned int i)
1937 {
1938 uint8_t *kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
1939 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
1940
1941 if (i >= e->hmc_count)
1942 return NULL;
1943
1944 kva += e->hmc_base;
1945 kva += i * e->hmc_size;
1946
1947 return kva;
1948 }
1949
1950 static inline size_t
1951 ixl_hmc_len(struct ixl_softc *sc, enum ixl_hmc_types type)
1952 {
1953 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
1954
1955 return e->hmc_size;
1956 }
1957
1958 static void
1959 ixl_enable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp)
1960 {
1961 struct ixl_rx_ring *rxr = qp->qp_rxr;
1962
1963 ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid),
1964 I40E_PFINT_DYN_CTLN_INTENA_MASK |
1965 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1966 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
1967 ixl_flush(sc);
1968 }
1969
1970 static void
1971 ixl_disable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp)
1972 {
1973 struct ixl_rx_ring *rxr = qp->qp_rxr;
1974
1975 ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid),
1976 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
1977 ixl_flush(sc);
1978 }
1979
1980 static void
1981 ixl_enable_other_intr(struct ixl_softc *sc)
1982 {
1983
1984 ixl_wr(sc, I40E_PFINT_DYN_CTL0,
1985 I40E_PFINT_DYN_CTL0_INTENA_MASK |
1986 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
1987 (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
1988 ixl_flush(sc);
1989 }
1990
1991 static void
1992 ixl_disable_other_intr(struct ixl_softc *sc)
1993 {
1994
1995 ixl_wr(sc, I40E_PFINT_DYN_CTL0,
1996 (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
1997 ixl_flush(sc);
1998 }
1999
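/*
 * Reprogram the VSI and bring all queue pairs back up: rewrite the TX
 * and RX HMC contexts, prime the RX rings and wait for the hardware to
 * report each queue enabled.  Called with the configuration lock held.
 */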
2000 static int
2001 ixl_reinit(struct ixl_softc *sc)
2002 {
2003 struct ixl_rx_ring *rxr;
2004 struct ixl_tx_ring *txr;
2005 unsigned int i;
2006 uint32_t reg;
2007
2008 KASSERT(mutex_owned(&sc->sc_cfg_lock));
2009
2010 if (ixl_get_vsi(sc) != 0)
2011 return EIO;
2012
2013 if (ixl_set_vsi(sc) != 0)
2014 return EIO;
2015
2016 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2017 txr = sc->sc_qps[i].qp_txr;
2018 rxr = sc->sc_qps[i].qp_rxr;
2019
2020 txr->txr_cons = txr->txr_prod = 0;
2021 rxr->rxr_cons = rxr->rxr_prod = 0;
2022
2023 ixl_txr_config(sc, txr);
2024 ixl_rxr_config(sc, rxr);
2025 }
2026
2027 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
2028 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_PREWRITE);
2029
2030 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2031 txr = sc->sc_qps[i].qp_txr;
2032 rxr = sc->sc_qps[i].qp_rxr;
2033
2034 ixl_wr(sc, I40E_QTX_CTL(i), I40E_QTX_CTL_PF_QUEUE |
2035 (sc->sc_pf_id << I40E_QTX_CTL_PF_INDX_SHIFT));
2036 ixl_flush(sc);
2037
2038 ixl_wr(sc, txr->txr_tail, txr->txr_prod);
2039 ixl_wr(sc, rxr->rxr_tail, rxr->rxr_prod);
2040
2041 /* ixl_rxfill() needs lock held */
2042 mutex_enter(&rxr->rxr_lock);
2043 ixl_rxfill(sc, rxr);
2044 mutex_exit(&rxr->rxr_lock);
2045
2046 reg = ixl_rd(sc, I40E_QRX_ENA(i));
2047 SET(reg, I40E_QRX_ENA_QENA_REQ_MASK);
2048 ixl_wr(sc, I40E_QRX_ENA(i), reg);
2049 if (ixl_rxr_enabled(sc, rxr) != 0)
2050 goto stop;
2051
2052 ixl_txr_qdis(sc, txr, 1);
2053
2054 reg = ixl_rd(sc, I40E_QTX_ENA(i));
2055 SET(reg, I40E_QTX_ENA_QENA_REQ_MASK);
2056 ixl_wr(sc, I40E_QTX_ENA(i), reg);
2057
2058 if (ixl_txr_enabled(sc, txr) != 0)
2059 goto stop;
2060 }
2061
2062 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
2063 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE);
2064
2065 return 0;
2066
2067 stop:
2068 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
2069 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE);
2070
2071 return ETIMEDOUT;
2072 }
2073
2074 static int
2075 ixl_init_locked(struct ixl_softc *sc)
2076 {
2077 struct ifnet *ifp = &sc->sc_ec.ec_if;
2078 unsigned int i;
2079 int error, eccap_change;
2080
2081 KASSERT(mutex_owned(&sc->sc_cfg_lock));
2082
2083 if (ISSET(ifp->if_flags, IFF_RUNNING))
2084 ixl_stop_locked(sc);
2085
2086 if (sc->sc_dead) {
2087 return ENXIO;
2088 }
2089
2090 eccap_change = sc->sc_ec.ec_capenable ^ sc->sc_cur_ec_capenable;
2091 if (ISSET(eccap_change, ETHERCAP_VLAN_HWTAGGING))
2092 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWTAGGING;
2093
2094 if (ISSET(eccap_change, ETHERCAP_VLAN_HWFILTER)) {
2095 if (ixl_update_macvlan(sc) == 0) {
2096 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWFILTER;
2097 } else {
2098 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
2099 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER);
2100 }
2101 }
2102
2103 if (sc->sc_intrtype != PCI_INTR_TYPE_MSIX)
2104 sc->sc_nqueue_pairs = 1;
2105 else
2106 sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max;
2107
2108 error = ixl_reinit(sc);
2109 if (error) {
2110 ixl_stop_locked(sc);
2111 return error;
2112 }
2113
2114 SET(ifp->if_flags, IFF_RUNNING);
2115 CLR(ifp->if_flags, IFF_OACTIVE);
2116
2117 (void)ixl_get_link_status(sc);
2118
2119 ixl_config_rss(sc);
2120 ixl_config_queue_intr(sc);
2121
2122 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2123 ixl_enable_queue_intr(sc, &sc->sc_qps[i]);
2124 }
2125
2126 error = ixl_iff(sc);
2127 if (error) {
2128 ixl_stop_locked(sc);
2129 return error;
2130 }
2131
2132 return 0;
2133 }
2134
2135 static int
2136 ixl_init(struct ifnet *ifp)
2137 {
2138 struct ixl_softc *sc = ifp->if_softc;
2139 int error;
2140
2141 mutex_enter(&sc->sc_cfg_lock);
2142 error = ixl_init_locked(sc);
2143 mutex_exit(&sc->sc_cfg_lock);
2144
2145 return error;
2146 }
2147
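/*
 * Synchronize the receive filter state with the interface flags: set
 * the VSI promiscuous-mode flags (unicast, multicast, broadcast, VLAN)
 * to match IFF_PROMISC/IFF_ALLMULTI and the VLAN filter state through
 * the admin queue, and reprogram the station MAC filter if the
 * link-layer address has changed.
 */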
2148 static int
2149 ixl_iff(struct ixl_softc *sc)
2150 {
2151 struct ifnet *ifp = &sc->sc_ec.ec_if;
2152 struct ixl_atq iatq;
2153 struct ixl_aq_desc *iaq;
2154 struct ixl_aq_vsi_promisc_param *param;
2155 uint16_t flag_add, flag_del;
2156 int error;
2157
2158 if (!ISSET(ifp->if_flags, IFF_RUNNING))
2159 return 0;
2160
2161 memset(&iatq, 0, sizeof(iatq));
2162
2163 iaq = &iatq.iatq_desc;
2164 iaq->iaq_opcode = htole16(IXL_AQ_OP_SET_VSI_PROMISC);
2165
2166 param = (struct ixl_aq_vsi_promisc_param *)&iaq->iaq_param;
2167 param->flags = htole16(0);
2168
2169 if (!ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)
2170 || ISSET(ifp->if_flags, IFF_PROMISC)) {
2171 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_BCAST |
2172 IXL_AQ_VSI_PROMISC_FLAG_VLAN);
2173 }
2174
2175 if (ISSET(ifp->if_flags, IFF_PROMISC)) {
2176 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
2177 IXL_AQ_VSI_PROMISC_FLAG_MCAST);
2178 } else if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
2179 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_MCAST);
2180 }
2181 param->valid_flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
2182 IXL_AQ_VSI_PROMISC_FLAG_MCAST | IXL_AQ_VSI_PROMISC_FLAG_BCAST |
2183 IXL_AQ_VSI_PROMISC_FLAG_VLAN);
2184 param->seid = sc->sc_seid;
2185
2186 error = ixl_atq_exec(sc, &iatq);
2187 if (error)
2188 return error;
2189
2190 if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK))
2191 return EIO;
2192
2193 if (memcmp(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN) != 0) {
2194 if (ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)) {
2195 flag_add = IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH;
2196 flag_del = IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH;
2197 } else {
2198 flag_add = IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN;
2199 flag_del = IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN;
2200 }
2201
2202 ixl_remove_macvlan(sc, sc->sc_enaddr, 0, flag_del);
2203
2204 memcpy(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN);
2205 ixl_add_macvlan(sc, sc->sc_enaddr, 0, flag_add);
2206 }
2207 return 0;
2208 }
2209
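/*
 * Wait for TX/RX processing that may still be running elsewhere to
 * drain, by taking and releasing each ring lock and waiting for the
 * per-queue deferred work to finish.
 */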
2210 static void
2211 ixl_stop_rendezvous(struct ixl_softc *sc)
2212 {
2213 struct ixl_tx_ring *txr;
2214 struct ixl_rx_ring *rxr;
2215 unsigned int i;
2216
2217 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2218 txr = sc->sc_qps[i].qp_txr;
2219 rxr = sc->sc_qps[i].qp_rxr;
2220
2221 mutex_enter(&txr->txr_lock);
2222 mutex_exit(&txr->txr_lock);
2223
2224 mutex_enter(&rxr->rxr_lock);
2225 mutex_exit(&rxr->rxr_lock);
2226
2227 ixl_work_wait(sc->sc_workq_txrx,
2228 &sc->sc_qps[i].qp_task);
2229 }
2230 }
2231
2232 static void
2233 ixl_stop_locked(struct ixl_softc *sc)
2234 {
2235 struct ifnet *ifp = &sc->sc_ec.ec_if;
2236 struct ixl_rx_ring *rxr;
2237 struct ixl_tx_ring *txr;
2238 unsigned int i;
2239 uint32_t reg;
2240
2241 KASSERT(mutex_owned(&sc->sc_cfg_lock));
2242
2243 CLR(ifp->if_flags, IFF_RUNNING | IFF_OACTIVE);
2244
2245 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2246 txr = sc->sc_qps[i].qp_txr;
2247 rxr = sc->sc_qps[i].qp_rxr;
2248
2249 ixl_disable_queue_intr(sc, &sc->sc_qps[i]);
2250
2251 mutex_enter(&txr->txr_lock);
2252 ixl_txr_qdis(sc, txr, 0);
2253 /* XXX wait at least 400 usec for all tx queues in one go */
2254 ixl_flush(sc);
2255 DELAY(500);
2256
2257 reg = ixl_rd(sc, I40E_QTX_ENA(i));
2258 CLR(reg, I40E_QTX_ENA_QENA_REQ_MASK);
2259 ixl_wr(sc, I40E_QTX_ENA(i), reg);
2260 		/* XXX wait 50ms from completion of the TX queue disable */
2261 ixl_flush(sc);
2262 DELAY(50);
2263
2264 if (ixl_txr_disabled(sc, txr) != 0) {
2265 mutex_exit(&txr->txr_lock);
2266 goto die;
2267 }
2268 mutex_exit(&txr->txr_lock);
2269
2270 mutex_enter(&rxr->rxr_lock);
2271 reg = ixl_rd(sc, I40E_QRX_ENA(i));
2272 CLR(reg, I40E_QRX_ENA_QENA_REQ_MASK);
2273 ixl_wr(sc, I40E_QRX_ENA(i), reg);
2274 /* XXX wait 50ms from completion of the RX queue disable */
2275 ixl_flush(sc);
2276 DELAY(50);
2277
2278 if (ixl_rxr_disabled(sc, rxr) != 0) {
2279 mutex_exit(&rxr->rxr_lock);
2280 goto die;
2281 }
2282 mutex_exit(&rxr->rxr_lock);
2283 }
2284
2285 ixl_stop_rendezvous(sc);
2286
2287 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2288 txr = sc->sc_qps[i].qp_txr;
2289 rxr = sc->sc_qps[i].qp_rxr;
2290
2291 ixl_txr_unconfig(sc, txr);
2292 ixl_rxr_unconfig(sc, rxr);
2293
2294 ixl_txr_clean(sc, txr);
2295 ixl_rxr_clean(sc, rxr);
2296 }
2297
2298 return;
2299 die:
2300 sc->sc_dead = true;
2301 	log(LOG_CRIT, "%s: failed to shut down rings\n",
2302 	    device_xname(sc->sc_dev));
2303 return;
2304 }
2305
2306 static void
2307 ixl_stop(struct ifnet *ifp, int disable)
2308 {
2309 struct ixl_softc *sc = ifp->if_softc;
2310
2311 mutex_enter(&sc->sc_cfg_lock);
2312 ixl_stop_locked(sc);
2313 mutex_exit(&sc->sc_cfg_lock);
2314 }
2315
2316 static int
2317 ixl_queue_pairs_alloc(struct ixl_softc *sc)
2318 {
2319 struct ixl_queue_pair *qp;
2320 unsigned int i;
2321 size_t sz;
2322
2323 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2324 sc->sc_qps = kmem_zalloc(sz, KM_SLEEP);
2325
2326 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2327 qp = &sc->sc_qps[i];
2328
2329 qp->qp_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
2330 ixl_handle_queue, qp);
2331 if (qp->qp_si == NULL)
2332 goto free;
2333
2334 qp->qp_txr = ixl_txr_alloc(sc, i);
2335 if (qp->qp_txr == NULL)
2336 goto free;
2337
2338 qp->qp_rxr = ixl_rxr_alloc(sc, i);
2339 if (qp->qp_rxr == NULL)
2340 goto free;
2341
2342 qp->qp_sc = sc;
2343 ixl_work_set(&qp->qp_task, ixl_handle_queue, qp);
2344 snprintf(qp->qp_name, sizeof(qp->qp_name),
2345 "%s-TXRX%d", device_xname(sc->sc_dev), i);
2346 }
2347
2348 return 0;
2349 free:
2350 if (sc->sc_qps != NULL) {
2351 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2352 qp = &sc->sc_qps[i];
2353
2354 if (qp->qp_txr != NULL)
2355 ixl_txr_free(sc, qp->qp_txr);
2356 if (qp->qp_rxr != NULL)
2357 ixl_rxr_free(sc, qp->qp_rxr);
2358 if (qp->qp_si != NULL)
2359 softint_disestablish(qp->qp_si);
2360 }
2361
2362 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2363 kmem_free(sc->sc_qps, sz);
2364 sc->sc_qps = NULL;
2365 }
2366
2367 return -1;
2368 }
2369
2370 static void
2371 ixl_queue_pairs_free(struct ixl_softc *sc)
2372 {
2373 struct ixl_queue_pair *qp;
2374 unsigned int i;
2375 size_t sz;
2376
2377 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2378 qp = &sc->sc_qps[i];
2379 ixl_txr_free(sc, qp->qp_txr);
2380 ixl_rxr_free(sc, qp->qp_rxr);
2381 softint_disestablish(qp->qp_si);
2382 }
2383
2384 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2385 kmem_free(sc->sc_qps, sz);
2386 sc->sc_qps = NULL;
2387 }
2388
2389 static struct ixl_tx_ring *
2390 ixl_txr_alloc(struct ixl_softc *sc, unsigned int qid)
2391 {
2392 struct ixl_tx_ring *txr = NULL;
2393 struct ixl_tx_map *maps = NULL, *txm;
2394 unsigned int i;
2395
2396 txr = kmem_zalloc(sizeof(*txr), KM_SLEEP);
2397 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_tx_ring_ndescs,
2398 KM_SLEEP);
2399
2400 if (ixl_dmamem_alloc(sc, &txr->txr_mem,
2401 sizeof(struct ixl_tx_desc) * sc->sc_tx_ring_ndescs,
2402 IXL_TX_QUEUE_ALIGN) != 0)
2403 goto free;
2404
2405 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2406 txm = &maps[i];
2407
2408 if (bus_dmamap_create(sc->sc_dmat, IXL_TX_PKT_MAXSIZE,
2409 IXL_TX_PKT_DESCS, IXL_TX_PKT_MAXSIZE, 0,
2410 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &txm->txm_map) != 0)
2411 goto uncreate;
2412
2413 txm->txm_eop = -1;
2414 txm->txm_m = NULL;
2415 }
2416
2417 txr->txr_cons = txr->txr_prod = 0;
2418 txr->txr_maps = maps;
2419
2420 txr->txr_intrq = pcq_create(sc->sc_tx_ring_ndescs, KM_NOSLEEP);
2421 if (txr->txr_intrq == NULL)
2422 goto uncreate;
2423
2424 txr->txr_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
2425 ixl_deferred_transmit, txr);
2426 if (txr->txr_si == NULL)
2427 goto destroy_pcq;
2428
2429 txr->txr_tail = I40E_QTX_TAIL(qid);
2430 txr->txr_qid = qid;
2431 txr->txr_sc = sc;
2432 mutex_init(&txr->txr_lock, MUTEX_DEFAULT, IPL_NET);
2433
2434 return txr;
2435
2436 destroy_pcq:
2437 pcq_destroy(txr->txr_intrq);
2438 uncreate:
2439 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2440 txm = &maps[i];
2441
2442 if (txm->txm_map == NULL)
2443 continue;
2444
2445 bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2446 }
2447
2448 ixl_dmamem_free(sc, &txr->txr_mem);
2449 free:
2450 kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs);
2451 kmem_free(txr, sizeof(*txr));
2452
2453 return NULL;
2454 }
2455
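/*
 * Ask the hardware to enable or disable scheduling of a TX queue via
 * the GLLAN_TXPRE_QDIS register; the queue index is global, so the
 * PF's base queue offset is added first.
 */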
2456 static void
2457 ixl_txr_qdis(struct ixl_softc *sc, struct ixl_tx_ring *txr, int enable)
2458 {
2459 unsigned int qid;
2460 bus_size_t reg;
2461 uint32_t r;
2462
2463 qid = txr->txr_qid + sc->sc_base_queue;
2464 reg = I40E_GLLAN_TXPRE_QDIS(qid / 128);
2465 qid %= 128;
2466
2467 r = ixl_rd(sc, reg);
2468 CLR(r, I40E_GLLAN_TXPRE_QDIS_QINDX_MASK);
2469 SET(r, qid << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
2470 SET(r, enable ? I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK :
2471 I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK);
2472 ixl_wr(sc, reg, r);
2473 }
2474
2475 static void
2476 ixl_txr_config(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2477 {
2478 struct ixl_hmc_txq txq;
2479 struct ixl_aq_vsi_data *data = IXL_DMA_KVA(&sc->sc_scratch);
2480 void *hmc;
2481
2482 memset(&txq, 0, sizeof(txq));
2483 txq.head = htole16(txr->txr_cons);
2484 txq.new_context = 1;
2485 txq.base = htole64(IXL_DMA_DVA(&txr->txr_mem) / IXL_HMC_TXQ_BASE_UNIT);
2486 txq.head_wb_ena = IXL_HMC_TXQ_DESC_WB;
2487 txq.qlen = htole16(sc->sc_tx_ring_ndescs);
2488 txq.tphrdesc_ena = 0;
2489 txq.tphrpacket_ena = 0;
2490 txq.tphwdesc_ena = 0;
2491 txq.rdylist = data->qs_handle[0];
2492
2493 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2494 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2495 ixl_hmc_pack(hmc, &txq, ixl_hmc_pack_txq,
2496 __arraycount(ixl_hmc_pack_txq));
2497 }
2498
2499 static void
2500 ixl_txr_unconfig(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2501 {
2502 void *hmc;
2503
2504 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2505 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2506 }
2507
2508 static void
2509 ixl_txr_clean(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2510 {
2511 struct ixl_tx_map *maps, *txm;
2512 bus_dmamap_t map;
2513 unsigned int i;
2514
2515 maps = txr->txr_maps;
2516 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2517 txm = &maps[i];
2518
2519 if (txm->txm_m == NULL)
2520 continue;
2521
2522 map = txm->txm_map;
2523 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2524 BUS_DMASYNC_POSTWRITE);
2525 bus_dmamap_unload(sc->sc_dmat, map);
2526
2527 m_freem(txm->txm_m);
2528 txm->txm_m = NULL;
2529 }
2530 }
2531
2532 static int
2533 ixl_txr_enabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2534 {
2535 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2536 uint32_t reg;
2537 int i;
2538
2539 for (i = 0; i < 10; i++) {
2540 reg = ixl_rd(sc, ena);
2541 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK))
2542 return 0;
2543
2544 delaymsec(10);
2545 }
2546
2547 return ETIMEDOUT;
2548 }
2549
2550 static int
2551 ixl_txr_disabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2552 {
2553 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2554 uint32_t reg;
2555 int i;
2556
2557 KASSERT(mutex_owned(&txr->txr_lock));
2558
2559 for (i = 0; i < 20; i++) {
2560 reg = ixl_rd(sc, ena);
2561 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK) == 0)
2562 return 0;
2563
2564 delaymsec(10);
2565 }
2566
2567 return ETIMEDOUT;
2568 }
2569
2570 static void
2571 ixl_txr_free(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2572 {
2573 struct ixl_tx_map *maps, *txm;
2574 struct mbuf *m;
2575 unsigned int i;
2576
2577 softint_disestablish(txr->txr_si);
2578 while ((m = pcq_get(txr->txr_intrq)) != NULL)
2579 m_freem(m);
2580 pcq_destroy(txr->txr_intrq);
2581
2582 maps = txr->txr_maps;
2583 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2584 txm = &maps[i];
2585
2586 bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2587 }
2588
2589 ixl_dmamem_free(sc, &txr->txr_mem);
2590 mutex_destroy(&txr->txr_lock);
2591 kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs);
2592 kmem_free(txr, sizeof(*txr));
2593 }
2594
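/*
 * Load an mbuf chain for DMA.  If the chain needs more segments than
 * the map allows (EFBIG), defragment it once and retry; the defrag and
 * failure cases are counted per ring.
 */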
2595 static inline int
2596 ixl_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf **m0,
2597 struct ixl_tx_ring *txr)
2598 {
2599 struct mbuf *m;
2600 int error;
2601
2602 KASSERT(mutex_owned(&txr->txr_lock));
2603
2604 m = *m0;
2605
2606 error = bus_dmamap_load_mbuf(dmat, map, m,
2607 BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT);
2608 if (error != EFBIG)
2609 return error;
2610
2611 m = m_defrag(m, M_DONTWAIT);
2612 if (m != NULL) {
2613 *m0 = m;
2614 txr->txr_defragged.ev_count++;
2615
2616 error = bus_dmamap_load_mbuf(dmat, map, m,
2617 BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT);
2618 } else {
2619 txr->txr_defrag_failed.ev_count++;
2620 error = ENOBUFS;
2621 }
2622
2623 return error;
2624 }
2625
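/*
 * Build the checksum offload portion of a TX data descriptor from the
 * mbuf checksum flags: MAC header length, IP header type and length,
 * and L4 protocol and header length.
 */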
2626 static inline int
2627 ixl_tx_setup_offloads(struct mbuf *m, uint64_t *cmd_txd)
2628 {
2629 struct ether_header *eh;
2630 size_t len;
2631 uint64_t cmd;
2632
2633 cmd = 0;
2634
2635 eh = mtod(m, struct ether_header *);
2636 switch (htons(eh->ether_type)) {
2637 case ETHERTYPE_IP:
2638 case ETHERTYPE_IPV6:
2639 len = ETHER_HDR_LEN;
2640 break;
2641 case ETHERTYPE_VLAN:
2642 len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2643 break;
2644 default:
2645 len = 0;
2646 }
2647 cmd |= ((len >> 1) << IXL_TX_DESC_MACLEN_SHIFT);
2648
2649 if (m->m_pkthdr.csum_flags &
2650 (M_CSUM_TSOv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
2651 cmd |= IXL_TX_DESC_CMD_IIPT_IPV4;
2652 }
2653 if (m->m_pkthdr.csum_flags & M_CSUM_IPv4) {
2654 cmd |= IXL_TX_DESC_CMD_IIPT_IPV4_CSUM;
2655 }
2656
2657 if (m->m_pkthdr.csum_flags &
2658 (M_CSUM_TSOv6 | M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
2659 cmd |= IXL_TX_DESC_CMD_IIPT_IPV6;
2660 }
2661
2662 switch (cmd & IXL_TX_DESC_CMD_IIPT_MASK) {
2663 case IXL_TX_DESC_CMD_IIPT_IPV4:
2664 case IXL_TX_DESC_CMD_IIPT_IPV4_CSUM:
2665 len = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data);
2666 break;
2667 case IXL_TX_DESC_CMD_IIPT_IPV6:
2668 len = M_CSUM_DATA_IPv6_IPHL(m->m_pkthdr.csum_data);
2669 break;
2670 default:
2671 len = 0;
2672 }
2673 cmd |= ((len >> 2) << IXL_TX_DESC_IPLEN_SHIFT);
2674
2675 if (m->m_pkthdr.csum_flags &
2676 (M_CSUM_TSOv4 | M_CSUM_TSOv6 | M_CSUM_TCPv4 | M_CSUM_TCPv6)) {
2677 len = sizeof(struct tcphdr);
2678 cmd |= IXL_TX_DESC_CMD_L4T_EOFT_TCP;
2679 } else if (m->m_pkthdr.csum_flags & (M_CSUM_UDPv4 | M_CSUM_UDPv6)) {
2680 len = sizeof(struct udphdr);
2681 cmd |= IXL_TX_DESC_CMD_L4T_EOFT_UDP;
2682 } else {
2683 len = 0;
2684 }
2685 cmd |= ((len >> 2) << IXL_TX_DESC_L4LEN_SHIFT);
2686
2687 *cmd_txd |= cmd;
2688 return 0;
2689 }
2690
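/*
 * Common transmit path shared by if_start and if_transmit.  Packets are
 * taken from the interface send queue or the per-ring pcq, mapped for
 * DMA and turned into data descriptors; EOP and RS are set on the last
 * descriptor of each packet before the tail register is bumped.
 */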
2691 static void
2692 ixl_tx_common_locked(struct ifnet *ifp, struct ixl_tx_ring *txr,
2693 bool is_transmit)
2694 {
2695 struct ixl_softc *sc = ifp->if_softc;
2696 struct ixl_tx_desc *ring, *txd;
2697 struct ixl_tx_map *txm;
2698 bus_dmamap_t map;
2699 struct mbuf *m;
2700 uint64_t cmd, cmd_txd;
2701 unsigned int prod, free, last, i;
2702 unsigned int mask;
2703 int post = 0;
2704
2705 KASSERT(mutex_owned(&txr->txr_lock));
2706
2707 if (ifp->if_link_state != LINK_STATE_UP
2708 || !ISSET(ifp->if_flags, IFF_RUNNING)
2709 || (!is_transmit && ISSET(ifp->if_flags, IFF_OACTIVE))) {
2710 if (!is_transmit)
2711 IFQ_PURGE(&ifp->if_snd);
2712 return;
2713 }
2714
2715 prod = txr->txr_prod;
2716 free = txr->txr_cons;
2717 if (free <= prod)
2718 free += sc->sc_tx_ring_ndescs;
2719 free -= prod;
2720
2721 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2722 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTWRITE);
2723
2724 ring = IXL_DMA_KVA(&txr->txr_mem);
2725 mask = sc->sc_tx_ring_ndescs - 1;
2726 last = prod;
2727 cmd = 0;
2728 txd = NULL;
2729
2730 for (;;) {
2731 if (free <= IXL_TX_PKT_DESCS) {
2732 if (!is_transmit)
2733 SET(ifp->if_flags, IFF_OACTIVE);
2734 break;
2735 }
2736
2737 if (is_transmit)
2738 m = pcq_get(txr->txr_intrq);
2739 else
2740 IFQ_DEQUEUE(&ifp->if_snd, m);
2741
2742 if (m == NULL)
2743 break;
2744
2745 txm = &txr->txr_maps[prod];
2746 map = txm->txm_map;
2747
2748 if (ixl_load_mbuf(sc->sc_dmat, map, &m, txr) != 0) {
2749 if_statinc(ifp, if_oerrors);
2750 m_freem(m);
2751 continue;
2752 }
2753
2754 cmd_txd = 0;
2755 if (m->m_pkthdr.csum_flags & IXL_CSUM_ALL_OFFLOAD) {
2756 ixl_tx_setup_offloads(m, &cmd_txd);
2757 }
2758
2759 if (vlan_has_tag(m)) {
2760 cmd_txd |= (uint64_t)vlan_get_tag(m) <<
2761 IXL_TX_DESC_L2TAG1_SHIFT;
2762 cmd_txd |= IXL_TX_DESC_CMD_IL2TAG1;
2763 }
2764
2765 bus_dmamap_sync(sc->sc_dmat, map, 0,
2766 map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2767
2768 for (i = 0; i < (unsigned int)map->dm_nsegs; i++) {
2769 txd = &ring[prod];
2770
2771 cmd = (uint64_t)map->dm_segs[i].ds_len <<
2772 IXL_TX_DESC_BSIZE_SHIFT;
2773 cmd |= IXL_TX_DESC_DTYPE_DATA | IXL_TX_DESC_CMD_ICRC;
2774 cmd |= cmd_txd;
2775
2776 txd->addr = htole64(map->dm_segs[i].ds_addr);
2777 txd->cmd = htole64(cmd);
2778
2779 last = prod;
2780
2781 prod++;
2782 prod &= mask;
2783 }
2784 cmd |= IXL_TX_DESC_CMD_EOP | IXL_TX_DESC_CMD_RS;
2785 txd->cmd = htole64(cmd);
2786
2787 txm->txm_m = m;
2788 txm->txm_eop = last;
2789
2790 bpf_mtap(ifp, m, BPF_D_OUT);
2791
2792 free -= i;
2793 post = 1;
2794 }
2795
2796 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2797 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREWRITE);
2798
2799 if (post) {
2800 txr->txr_prod = prod;
2801 ixl_wr(sc, txr->txr_tail, prod);
2802 }
2803 }
2804
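/*
 * Reclaim completed TX descriptors, up to txlimit packets per call.
 * Returns nonzero if work remains, so the caller can reschedule itself
 * instead of re-enabling the interrupt.
 */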
2805 static int
2806 ixl_txeof(struct ixl_softc *sc, struct ixl_tx_ring *txr, u_int txlimit)
2807 {
2808 struct ifnet *ifp = &sc->sc_ec.ec_if;
2809 struct ixl_tx_desc *ring, *txd;
2810 struct ixl_tx_map *txm;
2811 struct mbuf *m;
2812 bus_dmamap_t map;
2813 unsigned int cons, prod, last;
2814 unsigned int mask;
2815 uint64_t dtype;
2816 int done = 0, more = 0;
2817
2818 KASSERT(mutex_owned(&txr->txr_lock));
2819
2820 prod = txr->txr_prod;
2821 cons = txr->txr_cons;
2822
2823 if (cons == prod)
2824 return 0;
2825
2826 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2827 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTREAD);
2828
2829 ring = IXL_DMA_KVA(&txr->txr_mem);
2830 mask = sc->sc_tx_ring_ndescs - 1;
2831
2832 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
2833
2834 do {
2835 if (txlimit-- <= 0) {
2836 more = 1;
2837 break;
2838 }
2839
2840 txm = &txr->txr_maps[cons];
2841 last = txm->txm_eop;
2842 txd = &ring[last];
2843
2844 dtype = txd->cmd & htole64(IXL_TX_DESC_DTYPE_MASK);
2845 if (dtype != htole64(IXL_TX_DESC_DTYPE_DONE))
2846 break;
2847
2848 map = txm->txm_map;
2849
2850 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2851 BUS_DMASYNC_POSTWRITE);
2852 bus_dmamap_unload(sc->sc_dmat, map);
2853
2854 m = txm->txm_m;
2855 if (m != NULL) {
2856 if_statinc_ref(nsr, if_opackets);
2857 if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
2858 if (ISSET(m->m_flags, M_MCAST))
2859 if_statinc_ref(nsr, if_omcasts);
2860 m_freem(m);
2861 }
2862
2863 txm->txm_m = NULL;
2864 txm->txm_eop = -1;
2865
2866 cons = last + 1;
2867 cons &= mask;
2868 done = 1;
2869 } while (cons != prod);
2870
2871 IF_STAT_PUTREF(ifp);
2872
2873 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2874 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREREAD);
2875
2876 txr->txr_cons = cons;
2877
2878 if (done) {
2879 softint_schedule(txr->txr_si);
2880 if (txr->txr_qid == 0) {
2881 CLR(ifp->if_flags, IFF_OACTIVE);
2882 if_schedule_deferred_start(ifp);
2883 }
2884 }
2885
2886 return more;
2887 }
2888
2889 static void
2890 ixl_start(struct ifnet *ifp)
2891 {
2892 struct ixl_softc *sc;
2893 struct ixl_tx_ring *txr;
2894
2895 sc = ifp->if_softc;
2896 txr = sc->sc_qps[0].qp_txr;
2897
2898 mutex_enter(&txr->txr_lock);
2899 ixl_tx_common_locked(ifp, txr, false);
2900 mutex_exit(&txr->txr_lock);
2901 }
2902
2903 static inline unsigned int
2904 ixl_select_txqueue(struct ixl_softc *sc, struct mbuf *m)
2905 {
2906 u_int cpuid;
2907
2908 cpuid = cpu_index(curcpu());
2909
2910 return (unsigned int)(cpuid % sc->sc_nqueue_pairs);
2911 }
2912
2913 static int
2914 ixl_transmit(struct ifnet *ifp, struct mbuf *m)
2915 {
2916 struct ixl_softc *sc;
2917 struct ixl_tx_ring *txr;
2918 unsigned int qid;
2919
2920 sc = ifp->if_softc;
2921 qid = ixl_select_txqueue(sc, m);
2922
2923 txr = sc->sc_qps[qid].qp_txr;
2924
2925 if (__predict_false(!pcq_put(txr->txr_intrq, m))) {
2926 mutex_enter(&txr->txr_lock);
2927 txr->txr_pcqdrop.ev_count++;
2928 mutex_exit(&txr->txr_lock);
2929
2930 m_freem(m);
2931 return ENOBUFS;
2932 }
2933
2934 if (mutex_tryenter(&txr->txr_lock)) {
2935 ixl_tx_common_locked(ifp, txr, true);
2936 mutex_exit(&txr->txr_lock);
2937 } else {
2938 kpreempt_disable();
2939 softint_schedule(txr->txr_si);
2940 kpreempt_enable();
2941 }
2942
2943 return 0;
2944 }
2945
2946 static void
2947 ixl_deferred_transmit(void *xtxr)
2948 {
2949 struct ixl_tx_ring *txr = xtxr;
2950 struct ixl_softc *sc = txr->txr_sc;
2951 struct ifnet *ifp = &sc->sc_ec.ec_if;
2952
2953 mutex_enter(&txr->txr_lock);
2954 txr->txr_transmitdef.ev_count++;
2955 if (pcq_peek(txr->txr_intrq) != NULL)
2956 ixl_tx_common_locked(ifp, txr, true);
2957 mutex_exit(&txr->txr_lock);
2958 }
2959
2960 static struct ixl_rx_ring *
2961 ixl_rxr_alloc(struct ixl_softc *sc, unsigned int qid)
2962 {
2963 struct ixl_rx_ring *rxr = NULL;
2964 struct ixl_rx_map *maps = NULL, *rxm;
2965 unsigned int i;
2966
2967 rxr = kmem_zalloc(sizeof(*rxr), KM_SLEEP);
2968 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_rx_ring_ndescs,
2969 KM_SLEEP);
2970
2971 if (ixl_dmamem_alloc(sc, &rxr->rxr_mem,
2972 sizeof(struct ixl_rx_rd_desc_32) * sc->sc_rx_ring_ndescs,
2973 IXL_RX_QUEUE_ALIGN) != 0)
2974 goto free;
2975
2976 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2977 rxm = &maps[i];
2978
2979 if (bus_dmamap_create(sc->sc_dmat,
2980 IXL_MCLBYTES, 1, IXL_MCLBYTES, 0,
2981 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &rxm->rxm_map) != 0)
2982 goto uncreate;
2983
2984 rxm->rxm_m = NULL;
2985 }
2986
2987 rxr->rxr_cons = rxr->rxr_prod = 0;
2988 rxr->rxr_m_head = NULL;
2989 rxr->rxr_m_tail = &rxr->rxr_m_head;
2990 rxr->rxr_maps = maps;
2991
2992 rxr->rxr_tail = I40E_QRX_TAIL(qid);
2993 rxr->rxr_qid = qid;
2994 mutex_init(&rxr->rxr_lock, MUTEX_DEFAULT, IPL_NET);
2995
2996 return rxr;
2997
2998 uncreate:
2999 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
3000 rxm = &maps[i];
3001
3002 if (rxm->rxm_map == NULL)
3003 continue;
3004
3005 bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
3006 }
3007
3008 ixl_dmamem_free(sc, &rxr->rxr_mem);
3009 free:
3010 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs);
3011 kmem_free(rxr, sizeof(*rxr));
3012
3013 return NULL;
3014 }
3015
3016 static void
3017 ixl_rxr_clean(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3018 {
3019 struct ixl_rx_map *maps, *rxm;
3020 bus_dmamap_t map;
3021 unsigned int i;
3022
3023 maps = rxr->rxr_maps;
3024 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
3025 rxm = &maps[i];
3026
3027 if (rxm->rxm_m == NULL)
3028 continue;
3029
3030 map = rxm->rxm_map;
3031 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3032 BUS_DMASYNC_POSTWRITE);
3033 bus_dmamap_unload(sc->sc_dmat, map);
3034
3035 m_freem(rxm->rxm_m);
3036 rxm->rxm_m = NULL;
3037 }
3038
3039 m_freem(rxr->rxr_m_head);
3040 rxr->rxr_m_head = NULL;
3041 rxr->rxr_m_tail = &rxr->rxr_m_head;
3042
3043 rxr->rxr_prod = rxr->rxr_cons = 0;
3044 }
3045
3046 static int
3047 ixl_rxr_enabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3048 {
3049 bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
3050 uint32_t reg;
3051 int i;
3052
3053 for (i = 0; i < 10; i++) {
3054 reg = ixl_rd(sc, ena);
3055 if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK))
3056 return 0;
3057
3058 delaymsec(10);
3059 }
3060
3061 return ETIMEDOUT;
3062 }
3063
3064 static int
3065 ixl_rxr_disabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3066 {
3067 bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
3068 uint32_t reg;
3069 int i;
3070
3071 KASSERT(mutex_owned(&rxr->rxr_lock));
3072
3073 for (i = 0; i < 20; i++) {
3074 reg = ixl_rd(sc, ena);
3075 if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK) == 0)
3076 return 0;
3077
3078 delaymsec(10);
3079 }
3080
3081 return ETIMEDOUT;
3082 }
3083
3084 static void
3085 ixl_rxr_config(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3086 {
3087 struct ixl_hmc_rxq rxq;
3088 struct ifnet *ifp = &sc->sc_ec.ec_if;
3089 uint16_t rxmax;
3090 void *hmc;
3091
3092 memset(&rxq, 0, sizeof(rxq));
3093 rxmax = ifp->if_mtu + IXL_MTU_ETHERLEN;
3094
3095 rxq.head = htole16(rxr->rxr_cons);
3096 rxq.base = htole64(IXL_DMA_DVA(&rxr->rxr_mem) / IXL_HMC_RXQ_BASE_UNIT);
3097 rxq.qlen = htole16(sc->sc_rx_ring_ndescs);
3098 rxq.dbuff = htole16(IXL_MCLBYTES / IXL_HMC_RXQ_DBUFF_UNIT);
3099 rxq.hbuff = 0;
3100 rxq.dtype = IXL_HMC_RXQ_DTYPE_NOSPLIT;
3101 rxq.dsize = IXL_HMC_RXQ_DSIZE_32;
3102 rxq.crcstrip = 1;
3103 rxq.l2sel = 1;
3104 rxq.showiv = 1;
3105 rxq.rxmax = htole16(rxmax);
3106 rxq.tphrdesc_ena = 0;
3107 rxq.tphwdesc_ena = 0;
3108 rxq.tphdata_ena = 0;
3109 rxq.tphhead_ena = 0;
3110 rxq.lrxqthresh = 0;
3111 rxq.prefena = 1;
3112
3113 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
3114 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
3115 ixl_hmc_pack(hmc, &rxq, ixl_hmc_pack_rxq,
3116 __arraycount(ixl_hmc_pack_rxq));
3117 }
3118
3119 static void
3120 ixl_rxr_unconfig(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3121 {
3122 void *hmc;
3123
3124 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
3125 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
3126 }
3127
3128 static void
3129 ixl_rxr_free(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3130 {
3131 struct ixl_rx_map *maps, *rxm;
3132 unsigned int i;
3133
3134 maps = rxr->rxr_maps;
3135 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
3136 rxm = &maps[i];
3137
3138 bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
3139 }
3140
3141 ixl_dmamem_free(sc, &rxr->rxr_mem);
3142 mutex_destroy(&rxr->rxr_lock);
3143 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs);
3144 kmem_free(rxr, sizeof(*rxr));
3145 }
3146
3147 static inline void
3148 ixl_rx_csum(struct mbuf *m, uint64_t qword)
3149 {
3150 int flags_mask;
3151
3152 if (!ISSET(qword, IXL_RX_DESC_L3L4P)) {
3153 /* No L3 or L4 checksum was calculated */
3154 return;
3155 }
3156
3157 switch (__SHIFTOUT(qword, IXL_RX_DESC_PTYPE_MASK)) {
3158 case IXL_RX_DESC_PTYPE_IPV4FRAG:
3159 case IXL_RX_DESC_PTYPE_IPV4:
3160 case IXL_RX_DESC_PTYPE_SCTPV4:
3161 case IXL_RX_DESC_PTYPE_ICMPV4:
3162 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
3163 break;
3164 case IXL_RX_DESC_PTYPE_TCPV4:
3165 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
3166 flags_mask |= M_CSUM_TCPv4 | M_CSUM_TCP_UDP_BAD;
3167 break;
3168 case IXL_RX_DESC_PTYPE_UDPV4:
3169 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
3170 flags_mask |= M_CSUM_UDPv4 | M_CSUM_TCP_UDP_BAD;
3171 break;
3172 case IXL_RX_DESC_PTYPE_TCPV6:
3173 flags_mask = M_CSUM_TCPv6 | M_CSUM_TCP_UDP_BAD;
3174 break;
3175 case IXL_RX_DESC_PTYPE_UDPV6:
3176 flags_mask = M_CSUM_UDPv6 | M_CSUM_TCP_UDP_BAD;
3177 break;
3178 default:
3179 flags_mask = 0;
3180 }
3181
3182 m->m_pkthdr.csum_flags |= (flags_mask & (M_CSUM_IPv4 |
3183 M_CSUM_TCPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv4 | M_CSUM_UDPv6));
3184
3185 if (ISSET(qword, IXL_RX_DESC_IPE)) {
3186 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_IPv4_BAD);
3187 }
3188
3189 if (ISSET(qword, IXL_RX_DESC_L4E)) {
3190 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_TCP_UDP_BAD);
3191 }
3192 }
3193
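/*
 * Harvest received frames, up to rxlimit descriptors per call.
 * Multi-descriptor packets are reassembled via rxr_m_head/rxr_m_tail;
 * completed frames are checksum-flagged, VLAN-tagged and passed to the
 * stack.  Returns nonzero if more work remains.
 */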
3194 static int
3195 ixl_rxeof(struct ixl_softc *sc, struct ixl_rx_ring *rxr, u_int rxlimit)
3196 {
3197 struct ifnet *ifp = &sc->sc_ec.ec_if;
3198 struct ixl_rx_wb_desc_32 *ring, *rxd;
3199 struct ixl_rx_map *rxm;
3200 bus_dmamap_t map;
3201 unsigned int cons, prod;
3202 struct mbuf *m;
3203 uint64_t word, word0;
3204 unsigned int len;
3205 unsigned int mask;
3206 int done = 0, more = 0;
3207
3208 KASSERT(mutex_owned(&rxr->rxr_lock));
3209
3210 if (!ISSET(ifp->if_flags, IFF_RUNNING))
3211 return 0;
3212
3213 prod = rxr->rxr_prod;
3214 cons = rxr->rxr_cons;
3215
3216 if (cons == prod)
3217 return 0;
3218
3219 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
3220 0, IXL_DMA_LEN(&rxr->rxr_mem),
3221 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3222
3223 ring = IXL_DMA_KVA(&rxr->rxr_mem);
3224 mask = sc->sc_rx_ring_ndescs - 1;
3225
3226 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
3227
3228 do {
3229 if (rxlimit-- <= 0) {
3230 more = 1;
3231 break;
3232 }
3233
3234 rxd = &ring[cons];
3235
3236 word = le64toh(rxd->qword1);
3237
3238 if (!ISSET(word, IXL_RX_DESC_DD))
3239 break;
3240
3241 rxm = &rxr->rxr_maps[cons];
3242
3243 map = rxm->rxm_map;
3244 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3245 BUS_DMASYNC_POSTREAD);
3246 bus_dmamap_unload(sc->sc_dmat, map);
3247
3248 m = rxm->rxm_m;
3249 rxm->rxm_m = NULL;
3250
3251 KASSERT(m != NULL);
3252
3253 len = (word & IXL_RX_DESC_PLEN_MASK) >> IXL_RX_DESC_PLEN_SHIFT;
3254 m->m_len = len;
3255 m->m_pkthdr.len = 0;
3256
3257 m->m_next = NULL;
3258 *rxr->rxr_m_tail = m;
3259 rxr->rxr_m_tail = &m->m_next;
3260
3261 m = rxr->rxr_m_head;
3262 m->m_pkthdr.len += len;
3263
3264 if (ISSET(word, IXL_RX_DESC_EOP)) {
3265 word0 = le64toh(rxd->qword0);
3266
3267 if (ISSET(word, IXL_RX_DESC_L2TAG1P)) {
3268 vlan_set_tag(m,
3269 __SHIFTOUT(word0, IXL_RX_DESC_L2TAG1_MASK));
3270 }
3271
3272 if ((ifp->if_capenable & IXL_IFCAP_RXCSUM) != 0)
3273 ixl_rx_csum(m, word);
3274
3275 if (!ISSET(word,
3276 IXL_RX_DESC_RXE | IXL_RX_DESC_OVERSIZE)) {
3277 m_set_rcvif(m, ifp);
3278 if_statinc_ref(nsr, if_ipackets);
3279 if_statadd_ref(nsr, if_ibytes,
3280 m->m_pkthdr.len);
3281 if_percpuq_enqueue(ifp->if_percpuq, m);
3282 } else {
3283 if_statinc_ref(nsr, if_ierrors);
3284 m_freem(m);
3285 }
3286
3287 rxr->rxr_m_head = NULL;
3288 rxr->rxr_m_tail = &rxr->rxr_m_head;
3289 }
3290
3291 cons++;
3292 cons &= mask;
3293
3294 done = 1;
3295 } while (cons != prod);
3296
3297 if (done) {
3298 rxr->rxr_cons = cons;
3299 if (ixl_rxfill(sc, rxr) == -1)
3300 if_statinc_ref(nsr, if_iqdrops);
3301 }
3302
3303 IF_STAT_PUTREF(ifp);
3304
3305 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
3306 0, IXL_DMA_LEN(&rxr->rxr_mem),
3307 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3308
3309 return more;
3310 }
3311
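/*
 * Refill the RX ring with mbuf clusters for every unrefreshed slot and
 * advance the tail register.  Returns -1 if the ring could not be
 * refilled completely.
 */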
3312 static int
3313 ixl_rxfill(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3314 {
3315 struct ixl_rx_rd_desc_32 *ring, *rxd;
3316 struct ixl_rx_map *rxm;
3317 bus_dmamap_t map;
3318 struct mbuf *m;
3319 unsigned int prod;
3320 unsigned int slots;
3321 unsigned int mask;
3322 int post = 0, error = 0;
3323
3324 KASSERT(mutex_owned(&rxr->rxr_lock));
3325
3326 prod = rxr->rxr_prod;
3327 slots = ixl_rxr_unrefreshed(rxr->rxr_prod, rxr->rxr_cons,
3328 sc->sc_rx_ring_ndescs);
3329
3330 ring = IXL_DMA_KVA(&rxr->rxr_mem);
3331 mask = sc->sc_rx_ring_ndescs - 1;
3332
3333 if (__predict_false(slots <= 0))
3334 return -1;
3335
3336 do {
3337 rxm = &rxr->rxr_maps[prod];
3338
3339 MGETHDR(m, M_DONTWAIT, MT_DATA);
3340 if (m == NULL) {
3341 rxr->rxr_mgethdr_failed.ev_count++;
3342 error = -1;
3343 break;
3344 }
3345
3346 MCLGET(m, M_DONTWAIT);
3347 if (!ISSET(m->m_flags, M_EXT)) {
3348 rxr->rxr_mgetcl_failed.ev_count++;
3349 error = -1;
3350 m_freem(m);
3351 break;
3352 }
3353
3354 m->m_len = m->m_pkthdr.len = MCLBYTES;
3355 m_adj(m, ETHER_ALIGN);
3356
3357 map = rxm->rxm_map;
3358
3359 if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
3360 BUS_DMA_READ | BUS_DMA_NOWAIT) != 0) {
3361 rxr->rxr_mbuf_load_failed.ev_count++;
3362 error = -1;
3363 m_freem(m);
3364 break;
3365 }
3366
3367 rxm->rxm_m = m;
3368
3369 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3370 BUS_DMASYNC_PREREAD);
3371
3372 rxd = &ring[prod];
3373
3374 rxd->paddr = htole64(map->dm_segs[0].ds_addr);
3375 rxd->haddr = htole64(0);
3376
3377 prod++;
3378 prod &= mask;
3379
3380 post = 1;
3381
3382 } while (--slots);
3383
3384 if (post) {
3385 rxr->rxr_prod = prod;
3386 ixl_wr(sc, rxr->rxr_tail, prod);
3387 }
3388
3389 return error;
3390 }
3391
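/*
 * Run TX completion and RX harvest for one queue pair under their
 * respective ring locks.  The return value encodes whether either side
 * hit its processing limit (bit 0 for TX, bit 1 for RX).
 */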
3392 static inline int
3393 ixl_handle_queue_common(struct ixl_softc *sc, struct ixl_queue_pair *qp,
3394 u_int txlimit, struct evcnt *txevcnt,
3395 u_int rxlimit, struct evcnt *rxevcnt)
3396 {
3397 struct ixl_tx_ring *txr = qp->qp_txr;
3398 struct ixl_rx_ring *rxr = qp->qp_rxr;
3399 int txmore, rxmore;
3400 int rv;
3401
3402 KASSERT(!mutex_owned(&txr->txr_lock));
3403 KASSERT(!mutex_owned(&rxr->rxr_lock));
3404
3405 mutex_enter(&txr->txr_lock);
3406 txevcnt->ev_count++;
3407 txmore = ixl_txeof(sc, txr, txlimit);
3408 mutex_exit(&txr->txr_lock);
3409
3410 mutex_enter(&rxr->rxr_lock);
3411 rxevcnt->ev_count++;
3412 rxmore = ixl_rxeof(sc, rxr, rxlimit);
3413 mutex_exit(&rxr->rxr_lock);
3414
3415 rv = txmore | (rxmore << 1);
3416
3417 return rv;
3418 }
3419
3420 static void
3421 ixl_sched_handle_queue(struct ixl_softc *sc, struct ixl_queue_pair *qp)
3422 {
3423
3424 if (qp->qp_workqueue)
3425 ixl_work_add(sc->sc_workq_txrx, &qp->qp_task);
3426 else
3427 softint_schedule(qp->qp_si);
3428 }
3429
3430 static int
3431 ixl_intr(void *xsc)
3432 {
3433 struct ixl_softc *sc = xsc;
3434 struct ixl_tx_ring *txr;
3435 struct ixl_rx_ring *rxr;
3436 uint32_t icr, rxintr, txintr;
3437 int rv = 0;
3438 unsigned int i;
3439
3440 KASSERT(sc != NULL);
3441
3442 ixl_enable_other_intr(sc);
3443 icr = ixl_rd(sc, I40E_PFINT_ICR0);
3444
3445 if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) {
3446 atomic_inc_64(&sc->sc_event_atq.ev_count);
3447 ixl_atq_done(sc);
3448 ixl_work_add(sc->sc_workq, &sc->sc_arq_task);
3449 rv = 1;
3450 }
3451
3452 if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) {
3453 atomic_inc_64(&sc->sc_event_link.ev_count);
3454 ixl_work_add(sc->sc_workq, &sc->sc_link_state_task);
3455 rv = 1;
3456 }
3457
3458 rxintr = icr & I40E_INTR_NOTX_RX_MASK;
3459 txintr = icr & I40E_INTR_NOTX_TX_MASK;
3460
3461 if (txintr || rxintr) {
3462 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
3463 txr = sc->sc_qps[i].qp_txr;
3464 rxr = sc->sc_qps[i].qp_rxr;
3465
3466 ixl_handle_queue_common(sc, &sc->sc_qps[i],
3467 IXL_TXRX_PROCESS_UNLIMIT, &txr->txr_intr,
3468 IXL_TXRX_PROCESS_UNLIMIT, &rxr->rxr_intr);
3469 }
3470 rv = 1;
3471 }
3472
3473 return rv;
3474 }
3475
3476 static int
3477 ixl_queue_intr(void *xqp)
3478 {
3479 struct ixl_queue_pair *qp = xqp;
3480 struct ixl_tx_ring *txr = qp->qp_txr;
3481 struct ixl_rx_ring *rxr = qp->qp_rxr;
3482 struct ixl_softc *sc = qp->qp_sc;
3483 u_int txlimit, rxlimit;
3484 int more;
3485
3486 txlimit = sc->sc_tx_intr_process_limit;
3487 rxlimit = sc->sc_rx_intr_process_limit;
3488 qp->qp_workqueue = sc->sc_txrx_workqueue;
3489
3490 more = ixl_handle_queue_common(sc, qp,
3491 txlimit, &txr->txr_intr, rxlimit, &rxr->rxr_intr);
3492
3493 if (more != 0) {
3494 ixl_sched_handle_queue(sc, qp);
3495 } else {
3496 /* for ALTQ */
3497 if (txr->txr_qid == 0)
3498 if_schedule_deferred_start(&sc->sc_ec.ec_if);
3499 softint_schedule(txr->txr_si);
3500
3501 ixl_enable_queue_intr(sc, qp);
3502 }
3503
3504 return 1;
3505 }
3506
3507 static void
3508 ixl_handle_queue(void *xqp)
3509 {
3510 struct ixl_queue_pair *qp = xqp;
3511 struct ixl_softc *sc = qp->qp_sc;
3512 struct ixl_tx_ring *txr = qp->qp_txr;
3513 struct ixl_rx_ring *rxr = qp->qp_rxr;
3514 u_int txlimit, rxlimit;
3515 int more;
3516
3517 txlimit = sc->sc_tx_process_limit;
3518 rxlimit = sc->sc_rx_process_limit;
3519
3520 more = ixl_handle_queue_common(sc, qp,
3521 txlimit, &txr->txr_defer, rxlimit, &rxr->rxr_defer);
3522
3523 if (more != 0)
3524 ixl_sched_handle_queue(sc, qp);
3525 else
3526 ixl_enable_queue_intr(sc, qp);
3527 }
3528
3529 static inline void
3530 ixl_print_hmc_error(struct ixl_softc *sc, uint32_t reg)
3531 {
3532 uint32_t hmc_idx, hmc_isvf;
3533 uint32_t hmc_errtype, hmc_objtype, hmc_data;
3534
3535 hmc_idx = reg & I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK;
3536 hmc_idx = hmc_idx >> I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT;
3537 hmc_isvf = reg & I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK;
3538 hmc_isvf = hmc_isvf >> I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT;
3539 hmc_errtype = reg & I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK;
3540 hmc_errtype = hmc_errtype >> I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT;
3541 hmc_objtype = reg & I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK;
3542 hmc_objtype = hmc_objtype >> I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT;
3543 hmc_data = ixl_rd(sc, I40E_PFHMC_ERRORDATA);
3544
3545 device_printf(sc->sc_dev,
3546 "HMC Error (idx=0x%x, isvf=0x%x, err=0x%x, obj=0x%x, data=0x%x)\n",
3547 hmc_idx, hmc_isvf, hmc_errtype, hmc_objtype, hmc_data);
3548 }
3549
3550 static int
3551 ixl_other_intr(void *xsc)
3552 {
3553 struct ixl_softc *sc = xsc;
3554 uint32_t icr, mask, reg;
3555 	int rv = 0;
3556
3557 icr = ixl_rd(sc, I40E_PFINT_ICR0);
3558 mask = ixl_rd(sc, I40E_PFINT_ICR0_ENA);
3559
3560 if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) {
3561 atomic_inc_64(&sc->sc_event_atq.ev_count);
3562 ixl_atq_done(sc);
3563 ixl_work_add(sc->sc_workq, &sc->sc_arq_task);
3564 rv = 1;
3565 }
3566
3567 if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) {
3568 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3569 device_printf(sc->sc_dev, "link stat changed\n");
3570
3571 atomic_inc_64(&sc->sc_event_link.ev_count);
3572 ixl_work_add(sc->sc_workq, &sc->sc_link_state_task);
3573 rv = 1;
3574 }
3575
3576 if (ISSET(icr, I40E_PFINT_ICR0_GRST_MASK)) {
3577 CLR(mask, I40E_PFINT_ICR0_ENA_GRST_MASK);
3578 reg = ixl_rd(sc, I40E_GLGEN_RSTAT);
3579 reg = reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK;
3580 reg = reg >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
3581
3582 device_printf(sc->sc_dev, "GRST: %s\n",
3583 reg == I40E_RESET_CORER ? "CORER" :
3584 reg == I40E_RESET_GLOBR ? "GLOBR" :
3585 reg == I40E_RESET_EMPR ? "EMPR" :
3586 "POR");
3587 }
3588
3589 if (ISSET(icr, I40E_PFINT_ICR0_ECC_ERR_MASK))
3590 atomic_inc_64(&sc->sc_event_ecc_err.ev_count);
3591 if (ISSET(icr, I40E_PFINT_ICR0_PCI_EXCEPTION_MASK))
3592 atomic_inc_64(&sc->sc_event_pci_exception.ev_count);
3593 if (ISSET(icr, I40E_PFINT_ICR0_PE_CRITERR_MASK))
3594 atomic_inc_64(&sc->sc_event_crit_err.ev_count);
3595
3596 if (ISSET(icr, IXL_ICR0_CRIT_ERR_MASK)) {
3597 CLR(mask, IXL_ICR0_CRIT_ERR_MASK);
3598 device_printf(sc->sc_dev, "critical error\n");
3599 }
3600
3601 if (ISSET(icr, I40E_PFINT_ICR0_HMC_ERR_MASK)) {
3602 reg = ixl_rd(sc, I40E_PFHMC_ERRORINFO);
3603 if (ISSET(reg, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK))
3604 ixl_print_hmc_error(sc, reg);
3605 ixl_wr(sc, I40E_PFHMC_ERRORINFO, 0);
3606 }
3607
3608 ixl_wr(sc, I40E_PFINT_ICR0_ENA, mask);
3609 ixl_flush(sc);
3610 ixl_enable_other_intr(sc);
3611 return rv;
3612 }
3613
3614 static void
3615 ixl_get_link_status_done(struct ixl_softc *sc,
3616 const struct ixl_aq_desc *iaq)
3617 {
3618
3619 ixl_link_state_update(sc, iaq);
3620 }
3621
3622 static void
3623 ixl_get_link_status(void *xsc)
3624 {
3625 struct ixl_softc *sc = xsc;
3626 struct ixl_aq_desc *iaq;
3627 struct ixl_aq_link_param *param;
3628
3629 memset(&sc->sc_link_state_atq, 0, sizeof(sc->sc_link_state_atq));
3630 iaq = &sc->sc_link_state_atq.iatq_desc;
3631 iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
3632 param = (struct ixl_aq_link_param *)iaq->iaq_param;
3633 param->notify = IXL_AQ_LINK_NOTIFY;
3634
3635 ixl_atq_set(&sc->sc_link_state_atq, ixl_get_link_status_done);
3636 (void)ixl_atq_post(sc, &sc->sc_link_state_atq);
3637 }
3638
3639 static void
3640 ixl_link_state_update(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
3641 {
3642 struct ifnet *ifp = &sc->sc_ec.ec_if;
3643 int link_state;
3644
3645 KASSERT(kpreempt_disabled());
3646
3647 link_state = ixl_set_link_status(sc, iaq);
3648
3649 if (ifp->if_link_state != link_state)
3650 if_link_state_change(ifp, link_state);
3651
3652 if (link_state != LINK_STATE_DOWN) {
3653 if_schedule_deferred_start(ifp);
3654 }
3655 }
3656
3657 static void
3658 ixl_aq_dump(const struct ixl_softc *sc, const struct ixl_aq_desc *iaq,
3659 const char *msg)
3660 {
3661 char buf[512];
3662 size_t len;
3663
3664 len = sizeof(buf);
3665 buf[--len] = '\0';
3666
3667 device_printf(sc->sc_dev, "%s\n", msg);
3668 snprintb(buf, len, IXL_AQ_FLAGS_FMT, le16toh(iaq->iaq_flags));
3669 device_printf(sc->sc_dev, "flags %s opcode %04x\n",
3670 buf, le16toh(iaq->iaq_opcode));
3671 device_printf(sc->sc_dev, "datalen %u retval %u\n",
3672 le16toh(iaq->iaq_datalen), le16toh(iaq->iaq_retval));
3673 device_printf(sc->sc_dev, "cookie %016" PRIx64 "\n", iaq->iaq_cookie);
3674 device_printf(sc->sc_dev, "%08x %08x %08x %08x\n",
3675 le32toh(iaq->iaq_param[0]), le32toh(iaq->iaq_param[1]),
3676 le32toh(iaq->iaq_param[2]), le32toh(iaq->iaq_param[3]));
3677 }
3678
3679 static void
3680 ixl_arq(void *xsc)
3681 {
3682 struct ixl_softc *sc = xsc;
3683 struct ixl_aq_desc *arq, *iaq;
3684 struct ixl_aq_buf *aqb;
3685 unsigned int cons = sc->sc_arq_cons;
3686 unsigned int prod;
3687 int done = 0;
3688
3689 prod = ixl_rd(sc, sc->sc_aq_regs->arq_head) &
3690 sc->sc_aq_regs->arq_head_mask;
3691
3692 if (cons == prod)
3693 goto done;
3694
3695 arq = IXL_DMA_KVA(&sc->sc_arq);
3696
3697 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3698 0, IXL_DMA_LEN(&sc->sc_arq),
3699 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3700
3701 do {
3702 iaq = &arq[cons];
3703 aqb = sc->sc_arq_live[cons];
3704
3705 KASSERT(aqb != NULL);
3706
3707 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,
3708 BUS_DMASYNC_POSTREAD);
3709
3710 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3711 ixl_aq_dump(sc, iaq, "arq event");
3712
3713 switch (iaq->iaq_opcode) {
3714 case htole16(IXL_AQ_OP_PHY_LINK_STATUS):
3715 kpreempt_disable();
3716 ixl_link_state_update(sc, iaq);
3717 kpreempt_enable();
3718 break;
3719 }
3720
3721 memset(iaq, 0, sizeof(*iaq));
3722 sc->sc_arq_live[cons] = NULL;
3723 SIMPLEQ_INSERT_TAIL(&sc->sc_arq_idle, aqb, aqb_entry);
3724
3725 cons++;
3726 cons &= IXL_AQ_MASK;
3727
3728 done = 1;
3729 } while (cons != prod);
3730
3731 if (done) {
3732 sc->sc_arq_cons = cons;
3733 ixl_arq_fill(sc);
3734 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3735 0, IXL_DMA_LEN(&sc->sc_arq),
3736 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3737 }
3738
3739 done:
3740 ixl_enable_other_intr(sc);
3741 }
3742
3743 static void
3744 ixl_atq_set(struct ixl_atq *iatq,
3745 void (*fn)(struct ixl_softc *, const struct ixl_aq_desc *))
3746 {
3747
3748 iatq->iatq_fn = fn;
3749 }
3750
3751 static int
3752 ixl_atq_post_locked(struct ixl_softc *sc, struct ixl_atq *iatq)
3753 {
3754 struct ixl_aq_desc *atq, *slot;
3755 unsigned int prod, cons, prod_next;
3756
3757 /* assert locked */
3758 KASSERT(mutex_owned(&sc->sc_atq_lock));
3759
3760 atq = IXL_DMA_KVA(&sc->sc_atq);
3761 prod = sc->sc_atq_prod;
3762 cons = sc->sc_atq_cons;
3763 	prod_next = (prod + 1) & IXL_AQ_MASK;
3764
3765 if (cons == prod_next)
3766 return ENOMEM;
3767
3768 slot = &atq[prod];
3769
3770 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3771 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
3772
3773 *slot = iatq->iatq_desc;
3774 slot->iaq_cookie = (uint64_t)((intptr_t)iatq);
3775
3776 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3777 ixl_aq_dump(sc, slot, "atq command");
3778
3779 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3780 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
3781
3782 sc->sc_atq_prod = prod_next;
3783 ixl_wr(sc, sc->sc_aq_regs->atq_tail, sc->sc_atq_prod);
3784
3785 return 0;
3786 }
3787
3788 static int
3789 ixl_atq_post(struct ixl_softc *sc, struct ixl_atq *iatq)
3790 {
3791 int rv;
3792
3793 mutex_enter(&sc->sc_atq_lock);
3794 rv = ixl_atq_post_locked(sc, iatq);
3795 mutex_exit(&sc->sc_atq_lock);
3796
3797 return rv;
3798 }
3799
3800 static void
3801 ixl_atq_done_locked(struct ixl_softc *sc)
3802 {
3803 struct ixl_aq_desc *atq, *slot;
3804 struct ixl_atq *iatq;
3805 unsigned int cons;
3806 unsigned int prod;
3807
3808 KASSERT(mutex_owned(&sc->sc_atq_lock));
3809
3810 prod = sc->sc_atq_prod;
3811 cons = sc->sc_atq_cons;
3812
3813 if (prod == cons)
3814 return;
3815
3816 atq = IXL_DMA_KVA(&sc->sc_atq);
3817
3818 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3819 0, IXL_DMA_LEN(&sc->sc_atq),
3820 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3821
3822 do {
3823 slot = &atq[cons];
3824 if (!ISSET(slot->iaq_flags, htole16(IXL_AQ_DD)))
3825 break;
3826
3827 iatq = (struct ixl_atq *)((intptr_t)slot->iaq_cookie);
3828 iatq->iatq_desc = *slot;
3829
3830 memset(slot, 0, sizeof(*slot));
3831
3832 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3833 ixl_aq_dump(sc, &iatq->iatq_desc, "atq response");
3834
3835 (*iatq->iatq_fn)(sc, &iatq->iatq_desc);
3836
3837 cons++;
3838 cons &= IXL_AQ_MASK;
3839 } while (cons != prod);
3840
3841 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3842 0, IXL_DMA_LEN(&sc->sc_atq),
3843 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3844
3845 sc->sc_atq_cons = cons;
3846 }
3847
3848 static void
3849 ixl_atq_done(struct ixl_softc *sc)
3850 {
3851
3852 mutex_enter(&sc->sc_atq_lock);
3853 ixl_atq_done_locked(sc);
3854 mutex_exit(&sc->sc_atq_lock);
3855 }
3856
3857 static void
3858 ixl_wakeup(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
3859 {
3860
3861 KASSERT(mutex_owned(&sc->sc_atq_lock));
3862
3863 cv_signal(&sc->sc_atq_cv);
3864 }
3865
3866 static int
3867 ixl_atq_exec(struct ixl_softc *sc, struct ixl_atq *iatq)
3868 {
3869 int error;
3870
3871 KASSERT(iatq->iatq_desc.iaq_cookie == 0);
3872
3873 ixl_atq_set(iatq, ixl_wakeup);
3874
3875 mutex_enter(&sc->sc_atq_lock);
3876 error = ixl_atq_post_locked(sc, iatq);
3877 if (error) {
3878 mutex_exit(&sc->sc_atq_lock);
3879 return error;
3880 }
3881
3882 error = cv_timedwait(&sc->sc_atq_cv, &sc->sc_atq_lock,
3883 IXL_ATQ_EXEC_TIMEOUT);
3884 mutex_exit(&sc->sc_atq_lock);
3885
3886 return error;
3887 }
3888
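/*
 * Post an admin queue command synchronously, busy-waiting (one
 * millisecond per iteration) up to tm milliseconds for the firmware to
 * consume it, then copy the completed descriptor back to the caller.
 */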
3889 static int
3890 ixl_atq_poll(struct ixl_softc *sc, struct ixl_aq_desc *iaq, unsigned int tm)
3891 {
3892 struct ixl_aq_desc *atq, *slot;
3893 unsigned int prod;
3894 unsigned int t = 0;
3895
3896 mutex_enter(&sc->sc_atq_lock);
3897
3898 atq = IXL_DMA_KVA(&sc->sc_atq);
3899 prod = sc->sc_atq_prod;
3900 slot = atq + prod;
3901
3902 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3903 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
3904
3905 *slot = *iaq;
3906 slot->iaq_flags |= htole16(IXL_AQ_SI);
3907
3908 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3909 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
3910
3911 prod++;
3912 prod &= IXL_AQ_MASK;
3913 sc->sc_atq_prod = prod;
3914 ixl_wr(sc, sc->sc_aq_regs->atq_tail, prod);
3915
3916 while (ixl_rd(sc, sc->sc_aq_regs->atq_head) != prod) {
3917 delaymsec(1);
3918
3919 if (t++ > tm) {
3920 mutex_exit(&sc->sc_atq_lock);
3921 return ETIMEDOUT;
3922 }
3923 }
3924
3925 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3926 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTREAD);
3927 *iaq = *slot;
3928 memset(slot, 0, sizeof(*slot));
3929 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3930 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREREAD);
3931
3932 sc->sc_atq_cons = prod;
3933
3934 mutex_exit(&sc->sc_atq_lock);
3935
3936 return 0;
3937 }
3938
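/*
 * Query the firmware and admin queue API versions, print them, and enable
 * the admin-queue feature flags implied by the MAC type and the reported
 * API level.
 */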
3939 static int
3940 ixl_get_version(struct ixl_softc *sc)
3941 {
3942 struct ixl_aq_desc iaq;
3943 uint32_t fwbuild, fwver, apiver;
3944 uint16_t api_maj_ver, api_min_ver;
3945
3946 memset(&iaq, 0, sizeof(iaq));
3947 iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VERSION);
3948
3951 if (ixl_atq_poll(sc, &iaq, 2000) != 0)
3952 return ETIMEDOUT;
3953 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK))
3954 return EIO;
3955
3956 fwbuild = le32toh(iaq.iaq_param[1]);
3957 fwver = le32toh(iaq.iaq_param[2]);
3958 apiver = le32toh(iaq.iaq_param[3]);
3959
3960 api_maj_ver = (uint16_t)apiver;
3961 api_min_ver = (uint16_t)(apiver >> 16);
3962
3963 aprint_normal(", FW %hu.%hu.%05u API %hu.%hu", (uint16_t)fwver,
3964 (uint16_t)(fwver >> 16), fwbuild, api_maj_ver, api_min_ver);
3965
3966 if (sc->sc_mac_type == I40E_MAC_X722) {
3967 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK |
3968 IXL_SC_AQ_FLAG_NVMREAD);
3969 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL);
3970 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RSS);
3971 }
3972
3973 #define IXL_API_VER(maj, min) (((uint32_t)(maj) << 16) | (min))
3974 if (IXL_API_VER(api_maj_ver, api_min_ver) >= IXL_API_VER(1, 5)) {
3975 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL);
3976 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK);
3977 }
3978 #undef IXL_API_VER
3979
3980 return 0;
3981 }
3982
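/*
 * Read the NVM image version, EETRACK id and OEM version words from the
 * NVM shadow RAM and print them next to the firmware version.
 */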
3983 static int
3984 ixl_get_nvm_version(struct ixl_softc *sc)
3985 {
3986 uint16_t nvmver, cfg_ptr, eetrack_hi, eetrack_lo, oem_hi, oem_lo;
3987 uint32_t eetrack, oem;
3988 uint16_t nvm_maj_ver, nvm_min_ver, oem_build;
3989 uint8_t oem_ver, oem_patch;
3990
3991 nvmver = cfg_ptr = eetrack_hi = eetrack_lo = oem_hi = oem_lo = 0;
3992 ixl_rd16_nvm(sc, I40E_SR_NVM_DEV_STARTER_VERSION, &nvmver);
3993 ixl_rd16_nvm(sc, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
3994 ixl_rd16_nvm(sc, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
3995 ixl_rd16_nvm(sc, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
3996 ixl_rd16_nvm(sc, cfg_ptr + I40E_NVM_OEM_VER_OFF, &oem_hi);
3997 ixl_rd16_nvm(sc, cfg_ptr + I40E_NVM_OEM_VER_OFF + 1, &oem_lo);
3998
3999 nvm_maj_ver = (uint16_t)__SHIFTOUT(nvmver, IXL_NVM_VERSION_HI_MASK);
4000 nvm_min_ver = (uint16_t)__SHIFTOUT(nvmver, IXL_NVM_VERSION_LO_MASK);
4001 eetrack = ((uint32_t)eetrack_hi << 16) | eetrack_lo;
4002 oem = ((uint32_t)oem_hi << 16) | oem_lo;
4003 oem_ver = __SHIFTOUT(oem, IXL_NVM_OEMVERSION_MASK);
4004 oem_build = __SHIFTOUT(oem, IXL_NVM_OEMBUILD_MASK);
4005 oem_patch = __SHIFTOUT(oem, IXL_NVM_OEMPATCH_MASK);
4006
4007 aprint_normal(" nvm %x.%02x etid %08x oem %d.%d.%d",
4008 nvm_maj_ver, nvm_min_ver, eetrack,
4009 oem_ver, oem_build, oem_patch);
4010
4011 return 0;
4012 }
4013
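/*
 * Take the port out of PXE mode so the regular LAN receive path is usable;
 * an EEXIST return from the firmware (mode already cleared) is treated as
 * success.
 */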
4014 static int
4015 ixl_pxe_clear(struct ixl_softc *sc)
4016 {
4017 struct ixl_aq_desc iaq;
4018 int rv;
4019
4020 memset(&iaq, 0, sizeof(iaq));
4021 iaq.iaq_opcode = htole16(IXL_AQ_OP_CLEAR_PXE_MODE);
4022 iaq.iaq_param[0] = htole32(0x2);
4023
4024 rv = ixl_atq_poll(sc, &iaq, 250);
4025
4026 ixl_wr(sc, I40E_GLLAN_RCTL_0, 0x1);
4027
4028 if (rv != 0)
4029 return ETIMEDOUT;
4030
4031 switch (iaq.iaq_retval) {
4032 case htole16(IXL_AQ_RC_OK):
4033 case htole16(IXL_AQ_RC_EEXIST):
4034 break;
4035 default:
4036 return EIO;
4037 }
4038
4039 return 0;
4040 }
4041
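/*
 * Stop the firmware LLDP agent.  EMODE/EPERM returns are silently ignored;
 * the agent may already be stopped or not be controllable on this port.
 */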
4042 static int
4043 ixl_lldp_shut(struct ixl_softc *sc)
4044 {
4045 struct ixl_aq_desc iaq;
4046
4047 memset(&iaq, 0, sizeof(iaq));
4048 iaq.iaq_opcode = htole16(IXL_AQ_OP_LLDP_STOP_AGENT);
4049 iaq.iaq_param[0] = htole32(IXL_LLDP_SHUTDOWN);
4050
4051 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4052 aprint_error_dev(sc->sc_dev, "STOP LLDP AGENT timeout\n");
4053 return -1;
4054 }
4055
4056 switch (iaq.iaq_retval) {
4057 case htole16(IXL_AQ_RC_EMODE):
4058 case htole16(IXL_AQ_RC_EPERM):
4059 /* ignore silently */
4060 default:
4061 break;
4062 }
4063
4064 return 0;
4065 }
4066
4067 static void
4068 ixl_parse_hw_capability(struct ixl_softc *sc, struct ixl_aq_capability *cap)
4069 {
4070 uint16_t id;
4071 uint32_t number, logical_id;
4072
4073 id = le16toh(cap->cap_id);
4074 number = le32toh(cap->number);
4075 logical_id = le32toh(cap->logical_id);
4076
4077 switch (id) {
4078 case IXL_AQ_CAP_RSS:
4079 sc->sc_rss_table_size = number;
4080 sc->sc_rss_table_entry_width = logical_id;
4081 break;
4082 case IXL_AQ_CAP_RXQ:
4083 case IXL_AQ_CAP_TXQ:
4084 sc->sc_nqueue_pairs_device = MIN(number,
4085 sc->sc_nqueue_pairs_device);
4086 break;
4087 }
4088 }
4089
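/*
 * Fetch the function capability list.  If the firmware answers ENOMEM it
 * also reports the required buffer length in iaq_datalen, so the buffer is
 * reallocated at that size and the command retried.
 */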
4090 static int
4091 ixl_get_hw_capabilities(struct ixl_softc *sc)
4092 {
4093 struct ixl_dmamem idm;
4094 struct ixl_aq_desc iaq;
4095 struct ixl_aq_capability *caps;
4096 size_t i, ncaps;
4097 bus_size_t caps_size;
4098 uint16_t status;
4099 int rv;
4100
4101 caps_size = sizeof(caps[0]) * 40;
4102 memset(&iaq, 0, sizeof(iaq));
4103 iaq.iaq_opcode = htole16(IXL_AQ_OP_LIST_FUNC_CAP);
4104
4105 do {
4106 if (ixl_dmamem_alloc(sc, &idm, caps_size, 0) != 0) {
4107 return -1;
4108 }
4109
4110 iaq.iaq_flags = htole16(IXL_AQ_BUF |
4111 (caps_size > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4112 iaq.iaq_datalen = htole16(caps_size);
4113 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
4114
4115 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0,
4116 IXL_DMA_LEN(&idm), BUS_DMASYNC_PREREAD);
4117
4118 rv = ixl_atq_poll(sc, &iaq, 250);
4119
4120 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0,
4121 IXL_DMA_LEN(&idm), BUS_DMASYNC_POSTREAD);
4122
4123 if (rv != 0) {
4124 aprint_error(", HW capabilities timeout\n");
4125 goto done;
4126 }
4127
4128 status = le16toh(iaq.iaq_retval);
4129
4130 if (status == IXL_AQ_RC_ENOMEM) {
4131 caps_size = le16toh(iaq.iaq_datalen);
4132 ixl_dmamem_free(sc, &idm);
4133 }
4134 } while (status == IXL_AQ_RC_ENOMEM);
4135
4136 if (status != IXL_AQ_RC_OK) {
4137 aprint_error(", HW capabilities error\n");
4138 goto done;
4139 }
4140
4141 caps = IXL_DMA_KVA(&idm);
4142 ncaps = le16toh(iaq.iaq_param[1]);
4143
4144 for (i = 0; i < ncaps; i++) {
4145 ixl_parse_hw_capability(sc, &caps[i]);
4146 }
4147
4148 done:
4149 ixl_dmamem_free(sc, &idm);
4150 return rv;
4151 }
4152
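/*
 * Read the port MAC address reported by the firmware into sc_enaddr.
 */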
4153 static int
4154 ixl_get_mac(struct ixl_softc *sc)
4155 {
4156 struct ixl_dmamem idm;
4157 struct ixl_aq_desc iaq;
4158 struct ixl_aq_mac_addresses *addrs;
4159 int rv;
4160
4161 if (ixl_dmamem_alloc(sc, &idm, sizeof(*addrs), 0) != 0) {
4162 aprint_error(", unable to allocate mac addresses\n");
4163 return -1;
4164 }
4165
4166 memset(&iaq, 0, sizeof(iaq));
4167 iaq.iaq_flags = htole16(IXL_AQ_BUF);
4168 iaq.iaq_opcode = htole16(IXL_AQ_OP_MAC_ADDRESS_READ);
4169 iaq.iaq_datalen = htole16(sizeof(*addrs));
4170 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
4171
4172 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4173 BUS_DMASYNC_PREREAD);
4174
4175 rv = ixl_atq_poll(sc, &iaq, 250);
4176
4177 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4178 BUS_DMASYNC_POSTREAD);
4179
4180 if (rv != 0) {
4181 aprint_error(", MAC ADDRESS READ timeout\n");
4182 rv = -1;
4183 goto done;
4184 }
4185 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4186 aprint_error(", MAC ADDRESS READ error\n");
4187 rv = -1;
4188 goto done;
4189 }
4190
4191 addrs = IXL_DMA_KVA(&idm);
4192 if (!ISSET(iaq.iaq_param[0], htole32(IXL_AQ_MAC_PORT_VALID))) {
4193 printf(", port address is not valid\n");
4194 goto done;
4195 }
4196
4197 memcpy(sc->sc_enaddr, addrs->port, ETHER_ADDR_LEN);
4198 rv = 0;
4199
4200 done:
4201 ixl_dmamem_free(sc, &idm);
4202 return rv;
4203 }
4204
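/*
 * Retrieve the internal switch configuration, dump the reported elements
 * when debugging, and record the uplink, downlink and own SEIDs of the
 * first element for later VSI and filter commands.
 */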
4205 static int
4206 ixl_get_switch_config(struct ixl_softc *sc)
4207 {
4208 struct ixl_dmamem idm;
4209 struct ixl_aq_desc iaq;
4210 struct ixl_aq_switch_config *hdr;
4211 struct ixl_aq_switch_config_element *elms, *elm;
4212 unsigned int nelm, i;
4213 int rv;
4214
4215 if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
4216 aprint_error_dev(sc->sc_dev,
4217 "unable to allocate switch config buffer\n");
4218 return -1;
4219 }
4220
4221 memset(&iaq, 0, sizeof(iaq));
4222 iaq.iaq_flags = htole16(IXL_AQ_BUF |
4223 (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4224 iaq.iaq_opcode = htole16(IXL_AQ_OP_SWITCH_GET_CONFIG);
4225 iaq.iaq_datalen = htole16(IXL_AQ_BUFLEN);
4226 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
4227
4228 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4229 BUS_DMASYNC_PREREAD);
4230
4231 rv = ixl_atq_poll(sc, &iaq, 250);
4232
4233 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4234 BUS_DMASYNC_POSTREAD);
4235
4236 if (rv != 0) {
4237 aprint_error_dev(sc->sc_dev, "GET SWITCH CONFIG timeout\n");
4238 rv = -1;
4239 goto done;
4240 }
4241 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4242 aprint_error_dev(sc->sc_dev, "GET SWITCH CONFIG error\n");
4243 rv = -1;
4244 goto done;
4245 }
4246
4247 hdr = IXL_DMA_KVA(&idm);
4248 elms = (struct ixl_aq_switch_config_element *)(hdr + 1);
4249
4250 nelm = le16toh(hdr->num_reported);
4251 if (nelm < 1) {
4252 aprint_error_dev(sc->sc_dev, "no switch config available\n");
4253 rv = -1;
4254 goto done;
4255 }
4256
4257 for (i = 0; i < nelm; i++) {
4258 elm = &elms[i];
4259
4260 aprint_debug_dev(sc->sc_dev,
4261 "type %x revision %u seid %04x\n",
4262 elm->type, elm->revision, le16toh(elm->seid));
4263 aprint_debug_dev(sc->sc_dev,
4264 "uplink %04x downlink %04x\n",
4265 le16toh(elm->uplink_seid),
4266 le16toh(elm->downlink_seid));
4267 aprint_debug_dev(sc->sc_dev,
4268 "conntype %x scheduler %04x extra %04x\n",
4269 elm->connection_type,
4270 le16toh(elm->scheduler_id),
4271 le16toh(elm->element_info));
4272 }
4273
4274 elm = &elms[0];
4275
4276 sc->sc_uplink_seid = elm->uplink_seid;
4277 sc->sc_downlink_seid = elm->downlink_seid;
4278 sc->sc_seid = elm->seid;
4279
4280 if ((sc->sc_uplink_seid == htole16(0)) !=
4281 (sc->sc_downlink_seid == htole16(0))) {
4282 aprint_error_dev(sc->sc_dev, "SEIDs are misconfigured\n");
4283 rv = -1;
4284 goto done;
4285 }
4286
4287 done:
4288 ixl_dmamem_free(sc, &idm);
4289 return rv;
4290 }
4291
4292 static int
4293 ixl_phy_mask_ints(struct ixl_softc *sc)
4294 {
4295 struct ixl_aq_desc iaq;
4296
4297 memset(&iaq, 0, sizeof(iaq));
4298 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_EVENT_MASK);
4299 iaq.iaq_param[2] = htole32(IXL_AQ_PHY_EV_MASK &
4300 ~(IXL_AQ_PHY_EV_LINK_UPDOWN | IXL_AQ_PHY_EV_MODULE_QUAL_FAIL |
4301 IXL_AQ_PHY_EV_MEDIA_NA));
4302
4303 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4304 aprint_error_dev(sc->sc_dev, "SET PHY EVENT MASK timeout\n");
4305 return -1;
4306 }
4307 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4308 aprint_error_dev(sc->sc_dev, "SET PHY EVENT MASK error\n");
4309 return -1;
4310 }
4311
4312 return 0;
4313 }
4314
4315 static int
4316 ixl_get_phy_abilities(struct ixl_softc *sc, struct ixl_dmamem *idm)
4317 {
4318 struct ixl_aq_desc iaq;
4319 int rv;
4320
4321 memset(&iaq, 0, sizeof(iaq));
4322 iaq.iaq_flags = htole16(IXL_AQ_BUF |
4323 (IXL_DMA_LEN(idm) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4324 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_GET_ABILITIES);
4325 iaq.iaq_datalen = htole16(IXL_DMA_LEN(idm));
4326 iaq.iaq_param[0] = htole32(IXL_AQ_PHY_REPORT_INIT);
4327 ixl_aq_dva(&iaq, IXL_DMA_DVA(idm));
4328
4329 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
4330 BUS_DMASYNC_PREREAD);
4331
4332 rv = ixl_atq_poll(sc, &iaq, 250);
4333
4334 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
4335 BUS_DMASYNC_POSTREAD);
4336
4337 if (rv != 0)
4338 return -1;
4339
4340 return le16toh(iaq.iaq_retval);
4341 }
4342
4343 static int
4344 ixl_get_phy_info(struct ixl_softc *sc)
4345 {
4346 struct ixl_dmamem idm;
4347 struct ixl_aq_phy_abilities *phy;
4348 int rv;
4349
4350 if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
4351 aprint_error_dev(sc->sc_dev,
4352 "unable to allocate phy abilities buffer\n");
4353 return -1;
4354 }
4355
4356 rv = ixl_get_phy_abilities(sc, &idm);
4357 switch (rv) {
4358 case -1:
4359 aprint_error_dev(sc->sc_dev, "GET PHY ABILITIES timeout\n");
4360 goto done;
4361 case IXL_AQ_RC_OK:
4362 break;
4363 case IXL_AQ_RC_EIO:
4364 		aprint_error_dev(sc->sc_dev, "unable to query phy types\n");
4365 goto done;
4366 default:
4367 aprint_error_dev(sc->sc_dev,
4368 		    "GET PHY ABILITIES error %u\n", rv);
4369 goto done;
4370 }
4371
4372 phy = IXL_DMA_KVA(&idm);
4373
4374 sc->sc_phy_types = le32toh(phy->phy_type);
4375 sc->sc_phy_types |= (uint64_t)le32toh(phy->phy_type_ext) << 32;
4376
4377 sc->sc_phy_abilities = phy->abilities;
4378 sc->sc_phy_linkspeed = phy->link_speed;
4379 sc->sc_phy_fec_cfg = phy->fec_cfg_curr_mod_ext_info &
4380 (IXL_AQ_ENABLE_FEC_KR | IXL_AQ_ENABLE_FEC_RS |
4381 IXL_AQ_REQUEST_FEC_KR | IXL_AQ_REQUEST_FEC_RS);
4382 sc->sc_eee_cap = phy->eee_capability;
4383 sc->sc_eeer_val = phy->eeer_val;
4384 sc->sc_d3_lpan = phy->d3_lpan;
4385
4386 rv = 0;
4387
4388 done:
4389 ixl_dmamem_free(sc, &idm);
4390 return rv;
4391 }
4392
4393 static int
4394 ixl_set_phy_config(struct ixl_softc *sc,
4395 uint8_t link_speed, uint8_t abilities, bool polling)
4396 {
4397 struct ixl_aq_phy_param *param;
4398 struct ixl_atq iatq;
4399 struct ixl_aq_desc *iaq;
4400 int error;
4401
4402 memset(&iatq, 0, sizeof(iatq));
4403
4404 iaq = &iatq.iatq_desc;
4405 iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_CONFIG);
4406 param = (struct ixl_aq_phy_param *)&iaq->iaq_param;
4407 param->phy_types = htole32((uint32_t)sc->sc_phy_types);
4408 param->phy_type_ext = (uint8_t)(sc->sc_phy_types >> 32);
4409 param->link_speed = link_speed;
4410 param->abilities = abilities | IXL_AQ_PHY_ABILITY_AUTO_LINK;
4411 param->fec_cfg = sc->sc_phy_fec_cfg;
4412 param->eee_capability = sc->sc_eee_cap;
4413 param->eeer_val = sc->sc_eeer_val;
4414 param->d3_lpan = sc->sc_d3_lpan;
4415
4416 if (polling)
4417 error = ixl_atq_poll(sc, iaq, 250);
4418 else
4419 error = ixl_atq_exec(sc, &iatq);
4420
4421 if (error != 0)
4422 return error;
4423
4424 switch (le16toh(iaq->iaq_retval)) {
4425 case IXL_AQ_RC_OK:
4426 break;
4427 case IXL_AQ_RC_EPERM:
4428 return EPERM;
4429 default:
4430 return EIO;
4431 }
4432
4433 return 0;
4434 }
4435
4436 static int
4437 ixl_set_phy_autoselect(struct ixl_softc *sc)
4438 {
4439 uint8_t link_speed, abilities;
4440
4441 link_speed = sc->sc_phy_linkspeed;
4442 abilities = IXL_PHY_ABILITY_LINKUP | IXL_PHY_ABILITY_AUTONEGO;
4443
4444 return ixl_set_phy_config(sc, link_speed, abilities, true);
4445 }
4446
4447 static int
4448 ixl_get_link_status_poll(struct ixl_softc *sc, int *l)
4449 {
4450 struct ixl_aq_desc iaq;
4451 struct ixl_aq_link_param *param;
4452 int link;
4453
4454 memset(&iaq, 0, sizeof(iaq));
4455 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
4456 param = (struct ixl_aq_link_param *)iaq.iaq_param;
4457 param->notify = IXL_AQ_LINK_NOTIFY;
4458
4459 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4460 return ETIMEDOUT;
4461 }
4462 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4463 return EIO;
4464 }
4465
4466 link = ixl_set_link_status(sc, &iaq);
4467
4468 if (l != NULL)
4469 *l = link;
4470
4471 return 0;
4472 }
4473
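/*
 * Fetch the VSI parameters into the scratch buffer and remember the VSI
 * number and statistics counter index assigned by the firmware.
 */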
4474 static int
4475 ixl_get_vsi(struct ixl_softc *sc)
4476 {
4477 struct ixl_dmamem *vsi = &sc->sc_scratch;
4478 struct ixl_aq_desc iaq;
4479 struct ixl_aq_vsi_param *param;
4480 struct ixl_aq_vsi_reply *reply;
4481 struct ixl_aq_vsi_data *data;
4482 int rv;
4483
4484 /* grumble, vsi info isn't "known" at compile time */
4485
4486 memset(&iaq, 0, sizeof(iaq));
4487 iaq.iaq_flags = htole16(IXL_AQ_BUF |
4488 (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4489 iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VSI_PARAMS);
4490 iaq.iaq_datalen = htole16(IXL_DMA_LEN(vsi));
4491 ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
4492
4493 param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
4494 param->uplink_seid = sc->sc_seid;
4495
4496 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4497 BUS_DMASYNC_PREREAD);
4498
4499 rv = ixl_atq_poll(sc, &iaq, 250);
4500
4501 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4502 BUS_DMASYNC_POSTREAD);
4503
4504 if (rv != 0) {
4505 return ETIMEDOUT;
4506 }
4507
4508 switch (le16toh(iaq.iaq_retval)) {
4509 case IXL_AQ_RC_OK:
4510 break;
4511 case IXL_AQ_RC_ENOENT:
4512 return ENOENT;
4513 case IXL_AQ_RC_EACCES:
4514 return EACCES;
4515 default:
4516 return EIO;
4517 }
4518
4519 reply = (struct ixl_aq_vsi_reply *)iaq.iaq_param;
4520 sc->sc_vsi_number = le16toh(reply->vsi_number);
4521 data = IXL_DMA_KVA(vsi);
4522 sc->sc_vsi_stat_counter_idx = le16toh(data->stat_counter_idx);
4523
4524 return 0;
4525 }
4526
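/*
 * Push the driver's VSI settings back to the firmware: a contiguous queue
 * mapping covering sc_nqueue_pairs queues, and the port VLAN mode, with
 * tag stripping enabled only when hardware VLAN tagging is in use.
 */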
4527 static int
4528 ixl_set_vsi(struct ixl_softc *sc)
4529 {
4530 struct ixl_dmamem *vsi = &sc->sc_scratch;
4531 struct ixl_aq_desc iaq;
4532 struct ixl_aq_vsi_param *param;
4533 struct ixl_aq_vsi_data *data = IXL_DMA_KVA(vsi);
4534 unsigned int qnum;
4535 uint16_t val;
4536 int rv;
4537
4538 qnum = sc->sc_nqueue_pairs - 1;
4539
4540 data->valid_sections = htole16(IXL_AQ_VSI_VALID_QUEUE_MAP |
4541 IXL_AQ_VSI_VALID_VLAN);
4542
4543 CLR(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_MASK));
4544 SET(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_CONTIG));
4545 data->queue_mapping[0] = htole16(0);
4546 data->tc_mapping[0] = htole16((0 << IXL_AQ_VSI_TC_Q_OFFSET_SHIFT) |
4547 (qnum << IXL_AQ_VSI_TC_Q_NUMBER_SHIFT));
4548
4549 val = le16toh(data->port_vlan_flags);
4550 CLR(val, IXL_AQ_VSI_PVLAN_MODE_MASK | IXL_AQ_VSI_PVLAN_EMOD_MASK);
4551 SET(val, IXL_AQ_VSI_PVLAN_MODE_ALL);
4552
4553 if (ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWTAGGING)) {
4554 SET(val, IXL_AQ_VSI_PVLAN_EMOD_STR_BOTH);
4555 } else {
4556 SET(val, IXL_AQ_VSI_PVLAN_EMOD_NOTHING);
4557 }
4558
4559 data->port_vlan_flags = htole16(val);
4560
4561 /* grumble, vsi info isn't "known" at compile time */
4562
4563 memset(&iaq, 0, sizeof(iaq));
4564 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD |
4565 (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4566 iaq.iaq_opcode = htole16(IXL_AQ_OP_UPD_VSI_PARAMS);
4567 iaq.iaq_datalen = htole16(IXL_DMA_LEN(vsi));
4568 ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
4569
4570 param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
4571 param->uplink_seid = sc->sc_seid;
4572
4573 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4574 BUS_DMASYNC_PREWRITE);
4575
4576 rv = ixl_atq_poll(sc, &iaq, 250);
4577
4578 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4579 BUS_DMASYNC_POSTWRITE);
4580
4581 if (rv != 0) {
4582 return ETIMEDOUT;
4583 }
4584
4585 switch (le16toh(iaq.iaq_retval)) {
4586 case IXL_AQ_RC_OK:
4587 break;
4588 case IXL_AQ_RC_ENOENT:
4589 return ENOENT;
4590 case IXL_AQ_RC_EACCES:
4591 return EACCES;
4592 default:
4593 return EIO;
4594 }
4595
4596 return 0;
4597 }
4598
4599 static void
4600 ixl_set_filter_control(struct ixl_softc *sc)
4601 {
4602 uint32_t reg;
4603
4604 reg = ixl_rd_rx_csr(sc, I40E_PFQF_CTL_0);
4605
4606 CLR(reg, I40E_PFQF_CTL_0_HASHLUTSIZE_MASK);
4607 SET(reg, I40E_HASH_LUT_SIZE_128 << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT);
4608
4609 SET(reg, I40E_PFQF_CTL_0_FD_ENA_MASK);
4610 SET(reg, I40E_PFQF_CTL_0_ETYPE_ENA_MASK);
4611 SET(reg, I40E_PFQF_CTL_0_MACVLAN_ENA_MASK);
4612
4613 ixl_wr_rx_csr(sc, I40E_PFQF_CTL_0, reg);
4614 }
4615
4616 static inline void
4617 ixl_get_default_rss_key(uint32_t *buf, size_t len)
4618 {
4619 size_t cplen;
4620 uint8_t rss_seed[RSS_KEYSIZE];
4621
4622 rss_getkey(rss_seed);
4623 memset(buf, 0, len);
4624
4625 cplen = MIN(len, sizeof(rss_seed));
4626 memcpy(buf, rss_seed, cplen);
4627 }
4628
4629 static int
4630 ixl_set_rss_key(struct ixl_softc *sc, uint8_t *key, size_t keylen)
4631 {
4632 struct ixl_dmamem *idm;
4633 struct ixl_atq iatq;
4634 struct ixl_aq_desc *iaq;
4635 struct ixl_aq_rss_key_param *param;
4636 struct ixl_aq_rss_key_data *data;
4637 size_t len, datalen, stdlen, extlen;
4638 uint16_t vsi_id;
4639 int rv;
4640
4641 memset(&iatq, 0, sizeof(iatq));
4642 iaq = &iatq.iatq_desc;
4643 idm = &sc->sc_aqbuf;
4644
4645 datalen = sizeof(*data);
4646
4647 	/* XXX the buffer must not be larger than the register-based key size */
4648 datalen = MIN(IXL_RSS_KEY_SIZE_REG * sizeof(uint32_t), datalen);
4649
4650 iaq->iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD |
4651 (datalen > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4652 iaq->iaq_opcode = htole16(IXL_AQ_OP_RSS_SET_KEY);
4653 iaq->iaq_datalen = htole16(datalen);
4654
4655 param = (struct ixl_aq_rss_key_param *)iaq->iaq_param;
4656 vsi_id = (sc->sc_vsi_number << IXL_AQ_RSSKEY_VSI_ID_SHIFT) |
4657 IXL_AQ_RSSKEY_VSI_VALID;
4658 param->vsi_id = htole16(vsi_id);
4659
4660 memset(IXL_DMA_KVA(idm), 0, IXL_DMA_LEN(idm));
4661 data = IXL_DMA_KVA(idm);
4662
4663 len = MIN(keylen, datalen);
4664 stdlen = MIN(sizeof(data->standard_rss_key), len);
4665 memcpy(data->standard_rss_key, key, stdlen);
4666 len = (len > stdlen) ? (len - stdlen) : 0;
4667
4668 	extlen = MIN(sizeof(data->extended_hash_key), len);
4670 memcpy(data->extended_hash_key, key + stdlen, extlen);
4671
4672 ixl_aq_dva(iaq, IXL_DMA_DVA(idm));
4673
4674 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
4675 IXL_DMA_LEN(idm), BUS_DMASYNC_PREWRITE);
4676
4677 rv = ixl_atq_exec(sc, &iatq);
4678
4679 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
4680 IXL_DMA_LEN(idm), BUS_DMASYNC_POSTWRITE);
4681
4682 if (rv != 0) {
4683 return ETIMEDOUT;
4684 }
4685
4686 if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK)) {
4687 return EIO;
4688 }
4689
4690 return 0;
4691 }
4692
4693 static int
4694 ixl_set_rss_lut(struct ixl_softc *sc, uint8_t *lut, size_t lutlen)
4695 {
4696 struct ixl_dmamem *idm;
4697 struct ixl_atq iatq;
4698 struct ixl_aq_desc *iaq;
4699 struct ixl_aq_rss_lut_param *param;
4700 uint16_t vsi_id;
4701 uint8_t *data;
4702 size_t dmalen;
4703 int rv;
4704
4705 memset(&iatq, 0, sizeof(iatq));
4706 iaq = &iatq.iatq_desc;
4707 idm = &sc->sc_aqbuf;
4708
4709 dmalen = MIN(lutlen, IXL_DMA_LEN(idm));
4710
4711 iaq->iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD |
4712 (dmalen > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4713 iaq->iaq_opcode = htole16(IXL_AQ_OP_RSS_SET_LUT);
4714 iaq->iaq_datalen = htole16(dmalen);
4715
4716 memset(IXL_DMA_KVA(idm), 0, IXL_DMA_LEN(idm));
4717 data = IXL_DMA_KVA(idm);
4718 memcpy(data, lut, dmalen);
4719 ixl_aq_dva(iaq, IXL_DMA_DVA(idm));
4720
4721 param = (struct ixl_aq_rss_lut_param *)iaq->iaq_param;
4722 vsi_id = (sc->sc_vsi_number << IXL_AQ_RSSLUT_VSI_ID_SHIFT) |
4723 IXL_AQ_RSSLUT_VSI_VALID;
4724 param->vsi_id = htole16(vsi_id);
4725 param->flags = htole16(IXL_AQ_RSSLUT_TABLE_TYPE_PF <<
4726 IXL_AQ_RSSLUT_TABLE_TYPE_SHIFT);
4727
4728 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
4729 IXL_DMA_LEN(idm), BUS_DMASYNC_PREWRITE);
4730
4731 rv = ixl_atq_exec(sc, &iatq);
4732
4733 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
4734 IXL_DMA_LEN(idm), BUS_DMASYNC_POSTWRITE);
4735
4736 if (rv != 0) {
4737 return ETIMEDOUT;
4738 }
4739
4740 if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK)) {
4741 return EIO;
4742 }
4743
4744 return 0;
4745 }
4746
4747 static int
4748 ixl_register_rss_key(struct ixl_softc *sc)
4749 {
4750 uint32_t rss_seed[IXL_RSS_KEY_SIZE_REG];
4751 int rv;
4752 size_t i;
4753
4754 ixl_get_default_rss_key(rss_seed, sizeof(rss_seed));
4755
4756 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RSS)){
4757 rv = ixl_set_rss_key(sc, (uint8_t*)rss_seed,
4758 sizeof(rss_seed));
4759 } else {
4760 rv = 0;
4761 for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
4762 ixl_wr_rx_csr(sc, I40E_PFQF_HKEY(i), rss_seed[i]);
4763 }
4764 }
4765
4766 return rv;
4767 }
4768
4769 static void
4770 ixl_register_rss_pctype(struct ixl_softc *sc)
4771 {
4772 uint64_t set_hena = 0;
4773 uint32_t hena0, hena1;
4774
4775 if (sc->sc_mac_type == I40E_MAC_X722)
4776 set_hena = IXL_RSS_HENA_DEFAULT_X722;
4777 else
4778 set_hena = IXL_RSS_HENA_DEFAULT_XL710;
4779
4780 hena0 = ixl_rd_rx_csr(sc, I40E_PFQF_HENA(0));
4781 hena1 = ixl_rd_rx_csr(sc, I40E_PFQF_HENA(1));
4782
4783 SET(hena0, set_hena);
4784 SET(hena1, set_hena >> 32);
4785
4786 ixl_wr_rx_csr(sc, I40E_PFQF_HENA(0), hena0);
4787 ixl_wr_rx_csr(sc, I40E_PFQF_HENA(1), hena1);
4788 }
4789
4790 static int
4791 ixl_register_rss_hlut(struct ixl_softc *sc)
4792 {
4793 unsigned int qid;
4794 uint8_t hlut_buf[512], lut_mask;
4795 uint32_t *hluts;
4796 size_t i, hluts_num;
4797 int rv;
4798
4799 lut_mask = (0x01 << sc->sc_rss_table_entry_width) - 1;
4800
4801 for (i = 0; i < sc->sc_rss_table_size; i++) {
4802 qid = i % sc->sc_nqueue_pairs;
4803 hlut_buf[i] = qid & lut_mask;
4804 }
4805
4806 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RSS)) {
4807 rv = ixl_set_rss_lut(sc, hlut_buf, sizeof(hlut_buf));
4808 } else {
4809 rv = 0;
4810 hluts = (uint32_t *)hlut_buf;
4811 hluts_num = sc->sc_rss_table_size >> 2;
4812 for (i = 0; i < hluts_num; i++) {
4813 ixl_wr(sc, I40E_PFQF_HLUT(i), hluts[i]);
4814 }
4815 ixl_flush(sc);
4816 }
4817
4818 return rv;
4819 }
4820
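/*
 * Program receive side scaling: the hash key, the enabled packet classifier
 * types (HENA) and the hash lookup table that spreads flows across the
 * active queue pairs.
 */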
4821 static void
4822 ixl_config_rss(struct ixl_softc *sc)
4823 {
4824
4825 KASSERT(mutex_owned(&sc->sc_cfg_lock));
4826
4827 ixl_register_rss_key(sc);
4828 ixl_register_rss_pctype(sc);
4829 ixl_register_rss_hlut(sc);
4830 }
4831
4832 static const struct ixl_phy_type *
4833 ixl_search_phy_type(uint8_t phy_type)
4834 {
4835 const struct ixl_phy_type *itype;
4836 uint64_t mask;
4837 unsigned int i;
4838
4839 if (phy_type >= 64)
4840 return NULL;
4841
4842 mask = 1ULL << phy_type;
4843
4844 for (i = 0; i < __arraycount(ixl_phy_type_map); i++) {
4845 itype = &ixl_phy_type_map[i];
4846
4847 if (ISSET(itype->phy_type, mask))
4848 return itype;
4849 }
4850
4851 return NULL;
4852 }
4853
4854 static uint64_t
4855 ixl_search_link_speed(uint8_t link_speed)
4856 {
4857 const struct ixl_speed_type *type;
4858 unsigned int i;
4859
4860 for (i = 0; i < __arraycount(ixl_speed_type_map); i++) {
4861 type = &ixl_speed_type_map[i];
4862
4863 if (ISSET(type->dev_speed, link_speed))
4864 return type->net_speed;
4865 }
4866
4867 return 0;
4868 }
4869
4870 static uint8_t
4871 ixl_search_baudrate(uint64_t baudrate)
4872 {
4873 const struct ixl_speed_type *type;
4874 unsigned int i;
4875
4876 for (i = 0; i < __arraycount(ixl_speed_type_map); i++) {
4877 type = &ixl_speed_type_map[i];
4878
4879 if (type->net_speed == baudrate) {
4880 return type->dev_speed;
4881 }
4882 }
4883
4884 return 0;
4885 }
4886
4887 static int
4888 ixl_restart_an(struct ixl_softc *sc)
4889 {
4890 struct ixl_aq_desc iaq;
4891
4892 memset(&iaq, 0, sizeof(iaq));
4893 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_RESTART_AN);
4894 iaq.iaq_param[0] =
4895 htole32(IXL_AQ_PHY_RESTART_AN | IXL_AQ_PHY_LINK_ENABLE);
4896
4897 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4898 aprint_error_dev(sc->sc_dev, "RESTART AN timeout\n");
4899 return -1;
4900 }
4901 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4902 aprint_error_dev(sc->sc_dev, "RESTART AN error\n");
4903 return -1;
4904 }
4905
4906 return 0;
4907 }
4908
4909 static int
4910 ixl_add_macvlan(struct ixl_softc *sc, const uint8_t *macaddr,
4911 uint16_t vlan, uint16_t flags)
4912 {
4913 struct ixl_aq_desc iaq;
4914 struct ixl_aq_add_macvlan *param;
4915 struct ixl_aq_add_macvlan_elem *elem;
4916
4917 memset(&iaq, 0, sizeof(iaq));
4918 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4919 iaq.iaq_opcode = htole16(IXL_AQ_OP_ADD_MACVLAN);
4920 iaq.iaq_datalen = htole16(sizeof(*elem));
4921 ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));
4922
4923 param = (struct ixl_aq_add_macvlan *)&iaq.iaq_param;
4924 param->num_addrs = htole16(1);
4925 param->seid0 = htole16(0x8000) | sc->sc_seid;
4926 param->seid1 = 0;
4927 param->seid2 = 0;
4928
4929 elem = IXL_DMA_KVA(&sc->sc_scratch);
4930 memset(elem, 0, sizeof(*elem));
4931 memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
4932 elem->flags = htole16(IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH | flags);
4933 elem->vlan = htole16(vlan);
4934
4935 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4936 return IXL_AQ_RC_EINVAL;
4937 }
4938
4939 switch (le16toh(iaq.iaq_retval)) {
4940 case IXL_AQ_RC_OK:
4941 break;
4942 case IXL_AQ_RC_ENOSPC:
4943 return ENOSPC;
4944 case IXL_AQ_RC_ENOENT:
4945 return ENOENT;
4946 case IXL_AQ_RC_EACCES:
4947 return EACCES;
4948 case IXL_AQ_RC_EEXIST:
4949 return EEXIST;
4950 case IXL_AQ_RC_EINVAL:
4951 return EINVAL;
4952 default:
4953 return EIO;
4954 }
4955
4956 return 0;
4957 }
4958
4959 static int
4960 ixl_remove_macvlan(struct ixl_softc *sc, const uint8_t *macaddr,
4961 uint16_t vlan, uint16_t flags)
4962 {
4963 struct ixl_aq_desc iaq;
4964 struct ixl_aq_remove_macvlan *param;
4965 struct ixl_aq_remove_macvlan_elem *elem;
4966
4967 memset(&iaq, 0, sizeof(iaq));
4968 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4969 iaq.iaq_opcode = htole16(IXL_AQ_OP_REMOVE_MACVLAN);
4970 iaq.iaq_datalen = htole16(sizeof(*elem));
4971 ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));
4972
4973 param = (struct ixl_aq_remove_macvlan *)&iaq.iaq_param;
4974 param->num_addrs = htole16(1);
4975 param->seid0 = htole16(0x8000) | sc->sc_seid;
4976 param->seid1 = 0;
4977 param->seid2 = 0;
4978
4979 elem = IXL_DMA_KVA(&sc->sc_scratch);
4980 memset(elem, 0, sizeof(*elem));
4981 memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
4982 elem->flags = htole16(IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH | flags);
4983 elem->vlan = htole16(vlan);
4984
4985 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4986 return EINVAL;
4987 }
4988
4989 switch (le16toh(iaq.iaq_retval)) {
4990 case IXL_AQ_RC_OK:
4991 break;
4992 case IXL_AQ_RC_ENOENT:
4993 return ENOENT;
4994 case IXL_AQ_RC_EACCES:
4995 return EACCES;
4996 case IXL_AQ_RC_EINVAL:
4997 return EINVAL;
4998 default:
4999 return EIO;
5000 }
5001
5002 return 0;
5003 }
5004
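/*
 * Set up the host memory cache (HMC) that backs the hardware's TX/RX queue
 * contexts: read the per-object sizes and counts, compute base offsets,
 * allocate the backing pages and the descriptor table that points at them,
 * then program the segment descriptors and per-object BASE/CNT registers.
 */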
5005 static int
5006 ixl_hmc(struct ixl_softc *sc)
5007 {
5008 struct {
5009 uint32_t count;
5010 uint32_t minsize;
5011 bus_size_t objsiz;
5012 bus_size_t setoff;
5013 bus_size_t setcnt;
5014 } regs[] = {
5015 {
5016 0,
5017 IXL_HMC_TXQ_MINSIZE,
5018 I40E_GLHMC_LANTXOBJSZ,
5019 I40E_GLHMC_LANTXBASE(sc->sc_pf_id),
5020 I40E_GLHMC_LANTXCNT(sc->sc_pf_id),
5021 },
5022 {
5023 0,
5024 IXL_HMC_RXQ_MINSIZE,
5025 I40E_GLHMC_LANRXOBJSZ,
5026 I40E_GLHMC_LANRXBASE(sc->sc_pf_id),
5027 I40E_GLHMC_LANRXCNT(sc->sc_pf_id),
5028 },
5029 {
5030 0,
5031 0,
5032 I40E_GLHMC_FCOEDDPOBJSZ,
5033 I40E_GLHMC_FCOEDDPBASE(sc->sc_pf_id),
5034 I40E_GLHMC_FCOEDDPCNT(sc->sc_pf_id),
5035 },
5036 {
5037 0,
5038 0,
5039 I40E_GLHMC_FCOEFOBJSZ,
5040 I40E_GLHMC_FCOEFBASE(sc->sc_pf_id),
5041 I40E_GLHMC_FCOEFCNT(sc->sc_pf_id),
5042 },
5043 };
5044 struct ixl_hmc_entry *e;
5045 uint64_t size, dva;
5046 uint8_t *kva;
5047 uint64_t *sdpage;
5048 unsigned int i;
5049 int npages, tables;
5050 uint32_t reg;
5051
5052 CTASSERT(__arraycount(regs) <= __arraycount(sc->sc_hmc_entries));
5053
5054 regs[IXL_HMC_LAN_TX].count = regs[IXL_HMC_LAN_RX].count =
5055 ixl_rd(sc, I40E_GLHMC_LANQMAX);
5056
5057 size = 0;
5058 for (i = 0; i < __arraycount(regs); i++) {
5059 e = &sc->sc_hmc_entries[i];
5060
5061 e->hmc_count = regs[i].count;
5062 reg = ixl_rd(sc, regs[i].objsiz);
5063 e->hmc_size = BIT_ULL(0x3F & reg);
5064 e->hmc_base = size;
5065
5066 if ((e->hmc_size * 8) < regs[i].minsize) {
5067 aprint_error_dev(sc->sc_dev,
5068 "kernel hmc entry is too big\n");
5069 return -1;
5070 }
5071
5072 size += roundup(e->hmc_size * e->hmc_count, IXL_HMC_ROUNDUP);
5073 }
5074 size = roundup(size, IXL_HMC_PGSIZE);
5075 npages = size / IXL_HMC_PGSIZE;
5076
5077 tables = roundup(size, IXL_HMC_L2SZ) / IXL_HMC_L2SZ;
5078
5079 if (ixl_dmamem_alloc(sc, &sc->sc_hmc_pd, size, IXL_HMC_PGSIZE) != 0) {
5080 aprint_error_dev(sc->sc_dev,
5081 "unable to allocate hmc pd memory\n");
5082 return -1;
5083 }
5084
5085 if (ixl_dmamem_alloc(sc, &sc->sc_hmc_sd, tables * IXL_HMC_PGSIZE,
5086 IXL_HMC_PGSIZE) != 0) {
5087 aprint_error_dev(sc->sc_dev,
5088 "unable to allocate hmc sd memory\n");
5089 ixl_dmamem_free(sc, &sc->sc_hmc_pd);
5090 return -1;
5091 }
5092
5093 kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
5094 memset(kva, 0, IXL_DMA_LEN(&sc->sc_hmc_pd));
5095
5096 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
5097 0, IXL_DMA_LEN(&sc->sc_hmc_pd),
5098 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5099
5100 dva = IXL_DMA_DVA(&sc->sc_hmc_pd);
5101 sdpage = IXL_DMA_KVA(&sc->sc_hmc_sd);
5102 memset(sdpage, 0, IXL_DMA_LEN(&sc->sc_hmc_sd));
5103
5104 for (i = 0; (int)i < npages; i++) {
5105 *sdpage = htole64(dva | IXL_HMC_PDVALID);
5106 sdpage++;
5107
5108 dva += IXL_HMC_PGSIZE;
5109 }
5110
5111 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_sd),
5112 0, IXL_DMA_LEN(&sc->sc_hmc_sd),
5113 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5114
5115 dva = IXL_DMA_DVA(&sc->sc_hmc_sd);
5116 for (i = 0; (int)i < tables; i++) {
5117 uint32_t count;
5118
5119 KASSERT(npages >= 0);
5120
5121 count = ((unsigned int)npages > IXL_HMC_PGS) ?
5122 IXL_HMC_PGS : (unsigned int)npages;
5123
5124 ixl_wr(sc, I40E_PFHMC_SDDATAHIGH, dva >> 32);
5125 ixl_wr(sc, I40E_PFHMC_SDDATALOW, dva |
5126 (count << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
5127 (1U << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT));
5128 ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
5129 ixl_wr(sc, I40E_PFHMC_SDCMD,
5130 (1U << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | i);
5131
5132 npages -= IXL_HMC_PGS;
5133 dva += IXL_HMC_PGSIZE;
5134 }
5135
5136 for (i = 0; i < __arraycount(regs); i++) {
5137 e = &sc->sc_hmc_entries[i];
5138
5139 ixl_wr(sc, regs[i].setoff, e->hmc_base / IXL_HMC_ROUNDUP);
5140 ixl_wr(sc, regs[i].setcnt, e->hmc_count);
5141 }
5142
5143 return 0;
5144 }
5145
5146 static void
5147 ixl_hmc_free(struct ixl_softc *sc)
5148 {
5149 ixl_dmamem_free(sc, &sc->sc_hmc_sd);
5150 ixl_dmamem_free(sc, &sc->sc_hmc_pd);
5151 }
5152
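/*
 * Bit-pack a host-order context structure into the hardware HMC layout
 * described by the packing table: each entry copies 'width' bits from
 * 'offset' in the source to bit position 'lsb' in the destination.
 */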
5153 static void
5154 ixl_hmc_pack(void *d, const void *s, const struct ixl_hmc_pack *packing,
5155 unsigned int npacking)
5156 {
5157 uint8_t *dst = d;
5158 const uint8_t *src = s;
5159 unsigned int i;
5160
5161 for (i = 0; i < npacking; i++) {
5162 const struct ixl_hmc_pack *pack = &packing[i];
5163 unsigned int offset = pack->lsb / 8;
5164 unsigned int align = pack->lsb % 8;
5165 const uint8_t *in = src + pack->offset;
5166 uint8_t *out = dst + offset;
5167 int width = pack->width;
5168 unsigned int inbits = 0;
5169
5170 if (align) {
5171 inbits = (*in++) << align;
5172 *out++ |= (inbits & 0xff);
5173 inbits >>= 8;
5174
5175 width -= 8 - align;
5176 }
5177
5178 while (width >= 8) {
5179 inbits |= (*in++) << align;
5180 *out++ = (inbits & 0xff);
5181 inbits >>= 8;
5182
5183 width -= 8;
5184 }
5185
5186 if (width > 0) {
5187 inbits |= (*in) << align;
5188 *out |= (inbits & ((1 << width) - 1));
5189 }
5190 }
5191 }
5192
5193 static struct ixl_aq_buf *
5194 ixl_aqb_alloc(struct ixl_softc *sc)
5195 {
5196 struct ixl_aq_buf *aqb;
5197
5198 aqb = malloc(sizeof(*aqb), M_DEVBUF, M_WAITOK);
5199 if (aqb == NULL)
5200 return NULL;
5201
5202 aqb->aqb_size = IXL_AQ_BUFLEN;
5203
5204 if (bus_dmamap_create(sc->sc_dmat, aqb->aqb_size, 1,
5205 aqb->aqb_size, 0,
5206 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &aqb->aqb_map) != 0)
5207 goto free;
5208 if (bus_dmamem_alloc(sc->sc_dmat, aqb->aqb_size,
5209 IXL_AQ_ALIGN, 0, &aqb->aqb_seg, 1, &aqb->aqb_nsegs,
5210 BUS_DMA_WAITOK) != 0)
5211 goto destroy;
5212 if (bus_dmamem_map(sc->sc_dmat, &aqb->aqb_seg, aqb->aqb_nsegs,
5213 aqb->aqb_size, &aqb->aqb_data, BUS_DMA_WAITOK) != 0)
5214 goto dma_free;
5215 if (bus_dmamap_load(sc->sc_dmat, aqb->aqb_map, aqb->aqb_data,
5216 aqb->aqb_size, NULL, BUS_DMA_WAITOK) != 0)
5217 goto unmap;
5218
5219 return aqb;
5220 unmap:
5221 bus_dmamem_unmap(sc->sc_dmat, aqb->aqb_data, aqb->aqb_size);
5222 dma_free:
5223 bus_dmamem_free(sc->sc_dmat, &aqb->aqb_seg, 1);
5224 destroy:
5225 bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
5226 free:
5227 free(aqb, M_DEVBUF);
5228
5229 return NULL;
5230 }
5231
5232 static void
5233 ixl_aqb_free(struct ixl_softc *sc, struct ixl_aq_buf *aqb)
5234 {
5235 bus_dmamap_unload(sc->sc_dmat, aqb->aqb_map);
5236 bus_dmamem_unmap(sc->sc_dmat, aqb->aqb_data, aqb->aqb_size);
5237 bus_dmamem_free(sc->sc_dmat, &aqb->aqb_seg, 1);
5238 bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
5239 free(aqb, M_DEVBUF);
5240 }
5241
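/*
 * Refill the admin receive queue with DMA buffers, reusing idle buffers
 * where possible, and advance the producer index so the firmware has
 * somewhere to post events.  Returns nonzero if the tail was updated.
 */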
5242 static int
5243 ixl_arq_fill(struct ixl_softc *sc)
5244 {
5245 struct ixl_aq_buf *aqb;
5246 struct ixl_aq_desc *arq, *iaq;
5247 unsigned int prod = sc->sc_arq_prod;
5248 unsigned int n;
5249 int post = 0;
5250
5251 n = ixl_rxr_unrefreshed(sc->sc_arq_prod, sc->sc_arq_cons,
5252 IXL_AQ_NUM);
5253 arq = IXL_DMA_KVA(&sc->sc_arq);
5254
5255 if (__predict_false(n <= 0))
5256 return 0;
5257
5258 do {
5259 aqb = sc->sc_arq_live[prod];
5260 iaq = &arq[prod];
5261
5262 if (aqb == NULL) {
5263 aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle);
5264 if (aqb != NULL) {
5265 SIMPLEQ_REMOVE(&sc->sc_arq_idle, aqb,
5266 ixl_aq_buf, aqb_entry);
5267 } else if ((aqb = ixl_aqb_alloc(sc)) == NULL) {
5268 break;
5269 }
5270
5271 sc->sc_arq_live[prod] = aqb;
5272 memset(aqb->aqb_data, 0, aqb->aqb_size);
5273
5274 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0,
5275 aqb->aqb_size, BUS_DMASYNC_PREREAD);
5276
5277 iaq->iaq_flags = htole16(IXL_AQ_BUF |
5278 (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ?
5279 IXL_AQ_LB : 0));
5280 iaq->iaq_opcode = 0;
5281 iaq->iaq_datalen = htole16(aqb->aqb_size);
5282 iaq->iaq_retval = 0;
5283 iaq->iaq_cookie = 0;
5284 iaq->iaq_param[0] = 0;
5285 iaq->iaq_param[1] = 0;
5286 ixl_aq_dva(iaq, aqb->aqb_map->dm_segs[0].ds_addr);
5287 }
5288
5289 prod++;
5290 prod &= IXL_AQ_MASK;
5291
5292 post = 1;
5293
5294 } while (--n);
5295
5296 if (post) {
5297 sc->sc_arq_prod = prod;
5298 ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
5299 }
5300
5301 return post;
5302 }
5303
5304 static void
5305 ixl_arq_unfill(struct ixl_softc *sc)
5306 {
5307 struct ixl_aq_buf *aqb;
5308 unsigned int i;
5309
5310 for (i = 0; i < __arraycount(sc->sc_arq_live); i++) {
5311 aqb = sc->sc_arq_live[i];
5312 if (aqb == NULL)
5313 continue;
5314
5315 sc->sc_arq_live[i] = NULL;
5316 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, aqb->aqb_size,
5317 BUS_DMASYNC_POSTREAD);
5318 ixl_aqb_free(sc, aqb);
5319 }
5320
5321 while ((aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle)) != NULL) {
5322 SIMPLEQ_REMOVE(&sc->sc_arq_idle, aqb,
5323 ixl_aq_buf, aqb_entry);
5324 ixl_aqb_free(sc, aqb);
5325 }
5326 }
5327
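/*
 * Quiesce the function before reset: mask all interrupts, point every
 * interrupt linked list at the end-of-list value, pre-announce the TX queue
 * disables, and then disable every queue owned by this PF.
 */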
5328 static void
5329 ixl_clear_hw(struct ixl_softc *sc)
5330 {
5331 uint32_t num_queues, base_queue;
5332 uint32_t num_pf_int;
5333 uint32_t num_vf_int;
5334 uint32_t num_vfs;
5335 uint32_t i, j;
5336 uint32_t val;
5337 uint32_t eol = 0x7ff;
5338
5339 /* get number of interrupts, queues, and vfs */
5340 val = ixl_rd(sc, I40E_GLPCI_CNF2);
5341 num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
5342 I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
5343 num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
5344 I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
5345
5346 val = ixl_rd(sc, I40E_PFLAN_QALLOC);
5347 base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
5348 I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
5349 j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
5350 I40E_PFLAN_QALLOC_LASTQ_SHIFT;
5351 if (val & I40E_PFLAN_QALLOC_VALID_MASK)
5352 num_queues = (j - base_queue) + 1;
5353 else
5354 num_queues = 0;
5355
5356 val = ixl_rd(sc, I40E_PF_VT_PFALLOC);
5357 i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
5358 I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
5359 j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
5360 I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
5361 if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
5362 num_vfs = (j - i) + 1;
5363 else
5364 num_vfs = 0;
5365
5366 /* stop all the interrupts */
5367 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0);
5368 ixl_flush(sc);
5369 val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
5370 for (i = 0; i < num_pf_int - 2; i++)
5371 ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), val);
5372 ixl_flush(sc);
5373
5374 /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
5375 val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
5376 ixl_wr(sc, I40E_PFINT_LNKLST0, val);
5377 for (i = 0; i < num_pf_int - 2; i++)
5378 ixl_wr(sc, I40E_PFINT_LNKLSTN(i), val);
5379 val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
5380 for (i = 0; i < num_vfs; i++)
5381 ixl_wr(sc, I40E_VPINT_LNKLST0(i), val);
5382 for (i = 0; i < num_vf_int - 2; i++)
5383 ixl_wr(sc, I40E_VPINT_LNKLSTN(i), val);
5384
5385 /* warn the HW of the coming Tx disables */
5386 for (i = 0; i < num_queues; i++) {
5387 uint32_t abs_queue_idx = base_queue + i;
5388 uint32_t reg_block = 0;
5389
5390 if (abs_queue_idx >= 128) {
5391 reg_block = abs_queue_idx / 128;
5392 abs_queue_idx %= 128;
5393 }
5394
5395 val = ixl_rd(sc, I40E_GLLAN_TXPRE_QDIS(reg_block));
5396 val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
5397 val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
5398 val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
5399
5400 ixl_wr(sc, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
5401 }
5402 delaymsec(400);
5403
5404 /* stop all the queues */
5405 for (i = 0; i < num_queues; i++) {
5406 ixl_wr(sc, I40E_QINT_TQCTL(i), 0);
5407 ixl_wr(sc, I40E_QTX_ENA(i), 0);
5408 ixl_wr(sc, I40E_QINT_RQCTL(i), 0);
5409 ixl_wr(sc, I40E_QRX_ENA(i), 0);
5410 }
5411
5412 /* short wait for all queue disables to settle */
5413 delaymsec(50);
5414 }
5415
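/*
 * Perform a PF reset: wait for any global reset to settle and for the
 * firmware to become ready, then request a PF software reset unless a
 * global reset already covered it.
 */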
5416 static int
5417 ixl_pf_reset(struct ixl_softc *sc)
5418 {
5419 uint32_t cnt = 0;
5420 uint32_t cnt1 = 0;
5421 uint32_t reg = 0, reg0 = 0;
5422 uint32_t grst_del;
5423
5424 /*
5425 * Poll for Global Reset steady state in case of recent GRST.
5426 * The grst delay value is in 100ms units, and we'll wait a
5427 * couple counts longer to be sure we don't just miss the end.
5428 */
5429 grst_del = ixl_rd(sc, I40E_GLGEN_RSTCTL);
5430 grst_del &= I40E_GLGEN_RSTCTL_GRSTDEL_MASK;
5431 grst_del >>= I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
5432
5433 grst_del = grst_del * 20;
5434
5435 for (cnt = 0; cnt < grst_del; cnt++) {
5436 reg = ixl_rd(sc, I40E_GLGEN_RSTAT);
5437 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
5438 break;
5439 delaymsec(100);
5440 }
5441 if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
5442 aprint_error(", Global reset polling failed to complete\n");
5443 return -1;
5444 }
5445
5446 /* Now Wait for the FW to be ready */
5447 for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
5448 reg = ixl_rd(sc, I40E_GLNVM_ULD);
5449 reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5450 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
5451 if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5452 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))
5453 break;
5454
5455 delaymsec(10);
5456 }
5457 if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5458 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
5459 aprint_error(", wait for FW Reset complete timed out "
5460 "(I40E_GLNVM_ULD = 0x%x)\n", reg);
5461 return -1;
5462 }
5463
5464 /*
5465 * If there was a Global Reset in progress when we got here,
5466 * we don't need to do the PF Reset
5467 */
5468 if (cnt == 0) {
5469 reg = ixl_rd(sc, I40E_PFGEN_CTRL);
5470 ixl_wr(sc, I40E_PFGEN_CTRL, reg | I40E_PFGEN_CTRL_PFSWR_MASK);
5471 for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) {
5472 reg = ixl_rd(sc, I40E_PFGEN_CTRL);
5473 if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
5474 break;
5475 delaymsec(1);
5476
5477 reg0 = ixl_rd(sc, I40E_GLGEN_RSTAT);
5478 if (reg0 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
5479 aprint_error(", Core reset upcoming."
5480 				    " Skipping PF reset request\n");
5481 return -1;
5482 }
5483 }
5484 if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
5485 aprint_error(", PF reset polling failed to complete"
5486 			    " (I40E_PFGEN_CTRL = 0x%x)\n", reg);
5487 return -1;
5488 }
5489 }
5490
5491 return 0;
5492 }
5493
5494 static int
5495 ixl_dmamem_alloc(struct ixl_softc *sc, struct ixl_dmamem *ixm,
5496 bus_size_t size, bus_size_t align)
5497 {
5498 ixm->ixm_size = size;
5499
5500 if (bus_dmamap_create(sc->sc_dmat, ixm->ixm_size, 1,
5501 ixm->ixm_size, 0,
5502 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
5503 &ixm->ixm_map) != 0)
5504 return 1;
5505 if (bus_dmamem_alloc(sc->sc_dmat, ixm->ixm_size,
5506 align, 0, &ixm->ixm_seg, 1, &ixm->ixm_nsegs,
5507 BUS_DMA_WAITOK) != 0)
5508 goto destroy;
5509 if (bus_dmamem_map(sc->sc_dmat, &ixm->ixm_seg, ixm->ixm_nsegs,
5510 ixm->ixm_size, &ixm->ixm_kva, BUS_DMA_WAITOK) != 0)
5511 goto free;
5512 if (bus_dmamap_load(sc->sc_dmat, ixm->ixm_map, ixm->ixm_kva,
5513 ixm->ixm_size, NULL, BUS_DMA_WAITOK) != 0)
5514 goto unmap;
5515
5516 memset(ixm->ixm_kva, 0, ixm->ixm_size);
5517
5518 return 0;
5519 unmap:
5520 bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
5521 free:
5522 bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
5523 destroy:
5524 bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
5525 return 1;
5526 }
5527
5528 static void
5529 ixl_dmamem_free(struct ixl_softc *sc, struct ixl_dmamem *ixm)
5530 {
5531 bus_dmamap_unload(sc->sc_dmat, ixm->ixm_map);
5532 bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
5533 bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
5534 bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
5535 }
5536
5537 static int
5538 ixl_setup_vlan_hwfilter(struct ixl_softc *sc)
5539 {
5540 struct ethercom *ec = &sc->sc_ec;
5541 struct vlanid_list *vlanidp;
5542 int rv;
5543
5544 ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
5545 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
5546 ixl_remove_macvlan(sc, etherbroadcastaddr, 0,
5547 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
5548
5549 rv = ixl_add_macvlan(sc, sc->sc_enaddr, 0,
5550 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5551 if (rv != 0)
5552 return rv;
5553 rv = ixl_add_macvlan(sc, etherbroadcastaddr, 0,
5554 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5555 if (rv != 0)
5556 return rv;
5557
5558 ETHER_LOCK(ec);
5559 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
5560 rv = ixl_add_macvlan(sc, sc->sc_enaddr,
5561 vlanidp->vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5562 if (rv != 0)
5563 break;
5564 rv = ixl_add_macvlan(sc, etherbroadcastaddr,
5565 vlanidp->vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5566 if (rv != 0)
5567 break;
5568 }
5569 ETHER_UNLOCK(ec);
5570
5571 return rv;
5572 }
5573
5574 static void
5575 ixl_teardown_vlan_hwfilter(struct ixl_softc *sc)
5576 {
5577 struct vlanid_list *vlanidp;
5578 struct ethercom *ec = &sc->sc_ec;
5579
5580 ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
5581 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5582 ixl_remove_macvlan(sc, etherbroadcastaddr, 0,
5583 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5584
5585 ETHER_LOCK(ec);
5586 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
5587 ixl_remove_macvlan(sc, sc->sc_enaddr,
5588 vlanidp->vid, IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5589 ixl_remove_macvlan(sc, etherbroadcastaddr,
5590 vlanidp->vid, IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5591 }
5592 ETHER_UNLOCK(ec);
5593
5594 ixl_add_macvlan(sc, sc->sc_enaddr, 0,
5595 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
5596 ixl_add_macvlan(sc, etherbroadcastaddr, 0,
5597 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
5598 }
5599
5600 static int
5601 ixl_update_macvlan(struct ixl_softc *sc)
5602 {
5603 int rv = 0;
5604 int next_ec_capenable = sc->sc_ec.ec_capenable;
5605
5606 if (ISSET(next_ec_capenable, ETHERCAP_VLAN_HWFILTER)) {
5607 rv = ixl_setup_vlan_hwfilter(sc);
5608 if (rv != 0)
5609 ixl_teardown_vlan_hwfilter(sc);
5610 } else {
5611 ixl_teardown_vlan_hwfilter(sc);
5612 }
5613
5614 return rv;
5615 }
5616
5617 static int
5618 ixl_ifflags_cb(struct ethercom *ec)
5619 {
5620 struct ifnet *ifp = &ec->ec_if;
5621 struct ixl_softc *sc = ifp->if_softc;
5622 int rv, change;
5623
5624 mutex_enter(&sc->sc_cfg_lock);
5625
5626 change = ec->ec_capenable ^ sc->sc_cur_ec_capenable;
5627
5628 if (ISSET(change, ETHERCAP_VLAN_HWTAGGING)) {
5629 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWTAGGING;
5630 rv = ENETRESET;
5631 goto out;
5632 }
5633
5634 if (ISSET(change, ETHERCAP_VLAN_HWFILTER)) {
5635 rv = ixl_update_macvlan(sc);
5636 if (rv == 0) {
5637 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWFILTER;
5638 } else {
5639 CLR(ec->ec_capenable, ETHERCAP_VLAN_HWFILTER);
5640 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER);
5641 }
5642 }
5643
5644 rv = ixl_iff(sc);
5645 out:
5646 mutex_exit(&sc->sc_cfg_lock);
5647
5648 return rv;
5649 }
5650
5651 static int
5652 ixl_set_link_status(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
5653 {
5654 const struct ixl_aq_link_status *status;
5655 const struct ixl_phy_type *itype;
5656
5657 uint64_t ifm_active = IFM_ETHER;
5658 uint64_t ifm_status = IFM_AVALID;
5659 int link_state = LINK_STATE_DOWN;
5660 uint64_t baudrate = 0;
5661
5662 status = (const struct ixl_aq_link_status *)iaq->iaq_param;
5663 if (!ISSET(status->link_info, IXL_AQ_LINK_UP_FUNCTION)) {
5664 ifm_active |= IFM_NONE;
5665 goto done;
5666 }
5667
5668 ifm_active |= IFM_FDX;
5669 ifm_status |= IFM_ACTIVE;
5670 link_state = LINK_STATE_UP;
5671
5672 itype = ixl_search_phy_type(status->phy_type);
5673 if (itype != NULL)
5674 ifm_active |= itype->ifm_type;
5675
5676 if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_TX))
5677 ifm_active |= IFM_ETH_TXPAUSE;
5678 if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_RX))
5679 ifm_active |= IFM_ETH_RXPAUSE;
5680
5681 baudrate = ixl_search_link_speed(status->link_speed);
5682
5683 done:
5684 /* NET_ASSERT_LOCKED() except during attach */
5685 sc->sc_media_active = ifm_active;
5686 sc->sc_media_status = ifm_status;
5687
5688 sc->sc_ec.ec_if.if_baudrate = baudrate;
5689
5690 return link_state;
5691 }
5692
5693 static int
5694 ixl_establish_intx(struct ixl_softc *sc)
5695 {
5696 pci_chipset_tag_t pc = sc->sc_pa.pa_pc;
5697 pci_intr_handle_t *intr;
5698 char xnamebuf[32];
5699 char intrbuf[PCI_INTRSTR_LEN];
5700 char const *intrstr;
5701
5702 KASSERT(sc->sc_nintrs == 1);
5703
5704 intr = &sc->sc_ihp[0];
5705
5706 intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf));
5707 snprintf(xnamebuf, sizeof(xnamebuf), "%s:legacy",
5708 device_xname(sc->sc_dev));
5709
5710 sc->sc_ihs[0] = pci_intr_establish_xname(pc, *intr, IPL_NET, ixl_intr,
5711 sc, xnamebuf);
5712
5713 if (sc->sc_ihs[0] == NULL) {
5714 aprint_error_dev(sc->sc_dev,
5715 "unable to establish interrupt at %s\n", intrstr);
5716 return -1;
5717 }
5718
5719 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
5720 return 0;
5721 }
5722
5723 static int
5724 ixl_establish_msix(struct ixl_softc *sc)
5725 {
5726 pci_chipset_tag_t pc = sc->sc_pa.pa_pc;
5727 kcpuset_t *affinity;
5728 unsigned int vector = 0;
5729 unsigned int i;
5730 int affinity_to, r;
5731 char xnamebuf[32];
5732 char intrbuf[PCI_INTRSTR_LEN];
5733 char const *intrstr;
5734
5735 kcpuset_create(&affinity, false);
5736
5737 /* the "other" intr is mapped to vector 0 */
5738 vector = 0;
5739 intrstr = pci_intr_string(pc, sc->sc_ihp[vector],
5740 intrbuf, sizeof(intrbuf));
5741 snprintf(xnamebuf, sizeof(xnamebuf), "%s others",
5742 device_xname(sc->sc_dev));
5743 sc->sc_ihs[vector] = pci_intr_establish_xname(pc,
5744 sc->sc_ihp[vector], IPL_NET, ixl_other_intr,
5745 sc, xnamebuf);
5746 if (sc->sc_ihs[vector] == NULL) {
5747 aprint_error_dev(sc->sc_dev,
5748 "unable to establish interrupt at %s\n", intrstr);
5749 goto fail;
5750 }
5751
5752 aprint_normal_dev(sc->sc_dev, "other interrupt at %s", intrstr);
5753
5754 affinity_to = ncpu > (int)sc->sc_nqueue_pairs_max ? 1 : 0;
5755 affinity_to = (affinity_to + sc->sc_nqueue_pairs_max) % ncpu;
5756
5757 kcpuset_zero(affinity);
5758 kcpuset_set(affinity, affinity_to);
5759 r = interrupt_distribute(sc->sc_ihs[vector], affinity, NULL);
5760 if (r == 0) {
5761 aprint_normal(", affinity to %u", affinity_to);
5762 }
5763 aprint_normal("\n");
5764 vector++;
5765
5766 sc->sc_msix_vector_queue = vector;
5767 affinity_to = ncpu > (int)sc->sc_nqueue_pairs_max ? 1 : 0;
5768
5769 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
5770 intrstr = pci_intr_string(pc, sc->sc_ihp[vector],
5771 intrbuf, sizeof(intrbuf));
5772 snprintf(xnamebuf, sizeof(xnamebuf), "%s TXRX%d",
5773 device_xname(sc->sc_dev), i);
5774
5775 sc->sc_ihs[vector] = pci_intr_establish_xname(pc,
5776 sc->sc_ihp[vector], IPL_NET, ixl_queue_intr,
5777 (void *)&sc->sc_qps[i], xnamebuf);
5778
5779 if (sc->sc_ihs[vector] == NULL) {
5780 aprint_error_dev(sc->sc_dev,
5781 "unable to establish interrupt at %s\n", intrstr);
5782 goto fail;
5783 }
5784
5785 aprint_normal_dev(sc->sc_dev,
5786 		    "for TXRX%d interrupt at %s", i, intrstr);
5787
5788 kcpuset_zero(affinity);
5789 kcpuset_set(affinity, affinity_to);
5790 r = interrupt_distribute(sc->sc_ihs[vector], affinity, NULL);
5791 if (r == 0) {
5792 aprint_normal(", affinity to %u", affinity_to);
5793 affinity_to = (affinity_to + 1) % ncpu;
5794 }
5795 aprint_normal("\n");
5796 vector++;
5797 }
5798
5799 kcpuset_destroy(affinity);
5800
5801 return 0;
5802 fail:
5803 for (i = 0; i < vector; i++) {
5804 pci_intr_disestablish(pc, sc->sc_ihs[i]);
5805 }
5806
5807 	sc->sc_msix_vector_queue = 0;
5809 kcpuset_destroy(affinity);
5810
5811 return -1;
5812 }
5813
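/*
 * Wire each RX/TX queue pair to its interrupt: program the per-queue linked
 * lists and the RQCTL/TQCTL cause registers so that every pair raises its
 * assigned MSI-X vector, or the single shared vector when running with
 * INTx/MSI.
 */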
5814 static void
5815 ixl_config_queue_intr(struct ixl_softc *sc)
5816 {
5817 unsigned int i, vector;
5818
5819 if (sc->sc_intrtype == PCI_INTR_TYPE_MSIX) {
5820 vector = sc->sc_msix_vector_queue;
5821 } else {
5822 vector = I40E_INTR_NOTX_INTR;
5823
5824 ixl_wr(sc, I40E_PFINT_LNKLST0,
5825 (I40E_INTR_NOTX_QUEUE <<
5826 I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
5827 (I40E_QUEUE_TYPE_RX <<
5828 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
5829 }
5830
5831 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
5832 ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), 0);
5833 ixl_flush(sc);
5834
5835 ixl_wr(sc, I40E_PFINT_LNKLSTN(i),
5836 ((i) << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
5837 (I40E_QUEUE_TYPE_RX <<
5838 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
5839
5840 ixl_wr(sc, I40E_QINT_RQCTL(i),
5841 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
5842 (I40E_ITR_INDEX_RX <<
5843 I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
5844 (I40E_INTR_NOTX_RX_QUEUE <<
5845 I40E_QINT_RQCTL_MSIX0_INDX_SHIFT) |
5846 (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
5847 (I40E_QUEUE_TYPE_TX <<
5848 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
5849 I40E_QINT_RQCTL_CAUSE_ENA_MASK);
5850
5851 ixl_wr(sc, I40E_QINT_TQCTL(i),
5852 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
5853 (I40E_ITR_INDEX_TX <<
5854 I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
5855 (I40E_INTR_NOTX_TX_QUEUE <<
5856 I40E_QINT_TQCTL_MSIX0_INDX_SHIFT) |
5857 (I40E_QUEUE_TYPE_EOL <<
5858 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
5859 (I40E_QUEUE_TYPE_RX <<
5860 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) |
5861 I40E_QINT_TQCTL_CAUSE_ENA_MASK);
5862
5863 if (sc->sc_intrtype == PCI_INTR_TYPE_MSIX)
5864 vector++;
5865 }
5866 ixl_flush(sc);
5867
5868 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_RX), 0x7a);
5869 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_TX), 0x7a);
5870 ixl_flush(sc);
5871 }
5872
5873 static void
5874 ixl_config_other_intr(struct ixl_softc *sc)
5875 {
5876 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0);
5877 (void)ixl_rd(sc, I40E_PFINT_ICR0);
5878
5879 ixl_wr(sc, I40E_PFINT_ICR0_ENA,
5880 I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
5881 I40E_PFINT_ICR0_ENA_GRST_MASK |
5882 I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
5883 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
5884 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
5885 I40E_PFINT_ICR0_ENA_VFLR_MASK |
5886 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
5887 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
5888 I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK);
5889
5890 ixl_wr(sc, I40E_PFINT_LNKLST0, 0x7FF);
5891 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_OTHER), 0);
5892 ixl_wr(sc, I40E_PFINT_STAT_CTL0,
5893 (I40E_ITR_INDEX_OTHER <<
5894 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT));
5895 ixl_flush(sc);
5896 }
5897
5898 static int
5899 ixl_setup_interrupts(struct ixl_softc *sc)
5900 {
5901 struct pci_attach_args *pa = &sc->sc_pa;
5902 pci_intr_type_t max_type, intr_type;
5903 int counts[PCI_INTR_TYPE_SIZE];
5904 int error;
5905 unsigned int i;
5906 bool retry;
5907
5908 memset(counts, 0, sizeof(counts));
5909 max_type = PCI_INTR_TYPE_MSIX;
5910 /* QPs + other interrupt */
5911 counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueue_pairs_max + 1;
5912 counts[PCI_INTR_TYPE_INTX] = 1;
5913
5914 if (ixl_param_nomsix)
5915 counts[PCI_INTR_TYPE_MSIX] = 0;
5916
5917 do {
5918 retry = false;
5919 error = pci_intr_alloc(pa, &sc->sc_ihp, counts, max_type);
5920 if (error != 0) {
5921 aprint_error_dev(sc->sc_dev,
5922 "couldn't map interrupt\n");
5923 break;
5924 }
5925
5926 intr_type = pci_intr_type(pa->pa_pc, sc->sc_ihp[0]);
5927 sc->sc_nintrs = counts[intr_type];
5928 KASSERT(sc->sc_nintrs > 0);
5929
5930 for (i = 0; i < sc->sc_nintrs; i++) {
5931 pci_intr_setattr(pa->pa_pc, &sc->sc_ihp[i],
5932 PCI_INTR_MPSAFE, true);
5933 }
5934
5935 sc->sc_ihs = kmem_alloc(sizeof(sc->sc_ihs[0]) * sc->sc_nintrs,
5936 KM_SLEEP);
5937
5938 if (intr_type == PCI_INTR_TYPE_MSIX) {
5939 error = ixl_establish_msix(sc);
5940 if (error) {
5941 counts[PCI_INTR_TYPE_MSIX] = 0;
5942 retry = true;
5943 }
5944 } else if (intr_type == PCI_INTR_TYPE_INTX) {
5945 error = ixl_establish_intx(sc);
5946 } else {
5947 error = -1;
5948 }
5949
5950 if (error) {
5951 kmem_free(sc->sc_ihs,
5952 sizeof(sc->sc_ihs[0]) * sc->sc_nintrs);
5953 pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs);
5954 } else {
5955 sc->sc_intrtype = intr_type;
5956 }
5957 } while (retry);
5958
5959 return error;
5960 }
5961
5962 static void
5963 ixl_teardown_interrupts(struct ixl_softc *sc)
5964 {
5965 struct pci_attach_args *pa = &sc->sc_pa;
5966 unsigned int i;
5967
5968 for (i = 0; i < sc->sc_nintrs; i++) {
5969 pci_intr_disestablish(pa->pa_pc, sc->sc_ihs[i]);
5970 }
5971
5972 pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs);
5973
5974 kmem_free(sc->sc_ihs, sizeof(sc->sc_ihs[0]) * sc->sc_nintrs);
5975 sc->sc_ihs = NULL;
5976 sc->sc_nintrs = 0;
5977 }
5978
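/*
 * ixl_setup_stats() attaches the evcnt(9) counters: a per-queue-pair
 * set (m_defrag activity, pcq drops, deferred transmits, interrupt and
 * softint/workqueue handling, mbuf allocation failures), the admin and
 * error event counters, and the port- and VSI-wide hardware statistics
 * that ixl_stats_update() later fills in.  The periodic stats callout
 * and work item are initialized here but not yet scheduled.
 */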
5979 static int
5980 ixl_setup_stats(struct ixl_softc *sc)
5981 {
5982 struct ixl_queue_pair *qp;
5983 struct ixl_tx_ring *txr;
5984 struct ixl_rx_ring *rxr;
5985 struct ixl_stats_counters *isc;
5986 unsigned int i;
5987
5988 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
5989 qp = &sc->sc_qps[i];
5990 txr = qp->qp_txr;
5991 rxr = qp->qp_rxr;
5992
5993 evcnt_attach_dynamic(&txr->txr_defragged, EVCNT_TYPE_MISC,
5994 		    NULL, qp->qp_name, "m_defrag succeeded");
5995 evcnt_attach_dynamic(&txr->txr_defrag_failed, EVCNT_TYPE_MISC,
5996 		    NULL, qp->qp_name, "m_defrag failed");
5997 evcnt_attach_dynamic(&txr->txr_pcqdrop, EVCNT_TYPE_MISC,
5998 NULL, qp->qp_name, "Dropped in pcq");
5999 evcnt_attach_dynamic(&txr->txr_transmitdef, EVCNT_TYPE_MISC,
6000 NULL, qp->qp_name, "Deferred transmit");
6001 evcnt_attach_dynamic(&txr->txr_intr, EVCNT_TYPE_INTR,
6002 NULL, qp->qp_name, "Interrupt on queue");
6003 evcnt_attach_dynamic(&txr->txr_defer, EVCNT_TYPE_MISC,
6004 NULL, qp->qp_name, "Handled queue in softint/workqueue");
6005
6006 evcnt_attach_dynamic(&rxr->rxr_mgethdr_failed, EVCNT_TYPE_MISC,
6007 NULL, qp->qp_name, "MGETHDR failed");
6008 evcnt_attach_dynamic(&rxr->rxr_mgetcl_failed, EVCNT_TYPE_MISC,
6009 NULL, qp->qp_name, "MCLGET failed");
6010 evcnt_attach_dynamic(&rxr->rxr_mbuf_load_failed,
6011 EVCNT_TYPE_MISC, NULL, qp->qp_name,
6012 "bus_dmamap_load_mbuf failed");
6013 evcnt_attach_dynamic(&rxr->rxr_intr, EVCNT_TYPE_INTR,
6014 NULL, qp->qp_name, "Interrupt on queue");
6015 evcnt_attach_dynamic(&rxr->rxr_defer, EVCNT_TYPE_MISC,
6016 NULL, qp->qp_name, "Handled queue in softint/workqueue");
6017 }
6018
6019 evcnt_attach_dynamic(&sc->sc_event_atq, EVCNT_TYPE_INTR,
6020 NULL, device_xname(sc->sc_dev), "Interrupt for other events");
6021 evcnt_attach_dynamic(&sc->sc_event_link, EVCNT_TYPE_MISC,
6022 NULL, device_xname(sc->sc_dev), "Link status event");
6023 evcnt_attach_dynamic(&sc->sc_event_ecc_err, EVCNT_TYPE_MISC,
6024 NULL, device_xname(sc->sc_dev), "ECC error");
6025 evcnt_attach_dynamic(&sc->sc_event_pci_exception, EVCNT_TYPE_MISC,
6026 NULL, device_xname(sc->sc_dev), "PCI exception");
6027 evcnt_attach_dynamic(&sc->sc_event_crit_err, EVCNT_TYPE_MISC,
6028 NULL, device_xname(sc->sc_dev), "Critical error");
6029
6030 isc = &sc->sc_stats_counters;
6031 evcnt_attach_dynamic(&isc->isc_crc_errors, EVCNT_TYPE_MISC,
6032 NULL, device_xname(sc->sc_dev), "CRC errors");
6033 evcnt_attach_dynamic(&isc->isc_illegal_bytes, EVCNT_TYPE_MISC,
6034 NULL, device_xname(sc->sc_dev), "Illegal bytes");
6035 evcnt_attach_dynamic(&isc->isc_mac_local_faults, EVCNT_TYPE_MISC,
6036 NULL, device_xname(sc->sc_dev), "Mac local faults");
6037 evcnt_attach_dynamic(&isc->isc_mac_remote_faults, EVCNT_TYPE_MISC,
6038 NULL, device_xname(sc->sc_dev), "Mac remote faults");
6039 evcnt_attach_dynamic(&isc->isc_link_xon_rx, EVCNT_TYPE_MISC,
6040 NULL, device_xname(sc->sc_dev), "Rx xon");
6041 evcnt_attach_dynamic(&isc->isc_link_xon_tx, EVCNT_TYPE_MISC,
6042 NULL, device_xname(sc->sc_dev), "Tx xon");
6043 evcnt_attach_dynamic(&isc->isc_link_xoff_rx, EVCNT_TYPE_MISC,
6044 NULL, device_xname(sc->sc_dev), "Rx xoff");
6045 evcnt_attach_dynamic(&isc->isc_link_xoff_tx, EVCNT_TYPE_MISC,
6046 NULL, device_xname(sc->sc_dev), "Tx xoff");
6047 evcnt_attach_dynamic(&isc->isc_rx_fragments, EVCNT_TYPE_MISC,
6048 NULL, device_xname(sc->sc_dev), "Rx fragments");
6049 evcnt_attach_dynamic(&isc->isc_rx_jabber, EVCNT_TYPE_MISC,
6050 NULL, device_xname(sc->sc_dev), "Rx jabber");
6051
6052 evcnt_attach_dynamic(&isc->isc_rx_size_64, EVCNT_TYPE_MISC,
6053 NULL, device_xname(sc->sc_dev), "Rx size 64");
6054 evcnt_attach_dynamic(&isc->isc_rx_size_127, EVCNT_TYPE_MISC,
6055 NULL, device_xname(sc->sc_dev), "Rx size 127");
6056 evcnt_attach_dynamic(&isc->isc_rx_size_255, EVCNT_TYPE_MISC,
6057 NULL, device_xname(sc->sc_dev), "Rx size 255");
6058 evcnt_attach_dynamic(&isc->isc_rx_size_511, EVCNT_TYPE_MISC,
6059 NULL, device_xname(sc->sc_dev), "Rx size 511");
6060 evcnt_attach_dynamic(&isc->isc_rx_size_1023, EVCNT_TYPE_MISC,
6061 NULL, device_xname(sc->sc_dev), "Rx size 1023");
6062 evcnt_attach_dynamic(&isc->isc_rx_size_1522, EVCNT_TYPE_MISC,
6063 NULL, device_xname(sc->sc_dev), "Rx size 1522");
6064 evcnt_attach_dynamic(&isc->isc_rx_size_big, EVCNT_TYPE_MISC,
6065 NULL, device_xname(sc->sc_dev), "Rx jumbo packets");
6066 evcnt_attach_dynamic(&isc->isc_rx_undersize, EVCNT_TYPE_MISC,
6067 NULL, device_xname(sc->sc_dev), "Rx under size");
6068 evcnt_attach_dynamic(&isc->isc_rx_oversize, EVCNT_TYPE_MISC,
6069 NULL, device_xname(sc->sc_dev), "Rx over size");
6070
6071 evcnt_attach_dynamic(&isc->isc_rx_bytes, EVCNT_TYPE_MISC,
6072 NULL, device_xname(sc->sc_dev), "Rx bytes / port");
6073 evcnt_attach_dynamic(&isc->isc_rx_discards, EVCNT_TYPE_MISC,
6074 NULL, device_xname(sc->sc_dev), "Rx discards / port");
6075 evcnt_attach_dynamic(&isc->isc_rx_unicast, EVCNT_TYPE_MISC,
6076 NULL, device_xname(sc->sc_dev), "Rx unicast / port");
6077 evcnt_attach_dynamic(&isc->isc_rx_multicast, EVCNT_TYPE_MISC,
6078 NULL, device_xname(sc->sc_dev), "Rx multicast / port");
6079 evcnt_attach_dynamic(&isc->isc_rx_broadcast, EVCNT_TYPE_MISC,
6080 NULL, device_xname(sc->sc_dev), "Rx broadcast / port");
6081
6082 evcnt_attach_dynamic(&isc->isc_vsi_rx_bytes, EVCNT_TYPE_MISC,
6083 NULL, device_xname(sc->sc_dev), "Rx bytes / vsi");
6084 evcnt_attach_dynamic(&isc->isc_vsi_rx_discards, EVCNT_TYPE_MISC,
6085 NULL, device_xname(sc->sc_dev), "Rx discard / vsi");
6086 evcnt_attach_dynamic(&isc->isc_vsi_rx_unicast, EVCNT_TYPE_MISC,
6087 NULL, device_xname(sc->sc_dev), "Rx unicast / vsi");
6088 evcnt_attach_dynamic(&isc->isc_vsi_rx_multicast, EVCNT_TYPE_MISC,
6089 NULL, device_xname(sc->sc_dev), "Rx multicast / vsi");
6090 evcnt_attach_dynamic(&isc->isc_vsi_rx_broadcast, EVCNT_TYPE_MISC,
6091 NULL, device_xname(sc->sc_dev), "Rx broadcast / vsi");
6092
6093 evcnt_attach_dynamic(&isc->isc_tx_size_64, EVCNT_TYPE_MISC,
6094 NULL, device_xname(sc->sc_dev), "Tx size 64");
6095 evcnt_attach_dynamic(&isc->isc_tx_size_127, EVCNT_TYPE_MISC,
6096 NULL, device_xname(sc->sc_dev), "Tx size 127");
6097 evcnt_attach_dynamic(&isc->isc_tx_size_255, EVCNT_TYPE_MISC,
6098 NULL, device_xname(sc->sc_dev), "Tx size 255");
6099 evcnt_attach_dynamic(&isc->isc_tx_size_511, EVCNT_TYPE_MISC,
6100 NULL, device_xname(sc->sc_dev), "Tx size 511");
6101 evcnt_attach_dynamic(&isc->isc_tx_size_1023, EVCNT_TYPE_MISC,
6102 NULL, device_xname(sc->sc_dev), "Tx size 1023");
6103 evcnt_attach_dynamic(&isc->isc_tx_size_1522, EVCNT_TYPE_MISC,
6104 NULL, device_xname(sc->sc_dev), "Tx size 1522");
6105 evcnt_attach_dynamic(&isc->isc_tx_size_big, EVCNT_TYPE_MISC,
6106 NULL, device_xname(sc->sc_dev), "Tx jumbo packets");
6107
6108 evcnt_attach_dynamic(&isc->isc_tx_bytes, EVCNT_TYPE_MISC,
6109 NULL, device_xname(sc->sc_dev), "Tx bytes / port");
6110 evcnt_attach_dynamic(&isc->isc_tx_dropped_link_down, EVCNT_TYPE_MISC,
6111 NULL, device_xname(sc->sc_dev),
6112 "Tx dropped due to link down / port");
6113 evcnt_attach_dynamic(&isc->isc_tx_unicast, EVCNT_TYPE_MISC,
6114 NULL, device_xname(sc->sc_dev), "Tx unicast / port");
6115 evcnt_attach_dynamic(&isc->isc_tx_multicast, EVCNT_TYPE_MISC,
6116 NULL, device_xname(sc->sc_dev), "Tx multicast / port");
6117 evcnt_attach_dynamic(&isc->isc_tx_broadcast, EVCNT_TYPE_MISC,
6118 NULL, device_xname(sc->sc_dev), "Tx broadcast / port");
6119
6120 evcnt_attach_dynamic(&isc->isc_vsi_tx_bytes, EVCNT_TYPE_MISC,
6121 NULL, device_xname(sc->sc_dev), "Tx bytes / vsi");
6122 evcnt_attach_dynamic(&isc->isc_vsi_tx_errors, EVCNT_TYPE_MISC,
6123 NULL, device_xname(sc->sc_dev), "Tx errors / vsi");
6124 evcnt_attach_dynamic(&isc->isc_vsi_tx_unicast, EVCNT_TYPE_MISC,
6125 NULL, device_xname(sc->sc_dev), "Tx unicast / vsi");
6126 evcnt_attach_dynamic(&isc->isc_vsi_tx_multicast, EVCNT_TYPE_MISC,
6127 NULL, device_xname(sc->sc_dev), "Tx multicast / vsi");
6128 evcnt_attach_dynamic(&isc->isc_vsi_tx_broadcast, EVCNT_TYPE_MISC,
6129 NULL, device_xname(sc->sc_dev), "Tx broadcast / vsi");
6130
6131 sc->sc_stats_intval = ixl_param_stats_interval;
6132 callout_init(&sc->sc_stats_callout, CALLOUT_MPSAFE);
6133 callout_setfunc(&sc->sc_stats_callout, ixl_stats_callout, sc);
6134 ixl_work_set(&sc->sc_stats_task, ixl_stats_update, sc);
6135
6136 return 0;
6137 }
6138
6139 static void
6140 ixl_teardown_stats(struct ixl_softc *sc)
6141 {
6142 struct ixl_tx_ring *txr;
6143 struct ixl_rx_ring *rxr;
6144 struct ixl_stats_counters *isc;
6145 unsigned int i;
6146
6147 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
6148 txr = sc->sc_qps[i].qp_txr;
6149 rxr = sc->sc_qps[i].qp_rxr;
6150
6151 evcnt_detach(&txr->txr_defragged);
6152 evcnt_detach(&txr->txr_defrag_failed);
6153 evcnt_detach(&txr->txr_pcqdrop);
6154 evcnt_detach(&txr->txr_transmitdef);
6155 evcnt_detach(&txr->txr_intr);
6156 evcnt_detach(&txr->txr_defer);
6157
6158 evcnt_detach(&rxr->rxr_mgethdr_failed);
6159 evcnt_detach(&rxr->rxr_mgetcl_failed);
6160 evcnt_detach(&rxr->rxr_mbuf_load_failed);
6161 evcnt_detach(&rxr->rxr_intr);
6162 evcnt_detach(&rxr->rxr_defer);
6163 }
6164
6165 isc = &sc->sc_stats_counters;
6166 evcnt_detach(&isc->isc_crc_errors);
6167 evcnt_detach(&isc->isc_illegal_bytes);
6168 evcnt_detach(&isc->isc_mac_local_faults);
6169 evcnt_detach(&isc->isc_mac_remote_faults);
6170 evcnt_detach(&isc->isc_link_xon_rx);
6171 evcnt_detach(&isc->isc_link_xon_tx);
6172 evcnt_detach(&isc->isc_link_xoff_rx);
6173 evcnt_detach(&isc->isc_link_xoff_tx);
6174 evcnt_detach(&isc->isc_rx_fragments);
6175 evcnt_detach(&isc->isc_rx_jabber);
6176 evcnt_detach(&isc->isc_rx_bytes);
6177 evcnt_detach(&isc->isc_rx_discards);
6178 evcnt_detach(&isc->isc_rx_unicast);
6179 evcnt_detach(&isc->isc_rx_multicast);
6180 evcnt_detach(&isc->isc_rx_broadcast);
6181 evcnt_detach(&isc->isc_rx_size_64);
6182 evcnt_detach(&isc->isc_rx_size_127);
6183 evcnt_detach(&isc->isc_rx_size_255);
6184 evcnt_detach(&isc->isc_rx_size_511);
6185 evcnt_detach(&isc->isc_rx_size_1023);
6186 evcnt_detach(&isc->isc_rx_size_1522);
6187 evcnt_detach(&isc->isc_rx_size_big);
6188 evcnt_detach(&isc->isc_rx_undersize);
6189 evcnt_detach(&isc->isc_rx_oversize);
6190 evcnt_detach(&isc->isc_tx_bytes);
6191 evcnt_detach(&isc->isc_tx_dropped_link_down);
6192 evcnt_detach(&isc->isc_tx_unicast);
6193 evcnt_detach(&isc->isc_tx_multicast);
6194 evcnt_detach(&isc->isc_tx_broadcast);
6195 evcnt_detach(&isc->isc_tx_size_64);
6196 evcnt_detach(&isc->isc_tx_size_127);
6197 evcnt_detach(&isc->isc_tx_size_255);
6198 evcnt_detach(&isc->isc_tx_size_511);
6199 evcnt_detach(&isc->isc_tx_size_1023);
6200 evcnt_detach(&isc->isc_tx_size_1522);
6201 evcnt_detach(&isc->isc_tx_size_big);
6202 evcnt_detach(&isc->isc_vsi_rx_discards);
6203 evcnt_detach(&isc->isc_vsi_rx_bytes);
6204 evcnt_detach(&isc->isc_vsi_rx_unicast);
6205 evcnt_detach(&isc->isc_vsi_rx_multicast);
6206 evcnt_detach(&isc->isc_vsi_rx_broadcast);
6207 evcnt_detach(&isc->isc_vsi_tx_errors);
6208 evcnt_detach(&isc->isc_vsi_tx_bytes);
6209 evcnt_detach(&isc->isc_vsi_tx_unicast);
6210 evcnt_detach(&isc->isc_vsi_tx_multicast);
6211 evcnt_detach(&isc->isc_vsi_tx_broadcast);
6212
6213 evcnt_detach(&sc->sc_event_atq);
6214 evcnt_detach(&sc->sc_event_link);
6215 evcnt_detach(&sc->sc_event_ecc_err);
6216 evcnt_detach(&sc->sc_event_pci_exception);
6217 evcnt_detach(&sc->sc_event_crit_err);
6218
6219 callout_destroy(&sc->sc_stats_callout);
6220 }
6221
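/*
 * The stats callout itself does no register access; it just queues the
 * stats work item on the driver workqueue and rearms itself with the
 * configured sc_stats_intval interval.
 */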
6222 static void
6223 ixl_stats_callout(void *xsc)
6224 {
6225 struct ixl_softc *sc = xsc;
6226
6227 ixl_work_add(sc->sc_workq, &sc->sc_stats_task);
6228 callout_schedule(&sc->sc_stats_callout, mstohz(sc->sc_stats_intval));
6229 }
6230
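/*
 * ixl_stat_delta() converts the free-running 32-bit (reg_hi == 0) or
 * 48-bit hardware counters into per-interval deltas, keeping the last
 * raw value in *offset.  A hedged sketch of the rollover handling for
 * a 32-bit counter:
 *
 *	previous snapshot (*offset) = 0xfffffff0
 *	current read (value)        = 0x00000010
 *	value < *offset, so delta   = value + (1ULL << 32) - *offset
 *	                            = 0x20
 *
 * Until has_offset is true only the baseline snapshot is recorded and
 * the returned delta is 0.
 */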
6231 static uint64_t
6232 ixl_stat_delta(struct ixl_softc *sc, uint32_t reg_hi, uint32_t reg_lo,
6233 uint64_t *offset, bool has_offset)
6234 {
6235 uint64_t value, delta;
6236 int bitwidth;
6237
6238 bitwidth = reg_hi == 0 ? 32 : 48;
6239
6240 value = ixl_rd(sc, reg_lo);
6241
6242 if (bitwidth > 32) {
6243 value |= ((uint64_t)ixl_rd(sc, reg_hi) << 32);
6244 }
6245
6246 if (__predict_true(has_offset)) {
6247 delta = value;
6248 if (value < *offset)
6249 delta += ((uint64_t)1 << bitwidth);
6250 delta -= *offset;
6251 } else {
6252 delta = 0;
6253 }
6254 atomic_swap_64(offset, value);
6255
6256 return delta;
6257 }
6258
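/*
 * ixl_stats_update() runs from the workqueue and charges the deltas of
 * the per-port GLPRT_* and per-VSI GLV_* counters to the evcnts
 * attached in ixl_setup_stats().  On the first pass isc_has_offset is
 * presumably still false, so only the baseline snapshots are taken.
 */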
6259 static void
6260 ixl_stats_update(void *xsc)
6261 {
6262 struct ixl_softc *sc = xsc;
6263 struct ixl_stats_counters *isc;
6264 uint64_t delta;
6265
6266 isc = &sc->sc_stats_counters;
6267
6268 /* errors */
6269 delta = ixl_stat_delta(sc,
6270 0, I40E_GLPRT_CRCERRS(sc->sc_port),
6271 &isc->isc_crc_errors_offset, isc->isc_has_offset);
6272 atomic_add_64(&isc->isc_crc_errors.ev_count, delta);
6273
6274 delta = ixl_stat_delta(sc,
6275 0, I40E_GLPRT_ILLERRC(sc->sc_port),
6276 &isc->isc_illegal_bytes_offset, isc->isc_has_offset);
6277 atomic_add_64(&isc->isc_illegal_bytes.ev_count, delta);
6278
6279 /* rx */
6280 delta = ixl_stat_delta(sc,
6281 I40E_GLPRT_GORCH(sc->sc_port), I40E_GLPRT_GORCL(sc->sc_port),
6282 &isc->isc_rx_bytes_offset, isc->isc_has_offset);
6283 atomic_add_64(&isc->isc_rx_bytes.ev_count, delta);
6284
6285 delta = ixl_stat_delta(sc,
6286 0, I40E_GLPRT_RDPC(sc->sc_port),
6287 &isc->isc_rx_discards_offset, isc->isc_has_offset);
6288 atomic_add_64(&isc->isc_rx_discards.ev_count, delta);
6289
6290 delta = ixl_stat_delta(sc,
6291 I40E_GLPRT_UPRCH(sc->sc_port), I40E_GLPRT_UPRCL(sc->sc_port),
6292 &isc->isc_rx_unicast_offset, isc->isc_has_offset);
6293 atomic_add_64(&isc->isc_rx_unicast.ev_count, delta);
6294
6295 delta = ixl_stat_delta(sc,
6296 I40E_GLPRT_MPRCH(sc->sc_port), I40E_GLPRT_MPRCL(sc->sc_port),
6297 &isc->isc_rx_multicast_offset, isc->isc_has_offset);
6298 atomic_add_64(&isc->isc_rx_multicast.ev_count, delta);
6299
6300 delta = ixl_stat_delta(sc,
6301 I40E_GLPRT_BPRCH(sc->sc_port), I40E_GLPRT_BPRCL(sc->sc_port),
6302 &isc->isc_rx_broadcast_offset, isc->isc_has_offset);
6303 atomic_add_64(&isc->isc_rx_broadcast.ev_count, delta);
6304
6305 /* Packet size stats rx */
6306 delta = ixl_stat_delta(sc,
6307 I40E_GLPRT_PRC64H(sc->sc_port), I40E_GLPRT_PRC64L(sc->sc_port),
6308 &isc->isc_rx_size_64_offset, isc->isc_has_offset);
6309 atomic_add_64(&isc->isc_rx_size_64.ev_count, delta);
6310
6311 delta = ixl_stat_delta(sc,
6312 I40E_GLPRT_PRC127H(sc->sc_port), I40E_GLPRT_PRC127L(sc->sc_port),
6313 &isc->isc_rx_size_127_offset, isc->isc_has_offset);
6314 atomic_add_64(&isc->isc_rx_size_127.ev_count, delta);
6315
6316 delta = ixl_stat_delta(sc,
6317 I40E_GLPRT_PRC255H(sc->sc_port), I40E_GLPRT_PRC255L(sc->sc_port),
6318 &isc->isc_rx_size_255_offset, isc->isc_has_offset);
6319 atomic_add_64(&isc->isc_rx_size_255.ev_count, delta);
6320
6321 delta = ixl_stat_delta(sc,
6322 I40E_GLPRT_PRC511H(sc->sc_port), I40E_GLPRT_PRC511L(sc->sc_port),
6323 &isc->isc_rx_size_511_offset, isc->isc_has_offset);
6324 atomic_add_64(&isc->isc_rx_size_511.ev_count, delta);
6325
6326 delta = ixl_stat_delta(sc,
6327 I40E_GLPRT_PRC1023H(sc->sc_port), I40E_GLPRT_PRC1023L(sc->sc_port),
6328 &isc->isc_rx_size_1023_offset, isc->isc_has_offset);
6329 atomic_add_64(&isc->isc_rx_size_1023.ev_count, delta);
6330
6331 delta = ixl_stat_delta(sc,
6332 I40E_GLPRT_PRC1522H(sc->sc_port), I40E_GLPRT_PRC1522L(sc->sc_port),
6333 &isc->isc_rx_size_1522_offset, isc->isc_has_offset);
6334 atomic_add_64(&isc->isc_rx_size_1522.ev_count, delta);
6335
6336 delta = ixl_stat_delta(sc,
6337 I40E_GLPRT_PRC9522H(sc->sc_port), I40E_GLPRT_PRC9522L(sc->sc_port),
6338 &isc->isc_rx_size_big_offset, isc->isc_has_offset);
6339 atomic_add_64(&isc->isc_rx_size_big.ev_count, delta);
6340
6341 delta = ixl_stat_delta(sc,
6342 0, I40E_GLPRT_RUC(sc->sc_port),
6343 &isc->isc_rx_undersize_offset, isc->isc_has_offset);
6344 atomic_add_64(&isc->isc_rx_undersize.ev_count, delta);
6345
6346 delta = ixl_stat_delta(sc,
6347 0, I40E_GLPRT_ROC(sc->sc_port),
6348 &isc->isc_rx_oversize_offset, isc->isc_has_offset);
6349 atomic_add_64(&isc->isc_rx_oversize.ev_count, delta);
6350
6351 /* tx */
6352 delta = ixl_stat_delta(sc,
6353 I40E_GLPRT_GOTCH(sc->sc_port), I40E_GLPRT_GOTCL(sc->sc_port),
6354 &isc->isc_tx_bytes_offset, isc->isc_has_offset);
6355 atomic_add_64(&isc->isc_tx_bytes.ev_count, delta);
6356
6357 delta = ixl_stat_delta(sc,
6358 0, I40E_GLPRT_TDOLD(sc->sc_port),
6359 &isc->isc_tx_dropped_link_down_offset, isc->isc_has_offset);
6360 atomic_add_64(&isc->isc_tx_dropped_link_down.ev_count, delta);
6361
6362 delta = ixl_stat_delta(sc,
6363 I40E_GLPRT_UPTCH(sc->sc_port), I40E_GLPRT_UPTCL(sc->sc_port),
6364 &isc->isc_tx_unicast_offset, isc->isc_has_offset);
6365 atomic_add_64(&isc->isc_tx_unicast.ev_count, delta);
6366
6367 delta = ixl_stat_delta(sc,
6368 I40E_GLPRT_MPTCH(sc->sc_port), I40E_GLPRT_MPTCL(sc->sc_port),
6369 &isc->isc_tx_multicast_offset, isc->isc_has_offset);
6370 atomic_add_64(&isc->isc_tx_multicast.ev_count, delta);
6371
6372 delta = ixl_stat_delta(sc,
6373 I40E_GLPRT_BPTCH(sc->sc_port), I40E_GLPRT_BPTCL(sc->sc_port),
6374 &isc->isc_tx_broadcast_offset, isc->isc_has_offset);
6375 atomic_add_64(&isc->isc_tx_broadcast.ev_count, delta);
6376
6377 /* Packet size stats tx */
6378 delta = ixl_stat_delta(sc,
6379 	    I40E_GLPRT_PTC64H(sc->sc_port), I40E_GLPRT_PTC64L(sc->sc_port),
6380 &isc->isc_tx_size_64_offset, isc->isc_has_offset);
6381 atomic_add_64(&isc->isc_tx_size_64.ev_count, delta);
6382
6383 delta = ixl_stat_delta(sc,
6384 I40E_GLPRT_PTC127H(sc->sc_port), I40E_GLPRT_PTC127L(sc->sc_port),
6385 &isc->isc_tx_size_127_offset, isc->isc_has_offset);
6386 atomic_add_64(&isc->isc_tx_size_127.ev_count, delta);
6387
6388 delta = ixl_stat_delta(sc,
6389 I40E_GLPRT_PTC255H(sc->sc_port), I40E_GLPRT_PTC255L(sc->sc_port),
6390 &isc->isc_tx_size_255_offset, isc->isc_has_offset);
6391 atomic_add_64(&isc->isc_tx_size_255.ev_count, delta);
6392
6393 delta = ixl_stat_delta(sc,
6394 I40E_GLPRT_PTC511H(sc->sc_port), I40E_GLPRT_PTC511L(sc->sc_port),
6395 &isc->isc_tx_size_511_offset, isc->isc_has_offset);
6396 atomic_add_64(&isc->isc_tx_size_511.ev_count, delta);
6397
6398 delta = ixl_stat_delta(sc,
6399 I40E_GLPRT_PTC1023H(sc->sc_port), I40E_GLPRT_PTC1023L(sc->sc_port),
6400 &isc->isc_tx_size_1023_offset, isc->isc_has_offset);
6401 atomic_add_64(&isc->isc_tx_size_1023.ev_count, delta);
6402
6403 delta = ixl_stat_delta(sc,
6404 I40E_GLPRT_PTC1522H(sc->sc_port), I40E_GLPRT_PTC1522L(sc->sc_port),
6405 &isc->isc_tx_size_1522_offset, isc->isc_has_offset);
6406 atomic_add_64(&isc->isc_tx_size_1522.ev_count, delta);
6407
6408 delta = ixl_stat_delta(sc,
6409 I40E_GLPRT_PTC9522H(sc->sc_port), I40E_GLPRT_PTC9522L(sc->sc_port),
6410 &isc->isc_tx_size_big_offset, isc->isc_has_offset);
6411 atomic_add_64(&isc->isc_tx_size_big.ev_count, delta);
6412
6413 /* mac faults */
6414 delta = ixl_stat_delta(sc,
6415 0, I40E_GLPRT_MLFC(sc->sc_port),
6416 &isc->isc_mac_local_faults_offset, isc->isc_has_offset);
6417 atomic_add_64(&isc->isc_mac_local_faults.ev_count, delta);
6418
6419 delta = ixl_stat_delta(sc,
6420 0, I40E_GLPRT_MRFC(sc->sc_port),
6421 &isc->isc_mac_remote_faults_offset, isc->isc_has_offset);
6422 atomic_add_64(&isc->isc_mac_remote_faults.ev_count, delta);
6423
6424 /* Flow control (LFC) stats */
6425 delta = ixl_stat_delta(sc,
6426 0, I40E_GLPRT_LXONRXC(sc->sc_port),
6427 &isc->isc_link_xon_rx_offset, isc->isc_has_offset);
6428 atomic_add_64(&isc->isc_link_xon_rx.ev_count, delta);
6429
6430 delta = ixl_stat_delta(sc,
6431 0, I40E_GLPRT_LXONTXC(sc->sc_port),
6432 &isc->isc_link_xon_tx_offset, isc->isc_has_offset);
6433 atomic_add_64(&isc->isc_link_xon_tx.ev_count, delta);
6434
6435 delta = ixl_stat_delta(sc,
6436 0, I40E_GLPRT_LXOFFRXC(sc->sc_port),
6437 &isc->isc_link_xoff_rx_offset, isc->isc_has_offset);
6438 atomic_add_64(&isc->isc_link_xoff_rx.ev_count, delta);
6439
6440 delta = ixl_stat_delta(sc,
6441 0, I40E_GLPRT_LXOFFTXC(sc->sc_port),
6442 &isc->isc_link_xoff_tx_offset, isc->isc_has_offset);
6443 atomic_add_64(&isc->isc_link_xoff_tx.ev_count, delta);
6444
6445 /* fragments */
6446 delta = ixl_stat_delta(sc,
6447 0, I40E_GLPRT_RFC(sc->sc_port),
6448 &isc->isc_rx_fragments_offset, isc->isc_has_offset);
6449 atomic_add_64(&isc->isc_rx_fragments.ev_count, delta);
6450
6451 delta = ixl_stat_delta(sc,
6452 0, I40E_GLPRT_RJC(sc->sc_port),
6453 &isc->isc_rx_jabber_offset, isc->isc_has_offset);
6454 atomic_add_64(&isc->isc_rx_jabber.ev_count, delta);
6455
6456 /* VSI rx counters */
6457 delta = ixl_stat_delta(sc,
6458 0, I40E_GLV_RDPC(sc->sc_vsi_stat_counter_idx),
6459 &isc->isc_vsi_rx_discards_offset, isc->isc_has_offset);
6460 atomic_add_64(&isc->isc_vsi_rx_discards.ev_count, delta);
6461
6462 delta = ixl_stat_delta(sc,
6463 I40E_GLV_GORCH(sc->sc_vsi_stat_counter_idx),
6464 I40E_GLV_GORCL(sc->sc_vsi_stat_counter_idx),
6465 &isc->isc_vsi_rx_bytes_offset, isc->isc_has_offset);
6466 atomic_add_64(&isc->isc_vsi_rx_bytes.ev_count, delta);
6467
6468 delta = ixl_stat_delta(sc,
6469 I40E_GLV_UPRCH(sc->sc_vsi_stat_counter_idx),
6470 I40E_GLV_UPRCL(sc->sc_vsi_stat_counter_idx),
6471 &isc->isc_vsi_rx_unicast_offset, isc->isc_has_offset);
6472 atomic_add_64(&isc->isc_vsi_rx_unicast.ev_count, delta);
6473
6474 delta = ixl_stat_delta(sc,
6475 I40E_GLV_MPRCH(sc->sc_vsi_stat_counter_idx),
6476 I40E_GLV_MPRCL(sc->sc_vsi_stat_counter_idx),
6477 &isc->isc_vsi_rx_multicast_offset, isc->isc_has_offset);
6478 atomic_add_64(&isc->isc_vsi_rx_multicast.ev_count, delta);
6479
6480 delta = ixl_stat_delta(sc,
6481 I40E_GLV_BPRCH(sc->sc_vsi_stat_counter_idx),
6482 I40E_GLV_BPRCL(sc->sc_vsi_stat_counter_idx),
6483 &isc->isc_vsi_rx_broadcast_offset, isc->isc_has_offset);
6484 atomic_add_64(&isc->isc_vsi_rx_broadcast.ev_count, delta);
6485
6486 /* VSI tx counters */
6487 delta = ixl_stat_delta(sc,
6488 0, I40E_GLV_TEPC(sc->sc_vsi_stat_counter_idx),
6489 &isc->isc_vsi_tx_errors_offset, isc->isc_has_offset);
6490 atomic_add_64(&isc->isc_vsi_tx_errors.ev_count, delta);
6491
6492 delta = ixl_stat_delta(sc,
6493 I40E_GLV_GOTCH(sc->sc_vsi_stat_counter_idx),
6494 I40E_GLV_GOTCL(sc->sc_vsi_stat_counter_idx),
6495 &isc->isc_vsi_tx_bytes_offset, isc->isc_has_offset);
6496 atomic_add_64(&isc->isc_vsi_tx_bytes.ev_count, delta);
6497
6498 delta = ixl_stat_delta(sc,
6499 I40E_GLV_UPTCH(sc->sc_vsi_stat_counter_idx),
6500 I40E_GLV_UPTCL(sc->sc_vsi_stat_counter_idx),
6501 &isc->isc_vsi_tx_unicast_offset, isc->isc_has_offset);
6502 atomic_add_64(&isc->isc_vsi_tx_unicast.ev_count, delta);
6503
6504 delta = ixl_stat_delta(sc,
6505 I40E_GLV_MPTCH(sc->sc_vsi_stat_counter_idx),
6506 I40E_GLV_MPTCL(sc->sc_vsi_stat_counter_idx),
6507 &isc->isc_vsi_tx_multicast_offset, isc->isc_has_offset);
6508 atomic_add_64(&isc->isc_vsi_tx_multicast.ev_count, delta);
6509
6510 delta = ixl_stat_delta(sc,
6511 I40E_GLV_BPTCH(sc->sc_vsi_stat_counter_idx),
6512 I40E_GLV_BPTCL(sc->sc_vsi_stat_counter_idx),
6513 &isc->isc_vsi_tx_broadcast_offset, isc->isc_has_offset);
6514 atomic_add_64(&isc->isc_vsi_tx_broadcast.ev_count, delta);
6515 }
6516
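/*
 * ixl_setup_sysctls() creates a per-device node under CTL_HW with the
 * knobs below.  Assuming the device attached as ixl0, the resulting
 * tree would look roughly like:
 *
 *	hw.ixl0.txrx_workqueue		bool, read-write
 *	hw.ixl0.stats_interval		int, read-only, milliseconds
 *	hw.ixl0.rx.intr_process_limit	int, read-write
 *	hw.ixl0.rx.process_limit	int, read-write
 *	hw.ixl0.tx.intr_process_limit	int, read-write
 *	hw.ixl0.tx.process_limit	int, read-write
 */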
6517 static int
6518 ixl_setup_sysctls(struct ixl_softc *sc)
6519 {
6520 const char *devname;
6521 struct sysctllog **log;
6522 const struct sysctlnode *rnode, *rxnode, *txnode;
6523 int error;
6524
6525 log = &sc->sc_sysctllog;
6526 devname = device_xname(sc->sc_dev);
6527
6528 error = sysctl_createv(log, 0, NULL, &rnode,
6529 0, CTLTYPE_NODE, devname,
6530 SYSCTL_DESCR("ixl information and settings"),
6531 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
6532 if (error)
6533 goto out;
6534
6535 error = sysctl_createv(log, 0, &rnode, NULL,
6536 CTLFLAG_READWRITE, CTLTYPE_BOOL, "txrx_workqueue",
6537 SYSCTL_DESCR("Use workqueue for packet processing"),
6538 NULL, 0, &sc->sc_txrx_workqueue, 0, CTL_CREATE, CTL_EOL);
6539 if (error)
6540 goto out;
6541
6542 error = sysctl_createv(log, 0, &rnode, NULL,
6543 CTLFLAG_READONLY, CTLTYPE_INT, "stats_interval",
6544 SYSCTL_DESCR("Statistics collection interval in milliseconds"),
6545 NULL, 0, &sc->sc_stats_intval, 0, CTL_CREATE, CTL_EOL);
	if (error)
		goto out;
6546
6547 error = sysctl_createv(log, 0, &rnode, &rxnode,
6548 0, CTLTYPE_NODE, "rx",
6549 SYSCTL_DESCR("ixl information and settings for Rx"),
6550 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
6551 if (error)
6552 goto out;
6553
6554 error = sysctl_createv(log, 0, &rxnode, NULL,
6555 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
6556 SYSCTL_DESCR("max number of Rx packets"
6557 " to process for interrupt processing"),
6558 NULL, 0, &sc->sc_rx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
6559 if (error)
6560 goto out;
6561
6562 error = sysctl_createv(log, 0, &rxnode, NULL,
6563 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
6564 SYSCTL_DESCR("max number of Rx packets"
6565 " to process for deferred processing"),
6566 NULL, 0, &sc->sc_rx_process_limit, 0, CTL_CREATE, CTL_EOL);
6567 if (error)
6568 goto out;
6569
6570 error = sysctl_createv(log, 0, &rnode, &txnode,
6571 0, CTLTYPE_NODE, "tx",
6572 SYSCTL_DESCR("ixl information and settings for Tx"),
6573 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
6574 if (error)
6575 goto out;
6576
6577 error = sysctl_createv(log, 0, &txnode, NULL,
6578 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
6579 SYSCTL_DESCR("max number of Tx packets"
6580 " to process for interrupt processing"),
6581 NULL, 0, &sc->sc_tx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
6582 if (error)
6583 goto out;
6584
6585 error = sysctl_createv(log, 0, &txnode, NULL,
6586 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
6587 SYSCTL_DESCR("max number of Tx packets"
6588 " to process for deferred processing"),
6589 NULL, 0, &sc->sc_tx_process_limit, 0, CTL_CREATE, CTL_EOL);
6590 if (error)
6591 goto out;
6592
6593 out:
6594 if (error) {
6595 aprint_error_dev(sc->sc_dev,
6596 "unable to create sysctl node\n");
6597 sysctl_teardown(log);
6598 }
6599
6600 return error;
6601 }
6602
6603 static void
6604 ixl_teardown_sysctls(struct ixl_softc *sc)
6605 {
6606
6607 sysctl_teardown(&sc->sc_sysctllog);
6608 }
6609
6610 static struct workqueue *
6611 ixl_workq_create(const char *name, pri_t prio, int ipl, int flags)
6612 {
6613 struct workqueue *wq;
6614 int error;
6615
6616 error = workqueue_create(&wq, name, ixl_workq_work, NULL,
6617 prio, ipl, flags);
6618
6619 if (error)
6620 return NULL;
6621
6622 return wq;
6623 }
6624
6625 static void
6626 ixl_workq_destroy(struct workqueue *wq)
6627 {
6628
6629 workqueue_destroy(wq);
6630 }
6631
6632 static void
6633 ixl_work_set(struct ixl_work *work, void (*func)(void *), void *arg)
6634 {
6635
6636 memset(work, 0, sizeof(*work));
6637 work->ixw_func = func;
6638 work->ixw_arg = arg;
6639 }
6640
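/*
 * ixl_work_add() makes enqueueing idempotent: ixw_added is set with an
 * atomic compare-and-swap, so a work item that is already pending is
 * not queued a second time.  The flag is cleared in ixl_workq_work()
 * just before the callback runs.
 */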
6641 static void
6642 ixl_work_add(struct workqueue *wq, struct ixl_work *work)
6643 {
6644 if (atomic_cas_uint(&work->ixw_added, 0, 1) != 0)
6645 return;
6646
6647 workqueue_enqueue(wq, &work->ixw_cookie, NULL);
6648 }
6649
6650 static void
6651 ixl_work_wait(struct workqueue *wq, struct ixl_work *work)
6652 {
6653
6654 workqueue_wait(wq, &work->ixw_cookie);
6655 }
6656
6657 static void
6658 ixl_workq_work(struct work *wk, void *context)
6659 {
6660 struct ixl_work *work;
6661
6662 work = container_of(wk, struct ixl_work, ixw_cookie);
6663
6664 atomic_swap_uint(&work->ixw_added, 0);
6665 work->ixw_func(work->ixw_arg);
6666 }
6667
6668 static int
6669 ixl_rx_ctl_read(struct ixl_softc *sc, uint32_t reg, uint32_t *rv)
6670 {
6671 struct ixl_aq_desc iaq;
6672
6673 memset(&iaq, 0, sizeof(iaq));
6674 iaq.iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_REG_READ);
6675 iaq.iaq_param[1] = htole32(reg);
6676
6677 if (ixl_atq_poll(sc, &iaq, 250) != 0)
6678 return ETIMEDOUT;
6679
6680 	switch (le16toh(iaq.iaq_retval)) {
6681 case IXL_AQ_RC_OK:
6682 /* success */
6683 break;
6684 case IXL_AQ_RC_EACCES:
6685 return EPERM;
6686 case IXL_AQ_RC_EAGAIN:
6687 return EAGAIN;
6688 default:
6689 return EIO;
6690 }
6691
6692 	*rv = le32toh(iaq.iaq_param[3]);
6693 return 0;
6694 }
6695
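/*
 * When the firmware advertises IXL_SC_AQ_FLAG_RXCTL, some receive
 * control registers have to be accessed through the admin queue.
 * ixl_rd_rx_csr() (and ixl_wr_rx_csr() below) retry the admin queue
 * command a few times on EAGAIN and fall back to a plain register
 * access if the command keeps failing or the capability is absent.
 */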
6696 static uint32_t
6697 ixl_rd_rx_csr(struct ixl_softc *sc, uint32_t reg)
6698 {
6699 uint32_t val;
6700 int rv, retry, retry_limit;
6701
6702 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL)) {
6703 retry_limit = 5;
6704 } else {
6705 retry_limit = 0;
6706 }
6707
6708 for (retry = 0; retry < retry_limit; retry++) {
6709 rv = ixl_rx_ctl_read(sc, reg, &val);
6710 if (rv == 0)
6711 return val;
6712 else if (rv == EAGAIN)
6713 delaymsec(1);
6714 else
6715 break;
6716 }
6717
6718 val = ixl_rd(sc, reg);
6719
6720 return val;
6721 }
6722
6723 static int
6724 ixl_rx_ctl_write(struct ixl_softc *sc, uint32_t reg, uint32_t value)
6725 {
6726 struct ixl_aq_desc iaq;
6727
6728 memset(&iaq, 0, sizeof(iaq));
6729 iaq.iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_REG_WRITE);
6730 iaq.iaq_param[1] = htole32(reg);
6731 iaq.iaq_param[3] = htole32(value);
6732
6733 if (ixl_atq_poll(sc, &iaq, 250) != 0)
6734 return ETIMEDOUT;
6735
6736 	switch (le16toh(iaq.iaq_retval)) {
6737 case IXL_AQ_RC_OK:
6738 /* success */
6739 break;
6740 case IXL_AQ_RC_EACCES:
6741 return EPERM;
6742 case IXL_AQ_RC_EAGAIN:
6743 return EAGAIN;
6744 default:
6745 return EIO;
6746 }
6747
6748 return 0;
6749 }
6750
6751 static void
6752 ixl_wr_rx_csr(struct ixl_softc *sc, uint32_t reg, uint32_t value)
6753 {
6754 int rv, retry, retry_limit;
6755
6756 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL)) {
6757 retry_limit = 5;
6758 } else {
6759 retry_limit = 0;
6760 }
6761
6762 for (retry = 0; retry < retry_limit; retry++) {
6763 rv = ixl_rx_ctl_write(sc, reg, value);
6764 if (rv == 0)
6765 return;
6766 else if (rv == EAGAIN)
6767 delaymsec(1);
6768 else
6769 break;
6770 }
6771
6772 ixl_wr(sc, reg, value);
6773 }
6774
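/*
 * ixl_nvm_lock()/ixl_nvm_unlock() bracket NVM accesses by taking and
 * releasing the NVM resource through the admin queue when the firmware
 * requires it (IXL_SC_AQ_FLAG_NVMLOCK); otherwise both are no-ops.
 * The 'R' argument requests read access, anything else write access.
 */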
6775 static int
6776 ixl_nvm_lock(struct ixl_softc *sc, char rw)
6777 {
6778 struct ixl_aq_desc iaq;
6779 struct ixl_aq_req_resource_param *param;
6780 int rv;
6781
6782 if (!ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK))
6783 return 0;
6784
6785 memset(&iaq, 0, sizeof(iaq));
6786 iaq.iaq_opcode = htole16(IXL_AQ_OP_REQUEST_RESOURCE);
6787
6788 param = (struct ixl_aq_req_resource_param *)&iaq.iaq_param;
6789 param->resource_id = htole16(IXL_AQ_RESOURCE_ID_NVM);
6790 if (rw == 'R') {
6791 param->access_type = htole16(IXL_AQ_RESOURCE_ACCES_READ);
6792 } else {
6793 param->access_type = htole16(IXL_AQ_RESOURCE_ACCES_WRITE);
6794 }
6795
6796 rv = ixl_atq_poll(sc, &iaq, 250);
6797
6798 if (rv != 0)
6799 return ETIMEDOUT;
6800
6801 switch (le16toh(iaq.iaq_retval)) {
6802 case IXL_AQ_RC_OK:
6803 break;
6804 case IXL_AQ_RC_EACCES:
6805 return EACCES;
6806 case IXL_AQ_RC_EBUSY:
6807 return EBUSY;
6808 case IXL_AQ_RC_EPERM:
6809 return EPERM;
6810 }
6811
6812 return 0;
6813 }
6814
6815 static int
6816 ixl_nvm_unlock(struct ixl_softc *sc)
6817 {
6818 struct ixl_aq_desc iaq;
6819 struct ixl_aq_rel_resource_param *param;
6820 int rv;
6821
6822 if (!ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK))
6823 return 0;
6824
6825 memset(&iaq, 0, sizeof(iaq));
6826 iaq.iaq_opcode = htole16(IXL_AQ_OP_RELEASE_RESOURCE);
6827
6828 param = (struct ixl_aq_rel_resource_param *)&iaq.iaq_param;
6829 param->resource_id = htole16(IXL_AQ_RESOURCE_ID_NVM);
6830
6831 rv = ixl_atq_poll(sc, &iaq, 250);
6832
6833 if (rv != 0)
6834 return ETIMEDOUT;
6835
6836 switch (le16toh(iaq.iaq_retval)) {
6837 case IXL_AQ_RC_OK:
6838 break;
6839 default:
6840 return EIO;
6841 }
6842 return 0;
6843 }
6844
6845 static int
6846 ixl_srdone_poll(struct ixl_softc *sc)
6847 {
6848 int wait_count;
6849 uint32_t reg;
6850
6851 for (wait_count = 0; wait_count < IXL_SRRD_SRCTL_ATTEMPTS;
6852 wait_count++) {
6853 reg = ixl_rd(sc, I40E_GLNVM_SRCTL);
6854 if (ISSET(reg, I40E_GLNVM_SRCTL_DONE_MASK))
6855 break;
6856
6857 delaymsec(5);
6858 }
6859
6860 if (wait_count == IXL_SRRD_SRCTL_ATTEMPTS)
6861 return -1;
6862
6863 return 0;
6864 }
6865
6866 static int
6867 ixl_nvm_read_srctl(struct ixl_softc *sc, uint16_t offset, uint16_t *data)
6868 {
6869 uint32_t reg;
6870
6871 if (ixl_srdone_poll(sc) != 0)
6872 return ETIMEDOUT;
6873
6874 reg = ((uint32_t)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
6875 __BIT(I40E_GLNVM_SRCTL_START_SHIFT);
6876 ixl_wr(sc, I40E_GLNVM_SRCTL, reg);
6877
6878 if (ixl_srdone_poll(sc) != 0) {
6879 aprint_debug("NVM read error: couldn't access "
6880 "Shadow RAM address: 0x%x\n", offset);
6881 return ETIMEDOUT;
6882 }
6883
6884 reg = ixl_rd(sc, I40E_GLNVM_SRDATA);
6885 *data = (uint16_t)__SHIFTOUT(reg, I40E_GLNVM_SRDATA_RDDATA_MASK);
6886
6887 return 0;
6888 }
6889
6890 static int
6891 ixl_nvm_read_aq(struct ixl_softc *sc, uint16_t offset_word,
6892 void *data, size_t len)
6893 {
6894 struct ixl_dmamem *idm;
6895 struct ixl_aq_desc iaq;
6896 struct ixl_aq_nvm_param *param;
6897 uint32_t offset_bytes;
6898 int rv;
6899
6900 idm = &sc->sc_aqbuf;
6901 if (len > IXL_DMA_LEN(idm))
6902 return ENOMEM;
6903
6904 memset(IXL_DMA_KVA(idm), 0, IXL_DMA_LEN(idm));
6905 memset(&iaq, 0, sizeof(iaq));
6906 iaq.iaq_opcode = htole16(IXL_AQ_OP_NVM_READ);
6907 iaq.iaq_flags = htole16(IXL_AQ_BUF |
6908 ((len > I40E_AQ_LARGE_BUF) ? IXL_AQ_LB : 0));
6909 iaq.iaq_datalen = htole16(len);
6910 ixl_aq_dva(&iaq, IXL_DMA_DVA(idm));
6911
6912 param = (struct ixl_aq_nvm_param *)iaq.iaq_param;
6913 param->command_flags = IXL_AQ_NVM_LAST_CMD;
6914 param->module_pointer = 0;
6915 param->length = htole16(len);
6916 offset_bytes = (uint32_t)offset_word * 2;
6917 offset_bytes &= 0x00FFFFFF;
6918 param->offset = htole32(offset_bytes);
6919
6920 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
6921 BUS_DMASYNC_PREREAD);
6922
6923 rv = ixl_atq_poll(sc, &iaq, 250);
6924
6925 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
6926 BUS_DMASYNC_POSTREAD);
6927
6928 if (rv != 0) {
6929 return ETIMEDOUT;
6930 }
6931
6932 switch (le16toh(iaq.iaq_retval)) {
6933 case IXL_AQ_RC_OK:
6934 break;
6935 case IXL_AQ_RC_EPERM:
6936 return EPERM;
6937 case IXL_AQ_RC_EINVAL:
6938 return EINVAL;
6939 case IXL_AQ_RC_EBUSY:
6940 return EBUSY;
6941 case IXL_AQ_RC_EIO:
6942 default:
6943 return EIO;
6944 }
6945
6946 memcpy(data, IXL_DMA_KVA(idm), len);
6947
6948 return 0;
6949 }
6950
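/*
 * ixl_rd16_nvm() reads a single 16-bit NVM word while holding the NVM
 * lock.  With IXL_SC_AQ_FLAG_NVMREAD it goes through the admin queue
 * read command above (the buffer is little-endian, hence le16toh());
 * otherwise it uses the GLNVM_SRCTL shadow RAM interface.
 */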
6951 static int
6952 ixl_rd16_nvm(struct ixl_softc *sc, uint16_t offset, uint16_t *data)
6953 {
6954 int error;
6955 uint16_t buf;
6956
6957 error = ixl_nvm_lock(sc, 'R');
6958 if (error)
6959 return error;
6960
6961 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMREAD)) {
6962 error = ixl_nvm_read_aq(sc, offset,
6963 &buf, sizeof(buf));
6964 if (error == 0)
6965 *data = le16toh(buf);
6966 } else {
6967 error = ixl_nvm_read_srctl(sc, offset, &buf);
6968 if (error == 0)
6969 *data = buf;
6970 }
6971
6972 ixl_nvm_unlock(sc);
6973
6974 return error;
6975 }
6976
6977 MODULE(MODULE_CLASS_DRIVER, if_ixl, "pci");
6978
6979 #ifdef _MODULE
6980 #include "ioconf.c"
6981 #endif
6982
6983 #ifdef _MODULE
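/*
 * ixl_parse_modprop() consumes the module property dictionary passed
 * at load time.  The keys recognized below are "nomsix" (bool),
 * "stats_interval", "nqps_limit", "rx_ndescs" and "tx_ndescs"
 * (numbers); values outside the sanity checks are silently ignored.
 * A hypothetical plist fragment:
 *
 *	<dict>
 *		<key>nomsix</key><true/>
 *		<key>nqps_limit</key><integer>4</integer>
 *	</dict>
 */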
6984 static void
6985 ixl_parse_modprop(prop_dictionary_t dict)
6986 {
6987 prop_object_t obj;
6988 int64_t val;
6989 uint64_t uval;
6990
6991 if (dict == NULL)
6992 return;
6993
6994 obj = prop_dictionary_get(dict, "nomsix");
6995 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_BOOL) {
6996 ixl_param_nomsix = prop_bool_true((prop_bool_t)obj);
6997 }
6998
6999 obj = prop_dictionary_get(dict, "stats_interval");
7000 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
7001 val = prop_number_integer_value((prop_number_t)obj);
7002
7003 		/* the accepted upper and lower bounds are arbitrary sanity limits */
7004 if (100 < val && val < 180000) {
7005 ixl_param_stats_interval = val;
7006 }
7007 }
7008
7009 obj = prop_dictionary_get(dict, "nqps_limit");
7010 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
7011 val = prop_number_integer_value((prop_number_t)obj);
7012
7013 if (val <= INT32_MAX)
7014 ixl_param_nqps_limit = val;
7015 }
7016
7017 obj = prop_dictionary_get(dict, "rx_ndescs");
7018 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
7019 uval = prop_number_unsigned_integer_value((prop_number_t)obj);
7020
7021 if (uval > 8)
7022 ixl_param_rx_ndescs = uval;
7023 }
7024
7025 obj = prop_dictionary_get(dict, "tx_ndescs");
7026 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
7027 uval = prop_number_unsigned_integer_value((prop_number_t)obj);
7028
7029 if (uval > IXL_TX_PKT_DESCS)
7030 ixl_param_tx_ndescs = uval;
7031 }
7032
7033 }
7034 #endif
7035
7036 static int
7037 if_ixl_modcmd(modcmd_t cmd, void *opaque)
7038 {
7039 int error = 0;
7040
7041 #ifdef _MODULE
7042 switch (cmd) {
7043 case MODULE_CMD_INIT:
7044 ixl_parse_modprop((prop_dictionary_t)opaque);
7045 error = config_init_component(cfdriver_ioconf_if_ixl,
7046 cfattach_ioconf_if_ixl, cfdata_ioconf_if_ixl);
7047 break;
7048 case MODULE_CMD_FINI:
7049 error = config_fini_component(cfdriver_ioconf_if_ixl,
7050 cfattach_ioconf_if_ixl, cfdata_ioconf_if_ixl);
7051 break;
7052 default:
7053 error = ENOTTY;
7054 break;
7055 }
7056 #endif
7057
7058 return error;
7059 }
7060