    1 /*	$NetBSD: if_ixl.c,v 1.47 2020/02/25 07:22:18 yamaguchi Exp $	*/
2
3 /*
4 * Copyright (c) 2013-2015, Intel Corporation
5 * All rights reserved.
6
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
12 *
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * 3. Neither the name of the Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 /*
35 * Copyright (c) 2016,2017 David Gwynne <dlg (at) openbsd.org>
36 *
37 * Permission to use, copy, modify, and distribute this software for any
38 * purpose with or without fee is hereby granted, provided that the above
39 * copyright notice and this permission notice appear in all copies.
40 *
41 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
42 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
43 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
44 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
45 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
46 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
47 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
48 */
49
50 /*
51 * Copyright (c) 2019 Internet Initiative Japan, Inc.
52 * All rights reserved.
53 *
54 * Redistribution and use in source and binary forms, with or without
55 * modification, are permitted provided that the following conditions
56 * are met:
57 * 1. Redistributions of source code must retain the above copyright
58 * notice, this list of conditions and the following disclaimer.
59 * 2. Redistributions in binary form must reproduce the above copyright
60 * notice, this list of conditions and the following disclaimer in the
61 * documentation and/or other materials provided with the distribution.
62 *
63 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
64 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
65 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
66 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
67 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
68 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
69 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
70 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
71 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
72 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
73 * POSSIBILITY OF SUCH DAMAGE.
74 */
75
76 #include <sys/cdefs.h>
77 __KERNEL_RCSID(0, "$NetBSD: if_ixl.c,v 1.47 2020/02/25 07:22:18 yamaguchi Exp $");
78
79 #ifdef _KERNEL_OPT
80 #include "opt_net_mpsafe.h"
81 #include "opt_if_ixl.h"
82 #endif
83
84 #include <sys/param.h>
85 #include <sys/types.h>
86
87 #include <sys/cpu.h>
88 #include <sys/device.h>
89 #include <sys/evcnt.h>
90 #include <sys/interrupt.h>
91 #include <sys/kmem.h>
92 #include <sys/malloc.h>
93 #include <sys/module.h>
94 #include <sys/mutex.h>
95 #include <sys/pcq.h>
96 #include <sys/syslog.h>
97 #include <sys/workqueue.h>
98
99 #include <sys/bus.h>
100
101 #include <net/bpf.h>
102 #include <net/if.h>
103 #include <net/if_dl.h>
104 #include <net/if_media.h>
105 #include <net/if_ether.h>
106 #include <net/rss_config.h>
107
108 #include <netinet/tcp.h> /* for struct tcphdr */
109 #include <netinet/udp.h> /* for struct udphdr */
110
111 #include <dev/pci/pcivar.h>
112 #include <dev/pci/pcidevs.h>
113
114 #include <dev/pci/if_ixlreg.h>
115 #include <dev/pci/if_ixlvar.h>
116
117 #include <prop/proplib.h>
118
119 struct ixl_softc; /* defined */
120
121 #define I40E_PF_RESET_WAIT_COUNT 200
122 #define I40E_AQ_LARGE_BUF 512
123
124 /* bitfields for Tx queue mapping in QTX_CTL */
125 #define I40E_QTX_CTL_VF_QUEUE 0x0
126 #define I40E_QTX_CTL_VM_QUEUE 0x1
127 #define I40E_QTX_CTL_PF_QUEUE 0x2
128
129 #define I40E_QUEUE_TYPE_EOL 0x7ff
130 #define I40E_INTR_NOTX_QUEUE 0
131
132 #define I40E_QUEUE_TYPE_RX 0x0
133 #define I40E_QUEUE_TYPE_TX 0x1
134 #define I40E_QUEUE_TYPE_PE_CEQ 0x2
135 #define I40E_QUEUE_TYPE_UNKNOWN 0x3
136
137 #define I40E_ITR_INDEX_RX 0x0
138 #define I40E_ITR_INDEX_TX 0x1
139 #define I40E_ITR_INDEX_OTHER 0x2
140 #define I40E_ITR_INDEX_NONE 0x3
141
142 #define I40E_INTR_NOTX_QUEUE 0
143 #define I40E_INTR_NOTX_INTR 0
144 #define I40E_INTR_NOTX_RX_QUEUE 0
145 #define I40E_INTR_NOTX_TX_QUEUE 1
146 #define I40E_INTR_NOTX_RX_MASK I40E_PFINT_ICR0_QUEUE_0_MASK
147 #define I40E_INTR_NOTX_TX_MASK I40E_PFINT_ICR0_QUEUE_1_MASK
148
149 #define BIT_ULL(a) (1ULL << (a))
150 #define IXL_RSS_HENA_DEFAULT_BASE \
151 (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
152 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
153 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
154 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
155 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
156 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
157 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
158 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
159 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
160 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
161 BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
162 #define IXL_RSS_HENA_DEFAULT_XL710 IXL_RSS_HENA_DEFAULT_BASE
163 #define IXL_RSS_HENA_DEFAULT_X722 (IXL_RSS_HENA_DEFAULT_XL710 | \
164 BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
165 BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
166 BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
167 BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
168 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
169 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK))
170 #define I40E_HASH_LUT_SIZE_128 0
171 #define IXL_RSS_KEY_SIZE_REG 13
172
173 #define IXL_ICR0_CRIT_ERR_MASK \
174 (I40E_PFINT_ICR0_PCI_EXCEPTION_MASK | \
175 I40E_PFINT_ICR0_ECC_ERR_MASK | \
176 I40E_PFINT_ICR0_PE_CRITERR_MASK)
177
178 #define IXL_QUEUE_MAX_XL710 64
179 #define IXL_QUEUE_MAX_X722 128
180
181 #define IXL_TX_PKT_DESCS 8
182 #define IXL_TX_PKT_MAXSIZE (MCLBYTES * IXL_TX_PKT_DESCS)
183 #define IXL_TX_QUEUE_ALIGN 128
184 #define IXL_RX_QUEUE_ALIGN 128
185
186 #define IXL_MCLBYTES (MCLBYTES - ETHER_ALIGN)
  187 #define IXL_MTU_ETHERLEN	(ETHER_HDR_LEN \
  188 				+ ETHER_CRC_LEN)
189 #if 0
190 #define IXL_MAX_MTU (9728 - IXL_MTU_ETHERLEN)
191 #else
192 /* (dbuff * 5) - ETHER_HDR_LEN - ETHER_CRC_LEN */
193 #define IXL_MAX_MTU (9600 - IXL_MTU_ETHERLEN)
194 #endif
195 #define IXL_MIN_MTU (ETHER_MIN_LEN - ETHER_CRC_LEN)
196
197 #define IXL_PCIREG PCI_MAPREG_START
198
199 #define IXL_ITR0 0x0
200 #define IXL_ITR1 0x1
201 #define IXL_ITR2 0x2
202 #define IXL_NOITR 0x3
203
204 #define IXL_AQ_NUM 256
205 #define IXL_AQ_MASK (IXL_AQ_NUM - 1)
206 #define IXL_AQ_ALIGN 64 /* lol */
207 #define IXL_AQ_BUFLEN 4096
208
209 #define IXL_HMC_ROUNDUP 512
210 #define IXL_HMC_PGSIZE 4096
211 #define IXL_HMC_DVASZ sizeof(uint64_t)
212 #define IXL_HMC_PGS (IXL_HMC_PGSIZE / IXL_HMC_DVASZ)
213 #define IXL_HMC_L2SZ (IXL_HMC_PGSIZE * IXL_HMC_PGS)
214 #define IXL_HMC_PDVALID 1ULL
215
216 #define IXL_ATQ_EXEC_TIMEOUT (10 * hz)
217
218 #define IXL_SRRD_SRCTL_ATTEMPTS 100000
219
220 struct ixl_aq_regs {
221 bus_size_t atq_tail;
222 bus_size_t atq_head;
223 bus_size_t atq_len;
224 bus_size_t atq_bal;
225 bus_size_t atq_bah;
226
227 bus_size_t arq_tail;
228 bus_size_t arq_head;
229 bus_size_t arq_len;
230 bus_size_t arq_bal;
231 bus_size_t arq_bah;
232
233 uint32_t atq_len_enable;
234 uint32_t atq_tail_mask;
235 uint32_t atq_head_mask;
236
237 uint32_t arq_len_enable;
238 uint32_t arq_tail_mask;
239 uint32_t arq_head_mask;
240 };
241
242 struct ixl_phy_type {
243 uint64_t phy_type;
244 uint64_t ifm_type;
245 };
246
247 struct ixl_speed_type {
248 uint8_t dev_speed;
249 uint64_t net_speed;
250 };
251
252 struct ixl_aq_buf {
253 SIMPLEQ_ENTRY(ixl_aq_buf)
254 aqb_entry;
255 void *aqb_data;
256 bus_dmamap_t aqb_map;
257 bus_dma_segment_t aqb_seg;
258 size_t aqb_size;
259 int aqb_nsegs;
260 };
261 SIMPLEQ_HEAD(ixl_aq_bufs, ixl_aq_buf);
262
263 struct ixl_dmamem {
264 bus_dmamap_t ixm_map;
265 bus_dma_segment_t ixm_seg;
266 int ixm_nsegs;
267 size_t ixm_size;
268 void *ixm_kva;
269 };
270
271 #define IXL_DMA_MAP(_ixm) ((_ixm)->ixm_map)
272 #define IXL_DMA_DVA(_ixm) ((_ixm)->ixm_map->dm_segs[0].ds_addr)
273 #define IXL_DMA_KVA(_ixm) ((void *)(_ixm)->ixm_kva)
274 #define IXL_DMA_LEN(_ixm) ((_ixm)->ixm_size)
275
276 struct ixl_hmc_entry {
277 uint64_t hmc_base;
278 uint32_t hmc_count;
279 uint64_t hmc_size;
280 };
281
282 enum ixl_hmc_types {
283 IXL_HMC_LAN_TX = 0,
284 IXL_HMC_LAN_RX,
285 IXL_HMC_FCOE_CTX,
286 IXL_HMC_FCOE_FILTER,
287 IXL_HMC_COUNT
288 };
289
290 struct ixl_hmc_pack {
291 uint16_t offset;
292 uint16_t width;
293 uint16_t lsb;
294 };
295
296 /*
297 * these hmc objects have weird sizes and alignments, so these are abstract
298 * representations of them that are nice for c to populate.
299 *
  300  * the packing code relies on little-endian values being stored in the fields,
  301  * no high bits in the fields being set, and the fields being packed in the
  302  * same order as they are in the ctx structure.
303 */
304
305 struct ixl_hmc_rxq {
306 uint16_t head;
307 uint8_t cpuid;
308 uint64_t base;
309 #define IXL_HMC_RXQ_BASE_UNIT 128
310 uint16_t qlen;
311 uint16_t dbuff;
312 #define IXL_HMC_RXQ_DBUFF_UNIT 128
313 uint8_t hbuff;
314 #define IXL_HMC_RXQ_HBUFF_UNIT 64
315 uint8_t dtype;
316 #define IXL_HMC_RXQ_DTYPE_NOSPLIT 0x0
317 #define IXL_HMC_RXQ_DTYPE_HSPLIT 0x1
318 #define IXL_HMC_RXQ_DTYPE_SPLIT_ALWAYS 0x2
319 uint8_t dsize;
320 #define IXL_HMC_RXQ_DSIZE_16 0
321 #define IXL_HMC_RXQ_DSIZE_32 1
322 uint8_t crcstrip;
323 uint8_t fc_ena;
324 uint8_t l2sel;
325 uint8_t hsplit_0;
326 uint8_t hsplit_1;
327 uint8_t showiv;
328 uint16_t rxmax;
329 uint8_t tphrdesc_ena;
330 uint8_t tphwdesc_ena;
331 uint8_t tphdata_ena;
332 uint8_t tphhead_ena;
333 uint8_t lrxqthresh;
334 uint8_t prefena;
335 };
336
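/*
 * Each entry below maps one field of struct ixl_hmc_rxq into the packed
 * hardware RX queue context: the field's byte offset within the C structure,
 * its width in bits, and the bit position (lsb) at which it starts in the
 * context image.
 */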
337 static const struct ixl_hmc_pack ixl_hmc_pack_rxq[] = {
338 { offsetof(struct ixl_hmc_rxq, head), 13, 0 },
339 { offsetof(struct ixl_hmc_rxq, cpuid), 8, 13 },
340 { offsetof(struct ixl_hmc_rxq, base), 57, 32 },
341 { offsetof(struct ixl_hmc_rxq, qlen), 13, 89 },
342 { offsetof(struct ixl_hmc_rxq, dbuff), 7, 102 },
343 { offsetof(struct ixl_hmc_rxq, hbuff), 5, 109 },
344 { offsetof(struct ixl_hmc_rxq, dtype), 2, 114 },
345 { offsetof(struct ixl_hmc_rxq, dsize), 1, 116 },
346 { offsetof(struct ixl_hmc_rxq, crcstrip), 1, 117 },
347 { offsetof(struct ixl_hmc_rxq, fc_ena), 1, 118 },
348 { offsetof(struct ixl_hmc_rxq, l2sel), 1, 119 },
349 { offsetof(struct ixl_hmc_rxq, hsplit_0), 4, 120 },
350 { offsetof(struct ixl_hmc_rxq, hsplit_1), 2, 124 },
351 { offsetof(struct ixl_hmc_rxq, showiv), 1, 127 },
352 { offsetof(struct ixl_hmc_rxq, rxmax), 14, 174 },
353 { offsetof(struct ixl_hmc_rxq, tphrdesc_ena), 1, 193 },
354 { offsetof(struct ixl_hmc_rxq, tphwdesc_ena), 1, 194 },
355 { offsetof(struct ixl_hmc_rxq, tphdata_ena), 1, 195 },
356 { offsetof(struct ixl_hmc_rxq, tphhead_ena), 1, 196 },
357 { offsetof(struct ixl_hmc_rxq, lrxqthresh), 3, 198 },
358 { offsetof(struct ixl_hmc_rxq, prefena), 1, 201 },
359 };
360
361 #define IXL_HMC_RXQ_MINSIZE (201 + 1)
362
363 struct ixl_hmc_txq {
364 uint16_t head;
365 uint8_t new_context;
366 uint64_t base;
367 #define IXL_HMC_TXQ_BASE_UNIT 128
368 uint8_t fc_ena;
369 uint8_t timesync_ena;
370 uint8_t fd_ena;
371 uint8_t alt_vlan_ena;
372 uint16_t thead_wb;
373 uint8_t cpuid;
374 uint8_t head_wb_ena;
375 #define IXL_HMC_TXQ_DESC_WB 0
376 #define IXL_HMC_TXQ_HEAD_WB 1
377 uint16_t qlen;
378 uint8_t tphrdesc_ena;
379 uint8_t tphrpacket_ena;
380 uint8_t tphwdesc_ena;
381 uint64_t head_wb_addr;
382 uint32_t crc;
383 uint16_t rdylist;
384 uint8_t rdylist_act;
385 };
386
387 static const struct ixl_hmc_pack ixl_hmc_pack_txq[] = {
388 { offsetof(struct ixl_hmc_txq, head), 13, 0 },
389 { offsetof(struct ixl_hmc_txq, new_context), 1, 30 },
390 { offsetof(struct ixl_hmc_txq, base), 57, 32 },
391 { offsetof(struct ixl_hmc_txq, fc_ena), 1, 89 },
392 { offsetof(struct ixl_hmc_txq, timesync_ena), 1, 90 },
393 { offsetof(struct ixl_hmc_txq, fd_ena), 1, 91 },
394 { offsetof(struct ixl_hmc_txq, alt_vlan_ena), 1, 92 },
395 { offsetof(struct ixl_hmc_txq, cpuid), 8, 96 },
396 /* line 1 */
397 { offsetof(struct ixl_hmc_txq, thead_wb), 13, 0 + 128 },
398 { offsetof(struct ixl_hmc_txq, head_wb_ena), 1, 32 + 128 },
399 { offsetof(struct ixl_hmc_txq, qlen), 13, 33 + 128 },
400 { offsetof(struct ixl_hmc_txq, tphrdesc_ena), 1, 46 + 128 },
401 { offsetof(struct ixl_hmc_txq, tphrpacket_ena), 1, 47 + 128 },
402 { offsetof(struct ixl_hmc_txq, tphwdesc_ena), 1, 48 + 128 },
403 { offsetof(struct ixl_hmc_txq, head_wb_addr), 64, 64 + 128 },
404 /* line 7 */
405 { offsetof(struct ixl_hmc_txq, crc), 32, 0 + (7*128) },
406 { offsetof(struct ixl_hmc_txq, rdylist), 10, 84 + (7*128) },
407 { offsetof(struct ixl_hmc_txq, rdylist_act), 1, 94 + (7*128) },
408 };
409
410 #define IXL_HMC_TXQ_MINSIZE (94 + (7*128) + 1)
411
412 struct ixl_work {
413 struct work ixw_cookie;
414 void (*ixw_func)(void *);
415 void *ixw_arg;
416 unsigned int ixw_added;
417 };
418 #define IXL_WORKQUEUE_PRI PRI_SOFTNET
419
420 struct ixl_tx_map {
421 struct mbuf *txm_m;
422 bus_dmamap_t txm_map;
423 unsigned int txm_eop;
424 };
425
426 struct ixl_tx_ring {
427 kmutex_t txr_lock;
428 struct ixl_softc *txr_sc;
429
430 unsigned int txr_prod;
431 unsigned int txr_cons;
432
433 struct ixl_tx_map *txr_maps;
434 struct ixl_dmamem txr_mem;
435
436 bus_size_t txr_tail;
437 unsigned int txr_qid;
438 pcq_t *txr_intrq;
439 void *txr_si;
440
441 struct evcnt txr_defragged;
442 struct evcnt txr_defrag_failed;
443 struct evcnt txr_pcqdrop;
444 struct evcnt txr_transmitdef;
445 struct evcnt txr_intr;
446 struct evcnt txr_defer;
447 };
448
449 struct ixl_rx_map {
450 struct mbuf *rxm_m;
451 bus_dmamap_t rxm_map;
452 };
453
454 struct ixl_rx_ring {
455 kmutex_t rxr_lock;
456
457 unsigned int rxr_prod;
458 unsigned int rxr_cons;
459
460 struct ixl_rx_map *rxr_maps;
461 struct ixl_dmamem rxr_mem;
462
463 struct mbuf *rxr_m_head;
464 struct mbuf **rxr_m_tail;
465
466 bus_size_t rxr_tail;
467 unsigned int rxr_qid;
468
469 struct evcnt rxr_mgethdr_failed;
470 struct evcnt rxr_mgetcl_failed;
471 struct evcnt rxr_mbuf_load_failed;
472 struct evcnt rxr_intr;
473 struct evcnt rxr_defer;
474 };
475
476 struct ixl_queue_pair {
477 struct ixl_softc *qp_sc;
478 struct ixl_tx_ring *qp_txr;
479 struct ixl_rx_ring *qp_rxr;
480
481 char qp_name[16];
482
483 void *qp_si;
484 struct work qp_work;
485 bool qp_workqueue;
486 };
487
488 struct ixl_atq {
489 struct ixl_aq_desc iatq_desc;
490 void (*iatq_fn)(struct ixl_softc *,
491 const struct ixl_aq_desc *);
492 };
493 SIMPLEQ_HEAD(ixl_atq_list, ixl_atq);
494
495 struct ixl_product {
496 unsigned int vendor_id;
497 unsigned int product_id;
498 };
499
500 struct ixl_stats_counters {
501 bool isc_has_offset;
502 struct evcnt isc_crc_errors;
503 uint64_t isc_crc_errors_offset;
504 struct evcnt isc_illegal_bytes;
505 uint64_t isc_illegal_bytes_offset;
506 struct evcnt isc_rx_bytes;
507 uint64_t isc_rx_bytes_offset;
508 struct evcnt isc_rx_discards;
509 uint64_t isc_rx_discards_offset;
510 struct evcnt isc_rx_unicast;
511 uint64_t isc_rx_unicast_offset;
512 struct evcnt isc_rx_multicast;
513 uint64_t isc_rx_multicast_offset;
514 struct evcnt isc_rx_broadcast;
515 uint64_t isc_rx_broadcast_offset;
516 struct evcnt isc_rx_size_64;
517 uint64_t isc_rx_size_64_offset;
518 struct evcnt isc_rx_size_127;
519 uint64_t isc_rx_size_127_offset;
520 struct evcnt isc_rx_size_255;
521 uint64_t isc_rx_size_255_offset;
522 struct evcnt isc_rx_size_511;
523 uint64_t isc_rx_size_511_offset;
524 struct evcnt isc_rx_size_1023;
525 uint64_t isc_rx_size_1023_offset;
526 struct evcnt isc_rx_size_1522;
527 uint64_t isc_rx_size_1522_offset;
528 struct evcnt isc_rx_size_big;
529 uint64_t isc_rx_size_big_offset;
530 struct evcnt isc_rx_undersize;
531 uint64_t isc_rx_undersize_offset;
532 struct evcnt isc_rx_oversize;
533 uint64_t isc_rx_oversize_offset;
534 struct evcnt isc_rx_fragments;
535 uint64_t isc_rx_fragments_offset;
536 struct evcnt isc_rx_jabber;
537 uint64_t isc_rx_jabber_offset;
538 struct evcnt isc_tx_bytes;
539 uint64_t isc_tx_bytes_offset;
540 struct evcnt isc_tx_dropped_link_down;
541 uint64_t isc_tx_dropped_link_down_offset;
542 struct evcnt isc_tx_unicast;
543 uint64_t isc_tx_unicast_offset;
544 struct evcnt isc_tx_multicast;
545 uint64_t isc_tx_multicast_offset;
546 struct evcnt isc_tx_broadcast;
547 uint64_t isc_tx_broadcast_offset;
548 struct evcnt isc_tx_size_64;
549 uint64_t isc_tx_size_64_offset;
550 struct evcnt isc_tx_size_127;
551 uint64_t isc_tx_size_127_offset;
552 struct evcnt isc_tx_size_255;
553 uint64_t isc_tx_size_255_offset;
554 struct evcnt isc_tx_size_511;
555 uint64_t isc_tx_size_511_offset;
556 struct evcnt isc_tx_size_1023;
557 uint64_t isc_tx_size_1023_offset;
558 struct evcnt isc_tx_size_1522;
559 uint64_t isc_tx_size_1522_offset;
560 struct evcnt isc_tx_size_big;
561 uint64_t isc_tx_size_big_offset;
562 struct evcnt isc_mac_local_faults;
563 uint64_t isc_mac_local_faults_offset;
564 struct evcnt isc_mac_remote_faults;
565 uint64_t isc_mac_remote_faults_offset;
566 struct evcnt isc_link_xon_rx;
567 uint64_t isc_link_xon_rx_offset;
568 struct evcnt isc_link_xon_tx;
569 uint64_t isc_link_xon_tx_offset;
570 struct evcnt isc_link_xoff_rx;
571 uint64_t isc_link_xoff_rx_offset;
572 struct evcnt isc_link_xoff_tx;
573 uint64_t isc_link_xoff_tx_offset;
574 struct evcnt isc_vsi_rx_discards;
575 uint64_t isc_vsi_rx_discards_offset;
576 struct evcnt isc_vsi_rx_bytes;
577 uint64_t isc_vsi_rx_bytes_offset;
578 struct evcnt isc_vsi_rx_unicast;
579 uint64_t isc_vsi_rx_unicast_offset;
580 struct evcnt isc_vsi_rx_multicast;
581 uint64_t isc_vsi_rx_multicast_offset;
582 struct evcnt isc_vsi_rx_broadcast;
583 uint64_t isc_vsi_rx_broadcast_offset;
584 struct evcnt isc_vsi_tx_errors;
585 uint64_t isc_vsi_tx_errors_offset;
586 struct evcnt isc_vsi_tx_bytes;
587 uint64_t isc_vsi_tx_bytes_offset;
588 struct evcnt isc_vsi_tx_unicast;
589 uint64_t isc_vsi_tx_unicast_offset;
590 struct evcnt isc_vsi_tx_multicast;
591 uint64_t isc_vsi_tx_multicast_offset;
592 struct evcnt isc_vsi_tx_broadcast;
593 uint64_t isc_vsi_tx_broadcast_offset;
594 };
595
596 /*
597 * Locking notes:
  598  * + fields in ixl_tx_ring are protected by txr_lock (a spin mutex), and
  599  *    fields in ixl_rx_ring are protected by rxr_lock (a spin mutex).
  600  *    - no more than one of these locks may be held at once.
  601  * + fields named sc_atq_* in ixl_softc are protected by sc_atq_lock
  602  *    (a spin mutex).
  603  *    - this lock cannot be held together with txr_lock or rxr_lock.
  604  * + fields named sc_arq_* are not protected by any lock.
  605  *    - operations on sc_arq_* are done in a single context related to
  606  *      sc_arq_task.
  607  * + other fields in ixl_softc are protected by sc_cfg_lock
  608  *    (an adaptive mutex).
  609  *    - it must be acquired before any of the other locks and can be
  610  *      released after the other lock is released.
  611  */
612
613 struct ixl_softc {
614 device_t sc_dev;
615 struct ethercom sc_ec;
616 bool sc_attached;
617 bool sc_dead;
618 uint32_t sc_port;
619 struct sysctllog *sc_sysctllog;
620 struct workqueue *sc_workq;
621 struct workqueue *sc_workq_txrx;
622 int sc_stats_intval;
623 callout_t sc_stats_callout;
624 struct ixl_work sc_stats_task;
625 struct ixl_stats_counters
626 sc_stats_counters;
627 uint8_t sc_enaddr[ETHER_ADDR_LEN];
628 struct ifmedia sc_media;
629 uint64_t sc_media_status;
630 uint64_t sc_media_active;
631 uint64_t sc_phy_types;
632 uint8_t sc_phy_abilities;
633 uint8_t sc_phy_linkspeed;
634 uint8_t sc_phy_fec_cfg;
635 uint16_t sc_eee_cap;
636 uint32_t sc_eeer_val;
637 uint8_t sc_d3_lpan;
638 kmutex_t sc_cfg_lock;
639 enum i40e_mac_type sc_mac_type;
640 uint32_t sc_rss_table_size;
641 uint32_t sc_rss_table_entry_width;
642 bool sc_txrx_workqueue;
643 u_int sc_tx_process_limit;
644 u_int sc_rx_process_limit;
645 u_int sc_tx_intr_process_limit;
646 u_int sc_rx_intr_process_limit;
647
648 int sc_cur_ec_capenable;
649
650 struct pci_attach_args sc_pa;
651 pci_intr_handle_t *sc_ihp;
652 void **sc_ihs;
653 unsigned int sc_nintrs;
654
655 bus_dma_tag_t sc_dmat;
656 bus_space_tag_t sc_memt;
657 bus_space_handle_t sc_memh;
658 bus_size_t sc_mems;
659
660 uint8_t sc_pf_id;
661 uint16_t sc_uplink_seid; /* le */
662 uint16_t sc_downlink_seid; /* le */
663 uint16_t sc_vsi_number;
664 uint16_t sc_vsi_stat_counter_idx;
665 uint16_t sc_seid;
666 unsigned int sc_base_queue;
667
668 pci_intr_type_t sc_intrtype;
669 unsigned int sc_msix_vector_queue;
670
671 struct ixl_dmamem sc_scratch;
672 struct ixl_dmamem sc_aqbuf;
673
674 const struct ixl_aq_regs *
675 sc_aq_regs;
676 uint32_t sc_aq_flags;
677 #define IXL_SC_AQ_FLAG_RXCTL __BIT(0)
678 #define IXL_SC_AQ_FLAG_NVMLOCK __BIT(1)
679 #define IXL_SC_AQ_FLAG_NVMREAD __BIT(2)
680 #define IXL_SC_AQ_FLAG_RSS __BIT(3)
681
682 kmutex_t sc_atq_lock;
683 kcondvar_t sc_atq_cv;
684 struct ixl_dmamem sc_atq;
685 unsigned int sc_atq_prod;
686 unsigned int sc_atq_cons;
687
688 struct ixl_dmamem sc_arq;
689 struct ixl_work sc_arq_task;
690 struct ixl_aq_bufs sc_arq_idle;
691 struct ixl_aq_buf *sc_arq_live[IXL_AQ_NUM];
692 unsigned int sc_arq_prod;
693 unsigned int sc_arq_cons;
694
695 struct ixl_work sc_link_state_task;
696 struct ixl_atq sc_link_state_atq;
697
698 struct ixl_dmamem sc_hmc_sd;
699 struct ixl_dmamem sc_hmc_pd;
700 struct ixl_hmc_entry sc_hmc_entries[IXL_HMC_COUNT];
701
702 unsigned int sc_tx_ring_ndescs;
703 unsigned int sc_rx_ring_ndescs;
704 unsigned int sc_nqueue_pairs;
705 unsigned int sc_nqueue_pairs_max;
706 unsigned int sc_nqueue_pairs_device;
707 struct ixl_queue_pair *sc_qps;
708
709 struct evcnt sc_event_atq;
710 struct evcnt sc_event_link;
711 struct evcnt sc_event_ecc_err;
712 struct evcnt sc_event_pci_exception;
713 struct evcnt sc_event_crit_err;
714 };
715
716 #define IXL_TXRX_PROCESS_UNLIMIT UINT_MAX
717 #define IXL_TX_PROCESS_LIMIT 256
718 #define IXL_RX_PROCESS_LIMIT 256
719 #define IXL_TX_INTR_PROCESS_LIMIT 256
720 #define IXL_RX_INTR_PROCESS_LIMIT 0U
721
722 #define IXL_IFCAP_RXCSUM (IFCAP_CSUM_IPv4_Rx | \
723 IFCAP_CSUM_TCPv4_Rx | \
724 IFCAP_CSUM_UDPv4_Rx | \
725 IFCAP_CSUM_TCPv6_Rx | \
726 IFCAP_CSUM_UDPv6_Rx)
727 #define IXL_IFCAP_TXCSUM (IFCAP_CSUM_IPv4_Tx | \
728 IFCAP_CSUM_TCPv4_Tx | \
729 IFCAP_CSUM_UDPv4_Tx | \
730 IFCAP_CSUM_TCPv6_Tx | \
731 IFCAP_CSUM_UDPv6_Tx)
732 #define IXL_CSUM_ALL_OFFLOAD (M_CSUM_IPv4 | \
733 M_CSUM_TCPv4 | M_CSUM_TCPv6 | \
734 M_CSUM_UDPv4 | M_CSUM_UDPv6)
735
736 #define delaymsec(_x) DELAY(1000 * (_x))
737 #ifdef IXL_DEBUG
738 #define DDPRINTF(sc, fmt, args...) \
739 do { \
740 if ((sc) != NULL) { \
741 device_printf( \
742 ((struct ixl_softc *)(sc))->sc_dev, \
743 ""); \
744 } \
745 printf("%s:\t" fmt, __func__, ##args); \
746 } while (0)
747 #else
748 #define DDPRINTF(sc, fmt, args...) __nothing
749 #endif
750 #ifndef IXL_STATS_INTERVAL_MSEC
751 #define IXL_STATS_INTERVAL_MSEC 10000
752 #endif
753 #ifndef IXL_QUEUE_NUM
754 #define IXL_QUEUE_NUM 0
755 #endif
756
757 static bool ixl_param_nomsix = false;
758 static int ixl_param_stats_interval = IXL_STATS_INTERVAL_MSEC;
759 static int ixl_param_nqps_limit = IXL_QUEUE_NUM;
760 static unsigned int ixl_param_tx_ndescs = 1024;
761 static unsigned int ixl_param_rx_ndescs = 1024;
762
763 static enum i40e_mac_type
764 ixl_mactype(pci_product_id_t);
765 static void ixl_clear_hw(struct ixl_softc *);
766 static int ixl_pf_reset(struct ixl_softc *);
767
768 static int ixl_dmamem_alloc(struct ixl_softc *, struct ixl_dmamem *,
769 bus_size_t, bus_size_t);
770 static void ixl_dmamem_free(struct ixl_softc *, struct ixl_dmamem *);
771
772 static int ixl_arq_fill(struct ixl_softc *);
773 static void ixl_arq_unfill(struct ixl_softc *);
774
775 static int ixl_atq_poll(struct ixl_softc *, struct ixl_aq_desc *,
776 unsigned int);
777 static void ixl_atq_set(struct ixl_atq *,
778 void (*)(struct ixl_softc *, const struct ixl_aq_desc *));
779 static int ixl_atq_post(struct ixl_softc *, struct ixl_atq *);
780 static int ixl_atq_post_locked(struct ixl_softc *, struct ixl_atq *);
781 static void ixl_atq_done(struct ixl_softc *);
782 static int ixl_atq_exec(struct ixl_softc *, struct ixl_atq *);
783 static int ixl_get_version(struct ixl_softc *);
784 static int ixl_get_nvm_version(struct ixl_softc *);
785 static int ixl_get_hw_capabilities(struct ixl_softc *);
786 static int ixl_pxe_clear(struct ixl_softc *);
787 static int ixl_lldp_shut(struct ixl_softc *);
788 static int ixl_get_mac(struct ixl_softc *);
789 static int ixl_get_switch_config(struct ixl_softc *);
790 static int ixl_phy_mask_ints(struct ixl_softc *);
791 static int ixl_get_phy_info(struct ixl_softc *);
792 static int ixl_set_phy_config(struct ixl_softc *, uint8_t, uint8_t, bool);
793 static int ixl_set_phy_autoselect(struct ixl_softc *);
794 static int ixl_restart_an(struct ixl_softc *);
795 static int ixl_hmc(struct ixl_softc *);
796 static void ixl_hmc_free(struct ixl_softc *);
797 static int ixl_get_vsi(struct ixl_softc *);
798 static int ixl_set_vsi(struct ixl_softc *);
799 static void ixl_set_filter_control(struct ixl_softc *);
800 static void ixl_get_link_status(void *);
801 static int ixl_get_link_status_poll(struct ixl_softc *, int *);
802 static int ixl_set_link_status(struct ixl_softc *,
803 const struct ixl_aq_desc *);
804 static uint64_t ixl_search_link_speed(uint8_t);
805 static uint8_t ixl_search_baudrate(uint64_t);
806 static void ixl_config_rss(struct ixl_softc *);
807 static int ixl_add_macvlan(struct ixl_softc *, const uint8_t *,
808 uint16_t, uint16_t);
809 static int ixl_remove_macvlan(struct ixl_softc *, const uint8_t *,
810 uint16_t, uint16_t);
811 static void ixl_arq(void *);
812 static void ixl_hmc_pack(void *, const void *,
813 const struct ixl_hmc_pack *, unsigned int);
814 static uint32_t ixl_rd_rx_csr(struct ixl_softc *, uint32_t);
815 static void ixl_wr_rx_csr(struct ixl_softc *, uint32_t, uint32_t);
816 static int ixl_rd16_nvm(struct ixl_softc *, uint16_t, uint16_t *);
817
818 static int ixl_match(device_t, cfdata_t, void *);
819 static void ixl_attach(device_t, device_t, void *);
820 static int ixl_detach(device_t, int);
821
822 static void ixl_media_add(struct ixl_softc *);
823 static int ixl_media_change(struct ifnet *);
824 static void ixl_media_status(struct ifnet *, struct ifmediareq *);
825 static void ixl_watchdog(struct ifnet *);
826 static int ixl_ioctl(struct ifnet *, u_long, void *);
827 static void ixl_start(struct ifnet *);
828 static int ixl_transmit(struct ifnet *, struct mbuf *);
829 static void ixl_deferred_transmit(void *);
830 static int ixl_intr(void *);
831 static int ixl_queue_intr(void *);
832 static int ixl_other_intr(void *);
833 static void ixl_handle_queue(void *);
834 static void ixl_handle_queue_wk(struct work *, void *);
835 static void ixl_sched_handle_queue(struct ixl_softc *,
836 struct ixl_queue_pair *);
837 static int ixl_init(struct ifnet *);
838 static int ixl_init_locked(struct ixl_softc *);
839 static void ixl_stop(struct ifnet *, int);
840 static void ixl_stop_locked(struct ixl_softc *);
841 static int ixl_iff(struct ixl_softc *);
842 static int ixl_ifflags_cb(struct ethercom *);
843 static int ixl_setup_interrupts(struct ixl_softc *);
844 static int ixl_establish_intx(struct ixl_softc *);
845 static int ixl_establish_msix(struct ixl_softc *);
846 static void ixl_enable_queue_intr(struct ixl_softc *,
847 struct ixl_queue_pair *);
848 static void ixl_disable_queue_intr(struct ixl_softc *,
849 struct ixl_queue_pair *);
850 static void ixl_enable_other_intr(struct ixl_softc *);
851 static void ixl_disable_other_intr(struct ixl_softc *);
852 static void ixl_config_queue_intr(struct ixl_softc *);
853 static void ixl_config_other_intr(struct ixl_softc *);
854
855 static struct ixl_tx_ring *
856 ixl_txr_alloc(struct ixl_softc *, unsigned int);
857 static void ixl_txr_qdis(struct ixl_softc *, struct ixl_tx_ring *, int);
858 static void ixl_txr_config(struct ixl_softc *, struct ixl_tx_ring *);
859 static int ixl_txr_enabled(struct ixl_softc *, struct ixl_tx_ring *);
860 static int ixl_txr_disabled(struct ixl_softc *, struct ixl_tx_ring *);
861 static void ixl_txr_unconfig(struct ixl_softc *, struct ixl_tx_ring *);
862 static void ixl_txr_clean(struct ixl_softc *, struct ixl_tx_ring *);
863 static void ixl_txr_free(struct ixl_softc *, struct ixl_tx_ring *);
864 static int ixl_txeof(struct ixl_softc *, struct ixl_tx_ring *, u_int);
865
866 static struct ixl_rx_ring *
867 ixl_rxr_alloc(struct ixl_softc *, unsigned int);
868 static void ixl_rxr_config(struct ixl_softc *, struct ixl_rx_ring *);
869 static int ixl_rxr_enabled(struct ixl_softc *, struct ixl_rx_ring *);
870 static int ixl_rxr_disabled(struct ixl_softc *, struct ixl_rx_ring *);
871 static void ixl_rxr_unconfig(struct ixl_softc *, struct ixl_rx_ring *);
872 static void ixl_rxr_clean(struct ixl_softc *, struct ixl_rx_ring *);
873 static void ixl_rxr_free(struct ixl_softc *, struct ixl_rx_ring *);
874 static int ixl_rxeof(struct ixl_softc *, struct ixl_rx_ring *, u_int);
875 static int ixl_rxfill(struct ixl_softc *, struct ixl_rx_ring *);
876
877 static struct workqueue *
878 ixl_workq_create(const char *, pri_t, int, int);
879 static void ixl_workq_destroy(struct workqueue *);
880 static int ixl_workqs_teardown(device_t);
881 static void ixl_work_set(struct ixl_work *, void (*)(void *), void *);
882 static void ixl_work_add(struct workqueue *, struct ixl_work *);
883 static void ixl_work_wait(struct workqueue *, struct ixl_work *);
884 static void ixl_workq_work(struct work *, void *);
885 static const struct ixl_product *
886 ixl_lookup(const struct pci_attach_args *pa);
887 static void ixl_link_state_update(struct ixl_softc *,
888 const struct ixl_aq_desc *);
889 static int ixl_vlan_cb(struct ethercom *, uint16_t, bool);
890 static int ixl_setup_vlan_hwfilter(struct ixl_softc *);
891 static void ixl_teardown_vlan_hwfilter(struct ixl_softc *);
892 static int ixl_update_macvlan(struct ixl_softc *);
894 static void ixl_teardown_interrupts(struct ixl_softc *);
895 static int ixl_setup_stats(struct ixl_softc *);
896 static void ixl_teardown_stats(struct ixl_softc *);
897 static void ixl_stats_callout(void *);
898 static void ixl_stats_update(void *);
899 static int ixl_setup_sysctls(struct ixl_softc *);
900 static void ixl_teardown_sysctls(struct ixl_softc *);
901 static int ixl_queue_pairs_alloc(struct ixl_softc *);
902 static void ixl_queue_pairs_free(struct ixl_softc *);
903
904 static const struct ixl_phy_type ixl_phy_type_map[] = {
905 { 1ULL << IXL_PHY_TYPE_SGMII, IFM_1000_SGMII },
906 { 1ULL << IXL_PHY_TYPE_1000BASE_KX, IFM_1000_KX },
907 { 1ULL << IXL_PHY_TYPE_10GBASE_KX4, IFM_10G_KX4 },
908 { 1ULL << IXL_PHY_TYPE_10GBASE_KR, IFM_10G_KR },
909 { 1ULL << IXL_PHY_TYPE_40GBASE_KR4, IFM_40G_KR4 },
910 { 1ULL << IXL_PHY_TYPE_XAUI |
911 1ULL << IXL_PHY_TYPE_XFI, IFM_10G_CX4 },
912 { 1ULL << IXL_PHY_TYPE_SFI, IFM_10G_SFI },
913 { 1ULL << IXL_PHY_TYPE_XLAUI |
914 1ULL << IXL_PHY_TYPE_XLPPI, IFM_40G_XLPPI },
915 { 1ULL << IXL_PHY_TYPE_40GBASE_CR4_CU |
916 1ULL << IXL_PHY_TYPE_40GBASE_CR4, IFM_40G_CR4 },
917 { 1ULL << IXL_PHY_TYPE_10GBASE_CR1_CU |
918 1ULL << IXL_PHY_TYPE_10GBASE_CR1, IFM_10G_CR1 },
919 { 1ULL << IXL_PHY_TYPE_10GBASE_AOC, IFM_10G_AOC },
920 { 1ULL << IXL_PHY_TYPE_40GBASE_AOC, IFM_40G_AOC },
921 { 1ULL << IXL_PHY_TYPE_100BASE_TX, IFM_100_TX },
922 { 1ULL << IXL_PHY_TYPE_1000BASE_T_OPTICAL |
923 1ULL << IXL_PHY_TYPE_1000BASE_T, IFM_1000_T },
924 { 1ULL << IXL_PHY_TYPE_10GBASE_T, IFM_10G_T },
925 { 1ULL << IXL_PHY_TYPE_10GBASE_SR, IFM_10G_SR },
926 { 1ULL << IXL_PHY_TYPE_10GBASE_LR, IFM_10G_LR },
927 { 1ULL << IXL_PHY_TYPE_10GBASE_SFPP_CU, IFM_10G_TWINAX },
928 { 1ULL << IXL_PHY_TYPE_40GBASE_SR4, IFM_40G_SR4 },
929 { 1ULL << IXL_PHY_TYPE_40GBASE_LR4, IFM_40G_LR4 },
930 { 1ULL << IXL_PHY_TYPE_1000BASE_SX, IFM_1000_SX },
931 { 1ULL << IXL_PHY_TYPE_1000BASE_LX, IFM_1000_LX },
932 { 1ULL << IXL_PHY_TYPE_20GBASE_KR2, IFM_20G_KR2 },
933 { 1ULL << IXL_PHY_TYPE_25GBASE_KR, IFM_25G_KR },
934 { 1ULL << IXL_PHY_TYPE_25GBASE_CR, IFM_25G_CR },
935 { 1ULL << IXL_PHY_TYPE_25GBASE_SR, IFM_25G_SR },
936 { 1ULL << IXL_PHY_TYPE_25GBASE_LR, IFM_25G_LR },
937 { 1ULL << IXL_PHY_TYPE_25GBASE_AOC, IFM_25G_AOC },
938 { 1ULL << IXL_PHY_TYPE_25GBASE_ACC, IFM_25G_CR },
939 };
940
941 static const struct ixl_speed_type ixl_speed_type_map[] = {
942 { IXL_AQ_LINK_SPEED_40GB, IF_Gbps(40) },
943 { IXL_AQ_LINK_SPEED_25GB, IF_Gbps(25) },
944 { IXL_AQ_LINK_SPEED_10GB, IF_Gbps(10) },
945 { IXL_AQ_LINK_SPEED_1000MB, IF_Mbps(1000) },
946 { IXL_AQ_LINK_SPEED_100MB, IF_Mbps(100)},
947 };
948
949 static const struct ixl_aq_regs ixl_pf_aq_regs = {
950 .atq_tail = I40E_PF_ATQT,
951 .atq_tail_mask = I40E_PF_ATQT_ATQT_MASK,
952 .atq_head = I40E_PF_ATQH,
953 .atq_head_mask = I40E_PF_ATQH_ATQH_MASK,
954 .atq_len = I40E_PF_ATQLEN,
955 .atq_bal = I40E_PF_ATQBAL,
956 .atq_bah = I40E_PF_ATQBAH,
957 .atq_len_enable = I40E_PF_ATQLEN_ATQENABLE_MASK,
958
959 .arq_tail = I40E_PF_ARQT,
960 .arq_tail_mask = I40E_PF_ARQT_ARQT_MASK,
961 .arq_head = I40E_PF_ARQH,
962 .arq_head_mask = I40E_PF_ARQH_ARQH_MASK,
963 .arq_len = I40E_PF_ARQLEN,
964 .arq_bal = I40E_PF_ARQBAL,
965 .arq_bah = I40E_PF_ARQBAH,
966 .arq_len_enable = I40E_PF_ARQLEN_ARQENABLE_MASK,
967 };
968
969 #define ixl_rd(_s, _r) \
970 bus_space_read_4((_s)->sc_memt, (_s)->sc_memh, (_r))
971 #define ixl_wr(_s, _r, _v) \
972 bus_space_write_4((_s)->sc_memt, (_s)->sc_memh, (_r), (_v))
973 #define ixl_barrier(_s, _r, _l, _o) \
974 bus_space_barrier((_s)->sc_memt, (_s)->sc_memh, (_r), (_l), (_o))
975 #define ixl_flush(_s) (void)ixl_rd((_s), I40E_GLGEN_STAT)
976 #define ixl_nqueues(_sc) (1 << ((_sc)->sc_nqueue_pairs - 1))
977
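/*
 * Split a 64-bit DMA address into the 32-bit halves that are written to the
 * hardware's *_bah/*_bal (high/low base address) registers.  The high half
 * is zero when bus addresses are only 32 bits wide.
 */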
978 static inline uint32_t
979 ixl_dmamem_hi(struct ixl_dmamem *ixm)
980 {
981 uint32_t retval;
982 uint64_t val;
983
984 if (sizeof(IXL_DMA_DVA(ixm)) > 4) {
985 val = (intptr_t)IXL_DMA_DVA(ixm);
986 retval = (uint32_t)(val >> 32);
987 } else {
988 retval = 0;
989 }
990
991 return retval;
992 }
993
994 static inline uint32_t
995 ixl_dmamem_lo(struct ixl_dmamem *ixm)
996 {
997
998 return (uint32_t)IXL_DMA_DVA(ixm);
999 }
1000
1001 static inline void
1002 ixl_aq_dva(struct ixl_aq_desc *iaq, bus_addr_t addr)
1003 {
1004 uint64_t val;
1005
1006 if (sizeof(addr) > 4) {
1007 val = (intptr_t)addr;
1008 iaq->iaq_param[2] = htole32(val >> 32);
1009 } else {
1010 iaq->iaq_param[2] = htole32(0);
1011 }
1012
1013 iaq->iaq_param[3] = htole32(addr);
1014 }
1015
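/*
 * Count the RX descriptors between prod and cons that have not been
 * refreshed yet.  One slot is always kept unused so that a completely
 * filled ring can be distinguished from an empty one.
 */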
1016 static inline unsigned int
1017 ixl_rxr_unrefreshed(unsigned int prod, unsigned int cons, unsigned int ndescs)
1018 {
1019 unsigned int num;
1020
1021 if (prod < cons)
1022 num = cons - prod;
1023 else
1024 num = (ndescs - prod) + cons;
1025
1026 if (__predict_true(num > 0)) {
 1027 		/* device cannot receive packets if all descriptors are filled */
1028 num -= 1;
1029 }
1030
1031 return num;
1032 }
1033
1034 CFATTACH_DECL3_NEW(ixl, sizeof(struct ixl_softc),
1035 ixl_match, ixl_attach, ixl_detach, NULL, NULL, NULL,
1036 DVF_DETACH_SHUTDOWN);
1037
1038 static const struct ixl_product ixl_products[] = {
1039 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_SFP },
1040 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_KX_B },
1041 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_KX_C },
1042 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_A },
1043 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_B },
1044 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_C },
1045 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_T },
1046 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_1 },
1047 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_2 },
1048 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_T4_10G },
1049 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_BP },
1050 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_SFP28 },
1051 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_KX },
1052 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_QSFP },
1053 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_SFP },
1054 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_1G_BASET },
1055 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_10G_BASET },
1056 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_I_SFP },
1057 /* required last entry */
1058 {0, 0}
1059 };
1060
1061 static const struct ixl_product *
1062 ixl_lookup(const struct pci_attach_args *pa)
1063 {
1064 const struct ixl_product *ixlp;
1065
1066 for (ixlp = ixl_products; ixlp->vendor_id != 0; ixlp++) {
1067 if (PCI_VENDOR(pa->pa_id) == ixlp->vendor_id &&
1068 PCI_PRODUCT(pa->pa_id) == ixlp->product_id)
1069 return ixlp;
1070 }
1071
1072 return NULL;
1073 }
1074
1075 static int
1076 ixl_match(device_t parent, cfdata_t match, void *aux)
1077 {
1078 const struct pci_attach_args *pa = aux;
1079
1080 return (ixl_lookup(pa) != NULL) ? 1 : 0;
1081 }
1082
1083 static void
1084 ixl_attach(device_t parent, device_t self, void *aux)
1085 {
1086 struct ixl_softc *sc;
1087 struct pci_attach_args *pa = aux;
1088 struct ifnet *ifp;
1089 pcireg_t memtype;
1090 uint32_t firstq, port, ari, func;
1091 char xnamebuf[32];
1092 int tries, rv, link;
1093
1094 sc = device_private(self);
1095 sc->sc_dev = self;
1096 ifp = &sc->sc_ec.ec_if;
1097
1098 sc->sc_pa = *pa;
1099 sc->sc_dmat = (pci_dma64_available(pa)) ?
1100 pa->pa_dmat64 : pa->pa_dmat;
1101 sc->sc_aq_regs = &ixl_pf_aq_regs;
1102
1103 sc->sc_mac_type = ixl_mactype(PCI_PRODUCT(pa->pa_id));
1104
1105 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IXL_PCIREG);
1106 if (pci_mapreg_map(pa, IXL_PCIREG, memtype, 0,
1107 &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems)) {
1108 aprint_error(": unable to map registers\n");
1109 return;
1110 }
1111
1112 mutex_init(&sc->sc_cfg_lock, MUTEX_DEFAULT, IPL_SOFTNET);
1113
1114 firstq = ixl_rd(sc, I40E_PFLAN_QALLOC);
1115 firstq &= I40E_PFLAN_QALLOC_FIRSTQ_MASK;
1116 firstq >>= I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
1117 sc->sc_base_queue = firstq;
1118
1119 ixl_clear_hw(sc);
1120 if (ixl_pf_reset(sc) == -1) {
 1121 		/* error printed by ixl_pf_reset */
1122 goto unmap;
1123 }
1124
1125 port = ixl_rd(sc, I40E_PFGEN_PORTNUM);
1126 port &= I40E_PFGEN_PORTNUM_PORT_NUM_MASK;
1127 port >>= I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
1128 sc->sc_port = port;
1129 aprint_normal(": port %u", sc->sc_port);
1130
1131 ari = ixl_rd(sc, I40E_GLPCI_CAPSUP);
1132 ari &= I40E_GLPCI_CAPSUP_ARI_EN_MASK;
1133 ari >>= I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
1134
1135 func = ixl_rd(sc, I40E_PF_FUNC_RID);
1136 sc->sc_pf_id = func & (ari ? 0xff : 0x7);
1137
1138 /* initialise the adminq */
1139
1140 mutex_init(&sc->sc_atq_lock, MUTEX_DEFAULT, IPL_NET);
1141
1142 if (ixl_dmamem_alloc(sc, &sc->sc_atq,
1143 sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
1144 aprint_error("\n" "%s: unable to allocate atq\n",
1145 device_xname(self));
1146 goto unmap;
1147 }
1148
1149 SIMPLEQ_INIT(&sc->sc_arq_idle);
1150 ixl_work_set(&sc->sc_arq_task, ixl_arq, sc);
1151 sc->sc_arq_cons = 0;
1152 sc->sc_arq_prod = 0;
1153
1154 if (ixl_dmamem_alloc(sc, &sc->sc_arq,
1155 sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
1156 aprint_error("\n" "%s: unable to allocate arq\n",
1157 device_xname(self));
1158 goto free_atq;
1159 }
1160
1161 if (!ixl_arq_fill(sc)) {
1162 aprint_error("\n" "%s: unable to fill arq descriptors\n",
1163 device_xname(self));
1164 goto free_arq;
1165 }
1166
1167 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1168 0, IXL_DMA_LEN(&sc->sc_atq),
1169 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1170
1171 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1172 0, IXL_DMA_LEN(&sc->sc_arq),
1173 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1174
1175 for (tries = 0; tries < 10; tries++) {
1176 sc->sc_atq_cons = 0;
1177 sc->sc_atq_prod = 0;
1178
1179 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1180 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1181 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1182 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1183
1184 ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
1185
1186 ixl_wr(sc, sc->sc_aq_regs->atq_bal,
1187 ixl_dmamem_lo(&sc->sc_atq));
1188 ixl_wr(sc, sc->sc_aq_regs->atq_bah,
1189 ixl_dmamem_hi(&sc->sc_atq));
1190 ixl_wr(sc, sc->sc_aq_regs->atq_len,
1191 sc->sc_aq_regs->atq_len_enable | IXL_AQ_NUM);
1192
1193 ixl_wr(sc, sc->sc_aq_regs->arq_bal,
1194 ixl_dmamem_lo(&sc->sc_arq));
1195 ixl_wr(sc, sc->sc_aq_regs->arq_bah,
1196 ixl_dmamem_hi(&sc->sc_arq));
1197 ixl_wr(sc, sc->sc_aq_regs->arq_len,
1198 sc->sc_aq_regs->arq_len_enable | IXL_AQ_NUM);
1199
1200 rv = ixl_get_version(sc);
1201 if (rv == 0)
1202 break;
1203 if (rv != ETIMEDOUT) {
1204 aprint_error(", unable to get firmware version\n");
1205 goto shutdown;
1206 }
1207
1208 delaymsec(100);
1209 }
1210
1211 ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
1212
1213 if (ixl_dmamem_alloc(sc, &sc->sc_aqbuf, IXL_AQ_BUFLEN, 0) != 0) {
1214 aprint_error_dev(self, ", unable to allocate nvm buffer\n");
1215 goto shutdown;
1216 }
1217
1218 ixl_get_nvm_version(sc);
1219
1220 if (sc->sc_mac_type == I40E_MAC_X722)
1221 sc->sc_nqueue_pairs_device = IXL_QUEUE_MAX_X722;
1222 else
1223 sc->sc_nqueue_pairs_device = IXL_QUEUE_MAX_XL710;
1224
1225 rv = ixl_get_hw_capabilities(sc);
1226 if (rv != 0) {
1227 aprint_error(", GET HW CAPABILITIES %s\n",
1228 rv == ETIMEDOUT ? "timeout" : "error");
1229 goto free_aqbuf;
1230 }
1231
1232 sc->sc_nqueue_pairs_max = MIN((int)sc->sc_nqueue_pairs_device, ncpu);
1233 if (ixl_param_nqps_limit > 0) {
1234 sc->sc_nqueue_pairs_max = MIN((int)sc->sc_nqueue_pairs_max,
1235 ixl_param_nqps_limit);
1236 }
1237
1238 sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max;
1239 sc->sc_tx_ring_ndescs = ixl_param_tx_ndescs;
1240 sc->sc_rx_ring_ndescs = ixl_param_rx_ndescs;
1241
1242 KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_rx_ring_ndescs);
1243 KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_tx_ring_ndescs);
1244
1245 if (ixl_get_mac(sc) != 0) {
1246 /* error printed by ixl_get_mac */
1247 goto free_aqbuf;
1248 }
1249
1250 aprint_normal("\n");
1251 aprint_naive("\n");
1252
1253 aprint_normal_dev(self, "Ethernet address %s\n",
1254 ether_sprintf(sc->sc_enaddr));
1255
1256 rv = ixl_pxe_clear(sc);
1257 if (rv != 0) {
1258 aprint_debug_dev(self, "CLEAR PXE MODE %s\n",
1259 rv == ETIMEDOUT ? "timeout" : "error");
1260 }
1261
1262 ixl_set_filter_control(sc);
1263
1264 if (ixl_hmc(sc) != 0) {
1265 /* error printed by ixl_hmc */
1266 goto free_aqbuf;
1267 }
1268
1269 if (ixl_lldp_shut(sc) != 0) {
1270 /* error printed by ixl_lldp_shut */
1271 goto free_hmc;
1272 }
1273
1274 if (ixl_phy_mask_ints(sc) != 0) {
1275 /* error printed by ixl_phy_mask_ints */
1276 goto free_hmc;
1277 }
1278
1279 if (ixl_restart_an(sc) != 0) {
1280 /* error printed by ixl_restart_an */
1281 goto free_hmc;
1282 }
1283
1284 if (ixl_get_switch_config(sc) != 0) {
1285 /* error printed by ixl_get_switch_config */
1286 goto free_hmc;
1287 }
1288
1289 rv = ixl_get_link_status_poll(sc, NULL);
1290 if (rv != 0) {
1291 aprint_error_dev(self, "GET LINK STATUS %s\n",
1292 rv == ETIMEDOUT ? "timeout" : "error");
1293 goto free_hmc;
1294 }
1295
1296 /*
 1297 	 * The FW often returns EIO for the "Get PHY Abilities" command
 1298 	 * if it is issued without a delay
1299 */
1300 DELAY(500);
1301 if (ixl_get_phy_info(sc) != 0) {
1302 /* error printed by ixl_get_phy_info */
1303 goto free_hmc;
1304 }
1305
1306 if (ixl_dmamem_alloc(sc, &sc->sc_scratch,
1307 sizeof(struct ixl_aq_vsi_data), 8) != 0) {
1308 aprint_error_dev(self, "unable to allocate scratch buffer\n");
1309 goto free_hmc;
1310 }
1311
1312 rv = ixl_get_vsi(sc);
1313 if (rv != 0) {
1314 aprint_error_dev(self, "GET VSI %s %d\n",
1315 rv == ETIMEDOUT ? "timeout" : "error", rv);
1316 goto free_scratch;
1317 }
1318
1319 rv = ixl_set_vsi(sc);
1320 if (rv != 0) {
1321 aprint_error_dev(self, "UPDATE VSI error %s %d\n",
1322 rv == ETIMEDOUT ? "timeout" : "error", rv);
1323 goto free_scratch;
1324 }
1325
1326 if (ixl_queue_pairs_alloc(sc) != 0) {
1327 /* error printed by ixl_queue_pairs_alloc */
1328 goto free_scratch;
1329 }
1330
1331 if (ixl_setup_interrupts(sc) != 0) {
1332 /* error printed by ixl_setup_interrupts */
1333 goto free_queue_pairs;
1334 }
1335
1336 if (ixl_setup_stats(sc) != 0) {
1337 aprint_error_dev(self, "failed to setup event counters\n");
1338 goto teardown_intrs;
1339 }
1340
1341 if (ixl_setup_sysctls(sc) != 0) {
1342 /* error printed by ixl_setup_sysctls */
1343 goto teardown_stats;
1344 }
1345
1346 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_cfg", device_xname(self));
1347 sc->sc_workq = ixl_workq_create(xnamebuf, IXL_WORKQUEUE_PRI,
1348 IPL_NET, WQ_MPSAFE);
1349 if (sc->sc_workq == NULL)
1350 goto teardown_sysctls;
1351
1352 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_txrx", device_xname(self));
1353 rv = workqueue_create(&sc->sc_workq_txrx, xnamebuf, ixl_handle_queue_wk,
1354 sc, IXL_WORKQUEUE_PRI, IPL_NET, WQ_PERCPU | WQ_MPSAFE);
1355 if (rv != 0) {
1356 sc->sc_workq_txrx = NULL;
1357 goto teardown_wqs;
1358 }
1359
1360 snprintf(xnamebuf, sizeof(xnamebuf), "%s_atq_cv", device_xname(self));
1361 cv_init(&sc->sc_atq_cv, xnamebuf);
1362
1363 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
1364
1365 ifp->if_softc = sc;
1366 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1367 ifp->if_extflags = IFEF_MPSAFE;
1368 ifp->if_ioctl = ixl_ioctl;
1369 ifp->if_start = ixl_start;
1370 ifp->if_transmit = ixl_transmit;
1371 ifp->if_watchdog = ixl_watchdog;
1372 ifp->if_init = ixl_init;
1373 ifp->if_stop = ixl_stop;
1374 IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_ndescs);
1375 IFQ_SET_READY(&ifp->if_snd);
1376 ifp->if_capabilities |= IXL_IFCAP_RXCSUM;
1377 ifp->if_capabilities |= IXL_IFCAP_TXCSUM;
1378 #if 0
1379 ifp->if_capabilities |= IFCAP_TSOv4 | IFCAP_TSOv6;
1380 #endif
1381 ether_set_vlan_cb(&sc->sc_ec, ixl_vlan_cb);
1382 sc->sc_ec.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1383 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
1384 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
1385
1386 sc->sc_ec.ec_capenable = sc->sc_ec.ec_capabilities;
1387 /* Disable VLAN_HWFILTER by default */
1388 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
1389
1390 sc->sc_cur_ec_capenable = sc->sc_ec.ec_capenable;
1391
1392 sc->sc_ec.ec_ifmedia = &sc->sc_media;
1393 ifmedia_init(&sc->sc_media, IFM_IMASK, ixl_media_change,
1394 ixl_media_status);
1395
1396 ixl_media_add(sc);
1397 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1398 if (ISSET(sc->sc_phy_abilities,
1399 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX))) {
1400 ifmedia_add(&sc->sc_media,
1401 IFM_ETHER | IFM_AUTO | IFM_FLOW, 0, NULL);
1402 }
1403 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_NONE, 0, NULL);
1404 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
1405
1406 if_attach(ifp);
1407 if_deferred_start_init(ifp, NULL);
1408 ether_ifattach(ifp, sc->sc_enaddr);
1409 ether_set_ifflags_cb(&sc->sc_ec, ixl_ifflags_cb);
1410
1411 rv = ixl_get_link_status_poll(sc, &link);
1412 if (rv != 0)
1413 link = LINK_STATE_UNKNOWN;
1414 if_link_state_change(ifp, link);
1415
1416 ixl_work_set(&sc->sc_link_state_task, ixl_get_link_status, sc);
1417
1418 ixl_config_other_intr(sc);
1419 ixl_enable_other_intr(sc);
1420
1421 ixl_set_phy_autoselect(sc);
1422
1423 /* remove default mac filter and replace it so we can see vlans */
1424 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, 0, 0);
1425 if (rv != ENOENT) {
1426 aprint_debug_dev(self,
1427 "unable to remove macvlan %u\n", rv);
1428 }
1429 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
1430 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1431 if (rv != ENOENT) {
1432 aprint_debug_dev(self,
1433 "unable to remove macvlan, ignore vlan %u\n", rv);
1434 }
1435
1436 if (ixl_update_macvlan(sc) != 0) {
1437 aprint_debug_dev(self,
1438 "couldn't enable vlan hardware filter\n");
1439 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
1440 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER);
1441 }
1442
1443 sc->sc_txrx_workqueue = true;
1444 sc->sc_tx_process_limit = IXL_TX_PROCESS_LIMIT;
1445 sc->sc_rx_process_limit = IXL_RX_PROCESS_LIMIT;
1446 sc->sc_tx_intr_process_limit = IXL_TX_INTR_PROCESS_LIMIT;
1447 sc->sc_rx_intr_process_limit = IXL_RX_INTR_PROCESS_LIMIT;
1448
1449 ixl_stats_update(sc);
1450 sc->sc_stats_counters.isc_has_offset = true;
1451 callout_schedule(&sc->sc_stats_callout, mstohz(sc->sc_stats_intval));
1452
1453 if (pmf_device_register(self, NULL, NULL) != true)
1454 aprint_debug_dev(self, "couldn't establish power handler\n");
1455 sc->sc_attached = true;
1456 return;
1457
1458 teardown_wqs:
1459 config_finalize_register(self, ixl_workqs_teardown);
1460 teardown_sysctls:
1461 ixl_teardown_sysctls(sc);
1462 teardown_stats:
1463 ixl_teardown_stats(sc);
1464 teardown_intrs:
1465 ixl_teardown_interrupts(sc);
1466 free_queue_pairs:
1467 ixl_queue_pairs_free(sc);
1468 free_scratch:
1469 ixl_dmamem_free(sc, &sc->sc_scratch);
1470 free_hmc:
1471 ixl_hmc_free(sc);
1472 free_aqbuf:
1473 ixl_dmamem_free(sc, &sc->sc_aqbuf);
1474 shutdown:
1475 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1476 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1477 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1478 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1479
1480 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0);
1481 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0);
1482 ixl_wr(sc, sc->sc_aq_regs->atq_len, 0);
1483
1484 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0);
1485 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0);
1486 ixl_wr(sc, sc->sc_aq_regs->arq_len, 0);
1487
1488 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1489 0, IXL_DMA_LEN(&sc->sc_arq),
1490 BUS_DMASYNC_POSTREAD);
1491 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1492 0, IXL_DMA_LEN(&sc->sc_atq),
1493 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1494
1495 ixl_arq_unfill(sc);
1496 free_arq:
1497 ixl_dmamem_free(sc, &sc->sc_arq);
1498 free_atq:
1499 ixl_dmamem_free(sc, &sc->sc_atq);
1500 unmap:
1501 mutex_destroy(&sc->sc_atq_lock);
1502 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
1503 mutex_destroy(&sc->sc_cfg_lock);
1504 sc->sc_mems = 0;
1505
1506 sc->sc_attached = false;
1507 }
1508
1509 static int
1510 ixl_detach(device_t self, int flags)
1511 {
1512 struct ixl_softc *sc = device_private(self);
1513 struct ifnet *ifp = &sc->sc_ec.ec_if;
1514
1515 if (!sc->sc_attached)
1516 return 0;
1517
1518 ixl_stop(ifp, 1);
1519
1520 ixl_disable_other_intr(sc);
1521
1522 callout_stop(&sc->sc_stats_callout);
1523 ixl_work_wait(sc->sc_workq, &sc->sc_stats_task);
1524
1525 /* wait for ATQ handler */
1526 mutex_enter(&sc->sc_atq_lock);
1527 mutex_exit(&sc->sc_atq_lock);
1528
1529 ixl_work_wait(sc->sc_workq, &sc->sc_arq_task);
1530 ixl_work_wait(sc->sc_workq, &sc->sc_link_state_task);
1531
1532 if (sc->sc_workq != NULL) {
1533 ixl_workq_destroy(sc->sc_workq);
1534 sc->sc_workq = NULL;
1535 }
1536
1537 if (sc->sc_workq_txrx != NULL) {
1538 workqueue_destroy(sc->sc_workq_txrx);
1539 sc->sc_workq_txrx = NULL;
1540 }
1541
1542 ether_ifdetach(ifp);
1543 if_detach(ifp);
1544 ifmedia_fini(&sc->sc_media);
1545
1546 ixl_teardown_interrupts(sc);
1547 ixl_teardown_stats(sc);
1548 ixl_teardown_sysctls(sc);
1549
1550 ixl_queue_pairs_free(sc);
1551
1552 ixl_dmamem_free(sc, &sc->sc_scratch);
1553 ixl_hmc_free(sc);
1554
1555 /* shutdown */
1556 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1557 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1558 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1559 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1560
1561 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0);
1562 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0);
1563 ixl_wr(sc, sc->sc_aq_regs->atq_len, 0);
1564
1565 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0);
1566 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0);
1567 ixl_wr(sc, sc->sc_aq_regs->arq_len, 0);
1568
1569 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1570 0, IXL_DMA_LEN(&sc->sc_arq),
1571 BUS_DMASYNC_POSTREAD);
1572 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1573 0, IXL_DMA_LEN(&sc->sc_atq),
1574 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1575
1576 ixl_arq_unfill(sc);
1577
1578 ixl_dmamem_free(sc, &sc->sc_arq);
1579 ixl_dmamem_free(sc, &sc->sc_atq);
1580 ixl_dmamem_free(sc, &sc->sc_aqbuf);
1581
1582 cv_destroy(&sc->sc_atq_cv);
1583 mutex_destroy(&sc->sc_atq_lock);
1584
1585 if (sc->sc_mems != 0) {
1586 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
1587 sc->sc_mems = 0;
1588 }
1589
1590 mutex_destroy(&sc->sc_cfg_lock);
1591
1592 return 0;
1593 }
1594
1595 static int
1596 ixl_workqs_teardown(device_t self)
1597 {
1598 struct ixl_softc *sc = device_private(self);
1599
1600 if (sc->sc_workq != NULL) {
1601 ixl_workq_destroy(sc->sc_workq);
1602 sc->sc_workq = NULL;
1603 }
1604
1605 if (sc->sc_workq_txrx != NULL) {
1606 workqueue_destroy(sc->sc_workq_txrx);
1607 sc->sc_workq_txrx = NULL;
1608 }
1609
1610 return 0;
1611 }
1612
1613 static int
1614 ixl_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
1615 {
1616 struct ifnet *ifp = &ec->ec_if;
1617 struct ixl_softc *sc = ifp->if_softc;
1618 int rv;
1619
1620 if (!ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)) {
1621 return 0;
1622 }
1623
1624 if (set) {
1625 rv = ixl_add_macvlan(sc, sc->sc_enaddr, vid,
1626 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
1627 if (rv == 0) {
1628 rv = ixl_add_macvlan(sc, etherbroadcastaddr,
1629 vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
1630 }
1631 } else {
1632 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, vid,
1633 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
1634 (void)ixl_remove_macvlan(sc, etherbroadcastaddr, vid,
1635 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
1636 }
1637
1638 return rv;
1639 }
1640
1641 static void
1642 ixl_media_add(struct ixl_softc *sc)
1643 {
1644 struct ifmedia *ifm = &sc->sc_media;
1645 const struct ixl_phy_type *itype;
1646 unsigned int i;
1647 bool flow;
1648
1649 if (ISSET(sc->sc_phy_abilities,
1650 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX))) {
1651 flow = true;
1652 } else {
1653 flow = false;
1654 }
1655
1656 for (i = 0; i < __arraycount(ixl_phy_type_map); i++) {
1657 itype = &ixl_phy_type_map[i];
1658
1659 if (ISSET(sc->sc_phy_types, itype->phy_type)) {
1660 ifmedia_add(ifm,
1661 IFM_ETHER | IFM_FDX | itype->ifm_type, 0, NULL);
1662
1663 if (flow) {
1664 ifmedia_add(ifm,
1665 IFM_ETHER | IFM_FDX | IFM_FLOW |
1666 itype->ifm_type, 0, NULL);
1667 }
1668
1669 if (itype->ifm_type != IFM_100_TX)
1670 continue;
1671
1672 ifmedia_add(ifm, IFM_ETHER | itype->ifm_type,
1673 0, NULL);
1674 if (flow) {
1675 ifmedia_add(ifm,
1676 IFM_ETHER | IFM_FLOW | itype->ifm_type,
1677 0, NULL);
1678 }
1679 }
1680 }
1681 }
1682
1683 static void
1684 ixl_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1685 {
1686 struct ixl_softc *sc = ifp->if_softc;
1687
1688 ifmr->ifm_status = sc->sc_media_status;
1689 ifmr->ifm_active = sc->sc_media_active;
1690
1691 mutex_enter(&sc->sc_cfg_lock);
1692 if (ifp->if_link_state == LINK_STATE_UP)
1693 SET(ifmr->ifm_status, IFM_ACTIVE);
1694 mutex_exit(&sc->sc_cfg_lock);
1695 }
1696
1697 static int
1698 ixl_media_change(struct ifnet *ifp)
1699 {
1700 struct ixl_softc *sc = ifp->if_softc;
1701 struct ifmedia *ifm = &sc->sc_media;
1702 uint64_t ifm_active = sc->sc_media_active;
1703 uint8_t link_speed, abilities;
1704
1705 switch (IFM_SUBTYPE(ifm_active)) {
1706 case IFM_1000_SGMII:
1707 case IFM_1000_KX:
1708 case IFM_10G_KX4:
1709 case IFM_10G_KR:
1710 case IFM_40G_KR4:
1711 case IFM_20G_KR2:
1712 case IFM_25G_KR:
1713 /* backplanes */
1714 return EINVAL;
1715 }
1716
1717 abilities = IXL_PHY_ABILITY_AUTONEGO | IXL_PHY_ABILITY_LINKUP;
1718
1719 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1720 case IFM_AUTO:
1721 link_speed = sc->sc_phy_linkspeed;
1722 break;
1723 case IFM_NONE:
1724 link_speed = 0;
1725 CLR(abilities, IXL_PHY_ABILITY_LINKUP);
1726 break;
1727 default:
1728 link_speed = ixl_search_baudrate(
1729 ifmedia_baudrate(ifm->ifm_media));
1730 }
1731
1732 if (ISSET(abilities, IXL_PHY_ABILITY_LINKUP)) {
1733 if (ISSET(link_speed, sc->sc_phy_linkspeed) == 0)
1734 return EINVAL;
1735 }
1736
1737 if (ifm->ifm_media & IFM_FLOW) {
1738 abilities |= sc->sc_phy_abilities &
1739 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX);
1740 }
1741
1742 return ixl_set_phy_config(sc, link_speed, abilities, false);
1743 }
1744
1745 static void
1746 ixl_watchdog(struct ifnet *ifp)
1747 {
1748
1749 }
1750
1751 static void
1752 ixl_del_all_multiaddr(struct ixl_softc *sc)
1753 {
1754 struct ethercom *ec = &sc->sc_ec;
1755 struct ether_multi *enm;
1756 struct ether_multistep step;
1757
1758 ETHER_LOCK(ec);
1759 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1760 ETHER_NEXT_MULTI(step, enm)) {
1761 ixl_remove_macvlan(sc, enm->enm_addrlo, 0,
1762 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1763 }
1764 ETHER_UNLOCK(ec);
1765 }
1766
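/*
 * ixl_add_multi:
 *	install a multicast filter.  If a range was requested or the
 *	filter table is full (ENOSPC), drop the individual filters and
 *	fall back to IFF_ALLMULTI, returning ENETRESET so that the
 *	caller reprograms the promiscuous flags via ixl_iff().
 */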
1767 static int
1768 ixl_add_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
1769 {
1770 struct ifnet *ifp = &sc->sc_ec.ec_if;
1771 int rv;
1772
1773 if (ISSET(ifp->if_flags, IFF_ALLMULTI))
1774 return 0;
1775
1776 if (memcmp(addrlo, addrhi, ETHER_ADDR_LEN) != 0) {
1777 ixl_del_all_multiaddr(sc);
1778 SET(ifp->if_flags, IFF_ALLMULTI);
1779 return ENETRESET;
1780 }
1781
1782 	/* multicast addresses cannot use the VLAN HWFILTER */
1783 rv = ixl_add_macvlan(sc, addrlo, 0,
1784 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
1785
1786 if (rv == ENOSPC) {
1787 ixl_del_all_multiaddr(sc);
1788 SET(ifp->if_flags, IFF_ALLMULTI);
1789 return ENETRESET;
1790 }
1791
1792 return rv;
1793 }
1794
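/*
 * ixl_del_multi:
 *	remove a multicast filter.  If the interface is in ALLMULTI
 *	mode, try to reinstall individual filters for the remaining
 *	addresses; clear IFF_ALLMULTI and return ENETRESET only when
 *	that succeeds, otherwise stay in ALLMULTI mode.
 */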
1795 static int
1796 ixl_del_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
1797 {
1798 struct ifnet *ifp = &sc->sc_ec.ec_if;
1799 struct ethercom *ec = &sc->sc_ec;
1800 struct ether_multi *enm, *enm_last;
1801 struct ether_multistep step;
1802 int error, rv = 0;
1803
1804 if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
1805 ixl_remove_macvlan(sc, addrlo, 0,
1806 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1807 return 0;
1808 }
1809
1810 ETHER_LOCK(ec);
1811 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1812 ETHER_NEXT_MULTI(step, enm)) {
1813 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
1814 ETHER_ADDR_LEN) != 0) {
1815 goto out;
1816 }
1817 }
1818
1819 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1820 ETHER_NEXT_MULTI(step, enm)) {
1821 error = ixl_add_macvlan(sc, enm->enm_addrlo, 0,
1822 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
1823 if (error != 0)
1824 break;
1825 }
1826
1827 if (enm != NULL) {
1828 enm_last = enm;
1829 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1830 ETHER_NEXT_MULTI(step, enm)) {
1831 if (enm == enm_last)
1832 break;
1833
1834 ixl_remove_macvlan(sc, enm->enm_addrlo, 0,
1835 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1836 }
1837 } else {
1838 CLR(ifp->if_flags, IFF_ALLMULTI);
1839 rv = ENETRESET;
1840 }
1841
1842 out:
1843 ETHER_UNLOCK(ec);
1844 return rv;
1845 }
1846
1847 static int
1848 ixl_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1849 {
1850 struct ifreq *ifr = (struct ifreq *)data;
1851 struct ixl_softc *sc = (struct ixl_softc *)ifp->if_softc;
1852 const struct sockaddr *sa;
1853 uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
1854 int s, error = 0;
1855 unsigned int nmtu;
1856
1857 switch (cmd) {
1858 case SIOCSIFMTU:
1859 nmtu = ifr->ifr_mtu;
1860
1861 if (nmtu < IXL_MIN_MTU || nmtu > IXL_MAX_MTU) {
1862 error = EINVAL;
1863 break;
1864 }
1865 if (ifp->if_mtu != nmtu) {
1866 s = splnet();
1867 error = ether_ioctl(ifp, cmd, data);
1868 splx(s);
1869 if (error == ENETRESET)
1870 error = ixl_init(ifp);
1871 }
1872 break;
1873 case SIOCADDMULTI:
1874 sa = ifreq_getaddr(SIOCADDMULTI, ifr);
1875 if (ether_addmulti(sa, &sc->sc_ec) == ENETRESET) {
1876 error = ether_multiaddr(sa, addrlo, addrhi);
1877 if (error != 0)
1878 return error;
1879
1880 error = ixl_add_multi(sc, addrlo, addrhi);
1881 if (error != 0 && error != ENETRESET) {
1882 ether_delmulti(sa, &sc->sc_ec);
1883 error = EIO;
1884 }
1885 }
1886 break;
1887
1888 case SIOCDELMULTI:
1889 sa = ifreq_getaddr(SIOCDELMULTI, ifr);
1890 if (ether_delmulti(sa, &sc->sc_ec) == ENETRESET) {
1891 error = ether_multiaddr(sa, addrlo, addrhi);
1892 if (error != 0)
1893 return error;
1894
1895 error = ixl_del_multi(sc, addrlo, addrhi);
1896 }
1897 break;
1898
1899 default:
1900 s = splnet();
1901 error = ether_ioctl(ifp, cmd, data);
1902 splx(s);
1903 }
1904
1905 if (error == ENETRESET)
1906 error = ixl_iff(sc);
1907
1908 return error;
1909 }
1910
1911 static enum i40e_mac_type
1912 ixl_mactype(pci_product_id_t id)
1913 {
1914
1915 switch (id) {
1916 case PCI_PRODUCT_INTEL_XL710_SFP:
1917 case PCI_PRODUCT_INTEL_XL710_KX_B:
1918 case PCI_PRODUCT_INTEL_XL710_KX_C:
1919 case PCI_PRODUCT_INTEL_XL710_QSFP_A:
1920 case PCI_PRODUCT_INTEL_XL710_QSFP_B:
1921 case PCI_PRODUCT_INTEL_XL710_QSFP_C:
1922 case PCI_PRODUCT_INTEL_X710_10G_T:
1923 case PCI_PRODUCT_INTEL_XL710_20G_BP_1:
1924 case PCI_PRODUCT_INTEL_XL710_20G_BP_2:
1925 case PCI_PRODUCT_INTEL_X710_T4_10G:
1926 case PCI_PRODUCT_INTEL_XXV710_25G_BP:
1927 case PCI_PRODUCT_INTEL_XXV710_25G_SFP28:
1928 return I40E_MAC_XL710;
1929
1930 case PCI_PRODUCT_INTEL_X722_KX:
1931 case PCI_PRODUCT_INTEL_X722_QSFP:
1932 case PCI_PRODUCT_INTEL_X722_SFP:
1933 case PCI_PRODUCT_INTEL_X722_1G_BASET:
1934 case PCI_PRODUCT_INTEL_X722_10G_BASET:
1935 case PCI_PRODUCT_INTEL_X722_I_SFP:
1936 return I40E_MAC_X722;
1937 }
1938
1939 return I40E_MAC_GENERIC;
1940 }
1941
1942 static inline void *
1943 ixl_hmc_kva(struct ixl_softc *sc, enum ixl_hmc_types type, unsigned int i)
1944 {
1945 uint8_t *kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
1946 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
1947
1948 if (i >= e->hmc_count)
1949 return NULL;
1950
1951 kva += e->hmc_base;
1952 kva += i * e->hmc_size;
1953
1954 return kva;
1955 }
1956
1957 static inline size_t
1958 ixl_hmc_len(struct ixl_softc *sc, enum ixl_hmc_types type)
1959 {
1960 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
1961
1962 return e->hmc_size;
1963 }
1964
1965 static void
1966 ixl_enable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp)
1967 {
1968 struct ixl_rx_ring *rxr = qp->qp_rxr;
1969
1970 ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid),
1971 I40E_PFINT_DYN_CTLN_INTENA_MASK |
1972 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1973 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
1974 ixl_flush(sc);
1975 }
1976
1977 static void
1978 ixl_disable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp)
1979 {
1980 struct ixl_rx_ring *rxr = qp->qp_rxr;
1981
1982 ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid),
1983 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
1984 ixl_flush(sc);
1985 }
1986
1987 static void
1988 ixl_enable_other_intr(struct ixl_softc *sc)
1989 {
1990
1991 ixl_wr(sc, I40E_PFINT_DYN_CTL0,
1992 I40E_PFINT_DYN_CTL0_INTENA_MASK |
1993 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
1994 (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
1995 ixl_flush(sc);
1996 }
1997
1998 static void
1999 ixl_disable_other_intr(struct ixl_softc *sc)
2000 {
2001
2002 ixl_wr(sc, I40E_PFINT_DYN_CTL0,
2003 (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
2004 ixl_flush(sc);
2005 }
2006
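/*
 * ixl_reinit:
 *	reprogram the VSI, write the TX/RX queue contexts into the HMC,
 *	fill the RX rings and enable every queue pair.  Called with
 *	sc_cfg_lock held.
 */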
2007 static int
2008 ixl_reinit(struct ixl_softc *sc)
2009 {
2010 struct ixl_rx_ring *rxr;
2011 struct ixl_tx_ring *txr;
2012 unsigned int i;
2013 uint32_t reg;
2014
2015 KASSERT(mutex_owned(&sc->sc_cfg_lock));
2016
2017 if (ixl_get_vsi(sc) != 0)
2018 return EIO;
2019
2020 if (ixl_set_vsi(sc) != 0)
2021 return EIO;
2022
2023 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2024 txr = sc->sc_qps[i].qp_txr;
2025 rxr = sc->sc_qps[i].qp_rxr;
2026
2027 ixl_txr_config(sc, txr);
2028 ixl_rxr_config(sc, rxr);
2029 }
2030
2031 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
2032 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_PREWRITE);
2033
2034 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2035 txr = sc->sc_qps[i].qp_txr;
2036 rxr = sc->sc_qps[i].qp_rxr;
2037
2038 ixl_wr(sc, I40E_QTX_CTL(i), I40E_QTX_CTL_PF_QUEUE |
2039 (sc->sc_pf_id << I40E_QTX_CTL_PF_INDX_SHIFT));
2040 ixl_flush(sc);
2041
2042 ixl_wr(sc, txr->txr_tail, txr->txr_prod);
2043 ixl_wr(sc, rxr->rxr_tail, rxr->rxr_prod);
2044
2045 /* ixl_rxfill() needs lock held */
2046 mutex_enter(&rxr->rxr_lock);
2047 ixl_rxfill(sc, rxr);
2048 mutex_exit(&rxr->rxr_lock);
2049
2050 reg = ixl_rd(sc, I40E_QRX_ENA(i));
2051 SET(reg, I40E_QRX_ENA_QENA_REQ_MASK);
2052 ixl_wr(sc, I40E_QRX_ENA(i), reg);
2053 if (ixl_rxr_enabled(sc, rxr) != 0)
2054 goto stop;
2055
2056 ixl_txr_qdis(sc, txr, 1);
2057
2058 reg = ixl_rd(sc, I40E_QTX_ENA(i));
2059 SET(reg, I40E_QTX_ENA_QENA_REQ_MASK);
2060 ixl_wr(sc, I40E_QTX_ENA(i), reg);
2061
2062 if (ixl_txr_enabled(sc, txr) != 0)
2063 goto stop;
2064 }
2065
2066 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
2067 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE);
2068
2069 return 0;
2070
2071 stop:
2072 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
2073 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE);
2074
2075 return ETIMEDOUT;
2076 }
2077
2078 static int
2079 ixl_init_locked(struct ixl_softc *sc)
2080 {
2081 struct ifnet *ifp = &sc->sc_ec.ec_if;
2082 unsigned int i;
2083 int error, eccap_change;
2084
2085 KASSERT(mutex_owned(&sc->sc_cfg_lock));
2086
2087 if (ISSET(ifp->if_flags, IFF_RUNNING))
2088 ixl_stop_locked(sc);
2089
2090 if (sc->sc_dead) {
2091 return ENXIO;
2092 }
2093
2094 eccap_change = sc->sc_ec.ec_capenable ^ sc->sc_cur_ec_capenable;
2095 if (ISSET(eccap_change, ETHERCAP_VLAN_HWTAGGING))
2096 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWTAGGING;
2097
2098 if (ISSET(eccap_change, ETHERCAP_VLAN_HWFILTER)) {
2099 if (ixl_update_macvlan(sc) == 0) {
2100 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWFILTER;
2101 } else {
2102 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
2103 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER);
2104 }
2105 }
2106
2107 if (sc->sc_intrtype != PCI_INTR_TYPE_MSIX)
2108 sc->sc_nqueue_pairs = 1;
2109 else
2110 sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max;
2111
2112 error = ixl_reinit(sc);
2113 if (error) {
2114 ixl_stop_locked(sc);
2115 return error;
2116 }
2117
2118 SET(ifp->if_flags, IFF_RUNNING);
2119 CLR(ifp->if_flags, IFF_OACTIVE);
2120
2121 (void)ixl_get_link_status(sc);
2122
2123 ixl_config_rss(sc);
2124 ixl_config_queue_intr(sc);
2125
2126 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2127 ixl_enable_queue_intr(sc, &sc->sc_qps[i]);
2128 }
2129
2130 error = ixl_iff(sc);
2131 if (error) {
2132 ixl_stop_locked(sc);
2133 return error;
2134 }
2135
2136 return 0;
2137 }
2138
2139 static int
2140 ixl_init(struct ifnet *ifp)
2141 {
2142 struct ixl_softc *sc = ifp->if_softc;
2143 int error;
2144
2145 mutex_enter(&sc->sc_cfg_lock);
2146 error = ixl_init_locked(sc);
2147 mutex_exit(&sc->sc_cfg_lock);
2148
2149 return error;
2150 }
2151
2152 static int
2153 ixl_iff(struct ixl_softc *sc)
2154 {
2155 struct ifnet *ifp = &sc->sc_ec.ec_if;
2156 struct ixl_atq iatq;
2157 struct ixl_aq_desc *iaq;
2158 struct ixl_aq_vsi_promisc_param *param;
2159 uint16_t flag_add, flag_del;
2160 int error;
2161
2162 if (!ISSET(ifp->if_flags, IFF_RUNNING))
2163 return 0;
2164
2165 memset(&iatq, 0, sizeof(iatq));
2166
2167 iaq = &iatq.iatq_desc;
2168 iaq->iaq_opcode = htole16(IXL_AQ_OP_SET_VSI_PROMISC);
2169
2170 param = (struct ixl_aq_vsi_promisc_param *)&iaq->iaq_param;
2171 param->flags = htole16(0);
2172
2173 if (!ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)
2174 || ISSET(ifp->if_flags, IFF_PROMISC)) {
2175 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_BCAST |
2176 IXL_AQ_VSI_PROMISC_FLAG_VLAN);
2177 }
2178
2179 if (ISSET(ifp->if_flags, IFF_PROMISC)) {
2180 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
2181 IXL_AQ_VSI_PROMISC_FLAG_MCAST);
2182 } else if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
2183 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_MCAST);
2184 }
2185 param->valid_flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
2186 IXL_AQ_VSI_PROMISC_FLAG_MCAST | IXL_AQ_VSI_PROMISC_FLAG_BCAST |
2187 IXL_AQ_VSI_PROMISC_FLAG_VLAN);
2188 param->seid = sc->sc_seid;
2189
2190 error = ixl_atq_exec(sc, &iatq);
2191 if (error)
2192 return error;
2193
2194 if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK))
2195 return EIO;
2196
2197 if (memcmp(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN) != 0) {
2198 if (ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)) {
2199 flag_add = IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH;
2200 flag_del = IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH;
2201 } else {
2202 flag_add = IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN;
2203 flag_del = IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN;
2204 }
2205
2206 ixl_remove_macvlan(sc, sc->sc_enaddr, 0, flag_del);
2207
2208 memcpy(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN);
2209 ixl_add_macvlan(sc, sc->sc_enaddr, 0, flag_add);
2210 }
2211 return 0;
2212 }
2213
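/*
 * ixl_stop_rendezvous:
 *	wait out TX/RX processing that is still in flight: take and
 *	release each ring lock to synchronize with interrupt handlers
 *	and wait for the per-queue workqueue items to finish.
 */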
2214 static void
2215 ixl_stop_rendezvous(struct ixl_softc *sc)
2216 {
2217 struct ixl_tx_ring *txr;
2218 struct ixl_rx_ring *rxr;
2219 unsigned int i;
2220
2221 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2222 txr = sc->sc_qps[i].qp_txr;
2223 rxr = sc->sc_qps[i].qp_rxr;
2224
2225 mutex_enter(&txr->txr_lock);
2226 mutex_exit(&txr->txr_lock);
2227
2228 mutex_enter(&rxr->rxr_lock);
2229 mutex_exit(&rxr->rxr_lock);
2230
2231 sc->sc_qps[i].qp_workqueue = false;
2232 workqueue_wait(sc->sc_workq_txrx,
2233 &sc->sc_qps[i].qp_work);
2234 }
2235 }
2236
2237 static void
2238 ixl_stop_locked(struct ixl_softc *sc)
2239 {
2240 struct ifnet *ifp = &sc->sc_ec.ec_if;
2241 struct ixl_rx_ring *rxr;
2242 struct ixl_tx_ring *txr;
2243 unsigned int i;
2244 uint32_t reg;
2245
2246 KASSERT(mutex_owned(&sc->sc_cfg_lock));
2247
2248 CLR(ifp->if_flags, IFF_RUNNING | IFF_OACTIVE);
2249
2250 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2251 txr = sc->sc_qps[i].qp_txr;
2252 rxr = sc->sc_qps[i].qp_rxr;
2253
2254 ixl_disable_queue_intr(sc, &sc->sc_qps[i]);
2255
2256 mutex_enter(&txr->txr_lock);
2257 ixl_txr_qdis(sc, txr, 0);
2258 /* XXX wait at least 400 usec for all tx queues in one go */
2259 ixl_flush(sc);
2260 DELAY(500);
2261
2262 reg = ixl_rd(sc, I40E_QTX_ENA(i));
2263 CLR(reg, I40E_QTX_ENA_QENA_REQ_MASK);
2264 ixl_wr(sc, I40E_QTX_ENA(i), reg);
2265 		/* XXX wait 50ms from completion of the TX queue disable */
2266 ixl_flush(sc);
2267 DELAY(50);
2268
2269 if (ixl_txr_disabled(sc, txr) != 0) {
2270 mutex_exit(&txr->txr_lock);
2271 goto die;
2272 }
2273 mutex_exit(&txr->txr_lock);
2274
2275 mutex_enter(&rxr->rxr_lock);
2276 reg = ixl_rd(sc, I40E_QRX_ENA(i));
2277 CLR(reg, I40E_QRX_ENA_QENA_REQ_MASK);
2278 ixl_wr(sc, I40E_QRX_ENA(i), reg);
2279 /* XXX wait 50ms from completion of the RX queue disable */
2280 ixl_flush(sc);
2281 DELAY(50);
2282
2283 if (ixl_rxr_disabled(sc, rxr) != 0) {
2284 mutex_exit(&rxr->rxr_lock);
2285 goto die;
2286 }
2287 mutex_exit(&rxr->rxr_lock);
2288 }
2289
2290 ixl_stop_rendezvous(sc);
2291
2292 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2293 txr = sc->sc_qps[i].qp_txr;
2294 rxr = sc->sc_qps[i].qp_rxr;
2295
2296 ixl_txr_unconfig(sc, txr);
2297 ixl_rxr_unconfig(sc, rxr);
2298
2299 ixl_txr_clean(sc, txr);
2300 ixl_rxr_clean(sc, rxr);
2301 }
2302
2303 return;
2304 die:
2305 sc->sc_dead = true;
2306 	log(LOG_CRIT, "%s: failed to shut down rings\n",
2307 device_xname(sc->sc_dev));
2308 return;
2309 }
2310
2311 static void
2312 ixl_stop(struct ifnet *ifp, int disable)
2313 {
2314 struct ixl_softc *sc = ifp->if_softc;
2315
2316 mutex_enter(&sc->sc_cfg_lock);
2317 ixl_stop_locked(sc);
2318 mutex_exit(&sc->sc_cfg_lock);
2319 }
2320
2321 static int
2322 ixl_queue_pairs_alloc(struct ixl_softc *sc)
2323 {
2324 struct ixl_queue_pair *qp;
2325 unsigned int i;
2326 size_t sz;
2327
2328 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2329 sc->sc_qps = kmem_zalloc(sz, KM_SLEEP);
2330
2331 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2332 qp = &sc->sc_qps[i];
2333
2334 qp->qp_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
2335 ixl_handle_queue, qp);
2336 if (qp->qp_si == NULL)
2337 goto free;
2338
2339 qp->qp_txr = ixl_txr_alloc(sc, i);
2340 if (qp->qp_txr == NULL)
2341 goto free;
2342
2343 qp->qp_rxr = ixl_rxr_alloc(sc, i);
2344 if (qp->qp_rxr == NULL)
2345 goto free;
2346
2347 qp->qp_sc = sc;
2348 snprintf(qp->qp_name, sizeof(qp->qp_name),
2349 "%s-TXRX%d", device_xname(sc->sc_dev), i);
2350 }
2351
2352 return 0;
2353 free:
2354 if (sc->sc_qps != NULL) {
2355 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2356 qp = &sc->sc_qps[i];
2357
2358 if (qp->qp_txr != NULL)
2359 ixl_txr_free(sc, qp->qp_txr);
2360 if (qp->qp_rxr != NULL)
2361 ixl_rxr_free(sc, qp->qp_rxr);
2362 if (qp->qp_si != NULL)
2363 softint_disestablish(qp->qp_si);
2364 }
2365
2366 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2367 kmem_free(sc->sc_qps, sz);
2368 sc->sc_qps = NULL;
2369 }
2370
2371 return -1;
2372 }
2373
2374 static void
2375 ixl_queue_pairs_free(struct ixl_softc *sc)
2376 {
2377 struct ixl_queue_pair *qp;
2378 unsigned int i;
2379 size_t sz;
2380
2381 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2382 qp = &sc->sc_qps[i];
2383 ixl_txr_free(sc, qp->qp_txr);
2384 ixl_rxr_free(sc, qp->qp_rxr);
2385 softint_disestablish(qp->qp_si);
2386 }
2387
2388 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2389 kmem_free(sc->sc_qps, sz);
2390 sc->sc_qps = NULL;
2391 }
2392
2393 static struct ixl_tx_ring *
2394 ixl_txr_alloc(struct ixl_softc *sc, unsigned int qid)
2395 {
2396 struct ixl_tx_ring *txr = NULL;
2397 struct ixl_tx_map *maps = NULL, *txm;
2398 unsigned int i;
2399
2400 txr = kmem_zalloc(sizeof(*txr), KM_SLEEP);
2401 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_tx_ring_ndescs,
2402 KM_SLEEP);
2403
2404 if (ixl_dmamem_alloc(sc, &txr->txr_mem,
2405 sizeof(struct ixl_tx_desc) * sc->sc_tx_ring_ndescs,
2406 IXL_TX_QUEUE_ALIGN) != 0)
2407 goto free;
2408
2409 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2410 txm = &maps[i];
2411
2412 if (bus_dmamap_create(sc->sc_dmat, IXL_TX_PKT_MAXSIZE,
2413 IXL_TX_PKT_DESCS, IXL_TX_PKT_MAXSIZE, 0,
2414 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &txm->txm_map) != 0)
2415 goto uncreate;
2416
2417 txm->txm_eop = -1;
2418 txm->txm_m = NULL;
2419 }
2420
2421 txr->txr_cons = txr->txr_prod = 0;
2422 txr->txr_maps = maps;
2423
2424 txr->txr_intrq = pcq_create(sc->sc_tx_ring_ndescs, KM_NOSLEEP);
2425 if (txr->txr_intrq == NULL)
2426 goto uncreate;
2427
2428 txr->txr_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
2429 ixl_deferred_transmit, txr);
2430 if (txr->txr_si == NULL)
2431 goto destroy_pcq;
2432
2433 txr->txr_tail = I40E_QTX_TAIL(qid);
2434 txr->txr_qid = qid;
2435 txr->txr_sc = sc;
2436 mutex_init(&txr->txr_lock, MUTEX_DEFAULT, IPL_NET);
2437
2438 return txr;
2439
2440 destroy_pcq:
2441 pcq_destroy(txr->txr_intrq);
2442 uncreate:
2443 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2444 txm = &maps[i];
2445
2446 if (txm->txm_map == NULL)
2447 continue;
2448
2449 bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2450 }
2451
2452 ixl_dmamem_free(sc, &txr->txr_mem);
2453 free:
2454 kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs);
2455 kmem_free(txr, sizeof(*txr));
2456
2457 return NULL;
2458 }
2459
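/*
 * ixl_txr_qdis:
 *	set or clear the pre-transmit queue disable request for a TX
 *	ring.  Queue indices are global, so the ring's qid is offset by
 *	sc_base_queue and spread over GLLAN_TXPRE_QDIS registers that
 *	each cover 128 queues.
 */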
2460 static void
2461 ixl_txr_qdis(struct ixl_softc *sc, struct ixl_tx_ring *txr, int enable)
2462 {
2463 unsigned int qid;
2464 bus_size_t reg;
2465 uint32_t r;
2466
2467 qid = txr->txr_qid + sc->sc_base_queue;
2468 reg = I40E_GLLAN_TXPRE_QDIS(qid / 128);
2469 qid %= 128;
2470
2471 r = ixl_rd(sc, reg);
2472 CLR(r, I40E_GLLAN_TXPRE_QDIS_QINDX_MASK);
2473 SET(r, qid << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
2474 SET(r, enable ? I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK :
2475 I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK);
2476 ixl_wr(sc, reg, r);
2477 }
2478
2479 static void
2480 ixl_txr_config(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2481 {
2482 struct ixl_hmc_txq txq;
2483 struct ixl_aq_vsi_data *data = IXL_DMA_KVA(&sc->sc_scratch);
2484 void *hmc;
2485
2486 memset(&txq, 0, sizeof(txq));
2487 txq.head = htole16(txr->txr_cons);
2488 txq.new_context = 1;
2489 txq.base = htole64(IXL_DMA_DVA(&txr->txr_mem) / IXL_HMC_TXQ_BASE_UNIT);
2490 txq.head_wb_ena = IXL_HMC_TXQ_DESC_WB;
2491 txq.qlen = htole16(sc->sc_tx_ring_ndescs);
2492 txq.tphrdesc_ena = 0;
2493 txq.tphrpacket_ena = 0;
2494 txq.tphwdesc_ena = 0;
2495 txq.rdylist = data->qs_handle[0];
2496
2497 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2498 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2499 ixl_hmc_pack(hmc, &txq, ixl_hmc_pack_txq,
2500 __arraycount(ixl_hmc_pack_txq));
2501 }
2502
2503 static void
2504 ixl_txr_unconfig(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2505 {
2506 void *hmc;
2507
2508 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2509 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2510 txr->txr_cons = txr->txr_prod = 0;
2511 }
2512
2513 static void
2514 ixl_txr_clean(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2515 {
2516 struct ixl_tx_map *maps, *txm;
2517 bus_dmamap_t map;
2518 unsigned int i;
2519
2520 maps = txr->txr_maps;
2521 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2522 txm = &maps[i];
2523
2524 if (txm->txm_m == NULL)
2525 continue;
2526
2527 map = txm->txm_map;
2528 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2529 BUS_DMASYNC_POSTWRITE);
2530 bus_dmamap_unload(sc->sc_dmat, map);
2531
2532 m_freem(txm->txm_m);
2533 txm->txm_m = NULL;
2534 }
2535 }
2536
2537 static int
2538 ixl_txr_enabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2539 {
2540 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2541 uint32_t reg;
2542 int i;
2543
2544 for (i = 0; i < 10; i++) {
2545 reg = ixl_rd(sc, ena);
2546 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK))
2547 return 0;
2548
2549 delaymsec(10);
2550 }
2551
2552 return ETIMEDOUT;
2553 }
2554
2555 static int
2556 ixl_txr_disabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2557 {
2558 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2559 uint32_t reg;
2560 int i;
2561
2562 KASSERT(mutex_owned(&txr->txr_lock));
2563
2564 for (i = 0; i < 20; i++) {
2565 reg = ixl_rd(sc, ena);
2566 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK) == 0)
2567 return 0;
2568
2569 delaymsec(10);
2570 }
2571
2572 return ETIMEDOUT;
2573 }
2574
2575 static void
2576 ixl_txr_free(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2577 {
2578 struct ixl_tx_map *maps, *txm;
2579 struct mbuf *m;
2580 unsigned int i;
2581
2582 softint_disestablish(txr->txr_si);
2583 while ((m = pcq_get(txr->txr_intrq)) != NULL)
2584 m_freem(m);
2585 pcq_destroy(txr->txr_intrq);
2586
2587 maps = txr->txr_maps;
2588 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2589 txm = &maps[i];
2590
2591 bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2592 }
2593
2594 ixl_dmamem_free(sc, &txr->txr_mem);
2595 mutex_destroy(&txr->txr_lock);
2596 kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs);
2597 kmem_free(txr, sizeof(*txr));
2598 }
2599
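/*
 * ixl_load_mbuf:
 *	load an mbuf chain into a TX DMA map.  If the chain is too
 *	fragmented (EFBIG), defragment it with m_defrag() and retry
 *	once, counting successes and failures in the ring's evcnts.
 */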
2600 static inline int
2601 ixl_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf **m0,
2602 struct ixl_tx_ring *txr)
2603 {
2604 struct mbuf *m;
2605 int error;
2606
2607 KASSERT(mutex_owned(&txr->txr_lock));
2608
2609 m = *m0;
2610
2611 error = bus_dmamap_load_mbuf(dmat, map, m,
2612 BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT);
2613 if (error != EFBIG)
2614 return error;
2615
2616 m = m_defrag(m, M_DONTWAIT);
2617 if (m != NULL) {
2618 *m0 = m;
2619 txr->txr_defragged.ev_count++;
2620
2621 error = bus_dmamap_load_mbuf(dmat, map, m,
2622 BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT);
2623 } else {
2624 txr->txr_defrag_failed.ev_count++;
2625 error = ENOBUFS;
2626 }
2627
2628 return error;
2629 }
2630
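/*
 * ixl_tx_setup_offloads:
 *	translate the mbuf checksum/TSO flags into TX data descriptor
 *	command bits: MAC header length, IP header type and length,
 *	and L4 protocol and header length.
 */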
2631 static inline int
2632 ixl_tx_setup_offloads(struct mbuf *m, uint64_t *cmd_txd)
2633 {
2634 struct ether_header *eh;
2635 size_t len;
2636 uint64_t cmd;
2637
2638 cmd = 0;
2639
2640 eh = mtod(m, struct ether_header *);
2641 switch (htons(eh->ether_type)) {
2642 case ETHERTYPE_IP:
2643 case ETHERTYPE_IPV6:
2644 len = ETHER_HDR_LEN;
2645 break;
2646 case ETHERTYPE_VLAN:
2647 len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2648 break;
2649 default:
2650 len = 0;
2651 }
2652 cmd |= ((len >> 1) << IXL_TX_DESC_MACLEN_SHIFT);
2653
2654 if (m->m_pkthdr.csum_flags &
2655 (M_CSUM_TSOv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
2656 cmd |= IXL_TX_DESC_CMD_IIPT_IPV4;
2657 }
2658 if (m->m_pkthdr.csum_flags & M_CSUM_IPv4) {
2659 cmd |= IXL_TX_DESC_CMD_IIPT_IPV4_CSUM;
2660 }
2661
2662 if (m->m_pkthdr.csum_flags &
2663 (M_CSUM_TSOv6 | M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
2664 cmd |= IXL_TX_DESC_CMD_IIPT_IPV6;
2665 }
2666
2667 switch (cmd & IXL_TX_DESC_CMD_IIPT_MASK) {
2668 case IXL_TX_DESC_CMD_IIPT_IPV4:
2669 case IXL_TX_DESC_CMD_IIPT_IPV4_CSUM:
2670 len = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data);
2671 break;
2672 case IXL_TX_DESC_CMD_IIPT_IPV6:
2673 len = M_CSUM_DATA_IPv6_IPHL(m->m_pkthdr.csum_data);
2674 break;
2675 default:
2676 len = 0;
2677 }
2678 cmd |= ((len >> 2) << IXL_TX_DESC_IPLEN_SHIFT);
2679
2680 if (m->m_pkthdr.csum_flags &
2681 (M_CSUM_TSOv4 | M_CSUM_TSOv6 | M_CSUM_TCPv4 | M_CSUM_TCPv6)) {
2682 len = sizeof(struct tcphdr);
2683 cmd |= IXL_TX_DESC_CMD_L4T_EOFT_TCP;
2684 } else if (m->m_pkthdr.csum_flags & (M_CSUM_UDPv4 | M_CSUM_UDPv6)) {
2685 len = sizeof(struct udphdr);
2686 cmd |= IXL_TX_DESC_CMD_L4T_EOFT_UDP;
2687 } else {
2688 len = 0;
2689 }
2690 cmd |= ((len >> 2) << IXL_TX_DESC_L4LEN_SHIFT);
2691
2692 *cmd_txd |= cmd;
2693 return 0;
2694 }
2695
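/*
 * ixl_tx_common_locked:
 *	common transmit path for if_start (if_snd, queue 0) and
 *	if_transmit (per-queue pcq).  Dequeue packets, DMA-load them,
 *	fill the data descriptors including offload and VLAN tag bits,
 *	and bump the TX tail register if anything was posted.
 *	Called with txr_lock held.
 */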
2696 static void
2697 ixl_tx_common_locked(struct ifnet *ifp, struct ixl_tx_ring *txr,
2698 bool is_transmit)
2699 {
2700 struct ixl_softc *sc = ifp->if_softc;
2701 struct ixl_tx_desc *ring, *txd;
2702 struct ixl_tx_map *txm;
2703 bus_dmamap_t map;
2704 struct mbuf *m;
2705 uint64_t cmd, cmd_txd;
2706 unsigned int prod, free, last, i;
2707 unsigned int mask;
2708 int post = 0;
2709
2710 KASSERT(mutex_owned(&txr->txr_lock));
2711
2712 if (ifp->if_link_state != LINK_STATE_UP
2713 || !ISSET(ifp->if_flags, IFF_RUNNING)
2714 || (!is_transmit && ISSET(ifp->if_flags, IFF_OACTIVE))) {
2715 if (!is_transmit)
2716 IFQ_PURGE(&ifp->if_snd);
2717 return;
2718 }
2719
2720 prod = txr->txr_prod;
2721 free = txr->txr_cons;
2722 if (free <= prod)
2723 free += sc->sc_tx_ring_ndescs;
2724 free -= prod;
2725
2726 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2727 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTWRITE);
2728
2729 ring = IXL_DMA_KVA(&txr->txr_mem);
2730 mask = sc->sc_tx_ring_ndescs - 1;
2731 last = prod;
2732 cmd = 0;
2733 txd = NULL;
2734
2735 for (;;) {
2736 if (free <= IXL_TX_PKT_DESCS) {
2737 if (!is_transmit)
2738 SET(ifp->if_flags, IFF_OACTIVE);
2739 break;
2740 }
2741
2742 if (is_transmit)
2743 m = pcq_get(txr->txr_intrq);
2744 else
2745 IFQ_DEQUEUE(&ifp->if_snd, m);
2746
2747 if (m == NULL)
2748 break;
2749
2750 txm = &txr->txr_maps[prod];
2751 map = txm->txm_map;
2752
2753 if (ixl_load_mbuf(sc->sc_dmat, map, &m, txr) != 0) {
2754 if_statinc(ifp, if_oerrors);
2755 m_freem(m);
2756 continue;
2757 }
2758
2759 cmd_txd = 0;
2760 if (m->m_pkthdr.csum_flags & IXL_CSUM_ALL_OFFLOAD) {
2761 ixl_tx_setup_offloads(m, &cmd_txd);
2762 }
2763
2764 if (vlan_has_tag(m)) {
2765 cmd_txd |= (uint64_t)vlan_get_tag(m) <<
2766 IXL_TX_DESC_L2TAG1_SHIFT;
2767 cmd_txd |= IXL_TX_DESC_CMD_IL2TAG1;
2768 }
2769
2770 bus_dmamap_sync(sc->sc_dmat, map, 0,
2771 map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2772
2773 for (i = 0; i < (unsigned int)map->dm_nsegs; i++) {
2774 txd = &ring[prod];
2775
2776 cmd = (uint64_t)map->dm_segs[i].ds_len <<
2777 IXL_TX_DESC_BSIZE_SHIFT;
2778 cmd |= IXL_TX_DESC_DTYPE_DATA | IXL_TX_DESC_CMD_ICRC;
2779 cmd |= cmd_txd;
2780
2781 txd->addr = htole64(map->dm_segs[i].ds_addr);
2782 txd->cmd = htole64(cmd);
2783
2784 last = prod;
2785
2786 prod++;
2787 prod &= mask;
2788 }
2789 cmd |= IXL_TX_DESC_CMD_EOP | IXL_TX_DESC_CMD_RS;
2790 txd->cmd = htole64(cmd);
2791
2792 txm->txm_m = m;
2793 txm->txm_eop = last;
2794
2795 bpf_mtap(ifp, m, BPF_D_OUT);
2796
2797 free -= i;
2798 post = 1;
2799 }
2800
2801 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2802 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREWRITE);
2803
2804 if (post) {
2805 txr->txr_prod = prod;
2806 ixl_wr(sc, txr->txr_tail, prod);
2807 }
2808 }
2809
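/*
 * ixl_txeof:
 *	reclaim completed TX descriptors, up to txlimit packets: unload
 *	and free the mbufs, update the interface counters and advance
 *	the consumer index.  Returns nonzero if more work remains.
 */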
2810 static int
2811 ixl_txeof(struct ixl_softc *sc, struct ixl_tx_ring *txr, u_int txlimit)
2812 {
2813 struct ifnet *ifp = &sc->sc_ec.ec_if;
2814 struct ixl_tx_desc *ring, *txd;
2815 struct ixl_tx_map *txm;
2816 struct mbuf *m;
2817 bus_dmamap_t map;
2818 unsigned int cons, prod, last;
2819 unsigned int mask;
2820 uint64_t dtype;
2821 int done = 0, more = 0;
2822
2823 KASSERT(mutex_owned(&txr->txr_lock));
2824
2825 prod = txr->txr_prod;
2826 cons = txr->txr_cons;
2827
2828 if (cons == prod)
2829 return 0;
2830
2831 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2832 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTREAD);
2833
2834 ring = IXL_DMA_KVA(&txr->txr_mem);
2835 mask = sc->sc_tx_ring_ndescs - 1;
2836
2837 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
2838
2839 do {
2840 if (txlimit-- <= 0) {
2841 more = 1;
2842 break;
2843 }
2844
2845 txm = &txr->txr_maps[cons];
2846 last = txm->txm_eop;
2847 txd = &ring[last];
2848
2849 dtype = txd->cmd & htole64(IXL_TX_DESC_DTYPE_MASK);
2850 if (dtype != htole64(IXL_TX_DESC_DTYPE_DONE))
2851 break;
2852
2853 map = txm->txm_map;
2854
2855 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2856 BUS_DMASYNC_POSTWRITE);
2857 bus_dmamap_unload(sc->sc_dmat, map);
2858
2859 m = txm->txm_m;
2860 if (m != NULL) {
2861 if_statinc_ref(nsr, if_opackets);
2862 if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
2863 if (ISSET(m->m_flags, M_MCAST))
2864 if_statinc_ref(nsr, if_omcasts);
2865 m_freem(m);
2866 }
2867
2868 txm->txm_m = NULL;
2869 txm->txm_eop = -1;
2870
2871 cons = last + 1;
2872 cons &= mask;
2873 done = 1;
2874 } while (cons != prod);
2875
2876 IF_STAT_PUTREF(ifp);
2877
2878 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2879 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREREAD);
2880
2881 txr->txr_cons = cons;
2882
2883 if (done) {
2884 softint_schedule(txr->txr_si);
2885 if (txr->txr_qid == 0) {
2886 CLR(ifp->if_flags, IFF_OACTIVE);
2887 if_schedule_deferred_start(ifp);
2888 }
2889 }
2890
2891 return more;
2892 }
2893
2894 static void
2895 ixl_start(struct ifnet *ifp)
2896 {
2897 struct ixl_softc *sc;
2898 struct ixl_tx_ring *txr;
2899
2900 sc = ifp->if_softc;
2901 txr = sc->sc_qps[0].qp_txr;
2902
2903 mutex_enter(&txr->txr_lock);
2904 ixl_tx_common_locked(ifp, txr, false);
2905 mutex_exit(&txr->txr_lock);
2906 }
2907
2908 static inline unsigned int
2909 ixl_select_txqueue(struct ixl_softc *sc, struct mbuf *m)
2910 {
2911 u_int cpuid;
2912
2913 cpuid = cpu_index(curcpu());
2914
2915 return (unsigned int)(cpuid % sc->sc_nqueue_pairs);
2916 }
2917
2918 static int
2919 ixl_transmit(struct ifnet *ifp, struct mbuf *m)
2920 {
2921 struct ixl_softc *sc;
2922 struct ixl_tx_ring *txr;
2923 unsigned int qid;
2924
2925 sc = ifp->if_softc;
2926 qid = ixl_select_txqueue(sc, m);
2927
2928 txr = sc->sc_qps[qid].qp_txr;
2929
2930 if (__predict_false(!pcq_put(txr->txr_intrq, m))) {
2931 mutex_enter(&txr->txr_lock);
2932 txr->txr_pcqdrop.ev_count++;
2933 mutex_exit(&txr->txr_lock);
2934
2935 m_freem(m);
2936 return ENOBUFS;
2937 }
2938
2939 if (mutex_tryenter(&txr->txr_lock)) {
2940 ixl_tx_common_locked(ifp, txr, true);
2941 mutex_exit(&txr->txr_lock);
2942 } else {
2943 kpreempt_disable();
2944 softint_schedule(txr->txr_si);
2945 kpreempt_enable();
2946 }
2947
2948 return 0;
2949 }
2950
2951 static void
2952 ixl_deferred_transmit(void *xtxr)
2953 {
2954 struct ixl_tx_ring *txr = xtxr;
2955 struct ixl_softc *sc = txr->txr_sc;
2956 struct ifnet *ifp = &sc->sc_ec.ec_if;
2957
2958 mutex_enter(&txr->txr_lock);
2959 txr->txr_transmitdef.ev_count++;
2960 if (pcq_peek(txr->txr_intrq) != NULL)
2961 ixl_tx_common_locked(ifp, txr, true);
2962 mutex_exit(&txr->txr_lock);
2963 }
2964
2965 static struct ixl_rx_ring *
2966 ixl_rxr_alloc(struct ixl_softc *sc, unsigned int qid)
2967 {
2968 struct ixl_rx_ring *rxr = NULL;
2969 struct ixl_rx_map *maps = NULL, *rxm;
2970 unsigned int i;
2971
2972 rxr = kmem_zalloc(sizeof(*rxr), KM_SLEEP);
2973 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_rx_ring_ndescs,
2974 KM_SLEEP);
2975
2976 if (ixl_dmamem_alloc(sc, &rxr->rxr_mem,
2977 sizeof(struct ixl_rx_rd_desc_32) * sc->sc_rx_ring_ndescs,
2978 IXL_RX_QUEUE_ALIGN) != 0)
2979 goto free;
2980
2981 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2982 rxm = &maps[i];
2983
2984 if (bus_dmamap_create(sc->sc_dmat,
2985 IXL_MCLBYTES, 1, IXL_MCLBYTES, 0,
2986 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &rxm->rxm_map) != 0)
2987 goto uncreate;
2988
2989 rxm->rxm_m = NULL;
2990 }
2991
2992 rxr->rxr_cons = rxr->rxr_prod = 0;
2993 rxr->rxr_m_head = NULL;
2994 rxr->rxr_m_tail = &rxr->rxr_m_head;
2995 rxr->rxr_maps = maps;
2996
2997 rxr->rxr_tail = I40E_QRX_TAIL(qid);
2998 rxr->rxr_qid = qid;
2999 mutex_init(&rxr->rxr_lock, MUTEX_DEFAULT, IPL_NET);
3000
3001 return rxr;
3002
3003 uncreate:
3004 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
3005 rxm = &maps[i];
3006
3007 if (rxm->rxm_map == NULL)
3008 continue;
3009
3010 bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
3011 }
3012
3013 ixl_dmamem_free(sc, &rxr->rxr_mem);
3014 free:
3015 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs);
3016 kmem_free(rxr, sizeof(*rxr));
3017
3018 return NULL;
3019 }
3020
3021 static void
3022 ixl_rxr_clean(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3023 {
3024 struct ixl_rx_map *maps, *rxm;
3025 bus_dmamap_t map;
3026 unsigned int i;
3027
3028 maps = rxr->rxr_maps;
3029 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
3030 rxm = &maps[i];
3031
3032 if (rxm->rxm_m == NULL)
3033 continue;
3034
3035 map = rxm->rxm_map;
3036 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3037 BUS_DMASYNC_POSTWRITE);
3038 bus_dmamap_unload(sc->sc_dmat, map);
3039
3040 m_freem(rxm->rxm_m);
3041 rxm->rxm_m = NULL;
3042 }
3043
3044 m_freem(rxr->rxr_m_head);
3045 rxr->rxr_m_head = NULL;
3046 rxr->rxr_m_tail = &rxr->rxr_m_head;
3047
3048 rxr->rxr_prod = rxr->rxr_cons = 0;
3049 }
3050
3051 static int
3052 ixl_rxr_enabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3053 {
3054 bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
3055 uint32_t reg;
3056 int i;
3057
3058 for (i = 0; i < 10; i++) {
3059 reg = ixl_rd(sc, ena);
3060 if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK))
3061 return 0;
3062
3063 delaymsec(10);
3064 }
3065
3066 return ETIMEDOUT;
3067 }
3068
3069 static int
3070 ixl_rxr_disabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3071 {
3072 bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
3073 uint32_t reg;
3074 int i;
3075
3076 KASSERT(mutex_owned(&rxr->rxr_lock));
3077
3078 for (i = 0; i < 20; i++) {
3079 reg = ixl_rd(sc, ena);
3080 if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK) == 0)
3081 return 0;
3082
3083 delaymsec(10);
3084 }
3085
3086 return ETIMEDOUT;
3087 }
3088
3089 static void
3090 ixl_rxr_config(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3091 {
3092 struct ixl_hmc_rxq rxq;
3093 struct ifnet *ifp = &sc->sc_ec.ec_if;
3094 uint16_t rxmax;
3095 void *hmc;
3096
3097 memset(&rxq, 0, sizeof(rxq));
3098 rxmax = ifp->if_mtu + IXL_MTU_ETHERLEN;
3099
3100 rxq.head = htole16(rxr->rxr_cons);
3101 rxq.base = htole64(IXL_DMA_DVA(&rxr->rxr_mem) / IXL_HMC_RXQ_BASE_UNIT);
3102 rxq.qlen = htole16(sc->sc_rx_ring_ndescs);
3103 rxq.dbuff = htole16(IXL_MCLBYTES / IXL_HMC_RXQ_DBUFF_UNIT);
3104 rxq.hbuff = 0;
3105 rxq.dtype = IXL_HMC_RXQ_DTYPE_NOSPLIT;
3106 rxq.dsize = IXL_HMC_RXQ_DSIZE_32;
3107 rxq.crcstrip = 1;
3108 rxq.l2sel = 1;
3109 rxq.showiv = 1;
3110 rxq.rxmax = htole16(rxmax);
3111 rxq.tphrdesc_ena = 0;
3112 rxq.tphwdesc_ena = 0;
3113 rxq.tphdata_ena = 0;
3114 rxq.tphhead_ena = 0;
3115 rxq.lrxqthresh = 0;
3116 rxq.prefena = 1;
3117
3118 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
3119 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
3120 ixl_hmc_pack(hmc, &rxq, ixl_hmc_pack_rxq,
3121 __arraycount(ixl_hmc_pack_rxq));
3122 }
3123
3124 static void
3125 ixl_rxr_unconfig(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3126 {
3127 void *hmc;
3128
3129 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
3130 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
3131 rxr->rxr_cons = rxr->rxr_prod = 0;
3132 }
3133
3134 static void
3135 ixl_rxr_free(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3136 {
3137 struct ixl_rx_map *maps, *rxm;
3138 unsigned int i;
3139
3140 maps = rxr->rxr_maps;
3141 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
3142 rxm = &maps[i];
3143
3144 bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
3145 }
3146
3147 ixl_dmamem_free(sc, &rxr->rxr_mem);
3148 mutex_destroy(&rxr->rxr_lock);
3149 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs);
3150 kmem_free(rxr, sizeof(*rxr));
3151 }
3152
3153 static inline void
3154 ixl_rx_csum(struct mbuf *m, uint64_t qword)
3155 {
3156 int flags_mask;
3157
3158 if (!ISSET(qword, IXL_RX_DESC_L3L4P)) {
3159 /* No L3 or L4 checksum was calculated */
3160 return;
3161 }
3162
3163 switch (__SHIFTOUT(qword, IXL_RX_DESC_PTYPE_MASK)) {
3164 case IXL_RX_DESC_PTYPE_IPV4FRAG:
3165 case IXL_RX_DESC_PTYPE_IPV4:
3166 case IXL_RX_DESC_PTYPE_SCTPV4:
3167 case IXL_RX_DESC_PTYPE_ICMPV4:
3168 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
3169 break;
3170 case IXL_RX_DESC_PTYPE_TCPV4:
3171 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
3172 flags_mask |= M_CSUM_TCPv4 | M_CSUM_TCP_UDP_BAD;
3173 break;
3174 case IXL_RX_DESC_PTYPE_UDPV4:
3175 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
3176 flags_mask |= M_CSUM_UDPv4 | M_CSUM_TCP_UDP_BAD;
3177 break;
3178 case IXL_RX_DESC_PTYPE_TCPV6:
3179 flags_mask = M_CSUM_TCPv6 | M_CSUM_TCP_UDP_BAD;
3180 break;
3181 case IXL_RX_DESC_PTYPE_UDPV6:
3182 flags_mask = M_CSUM_UDPv6 | M_CSUM_TCP_UDP_BAD;
3183 break;
3184 default:
3185 flags_mask = 0;
3186 }
3187
3188 m->m_pkthdr.csum_flags |= (flags_mask & (M_CSUM_IPv4 |
3189 M_CSUM_TCPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv4 | M_CSUM_UDPv6));
3190
3191 if (ISSET(qword, IXL_RX_DESC_IPE)) {
3192 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_IPv4_BAD);
3193 }
3194
3195 if (ISSET(qword, IXL_RX_DESC_L4E)) {
3196 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_TCP_UDP_BAD);
3197 }
3198 }
3199
3200 static int
3201 ixl_rxeof(struct ixl_softc *sc, struct ixl_rx_ring *rxr, u_int rxlimit)
3202 {
3203 struct ifnet *ifp = &sc->sc_ec.ec_if;
3204 struct ixl_rx_wb_desc_32 *ring, *rxd;
3205 struct ixl_rx_map *rxm;
3206 bus_dmamap_t map;
3207 unsigned int cons, prod;
3208 struct mbuf *m;
3209 uint64_t word, word0;
3210 unsigned int len;
3211 unsigned int mask;
3212 int done = 0, more = 0;
3213
3214 KASSERT(mutex_owned(&rxr->rxr_lock));
3215
3216 if (!ISSET(ifp->if_flags, IFF_RUNNING))
3217 return 0;
3218
3219 prod = rxr->rxr_prod;
3220 cons = rxr->rxr_cons;
3221
3222 if (cons == prod)
3223 return 0;
3224
3225 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
3226 0, IXL_DMA_LEN(&rxr->rxr_mem),
3227 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3228
3229 ring = IXL_DMA_KVA(&rxr->rxr_mem);
3230 mask = sc->sc_rx_ring_ndescs - 1;
3231
3232 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
3233
3234 do {
3235 if (rxlimit-- <= 0) {
3236 more = 1;
3237 break;
3238 }
3239
3240 rxd = &ring[cons];
3241
3242 word = le64toh(rxd->qword1);
3243
3244 if (!ISSET(word, IXL_RX_DESC_DD))
3245 break;
3246
3247 rxm = &rxr->rxr_maps[cons];
3248
3249 map = rxm->rxm_map;
3250 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3251 BUS_DMASYNC_POSTREAD);
3252 bus_dmamap_unload(sc->sc_dmat, map);
3253
3254 m = rxm->rxm_m;
3255 rxm->rxm_m = NULL;
3256
3257 KASSERT(m != NULL);
3258
3259 len = (word & IXL_RX_DESC_PLEN_MASK) >> IXL_RX_DESC_PLEN_SHIFT;
3260 m->m_len = len;
3261 m->m_pkthdr.len = 0;
3262
3263 m->m_next = NULL;
3264 *rxr->rxr_m_tail = m;
3265 rxr->rxr_m_tail = &m->m_next;
3266
3267 m = rxr->rxr_m_head;
3268 m->m_pkthdr.len += len;
3269
3270 if (ISSET(word, IXL_RX_DESC_EOP)) {
3271 word0 = le64toh(rxd->qword0);
3272
3273 if (ISSET(word, IXL_RX_DESC_L2TAG1P)) {
3274 vlan_set_tag(m,
3275 __SHIFTOUT(word0, IXL_RX_DESC_L2TAG1_MASK));
3276 }
3277
3278 if ((ifp->if_capenable & IXL_IFCAP_RXCSUM) != 0)
3279 ixl_rx_csum(m, word);
3280
3281 if (!ISSET(word,
3282 IXL_RX_DESC_RXE | IXL_RX_DESC_OVERSIZE)) {
3283 m_set_rcvif(m, ifp);
3284 if_statinc_ref(nsr, if_ipackets);
3285 if_statadd_ref(nsr, if_ibytes,
3286 m->m_pkthdr.len);
3287 if_percpuq_enqueue(ifp->if_percpuq, m);
3288 } else {
3289 if_statinc_ref(nsr, if_ierrors);
3290 m_freem(m);
3291 }
3292
3293 rxr->rxr_m_head = NULL;
3294 rxr->rxr_m_tail = &rxr->rxr_m_head;
3295 }
3296
3297 cons++;
3298 cons &= mask;
3299
3300 done = 1;
3301 } while (cons != prod);
3302
3303 if (done) {
3304 rxr->rxr_cons = cons;
3305 if (ixl_rxfill(sc, rxr) == -1)
3306 if_statinc_ref(nsr, if_iqdrops);
3307 }
3308
3309 IF_STAT_PUTREF(ifp);
3310
3311 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
3312 0, IXL_DMA_LEN(&rxr->rxr_mem),
3313 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3314
3315 return more;
3316 }
3317
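/*
 * ixl_rxfill:
 *	refill the RX ring with mbuf clusters for every unrefreshed
 *	slot and advance the RX tail register.  Returns -1 if the ring
 *	has no free slots or an mbuf allocation or DMA load fails, so
 *	the caller can count an input drop.
 */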
3318 static int
3319 ixl_rxfill(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3320 {
3321 struct ixl_rx_rd_desc_32 *ring, *rxd;
3322 struct ixl_rx_map *rxm;
3323 bus_dmamap_t map;
3324 struct mbuf *m;
3325 unsigned int prod;
3326 unsigned int slots;
3327 unsigned int mask;
3328 int post = 0, error = 0;
3329
3330 KASSERT(mutex_owned(&rxr->rxr_lock));
3331
3332 prod = rxr->rxr_prod;
3333 slots = ixl_rxr_unrefreshed(rxr->rxr_prod, rxr->rxr_cons,
3334 sc->sc_rx_ring_ndescs);
3335
3336 ring = IXL_DMA_KVA(&rxr->rxr_mem);
3337 mask = sc->sc_rx_ring_ndescs - 1;
3338
3339 if (__predict_false(slots <= 0))
3340 return -1;
3341
3342 do {
3343 rxm = &rxr->rxr_maps[prod];
3344
3345 MGETHDR(m, M_DONTWAIT, MT_DATA);
3346 if (m == NULL) {
3347 rxr->rxr_mgethdr_failed.ev_count++;
3348 error = -1;
3349 break;
3350 }
3351
3352 MCLGET(m, M_DONTWAIT);
3353 if (!ISSET(m->m_flags, M_EXT)) {
3354 rxr->rxr_mgetcl_failed.ev_count++;
3355 error = -1;
3356 m_freem(m);
3357 break;
3358 }
3359
3360 m->m_len = m->m_pkthdr.len = MCLBYTES;
3361 m_adj(m, ETHER_ALIGN);
3362
3363 map = rxm->rxm_map;
3364
3365 if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
3366 BUS_DMA_READ | BUS_DMA_NOWAIT) != 0) {
3367 rxr->rxr_mbuf_load_failed.ev_count++;
3368 error = -1;
3369 m_freem(m);
3370 break;
3371 }
3372
3373 rxm->rxm_m = m;
3374
3375 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3376 BUS_DMASYNC_PREREAD);
3377
3378 rxd = &ring[prod];
3379
3380 rxd->paddr = htole64(map->dm_segs[0].ds_addr);
3381 rxd->haddr = htole64(0);
3382
3383 prod++;
3384 prod &= mask;
3385
3386 post = 1;
3387
3388 } while (--slots);
3389
3390 if (post) {
3391 rxr->rxr_prod = prod;
3392 ixl_wr(sc, rxr->rxr_tail, prod);
3393 }
3394
3395 return error;
3396 }
3397
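/*
 * ixl_handle_queue_common:
 *	run TX completion and RX processing for one queue pair under
 *	the respective ring locks.  The return value has bit 0 set if
 *	TX and bit 1 set if RX hit its processing limit and needs to
 *	be rescheduled.
 */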
3398 static inline int
3399 ixl_handle_queue_common(struct ixl_softc *sc, struct ixl_queue_pair *qp,
3400 u_int txlimit, struct evcnt *txevcnt,
3401 u_int rxlimit, struct evcnt *rxevcnt)
3402 {
3403 struct ixl_tx_ring *txr = qp->qp_txr;
3404 struct ixl_rx_ring *rxr = qp->qp_rxr;
3405 int txmore, rxmore;
3406 int rv;
3407
3408 mutex_enter(&txr->txr_lock);
3409 txevcnt->ev_count++;
3410 txmore = ixl_txeof(sc, txr, txlimit);
3411 mutex_exit(&txr->txr_lock);
3412
3413 mutex_enter(&rxr->rxr_lock);
3414 rxevcnt->ev_count++;
3415 rxmore = ixl_rxeof(sc, rxr, rxlimit);
3416 mutex_exit(&rxr->rxr_lock);
3417
3418 rv = txmore | (rxmore << 1);
3419
3420 return rv;
3421 }
3422
3423 static void
3424 ixl_sched_handle_queue(struct ixl_softc *sc, struct ixl_queue_pair *qp)
3425 {
3426
3427 if (qp->qp_workqueue)
3428 workqueue_enqueue(sc->sc_workq_txrx, &qp->qp_work, NULL);
3429 else
3430 softint_schedule(qp->qp_si);
3431 }
3432
3433 static int
3434 ixl_intr(void *xsc)
3435 {
3436 struct ixl_softc *sc = xsc;
3437 struct ixl_tx_ring *txr;
3438 struct ixl_rx_ring *rxr;
3439 uint32_t icr, rxintr, txintr;
3440 int rv = 0;
3441 unsigned int i;
3442
3443 KASSERT(sc != NULL);
3444
3445 ixl_enable_other_intr(sc);
3446 icr = ixl_rd(sc, I40E_PFINT_ICR0);
3447
3448 if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) {
3449 atomic_inc_64(&sc->sc_event_atq.ev_count);
3450 ixl_atq_done(sc);
3451 ixl_work_add(sc->sc_workq, &sc->sc_arq_task);
3452 rv = 1;
3453 }
3454
3455 if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) {
3456 atomic_inc_64(&sc->sc_event_link.ev_count);
3457 ixl_work_add(sc->sc_workq, &sc->sc_link_state_task);
3458 rv = 1;
3459 }
3460
3461 rxintr = icr & I40E_INTR_NOTX_RX_MASK;
3462 txintr = icr & I40E_INTR_NOTX_TX_MASK;
3463
3464 if (txintr || rxintr) {
3465 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
3466 txr = sc->sc_qps[i].qp_txr;
3467 rxr = sc->sc_qps[i].qp_rxr;
3468
3469 ixl_handle_queue_common(sc, &sc->sc_qps[i],
3470 IXL_TXRX_PROCESS_UNLIMIT, &txr->txr_intr,
3471 IXL_TXRX_PROCESS_UNLIMIT, &rxr->rxr_intr);
3472 }
3473 rv = 1;
3474 }
3475
3476 return rv;
3477 }
3478
3479 static int
3480 ixl_queue_intr(void *xqp)
3481 {
3482 struct ixl_queue_pair *qp = xqp;
3483 struct ixl_tx_ring *txr = qp->qp_txr;
3484 struct ixl_rx_ring *rxr = qp->qp_rxr;
3485 struct ixl_softc *sc = qp->qp_sc;
3486 u_int txlimit, rxlimit;
3487 int more;
3488
3489 txlimit = sc->sc_tx_intr_process_limit;
3490 rxlimit = sc->sc_rx_intr_process_limit;
3491 qp->qp_workqueue = sc->sc_txrx_workqueue;
3492
3493 more = ixl_handle_queue_common(sc, qp,
3494 txlimit, &txr->txr_intr, rxlimit, &rxr->rxr_intr);
3495
3496 if (more != 0) {
3497 ixl_sched_handle_queue(sc, qp);
3498 } else {
3499 /* for ALTQ */
3500 if (txr->txr_qid == 0)
3501 if_schedule_deferred_start(&sc->sc_ec.ec_if);
3502 softint_schedule(txr->txr_si);
3503
3504 ixl_enable_queue_intr(sc, qp);
3505 }
3506
3507 return 1;
3508 }
3509
3510 static void
3511 ixl_handle_queue_wk(struct work *wk, void *xsc)
3512 {
3513 struct ixl_queue_pair *qp;
3514
3515 qp = container_of(wk, struct ixl_queue_pair, qp_work);
3516 ixl_handle_queue(qp);
3517 }
3518
3519 static void
3520 ixl_handle_queue(void *xqp)
3521 {
3522 struct ixl_queue_pair *qp = xqp;
3523 struct ixl_softc *sc = qp->qp_sc;
3524 struct ixl_tx_ring *txr = qp->qp_txr;
3525 struct ixl_rx_ring *rxr = qp->qp_rxr;
3526 u_int txlimit, rxlimit;
3527 int more;
3528
3529 txlimit = sc->sc_tx_process_limit;
3530 rxlimit = sc->sc_rx_process_limit;
3531
3532 more = ixl_handle_queue_common(sc, qp,
3533 txlimit, &txr->txr_defer, rxlimit, &rxr->rxr_defer);
3534
3535 if (more != 0)
3536 ixl_sched_handle_queue(sc, qp);
3537 else
3538 ixl_enable_queue_intr(sc, qp);
3539 }
3540
3541 static inline void
3542 ixl_print_hmc_error(struct ixl_softc *sc, uint32_t reg)
3543 {
3544 uint32_t hmc_idx, hmc_isvf;
3545 uint32_t hmc_errtype, hmc_objtype, hmc_data;
3546
3547 hmc_idx = reg & I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK;
3548 hmc_idx = hmc_idx >> I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT;
3549 hmc_isvf = reg & I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK;
3550 hmc_isvf = hmc_isvf >> I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT;
3551 hmc_errtype = reg & I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK;
3552 hmc_errtype = hmc_errtype >> I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT;
3553 hmc_objtype = reg & I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK;
3554 hmc_objtype = hmc_objtype >> I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT;
3555 hmc_data = ixl_rd(sc, I40E_PFHMC_ERRORDATA);
3556
3557 device_printf(sc->sc_dev,
3558 "HMC Error (idx=0x%x, isvf=0x%x, err=0x%x, obj=0x%x, data=0x%x)\n",
3559 hmc_idx, hmc_isvf, hmc_errtype, hmc_objtype, hmc_data);
3560 }
3561
3562 static int
3563 ixl_other_intr(void *xsc)
3564 {
3565 struct ixl_softc *sc = xsc;
3566 uint32_t icr, mask, reg;
3567 	int rv = 0;
3568
3569 icr = ixl_rd(sc, I40E_PFINT_ICR0);
3570 mask = ixl_rd(sc, I40E_PFINT_ICR0_ENA);
3571
3572 if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) {
3573 atomic_inc_64(&sc->sc_event_atq.ev_count);
3574 ixl_atq_done(sc);
3575 ixl_work_add(sc->sc_workq, &sc->sc_arq_task);
3576 rv = 1;
3577 }
3578
3579 if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) {
3580 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3581 device_printf(sc->sc_dev, "link stat changed\n");
3582
3583 atomic_inc_64(&sc->sc_event_link.ev_count);
3584 ixl_work_add(sc->sc_workq, &sc->sc_link_state_task);
3585 rv = 1;
3586 }
3587
3588 if (ISSET(icr, I40E_PFINT_ICR0_GRST_MASK)) {
3589 CLR(mask, I40E_PFINT_ICR0_ENA_GRST_MASK);
3590 reg = ixl_rd(sc, I40E_GLGEN_RSTAT);
3591 reg = reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK;
3592 reg = reg >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
3593
3594 device_printf(sc->sc_dev, "GRST: %s\n",
3595 reg == I40E_RESET_CORER ? "CORER" :
3596 reg == I40E_RESET_GLOBR ? "GLOBR" :
3597 reg == I40E_RESET_EMPR ? "EMPR" :
3598 "POR");
3599 }
3600
3601 if (ISSET(icr, I40E_PFINT_ICR0_ECC_ERR_MASK))
3602 atomic_inc_64(&sc->sc_event_ecc_err.ev_count);
3603 if (ISSET(icr, I40E_PFINT_ICR0_PCI_EXCEPTION_MASK))
3604 atomic_inc_64(&sc->sc_event_pci_exception.ev_count);
3605 if (ISSET(icr, I40E_PFINT_ICR0_PE_CRITERR_MASK))
3606 atomic_inc_64(&sc->sc_event_crit_err.ev_count);
3607
3608 if (ISSET(icr, IXL_ICR0_CRIT_ERR_MASK)) {
3609 CLR(mask, IXL_ICR0_CRIT_ERR_MASK);
3610 device_printf(sc->sc_dev, "critical error\n");
3611 }
3612
3613 if (ISSET(icr, I40E_PFINT_ICR0_HMC_ERR_MASK)) {
3614 reg = ixl_rd(sc, I40E_PFHMC_ERRORINFO);
3615 if (ISSET(reg, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK))
3616 ixl_print_hmc_error(sc, reg);
3617 ixl_wr(sc, I40E_PFHMC_ERRORINFO, 0);
3618 }
3619
3620 ixl_wr(sc, I40E_PFINT_ICR0_ENA, mask);
3621 ixl_flush(sc);
3622 ixl_enable_other_intr(sc);
3623 return rv;
3624 }
3625
3626 static void
3627 ixl_get_link_status_done(struct ixl_softc *sc,
3628 const struct ixl_aq_desc *iaq)
3629 {
3630
3631 ixl_link_state_update(sc, iaq);
3632 }
3633
3634 static void
3635 ixl_get_link_status(void *xsc)
3636 {
3637 struct ixl_softc *sc = xsc;
3638 struct ixl_aq_desc *iaq;
3639 struct ixl_aq_link_param *param;
3640
3641 memset(&sc->sc_link_state_atq, 0, sizeof(sc->sc_link_state_atq));
3642 iaq = &sc->sc_link_state_atq.iatq_desc;
3643 iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
3644 param = (struct ixl_aq_link_param *)iaq->iaq_param;
3645 param->notify = IXL_AQ_LINK_NOTIFY;
3646
3647 ixl_atq_set(&sc->sc_link_state_atq, ixl_get_link_status_done);
3648 (void)ixl_atq_post(sc, &sc->sc_link_state_atq);
3649 }
3650
3651 static void
3652 ixl_link_state_update(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
3653 {
3654 struct ifnet *ifp = &sc->sc_ec.ec_if;
3655 int link_state;
3656
3657 KASSERT(kpreempt_disabled());
3658
3659 link_state = ixl_set_link_status(sc, iaq);
3660
3661 if (ifp->if_link_state != link_state)
3662 if_link_state_change(ifp, link_state);
3663
3664 if (link_state != LINK_STATE_DOWN) {
3665 if_schedule_deferred_start(ifp);
3666 }
3667 }
3668
3669 static void
3670 ixl_aq_dump(const struct ixl_softc *sc, const struct ixl_aq_desc *iaq,
3671 const char *msg)
3672 {
3673 char buf[512];
3674 size_t len;
3675
3676 len = sizeof(buf);
3677 buf[--len] = '\0';
3678
3679 device_printf(sc->sc_dev, "%s\n", msg);
3680 snprintb(buf, len, IXL_AQ_FLAGS_FMT, le16toh(iaq->iaq_flags));
3681 device_printf(sc->sc_dev, "flags %s opcode %04x\n",
3682 buf, le16toh(iaq->iaq_opcode));
3683 device_printf(sc->sc_dev, "datalen %u retval %u\n",
3684 le16toh(iaq->iaq_datalen), le16toh(iaq->iaq_retval));
3685 device_printf(sc->sc_dev, "cookie %016" PRIx64 "\n", iaq->iaq_cookie);
3686 device_printf(sc->sc_dev, "%08x %08x %08x %08x\n",
3687 le32toh(iaq->iaq_param[0]), le32toh(iaq->iaq_param[1]),
3688 le32toh(iaq->iaq_param[2]), le32toh(iaq->iaq_param[3]));
3689 }
3690
3691 static void
3692 ixl_arq(void *xsc)
3693 {
3694 struct ixl_softc *sc = xsc;
3695 struct ixl_aq_desc *arq, *iaq;
3696 struct ixl_aq_buf *aqb;
3697 unsigned int cons = sc->sc_arq_cons;
3698 unsigned int prod;
3699 int done = 0;
3700
3701 prod = ixl_rd(sc, sc->sc_aq_regs->arq_head) &
3702 sc->sc_aq_regs->arq_head_mask;
3703
3704 if (cons == prod)
3705 goto done;
3706
3707 arq = IXL_DMA_KVA(&sc->sc_arq);
3708
3709 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3710 0, IXL_DMA_LEN(&sc->sc_arq),
3711 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3712
3713 do {
3714 iaq = &arq[cons];
3715 aqb = sc->sc_arq_live[cons];
3716
3717 KASSERT(aqb != NULL);
3718
3719 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,
3720 BUS_DMASYNC_POSTREAD);
3721
3722 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3723 ixl_aq_dump(sc, iaq, "arq event");
3724
3725 switch (iaq->iaq_opcode) {
3726 case htole16(IXL_AQ_OP_PHY_LINK_STATUS):
3727 kpreempt_disable();
3728 ixl_link_state_update(sc, iaq);
3729 kpreempt_enable();
3730 break;
3731 }
3732
3733 memset(iaq, 0, sizeof(*iaq));
3734 sc->sc_arq_live[cons] = NULL;
3735 SIMPLEQ_INSERT_TAIL(&sc->sc_arq_idle, aqb, aqb_entry);
3736
3737 cons++;
3738 cons &= IXL_AQ_MASK;
3739
3740 done = 1;
3741 } while (cons != prod);
3742
3743 if (done) {
3744 sc->sc_arq_cons = cons;
3745 ixl_arq_fill(sc);
3746 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3747 0, IXL_DMA_LEN(&sc->sc_arq),
3748 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3749 }
3750
3751 done:
3752 ixl_enable_other_intr(sc);
3753 }
3754
3755 static void
3756 ixl_atq_set(struct ixl_atq *iatq,
3757 void (*fn)(struct ixl_softc *, const struct ixl_aq_desc *))
3758 {
3759
3760 iatq->iatq_fn = fn;
3761 }
3762
3763 static int
3764 ixl_atq_post_locked(struct ixl_softc *sc, struct ixl_atq *iatq)
3765 {
3766 struct ixl_aq_desc *atq, *slot;
3767 unsigned int prod, cons, prod_next;
3768
3769 /* assert locked */
3770 KASSERT(mutex_owned(&sc->sc_atq_lock));
3771
3772 atq = IXL_DMA_KVA(&sc->sc_atq);
3773 prod = sc->sc_atq_prod;
3774 cons = sc->sc_atq_cons;
3775 	prod_next = (prod + 1) & IXL_AQ_MASK;
3776
3777 if (cons == prod_next)
3778 return ENOMEM;
3779
3780 slot = &atq[prod];
3781
3782 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3783 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
3784
3785 *slot = iatq->iatq_desc;
3786 slot->iaq_cookie = (uint64_t)((intptr_t)iatq);
3787
3788 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3789 ixl_aq_dump(sc, slot, "atq command");
3790
3791 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3792 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
3793
3794 sc->sc_atq_prod = prod_next;
3795 ixl_wr(sc, sc->sc_aq_regs->atq_tail, sc->sc_atq_prod);
3796
3797 return 0;
3798 }
3799
3800 static int
3801 ixl_atq_post(struct ixl_softc *sc, struct ixl_atq *iatq)
3802 {
3803 int rv;
3804
3805 mutex_enter(&sc->sc_atq_lock);
3806 rv = ixl_atq_post_locked(sc, iatq);
3807 mutex_exit(&sc->sc_atq_lock);
3808
3809 return rv;
3810 }
3811
3812 static void
3813 ixl_atq_done_locked(struct ixl_softc *sc)
3814 {
3815 struct ixl_aq_desc *atq, *slot;
3816 struct ixl_atq *iatq;
3817 unsigned int cons;
3818 unsigned int prod;
3819
3820 KASSERT(mutex_owned(&sc->sc_atq_lock));
3821
3822 prod = sc->sc_atq_prod;
3823 cons = sc->sc_atq_cons;
3824
3825 if (prod == cons)
3826 return;
3827
3828 atq = IXL_DMA_KVA(&sc->sc_atq);
3829
3830 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3831 0, IXL_DMA_LEN(&sc->sc_atq),
3832 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3833
3834 do {
3835 slot = &atq[cons];
3836 if (!ISSET(slot->iaq_flags, htole16(IXL_AQ_DD)))
3837 break;
3838
3839 iatq = (struct ixl_atq *)((intptr_t)slot->iaq_cookie);
3840 iatq->iatq_desc = *slot;
3841
3842 memset(slot, 0, sizeof(*slot));
3843
3844 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3845 ixl_aq_dump(sc, &iatq->iatq_desc, "atq response");
3846
3847 (*iatq->iatq_fn)(sc, &iatq->iatq_desc);
3848
3849 cons++;
3850 cons &= IXL_AQ_MASK;
3851 } while (cons != prod);
3852
3853 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3854 0, IXL_DMA_LEN(&sc->sc_atq),
3855 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3856
3857 sc->sc_atq_cons = cons;
3858 }
3859
3860 static void
3861 ixl_atq_done(struct ixl_softc *sc)
3862 {
3863
3864 mutex_enter(&sc->sc_atq_lock);
3865 ixl_atq_done_locked(sc);
3866 mutex_exit(&sc->sc_atq_lock);
3867 }
3868
3869 static void
3870 ixl_wakeup(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
3871 {
3872
3873 KASSERT(mutex_owned(&sc->sc_atq_lock));
3874
3875 cv_signal(&sc->sc_atq_cv);
3876 }
3877
3878 static int
3879 ixl_atq_exec(struct ixl_softc *sc, struct ixl_atq *iatq)
3880 {
3881 int error;
3882
3883 KASSERT(iatq->iatq_desc.iaq_cookie == 0);
3884
3885 ixl_atq_set(iatq, ixl_wakeup);
3886
3887 mutex_enter(&sc->sc_atq_lock);
3888 error = ixl_atq_post_locked(sc, iatq);
3889 if (error) {
3890 mutex_exit(&sc->sc_atq_lock);
3891 return error;
3892 }
3893
3894 error = cv_timedwait(&sc->sc_atq_cv, &sc->sc_atq_lock,
3895 IXL_ATQ_EXEC_TIMEOUT);
3896 mutex_exit(&sc->sc_atq_lock);
3897
3898 return error;
3899 }
3900
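/*
 * ixl_atq_poll:
 *	post a single admin command and busy-wait (1ms per step, up to
 *	tm milliseconds) until the firmware consumes it, then copy the
 *	completed descriptor back into *iaq.  Used by early commands
 *	such as ixl_get_version().
 */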
3901 static int
3902 ixl_atq_poll(struct ixl_softc *sc, struct ixl_aq_desc *iaq, unsigned int tm)
3903 {
3904 struct ixl_aq_desc *atq, *slot;
3905 unsigned int prod;
3906 unsigned int t = 0;
3907
3908 mutex_enter(&sc->sc_atq_lock);
3909
3910 atq = IXL_DMA_KVA(&sc->sc_atq);
3911 prod = sc->sc_atq_prod;
3912 slot = atq + prod;
3913
3914 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3915 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
3916
3917 *slot = *iaq;
3918 slot->iaq_flags |= htole16(IXL_AQ_SI);
3919
3920 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3921 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
3922
3923 prod++;
3924 prod &= IXL_AQ_MASK;
3925 sc->sc_atq_prod = prod;
3926 ixl_wr(sc, sc->sc_aq_regs->atq_tail, prod);
3927
3928 while (ixl_rd(sc, sc->sc_aq_regs->atq_head) != prod) {
3929 delaymsec(1);
3930
3931 if (t++ > tm) {
3932 mutex_exit(&sc->sc_atq_lock);
3933 return ETIMEDOUT;
3934 }
3935 }
3936
3937 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3938 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTREAD);
3939 *iaq = *slot;
3940 memset(slot, 0, sizeof(*slot));
3941 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3942 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREREAD);
3943
3944 sc->sc_atq_cons = prod;
3945
3946 mutex_exit(&sc->sc_atq_lock);
3947
3948 return 0;
3949 }
3950
3951 static int
3952 ixl_get_version(struct ixl_softc *sc)
3953 {
3954 struct ixl_aq_desc iaq;
3955 uint32_t fwbuild, fwver, apiver;
3956 uint16_t api_maj_ver, api_min_ver;
3957
3958 memset(&iaq, 0, sizeof(iaq));
3959 iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VERSION);
3960
3963 if (ixl_atq_poll(sc, &iaq, 2000) != 0)
3964 return ETIMEDOUT;
3965 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK))
3966 return EIO;
3967
3968 fwbuild = le32toh(iaq.iaq_param[1]);
3969 fwver = le32toh(iaq.iaq_param[2]);
3970 apiver = le32toh(iaq.iaq_param[3]);
3971
3972 api_maj_ver = (uint16_t)apiver;
3973 api_min_ver = (uint16_t)(apiver >> 16);
3974
3975 aprint_normal(", FW %hu.%hu.%05u API %hu.%hu", (uint16_t)fwver,
3976 (uint16_t)(fwver >> 16), fwbuild, api_maj_ver, api_min_ver);
3977
3978 if (sc->sc_mac_type == I40E_MAC_X722) {
3979 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK |
3980 IXL_SC_AQ_FLAG_NVMREAD);
3981 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL);
3982 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RSS);
3983 }
3984
3985 #define IXL_API_VER(maj, min) (((uint32_t)(maj) << 16) | (min))
3986 if (IXL_API_VER(api_maj_ver, api_min_ver) >= IXL_API_VER(1, 5)) {
3987 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL);
3988 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK);
3989 }
3990 #undef IXL_API_VER
3991
3992 return 0;
3993 }
3994
3995 static int
3996 ixl_get_nvm_version(struct ixl_softc *sc)
3997 {
3998 uint16_t nvmver, cfg_ptr, eetrack_hi, eetrack_lo, oem_hi, oem_lo;
3999 uint32_t eetrack, oem;
4000 uint16_t nvm_maj_ver, nvm_min_ver, oem_build;
4001 uint8_t oem_ver, oem_patch;
4002
4003 nvmver = cfg_ptr = eetrack_hi = eetrack_lo = oem_hi = oem_lo = 0;
4004 ixl_rd16_nvm(sc, I40E_SR_NVM_DEV_STARTER_VERSION, &nvmver);
4005 ixl_rd16_nvm(sc, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
4006 ixl_rd16_nvm(sc, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
4007 ixl_rd16_nvm(sc, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
4008 ixl_rd16_nvm(sc, cfg_ptr + I40E_NVM_OEM_VER_OFF, &oem_hi);
4009 ixl_rd16_nvm(sc, cfg_ptr + I40E_NVM_OEM_VER_OFF + 1, &oem_lo);
4010
4011 nvm_maj_ver = (uint16_t)__SHIFTOUT(nvmver, IXL_NVM_VERSION_HI_MASK);
4012 nvm_min_ver = (uint16_t)__SHIFTOUT(nvmver, IXL_NVM_VERSION_LO_MASK);
4013 eetrack = ((uint32_t)eetrack_hi << 16) | eetrack_lo;
4014 oem = ((uint32_t)oem_hi << 16) | oem_lo;
4015 oem_ver = __SHIFTOUT(oem, IXL_NVM_OEMVERSION_MASK);
4016 oem_build = __SHIFTOUT(oem, IXL_NVM_OEMBUILD_MASK);
4017 oem_patch = __SHIFTOUT(oem, IXL_NVM_OEMPATCH_MASK);
4018
4019 aprint_normal(" nvm %x.%02x etid %08x oem %d.%d.%d",
4020 nvm_maj_ver, nvm_min_ver, eetrack,
4021 oem_ver, oem_build, oem_patch);
4022
4023 return 0;
4024 }
4025
4026 static int
4027 ixl_pxe_clear(struct ixl_softc *sc)
4028 {
4029 struct ixl_aq_desc iaq;
4030 int rv;
4031
4032 memset(&iaq, 0, sizeof(iaq));
4033 iaq.iaq_opcode = htole16(IXL_AQ_OP_CLEAR_PXE_MODE);
4034 iaq.iaq_param[0] = htole32(0x2);
4035
4036 rv = ixl_atq_poll(sc, &iaq, 250);
4037
4038 ixl_wr(sc, I40E_GLLAN_RCTL_0, 0x1);
4039
4040 if (rv != 0)
4041 return ETIMEDOUT;
4042
4043 switch (iaq.iaq_retval) {
4044 case htole16(IXL_AQ_RC_OK):
4045 case htole16(IXL_AQ_RC_EEXIST):
4046 break;
4047 default:
4048 return EIO;
4049 }
4050
4051 return 0;
4052 }
4053
4054 static int
4055 ixl_lldp_shut(struct ixl_softc *sc)
4056 {
4057 struct ixl_aq_desc iaq;
4058
4059 memset(&iaq, 0, sizeof(iaq));
4060 iaq.iaq_opcode = htole16(IXL_AQ_OP_LLDP_STOP_AGENT);
4061 iaq.iaq_param[0] = htole32(IXL_LLDP_SHUTDOWN);
4062
4063 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4064 aprint_error_dev(sc->sc_dev, "STOP LLDP AGENT timeout\n");
4065 return -1;
4066 }
4067
4068 switch (iaq.iaq_retval) {
4069 case htole16(IXL_AQ_RC_EMODE):
4070 case htole16(IXL_AQ_RC_EPERM):
4071 /* ignore silently */
4072 default:
4073 break;
4074 }
4075
4076 return 0;
4077 }
4078
4079 static void
4080 ixl_parse_hw_capability(struct ixl_softc *sc, struct ixl_aq_capability *cap)
4081 {
4082 uint16_t id;
4083 uint32_t number, logical_id;
4084
4085 id = le16toh(cap->cap_id);
4086 number = le32toh(cap->number);
4087 logical_id = le32toh(cap->logical_id);
4088
4089 switch (id) {
4090 case IXL_AQ_CAP_RSS:
4091 sc->sc_rss_table_size = number;
4092 sc->sc_rss_table_entry_width = logical_id;
4093 break;
4094 case IXL_AQ_CAP_RXQ:
4095 case IXL_AQ_CAP_TXQ:
4096 sc->sc_nqueue_pairs_device = MIN(number,
4097 sc->sc_nqueue_pairs_device);
4098 break;
4099 }
4100 }
4101
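/*
 * Fetch the function capability list.  If the firmware answers with
 * ENOMEM it reports the required buffer size in iaq_datalen, so retry
 * with a larger DMA buffer until the list fits.
 */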
4102 static int
4103 ixl_get_hw_capabilities(struct ixl_softc *sc)
4104 {
4105 struct ixl_dmamem idm;
4106 struct ixl_aq_desc iaq;
4107 struct ixl_aq_capability *caps;
4108 size_t i, ncaps;
4109 bus_size_t caps_size;
4110 uint16_t status;
4111 int rv;
4112
4113 caps_size = sizeof(caps[0]) * 40;
4114 memset(&iaq, 0, sizeof(iaq));
4115 iaq.iaq_opcode = htole16(IXL_AQ_OP_LIST_FUNC_CAP);
4116
4117 do {
4118 if (ixl_dmamem_alloc(sc, &idm, caps_size, 0) != 0) {
4119 return -1;
4120 }
4121
4122 iaq.iaq_flags = htole16(IXL_AQ_BUF |
4123 (caps_size > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4124 iaq.iaq_datalen = htole16(caps_size);
4125 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
4126
4127 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0,
4128 IXL_DMA_LEN(&idm), BUS_DMASYNC_PREREAD);
4129
4130 rv = ixl_atq_poll(sc, &iaq, 250);
4131
4132 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0,
4133 IXL_DMA_LEN(&idm), BUS_DMASYNC_POSTREAD);
4134
4135 if (rv != 0) {
4136 aprint_error(", HW capabilities timeout\n");
4137 goto done;
4138 }
4139
4140 status = le16toh(iaq.iaq_retval);
4141
4142 if (status == IXL_AQ_RC_ENOMEM) {
4143 caps_size = le16toh(iaq.iaq_datalen);
4144 ixl_dmamem_free(sc, &idm);
4145 }
4146 } while (status == IXL_AQ_RC_ENOMEM);
4147
4148 if (status != IXL_AQ_RC_OK) {
4149 aprint_error(", HW capabilities error\n");
4150 goto done;
4151 }
4152
4153 caps = IXL_DMA_KVA(&idm);
4154 ncaps = le16toh(iaq.iaq_param[1]);
4155
4156 for (i = 0; i < ncaps; i++) {
4157 ixl_parse_hw_capability(sc, &caps[i]);
4158 }
4159
4160 done:
4161 ixl_dmamem_free(sc, &idm);
4162 return rv;
4163 }
4164
4165 static int
4166 ixl_get_mac(struct ixl_softc *sc)
4167 {
4168 struct ixl_dmamem idm;
4169 struct ixl_aq_desc iaq;
4170 struct ixl_aq_mac_addresses *addrs;
4171 int rv;
4172
4173 if (ixl_dmamem_alloc(sc, &idm, sizeof(*addrs), 0) != 0) {
4174 aprint_error(", unable to allocate mac addresses\n");
4175 return -1;
4176 }
4177
4178 memset(&iaq, 0, sizeof(iaq));
4179 iaq.iaq_flags = htole16(IXL_AQ_BUF);
4180 iaq.iaq_opcode = htole16(IXL_AQ_OP_MAC_ADDRESS_READ);
4181 iaq.iaq_datalen = htole16(sizeof(*addrs));
4182 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
4183
4184 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4185 BUS_DMASYNC_PREREAD);
4186
4187 rv = ixl_atq_poll(sc, &iaq, 250);
4188
4189 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4190 BUS_DMASYNC_POSTREAD);
4191
4192 if (rv != 0) {
4193 aprint_error(", MAC ADDRESS READ timeout\n");
4194 rv = -1;
4195 goto done;
4196 }
4197 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4198 aprint_error(", MAC ADDRESS READ error\n");
4199 rv = -1;
4200 goto done;
4201 }
4202
4203 addrs = IXL_DMA_KVA(&idm);
4204 if (!ISSET(iaq.iaq_param[0], htole32(IXL_AQ_MAC_PORT_VALID))) {
4205 printf(", port address is not valid\n");
4206 goto done;
4207 }
4208
4209 memcpy(sc->sc_enaddr, addrs->port, ETHER_ADDR_LEN);
4210 rv = 0;
4211
4212 done:
4213 ixl_dmamem_free(sc, &idm);
4214 return rv;
4215 }
4216
4217 static int
4218 ixl_get_switch_config(struct ixl_softc *sc)
4219 {
4220 struct ixl_dmamem idm;
4221 struct ixl_aq_desc iaq;
4222 struct ixl_aq_switch_config *hdr;
4223 struct ixl_aq_switch_config_element *elms, *elm;
4224 unsigned int nelm, i;
4225 int rv;
4226
4227 if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
4228 aprint_error_dev(sc->sc_dev,
4229 "unable to allocate switch config buffer\n");
4230 return -1;
4231 }
4232
4233 memset(&iaq, 0, sizeof(iaq));
4234 iaq.iaq_flags = htole16(IXL_AQ_BUF |
4235 (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4236 iaq.iaq_opcode = htole16(IXL_AQ_OP_SWITCH_GET_CONFIG);
4237 iaq.iaq_datalen = htole16(IXL_AQ_BUFLEN);
4238 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
4239
4240 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4241 BUS_DMASYNC_PREREAD);
4242
4243 rv = ixl_atq_poll(sc, &iaq, 250);
4244
4245 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4246 BUS_DMASYNC_POSTREAD);
4247
4248 if (rv != 0) {
4249 aprint_error_dev(sc->sc_dev, "GET SWITCH CONFIG timeout\n");
4250 rv = -1;
4251 goto done;
4252 }
4253 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4254 aprint_error_dev(sc->sc_dev, "GET SWITCH CONFIG error\n");
4255 rv = -1;
4256 goto done;
4257 }
4258
4259 hdr = IXL_DMA_KVA(&idm);
4260 elms = (struct ixl_aq_switch_config_element *)(hdr + 1);
4261
4262 nelm = le16toh(hdr->num_reported);
4263 if (nelm < 1) {
4264 aprint_error_dev(sc->sc_dev, "no switch config available\n");
4265 rv = -1;
4266 goto done;
4267 }
4268
4269 for (i = 0; i < nelm; i++) {
4270 elm = &elms[i];
4271
4272 aprint_debug_dev(sc->sc_dev,
4273 "type %x revision %u seid %04x\n",
4274 elm->type, elm->revision, le16toh(elm->seid));
4275 aprint_debug_dev(sc->sc_dev,
4276 "uplink %04x downlink %04x\n",
4277 le16toh(elm->uplink_seid),
4278 le16toh(elm->downlink_seid));
4279 aprint_debug_dev(sc->sc_dev,
4280 "conntype %x scheduler %04x extra %04x\n",
4281 elm->connection_type,
4282 le16toh(elm->scheduler_id),
4283 le16toh(elm->element_info));
4284 }
4285
4286 elm = &elms[0];
4287
4288 sc->sc_uplink_seid = elm->uplink_seid;
4289 sc->sc_downlink_seid = elm->downlink_seid;
4290 sc->sc_seid = elm->seid;
4291
4292 if ((sc->sc_uplink_seid == htole16(0)) !=
4293 (sc->sc_downlink_seid == htole16(0))) {
4294 aprint_error_dev(sc->sc_dev, "SEIDs are misconfigured\n");
4295 rv = -1;
4296 goto done;
4297 }
4298
4299 done:
4300 ixl_dmamem_free(sc, &idm);
4301 return rv;
4302 }
4303
4304 static int
4305 ixl_phy_mask_ints(struct ixl_softc *sc)
4306 {
4307 struct ixl_aq_desc iaq;
4308
4309 memset(&iaq, 0, sizeof(iaq));
4310 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_EVENT_MASK);
4311 iaq.iaq_param[2] = htole32(IXL_AQ_PHY_EV_MASK &
4312 ~(IXL_AQ_PHY_EV_LINK_UPDOWN | IXL_AQ_PHY_EV_MODULE_QUAL_FAIL |
4313 IXL_AQ_PHY_EV_MEDIA_NA));
4314
4315 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4316 aprint_error_dev(sc->sc_dev, "SET PHY EVENT MASK timeout\n");
4317 return -1;
4318 }
4319 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4320 aprint_error_dev(sc->sc_dev, "SET PHY EVENT MASK error\n");
4321 return -1;
4322 }
4323
4324 return 0;
4325 }
4326
4327 static int
4328 ixl_get_phy_abilities(struct ixl_softc *sc, struct ixl_dmamem *idm)
4329 {
4330 struct ixl_aq_desc iaq;
4331 int rv;
4332
4333 memset(&iaq, 0, sizeof(iaq));
4334 iaq.iaq_flags = htole16(IXL_AQ_BUF |
4335 (IXL_DMA_LEN(idm) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4336 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_GET_ABILITIES);
4337 iaq.iaq_datalen = htole16(IXL_DMA_LEN(idm));
4338 iaq.iaq_param[0] = htole32(IXL_AQ_PHY_REPORT_INIT);
4339 ixl_aq_dva(&iaq, IXL_DMA_DVA(idm));
4340
4341 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
4342 BUS_DMASYNC_PREREAD);
4343
4344 rv = ixl_atq_poll(sc, &iaq, 250);
4345
4346 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
4347 BUS_DMASYNC_POSTREAD);
4348
4349 if (rv != 0)
4350 return -1;
4351
4352 return le16toh(iaq.iaq_retval);
4353 }
4354
4355 static int
4356 ixl_get_phy_info(struct ixl_softc *sc)
4357 {
4358 struct ixl_dmamem idm;
4359 struct ixl_aq_phy_abilities *phy;
4360 int rv;
4361
4362 if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
4363 aprint_error_dev(sc->sc_dev,
4364 "unable to allocate phy abilities buffer\n");
4365 return -1;
4366 }
4367
4368 rv = ixl_get_phy_abilities(sc, &idm);
4369 switch (rv) {
4370 case -1:
4371 aprint_error_dev(sc->sc_dev, "GET PHY ABILITIES timeout\n");
4372 goto done;
4373 case IXL_AQ_RC_OK:
4374 break;
4375 case IXL_AQ_RC_EIO:
4376 aprint_error_dev(sc->sc_dev, "unable to query phy types\n");
4377 goto done;
4378 default:
4379 aprint_error_dev(sc->sc_dev,
4380 "GET PHY ABILITIES error %u\n", rv);
4381 goto done;
4382 }
4383
4384 phy = IXL_DMA_KVA(&idm);
4385
4386 sc->sc_phy_types = le32toh(phy->phy_type);
4387 sc->sc_phy_types |= (uint64_t)le32toh(phy->phy_type_ext) << 32;
4388
4389 sc->sc_phy_abilities = phy->abilities;
4390 sc->sc_phy_linkspeed = phy->link_speed;
4391 sc->sc_phy_fec_cfg = phy->fec_cfg_curr_mod_ext_info &
4392 (IXL_AQ_ENABLE_FEC_KR | IXL_AQ_ENABLE_FEC_RS |
4393 IXL_AQ_REQUEST_FEC_KR | IXL_AQ_REQUEST_FEC_RS);
4394 sc->sc_eee_cap = phy->eee_capability;
4395 sc->sc_eeer_val = phy->eeer_val;
4396 sc->sc_d3_lpan = phy->d3_lpan;
4397
4398 rv = 0;
4399
4400 done:
4401 ixl_dmamem_free(sc, &idm);
4402 return rv;
4403 }
4404
4405 static int
4406 ixl_set_phy_config(struct ixl_softc *sc,
4407 uint8_t link_speed, uint8_t abilities, bool polling)
4408 {
4409 struct ixl_aq_phy_param *param;
4410 struct ixl_atq iatq;
4411 struct ixl_aq_desc *iaq;
4412 int error;
4413
4414 memset(&iatq, 0, sizeof(iatq));
4415
4416 iaq = &iatq.iatq_desc;
4417 iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_CONFIG);
4418 param = (struct ixl_aq_phy_param *)&iaq->iaq_param;
4419 param->phy_types = htole32((uint32_t)sc->sc_phy_types);
4420 param->phy_type_ext = (uint8_t)(sc->sc_phy_types >> 32);
4421 param->link_speed = link_speed;
4422 param->abilities = abilities | IXL_AQ_PHY_ABILITY_AUTO_LINK;
4423 param->fec_cfg = sc->sc_phy_fec_cfg;
4424 param->eee_capability = sc->sc_eee_cap;
4425 param->eeer_val = sc->sc_eeer_val;
4426 param->d3_lpan = sc->sc_d3_lpan;
4427
4428 if (polling)
4429 error = ixl_atq_poll(sc, iaq, 250);
4430 else
4431 error = ixl_atq_exec(sc, &iatq);
4432
4433 if (error != 0)
4434 return error;
4435
4436 switch (le16toh(iaq->iaq_retval)) {
4437 case IXL_AQ_RC_OK:
4438 break;
4439 case IXL_AQ_RC_EPERM:
4440 return EPERM;
4441 default:
4442 return EIO;
4443 }
4444
4445 return 0;
4446 }
4447
4448 static int
4449 ixl_set_phy_autoselect(struct ixl_softc *sc)
4450 {
4451 uint8_t link_speed, abilities;
4452
4453 link_speed = sc->sc_phy_linkspeed;
4454 abilities = IXL_PHY_ABILITY_LINKUP | IXL_PHY_ABILITY_AUTONEGO;
4455
4456 return ixl_set_phy_config(sc, link_speed, abilities, true);
4457 }
4458
4459 static int
4460 ixl_get_link_status_poll(struct ixl_softc *sc, int *l)
4461 {
4462 struct ixl_aq_desc iaq;
4463 struct ixl_aq_link_param *param;
4464 int link;
4465
4466 memset(&iaq, 0, sizeof(iaq));
4467 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
4468 param = (struct ixl_aq_link_param *)iaq.iaq_param;
4469 param->notify = IXL_AQ_LINK_NOTIFY;
4470
4471 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4472 return ETIMEDOUT;
4473 }
4474 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4475 return EIO;
4476 }
4477
4478 link = ixl_set_link_status(sc, &iaq);
4479
4480 if (l != NULL)
4481 *l = link;
4482
4483 return 0;
4484 }
4485
4486 static int
4487 ixl_get_vsi(struct ixl_softc *sc)
4488 {
4489 struct ixl_dmamem *vsi = &sc->sc_scratch;
4490 struct ixl_aq_desc iaq;
4491 struct ixl_aq_vsi_param *param;
4492 struct ixl_aq_vsi_reply *reply;
4493 struct ixl_aq_vsi_data *data;
4494 int rv;
4495
4496 /* grumble, vsi info isn't "known" at compile time */
4497
4498 memset(&iaq, 0, sizeof(iaq));
4499 iaq.iaq_flags = htole16(IXL_AQ_BUF |
4500 (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4501 iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VSI_PARAMS);
4502 iaq.iaq_datalen = htole16(IXL_DMA_LEN(vsi));
4503 ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
4504
4505 param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
4506 param->uplink_seid = sc->sc_seid;
4507
4508 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4509 BUS_DMASYNC_PREREAD);
4510
4511 rv = ixl_atq_poll(sc, &iaq, 250);
4512
4513 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4514 BUS_DMASYNC_POSTREAD);
4515
4516 if (rv != 0) {
4517 return ETIMEDOUT;
4518 }
4519
4520 switch (le16toh(iaq.iaq_retval)) {
4521 case IXL_AQ_RC_OK:
4522 break;
4523 case IXL_AQ_RC_ENOENT:
4524 return ENOENT;
4525 case IXL_AQ_RC_EACCES:
4526 return EACCES;
4527 default:
4528 return EIO;
4529 }
4530
4531 reply = (struct ixl_aq_vsi_reply *)iaq.iaq_param;
4532 sc->sc_vsi_number = le16toh(reply->vsi_number);
4533 data = IXL_DMA_KVA(vsi);
4534 sc->sc_vsi_stat_counter_idx = le16toh(data->stat_counter_idx);
4535
4536 return 0;
4537 }
4538
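/*
 * Push the updated VSI parameters: a contiguous queue mapping covering
 * sc_nqueue_pairs and the VLAN stripping mode that matches the current
 * ETHERCAP_VLAN_HWTAGGING setting.
 */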
4539 static int
4540 ixl_set_vsi(struct ixl_softc *sc)
4541 {
4542 struct ixl_dmamem *vsi = &sc->sc_scratch;
4543 struct ixl_aq_desc iaq;
4544 struct ixl_aq_vsi_param *param;
4545 struct ixl_aq_vsi_data *data = IXL_DMA_KVA(vsi);
4546 unsigned int qnum;
4547 uint16_t val;
4548 int rv;
4549
4550 qnum = sc->sc_nqueue_pairs - 1;
4551
4552 data->valid_sections = htole16(IXL_AQ_VSI_VALID_QUEUE_MAP |
4553 IXL_AQ_VSI_VALID_VLAN);
4554
4555 CLR(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_MASK));
4556 SET(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_CONTIG));
4557 data->queue_mapping[0] = htole16(0);
4558 data->tc_mapping[0] = htole16((0 << IXL_AQ_VSI_TC_Q_OFFSET_SHIFT) |
4559 (qnum << IXL_AQ_VSI_TC_Q_NUMBER_SHIFT));
4560
4561 val = le16toh(data->port_vlan_flags);
4562 CLR(val, IXL_AQ_VSI_PVLAN_MODE_MASK | IXL_AQ_VSI_PVLAN_EMOD_MASK);
4563 SET(val, IXL_AQ_VSI_PVLAN_MODE_ALL);
4564
4565 if (ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWTAGGING)) {
4566 SET(val, IXL_AQ_VSI_PVLAN_EMOD_STR_BOTH);
4567 } else {
4568 SET(val, IXL_AQ_VSI_PVLAN_EMOD_NOTHING);
4569 }
4570
4571 data->port_vlan_flags = htole16(val);
4572
4573 /* grumble, vsi info isn't "known" at compile time */
4574
4575 memset(&iaq, 0, sizeof(iaq));
4576 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD |
4577 (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4578 iaq.iaq_opcode = htole16(IXL_AQ_OP_UPD_VSI_PARAMS);
4579 iaq.iaq_datalen = htole16(IXL_DMA_LEN(vsi));
4580 ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
4581
4582 param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
4583 param->uplink_seid = sc->sc_seid;
4584
4585 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4586 BUS_DMASYNC_PREWRITE);
4587
4588 rv = ixl_atq_poll(sc, &iaq, 250);
4589
4590 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4591 BUS_DMASYNC_POSTWRITE);
4592
4593 if (rv != 0) {
4594 return ETIMEDOUT;
4595 }
4596
4597 switch (le16toh(iaq.iaq_retval)) {
4598 case IXL_AQ_RC_OK:
4599 break;
4600 case IXL_AQ_RC_ENOENT:
4601 return ENOENT;
4602 case IXL_AQ_RC_EACCES:
4603 return EACCES;
4604 default:
4605 return EIO;
4606 }
4607
4608 return 0;
4609 }
4610
4611 static void
4612 ixl_set_filter_control(struct ixl_softc *sc)
4613 {
4614 uint32_t reg;
4615
4616 reg = ixl_rd_rx_csr(sc, I40E_PFQF_CTL_0);
4617
4618 CLR(reg, I40E_PFQF_CTL_0_HASHLUTSIZE_MASK);
4619 SET(reg, I40E_HASH_LUT_SIZE_128 << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT);
4620
4621 SET(reg, I40E_PFQF_CTL_0_FD_ENA_MASK);
4622 SET(reg, I40E_PFQF_CTL_0_ETYPE_ENA_MASK);
4623 SET(reg, I40E_PFQF_CTL_0_MACVLAN_ENA_MASK);
4624
4625 ixl_wr_rx_csr(sc, I40E_PFQF_CTL_0, reg);
4626 }
4627
4628 static inline void
4629 ixl_get_default_rss_key(uint32_t *buf, size_t len)
4630 {
4631 size_t cplen;
4632 uint8_t rss_seed[RSS_KEYSIZE];
4633
4634 rss_getkey(rss_seed);
4635 memset(buf, 0, len);
4636
4637 cplen = MIN(len, sizeof(rss_seed));
4638 memcpy(buf, rss_seed, cplen);
4639 }
4640
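/*
 * Program the RSS hash key through the admin queue.  The key is split
 * into the standard and extended portions of the AQ data buffer.
 */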
4641 static int
4642 ixl_set_rss_key(struct ixl_softc *sc, uint8_t *key, size_t keylen)
4643 {
4644 struct ixl_dmamem *idm;
4645 struct ixl_atq iatq;
4646 struct ixl_aq_desc *iaq;
4647 struct ixl_aq_rss_key_param *param;
4648 struct ixl_aq_rss_key_data *data;
4649 size_t len, datalen, stdlen, extlen;
4650 uint16_t vsi_id;
4651 int rv;
4652
4653 memset(&iatq, 0, sizeof(iatq));
4654 iaq = &iatq.iatq_desc;
4655 idm = &sc->sc_aqbuf;
4656
4657 datalen = sizeof(*data);
4658
4659 /*XXX The buf size has to be less than the size of the register */
4660 datalen = MIN(IXL_RSS_KEY_SIZE_REG * sizeof(uint32_t), datalen);
4661
4662 iaq->iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD |
4663 (datalen > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4664 iaq->iaq_opcode = htole16(IXL_AQ_OP_RSS_SET_KEY);
4665 iaq->iaq_datalen = htole16(datalen);
4666
4667 param = (struct ixl_aq_rss_key_param *)iaq->iaq_param;
4668 vsi_id = (sc->sc_vsi_number << IXL_AQ_RSSKEY_VSI_ID_SHIFT) |
4669 IXL_AQ_RSSKEY_VSI_VALID;
4670 param->vsi_id = htole16(vsi_id);
4671
4672 memset(IXL_DMA_KVA(idm), 0, IXL_DMA_LEN(idm));
4673 data = IXL_DMA_KVA(idm);
4674
4675 len = MIN(keylen, datalen);
4676 stdlen = MIN(sizeof(data->standard_rss_key), len);
4677 memcpy(data->standard_rss_key, key, stdlen);
4678 len = (len > stdlen) ? (len - stdlen) : 0;
4679
4680 extlen = MIN(sizeof(data->extended_hash_key), len);
4682 memcpy(data->extended_hash_key, key + stdlen, extlen);
4683
4684 ixl_aq_dva(iaq, IXL_DMA_DVA(idm));
4685
4686 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
4687 IXL_DMA_LEN(idm), BUS_DMASYNC_PREWRITE);
4688
4689 rv = ixl_atq_exec(sc, &iatq);
4690
4691 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
4692 IXL_DMA_LEN(idm), BUS_DMASYNC_POSTWRITE);
4693
4694 if (rv != 0) {
4695 return ETIMEDOUT;
4696 }
4697
4698 if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK)) {
4699 return EIO;
4700 }
4701
4702 return 0;
4703 }
4704
4705 static int
4706 ixl_set_rss_lut(struct ixl_softc *sc, uint8_t *lut, size_t lutlen)
4707 {
4708 struct ixl_dmamem *idm;
4709 struct ixl_atq iatq;
4710 struct ixl_aq_desc *iaq;
4711 struct ixl_aq_rss_lut_param *param;
4712 uint16_t vsi_id;
4713 uint8_t *data;
4714 size_t dmalen;
4715 int rv;
4716
4717 memset(&iatq, 0, sizeof(iatq));
4718 iaq = &iatq.iatq_desc;
4719 idm = &sc->sc_aqbuf;
4720
4721 dmalen = MIN(lutlen, IXL_DMA_LEN(idm));
4722
4723 iaq->iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD |
4724 (dmalen > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4725 iaq->iaq_opcode = htole16(IXL_AQ_OP_RSS_SET_LUT);
4726 iaq->iaq_datalen = htole16(dmalen);
4727
4728 memset(IXL_DMA_KVA(idm), 0, IXL_DMA_LEN(idm));
4729 data = IXL_DMA_KVA(idm);
4730 memcpy(data, lut, dmalen);
4731 ixl_aq_dva(iaq, IXL_DMA_DVA(idm));
4732
4733 param = (struct ixl_aq_rss_lut_param *)iaq->iaq_param;
4734 vsi_id = (sc->sc_vsi_number << IXL_AQ_RSSLUT_VSI_ID_SHIFT) |
4735 IXL_AQ_RSSLUT_VSI_VALID;
4736 param->vsi_id = htole16(vsi_id);
4737 param->flags = htole16(IXL_AQ_RSSLUT_TABLE_TYPE_PF <<
4738 IXL_AQ_RSSLUT_TABLE_TYPE_SHIFT);
4739
4740 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
4741 IXL_DMA_LEN(idm), BUS_DMASYNC_PREWRITE);
4742
4743 rv = ixl_atq_exec(sc, &iatq);
4744
4745 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
4746 IXL_DMA_LEN(idm), BUS_DMASYNC_POSTWRITE);
4747
4748 if (rv != 0) {
4749 return ETIMEDOUT;
4750 }
4751
4752 if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK)) {
4753 return EIO;
4754 }
4755
4756 return 0;
4757 }
4758
4759 static int
4760 ixl_register_rss_key(struct ixl_softc *sc)
4761 {
4762 uint32_t rss_seed[IXL_RSS_KEY_SIZE_REG];
4763 int rv;
4764 size_t i;
4765
4766 ixl_get_default_rss_key(rss_seed, sizeof(rss_seed));
4767
4768 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RSS)) {
4769 rv = ixl_set_rss_key(sc, (uint8_t*)rss_seed,
4770 sizeof(rss_seed));
4771 } else {
4772 rv = 0;
4773 for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
4774 ixl_wr_rx_csr(sc, I40E_PFQF_HKEY(i), rss_seed[i]);
4775 }
4776 }
4777
4778 return rv;
4779 }
4780
4781 static void
4782 ixl_register_rss_pctype(struct ixl_softc *sc)
4783 {
4784 uint64_t set_hena = 0;
4785 uint32_t hena0, hena1;
4786
4787 if (sc->sc_mac_type == I40E_MAC_X722)
4788 set_hena = IXL_RSS_HENA_DEFAULT_X722;
4789 else
4790 set_hena = IXL_RSS_HENA_DEFAULT_XL710;
4791
4792 hena0 = ixl_rd_rx_csr(sc, I40E_PFQF_HENA(0));
4793 hena1 = ixl_rd_rx_csr(sc, I40E_PFQF_HENA(1));
4794
4795 SET(hena0, set_hena);
4796 SET(hena1, set_hena >> 32);
4797
4798 ixl_wr_rx_csr(sc, I40E_PFQF_HENA(0), hena0);
4799 ixl_wr_rx_csr(sc, I40E_PFQF_HENA(1), hena1);
4800 }
4801
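/*
 * Fill the RSS lookup table so hash buckets are spread round-robin over
 * the active queue pairs, either via the admin queue when the device
 * supports the RSS AQ commands, or directly through the PFQF_HLUT
 * registers (four 8-bit entries per 32-bit register).
 */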
4802 static int
4803 ixl_register_rss_hlut(struct ixl_softc *sc)
4804 {
4805 unsigned int qid;
4806 uint8_t hlut_buf[512], lut_mask;
4807 uint32_t *hluts;
4808 size_t i, hluts_num;
4809 int rv;
4810
4811 lut_mask = (0x01 << sc->sc_rss_table_entry_width) - 1;
4812
4813 for (i = 0; i < sc->sc_rss_table_size; i++) {
4814 qid = i % sc->sc_nqueue_pairs;
4815 hlut_buf[i] = qid & lut_mask;
4816 }
4817
4818 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RSS)) {
4819 rv = ixl_set_rss_lut(sc, hlut_buf, sizeof(hlut_buf));
4820 } else {
4821 rv = 0;
4822 hluts = (uint32_t *)hlut_buf;
4823 hluts_num = sc->sc_rss_table_size >> 2;
4824 for (i = 0; i < hluts_num; i++) {
4825 ixl_wr(sc, I40E_PFQF_HLUT(i), hluts[i]);
4826 }
4827 ixl_flush(sc);
4828 }
4829
4830 return rv;
4831 }
4832
4833 static void
4834 ixl_config_rss(struct ixl_softc *sc)
4835 {
4836
4837 KASSERT(mutex_owned(&sc->sc_cfg_lock));
4838
4839 ixl_register_rss_key(sc);
4840 ixl_register_rss_pctype(sc);
4841 ixl_register_rss_hlut(sc);
4842 }
4843
4844 static const struct ixl_phy_type *
4845 ixl_search_phy_type(uint8_t phy_type)
4846 {
4847 const struct ixl_phy_type *itype;
4848 uint64_t mask;
4849 unsigned int i;
4850
4851 if (phy_type >= 64)
4852 return NULL;
4853
4854 mask = 1ULL << phy_type;
4855
4856 for (i = 0; i < __arraycount(ixl_phy_type_map); i++) {
4857 itype = &ixl_phy_type_map[i];
4858
4859 if (ISSET(itype->phy_type, mask))
4860 return itype;
4861 }
4862
4863 return NULL;
4864 }
4865
4866 static uint64_t
4867 ixl_search_link_speed(uint8_t link_speed)
4868 {
4869 const struct ixl_speed_type *type;
4870 unsigned int i;
4871
4872 for (i = 0; i < __arraycount(ixl_speed_type_map); i++) {
4873 type = &ixl_speed_type_map[i];
4874
4875 if (ISSET(type->dev_speed, link_speed))
4876 return type->net_speed;
4877 }
4878
4879 return 0;
4880 }
4881
4882 static uint8_t
4883 ixl_search_baudrate(uint64_t baudrate)
4884 {
4885 const struct ixl_speed_type *type;
4886 unsigned int i;
4887
4888 for (i = 0; i < __arraycount(ixl_speed_type_map); i++) {
4889 type = &ixl_speed_type_map[i];
4890
4891 if (type->net_speed == baudrate) {
4892 return type->dev_speed;
4893 }
4894 }
4895
4896 return 0;
4897 }
4898
4899 static int
4900 ixl_restart_an(struct ixl_softc *sc)
4901 {
4902 struct ixl_aq_desc iaq;
4903
4904 memset(&iaq, 0, sizeof(iaq));
4905 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_RESTART_AN);
4906 iaq.iaq_param[0] =
4907 htole32(IXL_AQ_PHY_RESTART_AN | IXL_AQ_PHY_LINK_ENABLE);
4908
4909 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4910 aprint_error_dev(sc->sc_dev, "RESTART AN timeout\n");
4911 return -1;
4912 }
4913 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4914 aprint_error_dev(sc->sc_dev, "RESTART AN error\n");
4915 return -1;
4916 }
4917
4918 return 0;
4919 }
4920
4921 static int
4922 ixl_add_macvlan(struct ixl_softc *sc, const uint8_t *macaddr,
4923 uint16_t vlan, uint16_t flags)
4924 {
4925 struct ixl_aq_desc iaq;
4926 struct ixl_aq_add_macvlan *param;
4927 struct ixl_aq_add_macvlan_elem *elem;
4928
4929 memset(&iaq, 0, sizeof(iaq));
4930 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4931 iaq.iaq_opcode = htole16(IXL_AQ_OP_ADD_MACVLAN);
4932 iaq.iaq_datalen = htole16(sizeof(*elem));
4933 ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));
4934
4935 param = (struct ixl_aq_add_macvlan *)&iaq.iaq_param;
4936 param->num_addrs = htole16(1);
4937 param->seid0 = htole16(0x8000) | sc->sc_seid;
4938 param->seid1 = 0;
4939 param->seid2 = 0;
4940
4941 elem = IXL_DMA_KVA(&sc->sc_scratch);
4942 memset(elem, 0, sizeof(*elem));
4943 memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
4944 elem->flags = htole16(IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH | flags);
4945 elem->vlan = htole16(vlan);
4946
4947 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4948 return IXL_AQ_RC_EINVAL;
4949 }
4950
4951 switch (le16toh(iaq.iaq_retval)) {
4952 case IXL_AQ_RC_OK:
4953 break;
4954 case IXL_AQ_RC_ENOSPC:
4955 return ENOSPC;
4956 case IXL_AQ_RC_ENOENT:
4957 return ENOENT;
4958 case IXL_AQ_RC_EACCES:
4959 return EACCES;
4960 case IXL_AQ_RC_EEXIST:
4961 return EEXIST;
4962 case IXL_AQ_RC_EINVAL:
4963 return EINVAL;
4964 default:
4965 return EIO;
4966 }
4967
4968 return 0;
4969 }
4970
4971 static int
4972 ixl_remove_macvlan(struct ixl_softc *sc, const uint8_t *macaddr,
4973 uint16_t vlan, uint16_t flags)
4974 {
4975 struct ixl_aq_desc iaq;
4976 struct ixl_aq_remove_macvlan *param;
4977 struct ixl_aq_remove_macvlan_elem *elem;
4978
4979 memset(&iaq, 0, sizeof(iaq));
4980 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4981 iaq.iaq_opcode = htole16(IXL_AQ_OP_REMOVE_MACVLAN);
4982 iaq.iaq_datalen = htole16(sizeof(*elem));
4983 ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));
4984
4985 param = (struct ixl_aq_remove_macvlan *)&iaq.iaq_param;
4986 param->num_addrs = htole16(1);
4987 param->seid0 = htole16(0x8000) | sc->sc_seid;
4988 param->seid1 = 0;
4989 param->seid2 = 0;
4990
4991 elem = IXL_DMA_KVA(&sc->sc_scratch);
4992 memset(elem, 0, sizeof(*elem));
4993 memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
4994 elem->flags = htole16(IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH | flags);
4995 elem->vlan = htole16(vlan);
4996
4997 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4998 return EINVAL;
4999 }
5000
5001 switch (le16toh(iaq.iaq_retval)) {
5002 case IXL_AQ_RC_OK:
5003 break;
5004 case IXL_AQ_RC_ENOENT:
5005 return ENOENT;
5006 case IXL_AQ_RC_EACCES:
5007 return EACCES;
5008 case IXL_AQ_RC_EINVAL:
5009 return EINVAL;
5010 default:
5011 return EIO;
5012 }
5013
5014 return 0;
5015 }
5016
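/*
 * Set up the Host Memory Cache: size the LAN TX/RX (and unused FCoE)
 * object areas, allocate the backing pages plus the descriptor table
 * that maps them, and program the per-object base/count registers.
 */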
5017 static int
5018 ixl_hmc(struct ixl_softc *sc)
5019 {
5020 struct {
5021 uint32_t count;
5022 uint32_t minsize;
5023 bus_size_t objsiz;
5024 bus_size_t setoff;
5025 bus_size_t setcnt;
5026 } regs[] = {
5027 {
5028 0,
5029 IXL_HMC_TXQ_MINSIZE,
5030 I40E_GLHMC_LANTXOBJSZ,
5031 I40E_GLHMC_LANTXBASE(sc->sc_pf_id),
5032 I40E_GLHMC_LANTXCNT(sc->sc_pf_id),
5033 },
5034 {
5035 0,
5036 IXL_HMC_RXQ_MINSIZE,
5037 I40E_GLHMC_LANRXOBJSZ,
5038 I40E_GLHMC_LANRXBASE(sc->sc_pf_id),
5039 I40E_GLHMC_LANRXCNT(sc->sc_pf_id),
5040 },
5041 {
5042 0,
5043 0,
5044 I40E_GLHMC_FCOEDDPOBJSZ,
5045 I40E_GLHMC_FCOEDDPBASE(sc->sc_pf_id),
5046 I40E_GLHMC_FCOEDDPCNT(sc->sc_pf_id),
5047 },
5048 {
5049 0,
5050 0,
5051 I40E_GLHMC_FCOEFOBJSZ,
5052 I40E_GLHMC_FCOEFBASE(sc->sc_pf_id),
5053 I40E_GLHMC_FCOEFCNT(sc->sc_pf_id),
5054 },
5055 };
5056 struct ixl_hmc_entry *e;
5057 uint64_t size, dva;
5058 uint8_t *kva;
5059 uint64_t *sdpage;
5060 unsigned int i;
5061 int npages, tables;
5062 uint32_t reg;
5063
5064 CTASSERT(__arraycount(regs) <= __arraycount(sc->sc_hmc_entries));
5065
5066 regs[IXL_HMC_LAN_TX].count = regs[IXL_HMC_LAN_RX].count =
5067 ixl_rd(sc, I40E_GLHMC_LANQMAX);
5068
5069 size = 0;
5070 for (i = 0; i < __arraycount(regs); i++) {
5071 e = &sc->sc_hmc_entries[i];
5072
5073 e->hmc_count = regs[i].count;
5074 reg = ixl_rd(sc, regs[i].objsiz);
5075 e->hmc_size = BIT_ULL(0x3F & reg);
5076 e->hmc_base = size;
5077
5078 if ((e->hmc_size * 8) < regs[i].minsize) {
5079 aprint_error_dev(sc->sc_dev,
5080 "kernel hmc entry is too big\n");
5081 return -1;
5082 }
5083
5084 size += roundup(e->hmc_size * e->hmc_count, IXL_HMC_ROUNDUP);
5085 }
5086 size = roundup(size, IXL_HMC_PGSIZE);
5087 npages = size / IXL_HMC_PGSIZE;
5088
5089 tables = roundup(size, IXL_HMC_L2SZ) / IXL_HMC_L2SZ;
5090
5091 if (ixl_dmamem_alloc(sc, &sc->sc_hmc_pd, size, IXL_HMC_PGSIZE) != 0) {
5092 aprint_error_dev(sc->sc_dev,
5093 "unable to allocate hmc pd memory\n");
5094 return -1;
5095 }
5096
5097 if (ixl_dmamem_alloc(sc, &sc->sc_hmc_sd, tables * IXL_HMC_PGSIZE,
5098 IXL_HMC_PGSIZE) != 0) {
5099 aprint_error_dev(sc->sc_dev,
5100 "unable to allocate hmc sd memory\n");
5101 ixl_dmamem_free(sc, &sc->sc_hmc_pd);
5102 return -1;
5103 }
5104
5105 kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
5106 memset(kva, 0, IXL_DMA_LEN(&sc->sc_hmc_pd));
5107
5108 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
5109 0, IXL_DMA_LEN(&sc->sc_hmc_pd),
5110 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5111
5112 dva = IXL_DMA_DVA(&sc->sc_hmc_pd);
5113 sdpage = IXL_DMA_KVA(&sc->sc_hmc_sd);
5114 memset(sdpage, 0, IXL_DMA_LEN(&sc->sc_hmc_sd));
5115
5116 for (i = 0; (int)i < npages; i++) {
5117 *sdpage = htole64(dva | IXL_HMC_PDVALID);
5118 sdpage++;
5119
5120 dva += IXL_HMC_PGSIZE;
5121 }
5122
5123 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_sd),
5124 0, IXL_DMA_LEN(&sc->sc_hmc_sd),
5125 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5126
5127 dva = IXL_DMA_DVA(&sc->sc_hmc_sd);
5128 for (i = 0; (int)i < tables; i++) {
5129 uint32_t count;
5130
5131 KASSERT(npages >= 0);
5132
5133 count = ((unsigned int)npages > IXL_HMC_PGS) ?
5134 IXL_HMC_PGS : (unsigned int)npages;
5135
5136 ixl_wr(sc, I40E_PFHMC_SDDATAHIGH, dva >> 32);
5137 ixl_wr(sc, I40E_PFHMC_SDDATALOW, dva |
5138 (count << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
5139 (1U << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT));
5140 ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
5141 ixl_wr(sc, I40E_PFHMC_SDCMD,
5142 (1U << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | i);
5143
5144 npages -= IXL_HMC_PGS;
5145 dva += IXL_HMC_PGSIZE;
5146 }
5147
5148 for (i = 0; i < __arraycount(regs); i++) {
5149 e = &sc->sc_hmc_entries[i];
5150
5151 ixl_wr(sc, regs[i].setoff, e->hmc_base / IXL_HMC_ROUNDUP);
5152 ixl_wr(sc, regs[i].setcnt, e->hmc_count);
5153 }
5154
5155 return 0;
5156 }
5157
5158 static void
5159 ixl_hmc_free(struct ixl_softc *sc)
5160 {
5161 ixl_dmamem_free(sc, &sc->sc_hmc_sd);
5162 ixl_dmamem_free(sc, &sc->sc_hmc_pd);
5163 }
5164
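/*
 * Pack a host-order structure into the bit-packed HMC object layout:
 * each entry of the packing table gives the source offset and the
 * destination lsb/width of one field.
 */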
5165 static void
5166 ixl_hmc_pack(void *d, const void *s, const struct ixl_hmc_pack *packing,
5167 unsigned int npacking)
5168 {
5169 uint8_t *dst = d;
5170 const uint8_t *src = s;
5171 unsigned int i;
5172
5173 for (i = 0; i < npacking; i++) {
5174 const struct ixl_hmc_pack *pack = &packing[i];
5175 unsigned int offset = pack->lsb / 8;
5176 unsigned int align = pack->lsb % 8;
5177 const uint8_t *in = src + pack->offset;
5178 uint8_t *out = dst + offset;
5179 int width = pack->width;
5180 unsigned int inbits = 0;
5181
5182 if (align) {
5183 inbits = (*in++) << align;
5184 *out++ |= (inbits & 0xff);
5185 inbits >>= 8;
5186
5187 width -= 8 - align;
5188 }
5189
5190 while (width >= 8) {
5191 inbits |= (*in++) << align;
5192 *out++ = (inbits & 0xff);
5193 inbits >>= 8;
5194
5195 width -= 8;
5196 }
5197
5198 if (width > 0) {
5199 inbits |= (*in) << align;
5200 *out |= (inbits & ((1 << width) - 1));
5201 }
5202 }
5203 }
5204
5205 static struct ixl_aq_buf *
5206 ixl_aqb_alloc(struct ixl_softc *sc)
5207 {
5208 struct ixl_aq_buf *aqb;
5209
5210 aqb = malloc(sizeof(*aqb), M_DEVBUF, M_WAITOK);
5211 if (aqb == NULL)
5212 return NULL;
5213
5214 aqb->aqb_size = IXL_AQ_BUFLEN;
5215
5216 if (bus_dmamap_create(sc->sc_dmat, aqb->aqb_size, 1,
5217 aqb->aqb_size, 0,
5218 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &aqb->aqb_map) != 0)
5219 goto free;
5220 if (bus_dmamem_alloc(sc->sc_dmat, aqb->aqb_size,
5221 IXL_AQ_ALIGN, 0, &aqb->aqb_seg, 1, &aqb->aqb_nsegs,
5222 BUS_DMA_WAITOK) != 0)
5223 goto destroy;
5224 if (bus_dmamem_map(sc->sc_dmat, &aqb->aqb_seg, aqb->aqb_nsegs,
5225 aqb->aqb_size, &aqb->aqb_data, BUS_DMA_WAITOK) != 0)
5226 goto dma_free;
5227 if (bus_dmamap_load(sc->sc_dmat, aqb->aqb_map, aqb->aqb_data,
5228 aqb->aqb_size, NULL, BUS_DMA_WAITOK) != 0)
5229 goto unmap;
5230
5231 return aqb;
5232 unmap:
5233 bus_dmamem_unmap(sc->sc_dmat, aqb->aqb_data, aqb->aqb_size);
5234 dma_free:
5235 bus_dmamem_free(sc->sc_dmat, &aqb->aqb_seg, 1);
5236 destroy:
5237 bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
5238 free:
5239 free(aqb, M_DEVBUF);
5240
5241 return NULL;
5242 }
5243
5244 static void
5245 ixl_aqb_free(struct ixl_softc *sc, struct ixl_aq_buf *aqb)
5246 {
5247 bus_dmamap_unload(sc->sc_dmat, aqb->aqb_map);
5248 bus_dmamem_unmap(sc->sc_dmat, aqb->aqb_data, aqb->aqb_size);
5249 bus_dmamem_free(sc->sc_dmat, &aqb->aqb_seg, 1);
5250 bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
5251 free(aqb, M_DEVBUF);
5252 }
5253
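/*
 * Refill the admin receive queue with DMA buffers, reusing idle buffers
 * where possible, and advance the tail register if anything was posted.
 */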
5254 static int
5255 ixl_arq_fill(struct ixl_softc *sc)
5256 {
5257 struct ixl_aq_buf *aqb;
5258 struct ixl_aq_desc *arq, *iaq;
5259 unsigned int prod = sc->sc_arq_prod;
5260 unsigned int n;
5261 int post = 0;
5262
5263 n = ixl_rxr_unrefreshed(sc->sc_arq_prod, sc->sc_arq_cons,
5264 IXL_AQ_NUM);
5265 arq = IXL_DMA_KVA(&sc->sc_arq);
5266
5267 if (__predict_false(n <= 0))
5268 return 0;
5269
5270 do {
5271 aqb = sc->sc_arq_live[prod];
5272 iaq = &arq[prod];
5273
5274 if (aqb == NULL) {
5275 aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle);
5276 if (aqb != NULL) {
5277 SIMPLEQ_REMOVE(&sc->sc_arq_idle, aqb,
5278 ixl_aq_buf, aqb_entry);
5279 } else if ((aqb = ixl_aqb_alloc(sc)) == NULL) {
5280 break;
5281 }
5282
5283 sc->sc_arq_live[prod] = aqb;
5284 memset(aqb->aqb_data, 0, aqb->aqb_size);
5285
5286 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0,
5287 aqb->aqb_size, BUS_DMASYNC_PREREAD);
5288
5289 iaq->iaq_flags = htole16(IXL_AQ_BUF |
5290 (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ?
5291 IXL_AQ_LB : 0));
5292 iaq->iaq_opcode = 0;
5293 iaq->iaq_datalen = htole16(aqb->aqb_size);
5294 iaq->iaq_retval = 0;
5295 iaq->iaq_cookie = 0;
5296 iaq->iaq_param[0] = 0;
5297 iaq->iaq_param[1] = 0;
5298 ixl_aq_dva(iaq, aqb->aqb_map->dm_segs[0].ds_addr);
5299 }
5300
5301 prod++;
5302 prod &= IXL_AQ_MASK;
5303
5304 post = 1;
5305
5306 } while (--n);
5307
5308 if (post) {
5309 sc->sc_arq_prod = prod;
5310 ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
5311 }
5312
5313 return post;
5314 }
5315
5316 static void
5317 ixl_arq_unfill(struct ixl_softc *sc)
5318 {
5319 struct ixl_aq_buf *aqb;
5320 unsigned int i;
5321
5322 for (i = 0; i < __arraycount(sc->sc_arq_live); i++) {
5323 aqb = sc->sc_arq_live[i];
5324 if (aqb == NULL)
5325 continue;
5326
5327 sc->sc_arq_live[i] = NULL;
5328 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, aqb->aqb_size,
5329 BUS_DMASYNC_POSTREAD);
5330 ixl_aqb_free(sc, aqb);
5331 }
5332
5333 while ((aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle)) != NULL) {
5334 SIMPLEQ_REMOVE(&sc->sc_arq_idle, aqb,
5335 ixl_aq_buf, aqb_entry);
5336 ixl_aqb_free(sc, aqb);
5337 }
5338 }
5339
5340 static void
5341 ixl_clear_hw(struct ixl_softc *sc)
5342 {
5343 uint32_t num_queues, base_queue;
5344 uint32_t num_pf_int;
5345 uint32_t num_vf_int;
5346 uint32_t num_vfs;
5347 uint32_t i, j;
5348 uint32_t val;
5349 uint32_t eol = 0x7ff;
5350
5351 /* get number of interrupts, queues, and vfs */
5352 val = ixl_rd(sc, I40E_GLPCI_CNF2);
5353 num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
5354 I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
5355 num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
5356 I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
5357
5358 val = ixl_rd(sc, I40E_PFLAN_QALLOC);
5359 base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
5360 I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
5361 j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
5362 I40E_PFLAN_QALLOC_LASTQ_SHIFT;
5363 if (val & I40E_PFLAN_QALLOC_VALID_MASK)
5364 num_queues = (j - base_queue) + 1;
5365 else
5366 num_queues = 0;
5367
5368 val = ixl_rd(sc, I40E_PF_VT_PFALLOC);
5369 i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
5370 I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
5371 j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
5372 I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
5373 if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
5374 num_vfs = (j - i) + 1;
5375 else
5376 num_vfs = 0;
5377
5378 /* stop all the interrupts */
5379 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0);
5380 ixl_flush(sc);
5381 val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
5382 for (i = 0; i < num_pf_int - 2; i++)
5383 ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), val);
5384 ixl_flush(sc);
5385
5386 /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
5387 val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
5388 ixl_wr(sc, I40E_PFINT_LNKLST0, val);
5389 for (i = 0; i < num_pf_int - 2; i++)
5390 ixl_wr(sc, I40E_PFINT_LNKLSTN(i), val);
5391 val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
5392 for (i = 0; i < num_vfs; i++)
5393 ixl_wr(sc, I40E_VPINT_LNKLST0(i), val);
5394 for (i = 0; i < num_vf_int - 2; i++)
5395 ixl_wr(sc, I40E_VPINT_LNKLSTN(i), val);
5396
5397 /* warn the HW of the coming Tx disables */
5398 for (i = 0; i < num_queues; i++) {
5399 uint32_t abs_queue_idx = base_queue + i;
5400 uint32_t reg_block = 0;
5401
5402 if (abs_queue_idx >= 128) {
5403 reg_block = abs_queue_idx / 128;
5404 abs_queue_idx %= 128;
5405 }
5406
5407 val = ixl_rd(sc, I40E_GLLAN_TXPRE_QDIS(reg_block));
5408 val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
5409 val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
5410 val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
5411
5412 ixl_wr(sc, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
5413 }
5414 delaymsec(400);
5415
5416 /* stop all the queues */
5417 for (i = 0; i < num_queues; i++) {
5418 ixl_wr(sc, I40E_QINT_TQCTL(i), 0);
5419 ixl_wr(sc, I40E_QTX_ENA(i), 0);
5420 ixl_wr(sc, I40E_QINT_RQCTL(i), 0);
5421 ixl_wr(sc, I40E_QRX_ENA(i), 0);
5422 }
5423
5424 /* short wait for all queue disables to settle */
5425 delaymsec(50);
5426 }
5427
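/*
 * Wait for any global reset to settle and for the firmware to become
 * ready, then issue a PF software reset unless a global reset already
 * took care of it.
 */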
5428 static int
5429 ixl_pf_reset(struct ixl_softc *sc)
5430 {
5431 uint32_t cnt = 0;
5432 uint32_t cnt1 = 0;
5433 uint32_t reg = 0, reg0 = 0;
5434 uint32_t grst_del;
5435
5436 /*
5437 * Poll for Global Reset steady state in case of recent GRST.
5438 * The grst delay value is in 100ms units, and we'll wait a
5439 * couple counts longer to be sure we don't just miss the end.
5440 */
5441 grst_del = ixl_rd(sc, I40E_GLGEN_RSTCTL);
5442 grst_del &= I40E_GLGEN_RSTCTL_GRSTDEL_MASK;
5443 grst_del >>= I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
5444
5445 grst_del = grst_del * 20;
5446
5447 for (cnt = 0; cnt < grst_del; cnt++) {
5448 reg = ixl_rd(sc, I40E_GLGEN_RSTAT);
5449 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
5450 break;
5451 delaymsec(100);
5452 }
5453 if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
5454 aprint_error(", Global reset polling failed to complete\n");
5455 return -1;
5456 }
5457
5458 /* Now Wait for the FW to be ready */
5459 for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
5460 reg = ixl_rd(sc, I40E_GLNVM_ULD);
5461 reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5462 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
5463 if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5464 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))
5465 break;
5466
5467 delaymsec(10);
5468 }
5469 if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5470 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
5471 aprint_error(", wait for FW Reset complete timed out "
5472 "(I40E_GLNVM_ULD = 0x%x)\n", reg);
5473 return -1;
5474 }
5475
5476 /*
5477 * If there was a Global Reset in progress when we got here,
5478 * we don't need to do the PF Reset
5479 */
5480 if (cnt == 0) {
5481 reg = ixl_rd(sc, I40E_PFGEN_CTRL);
5482 ixl_wr(sc, I40E_PFGEN_CTRL, reg | I40E_PFGEN_CTRL_PFSWR_MASK);
5483 for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) {
5484 reg = ixl_rd(sc, I40E_PFGEN_CTRL);
5485 if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
5486 break;
5487 delaymsec(1);
5488
5489 reg0 = ixl_rd(sc, I40E_GLGEN_RSTAT);
5490 if (reg0 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
5491 aprint_error(", Core reset upcoming."
5492 " Skipping PF reset request\n");
5493 return -1;
5494 }
5495 }
5496 if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
5497 aprint_error(", PF reset polling failed to complete"
5498 " (I40E_PFGEN_CTRL = 0x%x)\n", reg);
5499 return -1;
5500 }
5501 }
5502
5503 return 0;
5504 }
5505
5506 static int
5507 ixl_dmamem_alloc(struct ixl_softc *sc, struct ixl_dmamem *ixm,
5508 bus_size_t size, bus_size_t align)
5509 {
5510 ixm->ixm_size = size;
5511
5512 if (bus_dmamap_create(sc->sc_dmat, ixm->ixm_size, 1,
5513 ixm->ixm_size, 0,
5514 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
5515 &ixm->ixm_map) != 0)
5516 return 1;
5517 if (bus_dmamem_alloc(sc->sc_dmat, ixm->ixm_size,
5518 align, 0, &ixm->ixm_seg, 1, &ixm->ixm_nsegs,
5519 BUS_DMA_WAITOK) != 0)
5520 goto destroy;
5521 if (bus_dmamem_map(sc->sc_dmat, &ixm->ixm_seg, ixm->ixm_nsegs,
5522 ixm->ixm_size, &ixm->ixm_kva, BUS_DMA_WAITOK) != 0)
5523 goto free;
5524 if (bus_dmamap_load(sc->sc_dmat, ixm->ixm_map, ixm->ixm_kva,
5525 ixm->ixm_size, NULL, BUS_DMA_WAITOK) != 0)
5526 goto unmap;
5527
5528 memset(ixm->ixm_kva, 0, ixm->ixm_size);
5529
5530 return 0;
5531 unmap:
5532 bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
5533 free:
5534 bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
5535 destroy:
5536 bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
5537 return 1;
5538 }
5539
5540 static void
5541 ixl_dmamem_free(struct ixl_softc *sc, struct ixl_dmamem *ixm)
5542 {
5543 bus_dmamap_unload(sc->sc_dmat, ixm->ixm_map);
5544 bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
5545 bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
5546 bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
5547 }
5548
5549 static int
5550 ixl_setup_vlan_hwfilter(struct ixl_softc *sc)
5551 {
5552 struct ethercom *ec = &sc->sc_ec;
5553 struct vlanid_list *vlanidp;
5554 int rv;
5555
5556 ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
5557 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
5558 ixl_remove_macvlan(sc, etherbroadcastaddr, 0,
5559 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
5560
5561 rv = ixl_add_macvlan(sc, sc->sc_enaddr, 0,
5562 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5563 if (rv != 0)
5564 return rv;
5565 rv = ixl_add_macvlan(sc, etherbroadcastaddr, 0,
5566 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5567 if (rv != 0)
5568 return rv;
5569
5570 ETHER_LOCK(ec);
5571 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
5572 rv = ixl_add_macvlan(sc, sc->sc_enaddr,
5573 vlanidp->vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5574 if (rv != 0)
5575 break;
5576 rv = ixl_add_macvlan(sc, etherbroadcastaddr,
5577 vlanidp->vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5578 if (rv != 0)
5579 break;
5580 }
5581 ETHER_UNLOCK(ec);
5582
5583 return rv;
5584 }
5585
5586 static void
5587 ixl_teardown_vlan_hwfilter(struct ixl_softc *sc)
5588 {
5589 struct vlanid_list *vlanidp;
5590 struct ethercom *ec = &sc->sc_ec;
5591
5592 ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
5593 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5594 ixl_remove_macvlan(sc, etherbroadcastaddr, 0,
5595 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5596
5597 ETHER_LOCK(ec);
5598 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
5599 ixl_remove_macvlan(sc, sc->sc_enaddr,
5600 vlanidp->vid, IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5601 ixl_remove_macvlan(sc, etherbroadcastaddr,
5602 vlanidp->vid, IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5603 }
5604 ETHER_UNLOCK(ec);
5605
5606 ixl_add_macvlan(sc, sc->sc_enaddr, 0,
5607 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
5608 ixl_add_macvlan(sc, etherbroadcastaddr, 0,
5609 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
5610 }
5611
5612 static int
5613 ixl_update_macvlan(struct ixl_softc *sc)
5614 {
5615 int rv = 0;
5616 int next_ec_capenable = sc->sc_ec.ec_capenable;
5617
5618 if (ISSET(next_ec_capenable, ETHERCAP_VLAN_HWFILTER)) {
5619 rv = ixl_setup_vlan_hwfilter(sc);
5620 if (rv != 0)
5621 ixl_teardown_vlan_hwfilter(sc);
5622 } else {
5623 ixl_teardown_vlan_hwfilter(sc);
5624 }
5625
5626 return rv;
5627 }
5628
5629 static int
5630 ixl_ifflags_cb(struct ethercom *ec)
5631 {
5632 struct ifnet *ifp = &ec->ec_if;
5633 struct ixl_softc *sc = ifp->if_softc;
5634 int rv, change;
5635
5636 mutex_enter(&sc->sc_cfg_lock);
5637
5638 change = ec->ec_capenable ^ sc->sc_cur_ec_capenable;
5639
5640 if (ISSET(change, ETHERCAP_VLAN_HWTAGGING)) {
5641 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWTAGGING;
5642 rv = ENETRESET;
5643 goto out;
5644 }
5645
5646 if (ISSET(change, ETHERCAP_VLAN_HWFILTER)) {
5647 rv = ixl_update_macvlan(sc);
5648 if (rv == 0) {
5649 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWFILTER;
5650 } else {
5651 CLR(ec->ec_capenable, ETHERCAP_VLAN_HWFILTER);
5652 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER);
5653 }
5654 }
5655
5656 rv = ixl_iff(sc);
5657 out:
5658 mutex_exit(&sc->sc_cfg_lock);
5659
5660 return rv;
5661 }
5662
5663 static int
5664 ixl_set_link_status(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
5665 {
5666 const struct ixl_aq_link_status *status;
5667 const struct ixl_phy_type *itype;
5668
5669 uint64_t ifm_active = IFM_ETHER;
5670 uint64_t ifm_status = IFM_AVALID;
5671 int link_state = LINK_STATE_DOWN;
5672 uint64_t baudrate = 0;
5673
5674 status = (const struct ixl_aq_link_status *)iaq->iaq_param;
5675 if (!ISSET(status->link_info, IXL_AQ_LINK_UP_FUNCTION)) {
5676 ifm_active |= IFM_NONE;
5677 goto done;
5678 }
5679
5680 ifm_active |= IFM_FDX;
5681 ifm_status |= IFM_ACTIVE;
5682 link_state = LINK_STATE_UP;
5683
5684 itype = ixl_search_phy_type(status->phy_type);
5685 if (itype != NULL)
5686 ifm_active |= itype->ifm_type;
5687
5688 if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_TX))
5689 ifm_active |= IFM_ETH_TXPAUSE;
5690 if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_RX))
5691 ifm_active |= IFM_ETH_RXPAUSE;
5692
5693 baudrate = ixl_search_link_speed(status->link_speed);
5694
5695 done:
5696 /* NET_ASSERT_LOCKED() except during attach */
5697 sc->sc_media_active = ifm_active;
5698 sc->sc_media_status = ifm_status;
5699
5700 sc->sc_ec.ec_if.if_baudrate = baudrate;
5701
5702 return link_state;
5703 }
5704
5705 static int
5706 ixl_establish_intx(struct ixl_softc *sc)
5707 {
5708 pci_chipset_tag_t pc = sc->sc_pa.pa_pc;
5709 pci_intr_handle_t *intr;
5710 char xnamebuf[32];
5711 char intrbuf[PCI_INTRSTR_LEN];
5712 char const *intrstr;
5713
5714 KASSERT(sc->sc_nintrs == 1);
5715
5716 intr = &sc->sc_ihp[0];
5717
5718 intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf));
5719 snprintf(xnamebuf, sizeof(xnamebuf), "%s:legacy",
5720 device_xname(sc->sc_dev));
5721
5722 sc->sc_ihs[0] = pci_intr_establish_xname(pc, *intr, IPL_NET, ixl_intr,
5723 sc, xnamebuf);
5724
5725 if (sc->sc_ihs[0] == NULL) {
5726 aprint_error_dev(sc->sc_dev,
5727 "unable to establish interrupt at %s\n", intrstr);
5728 return -1;
5729 }
5730
5731 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
5732 return 0;
5733 }
5734
5735 static int
5736 ixl_establish_msix(struct ixl_softc *sc)
5737 {
5738 pci_chipset_tag_t pc = sc->sc_pa.pa_pc;
5739 kcpuset_t *affinity;
5740 unsigned int vector = 0;
5741 unsigned int i;
5742 int affinity_to, r;
5743 char xnamebuf[32];
5744 char intrbuf[PCI_INTRSTR_LEN];
5745 char const *intrstr;
5746
5747 kcpuset_create(&affinity, false);
5748
5749 /* the "other" intr is mapped to vector 0 */
5750 vector = 0;
5751 intrstr = pci_intr_string(pc, sc->sc_ihp[vector],
5752 intrbuf, sizeof(intrbuf));
5753 snprintf(xnamebuf, sizeof(xnamebuf), "%s others",
5754 device_xname(sc->sc_dev));
5755 sc->sc_ihs[vector] = pci_intr_establish_xname(pc,
5756 sc->sc_ihp[vector], IPL_NET, ixl_other_intr,
5757 sc, xnamebuf);
5758 if (sc->sc_ihs[vector] == NULL) {
5759 aprint_error_dev(sc->sc_dev,
5760 "unable to establish interrupt at %s\n", intrstr);
5761 goto fail;
5762 }
5763
5764 aprint_normal_dev(sc->sc_dev, "other interrupt at %s", intrstr);
5765
5766 affinity_to = ncpu > (int)sc->sc_nqueue_pairs_max ? 1 : 0;
5767 affinity_to = (affinity_to + sc->sc_nqueue_pairs_max) % ncpu;
5768
5769 kcpuset_zero(affinity);
5770 kcpuset_set(affinity, affinity_to);
5771 r = interrupt_distribute(sc->sc_ihs[vector], affinity, NULL);
5772 if (r == 0) {
5773 aprint_normal(", affinity to %u", affinity_to);
5774 }
5775 aprint_normal("\n");
5776 vector++;
5777
5778 sc->sc_msix_vector_queue = vector;
5779 affinity_to = ncpu > (int)sc->sc_nqueue_pairs_max ? 1 : 0;
5780
5781 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
5782 intrstr = pci_intr_string(pc, sc->sc_ihp[vector],
5783 intrbuf, sizeof(intrbuf));
5784 snprintf(xnamebuf, sizeof(xnamebuf), "%s TXRX%d",
5785 device_xname(sc->sc_dev), i);
5786
5787 sc->sc_ihs[vector] = pci_intr_establish_xname(pc,
5788 sc->sc_ihp[vector], IPL_NET, ixl_queue_intr,
5789 (void *)&sc->sc_qps[i], xnamebuf);
5790
5791 if (sc->sc_ihs[vector] == NULL) {
5792 aprint_error_dev(sc->sc_dev,
5793 "unable to establish interrupt at %s\n", intrstr);
5794 goto fail;
5795 }
5796
5797 aprint_normal_dev(sc->sc_dev,
5798 "for TXRX%d interrupt at %s", i, intrstr);
5799
5800 kcpuset_zero(affinity);
5801 kcpuset_set(affinity, affinity_to);
5802 r = interrupt_distribute(sc->sc_ihs[vector], affinity, NULL);
5803 if (r == 0) {
5804 aprint_normal(", affinity to %u", affinity_to);
5805 affinity_to = (affinity_to + 1) % ncpu;
5806 }
5807 aprint_normal("\n");
5808 vector++;
5809 }
5810
5811 kcpuset_destroy(affinity);
5812
5813 return 0;
5814 fail:
5815 for (i = 0; i < vector; i++) {
5816 pci_intr_disestablish(pc, sc->sc_ihs[i]);
5817 }
5818
5819 sc->sc_msix_vector_queue = 0;
5821 kcpuset_destroy(affinity);
5822
5823 return -1;
5824 }
5825
5826 static void
5827 ixl_config_queue_intr(struct ixl_softc *sc)
5828 {
5829 unsigned int i, vector;
5830
5831 if (sc->sc_intrtype == PCI_INTR_TYPE_MSIX) {
5832 vector = sc->sc_msix_vector_queue;
5833 } else {
5834 vector = I40E_INTR_NOTX_INTR;
5835
5836 ixl_wr(sc, I40E_PFINT_LNKLST0,
5837 (I40E_INTR_NOTX_QUEUE <<
5838 I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
5839 (I40E_QUEUE_TYPE_RX <<
5840 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
5841 }
5842
5843 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
5844 ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), 0);
5845 ixl_flush(sc);
5846
5847 ixl_wr(sc, I40E_PFINT_LNKLSTN(i),
5848 ((i) << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
5849 (I40E_QUEUE_TYPE_RX <<
5850 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
5851
5852 ixl_wr(sc, I40E_QINT_RQCTL(i),
5853 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
5854 (I40E_ITR_INDEX_RX <<
5855 I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
5856 (I40E_INTR_NOTX_RX_QUEUE <<
5857 I40E_QINT_RQCTL_MSIX0_INDX_SHIFT) |
5858 (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
5859 (I40E_QUEUE_TYPE_TX <<
5860 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
5861 I40E_QINT_RQCTL_CAUSE_ENA_MASK);
5862
5863 ixl_wr(sc, I40E_QINT_TQCTL(i),
5864 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
5865 (I40E_ITR_INDEX_TX <<
5866 I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
5867 (I40E_INTR_NOTX_TX_QUEUE <<
5868 I40E_QINT_TQCTL_MSIX0_INDX_SHIFT) |
5869 (I40E_QUEUE_TYPE_EOL <<
5870 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
5871 (I40E_QUEUE_TYPE_RX <<
5872 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) |
5873 I40E_QINT_TQCTL_CAUSE_ENA_MASK);
5874
5875 if (sc->sc_intrtype == PCI_INTR_TYPE_MSIX)
5876 vector++;
5877 }
5878 ixl_flush(sc);
5879
5880 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_RX), 0x7a);
5881 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_TX), 0x7a);
5882 ixl_flush(sc);
5883 }
5884
5885 static void
5886 ixl_config_other_intr(struct ixl_softc *sc)
5887 {
5888 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0);
5889 (void)ixl_rd(sc, I40E_PFINT_ICR0);
5890
5891 ixl_wr(sc, I40E_PFINT_ICR0_ENA,
5892 I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
5893 I40E_PFINT_ICR0_ENA_GRST_MASK |
5894 I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
5895 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
5896 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
5897 I40E_PFINT_ICR0_ENA_VFLR_MASK |
5898 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
5899 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
5900 I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK);
5901
5902 ixl_wr(sc, I40E_PFINT_LNKLST0, 0x7FF);
5903 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_OTHER), 0);
5904 ixl_wr(sc, I40E_PFINT_STAT_CTL0,
5905 (I40E_ITR_INDEX_OTHER <<
5906 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT));
5907 ixl_flush(sc);
5908 }
5909
5910 static int
5911 ixl_setup_interrupts(struct ixl_softc *sc)
5912 {
5913 struct pci_attach_args *pa = &sc->sc_pa;
5914 pci_intr_type_t max_type, intr_type;
5915 int counts[PCI_INTR_TYPE_SIZE];
5916 int error;
5917 unsigned int i;
5918 bool retry;
5919
5920 memset(counts, 0, sizeof(counts));
5921 max_type = PCI_INTR_TYPE_MSIX;
5922 /* QPs + other interrupt */
5923 counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueue_pairs_max + 1;
5924 counts[PCI_INTR_TYPE_INTX] = 1;
5925
5926 if (ixl_param_nomsix)
5927 counts[PCI_INTR_TYPE_MSIX] = 0;
5928
5929 do {
5930 retry = false;
5931 error = pci_intr_alloc(pa, &sc->sc_ihp, counts, max_type);
5932 if (error != 0) {
5933 aprint_error_dev(sc->sc_dev,
5934 "couldn't map interrupt\n");
5935 break;
5936 }
5937
5938 intr_type = pci_intr_type(pa->pa_pc, sc->sc_ihp[0]);
5939 sc->sc_nintrs = counts[intr_type];
5940 KASSERT(sc->sc_nintrs > 0);
5941
5942 for (i = 0; i < sc->sc_nintrs; i++) {
5943 pci_intr_setattr(pa->pa_pc, &sc->sc_ihp[i],
5944 PCI_INTR_MPSAFE, true);
5945 }
5946
5947 sc->sc_ihs = kmem_alloc(sizeof(sc->sc_ihs[0]) * sc->sc_nintrs,
5948 KM_SLEEP);
5949
5950 if (intr_type == PCI_INTR_TYPE_MSIX) {
5951 error = ixl_establish_msix(sc);
5952 if (error) {
5953 counts[PCI_INTR_TYPE_MSIX] = 0;
5954 retry = true;
5955 }
5956 } else if (intr_type == PCI_INTR_TYPE_INTX) {
5957 error = ixl_establish_intx(sc);
5958 } else {
5959 error = -1;
5960 }
5961
5962 if (error) {
5963 kmem_free(sc->sc_ihs,
5964 sizeof(sc->sc_ihs[0]) * sc->sc_nintrs);
5965 pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs);
5966 } else {
5967 sc->sc_intrtype = intr_type;
5968 }
5969 } while (retry);
5970
5971 return error;
5972 }
5973
5974 static void
5975 ixl_teardown_interrupts(struct ixl_softc *sc)
5976 {
5977 struct pci_attach_args *pa = &sc->sc_pa;
5978 unsigned int i;
5979
5980 for (i = 0; i < sc->sc_nintrs; i++) {
5981 pci_intr_disestablish(pa->pa_pc, sc->sc_ihs[i]);
5982 }
5983
5984 pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs);
5985
5986 kmem_free(sc->sc_ihs, sizeof(sc->sc_ihs[0]) * sc->sc_nintrs);
5987 sc->sc_ihs = NULL;
5988 sc->sc_nintrs = 0;
5989 }
5990
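/*
 * Attach the event counters: per-queue-pair Tx/Rx counters, the
 * admin/other event counters, and the per-port and per-VSI hardware
 * statistics, then initialize the callout and task that periodically
 * refresh the hardware statistics.
 */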
5991 static int
5992 ixl_setup_stats(struct ixl_softc *sc)
5993 {
5994 struct ixl_queue_pair *qp;
5995 struct ixl_tx_ring *txr;
5996 struct ixl_rx_ring *rxr;
5997 struct ixl_stats_counters *isc;
5998 unsigned int i;
5999
6000 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
6001 qp = &sc->sc_qps[i];
6002 txr = qp->qp_txr;
6003 rxr = qp->qp_rxr;
6004
6005 		evcnt_attach_dynamic(&txr->txr_defragged, EVCNT_TYPE_MISC,
6006 		    NULL, qp->qp_name, "m_defrag succeeded");
6007 		evcnt_attach_dynamic(&txr->txr_defrag_failed, EVCNT_TYPE_MISC,
6008 		    NULL, qp->qp_name, "m_defrag failed");
6009 evcnt_attach_dynamic(&txr->txr_pcqdrop, EVCNT_TYPE_MISC,
6010 NULL, qp->qp_name, "Dropped in pcq");
6011 evcnt_attach_dynamic(&txr->txr_transmitdef, EVCNT_TYPE_MISC,
6012 NULL, qp->qp_name, "Deferred transmit");
6013 evcnt_attach_dynamic(&txr->txr_intr, EVCNT_TYPE_INTR,
6014 NULL, qp->qp_name, "Interrupt on queue");
6015 evcnt_attach_dynamic(&txr->txr_defer, EVCNT_TYPE_MISC,
6016 NULL, qp->qp_name, "Handled queue in softint/workqueue");
6017
6018 evcnt_attach_dynamic(&rxr->rxr_mgethdr_failed, EVCNT_TYPE_MISC,
6019 NULL, qp->qp_name, "MGETHDR failed");
6020 evcnt_attach_dynamic(&rxr->rxr_mgetcl_failed, EVCNT_TYPE_MISC,
6021 NULL, qp->qp_name, "MCLGET failed");
6022 evcnt_attach_dynamic(&rxr->rxr_mbuf_load_failed,
6023 EVCNT_TYPE_MISC, NULL, qp->qp_name,
6024 "bus_dmamap_load_mbuf failed");
6025 evcnt_attach_dynamic(&rxr->rxr_intr, EVCNT_TYPE_INTR,
6026 NULL, qp->qp_name, "Interrupt on queue");
6027 evcnt_attach_dynamic(&rxr->rxr_defer, EVCNT_TYPE_MISC,
6028 NULL, qp->qp_name, "Handled queue in softint/workqueue");
6029 }
6030
6031 evcnt_attach_dynamic(&sc->sc_event_atq, EVCNT_TYPE_INTR,
6032 NULL, device_xname(sc->sc_dev), "Interrupt for other events");
6033 evcnt_attach_dynamic(&sc->sc_event_link, EVCNT_TYPE_MISC,
6034 NULL, device_xname(sc->sc_dev), "Link status event");
6035 evcnt_attach_dynamic(&sc->sc_event_ecc_err, EVCNT_TYPE_MISC,
6036 NULL, device_xname(sc->sc_dev), "ECC error");
6037 evcnt_attach_dynamic(&sc->sc_event_pci_exception, EVCNT_TYPE_MISC,
6038 NULL, device_xname(sc->sc_dev), "PCI exception");
6039 evcnt_attach_dynamic(&sc->sc_event_crit_err, EVCNT_TYPE_MISC,
6040 NULL, device_xname(sc->sc_dev), "Critical error");
6041
6042 isc = &sc->sc_stats_counters;
6043 evcnt_attach_dynamic(&isc->isc_crc_errors, EVCNT_TYPE_MISC,
6044 NULL, device_xname(sc->sc_dev), "CRC errors");
6045 evcnt_attach_dynamic(&isc->isc_illegal_bytes, EVCNT_TYPE_MISC,
6046 NULL, device_xname(sc->sc_dev), "Illegal bytes");
6047 	evcnt_attach_dynamic(&isc->isc_mac_local_faults, EVCNT_TYPE_MISC,
6048 	    NULL, device_xname(sc->sc_dev), "MAC local faults");
6049 	evcnt_attach_dynamic(&isc->isc_mac_remote_faults, EVCNT_TYPE_MISC,
6050 	    NULL, device_xname(sc->sc_dev), "MAC remote faults");
6051 evcnt_attach_dynamic(&isc->isc_link_xon_rx, EVCNT_TYPE_MISC,
6052 NULL, device_xname(sc->sc_dev), "Rx xon");
6053 evcnt_attach_dynamic(&isc->isc_link_xon_tx, EVCNT_TYPE_MISC,
6054 NULL, device_xname(sc->sc_dev), "Tx xon");
6055 evcnt_attach_dynamic(&isc->isc_link_xoff_rx, EVCNT_TYPE_MISC,
6056 NULL, device_xname(sc->sc_dev), "Rx xoff");
6057 evcnt_attach_dynamic(&isc->isc_link_xoff_tx, EVCNT_TYPE_MISC,
6058 NULL, device_xname(sc->sc_dev), "Tx xoff");
6059 evcnt_attach_dynamic(&isc->isc_rx_fragments, EVCNT_TYPE_MISC,
6060 NULL, device_xname(sc->sc_dev), "Rx fragments");
6061 evcnt_attach_dynamic(&isc->isc_rx_jabber, EVCNT_TYPE_MISC,
6062 NULL, device_xname(sc->sc_dev), "Rx jabber");
6063
6064 evcnt_attach_dynamic(&isc->isc_rx_size_64, EVCNT_TYPE_MISC,
6065 NULL, device_xname(sc->sc_dev), "Rx size 64");
6066 evcnt_attach_dynamic(&isc->isc_rx_size_127, EVCNT_TYPE_MISC,
6067 NULL, device_xname(sc->sc_dev), "Rx size 127");
6068 evcnt_attach_dynamic(&isc->isc_rx_size_255, EVCNT_TYPE_MISC,
6069 NULL, device_xname(sc->sc_dev), "Rx size 255");
6070 evcnt_attach_dynamic(&isc->isc_rx_size_511, EVCNT_TYPE_MISC,
6071 NULL, device_xname(sc->sc_dev), "Rx size 511");
6072 evcnt_attach_dynamic(&isc->isc_rx_size_1023, EVCNT_TYPE_MISC,
6073 NULL, device_xname(sc->sc_dev), "Rx size 1023");
6074 evcnt_attach_dynamic(&isc->isc_rx_size_1522, EVCNT_TYPE_MISC,
6075 NULL, device_xname(sc->sc_dev), "Rx size 1522");
6076 evcnt_attach_dynamic(&isc->isc_rx_size_big, EVCNT_TYPE_MISC,
6077 NULL, device_xname(sc->sc_dev), "Rx jumbo packets");
6078 evcnt_attach_dynamic(&isc->isc_rx_undersize, EVCNT_TYPE_MISC,
6079 NULL, device_xname(sc->sc_dev), "Rx under size");
6080 evcnt_attach_dynamic(&isc->isc_rx_oversize, EVCNT_TYPE_MISC,
6081 NULL, device_xname(sc->sc_dev), "Rx over size");
6082
6083 evcnt_attach_dynamic(&isc->isc_rx_bytes, EVCNT_TYPE_MISC,
6084 NULL, device_xname(sc->sc_dev), "Rx bytes / port");
6085 evcnt_attach_dynamic(&isc->isc_rx_discards, EVCNT_TYPE_MISC,
6086 NULL, device_xname(sc->sc_dev), "Rx discards / port");
6087 evcnt_attach_dynamic(&isc->isc_rx_unicast, EVCNT_TYPE_MISC,
6088 NULL, device_xname(sc->sc_dev), "Rx unicast / port");
6089 evcnt_attach_dynamic(&isc->isc_rx_multicast, EVCNT_TYPE_MISC,
6090 NULL, device_xname(sc->sc_dev), "Rx multicast / port");
6091 evcnt_attach_dynamic(&isc->isc_rx_broadcast, EVCNT_TYPE_MISC,
6092 NULL, device_xname(sc->sc_dev), "Rx broadcast / port");
6093
6094 evcnt_attach_dynamic(&isc->isc_vsi_rx_bytes, EVCNT_TYPE_MISC,
6095 NULL, device_xname(sc->sc_dev), "Rx bytes / vsi");
6096 evcnt_attach_dynamic(&isc->isc_vsi_rx_discards, EVCNT_TYPE_MISC,
6097 	    NULL, device_xname(sc->sc_dev), "Rx discards / vsi");
6098 evcnt_attach_dynamic(&isc->isc_vsi_rx_unicast, EVCNT_TYPE_MISC,
6099 NULL, device_xname(sc->sc_dev), "Rx unicast / vsi");
6100 evcnt_attach_dynamic(&isc->isc_vsi_rx_multicast, EVCNT_TYPE_MISC,
6101 NULL, device_xname(sc->sc_dev), "Rx multicast / vsi");
6102 evcnt_attach_dynamic(&isc->isc_vsi_rx_broadcast, EVCNT_TYPE_MISC,
6103 NULL, device_xname(sc->sc_dev), "Rx broadcast / vsi");
6104
6105 evcnt_attach_dynamic(&isc->isc_tx_size_64, EVCNT_TYPE_MISC,
6106 NULL, device_xname(sc->sc_dev), "Tx size 64");
6107 evcnt_attach_dynamic(&isc->isc_tx_size_127, EVCNT_TYPE_MISC,
6108 NULL, device_xname(sc->sc_dev), "Tx size 127");
6109 evcnt_attach_dynamic(&isc->isc_tx_size_255, EVCNT_TYPE_MISC,
6110 NULL, device_xname(sc->sc_dev), "Tx size 255");
6111 evcnt_attach_dynamic(&isc->isc_tx_size_511, EVCNT_TYPE_MISC,
6112 NULL, device_xname(sc->sc_dev), "Tx size 511");
6113 evcnt_attach_dynamic(&isc->isc_tx_size_1023, EVCNT_TYPE_MISC,
6114 NULL, device_xname(sc->sc_dev), "Tx size 1023");
6115 evcnt_attach_dynamic(&isc->isc_tx_size_1522, EVCNT_TYPE_MISC,
6116 NULL, device_xname(sc->sc_dev), "Tx size 1522");
6117 evcnt_attach_dynamic(&isc->isc_tx_size_big, EVCNT_TYPE_MISC,
6118 NULL, device_xname(sc->sc_dev), "Tx jumbo packets");
6119
6120 evcnt_attach_dynamic(&isc->isc_tx_bytes, EVCNT_TYPE_MISC,
6121 NULL, device_xname(sc->sc_dev), "Tx bytes / port");
6122 evcnt_attach_dynamic(&isc->isc_tx_dropped_link_down, EVCNT_TYPE_MISC,
6123 NULL, device_xname(sc->sc_dev),
6124 "Tx dropped due to link down / port");
6125 evcnt_attach_dynamic(&isc->isc_tx_unicast, EVCNT_TYPE_MISC,
6126 NULL, device_xname(sc->sc_dev), "Tx unicast / port");
6127 evcnt_attach_dynamic(&isc->isc_tx_multicast, EVCNT_TYPE_MISC,
6128 NULL, device_xname(sc->sc_dev), "Tx multicast / port");
6129 evcnt_attach_dynamic(&isc->isc_tx_broadcast, EVCNT_TYPE_MISC,
6130 NULL, device_xname(sc->sc_dev), "Tx broadcast / port");
6131
6132 evcnt_attach_dynamic(&isc->isc_vsi_tx_bytes, EVCNT_TYPE_MISC,
6133 NULL, device_xname(sc->sc_dev), "Tx bytes / vsi");
6134 evcnt_attach_dynamic(&isc->isc_vsi_tx_errors, EVCNT_TYPE_MISC,
6135 NULL, device_xname(sc->sc_dev), "Tx errors / vsi");
6136 evcnt_attach_dynamic(&isc->isc_vsi_tx_unicast, EVCNT_TYPE_MISC,
6137 NULL, device_xname(sc->sc_dev), "Tx unicast / vsi");
6138 evcnt_attach_dynamic(&isc->isc_vsi_tx_multicast, EVCNT_TYPE_MISC,
6139 NULL, device_xname(sc->sc_dev), "Tx multicast / vsi");
6140 evcnt_attach_dynamic(&isc->isc_vsi_tx_broadcast, EVCNT_TYPE_MISC,
6141 NULL, device_xname(sc->sc_dev), "Tx broadcast / vsi");
6142
6143 sc->sc_stats_intval = ixl_param_stats_interval;
6144 callout_init(&sc->sc_stats_callout, CALLOUT_MPSAFE);
6145 callout_setfunc(&sc->sc_stats_callout, ixl_stats_callout, sc);
6146 ixl_work_set(&sc->sc_stats_task, ixl_stats_update, sc);
6147
6148 return 0;
6149 }
6150
6151 static void
6152 ixl_teardown_stats(struct ixl_softc *sc)
6153 {
6154 struct ixl_tx_ring *txr;
6155 struct ixl_rx_ring *rxr;
6156 struct ixl_stats_counters *isc;
6157 unsigned int i;
6158
6159 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
6160 txr = sc->sc_qps[i].qp_txr;
6161 rxr = sc->sc_qps[i].qp_rxr;
6162
6163 evcnt_detach(&txr->txr_defragged);
6164 evcnt_detach(&txr->txr_defrag_failed);
6165 evcnt_detach(&txr->txr_pcqdrop);
6166 evcnt_detach(&txr->txr_transmitdef);
6167 evcnt_detach(&txr->txr_intr);
6168 evcnt_detach(&txr->txr_defer);
6169
6170 evcnt_detach(&rxr->rxr_mgethdr_failed);
6171 evcnt_detach(&rxr->rxr_mgetcl_failed);
6172 evcnt_detach(&rxr->rxr_mbuf_load_failed);
6173 evcnt_detach(&rxr->rxr_intr);
6174 evcnt_detach(&rxr->rxr_defer);
6175 }
6176
6177 isc = &sc->sc_stats_counters;
6178 evcnt_detach(&isc->isc_crc_errors);
6179 evcnt_detach(&isc->isc_illegal_bytes);
6180 evcnt_detach(&isc->isc_mac_local_faults);
6181 evcnt_detach(&isc->isc_mac_remote_faults);
6182 evcnt_detach(&isc->isc_link_xon_rx);
6183 evcnt_detach(&isc->isc_link_xon_tx);
6184 evcnt_detach(&isc->isc_link_xoff_rx);
6185 evcnt_detach(&isc->isc_link_xoff_tx);
6186 evcnt_detach(&isc->isc_rx_fragments);
6187 evcnt_detach(&isc->isc_rx_jabber);
6188 evcnt_detach(&isc->isc_rx_bytes);
6189 evcnt_detach(&isc->isc_rx_discards);
6190 evcnt_detach(&isc->isc_rx_unicast);
6191 evcnt_detach(&isc->isc_rx_multicast);
6192 evcnt_detach(&isc->isc_rx_broadcast);
6193 evcnt_detach(&isc->isc_rx_size_64);
6194 evcnt_detach(&isc->isc_rx_size_127);
6195 evcnt_detach(&isc->isc_rx_size_255);
6196 evcnt_detach(&isc->isc_rx_size_511);
6197 evcnt_detach(&isc->isc_rx_size_1023);
6198 evcnt_detach(&isc->isc_rx_size_1522);
6199 evcnt_detach(&isc->isc_rx_size_big);
6200 evcnt_detach(&isc->isc_rx_undersize);
6201 evcnt_detach(&isc->isc_rx_oversize);
6202 evcnt_detach(&isc->isc_tx_bytes);
6203 evcnt_detach(&isc->isc_tx_dropped_link_down);
6204 evcnt_detach(&isc->isc_tx_unicast);
6205 evcnt_detach(&isc->isc_tx_multicast);
6206 evcnt_detach(&isc->isc_tx_broadcast);
6207 evcnt_detach(&isc->isc_tx_size_64);
6208 evcnt_detach(&isc->isc_tx_size_127);
6209 evcnt_detach(&isc->isc_tx_size_255);
6210 evcnt_detach(&isc->isc_tx_size_511);
6211 evcnt_detach(&isc->isc_tx_size_1023);
6212 evcnt_detach(&isc->isc_tx_size_1522);
6213 evcnt_detach(&isc->isc_tx_size_big);
6214 evcnt_detach(&isc->isc_vsi_rx_discards);
6215 evcnt_detach(&isc->isc_vsi_rx_bytes);
6216 evcnt_detach(&isc->isc_vsi_rx_unicast);
6217 evcnt_detach(&isc->isc_vsi_rx_multicast);
6218 evcnt_detach(&isc->isc_vsi_rx_broadcast);
6219 evcnt_detach(&isc->isc_vsi_tx_errors);
6220 evcnt_detach(&isc->isc_vsi_tx_bytes);
6221 evcnt_detach(&isc->isc_vsi_tx_unicast);
6222 evcnt_detach(&isc->isc_vsi_tx_multicast);
6223 evcnt_detach(&isc->isc_vsi_tx_broadcast);
6224
6225 evcnt_detach(&sc->sc_event_atq);
6226 evcnt_detach(&sc->sc_event_link);
6227 evcnt_detach(&sc->sc_event_ecc_err);
6228 evcnt_detach(&sc->sc_event_pci_exception);
6229 evcnt_detach(&sc->sc_event_crit_err);
6230
6231 callout_destroy(&sc->sc_stats_callout);
6232 }
6233
6234 static void
6235 ixl_stats_callout(void *xsc)
6236 {
6237 struct ixl_softc *sc = xsc;
6238
6239 ixl_work_add(sc->sc_workq, &sc->sc_stats_task);
6240 callout_schedule(&sc->sc_stats_callout, mstohz(sc->sc_stats_intval));
6241 }
6242
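/*
 * Read a hardware statistics counter and return its increment since the
 * previous read.  Counters are 32 bits wide, or 48 bits when a high
 * register is supplied (reg_hi != 0); a single wrap between reads is
 * accounted for.  When has_offset is false the counter is only
 * snapshotted and 0 is returned.
 */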
6243 static uint64_t
6244 ixl_stat_delta(struct ixl_softc *sc, uint32_t reg_hi, uint32_t reg_lo,
6245 uint64_t *offset, bool has_offset)
6246 {
6247 uint64_t value, delta;
6248 int bitwidth;
6249
6250 bitwidth = reg_hi == 0 ? 32 : 48;
6251
6252 value = ixl_rd(sc, reg_lo);
6253
6254 if (bitwidth > 32) {
6255 value |= ((uint64_t)ixl_rd(sc, reg_hi) << 32);
6256 }
6257
6258 if (__predict_true(has_offset)) {
6259 delta = value;
6260 if (value < *offset)
6261 delta += ((uint64_t)1 << bitwidth);
6262 delta -= *offset;
6263 } else {
6264 delta = 0;
6265 }
6266 atomic_swap_64(offset, value);
6267
6268 return delta;
6269 }
6270
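/*
 * Periodic statistics work: fold the per-port (GLPRT_*) and per-VSI
 * (GLV_*) hardware counters into the corresponding event counters
 * using ixl_stat_delta().
 */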
6271 static void
6272 ixl_stats_update(void *xsc)
6273 {
6274 struct ixl_softc *sc = xsc;
6275 struct ixl_stats_counters *isc;
6276 uint64_t delta;
6277
6278 isc = &sc->sc_stats_counters;
6279
6280 /* errors */
6281 delta = ixl_stat_delta(sc,
6282 0, I40E_GLPRT_CRCERRS(sc->sc_port),
6283 &isc->isc_crc_errors_offset, isc->isc_has_offset);
6284 atomic_add_64(&isc->isc_crc_errors.ev_count, delta);
6285
6286 delta = ixl_stat_delta(sc,
6287 0, I40E_GLPRT_ILLERRC(sc->sc_port),
6288 &isc->isc_illegal_bytes_offset, isc->isc_has_offset);
6289 atomic_add_64(&isc->isc_illegal_bytes.ev_count, delta);
6290
6291 /* rx */
6292 delta = ixl_stat_delta(sc,
6293 I40E_GLPRT_GORCH(sc->sc_port), I40E_GLPRT_GORCL(sc->sc_port),
6294 &isc->isc_rx_bytes_offset, isc->isc_has_offset);
6295 atomic_add_64(&isc->isc_rx_bytes.ev_count, delta);
6296
6297 delta = ixl_stat_delta(sc,
6298 0, I40E_GLPRT_RDPC(sc->sc_port),
6299 &isc->isc_rx_discards_offset, isc->isc_has_offset);
6300 atomic_add_64(&isc->isc_rx_discards.ev_count, delta);
6301
6302 delta = ixl_stat_delta(sc,
6303 I40E_GLPRT_UPRCH(sc->sc_port), I40E_GLPRT_UPRCL(sc->sc_port),
6304 &isc->isc_rx_unicast_offset, isc->isc_has_offset);
6305 atomic_add_64(&isc->isc_rx_unicast.ev_count, delta);
6306
6307 delta = ixl_stat_delta(sc,
6308 I40E_GLPRT_MPRCH(sc->sc_port), I40E_GLPRT_MPRCL(sc->sc_port),
6309 &isc->isc_rx_multicast_offset, isc->isc_has_offset);
6310 atomic_add_64(&isc->isc_rx_multicast.ev_count, delta);
6311
6312 delta = ixl_stat_delta(sc,
6313 I40E_GLPRT_BPRCH(sc->sc_port), I40E_GLPRT_BPRCL(sc->sc_port),
6314 &isc->isc_rx_broadcast_offset, isc->isc_has_offset);
6315 atomic_add_64(&isc->isc_rx_broadcast.ev_count, delta);
6316
6317 /* Packet size stats rx */
6318 delta = ixl_stat_delta(sc,
6319 I40E_GLPRT_PRC64H(sc->sc_port), I40E_GLPRT_PRC64L(sc->sc_port),
6320 &isc->isc_rx_size_64_offset, isc->isc_has_offset);
6321 atomic_add_64(&isc->isc_rx_size_64.ev_count, delta);
6322
6323 delta = ixl_stat_delta(sc,
6324 I40E_GLPRT_PRC127H(sc->sc_port), I40E_GLPRT_PRC127L(sc->sc_port),
6325 &isc->isc_rx_size_127_offset, isc->isc_has_offset);
6326 atomic_add_64(&isc->isc_rx_size_127.ev_count, delta);
6327
6328 delta = ixl_stat_delta(sc,
6329 I40E_GLPRT_PRC255H(sc->sc_port), I40E_GLPRT_PRC255L(sc->sc_port),
6330 &isc->isc_rx_size_255_offset, isc->isc_has_offset);
6331 atomic_add_64(&isc->isc_rx_size_255.ev_count, delta);
6332
6333 delta = ixl_stat_delta(sc,
6334 I40E_GLPRT_PRC511H(sc->sc_port), I40E_GLPRT_PRC511L(sc->sc_port),
6335 &isc->isc_rx_size_511_offset, isc->isc_has_offset);
6336 atomic_add_64(&isc->isc_rx_size_511.ev_count, delta);
6337
6338 delta = ixl_stat_delta(sc,
6339 I40E_GLPRT_PRC1023H(sc->sc_port), I40E_GLPRT_PRC1023L(sc->sc_port),
6340 &isc->isc_rx_size_1023_offset, isc->isc_has_offset);
6341 atomic_add_64(&isc->isc_rx_size_1023.ev_count, delta);
6342
6343 delta = ixl_stat_delta(sc,
6344 I40E_GLPRT_PRC1522H(sc->sc_port), I40E_GLPRT_PRC1522L(sc->sc_port),
6345 &isc->isc_rx_size_1522_offset, isc->isc_has_offset);
6346 atomic_add_64(&isc->isc_rx_size_1522.ev_count, delta);
6347
6348 delta = ixl_stat_delta(sc,
6349 I40E_GLPRT_PRC9522H(sc->sc_port), I40E_GLPRT_PRC9522L(sc->sc_port),
6350 &isc->isc_rx_size_big_offset, isc->isc_has_offset);
6351 atomic_add_64(&isc->isc_rx_size_big.ev_count, delta);
6352
6353 delta = ixl_stat_delta(sc,
6354 0, I40E_GLPRT_RUC(sc->sc_port),
6355 &isc->isc_rx_undersize_offset, isc->isc_has_offset);
6356 atomic_add_64(&isc->isc_rx_undersize.ev_count, delta);
6357
6358 delta = ixl_stat_delta(sc,
6359 0, I40E_GLPRT_ROC(sc->sc_port),
6360 &isc->isc_rx_oversize_offset, isc->isc_has_offset);
6361 atomic_add_64(&isc->isc_rx_oversize.ev_count, delta);
6362
6363 /* tx */
6364 delta = ixl_stat_delta(sc,
6365 I40E_GLPRT_GOTCH(sc->sc_port), I40E_GLPRT_GOTCL(sc->sc_port),
6366 &isc->isc_tx_bytes_offset, isc->isc_has_offset);
6367 atomic_add_64(&isc->isc_tx_bytes.ev_count, delta);
6368
6369 delta = ixl_stat_delta(sc,
6370 0, I40E_GLPRT_TDOLD(sc->sc_port),
6371 &isc->isc_tx_dropped_link_down_offset, isc->isc_has_offset);
6372 atomic_add_64(&isc->isc_tx_dropped_link_down.ev_count, delta);
6373
6374 delta = ixl_stat_delta(sc,
6375 I40E_GLPRT_UPTCH(sc->sc_port), I40E_GLPRT_UPTCL(sc->sc_port),
6376 &isc->isc_tx_unicast_offset, isc->isc_has_offset);
6377 atomic_add_64(&isc->isc_tx_unicast.ev_count, delta);
6378
6379 delta = ixl_stat_delta(sc,
6380 I40E_GLPRT_MPTCH(sc->sc_port), I40E_GLPRT_MPTCL(sc->sc_port),
6381 &isc->isc_tx_multicast_offset, isc->isc_has_offset);
6382 atomic_add_64(&isc->isc_tx_multicast.ev_count, delta);
6383
6384 delta = ixl_stat_delta(sc,
6385 I40E_GLPRT_BPTCH(sc->sc_port), I40E_GLPRT_BPTCL(sc->sc_port),
6386 &isc->isc_tx_broadcast_offset, isc->isc_has_offset);
6387 atomic_add_64(&isc->isc_tx_broadcast.ev_count, delta);
6388
6389 /* Packet size stats tx */
6390 delta = ixl_stat_delta(sc,
6391 	    I40E_GLPRT_PTC64H(sc->sc_port), I40E_GLPRT_PTC64L(sc->sc_port),
6392 &isc->isc_tx_size_64_offset, isc->isc_has_offset);
6393 atomic_add_64(&isc->isc_tx_size_64.ev_count, delta);
6394
6395 delta = ixl_stat_delta(sc,
6396 I40E_GLPRT_PTC127H(sc->sc_port), I40E_GLPRT_PTC127L(sc->sc_port),
6397 &isc->isc_tx_size_127_offset, isc->isc_has_offset);
6398 atomic_add_64(&isc->isc_tx_size_127.ev_count, delta);
6399
6400 delta = ixl_stat_delta(sc,
6401 I40E_GLPRT_PTC255H(sc->sc_port), I40E_GLPRT_PTC255L(sc->sc_port),
6402 &isc->isc_tx_size_255_offset, isc->isc_has_offset);
6403 atomic_add_64(&isc->isc_tx_size_255.ev_count, delta);
6404
6405 delta = ixl_stat_delta(sc,
6406 I40E_GLPRT_PTC511H(sc->sc_port), I40E_GLPRT_PTC511L(sc->sc_port),
6407 &isc->isc_tx_size_511_offset, isc->isc_has_offset);
6408 atomic_add_64(&isc->isc_tx_size_511.ev_count, delta);
6409
6410 delta = ixl_stat_delta(sc,
6411 I40E_GLPRT_PTC1023H(sc->sc_port), I40E_GLPRT_PTC1023L(sc->sc_port),
6412 &isc->isc_tx_size_1023_offset, isc->isc_has_offset);
6413 atomic_add_64(&isc->isc_tx_size_1023.ev_count, delta);
6414
6415 delta = ixl_stat_delta(sc,
6416 I40E_GLPRT_PTC1522H(sc->sc_port), I40E_GLPRT_PTC1522L(sc->sc_port),
6417 &isc->isc_tx_size_1522_offset, isc->isc_has_offset);
6418 atomic_add_64(&isc->isc_tx_size_1522.ev_count, delta);
6419
6420 delta = ixl_stat_delta(sc,
6421 I40E_GLPRT_PTC9522H(sc->sc_port), I40E_GLPRT_PTC9522L(sc->sc_port),
6422 &isc->isc_tx_size_big_offset, isc->isc_has_offset);
6423 atomic_add_64(&isc->isc_tx_size_big.ev_count, delta);
6424
6425 /* mac faults */
6426 delta = ixl_stat_delta(sc,
6427 0, I40E_GLPRT_MLFC(sc->sc_port),
6428 &isc->isc_mac_local_faults_offset, isc->isc_has_offset);
6429 atomic_add_64(&isc->isc_mac_local_faults.ev_count, delta);
6430
6431 delta = ixl_stat_delta(sc,
6432 0, I40E_GLPRT_MRFC(sc->sc_port),
6433 &isc->isc_mac_remote_faults_offset, isc->isc_has_offset);
6434 atomic_add_64(&isc->isc_mac_remote_faults.ev_count, delta);
6435
6436 /* Flow control (LFC) stats */
6437 delta = ixl_stat_delta(sc,
6438 0, I40E_GLPRT_LXONRXC(sc->sc_port),
6439 &isc->isc_link_xon_rx_offset, isc->isc_has_offset);
6440 atomic_add_64(&isc->isc_link_xon_rx.ev_count, delta);
6441
6442 delta = ixl_stat_delta(sc,
6443 0, I40E_GLPRT_LXONTXC(sc->sc_port),
6444 &isc->isc_link_xon_tx_offset, isc->isc_has_offset);
6445 atomic_add_64(&isc->isc_link_xon_tx.ev_count, delta);
6446
6447 delta = ixl_stat_delta(sc,
6448 0, I40E_GLPRT_LXOFFRXC(sc->sc_port),
6449 &isc->isc_link_xoff_rx_offset, isc->isc_has_offset);
6450 atomic_add_64(&isc->isc_link_xoff_rx.ev_count, delta);
6451
6452 delta = ixl_stat_delta(sc,
6453 0, I40E_GLPRT_LXOFFTXC(sc->sc_port),
6454 &isc->isc_link_xoff_tx_offset, isc->isc_has_offset);
6455 atomic_add_64(&isc->isc_link_xoff_tx.ev_count, delta);
6456
6457 /* fragments */
6458 delta = ixl_stat_delta(sc,
6459 0, I40E_GLPRT_RFC(sc->sc_port),
6460 &isc->isc_rx_fragments_offset, isc->isc_has_offset);
6461 atomic_add_64(&isc->isc_rx_fragments.ev_count, delta);
6462
6463 delta = ixl_stat_delta(sc,
6464 0, I40E_GLPRT_RJC(sc->sc_port),
6465 &isc->isc_rx_jabber_offset, isc->isc_has_offset);
6466 atomic_add_64(&isc->isc_rx_jabber.ev_count, delta);
6467
6468 /* VSI rx counters */
6469 delta = ixl_stat_delta(sc,
6470 0, I40E_GLV_RDPC(sc->sc_vsi_stat_counter_idx),
6471 &isc->isc_vsi_rx_discards_offset, isc->isc_has_offset);
6472 atomic_add_64(&isc->isc_vsi_rx_discards.ev_count, delta);
6473
6474 delta = ixl_stat_delta(sc,
6475 I40E_GLV_GORCH(sc->sc_vsi_stat_counter_idx),
6476 I40E_GLV_GORCL(sc->sc_vsi_stat_counter_idx),
6477 &isc->isc_vsi_rx_bytes_offset, isc->isc_has_offset);
6478 atomic_add_64(&isc->isc_vsi_rx_bytes.ev_count, delta);
6479
6480 delta = ixl_stat_delta(sc,
6481 I40E_GLV_UPRCH(sc->sc_vsi_stat_counter_idx),
6482 I40E_GLV_UPRCL(sc->sc_vsi_stat_counter_idx),
6483 &isc->isc_vsi_rx_unicast_offset, isc->isc_has_offset);
6484 atomic_add_64(&isc->isc_vsi_rx_unicast.ev_count, delta);
6485
6486 delta = ixl_stat_delta(sc,
6487 I40E_GLV_MPRCH(sc->sc_vsi_stat_counter_idx),
6488 I40E_GLV_MPRCL(sc->sc_vsi_stat_counter_idx),
6489 &isc->isc_vsi_rx_multicast_offset, isc->isc_has_offset);
6490 atomic_add_64(&isc->isc_vsi_rx_multicast.ev_count, delta);
6491
6492 delta = ixl_stat_delta(sc,
6493 I40E_GLV_BPRCH(sc->sc_vsi_stat_counter_idx),
6494 I40E_GLV_BPRCL(sc->sc_vsi_stat_counter_idx),
6495 &isc->isc_vsi_rx_broadcast_offset, isc->isc_has_offset);
6496 atomic_add_64(&isc->isc_vsi_rx_broadcast.ev_count, delta);
6497
6498 /* VSI tx counters */
6499 delta = ixl_stat_delta(sc,
6500 0, I40E_GLV_TEPC(sc->sc_vsi_stat_counter_idx),
6501 &isc->isc_vsi_tx_errors_offset, isc->isc_has_offset);
6502 atomic_add_64(&isc->isc_vsi_tx_errors.ev_count, delta);
6503
6504 delta = ixl_stat_delta(sc,
6505 I40E_GLV_GOTCH(sc->sc_vsi_stat_counter_idx),
6506 I40E_GLV_GOTCL(sc->sc_vsi_stat_counter_idx),
6507 &isc->isc_vsi_tx_bytes_offset, isc->isc_has_offset);
6508 atomic_add_64(&isc->isc_vsi_tx_bytes.ev_count, delta);
6509
6510 delta = ixl_stat_delta(sc,
6511 I40E_GLV_UPTCH(sc->sc_vsi_stat_counter_idx),
6512 I40E_GLV_UPTCL(sc->sc_vsi_stat_counter_idx),
6513 &isc->isc_vsi_tx_unicast_offset, isc->isc_has_offset);
6514 atomic_add_64(&isc->isc_vsi_tx_unicast.ev_count, delta);
6515
6516 delta = ixl_stat_delta(sc,
6517 I40E_GLV_MPTCH(sc->sc_vsi_stat_counter_idx),
6518 I40E_GLV_MPTCL(sc->sc_vsi_stat_counter_idx),
6519 &isc->isc_vsi_tx_multicast_offset, isc->isc_has_offset);
6520 atomic_add_64(&isc->isc_vsi_tx_multicast.ev_count, delta);
6521
6522 delta = ixl_stat_delta(sc,
6523 I40E_GLV_BPTCH(sc->sc_vsi_stat_counter_idx),
6524 I40E_GLV_BPTCL(sc->sc_vsi_stat_counter_idx),
6525 &isc->isc_vsi_tx_broadcast_offset, isc->isc_has_offset);
6526 atomic_add_64(&isc->isc_vsi_tx_broadcast.ev_count, delta);
6527 }
6528
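/*
 * Create the hw.<devname> sysctl subtree: txrx_workqueue, stats_interval,
 * and the per-direction (rx/tx) interrupt and deferred processing limits.
 */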
6529 static int
6530 ixl_setup_sysctls(struct ixl_softc *sc)
6531 {
6532 const char *devname;
6533 struct sysctllog **log;
6534 const struct sysctlnode *rnode, *rxnode, *txnode;
6535 int error;
6536
6537 log = &sc->sc_sysctllog;
6538 devname = device_xname(sc->sc_dev);
6539
6540 error = sysctl_createv(log, 0, NULL, &rnode,
6541 0, CTLTYPE_NODE, devname,
6542 SYSCTL_DESCR("ixl information and settings"),
6543 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
6544 if (error)
6545 goto out;
6546
6547 error = sysctl_createv(log, 0, &rnode, NULL,
6548 CTLFLAG_READWRITE, CTLTYPE_BOOL, "txrx_workqueue",
6549 SYSCTL_DESCR("Use workqueue for packet processing"),
6550 NULL, 0, &sc->sc_txrx_workqueue, 0, CTL_CREATE, CTL_EOL);
6551 if (error)
6552 goto out;
6553
6554 error = sysctl_createv(log, 0, &rnode, NULL,
6555 CTLFLAG_READONLY, CTLTYPE_INT, "stats_interval",
6556 SYSCTL_DESCR("Statistics collection interval in milliseconds"),
6557 	    NULL, 0, &sc->sc_stats_intval, 0, CTL_CREATE, CTL_EOL);
	if (error)
		goto out;
6558 
6559 error = sysctl_createv(log, 0, &rnode, &rxnode,
6560 0, CTLTYPE_NODE, "rx",
6561 SYSCTL_DESCR("ixl information and settings for Rx"),
6562 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
6563 if (error)
6564 goto out;
6565
6566 error = sysctl_createv(log, 0, &rxnode, NULL,
6567 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
6568 SYSCTL_DESCR("max number of Rx packets"
6569 " to process for interrupt processing"),
6570 NULL, 0, &sc->sc_rx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
6571 if (error)
6572 goto out;
6573
6574 error = sysctl_createv(log, 0, &rxnode, NULL,
6575 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
6576 SYSCTL_DESCR("max number of Rx packets"
6577 " to process for deferred processing"),
6578 NULL, 0, &sc->sc_rx_process_limit, 0, CTL_CREATE, CTL_EOL);
6579 if (error)
6580 goto out;
6581
6582 error = sysctl_createv(log, 0, &rnode, &txnode,
6583 0, CTLTYPE_NODE, "tx",
6584 SYSCTL_DESCR("ixl information and settings for Tx"),
6585 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
6586 if (error)
6587 goto out;
6588
6589 error = sysctl_createv(log, 0, &txnode, NULL,
6590 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
6591 SYSCTL_DESCR("max number of Tx packets"
6592 " to process for interrupt processing"),
6593 NULL, 0, &sc->sc_tx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
6594 if (error)
6595 goto out;
6596
6597 error = sysctl_createv(log, 0, &txnode, NULL,
6598 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
6599 SYSCTL_DESCR("max number of Tx packets"
6600 " to process for deferred processing"),
6601 NULL, 0, &sc->sc_tx_process_limit, 0, CTL_CREATE, CTL_EOL);
6602 if (error)
6603 goto out;
6604
6605 out:
6606 if (error) {
6607 aprint_error_dev(sc->sc_dev,
6608 "unable to create sysctl node\n");
6609 sysctl_teardown(log);
6610 }
6611
6612 return error;
6613 }
6614
6615 static void
6616 ixl_teardown_sysctls(struct ixl_softc *sc)
6617 {
6618
6619 sysctl_teardown(&sc->sc_sysctllog);
6620 }
6621
6622 static struct workqueue *
6623 ixl_workq_create(const char *name, pri_t prio, int ipl, int flags)
6624 {
6625 struct workqueue *wq;
6626 int error;
6627
6628 error = workqueue_create(&wq, name, ixl_workq_work, NULL,
6629 prio, ipl, flags);
6630
6631 if (error)
6632 return NULL;
6633
6634 return wq;
6635 }
6636
6637 static void
6638 ixl_workq_destroy(struct workqueue *wq)
6639 {
6640
6641 workqueue_destroy(wq);
6642 }
6643
6644 static void
6645 ixl_work_set(struct ixl_work *work, void (*func)(void *), void *arg)
6646 {
6647
6648 memset(work, 0, sizeof(*work));
6649 work->ixw_func = func;
6650 work->ixw_arg = arg;
6651 }
6652
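/*
 * Enqueue a piece of work at most once: the ixw_added flag is set with a
 * CAS, so the work is not enqueued again until ixl_workq_work() has
 * cleared the flag and started running it.
 */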
6653 static void
6654 ixl_work_add(struct workqueue *wq, struct ixl_work *work)
6655 {
6656 if (atomic_cas_uint(&work->ixw_added, 0, 1) != 0)
6657 return;
6658
6659 kpreempt_disable();
6660 workqueue_enqueue(wq, &work->ixw_cookie, NULL);
6661 kpreempt_enable();
6662 }
6663
6664 static void
6665 ixl_work_wait(struct workqueue *wq, struct ixl_work *work)
6666 {
6667
6668 workqueue_wait(wq, &work->ixw_cookie);
6669 }
6670
6671 static void
6672 ixl_workq_work(struct work *wk, void *context)
6673 {
6674 struct ixl_work *work;
6675
6676 work = container_of(wk, struct ixl_work, ixw_cookie);
6677
6678 atomic_swap_uint(&work->ixw_added, 0);
6679 work->ixw_func(work->ixw_arg);
6680 }
6681
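/*
 * Read an Rx control register through the admin queue
 * (IXL_AQ_OP_RX_CTL_REG_READ) instead of by direct register access.
 */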
6682 static int
6683 ixl_rx_ctl_read(struct ixl_softc *sc, uint32_t reg, uint32_t *rv)
6684 {
6685 struct ixl_aq_desc iaq;
6686
6687 memset(&iaq, 0, sizeof(iaq));
6688 iaq.iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_REG_READ);
6689 iaq.iaq_param[1] = htole32(reg);
6690
6691 if (ixl_atq_poll(sc, &iaq, 250) != 0)
6692 return ETIMEDOUT;
6693
6694 	switch (le16toh(iaq.iaq_retval)) {
6695 case IXL_AQ_RC_OK:
6696 /* success */
6697 break;
6698 case IXL_AQ_RC_EACCES:
6699 return EPERM;
6700 case IXL_AQ_RC_EAGAIN:
6701 return EAGAIN;
6702 default:
6703 return EIO;
6704 }
6705
6706 	*rv = le32toh(iaq.iaq_param[3]);
6707 return 0;
6708 }
6709
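/*
 * Read an Rx control register.  If the firmware supports the Rx control
 * admin queue commands, retry the AQ read a few times (waiting 1ms after
 * EAGAIN); otherwise, or if the AQ read keeps failing, fall back to a
 * plain register read.
 */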
6710 static uint32_t
6711 ixl_rd_rx_csr(struct ixl_softc *sc, uint32_t reg)
6712 {
6713 uint32_t val;
6714 int rv, retry, retry_limit;
6715
6716 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL)) {
6717 retry_limit = 5;
6718 } else {
6719 retry_limit = 0;
6720 }
6721
6722 for (retry = 0; retry < retry_limit; retry++) {
6723 rv = ixl_rx_ctl_read(sc, reg, &val);
6724 if (rv == 0)
6725 return val;
6726 else if (rv == EAGAIN)
6727 delaymsec(1);
6728 else
6729 break;
6730 }
6731
6732 val = ixl_rd(sc, reg);
6733
6734 return val;
6735 }
6736
6737 static int
6738 ixl_rx_ctl_write(struct ixl_softc *sc, uint32_t reg, uint32_t value)
6739 {
6740 struct ixl_aq_desc iaq;
6741
6742 memset(&iaq, 0, sizeof(iaq));
6743 iaq.iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_REG_WRITE);
6744 iaq.iaq_param[1] = htole32(reg);
6745 iaq.iaq_param[3] = htole32(value);
6746
6747 if (ixl_atq_poll(sc, &iaq, 250) != 0)
6748 return ETIMEDOUT;
6749
6750 	switch (le16toh(iaq.iaq_retval)) {
6751 case IXL_AQ_RC_OK:
6752 /* success */
6753 break;
6754 case IXL_AQ_RC_EACCES:
6755 return EPERM;
6756 case IXL_AQ_RC_EAGAIN:
6757 return EAGAIN;
6758 default:
6759 return EIO;
6760 }
6761
6762 return 0;
6763 }
6764
6765 static void
6766 ixl_wr_rx_csr(struct ixl_softc *sc, uint32_t reg, uint32_t value)
6767 {
6768 int rv, retry, retry_limit;
6769
6770 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL)) {
6771 retry_limit = 5;
6772 } else {
6773 retry_limit = 0;
6774 }
6775
6776 for (retry = 0; retry < retry_limit; retry++) {
6777 rv = ixl_rx_ctl_write(sc, reg, value);
6778 if (rv == 0)
6779 return;
6780 else if (rv == EAGAIN)
6781 delaymsec(1);
6782 else
6783 break;
6784 }
6785
6786 ixl_wr(sc, reg, value);
6787 }
6788
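/*
 * Acquire ownership of the NVM resource from the firmware via the
 * REQUEST_RESOURCE admin queue command ('R' requests read access,
 * anything else write access).  A no-op if the firmware does not
 * require NVM locking.
 */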
6789 static int
6790 ixl_nvm_lock(struct ixl_softc *sc, char rw)
6791 {
6792 struct ixl_aq_desc iaq;
6793 struct ixl_aq_req_resource_param *param;
6794 int rv;
6795
6796 if (!ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK))
6797 return 0;
6798
6799 memset(&iaq, 0, sizeof(iaq));
6800 iaq.iaq_opcode = htole16(IXL_AQ_OP_REQUEST_RESOURCE);
6801
6802 param = (struct ixl_aq_req_resource_param *)&iaq.iaq_param;
6803 param->resource_id = htole16(IXL_AQ_RESOURCE_ID_NVM);
6804 if (rw == 'R') {
6805 param->access_type = htole16(IXL_AQ_RESOURCE_ACCES_READ);
6806 } else {
6807 param->access_type = htole16(IXL_AQ_RESOURCE_ACCES_WRITE);
6808 }
6809
6810 rv = ixl_atq_poll(sc, &iaq, 250);
6811
6812 if (rv != 0)
6813 return ETIMEDOUT;
6814
6815 switch (le16toh(iaq.iaq_retval)) {
6816 case IXL_AQ_RC_OK:
6817 break;
6818 case IXL_AQ_RC_EACCES:
6819 return EACCES;
6820 case IXL_AQ_RC_EBUSY:
6821 return EBUSY;
6822 	case IXL_AQ_RC_EPERM:
6823 		return EPERM;
	default:
		return EIO;
6824 	}
6825
6826 return 0;
6827 }
6828
6829 static int
6830 ixl_nvm_unlock(struct ixl_softc *sc)
6831 {
6832 struct ixl_aq_desc iaq;
6833 struct ixl_aq_rel_resource_param *param;
6834 int rv;
6835
6836 if (!ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK))
6837 return 0;
6838
6839 memset(&iaq, 0, sizeof(iaq));
6840 iaq.iaq_opcode = htole16(IXL_AQ_OP_RELEASE_RESOURCE);
6841
6842 param = (struct ixl_aq_rel_resource_param *)&iaq.iaq_param;
6843 param->resource_id = htole16(IXL_AQ_RESOURCE_ID_NVM);
6844
6845 rv = ixl_atq_poll(sc, &iaq, 250);
6846
6847 if (rv != 0)
6848 return ETIMEDOUT;
6849
6850 switch (le16toh(iaq.iaq_retval)) {
6851 case IXL_AQ_RC_OK:
6852 break;
6853 default:
6854 return EIO;
6855 }
6856 return 0;
6857 }
6858
6859 static int
6860 ixl_srdone_poll(struct ixl_softc *sc)
6861 {
6862 int wait_count;
6863 uint32_t reg;
6864
6865 for (wait_count = 0; wait_count < IXL_SRRD_SRCTL_ATTEMPTS;
6866 wait_count++) {
6867 reg = ixl_rd(sc, I40E_GLNVM_SRCTL);
6868 if (ISSET(reg, I40E_GLNVM_SRCTL_DONE_MASK))
6869 break;
6870
6871 delaymsec(5);
6872 }
6873
6874 if (wait_count == IXL_SRRD_SRCTL_ATTEMPTS)
6875 return -1;
6876
6877 return 0;
6878 }
6879
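/*
 * Read one 16-bit word from the NVM Shadow RAM through the GLNVM_SRCTL /
 * GLNVM_SRDATA register interface, polling for completion of the access.
 */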
6880 static int
6881 ixl_nvm_read_srctl(struct ixl_softc *sc, uint16_t offset, uint16_t *data)
6882 {
6883 uint32_t reg;
6884
6885 if (ixl_srdone_poll(sc) != 0)
6886 return ETIMEDOUT;
6887
6888 reg = ((uint32_t)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
6889 __BIT(I40E_GLNVM_SRCTL_START_SHIFT);
6890 ixl_wr(sc, I40E_GLNVM_SRCTL, reg);
6891
6892 if (ixl_srdone_poll(sc) != 0) {
6893 aprint_debug("NVM read error: couldn't access "
6894 "Shadow RAM address: 0x%x\n", offset);
6895 return ETIMEDOUT;
6896 }
6897
6898 reg = ixl_rd(sc, I40E_GLNVM_SRDATA);
6899 *data = (uint16_t)__SHIFTOUT(reg, I40E_GLNVM_SRDATA_RDDATA_MASK);
6900
6901 return 0;
6902 }
6903
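/*
 * Read from the NVM through the admin queue NVM_READ command into the
 * shared AQ buffer.  The word offset is converted to a byte offset and
 * truncated to the 24-bit offset field of the command.
 */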
6904 static int
6905 ixl_nvm_read_aq(struct ixl_softc *sc, uint16_t offset_word,
6906 void *data, size_t len)
6907 {
6908 struct ixl_dmamem *idm;
6909 struct ixl_aq_desc iaq;
6910 struct ixl_aq_nvm_param *param;
6911 uint32_t offset_bytes;
6912 int rv;
6913
6914 idm = &sc->sc_aqbuf;
6915 if (len > IXL_DMA_LEN(idm))
6916 return ENOMEM;
6917
6918 memset(IXL_DMA_KVA(idm), 0, IXL_DMA_LEN(idm));
6919 memset(&iaq, 0, sizeof(iaq));
6920 iaq.iaq_opcode = htole16(IXL_AQ_OP_NVM_READ);
6921 iaq.iaq_flags = htole16(IXL_AQ_BUF |
6922 ((len > I40E_AQ_LARGE_BUF) ? IXL_AQ_LB : 0));
6923 iaq.iaq_datalen = htole16(len);
6924 ixl_aq_dva(&iaq, IXL_DMA_DVA(idm));
6925
6926 param = (struct ixl_aq_nvm_param *)iaq.iaq_param;
6927 param->command_flags = IXL_AQ_NVM_LAST_CMD;
6928 param->module_pointer = 0;
6929 param->length = htole16(len);
6930 offset_bytes = (uint32_t)offset_word * 2;
6931 offset_bytes &= 0x00FFFFFF;
6932 param->offset = htole32(offset_bytes);
6933
6934 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
6935 BUS_DMASYNC_PREREAD);
6936
6937 rv = ixl_atq_poll(sc, &iaq, 250);
6938
6939 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
6940 BUS_DMASYNC_POSTREAD);
6941
6942 if (rv != 0) {
6943 return ETIMEDOUT;
6944 }
6945
6946 switch (le16toh(iaq.iaq_retval)) {
6947 case IXL_AQ_RC_OK:
6948 break;
6949 case IXL_AQ_RC_EPERM:
6950 return EPERM;
6951 case IXL_AQ_RC_EINVAL:
6952 return EINVAL;
6953 case IXL_AQ_RC_EBUSY:
6954 return EBUSY;
6955 case IXL_AQ_RC_EIO:
6956 default:
6957 return EIO;
6958 }
6959
6960 memcpy(data, IXL_DMA_KVA(idm), len);
6961
6962 return 0;
6963 }
6964
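/*
 * Read a 16-bit word from the NVM while holding the NVM lock, using the
 * admin queue method when the firmware provides it and the Shadow RAM
 * registers otherwise.  AQ reads return little-endian data, which is
 * converted to host byte order.
 */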
6965 static int
6966 ixl_rd16_nvm(struct ixl_softc *sc, uint16_t offset, uint16_t *data)
6967 {
6968 int error;
6969 uint16_t buf;
6970
6971 error = ixl_nvm_lock(sc, 'R');
6972 if (error)
6973 return error;
6974
6975 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMREAD)) {
6976 error = ixl_nvm_read_aq(sc, offset,
6977 &buf, sizeof(buf));
6978 if (error == 0)
6979 *data = le16toh(buf);
6980 } else {
6981 error = ixl_nvm_read_srctl(sc, offset, &buf);
6982 if (error == 0)
6983 *data = buf;
6984 }
6985
6986 ixl_nvm_unlock(sc);
6987
6988 return error;
6989 }
6990
6991 MODULE(MODULE_CLASS_DRIVER, if_ixl, "pci");
6992
6993 #ifdef _MODULE
6994 #include "ioconf.c"
6995 #endif
6996
6997 #ifdef _MODULE
6998 static void
6999 ixl_parse_modprop(prop_dictionary_t dict)
7000 {
7001 prop_object_t obj;
7002 int64_t val;
7003 uint64_t uval;
7004
7005 if (dict == NULL)
7006 return;
7007
7008 obj = prop_dictionary_get(dict, "nomsix");
7009 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_BOOL) {
7010 ixl_param_nomsix = prop_bool_true((prop_bool_t)obj);
7011 }
7012
7013 obj = prop_dictionary_get(dict, "stats_interval");
7014 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
7015 val = prop_number_integer_value((prop_number_t)obj);
7016
7017 		/* accept only a sane range (100ms to 3 minutes) */
7018 if (100 < val && val < 180000) {
7019 ixl_param_stats_interval = val;
7020 }
7021 }
7022
7023 obj = prop_dictionary_get(dict, "nqps_limit");
7024 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
7025 val = prop_number_integer_value((prop_number_t)obj);
7026
7027 if (val <= INT32_MAX)
7028 ixl_param_nqps_limit = val;
7029 }
7030
7031 obj = prop_dictionary_get(dict, "rx_ndescs");
7032 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
7033 uval = prop_number_unsigned_integer_value((prop_number_t)obj);
7034
7035 if (uval > 8)
7036 ixl_param_rx_ndescs = uval;
7037 }
7038
7039 obj = prop_dictionary_get(dict, "tx_ndescs");
7040 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
7041 uval = prop_number_unsigned_integer_value((prop_number_t)obj);
7042
7043 if (uval > IXL_TX_PKT_DESCS)
7044 ixl_param_tx_ndescs = uval;
7045 }
7046
7047 }
7048 #endif
7049
7050 static int
7051 if_ixl_modcmd(modcmd_t cmd, void *opaque)
7052 {
7053 int error = 0;
7054
7055 #ifdef _MODULE
7056 switch (cmd) {
7057 case MODULE_CMD_INIT:
7058 ixl_parse_modprop((prop_dictionary_t)opaque);
7059 error = config_init_component(cfdriver_ioconf_if_ixl,
7060 cfattach_ioconf_if_ixl, cfdata_ioconf_if_ixl);
7061 break;
7062 case MODULE_CMD_FINI:
7063 error = config_fini_component(cfdriver_ioconf_if_ixl,
7064 cfattach_ioconf_if_ixl, cfdata_ioconf_if_ixl);
7065 break;
7066 default:
7067 error = ENOTTY;
7068 break;
7069 }
7070 #endif
7071
7072 return error;
7073 }
7074