1 /* $NetBSD: if_ixl.c,v 1.49 2020/02/25 07:35:54 yamaguchi Exp $ */
2
3 /*
4 * Copyright (c) 2013-2015, Intel Corporation
5 * All rights reserved.
6
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
12 *
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * 3. Neither the name of the Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 /*
35 * Copyright (c) 2016,2017 David Gwynne <dlg (at) openbsd.org>
36 *
37 * Permission to use, copy, modify, and distribute this software for any
38 * purpose with or without fee is hereby granted, provided that the above
39 * copyright notice and this permission notice appear in all copies.
40 *
41 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
42 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
43 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
44 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
45 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
46 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
47 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
48 */
49
50 /*
51 * Copyright (c) 2019 Internet Initiative Japan, Inc.
52 * All rights reserved.
53 *
54 * Redistribution and use in source and binary forms, with or without
55 * modification, are permitted provided that the following conditions
56 * are met:
57 * 1. Redistributions of source code must retain the above copyright
58 * notice, this list of conditions and the following disclaimer.
59 * 2. Redistributions in binary form must reproduce the above copyright
60 * notice, this list of conditions and the following disclaimer in the
61 * documentation and/or other materials provided with the distribution.
62 *
63 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
64 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
65 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
66 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
67 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
68 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
69 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
70 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
71 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
72 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
73 * POSSIBILITY OF SUCH DAMAGE.
74 */
75
76 #include <sys/cdefs.h>
77 __KERNEL_RCSID(0, "$NetBSD: if_ixl.c,v 1.49 2020/02/25 07:35:54 yamaguchi Exp $");
78
79 #ifdef _KERNEL_OPT
80 #include "opt_net_mpsafe.h"
81 #include "opt_if_ixl.h"
82 #endif
83
84 #include <sys/param.h>
85 #include <sys/types.h>
86
87 #include <sys/cpu.h>
88 #include <sys/device.h>
89 #include <sys/evcnt.h>
90 #include <sys/interrupt.h>
91 #include <sys/kmem.h>
92 #include <sys/malloc.h>
93 #include <sys/module.h>
94 #include <sys/mutex.h>
95 #include <sys/pcq.h>
96 #include <sys/syslog.h>
97 #include <sys/workqueue.h>
98
99 #include <sys/bus.h>
100
101 #include <net/bpf.h>
102 #include <net/if.h>
103 #include <net/if_dl.h>
104 #include <net/if_media.h>
105 #include <net/if_ether.h>
106 #include <net/rss_config.h>
107
108 #include <netinet/tcp.h> /* for struct tcphdr */
109 #include <netinet/udp.h> /* for struct udphdr */
110
111 #include <dev/pci/pcivar.h>
112 #include <dev/pci/pcidevs.h>
113
114 #include <dev/pci/if_ixlreg.h>
115 #include <dev/pci/if_ixlvar.h>
116
117 #include <prop/proplib.h>
118
119 struct ixl_softc; /* defined below */
120
121 #define I40E_PF_RESET_WAIT_COUNT 200
122 #define I40E_AQ_LARGE_BUF 512
123
124 /* bitfields for Tx queue mapping in QTX_CTL */
125 #define I40E_QTX_CTL_VF_QUEUE 0x0
126 #define I40E_QTX_CTL_VM_QUEUE 0x1
127 #define I40E_QTX_CTL_PF_QUEUE 0x2
128
129 #define I40E_QUEUE_TYPE_EOL 0x7ff
130 #define I40E_INTR_NOTX_QUEUE 0
131
132 #define I40E_QUEUE_TYPE_RX 0x0
133 #define I40E_QUEUE_TYPE_TX 0x1
134 #define I40E_QUEUE_TYPE_PE_CEQ 0x2
135 #define I40E_QUEUE_TYPE_UNKNOWN 0x3
136
137 #define I40E_ITR_INDEX_RX 0x0
138 #define I40E_ITR_INDEX_TX 0x1
139 #define I40E_ITR_INDEX_OTHER 0x2
140 #define I40E_ITR_INDEX_NONE 0x3
141
142 #define I40E_INTR_NOTX_QUEUE 0
143 #define I40E_INTR_NOTX_INTR 0
144 #define I40E_INTR_NOTX_RX_QUEUE 0
145 #define I40E_INTR_NOTX_TX_QUEUE 1
146 #define I40E_INTR_NOTX_RX_MASK I40E_PFINT_ICR0_QUEUE_0_MASK
147 #define I40E_INTR_NOTX_TX_MASK I40E_PFINT_ICR0_QUEUE_1_MASK
148
149 #define BIT_ULL(a) (1ULL << (a))
150 #define IXL_RSS_HENA_DEFAULT_BASE \
151 (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
152 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
153 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
154 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
155 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
156 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
157 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
158 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
159 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
160 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
161 BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
162 #define IXL_RSS_HENA_DEFAULT_XL710 IXL_RSS_HENA_DEFAULT_BASE
163 #define IXL_RSS_HENA_DEFAULT_X722 (IXL_RSS_HENA_DEFAULT_XL710 | \
164 BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
165 BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
166 BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
167 BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
168 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
169 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK))
170 #define I40E_HASH_LUT_SIZE_128 0
171 #define IXL_RSS_KEY_SIZE_REG 13
172
173 #define IXL_ICR0_CRIT_ERR_MASK \
174 (I40E_PFINT_ICR0_PCI_EXCEPTION_MASK | \
175 I40E_PFINT_ICR0_ECC_ERR_MASK | \
176 I40E_PFINT_ICR0_PE_CRITERR_MASK)
177
178 #define IXL_QUEUE_MAX_XL710 64
179 #define IXL_QUEUE_MAX_X722 128
180
181 #define IXL_TX_PKT_DESCS 8
182 #define IXL_TX_PKT_MAXSIZE (MCLBYTES * IXL_TX_PKT_DESCS)
183 #define IXL_TX_QUEUE_ALIGN 128
184 #define IXL_RX_QUEUE_ALIGN 128
185
186 #define IXL_MCLBYTES (MCLBYTES - ETHER_ALIGN)
187 #define IXL_MTU_ETHERLEN (ETHER_HDR_LEN \
188 + ETHER_CRC_LEN)
189 #if 0
190 #define IXL_MAX_MTU (9728 - IXL_MTU_ETHERLEN)
191 #else
192 /* (dbuff * 5) - ETHER_HDR_LEN - ETHER_CRC_LEN */
193 #define IXL_MAX_MTU (9600 - IXL_MTU_ETHERLEN)
194 #endif
195 #define IXL_MIN_MTU (ETHER_MIN_LEN - ETHER_CRC_LEN)
196
197 #define IXL_PCIREG PCI_MAPREG_START
198
199 #define IXL_ITR0 0x0
200 #define IXL_ITR1 0x1
201 #define IXL_ITR2 0x2
202 #define IXL_NOITR 0x3
203
204 #define IXL_AQ_NUM 256
205 #define IXL_AQ_MASK (IXL_AQ_NUM - 1)
206 #define IXL_AQ_ALIGN 64 /* alignment used for the AQ descriptor rings */
207 #define IXL_AQ_BUFLEN 4096
208
209 #define IXL_HMC_ROUNDUP 512
210 #define IXL_HMC_PGSIZE 4096
211 #define IXL_HMC_DVASZ sizeof(uint64_t)
212 #define IXL_HMC_PGS (IXL_HMC_PGSIZE / IXL_HMC_DVASZ)
213 #define IXL_HMC_L2SZ (IXL_HMC_PGSIZE * IXL_HMC_PGS)
214 #define IXL_HMC_PDVALID 1ULL
215
216 #define IXL_ATQ_EXEC_TIMEOUT (10 * hz)
217
218 #define IXL_SRRD_SRCTL_ATTEMPTS 100000
219
220 struct ixl_aq_regs {
221 bus_size_t atq_tail;
222 bus_size_t atq_head;
223 bus_size_t atq_len;
224 bus_size_t atq_bal;
225 bus_size_t atq_bah;
226
227 bus_size_t arq_tail;
228 bus_size_t arq_head;
229 bus_size_t arq_len;
230 bus_size_t arq_bal;
231 bus_size_t arq_bah;
232
233 uint32_t atq_len_enable;
234 uint32_t atq_tail_mask;
235 uint32_t atq_head_mask;
236
237 uint32_t arq_len_enable;
238 uint32_t arq_tail_mask;
239 uint32_t arq_head_mask;
240 };
241
242 struct ixl_phy_type {
243 uint64_t phy_type;
244 uint64_t ifm_type;
245 };
246
247 struct ixl_speed_type {
248 uint8_t dev_speed;
249 uint64_t net_speed;
250 };
251
252 struct ixl_aq_buf {
253 SIMPLEQ_ENTRY(ixl_aq_buf)
254 aqb_entry;
255 void *aqb_data;
256 bus_dmamap_t aqb_map;
257 bus_dma_segment_t aqb_seg;
258 size_t aqb_size;
259 int aqb_nsegs;
260 };
261 SIMPLEQ_HEAD(ixl_aq_bufs, ixl_aq_buf);
262
263 struct ixl_dmamem {
264 bus_dmamap_t ixm_map;
265 bus_dma_segment_t ixm_seg;
266 int ixm_nsegs;
267 size_t ixm_size;
268 void *ixm_kva;
269 };
270
271 #define IXL_DMA_MAP(_ixm) ((_ixm)->ixm_map)
272 #define IXL_DMA_DVA(_ixm) ((_ixm)->ixm_map->dm_segs[0].ds_addr)
273 #define IXL_DMA_KVA(_ixm) ((void *)(_ixm)->ixm_kva)
274 #define IXL_DMA_LEN(_ixm) ((_ixm)->ixm_size)
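
/*
 * Illustrative sketch (not part of the driver): how the IXL_DMA_* accessors
 * above are typically used together with ixl_dmamem_alloc() and
 * bus_dmamap_sync(), mirroring the admin queue ring setup in ixl_attach()
 * below.  The example_ring_alloc() name is made up for illustration only.
 */
#if 0
static int
example_ring_alloc(struct ixl_softc *sc, struct ixl_dmamem *ixm)
{

	/* allocate a ring with the same size/alignment as the AQ rings */
	if (ixl_dmamem_alloc(sc, ixm,
	    sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0)
		return ENOMEM;

	/* hand the (zero-filled) ring to the device before programming it */
	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(ixm),
	    0, IXL_DMA_LEN(ixm), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* the device is then pointed at IXL_DMA_DVA(ixm) via ixl_wr() */
	return 0;
}
#endif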
275
276 struct ixl_hmc_entry {
277 uint64_t hmc_base;
278 uint32_t hmc_count;
279 uint64_t hmc_size;
280 };
281
282 enum ixl_hmc_types {
283 IXL_HMC_LAN_TX = 0,
284 IXL_HMC_LAN_RX,
285 IXL_HMC_FCOE_CTX,
286 IXL_HMC_FCOE_FILTER,
287 IXL_HMC_COUNT
288 };
289
290 struct ixl_hmc_pack {
291 uint16_t offset;
292 uint16_t width;
293 uint16_t lsb;
294 };
295
296 /*
297 * these hmc objects have weird sizes and alignments, so these are abstract
298 * representations of them that are nice for c to populate.
299 *
300 * the packing code relies on little-endian values being stored in the fields,
301 * no high bits in the fields being set, and the fields must be packed in the
302 * same order as they are in the ctx structure.
303 */
304
305 struct ixl_hmc_rxq {
306 uint16_t head;
307 uint8_t cpuid;
308 uint64_t base;
309 #define IXL_HMC_RXQ_BASE_UNIT 128
310 uint16_t qlen;
311 uint16_t dbuff;
312 #define IXL_HMC_RXQ_DBUFF_UNIT 128
313 uint8_t hbuff;
314 #define IXL_HMC_RXQ_HBUFF_UNIT 64
315 uint8_t dtype;
316 #define IXL_HMC_RXQ_DTYPE_NOSPLIT 0x0
317 #define IXL_HMC_RXQ_DTYPE_HSPLIT 0x1
318 #define IXL_HMC_RXQ_DTYPE_SPLIT_ALWAYS 0x2
319 uint8_t dsize;
320 #define IXL_HMC_RXQ_DSIZE_16 0
321 #define IXL_HMC_RXQ_DSIZE_32 1
322 uint8_t crcstrip;
323 uint8_t fc_ena;
324 uint8_t l2sel;
325 uint8_t hsplit_0;
326 uint8_t hsplit_1;
327 uint8_t showiv;
328 uint16_t rxmax;
329 uint8_t tphrdesc_ena;
330 uint8_t tphwdesc_ena;
331 uint8_t tphdata_ena;
332 uint8_t tphhead_ena;
333 uint8_t lrxqthresh;
334 uint8_t prefena;
335 };
336
337 static const struct ixl_hmc_pack ixl_hmc_pack_rxq[] = {
338 { offsetof(struct ixl_hmc_rxq, head), 13, 0 },
339 { offsetof(struct ixl_hmc_rxq, cpuid), 8, 13 },
340 { offsetof(struct ixl_hmc_rxq, base), 57, 32 },
341 { offsetof(struct ixl_hmc_rxq, qlen), 13, 89 },
342 { offsetof(struct ixl_hmc_rxq, dbuff), 7, 102 },
343 { offsetof(struct ixl_hmc_rxq, hbuff), 5, 109 },
344 { offsetof(struct ixl_hmc_rxq, dtype), 2, 114 },
345 { offsetof(struct ixl_hmc_rxq, dsize), 1, 116 },
346 { offsetof(struct ixl_hmc_rxq, crcstrip), 1, 117 },
347 { offsetof(struct ixl_hmc_rxq, fc_ena), 1, 118 },
348 { offsetof(struct ixl_hmc_rxq, l2sel), 1, 119 },
349 { offsetof(struct ixl_hmc_rxq, hsplit_0), 4, 120 },
350 { offsetof(struct ixl_hmc_rxq, hsplit_1), 2, 124 },
351 { offsetof(struct ixl_hmc_rxq, showiv), 1, 127 },
352 { offsetof(struct ixl_hmc_rxq, rxmax), 14, 174 },
353 { offsetof(struct ixl_hmc_rxq, tphrdesc_ena), 1, 193 },
354 { offsetof(struct ixl_hmc_rxq, tphwdesc_ena), 1, 194 },
355 { offsetof(struct ixl_hmc_rxq, tphdata_ena), 1, 195 },
356 { offsetof(struct ixl_hmc_rxq, tphhead_ena), 1, 196 },
357 { offsetof(struct ixl_hmc_rxq, lrxqthresh), 3, 198 },
358 { offsetof(struct ixl_hmc_rxq, prefena), 1, 201 },
359 };
360
361 #define IXL_HMC_RXQ_MINSIZE (201 + 1)
362
363 struct ixl_hmc_txq {
364 uint16_t head;
365 uint8_t new_context;
366 uint64_t base;
367 #define IXL_HMC_TXQ_BASE_UNIT 128
368 uint8_t fc_ena;
369 uint8_t timesync_ena;
370 uint8_t fd_ena;
371 uint8_t alt_vlan_ena;
372 uint8_t cpuid;
373 uint16_t thead_wb;
374 uint8_t head_wb_ena;
375 #define IXL_HMC_TXQ_DESC_WB 0
376 #define IXL_HMC_TXQ_HEAD_WB 1
377 uint16_t qlen;
378 uint8_t tphrdesc_ena;
379 uint8_t tphrpacket_ena;
380 uint8_t tphwdesc_ena;
381 uint64_t head_wb_addr;
382 uint32_t crc;
383 uint16_t rdylist;
384 uint8_t rdylist_act;
385 };
386
387 static const struct ixl_hmc_pack ixl_hmc_pack_txq[] = {
388 { offsetof(struct ixl_hmc_txq, head), 13, 0 },
389 { offsetof(struct ixl_hmc_txq, new_context), 1, 30 },
390 { offsetof(struct ixl_hmc_txq, base), 57, 32 },
391 { offsetof(struct ixl_hmc_txq, fc_ena), 1, 89 },
392 { offsetof(struct ixl_hmc_txq, timesync_ena), 1, 90 },
393 { offsetof(struct ixl_hmc_txq, fd_ena), 1, 91 },
394 { offsetof(struct ixl_hmc_txq, alt_vlan_ena), 1, 92 },
395 { offsetof(struct ixl_hmc_txq, cpuid), 8, 96 },
396 /* line 1 */
397 { offsetof(struct ixl_hmc_txq, thead_wb), 13, 0 + 128 },
398 { offsetof(struct ixl_hmc_txq, head_wb_ena), 1, 32 + 128 },
399 { offsetof(struct ixl_hmc_txq, qlen), 13, 33 + 128 },
400 { offsetof(struct ixl_hmc_txq, tphrdesc_ena), 1, 46 + 128 },
401 { offsetof(struct ixl_hmc_txq, tphrpacket_ena), 1, 47 + 128 },
402 { offsetof(struct ixl_hmc_txq, tphwdesc_ena), 1, 48 + 128 },
403 { offsetof(struct ixl_hmc_txq, head_wb_addr), 64, 64 + 128 },
404 /* line 7 */
405 { offsetof(struct ixl_hmc_txq, crc), 32, 0 + (7*128) },
406 { offsetof(struct ixl_hmc_txq, rdylist), 10, 84 + (7*128) },
407 { offsetof(struct ixl_hmc_txq, rdylist_act), 1, 94 + (7*128) },
408 };
409
410 #define IXL_HMC_TXQ_MINSIZE (94 + (7*128) + 1)
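
/*
 * Illustrative sketch (not the driver's ixl_hmc_pack() implementation): one
 * straightforward way to consume the pack tables above, copying each field
 * bit by bit into the HMC context image.  As noted in the comment before
 * struct ixl_hmc_rxq, the source struct must hold little-endian values with
 * no bits set above each field's width.  A caller could use it as, e.g.,
 * example_hmc_pack_sketch(kva, (const uint8_t *)&rxq, ixl_hmc_pack_rxq,
 * __arraycount(ixl_hmc_pack_rxq)).
 */
#if 0
static void
example_hmc_pack_sketch(uint8_t *dst, const uint8_t *src,
    const struct ixl_hmc_pack *packing, unsigned int npacking)
{
	unsigned int i, b, dstbit;

	for (i = 0; i < npacking; i++) {
		const struct ixl_hmc_pack *p = &packing[i];

		for (b = 0; b < p->width; b++) {
			/* bit b of the little-endian field at src + offset */
			if ((src[p->offset + b / NBBY] &
			    (1 << (b % NBBY))) == 0)
				continue;

			/* set bit (lsb + b) of the packed context image */
			dstbit = p->lsb + b;
			dst[dstbit / NBBY] |= 1 << (dstbit % NBBY);
		}
	}
}
#endif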
411
412 struct ixl_work {
413 struct work ixw_cookie;
414 void (*ixw_func)(void *);
415 void *ixw_arg;
416 unsigned int ixw_added;
417 };
418 #define IXL_WORKQUEUE_PRI PRI_SOFTNET
419
420 struct ixl_tx_map {
421 struct mbuf *txm_m;
422 bus_dmamap_t txm_map;
423 unsigned int txm_eop;
424 };
425
426 struct ixl_tx_ring {
427 kmutex_t txr_lock;
428 struct ixl_softc *txr_sc;
429
430 unsigned int txr_prod;
431 unsigned int txr_cons;
432
433 struct ixl_tx_map *txr_maps;
434 struct ixl_dmamem txr_mem;
435
436 bus_size_t txr_tail;
437 unsigned int txr_qid;
438 pcq_t *txr_intrq;
439 void *txr_si;
440
441 struct evcnt txr_defragged;
442 struct evcnt txr_defrag_failed;
443 struct evcnt txr_pcqdrop;
444 struct evcnt txr_transmitdef;
445 struct evcnt txr_intr;
446 struct evcnt txr_defer;
447 };
448
449 struct ixl_rx_map {
450 struct mbuf *rxm_m;
451 bus_dmamap_t rxm_map;
452 };
453
454 struct ixl_rx_ring {
455 kmutex_t rxr_lock;
456
457 unsigned int rxr_prod;
458 unsigned int rxr_cons;
459
460 struct ixl_rx_map *rxr_maps;
461 struct ixl_dmamem rxr_mem;
462
463 struct mbuf *rxr_m_head;
464 struct mbuf **rxr_m_tail;
465
466 bus_size_t rxr_tail;
467 unsigned int rxr_qid;
468
469 struct evcnt rxr_mgethdr_failed;
470 struct evcnt rxr_mgetcl_failed;
471 struct evcnt rxr_mbuf_load_failed;
472 struct evcnt rxr_intr;
473 struct evcnt rxr_defer;
474 };
475
476 struct ixl_queue_pair {
477 struct ixl_softc *qp_sc;
478 struct ixl_tx_ring *qp_txr;
479 struct ixl_rx_ring *qp_rxr;
480
481 char qp_name[16];
482
483 void *qp_si;
484 struct work qp_work;
485 bool qp_workqueue;
486 };
487
488 struct ixl_atq {
489 struct ixl_aq_desc iatq_desc;
490 void (*iatq_fn)(struct ixl_softc *,
491 const struct ixl_aq_desc *);
492 };
493 SIMPLEQ_HEAD(ixl_atq_list, ixl_atq);
494
495 struct ixl_product {
496 unsigned int vendor_id;
497 unsigned int product_id;
498 };
499
500 struct ixl_stats_counters {
501 bool isc_has_offset;
502 struct evcnt isc_crc_errors;
503 uint64_t isc_crc_errors_offset;
504 struct evcnt isc_illegal_bytes;
505 uint64_t isc_illegal_bytes_offset;
506 struct evcnt isc_rx_bytes;
507 uint64_t isc_rx_bytes_offset;
508 struct evcnt isc_rx_discards;
509 uint64_t isc_rx_discards_offset;
510 struct evcnt isc_rx_unicast;
511 uint64_t isc_rx_unicast_offset;
512 struct evcnt isc_rx_multicast;
513 uint64_t isc_rx_multicast_offset;
514 struct evcnt isc_rx_broadcast;
515 uint64_t isc_rx_broadcast_offset;
516 struct evcnt isc_rx_size_64;
517 uint64_t isc_rx_size_64_offset;
518 struct evcnt isc_rx_size_127;
519 uint64_t isc_rx_size_127_offset;
520 struct evcnt isc_rx_size_255;
521 uint64_t isc_rx_size_255_offset;
522 struct evcnt isc_rx_size_511;
523 uint64_t isc_rx_size_511_offset;
524 struct evcnt isc_rx_size_1023;
525 uint64_t isc_rx_size_1023_offset;
526 struct evcnt isc_rx_size_1522;
527 uint64_t isc_rx_size_1522_offset;
528 struct evcnt isc_rx_size_big;
529 uint64_t isc_rx_size_big_offset;
530 struct evcnt isc_rx_undersize;
531 uint64_t isc_rx_undersize_offset;
532 struct evcnt isc_rx_oversize;
533 uint64_t isc_rx_oversize_offset;
534 struct evcnt isc_rx_fragments;
535 uint64_t isc_rx_fragments_offset;
536 struct evcnt isc_rx_jabber;
537 uint64_t isc_rx_jabber_offset;
538 struct evcnt isc_tx_bytes;
539 uint64_t isc_tx_bytes_offset;
540 struct evcnt isc_tx_dropped_link_down;
541 uint64_t isc_tx_dropped_link_down_offset;
542 struct evcnt isc_tx_unicast;
543 uint64_t isc_tx_unicast_offset;
544 struct evcnt isc_tx_multicast;
545 uint64_t isc_tx_multicast_offset;
546 struct evcnt isc_tx_broadcast;
547 uint64_t isc_tx_broadcast_offset;
548 struct evcnt isc_tx_size_64;
549 uint64_t isc_tx_size_64_offset;
550 struct evcnt isc_tx_size_127;
551 uint64_t isc_tx_size_127_offset;
552 struct evcnt isc_tx_size_255;
553 uint64_t isc_tx_size_255_offset;
554 struct evcnt isc_tx_size_511;
555 uint64_t isc_tx_size_511_offset;
556 struct evcnt isc_tx_size_1023;
557 uint64_t isc_tx_size_1023_offset;
558 struct evcnt isc_tx_size_1522;
559 uint64_t isc_tx_size_1522_offset;
560 struct evcnt isc_tx_size_big;
561 uint64_t isc_tx_size_big_offset;
562 struct evcnt isc_mac_local_faults;
563 uint64_t isc_mac_local_faults_offset;
564 struct evcnt isc_mac_remote_faults;
565 uint64_t isc_mac_remote_faults_offset;
566 struct evcnt isc_link_xon_rx;
567 uint64_t isc_link_xon_rx_offset;
568 struct evcnt isc_link_xon_tx;
569 uint64_t isc_link_xon_tx_offset;
570 struct evcnt isc_link_xoff_rx;
571 uint64_t isc_link_xoff_rx_offset;
572 struct evcnt isc_link_xoff_tx;
573 uint64_t isc_link_xoff_tx_offset;
574 struct evcnt isc_vsi_rx_discards;
575 uint64_t isc_vsi_rx_discards_offset;
576 struct evcnt isc_vsi_rx_bytes;
577 uint64_t isc_vsi_rx_bytes_offset;
578 struct evcnt isc_vsi_rx_unicast;
579 uint64_t isc_vsi_rx_unicast_offset;
580 struct evcnt isc_vsi_rx_multicast;
581 uint64_t isc_vsi_rx_multicast_offset;
582 struct evcnt isc_vsi_rx_broadcast;
583 uint64_t isc_vsi_rx_broadcast_offset;
584 struct evcnt isc_vsi_tx_errors;
585 uint64_t isc_vsi_tx_errors_offset;
586 struct evcnt isc_vsi_tx_bytes;
587 uint64_t isc_vsi_tx_bytes_offset;
588 struct evcnt isc_vsi_tx_unicast;
589 uint64_t isc_vsi_tx_unicast_offset;
590 struct evcnt isc_vsi_tx_multicast;
591 uint64_t isc_vsi_tx_multicast_offset;
592 struct evcnt isc_vsi_tx_broadcast;
593 uint64_t isc_vsi_tx_broadcast_offset;
594 };
595
596 /*
597 * Locking notes:
598 * + fields in ixl_tx_ring are protected by txr_lock (a spin mutex), and
599 *   fields in ixl_rx_ring are protected by rxr_lock (a spin mutex).
600 *     - no more than one of these ring locks may be held at once.
601 * + fields named sc_atq_* in ixl_softc are protected by sc_atq_lock
602 *   (a spin mutex).
603 *     - this lock cannot be held together with txr_lock or rxr_lock.
604 * + fields named sc_arq_* are not protected by any lock.
605 *     - operations on sc_arq_* are done in a single context related to
606 *       sc_arq_task.
607 * + other fields in ixl_softc are protected by sc_cfg_lock
608 *   (an adaptive mutex).
609 *     - it must be taken before any other lock is taken, and it may be
610 *       released after that other lock is released.
611 */
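
/*
 * Illustrative sketch of the lock ordering described above (not actual
 * driver code): sc_cfg_lock is taken first, a single ring lock may then be
 * taken under it, and it is released no earlier than the ring lock.
 */
#if 0
static void
example_lock_order_sketch(struct ixl_softc *sc, struct ixl_tx_ring *txr)
{

	mutex_enter(&sc->sc_cfg_lock);	/* adaptive mutex, taken first */
	mutex_enter(&txr->txr_lock);	/* spin mutex, taken under sc_cfg_lock */
	/* ... ixl_tx_ring fields may be touched here ... */
	mutex_exit(&txr->txr_lock);
	mutex_exit(&sc->sc_cfg_lock);	/* released after txr_lock */
}
#endif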
612
613 struct ixl_softc {
614 device_t sc_dev;
615 struct ethercom sc_ec;
616 bool sc_attached;
617 bool sc_dead;
618 uint32_t sc_port;
619 struct sysctllog *sc_sysctllog;
620 struct workqueue *sc_workq;
621 struct workqueue *sc_workq_txrx;
622 int sc_stats_intval;
623 callout_t sc_stats_callout;
624 struct ixl_work sc_stats_task;
625 struct ixl_stats_counters
626 sc_stats_counters;
627 uint8_t sc_enaddr[ETHER_ADDR_LEN];
628 struct ifmedia sc_media;
629 uint64_t sc_media_status;
630 uint64_t sc_media_active;
631 uint64_t sc_phy_types;
632 uint8_t sc_phy_abilities;
633 uint8_t sc_phy_linkspeed;
634 uint8_t sc_phy_fec_cfg;
635 uint16_t sc_eee_cap;
636 uint32_t sc_eeer_val;
637 uint8_t sc_d3_lpan;
638 kmutex_t sc_cfg_lock;
639 enum i40e_mac_type sc_mac_type;
640 uint32_t sc_rss_table_size;
641 uint32_t sc_rss_table_entry_width;
642 bool sc_txrx_workqueue;
643 u_int sc_tx_process_limit;
644 u_int sc_rx_process_limit;
645 u_int sc_tx_intr_process_limit;
646 u_int sc_rx_intr_process_limit;
647
648 int sc_cur_ec_capenable;
649
650 struct pci_attach_args sc_pa;
651 pci_intr_handle_t *sc_ihp;
652 void **sc_ihs;
653 unsigned int sc_nintrs;
654
655 bus_dma_tag_t sc_dmat;
656 bus_space_tag_t sc_memt;
657 bus_space_handle_t sc_memh;
658 bus_size_t sc_mems;
659
660 uint8_t sc_pf_id;
661 uint16_t sc_uplink_seid; /* le */
662 uint16_t sc_downlink_seid; /* le */
663 uint16_t sc_vsi_number;
664 uint16_t sc_vsi_stat_counter_idx;
665 uint16_t sc_seid;
666 unsigned int sc_base_queue;
667
668 pci_intr_type_t sc_intrtype;
669 unsigned int sc_msix_vector_queue;
670
671 struct ixl_dmamem sc_scratch;
672 struct ixl_dmamem sc_aqbuf;
673
674 const struct ixl_aq_regs *
675 sc_aq_regs;
676 uint32_t sc_aq_flags;
677 #define IXL_SC_AQ_FLAG_RXCTL __BIT(0)
678 #define IXL_SC_AQ_FLAG_NVMLOCK __BIT(1)
679 #define IXL_SC_AQ_FLAG_NVMREAD __BIT(2)
680 #define IXL_SC_AQ_FLAG_RSS __BIT(3)
681
682 kmutex_t sc_atq_lock;
683 kcondvar_t sc_atq_cv;
684 struct ixl_dmamem sc_atq;
685 unsigned int sc_atq_prod;
686 unsigned int sc_atq_cons;
687
688 struct ixl_dmamem sc_arq;
689 struct ixl_work sc_arq_task;
690 struct ixl_aq_bufs sc_arq_idle;
691 struct ixl_aq_buf *sc_arq_live[IXL_AQ_NUM];
692 unsigned int sc_arq_prod;
693 unsigned int sc_arq_cons;
694
695 struct ixl_work sc_link_state_task;
696 struct ixl_atq sc_link_state_atq;
697
698 struct ixl_dmamem sc_hmc_sd;
699 struct ixl_dmamem sc_hmc_pd;
700 struct ixl_hmc_entry sc_hmc_entries[IXL_HMC_COUNT];
701
702 unsigned int sc_tx_ring_ndescs;
703 unsigned int sc_rx_ring_ndescs;
704 unsigned int sc_nqueue_pairs;
705 unsigned int sc_nqueue_pairs_max;
706 unsigned int sc_nqueue_pairs_device;
707 struct ixl_queue_pair *sc_qps;
708
709 struct evcnt sc_event_atq;
710 struct evcnt sc_event_link;
711 struct evcnt sc_event_ecc_err;
712 struct evcnt sc_event_pci_exception;
713 struct evcnt sc_event_crit_err;
714 };
715
716 #define IXL_TXRX_PROCESS_UNLIMIT UINT_MAX
717 #define IXL_TX_PROCESS_LIMIT 256
718 #define IXL_RX_PROCESS_LIMIT 256
719 #define IXL_TX_INTR_PROCESS_LIMIT 256
720 #define IXL_RX_INTR_PROCESS_LIMIT 0U
721
722 #define IXL_IFCAP_RXCSUM (IFCAP_CSUM_IPv4_Rx | \
723 IFCAP_CSUM_TCPv4_Rx | \
724 IFCAP_CSUM_UDPv4_Rx | \
725 IFCAP_CSUM_TCPv6_Rx | \
726 IFCAP_CSUM_UDPv6_Rx)
727 #define IXL_IFCAP_TXCSUM (IFCAP_CSUM_IPv4_Tx | \
728 IFCAP_CSUM_TCPv4_Tx | \
729 IFCAP_CSUM_UDPv4_Tx | \
730 IFCAP_CSUM_TCPv6_Tx | \
731 IFCAP_CSUM_UDPv6_Tx)
732 #define IXL_CSUM_ALL_OFFLOAD (M_CSUM_IPv4 | \
733 M_CSUM_TCPv4 | M_CSUM_TCPv6 | \
734 M_CSUM_UDPv4 | M_CSUM_UDPv6)
735
736 #define delaymsec(_x) DELAY(1000 * (_x))
737 #ifdef IXL_DEBUG
738 #define DDPRINTF(sc, fmt, args...) \
739 do { \
740 if ((sc) != NULL) { \
741 device_printf( \
742 ((struct ixl_softc *)(sc))->sc_dev, \
743 ""); \
744 } \
745 printf("%s:\t" fmt, __func__, ##args); \
746 } while (0)
747 #else
748 #define DDPRINTF(sc, fmt, args...) __nothing
749 #endif
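
/*
 * Example use of DDPRINTF() (illustrative only): with IXL_DEBUG defined it
 * prefixes the message with the device and calling function names, otherwise
 * it expands to nothing.
 */
#if 0
	DDPRINTF(sc, "arq prod %u cons %u\n", sc->sc_arq_prod, sc->sc_arq_cons);
#endif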
750 #ifndef IXL_STATS_INTERVAL_MSEC
751 #define IXL_STATS_INTERVAL_MSEC 10000
752 #endif
753 #ifndef IXL_QUEUE_NUM
754 #define IXL_QUEUE_NUM 0
755 #endif
756
757 static bool ixl_param_nomsix = false;
758 static int ixl_param_stats_interval = IXL_STATS_INTERVAL_MSEC;
759 static int ixl_param_nqps_limit = IXL_QUEUE_NUM;
760 static unsigned int ixl_param_tx_ndescs = 1024;
761 static unsigned int ixl_param_rx_ndescs = 1024;
762
763 static enum i40e_mac_type
764 ixl_mactype(pci_product_id_t);
765 static void ixl_clear_hw(struct ixl_softc *);
766 static int ixl_pf_reset(struct ixl_softc *);
767
768 static int ixl_dmamem_alloc(struct ixl_softc *, struct ixl_dmamem *,
769 bus_size_t, bus_size_t);
770 static void ixl_dmamem_free(struct ixl_softc *, struct ixl_dmamem *);
771
772 static int ixl_arq_fill(struct ixl_softc *);
773 static void ixl_arq_unfill(struct ixl_softc *);
774
775 static int ixl_atq_poll(struct ixl_softc *, struct ixl_aq_desc *,
776 unsigned int);
777 static void ixl_atq_set(struct ixl_atq *,
778 void (*)(struct ixl_softc *, const struct ixl_aq_desc *));
779 static int ixl_atq_post(struct ixl_softc *, struct ixl_atq *);
780 static int ixl_atq_post_locked(struct ixl_softc *, struct ixl_atq *);
781 static void ixl_atq_done(struct ixl_softc *);
782 static int ixl_atq_exec(struct ixl_softc *, struct ixl_atq *);
783 static int ixl_get_version(struct ixl_softc *);
784 static int ixl_get_nvm_version(struct ixl_softc *);
785 static int ixl_get_hw_capabilities(struct ixl_softc *);
786 static int ixl_pxe_clear(struct ixl_softc *);
787 static int ixl_lldp_shut(struct ixl_softc *);
788 static int ixl_get_mac(struct ixl_softc *);
789 static int ixl_get_switch_config(struct ixl_softc *);
790 static int ixl_phy_mask_ints(struct ixl_softc *);
791 static int ixl_get_phy_info(struct ixl_softc *);
792 static int ixl_set_phy_config(struct ixl_softc *, uint8_t, uint8_t, bool);
793 static int ixl_set_phy_autoselect(struct ixl_softc *);
794 static int ixl_restart_an(struct ixl_softc *);
795 static int ixl_hmc(struct ixl_softc *);
796 static void ixl_hmc_free(struct ixl_softc *);
797 static int ixl_get_vsi(struct ixl_softc *);
798 static int ixl_set_vsi(struct ixl_softc *);
799 static void ixl_set_filter_control(struct ixl_softc *);
800 static void ixl_get_link_status(void *);
801 static int ixl_get_link_status_poll(struct ixl_softc *, int *);
802 static int ixl_set_link_status(struct ixl_softc *,
803 const struct ixl_aq_desc *);
804 static uint64_t ixl_search_link_speed(uint8_t);
805 static uint8_t ixl_search_baudrate(uint64_t);
806 static void ixl_config_rss(struct ixl_softc *);
807 static int ixl_add_macvlan(struct ixl_softc *, const uint8_t *,
808 uint16_t, uint16_t);
809 static int ixl_remove_macvlan(struct ixl_softc *, const uint8_t *,
810 uint16_t, uint16_t);
811 static void ixl_arq(void *);
812 static void ixl_hmc_pack(void *, const void *,
813 const struct ixl_hmc_pack *, unsigned int);
814 static uint32_t ixl_rd_rx_csr(struct ixl_softc *, uint32_t);
815 static void ixl_wr_rx_csr(struct ixl_softc *, uint32_t, uint32_t);
816 static int ixl_rd16_nvm(struct ixl_softc *, uint16_t, uint16_t *);
817
818 static int ixl_match(device_t, cfdata_t, void *);
819 static void ixl_attach(device_t, device_t, void *);
820 static int ixl_detach(device_t, int);
821
822 static void ixl_media_add(struct ixl_softc *);
823 static int ixl_media_change(struct ifnet *);
824 static void ixl_media_status(struct ifnet *, struct ifmediareq *);
825 static void ixl_watchdog(struct ifnet *);
826 static int ixl_ioctl(struct ifnet *, u_long, void *);
827 static void ixl_start(struct ifnet *);
828 static int ixl_transmit(struct ifnet *, struct mbuf *);
829 static void ixl_deferred_transmit(void *);
830 static int ixl_intr(void *);
831 static int ixl_queue_intr(void *);
832 static int ixl_other_intr(void *);
833 static void ixl_handle_queue(void *);
834 static void ixl_handle_queue_wk(struct work *, void *);
835 static void ixl_sched_handle_queue(struct ixl_softc *,
836 struct ixl_queue_pair *);
837 static int ixl_init(struct ifnet *);
838 static int ixl_init_locked(struct ixl_softc *);
839 static void ixl_stop(struct ifnet *, int);
840 static void ixl_stop_locked(struct ixl_softc *);
841 static int ixl_iff(struct ixl_softc *);
842 static int ixl_ifflags_cb(struct ethercom *);
843 static int ixl_setup_interrupts(struct ixl_softc *);
844 static int ixl_establish_intx(struct ixl_softc *);
845 static int ixl_establish_msix(struct ixl_softc *);
846 static void ixl_enable_queue_intr(struct ixl_softc *,
847 struct ixl_queue_pair *);
848 static void ixl_disable_queue_intr(struct ixl_softc *,
849 struct ixl_queue_pair *);
850 static void ixl_enable_other_intr(struct ixl_softc *);
851 static void ixl_disable_other_intr(struct ixl_softc *);
852 static void ixl_config_queue_intr(struct ixl_softc *);
853 static void ixl_config_other_intr(struct ixl_softc *);
854
855 static struct ixl_tx_ring *
856 ixl_txr_alloc(struct ixl_softc *, unsigned int);
857 static void ixl_txr_qdis(struct ixl_softc *, struct ixl_tx_ring *, int);
858 static void ixl_txr_config(struct ixl_softc *, struct ixl_tx_ring *);
859 static int ixl_txr_enabled(struct ixl_softc *, struct ixl_tx_ring *);
860 static int ixl_txr_disabled(struct ixl_softc *, struct ixl_tx_ring *);
861 static void ixl_txr_unconfig(struct ixl_softc *, struct ixl_tx_ring *);
862 static void ixl_txr_clean(struct ixl_softc *, struct ixl_tx_ring *);
863 static void ixl_txr_free(struct ixl_softc *, struct ixl_tx_ring *);
864 static int ixl_txeof(struct ixl_softc *, struct ixl_tx_ring *, u_int);
865
866 static struct ixl_rx_ring *
867 ixl_rxr_alloc(struct ixl_softc *, unsigned int);
868 static void ixl_rxr_config(struct ixl_softc *, struct ixl_rx_ring *);
869 static int ixl_rxr_enabled(struct ixl_softc *, struct ixl_rx_ring *);
870 static int ixl_rxr_disabled(struct ixl_softc *, struct ixl_rx_ring *);
871 static void ixl_rxr_unconfig(struct ixl_softc *, struct ixl_rx_ring *);
872 static void ixl_rxr_clean(struct ixl_softc *, struct ixl_rx_ring *);
873 static void ixl_rxr_free(struct ixl_softc *, struct ixl_rx_ring *);
874 static int ixl_rxeof(struct ixl_softc *, struct ixl_rx_ring *, u_int);
875 static int ixl_rxfill(struct ixl_softc *, struct ixl_rx_ring *);
876
877 static struct workqueue *
878 ixl_workq_create(const char *, pri_t, int, int);
879 static void ixl_workq_destroy(struct workqueue *);
880 static int ixl_workqs_teardown(device_t);
881 static void ixl_work_set(struct ixl_work *, void (*)(void *), void *);
882 static void ixl_work_add(struct workqueue *, struct ixl_work *);
883 static void ixl_work_wait(struct workqueue *, struct ixl_work *);
884 static void ixl_workq_work(struct work *, void *);
885 static const struct ixl_product *
886 ixl_lookup(const struct pci_attach_args *pa);
887 static void ixl_link_state_update(struct ixl_softc *,
888 const struct ixl_aq_desc *);
889 static int ixl_vlan_cb(struct ethercom *, uint16_t, bool);
890 static int ixl_setup_vlan_hwfilter(struct ixl_softc *);
891 static void ixl_teardown_vlan_hwfilter(struct ixl_softc *);
892 static int ixl_update_macvlan(struct ixl_softc *);
893 static int ixl_setup_interrupts(struct ixl_softc *);
894 static void ixl_teardown_interrupts(struct ixl_softc *);
895 static int ixl_setup_stats(struct ixl_softc *);
896 static void ixl_teardown_stats(struct ixl_softc *);
897 static void ixl_stats_callout(void *);
898 static void ixl_stats_update(void *);
899 static int ixl_setup_sysctls(struct ixl_softc *);
900 static void ixl_teardown_sysctls(struct ixl_softc *);
901 static int ixl_queue_pairs_alloc(struct ixl_softc *);
902 static void ixl_queue_pairs_free(struct ixl_softc *);
903
904 static const struct ixl_phy_type ixl_phy_type_map[] = {
905 { 1ULL << IXL_PHY_TYPE_SGMII, IFM_1000_SGMII },
906 { 1ULL << IXL_PHY_TYPE_1000BASE_KX, IFM_1000_KX },
907 { 1ULL << IXL_PHY_TYPE_10GBASE_KX4, IFM_10G_KX4 },
908 { 1ULL << IXL_PHY_TYPE_10GBASE_KR, IFM_10G_KR },
909 { 1ULL << IXL_PHY_TYPE_40GBASE_KR4, IFM_40G_KR4 },
910 { 1ULL << IXL_PHY_TYPE_XAUI |
911 1ULL << IXL_PHY_TYPE_XFI, IFM_10G_CX4 },
912 { 1ULL << IXL_PHY_TYPE_SFI, IFM_10G_SFI },
913 { 1ULL << IXL_PHY_TYPE_XLAUI |
914 1ULL << IXL_PHY_TYPE_XLPPI, IFM_40G_XLPPI },
915 { 1ULL << IXL_PHY_TYPE_40GBASE_CR4_CU |
916 1ULL << IXL_PHY_TYPE_40GBASE_CR4, IFM_40G_CR4 },
917 { 1ULL << IXL_PHY_TYPE_10GBASE_CR1_CU |
918 1ULL << IXL_PHY_TYPE_10GBASE_CR1, IFM_10G_CR1 },
919 { 1ULL << IXL_PHY_TYPE_10GBASE_AOC, IFM_10G_AOC },
920 { 1ULL << IXL_PHY_TYPE_40GBASE_AOC, IFM_40G_AOC },
921 { 1ULL << IXL_PHY_TYPE_100BASE_TX, IFM_100_TX },
922 { 1ULL << IXL_PHY_TYPE_1000BASE_T_OPTICAL |
923 1ULL << IXL_PHY_TYPE_1000BASE_T, IFM_1000_T },
924 { 1ULL << IXL_PHY_TYPE_10GBASE_T, IFM_10G_T },
925 { 1ULL << IXL_PHY_TYPE_10GBASE_SR, IFM_10G_SR },
926 { 1ULL << IXL_PHY_TYPE_10GBASE_LR, IFM_10G_LR },
927 { 1ULL << IXL_PHY_TYPE_10GBASE_SFPP_CU, IFM_10G_TWINAX },
928 { 1ULL << IXL_PHY_TYPE_40GBASE_SR4, IFM_40G_SR4 },
929 { 1ULL << IXL_PHY_TYPE_40GBASE_LR4, IFM_40G_LR4 },
930 { 1ULL << IXL_PHY_TYPE_1000BASE_SX, IFM_1000_SX },
931 { 1ULL << IXL_PHY_TYPE_1000BASE_LX, IFM_1000_LX },
932 { 1ULL << IXL_PHY_TYPE_20GBASE_KR2, IFM_20G_KR2 },
933 { 1ULL << IXL_PHY_TYPE_25GBASE_KR, IFM_25G_KR },
934 { 1ULL << IXL_PHY_TYPE_25GBASE_CR, IFM_25G_CR },
935 { 1ULL << IXL_PHY_TYPE_25GBASE_SR, IFM_25G_SR },
936 { 1ULL << IXL_PHY_TYPE_25GBASE_LR, IFM_25G_LR },
937 { 1ULL << IXL_PHY_TYPE_25GBASE_AOC, IFM_25G_AOC },
938 { 1ULL << IXL_PHY_TYPE_25GBASE_ACC, IFM_25G_CR },
939 };
940
941 static const struct ixl_speed_type ixl_speed_type_map[] = {
942 { IXL_AQ_LINK_SPEED_40GB, IF_Gbps(40) },
943 { IXL_AQ_LINK_SPEED_25GB, IF_Gbps(25) },
944 { IXL_AQ_LINK_SPEED_10GB, IF_Gbps(10) },
945 { IXL_AQ_LINK_SPEED_1000MB, IF_Mbps(1000) },
946 { IXL_AQ_LINK_SPEED_100MB, IF_Mbps(100)},
947 };
948
949 static const struct ixl_aq_regs ixl_pf_aq_regs = {
950 .atq_tail = I40E_PF_ATQT,
951 .atq_tail_mask = I40E_PF_ATQT_ATQT_MASK,
952 .atq_head = I40E_PF_ATQH,
953 .atq_head_mask = I40E_PF_ATQH_ATQH_MASK,
954 .atq_len = I40E_PF_ATQLEN,
955 .atq_bal = I40E_PF_ATQBAL,
956 .atq_bah = I40E_PF_ATQBAH,
957 .atq_len_enable = I40E_PF_ATQLEN_ATQENABLE_MASK,
958
959 .arq_tail = I40E_PF_ARQT,
960 .arq_tail_mask = I40E_PF_ARQT_ARQT_MASK,
961 .arq_head = I40E_PF_ARQH,
962 .arq_head_mask = I40E_PF_ARQH_ARQH_MASK,
963 .arq_len = I40E_PF_ARQLEN,
964 .arq_bal = I40E_PF_ARQBAL,
965 .arq_bah = I40E_PF_ARQBAH,
966 .arq_len_enable = I40E_PF_ARQLEN_ARQENABLE_MASK,
967 };
968
969 #define ixl_rd(_s, _r) \
970 bus_space_read_4((_s)->sc_memt, (_s)->sc_memh, (_r))
971 #define ixl_wr(_s, _r, _v) \
972 bus_space_write_4((_s)->sc_memt, (_s)->sc_memh, (_r), (_v))
973 #define ixl_barrier(_s, _r, _l, _o) \
974 bus_space_barrier((_s)->sc_memt, (_s)->sc_memh, (_r), (_l), (_o))
975 #define ixl_flush(_s) (void)ixl_rd((_s), I40E_GLGEN_STAT)
976 #define ixl_nqueues(_sc) (1 << ((_sc)->sc_nqueue_pairs - 1))
977
978 static inline uint32_t
979 ixl_dmamem_hi(struct ixl_dmamem *ixm)
980 {
981 uint32_t retval;
982 uint64_t val;
983
984 if (sizeof(IXL_DMA_DVA(ixm)) > 4) {
985 val = (intptr_t)IXL_DMA_DVA(ixm);
986 retval = (uint32_t)(val >> 32);
987 } else {
988 retval = 0;
989 }
990
991 return retval;
992 }
993
994 static inline uint32_t
995 ixl_dmamem_lo(struct ixl_dmamem *ixm)
996 {
997
998 return (uint32_t)IXL_DMA_DVA(ixm);
999 }
1000
1001 static inline void
1002 ixl_aq_dva(struct ixl_aq_desc *iaq, bus_addr_t addr)
1003 {
1004 uint64_t val;
1005
1006 if (sizeof(addr) > 4) {
1007 val = (intptr_t)addr;
1008 iaq->iaq_param[2] = htole32(val >> 32);
1009 } else {
1010 iaq->iaq_param[2] = htole32(0);
1011 }
1012
1013 iaq->iaq_param[3] = htole32(addr);
1014 }
1015
1016 static inline unsigned int
1017 ixl_rxr_unrefreshed(unsigned int prod, unsigned int cons, unsigned int ndescs)
1018 {
1019 unsigned int num;
1020
1021 if (prod < cons)
1022 num = cons - prod;
1023 else
1024 num = (ndescs - prod) + cons;
1025
1026 if (__predict_true(num > 0)) {
1027 /* the device cannot receive packets if every descriptor is filled */
1028 num -= 1;
1029 }
1030
1031 return num;
1032 }
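
/*
 * Worked example for ixl_rxr_unrefreshed() above (illustrative): with
 * ndescs = 8, prod = 5 and cons = 3, num = (8 - 5) + 3 = 6 and one slot is
 * reserved, so 5 descriptors may be refilled; with prod = 2 and cons = 6,
 * num = 6 - 2 = 4, giving 3 refillable descriptors.
 */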
1033
1034 CFATTACH_DECL3_NEW(ixl, sizeof(struct ixl_softc),
1035 ixl_match, ixl_attach, ixl_detach, NULL, NULL, NULL,
1036 DVF_DETACH_SHUTDOWN);
1037
1038 static const struct ixl_product ixl_products[] = {
1039 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_SFP },
1040 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_KX_B },
1041 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_KX_C },
1042 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_A },
1043 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_B },
1044 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_C },
1045 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_T },
1046 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_1 },
1047 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_2 },
1048 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_T4_10G },
1049 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_BP },
1050 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_SFP28 },
1051 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_KX },
1052 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_QSFP },
1053 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_SFP },
1054 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_1G_BASET },
1055 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_10G_BASET },
1056 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_I_SFP },
1057 /* required last entry */
1058 {0, 0}
1059 };
1060
1061 static const struct ixl_product *
1062 ixl_lookup(const struct pci_attach_args *pa)
1063 {
1064 const struct ixl_product *ixlp;
1065
1066 for (ixlp = ixl_products; ixlp->vendor_id != 0; ixlp++) {
1067 if (PCI_VENDOR(pa->pa_id) == ixlp->vendor_id &&
1068 PCI_PRODUCT(pa->pa_id) == ixlp->product_id)
1069 return ixlp;
1070 }
1071
1072 return NULL;
1073 }
1074
1075 static int
1076 ixl_match(device_t parent, cfdata_t match, void *aux)
1077 {
1078 const struct pci_attach_args *pa = aux;
1079
1080 return (ixl_lookup(pa) != NULL) ? 1 : 0;
1081 }
1082
1083 static void
1084 ixl_attach(device_t parent, device_t self, void *aux)
1085 {
1086 struct ixl_softc *sc;
1087 struct pci_attach_args *pa = aux;
1088 struct ifnet *ifp;
1089 pcireg_t memtype;
1090 uint32_t firstq, port, ari, func;
1091 char xnamebuf[32];
1092 int tries, rv, link;
1093
1094 sc = device_private(self);
1095 sc->sc_dev = self;
1096 ifp = &sc->sc_ec.ec_if;
1097
1098 sc->sc_pa = *pa;
1099 sc->sc_dmat = (pci_dma64_available(pa)) ?
1100 pa->pa_dmat64 : pa->pa_dmat;
1101 sc->sc_aq_regs = &ixl_pf_aq_regs;
1102
1103 sc->sc_mac_type = ixl_mactype(PCI_PRODUCT(pa->pa_id));
1104
1105 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IXL_PCIREG);
1106 if (pci_mapreg_map(pa, IXL_PCIREG, memtype, 0,
1107 &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems)) {
1108 aprint_error(": unable to map registers\n");
1109 return;
1110 }
1111
1112 mutex_init(&sc->sc_cfg_lock, MUTEX_DEFAULT, IPL_SOFTNET);
1113
1114 firstq = ixl_rd(sc, I40E_PFLAN_QALLOC);
1115 firstq &= I40E_PFLAN_QALLOC_FIRSTQ_MASK;
1116 firstq >>= I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
1117 sc->sc_base_queue = firstq;
1118
1119 ixl_clear_hw(sc);
1120 if (ixl_pf_reset(sc) == -1) {
1121 /* error printed by ixl pf_reset */
1122 goto unmap;
1123 }
1124
1125 port = ixl_rd(sc, I40E_PFGEN_PORTNUM);
1126 port &= I40E_PFGEN_PORTNUM_PORT_NUM_MASK;
1127 port >>= I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
1128 sc->sc_port = port;
1129 aprint_normal(": port %u", sc->sc_port);
1130
1131 ari = ixl_rd(sc, I40E_GLPCI_CAPSUP);
1132 ari &= I40E_GLPCI_CAPSUP_ARI_EN_MASK;
1133 ari >>= I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
1134
1135 func = ixl_rd(sc, I40E_PF_FUNC_RID);
1136 sc->sc_pf_id = func & (ari ? 0xff : 0x7);
1137
1138 /* initialise the adminq */
1139
1140 mutex_init(&sc->sc_atq_lock, MUTEX_DEFAULT, IPL_NET);
1141
1142 if (ixl_dmamem_alloc(sc, &sc->sc_atq,
1143 sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
1144 aprint_error("\n" "%s: unable to allocate atq\n",
1145 device_xname(self));
1146 goto unmap;
1147 }
1148
1149 SIMPLEQ_INIT(&sc->sc_arq_idle);
1150 ixl_work_set(&sc->sc_arq_task, ixl_arq, sc);
1151 sc->sc_arq_cons = 0;
1152 sc->sc_arq_prod = 0;
1153
1154 if (ixl_dmamem_alloc(sc, &sc->sc_arq,
1155 sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
1156 aprint_error("\n" "%s: unable to allocate arq\n",
1157 device_xname(self));
1158 goto free_atq;
1159 }
1160
1161 if (!ixl_arq_fill(sc)) {
1162 aprint_error("\n" "%s: unable to fill arq descriptors\n",
1163 device_xname(self));
1164 goto free_arq;
1165 }
1166
1167 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1168 0, IXL_DMA_LEN(&sc->sc_atq),
1169 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1170
1171 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1172 0, IXL_DMA_LEN(&sc->sc_arq),
1173 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1174
1175 for (tries = 0; tries < 10; tries++) {
1176 sc->sc_atq_cons = 0;
1177 sc->sc_atq_prod = 0;
1178
1179 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1180 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1181 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1182 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1183
1184 ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
1185
1186 ixl_wr(sc, sc->sc_aq_regs->atq_bal,
1187 ixl_dmamem_lo(&sc->sc_atq));
1188 ixl_wr(sc, sc->sc_aq_regs->atq_bah,
1189 ixl_dmamem_hi(&sc->sc_atq));
1190 ixl_wr(sc, sc->sc_aq_regs->atq_len,
1191 sc->sc_aq_regs->atq_len_enable | IXL_AQ_NUM);
1192
1193 ixl_wr(sc, sc->sc_aq_regs->arq_bal,
1194 ixl_dmamem_lo(&sc->sc_arq));
1195 ixl_wr(sc, sc->sc_aq_regs->arq_bah,
1196 ixl_dmamem_hi(&sc->sc_arq));
1197 ixl_wr(sc, sc->sc_aq_regs->arq_len,
1198 sc->sc_aq_regs->arq_len_enable | IXL_AQ_NUM);
1199
1200 rv = ixl_get_version(sc);
1201 if (rv == 0)
1202 break;
1203 if (rv != ETIMEDOUT) {
1204 aprint_error(", unable to get firmware version\n");
1205 goto shutdown;
1206 }
1207
1208 delaymsec(100);
1209 }
1210
1211 ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
1212
1213 if (ixl_dmamem_alloc(sc, &sc->sc_aqbuf, IXL_AQ_BUFLEN, 0) != 0) {
1214 aprint_error_dev(self, ", unable to allocate nvm buffer\n");
1215 goto shutdown;
1216 }
1217
1218 ixl_get_nvm_version(sc);
1219
1220 if (sc->sc_mac_type == I40E_MAC_X722)
1221 sc->sc_nqueue_pairs_device = IXL_QUEUE_MAX_X722;
1222 else
1223 sc->sc_nqueue_pairs_device = IXL_QUEUE_MAX_XL710;
1224
1225 rv = ixl_get_hw_capabilities(sc);
1226 if (rv != 0) {
1227 aprint_error(", GET HW CAPABILITIES %s\n",
1228 rv == ETIMEDOUT ? "timeout" : "error");
1229 goto free_aqbuf;
1230 }
1231
1232 sc->sc_nqueue_pairs_max = MIN((int)sc->sc_nqueue_pairs_device, ncpu);
1233 if (ixl_param_nqps_limit > 0) {
1234 sc->sc_nqueue_pairs_max = MIN((int)sc->sc_nqueue_pairs_max,
1235 ixl_param_nqps_limit);
1236 }
1237
1238 sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max;
1239 sc->sc_tx_ring_ndescs = ixl_param_tx_ndescs;
1240 sc->sc_rx_ring_ndescs = ixl_param_rx_ndescs;
1241
1242 KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_rx_ring_ndescs);
1243 KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_tx_ring_ndescs);
1244
1245 if (ixl_get_mac(sc) != 0) {
1246 /* error printed by ixl_get_mac */
1247 goto free_aqbuf;
1248 }
1249
1250 aprint_normal("\n");
1251 aprint_naive("\n");
1252
1253 aprint_normal_dev(self, "Ethernet address %s\n",
1254 ether_sprintf(sc->sc_enaddr));
1255
1256 rv = ixl_pxe_clear(sc);
1257 if (rv != 0) {
1258 aprint_debug_dev(self, "CLEAR PXE MODE %s\n",
1259 rv == ETIMEDOUT ? "timeout" : "error");
1260 }
1261
1262 ixl_set_filter_control(sc);
1263
1264 if (ixl_hmc(sc) != 0) {
1265 /* error printed by ixl_hmc */
1266 goto free_aqbuf;
1267 }
1268
1269 if (ixl_lldp_shut(sc) != 0) {
1270 /* error printed by ixl_lldp_shut */
1271 goto free_hmc;
1272 }
1273
1274 if (ixl_phy_mask_ints(sc) != 0) {
1275 /* error printed by ixl_phy_mask_ints */
1276 goto free_hmc;
1277 }
1278
1279 if (ixl_restart_an(sc) != 0) {
1280 /* error printed by ixl_restart_an */
1281 goto free_hmc;
1282 }
1283
1284 if (ixl_get_switch_config(sc) != 0) {
1285 /* error printed by ixl_get_switch_config */
1286 goto free_hmc;
1287 }
1288
1289 rv = ixl_get_link_status_poll(sc, NULL);
1290 if (rv != 0) {
1291 aprint_error_dev(self, "GET LINK STATUS %s\n",
1292 rv == ETIMEDOUT ? "timeout" : "error");
1293 goto free_hmc;
1294 }
1295
1296 /*
1297 * The FW often returns EIO for the "Get PHY Abilities" command
1298 * if it is issued without a delay.
1299 */
1300 DELAY(500);
1301 if (ixl_get_phy_info(sc) != 0) {
1302 /* error printed by ixl_get_phy_info */
1303 goto free_hmc;
1304 }
1305
1306 if (ixl_dmamem_alloc(sc, &sc->sc_scratch,
1307 sizeof(struct ixl_aq_vsi_data), 8) != 0) {
1308 aprint_error_dev(self, "unable to allocate scratch buffer\n");
1309 goto free_hmc;
1310 }
1311
1312 rv = ixl_get_vsi(sc);
1313 if (rv != 0) {
1314 aprint_error_dev(self, "GET VSI %s %d\n",
1315 rv == ETIMEDOUT ? "timeout" : "error", rv);
1316 goto free_scratch;
1317 }
1318
1319 rv = ixl_set_vsi(sc);
1320 if (rv != 0) {
1321 aprint_error_dev(self, "UPDATE VSI error %s %d\n",
1322 rv == ETIMEDOUT ? "timeout" : "error", rv);
1323 goto free_scratch;
1324 }
1325
1326 if (ixl_queue_pairs_alloc(sc) != 0) {
1327 /* error printed by ixl_queue_pairs_alloc */
1328 goto free_scratch;
1329 }
1330
1331 if (ixl_setup_interrupts(sc) != 0) {
1332 /* error printed by ixl_setup_interrupts */
1333 goto free_queue_pairs;
1334 }
1335
1336 if (ixl_setup_stats(sc) != 0) {
1337 aprint_error_dev(self, "failed to setup event counters\n");
1338 goto teardown_intrs;
1339 }
1340
1341 if (ixl_setup_sysctls(sc) != 0) {
1342 /* error printed by ixl_setup_sysctls */
1343 goto teardown_stats;
1344 }
1345
1346 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_cfg", device_xname(self));
1347 sc->sc_workq = ixl_workq_create(xnamebuf, IXL_WORKQUEUE_PRI,
1348 IPL_NET, WQ_MPSAFE);
1349 if (sc->sc_workq == NULL)
1350 goto teardown_sysctls;
1351
1352 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_txrx", device_xname(self));
1353 rv = workqueue_create(&sc->sc_workq_txrx, xnamebuf, ixl_handle_queue_wk,
1354 sc, IXL_WORKQUEUE_PRI, IPL_NET, WQ_PERCPU | WQ_MPSAFE);
1355 if (rv != 0) {
1356 sc->sc_workq_txrx = NULL;
1357 goto teardown_wqs;
1358 }
1359
1360 snprintf(xnamebuf, sizeof(xnamebuf), "%s_atq_cv", device_xname(self));
1361 cv_init(&sc->sc_atq_cv, xnamebuf);
1362
1363 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
1364
1365 ifp->if_softc = sc;
1366 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1367 ifp->if_extflags = IFEF_MPSAFE;
1368 ifp->if_ioctl = ixl_ioctl;
1369 ifp->if_start = ixl_start;
1370 ifp->if_transmit = ixl_transmit;
1371 ifp->if_watchdog = ixl_watchdog;
1372 ifp->if_init = ixl_init;
1373 ifp->if_stop = ixl_stop;
1374 IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_ndescs);
1375 IFQ_SET_READY(&ifp->if_snd);
1376 ifp->if_capabilities |= IXL_IFCAP_RXCSUM;
1377 ifp->if_capabilities |= IXL_IFCAP_TXCSUM;
1378 #if 0
1379 ifp->if_capabilities |= IFCAP_TSOv4 | IFCAP_TSOv6;
1380 #endif
1381 ether_set_vlan_cb(&sc->sc_ec, ixl_vlan_cb);
1382 sc->sc_ec.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1383 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
1384 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
1385
1386 sc->sc_ec.ec_capenable = sc->sc_ec.ec_capabilities;
1387 /* Disable VLAN_HWFILTER by default */
1388 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
1389
1390 sc->sc_cur_ec_capenable = sc->sc_ec.ec_capenable;
1391
1392 sc->sc_ec.ec_ifmedia = &sc->sc_media;
1393 ifmedia_init(&sc->sc_media, IFM_IMASK, ixl_media_change,
1394 ixl_media_status);
1395
1396 ixl_media_add(sc);
1397 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1398 if (ISSET(sc->sc_phy_abilities,
1399 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX))) {
1400 ifmedia_add(&sc->sc_media,
1401 IFM_ETHER | IFM_AUTO | IFM_FLOW, 0, NULL);
1402 }
1403 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_NONE, 0, NULL);
1404 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
1405
1406 if_attach(ifp);
1407 if_deferred_start_init(ifp, NULL);
1408 ether_ifattach(ifp, sc->sc_enaddr);
1409 ether_set_ifflags_cb(&sc->sc_ec, ixl_ifflags_cb);
1410
1411 rv = ixl_get_link_status_poll(sc, &link);
1412 if (rv != 0)
1413 link = LINK_STATE_UNKNOWN;
1414 if_link_state_change(ifp, link);
1415
1416 ixl_work_set(&sc->sc_link_state_task, ixl_get_link_status, sc);
1417
1418 ixl_config_other_intr(sc);
1419 ixl_enable_other_intr(sc);
1420
1421 ixl_set_phy_autoselect(sc);
1422
1423 /* remove default mac filter and replace it so we can see vlans */
1424 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, 0, 0);
1425 if (rv != ENOENT) {
1426 aprint_debug_dev(self,
1427 "unable to remove macvlan %u\n", rv);
1428 }
1429 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
1430 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1431 if (rv != ENOENT) {
1432 aprint_debug_dev(self,
1433 "unable to remove macvlan, ignore vlan %u\n", rv);
1434 }
1435
1436 if (ixl_update_macvlan(sc) != 0) {
1437 aprint_debug_dev(self,
1438 "couldn't enable vlan hardware filter\n");
1439 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
1440 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER);
1441 }
1442
1443 sc->sc_txrx_workqueue = true;
1444 sc->sc_tx_process_limit = IXL_TX_PROCESS_LIMIT;
1445 sc->sc_rx_process_limit = IXL_RX_PROCESS_LIMIT;
1446 sc->sc_tx_intr_process_limit = IXL_TX_INTR_PROCESS_LIMIT;
1447 sc->sc_rx_intr_process_limit = IXL_RX_INTR_PROCESS_LIMIT;
1448
1449 ixl_stats_update(sc);
1450 sc->sc_stats_counters.isc_has_offset = true;
1451 callout_schedule(&sc->sc_stats_callout, mstohz(sc->sc_stats_intval));
1452
1453 if (pmf_device_register(self, NULL, NULL) != true)
1454 aprint_debug_dev(self, "couldn't establish power handler\n");
1455 sc->sc_attached = true;
1456 return;
1457
1458 teardown_wqs:
1459 config_finalize_register(self, ixl_workqs_teardown);
1460 teardown_sysctls:
1461 ixl_teardown_sysctls(sc);
1462 teardown_stats:
1463 ixl_teardown_stats(sc);
1464 teardown_intrs:
1465 ixl_teardown_interrupts(sc);
1466 free_queue_pairs:
1467 ixl_queue_pairs_free(sc);
1468 free_scratch:
1469 ixl_dmamem_free(sc, &sc->sc_scratch);
1470 free_hmc:
1471 ixl_hmc_free(sc);
1472 free_aqbuf:
1473 ixl_dmamem_free(sc, &sc->sc_aqbuf);
1474 shutdown:
1475 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1476 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1477 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1478 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1479
1480 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0);
1481 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0);
1482 ixl_wr(sc, sc->sc_aq_regs->atq_len, 0);
1483
1484 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0);
1485 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0);
1486 ixl_wr(sc, sc->sc_aq_regs->arq_len, 0);
1487
1488 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1489 0, IXL_DMA_LEN(&sc->sc_arq),
1490 BUS_DMASYNC_POSTREAD);
1491 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1492 0, IXL_DMA_LEN(&sc->sc_atq),
1493 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1494
1495 ixl_arq_unfill(sc);
1496 free_arq:
1497 ixl_dmamem_free(sc, &sc->sc_arq);
1498 free_atq:
1499 ixl_dmamem_free(sc, &sc->sc_atq);
1500 unmap:
1501 mutex_destroy(&sc->sc_atq_lock);
1502 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
1503 mutex_destroy(&sc->sc_cfg_lock);
1504 sc->sc_mems = 0;
1505
1506 sc->sc_attached = false;
1507 }
1508
1509 static int
1510 ixl_detach(device_t self, int flags)
1511 {
1512 struct ixl_softc *sc = device_private(self);
1513 struct ifnet *ifp = &sc->sc_ec.ec_if;
1514
1515 if (!sc->sc_attached)
1516 return 0;
1517
1518 ixl_stop(ifp, 1);
1519
1520 ixl_disable_other_intr(sc);
1521
1522 callout_stop(&sc->sc_stats_callout);
1523 ixl_work_wait(sc->sc_workq, &sc->sc_stats_task);
1524
1525 /* wait for ATQ handler */
1526 mutex_enter(&sc->sc_atq_lock);
1527 mutex_exit(&sc->sc_atq_lock);
1528
1529 ixl_work_wait(sc->sc_workq, &sc->sc_arq_task);
1530 ixl_work_wait(sc->sc_workq, &sc->sc_link_state_task);
1531
1532 if (sc->sc_workq != NULL) {
1533 ixl_workq_destroy(sc->sc_workq);
1534 sc->sc_workq = NULL;
1535 }
1536
1537 if (sc->sc_workq_txrx != NULL) {
1538 workqueue_destroy(sc->sc_workq_txrx);
1539 sc->sc_workq_txrx = NULL;
1540 }
1541
1542 ether_ifdetach(ifp);
1543 if_detach(ifp);
1544 ifmedia_fini(&sc->sc_media);
1545
1546 ixl_teardown_interrupts(sc);
1547 ixl_teardown_stats(sc);
1548 ixl_teardown_sysctls(sc);
1549
1550 ixl_queue_pairs_free(sc);
1551
1552 ixl_dmamem_free(sc, &sc->sc_scratch);
1553 ixl_hmc_free(sc);
1554
1555 /* shutdown */
1556 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1557 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1558 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1559 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1560
1561 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0);
1562 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0);
1563 ixl_wr(sc, sc->sc_aq_regs->atq_len, 0);
1564
1565 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0);
1566 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0);
1567 ixl_wr(sc, sc->sc_aq_regs->arq_len, 0);
1568
1569 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1570 0, IXL_DMA_LEN(&sc->sc_arq),
1571 BUS_DMASYNC_POSTREAD);
1572 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1573 0, IXL_DMA_LEN(&sc->sc_atq),
1574 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1575
1576 ixl_arq_unfill(sc);
1577
1578 ixl_dmamem_free(sc, &sc->sc_arq);
1579 ixl_dmamem_free(sc, &sc->sc_atq);
1580 ixl_dmamem_free(sc, &sc->sc_aqbuf);
1581
1582 cv_destroy(&sc->sc_atq_cv);
1583 mutex_destroy(&sc->sc_atq_lock);
1584
1585 if (sc->sc_mems != 0) {
1586 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
1587 sc->sc_mems = 0;
1588 }
1589
1590 mutex_destroy(&sc->sc_cfg_lock);
1591
1592 return 0;
1593 }
1594
1595 static int
1596 ixl_workqs_teardown(device_t self)
1597 {
1598 struct ixl_softc *sc = device_private(self);
1599
1600 if (sc->sc_workq != NULL) {
1601 ixl_workq_destroy(sc->sc_workq);
1602 sc->sc_workq = NULL;
1603 }
1604
1605 if (sc->sc_workq_txrx != NULL) {
1606 workqueue_destroy(sc->sc_workq_txrx);
1607 sc->sc_workq_txrx = NULL;
1608 }
1609
1610 return 0;
1611 }
1612
1613 static int
1614 ixl_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
1615 {
1616 struct ifnet *ifp = &ec->ec_if;
1617 struct ixl_softc *sc = ifp->if_softc;
1618 int rv;
1619
1620 if (!ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)) {
1621 return 0;
1622 }
1623
1624 if (set) {
1625 rv = ixl_add_macvlan(sc, sc->sc_enaddr, vid,
1626 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
1627 if (rv == 0) {
1628 rv = ixl_add_macvlan(sc, etherbroadcastaddr,
1629 vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
1630 }
1631 } else {
1632 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, vid,
1633 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
1634 (void)ixl_remove_macvlan(sc, etherbroadcastaddr, vid,
1635 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
1636 }
1637
1638 return rv;
1639 }
1640
1641 static void
1642 ixl_media_add(struct ixl_softc *sc)
1643 {
1644 struct ifmedia *ifm = &sc->sc_media;
1645 const struct ixl_phy_type *itype;
1646 unsigned int i;
1647 bool flow;
1648
1649 if (ISSET(sc->sc_phy_abilities,
1650 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX))) {
1651 flow = true;
1652 } else {
1653 flow = false;
1654 }
1655
1656 for (i = 0; i < __arraycount(ixl_phy_type_map); i++) {
1657 itype = &ixl_phy_type_map[i];
1658
1659 if (ISSET(sc->sc_phy_types, itype->phy_type)) {
1660 ifmedia_add(ifm,
1661 IFM_ETHER | IFM_FDX | itype->ifm_type, 0, NULL);
1662
1663 if (flow) {
1664 ifmedia_add(ifm,
1665 IFM_ETHER | IFM_FDX | IFM_FLOW |
1666 itype->ifm_type, 0, NULL);
1667 }
1668
1669 if (itype->ifm_type != IFM_100_TX)
1670 continue;
1671
1672 ifmedia_add(ifm, IFM_ETHER | itype->ifm_type,
1673 0, NULL);
1674 if (flow) {
1675 ifmedia_add(ifm,
1676 IFM_ETHER | IFM_FLOW | itype->ifm_type,
1677 0, NULL);
1678 }
1679 }
1680 }
1681 }
1682
1683 static void
1684 ixl_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1685 {
1686 struct ixl_softc *sc = ifp->if_softc;
1687
1688 ifmr->ifm_status = sc->sc_media_status;
1689 ifmr->ifm_active = sc->sc_media_active;
1690
1691 mutex_enter(&sc->sc_cfg_lock);
1692 if (ifp->if_link_state == LINK_STATE_UP)
1693 SET(ifmr->ifm_status, IFM_ACTIVE);
1694 mutex_exit(&sc->sc_cfg_lock);
1695 }
1696
1697 static int
1698 ixl_media_change(struct ifnet *ifp)
1699 {
1700 struct ixl_softc *sc = ifp->if_softc;
1701 struct ifmedia *ifm = &sc->sc_media;
1702 uint64_t ifm_active = sc->sc_media_active;
1703 uint8_t link_speed, abilities;
1704
1705 switch (IFM_SUBTYPE(ifm_active)) {
1706 case IFM_1000_SGMII:
1707 case IFM_1000_KX:
1708 case IFM_10G_KX4:
1709 case IFM_10G_KR:
1710 case IFM_40G_KR4:
1711 case IFM_20G_KR2:
1712 case IFM_25G_KR:
1713 /* backplanes */
1714 return EINVAL;
1715 }
1716
1717 abilities = IXL_PHY_ABILITY_AUTONEGO | IXL_PHY_ABILITY_LINKUP;
1718
1719 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1720 case IFM_AUTO:
1721 link_speed = sc->sc_phy_linkspeed;
1722 break;
1723 case IFM_NONE:
1724 link_speed = 0;
1725 CLR(abilities, IXL_PHY_ABILITY_LINKUP);
1726 break;
1727 default:
1728 link_speed = ixl_search_baudrate(
1729 ifmedia_baudrate(ifm->ifm_media));
1730 }
1731
1732 if (ISSET(abilities, IXL_PHY_ABILITY_LINKUP)) {
1733 if (ISSET(link_speed, sc->sc_phy_linkspeed) == 0)
1734 return EINVAL;
1735 }
1736
1737 if (ifm->ifm_media & IFM_FLOW) {
1738 abilities |= sc->sc_phy_abilities &
1739 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX);
1740 }
1741
1742 return ixl_set_phy_config(sc, link_speed, abilities, false);
1743 }
1744
1745 static void
1746 ixl_watchdog(struct ifnet *ifp)
1747 {
1748
1749 }
1750
1751 static void
1752 ixl_del_all_multiaddr(struct ixl_softc *sc)
1753 {
1754 struct ethercom *ec = &sc->sc_ec;
1755 struct ether_multi *enm;
1756 struct ether_multistep step;
1757
1758 ETHER_LOCK(ec);
1759 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1760 ETHER_NEXT_MULTI(step, enm)) {
1761 ixl_remove_macvlan(sc, enm->enm_addrlo, 0,
1762 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1763 }
1764 ETHER_UNLOCK(ec);
1765 }
1766
1767 static int
1768 ixl_add_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
1769 {
1770 struct ifnet *ifp = &sc->sc_ec.ec_if;
1771 int rv;
1772
1773 if (ISSET(ifp->if_flags, IFF_ALLMULTI))
1774 return 0;
1775
1776 if (memcmp(addrlo, addrhi, ETHER_ADDR_LEN) != 0) {
1777 ixl_del_all_multiaddr(sc);
1778 SET(ifp->if_flags, IFF_ALLMULTI);
1779 return ENETRESET;
1780 }
1781
1782 	/* multicast addresses cannot use the VLAN HWFILTER */
1783 rv = ixl_add_macvlan(sc, addrlo, 0,
1784 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
1785
1786 if (rv == ENOSPC) {
1787 ixl_del_all_multiaddr(sc);
1788 SET(ifp->if_flags, IFF_ALLMULTI);
1789 return ENETRESET;
1790 }
1791
1792 return rv;
1793 }
1794
1795 static int
1796 ixl_del_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
1797 {
1798 struct ifnet *ifp = &sc->sc_ec.ec_if;
1799 struct ethercom *ec = &sc->sc_ec;
1800 struct ether_multi *enm, *enm_last;
1801 struct ether_multistep step;
1802 int error, rv = 0;
1803
1804 if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
1805 ixl_remove_macvlan(sc, addrlo, 0,
1806 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1807 return 0;
1808 }
1809
1810 ETHER_LOCK(ec);
1811 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1812 ETHER_NEXT_MULTI(step, enm)) {
1813 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
1814 ETHER_ADDR_LEN) != 0) {
1815 goto out;
1816 }
1817 }
1818
1819 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1820 ETHER_NEXT_MULTI(step, enm)) {
1821 error = ixl_add_macvlan(sc, enm->enm_addrlo, 0,
1822 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
1823 if (error != 0)
1824 break;
1825 }
1826
1827 if (enm != NULL) {
1828 enm_last = enm;
1829 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1830 ETHER_NEXT_MULTI(step, enm)) {
1831 if (enm == enm_last)
1832 break;
1833
1834 ixl_remove_macvlan(sc, enm->enm_addrlo, 0,
1835 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1836 }
1837 } else {
1838 CLR(ifp->if_flags, IFF_ALLMULTI);
1839 rv = ENETRESET;
1840 }
1841
1842 out:
1843 ETHER_UNLOCK(ec);
1844 return rv;
1845 }
1846
1847 static int
1848 ixl_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1849 {
1850 struct ifreq *ifr = (struct ifreq *)data;
1851 struct ixl_softc *sc = (struct ixl_softc *)ifp->if_softc;
1852 const struct sockaddr *sa;
1853 uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
1854 int s, error = 0;
1855 unsigned int nmtu;
1856
1857 switch (cmd) {
1858 case SIOCSIFMTU:
1859 nmtu = ifr->ifr_mtu;
1860
1861 if (nmtu < IXL_MIN_MTU || nmtu > IXL_MAX_MTU) {
1862 error = EINVAL;
1863 break;
1864 }
1865 if (ifp->if_mtu != nmtu) {
1866 s = splnet();
1867 error = ether_ioctl(ifp, cmd, data);
1868 splx(s);
1869 if (error == ENETRESET)
1870 error = ixl_init(ifp);
1871 }
1872 break;
1873 case SIOCADDMULTI:
1874 sa = ifreq_getaddr(SIOCADDMULTI, ifr);
1875 if (ether_addmulti(sa, &sc->sc_ec) == ENETRESET) {
1876 error = ether_multiaddr(sa, addrlo, addrhi);
1877 if (error != 0)
1878 return error;
1879
1880 error = ixl_add_multi(sc, addrlo, addrhi);
1881 if (error != 0 && error != ENETRESET) {
1882 ether_delmulti(sa, &sc->sc_ec);
1883 error = EIO;
1884 }
1885 }
1886 break;
1887
1888 case SIOCDELMULTI:
1889 sa = ifreq_getaddr(SIOCDELMULTI, ifr);
1890 if (ether_delmulti(sa, &sc->sc_ec) == ENETRESET) {
1891 error = ether_multiaddr(sa, addrlo, addrhi);
1892 if (error != 0)
1893 return error;
1894
1895 error = ixl_del_multi(sc, addrlo, addrhi);
1896 }
1897 break;
1898
1899 default:
1900 s = splnet();
1901 error = ether_ioctl(ifp, cmd, data);
1902 splx(s);
1903 }
1904
1905 if (error == ENETRESET)
1906 error = ixl_iff(sc);
1907
1908 return error;
1909 }
1910
1911 static enum i40e_mac_type
1912 ixl_mactype(pci_product_id_t id)
1913 {
1914
1915 switch (id) {
1916 case PCI_PRODUCT_INTEL_XL710_SFP:
1917 case PCI_PRODUCT_INTEL_XL710_KX_B:
1918 case PCI_PRODUCT_INTEL_XL710_KX_C:
1919 case PCI_PRODUCT_INTEL_XL710_QSFP_A:
1920 case PCI_PRODUCT_INTEL_XL710_QSFP_B:
1921 case PCI_PRODUCT_INTEL_XL710_QSFP_C:
1922 case PCI_PRODUCT_INTEL_X710_10G_T:
1923 case PCI_PRODUCT_INTEL_XL710_20G_BP_1:
1924 case PCI_PRODUCT_INTEL_XL710_20G_BP_2:
1925 case PCI_PRODUCT_INTEL_X710_T4_10G:
1926 case PCI_PRODUCT_INTEL_XXV710_25G_BP:
1927 case PCI_PRODUCT_INTEL_XXV710_25G_SFP28:
1928 return I40E_MAC_XL710;
1929
1930 case PCI_PRODUCT_INTEL_X722_KX:
1931 case PCI_PRODUCT_INTEL_X722_QSFP:
1932 case PCI_PRODUCT_INTEL_X722_SFP:
1933 case PCI_PRODUCT_INTEL_X722_1G_BASET:
1934 case PCI_PRODUCT_INTEL_X722_10G_BASET:
1935 case PCI_PRODUCT_INTEL_X722_I_SFP:
1936 return I40E_MAC_X722;
1937 }
1938
1939 return I40E_MAC_GENERIC;
1940 }
1941
1942 static inline void *
1943 ixl_hmc_kva(struct ixl_softc *sc, enum ixl_hmc_types type, unsigned int i)
1944 {
1945 uint8_t *kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
1946 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
1947
1948 if (i >= e->hmc_count)
1949 return NULL;
1950
1951 kva += e->hmc_base;
1952 kva += i * e->hmc_size;
1953
1954 return kva;
1955 }
1956
1957 static inline size_t
1958 ixl_hmc_len(struct ixl_softc *sc, enum ixl_hmc_types type)
1959 {
1960 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
1961
1962 return e->hmc_size;
1963 }
1964
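/*
 * Per-queue interrupt control: enabling writes INTENA and CLEARPBA to
 * the queue's PFINT_DYN_CTLN register, disabling rewrites the register
 * with only the (no-ITR) index so that INTENA ends up cleared.
 */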
1965 static void
1966 ixl_enable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp)
1967 {
1968 struct ixl_rx_ring *rxr = qp->qp_rxr;
1969
1970 ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid),
1971 I40E_PFINT_DYN_CTLN_INTENA_MASK |
1972 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1973 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
1974 ixl_flush(sc);
1975 }
1976
1977 static void
1978 ixl_disable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp)
1979 {
1980 struct ixl_rx_ring *rxr = qp->qp_rxr;
1981
1982 ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid),
1983 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
1984 ixl_flush(sc);
1985 }
1986
1987 static void
1988 ixl_enable_other_intr(struct ixl_softc *sc)
1989 {
1990
1991 ixl_wr(sc, I40E_PFINT_DYN_CTL0,
1992 I40E_PFINT_DYN_CTL0_INTENA_MASK |
1993 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
1994 (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
1995 ixl_flush(sc);
1996 }
1997
1998 static void
1999 ixl_disable_other_intr(struct ixl_softc *sc)
2000 {
2001
2002 ixl_wr(sc, I40E_PFINT_DYN_CTL0,
2003 (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
2004 ixl_flush(sc);
2005 }
2006
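/*
 * Re-program the VSI and every queue pair from scratch: rewrite the
 * HMC TX/RX queue contexts, post the tail pointers, pre-fill the RX
 * rings and wait for the hardware to report each queue enabled.
 * Called with sc_cfg_lock held.
 */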
2007 static int
2008 ixl_reinit(struct ixl_softc *sc)
2009 {
2010 struct ixl_rx_ring *rxr;
2011 struct ixl_tx_ring *txr;
2012 unsigned int i;
2013 uint32_t reg;
2014
2015 KASSERT(mutex_owned(&sc->sc_cfg_lock));
2016
2017 if (ixl_get_vsi(sc) != 0)
2018 return EIO;
2019
2020 if (ixl_set_vsi(sc) != 0)
2021 return EIO;
2022
2023 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2024 txr = sc->sc_qps[i].qp_txr;
2025 rxr = sc->sc_qps[i].qp_rxr;
2026
2027 ixl_txr_config(sc, txr);
2028 ixl_rxr_config(sc, rxr);
2029 }
2030
2031 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
2032 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_PREWRITE);
2033
2034 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2035 txr = sc->sc_qps[i].qp_txr;
2036 rxr = sc->sc_qps[i].qp_rxr;
2037
2038 ixl_wr(sc, I40E_QTX_CTL(i), I40E_QTX_CTL_PF_QUEUE |
2039 (sc->sc_pf_id << I40E_QTX_CTL_PF_INDX_SHIFT));
2040 ixl_flush(sc);
2041
2042 ixl_wr(sc, txr->txr_tail, txr->txr_prod);
2043 ixl_wr(sc, rxr->rxr_tail, rxr->rxr_prod);
2044
2045 /* ixl_rxfill() needs lock held */
2046 mutex_enter(&rxr->rxr_lock);
2047 ixl_rxfill(sc, rxr);
2048 mutex_exit(&rxr->rxr_lock);
2049
2050 reg = ixl_rd(sc, I40E_QRX_ENA(i));
2051 SET(reg, I40E_QRX_ENA_QENA_REQ_MASK);
2052 ixl_wr(sc, I40E_QRX_ENA(i), reg);
2053 if (ixl_rxr_enabled(sc, rxr) != 0)
2054 goto stop;
2055
2056 ixl_txr_qdis(sc, txr, 1);
2057
2058 reg = ixl_rd(sc, I40E_QTX_ENA(i));
2059 SET(reg, I40E_QTX_ENA_QENA_REQ_MASK);
2060 ixl_wr(sc, I40E_QTX_ENA(i), reg);
2061
2062 if (ixl_txr_enabled(sc, txr) != 0)
2063 goto stop;
2064 }
2065
2066 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
2067 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE);
2068
2069 return 0;
2070
2071 stop:
2072 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
2073 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE);
2074
2075 return ETIMEDOUT;
2076 }
2077
2078 static int
2079 ixl_init_locked(struct ixl_softc *sc)
2080 {
2081 struct ifnet *ifp = &sc->sc_ec.ec_if;
2082 unsigned int i;
2083 int error, eccap_change;
2084
2085 KASSERT(mutex_owned(&sc->sc_cfg_lock));
2086
2087 if (ISSET(ifp->if_flags, IFF_RUNNING))
2088 ixl_stop_locked(sc);
2089
2090 if (sc->sc_dead) {
2091 return ENXIO;
2092 }
2093
2094 eccap_change = sc->sc_ec.ec_capenable ^ sc->sc_cur_ec_capenable;
2095 if (ISSET(eccap_change, ETHERCAP_VLAN_HWTAGGING))
2096 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWTAGGING;
2097
2098 if (ISSET(eccap_change, ETHERCAP_VLAN_HWFILTER)) {
2099 if (ixl_update_macvlan(sc) == 0) {
2100 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWFILTER;
2101 } else {
2102 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
2103 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER);
2104 }
2105 }
2106
2107 if (sc->sc_intrtype != PCI_INTR_TYPE_MSIX)
2108 sc->sc_nqueue_pairs = 1;
2109 else
2110 sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max;
2111
2112 error = ixl_reinit(sc);
2113 if (error) {
2114 ixl_stop_locked(sc);
2115 return error;
2116 }
2117
2118 SET(ifp->if_flags, IFF_RUNNING);
2119 CLR(ifp->if_flags, IFF_OACTIVE);
2120
2121 (void)ixl_get_link_status(sc);
2122
2123 ixl_config_rss(sc);
2124 ixl_config_queue_intr(sc);
2125
2126 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2127 ixl_enable_queue_intr(sc, &sc->sc_qps[i]);
2128 }
2129
2130 error = ixl_iff(sc);
2131 if (error) {
2132 ixl_stop_locked(sc);
2133 return error;
2134 }
2135
2136 return 0;
2137 }
2138
2139 static int
2140 ixl_init(struct ifnet *ifp)
2141 {
2142 struct ixl_softc *sc = ifp->if_softc;
2143 int error;
2144
2145 mutex_enter(&sc->sc_cfg_lock);
2146 error = ixl_init_locked(sc);
2147 mutex_exit(&sc->sc_cfg_lock);
2148
2149 return error;
2150 }
2151
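/*
 * Push the current interface flags into the hardware: program the VSI
 * unicast/multicast/broadcast promiscuous filters through the admin
 * queue and, if the lladdr has changed, replace the station MAC/VLAN
 * filter entry accordingly.
 */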
2152 static int
2153 ixl_iff(struct ixl_softc *sc)
2154 {
2155 struct ifnet *ifp = &sc->sc_ec.ec_if;
2156 struct ixl_atq iatq;
2157 struct ixl_aq_desc *iaq;
2158 struct ixl_aq_vsi_promisc_param *param;
2159 uint16_t flag_add, flag_del;
2160 int error;
2161
2162 if (!ISSET(ifp->if_flags, IFF_RUNNING))
2163 return 0;
2164
2165 memset(&iatq, 0, sizeof(iatq));
2166
2167 iaq = &iatq.iatq_desc;
2168 iaq->iaq_opcode = htole16(IXL_AQ_OP_SET_VSI_PROMISC);
2169
2170 param = (struct ixl_aq_vsi_promisc_param *)&iaq->iaq_param;
2171 param->flags = htole16(0);
2172
2173 if (!ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)
2174 || ISSET(ifp->if_flags, IFF_PROMISC)) {
2175 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_BCAST |
2176 IXL_AQ_VSI_PROMISC_FLAG_VLAN);
2177 }
2178
2179 if (ISSET(ifp->if_flags, IFF_PROMISC)) {
2180 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
2181 IXL_AQ_VSI_PROMISC_FLAG_MCAST);
2182 } else if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
2183 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_MCAST);
2184 }
2185 param->valid_flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
2186 IXL_AQ_VSI_PROMISC_FLAG_MCAST | IXL_AQ_VSI_PROMISC_FLAG_BCAST |
2187 IXL_AQ_VSI_PROMISC_FLAG_VLAN);
2188 param->seid = sc->sc_seid;
2189
2190 error = ixl_atq_exec(sc, &iatq);
2191 if (error)
2192 return error;
2193
2194 if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK))
2195 return EIO;
2196
2197 if (memcmp(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN) != 0) {
2198 if (ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)) {
2199 flag_add = IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH;
2200 flag_del = IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH;
2201 } else {
2202 flag_add = IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN;
2203 flag_del = IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN;
2204 }
2205
2206 ixl_remove_macvlan(sc, sc->sc_enaddr, 0, flag_del);
2207
2208 memcpy(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN);
2209 ixl_add_macvlan(sc, sc->sc_enaddr, 0, flag_add);
2210 }
2211 return 0;
2212 }
2213
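/*
 * Rendezvous with TX/RX processing that may still be running: taking
 * and releasing each ring lock ensures a handler already holding the
 * lock has finished, and workqueue_wait() drains a queued per-queue
 * work item, if any.
 */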
2214 static void
2215 ixl_stop_rendezvous(struct ixl_softc *sc)
2216 {
2217 struct ixl_tx_ring *txr;
2218 struct ixl_rx_ring *rxr;
2219 unsigned int i;
2220
2221 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2222 txr = sc->sc_qps[i].qp_txr;
2223 rxr = sc->sc_qps[i].qp_rxr;
2224
2225 mutex_enter(&txr->txr_lock);
2226 mutex_exit(&txr->txr_lock);
2227
2228 mutex_enter(&rxr->rxr_lock);
2229 mutex_exit(&rxr->rxr_lock);
2230
2231 sc->sc_qps[i].qp_workqueue = false;
2232 workqueue_wait(sc->sc_workq_txrx,
2233 &sc->sc_qps[i].qp_work);
2234 }
2235 }
2236
2237 static void
2238 ixl_stop_locked(struct ixl_softc *sc)
2239 {
2240 struct ifnet *ifp = &sc->sc_ec.ec_if;
2241 struct ixl_rx_ring *rxr;
2242 struct ixl_tx_ring *txr;
2243 unsigned int i;
2244 uint32_t reg;
2245
2246 KASSERT(mutex_owned(&sc->sc_cfg_lock));
2247
2248 CLR(ifp->if_flags, IFF_RUNNING | IFF_OACTIVE);
2249
2250 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2251 txr = sc->sc_qps[i].qp_txr;
2252 rxr = sc->sc_qps[i].qp_rxr;
2253
2254 ixl_disable_queue_intr(sc, &sc->sc_qps[i]);
2255
2256 mutex_enter(&txr->txr_lock);
2257 ixl_txr_qdis(sc, txr, 0);
2258 /* XXX wait at least 400 usec for all tx queues in one go */
2259 ixl_flush(sc);
2260 DELAY(500);
2261
2262 reg = ixl_rd(sc, I40E_QTX_ENA(i));
2263 CLR(reg, I40E_QTX_ENA_QENA_REQ_MASK);
2264 ixl_wr(sc, I40E_QTX_ENA(i), reg);
2265 		/* XXX wait 50ms from completion of the TX queue disable */
2266 ixl_flush(sc);
2267 DELAY(50);
2268
2269 if (ixl_txr_disabled(sc, txr) != 0) {
2270 mutex_exit(&txr->txr_lock);
2271 goto die;
2272 }
2273 mutex_exit(&txr->txr_lock);
2274
2275 mutex_enter(&rxr->rxr_lock);
2276 reg = ixl_rd(sc, I40E_QRX_ENA(i));
2277 CLR(reg, I40E_QRX_ENA_QENA_REQ_MASK);
2278 ixl_wr(sc, I40E_QRX_ENA(i), reg);
2279 /* XXX wait 50ms from completion of the RX queue disable */
2280 ixl_flush(sc);
2281 DELAY(50);
2282
2283 if (ixl_rxr_disabled(sc, rxr) != 0) {
2284 mutex_exit(&rxr->rxr_lock);
2285 goto die;
2286 }
2287 mutex_exit(&rxr->rxr_lock);
2288 }
2289
2290 ixl_stop_rendezvous(sc);
2291
2292 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2293 txr = sc->sc_qps[i].qp_txr;
2294 rxr = sc->sc_qps[i].qp_rxr;
2295
2296 mutex_enter(&txr->txr_lock);
2297 ixl_txr_unconfig(sc, txr);
2298 mutex_exit(&txr->txr_lock);
2299
2300 mutex_enter(&rxr->rxr_lock);
2301 ixl_rxr_unconfig(sc, rxr);
2302 mutex_exit(&rxr->rxr_lock);
2303
2304 ixl_txr_clean(sc, txr);
2305 ixl_rxr_clean(sc, rxr);
2306 }
2307
2308 return;
2309 die:
2310 sc->sc_dead = true;
2311 	log(LOG_CRIT, "%s: failed to shut down rings\n",
2312 device_xname(sc->sc_dev));
2313 return;
2314 }
2315
2316 static void
2317 ixl_stop(struct ifnet *ifp, int disable)
2318 {
2319 struct ixl_softc *sc = ifp->if_softc;
2320
2321 mutex_enter(&sc->sc_cfg_lock);
2322 ixl_stop_locked(sc);
2323 mutex_exit(&sc->sc_cfg_lock);
2324 }
2325
2326 static int
2327 ixl_queue_pairs_alloc(struct ixl_softc *sc)
2328 {
2329 struct ixl_queue_pair *qp;
2330 unsigned int i;
2331 size_t sz;
2332
2333 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2334 sc->sc_qps = kmem_zalloc(sz, KM_SLEEP);
2335
2336 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2337 qp = &sc->sc_qps[i];
2338
2339 qp->qp_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
2340 ixl_handle_queue, qp);
2341 if (qp->qp_si == NULL)
2342 goto free;
2343
2344 qp->qp_txr = ixl_txr_alloc(sc, i);
2345 if (qp->qp_txr == NULL)
2346 goto free;
2347
2348 qp->qp_rxr = ixl_rxr_alloc(sc, i);
2349 if (qp->qp_rxr == NULL)
2350 goto free;
2351
2352 qp->qp_sc = sc;
2353 snprintf(qp->qp_name, sizeof(qp->qp_name),
2354 "%s-TXRX%d", device_xname(sc->sc_dev), i);
2355 }
2356
2357 return 0;
2358 free:
2359 if (sc->sc_qps != NULL) {
2360 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2361 qp = &sc->sc_qps[i];
2362
2363 if (qp->qp_txr != NULL)
2364 ixl_txr_free(sc, qp->qp_txr);
2365 if (qp->qp_rxr != NULL)
2366 ixl_rxr_free(sc, qp->qp_rxr);
2367 if (qp->qp_si != NULL)
2368 softint_disestablish(qp->qp_si);
2369 }
2370
2371 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2372 kmem_free(sc->sc_qps, sz);
2373 sc->sc_qps = NULL;
2374 }
2375
2376 return -1;
2377 }
2378
2379 static void
2380 ixl_queue_pairs_free(struct ixl_softc *sc)
2381 {
2382 struct ixl_queue_pair *qp;
2383 unsigned int i;
2384 size_t sz;
2385
2386 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2387 qp = &sc->sc_qps[i];
2388 ixl_txr_free(sc, qp->qp_txr);
2389 ixl_rxr_free(sc, qp->qp_rxr);
2390 softint_disestablish(qp->qp_si);
2391 }
2392
2393 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2394 kmem_free(sc->sc_qps, sz);
2395 sc->sc_qps = NULL;
2396 }
2397
2398 static struct ixl_tx_ring *
2399 ixl_txr_alloc(struct ixl_softc *sc, unsigned int qid)
2400 {
2401 struct ixl_tx_ring *txr = NULL;
2402 struct ixl_tx_map *maps = NULL, *txm;
2403 unsigned int i;
2404
2405 txr = kmem_zalloc(sizeof(*txr), KM_SLEEP);
2406 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_tx_ring_ndescs,
2407 KM_SLEEP);
2408
2409 if (ixl_dmamem_alloc(sc, &txr->txr_mem,
2410 sizeof(struct ixl_tx_desc) * sc->sc_tx_ring_ndescs,
2411 IXL_TX_QUEUE_ALIGN) != 0)
2412 goto free;
2413
2414 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2415 txm = &maps[i];
2416
2417 if (bus_dmamap_create(sc->sc_dmat, IXL_TX_PKT_MAXSIZE,
2418 IXL_TX_PKT_DESCS, IXL_TX_PKT_MAXSIZE, 0,
2419 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &txm->txm_map) != 0)
2420 goto uncreate;
2421
2422 txm->txm_eop = -1;
2423 txm->txm_m = NULL;
2424 }
2425
2426 txr->txr_cons = txr->txr_prod = 0;
2427 txr->txr_maps = maps;
2428
2429 txr->txr_intrq = pcq_create(sc->sc_tx_ring_ndescs, KM_NOSLEEP);
2430 if (txr->txr_intrq == NULL)
2431 goto uncreate;
2432
2433 txr->txr_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
2434 ixl_deferred_transmit, txr);
2435 if (txr->txr_si == NULL)
2436 goto destroy_pcq;
2437
2438 txr->txr_tail = I40E_QTX_TAIL(qid);
2439 txr->txr_qid = qid;
2440 txr->txr_sc = sc;
2441 mutex_init(&txr->txr_lock, MUTEX_DEFAULT, IPL_NET);
2442
2443 return txr;
2444
2445 destroy_pcq:
2446 pcq_destroy(txr->txr_intrq);
2447 uncreate:
2448 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2449 txm = &maps[i];
2450
2451 if (txm->txm_map == NULL)
2452 continue;
2453
2454 bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2455 }
2456
2457 ixl_dmamem_free(sc, &txr->txr_mem);
2458 free:
2459 kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs);
2460 kmem_free(txr, sizeof(*txr));
2461
2462 return NULL;
2463 }
2464
2465 static void
2466 ixl_txr_qdis(struct ixl_softc *sc, struct ixl_tx_ring *txr, int enable)
2467 {
2468 unsigned int qid;
2469 bus_size_t reg;
2470 uint32_t r;
2471
2472 qid = txr->txr_qid + sc->sc_base_queue;
2473 reg = I40E_GLLAN_TXPRE_QDIS(qid / 128);
2474 qid %= 128;
2475
2476 r = ixl_rd(sc, reg);
2477 CLR(r, I40E_GLLAN_TXPRE_QDIS_QINDX_MASK);
2478 SET(r, qid << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
2479 SET(r, enable ? I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK :
2480 I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK);
2481 ixl_wr(sc, reg, r);
2482 }
2483
2484 static void
2485 ixl_txr_config(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2486 {
2487 struct ixl_hmc_txq txq;
2488 struct ixl_aq_vsi_data *data = IXL_DMA_KVA(&sc->sc_scratch);
2489 void *hmc;
2490
2491 memset(&txq, 0, sizeof(txq));
2492 txq.head = htole16(txr->txr_cons);
2493 txq.new_context = 1;
2494 txq.base = htole64(IXL_DMA_DVA(&txr->txr_mem) / IXL_HMC_TXQ_BASE_UNIT);
2495 txq.head_wb_ena = IXL_HMC_TXQ_DESC_WB;
2496 txq.qlen = htole16(sc->sc_tx_ring_ndescs);
2497 txq.tphrdesc_ena = 0;
2498 txq.tphrpacket_ena = 0;
2499 txq.tphwdesc_ena = 0;
2500 txq.rdylist = data->qs_handle[0];
2501
2502 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2503 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2504 ixl_hmc_pack(hmc, &txq, ixl_hmc_pack_txq,
2505 __arraycount(ixl_hmc_pack_txq));
2506 }
2507
2508 static void
2509 ixl_txr_unconfig(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2510 {
2511 void *hmc;
2512
2513 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2514 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2515 txr->txr_cons = txr->txr_prod = 0;
2516 }
2517
2518 static void
2519 ixl_txr_clean(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2520 {
2521 struct ixl_tx_map *maps, *txm;
2522 bus_dmamap_t map;
2523 unsigned int i;
2524
2525 maps = txr->txr_maps;
2526 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2527 txm = &maps[i];
2528
2529 if (txm->txm_m == NULL)
2530 continue;
2531
2532 map = txm->txm_map;
2533 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2534 BUS_DMASYNC_POSTWRITE);
2535 bus_dmamap_unload(sc->sc_dmat, map);
2536
2537 m_freem(txm->txm_m);
2538 txm->txm_m = NULL;
2539 }
2540 }
2541
2542 static int
2543 ixl_txr_enabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2544 {
2545 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2546 uint32_t reg;
2547 int i;
2548
2549 for (i = 0; i < 10; i++) {
2550 reg = ixl_rd(sc, ena);
2551 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK))
2552 return 0;
2553
2554 delaymsec(10);
2555 }
2556
2557 return ETIMEDOUT;
2558 }
2559
2560 static int
2561 ixl_txr_disabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2562 {
2563 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2564 uint32_t reg;
2565 int i;
2566
2567 KASSERT(mutex_owned(&txr->txr_lock));
2568
2569 for (i = 0; i < 20; i++) {
2570 reg = ixl_rd(sc, ena);
2571 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK) == 0)
2572 return 0;
2573
2574 delaymsec(10);
2575 }
2576
2577 return ETIMEDOUT;
2578 }
2579
2580 static void
2581 ixl_txr_free(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2582 {
2583 struct ixl_tx_map *maps, *txm;
2584 struct mbuf *m;
2585 unsigned int i;
2586
2587 softint_disestablish(txr->txr_si);
2588 while ((m = pcq_get(txr->txr_intrq)) != NULL)
2589 m_freem(m);
2590 pcq_destroy(txr->txr_intrq);
2591
2592 maps = txr->txr_maps;
2593 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2594 txm = &maps[i];
2595
2596 bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2597 }
2598
2599 ixl_dmamem_free(sc, &txr->txr_mem);
2600 mutex_destroy(&txr->txr_lock);
2601 kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs);
2602 kmem_free(txr, sizeof(*txr));
2603 }
2604
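/*
 * DMA-map an outgoing mbuf chain.  If the chain has too many segments
 * (EFBIG), defragment it once with m_defrag() and retry; both the
 * defrag and the failure case are counted in the per-ring evcnts.
 */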
2605 static inline int
2606 ixl_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf **m0,
2607 struct ixl_tx_ring *txr)
2608 {
2609 struct mbuf *m;
2610 int error;
2611
2612 KASSERT(mutex_owned(&txr->txr_lock));
2613
2614 m = *m0;
2615
2616 error = bus_dmamap_load_mbuf(dmat, map, m,
2617 BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT);
2618 if (error != EFBIG)
2619 return error;
2620
2621 m = m_defrag(m, M_DONTWAIT);
2622 if (m != NULL) {
2623 *m0 = m;
2624 txr->txr_defragged.ev_count++;
2625
2626 error = bus_dmamap_load_mbuf(dmat, map, m,
2627 BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT);
2628 } else {
2629 txr->txr_defrag_failed.ev_count++;
2630 error = ENOBUFS;
2631 }
2632
2633 return error;
2634 }
2635
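/*
 * Translate the mbuf checksum/TSO flags into the TX data descriptor
 * offload fields.  MACLEN is encoded in 2-byte units, IPLEN and L4LEN
 * in 4-byte units, hence the shifts below.
 */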
2636 static inline int
2637 ixl_tx_setup_offloads(struct mbuf *m, uint64_t *cmd_txd)
2638 {
2639 struct ether_header *eh;
2640 size_t len;
2641 uint64_t cmd;
2642
2643 cmd = 0;
2644
2645 eh = mtod(m, struct ether_header *);
2646 switch (htons(eh->ether_type)) {
2647 case ETHERTYPE_IP:
2648 case ETHERTYPE_IPV6:
2649 len = ETHER_HDR_LEN;
2650 break;
2651 case ETHERTYPE_VLAN:
2652 len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2653 break;
2654 default:
2655 len = 0;
2656 }
2657 cmd |= ((len >> 1) << IXL_TX_DESC_MACLEN_SHIFT);
2658
2659 if (m->m_pkthdr.csum_flags &
2660 (M_CSUM_TSOv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
2661 cmd |= IXL_TX_DESC_CMD_IIPT_IPV4;
2662 }
2663 if (m->m_pkthdr.csum_flags & M_CSUM_IPv4) {
2664 cmd |= IXL_TX_DESC_CMD_IIPT_IPV4_CSUM;
2665 }
2666
2667 if (m->m_pkthdr.csum_flags &
2668 (M_CSUM_TSOv6 | M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
2669 cmd |= IXL_TX_DESC_CMD_IIPT_IPV6;
2670 }
2671
2672 switch (cmd & IXL_TX_DESC_CMD_IIPT_MASK) {
2673 case IXL_TX_DESC_CMD_IIPT_IPV4:
2674 case IXL_TX_DESC_CMD_IIPT_IPV4_CSUM:
2675 len = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data);
2676 break;
2677 case IXL_TX_DESC_CMD_IIPT_IPV6:
2678 len = M_CSUM_DATA_IPv6_IPHL(m->m_pkthdr.csum_data);
2679 break;
2680 default:
2681 len = 0;
2682 }
2683 cmd |= ((len >> 2) << IXL_TX_DESC_IPLEN_SHIFT);
2684
2685 if (m->m_pkthdr.csum_flags &
2686 (M_CSUM_TSOv4 | M_CSUM_TSOv6 | M_CSUM_TCPv4 | M_CSUM_TCPv6)) {
2687 len = sizeof(struct tcphdr);
2688 cmd |= IXL_TX_DESC_CMD_L4T_EOFT_TCP;
2689 } else if (m->m_pkthdr.csum_flags & (M_CSUM_UDPv4 | M_CSUM_UDPv6)) {
2690 len = sizeof(struct udphdr);
2691 cmd |= IXL_TX_DESC_CMD_L4T_EOFT_UDP;
2692 } else {
2693 len = 0;
2694 }
2695 cmd |= ((len >> 2) << IXL_TX_DESC_L4LEN_SHIFT);
2696
2697 *cmd_txd |= cmd;
2698 return 0;
2699 }
2700
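/*
 * Enqueue packets onto the TX ring, either from if_snd (ixl_start
 * path) or from the per-queue pcq (if_transmit path).  A packet may
 * use several data descriptors; the last one gets EOP|RS so that
 * ixl_txeof() can reclaim the whole packet at once.
 */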
2701 static void
2702 ixl_tx_common_locked(struct ifnet *ifp, struct ixl_tx_ring *txr,
2703 bool is_transmit)
2704 {
2705 struct ixl_softc *sc = ifp->if_softc;
2706 struct ixl_tx_desc *ring, *txd;
2707 struct ixl_tx_map *txm;
2708 bus_dmamap_t map;
2709 struct mbuf *m;
2710 uint64_t cmd, cmd_txd;
2711 unsigned int prod, free, last, i;
2712 unsigned int mask;
2713 int post = 0;
2714
2715 KASSERT(mutex_owned(&txr->txr_lock));
2716
2717 if (ifp->if_link_state != LINK_STATE_UP
2718 || !ISSET(ifp->if_flags, IFF_RUNNING)
2719 || (!is_transmit && ISSET(ifp->if_flags, IFF_OACTIVE))) {
2720 if (!is_transmit)
2721 IFQ_PURGE(&ifp->if_snd);
2722 return;
2723 }
2724
2725 prod = txr->txr_prod;
2726 free = txr->txr_cons;
2727 if (free <= prod)
2728 free += sc->sc_tx_ring_ndescs;
2729 free -= prod;
2730
2731 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2732 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTWRITE);
2733
2734 ring = IXL_DMA_KVA(&txr->txr_mem);
2735 mask = sc->sc_tx_ring_ndescs - 1;
2736 last = prod;
2737 cmd = 0;
2738 txd = NULL;
2739
2740 for (;;) {
2741 if (free <= IXL_TX_PKT_DESCS) {
2742 if (!is_transmit)
2743 SET(ifp->if_flags, IFF_OACTIVE);
2744 break;
2745 }
2746
2747 if (is_transmit)
2748 m = pcq_get(txr->txr_intrq);
2749 else
2750 IFQ_DEQUEUE(&ifp->if_snd, m);
2751
2752 if (m == NULL)
2753 break;
2754
2755 txm = &txr->txr_maps[prod];
2756 map = txm->txm_map;
2757
2758 if (ixl_load_mbuf(sc->sc_dmat, map, &m, txr) != 0) {
2759 if_statinc(ifp, if_oerrors);
2760 m_freem(m);
2761 continue;
2762 }
2763
2764 cmd_txd = 0;
2765 if (m->m_pkthdr.csum_flags & IXL_CSUM_ALL_OFFLOAD) {
2766 ixl_tx_setup_offloads(m, &cmd_txd);
2767 }
2768
2769 if (vlan_has_tag(m)) {
2770 cmd_txd |= (uint64_t)vlan_get_tag(m) <<
2771 IXL_TX_DESC_L2TAG1_SHIFT;
2772 cmd_txd |= IXL_TX_DESC_CMD_IL2TAG1;
2773 }
2774
2775 bus_dmamap_sync(sc->sc_dmat, map, 0,
2776 map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2777
2778 for (i = 0; i < (unsigned int)map->dm_nsegs; i++) {
2779 txd = &ring[prod];
2780
2781 cmd = (uint64_t)map->dm_segs[i].ds_len <<
2782 IXL_TX_DESC_BSIZE_SHIFT;
2783 cmd |= IXL_TX_DESC_DTYPE_DATA | IXL_TX_DESC_CMD_ICRC;
2784 cmd |= cmd_txd;
2785
2786 txd->addr = htole64(map->dm_segs[i].ds_addr);
2787 txd->cmd = htole64(cmd);
2788
2789 last = prod;
2790
2791 prod++;
2792 prod &= mask;
2793 }
2794 cmd |= IXL_TX_DESC_CMD_EOP | IXL_TX_DESC_CMD_RS;
2795 txd->cmd = htole64(cmd);
2796
2797 txm->txm_m = m;
2798 txm->txm_eop = last;
2799
2800 bpf_mtap(ifp, m, BPF_D_OUT);
2801
2802 free -= i;
2803 post = 1;
2804 }
2805
2806 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2807 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREWRITE);
2808
2809 if (post) {
2810 txr->txr_prod = prod;
2811 ixl_wr(sc, txr->txr_tail, prod);
2812 }
2813 }
2814
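/*
 * Reclaim completed TX descriptors, up to txlimit packets: unload and
 * free the mbufs, update the interface counters and, on queue 0,
 * clear IFF_OACTIVE and kick the deferred start.  Returns non-zero
 * when more work remains.
 */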
2815 static int
2816 ixl_txeof(struct ixl_softc *sc, struct ixl_tx_ring *txr, u_int txlimit)
2817 {
2818 struct ifnet *ifp = &sc->sc_ec.ec_if;
2819 struct ixl_tx_desc *ring, *txd;
2820 struct ixl_tx_map *txm;
2821 struct mbuf *m;
2822 bus_dmamap_t map;
2823 unsigned int cons, prod, last;
2824 unsigned int mask;
2825 uint64_t dtype;
2826 int done = 0, more = 0;
2827
2828 KASSERT(mutex_owned(&txr->txr_lock));
2829
2830 prod = txr->txr_prod;
2831 cons = txr->txr_cons;
2832
2833 if (cons == prod)
2834 return 0;
2835
2836 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2837 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTREAD);
2838
2839 ring = IXL_DMA_KVA(&txr->txr_mem);
2840 mask = sc->sc_tx_ring_ndescs - 1;
2841
2842 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
2843
2844 do {
2845 if (txlimit-- <= 0) {
2846 more = 1;
2847 break;
2848 }
2849
2850 txm = &txr->txr_maps[cons];
2851 last = txm->txm_eop;
2852 txd = &ring[last];
2853
2854 dtype = txd->cmd & htole64(IXL_TX_DESC_DTYPE_MASK);
2855 if (dtype != htole64(IXL_TX_DESC_DTYPE_DONE))
2856 break;
2857
2858 map = txm->txm_map;
2859
2860 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2861 BUS_DMASYNC_POSTWRITE);
2862 bus_dmamap_unload(sc->sc_dmat, map);
2863
2864 m = txm->txm_m;
2865 if (m != NULL) {
2866 if_statinc_ref(nsr, if_opackets);
2867 if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
2868 if (ISSET(m->m_flags, M_MCAST))
2869 if_statinc_ref(nsr, if_omcasts);
2870 m_freem(m);
2871 }
2872
2873 txm->txm_m = NULL;
2874 txm->txm_eop = -1;
2875
2876 cons = last + 1;
2877 cons &= mask;
2878 done = 1;
2879 } while (cons != prod);
2880
2881 IF_STAT_PUTREF(ifp);
2882
2883 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2884 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREREAD);
2885
2886 txr->txr_cons = cons;
2887
2888 if (done) {
2889 softint_schedule(txr->txr_si);
2890 if (txr->txr_qid == 0) {
2891 CLR(ifp->if_flags, IFF_OACTIVE);
2892 if_schedule_deferred_start(ifp);
2893 }
2894 }
2895
2896 return more;
2897 }
2898
2899 static void
2900 ixl_start(struct ifnet *ifp)
2901 {
2902 struct ixl_softc *sc;
2903 struct ixl_tx_ring *txr;
2904
2905 sc = ifp->if_softc;
2906 txr = sc->sc_qps[0].qp_txr;
2907
2908 mutex_enter(&txr->txr_lock);
2909 ixl_tx_common_locked(ifp, txr, false);
2910 mutex_exit(&txr->txr_lock);
2911 }
2912
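/*
 * Pick a TX queue for if_transmit by taking the current CPU index
 * modulo the number of active queue pairs; the mbuf argument is
 * currently unused.
 */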
2913 static inline unsigned int
2914 ixl_select_txqueue(struct ixl_softc *sc, struct mbuf *m)
2915 {
2916 u_int cpuid;
2917
2918 cpuid = cpu_index(curcpu());
2919
2920 return (unsigned int)(cpuid % sc->sc_nqueue_pairs);
2921 }
2922
2923 static int
2924 ixl_transmit(struct ifnet *ifp, struct mbuf *m)
2925 {
2926 struct ixl_softc *sc;
2927 struct ixl_tx_ring *txr;
2928 unsigned int qid;
2929
2930 sc = ifp->if_softc;
2931 qid = ixl_select_txqueue(sc, m);
2932
2933 txr = sc->sc_qps[qid].qp_txr;
2934
2935 if (__predict_false(!pcq_put(txr->txr_intrq, m))) {
2936 mutex_enter(&txr->txr_lock);
2937 txr->txr_pcqdrop.ev_count++;
2938 mutex_exit(&txr->txr_lock);
2939
2940 m_freem(m);
2941 return ENOBUFS;
2942 }
2943
2944 if (mutex_tryenter(&txr->txr_lock)) {
2945 ixl_tx_common_locked(ifp, txr, true);
2946 mutex_exit(&txr->txr_lock);
2947 } else {
2948 kpreempt_disable();
2949 softint_schedule(txr->txr_si);
2950 kpreempt_enable();
2951 }
2952
2953 return 0;
2954 }
2955
2956 static void
2957 ixl_deferred_transmit(void *xtxr)
2958 {
2959 struct ixl_tx_ring *txr = xtxr;
2960 struct ixl_softc *sc = txr->txr_sc;
2961 struct ifnet *ifp = &sc->sc_ec.ec_if;
2962
2963 mutex_enter(&txr->txr_lock);
2964 txr->txr_transmitdef.ev_count++;
2965 if (pcq_peek(txr->txr_intrq) != NULL)
2966 ixl_tx_common_locked(ifp, txr, true);
2967 mutex_exit(&txr->txr_lock);
2968 }
2969
2970 static struct ixl_rx_ring *
2971 ixl_rxr_alloc(struct ixl_softc *sc, unsigned int qid)
2972 {
2973 struct ixl_rx_ring *rxr = NULL;
2974 struct ixl_rx_map *maps = NULL, *rxm;
2975 unsigned int i;
2976
2977 rxr = kmem_zalloc(sizeof(*rxr), KM_SLEEP);
2978 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_rx_ring_ndescs,
2979 KM_SLEEP);
2980
2981 if (ixl_dmamem_alloc(sc, &rxr->rxr_mem,
2982 sizeof(struct ixl_rx_rd_desc_32) * sc->sc_rx_ring_ndescs,
2983 IXL_RX_QUEUE_ALIGN) != 0)
2984 goto free;
2985
2986 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2987 rxm = &maps[i];
2988
2989 if (bus_dmamap_create(sc->sc_dmat,
2990 IXL_MCLBYTES, 1, IXL_MCLBYTES, 0,
2991 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &rxm->rxm_map) != 0)
2992 goto uncreate;
2993
2994 rxm->rxm_m = NULL;
2995 }
2996
2997 rxr->rxr_cons = rxr->rxr_prod = 0;
2998 rxr->rxr_m_head = NULL;
2999 rxr->rxr_m_tail = &rxr->rxr_m_head;
3000 rxr->rxr_maps = maps;
3001
3002 rxr->rxr_tail = I40E_QRX_TAIL(qid);
3003 rxr->rxr_qid = qid;
3004 mutex_init(&rxr->rxr_lock, MUTEX_DEFAULT, IPL_NET);
3005
3006 return rxr;
3007
3008 uncreate:
3009 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
3010 rxm = &maps[i];
3011
3012 if (rxm->rxm_map == NULL)
3013 continue;
3014
3015 bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
3016 }
3017
3018 ixl_dmamem_free(sc, &rxr->rxr_mem);
3019 free:
3020 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs);
3021 kmem_free(rxr, sizeof(*rxr));
3022
3023 return NULL;
3024 }
3025
3026 static void
3027 ixl_rxr_clean(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3028 {
3029 struct ixl_rx_map *maps, *rxm;
3030 bus_dmamap_t map;
3031 unsigned int i;
3032
3033 maps = rxr->rxr_maps;
3034 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
3035 rxm = &maps[i];
3036
3037 if (rxm->rxm_m == NULL)
3038 continue;
3039
3040 map = rxm->rxm_map;
3041 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3042 BUS_DMASYNC_POSTWRITE);
3043 bus_dmamap_unload(sc->sc_dmat, map);
3044
3045 m_freem(rxm->rxm_m);
3046 rxm->rxm_m = NULL;
3047 }
3048
3049 m_freem(rxr->rxr_m_head);
3050 rxr->rxr_m_head = NULL;
3051 rxr->rxr_m_tail = &rxr->rxr_m_head;
3052
3053 rxr->rxr_prod = rxr->rxr_cons = 0;
3054 }
3055
3056 static int
3057 ixl_rxr_enabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3058 {
3059 bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
3060 uint32_t reg;
3061 int i;
3062
3063 for (i = 0; i < 10; i++) {
3064 reg = ixl_rd(sc, ena);
3065 if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK))
3066 return 0;
3067
3068 delaymsec(10);
3069 }
3070
3071 return ETIMEDOUT;
3072 }
3073
3074 static int
3075 ixl_rxr_disabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3076 {
3077 bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
3078 uint32_t reg;
3079 int i;
3080
3081 KASSERT(mutex_owned(&rxr->rxr_lock));
3082
3083 for (i = 0; i < 20; i++) {
3084 reg = ixl_rd(sc, ena);
3085 if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK) == 0)
3086 return 0;
3087
3088 delaymsec(10);
3089 }
3090
3091 return ETIMEDOUT;
3092 }
3093
3094 static void
3095 ixl_rxr_config(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3096 {
3097 struct ixl_hmc_rxq rxq;
3098 struct ifnet *ifp = &sc->sc_ec.ec_if;
3099 uint16_t rxmax;
3100 void *hmc;
3101
3102 memset(&rxq, 0, sizeof(rxq));
3103 rxmax = ifp->if_mtu + IXL_MTU_ETHERLEN;
3104
3105 rxq.head = htole16(rxr->rxr_cons);
3106 rxq.base = htole64(IXL_DMA_DVA(&rxr->rxr_mem) / IXL_HMC_RXQ_BASE_UNIT);
3107 rxq.qlen = htole16(sc->sc_rx_ring_ndescs);
3108 rxq.dbuff = htole16(IXL_MCLBYTES / IXL_HMC_RXQ_DBUFF_UNIT);
3109 rxq.hbuff = 0;
3110 rxq.dtype = IXL_HMC_RXQ_DTYPE_NOSPLIT;
3111 rxq.dsize = IXL_HMC_RXQ_DSIZE_32;
3112 rxq.crcstrip = 1;
3113 rxq.l2sel = 1;
3114 rxq.showiv = 1;
3115 rxq.rxmax = htole16(rxmax);
3116 rxq.tphrdesc_ena = 0;
3117 rxq.tphwdesc_ena = 0;
3118 rxq.tphdata_ena = 0;
3119 rxq.tphhead_ena = 0;
3120 rxq.lrxqthresh = 0;
3121 rxq.prefena = 1;
3122
3123 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
3124 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
3125 ixl_hmc_pack(hmc, &rxq, ixl_hmc_pack_rxq,
3126 __arraycount(ixl_hmc_pack_rxq));
3127 }
3128
3129 static void
3130 ixl_rxr_unconfig(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3131 {
3132 void *hmc;
3133
3134 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
3135 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
3136 rxr->rxr_cons = rxr->rxr_prod = 0;
3137 }
3138
3139 static void
3140 ixl_rxr_free(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3141 {
3142 struct ixl_rx_map *maps, *rxm;
3143 unsigned int i;
3144
3145 maps = rxr->rxr_maps;
3146 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
3147 rxm = &maps[i];
3148
3149 bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
3150 }
3151
3152 ixl_dmamem_free(sc, &rxr->rxr_mem);
3153 mutex_destroy(&rxr->rxr_lock);
3154 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs);
3155 kmem_free(rxr, sizeof(*rxr));
3156 }
3157
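/*
 * Convert the RX descriptor packet type and error bits into mbuf
 * checksum flags: record which checksums the hardware verified and,
 * when IPE/L4E is set, mark the corresponding checksum as bad.
 */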
3158 static inline void
3159 ixl_rx_csum(struct mbuf *m, uint64_t qword)
3160 {
3161 int flags_mask;
3162
3163 if (!ISSET(qword, IXL_RX_DESC_L3L4P)) {
3164 /* No L3 or L4 checksum was calculated */
3165 return;
3166 }
3167
3168 switch (__SHIFTOUT(qword, IXL_RX_DESC_PTYPE_MASK)) {
3169 case IXL_RX_DESC_PTYPE_IPV4FRAG:
3170 case IXL_RX_DESC_PTYPE_IPV4:
3171 case IXL_RX_DESC_PTYPE_SCTPV4:
3172 case IXL_RX_DESC_PTYPE_ICMPV4:
3173 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
3174 break;
3175 case IXL_RX_DESC_PTYPE_TCPV4:
3176 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
3177 flags_mask |= M_CSUM_TCPv4 | M_CSUM_TCP_UDP_BAD;
3178 break;
3179 case IXL_RX_DESC_PTYPE_UDPV4:
3180 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
3181 flags_mask |= M_CSUM_UDPv4 | M_CSUM_TCP_UDP_BAD;
3182 break;
3183 case IXL_RX_DESC_PTYPE_TCPV6:
3184 flags_mask = M_CSUM_TCPv6 | M_CSUM_TCP_UDP_BAD;
3185 break;
3186 case IXL_RX_DESC_PTYPE_UDPV6:
3187 flags_mask = M_CSUM_UDPv6 | M_CSUM_TCP_UDP_BAD;
3188 break;
3189 default:
3190 flags_mask = 0;
3191 }
3192
3193 m->m_pkthdr.csum_flags |= (flags_mask & (M_CSUM_IPv4 |
3194 M_CSUM_TCPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv4 | M_CSUM_UDPv6));
3195
3196 if (ISSET(qword, IXL_RX_DESC_IPE)) {
3197 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_IPv4_BAD);
3198 }
3199
3200 if (ISSET(qword, IXL_RX_DESC_L4E)) {
3201 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_TCP_UDP_BAD);
3202 }
3203 }
3204
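/*
 * Harvest received packets, up to rxlimit descriptors: reassemble
 * multi-descriptor frames via rxr_m_head/rxr_m_tail, apply VLAN and
 * checksum metadata on EOP and refill the ring when done.  Returns
 * non-zero when more work remains.
 */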
3205 static int
3206 ixl_rxeof(struct ixl_softc *sc, struct ixl_rx_ring *rxr, u_int rxlimit)
3207 {
3208 struct ifnet *ifp = &sc->sc_ec.ec_if;
3209 struct ixl_rx_wb_desc_32 *ring, *rxd;
3210 struct ixl_rx_map *rxm;
3211 bus_dmamap_t map;
3212 unsigned int cons, prod;
3213 struct mbuf *m;
3214 uint64_t word, word0;
3215 unsigned int len;
3216 unsigned int mask;
3217 int done = 0, more = 0;
3218
3219 KASSERT(mutex_owned(&rxr->rxr_lock));
3220
3221 if (!ISSET(ifp->if_flags, IFF_RUNNING))
3222 return 0;
3223
3224 prod = rxr->rxr_prod;
3225 cons = rxr->rxr_cons;
3226
3227 if (cons == prod)
3228 return 0;
3229
3230 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
3231 0, IXL_DMA_LEN(&rxr->rxr_mem),
3232 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3233
3234 ring = IXL_DMA_KVA(&rxr->rxr_mem);
3235 mask = sc->sc_rx_ring_ndescs - 1;
3236
3237 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
3238
3239 do {
3240 if (rxlimit-- <= 0) {
3241 more = 1;
3242 break;
3243 }
3244
3245 rxd = &ring[cons];
3246
3247 word = le64toh(rxd->qword1);
3248
3249 if (!ISSET(word, IXL_RX_DESC_DD))
3250 break;
3251
3252 rxm = &rxr->rxr_maps[cons];
3253
3254 map = rxm->rxm_map;
3255 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3256 BUS_DMASYNC_POSTREAD);
3257 bus_dmamap_unload(sc->sc_dmat, map);
3258
3259 m = rxm->rxm_m;
3260 rxm->rxm_m = NULL;
3261
3262 KASSERT(m != NULL);
3263
3264 len = (word & IXL_RX_DESC_PLEN_MASK) >> IXL_RX_DESC_PLEN_SHIFT;
3265 m->m_len = len;
3266 m->m_pkthdr.len = 0;
3267
3268 m->m_next = NULL;
3269 *rxr->rxr_m_tail = m;
3270 rxr->rxr_m_tail = &m->m_next;
3271
3272 m = rxr->rxr_m_head;
3273 m->m_pkthdr.len += len;
3274
3275 if (ISSET(word, IXL_RX_DESC_EOP)) {
3276 word0 = le64toh(rxd->qword0);
3277
3278 if (ISSET(word, IXL_RX_DESC_L2TAG1P)) {
3279 vlan_set_tag(m,
3280 __SHIFTOUT(word0, IXL_RX_DESC_L2TAG1_MASK));
3281 }
3282
3283 if ((ifp->if_capenable & IXL_IFCAP_RXCSUM) != 0)
3284 ixl_rx_csum(m, word);
3285
3286 if (!ISSET(word,
3287 IXL_RX_DESC_RXE | IXL_RX_DESC_OVERSIZE)) {
3288 m_set_rcvif(m, ifp);
3289 if_statinc_ref(nsr, if_ipackets);
3290 if_statadd_ref(nsr, if_ibytes,
3291 m->m_pkthdr.len);
3292 if_percpuq_enqueue(ifp->if_percpuq, m);
3293 } else {
3294 if_statinc_ref(nsr, if_ierrors);
3295 m_freem(m);
3296 }
3297
3298 rxr->rxr_m_head = NULL;
3299 rxr->rxr_m_tail = &rxr->rxr_m_head;
3300 }
3301
3302 cons++;
3303 cons &= mask;
3304
3305 done = 1;
3306 } while (cons != prod);
3307
3308 if (done) {
3309 rxr->rxr_cons = cons;
3310 if (ixl_rxfill(sc, rxr) == -1)
3311 if_statinc_ref(nsr, if_iqdrops);
3312 }
3313
3314 IF_STAT_PUTREF(ifp);
3315
3316 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
3317 0, IXL_DMA_LEN(&rxr->rxr_mem),
3318 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3319
3320 return more;
3321 }
3322
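/*
 * Refill the RX ring: allocate an mbuf cluster for every unrefreshed
 * slot, DMA-map it, write its physical address into the descriptor
 * and finally bump the hardware tail register.  Returns -1 if an
 * allocation or mapping failed so the caller can count an iqdrop.
 */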
3323 static int
3324 ixl_rxfill(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3325 {
3326 struct ixl_rx_rd_desc_32 *ring, *rxd;
3327 struct ixl_rx_map *rxm;
3328 bus_dmamap_t map;
3329 struct mbuf *m;
3330 unsigned int prod;
3331 unsigned int slots;
3332 unsigned int mask;
3333 int post = 0, error = 0;
3334
3335 KASSERT(mutex_owned(&rxr->rxr_lock));
3336
3337 prod = rxr->rxr_prod;
3338 slots = ixl_rxr_unrefreshed(rxr->rxr_prod, rxr->rxr_cons,
3339 sc->sc_rx_ring_ndescs);
3340
3341 ring = IXL_DMA_KVA(&rxr->rxr_mem);
3342 mask = sc->sc_rx_ring_ndescs - 1;
3343
3344 if (__predict_false(slots <= 0))
3345 return -1;
3346
3347 do {
3348 rxm = &rxr->rxr_maps[prod];
3349
3350 MGETHDR(m, M_DONTWAIT, MT_DATA);
3351 if (m == NULL) {
3352 rxr->rxr_mgethdr_failed.ev_count++;
3353 error = -1;
3354 break;
3355 }
3356
3357 MCLGET(m, M_DONTWAIT);
3358 if (!ISSET(m->m_flags, M_EXT)) {
3359 rxr->rxr_mgetcl_failed.ev_count++;
3360 error = -1;
3361 m_freem(m);
3362 break;
3363 }
3364
3365 m->m_len = m->m_pkthdr.len = MCLBYTES;
3366 m_adj(m, ETHER_ALIGN);
3367
3368 map = rxm->rxm_map;
3369
3370 if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
3371 BUS_DMA_READ | BUS_DMA_NOWAIT) != 0) {
3372 rxr->rxr_mbuf_load_failed.ev_count++;
3373 error = -1;
3374 m_freem(m);
3375 break;
3376 }
3377
3378 rxm->rxm_m = m;
3379
3380 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3381 BUS_DMASYNC_PREREAD);
3382
3383 rxd = &ring[prod];
3384
3385 rxd->paddr = htole64(map->dm_segs[0].ds_addr);
3386 rxd->haddr = htole64(0);
3387
3388 prod++;
3389 prod &= mask;
3390
3391 post = 1;
3392
3393 } while (--slots);
3394
3395 if (post) {
3396 rxr->rxr_prod = prod;
3397 ixl_wr(sc, rxr->rxr_tail, prod);
3398 }
3399
3400 return error;
3401 }
3402
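/*
 * Common TX/RX completion processing shared by the legacy interrupt
 * handler, the per-queue MSI-X handler, the softint and the
 * workqueue.  Returns a two-bit mask: bit 0 set means TX work
 * remains, bit 1 set means RX work remains within the given limits.
 */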
3403 static inline int
3404 ixl_handle_queue_common(struct ixl_softc *sc, struct ixl_queue_pair *qp,
3405 u_int txlimit, struct evcnt *txevcnt,
3406 u_int rxlimit, struct evcnt *rxevcnt)
3407 {
3408 struct ixl_tx_ring *txr = qp->qp_txr;
3409 struct ixl_rx_ring *rxr = qp->qp_rxr;
3410 int txmore, rxmore;
3411 int rv;
3412
3413 mutex_enter(&txr->txr_lock);
3414 txevcnt->ev_count++;
3415 txmore = ixl_txeof(sc, txr, txlimit);
3416 mutex_exit(&txr->txr_lock);
3417
3418 mutex_enter(&rxr->rxr_lock);
3419 rxevcnt->ev_count++;
3420 rxmore = ixl_rxeof(sc, rxr, rxlimit);
3421 mutex_exit(&rxr->rxr_lock);
3422
3423 rv = txmore | (rxmore << 1);
3424
3425 return rv;
3426 }
3427
3428 static void
3429 ixl_sched_handle_queue(struct ixl_softc *sc, struct ixl_queue_pair *qp)
3430 {
3431
3432 if (qp->qp_workqueue)
3433 workqueue_enqueue(sc->sc_workq_txrx, &qp->qp_work, NULL);
3434 else
3435 softint_schedule(qp->qp_si);
3436 }
3437
3438 static int
3439 ixl_intr(void *xsc)
3440 {
3441 struct ixl_softc *sc = xsc;
3442 struct ixl_tx_ring *txr;
3443 struct ixl_rx_ring *rxr;
3444 uint32_t icr, rxintr, txintr;
3445 int rv = 0;
3446 unsigned int i;
3447
3448 KASSERT(sc != NULL);
3449
3450 ixl_enable_other_intr(sc);
3451 icr = ixl_rd(sc, I40E_PFINT_ICR0);
3452
3453 if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) {
3454 atomic_inc_64(&sc->sc_event_atq.ev_count);
3455 ixl_atq_done(sc);
3456 ixl_work_add(sc->sc_workq, &sc->sc_arq_task);
3457 rv = 1;
3458 }
3459
3460 if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) {
3461 atomic_inc_64(&sc->sc_event_link.ev_count);
3462 ixl_work_add(sc->sc_workq, &sc->sc_link_state_task);
3463 rv = 1;
3464 }
3465
3466 rxintr = icr & I40E_INTR_NOTX_RX_MASK;
3467 txintr = icr & I40E_INTR_NOTX_TX_MASK;
3468
3469 if (txintr || rxintr) {
3470 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
3471 txr = sc->sc_qps[i].qp_txr;
3472 rxr = sc->sc_qps[i].qp_rxr;
3473
3474 ixl_handle_queue_common(sc, &sc->sc_qps[i],
3475 IXL_TXRX_PROCESS_UNLIMIT, &txr->txr_intr,
3476 IXL_TXRX_PROCESS_UNLIMIT, &rxr->rxr_intr);
3477 }
3478 rv = 1;
3479 }
3480
3481 return rv;
3482 }
3483
3484 static int
3485 ixl_queue_intr(void *xqp)
3486 {
3487 struct ixl_queue_pair *qp = xqp;
3488 struct ixl_tx_ring *txr = qp->qp_txr;
3489 struct ixl_rx_ring *rxr = qp->qp_rxr;
3490 struct ixl_softc *sc = qp->qp_sc;
3491 u_int txlimit, rxlimit;
3492 int more;
3493
3494 txlimit = sc->sc_tx_intr_process_limit;
3495 rxlimit = sc->sc_rx_intr_process_limit;
3496 qp->qp_workqueue = sc->sc_txrx_workqueue;
3497
3498 more = ixl_handle_queue_common(sc, qp,
3499 txlimit, &txr->txr_intr, rxlimit, &rxr->rxr_intr);
3500
3501 if (more != 0) {
3502 ixl_sched_handle_queue(sc, qp);
3503 } else {
3504 /* for ALTQ */
3505 if (txr->txr_qid == 0)
3506 if_schedule_deferred_start(&sc->sc_ec.ec_if);
3507 softint_schedule(txr->txr_si);
3508
3509 ixl_enable_queue_intr(sc, qp);
3510 }
3511
3512 return 1;
3513 }
3514
3515 static void
3516 ixl_handle_queue_wk(struct work *wk, void *xsc)
3517 {
3518 struct ixl_queue_pair *qp;
3519
3520 qp = container_of(wk, struct ixl_queue_pair, qp_work);
3521 ixl_handle_queue(qp);
3522 }
3523
3524 static void
3525 ixl_handle_queue(void *xqp)
3526 {
3527 struct ixl_queue_pair *qp = xqp;
3528 struct ixl_softc *sc = qp->qp_sc;
3529 struct ixl_tx_ring *txr = qp->qp_txr;
3530 struct ixl_rx_ring *rxr = qp->qp_rxr;
3531 u_int txlimit, rxlimit;
3532 int more;
3533
3534 txlimit = sc->sc_tx_process_limit;
3535 rxlimit = sc->sc_rx_process_limit;
3536
3537 more = ixl_handle_queue_common(sc, qp,
3538 txlimit, &txr->txr_defer, rxlimit, &rxr->rxr_defer);
3539
3540 if (more != 0)
3541 ixl_sched_handle_queue(sc, qp);
3542 else
3543 ixl_enable_queue_intr(sc, qp);
3544 }
3545
3546 static inline void
3547 ixl_print_hmc_error(struct ixl_softc *sc, uint32_t reg)
3548 {
3549 uint32_t hmc_idx, hmc_isvf;
3550 uint32_t hmc_errtype, hmc_objtype, hmc_data;
3551
3552 hmc_idx = reg & I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK;
3553 hmc_idx = hmc_idx >> I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT;
3554 hmc_isvf = reg & I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK;
3555 hmc_isvf = hmc_isvf >> I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT;
3556 hmc_errtype = reg & I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK;
3557 hmc_errtype = hmc_errtype >> I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT;
3558 hmc_objtype = reg & I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK;
3559 hmc_objtype = hmc_objtype >> I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT;
3560 hmc_data = ixl_rd(sc, I40E_PFHMC_ERRORDATA);
3561
3562 device_printf(sc->sc_dev,
3563 "HMC Error (idx=0x%x, isvf=0x%x, err=0x%x, obj=0x%x, data=0x%x)\n",
3564 hmc_idx, hmc_isvf, hmc_errtype, hmc_objtype, hmc_data);
3565 }
3566
3567 static int
3568 ixl_other_intr(void *xsc)
3569 {
3570 struct ixl_softc *sc = xsc;
3571 uint32_t icr, mask, reg;
3572 	int rv = 0;
3573
3574 icr = ixl_rd(sc, I40E_PFINT_ICR0);
3575 mask = ixl_rd(sc, I40E_PFINT_ICR0_ENA);
3576
3577 if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) {
3578 atomic_inc_64(&sc->sc_event_atq.ev_count);
3579 ixl_atq_done(sc);
3580 ixl_work_add(sc->sc_workq, &sc->sc_arq_task);
3581 rv = 1;
3582 }
3583
3584 if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) {
3585 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3586 device_printf(sc->sc_dev, "link stat changed\n");
3587
3588 atomic_inc_64(&sc->sc_event_link.ev_count);
3589 ixl_work_add(sc->sc_workq, &sc->sc_link_state_task);
3590 rv = 1;
3591 }
3592
3593 if (ISSET(icr, I40E_PFINT_ICR0_GRST_MASK)) {
3594 CLR(mask, I40E_PFINT_ICR0_ENA_GRST_MASK);
3595 reg = ixl_rd(sc, I40E_GLGEN_RSTAT);
3596 reg = reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK;
3597 reg = reg >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
3598
3599 device_printf(sc->sc_dev, "GRST: %s\n",
3600 reg == I40E_RESET_CORER ? "CORER" :
3601 reg == I40E_RESET_GLOBR ? "GLOBR" :
3602 reg == I40E_RESET_EMPR ? "EMPR" :
3603 "POR");
3604 }
3605
3606 if (ISSET(icr, I40E_PFINT_ICR0_ECC_ERR_MASK))
3607 atomic_inc_64(&sc->sc_event_ecc_err.ev_count);
3608 if (ISSET(icr, I40E_PFINT_ICR0_PCI_EXCEPTION_MASK))
3609 atomic_inc_64(&sc->sc_event_pci_exception.ev_count);
3610 if (ISSET(icr, I40E_PFINT_ICR0_PE_CRITERR_MASK))
3611 atomic_inc_64(&sc->sc_event_crit_err.ev_count);
3612
3613 if (ISSET(icr, IXL_ICR0_CRIT_ERR_MASK)) {
3614 CLR(mask, IXL_ICR0_CRIT_ERR_MASK);
3615 device_printf(sc->sc_dev, "critical error\n");
3616 }
3617
3618 if (ISSET(icr, I40E_PFINT_ICR0_HMC_ERR_MASK)) {
3619 reg = ixl_rd(sc, I40E_PFHMC_ERRORINFO);
3620 if (ISSET(reg, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK))
3621 ixl_print_hmc_error(sc, reg);
3622 ixl_wr(sc, I40E_PFHMC_ERRORINFO, 0);
3623 }
3624
3625 ixl_wr(sc, I40E_PFINT_ICR0_ENA, mask);
3626 ixl_flush(sc);
3627 ixl_enable_other_intr(sc);
3628 return rv;
3629 }
3630
3631 static void
3632 ixl_get_link_status_done(struct ixl_softc *sc,
3633 const struct ixl_aq_desc *iaq)
3634 {
3635
3636 ixl_link_state_update(sc, iaq);
3637 }
3638
3639 static void
3640 ixl_get_link_status(void *xsc)
3641 {
3642 struct ixl_softc *sc = xsc;
3643 struct ixl_aq_desc *iaq;
3644 struct ixl_aq_link_param *param;
3645
3646 memset(&sc->sc_link_state_atq, 0, sizeof(sc->sc_link_state_atq));
3647 iaq = &sc->sc_link_state_atq.iatq_desc;
3648 iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
3649 param = (struct ixl_aq_link_param *)iaq->iaq_param;
3650 param->notify = IXL_AQ_LINK_NOTIFY;
3651
3652 ixl_atq_set(&sc->sc_link_state_atq, ixl_get_link_status_done);
3653 (void)ixl_atq_post(sc, &sc->sc_link_state_atq);
3654 }
3655
3656 static void
3657 ixl_link_state_update(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
3658 {
3659 struct ifnet *ifp = &sc->sc_ec.ec_if;
3660 int link_state;
3661
3662 KASSERT(kpreempt_disabled());
3663
3664 link_state = ixl_set_link_status(sc, iaq);
3665
3666 if (ifp->if_link_state != link_state)
3667 if_link_state_change(ifp, link_state);
3668
3669 if (link_state != LINK_STATE_DOWN) {
3670 if_schedule_deferred_start(ifp);
3671 }
3672 }
3673
3674 static void
3675 ixl_aq_dump(const struct ixl_softc *sc, const struct ixl_aq_desc *iaq,
3676 const char *msg)
3677 {
3678 char buf[512];
3679 size_t len;
3680
3681 len = sizeof(buf);
3682 buf[--len] = '\0';
3683
3684 device_printf(sc->sc_dev, "%s\n", msg);
3685 snprintb(buf, len, IXL_AQ_FLAGS_FMT, le16toh(iaq->iaq_flags));
3686 device_printf(sc->sc_dev, "flags %s opcode %04x\n",
3687 buf, le16toh(iaq->iaq_opcode));
3688 device_printf(sc->sc_dev, "datalen %u retval %u\n",
3689 le16toh(iaq->iaq_datalen), le16toh(iaq->iaq_retval));
3690 device_printf(sc->sc_dev, "cookie %016" PRIx64 "\n", iaq->iaq_cookie);
3691 device_printf(sc->sc_dev, "%08x %08x %08x %08x\n",
3692 le32toh(iaq->iaq_param[0]), le32toh(iaq->iaq_param[1]),
3693 le32toh(iaq->iaq_param[2]), le32toh(iaq->iaq_param[3]));
3694 }
3695
3696 static void
3697 ixl_arq(void *xsc)
3698 {
3699 struct ixl_softc *sc = xsc;
3700 struct ixl_aq_desc *arq, *iaq;
3701 struct ixl_aq_buf *aqb;
3702 unsigned int cons = sc->sc_arq_cons;
3703 unsigned int prod;
3704 int done = 0;
3705
3706 prod = ixl_rd(sc, sc->sc_aq_regs->arq_head) &
3707 sc->sc_aq_regs->arq_head_mask;
3708
3709 if (cons == prod)
3710 goto done;
3711
3712 arq = IXL_DMA_KVA(&sc->sc_arq);
3713
3714 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3715 0, IXL_DMA_LEN(&sc->sc_arq),
3716 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3717
3718 do {
3719 iaq = &arq[cons];
3720 aqb = sc->sc_arq_live[cons];
3721
3722 KASSERT(aqb != NULL);
3723
3724 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,
3725 BUS_DMASYNC_POSTREAD);
3726
3727 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3728 ixl_aq_dump(sc, iaq, "arq event");
3729
3730 switch (iaq->iaq_opcode) {
3731 case htole16(IXL_AQ_OP_PHY_LINK_STATUS):
3732 kpreempt_disable();
3733 ixl_link_state_update(sc, iaq);
3734 kpreempt_enable();
3735 break;
3736 }
3737
3738 memset(iaq, 0, sizeof(*iaq));
3739 sc->sc_arq_live[cons] = NULL;
3740 SIMPLEQ_INSERT_TAIL(&sc->sc_arq_idle, aqb, aqb_entry);
3741
3742 cons++;
3743 cons &= IXL_AQ_MASK;
3744
3745 done = 1;
3746 } while (cons != prod);
3747
3748 if (done) {
3749 sc->sc_arq_cons = cons;
3750 ixl_arq_fill(sc);
3751 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3752 0, IXL_DMA_LEN(&sc->sc_arq),
3753 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3754 }
3755
3756 done:
3757 ixl_enable_other_intr(sc);
3758 }
3759
3760 static void
3761 ixl_atq_set(struct ixl_atq *iatq,
3762 void (*fn)(struct ixl_softc *, const struct ixl_aq_desc *))
3763 {
3764
3765 iatq->iatq_fn = fn;
3766 }
3767
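/*
 * Post a command on the admin transmit queue.  The descriptor cookie
 * carries the ixl_atq pointer so ixl_atq_done() can find the request
 * and invoke its callback on completion.  Fails with ENOMEM when the
 * ring is full.
 */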
3768 static int
3769 ixl_atq_post_locked(struct ixl_softc *sc, struct ixl_atq *iatq)
3770 {
3771 struct ixl_aq_desc *atq, *slot;
3772 unsigned int prod, cons, prod_next;
3773
3774 /* assert locked */
3775 KASSERT(mutex_owned(&sc->sc_atq_lock));
3776
3777 atq = IXL_DMA_KVA(&sc->sc_atq);
3778 prod = sc->sc_atq_prod;
3779 cons = sc->sc_atq_cons;
3780 	prod_next = (prod + 1) & IXL_AQ_MASK;
3781
3782 if (cons == prod_next)
3783 return ENOMEM;
3784
3785 slot = &atq[prod];
3786
3787 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3788 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
3789
3790 *slot = iatq->iatq_desc;
3791 slot->iaq_cookie = (uint64_t)((intptr_t)iatq);
3792
3793 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3794 ixl_aq_dump(sc, slot, "atq command");
3795
3796 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3797 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
3798
3799 sc->sc_atq_prod = prod_next;
3800 ixl_wr(sc, sc->sc_aq_regs->atq_tail, sc->sc_atq_prod);
3801
3802 return 0;
3803 }
3804
3805 static int
3806 ixl_atq_post(struct ixl_softc *sc, struct ixl_atq *iatq)
3807 {
3808 int rv;
3809
3810 mutex_enter(&sc->sc_atq_lock);
3811 rv = ixl_atq_post_locked(sc, iatq);
3812 mutex_exit(&sc->sc_atq_lock);
3813
3814 return rv;
3815 }
3816
3817 static void
3818 ixl_atq_done_locked(struct ixl_softc *sc)
3819 {
3820 struct ixl_aq_desc *atq, *slot;
3821 struct ixl_atq *iatq;
3822 unsigned int cons;
3823 unsigned int prod;
3824
3825 KASSERT(mutex_owned(&sc->sc_atq_lock));
3826
3827 prod = sc->sc_atq_prod;
3828 cons = sc->sc_atq_cons;
3829
3830 if (prod == cons)
3831 return;
3832
3833 atq = IXL_DMA_KVA(&sc->sc_atq);
3834
3835 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3836 0, IXL_DMA_LEN(&sc->sc_atq),
3837 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3838
3839 do {
3840 slot = &atq[cons];
3841 if (!ISSET(slot->iaq_flags, htole16(IXL_AQ_DD)))
3842 break;
3843
3844 iatq = (struct ixl_atq *)((intptr_t)slot->iaq_cookie);
3845 iatq->iatq_desc = *slot;
3846
3847 memset(slot, 0, sizeof(*slot));
3848
3849 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3850 ixl_aq_dump(sc, &iatq->iatq_desc, "atq response");
3851
3852 (*iatq->iatq_fn)(sc, &iatq->iatq_desc);
3853
3854 cons++;
3855 cons &= IXL_AQ_MASK;
3856 } while (cons != prod);
3857
3858 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3859 0, IXL_DMA_LEN(&sc->sc_atq),
3860 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3861
3862 sc->sc_atq_cons = cons;
3863 }
3864
3865 static void
3866 ixl_atq_done(struct ixl_softc *sc)
3867 {
3868
3869 mutex_enter(&sc->sc_atq_lock);
3870 ixl_atq_done_locked(sc);
3871 mutex_exit(&sc->sc_atq_lock);
3872 }
3873
3874 static void
3875 ixl_wakeup(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
3876 {
3877
3878 KASSERT(mutex_owned(&sc->sc_atq_lock));
3879
3880 cv_signal(&sc->sc_atq_cv);
3881 }
3882
3883 static int
3884 ixl_atq_exec(struct ixl_softc *sc, struct ixl_atq *iatq)
3885 {
3886 int error;
3887
3888 KASSERT(iatq->iatq_desc.iaq_cookie == 0);
3889
3890 ixl_atq_set(iatq, ixl_wakeup);
3891
3892 mutex_enter(&sc->sc_atq_lock);
3893 error = ixl_atq_post_locked(sc, iatq);
3894 if (error) {
3895 mutex_exit(&sc->sc_atq_lock);
3896 return error;
3897 }
3898
3899 error = cv_timedwait(&sc->sc_atq_cv, &sc->sc_atq_lock,
3900 IXL_ATQ_EXEC_TIMEOUT);
3901 mutex_exit(&sc->sc_atq_lock);
3902
3903 return error;
3904 }
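/*
 * Synchronous, polled admin queue command: post a single descriptor
 * and busy-wait (in 1ms steps, up to tm milliseconds) for the
 * hardware head to advance past it, then copy the response back.
 * Used where waiting for the admin queue interrupt is not possible,
 * e.g. during attach.
 */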
3905
3906 static int
3907 ixl_atq_poll(struct ixl_softc *sc, struct ixl_aq_desc *iaq, unsigned int tm)
3908 {
3909 struct ixl_aq_desc *atq, *slot;
3910 unsigned int prod;
3911 unsigned int t = 0;
3912
3913 mutex_enter(&sc->sc_atq_lock);
3914
3915 atq = IXL_DMA_KVA(&sc->sc_atq);
3916 prod = sc->sc_atq_prod;
3917 slot = atq + prod;
3918
3919 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3920 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
3921
3922 *slot = *iaq;
3923 slot->iaq_flags |= htole16(IXL_AQ_SI);
3924
3925 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3926 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
3927
3928 prod++;
3929 prod &= IXL_AQ_MASK;
3930 sc->sc_atq_prod = prod;
3931 ixl_wr(sc, sc->sc_aq_regs->atq_tail, prod);
3932
3933 while (ixl_rd(sc, sc->sc_aq_regs->atq_head) != prod) {
3934 delaymsec(1);
3935
3936 if (t++ > tm) {
3937 mutex_exit(&sc->sc_atq_lock);
3938 return ETIMEDOUT;
3939 }
3940 }
3941
3942 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3943 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTREAD);
3944 *iaq = *slot;
3945 memset(slot, 0, sizeof(*slot));
3946 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3947 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREREAD);
3948
3949 sc->sc_atq_cons = prod;
3950
3951 mutex_exit(&sc->sc_atq_lock);
3952
3953 return 0;
3954 }
3955
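/*
 * Read the firmware and admin queue API versions and derive the AQ
 * feature flags (RX control register access, NVM locking, RSS via AQ)
 * from the MAC type and the reported API level.
 */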
3956 static int
3957 ixl_get_version(struct ixl_softc *sc)
3958 {
3959 struct ixl_aq_desc iaq;
3960 uint32_t fwbuild, fwver, apiver;
3961 uint16_t api_maj_ver, api_min_ver;
3962
3963 memset(&iaq, 0, sizeof(iaq));
3964 iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VERSION);
3965
3968 if (ixl_atq_poll(sc, &iaq, 2000) != 0)
3969 return ETIMEDOUT;
3970 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK))
3971 return EIO;
3972
3973 fwbuild = le32toh(iaq.iaq_param[1]);
3974 fwver = le32toh(iaq.iaq_param[2]);
3975 apiver = le32toh(iaq.iaq_param[3]);
3976
3977 api_maj_ver = (uint16_t)apiver;
3978 api_min_ver = (uint16_t)(apiver >> 16);
3979
3980 aprint_normal(", FW %hu.%hu.%05u API %hu.%hu", (uint16_t)fwver,
3981 (uint16_t)(fwver >> 16), fwbuild, api_maj_ver, api_min_ver);
3982
3983 if (sc->sc_mac_type == I40E_MAC_X722) {
3984 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK |
3985 IXL_SC_AQ_FLAG_NVMREAD);
3986 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL);
3987 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RSS);
3988 }
3989
3990 #define IXL_API_VER(maj, min) (((uint32_t)(maj) << 16) | (min))
3991 if (IXL_API_VER(api_maj_ver, api_min_ver) >= IXL_API_VER(1, 5)) {
3992 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL);
3993 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK);
3994 }
3995 #undef IXL_API_VER
3996
3997 return 0;
3998 }
3999
4000 static int
4001 ixl_get_nvm_version(struct ixl_softc *sc)
4002 {
4003 uint16_t nvmver, cfg_ptr, eetrack_hi, eetrack_lo, oem_hi, oem_lo;
4004 uint32_t eetrack, oem;
4005 uint16_t nvm_maj_ver, nvm_min_ver, oem_build;
4006 uint8_t oem_ver, oem_patch;
4007
4008 nvmver = cfg_ptr = eetrack_hi = eetrack_lo = oem_hi = oem_lo = 0;
4009 ixl_rd16_nvm(sc, I40E_SR_NVM_DEV_STARTER_VERSION, &nvmver);
4010 ixl_rd16_nvm(sc, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
4011 ixl_rd16_nvm(sc, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
4012 ixl_rd16_nvm(sc, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
4013 ixl_rd16_nvm(sc, cfg_ptr + I40E_NVM_OEM_VER_OFF, &oem_hi);
4014 ixl_rd16_nvm(sc, cfg_ptr + I40E_NVM_OEM_VER_OFF + 1, &oem_lo);
4015
4016 nvm_maj_ver = (uint16_t)__SHIFTOUT(nvmver, IXL_NVM_VERSION_HI_MASK);
4017 nvm_min_ver = (uint16_t)__SHIFTOUT(nvmver, IXL_NVM_VERSION_LO_MASK);
4018 eetrack = ((uint32_t)eetrack_hi << 16) | eetrack_lo;
4019 oem = ((uint32_t)oem_hi << 16) | oem_lo;
4020 oem_ver = __SHIFTOUT(oem, IXL_NVM_OEMVERSION_MASK);
4021 oem_build = __SHIFTOUT(oem, IXL_NVM_OEMBUILD_MASK);
4022 oem_patch = __SHIFTOUT(oem, IXL_NVM_OEMPATCH_MASK);
4023
4024 aprint_normal(" nvm %x.%02x etid %08x oem %d.%d.%d",
4025 nvm_maj_ver, nvm_min_ver, eetrack,
4026 oem_ver, oem_build, oem_patch);
4027
4028 return 0;
4029 }
4030
4031 static int
4032 ixl_pxe_clear(struct ixl_softc *sc)
4033 {
4034 struct ixl_aq_desc iaq;
4035 int rv;
4036
4037 memset(&iaq, 0, sizeof(iaq));
4038 iaq.iaq_opcode = htole16(IXL_AQ_OP_CLEAR_PXE_MODE);
4039 iaq.iaq_param[0] = htole32(0x2);
4040
4041 rv = ixl_atq_poll(sc, &iaq, 250);
4042
4043 ixl_wr(sc, I40E_GLLAN_RCTL_0, 0x1);
4044
4045 if (rv != 0)
4046 return ETIMEDOUT;
4047
4048 switch (iaq.iaq_retval) {
4049 case htole16(IXL_AQ_RC_OK):
4050 case htole16(IXL_AQ_RC_EEXIST):
4051 break;
4052 default:
4053 return EIO;
4054 }
4055
4056 return 0;
4057 }
4058
4059 static int
4060 ixl_lldp_shut(struct ixl_softc *sc)
4061 {
4062 struct ixl_aq_desc iaq;
4063
4064 memset(&iaq, 0, sizeof(iaq));
4065 iaq.iaq_opcode = htole16(IXL_AQ_OP_LLDP_STOP_AGENT);
4066 iaq.iaq_param[0] = htole32(IXL_LLDP_SHUTDOWN);
4067
4068 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4069 aprint_error_dev(sc->sc_dev, "STOP LLDP AGENT timeout\n");
4070 return -1;
4071 }
4072
4073 switch (iaq.iaq_retval) {
4074 case htole16(IXL_AQ_RC_EMODE):
4075 case htole16(IXL_AQ_RC_EPERM):
4076 /* ignore silently */
4077 default:
4078 break;
4079 }
4080
4081 return 0;
4082 }
4083
4084 static void
4085 ixl_parse_hw_capability(struct ixl_softc *sc, struct ixl_aq_capability *cap)
4086 {
4087 uint16_t id;
4088 uint32_t number, logical_id;
4089
4090 id = le16toh(cap->cap_id);
4091 number = le32toh(cap->number);
4092 logical_id = le32toh(cap->logical_id);
4093
4094 switch (id) {
4095 case IXL_AQ_CAP_RSS:
4096 sc->sc_rss_table_size = number;
4097 sc->sc_rss_table_entry_width = logical_id;
4098 break;
4099 case IXL_AQ_CAP_RXQ:
4100 case IXL_AQ_CAP_TXQ:
4101 sc->sc_nqueue_pairs_device = MIN(number,
4102 sc->sc_nqueue_pairs_device);
4103 break;
4104 }
4105 }
4106
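/*
 * Fetch the function capability list, growing the buffer and retrying
 * when the firmware reports ENOMEM, then record the capabilities we
 * care about (RSS table geometry, queue pair counts).
 */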
4107 static int
4108 ixl_get_hw_capabilities(struct ixl_softc *sc)
4109 {
4110 struct ixl_dmamem idm;
4111 struct ixl_aq_desc iaq;
4112 struct ixl_aq_capability *caps;
4113 size_t i, ncaps;
4114 bus_size_t caps_size;
4115 uint16_t status;
4116 int rv;
4117
4118 caps_size = sizeof(caps[0]) * 40;
4119 memset(&iaq, 0, sizeof(iaq));
4120 iaq.iaq_opcode = htole16(IXL_AQ_OP_LIST_FUNC_CAP);
4121
4122 do {
4123 if (ixl_dmamem_alloc(sc, &idm, caps_size, 0) != 0) {
4124 return -1;
4125 }
4126
4127 iaq.iaq_flags = htole16(IXL_AQ_BUF |
4128 (caps_size > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4129 iaq.iaq_datalen = htole16(caps_size);
4130 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
4131
4132 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0,
4133 IXL_DMA_LEN(&idm), BUS_DMASYNC_PREREAD);
4134
4135 rv = ixl_atq_poll(sc, &iaq, 250);
4136
4137 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0,
4138 IXL_DMA_LEN(&idm), BUS_DMASYNC_POSTREAD);
4139
4140 if (rv != 0) {
4141 aprint_error(", HW capabilities timeout\n");
4142 goto done;
4143 }
4144
4145 status = le16toh(iaq.iaq_retval);
4146
4147 if (status == IXL_AQ_RC_ENOMEM) {
4148 caps_size = le16toh(iaq.iaq_datalen);
4149 ixl_dmamem_free(sc, &idm);
4150 }
4151 } while (status == IXL_AQ_RC_ENOMEM);
4152
4153 if (status != IXL_AQ_RC_OK) {
4154 aprint_error(", HW capabilities error\n");
4155 goto done;
4156 }
4157
4158 caps = IXL_DMA_KVA(&idm);
4159 ncaps = le16toh(iaq.iaq_param[1]);
4160
4161 for (i = 0; i < ncaps; i++) {
4162 ixl_parse_hw_capability(sc, &caps[i]);
4163 }
4164
4165 done:
4166 ixl_dmamem_free(sc, &idm);
4167 return rv;
4168 }
4169
4170 static int
4171 ixl_get_mac(struct ixl_softc *sc)
4172 {
4173 struct ixl_dmamem idm;
4174 struct ixl_aq_desc iaq;
4175 struct ixl_aq_mac_addresses *addrs;
4176 int rv;
4177
4178 if (ixl_dmamem_alloc(sc, &idm, sizeof(*addrs), 0) != 0) {
4179 aprint_error(", unable to allocate mac addresses\n");
4180 return -1;
4181 }
4182
4183 memset(&iaq, 0, sizeof(iaq));
4184 iaq.iaq_flags = htole16(IXL_AQ_BUF);
4185 iaq.iaq_opcode = htole16(IXL_AQ_OP_MAC_ADDRESS_READ);
4186 iaq.iaq_datalen = htole16(sizeof(*addrs));
4187 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
4188
4189 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4190 BUS_DMASYNC_PREREAD);
4191
4192 rv = ixl_atq_poll(sc, &iaq, 250);
4193
4194 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4195 BUS_DMASYNC_POSTREAD);
4196
4197 if (rv != 0) {
4198 aprint_error(", MAC ADDRESS READ timeout\n");
4199 rv = -1;
4200 goto done;
4201 }
4202 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4203 aprint_error(", MAC ADDRESS READ error\n");
4204 rv = -1;
4205 goto done;
4206 }
4207
4208 addrs = IXL_DMA_KVA(&idm);
4209 if (!ISSET(iaq.iaq_param[0], htole32(IXL_AQ_MAC_PORT_VALID))) {
4210 printf(", port address is not valid\n");
4211 goto done;
4212 }
4213
4214 memcpy(sc->sc_enaddr, addrs->port, ETHER_ADDR_LEN);
4215 rv = 0;
4216
4217 done:
4218 ixl_dmamem_free(sc, &idm);
4219 return rv;
4220 }
4221
4222 static int
4223 ixl_get_switch_config(struct ixl_softc *sc)
4224 {
4225 struct ixl_dmamem idm;
4226 struct ixl_aq_desc iaq;
4227 struct ixl_aq_switch_config *hdr;
4228 struct ixl_aq_switch_config_element *elms, *elm;
4229 unsigned int nelm, i;
4230 int rv;
4231
4232 if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
4233 aprint_error_dev(sc->sc_dev,
4234 "unable to allocate switch config buffer\n");
4235 return -1;
4236 }
4237
4238 memset(&iaq, 0, sizeof(iaq));
4239 iaq.iaq_flags = htole16(IXL_AQ_BUF |
4240 (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4241 iaq.iaq_opcode = htole16(IXL_AQ_OP_SWITCH_GET_CONFIG);
4242 iaq.iaq_datalen = htole16(IXL_AQ_BUFLEN);
4243 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
4244
4245 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4246 BUS_DMASYNC_PREREAD);
4247
4248 rv = ixl_atq_poll(sc, &iaq, 250);
4249
4250 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4251 BUS_DMASYNC_POSTREAD);
4252
4253 if (rv != 0) {
4254 aprint_error_dev(sc->sc_dev, "GET SWITCH CONFIG timeout\n");
4255 rv = -1;
4256 goto done;
4257 }
4258 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4259 aprint_error_dev(sc->sc_dev, "GET SWITCH CONFIG error\n");
4260 rv = -1;
4261 goto done;
4262 }
4263
4264 hdr = IXL_DMA_KVA(&idm);
4265 elms = (struct ixl_aq_switch_config_element *)(hdr + 1);
4266
4267 nelm = le16toh(hdr->num_reported);
4268 if (nelm < 1) {
4269 aprint_error_dev(sc->sc_dev, "no switch config available\n");
4270 rv = -1;
4271 goto done;
4272 }
4273
4274 for (i = 0; i < nelm; i++) {
4275 elm = &elms[i];
4276
4277 aprint_debug_dev(sc->sc_dev,
4278 "type %x revision %u seid %04x\n",
4279 elm->type, elm->revision, le16toh(elm->seid));
4280 aprint_debug_dev(sc->sc_dev,
4281 "uplink %04x downlink %04x\n",
4282 le16toh(elm->uplink_seid),
4283 le16toh(elm->downlink_seid));
4284 aprint_debug_dev(sc->sc_dev,
4285 "conntype %x scheduler %04x extra %04x\n",
4286 elm->connection_type,
4287 le16toh(elm->scheduler_id),
4288 le16toh(elm->element_info));
4289 }
4290
4291 elm = &elms[0];
4292
4293 sc->sc_uplink_seid = elm->uplink_seid;
4294 sc->sc_downlink_seid = elm->downlink_seid;
4295 sc->sc_seid = elm->seid;
4296
4297 if ((sc->sc_uplink_seid == htole16(0)) !=
4298 (sc->sc_downlink_seid == htole16(0))) {
4299 aprint_error_dev(sc->sc_dev, "SEIDs are misconfigured\n");
4300 rv = -1;
4301 goto done;
4302 }
4303
4304 done:
4305 ixl_dmamem_free(sc, &idm);
4306 return rv;
4307 }
4308
4309 static int
4310 ixl_phy_mask_ints(struct ixl_softc *sc)
4311 {
4312 struct ixl_aq_desc iaq;
4313
4314 memset(&iaq, 0, sizeof(iaq));
4315 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_EVENT_MASK);
4316 iaq.iaq_param[2] = htole32(IXL_AQ_PHY_EV_MASK &
4317 ~(IXL_AQ_PHY_EV_LINK_UPDOWN | IXL_AQ_PHY_EV_MODULE_QUAL_FAIL |
4318 IXL_AQ_PHY_EV_MEDIA_NA));
4319
4320 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4321 aprint_error_dev(sc->sc_dev, "SET PHY EVENT MASK timeout\n");
4322 return -1;
4323 }
4324 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4325 aprint_error_dev(sc->sc_dev, "SET PHY EVENT MASK error\n");
4326 return -1;
4327 }
4328
4329 return 0;
4330 }
4331
4332 static int
4333 ixl_get_phy_abilities(struct ixl_softc *sc, struct ixl_dmamem *idm)
4334 {
4335 struct ixl_aq_desc iaq;
4336 int rv;
4337
4338 memset(&iaq, 0, sizeof(iaq));
4339 iaq.iaq_flags = htole16(IXL_AQ_BUF |
4340 (IXL_DMA_LEN(idm) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4341 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_GET_ABILITIES);
4342 iaq.iaq_datalen = htole16(IXL_DMA_LEN(idm));
4343 iaq.iaq_param[0] = htole32(IXL_AQ_PHY_REPORT_INIT);
4344 ixl_aq_dva(&iaq, IXL_DMA_DVA(idm));
4345
4346 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
4347 BUS_DMASYNC_PREREAD);
4348
4349 rv = ixl_atq_poll(sc, &iaq, 250);
4350
4351 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
4352 BUS_DMASYNC_POSTREAD);
4353
4354 if (rv != 0)
4355 return -1;
4356
4357 return le16toh(iaq.iaq_retval);
4358 }
4359
4360 static int
4361 ixl_get_phy_info(struct ixl_softc *sc)
4362 {
4363 struct ixl_dmamem idm;
4364 struct ixl_aq_phy_abilities *phy;
4365 int rv;
4366
4367 if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
4368 aprint_error_dev(sc->sc_dev,
4369 "unable to allocate phy abilities buffer\n");
4370 return -1;
4371 }
4372
4373 rv = ixl_get_phy_abilities(sc, &idm);
4374 switch (rv) {
4375 case -1:
4376 aprint_error_dev(sc->sc_dev, "GET PHY ABILITIES timeout\n");
4377 goto done;
4378 case IXL_AQ_RC_OK:
4379 break;
4380 case IXL_AQ_RC_EIO:
4381 		aprint_error_dev(sc->sc_dev, "unable to query phy types\n");
4382 goto done;
4383 default:
4384 aprint_error_dev(sc->sc_dev,
4385 "GET PHY ABILITIIES error %u\n", rv);
4386 goto done;
4387 }
4388
4389 phy = IXL_DMA_KVA(&idm);
4390
4391 sc->sc_phy_types = le32toh(phy->phy_type);
4392 sc->sc_phy_types |= (uint64_t)le32toh(phy->phy_type_ext) << 32;
4393
4394 sc->sc_phy_abilities = phy->abilities;
4395 sc->sc_phy_linkspeed = phy->link_speed;
4396 sc->sc_phy_fec_cfg = phy->fec_cfg_curr_mod_ext_info &
4397 (IXL_AQ_ENABLE_FEC_KR | IXL_AQ_ENABLE_FEC_RS |
4398 IXL_AQ_REQUEST_FEC_KR | IXL_AQ_REQUEST_FEC_RS);
4399 sc->sc_eee_cap = phy->eee_capability;
4400 sc->sc_eeer_val = phy->eeer_val;
4401 sc->sc_d3_lpan = phy->d3_lpan;
4402
4403 rv = 0;
4404
4405 done:
4406 ixl_dmamem_free(sc, &idm);
4407 return rv;
4408 }
4409
4410 static int
4411 ixl_set_phy_config(struct ixl_softc *sc,
4412 uint8_t link_speed, uint8_t abilities, bool polling)
4413 {
4414 struct ixl_aq_phy_param *param;
4415 struct ixl_atq iatq;
4416 struct ixl_aq_desc *iaq;
4417 int error;
4418
4419 memset(&iatq, 0, sizeof(iatq));
4420
4421 iaq = &iatq.iatq_desc;
4422 iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_CONFIG);
4423 param = (struct ixl_aq_phy_param *)&iaq->iaq_param;
4424 param->phy_types = htole32((uint32_t)sc->sc_phy_types);
4425 param->phy_type_ext = (uint8_t)(sc->sc_phy_types >> 32);
4426 param->link_speed = link_speed;
4427 param->abilities = abilities | IXL_AQ_PHY_ABILITY_AUTO_LINK;
4428 param->fec_cfg = sc->sc_phy_fec_cfg;
4429 param->eee_capability = sc->sc_eee_cap;
4430 param->eeer_val = sc->sc_eeer_val;
4431 param->d3_lpan = sc->sc_d3_lpan;
4432
4433 if (polling)
4434 error = ixl_atq_poll(sc, iaq, 250);
4435 else
4436 error = ixl_atq_exec(sc, &iatq);
4437
4438 if (error != 0)
4439 return error;
4440
4441 switch (le16toh(iaq->iaq_retval)) {
4442 case IXL_AQ_RC_OK:
4443 break;
4444 case IXL_AQ_RC_EPERM:
4445 return EPERM;
4446 default:
4447 return EIO;
4448 }
4449
4450 return 0;
4451 }
4452
4453 static int
4454 ixl_set_phy_autoselect(struct ixl_softc *sc)
4455 {
4456 uint8_t link_speed, abilities;
4457
4458 link_speed = sc->sc_phy_linkspeed;
4459 abilities = IXL_PHY_ABILITY_LINKUP | IXL_PHY_ABILITY_AUTONEGO;
4460
4461 return ixl_set_phy_config(sc, link_speed, abilities, true);
4462 }
4463
4464 static int
4465 ixl_get_link_status_poll(struct ixl_softc *sc, int *l)
4466 {
4467 struct ixl_aq_desc iaq;
4468 struct ixl_aq_link_param *param;
4469 int link;
4470
4471 memset(&iaq, 0, sizeof(iaq));
4472 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
4473 param = (struct ixl_aq_link_param *)iaq.iaq_param;
4474 param->notify = IXL_AQ_LINK_NOTIFY;
4475
4476 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4477 return ETIMEDOUT;
4478 }
4479 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4480 return EIO;
4481 }
4482
4483 link = ixl_set_link_status(sc, &iaq);
4484
4485 if (l != NULL)
4486 *l = link;
4487
4488 return 0;
4489 }
4490
4491 static int
4492 ixl_get_vsi(struct ixl_softc *sc)
4493 {
4494 struct ixl_dmamem *vsi = &sc->sc_scratch;
4495 struct ixl_aq_desc iaq;
4496 struct ixl_aq_vsi_param *param;
4497 struct ixl_aq_vsi_reply *reply;
4498 struct ixl_aq_vsi_data *data;
4499 int rv;
4500
4501 /* grumble, vsi info isn't "known" at compile time */
4502
4503 memset(&iaq, 0, sizeof(iaq));
4504 iaq.iaq_flags = htole16(IXL_AQ_BUF |
4505 (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4506 iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VSI_PARAMS);
4507 iaq.iaq_datalen = htole16(IXL_DMA_LEN(vsi));
4508 ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
4509
4510 param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
4511 param->uplink_seid = sc->sc_seid;
4512
4513 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4514 BUS_DMASYNC_PREREAD);
4515
4516 rv = ixl_atq_poll(sc, &iaq, 250);
4517
4518 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4519 BUS_DMASYNC_POSTREAD);
4520
4521 if (rv != 0) {
4522 return ETIMEDOUT;
4523 }
4524
4525 switch (le16toh(iaq.iaq_retval)) {
4526 case IXL_AQ_RC_OK:
4527 break;
4528 case IXL_AQ_RC_ENOENT:
4529 return ENOENT;
4530 case IXL_AQ_RC_EACCES:
4531 return EACCES;
4532 default:
4533 return EIO;
4534 }
4535
4536 reply = (struct ixl_aq_vsi_reply *)iaq.iaq_param;
4537 sc->sc_vsi_number = le16toh(reply->vsi_number);
4538 data = IXL_DMA_KVA(vsi);
4539 sc->sc_vsi_stat_counter_idx = le16toh(data->stat_counter_idx);
4540
4541 return 0;
4542 }
4543
4544 static int
4545 ixl_set_vsi(struct ixl_softc *sc)
4546 {
4547 struct ixl_dmamem *vsi = &sc->sc_scratch;
4548 struct ixl_aq_desc iaq;
4549 struct ixl_aq_vsi_param *param;
4550 struct ixl_aq_vsi_data *data = IXL_DMA_KVA(vsi);
4551 unsigned int qnum;
4552 uint16_t val;
4553 int rv;
4554
4555 qnum = sc->sc_nqueue_pairs - 1;
4556
4557 data->valid_sections = htole16(IXL_AQ_VSI_VALID_QUEUE_MAP |
4558 IXL_AQ_VSI_VALID_VLAN);
4559
4560 CLR(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_MASK));
4561 SET(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_CONTIG));
4562 data->queue_mapping[0] = htole16(0);
4563 data->tc_mapping[0] = htole16((0 << IXL_AQ_VSI_TC_Q_OFFSET_SHIFT) |
4564 (qnum << IXL_AQ_VSI_TC_Q_NUMBER_SHIFT));
4565
4566 val = le16toh(data->port_vlan_flags);
4567 CLR(val, IXL_AQ_VSI_PVLAN_MODE_MASK | IXL_AQ_VSI_PVLAN_EMOD_MASK);
4568 SET(val, IXL_AQ_VSI_PVLAN_MODE_ALL);
4569
4570 if (ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWTAGGING)) {
4571 SET(val, IXL_AQ_VSI_PVLAN_EMOD_STR_BOTH);
4572 } else {
4573 SET(val, IXL_AQ_VSI_PVLAN_EMOD_NOTHING);
4574 }
4575
4576 data->port_vlan_flags = htole16(val);
4577
4578 /* grumble, vsi info isn't "known" at compile time */
4579
4580 memset(&iaq, 0, sizeof(iaq));
4581 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD |
4582 (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4583 iaq.iaq_opcode = htole16(IXL_AQ_OP_UPD_VSI_PARAMS);
4584 iaq.iaq_datalen = htole16(IXL_DMA_LEN(vsi));
4585 ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
4586
4587 param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
4588 param->uplink_seid = sc->sc_seid;
4589
4590 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4591 BUS_DMASYNC_PREWRITE);
4592
4593 rv = ixl_atq_poll(sc, &iaq, 250);
4594
4595 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4596 BUS_DMASYNC_POSTWRITE);
4597
4598 if (rv != 0) {
4599 return ETIMEDOUT;
4600 }
4601
4602 switch (le16toh(iaq.iaq_retval)) {
4603 case IXL_AQ_RC_OK:
4604 break;
4605 case IXL_AQ_RC_ENOENT:
4606 return ENOENT;
4607 case IXL_AQ_RC_EACCES:
4608 return EACCES;
4609 default:
4610 return EIO;
4611 }
4612
4613 return 0;
4614 }
4615
4616 static void
4617 ixl_set_filter_control(struct ixl_softc *sc)
4618 {
4619 uint32_t reg;
4620
4621 reg = ixl_rd_rx_csr(sc, I40E_PFQF_CTL_0);
4622
4623 CLR(reg, I40E_PFQF_CTL_0_HASHLUTSIZE_MASK);
4624 SET(reg, I40E_HASH_LUT_SIZE_128 << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT);
4625
4626 SET(reg, I40E_PFQF_CTL_0_FD_ENA_MASK);
4627 SET(reg, I40E_PFQF_CTL_0_ETYPE_ENA_MASK);
4628 SET(reg, I40E_PFQF_CTL_0_MACVLAN_ENA_MASK);
4629
4630 ixl_wr_rx_csr(sc, I40E_PFQF_CTL_0, reg);
4631 }
4632
4633 static inline void
4634 ixl_get_default_rss_key(uint32_t *buf, size_t len)
4635 {
4636 size_t cplen;
4637 uint8_t rss_seed[RSS_KEYSIZE];
4638
4639 rss_getkey(rss_seed);
4640 memset(buf, 0, len);
4641
4642 cplen = MIN(len, sizeof(rss_seed));
4643 memcpy(buf, rss_seed, cplen);
4644 }
4645
4646 static int
4647 ixl_set_rss_key(struct ixl_softc *sc, uint8_t *key, size_t keylen)
4648 {
4649 struct ixl_dmamem *idm;
4650 struct ixl_atq iatq;
4651 struct ixl_aq_desc *iaq;
4652 struct ixl_aq_rss_key_param *param;
4653 struct ixl_aq_rss_key_data *data;
4654 size_t len, datalen, stdlen, extlen;
4655 uint16_t vsi_id;
4656 int rv;
4657
4658 memset(&iatq, 0, sizeof(iatq));
4659 iaq = &iatq.iatq_desc;
4660 idm = &sc->sc_aqbuf;
4661
4662 datalen = sizeof(*data);
4663
4664 	/* XXX The buf size has to be less than the size of the register */
4665 datalen = MIN(IXL_RSS_KEY_SIZE_REG * sizeof(uint32_t), datalen);
4666
4667 iaq->iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD |
4668 (datalen > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4669 iaq->iaq_opcode = htole16(IXL_AQ_OP_RSS_SET_KEY);
4670 iaq->iaq_datalen = htole16(datalen);
4671
4672 param = (struct ixl_aq_rss_key_param *)iaq->iaq_param;
4673 vsi_id = (sc->sc_vsi_number << IXL_AQ_RSSKEY_VSI_ID_SHIFT) |
4674 IXL_AQ_RSSKEY_VSI_VALID;
4675 param->vsi_id = htole16(vsi_id);
4676
4677 memset(IXL_DMA_KVA(idm), 0, IXL_DMA_LEN(idm));
4678 data = IXL_DMA_KVA(idm);
4679
4680 len = MIN(keylen, datalen);
4681 stdlen = MIN(sizeof(data->standard_rss_key), len);
4682 memcpy(data->standard_rss_key, key, stdlen);
4683 len = (len > stdlen) ? (len - stdlen) : 0;
4684
4685 extlen = MIN(sizeof(data->extended_hash_key), len);
4687 memcpy(data->extended_hash_key, key + stdlen, extlen);
4688
4689 ixl_aq_dva(iaq, IXL_DMA_DVA(idm));
4690
4691 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
4692 IXL_DMA_LEN(idm), BUS_DMASYNC_PREWRITE);
4693
4694 rv = ixl_atq_exec(sc, &iatq);
4695
4696 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
4697 IXL_DMA_LEN(idm), BUS_DMASYNC_POSTWRITE);
4698
4699 if (rv != 0) {
4700 return ETIMEDOUT;
4701 }
4702
4703 if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK)) {
4704 return EIO;
4705 }
4706
4707 return 0;
4708 }
4709
4710 static int
4711 ixl_set_rss_lut(struct ixl_softc *sc, uint8_t *lut, size_t lutlen)
4712 {
4713 struct ixl_dmamem *idm;
4714 struct ixl_atq iatq;
4715 struct ixl_aq_desc *iaq;
4716 struct ixl_aq_rss_lut_param *param;
4717 uint16_t vsi_id;
4718 uint8_t *data;
4719 size_t dmalen;
4720 int rv;
4721
4722 memset(&iatq, 0, sizeof(iatq));
4723 iaq = &iatq.iatq_desc;
4724 idm = &sc->sc_aqbuf;
4725
4726 dmalen = MIN(lutlen, IXL_DMA_LEN(idm));
4727
4728 iaq->iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD |
4729 (dmalen > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4730 iaq->iaq_opcode = htole16(IXL_AQ_OP_RSS_SET_LUT);
4731 iaq->iaq_datalen = htole16(dmalen);
4732
4733 memset(IXL_DMA_KVA(idm), 0, IXL_DMA_LEN(idm));
4734 data = IXL_DMA_KVA(idm);
4735 memcpy(data, lut, dmalen);
4736 ixl_aq_dva(iaq, IXL_DMA_DVA(idm));
4737
4738 param = (struct ixl_aq_rss_lut_param *)iaq->iaq_param;
4739 vsi_id = (sc->sc_vsi_number << IXL_AQ_RSSLUT_VSI_ID_SHIFT) |
4740 IXL_AQ_RSSLUT_VSI_VALID;
4741 param->vsi_id = htole16(vsi_id);
4742 param->flags = htole16(IXL_AQ_RSSLUT_TABLE_TYPE_PF <<
4743 IXL_AQ_RSSLUT_TABLE_TYPE_SHIFT);
4744
4745 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
4746 IXL_DMA_LEN(idm), BUS_DMASYNC_PREWRITE);
4747
4748 rv = ixl_atq_exec(sc, &iatq);
4749
4750 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
4751 IXL_DMA_LEN(idm), BUS_DMASYNC_POSTWRITE);
4752
4753 if (rv != 0) {
4754 return ETIMEDOUT;
4755 }
4756
4757 if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK)) {
4758 return EIO;
4759 }
4760
4761 return 0;
4762 }
4763
4764 static int
4765 ixl_register_rss_key(struct ixl_softc *sc)
4766 {
4767 uint32_t rss_seed[IXL_RSS_KEY_SIZE_REG];
4768 int rv;
4769 size_t i;
4770
4771 ixl_get_default_rss_key(rss_seed, sizeof(rss_seed));
4772
4773 	if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RSS)) {
4774 		rv = ixl_set_rss_key(sc, (uint8_t *)rss_seed,
4775 sizeof(rss_seed));
4776 } else {
4777 rv = 0;
4778 for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
4779 ixl_wr_rx_csr(sc, I40E_PFQF_HKEY(i), rss_seed[i]);
4780 }
4781 }
4782
4783 return rv;
4784 }
4785
4786 static void
4787 ixl_register_rss_pctype(struct ixl_softc *sc)
4788 {
4789 uint64_t set_hena = 0;
4790 uint32_t hena0, hena1;
4791
4792 if (sc->sc_mac_type == I40E_MAC_X722)
4793 set_hena = IXL_RSS_HENA_DEFAULT_X722;
4794 else
4795 set_hena = IXL_RSS_HENA_DEFAULT_XL710;
4796
4797 hena0 = ixl_rd_rx_csr(sc, I40E_PFQF_HENA(0));
4798 hena1 = ixl_rd_rx_csr(sc, I40E_PFQF_HENA(1));
4799
4800 SET(hena0, set_hena);
4801 SET(hena1, set_hena >> 32);
4802
4803 ixl_wr_rx_csr(sc, I40E_PFQF_HENA(0), hena0);
4804 ixl_wr_rx_csr(sc, I40E_PFQF_HENA(1), hena1);
4805 }
4806
4807 static int
4808 ixl_register_rss_hlut(struct ixl_softc *sc)
4809 {
4810 unsigned int qid;
4811 uint8_t hlut_buf[512], lut_mask;
4812 uint32_t *hluts;
4813 size_t i, hluts_num;
4814 int rv;
4815
4816 lut_mask = (0x01 << sc->sc_rss_table_entry_width) - 1;
4817
4818 for (i = 0; i < sc->sc_rss_table_size; i++) {
4819 qid = i % sc->sc_nqueue_pairs;
4820 hlut_buf[i] = qid & lut_mask;
4821 }
4822
4823 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RSS)) {
4824 rv = ixl_set_rss_lut(sc, hlut_buf, sizeof(hlut_buf));
4825 } else {
4826 rv = 0;
4827 hluts = (uint32_t *)hlut_buf;
4828 hluts_num = sc->sc_rss_table_size >> 2;
4829 for (i = 0; i < hluts_num; i++) {
4830 ixl_wr(sc, I40E_PFQF_HLUT(i), hluts[i]);
4831 }
4832 ixl_flush(sc);
4833 }
4834
4835 return rv;
4836 }
4837
4838 static void
4839 ixl_config_rss(struct ixl_softc *sc)
4840 {
4841
4842 KASSERT(mutex_owned(&sc->sc_cfg_lock));
4843
4844 ixl_register_rss_key(sc);
4845 ixl_register_rss_pctype(sc);
4846 ixl_register_rss_hlut(sc);
4847 }
4848
4849 static const struct ixl_phy_type *
4850 ixl_search_phy_type(uint8_t phy_type)
4851 {
4852 const struct ixl_phy_type *itype;
4853 uint64_t mask;
4854 unsigned int i;
4855
4856 if (phy_type >= 64)
4857 return NULL;
4858
4859 mask = 1ULL << phy_type;
4860
4861 for (i = 0; i < __arraycount(ixl_phy_type_map); i++) {
4862 itype = &ixl_phy_type_map[i];
4863
4864 if (ISSET(itype->phy_type, mask))
4865 return itype;
4866 }
4867
4868 return NULL;
4869 }
4870
4871 static uint64_t
4872 ixl_search_link_speed(uint8_t link_speed)
4873 {
4874 const struct ixl_speed_type *type;
4875 unsigned int i;
4876
4877 for (i = 0; i < __arraycount(ixl_speed_type_map); i++) {
4878 type = &ixl_speed_type_map[i];
4879
4880 if (ISSET(type->dev_speed, link_speed))
4881 return type->net_speed;
4882 }
4883
4884 return 0;
4885 }
4886
4887 static uint8_t
4888 ixl_search_baudrate(uint64_t baudrate)
4889 {
4890 const struct ixl_speed_type *type;
4891 unsigned int i;
4892
4893 for (i = 0; i < __arraycount(ixl_speed_type_map); i++) {
4894 type = &ixl_speed_type_map[i];
4895
4896 if (type->net_speed == baudrate) {
4897 return type->dev_speed;
4898 }
4899 }
4900
4901 return 0;
4902 }
4903
4904 static int
4905 ixl_restart_an(struct ixl_softc *sc)
4906 {
4907 struct ixl_aq_desc iaq;
4908
4909 memset(&iaq, 0, sizeof(iaq));
4910 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_RESTART_AN);
4911 iaq.iaq_param[0] =
4912 htole32(IXL_AQ_PHY_RESTART_AN | IXL_AQ_PHY_LINK_ENABLE);
4913
4914 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4915 aprint_error_dev(sc->sc_dev, "RESTART AN timeout\n");
4916 return -1;
4917 }
4918 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4919 aprint_error_dev(sc->sc_dev, "RESTART AN error\n");
4920 return -1;
4921 }
4922
4923 return 0;
4924 }
4925
4926 static int
4927 ixl_add_macvlan(struct ixl_softc *sc, const uint8_t *macaddr,
4928 uint16_t vlan, uint16_t flags)
4929 {
4930 struct ixl_aq_desc iaq;
4931 struct ixl_aq_add_macvlan *param;
4932 struct ixl_aq_add_macvlan_elem *elem;
4933
4934 memset(&iaq, 0, sizeof(iaq));
4935 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4936 iaq.iaq_opcode = htole16(IXL_AQ_OP_ADD_MACVLAN);
4937 iaq.iaq_datalen = htole16(sizeof(*elem));
4938 ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));
4939
4940 param = (struct ixl_aq_add_macvlan *)&iaq.iaq_param;
4941 param->num_addrs = htole16(1);
4942 param->seid0 = htole16(0x8000) | sc->sc_seid;
4943 param->seid1 = 0;
4944 param->seid2 = 0;
4945
4946 elem = IXL_DMA_KVA(&sc->sc_scratch);
4947 memset(elem, 0, sizeof(*elem));
4948 memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
4949 elem->flags = htole16(IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH | flags);
4950 elem->vlan = htole16(vlan);
4951
4952 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4953 return IXL_AQ_RC_EINVAL;
4954 }
4955
4956 switch (le16toh(iaq.iaq_retval)) {
4957 case IXL_AQ_RC_OK:
4958 break;
4959 case IXL_AQ_RC_ENOSPC:
4960 return ENOSPC;
4961 case IXL_AQ_RC_ENOENT:
4962 return ENOENT;
4963 case IXL_AQ_RC_EACCES:
4964 return EACCES;
4965 case IXL_AQ_RC_EEXIST:
4966 return EEXIST;
4967 case IXL_AQ_RC_EINVAL:
4968 return EINVAL;
4969 default:
4970 return EIO;
4971 }
4972
4973 return 0;
4974 }
4975
4976 static int
4977 ixl_remove_macvlan(struct ixl_softc *sc, const uint8_t *macaddr,
4978 uint16_t vlan, uint16_t flags)
4979 {
4980 struct ixl_aq_desc iaq;
4981 struct ixl_aq_remove_macvlan *param;
4982 struct ixl_aq_remove_macvlan_elem *elem;
4983
4984 memset(&iaq, 0, sizeof(iaq));
4985 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4986 iaq.iaq_opcode = htole16(IXL_AQ_OP_REMOVE_MACVLAN);
4987 iaq.iaq_datalen = htole16(sizeof(*elem));
4988 ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));
4989
4990 param = (struct ixl_aq_remove_macvlan *)&iaq.iaq_param;
4991 param->num_addrs = htole16(1);
4992 param->seid0 = htole16(0x8000) | sc->sc_seid;
4993 param->seid1 = 0;
4994 param->seid2 = 0;
4995
4996 elem = IXL_DMA_KVA(&sc->sc_scratch);
4997 memset(elem, 0, sizeof(*elem));
4998 memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
4999 elem->flags = htole16(IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH | flags);
5000 elem->vlan = htole16(vlan);
5001
5002 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
5003 return EINVAL;
5004 }
5005
5006 switch (le16toh(iaq.iaq_retval)) {
5007 case IXL_AQ_RC_OK:
5008 break;
5009 case IXL_AQ_RC_ENOENT:
5010 return ENOENT;
5011 case IXL_AQ_RC_EACCES:
5012 return EACCES;
5013 case IXL_AQ_RC_EINVAL:
5014 return EINVAL;
5015 default:
5016 return EIO;
5017 }
5018
5019 return 0;
5020 }
5021
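/*
 * Set up the host memory cache (HMC) backing store: size the LAN TX/RX
 * context regions from the register-reported object sizes and counts,
 * allocate page and segment descriptor memory, program the segment
 * descriptors, and tell the hardware where each object region starts.
 */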
5022 static int
5023 ixl_hmc(struct ixl_softc *sc)
5024 {
5025 struct {
5026 uint32_t count;
5027 uint32_t minsize;
5028 bus_size_t objsiz;
5029 bus_size_t setoff;
5030 bus_size_t setcnt;
5031 } regs[] = {
5032 {
5033 0,
5034 IXL_HMC_TXQ_MINSIZE,
5035 I40E_GLHMC_LANTXOBJSZ,
5036 I40E_GLHMC_LANTXBASE(sc->sc_pf_id),
5037 I40E_GLHMC_LANTXCNT(sc->sc_pf_id),
5038 },
5039 {
5040 0,
5041 IXL_HMC_RXQ_MINSIZE,
5042 I40E_GLHMC_LANRXOBJSZ,
5043 I40E_GLHMC_LANRXBASE(sc->sc_pf_id),
5044 I40E_GLHMC_LANRXCNT(sc->sc_pf_id),
5045 },
5046 {
5047 0,
5048 0,
5049 I40E_GLHMC_FCOEDDPOBJSZ,
5050 I40E_GLHMC_FCOEDDPBASE(sc->sc_pf_id),
5051 I40E_GLHMC_FCOEDDPCNT(sc->sc_pf_id),
5052 },
5053 {
5054 0,
5055 0,
5056 I40E_GLHMC_FCOEFOBJSZ,
5057 I40E_GLHMC_FCOEFBASE(sc->sc_pf_id),
5058 I40E_GLHMC_FCOEFCNT(sc->sc_pf_id),
5059 },
5060 };
5061 struct ixl_hmc_entry *e;
5062 uint64_t size, dva;
5063 uint8_t *kva;
5064 uint64_t *sdpage;
5065 unsigned int i;
5066 int npages, tables;
5067 uint32_t reg;
5068
5069 CTASSERT(__arraycount(regs) <= __arraycount(sc->sc_hmc_entries));
5070
5071 regs[IXL_HMC_LAN_TX].count = regs[IXL_HMC_LAN_RX].count =
5072 ixl_rd(sc, I40E_GLHMC_LANQMAX);
5073
5074 size = 0;
5075 for (i = 0; i < __arraycount(regs); i++) {
5076 e = &sc->sc_hmc_entries[i];
5077
5078 e->hmc_count = regs[i].count;
5079 reg = ixl_rd(sc, regs[i].objsiz);
5080 e->hmc_size = BIT_ULL(0x3F & reg);
5081 e->hmc_base = size;
5082
5083 if ((e->hmc_size * 8) < regs[i].minsize) {
5084 aprint_error_dev(sc->sc_dev,
5085 "kernel hmc entry is too big\n");
5086 return -1;
5087 }
5088
5089 size += roundup(e->hmc_size * e->hmc_count, IXL_HMC_ROUNDUP);
5090 }
5091 size = roundup(size, IXL_HMC_PGSIZE);
5092 npages = size / IXL_HMC_PGSIZE;
5093
5094 tables = roundup(size, IXL_HMC_L2SZ) / IXL_HMC_L2SZ;
5095
5096 if (ixl_dmamem_alloc(sc, &sc->sc_hmc_pd, size, IXL_HMC_PGSIZE) != 0) {
5097 aprint_error_dev(sc->sc_dev,
5098 "unable to allocate hmc pd memory\n");
5099 return -1;
5100 }
5101
5102 if (ixl_dmamem_alloc(sc, &sc->sc_hmc_sd, tables * IXL_HMC_PGSIZE,
5103 IXL_HMC_PGSIZE) != 0) {
5104 aprint_error_dev(sc->sc_dev,
5105 "unable to allocate hmc sd memory\n");
5106 ixl_dmamem_free(sc, &sc->sc_hmc_pd);
5107 return -1;
5108 }
5109
5110 kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
5111 memset(kva, 0, IXL_DMA_LEN(&sc->sc_hmc_pd));
5112
5113 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
5114 0, IXL_DMA_LEN(&sc->sc_hmc_pd),
5115 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5116
5117 dva = IXL_DMA_DVA(&sc->sc_hmc_pd);
5118 sdpage = IXL_DMA_KVA(&sc->sc_hmc_sd);
5119 memset(sdpage, 0, IXL_DMA_LEN(&sc->sc_hmc_sd));
5120
5121 for (i = 0; (int)i < npages; i++) {
5122 *sdpage = htole64(dva | IXL_HMC_PDVALID);
5123 sdpage++;
5124
5125 dva += IXL_HMC_PGSIZE;
5126 }
5127
5128 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_sd),
5129 0, IXL_DMA_LEN(&sc->sc_hmc_sd),
5130 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5131
5132 dva = IXL_DMA_DVA(&sc->sc_hmc_sd);
5133 for (i = 0; (int)i < tables; i++) {
5134 uint32_t count;
5135
5136 KASSERT(npages >= 0);
5137
5138 count = ((unsigned int)npages > IXL_HMC_PGS) ?
5139 IXL_HMC_PGS : (unsigned int)npages;
5140
5141 ixl_wr(sc, I40E_PFHMC_SDDATAHIGH, dva >> 32);
5142 ixl_wr(sc, I40E_PFHMC_SDDATALOW, dva |
5143 (count << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
5144 (1U << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT));
5145 ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
5146 ixl_wr(sc, I40E_PFHMC_SDCMD,
5147 (1U << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | i);
5148
5149 npages -= IXL_HMC_PGS;
5150 dva += IXL_HMC_PGSIZE;
5151 }
5152
5153 for (i = 0; i < __arraycount(regs); i++) {
5154 e = &sc->sc_hmc_entries[i];
5155
5156 ixl_wr(sc, regs[i].setoff, e->hmc_base / IXL_HMC_ROUNDUP);
5157 ixl_wr(sc, regs[i].setcnt, e->hmc_count);
5158 }
5159
5160 return 0;
5161 }
5162
5163 static void
5164 ixl_hmc_free(struct ixl_softc *sc)
5165 {
5166 ixl_dmamem_free(sc, &sc->sc_hmc_sd);
5167 ixl_dmamem_free(sc, &sc->sc_hmc_pd);
5168 }
5169
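/*
 * Pack a host-order context structure into the bit-packed layout the
 * HMC expects, one field at a time, driven by the packing table
 * (source offset, destination lsb and width).
 */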
5170 static void
5171 ixl_hmc_pack(void *d, const void *s, const struct ixl_hmc_pack *packing,
5172 unsigned int npacking)
5173 {
5174 uint8_t *dst = d;
5175 const uint8_t *src = s;
5176 unsigned int i;
5177
5178 for (i = 0; i < npacking; i++) {
5179 const struct ixl_hmc_pack *pack = &packing[i];
5180 unsigned int offset = pack->lsb / 8;
5181 unsigned int align = pack->lsb % 8;
5182 const uint8_t *in = src + pack->offset;
5183 uint8_t *out = dst + offset;
5184 int width = pack->width;
5185 unsigned int inbits = 0;
5186
5187 if (align) {
5188 inbits = (*in++) << align;
5189 *out++ |= (inbits & 0xff);
5190 inbits >>= 8;
5191
5192 width -= 8 - align;
5193 }
5194
5195 while (width >= 8) {
5196 inbits |= (*in++) << align;
5197 *out++ = (inbits & 0xff);
5198 inbits >>= 8;
5199
5200 width -= 8;
5201 }
5202
5203 if (width > 0) {
5204 inbits |= (*in) << align;
5205 *out |= (inbits & ((1 << width) - 1));
5206 }
5207 }
5208 }
5209
5210 static struct ixl_aq_buf *
5211 ixl_aqb_alloc(struct ixl_softc *sc)
5212 {
5213 struct ixl_aq_buf *aqb;
5214
5215 aqb = malloc(sizeof(*aqb), M_DEVBUF, M_WAITOK);
5216 if (aqb == NULL)
5217 return NULL;
5218
5219 aqb->aqb_size = IXL_AQ_BUFLEN;
5220
5221 if (bus_dmamap_create(sc->sc_dmat, aqb->aqb_size, 1,
5222 aqb->aqb_size, 0,
5223 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &aqb->aqb_map) != 0)
5224 goto free;
5225 if (bus_dmamem_alloc(sc->sc_dmat, aqb->aqb_size,
5226 IXL_AQ_ALIGN, 0, &aqb->aqb_seg, 1, &aqb->aqb_nsegs,
5227 BUS_DMA_WAITOK) != 0)
5228 goto destroy;
5229 if (bus_dmamem_map(sc->sc_dmat, &aqb->aqb_seg, aqb->aqb_nsegs,
5230 aqb->aqb_size, &aqb->aqb_data, BUS_DMA_WAITOK) != 0)
5231 goto dma_free;
5232 if (bus_dmamap_load(sc->sc_dmat, aqb->aqb_map, aqb->aqb_data,
5233 aqb->aqb_size, NULL, BUS_DMA_WAITOK) != 0)
5234 goto unmap;
5235
5236 return aqb;
5237 unmap:
5238 bus_dmamem_unmap(sc->sc_dmat, aqb->aqb_data, aqb->aqb_size);
5239 dma_free:
5240 bus_dmamem_free(sc->sc_dmat, &aqb->aqb_seg, 1);
5241 destroy:
5242 bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
5243 free:
5244 free(aqb, M_DEVBUF);
5245
5246 return NULL;
5247 }
5248
5249 static void
5250 ixl_aqb_free(struct ixl_softc *sc, struct ixl_aq_buf *aqb)
5251 {
5252 bus_dmamap_unload(sc->sc_dmat, aqb->aqb_map);
5253 bus_dmamem_unmap(sc->sc_dmat, aqb->aqb_data, aqb->aqb_size);
5254 bus_dmamem_free(sc->sc_dmat, &aqb->aqb_seg, 1);
5255 bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
5256 free(aqb, M_DEVBUF);
5257 }
5258
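/*
 * Refill the admin receive queue: attach a DMA buffer to every empty
 * slot, reusing idle buffers when available and allocating new ones
 * otherwise, then bump the ARQ tail register if anything was posted.
 */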
5259 static int
5260 ixl_arq_fill(struct ixl_softc *sc)
5261 {
5262 struct ixl_aq_buf *aqb;
5263 struct ixl_aq_desc *arq, *iaq;
5264 unsigned int prod = sc->sc_arq_prod;
5265 unsigned int n;
5266 int post = 0;
5267
5268 n = ixl_rxr_unrefreshed(sc->sc_arq_prod, sc->sc_arq_cons,
5269 IXL_AQ_NUM);
5270 arq = IXL_DMA_KVA(&sc->sc_arq);
5271
5272 if (__predict_false(n <= 0))
5273 return 0;
5274
5275 do {
5276 aqb = sc->sc_arq_live[prod];
5277 iaq = &arq[prod];
5278
5279 if (aqb == NULL) {
5280 aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle);
5281 if (aqb != NULL) {
5282 SIMPLEQ_REMOVE(&sc->sc_arq_idle, aqb,
5283 ixl_aq_buf, aqb_entry);
5284 } else if ((aqb = ixl_aqb_alloc(sc)) == NULL) {
5285 break;
5286 }
5287
5288 sc->sc_arq_live[prod] = aqb;
5289 memset(aqb->aqb_data, 0, aqb->aqb_size);
5290
5291 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0,
5292 aqb->aqb_size, BUS_DMASYNC_PREREAD);
5293
5294 iaq->iaq_flags = htole16(IXL_AQ_BUF |
5295 (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ?
5296 IXL_AQ_LB : 0));
5297 iaq->iaq_opcode = 0;
5298 iaq->iaq_datalen = htole16(aqb->aqb_size);
5299 iaq->iaq_retval = 0;
5300 iaq->iaq_cookie = 0;
5301 iaq->iaq_param[0] = 0;
5302 iaq->iaq_param[1] = 0;
5303 ixl_aq_dva(iaq, aqb->aqb_map->dm_segs[0].ds_addr);
5304 }
5305
5306 prod++;
5307 prod &= IXL_AQ_MASK;
5308
5309 post = 1;
5310
5311 } while (--n);
5312
5313 if (post) {
5314 sc->sc_arq_prod = prod;
5315 ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
5316 }
5317
5318 return post;
5319 }
5320
5321 static void
5322 ixl_arq_unfill(struct ixl_softc *sc)
5323 {
5324 struct ixl_aq_buf *aqb;
5325 unsigned int i;
5326
5327 for (i = 0; i < __arraycount(sc->sc_arq_live); i++) {
5328 aqb = sc->sc_arq_live[i];
5329 if (aqb == NULL)
5330 continue;
5331
5332 sc->sc_arq_live[i] = NULL;
5333 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, aqb->aqb_size,
5334 BUS_DMASYNC_POSTREAD);
5335 ixl_aqb_free(sc, aqb);
5336 }
5337
5338 while ((aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle)) != NULL) {
5339 SIMPLEQ_REMOVE(&sc->sc_arq_idle, aqb,
5340 ixl_aq_buf, aqb_entry);
5341 ixl_aqb_free(sc, aqb);
5342 }
5343 }
5344
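/*
 * Quiesce the function before (re)initialisation: mask all interrupts,
 * point every interrupt linked list at the end-of-list marker, warn the
 * hardware of the coming Tx disables, then disable all queues.
 */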
5345 static void
5346 ixl_clear_hw(struct ixl_softc *sc)
5347 {
5348 uint32_t num_queues, base_queue;
5349 uint32_t num_pf_int;
5350 uint32_t num_vf_int;
5351 uint32_t num_vfs;
5352 uint32_t i, j;
5353 uint32_t val;
5354 uint32_t eol = 0x7ff;
5355
5356 /* get number of interrupts, queues, and vfs */
5357 val = ixl_rd(sc, I40E_GLPCI_CNF2);
5358 num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
5359 I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
5360 num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
5361 I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
5362
5363 val = ixl_rd(sc, I40E_PFLAN_QALLOC);
5364 base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
5365 I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
5366 j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
5367 I40E_PFLAN_QALLOC_LASTQ_SHIFT;
5368 if (val & I40E_PFLAN_QALLOC_VALID_MASK)
5369 num_queues = (j - base_queue) + 1;
5370 else
5371 num_queues = 0;
5372
5373 val = ixl_rd(sc, I40E_PF_VT_PFALLOC);
5374 i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
5375 I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
5376 j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
5377 I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
5378 if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
5379 num_vfs = (j - i) + 1;
5380 else
5381 num_vfs = 0;
5382
5383 /* stop all the interrupts */
5384 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0);
5385 ixl_flush(sc);
5386 val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
5387 for (i = 0; i < num_pf_int - 2; i++)
5388 ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), val);
5389 ixl_flush(sc);
5390
5391 /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
5392 val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
5393 ixl_wr(sc, I40E_PFINT_LNKLST0, val);
5394 for (i = 0; i < num_pf_int - 2; i++)
5395 ixl_wr(sc, I40E_PFINT_LNKLSTN(i), val);
5396 val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
5397 for (i = 0; i < num_vfs; i++)
5398 ixl_wr(sc, I40E_VPINT_LNKLST0(i), val);
5399 for (i = 0; i < num_vf_int - 2; i++)
5400 ixl_wr(sc, I40E_VPINT_LNKLSTN(i), val);
5401
5402 /* warn the HW of the coming Tx disables */
5403 for (i = 0; i < num_queues; i++) {
5404 uint32_t abs_queue_idx = base_queue + i;
5405 uint32_t reg_block = 0;
5406
5407 if (abs_queue_idx >= 128) {
5408 reg_block = abs_queue_idx / 128;
5409 abs_queue_idx %= 128;
5410 }
5411
5412 val = ixl_rd(sc, I40E_GLLAN_TXPRE_QDIS(reg_block));
5413 val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
5414 val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
5415 val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
5416
5417 ixl_wr(sc, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
5418 }
5419 delaymsec(400);
5420
5421 /* stop all the queues */
5422 for (i = 0; i < num_queues; i++) {
5423 ixl_wr(sc, I40E_QINT_TQCTL(i), 0);
5424 ixl_wr(sc, I40E_QTX_ENA(i), 0);
5425 ixl_wr(sc, I40E_QINT_RQCTL(i), 0);
5426 ixl_wr(sc, I40E_QRX_ENA(i), 0);
5427 }
5428
5429 /* short wait for all queue disables to settle */
5430 delaymsec(50);
5431 }
5432
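/*
 * PF reset: wait for any global reset in progress to settle and for the
 * firmware to report ready, then trigger a PF software reset via
 * PFGEN_CTRL unless a global reset already did the work for us.
 */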
5433 static int
5434 ixl_pf_reset(struct ixl_softc *sc)
5435 {
5436 uint32_t cnt = 0;
5437 uint32_t cnt1 = 0;
5438 uint32_t reg = 0, reg0 = 0;
5439 uint32_t grst_del;
5440
5441 /*
5442 * Poll for Global Reset steady state in case of recent GRST.
5443 * The grst delay value is in 100ms units, and we'll wait a
5444 * couple counts longer to be sure we don't just miss the end.
5445 */
5446 grst_del = ixl_rd(sc, I40E_GLGEN_RSTCTL);
5447 grst_del &= I40E_GLGEN_RSTCTL_GRSTDEL_MASK;
5448 grst_del >>= I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
5449
5450 grst_del = grst_del * 20;
5451
5452 for (cnt = 0; cnt < grst_del; cnt++) {
5453 reg = ixl_rd(sc, I40E_GLGEN_RSTAT);
5454 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
5455 break;
5456 delaymsec(100);
5457 }
5458 if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
5459 aprint_error(", Global reset polling failed to complete\n");
5460 return -1;
5461 }
5462
5463 /* Now Wait for the FW to be ready */
5464 for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
5465 reg = ixl_rd(sc, I40E_GLNVM_ULD);
5466 reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5467 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
5468 if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5469 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))
5470 break;
5471
5472 delaymsec(10);
5473 }
5474 if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5475 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
5476 aprint_error(", wait for FW Reset complete timed out "
5477 "(I40E_GLNVM_ULD = 0x%x)\n", reg);
5478 return -1;
5479 }
5480
5481 /*
5482 * If there was a Global Reset in progress when we got here,
5483 * we don't need to do the PF Reset
5484 */
5485 if (cnt == 0) {
5486 reg = ixl_rd(sc, I40E_PFGEN_CTRL);
5487 ixl_wr(sc, I40E_PFGEN_CTRL, reg | I40E_PFGEN_CTRL_PFSWR_MASK);
5488 for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) {
5489 reg = ixl_rd(sc, I40E_PFGEN_CTRL);
5490 if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
5491 break;
5492 delaymsec(1);
5493
5494 reg0 = ixl_rd(sc, I40E_GLGEN_RSTAT);
5495 if (reg0 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
5496 aprint_error(", Core reset upcoming."
5497 " Skipping PF reset reset request\n");
5498 return -1;
5499 }
5500 }
5501 if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
5502 			aprint_error(", PF reset polling failed to complete "
5503 			    "(I40E_PFGEN_CTRL = 0x%x)\n", reg);
5504 return -1;
5505 }
5506 }
5507
5508 return 0;
5509 }
5510
5511 static int
5512 ixl_dmamem_alloc(struct ixl_softc *sc, struct ixl_dmamem *ixm,
5513 bus_size_t size, bus_size_t align)
5514 {
5515 ixm->ixm_size = size;
5516
5517 if (bus_dmamap_create(sc->sc_dmat, ixm->ixm_size, 1,
5518 ixm->ixm_size, 0,
5519 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
5520 &ixm->ixm_map) != 0)
5521 return 1;
5522 if (bus_dmamem_alloc(sc->sc_dmat, ixm->ixm_size,
5523 align, 0, &ixm->ixm_seg, 1, &ixm->ixm_nsegs,
5524 BUS_DMA_WAITOK) != 0)
5525 goto destroy;
5526 if (bus_dmamem_map(sc->sc_dmat, &ixm->ixm_seg, ixm->ixm_nsegs,
5527 ixm->ixm_size, &ixm->ixm_kva, BUS_DMA_WAITOK) != 0)
5528 goto free;
5529 if (bus_dmamap_load(sc->sc_dmat, ixm->ixm_map, ixm->ixm_kva,
5530 ixm->ixm_size, NULL, BUS_DMA_WAITOK) != 0)
5531 goto unmap;
5532
5533 memset(ixm->ixm_kva, 0, ixm->ixm_size);
5534
5535 return 0;
5536 unmap:
5537 bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
5538 free:
5539 bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
5540 destroy:
5541 bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
5542 return 1;
5543 }
5544
5545 static void
5546 ixl_dmamem_free(struct ixl_softc *sc, struct ixl_dmamem *ixm)
5547 {
5548 bus_dmamap_unload(sc->sc_dmat, ixm->ixm_map);
5549 bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
5550 bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
5551 bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
5552 }
5553
5554 static int
5555 ixl_setup_vlan_hwfilter(struct ixl_softc *sc)
5556 {
5557 struct ethercom *ec = &sc->sc_ec;
5558 struct vlanid_list *vlanidp;
5559 int rv;
5560
5561 ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
5562 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
5563 ixl_remove_macvlan(sc, etherbroadcastaddr, 0,
5564 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
5565
5566 rv = ixl_add_macvlan(sc, sc->sc_enaddr, 0,
5567 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5568 if (rv != 0)
5569 return rv;
5570 rv = ixl_add_macvlan(sc, etherbroadcastaddr, 0,
5571 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5572 if (rv != 0)
5573 return rv;
5574
5575 ETHER_LOCK(ec);
5576 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
5577 rv = ixl_add_macvlan(sc, sc->sc_enaddr,
5578 vlanidp->vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5579 if (rv != 0)
5580 break;
5581 rv = ixl_add_macvlan(sc, etherbroadcastaddr,
5582 vlanidp->vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5583 if (rv != 0)
5584 break;
5585 }
5586 ETHER_UNLOCK(ec);
5587
5588 return rv;
5589 }
5590
5591 static void
5592 ixl_teardown_vlan_hwfilter(struct ixl_softc *sc)
5593 {
5594 struct vlanid_list *vlanidp;
5595 struct ethercom *ec = &sc->sc_ec;
5596
5597 ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
5598 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5599 ixl_remove_macvlan(sc, etherbroadcastaddr, 0,
5600 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5601
5602 ETHER_LOCK(ec);
5603 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
5604 ixl_remove_macvlan(sc, sc->sc_enaddr,
5605 vlanidp->vid, IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5606 ixl_remove_macvlan(sc, etherbroadcastaddr,
5607 vlanidp->vid, IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5608 }
5609 ETHER_UNLOCK(ec);
5610
5611 ixl_add_macvlan(sc, sc->sc_enaddr, 0,
5612 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
5613 ixl_add_macvlan(sc, etherbroadcastaddr, 0,
5614 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
5615 }
5616
5617 static int
5618 ixl_update_macvlan(struct ixl_softc *sc)
5619 {
5620 int rv = 0;
5621 int next_ec_capenable = sc->sc_ec.ec_capenable;
5622
5623 if (ISSET(next_ec_capenable, ETHERCAP_VLAN_HWFILTER)) {
5624 rv = ixl_setup_vlan_hwfilter(sc);
5625 if (rv != 0)
5626 ixl_teardown_vlan_hwfilter(sc);
5627 } else {
5628 ixl_teardown_vlan_hwfilter(sc);
5629 }
5630
5631 return rv;
5632 }
5633
5634 static int
5635 ixl_ifflags_cb(struct ethercom *ec)
5636 {
5637 struct ifnet *ifp = &ec->ec_if;
5638 struct ixl_softc *sc = ifp->if_softc;
5639 int rv, change;
5640
5641 mutex_enter(&sc->sc_cfg_lock);
5642
5643 change = ec->ec_capenable ^ sc->sc_cur_ec_capenable;
5644
5645 if (ISSET(change, ETHERCAP_VLAN_HWTAGGING)) {
5646 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWTAGGING;
5647 rv = ENETRESET;
5648 goto out;
5649 }
5650
5651 if (ISSET(change, ETHERCAP_VLAN_HWFILTER)) {
5652 rv = ixl_update_macvlan(sc);
5653 if (rv == 0) {
5654 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWFILTER;
5655 } else {
5656 CLR(ec->ec_capenable, ETHERCAP_VLAN_HWFILTER);
5657 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER);
5658 }
5659 }
5660
5661 rv = ixl_iff(sc);
5662 out:
5663 mutex_exit(&sc->sc_cfg_lock);
5664
5665 return rv;
5666 }
5667
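/*
 * Translate a PHY LINK STATUS reply into ifmedia active/status words,
 * an interface baudrate and the link state returned to the caller.
 */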
5668 static int
5669 ixl_set_link_status(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
5670 {
5671 const struct ixl_aq_link_status *status;
5672 const struct ixl_phy_type *itype;
5673
5674 uint64_t ifm_active = IFM_ETHER;
5675 uint64_t ifm_status = IFM_AVALID;
5676 int link_state = LINK_STATE_DOWN;
5677 uint64_t baudrate = 0;
5678
5679 status = (const struct ixl_aq_link_status *)iaq->iaq_param;
5680 if (!ISSET(status->link_info, IXL_AQ_LINK_UP_FUNCTION)) {
5681 ifm_active |= IFM_NONE;
5682 goto done;
5683 }
5684
5685 ifm_active |= IFM_FDX;
5686 ifm_status |= IFM_ACTIVE;
5687 link_state = LINK_STATE_UP;
5688
5689 itype = ixl_search_phy_type(status->phy_type);
5690 if (itype != NULL)
5691 ifm_active |= itype->ifm_type;
5692
5693 if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_TX))
5694 ifm_active |= IFM_ETH_TXPAUSE;
5695 if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_RX))
5696 ifm_active |= IFM_ETH_RXPAUSE;
5697
5698 baudrate = ixl_search_link_speed(status->link_speed);
5699
5700 done:
5701 /* NET_ASSERT_LOCKED() except during attach */
5702 sc->sc_media_active = ifm_active;
5703 sc->sc_media_status = ifm_status;
5704
5705 sc->sc_ec.ec_if.if_baudrate = baudrate;
5706
5707 return link_state;
5708 }
5709
5710 static int
5711 ixl_establish_intx(struct ixl_softc *sc)
5712 {
5713 pci_chipset_tag_t pc = sc->sc_pa.pa_pc;
5714 pci_intr_handle_t *intr;
5715 char xnamebuf[32];
5716 char intrbuf[PCI_INTRSTR_LEN];
5717 char const *intrstr;
5718
5719 KASSERT(sc->sc_nintrs == 1);
5720
5721 intr = &sc->sc_ihp[0];
5722
5723 intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf));
5724 snprintf(xnamebuf, sizeof(xnamebuf), "%s:legacy",
5725 device_xname(sc->sc_dev));
5726
5727 sc->sc_ihs[0] = pci_intr_establish_xname(pc, *intr, IPL_NET, ixl_intr,
5728 sc, xnamebuf);
5729
5730 if (sc->sc_ihs[0] == NULL) {
5731 aprint_error_dev(sc->sc_dev,
5732 "unable to establish interrupt at %s\n", intrstr);
5733 return -1;
5734 }
5735
5736 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
5737 return 0;
5738 }
5739
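/*
 * Establish MSI-X handlers: vector 0 services the admin queue and other
 * events, the remaining vectors service one queue pair each, with an
 * affinity hint spreading the queue vectors across CPUs.
 */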
5740 static int
5741 ixl_establish_msix(struct ixl_softc *sc)
5742 {
5743 pci_chipset_tag_t pc = sc->sc_pa.pa_pc;
5744 kcpuset_t *affinity;
5745 unsigned int vector = 0;
5746 unsigned int i;
5747 int affinity_to, r;
5748 char xnamebuf[32];
5749 char intrbuf[PCI_INTRSTR_LEN];
5750 char const *intrstr;
5751
5752 kcpuset_create(&affinity, false);
5753
5754 /* the "other" intr is mapped to vector 0 */
5755 vector = 0;
5756 intrstr = pci_intr_string(pc, sc->sc_ihp[vector],
5757 intrbuf, sizeof(intrbuf));
5758 snprintf(xnamebuf, sizeof(xnamebuf), "%s others",
5759 device_xname(sc->sc_dev));
5760 sc->sc_ihs[vector] = pci_intr_establish_xname(pc,
5761 sc->sc_ihp[vector], IPL_NET, ixl_other_intr,
5762 sc, xnamebuf);
5763 if (sc->sc_ihs[vector] == NULL) {
5764 aprint_error_dev(sc->sc_dev,
5765 "unable to establish interrupt at %s\n", intrstr);
5766 goto fail;
5767 }
5768
5769 aprint_normal_dev(sc->sc_dev, "other interrupt at %s", intrstr);
5770
5771 affinity_to = ncpu > (int)sc->sc_nqueue_pairs_max ? 1 : 0;
5772 affinity_to = (affinity_to + sc->sc_nqueue_pairs_max) % ncpu;
5773
5774 kcpuset_zero(affinity);
5775 kcpuset_set(affinity, affinity_to);
5776 r = interrupt_distribute(sc->sc_ihs[vector], affinity, NULL);
5777 if (r == 0) {
5778 aprint_normal(", affinity to %u", affinity_to);
5779 }
5780 aprint_normal("\n");
5781 vector++;
5782
5783 sc->sc_msix_vector_queue = vector;
5784 affinity_to = ncpu > (int)sc->sc_nqueue_pairs_max ? 1 : 0;
5785
5786 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
5787 intrstr = pci_intr_string(pc, sc->sc_ihp[vector],
5788 intrbuf, sizeof(intrbuf));
5789 snprintf(xnamebuf, sizeof(xnamebuf), "%s TXRX%d",
5790 device_xname(sc->sc_dev), i);
5791
5792 sc->sc_ihs[vector] = pci_intr_establish_xname(pc,
5793 sc->sc_ihp[vector], IPL_NET, ixl_queue_intr,
5794 (void *)&sc->sc_qps[i], xnamebuf);
5795
5796 if (sc->sc_ihs[vector] == NULL) {
5797 aprint_error_dev(sc->sc_dev,
5798 "unable to establish interrupt at %s\n", intrstr);
5799 goto fail;
5800 }
5801
5802 aprint_normal_dev(sc->sc_dev,
5803 "for TXRX%d interrupt at %s",i , intrstr);
5804
5805 kcpuset_zero(affinity);
5806 kcpuset_set(affinity, affinity_to);
5807 r = interrupt_distribute(sc->sc_ihs[vector], affinity, NULL);
5808 if (r == 0) {
5809 aprint_normal(", affinity to %u", affinity_to);
5810 affinity_to = (affinity_to + 1) % ncpu;
5811 }
5812 aprint_normal("\n");
5813 vector++;
5814 }
5815
5816 kcpuset_destroy(affinity);
5817
5818 return 0;
5819 fail:
5820 for (i = 0; i < vector; i++) {
5821 pci_intr_disestablish(pc, sc->sc_ihs[i]);
5822 }
5823
5824 	sc->sc_msix_vector_queue = 0;
5826 kcpuset_destroy(affinity);
5827
5828 return -1;
5829 }
5830
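/*
 * Program the per-queue interrupt cause and linked-list registers.  With
 * MSI-X each queue pair gets its own vector; otherwise every queue is
 * chained onto the single legacy vector.
 */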
5831 static void
5832 ixl_config_queue_intr(struct ixl_softc *sc)
5833 {
5834 unsigned int i, vector;
5835
5836 if (sc->sc_intrtype == PCI_INTR_TYPE_MSIX) {
5837 vector = sc->sc_msix_vector_queue;
5838 } else {
5839 vector = I40E_INTR_NOTX_INTR;
5840
5841 ixl_wr(sc, I40E_PFINT_LNKLST0,
5842 (I40E_INTR_NOTX_QUEUE <<
5843 I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
5844 (I40E_QUEUE_TYPE_RX <<
5845 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
5846 }
5847
5848 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
5849 ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), 0);
5850 ixl_flush(sc);
5851
5852 ixl_wr(sc, I40E_PFINT_LNKLSTN(i),
5853 ((i) << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
5854 (I40E_QUEUE_TYPE_RX <<
5855 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
5856
5857 ixl_wr(sc, I40E_QINT_RQCTL(i),
5858 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
5859 (I40E_ITR_INDEX_RX <<
5860 I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
5861 (I40E_INTR_NOTX_RX_QUEUE <<
5862 I40E_QINT_RQCTL_MSIX0_INDX_SHIFT) |
5863 (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
5864 (I40E_QUEUE_TYPE_TX <<
5865 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
5866 I40E_QINT_RQCTL_CAUSE_ENA_MASK);
5867
5868 ixl_wr(sc, I40E_QINT_TQCTL(i),
5869 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
5870 (I40E_ITR_INDEX_TX <<
5871 I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
5872 (I40E_INTR_NOTX_TX_QUEUE <<
5873 I40E_QINT_TQCTL_MSIX0_INDX_SHIFT) |
5874 (I40E_QUEUE_TYPE_EOL <<
5875 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
5876 (I40E_QUEUE_TYPE_RX <<
5877 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) |
5878 I40E_QINT_TQCTL_CAUSE_ENA_MASK);
5879
5880 if (sc->sc_intrtype == PCI_INTR_TYPE_MSIX)
5881 vector++;
5882 }
5883 ixl_flush(sc);
5884
5885 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_RX), 0x7a);
5886 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_TX), 0x7a);
5887 ixl_flush(sc);
5888 }
5889
5890 static void
5891 ixl_config_other_intr(struct ixl_softc *sc)
5892 {
5893 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0);
5894 (void)ixl_rd(sc, I40E_PFINT_ICR0);
5895
5896 ixl_wr(sc, I40E_PFINT_ICR0_ENA,
5897 I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
5898 I40E_PFINT_ICR0_ENA_GRST_MASK |
5899 I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
5900 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
5901 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
5902 I40E_PFINT_ICR0_ENA_VFLR_MASK |
5903 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
5904 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
5905 I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK);
5906
5907 ixl_wr(sc, I40E_PFINT_LNKLST0, 0x7FF);
5908 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_OTHER), 0);
5909 ixl_wr(sc, I40E_PFINT_STAT_CTL0,
5910 (I40E_ITR_INDEX_OTHER <<
5911 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT));
5912 ixl_flush(sc);
5913 }
5914
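/*
 * Allocate and establish interrupts: prefer MSI-X with one vector per
 * queue pair plus one for other events, and fall back to INTx if MSI-X
 * cannot be established.
 */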
5915 static int
5916 ixl_setup_interrupts(struct ixl_softc *sc)
5917 {
5918 struct pci_attach_args *pa = &sc->sc_pa;
5919 pci_intr_type_t max_type, intr_type;
5920 int counts[PCI_INTR_TYPE_SIZE];
5921 int error;
5922 unsigned int i;
5923 bool retry;
5924
5925 memset(counts, 0, sizeof(counts));
5926 max_type = PCI_INTR_TYPE_MSIX;
5927 /* QPs + other interrupt */
5928 counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueue_pairs_max + 1;
5929 counts[PCI_INTR_TYPE_INTX] = 1;
5930
5931 if (ixl_param_nomsix)
5932 counts[PCI_INTR_TYPE_MSIX] = 0;
5933
5934 do {
5935 retry = false;
5936 error = pci_intr_alloc(pa, &sc->sc_ihp, counts, max_type);
5937 if (error != 0) {
5938 aprint_error_dev(sc->sc_dev,
5939 "couldn't map interrupt\n");
5940 break;
5941 }
5942
5943 intr_type = pci_intr_type(pa->pa_pc, sc->sc_ihp[0]);
5944 sc->sc_nintrs = counts[intr_type];
5945 KASSERT(sc->sc_nintrs > 0);
5946
5947 for (i = 0; i < sc->sc_nintrs; i++) {
5948 pci_intr_setattr(pa->pa_pc, &sc->sc_ihp[i],
5949 PCI_INTR_MPSAFE, true);
5950 }
5951
5952 sc->sc_ihs = kmem_alloc(sizeof(sc->sc_ihs[0]) * sc->sc_nintrs,
5953 KM_SLEEP);
5954
5955 if (intr_type == PCI_INTR_TYPE_MSIX) {
5956 error = ixl_establish_msix(sc);
5957 if (error) {
5958 counts[PCI_INTR_TYPE_MSIX] = 0;
5959 retry = true;
5960 }
5961 } else if (intr_type == PCI_INTR_TYPE_INTX) {
5962 error = ixl_establish_intx(sc);
5963 } else {
5964 error = -1;
5965 }
5966
5967 if (error) {
5968 kmem_free(sc->sc_ihs,
5969 sizeof(sc->sc_ihs[0]) * sc->sc_nintrs);
5970 pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs);
5971 } else {
5972 sc->sc_intrtype = intr_type;
5973 }
5974 } while (retry);
5975
5976 return error;
5977 }
5978
5979 static void
5980 ixl_teardown_interrupts(struct ixl_softc *sc)
5981 {
5982 struct pci_attach_args *pa = &sc->sc_pa;
5983 unsigned int i;
5984
5985 for (i = 0; i < sc->sc_nintrs; i++) {
5986 pci_intr_disestablish(pa->pa_pc, sc->sc_ihs[i]);
5987 }
5988
5989 pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs);
5990
5991 kmem_free(sc->sc_ihs, sizeof(sc->sc_ihs[0]) * sc->sc_nintrs);
5992 sc->sc_ihs = NULL;
5993 sc->sc_nintrs = 0;
5994 }
5995
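/*
 * ixl_setup_stats:
 *	Attach the per-queue, per-port and per-VSI event counters and
 *	initialize the callout and work used to update the hardware
 *	statistics periodically.
 */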
5996 static int
5997 ixl_setup_stats(struct ixl_softc *sc)
5998 {
5999 struct ixl_queue_pair *qp;
6000 struct ixl_tx_ring *txr;
6001 struct ixl_rx_ring *rxr;
6002 struct ixl_stats_counters *isc;
6003 unsigned int i;
6004
6005 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
6006 qp = &sc->sc_qps[i];
6007 txr = qp->qp_txr;
6008 rxr = qp->qp_rxr;
6009
6010 evcnt_attach_dynamic(&txr->txr_defragged, EVCNT_TYPE_MISC,
6011 		    NULL, qp->qp_name, "m_defrag succeeded");
6012 evcnt_attach_dynamic(&txr->txr_defrag_failed, EVCNT_TYPE_MISC,
6013 		    NULL, qp->qp_name, "m_defrag failed");
6014 evcnt_attach_dynamic(&txr->txr_pcqdrop, EVCNT_TYPE_MISC,
6015 NULL, qp->qp_name, "Dropped in pcq");
6016 evcnt_attach_dynamic(&txr->txr_transmitdef, EVCNT_TYPE_MISC,
6017 NULL, qp->qp_name, "Deferred transmit");
6018 evcnt_attach_dynamic(&txr->txr_intr, EVCNT_TYPE_INTR,
6019 NULL, qp->qp_name, "Interrupt on queue");
6020 evcnt_attach_dynamic(&txr->txr_defer, EVCNT_TYPE_MISC,
6021 NULL, qp->qp_name, "Handled queue in softint/workqueue");
6022
6023 evcnt_attach_dynamic(&rxr->rxr_mgethdr_failed, EVCNT_TYPE_MISC,
6024 NULL, qp->qp_name, "MGETHDR failed");
6025 evcnt_attach_dynamic(&rxr->rxr_mgetcl_failed, EVCNT_TYPE_MISC,
6026 NULL, qp->qp_name, "MCLGET failed");
6027 evcnt_attach_dynamic(&rxr->rxr_mbuf_load_failed,
6028 EVCNT_TYPE_MISC, NULL, qp->qp_name,
6029 "bus_dmamap_load_mbuf failed");
6030 evcnt_attach_dynamic(&rxr->rxr_intr, EVCNT_TYPE_INTR,
6031 NULL, qp->qp_name, "Interrupt on queue");
6032 evcnt_attach_dynamic(&rxr->rxr_defer, EVCNT_TYPE_MISC,
6033 NULL, qp->qp_name, "Handled queue in softint/workqueue");
6034 }
6035
6036 evcnt_attach_dynamic(&sc->sc_event_atq, EVCNT_TYPE_INTR,
6037 NULL, device_xname(sc->sc_dev), "Interrupt for other events");
6038 evcnt_attach_dynamic(&sc->sc_event_link, EVCNT_TYPE_MISC,
6039 NULL, device_xname(sc->sc_dev), "Link status event");
6040 evcnt_attach_dynamic(&sc->sc_event_ecc_err, EVCNT_TYPE_MISC,
6041 NULL, device_xname(sc->sc_dev), "ECC error");
6042 evcnt_attach_dynamic(&sc->sc_event_pci_exception, EVCNT_TYPE_MISC,
6043 NULL, device_xname(sc->sc_dev), "PCI exception");
6044 evcnt_attach_dynamic(&sc->sc_event_crit_err, EVCNT_TYPE_MISC,
6045 NULL, device_xname(sc->sc_dev), "Critical error");
6046
6047 isc = &sc->sc_stats_counters;
6048 evcnt_attach_dynamic(&isc->isc_crc_errors, EVCNT_TYPE_MISC,
6049 NULL, device_xname(sc->sc_dev), "CRC errors");
6050 evcnt_attach_dynamic(&isc->isc_illegal_bytes, EVCNT_TYPE_MISC,
6051 NULL, device_xname(sc->sc_dev), "Illegal bytes");
6052 	evcnt_attach_dynamic(&isc->isc_mac_local_faults, EVCNT_TYPE_MISC,
6053 	    NULL, device_xname(sc->sc_dev), "MAC local faults");
6054 	evcnt_attach_dynamic(&isc->isc_mac_remote_faults, EVCNT_TYPE_MISC,
6055 	    NULL, device_xname(sc->sc_dev), "MAC remote faults");
6056 evcnt_attach_dynamic(&isc->isc_link_xon_rx, EVCNT_TYPE_MISC,
6057 NULL, device_xname(sc->sc_dev), "Rx xon");
6058 evcnt_attach_dynamic(&isc->isc_link_xon_tx, EVCNT_TYPE_MISC,
6059 NULL, device_xname(sc->sc_dev), "Tx xon");
6060 evcnt_attach_dynamic(&isc->isc_link_xoff_rx, EVCNT_TYPE_MISC,
6061 NULL, device_xname(sc->sc_dev), "Rx xoff");
6062 evcnt_attach_dynamic(&isc->isc_link_xoff_tx, EVCNT_TYPE_MISC,
6063 NULL, device_xname(sc->sc_dev), "Tx xoff");
6064 evcnt_attach_dynamic(&isc->isc_rx_fragments, EVCNT_TYPE_MISC,
6065 NULL, device_xname(sc->sc_dev), "Rx fragments");
6066 evcnt_attach_dynamic(&isc->isc_rx_jabber, EVCNT_TYPE_MISC,
6067 NULL, device_xname(sc->sc_dev), "Rx jabber");
6068
6069 evcnt_attach_dynamic(&isc->isc_rx_size_64, EVCNT_TYPE_MISC,
6070 NULL, device_xname(sc->sc_dev), "Rx size 64");
6071 evcnt_attach_dynamic(&isc->isc_rx_size_127, EVCNT_TYPE_MISC,
6072 NULL, device_xname(sc->sc_dev), "Rx size 127");
6073 evcnt_attach_dynamic(&isc->isc_rx_size_255, EVCNT_TYPE_MISC,
6074 NULL, device_xname(sc->sc_dev), "Rx size 255");
6075 evcnt_attach_dynamic(&isc->isc_rx_size_511, EVCNT_TYPE_MISC,
6076 NULL, device_xname(sc->sc_dev), "Rx size 511");
6077 evcnt_attach_dynamic(&isc->isc_rx_size_1023, EVCNT_TYPE_MISC,
6078 NULL, device_xname(sc->sc_dev), "Rx size 1023");
6079 evcnt_attach_dynamic(&isc->isc_rx_size_1522, EVCNT_TYPE_MISC,
6080 NULL, device_xname(sc->sc_dev), "Rx size 1522");
6081 evcnt_attach_dynamic(&isc->isc_rx_size_big, EVCNT_TYPE_MISC,
6082 NULL, device_xname(sc->sc_dev), "Rx jumbo packets");
6083 evcnt_attach_dynamic(&isc->isc_rx_undersize, EVCNT_TYPE_MISC,
6084 NULL, device_xname(sc->sc_dev), "Rx under size");
6085 evcnt_attach_dynamic(&isc->isc_rx_oversize, EVCNT_TYPE_MISC,
6086 NULL, device_xname(sc->sc_dev), "Rx over size");
6087
6088 evcnt_attach_dynamic(&isc->isc_rx_bytes, EVCNT_TYPE_MISC,
6089 NULL, device_xname(sc->sc_dev), "Rx bytes / port");
6090 evcnt_attach_dynamic(&isc->isc_rx_discards, EVCNT_TYPE_MISC,
6091 NULL, device_xname(sc->sc_dev), "Rx discards / port");
6092 evcnt_attach_dynamic(&isc->isc_rx_unicast, EVCNT_TYPE_MISC,
6093 NULL, device_xname(sc->sc_dev), "Rx unicast / port");
6094 evcnt_attach_dynamic(&isc->isc_rx_multicast, EVCNT_TYPE_MISC,
6095 NULL, device_xname(sc->sc_dev), "Rx multicast / port");
6096 evcnt_attach_dynamic(&isc->isc_rx_broadcast, EVCNT_TYPE_MISC,
6097 NULL, device_xname(sc->sc_dev), "Rx broadcast / port");
6098
6099 evcnt_attach_dynamic(&isc->isc_vsi_rx_bytes, EVCNT_TYPE_MISC,
6100 NULL, device_xname(sc->sc_dev), "Rx bytes / vsi");
6101 evcnt_attach_dynamic(&isc->isc_vsi_rx_discards, EVCNT_TYPE_MISC,
6102 NULL, device_xname(sc->sc_dev), "Rx discard / vsi");
6103 evcnt_attach_dynamic(&isc->isc_vsi_rx_unicast, EVCNT_TYPE_MISC,
6104 NULL, device_xname(sc->sc_dev), "Rx unicast / vsi");
6105 evcnt_attach_dynamic(&isc->isc_vsi_rx_multicast, EVCNT_TYPE_MISC,
6106 NULL, device_xname(sc->sc_dev), "Rx multicast / vsi");
6107 evcnt_attach_dynamic(&isc->isc_vsi_rx_broadcast, EVCNT_TYPE_MISC,
6108 NULL, device_xname(sc->sc_dev), "Rx broadcast / vsi");
6109
6110 evcnt_attach_dynamic(&isc->isc_tx_size_64, EVCNT_TYPE_MISC,
6111 NULL, device_xname(sc->sc_dev), "Tx size 64");
6112 evcnt_attach_dynamic(&isc->isc_tx_size_127, EVCNT_TYPE_MISC,
6113 NULL, device_xname(sc->sc_dev), "Tx size 127");
6114 evcnt_attach_dynamic(&isc->isc_tx_size_255, EVCNT_TYPE_MISC,
6115 NULL, device_xname(sc->sc_dev), "Tx size 255");
6116 evcnt_attach_dynamic(&isc->isc_tx_size_511, EVCNT_TYPE_MISC,
6117 NULL, device_xname(sc->sc_dev), "Tx size 511");
6118 evcnt_attach_dynamic(&isc->isc_tx_size_1023, EVCNT_TYPE_MISC,
6119 NULL, device_xname(sc->sc_dev), "Tx size 1023");
6120 evcnt_attach_dynamic(&isc->isc_tx_size_1522, EVCNT_TYPE_MISC,
6121 NULL, device_xname(sc->sc_dev), "Tx size 1522");
6122 evcnt_attach_dynamic(&isc->isc_tx_size_big, EVCNT_TYPE_MISC,
6123 NULL, device_xname(sc->sc_dev), "Tx jumbo packets");
6124
6125 evcnt_attach_dynamic(&isc->isc_tx_bytes, EVCNT_TYPE_MISC,
6126 NULL, device_xname(sc->sc_dev), "Tx bytes / port");
6127 evcnt_attach_dynamic(&isc->isc_tx_dropped_link_down, EVCNT_TYPE_MISC,
6128 NULL, device_xname(sc->sc_dev),
6129 "Tx dropped due to link down / port");
6130 evcnt_attach_dynamic(&isc->isc_tx_unicast, EVCNT_TYPE_MISC,
6131 NULL, device_xname(sc->sc_dev), "Tx unicast / port");
6132 evcnt_attach_dynamic(&isc->isc_tx_multicast, EVCNT_TYPE_MISC,
6133 NULL, device_xname(sc->sc_dev), "Tx multicast / port");
6134 evcnt_attach_dynamic(&isc->isc_tx_broadcast, EVCNT_TYPE_MISC,
6135 NULL, device_xname(sc->sc_dev), "Tx broadcast / port");
6136
6137 evcnt_attach_dynamic(&isc->isc_vsi_tx_bytes, EVCNT_TYPE_MISC,
6138 NULL, device_xname(sc->sc_dev), "Tx bytes / vsi");
6139 evcnt_attach_dynamic(&isc->isc_vsi_tx_errors, EVCNT_TYPE_MISC,
6140 NULL, device_xname(sc->sc_dev), "Tx errors / vsi");
6141 evcnt_attach_dynamic(&isc->isc_vsi_tx_unicast, EVCNT_TYPE_MISC,
6142 NULL, device_xname(sc->sc_dev), "Tx unicast / vsi");
6143 evcnt_attach_dynamic(&isc->isc_vsi_tx_multicast, EVCNT_TYPE_MISC,
6144 NULL, device_xname(sc->sc_dev), "Tx multicast / vsi");
6145 evcnt_attach_dynamic(&isc->isc_vsi_tx_broadcast, EVCNT_TYPE_MISC,
6146 NULL, device_xname(sc->sc_dev), "Tx broadcast / vsi");
6147
6148 sc->sc_stats_intval = ixl_param_stats_interval;
6149 callout_init(&sc->sc_stats_callout, CALLOUT_MPSAFE);
6150 callout_setfunc(&sc->sc_stats_callout, ixl_stats_callout, sc);
6151 ixl_work_set(&sc->sc_stats_task, ixl_stats_update, sc);
6152
6153 return 0;
6154 }
6155
6156 static void
6157 ixl_teardown_stats(struct ixl_softc *sc)
6158 {
6159 struct ixl_tx_ring *txr;
6160 struct ixl_rx_ring *rxr;
6161 struct ixl_stats_counters *isc;
6162 unsigned int i;
6163
6164 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
6165 txr = sc->sc_qps[i].qp_txr;
6166 rxr = sc->sc_qps[i].qp_rxr;
6167
6168 evcnt_detach(&txr->txr_defragged);
6169 evcnt_detach(&txr->txr_defrag_failed);
6170 evcnt_detach(&txr->txr_pcqdrop);
6171 evcnt_detach(&txr->txr_transmitdef);
6172 evcnt_detach(&txr->txr_intr);
6173 evcnt_detach(&txr->txr_defer);
6174
6175 evcnt_detach(&rxr->rxr_mgethdr_failed);
6176 evcnt_detach(&rxr->rxr_mgetcl_failed);
6177 evcnt_detach(&rxr->rxr_mbuf_load_failed);
6178 evcnt_detach(&rxr->rxr_intr);
6179 evcnt_detach(&rxr->rxr_defer);
6180 }
6181
6182 isc = &sc->sc_stats_counters;
6183 evcnt_detach(&isc->isc_crc_errors);
6184 evcnt_detach(&isc->isc_illegal_bytes);
6185 evcnt_detach(&isc->isc_mac_local_faults);
6186 evcnt_detach(&isc->isc_mac_remote_faults);
6187 evcnt_detach(&isc->isc_link_xon_rx);
6188 evcnt_detach(&isc->isc_link_xon_tx);
6189 evcnt_detach(&isc->isc_link_xoff_rx);
6190 evcnt_detach(&isc->isc_link_xoff_tx);
6191 evcnt_detach(&isc->isc_rx_fragments);
6192 evcnt_detach(&isc->isc_rx_jabber);
6193 evcnt_detach(&isc->isc_rx_bytes);
6194 evcnt_detach(&isc->isc_rx_discards);
6195 evcnt_detach(&isc->isc_rx_unicast);
6196 evcnt_detach(&isc->isc_rx_multicast);
6197 evcnt_detach(&isc->isc_rx_broadcast);
6198 evcnt_detach(&isc->isc_rx_size_64);
6199 evcnt_detach(&isc->isc_rx_size_127);
6200 evcnt_detach(&isc->isc_rx_size_255);
6201 evcnt_detach(&isc->isc_rx_size_511);
6202 evcnt_detach(&isc->isc_rx_size_1023);
6203 evcnt_detach(&isc->isc_rx_size_1522);
6204 evcnt_detach(&isc->isc_rx_size_big);
6205 evcnt_detach(&isc->isc_rx_undersize);
6206 evcnt_detach(&isc->isc_rx_oversize);
6207 evcnt_detach(&isc->isc_tx_bytes);
6208 evcnt_detach(&isc->isc_tx_dropped_link_down);
6209 evcnt_detach(&isc->isc_tx_unicast);
6210 evcnt_detach(&isc->isc_tx_multicast);
6211 evcnt_detach(&isc->isc_tx_broadcast);
6212 evcnt_detach(&isc->isc_tx_size_64);
6213 evcnt_detach(&isc->isc_tx_size_127);
6214 evcnt_detach(&isc->isc_tx_size_255);
6215 evcnt_detach(&isc->isc_tx_size_511);
6216 evcnt_detach(&isc->isc_tx_size_1023);
6217 evcnt_detach(&isc->isc_tx_size_1522);
6218 evcnt_detach(&isc->isc_tx_size_big);
6219 evcnt_detach(&isc->isc_vsi_rx_discards);
6220 evcnt_detach(&isc->isc_vsi_rx_bytes);
6221 evcnt_detach(&isc->isc_vsi_rx_unicast);
6222 evcnt_detach(&isc->isc_vsi_rx_multicast);
6223 evcnt_detach(&isc->isc_vsi_rx_broadcast);
6224 evcnt_detach(&isc->isc_vsi_tx_errors);
6225 evcnt_detach(&isc->isc_vsi_tx_bytes);
6226 evcnt_detach(&isc->isc_vsi_tx_unicast);
6227 evcnt_detach(&isc->isc_vsi_tx_multicast);
6228 evcnt_detach(&isc->isc_vsi_tx_broadcast);
6229
6230 evcnt_detach(&sc->sc_event_atq);
6231 evcnt_detach(&sc->sc_event_link);
6232 evcnt_detach(&sc->sc_event_ecc_err);
6233 evcnt_detach(&sc->sc_event_pci_exception);
6234 evcnt_detach(&sc->sc_event_crit_err);
6235
6236 callout_destroy(&sc->sc_stats_callout);
6237 }
6238
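/*
 * ixl_stats_callout:
 *	Periodic callout that queues the statistics update task on the
 *	workqueue and re-arms itself after sc_stats_intval milliseconds.
 */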
6239 static void
6240 ixl_stats_callout(void *xsc)
6241 {
6242 struct ixl_softc *sc = xsc;
6243
6244 ixl_work_add(sc->sc_workq, &sc->sc_stats_task);
6245 callout_schedule(&sc->sc_stats_callout, mstohz(sc->sc_stats_intval));
6246 }
6247
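/*
 * ixl_stat_delta:
 *	Return the increase of a hardware statistics counter since the
 *	previous call.  The counters are free-running and either 32 bits
 *	wide (reg_hi == 0) or 48 bits wide (reg_hi:reg_lo); a single
 *	wrap-around since the last snapshot is accounted for before the
 *	new raw value is stored in *offset.  Until the first snapshot
 *	exists (has_offset == false) the delta is zero.
 */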
6248 static uint64_t
6249 ixl_stat_delta(struct ixl_softc *sc, uint32_t reg_hi, uint32_t reg_lo,
6250 uint64_t *offset, bool has_offset)
6251 {
6252 uint64_t value, delta;
6253 int bitwidth;
6254
6255 bitwidth = reg_hi == 0 ? 32 : 48;
6256
6257 value = ixl_rd(sc, reg_lo);
6258
6259 if (bitwidth > 32) {
6260 value |= ((uint64_t)ixl_rd(sc, reg_hi) << 32);
6261 }
6262
6263 if (__predict_true(has_offset)) {
6264 delta = value;
6265 if (value < *offset)
6266 delta += ((uint64_t)1 << bitwidth);
6267 delta -= *offset;
6268 } else {
6269 delta = 0;
6270 }
6271 atomic_swap_64(offset, value);
6272
6273 return delta;
6274 }
6275
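/*
 * ixl_stats_update:
 *	Workqueue task that folds the per-port (GLPRT_*) and per-VSI
 *	(GLV_*) hardware counter deltas into the corresponding event
 *	counters.
 */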
6276 static void
6277 ixl_stats_update(void *xsc)
6278 {
6279 struct ixl_softc *sc = xsc;
6280 struct ixl_stats_counters *isc;
6281 uint64_t delta;
6282
6283 isc = &sc->sc_stats_counters;
6284
6285 /* errors */
6286 delta = ixl_stat_delta(sc,
6287 0, I40E_GLPRT_CRCERRS(sc->sc_port),
6288 &isc->isc_crc_errors_offset, isc->isc_has_offset);
6289 atomic_add_64(&isc->isc_crc_errors.ev_count, delta);
6290
6291 delta = ixl_stat_delta(sc,
6292 0, I40E_GLPRT_ILLERRC(sc->sc_port),
6293 &isc->isc_illegal_bytes_offset, isc->isc_has_offset);
6294 atomic_add_64(&isc->isc_illegal_bytes.ev_count, delta);
6295
6296 /* rx */
6297 delta = ixl_stat_delta(sc,
6298 I40E_GLPRT_GORCH(sc->sc_port), I40E_GLPRT_GORCL(sc->sc_port),
6299 &isc->isc_rx_bytes_offset, isc->isc_has_offset);
6300 atomic_add_64(&isc->isc_rx_bytes.ev_count, delta);
6301
6302 delta = ixl_stat_delta(sc,
6303 0, I40E_GLPRT_RDPC(sc->sc_port),
6304 &isc->isc_rx_discards_offset, isc->isc_has_offset);
6305 atomic_add_64(&isc->isc_rx_discards.ev_count, delta);
6306
6307 delta = ixl_stat_delta(sc,
6308 I40E_GLPRT_UPRCH(sc->sc_port), I40E_GLPRT_UPRCL(sc->sc_port),
6309 &isc->isc_rx_unicast_offset, isc->isc_has_offset);
6310 atomic_add_64(&isc->isc_rx_unicast.ev_count, delta);
6311
6312 delta = ixl_stat_delta(sc,
6313 I40E_GLPRT_MPRCH(sc->sc_port), I40E_GLPRT_MPRCL(sc->sc_port),
6314 &isc->isc_rx_multicast_offset, isc->isc_has_offset);
6315 atomic_add_64(&isc->isc_rx_multicast.ev_count, delta);
6316
6317 delta = ixl_stat_delta(sc,
6318 I40E_GLPRT_BPRCH(sc->sc_port), I40E_GLPRT_BPRCL(sc->sc_port),
6319 &isc->isc_rx_broadcast_offset, isc->isc_has_offset);
6320 atomic_add_64(&isc->isc_rx_broadcast.ev_count, delta);
6321
6322 /* Packet size stats rx */
6323 delta = ixl_stat_delta(sc,
6324 I40E_GLPRT_PRC64H(sc->sc_port), I40E_GLPRT_PRC64L(sc->sc_port),
6325 &isc->isc_rx_size_64_offset, isc->isc_has_offset);
6326 atomic_add_64(&isc->isc_rx_size_64.ev_count, delta);
6327
6328 delta = ixl_stat_delta(sc,
6329 I40E_GLPRT_PRC127H(sc->sc_port), I40E_GLPRT_PRC127L(sc->sc_port),
6330 &isc->isc_rx_size_127_offset, isc->isc_has_offset);
6331 atomic_add_64(&isc->isc_rx_size_127.ev_count, delta);
6332
6333 delta = ixl_stat_delta(sc,
6334 I40E_GLPRT_PRC255H(sc->sc_port), I40E_GLPRT_PRC255L(sc->sc_port),
6335 &isc->isc_rx_size_255_offset, isc->isc_has_offset);
6336 atomic_add_64(&isc->isc_rx_size_255.ev_count, delta);
6337
6338 delta = ixl_stat_delta(sc,
6339 I40E_GLPRT_PRC511H(sc->sc_port), I40E_GLPRT_PRC511L(sc->sc_port),
6340 &isc->isc_rx_size_511_offset, isc->isc_has_offset);
6341 atomic_add_64(&isc->isc_rx_size_511.ev_count, delta);
6342
6343 delta = ixl_stat_delta(sc,
6344 I40E_GLPRT_PRC1023H(sc->sc_port), I40E_GLPRT_PRC1023L(sc->sc_port),
6345 &isc->isc_rx_size_1023_offset, isc->isc_has_offset);
6346 atomic_add_64(&isc->isc_rx_size_1023.ev_count, delta);
6347
6348 delta = ixl_stat_delta(sc,
6349 I40E_GLPRT_PRC1522H(sc->sc_port), I40E_GLPRT_PRC1522L(sc->sc_port),
6350 &isc->isc_rx_size_1522_offset, isc->isc_has_offset);
6351 atomic_add_64(&isc->isc_rx_size_1522.ev_count, delta);
6352
6353 delta = ixl_stat_delta(sc,
6354 I40E_GLPRT_PRC9522H(sc->sc_port), I40E_GLPRT_PRC9522L(sc->sc_port),
6355 &isc->isc_rx_size_big_offset, isc->isc_has_offset);
6356 atomic_add_64(&isc->isc_rx_size_big.ev_count, delta);
6357
6358 delta = ixl_stat_delta(sc,
6359 0, I40E_GLPRT_RUC(sc->sc_port),
6360 &isc->isc_rx_undersize_offset, isc->isc_has_offset);
6361 atomic_add_64(&isc->isc_rx_undersize.ev_count, delta);
6362
6363 delta = ixl_stat_delta(sc,
6364 0, I40E_GLPRT_ROC(sc->sc_port),
6365 &isc->isc_rx_oversize_offset, isc->isc_has_offset);
6366 atomic_add_64(&isc->isc_rx_oversize.ev_count, delta);
6367
6368 /* tx */
6369 delta = ixl_stat_delta(sc,
6370 I40E_GLPRT_GOTCH(sc->sc_port), I40E_GLPRT_GOTCL(sc->sc_port),
6371 &isc->isc_tx_bytes_offset, isc->isc_has_offset);
6372 atomic_add_64(&isc->isc_tx_bytes.ev_count, delta);
6373
6374 delta = ixl_stat_delta(sc,
6375 0, I40E_GLPRT_TDOLD(sc->sc_port),
6376 &isc->isc_tx_dropped_link_down_offset, isc->isc_has_offset);
6377 atomic_add_64(&isc->isc_tx_dropped_link_down.ev_count, delta);
6378
6379 delta = ixl_stat_delta(sc,
6380 I40E_GLPRT_UPTCH(sc->sc_port), I40E_GLPRT_UPTCL(sc->sc_port),
6381 &isc->isc_tx_unicast_offset, isc->isc_has_offset);
6382 atomic_add_64(&isc->isc_tx_unicast.ev_count, delta);
6383
6384 delta = ixl_stat_delta(sc,
6385 I40E_GLPRT_MPTCH(sc->sc_port), I40E_GLPRT_MPTCL(sc->sc_port),
6386 &isc->isc_tx_multicast_offset, isc->isc_has_offset);
6387 atomic_add_64(&isc->isc_tx_multicast.ev_count, delta);
6388
6389 delta = ixl_stat_delta(sc,
6390 I40E_GLPRT_BPTCH(sc->sc_port), I40E_GLPRT_BPTCL(sc->sc_port),
6391 &isc->isc_tx_broadcast_offset, isc->isc_has_offset);
6392 atomic_add_64(&isc->isc_tx_broadcast.ev_count, delta);
6393
6394 /* Packet size stats tx */
6395 delta = ixl_stat_delta(sc,
6396 	    I40E_GLPRT_PTC64H(sc->sc_port), I40E_GLPRT_PTC64L(sc->sc_port),
6397 &isc->isc_tx_size_64_offset, isc->isc_has_offset);
6398 atomic_add_64(&isc->isc_tx_size_64.ev_count, delta);
6399
6400 delta = ixl_stat_delta(sc,
6401 I40E_GLPRT_PTC127H(sc->sc_port), I40E_GLPRT_PTC127L(sc->sc_port),
6402 &isc->isc_tx_size_127_offset, isc->isc_has_offset);
6403 atomic_add_64(&isc->isc_tx_size_127.ev_count, delta);
6404
6405 delta = ixl_stat_delta(sc,
6406 I40E_GLPRT_PTC255H(sc->sc_port), I40E_GLPRT_PTC255L(sc->sc_port),
6407 &isc->isc_tx_size_255_offset, isc->isc_has_offset);
6408 atomic_add_64(&isc->isc_tx_size_255.ev_count, delta);
6409
6410 delta = ixl_stat_delta(sc,
6411 I40E_GLPRT_PTC511H(sc->sc_port), I40E_GLPRT_PTC511L(sc->sc_port),
6412 &isc->isc_tx_size_511_offset, isc->isc_has_offset);
6413 atomic_add_64(&isc->isc_tx_size_511.ev_count, delta);
6414
6415 delta = ixl_stat_delta(sc,
6416 I40E_GLPRT_PTC1023H(sc->sc_port), I40E_GLPRT_PTC1023L(sc->sc_port),
6417 &isc->isc_tx_size_1023_offset, isc->isc_has_offset);
6418 atomic_add_64(&isc->isc_tx_size_1023.ev_count, delta);
6419
6420 delta = ixl_stat_delta(sc,
6421 I40E_GLPRT_PTC1522H(sc->sc_port), I40E_GLPRT_PTC1522L(sc->sc_port),
6422 &isc->isc_tx_size_1522_offset, isc->isc_has_offset);
6423 atomic_add_64(&isc->isc_tx_size_1522.ev_count, delta);
6424
6425 delta = ixl_stat_delta(sc,
6426 I40E_GLPRT_PTC9522H(sc->sc_port), I40E_GLPRT_PTC9522L(sc->sc_port),
6427 &isc->isc_tx_size_big_offset, isc->isc_has_offset);
6428 atomic_add_64(&isc->isc_tx_size_big.ev_count, delta);
6429
6430 /* mac faults */
6431 delta = ixl_stat_delta(sc,
6432 0, I40E_GLPRT_MLFC(sc->sc_port),
6433 &isc->isc_mac_local_faults_offset, isc->isc_has_offset);
6434 atomic_add_64(&isc->isc_mac_local_faults.ev_count, delta);
6435
6436 delta = ixl_stat_delta(sc,
6437 0, I40E_GLPRT_MRFC(sc->sc_port),
6438 &isc->isc_mac_remote_faults_offset, isc->isc_has_offset);
6439 atomic_add_64(&isc->isc_mac_remote_faults.ev_count, delta);
6440
6441 /* Flow control (LFC) stats */
6442 delta = ixl_stat_delta(sc,
6443 0, I40E_GLPRT_LXONRXC(sc->sc_port),
6444 &isc->isc_link_xon_rx_offset, isc->isc_has_offset);
6445 atomic_add_64(&isc->isc_link_xon_rx.ev_count, delta);
6446
6447 delta = ixl_stat_delta(sc,
6448 0, I40E_GLPRT_LXONTXC(sc->sc_port),
6449 &isc->isc_link_xon_tx_offset, isc->isc_has_offset);
6450 atomic_add_64(&isc->isc_link_xon_tx.ev_count, delta);
6451
6452 delta = ixl_stat_delta(sc,
6453 0, I40E_GLPRT_LXOFFRXC(sc->sc_port),
6454 &isc->isc_link_xoff_rx_offset, isc->isc_has_offset);
6455 atomic_add_64(&isc->isc_link_xoff_rx.ev_count, delta);
6456
6457 delta = ixl_stat_delta(sc,
6458 0, I40E_GLPRT_LXOFFTXC(sc->sc_port),
6459 &isc->isc_link_xoff_tx_offset, isc->isc_has_offset);
6460 atomic_add_64(&isc->isc_link_xoff_tx.ev_count, delta);
6461
6462 /* fragments */
6463 delta = ixl_stat_delta(sc,
6464 0, I40E_GLPRT_RFC(sc->sc_port),
6465 &isc->isc_rx_fragments_offset, isc->isc_has_offset);
6466 atomic_add_64(&isc->isc_rx_fragments.ev_count, delta);
6467
6468 delta = ixl_stat_delta(sc,
6469 0, I40E_GLPRT_RJC(sc->sc_port),
6470 &isc->isc_rx_jabber_offset, isc->isc_has_offset);
6471 atomic_add_64(&isc->isc_rx_jabber.ev_count, delta);
6472
6473 /* VSI rx counters */
6474 delta = ixl_stat_delta(sc,
6475 0, I40E_GLV_RDPC(sc->sc_vsi_stat_counter_idx),
6476 &isc->isc_vsi_rx_discards_offset, isc->isc_has_offset);
6477 atomic_add_64(&isc->isc_vsi_rx_discards.ev_count, delta);
6478
6479 delta = ixl_stat_delta(sc,
6480 I40E_GLV_GORCH(sc->sc_vsi_stat_counter_idx),
6481 I40E_GLV_GORCL(sc->sc_vsi_stat_counter_idx),
6482 &isc->isc_vsi_rx_bytes_offset, isc->isc_has_offset);
6483 atomic_add_64(&isc->isc_vsi_rx_bytes.ev_count, delta);
6484
6485 delta = ixl_stat_delta(sc,
6486 I40E_GLV_UPRCH(sc->sc_vsi_stat_counter_idx),
6487 I40E_GLV_UPRCL(sc->sc_vsi_stat_counter_idx),
6488 &isc->isc_vsi_rx_unicast_offset, isc->isc_has_offset);
6489 atomic_add_64(&isc->isc_vsi_rx_unicast.ev_count, delta);
6490
6491 delta = ixl_stat_delta(sc,
6492 I40E_GLV_MPRCH(sc->sc_vsi_stat_counter_idx),
6493 I40E_GLV_MPRCL(sc->sc_vsi_stat_counter_idx),
6494 &isc->isc_vsi_rx_multicast_offset, isc->isc_has_offset);
6495 atomic_add_64(&isc->isc_vsi_rx_multicast.ev_count, delta);
6496
6497 delta = ixl_stat_delta(sc,
6498 I40E_GLV_BPRCH(sc->sc_vsi_stat_counter_idx),
6499 I40E_GLV_BPRCL(sc->sc_vsi_stat_counter_idx),
6500 &isc->isc_vsi_rx_broadcast_offset, isc->isc_has_offset);
6501 atomic_add_64(&isc->isc_vsi_rx_broadcast.ev_count, delta);
6502
6503 /* VSI tx counters */
6504 delta = ixl_stat_delta(sc,
6505 0, I40E_GLV_TEPC(sc->sc_vsi_stat_counter_idx),
6506 &isc->isc_vsi_tx_errors_offset, isc->isc_has_offset);
6507 atomic_add_64(&isc->isc_vsi_tx_errors.ev_count, delta);
6508
6509 delta = ixl_stat_delta(sc,
6510 I40E_GLV_GOTCH(sc->sc_vsi_stat_counter_idx),
6511 I40E_GLV_GOTCL(sc->sc_vsi_stat_counter_idx),
6512 &isc->isc_vsi_tx_bytes_offset, isc->isc_has_offset);
6513 atomic_add_64(&isc->isc_vsi_tx_bytes.ev_count, delta);
6514
6515 delta = ixl_stat_delta(sc,
6516 I40E_GLV_UPTCH(sc->sc_vsi_stat_counter_idx),
6517 I40E_GLV_UPTCL(sc->sc_vsi_stat_counter_idx),
6518 &isc->isc_vsi_tx_unicast_offset, isc->isc_has_offset);
6519 atomic_add_64(&isc->isc_vsi_tx_unicast.ev_count, delta);
6520
6521 delta = ixl_stat_delta(sc,
6522 I40E_GLV_MPTCH(sc->sc_vsi_stat_counter_idx),
6523 I40E_GLV_MPTCL(sc->sc_vsi_stat_counter_idx),
6524 &isc->isc_vsi_tx_multicast_offset, isc->isc_has_offset);
6525 atomic_add_64(&isc->isc_vsi_tx_multicast.ev_count, delta);
6526
6527 delta = ixl_stat_delta(sc,
6528 I40E_GLV_BPTCH(sc->sc_vsi_stat_counter_idx),
6529 I40E_GLV_BPTCL(sc->sc_vsi_stat_counter_idx),
6530 &isc->isc_vsi_tx_broadcast_offset, isc->isc_has_offset);
6531 atomic_add_64(&isc->isc_vsi_tx_broadcast.ev_count, delta);
6532 }
6533
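/*
 * ixl_setup_sysctls:
 *	Create the per-device sysctl subtree under hw: txrx_workqueue,
 *	stats_interval and the Rx/Tx interrupt and deferred processing
 *	limits.
 */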
6534 static int
6535 ixl_setup_sysctls(struct ixl_softc *sc)
6536 {
6537 const char *devname;
6538 struct sysctllog **log;
6539 const struct sysctlnode *rnode, *rxnode, *txnode;
6540 int error;
6541
6542 log = &sc->sc_sysctllog;
6543 devname = device_xname(sc->sc_dev);
6544
6545 error = sysctl_createv(log, 0, NULL, &rnode,
6546 0, CTLTYPE_NODE, devname,
6547 SYSCTL_DESCR("ixl information and settings"),
6548 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
6549 if (error)
6550 goto out;
6551
6552 error = sysctl_createv(log, 0, &rnode, NULL,
6553 CTLFLAG_READWRITE, CTLTYPE_BOOL, "txrx_workqueue",
6554 SYSCTL_DESCR("Use workqueue for packet processing"),
6555 NULL, 0, &sc->sc_txrx_workqueue, 0, CTL_CREATE, CTL_EOL);
6556 if (error)
6557 goto out;
6558
6559 error = sysctl_createv(log, 0, &rnode, NULL,
6560 CTLFLAG_READONLY, CTLTYPE_INT, "stats_interval",
6561 SYSCTL_DESCR("Statistics collection interval in milliseconds"),
6562 	    NULL, 0, &sc->sc_stats_intval, 0, CTL_CREATE, CTL_EOL);
	if (error)
		goto out;
6563
6564 error = sysctl_createv(log, 0, &rnode, &rxnode,
6565 0, CTLTYPE_NODE, "rx",
6566 SYSCTL_DESCR("ixl information and settings for Rx"),
6567 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
6568 if (error)
6569 goto out;
6570
6571 error = sysctl_createv(log, 0, &rxnode, NULL,
6572 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
6573 SYSCTL_DESCR("max number of Rx packets"
6574 " to process for interrupt processing"),
6575 NULL, 0, &sc->sc_rx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
6576 if (error)
6577 goto out;
6578
6579 error = sysctl_createv(log, 0, &rxnode, NULL,
6580 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
6581 SYSCTL_DESCR("max number of Rx packets"
6582 " to process for deferred processing"),
6583 NULL, 0, &sc->sc_rx_process_limit, 0, CTL_CREATE, CTL_EOL);
6584 if (error)
6585 goto out;
6586
6587 error = sysctl_createv(log, 0, &rnode, &txnode,
6588 0, CTLTYPE_NODE, "tx",
6589 SYSCTL_DESCR("ixl information and settings for Tx"),
6590 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
6591 if (error)
6592 goto out;
6593
6594 error = sysctl_createv(log, 0, &txnode, NULL,
6595 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
6596 SYSCTL_DESCR("max number of Tx packets"
6597 " to process for interrupt processing"),
6598 NULL, 0, &sc->sc_tx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
6599 if (error)
6600 goto out;
6601
6602 error = sysctl_createv(log, 0, &txnode, NULL,
6603 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
6604 SYSCTL_DESCR("max number of Tx packets"
6605 " to process for deferred processing"),
6606 NULL, 0, &sc->sc_tx_process_limit, 0, CTL_CREATE, CTL_EOL);
6607 if (error)
6608 goto out;
6609
6610 out:
6611 if (error) {
6612 aprint_error_dev(sc->sc_dev,
6613 "unable to create sysctl node\n");
6614 sysctl_teardown(log);
6615 }
6616
6617 return error;
6618 }
6619
6620 static void
6621 ixl_teardown_sysctls(struct ixl_softc *sc)
6622 {
6623
6624 sysctl_teardown(&sc->sc_sysctllog);
6625 }
6626
6627 static struct workqueue *
6628 ixl_workq_create(const char *name, pri_t prio, int ipl, int flags)
6629 {
6630 struct workqueue *wq;
6631 int error;
6632
6633 error = workqueue_create(&wq, name, ixl_workq_work, NULL,
6634 prio, ipl, flags);
6635
6636 if (error)
6637 return NULL;
6638
6639 return wq;
6640 }
6641
6642 static void
6643 ixl_workq_destroy(struct workqueue *wq)
6644 {
6645
6646 workqueue_destroy(wq);
6647 }
6648
6649 static void
6650 ixl_work_set(struct ixl_work *work, void (*func)(void *), void *arg)
6651 {
6652
6653 memset(work, 0, sizeof(*work));
6654 work->ixw_func = func;
6655 work->ixw_arg = arg;
6656 }
6657
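/*
 * ixl_work_add:
 *	Enqueue a work item unless it is already pending; ixw_added is
 *	cleared again by ixl_workq_work() just before the handler runs,
 *	so a request made while the handler is executing is re-queued.
 */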
6658 static void
6659 ixl_work_add(struct workqueue *wq, struct ixl_work *work)
6660 {
6661 if (atomic_cas_uint(&work->ixw_added, 0, 1) != 0)
6662 return;
6663
6664 kpreempt_disable();
6665 workqueue_enqueue(wq, &work->ixw_cookie, NULL);
6666 kpreempt_enable();
6667 }
6668
6669 static void
6670 ixl_work_wait(struct workqueue *wq, struct ixl_work *work)
6671 {
6672
6673 workqueue_wait(wq, &work->ixw_cookie);
6674 }
6675
6676 static void
6677 ixl_workq_work(struct work *wk, void *context)
6678 {
6679 struct ixl_work *work;
6680
6681 work = container_of(wk, struct ixl_work, ixw_cookie);
6682
6683 atomic_swap_uint(&work->ixw_added, 0);
6684 work->ixw_func(work->ixw_arg);
6685 }
6686
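/*
 * ixl_rx_ctl_read:
 *	Read an Rx control register through the admin queue
 *	RX_CTL_REG_READ command, mapping the AQ return code to an errno.
 */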
6687 static int
6688 ixl_rx_ctl_read(struct ixl_softc *sc, uint32_t reg, uint32_t *rv)
6689 {
6690 struct ixl_aq_desc iaq;
6691
6692 memset(&iaq, 0, sizeof(iaq));
6693 iaq.iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_REG_READ);
6694 iaq.iaq_param[1] = htole32(reg);
6695
6696 if (ixl_atq_poll(sc, &iaq, 250) != 0)
6697 return ETIMEDOUT;
6698
6699 	switch (le16toh(iaq.iaq_retval)) {
6700 case IXL_AQ_RC_OK:
6701 /* success */
6702 break;
6703 case IXL_AQ_RC_EACCES:
6704 return EPERM;
6705 case IXL_AQ_RC_EAGAIN:
6706 return EAGAIN;
6707 default:
6708 return EIO;
6709 }
6710
6711 	*rv = le32toh(iaq.iaq_param[3]);
6712 return 0;
6713 }
6714
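/*
 * ixl_rd_rx_csr:
 *	Read an Rx control CSR.  When the IXL_SC_AQ_FLAG_RXCTL flag is
 *	set the admin queue method is tried a few times, retrying on
 *	EAGAIN; otherwise, or if the AQ method keeps failing, the
 *	register is read directly.
 */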
6715 static uint32_t
6716 ixl_rd_rx_csr(struct ixl_softc *sc, uint32_t reg)
6717 {
6718 uint32_t val;
6719 int rv, retry, retry_limit;
6720
6721 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL)) {
6722 retry_limit = 5;
6723 } else {
6724 retry_limit = 0;
6725 }
6726
6727 for (retry = 0; retry < retry_limit; retry++) {
6728 rv = ixl_rx_ctl_read(sc, reg, &val);
6729 if (rv == 0)
6730 return val;
6731 else if (rv == EAGAIN)
6732 delaymsec(1);
6733 else
6734 break;
6735 }
6736
6737 val = ixl_rd(sc, reg);
6738
6739 return val;
6740 }
6741
6742 static int
6743 ixl_rx_ctl_write(struct ixl_softc *sc, uint32_t reg, uint32_t value)
6744 {
6745 struct ixl_aq_desc iaq;
6746
6747 memset(&iaq, 0, sizeof(iaq));
6748 iaq.iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_REG_WRITE);
6749 iaq.iaq_param[1] = htole32(reg);
6750 iaq.iaq_param[3] = htole32(value);
6751
6752 if (ixl_atq_poll(sc, &iaq, 250) != 0)
6753 return ETIMEDOUT;
6754
6755 	switch (le16toh(iaq.iaq_retval)) {
6756 case IXL_AQ_RC_OK:
6757 /* success */
6758 break;
6759 case IXL_AQ_RC_EACCES:
6760 return EPERM;
6761 case IXL_AQ_RC_EAGAIN:
6762 return EAGAIN;
6763 default:
6764 return EIO;
6765 }
6766
6767 return 0;
6768 }
6769
6770 static void
6771 ixl_wr_rx_csr(struct ixl_softc *sc, uint32_t reg, uint32_t value)
6772 {
6773 int rv, retry, retry_limit;
6774
6775 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL)) {
6776 retry_limit = 5;
6777 } else {
6778 retry_limit = 0;
6779 }
6780
6781 for (retry = 0; retry < retry_limit; retry++) {
6782 rv = ixl_rx_ctl_write(sc, reg, value);
6783 if (rv == 0)
6784 return;
6785 else if (rv == EAGAIN)
6786 delaymsec(1);
6787 else
6788 break;
6789 }
6790
6791 ixl_wr(sc, reg, value);
6792 }
6793
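/*
 * ixl_nvm_lock:
 *	Request ownership of the NVM resource through the admin queue
 *	when IXL_SC_AQ_FLAG_NVMLOCK is set; 'R' asks for read access,
 *	anything else for write access.
 */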
6794 static int
6795 ixl_nvm_lock(struct ixl_softc *sc, char rw)
6796 {
6797 struct ixl_aq_desc iaq;
6798 struct ixl_aq_req_resource_param *param;
6799 int rv;
6800
6801 if (!ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK))
6802 return 0;
6803
6804 memset(&iaq, 0, sizeof(iaq));
6805 iaq.iaq_opcode = htole16(IXL_AQ_OP_REQUEST_RESOURCE);
6806
6807 param = (struct ixl_aq_req_resource_param *)&iaq.iaq_param;
6808 param->resource_id = htole16(IXL_AQ_RESOURCE_ID_NVM);
6809 if (rw == 'R') {
6810 param->access_type = htole16(IXL_AQ_RESOURCE_ACCES_READ);
6811 } else {
6812 param->access_type = htole16(IXL_AQ_RESOURCE_ACCES_WRITE);
6813 }
6814
6815 rv = ixl_atq_poll(sc, &iaq, 250);
6816
6817 if (rv != 0)
6818 return ETIMEDOUT;
6819
6820 switch (le16toh(iaq.iaq_retval)) {
6821 case IXL_AQ_RC_OK:
6822 break;
6823 case IXL_AQ_RC_EACCES:
6824 return EACCES;
6825 case IXL_AQ_RC_EBUSY:
6826 return EBUSY;
6827 case IXL_AQ_RC_EPERM:
6828 return EPERM;
	default:
		return EIO;
6829 	}
6830
6831 return 0;
6832 }
6833
6834 static int
6835 ixl_nvm_unlock(struct ixl_softc *sc)
6836 {
6837 struct ixl_aq_desc iaq;
6838 struct ixl_aq_rel_resource_param *param;
6839 int rv;
6840
6841 if (!ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK))
6842 return 0;
6843
6844 memset(&iaq, 0, sizeof(iaq));
6845 iaq.iaq_opcode = htole16(IXL_AQ_OP_RELEASE_RESOURCE);
6846
6847 param = (struct ixl_aq_rel_resource_param *)&iaq.iaq_param;
6848 param->resource_id = htole16(IXL_AQ_RESOURCE_ID_NVM);
6849
6850 rv = ixl_atq_poll(sc, &iaq, 250);
6851
6852 if (rv != 0)
6853 return ETIMEDOUT;
6854
6855 switch (le16toh(iaq.iaq_retval)) {
6856 case IXL_AQ_RC_OK:
6857 break;
6858 default:
6859 return EIO;
6860 }
6861 return 0;
6862 }
6863
6864 static int
6865 ixl_srdone_poll(struct ixl_softc *sc)
6866 {
6867 int wait_count;
6868 uint32_t reg;
6869
6870 for (wait_count = 0; wait_count < IXL_SRRD_SRCTL_ATTEMPTS;
6871 wait_count++) {
6872 reg = ixl_rd(sc, I40E_GLNVM_SRCTL);
6873 if (ISSET(reg, I40E_GLNVM_SRCTL_DONE_MASK))
6874 break;
6875
6876 delaymsec(5);
6877 }
6878
6879 if (wait_count == IXL_SRRD_SRCTL_ATTEMPTS)
6880 return -1;
6881
6882 return 0;
6883 }
6884
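/*
 * ixl_nvm_read_srctl:
 *	Read one 16-bit word from the NVM Shadow RAM using the
 *	GLNVM_SRCTL/GLNVM_SRDATA register interface.
 */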
6885 static int
6886 ixl_nvm_read_srctl(struct ixl_softc *sc, uint16_t offset, uint16_t *data)
6887 {
6888 uint32_t reg;
6889
6890 if (ixl_srdone_poll(sc) != 0)
6891 return ETIMEDOUT;
6892
6893 reg = ((uint32_t)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
6894 __BIT(I40E_GLNVM_SRCTL_START_SHIFT);
6895 ixl_wr(sc, I40E_GLNVM_SRCTL, reg);
6896
6897 if (ixl_srdone_poll(sc) != 0) {
6898 aprint_debug("NVM read error: couldn't access "
6899 "Shadow RAM address: 0x%x\n", offset);
6900 return ETIMEDOUT;
6901 }
6902
6903 reg = ixl_rd(sc, I40E_GLNVM_SRDATA);
6904 *data = (uint16_t)__SHIFTOUT(reg, I40E_GLNVM_SRDATA_RDDATA_MASK);
6905
6906 return 0;
6907 }
6908
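/*
 * ixl_nvm_read_aq:
 *	Read from the NVM with the admin queue NVM_READ command, using
 *	the shared AQ buffer.  The offset is given in 16-bit words and
 *	is converted to a 24-bit byte offset for the command.
 */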
6909 static int
6910 ixl_nvm_read_aq(struct ixl_softc *sc, uint16_t offset_word,
6911 void *data, size_t len)
6912 {
6913 struct ixl_dmamem *idm;
6914 struct ixl_aq_desc iaq;
6915 struct ixl_aq_nvm_param *param;
6916 uint32_t offset_bytes;
6917 int rv;
6918
6919 idm = &sc->sc_aqbuf;
6920 if (len > IXL_DMA_LEN(idm))
6921 return ENOMEM;
6922
6923 memset(IXL_DMA_KVA(idm), 0, IXL_DMA_LEN(idm));
6924 memset(&iaq, 0, sizeof(iaq));
6925 iaq.iaq_opcode = htole16(IXL_AQ_OP_NVM_READ);
6926 iaq.iaq_flags = htole16(IXL_AQ_BUF |
6927 ((len > I40E_AQ_LARGE_BUF) ? IXL_AQ_LB : 0));
6928 iaq.iaq_datalen = htole16(len);
6929 ixl_aq_dva(&iaq, IXL_DMA_DVA(idm));
6930
6931 param = (struct ixl_aq_nvm_param *)iaq.iaq_param;
6932 param->command_flags = IXL_AQ_NVM_LAST_CMD;
6933 param->module_pointer = 0;
6934 param->length = htole16(len);
6935 offset_bytes = (uint32_t)offset_word * 2;
6936 offset_bytes &= 0x00FFFFFF;
6937 param->offset = htole32(offset_bytes);
6938
6939 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
6940 BUS_DMASYNC_PREREAD);
6941
6942 rv = ixl_atq_poll(sc, &iaq, 250);
6943
6944 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
6945 BUS_DMASYNC_POSTREAD);
6946
6947 if (rv != 0) {
6948 return ETIMEDOUT;
6949 }
6950
6951 switch (le16toh(iaq.iaq_retval)) {
6952 case IXL_AQ_RC_OK:
6953 break;
6954 case IXL_AQ_RC_EPERM:
6955 return EPERM;
6956 case IXL_AQ_RC_EINVAL:
6957 return EINVAL;
6958 case IXL_AQ_RC_EBUSY:
6959 return EBUSY;
6960 case IXL_AQ_RC_EIO:
6961 default:
6962 return EIO;
6963 }
6964
6965 memcpy(data, IXL_DMA_KVA(idm), len);
6966
6967 return 0;
6968 }
6969
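/*
 * ixl_rd16_nvm:
 *	Read a 16-bit word from the NVM, taking and releasing the NVM
 *	lock around the access and choosing between the admin queue and
 *	Shadow RAM methods based on IXL_SC_AQ_FLAG_NVMREAD.  Data read
 *	through the admin queue is little-endian and converted to host
 *	byte order.
 */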
6970 static int
6971 ixl_rd16_nvm(struct ixl_softc *sc, uint16_t offset, uint16_t *data)
6972 {
6973 int error;
6974 uint16_t buf;
6975
6976 error = ixl_nvm_lock(sc, 'R');
6977 if (error)
6978 return error;
6979
6980 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMREAD)) {
6981 error = ixl_nvm_read_aq(sc, offset,
6982 &buf, sizeof(buf));
6983 if (error == 0)
6984 *data = le16toh(buf);
6985 } else {
6986 error = ixl_nvm_read_srctl(sc, offset, &buf);
6987 if (error == 0)
6988 *data = buf;
6989 }
6990
6991 ixl_nvm_unlock(sc);
6992
6993 return error;
6994 }
6995
6996 MODULE(MODULE_CLASS_DRIVER, if_ixl, "pci");
6997
6998 #ifdef _MODULE
6999 #include "ioconf.c"
7000 #endif
7001
7002 #ifdef _MODULE
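/*
 * ixl_parse_modprop:
 *	Apply module properties (nomsix, stats_interval, nqps_limit,
 *	rx_ndescs, tx_ndescs) to the corresponding ixl_param_* tunables,
 *	ignoring values outside the accepted ranges.
 */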
7003 static void
7004 ixl_parse_modprop(prop_dictionary_t dict)
7005 {
7006 prop_object_t obj;
7007 int64_t val;
7008 uint64_t uval;
7009
7010 if (dict == NULL)
7011 return;
7012
7013 obj = prop_dictionary_get(dict, "nomsix");
7014 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_BOOL) {
7015 ixl_param_nomsix = prop_bool_true((prop_bool_t)obj);
7016 }
7017
7018 obj = prop_dictionary_get(dict, "stats_interval");
7019 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
7020 val = prop_number_integer_value((prop_number_t)obj);
7021
7022 		/* the bounds of this range are arbitrary */
7023 if (100 < val && val < 180000) {
7024 ixl_param_stats_interval = val;
7025 }
7026 }
7027
7028 obj = prop_dictionary_get(dict, "nqps_limit");
7029 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
7030 val = prop_number_integer_value((prop_number_t)obj);
7031
7032 if (val <= INT32_MAX)
7033 ixl_param_nqps_limit = val;
7034 }
7035
7036 obj = prop_dictionary_get(dict, "rx_ndescs");
7037 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
7038 uval = prop_number_unsigned_integer_value((prop_number_t)obj);
7039
7040 if (uval > 8)
7041 ixl_param_rx_ndescs = uval;
7042 }
7043
7044 obj = prop_dictionary_get(dict, "tx_ndescs");
7045 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
7046 uval = prop_number_unsigned_integer_value((prop_number_t)obj);
7047
7048 if (uval > IXL_TX_PKT_DESCS)
7049 ixl_param_tx_ndescs = uval;
7050 }
7051
7052 }
7053 #endif
7054
7055 static int
7056 if_ixl_modcmd(modcmd_t cmd, void *opaque)
7057 {
7058 int error = 0;
7059
7060 #ifdef _MODULE
7061 switch (cmd) {
7062 case MODULE_CMD_INIT:
7063 ixl_parse_modprop((prop_dictionary_t)opaque);
7064 error = config_init_component(cfdriver_ioconf_if_ixl,
7065 cfattach_ioconf_if_ixl, cfdata_ioconf_if_ixl);
7066 break;
7067 case MODULE_CMD_FINI:
7068 error = config_fini_component(cfdriver_ioconf_if_ixl,
7069 cfattach_ioconf_if_ixl, cfdata_ioconf_if_ixl);
7070 break;
7071 default:
7072 error = ENOTTY;
7073 break;
7074 }
7075 #endif
7076
7077 return error;
7078 }
7079