 1 /* $NetBSD: if_ixl.c,v 1.53 2020/02/25 07:53:55 yamaguchi Exp $ */
2
3 /*
4 * Copyright (c) 2013-2015, Intel Corporation
5 * All rights reserved.
6
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
12 *
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * 3. Neither the name of the Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 /*
35 * Copyright (c) 2016,2017 David Gwynne <dlg (at) openbsd.org>
36 *
37 * Permission to use, copy, modify, and distribute this software for any
38 * purpose with or without fee is hereby granted, provided that the above
39 * copyright notice and this permission notice appear in all copies.
40 *
41 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
42 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
43 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
44 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
45 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
46 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
47 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
48 */
49
50 /*
51 * Copyright (c) 2019 Internet Initiative Japan, Inc.
52 * All rights reserved.
53 *
54 * Redistribution and use in source and binary forms, with or without
55 * modification, are permitted provided that the following conditions
56 * are met:
57 * 1. Redistributions of source code must retain the above copyright
58 * notice, this list of conditions and the following disclaimer.
59 * 2. Redistributions in binary form must reproduce the above copyright
60 * notice, this list of conditions and the following disclaimer in the
61 * documentation and/or other materials provided with the distribution.
62 *
63 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
64 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
65 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
66 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
67 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
68 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
69 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
70 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
71 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
72 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
73 * POSSIBILITY OF SUCH DAMAGE.
74 */
75
76 #include <sys/cdefs.h>
77 __KERNEL_RCSID(0, "$NetBSD: if_ixl.c,v 1.53 2020/02/25 07:53:55 yamaguchi Exp $");
78
79 #ifdef _KERNEL_OPT
80 #include "opt_net_mpsafe.h"
81 #include "opt_if_ixl.h"
82 #endif
83
84 #include <sys/param.h>
85 #include <sys/types.h>
86
87 #include <sys/cpu.h>
88 #include <sys/device.h>
89 #include <sys/evcnt.h>
90 #include <sys/interrupt.h>
91 #include <sys/kmem.h>
92 #include <sys/malloc.h>
93 #include <sys/module.h>
94 #include <sys/mutex.h>
95 #include <sys/pcq.h>
96 #include <sys/syslog.h>
97 #include <sys/workqueue.h>
98
99 #include <sys/bus.h>
100
101 #include <net/bpf.h>
102 #include <net/if.h>
103 #include <net/if_dl.h>
104 #include <net/if_media.h>
105 #include <net/if_ether.h>
106 #include <net/rss_config.h>
107
108 #include <netinet/tcp.h> /* for struct tcphdr */
109 #include <netinet/udp.h> /* for struct udphdr */
110
111 #include <dev/pci/pcivar.h>
112 #include <dev/pci/pcidevs.h>
113
114 #include <dev/pci/if_ixlreg.h>
115 #include <dev/pci/if_ixlvar.h>
116
117 #include <prop/proplib.h>
118
119 struct ixl_softc; /* defined */
120
121 #define I40E_PF_RESET_WAIT_COUNT 200
122 #define I40E_AQ_LARGE_BUF 512
123
124 /* bitfields for Tx queue mapping in QTX_CTL */
125 #define I40E_QTX_CTL_VF_QUEUE 0x0
126 #define I40E_QTX_CTL_VM_QUEUE 0x1
127 #define I40E_QTX_CTL_PF_QUEUE 0x2
128
129 #define I40E_QUEUE_TYPE_EOL 0x7ff
130 #define I40E_INTR_NOTX_QUEUE 0
131
132 #define I40E_QUEUE_TYPE_RX 0x0
133 #define I40E_QUEUE_TYPE_TX 0x1
134 #define I40E_QUEUE_TYPE_PE_CEQ 0x2
135 #define I40E_QUEUE_TYPE_UNKNOWN 0x3
136
137 #define I40E_ITR_INDEX_RX 0x0
138 #define I40E_ITR_INDEX_TX 0x1
139 #define I40E_ITR_INDEX_OTHER 0x2
140 #define I40E_ITR_INDEX_NONE 0x3
141
142 #define I40E_INTR_NOTX_QUEUE 0
143 #define I40E_INTR_NOTX_INTR 0
144 #define I40E_INTR_NOTX_RX_QUEUE 0
145 #define I40E_INTR_NOTX_TX_QUEUE 1
146 #define I40E_INTR_NOTX_RX_MASK I40E_PFINT_ICR0_QUEUE_0_MASK
147 #define I40E_INTR_NOTX_TX_MASK I40E_PFINT_ICR0_QUEUE_1_MASK
148
149 #define BIT_ULL(a) (1ULL << (a))
150 #define IXL_RSS_HENA_DEFAULT_BASE \
151 (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
152 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
153 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
154 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
155 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
156 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
157 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
158 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
159 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
160 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
161 BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
162 #define IXL_RSS_HENA_DEFAULT_XL710 IXL_RSS_HENA_DEFAULT_BASE
163 #define IXL_RSS_HENA_DEFAULT_X722 (IXL_RSS_HENA_DEFAULT_XL710 | \
164 BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
165 BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
166 BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
167 BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
168 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
169 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK))
170 #define I40E_HASH_LUT_SIZE_128 0
171 #define IXL_RSS_KEY_SIZE_REG 13
172
173 #define IXL_ICR0_CRIT_ERR_MASK \
174 (I40E_PFINT_ICR0_PCI_EXCEPTION_MASK | \
175 I40E_PFINT_ICR0_ECC_ERR_MASK | \
176 I40E_PFINT_ICR0_PE_CRITERR_MASK)
177
178 #define IXL_QUEUE_MAX_XL710 64
179 #define IXL_QUEUE_MAX_X722 128
180
181 #define IXL_TX_PKT_DESCS 8
182 #define IXL_TX_PKT_MAXSIZE (MCLBYTES * IXL_TX_PKT_DESCS)
183 #define IXL_TX_QUEUE_ALIGN 128
184 #define IXL_RX_QUEUE_ALIGN 128
185
186 #define IXL_MCLBYTES (MCLBYTES - ETHER_ALIGN)
 187 #define IXL_MTU_ETHERLEN (ETHER_HDR_LEN \
 188 + ETHER_CRC_LEN)
189 #if 0
190 #define IXL_MAX_MTU (9728 - IXL_MTU_ETHERLEN)
191 #else
192 /* (dbuff * 5) - ETHER_HDR_LEN - ETHER_CRC_LEN */
193 #define IXL_MAX_MTU (9600 - IXL_MTU_ETHERLEN)
194 #endif
195 #define IXL_MIN_MTU (ETHER_MIN_LEN - ETHER_CRC_LEN)
196
197 #define IXL_PCIREG PCI_MAPREG_START
198
199 #define IXL_ITR0 0x0
200 #define IXL_ITR1 0x1
201 #define IXL_ITR2 0x2
202 #define IXL_NOITR 0x3
203
204 #define IXL_AQ_NUM 256
205 #define IXL_AQ_MASK (IXL_AQ_NUM - 1)
206 #define IXL_AQ_ALIGN 64 /* lol */
207 #define IXL_AQ_BUFLEN 4096
208
209 #define IXL_HMC_ROUNDUP 512
210 #define IXL_HMC_PGSIZE 4096
211 #define IXL_HMC_DVASZ sizeof(uint64_t)
212 #define IXL_HMC_PGS (IXL_HMC_PGSIZE / IXL_HMC_DVASZ)
213 #define IXL_HMC_L2SZ (IXL_HMC_PGSIZE * IXL_HMC_PGS)
214 #define IXL_HMC_PDVALID 1ULL
215
216 #define IXL_ATQ_EXEC_TIMEOUT (10 * hz)
217
218 #define IXL_SRRD_SRCTL_ATTEMPTS 100000
219
220 struct ixl_aq_regs {
221 bus_size_t atq_tail;
222 bus_size_t atq_head;
223 bus_size_t atq_len;
224 bus_size_t atq_bal;
225 bus_size_t atq_bah;
226
227 bus_size_t arq_tail;
228 bus_size_t arq_head;
229 bus_size_t arq_len;
230 bus_size_t arq_bal;
231 bus_size_t arq_bah;
232
233 uint32_t atq_len_enable;
234 uint32_t atq_tail_mask;
235 uint32_t atq_head_mask;
236
237 uint32_t arq_len_enable;
238 uint32_t arq_tail_mask;
239 uint32_t arq_head_mask;
240 };
241
242 struct ixl_phy_type {
243 uint64_t phy_type;
244 uint64_t ifm_type;
245 };
246
247 struct ixl_speed_type {
248 uint8_t dev_speed;
249 uint64_t net_speed;
250 };
251
252 struct ixl_aq_buf {
253 SIMPLEQ_ENTRY(ixl_aq_buf)
254 aqb_entry;
255 void *aqb_data;
256 bus_dmamap_t aqb_map;
257 bus_dma_segment_t aqb_seg;
258 size_t aqb_size;
259 int aqb_nsegs;
260 };
261 SIMPLEQ_HEAD(ixl_aq_bufs, ixl_aq_buf);
262
263 struct ixl_dmamem {
264 bus_dmamap_t ixm_map;
265 bus_dma_segment_t ixm_seg;
266 int ixm_nsegs;
267 size_t ixm_size;
268 void *ixm_kva;
269 };
270
271 #define IXL_DMA_MAP(_ixm) ((_ixm)->ixm_map)
272 #define IXL_DMA_DVA(_ixm) ((_ixm)->ixm_map->dm_segs[0].ds_addr)
273 #define IXL_DMA_KVA(_ixm) ((void *)(_ixm)->ixm_kva)
274 #define IXL_DMA_LEN(_ixm) ((_ixm)->ixm_size)
275
276 struct ixl_hmc_entry {
277 uint64_t hmc_base;
278 uint32_t hmc_count;
279 uint64_t hmc_size;
280 };
281
282 enum ixl_hmc_types {
283 IXL_HMC_LAN_TX = 0,
284 IXL_HMC_LAN_RX,
285 IXL_HMC_FCOE_CTX,
286 IXL_HMC_FCOE_FILTER,
287 IXL_HMC_COUNT
288 };
289
290 struct ixl_hmc_pack {
291 uint16_t offset;
292 uint16_t width;
293 uint16_t lsb;
294 };
295
296 /*
297 * these hmc objects have weird sizes and alignments, so these are abstract
298 * representations of them that are nice for c to populate.
299 *
300 * the packing code relies on little-endian values being stored in the fields,
301 * no high bits in the fields being set, and the fields must be packed in the
302 * same order as they are in the ctx structure.
303 */
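
/*
 * Illustrative example (not part of the driver logic): in the rxq pack
 * table below, the qlen field is 13 bits wide with its LSB at bit 89 of
 * the packed context, so it lands in bytes 11 and 12 of the destination
 * buffer.  Conceptually, for each table entry the packer copies `width'
 * bits from the struct member at `offset' into the destination starting
 * at bit `lsb'; see ixl_hmc_pack(), declared further below.
 */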
304
305 struct ixl_hmc_rxq {
306 uint16_t head;
307 uint8_t cpuid;
308 uint64_t base;
309 #define IXL_HMC_RXQ_BASE_UNIT 128
310 uint16_t qlen;
311 uint16_t dbuff;
312 #define IXL_HMC_RXQ_DBUFF_UNIT 128
313 uint8_t hbuff;
314 #define IXL_HMC_RXQ_HBUFF_UNIT 64
315 uint8_t dtype;
316 #define IXL_HMC_RXQ_DTYPE_NOSPLIT 0x0
317 #define IXL_HMC_RXQ_DTYPE_HSPLIT 0x1
318 #define IXL_HMC_RXQ_DTYPE_SPLIT_ALWAYS 0x2
319 uint8_t dsize;
320 #define IXL_HMC_RXQ_DSIZE_16 0
321 #define IXL_HMC_RXQ_DSIZE_32 1
322 uint8_t crcstrip;
323 uint8_t fc_ena;
324 uint8_t l2sel;
325 uint8_t hsplit_0;
326 uint8_t hsplit_1;
327 uint8_t showiv;
328 uint16_t rxmax;
329 uint8_t tphrdesc_ena;
330 uint8_t tphwdesc_ena;
331 uint8_t tphdata_ena;
332 uint8_t tphhead_ena;
333 uint8_t lrxqthresh;
334 uint8_t prefena;
335 };
336
337 static const struct ixl_hmc_pack ixl_hmc_pack_rxq[] = {
338 { offsetof(struct ixl_hmc_rxq, head), 13, 0 },
339 { offsetof(struct ixl_hmc_rxq, cpuid), 8, 13 },
340 { offsetof(struct ixl_hmc_rxq, base), 57, 32 },
341 { offsetof(struct ixl_hmc_rxq, qlen), 13, 89 },
342 { offsetof(struct ixl_hmc_rxq, dbuff), 7, 102 },
343 { offsetof(struct ixl_hmc_rxq, hbuff), 5, 109 },
344 { offsetof(struct ixl_hmc_rxq, dtype), 2, 114 },
345 { offsetof(struct ixl_hmc_rxq, dsize), 1, 116 },
346 { offsetof(struct ixl_hmc_rxq, crcstrip), 1, 117 },
347 { offsetof(struct ixl_hmc_rxq, fc_ena), 1, 118 },
348 { offsetof(struct ixl_hmc_rxq, l2sel), 1, 119 },
349 { offsetof(struct ixl_hmc_rxq, hsplit_0), 4, 120 },
350 { offsetof(struct ixl_hmc_rxq, hsplit_1), 2, 124 },
351 { offsetof(struct ixl_hmc_rxq, showiv), 1, 127 },
352 { offsetof(struct ixl_hmc_rxq, rxmax), 14, 174 },
353 { offsetof(struct ixl_hmc_rxq, tphrdesc_ena), 1, 193 },
354 { offsetof(struct ixl_hmc_rxq, tphwdesc_ena), 1, 194 },
355 { offsetof(struct ixl_hmc_rxq, tphdata_ena), 1, 195 },
356 { offsetof(struct ixl_hmc_rxq, tphhead_ena), 1, 196 },
357 { offsetof(struct ixl_hmc_rxq, lrxqthresh), 3, 198 },
358 { offsetof(struct ixl_hmc_rxq, prefena), 1, 201 },
359 };
360
361 #define IXL_HMC_RXQ_MINSIZE (201 + 1)
362
363 struct ixl_hmc_txq {
364 uint16_t head;
365 uint8_t new_context;
366 uint64_t base;
367 #define IXL_HMC_TXQ_BASE_UNIT 128
368 uint8_t fc_ena;
369 uint8_t timesync_ena;
370 uint8_t fd_ena;
371 uint8_t alt_vlan_ena;
372 uint8_t cpuid;
373 uint16_t thead_wb;
374 uint8_t head_wb_ena;
375 #define IXL_HMC_TXQ_DESC_WB 0
376 #define IXL_HMC_TXQ_HEAD_WB 1
377 uint16_t qlen;
378 uint8_t tphrdesc_ena;
379 uint8_t tphrpacket_ena;
380 uint8_t tphwdesc_ena;
381 uint64_t head_wb_addr;
382 uint32_t crc;
383 uint16_t rdylist;
384 uint8_t rdylist_act;
385 };
386
387 static const struct ixl_hmc_pack ixl_hmc_pack_txq[] = {
388 { offsetof(struct ixl_hmc_txq, head), 13, 0 },
389 { offsetof(struct ixl_hmc_txq, new_context), 1, 30 },
390 { offsetof(struct ixl_hmc_txq, base), 57, 32 },
391 { offsetof(struct ixl_hmc_txq, fc_ena), 1, 89 },
392 { offsetof(struct ixl_hmc_txq, timesync_ena), 1, 90 },
393 { offsetof(struct ixl_hmc_txq, fd_ena), 1, 91 },
394 { offsetof(struct ixl_hmc_txq, alt_vlan_ena), 1, 92 },
395 { offsetof(struct ixl_hmc_txq, cpuid), 8, 96 },
396 /* line 1 */
397 { offsetof(struct ixl_hmc_txq, thead_wb), 13, 0 + 128 },
398 { offsetof(struct ixl_hmc_txq, head_wb_ena), 1, 32 + 128 },
399 { offsetof(struct ixl_hmc_txq, qlen), 13, 33 + 128 },
400 { offsetof(struct ixl_hmc_txq, tphrdesc_ena), 1, 46 + 128 },
401 { offsetof(struct ixl_hmc_txq, tphrpacket_ena), 1, 47 + 128 },
402 { offsetof(struct ixl_hmc_txq, tphwdesc_ena), 1, 48 + 128 },
403 { offsetof(struct ixl_hmc_txq, head_wb_addr), 64, 64 + 128 },
404 /* line 7 */
405 { offsetof(struct ixl_hmc_txq, crc), 32, 0 + (7*128) },
406 { offsetof(struct ixl_hmc_txq, rdylist), 10, 84 + (7*128) },
407 { offsetof(struct ixl_hmc_txq, rdylist_act), 1, 94 + (7*128) },
408 };
409
410 #define IXL_HMC_TXQ_MINSIZE (94 + (7*128) + 1)
411
412 struct ixl_work {
413 struct work ixw_cookie;
414 void (*ixw_func)(void *);
415 void *ixw_arg;
416 unsigned int ixw_added;
417 };
418 #define IXL_WORKQUEUE_PRI PRI_SOFTNET
419
420 struct ixl_tx_map {
421 struct mbuf *txm_m;
422 bus_dmamap_t txm_map;
423 unsigned int txm_eop;
424 };
425
426 struct ixl_tx_ring {
427 kmutex_t txr_lock;
428 struct ixl_softc *txr_sc;
429
430 unsigned int txr_prod;
431 unsigned int txr_cons;
432
433 struct ixl_tx_map *txr_maps;
434 struct ixl_dmamem txr_mem;
435
436 bus_size_t txr_tail;
437 unsigned int txr_qid;
438 pcq_t *txr_intrq;
439 void *txr_si;
440
441 struct evcnt txr_defragged;
442 struct evcnt txr_defrag_failed;
443 struct evcnt txr_pcqdrop;
444 struct evcnt txr_transmitdef;
445 struct evcnt txr_intr;
446 struct evcnt txr_defer;
447 };
448
449 struct ixl_rx_map {
450 struct mbuf *rxm_m;
451 bus_dmamap_t rxm_map;
452 };
453
454 struct ixl_rx_ring {
455 kmutex_t rxr_lock;
456
457 unsigned int rxr_prod;
458 unsigned int rxr_cons;
459
460 struct ixl_rx_map *rxr_maps;
461 struct ixl_dmamem rxr_mem;
462
463 struct mbuf *rxr_m_head;
464 struct mbuf **rxr_m_tail;
465
466 bus_size_t rxr_tail;
467 unsigned int rxr_qid;
468
469 struct evcnt rxr_mgethdr_failed;
470 struct evcnt rxr_mgetcl_failed;
471 struct evcnt rxr_mbuf_load_failed;
472 struct evcnt rxr_intr;
473 struct evcnt rxr_defer;
474 };
475
476 struct ixl_queue_pair {
477 struct ixl_softc *qp_sc;
478 struct ixl_tx_ring *qp_txr;
479 struct ixl_rx_ring *qp_rxr;
480
481 char qp_name[16];
482
483 void *qp_si;
484 struct work qp_work;
485 bool qp_workqueue;
486 };
487
488 struct ixl_atq {
489 struct ixl_aq_desc iatq_desc;
490 void (*iatq_fn)(struct ixl_softc *,
491 const struct ixl_aq_desc *);
492 };
493 SIMPLEQ_HEAD(ixl_atq_list, ixl_atq);
494
495 struct ixl_product {
496 unsigned int vendor_id;
497 unsigned int product_id;
498 };
499
500 struct ixl_stats_counters {
501 bool isc_has_offset;
502 struct evcnt isc_crc_errors;
503 uint64_t isc_crc_errors_offset;
504 struct evcnt isc_illegal_bytes;
505 uint64_t isc_illegal_bytes_offset;
506 struct evcnt isc_rx_bytes;
507 uint64_t isc_rx_bytes_offset;
508 struct evcnt isc_rx_discards;
509 uint64_t isc_rx_discards_offset;
510 struct evcnt isc_rx_unicast;
511 uint64_t isc_rx_unicast_offset;
512 struct evcnt isc_rx_multicast;
513 uint64_t isc_rx_multicast_offset;
514 struct evcnt isc_rx_broadcast;
515 uint64_t isc_rx_broadcast_offset;
516 struct evcnt isc_rx_size_64;
517 uint64_t isc_rx_size_64_offset;
518 struct evcnt isc_rx_size_127;
519 uint64_t isc_rx_size_127_offset;
520 struct evcnt isc_rx_size_255;
521 uint64_t isc_rx_size_255_offset;
522 struct evcnt isc_rx_size_511;
523 uint64_t isc_rx_size_511_offset;
524 struct evcnt isc_rx_size_1023;
525 uint64_t isc_rx_size_1023_offset;
526 struct evcnt isc_rx_size_1522;
527 uint64_t isc_rx_size_1522_offset;
528 struct evcnt isc_rx_size_big;
529 uint64_t isc_rx_size_big_offset;
530 struct evcnt isc_rx_undersize;
531 uint64_t isc_rx_undersize_offset;
532 struct evcnt isc_rx_oversize;
533 uint64_t isc_rx_oversize_offset;
534 struct evcnt isc_rx_fragments;
535 uint64_t isc_rx_fragments_offset;
536 struct evcnt isc_rx_jabber;
537 uint64_t isc_rx_jabber_offset;
538 struct evcnt isc_tx_bytes;
539 uint64_t isc_tx_bytes_offset;
540 struct evcnt isc_tx_dropped_link_down;
541 uint64_t isc_tx_dropped_link_down_offset;
542 struct evcnt isc_tx_unicast;
543 uint64_t isc_tx_unicast_offset;
544 struct evcnt isc_tx_multicast;
545 uint64_t isc_tx_multicast_offset;
546 struct evcnt isc_tx_broadcast;
547 uint64_t isc_tx_broadcast_offset;
548 struct evcnt isc_tx_size_64;
549 uint64_t isc_tx_size_64_offset;
550 struct evcnt isc_tx_size_127;
551 uint64_t isc_tx_size_127_offset;
552 struct evcnt isc_tx_size_255;
553 uint64_t isc_tx_size_255_offset;
554 struct evcnt isc_tx_size_511;
555 uint64_t isc_tx_size_511_offset;
556 struct evcnt isc_tx_size_1023;
557 uint64_t isc_tx_size_1023_offset;
558 struct evcnt isc_tx_size_1522;
559 uint64_t isc_tx_size_1522_offset;
560 struct evcnt isc_tx_size_big;
561 uint64_t isc_tx_size_big_offset;
562 struct evcnt isc_mac_local_faults;
563 uint64_t isc_mac_local_faults_offset;
564 struct evcnt isc_mac_remote_faults;
565 uint64_t isc_mac_remote_faults_offset;
566 struct evcnt isc_link_xon_rx;
567 uint64_t isc_link_xon_rx_offset;
568 struct evcnt isc_link_xon_tx;
569 uint64_t isc_link_xon_tx_offset;
570 struct evcnt isc_link_xoff_rx;
571 uint64_t isc_link_xoff_rx_offset;
572 struct evcnt isc_link_xoff_tx;
573 uint64_t isc_link_xoff_tx_offset;
574 struct evcnt isc_vsi_rx_discards;
575 uint64_t isc_vsi_rx_discards_offset;
576 struct evcnt isc_vsi_rx_bytes;
577 uint64_t isc_vsi_rx_bytes_offset;
578 struct evcnt isc_vsi_rx_unicast;
579 uint64_t isc_vsi_rx_unicast_offset;
580 struct evcnt isc_vsi_rx_multicast;
581 uint64_t isc_vsi_rx_multicast_offset;
582 struct evcnt isc_vsi_rx_broadcast;
583 uint64_t isc_vsi_rx_broadcast_offset;
584 struct evcnt isc_vsi_tx_errors;
585 uint64_t isc_vsi_tx_errors_offset;
586 struct evcnt isc_vsi_tx_bytes;
587 uint64_t isc_vsi_tx_bytes_offset;
588 struct evcnt isc_vsi_tx_unicast;
589 uint64_t isc_vsi_tx_unicast_offset;
590 struct evcnt isc_vsi_tx_multicast;
591 uint64_t isc_vsi_tx_multicast_offset;
592 struct evcnt isc_vsi_tx_broadcast;
593 uint64_t isc_vsi_tx_broadcast_offset;
594 };
595
596 /*
597 * Locking notes:
598 * + a field in ixl_tx_ring is protected by txr_lock (a spin mutex), and
599 * a field in ixl_rx_ring is protected by rxr_lock (a spin mutex).
 600 * - no more than one of these locks may be held at once.
 601 * + a field named sc_atq_* in ixl_softc is protected by sc_atq_lock
 602 * (a spin mutex).
 603 * - this lock cannot be held together with txr_lock or rxr_lock.
 604 * + a field named sc_arq_* is not protected by any lock.
 605 * - operations on sc_arq_* are done in a single context related to
 606 * sc_arq_task.
 607 * + other fields in ixl_softc are protected by sc_cfg_lock
 608 * (an adaptive mutex).
 609 * - it must be acquired before any other lock and may be
 610 * released after the other lock is released (see the sketch below).
 611 */
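
/*
 * Illustrative sketch of the ordering above (not part of the driver):
 * a configuration path takes sc_cfg_lock first and a ring lock second,
 * roughly:
 *
 *	mutex_enter(&sc->sc_cfg_lock);
 *	mutex_enter(&txr->txr_lock);
 *	... update ring and softc configuration fields ...
 *	mutex_exit(&txr->txr_lock);
 *	mutex_exit(&sc->sc_cfg_lock);
 *
 * txr_lock and rxr_lock are never held at the same time, and
 * sc_atq_lock is never taken while a ring lock is held.
 */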
612
613 struct ixl_softc {
614 device_t sc_dev;
615 struct ethercom sc_ec;
616 bool sc_attached;
617 bool sc_dead;
618 uint32_t sc_port;
619 struct sysctllog *sc_sysctllog;
620 struct workqueue *sc_workq;
621 struct workqueue *sc_workq_txrx;
622 int sc_stats_intval;
623 callout_t sc_stats_callout;
624 struct ixl_work sc_stats_task;
625 struct ixl_stats_counters
626 sc_stats_counters;
627 uint8_t sc_enaddr[ETHER_ADDR_LEN];
628 struct ifmedia sc_media;
629 uint64_t sc_media_status;
630 uint64_t sc_media_active;
631 uint64_t sc_phy_types;
632 uint8_t sc_phy_abilities;
633 uint8_t sc_phy_linkspeed;
634 uint8_t sc_phy_fec_cfg;
635 uint16_t sc_eee_cap;
636 uint32_t sc_eeer_val;
637 uint8_t sc_d3_lpan;
638 kmutex_t sc_cfg_lock;
639 enum i40e_mac_type sc_mac_type;
640 uint32_t sc_rss_table_size;
641 uint32_t sc_rss_table_entry_width;
642 bool sc_txrx_workqueue;
643 u_int sc_tx_process_limit;
644 u_int sc_rx_process_limit;
645 u_int sc_tx_intr_process_limit;
646 u_int sc_rx_intr_process_limit;
647
648 int sc_cur_ec_capenable;
649
650 struct pci_attach_args sc_pa;
651 pci_intr_handle_t *sc_ihp;
652 void **sc_ihs;
653 unsigned int sc_nintrs;
654
655 bus_dma_tag_t sc_dmat;
656 bus_space_tag_t sc_memt;
657 bus_space_handle_t sc_memh;
658 bus_size_t sc_mems;
659
660 uint8_t sc_pf_id;
661 uint16_t sc_uplink_seid; /* le */
662 uint16_t sc_downlink_seid; /* le */
663 uint16_t sc_vsi_number;
664 uint16_t sc_vsi_stat_counter_idx;
665 uint16_t sc_seid;
666 unsigned int sc_base_queue;
667
668 pci_intr_type_t sc_intrtype;
669 unsigned int sc_msix_vector_queue;
670
671 struct ixl_dmamem sc_scratch;
672 struct ixl_dmamem sc_aqbuf;
673
674 const struct ixl_aq_regs *
675 sc_aq_regs;
676 uint32_t sc_aq_flags;
677 #define IXL_SC_AQ_FLAG_RXCTL __BIT(0)
678 #define IXL_SC_AQ_FLAG_NVMLOCK __BIT(1)
679 #define IXL_SC_AQ_FLAG_NVMREAD __BIT(2)
680 #define IXL_SC_AQ_FLAG_RSS __BIT(3)
681
682 kmutex_t sc_atq_lock;
683 kcondvar_t sc_atq_cv;
684 struct ixl_dmamem sc_atq;
685 unsigned int sc_atq_prod;
686 unsigned int sc_atq_cons;
687
688 struct ixl_dmamem sc_arq;
689 struct ixl_work sc_arq_task;
690 struct ixl_aq_bufs sc_arq_idle;
691 struct ixl_aq_buf *sc_arq_live[IXL_AQ_NUM];
692 unsigned int sc_arq_prod;
693 unsigned int sc_arq_cons;
694
695 struct ixl_work sc_link_state_task;
696 struct ixl_atq sc_link_state_atq;
697
698 struct ixl_dmamem sc_hmc_sd;
699 struct ixl_dmamem sc_hmc_pd;
700 struct ixl_hmc_entry sc_hmc_entries[IXL_HMC_COUNT];
701
702 unsigned int sc_tx_ring_ndescs;
703 unsigned int sc_rx_ring_ndescs;
704 unsigned int sc_nqueue_pairs;
705 unsigned int sc_nqueue_pairs_max;
706 unsigned int sc_nqueue_pairs_device;
707 struct ixl_queue_pair *sc_qps;
708
709 struct evcnt sc_event_atq;
710 struct evcnt sc_event_link;
711 struct evcnt sc_event_ecc_err;
712 struct evcnt sc_event_pci_exception;
713 struct evcnt sc_event_crit_err;
714 };
715
716 #define IXL_TXRX_PROCESS_UNLIMIT UINT_MAX
717 #define IXL_TX_PROCESS_LIMIT 256
718 #define IXL_RX_PROCESS_LIMIT 256
719 #define IXL_TX_INTR_PROCESS_LIMIT 256
720 #define IXL_RX_INTR_PROCESS_LIMIT 0U
721
722 #define IXL_IFCAP_RXCSUM (IFCAP_CSUM_IPv4_Rx | \
723 IFCAP_CSUM_TCPv4_Rx | \
724 IFCAP_CSUM_UDPv4_Rx | \
725 IFCAP_CSUM_TCPv6_Rx | \
726 IFCAP_CSUM_UDPv6_Rx)
727 #define IXL_IFCAP_TXCSUM (IFCAP_CSUM_IPv4_Tx | \
728 IFCAP_CSUM_TCPv4_Tx | \
729 IFCAP_CSUM_UDPv4_Tx | \
730 IFCAP_CSUM_TCPv6_Tx | \
731 IFCAP_CSUM_UDPv6_Tx)
732 #define IXL_CSUM_ALL_OFFLOAD (M_CSUM_IPv4 | \
733 M_CSUM_TCPv4 | M_CSUM_TCPv6 | \
734 M_CSUM_UDPv4 | M_CSUM_UDPv6)
735
736 #define delaymsec(_x) DELAY(1000 * (_x))
737 #ifdef IXL_DEBUG
738 #define DDPRINTF(sc, fmt, args...) \
739 do { \
740 if ((sc) != NULL) { \
741 device_printf( \
742 ((struct ixl_softc *)(sc))->sc_dev, \
743 ""); \
744 } \
745 printf("%s:\t" fmt, __func__, ##args); \
746 } while (0)
747 #else
748 #define DDPRINTF(sc, fmt, args...) __nothing
749 #endif
750 #ifndef IXL_STATS_INTERVAL_MSEC
751 #define IXL_STATS_INTERVAL_MSEC 10000
752 #endif
753 #ifndef IXL_QUEUE_NUM
754 #define IXL_QUEUE_NUM 0
755 #endif
756
757 static bool ixl_param_nomsix = false;
758 static int ixl_param_stats_interval = IXL_STATS_INTERVAL_MSEC;
759 static int ixl_param_nqps_limit = IXL_QUEUE_NUM;
760 static unsigned int ixl_param_tx_ndescs = 1024;
761 static unsigned int ixl_param_rx_ndescs = 1024;
762
763 static enum i40e_mac_type
764 ixl_mactype(pci_product_id_t);
765 static void ixl_clear_hw(struct ixl_softc *);
766 static int ixl_pf_reset(struct ixl_softc *);
767
768 static int ixl_dmamem_alloc(struct ixl_softc *, struct ixl_dmamem *,
769 bus_size_t, bus_size_t);
770 static void ixl_dmamem_free(struct ixl_softc *, struct ixl_dmamem *);
771
772 static int ixl_arq_fill(struct ixl_softc *);
773 static void ixl_arq_unfill(struct ixl_softc *);
774
775 static int ixl_atq_poll(struct ixl_softc *, struct ixl_aq_desc *,
776 unsigned int);
777 static void ixl_atq_set(struct ixl_atq *,
778 void (*)(struct ixl_softc *, const struct ixl_aq_desc *));
779 static int ixl_atq_post(struct ixl_softc *, struct ixl_atq *);
780 static int ixl_atq_post_locked(struct ixl_softc *, struct ixl_atq *);
781 static void ixl_atq_done(struct ixl_softc *);
782 static int ixl_atq_exec(struct ixl_softc *, struct ixl_atq *);
783 static int ixl_get_version(struct ixl_softc *);
784 static int ixl_get_nvm_version(struct ixl_softc *);
785 static int ixl_get_hw_capabilities(struct ixl_softc *);
786 static int ixl_pxe_clear(struct ixl_softc *);
787 static int ixl_lldp_shut(struct ixl_softc *);
788 static int ixl_get_mac(struct ixl_softc *);
789 static int ixl_get_switch_config(struct ixl_softc *);
790 static int ixl_phy_mask_ints(struct ixl_softc *);
791 static int ixl_get_phy_info(struct ixl_softc *);
792 static int ixl_set_phy_config(struct ixl_softc *, uint8_t, uint8_t, bool);
793 static int ixl_set_phy_autoselect(struct ixl_softc *);
794 static int ixl_restart_an(struct ixl_softc *);
795 static int ixl_hmc(struct ixl_softc *);
796 static void ixl_hmc_free(struct ixl_softc *);
797 static int ixl_get_vsi(struct ixl_softc *);
798 static int ixl_set_vsi(struct ixl_softc *);
799 static void ixl_set_filter_control(struct ixl_softc *);
800 static void ixl_get_link_status(void *);
801 static int ixl_get_link_status_poll(struct ixl_softc *, int *);
802 static int ixl_set_link_status(struct ixl_softc *,
803 const struct ixl_aq_desc *);
804 static uint64_t ixl_search_link_speed(uint8_t);
805 static uint8_t ixl_search_baudrate(uint64_t);
806 static void ixl_config_rss(struct ixl_softc *);
807 static int ixl_add_macvlan(struct ixl_softc *, const uint8_t *,
808 uint16_t, uint16_t);
809 static int ixl_remove_macvlan(struct ixl_softc *, const uint8_t *,
810 uint16_t, uint16_t);
811 static void ixl_arq(void *);
812 static void ixl_hmc_pack(void *, const void *,
813 const struct ixl_hmc_pack *, unsigned int);
814 static uint32_t ixl_rd_rx_csr(struct ixl_softc *, uint32_t);
815 static void ixl_wr_rx_csr(struct ixl_softc *, uint32_t, uint32_t);
816 static int ixl_rd16_nvm(struct ixl_softc *, uint16_t, uint16_t *);
817
818 static int ixl_match(device_t, cfdata_t, void *);
819 static void ixl_attach(device_t, device_t, void *);
820 static int ixl_detach(device_t, int);
821
822 static void ixl_media_add(struct ixl_softc *);
823 static int ixl_media_change(struct ifnet *);
824 static void ixl_media_status(struct ifnet *, struct ifmediareq *);
825 static void ixl_watchdog(struct ifnet *);
826 static int ixl_ioctl(struct ifnet *, u_long, void *);
827 static void ixl_start(struct ifnet *);
828 static int ixl_transmit(struct ifnet *, struct mbuf *);
829 static void ixl_deferred_transmit(void *);
830 static int ixl_intr(void *);
831 static int ixl_queue_intr(void *);
832 static int ixl_other_intr(void *);
833 static void ixl_handle_queue(void *);
834 static void ixl_handle_queue_wk(struct work *, void *);
835 static void ixl_sched_handle_queue(struct ixl_softc *,
836 struct ixl_queue_pair *);
837 static int ixl_init(struct ifnet *);
838 static int ixl_init_locked(struct ixl_softc *);
839 static void ixl_stop(struct ifnet *, int);
840 static void ixl_stop_locked(struct ixl_softc *);
841 static int ixl_iff(struct ixl_softc *);
842 static int ixl_ifflags_cb(struct ethercom *);
843 static int ixl_setup_interrupts(struct ixl_softc *);
844 static int ixl_establish_intx(struct ixl_softc *);
845 static int ixl_establish_msix(struct ixl_softc *);
846 static void ixl_enable_queue_intr(struct ixl_softc *,
847 struct ixl_queue_pair *);
848 static void ixl_disable_queue_intr(struct ixl_softc *,
849 struct ixl_queue_pair *);
850 static void ixl_enable_other_intr(struct ixl_softc *);
851 static void ixl_disable_other_intr(struct ixl_softc *);
852 static void ixl_config_queue_intr(struct ixl_softc *);
853 static void ixl_config_other_intr(struct ixl_softc *);
854
855 static struct ixl_tx_ring *
856 ixl_txr_alloc(struct ixl_softc *, unsigned int);
857 static void ixl_txr_qdis(struct ixl_softc *, struct ixl_tx_ring *, int);
858 static void ixl_txr_config(struct ixl_softc *, struct ixl_tx_ring *);
859 static int ixl_txr_enabled(struct ixl_softc *, struct ixl_tx_ring *);
860 static int ixl_txr_disabled(struct ixl_softc *, struct ixl_tx_ring *);
861 static void ixl_txr_unconfig(struct ixl_softc *, struct ixl_tx_ring *);
862 static void ixl_txr_clean(struct ixl_softc *, struct ixl_tx_ring *);
863 static void ixl_txr_free(struct ixl_softc *, struct ixl_tx_ring *);
864 static int ixl_txeof(struct ixl_softc *, struct ixl_tx_ring *, u_int);
865
866 static struct ixl_rx_ring *
867 ixl_rxr_alloc(struct ixl_softc *, unsigned int);
868 static void ixl_rxr_config(struct ixl_softc *, struct ixl_rx_ring *);
869 static int ixl_rxr_enabled(struct ixl_softc *, struct ixl_rx_ring *);
870 static int ixl_rxr_disabled(struct ixl_softc *, struct ixl_rx_ring *);
871 static void ixl_rxr_unconfig(struct ixl_softc *, struct ixl_rx_ring *);
872 static void ixl_rxr_clean(struct ixl_softc *, struct ixl_rx_ring *);
873 static void ixl_rxr_free(struct ixl_softc *, struct ixl_rx_ring *);
874 static int ixl_rxeof(struct ixl_softc *, struct ixl_rx_ring *, u_int);
875 static int ixl_rxfill(struct ixl_softc *, struct ixl_rx_ring *);
876
877 static struct workqueue *
878 ixl_workq_create(const char *, pri_t, int, int);
879 static void ixl_workq_destroy(struct workqueue *);
880 static int ixl_workqs_teardown(device_t);
881 static void ixl_work_set(struct ixl_work *, void (*)(void *), void *);
882 static void ixl_work_add(struct workqueue *, struct ixl_work *);
883 static void ixl_work_wait(struct workqueue *, struct ixl_work *);
884 static void ixl_workq_work(struct work *, void *);
885 static const struct ixl_product *
886 ixl_lookup(const struct pci_attach_args *pa);
887 static void ixl_link_state_update(struct ixl_softc *,
888 const struct ixl_aq_desc *);
889 static int ixl_vlan_cb(struct ethercom *, uint16_t, bool);
890 static int ixl_setup_vlan_hwfilter(struct ixl_softc *);
891 static void ixl_teardown_vlan_hwfilter(struct ixl_softc *);
892 static int ixl_update_macvlan(struct ixl_softc *);
 893 static int ixl_setup_interrupts(struct ixl_softc *);
894 static void ixl_teardown_interrupts(struct ixl_softc *);
895 static int ixl_setup_stats(struct ixl_softc *);
896 static void ixl_teardown_stats(struct ixl_softc *);
897 static void ixl_stats_callout(void *);
898 static void ixl_stats_update(void *);
899 static int ixl_setup_sysctls(struct ixl_softc *);
900 static void ixl_teardown_sysctls(struct ixl_softc *);
901 static int ixl_queue_pairs_alloc(struct ixl_softc *);
902 static void ixl_queue_pairs_free(struct ixl_softc *);
903
904 static const struct ixl_phy_type ixl_phy_type_map[] = {
905 { 1ULL << IXL_PHY_TYPE_SGMII, IFM_1000_SGMII },
906 { 1ULL << IXL_PHY_TYPE_1000BASE_KX, IFM_1000_KX },
907 { 1ULL << IXL_PHY_TYPE_10GBASE_KX4, IFM_10G_KX4 },
908 { 1ULL << IXL_PHY_TYPE_10GBASE_KR, IFM_10G_KR },
909 { 1ULL << IXL_PHY_TYPE_40GBASE_KR4, IFM_40G_KR4 },
910 { 1ULL << IXL_PHY_TYPE_XAUI |
911 1ULL << IXL_PHY_TYPE_XFI, IFM_10G_CX4 },
912 { 1ULL << IXL_PHY_TYPE_SFI, IFM_10G_SFI },
913 { 1ULL << IXL_PHY_TYPE_XLAUI |
914 1ULL << IXL_PHY_TYPE_XLPPI, IFM_40G_XLPPI },
915 { 1ULL << IXL_PHY_TYPE_40GBASE_CR4_CU |
916 1ULL << IXL_PHY_TYPE_40GBASE_CR4, IFM_40G_CR4 },
917 { 1ULL << IXL_PHY_TYPE_10GBASE_CR1_CU |
918 1ULL << IXL_PHY_TYPE_10GBASE_CR1, IFM_10G_CR1 },
919 { 1ULL << IXL_PHY_TYPE_10GBASE_AOC, IFM_10G_AOC },
920 { 1ULL << IXL_PHY_TYPE_40GBASE_AOC, IFM_40G_AOC },
921 { 1ULL << IXL_PHY_TYPE_100BASE_TX, IFM_100_TX },
922 { 1ULL << IXL_PHY_TYPE_1000BASE_T_OPTICAL |
923 1ULL << IXL_PHY_TYPE_1000BASE_T, IFM_1000_T },
924 { 1ULL << IXL_PHY_TYPE_10GBASE_T, IFM_10G_T },
925 { 1ULL << IXL_PHY_TYPE_10GBASE_SR, IFM_10G_SR },
926 { 1ULL << IXL_PHY_TYPE_10GBASE_LR, IFM_10G_LR },
927 { 1ULL << IXL_PHY_TYPE_10GBASE_SFPP_CU, IFM_10G_TWINAX },
928 { 1ULL << IXL_PHY_TYPE_40GBASE_SR4, IFM_40G_SR4 },
929 { 1ULL << IXL_PHY_TYPE_40GBASE_LR4, IFM_40G_LR4 },
930 { 1ULL << IXL_PHY_TYPE_1000BASE_SX, IFM_1000_SX },
931 { 1ULL << IXL_PHY_TYPE_1000BASE_LX, IFM_1000_LX },
932 { 1ULL << IXL_PHY_TYPE_20GBASE_KR2, IFM_20G_KR2 },
933 { 1ULL << IXL_PHY_TYPE_25GBASE_KR, IFM_25G_KR },
934 { 1ULL << IXL_PHY_TYPE_25GBASE_CR, IFM_25G_CR },
935 { 1ULL << IXL_PHY_TYPE_25GBASE_SR, IFM_25G_SR },
936 { 1ULL << IXL_PHY_TYPE_25GBASE_LR, IFM_25G_LR },
937 { 1ULL << IXL_PHY_TYPE_25GBASE_AOC, IFM_25G_AOC },
938 { 1ULL << IXL_PHY_TYPE_25GBASE_ACC, IFM_25G_CR },
939 };
940
941 static const struct ixl_speed_type ixl_speed_type_map[] = {
942 { IXL_AQ_LINK_SPEED_40GB, IF_Gbps(40) },
943 { IXL_AQ_LINK_SPEED_25GB, IF_Gbps(25) },
944 { IXL_AQ_LINK_SPEED_10GB, IF_Gbps(10) },
945 { IXL_AQ_LINK_SPEED_1000MB, IF_Mbps(1000) },
946 { IXL_AQ_LINK_SPEED_100MB, IF_Mbps(100)},
947 };
948
949 static const struct ixl_aq_regs ixl_pf_aq_regs = {
950 .atq_tail = I40E_PF_ATQT,
951 .atq_tail_mask = I40E_PF_ATQT_ATQT_MASK,
952 .atq_head = I40E_PF_ATQH,
953 .atq_head_mask = I40E_PF_ATQH_ATQH_MASK,
954 .atq_len = I40E_PF_ATQLEN,
955 .atq_bal = I40E_PF_ATQBAL,
956 .atq_bah = I40E_PF_ATQBAH,
957 .atq_len_enable = I40E_PF_ATQLEN_ATQENABLE_MASK,
958
959 .arq_tail = I40E_PF_ARQT,
960 .arq_tail_mask = I40E_PF_ARQT_ARQT_MASK,
961 .arq_head = I40E_PF_ARQH,
962 .arq_head_mask = I40E_PF_ARQH_ARQH_MASK,
963 .arq_len = I40E_PF_ARQLEN,
964 .arq_bal = I40E_PF_ARQBAL,
965 .arq_bah = I40E_PF_ARQBAH,
966 .arq_len_enable = I40E_PF_ARQLEN_ARQENABLE_MASK,
967 };
968
969 #define ixl_rd(_s, _r) \
970 bus_space_read_4((_s)->sc_memt, (_s)->sc_memh, (_r))
971 #define ixl_wr(_s, _r, _v) \
972 bus_space_write_4((_s)->sc_memt, (_s)->sc_memh, (_r), (_v))
973 #define ixl_barrier(_s, _r, _l, _o) \
974 bus_space_barrier((_s)->sc_memt, (_s)->sc_memh, (_r), (_l), (_o))
975 #define ixl_flush(_s) (void)ixl_rd((_s), I40E_GLGEN_STAT)
976 #define ixl_nqueues(_sc) (1 << ((_sc)->sc_nqueue_pairs - 1))
977
978 static inline uint32_t
979 ixl_dmamem_hi(struct ixl_dmamem *ixm)
980 {
981 uint32_t retval;
982 uint64_t val;
983
984 if (sizeof(IXL_DMA_DVA(ixm)) > 4) {
985 val = (intptr_t)IXL_DMA_DVA(ixm);
986 retval = (uint32_t)(val >> 32);
987 } else {
988 retval = 0;
989 }
990
991 return retval;
992 }
993
994 static inline uint32_t
995 ixl_dmamem_lo(struct ixl_dmamem *ixm)
996 {
997
998 return (uint32_t)IXL_DMA_DVA(ixm);
999 }
1000
1001 static inline void
1002 ixl_aq_dva(struct ixl_aq_desc *iaq, bus_addr_t addr)
1003 {
1004 uint64_t val;
1005
1006 if (sizeof(addr) > 4) {
1007 val = (intptr_t)addr;
1008 iaq->iaq_param[2] = htole32(val >> 32);
1009 } else {
1010 iaq->iaq_param[2] = htole32(0);
1011 }
1012
1013 iaq->iaq_param[3] = htole32(addr);
1014 }
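
/*
 * For illustration (address value hypothetical): passing a 64-bit DMA
 * address such as 0x123456789a to ixl_aq_dva() above yields
 * iaq_param[2] = htole32(0x12) and iaq_param[3] = htole32(0x3456789a),
 * i.e. the high and low 32 bits of the address respectively.
 */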
1015
1016 static inline unsigned int
1017 ixl_rxr_unrefreshed(unsigned int prod, unsigned int cons, unsigned int ndescs)
1018 {
1019 unsigned int num;
1020
1021 if (prod < cons)
1022 num = cons - prod;
1023 else
1024 num = (ndescs - prod) + cons;
1025
1026 if (__predict_true(num > 0)) {
 1027 /* the device cannot receive packets if all descriptors are filled */
1028 num -= 1;
1029 }
1030
1031 return num;
1032 }
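
/*
 * Worked example of the arithmetic above (numbers purely illustrative):
 * with ndescs = 8, prod = 6 and cons = 2 the ring has wrapped, so
 * num = (8 - 6) + 2 = 4; one descriptor is then held back, so up to
 * 3 slots may still be refilled.
 */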
1033
1034 CFATTACH_DECL3_NEW(ixl, sizeof(struct ixl_softc),
1035 ixl_match, ixl_attach, ixl_detach, NULL, NULL, NULL,
1036 DVF_DETACH_SHUTDOWN);
1037
1038 static const struct ixl_product ixl_products[] = {
1039 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_SFP },
1040 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_KX_B },
1041 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_KX_C },
1042 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_A },
1043 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_B },
1044 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_C },
1045 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_T },
1046 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_1 },
1047 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_2 },
1048 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_T4_10G },
1049 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_BP },
1050 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_SFP28 },
1051 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_KX },
1052 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_QSFP },
1053 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_SFP },
1054 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_1G_BASET },
1055 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_10G_BASET },
1056 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_I_SFP },
1057 /* required last entry */
1058 {0, 0}
1059 };
1060
1061 static const struct ixl_product *
1062 ixl_lookup(const struct pci_attach_args *pa)
1063 {
1064 const struct ixl_product *ixlp;
1065
1066 for (ixlp = ixl_products; ixlp->vendor_id != 0; ixlp++) {
1067 if (PCI_VENDOR(pa->pa_id) == ixlp->vendor_id &&
1068 PCI_PRODUCT(pa->pa_id) == ixlp->product_id)
1069 return ixlp;
1070 }
1071
1072 return NULL;
1073 }
1074
1075 static int
1076 ixl_match(device_t parent, cfdata_t match, void *aux)
1077 {
1078 const struct pci_attach_args *pa = aux;
1079
1080 return (ixl_lookup(pa) != NULL) ? 1 : 0;
1081 }
1082
1083 static void
1084 ixl_attach(device_t parent, device_t self, void *aux)
1085 {
1086 struct ixl_softc *sc;
1087 struct pci_attach_args *pa = aux;
1088 struct ifnet *ifp;
1089 pcireg_t memtype;
1090 uint32_t firstq, port, ari, func;
1091 char xnamebuf[32];
1092 int tries, rv, link;
1093
1094 sc = device_private(self);
1095 sc->sc_dev = self;
1096 ifp = &sc->sc_ec.ec_if;
1097
1098 sc->sc_pa = *pa;
1099 sc->sc_dmat = (pci_dma64_available(pa)) ?
1100 pa->pa_dmat64 : pa->pa_dmat;
1101 sc->sc_aq_regs = &ixl_pf_aq_regs;
1102
1103 sc->sc_mac_type = ixl_mactype(PCI_PRODUCT(pa->pa_id));
1104
1105 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IXL_PCIREG);
1106 if (pci_mapreg_map(pa, IXL_PCIREG, memtype, 0,
1107 &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems)) {
1108 aprint_error(": unable to map registers\n");
1109 return;
1110 }
1111
1112 mutex_init(&sc->sc_cfg_lock, MUTEX_DEFAULT, IPL_SOFTNET);
1113
1114 firstq = ixl_rd(sc, I40E_PFLAN_QALLOC);
1115 firstq &= I40E_PFLAN_QALLOC_FIRSTQ_MASK;
1116 firstq >>= I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
1117 sc->sc_base_queue = firstq;
1118
1119 ixl_clear_hw(sc);
1120 if (ixl_pf_reset(sc) == -1) {
 1121 /* error printed by ixl_pf_reset */
1122 goto unmap;
1123 }
1124
1125 port = ixl_rd(sc, I40E_PFGEN_PORTNUM);
1126 port &= I40E_PFGEN_PORTNUM_PORT_NUM_MASK;
1127 port >>= I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
1128 sc->sc_port = port;
1129 aprint_normal(": port %u", sc->sc_port);
1130
1131 ari = ixl_rd(sc, I40E_GLPCI_CAPSUP);
1132 ari &= I40E_GLPCI_CAPSUP_ARI_EN_MASK;
1133 ari >>= I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
1134
1135 func = ixl_rd(sc, I40E_PF_FUNC_RID);
1136 sc->sc_pf_id = func & (ari ? 0xff : 0x7);
1137
1138 /* initialise the adminq */
1139
1140 mutex_init(&sc->sc_atq_lock, MUTEX_DEFAULT, IPL_NET);
1141
1142 if (ixl_dmamem_alloc(sc, &sc->sc_atq,
1143 sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
1144 aprint_error("\n" "%s: unable to allocate atq\n",
1145 device_xname(self));
1146 goto unmap;
1147 }
1148
1149 SIMPLEQ_INIT(&sc->sc_arq_idle);
1150 ixl_work_set(&sc->sc_arq_task, ixl_arq, sc);
1151 sc->sc_arq_cons = 0;
1152 sc->sc_arq_prod = 0;
1153
1154 if (ixl_dmamem_alloc(sc, &sc->sc_arq,
1155 sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
1156 aprint_error("\n" "%s: unable to allocate arq\n",
1157 device_xname(self));
1158 goto free_atq;
1159 }
1160
1161 if (!ixl_arq_fill(sc)) {
1162 aprint_error("\n" "%s: unable to fill arq descriptors\n",
1163 device_xname(self));
1164 goto free_arq;
1165 }
1166
1167 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1168 0, IXL_DMA_LEN(&sc->sc_atq),
1169 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1170
1171 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1172 0, IXL_DMA_LEN(&sc->sc_arq),
1173 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1174
1175 for (tries = 0; tries < 10; tries++) {
1176 sc->sc_atq_cons = 0;
1177 sc->sc_atq_prod = 0;
1178
1179 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1180 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1181 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1182 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1183
1184 ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
1185
1186 ixl_wr(sc, sc->sc_aq_regs->atq_bal,
1187 ixl_dmamem_lo(&sc->sc_atq));
1188 ixl_wr(sc, sc->sc_aq_regs->atq_bah,
1189 ixl_dmamem_hi(&sc->sc_atq));
1190 ixl_wr(sc, sc->sc_aq_regs->atq_len,
1191 sc->sc_aq_regs->atq_len_enable | IXL_AQ_NUM);
1192
1193 ixl_wr(sc, sc->sc_aq_regs->arq_bal,
1194 ixl_dmamem_lo(&sc->sc_arq));
1195 ixl_wr(sc, sc->sc_aq_regs->arq_bah,
1196 ixl_dmamem_hi(&sc->sc_arq));
1197 ixl_wr(sc, sc->sc_aq_regs->arq_len,
1198 sc->sc_aq_regs->arq_len_enable | IXL_AQ_NUM);
1199
1200 rv = ixl_get_version(sc);
1201 if (rv == 0)
1202 break;
1203 if (rv != ETIMEDOUT) {
1204 aprint_error(", unable to get firmware version\n");
1205 goto shutdown;
1206 }
1207
1208 delaymsec(100);
1209 }
1210
1211 ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
1212
1213 if (ixl_dmamem_alloc(sc, &sc->sc_aqbuf, IXL_AQ_BUFLEN, 0) != 0) {
1214 aprint_error_dev(self, ", unable to allocate nvm buffer\n");
1215 goto shutdown;
1216 }
1217
1218 ixl_get_nvm_version(sc);
1219
1220 if (sc->sc_mac_type == I40E_MAC_X722)
1221 sc->sc_nqueue_pairs_device = IXL_QUEUE_MAX_X722;
1222 else
1223 sc->sc_nqueue_pairs_device = IXL_QUEUE_MAX_XL710;
1224
1225 rv = ixl_get_hw_capabilities(sc);
1226 if (rv != 0) {
1227 aprint_error(", GET HW CAPABILITIES %s\n",
1228 rv == ETIMEDOUT ? "timeout" : "error");
1229 goto free_aqbuf;
1230 }
1231
1232 sc->sc_nqueue_pairs_max = MIN((int)sc->sc_nqueue_pairs_device, ncpu);
1233 if (ixl_param_nqps_limit > 0) {
1234 sc->sc_nqueue_pairs_max = MIN((int)sc->sc_nqueue_pairs_max,
1235 ixl_param_nqps_limit);
1236 }
1237
1238 sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max;
1239 sc->sc_tx_ring_ndescs = ixl_param_tx_ndescs;
1240 sc->sc_rx_ring_ndescs = ixl_param_rx_ndescs;
1241
1242 KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_rx_ring_ndescs);
1243 KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_tx_ring_ndescs);
1244
1245 if (ixl_get_mac(sc) != 0) {
1246 /* error printed by ixl_get_mac */
1247 goto free_aqbuf;
1248 }
1249
1250 aprint_normal("\n");
1251 aprint_naive("\n");
1252
1253 aprint_normal_dev(self, "Ethernet address %s\n",
1254 ether_sprintf(sc->sc_enaddr));
1255
1256 rv = ixl_pxe_clear(sc);
1257 if (rv != 0) {
1258 aprint_debug_dev(self, "CLEAR PXE MODE %s\n",
1259 rv == ETIMEDOUT ? "timeout" : "error");
1260 }
1261
1262 ixl_set_filter_control(sc);
1263
1264 if (ixl_hmc(sc) != 0) {
1265 /* error printed by ixl_hmc */
1266 goto free_aqbuf;
1267 }
1268
1269 if (ixl_lldp_shut(sc) != 0) {
1270 /* error printed by ixl_lldp_shut */
1271 goto free_hmc;
1272 }
1273
1274 if (ixl_phy_mask_ints(sc) != 0) {
1275 /* error printed by ixl_phy_mask_ints */
1276 goto free_hmc;
1277 }
1278
1279 if (ixl_restart_an(sc) != 0) {
1280 /* error printed by ixl_restart_an */
1281 goto free_hmc;
1282 }
1283
1284 if (ixl_get_switch_config(sc) != 0) {
1285 /* error printed by ixl_get_switch_config */
1286 goto free_hmc;
1287 }
1288
1289 rv = ixl_get_link_status_poll(sc, NULL);
1290 if (rv != 0) {
1291 aprint_error_dev(self, "GET LINK STATUS %s\n",
1292 rv == ETIMEDOUT ? "timeout" : "error");
1293 goto free_hmc;
1294 }
1295
1296 /*
 1297 * The FW often returns EIO for the "Get PHY Abilities" command
 1298 * if it is issued without a delay.
1299 */
1300 DELAY(500);
1301 if (ixl_get_phy_info(sc) != 0) {
1302 /* error printed by ixl_get_phy_info */
1303 goto free_hmc;
1304 }
1305
1306 if (ixl_dmamem_alloc(sc, &sc->sc_scratch,
1307 sizeof(struct ixl_aq_vsi_data), 8) != 0) {
1308 aprint_error_dev(self, "unable to allocate scratch buffer\n");
1309 goto free_hmc;
1310 }
1311
1312 rv = ixl_get_vsi(sc);
1313 if (rv != 0) {
1314 aprint_error_dev(self, "GET VSI %s %d\n",
1315 rv == ETIMEDOUT ? "timeout" : "error", rv);
1316 goto free_scratch;
1317 }
1318
1319 rv = ixl_set_vsi(sc);
1320 if (rv != 0) {
1321 aprint_error_dev(self, "UPDATE VSI error %s %d\n",
1322 rv == ETIMEDOUT ? "timeout" : "error", rv);
1323 goto free_scratch;
1324 }
1325
1326 if (ixl_queue_pairs_alloc(sc) != 0) {
1327 /* error printed by ixl_queue_pairs_alloc */
1328 goto free_scratch;
1329 }
1330
1331 if (ixl_setup_interrupts(sc) != 0) {
1332 /* error printed by ixl_setup_interrupts */
1333 goto free_queue_pairs;
1334 }
1335
1336 if (ixl_setup_stats(sc) != 0) {
1337 aprint_error_dev(self, "failed to setup event counters\n");
1338 goto teardown_intrs;
1339 }
1340
1341 if (ixl_setup_sysctls(sc) != 0) {
1342 /* error printed by ixl_setup_sysctls */
1343 goto teardown_stats;
1344 }
1345
1346 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_cfg", device_xname(self));
1347 sc->sc_workq = ixl_workq_create(xnamebuf, IXL_WORKQUEUE_PRI,
1348 IPL_NET, WQ_MPSAFE);
1349 if (sc->sc_workq == NULL)
1350 goto teardown_sysctls;
1351
1352 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_txrx", device_xname(self));
1353 rv = workqueue_create(&sc->sc_workq_txrx, xnamebuf, ixl_handle_queue_wk,
1354 sc, IXL_WORKQUEUE_PRI, IPL_NET, WQ_PERCPU | WQ_MPSAFE);
1355 if (rv != 0) {
1356 sc->sc_workq_txrx = NULL;
1357 goto teardown_wqs;
1358 }
1359
1360 snprintf(xnamebuf, sizeof(xnamebuf), "%s_atq_cv", device_xname(self));
1361 cv_init(&sc->sc_atq_cv, xnamebuf);
1362
1363 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
1364
1365 ifp->if_softc = sc;
1366 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1367 ifp->if_extflags = IFEF_MPSAFE;
1368 ifp->if_ioctl = ixl_ioctl;
1369 ifp->if_start = ixl_start;
1370 ifp->if_transmit = ixl_transmit;
1371 ifp->if_watchdog = ixl_watchdog;
1372 ifp->if_init = ixl_init;
1373 ifp->if_stop = ixl_stop;
1374 IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_ndescs);
1375 IFQ_SET_READY(&ifp->if_snd);
1376 ifp->if_capabilities |= IXL_IFCAP_RXCSUM;
1377 ifp->if_capabilities |= IXL_IFCAP_TXCSUM;
1378 #if 0
1379 ifp->if_capabilities |= IFCAP_TSOv4 | IFCAP_TSOv6;
1380 #endif
1381 ether_set_vlan_cb(&sc->sc_ec, ixl_vlan_cb);
1382 sc->sc_ec.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1383 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
1384 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
1385
1386 sc->sc_ec.ec_capenable = sc->sc_ec.ec_capabilities;
1387 /* Disable VLAN_HWFILTER by default */
1388 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
1389
1390 sc->sc_cur_ec_capenable = sc->sc_ec.ec_capenable;
1391
1392 sc->sc_ec.ec_ifmedia = &sc->sc_media;
1393 ifmedia_init(&sc->sc_media, IFM_IMASK, ixl_media_change,
1394 ixl_media_status);
1395
1396 ixl_media_add(sc);
1397 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1398 if (ISSET(sc->sc_phy_abilities,
1399 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX))) {
1400 ifmedia_add(&sc->sc_media,
1401 IFM_ETHER | IFM_AUTO | IFM_FLOW, 0, NULL);
1402 }
1403 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_NONE, 0, NULL);
1404 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
1405
1406 if_attach(ifp);
1407 if_deferred_start_init(ifp, NULL);
1408 ether_ifattach(ifp, sc->sc_enaddr);
1409 ether_set_ifflags_cb(&sc->sc_ec, ixl_ifflags_cb);
1410
1411 rv = ixl_get_link_status_poll(sc, &link);
1412 if (rv != 0)
1413 link = LINK_STATE_UNKNOWN;
1414 if_link_state_change(ifp, link);
1415
1416 ixl_work_set(&sc->sc_link_state_task, ixl_get_link_status, sc);
1417
1418 ixl_config_other_intr(sc);
1419 ixl_enable_other_intr(sc);
1420
1421 ixl_set_phy_autoselect(sc);
1422
1423 /* remove default mac filter and replace it so we can see vlans */
1424 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, 0, 0);
1425 if (rv != ENOENT) {
1426 aprint_debug_dev(self,
1427 "unable to remove macvlan %u\n", rv);
1428 }
1429 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
1430 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1431 if (rv != ENOENT) {
1432 aprint_debug_dev(self,
1433 "unable to remove macvlan, ignore vlan %u\n", rv);
1434 }
1435
1436 if (ixl_update_macvlan(sc) != 0) {
1437 aprint_debug_dev(self,
1438 "couldn't enable vlan hardware filter\n");
1439 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
1440 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER);
1441 }
1442
1443 sc->sc_txrx_workqueue = true;
1444 sc->sc_tx_process_limit = IXL_TX_PROCESS_LIMIT;
1445 sc->sc_rx_process_limit = IXL_RX_PROCESS_LIMIT;
1446 sc->sc_tx_intr_process_limit = IXL_TX_INTR_PROCESS_LIMIT;
1447 sc->sc_rx_intr_process_limit = IXL_RX_INTR_PROCESS_LIMIT;
1448
1449 ixl_stats_update(sc);
1450 sc->sc_stats_counters.isc_has_offset = true;
1451
1452 if (pmf_device_register(self, NULL, NULL) != true)
1453 aprint_debug_dev(self, "couldn't establish power handler\n");
1454 sc->sc_attached = true;
1455 return;
1456
1457 teardown_wqs:
1458 config_finalize_register(self, ixl_workqs_teardown);
1459 teardown_sysctls:
1460 ixl_teardown_sysctls(sc);
1461 teardown_stats:
1462 ixl_teardown_stats(sc);
1463 teardown_intrs:
1464 ixl_teardown_interrupts(sc);
1465 free_queue_pairs:
1466 ixl_queue_pairs_free(sc);
1467 free_scratch:
1468 ixl_dmamem_free(sc, &sc->sc_scratch);
1469 free_hmc:
1470 ixl_hmc_free(sc);
1471 free_aqbuf:
1472 ixl_dmamem_free(sc, &sc->sc_aqbuf);
1473 shutdown:
1474 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1475 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1476 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1477 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1478
1479 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0);
1480 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0);
1481 ixl_wr(sc, sc->sc_aq_regs->atq_len, 0);
1482
1483 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0);
1484 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0);
1485 ixl_wr(sc, sc->sc_aq_regs->arq_len, 0);
1486
1487 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1488 0, IXL_DMA_LEN(&sc->sc_arq),
1489 BUS_DMASYNC_POSTREAD);
1490 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1491 0, IXL_DMA_LEN(&sc->sc_atq),
1492 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1493
1494 ixl_arq_unfill(sc);
1495 free_arq:
1496 ixl_dmamem_free(sc, &sc->sc_arq);
1497 free_atq:
1498 ixl_dmamem_free(sc, &sc->sc_atq);
1499 unmap:
1500 mutex_destroy(&sc->sc_atq_lock);
1501 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
1502 mutex_destroy(&sc->sc_cfg_lock);
1503 sc->sc_mems = 0;
1504
1505 sc->sc_attached = false;
1506 }
1507
1508 static int
1509 ixl_detach(device_t self, int flags)
1510 {
1511 struct ixl_softc *sc = device_private(self);
1512 struct ifnet *ifp = &sc->sc_ec.ec_if;
1513
1514 if (!sc->sc_attached)
1515 return 0;
1516
1517 ixl_stop(ifp, 1);
1518
1519 ixl_disable_other_intr(sc);
1520
1521 callout_halt(&sc->sc_stats_callout, NULL);
1522 ixl_work_wait(sc->sc_workq, &sc->sc_stats_task);
1523
1524 /* wait for ATQ handler */
1525 mutex_enter(&sc->sc_atq_lock);
1526 mutex_exit(&sc->sc_atq_lock);
1527
1528 ixl_work_wait(sc->sc_workq, &sc->sc_arq_task);
1529 ixl_work_wait(sc->sc_workq, &sc->sc_link_state_task);
1530
1531 if (sc->sc_workq != NULL) {
1532 ixl_workq_destroy(sc->sc_workq);
1533 sc->sc_workq = NULL;
1534 }
1535
1536 if (sc->sc_workq_txrx != NULL) {
1537 workqueue_destroy(sc->sc_workq_txrx);
1538 sc->sc_workq_txrx = NULL;
1539 }
1540
1541 ether_ifdetach(ifp);
1542 if_detach(ifp);
1543 ifmedia_fini(&sc->sc_media);
1544
1545 ixl_teardown_interrupts(sc);
1546 ixl_teardown_stats(sc);
1547 ixl_teardown_sysctls(sc);
1548
1549 ixl_queue_pairs_free(sc);
1550
1551 ixl_dmamem_free(sc, &sc->sc_scratch);
1552 ixl_hmc_free(sc);
1553
1554 /* shutdown */
1555 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1556 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1557 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1558 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1559
1560 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0);
1561 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0);
1562 ixl_wr(sc, sc->sc_aq_regs->atq_len, 0);
1563
1564 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0);
1565 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0);
1566 ixl_wr(sc, sc->sc_aq_regs->arq_len, 0);
1567
1568 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1569 0, IXL_DMA_LEN(&sc->sc_arq),
1570 BUS_DMASYNC_POSTREAD);
1571 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1572 0, IXL_DMA_LEN(&sc->sc_atq),
1573 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1574
1575 ixl_arq_unfill(sc);
1576
1577 ixl_dmamem_free(sc, &sc->sc_arq);
1578 ixl_dmamem_free(sc, &sc->sc_atq);
1579 ixl_dmamem_free(sc, &sc->sc_aqbuf);
1580
1581 cv_destroy(&sc->sc_atq_cv);
1582 mutex_destroy(&sc->sc_atq_lock);
1583
1584 if (sc->sc_mems != 0) {
1585 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
1586 sc->sc_mems = 0;
1587 }
1588
1589 mutex_destroy(&sc->sc_cfg_lock);
1590
1591 return 0;
1592 }
1593
1594 static int
1595 ixl_workqs_teardown(device_t self)
1596 {
1597 struct ixl_softc *sc = device_private(self);
1598
1599 if (sc->sc_workq != NULL) {
1600 ixl_workq_destroy(sc->sc_workq);
1601 sc->sc_workq = NULL;
1602 }
1603
1604 if (sc->sc_workq_txrx != NULL) {
1605 workqueue_destroy(sc->sc_workq_txrx);
1606 sc->sc_workq_txrx = NULL;
1607 }
1608
1609 return 0;
1610 }
1611
1612 static int
1613 ixl_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
1614 {
1615 struct ifnet *ifp = &ec->ec_if;
1616 struct ixl_softc *sc = ifp->if_softc;
1617 int rv;
1618
1619 if (!ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)) {
1620 return 0;
1621 }
1622
1623 if (set) {
1624 rv = ixl_add_macvlan(sc, sc->sc_enaddr, vid,
1625 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
1626 if (rv == 0) {
1627 rv = ixl_add_macvlan(sc, etherbroadcastaddr,
1628 vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
1629 }
1630 } else {
1631 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, vid,
1632 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
1633 (void)ixl_remove_macvlan(sc, etherbroadcastaddr, vid,
1634 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
1635 }
1636
1637 return rv;
1638 }
1639
1640 static void
1641 ixl_media_add(struct ixl_softc *sc)
1642 {
1643 struct ifmedia *ifm = &sc->sc_media;
1644 const struct ixl_phy_type *itype;
1645 unsigned int i;
1646 bool flow;
1647
1648 if (ISSET(sc->sc_phy_abilities,
1649 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX))) {
1650 flow = true;
1651 } else {
1652 flow = false;
1653 }
1654
1655 for (i = 0; i < __arraycount(ixl_phy_type_map); i++) {
1656 itype = &ixl_phy_type_map[i];
1657
1658 if (ISSET(sc->sc_phy_types, itype->phy_type)) {
1659 ifmedia_add(ifm,
1660 IFM_ETHER | IFM_FDX | itype->ifm_type, 0, NULL);
1661
1662 if (flow) {
1663 ifmedia_add(ifm,
1664 IFM_ETHER | IFM_FDX | IFM_FLOW |
1665 itype->ifm_type, 0, NULL);
1666 }
1667
1668 if (itype->ifm_type != IFM_100_TX)
1669 continue;
1670
1671 ifmedia_add(ifm, IFM_ETHER | itype->ifm_type,
1672 0, NULL);
1673 if (flow) {
1674 ifmedia_add(ifm,
1675 IFM_ETHER | IFM_FLOW | itype->ifm_type,
1676 0, NULL);
1677 }
1678 }
1679 }
1680 }
1681
1682 static void
1683 ixl_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1684 {
1685 struct ixl_softc *sc = ifp->if_softc;
1686
1687 ifmr->ifm_status = sc->sc_media_status;
1688 ifmr->ifm_active = sc->sc_media_active;
1689
1690 mutex_enter(&sc->sc_cfg_lock);
1691 if (ifp->if_link_state == LINK_STATE_UP)
1692 SET(ifmr->ifm_status, IFM_ACTIVE);
1693 mutex_exit(&sc->sc_cfg_lock);
1694 }
1695
1696 static int
1697 ixl_media_change(struct ifnet *ifp)
1698 {
1699 struct ixl_softc *sc = ifp->if_softc;
1700 struct ifmedia *ifm = &sc->sc_media;
1701 uint64_t ifm_active = sc->sc_media_active;
1702 uint8_t link_speed, abilities;
1703
1704 switch (IFM_SUBTYPE(ifm_active)) {
1705 case IFM_1000_SGMII:
1706 case IFM_1000_KX:
1707 case IFM_10G_KX4:
1708 case IFM_10G_KR:
1709 case IFM_40G_KR4:
1710 case IFM_20G_KR2:
1711 case IFM_25G_KR:
1712 /* backplanes */
1713 return EINVAL;
1714 }
1715
1716 abilities = IXL_PHY_ABILITY_AUTONEGO | IXL_PHY_ABILITY_LINKUP;
1717
1718 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1719 case IFM_AUTO:
1720 link_speed = sc->sc_phy_linkspeed;
1721 break;
1722 case IFM_NONE:
1723 link_speed = 0;
1724 CLR(abilities, IXL_PHY_ABILITY_LINKUP);
1725 break;
1726 default:
1727 link_speed = ixl_search_baudrate(
1728 ifmedia_baudrate(ifm->ifm_media));
1729 }
1730
1731 if (ISSET(abilities, IXL_PHY_ABILITY_LINKUP)) {
1732 if (ISSET(link_speed, sc->sc_phy_linkspeed) == 0)
1733 return EINVAL;
1734 }
1735
1736 if (ifm->ifm_media & IFM_FLOW) {
1737 abilities |= sc->sc_phy_abilities &
1738 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX);
1739 }
1740
1741 return ixl_set_phy_config(sc, link_speed, abilities, false);
1742 }
1743
1744 static void
1745 ixl_watchdog(struct ifnet *ifp)
1746 {
1747
1748 }
1749
1750 static void
1751 ixl_del_all_multiaddr(struct ixl_softc *sc)
1752 {
1753 struct ethercom *ec = &sc->sc_ec;
1754 struct ether_multi *enm;
1755 struct ether_multistep step;
1756
1757 ETHER_LOCK(ec);
1758 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1759 ETHER_NEXT_MULTI(step, enm)) {
1760 ixl_remove_macvlan(sc, enm->enm_addrlo, 0,
1761 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1762 }
1763 ETHER_UNLOCK(ec);
1764 }
1765
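/*
 * Add a multicast filter entry.  An address range (addrlo != addrhi) or
 * running out of filter space falls back to IFF_ALLMULTI and returns
 * ENETRESET so the caller reprograms the promiscuous settings.
 */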
1766 static int
1767 ixl_add_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
1768 {
1769 struct ifnet *ifp = &sc->sc_ec.ec_if;
1770 int rv;
1771
1772 if (ISSET(ifp->if_flags, IFF_ALLMULTI))
1773 return 0;
1774
1775 if (memcmp(addrlo, addrhi, ETHER_ADDR_LEN) != 0) {
1776 ixl_del_all_multiaddr(sc);
1777 SET(ifp->if_flags, IFF_ALLMULTI);
1778 return ENETRESET;
1779 }
1780
1781 	/* multicast addresses cannot use VLAN HWFILTER */
1782 rv = ixl_add_macvlan(sc, addrlo, 0,
1783 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
1784
1785 if (rv == ENOSPC) {
1786 ixl_del_all_multiaddr(sc);
1787 SET(ifp->if_flags, IFF_ALLMULTI);
1788 return ENETRESET;
1789 }
1790
1791 return rv;
1792 }
1793
1794 static int
1795 ixl_del_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
1796 {
1797 struct ifnet *ifp = &sc->sc_ec.ec_if;
1798 struct ethercom *ec = &sc->sc_ec;
1799 struct ether_multi *enm, *enm_last;
1800 struct ether_multistep step;
1801 int error, rv = 0;
1802
1803 if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
1804 ixl_remove_macvlan(sc, addrlo, 0,
1805 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1806 return 0;
1807 }
1808
1809 ETHER_LOCK(ec);
1810 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1811 ETHER_NEXT_MULTI(step, enm)) {
1812 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
1813 ETHER_ADDR_LEN) != 0) {
1814 goto out;
1815 }
1816 }
1817
1818 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1819 ETHER_NEXT_MULTI(step, enm)) {
1820 error = ixl_add_macvlan(sc, enm->enm_addrlo, 0,
1821 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
1822 if (error != 0)
1823 break;
1824 }
1825
1826 if (enm != NULL) {
1827 enm_last = enm;
1828 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1829 ETHER_NEXT_MULTI(step, enm)) {
1830 if (enm == enm_last)
1831 break;
1832
1833 ixl_remove_macvlan(sc, enm->enm_addrlo, 0,
1834 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1835 }
1836 } else {
1837 CLR(ifp->if_flags, IFF_ALLMULTI);
1838 rv = ENETRESET;
1839 }
1840
1841 out:
1842 ETHER_UNLOCK(ec);
1843 return rv;
1844 }
1845
1846 static int
1847 ixl_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1848 {
1849 struct ifreq *ifr = (struct ifreq *)data;
1850 struct ixl_softc *sc = (struct ixl_softc *)ifp->if_softc;
1851 const struct sockaddr *sa;
1852 uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
1853 int s, error = 0;
1854 unsigned int nmtu;
1855
1856 switch (cmd) {
1857 case SIOCSIFMTU:
1858 nmtu = ifr->ifr_mtu;
1859
1860 if (nmtu < IXL_MIN_MTU || nmtu > IXL_MAX_MTU) {
1861 error = EINVAL;
1862 break;
1863 }
1864 if (ifp->if_mtu != nmtu) {
1865 s = splnet();
1866 error = ether_ioctl(ifp, cmd, data);
1867 splx(s);
1868 if (error == ENETRESET)
1869 error = ixl_init(ifp);
1870 }
1871 break;
1872 case SIOCADDMULTI:
1873 sa = ifreq_getaddr(SIOCADDMULTI, ifr);
1874 if (ether_addmulti(sa, &sc->sc_ec) == ENETRESET) {
1875 error = ether_multiaddr(sa, addrlo, addrhi);
1876 if (error != 0)
1877 return error;
1878
1879 error = ixl_add_multi(sc, addrlo, addrhi);
1880 if (error != 0 && error != ENETRESET) {
1881 ether_delmulti(sa, &sc->sc_ec);
1882 error = EIO;
1883 }
1884 }
1885 break;
1886
1887 case SIOCDELMULTI:
1888 sa = ifreq_getaddr(SIOCDELMULTI, ifr);
1889 if (ether_delmulti(sa, &sc->sc_ec) == ENETRESET) {
1890 error = ether_multiaddr(sa, addrlo, addrhi);
1891 if (error != 0)
1892 return error;
1893
1894 error = ixl_del_multi(sc, addrlo, addrhi);
1895 }
1896 break;
1897
1898 default:
1899 s = splnet();
1900 error = ether_ioctl(ifp, cmd, data);
1901 splx(s);
1902 }
1903
1904 if (error == ENETRESET)
1905 error = ixl_iff(sc);
1906
1907 return error;
1908 }
1909
1910 static enum i40e_mac_type
1911 ixl_mactype(pci_product_id_t id)
1912 {
1913
1914 switch (id) {
1915 case PCI_PRODUCT_INTEL_XL710_SFP:
1916 case PCI_PRODUCT_INTEL_XL710_KX_B:
1917 case PCI_PRODUCT_INTEL_XL710_KX_C:
1918 case PCI_PRODUCT_INTEL_XL710_QSFP_A:
1919 case PCI_PRODUCT_INTEL_XL710_QSFP_B:
1920 case PCI_PRODUCT_INTEL_XL710_QSFP_C:
1921 case PCI_PRODUCT_INTEL_X710_10G_T:
1922 case PCI_PRODUCT_INTEL_XL710_20G_BP_1:
1923 case PCI_PRODUCT_INTEL_XL710_20G_BP_2:
1924 case PCI_PRODUCT_INTEL_X710_T4_10G:
1925 case PCI_PRODUCT_INTEL_XXV710_25G_BP:
1926 case PCI_PRODUCT_INTEL_XXV710_25G_SFP28:
1927 return I40E_MAC_XL710;
1928
1929 case PCI_PRODUCT_INTEL_X722_KX:
1930 case PCI_PRODUCT_INTEL_X722_QSFP:
1931 case PCI_PRODUCT_INTEL_X722_SFP:
1932 case PCI_PRODUCT_INTEL_X722_1G_BASET:
1933 case PCI_PRODUCT_INTEL_X722_10G_BASET:
1934 case PCI_PRODUCT_INTEL_X722_I_SFP:
1935 return I40E_MAC_X722;
1936 }
1937
1938 return I40E_MAC_GENERIC;
1939 }
1940
1941 static inline void *
1942 ixl_hmc_kva(struct ixl_softc *sc, enum ixl_hmc_types type, unsigned int i)
1943 {
1944 uint8_t *kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
1945 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
1946
1947 if (i >= e->hmc_count)
1948 return NULL;
1949
1950 kva += e->hmc_base;
1951 kva += i * e->hmc_size;
1952
1953 return kva;
1954 }
1955
1956 static inline size_t
1957 ixl_hmc_len(struct ixl_softc *sc, enum ixl_hmc_types type)
1958 {
1959 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
1960
1961 return e->hmc_size;
1962 }
1963
1964 static void
1965 ixl_enable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp)
1966 {
1967 struct ixl_rx_ring *rxr = qp->qp_rxr;
1968
1969 ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid),
1970 I40E_PFINT_DYN_CTLN_INTENA_MASK |
1971 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1972 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
1973 ixl_flush(sc);
1974 }
1975
1976 static void
1977 ixl_disable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp)
1978 {
1979 struct ixl_rx_ring *rxr = qp->qp_rxr;
1980
1981 ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid),
1982 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
1983 ixl_flush(sc);
1984 }
1985
1986 static void
1987 ixl_enable_other_intr(struct ixl_softc *sc)
1988 {
1989
1990 ixl_wr(sc, I40E_PFINT_DYN_CTL0,
1991 I40E_PFINT_DYN_CTL0_INTENA_MASK |
1992 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
1993 (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
1994 ixl_flush(sc);
1995 }
1996
1997 static void
1998 ixl_disable_other_intr(struct ixl_softc *sc)
1999 {
2000
2001 ixl_wr(sc, I40E_PFINT_DYN_CTL0,
2002 (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
2003 ixl_flush(sc);
2004 }
2005
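/*
 * Reprogram the VSI and bring the TX/RX rings back up: write the HMC
 * queue contexts, restore the tail registers, pre-fill the RX rings and
 * wait for the hardware to acknowledge each queue-enable request.
 */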
2006 static int
2007 ixl_reinit(struct ixl_softc *sc)
2008 {
2009 struct ixl_rx_ring *rxr;
2010 struct ixl_tx_ring *txr;
2011 unsigned int i;
2012 uint32_t reg;
2013
2014 KASSERT(mutex_owned(&sc->sc_cfg_lock));
2015
2016 if (ixl_get_vsi(sc) != 0)
2017 return EIO;
2018
2019 if (ixl_set_vsi(sc) != 0)
2020 return EIO;
2021
2022 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2023 txr = sc->sc_qps[i].qp_txr;
2024 rxr = sc->sc_qps[i].qp_rxr;
2025
2026 ixl_txr_config(sc, txr);
2027 ixl_rxr_config(sc, rxr);
2028 }
2029
2030 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
2031 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_PREWRITE);
2032
2033 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2034 txr = sc->sc_qps[i].qp_txr;
2035 rxr = sc->sc_qps[i].qp_rxr;
2036
2037 ixl_wr(sc, I40E_QTX_CTL(i), I40E_QTX_CTL_PF_QUEUE |
2038 (sc->sc_pf_id << I40E_QTX_CTL_PF_INDX_SHIFT));
2039 ixl_flush(sc);
2040
2041 ixl_wr(sc, txr->txr_tail, txr->txr_prod);
2042 ixl_wr(sc, rxr->rxr_tail, rxr->rxr_prod);
2043
2044 /* ixl_rxfill() needs lock held */
2045 mutex_enter(&rxr->rxr_lock);
2046 ixl_rxfill(sc, rxr);
2047 mutex_exit(&rxr->rxr_lock);
2048
2049 reg = ixl_rd(sc, I40E_QRX_ENA(i));
2050 SET(reg, I40E_QRX_ENA_QENA_REQ_MASK);
2051 ixl_wr(sc, I40E_QRX_ENA(i), reg);
2052 if (ixl_rxr_enabled(sc, rxr) != 0)
2053 goto stop;
2054
2055 ixl_txr_qdis(sc, txr, 1);
2056
2057 reg = ixl_rd(sc, I40E_QTX_ENA(i));
2058 SET(reg, I40E_QTX_ENA_QENA_REQ_MASK);
2059 ixl_wr(sc, I40E_QTX_ENA(i), reg);
2060
2061 if (ixl_txr_enabled(sc, txr) != 0)
2062 goto stop;
2063 }
2064
2065 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
2066 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE);
2067
2068 return 0;
2069
2070 stop:
2071 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
2072 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE);
2073
2074 return ETIMEDOUT;
2075 }
2076
2077 static int
2078 ixl_init_locked(struct ixl_softc *sc)
2079 {
2080 struct ifnet *ifp = &sc->sc_ec.ec_if;
2081 unsigned int i;
2082 int error, eccap_change;
2083
2084 KASSERT(mutex_owned(&sc->sc_cfg_lock));
2085
2086 if (ISSET(ifp->if_flags, IFF_RUNNING))
2087 ixl_stop_locked(sc);
2088
2089 if (sc->sc_dead) {
2090 return ENXIO;
2091 }
2092
2093 eccap_change = sc->sc_ec.ec_capenable ^ sc->sc_cur_ec_capenable;
2094 if (ISSET(eccap_change, ETHERCAP_VLAN_HWTAGGING))
2095 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWTAGGING;
2096
2097 if (ISSET(eccap_change, ETHERCAP_VLAN_HWFILTER)) {
2098 if (ixl_update_macvlan(sc) == 0) {
2099 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWFILTER;
2100 } else {
2101 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
2102 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER);
2103 }
2104 }
2105
2106 if (sc->sc_intrtype != PCI_INTR_TYPE_MSIX)
2107 sc->sc_nqueue_pairs = 1;
2108 else
2109 sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max;
2110
2111 error = ixl_reinit(sc);
2112 if (error) {
2113 ixl_stop_locked(sc);
2114 return error;
2115 }
2116
2117 SET(ifp->if_flags, IFF_RUNNING);
2118 CLR(ifp->if_flags, IFF_OACTIVE);
2119
2120 (void)ixl_get_link_status(sc);
2121
2122 ixl_config_rss(sc);
2123 ixl_config_queue_intr(sc);
2124
2125 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2126 ixl_enable_queue_intr(sc, &sc->sc_qps[i]);
2127 }
2128
2129 error = ixl_iff(sc);
2130 if (error) {
2131 ixl_stop_locked(sc);
2132 return error;
2133 }
2134
2135 callout_schedule(&sc->sc_stats_callout, mstohz(sc->sc_stats_intval));
2136
2137 return 0;
2138 }
2139
2140 static int
2141 ixl_init(struct ifnet *ifp)
2142 {
2143 struct ixl_softc *sc = ifp->if_softc;
2144 int error;
2145
2146 mutex_enter(&sc->sc_cfg_lock);
2147 error = ixl_init_locked(sc);
2148 mutex_exit(&sc->sc_cfg_lock);
2149
2150 return error;
2151 }
2152
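/*
 * Sync the unicast/multicast/broadcast promiscuous mode with the current
 * interface flags through a SET_VSI_PROMISC admin command, and reprogram
 * the station MAC filter if the link-layer address has changed.
 */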
2153 static int
2154 ixl_iff(struct ixl_softc *sc)
2155 {
2156 struct ifnet *ifp = &sc->sc_ec.ec_if;
2157 struct ixl_atq iatq;
2158 struct ixl_aq_desc *iaq;
2159 struct ixl_aq_vsi_promisc_param *param;
2160 uint16_t flag_add, flag_del;
2161 int error;
2162
2163 if (!ISSET(ifp->if_flags, IFF_RUNNING))
2164 return 0;
2165
2166 memset(&iatq, 0, sizeof(iatq));
2167
2168 iaq = &iatq.iatq_desc;
2169 iaq->iaq_opcode = htole16(IXL_AQ_OP_SET_VSI_PROMISC);
2170
2171 param = (struct ixl_aq_vsi_promisc_param *)&iaq->iaq_param;
2172 param->flags = htole16(0);
2173
2174 if (!ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)
2175 || ISSET(ifp->if_flags, IFF_PROMISC)) {
2176 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_BCAST |
2177 IXL_AQ_VSI_PROMISC_FLAG_VLAN);
2178 }
2179
2180 if (ISSET(ifp->if_flags, IFF_PROMISC)) {
2181 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
2182 IXL_AQ_VSI_PROMISC_FLAG_MCAST);
2183 } else if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
2184 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_MCAST);
2185 }
2186 param->valid_flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
2187 IXL_AQ_VSI_PROMISC_FLAG_MCAST | IXL_AQ_VSI_PROMISC_FLAG_BCAST |
2188 IXL_AQ_VSI_PROMISC_FLAG_VLAN);
2189 param->seid = sc->sc_seid;
2190
2191 error = ixl_atq_exec(sc, &iatq);
2192 if (error)
2193 return error;
2194
2195 if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK))
2196 return EIO;
2197
2198 if (memcmp(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN) != 0) {
2199 if (ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)) {
2200 flag_add = IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH;
2201 flag_del = IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH;
2202 } else {
2203 flag_add = IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN;
2204 flag_del = IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN;
2205 }
2206
2207 ixl_remove_macvlan(sc, sc->sc_enaddr, 0, flag_del);
2208
2209 memcpy(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN);
2210 ixl_add_macvlan(sc, sc->sc_enaddr, 0, flag_add);
2211 }
2212 return 0;
2213 }
2214
2215 static void
2216 ixl_stop_rendezvous(struct ixl_softc *sc)
2217 {
2218 struct ixl_tx_ring *txr;
2219 struct ixl_rx_ring *rxr;
2220 unsigned int i;
2221
2222 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2223 txr = sc->sc_qps[i].qp_txr;
2224 rxr = sc->sc_qps[i].qp_rxr;
2225
2226 mutex_enter(&txr->txr_lock);
2227 mutex_exit(&txr->txr_lock);
2228
2229 mutex_enter(&rxr->rxr_lock);
2230 mutex_exit(&rxr->rxr_lock);
2231
2232 sc->sc_qps[i].qp_workqueue = false;
2233 workqueue_wait(sc->sc_workq_txrx,
2234 &sc->sc_qps[i].qp_work);
2235 }
2236 }
2237
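/*
 * Bring the interface down: pre-disable the TX queues, clear the
 * queue-enable requests, wait for the hardware to acknowledge, then drain
 * and unconfigure the rings.  If a queue refuses to stop, the device is
 * marked dead.
 */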
2238 static void
2239 ixl_stop_locked(struct ixl_softc *sc)
2240 {
2241 struct ifnet *ifp = &sc->sc_ec.ec_if;
2242 struct ixl_rx_ring *rxr;
2243 struct ixl_tx_ring *txr;
2244 unsigned int i;
2245 uint32_t reg;
2246
2247 KASSERT(mutex_owned(&sc->sc_cfg_lock));
2248
2249 CLR(ifp->if_flags, IFF_RUNNING | IFF_OACTIVE);
2250 callout_stop(&sc->sc_stats_callout);
2251
2252 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2253 txr = sc->sc_qps[i].qp_txr;
2254 rxr = sc->sc_qps[i].qp_rxr;
2255
2256 ixl_disable_queue_intr(sc, &sc->sc_qps[i]);
2257
2258 mutex_enter(&txr->txr_lock);
2259 ixl_txr_qdis(sc, txr, 0);
2260 mutex_exit(&txr->txr_lock);
2261 }
2262
2263 /* XXX wait at least 400 usec for all tx queues in one go */
2264 ixl_flush(sc);
2265 DELAY(500);
2266
2267 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2268 txr = sc->sc_qps[i].qp_txr;
2269 rxr = sc->sc_qps[i].qp_rxr;
2270
2271 mutex_enter(&txr->txr_lock);
2272 reg = ixl_rd(sc, I40E_QTX_ENA(i));
2273 CLR(reg, I40E_QTX_ENA_QENA_REQ_MASK);
2274 ixl_wr(sc, I40E_QTX_ENA(i), reg);
2275 mutex_exit(&txr->txr_lock);
2276
2277 mutex_enter(&rxr->rxr_lock);
2278 reg = ixl_rd(sc, I40E_QRX_ENA(i));
2279 CLR(reg, I40E_QRX_ENA_QENA_REQ_MASK);
2280 ixl_wr(sc, I40E_QRX_ENA(i), reg);
2281 mutex_exit(&rxr->rxr_lock);
2282 }
2283
2284 /* XXX short wait for all queue disables to settle */
2285 ixl_flush(sc);
2286 DELAY(50);
2287
2288 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2289 txr = sc->sc_qps[i].qp_txr;
2290 rxr = sc->sc_qps[i].qp_rxr;
2291
2292 mutex_enter(&txr->txr_lock);
2293 if (ixl_txr_disabled(sc, txr) != 0) {
2294 mutex_exit(&txr->txr_lock);
2295 goto die;
2296 }
2297 mutex_exit(&txr->txr_lock);
2298
2299 mutex_enter(&rxr->rxr_lock);
2300 if (ixl_rxr_disabled(sc, rxr) != 0) {
2301 mutex_exit(&rxr->rxr_lock);
2302 goto die;
2303 }
2304 mutex_exit(&rxr->rxr_lock);
2305 }
2306
2307 ixl_stop_rendezvous(sc);
2308
2309 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2310 txr = sc->sc_qps[i].qp_txr;
2311 rxr = sc->sc_qps[i].qp_rxr;
2312
2313 mutex_enter(&txr->txr_lock);
2314 ixl_txr_unconfig(sc, txr);
2315 mutex_exit(&txr->txr_lock);
2316
2317 mutex_enter(&rxr->rxr_lock);
2318 ixl_rxr_unconfig(sc, rxr);
2319 mutex_exit(&rxr->rxr_lock);
2320
2321 ixl_txr_clean(sc, txr);
2322 ixl_rxr_clean(sc, rxr);
2323 }
2324
2325 return;
2326 die:
2327 sc->sc_dead = true;
2328 	log(LOG_CRIT, "%s: failed to shut down rings\n",
2329 device_xname(sc->sc_dev));
2330 return;
2331 }
2332
2333 static void
2334 ixl_stop(struct ifnet *ifp, int disable)
2335 {
2336 struct ixl_softc *sc = ifp->if_softc;
2337
2338 mutex_enter(&sc->sc_cfg_lock);
2339 ixl_stop_locked(sc);
2340 mutex_exit(&sc->sc_cfg_lock);
2341 }
2342
2343 static int
2344 ixl_queue_pairs_alloc(struct ixl_softc *sc)
2345 {
2346 struct ixl_queue_pair *qp;
2347 unsigned int i;
2348 size_t sz;
2349
2350 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2351 sc->sc_qps = kmem_zalloc(sz, KM_SLEEP);
2352
2353 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2354 qp = &sc->sc_qps[i];
2355
2356 qp->qp_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
2357 ixl_handle_queue, qp);
2358 if (qp->qp_si == NULL)
2359 goto free;
2360
2361 qp->qp_txr = ixl_txr_alloc(sc, i);
2362 if (qp->qp_txr == NULL)
2363 goto free;
2364
2365 qp->qp_rxr = ixl_rxr_alloc(sc, i);
2366 if (qp->qp_rxr == NULL)
2367 goto free;
2368
2369 qp->qp_sc = sc;
2370 snprintf(qp->qp_name, sizeof(qp->qp_name),
2371 "%s-TXRX%d", device_xname(sc->sc_dev), i);
2372 }
2373
2374 return 0;
2375 free:
2376 if (sc->sc_qps != NULL) {
2377 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2378 qp = &sc->sc_qps[i];
2379
2380 if (qp->qp_txr != NULL)
2381 ixl_txr_free(sc, qp->qp_txr);
2382 if (qp->qp_rxr != NULL)
2383 ixl_rxr_free(sc, qp->qp_rxr);
2384 if (qp->qp_si != NULL)
2385 softint_disestablish(qp->qp_si);
2386 }
2387
2388 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2389 kmem_free(sc->sc_qps, sz);
2390 sc->sc_qps = NULL;
2391 }
2392
2393 return -1;
2394 }
2395
2396 static void
2397 ixl_queue_pairs_free(struct ixl_softc *sc)
2398 {
2399 struct ixl_queue_pair *qp;
2400 unsigned int i;
2401 size_t sz;
2402
2403 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2404 qp = &sc->sc_qps[i];
2405 ixl_txr_free(sc, qp->qp_txr);
2406 ixl_rxr_free(sc, qp->qp_rxr);
2407 softint_disestablish(qp->qp_si);
2408 }
2409
2410 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2411 kmem_free(sc->sc_qps, sz);
2412 sc->sc_qps = NULL;
2413 }
2414
2415 static struct ixl_tx_ring *
2416 ixl_txr_alloc(struct ixl_softc *sc, unsigned int qid)
2417 {
2418 struct ixl_tx_ring *txr = NULL;
2419 struct ixl_tx_map *maps = NULL, *txm;
2420 unsigned int i;
2421
2422 txr = kmem_zalloc(sizeof(*txr), KM_SLEEP);
2423 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_tx_ring_ndescs,
2424 KM_SLEEP);
2425
2426 if (ixl_dmamem_alloc(sc, &txr->txr_mem,
2427 sizeof(struct ixl_tx_desc) * sc->sc_tx_ring_ndescs,
2428 IXL_TX_QUEUE_ALIGN) != 0)
2429 goto free;
2430
2431 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2432 txm = &maps[i];
2433
2434 if (bus_dmamap_create(sc->sc_dmat, IXL_TX_PKT_MAXSIZE,
2435 IXL_TX_PKT_DESCS, IXL_TX_PKT_MAXSIZE, 0,
2436 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &txm->txm_map) != 0)
2437 goto uncreate;
2438
2439 txm->txm_eop = -1;
2440 txm->txm_m = NULL;
2441 }
2442
2443 txr->txr_cons = txr->txr_prod = 0;
2444 txr->txr_maps = maps;
2445
2446 txr->txr_intrq = pcq_create(sc->sc_tx_ring_ndescs, KM_NOSLEEP);
2447 if (txr->txr_intrq == NULL)
2448 goto uncreate;
2449
2450 txr->txr_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
2451 ixl_deferred_transmit, txr);
2452 if (txr->txr_si == NULL)
2453 goto destroy_pcq;
2454
2455 txr->txr_tail = I40E_QTX_TAIL(qid);
2456 txr->txr_qid = qid;
2457 txr->txr_sc = sc;
2458 mutex_init(&txr->txr_lock, MUTEX_DEFAULT, IPL_NET);
2459
2460 return txr;
2461
2462 destroy_pcq:
2463 pcq_destroy(txr->txr_intrq);
2464 uncreate:
2465 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2466 txm = &maps[i];
2467
2468 if (txm->txm_map == NULL)
2469 continue;
2470
2471 bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2472 }
2473
2474 ixl_dmamem_free(sc, &txr->txr_mem);
2475 free:
2476 kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs);
2477 kmem_free(txr, sizeof(*txr));
2478
2479 return NULL;
2480 }
2481
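/*
 * Set or clear the pre-queue-disable request for a TX queue.  Each
 * GLLAN_TXPRE_QDIS register covers 128 queues, so the absolute queue id
 * selects both the register and the index written into it.
 */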
2482 static void
2483 ixl_txr_qdis(struct ixl_softc *sc, struct ixl_tx_ring *txr, int enable)
2484 {
2485 unsigned int qid;
2486 bus_size_t reg;
2487 uint32_t r;
2488
2489 qid = txr->txr_qid + sc->sc_base_queue;
2490 reg = I40E_GLLAN_TXPRE_QDIS(qid / 128);
2491 qid %= 128;
2492
2493 r = ixl_rd(sc, reg);
2494 CLR(r, I40E_GLLAN_TXPRE_QDIS_QINDX_MASK);
2495 SET(r, qid << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
2496 SET(r, enable ? I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK :
2497 I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK);
2498 ixl_wr(sc, reg, r);
2499 }
2500
2501 static void
2502 ixl_txr_config(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2503 {
2504 struct ixl_hmc_txq txq;
2505 struct ixl_aq_vsi_data *data = IXL_DMA_KVA(&sc->sc_scratch);
2506 void *hmc;
2507
2508 memset(&txq, 0, sizeof(txq));
2509 txq.head = htole16(txr->txr_cons);
2510 txq.new_context = 1;
2511 txq.base = htole64(IXL_DMA_DVA(&txr->txr_mem) / IXL_HMC_TXQ_BASE_UNIT);
2512 txq.head_wb_ena = IXL_HMC_TXQ_DESC_WB;
2513 txq.qlen = htole16(sc->sc_tx_ring_ndescs);
2514 txq.tphrdesc_ena = 0;
2515 txq.tphrpacket_ena = 0;
2516 txq.tphwdesc_ena = 0;
2517 txq.rdylist = data->qs_handle[0];
2518
2519 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2520 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2521 ixl_hmc_pack(hmc, &txq, ixl_hmc_pack_txq,
2522 __arraycount(ixl_hmc_pack_txq));
2523 }
2524
2525 static void
2526 ixl_txr_unconfig(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2527 {
2528 void *hmc;
2529
2530 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2531 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2532 txr->txr_cons = txr->txr_prod = 0;
2533 }
2534
2535 static void
2536 ixl_txr_clean(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2537 {
2538 struct ixl_tx_map *maps, *txm;
2539 bus_dmamap_t map;
2540 unsigned int i;
2541
2542 maps = txr->txr_maps;
2543 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2544 txm = &maps[i];
2545
2546 if (txm->txm_m == NULL)
2547 continue;
2548
2549 map = txm->txm_map;
2550 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2551 BUS_DMASYNC_POSTWRITE);
2552 bus_dmamap_unload(sc->sc_dmat, map);
2553
2554 m_freem(txm->txm_m);
2555 txm->txm_m = NULL;
2556 }
2557 }
2558
2559 static int
2560 ixl_txr_enabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2561 {
2562 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2563 uint32_t reg;
2564 int i;
2565
2566 for (i = 0; i < 10; i++) {
2567 reg = ixl_rd(sc, ena);
2568 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK))
2569 return 0;
2570
2571 delaymsec(10);
2572 }
2573
2574 return ETIMEDOUT;
2575 }
2576
2577 static int
2578 ixl_txr_disabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2579 {
2580 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2581 uint32_t reg;
2582 int i;
2583
2584 KASSERT(mutex_owned(&txr->txr_lock));
2585
2586 for (i = 0; i < 10; i++) {
2587 reg = ixl_rd(sc, ena);
2588 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK) == 0)
2589 return 0;
2590
2591 delaymsec(10);
2592 }
2593
2594 return ETIMEDOUT;
2595 }
2596
2597 static void
2598 ixl_txr_free(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2599 {
2600 struct ixl_tx_map *maps, *txm;
2601 struct mbuf *m;
2602 unsigned int i;
2603
2604 softint_disestablish(txr->txr_si);
2605 while ((m = pcq_get(txr->txr_intrq)) != NULL)
2606 m_freem(m);
2607 pcq_destroy(txr->txr_intrq);
2608
2609 maps = txr->txr_maps;
2610 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2611 txm = &maps[i];
2612
2613 bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2614 }
2615
2616 ixl_dmamem_free(sc, &txr->txr_mem);
2617 mutex_destroy(&txr->txr_lock);
2618 kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs);
2619 kmem_free(txr, sizeof(*txr));
2620 }
2621
2622 static inline int
2623 ixl_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf **m0,
2624 struct ixl_tx_ring *txr)
2625 {
2626 struct mbuf *m;
2627 int error;
2628
2629 KASSERT(mutex_owned(&txr->txr_lock));
2630
2631 m = *m0;
2632
2633 error = bus_dmamap_load_mbuf(dmat, map, m,
2634 BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT);
2635 if (error != EFBIG)
2636 return error;
2637
2638 m = m_defrag(m, M_DONTWAIT);
2639 if (m != NULL) {
2640 *m0 = m;
2641 txr->txr_defragged.ev_count++;
2642
2643 error = bus_dmamap_load_mbuf(dmat, map, m,
2644 BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT);
2645 } else {
2646 txr->txr_defrag_failed.ev_count++;
2647 error = ENOBUFS;
2648 }
2649
2650 return error;
2651 }
2652
2653 static inline int
2654 ixl_tx_setup_offloads(struct mbuf *m, uint64_t *cmd_txd)
2655 {
2656 struct ether_header *eh;
2657 size_t len;
2658 uint64_t cmd;
2659
2660 cmd = 0;
2661
2662 eh = mtod(m, struct ether_header *);
2663 switch (htons(eh->ether_type)) {
2664 case ETHERTYPE_IP:
2665 case ETHERTYPE_IPV6:
2666 len = ETHER_HDR_LEN;
2667 break;
2668 case ETHERTYPE_VLAN:
2669 len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2670 break;
2671 default:
2672 len = 0;
2673 }
2674 cmd |= ((len >> 1) << IXL_TX_DESC_MACLEN_SHIFT);
2675
2676 if (m->m_pkthdr.csum_flags &
2677 (M_CSUM_TSOv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
2678 cmd |= IXL_TX_DESC_CMD_IIPT_IPV4;
2679 }
2680 if (m->m_pkthdr.csum_flags & M_CSUM_IPv4) {
2681 cmd |= IXL_TX_DESC_CMD_IIPT_IPV4_CSUM;
2682 }
2683
2684 if (m->m_pkthdr.csum_flags &
2685 (M_CSUM_TSOv6 | M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
2686 cmd |= IXL_TX_DESC_CMD_IIPT_IPV6;
2687 }
2688
2689 switch (cmd & IXL_TX_DESC_CMD_IIPT_MASK) {
2690 case IXL_TX_DESC_CMD_IIPT_IPV4:
2691 case IXL_TX_DESC_CMD_IIPT_IPV4_CSUM:
2692 len = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data);
2693 break;
2694 case IXL_TX_DESC_CMD_IIPT_IPV6:
2695 len = M_CSUM_DATA_IPv6_IPHL(m->m_pkthdr.csum_data);
2696 break;
2697 default:
2698 len = 0;
2699 }
2700 cmd |= ((len >> 2) << IXL_TX_DESC_IPLEN_SHIFT);
2701
2702 if (m->m_pkthdr.csum_flags &
2703 (M_CSUM_TSOv4 | M_CSUM_TSOv6 | M_CSUM_TCPv4 | M_CSUM_TCPv6)) {
2704 len = sizeof(struct tcphdr);
2705 cmd |= IXL_TX_DESC_CMD_L4T_EOFT_TCP;
2706 } else if (m->m_pkthdr.csum_flags & (M_CSUM_UDPv4 | M_CSUM_UDPv6)) {
2707 len = sizeof(struct udphdr);
2708 cmd |= IXL_TX_DESC_CMD_L4T_EOFT_UDP;
2709 } else {
2710 len = 0;
2711 }
2712 cmd |= ((len >> 2) << IXL_TX_DESC_L4LEN_SHIFT);
2713
2714 *cmd_txd |= cmd;
2715 return 0;
2716 }
2717
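/*
 * Common transmit path for if_start and if_transmit.  Dequeues packets
 * (from if_snd or the per-queue pcq), loads them with bus_dma, writes one
 * data descriptor per segment (EOP|RS set on the last), and finally kicks
 * the TX tail register.
 */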
2718 static void
2719 ixl_tx_common_locked(struct ifnet *ifp, struct ixl_tx_ring *txr,
2720 bool is_transmit)
2721 {
2722 struct ixl_softc *sc = ifp->if_softc;
2723 struct ixl_tx_desc *ring, *txd;
2724 struct ixl_tx_map *txm;
2725 bus_dmamap_t map;
2726 struct mbuf *m;
2727 uint64_t cmd, cmd_txd;
2728 unsigned int prod, free, last, i;
2729 unsigned int mask;
2730 int post = 0;
2731
2732 KASSERT(mutex_owned(&txr->txr_lock));
2733
2734 if (ifp->if_link_state != LINK_STATE_UP
2735 || !ISSET(ifp->if_flags, IFF_RUNNING)
2736 || (!is_transmit && ISSET(ifp->if_flags, IFF_OACTIVE))) {
2737 if (!is_transmit)
2738 IFQ_PURGE(&ifp->if_snd);
2739 return;
2740 }
2741
2742 prod = txr->txr_prod;
2743 free = txr->txr_cons;
2744 if (free <= prod)
2745 free += sc->sc_tx_ring_ndescs;
2746 free -= prod;
2747
2748 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2749 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTWRITE);
2750
2751 ring = IXL_DMA_KVA(&txr->txr_mem);
2752 mask = sc->sc_tx_ring_ndescs - 1;
2753 last = prod;
2754 cmd = 0;
2755 txd = NULL;
2756
2757 for (;;) {
2758 if (free <= IXL_TX_PKT_DESCS) {
2759 if (!is_transmit)
2760 SET(ifp->if_flags, IFF_OACTIVE);
2761 break;
2762 }
2763
2764 if (is_transmit)
2765 m = pcq_get(txr->txr_intrq);
2766 else
2767 IFQ_DEQUEUE(&ifp->if_snd, m);
2768
2769 if (m == NULL)
2770 break;
2771
2772 txm = &txr->txr_maps[prod];
2773 map = txm->txm_map;
2774
2775 if (ixl_load_mbuf(sc->sc_dmat, map, &m, txr) != 0) {
2776 if_statinc(ifp, if_oerrors);
2777 m_freem(m);
2778 continue;
2779 }
2780
2781 cmd_txd = 0;
2782 if (m->m_pkthdr.csum_flags & IXL_CSUM_ALL_OFFLOAD) {
2783 ixl_tx_setup_offloads(m, &cmd_txd);
2784 }
2785
2786 if (vlan_has_tag(m)) {
2787 cmd_txd |= (uint64_t)vlan_get_tag(m) <<
2788 IXL_TX_DESC_L2TAG1_SHIFT;
2789 cmd_txd |= IXL_TX_DESC_CMD_IL2TAG1;
2790 }
2791
2792 bus_dmamap_sync(sc->sc_dmat, map, 0,
2793 map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2794
2795 for (i = 0; i < (unsigned int)map->dm_nsegs; i++) {
2796 txd = &ring[prod];
2797
2798 cmd = (uint64_t)map->dm_segs[i].ds_len <<
2799 IXL_TX_DESC_BSIZE_SHIFT;
2800 cmd |= IXL_TX_DESC_DTYPE_DATA | IXL_TX_DESC_CMD_ICRC;
2801 cmd |= cmd_txd;
2802
2803 txd->addr = htole64(map->dm_segs[i].ds_addr);
2804 txd->cmd = htole64(cmd);
2805
2806 last = prod;
2807
2808 prod++;
2809 prod &= mask;
2810 }
2811 cmd |= IXL_TX_DESC_CMD_EOP | IXL_TX_DESC_CMD_RS;
2812 txd->cmd = htole64(cmd);
2813
2814 txm->txm_m = m;
2815 txm->txm_eop = last;
2816
2817 bpf_mtap(ifp, m, BPF_D_OUT);
2818
2819 free -= i;
2820 post = 1;
2821 }
2822
2823 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2824 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREWRITE);
2825
2826 if (post) {
2827 txr->txr_prod = prod;
2828 ixl_wr(sc, txr->txr_tail, prod);
2829 }
2830 }
2831
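/*
 * Reclaim completed TX descriptors, up to txlimit packets per call.
 * Returns nonzero if the limit was hit so the caller can reschedule.
 */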
2832 static int
2833 ixl_txeof(struct ixl_softc *sc, struct ixl_tx_ring *txr, u_int txlimit)
2834 {
2835 struct ifnet *ifp = &sc->sc_ec.ec_if;
2836 struct ixl_tx_desc *ring, *txd;
2837 struct ixl_tx_map *txm;
2838 struct mbuf *m;
2839 bus_dmamap_t map;
2840 unsigned int cons, prod, last;
2841 unsigned int mask;
2842 uint64_t dtype;
2843 int done = 0, more = 0;
2844
2845 KASSERT(mutex_owned(&txr->txr_lock));
2846
2847 prod = txr->txr_prod;
2848 cons = txr->txr_cons;
2849
2850 if (cons == prod)
2851 return 0;
2852
2853 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2854 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTREAD);
2855
2856 ring = IXL_DMA_KVA(&txr->txr_mem);
2857 mask = sc->sc_tx_ring_ndescs - 1;
2858
2859 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
2860
2861 do {
2862 if (txlimit-- <= 0) {
2863 more = 1;
2864 break;
2865 }
2866
2867 txm = &txr->txr_maps[cons];
2868 last = txm->txm_eop;
2869 txd = &ring[last];
2870
2871 dtype = txd->cmd & htole64(IXL_TX_DESC_DTYPE_MASK);
2872 if (dtype != htole64(IXL_TX_DESC_DTYPE_DONE))
2873 break;
2874
2875 map = txm->txm_map;
2876
2877 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2878 BUS_DMASYNC_POSTWRITE);
2879 bus_dmamap_unload(sc->sc_dmat, map);
2880
2881 m = txm->txm_m;
2882 if (m != NULL) {
2883 if_statinc_ref(nsr, if_opackets);
2884 if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
2885 if (ISSET(m->m_flags, M_MCAST))
2886 if_statinc_ref(nsr, if_omcasts);
2887 m_freem(m);
2888 }
2889
2890 txm->txm_m = NULL;
2891 txm->txm_eop = -1;
2892
2893 cons = last + 1;
2894 cons &= mask;
2895 done = 1;
2896 } while (cons != prod);
2897
2898 IF_STAT_PUTREF(ifp);
2899
2900 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2901 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREREAD);
2902
2903 txr->txr_cons = cons;
2904
2905 if (done) {
2906 softint_schedule(txr->txr_si);
2907 if (txr->txr_qid == 0) {
2908 CLR(ifp->if_flags, IFF_OACTIVE);
2909 if_schedule_deferred_start(ifp);
2910 }
2911 }
2912
2913 return more;
2914 }
2915
2916 static void
2917 ixl_start(struct ifnet *ifp)
2918 {
2919 struct ixl_softc *sc;
2920 struct ixl_tx_ring *txr;
2921
2922 sc = ifp->if_softc;
2923 txr = sc->sc_qps[0].qp_txr;
2924
2925 mutex_enter(&txr->txr_lock);
2926 ixl_tx_common_locked(ifp, txr, false);
2927 mutex_exit(&txr->txr_lock);
2928 }
2929
2930 static inline unsigned int
2931 ixl_select_txqueue(struct ixl_softc *sc, struct mbuf *m)
2932 {
2933 u_int cpuid;
2934
2935 cpuid = cpu_index(curcpu());
2936
2937 return (unsigned int)(cpuid % sc->sc_nqueue_pairs);
2938 }
2939
2940 static int
2941 ixl_transmit(struct ifnet *ifp, struct mbuf *m)
2942 {
2943 struct ixl_softc *sc;
2944 struct ixl_tx_ring *txr;
2945 unsigned int qid;
2946
2947 sc = ifp->if_softc;
2948 qid = ixl_select_txqueue(sc, m);
2949
2950 txr = sc->sc_qps[qid].qp_txr;
2951
2952 if (__predict_false(!pcq_put(txr->txr_intrq, m))) {
2953 mutex_enter(&txr->txr_lock);
2954 txr->txr_pcqdrop.ev_count++;
2955 mutex_exit(&txr->txr_lock);
2956
2957 m_freem(m);
2958 return ENOBUFS;
2959 }
2960
2961 if (mutex_tryenter(&txr->txr_lock)) {
2962 ixl_tx_common_locked(ifp, txr, true);
2963 mutex_exit(&txr->txr_lock);
2964 } else {
2965 kpreempt_disable();
2966 softint_schedule(txr->txr_si);
2967 kpreempt_enable();
2968 }
2969
2970 return 0;
2971 }
2972
2973 static void
2974 ixl_deferred_transmit(void *xtxr)
2975 {
2976 struct ixl_tx_ring *txr = xtxr;
2977 struct ixl_softc *sc = txr->txr_sc;
2978 struct ifnet *ifp = &sc->sc_ec.ec_if;
2979
2980 mutex_enter(&txr->txr_lock);
2981 txr->txr_transmitdef.ev_count++;
2982 if (pcq_peek(txr->txr_intrq) != NULL)
2983 ixl_tx_common_locked(ifp, txr, true);
2984 mutex_exit(&txr->txr_lock);
2985 }
2986
2987 static struct ixl_rx_ring *
2988 ixl_rxr_alloc(struct ixl_softc *sc, unsigned int qid)
2989 {
2990 struct ixl_rx_ring *rxr = NULL;
2991 struct ixl_rx_map *maps = NULL, *rxm;
2992 unsigned int i;
2993
2994 rxr = kmem_zalloc(sizeof(*rxr), KM_SLEEP);
2995 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_rx_ring_ndescs,
2996 KM_SLEEP);
2997
2998 if (ixl_dmamem_alloc(sc, &rxr->rxr_mem,
2999 sizeof(struct ixl_rx_rd_desc_32) * sc->sc_rx_ring_ndescs,
3000 IXL_RX_QUEUE_ALIGN) != 0)
3001 goto free;
3002
3003 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
3004 rxm = &maps[i];
3005
3006 if (bus_dmamap_create(sc->sc_dmat,
3007 IXL_MCLBYTES, 1, IXL_MCLBYTES, 0,
3008 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &rxm->rxm_map) != 0)
3009 goto uncreate;
3010
3011 rxm->rxm_m = NULL;
3012 }
3013
3014 rxr->rxr_cons = rxr->rxr_prod = 0;
3015 rxr->rxr_m_head = NULL;
3016 rxr->rxr_m_tail = &rxr->rxr_m_head;
3017 rxr->rxr_maps = maps;
3018
3019 rxr->rxr_tail = I40E_QRX_TAIL(qid);
3020 rxr->rxr_qid = qid;
3021 mutex_init(&rxr->rxr_lock, MUTEX_DEFAULT, IPL_NET);
3022
3023 return rxr;
3024
3025 uncreate:
3026 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
3027 rxm = &maps[i];
3028
3029 if (rxm->rxm_map == NULL)
3030 continue;
3031
3032 bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
3033 }
3034
3035 ixl_dmamem_free(sc, &rxr->rxr_mem);
3036 free:
3037 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs);
3038 kmem_free(rxr, sizeof(*rxr));
3039
3040 return NULL;
3041 }
3042
3043 static void
3044 ixl_rxr_clean(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3045 {
3046 struct ixl_rx_map *maps, *rxm;
3047 bus_dmamap_t map;
3048 unsigned int i;
3049
3050 maps = rxr->rxr_maps;
3051 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
3052 rxm = &maps[i];
3053
3054 if (rxm->rxm_m == NULL)
3055 continue;
3056
3057 map = rxm->rxm_map;
3058 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3059 BUS_DMASYNC_POSTWRITE);
3060 bus_dmamap_unload(sc->sc_dmat, map);
3061
3062 m_freem(rxm->rxm_m);
3063 rxm->rxm_m = NULL;
3064 }
3065
3066 m_freem(rxr->rxr_m_head);
3067 rxr->rxr_m_head = NULL;
3068 rxr->rxr_m_tail = &rxr->rxr_m_head;
3069
3070 rxr->rxr_prod = rxr->rxr_cons = 0;
3071 }
3072
3073 static int
3074 ixl_rxr_enabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3075 {
3076 bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
3077 uint32_t reg;
3078 int i;
3079
3080 for (i = 0; i < 10; i++) {
3081 reg = ixl_rd(sc, ena);
3082 if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK))
3083 return 0;
3084
3085 delaymsec(10);
3086 }
3087
3088 return ETIMEDOUT;
3089 }
3090
3091 static int
3092 ixl_rxr_disabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3093 {
3094 bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
3095 uint32_t reg;
3096 int i;
3097
3098 KASSERT(mutex_owned(&rxr->rxr_lock));
3099
3100 for (i = 0; i < 10; i++) {
3101 reg = ixl_rd(sc, ena);
3102 if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK) == 0)
3103 return 0;
3104
3105 delaymsec(10);
3106 }
3107
3108 return ETIMEDOUT;
3109 }
3110
3111 static void
3112 ixl_rxr_config(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3113 {
3114 struct ixl_hmc_rxq rxq;
3115 struct ifnet *ifp = &sc->sc_ec.ec_if;
3116 uint16_t rxmax;
3117 void *hmc;
3118
3119 memset(&rxq, 0, sizeof(rxq));
3120 rxmax = ifp->if_mtu + IXL_MTU_ETHERLEN;
3121
3122 rxq.head = htole16(rxr->rxr_cons);
3123 rxq.base = htole64(IXL_DMA_DVA(&rxr->rxr_mem) / IXL_HMC_RXQ_BASE_UNIT);
3124 rxq.qlen = htole16(sc->sc_rx_ring_ndescs);
3125 rxq.dbuff = htole16(IXL_MCLBYTES / IXL_HMC_RXQ_DBUFF_UNIT);
3126 rxq.hbuff = 0;
3127 rxq.dtype = IXL_HMC_RXQ_DTYPE_NOSPLIT;
3128 rxq.dsize = IXL_HMC_RXQ_DSIZE_32;
3129 rxq.crcstrip = 1;
3130 rxq.l2sel = 1;
3131 rxq.showiv = 1;
3132 rxq.rxmax = htole16(rxmax);
3133 rxq.tphrdesc_ena = 0;
3134 rxq.tphwdesc_ena = 0;
3135 rxq.tphdata_ena = 0;
3136 rxq.tphhead_ena = 0;
3137 rxq.lrxqthresh = 0;
3138 rxq.prefena = 1;
3139
3140 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
3141 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
3142 ixl_hmc_pack(hmc, &rxq, ixl_hmc_pack_rxq,
3143 __arraycount(ixl_hmc_pack_rxq));
3144 }
3145
3146 static void
3147 ixl_rxr_unconfig(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3148 {
3149 void *hmc;
3150
3151 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
3152 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
3153 rxr->rxr_cons = rxr->rxr_prod = 0;
3154 }
3155
3156 static void
3157 ixl_rxr_free(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3158 {
3159 struct ixl_rx_map *maps, *rxm;
3160 unsigned int i;
3161
3162 maps = rxr->rxr_maps;
3163 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
3164 rxm = &maps[i];
3165
3166 bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
3167 }
3168
3169 ixl_dmamem_free(sc, &rxr->rxr_mem);
3170 mutex_destroy(&rxr->rxr_lock);
3171 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs);
3172 kmem_free(rxr, sizeof(*rxr));
3173 }
3174
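/*
 * Translate the RX descriptor checksum status into mbuf csum_flags,
 * marking bad IPv4/L4 checksums when the hardware reported an error.
 */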
3175 static inline void
3176 ixl_rx_csum(struct mbuf *m, uint64_t qword)
3177 {
3178 int flags_mask;
3179
3180 if (!ISSET(qword, IXL_RX_DESC_L3L4P)) {
3181 /* No L3 or L4 checksum was calculated */
3182 return;
3183 }
3184
3185 switch (__SHIFTOUT(qword, IXL_RX_DESC_PTYPE_MASK)) {
3186 case IXL_RX_DESC_PTYPE_IPV4FRAG:
3187 case IXL_RX_DESC_PTYPE_IPV4:
3188 case IXL_RX_DESC_PTYPE_SCTPV4:
3189 case IXL_RX_DESC_PTYPE_ICMPV4:
3190 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
3191 break;
3192 case IXL_RX_DESC_PTYPE_TCPV4:
3193 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
3194 flags_mask |= M_CSUM_TCPv4 | M_CSUM_TCP_UDP_BAD;
3195 break;
3196 case IXL_RX_DESC_PTYPE_UDPV4:
3197 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
3198 flags_mask |= M_CSUM_UDPv4 | M_CSUM_TCP_UDP_BAD;
3199 break;
3200 case IXL_RX_DESC_PTYPE_TCPV6:
3201 flags_mask = M_CSUM_TCPv6 | M_CSUM_TCP_UDP_BAD;
3202 break;
3203 case IXL_RX_DESC_PTYPE_UDPV6:
3204 flags_mask = M_CSUM_UDPv6 | M_CSUM_TCP_UDP_BAD;
3205 break;
3206 default:
3207 flags_mask = 0;
3208 }
3209
3210 m->m_pkthdr.csum_flags |= (flags_mask & (M_CSUM_IPv4 |
3211 M_CSUM_TCPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv4 | M_CSUM_UDPv6));
3212
3213 if (ISSET(qword, IXL_RX_DESC_IPE)) {
3214 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_IPv4_BAD);
3215 }
3216
3217 if (ISSET(qword, IXL_RX_DESC_L4E)) {
3218 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_TCP_UDP_BAD);
3219 }
3220 }
3221
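/*
 * Harvest received packets, up to rxlimit descriptors per call, chaining
 * multi-descriptor frames until EOP, applying VLAN tag and RX checksum
 * offload results, and refilling the ring afterwards.  Returns nonzero
 * if the limit was hit.
 */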
3222 static int
3223 ixl_rxeof(struct ixl_softc *sc, struct ixl_rx_ring *rxr, u_int rxlimit)
3224 {
3225 struct ifnet *ifp = &sc->sc_ec.ec_if;
3226 struct ixl_rx_wb_desc_32 *ring, *rxd;
3227 struct ixl_rx_map *rxm;
3228 bus_dmamap_t map;
3229 unsigned int cons, prod;
3230 struct mbuf *m;
3231 uint64_t word, word0;
3232 unsigned int len;
3233 unsigned int mask;
3234 int done = 0, more = 0;
3235
3236 KASSERT(mutex_owned(&rxr->rxr_lock));
3237
3238 if (!ISSET(ifp->if_flags, IFF_RUNNING))
3239 return 0;
3240
3241 prod = rxr->rxr_prod;
3242 cons = rxr->rxr_cons;
3243
3244 if (cons == prod)
3245 return 0;
3246
3247 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
3248 0, IXL_DMA_LEN(&rxr->rxr_mem),
3249 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3250
3251 ring = IXL_DMA_KVA(&rxr->rxr_mem);
3252 mask = sc->sc_rx_ring_ndescs - 1;
3253
3254 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
3255
3256 do {
3257 if (rxlimit-- <= 0) {
3258 more = 1;
3259 break;
3260 }
3261
3262 rxd = &ring[cons];
3263
3264 word = le64toh(rxd->qword1);
3265
3266 if (!ISSET(word, IXL_RX_DESC_DD))
3267 break;
3268
3269 rxm = &rxr->rxr_maps[cons];
3270
3271 map = rxm->rxm_map;
3272 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3273 BUS_DMASYNC_POSTREAD);
3274 bus_dmamap_unload(sc->sc_dmat, map);
3275
3276 m = rxm->rxm_m;
3277 rxm->rxm_m = NULL;
3278
3279 KASSERT(m != NULL);
3280
3281 len = (word & IXL_RX_DESC_PLEN_MASK) >> IXL_RX_DESC_PLEN_SHIFT;
3282 m->m_len = len;
3283 m->m_pkthdr.len = 0;
3284
3285 m->m_next = NULL;
3286 *rxr->rxr_m_tail = m;
3287 rxr->rxr_m_tail = &m->m_next;
3288
3289 m = rxr->rxr_m_head;
3290 m->m_pkthdr.len += len;
3291
3292 if (ISSET(word, IXL_RX_DESC_EOP)) {
3293 word0 = le64toh(rxd->qword0);
3294
3295 if (ISSET(word, IXL_RX_DESC_L2TAG1P)) {
3296 vlan_set_tag(m,
3297 __SHIFTOUT(word0, IXL_RX_DESC_L2TAG1_MASK));
3298 }
3299
3300 if ((ifp->if_capenable & IXL_IFCAP_RXCSUM) != 0)
3301 ixl_rx_csum(m, word);
3302
3303 if (!ISSET(word,
3304 IXL_RX_DESC_RXE | IXL_RX_DESC_OVERSIZE)) {
3305 m_set_rcvif(m, ifp);
3306 if_statinc_ref(nsr, if_ipackets);
3307 if_statadd_ref(nsr, if_ibytes,
3308 m->m_pkthdr.len);
3309 if_percpuq_enqueue(ifp->if_percpuq, m);
3310 } else {
3311 if_statinc_ref(nsr, if_ierrors);
3312 m_freem(m);
3313 }
3314
3315 rxr->rxr_m_head = NULL;
3316 rxr->rxr_m_tail = &rxr->rxr_m_head;
3317 }
3318
3319 cons++;
3320 cons &= mask;
3321
3322 done = 1;
3323 } while (cons != prod);
3324
3325 if (done) {
3326 rxr->rxr_cons = cons;
3327 if (ixl_rxfill(sc, rxr) == -1)
3328 if_statinc_ref(nsr, if_iqdrops);
3329 }
3330
3331 IF_STAT_PUTREF(ifp);
3332
3333 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
3334 0, IXL_DMA_LEN(&rxr->rxr_mem),
3335 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3336
3337 return more;
3338 }
3339
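/*
 * Refill empty RX descriptors with fresh mbuf clusters and advance the
 * RX tail register.  Returns -1 if no slots are available or an mbuf
 * could not be allocated or loaded; ixl_rxeof() counts such failures as
 * if_iqdrops.
 */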
3340 static int
3341 ixl_rxfill(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3342 {
3343 struct ixl_rx_rd_desc_32 *ring, *rxd;
3344 struct ixl_rx_map *rxm;
3345 bus_dmamap_t map;
3346 struct mbuf *m;
3347 unsigned int prod;
3348 unsigned int slots;
3349 unsigned int mask;
3350 int post = 0, error = 0;
3351
3352 KASSERT(mutex_owned(&rxr->rxr_lock));
3353
3354 prod = rxr->rxr_prod;
3355 slots = ixl_rxr_unrefreshed(rxr->rxr_prod, rxr->rxr_cons,
3356 sc->sc_rx_ring_ndescs);
3357
3358 ring = IXL_DMA_KVA(&rxr->rxr_mem);
3359 mask = sc->sc_rx_ring_ndescs - 1;
3360
3361 if (__predict_false(slots <= 0))
3362 return -1;
3363
3364 do {
3365 rxm = &rxr->rxr_maps[prod];
3366
3367 MGETHDR(m, M_DONTWAIT, MT_DATA);
3368 if (m == NULL) {
3369 rxr->rxr_mgethdr_failed.ev_count++;
3370 error = -1;
3371 break;
3372 }
3373
3374 MCLGET(m, M_DONTWAIT);
3375 if (!ISSET(m->m_flags, M_EXT)) {
3376 rxr->rxr_mgetcl_failed.ev_count++;
3377 error = -1;
3378 m_freem(m);
3379 break;
3380 }
3381
3382 m->m_len = m->m_pkthdr.len = MCLBYTES;
3383 m_adj(m, ETHER_ALIGN);
3384
3385 map = rxm->rxm_map;
3386
3387 if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
3388 BUS_DMA_READ | BUS_DMA_NOWAIT) != 0) {
3389 rxr->rxr_mbuf_load_failed.ev_count++;
3390 error = -1;
3391 m_freem(m);
3392 break;
3393 }
3394
3395 rxm->rxm_m = m;
3396
3397 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3398 BUS_DMASYNC_PREREAD);
3399
3400 rxd = &ring[prod];
3401
3402 rxd->paddr = htole64(map->dm_segs[0].ds_addr);
3403 rxd->haddr = htole64(0);
3404
3405 prod++;
3406 prod &= mask;
3407
3408 post = 1;
3409
3410 } while (--slots);
3411
3412 if (post) {
3413 rxr->rxr_prod = prod;
3414 ixl_wr(sc, rxr->rxr_tail, prod);
3415 }
3416
3417 return error;
3418 }
3419
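/*
 * Run TX completion and RX harvest for one queue pair under the
 * respective ring locks.  The return value encodes which side still has
 * work pending (bit 0 = TX, bit 1 = RX).
 */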
3420 static inline int
3421 ixl_handle_queue_common(struct ixl_softc *sc, struct ixl_queue_pair *qp,
3422 u_int txlimit, struct evcnt *txevcnt,
3423 u_int rxlimit, struct evcnt *rxevcnt)
3424 {
3425 struct ixl_tx_ring *txr = qp->qp_txr;
3426 struct ixl_rx_ring *rxr = qp->qp_rxr;
3427 int txmore, rxmore;
3428 int rv;
3429
3430 mutex_enter(&txr->txr_lock);
3431 txevcnt->ev_count++;
3432 txmore = ixl_txeof(sc, txr, txlimit);
3433 mutex_exit(&txr->txr_lock);
3434
3435 mutex_enter(&rxr->rxr_lock);
3436 rxevcnt->ev_count++;
3437 rxmore = ixl_rxeof(sc, rxr, rxlimit);
3438 mutex_exit(&rxr->rxr_lock);
3439
3440 rv = txmore | (rxmore << 1);
3441
3442 return rv;
3443 }
3444
3445 static void
3446 ixl_sched_handle_queue(struct ixl_softc *sc, struct ixl_queue_pair *qp)
3447 {
3448
3449 if (qp->qp_workqueue)
3450 workqueue_enqueue(sc->sc_workq_txrx, &qp->qp_work, NULL);
3451 else
3452 softint_schedule(qp->qp_si);
3453 }
3454
3455 static int
3456 ixl_intr(void *xsc)
3457 {
3458 struct ixl_softc *sc = xsc;
3459 struct ixl_tx_ring *txr;
3460 struct ixl_rx_ring *rxr;
3461 uint32_t icr, rxintr, txintr;
3462 int rv = 0;
3463 unsigned int i;
3464
3465 KASSERT(sc != NULL);
3466
3467 ixl_enable_other_intr(sc);
3468 icr = ixl_rd(sc, I40E_PFINT_ICR0);
3469
3470 if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) {
3471 atomic_inc_64(&sc->sc_event_atq.ev_count);
3472 ixl_atq_done(sc);
3473 ixl_work_add(sc->sc_workq, &sc->sc_arq_task);
3474 rv = 1;
3475 }
3476
3477 if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) {
3478 atomic_inc_64(&sc->sc_event_link.ev_count);
3479 ixl_work_add(sc->sc_workq, &sc->sc_link_state_task);
3480 rv = 1;
3481 }
3482
3483 rxintr = icr & I40E_INTR_NOTX_RX_MASK;
3484 txintr = icr & I40E_INTR_NOTX_TX_MASK;
3485
3486 if (txintr || rxintr) {
3487 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
3488 txr = sc->sc_qps[i].qp_txr;
3489 rxr = sc->sc_qps[i].qp_rxr;
3490
3491 ixl_handle_queue_common(sc, &sc->sc_qps[i],
3492 IXL_TXRX_PROCESS_UNLIMIT, &txr->txr_intr,
3493 IXL_TXRX_PROCESS_UNLIMIT, &rxr->rxr_intr);
3494 }
3495 rv = 1;
3496 }
3497
3498 return rv;
3499 }
3500
3501 static int
3502 ixl_queue_intr(void *xqp)
3503 {
3504 struct ixl_queue_pair *qp = xqp;
3505 struct ixl_tx_ring *txr = qp->qp_txr;
3506 struct ixl_rx_ring *rxr = qp->qp_rxr;
3507 struct ixl_softc *sc = qp->qp_sc;
3508 u_int txlimit, rxlimit;
3509 int more;
3510
3511 txlimit = sc->sc_tx_intr_process_limit;
3512 rxlimit = sc->sc_rx_intr_process_limit;
3513 qp->qp_workqueue = sc->sc_txrx_workqueue;
3514
3515 more = ixl_handle_queue_common(sc, qp,
3516 txlimit, &txr->txr_intr, rxlimit, &rxr->rxr_intr);
3517
3518 if (more != 0) {
3519 ixl_sched_handle_queue(sc, qp);
3520 } else {
3521 /* for ALTQ */
3522 if (txr->txr_qid == 0)
3523 if_schedule_deferred_start(&sc->sc_ec.ec_if);
3524 softint_schedule(txr->txr_si);
3525
3526 ixl_enable_queue_intr(sc, qp);
3527 }
3528
3529 return 1;
3530 }
3531
3532 static void
3533 ixl_handle_queue_wk(struct work *wk, void *xsc)
3534 {
3535 struct ixl_queue_pair *qp;
3536
3537 qp = container_of(wk, struct ixl_queue_pair, qp_work);
3538 ixl_handle_queue(qp);
3539 }
3540
3541 static void
3542 ixl_handle_queue(void *xqp)
3543 {
3544 struct ixl_queue_pair *qp = xqp;
3545 struct ixl_softc *sc = qp->qp_sc;
3546 struct ixl_tx_ring *txr = qp->qp_txr;
3547 struct ixl_rx_ring *rxr = qp->qp_rxr;
3548 u_int txlimit, rxlimit;
3549 int more;
3550
3551 txlimit = sc->sc_tx_process_limit;
3552 rxlimit = sc->sc_rx_process_limit;
3553
3554 more = ixl_handle_queue_common(sc, qp,
3555 txlimit, &txr->txr_defer, rxlimit, &rxr->rxr_defer);
3556
3557 if (more != 0)
3558 ixl_sched_handle_queue(sc, qp);
3559 else
3560 ixl_enable_queue_intr(sc, qp);
3561 }
3562
3563 static inline void
3564 ixl_print_hmc_error(struct ixl_softc *sc, uint32_t reg)
3565 {
3566 uint32_t hmc_idx, hmc_isvf;
3567 uint32_t hmc_errtype, hmc_objtype, hmc_data;
3568
3569 hmc_idx = reg & I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK;
3570 hmc_idx = hmc_idx >> I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT;
3571 hmc_isvf = reg & I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK;
3572 hmc_isvf = hmc_isvf >> I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT;
3573 hmc_errtype = reg & I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK;
3574 hmc_errtype = hmc_errtype >> I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT;
3575 hmc_objtype = reg & I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK;
3576 hmc_objtype = hmc_objtype >> I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT;
3577 hmc_data = ixl_rd(sc, I40E_PFHMC_ERRORDATA);
3578
3579 device_printf(sc->sc_dev,
3580 "HMC Error (idx=0x%x, isvf=0x%x, err=0x%x, obj=0x%x, data=0x%x)\n",
3581 hmc_idx, hmc_isvf, hmc_errtype, hmc_objtype, hmc_data);
3582 }
3583
3584 static int
3585 ixl_other_intr(void *xsc)
3586 {
3587 struct ixl_softc *sc = xsc;
3588 uint32_t icr, mask, reg;
3589 	int rv = 0;
3590
3591 icr = ixl_rd(sc, I40E_PFINT_ICR0);
3592 mask = ixl_rd(sc, I40E_PFINT_ICR0_ENA);
3593
3594 if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) {
3595 atomic_inc_64(&sc->sc_event_atq.ev_count);
3596 ixl_atq_done(sc);
3597 ixl_work_add(sc->sc_workq, &sc->sc_arq_task);
3598 rv = 1;
3599 }
3600
3601 if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) {
3602 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3603 device_printf(sc->sc_dev, "link stat changed\n");
3604
3605 atomic_inc_64(&sc->sc_event_link.ev_count);
3606 ixl_work_add(sc->sc_workq, &sc->sc_link_state_task);
3607 rv = 1;
3608 }
3609
3610 if (ISSET(icr, I40E_PFINT_ICR0_GRST_MASK)) {
3611 CLR(mask, I40E_PFINT_ICR0_ENA_GRST_MASK);
3612 reg = ixl_rd(sc, I40E_GLGEN_RSTAT);
3613 reg = reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK;
3614 reg = reg >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
3615
3616 device_printf(sc->sc_dev, "GRST: %s\n",
3617 reg == I40E_RESET_CORER ? "CORER" :
3618 reg == I40E_RESET_GLOBR ? "GLOBR" :
3619 reg == I40E_RESET_EMPR ? "EMPR" :
3620 "POR");
3621 }
3622
3623 if (ISSET(icr, I40E_PFINT_ICR0_ECC_ERR_MASK))
3624 atomic_inc_64(&sc->sc_event_ecc_err.ev_count);
3625 if (ISSET(icr, I40E_PFINT_ICR0_PCI_EXCEPTION_MASK))
3626 atomic_inc_64(&sc->sc_event_pci_exception.ev_count);
3627 if (ISSET(icr, I40E_PFINT_ICR0_PE_CRITERR_MASK))
3628 atomic_inc_64(&sc->sc_event_crit_err.ev_count);
3629
3630 if (ISSET(icr, IXL_ICR0_CRIT_ERR_MASK)) {
3631 CLR(mask, IXL_ICR0_CRIT_ERR_MASK);
3632 device_printf(sc->sc_dev, "critical error\n");
3633 }
3634
3635 if (ISSET(icr, I40E_PFINT_ICR0_HMC_ERR_MASK)) {
3636 reg = ixl_rd(sc, I40E_PFHMC_ERRORINFO);
3637 if (ISSET(reg, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK))
3638 ixl_print_hmc_error(sc, reg);
3639 ixl_wr(sc, I40E_PFHMC_ERRORINFO, 0);
3640 }
3641
3642 ixl_wr(sc, I40E_PFINT_ICR0_ENA, mask);
3643 ixl_flush(sc);
3644 ixl_enable_other_intr(sc);
3645 return rv;
3646 }
3647
3648 static void
3649 ixl_get_link_status_done(struct ixl_softc *sc,
3650 const struct ixl_aq_desc *iaq)
3651 {
3652
3653 ixl_link_state_update(sc, iaq);
3654 }
3655
3656 static void
3657 ixl_get_link_status(void *xsc)
3658 {
3659 struct ixl_softc *sc = xsc;
3660 struct ixl_aq_desc *iaq;
3661 struct ixl_aq_link_param *param;
3662
3663 memset(&sc->sc_link_state_atq, 0, sizeof(sc->sc_link_state_atq));
3664 iaq = &sc->sc_link_state_atq.iatq_desc;
3665 iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
3666 param = (struct ixl_aq_link_param *)iaq->iaq_param;
3667 param->notify = IXL_AQ_LINK_NOTIFY;
3668
3669 ixl_atq_set(&sc->sc_link_state_atq, ixl_get_link_status_done);
3670 (void)ixl_atq_post(sc, &sc->sc_link_state_atq);
3671 }
3672
3673 static void
3674 ixl_link_state_update(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
3675 {
3676 struct ifnet *ifp = &sc->sc_ec.ec_if;
3677 int link_state;
3678
3679 KASSERT(kpreempt_disabled());
3680
3681 link_state = ixl_set_link_status(sc, iaq);
3682
3683 if (ifp->if_link_state != link_state)
3684 if_link_state_change(ifp, link_state);
3685
3686 if (link_state != LINK_STATE_DOWN) {
3687 if_schedule_deferred_start(ifp);
3688 }
3689 }
3690
3691 static void
3692 ixl_aq_dump(const struct ixl_softc *sc, const struct ixl_aq_desc *iaq,
3693 const char *msg)
3694 {
3695 char buf[512];
3696 size_t len;
3697
3698 len = sizeof(buf);
3699 buf[--len] = '\0';
3700
3701 device_printf(sc->sc_dev, "%s\n", msg);
3702 snprintb(buf, len, IXL_AQ_FLAGS_FMT, le16toh(iaq->iaq_flags));
3703 device_printf(sc->sc_dev, "flags %s opcode %04x\n",
3704 buf, le16toh(iaq->iaq_opcode));
3705 device_printf(sc->sc_dev, "datalen %u retval %u\n",
3706 le16toh(iaq->iaq_datalen), le16toh(iaq->iaq_retval));
3707 device_printf(sc->sc_dev, "cookie %016" PRIx64 "\n", iaq->iaq_cookie);
3708 device_printf(sc->sc_dev, "%08x %08x %08x %08x\n",
3709 le32toh(iaq->iaq_param[0]), le32toh(iaq->iaq_param[1]),
3710 le32toh(iaq->iaq_param[2]), le32toh(iaq->iaq_param[3]));
3711 }
3712
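/*
 * Drain the admin receive queue (events posted by the firmware), handle
 * link status change events inline, recycle the event buffers and
 * re-enable the "other" interrupt.
 */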
3713 static void
3714 ixl_arq(void *xsc)
3715 {
3716 struct ixl_softc *sc = xsc;
3717 struct ixl_aq_desc *arq, *iaq;
3718 struct ixl_aq_buf *aqb;
3719 unsigned int cons = sc->sc_arq_cons;
3720 unsigned int prod;
3721 int done = 0;
3722
3723 prod = ixl_rd(sc, sc->sc_aq_regs->arq_head) &
3724 sc->sc_aq_regs->arq_head_mask;
3725
3726 if (cons == prod)
3727 goto done;
3728
3729 arq = IXL_DMA_KVA(&sc->sc_arq);
3730
3731 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3732 0, IXL_DMA_LEN(&sc->sc_arq),
3733 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3734
3735 do {
3736 iaq = &arq[cons];
3737 aqb = sc->sc_arq_live[cons];
3738
3739 KASSERT(aqb != NULL);
3740
3741 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,
3742 BUS_DMASYNC_POSTREAD);
3743
3744 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3745 ixl_aq_dump(sc, iaq, "arq event");
3746
3747 switch (iaq->iaq_opcode) {
3748 case htole16(IXL_AQ_OP_PHY_LINK_STATUS):
3749 kpreempt_disable();
3750 ixl_link_state_update(sc, iaq);
3751 kpreempt_enable();
3752 break;
3753 }
3754
3755 memset(iaq, 0, sizeof(*iaq));
3756 sc->sc_arq_live[cons] = NULL;
3757 SIMPLEQ_INSERT_TAIL(&sc->sc_arq_idle, aqb, aqb_entry);
3758
3759 cons++;
3760 cons &= IXL_AQ_MASK;
3761
3762 done = 1;
3763 } while (cons != prod);
3764
3765 if (done) {
3766 sc->sc_arq_cons = cons;
3767 ixl_arq_fill(sc);
3768 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3769 0, IXL_DMA_LEN(&sc->sc_arq),
3770 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3771 }
3772
3773 done:
3774 ixl_enable_other_intr(sc);
3775 }
3776
3777 static void
3778 ixl_atq_set(struct ixl_atq *iatq,
3779 void (*fn)(struct ixl_softc *, const struct ixl_aq_desc *))
3780 {
3781
3782 iatq->iatq_fn = fn;
3783 }
3784
3785 static int
3786 ixl_atq_post_locked(struct ixl_softc *sc, struct ixl_atq *iatq)
3787 {
3788 struct ixl_aq_desc *atq, *slot;
3789 unsigned int prod, cons, prod_next;
3790
3791 /* assert locked */
3792 KASSERT(mutex_owned(&sc->sc_atq_lock));
3793
3794 atq = IXL_DMA_KVA(&sc->sc_atq);
3795 prod = sc->sc_atq_prod;
3796 cons = sc->sc_atq_cons;
3797 	prod_next = (prod + 1) & IXL_AQ_MASK;
3798
3799 if (cons == prod_next)
3800 return ENOMEM;
3801
3802 slot = &atq[prod];
3803
3804 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3805 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
3806
3807 *slot = iatq->iatq_desc;
3808 slot->iaq_cookie = (uint64_t)((intptr_t)iatq);
3809
3810 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3811 ixl_aq_dump(sc, slot, "atq command");
3812
3813 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3814 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
3815
3816 sc->sc_atq_prod = prod_next;
3817 ixl_wr(sc, sc->sc_aq_regs->atq_tail, sc->sc_atq_prod);
3818
3819 return 0;
3820 }
3821
3822 static int
3823 ixl_atq_post(struct ixl_softc *sc, struct ixl_atq *iatq)
3824 {
3825 int rv;
3826
3827 mutex_enter(&sc->sc_atq_lock);
3828 rv = ixl_atq_post_locked(sc, iatq);
3829 mutex_exit(&sc->sc_atq_lock);
3830
3831 return rv;
3832 }
3833
3834 static void
3835 ixl_atq_done_locked(struct ixl_softc *sc)
3836 {
3837 struct ixl_aq_desc *atq, *slot;
3838 struct ixl_atq *iatq;
3839 unsigned int cons;
3840 unsigned int prod;
3841
3842 KASSERT(mutex_owned(&sc->sc_atq_lock));
3843
3844 prod = sc->sc_atq_prod;
3845 cons = sc->sc_atq_cons;
3846
3847 if (prod == cons)
3848 return;
3849
3850 atq = IXL_DMA_KVA(&sc->sc_atq);
3851
3852 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3853 0, IXL_DMA_LEN(&sc->sc_atq),
3854 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3855
3856 do {
3857 slot = &atq[cons];
3858 if (!ISSET(slot->iaq_flags, htole16(IXL_AQ_DD)))
3859 break;
3860
3861 iatq = (struct ixl_atq *)((intptr_t)slot->iaq_cookie);
3862 iatq->iatq_desc = *slot;
3863
3864 memset(slot, 0, sizeof(*slot));
3865
3866 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3867 ixl_aq_dump(sc, &iatq->iatq_desc, "atq response");
3868
3869 (*iatq->iatq_fn)(sc, &iatq->iatq_desc);
3870
3871 cons++;
3872 cons &= IXL_AQ_MASK;
3873 } while (cons != prod);
3874
3875 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3876 0, IXL_DMA_LEN(&sc->sc_atq),
3877 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3878
3879 sc->sc_atq_cons = cons;
3880 }
3881
3882 static void
3883 ixl_atq_done(struct ixl_softc *sc)
3884 {
3885
3886 mutex_enter(&sc->sc_atq_lock);
3887 ixl_atq_done_locked(sc);
3888 mutex_exit(&sc->sc_atq_lock);
3889 }
3890
3891 static void
3892 ixl_wakeup(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
3893 {
3894
3895 KASSERT(mutex_owned(&sc->sc_atq_lock));
3896
3897 cv_signal(&sc->sc_atq_cv);
3898 }
3899
3900 static int
3901 ixl_atq_exec(struct ixl_softc *sc, struct ixl_atq *iatq)
3902 {
3903 int error;
3904
3905 KASSERT(iatq->iatq_desc.iaq_cookie == 0);
3906
3907 ixl_atq_set(iatq, ixl_wakeup);
3908
3909 mutex_enter(&sc->sc_atq_lock);
3910 error = ixl_atq_post_locked(sc, iatq);
3911 if (error) {
3912 mutex_exit(&sc->sc_atq_lock);
3913 return error;
3914 }
3915
3916 error = cv_timedwait(&sc->sc_atq_cv, &sc->sc_atq_lock,
3917 IXL_ATQ_EXEC_TIMEOUT);
3918 mutex_exit(&sc->sc_atq_lock);
3919
3920 return error;
3921 }
3922
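/*
 * Post an admin command and busy-wait for the firmware to consume it,
 * polling the ATQ head roughly once per millisecond for up to 'tm'
 * iterations; used for commands such as ixl_get_version().
 */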
3923 static int
3924 ixl_atq_poll(struct ixl_softc *sc, struct ixl_aq_desc *iaq, unsigned int tm)
3925 {
3926 struct ixl_aq_desc *atq, *slot;
3927 unsigned int prod;
3928 unsigned int t = 0;
3929
3930 mutex_enter(&sc->sc_atq_lock);
3931
3932 atq = IXL_DMA_KVA(&sc->sc_atq);
3933 prod = sc->sc_atq_prod;
3934 slot = atq + prod;
3935
3936 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3937 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
3938
3939 *slot = *iaq;
3940 slot->iaq_flags |= htole16(IXL_AQ_SI);
3941
3942 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3943 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
3944
3945 prod++;
3946 prod &= IXL_AQ_MASK;
3947 sc->sc_atq_prod = prod;
3948 ixl_wr(sc, sc->sc_aq_regs->atq_tail, prod);
3949
3950 while (ixl_rd(sc, sc->sc_aq_regs->atq_head) != prod) {
3951 delaymsec(1);
3952
3953 if (t++ > tm) {
3954 mutex_exit(&sc->sc_atq_lock);
3955 return ETIMEDOUT;
3956 }
3957 }
3958
3959 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3960 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTREAD);
3961 *iaq = *slot;
3962 memset(slot, 0, sizeof(*slot));
3963 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3964 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREREAD);
3965
3966 sc->sc_atq_cons = prod;
3967
3968 mutex_exit(&sc->sc_atq_lock);
3969
3970 return 0;
3971 }
3972
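/*
 * GET VERSION returns the firmware build number in iaq_param[1], the
 * firmware major/minor version packed into iaq_param[2] and the AQ API
 * major/minor version packed into iaq_param[3].
 */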
3973 static int
3974 ixl_get_version(struct ixl_softc *sc)
3975 {
3976 struct ixl_aq_desc iaq;
3977 uint32_t fwbuild, fwver, apiver;
3978 uint16_t api_maj_ver, api_min_ver;
3979
3980 memset(&iaq, 0, sizeof(iaq));
3981 iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VERSION);
3982
3983
3984
3985 if (ixl_atq_poll(sc, &iaq, 2000) != 0)
3986 return ETIMEDOUT;
3987 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK))
3988 return EIO;
3989
3990 fwbuild = le32toh(iaq.iaq_param[1]);
3991 fwver = le32toh(iaq.iaq_param[2]);
3992 apiver = le32toh(iaq.iaq_param[3]);
3993
3994 api_maj_ver = (uint16_t)apiver;
3995 api_min_ver = (uint16_t)(apiver >> 16);
3996
3997 aprint_normal(", FW %hu.%hu.%05u API %hu.%hu", (uint16_t)fwver,
3998 (uint16_t)(fwver >> 16), fwbuild, api_maj_ver, api_min_ver);
3999
4000 if (sc->sc_mac_type == I40E_MAC_X722) {
4001 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK |
4002 IXL_SC_AQ_FLAG_NVMREAD);
4003 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL);
4004 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RSS);
4005 }
4006
4007 #define IXL_API_VER(maj, min) (((uint32_t)(maj) << 16) | (min))
4008 if (IXL_API_VER(api_maj_ver, api_min_ver) >= IXL_API_VER(1, 5)) {
4009 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL);
4010 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK);
4011 }
4012 #undef IXL_API_VER
4013
4014 return 0;
4015 }
4016
4017 static int
4018 ixl_get_nvm_version(struct ixl_softc *sc)
4019 {
4020 uint16_t nvmver, cfg_ptr, eetrack_hi, eetrack_lo, oem_hi, oem_lo;
4021 uint32_t eetrack, oem;
4022 uint16_t nvm_maj_ver, nvm_min_ver, oem_build;
4023 uint8_t oem_ver, oem_patch;
4024
4025 nvmver = cfg_ptr = eetrack_hi = eetrack_lo = oem_hi = oem_lo = 0;
4026 ixl_rd16_nvm(sc, I40E_SR_NVM_DEV_STARTER_VERSION, &nvmver);
4027 ixl_rd16_nvm(sc, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
4028 ixl_rd16_nvm(sc, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
4029 ixl_rd16_nvm(sc, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
4030 ixl_rd16_nvm(sc, cfg_ptr + I40E_NVM_OEM_VER_OFF, &oem_hi);
4031 ixl_rd16_nvm(sc, cfg_ptr + I40E_NVM_OEM_VER_OFF + 1, &oem_lo);
4032
4033 nvm_maj_ver = (uint16_t)__SHIFTOUT(nvmver, IXL_NVM_VERSION_HI_MASK);
4034 nvm_min_ver = (uint16_t)__SHIFTOUT(nvmver, IXL_NVM_VERSION_LO_MASK);
4035 eetrack = ((uint32_t)eetrack_hi << 16) | eetrack_lo;
4036 oem = ((uint32_t)oem_hi << 16) | oem_lo;
4037 oem_ver = __SHIFTOUT(oem, IXL_NVM_OEMVERSION_MASK);
4038 oem_build = __SHIFTOUT(oem, IXL_NVM_OEMBUILD_MASK);
4039 oem_patch = __SHIFTOUT(oem, IXL_NVM_OEMPATCH_MASK);
4040
4041 aprint_normal(" nvm %x.%02x etid %08x oem %d.%d.%d",
4042 nvm_maj_ver, nvm_min_ver, eetrack,
4043 oem_ver, oem_build, oem_patch);
4044
4045 return 0;
4046 }
4047
4048 static int
4049 ixl_pxe_clear(struct ixl_softc *sc)
4050 {
4051 struct ixl_aq_desc iaq;
4052 int rv;
4053
4054 memset(&iaq, 0, sizeof(iaq));
4055 iaq.iaq_opcode = htole16(IXL_AQ_OP_CLEAR_PXE_MODE);
4056 iaq.iaq_param[0] = htole32(0x2);
4057
4058 rv = ixl_atq_poll(sc, &iaq, 250);
4059
4060 ixl_wr(sc, I40E_GLLAN_RCTL_0, 0x1);
4061
4062 if (rv != 0)
4063 return ETIMEDOUT;
4064
4065 switch (iaq.iaq_retval) {
4066 case htole16(IXL_AQ_RC_OK):
4067 case htole16(IXL_AQ_RC_EEXIST):
4068 break;
4069 default:
4070 return EIO;
4071 }
4072
4073 return 0;
4074 }
4075
4076 static int
4077 ixl_lldp_shut(struct ixl_softc *sc)
4078 {
4079 struct ixl_aq_desc iaq;
4080
4081 memset(&iaq, 0, sizeof(iaq));
4082 iaq.iaq_opcode = htole16(IXL_AQ_OP_LLDP_STOP_AGENT);
4083 iaq.iaq_param[0] = htole32(IXL_LLDP_SHUTDOWN);
4084
4085 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4086 aprint_error_dev(sc->sc_dev, "STOP LLDP AGENT timeout\n");
4087 return -1;
4088 }
4089
4090 switch (iaq.iaq_retval) {
4091 case htole16(IXL_AQ_RC_EMODE):
4092 case htole16(IXL_AQ_RC_EPERM):
4093 /* ignore silently */
4094 default:
4095 break;
4096 }
4097
4098 return 0;
4099 }
4100
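/*
 * Each record returned by LIST FUNC CAPABILITIES carries an id, a "number"
 * and a "logical_id"; for RSS these encode the lookup table size and entry
 * width, for RX/TX queues the number of queues granted to this function.
 */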
4101 static void
4102 ixl_parse_hw_capability(struct ixl_softc *sc, struct ixl_aq_capability *cap)
4103 {
4104 uint16_t id;
4105 uint32_t number, logical_id;
4106
4107 id = le16toh(cap->cap_id);
4108 number = le32toh(cap->number);
4109 logical_id = le32toh(cap->logical_id);
4110
4111 switch (id) {
4112 case IXL_AQ_CAP_RSS:
4113 sc->sc_rss_table_size = number;
4114 sc->sc_rss_table_entry_width = logical_id;
4115 break;
4116 case IXL_AQ_CAP_RXQ:
4117 case IXL_AQ_CAP_TXQ:
4118 sc->sc_nqueue_pairs_device = MIN(number,
4119 sc->sc_nqueue_pairs_device);
4120 break;
4121 }
4122 }
4123
4124 static int
4125 ixl_get_hw_capabilities(struct ixl_softc *sc)
4126 {
4127 struct ixl_dmamem idm;
4128 struct ixl_aq_desc iaq;
4129 struct ixl_aq_capability *caps;
4130 size_t i, ncaps;
4131 bus_size_t caps_size;
4132 uint16_t status;
4133 int rv;
4134
4135 caps_size = sizeof(caps[0]) * 40;
4136 memset(&iaq, 0, sizeof(iaq));
4137 iaq.iaq_opcode = htole16(IXL_AQ_OP_LIST_FUNC_CAP);
4138
4139 do {
4140 if (ixl_dmamem_alloc(sc, &idm, caps_size, 0) != 0) {
4141 return -1;
4142 }
4143
4144 iaq.iaq_flags = htole16(IXL_AQ_BUF |
4145 (caps_size > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4146 iaq.iaq_datalen = htole16(caps_size);
4147 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
4148
4149 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0,
4150 IXL_DMA_LEN(&idm), BUS_DMASYNC_PREREAD);
4151
4152 rv = ixl_atq_poll(sc, &iaq, 250);
4153
4154 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0,
4155 IXL_DMA_LEN(&idm), BUS_DMASYNC_POSTREAD);
4156
4157 if (rv != 0) {
4158 aprint_error(", HW capabilities timeout\n");
4159 goto done;
4160 }
4161
4162 status = le16toh(iaq.iaq_retval);
4163
4164 if (status == IXL_AQ_RC_ENOMEM) {
4165 caps_size = le16toh(iaq.iaq_datalen);
4166 ixl_dmamem_free(sc, &idm);
4167 }
4168 } while (status == IXL_AQ_RC_ENOMEM);
4169
4170 if (status != IXL_AQ_RC_OK) {
4171 aprint_error(", HW capabilities error\n");
4172 goto done;
4173 }
4174
4175 caps = IXL_DMA_KVA(&idm);
4176 ncaps = le16toh(iaq.iaq_param[1]);
4177
4178 for (i = 0; i < ncaps; i++) {
4179 ixl_parse_hw_capability(sc, &caps[i]);
4180 }
4181
4182 done:
4183 ixl_dmamem_free(sc, &idm);
4184 return rv;
4185 }
4186
4187 static int
4188 ixl_get_mac(struct ixl_softc *sc)
4189 {
4190 struct ixl_dmamem idm;
4191 struct ixl_aq_desc iaq;
4192 struct ixl_aq_mac_addresses *addrs;
4193 int rv;
4194
4195 if (ixl_dmamem_alloc(sc, &idm, sizeof(*addrs), 0) != 0) {
4196 aprint_error(", unable to allocate mac addresses\n");
4197 return -1;
4198 }
4199
4200 memset(&iaq, 0, sizeof(iaq));
4201 iaq.iaq_flags = htole16(IXL_AQ_BUF);
4202 iaq.iaq_opcode = htole16(IXL_AQ_OP_MAC_ADDRESS_READ);
4203 iaq.iaq_datalen = htole16(sizeof(*addrs));
4204 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
4205
4206 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4207 BUS_DMASYNC_PREREAD);
4208
4209 rv = ixl_atq_poll(sc, &iaq, 250);
4210
4211 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4212 BUS_DMASYNC_POSTREAD);
4213
4214 if (rv != 0) {
4215 aprint_error(", MAC ADDRESS READ timeout\n");
4216 rv = -1;
4217 goto done;
4218 }
4219 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4220 aprint_error(", MAC ADDRESS READ error\n");
4221 rv = -1;
4222 goto done;
4223 }
4224
4225 addrs = IXL_DMA_KVA(&idm);
4226 if (!ISSET(iaq.iaq_param[0], htole32(IXL_AQ_MAC_PORT_VALID))) {
4227 printf(", port address is not valid\n");
4228 goto done;
4229 }
4230
4231 memcpy(sc->sc_enaddr, addrs->port, ETHER_ADDR_LEN);
4232 rv = 0;
4233
4234 done:
4235 ixl_dmamem_free(sc, &idm);
4236 return rv;
4237 }
4238
4239 static int
4240 ixl_get_switch_config(struct ixl_softc *sc)
4241 {
4242 struct ixl_dmamem idm;
4243 struct ixl_aq_desc iaq;
4244 struct ixl_aq_switch_config *hdr;
4245 struct ixl_aq_switch_config_element *elms, *elm;
4246 unsigned int nelm, i;
4247 int rv;
4248
4249 if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
4250 aprint_error_dev(sc->sc_dev,
4251 "unable to allocate switch config buffer\n");
4252 return -1;
4253 }
4254
4255 memset(&iaq, 0, sizeof(iaq));
4256 iaq.iaq_flags = htole16(IXL_AQ_BUF |
4257 (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4258 iaq.iaq_opcode = htole16(IXL_AQ_OP_SWITCH_GET_CONFIG);
4259 iaq.iaq_datalen = htole16(IXL_AQ_BUFLEN);
4260 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
4261
4262 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4263 BUS_DMASYNC_PREREAD);
4264
4265 rv = ixl_atq_poll(sc, &iaq, 250);
4266
4267 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4268 BUS_DMASYNC_POSTREAD);
4269
4270 if (rv != 0) {
4271 aprint_error_dev(sc->sc_dev, "GET SWITCH CONFIG timeout\n");
4272 rv = -1;
4273 goto done;
4274 }
4275 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4276 aprint_error_dev(sc->sc_dev, "GET SWITCH CONFIG error\n");
4277 rv = -1;
4278 goto done;
4279 }
4280
4281 hdr = IXL_DMA_KVA(&idm);
4282 elms = (struct ixl_aq_switch_config_element *)(hdr + 1);
4283
4284 nelm = le16toh(hdr->num_reported);
4285 if (nelm < 1) {
4286 aprint_error_dev(sc->sc_dev, "no switch config available\n");
4287 rv = -1;
4288 goto done;
4289 }
4290
4291 for (i = 0; i < nelm; i++) {
4292 elm = &elms[i];
4293
4294 aprint_debug_dev(sc->sc_dev,
4295 "type %x revision %u seid %04x\n",
4296 elm->type, elm->revision, le16toh(elm->seid));
4297 aprint_debug_dev(sc->sc_dev,
4298 "uplink %04x downlink %04x\n",
4299 le16toh(elm->uplink_seid),
4300 le16toh(elm->downlink_seid));
4301 aprint_debug_dev(sc->sc_dev,
4302 "conntype %x scheduler %04x extra %04x\n",
4303 elm->connection_type,
4304 le16toh(elm->scheduler_id),
4305 le16toh(elm->element_info));
4306 }
4307
4308 elm = &elms[0];
4309
4310 sc->sc_uplink_seid = elm->uplink_seid;
4311 sc->sc_downlink_seid = elm->downlink_seid;
4312 sc->sc_seid = elm->seid;
4313
4314 if ((sc->sc_uplink_seid == htole16(0)) !=
4315 (sc->sc_downlink_seid == htole16(0))) {
4316 aprint_error_dev(sc->sc_dev, "SEIDs are misconfigured\n");
4317 rv = -1;
4318 goto done;
4319 }
4320
4321 done:
4322 ixl_dmamem_free(sc, &idm);
4323 return rv;
4324 }
4325
4326 static int
4327 ixl_phy_mask_ints(struct ixl_softc *sc)
4328 {
4329 struct ixl_aq_desc iaq;
4330
4331 memset(&iaq, 0, sizeof(iaq));
4332 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_EVENT_MASK);
4333 iaq.iaq_param[2] = htole32(IXL_AQ_PHY_EV_MASK &
4334 ~(IXL_AQ_PHY_EV_LINK_UPDOWN | IXL_AQ_PHY_EV_MODULE_QUAL_FAIL |
4335 IXL_AQ_PHY_EV_MEDIA_NA));
4336
4337 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4338 aprint_error_dev(sc->sc_dev, "SET PHY EVENT MASK timeout\n");
4339 return -1;
4340 }
4341 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4342 aprint_error_dev(sc->sc_dev, "SET PHY EVENT MASK error\n");
4343 return -1;
4344 }
4345
4346 return 0;
4347 }
4348
4349 static int
4350 ixl_get_phy_abilities(struct ixl_softc *sc, struct ixl_dmamem *idm)
4351 {
4352 struct ixl_aq_desc iaq;
4353 int rv;
4354
4355 memset(&iaq, 0, sizeof(iaq));
4356 iaq.iaq_flags = htole16(IXL_AQ_BUF |
4357 (IXL_DMA_LEN(idm) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4358 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_GET_ABILITIES);
4359 iaq.iaq_datalen = htole16(IXL_DMA_LEN(idm));
4360 iaq.iaq_param[0] = htole32(IXL_AQ_PHY_REPORT_INIT);
4361 ixl_aq_dva(&iaq, IXL_DMA_DVA(idm));
4362
4363 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
4364 BUS_DMASYNC_PREREAD);
4365
4366 rv = ixl_atq_poll(sc, &iaq, 250);
4367
4368 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
4369 BUS_DMASYNC_POSTREAD);
4370
4371 if (rv != 0)
4372 return -1;
4373
4374 return le16toh(iaq.iaq_retval);
4375 }
4376
4377 static int
4378 ixl_get_phy_info(struct ixl_softc *sc)
4379 {
4380 struct ixl_dmamem idm;
4381 struct ixl_aq_phy_abilities *phy;
4382 int rv;
4383
4384 if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
4385 aprint_error_dev(sc->sc_dev,
4386 "unable to allocate phy abilities buffer\n");
4387 return -1;
4388 }
4389
4390 rv = ixl_get_phy_abilities(sc, &idm);
4391 switch (rv) {
4392 case -1:
4393 aprint_error_dev(sc->sc_dev, "GET PHY ABILITIES timeout\n");
4394 goto done;
4395 case IXL_AQ_RC_OK:
4396 break;
4397 case IXL_AQ_RC_EIO:
4398 		aprint_error_dev(sc->sc_dev, "unable to query phy types\n");
4399 goto done;
4400 default:
4401 aprint_error_dev(sc->sc_dev,
4402 		    "GET PHY ABILITIES error %u\n", rv);
4403 goto done;
4404 }
4405
4406 phy = IXL_DMA_KVA(&idm);
4407
4408 sc->sc_phy_types = le32toh(phy->phy_type);
4409 sc->sc_phy_types |= (uint64_t)le32toh(phy->phy_type_ext) << 32;
4410
4411 sc->sc_phy_abilities = phy->abilities;
4412 sc->sc_phy_linkspeed = phy->link_speed;
4413 sc->sc_phy_fec_cfg = phy->fec_cfg_curr_mod_ext_info &
4414 (IXL_AQ_ENABLE_FEC_KR | IXL_AQ_ENABLE_FEC_RS |
4415 IXL_AQ_REQUEST_FEC_KR | IXL_AQ_REQUEST_FEC_RS);
4416 sc->sc_eee_cap = phy->eee_capability;
4417 sc->sc_eeer_val = phy->eeer_val;
4418 sc->sc_d3_lpan = phy->d3_lpan;
4419
4420 rv = 0;
4421
4422 done:
4423 ixl_dmamem_free(sc, &idm);
4424 return rv;
4425 }
4426
4427 static int
4428 ixl_set_phy_config(struct ixl_softc *sc,
4429 uint8_t link_speed, uint8_t abilities, bool polling)
4430 {
4431 struct ixl_aq_phy_param *param;
4432 struct ixl_atq iatq;
4433 struct ixl_aq_desc *iaq;
4434 int error;
4435
4436 memset(&iatq, 0, sizeof(iatq));
4437
4438 iaq = &iatq.iatq_desc;
4439 iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_CONFIG);
4440 param = (struct ixl_aq_phy_param *)&iaq->iaq_param;
4441 param->phy_types = htole32((uint32_t)sc->sc_phy_types);
4442 param->phy_type_ext = (uint8_t)(sc->sc_phy_types >> 32);
4443 param->link_speed = link_speed;
4444 param->abilities = abilities | IXL_AQ_PHY_ABILITY_AUTO_LINK;
4445 param->fec_cfg = sc->sc_phy_fec_cfg;
4446 param->eee_capability = sc->sc_eee_cap;
4447 param->eeer_val = sc->sc_eeer_val;
4448 param->d3_lpan = sc->sc_d3_lpan;
4449
4450 if (polling)
4451 error = ixl_atq_poll(sc, iaq, 250);
4452 else
4453 error = ixl_atq_exec(sc, &iatq);
4454
4455 if (error != 0)
4456 return error;
4457
4458 switch (le16toh(iaq->iaq_retval)) {
4459 case IXL_AQ_RC_OK:
4460 break;
4461 case IXL_AQ_RC_EPERM:
4462 return EPERM;
4463 default:
4464 return EIO;
4465 }
4466
4467 return 0;
4468 }
4469
4470 static int
4471 ixl_set_phy_autoselect(struct ixl_softc *sc)
4472 {
4473 uint8_t link_speed, abilities;
4474
4475 link_speed = sc->sc_phy_linkspeed;
4476 abilities = IXL_PHY_ABILITY_LINKUP | IXL_PHY_ABILITY_AUTONEGO;
4477
4478 return ixl_set_phy_config(sc, link_speed, abilities, true);
4479 }
4480
4481 static int
4482 ixl_get_link_status_poll(struct ixl_softc *sc, int *l)
4483 {
4484 struct ixl_aq_desc iaq;
4485 struct ixl_aq_link_param *param;
4486 int link;
4487
4488 memset(&iaq, 0, sizeof(iaq));
4489 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
4490 param = (struct ixl_aq_link_param *)iaq.iaq_param;
4491 param->notify = IXL_AQ_LINK_NOTIFY;
4492
4493 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4494 return ETIMEDOUT;
4495 }
4496 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4497 return EIO;
4498 }
4499
4500 link = ixl_set_link_status(sc, &iaq);
4501
4502 if (l != NULL)
4503 *l = link;
4504
4505 return 0;
4506 }
4507
4508 static int
4509 ixl_get_vsi(struct ixl_softc *sc)
4510 {
4511 struct ixl_dmamem *vsi = &sc->sc_scratch;
4512 struct ixl_aq_desc iaq;
4513 struct ixl_aq_vsi_param *param;
4514 struct ixl_aq_vsi_reply *reply;
4515 struct ixl_aq_vsi_data *data;
4516 int rv;
4517
4518 /* grumble, vsi info isn't "known" at compile time */
4519
4520 memset(&iaq, 0, sizeof(iaq));
4521 iaq.iaq_flags = htole16(IXL_AQ_BUF |
4522 (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4523 iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VSI_PARAMS);
4524 iaq.iaq_datalen = htole16(IXL_DMA_LEN(vsi));
4525 ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
4526
4527 param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
4528 param->uplink_seid = sc->sc_seid;
4529
4530 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4531 BUS_DMASYNC_PREREAD);
4532
4533 rv = ixl_atq_poll(sc, &iaq, 250);
4534
4535 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4536 BUS_DMASYNC_POSTREAD);
4537
4538 if (rv != 0) {
4539 return ETIMEDOUT;
4540 }
4541
4542 switch (le16toh(iaq.iaq_retval)) {
4543 case IXL_AQ_RC_OK:
4544 break;
4545 case IXL_AQ_RC_ENOENT:
4546 return ENOENT;
4547 case IXL_AQ_RC_EACCES:
4548 return EACCES;
4549 default:
4550 return EIO;
4551 }
4552
4553 reply = (struct ixl_aq_vsi_reply *)iaq.iaq_param;
4554 sc->sc_vsi_number = le16toh(reply->vsi_number);
4555 data = IXL_DMA_KVA(vsi);
4556 sc->sc_vsi_stat_counter_idx = le16toh(data->stat_counter_idx);
4557
4558 return 0;
4559 }
4560
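/*
 * Update the VSI parameters: map all queue pairs contiguously into traffic
 * class 0 and select the port VLAN mode; VLAN tags are stripped into the
 * RX descriptor only when hardware VLAN tagging is enabled.
 */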
4561 static int
4562 ixl_set_vsi(struct ixl_softc *sc)
4563 {
4564 struct ixl_dmamem *vsi = &sc->sc_scratch;
4565 struct ixl_aq_desc iaq;
4566 struct ixl_aq_vsi_param *param;
4567 struct ixl_aq_vsi_data *data = IXL_DMA_KVA(vsi);
4568 unsigned int qnum;
4569 uint16_t val;
4570 int rv;
4571
4572 qnum = sc->sc_nqueue_pairs - 1;
4573
4574 data->valid_sections = htole16(IXL_AQ_VSI_VALID_QUEUE_MAP |
4575 IXL_AQ_VSI_VALID_VLAN);
4576
4577 CLR(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_MASK));
4578 SET(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_CONTIG));
4579 data->queue_mapping[0] = htole16(0);
4580 data->tc_mapping[0] = htole16((0 << IXL_AQ_VSI_TC_Q_OFFSET_SHIFT) |
4581 (qnum << IXL_AQ_VSI_TC_Q_NUMBER_SHIFT));
4582
4583 val = le16toh(data->port_vlan_flags);
4584 CLR(val, IXL_AQ_VSI_PVLAN_MODE_MASK | IXL_AQ_VSI_PVLAN_EMOD_MASK);
4585 SET(val, IXL_AQ_VSI_PVLAN_MODE_ALL);
4586
4587 if (ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWTAGGING)) {
4588 SET(val, IXL_AQ_VSI_PVLAN_EMOD_STR_BOTH);
4589 } else {
4590 SET(val, IXL_AQ_VSI_PVLAN_EMOD_NOTHING);
4591 }
4592
4593 data->port_vlan_flags = htole16(val);
4594
4595 /* grumble, vsi info isn't "known" at compile time */
4596
4597 memset(&iaq, 0, sizeof(iaq));
4598 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD |
4599 (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4600 iaq.iaq_opcode = htole16(IXL_AQ_OP_UPD_VSI_PARAMS);
4601 iaq.iaq_datalen = htole16(IXL_DMA_LEN(vsi));
4602 ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
4603
4604 param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
4605 param->uplink_seid = sc->sc_seid;
4606
4607 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4608 BUS_DMASYNC_PREWRITE);
4609
4610 rv = ixl_atq_poll(sc, &iaq, 250);
4611
4612 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4613 BUS_DMASYNC_POSTWRITE);
4614
4615 if (rv != 0) {
4616 return ETIMEDOUT;
4617 }
4618
4619 switch (le16toh(iaq.iaq_retval)) {
4620 case IXL_AQ_RC_OK:
4621 break;
4622 case IXL_AQ_RC_ENOENT:
4623 return ENOENT;
4624 case IXL_AQ_RC_EACCES:
4625 return EACCES;
4626 default:
4627 return EIO;
4628 }
4629
4630 return 0;
4631 }
4632
4633 static void
4634 ixl_set_filter_control(struct ixl_softc *sc)
4635 {
4636 uint32_t reg;
4637
4638 reg = ixl_rd_rx_csr(sc, I40E_PFQF_CTL_0);
4639
4640 CLR(reg, I40E_PFQF_CTL_0_HASHLUTSIZE_MASK);
4641 SET(reg, I40E_HASH_LUT_SIZE_128 << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT);
4642
4643 SET(reg, I40E_PFQF_CTL_0_FD_ENA_MASK);
4644 SET(reg, I40E_PFQF_CTL_0_ETYPE_ENA_MASK);
4645 SET(reg, I40E_PFQF_CTL_0_MACVLAN_ENA_MASK);
4646
4647 ixl_wr_rx_csr(sc, I40E_PFQF_CTL_0, reg);
4648 }
4649
4650 static inline void
4651 ixl_get_default_rss_key(uint32_t *buf, size_t len)
4652 {
4653 size_t cplen;
4654 uint8_t rss_seed[RSS_KEYSIZE];
4655
4656 rss_getkey(rss_seed);
4657 memset(buf, 0, len);
4658
4659 cplen = MIN(len, sizeof(rss_seed));
4660 memcpy(buf, rss_seed, cplen);
4661 }
4662
4663 static int
4664 ixl_set_rss_key(struct ixl_softc *sc, uint8_t *key, size_t keylen)
4665 {
4666 struct ixl_dmamem *idm;
4667 struct ixl_atq iatq;
4668 struct ixl_aq_desc *iaq;
4669 struct ixl_aq_rss_key_param *param;
4670 struct ixl_aq_rss_key_data *data;
4671 size_t len, datalen, stdlen, extlen;
4672 uint16_t vsi_id;
4673 int rv;
4674
4675 memset(&iatq, 0, sizeof(iatq));
4676 iaq = &iatq.iatq_desc;
4677 idm = &sc->sc_aqbuf;
4678
4679 datalen = sizeof(*data);
4680
4681 	/* XXX The buf size has to be less than the size of the register */
4682 datalen = MIN(IXL_RSS_KEY_SIZE_REG * sizeof(uint32_t), datalen);
4683
4684 iaq->iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD |
4685 (datalen > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4686 iaq->iaq_opcode = htole16(IXL_AQ_OP_RSS_SET_KEY);
4687 iaq->iaq_datalen = htole16(datalen);
4688
4689 param = (struct ixl_aq_rss_key_param *)iaq->iaq_param;
4690 vsi_id = (sc->sc_vsi_number << IXL_AQ_RSSKEY_VSI_ID_SHIFT) |
4691 IXL_AQ_RSSKEY_VSI_VALID;
4692 param->vsi_id = htole16(vsi_id);
4693
4694 memset(IXL_DMA_KVA(idm), 0, IXL_DMA_LEN(idm));
4695 data = IXL_DMA_KVA(idm);
4696
4697 len = MIN(keylen, datalen);
4698 stdlen = MIN(sizeof(data->standard_rss_key), len);
4699 memcpy(data->standard_rss_key, key, stdlen);
4700 len = (len > stdlen) ? (len - stdlen) : 0;
4701
4702 extlen = MIN(sizeof(data->extended_hash_key), len);
4703 	/* whatever remains of the key goes into the extended hash key */
4704 memcpy(data->extended_hash_key, key + stdlen, extlen);
4705
4706 ixl_aq_dva(iaq, IXL_DMA_DVA(idm));
4707
4708 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
4709 IXL_DMA_LEN(idm), BUS_DMASYNC_PREWRITE);
4710
4711 rv = ixl_atq_exec(sc, &iatq);
4712
4713 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
4714 IXL_DMA_LEN(idm), BUS_DMASYNC_POSTWRITE);
4715
4716 if (rv != 0) {
4717 return ETIMEDOUT;
4718 }
4719
4720 if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK)) {
4721 return EIO;
4722 }
4723
4724 return 0;
4725 }
4726
4727 static int
4728 ixl_set_rss_lut(struct ixl_softc *sc, uint8_t *lut, size_t lutlen)
4729 {
4730 struct ixl_dmamem *idm;
4731 struct ixl_atq iatq;
4732 struct ixl_aq_desc *iaq;
4733 struct ixl_aq_rss_lut_param *param;
4734 uint16_t vsi_id;
4735 uint8_t *data;
4736 size_t dmalen;
4737 int rv;
4738
4739 memset(&iatq, 0, sizeof(iatq));
4740 iaq = &iatq.iatq_desc;
4741 idm = &sc->sc_aqbuf;
4742
4743 dmalen = MIN(lutlen, IXL_DMA_LEN(idm));
4744
4745 iaq->iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD |
4746 (dmalen > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4747 iaq->iaq_opcode = htole16(IXL_AQ_OP_RSS_SET_LUT);
4748 iaq->iaq_datalen = htole16(dmalen);
4749
4750 memset(IXL_DMA_KVA(idm), 0, IXL_DMA_LEN(idm));
4751 data = IXL_DMA_KVA(idm);
4752 memcpy(data, lut, dmalen);
4753 ixl_aq_dva(iaq, IXL_DMA_DVA(idm));
4754
4755 param = (struct ixl_aq_rss_lut_param *)iaq->iaq_param;
4756 vsi_id = (sc->sc_vsi_number << IXL_AQ_RSSLUT_VSI_ID_SHIFT) |
4757 IXL_AQ_RSSLUT_VSI_VALID;
4758 param->vsi_id = htole16(vsi_id);
4759 param->flags = htole16(IXL_AQ_RSSLUT_TABLE_TYPE_PF <<
4760 IXL_AQ_RSSLUT_TABLE_TYPE_SHIFT);
4761
4762 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
4763 IXL_DMA_LEN(idm), BUS_DMASYNC_PREWRITE);
4764
4765 rv = ixl_atq_exec(sc, &iatq);
4766
4767 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
4768 IXL_DMA_LEN(idm), BUS_DMASYNC_POSTWRITE);
4769
4770 if (rv != 0) {
4771 return ETIMEDOUT;
4772 }
4773
4774 if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK)) {
4775 return EIO;
4776 }
4777
4778 return 0;
4779 }
4780
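/*
 * The RSS hash key is programmed either through the admin queue (when the
 * firmware advertises IXL_SC_AQ_FLAG_RSS, e.g. on X722) or by writing the
 * I40E_PFQF_HKEY registers directly.
 */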
4781 static int
4782 ixl_register_rss_key(struct ixl_softc *sc)
4783 {
4784 uint32_t rss_seed[IXL_RSS_KEY_SIZE_REG];
4785 int rv;
4786 size_t i;
4787
4788 ixl_get_default_rss_key(rss_seed, sizeof(rss_seed));
4789
4790 	if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RSS)) {
4791 rv = ixl_set_rss_key(sc, (uint8_t*)rss_seed,
4792 sizeof(rss_seed));
4793 } else {
4794 rv = 0;
4795 for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
4796 ixl_wr_rx_csr(sc, I40E_PFQF_HKEY(i), rss_seed[i]);
4797 }
4798 }
4799
4800 return rv;
4801 }
4802
4803 static void
4804 ixl_register_rss_pctype(struct ixl_softc *sc)
4805 {
4806 uint64_t set_hena = 0;
4807 uint32_t hena0, hena1;
4808
4809 if (sc->sc_mac_type == I40E_MAC_X722)
4810 set_hena = IXL_RSS_HENA_DEFAULT_X722;
4811 else
4812 set_hena = IXL_RSS_HENA_DEFAULT_XL710;
4813
4814 hena0 = ixl_rd_rx_csr(sc, I40E_PFQF_HENA(0));
4815 hena1 = ixl_rd_rx_csr(sc, I40E_PFQF_HENA(1));
4816
4817 SET(hena0, set_hena);
4818 SET(hena1, set_hena >> 32);
4819
4820 ixl_wr_rx_csr(sc, I40E_PFQF_HENA(0), hena0);
4821 ixl_wr_rx_csr(sc, I40E_PFQF_HENA(1), hena1);
4822 }
4823
4824 static int
4825 ixl_register_rss_hlut(struct ixl_softc *sc)
4826 {
4827 unsigned int qid;
4828 uint8_t hlut_buf[512], lut_mask;
4829 uint32_t *hluts;
4830 size_t i, hluts_num;
4831 int rv;
4832
4833 lut_mask = (0x01 << sc->sc_rss_table_entry_width) - 1;
4834
4835 for (i = 0; i < sc->sc_rss_table_size; i++) {
4836 qid = i % sc->sc_nqueue_pairs;
4837 hlut_buf[i] = qid & lut_mask;
4838 }
4839
4840 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RSS)) {
4841 rv = ixl_set_rss_lut(sc, hlut_buf, sizeof(hlut_buf));
4842 } else {
4843 rv = 0;
4844 hluts = (uint32_t *)hlut_buf;
4845 hluts_num = sc->sc_rss_table_size >> 2;
4846 for (i = 0; i < hluts_num; i++) {
4847 ixl_wr(sc, I40E_PFQF_HLUT(i), hluts[i]);
4848 }
4849 ixl_flush(sc);
4850 }
4851
4852 return rv;
4853 }
4854
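/*
 * RSS setup is three steps: program the hash key, enable the packet
 * classifier types that feed the hash (HENA), and fill the hash lookup
 * table that maps hash values to queue indexes.
 */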
4855 static void
4856 ixl_config_rss(struct ixl_softc *sc)
4857 {
4858
4859 KASSERT(mutex_owned(&sc->sc_cfg_lock));
4860
4861 ixl_register_rss_key(sc);
4862 ixl_register_rss_pctype(sc);
4863 ixl_register_rss_hlut(sc);
4864 }
4865
4866 static const struct ixl_phy_type *
4867 ixl_search_phy_type(uint8_t phy_type)
4868 {
4869 const struct ixl_phy_type *itype;
4870 uint64_t mask;
4871 unsigned int i;
4872
4873 if (phy_type >= 64)
4874 return NULL;
4875
4876 mask = 1ULL << phy_type;
4877
4878 for (i = 0; i < __arraycount(ixl_phy_type_map); i++) {
4879 itype = &ixl_phy_type_map[i];
4880
4881 if (ISSET(itype->phy_type, mask))
4882 return itype;
4883 }
4884
4885 return NULL;
4886 }
4887
4888 static uint64_t
4889 ixl_search_link_speed(uint8_t link_speed)
4890 {
4891 const struct ixl_speed_type *type;
4892 unsigned int i;
4893
4894 for (i = 0; i < __arraycount(ixl_speed_type_map); i++) {
4895 type = &ixl_speed_type_map[i];
4896
4897 if (ISSET(type->dev_speed, link_speed))
4898 return type->net_speed;
4899 }
4900
4901 return 0;
4902 }
4903
4904 static uint8_t
4905 ixl_search_baudrate(uint64_t baudrate)
4906 {
4907 const struct ixl_speed_type *type;
4908 unsigned int i;
4909
4910 for (i = 0; i < __arraycount(ixl_speed_type_map); i++) {
4911 type = &ixl_speed_type_map[i];
4912
4913 if (type->net_speed == baudrate) {
4914 return type->dev_speed;
4915 }
4916 }
4917
4918 return 0;
4919 }
4920
4921 static int
4922 ixl_restart_an(struct ixl_softc *sc)
4923 {
4924 struct ixl_aq_desc iaq;
4925
4926 memset(&iaq, 0, sizeof(iaq));
4927 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_RESTART_AN);
4928 iaq.iaq_param[0] =
4929 htole32(IXL_AQ_PHY_RESTART_AN | IXL_AQ_PHY_LINK_ENABLE);
4930
4931 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4932 aprint_error_dev(sc->sc_dev, "RESTART AN timeout\n");
4933 return -1;
4934 }
4935 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4936 aprint_error_dev(sc->sc_dev, "RESTART AN error\n");
4937 return -1;
4938 }
4939
4940 return 0;
4941 }
4942
4943 static int
4944 ixl_add_macvlan(struct ixl_softc *sc, const uint8_t *macaddr,
4945 uint16_t vlan, uint16_t flags)
4946 {
4947 struct ixl_aq_desc iaq;
4948 struct ixl_aq_add_macvlan *param;
4949 struct ixl_aq_add_macvlan_elem *elem;
4950
4951 memset(&iaq, 0, sizeof(iaq));
4952 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4953 iaq.iaq_opcode = htole16(IXL_AQ_OP_ADD_MACVLAN);
4954 iaq.iaq_datalen = htole16(sizeof(*elem));
4955 ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));
4956
4957 param = (struct ixl_aq_add_macvlan *)&iaq.iaq_param;
4958 param->num_addrs = htole16(1);
4959 param->seid0 = htole16(0x8000) | sc->sc_seid;
4960 param->seid1 = 0;
4961 param->seid2 = 0;
4962
4963 elem = IXL_DMA_KVA(&sc->sc_scratch);
4964 memset(elem, 0, sizeof(*elem));
4965 memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
4966 elem->flags = htole16(IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH | flags);
4967 elem->vlan = htole16(vlan);
4968
4969 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4970 return IXL_AQ_RC_EINVAL;
4971 }
4972
4973 switch (le16toh(iaq.iaq_retval)) {
4974 case IXL_AQ_RC_OK:
4975 break;
4976 case IXL_AQ_RC_ENOSPC:
4977 return ENOSPC;
4978 case IXL_AQ_RC_ENOENT:
4979 return ENOENT;
4980 case IXL_AQ_RC_EACCES:
4981 return EACCES;
4982 case IXL_AQ_RC_EEXIST:
4983 return EEXIST;
4984 case IXL_AQ_RC_EINVAL:
4985 return EINVAL;
4986 default:
4987 return EIO;
4988 }
4989
4990 return 0;
4991 }
4992
4993 static int
4994 ixl_remove_macvlan(struct ixl_softc *sc, const uint8_t *macaddr,
4995 uint16_t vlan, uint16_t flags)
4996 {
4997 struct ixl_aq_desc iaq;
4998 struct ixl_aq_remove_macvlan *param;
4999 struct ixl_aq_remove_macvlan_elem *elem;
5000
5001 memset(&iaq, 0, sizeof(iaq));
5002 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
5003 iaq.iaq_opcode = htole16(IXL_AQ_OP_REMOVE_MACVLAN);
5004 iaq.iaq_datalen = htole16(sizeof(*elem));
5005 ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));
5006
5007 param = (struct ixl_aq_remove_macvlan *)&iaq.iaq_param;
5008 param->num_addrs = htole16(1);
5009 param->seid0 = htole16(0x8000) | sc->sc_seid;
5010 param->seid1 = 0;
5011 param->seid2 = 0;
5012
5013 elem = IXL_DMA_KVA(&sc->sc_scratch);
5014 memset(elem, 0, sizeof(*elem));
5015 memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
5016 elem->flags = htole16(IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH | flags);
5017 elem->vlan = htole16(vlan);
5018
5019 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
5020 return EINVAL;
5021 }
5022
5023 switch (le16toh(iaq.iaq_retval)) {
5024 case IXL_AQ_RC_OK:
5025 break;
5026 case IXL_AQ_RC_ENOENT:
5027 return ENOENT;
5028 case IXL_AQ_RC_EACCES:
5029 return EACCES;
5030 case IXL_AQ_RC_EINVAL:
5031 return EINVAL;
5032 default:
5033 return EIO;
5034 }
5035
5036 return 0;
5037 }
5038
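/*
 * Set up the Host Memory Cache (HMC): the device keeps its LAN TX/RX queue
 * contexts in host memory provided by the driver.  Backing pages are listed
 * in page descriptors (sc_hmc_pd), which are in turn referenced by segment
 * descriptors (sc_hmc_sd) programmed through the PFHMC_SD* registers, and
 * each object type is given a base offset and count.
 */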
5039 static int
5040 ixl_hmc(struct ixl_softc *sc)
5041 {
5042 struct {
5043 uint32_t count;
5044 uint32_t minsize;
5045 bus_size_t objsiz;
5046 bus_size_t setoff;
5047 bus_size_t setcnt;
5048 } regs[] = {
5049 {
5050 0,
5051 IXL_HMC_TXQ_MINSIZE,
5052 I40E_GLHMC_LANTXOBJSZ,
5053 I40E_GLHMC_LANTXBASE(sc->sc_pf_id),
5054 I40E_GLHMC_LANTXCNT(sc->sc_pf_id),
5055 },
5056 {
5057 0,
5058 IXL_HMC_RXQ_MINSIZE,
5059 I40E_GLHMC_LANRXOBJSZ,
5060 I40E_GLHMC_LANRXBASE(sc->sc_pf_id),
5061 I40E_GLHMC_LANRXCNT(sc->sc_pf_id),
5062 },
5063 {
5064 0,
5065 0,
5066 I40E_GLHMC_FCOEDDPOBJSZ,
5067 I40E_GLHMC_FCOEDDPBASE(sc->sc_pf_id),
5068 I40E_GLHMC_FCOEDDPCNT(sc->sc_pf_id),
5069 },
5070 {
5071 0,
5072 0,
5073 I40E_GLHMC_FCOEFOBJSZ,
5074 I40E_GLHMC_FCOEFBASE(sc->sc_pf_id),
5075 I40E_GLHMC_FCOEFCNT(sc->sc_pf_id),
5076 },
5077 };
5078 struct ixl_hmc_entry *e;
5079 uint64_t size, dva;
5080 uint8_t *kva;
5081 uint64_t *sdpage;
5082 unsigned int i;
5083 int npages, tables;
5084 uint32_t reg;
5085
5086 CTASSERT(__arraycount(regs) <= __arraycount(sc->sc_hmc_entries));
5087
5088 regs[IXL_HMC_LAN_TX].count = regs[IXL_HMC_LAN_RX].count =
5089 ixl_rd(sc, I40E_GLHMC_LANQMAX);
5090
5091 size = 0;
5092 for (i = 0; i < __arraycount(regs); i++) {
5093 e = &sc->sc_hmc_entries[i];
5094
5095 e->hmc_count = regs[i].count;
5096 reg = ixl_rd(sc, regs[i].objsiz);
5097 e->hmc_size = BIT_ULL(0x3F & reg);
5098 e->hmc_base = size;
5099
5100 if ((e->hmc_size * 8) < regs[i].minsize) {
5101 aprint_error_dev(sc->sc_dev,
5102 "kernel hmc entry is too big\n");
5103 return -1;
5104 }
5105
5106 size += roundup(e->hmc_size * e->hmc_count, IXL_HMC_ROUNDUP);
5107 }
5108 size = roundup(size, IXL_HMC_PGSIZE);
5109 npages = size / IXL_HMC_PGSIZE;
5110
5111 tables = roundup(size, IXL_HMC_L2SZ) / IXL_HMC_L2SZ;
5112
5113 if (ixl_dmamem_alloc(sc, &sc->sc_hmc_pd, size, IXL_HMC_PGSIZE) != 0) {
5114 aprint_error_dev(sc->sc_dev,
5115 "unable to allocate hmc pd memory\n");
5116 return -1;
5117 }
5118
5119 if (ixl_dmamem_alloc(sc, &sc->sc_hmc_sd, tables * IXL_HMC_PGSIZE,
5120 IXL_HMC_PGSIZE) != 0) {
5121 aprint_error_dev(sc->sc_dev,
5122 "unable to allocate hmc sd memory\n");
5123 ixl_dmamem_free(sc, &sc->sc_hmc_pd);
5124 return -1;
5125 }
5126
5127 kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
5128 memset(kva, 0, IXL_DMA_LEN(&sc->sc_hmc_pd));
5129
5130 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
5131 0, IXL_DMA_LEN(&sc->sc_hmc_pd),
5132 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5133
5134 dva = IXL_DMA_DVA(&sc->sc_hmc_pd);
5135 sdpage = IXL_DMA_KVA(&sc->sc_hmc_sd);
5136 memset(sdpage, 0, IXL_DMA_LEN(&sc->sc_hmc_sd));
5137
5138 for (i = 0; (int)i < npages; i++) {
5139 *sdpage = htole64(dva | IXL_HMC_PDVALID);
5140 sdpage++;
5141
5142 dva += IXL_HMC_PGSIZE;
5143 }
5144
5145 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_sd),
5146 0, IXL_DMA_LEN(&sc->sc_hmc_sd),
5147 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5148
5149 dva = IXL_DMA_DVA(&sc->sc_hmc_sd);
5150 for (i = 0; (int)i < tables; i++) {
5151 uint32_t count;
5152
5153 KASSERT(npages >= 0);
5154
5155 count = ((unsigned int)npages > IXL_HMC_PGS) ?
5156 IXL_HMC_PGS : (unsigned int)npages;
5157
5158 ixl_wr(sc, I40E_PFHMC_SDDATAHIGH, dva >> 32);
5159 ixl_wr(sc, I40E_PFHMC_SDDATALOW, dva |
5160 (count << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
5161 (1U << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT));
5162 ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
5163 ixl_wr(sc, I40E_PFHMC_SDCMD,
5164 (1U << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | i);
5165
5166 npages -= IXL_HMC_PGS;
5167 dva += IXL_HMC_PGSIZE;
5168 }
5169
5170 for (i = 0; i < __arraycount(regs); i++) {
5171 e = &sc->sc_hmc_entries[i];
5172
5173 ixl_wr(sc, regs[i].setoff, e->hmc_base / IXL_HMC_ROUNDUP);
5174 ixl_wr(sc, regs[i].setcnt, e->hmc_count);
5175 }
5176
5177 return 0;
5178 }
5179
5180 static void
5181 ixl_hmc_free(struct ixl_softc *sc)
5182 {
5183 ixl_dmamem_free(sc, &sc->sc_hmc_sd);
5184 ixl_dmamem_free(sc, &sc->sc_hmc_pd);
5185 }
5186
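/*
 * Pack a host-order structure into the bit-exact layout the hardware
 * expects for an HMC object: each entry of the packing table gives a
 * source byte offset, a destination bit position (lsb) and a field width,
 * and the field is shifted into place eight bits at a time.
 */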
5187 static void
5188 ixl_hmc_pack(void *d, const void *s, const struct ixl_hmc_pack *packing,
5189 unsigned int npacking)
5190 {
5191 uint8_t *dst = d;
5192 const uint8_t *src = s;
5193 unsigned int i;
5194
5195 for (i = 0; i < npacking; i++) {
5196 const struct ixl_hmc_pack *pack = &packing[i];
5197 unsigned int offset = pack->lsb / 8;
5198 unsigned int align = pack->lsb % 8;
5199 const uint8_t *in = src + pack->offset;
5200 uint8_t *out = dst + offset;
5201 int width = pack->width;
5202 unsigned int inbits = 0;
5203
5204 if (align) {
5205 inbits = (*in++) << align;
5206 *out++ |= (inbits & 0xff);
5207 inbits >>= 8;
5208
5209 width -= 8 - align;
5210 }
5211
5212 while (width >= 8) {
5213 inbits |= (*in++) << align;
5214 *out++ = (inbits & 0xff);
5215 inbits >>= 8;
5216
5217 width -= 8;
5218 }
5219
5220 if (width > 0) {
5221 inbits |= (*in) << align;
5222 *out |= (inbits & ((1 << width) - 1));
5223 }
5224 }
5225 }
5226
5227 static struct ixl_aq_buf *
5228 ixl_aqb_alloc(struct ixl_softc *sc)
5229 {
5230 struct ixl_aq_buf *aqb;
5231
5232 aqb = malloc(sizeof(*aqb), M_DEVBUF, M_WAITOK);
5233 if (aqb == NULL)
5234 return NULL;
5235
5236 aqb->aqb_size = IXL_AQ_BUFLEN;
5237
5238 if (bus_dmamap_create(sc->sc_dmat, aqb->aqb_size, 1,
5239 aqb->aqb_size, 0,
5240 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &aqb->aqb_map) != 0)
5241 goto free;
5242 if (bus_dmamem_alloc(sc->sc_dmat, aqb->aqb_size,
5243 IXL_AQ_ALIGN, 0, &aqb->aqb_seg, 1, &aqb->aqb_nsegs,
5244 BUS_DMA_WAITOK) != 0)
5245 goto destroy;
5246 if (bus_dmamem_map(sc->sc_dmat, &aqb->aqb_seg, aqb->aqb_nsegs,
5247 aqb->aqb_size, &aqb->aqb_data, BUS_DMA_WAITOK) != 0)
5248 goto dma_free;
5249 if (bus_dmamap_load(sc->sc_dmat, aqb->aqb_map, aqb->aqb_data,
5250 aqb->aqb_size, NULL, BUS_DMA_WAITOK) != 0)
5251 goto unmap;
5252
5253 return aqb;
5254 unmap:
5255 bus_dmamem_unmap(sc->sc_dmat, aqb->aqb_data, aqb->aqb_size);
5256 dma_free:
5257 bus_dmamem_free(sc->sc_dmat, &aqb->aqb_seg, 1);
5258 destroy:
5259 bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
5260 free:
5261 free(aqb, M_DEVBUF);
5262
5263 return NULL;
5264 }
5265
5266 static void
5267 ixl_aqb_free(struct ixl_softc *sc, struct ixl_aq_buf *aqb)
5268 {
5269 bus_dmamap_unload(sc->sc_dmat, aqb->aqb_map);
5270 bus_dmamem_unmap(sc->sc_dmat, aqb->aqb_data, aqb->aqb_size);
5271 bus_dmamem_free(sc->sc_dmat, &aqb->aqb_seg, 1);
5272 bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
5273 free(aqb, M_DEVBUF);
5274 }
5275
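/*
 * Keep the Admin Receive Queue (ARQ) stocked with DMA buffers for firmware
 * events: reuse buffers from the idle list when possible, allocate new ones
 * otherwise, and advance the tail register once if anything was posted.
 */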
5276 static int
5277 ixl_arq_fill(struct ixl_softc *sc)
5278 {
5279 struct ixl_aq_buf *aqb;
5280 struct ixl_aq_desc *arq, *iaq;
5281 unsigned int prod = sc->sc_arq_prod;
5282 unsigned int n;
5283 int post = 0;
5284
5285 n = ixl_rxr_unrefreshed(sc->sc_arq_prod, sc->sc_arq_cons,
5286 IXL_AQ_NUM);
5287 arq = IXL_DMA_KVA(&sc->sc_arq);
5288
5289 if (__predict_false(n <= 0))
5290 return 0;
5291
5292 do {
5293 aqb = sc->sc_arq_live[prod];
5294 iaq = &arq[prod];
5295
5296 if (aqb == NULL) {
5297 aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle);
5298 if (aqb != NULL) {
5299 SIMPLEQ_REMOVE(&sc->sc_arq_idle, aqb,
5300 ixl_aq_buf, aqb_entry);
5301 } else if ((aqb = ixl_aqb_alloc(sc)) == NULL) {
5302 break;
5303 }
5304
5305 sc->sc_arq_live[prod] = aqb;
5306 memset(aqb->aqb_data, 0, aqb->aqb_size);
5307
5308 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0,
5309 aqb->aqb_size, BUS_DMASYNC_PREREAD);
5310
5311 iaq->iaq_flags = htole16(IXL_AQ_BUF |
5312 (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ?
5313 IXL_AQ_LB : 0));
5314 iaq->iaq_opcode = 0;
5315 iaq->iaq_datalen = htole16(aqb->aqb_size);
5316 iaq->iaq_retval = 0;
5317 iaq->iaq_cookie = 0;
5318 iaq->iaq_param[0] = 0;
5319 iaq->iaq_param[1] = 0;
5320 ixl_aq_dva(iaq, aqb->aqb_map->dm_segs[0].ds_addr);
5321 }
5322
5323 prod++;
5324 prod &= IXL_AQ_MASK;
5325
5326 post = 1;
5327
5328 } while (--n);
5329
5330 if (post) {
5331 sc->sc_arq_prod = prod;
5332 ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
5333 }
5334
5335 return post;
5336 }
5337
5338 static void
5339 ixl_arq_unfill(struct ixl_softc *sc)
5340 {
5341 struct ixl_aq_buf *aqb;
5342 unsigned int i;
5343
5344 for (i = 0; i < __arraycount(sc->sc_arq_live); i++) {
5345 aqb = sc->sc_arq_live[i];
5346 if (aqb == NULL)
5347 continue;
5348
5349 sc->sc_arq_live[i] = NULL;
5350 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, aqb->aqb_size,
5351 BUS_DMASYNC_POSTREAD);
5352 ixl_aqb_free(sc, aqb);
5353 }
5354
5355 while ((aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle)) != NULL) {
5356 SIMPLEQ_REMOVE(&sc->sc_arq_idle, aqb,
5357 ixl_aq_buf, aqb_entry);
5358 ixl_aqb_free(sc, aqb);
5359 }
5360 }
5361
5362 static void
5363 ixl_clear_hw(struct ixl_softc *sc)
5364 {
5365 uint32_t num_queues, base_queue;
5366 uint32_t num_pf_int;
5367 uint32_t num_vf_int;
5368 uint32_t num_vfs;
5369 uint32_t i, j;
5370 uint32_t val;
5371 uint32_t eol = 0x7ff;
5372
5373 /* get number of interrupts, queues, and vfs */
5374 val = ixl_rd(sc, I40E_GLPCI_CNF2);
5375 num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
5376 I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
5377 num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
5378 I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
5379
5380 val = ixl_rd(sc, I40E_PFLAN_QALLOC);
5381 base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
5382 I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
5383 j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
5384 I40E_PFLAN_QALLOC_LASTQ_SHIFT;
5385 if (val & I40E_PFLAN_QALLOC_VALID_MASK)
5386 num_queues = (j - base_queue) + 1;
5387 else
5388 num_queues = 0;
5389
5390 val = ixl_rd(sc, I40E_PF_VT_PFALLOC);
5391 i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
5392 I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
5393 j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
5394 I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
5395 if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
5396 num_vfs = (j - i) + 1;
5397 else
5398 num_vfs = 0;
5399
5400 /* stop all the interrupts */
5401 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0);
5402 ixl_flush(sc);
5403 val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
5404 for (i = 0; i < num_pf_int - 2; i++)
5405 ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), val);
5406 ixl_flush(sc);
5407
5408 /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
5409 val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
5410 ixl_wr(sc, I40E_PFINT_LNKLST0, val);
5411 for (i = 0; i < num_pf_int - 2; i++)
5412 ixl_wr(sc, I40E_PFINT_LNKLSTN(i), val);
5413 val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
5414 for (i = 0; i < num_vfs; i++)
5415 ixl_wr(sc, I40E_VPINT_LNKLST0(i), val);
5416 for (i = 0; i < num_vf_int - 2; i++)
5417 ixl_wr(sc, I40E_VPINT_LNKLSTN(i), val);
5418
5419 /* warn the HW of the coming Tx disables */
5420 for (i = 0; i < num_queues; i++) {
5421 uint32_t abs_queue_idx = base_queue + i;
5422 uint32_t reg_block = 0;
5423
5424 if (abs_queue_idx >= 128) {
5425 reg_block = abs_queue_idx / 128;
5426 abs_queue_idx %= 128;
5427 }
5428
5429 val = ixl_rd(sc, I40E_GLLAN_TXPRE_QDIS(reg_block));
5430 val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
5431 val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
5432 val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
5433
5434 ixl_wr(sc, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
5435 }
5436 delaymsec(400);
5437
5438 /* stop all the queues */
5439 for (i = 0; i < num_queues; i++) {
5440 ixl_wr(sc, I40E_QINT_TQCTL(i), 0);
5441 ixl_wr(sc, I40E_QTX_ENA(i), 0);
5442 ixl_wr(sc, I40E_QINT_RQCTL(i), 0);
5443 ixl_wr(sc, I40E_QRX_ENA(i), 0);
5444 }
5445
5446 /* short wait for all queue disables to settle */
5447 delaymsec(50);
5448 }
5449
5450 static int
5451 ixl_pf_reset(struct ixl_softc *sc)
5452 {
5453 uint32_t cnt = 0;
5454 uint32_t cnt1 = 0;
5455 uint32_t reg = 0, reg0 = 0;
5456 uint32_t grst_del;
5457
5458 /*
5459 * Poll for Global Reset steady state in case of recent GRST.
5460 * The grst delay value is in 100ms units, and we'll wait a
5461 * couple counts longer to be sure we don't just miss the end.
5462 */
5463 grst_del = ixl_rd(sc, I40E_GLGEN_RSTCTL);
5464 grst_del &= I40E_GLGEN_RSTCTL_GRSTDEL_MASK;
5465 grst_del >>= I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
5466
5467 grst_del = grst_del * 20;
5468
5469 for (cnt = 0; cnt < grst_del; cnt++) {
5470 reg = ixl_rd(sc, I40E_GLGEN_RSTAT);
5471 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
5472 break;
5473 delaymsec(100);
5474 }
5475 if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
5476 aprint_error(", Global reset polling failed to complete\n");
5477 return -1;
5478 }
5479
5480 /* Now Wait for the FW to be ready */
5481 for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
5482 reg = ixl_rd(sc, I40E_GLNVM_ULD);
5483 reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5484 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
5485 if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5486 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))
5487 break;
5488
5489 delaymsec(10);
5490 }
5491 if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5492 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
5493 aprint_error(", wait for FW Reset complete timed out "
5494 "(I40E_GLNVM_ULD = 0x%x)\n", reg);
5495 return -1;
5496 }
5497
5498 /*
5499 * If there was a Global Reset in progress when we got here,
5500 * we don't need to do the PF Reset
5501 */
5502 if (cnt == 0) {
5503 reg = ixl_rd(sc, I40E_PFGEN_CTRL);
5504 ixl_wr(sc, I40E_PFGEN_CTRL, reg | I40E_PFGEN_CTRL_PFSWR_MASK);
5505 for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) {
5506 reg = ixl_rd(sc, I40E_PFGEN_CTRL);
5507 if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
5508 break;
5509 delaymsec(1);
5510
5511 reg0 = ixl_rd(sc, I40E_GLGEN_RSTAT);
5512 if (reg0 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
5513 aprint_error(", Core reset upcoming."
5514 				    " Skipping PF reset request\n");
5515 return -1;
5516 }
5517 }
5518 if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
5519 aprint_error(", PF reset polling failed to complete"
5520 			    " (I40E_PFGEN_CTRL = 0x%x)\n", reg);
5521 return -1;
5522 }
5523 }
5524
5525 return 0;
5526 }
5527
5528 static int
5529 ixl_dmamem_alloc(struct ixl_softc *sc, struct ixl_dmamem *ixm,
5530 bus_size_t size, bus_size_t align)
5531 {
5532 ixm->ixm_size = size;
5533
5534 if (bus_dmamap_create(sc->sc_dmat, ixm->ixm_size, 1,
5535 ixm->ixm_size, 0,
5536 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
5537 &ixm->ixm_map) != 0)
5538 return 1;
5539 if (bus_dmamem_alloc(sc->sc_dmat, ixm->ixm_size,
5540 align, 0, &ixm->ixm_seg, 1, &ixm->ixm_nsegs,
5541 BUS_DMA_WAITOK) != 0)
5542 goto destroy;
5543 if (bus_dmamem_map(sc->sc_dmat, &ixm->ixm_seg, ixm->ixm_nsegs,
5544 ixm->ixm_size, &ixm->ixm_kva, BUS_DMA_WAITOK) != 0)
5545 goto free;
5546 if (bus_dmamap_load(sc->sc_dmat, ixm->ixm_map, ixm->ixm_kva,
5547 ixm->ixm_size, NULL, BUS_DMA_WAITOK) != 0)
5548 goto unmap;
5549
5550 memset(ixm->ixm_kva, 0, ixm->ixm_size);
5551
5552 return 0;
5553 unmap:
5554 bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
5555 free:
5556 bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
5557 destroy:
5558 bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
5559 return 1;
5560 }
5561
5562 static void
5563 ixl_dmamem_free(struct ixl_softc *sc, struct ixl_dmamem *ixm)
5564 {
5565 bus_dmamap_unload(sc->sc_dmat, ixm->ixm_map);
5566 bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
5567 bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
5568 bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
5569 }
5570
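/*
 * Switch to hardware VLAN filtering: replace the catch-all "ignore VLAN"
 * MAC filters with perfect-match MAC+VLAN filters for the station and
 * broadcast addresses, one pair per configured VLAN ID.
 */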
5571 static int
5572 ixl_setup_vlan_hwfilter(struct ixl_softc *sc)
5573 {
5574 struct ethercom *ec = &sc->sc_ec;
5575 struct vlanid_list *vlanidp;
5576 int rv;
5577
5578 ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
5579 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
5580 ixl_remove_macvlan(sc, etherbroadcastaddr, 0,
5581 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
5582
5583 rv = ixl_add_macvlan(sc, sc->sc_enaddr, 0,
5584 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5585 if (rv != 0)
5586 return rv;
5587 rv = ixl_add_macvlan(sc, etherbroadcastaddr, 0,
5588 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5589 if (rv != 0)
5590 return rv;
5591
5592 ETHER_LOCK(ec);
5593 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
5594 rv = ixl_add_macvlan(sc, sc->sc_enaddr,
5595 vlanidp->vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5596 if (rv != 0)
5597 break;
5598 rv = ixl_add_macvlan(sc, etherbroadcastaddr,
5599 vlanidp->vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5600 if (rv != 0)
5601 break;
5602 }
5603 ETHER_UNLOCK(ec);
5604
5605 return rv;
5606 }
5607
5608 static void
5609 ixl_teardown_vlan_hwfilter(struct ixl_softc *sc)
5610 {
5611 struct vlanid_list *vlanidp;
5612 struct ethercom *ec = &sc->sc_ec;
5613
5614 ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
5615 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5616 ixl_remove_macvlan(sc, etherbroadcastaddr, 0,
5617 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5618
5619 ETHER_LOCK(ec);
5620 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
5621 ixl_remove_macvlan(sc, sc->sc_enaddr,
5622 vlanidp->vid, IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5623 ixl_remove_macvlan(sc, etherbroadcastaddr,
5624 vlanidp->vid, IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5625 }
5626 ETHER_UNLOCK(ec);
5627
5628 ixl_add_macvlan(sc, sc->sc_enaddr, 0,
5629 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
5630 ixl_add_macvlan(sc, etherbroadcastaddr, 0,
5631 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
5632 }
5633
5634 static int
5635 ixl_update_macvlan(struct ixl_softc *sc)
5636 {
5637 int rv = 0;
5638 int next_ec_capenable = sc->sc_ec.ec_capenable;
5639
5640 if (ISSET(next_ec_capenable, ETHERCAP_VLAN_HWFILTER)) {
5641 rv = ixl_setup_vlan_hwfilter(sc);
5642 if (rv != 0)
5643 ixl_teardown_vlan_hwfilter(sc);
5644 } else {
5645 ixl_teardown_vlan_hwfilter(sc);
5646 }
5647
5648 return rv;
5649 }
5650
5651 static int
5652 ixl_ifflags_cb(struct ethercom *ec)
5653 {
5654 struct ifnet *ifp = &ec->ec_if;
5655 struct ixl_softc *sc = ifp->if_softc;
5656 int rv, change;
5657
5658 mutex_enter(&sc->sc_cfg_lock);
5659
5660 change = ec->ec_capenable ^ sc->sc_cur_ec_capenable;
5661
5662 if (ISSET(change, ETHERCAP_VLAN_HWTAGGING)) {
5663 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWTAGGING;
5664 rv = ENETRESET;
5665 goto out;
5666 }
5667
5668 if (ISSET(change, ETHERCAP_VLAN_HWFILTER)) {
5669 rv = ixl_update_macvlan(sc);
5670 if (rv == 0) {
5671 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWFILTER;
5672 } else {
5673 CLR(ec->ec_capenable, ETHERCAP_VLAN_HWFILTER);
5674 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER);
5675 }
5676 }
5677
5678 rv = ixl_iff(sc);
5679 out:
5680 mutex_exit(&sc->sc_cfg_lock);
5681
5682 return rv;
5683 }
5684
5685 static int
5686 ixl_set_link_status(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
5687 {
5688 const struct ixl_aq_link_status *status;
5689 const struct ixl_phy_type *itype;
5690
5691 uint64_t ifm_active = IFM_ETHER;
5692 uint64_t ifm_status = IFM_AVALID;
5693 int link_state = LINK_STATE_DOWN;
5694 uint64_t baudrate = 0;
5695
5696 status = (const struct ixl_aq_link_status *)iaq->iaq_param;
5697 if (!ISSET(status->link_info, IXL_AQ_LINK_UP_FUNCTION)) {
5698 ifm_active |= IFM_NONE;
5699 goto done;
5700 }
5701
5702 ifm_active |= IFM_FDX;
5703 ifm_status |= IFM_ACTIVE;
5704 link_state = LINK_STATE_UP;
5705
5706 itype = ixl_search_phy_type(status->phy_type);
5707 if (itype != NULL)
5708 ifm_active |= itype->ifm_type;
5709
5710 if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_TX))
5711 ifm_active |= IFM_ETH_TXPAUSE;
5712 if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_RX))
5713 ifm_active |= IFM_ETH_RXPAUSE;
5714
5715 baudrate = ixl_search_link_speed(status->link_speed);
5716
5717 done:
5718 /* NET_ASSERT_LOCKED() except during attach */
5719 sc->sc_media_active = ifm_active;
5720 sc->sc_media_status = ifm_status;
5721
5722 sc->sc_ec.ec_if.if_baudrate = baudrate;
5723
5724 return link_state;
5725 }
5726
5727 static int
5728 ixl_establish_intx(struct ixl_softc *sc)
5729 {
5730 pci_chipset_tag_t pc = sc->sc_pa.pa_pc;
5731 pci_intr_handle_t *intr;
5732 char xnamebuf[32];
5733 char intrbuf[PCI_INTRSTR_LEN];
5734 char const *intrstr;
5735
5736 KASSERT(sc->sc_nintrs == 1);
5737
5738 intr = &sc->sc_ihp[0];
5739
5740 intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf));
5741 snprintf(xnamebuf, sizeof(xnamebuf), "%s:legacy",
5742 device_xname(sc->sc_dev));
5743
5744 sc->sc_ihs[0] = pci_intr_establish_xname(pc, *intr, IPL_NET, ixl_intr,
5745 sc, xnamebuf);
5746
5747 if (sc->sc_ihs[0] == NULL) {
5748 aprint_error_dev(sc->sc_dev,
5749 "unable to establish interrupt at %s\n", intrstr);
5750 return -1;
5751 }
5752
5753 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
5754 return 0;
5755 }
5756
5757 static int
5758 ixl_establish_msix(struct ixl_softc *sc)
5759 {
5760 pci_chipset_tag_t pc = sc->sc_pa.pa_pc;
5761 kcpuset_t *affinity;
5762 unsigned int vector = 0;
5763 unsigned int i;
5764 int affinity_to, r;
5765 char xnamebuf[32];
5766 char intrbuf[PCI_INTRSTR_LEN];
5767 char const *intrstr;
5768
5769 kcpuset_create(&affinity, false);
5770
5771 /* the "other" intr is mapped to vector 0 */
5772 vector = 0;
5773 intrstr = pci_intr_string(pc, sc->sc_ihp[vector],
5774 intrbuf, sizeof(intrbuf));
5775 snprintf(xnamebuf, sizeof(xnamebuf), "%s others",
5776 device_xname(sc->sc_dev));
5777 sc->sc_ihs[vector] = pci_intr_establish_xname(pc,
5778 sc->sc_ihp[vector], IPL_NET, ixl_other_intr,
5779 sc, xnamebuf);
5780 if (sc->sc_ihs[vector] == NULL) {
5781 aprint_error_dev(sc->sc_dev,
5782 "unable to establish interrupt at %s\n", intrstr);
5783 goto fail;
5784 }
5785
5786 aprint_normal_dev(sc->sc_dev, "other interrupt at %s", intrstr);
5787
5788 affinity_to = ncpu > (int)sc->sc_nqueue_pairs_max ? 1 : 0;
5789 affinity_to = (affinity_to + sc->sc_nqueue_pairs_max) % ncpu;
5790
5791 kcpuset_zero(affinity);
5792 kcpuset_set(affinity, affinity_to);
5793 r = interrupt_distribute(sc->sc_ihs[vector], affinity, NULL);
5794 if (r == 0) {
5795 aprint_normal(", affinity to %u", affinity_to);
5796 }
5797 aprint_normal("\n");
5798 vector++;
5799
5800 sc->sc_msix_vector_queue = vector;
5801 affinity_to = ncpu > (int)sc->sc_nqueue_pairs_max ? 1 : 0;
5802
5803 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
5804 intrstr = pci_intr_string(pc, sc->sc_ihp[vector],
5805 intrbuf, sizeof(intrbuf));
5806 snprintf(xnamebuf, sizeof(xnamebuf), "%s TXRX%d",
5807 device_xname(sc->sc_dev), i);
5808
5809 sc->sc_ihs[vector] = pci_intr_establish_xname(pc,
5810 sc->sc_ihp[vector], IPL_NET, ixl_queue_intr,
5811 (void *)&sc->sc_qps[i], xnamebuf);
5812
5813 if (sc->sc_ihs[vector] == NULL) {
5814 aprint_error_dev(sc->sc_dev,
5815 "unable to establish interrupt at %s\n", intrstr);
5816 goto fail;
5817 }
5818
5819 aprint_normal_dev(sc->sc_dev,
5820 		    "for TXRX%d interrupt at %s", i, intrstr);
5821
5822 kcpuset_zero(affinity);
5823 kcpuset_set(affinity, affinity_to);
5824 r = interrupt_distribute(sc->sc_ihs[vector], affinity, NULL);
5825 if (r == 0) {
5826 aprint_normal(", affinity to %u", affinity_to);
5827 affinity_to = (affinity_to + 1) % ncpu;
5828 }
5829 aprint_normal("\n");
5830 vector++;
5831 }
5832
5833 kcpuset_destroy(affinity);
5834
5835 return 0;
5836 fail:
5837 for (i = 0; i < vector; i++) {
5838 pci_intr_disestablish(pc, sc->sc_ihs[i]);
5839 }
5840
5841 sc->sc_msix_vector_queue = 0;
5842
5843 kcpuset_destroy(affinity);
5844
5845 return -1;
5846 }
5847
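/*
 * Program the per-queue interrupt cause linkage.  Each vector's
 * PFINT_LNKLSTN entry points at its first (RX) queue; QINT_RQCTL chains
 * the RX cause to the TX cause of the same queue, and QINT_TQCTL ends the
 * list with the EOL index.  With MSI-X every queue pair has its own
 * vector, otherwise all queues share the single vector.
 */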
5848 static void
5849 ixl_config_queue_intr(struct ixl_softc *sc)
5850 {
5851 unsigned int i, vector;
5852
5853 if (sc->sc_intrtype == PCI_INTR_TYPE_MSIX) {
5854 vector = sc->sc_msix_vector_queue;
5855 } else {
5856 vector = I40E_INTR_NOTX_INTR;
5857
5858 ixl_wr(sc, I40E_PFINT_LNKLST0,
5859 (I40E_INTR_NOTX_QUEUE <<
5860 I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
5861 (I40E_QUEUE_TYPE_RX <<
5862 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
5863 }
5864
5865 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
5866 ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), 0);
5867 ixl_flush(sc);
5868
5869 ixl_wr(sc, I40E_PFINT_LNKLSTN(i),
5870 ((i) << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
5871 (I40E_QUEUE_TYPE_RX <<
5872 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
5873
5874 ixl_wr(sc, I40E_QINT_RQCTL(i),
5875 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
5876 (I40E_ITR_INDEX_RX <<
5877 I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
5878 (I40E_INTR_NOTX_RX_QUEUE <<
5879 I40E_QINT_RQCTL_MSIX0_INDX_SHIFT) |
5880 (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
5881 (I40E_QUEUE_TYPE_TX <<
5882 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
5883 I40E_QINT_RQCTL_CAUSE_ENA_MASK);
5884
5885 ixl_wr(sc, I40E_QINT_TQCTL(i),
5886 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
5887 (I40E_ITR_INDEX_TX <<
5888 I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
5889 (I40E_INTR_NOTX_TX_QUEUE <<
5890 I40E_QINT_TQCTL_MSIX0_INDX_SHIFT) |
5891 (I40E_QUEUE_TYPE_EOL <<
5892 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
5893 (I40E_QUEUE_TYPE_RX <<
5894 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) |
5895 I40E_QINT_TQCTL_CAUSE_ENA_MASK);
5896
5897 if (sc->sc_intrtype == PCI_INTR_TYPE_MSIX)
5898 vector++;
5899 }
5900 ixl_flush(sc);
5901
5902 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_RX), 0x7a);
5903 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_TX), 0x7a);
5904 ixl_flush(sc);
5905 }
5906
5907 static void
5908 ixl_config_other_intr(struct ixl_softc *sc)
5909 {
5910 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0);
5911 (void)ixl_rd(sc, I40E_PFINT_ICR0);
5912
5913 ixl_wr(sc, I40E_PFINT_ICR0_ENA,
5914 I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
5915 I40E_PFINT_ICR0_ENA_GRST_MASK |
5916 I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
5917 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
5918 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
5919 I40E_PFINT_ICR0_ENA_VFLR_MASK |
5920 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
5921 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
5922 I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK);
5923
5924 ixl_wr(sc, I40E_PFINT_LNKLST0, 0x7FF);
5925 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_OTHER), 0);
5926 ixl_wr(sc, I40E_PFINT_STAT_CTL0,
5927 (I40E_ITR_INDEX_OTHER <<
5928 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT));
5929 ixl_flush(sc);
5930 }
5931
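/*
 * Interrupt allocation: try MSI-X with one vector per queue pair plus one
 * for admin/other events, falling back to a single INTx vector if MSI-X
 * vectors cannot be allocated or established.
 */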
5932 static int
5933 ixl_setup_interrupts(struct ixl_softc *sc)
5934 {
5935 struct pci_attach_args *pa = &sc->sc_pa;
5936 pci_intr_type_t max_type, intr_type;
5937 int counts[PCI_INTR_TYPE_SIZE];
5938 int error;
5939 unsigned int i;
5940 bool retry;
5941
5942 memset(counts, 0, sizeof(counts));
5943 max_type = PCI_INTR_TYPE_MSIX;
5944 /* QPs + other interrupt */
5945 counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueue_pairs_max + 1;
5946 counts[PCI_INTR_TYPE_INTX] = 1;
5947
5948 if (ixl_param_nomsix)
5949 counts[PCI_INTR_TYPE_MSIX] = 0;
5950
5951 do {
5952 retry = false;
5953 error = pci_intr_alloc(pa, &sc->sc_ihp, counts, max_type);
5954 if (error != 0) {
5955 aprint_error_dev(sc->sc_dev,
5956 "couldn't map interrupt\n");
5957 break;
5958 }
5959
5960 intr_type = pci_intr_type(pa->pa_pc, sc->sc_ihp[0]);
5961 sc->sc_nintrs = counts[intr_type];
5962 KASSERT(sc->sc_nintrs > 0);
5963
5964 for (i = 0; i < sc->sc_nintrs; i++) {
5965 pci_intr_setattr(pa->pa_pc, &sc->sc_ihp[i],
5966 PCI_INTR_MPSAFE, true);
5967 }
5968
5969 sc->sc_ihs = kmem_alloc(sizeof(sc->sc_ihs[0]) * sc->sc_nintrs,
5970 KM_SLEEP);
5971
5972 if (intr_type == PCI_INTR_TYPE_MSIX) {
5973 error = ixl_establish_msix(sc);
5974 if (error) {
5975 counts[PCI_INTR_TYPE_MSIX] = 0;
5976 retry = true;
5977 }
5978 } else if (intr_type == PCI_INTR_TYPE_INTX) {
5979 error = ixl_establish_intx(sc);
5980 } else {
5981 error = -1;
5982 }
5983
5984 if (error) {
5985 kmem_free(sc->sc_ihs,
5986 sizeof(sc->sc_ihs[0]) * sc->sc_nintrs);
5987 pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs);
5988 } else {
5989 sc->sc_intrtype = intr_type;
5990 }
5991 } while (retry);
5992
5993 return error;
5994 }
5995
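/*
 * ixl_teardown_interrupts:
 * Disestablish the handlers and release everything allocated by
 * ixl_setup_interrupts().
 */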
5996 static void
5997 ixl_teardown_interrupts(struct ixl_softc *sc)
5998 {
5999 struct pci_attach_args *pa = &sc->sc_pa;
6000 unsigned int i;
6001
6002 for (i = 0; i < sc->sc_nintrs; i++) {
6003 pci_intr_disestablish(pa->pa_pc, sc->sc_ihs[i]);
6004 }
6005
6006 pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs);
6007
6008 kmem_free(sc->sc_ihs, sizeof(sc->sc_ihs[0]) * sc->sc_nintrs);
6009 sc->sc_ihs = NULL;
6010 sc->sc_nintrs = 0;
6011 }
6012
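/*
 * ixl_setup_stats:
 * Attach the event counters: per queue-pair software counters, the
 * admin/other event counters, and the hardware statistics mirrored from
 * the port and VSI registers. Also initialize the callout and work item
 * that periodically run ixl_stats_update().
 */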
6013 static int
6014 ixl_setup_stats(struct ixl_softc *sc)
6015 {
6016 struct ixl_queue_pair *qp;
6017 struct ixl_tx_ring *txr;
6018 struct ixl_rx_ring *rxr;
6019 struct ixl_stats_counters *isc;
6020 unsigned int i;
6021
6022 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
6023 qp = &sc->sc_qps[i];
6024 txr = qp->qp_txr;
6025 rxr = qp->qp_rxr;
6026
6027 evcnt_attach_dynamic(&txr->txr_defragged, EVCNT_TYPE_MISC,
6028 NULL, qp->qp_name, "m_defrag succeeded");
6029 evcnt_attach_dynamic(&txr->txr_defrag_failed, EVCNT_TYPE_MISC,
6030 NULL, qp->qp_name, "m_defrag failed");
6031 evcnt_attach_dynamic(&txr->txr_pcqdrop, EVCNT_TYPE_MISC,
6032 NULL, qp->qp_name, "Dropped in pcq");
6033 evcnt_attach_dynamic(&txr->txr_transmitdef, EVCNT_TYPE_MISC,
6034 NULL, qp->qp_name, "Deferred transmit");
6035 evcnt_attach_dynamic(&txr->txr_intr, EVCNT_TYPE_INTR,
6036 NULL, qp->qp_name, "Interrupt on queue");
6037 evcnt_attach_dynamic(&txr->txr_defer, EVCNT_TYPE_MISC,
6038 NULL, qp->qp_name, "Handled queue in softint/workqueue");
6039
6040 evcnt_attach_dynamic(&rxr->rxr_mgethdr_failed, EVCNT_TYPE_MISC,
6041 NULL, qp->qp_name, "MGETHDR failed");
6042 evcnt_attach_dynamic(&rxr->rxr_mgetcl_failed, EVCNT_TYPE_MISC,
6043 NULL, qp->qp_name, "MCLGET failed");
6044 evcnt_attach_dynamic(&rxr->rxr_mbuf_load_failed,
6045 EVCNT_TYPE_MISC, NULL, qp->qp_name,
6046 "bus_dmamap_load_mbuf failed");
6047 evcnt_attach_dynamic(&rxr->rxr_intr, EVCNT_TYPE_INTR,
6048 NULL, qp->qp_name, "Interrupt on queue");
6049 evcnt_attach_dynamic(&rxr->rxr_defer, EVCNT_TYPE_MISC,
6050 NULL, qp->qp_name, "Handled queue in softint/workqueue");
6051 }
6052
6053 evcnt_attach_dynamic(&sc->sc_event_atq, EVCNT_TYPE_INTR,
6054 NULL, device_xname(sc->sc_dev), "Interrupt for other events");
6055 evcnt_attach_dynamic(&sc->sc_event_link, EVCNT_TYPE_MISC,
6056 NULL, device_xname(sc->sc_dev), "Link status event");
6057 evcnt_attach_dynamic(&sc->sc_event_ecc_err, EVCNT_TYPE_MISC,
6058 NULL, device_xname(sc->sc_dev), "ECC error");
6059 evcnt_attach_dynamic(&sc->sc_event_pci_exception, EVCNT_TYPE_MISC,
6060 NULL, device_xname(sc->sc_dev), "PCI exception");
6061 evcnt_attach_dynamic(&sc->sc_event_crit_err, EVCNT_TYPE_MISC,
6062 NULL, device_xname(sc->sc_dev), "Critical error");
6063
6064 isc = &sc->sc_stats_counters;
6065 evcnt_attach_dynamic(&isc->isc_crc_errors, EVCNT_TYPE_MISC,
6066 NULL, device_xname(sc->sc_dev), "CRC errors");
6067 evcnt_attach_dynamic(&isc->isc_illegal_bytes, EVCNT_TYPE_MISC,
6068 NULL, device_xname(sc->sc_dev), "Illegal bytes");
6069 evcnt_attach_dynamic(&isc->isc_mac_local_faults, EVCNT_TYPE_MISC,
6070 NULL, device_xname(sc->sc_dev), "MAC local faults");
6071 evcnt_attach_dynamic(&isc->isc_mac_remote_faults, EVCNT_TYPE_MISC,
6072 NULL, device_xname(sc->sc_dev), "MAC remote faults");
6073 evcnt_attach_dynamic(&isc->isc_link_xon_rx, EVCNT_TYPE_MISC,
6074 NULL, device_xname(sc->sc_dev), "Rx xon");
6075 evcnt_attach_dynamic(&isc->isc_link_xon_tx, EVCNT_TYPE_MISC,
6076 NULL, device_xname(sc->sc_dev), "Tx xon");
6077 evcnt_attach_dynamic(&isc->isc_link_xoff_rx, EVCNT_TYPE_MISC,
6078 NULL, device_xname(sc->sc_dev), "Rx xoff");
6079 evcnt_attach_dynamic(&isc->isc_link_xoff_tx, EVCNT_TYPE_MISC,
6080 NULL, device_xname(sc->sc_dev), "Tx xoff");
6081 evcnt_attach_dynamic(&isc->isc_rx_fragments, EVCNT_TYPE_MISC,
6082 NULL, device_xname(sc->sc_dev), "Rx fragments");
6083 evcnt_attach_dynamic(&isc->isc_rx_jabber, EVCNT_TYPE_MISC,
6084 NULL, device_xname(sc->sc_dev), "Rx jabber");
6085
6086 evcnt_attach_dynamic(&isc->isc_rx_size_64, EVCNT_TYPE_MISC,
6087 NULL, device_xname(sc->sc_dev), "Rx size 64");
6088 evcnt_attach_dynamic(&isc->isc_rx_size_127, EVCNT_TYPE_MISC,
6089 NULL, device_xname(sc->sc_dev), "Rx size 127");
6090 evcnt_attach_dynamic(&isc->isc_rx_size_255, EVCNT_TYPE_MISC,
6091 NULL, device_xname(sc->sc_dev), "Rx size 255");
6092 evcnt_attach_dynamic(&isc->isc_rx_size_511, EVCNT_TYPE_MISC,
6093 NULL, device_xname(sc->sc_dev), "Rx size 511");
6094 evcnt_attach_dynamic(&isc->isc_rx_size_1023, EVCNT_TYPE_MISC,
6095 NULL, device_xname(sc->sc_dev), "Rx size 1023");
6096 evcnt_attach_dynamic(&isc->isc_rx_size_1522, EVCNT_TYPE_MISC,
6097 NULL, device_xname(sc->sc_dev), "Rx size 1522");
6098 evcnt_attach_dynamic(&isc->isc_rx_size_big, EVCNT_TYPE_MISC,
6099 NULL, device_xname(sc->sc_dev), "Rx jumbo packets");
6100 evcnt_attach_dynamic(&isc->isc_rx_undersize, EVCNT_TYPE_MISC,
6101 NULL, device_xname(sc->sc_dev), "Rx under size");
6102 evcnt_attach_dynamic(&isc->isc_rx_oversize, EVCNT_TYPE_MISC,
6103 NULL, device_xname(sc->sc_dev), "Rx over size");
6104
6105 evcnt_attach_dynamic(&isc->isc_rx_bytes, EVCNT_TYPE_MISC,
6106 NULL, device_xname(sc->sc_dev), "Rx bytes / port");
6107 evcnt_attach_dynamic(&isc->isc_rx_discards, EVCNT_TYPE_MISC,
6108 NULL, device_xname(sc->sc_dev), "Rx discards / port");
6109 evcnt_attach_dynamic(&isc->isc_rx_unicast, EVCNT_TYPE_MISC,
6110 NULL, device_xname(sc->sc_dev), "Rx unicast / port");
6111 evcnt_attach_dynamic(&isc->isc_rx_multicast, EVCNT_TYPE_MISC,
6112 NULL, device_xname(sc->sc_dev), "Rx multicast / port");
6113 evcnt_attach_dynamic(&isc->isc_rx_broadcast, EVCNT_TYPE_MISC,
6114 NULL, device_xname(sc->sc_dev), "Rx broadcast / port");
6115
6116 evcnt_attach_dynamic(&isc->isc_vsi_rx_bytes, EVCNT_TYPE_MISC,
6117 NULL, device_xname(sc->sc_dev), "Rx bytes / vsi");
6118 evcnt_attach_dynamic(&isc->isc_vsi_rx_discards, EVCNT_TYPE_MISC,
6119 NULL, device_xname(sc->sc_dev), "Rx discards / vsi");
6120 evcnt_attach_dynamic(&isc->isc_vsi_rx_unicast, EVCNT_TYPE_MISC,
6121 NULL, device_xname(sc->sc_dev), "Rx unicast / vsi");
6122 evcnt_attach_dynamic(&isc->isc_vsi_rx_multicast, EVCNT_TYPE_MISC,
6123 NULL, device_xname(sc->sc_dev), "Rx multicast / vsi");
6124 evcnt_attach_dynamic(&isc->isc_vsi_rx_broadcast, EVCNT_TYPE_MISC,
6125 NULL, device_xname(sc->sc_dev), "Rx broadcast / vsi");
6126
6127 evcnt_attach_dynamic(&isc->isc_tx_size_64, EVCNT_TYPE_MISC,
6128 NULL, device_xname(sc->sc_dev), "Tx size 64");
6129 evcnt_attach_dynamic(&isc->isc_tx_size_127, EVCNT_TYPE_MISC,
6130 NULL, device_xname(sc->sc_dev), "Tx size 127");
6131 evcnt_attach_dynamic(&isc->isc_tx_size_255, EVCNT_TYPE_MISC,
6132 NULL, device_xname(sc->sc_dev), "Tx size 255");
6133 evcnt_attach_dynamic(&isc->isc_tx_size_511, EVCNT_TYPE_MISC,
6134 NULL, device_xname(sc->sc_dev), "Tx size 511");
6135 evcnt_attach_dynamic(&isc->isc_tx_size_1023, EVCNT_TYPE_MISC,
6136 NULL, device_xname(sc->sc_dev), "Tx size 1023");
6137 evcnt_attach_dynamic(&isc->isc_tx_size_1522, EVCNT_TYPE_MISC,
6138 NULL, device_xname(sc->sc_dev), "Tx size 1522");
6139 evcnt_attach_dynamic(&isc->isc_tx_size_big, EVCNT_TYPE_MISC,
6140 NULL, device_xname(sc->sc_dev), "Tx jumbo packets");
6141
6142 evcnt_attach_dynamic(&isc->isc_tx_bytes, EVCNT_TYPE_MISC,
6143 NULL, device_xname(sc->sc_dev), "Tx bytes / port");
6144 evcnt_attach_dynamic(&isc->isc_tx_dropped_link_down, EVCNT_TYPE_MISC,
6145 NULL, device_xname(sc->sc_dev),
6146 "Tx dropped due to link down / port");
6147 evcnt_attach_dynamic(&isc->isc_tx_unicast, EVCNT_TYPE_MISC,
6148 NULL, device_xname(sc->sc_dev), "Tx unicast / port");
6149 evcnt_attach_dynamic(&isc->isc_tx_multicast, EVCNT_TYPE_MISC,
6150 NULL, device_xname(sc->sc_dev), "Tx multicast / port");
6151 evcnt_attach_dynamic(&isc->isc_tx_broadcast, EVCNT_TYPE_MISC,
6152 NULL, device_xname(sc->sc_dev), "Tx broadcast / port");
6153
6154 evcnt_attach_dynamic(&isc->isc_vsi_tx_bytes, EVCNT_TYPE_MISC,
6155 NULL, device_xname(sc->sc_dev), "Tx bytes / vsi");
6156 evcnt_attach_dynamic(&isc->isc_vsi_tx_errors, EVCNT_TYPE_MISC,
6157 NULL, device_xname(sc->sc_dev), "Tx errors / vsi");
6158 evcnt_attach_dynamic(&isc->isc_vsi_tx_unicast, EVCNT_TYPE_MISC,
6159 NULL, device_xname(sc->sc_dev), "Tx unicast / vsi");
6160 evcnt_attach_dynamic(&isc->isc_vsi_tx_multicast, EVCNT_TYPE_MISC,
6161 NULL, device_xname(sc->sc_dev), "Tx multicast / vsi");
6162 evcnt_attach_dynamic(&isc->isc_vsi_tx_broadcast, EVCNT_TYPE_MISC,
6163 NULL, device_xname(sc->sc_dev), "Tx broadcast / vsi");
6164
6165 sc->sc_stats_intval = ixl_param_stats_interval;
6166 callout_init(&sc->sc_stats_callout, CALLOUT_MPSAFE);
6167 callout_setfunc(&sc->sc_stats_callout, ixl_stats_callout, sc);
6168 ixl_work_set(&sc->sc_stats_task, ixl_stats_update, sc);
6169
6170 return 0;
6171 }
6172
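/*
 * ixl_teardown_stats:
 * Detach every event counter attached by ixl_setup_stats() and destroy
 * the statistics callout.
 */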
6173 static void
6174 ixl_teardown_stats(struct ixl_softc *sc)
6175 {
6176 struct ixl_tx_ring *txr;
6177 struct ixl_rx_ring *rxr;
6178 struct ixl_stats_counters *isc;
6179 unsigned int i;
6180
6181 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
6182 txr = sc->sc_qps[i].qp_txr;
6183 rxr = sc->sc_qps[i].qp_rxr;
6184
6185 evcnt_detach(&txr->txr_defragged);
6186 evcnt_detach(&txr->txr_defrag_failed);
6187 evcnt_detach(&txr->txr_pcqdrop);
6188 evcnt_detach(&txr->txr_transmitdef);
6189 evcnt_detach(&txr->txr_intr);
6190 evcnt_detach(&txr->txr_defer);
6191
6192 evcnt_detach(&rxr->rxr_mgethdr_failed);
6193 evcnt_detach(&rxr->rxr_mgetcl_failed);
6194 evcnt_detach(&rxr->rxr_mbuf_load_failed);
6195 evcnt_detach(&rxr->rxr_intr);
6196 evcnt_detach(&rxr->rxr_defer);
6197 }
6198
6199 isc = &sc->sc_stats_counters;
6200 evcnt_detach(&isc->isc_crc_errors);
6201 evcnt_detach(&isc->isc_illegal_bytes);
6202 evcnt_detach(&isc->isc_mac_local_faults);
6203 evcnt_detach(&isc->isc_mac_remote_faults);
6204 evcnt_detach(&isc->isc_link_xon_rx);
6205 evcnt_detach(&isc->isc_link_xon_tx);
6206 evcnt_detach(&isc->isc_link_xoff_rx);
6207 evcnt_detach(&isc->isc_link_xoff_tx);
6208 evcnt_detach(&isc->isc_rx_fragments);
6209 evcnt_detach(&isc->isc_rx_jabber);
6210 evcnt_detach(&isc->isc_rx_bytes);
6211 evcnt_detach(&isc->isc_rx_discards);
6212 evcnt_detach(&isc->isc_rx_unicast);
6213 evcnt_detach(&isc->isc_rx_multicast);
6214 evcnt_detach(&isc->isc_rx_broadcast);
6215 evcnt_detach(&isc->isc_rx_size_64);
6216 evcnt_detach(&isc->isc_rx_size_127);
6217 evcnt_detach(&isc->isc_rx_size_255);
6218 evcnt_detach(&isc->isc_rx_size_511);
6219 evcnt_detach(&isc->isc_rx_size_1023);
6220 evcnt_detach(&isc->isc_rx_size_1522);
6221 evcnt_detach(&isc->isc_rx_size_big);
6222 evcnt_detach(&isc->isc_rx_undersize);
6223 evcnt_detach(&isc->isc_rx_oversize);
6224 evcnt_detach(&isc->isc_tx_bytes);
6225 evcnt_detach(&isc->isc_tx_dropped_link_down);
6226 evcnt_detach(&isc->isc_tx_unicast);
6227 evcnt_detach(&isc->isc_tx_multicast);
6228 evcnt_detach(&isc->isc_tx_broadcast);
6229 evcnt_detach(&isc->isc_tx_size_64);
6230 evcnt_detach(&isc->isc_tx_size_127);
6231 evcnt_detach(&isc->isc_tx_size_255);
6232 evcnt_detach(&isc->isc_tx_size_511);
6233 evcnt_detach(&isc->isc_tx_size_1023);
6234 evcnt_detach(&isc->isc_tx_size_1522);
6235 evcnt_detach(&isc->isc_tx_size_big);
6236 evcnt_detach(&isc->isc_vsi_rx_discards);
6237 evcnt_detach(&isc->isc_vsi_rx_bytes);
6238 evcnt_detach(&isc->isc_vsi_rx_unicast);
6239 evcnt_detach(&isc->isc_vsi_rx_multicast);
6240 evcnt_detach(&isc->isc_vsi_rx_broadcast);
6241 evcnt_detach(&isc->isc_vsi_tx_errors);
6242 evcnt_detach(&isc->isc_vsi_tx_bytes);
6243 evcnt_detach(&isc->isc_vsi_tx_unicast);
6244 evcnt_detach(&isc->isc_vsi_tx_multicast);
6245 evcnt_detach(&isc->isc_vsi_tx_broadcast);
6246
6247 evcnt_detach(&sc->sc_event_atq);
6248 evcnt_detach(&sc->sc_event_link);
6249 evcnt_detach(&sc->sc_event_ecc_err);
6250 evcnt_detach(&sc->sc_event_pci_exception);
6251 evcnt_detach(&sc->sc_event_crit_err);
6252
6253 callout_destroy(&sc->sc_stats_callout);
6254 }
6255
6256 static void
6257 ixl_stats_callout(void *xsc)
6258 {
6259 struct ixl_softc *sc = xsc;
6260
6261 ixl_work_add(sc->sc_workq, &sc->sc_stats_task);
6262 callout_schedule(&sc->sc_stats_callout, mstohz(sc->sc_stats_intval));
6263 }
6264
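/*
 * ixl_stat_delta:
 * Read a hardware statistics counter and return how far it advanced
 * since the value recorded in *offset. Counters with a high register
 * are 48 bits wide, the others 32 bits; wrap-around is compensated by
 * adding 2^bitwidth when the new reading is below the stored offset.
 * The first reading (has_offset == false) only primes *offset and
 * returns 0.
 */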
6265 static uint64_t
6266 ixl_stat_delta(struct ixl_softc *sc, uint32_t reg_hi, uint32_t reg_lo,
6267 uint64_t *offset, bool has_offset)
6268 {
6269 uint64_t value, delta;
6270 int bitwidth;
6271
6272 bitwidth = reg_hi == 0 ? 32 : 48;
6273
6274 value = ixl_rd(sc, reg_lo);
6275
6276 if (bitwidth > 32) {
6277 value |= ((uint64_t)ixl_rd(sc, reg_hi) << 32);
6278 }
6279
6280 if (__predict_true(has_offset)) {
6281 delta = value;
6282 if (value < *offset)
6283 delta += ((uint64_t)1 << bitwidth);
6284 delta -= *offset;
6285 } else {
6286 delta = 0;
6287 }
6288 atomic_swap_64(offset, value);
6289
6290 return delta;
6291 }
6292
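/*
 * ixl_stats_update:
 * Periodic work that folds the deltas of the port (GLPRT_*) and VSI
 * (GLV_*) hardware counters into the event counters attached by
 * ixl_setup_stats().
 */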
6293 static void
6294 ixl_stats_update(void *xsc)
6295 {
6296 struct ixl_softc *sc = xsc;
6297 struct ixl_stats_counters *isc;
6298 uint64_t delta;
6299
6300 isc = &sc->sc_stats_counters;
6301
6302 /* errors */
6303 delta = ixl_stat_delta(sc,
6304 0, I40E_GLPRT_CRCERRS(sc->sc_port),
6305 &isc->isc_crc_errors_offset, isc->isc_has_offset);
6306 atomic_add_64(&isc->isc_crc_errors.ev_count, delta);
6307
6308 delta = ixl_stat_delta(sc,
6309 0, I40E_GLPRT_ILLERRC(sc->sc_port),
6310 &isc->isc_illegal_bytes_offset, isc->isc_has_offset);
6311 atomic_add_64(&isc->isc_illegal_bytes.ev_count, delta);
6312
6313 /* rx */
6314 delta = ixl_stat_delta(sc,
6315 I40E_GLPRT_GORCH(sc->sc_port), I40E_GLPRT_GORCL(sc->sc_port),
6316 &isc->isc_rx_bytes_offset, isc->isc_has_offset);
6317 atomic_add_64(&isc->isc_rx_bytes.ev_count, delta);
6318
6319 delta = ixl_stat_delta(sc,
6320 0, I40E_GLPRT_RDPC(sc->sc_port),
6321 &isc->isc_rx_discards_offset, isc->isc_has_offset);
6322 atomic_add_64(&isc->isc_rx_discards.ev_count, delta);
6323
6324 delta = ixl_stat_delta(sc,
6325 I40E_GLPRT_UPRCH(sc->sc_port), I40E_GLPRT_UPRCL(sc->sc_port),
6326 &isc->isc_rx_unicast_offset, isc->isc_has_offset);
6327 atomic_add_64(&isc->isc_rx_unicast.ev_count, delta);
6328
6329 delta = ixl_stat_delta(sc,
6330 I40E_GLPRT_MPRCH(sc->sc_port), I40E_GLPRT_MPRCL(sc->sc_port),
6331 &isc->isc_rx_multicast_offset, isc->isc_has_offset);
6332 atomic_add_64(&isc->isc_rx_multicast.ev_count, delta);
6333
6334 delta = ixl_stat_delta(sc,
6335 I40E_GLPRT_BPRCH(sc->sc_port), I40E_GLPRT_BPRCL(sc->sc_port),
6336 &isc->isc_rx_broadcast_offset, isc->isc_has_offset);
6337 atomic_add_64(&isc->isc_rx_broadcast.ev_count, delta);
6338
6339 /* Packet size stats rx */
6340 delta = ixl_stat_delta(sc,
6341 I40E_GLPRT_PRC64H(sc->sc_port), I40E_GLPRT_PRC64L(sc->sc_port),
6342 &isc->isc_rx_size_64_offset, isc->isc_has_offset);
6343 atomic_add_64(&isc->isc_rx_size_64.ev_count, delta);
6344
6345 delta = ixl_stat_delta(sc,
6346 I40E_GLPRT_PRC127H(sc->sc_port), I40E_GLPRT_PRC127L(sc->sc_port),
6347 &isc->isc_rx_size_127_offset, isc->isc_has_offset);
6348 atomic_add_64(&isc->isc_rx_size_127.ev_count, delta);
6349
6350 delta = ixl_stat_delta(sc,
6351 I40E_GLPRT_PRC255H(sc->sc_port), I40E_GLPRT_PRC255L(sc->sc_port),
6352 &isc->isc_rx_size_255_offset, isc->isc_has_offset);
6353 atomic_add_64(&isc->isc_rx_size_255.ev_count, delta);
6354
6355 delta = ixl_stat_delta(sc,
6356 I40E_GLPRT_PRC511H(sc->sc_port), I40E_GLPRT_PRC511L(sc->sc_port),
6357 &isc->isc_rx_size_511_offset, isc->isc_has_offset);
6358 atomic_add_64(&isc->isc_rx_size_511.ev_count, delta);
6359
6360 delta = ixl_stat_delta(sc,
6361 I40E_GLPRT_PRC1023H(sc->sc_port), I40E_GLPRT_PRC1023L(sc->sc_port),
6362 &isc->isc_rx_size_1023_offset, isc->isc_has_offset);
6363 atomic_add_64(&isc->isc_rx_size_1023.ev_count, delta);
6364
6365 delta = ixl_stat_delta(sc,
6366 I40E_GLPRT_PRC1522H(sc->sc_port), I40E_GLPRT_PRC1522L(sc->sc_port),
6367 &isc->isc_rx_size_1522_offset, isc->isc_has_offset);
6368 atomic_add_64(&isc->isc_rx_size_1522.ev_count, delta);
6369
6370 delta = ixl_stat_delta(sc,
6371 I40E_GLPRT_PRC9522H(sc->sc_port), I40E_GLPRT_PRC9522L(sc->sc_port),
6372 &isc->isc_rx_size_big_offset, isc->isc_has_offset);
6373 atomic_add_64(&isc->isc_rx_size_big.ev_count, delta);
6374
6375 delta = ixl_stat_delta(sc,
6376 0, I40E_GLPRT_RUC(sc->sc_port),
6377 &isc->isc_rx_undersize_offset, isc->isc_has_offset);
6378 atomic_add_64(&isc->isc_rx_undersize.ev_count, delta);
6379
6380 delta = ixl_stat_delta(sc,
6381 0, I40E_GLPRT_ROC(sc->sc_port),
6382 &isc->isc_rx_oversize_offset, isc->isc_has_offset);
6383 atomic_add_64(&isc->isc_rx_oversize.ev_count, delta);
6384
6385 /* tx */
6386 delta = ixl_stat_delta(sc,
6387 I40E_GLPRT_GOTCH(sc->sc_port), I40E_GLPRT_GOTCL(sc->sc_port),
6388 &isc->isc_tx_bytes_offset, isc->isc_has_offset);
6389 atomic_add_64(&isc->isc_tx_bytes.ev_count, delta);
6390
6391 delta = ixl_stat_delta(sc,
6392 0, I40E_GLPRT_TDOLD(sc->sc_port),
6393 &isc->isc_tx_dropped_link_down_offset, isc->isc_has_offset);
6394 atomic_add_64(&isc->isc_tx_dropped_link_down.ev_count, delta);
6395
6396 delta = ixl_stat_delta(sc,
6397 I40E_GLPRT_UPTCH(sc->sc_port), I40E_GLPRT_UPTCL(sc->sc_port),
6398 &isc->isc_tx_unicast_offset, isc->isc_has_offset);
6399 atomic_add_64(&isc->isc_tx_unicast.ev_count, delta);
6400
6401 delta = ixl_stat_delta(sc,
6402 I40E_GLPRT_MPTCH(sc->sc_port), I40E_GLPRT_MPTCL(sc->sc_port),
6403 &isc->isc_tx_multicast_offset, isc->isc_has_offset);
6404 atomic_add_64(&isc->isc_tx_multicast.ev_count, delta);
6405
6406 delta = ixl_stat_delta(sc,
6407 I40E_GLPRT_BPTCH(sc->sc_port), I40E_GLPRT_BPTCL(sc->sc_port),
6408 &isc->isc_tx_broadcast_offset, isc->isc_has_offset);
6409 atomic_add_64(&isc->isc_tx_broadcast.ev_count, delta);
6410
6411 /* Packet size stats tx */
6412 delta = ixl_stat_delta(sc,
6413 I40E_GLPRT_PTC64H(sc->sc_port), I40E_GLPRT_PTC64L(sc->sc_port),
6414 &isc->isc_tx_size_64_offset, isc->isc_has_offset);
6415 atomic_add_64(&isc->isc_tx_size_64.ev_count, delta);
6416
6417 delta = ixl_stat_delta(sc,
6418 I40E_GLPRT_PTC127H(sc->sc_port), I40E_GLPRT_PTC127L(sc->sc_port),
6419 &isc->isc_tx_size_127_offset, isc->isc_has_offset);
6420 atomic_add_64(&isc->isc_tx_size_127.ev_count, delta);
6421
6422 delta = ixl_stat_delta(sc,
6423 I40E_GLPRT_PTC255H(sc->sc_port), I40E_GLPRT_PTC255L(sc->sc_port),
6424 &isc->isc_tx_size_255_offset, isc->isc_has_offset);
6425 atomic_add_64(&isc->isc_tx_size_255.ev_count, delta);
6426
6427 delta = ixl_stat_delta(sc,
6428 I40E_GLPRT_PTC511H(sc->sc_port), I40E_GLPRT_PTC511L(sc->sc_port),
6429 &isc->isc_tx_size_511_offset, isc->isc_has_offset);
6430 atomic_add_64(&isc->isc_tx_size_511.ev_count, delta);
6431
6432 delta = ixl_stat_delta(sc,
6433 I40E_GLPRT_PTC1023H(sc->sc_port), I40E_GLPRT_PTC1023L(sc->sc_port),
6434 &isc->isc_tx_size_1023_offset, isc->isc_has_offset);
6435 atomic_add_64(&isc->isc_tx_size_1023.ev_count, delta);
6436
6437 delta = ixl_stat_delta(sc,
6438 I40E_GLPRT_PTC1522H(sc->sc_port), I40E_GLPRT_PTC1522L(sc->sc_port),
6439 &isc->isc_tx_size_1522_offset, isc->isc_has_offset);
6440 atomic_add_64(&isc->isc_tx_size_1522.ev_count, delta);
6441
6442 delta = ixl_stat_delta(sc,
6443 I40E_GLPRT_PTC9522H(sc->sc_port), I40E_GLPRT_PTC9522L(sc->sc_port),
6444 &isc->isc_tx_size_big_offset, isc->isc_has_offset);
6445 atomic_add_64(&isc->isc_tx_size_big.ev_count, delta);
6446
6447 /* mac faults */
6448 delta = ixl_stat_delta(sc,
6449 0, I40E_GLPRT_MLFC(sc->sc_port),
6450 &isc->isc_mac_local_faults_offset, isc->isc_has_offset);
6451 atomic_add_64(&isc->isc_mac_local_faults.ev_count, delta);
6452
6453 delta = ixl_stat_delta(sc,
6454 0, I40E_GLPRT_MRFC(sc->sc_port),
6455 &isc->isc_mac_remote_faults_offset, isc->isc_has_offset);
6456 atomic_add_64(&isc->isc_mac_remote_faults.ev_count, delta);
6457
6458 /* Flow control (LFC) stats */
6459 delta = ixl_stat_delta(sc,
6460 0, I40E_GLPRT_LXONRXC(sc->sc_port),
6461 &isc->isc_link_xon_rx_offset, isc->isc_has_offset);
6462 atomic_add_64(&isc->isc_link_xon_rx.ev_count, delta);
6463
6464 delta = ixl_stat_delta(sc,
6465 0, I40E_GLPRT_LXONTXC(sc->sc_port),
6466 &isc->isc_link_xon_tx_offset, isc->isc_has_offset);
6467 atomic_add_64(&isc->isc_link_xon_tx.ev_count, delta);
6468
6469 delta = ixl_stat_delta(sc,
6470 0, I40E_GLPRT_LXOFFRXC(sc->sc_port),
6471 &isc->isc_link_xoff_rx_offset, isc->isc_has_offset);
6472 atomic_add_64(&isc->isc_link_xoff_rx.ev_count, delta);
6473
6474 delta = ixl_stat_delta(sc,
6475 0, I40E_GLPRT_LXOFFTXC(sc->sc_port),
6476 &isc->isc_link_xoff_tx_offset, isc->isc_has_offset);
6477 atomic_add_64(&isc->isc_link_xoff_tx.ev_count, delta);
6478
6479 /* fragments */
6480 delta = ixl_stat_delta(sc,
6481 0, I40E_GLPRT_RFC(sc->sc_port),
6482 &isc->isc_rx_fragments_offset, isc->isc_has_offset);
6483 atomic_add_64(&isc->isc_rx_fragments.ev_count, delta);
6484
6485 delta = ixl_stat_delta(sc,
6486 0, I40E_GLPRT_RJC(sc->sc_port),
6487 &isc->isc_rx_jabber_offset, isc->isc_has_offset);
6488 atomic_add_64(&isc->isc_rx_jabber.ev_count, delta);
6489
6490 /* VSI rx counters */
6491 delta = ixl_stat_delta(sc,
6492 0, I40E_GLV_RDPC(sc->sc_vsi_stat_counter_idx),
6493 &isc->isc_vsi_rx_discards_offset, isc->isc_has_offset);
6494 atomic_add_64(&isc->isc_vsi_rx_discards.ev_count, delta);
6495
6496 delta = ixl_stat_delta(sc,
6497 I40E_GLV_GORCH(sc->sc_vsi_stat_counter_idx),
6498 I40E_GLV_GORCL(sc->sc_vsi_stat_counter_idx),
6499 &isc->isc_vsi_rx_bytes_offset, isc->isc_has_offset);
6500 atomic_add_64(&isc->isc_vsi_rx_bytes.ev_count, delta);
6501
6502 delta = ixl_stat_delta(sc,
6503 I40E_GLV_UPRCH(sc->sc_vsi_stat_counter_idx),
6504 I40E_GLV_UPRCL(sc->sc_vsi_stat_counter_idx),
6505 &isc->isc_vsi_rx_unicast_offset, isc->isc_has_offset);
6506 atomic_add_64(&isc->isc_vsi_rx_unicast.ev_count, delta);
6507
6508 delta = ixl_stat_delta(sc,
6509 I40E_GLV_MPRCH(sc->sc_vsi_stat_counter_idx),
6510 I40E_GLV_MPRCL(sc->sc_vsi_stat_counter_idx),
6511 &isc->isc_vsi_rx_multicast_offset, isc->isc_has_offset);
6512 atomic_add_64(&isc->isc_vsi_rx_multicast.ev_count, delta);
6513
6514 delta = ixl_stat_delta(sc,
6515 I40E_GLV_BPRCH(sc->sc_vsi_stat_counter_idx),
6516 I40E_GLV_BPRCL(sc->sc_vsi_stat_counter_idx),
6517 &isc->isc_vsi_rx_broadcast_offset, isc->isc_has_offset);
6518 atomic_add_64(&isc->isc_vsi_rx_broadcast.ev_count, delta);
6519
6520 /* VSI tx counters */
6521 delta = ixl_stat_delta(sc,
6522 0, I40E_GLV_TEPC(sc->sc_vsi_stat_counter_idx),
6523 &isc->isc_vsi_tx_errors_offset, isc->isc_has_offset);
6524 atomic_add_64(&isc->isc_vsi_tx_errors.ev_count, delta);
6525
6526 delta = ixl_stat_delta(sc,
6527 I40E_GLV_GOTCH(sc->sc_vsi_stat_counter_idx),
6528 I40E_GLV_GOTCL(sc->sc_vsi_stat_counter_idx),
6529 &isc->isc_vsi_tx_bytes_offset, isc->isc_has_offset);
6530 atomic_add_64(&isc->isc_vsi_tx_bytes.ev_count, delta);
6531
6532 delta = ixl_stat_delta(sc,
6533 I40E_GLV_UPTCH(sc->sc_vsi_stat_counter_idx),
6534 I40E_GLV_UPTCL(sc->sc_vsi_stat_counter_idx),
6535 &isc->isc_vsi_tx_unicast_offset, isc->isc_has_offset);
6536 atomic_add_64(&isc->isc_vsi_tx_unicast.ev_count, delta);
6537
6538 delta = ixl_stat_delta(sc,
6539 I40E_GLV_MPTCH(sc->sc_vsi_stat_counter_idx),
6540 I40E_GLV_MPTCL(sc->sc_vsi_stat_counter_idx),
6541 &isc->isc_vsi_tx_multicast_offset, isc->isc_has_offset);
6542 atomic_add_64(&isc->isc_vsi_tx_multicast.ev_count, delta);
6543
6544 delta = ixl_stat_delta(sc,
6545 I40E_GLV_BPTCH(sc->sc_vsi_stat_counter_idx),
6546 I40E_GLV_BPTCL(sc->sc_vsi_stat_counter_idx),
6547 &isc->isc_vsi_tx_broadcast_offset, isc->isc_has_offset);
6548 atomic_add_64(&isc->isc_vsi_tx_broadcast.ev_count, delta);
6549 }
6550
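/*
 * ixl_setup_sysctls:
 * Create the per-device sysctl tree under hw.<devname>: txrx_workqueue,
 * stats_interval, and rx/tx subtrees carrying the interrupt and
 * deferred processing limits.
 */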
6551 static int
6552 ixl_setup_sysctls(struct ixl_softc *sc)
6553 {
6554 const char *devname;
6555 struct sysctllog **log;
6556 const struct sysctlnode *rnode, *rxnode, *txnode;
6557 int error;
6558
6559 log = &sc->sc_sysctllog;
6560 devname = device_xname(sc->sc_dev);
6561
6562 error = sysctl_createv(log, 0, NULL, &rnode,
6563 0, CTLTYPE_NODE, devname,
6564 SYSCTL_DESCR("ixl information and settings"),
6565 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
6566 if (error)
6567 goto out;
6568
6569 error = sysctl_createv(log, 0, &rnode, NULL,
6570 CTLFLAG_READWRITE, CTLTYPE_BOOL, "txrx_workqueue",
6571 SYSCTL_DESCR("Use workqueue for packet processing"),
6572 NULL, 0, &sc->sc_txrx_workqueue, 0, CTL_CREATE, CTL_EOL);
6573 if (error)
6574 goto out;
6575
6576 error = sysctl_createv(log, 0, &rnode, NULL,
6577 CTLFLAG_READONLY, CTLTYPE_INT, "stats_interval",
6578 SYSCTL_DESCR("Statistics collection interval in milliseconds"),
6579 NULL, 0, &sc->sc_stats_intval, 0, CTL_CREATE, CTL_EOL);
if (error)
goto out;
6580
6581 error = sysctl_createv(log, 0, &rnode, &rxnode,
6582 0, CTLTYPE_NODE, "rx",
6583 SYSCTL_DESCR("ixl information and settings for Rx"),
6584 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
6585 if (error)
6586 goto out;
6587
6588 error = sysctl_createv(log, 0, &rxnode, NULL,
6589 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
6590 SYSCTL_DESCR("max number of Rx packets"
6591 " to process for interrupt processing"),
6592 NULL, 0, &sc->sc_rx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
6593 if (error)
6594 goto out;
6595
6596 error = sysctl_createv(log, 0, &rxnode, NULL,
6597 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
6598 SYSCTL_DESCR("max number of Rx packets"
6599 " to process for deferred processing"),
6600 NULL, 0, &sc->sc_rx_process_limit, 0, CTL_CREATE, CTL_EOL);
6601 if (error)
6602 goto out;
6603
6604 error = sysctl_createv(log, 0, &rnode, &txnode,
6605 0, CTLTYPE_NODE, "tx",
6606 SYSCTL_DESCR("ixl information and settings for Tx"),
6607 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
6608 if (error)
6609 goto out;
6610
6611 error = sysctl_createv(log, 0, &txnode, NULL,
6612 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
6613 SYSCTL_DESCR("max number of Tx packets"
6614 " to process for interrupt processing"),
6615 NULL, 0, &sc->sc_tx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
6616 if (error)
6617 goto out;
6618
6619 error = sysctl_createv(log, 0, &txnode, NULL,
6620 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
6621 SYSCTL_DESCR("max number of Tx packets"
6622 " to process for deferred processing"),
6623 NULL, 0, &sc->sc_tx_process_limit, 0, CTL_CREATE, CTL_EOL);
6624 if (error)
6625 goto out;
6626
6627 out:
6628 if (error) {
6629 aprint_error_dev(sc->sc_dev,
6630 "unable to create sysctl node\n");
6631 sysctl_teardown(log);
6632 }
6633
6634 return error;
6635 }
6636
6637 static void
6638 ixl_teardown_sysctls(struct ixl_softc *sc)
6639 {
6640
6641 sysctl_teardown(&sc->sc_sysctllog);
6642 }
6643
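/*
 * Thin wrappers around workqueue(9) used for deferred processing.
 * ixl_work_add() uses an atomic flag so that a given work item is
 * enqueued at most once until ixl_workq_work() starts running it.
 */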
6644 static struct workqueue *
6645 ixl_workq_create(const char *name, pri_t prio, int ipl, int flags)
6646 {
6647 struct workqueue *wq;
6648 int error;
6649
6650 error = workqueue_create(&wq, name, ixl_workq_work, NULL,
6651 prio, ipl, flags);
6652
6653 if (error)
6654 return NULL;
6655
6656 return wq;
6657 }
6658
6659 static void
6660 ixl_workq_destroy(struct workqueue *wq)
6661 {
6662
6663 workqueue_destroy(wq);
6664 }
6665
6666 static void
6667 ixl_work_set(struct ixl_work *work, void (*func)(void *), void *arg)
6668 {
6669
6670 memset(work, 0, sizeof(*work));
6671 work->ixw_func = func;
6672 work->ixw_arg = arg;
6673 }
6674
6675 static void
6676 ixl_work_add(struct workqueue *wq, struct ixl_work *work)
6677 {
6678 if (atomic_cas_uint(&work->ixw_added, 0, 1) != 0)
6679 return;
6680
6681 kpreempt_disable();
6682 workqueue_enqueue(wq, &work->ixw_cookie, NULL);
6683 kpreempt_enable();
6684 }
6685
6686 static void
6687 ixl_work_wait(struct workqueue *wq, struct ixl_work *work)
6688 {
6689
6690 workqueue_wait(wq, &work->ixw_cookie);
6691 }
6692
6693 static void
6694 ixl_workq_work(struct work *wk, void *context)
6695 {
6696 struct ixl_work *work;
6697
6698 work = container_of(wk, struct ixl_work, ixw_cookie);
6699
6700 atomic_swap_uint(&work->ixw_added, 0);
6701 work->ixw_func(work->ixw_arg);
6702 }
6703
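/*
 * RX control register accessors. When IXL_SC_AQ_FLAG_RXCTL is set these
 * registers are accessed through the admin queue rx-control commands,
 * retrying a few times on EAGAIN; otherwise, or if the admin queue
 * command keeps failing, plain register access is used as a fallback.
 */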
6704 static int
6705 ixl_rx_ctl_read(struct ixl_softc *sc, uint32_t reg, uint32_t *rv)
6706 {
6707 struct ixl_aq_desc iaq;
6708
6709 memset(&iaq, 0, sizeof(iaq));
6710 iaq.iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_REG_READ);
6711 iaq.iaq_param[1] = htole32(reg);
6712
6713 if (ixl_atq_poll(sc, &iaq, 250) != 0)
6714 return ETIMEDOUT;
6715
6716 switch (le16toh(iaq.iaq_retval)) {
6717 case IXL_AQ_RC_OK:
6718 /* success */
6719 break;
6720 case IXL_AQ_RC_EACCES:
6721 return EPERM;
6722 case IXL_AQ_RC_EAGAIN:
6723 return EAGAIN;
6724 default:
6725 return EIO;
6726 }
6727
6728 *rv = le32toh(iaq.iaq_param[3]);
6729 return 0;
6730 }
6731
6732 static uint32_t
6733 ixl_rd_rx_csr(struct ixl_softc *sc, uint32_t reg)
6734 {
6735 uint32_t val;
6736 int rv, retry, retry_limit;
6737
6738 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL)) {
6739 retry_limit = 5;
6740 } else {
6741 retry_limit = 0;
6742 }
6743
6744 for (retry = 0; retry < retry_limit; retry++) {
6745 rv = ixl_rx_ctl_read(sc, reg, &val);
6746 if (rv == 0)
6747 return val;
6748 else if (rv == EAGAIN)
6749 delaymsec(1);
6750 else
6751 break;
6752 }
6753
6754 val = ixl_rd(sc, reg);
6755
6756 return val;
6757 }
6758
6759 static int
6760 ixl_rx_ctl_write(struct ixl_softc *sc, uint32_t reg, uint32_t value)
6761 {
6762 struct ixl_aq_desc iaq;
6763
6764 memset(&iaq, 0, sizeof(iaq));
6765 iaq.iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_REG_WRITE);
6766 iaq.iaq_param[1] = htole32(reg);
6767 iaq.iaq_param[3] = htole32(value);
6768
6769 if (ixl_atq_poll(sc, &iaq, 250) != 0)
6770 return ETIMEDOUT;
6771
6772 switch (le16toh(iaq.iaq_retval)) {
6773 case IXL_AQ_RC_OK:
6774 /* success */
6775 break;
6776 case IXL_AQ_RC_EACCES:
6777 return EPERM;
6778 case IXL_AQ_RC_EAGAIN:
6779 return EAGAIN;
6780 default:
6781 return EIO;
6782 }
6783
6784 return 0;
6785 }
6786
6787 static void
6788 ixl_wr_rx_csr(struct ixl_softc *sc, uint32_t reg, uint32_t value)
6789 {
6790 int rv, retry, retry_limit;
6791
6792 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL)) {
6793 retry_limit = 5;
6794 } else {
6795 retry_limit = 0;
6796 }
6797
6798 for (retry = 0; retry < retry_limit; retry++) {
6799 rv = ixl_rx_ctl_write(sc, reg, value);
6800 if (rv == 0)
6801 return;
6802 else if (rv == EAGAIN)
6803 delaymsec(1);
6804 else
6805 break;
6806 }
6807
6808 ixl_wr(sc, reg, value);
6809 }
6810
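/*
 * ixl_nvm_lock/ixl_nvm_unlock:
 * Acquire and release ownership of the NVM resource through the admin
 * queue request/release resource commands. Both are no-ops unless
 * IXL_SC_AQ_FLAG_NVMLOCK is set.
 */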
6811 static int
6812 ixl_nvm_lock(struct ixl_softc *sc, char rw)
6813 {
6814 struct ixl_aq_desc iaq;
6815 struct ixl_aq_req_resource_param *param;
6816 int rv;
6817
6818 if (!ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK))
6819 return 0;
6820
6821 memset(&iaq, 0, sizeof(iaq));
6822 iaq.iaq_opcode = htole16(IXL_AQ_OP_REQUEST_RESOURCE);
6823
6824 param = (struct ixl_aq_req_resource_param *)&iaq.iaq_param;
6825 param->resource_id = htole16(IXL_AQ_RESOURCE_ID_NVM);
6826 if (rw == 'R') {
6827 param->access_type = htole16(IXL_AQ_RESOURCE_ACCES_READ);
6828 } else {
6829 param->access_type = htole16(IXL_AQ_RESOURCE_ACCES_WRITE);
6830 }
6831
6832 rv = ixl_atq_poll(sc, &iaq, 250);
6833
6834 if (rv != 0)
6835 return ETIMEDOUT;
6836
6837 switch (le16toh(iaq.iaq_retval)) {
6838 case IXL_AQ_RC_OK:
6839 break;
6840 case IXL_AQ_RC_EACCES:
6841 return EACCES;
6842 case IXL_AQ_RC_EBUSY:
6843 return EBUSY;
6844 case IXL_AQ_RC_EPERM:
6845 return EPERM;
default:
return EIO;
6846 }
6847
6848 return 0;
6849 }
6850
6851 static int
6852 ixl_nvm_unlock(struct ixl_softc *sc)
6853 {
6854 struct ixl_aq_desc iaq;
6855 struct ixl_aq_rel_resource_param *param;
6856 int rv;
6857
6858 if (!ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK))
6859 return 0;
6860
6861 memset(&iaq, 0, sizeof(iaq));
6862 iaq.iaq_opcode = htole16(IXL_AQ_OP_RELEASE_RESOURCE);
6863
6864 param = (struct ixl_aq_rel_resource_param *)&iaq.iaq_param;
6865 param->resource_id = htole16(IXL_AQ_RESOURCE_ID_NVM);
6866
6867 rv = ixl_atq_poll(sc, &iaq, 250);
6868
6869 if (rv != 0)
6870 return ETIMEDOUT;
6871
6872 switch (le16toh(iaq.iaq_retval)) {
6873 case IXL_AQ_RC_OK:
6874 break;
6875 default:
6876 return EIO;
6877 }
6878 return 0;
6879 }
6880
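/*
 * ixl_srdone_poll:
 * Poll GLNVM_SRCTL until the Shadow RAM controller reports DONE, up to
 * IXL_SRRD_SRCTL_ATTEMPTS times with a 5ms delay between attempts.
 */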
6881 static int
6882 ixl_srdone_poll(struct ixl_softc *sc)
6883 {
6884 int wait_count;
6885 uint32_t reg;
6886
6887 for (wait_count = 0; wait_count < IXL_SRRD_SRCTL_ATTEMPTS;
6888 wait_count++) {
6889 reg = ixl_rd(sc, I40E_GLNVM_SRCTL);
6890 if (ISSET(reg, I40E_GLNVM_SRCTL_DONE_MASK))
6891 break;
6892
6893 delaymsec(5);
6894 }
6895
6896 if (wait_count == IXL_SRRD_SRCTL_ATTEMPTS)
6897 return -1;
6898
6899 return 0;
6900 }
6901
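/*
 * ixl_nvm_read_srctl:
 * Read one 16-bit word from the Shadow RAM by programming GLNVM_SRCTL
 * with the word offset and the START bit, then fetching the result
 * from GLNVM_SRDATA.
 */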
6902 static int
6903 ixl_nvm_read_srctl(struct ixl_softc *sc, uint16_t offset, uint16_t *data)
6904 {
6905 uint32_t reg;
6906
6907 if (ixl_srdone_poll(sc) != 0)
6908 return ETIMEDOUT;
6909
6910 reg = ((uint32_t)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
6911 __BIT(I40E_GLNVM_SRCTL_START_SHIFT);
6912 ixl_wr(sc, I40E_GLNVM_SRCTL, reg);
6913
6914 if (ixl_srdone_poll(sc) != 0) {
6915 aprint_debug("NVM read error: couldn't access "
6916 "Shadow RAM address: 0x%x\n", offset);
6917 return ETIMEDOUT;
6918 }
6919
6920 reg = ixl_rd(sc, I40E_GLNVM_SRDATA);
6921 *data = (uint16_t)__SHIFTOUT(reg, I40E_GLNVM_SRDATA_RDDATA_MASK);
6922
6923 return 0;
6924 }
6925
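/*
 * ixl_nvm_read_aq:
 * Read from the NVM through the admin queue NVM read command into the
 * shared AQ DMA buffer. The offset argument is in 16-bit words and is
 * converted to a 24-bit byte offset for the command.
 */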
6926 static int
6927 ixl_nvm_read_aq(struct ixl_softc *sc, uint16_t offset_word,
6928 void *data, size_t len)
6929 {
6930 struct ixl_dmamem *idm;
6931 struct ixl_aq_desc iaq;
6932 struct ixl_aq_nvm_param *param;
6933 uint32_t offset_bytes;
6934 int rv;
6935
6936 idm = &sc->sc_aqbuf;
6937 if (len > IXL_DMA_LEN(idm))
6938 return ENOMEM;
6939
6940 memset(IXL_DMA_KVA(idm), 0, IXL_DMA_LEN(idm));
6941 memset(&iaq, 0, sizeof(iaq));
6942 iaq.iaq_opcode = htole16(IXL_AQ_OP_NVM_READ);
6943 iaq.iaq_flags = htole16(IXL_AQ_BUF |
6944 ((len > I40E_AQ_LARGE_BUF) ? IXL_AQ_LB : 0));
6945 iaq.iaq_datalen = htole16(len);
6946 ixl_aq_dva(&iaq, IXL_DMA_DVA(idm));
6947
6948 param = (struct ixl_aq_nvm_param *)iaq.iaq_param;
6949 param->command_flags = IXL_AQ_NVM_LAST_CMD;
6950 param->module_pointer = 0;
6951 param->length = htole16(len);
6952 offset_bytes = (uint32_t)offset_word * 2;
6953 offset_bytes &= 0x00FFFFFF;
6954 param->offset = htole32(offset_bytes);
6955
6956 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
6957 BUS_DMASYNC_PREREAD);
6958
6959 rv = ixl_atq_poll(sc, &iaq, 250);
6960
6961 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
6962 BUS_DMASYNC_POSTREAD);
6963
6964 if (rv != 0) {
6965 return ETIMEDOUT;
6966 }
6967
6968 switch (le16toh(iaq.iaq_retval)) {
6969 case IXL_AQ_RC_OK:
6970 break;
6971 case IXL_AQ_RC_EPERM:
6972 return EPERM;
6973 case IXL_AQ_RC_EINVAL:
6974 return EINVAL;
6975 case IXL_AQ_RC_EBUSY:
6976 return EBUSY;
6977 case IXL_AQ_RC_EIO:
6978 default:
6979 return EIO;
6980 }
6981
6982 memcpy(data, IXL_DMA_KVA(idm), len);
6983
6984 return 0;
6985 }
6986
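/*
 * ixl_rd16_nvm:
 * Read one 16-bit word from the NVM: take the NVM lock, then use the
 * admin queue read when IXL_SC_AQ_FLAG_NVMREAD is set (converting the
 * little-endian result) or the SRCTL register path otherwise.
 */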
6987 static int
6988 ixl_rd16_nvm(struct ixl_softc *sc, uint16_t offset, uint16_t *data)
6989 {
6990 int error;
6991 uint16_t buf;
6992
6993 error = ixl_nvm_lock(sc, 'R');
6994 if (error)
6995 return error;
6996
6997 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMREAD)) {
6998 error = ixl_nvm_read_aq(sc, offset,
6999 &buf, sizeof(buf));
7000 if (error == 0)
7001 *data = le16toh(buf);
7002 } else {
7003 error = ixl_nvm_read_srctl(sc, offset, &buf);
7004 if (error == 0)
7005 *data = buf;
7006 }
7007
7008 ixl_nvm_unlock(sc);
7009
7010 return error;
7011 }
7012
7013 MODULE(MODULE_CLASS_DRIVER, if_ixl, "pci");
7014
7015 #ifdef _MODULE
7016 #include "ioconf.c"
7017 #endif
7018
7019 #ifdef _MODULE
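/*
 * ixl_parse_modprop:
 * Apply module properties passed at modload time (nomsix, stats_interval,
 * nqps_limit, rx_ndescs, tx_ndescs) to the corresponding ixl_param_*
 * tunables, with basic range checks.
 */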
7020 static void
7021 ixl_parse_modprop(prop_dictionary_t dict)
7022 {
7023 prop_object_t obj;
7024 int64_t val;
7025 uint64_t uval;
7026
7027 if (dict == NULL)
7028 return;
7029
7030 obj = prop_dictionary_get(dict, "nomsix");
7031 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_BOOL) {
7032 ixl_param_nomsix = prop_bool_true((prop_bool_t)obj);
7033 }
7034
7035 obj = prop_dictionary_get(dict, "stats_interval");
7036 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
7037 val = prop_number_integer_value((prop_number_t)obj);
7038
7039 /* the accepted range is arbitrary */
7040 if (100 < val && val < 180000) {
7041 ixl_param_stats_interval = val;
7042 }
7043 }
7044
7045 obj = prop_dictionary_get(dict, "nqps_limit");
7046 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
7047 val = prop_number_integer_value((prop_number_t)obj);
7048
7049 if (val <= INT32_MAX)
7050 ixl_param_nqps_limit = val;
7051 }
7052
7053 obj = prop_dictionary_get(dict, "rx_ndescs");
7054 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
7055 uval = prop_number_unsigned_integer_value((prop_number_t)obj);
7056
7057 if (uval > 8)
7058 ixl_param_rx_ndescs = uval;
7059 }
7060
7061 obj = prop_dictionary_get(dict, "tx_ndescs");
7062 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
7063 uval = prop_number_unsigned_integer_value((prop_number_t)obj);
7064
7065 if (uval > IXL_TX_PKT_DESCS)
7066 ixl_param_tx_ndescs = uval;
7067 }
7068
7069 }
7070 #endif
7071
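/*
 * if_ixl_modcmd:
 * Module control entry point: on MODULE_CMD_INIT parse the module
 * properties and initialize the driver components, on MODULE_CMD_FINI
 * tear them down.
 */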
7072 static int
7073 if_ixl_modcmd(modcmd_t cmd, void *opaque)
7074 {
7075 int error = 0;
7076
7077 #ifdef _MODULE
7078 switch (cmd) {
7079 case MODULE_CMD_INIT:
7080 ixl_parse_modprop((prop_dictionary_t)opaque);
7081 error = config_init_component(cfdriver_ioconf_if_ixl,
7082 cfattach_ioconf_if_ixl, cfdata_ioconf_if_ixl);
7083 break;
7084 case MODULE_CMD_FINI:
7085 error = config_fini_component(cfdriver_ioconf_if_ixl,
7086 cfattach_ioconf_if_ixl, cfdata_ioconf_if_ixl);
7087 break;
7088 default:
7089 error = ENOTTY;
7090 break;
7091 }
7092 #endif
7093
7094 return error;
7095 }
7096