1 /* $NetBSD: if_ixl.c,v 1.51 2020/02/25 07:45:28 yamaguchi Exp $ */
2
3 /*
4 * Copyright (c) 2013-2015, Intel Corporation
5 * All rights reserved.
6
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
12 *
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * 3. Neither the name of the Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 /*
35 * Copyright (c) 2016,2017 David Gwynne <dlg (at) openbsd.org>
36 *
37 * Permission to use, copy, modify, and distribute this software for any
38 * purpose with or without fee is hereby granted, provided that the above
39 * copyright notice and this permission notice appear in all copies.
40 *
41 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
42 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
43 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
44 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
45 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
46 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
47 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
48 */
49
50 /*
51 * Copyright (c) 2019 Internet Initiative Japan, Inc.
52 * All rights reserved.
53 *
54 * Redistribution and use in source and binary forms, with or without
55 * modification, are permitted provided that the following conditions
56 * are met:
57 * 1. Redistributions of source code must retain the above copyright
58 * notice, this list of conditions and the following disclaimer.
59 * 2. Redistributions in binary form must reproduce the above copyright
60 * notice, this list of conditions and the following disclaimer in the
61 * documentation and/or other materials provided with the distribution.
62 *
63 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
64 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
65 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
66 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
67 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
68 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
69 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
70 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
71 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
72 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
73 * POSSIBILITY OF SUCH DAMAGE.
74 */
75
76 #include <sys/cdefs.h>
77 __KERNEL_RCSID(0, "$NetBSD: if_ixl.c,v 1.51 2020/02/25 07:45:28 yamaguchi Exp $");
78
79 #ifdef _KERNEL_OPT
80 #include "opt_net_mpsafe.h"
81 #include "opt_if_ixl.h"
82 #endif
83
84 #include <sys/param.h>
85 #include <sys/types.h>
86
87 #include <sys/cpu.h>
88 #include <sys/device.h>
89 #include <sys/evcnt.h>
90 #include <sys/interrupt.h>
91 #include <sys/kmem.h>
92 #include <sys/malloc.h>
93 #include <sys/module.h>
94 #include <sys/mutex.h>
95 #include <sys/pcq.h>
96 #include <sys/syslog.h>
97 #include <sys/workqueue.h>
98
99 #include <sys/bus.h>
100
101 #include <net/bpf.h>
102 #include <net/if.h>
103 #include <net/if_dl.h>
104 #include <net/if_media.h>
105 #include <net/if_ether.h>
106 #include <net/rss_config.h>
107
108 #include <netinet/tcp.h> /* for struct tcphdr */
109 #include <netinet/udp.h> /* for struct udphdr */
110
111 #include <dev/pci/pcivar.h>
112 #include <dev/pci/pcidevs.h>
113
114 #include <dev/pci/if_ixlreg.h>
115 #include <dev/pci/if_ixlvar.h>
116
117 #include <prop/proplib.h>
118
119 struct ixl_softc; /* defined */
120
121 #define I40E_PF_RESET_WAIT_COUNT 200
122 #define I40E_AQ_LARGE_BUF 512
123
124 /* bitfields for Tx queue mapping in QTX_CTL */
125 #define I40E_QTX_CTL_VF_QUEUE 0x0
126 #define I40E_QTX_CTL_VM_QUEUE 0x1
127 #define I40E_QTX_CTL_PF_QUEUE 0x2
128
129 #define I40E_QUEUE_TYPE_EOL 0x7ff
130 #define I40E_INTR_NOTX_QUEUE 0
131
132 #define I40E_QUEUE_TYPE_RX 0x0
133 #define I40E_QUEUE_TYPE_TX 0x1
134 #define I40E_QUEUE_TYPE_PE_CEQ 0x2
135 #define I40E_QUEUE_TYPE_UNKNOWN 0x3
136
137 #define I40E_ITR_INDEX_RX 0x0
138 #define I40E_ITR_INDEX_TX 0x1
139 #define I40E_ITR_INDEX_OTHER 0x2
140 #define I40E_ITR_INDEX_NONE 0x3
141
143 #define I40E_INTR_NOTX_INTR 0
144 #define I40E_INTR_NOTX_RX_QUEUE 0
145 #define I40E_INTR_NOTX_TX_QUEUE 1
146 #define I40E_INTR_NOTX_RX_MASK I40E_PFINT_ICR0_QUEUE_0_MASK
147 #define I40E_INTR_NOTX_TX_MASK I40E_PFINT_ICR0_QUEUE_1_MASK
148
149 #define BIT_ULL(a) (1ULL << (a))
150 #define IXL_RSS_HENA_DEFAULT_BASE \
151 (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
152 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
153 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
154 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
155 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
156 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
157 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
158 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
159 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
160 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
161 BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
162 #define IXL_RSS_HENA_DEFAULT_XL710 IXL_RSS_HENA_DEFAULT_BASE
163 #define IXL_RSS_HENA_DEFAULT_X722 (IXL_RSS_HENA_DEFAULT_XL710 | \
164 BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
165 BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
166 BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
167 BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
168 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
169 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK))
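/*
 * The HENA ("hash enable") value is a bitmap of packet classifier types
 * (PCTYPEs) for which the hardware computes an RSS hash.  The defaults
 * above are presumably written to the PFQF_HENA registers when RSS is
 * configured (see ixl_config_rss()); the X722 supports a few additional
 * PCTYPEs on top of the XL710 set.
 */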
170 #define I40E_HASH_LUT_SIZE_128 0
171 #define IXL_RSS_KEY_SIZE_REG 13
172
173 #define IXL_ICR0_CRIT_ERR_MASK \
174 (I40E_PFINT_ICR0_PCI_EXCEPTION_MASK | \
175 I40E_PFINT_ICR0_ECC_ERR_MASK | \
176 I40E_PFINT_ICR0_PE_CRITERR_MASK)
177
178 #define IXL_QUEUE_MAX_XL710 64
179 #define IXL_QUEUE_MAX_X722 128
180
181 #define IXL_TX_PKT_DESCS 8
182 #define IXL_TX_PKT_MAXSIZE (MCLBYTES * IXL_TX_PKT_DESCS)
183 #define IXL_TX_QUEUE_ALIGN 128
184 #define IXL_RX_QUEUE_ALIGN 128
185
186 #define IXL_MCLBYTES (MCLBYTES - ETHER_ALIGN)
187 #define IXL_MTU_ETHERLEN (ETHER_HDR_LEN \
188 + ETHER_CRC_LEN)
189 #if 0
190 #define IXL_MAX_MTU (9728 - IXL_MTU_ETHERLEN)
191 #else
192 /* (dbuff * 5) - ETHER_HDR_LEN - ETHER_CRC_LEN */
193 #define IXL_MAX_MTU (9600 - IXL_MTU_ETHERLEN)
194 #endif
195 #define IXL_MIN_MTU (ETHER_MIN_LEN - ETHER_CRC_LEN)
196
197 #define IXL_PCIREG PCI_MAPREG_START
198
199 #define IXL_ITR0 0x0
200 #define IXL_ITR1 0x1
201 #define IXL_ITR2 0x2
202 #define IXL_NOITR 0x3
203
204 #define IXL_AQ_NUM 256
205 #define IXL_AQ_MASK (IXL_AQ_NUM - 1)
206 #define IXL_AQ_ALIGN 64 /* lol */
207 #define IXL_AQ_BUFLEN 4096
208
209 #define IXL_HMC_ROUNDUP 512
210 #define IXL_HMC_PGSIZE 4096
211 #define IXL_HMC_DVASZ sizeof(uint64_t)
212 #define IXL_HMC_PGS (IXL_HMC_PGSIZE / IXL_HMC_DVASZ)
213 #define IXL_HMC_L2SZ (IXL_HMC_PGSIZE * IXL_HMC_PGS)
214 #define IXL_HMC_PDVALID 1ULL
215
216 #define IXL_ATQ_EXEC_TIMEOUT (10 * hz)
217
218 #define IXL_SRRD_SRCTL_ATTEMPTS 100000
219
220 struct ixl_aq_regs {
221 bus_size_t atq_tail;
222 bus_size_t atq_head;
223 bus_size_t atq_len;
224 bus_size_t atq_bal;
225 bus_size_t atq_bah;
226
227 bus_size_t arq_tail;
228 bus_size_t arq_head;
229 bus_size_t arq_len;
230 bus_size_t arq_bal;
231 bus_size_t arq_bah;
232
233 uint32_t atq_len_enable;
234 uint32_t atq_tail_mask;
235 uint32_t atq_head_mask;
236
237 uint32_t arq_len_enable;
238 uint32_t arq_tail_mask;
239 uint32_t arq_head_mask;
240 };
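/*
 * ixl_aq_regs collects the register offsets and masks for one admin
 * queue pair (the ATQ carries commands to the firmware, the ARQ carries
 * events back), so the admin queue code does not depend on a particular
 * register layout.  Only the PF layout, ixl_pf_aq_regs below, is used
 * by this driver.
 */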
241
242 struct ixl_phy_type {
243 uint64_t phy_type;
244 uint64_t ifm_type;
245 };
246
247 struct ixl_speed_type {
248 uint8_t dev_speed;
249 uint64_t net_speed;
250 };
251
252 struct ixl_aq_buf {
253 SIMPLEQ_ENTRY(ixl_aq_buf)
254 aqb_entry;
255 void *aqb_data;
256 bus_dmamap_t aqb_map;
257 bus_dma_segment_t aqb_seg;
258 size_t aqb_size;
259 int aqb_nsegs;
260 };
261 SIMPLEQ_HEAD(ixl_aq_bufs, ixl_aq_buf);
262
263 struct ixl_dmamem {
264 bus_dmamap_t ixm_map;
265 bus_dma_segment_t ixm_seg;
266 int ixm_nsegs;
267 size_t ixm_size;
268 void *ixm_kva;
269 };
270
271 #define IXL_DMA_MAP(_ixm) ((_ixm)->ixm_map)
272 #define IXL_DMA_DVA(_ixm) ((_ixm)->ixm_map->dm_segs[0].ds_addr)
273 #define IXL_DMA_KVA(_ixm) ((void *)(_ixm)->ixm_kva)
274 #define IXL_DMA_LEN(_ixm) ((_ixm)->ixm_size)
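/*
 * Convenience accessors for an ixl_dmamem allocation: IXL_DMA_DVA is the
 * device-visible bus address of the first segment, IXL_DMA_KVA the
 * kernel mapping, and IXL_DMA_MAP/IXL_DMA_LEN the dmamap and size passed
 * to bus_dmamap_sync().
 */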
275
276 struct ixl_hmc_entry {
277 uint64_t hmc_base;
278 uint32_t hmc_count;
279 uint64_t hmc_size;
280 };
281
282 enum ixl_hmc_types {
283 IXL_HMC_LAN_TX = 0,
284 IXL_HMC_LAN_RX,
285 IXL_HMC_FCOE_CTX,
286 IXL_HMC_FCOE_FILTER,
287 IXL_HMC_COUNT
288 };
289
290 struct ixl_hmc_pack {
291 uint16_t offset;
292 uint16_t width;
293 uint16_t lsb;
294 };
295
296 /*
297 * these hmc objects have weird sizes and alignments, so these are abstract
298 * representations of them that are nice for c to populate.
299 *
300 * the packing code relies on little-endian values being stored in the fields,
301 * no high bits in the fields being set, and the fields must be packed in the
302 * same order as they are in the ctx structure.
303 */
304
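/*
 * Each ixl_hmc_pack entry maps one C field into the packed context:
 * `width' bits of the field at `offset' are written into the context
 * starting at bit `lsb'.  For example, the ixl_hmc_pack_rxq entry for
 * qlen, { offsetof(struct ixl_hmc_rxq, qlen), 13, 89 }, places the
 * 13-bit queue length at bits 89..101 of the rx queue context.
 */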
305 struct ixl_hmc_rxq {
306 uint16_t head;
307 uint8_t cpuid;
308 uint64_t base;
309 #define IXL_HMC_RXQ_BASE_UNIT 128
310 uint16_t qlen;
311 uint16_t dbuff;
312 #define IXL_HMC_RXQ_DBUFF_UNIT 128
313 uint8_t hbuff;
314 #define IXL_HMC_RXQ_HBUFF_UNIT 64
315 uint8_t dtype;
316 #define IXL_HMC_RXQ_DTYPE_NOSPLIT 0x0
317 #define IXL_HMC_RXQ_DTYPE_HSPLIT 0x1
318 #define IXL_HMC_RXQ_DTYPE_SPLIT_ALWAYS 0x2
319 uint8_t dsize;
320 #define IXL_HMC_RXQ_DSIZE_16 0
321 #define IXL_HMC_RXQ_DSIZE_32 1
322 uint8_t crcstrip;
323 uint8_t fc_ena;
324 uint8_t l2sel;
325 uint8_t hsplit_0;
326 uint8_t hsplit_1;
327 uint8_t showiv;
328 uint16_t rxmax;
329 uint8_t tphrdesc_ena;
330 uint8_t tphwdesc_ena;
331 uint8_t tphdata_ena;
332 uint8_t tphhead_ena;
333 uint8_t lrxqthresh;
334 uint8_t prefena;
335 };
336
337 static const struct ixl_hmc_pack ixl_hmc_pack_rxq[] = {
338 { offsetof(struct ixl_hmc_rxq, head), 13, 0 },
339 { offsetof(struct ixl_hmc_rxq, cpuid), 8, 13 },
340 { offsetof(struct ixl_hmc_rxq, base), 57, 32 },
341 { offsetof(struct ixl_hmc_rxq, qlen), 13, 89 },
342 { offsetof(struct ixl_hmc_rxq, dbuff), 7, 102 },
343 { offsetof(struct ixl_hmc_rxq, hbuff), 5, 109 },
344 { offsetof(struct ixl_hmc_rxq, dtype), 2, 114 },
345 { offsetof(struct ixl_hmc_rxq, dsize), 1, 116 },
346 { offsetof(struct ixl_hmc_rxq, crcstrip), 1, 117 },
347 { offsetof(struct ixl_hmc_rxq, fc_ena), 1, 118 },
348 { offsetof(struct ixl_hmc_rxq, l2sel), 1, 119 },
349 { offsetof(struct ixl_hmc_rxq, hsplit_0), 4, 120 },
350 { offsetof(struct ixl_hmc_rxq, hsplit_1), 2, 124 },
351 { offsetof(struct ixl_hmc_rxq, showiv), 1, 127 },
352 { offsetof(struct ixl_hmc_rxq, rxmax), 14, 174 },
353 { offsetof(struct ixl_hmc_rxq, tphrdesc_ena), 1, 193 },
354 { offsetof(struct ixl_hmc_rxq, tphwdesc_ena), 1, 194 },
355 { offsetof(struct ixl_hmc_rxq, tphdata_ena), 1, 195 },
356 { offsetof(struct ixl_hmc_rxq, tphhead_ena), 1, 196 },
357 { offsetof(struct ixl_hmc_rxq, lrxqthresh), 3, 198 },
358 { offsetof(struct ixl_hmc_rxq, prefena), 1, 201 },
359 };
360
361 #define IXL_HMC_RXQ_MINSIZE (201 + 1)
362
363 struct ixl_hmc_txq {
364 uint16_t head;
365 uint8_t new_context;
366 uint64_t base;
367 #define IXL_HMC_TXQ_BASE_UNIT 128
368 uint8_t fc_ena;
369 uint8_t timesync_ena;
370 uint8_t fd_ena;
371 uint8_t alt_vlan_ena;
372 uint8_t cpuid;
373 uint16_t thead_wb;
374 uint8_t head_wb_ena;
375 #define IXL_HMC_TXQ_DESC_WB 0
376 #define IXL_HMC_TXQ_HEAD_WB 1
377 uint16_t qlen;
378 uint8_t tphrdesc_ena;
379 uint8_t tphrpacket_ena;
380 uint8_t tphwdesc_ena;
381 uint64_t head_wb_addr;
382 uint32_t crc;
383 uint16_t rdylist;
384 uint8_t rdylist_act;
385 };
386
387 static const struct ixl_hmc_pack ixl_hmc_pack_txq[] = {
388 { offsetof(struct ixl_hmc_txq, head), 13, 0 },
389 { offsetof(struct ixl_hmc_txq, new_context), 1, 30 },
390 { offsetof(struct ixl_hmc_txq, base), 57, 32 },
391 { offsetof(struct ixl_hmc_txq, fc_ena), 1, 89 },
392 { offsetof(struct ixl_hmc_txq, timesync_ena), 1, 90 },
393 { offsetof(struct ixl_hmc_txq, fd_ena), 1, 91 },
394 { offsetof(struct ixl_hmc_txq, alt_vlan_ena), 1, 92 },
395 { offsetof(struct ixl_hmc_txq, cpuid), 8, 96 },
396 /* line 1 */
397 { offsetof(struct ixl_hmc_txq, thead_wb), 13, 0 + 128 },
398 { offsetof(struct ixl_hmc_txq, head_wb_ena), 1, 32 + 128 },
399 { offsetof(struct ixl_hmc_txq, qlen), 13, 33 + 128 },
400 { offsetof(struct ixl_hmc_txq, tphrdesc_ena), 1, 46 + 128 },
401 { offsetof(struct ixl_hmc_txq, tphrpacket_ena), 1, 47 + 128 },
402 { offsetof(struct ixl_hmc_txq, tphwdesc_ena), 1, 48 + 128 },
403 { offsetof(struct ixl_hmc_txq, head_wb_addr), 64, 64 + 128 },
404 /* line 7 */
405 { offsetof(struct ixl_hmc_txq, crc), 32, 0 + (7*128) },
406 { offsetof(struct ixl_hmc_txq, rdylist), 10, 84 + (7*128) },
407 { offsetof(struct ixl_hmc_txq, rdylist_act), 1, 94 + (7*128) },
408 };
409
410 #define IXL_HMC_TXQ_MINSIZE (94 + (7*128) + 1)
411
412 struct ixl_work {
413 struct work ixw_cookie;
414 void (*ixw_func)(void *);
415 void *ixw_arg;
416 unsigned int ixw_added;
417 };
418 #define IXL_WORKQUEUE_PRI PRI_SOFTNET
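/*
 * ixl_work wraps struct work with the callback and argument to run;
 * ixw_added appears to be used by ixl_work_add()/ixl_workq_work() as a
 * guard so that a task already queued on a workqueue is not enqueued a
 * second time.
 */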
419
420 struct ixl_tx_map {
421 struct mbuf *txm_m;
422 bus_dmamap_t txm_map;
423 unsigned int txm_eop;
424 };
425
426 struct ixl_tx_ring {
427 kmutex_t txr_lock;
428 struct ixl_softc *txr_sc;
429
430 unsigned int txr_prod;
431 unsigned int txr_cons;
432
433 struct ixl_tx_map *txr_maps;
434 struct ixl_dmamem txr_mem;
435
436 bus_size_t txr_tail;
437 unsigned int txr_qid;
438 pcq_t *txr_intrq;
439 void *txr_si;
440
441 struct evcnt txr_defragged;
442 struct evcnt txr_defrag_failed;
443 struct evcnt txr_pcqdrop;
444 struct evcnt txr_transmitdef;
445 struct evcnt txr_intr;
446 struct evcnt txr_defer;
447 };
448
449 struct ixl_rx_map {
450 struct mbuf *rxm_m;
451 bus_dmamap_t rxm_map;
452 };
453
454 struct ixl_rx_ring {
455 kmutex_t rxr_lock;
456
457 unsigned int rxr_prod;
458 unsigned int rxr_cons;
459
460 struct ixl_rx_map *rxr_maps;
461 struct ixl_dmamem rxr_mem;
462
463 struct mbuf *rxr_m_head;
464 struct mbuf **rxr_m_tail;
465
466 bus_size_t rxr_tail;
467 unsigned int rxr_qid;
468
469 struct evcnt rxr_mgethdr_failed;
470 struct evcnt rxr_mgetcl_failed;
471 struct evcnt rxr_mbuf_load_failed;
472 struct evcnt rxr_intr;
473 struct evcnt rxr_defer;
474 };
475
476 struct ixl_queue_pair {
477 struct ixl_softc *qp_sc;
478 struct ixl_tx_ring *qp_txr;
479 struct ixl_rx_ring *qp_rxr;
480
481 char qp_name[16];
482
483 void *qp_si;
484 struct work qp_work;
485 bool qp_workqueue;
486 };
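/*
 * A queue pair bundles one tx ring and one rx ring that share a queue
 * interrupt.  Deferred completion work runs either from a softint
 * (qp_si) or, when qp_workqueue is set, from the per-CPU sc_workq_txrx
 * workqueue via qp_work.
 */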
487
488 struct ixl_atq {
489 struct ixl_aq_desc iatq_desc;
490 void (*iatq_fn)(struct ixl_softc *,
491 const struct ixl_aq_desc *);
492 };
493 SIMPLEQ_HEAD(ixl_atq_list, ixl_atq);
494
495 struct ixl_product {
496 unsigned int vendor_id;
497 unsigned int product_id;
498 };
499
500 struct ixl_stats_counters {
501 bool isc_has_offset;
502 struct evcnt isc_crc_errors;
503 uint64_t isc_crc_errors_offset;
504 struct evcnt isc_illegal_bytes;
505 uint64_t isc_illegal_bytes_offset;
506 struct evcnt isc_rx_bytes;
507 uint64_t isc_rx_bytes_offset;
508 struct evcnt isc_rx_discards;
509 uint64_t isc_rx_discards_offset;
510 struct evcnt isc_rx_unicast;
511 uint64_t isc_rx_unicast_offset;
512 struct evcnt isc_rx_multicast;
513 uint64_t isc_rx_multicast_offset;
514 struct evcnt isc_rx_broadcast;
515 uint64_t isc_rx_broadcast_offset;
516 struct evcnt isc_rx_size_64;
517 uint64_t isc_rx_size_64_offset;
518 struct evcnt isc_rx_size_127;
519 uint64_t isc_rx_size_127_offset;
520 struct evcnt isc_rx_size_255;
521 uint64_t isc_rx_size_255_offset;
522 struct evcnt isc_rx_size_511;
523 uint64_t isc_rx_size_511_offset;
524 struct evcnt isc_rx_size_1023;
525 uint64_t isc_rx_size_1023_offset;
526 struct evcnt isc_rx_size_1522;
527 uint64_t isc_rx_size_1522_offset;
528 struct evcnt isc_rx_size_big;
529 uint64_t isc_rx_size_big_offset;
530 struct evcnt isc_rx_undersize;
531 uint64_t isc_rx_undersize_offset;
532 struct evcnt isc_rx_oversize;
533 uint64_t isc_rx_oversize_offset;
534 struct evcnt isc_rx_fragments;
535 uint64_t isc_rx_fragments_offset;
536 struct evcnt isc_rx_jabber;
537 uint64_t isc_rx_jabber_offset;
538 struct evcnt isc_tx_bytes;
539 uint64_t isc_tx_bytes_offset;
540 struct evcnt isc_tx_dropped_link_down;
541 uint64_t isc_tx_dropped_link_down_offset;
542 struct evcnt isc_tx_unicast;
543 uint64_t isc_tx_unicast_offset;
544 struct evcnt isc_tx_multicast;
545 uint64_t isc_tx_multicast_offset;
546 struct evcnt isc_tx_broadcast;
547 uint64_t isc_tx_broadcast_offset;
548 struct evcnt isc_tx_size_64;
549 uint64_t isc_tx_size_64_offset;
550 struct evcnt isc_tx_size_127;
551 uint64_t isc_tx_size_127_offset;
552 struct evcnt isc_tx_size_255;
553 uint64_t isc_tx_size_255_offset;
554 struct evcnt isc_tx_size_511;
555 uint64_t isc_tx_size_511_offset;
556 struct evcnt isc_tx_size_1023;
557 uint64_t isc_tx_size_1023_offset;
558 struct evcnt isc_tx_size_1522;
559 uint64_t isc_tx_size_1522_offset;
560 struct evcnt isc_tx_size_big;
561 uint64_t isc_tx_size_big_offset;
562 struct evcnt isc_mac_local_faults;
563 uint64_t isc_mac_local_faults_offset;
564 struct evcnt isc_mac_remote_faults;
565 uint64_t isc_mac_remote_faults_offset;
566 struct evcnt isc_link_xon_rx;
567 uint64_t isc_link_xon_rx_offset;
568 struct evcnt isc_link_xon_tx;
569 uint64_t isc_link_xon_tx_offset;
570 struct evcnt isc_link_xoff_rx;
571 uint64_t isc_link_xoff_rx_offset;
572 struct evcnt isc_link_xoff_tx;
573 uint64_t isc_link_xoff_tx_offset;
574 struct evcnt isc_vsi_rx_discards;
575 uint64_t isc_vsi_rx_discards_offset;
576 struct evcnt isc_vsi_rx_bytes;
577 uint64_t isc_vsi_rx_bytes_offset;
578 struct evcnt isc_vsi_rx_unicast;
579 uint64_t isc_vsi_rx_unicast_offset;
580 struct evcnt isc_vsi_rx_multicast;
581 uint64_t isc_vsi_rx_multicast_offset;
582 struct evcnt isc_vsi_rx_broadcast;
583 uint64_t isc_vsi_rx_broadcast_offset;
584 struct evcnt isc_vsi_tx_errors;
585 uint64_t isc_vsi_tx_errors_offset;
586 struct evcnt isc_vsi_tx_bytes;
587 uint64_t isc_vsi_tx_bytes_offset;
588 struct evcnt isc_vsi_tx_unicast;
589 uint64_t isc_vsi_tx_unicast_offset;
590 struct evcnt isc_vsi_tx_multicast;
591 uint64_t isc_vsi_tx_multicast_offset;
592 struct evcnt isc_vsi_tx_broadcast;
593 uint64_t isc_vsi_tx_broadcast_offset;
594 };
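/*
 * The hardware statistics registers are free-running and are not reset
 * at attach time, so each evcnt keeps a matching *_offset baseline.
 * ixl_stats_update() is run once during attach and isc_has_offset is
 * then set, so that later updates can be reported relative to that
 * baseline.
 */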
595
596 /*
597 * Locking notes:
598 * + fields in ixl_tx_ring are protected by txr_lock (a spin mutex), and
599 * fields in ixl_rx_ring are protected by rxr_lock (a spin mutex).
600 * - only one of these locks can be held at a time.
601 * + fields named sc_atq_* in ixl_softc are protected by sc_atq_lock
602 * (a spin mutex).
603 * - this lock cannot be held together with txr_lock or rxr_lock.
604 * + fields named sc_arq_* are not protected by any lock.
605 * - operations on sc_arq_* are done only in the single context
606 * related to sc_arq_task.
607 * + the other fields in ixl_softc are protected by sc_cfg_lock
608 * (an adaptive mutex).
609 * - it must be acquired before any other lock is taken, and can be
610 * released only after the other lock is released.
611 */
612
613 struct ixl_softc {
614 device_t sc_dev;
615 struct ethercom sc_ec;
616 bool sc_attached;
617 bool sc_dead;
618 uint32_t sc_port;
619 struct sysctllog *sc_sysctllog;
620 struct workqueue *sc_workq;
621 struct workqueue *sc_workq_txrx;
622 int sc_stats_intval;
623 callout_t sc_stats_callout;
624 struct ixl_work sc_stats_task;
625 struct ixl_stats_counters
626 sc_stats_counters;
627 uint8_t sc_enaddr[ETHER_ADDR_LEN];
628 struct ifmedia sc_media;
629 uint64_t sc_media_status;
630 uint64_t sc_media_active;
631 uint64_t sc_phy_types;
632 uint8_t sc_phy_abilities;
633 uint8_t sc_phy_linkspeed;
634 uint8_t sc_phy_fec_cfg;
635 uint16_t sc_eee_cap;
636 uint32_t sc_eeer_val;
637 uint8_t sc_d3_lpan;
638 kmutex_t sc_cfg_lock;
639 enum i40e_mac_type sc_mac_type;
640 uint32_t sc_rss_table_size;
641 uint32_t sc_rss_table_entry_width;
642 bool sc_txrx_workqueue;
643 u_int sc_tx_process_limit;
644 u_int sc_rx_process_limit;
645 u_int sc_tx_intr_process_limit;
646 u_int sc_rx_intr_process_limit;
647
648 int sc_cur_ec_capenable;
649
650 struct pci_attach_args sc_pa;
651 pci_intr_handle_t *sc_ihp;
652 void **sc_ihs;
653 unsigned int sc_nintrs;
654
655 bus_dma_tag_t sc_dmat;
656 bus_space_tag_t sc_memt;
657 bus_space_handle_t sc_memh;
658 bus_size_t sc_mems;
659
660 uint8_t sc_pf_id;
661 uint16_t sc_uplink_seid; /* le */
662 uint16_t sc_downlink_seid; /* le */
663 uint16_t sc_vsi_number;
664 uint16_t sc_vsi_stat_counter_idx;
665 uint16_t sc_seid;
666 unsigned int sc_base_queue;
667
668 pci_intr_type_t sc_intrtype;
669 unsigned int sc_msix_vector_queue;
670
671 struct ixl_dmamem sc_scratch;
672 struct ixl_dmamem sc_aqbuf;
673
674 const struct ixl_aq_regs *
675 sc_aq_regs;
676 uint32_t sc_aq_flags;
677 #define IXL_SC_AQ_FLAG_RXCTL __BIT(0)
678 #define IXL_SC_AQ_FLAG_NVMLOCK __BIT(1)
679 #define IXL_SC_AQ_FLAG_NVMREAD __BIT(2)
680 #define IXL_SC_AQ_FLAG_RSS __BIT(3)
681
682 kmutex_t sc_atq_lock;
683 kcondvar_t sc_atq_cv;
684 struct ixl_dmamem sc_atq;
685 unsigned int sc_atq_prod;
686 unsigned int sc_atq_cons;
687
688 struct ixl_dmamem sc_arq;
689 struct ixl_work sc_arq_task;
690 struct ixl_aq_bufs sc_arq_idle;
691 struct ixl_aq_buf *sc_arq_live[IXL_AQ_NUM];
692 unsigned int sc_arq_prod;
693 unsigned int sc_arq_cons;
694
695 struct ixl_work sc_link_state_task;
696 struct ixl_atq sc_link_state_atq;
697
698 struct ixl_dmamem sc_hmc_sd;
699 struct ixl_dmamem sc_hmc_pd;
700 struct ixl_hmc_entry sc_hmc_entries[IXL_HMC_COUNT];
701
702 unsigned int sc_tx_ring_ndescs;
703 unsigned int sc_rx_ring_ndescs;
704 unsigned int sc_nqueue_pairs;
705 unsigned int sc_nqueue_pairs_max;
706 unsigned int sc_nqueue_pairs_device;
707 struct ixl_queue_pair *sc_qps;
708
709 struct evcnt sc_event_atq;
710 struct evcnt sc_event_link;
711 struct evcnt sc_event_ecc_err;
712 struct evcnt sc_event_pci_exception;
713 struct evcnt sc_event_crit_err;
714 };
715
716 #define IXL_TXRX_PROCESS_UNLIMIT UINT_MAX
717 #define IXL_TX_PROCESS_LIMIT 256
718 #define IXL_RX_PROCESS_LIMIT 256
719 #define IXL_TX_INTR_PROCESS_LIMIT 256
720 #define IXL_RX_INTR_PROCESS_LIMIT 0U
721
722 #define IXL_IFCAP_RXCSUM (IFCAP_CSUM_IPv4_Rx | \
723 IFCAP_CSUM_TCPv4_Rx | \
724 IFCAP_CSUM_UDPv4_Rx | \
725 IFCAP_CSUM_TCPv6_Rx | \
726 IFCAP_CSUM_UDPv6_Rx)
727 #define IXL_IFCAP_TXCSUM (IFCAP_CSUM_IPv4_Tx | \
728 IFCAP_CSUM_TCPv4_Tx | \
729 IFCAP_CSUM_UDPv4_Tx | \
730 IFCAP_CSUM_TCPv6_Tx | \
731 IFCAP_CSUM_UDPv6_Tx)
732 #define IXL_CSUM_ALL_OFFLOAD (M_CSUM_IPv4 | \
733 M_CSUM_TCPv4 | M_CSUM_TCPv6 | \
734 M_CSUM_UDPv4 | M_CSUM_UDPv6)
735
736 #define delaymsec(_x) DELAY(1000 * (_x))
737 #ifdef IXL_DEBUG
738 #define DDPRINTF(sc, fmt, args...) \
739 do { \
740 if ((sc) != NULL) { \
741 device_printf( \
742 ((struct ixl_softc *)(sc))->sc_dev, \
743 ""); \
744 } \
745 printf("%s:\t" fmt, __func__, ##args); \
746 } while (0)
747 #else
748 #define DDPRINTF(sc, fmt, args...) __nothing
749 #endif
750 #ifndef IXL_STATS_INTERVAL_MSEC
751 #define IXL_STATS_INTERVAL_MSEC 10000
752 #endif
753 #ifndef IXL_QUEUE_NUM
754 #define IXL_QUEUE_NUM 0
755 #endif
756
757 static bool ixl_param_nomsix = false;
758 static int ixl_param_stats_interval = IXL_STATS_INTERVAL_MSEC;
759 static int ixl_param_nqps_limit = IXL_QUEUE_NUM;
760 static unsigned int ixl_param_tx_ndescs = 1024;
761 static unsigned int ixl_param_rx_ndescs = 1024;
762
763 static enum i40e_mac_type
764 ixl_mactype(pci_product_id_t);
765 static void ixl_clear_hw(struct ixl_softc *);
766 static int ixl_pf_reset(struct ixl_softc *);
767
768 static int ixl_dmamem_alloc(struct ixl_softc *, struct ixl_dmamem *,
769 bus_size_t, bus_size_t);
770 static void ixl_dmamem_free(struct ixl_softc *, struct ixl_dmamem *);
771
772 static int ixl_arq_fill(struct ixl_softc *);
773 static void ixl_arq_unfill(struct ixl_softc *);
774
775 static int ixl_atq_poll(struct ixl_softc *, struct ixl_aq_desc *,
776 unsigned int);
777 static void ixl_atq_set(struct ixl_atq *,
778 void (*)(struct ixl_softc *, const struct ixl_aq_desc *));
779 static int ixl_atq_post(struct ixl_softc *, struct ixl_atq *);
780 static int ixl_atq_post_locked(struct ixl_softc *, struct ixl_atq *);
781 static void ixl_atq_done(struct ixl_softc *);
782 static int ixl_atq_exec(struct ixl_softc *, struct ixl_atq *);
783 static int ixl_get_version(struct ixl_softc *);
784 static int ixl_get_nvm_version(struct ixl_softc *);
785 static int ixl_get_hw_capabilities(struct ixl_softc *);
786 static int ixl_pxe_clear(struct ixl_softc *);
787 static int ixl_lldp_shut(struct ixl_softc *);
788 static int ixl_get_mac(struct ixl_softc *);
789 static int ixl_get_switch_config(struct ixl_softc *);
790 static int ixl_phy_mask_ints(struct ixl_softc *);
791 static int ixl_get_phy_info(struct ixl_softc *);
792 static int ixl_set_phy_config(struct ixl_softc *, uint8_t, uint8_t, bool);
793 static int ixl_set_phy_autoselect(struct ixl_softc *);
794 static int ixl_restart_an(struct ixl_softc *);
795 static int ixl_hmc(struct ixl_softc *);
796 static void ixl_hmc_free(struct ixl_softc *);
797 static int ixl_get_vsi(struct ixl_softc *);
798 static int ixl_set_vsi(struct ixl_softc *);
799 static void ixl_set_filter_control(struct ixl_softc *);
800 static void ixl_get_link_status(void *);
801 static int ixl_get_link_status_poll(struct ixl_softc *, int *);
802 static int ixl_set_link_status(struct ixl_softc *,
803 const struct ixl_aq_desc *);
804 static uint64_t ixl_search_link_speed(uint8_t);
805 static uint8_t ixl_search_baudrate(uint64_t);
806 static void ixl_config_rss(struct ixl_softc *);
807 static int ixl_add_macvlan(struct ixl_softc *, const uint8_t *,
808 uint16_t, uint16_t);
809 static int ixl_remove_macvlan(struct ixl_softc *, const uint8_t *,
810 uint16_t, uint16_t);
811 static void ixl_arq(void *);
812 static void ixl_hmc_pack(void *, const void *,
813 const struct ixl_hmc_pack *, unsigned int);
814 static uint32_t ixl_rd_rx_csr(struct ixl_softc *, uint32_t);
815 static void ixl_wr_rx_csr(struct ixl_softc *, uint32_t, uint32_t);
816 static int ixl_rd16_nvm(struct ixl_softc *, uint16_t, uint16_t *);
817
818 static int ixl_match(device_t, cfdata_t, void *);
819 static void ixl_attach(device_t, device_t, void *);
820 static int ixl_detach(device_t, int);
821
822 static void ixl_media_add(struct ixl_softc *);
823 static int ixl_media_change(struct ifnet *);
824 static void ixl_media_status(struct ifnet *, struct ifmediareq *);
825 static void ixl_watchdog(struct ifnet *);
826 static int ixl_ioctl(struct ifnet *, u_long, void *);
827 static void ixl_start(struct ifnet *);
828 static int ixl_transmit(struct ifnet *, struct mbuf *);
829 static void ixl_deferred_transmit(void *);
830 static int ixl_intr(void *);
831 static int ixl_queue_intr(void *);
832 static int ixl_other_intr(void *);
833 static void ixl_handle_queue(void *);
834 static void ixl_handle_queue_wk(struct work *, void *);
835 static void ixl_sched_handle_queue(struct ixl_softc *,
836 struct ixl_queue_pair *);
837 static int ixl_init(struct ifnet *);
838 static int ixl_init_locked(struct ixl_softc *);
839 static void ixl_stop(struct ifnet *, int);
840 static void ixl_stop_locked(struct ixl_softc *);
841 static int ixl_iff(struct ixl_softc *);
842 static int ixl_ifflags_cb(struct ethercom *);
843 static int ixl_setup_interrupts(struct ixl_softc *);
844 static int ixl_establish_intx(struct ixl_softc *);
845 static int ixl_establish_msix(struct ixl_softc *);
846 static void ixl_enable_queue_intr(struct ixl_softc *,
847 struct ixl_queue_pair *);
848 static void ixl_disable_queue_intr(struct ixl_softc *,
849 struct ixl_queue_pair *);
850 static void ixl_enable_other_intr(struct ixl_softc *);
851 static void ixl_disable_other_intr(struct ixl_softc *);
852 static void ixl_config_queue_intr(struct ixl_softc *);
853 static void ixl_config_other_intr(struct ixl_softc *);
854
855 static struct ixl_tx_ring *
856 ixl_txr_alloc(struct ixl_softc *, unsigned int);
857 static void ixl_txr_qdis(struct ixl_softc *, struct ixl_tx_ring *, int);
858 static void ixl_txr_config(struct ixl_softc *, struct ixl_tx_ring *);
859 static int ixl_txr_enabled(struct ixl_softc *, struct ixl_tx_ring *);
860 static int ixl_txr_disabled(struct ixl_softc *, struct ixl_tx_ring *);
861 static void ixl_txr_unconfig(struct ixl_softc *, struct ixl_tx_ring *);
862 static void ixl_txr_clean(struct ixl_softc *, struct ixl_tx_ring *);
863 static void ixl_txr_free(struct ixl_softc *, struct ixl_tx_ring *);
864 static int ixl_txeof(struct ixl_softc *, struct ixl_tx_ring *, u_int);
865
866 static struct ixl_rx_ring *
867 ixl_rxr_alloc(struct ixl_softc *, unsigned int);
868 static void ixl_rxr_config(struct ixl_softc *, struct ixl_rx_ring *);
869 static int ixl_rxr_enabled(struct ixl_softc *, struct ixl_rx_ring *);
870 static int ixl_rxr_disabled(struct ixl_softc *, struct ixl_rx_ring *);
871 static void ixl_rxr_unconfig(struct ixl_softc *, struct ixl_rx_ring *);
872 static void ixl_rxr_clean(struct ixl_softc *, struct ixl_rx_ring *);
873 static void ixl_rxr_free(struct ixl_softc *, struct ixl_rx_ring *);
874 static int ixl_rxeof(struct ixl_softc *, struct ixl_rx_ring *, u_int);
875 static int ixl_rxfill(struct ixl_softc *, struct ixl_rx_ring *);
876
877 static struct workqueue *
878 ixl_workq_create(const char *, pri_t, int, int);
879 static void ixl_workq_destroy(struct workqueue *);
880 static int ixl_workqs_teardown(device_t);
881 static void ixl_work_set(struct ixl_work *, void (*)(void *), void *);
882 static void ixl_work_add(struct workqueue *, struct ixl_work *);
883 static void ixl_work_wait(struct workqueue *, struct ixl_work *);
884 static void ixl_workq_work(struct work *, void *);
885 static const struct ixl_product *
886 ixl_lookup(const struct pci_attach_args *pa);
887 static void ixl_link_state_update(struct ixl_softc *,
888 const struct ixl_aq_desc *);
889 static int ixl_vlan_cb(struct ethercom *, uint16_t, bool);
890 static int ixl_setup_vlan_hwfilter(struct ixl_softc *);
891 static void ixl_teardown_vlan_hwfilter(struct ixl_softc *);
892 static int ixl_update_macvlan(struct ixl_softc *);
894 static void ixl_teardown_interrupts(struct ixl_softc *);
895 static int ixl_setup_stats(struct ixl_softc *);
896 static void ixl_teardown_stats(struct ixl_softc *);
897 static void ixl_stats_callout(void *);
898 static void ixl_stats_update(void *);
899 static int ixl_setup_sysctls(struct ixl_softc *);
900 static void ixl_teardown_sysctls(struct ixl_softc *);
901 static int ixl_queue_pairs_alloc(struct ixl_softc *);
902 static void ixl_queue_pairs_free(struct ixl_softc *);
903
904 static const struct ixl_phy_type ixl_phy_type_map[] = {
905 { 1ULL << IXL_PHY_TYPE_SGMII, IFM_1000_SGMII },
906 { 1ULL << IXL_PHY_TYPE_1000BASE_KX, IFM_1000_KX },
907 { 1ULL << IXL_PHY_TYPE_10GBASE_KX4, IFM_10G_KX4 },
908 { 1ULL << IXL_PHY_TYPE_10GBASE_KR, IFM_10G_KR },
909 { 1ULL << IXL_PHY_TYPE_40GBASE_KR4, IFM_40G_KR4 },
910 { 1ULL << IXL_PHY_TYPE_XAUI |
911 1ULL << IXL_PHY_TYPE_XFI, IFM_10G_CX4 },
912 { 1ULL << IXL_PHY_TYPE_SFI, IFM_10G_SFI },
913 { 1ULL << IXL_PHY_TYPE_XLAUI |
914 1ULL << IXL_PHY_TYPE_XLPPI, IFM_40G_XLPPI },
915 { 1ULL << IXL_PHY_TYPE_40GBASE_CR4_CU |
916 1ULL << IXL_PHY_TYPE_40GBASE_CR4, IFM_40G_CR4 },
917 { 1ULL << IXL_PHY_TYPE_10GBASE_CR1_CU |
918 1ULL << IXL_PHY_TYPE_10GBASE_CR1, IFM_10G_CR1 },
919 { 1ULL << IXL_PHY_TYPE_10GBASE_AOC, IFM_10G_AOC },
920 { 1ULL << IXL_PHY_TYPE_40GBASE_AOC, IFM_40G_AOC },
921 { 1ULL << IXL_PHY_TYPE_100BASE_TX, IFM_100_TX },
922 { 1ULL << IXL_PHY_TYPE_1000BASE_T_OPTICAL |
923 1ULL << IXL_PHY_TYPE_1000BASE_T, IFM_1000_T },
924 { 1ULL << IXL_PHY_TYPE_10GBASE_T, IFM_10G_T },
925 { 1ULL << IXL_PHY_TYPE_10GBASE_SR, IFM_10G_SR },
926 { 1ULL << IXL_PHY_TYPE_10GBASE_LR, IFM_10G_LR },
927 { 1ULL << IXL_PHY_TYPE_10GBASE_SFPP_CU, IFM_10G_TWINAX },
928 { 1ULL << IXL_PHY_TYPE_40GBASE_SR4, IFM_40G_SR4 },
929 { 1ULL << IXL_PHY_TYPE_40GBASE_LR4, IFM_40G_LR4 },
930 { 1ULL << IXL_PHY_TYPE_1000BASE_SX, IFM_1000_SX },
931 { 1ULL << IXL_PHY_TYPE_1000BASE_LX, IFM_1000_LX },
932 { 1ULL << IXL_PHY_TYPE_20GBASE_KR2, IFM_20G_KR2 },
933 { 1ULL << IXL_PHY_TYPE_25GBASE_KR, IFM_25G_KR },
934 { 1ULL << IXL_PHY_TYPE_25GBASE_CR, IFM_25G_CR },
935 { 1ULL << IXL_PHY_TYPE_25GBASE_SR, IFM_25G_SR },
936 { 1ULL << IXL_PHY_TYPE_25GBASE_LR, IFM_25G_LR },
937 { 1ULL << IXL_PHY_TYPE_25GBASE_AOC, IFM_25G_AOC },
938 { 1ULL << IXL_PHY_TYPE_25GBASE_ACC, IFM_25G_CR },
939 };
940
941 static const struct ixl_speed_type ixl_speed_type_map[] = {
942 { IXL_AQ_LINK_SPEED_40GB, IF_Gbps(40) },
943 { IXL_AQ_LINK_SPEED_25GB, IF_Gbps(25) },
944 { IXL_AQ_LINK_SPEED_10GB, IF_Gbps(10) },
945 { IXL_AQ_LINK_SPEED_1000MB, IF_Mbps(1000) },
946 { IXL_AQ_LINK_SPEED_100MB, IF_Mbps(100)},
947 };
948
949 static const struct ixl_aq_regs ixl_pf_aq_regs = {
950 .atq_tail = I40E_PF_ATQT,
951 .atq_tail_mask = I40E_PF_ATQT_ATQT_MASK,
952 .atq_head = I40E_PF_ATQH,
953 .atq_head_mask = I40E_PF_ATQH_ATQH_MASK,
954 .atq_len = I40E_PF_ATQLEN,
955 .atq_bal = I40E_PF_ATQBAL,
956 .atq_bah = I40E_PF_ATQBAH,
957 .atq_len_enable = I40E_PF_ATQLEN_ATQENABLE_MASK,
958
959 .arq_tail = I40E_PF_ARQT,
960 .arq_tail_mask = I40E_PF_ARQT_ARQT_MASK,
961 .arq_head = I40E_PF_ARQH,
962 .arq_head_mask = I40E_PF_ARQH_ARQH_MASK,
963 .arq_len = I40E_PF_ARQLEN,
964 .arq_bal = I40E_PF_ARQBAL,
965 .arq_bah = I40E_PF_ARQBAH,
966 .arq_len_enable = I40E_PF_ARQLEN_ARQENABLE_MASK,
967 };
968
969 #define ixl_rd(_s, _r) \
970 bus_space_read_4((_s)->sc_memt, (_s)->sc_memh, (_r))
971 #define ixl_wr(_s, _r, _v) \
972 bus_space_write_4((_s)->sc_memt, (_s)->sc_memh, (_r), (_v))
973 #define ixl_barrier(_s, _r, _l, _o) \
974 bus_space_barrier((_s)->sc_memt, (_s)->sc_memh, (_r), (_l), (_o))
975 #define ixl_flush(_s) (void)ixl_rd((_s), I40E_GLGEN_STAT)
976 #define ixl_nqueues(_sc) (1 << ((_sc)->sc_nqueue_pairs - 1))
977
978 static inline uint32_t
979 ixl_dmamem_hi(struct ixl_dmamem *ixm)
980 {
981 uint32_t retval;
982 uint64_t val;
983
984 if (sizeof(IXL_DMA_DVA(ixm)) > 4) {
985 val = (intptr_t)IXL_DMA_DVA(ixm);
986 retval = (uint32_t)(val >> 32);
987 } else {
988 retval = 0;
989 }
990
991 return retval;
992 }
993
994 static inline uint32_t
995 ixl_dmamem_lo(struct ixl_dmamem *ixm)
996 {
997
998 return (uint32_t)IXL_DMA_DVA(ixm);
999 }
1000
1001 static inline void
1002 ixl_aq_dva(struct ixl_aq_desc *iaq, bus_addr_t addr)
1003 {
1004 uint64_t val;
1005
1006 if (sizeof(addr) > 4) {
1007 val = (intptr_t)addr;
1008 iaq->iaq_param[2] = htole32(val >> 32);
1009 } else {
1010 iaq->iaq_param[2] = htole32(0);
1011 }
1012
1013 iaq->iaq_param[3] = htole32(addr);
1014 }
1015
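/*
 * ixl_rxr_unrefreshed() returns how many rx descriptors may still be
 * refilled between the producer and consumer indices, always keeping
 * one slot unused so the ring never looks completely full.  For
 * example, with ndescs = 1024, prod = 10 and cons = 4 this is
 * (1024 - 10) + 4 - 1 = 1017.
 */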
1016 static inline unsigned int
1017 ixl_rxr_unrefreshed(unsigned int prod, unsigned int cons, unsigned int ndescs)
1018 {
1019 unsigned int num;
1020
1021 if (prod < cons)
1022 num = cons - prod;
1023 else
1024 num = (ndescs - prod) + cons;
1025
1026 if (__predict_true(num > 0)) {
1027 /* the device cannot receive packets if all descriptors are filled */
1028 num -= 1;
1029 }
1030
1031 return num;
1032 }
1033
1034 CFATTACH_DECL3_NEW(ixl, sizeof(struct ixl_softc),
1035 ixl_match, ixl_attach, ixl_detach, NULL, NULL, NULL,
1036 DVF_DETACH_SHUTDOWN);
1037
1038 static const struct ixl_product ixl_products[] = {
1039 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_SFP },
1040 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_KX_B },
1041 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_KX_C },
1042 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_A },
1043 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_B },
1044 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_C },
1045 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_T },
1046 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_1 },
1047 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_2 },
1048 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_T4_10G },
1049 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_BP },
1050 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_SFP28 },
1051 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_KX },
1052 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_QSFP },
1053 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_SFP },
1054 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_1G_BASET },
1055 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_10G_BASET },
1056 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_I_SFP },
1057 /* required last entry */
1058 {0, 0}
1059 };
1060
1061 static const struct ixl_product *
1062 ixl_lookup(const struct pci_attach_args *pa)
1063 {
1064 const struct ixl_product *ixlp;
1065
1066 for (ixlp = ixl_products; ixlp->vendor_id != 0; ixlp++) {
1067 if (PCI_VENDOR(pa->pa_id) == ixlp->vendor_id &&
1068 PCI_PRODUCT(pa->pa_id) == ixlp->product_id)
1069 return ixlp;
1070 }
1071
1072 return NULL;
1073 }
1074
1075 static int
1076 ixl_match(device_t parent, cfdata_t match, void *aux)
1077 {
1078 const struct pci_attach_args *pa = aux;
1079
1080 return (ixl_lookup(pa) != NULL) ? 1 : 0;
1081 }
1082
1083 static void
1084 ixl_attach(device_t parent, device_t self, void *aux)
1085 {
1086 struct ixl_softc *sc;
1087 struct pci_attach_args *pa = aux;
1088 struct ifnet *ifp;
1089 pcireg_t memtype;
1090 uint32_t firstq, port, ari, func;
1091 char xnamebuf[32];
1092 int tries, rv, link;
1093
1094 sc = device_private(self);
1095 sc->sc_dev = self;
1096 ifp = &sc->sc_ec.ec_if;
1097
1098 sc->sc_pa = *pa;
1099 sc->sc_dmat = (pci_dma64_available(pa)) ?
1100 pa->pa_dmat64 : pa->pa_dmat;
1101 sc->sc_aq_regs = &ixl_pf_aq_regs;
1102
1103 sc->sc_mac_type = ixl_mactype(PCI_PRODUCT(pa->pa_id));
1104
1105 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IXL_PCIREG);
1106 if (pci_mapreg_map(pa, IXL_PCIREG, memtype, 0,
1107 &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems)) {
1108 aprint_error(": unable to map registers\n");
1109 return;
1110 }
1111
1112 mutex_init(&sc->sc_cfg_lock, MUTEX_DEFAULT, IPL_SOFTNET);
1113
1114 firstq = ixl_rd(sc, I40E_PFLAN_QALLOC);
1115 firstq &= I40E_PFLAN_QALLOC_FIRSTQ_MASK;
1116 firstq >>= I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
1117 sc->sc_base_queue = firstq;
1118
1119 ixl_clear_hw(sc);
1120 if (ixl_pf_reset(sc) == -1) {
1121 /* error printed by ixl_pf_reset */
1122 goto unmap;
1123 }
1124
1125 port = ixl_rd(sc, I40E_PFGEN_PORTNUM);
1126 port &= I40E_PFGEN_PORTNUM_PORT_NUM_MASK;
1127 port >>= I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
1128 sc->sc_port = port;
1129 aprint_normal(": port %u", sc->sc_port);
1130
1131 ari = ixl_rd(sc, I40E_GLPCI_CAPSUP);
1132 ari &= I40E_GLPCI_CAPSUP_ARI_EN_MASK;
1133 ari >>= I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
1134
1135 func = ixl_rd(sc, I40E_PF_FUNC_RID);
1136 sc->sc_pf_id = func & (ari ? 0xff : 0x7);
1137
1138 /* initialise the adminq */
1139
1140 mutex_init(&sc->sc_atq_lock, MUTEX_DEFAULT, IPL_NET);
1141
1142 if (ixl_dmamem_alloc(sc, &sc->sc_atq,
1143 sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
1144 aprint_error("\n" "%s: unable to allocate atq\n",
1145 device_xname(self));
1146 goto unmap;
1147 }
1148
1149 SIMPLEQ_INIT(&sc->sc_arq_idle);
1150 ixl_work_set(&sc->sc_arq_task, ixl_arq, sc);
1151 sc->sc_arq_cons = 0;
1152 sc->sc_arq_prod = 0;
1153
1154 if (ixl_dmamem_alloc(sc, &sc->sc_arq,
1155 sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
1156 aprint_error("\n" "%s: unable to allocate arq\n",
1157 device_xname(self));
1158 goto free_atq;
1159 }
1160
1161 if (!ixl_arq_fill(sc)) {
1162 aprint_error("\n" "%s: unable to fill arq descriptors\n",
1163 device_xname(self));
1164 goto free_arq;
1165 }
1166
1167 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1168 0, IXL_DMA_LEN(&sc->sc_atq),
1169 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1170
1171 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1172 0, IXL_DMA_LEN(&sc->sc_arq),
1173 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1174
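	/*
	 * Program the admin queue head/tail and base/length registers,
	 * then use GET VERSION as a probe that the firmware is servicing
	 * the ATQ.  If the command times out, wait 100ms and retry the
	 * whole setup, up to 10 times.
	 */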
1175 for (tries = 0; tries < 10; tries++) {
1176 sc->sc_atq_cons = 0;
1177 sc->sc_atq_prod = 0;
1178
1179 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1180 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1181 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1182 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1183
1184 ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
1185
1186 ixl_wr(sc, sc->sc_aq_regs->atq_bal,
1187 ixl_dmamem_lo(&sc->sc_atq));
1188 ixl_wr(sc, sc->sc_aq_regs->atq_bah,
1189 ixl_dmamem_hi(&sc->sc_atq));
1190 ixl_wr(sc, sc->sc_aq_regs->atq_len,
1191 sc->sc_aq_regs->atq_len_enable | IXL_AQ_NUM);
1192
1193 ixl_wr(sc, sc->sc_aq_regs->arq_bal,
1194 ixl_dmamem_lo(&sc->sc_arq));
1195 ixl_wr(sc, sc->sc_aq_regs->arq_bah,
1196 ixl_dmamem_hi(&sc->sc_arq));
1197 ixl_wr(sc, sc->sc_aq_regs->arq_len,
1198 sc->sc_aq_regs->arq_len_enable | IXL_AQ_NUM);
1199
1200 rv = ixl_get_version(sc);
1201 if (rv == 0)
1202 break;
1203 if (rv != ETIMEDOUT) {
1204 aprint_error(", unable to get firmware version\n");
1205 goto shutdown;
1206 }
1207
1208 delaymsec(100);
1209 }
1210
1211 ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
1212
1213 if (ixl_dmamem_alloc(sc, &sc->sc_aqbuf, IXL_AQ_BUFLEN, 0) != 0) {
1214 aprint_error_dev(self, ", unable to allocate nvm buffer\n");
1215 goto shutdown;
1216 }
1217
1218 ixl_get_nvm_version(sc);
1219
1220 if (sc->sc_mac_type == I40E_MAC_X722)
1221 sc->sc_nqueue_pairs_device = IXL_QUEUE_MAX_X722;
1222 else
1223 sc->sc_nqueue_pairs_device = IXL_QUEUE_MAX_XL710;
1224
1225 rv = ixl_get_hw_capabilities(sc);
1226 if (rv != 0) {
1227 aprint_error(", GET HW CAPABILITIES %s\n",
1228 rv == ETIMEDOUT ? "timeout" : "error");
1229 goto free_aqbuf;
1230 }
1231
1232 sc->sc_nqueue_pairs_max = MIN((int)sc->sc_nqueue_pairs_device, ncpu);
1233 if (ixl_param_nqps_limit > 0) {
1234 sc->sc_nqueue_pairs_max = MIN((int)sc->sc_nqueue_pairs_max,
1235 ixl_param_nqps_limit);
1236 }
1237
1238 sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max;
1239 sc->sc_tx_ring_ndescs = ixl_param_tx_ndescs;
1240 sc->sc_rx_ring_ndescs = ixl_param_rx_ndescs;
1241
1242 KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_rx_ring_ndescs);
1243 KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_tx_ring_ndescs);
1244
1245 if (ixl_get_mac(sc) != 0) {
1246 /* error printed by ixl_get_mac */
1247 goto free_aqbuf;
1248 }
1249
1250 aprint_normal("\n");
1251 aprint_naive("\n");
1252
1253 aprint_normal_dev(self, "Ethernet address %s\n",
1254 ether_sprintf(sc->sc_enaddr));
1255
1256 rv = ixl_pxe_clear(sc);
1257 if (rv != 0) {
1258 aprint_debug_dev(self, "CLEAR PXE MODE %s\n",
1259 rv == ETIMEDOUT ? "timeout" : "error");
1260 }
1261
1262 ixl_set_filter_control(sc);
1263
1264 if (ixl_hmc(sc) != 0) {
1265 /* error printed by ixl_hmc */
1266 goto free_aqbuf;
1267 }
1268
1269 if (ixl_lldp_shut(sc) != 0) {
1270 /* error printed by ixl_lldp_shut */
1271 goto free_hmc;
1272 }
1273
1274 if (ixl_phy_mask_ints(sc) != 0) {
1275 /* error printed by ixl_phy_mask_ints */
1276 goto free_hmc;
1277 }
1278
1279 if (ixl_restart_an(sc) != 0) {
1280 /* error printed by ixl_restart_an */
1281 goto free_hmc;
1282 }
1283
1284 if (ixl_get_switch_config(sc) != 0) {
1285 /* error printed by ixl_get_switch_config */
1286 goto free_hmc;
1287 }
1288
1289 rv = ixl_get_link_status_poll(sc, NULL);
1290 if (rv != 0) {
1291 aprint_error_dev(self, "GET LINK STATUS %s\n",
1292 rv == ETIMEDOUT ? "timeout" : "error");
1293 goto free_hmc;
1294 }
1295
1296 /*
1297 * The FW often returns EIO for the "Get PHY Abilities" command
1298 * if it is issued without a delay
1299 */
1300 DELAY(500);
1301 if (ixl_get_phy_info(sc) != 0) {
1302 /* error printed by ixl_get_phy_info */
1303 goto free_hmc;
1304 }
1305
1306 if (ixl_dmamem_alloc(sc, &sc->sc_scratch,
1307 sizeof(struct ixl_aq_vsi_data), 8) != 0) {
1308 aprint_error_dev(self, "unable to allocate scratch buffer\n");
1309 goto free_hmc;
1310 }
1311
1312 rv = ixl_get_vsi(sc);
1313 if (rv != 0) {
1314 aprint_error_dev(self, "GET VSI %s %d\n",
1315 rv == ETIMEDOUT ? "timeout" : "error", rv);
1316 goto free_scratch;
1317 }
1318
1319 rv = ixl_set_vsi(sc);
1320 if (rv != 0) {
1321 aprint_error_dev(self, "UPDATE VSI error %s %d\n",
1322 rv == ETIMEDOUT ? "timeout" : "error", rv);
1323 goto free_scratch;
1324 }
1325
1326 if (ixl_queue_pairs_alloc(sc) != 0) {
1327 /* error printed by ixl_queue_pairs_alloc */
1328 goto free_scratch;
1329 }
1330
1331 if (ixl_setup_interrupts(sc) != 0) {
1332 /* error printed by ixl_setup_interrupts */
1333 goto free_queue_pairs;
1334 }
1335
1336 if (ixl_setup_stats(sc) != 0) {
1337 aprint_error_dev(self, "failed to setup event counters\n");
1338 goto teardown_intrs;
1339 }
1340
1341 if (ixl_setup_sysctls(sc) != 0) {
1342 /* error printed by ixl_setup_sysctls */
1343 goto teardown_stats;
1344 }
1345
1346 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_cfg", device_xname(self));
1347 sc->sc_workq = ixl_workq_create(xnamebuf, IXL_WORKQUEUE_PRI,
1348 IPL_NET, WQ_MPSAFE);
1349 if (sc->sc_workq == NULL)
1350 goto teardown_sysctls;
1351
1352 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_txrx", device_xname(self));
1353 rv = workqueue_create(&sc->sc_workq_txrx, xnamebuf, ixl_handle_queue_wk,
1354 sc, IXL_WORKQUEUE_PRI, IPL_NET, WQ_PERCPU | WQ_MPSAFE);
1355 if (rv != 0) {
1356 sc->sc_workq_txrx = NULL;
1357 goto teardown_wqs;
1358 }
1359
1360 snprintf(xnamebuf, sizeof(xnamebuf), "%s_atq_cv", device_xname(self));
1361 cv_init(&sc->sc_atq_cv, xnamebuf);
1362
1363 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
1364
1365 ifp->if_softc = sc;
1366 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1367 ifp->if_extflags = IFEF_MPSAFE;
1368 ifp->if_ioctl = ixl_ioctl;
1369 ifp->if_start = ixl_start;
1370 ifp->if_transmit = ixl_transmit;
1371 ifp->if_watchdog = ixl_watchdog;
1372 ifp->if_init = ixl_init;
1373 ifp->if_stop = ixl_stop;
1374 IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_ndescs);
1375 IFQ_SET_READY(&ifp->if_snd);
1376 ifp->if_capabilities |= IXL_IFCAP_RXCSUM;
1377 ifp->if_capabilities |= IXL_IFCAP_TXCSUM;
1378 #if 0
1379 ifp->if_capabilities |= IFCAP_TSOv4 | IFCAP_TSOv6;
1380 #endif
1381 ether_set_vlan_cb(&sc->sc_ec, ixl_vlan_cb);
1382 sc->sc_ec.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1383 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
1384 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
1385
1386 sc->sc_ec.ec_capenable = sc->sc_ec.ec_capabilities;
1387 /* Disable VLAN_HWFILTER by default */
1388 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
1389
1390 sc->sc_cur_ec_capenable = sc->sc_ec.ec_capenable;
1391
1392 sc->sc_ec.ec_ifmedia = &sc->sc_media;
1393 ifmedia_init(&sc->sc_media, IFM_IMASK, ixl_media_change,
1394 ixl_media_status);
1395
1396 ixl_media_add(sc);
1397 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1398 if (ISSET(sc->sc_phy_abilities,
1399 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX))) {
1400 ifmedia_add(&sc->sc_media,
1401 IFM_ETHER | IFM_AUTO | IFM_FLOW, 0, NULL);
1402 }
1403 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_NONE, 0, NULL);
1404 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
1405
1406 if_attach(ifp);
1407 if_deferred_start_init(ifp, NULL);
1408 ether_ifattach(ifp, sc->sc_enaddr);
1409 ether_set_ifflags_cb(&sc->sc_ec, ixl_ifflags_cb);
1410
1411 rv = ixl_get_link_status_poll(sc, &link);
1412 if (rv != 0)
1413 link = LINK_STATE_UNKNOWN;
1414 if_link_state_change(ifp, link);
1415
1416 ixl_work_set(&sc->sc_link_state_task, ixl_get_link_status, sc);
1417
1418 ixl_config_other_intr(sc);
1419 ixl_enable_other_intr(sc);
1420
1421 ixl_set_phy_autoselect(sc);
1422
1423 /* remove default mac filter and replace it so we can see vlans */
1424 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, 0, 0);
1425 if (rv != ENOENT) {
1426 aprint_debug_dev(self,
1427 "unable to remove macvlan %u\n", rv);
1428 }
1429 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
1430 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1431 if (rv != ENOENT) {
1432 aprint_debug_dev(self,
1433 "unable to remove macvlan, ignore vlan %u\n", rv);
1434 }
1435
1436 if (ixl_update_macvlan(sc) != 0) {
1437 aprint_debug_dev(self,
1438 "couldn't enable vlan hardware filter\n");
1439 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
1440 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER);
1441 }
1442
1443 sc->sc_txrx_workqueue = true;
1444 sc->sc_tx_process_limit = IXL_TX_PROCESS_LIMIT;
1445 sc->sc_rx_process_limit = IXL_RX_PROCESS_LIMIT;
1446 sc->sc_tx_intr_process_limit = IXL_TX_INTR_PROCESS_LIMIT;
1447 sc->sc_rx_intr_process_limit = IXL_RX_INTR_PROCESS_LIMIT;
1448
1449 ixl_stats_update(sc);
1450 sc->sc_stats_counters.isc_has_offset = true;
1451
1452 if (pmf_device_register(self, NULL, NULL) != true)
1453 aprint_debug_dev(self, "couldn't establish power handler\n");
1454 sc->sc_attached = true;
1455 return;
1456
1457 teardown_wqs:
1458 config_finalize_register(self, ixl_workqs_teardown);
1459 teardown_sysctls:
1460 ixl_teardown_sysctls(sc);
1461 teardown_stats:
1462 ixl_teardown_stats(sc);
1463 teardown_intrs:
1464 ixl_teardown_interrupts(sc);
1465 free_queue_pairs:
1466 ixl_queue_pairs_free(sc);
1467 free_scratch:
1468 ixl_dmamem_free(sc, &sc->sc_scratch);
1469 free_hmc:
1470 ixl_hmc_free(sc);
1471 free_aqbuf:
1472 ixl_dmamem_free(sc, &sc->sc_aqbuf);
1473 shutdown:
1474 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1475 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1476 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1477 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1478
1479 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0);
1480 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0);
1481 ixl_wr(sc, sc->sc_aq_regs->atq_len, 0);
1482
1483 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0);
1484 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0);
1485 ixl_wr(sc, sc->sc_aq_regs->arq_len, 0);
1486
1487 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1488 0, IXL_DMA_LEN(&sc->sc_arq),
1489 BUS_DMASYNC_POSTREAD);
1490 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1491 0, IXL_DMA_LEN(&sc->sc_atq),
1492 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1493
1494 ixl_arq_unfill(sc);
1495 free_arq:
1496 ixl_dmamem_free(sc, &sc->sc_arq);
1497 free_atq:
1498 ixl_dmamem_free(sc, &sc->sc_atq);
1499 unmap:
1500 mutex_destroy(&sc->sc_atq_lock);
1501 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
1502 mutex_destroy(&sc->sc_cfg_lock);
1503 sc->sc_mems = 0;
1504
1505 sc->sc_attached = false;
1506 }
1507
1508 static int
1509 ixl_detach(device_t self, int flags)
1510 {
1511 struct ixl_softc *sc = device_private(self);
1512 struct ifnet *ifp = &sc->sc_ec.ec_if;
1513
1514 if (!sc->sc_attached)
1515 return 0;
1516
1517 ixl_stop(ifp, 1);
1518
1519 ixl_disable_other_intr(sc);
1520
1521 callout_halt(&sc->sc_stats_callout, NULL);
1522 ixl_work_wait(sc->sc_workq, &sc->sc_stats_task);
1523
1524 /* wait for ATQ handler */
1525 mutex_enter(&sc->sc_atq_lock);
1526 mutex_exit(&sc->sc_atq_lock);
1527
1528 ixl_work_wait(sc->sc_workq, &sc->sc_arq_task);
1529 ixl_work_wait(sc->sc_workq, &sc->sc_link_state_task);
1530
1531 if (sc->sc_workq != NULL) {
1532 ixl_workq_destroy(sc->sc_workq);
1533 sc->sc_workq = NULL;
1534 }
1535
1536 if (sc->sc_workq_txrx != NULL) {
1537 workqueue_destroy(sc->sc_workq_txrx);
1538 sc->sc_workq_txrx = NULL;
1539 }
1540
1541 ether_ifdetach(ifp);
1542 if_detach(ifp);
1543 ifmedia_fini(&sc->sc_media);
1544
1545 ixl_teardown_interrupts(sc);
1546 ixl_teardown_stats(sc);
1547 ixl_teardown_sysctls(sc);
1548
1549 ixl_queue_pairs_free(sc);
1550
1551 ixl_dmamem_free(sc, &sc->sc_scratch);
1552 ixl_hmc_free(sc);
1553
1554 /* shutdown */
1555 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1556 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1557 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1558 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1559
1560 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0);
1561 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0);
1562 ixl_wr(sc, sc->sc_aq_regs->atq_len, 0);
1563
1564 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0);
1565 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0);
1566 ixl_wr(sc, sc->sc_aq_regs->arq_len, 0);
1567
1568 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1569 0, IXL_DMA_LEN(&sc->sc_arq),
1570 BUS_DMASYNC_POSTREAD);
1571 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1572 0, IXL_DMA_LEN(&sc->sc_atq),
1573 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1574
1575 ixl_arq_unfill(sc);
1576
1577 ixl_dmamem_free(sc, &sc->sc_arq);
1578 ixl_dmamem_free(sc, &sc->sc_atq);
1579 ixl_dmamem_free(sc, &sc->sc_aqbuf);
1580
1581 cv_destroy(&sc->sc_atq_cv);
1582 mutex_destroy(&sc->sc_atq_lock);
1583
1584 if (sc->sc_mems != 0) {
1585 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
1586 sc->sc_mems = 0;
1587 }
1588
1589 mutex_destroy(&sc->sc_cfg_lock);
1590
1591 return 0;
1592 }
1593
1594 static int
1595 ixl_workqs_teardown(device_t self)
1596 {
1597 struct ixl_softc *sc = device_private(self);
1598
1599 if (sc->sc_workq != NULL) {
1600 ixl_workq_destroy(sc->sc_workq);
1601 sc->sc_workq = NULL;
1602 }
1603
1604 if (sc->sc_workq_txrx != NULL) {
1605 workqueue_destroy(sc->sc_workq_txrx);
1606 sc->sc_workq_txrx = NULL;
1607 }
1608
1609 return 0;
1610 }
1611
1612 static int
1613 ixl_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
1614 {
1615 struct ifnet *ifp = &ec->ec_if;
1616 struct ixl_softc *sc = ifp->if_softc;
1617 int rv;
1618
1619 if (!ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)) {
1620 return 0;
1621 }
1622
1623 if (set) {
1624 rv = ixl_add_macvlan(sc, sc->sc_enaddr, vid,
1625 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
1626 if (rv == 0) {
1627 rv = ixl_add_macvlan(sc, etherbroadcastaddr,
1628 vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
1629 }
1630 } else {
1631 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, vid,
1632 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
1633 (void)ixl_remove_macvlan(sc, etherbroadcastaddr, vid,
1634 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
1635 }
1636
1637 return rv;
1638 }
1639
1640 static void
1641 ixl_media_add(struct ixl_softc *sc)
1642 {
1643 struct ifmedia *ifm = &sc->sc_media;
1644 const struct ixl_phy_type *itype;
1645 unsigned int i;
1646 bool flow;
1647
1648 if (ISSET(sc->sc_phy_abilities,
1649 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX))) {
1650 flow = true;
1651 } else {
1652 flow = false;
1653 }
1654
1655 for (i = 0; i < __arraycount(ixl_phy_type_map); i++) {
1656 itype = &ixl_phy_type_map[i];
1657
1658 if (ISSET(sc->sc_phy_types, itype->phy_type)) {
1659 ifmedia_add(ifm,
1660 IFM_ETHER | IFM_FDX | itype->ifm_type, 0, NULL);
1661
1662 if (flow) {
1663 ifmedia_add(ifm,
1664 IFM_ETHER | IFM_FDX | IFM_FLOW |
1665 itype->ifm_type, 0, NULL);
1666 }
1667
1668 if (itype->ifm_type != IFM_100_TX)
1669 continue;
1670
1671 ifmedia_add(ifm, IFM_ETHER | itype->ifm_type,
1672 0, NULL);
1673 if (flow) {
1674 ifmedia_add(ifm,
1675 IFM_ETHER | IFM_FLOW | itype->ifm_type,
1676 0, NULL);
1677 }
1678 }
1679 }
1680 }
1681
1682 static void
1683 ixl_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1684 {
1685 struct ixl_softc *sc = ifp->if_softc;
1686
1687 ifmr->ifm_status = sc->sc_media_status;
1688 ifmr->ifm_active = sc->sc_media_active;
1689
1690 mutex_enter(&sc->sc_cfg_lock);
1691 if (ifp->if_link_state == LINK_STATE_UP)
1692 SET(ifmr->ifm_status, IFM_ACTIVE);
1693 mutex_exit(&sc->sc_cfg_lock);
1694 }
1695
1696 static int
1697 ixl_media_change(struct ifnet *ifp)
1698 {
1699 struct ixl_softc *sc = ifp->if_softc;
1700 struct ifmedia *ifm = &sc->sc_media;
1701 uint64_t ifm_active = sc->sc_media_active;
1702 uint8_t link_speed, abilities;
1703
1704 switch (IFM_SUBTYPE(ifm_active)) {
1705 case IFM_1000_SGMII:
1706 case IFM_1000_KX:
1707 case IFM_10G_KX4:
1708 case IFM_10G_KR:
1709 case IFM_40G_KR4:
1710 case IFM_20G_KR2:
1711 case IFM_25G_KR:
1712 /* backplanes */
1713 return EINVAL;
1714 }
1715
1716 abilities = IXL_PHY_ABILITY_AUTONEGO | IXL_PHY_ABILITY_LINKUP;
1717
1718 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1719 case IFM_AUTO:
1720 link_speed = sc->sc_phy_linkspeed;
1721 break;
1722 case IFM_NONE:
1723 link_speed = 0;
1724 CLR(abilities, IXL_PHY_ABILITY_LINKUP);
1725 break;
1726 default:
1727 link_speed = ixl_search_baudrate(
1728 ifmedia_baudrate(ifm->ifm_media));
1729 }
1730
1731 if (ISSET(abilities, IXL_PHY_ABILITY_LINKUP)) {
1732 if (ISSET(link_speed, sc->sc_phy_linkspeed) == 0)
1733 return EINVAL;
1734 }
1735
1736 if (ifm->ifm_media & IFM_FLOW) {
1737 abilities |= sc->sc_phy_abilities &
1738 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX);
1739 }
1740
1741 return ixl_set_phy_config(sc, link_speed, abilities, false);
1742 }
1743
1744 static void
1745 ixl_watchdog(struct ifnet *ifp)
1746 {
1747
1748 }
1749
1750 static void
1751 ixl_del_all_multiaddr(struct ixl_softc *sc)
1752 {
1753 struct ethercom *ec = &sc->sc_ec;
1754 struct ether_multi *enm;
1755 struct ether_multistep step;
1756
1757 ETHER_LOCK(ec);
1758 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1759 ETHER_NEXT_MULTI(step, enm)) {
1760 ixl_remove_macvlan(sc, enm->enm_addrlo, 0,
1761 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1762 }
1763 ETHER_UNLOCK(ec);
1764 }
1765
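/*
 * Install a hardware filter for a multicast address.  Address ranges
 * cannot be expressed as MAC/VLAN filters, and the filter table may
 * fill up (ENOSPC); in either case all individual multicast filters
 * are dropped and the interface falls back to IFF_ALLMULTI, returning
 * ENETRESET so the caller reprograms promiscuous state via ixl_iff().
 */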
1766 static int
1767 ixl_add_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
1768 {
1769 struct ifnet *ifp = &sc->sc_ec.ec_if;
1770 int rv;
1771
1772 if (ISSET(ifp->if_flags, IFF_ALLMULTI))
1773 return 0;
1774
1775 if (memcmp(addrlo, addrhi, ETHER_ADDR_LEN) != 0) {
1776 ixl_del_all_multiaddr(sc);
1777 SET(ifp->if_flags, IFF_ALLMULTI);
1778 return ENETRESET;
1779 }
1780
1781 /* multicast addresses cannot use the VLAN HWFILTER */
1782 rv = ixl_add_macvlan(sc, addrlo, 0,
1783 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
1784
1785 if (rv == ENOSPC) {
1786 ixl_del_all_multiaddr(sc);
1787 SET(ifp->if_flags, IFF_ALLMULTI);
1788 return ENETRESET;
1789 }
1790
1791 return rv;
1792 }
1793
1794 static int
1795 ixl_del_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
1796 {
1797 struct ifnet *ifp = &sc->sc_ec.ec_if;
1798 struct ethercom *ec = &sc->sc_ec;
1799 struct ether_multi *enm, *enm_last;
1800 struct ether_multistep step;
1801 int error, rv = 0;
1802
1803 if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
1804 ixl_remove_macvlan(sc, addrlo, 0,
1805 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1806 return 0;
1807 }
1808
1809 ETHER_LOCK(ec);
1810 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1811 ETHER_NEXT_MULTI(step, enm)) {
1812 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
1813 ETHER_ADDR_LEN) != 0) {
1814 goto out;
1815 }
1816 }
1817
1818 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1819 ETHER_NEXT_MULTI(step, enm)) {
1820 error = ixl_add_macvlan(sc, enm->enm_addrlo, 0,
1821 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
1822 if (error != 0)
1823 break;
1824 }
1825
1826 if (enm != NULL) {
1827 enm_last = enm;
1828 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1829 ETHER_NEXT_MULTI(step, enm)) {
1830 if (enm == enm_last)
1831 break;
1832
1833 ixl_remove_macvlan(sc, enm->enm_addrlo, 0,
1834 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1835 }
1836 } else {
1837 CLR(ifp->if_flags, IFF_ALLMULTI);
1838 rv = ENETRESET;
1839 }
1840
1841 out:
1842 ETHER_UNLOCK(ec);
1843 return rv;
1844 }
1845
1846 static int
1847 ixl_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1848 {
1849 struct ifreq *ifr = (struct ifreq *)data;
1850 struct ixl_softc *sc = (struct ixl_softc *)ifp->if_softc;
1851 const struct sockaddr *sa;
1852 uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
1853 int s, error = 0;
1854 unsigned int nmtu;
1855
1856 switch (cmd) {
1857 case SIOCSIFMTU:
1858 nmtu = ifr->ifr_mtu;
1859
1860 if (nmtu < IXL_MIN_MTU || nmtu > IXL_MAX_MTU) {
1861 error = EINVAL;
1862 break;
1863 }
1864 if (ifp->if_mtu != nmtu) {
1865 s = splnet();
1866 error = ether_ioctl(ifp, cmd, data);
1867 splx(s);
1868 if (error == ENETRESET)
1869 error = ixl_init(ifp);
1870 }
1871 break;
1872 case SIOCADDMULTI:
1873 sa = ifreq_getaddr(SIOCADDMULTI, ifr);
1874 if (ether_addmulti(sa, &sc->sc_ec) == ENETRESET) {
1875 error = ether_multiaddr(sa, addrlo, addrhi);
1876 if (error != 0)
1877 return error;
1878
1879 error = ixl_add_multi(sc, addrlo, addrhi);
1880 if (error != 0 && error != ENETRESET) {
1881 ether_delmulti(sa, &sc->sc_ec);
1882 error = EIO;
1883 }
1884 }
1885 break;
1886
1887 case SIOCDELMULTI:
1888 sa = ifreq_getaddr(SIOCDELMULTI, ifr);
1889 if (ether_delmulti(sa, &sc->sc_ec) == ENETRESET) {
1890 error = ether_multiaddr(sa, addrlo, addrhi);
1891 if (error != 0)
1892 return error;
1893
1894 error = ixl_del_multi(sc, addrlo, addrhi);
1895 }
1896 break;
1897
1898 default:
1899 s = splnet();
1900 error = ether_ioctl(ifp, cmd, data);
1901 splx(s);
1902 }
1903
1904 if (error == ENETRESET)
1905 error = ixl_iff(sc);
1906
1907 return error;
1908 }
1909
1910 static enum i40e_mac_type
1911 ixl_mactype(pci_product_id_t id)
1912 {
1913
1914 switch (id) {
1915 case PCI_PRODUCT_INTEL_XL710_SFP:
1916 case PCI_PRODUCT_INTEL_XL710_KX_B:
1917 case PCI_PRODUCT_INTEL_XL710_KX_C:
1918 case PCI_PRODUCT_INTEL_XL710_QSFP_A:
1919 case PCI_PRODUCT_INTEL_XL710_QSFP_B:
1920 case PCI_PRODUCT_INTEL_XL710_QSFP_C:
1921 case PCI_PRODUCT_INTEL_X710_10G_T:
1922 case PCI_PRODUCT_INTEL_XL710_20G_BP_1:
1923 case PCI_PRODUCT_INTEL_XL710_20G_BP_2:
1924 case PCI_PRODUCT_INTEL_X710_T4_10G:
1925 case PCI_PRODUCT_INTEL_XXV710_25G_BP:
1926 case PCI_PRODUCT_INTEL_XXV710_25G_SFP28:
1927 return I40E_MAC_XL710;
1928
1929 case PCI_PRODUCT_INTEL_X722_KX:
1930 case PCI_PRODUCT_INTEL_X722_QSFP:
1931 case PCI_PRODUCT_INTEL_X722_SFP:
1932 case PCI_PRODUCT_INTEL_X722_1G_BASET:
1933 case PCI_PRODUCT_INTEL_X722_10G_BASET:
1934 case PCI_PRODUCT_INTEL_X722_I_SFP:
1935 return I40E_MAC_X722;
1936 }
1937
1938 return I40E_MAC_GENERIC;
1939 }
1940
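/*
 * The HMC (host memory cache) backing store is a single DMA area carved
 * into per-object regions.  Each ixl_hmc_entry records the base offset
 * and element size of one object type; ixl_hmc_kva() returns the kernel
 * virtual address of the i'th object of the given type, or NULL if the
 * index is out of range.
 */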
1941 static inline void *
1942 ixl_hmc_kva(struct ixl_softc *sc, enum ixl_hmc_types type, unsigned int i)
1943 {
1944 uint8_t *kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
1945 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
1946
1947 if (i >= e->hmc_count)
1948 return NULL;
1949
1950 kva += e->hmc_base;
1951 kva += i * e->hmc_size;
1952
1953 return kva;
1954 }
1955
1956 static inline size_t
1957 ixl_hmc_len(struct ixl_softc *sc, enum ixl_hmc_types type)
1958 {
1959 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
1960
1961 return e->hmc_size;
1962 }
1963
1964 static void
1965 ixl_enable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp)
1966 {
1967 struct ixl_rx_ring *rxr = qp->qp_rxr;
1968
1969 ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid),
1970 I40E_PFINT_DYN_CTLN_INTENA_MASK |
1971 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1972 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
1973 ixl_flush(sc);
1974 }
1975
1976 static void
1977 ixl_disable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp)
1978 {
1979 struct ixl_rx_ring *rxr = qp->qp_rxr;
1980
1981 ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid),
1982 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
1983 ixl_flush(sc);
1984 }
1985
1986 static void
1987 ixl_enable_other_intr(struct ixl_softc *sc)
1988 {
1989
1990 ixl_wr(sc, I40E_PFINT_DYN_CTL0,
1991 I40E_PFINT_DYN_CTL0_INTENA_MASK |
1992 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
1993 (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
1994 ixl_flush(sc);
1995 }
1996
1997 static void
1998 ixl_disable_other_intr(struct ixl_softc *sc)
1999 {
2000
2001 ixl_wr(sc, I40E_PFINT_DYN_CTL0,
2002 (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
2003 ixl_flush(sc);
2004 }
2005
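/*
 * Bring the hardware rings back in line with the software state: fetch
 * and update the VSI, rewrite the per-queue TX/RX HMC contexts, point
 * the queues at this PF, pre-fill the RX rings and then request queue
 * enable, polling each queue until the enable completes or times out.
 */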
2006 static int
2007 ixl_reinit(struct ixl_softc *sc)
2008 {
2009 struct ixl_rx_ring *rxr;
2010 struct ixl_tx_ring *txr;
2011 unsigned int i;
2012 uint32_t reg;
2013
2014 KASSERT(mutex_owned(&sc->sc_cfg_lock));
2015
2016 if (ixl_get_vsi(sc) != 0)
2017 return EIO;
2018
2019 if (ixl_set_vsi(sc) != 0)
2020 return EIO;
2021
2022 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2023 txr = sc->sc_qps[i].qp_txr;
2024 rxr = sc->sc_qps[i].qp_rxr;
2025
2026 ixl_txr_config(sc, txr);
2027 ixl_rxr_config(sc, rxr);
2028 }
2029
2030 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
2031 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_PREWRITE);
2032
2033 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2034 txr = sc->sc_qps[i].qp_txr;
2035 rxr = sc->sc_qps[i].qp_rxr;
2036
2037 ixl_wr(sc, I40E_QTX_CTL(i), I40E_QTX_CTL_PF_QUEUE |
2038 (sc->sc_pf_id << I40E_QTX_CTL_PF_INDX_SHIFT));
2039 ixl_flush(sc);
2040
2041 ixl_wr(sc, txr->txr_tail, txr->txr_prod);
2042 ixl_wr(sc, rxr->rxr_tail, rxr->rxr_prod);
2043
2044 /* ixl_rxfill() needs lock held */
2045 mutex_enter(&rxr->rxr_lock);
2046 ixl_rxfill(sc, rxr);
2047 mutex_exit(&rxr->rxr_lock);
2048
2049 reg = ixl_rd(sc, I40E_QRX_ENA(i));
2050 SET(reg, I40E_QRX_ENA_QENA_REQ_MASK);
2051 ixl_wr(sc, I40E_QRX_ENA(i), reg);
2052 if (ixl_rxr_enabled(sc, rxr) != 0)
2053 goto stop;
2054
2055 ixl_txr_qdis(sc, txr, 1);
2056
2057 reg = ixl_rd(sc, I40E_QTX_ENA(i));
2058 SET(reg, I40E_QTX_ENA_QENA_REQ_MASK);
2059 ixl_wr(sc, I40E_QTX_ENA(i), reg);
2060
2061 if (ixl_txr_enabled(sc, txr) != 0)
2062 goto stop;
2063 }
2064
2065 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
2066 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE);
2067
2068 return 0;
2069
2070 stop:
2071 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
2072 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE);
2073
2074 return ETIMEDOUT;
2075 }
2076
2077 static int
2078 ixl_init_locked(struct ixl_softc *sc)
2079 {
2080 struct ifnet *ifp = &sc->sc_ec.ec_if;
2081 unsigned int i;
2082 int error, eccap_change;
2083
2084 KASSERT(mutex_owned(&sc->sc_cfg_lock));
2085
2086 if (ISSET(ifp->if_flags, IFF_RUNNING))
2087 ixl_stop_locked(sc);
2088
2089 if (sc->sc_dead) {
2090 return ENXIO;
2091 }
2092
2093 eccap_change = sc->sc_ec.ec_capenable ^ sc->sc_cur_ec_capenable;
2094 if (ISSET(eccap_change, ETHERCAP_VLAN_HWTAGGING))
2095 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWTAGGING;
2096
2097 if (ISSET(eccap_change, ETHERCAP_VLAN_HWFILTER)) {
2098 if (ixl_update_macvlan(sc) == 0) {
2099 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWFILTER;
2100 } else {
2101 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
2102 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER);
2103 }
2104 }
2105
2106 if (sc->sc_intrtype != PCI_INTR_TYPE_MSIX)
2107 sc->sc_nqueue_pairs = 1;
2108 else
2109 sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max;
2110
2111 error = ixl_reinit(sc);
2112 if (error) {
2113 ixl_stop_locked(sc);
2114 return error;
2115 }
2116
2117 SET(ifp->if_flags, IFF_RUNNING);
2118 CLR(ifp->if_flags, IFF_OACTIVE);
2119
2120 (void)ixl_get_link_status(sc);
2121
2122 ixl_config_rss(sc);
2123 ixl_config_queue_intr(sc);
2124
2125 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2126 ixl_enable_queue_intr(sc, &sc->sc_qps[i]);
2127 }
2128
2129 error = ixl_iff(sc);
2130 if (error) {
2131 ixl_stop_locked(sc);
2132 return error;
2133 }
2134
2135 callout_schedule(&sc->sc_stats_callout, mstohz(sc->sc_stats_intval));
2136
2137 return 0;
2138 }
2139
2140 static int
2141 ixl_init(struct ifnet *ifp)
2142 {
2143 struct ixl_softc *sc = ifp->if_softc;
2144 int error;
2145
2146 mutex_enter(&sc->sc_cfg_lock);
2147 error = ixl_init_locked(sc);
2148 mutex_exit(&sc->sc_cfg_lock);
2149
2150 return error;
2151 }
2152
2153 static int
2154 ixl_iff(struct ixl_softc *sc)
2155 {
2156 struct ifnet *ifp = &sc->sc_ec.ec_if;
2157 struct ixl_atq iatq;
2158 struct ixl_aq_desc *iaq;
2159 struct ixl_aq_vsi_promisc_param *param;
2160 uint16_t flag_add, flag_del;
2161 int error;
2162
2163 if (!ISSET(ifp->if_flags, IFF_RUNNING))
2164 return 0;
2165
2166 memset(&iatq, 0, sizeof(iatq));
2167
2168 iaq = &iatq.iatq_desc;
2169 iaq->iaq_opcode = htole16(IXL_AQ_OP_SET_VSI_PROMISC);
2170
2171 param = (struct ixl_aq_vsi_promisc_param *)&iaq->iaq_param;
2172 param->flags = htole16(0);
2173
2174 if (!ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)
2175 || ISSET(ifp->if_flags, IFF_PROMISC)) {
2176 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_BCAST |
2177 IXL_AQ_VSI_PROMISC_FLAG_VLAN);
2178 }
2179
2180 if (ISSET(ifp->if_flags, IFF_PROMISC)) {
2181 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
2182 IXL_AQ_VSI_PROMISC_FLAG_MCAST);
2183 } else if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
2184 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_MCAST);
2185 }
2186 param->valid_flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
2187 IXL_AQ_VSI_PROMISC_FLAG_MCAST | IXL_AQ_VSI_PROMISC_FLAG_BCAST |
2188 IXL_AQ_VSI_PROMISC_FLAG_VLAN);
2189 param->seid = sc->sc_seid;
2190
2191 error = ixl_atq_exec(sc, &iatq);
2192 if (error)
2193 return error;
2194
2195 if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK))
2196 return EIO;
2197
2198 if (memcmp(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN) != 0) {
2199 if (ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)) {
2200 flag_add = IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH;
2201 flag_del = IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH;
2202 } else {
2203 flag_add = IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN;
2204 flag_del = IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN;
2205 }
2206
2207 ixl_remove_macvlan(sc, sc->sc_enaddr, 0, flag_del);
2208
2209 memcpy(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN);
2210 ixl_add_macvlan(sc, sc->sc_enaddr, 0, flag_add);
2211 }
2212 return 0;
2213 }
2214
2215 static void
2216 ixl_stop_rendezvous(struct ixl_softc *sc)
2217 {
2218 struct ixl_tx_ring *txr;
2219 struct ixl_rx_ring *rxr;
2220 unsigned int i;
2221
2222 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2223 txr = sc->sc_qps[i].qp_txr;
2224 rxr = sc->sc_qps[i].qp_rxr;
2225
2226 mutex_enter(&txr->txr_lock);
2227 mutex_exit(&txr->txr_lock);
2228
2229 mutex_enter(&rxr->rxr_lock);
2230 mutex_exit(&rxr->rxr_lock);
2231
2232 sc->sc_qps[i].qp_workqueue = false;
2233 workqueue_wait(sc->sc_workq_txrx,
2234 &sc->sc_qps[i].qp_work);
2235 }
2236 }
2237
2238 static void
2239 ixl_stop_locked(struct ixl_softc *sc)
2240 {
2241 struct ifnet *ifp = &sc->sc_ec.ec_if;
2242 struct ixl_rx_ring *rxr;
2243 struct ixl_tx_ring *txr;
2244 unsigned int i;
2245 uint32_t reg;
2246
2247 KASSERT(mutex_owned(&sc->sc_cfg_lock));
2248
2249 CLR(ifp->if_flags, IFF_RUNNING | IFF_OACTIVE);
2250 callout_stop(&sc->sc_stats_callout);
2251
2252 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2253 txr = sc->sc_qps[i].qp_txr;
2254 rxr = sc->sc_qps[i].qp_rxr;
2255
2256 ixl_disable_queue_intr(sc, &sc->sc_qps[i]);
2257
2258 mutex_enter(&txr->txr_lock);
2259 ixl_txr_qdis(sc, txr, 0);
2260 /* XXX wait at least 400 usec for all tx queues in one go */
2261 ixl_flush(sc);
2262 DELAY(500);
2263
2264 reg = ixl_rd(sc, I40E_QTX_ENA(i));
2265 CLR(reg, I40E_QTX_ENA_QENA_REQ_MASK);
2266 ixl_wr(sc, I40E_QTX_ENA(i), reg);
2267 /* XXX wait 50ms from completion of the TX queue disable */
2268 ixl_flush(sc);
2269 DELAY(50);
2270
2271 if (ixl_txr_disabled(sc, txr) != 0) {
2272 mutex_exit(&txr->txr_lock);
2273 goto die;
2274 }
2275 mutex_exit(&txr->txr_lock);
2276
2277 mutex_enter(&rxr->rxr_lock);
2278 reg = ixl_rd(sc, I40E_QRX_ENA(i));
2279 CLR(reg, I40E_QRX_ENA_QENA_REQ_MASK);
2280 ixl_wr(sc, I40E_QRX_ENA(i), reg);
2281 /* XXX wait 50ms from completion of the RX queue disable */
2282 ixl_flush(sc);
2283 DELAY(50);
2284
2285 if (ixl_rxr_disabled(sc, rxr) != 0) {
2286 mutex_exit(&rxr->rxr_lock);
2287 goto die;
2288 }
2289 mutex_exit(&rxr->rxr_lock);
2290 }
2291
2292 ixl_stop_rendezvous(sc);
2293
2294 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2295 txr = sc->sc_qps[i].qp_txr;
2296 rxr = sc->sc_qps[i].qp_rxr;
2297
2298 mutex_enter(&txr->txr_lock);
2299 ixl_txr_unconfig(sc, txr);
2300 mutex_exit(&txr->txr_lock);
2301
2302 mutex_enter(&rxr->rxr_lock);
2303 ixl_rxr_unconfig(sc, rxr);
2304 mutex_exit(&rxr->rxr_lock);
2305
2306 ixl_txr_clean(sc, txr);
2307 ixl_rxr_clean(sc, rxr);
2308 }
2309
2310 return;
2311 die:
2312 sc->sc_dead = true;
2313 log(LOG_CRIT, "%s: failed to shut down rings\n",
2314 device_xname(sc->sc_dev));
2315 return;
2316 }
2317
2318 static void
2319 ixl_stop(struct ifnet *ifp, int disable)
2320 {
2321 struct ixl_softc *sc = ifp->if_softc;
2322
2323 mutex_enter(&sc->sc_cfg_lock);
2324 ixl_stop_locked(sc);
2325 mutex_exit(&sc->sc_cfg_lock);
2326 }
2327
2328 static int
2329 ixl_queue_pairs_alloc(struct ixl_softc *sc)
2330 {
2331 struct ixl_queue_pair *qp;
2332 unsigned int i;
2333 size_t sz;
2334
2335 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2336 sc->sc_qps = kmem_zalloc(sz, KM_SLEEP);
2337
2338 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2339 qp = &sc->sc_qps[i];
2340
2341 qp->qp_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
2342 ixl_handle_queue, qp);
2343 if (qp->qp_si == NULL)
2344 goto free;
2345
2346 qp->qp_txr = ixl_txr_alloc(sc, i);
2347 if (qp->qp_txr == NULL)
2348 goto free;
2349
2350 qp->qp_rxr = ixl_rxr_alloc(sc, i);
2351 if (qp->qp_rxr == NULL)
2352 goto free;
2353
2354 qp->qp_sc = sc;
2355 snprintf(qp->qp_name, sizeof(qp->qp_name),
2356 "%s-TXRX%d", device_xname(sc->sc_dev), i);
2357 }
2358
2359 return 0;
2360 free:
2361 if (sc->sc_qps != NULL) {
2362 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2363 qp = &sc->sc_qps[i];
2364
2365 if (qp->qp_txr != NULL)
2366 ixl_txr_free(sc, qp->qp_txr);
2367 if (qp->qp_rxr != NULL)
2368 ixl_rxr_free(sc, qp->qp_rxr);
2369 if (qp->qp_si != NULL)
2370 softint_disestablish(qp->qp_si);
2371 }
2372
2373 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2374 kmem_free(sc->sc_qps, sz);
2375 sc->sc_qps = NULL;
2376 }
2377
2378 return -1;
2379 }
2380
2381 static void
2382 ixl_queue_pairs_free(struct ixl_softc *sc)
2383 {
2384 struct ixl_queue_pair *qp;
2385 unsigned int i;
2386 size_t sz;
2387
2388 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2389 qp = &sc->sc_qps[i];
2390 ixl_txr_free(sc, qp->qp_txr);
2391 ixl_rxr_free(sc, qp->qp_rxr);
2392 softint_disestablish(qp->qp_si);
2393 }
2394
2395 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2396 kmem_free(sc->sc_qps, sz);
2397 sc->sc_qps = NULL;
2398 }
2399
2400 static struct ixl_tx_ring *
2401 ixl_txr_alloc(struct ixl_softc *sc, unsigned int qid)
2402 {
2403 struct ixl_tx_ring *txr = NULL;
2404 struct ixl_tx_map *maps = NULL, *txm;
2405 unsigned int i;
2406
2407 txr = kmem_zalloc(sizeof(*txr), KM_SLEEP);
2408 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_tx_ring_ndescs,
2409 KM_SLEEP);
2410
2411 if (ixl_dmamem_alloc(sc, &txr->txr_mem,
2412 sizeof(struct ixl_tx_desc) * sc->sc_tx_ring_ndescs,
2413 IXL_TX_QUEUE_ALIGN) != 0)
2414 goto free;
2415
2416 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2417 txm = &maps[i];
2418
2419 if (bus_dmamap_create(sc->sc_dmat, IXL_TX_PKT_MAXSIZE,
2420 IXL_TX_PKT_DESCS, IXL_TX_PKT_MAXSIZE, 0,
2421 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &txm->txm_map) != 0)
2422 goto uncreate;
2423
2424 txm->txm_eop = -1;
2425 txm->txm_m = NULL;
2426 }
2427
2428 txr->txr_cons = txr->txr_prod = 0;
2429 txr->txr_maps = maps;
2430
2431 txr->txr_intrq = pcq_create(sc->sc_tx_ring_ndescs, KM_NOSLEEP);
2432 if (txr->txr_intrq == NULL)
2433 goto uncreate;
2434
2435 txr->txr_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
2436 ixl_deferred_transmit, txr);
2437 if (txr->txr_si == NULL)
2438 goto destroy_pcq;
2439
2440 txr->txr_tail = I40E_QTX_TAIL(qid);
2441 txr->txr_qid = qid;
2442 txr->txr_sc = sc;
2443 mutex_init(&txr->txr_lock, MUTEX_DEFAULT, IPL_NET);
2444
2445 return txr;
2446
2447 destroy_pcq:
2448 pcq_destroy(txr->txr_intrq);
2449 uncreate:
2450 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2451 txm = &maps[i];
2452
2453 if (txm->txm_map == NULL)
2454 continue;
2455
2456 bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2457 }
2458
2459 ixl_dmamem_free(sc, &txr->txr_mem);
2460 free:
2461 kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs);
2462 kmem_free(txr, sizeof(*txr));
2463
2464 return NULL;
2465 }
2466
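/*
 * Request or clear the pre-disable state of a TX queue before it is
 * enabled or disabled through I40E_QTX_ENA.  The GLLAN_TXPRE_QDIS
 * registers each cover 128 absolute queues and take the queue index
 * within that group in their QINDX field; e.g. absolute queue 130 uses
 * register 1 with index 2.
 */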
2467 static void
2468 ixl_txr_qdis(struct ixl_softc *sc, struct ixl_tx_ring *txr, int enable)
2469 {
2470 unsigned int qid;
2471 bus_size_t reg;
2472 uint32_t r;
2473
2474 qid = txr->txr_qid + sc->sc_base_queue;
2475 reg = I40E_GLLAN_TXPRE_QDIS(qid / 128);
2476 qid %= 128;
2477
2478 r = ixl_rd(sc, reg);
2479 CLR(r, I40E_GLLAN_TXPRE_QDIS_QINDX_MASK);
2480 SET(r, qid << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
2481 SET(r, enable ? I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK :
2482 I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK);
2483 ixl_wr(sc, reg, r);
2484 }
2485
2486 static void
2487 ixl_txr_config(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2488 {
2489 struct ixl_hmc_txq txq;
2490 struct ixl_aq_vsi_data *data = IXL_DMA_KVA(&sc->sc_scratch);
2491 void *hmc;
2492
2493 memset(&txq, 0, sizeof(txq));
2494 txq.head = htole16(txr->txr_cons);
2495 txq.new_context = 1;
2496 txq.base = htole64(IXL_DMA_DVA(&txr->txr_mem) / IXL_HMC_TXQ_BASE_UNIT);
2497 txq.head_wb_ena = IXL_HMC_TXQ_DESC_WB;
2498 txq.qlen = htole16(sc->sc_tx_ring_ndescs);
2499 txq.tphrdesc_ena = 0;
2500 txq.tphrpacket_ena = 0;
2501 txq.tphwdesc_ena = 0;
2502 txq.rdylist = data->qs_handle[0];
2503
2504 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2505 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2506 ixl_hmc_pack(hmc, &txq, ixl_hmc_pack_txq,
2507 __arraycount(ixl_hmc_pack_txq));
2508 }
2509
2510 static void
2511 ixl_txr_unconfig(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2512 {
2513 void *hmc;
2514
2515 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2516 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2517 txr->txr_cons = txr->txr_prod = 0;
2518 }
2519
2520 static void
2521 ixl_txr_clean(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2522 {
2523 struct ixl_tx_map *maps, *txm;
2524 bus_dmamap_t map;
2525 unsigned int i;
2526
2527 maps = txr->txr_maps;
2528 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2529 txm = &maps[i];
2530
2531 if (txm->txm_m == NULL)
2532 continue;
2533
2534 map = txm->txm_map;
2535 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2536 BUS_DMASYNC_POSTWRITE);
2537 bus_dmamap_unload(sc->sc_dmat, map);
2538
2539 m_freem(txm->txm_m);
2540 txm->txm_m = NULL;
2541 }
2542 }
2543
2544 static int
2545 ixl_txr_enabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2546 {
2547 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2548 uint32_t reg;
2549 int i;
2550
2551 for (i = 0; i < 10; i++) {
2552 reg = ixl_rd(sc, ena);
2553 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK))
2554 return 0;
2555
2556 delaymsec(10);
2557 }
2558
2559 return ETIMEDOUT;
2560 }
2561
2562 static int
2563 ixl_txr_disabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2564 {
2565 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2566 uint32_t reg;
2567 int i;
2568
2569 KASSERT(mutex_owned(&txr->txr_lock));
2570
2571 for (i = 0; i < 20; i++) {
2572 reg = ixl_rd(sc, ena);
2573 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK) == 0)
2574 return 0;
2575
2576 delaymsec(10);
2577 }
2578
2579 return ETIMEDOUT;
2580 }
2581
2582 static void
2583 ixl_txr_free(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2584 {
2585 struct ixl_tx_map *maps, *txm;
2586 struct mbuf *m;
2587 unsigned int i;
2588
2589 softint_disestablish(txr->txr_si);
2590 while ((m = pcq_get(txr->txr_intrq)) != NULL)
2591 m_freem(m);
2592 pcq_destroy(txr->txr_intrq);
2593
2594 maps = txr->txr_maps;
2595 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2596 txm = &maps[i];
2597
2598 bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2599 }
2600
2601 ixl_dmamem_free(sc, &txr->txr_mem);
2602 mutex_destroy(&txr->txr_lock);
2603 kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs);
2604 kmem_free(txr, sizeof(*txr));
2605 }
2606
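/*
 * DMA-map an outgoing mbuf chain.  If the chain has more segments than
 * the map allows (EFBIG), try once to linearize it with m_defrag() and
 * reload; any other failure is returned to the caller, which drops the
 * packet.  The defragment and failure cases feed the per-ring event
 * counters.
 */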
2607 static inline int
2608 ixl_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf **m0,
2609 struct ixl_tx_ring *txr)
2610 {
2611 struct mbuf *m;
2612 int error;
2613
2614 KASSERT(mutex_owned(&txr->txr_lock));
2615
2616 m = *m0;
2617
2618 error = bus_dmamap_load_mbuf(dmat, map, m,
2619 BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT);
2620 if (error != EFBIG)
2621 return error;
2622
2623 m = m_defrag(m, M_DONTWAIT);
2624 if (m != NULL) {
2625 *m0 = m;
2626 txr->txr_defragged.ev_count++;
2627
2628 error = bus_dmamap_load_mbuf(dmat, map, m,
2629 BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT);
2630 } else {
2631 txr->txr_defrag_failed.ev_count++;
2632 error = ENOBUFS;
2633 }
2634
2635 return error;
2636 }
2637
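/*
 * Translate the mbuf checksum/TSO flags into TX descriptor command bits.
 * Header lengths are encoded in the units the descriptor expects: the
 * MAC header length in 2-byte words (len >> 1), the IP and L4 header
 * lengths in 4-byte words (len >> 2), for the MACLEN/IPLEN/L4LEN fields.
 */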
2638 static inline int
2639 ixl_tx_setup_offloads(struct mbuf *m, uint64_t *cmd_txd)
2640 {
2641 struct ether_header *eh;
2642 size_t len;
2643 uint64_t cmd;
2644
2645 cmd = 0;
2646
2647 eh = mtod(m, struct ether_header *);
2648 switch (htons(eh->ether_type)) {
2649 case ETHERTYPE_IP:
2650 case ETHERTYPE_IPV6:
2651 len = ETHER_HDR_LEN;
2652 break;
2653 case ETHERTYPE_VLAN:
2654 len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2655 break;
2656 default:
2657 len = 0;
2658 }
2659 cmd |= ((len >> 1) << IXL_TX_DESC_MACLEN_SHIFT);
2660
2661 if (m->m_pkthdr.csum_flags &
2662 (M_CSUM_TSOv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
2663 cmd |= IXL_TX_DESC_CMD_IIPT_IPV4;
2664 }
2665 if (m->m_pkthdr.csum_flags & M_CSUM_IPv4) {
2666 cmd |= IXL_TX_DESC_CMD_IIPT_IPV4_CSUM;
2667 }
2668
2669 if (m->m_pkthdr.csum_flags &
2670 (M_CSUM_TSOv6 | M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
2671 cmd |= IXL_TX_DESC_CMD_IIPT_IPV6;
2672 }
2673
2674 switch (cmd & IXL_TX_DESC_CMD_IIPT_MASK) {
2675 case IXL_TX_DESC_CMD_IIPT_IPV4:
2676 case IXL_TX_DESC_CMD_IIPT_IPV4_CSUM:
2677 len = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data);
2678 break;
2679 case IXL_TX_DESC_CMD_IIPT_IPV6:
2680 len = M_CSUM_DATA_IPv6_IPHL(m->m_pkthdr.csum_data);
2681 break;
2682 default:
2683 len = 0;
2684 }
2685 cmd |= ((len >> 2) << IXL_TX_DESC_IPLEN_SHIFT);
2686
2687 if (m->m_pkthdr.csum_flags &
2688 (M_CSUM_TSOv4 | M_CSUM_TSOv6 | M_CSUM_TCPv4 | M_CSUM_TCPv6)) {
2689 len = sizeof(struct tcphdr);
2690 cmd |= IXL_TX_DESC_CMD_L4T_EOFT_TCP;
2691 } else if (m->m_pkthdr.csum_flags & (M_CSUM_UDPv4 | M_CSUM_UDPv6)) {
2692 len = sizeof(struct udphdr);
2693 cmd |= IXL_TX_DESC_CMD_L4T_EOFT_UDP;
2694 } else {
2695 len = 0;
2696 }
2697 cmd |= ((len >> 2) << IXL_TX_DESC_L4LEN_SHIFT);
2698
2699 *cmd_txd |= cmd;
2700 return 0;
2701 }
2702
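/*
 * Common transmit path for if_start (is_transmit == false, if_snd queue)
 * and if_transmit (is_transmit == true, per-queue pcq).  The number of
 * free descriptors is the distance from prod to cons around the ring;
 * e.g. with 1024 descriptors, prod = 1000 and cons = 10 leave
 * 10 + 1024 - 1000 = 34 slots.  Each packet consumes one descriptor per
 * DMA segment; the last descriptor gets EOP | RS so the hardware reports
 * completion, and the tail register is bumped once at the end.
 */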
2703 static void
2704 ixl_tx_common_locked(struct ifnet *ifp, struct ixl_tx_ring *txr,
2705 bool is_transmit)
2706 {
2707 struct ixl_softc *sc = ifp->if_softc;
2708 struct ixl_tx_desc *ring, *txd;
2709 struct ixl_tx_map *txm;
2710 bus_dmamap_t map;
2711 struct mbuf *m;
2712 uint64_t cmd, cmd_txd;
2713 unsigned int prod, free, last, i;
2714 unsigned int mask;
2715 int post = 0;
2716
2717 KASSERT(mutex_owned(&txr->txr_lock));
2718
2719 if (ifp->if_link_state != LINK_STATE_UP
2720 || !ISSET(ifp->if_flags, IFF_RUNNING)
2721 || (!is_transmit && ISSET(ifp->if_flags, IFF_OACTIVE))) {
2722 if (!is_transmit)
2723 IFQ_PURGE(&ifp->if_snd);
2724 return;
2725 }
2726
2727 prod = txr->txr_prod;
2728 free = txr->txr_cons;
2729 if (free <= prod)
2730 free += sc->sc_tx_ring_ndescs;
2731 free -= prod;
2732
2733 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2734 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTWRITE);
2735
2736 ring = IXL_DMA_KVA(&txr->txr_mem);
2737 mask = sc->sc_tx_ring_ndescs - 1;
2738 last = prod;
2739 cmd = 0;
2740 txd = NULL;
2741
2742 for (;;) {
2743 if (free <= IXL_TX_PKT_DESCS) {
2744 if (!is_transmit)
2745 SET(ifp->if_flags, IFF_OACTIVE);
2746 break;
2747 }
2748
2749 if (is_transmit)
2750 m = pcq_get(txr->txr_intrq);
2751 else
2752 IFQ_DEQUEUE(&ifp->if_snd, m);
2753
2754 if (m == NULL)
2755 break;
2756
2757 txm = &txr->txr_maps[prod];
2758 map = txm->txm_map;
2759
2760 if (ixl_load_mbuf(sc->sc_dmat, map, &m, txr) != 0) {
2761 if_statinc(ifp, if_oerrors);
2762 m_freem(m);
2763 continue;
2764 }
2765
2766 cmd_txd = 0;
2767 if (m->m_pkthdr.csum_flags & IXL_CSUM_ALL_OFFLOAD) {
2768 ixl_tx_setup_offloads(m, &cmd_txd);
2769 }
2770
2771 if (vlan_has_tag(m)) {
2772 cmd_txd |= (uint64_t)vlan_get_tag(m) <<
2773 IXL_TX_DESC_L2TAG1_SHIFT;
2774 cmd_txd |= IXL_TX_DESC_CMD_IL2TAG1;
2775 }
2776
2777 bus_dmamap_sync(sc->sc_dmat, map, 0,
2778 map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2779
2780 for (i = 0; i < (unsigned int)map->dm_nsegs; i++) {
2781 txd = &ring[prod];
2782
2783 cmd = (uint64_t)map->dm_segs[i].ds_len <<
2784 IXL_TX_DESC_BSIZE_SHIFT;
2785 cmd |= IXL_TX_DESC_DTYPE_DATA | IXL_TX_DESC_CMD_ICRC;
2786 cmd |= cmd_txd;
2787
2788 txd->addr = htole64(map->dm_segs[i].ds_addr);
2789 txd->cmd = htole64(cmd);
2790
2791 last = prod;
2792
2793 prod++;
2794 prod &= mask;
2795 }
2796 cmd |= IXL_TX_DESC_CMD_EOP | IXL_TX_DESC_CMD_RS;
2797 txd->cmd = htole64(cmd);
2798
2799 txm->txm_m = m;
2800 txm->txm_eop = last;
2801
2802 bpf_mtap(ifp, m, BPF_D_OUT);
2803
2804 free -= i;
2805 post = 1;
2806 }
2807
2808 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2809 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREWRITE);
2810
2811 if (post) {
2812 txr->txr_prod = prod;
2813 ixl_wr(sc, txr->txr_tail, prod);
2814 }
2815 }
2816
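/*
 * Reclaim completed TX descriptors.  Completion is detected by the
 * DTYPE field of a packet's EOP descriptor changing to DTYPE_DONE; the
 * mapped mbuf is then unloaded and freed and the interface output
 * counters are updated.  Processing stops once txlimit packets have
 * been reclaimed, returning nonzero so the caller can reschedule the
 * remainder.
 */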
2817 static int
2818 ixl_txeof(struct ixl_softc *sc, struct ixl_tx_ring *txr, u_int txlimit)
2819 {
2820 struct ifnet *ifp = &sc->sc_ec.ec_if;
2821 struct ixl_tx_desc *ring, *txd;
2822 struct ixl_tx_map *txm;
2823 struct mbuf *m;
2824 bus_dmamap_t map;
2825 unsigned int cons, prod, last;
2826 unsigned int mask;
2827 uint64_t dtype;
2828 int done = 0, more = 0;
2829
2830 KASSERT(mutex_owned(&txr->txr_lock));
2831
2832 prod = txr->txr_prod;
2833 cons = txr->txr_cons;
2834
2835 if (cons == prod)
2836 return 0;
2837
2838 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2839 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTREAD);
2840
2841 ring = IXL_DMA_KVA(&txr->txr_mem);
2842 mask = sc->sc_tx_ring_ndescs - 1;
2843
2844 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
2845
2846 do {
2847 if (txlimit-- <= 0) {
2848 more = 1;
2849 break;
2850 }
2851
2852 txm = &txr->txr_maps[cons];
2853 last = txm->txm_eop;
2854 txd = &ring[last];
2855
2856 dtype = txd->cmd & htole64(IXL_TX_DESC_DTYPE_MASK);
2857 if (dtype != htole64(IXL_TX_DESC_DTYPE_DONE))
2858 break;
2859
2860 map = txm->txm_map;
2861
2862 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2863 BUS_DMASYNC_POSTWRITE);
2864 bus_dmamap_unload(sc->sc_dmat, map);
2865
2866 m = txm->txm_m;
2867 if (m != NULL) {
2868 if_statinc_ref(nsr, if_opackets);
2869 if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
2870 if (ISSET(m->m_flags, M_MCAST))
2871 if_statinc_ref(nsr, if_omcasts);
2872 m_freem(m);
2873 }
2874
2875 txm->txm_m = NULL;
2876 txm->txm_eop = -1;
2877
2878 cons = last + 1;
2879 cons &= mask;
2880 done = 1;
2881 } while (cons != prod);
2882
2883 IF_STAT_PUTREF(ifp);
2884
2885 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2886 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREREAD);
2887
2888 txr->txr_cons = cons;
2889
2890 if (done) {
2891 softint_schedule(txr->txr_si);
2892 if (txr->txr_qid == 0) {
2893 CLR(ifp->if_flags, IFF_OACTIVE);
2894 if_schedule_deferred_start(ifp);
2895 }
2896 }
2897
2898 return more;
2899 }
2900
2901 static void
2902 ixl_start(struct ifnet *ifp)
2903 {
2904 struct ixl_softc *sc;
2905 struct ixl_tx_ring *txr;
2906
2907 sc = ifp->if_softc;
2908 txr = sc->sc_qps[0].qp_txr;
2909
2910 mutex_enter(&txr->txr_lock);
2911 ixl_tx_common_locked(ifp, txr, false);
2912 mutex_exit(&txr->txr_lock);
2913 }
2914
2915 static inline unsigned int
2916 ixl_select_txqueue(struct ixl_softc *sc, struct mbuf *m)
2917 {
2918 u_int cpuid;
2919
2920 cpuid = cpu_index(curcpu());
2921
2922 return (unsigned int)(cpuid % sc->sc_nqueue_pairs);
2923 }
2924
2925 static int
2926 ixl_transmit(struct ifnet *ifp, struct mbuf *m)
2927 {
2928 struct ixl_softc *sc;
2929 struct ixl_tx_ring *txr;
2930 unsigned int qid;
2931
2932 sc = ifp->if_softc;
2933 qid = ixl_select_txqueue(sc, m);
2934
2935 txr = sc->sc_qps[qid].qp_txr;
2936
2937 if (__predict_false(!pcq_put(txr->txr_intrq, m))) {
2938 mutex_enter(&txr->txr_lock);
2939 txr->txr_pcqdrop.ev_count++;
2940 mutex_exit(&txr->txr_lock);
2941
2942 m_freem(m);
2943 return ENOBUFS;
2944 }
2945
2946 if (mutex_tryenter(&txr->txr_lock)) {
2947 ixl_tx_common_locked(ifp, txr, true);
2948 mutex_exit(&txr->txr_lock);
2949 } else {
2950 kpreempt_disable();
2951 softint_schedule(txr->txr_si);
2952 kpreempt_enable();
2953 }
2954
2955 return 0;
2956 }
2957
2958 static void
2959 ixl_deferred_transmit(void *xtxr)
2960 {
2961 struct ixl_tx_ring *txr = xtxr;
2962 struct ixl_softc *sc = txr->txr_sc;
2963 struct ifnet *ifp = &sc->sc_ec.ec_if;
2964
2965 mutex_enter(&txr->txr_lock);
2966 txr->txr_transmitdef.ev_count++;
2967 if (pcq_peek(txr->txr_intrq) != NULL)
2968 ixl_tx_common_locked(ifp, txr, true);
2969 mutex_exit(&txr->txr_lock);
2970 }
2971
2972 static struct ixl_rx_ring *
2973 ixl_rxr_alloc(struct ixl_softc *sc, unsigned int qid)
2974 {
2975 struct ixl_rx_ring *rxr = NULL;
2976 struct ixl_rx_map *maps = NULL, *rxm;
2977 unsigned int i;
2978
2979 rxr = kmem_zalloc(sizeof(*rxr), KM_SLEEP);
2980 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_rx_ring_ndescs,
2981 KM_SLEEP);
2982
2983 if (ixl_dmamem_alloc(sc, &rxr->rxr_mem,
2984 sizeof(struct ixl_rx_rd_desc_32) * sc->sc_rx_ring_ndescs,
2985 IXL_RX_QUEUE_ALIGN) != 0)
2986 goto free;
2987
2988 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2989 rxm = &maps[i];
2990
2991 if (bus_dmamap_create(sc->sc_dmat,
2992 IXL_MCLBYTES, 1, IXL_MCLBYTES, 0,
2993 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &rxm->rxm_map) != 0)
2994 goto uncreate;
2995
2996 rxm->rxm_m = NULL;
2997 }
2998
2999 rxr->rxr_cons = rxr->rxr_prod = 0;
3000 rxr->rxr_m_head = NULL;
3001 rxr->rxr_m_tail = &rxr->rxr_m_head;
3002 rxr->rxr_maps = maps;
3003
3004 rxr->rxr_tail = I40E_QRX_TAIL(qid);
3005 rxr->rxr_qid = qid;
3006 mutex_init(&rxr->rxr_lock, MUTEX_DEFAULT, IPL_NET);
3007
3008 return rxr;
3009
3010 uncreate:
3011 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
3012 rxm = &maps[i];
3013
3014 if (rxm->rxm_map == NULL)
3015 continue;
3016
3017 bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
3018 }
3019
3020 ixl_dmamem_free(sc, &rxr->rxr_mem);
3021 free:
3022 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs);
3023 kmem_free(rxr, sizeof(*rxr));
3024
3025 return NULL;
3026 }
3027
3028 static void
3029 ixl_rxr_clean(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3030 {
3031 struct ixl_rx_map *maps, *rxm;
3032 bus_dmamap_t map;
3033 unsigned int i;
3034
3035 maps = rxr->rxr_maps;
3036 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
3037 rxm = &maps[i];
3038
3039 if (rxm->rxm_m == NULL)
3040 continue;
3041
3042 map = rxm->rxm_map;
3043 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3044 BUS_DMASYNC_POSTWRITE);
3045 bus_dmamap_unload(sc->sc_dmat, map);
3046
3047 m_freem(rxm->rxm_m);
3048 rxm->rxm_m = NULL;
3049 }
3050
3051 m_freem(rxr->rxr_m_head);
3052 rxr->rxr_m_head = NULL;
3053 rxr->rxr_m_tail = &rxr->rxr_m_head;
3054
3055 rxr->rxr_prod = rxr->rxr_cons = 0;
3056 }
3057
3058 static int
3059 ixl_rxr_enabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3060 {
3061 bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
3062 uint32_t reg;
3063 int i;
3064
3065 for (i = 0; i < 10; i++) {
3066 reg = ixl_rd(sc, ena);
3067 if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK))
3068 return 0;
3069
3070 delaymsec(10);
3071 }
3072
3073 return ETIMEDOUT;
3074 }
3075
3076 static int
3077 ixl_rxr_disabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3078 {
3079 bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
3080 uint32_t reg;
3081 int i;
3082
3083 KASSERT(mutex_owned(&rxr->rxr_lock));
3084
3085 for (i = 0; i < 20; i++) {
3086 reg = ixl_rd(sc, ena);
3087 if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK) == 0)
3088 return 0;
3089
3090 delaymsec(10);
3091 }
3092
3093 return ETIMEDOUT;
3094 }
3095
3096 static void
3097 ixl_rxr_config(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3098 {
3099 struct ixl_hmc_rxq rxq;
3100 struct ifnet *ifp = &sc->sc_ec.ec_if;
3101 uint16_t rxmax;
3102 void *hmc;
3103
3104 memset(&rxq, 0, sizeof(rxq));
3105 rxmax = ifp->if_mtu + IXL_MTU_ETHERLEN;
3106
3107 rxq.head = htole16(rxr->rxr_cons);
3108 rxq.base = htole64(IXL_DMA_DVA(&rxr->rxr_mem) / IXL_HMC_RXQ_BASE_UNIT);
3109 rxq.qlen = htole16(sc->sc_rx_ring_ndescs);
3110 rxq.dbuff = htole16(IXL_MCLBYTES / IXL_HMC_RXQ_DBUFF_UNIT);
3111 rxq.hbuff = 0;
3112 rxq.dtype = IXL_HMC_RXQ_DTYPE_NOSPLIT;
3113 rxq.dsize = IXL_HMC_RXQ_DSIZE_32;
3114 rxq.crcstrip = 1;
3115 rxq.l2sel = 1;
3116 rxq.showiv = 1;
3117 rxq.rxmax = htole16(rxmax);
3118 rxq.tphrdesc_ena = 0;
3119 rxq.tphwdesc_ena = 0;
3120 rxq.tphdata_ena = 0;
3121 rxq.tphhead_ena = 0;
3122 rxq.lrxqthresh = 0;
3123 rxq.prefena = 1;
3124
3125 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
3126 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
3127 ixl_hmc_pack(hmc, &rxq, ixl_hmc_pack_rxq,
3128 __arraycount(ixl_hmc_pack_rxq));
3129 }
3130
3131 static void
3132 ixl_rxr_unconfig(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3133 {
3134 void *hmc;
3135
3136 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
3137 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
3138 rxr->rxr_cons = rxr->rxr_prod = 0;
3139 }
3140
3141 static void
3142 ixl_rxr_free(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3143 {
3144 struct ixl_rx_map *maps, *rxm;
3145 unsigned int i;
3146
3147 maps = rxr->rxr_maps;
3148 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
3149 rxm = &maps[i];
3150
3151 bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
3152 }
3153
3154 ixl_dmamem_free(sc, &rxr->rxr_mem);
3155 mutex_destroy(&rxr->rxr_lock);
3156 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs);
3157 kmem_free(rxr, sizeof(*rxr));
3158 }
3159
3160 static inline void
3161 ixl_rx_csum(struct mbuf *m, uint64_t qword)
3162 {
3163 int flags_mask;
3164
3165 if (!ISSET(qword, IXL_RX_DESC_L3L4P)) {
3166 /* No L3 or L4 checksum was calculated */
3167 return;
3168 }
3169
3170 switch (__SHIFTOUT(qword, IXL_RX_DESC_PTYPE_MASK)) {
3171 case IXL_RX_DESC_PTYPE_IPV4FRAG:
3172 case IXL_RX_DESC_PTYPE_IPV4:
3173 case IXL_RX_DESC_PTYPE_SCTPV4:
3174 case IXL_RX_DESC_PTYPE_ICMPV4:
3175 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
3176 break;
3177 case IXL_RX_DESC_PTYPE_TCPV4:
3178 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
3179 flags_mask |= M_CSUM_TCPv4 | M_CSUM_TCP_UDP_BAD;
3180 break;
3181 case IXL_RX_DESC_PTYPE_UDPV4:
3182 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
3183 flags_mask |= M_CSUM_UDPv4 | M_CSUM_TCP_UDP_BAD;
3184 break;
3185 case IXL_RX_DESC_PTYPE_TCPV6:
3186 flags_mask = M_CSUM_TCPv6 | M_CSUM_TCP_UDP_BAD;
3187 break;
3188 case IXL_RX_DESC_PTYPE_UDPV6:
3189 flags_mask = M_CSUM_UDPv6 | M_CSUM_TCP_UDP_BAD;
3190 break;
3191 default:
3192 flags_mask = 0;
3193 }
3194
3195 m->m_pkthdr.csum_flags |= (flags_mask & (M_CSUM_IPv4 |
3196 M_CSUM_TCPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv4 | M_CSUM_UDPv6));
3197
3198 if (ISSET(qword, IXL_RX_DESC_IPE)) {
3199 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_IPv4_BAD);
3200 }
3201
3202 if (ISSET(qword, IXL_RX_DESC_L4E)) {
3203 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_TCP_UDP_BAD);
3204 }
3205 }
3206
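/*
 * Harvest received packets.  Descriptors are consumed until one without
 * the DD bit is found or rxlimit is reached.  Multi-descriptor packets
 * are chained through rxr_m_head/rxr_m_tail and handed to the network
 * stack on the EOP descriptor, after the VLAN tag and checksum status
 * have been copied into the mbuf (or dropped on receive errors).  The
 * ring is refilled before returning.
 */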
3207 static int
3208 ixl_rxeof(struct ixl_softc *sc, struct ixl_rx_ring *rxr, u_int rxlimit)
3209 {
3210 struct ifnet *ifp = &sc->sc_ec.ec_if;
3211 struct ixl_rx_wb_desc_32 *ring, *rxd;
3212 struct ixl_rx_map *rxm;
3213 bus_dmamap_t map;
3214 unsigned int cons, prod;
3215 struct mbuf *m;
3216 uint64_t word, word0;
3217 unsigned int len;
3218 unsigned int mask;
3219 int done = 0, more = 0;
3220
3221 KASSERT(mutex_owned(&rxr->rxr_lock));
3222
3223 if (!ISSET(ifp->if_flags, IFF_RUNNING))
3224 return 0;
3225
3226 prod = rxr->rxr_prod;
3227 cons = rxr->rxr_cons;
3228
3229 if (cons == prod)
3230 return 0;
3231
3232 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
3233 0, IXL_DMA_LEN(&rxr->rxr_mem),
3234 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3235
3236 ring = IXL_DMA_KVA(&rxr->rxr_mem);
3237 mask = sc->sc_rx_ring_ndescs - 1;
3238
3239 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
3240
3241 do {
3242 if (rxlimit-- <= 0) {
3243 more = 1;
3244 break;
3245 }
3246
3247 rxd = &ring[cons];
3248
3249 word = le64toh(rxd->qword1);
3250
3251 if (!ISSET(word, IXL_RX_DESC_DD))
3252 break;
3253
3254 rxm = &rxr->rxr_maps[cons];
3255
3256 map = rxm->rxm_map;
3257 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3258 BUS_DMASYNC_POSTREAD);
3259 bus_dmamap_unload(sc->sc_dmat, map);
3260
3261 m = rxm->rxm_m;
3262 rxm->rxm_m = NULL;
3263
3264 KASSERT(m != NULL);
3265
3266 len = (word & IXL_RX_DESC_PLEN_MASK) >> IXL_RX_DESC_PLEN_SHIFT;
3267 m->m_len = len;
3268 m->m_pkthdr.len = 0;
3269
3270 m->m_next = NULL;
3271 *rxr->rxr_m_tail = m;
3272 rxr->rxr_m_tail = &m->m_next;
3273
3274 m = rxr->rxr_m_head;
3275 m->m_pkthdr.len += len;
3276
3277 if (ISSET(word, IXL_RX_DESC_EOP)) {
3278 word0 = le64toh(rxd->qword0);
3279
3280 if (ISSET(word, IXL_RX_DESC_L2TAG1P)) {
3281 vlan_set_tag(m,
3282 __SHIFTOUT(word0, IXL_RX_DESC_L2TAG1_MASK));
3283 }
3284
3285 if ((ifp->if_capenable & IXL_IFCAP_RXCSUM) != 0)
3286 ixl_rx_csum(m, word);
3287
3288 if (!ISSET(word,
3289 IXL_RX_DESC_RXE | IXL_RX_DESC_OVERSIZE)) {
3290 m_set_rcvif(m, ifp);
3291 if_statinc_ref(nsr, if_ipackets);
3292 if_statadd_ref(nsr, if_ibytes,
3293 m->m_pkthdr.len);
3294 if_percpuq_enqueue(ifp->if_percpuq, m);
3295 } else {
3296 if_statinc_ref(nsr, if_ierrors);
3297 m_freem(m);
3298 }
3299
3300 rxr->rxr_m_head = NULL;
3301 rxr->rxr_m_tail = &rxr->rxr_m_head;
3302 }
3303
3304 cons++;
3305 cons &= mask;
3306
3307 done = 1;
3308 } while (cons != prod);
3309
3310 if (done) {
3311 rxr->rxr_cons = cons;
3312 if (ixl_rxfill(sc, rxr) == -1)
3313 if_statinc_ref(nsr, if_iqdrops);
3314 }
3315
3316 IF_STAT_PUTREF(ifp);
3317
3318 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
3319 0, IXL_DMA_LEN(&rxr->rxr_mem),
3320 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3321
3322 return more;
3323 }
3324
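/*
 * Refill the RX ring with fresh mbuf clusters, up to the number of
 * unrefreshed slots.  Each buffer is ETHER_ALIGN-adjusted, DMA-loaded
 * and its physical address written into the receive descriptor; the
 * tail register is updated once if anything was posted.  Returns -1 if
 * allocation or mapping failed, so the caller can count an input drop.
 */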
3325 static int
3326 ixl_rxfill(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3327 {
3328 struct ixl_rx_rd_desc_32 *ring, *rxd;
3329 struct ixl_rx_map *rxm;
3330 bus_dmamap_t map;
3331 struct mbuf *m;
3332 unsigned int prod;
3333 unsigned int slots;
3334 unsigned int mask;
3335 int post = 0, error = 0;
3336
3337 KASSERT(mutex_owned(&rxr->rxr_lock));
3338
3339 prod = rxr->rxr_prod;
3340 slots = ixl_rxr_unrefreshed(rxr->rxr_prod, rxr->rxr_cons,
3341 sc->sc_rx_ring_ndescs);
3342
3343 ring = IXL_DMA_KVA(&rxr->rxr_mem);
3344 mask = sc->sc_rx_ring_ndescs - 1;
3345
3346 if (__predict_false(slots <= 0))
3347 return -1;
3348
3349 do {
3350 rxm = &rxr->rxr_maps[prod];
3351
3352 MGETHDR(m, M_DONTWAIT, MT_DATA);
3353 if (m == NULL) {
3354 rxr->rxr_mgethdr_failed.ev_count++;
3355 error = -1;
3356 break;
3357 }
3358
3359 MCLGET(m, M_DONTWAIT);
3360 if (!ISSET(m->m_flags, M_EXT)) {
3361 rxr->rxr_mgetcl_failed.ev_count++;
3362 error = -1;
3363 m_freem(m);
3364 break;
3365 }
3366
3367 m->m_len = m->m_pkthdr.len = MCLBYTES;
3368 m_adj(m, ETHER_ALIGN);
3369
3370 map = rxm->rxm_map;
3371
3372 if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
3373 BUS_DMA_READ | BUS_DMA_NOWAIT) != 0) {
3374 rxr->rxr_mbuf_load_failed.ev_count++;
3375 error = -1;
3376 m_freem(m);
3377 break;
3378 }
3379
3380 rxm->rxm_m = m;
3381
3382 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3383 BUS_DMASYNC_PREREAD);
3384
3385 rxd = &ring[prod];
3386
3387 rxd->paddr = htole64(map->dm_segs[0].ds_addr);
3388 rxd->haddr = htole64(0);
3389
3390 prod++;
3391 prod &= mask;
3392
3393 post = 1;
3394
3395 } while (--slots);
3396
3397 if (post) {
3398 rxr->rxr_prod = prod;
3399 ixl_wr(sc, rxr->rxr_tail, prod);
3400 }
3401
3402 return error;
3403 }
3404
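/*
 * Service one queue pair: reclaim TX completions and then harvest RX
 * packets, each under its own ring lock and bounded by the given
 * limits.  The return value encodes whether more TX (bit 0) or RX
 * (bit 1) work remains, so the caller can decide to reschedule.
 */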
3405 static inline int
3406 ixl_handle_queue_common(struct ixl_softc *sc, struct ixl_queue_pair *qp,
3407 u_int txlimit, struct evcnt *txevcnt,
3408 u_int rxlimit, struct evcnt *rxevcnt)
3409 {
3410 struct ixl_tx_ring *txr = qp->qp_txr;
3411 struct ixl_rx_ring *rxr = qp->qp_rxr;
3412 int txmore, rxmore;
3413 int rv;
3414
3415 mutex_enter(&txr->txr_lock);
3416 txevcnt->ev_count++;
3417 txmore = ixl_txeof(sc, txr, txlimit);
3418 mutex_exit(&txr->txr_lock);
3419
3420 mutex_enter(&rxr->rxr_lock);
3421 rxevcnt->ev_count++;
3422 rxmore = ixl_rxeof(sc, rxr, rxlimit);
3423 mutex_exit(&rxr->rxr_lock);
3424
3425 rv = txmore | (rxmore << 1);
3426
3427 return rv;
3428 }
3429
3430 static void
3431 ixl_sched_handle_queue(struct ixl_softc *sc, struct ixl_queue_pair *qp)
3432 {
3433
3434 if (qp->qp_workqueue)
3435 workqueue_enqueue(sc->sc_workq_txrx, &qp->qp_work, NULL);
3436 else
3437 softint_schedule(qp->qp_si);
3438 }
3439
3440 static int
3441 ixl_intr(void *xsc)
3442 {
3443 struct ixl_softc *sc = xsc;
3444 struct ixl_tx_ring *txr;
3445 struct ixl_rx_ring *rxr;
3446 uint32_t icr, rxintr, txintr;
3447 int rv = 0;
3448 unsigned int i;
3449
3450 KASSERT(sc != NULL);
3451
3452 ixl_enable_other_intr(sc);
3453 icr = ixl_rd(sc, I40E_PFINT_ICR0);
3454
3455 if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) {
3456 atomic_inc_64(&sc->sc_event_atq.ev_count);
3457 ixl_atq_done(sc);
3458 ixl_work_add(sc->sc_workq, &sc->sc_arq_task);
3459 rv = 1;
3460 }
3461
3462 if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) {
3463 atomic_inc_64(&sc->sc_event_link.ev_count);
3464 ixl_work_add(sc->sc_workq, &sc->sc_link_state_task);
3465 rv = 1;
3466 }
3467
3468 rxintr = icr & I40E_INTR_NOTX_RX_MASK;
3469 txintr = icr & I40E_INTR_NOTX_TX_MASK;
3470
3471 if (txintr || rxintr) {
3472 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
3473 txr = sc->sc_qps[i].qp_txr;
3474 rxr = sc->sc_qps[i].qp_rxr;
3475
3476 ixl_handle_queue_common(sc, &sc->sc_qps[i],
3477 IXL_TXRX_PROCESS_UNLIMIT, &txr->txr_intr,
3478 IXL_TXRX_PROCESS_UNLIMIT, &rxr->rxr_intr);
3479 }
3480 rv = 1;
3481 }
3482
3483 return rv;
3484 }
3485
3486 static int
3487 ixl_queue_intr(void *xqp)
3488 {
3489 struct ixl_queue_pair *qp = xqp;
3490 struct ixl_tx_ring *txr = qp->qp_txr;
3491 struct ixl_rx_ring *rxr = qp->qp_rxr;
3492 struct ixl_softc *sc = qp->qp_sc;
3493 u_int txlimit, rxlimit;
3494 int more;
3495
3496 txlimit = sc->sc_tx_intr_process_limit;
3497 rxlimit = sc->sc_rx_intr_process_limit;
3498 qp->qp_workqueue = sc->sc_txrx_workqueue;
3499
3500 more = ixl_handle_queue_common(sc, qp,
3501 txlimit, &txr->txr_intr, rxlimit, &rxr->rxr_intr);
3502
3503 if (more != 0) {
3504 ixl_sched_handle_queue(sc, qp);
3505 } else {
3506 /* for ALTQ */
3507 if (txr->txr_qid == 0)
3508 if_schedule_deferred_start(&sc->sc_ec.ec_if);
3509 softint_schedule(txr->txr_si);
3510
3511 ixl_enable_queue_intr(sc, qp);
3512 }
3513
3514 return 1;
3515 }
3516
3517 static void
3518 ixl_handle_queue_wk(struct work *wk, void *xsc)
3519 {
3520 struct ixl_queue_pair *qp;
3521
3522 qp = container_of(wk, struct ixl_queue_pair, qp_work);
3523 ixl_handle_queue(qp);
3524 }
3525
3526 static void
3527 ixl_handle_queue(void *xqp)
3528 {
3529 struct ixl_queue_pair *qp = xqp;
3530 struct ixl_softc *sc = qp->qp_sc;
3531 struct ixl_tx_ring *txr = qp->qp_txr;
3532 struct ixl_rx_ring *rxr = qp->qp_rxr;
3533 u_int txlimit, rxlimit;
3534 int more;
3535
3536 txlimit = sc->sc_tx_process_limit;
3537 rxlimit = sc->sc_rx_process_limit;
3538
3539 more = ixl_handle_queue_common(sc, qp,
3540 txlimit, &txr->txr_defer, rxlimit, &rxr->rxr_defer);
3541
3542 if (more != 0)
3543 ixl_sched_handle_queue(sc, qp);
3544 else
3545 ixl_enable_queue_intr(sc, qp);
3546 }
3547
3548 static inline void
3549 ixl_print_hmc_error(struct ixl_softc *sc, uint32_t reg)
3550 {
3551 uint32_t hmc_idx, hmc_isvf;
3552 uint32_t hmc_errtype, hmc_objtype, hmc_data;
3553
3554 hmc_idx = reg & I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK;
3555 hmc_idx = hmc_idx >> I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT;
3556 hmc_isvf = reg & I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK;
3557 hmc_isvf = hmc_isvf >> I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT;
3558 hmc_errtype = reg & I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK;
3559 hmc_errtype = hmc_errtype >> I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT;
3560 hmc_objtype = reg & I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK;
3561 hmc_objtype = hmc_objtype >> I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT;
3562 hmc_data = ixl_rd(sc, I40E_PFHMC_ERRORDATA);
3563
3564 device_printf(sc->sc_dev,
3565 "HMC Error (idx=0x%x, isvf=0x%x, err=0x%x, obj=0x%x, data=0x%x)\n",
3566 hmc_idx, hmc_isvf, hmc_errtype, hmc_objtype, hmc_data);
3567 }
3568
3569 static int
3570 ixl_other_intr(void *xsc)
3571 {
3572 struct ixl_softc *sc = xsc;
3573 uint32_t icr, mask, reg;
3574 int rv = 0;
3575
3576 icr = ixl_rd(sc, I40E_PFINT_ICR0);
3577 mask = ixl_rd(sc, I40E_PFINT_ICR0_ENA);
3578
3579 if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) {
3580 atomic_inc_64(&sc->sc_event_atq.ev_count);
3581 ixl_atq_done(sc);
3582 ixl_work_add(sc->sc_workq, &sc->sc_arq_task);
3583 rv = 1;
3584 }
3585
3586 if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) {
3587 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3588 device_printf(sc->sc_dev, "link stat changed\n");
3589
3590 atomic_inc_64(&sc->sc_event_link.ev_count);
3591 ixl_work_add(sc->sc_workq, &sc->sc_link_state_task);
3592 rv = 1;
3593 }
3594
3595 if (ISSET(icr, I40E_PFINT_ICR0_GRST_MASK)) {
3596 CLR(mask, I40E_PFINT_ICR0_ENA_GRST_MASK);
3597 reg = ixl_rd(sc, I40E_GLGEN_RSTAT);
3598 reg = reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK;
3599 reg = reg >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
3600
3601 device_printf(sc->sc_dev, "GRST: %s\n",
3602 reg == I40E_RESET_CORER ? "CORER" :
3603 reg == I40E_RESET_GLOBR ? "GLOBR" :
3604 reg == I40E_RESET_EMPR ? "EMPR" :
3605 "POR");
3606 }
3607
3608 if (ISSET(icr, I40E_PFINT_ICR0_ECC_ERR_MASK))
3609 atomic_inc_64(&sc->sc_event_ecc_err.ev_count);
3610 if (ISSET(icr, I40E_PFINT_ICR0_PCI_EXCEPTION_MASK))
3611 atomic_inc_64(&sc->sc_event_pci_exception.ev_count);
3612 if (ISSET(icr, I40E_PFINT_ICR0_PE_CRITERR_MASK))
3613 atomic_inc_64(&sc->sc_event_crit_err.ev_count);
3614
3615 if (ISSET(icr, IXL_ICR0_CRIT_ERR_MASK)) {
3616 CLR(mask, IXL_ICR0_CRIT_ERR_MASK);
3617 device_printf(sc->sc_dev, "critical error\n");
3618 }
3619
3620 if (ISSET(icr, I40E_PFINT_ICR0_HMC_ERR_MASK)) {
3621 reg = ixl_rd(sc, I40E_PFHMC_ERRORINFO);
3622 if (ISSET(reg, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK))
3623 ixl_print_hmc_error(sc, reg);
3624 ixl_wr(sc, I40E_PFHMC_ERRORINFO, 0);
3625 }
3626
3627 ixl_wr(sc, I40E_PFINT_ICR0_ENA, mask);
3628 ixl_flush(sc);
3629 ixl_enable_other_intr(sc);
3630 return rv;
3631 }
3632
3633 static void
3634 ixl_get_link_status_done(struct ixl_softc *sc,
3635 const struct ixl_aq_desc *iaq)
3636 {
3637
3638 ixl_link_state_update(sc, iaq);
3639 }
3640
3641 static void
3642 ixl_get_link_status(void *xsc)
3643 {
3644 struct ixl_softc *sc = xsc;
3645 struct ixl_aq_desc *iaq;
3646 struct ixl_aq_link_param *param;
3647
3648 memset(&sc->sc_link_state_atq, 0, sizeof(sc->sc_link_state_atq));
3649 iaq = &sc->sc_link_state_atq.iatq_desc;
3650 iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
3651 param = (struct ixl_aq_link_param *)iaq->iaq_param;
3652 param->notify = IXL_AQ_LINK_NOTIFY;
3653
3654 ixl_atq_set(&sc->sc_link_state_atq, ixl_get_link_status_done);
3655 (void)ixl_atq_post(sc, &sc->sc_link_state_atq);
3656 }
3657
3658 static void
3659 ixl_link_state_update(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
3660 {
3661 struct ifnet *ifp = &sc->sc_ec.ec_if;
3662 int link_state;
3663
3664 KASSERT(kpreempt_disabled());
3665
3666 link_state = ixl_set_link_status(sc, iaq);
3667
3668 if (ifp->if_link_state != link_state)
3669 if_link_state_change(ifp, link_state);
3670
3671 if (link_state != LINK_STATE_DOWN) {
3672 if_schedule_deferred_start(ifp);
3673 }
3674 }
3675
3676 static void
3677 ixl_aq_dump(const struct ixl_softc *sc, const struct ixl_aq_desc *iaq,
3678 const char *msg)
3679 {
3680 char buf[512];
3681 size_t len;
3682
3683 len = sizeof(buf);
3684 buf[--len] = '\0';
3685
3686 device_printf(sc->sc_dev, "%s\n", msg);
3687 snprintb(buf, len, IXL_AQ_FLAGS_FMT, le16toh(iaq->iaq_flags));
3688 device_printf(sc->sc_dev, "flags %s opcode %04x\n",
3689 buf, le16toh(iaq->iaq_opcode));
3690 device_printf(sc->sc_dev, "datalen %u retval %u\n",
3691 le16toh(iaq->iaq_datalen), le16toh(iaq->iaq_retval));
3692 device_printf(sc->sc_dev, "cookie %016" PRIx64 "\n", iaq->iaq_cookie);
3693 device_printf(sc->sc_dev, "%08x %08x %08x %08x\n",
3694 le32toh(iaq->iaq_param[0]), le32toh(iaq->iaq_param[1]),
3695 le32toh(iaq->iaq_param[2]), le32toh(iaq->iaq_param[3]));
3696 }
3697
3698 static void
3699 ixl_arq(void *xsc)
3700 {
3701 struct ixl_softc *sc = xsc;
3702 struct ixl_aq_desc *arq, *iaq;
3703 struct ixl_aq_buf *aqb;
3704 unsigned int cons = sc->sc_arq_cons;
3705 unsigned int prod;
3706 int done = 0;
3707
3708 prod = ixl_rd(sc, sc->sc_aq_regs->arq_head) &
3709 sc->sc_aq_regs->arq_head_mask;
3710
3711 if (cons == prod)
3712 goto done;
3713
3714 arq = IXL_DMA_KVA(&sc->sc_arq);
3715
3716 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3717 0, IXL_DMA_LEN(&sc->sc_arq),
3718 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3719
3720 do {
3721 iaq = &arq[cons];
3722 aqb = sc->sc_arq_live[cons];
3723
3724 KASSERT(aqb != NULL);
3725
3726 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,
3727 BUS_DMASYNC_POSTREAD);
3728
3729 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3730 ixl_aq_dump(sc, iaq, "arq event");
3731
3732 switch (iaq->iaq_opcode) {
3733 case htole16(IXL_AQ_OP_PHY_LINK_STATUS):
3734 kpreempt_disable();
3735 ixl_link_state_update(sc, iaq);
3736 kpreempt_enable();
3737 break;
3738 }
3739
3740 memset(iaq, 0, sizeof(*iaq));
3741 sc->sc_arq_live[cons] = NULL;
3742 SIMPLEQ_INSERT_TAIL(&sc->sc_arq_idle, aqb, aqb_entry);
3743
3744 cons++;
3745 cons &= IXL_AQ_MASK;
3746
3747 done = 1;
3748 } while (cons != prod);
3749
3750 if (done) {
3751 sc->sc_arq_cons = cons;
3752 ixl_arq_fill(sc);
3753 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3754 0, IXL_DMA_LEN(&sc->sc_arq),
3755 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3756 }
3757
3758 done:
3759 ixl_enable_other_intr(sc);
3760 }
3761
3762 static void
3763 ixl_atq_set(struct ixl_atq *iatq,
3764 void (*fn)(struct ixl_softc *, const struct ixl_aq_desc *))
3765 {
3766
3767 iatq->iatq_fn = fn;
3768 }
3769
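/*
 * Post a command on the admin transmit queue (ATQ).  The descriptor is
 * copied into the next free ring slot with the ixl_atq pointer stored
 * in the cookie field, so that ixl_atq_done_locked() can find the
 * request again and invoke its completion callback once the DD flag is
 * set in the written-back descriptor.
 */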
3770 static int
3771 ixl_atq_post_locked(struct ixl_softc *sc, struct ixl_atq *iatq)
3772 {
3773 struct ixl_aq_desc *atq, *slot;
3774 unsigned int prod, cons, prod_next;
3775
3776 /* assert locked */
3777 KASSERT(mutex_owned(&sc->sc_atq_lock));
3778
3779 atq = IXL_DMA_KVA(&sc->sc_atq);
3780 prod = sc->sc_atq_prod;
3781 cons = sc->sc_atq_cons;
3782 prod_next = (prod +1) & IXL_AQ_MASK;
3783
3784 if (cons == prod_next)
3785 return ENOMEM;
3786
3787 slot = &atq[prod];
3788
3789 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3790 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
3791
3792 *slot = iatq->iatq_desc;
3793 slot->iaq_cookie = (uint64_t)((intptr_t)iatq);
3794
3795 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3796 ixl_aq_dump(sc, slot, "atq command");
3797
3798 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3799 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
3800
3801 sc->sc_atq_prod = prod_next;
3802 ixl_wr(sc, sc->sc_aq_regs->atq_tail, sc->sc_atq_prod);
3803
3804 return 0;
3805 }
3806
3807 static int
3808 ixl_atq_post(struct ixl_softc *sc, struct ixl_atq *iatq)
3809 {
3810 int rv;
3811
3812 mutex_enter(&sc->sc_atq_lock);
3813 rv = ixl_atq_post_locked(sc, iatq);
3814 mutex_exit(&sc->sc_atq_lock);
3815
3816 return rv;
3817 }
3818
3819 static void
3820 ixl_atq_done_locked(struct ixl_softc *sc)
3821 {
3822 struct ixl_aq_desc *atq, *slot;
3823 struct ixl_atq *iatq;
3824 unsigned int cons;
3825 unsigned int prod;
3826
3827 KASSERT(mutex_owned(&sc->sc_atq_lock));
3828
3829 prod = sc->sc_atq_prod;
3830 cons = sc->sc_atq_cons;
3831
3832 if (prod == cons)
3833 return;
3834
3835 atq = IXL_DMA_KVA(&sc->sc_atq);
3836
3837 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3838 0, IXL_DMA_LEN(&sc->sc_atq),
3839 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3840
3841 do {
3842 slot = &atq[cons];
3843 if (!ISSET(slot->iaq_flags, htole16(IXL_AQ_DD)))
3844 break;
3845
3846 iatq = (struct ixl_atq *)((intptr_t)slot->iaq_cookie);
3847 iatq->iatq_desc = *slot;
3848
3849 memset(slot, 0, sizeof(*slot));
3850
3851 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3852 ixl_aq_dump(sc, &iatq->iatq_desc, "atq response");
3853
3854 (*iatq->iatq_fn)(sc, &iatq->iatq_desc);
3855
3856 cons++;
3857 cons &= IXL_AQ_MASK;
3858 } while (cons != prod);
3859
3860 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3861 0, IXL_DMA_LEN(&sc->sc_atq),
3862 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3863
3864 sc->sc_atq_cons = cons;
3865 }
3866
3867 static void
3868 ixl_atq_done(struct ixl_softc *sc)
3869 {
3870
3871 mutex_enter(&sc->sc_atq_lock);
3872 ixl_atq_done_locked(sc);
3873 mutex_exit(&sc->sc_atq_lock);
3874 }
3875
3876 static void
3877 ixl_wakeup(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
3878 {
3879
3880 KASSERT(mutex_owned(&sc->sc_atq_lock));
3881
3882 cv_signal(&sc->sc_atq_cv);
3883 }
3884
3885 static int
3886 ixl_atq_exec(struct ixl_softc *sc, struct ixl_atq *iatq)
3887 {
3888 int error;
3889
3890 KASSERT(iatq->iatq_desc.iaq_cookie == 0);
3891
3892 ixl_atq_set(iatq, ixl_wakeup);
3893
3894 mutex_enter(&sc->sc_atq_lock);
3895 error = ixl_atq_post_locked(sc, iatq);
3896 if (error) {
3897 mutex_exit(&sc->sc_atq_lock);
3898 return error;
3899 }
3900
3901 error = cv_timedwait(&sc->sc_atq_cv, &sc->sc_atq_lock,
3902 IXL_ATQ_EXEC_TIMEOUT);
3903 mutex_exit(&sc->sc_atq_lock);
3904
3905 return error;
3906 }
3907
3908 static int
3909 ixl_atq_poll(struct ixl_softc *sc, struct ixl_aq_desc *iaq, unsigned int tm)
3910 {
3911 struct ixl_aq_desc *atq, *slot;
3912 unsigned int prod;
3913 unsigned int t = 0;
3914
3915 mutex_enter(&sc->sc_atq_lock);
3916
3917 atq = IXL_DMA_KVA(&sc->sc_atq);
3918 prod = sc->sc_atq_prod;
3919 slot = atq + prod;
3920
3921 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3922 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
3923
3924 *slot = *iaq;
3925 slot->iaq_flags |= htole16(IXL_AQ_SI);
3926
3927 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3928 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
3929
3930 prod++;
3931 prod &= IXL_AQ_MASK;
3932 sc->sc_atq_prod = prod;
3933 ixl_wr(sc, sc->sc_aq_regs->atq_tail, prod);
3934
3935 while (ixl_rd(sc, sc->sc_aq_regs->atq_head) != prod) {
3936 delaymsec(1);
3937
3938 if (t++ > tm) {
3939 mutex_exit(&sc->sc_atq_lock);
3940 return ETIMEDOUT;
3941 }
3942 }
3943
3944 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3945 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTREAD);
3946 *iaq = *slot;
3947 memset(slot, 0, sizeof(*slot));
3948 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3949 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREREAD);
3950
3951 sc->sc_atq_cons = prod;
3952
3953 mutex_exit(&sc->sc_atq_lock);
3954
3955 return 0;
3956 }
3957
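/*
 * Query the firmware and AdminQ API versions.  The GET_VERSION reply
 * carries the FW build in iaq_param[1], the FW version in iaq_param[2]
 * and the packed major/minor API version in iaq_param[3].
 */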
3958 static int
3959 ixl_get_version(struct ixl_softc *sc)
3960 {
3961 struct ixl_aq_desc iaq;
3962 uint32_t fwbuild, fwver, apiver;
3963 uint16_t api_maj_ver, api_min_ver;
3964
3965 memset(&iaq, 0, sizeof(iaq));
3966 iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VERSION);
3967
3970 if (ixl_atq_poll(sc, &iaq, 2000) != 0)
3971 return ETIMEDOUT;
3972 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK))
3973 return EIO;
3974
3975 fwbuild = le32toh(iaq.iaq_param[1]);
3976 fwver = le32toh(iaq.iaq_param[2]);
3977 apiver = le32toh(iaq.iaq_param[3]);
3978
3979 api_maj_ver = (uint16_t)apiver;
3980 api_min_ver = (uint16_t)(apiver >> 16);
3981
3982 aprint_normal(", FW %hu.%hu.%05u API %hu.%hu", (uint16_t)fwver,
3983 (uint16_t)(fwver >> 16), fwbuild, api_maj_ver, api_min_ver);
3984
3985 if (sc->sc_mac_type == I40E_MAC_X722) {
3986 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK |
3987 IXL_SC_AQ_FLAG_NVMREAD);
3988 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL);
3989 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RSS);
3990 }
3991
3992 #define IXL_API_VER(maj, min) (((uint32_t)(maj) << 16) | (min))
3993 if (IXL_API_VER(api_maj_ver, api_min_ver) >= IXL_API_VER(1, 5)) {
3994 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL);
3995 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK);
3996 }
3997 #undef IXL_API_VER
3998
3999 return 0;
4000 }
4001
4002 static int
4003 ixl_get_nvm_version(struct ixl_softc *sc)
4004 {
4005 uint16_t nvmver, cfg_ptr, eetrack_hi, eetrack_lo, oem_hi, oem_lo;
4006 uint32_t eetrack, oem;
4007 uint16_t nvm_maj_ver, nvm_min_ver, oem_build;
4008 uint8_t oem_ver, oem_patch;
4009
4010 nvmver = cfg_ptr = eetrack_hi = eetrack_lo = oem_hi = oem_lo = 0;
4011 ixl_rd16_nvm(sc, I40E_SR_NVM_DEV_STARTER_VERSION, &nvmver);
4012 ixl_rd16_nvm(sc, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
4013 ixl_rd16_nvm(sc, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
4014 ixl_rd16_nvm(sc, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
4015 ixl_rd16_nvm(sc, cfg_ptr + I40E_NVM_OEM_VER_OFF, &oem_hi);
4016 ixl_rd16_nvm(sc, cfg_ptr + I40E_NVM_OEM_VER_OFF + 1, &oem_lo);
4017
4018 nvm_maj_ver = (uint16_t)__SHIFTOUT(nvmver, IXL_NVM_VERSION_HI_MASK);
4019 nvm_min_ver = (uint16_t)__SHIFTOUT(nvmver, IXL_NVM_VERSION_LO_MASK);
4020 eetrack = ((uint32_t)eetrack_hi << 16) | eetrack_lo;
4021 oem = ((uint32_t)oem_hi << 16) | oem_lo;
4022 oem_ver = __SHIFTOUT(oem, IXL_NVM_OEMVERSION_MASK);
4023 oem_build = __SHIFTOUT(oem, IXL_NVM_OEMBUILD_MASK);
4024 oem_patch = __SHIFTOUT(oem, IXL_NVM_OEMPATCH_MASK);
4025
4026 aprint_normal(" nvm %x.%02x etid %08x oem %d.%d.%d",
4027 nvm_maj_ver, nvm_min_ver, eetrack,
4028 oem_ver, oem_build, oem_patch);
4029
4030 return 0;
4031 }
4032
4033 static int
4034 ixl_pxe_clear(struct ixl_softc *sc)
4035 {
4036 struct ixl_aq_desc iaq;
4037 int rv;
4038
4039 memset(&iaq, 0, sizeof(iaq));
4040 iaq.iaq_opcode = htole16(IXL_AQ_OP_CLEAR_PXE_MODE);
4041 iaq.iaq_param[0] = htole32(0x2);
4042
4043 rv = ixl_atq_poll(sc, &iaq, 250);
4044
4045 ixl_wr(sc, I40E_GLLAN_RCTL_0, 0x1);
4046
4047 if (rv != 0)
4048 return ETIMEDOUT;
4049
4050 switch (iaq.iaq_retval) {
4051 case htole16(IXL_AQ_RC_OK):
4052 case htole16(IXL_AQ_RC_EEXIST):
4053 break;
4054 default:
4055 return EIO;
4056 }
4057
4058 return 0;
4059 }
4060
4061 static int
4062 ixl_lldp_shut(struct ixl_softc *sc)
4063 {
4064 struct ixl_aq_desc iaq;
4065
4066 memset(&iaq, 0, sizeof(iaq));
4067 iaq.iaq_opcode = htole16(IXL_AQ_OP_LLDP_STOP_AGENT);
4068 iaq.iaq_param[0] = htole32(IXL_LLDP_SHUTDOWN);
4069
4070 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4071 aprint_error_dev(sc->sc_dev, "STOP LLDP AGENT timeout\n");
4072 return -1;
4073 }
4074
4075 switch (iaq.iaq_retval) {
4076 case htole16(IXL_AQ_RC_EMODE):
4077 case htole16(IXL_AQ_RC_EPERM):
4078 /* ignore silently */
4079 default:
4080 break;
4081 }
4082
4083 return 0;
4084 }
4085
4086 static void
4087 ixl_parse_hw_capability(struct ixl_softc *sc, struct ixl_aq_capability *cap)
4088 {
4089 uint16_t id;
4090 uint32_t number, logical_id;
4091
4092 id = le16toh(cap->cap_id);
4093 number = le32toh(cap->number);
4094 logical_id = le32toh(cap->logical_id);
4095
4096 switch (id) {
4097 case IXL_AQ_CAP_RSS:
4098 sc->sc_rss_table_size = number;
4099 sc->sc_rss_table_entry_width = logical_id;
4100 break;
4101 case IXL_AQ_CAP_RXQ:
4102 case IXL_AQ_CAP_TXQ:
4103 sc->sc_nqueue_pairs_device = MIN(number,
4104 sc->sc_nqueue_pairs_device);
4105 break;
4106 }
4107 }
4108
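/*
 * Fetch the function capability list.  If the firmware answers with
 * ENOMEM, it reports the required buffer length in iaq_datalen and the
 * request is retried with a larger buffer.
 */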
4109 static int
4110 ixl_get_hw_capabilities(struct ixl_softc *sc)
4111 {
4112 struct ixl_dmamem idm;
4113 struct ixl_aq_desc iaq;
4114 struct ixl_aq_capability *caps;
4115 size_t i, ncaps;
4116 bus_size_t caps_size;
4117 uint16_t status;
4118 int rv;
4119
4120 caps_size = sizeof(caps[0]) * 40;
4121 memset(&iaq, 0, sizeof(iaq));
4122 iaq.iaq_opcode = htole16(IXL_AQ_OP_LIST_FUNC_CAP);
4123
4124 do {
4125 if (ixl_dmamem_alloc(sc, &idm, caps_size, 0) != 0) {
4126 return -1;
4127 }
4128
4129 iaq.iaq_flags = htole16(IXL_AQ_BUF |
4130 (caps_size > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4131 iaq.iaq_datalen = htole16(caps_size);
4132 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
4133
4134 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0,
4135 IXL_DMA_LEN(&idm), BUS_DMASYNC_PREREAD);
4136
4137 rv = ixl_atq_poll(sc, &iaq, 250);
4138
4139 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0,
4140 IXL_DMA_LEN(&idm), BUS_DMASYNC_POSTREAD);
4141
4142 if (rv != 0) {
4143 aprint_error(", HW capabilities timeout\n");
4144 goto done;
4145 }
4146
4147 status = le16toh(iaq.iaq_retval);
4148
4149 if (status == IXL_AQ_RC_ENOMEM) {
4150 caps_size = le16toh(iaq.iaq_datalen);
4151 ixl_dmamem_free(sc, &idm);
4152 }
4153 } while (status == IXL_AQ_RC_ENOMEM);
4154
4155 if (status != IXL_AQ_RC_OK) {
4156 aprint_error(", HW capabilities error\n");
4157 goto done;
4158 }
4159
4160 caps = IXL_DMA_KVA(&idm);
4161 ncaps = le16toh(iaq.iaq_param[1]);
4162
4163 for (i = 0; i < ncaps; i++) {
4164 ixl_parse_hw_capability(sc, &caps[i]);
4165 }
4166
4167 done:
4168 ixl_dmamem_free(sc, &idm);
4169 return rv;
4170 }
4171
4172 static int
4173 ixl_get_mac(struct ixl_softc *sc)
4174 {
4175 struct ixl_dmamem idm;
4176 struct ixl_aq_desc iaq;
4177 struct ixl_aq_mac_addresses *addrs;
4178 int rv;
4179
4180 if (ixl_dmamem_alloc(sc, &idm, sizeof(*addrs), 0) != 0) {
4181 aprint_error(", unable to allocate mac addresses\n");
4182 return -1;
4183 }
4184
4185 memset(&iaq, 0, sizeof(iaq));
4186 iaq.iaq_flags = htole16(IXL_AQ_BUF);
4187 iaq.iaq_opcode = htole16(IXL_AQ_OP_MAC_ADDRESS_READ);
4188 iaq.iaq_datalen = htole16(sizeof(*addrs));
4189 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
4190
4191 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4192 BUS_DMASYNC_PREREAD);
4193
4194 rv = ixl_atq_poll(sc, &iaq, 250);
4195
4196 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4197 BUS_DMASYNC_POSTREAD);
4198
4199 if (rv != 0) {
4200 aprint_error(", MAC ADDRESS READ timeout\n");
4201 rv = -1;
4202 goto done;
4203 }
4204 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4205 aprint_error(", MAC ADDRESS READ error\n");
4206 rv = -1;
4207 goto done;
4208 }
4209
4210 addrs = IXL_DMA_KVA(&idm);
4211 if (!ISSET(iaq.iaq_param[0], htole32(IXL_AQ_MAC_PORT_VALID))) {
4212 printf(", port address is not valid\n");
4213 goto done;
4214 }
4215
4216 memcpy(sc->sc_enaddr, addrs->port, ETHER_ADDR_LEN);
4217 rv = 0;
4218
4219 done:
4220 ixl_dmamem_free(sc, &idm);
4221 return rv;
4222 }
4223
4224 static int
4225 ixl_get_switch_config(struct ixl_softc *sc)
4226 {
4227 struct ixl_dmamem idm;
4228 struct ixl_aq_desc iaq;
4229 struct ixl_aq_switch_config *hdr;
4230 struct ixl_aq_switch_config_element *elms, *elm;
4231 unsigned int nelm, i;
4232 int rv;
4233
4234 if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
4235 aprint_error_dev(sc->sc_dev,
4236 "unable to allocate switch config buffer\n");
4237 return -1;
4238 }
4239
4240 memset(&iaq, 0, sizeof(iaq));
4241 iaq.iaq_flags = htole16(IXL_AQ_BUF |
4242 (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4243 iaq.iaq_opcode = htole16(IXL_AQ_OP_SWITCH_GET_CONFIG);
4244 iaq.iaq_datalen = htole16(IXL_AQ_BUFLEN);
4245 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
4246
4247 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4248 BUS_DMASYNC_PREREAD);
4249
4250 rv = ixl_atq_poll(sc, &iaq, 250);
4251
4252 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4253 BUS_DMASYNC_POSTREAD);
4254
4255 if (rv != 0) {
4256 aprint_error_dev(sc->sc_dev, "GET SWITCH CONFIG timeout\n");
4257 rv = -1;
4258 goto done;
4259 }
4260 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4261 aprint_error_dev(sc->sc_dev, "GET SWITCH CONFIG error\n");
4262 rv = -1;
4263 goto done;
4264 }
4265
4266 hdr = IXL_DMA_KVA(&idm);
4267 elms = (struct ixl_aq_switch_config_element *)(hdr + 1);
4268
4269 nelm = le16toh(hdr->num_reported);
4270 if (nelm < 1) {
4271 aprint_error_dev(sc->sc_dev, "no switch config available\n");
4272 rv = -1;
4273 goto done;
4274 }
4275
4276 for (i = 0; i < nelm; i++) {
4277 elm = &elms[i];
4278
4279 aprint_debug_dev(sc->sc_dev,
4280 "type %x revision %u seid %04x\n",
4281 elm->type, elm->revision, le16toh(elm->seid));
4282 aprint_debug_dev(sc->sc_dev,
4283 "uplink %04x downlink %04x\n",
4284 le16toh(elm->uplink_seid),
4285 le16toh(elm->downlink_seid));
4286 aprint_debug_dev(sc->sc_dev,
4287 "conntype %x scheduler %04x extra %04x\n",
4288 elm->connection_type,
4289 le16toh(elm->scheduler_id),
4290 le16toh(elm->element_info));
4291 }
4292
4293 elm = &elms[0];
4294
4295 sc->sc_uplink_seid = elm->uplink_seid;
4296 sc->sc_downlink_seid = elm->downlink_seid;
4297 sc->sc_seid = elm->seid;
4298
4299 if ((sc->sc_uplink_seid == htole16(0)) !=
4300 (sc->sc_downlink_seid == htole16(0))) {
4301 aprint_error_dev(sc->sc_dev, "SEIDs are misconfigured\n");
4302 rv = -1;
4303 goto done;
4304 }
4305
4306 done:
4307 ixl_dmamem_free(sc, &idm);
4308 return rv;
4309 }
4310
4311 static int
4312 ixl_phy_mask_ints(struct ixl_softc *sc)
4313 {
4314 struct ixl_aq_desc iaq;
4315
4316 memset(&iaq, 0, sizeof(iaq));
4317 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_EVENT_MASK);
4318 iaq.iaq_param[2] = htole32(IXL_AQ_PHY_EV_MASK &
4319 ~(IXL_AQ_PHY_EV_LINK_UPDOWN | IXL_AQ_PHY_EV_MODULE_QUAL_FAIL |
4320 IXL_AQ_PHY_EV_MEDIA_NA));
4321
4322 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4323 aprint_error_dev(sc->sc_dev, "SET PHY EVENT MASK timeout\n");
4324 return -1;
4325 }
4326 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4327 aprint_error_dev(sc->sc_dev, "SET PHY EVENT MASK error\n");
4328 return -1;
4329 }
4330
4331 return 0;
4332 }
4333
4334 static int
4335 ixl_get_phy_abilities(struct ixl_softc *sc, struct ixl_dmamem *idm)
4336 {
4337 struct ixl_aq_desc iaq;
4338 int rv;
4339
4340 memset(&iaq, 0, sizeof(iaq));
4341 iaq.iaq_flags = htole16(IXL_AQ_BUF |
4342 (IXL_DMA_LEN(idm) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4343 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_GET_ABILITIES);
4344 iaq.iaq_datalen = htole16(IXL_DMA_LEN(idm));
4345 iaq.iaq_param[0] = htole32(IXL_AQ_PHY_REPORT_INIT);
4346 ixl_aq_dva(&iaq, IXL_DMA_DVA(idm));
4347
4348 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
4349 BUS_DMASYNC_PREREAD);
4350
4351 rv = ixl_atq_poll(sc, &iaq, 250);
4352
4353 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
4354 BUS_DMASYNC_POSTREAD);
4355
4356 if (rv != 0)
4357 return -1;
4358
4359 return le16toh(iaq.iaq_retval);
4360 }
4361
4362 static int
4363 ixl_get_phy_info(struct ixl_softc *sc)
4364 {
4365 struct ixl_dmamem idm;
4366 struct ixl_aq_phy_abilities *phy;
4367 int rv;
4368
4369 if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
4370 aprint_error_dev(sc->sc_dev,
4371 "unable to allocate phy abilities buffer\n");
4372 return -1;
4373 }
4374
4375 rv = ixl_get_phy_abilities(sc, &idm);
4376 switch (rv) {
4377 case -1:
4378 aprint_error_dev(sc->sc_dev, "GET PHY ABILITIES timeout\n");
4379 goto done;
4380 case IXL_AQ_RC_OK:
4381 break;
4382 case IXL_AQ_RC_EIO:
4383 aprint_error_dev(sc->sc_dev, "unable to query phy types\n");
4384 goto done;
4385 default:
4386 aprint_error_dev(sc->sc_dev,
4387 "GET PHY ABILITIIES error %u\n", rv);
4388 goto done;
4389 }
4390
4391 phy = IXL_DMA_KVA(&idm);
4392
4393 sc->sc_phy_types = le32toh(phy->phy_type);
4394 sc->sc_phy_types |= (uint64_t)le32toh(phy->phy_type_ext) << 32;
4395
4396 sc->sc_phy_abilities = phy->abilities;
4397 sc->sc_phy_linkspeed = phy->link_speed;
4398 sc->sc_phy_fec_cfg = phy->fec_cfg_curr_mod_ext_info &
4399 (IXL_AQ_ENABLE_FEC_KR | IXL_AQ_ENABLE_FEC_RS |
4400 IXL_AQ_REQUEST_FEC_KR | IXL_AQ_REQUEST_FEC_RS);
4401 sc->sc_eee_cap = phy->eee_capability;
4402 sc->sc_eeer_val = phy->eeer_val;
4403 sc->sc_d3_lpan = phy->d3_lpan;
4404
4405 rv = 0;
4406
4407 done:
4408 ixl_dmamem_free(sc, &idm);
4409 return rv;
4410 }
4411
4412 static int
4413 ixl_set_phy_config(struct ixl_softc *sc,
4414 uint8_t link_speed, uint8_t abilities, bool polling)
4415 {
4416 struct ixl_aq_phy_param *param;
4417 struct ixl_atq iatq;
4418 struct ixl_aq_desc *iaq;
4419 int error;
4420
4421 memset(&iatq, 0, sizeof(iatq));
4422
4423 iaq = &iatq.iatq_desc;
4424 iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_CONFIG);
4425 param = (struct ixl_aq_phy_param *)&iaq->iaq_param;
4426 param->phy_types = htole32((uint32_t)sc->sc_phy_types);
4427 param->phy_type_ext = (uint8_t)(sc->sc_phy_types >> 32);
4428 param->link_speed = link_speed;
4429 param->abilities = abilities | IXL_AQ_PHY_ABILITY_AUTO_LINK;
4430 param->fec_cfg = sc->sc_phy_fec_cfg;
4431 param->eee_capability = sc->sc_eee_cap;
4432 param->eeer_val = sc->sc_eeer_val;
4433 param->d3_lpan = sc->sc_d3_lpan;
4434
4435 if (polling)
4436 error = ixl_atq_poll(sc, iaq, 250);
4437 else
4438 error = ixl_atq_exec(sc, &iatq);
4439
4440 if (error != 0)
4441 return error;
4442
4443 switch (le16toh(iaq->iaq_retval)) {
4444 case IXL_AQ_RC_OK:
4445 break;
4446 case IXL_AQ_RC_EPERM:
4447 return EPERM;
4448 default:
4449 return EIO;
4450 }
4451
4452 return 0;
4453 }
4454
4455 static int
4456 ixl_set_phy_autoselect(struct ixl_softc *sc)
4457 {
4458 uint8_t link_speed, abilities;
4459
4460 link_speed = sc->sc_phy_linkspeed;
4461 abilities = IXL_PHY_ABILITY_LINKUP | IXL_PHY_ABILITY_AUTONEGO;
4462
4463 return ixl_set_phy_config(sc, link_speed, abilities, true);
4464 }
4465
4466 static int
4467 ixl_get_link_status_poll(struct ixl_softc *sc, int *l)
4468 {
4469 struct ixl_aq_desc iaq;
4470 struct ixl_aq_link_param *param;
4471 int link;
4472
4473 memset(&iaq, 0, sizeof(iaq));
4474 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
4475 param = (struct ixl_aq_link_param *)iaq.iaq_param;
4476 param->notify = IXL_AQ_LINK_NOTIFY;
4477
4478 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4479 return ETIMEDOUT;
4480 }
4481 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4482 return EIO;
4483 }
4484
4485 link = ixl_set_link_status(sc, &iaq);
4486
4487 if (l != NULL)
4488 *l = link;
4489
4490 return 0;
4491 }
4492
4493 static int
4494 ixl_get_vsi(struct ixl_softc *sc)
4495 {
4496 struct ixl_dmamem *vsi = &sc->sc_scratch;
4497 struct ixl_aq_desc iaq;
4498 struct ixl_aq_vsi_param *param;
4499 struct ixl_aq_vsi_reply *reply;
4500 struct ixl_aq_vsi_data *data;
4501 int rv;
4502
4503 /* grumble, vsi info isn't "known" at compile time */
4504
4505 memset(&iaq, 0, sizeof(iaq));
4506 iaq.iaq_flags = htole16(IXL_AQ_BUF |
4507 (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4508 iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VSI_PARAMS);
4509 iaq.iaq_datalen = htole16(IXL_DMA_LEN(vsi));
4510 ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
4511
4512 param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
4513 param->uplink_seid = sc->sc_seid;
4514
4515 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4516 BUS_DMASYNC_PREREAD);
4517
4518 rv = ixl_atq_poll(sc, &iaq, 250);
4519
4520 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4521 BUS_DMASYNC_POSTREAD);
4522
4523 if (rv != 0) {
4524 return ETIMEDOUT;
4525 }
4526
4527 switch (le16toh(iaq.iaq_retval)) {
4528 case IXL_AQ_RC_OK:
4529 break;
4530 case IXL_AQ_RC_ENOENT:
4531 return ENOENT;
4532 case IXL_AQ_RC_EACCES:
4533 return EACCES;
4534 default:
4535 return EIO;
4536 }
4537
4538 reply = (struct ixl_aq_vsi_reply *)iaq.iaq_param;
4539 sc->sc_vsi_number = le16toh(reply->vsi_number);
4540 data = IXL_DMA_KVA(vsi);
4541 sc->sc_vsi_stat_counter_idx = le16toh(data->stat_counter_idx);
4542
4543 return 0;
4544 }
4545
4546 static int
4547 ixl_set_vsi(struct ixl_softc *sc)
4548 {
4549 struct ixl_dmamem *vsi = &sc->sc_scratch;
4550 struct ixl_aq_desc iaq;
4551 struct ixl_aq_vsi_param *param;
4552 struct ixl_aq_vsi_data *data = IXL_DMA_KVA(vsi);
4553 unsigned int qnum;
4554 uint16_t val;
4555 int rv;
4556
4557 qnum = sc->sc_nqueue_pairs - 1;
4558
4559 data->valid_sections = htole16(IXL_AQ_VSI_VALID_QUEUE_MAP |
4560 IXL_AQ_VSI_VALID_VLAN);
4561
4562 CLR(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_MASK));
4563 SET(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_CONTIG));
4564 data->queue_mapping[0] = htole16(0);
4565 data->tc_mapping[0] = htole16((0 << IXL_AQ_VSI_TC_Q_OFFSET_SHIFT) |
4566 (qnum << IXL_AQ_VSI_TC_Q_NUMBER_SHIFT));
4567
4568 val = le16toh(data->port_vlan_flags);
4569 CLR(val, IXL_AQ_VSI_PVLAN_MODE_MASK | IXL_AQ_VSI_PVLAN_EMOD_MASK);
4570 SET(val, IXL_AQ_VSI_PVLAN_MODE_ALL);
4571
4572 if (ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWTAGGING)) {
4573 SET(val, IXL_AQ_VSI_PVLAN_EMOD_STR_BOTH);
4574 } else {
4575 SET(val, IXL_AQ_VSI_PVLAN_EMOD_NOTHING);
4576 }
4577
4578 data->port_vlan_flags = htole16(val);
4579
4580 /* grumble, vsi info isn't "known" at compile time */
4581
4582 memset(&iaq, 0, sizeof(iaq));
4583 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD |
4584 (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4585 iaq.iaq_opcode = htole16(IXL_AQ_OP_UPD_VSI_PARAMS);
4586 iaq.iaq_datalen = htole16(IXL_DMA_LEN(vsi));
4587 ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
4588
4589 param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
4590 param->uplink_seid = sc->sc_seid;
4591
4592 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4593 BUS_DMASYNC_PREWRITE);
4594
4595 rv = ixl_atq_poll(sc, &iaq, 250);
4596
4597 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4598 BUS_DMASYNC_POSTWRITE);
4599
4600 if (rv != 0) {
4601 return ETIMEDOUT;
4602 }
4603
4604 switch (le16toh(iaq.iaq_retval)) {
4605 case IXL_AQ_RC_OK:
4606 break;
4607 case IXL_AQ_RC_ENOENT:
4608 return ENOENT;
4609 case IXL_AQ_RC_EACCES:
4610 return EACCES;
4611 default:
4612 return EIO;
4613 }
4614
4615 return 0;
4616 }
4617
4618 static void
4619 ixl_set_filter_control(struct ixl_softc *sc)
4620 {
4621 uint32_t reg;
4622
4623 reg = ixl_rd_rx_csr(sc, I40E_PFQF_CTL_0);
4624
4625 CLR(reg, I40E_PFQF_CTL_0_HASHLUTSIZE_MASK);
4626 SET(reg, I40E_HASH_LUT_SIZE_128 << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT);
4627
4628 SET(reg, I40E_PFQF_CTL_0_FD_ENA_MASK);
4629 SET(reg, I40E_PFQF_CTL_0_ETYPE_ENA_MASK);
4630 SET(reg, I40E_PFQF_CTL_0_MACVLAN_ENA_MASK);
4631
4632 ixl_wr_rx_csr(sc, I40E_PFQF_CTL_0, reg);
4633 }
4634
4635 static inline void
4636 ixl_get_default_rss_key(uint32_t *buf, size_t len)
4637 {
4638 size_t cplen;
4639 uint8_t rss_seed[RSS_KEYSIZE];
4640
4641 rss_getkey(rss_seed);
4642 memset(buf, 0, len);
4643
4644 cplen = MIN(len, sizeof(rss_seed));
4645 memcpy(buf, rss_seed, cplen);
4646 }
4647
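/*
 * Program the RSS hash key via the admin queue.  The key is copied into
 * the standard portion of the buffer first and any remainder into the
 * extended hash key portion, limited to the size covered by the
 * register-based key.
 */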
4648 static int
4649 ixl_set_rss_key(struct ixl_softc *sc, uint8_t *key, size_t keylen)
4650 {
4651 struct ixl_dmamem *idm;
4652 struct ixl_atq iatq;
4653 struct ixl_aq_desc *iaq;
4654 struct ixl_aq_rss_key_param *param;
4655 struct ixl_aq_rss_key_data *data;
4656 size_t len, datalen, stdlen, extlen;
4657 uint16_t vsi_id;
4658 int rv;
4659
4660 memset(&iatq, 0, sizeof(iatq));
4661 iaq = &iatq.iatq_desc;
4662 idm = &sc->sc_aqbuf;
4663
4664 datalen = sizeof(*data);
4665
4666 /* XXX the buffer size must not exceed the size of the RSS key registers */
4667 datalen = MIN(IXL_RSS_KEY_SIZE_REG * sizeof(uint32_t), datalen);
4668
4669 iaq->iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD |
4670 (datalen > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4671 iaq->iaq_opcode = htole16(IXL_AQ_OP_RSS_SET_KEY);
4672 iaq->iaq_datalen = htole16(datalen);
4673
4674 param = (struct ixl_aq_rss_key_param *)iaq->iaq_param;
4675 vsi_id = (sc->sc_vsi_number << IXL_AQ_RSSKEY_VSI_ID_SHIFT) |
4676 IXL_AQ_RSSKEY_VSI_VALID;
4677 param->vsi_id = htole16(vsi_id);
4678
4679 memset(IXL_DMA_KVA(idm), 0, IXL_DMA_LEN(idm));
4680 data = IXL_DMA_KVA(idm);
4681
4682 len = MIN(keylen, datalen);
4683 stdlen = MIN(sizeof(data->standard_rss_key), len);
4684 memcpy(data->standard_rss_key, key, stdlen);
4685 len = (len > stdlen) ? (len - stdlen) : 0;
4686
4687 extlen = MIN(sizeof(data->extended_hash_key), len);
4689 memcpy(data->extended_hash_key, key + stdlen, extlen);
4690
4691 ixl_aq_dva(iaq, IXL_DMA_DVA(idm));
4692
4693 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
4694 IXL_DMA_LEN(idm), BUS_DMASYNC_PREWRITE);
4695
4696 rv = ixl_atq_exec(sc, &iatq);
4697
4698 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
4699 IXL_DMA_LEN(idm), BUS_DMASYNC_POSTWRITE);
4700
4701 if (rv != 0) {
4702 return ETIMEDOUT;
4703 }
4704
4705 if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK)) {
4706 return EIO;
4707 }
4708
4709 return 0;
4710 }
4711
4712 static int
4713 ixl_set_rss_lut(struct ixl_softc *sc, uint8_t *lut, size_t lutlen)
4714 {
4715 struct ixl_dmamem *idm;
4716 struct ixl_atq iatq;
4717 struct ixl_aq_desc *iaq;
4718 struct ixl_aq_rss_lut_param *param;
4719 uint16_t vsi_id;
4720 uint8_t *data;
4721 size_t dmalen;
4722 int rv;
4723
4724 memset(&iatq, 0, sizeof(iatq));
4725 iaq = &iatq.iatq_desc;
4726 idm = &sc->sc_aqbuf;
4727
4728 dmalen = MIN(lutlen, IXL_DMA_LEN(idm));
4729
4730 iaq->iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD |
4731 (dmalen > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4732 iaq->iaq_opcode = htole16(IXL_AQ_OP_RSS_SET_LUT);
4733 iaq->iaq_datalen = htole16(dmalen);
4734
4735 memset(IXL_DMA_KVA(idm), 0, IXL_DMA_LEN(idm));
4736 data = IXL_DMA_KVA(idm);
4737 memcpy(data, lut, dmalen);
4738 ixl_aq_dva(iaq, IXL_DMA_DVA(idm));
4739
4740 param = (struct ixl_aq_rss_lut_param *)iaq->iaq_param;
4741 vsi_id = (sc->sc_vsi_number << IXL_AQ_RSSLUT_VSI_ID_SHIFT) |
4742 IXL_AQ_RSSLUT_VSI_VALID;
4743 param->vsi_id = htole16(vsi_id);
4744 param->flags = htole16(IXL_AQ_RSSLUT_TABLE_TYPE_PF <<
4745 IXL_AQ_RSSLUT_TABLE_TYPE_SHIFT);
4746
4747 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
4748 IXL_DMA_LEN(idm), BUS_DMASYNC_PREWRITE);
4749
4750 rv = ixl_atq_exec(sc, &iatq);
4751
4752 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
4753 IXL_DMA_LEN(idm), BUS_DMASYNC_POSTWRITE);
4754
4755 if (rv != 0) {
4756 return ETIMEDOUT;
4757 }
4758
4759 if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK)) {
4760 return EIO;
4761 }
4762
4763 return 0;
4764 }
4765
4766 static int
4767 ixl_register_rss_key(struct ixl_softc *sc)
4768 {
4769 uint32_t rss_seed[IXL_RSS_KEY_SIZE_REG];
4770 int rv;
4771 size_t i;
4772
4773 ixl_get_default_rss_key(rss_seed, sizeof(rss_seed));
4774
4775 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RSS)){
4776 rv = ixl_set_rss_key(sc, (uint8_t*)rss_seed,
4777 sizeof(rss_seed));
4778 } else {
4779 rv = 0;
4780 for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
4781 ixl_wr_rx_csr(sc, I40E_PFQF_HKEY(i), rss_seed[i]);
4782 }
4783 }
4784
4785 return rv;
4786 }
4787
4788 static void
4789 ixl_register_rss_pctype(struct ixl_softc *sc)
4790 {
4791 uint64_t set_hena = 0;
4792 uint32_t hena0, hena1;
4793
4794 if (sc->sc_mac_type == I40E_MAC_X722)
4795 set_hena = IXL_RSS_HENA_DEFAULT_X722;
4796 else
4797 set_hena = IXL_RSS_HENA_DEFAULT_XL710;
4798
4799 hena0 = ixl_rd_rx_csr(sc, I40E_PFQF_HENA(0));
4800 hena1 = ixl_rd_rx_csr(sc, I40E_PFQF_HENA(1));
4801
4802 SET(hena0, set_hena);
4803 SET(hena1, set_hena >> 32);
4804
4805 ixl_wr_rx_csr(sc, I40E_PFQF_HENA(0), hena0);
4806 ixl_wr_rx_csr(sc, I40E_PFQF_HENA(1), hena1);
4807 }
4808
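/*
 * Build the RSS lookup table by spreading its entries across the active
 * queue pairs round-robin, then program it either through the admin
 * queue or directly into the PFQF_HLUT registers.
 */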
4809 static int
4810 ixl_register_rss_hlut(struct ixl_softc *sc)
4811 {
4812 unsigned int qid;
4813 uint8_t hlut_buf[512], lut_mask;
4814 uint32_t *hluts;
4815 size_t i, hluts_num;
4816 int rv;
4817
4818 lut_mask = (0x01 << sc->sc_rss_table_entry_width) - 1;
4819
4820 for (i = 0; i < sc->sc_rss_table_size; i++) {
4821 qid = i % sc->sc_nqueue_pairs;
4822 hlut_buf[i] = qid & lut_mask;
4823 }
4824
4825 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RSS)) {
4826 rv = ixl_set_rss_lut(sc, hlut_buf, sizeof(hlut_buf));
4827 } else {
4828 rv = 0;
4829 hluts = (uint32_t *)hlut_buf;
4830 hluts_num = sc->sc_rss_table_size >> 2;
4831 for (i = 0; i < hluts_num; i++) {
4832 ixl_wr(sc, I40E_PFQF_HLUT(i), hluts[i]);
4833 }
4834 ixl_flush(sc);
4835 }
4836
4837 return rv;
4838 }
4839
4840 static void
4841 ixl_config_rss(struct ixl_softc *sc)
4842 {
4843
4844 KASSERT(mutex_owned(&sc->sc_cfg_lock));
4845
4846 ixl_register_rss_key(sc);
4847 ixl_register_rss_pctype(sc);
4848 ixl_register_rss_hlut(sc);
4849 }
4850
4851 static const struct ixl_phy_type *
4852 ixl_search_phy_type(uint8_t phy_type)
4853 {
4854 const struct ixl_phy_type *itype;
4855 uint64_t mask;
4856 unsigned int i;
4857
4858 if (phy_type >= 64)
4859 return NULL;
4860
4861 mask = 1ULL << phy_type;
4862
4863 for (i = 0; i < __arraycount(ixl_phy_type_map); i++) {
4864 itype = &ixl_phy_type_map[i];
4865
4866 if (ISSET(itype->phy_type, mask))
4867 return itype;
4868 }
4869
4870 return NULL;
4871 }
4872
4873 static uint64_t
4874 ixl_search_link_speed(uint8_t link_speed)
4875 {
4876 const struct ixl_speed_type *type;
4877 unsigned int i;
4878
4879 for (i = 0; i < __arraycount(ixl_speed_type_map); i++) {
4880 type = &ixl_speed_type_map[i];
4881
4882 if (ISSET(type->dev_speed, link_speed))
4883 return type->net_speed;
4884 }
4885
4886 return 0;
4887 }
4888
4889 static uint8_t
4890 ixl_search_baudrate(uint64_t baudrate)
4891 {
4892 const struct ixl_speed_type *type;
4893 unsigned int i;
4894
4895 for (i = 0; i < __arraycount(ixl_speed_type_map); i++) {
4896 type = &ixl_speed_type_map[i];
4897
4898 if (type->net_speed == baudrate) {
4899 return type->dev_speed;
4900 }
4901 }
4902
4903 return 0;
4904 }
4905
4906 static int
4907 ixl_restart_an(struct ixl_softc *sc)
4908 {
4909 struct ixl_aq_desc iaq;
4910
4911 memset(&iaq, 0, sizeof(iaq));
4912 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_RESTART_AN);
4913 iaq.iaq_param[0] =
4914 htole32(IXL_AQ_PHY_RESTART_AN | IXL_AQ_PHY_LINK_ENABLE);
4915
4916 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4917 aprint_error_dev(sc->sc_dev, "RESTART AN timeout\n");
4918 return -1;
4919 }
4920 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4921 aprint_error_dev(sc->sc_dev, "RESTART AN error\n");
4922 return -1;
4923 }
4924
4925 return 0;
4926 }
4927
4928 static int
4929 ixl_add_macvlan(struct ixl_softc *sc, const uint8_t *macaddr,
4930 uint16_t vlan, uint16_t flags)
4931 {
4932 struct ixl_aq_desc iaq;
4933 struct ixl_aq_add_macvlan *param;
4934 struct ixl_aq_add_macvlan_elem *elem;
4935
4936 memset(&iaq, 0, sizeof(iaq));
4937 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4938 iaq.iaq_opcode = htole16(IXL_AQ_OP_ADD_MACVLAN);
4939 iaq.iaq_datalen = htole16(sizeof(*elem));
4940 ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));
4941
4942 param = (struct ixl_aq_add_macvlan *)&iaq.iaq_param;
4943 param->num_addrs = htole16(1);
4944 param->seid0 = htole16(0x8000) | sc->sc_seid;
4945 param->seid1 = 0;
4946 param->seid2 = 0;
4947
4948 elem = IXL_DMA_KVA(&sc->sc_scratch);
4949 memset(elem, 0, sizeof(*elem));
4950 memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
4951 elem->flags = htole16(IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH | flags);
4952 elem->vlan = htole16(vlan);
4953
4954 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4955 return IXL_AQ_RC_EINVAL;
4956 }
4957
4958 switch (le16toh(iaq.iaq_retval)) {
4959 case IXL_AQ_RC_OK:
4960 break;
4961 case IXL_AQ_RC_ENOSPC:
4962 return ENOSPC;
4963 case IXL_AQ_RC_ENOENT:
4964 return ENOENT;
4965 case IXL_AQ_RC_EACCES:
4966 return EACCES;
4967 case IXL_AQ_RC_EEXIST:
4968 return EEXIST;
4969 case IXL_AQ_RC_EINVAL:
4970 return EINVAL;
4971 default:
4972 return EIO;
4973 }
4974
4975 return 0;
4976 }
4977
4978 static int
4979 ixl_remove_macvlan(struct ixl_softc *sc, const uint8_t *macaddr,
4980 uint16_t vlan, uint16_t flags)
4981 {
4982 struct ixl_aq_desc iaq;
4983 struct ixl_aq_remove_macvlan *param;
4984 struct ixl_aq_remove_macvlan_elem *elem;
4985
4986 memset(&iaq, 0, sizeof(iaq));
4987 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4988 iaq.iaq_opcode = htole16(IXL_AQ_OP_REMOVE_MACVLAN);
4989 iaq.iaq_datalen = htole16(sizeof(*elem));
4990 ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));
4991
4992 param = (struct ixl_aq_remove_macvlan *)&iaq.iaq_param;
4993 param->num_addrs = htole16(1);
4994 param->seid0 = htole16(0x8000) | sc->sc_seid;
4995 param->seid1 = 0;
4996 param->seid2 = 0;
4997
4998 elem = IXL_DMA_KVA(&sc->sc_scratch);
4999 memset(elem, 0, sizeof(*elem));
5000 memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
5001 elem->flags = htole16(IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH | flags);
5002 elem->vlan = htole16(vlan);
5003
5004 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
5005 return EINVAL;
5006 }
5007
5008 switch (le16toh(iaq.iaq_retval)) {
5009 case IXL_AQ_RC_OK:
5010 break;
5011 case IXL_AQ_RC_ENOENT:
5012 return ENOENT;
5013 case IXL_AQ_RC_EACCES:
5014 return EACCES;
5015 case IXL_AQ_RC_EINVAL:
5016 return EINVAL;
5017 default:
5018 return EIO;
5019 }
5020
5021 return 0;
5022 }
5023
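/*
 * Set up the Host Memory Cache: size the LAN TX/RX (and unused FCoE)
 * object areas, allocate backing pages plus the page and segment
 * descriptor tables, program the segment descriptors, and tell the
 * hardware where each object area starts and how many entries it has.
 */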
5024 static int
5025 ixl_hmc(struct ixl_softc *sc)
5026 {
5027 struct {
5028 uint32_t count;
5029 uint32_t minsize;
5030 bus_size_t objsiz;
5031 bus_size_t setoff;
5032 bus_size_t setcnt;
5033 } regs[] = {
5034 {
5035 0,
5036 IXL_HMC_TXQ_MINSIZE,
5037 I40E_GLHMC_LANTXOBJSZ,
5038 I40E_GLHMC_LANTXBASE(sc->sc_pf_id),
5039 I40E_GLHMC_LANTXCNT(sc->sc_pf_id),
5040 },
5041 {
5042 0,
5043 IXL_HMC_RXQ_MINSIZE,
5044 I40E_GLHMC_LANRXOBJSZ,
5045 I40E_GLHMC_LANRXBASE(sc->sc_pf_id),
5046 I40E_GLHMC_LANRXCNT(sc->sc_pf_id),
5047 },
5048 {
5049 0,
5050 0,
5051 I40E_GLHMC_FCOEDDPOBJSZ,
5052 I40E_GLHMC_FCOEDDPBASE(sc->sc_pf_id),
5053 I40E_GLHMC_FCOEDDPCNT(sc->sc_pf_id),
5054 },
5055 {
5056 0,
5057 0,
5058 I40E_GLHMC_FCOEFOBJSZ,
5059 I40E_GLHMC_FCOEFBASE(sc->sc_pf_id),
5060 I40E_GLHMC_FCOEFCNT(sc->sc_pf_id),
5061 },
5062 };
5063 struct ixl_hmc_entry *e;
5064 uint64_t size, dva;
5065 uint8_t *kva;
5066 uint64_t *sdpage;
5067 unsigned int i;
5068 int npages, tables;
5069 uint32_t reg;
5070
5071 CTASSERT(__arraycount(regs) <= __arraycount(sc->sc_hmc_entries));
5072
5073 regs[IXL_HMC_LAN_TX].count = regs[IXL_HMC_LAN_RX].count =
5074 ixl_rd(sc, I40E_GLHMC_LANQMAX);
5075
5076 size = 0;
5077 for (i = 0; i < __arraycount(regs); i++) {
5078 e = &sc->sc_hmc_entries[i];
5079
5080 e->hmc_count = regs[i].count;
5081 reg = ixl_rd(sc, regs[i].objsiz);
5082 e->hmc_size = BIT_ULL(0x3F & reg);
5083 e->hmc_base = size;
5084
5085 if ((e->hmc_size * 8) < regs[i].minsize) {
5086 aprint_error_dev(sc->sc_dev,
5087 "kernel hmc entry is too big\n");
5088 return -1;
5089 }
5090
5091 size += roundup(e->hmc_size * e->hmc_count, IXL_HMC_ROUNDUP);
5092 }
5093 size = roundup(size, IXL_HMC_PGSIZE);
5094 npages = size / IXL_HMC_PGSIZE;
5095
5096 tables = roundup(size, IXL_HMC_L2SZ) / IXL_HMC_L2SZ;
5097
5098 if (ixl_dmamem_alloc(sc, &sc->sc_hmc_pd, size, IXL_HMC_PGSIZE) != 0) {
5099 aprint_error_dev(sc->sc_dev,
5100 "unable to allocate hmc pd memory\n");
5101 return -1;
5102 }
5103
5104 if (ixl_dmamem_alloc(sc, &sc->sc_hmc_sd, tables * IXL_HMC_PGSIZE,
5105 IXL_HMC_PGSIZE) != 0) {
5106 aprint_error_dev(sc->sc_dev,
5107 "unable to allocate hmc sd memory\n");
5108 ixl_dmamem_free(sc, &sc->sc_hmc_pd);
5109 return -1;
5110 }
5111
5112 kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
5113 memset(kva, 0, IXL_DMA_LEN(&sc->sc_hmc_pd));
5114
5115 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
5116 0, IXL_DMA_LEN(&sc->sc_hmc_pd),
5117 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5118
5119 dva = IXL_DMA_DVA(&sc->sc_hmc_pd);
5120 sdpage = IXL_DMA_KVA(&sc->sc_hmc_sd);
5121 memset(sdpage, 0, IXL_DMA_LEN(&sc->sc_hmc_sd));
5122
5123 for (i = 0; (int)i < npages; i++) {
5124 *sdpage = htole64(dva | IXL_HMC_PDVALID);
5125 sdpage++;
5126
5127 dva += IXL_HMC_PGSIZE;
5128 }
5129
5130 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_sd),
5131 0, IXL_DMA_LEN(&sc->sc_hmc_sd),
5132 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5133
5134 dva = IXL_DMA_DVA(&sc->sc_hmc_sd);
5135 for (i = 0; (int)i < tables; i++) {
5136 uint32_t count;
5137
5138 KASSERT(npages >= 0);
5139
5140 count = ((unsigned int)npages > IXL_HMC_PGS) ?
5141 IXL_HMC_PGS : (unsigned int)npages;
5142
5143 ixl_wr(sc, I40E_PFHMC_SDDATAHIGH, dva >> 32);
5144 ixl_wr(sc, I40E_PFHMC_SDDATALOW, dva |
5145 (count << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
5146 (1U << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT));
5147 ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
5148 ixl_wr(sc, I40E_PFHMC_SDCMD,
5149 (1U << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | i);
5150
5151 npages -= IXL_HMC_PGS;
5152 dva += IXL_HMC_PGSIZE;
5153 }
5154
5155 for (i = 0; i < __arraycount(regs); i++) {
5156 e = &sc->sc_hmc_entries[i];
5157
5158 ixl_wr(sc, regs[i].setoff, e->hmc_base / IXL_HMC_ROUNDUP);
5159 ixl_wr(sc, regs[i].setcnt, e->hmc_count);
5160 }
5161
5162 return 0;
5163 }
5164
5165 static void
5166 ixl_hmc_free(struct ixl_softc *sc)
5167 {
5168 ixl_dmamem_free(sc, &sc->sc_hmc_sd);
5169 ixl_dmamem_free(sc, &sc->sc_hmc_pd);
5170 }
5171
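/*
 * Pack a host structure into the bit-level layout the hardware expects
 * for HMC contexts.  Each packing entry gives the source byte offset,
 * the field width in bits and the destination lsb position.
 */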
5172 static void
5173 ixl_hmc_pack(void *d, const void *s, const struct ixl_hmc_pack *packing,
5174 unsigned int npacking)
5175 {
5176 uint8_t *dst = d;
5177 const uint8_t *src = s;
5178 unsigned int i;
5179
5180 for (i = 0; i < npacking; i++) {
5181 const struct ixl_hmc_pack *pack = &packing[i];
5182 unsigned int offset = pack->lsb / 8;
5183 unsigned int align = pack->lsb % 8;
5184 const uint8_t *in = src + pack->offset;
5185 uint8_t *out = dst + offset;
5186 int width = pack->width;
5187 unsigned int inbits = 0;
5188
5189 if (align) {
5190 inbits = (*in++) << align;
5191 *out++ |= (inbits & 0xff);
5192 inbits >>= 8;
5193
5194 width -= 8 - align;
5195 }
5196
5197 while (width >= 8) {
5198 inbits |= (*in++) << align;
5199 *out++ = (inbits & 0xff);
5200 inbits >>= 8;
5201
5202 width -= 8;
5203 }
5204
5205 if (width > 0) {
5206 inbits |= (*in) << align;
5207 *out |= (inbits & ((1 << width) - 1));
5208 }
5209 }
5210 }
5211
5212 static struct ixl_aq_buf *
5213 ixl_aqb_alloc(struct ixl_softc *sc)
5214 {
5215 struct ixl_aq_buf *aqb;
5216
5217 aqb = malloc(sizeof(*aqb), M_DEVBUF, M_WAITOK);
5218 if (aqb == NULL)
5219 return NULL;
5220
5221 aqb->aqb_size = IXL_AQ_BUFLEN;
5222
5223 if (bus_dmamap_create(sc->sc_dmat, aqb->aqb_size, 1,
5224 aqb->aqb_size, 0,
5225 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &aqb->aqb_map) != 0)
5226 goto free;
5227 if (bus_dmamem_alloc(sc->sc_dmat, aqb->aqb_size,
5228 IXL_AQ_ALIGN, 0, &aqb->aqb_seg, 1, &aqb->aqb_nsegs,
5229 BUS_DMA_WAITOK) != 0)
5230 goto destroy;
5231 if (bus_dmamem_map(sc->sc_dmat, &aqb->aqb_seg, aqb->aqb_nsegs,
5232 aqb->aqb_size, &aqb->aqb_data, BUS_DMA_WAITOK) != 0)
5233 goto dma_free;
5234 if (bus_dmamap_load(sc->sc_dmat, aqb->aqb_map, aqb->aqb_data,
5235 aqb->aqb_size, NULL, BUS_DMA_WAITOK) != 0)
5236 goto unmap;
5237
5238 return aqb;
5239 unmap:
5240 bus_dmamem_unmap(sc->sc_dmat, aqb->aqb_data, aqb->aqb_size);
5241 dma_free:
5242 bus_dmamem_free(sc->sc_dmat, &aqb->aqb_seg, 1);
5243 destroy:
5244 bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
5245 free:
5246 free(aqb, M_DEVBUF);
5247
5248 return NULL;
5249 }
5250
5251 static void
5252 ixl_aqb_free(struct ixl_softc *sc, struct ixl_aq_buf *aqb)
5253 {
5254 bus_dmamap_unload(sc->sc_dmat, aqb->aqb_map);
5255 bus_dmamem_unmap(sc->sc_dmat, aqb->aqb_data, aqb->aqb_size);
5256 bus_dmamem_free(sc->sc_dmat, &aqb->aqb_seg, 1);
5257 bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
5258 free(aqb, M_DEVBUF);
5259 }
5260
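/*
 * Refill the admin receive queue with DMA buffers, reusing idle buffers
 * where possible and allocating new ones otherwise, then bump the tail
 * register if at least one slot was armed.
 */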
5261 static int
5262 ixl_arq_fill(struct ixl_softc *sc)
5263 {
5264 struct ixl_aq_buf *aqb;
5265 struct ixl_aq_desc *arq, *iaq;
5266 unsigned int prod = sc->sc_arq_prod;
5267 unsigned int n;
5268 int post = 0;
5269
5270 n = ixl_rxr_unrefreshed(sc->sc_arq_prod, sc->sc_arq_cons,
5271 IXL_AQ_NUM);
5272 arq = IXL_DMA_KVA(&sc->sc_arq);
5273
5274 if (__predict_false(n <= 0))
5275 return 0;
5276
5277 do {
5278 aqb = sc->sc_arq_live[prod];
5279 iaq = &arq[prod];
5280
5281 if (aqb == NULL) {
5282 aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle);
5283 if (aqb != NULL) {
5284 SIMPLEQ_REMOVE(&sc->sc_arq_idle, aqb,
5285 ixl_aq_buf, aqb_entry);
5286 } else if ((aqb = ixl_aqb_alloc(sc)) == NULL) {
5287 break;
5288 }
5289
5290 sc->sc_arq_live[prod] = aqb;
5291 memset(aqb->aqb_data, 0, aqb->aqb_size);
5292
5293 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0,
5294 aqb->aqb_size, BUS_DMASYNC_PREREAD);
5295
5296 iaq->iaq_flags = htole16(IXL_AQ_BUF |
5297 (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ?
5298 IXL_AQ_LB : 0));
5299 iaq->iaq_opcode = 0;
5300 iaq->iaq_datalen = htole16(aqb->aqb_size);
5301 iaq->iaq_retval = 0;
5302 iaq->iaq_cookie = 0;
5303 iaq->iaq_param[0] = 0;
5304 iaq->iaq_param[1] = 0;
5305 ixl_aq_dva(iaq, aqb->aqb_map->dm_segs[0].ds_addr);
5306 }
5307
5308 prod++;
5309 prod &= IXL_AQ_MASK;
5310
5311 post = 1;
5312
5313 } while (--n);
5314
5315 if (post) {
5316 sc->sc_arq_prod = prod;
5317 ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
5318 }
5319
5320 return post;
5321 }
5322
5323 static void
5324 ixl_arq_unfill(struct ixl_softc *sc)
5325 {
5326 struct ixl_aq_buf *aqb;
5327 unsigned int i;
5328
5329 for (i = 0; i < __arraycount(sc->sc_arq_live); i++) {
5330 aqb = sc->sc_arq_live[i];
5331 if (aqb == NULL)
5332 continue;
5333
5334 sc->sc_arq_live[i] = NULL;
5335 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, aqb->aqb_size,
5336 BUS_DMASYNC_POSTREAD);
5337 ixl_aqb_free(sc, aqb);
5338 }
5339
5340 while ((aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle)) != NULL) {
5341 SIMPLEQ_REMOVE(&sc->sc_arq_idle, aqb,
5342 ixl_aq_buf, aqb_entry);
5343 ixl_aqb_free(sc, aqb);
5344 }
5345 }
5346
5347 static void
5348 ixl_clear_hw(struct ixl_softc *sc)
5349 {
5350 uint32_t num_queues, base_queue;
5351 uint32_t num_pf_int;
5352 uint32_t num_vf_int;
5353 uint32_t num_vfs;
5354 uint32_t i, j;
5355 uint32_t val;
5356 uint32_t eol = 0x7ff;
5357
5358 /* get number of interrupts, queues, and vfs */
5359 val = ixl_rd(sc, I40E_GLPCI_CNF2);
5360 num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
5361 I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
5362 num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
5363 I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
5364
5365 val = ixl_rd(sc, I40E_PFLAN_QALLOC);
5366 base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
5367 I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
5368 j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
5369 I40E_PFLAN_QALLOC_LASTQ_SHIFT;
5370 if (val & I40E_PFLAN_QALLOC_VALID_MASK)
5371 num_queues = (j - base_queue) + 1;
5372 else
5373 num_queues = 0;
5374
5375 val = ixl_rd(sc, I40E_PF_VT_PFALLOC);
5376 i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
5377 I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
5378 j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
5379 I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
5380 if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
5381 num_vfs = (j - i) + 1;
5382 else
5383 num_vfs = 0;
5384
5385 /* stop all the interrupts */
5386 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0);
5387 ixl_flush(sc);
5388 val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
5389 for (i = 0; i < num_pf_int - 2; i++)
5390 ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), val);
5391 ixl_flush(sc);
5392
5393 /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
5394 val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
5395 ixl_wr(sc, I40E_PFINT_LNKLST0, val);
5396 for (i = 0; i < num_pf_int - 2; i++)
5397 ixl_wr(sc, I40E_PFINT_LNKLSTN(i), val);
5398 val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
5399 for (i = 0; i < num_vfs; i++)
5400 ixl_wr(sc, I40E_VPINT_LNKLST0(i), val);
5401 for (i = 0; i < num_vf_int - 2; i++)
5402 ixl_wr(sc, I40E_VPINT_LNKLSTN(i), val);
5403
5404 /* warn the HW of the coming Tx disables */
5405 for (i = 0; i < num_queues; i++) {
5406 uint32_t abs_queue_idx = base_queue + i;
5407 uint32_t reg_block = 0;
5408
5409 if (abs_queue_idx >= 128) {
5410 reg_block = abs_queue_idx / 128;
5411 abs_queue_idx %= 128;
5412 }
5413
5414 val = ixl_rd(sc, I40E_GLLAN_TXPRE_QDIS(reg_block));
5415 val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
5416 val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
5417 val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
5418
5419 ixl_wr(sc, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
5420 }
5421 delaymsec(400);
5422
5423 /* stop all the queues */
5424 for (i = 0; i < num_queues; i++) {
5425 ixl_wr(sc, I40E_QINT_TQCTL(i), 0);
5426 ixl_wr(sc, I40E_QTX_ENA(i), 0);
5427 ixl_wr(sc, I40E_QINT_RQCTL(i), 0);
5428 ixl_wr(sc, I40E_QRX_ENA(i), 0);
5429 }
5430
5431 /* short wait for all queue disables to settle */
5432 delaymsec(50);
5433 }
5434
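/*
 * Reset the physical function: wait out any global reset that is in
 * progress, wait for the firmware to report ready, then (unless a
 * global reset already did the work) request a PF software reset and
 * wait for it to complete.
 */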
5435 static int
5436 ixl_pf_reset(struct ixl_softc *sc)
5437 {
5438 uint32_t cnt = 0;
5439 uint32_t cnt1 = 0;
5440 uint32_t reg = 0, reg0 = 0;
5441 uint32_t grst_del;
5442
5443 /*
5444 * Poll for Global Reset steady state in case of recent GRST.
5445 * The grst delay value is in 100ms units, and we'll wait a
5446 * couple counts longer to be sure we don't just miss the end.
5447 */
5448 grst_del = ixl_rd(sc, I40E_GLGEN_RSTCTL);
5449 grst_del &= I40E_GLGEN_RSTCTL_GRSTDEL_MASK;
5450 grst_del >>= I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
5451
5452 grst_del = grst_del * 20;
5453
5454 for (cnt = 0; cnt < grst_del; cnt++) {
5455 reg = ixl_rd(sc, I40E_GLGEN_RSTAT);
5456 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
5457 break;
5458 delaymsec(100);
5459 }
5460 if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
5461 aprint_error(", Global reset polling failed to complete\n");
5462 return -1;
5463 }
5464
5465 /* Now Wait for the FW to be ready */
5466 for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
5467 reg = ixl_rd(sc, I40E_GLNVM_ULD);
5468 reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5469 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
5470 if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5471 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))
5472 break;
5473
5474 delaymsec(10);
5475 }
5476 if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5477 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
5478 aprint_error(", wait for FW Reset complete timed out "
5479 "(I40E_GLNVM_ULD = 0x%x)\n", reg);
5480 return -1;
5481 }
5482
5483 /*
5484 * If there was a Global Reset in progress when we got here,
5485 * we don't need to do the PF Reset
5486 */
5487 if (cnt == 0) {
5488 reg = ixl_rd(sc, I40E_PFGEN_CTRL);
5489 ixl_wr(sc, I40E_PFGEN_CTRL, reg | I40E_PFGEN_CTRL_PFSWR_MASK);
5490 for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) {
5491 reg = ixl_rd(sc, I40E_PFGEN_CTRL);
5492 if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
5493 break;
5494 delaymsec(1);
5495
5496 reg0 = ixl_rd(sc, I40E_GLGEN_RSTAT);
5497 if (reg0 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
5498 aprint_error(", Core reset upcoming."
5499 " Skipping PF reset reset request\n");
5500 return -1;
5501 }
5502 }
5503 if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
5504 aprint_error(", PF reset polling failed to complete"
5505 "(I40E_PFGEN_CTRL= 0x%x)\n", reg);
5506 return -1;
5507 }
5508 }
5509
5510 return 0;
5511 }
5512
5513 static int
5514 ixl_dmamem_alloc(struct ixl_softc *sc, struct ixl_dmamem *ixm,
5515 bus_size_t size, bus_size_t align)
5516 {
5517 ixm->ixm_size = size;
5518
5519 if (bus_dmamap_create(sc->sc_dmat, ixm->ixm_size, 1,
5520 ixm->ixm_size, 0,
5521 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
5522 &ixm->ixm_map) != 0)
5523 return 1;
5524 if (bus_dmamem_alloc(sc->sc_dmat, ixm->ixm_size,
5525 align, 0, &ixm->ixm_seg, 1, &ixm->ixm_nsegs,
5526 BUS_DMA_WAITOK) != 0)
5527 goto destroy;
5528 if (bus_dmamem_map(sc->sc_dmat, &ixm->ixm_seg, ixm->ixm_nsegs,
5529 ixm->ixm_size, &ixm->ixm_kva, BUS_DMA_WAITOK) != 0)
5530 goto free;
5531 if (bus_dmamap_load(sc->sc_dmat, ixm->ixm_map, ixm->ixm_kva,
5532 ixm->ixm_size, NULL, BUS_DMA_WAITOK) != 0)
5533 goto unmap;
5534
5535 memset(ixm->ixm_kva, 0, ixm->ixm_size);
5536
5537 return 0;
5538 unmap:
5539 bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
5540 free:
5541 bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
5542 destroy:
5543 bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
5544 return 1;
5545 }
5546
5547 static void
5548 ixl_dmamem_free(struct ixl_softc *sc, struct ixl_dmamem *ixm)
5549 {
5550 bus_dmamap_unload(sc->sc_dmat, ixm->ixm_map);
5551 bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
5552 bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
5553 bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
5554 }
5555
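/*
 * Switch to per-VLAN hardware filtering: drop the "any VLAN" filters
 * for the station and broadcast addresses and install perfect-match
 * filters for VLAN 0 and for every VLAN id currently configured on the
 * interface.
 */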
5556 static int
5557 ixl_setup_vlan_hwfilter(struct ixl_softc *sc)
5558 {
5559 struct ethercom *ec = &sc->sc_ec;
5560 struct vlanid_list *vlanidp;
5561 int rv;
5562
5563 ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
5564 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
5565 ixl_remove_macvlan(sc, etherbroadcastaddr, 0,
5566 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
5567
5568 rv = ixl_add_macvlan(sc, sc->sc_enaddr, 0,
5569 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5570 if (rv != 0)
5571 return rv;
5572 rv = ixl_add_macvlan(sc, etherbroadcastaddr, 0,
5573 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5574 if (rv != 0)
5575 return rv;
5576
5577 ETHER_LOCK(ec);
5578 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
5579 rv = ixl_add_macvlan(sc, sc->sc_enaddr,
5580 vlanidp->vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5581 if (rv != 0)
5582 break;
5583 rv = ixl_add_macvlan(sc, etherbroadcastaddr,
5584 vlanidp->vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5585 if (rv != 0)
5586 break;
5587 }
5588 ETHER_UNLOCK(ec);
5589
5590 return rv;
5591 }
5592
5593 static void
5594 ixl_teardown_vlan_hwfilter(struct ixl_softc *sc)
5595 {
5596 struct vlanid_list *vlanidp;
5597 struct ethercom *ec = &sc->sc_ec;
5598
5599 ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
5600 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5601 ixl_remove_macvlan(sc, etherbroadcastaddr, 0,
5602 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5603
5604 ETHER_LOCK(ec);
5605 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
5606 ixl_remove_macvlan(sc, sc->sc_enaddr,
5607 vlanidp->vid, IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5608 ixl_remove_macvlan(sc, etherbroadcastaddr,
5609 vlanidp->vid, IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5610 }
5611 ETHER_UNLOCK(ec);
5612
5613 ixl_add_macvlan(sc, sc->sc_enaddr, 0,
5614 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
5615 ixl_add_macvlan(sc, etherbroadcastaddr, 0,
5616 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
5617 }
5618
5619 static int
5620 ixl_update_macvlan(struct ixl_softc *sc)
5621 {
5622 int rv = 0;
5623 int next_ec_capenable = sc->sc_ec.ec_capenable;
5624
5625 if (ISSET(next_ec_capenable, ETHERCAP_VLAN_HWFILTER)) {
5626 rv = ixl_setup_vlan_hwfilter(sc);
5627 if (rv != 0)
5628 ixl_teardown_vlan_hwfilter(sc);
5629 } else {
5630 ixl_teardown_vlan_hwfilter(sc);
5631 }
5632
5633 return rv;
5634 }
5635
5636 static int
5637 ixl_ifflags_cb(struct ethercom *ec)
5638 {
5639 struct ifnet *ifp = &ec->ec_if;
5640 struct ixl_softc *sc = ifp->if_softc;
5641 int rv, change;
5642
5643 mutex_enter(&sc->sc_cfg_lock);
5644
5645 change = ec->ec_capenable ^ sc->sc_cur_ec_capenable;
5646
5647 if (ISSET(change, ETHERCAP_VLAN_HWTAGGING)) {
5648 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWTAGGING;
5649 rv = ENETRESET;
5650 goto out;
5651 }
5652
5653 if (ISSET(change, ETHERCAP_VLAN_HWFILTER)) {
5654 rv = ixl_update_macvlan(sc);
5655 if (rv == 0) {
5656 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWFILTER;
5657 } else {
5658 CLR(ec->ec_capenable, ETHERCAP_VLAN_HWFILTER);
5659 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER);
5660 }
5661 }
5662
5663 rv = ixl_iff(sc);
5664 out:
5665 mutex_exit(&sc->sc_cfg_lock);
5666
5667 return rv;
5668 }
5669
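/*
 * Translate a LINK STATUS reply into ifmedia active/status words and a
 * link state, and update the interface baudrate accordingly.
 */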
5670 static int
5671 ixl_set_link_status(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
5672 {
5673 const struct ixl_aq_link_status *status;
5674 const struct ixl_phy_type *itype;
5675
5676 uint64_t ifm_active = IFM_ETHER;
5677 uint64_t ifm_status = IFM_AVALID;
5678 int link_state = LINK_STATE_DOWN;
5679 uint64_t baudrate = 0;
5680
5681 status = (const struct ixl_aq_link_status *)iaq->iaq_param;
5682 if (!ISSET(status->link_info, IXL_AQ_LINK_UP_FUNCTION)) {
5683 ifm_active |= IFM_NONE;
5684 goto done;
5685 }
5686
5687 ifm_active |= IFM_FDX;
5688 ifm_status |= IFM_ACTIVE;
5689 link_state = LINK_STATE_UP;
5690
5691 itype = ixl_search_phy_type(status->phy_type);
5692 if (itype != NULL)
5693 ifm_active |= itype->ifm_type;
5694
5695 if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_TX))
5696 ifm_active |= IFM_ETH_TXPAUSE;
5697 if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_RX))
5698 ifm_active |= IFM_ETH_RXPAUSE;
5699
5700 baudrate = ixl_search_link_speed(status->link_speed);
5701
5702 done:
5703 /* NET_ASSERT_LOCKED() except during attach */
5704 sc->sc_media_active = ifm_active;
5705 sc->sc_media_status = ifm_status;
5706
5707 sc->sc_ec.ec_if.if_baudrate = baudrate;
5708
5709 return link_state;
5710 }
5711
5712 static int
5713 ixl_establish_intx(struct ixl_softc *sc)
5714 {
5715 pci_chipset_tag_t pc = sc->sc_pa.pa_pc;
5716 pci_intr_handle_t *intr;
5717 char xnamebuf[32];
5718 char intrbuf[PCI_INTRSTR_LEN];
5719 char const *intrstr;
5720
5721 KASSERT(sc->sc_nintrs == 1);
5722
5723 intr = &sc->sc_ihp[0];
5724
5725 intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf));
5726 snprintf(xnamebuf, sizeof(xnamebuf), "%s:legacy",
5727 device_xname(sc->sc_dev));
5728
5729 sc->sc_ihs[0] = pci_intr_establish_xname(pc, *intr, IPL_NET, ixl_intr,
5730 sc, xnamebuf);
5731
5732 if (sc->sc_ihs[0] == NULL) {
5733 aprint_error_dev(sc->sc_dev,
5734 "unable to establish interrupt at %s\n", intrstr);
5735 return -1;
5736 }
5737
5738 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
5739 return 0;
5740 }
5741
5742 static int
5743 ixl_establish_msix(struct ixl_softc *sc)
5744 {
5745 pci_chipset_tag_t pc = sc->sc_pa.pa_pc;
5746 kcpuset_t *affinity;
5747 unsigned int vector = 0;
5748 unsigned int i;
5749 int affinity_to, r;
5750 char xnamebuf[32];
5751 char intrbuf[PCI_INTRSTR_LEN];
5752 char const *intrstr;
5753
5754 kcpuset_create(&affinity, false);
5755
5756 /* the "other" intr is mapped to vector 0 */
5757 vector = 0;
5758 intrstr = pci_intr_string(pc, sc->sc_ihp[vector],
5759 intrbuf, sizeof(intrbuf));
5760 snprintf(xnamebuf, sizeof(xnamebuf), "%s others",
5761 device_xname(sc->sc_dev));
5762 sc->sc_ihs[vector] = pci_intr_establish_xname(pc,
5763 sc->sc_ihp[vector], IPL_NET, ixl_other_intr,
5764 sc, xnamebuf);
5765 if (sc->sc_ihs[vector] == NULL) {
5766 aprint_error_dev(sc->sc_dev,
5767 "unable to establish interrupt at %s\n", intrstr);
5768 goto fail;
5769 }
5770
5771 aprint_normal_dev(sc->sc_dev, "other interrupt at %s", intrstr);
5772
5773 affinity_to = ncpu > (int)sc->sc_nqueue_pairs_max ? 1 : 0;
5774 affinity_to = (affinity_to + sc->sc_nqueue_pairs_max) % ncpu;
5775
5776 kcpuset_zero(affinity);
5777 kcpuset_set(affinity, affinity_to);
5778 r = interrupt_distribute(sc->sc_ihs[vector], affinity, NULL);
5779 if (r == 0) {
5780 aprint_normal(", affinity to %u", affinity_to);
5781 }
5782 aprint_normal("\n");
5783 vector++;
5784
5785 sc->sc_msix_vector_queue = vector;
5786 affinity_to = ncpu > (int)sc->sc_nqueue_pairs_max ? 1 : 0;
5787
5788 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
5789 intrstr = pci_intr_string(pc, sc->sc_ihp[vector],
5790 intrbuf, sizeof(intrbuf));
5791 snprintf(xnamebuf, sizeof(xnamebuf), "%s TXRX%d",
5792 device_xname(sc->sc_dev), i);
5793
5794 sc->sc_ihs[vector] = pci_intr_establish_xname(pc,
5795 sc->sc_ihp[vector], IPL_NET, ixl_queue_intr,
5796 (void *)&sc->sc_qps[i], xnamebuf);
5797
5798 if (sc->sc_ihs[vector] == NULL) {
5799 aprint_error_dev(sc->sc_dev,
5800 "unable to establish interrupt at %s\n", intrstr);
5801 goto fail;
5802 }
5803
5804 aprint_normal_dev(sc->sc_dev,
5805 "for TXRX%d interrupt at %s",i , intrstr);
5806
5807 kcpuset_zero(affinity);
5808 kcpuset_set(affinity, affinity_to);
5809 r = interrupt_distribute(sc->sc_ihs[vector], affinity, NULL);
5810 if (r == 0) {
5811 aprint_normal(", affinity to %u", affinity_to);
5812 affinity_to = (affinity_to + 1) % ncpu;
5813 }
5814 aprint_normal("\n");
5815 vector++;
5816 }
5817
5818 kcpuset_destroy(affinity);
5819
5820 return 0;
5821 fail:
5822 for (i = 0; i < vector; i++) {
5823 pci_intr_disestablish(pc, sc->sc_ihs[i]);
5824 }
5825
5826 sc->sc_msix_vector_queue = 0;
5828 kcpuset_destroy(affinity);
5829
5830 return -1;
5831 }
5832
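/*
 * Wire each queue pair to its interrupt vector: build the per-vector
 * queue lists (LNKLSTN) and program QINT_RQCTL/QINT_TQCTL so RX and TX
 * causes raise the right vector, falling back to a single shared vector
 * when MSI-X is not in use.
 */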
5833 static void
5834 ixl_config_queue_intr(struct ixl_softc *sc)
5835 {
5836 unsigned int i, vector;
5837
5838 if (sc->sc_intrtype == PCI_INTR_TYPE_MSIX) {
5839 vector = sc->sc_msix_vector_queue;
5840 } else {
5841 vector = I40E_INTR_NOTX_INTR;
5842
5843 ixl_wr(sc, I40E_PFINT_LNKLST0,
5844 (I40E_INTR_NOTX_QUEUE <<
5845 I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
5846 (I40E_QUEUE_TYPE_RX <<
5847 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
5848 }
5849
5850 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
5851 ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), 0);
5852 ixl_flush(sc);
5853
5854 ixl_wr(sc, I40E_PFINT_LNKLSTN(i),
5855 ((i) << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
5856 (I40E_QUEUE_TYPE_RX <<
5857 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
5858
5859 ixl_wr(sc, I40E_QINT_RQCTL(i),
5860 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
5861 (I40E_ITR_INDEX_RX <<
5862 I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
5863 (I40E_INTR_NOTX_RX_QUEUE <<
5864 I40E_QINT_RQCTL_MSIX0_INDX_SHIFT) |
5865 (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
5866 (I40E_QUEUE_TYPE_TX <<
5867 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
5868 I40E_QINT_RQCTL_CAUSE_ENA_MASK);
5869
5870 ixl_wr(sc, I40E_QINT_TQCTL(i),
5871 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
5872 (I40E_ITR_INDEX_TX <<
5873 I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
5874 (I40E_INTR_NOTX_TX_QUEUE <<
5875 I40E_QINT_TQCTL_MSIX0_INDX_SHIFT) |
5876 (I40E_QUEUE_TYPE_EOL <<
5877 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
5878 (I40E_QUEUE_TYPE_RX <<
5879 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) |
5880 I40E_QINT_TQCTL_CAUSE_ENA_MASK);
5881
5882 if (sc->sc_intrtype == PCI_INTR_TYPE_MSIX)
5883 vector++;
5884 }
5885 ixl_flush(sc);
5886
5887 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_RX), 0x7a);
5888 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_TX), 0x7a);
5889 ixl_flush(sc);
5890 }
5891
5892 static void
5893 ixl_config_other_intr(struct ixl_softc *sc)
5894 {
5895 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0);
5896 (void)ixl_rd(sc, I40E_PFINT_ICR0);
5897
5898 ixl_wr(sc, I40E_PFINT_ICR0_ENA,
5899 I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
5900 I40E_PFINT_ICR0_ENA_GRST_MASK |
5901 I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
5902 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
5903 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
5904 I40E_PFINT_ICR0_ENA_VFLR_MASK |
5905 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
5906 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
5907 I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK);
5908
5909 ixl_wr(sc, I40E_PFINT_LNKLST0, 0x7FF);
5910 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_OTHER), 0);
5911 ixl_wr(sc, I40E_PFINT_STAT_CTL0,
5912 (I40E_ITR_INDEX_OTHER <<
5913 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT));
5914 ixl_flush(sc);
5915 }
5916
5917 static int
5918 ixl_setup_interrupts(struct ixl_softc *sc)
5919 {
5920 struct pci_attach_args *pa = &sc->sc_pa;
5921 pci_intr_type_t max_type, intr_type;
5922 int counts[PCI_INTR_TYPE_SIZE];
5923 int error;
5924 unsigned int i;
5925 bool retry;
5926
5927 memset(counts, 0, sizeof(counts));
5928 max_type = PCI_INTR_TYPE_MSIX;
5929 /* QPs + other interrupt */
5930 counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueue_pairs_max + 1;
5931 counts[PCI_INTR_TYPE_INTX] = 1;
5932
5933 if (ixl_param_nomsix)
5934 counts[PCI_INTR_TYPE_MSIX] = 0;
5935
5936 do {
5937 retry = false;
5938 error = pci_intr_alloc(pa, &sc->sc_ihp, counts, max_type);
5939 if (error != 0) {
5940 aprint_error_dev(sc->sc_dev,
5941 "couldn't map interrupt\n");
5942 break;
5943 }
5944
5945 intr_type = pci_intr_type(pa->pa_pc, sc->sc_ihp[0]);
5946 sc->sc_nintrs = counts[intr_type];
5947 KASSERT(sc->sc_nintrs > 0);
5948
5949 for (i = 0; i < sc->sc_nintrs; i++) {
5950 pci_intr_setattr(pa->pa_pc, &sc->sc_ihp[i],
5951 PCI_INTR_MPSAFE, true);
5952 }
5953
5954 sc->sc_ihs = kmem_alloc(sizeof(sc->sc_ihs[0]) * sc->sc_nintrs,
5955 KM_SLEEP);
5956
5957 if (intr_type == PCI_INTR_TYPE_MSIX) {
5958 error = ixl_establish_msix(sc);
5959 if (error) {
5960 counts[PCI_INTR_TYPE_MSIX] = 0;
5961 retry = true;
5962 }
5963 } else if (intr_type == PCI_INTR_TYPE_INTX) {
5964 error = ixl_establish_intx(sc);
5965 } else {
5966 error = -1;
5967 }
5968
5969 if (error) {
5970 kmem_free(sc->sc_ihs,
5971 sizeof(sc->sc_ihs[0]) * sc->sc_nintrs);
5972 pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs);
5973 } else {
5974 sc->sc_intrtype = intr_type;
5975 }
5976 } while (retry);
5977
5978 return error;
5979 }
5980
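/* Disestablish all interrupt handlers and release the allocated vectors. */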
5981 static void
5982 ixl_teardown_interrupts(struct ixl_softc *sc)
5983 {
5984 struct pci_attach_args *pa = &sc->sc_pa;
5985 unsigned int i;
5986
5987 for (i = 0; i < sc->sc_nintrs; i++) {
5988 pci_intr_disestablish(pa->pa_pc, sc->sc_ihs[i]);
5989 }
5990
5991 pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs);
5992
5993 kmem_free(sc->sc_ihs, sizeof(sc->sc_ihs[0]) * sc->sc_nintrs);
5994 sc->sc_ihs = NULL;
5995 sc->sc_nintrs = 0;
5996 }
5997
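/*
 * Attach the event counters: per-queue-pair Tx/Rx counters, admin and
 * error event counters, and the hardware port/VSI statistics, then set
 * up the periodic statistics callout and its workqueue task.
 */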
5998 static int
5999 ixl_setup_stats(struct ixl_softc *sc)
6000 {
6001 struct ixl_queue_pair *qp;
6002 struct ixl_tx_ring *txr;
6003 struct ixl_rx_ring *rxr;
6004 struct ixl_stats_counters *isc;
6005 unsigned int i;
6006
6007 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
6008 qp = &sc->sc_qps[i];
6009 txr = qp->qp_txr;
6010 rxr = qp->qp_rxr;
6011
 6012 		evcnt_attach_dynamic(&txr->txr_defragged, EVCNT_TYPE_MISC,
 6013 		    NULL, qp->qp_name, "m_defrag succeeded");
 6014 		evcnt_attach_dynamic(&txr->txr_defrag_failed, EVCNT_TYPE_MISC,
 6015 		    NULL, qp->qp_name, "m_defrag failed");
6016 evcnt_attach_dynamic(&txr->txr_pcqdrop, EVCNT_TYPE_MISC,
6017 NULL, qp->qp_name, "Dropped in pcq");
6018 evcnt_attach_dynamic(&txr->txr_transmitdef, EVCNT_TYPE_MISC,
6019 NULL, qp->qp_name, "Deferred transmit");
6020 evcnt_attach_dynamic(&txr->txr_intr, EVCNT_TYPE_INTR,
6021 NULL, qp->qp_name, "Interrupt on queue");
6022 evcnt_attach_dynamic(&txr->txr_defer, EVCNT_TYPE_MISC,
6023 NULL, qp->qp_name, "Handled queue in softint/workqueue");
6024
6025 evcnt_attach_dynamic(&rxr->rxr_mgethdr_failed, EVCNT_TYPE_MISC,
6026 NULL, qp->qp_name, "MGETHDR failed");
6027 evcnt_attach_dynamic(&rxr->rxr_mgetcl_failed, EVCNT_TYPE_MISC,
6028 NULL, qp->qp_name, "MCLGET failed");
6029 evcnt_attach_dynamic(&rxr->rxr_mbuf_load_failed,
6030 EVCNT_TYPE_MISC, NULL, qp->qp_name,
6031 "bus_dmamap_load_mbuf failed");
6032 evcnt_attach_dynamic(&rxr->rxr_intr, EVCNT_TYPE_INTR,
6033 NULL, qp->qp_name, "Interrupt on queue");
6034 evcnt_attach_dynamic(&rxr->rxr_defer, EVCNT_TYPE_MISC,
6035 NULL, qp->qp_name, "Handled queue in softint/workqueue");
6036 }
6037
6038 evcnt_attach_dynamic(&sc->sc_event_atq, EVCNT_TYPE_INTR,
6039 NULL, device_xname(sc->sc_dev), "Interrupt for other events");
6040 evcnt_attach_dynamic(&sc->sc_event_link, EVCNT_TYPE_MISC,
6041 NULL, device_xname(sc->sc_dev), "Link status event");
6042 evcnt_attach_dynamic(&sc->sc_event_ecc_err, EVCNT_TYPE_MISC,
6043 NULL, device_xname(sc->sc_dev), "ECC error");
6044 evcnt_attach_dynamic(&sc->sc_event_pci_exception, EVCNT_TYPE_MISC,
6045 NULL, device_xname(sc->sc_dev), "PCI exception");
6046 evcnt_attach_dynamic(&sc->sc_event_crit_err, EVCNT_TYPE_MISC,
6047 NULL, device_xname(sc->sc_dev), "Critical error");
6048
6049 isc = &sc->sc_stats_counters;
6050 evcnt_attach_dynamic(&isc->isc_crc_errors, EVCNT_TYPE_MISC,
6051 NULL, device_xname(sc->sc_dev), "CRC errors");
6052 evcnt_attach_dynamic(&isc->isc_illegal_bytes, EVCNT_TYPE_MISC,
6053 NULL, device_xname(sc->sc_dev), "Illegal bytes");
6054 evcnt_attach_dynamic(&isc->isc_mac_local_faults, EVCNT_TYPE_MISC,
6055 NULL, device_xname(sc->sc_dev), "Mac local faults");
6056 evcnt_attach_dynamic(&isc->isc_mac_remote_faults, EVCNT_TYPE_MISC,
6057 NULL, device_xname(sc->sc_dev), "Mac remote faults");
6058 evcnt_attach_dynamic(&isc->isc_link_xon_rx, EVCNT_TYPE_MISC,
6059 NULL, device_xname(sc->sc_dev), "Rx xon");
6060 evcnt_attach_dynamic(&isc->isc_link_xon_tx, EVCNT_TYPE_MISC,
6061 NULL, device_xname(sc->sc_dev), "Tx xon");
6062 evcnt_attach_dynamic(&isc->isc_link_xoff_rx, EVCNT_TYPE_MISC,
6063 NULL, device_xname(sc->sc_dev), "Rx xoff");
6064 evcnt_attach_dynamic(&isc->isc_link_xoff_tx, EVCNT_TYPE_MISC,
6065 NULL, device_xname(sc->sc_dev), "Tx xoff");
6066 evcnt_attach_dynamic(&isc->isc_rx_fragments, EVCNT_TYPE_MISC,
6067 NULL, device_xname(sc->sc_dev), "Rx fragments");
6068 evcnt_attach_dynamic(&isc->isc_rx_jabber, EVCNT_TYPE_MISC,
6069 NULL, device_xname(sc->sc_dev), "Rx jabber");
6070
6071 evcnt_attach_dynamic(&isc->isc_rx_size_64, EVCNT_TYPE_MISC,
6072 NULL, device_xname(sc->sc_dev), "Rx size 64");
6073 evcnt_attach_dynamic(&isc->isc_rx_size_127, EVCNT_TYPE_MISC,
6074 NULL, device_xname(sc->sc_dev), "Rx size 127");
6075 evcnt_attach_dynamic(&isc->isc_rx_size_255, EVCNT_TYPE_MISC,
6076 NULL, device_xname(sc->sc_dev), "Rx size 255");
6077 evcnt_attach_dynamic(&isc->isc_rx_size_511, EVCNT_TYPE_MISC,
6078 NULL, device_xname(sc->sc_dev), "Rx size 511");
6079 evcnt_attach_dynamic(&isc->isc_rx_size_1023, EVCNT_TYPE_MISC,
6080 NULL, device_xname(sc->sc_dev), "Rx size 1023");
6081 evcnt_attach_dynamic(&isc->isc_rx_size_1522, EVCNT_TYPE_MISC,
6082 NULL, device_xname(sc->sc_dev), "Rx size 1522");
6083 evcnt_attach_dynamic(&isc->isc_rx_size_big, EVCNT_TYPE_MISC,
6084 NULL, device_xname(sc->sc_dev), "Rx jumbo packets");
6085 evcnt_attach_dynamic(&isc->isc_rx_undersize, EVCNT_TYPE_MISC,
6086 NULL, device_xname(sc->sc_dev), "Rx under size");
6087 evcnt_attach_dynamic(&isc->isc_rx_oversize, EVCNT_TYPE_MISC,
6088 NULL, device_xname(sc->sc_dev), "Rx over size");
6089
6090 evcnt_attach_dynamic(&isc->isc_rx_bytes, EVCNT_TYPE_MISC,
6091 NULL, device_xname(sc->sc_dev), "Rx bytes / port");
6092 evcnt_attach_dynamic(&isc->isc_rx_discards, EVCNT_TYPE_MISC,
6093 NULL, device_xname(sc->sc_dev), "Rx discards / port");
6094 evcnt_attach_dynamic(&isc->isc_rx_unicast, EVCNT_TYPE_MISC,
6095 NULL, device_xname(sc->sc_dev), "Rx unicast / port");
6096 evcnt_attach_dynamic(&isc->isc_rx_multicast, EVCNT_TYPE_MISC,
6097 NULL, device_xname(sc->sc_dev), "Rx multicast / port");
6098 evcnt_attach_dynamic(&isc->isc_rx_broadcast, EVCNT_TYPE_MISC,
6099 NULL, device_xname(sc->sc_dev), "Rx broadcast / port");
6100
6101 evcnt_attach_dynamic(&isc->isc_vsi_rx_bytes, EVCNT_TYPE_MISC,
6102 NULL, device_xname(sc->sc_dev), "Rx bytes / vsi");
6103 evcnt_attach_dynamic(&isc->isc_vsi_rx_discards, EVCNT_TYPE_MISC,
 6104 	    NULL, device_xname(sc->sc_dev), "Rx discards / vsi");
6105 evcnt_attach_dynamic(&isc->isc_vsi_rx_unicast, EVCNT_TYPE_MISC,
6106 NULL, device_xname(sc->sc_dev), "Rx unicast / vsi");
6107 evcnt_attach_dynamic(&isc->isc_vsi_rx_multicast, EVCNT_TYPE_MISC,
6108 NULL, device_xname(sc->sc_dev), "Rx multicast / vsi");
6109 evcnt_attach_dynamic(&isc->isc_vsi_rx_broadcast, EVCNT_TYPE_MISC,
6110 NULL, device_xname(sc->sc_dev), "Rx broadcast / vsi");
6111
6112 evcnt_attach_dynamic(&isc->isc_tx_size_64, EVCNT_TYPE_MISC,
6113 NULL, device_xname(sc->sc_dev), "Tx size 64");
6114 evcnt_attach_dynamic(&isc->isc_tx_size_127, EVCNT_TYPE_MISC,
6115 NULL, device_xname(sc->sc_dev), "Tx size 127");
6116 evcnt_attach_dynamic(&isc->isc_tx_size_255, EVCNT_TYPE_MISC,
6117 NULL, device_xname(sc->sc_dev), "Tx size 255");
6118 evcnt_attach_dynamic(&isc->isc_tx_size_511, EVCNT_TYPE_MISC,
6119 NULL, device_xname(sc->sc_dev), "Tx size 511");
6120 evcnt_attach_dynamic(&isc->isc_tx_size_1023, EVCNT_TYPE_MISC,
6121 NULL, device_xname(sc->sc_dev), "Tx size 1023");
6122 evcnt_attach_dynamic(&isc->isc_tx_size_1522, EVCNT_TYPE_MISC,
6123 NULL, device_xname(sc->sc_dev), "Tx size 1522");
6124 evcnt_attach_dynamic(&isc->isc_tx_size_big, EVCNT_TYPE_MISC,
6125 NULL, device_xname(sc->sc_dev), "Tx jumbo packets");
6126
6127 evcnt_attach_dynamic(&isc->isc_tx_bytes, EVCNT_TYPE_MISC,
6128 NULL, device_xname(sc->sc_dev), "Tx bytes / port");
6129 evcnt_attach_dynamic(&isc->isc_tx_dropped_link_down, EVCNT_TYPE_MISC,
6130 NULL, device_xname(sc->sc_dev),
6131 "Tx dropped due to link down / port");
6132 evcnt_attach_dynamic(&isc->isc_tx_unicast, EVCNT_TYPE_MISC,
6133 NULL, device_xname(sc->sc_dev), "Tx unicast / port");
6134 evcnt_attach_dynamic(&isc->isc_tx_multicast, EVCNT_TYPE_MISC,
6135 NULL, device_xname(sc->sc_dev), "Tx multicast / port");
6136 evcnt_attach_dynamic(&isc->isc_tx_broadcast, EVCNT_TYPE_MISC,
6137 NULL, device_xname(sc->sc_dev), "Tx broadcast / port");
6138
6139 evcnt_attach_dynamic(&isc->isc_vsi_tx_bytes, EVCNT_TYPE_MISC,
6140 NULL, device_xname(sc->sc_dev), "Tx bytes / vsi");
6141 evcnt_attach_dynamic(&isc->isc_vsi_tx_errors, EVCNT_TYPE_MISC,
6142 NULL, device_xname(sc->sc_dev), "Tx errors / vsi");
6143 evcnt_attach_dynamic(&isc->isc_vsi_tx_unicast, EVCNT_TYPE_MISC,
6144 NULL, device_xname(sc->sc_dev), "Tx unicast / vsi");
6145 evcnt_attach_dynamic(&isc->isc_vsi_tx_multicast, EVCNT_TYPE_MISC,
6146 NULL, device_xname(sc->sc_dev), "Tx multicast / vsi");
6147 evcnt_attach_dynamic(&isc->isc_vsi_tx_broadcast, EVCNT_TYPE_MISC,
6148 NULL, device_xname(sc->sc_dev), "Tx broadcast / vsi");
6149
6150 sc->sc_stats_intval = ixl_param_stats_interval;
6151 callout_init(&sc->sc_stats_callout, CALLOUT_MPSAFE);
6152 callout_setfunc(&sc->sc_stats_callout, ixl_stats_callout, sc);
6153 ixl_work_set(&sc->sc_stats_task, ixl_stats_update, sc);
6154
6155 return 0;
6156 }
6157
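/*
 * Detach every event counter attached by ixl_setup_stats() and destroy
 * the statistics callout.
 */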
6158 static void
6159 ixl_teardown_stats(struct ixl_softc *sc)
6160 {
6161 struct ixl_tx_ring *txr;
6162 struct ixl_rx_ring *rxr;
6163 struct ixl_stats_counters *isc;
6164 unsigned int i;
6165
6166 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
6167 txr = sc->sc_qps[i].qp_txr;
6168 rxr = sc->sc_qps[i].qp_rxr;
6169
6170 evcnt_detach(&txr->txr_defragged);
6171 evcnt_detach(&txr->txr_defrag_failed);
6172 evcnt_detach(&txr->txr_pcqdrop);
6173 evcnt_detach(&txr->txr_transmitdef);
6174 evcnt_detach(&txr->txr_intr);
6175 evcnt_detach(&txr->txr_defer);
6176
6177 evcnt_detach(&rxr->rxr_mgethdr_failed);
6178 evcnt_detach(&rxr->rxr_mgetcl_failed);
6179 evcnt_detach(&rxr->rxr_mbuf_load_failed);
6180 evcnt_detach(&rxr->rxr_intr);
6181 evcnt_detach(&rxr->rxr_defer);
6182 }
6183
6184 isc = &sc->sc_stats_counters;
6185 evcnt_detach(&isc->isc_crc_errors);
6186 evcnt_detach(&isc->isc_illegal_bytes);
6187 evcnt_detach(&isc->isc_mac_local_faults);
6188 evcnt_detach(&isc->isc_mac_remote_faults);
6189 evcnt_detach(&isc->isc_link_xon_rx);
6190 evcnt_detach(&isc->isc_link_xon_tx);
6191 evcnt_detach(&isc->isc_link_xoff_rx);
6192 evcnt_detach(&isc->isc_link_xoff_tx);
6193 evcnt_detach(&isc->isc_rx_fragments);
6194 evcnt_detach(&isc->isc_rx_jabber);
6195 evcnt_detach(&isc->isc_rx_bytes);
6196 evcnt_detach(&isc->isc_rx_discards);
6197 evcnt_detach(&isc->isc_rx_unicast);
6198 evcnt_detach(&isc->isc_rx_multicast);
6199 evcnt_detach(&isc->isc_rx_broadcast);
6200 evcnt_detach(&isc->isc_rx_size_64);
6201 evcnt_detach(&isc->isc_rx_size_127);
6202 evcnt_detach(&isc->isc_rx_size_255);
6203 evcnt_detach(&isc->isc_rx_size_511);
6204 evcnt_detach(&isc->isc_rx_size_1023);
6205 evcnt_detach(&isc->isc_rx_size_1522);
6206 evcnt_detach(&isc->isc_rx_size_big);
6207 evcnt_detach(&isc->isc_rx_undersize);
6208 evcnt_detach(&isc->isc_rx_oversize);
6209 evcnt_detach(&isc->isc_tx_bytes);
6210 evcnt_detach(&isc->isc_tx_dropped_link_down);
6211 evcnt_detach(&isc->isc_tx_unicast);
6212 evcnt_detach(&isc->isc_tx_multicast);
6213 evcnt_detach(&isc->isc_tx_broadcast);
6214 evcnt_detach(&isc->isc_tx_size_64);
6215 evcnt_detach(&isc->isc_tx_size_127);
6216 evcnt_detach(&isc->isc_tx_size_255);
6217 evcnt_detach(&isc->isc_tx_size_511);
6218 evcnt_detach(&isc->isc_tx_size_1023);
6219 evcnt_detach(&isc->isc_tx_size_1522);
6220 evcnt_detach(&isc->isc_tx_size_big);
6221 evcnt_detach(&isc->isc_vsi_rx_discards);
6222 evcnt_detach(&isc->isc_vsi_rx_bytes);
6223 evcnt_detach(&isc->isc_vsi_rx_unicast);
6224 evcnt_detach(&isc->isc_vsi_rx_multicast);
6225 evcnt_detach(&isc->isc_vsi_rx_broadcast);
6226 evcnt_detach(&isc->isc_vsi_tx_errors);
6227 evcnt_detach(&isc->isc_vsi_tx_bytes);
6228 evcnt_detach(&isc->isc_vsi_tx_unicast);
6229 evcnt_detach(&isc->isc_vsi_tx_multicast);
6230 evcnt_detach(&isc->isc_vsi_tx_broadcast);
6231
6232 evcnt_detach(&sc->sc_event_atq);
6233 evcnt_detach(&sc->sc_event_link);
6234 evcnt_detach(&sc->sc_event_ecc_err);
6235 evcnt_detach(&sc->sc_event_pci_exception);
6236 evcnt_detach(&sc->sc_event_crit_err);
6237
6238 callout_destroy(&sc->sc_stats_callout);
6239 }
6240
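/*
 * Periodic callout: defer the statistics read to the workqueue and
 * reschedule itself after sc_stats_intval milliseconds.
 */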
6241 static void
6242 ixl_stats_callout(void *xsc)
6243 {
6244 struct ixl_softc *sc = xsc;
6245
6246 ixl_work_add(sc->sc_workq, &sc->sc_stats_task);
6247 callout_schedule(&sc->sc_stats_callout, mstohz(sc->sc_stats_intval));
6248 }
6249
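/*
 * Read a hardware statistics counter (a single 32-bit register, or a
 * 48-bit value split over a high/low pair when reg_hi is non-zero) and
 * return the increment since the last stored value, compensating for
 * counter wrap.  The first call after reset (has_offset == false) only
 * primes *offset and returns 0.
 */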
6250 static uint64_t
6251 ixl_stat_delta(struct ixl_softc *sc, uint32_t reg_hi, uint32_t reg_lo,
6252 uint64_t *offset, bool has_offset)
6253 {
6254 uint64_t value, delta;
6255 int bitwidth;
6256
6257 bitwidth = reg_hi == 0 ? 32 : 48;
6258
6259 value = ixl_rd(sc, reg_lo);
6260
6261 if (bitwidth > 32) {
6262 value |= ((uint64_t)ixl_rd(sc, reg_hi) << 32);
6263 }
6264
6265 if (__predict_true(has_offset)) {
6266 delta = value;
6267 if (value < *offset)
6268 delta += ((uint64_t)1 << bitwidth);
6269 delta -= *offset;
6270 } else {
6271 delta = 0;
6272 }
6273 atomic_swap_64(offset, value);
6274
6275 return delta;
6276 }
6277
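/*
 * Workqueue task: accumulate the hardware port and VSI statistics
 * registers into the event counters via ixl_stat_delta().
 */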
6278 static void
6279 ixl_stats_update(void *xsc)
6280 {
6281 struct ixl_softc *sc = xsc;
6282 struct ixl_stats_counters *isc;
6283 uint64_t delta;
6284
6285 isc = &sc->sc_stats_counters;
6286
6287 /* errors */
6288 delta = ixl_stat_delta(sc,
6289 0, I40E_GLPRT_CRCERRS(sc->sc_port),
6290 &isc->isc_crc_errors_offset, isc->isc_has_offset);
6291 atomic_add_64(&isc->isc_crc_errors.ev_count, delta);
6292
6293 delta = ixl_stat_delta(sc,
6294 0, I40E_GLPRT_ILLERRC(sc->sc_port),
6295 &isc->isc_illegal_bytes_offset, isc->isc_has_offset);
6296 atomic_add_64(&isc->isc_illegal_bytes.ev_count, delta);
6297
6298 /* rx */
6299 delta = ixl_stat_delta(sc,
6300 I40E_GLPRT_GORCH(sc->sc_port), I40E_GLPRT_GORCL(sc->sc_port),
6301 &isc->isc_rx_bytes_offset, isc->isc_has_offset);
6302 atomic_add_64(&isc->isc_rx_bytes.ev_count, delta);
6303
6304 delta = ixl_stat_delta(sc,
6305 0, I40E_GLPRT_RDPC(sc->sc_port),
6306 &isc->isc_rx_discards_offset, isc->isc_has_offset);
6307 atomic_add_64(&isc->isc_rx_discards.ev_count, delta);
6308
6309 delta = ixl_stat_delta(sc,
6310 I40E_GLPRT_UPRCH(sc->sc_port), I40E_GLPRT_UPRCL(sc->sc_port),
6311 &isc->isc_rx_unicast_offset, isc->isc_has_offset);
6312 atomic_add_64(&isc->isc_rx_unicast.ev_count, delta);
6313
6314 delta = ixl_stat_delta(sc,
6315 I40E_GLPRT_MPRCH(sc->sc_port), I40E_GLPRT_MPRCL(sc->sc_port),
6316 &isc->isc_rx_multicast_offset, isc->isc_has_offset);
6317 atomic_add_64(&isc->isc_rx_multicast.ev_count, delta);
6318
6319 delta = ixl_stat_delta(sc,
6320 I40E_GLPRT_BPRCH(sc->sc_port), I40E_GLPRT_BPRCL(sc->sc_port),
6321 &isc->isc_rx_broadcast_offset, isc->isc_has_offset);
6322 atomic_add_64(&isc->isc_rx_broadcast.ev_count, delta);
6323
6324 /* Packet size stats rx */
6325 delta = ixl_stat_delta(sc,
6326 I40E_GLPRT_PRC64H(sc->sc_port), I40E_GLPRT_PRC64L(sc->sc_port),
6327 &isc->isc_rx_size_64_offset, isc->isc_has_offset);
6328 atomic_add_64(&isc->isc_rx_size_64.ev_count, delta);
6329
6330 delta = ixl_stat_delta(sc,
6331 I40E_GLPRT_PRC127H(sc->sc_port), I40E_GLPRT_PRC127L(sc->sc_port),
6332 &isc->isc_rx_size_127_offset, isc->isc_has_offset);
6333 atomic_add_64(&isc->isc_rx_size_127.ev_count, delta);
6334
6335 delta = ixl_stat_delta(sc,
6336 I40E_GLPRT_PRC255H(sc->sc_port), I40E_GLPRT_PRC255L(sc->sc_port),
6337 &isc->isc_rx_size_255_offset, isc->isc_has_offset);
6338 atomic_add_64(&isc->isc_rx_size_255.ev_count, delta);
6339
6340 delta = ixl_stat_delta(sc,
6341 I40E_GLPRT_PRC511H(sc->sc_port), I40E_GLPRT_PRC511L(sc->sc_port),
6342 &isc->isc_rx_size_511_offset, isc->isc_has_offset);
6343 atomic_add_64(&isc->isc_rx_size_511.ev_count, delta);
6344
6345 delta = ixl_stat_delta(sc,
6346 I40E_GLPRT_PRC1023H(sc->sc_port), I40E_GLPRT_PRC1023L(sc->sc_port),
6347 &isc->isc_rx_size_1023_offset, isc->isc_has_offset);
6348 atomic_add_64(&isc->isc_rx_size_1023.ev_count, delta);
6349
6350 delta = ixl_stat_delta(sc,
6351 I40E_GLPRT_PRC1522H(sc->sc_port), I40E_GLPRT_PRC1522L(sc->sc_port),
6352 &isc->isc_rx_size_1522_offset, isc->isc_has_offset);
6353 atomic_add_64(&isc->isc_rx_size_1522.ev_count, delta);
6354
6355 delta = ixl_stat_delta(sc,
6356 I40E_GLPRT_PRC9522H(sc->sc_port), I40E_GLPRT_PRC9522L(sc->sc_port),
6357 &isc->isc_rx_size_big_offset, isc->isc_has_offset);
6358 atomic_add_64(&isc->isc_rx_size_big.ev_count, delta);
6359
6360 delta = ixl_stat_delta(sc,
6361 0, I40E_GLPRT_RUC(sc->sc_port),
6362 &isc->isc_rx_undersize_offset, isc->isc_has_offset);
6363 atomic_add_64(&isc->isc_rx_undersize.ev_count, delta);
6364
6365 delta = ixl_stat_delta(sc,
6366 0, I40E_GLPRT_ROC(sc->sc_port),
6367 &isc->isc_rx_oversize_offset, isc->isc_has_offset);
6368 atomic_add_64(&isc->isc_rx_oversize.ev_count, delta);
6369
6370 /* tx */
6371 delta = ixl_stat_delta(sc,
6372 I40E_GLPRT_GOTCH(sc->sc_port), I40E_GLPRT_GOTCL(sc->sc_port),
6373 &isc->isc_tx_bytes_offset, isc->isc_has_offset);
6374 atomic_add_64(&isc->isc_tx_bytes.ev_count, delta);
6375
6376 delta = ixl_stat_delta(sc,
6377 0, I40E_GLPRT_TDOLD(sc->sc_port),
6378 &isc->isc_tx_dropped_link_down_offset, isc->isc_has_offset);
6379 atomic_add_64(&isc->isc_tx_dropped_link_down.ev_count, delta);
6380
6381 delta = ixl_stat_delta(sc,
6382 I40E_GLPRT_UPTCH(sc->sc_port), I40E_GLPRT_UPTCL(sc->sc_port),
6383 &isc->isc_tx_unicast_offset, isc->isc_has_offset);
6384 atomic_add_64(&isc->isc_tx_unicast.ev_count, delta);
6385
6386 delta = ixl_stat_delta(sc,
6387 I40E_GLPRT_MPTCH(sc->sc_port), I40E_GLPRT_MPTCL(sc->sc_port),
6388 &isc->isc_tx_multicast_offset, isc->isc_has_offset);
6389 atomic_add_64(&isc->isc_tx_multicast.ev_count, delta);
6390
6391 delta = ixl_stat_delta(sc,
6392 I40E_GLPRT_BPTCH(sc->sc_port), I40E_GLPRT_BPTCL(sc->sc_port),
6393 &isc->isc_tx_broadcast_offset, isc->isc_has_offset);
6394 atomic_add_64(&isc->isc_tx_broadcast.ev_count, delta);
6395
6396 /* Packet size stats tx */
 6397 	delta = ixl_stat_delta(sc,
 6398 	    I40E_GLPRT_PTC64H(sc->sc_port), I40E_GLPRT_PTC64L(sc->sc_port),
6399 &isc->isc_tx_size_64_offset, isc->isc_has_offset);
6400 atomic_add_64(&isc->isc_tx_size_64.ev_count, delta);
6401
6402 delta = ixl_stat_delta(sc,
6403 I40E_GLPRT_PTC127H(sc->sc_port), I40E_GLPRT_PTC127L(sc->sc_port),
6404 &isc->isc_tx_size_127_offset, isc->isc_has_offset);
6405 atomic_add_64(&isc->isc_tx_size_127.ev_count, delta);
6406
6407 delta = ixl_stat_delta(sc,
6408 I40E_GLPRT_PTC255H(sc->sc_port), I40E_GLPRT_PTC255L(sc->sc_port),
6409 &isc->isc_tx_size_255_offset, isc->isc_has_offset);
6410 atomic_add_64(&isc->isc_tx_size_255.ev_count, delta);
6411
6412 delta = ixl_stat_delta(sc,
6413 I40E_GLPRT_PTC511H(sc->sc_port), I40E_GLPRT_PTC511L(sc->sc_port),
6414 &isc->isc_tx_size_511_offset, isc->isc_has_offset);
6415 atomic_add_64(&isc->isc_tx_size_511.ev_count, delta);
6416
6417 delta = ixl_stat_delta(sc,
6418 I40E_GLPRT_PTC1023H(sc->sc_port), I40E_GLPRT_PTC1023L(sc->sc_port),
6419 &isc->isc_tx_size_1023_offset, isc->isc_has_offset);
6420 atomic_add_64(&isc->isc_tx_size_1023.ev_count, delta);
6421
6422 delta = ixl_stat_delta(sc,
6423 I40E_GLPRT_PTC1522H(sc->sc_port), I40E_GLPRT_PTC1522L(sc->sc_port),
6424 &isc->isc_tx_size_1522_offset, isc->isc_has_offset);
6425 atomic_add_64(&isc->isc_tx_size_1522.ev_count, delta);
6426
6427 delta = ixl_stat_delta(sc,
6428 I40E_GLPRT_PTC9522H(sc->sc_port), I40E_GLPRT_PTC9522L(sc->sc_port),
6429 &isc->isc_tx_size_big_offset, isc->isc_has_offset);
6430 atomic_add_64(&isc->isc_tx_size_big.ev_count, delta);
6431
6432 /* mac faults */
6433 delta = ixl_stat_delta(sc,
6434 0, I40E_GLPRT_MLFC(sc->sc_port),
6435 &isc->isc_mac_local_faults_offset, isc->isc_has_offset);
6436 atomic_add_64(&isc->isc_mac_local_faults.ev_count, delta);
6437
6438 delta = ixl_stat_delta(sc,
6439 0, I40E_GLPRT_MRFC(sc->sc_port),
6440 &isc->isc_mac_remote_faults_offset, isc->isc_has_offset);
6441 atomic_add_64(&isc->isc_mac_remote_faults.ev_count, delta);
6442
6443 /* Flow control (LFC) stats */
6444 delta = ixl_stat_delta(sc,
6445 0, I40E_GLPRT_LXONRXC(sc->sc_port),
6446 &isc->isc_link_xon_rx_offset, isc->isc_has_offset);
6447 atomic_add_64(&isc->isc_link_xon_rx.ev_count, delta);
6448
6449 delta = ixl_stat_delta(sc,
6450 0, I40E_GLPRT_LXONTXC(sc->sc_port),
6451 &isc->isc_link_xon_tx_offset, isc->isc_has_offset);
6452 atomic_add_64(&isc->isc_link_xon_tx.ev_count, delta);
6453
6454 delta = ixl_stat_delta(sc,
6455 0, I40E_GLPRT_LXOFFRXC(sc->sc_port),
6456 &isc->isc_link_xoff_rx_offset, isc->isc_has_offset);
6457 atomic_add_64(&isc->isc_link_xoff_rx.ev_count, delta);
6458
6459 delta = ixl_stat_delta(sc,
6460 0, I40E_GLPRT_LXOFFTXC(sc->sc_port),
6461 &isc->isc_link_xoff_tx_offset, isc->isc_has_offset);
6462 atomic_add_64(&isc->isc_link_xoff_tx.ev_count, delta);
6463
6464 /* fragments */
6465 delta = ixl_stat_delta(sc,
6466 0, I40E_GLPRT_RFC(sc->sc_port),
6467 &isc->isc_rx_fragments_offset, isc->isc_has_offset);
6468 atomic_add_64(&isc->isc_rx_fragments.ev_count, delta);
6469
6470 delta = ixl_stat_delta(sc,
6471 0, I40E_GLPRT_RJC(sc->sc_port),
6472 &isc->isc_rx_jabber_offset, isc->isc_has_offset);
6473 atomic_add_64(&isc->isc_rx_jabber.ev_count, delta);
6474
6475 /* VSI rx counters */
6476 delta = ixl_stat_delta(sc,
6477 0, I40E_GLV_RDPC(sc->sc_vsi_stat_counter_idx),
6478 &isc->isc_vsi_rx_discards_offset, isc->isc_has_offset);
6479 atomic_add_64(&isc->isc_vsi_rx_discards.ev_count, delta);
6480
6481 delta = ixl_stat_delta(sc,
6482 I40E_GLV_GORCH(sc->sc_vsi_stat_counter_idx),
6483 I40E_GLV_GORCL(sc->sc_vsi_stat_counter_idx),
6484 &isc->isc_vsi_rx_bytes_offset, isc->isc_has_offset);
6485 atomic_add_64(&isc->isc_vsi_rx_bytes.ev_count, delta);
6486
6487 delta = ixl_stat_delta(sc,
6488 I40E_GLV_UPRCH(sc->sc_vsi_stat_counter_idx),
6489 I40E_GLV_UPRCL(sc->sc_vsi_stat_counter_idx),
6490 &isc->isc_vsi_rx_unicast_offset, isc->isc_has_offset);
6491 atomic_add_64(&isc->isc_vsi_rx_unicast.ev_count, delta);
6492
6493 delta = ixl_stat_delta(sc,
6494 I40E_GLV_MPRCH(sc->sc_vsi_stat_counter_idx),
6495 I40E_GLV_MPRCL(sc->sc_vsi_stat_counter_idx),
6496 &isc->isc_vsi_rx_multicast_offset, isc->isc_has_offset);
6497 atomic_add_64(&isc->isc_vsi_rx_multicast.ev_count, delta);
6498
6499 delta = ixl_stat_delta(sc,
6500 I40E_GLV_BPRCH(sc->sc_vsi_stat_counter_idx),
6501 I40E_GLV_BPRCL(sc->sc_vsi_stat_counter_idx),
6502 &isc->isc_vsi_rx_broadcast_offset, isc->isc_has_offset);
6503 atomic_add_64(&isc->isc_vsi_rx_broadcast.ev_count, delta);
6504
6505 /* VSI tx counters */
6506 delta = ixl_stat_delta(sc,
6507 0, I40E_GLV_TEPC(sc->sc_vsi_stat_counter_idx),
6508 &isc->isc_vsi_tx_errors_offset, isc->isc_has_offset);
6509 atomic_add_64(&isc->isc_vsi_tx_errors.ev_count, delta);
6510
6511 delta = ixl_stat_delta(sc,
6512 I40E_GLV_GOTCH(sc->sc_vsi_stat_counter_idx),
6513 I40E_GLV_GOTCL(sc->sc_vsi_stat_counter_idx),
6514 &isc->isc_vsi_tx_bytes_offset, isc->isc_has_offset);
6515 atomic_add_64(&isc->isc_vsi_tx_bytes.ev_count, delta);
6516
6517 delta = ixl_stat_delta(sc,
6518 I40E_GLV_UPTCH(sc->sc_vsi_stat_counter_idx),
6519 I40E_GLV_UPTCL(sc->sc_vsi_stat_counter_idx),
6520 &isc->isc_vsi_tx_unicast_offset, isc->isc_has_offset);
6521 atomic_add_64(&isc->isc_vsi_tx_unicast.ev_count, delta);
6522
6523 delta = ixl_stat_delta(sc,
6524 I40E_GLV_MPTCH(sc->sc_vsi_stat_counter_idx),
6525 I40E_GLV_MPTCL(sc->sc_vsi_stat_counter_idx),
6526 &isc->isc_vsi_tx_multicast_offset, isc->isc_has_offset);
6527 atomic_add_64(&isc->isc_vsi_tx_multicast.ev_count, delta);
6528
6529 delta = ixl_stat_delta(sc,
6530 I40E_GLV_BPTCH(sc->sc_vsi_stat_counter_idx),
6531 I40E_GLV_BPTCL(sc->sc_vsi_stat_counter_idx),
6532 &isc->isc_vsi_tx_broadcast_offset, isc->isc_has_offset);
6533 atomic_add_64(&isc->isc_vsi_tx_broadcast.ev_count, delta);
6534 }
6535
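/*
 * Create the per-device sysctl tree under hw.<devicename> with the
 * txrx_workqueue, stats_interval and Rx/Tx processing limit knobs.
 */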
6536 static int
6537 ixl_setup_sysctls(struct ixl_softc *sc)
6538 {
6539 const char *devname;
6540 struct sysctllog **log;
6541 const struct sysctlnode *rnode, *rxnode, *txnode;
6542 int error;
6543
6544 log = &sc->sc_sysctllog;
6545 devname = device_xname(sc->sc_dev);
6546
6547 error = sysctl_createv(log, 0, NULL, &rnode,
6548 0, CTLTYPE_NODE, devname,
6549 SYSCTL_DESCR("ixl information and settings"),
6550 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
6551 if (error)
6552 goto out;
6553
6554 error = sysctl_createv(log, 0, &rnode, NULL,
6555 CTLFLAG_READWRITE, CTLTYPE_BOOL, "txrx_workqueue",
6556 SYSCTL_DESCR("Use workqueue for packet processing"),
6557 NULL, 0, &sc->sc_txrx_workqueue, 0, CTL_CREATE, CTL_EOL);
6558 if (error)
6559 goto out;
6560
6561 error = sysctl_createv(log, 0, &rnode, NULL,
6562 CTLFLAG_READONLY, CTLTYPE_INT, "stats_interval",
6563 SYSCTL_DESCR("Statistics collection interval in milliseconds"),
 6564 	    NULL, 0, &sc->sc_stats_intval, 0, CTL_CREATE, CTL_EOL);
	if (error)
		goto out;
6565
6566 error = sysctl_createv(log, 0, &rnode, &rxnode,
6567 0, CTLTYPE_NODE, "rx",
6568 SYSCTL_DESCR("ixl information and settings for Rx"),
6569 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
6570 if (error)
6571 goto out;
6572
6573 error = sysctl_createv(log, 0, &rxnode, NULL,
6574 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
6575 SYSCTL_DESCR("max number of Rx packets"
6576 " to process for interrupt processing"),
6577 NULL, 0, &sc->sc_rx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
6578 if (error)
6579 goto out;
6580
6581 error = sysctl_createv(log, 0, &rxnode, NULL,
6582 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
6583 SYSCTL_DESCR("max number of Rx packets"
6584 " to process for deferred processing"),
6585 NULL, 0, &sc->sc_rx_process_limit, 0, CTL_CREATE, CTL_EOL);
6586 if (error)
6587 goto out;
6588
6589 error = sysctl_createv(log, 0, &rnode, &txnode,
6590 0, CTLTYPE_NODE, "tx",
6591 SYSCTL_DESCR("ixl information and settings for Tx"),
6592 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
6593 if (error)
6594 goto out;
6595
6596 error = sysctl_createv(log, 0, &txnode, NULL,
6597 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
6598 SYSCTL_DESCR("max number of Tx packets"
6599 " to process for interrupt processing"),
6600 NULL, 0, &sc->sc_tx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
6601 if (error)
6602 goto out;
6603
6604 error = sysctl_createv(log, 0, &txnode, NULL,
6605 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
6606 SYSCTL_DESCR("max number of Tx packets"
6607 " to process for deferred processing"),
6608 NULL, 0, &sc->sc_tx_process_limit, 0, CTL_CREATE, CTL_EOL);
6609 if (error)
6610 goto out;
6611
6612 out:
6613 if (error) {
6614 aprint_error_dev(sc->sc_dev,
6615 "unable to create sysctl node\n");
6616 sysctl_teardown(log);
6617 }
6618
6619 return error;
6620 }
6621
6622 static void
6623 ixl_teardown_sysctls(struct ixl_softc *sc)
6624 {
6625
6626 sysctl_teardown(&sc->sc_sysctllog);
6627 }
6628
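/* Thin wrapper around workqueue_create(9); returns NULL on failure. */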
6629 static struct workqueue *
6630 ixl_workq_create(const char *name, pri_t prio, int ipl, int flags)
6631 {
6632 struct workqueue *wq;
6633 int error;
6634
6635 error = workqueue_create(&wq, name, ixl_workq_work, NULL,
6636 prio, ipl, flags);
6637
6638 if (error)
6639 return NULL;
6640
6641 return wq;
6642 }
6643
6644 static void
6645 ixl_workq_destroy(struct workqueue *wq)
6646 {
6647
6648 workqueue_destroy(wq);
6649 }
6650
6651 static void
6652 ixl_work_set(struct ixl_work *work, void (*func)(void *), void *arg)
6653 {
6654
6655 memset(work, 0, sizeof(*work));
6656 work->ixw_func = func;
6657 work->ixw_arg = arg;
6658 }
6659
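/*
 * Enqueue a work item unless it is already pending; ixw_added makes the
 * enqueue idempotent until ixl_workq_work() runs the item.
 */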
6660 static void
6661 ixl_work_add(struct workqueue *wq, struct ixl_work *work)
6662 {
6663 if (atomic_cas_uint(&work->ixw_added, 0, 1) != 0)
6664 return;
6665
6666 kpreempt_disable();
6667 workqueue_enqueue(wq, &work->ixw_cookie, NULL);
6668 kpreempt_enable();
6669 }
6670
6671 static void
6672 ixl_work_wait(struct workqueue *wq, struct ixl_work *work)
6673 {
6674
6675 workqueue_wait(wq, &work->ixw_cookie);
6676 }
6677
6678 static void
6679 ixl_workq_work(struct work *wk, void *context)
6680 {
6681 struct ixl_work *work;
6682
6683 work = container_of(wk, struct ixl_work, ixw_cookie);
6684
6685 atomic_swap_uint(&work->ixw_added, 0);
6686 work->ixw_func(work->ixw_arg);
6687 }
6688
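/*
 * Read an Rx control register through the RX_CTL_REG_READ admin queue
 * command and map the firmware return code to an errno.
 */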
6689 static int
6690 ixl_rx_ctl_read(struct ixl_softc *sc, uint32_t reg, uint32_t *rv)
6691 {
6692 struct ixl_aq_desc iaq;
6693
6694 memset(&iaq, 0, sizeof(iaq));
6695 iaq.iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_REG_READ);
6696 iaq.iaq_param[1] = htole32(reg);
6697
6698 if (ixl_atq_poll(sc, &iaq, 250) != 0)
6699 return ETIMEDOUT;
6700
 6701 	switch (le16toh(iaq.iaq_retval)) {
6702 case IXL_AQ_RC_OK:
6703 /* success */
6704 break;
6705 case IXL_AQ_RC_EACCES:
6706 return EPERM;
6707 case IXL_AQ_RC_EAGAIN:
6708 return EAGAIN;
6709 default:
6710 return EIO;
6711 }
6712
 6713 	*rv = le32toh(iaq.iaq_param[3]);
6714 return 0;
6715 }
6716
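/*
 * Read an Rx control register, using the admin queue command (with a
 * few retries on EAGAIN) when the firmware advertises the RXCTL
 * capability, and falling back to a direct register read otherwise.
 */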
6717 static uint32_t
6718 ixl_rd_rx_csr(struct ixl_softc *sc, uint32_t reg)
6719 {
6720 uint32_t val;
6721 int rv, retry, retry_limit;
6722
6723 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL)) {
6724 retry_limit = 5;
6725 } else {
6726 retry_limit = 0;
6727 }
6728
6729 for (retry = 0; retry < retry_limit; retry++) {
6730 rv = ixl_rx_ctl_read(sc, reg, &val);
6731 if (rv == 0)
6732 return val;
6733 else if (rv == EAGAIN)
6734 delaymsec(1);
6735 else
6736 break;
6737 }
6738
6739 val = ixl_rd(sc, reg);
6740
6741 return val;
6742 }
6743
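/*
 * Write an Rx control register through the RX_CTL_REG_WRITE admin
 * queue command and map the firmware return code to an errno.
 */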
6744 static int
6745 ixl_rx_ctl_write(struct ixl_softc *sc, uint32_t reg, uint32_t value)
6746 {
6747 struct ixl_aq_desc iaq;
6748
6749 memset(&iaq, 0, sizeof(iaq));
6750 iaq.iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_REG_WRITE);
6751 iaq.iaq_param[1] = htole32(reg);
6752 iaq.iaq_param[3] = htole32(value);
6753
6754 if (ixl_atq_poll(sc, &iaq, 250) != 0)
6755 return ETIMEDOUT;
6756
 6757 	switch (le16toh(iaq.iaq_retval)) {
6758 case IXL_AQ_RC_OK:
6759 /* success */
6760 break;
6761 case IXL_AQ_RC_EACCES:
6762 return EPERM;
6763 case IXL_AQ_RC_EAGAIN:
6764 return EAGAIN;
6765 default:
6766 return EIO;
6767 }
6768
6769 return 0;
6770 }
6771
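/*
 * Write an Rx control register via the admin queue when available,
 * retrying on EAGAIN, and fall back to a direct register write.
 */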
6772 static void
6773 ixl_wr_rx_csr(struct ixl_softc *sc, uint32_t reg, uint32_t value)
6774 {
6775 int rv, retry, retry_limit;
6776
6777 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL)) {
6778 retry_limit = 5;
6779 } else {
6780 retry_limit = 0;
6781 }
6782
6783 for (retry = 0; retry < retry_limit; retry++) {
6784 rv = ixl_rx_ctl_write(sc, reg, value);
6785 if (rv == 0)
6786 return;
6787 else if (rv == EAGAIN)
6788 delaymsec(1);
6789 else
6790 break;
6791 }
6792
6793 ixl_wr(sc, reg, value);
6794 }
6795
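/*
 * Acquire the NVM resource through the admin queue before shadow RAM
 * access; 'R' requests read access, anything else write access.  A
 * no-op when the firmware does not require NVM locking.
 */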
6796 static int
6797 ixl_nvm_lock(struct ixl_softc *sc, char rw)
6798 {
6799 struct ixl_aq_desc iaq;
6800 struct ixl_aq_req_resource_param *param;
6801 int rv;
6802
6803 if (!ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK))
6804 return 0;
6805
6806 memset(&iaq, 0, sizeof(iaq));
6807 iaq.iaq_opcode = htole16(IXL_AQ_OP_REQUEST_RESOURCE);
6808
6809 param = (struct ixl_aq_req_resource_param *)&iaq.iaq_param;
6810 param->resource_id = htole16(IXL_AQ_RESOURCE_ID_NVM);
6811 if (rw == 'R') {
6812 param->access_type = htole16(IXL_AQ_RESOURCE_ACCES_READ);
6813 } else {
6814 param->access_type = htole16(IXL_AQ_RESOURCE_ACCES_WRITE);
6815 }
6816
6817 rv = ixl_atq_poll(sc, &iaq, 250);
6818
6819 if (rv != 0)
6820 return ETIMEDOUT;
6821
6822 switch (le16toh(iaq.iaq_retval)) {
6823 case IXL_AQ_RC_OK:
6824 break;
6825 case IXL_AQ_RC_EACCES:
6826 return EACCES;
6827 case IXL_AQ_RC_EBUSY:
6828 return EBUSY;
6829 case IXL_AQ_RC_EPERM:
 6830 		return EPERM;
	default:
		return EIO;
 6831 	}
6832
6833 return 0;
6834 }
6835
6836 static int
6837 ixl_nvm_unlock(struct ixl_softc *sc)
6838 {
6839 struct ixl_aq_desc iaq;
6840 struct ixl_aq_rel_resource_param *param;
6841 int rv;
6842
6843 if (!ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK))
6844 return 0;
6845
6846 memset(&iaq, 0, sizeof(iaq));
6847 iaq.iaq_opcode = htole16(IXL_AQ_OP_RELEASE_RESOURCE);
6848
6849 param = (struct ixl_aq_rel_resource_param *)&iaq.iaq_param;
6850 param->resource_id = htole16(IXL_AQ_RESOURCE_ID_NVM);
6851
6852 rv = ixl_atq_poll(sc, &iaq, 250);
6853
6854 if (rv != 0)
6855 return ETIMEDOUT;
6856
6857 switch (le16toh(iaq.iaq_retval)) {
6858 case IXL_AQ_RC_OK:
6859 break;
6860 default:
6861 return EIO;
6862 }
6863 return 0;
6864 }
6865
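/* Poll GLNVM_SRCTL until the shadow RAM interface reports DONE, or time out. */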
6866 static int
6867 ixl_srdone_poll(struct ixl_softc *sc)
6868 {
6869 int wait_count;
6870 uint32_t reg;
6871
6872 for (wait_count = 0; wait_count < IXL_SRRD_SRCTL_ATTEMPTS;
6873 wait_count++) {
6874 reg = ixl_rd(sc, I40E_GLNVM_SRCTL);
6875 if (ISSET(reg, I40E_GLNVM_SRCTL_DONE_MASK))
6876 break;
6877
6878 delaymsec(5);
6879 }
6880
6881 if (wait_count == IXL_SRRD_SRCTL_ATTEMPTS)
6882 return -1;
6883
6884 return 0;
6885 }
6886
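/*
 * Read one 16-bit word from the NVM shadow RAM through the
 * GLNVM_SRCTL/GLNVM_SRDATA register interface.
 */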
6887 static int
6888 ixl_nvm_read_srctl(struct ixl_softc *sc, uint16_t offset, uint16_t *data)
6889 {
6890 uint32_t reg;
6891
6892 if (ixl_srdone_poll(sc) != 0)
6893 return ETIMEDOUT;
6894
6895 reg = ((uint32_t)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
6896 __BIT(I40E_GLNVM_SRCTL_START_SHIFT);
6897 ixl_wr(sc, I40E_GLNVM_SRCTL, reg);
6898
6899 if (ixl_srdone_poll(sc) != 0) {
6900 aprint_debug("NVM read error: couldn't access "
6901 "Shadow RAM address: 0x%x\n", offset);
6902 return ETIMEDOUT;
6903 }
6904
6905 reg = ixl_rd(sc, I40E_GLNVM_SRDATA);
6906 *data = (uint16_t)__SHIFTOUT(reg, I40E_GLNVM_SRDATA_RDDATA_MASK);
6907
6908 return 0;
6909 }
6910
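/*
 * Read from the NVM through the NVM_READ admin queue command into the
 * shared admin queue buffer; offset_word is in 16-bit words and len in
 * bytes.
 */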
6911 static int
6912 ixl_nvm_read_aq(struct ixl_softc *sc, uint16_t offset_word,
6913 void *data, size_t len)
6914 {
6915 struct ixl_dmamem *idm;
6916 struct ixl_aq_desc iaq;
6917 struct ixl_aq_nvm_param *param;
6918 uint32_t offset_bytes;
6919 int rv;
6920
6921 idm = &sc->sc_aqbuf;
6922 if (len > IXL_DMA_LEN(idm))
6923 return ENOMEM;
6924
6925 memset(IXL_DMA_KVA(idm), 0, IXL_DMA_LEN(idm));
6926 memset(&iaq, 0, sizeof(iaq));
6927 iaq.iaq_opcode = htole16(IXL_AQ_OP_NVM_READ);
6928 iaq.iaq_flags = htole16(IXL_AQ_BUF |
6929 ((len > I40E_AQ_LARGE_BUF) ? IXL_AQ_LB : 0));
6930 iaq.iaq_datalen = htole16(len);
6931 ixl_aq_dva(&iaq, IXL_DMA_DVA(idm));
6932
6933 param = (struct ixl_aq_nvm_param *)iaq.iaq_param;
6934 param->command_flags = IXL_AQ_NVM_LAST_CMD;
6935 param->module_pointer = 0;
6936 param->length = htole16(len);
6937 offset_bytes = (uint32_t)offset_word * 2;
6938 offset_bytes &= 0x00FFFFFF;
6939 param->offset = htole32(offset_bytes);
6940
6941 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
6942 BUS_DMASYNC_PREREAD);
6943
6944 rv = ixl_atq_poll(sc, &iaq, 250);
6945
6946 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
6947 BUS_DMASYNC_POSTREAD);
6948
6949 if (rv != 0) {
6950 return ETIMEDOUT;
6951 }
6952
6953 switch (le16toh(iaq.iaq_retval)) {
6954 case IXL_AQ_RC_OK:
6955 break;
6956 case IXL_AQ_RC_EPERM:
6957 return EPERM;
6958 case IXL_AQ_RC_EINVAL:
6959 return EINVAL;
6960 case IXL_AQ_RC_EBUSY:
6961 return EBUSY;
6962 case IXL_AQ_RC_EIO:
6963 default:
6964 return EIO;
6965 }
6966
6967 memcpy(data, IXL_DMA_KVA(idm), len);
6968
6969 return 0;
6970 }
6971
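/*
 * Read a 16-bit word from the NVM, taking the NVM lock and using the
 * admin queue when supported, or the shadow RAM registers otherwise.
 */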
6972 static int
6973 ixl_rd16_nvm(struct ixl_softc *sc, uint16_t offset, uint16_t *data)
6974 {
6975 int error;
6976 uint16_t buf;
6977
6978 error = ixl_nvm_lock(sc, 'R');
6979 if (error)
6980 return error;
6981
6982 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMREAD)) {
6983 error = ixl_nvm_read_aq(sc, offset,
6984 &buf, sizeof(buf));
6985 if (error == 0)
6986 *data = le16toh(buf);
6987 } else {
6988 error = ixl_nvm_read_srctl(sc, offset, &buf);
6989 if (error == 0)
6990 *data = buf;
6991 }
6992
6993 ixl_nvm_unlock(sc);
6994
6995 return error;
6996 }
6997
6998 MODULE(MODULE_CLASS_DRIVER, if_ixl, "pci");
6999
7000 #ifdef _MODULE
7001 #include "ioconf.c"
7002 #endif
7003
7004 #ifdef _MODULE
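/*
 * Apply module properties (nomsix, stats_interval, nqps_limit,
 * rx_ndescs, tx_ndescs) to the driver tunables, ignoring values that
 * fall outside the accepted ranges.
 */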
7005 static void
7006 ixl_parse_modprop(prop_dictionary_t dict)
7007 {
7008 prop_object_t obj;
7009 int64_t val;
7010 uint64_t uval;
7011
7012 if (dict == NULL)
7013 return;
7014
7015 obj = prop_dictionary_get(dict, "nomsix");
7016 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_BOOL) {
7017 ixl_param_nomsix = prop_bool_true((prop_bool_t)obj);
7018 }
7019
7020 obj = prop_dictionary_get(dict, "stats_interval");
7021 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
7022 val = prop_number_integer_value((prop_number_t)obj);
7023
 7024 		/* arbitrary sanity bounds for the interval */
7025 if (100 < val && val < 180000) {
7026 ixl_param_stats_interval = val;
7027 }
7028 }
7029
7030 obj = prop_dictionary_get(dict, "nqps_limit");
7031 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
7032 val = prop_number_integer_value((prop_number_t)obj);
7033
7034 if (val <= INT32_MAX)
7035 ixl_param_nqps_limit = val;
7036 }
7037
7038 obj = prop_dictionary_get(dict, "rx_ndescs");
7039 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
7040 uval = prop_number_unsigned_integer_value((prop_number_t)obj);
7041
7042 if (uval > 8)
7043 ixl_param_rx_ndescs = uval;
7044 }
7045
7046 obj = prop_dictionary_get(dict, "tx_ndescs");
7047 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
7048 uval = prop_number_unsigned_integer_value((prop_number_t)obj);
7049
7050 if (uval > IXL_TX_PKT_DESCS)
7051 ixl_param_tx_ndescs = uval;
7052 }
7053
7054 }
7055 #endif
7056
7057 static int
7058 if_ixl_modcmd(modcmd_t cmd, void *opaque)
7059 {
7060 int error = 0;
7061
7062 #ifdef _MODULE
7063 switch (cmd) {
7064 case MODULE_CMD_INIT:
7065 ixl_parse_modprop((prop_dictionary_t)opaque);
7066 error = config_init_component(cfdriver_ioconf_if_ixl,
7067 cfattach_ioconf_if_ixl, cfdata_ioconf_if_ixl);
7068 break;
7069 case MODULE_CMD_FINI:
7070 error = config_fini_component(cfdriver_ioconf_if_ixl,
7071 cfattach_ioconf_if_ixl, cfdata_ioconf_if_ixl);
7072 break;
7073 default:
7074 error = ENOTTY;
7075 break;
7076 }
7077 #endif
7078
7079 return error;
7080 }
7081