/*	$NetBSD: if_ixl.c,v 1.95 2023/10/13 04:43:35 yamaguchi Exp $	*/
2
3 /*
4 * Copyright (c) 2013-2015, Intel Corporation
5 * All rights reserved.
6
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
12 *
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * 3. Neither the name of the Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 /*
35 * Copyright (c) 2016,2017 David Gwynne <dlg (at) openbsd.org>
36 *
37 * Permission to use, copy, modify, and distribute this software for any
38 * purpose with or without fee is hereby granted, provided that the above
39 * copyright notice and this permission notice appear in all copies.
40 *
41 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
42 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
43 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
44 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
45 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
46 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
47 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
48 */
49
50 /*
51 * Copyright (c) 2019 Internet Initiative Japan, Inc.
52 * All rights reserved.
53 *
54 * Redistribution and use in source and binary forms, with or without
55 * modification, are permitted provided that the following conditions
56 * are met:
57 * 1. Redistributions of source code must retain the above copyright
58 * notice, this list of conditions and the following disclaimer.
59 * 2. Redistributions in binary form must reproduce the above copyright
60 * notice, this list of conditions and the following disclaimer in the
61 * documentation and/or other materials provided with the distribution.
62 *
63 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
64 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
65 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
66 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
67 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
68 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
69 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
70 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
71 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
72 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
73 * POSSIBILITY OF SUCH DAMAGE.
74 */
75
76 #include <sys/cdefs.h>
77 __KERNEL_RCSID(0, "$NetBSD: if_ixl.c,v 1.95 2023/10/13 04:43:35 yamaguchi Exp $");
78
79 #ifdef _KERNEL_OPT
80 #include "opt_net_mpsafe.h"
81 #include "opt_if_ixl.h"
82 #endif
83
84 #include <sys/param.h>
85 #include <sys/types.h>
86
87 #include <sys/bitops.h>
88 #include <sys/cpu.h>
89 #include <sys/device.h>
90 #include <sys/evcnt.h>
91 #include <sys/interrupt.h>
92 #include <sys/kmem.h>
93 #include <sys/module.h>
94 #include <sys/mutex.h>
95 #include <sys/pcq.h>
96 #include <sys/syslog.h>
97 #include <sys/workqueue.h>
98 #include <sys/xcall.h>
99
100 #include <sys/bus.h>
101
102 #include <net/bpf.h>
103 #include <net/if.h>
104 #include <net/if_dl.h>
105 #include <net/if_media.h>
106 #include <net/if_ether.h>
107 #include <net/rss_config.h>
108
109 #include <netinet/tcp.h> /* for struct tcphdr */
110 #include <netinet/udp.h> /* for struct udphdr */
111
112 #include <dev/pci/pcivar.h>
113 #include <dev/pci/pcidevs.h>
114
115 #include <dev/pci/if_ixlreg.h>
116 #include <dev/pci/if_ixlvar.h>
117
118 #include <prop/proplib.h>
119
120 struct ixl_softc; /* defined */
121
122 #define I40E_PF_RESET_WAIT_COUNT 200
123 #define I40E_AQ_LARGE_BUF 512
124
125 /* bitfields for Tx queue mapping in QTX_CTL */
126 #define I40E_QTX_CTL_VF_QUEUE 0x0
127 #define I40E_QTX_CTL_VM_QUEUE 0x1
128 #define I40E_QTX_CTL_PF_QUEUE 0x2
129
130 #define I40E_QUEUE_TYPE_EOL 0x7ff
131 #define I40E_INTR_NOTX_QUEUE 0
132
133 #define I40E_QUEUE_TYPE_RX 0x0
134 #define I40E_QUEUE_TYPE_TX 0x1
135 #define I40E_QUEUE_TYPE_PE_CEQ 0x2
136 #define I40E_QUEUE_TYPE_UNKNOWN 0x3
137
138 #define I40E_ITR_INDEX_RX 0x0
139 #define I40E_ITR_INDEX_TX 0x1
140 #define I40E_ITR_INDEX_OTHER 0x2
141 #define I40E_ITR_INDEX_NONE 0x3
142 #define IXL_ITR_RX 0x7a /* 4K intrs/sec */
143 #define IXL_ITR_TX 0x7a /* 4K intrs/sec */
144
145 #define I40E_INTR_NOTX_QUEUE 0
146 #define I40E_INTR_NOTX_INTR 0
147 #define I40E_INTR_NOTX_RX_QUEUE 0
148 #define I40E_INTR_NOTX_TX_QUEUE 1
149 #define I40E_INTR_NOTX_RX_MASK I40E_PFINT_ICR0_QUEUE_0_MASK
150 #define I40E_INTR_NOTX_TX_MASK I40E_PFINT_ICR0_QUEUE_1_MASK
151
152 #define I40E_HASH_LUT_SIZE_128 0
153
154 #define IXL_ICR0_CRIT_ERR_MASK \
155 (I40E_PFINT_ICR0_PCI_EXCEPTION_MASK | \
156 I40E_PFINT_ICR0_ECC_ERR_MASK | \
157 I40E_PFINT_ICR0_PE_CRITERR_MASK)
158
159 #define IXL_QUEUE_MAX_XL710 64
160 #define IXL_QUEUE_MAX_X722 128
161
162 #define IXL_TX_PKT_DESCS 8
163 #define IXL_TX_PKT_MAXSIZE (MCLBYTES * IXL_TX_PKT_DESCS)
164 #define IXL_TX_QUEUE_ALIGN 128
165 #define IXL_RX_QUEUE_ALIGN 128
166
167 #define IXL_MCLBYTES (MCLBYTES - ETHER_ALIGN)
#define IXL_MTU_ETHERLEN	(ETHER_HDR_LEN		\
				+ ETHER_CRC_LEN		\
				+ ETHER_VLAN_ENCAP_LEN)
171 #if 0
172 #define IXL_MAX_MTU (9728 - IXL_MTU_ETHERLEN)
173 #else
174 /* (dbuff * 5) - ETHER_HDR_LEN - ETHER_CRC_LEN */
175 #define IXL_MAX_MTU (9600 - IXL_MTU_ETHERLEN)
176 #endif
177 #define IXL_MIN_MTU (ETHER_MIN_LEN - ETHER_CRC_LEN)
178
179 #define IXL_PCIREG PCI_MAPREG_START
180
181 #define IXL_ITR0 0x0
182 #define IXL_ITR1 0x1
183 #define IXL_ITR2 0x2
184 #define IXL_NOITR 0x3
185
186 #define IXL_AQ_NUM 256
187 #define IXL_AQ_MASK (IXL_AQ_NUM - 1)
188 #define IXL_AQ_ALIGN 64 /* lol */
189 #define IXL_AQ_BUFLEN 4096
190
191 #define IXL_HMC_ROUNDUP 512
192 #define IXL_HMC_PGSIZE 4096
193 #define IXL_HMC_DVASZ sizeof(uint64_t)
194 #define IXL_HMC_PGS (IXL_HMC_PGSIZE / IXL_HMC_DVASZ)
195 #define IXL_HMC_L2SZ (IXL_HMC_PGSIZE * IXL_HMC_PGS)
196 #define IXL_HMC_PDVALID 1ULL
197
198 #define IXL_ATQ_EXEC_TIMEOUT (10 * hz)
199
200 #define IXL_SRRD_SRCTL_ATTEMPTS 100000
201
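
/*
 * Register offsets and masks for an admin queue pair: the admin transmit
 * queue (ATQ) carries commands to the firmware, and the admin receive
 * queue (ARQ) carries events back from it.  Only the PF register layout
 * (ixl_pf_aq_regs below) is used by this driver.
 */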
202 struct ixl_aq_regs {
203 bus_size_t atq_tail;
204 bus_size_t atq_head;
205 bus_size_t atq_len;
206 bus_size_t atq_bal;
207 bus_size_t atq_bah;
208
209 bus_size_t arq_tail;
210 bus_size_t arq_head;
211 bus_size_t arq_len;
212 bus_size_t arq_bal;
213 bus_size_t arq_bah;
214
215 uint32_t atq_len_enable;
216 uint32_t atq_tail_mask;
217 uint32_t atq_head_mask;
218
219 uint32_t arq_len_enable;
220 uint32_t arq_tail_mask;
221 uint32_t arq_head_mask;
222 };
223
224 struct ixl_phy_type {
225 uint64_t phy_type;
226 uint64_t ifm_type;
227 };
228
229 struct ixl_speed_type {
230 uint8_t dev_speed;
231 uint64_t net_speed;
232 };
233
234 struct ixl_hmc_entry {
235 uint64_t hmc_base;
236 uint32_t hmc_count;
237 uint64_t hmc_size;
238 };
239
240 enum ixl_hmc_types {
241 IXL_HMC_LAN_TX = 0,
242 IXL_HMC_LAN_RX,
243 IXL_HMC_FCOE_CTX,
244 IXL_HMC_FCOE_FILTER,
245 IXL_HMC_COUNT
246 };
247
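
/*
 * One entry per context field: "offset" is the byte offset of the field in
 * the abstract C structure, "width" is the field's size in bits in the
 * packed hardware context, and "lsb" is the bit position it starts at.
 */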
248 struct ixl_hmc_pack {
249 uint16_t offset;
250 uint16_t width;
251 uint16_t lsb;
252 };
253
/*
 * these hmc objects have weird sizes and alignments, so these are abstract
 * representations of them that are convenient for c code to populate.
 *
 * the packing code relies on little-endian values being stored in the fields,
 * on no high bits in the fields being set, and on the fields being packed in
 * the same order as they appear in the ctx structure.
 */
262
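/*
 * For example, ixl_hmc_pack_rxq[] below places the rx queue "base" field
 * into bits [32, 32 + 57) of the packed context; the value stored there is
 * the descriptor ring base address in IXL_HMC_RXQ_BASE_UNIT (128 byte) units.
 */
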
263 struct ixl_hmc_rxq {
264 uint16_t head;
265 uint8_t cpuid;
266 uint64_t base;
267 #define IXL_HMC_RXQ_BASE_UNIT 128
268 uint16_t qlen;
269 uint16_t dbuff;
270 #define IXL_HMC_RXQ_DBUFF_UNIT 128
271 uint8_t hbuff;
272 #define IXL_HMC_RXQ_HBUFF_UNIT 64
273 uint8_t dtype;
274 #define IXL_HMC_RXQ_DTYPE_NOSPLIT 0x0
275 #define IXL_HMC_RXQ_DTYPE_HSPLIT 0x1
276 #define IXL_HMC_RXQ_DTYPE_SPLIT_ALWAYS 0x2
277 uint8_t dsize;
278 #define IXL_HMC_RXQ_DSIZE_16 0
279 #define IXL_HMC_RXQ_DSIZE_32 1
280 uint8_t crcstrip;
281 uint8_t fc_ena;
282 uint8_t l2sel;
283 uint8_t hsplit_0;
284 uint8_t hsplit_1;
285 uint8_t showiv;
286 uint16_t rxmax;
287 uint8_t tphrdesc_ena;
288 uint8_t tphwdesc_ena;
289 uint8_t tphdata_ena;
290 uint8_t tphhead_ena;
291 uint8_t lrxqthresh;
292 uint8_t prefena;
293 };
294
295 static const struct ixl_hmc_pack ixl_hmc_pack_rxq[] = {
296 { offsetof(struct ixl_hmc_rxq, head), 13, 0 },
297 { offsetof(struct ixl_hmc_rxq, cpuid), 8, 13 },
298 { offsetof(struct ixl_hmc_rxq, base), 57, 32 },
299 { offsetof(struct ixl_hmc_rxq, qlen), 13, 89 },
300 { offsetof(struct ixl_hmc_rxq, dbuff), 7, 102 },
301 { offsetof(struct ixl_hmc_rxq, hbuff), 5, 109 },
302 { offsetof(struct ixl_hmc_rxq, dtype), 2, 114 },
303 { offsetof(struct ixl_hmc_rxq, dsize), 1, 116 },
304 { offsetof(struct ixl_hmc_rxq, crcstrip), 1, 117 },
305 { offsetof(struct ixl_hmc_rxq, fc_ena), 1, 118 },
306 { offsetof(struct ixl_hmc_rxq, l2sel), 1, 119 },
307 { offsetof(struct ixl_hmc_rxq, hsplit_0), 4, 120 },
308 { offsetof(struct ixl_hmc_rxq, hsplit_1), 2, 124 },
309 { offsetof(struct ixl_hmc_rxq, showiv), 1, 127 },
310 { offsetof(struct ixl_hmc_rxq, rxmax), 14, 174 },
311 { offsetof(struct ixl_hmc_rxq, tphrdesc_ena), 1, 193 },
312 { offsetof(struct ixl_hmc_rxq, tphwdesc_ena), 1, 194 },
313 { offsetof(struct ixl_hmc_rxq, tphdata_ena), 1, 195 },
314 { offsetof(struct ixl_hmc_rxq, tphhead_ena), 1, 196 },
315 { offsetof(struct ixl_hmc_rxq, lrxqthresh), 3, 198 },
316 { offsetof(struct ixl_hmc_rxq, prefena), 1, 201 },
317 };
318
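/*
 * Minimum context size in bits: the last rx queue field (prefena) ends at
 * bit 201.  IXL_HMC_TXQ_MINSIZE below is derived the same way from the tx
 * queue layout.
 */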
319 #define IXL_HMC_RXQ_MINSIZE (201 + 1)
320
321 struct ixl_hmc_txq {
322 uint16_t head;
323 uint8_t new_context;
324 uint64_t base;
325 #define IXL_HMC_TXQ_BASE_UNIT 128
326 uint8_t fc_ena;
327 uint8_t timesync_ena;
328 uint8_t fd_ena;
329 uint8_t alt_vlan_ena;
330 uint8_t cpuid;
331 uint16_t thead_wb;
332 uint8_t head_wb_ena;
333 #define IXL_HMC_TXQ_DESC_WB 0
334 #define IXL_HMC_TXQ_HEAD_WB 1
335 uint16_t qlen;
336 uint8_t tphrdesc_ena;
337 uint8_t tphrpacket_ena;
338 uint8_t tphwdesc_ena;
339 uint64_t head_wb_addr;
340 uint32_t crc;
341 uint16_t rdylist;
342 uint8_t rdylist_act;
343 };
344
345 static const struct ixl_hmc_pack ixl_hmc_pack_txq[] = {
346 { offsetof(struct ixl_hmc_txq, head), 13, 0 },
347 { offsetof(struct ixl_hmc_txq, new_context), 1, 30 },
348 { offsetof(struct ixl_hmc_txq, base), 57, 32 },
349 { offsetof(struct ixl_hmc_txq, fc_ena), 1, 89 },
350 { offsetof(struct ixl_hmc_txq, timesync_ena), 1, 90 },
351 { offsetof(struct ixl_hmc_txq, fd_ena), 1, 91 },
352 { offsetof(struct ixl_hmc_txq, alt_vlan_ena), 1, 92 },
353 { offsetof(struct ixl_hmc_txq, cpuid), 8, 96 },
354 /* line 1 */
355 { offsetof(struct ixl_hmc_txq, thead_wb), 13, 0 + 128 },
356 { offsetof(struct ixl_hmc_txq, head_wb_ena), 1, 32 + 128 },
357 { offsetof(struct ixl_hmc_txq, qlen), 13, 33 + 128 },
358 { offsetof(struct ixl_hmc_txq, tphrdesc_ena), 1, 46 + 128 },
359 { offsetof(struct ixl_hmc_txq, tphrpacket_ena), 1, 47 + 128 },
360 { offsetof(struct ixl_hmc_txq, tphwdesc_ena), 1, 48 + 128 },
361 { offsetof(struct ixl_hmc_txq, head_wb_addr), 64, 64 + 128 },
362 /* line 7 */
363 { offsetof(struct ixl_hmc_txq, crc), 32, 0 + (7*128) },
364 { offsetof(struct ixl_hmc_txq, rdylist), 10, 84 + (7*128) },
365 { offsetof(struct ixl_hmc_txq, rdylist_act), 1, 94 + (7*128) },
366 };
367
368 #define IXL_HMC_TXQ_MINSIZE (94 + (7*128) + 1)
369
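/*
 * A deferred task run from one of the driver's workqueue(9) queues:
 * ixw_func is called with ixw_arg by the workqueue worker, and ixw_added
 * is used to avoid enqueueing the same ixl_work twice (see ixl_work_add()
 * and ixl_work_wait()).
 */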
370 struct ixl_work {
371 struct work ixw_cookie;
372 void (*ixw_func)(void *);
373 void *ixw_arg;
374 unsigned int ixw_added;
375 };
376 #define IXL_WORKQUEUE_PRI PRI_SOFTNET
377
378 struct ixl_tx_map {
379 struct mbuf *txm_m;
380 bus_dmamap_t txm_map;
381 unsigned int txm_eop;
382 };
383
384 struct ixl_tx_ring {
385 kmutex_t txr_lock;
386 struct ixl_softc *txr_sc;
387
388 unsigned int txr_prod;
389 unsigned int txr_cons;
390
391 struct ixl_tx_map *txr_maps;
392 struct ixl_dmamem txr_mem;
393
394 bus_size_t txr_tail;
395 unsigned int txr_qid;
396 pcq_t *txr_intrq;
397 void *txr_si;
398
399 struct evcnt txr_defragged;
400 struct evcnt txr_defrag_failed;
401 struct evcnt txr_pcqdrop;
402 struct evcnt txr_transmitdef;
403 struct evcnt txr_intr;
404 struct evcnt txr_defer;
405 };
406
407 struct ixl_rx_map {
408 struct mbuf *rxm_m;
409 bus_dmamap_t rxm_map;
410 };
411
412 struct ixl_rx_ring {
413 kmutex_t rxr_lock;
414
415 unsigned int rxr_prod;
416 unsigned int rxr_cons;
417
418 struct ixl_rx_map *rxr_maps;
419 struct ixl_dmamem rxr_mem;
420
421 struct mbuf *rxr_m_head;
422 struct mbuf **rxr_m_tail;
423
424 bus_size_t rxr_tail;
425 unsigned int rxr_qid;
426
427 struct evcnt rxr_mgethdr_failed;
428 struct evcnt rxr_mgetcl_failed;
429 struct evcnt rxr_mbuf_load_failed;
430 struct evcnt rxr_intr;
431 struct evcnt rxr_defer;
432 };
433
434 struct ixl_queue_pair {
435 struct ixl_softc *qp_sc;
436 struct ixl_tx_ring *qp_txr;
437 struct ixl_rx_ring *qp_rxr;
438
439 char qp_name[16];
440
441 void *qp_si;
442 struct work qp_work;
443 bool qp_workqueue;
444 };
445
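/*
 * Tracking state for one admin (ATQ) command: the descriptor to post and a
 * completion callback that ixl_atq_done() invokes once the firmware has
 * processed the command.
 */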
446 struct ixl_atq {
447 struct ixl_aq_desc iatq_desc;
448 void (*iatq_fn)(struct ixl_softc *,
449 const struct ixl_aq_desc *);
450 bool iatq_inuse;
451 };
452 SIMPLEQ_HEAD(ixl_atq_list, ixl_atq);
453
454 struct ixl_product {
455 unsigned int vendor_id;
456 unsigned int product_id;
457 };
458
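/*
 * Hardware statistics.  Each evcnt has a matching *_offset member holding
 * the baseline captured by the first ixl_stats_update() call during attach
 * (see isc_has_offset); later readings are reported relative to that
 * baseline.
 */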
459 struct ixl_stats_counters {
460 bool isc_has_offset;
461 struct evcnt isc_crc_errors;
462 uint64_t isc_crc_errors_offset;
463 struct evcnt isc_illegal_bytes;
464 uint64_t isc_illegal_bytes_offset;
465 struct evcnt isc_rx_bytes;
466 uint64_t isc_rx_bytes_offset;
467 struct evcnt isc_rx_discards;
468 uint64_t isc_rx_discards_offset;
469 struct evcnt isc_rx_unicast;
470 uint64_t isc_rx_unicast_offset;
471 struct evcnt isc_rx_multicast;
472 uint64_t isc_rx_multicast_offset;
473 struct evcnt isc_rx_broadcast;
474 uint64_t isc_rx_broadcast_offset;
475 struct evcnt isc_rx_size_64;
476 uint64_t isc_rx_size_64_offset;
477 struct evcnt isc_rx_size_127;
478 uint64_t isc_rx_size_127_offset;
479 struct evcnt isc_rx_size_255;
480 uint64_t isc_rx_size_255_offset;
481 struct evcnt isc_rx_size_511;
482 uint64_t isc_rx_size_511_offset;
483 struct evcnt isc_rx_size_1023;
484 uint64_t isc_rx_size_1023_offset;
485 struct evcnt isc_rx_size_1522;
486 uint64_t isc_rx_size_1522_offset;
487 struct evcnt isc_rx_size_big;
488 uint64_t isc_rx_size_big_offset;
489 struct evcnt isc_rx_undersize;
490 uint64_t isc_rx_undersize_offset;
491 struct evcnt isc_rx_oversize;
492 uint64_t isc_rx_oversize_offset;
493 struct evcnt isc_rx_fragments;
494 uint64_t isc_rx_fragments_offset;
495 struct evcnt isc_rx_jabber;
496 uint64_t isc_rx_jabber_offset;
497 struct evcnt isc_tx_bytes;
498 uint64_t isc_tx_bytes_offset;
499 struct evcnt isc_tx_dropped_link_down;
500 uint64_t isc_tx_dropped_link_down_offset;
501 struct evcnt isc_tx_unicast;
502 uint64_t isc_tx_unicast_offset;
503 struct evcnt isc_tx_multicast;
504 uint64_t isc_tx_multicast_offset;
505 struct evcnt isc_tx_broadcast;
506 uint64_t isc_tx_broadcast_offset;
507 struct evcnt isc_tx_size_64;
508 uint64_t isc_tx_size_64_offset;
509 struct evcnt isc_tx_size_127;
510 uint64_t isc_tx_size_127_offset;
511 struct evcnt isc_tx_size_255;
512 uint64_t isc_tx_size_255_offset;
513 struct evcnt isc_tx_size_511;
514 uint64_t isc_tx_size_511_offset;
515 struct evcnt isc_tx_size_1023;
516 uint64_t isc_tx_size_1023_offset;
517 struct evcnt isc_tx_size_1522;
518 uint64_t isc_tx_size_1522_offset;
519 struct evcnt isc_tx_size_big;
520 uint64_t isc_tx_size_big_offset;
521 struct evcnt isc_mac_local_faults;
522 uint64_t isc_mac_local_faults_offset;
523 struct evcnt isc_mac_remote_faults;
524 uint64_t isc_mac_remote_faults_offset;
525 struct evcnt isc_link_xon_rx;
526 uint64_t isc_link_xon_rx_offset;
527 struct evcnt isc_link_xon_tx;
528 uint64_t isc_link_xon_tx_offset;
529 struct evcnt isc_link_xoff_rx;
530 uint64_t isc_link_xoff_rx_offset;
531 struct evcnt isc_link_xoff_tx;
532 uint64_t isc_link_xoff_tx_offset;
533 struct evcnt isc_vsi_rx_discards;
534 uint64_t isc_vsi_rx_discards_offset;
535 struct evcnt isc_vsi_rx_bytes;
536 uint64_t isc_vsi_rx_bytes_offset;
537 struct evcnt isc_vsi_rx_unicast;
538 uint64_t isc_vsi_rx_unicast_offset;
539 struct evcnt isc_vsi_rx_multicast;
540 uint64_t isc_vsi_rx_multicast_offset;
541 struct evcnt isc_vsi_rx_broadcast;
542 uint64_t isc_vsi_rx_broadcast_offset;
543 struct evcnt isc_vsi_tx_errors;
544 uint64_t isc_vsi_tx_errors_offset;
545 struct evcnt isc_vsi_tx_bytes;
546 uint64_t isc_vsi_tx_bytes_offset;
547 struct evcnt isc_vsi_tx_unicast;
548 uint64_t isc_vsi_tx_unicast_offset;
549 struct evcnt isc_vsi_tx_multicast;
550 uint64_t isc_vsi_tx_multicast_offset;
551 struct evcnt isc_vsi_tx_broadcast;
552 uint64_t isc_vsi_tx_broadcast_offset;
553 };
554
/*
 * Locking notes:
 * + fields in ixl_tx_ring are protected by txr_lock (a spin mutex), and
 *   fields in ixl_rx_ring are protected by rxr_lock (a spin mutex).
 *    - only one of these locks can be held at once.
 * + fields named sc_atq_* in ixl_softc are protected by sc_atq_lock
 *   (a spin mutex).
 *    - this lock cannot be held together with txr_lock or rxr_lock.
 * + fields named sc_arq_* are not protected by any lock.
 *    - operations on sc_arq_* are done in a single context related to
 *      sc_arq_task.
 * + other fields in ixl_softc are protected by sc_cfg_lock
 *   (an adaptive mutex).
 *    - sc_cfg_lock must be taken before any other lock is held, and it
 *      can be released after the other lock is released.
 */
571
572 struct ixl_softc {
573 device_t sc_dev;
574 struct ethercom sc_ec;
575 bool sc_attached;
576 bool sc_dead;
577 uint32_t sc_port;
578 struct sysctllog *sc_sysctllog;
579 struct workqueue *sc_workq;
580 struct workqueue *sc_workq_txrx;
581 int sc_stats_intval;
582 callout_t sc_stats_callout;
583 struct ixl_work sc_stats_task;
584 struct ixl_stats_counters
585 sc_stats_counters;
586 uint8_t sc_enaddr[ETHER_ADDR_LEN];
587 struct ifmedia sc_media;
588 uint64_t sc_media_status;
589 uint64_t sc_media_active;
590 uint64_t sc_phy_types;
591 uint8_t sc_phy_abilities;
592 uint8_t sc_phy_linkspeed;
593 uint8_t sc_phy_fec_cfg;
594 uint16_t sc_eee_cap;
595 uint32_t sc_eeer_val;
596 uint8_t sc_d3_lpan;
597 kmutex_t sc_cfg_lock;
598 enum i40e_mac_type sc_mac_type;
599 uint32_t sc_rss_table_size;
600 uint32_t sc_rss_table_entry_width;
601 bool sc_txrx_workqueue;
602 u_int sc_tx_process_limit;
603 u_int sc_rx_process_limit;
604 u_int sc_tx_intr_process_limit;
605 u_int sc_rx_intr_process_limit;
606
607 int sc_cur_ec_capenable;
608
609 struct pci_attach_args sc_pa;
610 pci_intr_handle_t *sc_ihp;
611 void **sc_ihs;
612 unsigned int sc_nintrs;
613
614 bus_dma_tag_t sc_dmat;
615 bus_space_tag_t sc_memt;
616 bus_space_handle_t sc_memh;
617 bus_size_t sc_mems;
618
619 uint8_t sc_pf_id;
620 uint16_t sc_uplink_seid; /* le */
621 uint16_t sc_downlink_seid; /* le */
622 uint16_t sc_vsi_number;
623 uint16_t sc_vsi_stat_counter_idx;
624 uint16_t sc_seid;
625 unsigned int sc_base_queue;
626
627 pci_intr_type_t sc_intrtype;
628 unsigned int sc_msix_vector_queue;
629
630 struct ixl_dmamem sc_scratch;
631 struct ixl_dmamem sc_aqbuf;
632
633 const struct ixl_aq_regs *
634 sc_aq_regs;
635 uint32_t sc_aq_flags;
636 #define IXL_SC_AQ_FLAG_RXCTL __BIT(0)
637 #define IXL_SC_AQ_FLAG_NVMLOCK __BIT(1)
638 #define IXL_SC_AQ_FLAG_NVMREAD __BIT(2)
639 #define IXL_SC_AQ_FLAG_RSS __BIT(3)
640
641 kmutex_t sc_atq_lock;
642 kcondvar_t sc_atq_cv;
643 struct ixl_dmamem sc_atq;
644 unsigned int sc_atq_prod;
645 unsigned int sc_atq_cons;
646
647 struct ixl_dmamem sc_arq;
648 struct ixl_work sc_arq_task;
649 struct ixl_aq_bufs sc_arq_idle;
650 struct ixl_aq_buf *sc_arq_live[IXL_AQ_NUM];
651 unsigned int sc_arq_prod;
652 unsigned int sc_arq_cons;
653
654 struct ixl_work sc_link_state_task;
655 struct ixl_work sc_link_state_done_task;
656 struct ixl_atq sc_link_state_atq;
657
658 struct ixl_dmamem sc_hmc_sd;
659 struct ixl_dmamem sc_hmc_pd;
660 struct ixl_hmc_entry sc_hmc_entries[IXL_HMC_COUNT];
661
662 struct if_percpuq *sc_ipq;
663 unsigned int sc_tx_ring_ndescs;
664 unsigned int sc_rx_ring_ndescs;
665 unsigned int sc_nqueue_pairs;
666 unsigned int sc_nqueue_pairs_max;
667 unsigned int sc_nqueue_pairs_device;
668 struct ixl_queue_pair *sc_qps;
669 uint32_t sc_itr_rx;
670 uint32_t sc_itr_tx;
671
672 struct evcnt sc_event_atq;
673 struct evcnt sc_event_link;
674 struct evcnt sc_event_ecc_err;
675 struct evcnt sc_event_pci_exception;
676 struct evcnt sc_event_crit_err;
677 };
678
679 #define IXL_TXRX_PROCESS_UNLIMIT UINT_MAX
680 #define IXL_TX_PROCESS_LIMIT 256
681 #define IXL_RX_PROCESS_LIMIT 256
682 #define IXL_TX_INTR_PROCESS_LIMIT 256
683 #define IXL_RX_INTR_PROCESS_LIMIT 0U
684
685 #define IXL_IFCAP_RXCSUM (IFCAP_CSUM_IPv4_Rx | \
686 IFCAP_CSUM_TCPv4_Rx | \
687 IFCAP_CSUM_UDPv4_Rx | \
688 IFCAP_CSUM_TCPv6_Rx | \
689 IFCAP_CSUM_UDPv6_Rx)
690 #define IXL_IFCAP_TXCSUM (IFCAP_CSUM_IPv4_Tx | \
691 IFCAP_CSUM_TCPv4_Tx | \
692 IFCAP_CSUM_UDPv4_Tx | \
693 IFCAP_CSUM_TCPv6_Tx | \
694 IFCAP_CSUM_UDPv6_Tx)
695 #define IXL_CSUM_ALL_OFFLOAD (M_CSUM_IPv4 | \
696 M_CSUM_TCPv4 | M_CSUM_TCPv6 | \
697 M_CSUM_UDPv4 | M_CSUM_UDPv6)
698
699 #define delaymsec(_x) DELAY(1000 * (_x))
700 #ifdef IXL_DEBUG
701 #define DDPRINTF(sc, fmt, args...) \
702 do { \
703 if ((sc) != NULL) { \
704 device_printf( \
705 ((struct ixl_softc *)(sc))->sc_dev, \
706 ""); \
707 } \
708 printf("%s:\t" fmt, __func__, ##args); \
709 } while (0)
710 #else
711 #define DDPRINTF(sc, fmt, args...) __nothing
712 #endif
713 #ifndef IXL_STATS_INTERVAL_MSEC
714 #define IXL_STATS_INTERVAL_MSEC 10000
715 #endif
716 #ifndef IXL_QUEUE_NUM
717 #define IXL_QUEUE_NUM 0
718 #endif
719
720 enum ixl_link_flags {
721 IXL_LINK_NOFLAGS = 0,
722 IXL_LINK_FLAG_WAITDONE = __BIT(0),
723 };
724
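/*
 * Driver-wide tunables.  ixl_param_nomsix disables the use of MSI-X (see
 * ixl_setup_interrupts()), ixl_param_nqps_limit caps the number of queue
 * pairs (0 means no limit beyond the device maximum and ncpu), and the
 * *_ndescs values size the tx/rx descriptor rings and must be powers of two.
 */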
725 static bool ixl_param_nomsix = false;
726 static int ixl_param_stats_interval = IXL_STATS_INTERVAL_MSEC;
727 static int ixl_param_nqps_limit = IXL_QUEUE_NUM;
728 static unsigned int ixl_param_tx_ndescs = 512;
729 static unsigned int ixl_param_rx_ndescs = 512;
730
731 static enum i40e_mac_type
732 ixl_mactype(pci_product_id_t);
733 static void ixl_pci_csr_setup(pci_chipset_tag_t, pcitag_t);
734 static void ixl_clear_hw(struct ixl_softc *);
735 static int ixl_pf_reset(struct ixl_softc *);
736
737 static int ixl_dmamem_alloc(struct ixl_softc *, struct ixl_dmamem *,
738 bus_size_t, bus_size_t);
739 static void ixl_dmamem_free(struct ixl_softc *, struct ixl_dmamem *);
740
741 static int ixl_arq_fill(struct ixl_softc *);
742 static void ixl_arq_unfill(struct ixl_softc *);
743
744 static int ixl_atq_poll(struct ixl_softc *, struct ixl_aq_desc *,
745 unsigned int);
746 static void ixl_atq_set(struct ixl_atq *,
747 void (*)(struct ixl_softc *, const struct ixl_aq_desc *));
748 static void ixl_wakeup(struct ixl_softc *, const struct ixl_aq_desc *);
749 static int ixl_atq_post_locked(struct ixl_softc *, struct ixl_atq *);
750 static void ixl_atq_done(struct ixl_softc *);
751 static int ixl_atq_exec(struct ixl_softc *, struct ixl_atq *);
752 static int ixl_atq_exec_locked(struct ixl_softc *, struct ixl_atq *);
753 static int ixl_get_version(struct ixl_softc *);
754 static int ixl_get_nvm_version(struct ixl_softc *);
755 static int ixl_get_hw_capabilities(struct ixl_softc *);
756 static int ixl_pxe_clear(struct ixl_softc *);
757 static int ixl_lldp_shut(struct ixl_softc *);
758 static int ixl_get_mac(struct ixl_softc *);
759 static int ixl_get_switch_config(struct ixl_softc *);
760 static int ixl_phy_mask_ints(struct ixl_softc *);
761 static int ixl_get_phy_info(struct ixl_softc *);
762 static int ixl_set_phy_config(struct ixl_softc *, uint8_t, uint8_t, bool);
763 static int ixl_set_phy_autoselect(struct ixl_softc *);
764 static int ixl_restart_an(struct ixl_softc *);
765 static int ixl_hmc(struct ixl_softc *);
766 static void ixl_hmc_free(struct ixl_softc *);
767 static int ixl_get_vsi(struct ixl_softc *);
768 static int ixl_set_vsi(struct ixl_softc *);
769 static void ixl_set_filter_control(struct ixl_softc *);
770 static int ixl_get_link_status(struct ixl_softc *, enum ixl_link_flags);
771 static void ixl_get_link_status_work(void *);
772 static int ixl_get_link_status_poll(struct ixl_softc *, int *);
773 static void ixl_get_link_status_done(struct ixl_softc *,
774 const struct ixl_aq_desc *);
775 static void ixl_get_link_status_done_work(void *);
776 static int ixl_set_link_status_locked(struct ixl_softc *,
777 const struct ixl_aq_desc *);
778 static uint64_t ixl_search_link_speed(uint8_t);
779 static uint8_t ixl_search_baudrate(uint64_t);
780 static void ixl_config_rss(struct ixl_softc *);
781 static int ixl_add_macvlan(struct ixl_softc *, const uint8_t *,
782 uint16_t, uint16_t);
783 static int ixl_remove_macvlan(struct ixl_softc *, const uint8_t *,
784 uint16_t, uint16_t);
785 static void ixl_arq(void *);
786 static void ixl_hmc_pack(void *, const void *,
787 const struct ixl_hmc_pack *, unsigned int);
788 static uint32_t ixl_rd_rx_csr(struct ixl_softc *, uint32_t);
789 static void ixl_wr_rx_csr(struct ixl_softc *, uint32_t, uint32_t);
790 static int ixl_rd16_nvm(struct ixl_softc *, uint16_t, uint16_t *);
791
792 static int ixl_match(device_t, cfdata_t, void *);
793 static void ixl_attach(device_t, device_t, void *);
794 static int ixl_detach(device_t, int);
795
796 static void ixl_media_add(struct ixl_softc *);
797 static int ixl_media_change(struct ifnet *);
798 static void ixl_media_status(struct ifnet *, struct ifmediareq *);
799 static int ixl_ioctl(struct ifnet *, u_long, void *);
800 static void ixl_start(struct ifnet *);
801 static int ixl_transmit(struct ifnet *, struct mbuf *);
802 static void ixl_deferred_transmit(void *);
803 static int ixl_intr(void *);
804 static int ixl_queue_intr(void *);
805 static int ixl_other_intr(void *);
806 static void ixl_handle_queue(void *);
807 static void ixl_handle_queue_wk(struct work *, void *);
808 static void ixl_sched_handle_queue(struct ixl_softc *,
809 struct ixl_queue_pair *);
810 static int ixl_init(struct ifnet *);
811 static int ixl_init_locked(struct ixl_softc *);
812 static void ixl_stop(struct ifnet *, int);
813 static void ixl_stop_locked(struct ixl_softc *);
814 static int ixl_iff(struct ixl_softc *);
815 static int ixl_ifflags_cb(struct ethercom *);
816 static int ixl_setup_interrupts(struct ixl_softc *);
817 static int ixl_establish_intx(struct ixl_softc *);
818 static int ixl_establish_msix(struct ixl_softc *);
819 static void ixl_enable_queue_intr(struct ixl_softc *,
820 struct ixl_queue_pair *);
821 static void ixl_disable_queue_intr(struct ixl_softc *,
822 struct ixl_queue_pair *);
823 static void ixl_enable_other_intr(struct ixl_softc *);
824 static void ixl_disable_other_intr(struct ixl_softc *);
825 static void ixl_config_queue_intr(struct ixl_softc *);
826 static void ixl_config_other_intr(struct ixl_softc *);
827
828 static struct ixl_tx_ring *
829 ixl_txr_alloc(struct ixl_softc *, unsigned int);
830 static void ixl_txr_qdis(struct ixl_softc *, struct ixl_tx_ring *, int);
831 static void ixl_txr_config(struct ixl_softc *, struct ixl_tx_ring *);
832 static int ixl_txr_enabled(struct ixl_softc *, struct ixl_tx_ring *);
833 static int ixl_txr_disabled(struct ixl_softc *, struct ixl_tx_ring *);
834 static void ixl_txr_unconfig(struct ixl_softc *, struct ixl_tx_ring *);
835 static void ixl_txr_clean(struct ixl_softc *, struct ixl_tx_ring *);
836 static void ixl_txr_free(struct ixl_softc *, struct ixl_tx_ring *);
837 static int ixl_txeof(struct ixl_softc *, struct ixl_tx_ring *, u_int);
838
839 static struct ixl_rx_ring *
840 ixl_rxr_alloc(struct ixl_softc *, unsigned int);
841 static void ixl_rxr_config(struct ixl_softc *, struct ixl_rx_ring *);
842 static int ixl_rxr_enabled(struct ixl_softc *, struct ixl_rx_ring *);
843 static int ixl_rxr_disabled(struct ixl_softc *, struct ixl_rx_ring *);
844 static void ixl_rxr_unconfig(struct ixl_softc *, struct ixl_rx_ring *);
845 static void ixl_rxr_clean(struct ixl_softc *, struct ixl_rx_ring *);
846 static void ixl_rxr_free(struct ixl_softc *, struct ixl_rx_ring *);
847 static int ixl_rxeof(struct ixl_softc *, struct ixl_rx_ring *, u_int);
848 static int ixl_rxfill(struct ixl_softc *, struct ixl_rx_ring *);
849
850 static struct workqueue *
851 ixl_workq_create(const char *, pri_t, int, int);
852 static void ixl_workq_destroy(struct workqueue *);
853 static int ixl_workqs_teardown(device_t);
854 static void ixl_work_set(struct ixl_work *, void (*)(void *), void *);
855 static void ixl_work_add(struct workqueue *, struct ixl_work *);
856 static void ixl_work_wait(struct workqueue *, struct ixl_work *);
857 static void ixl_workq_work(struct work *, void *);
858 static const struct ixl_product *
859 ixl_lookup(const struct pci_attach_args *pa);
860 static void ixl_link_state_update(struct ixl_softc *,
861 const struct ixl_aq_desc *);
862 static int ixl_vlan_cb(struct ethercom *, uint16_t, bool);
863 static int ixl_setup_vlan_hwfilter(struct ixl_softc *);
864 static void ixl_teardown_vlan_hwfilter(struct ixl_softc *);
865 static int ixl_update_macvlan(struct ixl_softc *);
866 static int ixl_setup_interrupts(struct ixl_softc *);
867 static void ixl_teardown_interrupts(struct ixl_softc *);
868 static int ixl_setup_stats(struct ixl_softc *);
869 static void ixl_teardown_stats(struct ixl_softc *);
870 static void ixl_stats_callout(void *);
871 static void ixl_stats_update(void *);
872 static int ixl_setup_sysctls(struct ixl_softc *);
873 static void ixl_teardown_sysctls(struct ixl_softc *);
874 static int ixl_sysctl_itr_handler(SYSCTLFN_PROTO);
875 static int ixl_queue_pairs_alloc(struct ixl_softc *);
876 static void ixl_queue_pairs_free(struct ixl_softc *);
877
878 static const struct ixl_phy_type ixl_phy_type_map[] = {
879 { 1ULL << IXL_PHY_TYPE_SGMII, IFM_1000_SGMII },
880 { 1ULL << IXL_PHY_TYPE_1000BASE_KX, IFM_1000_KX },
881 { 1ULL << IXL_PHY_TYPE_10GBASE_KX4, IFM_10G_KX4 },
882 { 1ULL << IXL_PHY_TYPE_10GBASE_KR, IFM_10G_KR },
883 { 1ULL << IXL_PHY_TYPE_40GBASE_KR4, IFM_40G_KR4 },
884 { 1ULL << IXL_PHY_TYPE_XAUI |
885 1ULL << IXL_PHY_TYPE_XFI, IFM_10G_CX4 },
886 { 1ULL << IXL_PHY_TYPE_SFI, IFM_10G_SFI },
887 { 1ULL << IXL_PHY_TYPE_XLAUI |
888 1ULL << IXL_PHY_TYPE_XLPPI, IFM_40G_XLPPI },
889 { 1ULL << IXL_PHY_TYPE_40GBASE_CR4_CU |
890 1ULL << IXL_PHY_TYPE_40GBASE_CR4, IFM_40G_CR4 },
891 { 1ULL << IXL_PHY_TYPE_10GBASE_CR1_CU |
892 1ULL << IXL_PHY_TYPE_10GBASE_CR1, IFM_10G_CR1 },
893 { 1ULL << IXL_PHY_TYPE_10GBASE_AOC, IFM_10G_AOC },
894 { 1ULL << IXL_PHY_TYPE_40GBASE_AOC, IFM_40G_AOC },
895 { 1ULL << IXL_PHY_TYPE_100BASE_TX, IFM_100_TX },
896 { 1ULL << IXL_PHY_TYPE_1000BASE_T_OPTICAL |
897 1ULL << IXL_PHY_TYPE_1000BASE_T, IFM_1000_T },
898 { 1ULL << IXL_PHY_TYPE_10GBASE_T, IFM_10G_T },
899 { 1ULL << IXL_PHY_TYPE_10GBASE_SR, IFM_10G_SR },
900 { 1ULL << IXL_PHY_TYPE_10GBASE_LR, IFM_10G_LR },
901 { 1ULL << IXL_PHY_TYPE_10GBASE_SFPP_CU, IFM_10G_TWINAX },
902 { 1ULL << IXL_PHY_TYPE_40GBASE_SR4, IFM_40G_SR4 },
903 { 1ULL << IXL_PHY_TYPE_40GBASE_LR4, IFM_40G_LR4 },
904 { 1ULL << IXL_PHY_TYPE_1000BASE_SX, IFM_1000_SX },
905 { 1ULL << IXL_PHY_TYPE_1000BASE_LX, IFM_1000_LX },
906 { 1ULL << IXL_PHY_TYPE_20GBASE_KR2, IFM_20G_KR2 },
907 { 1ULL << IXL_PHY_TYPE_25GBASE_KR, IFM_25G_KR },
908 { 1ULL << IXL_PHY_TYPE_25GBASE_CR, IFM_25G_CR },
909 { 1ULL << IXL_PHY_TYPE_25GBASE_SR, IFM_25G_SR },
910 { 1ULL << IXL_PHY_TYPE_25GBASE_LR, IFM_25G_LR },
911 { 1ULL << IXL_PHY_TYPE_25GBASE_AOC, IFM_25G_AOC },
912 { 1ULL << IXL_PHY_TYPE_25GBASE_ACC, IFM_25G_ACC },
913 { 1ULL << IXL_PHY_TYPE_2500BASE_T_1, IFM_2500_T },
914 { 1ULL << IXL_PHY_TYPE_5000BASE_T_1, IFM_5000_T },
915 { 1ULL << IXL_PHY_TYPE_2500BASE_T_2, IFM_2500_T },
916 { 1ULL << IXL_PHY_TYPE_5000BASE_T_2, IFM_5000_T },
917 };
918
919 static const struct ixl_speed_type ixl_speed_type_map[] = {
920 { IXL_AQ_LINK_SPEED_40GB, IF_Gbps(40) },
921 { IXL_AQ_LINK_SPEED_25GB, IF_Gbps(25) },
922 { IXL_AQ_LINK_SPEED_10GB, IF_Gbps(10) },
923 { IXL_AQ_LINK_SPEED_5000MB, IF_Mbps(5000) },
924 { IXL_AQ_LINK_SPEED_2500MB, IF_Mbps(2500) },
925 { IXL_AQ_LINK_SPEED_1000MB, IF_Mbps(1000) },
926 { IXL_AQ_LINK_SPEED_100MB, IF_Mbps(100)},
927 };
928
929 static const struct ixl_aq_regs ixl_pf_aq_regs = {
930 .atq_tail = I40E_PF_ATQT,
931 .atq_tail_mask = I40E_PF_ATQT_ATQT_MASK,
932 .atq_head = I40E_PF_ATQH,
933 .atq_head_mask = I40E_PF_ATQH_ATQH_MASK,
934 .atq_len = I40E_PF_ATQLEN,
935 .atq_bal = I40E_PF_ATQBAL,
936 .atq_bah = I40E_PF_ATQBAH,
937 .atq_len_enable = I40E_PF_ATQLEN_ATQENABLE_MASK,
938
939 .arq_tail = I40E_PF_ARQT,
940 .arq_tail_mask = I40E_PF_ARQT_ARQT_MASK,
941 .arq_head = I40E_PF_ARQH,
942 .arq_head_mask = I40E_PF_ARQH_ARQH_MASK,
943 .arq_len = I40E_PF_ARQLEN,
944 .arq_bal = I40E_PF_ARQBAL,
945 .arq_bah = I40E_PF_ARQBAH,
946 .arq_len_enable = I40E_PF_ARQLEN_ARQENABLE_MASK,
947 };
948
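/*
 * CSR access helpers for the memory-mapped register BAR.  ixl_flush() reads
 * a global status register so that preceding writes are posted to the device.
 */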
949 #define ixl_rd(_s, _r) \
950 bus_space_read_4((_s)->sc_memt, (_s)->sc_memh, (_r))
951 #define ixl_wr(_s, _r, _v) \
952 bus_space_write_4((_s)->sc_memt, (_s)->sc_memh, (_r), (_v))
953 #define ixl_barrier(_s, _r, _l, _o) \
954 bus_space_barrier((_s)->sc_memt, (_s)->sc_memh, (_r), (_l), (_o))
955 #define ixl_flush(_s) (void)ixl_rd((_s), I40E_GLGEN_STAT)
956 #define ixl_nqueues(_sc) (1 << ((_sc)->sc_nqueue_pairs - 1))
957
958 CFATTACH_DECL3_NEW(ixl, sizeof(struct ixl_softc),
959 ixl_match, ixl_attach, ixl_detach, NULL, NULL, NULL,
960 DVF_DETACH_SHUTDOWN);
961
962 static const struct ixl_product ixl_products[] = {
963 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_SFP },
964 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_KX_B },
965 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_KX_C },
966 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_A },
967 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_B },
968 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_C },
969 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_T_1 },
970 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_T_2 },
971 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_1 },
972 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_2 },
973 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_T4_10G },
974 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_BP },
975 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_SFP28 },
976 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_KX },
977 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_QSFP },
978 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_SFP },
979 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_1G_BASET },
980 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_10G_BASET },
981 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_I_SFP },
982 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_SFP },
983 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_BP },
984 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_V710_5G_T},
985 /* required last entry */
986 {0, 0}
987 };
988
989 static const struct ixl_product *
990 ixl_lookup(const struct pci_attach_args *pa)
991 {
992 const struct ixl_product *ixlp;
993
994 for (ixlp = ixl_products; ixlp->vendor_id != 0; ixlp++) {
995 if (PCI_VENDOR(pa->pa_id) == ixlp->vendor_id &&
996 PCI_PRODUCT(pa->pa_id) == ixlp->product_id)
997 return ixlp;
998 }
999
1000 return NULL;
1001 }
1002
1003 static void
1004 ixl_intr_barrier(void)
1005 {
1006
	/* wait for all interrupt handlers to finish */
1008 xc_barrier(0);
1009 }
1010
1011 static int
1012 ixl_match(device_t parent, cfdata_t match, void *aux)
1013 {
1014 const struct pci_attach_args *pa = aux;
1015
1016 return (ixl_lookup(pa) != NULL) ? 1 : 0;
1017 }
1018
1019 static void
1020 ixl_attach(device_t parent, device_t self, void *aux)
1021 {
1022 struct ixl_softc *sc;
1023 struct pci_attach_args *pa = aux;
1024 struct ifnet *ifp;
1025 pcireg_t memtype;
1026 uint32_t firstq, port, ari, func;
1027 char xnamebuf[32];
1028 int tries, rv, link;
1029
1030 sc = device_private(self);
1031 sc->sc_dev = self;
1032 ifp = &sc->sc_ec.ec_if;
1033
1034 sc->sc_pa = *pa;
1035 sc->sc_dmat = (pci_dma64_available(pa)) ?
1036 pa->pa_dmat64 : pa->pa_dmat;
1037 sc->sc_aq_regs = &ixl_pf_aq_regs;
1038
1039 sc->sc_mac_type = ixl_mactype(PCI_PRODUCT(pa->pa_id));
1040
1041 ixl_pci_csr_setup(pa->pa_pc, pa->pa_tag);
1042
1043 pci_aprint_devinfo(pa, "Ethernet controller");
1044
1045 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IXL_PCIREG);
1046 if (pci_mapreg_map(pa, IXL_PCIREG, memtype, 0,
1047 &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems)) {
1048 aprint_error(": unable to map registers\n");
1049 return;
1050 }
1051
1052 mutex_init(&sc->sc_cfg_lock, MUTEX_DEFAULT, IPL_SOFTNET);
1053
1054 firstq = ixl_rd(sc, I40E_PFLAN_QALLOC);
1055 firstq &= I40E_PFLAN_QALLOC_FIRSTQ_MASK;
1056 firstq >>= I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
1057 sc->sc_base_queue = firstq;
1058
1059 ixl_clear_hw(sc);
1060 if (ixl_pf_reset(sc) == -1) {
		/* error printed by ixl_pf_reset */
1062 goto unmap;
1063 }
1064
1065 port = ixl_rd(sc, I40E_PFGEN_PORTNUM);
1066 port &= I40E_PFGEN_PORTNUM_PORT_NUM_MASK;
1067 port >>= I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
1068 sc->sc_port = port;
1069 aprint_normal_dev(self, "port %u", sc->sc_port);
1070
1071 ari = ixl_rd(sc, I40E_GLPCI_CAPSUP);
1072 ari &= I40E_GLPCI_CAPSUP_ARI_EN_MASK;
1073 ari >>= I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
1074
1075 func = ixl_rd(sc, I40E_PF_FUNC_RID);
1076 sc->sc_pf_id = func & (ari ? 0xff : 0x7);
1077
1078 /* initialise the adminq */
1079
1080 mutex_init(&sc->sc_atq_lock, MUTEX_DEFAULT, IPL_NET);
1081
1082 if (ixl_dmamem_alloc(sc, &sc->sc_atq,
1083 sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
1084 aprint_error("\n" "%s: unable to allocate atq\n",
1085 device_xname(self));
1086 goto unmap;
1087 }
1088
1089 SIMPLEQ_INIT(&sc->sc_arq_idle);
1090 ixl_work_set(&sc->sc_arq_task, ixl_arq, sc);
1091 sc->sc_arq_cons = 0;
1092 sc->sc_arq_prod = 0;
1093
1094 if (ixl_dmamem_alloc(sc, &sc->sc_arq,
1095 sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
1096 aprint_error("\n" "%s: unable to allocate arq\n",
1097 device_xname(self));
1098 goto free_atq;
1099 }
1100
1101 if (!ixl_arq_fill(sc)) {
1102 aprint_error("\n" "%s: unable to fill arq descriptors\n",
1103 device_xname(self));
1104 goto free_arq;
1105 }
1106
1107 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1108 0, IXL_DMA_LEN(&sc->sc_atq),
1109 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1110
1111 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1112 0, IXL_DMA_LEN(&sc->sc_arq),
1113 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1114
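	/*
	 * Program the ATQ/ARQ head, tail, base and length registers, then
	 * query the firmware version, retrying for a while in case the
	 * firmware is still initialising.
	 */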
1115 for (tries = 0; tries < 10; tries++) {
1116 sc->sc_atq_cons = 0;
1117 sc->sc_atq_prod = 0;
1118
1119 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1120 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1121 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1122 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1123
1124 ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
1125
1126 ixl_wr(sc, sc->sc_aq_regs->atq_bal,
1127 ixl_dmamem_lo(&sc->sc_atq));
1128 ixl_wr(sc, sc->sc_aq_regs->atq_bah,
1129 ixl_dmamem_hi(&sc->sc_atq));
1130 ixl_wr(sc, sc->sc_aq_regs->atq_len,
1131 sc->sc_aq_regs->atq_len_enable | IXL_AQ_NUM);
1132
1133 ixl_wr(sc, sc->sc_aq_regs->arq_bal,
1134 ixl_dmamem_lo(&sc->sc_arq));
1135 ixl_wr(sc, sc->sc_aq_regs->arq_bah,
1136 ixl_dmamem_hi(&sc->sc_arq));
1137 ixl_wr(sc, sc->sc_aq_regs->arq_len,
1138 sc->sc_aq_regs->arq_len_enable | IXL_AQ_NUM);
1139
1140 rv = ixl_get_version(sc);
1141 if (rv == 0)
1142 break;
1143 if (rv != ETIMEDOUT) {
1144 aprint_error(", unable to get firmware version\n");
1145 goto shutdown;
1146 }
1147
1148 delaymsec(100);
1149 }
1150
1151 ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
1152
1153 if (ixl_dmamem_alloc(sc, &sc->sc_aqbuf, IXL_AQ_BUFLEN, 0) != 0) {
1154 aprint_error_dev(self, ", unable to allocate nvm buffer\n");
1155 goto shutdown;
1156 }
1157
1158 ixl_get_nvm_version(sc);
1159
1160 if (sc->sc_mac_type == I40E_MAC_X722)
1161 sc->sc_nqueue_pairs_device = IXL_QUEUE_MAX_X722;
1162 else
1163 sc->sc_nqueue_pairs_device = IXL_QUEUE_MAX_XL710;
1164
1165 rv = ixl_get_hw_capabilities(sc);
1166 if (rv != 0) {
1167 aprint_error(", GET HW CAPABILITIES %s\n",
1168 rv == ETIMEDOUT ? "timeout" : "error");
1169 goto free_aqbuf;
1170 }
1171
1172 sc->sc_nqueue_pairs_max = MIN((int)sc->sc_nqueue_pairs_device, ncpu);
1173 if (ixl_param_nqps_limit > 0) {
1174 sc->sc_nqueue_pairs_max = MIN((int)sc->sc_nqueue_pairs_max,
1175 ixl_param_nqps_limit);
1176 }
1177
1178 sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max;
1179 sc->sc_tx_ring_ndescs = ixl_param_tx_ndescs;
1180 sc->sc_rx_ring_ndescs = ixl_param_rx_ndescs;
1181
1182 KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_rx_ring_ndescs);
1183 KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_tx_ring_ndescs);
1184 KASSERT(sc->sc_rx_ring_ndescs ==
1185 (1U << (fls32(sc->sc_rx_ring_ndescs) - 1)));
1186 KASSERT(sc->sc_tx_ring_ndescs ==
1187 (1U << (fls32(sc->sc_tx_ring_ndescs) - 1)));
1188
1189 if (ixl_get_mac(sc) != 0) {
1190 /* error printed by ixl_get_mac */
1191 goto free_aqbuf;
1192 }
1193
1194 aprint_normal("\n");
1195 aprint_naive("\n");
1196
1197 aprint_normal_dev(self, "Ethernet address %s\n",
1198 ether_sprintf(sc->sc_enaddr));
1199
1200 rv = ixl_pxe_clear(sc);
1201 if (rv != 0) {
1202 aprint_debug_dev(self, "CLEAR PXE MODE %s\n",
1203 rv == ETIMEDOUT ? "timeout" : "error");
1204 }
1205
1206 ixl_set_filter_control(sc);
1207
1208 if (ixl_hmc(sc) != 0) {
1209 /* error printed by ixl_hmc */
1210 goto free_aqbuf;
1211 }
1212
1213 if (ixl_lldp_shut(sc) != 0) {
1214 /* error printed by ixl_lldp_shut */
1215 goto free_hmc;
1216 }
1217
1218 if (ixl_phy_mask_ints(sc) != 0) {
1219 /* error printed by ixl_phy_mask_ints */
1220 goto free_hmc;
1221 }
1222
1223 if (ixl_restart_an(sc) != 0) {
1224 /* error printed by ixl_restart_an */
1225 goto free_hmc;
1226 }
1227
1228 if (ixl_get_switch_config(sc) != 0) {
1229 /* error printed by ixl_get_switch_config */
1230 goto free_hmc;
1231 }
1232
1233 rv = ixl_get_link_status_poll(sc, NULL);
1234 if (rv != 0) {
1235 aprint_error_dev(self, "GET LINK STATUS %s\n",
1236 rv == ETIMEDOUT ? "timeout" : "error");
1237 goto free_hmc;
1238 }
1239
	/*
	 * The firmware often returns EIO for the "Get PHY Abilities"
	 * command if it is issued without a short delay here.
	 */
1244 DELAY(500);
1245 if (ixl_get_phy_info(sc) != 0) {
1246 /* error printed by ixl_get_phy_info */
1247 goto free_hmc;
1248 }
1249
1250 if (ixl_dmamem_alloc(sc, &sc->sc_scratch,
1251 sizeof(struct ixl_aq_vsi_data), 8) != 0) {
1252 aprint_error_dev(self, "unable to allocate scratch buffer\n");
1253 goto free_hmc;
1254 }
1255
1256 rv = ixl_get_vsi(sc);
1257 if (rv != 0) {
1258 aprint_error_dev(self, "GET VSI %s %d\n",
1259 rv == ETIMEDOUT ? "timeout" : "error", rv);
1260 goto free_scratch;
1261 }
1262
1263 rv = ixl_set_vsi(sc);
1264 if (rv != 0) {
		aprint_error_dev(self, "UPDATE VSI %s %d\n",
1266 rv == ETIMEDOUT ? "timeout" : "error", rv);
1267 goto free_scratch;
1268 }
1269
1270 if (ixl_queue_pairs_alloc(sc) != 0) {
1271 /* error printed by ixl_queue_pairs_alloc */
1272 goto free_scratch;
1273 }
1274
1275 if (ixl_setup_interrupts(sc) != 0) {
1276 /* error printed by ixl_setup_interrupts */
1277 goto free_queue_pairs;
1278 }
1279
1280 if (ixl_setup_stats(sc) != 0) {
1281 aprint_error_dev(self, "failed to setup event counters\n");
1282 goto teardown_intrs;
1283 }
1284
1285 if (ixl_setup_sysctls(sc) != 0) {
1286 /* error printed by ixl_setup_sysctls */
1287 goto teardown_stats;
1288 }
1289
1290 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_cfg", device_xname(self));
1291 sc->sc_workq = ixl_workq_create(xnamebuf, IXL_WORKQUEUE_PRI,
1292 IPL_NET, WQ_MPSAFE);
1293 if (sc->sc_workq == NULL)
1294 goto teardown_sysctls;
1295
1296 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_txrx", device_xname(self));
1297 rv = workqueue_create(&sc->sc_workq_txrx, xnamebuf, ixl_handle_queue_wk,
1298 sc, IXL_WORKQUEUE_PRI, IPL_NET, WQ_PERCPU | WQ_MPSAFE);
1299 if (rv != 0) {
1300 sc->sc_workq_txrx = NULL;
1301 goto teardown_wqs;
1302 }
1303
1304 snprintf(xnamebuf, sizeof(xnamebuf), "%s_atq_cv", device_xname(self));
1305 cv_init(&sc->sc_atq_cv, xnamebuf);
1306
1307 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
1308
1309 ifp->if_softc = sc;
1310 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1311 ifp->if_extflags = IFEF_MPSAFE;
1312 ifp->if_ioctl = ixl_ioctl;
1313 ifp->if_start = ixl_start;
1314 ifp->if_transmit = ixl_transmit;
1315 ifp->if_init = ixl_init;
1316 ifp->if_stop = ixl_stop;
1317 IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_ndescs);
1318 IFQ_SET_READY(&ifp->if_snd);
1319 ifp->if_capabilities |= IXL_IFCAP_RXCSUM;
1320 ifp->if_capabilities |= IXL_IFCAP_TXCSUM;
1321 #if 0
1322 ifp->if_capabilities |= IFCAP_TSOv4 | IFCAP_TSOv6;
1323 #endif
1324 ether_set_vlan_cb(&sc->sc_ec, ixl_vlan_cb);
1325 sc->sc_ec.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1326 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
1327 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
1328 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
1329
1330 sc->sc_ec.ec_capenable = sc->sc_ec.ec_capabilities;
1331 /* Disable VLAN_HWFILTER by default */
1332 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
1333
1334 sc->sc_cur_ec_capenable = sc->sc_ec.ec_capenable;
1335
1336 sc->sc_ec.ec_ifmedia = &sc->sc_media;
1337 ifmedia_init_with_lock(&sc->sc_media, IFM_IMASK, ixl_media_change,
1338 ixl_media_status, &sc->sc_cfg_lock);
1339
1340 ixl_media_add(sc);
1341 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1342 if (ISSET(sc->sc_phy_abilities,
1343 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX))) {
1344 ifmedia_add(&sc->sc_media,
1345 IFM_ETHER | IFM_AUTO | IFM_FLOW, 0, NULL);
1346 }
1347 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_NONE, 0, NULL);
1348 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
1349
1350 if_initialize(ifp);
1351
1352 sc->sc_ipq = if_percpuq_create(ifp);
1353 if_deferred_start_init(ifp, NULL);
1354 ether_ifattach(ifp, sc->sc_enaddr);
1355 ether_set_ifflags_cb(&sc->sc_ec, ixl_ifflags_cb);
1356
1357 rv = ixl_get_link_status_poll(sc, &link);
1358 if (rv != 0)
1359 link = LINK_STATE_UNKNOWN;
1360 if_link_state_change(ifp, link);
1361
1362 ixl_atq_set(&sc->sc_link_state_atq, ixl_get_link_status_done);
1363 ixl_work_set(&sc->sc_link_state_task,
1364 ixl_get_link_status_work, sc);
1365 ixl_work_set(&sc->sc_link_state_done_task,
1366 ixl_get_link_status_done_work, sc);
1367
1368 ixl_config_other_intr(sc);
1369 ixl_enable_other_intr(sc);
1370
1371 ixl_set_phy_autoselect(sc);
1372
1373 /* remove default mac filter and replace it so we can see vlans */
1374 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, 0, 0);
1375 if (rv != ENOENT) {
1376 aprint_debug_dev(self,
1377 "unable to remove macvlan %u\n", rv);
1378 }
1379 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
1380 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1381 if (rv != ENOENT) {
1382 aprint_debug_dev(self,
1383 "unable to remove macvlan, ignore vlan %u\n", rv);
1384 }
1385
1386 if (ixl_update_macvlan(sc) != 0) {
1387 aprint_debug_dev(self,
1388 "couldn't enable vlan hardware filter\n");
1389 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
1390 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER);
1391 }
1392
1393 sc->sc_txrx_workqueue = true;
1394 sc->sc_tx_process_limit = IXL_TX_PROCESS_LIMIT;
1395 sc->sc_rx_process_limit = IXL_RX_PROCESS_LIMIT;
1396 sc->sc_tx_intr_process_limit = IXL_TX_INTR_PROCESS_LIMIT;
1397 sc->sc_rx_intr_process_limit = IXL_RX_INTR_PROCESS_LIMIT;
1398
1399 ixl_stats_update(sc);
1400 sc->sc_stats_counters.isc_has_offset = true;
1401
1402 if (pmf_device_register(self, NULL, NULL) != true)
1403 aprint_debug_dev(self, "couldn't establish power handler\n");
1404 sc->sc_itr_rx = IXL_ITR_RX;
1405 sc->sc_itr_tx = IXL_ITR_TX;
1406 sc->sc_attached = true;
1407 if_register(ifp);
1408
1409 return;
1410
1411 teardown_wqs:
1412 config_finalize_register(self, ixl_workqs_teardown);
1413 teardown_sysctls:
1414 ixl_teardown_sysctls(sc);
1415 teardown_stats:
1416 ixl_teardown_stats(sc);
1417 teardown_intrs:
1418 ixl_teardown_interrupts(sc);
1419 free_queue_pairs:
1420 ixl_queue_pairs_free(sc);
1421 free_scratch:
1422 ixl_dmamem_free(sc, &sc->sc_scratch);
1423 free_hmc:
1424 ixl_hmc_free(sc);
1425 free_aqbuf:
1426 ixl_dmamem_free(sc, &sc->sc_aqbuf);
1427 shutdown:
1428 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1429 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1430 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1431 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1432
1433 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0);
1434 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0);
1435 ixl_wr(sc, sc->sc_aq_regs->atq_len, 0);
1436
1437 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0);
1438 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0);
1439 ixl_wr(sc, sc->sc_aq_regs->arq_len, 0);
1440
1441 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1442 0, IXL_DMA_LEN(&sc->sc_arq),
1443 BUS_DMASYNC_POSTREAD);
1444 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1445 0, IXL_DMA_LEN(&sc->sc_atq),
1446 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1447
1448 ixl_arq_unfill(sc);
1449 free_arq:
1450 ixl_dmamem_free(sc, &sc->sc_arq);
1451 free_atq:
1452 ixl_dmamem_free(sc, &sc->sc_atq);
1453 unmap:
1454 mutex_destroy(&sc->sc_atq_lock);
1455 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
1456 mutex_destroy(&sc->sc_cfg_lock);
1457 sc->sc_mems = 0;
1458
1459 sc->sc_attached = false;
1460 }
1461
1462 static int
1463 ixl_detach(device_t self, int flags)
1464 {
1465 struct ixl_softc *sc = device_private(self);
1466 struct ifnet *ifp = &sc->sc_ec.ec_if;
1467
1468 if (!sc->sc_attached)
1469 return 0;
1470
1471 ixl_stop(ifp, 1);
1472
1473 callout_halt(&sc->sc_stats_callout, NULL);
1474 ixl_work_wait(sc->sc_workq, &sc->sc_stats_task);
1475
	/* detach the interface before stopping the adminq, due to callbacks */
1477 ether_ifdetach(ifp);
1478 if_detach(ifp);
1479 ifmedia_fini(&sc->sc_media);
1480 if_percpuq_destroy(sc->sc_ipq);
1481
1482 ixl_disable_other_intr(sc);
1483 ixl_intr_barrier();
1484 ixl_work_wait(sc->sc_workq, &sc->sc_arq_task);
1485 ixl_work_wait(sc->sc_workq, &sc->sc_link_state_task);
1486
1487 if (sc->sc_workq != NULL) {
1488 ixl_workq_destroy(sc->sc_workq);
1489 sc->sc_workq = NULL;
1490 }
1491
1492 if (sc->sc_workq_txrx != NULL) {
1493 workqueue_destroy(sc->sc_workq_txrx);
1494 sc->sc_workq_txrx = NULL;
1495 }
1496
1497 ixl_teardown_interrupts(sc);
1498 ixl_teardown_stats(sc);
1499 ixl_teardown_sysctls(sc);
1500
1501 ixl_queue_pairs_free(sc);
1502
1503 ixl_dmamem_free(sc, &sc->sc_scratch);
1504 ixl_hmc_free(sc);
1505
1506 /* shutdown */
1507 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1508 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1509 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1510 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1511
1512 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0);
1513 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0);
1514 ixl_wr(sc, sc->sc_aq_regs->atq_len, 0);
1515
1516 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0);
1517 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0);
1518 ixl_wr(sc, sc->sc_aq_regs->arq_len, 0);
1519
1520 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1521 0, IXL_DMA_LEN(&sc->sc_arq),
1522 BUS_DMASYNC_POSTREAD);
1523 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1524 0, IXL_DMA_LEN(&sc->sc_atq),
1525 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1526
1527 ixl_arq_unfill(sc);
1528
1529 ixl_dmamem_free(sc, &sc->sc_arq);
1530 ixl_dmamem_free(sc, &sc->sc_atq);
1531 ixl_dmamem_free(sc, &sc->sc_aqbuf);
1532
1533 cv_destroy(&sc->sc_atq_cv);
1534 mutex_destroy(&sc->sc_atq_lock);
1535
1536 if (sc->sc_mems != 0) {
1537 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
1538 sc->sc_mems = 0;
1539 }
1540
1541 mutex_destroy(&sc->sc_cfg_lock);
1542
1543 return 0;
1544 }
1545
1546 static int
1547 ixl_workqs_teardown(device_t self)
1548 {
1549 struct ixl_softc *sc = device_private(self);
1550
1551 if (sc->sc_workq != NULL) {
1552 ixl_workq_destroy(sc->sc_workq);
1553 sc->sc_workq = NULL;
1554 }
1555
1556 if (sc->sc_workq_txrx != NULL) {
1557 workqueue_destroy(sc->sc_workq_txrx);
1558 sc->sc_workq_txrx = NULL;
1559 }
1560
1561 return 0;
1562 }
1563
1564 static int
1565 ixl_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
1566 {
1567 struct ifnet *ifp = &ec->ec_if;
1568 struct ixl_softc *sc = ifp->if_softc;
1569 int rv;
1570
1571 if (!ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)) {
1572 return 0;
1573 }
1574
1575 if (set) {
1576 rv = ixl_add_macvlan(sc, sc->sc_enaddr, vid,
1577 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
1578 if (rv == 0) {
1579 rv = ixl_add_macvlan(sc, etherbroadcastaddr,
1580 vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
1581 }
1582 } else {
1583 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, vid,
1584 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
1585 (void)ixl_remove_macvlan(sc, etherbroadcastaddr, vid,
1586 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
1587 }
1588
1589 return rv;
1590 }
1591
1592 static void
1593 ixl_media_add(struct ixl_softc *sc)
1594 {
1595 struct ifmedia *ifm = &sc->sc_media;
1596 const struct ixl_phy_type *itype;
1597 unsigned int i;
1598 bool flow;
1599
1600 if (ISSET(sc->sc_phy_abilities,
1601 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX))) {
1602 flow = true;
1603 } else {
1604 flow = false;
1605 }
1606
1607 for (i = 0; i < __arraycount(ixl_phy_type_map); i++) {
1608 itype = &ixl_phy_type_map[i];
1609
1610 if (ISSET(sc->sc_phy_types, itype->phy_type)) {
1611 ifmedia_add(ifm,
1612 IFM_ETHER | IFM_FDX | itype->ifm_type, 0, NULL);
1613
1614 if (flow) {
1615 ifmedia_add(ifm,
1616 IFM_ETHER | IFM_FDX | IFM_FLOW |
1617 itype->ifm_type, 0, NULL);
1618 }
1619
1620 if (itype->ifm_type != IFM_100_TX)
1621 continue;
1622
1623 ifmedia_add(ifm, IFM_ETHER | itype->ifm_type,
1624 0, NULL);
1625 if (flow) {
1626 ifmedia_add(ifm,
1627 IFM_ETHER | IFM_FLOW | itype->ifm_type,
1628 0, NULL);
1629 }
1630 }
1631 }
1632 }
1633
1634 static void
1635 ixl_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1636 {
1637 struct ixl_softc *sc = ifp->if_softc;
1638
1639 KASSERT(mutex_owned(&sc->sc_cfg_lock));
1640
1641 ifmr->ifm_status = sc->sc_media_status;
1642 ifmr->ifm_active = sc->sc_media_active;
1643 }
1644
1645 static int
1646 ixl_media_change(struct ifnet *ifp)
1647 {
1648 struct ixl_softc *sc = ifp->if_softc;
1649 struct ifmedia *ifm = &sc->sc_media;
1650 uint64_t ifm_active = sc->sc_media_active;
1651 uint8_t link_speed, abilities;
1652
1653 switch (IFM_SUBTYPE(ifm_active)) {
1654 case IFM_1000_SGMII:
1655 case IFM_1000_KX:
1656 case IFM_10G_KX4:
1657 case IFM_10G_KR:
1658 case IFM_40G_KR4:
1659 case IFM_20G_KR2:
1660 case IFM_25G_KR:
1661 /* backplanes */
1662 return EINVAL;
1663 }
1664
1665 abilities = IXL_PHY_ABILITY_AUTONEGO | IXL_PHY_ABILITY_LINKUP;
1666
1667 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1668 case IFM_AUTO:
1669 link_speed = sc->sc_phy_linkspeed;
1670 break;
1671 case IFM_NONE:
1672 link_speed = 0;
1673 CLR(abilities, IXL_PHY_ABILITY_LINKUP);
1674 break;
1675 default:
1676 link_speed = ixl_search_baudrate(
1677 ifmedia_baudrate(ifm->ifm_media));
1678 }
1679
1680 if (ISSET(abilities, IXL_PHY_ABILITY_LINKUP)) {
1681 if (ISSET(link_speed, sc->sc_phy_linkspeed) == 0)
1682 return EINVAL;
1683 }
1684
1685 if (ifm->ifm_media & IFM_FLOW) {
1686 abilities |= sc->sc_phy_abilities &
1687 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX);
1688 }
1689
1690 return ixl_set_phy_config(sc, link_speed, abilities, false);
1691 }
1692
1693
1694 static void
1695 ixl_del_all_multiaddr(struct ixl_softc *sc)
1696 {
1697 struct ethercom *ec = &sc->sc_ec;
1698 struct ether_multi *enm;
1699 struct ether_multistep step;
1700
1701 ETHER_LOCK(ec);
1702 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1703 ETHER_NEXT_MULTI(step, enm)) {
1704 ixl_remove_macvlan(sc, enm->enm_addrlo, 0,
1705 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1706 }
1707 ETHER_UNLOCK(ec);
1708 }
1709
1710 static int
1711 ixl_add_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
1712 {
1713 struct ifnet *ifp = &sc->sc_ec.ec_if;
1714 int rv;
1715
1716 if (ISSET(ifp->if_flags, IFF_ALLMULTI))
1717 return 0;
1718
1719 if (memcmp(addrlo, addrhi, ETHER_ADDR_LEN) != 0) {
1720 ixl_del_all_multiaddr(sc);
1721 SET(ifp->if_flags, IFF_ALLMULTI);
1722 return ENETRESET;
1723 }
1724
1725 	/* multicast addresses cannot use the VLAN HWFILTER */
1726 rv = ixl_add_macvlan(sc, addrlo, 0,
1727 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
1728
1729 if (rv == ENOSPC) {
1730 ixl_del_all_multiaddr(sc);
1731 SET(ifp->if_flags, IFF_ALLMULTI);
1732 return ENETRESET;
1733 }
1734
1735 return rv;
1736 }
1737
1738 static int
1739 ixl_del_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
1740 {
1741 struct ifnet *ifp = &sc->sc_ec.ec_if;
1742 struct ethercom *ec = &sc->sc_ec;
1743 struct ether_multi *enm, *enm_last;
1744 struct ether_multistep step;
1745 int error, rv = 0;
1746
1747 if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
1748 ixl_remove_macvlan(sc, addrlo, 0,
1749 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1750 return 0;
1751 }
1752
1753 ETHER_LOCK(ec);
1754 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1755 ETHER_NEXT_MULTI(step, enm)) {
1756 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
1757 ETHER_ADDR_LEN) != 0) {
1758 goto out;
1759 }
1760 }
1761
1762 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1763 ETHER_NEXT_MULTI(step, enm)) {
1764 error = ixl_add_macvlan(sc, enm->enm_addrlo, 0,
1765 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
1766 if (error != 0)
1767 break;
1768 }
1769
1770 if (enm != NULL) {
1771 enm_last = enm;
1772 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1773 ETHER_NEXT_MULTI(step, enm)) {
1774 if (enm == enm_last)
1775 break;
1776
1777 ixl_remove_macvlan(sc, enm->enm_addrlo, 0,
1778 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1779 }
1780 } else {
1781 CLR(ifp->if_flags, IFF_ALLMULTI);
1782 rv = ENETRESET;
1783 }
1784
1785 out:
1786 ETHER_UNLOCK(ec);
1787 return rv;
1788 }
1789
1790 static int
1791 ixl_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1792 {
1793 struct ifreq *ifr = (struct ifreq *)data;
1794 struct ixl_softc *sc = (struct ixl_softc *)ifp->if_softc;
1795 const struct sockaddr *sa;
1796 uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
1797 int s, error = 0;
1798 unsigned int nmtu;
1799
1800 switch (cmd) {
1801 case SIOCSIFMTU:
1802 nmtu = ifr->ifr_mtu;
1803
1804 if (nmtu < IXL_MIN_MTU || nmtu > IXL_MAX_MTU) {
1805 error = EINVAL;
1806 break;
1807 }
1808 if (ifp->if_mtu != nmtu) {
1809 s = splnet();
1810 error = ether_ioctl(ifp, cmd, data);
1811 splx(s);
1812 if (error == ENETRESET)
1813 error = ixl_init(ifp);
1814 }
1815 break;
1816 case SIOCADDMULTI:
1817 sa = ifreq_getaddr(SIOCADDMULTI, ifr);
1818 if (ether_addmulti(sa, &sc->sc_ec) == ENETRESET) {
1819 error = ether_multiaddr(sa, addrlo, addrhi);
1820 if (error != 0)
1821 return error;
1822
1823 error = ixl_add_multi(sc, addrlo, addrhi);
1824 if (error != 0 && error != ENETRESET) {
1825 ether_delmulti(sa, &sc->sc_ec);
1826 error = EIO;
1827 }
1828 }
1829 break;
1830
1831 case SIOCDELMULTI:
1832 sa = ifreq_getaddr(SIOCDELMULTI, ifr);
1833 if (ether_delmulti(sa, &sc->sc_ec) == ENETRESET) {
1834 error = ether_multiaddr(sa, addrlo, addrhi);
1835 if (error != 0)
1836 return error;
1837
1838 error = ixl_del_multi(sc, addrlo, addrhi);
1839 }
1840 break;
1841
1842 default:
1843 s = splnet();
1844 error = ether_ioctl(ifp, cmd, data);
1845 splx(s);
1846 }
1847
1848 if (error == ENETRESET)
1849 error = ixl_iff(sc);
1850
1851 return error;
1852 }
1853
1854 static enum i40e_mac_type
1855 ixl_mactype(pci_product_id_t id)
1856 {
1857
1858 switch (id) {
1859 case PCI_PRODUCT_INTEL_XL710_SFP:
1860 case PCI_PRODUCT_INTEL_XL710_KX_B:
1861 case PCI_PRODUCT_INTEL_XL710_KX_C:
1862 case PCI_PRODUCT_INTEL_XL710_QSFP_A:
1863 case PCI_PRODUCT_INTEL_XL710_QSFP_B:
1864 case PCI_PRODUCT_INTEL_XL710_QSFP_C:
1865 case PCI_PRODUCT_INTEL_X710_10G_T_1:
1866 case PCI_PRODUCT_INTEL_X710_10G_T_2:
1867 case PCI_PRODUCT_INTEL_XL710_20G_BP_1:
1868 case PCI_PRODUCT_INTEL_XL710_20G_BP_2:
1869 case PCI_PRODUCT_INTEL_X710_T4_10G:
1870 case PCI_PRODUCT_INTEL_XXV710_25G_BP:
1871 case PCI_PRODUCT_INTEL_XXV710_25G_SFP28:
1872 case PCI_PRODUCT_INTEL_X710_10G_SFP:
1873 case PCI_PRODUCT_INTEL_X710_10G_BP:
1874 return I40E_MAC_XL710;
1875
1876 case PCI_PRODUCT_INTEL_X722_KX:
1877 case PCI_PRODUCT_INTEL_X722_QSFP:
1878 case PCI_PRODUCT_INTEL_X722_SFP:
1879 case PCI_PRODUCT_INTEL_X722_1G_BASET:
1880 case PCI_PRODUCT_INTEL_X722_10G_BASET:
1881 case PCI_PRODUCT_INTEL_X722_I_SFP:
1882 return I40E_MAC_X722;
1883 }
1884
1885 return I40E_MAC_GENERIC;
1886 }
1887
1888 static void
1889 ixl_pci_csr_setup(pci_chipset_tag_t pc, pcitag_t tag)
1890 {
1891 pcireg_t csr;
1892
1893 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
1894 csr |= (PCI_COMMAND_MASTER_ENABLE |
1895 PCI_COMMAND_MEM_ENABLE);
1896 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
1897 }
1898
1899 static inline void *
1900 ixl_hmc_kva(struct ixl_softc *sc, enum ixl_hmc_types type, unsigned int i)
1901 {
1902 uint8_t *kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
1903 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
1904
1905 if (i >= e->hmc_count)
1906 return NULL;
1907
1908 kva += e->hmc_base;
1909 kva += i * e->hmc_size;
1910
1911 return kva;
1912 }
1913
1914 static inline size_t
1915 ixl_hmc_len(struct ixl_softc *sc, enum ixl_hmc_types type)
1916 {
1917 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
1918
1919 return e->hmc_size;
1920 }
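
/*
 * Note (added for exposition, not in the original driver): the HMC backing
 * store in sc_hmc_pd is carved into per-object regions.  ixl_hmc_kva()
 * resolves object i of a given type as
 *
 *	kva = IXL_DMA_KVA(&sc->sc_hmc_pd) + e->hmc_base + i * e->hmc_size
 *
 * so, for example, the LAN TX queue context for queue 3 starts hmc_base +
 * 3 * hmc_size bytes into the mapping, and ixl_hmc_len() reports the size
 * of one such context.
 */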
1921
1922 static void
1923 ixl_enable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp)
1924 {
1925 struct ixl_rx_ring *rxr = qp->qp_rxr;
1926
1927 ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid),
1928 I40E_PFINT_DYN_CTLN_INTENA_MASK |
1929 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1930 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
1931 ixl_flush(sc);
1932 }
1933
1934 static void
1935 ixl_disable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp)
1936 {
1937 struct ixl_rx_ring *rxr = qp->qp_rxr;
1938
1939 ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid),
1940 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
1941 ixl_flush(sc);
1942 }
1943
1944 static void
1945 ixl_enable_other_intr(struct ixl_softc *sc)
1946 {
1947
1948 ixl_wr(sc, I40E_PFINT_DYN_CTL0,
1949 I40E_PFINT_DYN_CTL0_INTENA_MASK |
1950 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
1951 (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
1952 ixl_flush(sc);
1953 }
1954
1955 static void
1956 ixl_disable_other_intr(struct ixl_softc *sc)
1957 {
1958
1959 ixl_wr(sc, I40E_PFINT_DYN_CTL0,
1960 (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
1961 ixl_flush(sc);
1962 }
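
/*
 * Note (exposition only): the four helpers above program the dynamic
 * interrupt control registers.  "Enable" writes INTENA together with
 * CLEARPBA, so an interrupt latched while the vector was masked fires
 * immediately; "disable" writes only the ITR index (IXL_NOITR), which
 * leaves INTENA clear and therefore masks the vector.  CTLN(qid) covers
 * the per-queue-pair vectors, CTL0 the "other" vector used for admin
 * queue and link events.
 */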
1963
1964 static int
1965 ixl_reinit(struct ixl_softc *sc)
1966 {
1967 struct ixl_rx_ring *rxr;
1968 struct ixl_tx_ring *txr;
1969 unsigned int i;
1970 uint32_t reg;
1971
1972 KASSERT(mutex_owned(&sc->sc_cfg_lock));
1973
1974 if (ixl_get_vsi(sc) != 0)
1975 return EIO;
1976
1977 if (ixl_set_vsi(sc) != 0)
1978 return EIO;
1979
1980 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1981 txr = sc->sc_qps[i].qp_txr;
1982 rxr = sc->sc_qps[i].qp_rxr;
1983
1984 ixl_txr_config(sc, txr);
1985 ixl_rxr_config(sc, rxr);
1986 }
1987
1988 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
1989 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_PREWRITE);
1990
1991 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1992 txr = sc->sc_qps[i].qp_txr;
1993 rxr = sc->sc_qps[i].qp_rxr;
1994
1995 ixl_wr(sc, I40E_QTX_CTL(i), I40E_QTX_CTL_PF_QUEUE |
1996 (sc->sc_pf_id << I40E_QTX_CTL_PF_INDX_SHIFT));
1997 ixl_flush(sc);
1998
1999 ixl_wr(sc, txr->txr_tail, txr->txr_prod);
2000 ixl_wr(sc, rxr->rxr_tail, rxr->rxr_prod);
2001
2002 /* ixl_rxfill() needs lock held */
2003 mutex_enter(&rxr->rxr_lock);
2004 ixl_rxfill(sc, rxr);
2005 mutex_exit(&rxr->rxr_lock);
2006
2007 reg = ixl_rd(sc, I40E_QRX_ENA(i));
2008 SET(reg, I40E_QRX_ENA_QENA_REQ_MASK);
2009 ixl_wr(sc, I40E_QRX_ENA(i), reg);
2010 if (ixl_rxr_enabled(sc, rxr) != 0)
2011 goto stop;
2012
2013 ixl_txr_qdis(sc, txr, 1);
2014
2015 reg = ixl_rd(sc, I40E_QTX_ENA(i));
2016 SET(reg, I40E_QTX_ENA_QENA_REQ_MASK);
2017 ixl_wr(sc, I40E_QTX_ENA(i), reg);
2018
2019 if (ixl_txr_enabled(sc, txr) != 0)
2020 goto stop;
2021 }
2022
2023 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
2024 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE);
2025
2026 return 0;
2027
2028 stop:
2029 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
2030 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE);
2031
2032 return ETIMEDOUT;
2033 }
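
/*
 * Note (exposition only): ixl_reinit() brings the data path up in the
 * order the hardware expects: refresh the VSI, program the per-queue
 * HMC contexts, flush them to memory, bind each TX queue to this PF via
 * QTX_CTL, restore the tail pointers, pre-fill the RX rings, and then
 * request RX enable before TX enable while polling the *_ENA status
 * bits.  A queue that never reports enabled fails the whole reinit with
 * ETIMEDOUT.
 */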
2034
2035 static int
2036 ixl_init_locked(struct ixl_softc *sc)
2037 {
2038 struct ifnet *ifp = &sc->sc_ec.ec_if;
2039 unsigned int i;
2040 int error, eccap_change;
2041
2042 KASSERT(mutex_owned(&sc->sc_cfg_lock));
2043
2044 if (ISSET(ifp->if_flags, IFF_RUNNING))
2045 ixl_stop_locked(sc);
2046
2047 if (sc->sc_dead) {
2048 return ENXIO;
2049 }
2050
2051 eccap_change = sc->sc_ec.ec_capenable ^ sc->sc_cur_ec_capenable;
2052 if (ISSET(eccap_change, ETHERCAP_VLAN_HWTAGGING))
2053 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWTAGGING;
2054
2055 if (ISSET(eccap_change, ETHERCAP_VLAN_HWFILTER)) {
2056 if (ixl_update_macvlan(sc) == 0) {
2057 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWFILTER;
2058 } else {
2059 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
2060 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER);
2061 }
2062 }
2063
2064 if (sc->sc_intrtype != PCI_INTR_TYPE_MSIX)
2065 sc->sc_nqueue_pairs = 1;
2066 else
2067 sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max;
2068
2069 error = ixl_reinit(sc);
2070 if (error) {
2071 ixl_stop_locked(sc);
2072 return error;
2073 }
2074
2075 SET(ifp->if_flags, IFF_RUNNING);
2076 CLR(ifp->if_flags, IFF_OACTIVE);
2077
2078 ixl_config_rss(sc);
2079 ixl_config_queue_intr(sc);
2080
2081 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2082 ixl_enable_queue_intr(sc, &sc->sc_qps[i]);
2083 }
2084
2085 error = ixl_iff(sc);
2086 if (error) {
2087 ixl_stop_locked(sc);
2088 return error;
2089 }
2090
2091 callout_schedule(&sc->sc_stats_callout, mstohz(sc->sc_stats_intval));
2092
2093 return 0;
2094 }
2095
2096 static int
2097 ixl_init(struct ifnet *ifp)
2098 {
2099 struct ixl_softc *sc = ifp->if_softc;
2100 int error;
2101
2102 mutex_enter(&sc->sc_cfg_lock);
2103 error = ixl_init_locked(sc);
2104 mutex_exit(&sc->sc_cfg_lock);
2105
2106 if (error == 0) {
2107 error = ixl_get_link_status(sc,
2108 IXL_LINK_FLAG_WAITDONE);
2109 }
2110
2111 return error;
2112 }
2113
2114 static int
2115 ixl_iff(struct ixl_softc *sc)
2116 {
2117 struct ifnet *ifp = &sc->sc_ec.ec_if;
2118 struct ixl_atq iatq;
2119 struct ixl_aq_desc *iaq;
2120 struct ixl_aq_vsi_promisc_param *param;
2121 uint16_t flag_add, flag_del;
2122 int error;
2123
2124 if (!ISSET(ifp->if_flags, IFF_RUNNING))
2125 return 0;
2126
2127 memset(&iatq, 0, sizeof(iatq));
2128
2129 iaq = &iatq.iatq_desc;
2130 iaq->iaq_opcode = htole16(IXL_AQ_OP_SET_VSI_PROMISC);
2131
2132 param = (struct ixl_aq_vsi_promisc_param *)&iaq->iaq_param;
2133 param->flags = htole16(0);
2134
2135 if (!ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)
2136 || ISSET(ifp->if_flags, IFF_PROMISC)) {
2137 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_BCAST |
2138 IXL_AQ_VSI_PROMISC_FLAG_VLAN);
2139 }
2140
2141 if (ISSET(ifp->if_flags, IFF_PROMISC)) {
2142 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
2143 IXL_AQ_VSI_PROMISC_FLAG_MCAST);
2144 } else if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
2145 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_MCAST);
2146 }
2147 param->valid_flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
2148 IXL_AQ_VSI_PROMISC_FLAG_MCAST | IXL_AQ_VSI_PROMISC_FLAG_BCAST |
2149 IXL_AQ_VSI_PROMISC_FLAG_VLAN);
2150 param->seid = sc->sc_seid;
2151
2152 error = ixl_atq_exec(sc, &iatq);
2153 if (error)
2154 return error;
2155
2156 if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK))
2157 return EIO;
2158
2159 if (memcmp(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN) != 0) {
2160 if (ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)) {
2161 flag_add = IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH;
2162 flag_del = IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH;
2163 } else {
2164 flag_add = IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN;
2165 flag_del = IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN;
2166 }
2167
2168 ixl_remove_macvlan(sc, sc->sc_enaddr, 0, flag_del);
2169
2170 memcpy(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN);
2171 ixl_add_macvlan(sc, sc->sc_enaddr, 0, flag_add);
2172 }
2173 return 0;
2174 }
2175
2176 static void
2177 ixl_stop_locked(struct ixl_softc *sc)
2178 {
2179 struct ifnet *ifp = &sc->sc_ec.ec_if;
2180 struct ixl_rx_ring *rxr;
2181 struct ixl_tx_ring *txr;
2182 unsigned int i;
2183 uint32_t reg;
2184
2185 KASSERT(mutex_owned(&sc->sc_cfg_lock));
2186
2187 CLR(ifp->if_flags, IFF_RUNNING | IFF_OACTIVE);
2188 callout_stop(&sc->sc_stats_callout);
2189
2190 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2191 txr = sc->sc_qps[i].qp_txr;
2192 rxr = sc->sc_qps[i].qp_rxr;
2193
2194 ixl_disable_queue_intr(sc, &sc->sc_qps[i]);
2195
2196 mutex_enter(&txr->txr_lock);
2197 ixl_txr_qdis(sc, txr, 0);
2198 mutex_exit(&txr->txr_lock);
2199 }
2200
2201 /* XXX wait at least 400 usec for all tx queues in one go */
2202 ixl_flush(sc);
2203 DELAY(500);
2204
2205 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2206 txr = sc->sc_qps[i].qp_txr;
2207 rxr = sc->sc_qps[i].qp_rxr;
2208
2209 mutex_enter(&txr->txr_lock);
2210 reg = ixl_rd(sc, I40E_QTX_ENA(i));
2211 CLR(reg, I40E_QTX_ENA_QENA_REQ_MASK);
2212 ixl_wr(sc, I40E_QTX_ENA(i), reg);
2213 mutex_exit(&txr->txr_lock);
2214
2215 mutex_enter(&rxr->rxr_lock);
2216 reg = ixl_rd(sc, I40E_QRX_ENA(i));
2217 CLR(reg, I40E_QRX_ENA_QENA_REQ_MASK);
2218 ixl_wr(sc, I40E_QRX_ENA(i), reg);
2219 mutex_exit(&rxr->rxr_lock);
2220 }
2221
2222 /* XXX short wait for all queue disables to settle */
2223 ixl_flush(sc);
2224 DELAY(50);
2225
2226 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2227 txr = sc->sc_qps[i].qp_txr;
2228 rxr = sc->sc_qps[i].qp_rxr;
2229
2230 mutex_enter(&txr->txr_lock);
2231 if (ixl_txr_disabled(sc, txr) != 0) {
2232 mutex_exit(&txr->txr_lock);
2233 goto die;
2234 }
2235 mutex_exit(&txr->txr_lock);
2236
2237 mutex_enter(&rxr->rxr_lock);
2238 if (ixl_rxr_disabled(sc, rxr) != 0) {
2239 mutex_exit(&rxr->rxr_lock);
2240 goto die;
2241 }
2242 mutex_exit(&rxr->rxr_lock);
2243 }
2244
2245 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2246 sc->sc_qps[i].qp_workqueue = false;
2247 workqueue_wait(sc->sc_workq_txrx,
2248 &sc->sc_qps[i].qp_work);
2249 }
2250
2251 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2252 txr = sc->sc_qps[i].qp_txr;
2253 rxr = sc->sc_qps[i].qp_rxr;
2254
2255 mutex_enter(&txr->txr_lock);
2256 ixl_txr_unconfig(sc, txr);
2257 mutex_exit(&txr->txr_lock);
2258
2259 mutex_enter(&rxr->rxr_lock);
2260 ixl_rxr_unconfig(sc, rxr);
2261 mutex_exit(&rxr->rxr_lock);
2262
2263 ixl_txr_clean(sc, txr);
2264 ixl_rxr_clean(sc, rxr);
2265 }
2266
2267 return;
2268 die:
2269 sc->sc_dead = true;
2270 	log(LOG_CRIT, "%s: failed to shut down rings\n",
2271 device_xname(sc->sc_dev));
2272 return;
2273 }
2274
2275 static void
2276 ixl_stop(struct ifnet *ifp, int disable)
2277 {
2278 struct ixl_softc *sc = ifp->if_softc;
2279
2280 mutex_enter(&sc->sc_cfg_lock);
2281 ixl_stop_locked(sc);
2282 mutex_exit(&sc->sc_cfg_lock);
2283 }
2284
2285 static int
2286 ixl_queue_pairs_alloc(struct ixl_softc *sc)
2287 {
2288 struct ixl_queue_pair *qp;
2289 unsigned int i;
2290 size_t sz;
2291
2292 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2293 sc->sc_qps = kmem_zalloc(sz, KM_SLEEP);
2294
2295 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2296 qp = &sc->sc_qps[i];
2297
2298 qp->qp_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
2299 ixl_handle_queue, qp);
2300 if (qp->qp_si == NULL)
2301 goto free;
2302
2303 qp->qp_txr = ixl_txr_alloc(sc, i);
2304 if (qp->qp_txr == NULL)
2305 goto free;
2306
2307 qp->qp_rxr = ixl_rxr_alloc(sc, i);
2308 if (qp->qp_rxr == NULL)
2309 goto free;
2310
2311 qp->qp_sc = sc;
2312 snprintf(qp->qp_name, sizeof(qp->qp_name),
2313 "%s-TXRX%d", device_xname(sc->sc_dev), i);
2314 }
2315
2316 return 0;
2317 free:
2318 if (sc->sc_qps != NULL) {
2319 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2320 qp = &sc->sc_qps[i];
2321
2322 if (qp->qp_txr != NULL)
2323 ixl_txr_free(sc, qp->qp_txr);
2324 if (qp->qp_rxr != NULL)
2325 ixl_rxr_free(sc, qp->qp_rxr);
2326 if (qp->qp_si != NULL)
2327 softint_disestablish(qp->qp_si);
2328 }
2329
2330 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2331 kmem_free(sc->sc_qps, sz);
2332 sc->sc_qps = NULL;
2333 }
2334
2335 return -1;
2336 }
2337
2338 static void
2339 ixl_queue_pairs_free(struct ixl_softc *sc)
2340 {
2341 struct ixl_queue_pair *qp;
2342 unsigned int i;
2343 size_t sz;
2344
2345 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2346 qp = &sc->sc_qps[i];
2347 ixl_txr_free(sc, qp->qp_txr);
2348 ixl_rxr_free(sc, qp->qp_rxr);
2349 softint_disestablish(qp->qp_si);
2350 }
2351
2352 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2353 kmem_free(sc->sc_qps, sz);
2354 sc->sc_qps = NULL;
2355 }
2356
2357 static struct ixl_tx_ring *
2358 ixl_txr_alloc(struct ixl_softc *sc, unsigned int qid)
2359 {
2360 struct ixl_tx_ring *txr = NULL;
2361 struct ixl_tx_map *maps = NULL, *txm;
2362 unsigned int i;
2363
2364 txr = kmem_zalloc(sizeof(*txr), KM_SLEEP);
2365 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_tx_ring_ndescs,
2366 KM_SLEEP);
2367
2368 if (ixl_dmamem_alloc(sc, &txr->txr_mem,
2369 sizeof(struct ixl_tx_desc) * sc->sc_tx_ring_ndescs,
2370 IXL_TX_QUEUE_ALIGN) != 0)
2371 goto free;
2372
2373 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2374 txm = &maps[i];
2375
2376 if (bus_dmamap_create(sc->sc_dmat, IXL_TX_PKT_MAXSIZE,
2377 IXL_TX_PKT_DESCS, IXL_TX_PKT_MAXSIZE, 0,
2378 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &txm->txm_map) != 0)
2379 goto uncreate;
2380
2381 txm->txm_eop = -1;
2382 txm->txm_m = NULL;
2383 }
2384
2385 txr->txr_cons = txr->txr_prod = 0;
2386 txr->txr_maps = maps;
2387
2388 txr->txr_intrq = pcq_create(sc->sc_tx_ring_ndescs, KM_NOSLEEP);
2389 if (txr->txr_intrq == NULL)
2390 goto uncreate;
2391
2392 txr->txr_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
2393 ixl_deferred_transmit, txr);
2394 if (txr->txr_si == NULL)
2395 goto destroy_pcq;
2396
2397 txr->txr_tail = I40E_QTX_TAIL(qid);
2398 txr->txr_qid = qid;
2399 txr->txr_sc = sc;
2400 mutex_init(&txr->txr_lock, MUTEX_DEFAULT, IPL_NET);
2401
2402 return txr;
2403
2404 destroy_pcq:
2405 pcq_destroy(txr->txr_intrq);
2406 uncreate:
2407 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2408 txm = &maps[i];
2409
2410 if (txm->txm_map == NULL)
2411 continue;
2412
2413 bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2414 }
2415
2416 ixl_dmamem_free(sc, &txr->txr_mem);
2417 free:
2418 kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs);
2419 kmem_free(txr, sizeof(*txr));
2420
2421 return NULL;
2422 }
2423
2424 static void
2425 ixl_txr_qdis(struct ixl_softc *sc, struct ixl_tx_ring *txr, int enable)
2426 {
2427 unsigned int qid;
2428 bus_size_t reg;
2429 uint32_t r;
2430
2431 qid = txr->txr_qid + sc->sc_base_queue;
2432 reg = I40E_GLLAN_TXPRE_QDIS(qid / 128);
2433 qid %= 128;
2434
2435 r = ixl_rd(sc, reg);
2436 CLR(r, I40E_GLLAN_TXPRE_QDIS_QINDX_MASK);
2437 SET(r, qid << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
2438 SET(r, enable ? I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK :
2439 I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK);
2440 ixl_wr(sc, reg, r);
2441 }
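
/*
 * Illustrative example (not from the original source): the queue-disable
 * pre-work registers are banked, one register per 128 global queues.
 * With sc_base_queue == 0 and txr_qid == 3, the write above lands in
 * I40E_GLLAN_TXPRE_QDIS(0) with QINDX == 3; enabling sets the
 * CLEAR_QDIS bit, disabling sets SET_QDIS instead.
 */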
2442
2443 static void
2444 ixl_txr_config(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2445 {
2446 struct ixl_hmc_txq txq;
2447 struct ixl_aq_vsi_data *data = IXL_DMA_KVA(&sc->sc_scratch);
2448 void *hmc;
2449
2450 memset(&txq, 0, sizeof(txq));
2451 txq.head = htole16(txr->txr_cons);
2452 txq.new_context = 1;
2453 txq.base = htole64(IXL_DMA_DVA(&txr->txr_mem) / IXL_HMC_TXQ_BASE_UNIT);
2454 txq.head_wb_ena = IXL_HMC_TXQ_DESC_WB;
2455 txq.qlen = htole16(sc->sc_tx_ring_ndescs);
2456 txq.tphrdesc_ena = 0;
2457 txq.tphrpacket_ena = 0;
2458 txq.tphwdesc_ena = 0;
2459 txq.rdylist = data->qs_handle[0];
2460
2461 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2462 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2463 ixl_hmc_pack(hmc, &txq, ixl_hmc_pack_txq,
2464 __arraycount(ixl_hmc_pack_txq));
2465 }
2466
2467 static void
2468 ixl_txr_unconfig(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2469 {
2470 void *hmc;
2471
2472 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2473 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2474 txr->txr_cons = txr->txr_prod = 0;
2475 }
2476
2477 static void
2478 ixl_txr_clean(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2479 {
2480 struct ixl_tx_map *maps, *txm;
2481 bus_dmamap_t map;
2482 unsigned int i;
2483
2484 maps = txr->txr_maps;
2485 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2486 txm = &maps[i];
2487
2488 if (txm->txm_m == NULL)
2489 continue;
2490
2491 map = txm->txm_map;
2492 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2493 BUS_DMASYNC_POSTWRITE);
2494 bus_dmamap_unload(sc->sc_dmat, map);
2495
2496 m_freem(txm->txm_m);
2497 txm->txm_m = NULL;
2498 }
2499 }
2500
2501 static int
2502 ixl_txr_enabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2503 {
2504 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2505 uint32_t reg;
2506 int i;
2507
2508 for (i = 0; i < 10; i++) {
2509 reg = ixl_rd(sc, ena);
2510 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK))
2511 return 0;
2512
2513 delaymsec(10);
2514 }
2515
2516 return ETIMEDOUT;
2517 }
2518
2519 static int
2520 ixl_txr_disabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2521 {
2522 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2523 uint32_t reg;
2524 int i;
2525
2526 KASSERT(mutex_owned(&txr->txr_lock));
2527
2528 for (i = 0; i < 10; i++) {
2529 reg = ixl_rd(sc, ena);
2530 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK) == 0)
2531 return 0;
2532
2533 delaymsec(10);
2534 }
2535
2536 return ETIMEDOUT;
2537 }
2538
2539 static void
2540 ixl_txr_free(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2541 {
2542 struct ixl_tx_map *maps, *txm;
2543 struct mbuf *m;
2544 unsigned int i;
2545
2546 softint_disestablish(txr->txr_si);
2547
2548 maps = txr->txr_maps;
2549 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2550 txm = &maps[i];
2551
2552 bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2553 }
2554
2555 while ((m = pcq_get(txr->txr_intrq)) != NULL)
2556 m_freem(m);
2557 pcq_destroy(txr->txr_intrq);
2558
2559 ixl_dmamem_free(sc, &txr->txr_mem);
2560 mutex_destroy(&txr->txr_lock);
2561 kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs);
2562 kmem_free(txr, sizeof(*txr));
2563 }
2564
2565 static inline int
2566 ixl_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf **m0,
2567 struct ixl_tx_ring *txr)
2568 {
2569 struct mbuf *m;
2570 int error;
2571
2572 KASSERT(mutex_owned(&txr->txr_lock));
2573
2574 m = *m0;
2575
2576 error = bus_dmamap_load_mbuf(dmat, map, m,
2577 BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT);
2578 if (error != EFBIG)
2579 return error;
2580
2581 m = m_defrag(m, M_DONTWAIT);
2582 if (m != NULL) {
2583 *m0 = m;
2584 txr->txr_defragged.ev_count++;
2585
2586 error = bus_dmamap_load_mbuf(dmat, map, m,
2587 BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT);
2588 } else {
2589 txr->txr_defrag_failed.ev_count++;
2590 error = ENOBUFS;
2591 }
2592
2593 return error;
2594 }
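
/*
 * Note (exposition only): ixl_load_mbuf() retries a chain that needs
 * more than IXL_TX_PKT_DESCS segments (EFBIG) by compacting it with
 * m_defrag() and loading it once more; the txr_defragged and
 * txr_defrag_failed event counters record how often each path is taken.
 */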
2595
2596 static inline int
2597 ixl_tx_setup_offloads(struct mbuf *m, uint64_t *cmd_txd)
2598 {
2599 struct ether_header *eh;
2600 size_t len;
2601 uint64_t cmd;
2602
2603 cmd = 0;
2604
2605 eh = mtod(m, struct ether_header *);
2606 	switch (ntohs(eh->ether_type)) {
2607 case ETHERTYPE_IP:
2608 case ETHERTYPE_IPV6:
2609 len = ETHER_HDR_LEN;
2610 break;
2611 case ETHERTYPE_VLAN:
2612 len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2613 break;
2614 default:
2615 len = 0;
2616 }
2617 cmd |= ((len >> 1) << IXL_TX_DESC_MACLEN_SHIFT);
2618
2619 if (m->m_pkthdr.csum_flags &
2620 (M_CSUM_TSOv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
2621 cmd |= IXL_TX_DESC_CMD_IIPT_IPV4;
2622 }
2623 if (m->m_pkthdr.csum_flags & M_CSUM_IPv4) {
2624 cmd |= IXL_TX_DESC_CMD_IIPT_IPV4_CSUM;
2625 }
2626
2627 if (m->m_pkthdr.csum_flags &
2628 (M_CSUM_TSOv6 | M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
2629 cmd |= IXL_TX_DESC_CMD_IIPT_IPV6;
2630 }
2631
2632 switch (cmd & IXL_TX_DESC_CMD_IIPT_MASK) {
2633 case IXL_TX_DESC_CMD_IIPT_IPV4:
2634 case IXL_TX_DESC_CMD_IIPT_IPV4_CSUM:
2635 len = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data);
2636 break;
2637 case IXL_TX_DESC_CMD_IIPT_IPV6:
2638 len = M_CSUM_DATA_IPv6_IPHL(m->m_pkthdr.csum_data);
2639 break;
2640 default:
2641 len = 0;
2642 }
2643 cmd |= ((len >> 2) << IXL_TX_DESC_IPLEN_SHIFT);
2644
2645 if (m->m_pkthdr.csum_flags &
2646 (M_CSUM_TSOv4 | M_CSUM_TSOv6 | M_CSUM_TCPv4 | M_CSUM_TCPv6)) {
2647 len = sizeof(struct tcphdr);
2648 cmd |= IXL_TX_DESC_CMD_L4T_EOFT_TCP;
2649 } else if (m->m_pkthdr.csum_flags & (M_CSUM_UDPv4 | M_CSUM_UDPv6)) {
2650 len = sizeof(struct udphdr);
2651 cmd |= IXL_TX_DESC_CMD_L4T_EOFT_UDP;
2652 } else {
2653 len = 0;
2654 }
2655 cmd |= ((len >> 2) << IXL_TX_DESC_L4LEN_SHIFT);
2656
2657 *cmd_txd |= cmd;
2658 return 0;
2659 }
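
/*
 * Illustrative example (not from the original source): the descriptor
 * carries the header lengths in hardware units, MACLEN in 2-byte words
 * and IPLEN/L4LEN in 4-byte words.  For an untagged IPv4/TCP packet
 * with 20-byte IP and TCP headers the code above encodes MACLEN as
 * 14 >> 1 == 7, IPLEN as 20 >> 2 == 5 and L4LEN as
 * sizeof(struct tcphdr) >> 2 == 5.
 */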
2660
2661 static void
2662 ixl_tx_common_locked(struct ifnet *ifp, struct ixl_tx_ring *txr,
2663 bool is_transmit)
2664 {
2665 struct ixl_softc *sc = ifp->if_softc;
2666 struct ixl_tx_desc *ring, *txd;
2667 struct ixl_tx_map *txm;
2668 bus_dmamap_t map;
2669 struct mbuf *m;
2670 uint64_t cmd, cmd_txd;
2671 unsigned int prod, free, last, i;
2672 unsigned int mask;
2673 int post = 0;
2674
2675 KASSERT(mutex_owned(&txr->txr_lock));
2676
2677 if (!ISSET(ifp->if_flags, IFF_RUNNING)
2678 || (!is_transmit && ISSET(ifp->if_flags, IFF_OACTIVE))) {
2679 if (!is_transmit)
2680 IFQ_PURGE(&ifp->if_snd);
2681 return;
2682 }
2683
2684 prod = txr->txr_prod;
2685 free = txr->txr_cons;
2686 if (free <= prod)
2687 free += sc->sc_tx_ring_ndescs;
2688 free -= prod;
2689
2690 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2691 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTWRITE);
2692
2693 ring = IXL_DMA_KVA(&txr->txr_mem);
2694 mask = sc->sc_tx_ring_ndescs - 1;
2695 last = prod;
2696 cmd = 0;
2697 txd = NULL;
2698
2699 for (;;) {
2700 if (free <= IXL_TX_PKT_DESCS) {
2701 if (!is_transmit)
2702 SET(ifp->if_flags, IFF_OACTIVE);
2703 break;
2704 }
2705
2706 if (is_transmit)
2707 m = pcq_get(txr->txr_intrq);
2708 else
2709 IFQ_DEQUEUE(&ifp->if_snd, m);
2710
2711 if (m == NULL)
2712 break;
2713
2714 txm = &txr->txr_maps[prod];
2715 map = txm->txm_map;
2716
2717 if (ixl_load_mbuf(sc->sc_dmat, map, &m, txr) != 0) {
2718 if_statinc(ifp, if_oerrors);
2719 m_freem(m);
2720 continue;
2721 }
2722
2723 cmd_txd = 0;
2724 if (m->m_pkthdr.csum_flags & IXL_CSUM_ALL_OFFLOAD) {
2725 ixl_tx_setup_offloads(m, &cmd_txd);
2726 }
2727
2728 if (vlan_has_tag(m)) {
2729 uint16_t vtag;
2730 vtag = htole16(vlan_get_tag(m));
2731 cmd_txd |= (uint64_t)vtag <<
2732 IXL_TX_DESC_L2TAG1_SHIFT;
2733 cmd_txd |= IXL_TX_DESC_CMD_IL2TAG1;
2734 }
2735
2736 bus_dmamap_sync(sc->sc_dmat, map, 0,
2737 map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2738
2739 for (i = 0; i < (unsigned int)map->dm_nsegs; i++) {
2740 txd = &ring[prod];
2741
2742 cmd = (uint64_t)map->dm_segs[i].ds_len <<
2743 IXL_TX_DESC_BSIZE_SHIFT;
2744 cmd |= IXL_TX_DESC_DTYPE_DATA | IXL_TX_DESC_CMD_ICRC;
2745 cmd |= cmd_txd;
2746
2747 txd->addr = htole64(map->dm_segs[i].ds_addr);
2748 txd->cmd = htole64(cmd);
2749
2750 last = prod;
2751
2752 prod++;
2753 prod &= mask;
2754 }
2755 cmd |= IXL_TX_DESC_CMD_EOP | IXL_TX_DESC_CMD_RS;
2756 txd->cmd = htole64(cmd);
2757
2758 txm->txm_m = m;
2759 txm->txm_eop = last;
2760
2761 bpf_mtap(ifp, m, BPF_D_OUT);
2762
2763 free -= i;
2764 post = 1;
2765 }
2766
2767 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2768 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREWRITE);
2769
2770 if (post) {
2771 txr->txr_prod = prod;
2772 ixl_wr(sc, txr->txr_tail, prod);
2773 }
2774 }
2775
2776 static int
2777 ixl_txeof(struct ixl_softc *sc, struct ixl_tx_ring *txr, u_int txlimit)
2778 {
2779 struct ifnet *ifp = &sc->sc_ec.ec_if;
2780 struct ixl_tx_desc *ring, *txd;
2781 struct ixl_tx_map *txm;
2782 struct mbuf *m;
2783 bus_dmamap_t map;
2784 unsigned int cons, prod, last;
2785 unsigned int mask;
2786 uint64_t dtype;
2787 int done = 0, more = 0;
2788
2789 KASSERT(mutex_owned(&txr->txr_lock));
2790
2791 prod = txr->txr_prod;
2792 cons = txr->txr_cons;
2793
2794 if (cons == prod)
2795 return 0;
2796
2797 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2798 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTREAD);
2799
2800 ring = IXL_DMA_KVA(&txr->txr_mem);
2801 mask = sc->sc_tx_ring_ndescs - 1;
2802
2803 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
2804
2805 do {
2806 if (txlimit-- <= 0) {
2807 more = 1;
2808 break;
2809 }
2810
2811 txm = &txr->txr_maps[cons];
2812 last = txm->txm_eop;
2813 txd = &ring[last];
2814
2815 dtype = txd->cmd & htole64(IXL_TX_DESC_DTYPE_MASK);
2816 if (dtype != htole64(IXL_TX_DESC_DTYPE_DONE))
2817 break;
2818
2819 map = txm->txm_map;
2820
2821 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2822 BUS_DMASYNC_POSTWRITE);
2823 bus_dmamap_unload(sc->sc_dmat, map);
2824
2825 m = txm->txm_m;
2826 if (m != NULL) {
2827 if_statinc_ref(nsr, if_opackets);
2828 if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
2829 if (ISSET(m->m_flags, M_MCAST))
2830 if_statinc_ref(nsr, if_omcasts);
2831 m_freem(m);
2832 }
2833
2834 txm->txm_m = NULL;
2835 txm->txm_eop = -1;
2836
2837 cons = last + 1;
2838 cons &= mask;
2839 done = 1;
2840 } while (cons != prod);
2841
2842 IF_STAT_PUTREF(ifp);
2843
2844 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2845 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREREAD);
2846
2847 txr->txr_cons = cons;
2848
2849 if (done) {
2850 softint_schedule(txr->txr_si);
2851 if (txr->txr_qid == 0) {
2852 CLR(ifp->if_flags, IFF_OACTIVE);
2853 if_schedule_deferred_start(ifp);
2854 }
2855 }
2856
2857 return more;
2858 }
2859
2860 static void
2861 ixl_start(struct ifnet *ifp)
2862 {
2863 struct ixl_softc *sc;
2864 struct ixl_tx_ring *txr;
2865
2866 sc = ifp->if_softc;
2867 txr = sc->sc_qps[0].qp_txr;
2868
2869 mutex_enter(&txr->txr_lock);
2870 ixl_tx_common_locked(ifp, txr, false);
2871 mutex_exit(&txr->txr_lock);
2872 }
2873
2874 static inline unsigned int
2875 ixl_select_txqueue(struct ixl_softc *sc, struct mbuf *m)
2876 {
2877 u_int cpuid;
2878
2879 cpuid = cpu_index(curcpu());
2880
2881 return (unsigned int)(cpuid % sc->sc_nqueue_pairs);
2882 }
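
/*
 * Note (exposition only): the TX queue is picked from the index of the
 * CPU the caller runs on, modulo the number of active queue pairs; the
 * mbuf argument is currently unused, so there is no per-flow hashing.
 */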
2883
2884 static int
2885 ixl_transmit(struct ifnet *ifp, struct mbuf *m)
2886 {
2887 struct ixl_softc *sc;
2888 struct ixl_tx_ring *txr;
2889 unsigned int qid;
2890
2891 sc = ifp->if_softc;
2892 qid = ixl_select_txqueue(sc, m);
2893
2894 txr = sc->sc_qps[qid].qp_txr;
2895
2896 if (__predict_false(!pcq_put(txr->txr_intrq, m))) {
2897 mutex_enter(&txr->txr_lock);
2898 txr->txr_pcqdrop.ev_count++;
2899 mutex_exit(&txr->txr_lock);
2900
2901 m_freem(m);
2902 return ENOBUFS;
2903 }
2904
2905 #ifdef IXL_ALWAYS_TXDEFER
2906 kpreempt_disable();
2907 softint_schedule(txr->txr_si);
2908 kpreempt_enable();
2909 #else
2910 if (mutex_tryenter(&txr->txr_lock)) {
2911 ixl_tx_common_locked(ifp, txr, true);
2912 mutex_exit(&txr->txr_lock);
2913 } else {
2914 kpreempt_disable();
2915 softint_schedule(txr->txr_si);
2916 kpreempt_enable();
2917 }
2918 #endif
2919
2920 return 0;
2921 }
2922
2923 static void
2924 ixl_deferred_transmit(void *xtxr)
2925 {
2926 struct ixl_tx_ring *txr = xtxr;
2927 struct ixl_softc *sc = txr->txr_sc;
2928 struct ifnet *ifp = &sc->sc_ec.ec_if;
2929
2930 mutex_enter(&txr->txr_lock);
2931 txr->txr_transmitdef.ev_count++;
2932 if (pcq_peek(txr->txr_intrq) != NULL)
2933 ixl_tx_common_locked(ifp, txr, true);
2934 mutex_exit(&txr->txr_lock);
2935 }
2936
2937 static struct ixl_rx_ring *
2938 ixl_rxr_alloc(struct ixl_softc *sc, unsigned int qid)
2939 {
2940 struct ixl_rx_ring *rxr = NULL;
2941 struct ixl_rx_map *maps = NULL, *rxm;
2942 unsigned int i;
2943
2944 rxr = kmem_zalloc(sizeof(*rxr), KM_SLEEP);
2945 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_rx_ring_ndescs,
2946 KM_SLEEP);
2947
2948 if (ixl_dmamem_alloc(sc, &rxr->rxr_mem,
2949 sizeof(struct ixl_rx_rd_desc_32) * sc->sc_rx_ring_ndescs,
2950 IXL_RX_QUEUE_ALIGN) != 0)
2951 goto free;
2952
2953 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2954 rxm = &maps[i];
2955
2956 if (bus_dmamap_create(sc->sc_dmat,
2957 IXL_MCLBYTES, 1, IXL_MCLBYTES, 0,
2958 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &rxm->rxm_map) != 0)
2959 goto uncreate;
2960
2961 rxm->rxm_m = NULL;
2962 }
2963
2964 rxr->rxr_cons = rxr->rxr_prod = 0;
2965 rxr->rxr_m_head = NULL;
2966 rxr->rxr_m_tail = &rxr->rxr_m_head;
2967 rxr->rxr_maps = maps;
2968
2969 rxr->rxr_tail = I40E_QRX_TAIL(qid);
2970 rxr->rxr_qid = qid;
2971 mutex_init(&rxr->rxr_lock, MUTEX_DEFAULT, IPL_NET);
2972
2973 return rxr;
2974
2975 uncreate:
2976 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2977 rxm = &maps[i];
2978
2979 if (rxm->rxm_map == NULL)
2980 continue;
2981
2982 bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
2983 }
2984
2985 ixl_dmamem_free(sc, &rxr->rxr_mem);
2986 free:
2987 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs);
2988 kmem_free(rxr, sizeof(*rxr));
2989
2990 return NULL;
2991 }
2992
2993 static void
2994 ixl_rxr_clean(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2995 {
2996 struct ixl_rx_map *maps, *rxm;
2997 bus_dmamap_t map;
2998 unsigned int i;
2999
3000 maps = rxr->rxr_maps;
3001 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
3002 rxm = &maps[i];
3003
3004 if (rxm->rxm_m == NULL)
3005 continue;
3006
3007 map = rxm->rxm_map;
3008 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3009 BUS_DMASYNC_POSTWRITE);
3010 bus_dmamap_unload(sc->sc_dmat, map);
3011
3012 m_freem(rxm->rxm_m);
3013 rxm->rxm_m = NULL;
3014 }
3015
3016 m_freem(rxr->rxr_m_head);
3017 rxr->rxr_m_head = NULL;
3018 rxr->rxr_m_tail = &rxr->rxr_m_head;
3019
3020 rxr->rxr_prod = rxr->rxr_cons = 0;
3021 }
3022
3023 static int
3024 ixl_rxr_enabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3025 {
3026 bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
3027 uint32_t reg;
3028 int i;
3029
3030 for (i = 0; i < 10; i++) {
3031 reg = ixl_rd(sc, ena);
3032 if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK))
3033 return 0;
3034
3035 delaymsec(10);
3036 }
3037
3038 return ETIMEDOUT;
3039 }
3040
3041 static int
3042 ixl_rxr_disabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3043 {
3044 bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
3045 uint32_t reg;
3046 int i;
3047
3048 KASSERT(mutex_owned(&rxr->rxr_lock));
3049
3050 for (i = 0; i < 10; i++) {
3051 reg = ixl_rd(sc, ena);
3052 if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK) == 0)
3053 return 0;
3054
3055 delaymsec(10);
3056 }
3057
3058 return ETIMEDOUT;
3059 }
3060
3061 static void
3062 ixl_rxr_config(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3063 {
3064 struct ixl_hmc_rxq rxq;
3065 struct ifnet *ifp = &sc->sc_ec.ec_if;
3066 uint16_t rxmax;
3067 void *hmc;
3068
3069 memset(&rxq, 0, sizeof(rxq));
3070 rxmax = ifp->if_mtu + IXL_MTU_ETHERLEN;
3071 if (!ISSET(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_MTU))
3072 rxmax -= ETHER_VLAN_ENCAP_LEN;
3073
3074 rxq.head = htole16(rxr->rxr_cons);
3075 rxq.base = htole64(IXL_DMA_DVA(&rxr->rxr_mem) / IXL_HMC_RXQ_BASE_UNIT);
3076 rxq.qlen = htole16(sc->sc_rx_ring_ndescs);
3077 rxq.dbuff = htole16(IXL_MCLBYTES / IXL_HMC_RXQ_DBUFF_UNIT);
3078 rxq.hbuff = 0;
3079 rxq.dtype = IXL_HMC_RXQ_DTYPE_NOSPLIT;
3080 rxq.dsize = IXL_HMC_RXQ_DSIZE_32;
3081 rxq.crcstrip = 1;
3082 rxq.l2sel = 1;
3083 rxq.showiv = 1;
3084 rxq.rxmax = htole16(rxmax);
3085 rxq.tphrdesc_ena = 0;
3086 rxq.tphwdesc_ena = 0;
3087 rxq.tphdata_ena = 0;
3088 rxq.tphhead_ena = 0;
3089 rxq.lrxqthresh = 0;
3090 rxq.prefena = 1;
3091
3092 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
3093 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
3094 ixl_hmc_pack(hmc, &rxq, ixl_hmc_pack_rxq,
3095 __arraycount(ixl_hmc_pack_rxq));
3096 }
3097
3098 static void
3099 ixl_rxr_unconfig(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3100 {
3101 void *hmc;
3102
3103 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
3104 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
3105 rxr->rxr_cons = rxr->rxr_prod = 0;
3106 }
3107
3108 static void
3109 ixl_rxr_free(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3110 {
3111 struct ixl_rx_map *maps, *rxm;
3112 unsigned int i;
3113
3114 maps = rxr->rxr_maps;
3115 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
3116 rxm = &maps[i];
3117
3118 bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
3119 }
3120
3121 ixl_dmamem_free(sc, &rxr->rxr_mem);
3122 mutex_destroy(&rxr->rxr_lock);
3123 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs);
3124 kmem_free(rxr, sizeof(*rxr));
3125 }
3126
3127 static inline void
3128 ixl_rx_csum(struct mbuf *m, uint64_t qword)
3129 {
3130 int flags_mask;
3131
3132 if (!ISSET(qword, IXL_RX_DESC_L3L4P)) {
3133 /* No L3 or L4 checksum was calculated */
3134 return;
3135 }
3136
3137 switch (__SHIFTOUT(qword, IXL_RX_DESC_PTYPE_MASK)) {
3138 case IXL_RX_DESC_PTYPE_IPV4FRAG:
3139 case IXL_RX_DESC_PTYPE_IPV4:
3140 case IXL_RX_DESC_PTYPE_SCTPV4:
3141 case IXL_RX_DESC_PTYPE_ICMPV4:
3142 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
3143 break;
3144 case IXL_RX_DESC_PTYPE_TCPV4:
3145 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
3146 flags_mask |= M_CSUM_TCPv4 | M_CSUM_TCP_UDP_BAD;
3147 break;
3148 case IXL_RX_DESC_PTYPE_UDPV4:
3149 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
3150 flags_mask |= M_CSUM_UDPv4 | M_CSUM_TCP_UDP_BAD;
3151 break;
3152 case IXL_RX_DESC_PTYPE_TCPV6:
3153 flags_mask = M_CSUM_TCPv6 | M_CSUM_TCP_UDP_BAD;
3154 break;
3155 case IXL_RX_DESC_PTYPE_UDPV6:
3156 flags_mask = M_CSUM_UDPv6 | M_CSUM_TCP_UDP_BAD;
3157 break;
3158 default:
3159 flags_mask = 0;
3160 }
3161
3162 m->m_pkthdr.csum_flags |= (flags_mask & (M_CSUM_IPv4 |
3163 M_CSUM_TCPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv4 | M_CSUM_UDPv6));
3164
3165 if (ISSET(qword, IXL_RX_DESC_IPE)) {
3166 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_IPv4_BAD);
3167 }
3168
3169 if (ISSET(qword, IXL_RX_DESC_L4E)) {
3170 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_TCP_UDP_BAD);
3171 }
3172 }
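
/*
 * Note (exposition only): flags_mask holds every checksum the hardware
 * can have verified for the packet type in the descriptor.  The
 * "checked" bits are always merged into csum_flags, and the matching
 * _BAD bits are added only when the descriptor reports an IP (IPE) or
 * L4 (L4E) checksum error.
 */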
3173
3174 static int
3175 ixl_rxeof(struct ixl_softc *sc, struct ixl_rx_ring *rxr, u_int rxlimit)
3176 {
3177 struct ifnet *ifp = &sc->sc_ec.ec_if;
3178 struct ixl_rx_wb_desc_32 *ring, *rxd;
3179 struct ixl_rx_map *rxm;
3180 bus_dmamap_t map;
3181 unsigned int cons, prod;
3182 struct mbuf *m;
3183 uint64_t word, word0;
3184 unsigned int len;
3185 unsigned int mask;
3186 int done = 0, more = 0;
3187
3188 KASSERT(mutex_owned(&rxr->rxr_lock));
3189
3190 if (!ISSET(ifp->if_flags, IFF_RUNNING))
3191 return 0;
3192
3193 prod = rxr->rxr_prod;
3194 cons = rxr->rxr_cons;
3195
3196 if (cons == prod)
3197 return 0;
3198
3199 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
3200 0, IXL_DMA_LEN(&rxr->rxr_mem),
3201 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3202
3203 ring = IXL_DMA_KVA(&rxr->rxr_mem);
3204 mask = sc->sc_rx_ring_ndescs - 1;
3205
3206 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
3207
3208 do {
3209 if (rxlimit-- <= 0) {
3210 more = 1;
3211 break;
3212 }
3213
3214 rxd = &ring[cons];
3215
3216 word = le64toh(rxd->qword1);
3217
3218 if (!ISSET(word, IXL_RX_DESC_DD))
3219 break;
3220
3221 rxm = &rxr->rxr_maps[cons];
3222
3223 map = rxm->rxm_map;
3224 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3225 BUS_DMASYNC_POSTREAD);
3226 bus_dmamap_unload(sc->sc_dmat, map);
3227
3228 m = rxm->rxm_m;
3229 rxm->rxm_m = NULL;
3230
3231 KASSERT(m != NULL);
3232
3233 len = (word & IXL_RX_DESC_PLEN_MASK) >> IXL_RX_DESC_PLEN_SHIFT;
3234 m->m_len = len;
3235 m->m_pkthdr.len = 0;
3236
3237 m->m_next = NULL;
3238 *rxr->rxr_m_tail = m;
3239 rxr->rxr_m_tail = &m->m_next;
3240
3241 m = rxr->rxr_m_head;
3242 m->m_pkthdr.len += len;
3243
3244 if (ISSET(word, IXL_RX_DESC_EOP)) {
3245 word0 = le64toh(rxd->qword0);
3246
3247 if (ISSET(word, IXL_RX_DESC_L2TAG1P)) {
3248 uint16_t vtag;
3249 vtag = __SHIFTOUT(word0, IXL_RX_DESC_L2TAG1_MASK);
3250 vlan_set_tag(m, le16toh(vtag));
3251 }
3252
3253 if ((ifp->if_capenable & IXL_IFCAP_RXCSUM) != 0)
3254 ixl_rx_csum(m, word);
3255
3256 if (!ISSET(word,
3257 IXL_RX_DESC_RXE | IXL_RX_DESC_OVERSIZE)) {
3258 m_set_rcvif(m, ifp);
3259 if_statinc_ref(nsr, if_ipackets);
3260 if_statadd_ref(nsr, if_ibytes,
3261 m->m_pkthdr.len);
3262 if_percpuq_enqueue(sc->sc_ipq, m);
3263 } else {
3264 if_statinc_ref(nsr, if_ierrors);
3265 m_freem(m);
3266 }
3267
3268 rxr->rxr_m_head = NULL;
3269 rxr->rxr_m_tail = &rxr->rxr_m_head;
3270 }
3271
3272 cons++;
3273 cons &= mask;
3274
3275 done = 1;
3276 } while (cons != prod);
3277
3278 if (done) {
3279 rxr->rxr_cons = cons;
3280 if (ixl_rxfill(sc, rxr) == -1)
3281 if_statinc_ref(nsr, if_iqdrops);
3282 }
3283
3284 IF_STAT_PUTREF(ifp);
3285
3286 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
3287 0, IXL_DMA_LEN(&rxr->rxr_mem),
3288 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3289
3290 return more;
3291 }
3292
3293 static int
3294 ixl_rxfill(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3295 {
3296 struct ixl_rx_rd_desc_32 *ring, *rxd;
3297 struct ixl_rx_map *rxm;
3298 bus_dmamap_t map;
3299 struct mbuf *m;
3300 unsigned int prod;
3301 unsigned int slots;
3302 unsigned int mask;
3303 int post = 0, error = 0;
3304
3305 KASSERT(mutex_owned(&rxr->rxr_lock));
3306
3307 prod = rxr->rxr_prod;
3308 slots = ixl_rxr_unrefreshed(rxr->rxr_prod, rxr->rxr_cons,
3309 sc->sc_rx_ring_ndescs);
3310
3311 ring = IXL_DMA_KVA(&rxr->rxr_mem);
3312 mask = sc->sc_rx_ring_ndescs - 1;
3313
3314 if (__predict_false(slots <= 0))
3315 return -1;
3316
3317 do {
3318 rxm = &rxr->rxr_maps[prod];
3319
3320 MGETHDR(m, M_DONTWAIT, MT_DATA);
3321 if (m == NULL) {
3322 rxr->rxr_mgethdr_failed.ev_count++;
3323 error = -1;
3324 break;
3325 }
3326
3327 MCLGET(m, M_DONTWAIT);
3328 if (!ISSET(m->m_flags, M_EXT)) {
3329 rxr->rxr_mgetcl_failed.ev_count++;
3330 error = -1;
3331 m_freem(m);
3332 break;
3333 }
3334
3335 m->m_len = m->m_pkthdr.len = MCLBYTES;
3336 m_adj(m, ETHER_ALIGN);
3337
3338 map = rxm->rxm_map;
3339
3340 if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
3341 BUS_DMA_READ | BUS_DMA_NOWAIT) != 0) {
3342 rxr->rxr_mbuf_load_failed.ev_count++;
3343 error = -1;
3344 m_freem(m);
3345 break;
3346 }
3347
3348 rxm->rxm_m = m;
3349
3350 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3351 BUS_DMASYNC_PREREAD);
3352
3353 rxd = &ring[prod];
3354
3355 rxd->paddr = htole64(map->dm_segs[0].ds_addr);
3356 rxd->haddr = htole64(0);
3357
3358 prod++;
3359 prod &= mask;
3360
3361 post = 1;
3362
3363 } while (--slots);
3364
3365 if (post) {
3366 rxr->rxr_prod = prod;
3367 ixl_wr(sc, rxr->rxr_tail, prod);
3368 }
3369
3370 return error;
3371 }
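
/*
 * Note (exposition only): ixl_rxfill() replenishes the descriptors
 * between rxr_prod and rxr_cons with fresh mbuf clusters, trimmed by
 * ETHER_ALIGN, and only bumps the hardware tail when at least one
 * descriptor was posted.  A return of -1 means an allocation or DMA
 * load failed, which the caller accounts as if_iqdrops.
 */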
3372
3373 static inline int
3374 ixl_handle_queue_common(struct ixl_softc *sc, struct ixl_queue_pair *qp,
3375 u_int txlimit, struct evcnt *txevcnt,
3376 u_int rxlimit, struct evcnt *rxevcnt)
3377 {
3378 struct ixl_tx_ring *txr = qp->qp_txr;
3379 struct ixl_rx_ring *rxr = qp->qp_rxr;
3380 int txmore, rxmore;
3381 int rv;
3382
3383 mutex_enter(&txr->txr_lock);
3384 txevcnt->ev_count++;
3385 txmore = ixl_txeof(sc, txr, txlimit);
3386 mutex_exit(&txr->txr_lock);
3387
3388 mutex_enter(&rxr->rxr_lock);
3389 rxevcnt->ev_count++;
3390 rxmore = ixl_rxeof(sc, rxr, rxlimit);
3391 mutex_exit(&rxr->rxr_lock);
3392
3393 rv = txmore | (rxmore << 1);
3394
3395 return rv;
3396 }
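
/*
 * Note (exposition only): the return value packs "more work left"
 * flags, bit 0 for TX and bit 1 for RX.  A nonzero result makes the
 * callers reschedule the queue pair (workqueue or softint) instead of
 * re-enabling its interrupt.
 */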
3397
3398 static void
3399 ixl_sched_handle_queue(struct ixl_softc *sc, struct ixl_queue_pair *qp)
3400 {
3401
3402 if (qp->qp_workqueue)
3403 workqueue_enqueue(sc->sc_workq_txrx, &qp->qp_work, NULL);
3404 else
3405 softint_schedule(qp->qp_si);
3406 }
3407
3408 static int
3409 ixl_intr(void *xsc)
3410 {
3411 struct ixl_softc *sc = xsc;
3412 struct ixl_tx_ring *txr;
3413 struct ixl_rx_ring *rxr;
3414 uint32_t icr, rxintr, txintr;
3415 int rv = 0;
3416 unsigned int i;
3417
3418 KASSERT(sc != NULL);
3419
3420 ixl_enable_other_intr(sc);
3421 icr = ixl_rd(sc, I40E_PFINT_ICR0);
3422
3423 if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) {
3424 atomic_inc_64(&sc->sc_event_atq.ev_count);
3425 ixl_atq_done(sc);
3426 ixl_work_add(sc->sc_workq, &sc->sc_arq_task);
3427 rv = 1;
3428 }
3429
3430 if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) {
3431 atomic_inc_64(&sc->sc_event_link.ev_count);
3432 ixl_work_add(sc->sc_workq, &sc->sc_link_state_task);
3433 rv = 1;
3434 }
3435
3436 rxintr = icr & I40E_INTR_NOTX_RX_MASK;
3437 txintr = icr & I40E_INTR_NOTX_TX_MASK;
3438
3439 if (txintr || rxintr) {
3440 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
3441 txr = sc->sc_qps[i].qp_txr;
3442 rxr = sc->sc_qps[i].qp_rxr;
3443
3444 ixl_handle_queue_common(sc, &sc->sc_qps[i],
3445 IXL_TXRX_PROCESS_UNLIMIT, &txr->txr_intr,
3446 IXL_TXRX_PROCESS_UNLIMIT, &rxr->rxr_intr);
3447 }
3448 rv = 1;
3449 }
3450
3451 return rv;
3452 }
3453
3454 static int
3455 ixl_queue_intr(void *xqp)
3456 {
3457 struct ixl_queue_pair *qp = xqp;
3458 struct ixl_tx_ring *txr = qp->qp_txr;
3459 struct ixl_rx_ring *rxr = qp->qp_rxr;
3460 struct ixl_softc *sc = qp->qp_sc;
3461 u_int txlimit, rxlimit;
3462 int more;
3463
3464 txlimit = sc->sc_tx_intr_process_limit;
3465 rxlimit = sc->sc_rx_intr_process_limit;
3466 qp->qp_workqueue = sc->sc_txrx_workqueue;
3467
3468 more = ixl_handle_queue_common(sc, qp,
3469 txlimit, &txr->txr_intr, rxlimit, &rxr->rxr_intr);
3470
3471 if (more != 0) {
3472 ixl_sched_handle_queue(sc, qp);
3473 } else {
3474 /* for ALTQ */
3475 if (txr->txr_qid == 0)
3476 if_schedule_deferred_start(&sc->sc_ec.ec_if);
3477 softint_schedule(txr->txr_si);
3478
3479 ixl_enable_queue_intr(sc, qp);
3480 }
3481
3482 return 1;
3483 }
3484
3485 static void
3486 ixl_handle_queue_wk(struct work *wk, void *xsc)
3487 {
3488 struct ixl_queue_pair *qp;
3489
3490 qp = container_of(wk, struct ixl_queue_pair, qp_work);
3491 ixl_handle_queue(qp);
3492 }
3493
3494 static void
3495 ixl_handle_queue(void *xqp)
3496 {
3497 struct ixl_queue_pair *qp = xqp;
3498 struct ixl_softc *sc = qp->qp_sc;
3499 struct ixl_tx_ring *txr = qp->qp_txr;
3500 struct ixl_rx_ring *rxr = qp->qp_rxr;
3501 u_int txlimit, rxlimit;
3502 int more;
3503
3504 txlimit = sc->sc_tx_process_limit;
3505 rxlimit = sc->sc_rx_process_limit;
3506
3507 more = ixl_handle_queue_common(sc, qp,
3508 txlimit, &txr->txr_defer, rxlimit, &rxr->rxr_defer);
3509
3510 if (more != 0)
3511 ixl_sched_handle_queue(sc, qp);
3512 else
3513 ixl_enable_queue_intr(sc, qp);
3514 }
3515
3516 static inline void
3517 ixl_print_hmc_error(struct ixl_softc *sc, uint32_t reg)
3518 {
3519 uint32_t hmc_idx, hmc_isvf;
3520 uint32_t hmc_errtype, hmc_objtype, hmc_data;
3521
3522 hmc_idx = reg & I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK;
3523 hmc_idx = hmc_idx >> I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT;
3524 hmc_isvf = reg & I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK;
3525 hmc_isvf = hmc_isvf >> I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT;
3526 hmc_errtype = reg & I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK;
3527 hmc_errtype = hmc_errtype >> I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT;
3528 hmc_objtype = reg & I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK;
3529 hmc_objtype = hmc_objtype >> I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT;
3530 hmc_data = ixl_rd(sc, I40E_PFHMC_ERRORDATA);
3531
3532 device_printf(sc->sc_dev,
3533 "HMC Error (idx=0x%x, isvf=0x%x, err=0x%x, obj=0x%x, data=0x%x)\n",
3534 hmc_idx, hmc_isvf, hmc_errtype, hmc_objtype, hmc_data);
3535 }
3536
3537 static int
3538 ixl_other_intr(void *xsc)
3539 {
3540 struct ixl_softc *sc = xsc;
3541 uint32_t icr, mask, reg;
3542 	int rv = 0;
3543
3544 icr = ixl_rd(sc, I40E_PFINT_ICR0);
3545 mask = ixl_rd(sc, I40E_PFINT_ICR0_ENA);
3546
3547 if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) {
3548 atomic_inc_64(&sc->sc_event_atq.ev_count);
3549 ixl_atq_done(sc);
3550 ixl_work_add(sc->sc_workq, &sc->sc_arq_task);
3551 rv = 1;
3552 }
3553
3554 if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) {
3555 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3556 device_printf(sc->sc_dev, "link stat changed\n");
3557
3558 atomic_inc_64(&sc->sc_event_link.ev_count);
3559 ixl_work_add(sc->sc_workq, &sc->sc_link_state_task);
3560 rv = 1;
3561 }
3562
3563 if (ISSET(icr, I40E_PFINT_ICR0_GRST_MASK)) {
3564 CLR(mask, I40E_PFINT_ICR0_ENA_GRST_MASK);
3565 reg = ixl_rd(sc, I40E_GLGEN_RSTAT);
3566 reg = reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK;
3567 reg = reg >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
3568
3569 device_printf(sc->sc_dev, "GRST: %s\n",
3570 reg == I40E_RESET_CORER ? "CORER" :
3571 reg == I40E_RESET_GLOBR ? "GLOBR" :
3572 reg == I40E_RESET_EMPR ? "EMPR" :
3573 "POR");
3574 }
3575
3576 if (ISSET(icr, I40E_PFINT_ICR0_ECC_ERR_MASK))
3577 atomic_inc_64(&sc->sc_event_ecc_err.ev_count);
3578 if (ISSET(icr, I40E_PFINT_ICR0_PCI_EXCEPTION_MASK))
3579 atomic_inc_64(&sc->sc_event_pci_exception.ev_count);
3580 if (ISSET(icr, I40E_PFINT_ICR0_PE_CRITERR_MASK))
3581 atomic_inc_64(&sc->sc_event_crit_err.ev_count);
3582
3583 if (ISSET(icr, IXL_ICR0_CRIT_ERR_MASK)) {
3584 CLR(mask, IXL_ICR0_CRIT_ERR_MASK);
3585 device_printf(sc->sc_dev, "critical error\n");
3586 }
3587
3588 if (ISSET(icr, I40E_PFINT_ICR0_HMC_ERR_MASK)) {
3589 reg = ixl_rd(sc, I40E_PFHMC_ERRORINFO);
3590 if (ISSET(reg, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK))
3591 ixl_print_hmc_error(sc, reg);
3592 ixl_wr(sc, I40E_PFHMC_ERRORINFO, 0);
3593 }
3594
3595 ixl_wr(sc, I40E_PFINT_ICR0_ENA, mask);
3596 ixl_flush(sc);
3597 ixl_enable_other_intr(sc);
3598 return rv;
3599 }
3600
3601 static void
3602 ixl_get_link_status_done_work(void *xsc)
3603 {
3604 struct ixl_softc *sc = xsc;
3605 struct ixl_aq_desc *iaq, iaq_buf;
3606
3607 mutex_enter(&sc->sc_atq_lock);
3608 iaq = &sc->sc_link_state_atq.iatq_desc;
3609 iaq_buf = *iaq;
3610 mutex_exit(&sc->sc_atq_lock);
3611
3612 ixl_link_state_update(sc, &iaq_buf);
3613
3614 mutex_enter(&sc->sc_atq_lock);
3615 CLR(iaq->iaq_flags, htole16(IXL_AQ_DD));
3616 ixl_wakeup(sc, iaq);
3617 mutex_exit(&sc->sc_atq_lock);
3618 }
3619
3620 static void
3621 ixl_get_link_status_done(struct ixl_softc *sc,
3622 const struct ixl_aq_desc *iaq)
3623 {
3624
3625 ixl_work_add(sc->sc_workq, &sc->sc_link_state_done_task);
3626 }
3627
3628 static int
3629 ixl_get_link_status(struct ixl_softc *sc, enum ixl_link_flags flags)
3630 {
3631 struct ixl_atq *iatq;
3632 struct ixl_aq_desc *iaq;
3633 struct ixl_aq_link_param *param;
3634 int error;
3635
3636 mutex_enter(&sc->sc_atq_lock);
3637
3638 iatq = &sc->sc_link_state_atq;
3639 iaq = &iatq->iatq_desc;
3640
3641 if (!sc->sc_link_state_atq.iatq_inuse &&
3642 !ISSET(iaq->iaq_flags, htole16(IXL_AQ_DD))) {
3643 memset(iaq, 0, sizeof(*iaq));
3644 iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
3645 param = (struct ixl_aq_link_param *)iaq->iaq_param;
3646 param->notify = IXL_AQ_LINK_NOTIFY;
3647
3648 KASSERT(iatq->iatq_fn == ixl_get_link_status_done);
3649 error = ixl_atq_post_locked(sc, iatq);
3650 if (error != 0)
3651 goto out;
3652 } else {
3653 		/* the previous command has not completed yet */
3654 error = EBUSY;
3655 }
3656
3657 if (ISSET(flags, IXL_LINK_FLAG_WAITDONE)) {
3658 do {
3659 error = cv_timedwait(&sc->sc_atq_cv, &sc->sc_atq_lock,
3660 IXL_ATQ_EXEC_TIMEOUT);
3661 if (error == EWOULDBLOCK)
3662 break;
3663 } while (iatq->iatq_inuse ||
3664 ISSET(iaq->iaq_flags, htole16(IXL_AQ_DD)));
3665 }
3666
3667 out:
3668 mutex_exit(&sc->sc_atq_lock);
3669
3670 return error;
3671 }
3672
3673 static void
3674 ixl_get_link_status_work(void *xsc)
3675 {
3676 struct ixl_softc *sc = xsc;
3677
3678 /*
3679 	 * IXL_LINK_FLAG_WAITDONE would deadlock here because
3680 	 * ixl_get_link_status_done_work() runs on the same
3681 	 * workqueue.
3682 */
3683 (void)ixl_get_link_status(sc, IXL_LINK_NOFLAGS);
3684 }
3685
3686 static void
3687 ixl_link_state_update(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
3688 {
3689 struct ifnet *ifp = &sc->sc_ec.ec_if;
3690 int link_state;
3691
3692 mutex_enter(&sc->sc_cfg_lock);
3693 link_state = ixl_set_link_status_locked(sc, iaq);
3694 mutex_exit(&sc->sc_cfg_lock);
3695
3696 if (ifp->if_link_state != link_state)
3697 if_link_state_change(ifp, link_state);
3698
3699 if (link_state != LINK_STATE_DOWN) {
3700 kpreempt_disable();
3701 if_schedule_deferred_start(ifp);
3702 kpreempt_enable();
3703 }
3704 }
3705
3706 static void
3707 ixl_aq_dump(const struct ixl_softc *sc, const struct ixl_aq_desc *iaq,
3708 const char *msg)
3709 {
3710 char buf[512];
3711 size_t len;
3712
3713 len = sizeof(buf);
3714 buf[--len] = '\0';
3715
3716 device_printf(sc->sc_dev, "%s\n", msg);
3717 snprintb(buf, len, IXL_AQ_FLAGS_FMT, le16toh(iaq->iaq_flags));
3718 device_printf(sc->sc_dev, "flags %s opcode %04x\n",
3719 buf, le16toh(iaq->iaq_opcode));
3720 device_printf(sc->sc_dev, "datalen %u retval %u\n",
3721 le16toh(iaq->iaq_datalen), le16toh(iaq->iaq_retval));
3722 device_printf(sc->sc_dev, "cookie %016" PRIx64 "\n", iaq->iaq_cookie);
3723 device_printf(sc->sc_dev, "%08x %08x %08x %08x\n",
3724 le32toh(iaq->iaq_param[0]), le32toh(iaq->iaq_param[1]),
3725 le32toh(iaq->iaq_param[2]), le32toh(iaq->iaq_param[3]));
3726 }
3727
3728 static void
3729 ixl_arq(void *xsc)
3730 {
3731 struct ixl_softc *sc = xsc;
3732 struct ixl_aq_desc *arq, *iaq;
3733 struct ixl_aq_buf *aqb;
3734 unsigned int cons = sc->sc_arq_cons;
3735 unsigned int prod;
3736 int done = 0;
3737
3738 prod = ixl_rd(sc, sc->sc_aq_regs->arq_head) &
3739 sc->sc_aq_regs->arq_head_mask;
3740
3741 if (cons == prod)
3742 goto done;
3743
3744 arq = IXL_DMA_KVA(&sc->sc_arq);
3745
3746 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3747 0, IXL_DMA_LEN(&sc->sc_arq),
3748 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3749
3750 do {
3751 iaq = &arq[cons];
3752 aqb = sc->sc_arq_live[cons];
3753
3754 KASSERT(aqb != NULL);
3755
3756 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,
3757 BUS_DMASYNC_POSTREAD);
3758
3759 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3760 ixl_aq_dump(sc, iaq, "arq event");
3761
3762 switch (iaq->iaq_opcode) {
3763 case htole16(IXL_AQ_OP_PHY_LINK_STATUS):
3764 ixl_link_state_update(sc, iaq);
3765 break;
3766 }
3767
3768 memset(iaq, 0, sizeof(*iaq));
3769 sc->sc_arq_live[cons] = NULL;
3770 SIMPLEQ_INSERT_TAIL(&sc->sc_arq_idle, aqb, aqb_entry);
3771
3772 cons++;
3773 cons &= IXL_AQ_MASK;
3774
3775 done = 1;
3776 } while (cons != prod);
3777
3778 if (done) {
3779 sc->sc_arq_cons = cons;
3780 ixl_arq_fill(sc);
3781 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3782 0, IXL_DMA_LEN(&sc->sc_arq),
3783 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3784 }
3785
3786 done:
3787 ixl_enable_other_intr(sc);
3788 }
3789
3790 static void
3791 ixl_atq_set(struct ixl_atq *iatq,
3792 void (*fn)(struct ixl_softc *, const struct ixl_aq_desc *))
3793 {
3794
3795 iatq->iatq_fn = fn;
3796 }
3797
3798 static int
3799 ixl_atq_post_locked(struct ixl_softc *sc, struct ixl_atq *iatq)
3800 {
3801 struct ixl_aq_desc *atq, *slot;
3802 unsigned int prod, cons, prod_next;
3803
3804 /* assert locked */
3805 KASSERT(mutex_owned(&sc->sc_atq_lock));
3806
3807 atq = IXL_DMA_KVA(&sc->sc_atq);
3808 prod = sc->sc_atq_prod;
3809 cons = sc->sc_atq_cons;
3810 	prod_next = (prod + 1) & IXL_AQ_MASK;
3811
3812 if (cons == prod_next)
3813 return ENOMEM;
3814
3815 slot = &atq[prod];
3816
3817 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3818 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
3819
3820 KASSERT(iatq->iatq_fn != NULL);
3821 *slot = iatq->iatq_desc;
3822 slot->iaq_cookie = (uint64_t)((intptr_t)iatq);
3823
3824 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3825 ixl_aq_dump(sc, slot, "atq command");
3826
3827 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3828 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
3829
3830 sc->sc_atq_prod = prod_next;
3831 ixl_wr(sc, sc->sc_aq_regs->atq_tail, sc->sc_atq_prod);
3832 iatq->iatq_inuse = true;
3833
3834 return 0;
3835 }
3836
3837 static void
3838 ixl_atq_done_locked(struct ixl_softc *sc)
3839 {
3840 struct ixl_aq_desc *atq, *slot;
3841 struct ixl_atq *iatq;
3842 unsigned int cons;
3843 unsigned int prod;
3844
3845 KASSERT(mutex_owned(&sc->sc_atq_lock));
3846
3847 prod = sc->sc_atq_prod;
3848 cons = sc->sc_atq_cons;
3849
3850 if (prod == cons)
3851 return;
3852
3853 atq = IXL_DMA_KVA(&sc->sc_atq);
3854
3855 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3856 0, IXL_DMA_LEN(&sc->sc_atq),
3857 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3858
3859 do {
3860 slot = &atq[cons];
3861 if (!ISSET(slot->iaq_flags, htole16(IXL_AQ_DD)))
3862 break;
3863
3864 iatq = (struct ixl_atq *)((intptr_t)slot->iaq_cookie);
3865 iatq->iatq_desc = *slot;
3866 iatq->iatq_inuse = false;
3867
3868 memset(slot, 0, sizeof(*slot));
3869
3870 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3871 ixl_aq_dump(sc, &iatq->iatq_desc, "atq response");
3872
3873 (*iatq->iatq_fn)(sc, &iatq->iatq_desc);
3874
3875 cons++;
3876 cons &= IXL_AQ_MASK;
3877 } while (cons != prod);
3878
3879 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3880 0, IXL_DMA_LEN(&sc->sc_atq),
3881 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3882
3883 sc->sc_atq_cons = cons;
3884 }
3885
3886 static void
3887 ixl_atq_done(struct ixl_softc *sc)
3888 {
3889
3890 mutex_enter(&sc->sc_atq_lock);
3891 ixl_atq_done_locked(sc);
3892 mutex_exit(&sc->sc_atq_lock);
3893 }
3894
3895 static void
3896 ixl_wakeup(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
3897 {
3898
3899 KASSERT(mutex_owned(&sc->sc_atq_lock));
3900
3901 cv_broadcast(&sc->sc_atq_cv);
3902 }
3903
3904 static int
3905 ixl_atq_exec(struct ixl_softc *sc, struct ixl_atq *iatq)
3906 {
3907 int error;
3908
3909 mutex_enter(&sc->sc_atq_lock);
3910 error = ixl_atq_exec_locked(sc, iatq);
3911 mutex_exit(&sc->sc_atq_lock);
3912
3913 return error;
3914 }
3915
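/*
 * Post an admin command and sleep until its completion callback
 * (ixl_wakeup) broadcasts on sc_atq_cv, or until cv_timedwait gives
 * up after IXL_ATQ_EXEC_TIMEOUT.
 */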
3916 static int
3917 ixl_atq_exec_locked(struct ixl_softc *sc, struct ixl_atq *iatq)
3918 {
3919 int error;
3920
3921 KASSERT(mutex_owned(&sc->sc_atq_lock));
3922 KASSERT(iatq->iatq_desc.iaq_cookie == 0);
3923
3924 ixl_atq_set(iatq, ixl_wakeup);
3925
3926 error = ixl_atq_post_locked(sc, iatq);
3927 if (error)
3928 return error;
3929
3930 do {
3931 error = cv_timedwait(&sc->sc_atq_cv, &sc->sc_atq_lock,
3932 IXL_ATQ_EXEC_TIMEOUT);
3933 if (error == EWOULDBLOCK)
3934 break;
3935 } while (iatq->iatq_inuse);
3936
3937 return error;
3938 }
3939
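/*
 * Synchronous, polled variant of command submission, used before
 * interrupts are available (e.g. during attach).  The descriptor is
 * written directly into the ring and the head register is polled,
 * with a 1ms delay per iteration, for up to "tm" milliseconds.
 */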
3940 static int
3941 ixl_atq_poll(struct ixl_softc *sc, struct ixl_aq_desc *iaq, unsigned int tm)
3942 {
3943 struct ixl_aq_desc *atq, *slot;
3944 unsigned int prod;
3945 unsigned int t = 0;
3946
3947 mutex_enter(&sc->sc_atq_lock);
3948
3949 atq = IXL_DMA_KVA(&sc->sc_atq);
3950 prod = sc->sc_atq_prod;
3951 slot = atq + prod;
3952
3953 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3954 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
3955
3956 *slot = *iaq;
3957 slot->iaq_flags |= htole16(IXL_AQ_SI);
3958
3959 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3960 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
3961
3962 prod++;
3963 prod &= IXL_AQ_MASK;
3964 sc->sc_atq_prod = prod;
3965 ixl_wr(sc, sc->sc_aq_regs->atq_tail, prod);
3966
3967 while (ixl_rd(sc, sc->sc_aq_regs->atq_head) != prod) {
3968 delaymsec(1);
3969
3970 if (t++ > tm) {
3971 mutex_exit(&sc->sc_atq_lock);
3972 return ETIMEDOUT;
3973 }
3974 }
3975
3976 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3977 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTREAD);
3978 *iaq = *slot;
3979 memset(slot, 0, sizeof(*slot));
3980 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3981 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREREAD);
3982
3983 sc->sc_atq_cons = prod;
3984
3985 mutex_exit(&sc->sc_atq_lock);
3986
3987 return 0;
3988 }
3989
3990 static int
3991 ixl_get_version(struct ixl_softc *sc)
3992 {
3993 struct ixl_aq_desc iaq;
3994 uint32_t fwbuild, fwver, apiver;
3995 uint16_t api_maj_ver, api_min_ver;
3996
3997 memset(&iaq, 0, sizeof(iaq));
3998 iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VERSION);
3999
4002 if (ixl_atq_poll(sc, &iaq, 2000) != 0)
4003 return ETIMEDOUT;
4004 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK))
4005 return EIO;
4006
4007 fwbuild = le32toh(iaq.iaq_param[1]);
4008 fwver = le32toh(iaq.iaq_param[2]);
4009 apiver = le32toh(iaq.iaq_param[3]);
4010
4011 api_maj_ver = (uint16_t)apiver;
4012 api_min_ver = (uint16_t)(apiver >> 16);
4013
4014 aprint_normal(", FW %hu.%hu.%05u API %hu.%hu", (uint16_t)fwver,
4015 (uint16_t)(fwver >> 16), fwbuild, api_maj_ver, api_min_ver);
4016
4017 if (sc->sc_mac_type == I40E_MAC_X722) {
4018 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK |
4019 IXL_SC_AQ_FLAG_NVMREAD);
4020 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL);
4021 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RSS);
4022 }
4023
4024 #define IXL_API_VER(maj, min) (((uint32_t)(maj) << 16) | (min))
4025 if (IXL_API_VER(api_maj_ver, api_min_ver) >= IXL_API_VER(1, 5)) {
4026 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL);
4027 SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK);
4028 }
4029 #undef IXL_API_VER
4030
4031 return 0;
4032 }
4033
4034 static int
4035 ixl_get_nvm_version(struct ixl_softc *sc)
4036 {
4037 uint16_t nvmver, cfg_ptr, eetrack_hi, eetrack_lo, oem_hi, oem_lo;
4038 uint32_t eetrack, oem;
4039 uint16_t nvm_maj_ver, nvm_min_ver, oem_build;
4040 uint8_t oem_ver, oem_patch;
4041
4042 nvmver = cfg_ptr = eetrack_hi = eetrack_lo = oem_hi = oem_lo = 0;
4043 ixl_rd16_nvm(sc, I40E_SR_NVM_DEV_STARTER_VERSION, &nvmver);
4044 ixl_rd16_nvm(sc, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
4045 ixl_rd16_nvm(sc, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
4046 ixl_rd16_nvm(sc, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
4047 ixl_rd16_nvm(sc, cfg_ptr + I40E_NVM_OEM_VER_OFF, &oem_hi);
4048 ixl_rd16_nvm(sc, cfg_ptr + I40E_NVM_OEM_VER_OFF + 1, &oem_lo);
4049
4050 nvm_maj_ver = (uint16_t)__SHIFTOUT(nvmver, IXL_NVM_VERSION_HI_MASK);
4051 nvm_min_ver = (uint16_t)__SHIFTOUT(nvmver, IXL_NVM_VERSION_LO_MASK);
4052 eetrack = ((uint32_t)eetrack_hi << 16) | eetrack_lo;
4053 oem = ((uint32_t)oem_hi << 16) | oem_lo;
4054 oem_ver = __SHIFTOUT(oem, IXL_NVM_OEMVERSION_MASK);
4055 oem_build = __SHIFTOUT(oem, IXL_NVM_OEMBUILD_MASK);
4056 oem_patch = __SHIFTOUT(oem, IXL_NVM_OEMPATCH_MASK);
4057
4058 aprint_normal(" nvm %x.%02x etid %08x oem %d.%d.%d",
4059 nvm_maj_ver, nvm_min_ver, eetrack,
4060 oem_ver, oem_build, oem_patch);
4061
4062 return 0;
4063 }
4064
4065 static int
4066 ixl_pxe_clear(struct ixl_softc *sc)
4067 {
4068 struct ixl_aq_desc iaq;
4069 int rv;
4070
4071 memset(&iaq, 0, sizeof(iaq));
4072 iaq.iaq_opcode = htole16(IXL_AQ_OP_CLEAR_PXE_MODE);
4073 iaq.iaq_param[0] = htole32(0x2);
4074
4075 rv = ixl_atq_poll(sc, &iaq, 250);
4076
4077 ixl_wr(sc, I40E_GLLAN_RCTL_0, 0x1);
4078
4079 if (rv != 0)
4080 return ETIMEDOUT;
4081
4082 switch (iaq.iaq_retval) {
4083 case htole16(IXL_AQ_RC_OK):
4084 case htole16(IXL_AQ_RC_EEXIST):
4085 break;
4086 default:
4087 return EIO;
4088 }
4089
4090 return 0;
4091 }
4092
4093 static int
4094 ixl_lldp_shut(struct ixl_softc *sc)
4095 {
4096 struct ixl_aq_desc iaq;
4097
4098 memset(&iaq, 0, sizeof(iaq));
4099 iaq.iaq_opcode = htole16(IXL_AQ_OP_LLDP_STOP_AGENT);
4100 iaq.iaq_param[0] = htole32(IXL_LLDP_SHUTDOWN);
4101
4102 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4103 aprint_error_dev(sc->sc_dev, "STOP LLDP AGENT timeout\n");
4104 return -1;
4105 }
4106
4107 switch (iaq.iaq_retval) {
4108 case htole16(IXL_AQ_RC_EMODE):
4109 case htole16(IXL_AQ_RC_EPERM):
4110 /* ignore silently */
4111 default:
4112 break;
4113 }
4114
4115 return 0;
4116 }
4117
4118 static void
4119 ixl_parse_hw_capability(struct ixl_softc *sc, struct ixl_aq_capability *cap)
4120 {
4121 uint16_t id;
4122 uint32_t number, logical_id;
4123
4124 id = le16toh(cap->cap_id);
4125 number = le32toh(cap->number);
4126 logical_id = le32toh(cap->logical_id);
4127
4128 switch (id) {
4129 case IXL_AQ_CAP_RSS:
4130 sc->sc_rss_table_size = number;
4131 sc->sc_rss_table_entry_width = logical_id;
4132 break;
4133 case IXL_AQ_CAP_RXQ:
4134 case IXL_AQ_CAP_TXQ:
4135 sc->sc_nqueue_pairs_device = MIN(number,
4136 sc->sc_nqueue_pairs_device);
4137 break;
4138 }
4139 }
4140
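/*
 * Fetch the function capability list from the firmware.  If the initial
 * buffer is too small the firmware answers with ENOMEM and reports the
 * required length in iaq_datalen, in which case the buffer is
 * reallocated and the command retried.
 */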
4141 static int
4142 ixl_get_hw_capabilities(struct ixl_softc *sc)
4143 {
4144 struct ixl_dmamem idm;
4145 struct ixl_aq_desc iaq;
4146 struct ixl_aq_capability *caps;
4147 size_t i, ncaps;
4148 bus_size_t caps_size;
4149 uint16_t status;
4150 int rv;
4151
4152 caps_size = sizeof(caps[0]) * 40;
4153 memset(&iaq, 0, sizeof(iaq));
4154 iaq.iaq_opcode = htole16(IXL_AQ_OP_LIST_FUNC_CAP);
4155
4156 do {
4157 if (ixl_dmamem_alloc(sc, &idm, caps_size, 0) != 0) {
4158 return -1;
4159 }
4160
4161 iaq.iaq_flags = htole16(IXL_AQ_BUF |
4162 (caps_size > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4163 iaq.iaq_datalen = htole16(caps_size);
4164 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
4165
4166 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0,
4167 IXL_DMA_LEN(&idm), BUS_DMASYNC_PREREAD);
4168
4169 rv = ixl_atq_poll(sc, &iaq, 250);
4170
4171 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0,
4172 IXL_DMA_LEN(&idm), BUS_DMASYNC_POSTREAD);
4173
4174 if (rv != 0) {
4175 aprint_error(", HW capabilities timeout\n");
4176 goto done;
4177 }
4178
4179 status = le16toh(iaq.iaq_retval);
4180
4181 if (status == IXL_AQ_RC_ENOMEM) {
4182 caps_size = le16toh(iaq.iaq_datalen);
4183 ixl_dmamem_free(sc, &idm);
4184 }
4185 } while (status == IXL_AQ_RC_ENOMEM);
4186
4187 if (status != IXL_AQ_RC_OK) {
4188 aprint_error(", HW capabilities error\n");
4189 goto done;
4190 }
4191
4192 caps = IXL_DMA_KVA(&idm);
4193 ncaps = le16toh(iaq.iaq_param[1]);
4194
4195 for (i = 0; i < ncaps; i++) {
4196 ixl_parse_hw_capability(sc, &caps[i]);
4197 }
4198
4199 done:
4200 ixl_dmamem_free(sc, &idm);
4201 return rv;
4202 }
4203
4204 static int
4205 ixl_get_mac(struct ixl_softc *sc)
4206 {
4207 struct ixl_dmamem idm;
4208 struct ixl_aq_desc iaq;
4209 struct ixl_aq_mac_addresses *addrs;
4210 int rv;
4211
4212 if (ixl_dmamem_alloc(sc, &idm, sizeof(*addrs), 0) != 0) {
4213 aprint_error(", unable to allocate mac addresses\n");
4214 return -1;
4215 }
4216
4217 memset(&iaq, 0, sizeof(iaq));
4218 iaq.iaq_flags = htole16(IXL_AQ_BUF);
4219 iaq.iaq_opcode = htole16(IXL_AQ_OP_MAC_ADDRESS_READ);
4220 iaq.iaq_datalen = htole16(sizeof(*addrs));
4221 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
4222
4223 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4224 BUS_DMASYNC_PREREAD);
4225
4226 rv = ixl_atq_poll(sc, &iaq, 250);
4227
4228 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4229 BUS_DMASYNC_POSTREAD);
4230
4231 if (rv != 0) {
4232 aprint_error(", MAC ADDRESS READ timeout\n");
4233 rv = -1;
4234 goto done;
4235 }
4236 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4237 aprint_error(", MAC ADDRESS READ error\n");
4238 rv = -1;
4239 goto done;
4240 }
4241
4242 addrs = IXL_DMA_KVA(&idm);
4243 if (!ISSET(iaq.iaq_param[0], htole32(IXL_AQ_MAC_PORT_VALID))) {
4244 printf(", port address is not valid\n");
4245 goto done;
4246 }
4247
4248 memcpy(sc->sc_enaddr, addrs->port, ETHER_ADDR_LEN);
4249 rv = 0;
4250
4251 done:
4252 ixl_dmamem_free(sc, &idm);
4253 return rv;
4254 }
4255
4256 static int
4257 ixl_get_switch_config(struct ixl_softc *sc)
4258 {
4259 struct ixl_dmamem idm;
4260 struct ixl_aq_desc iaq;
4261 struct ixl_aq_switch_config *hdr;
4262 struct ixl_aq_switch_config_element *elms, *elm;
4263 unsigned int nelm, i;
4264 int rv;
4265
4266 if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
4267 aprint_error_dev(sc->sc_dev,
4268 "unable to allocate switch config buffer\n");
4269 return -1;
4270 }
4271
4272 memset(&iaq, 0, sizeof(iaq));
4273 iaq.iaq_flags = htole16(IXL_AQ_BUF |
4274 (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4275 iaq.iaq_opcode = htole16(IXL_AQ_OP_SWITCH_GET_CONFIG);
4276 iaq.iaq_datalen = htole16(IXL_AQ_BUFLEN);
4277 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
4278
4279 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4280 BUS_DMASYNC_PREREAD);
4281
4282 rv = ixl_atq_poll(sc, &iaq, 250);
4283
4284 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4285 BUS_DMASYNC_POSTREAD);
4286
4287 if (rv != 0) {
4288 aprint_error_dev(sc->sc_dev, "GET SWITCH CONFIG timeout\n");
4289 rv = -1;
4290 goto done;
4291 }
4292 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4293 aprint_error_dev(sc->sc_dev, "GET SWITCH CONFIG error\n");
4294 rv = -1;
4295 goto done;
4296 }
4297
4298 hdr = IXL_DMA_KVA(&idm);
4299 elms = (struct ixl_aq_switch_config_element *)(hdr + 1);
4300
4301 nelm = le16toh(hdr->num_reported);
4302 if (nelm < 1) {
4303 aprint_error_dev(sc->sc_dev, "no switch config available\n");
4304 rv = -1;
4305 goto done;
4306 }
4307
4308 for (i = 0; i < nelm; i++) {
4309 elm = &elms[i];
4310
4311 aprint_debug_dev(sc->sc_dev,
4312 "type %x revision %u seid %04x\n",
4313 elm->type, elm->revision, le16toh(elm->seid));
4314 aprint_debug_dev(sc->sc_dev,
4315 "uplink %04x downlink %04x\n",
4316 le16toh(elm->uplink_seid),
4317 le16toh(elm->downlink_seid));
4318 aprint_debug_dev(sc->sc_dev,
4319 "conntype %x scheduler %04x extra %04x\n",
4320 elm->connection_type,
4321 le16toh(elm->scheduler_id),
4322 le16toh(elm->element_info));
4323 }
4324
4325 elm = &elms[0];
4326
4327 sc->sc_uplink_seid = elm->uplink_seid;
4328 sc->sc_downlink_seid = elm->downlink_seid;
4329 sc->sc_seid = elm->seid;
4330
4331 if ((sc->sc_uplink_seid == htole16(0)) !=
4332 (sc->sc_downlink_seid == htole16(0))) {
4333 aprint_error_dev(sc->sc_dev, "SEIDs are misconfigured\n");
4334 rv = -1;
4335 goto done;
4336 }
4337
4338 done:
4339 ixl_dmamem_free(sc, &idm);
4340 return rv;
4341 }
4342
4343 static int
4344 ixl_phy_mask_ints(struct ixl_softc *sc)
4345 {
4346 struct ixl_aq_desc iaq;
4347
4348 memset(&iaq, 0, sizeof(iaq));
4349 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_EVENT_MASK);
4350 iaq.iaq_param[2] = htole32(IXL_AQ_PHY_EV_MASK &
4351 ~(IXL_AQ_PHY_EV_LINK_UPDOWN | IXL_AQ_PHY_EV_MODULE_QUAL_FAIL |
4352 IXL_AQ_PHY_EV_MEDIA_NA));
4353
4354 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4355 aprint_error_dev(sc->sc_dev, "SET PHY EVENT MASK timeout\n");
4356 return -1;
4357 }
4358 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4359 aprint_error_dev(sc->sc_dev, "SET PHY EVENT MASK error\n");
4360 return -1;
4361 }
4362
4363 return 0;
4364 }
4365
4366 static int
4367 ixl_get_phy_abilities(struct ixl_softc *sc, struct ixl_dmamem *idm)
4368 {
4369 struct ixl_aq_desc iaq;
4370 int rv;
4371
4372 memset(&iaq, 0, sizeof(iaq));
4373 iaq.iaq_flags = htole16(IXL_AQ_BUF |
4374 (IXL_DMA_LEN(idm) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4375 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_GET_ABILITIES);
4376 iaq.iaq_datalen = htole16(IXL_DMA_LEN(idm));
4377 iaq.iaq_param[0] = htole32(IXL_AQ_PHY_REPORT_INIT);
4378 ixl_aq_dva(&iaq, IXL_DMA_DVA(idm));
4379
4380 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
4381 BUS_DMASYNC_PREREAD);
4382
4383 rv = ixl_atq_poll(sc, &iaq, 250);
4384
4385 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
4386 BUS_DMASYNC_POSTREAD);
4387
4388 if (rv != 0)
4389 return -1;
4390
4391 return le16toh(iaq.iaq_retval);
4392 }
4393
4394 static int
4395 ixl_get_phy_info(struct ixl_softc *sc)
4396 {
4397 struct ixl_dmamem idm;
4398 struct ixl_aq_phy_abilities *phy;
4399 int rv;
4400
4401 if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
4402 aprint_error_dev(sc->sc_dev,
4403 "unable to allocate phy abilities buffer\n");
4404 return -1;
4405 }
4406
4407 rv = ixl_get_phy_abilities(sc, &idm);
4408 switch (rv) {
4409 case -1:
4410 aprint_error_dev(sc->sc_dev, "GET PHY ABILITIES timeout\n");
4411 goto done;
4412 case IXL_AQ_RC_OK:
4413 break;
4414 case IXL_AQ_RC_EIO:
4415 aprint_error_dev(sc->sc_dev, "unable to query phy types\n");
4416 goto done;
4417 default:
4418 aprint_error_dev(sc->sc_dev,
4419 "GET PHY ABILITIES error %u\n", rv);
4420 goto done;
4421 }
4422
4423 phy = IXL_DMA_KVA(&idm);
4424
4425 sc->sc_phy_types = le32toh(phy->phy_type);
4426 sc->sc_phy_types |= (uint64_t)le32toh(phy->phy_type_ext) << 32;
4427
4428 sc->sc_phy_abilities = phy->abilities;
4429 sc->sc_phy_linkspeed = phy->link_speed;
4430 sc->sc_phy_fec_cfg = phy->fec_cfg_curr_mod_ext_info &
4431 (IXL_AQ_ENABLE_FEC_KR | IXL_AQ_ENABLE_FEC_RS |
4432 IXL_AQ_REQUEST_FEC_KR | IXL_AQ_REQUEST_FEC_RS);
4433 sc->sc_eee_cap = phy->eee_capability;
4434 sc->sc_eeer_val = phy->eeer_val;
4435 sc->sc_d3_lpan = phy->d3_lpan;
4436
4437 rv = 0;
4438
4439 done:
4440 ixl_dmamem_free(sc, &idm);
4441 return rv;
4442 }
4443
4444 static int
4445 ixl_set_phy_config(struct ixl_softc *sc,
4446 uint8_t link_speed, uint8_t abilities, bool polling)
4447 {
4448 struct ixl_aq_phy_param *param;
4449 struct ixl_atq iatq;
4450 struct ixl_aq_desc *iaq;
4451 int error;
4452
4453 memset(&iatq, 0, sizeof(iatq));
4454
4455 iaq = &iatq.iatq_desc;
4456 iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_CONFIG);
4457 param = (struct ixl_aq_phy_param *)&iaq->iaq_param;
4458 param->phy_types = htole32((uint32_t)sc->sc_phy_types);
4459 param->phy_type_ext = (uint8_t)(sc->sc_phy_types >> 32);
4460 param->link_speed = link_speed;
4461 param->abilities = abilities | IXL_AQ_PHY_ABILITY_AUTO_LINK;
4462 param->fec_cfg = sc->sc_phy_fec_cfg;
4463 param->eee_capability = sc->sc_eee_cap;
4464 param->eeer_val = sc->sc_eeer_val;
4465 param->d3_lpan = sc->sc_d3_lpan;
4466
4467 if (polling)
4468 error = ixl_atq_poll(sc, iaq, 250);
4469 else
4470 error = ixl_atq_exec(sc, &iatq);
4471
4472 if (error != 0)
4473 return error;
4474
4475 switch (le16toh(iaq->iaq_retval)) {
4476 case IXL_AQ_RC_OK:
4477 break;
4478 case IXL_AQ_RC_EPERM:
4479 return EPERM;
4480 default:
4481 return EIO;
4482 }
4483
4484 return 0;
4485 }
4486
4487 static int
4488 ixl_set_phy_autoselect(struct ixl_softc *sc)
4489 {
4490 uint8_t link_speed, abilities;
4491
4492 link_speed = sc->sc_phy_linkspeed;
4493 abilities = IXL_PHY_ABILITY_LINKUP | IXL_PHY_ABILITY_AUTONEGO;
4494
4495 return ixl_set_phy_config(sc, link_speed, abilities, true);
4496 }
4497
4498 static int
4499 ixl_get_link_status_poll(struct ixl_softc *sc, int *l)
4500 {
4501 struct ixl_aq_desc iaq;
4502 struct ixl_aq_link_param *param;
4503 int link;
4504
4505 memset(&iaq, 0, sizeof(iaq));
4506 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
4507 param = (struct ixl_aq_link_param *)iaq.iaq_param;
4508 param->notify = IXL_AQ_LINK_NOTIFY;
4509
4510 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4511 return ETIMEDOUT;
4512 }
4513 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4514 return EIO;
4515 }
4516
4517 /* It is unnecessary to hold the lock */
4518 link = ixl_set_link_status_locked(sc, &iaq);
4519
4520 if (l != NULL)
4521 *l = link;
4522
4523 return 0;
4524 }
4525
4526 static int
4527 ixl_get_vsi(struct ixl_softc *sc)
4528 {
4529 struct ixl_dmamem *vsi = &sc->sc_scratch;
4530 struct ixl_aq_desc iaq;
4531 struct ixl_aq_vsi_param *param;
4532 struct ixl_aq_vsi_reply *reply;
4533 struct ixl_aq_vsi_data *data;
4534 int rv;
4535
4536 /* grumble, vsi info isn't "known" at compile time */
4537
4538 memset(&iaq, 0, sizeof(iaq));
4539 iaq.iaq_flags = htole16(IXL_AQ_BUF |
4540 (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4541 iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VSI_PARAMS);
4542 iaq.iaq_datalen = htole16(IXL_DMA_LEN(vsi));
4543 ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
4544
4545 param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
4546 param->uplink_seid = sc->sc_seid;
4547
4548 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4549 BUS_DMASYNC_PREREAD);
4550
4551 rv = ixl_atq_poll(sc, &iaq, 250);
4552
4553 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4554 BUS_DMASYNC_POSTREAD);
4555
4556 if (rv != 0) {
4557 return ETIMEDOUT;
4558 }
4559
4560 switch (le16toh(iaq.iaq_retval)) {
4561 case IXL_AQ_RC_OK:
4562 break;
4563 case IXL_AQ_RC_ENOENT:
4564 return ENOENT;
4565 case IXL_AQ_RC_EACCES:
4566 return EACCES;
4567 default:
4568 return EIO;
4569 }
4570
4571 reply = (struct ixl_aq_vsi_reply *)iaq.iaq_param;
4572 sc->sc_vsi_number = le16toh(reply->vsi_number);
4573 data = IXL_DMA_KVA(vsi);
4574 sc->sc_vsi_stat_counter_idx = le16toh(data->stat_counter_idx);
4575
4576 return 0;
4577 }
4578
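/*
 * Push the updated VSI parameters to the firmware: map the active
 * queue pairs contiguously into traffic class 0 and select the VLAN
 * tag handling mode according to ETHERCAP_VLAN_HWTAGGING.
 */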
4579 static int
4580 ixl_set_vsi(struct ixl_softc *sc)
4581 {
4582 struct ixl_dmamem *vsi = &sc->sc_scratch;
4583 struct ixl_aq_desc iaq;
4584 struct ixl_aq_vsi_param *param;
4585 struct ixl_aq_vsi_data *data = IXL_DMA_KVA(vsi);
4586 unsigned int qnum;
4587 uint16_t val;
4588 int rv;
4589
4590 qnum = sc->sc_nqueue_pairs - 1;
4591
4592 data->valid_sections = htole16(IXL_AQ_VSI_VALID_QUEUE_MAP |
4593 IXL_AQ_VSI_VALID_VLAN);
4594
4595 CLR(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_MASK));
4596 SET(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_CONTIG));
4597 data->queue_mapping[0] = htole16(0);
4598 data->tc_mapping[0] = htole16((0 << IXL_AQ_VSI_TC_Q_OFFSET_SHIFT) |
4599 (qnum << IXL_AQ_VSI_TC_Q_NUMBER_SHIFT));
4600
4601 val = le16toh(data->port_vlan_flags);
4602 CLR(val, IXL_AQ_VSI_PVLAN_MODE_MASK | IXL_AQ_VSI_PVLAN_EMOD_MASK);
4603 SET(val, IXL_AQ_VSI_PVLAN_MODE_ALL);
4604
4605 if (ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWTAGGING)) {
4606 SET(val, IXL_AQ_VSI_PVLAN_EMOD_STR_BOTH);
4607 } else {
4608 SET(val, IXL_AQ_VSI_PVLAN_EMOD_NOTHING);
4609 }
4610
4611 data->port_vlan_flags = htole16(val);
4612
4613 /* grumble, vsi info isn't "known" at compile time */
4614
4615 memset(&iaq, 0, sizeof(iaq));
4616 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD |
4617 (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4618 iaq.iaq_opcode = htole16(IXL_AQ_OP_UPD_VSI_PARAMS);
4619 iaq.iaq_datalen = htole16(IXL_DMA_LEN(vsi));
4620 ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
4621
4622 param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
4623 param->uplink_seid = sc->sc_seid;
4624
4625 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4626 BUS_DMASYNC_PREWRITE);
4627
4628 rv = ixl_atq_poll(sc, &iaq, 250);
4629
4630 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4631 BUS_DMASYNC_POSTWRITE);
4632
4633 if (rv != 0) {
4634 return ETIMEDOUT;
4635 }
4636
4637 switch (le16toh(iaq.iaq_retval)) {
4638 case IXL_AQ_RC_OK:
4639 break;
4640 case IXL_AQ_RC_ENOENT:
4641 return ENOENT;
4642 case IXL_AQ_RC_EACCES:
4643 return EACCES;
4644 default:
4645 return EIO;
4646 }
4647
4648 return 0;
4649 }
4650
4651 static void
4652 ixl_set_filter_control(struct ixl_softc *sc)
4653 {
4654 uint32_t reg;
4655
4656 reg = ixl_rd_rx_csr(sc, I40E_PFQF_CTL_0);
4657
4658 CLR(reg, I40E_PFQF_CTL_0_HASHLUTSIZE_MASK);
4659 SET(reg, I40E_HASH_LUT_SIZE_128 << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT);
4660
4661 SET(reg, I40E_PFQF_CTL_0_FD_ENA_MASK);
4662 SET(reg, I40E_PFQF_CTL_0_ETYPE_ENA_MASK);
4663 SET(reg, I40E_PFQF_CTL_0_MACVLAN_ENA_MASK);
4664
4665 ixl_wr_rx_csr(sc, I40E_PFQF_CTL_0, reg);
4666 }
4667
4668 static inline void
4669 ixl_get_default_rss_key(uint32_t *buf, size_t len)
4670 {
4671 size_t cplen;
4672 uint8_t rss_seed[RSS_KEYSIZE];
4673
4674 rss_getkey(rss_seed);
4675 memset(buf, 0, len);
4676
4677 cplen = MIN(len, sizeof(rss_seed));
4678 memcpy(buf, rss_seed, cplen);
4679 }
4680
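/*
 * Load the RSS hash key via the SET RSS KEY admin command.  The key is
 * split into the standard and extended parts of the command buffer;
 * anything beyond the length the hardware accepts is silently dropped.
 */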
4681 static int
4682 ixl_set_rss_key(struct ixl_softc *sc, uint8_t *key, size_t keylen)
4683 {
4684 struct ixl_dmamem *idm;
4685 struct ixl_atq iatq;
4686 struct ixl_aq_desc *iaq;
4687 struct ixl_aq_rss_key_param *param;
4688 struct ixl_aq_rss_key_data *data;
4689 size_t len, datalen, stdlen, extlen;
4690 uint16_t vsi_id;
4691 int rv;
4692
4693 memset(&iatq, 0, sizeof(iatq));
4694 iaq = &iatq.iatq_desc;
4695 idm = &sc->sc_aqbuf;
4696
4697 datalen = sizeof(*data);
4698
4699 /* XXX the buffer size must not exceed the size of the register-based key */
4700 datalen = MIN(IXL_RSS_KEY_SIZE_REG * sizeof(uint32_t), datalen);
4701
4702 iaq->iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD |
4703 (datalen > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4704 iaq->iaq_opcode = htole16(IXL_AQ_OP_RSS_SET_KEY);
4705 iaq->iaq_datalen = htole16(datalen);
4706
4707 param = (struct ixl_aq_rss_key_param *)iaq->iaq_param;
4708 vsi_id = (sc->sc_vsi_number << IXL_AQ_RSSKEY_VSI_ID_SHIFT) |
4709 IXL_AQ_RSSKEY_VSI_VALID;
4710 param->vsi_id = htole16(vsi_id);
4711
4712 memset(IXL_DMA_KVA(idm), 0, IXL_DMA_LEN(idm));
4713 data = IXL_DMA_KVA(idm);
4714
4715 len = MIN(keylen, datalen);
4716 stdlen = MIN(sizeof(data->standard_rss_key), len);
4717 memcpy(data->standard_rss_key, key, stdlen);
4718 len = (len > stdlen) ? (len - stdlen) : 0;
4719
4720 extlen = MIN(sizeof(data->extended_hash_key), len);
4722 memcpy(data->extended_hash_key, key + stdlen, extlen);
4723
4724 ixl_aq_dva(iaq, IXL_DMA_DVA(idm));
4725
4726 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
4727 IXL_DMA_LEN(idm), BUS_DMASYNC_PREWRITE);
4728
4729 rv = ixl_atq_exec(sc, &iatq);
4730
4731 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
4732 IXL_DMA_LEN(idm), BUS_DMASYNC_POSTWRITE);
4733
4734 if (rv != 0) {
4735 return ETIMEDOUT;
4736 }
4737
4738 if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK)) {
4739 return EIO;
4740 }
4741
4742 return 0;
4743 }
4744
4745 static int
4746 ixl_set_rss_lut(struct ixl_softc *sc, uint8_t *lut, size_t lutlen)
4747 {
4748 struct ixl_dmamem *idm;
4749 struct ixl_atq iatq;
4750 struct ixl_aq_desc *iaq;
4751 struct ixl_aq_rss_lut_param *param;
4752 uint16_t vsi_id;
4753 uint8_t *data;
4754 size_t dmalen;
4755 int rv;
4756
4757 memset(&iatq, 0, sizeof(iatq));
4758 iaq = &iatq.iatq_desc;
4759 idm = &sc->sc_aqbuf;
4760
4761 dmalen = MIN(lutlen, IXL_DMA_LEN(idm));
4762
4763 iaq->iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD |
4764 (dmalen > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4765 iaq->iaq_opcode = htole16(IXL_AQ_OP_RSS_SET_LUT);
4766 iaq->iaq_datalen = htole16(dmalen);
4767
4768 memset(IXL_DMA_KVA(idm), 0, IXL_DMA_LEN(idm));
4769 data = IXL_DMA_KVA(idm);
4770 memcpy(data, lut, dmalen);
4771 ixl_aq_dva(iaq, IXL_DMA_DVA(idm));
4772
4773 param = (struct ixl_aq_rss_lut_param *)iaq->iaq_param;
4774 vsi_id = (sc->sc_vsi_number << IXL_AQ_RSSLUT_VSI_ID_SHIFT) |
4775 IXL_AQ_RSSLUT_VSI_VALID;
4776 param->vsi_id = htole16(vsi_id);
4777 param->flags = htole16(IXL_AQ_RSSLUT_TABLE_TYPE_PF <<
4778 IXL_AQ_RSSLUT_TABLE_TYPE_SHIFT);
4779
4780 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
4781 IXL_DMA_LEN(idm), BUS_DMASYNC_PREWRITE);
4782
4783 rv = ixl_atq_exec(sc, &iatq);
4784
4785 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
4786 IXL_DMA_LEN(idm), BUS_DMASYNC_POSTWRITE);
4787
4788 if (rv != 0) {
4789 return ETIMEDOUT;
4790 }
4791
4792 if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK)) {
4793 return EIO;
4794 }
4795
4796 return 0;
4797 }
4798
4799 static int
4800 ixl_register_rss_key(struct ixl_softc *sc)
4801 {
4802 uint32_t rss_seed[IXL_RSS_KEY_SIZE_REG];
4803 int rv;
4804 size_t i;
4805
4806 ixl_get_default_rss_key(rss_seed, sizeof(rss_seed));
4807
4808 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RSS)) {
4809 rv = ixl_set_rss_key(sc, (uint8_t*)rss_seed,
4810 sizeof(rss_seed));
4811 } else {
4812 rv = 0;
4813 for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
4814 ixl_wr_rx_csr(sc, I40E_PFQF_HKEY(i), rss_seed[i]);
4815 }
4816 }
4817
4818 return rv;
4819 }
4820
4821 static void
4822 ixl_register_rss_pctype(struct ixl_softc *sc)
4823 {
4824 uint64_t set_hena = 0;
4825 uint32_t hena0, hena1;
4826
4827 /*
4828 * We use TCP/UDP with IPv4/IPv6 by default.
4829 * Note: the device cannot use only the IP header of
4830 * TCP/UDP packets for the RSS hash calculation.
4831 */
4832 if (sc->sc_mac_type == I40E_MAC_X722)
4833 set_hena = IXL_RSS_HENA_DEFAULT_X722;
4834 else
4835 set_hena = IXL_RSS_HENA_DEFAULT_XL710;
4836
4837 hena0 = ixl_rd_rx_csr(sc, I40E_PFQF_HENA(0));
4838 hena1 = ixl_rd_rx_csr(sc, I40E_PFQF_HENA(1));
4839
4840 SET(hena0, set_hena);
4841 SET(hena1, set_hena >> 32);
4842
4843 ixl_wr_rx_csr(sc, I40E_PFQF_HENA(0), hena0);
4844 ixl_wr_rx_csr(sc, I40E_PFQF_HENA(1), hena1);
4845 }
4846
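/*
 * Fill the RSS lookup table so that hash buckets are spread round-robin
 * across the active queue pairs, then load it either through the admin
 * queue or by writing the PFQF_HLUT registers directly, depending on
 * what the firmware supports.
 */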
4847 static int
4848 ixl_register_rss_hlut(struct ixl_softc *sc)
4849 {
4850 unsigned int qid;
4851 uint8_t hlut_buf[512], lut_mask;
4852 uint32_t *hluts;
4853 size_t i, hluts_num;
4854 int rv;
4855
4856 lut_mask = (0x01 << sc->sc_rss_table_entry_width) - 1;
4857
4858 for (i = 0; i < sc->sc_rss_table_size; i++) {
4859 qid = i % sc->sc_nqueue_pairs;
4860 hlut_buf[i] = qid & lut_mask;
4861 }
4862
4863 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RSS)) {
4864 rv = ixl_set_rss_lut(sc, hlut_buf, sizeof(hlut_buf));
4865 } else {
4866 rv = 0;
4867 hluts = (uint32_t *)hlut_buf;
4868 hluts_num = sc->sc_rss_table_size >> 2;
4869 for (i = 0; i < hluts_num; i++) {
4870 ixl_wr(sc, I40E_PFQF_HLUT(i), hluts[i]);
4871 }
4872 ixl_flush(sc);
4873 }
4874
4875 return rv;
4876 }
4877
4878 static void
4879 ixl_config_rss(struct ixl_softc *sc)
4880 {
4881
4882 KASSERT(mutex_owned(&sc->sc_cfg_lock));
4883
4884 ixl_register_rss_key(sc);
4885 ixl_register_rss_pctype(sc);
4886 ixl_register_rss_hlut(sc);
4887 }
4888
4889 static const struct ixl_phy_type *
4890 ixl_search_phy_type(uint8_t phy_type)
4891 {
4892 const struct ixl_phy_type *itype;
4893 uint64_t mask;
4894 unsigned int i;
4895
4896 if (phy_type >= 64)
4897 return NULL;
4898
4899 mask = 1ULL << phy_type;
4900
4901 for (i = 0; i < __arraycount(ixl_phy_type_map); i++) {
4902 itype = &ixl_phy_type_map[i];
4903
4904 if (ISSET(itype->phy_type, mask))
4905 return itype;
4906 }
4907
4908 return NULL;
4909 }
4910
4911 static uint64_t
4912 ixl_search_link_speed(uint8_t link_speed)
4913 {
4914 const struct ixl_speed_type *type;
4915 unsigned int i;
4916
4917 for (i = 0; i < __arraycount(ixl_speed_type_map); i++) {
4918 type = &ixl_speed_type_map[i];
4919
4920 if (ISSET(type->dev_speed, link_speed))
4921 return type->net_speed;
4922 }
4923
4924 return 0;
4925 }
4926
4927 static uint8_t
4928 ixl_search_baudrate(uint64_t baudrate)
4929 {
4930 const struct ixl_speed_type *type;
4931 unsigned int i;
4932
4933 for (i = 0; i < __arraycount(ixl_speed_type_map); i++) {
4934 type = &ixl_speed_type_map[i];
4935
4936 if (type->net_speed == baudrate) {
4937 return type->dev_speed;
4938 }
4939 }
4940
4941 return 0;
4942 }
4943
4944 static int
4945 ixl_restart_an(struct ixl_softc *sc)
4946 {
4947 struct ixl_aq_desc iaq;
4948
4949 memset(&iaq, 0, sizeof(iaq));
4950 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_RESTART_AN);
4951 iaq.iaq_param[0] =
4952 htole32(IXL_AQ_PHY_RESTART_AN | IXL_AQ_PHY_LINK_ENABLE);
4953
4954 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4955 aprint_error_dev(sc->sc_dev, "RESTART AN timeout\n");
4956 return -1;
4957 }
4958 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4959 aprint_error_dev(sc->sc_dev, "RESTART AN error\n");
4960 return -1;
4961 }
4962
4963 return 0;
4964 }
4965
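/*
 * Add a MAC+VLAN filter to our VSI.  The single filter element is
 * passed to the firmware through the scratch DMA buffer and the admin
 * queue return code is mapped to an errno.
 */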
4966 static int
4967 ixl_add_macvlan(struct ixl_softc *sc, const uint8_t *macaddr,
4968 uint16_t vlan, uint16_t flags)
4969 {
4970 struct ixl_aq_desc iaq;
4971 struct ixl_aq_add_macvlan *param;
4972 struct ixl_aq_add_macvlan_elem *elem;
4973
4974 memset(&iaq, 0, sizeof(iaq));
4975 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4976 iaq.iaq_opcode = htole16(IXL_AQ_OP_ADD_MACVLAN);
4977 iaq.iaq_datalen = htole16(sizeof(*elem));
4978 ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));
4979
4980 param = (struct ixl_aq_add_macvlan *)&iaq.iaq_param;
4981 param->num_addrs = htole16(1);
4982 param->seid0 = htole16(0x8000) | sc->sc_seid;
4983 param->seid1 = 0;
4984 param->seid2 = 0;
4985
4986 elem = IXL_DMA_KVA(&sc->sc_scratch);
4987 memset(elem, 0, sizeof(*elem));
4988 memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
4989 elem->flags = htole16(IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH | flags);
4990 elem->vlan = htole16(vlan);
4991
4992 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4993 return IXL_AQ_RC_EINVAL;
4994 }
4995
4996 switch (le16toh(iaq.iaq_retval)) {
4997 case IXL_AQ_RC_OK:
4998 break;
4999 case IXL_AQ_RC_ENOSPC:
5000 return ENOSPC;
5001 case IXL_AQ_RC_ENOENT:
5002 return ENOENT;
5003 case IXL_AQ_RC_EACCES:
5004 return EACCES;
5005 case IXL_AQ_RC_EEXIST:
5006 return EEXIST;
5007 case IXL_AQ_RC_EINVAL:
5008 return EINVAL;
5009 default:
5010 return EIO;
5011 }
5012
5013 return 0;
5014 }
5015
5016 static int
5017 ixl_remove_macvlan(struct ixl_softc *sc, const uint8_t *macaddr,
5018 uint16_t vlan, uint16_t flags)
5019 {
5020 struct ixl_aq_desc iaq;
5021 struct ixl_aq_remove_macvlan *param;
5022 struct ixl_aq_remove_macvlan_elem *elem;
5023
5024 memset(&iaq, 0, sizeof(iaq));
5025 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
5026 iaq.iaq_opcode = htole16(IXL_AQ_OP_REMOVE_MACVLAN);
5027 iaq.iaq_datalen = htole16(sizeof(*elem));
5028 ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));
5029
5030 param = (struct ixl_aq_remove_macvlan *)&iaq.iaq_param;
5031 param->num_addrs = htole16(1);
5032 param->seid0 = htole16(0x8000) | sc->sc_seid;
5033 param->seid1 = 0;
5034 param->seid2 = 0;
5035
5036 elem = IXL_DMA_KVA(&sc->sc_scratch);
5037 memset(elem, 0, sizeof(*elem));
5038 memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
5039 elem->flags = htole16(IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH | flags);
5040 elem->vlan = htole16(vlan);
5041
5042 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
5043 return EINVAL;
5044 }
5045
5046 switch (le16toh(iaq.iaq_retval)) {
5047 case IXL_AQ_RC_OK:
5048 break;
5049 case IXL_AQ_RC_ENOENT:
5050 return ENOENT;
5051 case IXL_AQ_RC_EACCES:
5052 return EACCES;
5053 case IXL_AQ_RC_EINVAL:
5054 return EINVAL;
5055 default:
5056 return EIO;
5057 }
5058
5059 return 0;
5060 }
5061
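/*
 * Set up the Host Memory Cache (HMC): the hardware keeps its Tx/Rx
 * queue contexts in memory supplied by the driver.  Size the backing
 * memory for each object type, build the page descriptor entries that
 * point at it, program the segment descriptors through PFHMC_SDCMD and
 * finally write the per-object base and count registers.
 */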
5062 static int
5063 ixl_hmc(struct ixl_softc *sc)
5064 {
5065 struct {
5066 uint32_t count;
5067 uint32_t minsize;
5068 bus_size_t objsiz;
5069 bus_size_t setoff;
5070 bus_size_t setcnt;
5071 } regs[] = {
5072 {
5073 0,
5074 IXL_HMC_TXQ_MINSIZE,
5075 I40E_GLHMC_LANTXOBJSZ,
5076 I40E_GLHMC_LANTXBASE(sc->sc_pf_id),
5077 I40E_GLHMC_LANTXCNT(sc->sc_pf_id),
5078 },
5079 {
5080 0,
5081 IXL_HMC_RXQ_MINSIZE,
5082 I40E_GLHMC_LANRXOBJSZ,
5083 I40E_GLHMC_LANRXBASE(sc->sc_pf_id),
5084 I40E_GLHMC_LANRXCNT(sc->sc_pf_id),
5085 },
5086 {
5087 0,
5088 0,
5089 I40E_GLHMC_FCOEDDPOBJSZ,
5090 I40E_GLHMC_FCOEDDPBASE(sc->sc_pf_id),
5091 I40E_GLHMC_FCOEDDPCNT(sc->sc_pf_id),
5092 },
5093 {
5094 0,
5095 0,
5096 I40E_GLHMC_FCOEFOBJSZ,
5097 I40E_GLHMC_FCOEFBASE(sc->sc_pf_id),
5098 I40E_GLHMC_FCOEFCNT(sc->sc_pf_id),
5099 },
5100 };
5101 struct ixl_hmc_entry *e;
5102 uint64_t size, dva;
5103 uint8_t *kva;
5104 uint64_t *sdpage;
5105 unsigned int i;
5106 int npages, tables;
5107 uint32_t reg;
5108
5109 CTASSERT(__arraycount(regs) <= __arraycount(sc->sc_hmc_entries));
5110
5111 regs[IXL_HMC_LAN_TX].count = regs[IXL_HMC_LAN_RX].count =
5112 ixl_rd(sc, I40E_GLHMC_LANQMAX);
5113
5114 size = 0;
5115 for (i = 0; i < __arraycount(regs); i++) {
5116 e = &sc->sc_hmc_entries[i];
5117
5118 e->hmc_count = regs[i].count;
5119 reg = ixl_rd(sc, regs[i].objsiz);
5120 e->hmc_size = IXL_BIT_ULL(0x3F & reg);
5121 e->hmc_base = size;
5122
5123 if ((e->hmc_size * 8) < regs[i].minsize) {
5124 aprint_error_dev(sc->sc_dev,
5125 "kernel hmc entry is too big\n");
5126 return -1;
5127 }
5128
5129 size += roundup(e->hmc_size * e->hmc_count, IXL_HMC_ROUNDUP);
5130 }
5131 size = roundup(size, IXL_HMC_PGSIZE);
5132 npages = size / IXL_HMC_PGSIZE;
5133
5134 tables = roundup(size, IXL_HMC_L2SZ) / IXL_HMC_L2SZ;
5135
5136 if (ixl_dmamem_alloc(sc, &sc->sc_hmc_pd, size, IXL_HMC_PGSIZE) != 0) {
5137 aprint_error_dev(sc->sc_dev,
5138 "unable to allocate hmc pd memory\n");
5139 return -1;
5140 }
5141
5142 if (ixl_dmamem_alloc(sc, &sc->sc_hmc_sd, tables * IXL_HMC_PGSIZE,
5143 IXL_HMC_PGSIZE) != 0) {
5144 aprint_error_dev(sc->sc_dev,
5145 "unable to allocate hmc sd memory\n");
5146 ixl_dmamem_free(sc, &sc->sc_hmc_pd);
5147 return -1;
5148 }
5149
5150 kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
5151 memset(kva, 0, IXL_DMA_LEN(&sc->sc_hmc_pd));
5152
5153 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
5154 0, IXL_DMA_LEN(&sc->sc_hmc_pd),
5155 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5156
5157 dva = IXL_DMA_DVA(&sc->sc_hmc_pd);
5158 sdpage = IXL_DMA_KVA(&sc->sc_hmc_sd);
5159 memset(sdpage, 0, IXL_DMA_LEN(&sc->sc_hmc_sd));
5160
5161 for (i = 0; (int)i < npages; i++) {
5162 *sdpage = htole64(dva | IXL_HMC_PDVALID);
5163 sdpage++;
5164
5165 dva += IXL_HMC_PGSIZE;
5166 }
5167
5168 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_sd),
5169 0, IXL_DMA_LEN(&sc->sc_hmc_sd),
5170 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5171
5172 dva = IXL_DMA_DVA(&sc->sc_hmc_sd);
5173 for (i = 0; (int)i < tables; i++) {
5174 uint32_t count;
5175
5176 KASSERT(npages >= 0);
5177
5178 count = ((unsigned int)npages > IXL_HMC_PGS) ?
5179 IXL_HMC_PGS : (unsigned int)npages;
5180
5181 ixl_wr(sc, I40E_PFHMC_SDDATAHIGH, dva >> 32);
5182 ixl_wr(sc, I40E_PFHMC_SDDATALOW, dva |
5183 (count << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
5184 (1U << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT));
5185 ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
5186 ixl_wr(sc, I40E_PFHMC_SDCMD,
5187 (1U << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | i);
5188
5189 npages -= IXL_HMC_PGS;
5190 dva += IXL_HMC_PGSIZE;
5191 }
5192
5193 for (i = 0; i < __arraycount(regs); i++) {
5194 e = &sc->sc_hmc_entries[i];
5195
5196 ixl_wr(sc, regs[i].setoff, e->hmc_base / IXL_HMC_ROUNDUP);
5197 ixl_wr(sc, regs[i].setcnt, e->hmc_count);
5198 }
5199
5200 return 0;
5201 }
5202
5203 static void
5204 ixl_hmc_free(struct ixl_softc *sc)
5205 {
5206 ixl_dmamem_free(sc, &sc->sc_hmc_sd);
5207 ixl_dmamem_free(sc, &sc->sc_hmc_pd);
5208 }
5209
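/*
 * Pack a host-side context structure into the bit-exact layout the
 * hardware expects.  Each entry in "packing" describes one field: its
 * byte offset in the source structure, its width in bits and the bit
 * position (lsb) it occupies in the destination buffer.
 */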
5210 static void
5211 ixl_hmc_pack(void *d, const void *s, const struct ixl_hmc_pack *packing,
5212 unsigned int npacking)
5213 {
5214 uint8_t *dst = d;
5215 const uint8_t *src = s;
5216 unsigned int i;
5217
5218 for (i = 0; i < npacking; i++) {
5219 const struct ixl_hmc_pack *pack = &packing[i];
5220 unsigned int offset = pack->lsb / 8;
5221 unsigned int align = pack->lsb % 8;
5222 const uint8_t *in = src + pack->offset;
5223 uint8_t *out = dst + offset;
5224 int width = pack->width;
5225 unsigned int inbits = 0;
5226
5227 if (align) {
5228 inbits = (*in++) << align;
5229 *out++ |= (inbits & 0xff);
5230 inbits >>= 8;
5231
5232 width -= 8 - align;
5233 }
5234
5235 while (width >= 8) {
5236 inbits |= (*in++) << align;
5237 *out++ = (inbits & 0xff);
5238 inbits >>= 8;
5239
5240 width -= 8;
5241 }
5242
5243 if (width > 0) {
5244 inbits |= (*in) << align;
5245 *out |= (inbits & ((1 << width) - 1));
5246 }
5247 }
5248 }
5249
5250 static struct ixl_aq_buf *
5251 ixl_aqb_alloc(struct ixl_softc *sc)
5252 {
5253 struct ixl_aq_buf *aqb;
5254
5255 aqb = kmem_alloc(sizeof(*aqb), KM_SLEEP);
5256
5257 aqb->aqb_size = IXL_AQ_BUFLEN;
5258
5259 if (bus_dmamap_create(sc->sc_dmat, aqb->aqb_size, 1,
5260 aqb->aqb_size, 0,
5261 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &aqb->aqb_map) != 0)
5262 goto free;
5263 if (bus_dmamem_alloc(sc->sc_dmat, aqb->aqb_size,
5264 IXL_AQ_ALIGN, 0, &aqb->aqb_seg, 1, &aqb->aqb_nsegs,
5265 BUS_DMA_WAITOK) != 0)
5266 goto destroy;
5267 if (bus_dmamem_map(sc->sc_dmat, &aqb->aqb_seg, aqb->aqb_nsegs,
5268 aqb->aqb_size, &aqb->aqb_data, BUS_DMA_WAITOK) != 0)
5269 goto dma_free;
5270 if (bus_dmamap_load(sc->sc_dmat, aqb->aqb_map, aqb->aqb_data,
5271 aqb->aqb_size, NULL, BUS_DMA_WAITOK) != 0)
5272 goto unmap;
5273
5274 return aqb;
5275 unmap:
5276 bus_dmamem_unmap(sc->sc_dmat, aqb->aqb_data, aqb->aqb_size);
5277 dma_free:
5278 bus_dmamem_free(sc->sc_dmat, &aqb->aqb_seg, 1);
5279 destroy:
5280 bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
5281 free:
5282 kmem_free(aqb, sizeof(*aqb));
5283
5284 return NULL;
5285 }
5286
5287 static void
5288 ixl_aqb_free(struct ixl_softc *sc, struct ixl_aq_buf *aqb)
5289 {
5290
5291 bus_dmamap_unload(sc->sc_dmat, aqb->aqb_map);
5292 bus_dmamem_unmap(sc->sc_dmat, aqb->aqb_data, aqb->aqb_size);
5293 bus_dmamem_free(sc->sc_dmat, &aqb->aqb_seg, 1);
5294 bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
5295 kmem_free(aqb, sizeof(*aqb));
5296 }
5297
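/*
 * Refill the admin receive queue (ARQ) with buffers for firmware
 * events.  Idle buffers are reused when available and new ones are
 * allocated on demand; the tail register is only written if at least
 * one descriptor was posted.
 */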
5298 static int
5299 ixl_arq_fill(struct ixl_softc *sc)
5300 {
5301 struct ixl_aq_buf *aqb;
5302 struct ixl_aq_desc *arq, *iaq;
5303 unsigned int prod = sc->sc_arq_prod;
5304 unsigned int n;
5305 int post = 0;
5306
5307 n = ixl_rxr_unrefreshed(sc->sc_arq_prod, sc->sc_arq_cons,
5308 IXL_AQ_NUM);
5309 arq = IXL_DMA_KVA(&sc->sc_arq);
5310
5311 if (__predict_false(n <= 0))
5312 return 0;
5313
5314 do {
5315 aqb = sc->sc_arq_live[prod];
5316 iaq = &arq[prod];
5317
5318 if (aqb == NULL) {
5319 aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle);
5320 if (aqb != NULL) {
5321 SIMPLEQ_REMOVE(&sc->sc_arq_idle, aqb,
5322 ixl_aq_buf, aqb_entry);
5323 } else if ((aqb = ixl_aqb_alloc(sc)) == NULL) {
5324 break;
5325 }
5326
5327 sc->sc_arq_live[prod] = aqb;
5328 memset(aqb->aqb_data, 0, aqb->aqb_size);
5329
5330 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0,
5331 aqb->aqb_size, BUS_DMASYNC_PREREAD);
5332
5333 iaq->iaq_flags = htole16(IXL_AQ_BUF |
5334 (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ?
5335 IXL_AQ_LB : 0));
5336 iaq->iaq_opcode = 0;
5337 iaq->iaq_datalen = htole16(aqb->aqb_size);
5338 iaq->iaq_retval = 0;
5339 iaq->iaq_cookie = 0;
5340 iaq->iaq_param[0] = 0;
5341 iaq->iaq_param[1] = 0;
5342 ixl_aq_dva(iaq, aqb->aqb_map->dm_segs[0].ds_addr);
5343 }
5344
5345 prod++;
5346 prod &= IXL_AQ_MASK;
5347
5348 post = 1;
5349
5350 } while (--n);
5351
5352 if (post) {
5353 sc->sc_arq_prod = prod;
5354 ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
5355 }
5356
5357 return post;
5358 }
5359
5360 static void
5361 ixl_arq_unfill(struct ixl_softc *sc)
5362 {
5363 struct ixl_aq_buf *aqb;
5364 unsigned int i;
5365
5366 for (i = 0; i < __arraycount(sc->sc_arq_live); i++) {
5367 aqb = sc->sc_arq_live[i];
5368 if (aqb == NULL)
5369 continue;
5370
5371 sc->sc_arq_live[i] = NULL;
5372 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, aqb->aqb_size,
5373 BUS_DMASYNC_POSTREAD);
5374 ixl_aqb_free(sc, aqb);
5375 }
5376
5377 while ((aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle)) != NULL) {
5378 SIMPLEQ_REMOVE(&sc->sc_arq_idle, aqb,
5379 ixl_aq_buf, aqb_entry);
5380 ixl_aqb_free(sc, aqb);
5381 }
5382 }
5383
5384 static void
5385 ixl_clear_hw(struct ixl_softc *sc)
5386 {
5387 uint32_t num_queues, base_queue;
5388 uint32_t num_pf_int;
5389 uint32_t num_vf_int;
5390 uint32_t num_vfs;
5391 uint32_t i, j;
5392 uint32_t val;
5393 uint32_t eol = 0x7ff;
5394
5395 /* get number of interrupts, queues, and vfs */
5396 val = ixl_rd(sc, I40E_GLPCI_CNF2);
5397 num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
5398 I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
5399 num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
5400 I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
5401
5402 val = ixl_rd(sc, I40E_PFLAN_QALLOC);
5403 base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
5404 I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
5405 j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
5406 I40E_PFLAN_QALLOC_LASTQ_SHIFT;
5407 if (val & I40E_PFLAN_QALLOC_VALID_MASK)
5408 num_queues = (j - base_queue) + 1;
5409 else
5410 num_queues = 0;
5411
5412 val = ixl_rd(sc, I40E_PF_VT_PFALLOC);
5413 i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
5414 I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
5415 j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
5416 I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
5417 if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
5418 num_vfs = (j - i) + 1;
5419 else
5420 num_vfs = 0;
5421
5422 /* stop all the interrupts */
5423 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0);
5424 ixl_flush(sc);
5425 val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
5426 for (i = 0; i < num_pf_int - 2; i++)
5427 ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), val);
5428 ixl_flush(sc);
5429
5430 /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
5431 val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
5432 ixl_wr(sc, I40E_PFINT_LNKLST0, val);
5433 for (i = 0; i < num_pf_int - 2; i++)
5434 ixl_wr(sc, I40E_PFINT_LNKLSTN(i), val);
5435 val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
5436 for (i = 0; i < num_vfs; i++)
5437 ixl_wr(sc, I40E_VPINT_LNKLST0(i), val);
5438 for (i = 0; i < num_vf_int - 2; i++)
5439 ixl_wr(sc, I40E_VPINT_LNKLSTN(i), val);
5440
5441 /* warn the HW of the coming Tx disables */
5442 for (i = 0; i < num_queues; i++) {
5443 uint32_t abs_queue_idx = base_queue + i;
5444 uint32_t reg_block = 0;
5445
5446 if (abs_queue_idx >= 128) {
5447 reg_block = abs_queue_idx / 128;
5448 abs_queue_idx %= 128;
5449 }
5450
5451 val = ixl_rd(sc, I40E_GLLAN_TXPRE_QDIS(reg_block));
5452 val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
5453 val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
5454 val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
5455
5456 ixl_wr(sc, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
5457 }
5458 delaymsec(400);
5459
5460 /* stop all the queues */
5461 for (i = 0; i < num_queues; i++) {
5462 ixl_wr(sc, I40E_QINT_TQCTL(i), 0);
5463 ixl_wr(sc, I40E_QTX_ENA(i), 0);
5464 ixl_wr(sc, I40E_QINT_RQCTL(i), 0);
5465 ixl_wr(sc, I40E_QRX_ENA(i), 0);
5466 }
5467
5468 /* short wait for all queue disables to settle */
5469 delaymsec(50);
5470 }
5471
5472 static int
5473 ixl_pf_reset(struct ixl_softc *sc)
5474 {
5475 uint32_t cnt = 0;
5476 uint32_t cnt1 = 0;
5477 uint32_t reg = 0, reg0 = 0;
5478 uint32_t grst_del;
5479
5480 /*
5481 * Poll for Global Reset steady state in case of recent GRST.
5482 * The grst delay value is in 100ms units, and we'll wait a
5483 * couple counts longer to be sure we don't just miss the end.
5484 */
5485 grst_del = ixl_rd(sc, I40E_GLGEN_RSTCTL);
5486 grst_del &= I40E_GLGEN_RSTCTL_GRSTDEL_MASK;
5487 grst_del >>= I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
5488
5489 grst_del = grst_del * 20;
5490
5491 for (cnt = 0; cnt < grst_del; cnt++) {
5492 reg = ixl_rd(sc, I40E_GLGEN_RSTAT);
5493 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
5494 break;
5495 delaymsec(100);
5496 }
5497 if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
5498 aprint_error(", Global reset polling failed to complete\n");
5499 return -1;
5500 }
5501
5502 /* Now Wait for the FW to be ready */
5503 for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
5504 reg = ixl_rd(sc, I40E_GLNVM_ULD);
5505 reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5506 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
5507 if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5508 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))
5509 break;
5510
5511 delaymsec(10);
5512 }
5513 if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5514 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
5515 aprint_error(", wait for FW Reset complete timed out "
5516 "(I40E_GLNVM_ULD = 0x%x)\n", reg);
5517 return -1;
5518 }
5519
5520 /*
5521 * If there was a Global Reset in progress when we got here,
5522 * we don't need to do the PF Reset
5523 */
5524 if (cnt == 0) {
5525 reg = ixl_rd(sc, I40E_PFGEN_CTRL);
5526 ixl_wr(sc, I40E_PFGEN_CTRL, reg | I40E_PFGEN_CTRL_PFSWR_MASK);
5527 for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) {
5528 reg = ixl_rd(sc, I40E_PFGEN_CTRL);
5529 if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
5530 break;
5531 delaymsec(1);
5532
5533 reg0 = ixl_rd(sc, I40E_GLGEN_RSTAT);
5534 if (reg0 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
5535 aprint_error(", Core reset upcoming."
5536 " Skipping PF reset request\n");
5537 return -1;
5538 }
5539 }
5540 if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
5541 aprint_error(", PF reset polling failed to complete "
5542 "(I40E_PFGEN_CTRL = 0x%x)\n", reg);
5543 return -1;
5544 }
5545 }
5546
5547 return 0;
5548 }
5549
5550 static int
5551 ixl_dmamem_alloc(struct ixl_softc *sc, struct ixl_dmamem *ixm,
5552 bus_size_t size, bus_size_t align)
5553 {
5554 ixm->ixm_size = size;
5555
5556 if (bus_dmamap_create(sc->sc_dmat, ixm->ixm_size, 1,
5557 ixm->ixm_size, 0,
5558 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
5559 &ixm->ixm_map) != 0)
5560 return 1;
5561 if (bus_dmamem_alloc(sc->sc_dmat, ixm->ixm_size,
5562 align, 0, &ixm->ixm_seg, 1, &ixm->ixm_nsegs,
5563 BUS_DMA_WAITOK) != 0)
5564 goto destroy;
5565 if (bus_dmamem_map(sc->sc_dmat, &ixm->ixm_seg, ixm->ixm_nsegs,
5566 ixm->ixm_size, &ixm->ixm_kva, BUS_DMA_WAITOK) != 0)
5567 goto free;
5568 if (bus_dmamap_load(sc->sc_dmat, ixm->ixm_map, ixm->ixm_kva,
5569 ixm->ixm_size, NULL, BUS_DMA_WAITOK) != 0)
5570 goto unmap;
5571
5572 memset(ixm->ixm_kva, 0, ixm->ixm_size);
5573
5574 return 0;
5575 unmap:
5576 bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
5577 free:
5578 bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
5579 destroy:
5580 bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
5581 return 1;
5582 }
5583
5584 static void
5585 ixl_dmamem_free(struct ixl_softc *sc, struct ixl_dmamem *ixm)
5586 {
5587 bus_dmamap_unload(sc->sc_dmat, ixm->ixm_map);
5588 bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
5589 bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
5590 bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
5591 }
5592
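/*
 * Switch to exact MAC+VLAN filtering: drop the VLAN-agnostic filters
 * for our own and the broadcast address and install perfect-match
 * filters for VLAN 0 and for every VLAN id configured on the interface.
 */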
5593 static int
5594 ixl_setup_vlan_hwfilter(struct ixl_softc *sc)
5595 {
5596 struct ethercom *ec = &sc->sc_ec;
5597 struct vlanid_list *vlanidp;
5598 int rv;
5599
5600 ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
5601 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
5602 ixl_remove_macvlan(sc, etherbroadcastaddr, 0,
5603 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
5604
5605 rv = ixl_add_macvlan(sc, sc->sc_enaddr, 0,
5606 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5607 if (rv != 0)
5608 return rv;
5609 rv = ixl_add_macvlan(sc, etherbroadcastaddr, 0,
5610 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5611 if (rv != 0)
5612 return rv;
5613
5614 ETHER_LOCK(ec);
5615 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
5616 rv = ixl_add_macvlan(sc, sc->sc_enaddr,
5617 vlanidp->vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5618 if (rv != 0)
5619 break;
5620 rv = ixl_add_macvlan(sc, etherbroadcastaddr,
5621 vlanidp->vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5622 if (rv != 0)
5623 break;
5624 }
5625 ETHER_UNLOCK(ec);
5626
5627 return rv;
5628 }
5629
5630 static void
5631 ixl_teardown_vlan_hwfilter(struct ixl_softc *sc)
5632 {
5633 struct vlanid_list *vlanidp;
5634 struct ethercom *ec = &sc->sc_ec;
5635
5636 ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
5637 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5638 ixl_remove_macvlan(sc, etherbroadcastaddr, 0,
5639 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5640
5641 ETHER_LOCK(ec);
5642 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
5643 ixl_remove_macvlan(sc, sc->sc_enaddr,
5644 vlanidp->vid, IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5645 ixl_remove_macvlan(sc, etherbroadcastaddr,
5646 vlanidp->vid, IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5647 }
5648 ETHER_UNLOCK(ec);
5649
5650 ixl_add_macvlan(sc, sc->sc_enaddr, 0,
5651 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
5652 ixl_add_macvlan(sc, etherbroadcastaddr, 0,
5653 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
5654 }
5655
5656 static int
5657 ixl_update_macvlan(struct ixl_softc *sc)
5658 {
5659 int rv = 0;
5660 int next_ec_capenable = sc->sc_ec.ec_capenable;
5661
5662 if (ISSET(next_ec_capenable, ETHERCAP_VLAN_HWFILTER)) {
5663 rv = ixl_setup_vlan_hwfilter(sc);
5664 if (rv != 0)
5665 ixl_teardown_vlan_hwfilter(sc);
5666 } else {
5667 ixl_teardown_vlan_hwfilter(sc);
5668 }
5669
5670 return rv;
5671 }
5672
5673 static int
5674 ixl_ifflags_cb(struct ethercom *ec)
5675 {
5676 struct ifnet *ifp = &ec->ec_if;
5677 struct ixl_softc *sc = ifp->if_softc;
5678 int rv, change, reset_bits;
5679
5680 mutex_enter(&sc->sc_cfg_lock);
5681
5682 change = ec->ec_capenable ^ sc->sc_cur_ec_capenable;
5683 reset_bits = change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU);
5684 if (reset_bits != 0) {
5685 sc->sc_cur_ec_capenable ^= reset_bits;
5686 rv = ENETRESET;
5687 goto out;
5688 }
5689
5690 if (ISSET(change, ETHERCAP_VLAN_HWFILTER)) {
5691 rv = ixl_update_macvlan(sc);
5692 if (rv == 0) {
5693 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWFILTER;
5694 } else {
5695 CLR(ec->ec_capenable, ETHERCAP_VLAN_HWFILTER);
5696 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER);
5697 }
5698 }
5699
5700 rv = ixl_iff(sc);
5701 out:
5702 mutex_exit(&sc->sc_cfg_lock);
5703
5704 return rv;
5705 }
5706
5707 static int
5708 ixl_set_link_status_locked(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
5709 {
5710 const struct ixl_aq_link_status *status;
5711 const struct ixl_phy_type *itype;
5712
5713 uint64_t ifm_active = IFM_ETHER;
5714 uint64_t ifm_status = IFM_AVALID;
5715 int link_state = LINK_STATE_DOWN;
5716 uint64_t baudrate = 0;
5717
5718 status = (const struct ixl_aq_link_status *)iaq->iaq_param;
5719 if (!ISSET(status->link_info, IXL_AQ_LINK_UP_FUNCTION)) {
5720 ifm_active |= IFM_NONE;
5721 goto done;
5722 }
5723
5724 ifm_active |= IFM_FDX;
5725 ifm_status |= IFM_ACTIVE;
5726 link_state = LINK_STATE_UP;
5727
5728 itype = ixl_search_phy_type(status->phy_type);
5729 if (itype != NULL)
5730 ifm_active |= itype->ifm_type;
5731
5732 if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_TX))
5733 ifm_active |= IFM_ETH_TXPAUSE;
5734 if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_RX))
5735 ifm_active |= IFM_ETH_RXPAUSE;
5736
5737 baudrate = ixl_search_link_speed(status->link_speed);
5738
5739 done:
5740 /* sc->sc_cfg_lock is held except during attach */
5741 sc->sc_media_active = ifm_active;
5742 sc->sc_media_status = ifm_status;
5743
5744 sc->sc_ec.ec_if.if_baudrate = baudrate;
5745
5746 return link_state;
5747 }
5748
5749 static int
5750 ixl_establish_intx(struct ixl_softc *sc)
5751 {
5752 pci_chipset_tag_t pc = sc->sc_pa.pa_pc;
5753 pci_intr_handle_t *intr;
5754 char xnamebuf[32];
5755 char intrbuf[PCI_INTRSTR_LEN];
5756 char const *intrstr;
5757
5758 KASSERT(sc->sc_nintrs == 1);
5759
5760 intr = &sc->sc_ihp[0];
5761
5762 intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf));
5763 snprintf(xnamebuf, sizeof(xnamebuf), "%s:legacy",
5764 device_xname(sc->sc_dev));
5765
5766 sc->sc_ihs[0] = pci_intr_establish_xname(pc, *intr, IPL_NET, ixl_intr,
5767 sc, xnamebuf);
5768
5769 if (sc->sc_ihs[0] == NULL) {
5770 aprint_error_dev(sc->sc_dev,
5771 "unable to establish interrupt at %s\n", intrstr);
5772 return -1;
5773 }
5774
5775 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
5776 return 0;
5777 }
5778
5779 static int
5780 ixl_establish_msix(struct ixl_softc *sc)
5781 {
5782 pci_chipset_tag_t pc = sc->sc_pa.pa_pc;
5783 kcpuset_t *affinity;
5784 unsigned int vector = 0;
5785 unsigned int i;
5786 int affinity_to, r;
5787 char xnamebuf[32];
5788 char intrbuf[PCI_INTRSTR_LEN];
5789 char const *intrstr;
5790
5791 kcpuset_create(&affinity, false);
5792
5793 /* the "other" intr is mapped to vector 0 */
5794 vector = 0;
5795 intrstr = pci_intr_string(pc, sc->sc_ihp[vector],
5796 intrbuf, sizeof(intrbuf));
5797 snprintf(xnamebuf, sizeof(xnamebuf), "%s others",
5798 device_xname(sc->sc_dev));
5799 sc->sc_ihs[vector] = pci_intr_establish_xname(pc,
5800 sc->sc_ihp[vector], IPL_NET, ixl_other_intr,
5801 sc, xnamebuf);
5802 if (sc->sc_ihs[vector] == NULL) {
5803 aprint_error_dev(sc->sc_dev,
5804 "unable to establish interrupt at %s\n", intrstr);
5805 goto fail;
5806 }
5807
5808 aprint_normal_dev(sc->sc_dev, "other interrupt at %s", intrstr);
5809
5810 affinity_to = ncpu > (int)sc->sc_nqueue_pairs_max ? 1 : 0;
5811 affinity_to = (affinity_to + sc->sc_nqueue_pairs_max) % ncpu;
5812
5813 kcpuset_zero(affinity);
5814 kcpuset_set(affinity, affinity_to);
5815 r = interrupt_distribute(sc->sc_ihs[vector], affinity, NULL);
5816 if (r == 0) {
5817 aprint_normal(", affinity to %u", affinity_to);
5818 }
5819 aprint_normal("\n");
5820 vector++;
5821
5822 sc->sc_msix_vector_queue = vector;
5823 affinity_to = ncpu > (int)sc->sc_nqueue_pairs_max ? 1 : 0;
5824
5825 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
5826 intrstr = pci_intr_string(pc, sc->sc_ihp[vector],
5827 intrbuf, sizeof(intrbuf));
5828 snprintf(xnamebuf, sizeof(xnamebuf), "%s TXRX%d",
5829 device_xname(sc->sc_dev), i);
5830
5831 sc->sc_ihs[vector] = pci_intr_establish_xname(pc,
5832 sc->sc_ihp[vector], IPL_NET, ixl_queue_intr,
5833 (void *)&sc->sc_qps[i], xnamebuf);
5834
5835 if (sc->sc_ihs[vector] == NULL) {
5836 aprint_error_dev(sc->sc_dev,
5837 "unable to establish interrupt at %s\n", intrstr);
5838 goto fail;
5839 }
5840
5841 aprint_normal_dev(sc->sc_dev,
5842 "for TXRX%d interrupt at %s", i, intrstr);
5843
5844 kcpuset_zero(affinity);
5845 kcpuset_set(affinity, affinity_to);
5846 r = interrupt_distribute(sc->sc_ihs[vector], affinity, NULL);
5847 if (r == 0) {
5848 aprint_normal(", affinity to %u", affinity_to);
5849 affinity_to = (affinity_to + 1) % ncpu;
5850 }
5851 aprint_normal("\n");
5852 vector++;
5853 }
5854
5855 kcpuset_destroy(affinity);
5856
5857 return 0;
5858 fail:
5859 for (i = 0; i < vector; i++) {
5860 pci_intr_disestablish(pc, sc->sc_ihs[i]);
5861 }
5862
5863 sc->sc_msix_vector_queue = 0;
5865 kcpuset_destroy(affinity);
5866
5867 return -1;
5868 }
5869
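/*
 * Wire up the per-queue interrupt causes.  With MSI-X each queue pair
 * gets its own vector; otherwise all queues share the single legacy/MSI
 * vector.  The RQCTL/TQCTL registers select the vector and ITR index
 * for each queue and chain the queues into the vector's interrupt
 * linked list.
 */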
5870 static void
5871 ixl_config_queue_intr(struct ixl_softc *sc)
5872 {
5873 unsigned int i, vector;
5874
5875 if (sc->sc_intrtype == PCI_INTR_TYPE_MSIX) {
5876 vector = sc->sc_msix_vector_queue;
5877 } else {
5878 vector = I40E_INTR_NOTX_INTR;
5879
5880 ixl_wr(sc, I40E_PFINT_LNKLST0,
5881 (I40E_INTR_NOTX_QUEUE <<
5882 I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
5883 (I40E_QUEUE_TYPE_RX <<
5884 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
5885 }
5886
5887 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
5888 ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), 0);
5889 ixl_flush(sc);
5890
5891 ixl_wr(sc, I40E_PFINT_LNKLSTN(i),
5892 ((i) << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
5893 (I40E_QUEUE_TYPE_RX <<
5894 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
5895
5896 ixl_wr(sc, I40E_QINT_RQCTL(i),
5897 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
5898 (I40E_ITR_INDEX_RX <<
5899 I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
5900 (I40E_INTR_NOTX_RX_QUEUE <<
5901 I40E_QINT_RQCTL_MSIX0_INDX_SHIFT) |
5902 (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
5903 (I40E_QUEUE_TYPE_TX <<
5904 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
5905 I40E_QINT_RQCTL_CAUSE_ENA_MASK);
5906
5907 ixl_wr(sc, I40E_QINT_TQCTL(i),
5908 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
5909 (I40E_ITR_INDEX_TX <<
5910 I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
5911 (I40E_INTR_NOTX_TX_QUEUE <<
5912 I40E_QINT_TQCTL_MSIX0_INDX_SHIFT) |
5913 (I40E_QUEUE_TYPE_EOL <<
5914 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
5915 (I40E_QUEUE_TYPE_RX <<
5916 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) |
5917 I40E_QINT_TQCTL_CAUSE_ENA_MASK);
5918
5919 if (sc->sc_intrtype == PCI_INTR_TYPE_MSIX) {
5920 ixl_wr(sc, I40E_PFINT_ITRN(I40E_ITR_INDEX_RX, i),
5921 sc->sc_itr_rx);
5922 ixl_wr(sc, I40E_PFINT_ITRN(I40E_ITR_INDEX_TX, i),
5923 sc->sc_itr_tx);
5924 vector++;
5925 }
5926 }
5927 ixl_flush(sc);
5928
5929 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_RX), sc->sc_itr_rx);
5930 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_TX), sc->sc_itr_tx);
5931 ixl_flush(sc);
5932 }
5933
5934 static void
5935 ixl_config_other_intr(struct ixl_softc *sc)
5936 {
5937 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0);
5938 (void)ixl_rd(sc, I40E_PFINT_ICR0);
5939
5940 ixl_wr(sc, I40E_PFINT_ICR0_ENA,
5941 I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
5942 I40E_PFINT_ICR0_ENA_GRST_MASK |
5943 I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
5944 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
5945 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
5946 I40E_PFINT_ICR0_ENA_VFLR_MASK |
5947 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
5948 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
5949 I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK);
5950
5951 ixl_wr(sc, I40E_PFINT_LNKLST0, 0x7FF);
5952 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_OTHER), 0);
5953 ixl_wr(sc, I40E_PFINT_STAT_CTL0,
5954 (I40E_ITR_INDEX_OTHER <<
5955 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT));
5956 ixl_flush(sc);
5957 }
5958
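/*
 * ixl_setup_interrupts:
 *	Allocate and establish interrupts.  MSI-X is preferred, using one
 *	vector per queue pair plus one for admin/other events; if the MSI-X
 *	handlers cannot be established, the allocation is retried with INTx.
 */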
5959 static int
5960 ixl_setup_interrupts(struct ixl_softc *sc)
5961 {
5962 struct pci_attach_args *pa = &sc->sc_pa;
5963 pci_intr_type_t max_type, intr_type;
5964 int counts[PCI_INTR_TYPE_SIZE];
5965 int error;
5966 unsigned int i;
5967 bool retry;
5968
5969 memset(counts, 0, sizeof(counts));
5970 max_type = PCI_INTR_TYPE_MSIX;
5971 /* QPs + other interrupt */
5972 counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueue_pairs_max + 1;
5973 counts[PCI_INTR_TYPE_INTX] = 1;
5974
5975 if (ixl_param_nomsix)
5976 counts[PCI_INTR_TYPE_MSIX] = 0;
5977
5978 do {
5979 retry = false;
5980 error = pci_intr_alloc(pa, &sc->sc_ihp, counts, max_type);
5981 if (error != 0) {
5982 aprint_error_dev(sc->sc_dev,
5983 "couldn't map interrupt\n");
5984 break;
5985 }
5986
5987 intr_type = pci_intr_type(pa->pa_pc, sc->sc_ihp[0]);
5988 sc->sc_nintrs = counts[intr_type];
5989 KASSERT(sc->sc_nintrs > 0);
5990
5991 for (i = 0; i < sc->sc_nintrs; i++) {
5992 pci_intr_setattr(pa->pa_pc, &sc->sc_ihp[i],
5993 PCI_INTR_MPSAFE, true);
5994 }
5995
5996 sc->sc_ihs = kmem_zalloc(sizeof(sc->sc_ihs[0]) * sc->sc_nintrs,
5997 KM_SLEEP);
5998
5999 if (intr_type == PCI_INTR_TYPE_MSIX) {
6000 error = ixl_establish_msix(sc);
6001 if (error) {
6002 counts[PCI_INTR_TYPE_MSIX] = 0;
6003 retry = true;
6004 }
6005 } else if (intr_type == PCI_INTR_TYPE_INTX) {
6006 error = ixl_establish_intx(sc);
6007 } else {
6008 error = -1;
6009 }
6010
6011 if (error) {
6012 kmem_free(sc->sc_ihs,
6013 sizeof(sc->sc_ihs[0]) * sc->sc_nintrs);
6014 pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs);
6015 } else {
6016 sc->sc_intrtype = intr_type;
6017 }
6018 } while (retry);
6019
6020 return error;
6021 }
6022
6023 static void
6024 ixl_teardown_interrupts(struct ixl_softc *sc)
6025 {
6026 struct pci_attach_args *pa = &sc->sc_pa;
6027 unsigned int i;
6028
6029 for (i = 0; i < sc->sc_nintrs; i++) {
6030 pci_intr_disestablish(pa->pa_pc, sc->sc_ihs[i]);
6031 }
6032
6033 pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs);
6034
6035 kmem_free(sc->sc_ihs, sizeof(sc->sc_ihs[0]) * sc->sc_nintrs);
6036 sc->sc_ihs = NULL;
6037 sc->sc_nintrs = 0;
6038 }
6039
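/*
 * ixl_setup_stats:
 *	Attach event counters: per-queue TX/RX software counters, admin
 *	event counters, and the per-port/per-VSI hardware statistics that
 *	are periodically harvested by ixl_stats_update().
 */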
6040 static int
6041 ixl_setup_stats(struct ixl_softc *sc)
6042 {
6043 struct ixl_queue_pair *qp;
6044 struct ixl_tx_ring *txr;
6045 struct ixl_rx_ring *rxr;
6046 struct ixl_stats_counters *isc;
6047 unsigned int i;
6048
6049 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
6050 qp = &sc->sc_qps[i];
6051 txr = qp->qp_txr;
6052 rxr = qp->qp_rxr;
6053
6054 evcnt_attach_dynamic(&txr->txr_defragged, EVCNT_TYPE_MISC,
6055 NULL, qp->qp_name, "m_defrag successed");
6056 evcnt_attach_dynamic(&txr->txr_defrag_failed, EVCNT_TYPE_MISC,
6057 NULL, qp->qp_name, "m_defrag_failed");
6058 evcnt_attach_dynamic(&txr->txr_pcqdrop, EVCNT_TYPE_MISC,
6059 NULL, qp->qp_name, "Dropped in pcq");
6060 evcnt_attach_dynamic(&txr->txr_transmitdef, EVCNT_TYPE_MISC,
6061 NULL, qp->qp_name, "Deferred transmit");
6062 evcnt_attach_dynamic(&txr->txr_intr, EVCNT_TYPE_INTR,
6063 NULL, qp->qp_name, "Interrupt on queue");
6064 evcnt_attach_dynamic(&txr->txr_defer, EVCNT_TYPE_MISC,
6065 NULL, qp->qp_name, "Handled queue in softint/workqueue");
6066
6067 evcnt_attach_dynamic(&rxr->rxr_mgethdr_failed, EVCNT_TYPE_MISC,
6068 NULL, qp->qp_name, "MGETHDR failed");
6069 evcnt_attach_dynamic(&rxr->rxr_mgetcl_failed, EVCNT_TYPE_MISC,
6070 NULL, qp->qp_name, "MCLGET failed");
6071 evcnt_attach_dynamic(&rxr->rxr_mbuf_load_failed,
6072 EVCNT_TYPE_MISC, NULL, qp->qp_name,
6073 "bus_dmamap_load_mbuf failed");
6074 evcnt_attach_dynamic(&rxr->rxr_intr, EVCNT_TYPE_INTR,
6075 NULL, qp->qp_name, "Interrupt on queue");
6076 evcnt_attach_dynamic(&rxr->rxr_defer, EVCNT_TYPE_MISC,
6077 NULL, qp->qp_name, "Handled queue in softint/workqueue");
6078 }
6079
6080 evcnt_attach_dynamic(&sc->sc_event_atq, EVCNT_TYPE_INTR,
6081 NULL, device_xname(sc->sc_dev), "Interrupt for other events");
6082 evcnt_attach_dynamic(&sc->sc_event_link, EVCNT_TYPE_MISC,
6083 NULL, device_xname(sc->sc_dev), "Link status event");
6084 evcnt_attach_dynamic(&sc->sc_event_ecc_err, EVCNT_TYPE_MISC,
6085 NULL, device_xname(sc->sc_dev), "ECC error");
6086 evcnt_attach_dynamic(&sc->sc_event_pci_exception, EVCNT_TYPE_MISC,
6087 NULL, device_xname(sc->sc_dev), "PCI exception");
6088 evcnt_attach_dynamic(&sc->sc_event_crit_err, EVCNT_TYPE_MISC,
6089 NULL, device_xname(sc->sc_dev), "Critical error");
6090
6091 isc = &sc->sc_stats_counters;
6092 evcnt_attach_dynamic(&isc->isc_crc_errors, EVCNT_TYPE_MISC,
6093 NULL, device_xname(sc->sc_dev), "CRC errors");
6094 evcnt_attach_dynamic(&isc->isc_illegal_bytes, EVCNT_TYPE_MISC,
6095 NULL, device_xname(sc->sc_dev), "Illegal bytes");
6096 evcnt_attach_dynamic(&isc->isc_mac_local_faults, EVCNT_TYPE_MISC,
6097 NULL, device_xname(sc->sc_dev), "Mac local faults");
6098 evcnt_attach_dynamic(&isc->isc_mac_remote_faults, EVCNT_TYPE_MISC,
6099 NULL, device_xname(sc->sc_dev), "Mac remote faults");
6100 evcnt_attach_dynamic(&isc->isc_link_xon_rx, EVCNT_TYPE_MISC,
6101 NULL, device_xname(sc->sc_dev), "Rx xon");
6102 evcnt_attach_dynamic(&isc->isc_link_xon_tx, EVCNT_TYPE_MISC,
6103 NULL, device_xname(sc->sc_dev), "Tx xon");
6104 evcnt_attach_dynamic(&isc->isc_link_xoff_rx, EVCNT_TYPE_MISC,
6105 NULL, device_xname(sc->sc_dev), "Rx xoff");
6106 evcnt_attach_dynamic(&isc->isc_link_xoff_tx, EVCNT_TYPE_MISC,
6107 NULL, device_xname(sc->sc_dev), "Tx xoff");
6108 evcnt_attach_dynamic(&isc->isc_rx_fragments, EVCNT_TYPE_MISC,
6109 NULL, device_xname(sc->sc_dev), "Rx fragments");
6110 evcnt_attach_dynamic(&isc->isc_rx_jabber, EVCNT_TYPE_MISC,
6111 NULL, device_xname(sc->sc_dev), "Rx jabber");
6112
6113 evcnt_attach_dynamic(&isc->isc_rx_size_64, EVCNT_TYPE_MISC,
6114 NULL, device_xname(sc->sc_dev), "Rx size 64");
6115 evcnt_attach_dynamic(&isc->isc_rx_size_127, EVCNT_TYPE_MISC,
6116 NULL, device_xname(sc->sc_dev), "Rx size 127");
6117 evcnt_attach_dynamic(&isc->isc_rx_size_255, EVCNT_TYPE_MISC,
6118 NULL, device_xname(sc->sc_dev), "Rx size 255");
6119 evcnt_attach_dynamic(&isc->isc_rx_size_511, EVCNT_TYPE_MISC,
6120 NULL, device_xname(sc->sc_dev), "Rx size 511");
6121 evcnt_attach_dynamic(&isc->isc_rx_size_1023, EVCNT_TYPE_MISC,
6122 NULL, device_xname(sc->sc_dev), "Rx size 1023");
6123 evcnt_attach_dynamic(&isc->isc_rx_size_1522, EVCNT_TYPE_MISC,
6124 NULL, device_xname(sc->sc_dev), "Rx size 1522");
6125 evcnt_attach_dynamic(&isc->isc_rx_size_big, EVCNT_TYPE_MISC,
6126 NULL, device_xname(sc->sc_dev), "Rx jumbo packets");
6127 evcnt_attach_dynamic(&isc->isc_rx_undersize, EVCNT_TYPE_MISC,
6128 NULL, device_xname(sc->sc_dev), "Rx under size");
6129 evcnt_attach_dynamic(&isc->isc_rx_oversize, EVCNT_TYPE_MISC,
6130 NULL, device_xname(sc->sc_dev), "Rx over size");
6131
6132 evcnt_attach_dynamic(&isc->isc_rx_bytes, EVCNT_TYPE_MISC,
6133 NULL, device_xname(sc->sc_dev), "Rx bytes / port");
6134 evcnt_attach_dynamic(&isc->isc_rx_discards, EVCNT_TYPE_MISC,
6135 NULL, device_xname(sc->sc_dev), "Rx discards / port");
6136 evcnt_attach_dynamic(&isc->isc_rx_unicast, EVCNT_TYPE_MISC,
6137 NULL, device_xname(sc->sc_dev), "Rx unicast / port");
6138 evcnt_attach_dynamic(&isc->isc_rx_multicast, EVCNT_TYPE_MISC,
6139 NULL, device_xname(sc->sc_dev), "Rx multicast / port");
6140 evcnt_attach_dynamic(&isc->isc_rx_broadcast, EVCNT_TYPE_MISC,
6141 NULL, device_xname(sc->sc_dev), "Rx broadcast / port");
6142
6143 evcnt_attach_dynamic(&isc->isc_vsi_rx_bytes, EVCNT_TYPE_MISC,
6144 NULL, device_xname(sc->sc_dev), "Rx bytes / vsi");
6145 evcnt_attach_dynamic(&isc->isc_vsi_rx_discards, EVCNT_TYPE_MISC,
6146 NULL, device_xname(sc->sc_dev), "Rx discards / vsi");
6147 evcnt_attach_dynamic(&isc->isc_vsi_rx_unicast, EVCNT_TYPE_MISC,
6148 NULL, device_xname(sc->sc_dev), "Rx unicast / vsi");
6149 evcnt_attach_dynamic(&isc->isc_vsi_rx_multicast, EVCNT_TYPE_MISC,
6150 NULL, device_xname(sc->sc_dev), "Rx multicast / vsi");
6151 evcnt_attach_dynamic(&isc->isc_vsi_rx_broadcast, EVCNT_TYPE_MISC,
6152 NULL, device_xname(sc->sc_dev), "Rx broadcast / vsi");
6153
6154 evcnt_attach_dynamic(&isc->isc_tx_size_64, EVCNT_TYPE_MISC,
6155 NULL, device_xname(sc->sc_dev), "Tx size 64");
6156 evcnt_attach_dynamic(&isc->isc_tx_size_127, EVCNT_TYPE_MISC,
6157 NULL, device_xname(sc->sc_dev), "Tx size 127");
6158 evcnt_attach_dynamic(&isc->isc_tx_size_255, EVCNT_TYPE_MISC,
6159 NULL, device_xname(sc->sc_dev), "Tx size 255");
6160 evcnt_attach_dynamic(&isc->isc_tx_size_511, EVCNT_TYPE_MISC,
6161 NULL, device_xname(sc->sc_dev), "Tx size 511");
6162 evcnt_attach_dynamic(&isc->isc_tx_size_1023, EVCNT_TYPE_MISC,
6163 NULL, device_xname(sc->sc_dev), "Tx size 1023");
6164 evcnt_attach_dynamic(&isc->isc_tx_size_1522, EVCNT_TYPE_MISC,
6165 NULL, device_xname(sc->sc_dev), "Tx size 1522");
6166 evcnt_attach_dynamic(&isc->isc_tx_size_big, EVCNT_TYPE_MISC,
6167 NULL, device_xname(sc->sc_dev), "Tx jumbo packets");
6168
6169 evcnt_attach_dynamic(&isc->isc_tx_bytes, EVCNT_TYPE_MISC,
6170 NULL, device_xname(sc->sc_dev), "Tx bytes / port");
6171 evcnt_attach_dynamic(&isc->isc_tx_dropped_link_down, EVCNT_TYPE_MISC,
6172 NULL, device_xname(sc->sc_dev),
6173 "Tx dropped due to link down / port");
6174 evcnt_attach_dynamic(&isc->isc_tx_unicast, EVCNT_TYPE_MISC,
6175 NULL, device_xname(sc->sc_dev), "Tx unicast / port");
6176 evcnt_attach_dynamic(&isc->isc_tx_multicast, EVCNT_TYPE_MISC,
6177 NULL, device_xname(sc->sc_dev), "Tx multicast / port");
6178 evcnt_attach_dynamic(&isc->isc_tx_broadcast, EVCNT_TYPE_MISC,
6179 NULL, device_xname(sc->sc_dev), "Tx broadcast / port");
6180
6181 evcnt_attach_dynamic(&isc->isc_vsi_tx_bytes, EVCNT_TYPE_MISC,
6182 NULL, device_xname(sc->sc_dev), "Tx bytes / vsi");
6183 evcnt_attach_dynamic(&isc->isc_vsi_tx_errors, EVCNT_TYPE_MISC,
6184 NULL, device_xname(sc->sc_dev), "Tx errors / vsi");
6185 evcnt_attach_dynamic(&isc->isc_vsi_tx_unicast, EVCNT_TYPE_MISC,
6186 NULL, device_xname(sc->sc_dev), "Tx unicast / vsi");
6187 evcnt_attach_dynamic(&isc->isc_vsi_tx_multicast, EVCNT_TYPE_MISC,
6188 NULL, device_xname(sc->sc_dev), "Tx multicast / vsi");
6189 evcnt_attach_dynamic(&isc->isc_vsi_tx_broadcast, EVCNT_TYPE_MISC,
6190 NULL, device_xname(sc->sc_dev), "Tx broadcast / vsi");
6191
6192 sc->sc_stats_intval = ixl_param_stats_interval;
6193 callout_init(&sc->sc_stats_callout, CALLOUT_MPSAFE);
6194 callout_setfunc(&sc->sc_stats_callout, ixl_stats_callout, sc);
6195 ixl_work_set(&sc->sc_stats_task, ixl_stats_update, sc);
6196
6197 return 0;
6198 }
6199
6200 static void
6201 ixl_teardown_stats(struct ixl_softc *sc)
6202 {
6203 struct ixl_tx_ring *txr;
6204 struct ixl_rx_ring *rxr;
6205 struct ixl_stats_counters *isc;
6206 unsigned int i;
6207
6208 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
6209 txr = sc->sc_qps[i].qp_txr;
6210 rxr = sc->sc_qps[i].qp_rxr;
6211
6212 evcnt_detach(&txr->txr_defragged);
6213 evcnt_detach(&txr->txr_defrag_failed);
6214 evcnt_detach(&txr->txr_pcqdrop);
6215 evcnt_detach(&txr->txr_transmitdef);
6216 evcnt_detach(&txr->txr_intr);
6217 evcnt_detach(&txr->txr_defer);
6218
6219 evcnt_detach(&rxr->rxr_mgethdr_failed);
6220 evcnt_detach(&rxr->rxr_mgetcl_failed);
6221 evcnt_detach(&rxr->rxr_mbuf_load_failed);
6222 evcnt_detach(&rxr->rxr_intr);
6223 evcnt_detach(&rxr->rxr_defer);
6224 }
6225
6226 isc = &sc->sc_stats_counters;
6227 evcnt_detach(&isc->isc_crc_errors);
6228 evcnt_detach(&isc->isc_illegal_bytes);
6229 evcnt_detach(&isc->isc_mac_local_faults);
6230 evcnt_detach(&isc->isc_mac_remote_faults);
6231 evcnt_detach(&isc->isc_link_xon_rx);
6232 evcnt_detach(&isc->isc_link_xon_tx);
6233 evcnt_detach(&isc->isc_link_xoff_rx);
6234 evcnt_detach(&isc->isc_link_xoff_tx);
6235 evcnt_detach(&isc->isc_rx_fragments);
6236 evcnt_detach(&isc->isc_rx_jabber);
6237 evcnt_detach(&isc->isc_rx_bytes);
6238 evcnt_detach(&isc->isc_rx_discards);
6239 evcnt_detach(&isc->isc_rx_unicast);
6240 evcnt_detach(&isc->isc_rx_multicast);
6241 evcnt_detach(&isc->isc_rx_broadcast);
6242 evcnt_detach(&isc->isc_rx_size_64);
6243 evcnt_detach(&isc->isc_rx_size_127);
6244 evcnt_detach(&isc->isc_rx_size_255);
6245 evcnt_detach(&isc->isc_rx_size_511);
6246 evcnt_detach(&isc->isc_rx_size_1023);
6247 evcnt_detach(&isc->isc_rx_size_1522);
6248 evcnt_detach(&isc->isc_rx_size_big);
6249 evcnt_detach(&isc->isc_rx_undersize);
6250 evcnt_detach(&isc->isc_rx_oversize);
6251 evcnt_detach(&isc->isc_tx_bytes);
6252 evcnt_detach(&isc->isc_tx_dropped_link_down);
6253 evcnt_detach(&isc->isc_tx_unicast);
6254 evcnt_detach(&isc->isc_tx_multicast);
6255 evcnt_detach(&isc->isc_tx_broadcast);
6256 evcnt_detach(&isc->isc_tx_size_64);
6257 evcnt_detach(&isc->isc_tx_size_127);
6258 evcnt_detach(&isc->isc_tx_size_255);
6259 evcnt_detach(&isc->isc_tx_size_511);
6260 evcnt_detach(&isc->isc_tx_size_1023);
6261 evcnt_detach(&isc->isc_tx_size_1522);
6262 evcnt_detach(&isc->isc_tx_size_big);
6263 evcnt_detach(&isc->isc_vsi_rx_discards);
6264 evcnt_detach(&isc->isc_vsi_rx_bytes);
6265 evcnt_detach(&isc->isc_vsi_rx_unicast);
6266 evcnt_detach(&isc->isc_vsi_rx_multicast);
6267 evcnt_detach(&isc->isc_vsi_rx_broadcast);
6268 evcnt_detach(&isc->isc_vsi_tx_errors);
6269 evcnt_detach(&isc->isc_vsi_tx_bytes);
6270 evcnt_detach(&isc->isc_vsi_tx_unicast);
6271 evcnt_detach(&isc->isc_vsi_tx_multicast);
6272 evcnt_detach(&isc->isc_vsi_tx_broadcast);
6273
6274 evcnt_detach(&sc->sc_event_atq);
6275 evcnt_detach(&sc->sc_event_link);
6276 evcnt_detach(&sc->sc_event_ecc_err);
6277 evcnt_detach(&sc->sc_event_pci_exception);
6278 evcnt_detach(&sc->sc_event_crit_err);
6279
6280 callout_destroy(&sc->sc_stats_callout);
6281 }
6282
6283 static void
6284 ixl_stats_callout(void *xsc)
6285 {
6286 struct ixl_softc *sc = xsc;
6287
6288 ixl_work_add(sc->sc_workq, &sc->sc_stats_task);
6289 callout_schedule(&sc->sc_stats_callout, mstohz(sc->sc_stats_intval));
6290 }
6291
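/*
 * ixl_stat_delta:
 *	Read a hardware statistics counter (32-bit, or 48-bit when reg_hi
 *	is given) and return its increase since the previous read, taking
 *	counter wrap-around into account.  The raw value is saved in
 *	*offset for the next call; the first read (has_offset == false)
 *	only latches the offset and returns 0.
 */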
6292 static uint64_t
6293 ixl_stat_delta(struct ixl_softc *sc, uint32_t reg_hi, uint32_t reg_lo,
6294 uint64_t *offset, bool has_offset)
6295 {
6296 uint64_t value, delta;
6297 int bitwidth;
6298
6299 bitwidth = reg_hi == 0 ? 32 : 48;
6300
6301 value = ixl_rd(sc, reg_lo);
6302
6303 if (bitwidth > 32) {
6304 value |= ((uint64_t)ixl_rd(sc, reg_hi) << 32);
6305 }
6306
6307 if (__predict_true(has_offset)) {
6308 delta = value;
6309 if (value < *offset)
6310 delta += ((uint64_t)1 << bitwidth);
6311 delta -= *offset;
6312 } else {
6313 delta = 0;
6314 }
6315 atomic_swap_64(offset, value);
6316
6317 return delta;
6318 }
6319
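/*
 * ixl_stats_update:
 *	Workqueue handler that harvests the GLPRT_* (per-port) and GLV_*
 *	(per-VSI) hardware counters and accumulates their deltas into the
 *	event counters attached in ixl_setup_stats().
 */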
6320 static void
6321 ixl_stats_update(void *xsc)
6322 {
6323 struct ixl_softc *sc = xsc;
6324 struct ixl_stats_counters *isc;
6325 uint64_t delta;
6326
6327 isc = &sc->sc_stats_counters;
6328
6329 /* errors */
6330 delta = ixl_stat_delta(sc,
6331 0, I40E_GLPRT_CRCERRS(sc->sc_port),
6332 &isc->isc_crc_errors_offset, isc->isc_has_offset);
6333 atomic_add_64(&isc->isc_crc_errors.ev_count, delta);
6334
6335 delta = ixl_stat_delta(sc,
6336 0, I40E_GLPRT_ILLERRC(sc->sc_port),
6337 &isc->isc_illegal_bytes_offset, isc->isc_has_offset);
6338 atomic_add_64(&isc->isc_illegal_bytes.ev_count, delta);
6339
6340 /* rx */
6341 delta = ixl_stat_delta(sc,
6342 I40E_GLPRT_GORCH(sc->sc_port), I40E_GLPRT_GORCL(sc->sc_port),
6343 &isc->isc_rx_bytes_offset, isc->isc_has_offset);
6344 atomic_add_64(&isc->isc_rx_bytes.ev_count, delta);
6345
6346 delta = ixl_stat_delta(sc,
6347 0, I40E_GLPRT_RDPC(sc->sc_port),
6348 &isc->isc_rx_discards_offset, isc->isc_has_offset);
6349 atomic_add_64(&isc->isc_rx_discards.ev_count, delta);
6350
6351 delta = ixl_stat_delta(sc,
6352 I40E_GLPRT_UPRCH(sc->sc_port), I40E_GLPRT_UPRCL(sc->sc_port),
6353 &isc->isc_rx_unicast_offset, isc->isc_has_offset);
6354 atomic_add_64(&isc->isc_rx_unicast.ev_count, delta);
6355
6356 delta = ixl_stat_delta(sc,
6357 I40E_GLPRT_MPRCH(sc->sc_port), I40E_GLPRT_MPRCL(sc->sc_port),
6358 &isc->isc_rx_multicast_offset, isc->isc_has_offset);
6359 atomic_add_64(&isc->isc_rx_multicast.ev_count, delta);
6360
6361 delta = ixl_stat_delta(sc,
6362 I40E_GLPRT_BPRCH(sc->sc_port), I40E_GLPRT_BPRCL(sc->sc_port),
6363 &isc->isc_rx_broadcast_offset, isc->isc_has_offset);
6364 atomic_add_64(&isc->isc_rx_broadcast.ev_count, delta);
6365
6366 /* Packet size stats rx */
6367 delta = ixl_stat_delta(sc,
6368 I40E_GLPRT_PRC64H(sc->sc_port), I40E_GLPRT_PRC64L(sc->sc_port),
6369 &isc->isc_rx_size_64_offset, isc->isc_has_offset);
6370 atomic_add_64(&isc->isc_rx_size_64.ev_count, delta);
6371
6372 delta = ixl_stat_delta(sc,
6373 I40E_GLPRT_PRC127H(sc->sc_port), I40E_GLPRT_PRC127L(sc->sc_port),
6374 &isc->isc_rx_size_127_offset, isc->isc_has_offset);
6375 atomic_add_64(&isc->isc_rx_size_127.ev_count, delta);
6376
6377 delta = ixl_stat_delta(sc,
6378 I40E_GLPRT_PRC255H(sc->sc_port), I40E_GLPRT_PRC255L(sc->sc_port),
6379 &isc->isc_rx_size_255_offset, isc->isc_has_offset);
6380 atomic_add_64(&isc->isc_rx_size_255.ev_count, delta);
6381
6382 delta = ixl_stat_delta(sc,
6383 I40E_GLPRT_PRC511H(sc->sc_port), I40E_GLPRT_PRC511L(sc->sc_port),
6384 &isc->isc_rx_size_511_offset, isc->isc_has_offset);
6385 atomic_add_64(&isc->isc_rx_size_511.ev_count, delta);
6386
6387 delta = ixl_stat_delta(sc,
6388 I40E_GLPRT_PRC1023H(sc->sc_port), I40E_GLPRT_PRC1023L(sc->sc_port),
6389 &isc->isc_rx_size_1023_offset, isc->isc_has_offset);
6390 atomic_add_64(&isc->isc_rx_size_1023.ev_count, delta);
6391
6392 delta = ixl_stat_delta(sc,
6393 I40E_GLPRT_PRC1522H(sc->sc_port), I40E_GLPRT_PRC1522L(sc->sc_port),
6394 &isc->isc_rx_size_1522_offset, isc->isc_has_offset);
6395 atomic_add_64(&isc->isc_rx_size_1522.ev_count, delta);
6396
6397 delta = ixl_stat_delta(sc,
6398 I40E_GLPRT_PRC9522H(sc->sc_port), I40E_GLPRT_PRC9522L(sc->sc_port),
6399 &isc->isc_rx_size_big_offset, isc->isc_has_offset);
6400 atomic_add_64(&isc->isc_rx_size_big.ev_count, delta);
6401
6402 delta = ixl_stat_delta(sc,
6403 0, I40E_GLPRT_RUC(sc->sc_port),
6404 &isc->isc_rx_undersize_offset, isc->isc_has_offset);
6405 atomic_add_64(&isc->isc_rx_undersize.ev_count, delta);
6406
6407 delta = ixl_stat_delta(sc,
6408 0, I40E_GLPRT_ROC(sc->sc_port),
6409 &isc->isc_rx_oversize_offset, isc->isc_has_offset);
6410 atomic_add_64(&isc->isc_rx_oversize.ev_count, delta);
6411
6412 /* tx */
6413 delta = ixl_stat_delta(sc,
6414 I40E_GLPRT_GOTCH(sc->sc_port), I40E_GLPRT_GOTCL(sc->sc_port),
6415 &isc->isc_tx_bytes_offset, isc->isc_has_offset);
6416 atomic_add_64(&isc->isc_tx_bytes.ev_count, delta);
6417
6418 delta = ixl_stat_delta(sc,
6419 0, I40E_GLPRT_TDOLD(sc->sc_port),
6420 &isc->isc_tx_dropped_link_down_offset, isc->isc_has_offset);
6421 atomic_add_64(&isc->isc_tx_dropped_link_down.ev_count, delta);
6422
6423 delta = ixl_stat_delta(sc,
6424 I40E_GLPRT_UPTCH(sc->sc_port), I40E_GLPRT_UPTCL(sc->sc_port),
6425 &isc->isc_tx_unicast_offset, isc->isc_has_offset);
6426 atomic_add_64(&isc->isc_tx_unicast.ev_count, delta);
6427
6428 delta = ixl_stat_delta(sc,
6429 I40E_GLPRT_MPTCH(sc->sc_port), I40E_GLPRT_MPTCL(sc->sc_port),
6430 &isc->isc_tx_multicast_offset, isc->isc_has_offset);
6431 atomic_add_64(&isc->isc_tx_multicast.ev_count, delta);
6432
6433 delta = ixl_stat_delta(sc,
6434 I40E_GLPRT_BPTCH(sc->sc_port), I40E_GLPRT_BPTCL(sc->sc_port),
6435 &isc->isc_tx_broadcast_offset, isc->isc_has_offset);
6436 atomic_add_64(&isc->isc_tx_broadcast.ev_count, delta);
6437
6438 /* Packet size stats tx */
6439 delta = ixl_stat_delta(sc,
6440 	    I40E_GLPRT_PTC64H(sc->sc_port), I40E_GLPRT_PTC64L(sc->sc_port),
6441 &isc->isc_tx_size_64_offset, isc->isc_has_offset);
6442 atomic_add_64(&isc->isc_tx_size_64.ev_count, delta);
6443
6444 delta = ixl_stat_delta(sc,
6445 I40E_GLPRT_PTC127H(sc->sc_port), I40E_GLPRT_PTC127L(sc->sc_port),
6446 &isc->isc_tx_size_127_offset, isc->isc_has_offset);
6447 atomic_add_64(&isc->isc_tx_size_127.ev_count, delta);
6448
6449 delta = ixl_stat_delta(sc,
6450 I40E_GLPRT_PTC255H(sc->sc_port), I40E_GLPRT_PTC255L(sc->sc_port),
6451 &isc->isc_tx_size_255_offset, isc->isc_has_offset);
6452 atomic_add_64(&isc->isc_tx_size_255.ev_count, delta);
6453
6454 delta = ixl_stat_delta(sc,
6455 I40E_GLPRT_PTC511H(sc->sc_port), I40E_GLPRT_PTC511L(sc->sc_port),
6456 &isc->isc_tx_size_511_offset, isc->isc_has_offset);
6457 atomic_add_64(&isc->isc_tx_size_511.ev_count, delta);
6458
6459 delta = ixl_stat_delta(sc,
6460 I40E_GLPRT_PTC1023H(sc->sc_port), I40E_GLPRT_PTC1023L(sc->sc_port),
6461 &isc->isc_tx_size_1023_offset, isc->isc_has_offset);
6462 atomic_add_64(&isc->isc_tx_size_1023.ev_count, delta);
6463
6464 delta = ixl_stat_delta(sc,
6465 I40E_GLPRT_PTC1522H(sc->sc_port), I40E_GLPRT_PTC1522L(sc->sc_port),
6466 &isc->isc_tx_size_1522_offset, isc->isc_has_offset);
6467 atomic_add_64(&isc->isc_tx_size_1522.ev_count, delta);
6468
6469 delta = ixl_stat_delta(sc,
6470 I40E_GLPRT_PTC9522H(sc->sc_port), I40E_GLPRT_PTC9522L(sc->sc_port),
6471 &isc->isc_tx_size_big_offset, isc->isc_has_offset);
6472 atomic_add_64(&isc->isc_tx_size_big.ev_count, delta);
6473
6474 /* mac faults */
6475 delta = ixl_stat_delta(sc,
6476 0, I40E_GLPRT_MLFC(sc->sc_port),
6477 &isc->isc_mac_local_faults_offset, isc->isc_has_offset);
6478 atomic_add_64(&isc->isc_mac_local_faults.ev_count, delta);
6479
6480 delta = ixl_stat_delta(sc,
6481 0, I40E_GLPRT_MRFC(sc->sc_port),
6482 &isc->isc_mac_remote_faults_offset, isc->isc_has_offset);
6483 atomic_add_64(&isc->isc_mac_remote_faults.ev_count, delta);
6484
6485 /* Flow control (LFC) stats */
6486 delta = ixl_stat_delta(sc,
6487 0, I40E_GLPRT_LXONRXC(sc->sc_port),
6488 &isc->isc_link_xon_rx_offset, isc->isc_has_offset);
6489 atomic_add_64(&isc->isc_link_xon_rx.ev_count, delta);
6490
6491 delta = ixl_stat_delta(sc,
6492 0, I40E_GLPRT_LXONTXC(sc->sc_port),
6493 &isc->isc_link_xon_tx_offset, isc->isc_has_offset);
6494 atomic_add_64(&isc->isc_link_xon_tx.ev_count, delta);
6495
6496 delta = ixl_stat_delta(sc,
6497 0, I40E_GLPRT_LXOFFRXC(sc->sc_port),
6498 &isc->isc_link_xoff_rx_offset, isc->isc_has_offset);
6499 atomic_add_64(&isc->isc_link_xoff_rx.ev_count, delta);
6500
6501 delta = ixl_stat_delta(sc,
6502 0, I40E_GLPRT_LXOFFTXC(sc->sc_port),
6503 &isc->isc_link_xoff_tx_offset, isc->isc_has_offset);
6504 atomic_add_64(&isc->isc_link_xoff_tx.ev_count, delta);
6505
6506 /* fragments */
6507 delta = ixl_stat_delta(sc,
6508 0, I40E_GLPRT_RFC(sc->sc_port),
6509 &isc->isc_rx_fragments_offset, isc->isc_has_offset);
6510 atomic_add_64(&isc->isc_rx_fragments.ev_count, delta);
6511
6512 delta = ixl_stat_delta(sc,
6513 0, I40E_GLPRT_RJC(sc->sc_port),
6514 &isc->isc_rx_jabber_offset, isc->isc_has_offset);
6515 atomic_add_64(&isc->isc_rx_jabber.ev_count, delta);
6516
6517 /* VSI rx counters */
6518 delta = ixl_stat_delta(sc,
6519 0, I40E_GLV_RDPC(sc->sc_vsi_stat_counter_idx),
6520 &isc->isc_vsi_rx_discards_offset, isc->isc_has_offset);
6521 atomic_add_64(&isc->isc_vsi_rx_discards.ev_count, delta);
6522
6523 delta = ixl_stat_delta(sc,
6524 I40E_GLV_GORCH(sc->sc_vsi_stat_counter_idx),
6525 I40E_GLV_GORCL(sc->sc_vsi_stat_counter_idx),
6526 &isc->isc_vsi_rx_bytes_offset, isc->isc_has_offset);
6527 atomic_add_64(&isc->isc_vsi_rx_bytes.ev_count, delta);
6528
6529 delta = ixl_stat_delta(sc,
6530 I40E_GLV_UPRCH(sc->sc_vsi_stat_counter_idx),
6531 I40E_GLV_UPRCL(sc->sc_vsi_stat_counter_idx),
6532 &isc->isc_vsi_rx_unicast_offset, isc->isc_has_offset);
6533 atomic_add_64(&isc->isc_vsi_rx_unicast.ev_count, delta);
6534
6535 delta = ixl_stat_delta(sc,
6536 I40E_GLV_MPRCH(sc->sc_vsi_stat_counter_idx),
6537 I40E_GLV_MPRCL(sc->sc_vsi_stat_counter_idx),
6538 &isc->isc_vsi_rx_multicast_offset, isc->isc_has_offset);
6539 atomic_add_64(&isc->isc_vsi_rx_multicast.ev_count, delta);
6540
6541 delta = ixl_stat_delta(sc,
6542 I40E_GLV_BPRCH(sc->sc_vsi_stat_counter_idx),
6543 I40E_GLV_BPRCL(sc->sc_vsi_stat_counter_idx),
6544 &isc->isc_vsi_rx_broadcast_offset, isc->isc_has_offset);
6545 atomic_add_64(&isc->isc_vsi_rx_broadcast.ev_count, delta);
6546
6547 /* VSI tx counters */
6548 delta = ixl_stat_delta(sc,
6549 0, I40E_GLV_TEPC(sc->sc_vsi_stat_counter_idx),
6550 &isc->isc_vsi_tx_errors_offset, isc->isc_has_offset);
6551 atomic_add_64(&isc->isc_vsi_tx_errors.ev_count, delta);
6552
6553 delta = ixl_stat_delta(sc,
6554 I40E_GLV_GOTCH(sc->sc_vsi_stat_counter_idx),
6555 I40E_GLV_GOTCL(sc->sc_vsi_stat_counter_idx),
6556 &isc->isc_vsi_tx_bytes_offset, isc->isc_has_offset);
6557 atomic_add_64(&isc->isc_vsi_tx_bytes.ev_count, delta);
6558
6559 delta = ixl_stat_delta(sc,
6560 I40E_GLV_UPTCH(sc->sc_vsi_stat_counter_idx),
6561 I40E_GLV_UPTCL(sc->sc_vsi_stat_counter_idx),
6562 &isc->isc_vsi_tx_unicast_offset, isc->isc_has_offset);
6563 atomic_add_64(&isc->isc_vsi_tx_unicast.ev_count, delta);
6564
6565 delta = ixl_stat_delta(sc,
6566 I40E_GLV_MPTCH(sc->sc_vsi_stat_counter_idx),
6567 I40E_GLV_MPTCL(sc->sc_vsi_stat_counter_idx),
6568 &isc->isc_vsi_tx_multicast_offset, isc->isc_has_offset);
6569 atomic_add_64(&isc->isc_vsi_tx_multicast.ev_count, delta);
6570
6571 delta = ixl_stat_delta(sc,
6572 I40E_GLV_BPTCH(sc->sc_vsi_stat_counter_idx),
6573 I40E_GLV_BPTCL(sc->sc_vsi_stat_counter_idx),
6574 &isc->isc_vsi_tx_broadcast_offset, isc->isc_has_offset);
6575 atomic_add_64(&isc->isc_vsi_tx_broadcast.ev_count, delta);
6576 }
6577
6578 static int
6579 ixl_setup_sysctls(struct ixl_softc *sc)
6580 {
6581 const char *devname;
6582 struct sysctllog **log;
6583 const struct sysctlnode *rnode, *rxnode, *txnode;
6584 int error;
6585
6586 log = &sc->sc_sysctllog;
6587 devname = device_xname(sc->sc_dev);
6588
6589 error = sysctl_createv(log, 0, NULL, &rnode,
6590 0, CTLTYPE_NODE, devname,
6591 SYSCTL_DESCR("ixl information and settings"),
6592 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
6593 if (error)
6594 goto out;
6595
6596 error = sysctl_createv(log, 0, &rnode, NULL,
6597 CTLFLAG_READWRITE, CTLTYPE_BOOL, "txrx_workqueue",
6598 SYSCTL_DESCR("Use workqueue for packet processing"),
6599 NULL, 0, &sc->sc_txrx_workqueue, 0, CTL_CREATE, CTL_EOL);
6600 if (error)
6601 goto out;
6602
6603 error = sysctl_createv(log, 0, &rnode, NULL,
6604 CTLFLAG_READONLY, CTLTYPE_INT, "stats_interval",
6605 SYSCTL_DESCR("Statistics collection interval in milliseconds"),
6606 NULL, 0, &sc->sc_stats_intval, 0, CTL_CREATE, CTL_EOL);
6607
6608 error = sysctl_createv(log, 0, &rnode, &rxnode,
6609 0, CTLTYPE_NODE, "rx",
6610 SYSCTL_DESCR("ixl information and settings for Rx"),
6611 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
6612 if (error)
6613 goto out;
6614
6615 error = sysctl_createv(log, 0, &rxnode, NULL,
6616 CTLFLAG_READWRITE, CTLTYPE_INT, "itr",
6617 SYSCTL_DESCR("Interrupt Throttling"),
6618 ixl_sysctl_itr_handler, 0,
6619 (void *)sc, 0, CTL_CREATE, CTL_EOL);
6620 if (error)
6621 goto out;
6622
6623 error = sysctl_createv(log, 0, &rxnode, NULL,
6624 CTLFLAG_READONLY, CTLTYPE_INT, "descriptor_num",
6625 SYSCTL_DESCR("the number of rx descriptors"),
6626 NULL, 0, &sc->sc_rx_ring_ndescs, 0, CTL_CREATE, CTL_EOL);
6627 if (error)
6628 goto out;
6629
6630 error = sysctl_createv(log, 0, &rxnode, NULL,
6631 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
6632 SYSCTL_DESCR("max number of Rx packets"
6633 " to process for interrupt processing"),
6634 NULL, 0, &sc->sc_rx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
6635 if (error)
6636 goto out;
6637
6638 error = sysctl_createv(log, 0, &rxnode, NULL,
6639 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
6640 SYSCTL_DESCR("max number of Rx packets"
6641 " to process for deferred processing"),
6642 NULL, 0, &sc->sc_rx_process_limit, 0, CTL_CREATE, CTL_EOL);
6643 if (error)
6644 goto out;
6645
6646 error = sysctl_createv(log, 0, &rnode, &txnode,
6647 0, CTLTYPE_NODE, "tx",
6648 SYSCTL_DESCR("ixl information and settings for Tx"),
6649 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
6650 if (error)
6651 goto out;
6652
6653 error = sysctl_createv(log, 0, &txnode, NULL,
6654 CTLFLAG_READWRITE, CTLTYPE_INT, "itr",
6655 SYSCTL_DESCR("Interrupt Throttling"),
6656 ixl_sysctl_itr_handler, 0,
6657 (void *)sc, 0, CTL_CREATE, CTL_EOL);
6658 if (error)
6659 goto out;
6660
6661 error = sysctl_createv(log, 0, &txnode, NULL,
6662 CTLFLAG_READONLY, CTLTYPE_INT, "descriptor_num",
6663 SYSCTL_DESCR("the number of tx descriptors"),
6664 NULL, 0, &sc->sc_tx_ring_ndescs, 0, CTL_CREATE, CTL_EOL);
6665 if (error)
6666 goto out;
6667
6668 error = sysctl_createv(log, 0, &txnode, NULL,
6669 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
6670 SYSCTL_DESCR("max number of Tx packets"
6671 " to process for interrupt processing"),
6672 NULL, 0, &sc->sc_tx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
6673 if (error)
6674 goto out;
6675
6676 error = sysctl_createv(log, 0, &txnode, NULL,
6677 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
6678 SYSCTL_DESCR("max number of Tx packets"
6679 " to process for deferred processing"),
6680 NULL, 0, &sc->sc_tx_process_limit, 0, CTL_CREATE, CTL_EOL);
6681 if (error)
6682 goto out;
6683
6684 out:
6685 if (error) {
6686 aprint_error_dev(sc->sc_dev,
6687 "unable to create sysctl node\n");
6688 sysctl_teardown(log);
6689 }
6690
6691 return error;
6692 }
6693
6694 static void
6695 ixl_teardown_sysctls(struct ixl_softc *sc)
6696 {
6697
6698 sysctl_teardown(&sc->sc_sysctllog);
6699 }
6700
6701 static bool
6702 ixl_sysctlnode_is_rx(struct sysctlnode *node)
6703 {
6704
6705 if (strstr(node->sysctl_parent->sysctl_name, "rx") != NULL)
6706 return true;
6707
6708 return false;
6709 }
6710
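/*
 * ixl_sysctl_itr_handler:
 *	Sysctl handler shared by the rx.itr and tx.itr nodes.  New values
 *	are only accepted while the interface is down (they are written to
 *	the hardware in ixl_init()) and must fit in the 11-bit ITR field.
 */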
6711 static int
6712 ixl_sysctl_itr_handler(SYSCTLFN_ARGS)
6713 {
6714 struct sysctlnode node = *rnode;
6715 struct ixl_softc *sc = (struct ixl_softc *)node.sysctl_data;
6716 struct ifnet *ifp = &sc->sc_ec.ec_if;
6717 uint32_t newitr, *itrptr;
6718 int error;
6719
6720 if (ixl_sysctlnode_is_rx(&node)) {
6721 itrptr = &sc->sc_itr_rx;
6722 } else {
6723 itrptr = &sc->sc_itr_tx;
6724 }
6725
6726 newitr = *itrptr;
6727 node.sysctl_data = &newitr;
6728 node.sysctl_size = sizeof(newitr);
6729
6730 error = sysctl_lookup(SYSCTLFN_CALL(&node));
6731
6732 if (error || newp == NULL)
6733 return error;
6734
6735 /* ITRs are applied in ixl_init() for simple implementation */
6736 if (ISSET(ifp->if_flags, IFF_RUNNING))
6737 return EBUSY;
6738
6739 if (newitr > 0x07ff)
6740 return EINVAL;
6741
6742 *itrptr = newitr;
6743
6744 return 0;
6745 }
6746
6747 static struct workqueue *
6748 ixl_workq_create(const char *name, pri_t prio, int ipl, int flags)
6749 {
6750 struct workqueue *wq;
6751 int error;
6752
6753 error = workqueue_create(&wq, name, ixl_workq_work, NULL,
6754 prio, ipl, flags);
6755
6756 if (error)
6757 return NULL;
6758
6759 return wq;
6760 }
6761
6762 static void
6763 ixl_workq_destroy(struct workqueue *wq)
6764 {
6765
6766 workqueue_destroy(wq);
6767 }
6768
6769 static void
6770 ixl_work_set(struct ixl_work *work, void (*func)(void *), void *arg)
6771 {
6772
6773 memset(work, 0, sizeof(*work));
6774 work->ixw_func = func;
6775 work->ixw_arg = arg;
6776 }
6777
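/*
 * ixl_work_add:
 *	Enqueue the work item unless it is already pending; the ixw_added
 *	flag is cleared again in ixl_workq_work() just before the handler
 *	runs, so each item is queued at most once at a time.
 */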
6778 static void
6779 ixl_work_add(struct workqueue *wq, struct ixl_work *work)
6780 {
6781 if (atomic_cas_uint(&work->ixw_added, 0, 1) != 0)
6782 return;
6783
6784 kpreempt_disable();
6785 workqueue_enqueue(wq, &work->ixw_cookie, NULL);
6786 kpreempt_enable();
6787 }
6788
6789 static void
6790 ixl_work_wait(struct workqueue *wq, struct ixl_work *work)
6791 {
6792
6793 workqueue_wait(wq, &work->ixw_cookie);
6794 }
6795
6796 static void
6797 ixl_workq_work(struct work *wk, void *context)
6798 {
6799 struct ixl_work *work;
6800
6801 work = container_of(wk, struct ixl_work, ixw_cookie);
6802
6803 atomic_swap_uint(&work->ixw_added, 0);
6804 work->ixw_func(work->ixw_arg);
6805 }
6806
6807 static int
6808 ixl_rx_ctl_read(struct ixl_softc *sc, uint32_t reg, uint32_t *rv)
6809 {
6810 struct ixl_aq_desc iaq;
6811
6812 memset(&iaq, 0, sizeof(iaq));
6813 iaq.iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_REG_READ);
6814 iaq.iaq_param[1] = htole32(reg);
6815
6816 if (ixl_atq_poll(sc, &iaq, 250) != 0)
6817 return ETIMEDOUT;
6818
6819 	switch (le16toh(iaq.iaq_retval)) {
6820 case IXL_AQ_RC_OK:
6821 /* success */
6822 break;
6823 case IXL_AQ_RC_EACCES:
6824 return EPERM;
6825 case IXL_AQ_RC_EAGAIN:
6826 return EAGAIN;
6827 default:
6828 return EIO;
6829 }
6830
6831 	*rv = le32toh(iaq.iaq_param[3]);
6832 return 0;
6833 }
6834
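/*
 * ixl_rd_rx_csr:
 *	Read an RX control register.  If the firmware requires it
 *	(IXL_SC_AQ_FLAG_RXCTL), the read goes through the admin queue,
 *	retrying a few times on EAGAIN; otherwise, or if the admin queue
 *	read keeps failing, fall back to a direct register read.
 */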
6835 static uint32_t
6836 ixl_rd_rx_csr(struct ixl_softc *sc, uint32_t reg)
6837 {
6838 uint32_t val;
6839 int rv, retry, retry_limit;
6840
6841 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL)) {
6842 retry_limit = 5;
6843 } else {
6844 retry_limit = 0;
6845 }
6846
6847 for (retry = 0; retry < retry_limit; retry++) {
6848 rv = ixl_rx_ctl_read(sc, reg, &val);
6849 if (rv == 0)
6850 return val;
6851 else if (rv == EAGAIN)
6852 delaymsec(1);
6853 else
6854 break;
6855 }
6856
6857 val = ixl_rd(sc, reg);
6858
6859 return val;
6860 }
6861
6862 static int
6863 ixl_rx_ctl_write(struct ixl_softc *sc, uint32_t reg, uint32_t value)
6864 {
6865 struct ixl_aq_desc iaq;
6866
6867 memset(&iaq, 0, sizeof(iaq));
6868 iaq.iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_REG_WRITE);
6869 iaq.iaq_param[1] = htole32(reg);
6870 iaq.iaq_param[3] = htole32(value);
6871
6872 if (ixl_atq_poll(sc, &iaq, 250) != 0)
6873 return ETIMEDOUT;
6874
6875 	switch (le16toh(iaq.iaq_retval)) {
6876 case IXL_AQ_RC_OK:
6877 /* success */
6878 break;
6879 case IXL_AQ_RC_EACCES:
6880 return EPERM;
6881 case IXL_AQ_RC_EAGAIN:
6882 return EAGAIN;
6883 default:
6884 return EIO;
6885 }
6886
6887 return 0;
6888 }
6889
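/*
 * ixl_wr_rx_csr:
 *	Write an RX control register, going through the admin queue when
 *	the firmware requires it and falling back to a direct register
 *	write otherwise (or when the admin queue write keeps failing).
 */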
6890 static void
6891 ixl_wr_rx_csr(struct ixl_softc *sc, uint32_t reg, uint32_t value)
6892 {
6893 int rv, retry, retry_limit;
6894
6895 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL)) {
6896 retry_limit = 5;
6897 } else {
6898 retry_limit = 0;
6899 }
6900
6901 for (retry = 0; retry < retry_limit; retry++) {
6902 rv = ixl_rx_ctl_write(sc, reg, value);
6903 if (rv == 0)
6904 return;
6905 else if (rv == EAGAIN)
6906 delaymsec(1);
6907 else
6908 break;
6909 }
6910
6911 ixl_wr(sc, reg, value);
6912 }
6913
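/*
 * ixl_nvm_lock:
 *	Acquire ownership of the NVM resource through the admin queue
 *	before reading ('R') or writing it.  A no-op when the firmware
 *	does not require NVM resource arbitration.
 */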
6914 static int
6915 ixl_nvm_lock(struct ixl_softc *sc, char rw)
6916 {
6917 struct ixl_aq_desc iaq;
6918 struct ixl_aq_req_resource_param *param;
6919 int rv;
6920
6921 if (!ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK))
6922 return 0;
6923
6924 memset(&iaq, 0, sizeof(iaq));
6925 iaq.iaq_opcode = htole16(IXL_AQ_OP_REQUEST_RESOURCE);
6926
6927 param = (struct ixl_aq_req_resource_param *)&iaq.iaq_param;
6928 param->resource_id = htole16(IXL_AQ_RESOURCE_ID_NVM);
6929 if (rw == 'R') {
6930 param->access_type = htole16(IXL_AQ_RESOURCE_ACCES_READ);
6931 } else {
6932 param->access_type = htole16(IXL_AQ_RESOURCE_ACCES_WRITE);
6933 }
6934
6935 rv = ixl_atq_poll(sc, &iaq, 250);
6936
6937 if (rv != 0)
6938 return ETIMEDOUT;
6939
6940 switch (le16toh(iaq.iaq_retval)) {
6941 case IXL_AQ_RC_OK:
6942 break;
6943 case IXL_AQ_RC_EACCES:
6944 return EACCES;
6945 case IXL_AQ_RC_EBUSY:
6946 return EBUSY;
6947 case IXL_AQ_RC_EPERM:
6948 		return EPERM;
	default:
		return EIO;
6949 	}
6950
6951 return 0;
6952 }
6953
6954 static int
6955 ixl_nvm_unlock(struct ixl_softc *sc)
6956 {
6957 struct ixl_aq_desc iaq;
6958 struct ixl_aq_rel_resource_param *param;
6959 int rv;
6960
6961 if (!ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK))
6962 return 0;
6963
6964 memset(&iaq, 0, sizeof(iaq));
6965 iaq.iaq_opcode = htole16(IXL_AQ_OP_RELEASE_RESOURCE);
6966
6967 param = (struct ixl_aq_rel_resource_param *)&iaq.iaq_param;
6968 param->resource_id = htole16(IXL_AQ_RESOURCE_ID_NVM);
6969
6970 rv = ixl_atq_poll(sc, &iaq, 250);
6971
6972 if (rv != 0)
6973 return ETIMEDOUT;
6974
6975 switch (le16toh(iaq.iaq_retval)) {
6976 case IXL_AQ_RC_OK:
6977 break;
6978 default:
6979 return EIO;
6980 }
6981 return 0;
6982 }
6983
6984 static int
6985 ixl_srdone_poll(struct ixl_softc *sc)
6986 {
6987 int wait_count;
6988 uint32_t reg;
6989
6990 for (wait_count = 0; wait_count < IXL_SRRD_SRCTL_ATTEMPTS;
6991 wait_count++) {
6992 reg = ixl_rd(sc, I40E_GLNVM_SRCTL);
6993 if (ISSET(reg, I40E_GLNVM_SRCTL_DONE_MASK))
6994 break;
6995
6996 delaymsec(5);
6997 }
6998
6999 if (wait_count == IXL_SRRD_SRCTL_ATTEMPTS)
7000 return -1;
7001
7002 return 0;
7003 }
7004
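/*
 * ixl_nvm_read_srctl:
 *	Read one 16-bit word from the NVM shadow RAM using the legacy
 *	GLNVM_SRCTL/GLNVM_SRDATA register interface, polling for the
 *	DONE bit before and after issuing the read.
 */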
7005 static int
7006 ixl_nvm_read_srctl(struct ixl_softc *sc, uint16_t offset, uint16_t *data)
7007 {
7008 uint32_t reg;
7009
7010 if (ixl_srdone_poll(sc) != 0)
7011 return ETIMEDOUT;
7012
7013 reg = ((uint32_t)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
7014 __BIT(I40E_GLNVM_SRCTL_START_SHIFT);
7015 ixl_wr(sc, I40E_GLNVM_SRCTL, reg);
7016
7017 if (ixl_srdone_poll(sc) != 0) {
7018 aprint_debug("NVM read error: couldn't access "
7019 "Shadow RAM address: 0x%x\n", offset);
7020 return ETIMEDOUT;
7021 }
7022
7023 reg = ixl_rd(sc, I40E_GLNVM_SRDATA);
7024 *data = (uint16_t)__SHIFTOUT(reg, I40E_GLNVM_SRDATA_RDDATA_MASK);
7025
7026 return 0;
7027 }
7028
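/*
 * ixl_nvm_read_aq:
 *	Read NVM contents through the NVM_READ admin command into the
 *	shared admin queue DMA buffer.  The offset is given in 16-bit
 *	words and converted to a 24-bit byte offset for the command.
 */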
7029 static int
7030 ixl_nvm_read_aq(struct ixl_softc *sc, uint16_t offset_word,
7031 void *data, size_t len)
7032 {
7033 struct ixl_dmamem *idm;
7034 struct ixl_aq_desc iaq;
7035 struct ixl_aq_nvm_param *param;
7036 uint32_t offset_bytes;
7037 int rv;
7038
7039 idm = &sc->sc_aqbuf;
7040 if (len > IXL_DMA_LEN(idm))
7041 return ENOMEM;
7042
7043 memset(IXL_DMA_KVA(idm), 0, IXL_DMA_LEN(idm));
7044 memset(&iaq, 0, sizeof(iaq));
7045 iaq.iaq_opcode = htole16(IXL_AQ_OP_NVM_READ);
7046 iaq.iaq_flags = htole16(IXL_AQ_BUF |
7047 ((len > I40E_AQ_LARGE_BUF) ? IXL_AQ_LB : 0));
7048 iaq.iaq_datalen = htole16(len);
7049 ixl_aq_dva(&iaq, IXL_DMA_DVA(idm));
7050
7051 param = (struct ixl_aq_nvm_param *)iaq.iaq_param;
7052 param->command_flags = IXL_AQ_NVM_LAST_CMD;
7053 param->module_pointer = 0;
7054 param->length = htole16(len);
7055 offset_bytes = (uint32_t)offset_word * 2;
7056 offset_bytes &= 0x00FFFFFF;
7057 param->offset = htole32(offset_bytes);
7058
7059 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
7060 BUS_DMASYNC_PREREAD);
7061
7062 rv = ixl_atq_poll(sc, &iaq, 250);
7063
7064 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
7065 BUS_DMASYNC_POSTREAD);
7066
7067 if (rv != 0) {
7068 return ETIMEDOUT;
7069 }
7070
7071 switch (le16toh(iaq.iaq_retval)) {
7072 case IXL_AQ_RC_OK:
7073 break;
7074 case IXL_AQ_RC_EPERM:
7075 return EPERM;
7076 case IXL_AQ_RC_EINVAL:
7077 return EINVAL;
7078 case IXL_AQ_RC_EBUSY:
7079 return EBUSY;
7080 case IXL_AQ_RC_EIO:
7081 default:
7082 return EIO;
7083 }
7084
7085 memcpy(data, IXL_DMA_KVA(idm), len);
7086
7087 return 0;
7088 }
7089
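/*
 * ixl_rd16_nvm:
 *	Read one 16-bit NVM word, taking the NVM lock and using either the
 *	admin queue method (data returned little-endian) or the shadow RAM
 *	register method, depending on the firmware's capabilities.
 */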
7090 static int
7091 ixl_rd16_nvm(struct ixl_softc *sc, uint16_t offset, uint16_t *data)
7092 {
7093 int error;
7094 uint16_t buf;
7095
7096 error = ixl_nvm_lock(sc, 'R');
7097 if (error)
7098 return error;
7099
7100 if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMREAD)) {
7101 error = ixl_nvm_read_aq(sc, offset,
7102 &buf, sizeof(buf));
7103 if (error == 0)
7104 *data = le16toh(buf);
7105 } else {
7106 error = ixl_nvm_read_srctl(sc, offset, &buf);
7107 if (error == 0)
7108 *data = buf;
7109 }
7110
7111 ixl_nvm_unlock(sc);
7112
7113 return error;
7114 }
7115
7116 MODULE(MODULE_CLASS_DRIVER, if_ixl, "pci");
7117
7118 #ifdef _MODULE
7119 #include "ioconf.c"
7120 #endif
7121
7122 #ifdef _MODULE
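/*
 * ixl_parse_modprop:
 *	Pick up module load-time tunables (nomsix, stats_interval,
 *	nqps_limit, rx_ndescs, tx_ndescs) from the module property
 *	dictionary, applying loose sanity checks on the values.
 */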
7123 static void
7124 ixl_parse_modprop(prop_dictionary_t dict)
7125 {
7126 prop_object_t obj;
7127 int64_t val;
7128 uint64_t uval;
7129
7130 if (dict == NULL)
7131 return;
7132
7133 obj = prop_dictionary_get(dict, "nomsix");
7134 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_BOOL) {
7135 ixl_param_nomsix = prop_bool_true((prop_bool_t)obj);
7136 }
7137
7138 obj = prop_dictionary_get(dict, "stats_interval");
7139 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
7140 val = prop_number_signed_value((prop_number_t)obj);
7141
7142 		/* the range is arbitrary; just a loose sanity check */
7143 if (100 < val && val < 180000) {
7144 ixl_param_stats_interval = val;
7145 }
7146 }
7147
7148 obj = prop_dictionary_get(dict, "nqps_limit");
7149 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
7150 val = prop_number_signed_value((prop_number_t)obj);
7151
7152 if (val <= INT32_MAX)
7153 ixl_param_nqps_limit = val;
7154 }
7155
7156 obj = prop_dictionary_get(dict, "rx_ndescs");
7157 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
7158 uval = prop_number_unsigned_integer_value((prop_number_t)obj);
7159
7160 if (uval > 8)
7161 ixl_param_rx_ndescs = uval;
7162 }
7163
7164 obj = prop_dictionary_get(dict, "tx_ndescs");
7165 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
7166 uval = prop_number_unsigned_integer_value((prop_number_t)obj);
7167
7168 if (uval > IXL_TX_PKT_DESCS)
7169 ixl_param_tx_ndescs = uval;
7170 }
7171
7172 }
7173 #endif
7174
7175 static int
7176 if_ixl_modcmd(modcmd_t cmd, void *opaque)
7177 {
7178 int error = 0;
7179
7180 #ifdef _MODULE
7181 switch (cmd) {
7182 case MODULE_CMD_INIT:
7183 ixl_parse_modprop((prop_dictionary_t)opaque);
7184 error = config_init_component(cfdriver_ioconf_if_ixl,
7185 cfattach_ioconf_if_ixl, cfdata_ioconf_if_ixl);
7186 break;
7187 case MODULE_CMD_FINI:
7188 error = config_fini_component(cfdriver_ioconf_if_ixl,
7189 cfattach_ioconf_if_ixl, cfdata_ioconf_if_ixl);
7190 break;
7191 default:
7192 error = ENOTTY;
7193 break;
7194 }
7195 #endif
7196
7197 return error;
7198 }
7199