1 /*	$NetBSD: if_ixl.c,v 1.11 2019/12/20 02:12:31 yamaguchi Exp $	*/
2
3 /*
4 * Copyright (c) 2013-2015, Intel Corporation
5 * All rights reserved.
6
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
12 *
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * 3. Neither the name of the Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 /*
35 * Copyright (c) 2016,2017 David Gwynne <dlg (at) openbsd.org>
36 *
37 * Permission to use, copy, modify, and distribute this software for any
38 * purpose with or without fee is hereby granted, provided that the above
39 * copyright notice and this permission notice appear in all copies.
40 *
41 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
42 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
43 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
44 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
45 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
46 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
47 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
48 */
49
50 /*
51 * Copyright (c) 2019 Internet Initiative Japan, Inc.
52 * All rights reserved.
53 *
54 * Redistribution and use in source and binary forms, with or without
55 * modification, are permitted provided that the following conditions
56 * are met:
57 * 1. Redistributions of source code must retain the above copyright
58 * notice, this list of conditions and the following disclaimer.
59 * 2. Redistributions in binary form must reproduce the above copyright
60 * notice, this list of conditions and the following disclaimer in the
61 * documentation and/or other materials provided with the distribution.
62 *
63 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
64 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
65 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
66 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
67 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
68 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
69 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
70 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
71 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
72 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
73 * POSSIBILITY OF SUCH DAMAGE.
74 */
75
76 #include <sys/cdefs.h>
77
78 #ifdef _KERNEL_OPT
79 #include "opt_net_mpsafe.h"
80 #endif
81
82 #include <sys/param.h>
83 #include <sys/types.h>
84
85 #include <sys/cpu.h>
86 #include <sys/device.h>
87 #include <sys/evcnt.h>
88 #include <sys/interrupt.h>
89 #include <sys/kmem.h>
90 #include <sys/malloc.h>
91 #include <sys/module.h>
92 #include <sys/mutex.h>
93 #include <sys/pcq.h>
94 #include <sys/syslog.h>
95 #include <sys/workqueue.h>
96
97 #include <sys/bus.h>
98
99 #include <net/bpf.h>
100 #include <net/if.h>
101 #include <net/if_dl.h>
102 #include <net/if_media.h>
103 #include <net/if_ether.h>
104 #include <net/rss_config.h>
105
106 #include <dev/pci/pcivar.h>
107 #include <dev/pci/pcidevs.h>
108
109 #include <dev/pci/if_ixlreg.h>
110 #include <dev/pci/if_ixlvar.h>
111
112 struct ixl_softc; /* defined below */
113
114 #define I40E_PF_RESET_WAIT_COUNT 200
115 #define I40E_AQ_LARGE_BUF 512
116
117 /* bitfields for Tx queue mapping in QTX_CTL */
118 #define I40E_QTX_CTL_VF_QUEUE 0x0
119 #define I40E_QTX_CTL_VM_QUEUE 0x1
120 #define I40E_QTX_CTL_PF_QUEUE 0x2
121
122 #define I40E_QUEUE_TYPE_EOL 0x7ff
123 #define I40E_INTR_NOTX_QUEUE 0
124
125 #define I40E_QUEUE_TYPE_RX 0x0
126 #define I40E_QUEUE_TYPE_TX 0x1
127 #define I40E_QUEUE_TYPE_PE_CEQ 0x2
128 #define I40E_QUEUE_TYPE_UNKNOWN 0x3
129
130 #define I40E_ITR_INDEX_RX 0x0
131 #define I40E_ITR_INDEX_TX 0x1
132 #define I40E_ITR_INDEX_OTHER 0x2
133 #define I40E_ITR_INDEX_NONE 0x3
134
135 #define I40E_INTR_NOTX_QUEUE 0
136 #define I40E_INTR_NOTX_INTR 0
137 #define I40E_INTR_NOTX_RX_QUEUE 0
138 #define I40E_INTR_NOTX_TX_QUEUE 1
139 #define I40E_INTR_NOTX_RX_MASK I40E_PFINT_ICR0_QUEUE_0_MASK
140 #define I40E_INTR_NOTX_TX_MASK I40E_PFINT_ICR0_QUEUE_1_MASK
141
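/*
 * default RSS hash-enable (HENA) masks: one bit per I40E_FILTER_PCTYPE_*
 * value for which the hardware computes an RSS hash; the X722 supports a
 * few additional packet classifier types on top of the XL710 set.
 */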
142 #define BIT_ULL(a) (1ULL << (a))
143 #define IXL_RSS_HENA_DEFAULT_BASE \
144 (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
145 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
146 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
147 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
148 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
149 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
150 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
151 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
152 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
153 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
154 BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
155 #define IXL_RSS_HENA_DEFAULT_XL710 IXL_RSS_HENA_DEFAULT_BASE
156 #define IXL_RSS_HENA_DEFAULT_X722 (IXL_RSS_HENA_DEFAULT_XL710 | \
157 BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
158 BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
159 BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
160 BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
161 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
162 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK))
163 #define I40E_HASH_LUT_SIZE_128 0
164 #define IXL_RSS_KEY_SIZE_REG 13
165
166 #define IXL_ICR0_CRIT_ERR_MASK \
167 (I40E_PFINT_ICR0_PCI_EXCEPTION_MASK | \
168 I40E_PFINT_ICR0_ECC_ERR_MASK | \
169 I40E_PFINT_ICR0_PE_CRITERR_MASK)
170
171 #define IXL_TX_PKT_DESCS 8
172 #define IXL_TX_QUEUE_ALIGN 128
173 #define IXL_RX_QUEUE_ALIGN 128
174
175 #define IXL_HARDMTU 9712 /* 9726 - ETHER_HDR_LEN */
176
177 #define IXL_PCIREG PCI_MAPREG_START
178
179 #define IXL_ITR0 0x0
180 #define IXL_ITR1 0x1
181 #define IXL_ITR2 0x2
182 #define IXL_NOITR 0x3
183
184 #define IXL_AQ_NUM 256
185 #define IXL_AQ_MASK (IXL_AQ_NUM - 1)
186 #define IXL_AQ_ALIGN		64	/* alignment for the admin queue rings */
187 #define IXL_AQ_BUFLEN 4096
188
189 #define IXL_HMC_ROUNDUP 512
190 #define IXL_HMC_PGSIZE 4096
191 #define IXL_HMC_DVASZ sizeof(uint64_t)
192 #define IXL_HMC_PGS (IXL_HMC_PGSIZE / IXL_HMC_DVASZ)
193 #define IXL_HMC_L2SZ (IXL_HMC_PGSIZE * IXL_HMC_PGS)
194 #define IXL_HMC_PDVALID 1ULL
195
196 #define IXL_ATQ_EXEC_TIMEOUT (10 * hz)
197
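/*
 * register offsets and masks for an admin queue pair: the ATQ carries
 * commands from the driver to the firmware, the ARQ carries events from
 * the firmware back to the driver.
 */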
198 struct ixl_aq_regs {
199 bus_size_t atq_tail;
200 bus_size_t atq_head;
201 bus_size_t atq_len;
202 bus_size_t atq_bal;
203 bus_size_t atq_bah;
204
205 bus_size_t arq_tail;
206 bus_size_t arq_head;
207 bus_size_t arq_len;
208 bus_size_t arq_bal;
209 bus_size_t arq_bah;
210
211 uint32_t atq_len_enable;
212 uint32_t atq_tail_mask;
213 uint32_t atq_head_mask;
214
215 uint32_t arq_len_enable;
216 uint32_t arq_tail_mask;
217 uint32_t arq_head_mask;
218 };
219
220 struct ixl_phy_type {
221 uint64_t phy_type;
222 uint64_t ifm_type;
223 };
224
225 struct ixl_speed_type {
226 uint8_t dev_speed;
227 uint64_t net_speed;
228 };
229
230 struct ixl_aq_buf {
231 SIMPLEQ_ENTRY(ixl_aq_buf)
232 aqb_entry;
233 void *aqb_data;
234 bus_dmamap_t aqb_map;
235 bus_dma_segment_t aqb_seg;
236 size_t aqb_size;
237 int aqb_nsegs;
238 };
239 SIMPLEQ_HEAD(ixl_aq_bufs, ixl_aq_buf);
240
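/* a single DMA-safe allocation: its dmamap, backing segment, size and KVA */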
241 struct ixl_dmamem {
242 bus_dmamap_t ixm_map;
243 bus_dma_segment_t ixm_seg;
244 int ixm_nsegs;
245 size_t ixm_size;
246 void *ixm_kva;
247 };
248
249 #define IXL_DMA_MAP(_ixm) ((_ixm)->ixm_map)
250 #define IXL_DMA_DVA(_ixm) ((_ixm)->ixm_map->dm_segs[0].ds_addr)
251 #define IXL_DMA_KVA(_ixm) ((void *)(_ixm)->ixm_kva)
252 #define IXL_DMA_LEN(_ixm) ((_ixm)->ixm_size)
253
254 struct ixl_hmc_entry {
255 uint64_t hmc_base;
256 uint32_t hmc_count;
257 uint64_t hmc_size;
258 };
259
260 enum ixl_hmc_types {
261 IXL_HMC_LAN_TX = 0,
262 IXL_HMC_LAN_RX,
263 IXL_HMC_FCOE_CTX,
264 IXL_HMC_FCOE_FILTER,
265 IXL_HMC_COUNT
266 };
267
268 struct ixl_hmc_pack {
269 uint16_t offset;
270 uint16_t width;
271 uint16_t lsb;
272 };
273
274 /*
275  * these hmc objects have weird sizes and alignments, so these are abstract
276  * representations of them that are nice for C code to populate.
277  *
278  * the packing code relies on little-endian values being stored in the fields,
279  * on no high bits in the fields being set, and on the fields being packed in
280  * the same order as they appear in the ctx structure.
281 */
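/*
 * illustrative example: with the ixl_hmc_pack_rxq table below, the 13-bit
 * qlen field starts at bit 89 of the rx queue context, i.e. it spans bytes
 * 11-12 of the context buffer; ixl_hmc_pack() walks such a table and copies
 * each field into its bit position.
 */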
282
283 struct ixl_hmc_rxq {
284 uint16_t head;
285 uint8_t cpuid;
286 uint64_t base;
287 #define IXL_HMC_RXQ_BASE_UNIT 128
288 uint16_t qlen;
289 uint16_t dbuff;
290 #define IXL_HMC_RXQ_DBUFF_UNIT 128
291 uint8_t hbuff;
292 #define IXL_HMC_RXQ_HBUFF_UNIT 64
293 uint8_t dtype;
294 #define IXL_HMC_RXQ_DTYPE_NOSPLIT 0x0
295 #define IXL_HMC_RXQ_DTYPE_HSPLIT 0x1
296 #define IXL_HMC_RXQ_DTYPE_SPLIT_ALWAYS 0x2
297 uint8_t dsize;
298 #define IXL_HMC_RXQ_DSIZE_16 0
299 #define IXL_HMC_RXQ_DSIZE_32 1
300 uint8_t crcstrip;
301 uint8_t fc_ena;
302 uint8_t l2sel;
303 uint8_t hsplit_0;
304 uint8_t hsplit_1;
305 uint8_t showiv;
306 uint16_t rxmax;
307 uint8_t tphrdesc_ena;
308 uint8_t tphwdesc_ena;
309 uint8_t tphdata_ena;
310 uint8_t tphhead_ena;
311 uint8_t lrxqthresh;
312 uint8_t prefena;
313 };
314
315 static const struct ixl_hmc_pack ixl_hmc_pack_rxq[] = {
316 { offsetof(struct ixl_hmc_rxq, head), 13, 0 },
317 { offsetof(struct ixl_hmc_rxq, cpuid), 8, 13 },
318 { offsetof(struct ixl_hmc_rxq, base), 57, 32 },
319 { offsetof(struct ixl_hmc_rxq, qlen), 13, 89 },
320 { offsetof(struct ixl_hmc_rxq, dbuff), 7, 102 },
321 { offsetof(struct ixl_hmc_rxq, hbuff), 5, 109 },
322 { offsetof(struct ixl_hmc_rxq, dtype), 2, 114 },
323 { offsetof(struct ixl_hmc_rxq, dsize), 1, 116 },
324 { offsetof(struct ixl_hmc_rxq, crcstrip), 1, 117 },
325 { offsetof(struct ixl_hmc_rxq, fc_ena), 1, 118 },
326 { offsetof(struct ixl_hmc_rxq, l2sel), 1, 119 },
327 { offsetof(struct ixl_hmc_rxq, hsplit_0), 4, 120 },
328 { offsetof(struct ixl_hmc_rxq, hsplit_1), 2, 124 },
329 { offsetof(struct ixl_hmc_rxq, showiv), 1, 127 },
330 { offsetof(struct ixl_hmc_rxq, rxmax), 14, 174 },
331 { offsetof(struct ixl_hmc_rxq, tphrdesc_ena), 1, 193 },
332 { offsetof(struct ixl_hmc_rxq, tphwdesc_ena), 1, 194 },
333 { offsetof(struct ixl_hmc_rxq, tphdata_ena), 1, 195 },
334 { offsetof(struct ixl_hmc_rxq, tphhead_ena), 1, 196 },
335 { offsetof(struct ixl_hmc_rxq, lrxqthresh), 3, 198 },
336 { offsetof(struct ixl_hmc_rxq, prefena), 1, 201 },
337 };
338
339 #define IXL_HMC_RXQ_MINSIZE (201 + 1)
340
341 struct ixl_hmc_txq {
342 uint16_t head;
343 uint8_t new_context;
344 uint64_t base;
345 #define IXL_HMC_TXQ_BASE_UNIT 128
346 uint8_t fc_ena;
347 uint8_t timesync_ena;
348 uint8_t fd_ena;
349 uint8_t alt_vlan_ena;
350 uint16_t thead_wb;
351 uint8_t cpuid;
352 uint8_t head_wb_ena;
353 #define IXL_HMC_TXQ_DESC_WB 0
354 #define IXL_HMC_TXQ_HEAD_WB 1
355 uint16_t qlen;
356 uint8_t tphrdesc_ena;
357 uint8_t tphrpacket_ena;
358 uint8_t tphwdesc_ena;
359 uint64_t head_wb_addr;
360 uint32_t crc;
361 uint16_t rdylist;
362 uint8_t rdylist_act;
363 };
364
365 static const struct ixl_hmc_pack ixl_hmc_pack_txq[] = {
366 { offsetof(struct ixl_hmc_txq, head), 13, 0 },
367 { offsetof(struct ixl_hmc_txq, new_context), 1, 30 },
368 { offsetof(struct ixl_hmc_txq, base), 57, 32 },
369 { offsetof(struct ixl_hmc_txq, fc_ena), 1, 89 },
370 { offsetof(struct ixl_hmc_txq, timesync_ena), 1, 90 },
371 { offsetof(struct ixl_hmc_txq, fd_ena), 1, 91 },
372 { offsetof(struct ixl_hmc_txq, alt_vlan_ena), 1, 92 },
373 { offsetof(struct ixl_hmc_txq, cpuid), 8, 96 },
374 /* line 1 */
375 { offsetof(struct ixl_hmc_txq, thead_wb), 13, 0 + 128 },
376 { offsetof(struct ixl_hmc_txq, head_wb_ena), 1, 32 + 128 },
377 { offsetof(struct ixl_hmc_txq, qlen), 13, 33 + 128 },
378 { offsetof(struct ixl_hmc_txq, tphrdesc_ena), 1, 46 + 128 },
379 { offsetof(struct ixl_hmc_txq, tphrpacket_ena), 1, 47 + 128 },
380 { offsetof(struct ixl_hmc_txq, tphwdesc_ena), 1, 48 + 128 },
381 { offsetof(struct ixl_hmc_txq, head_wb_addr), 64, 64 + 128 },
382 /* line 7 */
383 { offsetof(struct ixl_hmc_txq, crc), 32, 0 + (7*128) },
384 { offsetof(struct ixl_hmc_txq, rdylist), 10, 84 + (7*128) },
385 { offsetof(struct ixl_hmc_txq, rdylist_act), 1, 94 + (7*128) },
386 };
387
388 #define IXL_HMC_TXQ_MINSIZE (94 + (7*128) + 1)
389
390 struct ixl_work {
391 struct work ixw_cookie;
392 void (*ixw_func)(void *);
393 void *ixw_arg;
394 unsigned int ixw_added;
395 };
396 #define IXL_WORKQUEUE_PRI PRI_SOFTNET
397
398 struct ixl_tx_map {
399 struct mbuf *txm_m;
400 bus_dmamap_t txm_map;
401 unsigned int txm_eop;
402 };
403
404 struct ixl_tx_ring {
405 kmutex_t txr_lock;
406 struct ixl_softc *txr_sc;
407
408 unsigned int txr_prod;
409 unsigned int txr_cons;
410
411 struct ixl_tx_map *txr_maps;
412 struct ixl_dmamem txr_mem;
413
414 bus_size_t txr_tail;
415 unsigned int txr_qid;
416 pcq_t *txr_intrq;
417 void *txr_si;
418
419 uint64_t txr_oerrors; /* if_oerrors */
420 uint64_t txr_opackets; /* if_opackets */
421 uint64_t txr_obytes; /* if_obytes */
422 uint64_t txr_omcasts; /* if_omcasts */
423
424 struct evcnt txr_defragged;
425 struct evcnt txr_defrag_failed;
426 struct evcnt txr_pcqdrop;
427 struct evcnt txr_transmitdef;
428 struct evcnt txr_intr;
429 struct evcnt txr_defer;
430 };
431
432 struct ixl_rx_map {
433 struct mbuf *rxm_m;
434 bus_dmamap_t rxm_map;
435 };
436
437 struct ixl_rx_ring {
438 kmutex_t rxr_lock;
439
440 unsigned int rxr_prod;
441 unsigned int rxr_cons;
442
443 struct ixl_rx_map *rxr_maps;
444 struct ixl_dmamem rxr_mem;
445
446 struct mbuf *rxr_m_head;
447 struct mbuf **rxr_m_tail;
448
449 bus_size_t rxr_tail;
450 unsigned int rxr_qid;
451
452 uint64_t rxr_ipackets; /* if_ipackets */
453 uint64_t rxr_ibytes; /* if_ibytes */
454 uint64_t rxr_iqdrops; /* iqdrops */
455 uint64_t rxr_ierrors; /* if_ierrors */
456
457 struct evcnt rxr_mgethdr_failed;
458 struct evcnt rxr_mgetcl_failed;
459 struct evcnt rxr_mbuf_load_failed;
460 struct evcnt rxr_intr;
461 struct evcnt rxr_defer;
462 };
463
464 struct ixl_queue_pair {
465 struct ixl_softc *qp_sc;
466 struct ixl_tx_ring *qp_txr;
467 struct ixl_rx_ring *qp_rxr;
468
469 char qp_name[16];
470
471 void *qp_si;
472 struct ixl_work qp_task;
473 bool qp_workqueue;
474 };
475
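/*
 * a pending admin queue command: the descriptor to post plus a callback
 * that is run with the completed descriptor.
 */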
476 struct ixl_atq {
477 struct ixl_aq_desc iatq_desc;
478 void (*iatq_fn)(struct ixl_softc *,
479 const struct ixl_aq_desc *);
480 };
481 SIMPLEQ_HEAD(ixl_atq_list, ixl_atq);
482
483 struct ixl_product {
484 unsigned int vendor_id;
485 unsigned int product_id;
486 };
487
488 /*
489 * Locking notes:
490  * + a field in ixl_tx_ring is protected by txr_lock (a spin mutex), and
491  *   a field in ixl_rx_ring is protected by rxr_lock (a spin mutex).
492  *    - no more than one of these ring locks may be held at once.
493  * + a field named sc_atq_* in ixl_softc is protected by sc_atq_lock
494  *   (a spin mutex).
495  *    - this lock cannot be held together with txr_lock or rxr_lock.
496  * + a field named sc_arq_* is not protected by any lock.
497  *    - all operations on sc_arq_* are done from the single context
498  *      driven by sc_arq_task.
499  * + other fields in ixl_softc are protected by sc_cfg_lock
500  *   (an adaptive mutex).
501  *    - it must be acquired before any other lock is taken, and it can
502  *      be released only after the other lock has been released.
503  */
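/*
 * for example, ixl_reinit() and ixl_stop_locked() run with sc_cfg_lock
 * held and take the per-ring txr_lock/rxr_lock one at a time, while
 * sc_atq_lock is only ever taken with no ring lock held.
 */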
504
505 struct ixl_softc {
506 device_t sc_dev;
507 struct ethercom sc_ec;
508 bool sc_attached;
509 bool sc_dead;
510 bool sc_rxctl_atq;
511 struct sysctllog *sc_sysctllog;
512 struct workqueue *sc_workq;
513 struct workqueue *sc_workq_txrx;
514 uint8_t sc_enaddr[ETHER_ADDR_LEN];
515 struct ifmedia sc_media;
516 uint64_t sc_media_status;
517 uint64_t sc_media_active;
518 kmutex_t sc_cfg_lock;
519 enum i40e_mac_type sc_mac_type;
520 uint32_t sc_rss_table_size;
521 uint32_t sc_rss_table_entry_width;
522 bool sc_txrx_workqueue;
523 u_int sc_tx_process_limit;
524 u_int sc_rx_process_limit;
525 u_int sc_tx_intr_process_limit;
526 u_int sc_rx_intr_process_limit;
527
528 int sc_cur_ec_capenable;
529
530 struct pci_attach_args sc_pa;
531 pci_intr_handle_t *sc_ihp;
532 void **sc_ihs;
533 unsigned int sc_nintrs;
534
535 bus_dma_tag_t sc_dmat;
536 bus_space_tag_t sc_memt;
537 bus_space_handle_t sc_memh;
538 bus_size_t sc_mems;
539
540 uint8_t sc_pf_id;
541 uint16_t sc_uplink_seid; /* le */
542 uint16_t sc_downlink_seid; /* le */
543 uint16_t sc_vsi_number; /* le */
544 uint16_t sc_seid;
545 unsigned int sc_base_queue;
546
547 pci_intr_type_t sc_intrtype;
548 unsigned int sc_msix_vector_queue;
549
550 struct ixl_dmamem sc_scratch;
551
552 const struct ixl_aq_regs *
553 sc_aq_regs;
554
555 kmutex_t sc_atq_lock;
556 kcondvar_t sc_atq_cv;
557 struct ixl_dmamem sc_atq;
558 unsigned int sc_atq_prod;
559 unsigned int sc_atq_cons;
560
561 struct ixl_dmamem sc_arq;
562 struct ixl_work sc_arq_task;
563 struct ixl_aq_bufs sc_arq_idle;
564 struct ixl_aq_buf *sc_arq_live[IXL_AQ_NUM];
565 unsigned int sc_arq_prod;
566 unsigned int sc_arq_cons;
567
568 struct ixl_work sc_link_state_task;
569 struct ixl_atq sc_link_state_atq;
570
571 struct ixl_dmamem sc_hmc_sd;
572 struct ixl_dmamem sc_hmc_pd;
573 struct ixl_hmc_entry sc_hmc_entries[IXL_HMC_COUNT];
574
575 unsigned int sc_tx_ring_ndescs;
576 unsigned int sc_rx_ring_ndescs;
577 unsigned int sc_nqueue_pairs;
578 unsigned int sc_nqueue_pairs_max;
579 unsigned int sc_nqueue_pairs_device;
580 struct ixl_queue_pair *sc_qps;
581
582 struct evcnt sc_event_atq;
583 struct evcnt sc_event_link;
584 struct evcnt sc_event_ecc_err;
585 struct evcnt sc_event_pci_exception;
586 struct evcnt sc_event_crit_err;
587 };
588
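/*
 * default upper bounds on how many descriptors are handled in one pass;
 * the *_INTR_* limits are used in the interrupt handlers, the others in
 * the deferred (softint/workqueue) handlers.
 */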
589 #define IXL_TXRX_PROCESS_UNLIMIT UINT_MAX
590 #define IXL_TX_PROCESS_LIMIT 256
591 #define IXL_RX_PROCESS_LIMIT 256
592 #define IXL_TX_INTR_PROCESS_LIMIT 256
593 #define IXL_RX_INTR_PROCESS_LIMIT 0U
594
595 #define delaymsec(_x) DELAY(1000 * (_x))
596 #ifdef IXL_DEBUG
597 #define DDPRINTF(sc, fmt, args...) \
598 do { \
599 if (sc != NULL) \
600 device_printf(sc->sc_dev, ""); \
601 printf("%s:\t" fmt, __func__, ##args); \
602 } while (0)
603 #else
604 #define DDPRINTF(sc, fmt, args...) __nothing
605 #endif
606 #define IXL_NOMSIX false
607
608 static enum i40e_mac_type
609 ixl_mactype(pci_product_id_t);
610 static void ixl_clear_hw(struct ixl_softc *);
611 static int ixl_pf_reset(struct ixl_softc *);
612
613 static int ixl_dmamem_alloc(struct ixl_softc *, struct ixl_dmamem *,
614 bus_size_t, bus_size_t);
615 static void ixl_dmamem_free(struct ixl_softc *, struct ixl_dmamem *);
616
617 static int ixl_arq_fill(struct ixl_softc *);
618 static void ixl_arq_unfill(struct ixl_softc *);
619
620 static int ixl_atq_poll(struct ixl_softc *, struct ixl_aq_desc *,
621 unsigned int);
622 static void ixl_atq_set(struct ixl_atq *,
623 void (*)(struct ixl_softc *, const struct ixl_aq_desc *));
624 static int ixl_atq_post(struct ixl_softc *, struct ixl_atq *);
625 static int ixl_atq_post_locked(struct ixl_softc *, struct ixl_atq *);
626 static void ixl_atq_done(struct ixl_softc *);
627 static int ixl_atq_exec(struct ixl_softc *, struct ixl_atq *);
628 static int ixl_get_version(struct ixl_softc *);
629 static int ixl_get_hw_capabilities(struct ixl_softc *);
630 static int ixl_pxe_clear(struct ixl_softc *);
631 static int ixl_lldp_shut(struct ixl_softc *);
632 static int ixl_get_mac(struct ixl_softc *);
633 static int ixl_get_switch_config(struct ixl_softc *);
634 static int ixl_phy_mask_ints(struct ixl_softc *);
635 static int ixl_get_phy_types(struct ixl_softc *, uint64_t *);
636 static int ixl_restart_an(struct ixl_softc *);
637 static int ixl_hmc(struct ixl_softc *);
638 static void ixl_hmc_free(struct ixl_softc *);
639 static int ixl_get_vsi(struct ixl_softc *);
640 static int ixl_set_vsi(struct ixl_softc *);
641 static void ixl_set_filter_control(struct ixl_softc *);
642 static void ixl_get_link_status(void *);
643 static int ixl_get_link_status_poll(struct ixl_softc *);
644 static int ixl_set_link_status(struct ixl_softc *,
645 const struct ixl_aq_desc *);
646 static void ixl_config_rss(struct ixl_softc *);
647 static int ixl_add_macvlan(struct ixl_softc *, const uint8_t *,
648 uint16_t, uint16_t);
649 static int ixl_remove_macvlan(struct ixl_softc *, uint8_t *, uint16_t,
650 uint16_t);
651 static void ixl_arq(void *);
652 static void ixl_hmc_pack(void *, const void *,
653 const struct ixl_hmc_pack *, unsigned int);
654 static uint32_t ixl_rd_rx_csr(struct ixl_softc *, uint32_t);
655 static void ixl_wr_rx_csr(struct ixl_softc *, uint32_t, uint32_t);
656
657 static int ixl_match(device_t, cfdata_t, void *);
658 static void ixl_attach(device_t, device_t, void *);
659 static int ixl_detach(device_t, int);
660
661 static void ixl_media_add(struct ixl_softc *, uint64_t);
662 static int ixl_media_change(struct ifnet *);
663 static void ixl_media_status(struct ifnet *, struct ifmediareq *);
664 static void ixl_watchdog(struct ifnet *);
665 static int ixl_ioctl(struct ifnet *, u_long, void *);
666 static void ixl_start(struct ifnet *);
667 static int ixl_transmit(struct ifnet *, struct mbuf *);
668 static void ixl_deferred_transmit(void *);
669 static int ixl_intr(void *);
670 static int ixl_queue_intr(void *);
671 static int ixl_other_intr(void *);
672 static void ixl_handle_queue(void *);
673 static void ixl_sched_handle_queue(struct ixl_softc *,
674 struct ixl_queue_pair *);
675 static int ixl_init(struct ifnet *);
676 static int ixl_init_locked(struct ixl_softc *);
677 static void ixl_stop(struct ifnet *, int);
678 static void ixl_stop_locked(struct ixl_softc *);
679 static int ixl_iff(struct ixl_softc *);
680 static int ixl_ifflags_cb(struct ethercom *);
681 static int ixl_setup_interrupts(struct ixl_softc *);
682 static int ixl_establish_intx(struct ixl_softc *);
683 static int ixl_establish_msix(struct ixl_softc *);
684 static void ixl_set_affinity_msix(struct ixl_softc *);
685 static void ixl_enable_queue_intr(struct ixl_softc *,
686 struct ixl_queue_pair *);
687 static void ixl_disable_queue_intr(struct ixl_softc *,
688 struct ixl_queue_pair *);
689 static void ixl_enable_other_intr(struct ixl_softc *);
690 static void ixl_disable_other_intr(struct ixl_softc *);
691 static void ixl_config_queue_intr(struct ixl_softc *);
692 static void ixl_config_other_intr(struct ixl_softc *);
693
694 static struct ixl_tx_ring *
695 ixl_txr_alloc(struct ixl_softc *, unsigned int);
696 static void ixl_txr_qdis(struct ixl_softc *, struct ixl_tx_ring *, int);
697 static void ixl_txr_config(struct ixl_softc *, struct ixl_tx_ring *);
698 static int ixl_txr_enabled(struct ixl_softc *, struct ixl_tx_ring *);
699 static int ixl_txr_disabled(struct ixl_softc *, struct ixl_tx_ring *);
700 static void ixl_txr_unconfig(struct ixl_softc *, struct ixl_tx_ring *);
701 static void ixl_txr_clean(struct ixl_softc *, struct ixl_tx_ring *);
702 static void ixl_txr_free(struct ixl_softc *, struct ixl_tx_ring *);
703 static int ixl_txeof(struct ixl_softc *, struct ixl_tx_ring *, u_int);
704
705 static struct ixl_rx_ring *
706 ixl_rxr_alloc(struct ixl_softc *, unsigned int);
707 static void ixl_rxr_config(struct ixl_softc *, struct ixl_rx_ring *);
708 static int ixl_rxr_enabled(struct ixl_softc *, struct ixl_rx_ring *);
709 static int ixl_rxr_disabled(struct ixl_softc *, struct ixl_rx_ring *);
710 static void ixl_rxr_unconfig(struct ixl_softc *, struct ixl_rx_ring *);
711 static void ixl_rxr_clean(struct ixl_softc *, struct ixl_rx_ring *);
712 static void ixl_rxr_free(struct ixl_softc *, struct ixl_rx_ring *);
713 static int ixl_rxeof(struct ixl_softc *, struct ixl_rx_ring *, u_int);
714 static int ixl_rxfill(struct ixl_softc *, struct ixl_rx_ring *);
715
716 static struct workqueue *
717 ixl_workq_create(const char *, pri_t, int, int);
718 static void ixl_workq_destroy(struct workqueue *);
719 static int ixl_workqs_teardown(device_t);
720 static void ixl_work_set(struct ixl_work *, void (*)(void *), void *);
721 static void ixl_work_add(struct workqueue *, struct ixl_work *);
722 static void ixl_work_wait(struct workqueue *, struct ixl_work *);
723 static void ixl_workq_work(struct work *, void *);
724 static const struct ixl_product *
725 ixl_lookup(const struct pci_attach_args *pa);
726 static void ixl_link_state_update(struct ixl_softc *,
727 const struct ixl_aq_desc *);
728 static int ixl_set_macvlan(struct ixl_softc *);
729 static int	ixl_setup_interrupts(struct ixl_softc *);
730 static void ixl_teardown_interrupts(struct ixl_softc *);
731 static int ixl_setup_stats(struct ixl_softc *);
732 static void ixl_teardown_stats(struct ixl_softc *);
733 static int ixl_setup_sysctls(struct ixl_softc *);
734 static void ixl_teardown_sysctls(struct ixl_softc *);
735 static int ixl_queue_pairs_alloc(struct ixl_softc *);
736 static void ixl_queue_pairs_free(struct ixl_softc *);
737
738 static const struct ixl_phy_type ixl_phy_type_map[] = {
739 { 1ULL << IXL_PHY_TYPE_SGMII, IFM_1000_SGMII },
740 { 1ULL << IXL_PHY_TYPE_1000BASE_KX, IFM_1000_KX },
741 { 1ULL << IXL_PHY_TYPE_10GBASE_KX4, IFM_10G_KX4 },
742 { 1ULL << IXL_PHY_TYPE_10GBASE_KR, IFM_10G_KR },
743 { 1ULL << IXL_PHY_TYPE_40GBASE_KR4, IFM_40G_KR4 },
744 { 1ULL << IXL_PHY_TYPE_XAUI |
745 1ULL << IXL_PHY_TYPE_XFI, IFM_10G_CX4 },
746 { 1ULL << IXL_PHY_TYPE_SFI, IFM_10G_SFI },
747 { 1ULL << IXL_PHY_TYPE_XLAUI |
748 1ULL << IXL_PHY_TYPE_XLPPI, IFM_40G_XLPPI },
749 { 1ULL << IXL_PHY_TYPE_40GBASE_CR4_CU |
750 1ULL << IXL_PHY_TYPE_40GBASE_CR4, IFM_40G_CR4 },
751 { 1ULL << IXL_PHY_TYPE_10GBASE_CR1_CU |
752 1ULL << IXL_PHY_TYPE_10GBASE_CR1, IFM_10G_CR1 },
753 { 1ULL << IXL_PHY_TYPE_10GBASE_AOC, IFM_10G_AOC },
754 { 1ULL << IXL_PHY_TYPE_40GBASE_AOC, IFM_40G_AOC },
755 { 1ULL << IXL_PHY_TYPE_100BASE_TX, IFM_100_TX },
756 { 1ULL << IXL_PHY_TYPE_1000BASE_T_OPTICAL |
757 1ULL << IXL_PHY_TYPE_1000BASE_T, IFM_1000_T },
758 { 1ULL << IXL_PHY_TYPE_10GBASE_T, IFM_10G_T },
759 { 1ULL << IXL_PHY_TYPE_10GBASE_SR, IFM_10G_SR },
760 { 1ULL << IXL_PHY_TYPE_10GBASE_LR, IFM_10G_LR },
761 { 1ULL << IXL_PHY_TYPE_10GBASE_SFPP_CU, IFM_10G_TWINAX },
762 { 1ULL << IXL_PHY_TYPE_40GBASE_SR4, IFM_40G_SR4 },
763 { 1ULL << IXL_PHY_TYPE_40GBASE_LR4, IFM_40G_LR4 },
764 { 1ULL << IXL_PHY_TYPE_1000BASE_SX, IFM_1000_SX },
765 { 1ULL << IXL_PHY_TYPE_1000BASE_LX, IFM_1000_LX },
766 { 1ULL << IXL_PHY_TYPE_20GBASE_KR2, IFM_20G_KR2 },
767 { 1ULL << IXL_PHY_TYPE_25GBASE_KR, IFM_25G_KR },
768 { 1ULL << IXL_PHY_TYPE_25GBASE_CR, IFM_25G_CR },
769 { 1ULL << IXL_PHY_TYPE_25GBASE_SR, IFM_25G_SR },
770 { 1ULL << IXL_PHY_TYPE_25GBASE_LR, IFM_25G_LR },
771 { 1ULL << IXL_PHY_TYPE_25GBASE_AOC, IFM_25G_AOC },
772 { 1ULL << IXL_PHY_TYPE_25GBASE_ACC, IFM_25G_CR },
773 };
774
775 static const struct ixl_speed_type ixl_speed_type_map[] = {
776 { IXL_AQ_LINK_SPEED_40GB, IF_Gbps(40) },
777 { IXL_AQ_LINK_SPEED_25GB, IF_Gbps(25) },
778 { IXL_AQ_LINK_SPEED_10GB, IF_Gbps(10) },
779 { IXL_AQ_LINK_SPEED_1000MB, IF_Mbps(1000) },
780 { IXL_AQ_LINK_SPEED_100MB, IF_Mbps(100)},
781 };
782
783 static const struct ixl_aq_regs ixl_pf_aq_regs = {
784 .atq_tail = I40E_PF_ATQT,
785 .atq_tail_mask = I40E_PF_ATQT_ATQT_MASK,
786 .atq_head = I40E_PF_ATQH,
787 .atq_head_mask = I40E_PF_ATQH_ATQH_MASK,
788 .atq_len = I40E_PF_ATQLEN,
789 .atq_bal = I40E_PF_ATQBAL,
790 .atq_bah = I40E_PF_ATQBAH,
791 .atq_len_enable = I40E_PF_ATQLEN_ATQENABLE_MASK,
792
793 .arq_tail = I40E_PF_ARQT,
794 .arq_tail_mask = I40E_PF_ARQT_ARQT_MASK,
795 .arq_head = I40E_PF_ARQH,
796 .arq_head_mask = I40E_PF_ARQH_ARQH_MASK,
797 .arq_len = I40E_PF_ARQLEN,
798 .arq_bal = I40E_PF_ARQBAL,
799 .arq_bah = I40E_PF_ARQBAH,
800 .arq_len_enable = I40E_PF_ARQLEN_ARQENABLE_MASK,
801 };
802
803 #define ixl_rd(_s, _r) \
804 bus_space_read_4((_s)->sc_memt, (_s)->sc_memh, (_r))
805 #define ixl_wr(_s, _r, _v) \
806 bus_space_write_4((_s)->sc_memt, (_s)->sc_memh, (_r), (_v))
807 #define ixl_barrier(_s, _r, _l, _o) \
808 bus_space_barrier((_s)->sc_memt, (_s)->sc_memh, (_r), (_l), (_o))
809 #define ixl_flush(_s) (void)ixl_rd((_s), I40E_GLGEN_STAT)
810 #define ixl_nqueues(_sc) (1 << ((_sc)->sc_nqueue_pairs - 1))
811
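/*
 * split a 64-bit DMA address into the 32-bit halves used to program
 * BAH/BAL style register pairs; the high half is 0 when bus addresses
 * are only 32 bits wide.
 */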
812 static inline uint32_t
813 ixl_dmamem_hi(struct ixl_dmamem *ixm)
814 {
815 uint32_t retval;
816 uint64_t val;
817
818 if (sizeof(IXL_DMA_DVA(ixm)) > 4) {
819 val = (intptr_t)IXL_DMA_DVA(ixm);
820 retval = (uint32_t)(val >> 32);
821 } else {
822 retval = 0;
823 }
824
825 return retval;
826 }
827
828 static inline uint32_t
829 ixl_dmamem_lo(struct ixl_dmamem *ixm)
830 {
831
832 return (uint32_t)IXL_DMA_DVA(ixm);
833 }
834
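/*
 * store a buffer address into an AQ descriptor: iaq_param[2] holds the
 * high word and iaq_param[3] the low word, both little-endian.
 */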
835 static inline void
836 ixl_aq_dva(struct ixl_aq_desc *iaq, bus_addr_t addr)
837 {
838 uint64_t val;
839
840 if (sizeof(addr) > 4) {
841 val = (intptr_t)addr;
842 iaq->iaq_param[2] = htole32(val >> 32);
843 } else {
844 iaq->iaq_param[2] = htole32(0);
845 }
846
847 iaq->iaq_param[3] = htole32(addr);
848 }
849
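/*
 * number of rx descriptors between prod and cons that have not been
 * refilled yet; one slot is always kept empty so the ring never looks
 * completely full to the device.
 */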
850 static inline unsigned int
851 ixl_rxr_unrefreshed(unsigned int prod, unsigned int cons, unsigned int ndescs)
852 {
853 unsigned int num;
854
855 if (prod < cons)
856 num = cons - prod;
857 else
858 num = (ndescs - prod) + cons;
859
860 if (__predict_true(num > 0)) {
861 		/* device cannot receive packets if all descriptors are filled */
862 num -= 1;
863 }
864
865 return num;
866 }
867
868 CFATTACH_DECL3_NEW(ixl, sizeof(struct ixl_softc),
869 ixl_match, ixl_attach, ixl_detach, NULL, NULL, NULL,
870 DVF_DETACH_SHUTDOWN);
871
872 static const struct ixl_product ixl_products[] = {
873 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_SFP },
874 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_KX_B },
875 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_KX_C },
876 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_A },
877 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_B },
878 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_C },
879 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_T },
880 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_1 },
881 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_2 },
882 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_T4_10G },
883 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_BP },
884 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_SFP28 },
885 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_KX },
886 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_QSFP },
887 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_SFP },
888 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_1G_BASET },
889 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_10G_BASET },
890 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_I_SFP },
891 /* required last entry */
892 {0, 0}
893 };
894
895 static const struct ixl_product *
896 ixl_lookup(const struct pci_attach_args *pa)
897 {
898 const struct ixl_product *ixlp;
899
900 for (ixlp = ixl_products; ixlp->vendor_id != 0; ixlp++) {
901 if (PCI_VENDOR(pa->pa_id) == ixlp->vendor_id &&
902 PCI_PRODUCT(pa->pa_id) == ixlp->product_id)
903 return ixlp;
904 }
905
906 return NULL;
907 }
908
909 static int
910 ixl_match(device_t parent, cfdata_t match, void *aux)
911 {
912 const struct pci_attach_args *pa = aux;
913
914 return (ixl_lookup(pa) != NULL) ? 1 : 0;
915 }
916
917 static void
918 ixl_attach(device_t parent, device_t self, void *aux)
919 {
920 struct ixl_softc *sc;
921 struct pci_attach_args *pa = aux;
922 struct ifnet *ifp;
923 pcireg_t memtype, reg;
924 uint32_t firstq, port, ari, func;
925 uint64_t phy_types = 0;
926 char xnamebuf[32];
927 int tries, rv;
928
929 sc = device_private(self);
930 sc->sc_dev = self;
931 ifp = &sc->sc_ec.ec_if;
932
933 sc->sc_pa = *pa;
934 sc->sc_dmat = (pci_dma64_available(pa)) ?
935 pa->pa_dmat64 : pa->pa_dmat;
936 sc->sc_aq_regs = &ixl_pf_aq_regs;
937
938 reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
939 sc->sc_mac_type = ixl_mactype(PCI_PRODUCT(reg));
940
941 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IXL_PCIREG);
942 if (pci_mapreg_map(pa, IXL_PCIREG, memtype, 0,
943 &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems)) {
944 aprint_error(": unable to map registers\n");
945 return;
946 }
947
948 mutex_init(&sc->sc_cfg_lock, MUTEX_DEFAULT, IPL_SOFTNET);
949
950 firstq = ixl_rd(sc, I40E_PFLAN_QALLOC);
951 firstq &= I40E_PFLAN_QALLOC_FIRSTQ_MASK;
952 firstq >>= I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
953 sc->sc_base_queue = firstq;
954
955 ixl_clear_hw(sc);
956 if (ixl_pf_reset(sc) == -1) {
957 		/* error printed by ixl_pf_reset */
958 goto unmap;
959 }
960
961 port = ixl_rd(sc, I40E_PFGEN_PORTNUM);
962 port &= I40E_PFGEN_PORTNUM_PORT_NUM_MASK;
963 port >>= I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
964 aprint_normal(": port %u", port);
965
966 ari = ixl_rd(sc, I40E_GLPCI_CAPSUP);
967 ari &= I40E_GLPCI_CAPSUP_ARI_EN_MASK;
968 ari >>= I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
969
970 func = ixl_rd(sc, I40E_PF_FUNC_RID);
971 sc->sc_pf_id = func & (ari ? 0xff : 0x7);
972
973 /* initialise the adminq */
974
975 mutex_init(&sc->sc_atq_lock, MUTEX_DEFAULT, IPL_NET);
976
977 if (ixl_dmamem_alloc(sc, &sc->sc_atq,
978 sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
979 aprint_error("\n" "%s: unable to allocate atq\n",
980 device_xname(self));
981 goto unmap;
982 }
983
984 SIMPLEQ_INIT(&sc->sc_arq_idle);
985 ixl_work_set(&sc->sc_arq_task, ixl_arq, sc);
986 sc->sc_arq_cons = 0;
987 sc->sc_arq_prod = 0;
988
989 if (ixl_dmamem_alloc(sc, &sc->sc_arq,
990 sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
991 aprint_error("\n" "%s: unable to allocate arq\n",
992 device_xname(self));
993 goto free_atq;
994 }
995
996 if (!ixl_arq_fill(sc)) {
997 aprint_error("\n" "%s: unable to fill arq descriptors\n",
998 device_xname(self));
999 goto free_arq;
1000 }
1001
1002 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1003 0, IXL_DMA_LEN(&sc->sc_atq),
1004 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1005
1006 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1007 0, IXL_DMA_LEN(&sc->sc_arq),
1008 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1009
1010 for (tries = 0; tries < 10; tries++) {
1011 sc->sc_atq_cons = 0;
1012 sc->sc_atq_prod = 0;
1013
1014 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1015 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1016 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1017 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1018
1019 ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
1020
1021 ixl_wr(sc, sc->sc_aq_regs->atq_bal,
1022 ixl_dmamem_lo(&sc->sc_atq));
1023 ixl_wr(sc, sc->sc_aq_regs->atq_bah,
1024 ixl_dmamem_hi(&sc->sc_atq));
1025 ixl_wr(sc, sc->sc_aq_regs->atq_len,
1026 sc->sc_aq_regs->atq_len_enable | IXL_AQ_NUM);
1027
1028 ixl_wr(sc, sc->sc_aq_regs->arq_bal,
1029 ixl_dmamem_lo(&sc->sc_arq));
1030 ixl_wr(sc, sc->sc_aq_regs->arq_bah,
1031 ixl_dmamem_hi(&sc->sc_arq));
1032 ixl_wr(sc, sc->sc_aq_regs->arq_len,
1033 sc->sc_aq_regs->arq_len_enable | IXL_AQ_NUM);
1034
1035 rv = ixl_get_version(sc);
1036 if (rv == 0)
1037 break;
1038 if (rv != ETIMEDOUT) {
1039 aprint_error(", unable to get firmware version\n");
1040 goto shutdown;
1041 }
1042
1043 delaymsec(100);
1044 }
1045
1046 ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
1047
1048 if (sc->sc_mac_type == I40E_MAC_X722)
1049 sc->sc_nqueue_pairs_device = 128;
1050 else
1051 sc->sc_nqueue_pairs_device = 64;
1052
1053 rv = ixl_get_hw_capabilities(sc);
1054 if (rv != 0) {
1055 aprint_error(", GET HW CAPABILITIES %s\n",
1056 rv == ETIMEDOUT ? "timeout" : "error");
1057 goto shutdown;
1058 }
1059
1060 sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max =
1061 MIN((int)sc->sc_nqueue_pairs_device, ncpu);
1062 sc->sc_tx_ring_ndescs = 1024;
1063 sc->sc_rx_ring_ndescs = 1024;
1064
1065 KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_rx_ring_ndescs);
1066 KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_tx_ring_ndescs);
1067
1068 if (ixl_get_mac(sc) != 0) {
1069 /* error printed by ixl_get_mac */
1070 goto shutdown;
1071 }
1072
1073 aprint_normal("\n");
1074 aprint_naive("\n");
1075
1076 aprint_normal_dev(self, "Ethernet address %s\n",
1077 ether_sprintf(sc->sc_enaddr));
1078
1079 rv = ixl_pxe_clear(sc);
1080 if (rv != 0) {
1081 aprint_debug_dev(self, "CLEAR PXE MODE %s\n",
1082 rv == ETIMEDOUT ? "timeout" : "error");
1083 }
1084
1085 ixl_set_filter_control(sc);
1086
1087 if (ixl_hmc(sc) != 0) {
1088 /* error printed by ixl_hmc */
1089 goto shutdown;
1090 }
1091
1092 if (ixl_lldp_shut(sc) != 0) {
1093 /* error printed by ixl_lldp_shut */
1094 goto free_hmc;
1095 }
1096
1097 if (ixl_phy_mask_ints(sc) != 0) {
1098 /* error printed by ixl_phy_mask_ints */
1099 goto free_hmc;
1100 }
1101
1102 if (ixl_restart_an(sc) != 0) {
1103 /* error printed by ixl_restart_an */
1104 goto free_hmc;
1105 }
1106
1107 if (ixl_get_switch_config(sc) != 0) {
1108 /* error printed by ixl_get_switch_config */
1109 goto free_hmc;
1110 }
1111
1112 if (ixl_get_phy_types(sc, &phy_types) != 0) {
1113 /* error printed by ixl_get_phy_abilities */
1114 goto free_hmc;
1115 }
1116
1117 rv = ixl_get_link_status_poll(sc);
1118 if (rv != 0) {
1119 aprint_error_dev(self, "GET LINK STATUS %s\n",
1120 rv == ETIMEDOUT ? "timeout" : "error");
1121 goto free_hmc;
1122 }
1123
1124 if (ixl_dmamem_alloc(sc, &sc->sc_scratch,
1125 sizeof(struct ixl_aq_vsi_data), 8) != 0) {
1126 aprint_error_dev(self, "unable to allocate scratch buffer\n");
1127 goto free_hmc;
1128 }
1129
1130 rv = ixl_get_vsi(sc);
1131 if (rv != 0) {
1132 aprint_error_dev(self, "GET VSI %s %d\n",
1133 rv == ETIMEDOUT ? "timeout" : "error", rv);
1134 goto free_scratch;
1135 }
1136
1137 rv = ixl_set_vsi(sc);
1138 if (rv != 0) {
1139 		aprint_error_dev(self, "UPDATE VSI %s %d\n",
1140 rv == ETIMEDOUT ? "timeout" : "error", rv);
1141 goto free_scratch;
1142 }
1143
1144 if (ixl_queue_pairs_alloc(sc) != 0) {
1145 /* error printed by ixl_queue_pairs_alloc */
1146 goto free_scratch;
1147 }
1148
1149 if (ixl_setup_interrupts(sc) != 0) {
1150 /* error printed by ixl_setup_interrupts */
1151 goto free_queue_pairs;
1152 }
1153
1154 if (ixl_setup_stats(sc) != 0) {
1155 aprint_error_dev(self, "failed to setup event counters\n");
1156 goto teardown_intrs;
1157 }
1158
1159 if (ixl_setup_sysctls(sc) != 0) {
1160 /* error printed by ixl_setup_sysctls */
1161 goto teardown_stats;
1162 }
1163
1164 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_cfg", device_xname(self));
1165 sc->sc_workq = ixl_workq_create(xnamebuf, IXL_WORKQUEUE_PRI,
1166 IPL_NET, WQ_PERCPU | WQ_MPSAFE);
1167 if (sc->sc_workq == NULL)
1168 goto teardown_sysctls;
1169
1170 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_txrx", device_xname(self));
1171 sc->sc_workq_txrx = ixl_workq_create(xnamebuf, IXL_WORKQUEUE_PRI,
1172 IPL_NET, WQ_PERCPU | WQ_MPSAFE);
1173 if (sc->sc_workq_txrx == NULL)
1174 goto teardown_wqs;
1175
1176 snprintf(xnamebuf, sizeof(xnamebuf), "%s_atq_cv", device_xname(self));
1177 cv_init(&sc->sc_atq_cv, xnamebuf);
1178
1179 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
1180
1181 ifp->if_softc = sc;
1182 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1183 ifp->if_extflags = IFEF_MPSAFE;
1184 ifp->if_ioctl = ixl_ioctl;
1185 ifp->if_start = ixl_start;
1186 ifp->if_transmit = ixl_transmit;
1187 ifp->if_watchdog = ixl_watchdog;
1188 ifp->if_init = ixl_init;
1189 ifp->if_stop = ixl_stop;
1190 IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_ndescs);
1191 IFQ_SET_READY(&ifp->if_snd);
1192 #if 0
1193 ifp->if_capabilities |= IFCAP_CSUM_IPv4_Rx |
1194 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
1195 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
1196 #endif
1197 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
1198 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
1199
1200 sc->sc_ec.ec_capenable = sc->sc_ec.ec_capabilities;
1201 sc->sc_cur_ec_capenable = sc->sc_ec.ec_capenable;
1202
1203 sc->sc_ec.ec_ifmedia = &sc->sc_media;
1204 ifmedia_init(&sc->sc_media, IFM_IMASK, ixl_media_change,
1205 ixl_media_status);
1206
1207 ixl_media_add(sc, phy_types);
1208 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1209 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
1210
1211 if_attach(ifp);
1212 if_deferred_start_init(ifp, NULL);
1213 ether_ifattach(ifp, sc->sc_enaddr);
1214 ether_set_ifflags_cb(&sc->sc_ec, ixl_ifflags_cb);
1215
1216 (void)ixl_get_link_status_poll(sc);
1217 ixl_work_set(&sc->sc_link_state_task, ixl_get_link_status, sc);
1218
1219 ixl_config_other_intr(sc);
1220 ixl_enable_other_intr(sc);
1221
1222 ixl_set_macvlan(sc);
1223
1224 sc->sc_txrx_workqueue = true;
1225 sc->sc_tx_process_limit = IXL_TX_PROCESS_LIMIT;
1226 sc->sc_rx_process_limit = IXL_RX_PROCESS_LIMIT;
1227 sc->sc_tx_intr_process_limit = IXL_TX_INTR_PROCESS_LIMIT;
1228 sc->sc_rx_intr_process_limit = IXL_RX_INTR_PROCESS_LIMIT;
1229
1230 if (pmf_device_register(self, NULL, NULL) != true)
1231 aprint_debug_dev(self, "couldn't establish power handler\n");
1232 sc->sc_attached = true;
1233 return;
1234
1235 teardown_wqs:
1236 config_finalize_register(self, ixl_workqs_teardown);
1237 teardown_sysctls:
1238 ixl_teardown_sysctls(sc);
1239 teardown_stats:
1240 ixl_teardown_stats(sc);
1241 teardown_intrs:
1242 ixl_teardown_interrupts(sc);
1243 free_queue_pairs:
1244 ixl_queue_pairs_free(sc);
1245 free_scratch:
1246 ixl_dmamem_free(sc, &sc->sc_scratch);
1247 free_hmc:
1248 ixl_hmc_free(sc);
1249 shutdown:
1250 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1251 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1252 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1253 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1254
1255 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0);
1256 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0);
1257 ixl_wr(sc, sc->sc_aq_regs->atq_len, 0);
1258
1259 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0);
1260 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0);
1261 ixl_wr(sc, sc->sc_aq_regs->arq_len, 0);
1262
1263 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1264 0, IXL_DMA_LEN(&sc->sc_arq),
1265 BUS_DMASYNC_POSTREAD);
1266 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1267 0, IXL_DMA_LEN(&sc->sc_atq),
1268 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1269
1270 ixl_arq_unfill(sc);
1271 free_arq:
1272 ixl_dmamem_free(sc, &sc->sc_arq);
1273 free_atq:
1274 ixl_dmamem_free(sc, &sc->sc_atq);
1275 unmap:
1276 mutex_destroy(&sc->sc_atq_lock);
1277 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
1278 mutex_destroy(&sc->sc_cfg_lock);
1279 sc->sc_mems = 0;
1280
1281 sc->sc_attached = false;
1282 }
1283
1284 static int
1285 ixl_detach(device_t self, int flags)
1286 {
1287 struct ixl_softc *sc = device_private(self);
1288 struct ifnet *ifp = &sc->sc_ec.ec_if;
1289
1290 if (!sc->sc_attached)
1291 return 0;
1292
1293 ixl_stop(ifp, 1);
1294
1295 ixl_disable_other_intr(sc);
1296
1297 /* wait for ATQ handler */
1298 mutex_enter(&sc->sc_atq_lock);
1299 mutex_exit(&sc->sc_atq_lock);
1300
1301 ixl_work_wait(sc->sc_workq, &sc->sc_arq_task);
1302 ixl_work_wait(sc->sc_workq, &sc->sc_link_state_task);
1303
1304 if (sc->sc_workq != NULL) {
1305 ixl_workq_destroy(sc->sc_workq);
1306 sc->sc_workq = NULL;
1307 }
1308
1309 if (sc->sc_workq_txrx != NULL) {
1310 ixl_workq_destroy(sc->sc_workq_txrx);
1311 sc->sc_workq_txrx = NULL;
1312 }
1313
1314 ifmedia_delete_instance(&sc->sc_media, IFM_INST_ANY);
1315 ether_ifdetach(ifp);
1316 if_detach(ifp);
1317
1318 ixl_teardown_interrupts(sc);
1319 ixl_teardown_stats(sc);
1320
1321 ixl_queue_pairs_free(sc);
1322
1323 ixl_dmamem_free(sc, &sc->sc_scratch);
1324 ixl_hmc_free(sc);
1325
1326 /* shutdown */
1327 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1328 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1329 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1330 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1331
1332 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0);
1333 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0);
1334 ixl_wr(sc, sc->sc_aq_regs->atq_len, 0);
1335
1336 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0);
1337 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0);
1338 ixl_wr(sc, sc->sc_aq_regs->arq_len, 0);
1339
1340 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1341 0, IXL_DMA_LEN(&sc->sc_arq),
1342 BUS_DMASYNC_POSTREAD);
1343 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1344 0, IXL_DMA_LEN(&sc->sc_atq),
1345 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1346
1347 ixl_arq_unfill(sc);
1348
1349 ixl_dmamem_free(sc, &sc->sc_arq);
1350 ixl_dmamem_free(sc, &sc->sc_atq);
1351
1352 cv_destroy(&sc->sc_atq_cv);
1353 mutex_destroy(&sc->sc_atq_lock);
1354
1355 if (sc->sc_mems != 0) {
1356 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
1357 sc->sc_mems = 0;
1358 }
1359
1360 mutex_destroy(&sc->sc_cfg_lock);
1361
1362 return 0;
1363 }
1364
1365 static int
1366 ixl_workqs_teardown(device_t self)
1367 {
1368 struct ixl_softc *sc = device_private(self);
1369
1370 if (sc->sc_workq != NULL) {
1371 ixl_workq_destroy(sc->sc_workq);
1372 sc->sc_workq = NULL;
1373 }
1374
1375 if (sc->sc_workq_txrx != NULL) {
1376 ixl_workq_destroy(sc->sc_workq_txrx);
1377 sc->sc_workq_txrx = NULL;
1378 }
1379
1380 return 0;
1381 }
1382
1383 static void
1384 ixl_media_add(struct ixl_softc *sc, uint64_t phy_types)
1385 {
1386 struct ifmedia *ifm = &sc->sc_media;
1387 const struct ixl_phy_type *itype;
1388 unsigned int i;
1389
1390 for (i = 0; i < __arraycount(ixl_phy_type_map); i++) {
1391 itype = &ixl_phy_type_map[i];
1392
1393 if (ISSET(phy_types, itype->phy_type)) {
1394 ifmedia_add(ifm,
1395 IFM_ETHER | IFM_FDX | itype->ifm_type, 0, NULL);
1396
1397 if (itype->ifm_type == IFM_100_TX) {
1398 ifmedia_add(ifm, IFM_ETHER | itype->ifm_type,
1399 0, NULL);
1400 }
1401 }
1402 }
1403 }
1404
1405 static void
1406 ixl_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1407 {
1408 struct ixl_softc *sc = ifp->if_softc;
1409
1410 ifmr->ifm_status = sc->sc_media_status;
1411 ifmr->ifm_active = sc->sc_media_active;
1412
1413 mutex_enter(&sc->sc_cfg_lock);
1414 if (ifp->if_link_state == LINK_STATE_UP)
1415 SET(ifmr->ifm_status, IFM_ACTIVE);
1416 mutex_exit(&sc->sc_cfg_lock);
1417 }
1418
1419 static int
1420 ixl_media_change(struct ifnet *ifp)
1421 {
1422
1423 return 0;
1424 }
1425
1426 static void
1427 ixl_watchdog(struct ifnet *ifp)
1428 {
1429
1430 }
1431
1432 static void
1433 ixl_del_all_multiaddr(struct ixl_softc *sc)
1434 {
1435 struct ethercom *ec = &sc->sc_ec;
1436 struct ether_multi *enm;
1437 struct ether_multistep step;
1438
1439 ETHER_LOCK(ec);
1440 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1441 ETHER_NEXT_MULTI(step, enm)) {
1442 ixl_remove_macvlan(sc, enm->enm_addrlo, 0,
1443 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1444 }
1445 ETHER_UNLOCK(ec);
1446 }
1447
1448 static int
1449 ixl_add_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
1450 {
1451 struct ifnet *ifp = &sc->sc_ec.ec_if;
1452 int rv;
1453
1454 if (ISSET(ifp->if_flags, IFF_ALLMULTI))
1455 return 0;
1456
1457 if (memcmp(addrlo, addrhi, ETHER_ADDR_LEN) != 0) {
1458 ixl_del_all_multiaddr(sc);
1459 SET(ifp->if_flags, IFF_ALLMULTI);
1460 return ENETRESET;
1461 }
1462
1463 rv = ixl_add_macvlan(sc, addrlo, 0,
1464 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
1465
1466 if (rv == ENOSPC) {
1467 ixl_del_all_multiaddr(sc);
1468 SET(ifp->if_flags, IFF_ALLMULTI);
1469 return ENETRESET;
1470 }
1471
1472 return rv;
1473 }
1474
1475 static int
1476 ixl_del_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
1477 {
1478 struct ifnet *ifp = &sc->sc_ec.ec_if;
1479 struct ethercom *ec = &sc->sc_ec;
1480 struct ether_multi *enm, *enm_last;
1481 struct ether_multistep step;
1482 int error, rv = 0;
1483
1484 if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
1485 ixl_remove_macvlan(sc, addrlo, 0,
1486 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1487 return 0;
1488 }
1489
1490 ETHER_LOCK(ec);
1491 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1492 ETHER_NEXT_MULTI(step, enm)) {
1493 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
1494 ETHER_ADDR_LEN) != 0) {
1495 goto out;
1496 }
1497 }
1498
1499 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1500 ETHER_NEXT_MULTI(step, enm)) {
1501 error = ixl_add_macvlan(sc, enm->enm_addrlo, 0,
1502 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
1503 if (error != 0)
1504 break;
1505 }
1506
1507 if (enm != NULL) {
1508 enm_last = enm;
1509 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1510 ETHER_NEXT_MULTI(step, enm)) {
1511 if (enm == enm_last)
1512 break;
1513
1514 ixl_remove_macvlan(sc, enm->enm_addrlo, 0,
1515 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1516 }
1517 } else {
1518 CLR(ifp->if_flags, IFF_ALLMULTI);
1519 rv = ENETRESET;
1520 }
1521
1522 out:
1523 ETHER_UNLOCK(ec);
1524 return rv;
1525 }
1526
1527 static int
1528 ixl_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1529 {
1530 struct ifreq *ifr = (struct ifreq *)data;
1531 struct ixl_softc *sc = (struct ixl_softc *)ifp->if_softc;
1532 struct ixl_tx_ring *txr;
1533 struct ixl_rx_ring *rxr;
1534 const struct sockaddr *sa;
1535 uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
1536 int s, error = 0;
1537 unsigned int i;
1538
1539 switch (cmd) {
1540 case SIOCADDMULTI:
1541 sa = ifreq_getaddr(SIOCADDMULTI, ifr);
1542 if (ether_addmulti(sa, &sc->sc_ec) == ENETRESET) {
1543 error = ether_multiaddr(sa, addrlo, addrhi);
1544 if (error != 0)
1545 return error;
1546
1547 error = ixl_add_multi(sc, addrlo, addrhi);
1548 if (error != 0 && error != ENETRESET) {
1549 ether_delmulti(sa, &sc->sc_ec);
1550 error = EIO;
1551 }
1552 }
1553 break;
1554
1555 case SIOCDELMULTI:
1556 sa = ifreq_getaddr(SIOCDELMULTI, ifr);
1557 if (ether_delmulti(sa, &sc->sc_ec) == ENETRESET) {
1558 error = ether_multiaddr(sa, addrlo, addrhi);
1559 if (error != 0)
1560 return error;
1561
1562 error = ixl_del_multi(sc, addrlo, addrhi);
1563 }
1564 break;
1565
1566 case SIOCGIFDATA:
1567 case SIOCZIFDATA:
1568 ifp->if_ipackets = 0;
1569 ifp->if_ibytes = 0;
1570 ifp->if_iqdrops = 0;
1571 ifp->if_ierrors = 0;
1572 ifp->if_opackets = 0;
1573 ifp->if_obytes = 0;
1574 ifp->if_omcasts = 0;
1575
1576 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
1577 txr = sc->sc_qps[i].qp_txr;
1578 rxr = sc->sc_qps[i].qp_rxr;
1579
1580 mutex_enter(&rxr->rxr_lock);
1581 ifp->if_ipackets += rxr->rxr_ipackets;
1582 ifp->if_ibytes += rxr->rxr_ibytes;
1583 ifp->if_iqdrops += rxr->rxr_iqdrops;
1584 ifp->if_ierrors += rxr->rxr_ierrors;
1585 if (cmd == SIOCZIFDATA) {
1586 rxr->rxr_ipackets = 0;
1587 rxr->rxr_ibytes = 0;
1588 rxr->rxr_iqdrops = 0;
1589 rxr->rxr_ierrors = 0;
1590 }
1591 mutex_exit(&rxr->rxr_lock);
1592
1593 mutex_enter(&txr->txr_lock);
1594 ifp->if_opackets += txr->txr_opackets;
1595 			ifp->if_obytes += txr->txr_obytes;
1596 ifp->if_omcasts += txr->txr_omcasts;
1597 if (cmd == SIOCZIFDATA) {
1598 txr->txr_opackets = 0;
1599 				txr->txr_obytes = 0;
1600 txr->txr_omcasts = 0;
1601 }
1602 mutex_exit(&txr->txr_lock);
1603 }
1604 /* FALLTHROUGH */
1605 default:
1606 s = splnet();
1607 error = ether_ioctl(ifp, cmd, data);
1608 splx(s);
1609 }
1610
1611 if (error == ENETRESET)
1612 error = ixl_iff(sc);
1613
1614 return error;
1615 }
1616
1617 static enum i40e_mac_type
1618 ixl_mactype(pci_product_id_t id)
1619 {
1620
1621 switch (id) {
1622 case PCI_PRODUCT_INTEL_XL710_SFP:
1623 case PCI_PRODUCT_INTEL_XL710_KX_B:
1624 case PCI_PRODUCT_INTEL_XL710_KX_C:
1625 case PCI_PRODUCT_INTEL_XL710_QSFP_A:
1626 case PCI_PRODUCT_INTEL_XL710_QSFP_B:
1627 case PCI_PRODUCT_INTEL_XL710_QSFP_C:
1628 case PCI_PRODUCT_INTEL_X710_10G_T:
1629 case PCI_PRODUCT_INTEL_XL710_20G_BP_1:
1630 case PCI_PRODUCT_INTEL_XL710_20G_BP_2:
1631 case PCI_PRODUCT_INTEL_X710_T4_10G:
1632 case PCI_PRODUCT_INTEL_XXV710_25G_BP:
1633 case PCI_PRODUCT_INTEL_XXV710_25G_SFP28:
1634 return I40E_MAC_XL710;
1635
1636 case PCI_PRODUCT_INTEL_X722_KX:
1637 case PCI_PRODUCT_INTEL_X722_QSFP:
1638 case PCI_PRODUCT_INTEL_X722_SFP:
1639 case PCI_PRODUCT_INTEL_X722_1G_BASET:
1640 case PCI_PRODUCT_INTEL_X722_10G_BASET:
1641 case PCI_PRODUCT_INTEL_X722_I_SFP:
1642 return I40E_MAC_X722;
1643 }
1644
1645 return I40E_MAC_GENERIC;
1646 }
1647
1648 static inline void *
1649 ixl_hmc_kva(struct ixl_softc *sc, enum ixl_hmc_types type, unsigned int i)
1650 {
1651 uint8_t *kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
1652 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
1653
1654 if (i >= e->hmc_count)
1655 return NULL;
1656
1657 kva += e->hmc_base;
1658 kva += i * e->hmc_size;
1659
1660 return kva;
1661 }
1662
1663 static inline size_t
1664 ixl_hmc_len(struct ixl_softc *sc, enum ixl_hmc_types type)
1665 {
1666 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
1667
1668 return e->hmc_size;
1669 }
1670
1671 static void
1672 ixl_enable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp)
1673 {
1674 struct ixl_rx_ring *rxr = qp->qp_rxr;
1675
1676 ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid),
1677 I40E_PFINT_DYN_CTLN_INTENA_MASK |
1678 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1679 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
1680 ixl_flush(sc);
1681 }
1682
1683 static void
1684 ixl_disable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp)
1685 {
1686 struct ixl_rx_ring *rxr = qp->qp_rxr;
1687
1688 ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid),
1689 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
1690 ixl_flush(sc);
1691 }
1692
1693 static void
1694 ixl_enable_other_intr(struct ixl_softc *sc)
1695 {
1696
1697 ixl_wr(sc, I40E_PFINT_DYN_CTL0,
1698 I40E_PFINT_DYN_CTL0_INTENA_MASK |
1699 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
1700 (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
1701 ixl_flush(sc);
1702 }
1703
1704 static void
1705 ixl_disable_other_intr(struct ixl_softc *sc)
1706 {
1707
1708 ixl_wr(sc, I40E_PFINT_DYN_CTL0,
1709 (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
1710 ixl_flush(sc);
1711 }
1712
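/*
 * reprogram the VSI and every tx/rx queue context, then enable the
 * queues; called with sc_cfg_lock held from ixl_init_locked().
 */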
1713 static int
1714 ixl_reinit(struct ixl_softc *sc)
1715 {
1716 struct ixl_rx_ring *rxr;
1717 struct ixl_tx_ring *txr;
1718 unsigned int i;
1719 uint32_t reg;
1720
1721 KASSERT(mutex_owned(&sc->sc_cfg_lock));
1722
1723 if (ixl_get_vsi(sc) != 0)
1724 return EIO;
1725
1726 if (ixl_set_vsi(sc) != 0)
1727 return EIO;
1728
1729
1730 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1731 txr = sc->sc_qps[i].qp_txr;
1732 rxr = sc->sc_qps[i].qp_rxr;
1733
1734 txr->txr_cons = txr->txr_prod = 0;
1735 rxr->rxr_cons = rxr->rxr_prod = 0;
1736
1737 ixl_txr_config(sc, txr);
1738 ixl_rxr_config(sc, rxr);
1739 }
1740
1741 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
1742 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_PREWRITE);
1743
1744 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1745 txr = sc->sc_qps[i].qp_txr;
1746 rxr = sc->sc_qps[i].qp_rxr;
1747
1748 ixl_wr(sc, I40E_QTX_CTL(i), I40E_QTX_CTL_PF_QUEUE |
1749 (sc->sc_pf_id << I40E_QTX_CTL_PF_INDX_SHIFT));
1750 ixl_flush(sc);
1751
1752 ixl_wr(sc, txr->txr_tail, txr->txr_prod);
1753 ixl_wr(sc, rxr->rxr_tail, rxr->rxr_prod);
1754
1755
1756 /* ixl_rxfill() needs lock held */
1757 mutex_enter(&rxr->rxr_lock);
1758 ixl_rxfill(sc, rxr);
1759 mutex_exit(&rxr->rxr_lock);
1760
1761 reg = ixl_rd(sc, I40E_QRX_ENA(i));
1762 SET(reg, I40E_QRX_ENA_QENA_REQ_MASK);
1763 ixl_wr(sc, I40E_QRX_ENA(i), reg);
1764 if (ixl_rxr_enabled(sc, rxr) != 0)
1765 goto stop;
1766
1767 ixl_txr_qdis(sc, txr, 1);
1768
1769 reg = ixl_rd(sc, I40E_QTX_ENA(i));
1770 SET(reg, I40E_QTX_ENA_QENA_REQ_MASK);
1771 ixl_wr(sc, I40E_QTX_ENA(i), reg);
1772
1773 if (ixl_txr_enabled(sc, txr) != 0)
1774 goto stop;
1775 }
1776
1777 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
1778 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE);
1779
1780 return 0;
1781
1782 stop:
1783 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
1784 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE);
1785
1786 return ETIMEDOUT;
1787 }
1788
1789 static int
1790 ixl_init_locked(struct ixl_softc *sc)
1791 {
1792 struct ifnet *ifp = &sc->sc_ec.ec_if;
1793 unsigned int i;
1794 int error;
1795
1796 KASSERT(mutex_owned(&sc->sc_cfg_lock));
1797
1798 if (ISSET(ifp->if_flags, IFF_RUNNING))
1799 ixl_stop_locked(sc);
1800
1801 if (sc->sc_dead) {
1802 return ENXIO;
1803 }
1804
1805 sc->sc_cur_ec_capenable = sc->sc_ec.ec_capenable;
1806
1807 if (sc->sc_intrtype != PCI_INTR_TYPE_MSIX)
1808 sc->sc_nqueue_pairs = 1;
1809 else
1810 sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max;
1811
1812 error = ixl_reinit(sc);
1813 if (error) {
1814 ixl_stop_locked(sc);
1815 return error;
1816 }
1817
1818 SET(ifp->if_flags, IFF_RUNNING);
1819 CLR(ifp->if_flags, IFF_OACTIVE);
1820
1821 (void)ixl_get_link_status(sc);
1822
1823 ixl_config_rss(sc);
1824 ixl_config_queue_intr(sc);
1825
1826 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1827 ixl_enable_queue_intr(sc, &sc->sc_qps[i]);
1828 }
1829
1830 error = ixl_iff(sc);
1831 if (error) {
1832 ixl_stop_locked(sc);
1833 return error;
1834 }
1835
1836 return 0;
1837 }
1838
1839 static int
1840 ixl_init(struct ifnet *ifp)
1841 {
1842 struct ixl_softc *sc = ifp->if_softc;
1843 int error;
1844
1845 mutex_enter(&sc->sc_cfg_lock);
1846 error = ixl_init_locked(sc);
1847 mutex_exit(&sc->sc_cfg_lock);
1848
1849 return error;
1850 }
1851
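/*
 * Program the RX filter to match the interface flags: set the VSI
 * promiscuous bits (unicast and multicast for IFF_PROMISC, multicast
 * only for IFF_ALLMULTI) via the admin queue, and move the MAC/VLAN
 * filter if the link-layer address has changed.
 */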
1852 static int
1853 ixl_iff(struct ixl_softc *sc)
1854 {
1855 struct ifnet *ifp = &sc->sc_ec.ec_if;
1856 struct ixl_atq iatq;
1857 struct ixl_aq_desc *iaq;
1858 struct ixl_aq_vsi_promisc_param *param;
1859 int error;
1860
1861 if (!ISSET(ifp->if_flags, IFF_RUNNING))
1862 return 0;
1863
1864 memset(&iatq, 0, sizeof(iatq));
1865
1866 iaq = &iatq.iatq_desc;
1867 iaq->iaq_opcode = htole16(IXL_AQ_OP_SET_VSI_PROMISC);
1868
1869 param = (struct ixl_aq_vsi_promisc_param *)&iaq->iaq_param;
1870 param->flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_BCAST |
1871 IXL_AQ_VSI_PROMISC_FLAG_VLAN);
1872 if (ISSET(ifp->if_flags, IFF_PROMISC)) {
1873 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
1874 IXL_AQ_VSI_PROMISC_FLAG_MCAST);
1875 } else if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
1876 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_MCAST);
1877 }
1878 param->valid_flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
1879 IXL_AQ_VSI_PROMISC_FLAG_MCAST | IXL_AQ_VSI_PROMISC_FLAG_BCAST |
1880 IXL_AQ_VSI_PROMISC_FLAG_VLAN);
1881 param->seid = sc->sc_seid;
1882
1883 error = ixl_atq_exec(sc, &iatq);
1884 if (error)
1885 return error;
1886
1887 if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK))
1888 return EIO;
1889
1890 if (memcmp(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN) != 0) {
1891 ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
1892 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1893
1894 memcpy(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN);
1895 ixl_add_macvlan(sc, sc->sc_enaddr, 0,
1896 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
1897 }
1898 return 0;
1899 }
1900
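/*
 * Rendezvous with the datapath: briefly taking each ring lock ensures
 * that any handler currently holding it has finished, and
 * ixl_work_wait() drains the deferred per-queue work.
 */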
1901 static void
1902 ixl_stop_rendezvous(struct ixl_softc *sc)
1903 {
1904 struct ixl_tx_ring *txr;
1905 struct ixl_rx_ring *rxr;
1906 unsigned int i;
1907
1908 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1909 txr = sc->sc_qps[i].qp_txr;
1910 rxr = sc->sc_qps[i].qp_rxr;
1911
1912 mutex_enter(&txr->txr_lock);
1913 mutex_exit(&txr->txr_lock);
1914
1915 mutex_enter(&rxr->rxr_lock);
1916 mutex_exit(&rxr->rxr_lock);
1917
1918 ixl_work_wait(sc->sc_workq_txrx,
1919 &sc->sc_qps[i].qp_task);
1920 }
1921 }
1922
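/*
 * Stop the interface with sc_cfg_lock held: mask the queue interrupts,
 * disable every TX and RX queue in the hardware, wait for the datapath
 * to drain, and release the ring contents.  If a queue cannot be
 * stopped the device is marked dead.
 */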
1923 static void
1924 ixl_stop_locked(struct ixl_softc *sc)
1925 {
1926 struct ifnet *ifp = &sc->sc_ec.ec_if;
1927 struct ixl_rx_ring *rxr;
1928 struct ixl_tx_ring *txr;
1929 unsigned int i;
1930 uint32_t reg;
1931
1932 KASSERT(mutex_owned(&sc->sc_cfg_lock));
1933
1934 CLR(ifp->if_flags, IFF_RUNNING | IFF_OACTIVE);
1935
1936 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1937 txr = sc->sc_qps[i].qp_txr;
1938 rxr = sc->sc_qps[i].qp_rxr;
1939
1940 ixl_disable_queue_intr(sc, &sc->sc_qps[i]);
1941
1942 mutex_enter(&txr->txr_lock);
1943 ixl_txr_qdis(sc, txr, 0);
1944 /* XXX wait at least 400 usec for all tx queues in one go */
1945 ixl_flush(sc);
1946 DELAY(500);
1947
1948 reg = ixl_rd(sc, I40E_QTX_ENA(i));
1949 CLR(reg, I40E_QTX_ENA_QENA_REQ_MASK);
1950 ixl_wr(sc, I40E_QTX_ENA(i), reg);
1951 		/* XXX wait 50ms from completion of the TX queue disable */
1952 ixl_flush(sc);
1953 DELAY(50);
1954
1955 if (ixl_txr_disabled(sc, txr) != 0) {
1956 mutex_exit(&txr->txr_lock);
1957 goto die;
1958 }
1959 mutex_exit(&txr->txr_lock);
1960
1961 mutex_enter(&rxr->rxr_lock);
1962 reg = ixl_rd(sc, I40E_QRX_ENA(i));
1963 CLR(reg, I40E_QRX_ENA_QENA_REQ_MASK);
1964 ixl_wr(sc, I40E_QRX_ENA(i), reg);
1965 /* XXX wait 50ms from completion of the RX queue disable */
1966 ixl_flush(sc);
1967 DELAY(50);
1968
1969 if (ixl_rxr_disabled(sc, rxr) != 0) {
1970 mutex_exit(&rxr->rxr_lock);
1971 goto die;
1972 }
1973 mutex_exit(&rxr->rxr_lock);
1974 }
1975
1976 ixl_stop_rendezvous(sc);
1977
1978 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1979 txr = sc->sc_qps[i].qp_txr;
1980 rxr = sc->sc_qps[i].qp_rxr;
1981
1982 ixl_txr_unconfig(sc, txr);
1983 ixl_rxr_unconfig(sc, rxr);
1984
1985 ixl_txr_clean(sc, txr);
1986 ixl_rxr_clean(sc, rxr);
1987 }
1988
1989 return;
1990 die:
1991 sc->sc_dead = true;
1992 log(LOG_CRIT, "%s: failed to shut down rings",
1993 device_xname(sc->sc_dev));
1994 return;
1995 }
1996
1997 static void
1998 ixl_stop(struct ifnet *ifp, int disable)
1999 {
2000 struct ixl_softc *sc = ifp->if_softc;
2001
2002 mutex_enter(&sc->sc_cfg_lock);
2003 ixl_stop_locked(sc);
2004 mutex_exit(&sc->sc_cfg_lock);
2005 }
2006
2007 static int
2008 ixl_queue_pairs_alloc(struct ixl_softc *sc)
2009 {
2010 struct ixl_queue_pair *qp;
2011 unsigned int i;
2012 size_t sz;
2013
2014 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2015 sc->sc_qps = kmem_zalloc(sz, KM_SLEEP);
2016
2017 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2018 qp = &sc->sc_qps[i];
2019
2020 qp->qp_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
2021 ixl_handle_queue, qp);
2022 if (qp->qp_si == NULL)
2023 goto free;
2024
2025 qp->qp_txr = ixl_txr_alloc(sc, i);
2026 if (qp->qp_txr == NULL)
2027 goto free;
2028
2029 qp->qp_rxr = ixl_rxr_alloc(sc, i);
2030 if (qp->qp_rxr == NULL)
2031 goto free;
2032
2033 qp->qp_sc = sc;
2034 ixl_work_set(&qp->qp_task, ixl_handle_queue, qp);
2035 snprintf(qp->qp_name, sizeof(qp->qp_name),
2036 "%s-TXRX%d", device_xname(sc->sc_dev), i);
2037 }
2038
2039 return 0;
2040 free:
2041 if (sc->sc_qps != NULL) {
2042 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2043 qp = &sc->sc_qps[i];
2044
2045 if (qp->qp_txr != NULL)
2046 ixl_txr_free(sc, qp->qp_txr);
2047 if (qp->qp_rxr != NULL)
2048 ixl_rxr_free(sc, qp->qp_rxr);
2049 if (qp->qp_si != NULL)
2050 softint_disestablish(qp->qp_si);
2051 }
2052
2053 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2054 kmem_free(sc->sc_qps, sz);
2055 sc->sc_qps = NULL;
2056 }
2057
2058 return -1;
2059 }
2060
2061 static void
2062 ixl_queue_pairs_free(struct ixl_softc *sc)
2063 {
2064 struct ixl_queue_pair *qp;
2065 unsigned int i;
2066 size_t sz;
2067
2068 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2069 qp = &sc->sc_qps[i];
2070 ixl_txr_free(sc, qp->qp_txr);
2071 ixl_rxr_free(sc, qp->qp_rxr);
2072 softint_disestablish(qp->qp_si);
2073 }
2074
2075 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2076 kmem_free(sc->sc_qps, sz);
2077 sc->sc_qps = NULL;
2078 }
2079
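/*
 * Allocate one TX ring: the descriptor DMA memory, one DMA map per
 * slot, the pcq that feeds if_transmit and the softint used for
 * deferred transmission.
 */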
2080 static struct ixl_tx_ring *
2081 ixl_txr_alloc(struct ixl_softc *sc, unsigned int qid)
2082 {
2083 struct ixl_tx_ring *txr = NULL;
2084 struct ixl_tx_map *maps = NULL, *txm;
2085 unsigned int i;
2086
2087 txr = kmem_zalloc(sizeof(*txr), KM_SLEEP);
2088 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_tx_ring_ndescs,
2089 KM_SLEEP);
2090
2091 if (ixl_dmamem_alloc(sc, &txr->txr_mem,
2092 sizeof(struct ixl_tx_desc) * sc->sc_tx_ring_ndescs,
2093 IXL_TX_QUEUE_ALIGN) != 0)
2094 goto free;
2095
2096 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2097 txm = &maps[i];
2098
2099 if (bus_dmamap_create(sc->sc_dmat,
2100 IXL_HARDMTU, IXL_TX_PKT_DESCS, IXL_HARDMTU, 0,
2101 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &txm->txm_map) != 0)
2102 goto uncreate;
2103
2104 txm->txm_eop = -1;
2105 txm->txm_m = NULL;
2106 }
2107
2108 txr->txr_cons = txr->txr_prod = 0;
2109 txr->txr_maps = maps;
2110
2111 txr->txr_intrq = pcq_create(sc->sc_tx_ring_ndescs, KM_NOSLEEP);
2112 if (txr->txr_intrq == NULL)
2113 goto uncreate;
2114
2115 txr->txr_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
2116 ixl_deferred_transmit, txr);
2117 if (txr->txr_si == NULL)
2118 goto destroy_pcq;
2119
2120 txr->txr_tail = I40E_QTX_TAIL(qid);
2121 txr->txr_qid = qid;
2122 txr->txr_sc = sc;
2123 mutex_init(&txr->txr_lock, MUTEX_DEFAULT, IPL_NET);
2124
2125 return txr;
2126
2127 destroy_pcq:
2128 pcq_destroy(txr->txr_intrq);
2129 uncreate:
2130 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2131 txm = &maps[i];
2132
2133 if (txm->txm_map == NULL)
2134 continue;
2135
2136 bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2137 }
2138
2139 ixl_dmamem_free(sc, &txr->txr_mem);
2140 free:
2141 kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs);
2142 kmem_free(txr, sizeof(*txr));
2143
2144 return NULL;
2145 }
2146
2147 static void
2148 ixl_txr_qdis(struct ixl_softc *sc, struct ixl_tx_ring *txr, int enable)
2149 {
2150 unsigned int qid;
2151 bus_size_t reg;
2152 uint32_t r;
2153
2154 qid = txr->txr_qid + sc->sc_base_queue;
2155 reg = I40E_GLLAN_TXPRE_QDIS(qid / 128);
2156 qid %= 128;
2157
2158 r = ixl_rd(sc, reg);
2159 CLR(r, I40E_GLLAN_TXPRE_QDIS_QINDX_MASK);
2160 SET(r, qid << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
2161 SET(r, enable ? I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK :
2162 I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK);
2163 ixl_wr(sc, reg, r);
2164 }
2165
2166 static void
2167 ixl_txr_config(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2168 {
2169 struct ixl_hmc_txq txq;
2170 struct ixl_aq_vsi_data *data = IXL_DMA_KVA(&sc->sc_scratch);
2171 void *hmc;
2172
2173 memset(&txq, 0, sizeof(txq));
2174 txq.head = htole16(txr->txr_cons);
2175 txq.new_context = 1;
2176 txq.base = htole64(IXL_DMA_DVA(&txr->txr_mem) / IXL_HMC_TXQ_BASE_UNIT);
2177 txq.head_wb_ena = IXL_HMC_TXQ_DESC_WB;
2178 txq.qlen = htole16(sc->sc_tx_ring_ndescs);
2179 txq.tphrdesc_ena = 0;
2180 txq.tphrpacket_ena = 0;
2181 txq.tphwdesc_ena = 0;
2182 txq.rdylist = data->qs_handle[0];
2183
2184 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2185 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2186 ixl_hmc_pack(hmc, &txq, ixl_hmc_pack_txq,
2187 __arraycount(ixl_hmc_pack_txq));
2188 }
2189
2190 static void
2191 ixl_txr_unconfig(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2192 {
2193 void *hmc;
2194
2195 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2196 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2197 }
2198
2199 static void
2200 ixl_txr_clean(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2201 {
2202 struct ixl_tx_map *maps, *txm;
2203 bus_dmamap_t map;
2204 unsigned int i;
2205
2206 maps = txr->txr_maps;
2207 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2208 txm = &maps[i];
2209
2210 if (txm->txm_m == NULL)
2211 continue;
2212
2213 map = txm->txm_map;
2214 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2215 BUS_DMASYNC_POSTWRITE);
2216 bus_dmamap_unload(sc->sc_dmat, map);
2217
2218 m_freem(txm->txm_m);
2219 txm->txm_m = NULL;
2220 }
2221 }
2222
2223 static int
2224 ixl_txr_enabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2225 {
2226 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2227 uint32_t reg;
2228 int i;
2229
2230 for (i = 0; i < 10; i++) {
2231 reg = ixl_rd(sc, ena);
2232 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK))
2233 return 0;
2234
2235 delaymsec(10);
2236 }
2237
2238 return ETIMEDOUT;
2239 }
2240
2241 static int
2242 ixl_txr_disabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2243 {
2244 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2245 uint32_t reg;
2246 int i;
2247
2248 KASSERT(mutex_owned(&txr->txr_lock));
2249
2250 for (i = 0; i < 20; i++) {
2251 reg = ixl_rd(sc, ena);
2252 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK) == 0)
2253 return 0;
2254
2255 delaymsec(10);
2256 }
2257
2258 return ETIMEDOUT;
2259 }
2260
2261 static void
2262 ixl_txr_free(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2263 {
2264 struct ixl_tx_map *maps, *txm;
2265 struct mbuf *m;
2266 unsigned int i;
2267
2268 softint_disestablish(txr->txr_si);
2269 while ((m = pcq_get(txr->txr_intrq)) != NULL)
2270 m_freem(m);
2271 pcq_destroy(txr->txr_intrq);
2272
2273 maps = txr->txr_maps;
2274 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2275 txm = &maps[i];
2276
2277 bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2278 }
2279
2280 ixl_dmamem_free(sc, &txr->txr_mem);
2281 mutex_destroy(&txr->txr_lock);
2282 kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs);
2283 kmem_free(txr, sizeof(*txr));
2284 }
2285
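/*
 * Load an mbuf chain into a TX DMA map; if the chain has too many
 * segments (EFBIG), defragment it once and retry, counting the result
 * in the ring's defrag event counters.
 */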
2286 static inline int
2287 ixl_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf **m0,
2288 struct ixl_tx_ring *txr)
2289 {
2290 struct mbuf *m;
2291 int error;
2292
2293 KASSERT(mutex_owned(&txr->txr_lock));
2294
2295 m = *m0;
2296
2297 error = bus_dmamap_load_mbuf(dmat, map, m,
2298 BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT);
2299 if (error != EFBIG)
2300 return error;
2301
2302 m = m_defrag(m, M_DONTWAIT);
2303 if (m != NULL) {
2304 *m0 = m;
2305 txr->txr_defragged.ev_count++;
2306
2307 error = bus_dmamap_load_mbuf(dmat, map, m,
2308 BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT);
2309 } else {
2310 txr->txr_defrag_failed.ev_count++;
2311 error = ENOBUFS;
2312 }
2313
2314 return error;
2315 }
2316
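/*
 * Common transmit path for if_start and if_transmit: dequeue packets
 * from if_snd or the per-ring pcq while descriptors are available,
 * build one data descriptor per DMA segment (EOP|RS on the last one),
 * and finally advance the TX tail register once.
 */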
2317 static void
2318 ixl_tx_common_locked(struct ifnet *ifp, struct ixl_tx_ring *txr,
2319 bool is_transmit)
2320 {
2321 struct ixl_softc *sc = ifp->if_softc;
2322 struct ixl_tx_desc *ring, *txd;
2323 struct ixl_tx_map *txm;
2324 bus_dmamap_t map;
2325 struct mbuf *m;
2326 uint64_t cmd, cmd_vlan;
2327 unsigned int prod, free, last, i;
2328 unsigned int mask;
2329 int post = 0;
2330
2331 KASSERT(mutex_owned(&txr->txr_lock));
2332
2333 if (ifp->if_link_state != LINK_STATE_UP
2334 || !ISSET(ifp->if_flags, IFF_RUNNING)
2335 || (!is_transmit && ISSET(ifp->if_flags, IFF_OACTIVE))) {
2336 if (!is_transmit)
2337 IFQ_PURGE(&ifp->if_snd);
2338 return;
2339 }
2340
2341 prod = txr->txr_prod;
2342 free = txr->txr_cons;
2343 if (free <= prod)
2344 free += sc->sc_tx_ring_ndescs;
2345 free -= prod;
2346
2347 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2348 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTWRITE);
2349
2350 ring = IXL_DMA_KVA(&txr->txr_mem);
2351 mask = sc->sc_tx_ring_ndescs - 1;
2352 last = prod;
2353 cmd = 0;
2354 txd = NULL;
2355
2356 for (;;) {
2357 if (free <= IXL_TX_PKT_DESCS) {
2358 if (!is_transmit)
2359 SET(ifp->if_flags, IFF_OACTIVE);
2360 break;
2361 }
2362
2363 if (is_transmit)
2364 m = pcq_get(txr->txr_intrq);
2365 else
2366 IFQ_DEQUEUE(&ifp->if_snd, m);
2367
2368 if (m == NULL)
2369 break;
2370
2371 txm = &txr->txr_maps[prod];
2372 map = txm->txm_map;
2373
2374 if (ixl_load_mbuf(sc->sc_dmat, map, &m, txr) != 0) {
2375 txr->txr_oerrors++;
2376 m_freem(m);
2377 continue;
2378 }
2379
2380 if (vlan_has_tag(m)) {
2381 cmd_vlan = (uint64_t)vlan_get_tag(m) <<
2382 IXL_TX_DESC_L2TAG1_SHIFT;
2383 cmd_vlan |= IXL_TX_DESC_CMD_IL2TAG1;
2384 } else {
2385 cmd_vlan = 0;
2386 }
2387
2388 bus_dmamap_sync(sc->sc_dmat, map, 0,
2389 map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2390
2391 for (i = 0; i < (unsigned int)map->dm_nsegs; i++) {
2392 txd = &ring[prod];
2393
2394 cmd = (uint64_t)map->dm_segs[i].ds_len <<
2395 IXL_TX_DESC_BSIZE_SHIFT;
2396 cmd |= IXL_TX_DESC_DTYPE_DATA | IXL_TX_DESC_CMD_ICRC;
2397 cmd |= cmd_vlan;
2398
2399 txd->addr = htole64(map->dm_segs[i].ds_addr);
2400 txd->cmd = htole64(cmd);
2401
2402 last = prod;
2403
2404 prod++;
2405 prod &= mask;
2406 }
2407 cmd |= IXL_TX_DESC_CMD_EOP | IXL_TX_DESC_CMD_RS;
2408 txd->cmd = htole64(cmd);
2409
2410 txm->txm_m = m;
2411 txm->txm_eop = last;
2412
2413 bpf_mtap(ifp, m, BPF_D_OUT);
2414
2415 free -= i;
2416 post = 1;
2417 }
2418
2419 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2420 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREWRITE);
2421
2422 if (post) {
2423 txr->txr_prod = prod;
2424 ixl_wr(sc, txr->txr_tail, prod);
2425 }
2426 }
2427
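/*
 * Reclaim transmitted packets, at most txlimit of them: a packet is
 * complete once the hardware has rewritten the dtype of its last
 * descriptor to DONE.  Returns non-zero if the limit was hit while
 * work remained.
 */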
2428 static int
2429 ixl_txeof(struct ixl_softc *sc, struct ixl_tx_ring *txr, u_int txlimit)
2430 {
2431 struct ifnet *ifp = &sc->sc_ec.ec_if;
2432 struct ixl_tx_desc *ring, *txd;
2433 struct ixl_tx_map *txm;
2434 struct mbuf *m;
2435 bus_dmamap_t map;
2436 unsigned int cons, prod, last;
2437 unsigned int mask;
2438 uint64_t dtype;
2439 int done = 0, more = 0;
2440
2441 KASSERT(mutex_owned(&txr->txr_lock));
2442
2443 prod = txr->txr_prod;
2444 cons = txr->txr_cons;
2445
2446 if (cons == prod)
2447 return 0;
2448
2449 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2450 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTREAD);
2451
2452 ring = IXL_DMA_KVA(&txr->txr_mem);
2453 mask = sc->sc_tx_ring_ndescs - 1;
2454
2455 do {
2456 if (txlimit-- <= 0) {
2457 more = 1;
2458 break;
2459 }
2460
2461 txm = &txr->txr_maps[cons];
2462 last = txm->txm_eop;
2463 txd = &ring[last];
2464
2465 dtype = txd->cmd & htole64(IXL_TX_DESC_DTYPE_MASK);
2466 if (dtype != htole64(IXL_TX_DESC_DTYPE_DONE))
2467 break;
2468
2469 map = txm->txm_map;
2470
2471 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2472 BUS_DMASYNC_POSTWRITE);
2473 bus_dmamap_unload(sc->sc_dmat, map);
2474
2475 m = txm->txm_m;
2476 if (m != NULL) {
2477 txr->txr_opackets++;
2478 txr->txr_obytes += m->m_pkthdr.len;
2479 if (ISSET(m->m_flags, M_MCAST))
2480 txr->txr_omcasts++;
2481 m_freem(m);
2482 }
2483
2484 txm->txm_m = NULL;
2485 txm->txm_eop = -1;
2486
2487 cons = last + 1;
2488 cons &= mask;
2489 done = 1;
2490 } while (cons != prod);
2491
2492 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2493 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREREAD);
2494
2495 txr->txr_cons = cons;
2496
2497 if (done) {
2498 softint_schedule(txr->txr_si);
2499 if (txr->txr_qid == 0) {
2500 CLR(ifp->if_flags, IFF_OACTIVE);
2501 if_schedule_deferred_start(ifp);
2502 }
2503 }
2504
2505 return more;
2506 }
2507
2508 static void
2509 ixl_start(struct ifnet *ifp)
2510 {
2511 struct ixl_softc *sc;
2512 struct ixl_tx_ring *txr;
2513
2514 sc = ifp->if_softc;
2515 txr = sc->sc_qps[0].qp_txr;
2516
2517 mutex_enter(&txr->txr_lock);
2518 ixl_tx_common_locked(ifp, txr, false);
2519 mutex_exit(&txr->txr_lock);
2520 }
2521
2522 static inline unsigned int
2523 ixl_select_txqueue(struct ixl_softc *sc, struct mbuf *m)
2524 {
2525 u_int cpuid;
2526
2527 cpuid = cpu_index(curcpu());
2528
2529 return (unsigned int)(cpuid % sc->sc_nqueue_pairs);
2530 }
2531
2532 static int
2533 ixl_transmit(struct ifnet *ifp, struct mbuf *m)
2534 {
2535 struct ixl_softc *sc;
2536 struct ixl_tx_ring *txr;
2537 unsigned int qid;
2538
2539 sc = ifp->if_softc;
2540 qid = ixl_select_txqueue(sc, m);
2541
2542 txr = sc->sc_qps[qid].qp_txr;
2543
2544 if (__predict_false(!pcq_put(txr->txr_intrq, m))) {
2545 mutex_enter(&txr->txr_lock);
2546 txr->txr_pcqdrop.ev_count++;
2547 mutex_exit(&txr->txr_lock);
2548
2549 m_freem(m);
2550 return ENOBUFS;
2551 }
2552
2553 if (mutex_tryenter(&txr->txr_lock)) {
2554 ixl_tx_common_locked(ifp, txr, true);
2555 mutex_exit(&txr->txr_lock);
2556 } else {
2557 softint_schedule(txr->txr_si);
2558 }
2559
2560 return 0;
2561 }
2562
2563 static void
2564 ixl_deferred_transmit(void *xtxr)
2565 {
2566 struct ixl_tx_ring *txr = xtxr;
2567 struct ixl_softc *sc = txr->txr_sc;
2568 struct ifnet *ifp = &sc->sc_ec.ec_if;
2569
2570 mutex_enter(&txr->txr_lock);
2571 txr->txr_transmitdef.ev_count++;
2572 if (pcq_peek(txr->txr_intrq) != NULL)
2573 ixl_tx_common_locked(ifp, txr, true);
2574 mutex_exit(&txr->txr_lock);
2575 }
2576
2577 static struct ixl_rx_ring *
2578 ixl_rxr_alloc(struct ixl_softc *sc, unsigned int qid)
2579 {
2580 struct ixl_rx_ring *rxr = NULL;
2581 struct ixl_rx_map *maps = NULL, *rxm;
2582 unsigned int i;
2583
2584 rxr = kmem_zalloc(sizeof(*rxr), KM_SLEEP);
2585 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_rx_ring_ndescs,
2586 KM_SLEEP);
2587
2588 if (ixl_dmamem_alloc(sc, &rxr->rxr_mem,
2589 sizeof(struct ixl_rx_rd_desc_32) * sc->sc_rx_ring_ndescs,
2590 IXL_RX_QUEUE_ALIGN) != 0)
2591 goto free;
2592
2593 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2594 rxm = &maps[i];
2595
2596 if (bus_dmamap_create(sc->sc_dmat,
2597 IXL_HARDMTU, 1, IXL_HARDMTU, 0,
2598 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &rxm->rxm_map) != 0)
2599 goto uncreate;
2600
2601 rxm->rxm_m = NULL;
2602 }
2603
2604 rxr->rxr_cons = rxr->rxr_prod = 0;
2605 rxr->rxr_m_head = NULL;
2606 rxr->rxr_m_tail = &rxr->rxr_m_head;
2607 rxr->rxr_maps = maps;
2608
2609 rxr->rxr_tail = I40E_QRX_TAIL(qid);
2610 rxr->rxr_qid = qid;
2611 mutex_init(&rxr->rxr_lock, MUTEX_DEFAULT, IPL_NET);
2612
2613 return rxr;
2614
2615 uncreate:
2616 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2617 rxm = &maps[i];
2618
2619 if (rxm->rxm_map == NULL)
2620 continue;
2621
2622 bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
2623 }
2624
2625 ixl_dmamem_free(sc, &rxr->rxr_mem);
2626 free:
2627 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs);
2628 kmem_free(rxr, sizeof(*rxr));
2629
2630 return NULL;
2631 }
2632
2633 static void
2634 ixl_rxr_clean(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2635 {
2636 struct ixl_rx_map *maps, *rxm;
2637 bus_dmamap_t map;
2638 unsigned int i;
2639
2640 maps = rxr->rxr_maps;
2641 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2642 rxm = &maps[i];
2643
2644 if (rxm->rxm_m == NULL)
2645 continue;
2646
2647 map = rxm->rxm_map;
2648 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2649 BUS_DMASYNC_POSTWRITE);
2650 bus_dmamap_unload(sc->sc_dmat, map);
2651
2652 m_freem(rxm->rxm_m);
2653 rxm->rxm_m = NULL;
2654 }
2655
2656 m_freem(rxr->rxr_m_head);
2657 rxr->rxr_m_head = NULL;
2658 rxr->rxr_m_tail = &rxr->rxr_m_head;
2659
2660 rxr->rxr_prod = rxr->rxr_cons = 0;
2661 }
2662
2663 static int
2664 ixl_rxr_enabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2665 {
2666 bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
2667 uint32_t reg;
2668 int i;
2669
2670 for (i = 0; i < 10; i++) {
2671 reg = ixl_rd(sc, ena);
2672 if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK))
2673 return 0;
2674
2675 delaymsec(10);
2676 }
2677
2678 return ETIMEDOUT;
2679 }
2680
2681 static int
2682 ixl_rxr_disabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2683 {
2684 bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
2685 uint32_t reg;
2686 int i;
2687
2688 KASSERT(mutex_owned(&rxr->rxr_lock));
2689
2690 for (i = 0; i < 20; i++) {
2691 reg = ixl_rd(sc, ena);
2692 if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK) == 0)
2693 return 0;
2694
2695 delaymsec(10);
2696 }
2697
2698 return ETIMEDOUT;
2699 }
2700
2701 static void
2702 ixl_rxr_config(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2703 {
2704 struct ixl_hmc_rxq rxq;
2705 void *hmc;
2706
2707 memset(&rxq, 0, sizeof(rxq));
2708
2709 rxq.head = htole16(rxr->rxr_cons);
2710 rxq.base = htole64(IXL_DMA_DVA(&rxr->rxr_mem) / IXL_HMC_RXQ_BASE_UNIT);
2711 rxq.qlen = htole16(sc->sc_rx_ring_ndescs);
2712 rxq.dbuff = htole16(MCLBYTES / IXL_HMC_RXQ_DBUFF_UNIT);
2713 rxq.hbuff = 0;
2714 rxq.dtype = IXL_HMC_RXQ_DTYPE_NOSPLIT;
2715 rxq.dsize = IXL_HMC_RXQ_DSIZE_32;
2716 rxq.crcstrip = 1;
2717 rxq.l2sel = 1;
2718 rxq.showiv = 1;
2719 rxq.rxmax = htole16(IXL_HARDMTU);
2720 rxq.tphrdesc_ena = 0;
2721 rxq.tphwdesc_ena = 0;
2722 rxq.tphdata_ena = 0;
2723 rxq.tphhead_ena = 0;
2724 rxq.lrxqthresh = 0;
2725 rxq.prefena = 1;
2726
2727 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
2728 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
2729 ixl_hmc_pack(hmc, &rxq, ixl_hmc_pack_rxq,
2730 __arraycount(ixl_hmc_pack_rxq));
2731 }
2732
2733 static void
2734 ixl_rxr_unconfig(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2735 {
2736 void *hmc;
2737
2738 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
2739 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
2740 }
2741
2742 static void
2743 ixl_rxr_free(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2744 {
2745 struct ixl_rx_map *maps, *rxm;
2746 unsigned int i;
2747
2748 maps = rxr->rxr_maps;
2749 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2750 rxm = &maps[i];
2751
2752 bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
2753 }
2754
2755 ixl_dmamem_free(sc, &rxr->rxr_mem);
2756 mutex_destroy(&rxr->rxr_lock);
2757 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs);
2758 kmem_free(rxr, sizeof(*rxr));
2759 }
2760
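/*
 * Harvest received frames, at most rxlimit descriptors: fragments are
 * chained on rxr_m_head until the EOP descriptor arrives, the VLAN tag
 * is taken from the write-back words, and the ring is refilled once
 * something has been consumed.  Returns non-zero if more work remains.
 */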
2761 static int
2762 ixl_rxeof(struct ixl_softc *sc, struct ixl_rx_ring *rxr, u_int rxlimit)
2763 {
2764 struct ifnet *ifp = &sc->sc_ec.ec_if;
2765 struct ixl_rx_wb_desc_32 *ring, *rxd;
2766 struct ixl_rx_map *rxm;
2767 bus_dmamap_t map;
2768 unsigned int cons, prod;
2769 struct mbuf *m;
2770 uint64_t word, word0;
2771 unsigned int len;
2772 unsigned int mask;
2773 int done = 0, more = 0;
2774
2775 KASSERT(mutex_owned(&rxr->rxr_lock));
2776
2777 if (!ISSET(ifp->if_flags, IFF_RUNNING))
2778 return 0;
2779
2780 prod = rxr->rxr_prod;
2781 cons = rxr->rxr_cons;
2782
2783 if (cons == prod)
2784 return 0;
2785
2786 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
2787 0, IXL_DMA_LEN(&rxr->rxr_mem),
2788 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2789
2790 ring = IXL_DMA_KVA(&rxr->rxr_mem);
2791 mask = sc->sc_rx_ring_ndescs - 1;
2792
2793 do {
2794 if (rxlimit-- <= 0) {
2795 more = 1;
2796 break;
2797 }
2798
2799 rxd = &ring[cons];
2800
2801 word = le64toh(rxd->qword1);
2802
2803 if (!ISSET(word, IXL_RX_DESC_DD))
2804 break;
2805
2806 rxm = &rxr->rxr_maps[cons];
2807
2808 map = rxm->rxm_map;
2809 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2810 BUS_DMASYNC_POSTREAD);
2811 bus_dmamap_unload(sc->sc_dmat, map);
2812
2813 m = rxm->rxm_m;
2814 rxm->rxm_m = NULL;
2815
2816 KASSERT(m != NULL);
2817
2818 len = (word & IXL_RX_DESC_PLEN_MASK) >> IXL_RX_DESC_PLEN_SHIFT;
2819 m->m_len = len;
2820 m->m_pkthdr.len = 0;
2821
2822 m->m_next = NULL;
2823 *rxr->rxr_m_tail = m;
2824 rxr->rxr_m_tail = &m->m_next;
2825
2826 m = rxr->rxr_m_head;
2827 m->m_pkthdr.len += len;
2828
2829 if (ISSET(word, IXL_RX_DESC_EOP)) {
2830 word0 = le64toh(rxd->qword0);
2831
2832 if (ISSET(word, IXL_RX_DESC_L2TAG1P)) {
2833 vlan_set_tag(m,
2834 __SHIFTOUT(word0, IXL_RX_DESC_L2TAG1_MASK));
2835 }
2836
2837 if (!ISSET(word,
2838 IXL_RX_DESC_RXE | IXL_RX_DESC_OVERSIZE)) {
2839 m_set_rcvif(m, ifp);
2840 rxr->rxr_ipackets++;
2841 rxr->rxr_ibytes += m->m_pkthdr.len;
2842 if_percpuq_enqueue(ifp->if_percpuq, m);
2843 } else {
2844 rxr->rxr_ierrors++;
2845 m_freem(m);
2846 }
2847
2848 rxr->rxr_m_head = NULL;
2849 rxr->rxr_m_tail = &rxr->rxr_m_head;
2850 }
2851
2852 cons++;
2853 cons &= mask;
2854
2855 done = 1;
2856 } while (cons != prod);
2857
2858 if (done) {
2859 rxr->rxr_cons = cons;
2860 if (ixl_rxfill(sc, rxr) == -1)
2861 rxr->rxr_iqdrops++;
2862 }
2863
2864 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
2865 0, IXL_DMA_LEN(&rxr->rxr_mem),
2866 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2867
2868 return more;
2869 }
2870
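/*
 * Refill the unrefreshed RX slots with fresh mbuf clusters and bump
 * the RX tail register.  Returns -1 if no slot could be refilled or an
 * mbuf/DMA allocation failed part way through.
 */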
2871 static int
2872 ixl_rxfill(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2873 {
2874 struct ixl_rx_rd_desc_32 *ring, *rxd;
2875 struct ixl_rx_map *rxm;
2876 bus_dmamap_t map;
2877 struct mbuf *m;
2878 unsigned int prod;
2879 unsigned int slots;
2880 unsigned int mask;
2881 int post = 0, error = 0;
2882
2883 KASSERT(mutex_owned(&rxr->rxr_lock));
2884
2885 prod = rxr->rxr_prod;
2886 slots = ixl_rxr_unrefreshed(rxr->rxr_prod, rxr->rxr_cons,
2887 sc->sc_rx_ring_ndescs);
2888
2889 ring = IXL_DMA_KVA(&rxr->rxr_mem);
2890 mask = sc->sc_rx_ring_ndescs - 1;
2891
2892 if (__predict_false(slots <= 0))
2893 return -1;
2894
2895 do {
2896 rxm = &rxr->rxr_maps[prod];
2897
2898 MGETHDR(m, M_DONTWAIT, MT_DATA);
2899 if (m == NULL) {
2900 rxr->rxr_mgethdr_failed.ev_count++;
2901 error = -1;
2902 break;
2903 }
2904
2905 MCLGET(m, M_DONTWAIT);
2906 if (!ISSET(m->m_flags, M_EXT)) {
2907 rxr->rxr_mgetcl_failed.ev_count++;
2908 error = -1;
2909 m_freem(m);
2910 break;
2911 }
2912
2913 		m->m_len = m->m_pkthdr.len = MCLBYTES;
2914 m_adj(m, ETHER_ALIGN);
2915
2916 map = rxm->rxm_map;
2917
2918 if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
2919 BUS_DMA_READ | BUS_DMA_NOWAIT) != 0) {
2920 rxr->rxr_mbuf_load_failed.ev_count++;
2921 error = -1;
2922 m_freem(m);
2923 break;
2924 }
2925
2926 rxm->rxm_m = m;
2927
2928 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2929 BUS_DMASYNC_PREREAD);
2930
2931 rxd = &ring[prod];
2932
2933 rxd->paddr = htole64(map->dm_segs[0].ds_addr);
2934 rxd->haddr = htole64(0);
2935
2936 prod++;
2937 prod &= mask;
2938
2939 post = 1;
2940
2941 } while (--slots);
2942
2943 if (post) {
2944 rxr->rxr_prod = prod;
2945 ixl_wr(sc, rxr->rxr_tail, prod);
2946 }
2947
2948 return error;
2949 }
2950
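/*
 * Process TX and RX completions of one queue pair under the respective
 * ring locks.  The return value has bit 0 set if TX work remains and
 * bit 1 set if RX work remains.
 */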
2951 static inline int
2952 ixl_handle_queue_common(struct ixl_softc *sc, struct ixl_queue_pair *qp,
2953 u_int txlimit, struct evcnt *txevcnt,
2954 u_int rxlimit, struct evcnt *rxevcnt)
2955 {
2956 struct ixl_tx_ring *txr = qp->qp_txr;
2957 struct ixl_rx_ring *rxr = qp->qp_rxr;
2958 int txmore, rxmore;
2959 int rv;
2960
2961 KASSERT(!mutex_owned(&txr->txr_lock));
2962 KASSERT(!mutex_owned(&rxr->rxr_lock));
2963
2964 mutex_enter(&txr->txr_lock);
2965 txevcnt->ev_count++;
2966 txmore = ixl_txeof(sc, txr, txlimit);
2967 mutex_exit(&txr->txr_lock);
2968
2969 mutex_enter(&rxr->rxr_lock);
2970 rxevcnt->ev_count++;
2971 rxmore = ixl_rxeof(sc, rxr, rxlimit);
2972 mutex_exit(&rxr->rxr_lock);
2973
2974 rv = txmore | (rxmore << 1);
2975
2976 return rv;
2977 }
2978
2979 static void
2980 ixl_sched_handle_queue(struct ixl_softc *sc, struct ixl_queue_pair *qp)
2981 {
2982
2983 if (qp->qp_workqueue)
2984 ixl_work_add(sc->sc_workq_txrx, &qp->qp_task);
2985 else
2986 softint_schedule(qp->qp_si);
2987 }
2988
2989 static int
2990 ixl_intr(void *xsc)
2991 {
2992 struct ixl_softc *sc = xsc;
2993 struct ixl_tx_ring *txr;
2994 struct ixl_rx_ring *rxr;
2995 uint32_t icr, rxintr, txintr;
2996 int rv = 0;
2997 unsigned int i;
2998
2999 KASSERT(sc != NULL);
3000
3001 ixl_enable_other_intr(sc);
3002 icr = ixl_rd(sc, I40E_PFINT_ICR0);
3003
3004 if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) {
3005 atomic_inc_64(&sc->sc_event_atq.ev_count);
3006 ixl_atq_done(sc);
3007 ixl_work_add(sc->sc_workq, &sc->sc_arq_task);
3008 rv = 1;
3009 }
3010
3011 if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) {
3012 atomic_inc_64(&sc->sc_event_link.ev_count);
3013 ixl_work_add(sc->sc_workq, &sc->sc_link_state_task);
3014 rv = 1;
3015 }
3016
3017 rxintr = icr & I40E_INTR_NOTX_RX_MASK;
3018 txintr = icr & I40E_INTR_NOTX_TX_MASK;
3019
3020 if (txintr || rxintr) {
3021 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
3022 txr = sc->sc_qps[i].qp_txr;
3023 rxr = sc->sc_qps[i].qp_rxr;
3024
3025 ixl_handle_queue_common(sc, &sc->sc_qps[i],
3026 IXL_TXRX_PROCESS_UNLIMIT, &txr->txr_intr,
3027 IXL_TXRX_PROCESS_UNLIMIT, &rxr->rxr_intr);
3028 }
3029 rv = 1;
3030 }
3031
3032 return rv;
3033 }
3034
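/*
 * MSI-X per-queue interrupt handler: process TX and RX completions up
 * to the interrupt limits; if work remains, defer the rest to the
 * softint or workqueue, otherwise re-enable the queue interrupt.
 */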
3035 static int
3036 ixl_queue_intr(void *xqp)
3037 {
3038 struct ixl_queue_pair *qp = xqp;
3039 struct ixl_tx_ring *txr = qp->qp_txr;
3040 struct ixl_rx_ring *rxr = qp->qp_rxr;
3041 struct ixl_softc *sc = qp->qp_sc;
3042 u_int txlimit, rxlimit;
3043 int more;
3044
3045 txlimit = sc->sc_tx_intr_process_limit;
3046 rxlimit = sc->sc_rx_intr_process_limit;
3047 qp->qp_workqueue = sc->sc_txrx_workqueue;
3048
3049 more = ixl_handle_queue_common(sc, qp,
3050 txlimit, &txr->txr_intr, rxlimit, &rxr->rxr_intr);
3051
3052 if (more != 0) {
3053 ixl_sched_handle_queue(sc, qp);
3054 } else {
3055 /* for ALTQ */
3056 if (txr->txr_qid == 0)
3057 if_schedule_deferred_start(&sc->sc_ec.ec_if);
3058 softint_schedule(txr->txr_si);
3059
3060 ixl_enable_queue_intr(sc, qp);
3061 }
3062
3063 return 1;
3064 }
3065
3066 static void
3067 ixl_handle_queue(void *xqp)
3068 {
3069 struct ixl_queue_pair *qp = xqp;
3070 struct ixl_softc *sc = qp->qp_sc;
3071 struct ixl_tx_ring *txr = qp->qp_txr;
3072 struct ixl_rx_ring *rxr = qp->qp_rxr;
3073 u_int txlimit, rxlimit;
3074 int more;
3075
3076 txlimit = sc->sc_tx_process_limit;
3077 rxlimit = sc->sc_rx_process_limit;
3078
3079 more = ixl_handle_queue_common(sc, qp,
3080 txlimit, &txr->txr_defer, rxlimit, &rxr->rxr_defer);
3081
3082 if (more != 0)
3083 ixl_sched_handle_queue(sc, qp);
3084 else
3085 ixl_enable_queue_intr(sc, qp);
3086 }
3087
3088 static inline void
3089 ixl_print_hmc_error(struct ixl_softc *sc, uint32_t reg)
3090 {
3091 uint32_t hmc_idx, hmc_isvf;
3092 uint32_t hmc_errtype, hmc_objtype, hmc_data;
3093
3094 hmc_idx = reg & I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK;
3095 hmc_idx = hmc_idx >> I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT;
3096 hmc_isvf = reg & I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK;
3097 hmc_isvf = hmc_isvf >> I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT;
3098 hmc_errtype = reg & I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK;
3099 hmc_errtype = hmc_errtype >> I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT;
3100 hmc_objtype = reg & I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK;
3101 hmc_objtype = hmc_objtype >> I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT;
3102 hmc_data = ixl_rd(sc, I40E_PFHMC_ERRORDATA);
3103
3104 device_printf(sc->sc_dev,
3105 "HMC Error (idx=0x%x, isvf=0x%x, err=0x%x, obj=0x%x, data=0x%x)\n",
3106 hmc_idx, hmc_isvf, hmc_errtype, hmc_objtype, hmc_data);
3107 }
3108
3109 static int
3110 ixl_other_intr(void *xsc)
3111 {
3112 struct ixl_softc *sc = xsc;
3113 uint32_t icr, mask, reg;
3114 	int rv = 0;
3115
3116 icr = ixl_rd(sc, I40E_PFINT_ICR0);
3117 mask = ixl_rd(sc, I40E_PFINT_ICR0_ENA);
3118
3119 if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) {
3120 atomic_inc_64(&sc->sc_event_atq.ev_count);
3121 ixl_atq_done(sc);
3122 ixl_work_add(sc->sc_workq, &sc->sc_arq_task);
3123 rv = 1;
3124 }
3125
3126 if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) {
3127 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3128 device_printf(sc->sc_dev, "link stat changed\n");
3129
3130 atomic_inc_64(&sc->sc_event_link.ev_count);
3131 ixl_work_add(sc->sc_workq, &sc->sc_link_state_task);
3132 rv = 1;
3133 }
3134
3135 if (ISSET(icr, I40E_PFINT_ICR0_GRST_MASK)) {
3136 CLR(mask, I40E_PFINT_ICR0_ENA_GRST_MASK);
3137 reg = ixl_rd(sc, I40E_GLGEN_RSTAT);
3138 reg = reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK;
3139 reg = reg >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
3140
3141 device_printf(sc->sc_dev, "GRST: %s\n",
3142 reg == I40E_RESET_CORER ? "CORER" :
3143 reg == I40E_RESET_GLOBR ? "GLOBR" :
3144 reg == I40E_RESET_EMPR ? "EMPR" :
3145 "POR");
3146 }
3147
3148 if (ISSET(icr, I40E_PFINT_ICR0_ECC_ERR_MASK))
3149 atomic_inc_64(&sc->sc_event_ecc_err.ev_count);
3150 if (ISSET(icr, I40E_PFINT_ICR0_PCI_EXCEPTION_MASK))
3151 atomic_inc_64(&sc->sc_event_pci_exception.ev_count);
3152 if (ISSET(icr, I40E_PFINT_ICR0_PE_CRITERR_MASK))
3153 atomic_inc_64(&sc->sc_event_crit_err.ev_count);
3154
3155 if (ISSET(icr, IXL_ICR0_CRIT_ERR_MASK)) {
3156 CLR(mask, IXL_ICR0_CRIT_ERR_MASK);
3157 device_printf(sc->sc_dev, "critical error\n");
3158 }
3159
3160 if (ISSET(icr, I40E_PFINT_ICR0_HMC_ERR_MASK)) {
3161 reg = ixl_rd(sc, I40E_PFHMC_ERRORINFO);
3162 if (ISSET(reg, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK))
3163 ixl_print_hmc_error(sc, reg);
3164 ixl_wr(sc, I40E_PFHMC_ERRORINFO, 0);
3165 }
3166
3167 ixl_wr(sc, I40E_PFINT_ICR0_ENA, mask);
3168 ixl_flush(sc);
3169 ixl_enable_other_intr(sc);
3170 return rv;
3171 }
3172
3173 static void
3174 ixl_get_link_status_done(struct ixl_softc *sc,
3175 const struct ixl_aq_desc *iaq)
3176 {
3177
3178 ixl_link_state_update(sc, iaq);
3179 }
3180
3181 static void
3182 ixl_get_link_status(void *xsc)
3183 {
3184 struct ixl_softc *sc = xsc;
3185 struct ixl_aq_desc *iaq;
3186 struct ixl_aq_link_param *param;
3187
3188 memset(&sc->sc_link_state_atq, 0, sizeof(sc->sc_link_state_atq));
3189 iaq = &sc->sc_link_state_atq.iatq_desc;
3190 iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
3191 param = (struct ixl_aq_link_param *)iaq->iaq_param;
3192 param->notify = IXL_AQ_LINK_NOTIFY;
3193
3194 ixl_atq_set(&sc->sc_link_state_atq, ixl_get_link_status_done);
3195 (void)ixl_atq_post(sc, &sc->sc_link_state_atq);
3196 }
3197
3198 static void
3199 ixl_link_state_update(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
3200 {
3201 struct ifnet *ifp = &sc->sc_ec.ec_if;
3202 int link_state;
3203
3204 link_state = ixl_set_link_status(sc, iaq);
3205
3206 if (ifp->if_link_state != link_state)
3207 if_link_state_change(ifp, link_state);
3208
3209 if (link_state != LINK_STATE_DOWN) {
3210 if_schedule_deferred_start(ifp);
3211 }
3212 }
3213
3214 static void
3215 ixl_aq_dump(const struct ixl_softc *sc, const struct ixl_aq_desc *iaq,
3216 const char *msg)
3217 {
3218 char buf[512];
3219 size_t len;
3220
3221 len = sizeof(buf);
3222 buf[--len] = '\0';
3223
3224 device_printf(sc->sc_dev, "%s\n", msg);
3225 snprintb(buf, len, IXL_AQ_FLAGS_FMT, le16toh(iaq->iaq_flags));
3226 device_printf(sc->sc_dev, "flags %s opcode %04x\n",
3227 buf, le16toh(iaq->iaq_opcode));
3228 device_printf(sc->sc_dev, "datalen %u retval %u\n",
3229 le16toh(iaq->iaq_datalen), le16toh(iaq->iaq_retval));
3230 device_printf(sc->sc_dev, "cookie %016" PRIx64 "\n", iaq->iaq_cookie);
3231 device_printf(sc->sc_dev, "%08x %08x %08x %08x\n",
3232 le32toh(iaq->iaq_param[0]), le32toh(iaq->iaq_param[1]),
3233 le32toh(iaq->iaq_param[2]), le32toh(iaq->iaq_param[3]));
3234 }
3235
3236 static void
3237 ixl_arq(void *xsc)
3238 {
3239 struct ixl_softc *sc = xsc;
3240 struct ixl_aq_desc *arq, *iaq;
3241 struct ixl_aq_buf *aqb;
3242 unsigned int cons = sc->sc_arq_cons;
3243 unsigned int prod;
3244 int done = 0;
3245
3246 prod = ixl_rd(sc, sc->sc_aq_regs->arq_head) &
3247 sc->sc_aq_regs->arq_head_mask;
3248
3249 if (cons == prod)
3250 goto done;
3251
3252 arq = IXL_DMA_KVA(&sc->sc_arq);
3253
3254 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3255 0, IXL_DMA_LEN(&sc->sc_arq),
3256 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3257
3258 do {
3259 iaq = &arq[cons];
3260 aqb = sc->sc_arq_live[cons];
3261
3262 KASSERT(aqb != NULL);
3263
3264 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,
3265 BUS_DMASYNC_POSTREAD);
3266
3267 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3268 ixl_aq_dump(sc, iaq, "arq event");
3269
3270 switch (iaq->iaq_opcode) {
3271 case htole16(IXL_AQ_OP_PHY_LINK_STATUS):
3272 ixl_link_state_update(sc, iaq);
3273 break;
3274 }
3275
3276 memset(iaq, 0, sizeof(*iaq));
3277 sc->sc_arq_live[cons] = NULL;
3278 SIMPLEQ_INSERT_TAIL(&sc->sc_arq_idle, aqb, aqb_entry);
3279
3280 cons++;
3281 cons &= IXL_AQ_MASK;
3282
3283 done = 1;
3284 } while (cons != prod);
3285
3286 if (done) {
3287 sc->sc_arq_cons = cons;
3288 ixl_arq_fill(sc);
3289 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3290 0, IXL_DMA_LEN(&sc->sc_arq),
3291 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3292 }
3293
3294 done:
3295 ixl_enable_other_intr(sc);
3296 }
3297
3298 static void
3299 ixl_atq_set(struct ixl_atq *iatq,
3300 void (*fn)(struct ixl_softc *, const struct ixl_aq_desc *))
3301 {
3302
3303 iatq->iatq_fn = fn;
3304 }
3305
3306 static int
3307 ixl_atq_post_locked(struct ixl_softc *sc, struct ixl_atq *iatq)
3308 {
3309 struct ixl_aq_desc *atq, *slot;
3310 unsigned int prod, cons, prod_next;
3311
3312 /* assert locked */
3313 KASSERT(mutex_owned(&sc->sc_atq_lock));
3314
3315 atq = IXL_DMA_KVA(&sc->sc_atq);
3316 prod = sc->sc_atq_prod;
3317 cons = sc->sc_atq_cons;
3318 	prod_next = (prod + 1) & IXL_AQ_MASK;
3319
3320 if (cons == prod_next)
3321 return ENOMEM;
3322
3323 slot = &atq[prod];
3324
3325 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3326 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
3327
3328 *slot = iatq->iatq_desc;
3329 slot->iaq_cookie = (uint64_t)((intptr_t)iatq);
3330
3331 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3332 ixl_aq_dump(sc, slot, "atq command");
3333
3334 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3335 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
3336
3337 sc->sc_atq_prod = prod_next;
3338 ixl_wr(sc, sc->sc_aq_regs->atq_tail, sc->sc_atq_prod);
3339
3340 return 0;
3341 }
3342
3343 static int
3344 ixl_atq_post(struct ixl_softc *sc, struct ixl_atq *iatq)
3345 {
3346 int rv;
3347
3348 mutex_enter(&sc->sc_atq_lock);
3349 rv = ixl_atq_post_locked(sc, iatq);
3350 mutex_exit(&sc->sc_atq_lock);
3351
3352 return rv;
3353 }
3354
3355 static void
3356 ixl_atq_done_locked(struct ixl_softc *sc)
3357 {
3358 struct ixl_aq_desc *atq, *slot;
3359 struct ixl_atq *iatq;
3360 unsigned int cons;
3361 unsigned int prod;
3362
3363 KASSERT(mutex_owned(&sc->sc_atq_lock));
3364
3365 prod = sc->sc_atq_prod;
3366 cons = sc->sc_atq_cons;
3367
3368 if (prod == cons)
3369 return;
3370
3371 atq = IXL_DMA_KVA(&sc->sc_atq);
3372
3373 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3374 0, IXL_DMA_LEN(&sc->sc_atq),
3375 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3376
3377 do {
3378 slot = &atq[cons];
3379 if (!ISSET(slot->iaq_flags, htole16(IXL_AQ_DD)))
3380 break;
3381
3382 iatq = (struct ixl_atq *)((intptr_t)slot->iaq_cookie);
3383 iatq->iatq_desc = *slot;
3384
3385 memset(slot, 0, sizeof(*slot));
3386
3387 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3388 ixl_aq_dump(sc, &iatq->iatq_desc, "atq response");
3389
3390 (*iatq->iatq_fn)(sc, &iatq->iatq_desc);
3391
3392 cons++;
3393 cons &= IXL_AQ_MASK;
3394 } while (cons != prod);
3395
3396 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3397 0, IXL_DMA_LEN(&sc->sc_atq),
3398 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3399
3400 sc->sc_atq_cons = cons;
3401 }
3402
3403 static void
3404 ixl_atq_done(struct ixl_softc *sc)
3405 {
3406
3407 mutex_enter(&sc->sc_atq_lock);
3408 ixl_atq_done_locked(sc);
3409 mutex_exit(&sc->sc_atq_lock);
3410 }
3411
3412 static void
3413 ixl_wakeup(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
3414 {
3415
3416 KASSERT(mutex_owned(&sc->sc_atq_lock));
3417
3418 cv_signal(&sc->sc_atq_cv);
3419 }
3420
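/*
 * Post an admin queue command and sleep on sc_atq_cv until the
 * completion callback (ixl_wakeup) signals it or the timeout expires.
 */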
3421 static int
3422 ixl_atq_exec(struct ixl_softc *sc, struct ixl_atq *iatq)
3423 {
3424 int error;
3425
3426 KASSERT(iatq->iatq_desc.iaq_cookie == 0);
3427
3428 ixl_atq_set(iatq, ixl_wakeup);
3429
3430 mutex_enter(&sc->sc_atq_lock);
3431 error = ixl_atq_post_locked(sc, iatq);
3432 if (error) {
3433 mutex_exit(&sc->sc_atq_lock);
3434 return error;
3435 }
3436
3437 error = cv_timedwait(&sc->sc_atq_cv, &sc->sc_atq_lock,
3438 IXL_ATQ_EXEC_TIMEOUT);
3439 mutex_exit(&sc->sc_atq_lock);
3440
3441 return error;
3442 }
3443
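/*
 * Post an admin queue command and busy-wait, in 1ms steps for at most
 * tm milliseconds, until the hardware has consumed it, then copy the
 * completed descriptor back into *iaq.
 */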
3444 static int
3445 ixl_atq_poll(struct ixl_softc *sc, struct ixl_aq_desc *iaq, unsigned int tm)
3446 {
3447 struct ixl_aq_desc *atq, *slot;
3448 unsigned int prod;
3449 unsigned int t = 0;
3450
3451 mutex_enter(&sc->sc_atq_lock);
3452
3453 atq = IXL_DMA_KVA(&sc->sc_atq);
3454 prod = sc->sc_atq_prod;
3455 slot = atq + prod;
3456
3457 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3458 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
3459
3460 *slot = *iaq;
3461 slot->iaq_flags |= htole16(IXL_AQ_SI);
3462
3463 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3464 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
3465
3466 prod++;
3467 prod &= IXL_AQ_MASK;
3468 sc->sc_atq_prod = prod;
3469 ixl_wr(sc, sc->sc_aq_regs->atq_tail, prod);
3470
3471 while (ixl_rd(sc, sc->sc_aq_regs->atq_head) != prod) {
3472 delaymsec(1);
3473
3474 if (t++ > tm) {
3475 mutex_exit(&sc->sc_atq_lock);
3476 return ETIMEDOUT;
3477 }
3478 }
3479
3480 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3481 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTREAD);
3482 *iaq = *slot;
3483 memset(slot, 0, sizeof(*slot));
3484 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3485 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREREAD);
3486
3487 sc->sc_atq_cons = prod;
3488
3489 mutex_exit(&sc->sc_atq_lock);
3490
3491 return 0;
3492 }
3493
3494 static int
3495 ixl_get_version(struct ixl_softc *sc)
3496 {
3497 struct ixl_aq_desc iaq;
3498 uint32_t fwbuild, fwver, apiver;
3499 uint16_t api_maj_ver, api_min_ver;
3500
3501 memset(&iaq, 0, sizeof(iaq));
3502 iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VERSION);
3503
3506 if (ixl_atq_poll(sc, &iaq, 2000) != 0)
3507 return ETIMEDOUT;
3508 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK))
3509 return EIO;
3510
3511 fwbuild = le32toh(iaq.iaq_param[1]);
3512 fwver = le32toh(iaq.iaq_param[2]);
3513 apiver = le32toh(iaq.iaq_param[3]);
3514
3515 api_maj_ver = (uint16_t)apiver;
3516 api_min_ver = (uint16_t)(apiver >> 16);
3517
3518 aprint_normal(", FW %hu.%hu.%05u API %hu.%hu", (uint16_t)fwver,
3519 (uint16_t)(fwver >> 16), fwbuild, api_maj_ver, api_min_ver);
3520
3521 sc->sc_rxctl_atq = true;
3522 if (sc->sc_mac_type == I40E_MAC_X722) {
3523 if (api_maj_ver == 1 && api_min_ver < 5) {
3524 sc->sc_rxctl_atq = false;
3525 }
3526 }
3527
3528 return 0;
3529 }
3530
3531 static int
3532 ixl_pxe_clear(struct ixl_softc *sc)
3533 {
3534 struct ixl_aq_desc iaq;
3535 int rv;
3536
3537 memset(&iaq, 0, sizeof(iaq));
3538 iaq.iaq_opcode = htole16(IXL_AQ_OP_CLEAR_PXE_MODE);
3539 iaq.iaq_param[0] = htole32(0x2);
3540
3541 rv = ixl_atq_poll(sc, &iaq, 250);
3542
3543 ixl_wr(sc, I40E_GLLAN_RCTL_0, 0x1);
3544
3545 if (rv != 0)
3546 return ETIMEDOUT;
3547
3548 switch (iaq.iaq_retval) {
3549 case htole16(IXL_AQ_RC_OK):
3550 case htole16(IXL_AQ_RC_EEXIST):
3551 break;
3552 default:
3553 return EIO;
3554 }
3555
3556 return 0;
3557 }
3558
3559 static int
3560 ixl_lldp_shut(struct ixl_softc *sc)
3561 {
3562 struct ixl_aq_desc iaq;
3563
3564 memset(&iaq, 0, sizeof(iaq));
3565 iaq.iaq_opcode = htole16(IXL_AQ_OP_LLDP_STOP_AGENT);
3566 iaq.iaq_param[0] = htole32(IXL_LLDP_SHUTDOWN);
3567
3568 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
3569 aprint_error_dev(sc->sc_dev, "STOP LLDP AGENT timeout\n");
3570 return -1;
3571 }
3572
3573 switch (iaq.iaq_retval) {
3574 case htole16(IXL_AQ_RC_EMODE):
3575 case htole16(IXL_AQ_RC_EPERM):
3576 /* ignore silently */
3577 default:
3578 break;
3579 }
3580
3581 return 0;
3582 }
3583
3584 static void
3585 ixl_parse_hw_capability(struct ixl_softc *sc, struct ixl_aq_capability *cap)
3586 {
3587 uint16_t id;
3588 uint32_t number, logical_id;
3589
3590 id = le16toh(cap->cap_id);
3591 number = le32toh(cap->number);
3592 logical_id = le32toh(cap->logical_id);
3593
3594 switch (id) {
3595 case IXL_AQ_CAP_RSS:
3596 sc->sc_rss_table_size = number;
3597 sc->sc_rss_table_entry_width = logical_id;
3598 break;
3599 case IXL_AQ_CAP_RXQ:
3600 case IXL_AQ_CAP_TXQ:
3601 sc->sc_nqueue_pairs_device = MIN(number,
3602 sc->sc_nqueue_pairs_device);
3603 break;
3604 }
3605 }
3606
3607 static int
3608 ixl_get_hw_capabilities(struct ixl_softc *sc)
3609 {
3610 struct ixl_dmamem idm;
3611 struct ixl_aq_desc iaq;
3612 struct ixl_aq_capability *caps;
3613 size_t i, ncaps;
3614 bus_size_t caps_size;
3615 uint16_t status;
3616 int rv;
3617
3618 caps_size = sizeof(caps[0]) * 40;
3619 memset(&iaq, 0, sizeof(iaq));
3620 iaq.iaq_opcode = htole16(IXL_AQ_OP_LIST_FUNC_CAP);
3621
3622 do {
3623 if (ixl_dmamem_alloc(sc, &idm, caps_size, 0) != 0) {
3624 return -1;
3625 }
3626
3627 iaq.iaq_flags = htole16(IXL_AQ_BUF |
3628 (caps_size > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
3629 iaq.iaq_datalen = htole16(caps_size);
3630 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
3631
3632 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0,
3633 IXL_DMA_LEN(&idm), BUS_DMASYNC_PREREAD);
3634
3635 rv = ixl_atq_poll(sc, &iaq, 250);
3636
3637 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0,
3638 IXL_DMA_LEN(&idm), BUS_DMASYNC_POSTREAD);
3639
3640 if (rv != 0) {
3641 aprint_error(", HW capabilities timeout\n");
3642 goto done;
3643 }
3644
3645 status = le16toh(iaq.iaq_retval);
3646
3647 if (status == IXL_AQ_RC_ENOMEM) {
3648 caps_size = le16toh(iaq.iaq_datalen);
3649 ixl_dmamem_free(sc, &idm);
3650 }
3651 } while (status == IXL_AQ_RC_ENOMEM);
3652
3653 if (status != IXL_AQ_RC_OK) {
3654 aprint_error(", HW capabilities error\n");
3655 goto done;
3656 }
3657
3658 caps = IXL_DMA_KVA(&idm);
3659 ncaps = le16toh(iaq.iaq_param[1]);
3660
3661 for (i = 0; i < ncaps; i++) {
3662 ixl_parse_hw_capability(sc, &caps[i]);
3663 }
3664
3665 done:
3666 ixl_dmamem_free(sc, &idm);
3667 return rv;
3668 }
3669
3670 static int
3671 ixl_get_mac(struct ixl_softc *sc)
3672 {
3673 struct ixl_dmamem idm;
3674 struct ixl_aq_desc iaq;
3675 struct ixl_aq_mac_addresses *addrs;
3676 int rv;
3677
3678 if (ixl_dmamem_alloc(sc, &idm, sizeof(*addrs), 0) != 0) {
3679 aprint_error(", unable to allocate mac addresses\n");
3680 return -1;
3681 }
3682
3683 memset(&iaq, 0, sizeof(iaq));
3684 iaq.iaq_flags = htole16(IXL_AQ_BUF);
3685 iaq.iaq_opcode = htole16(IXL_AQ_OP_MAC_ADDRESS_READ);
3686 iaq.iaq_datalen = htole16(sizeof(*addrs));
3687 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
3688
3689 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3690 BUS_DMASYNC_PREREAD);
3691
3692 rv = ixl_atq_poll(sc, &iaq, 250);
3693
3694 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3695 BUS_DMASYNC_POSTREAD);
3696
3697 if (rv != 0) {
3698 aprint_error(", MAC ADDRESS READ timeout\n");
3699 rv = -1;
3700 goto done;
3701 }
3702 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3703 aprint_error(", MAC ADDRESS READ error\n");
3704 rv = -1;
3705 goto done;
3706 }
3707
3708 addrs = IXL_DMA_KVA(&idm);
3709 if (!ISSET(iaq.iaq_param[0], htole32(IXL_AQ_MAC_PORT_VALID))) {
3710 printf(", port address is not valid\n");
3711 goto done;
3712 }
3713
3714 memcpy(sc->sc_enaddr, addrs->port, ETHER_ADDR_LEN);
3715 rv = 0;
3716
3717 done:
3718 ixl_dmamem_free(sc, &idm);
3719 return rv;
3720 }
3721
3722 static int
3723 ixl_get_switch_config(struct ixl_softc *sc)
3724 {
3725 struct ixl_dmamem idm;
3726 struct ixl_aq_desc iaq;
3727 struct ixl_aq_switch_config *hdr;
3728 struct ixl_aq_switch_config_element *elms, *elm;
3729 unsigned int nelm, i;
3730 int rv;
3731
3732 if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
3733 aprint_error_dev(sc->sc_dev,
3734 "unable to allocate switch config buffer\n");
3735 return -1;
3736 }
3737
3738 memset(&iaq, 0, sizeof(iaq));
3739 iaq.iaq_flags = htole16(IXL_AQ_BUF |
3740 (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
3741 iaq.iaq_opcode = htole16(IXL_AQ_OP_SWITCH_GET_CONFIG);
3742 iaq.iaq_datalen = htole16(IXL_AQ_BUFLEN);
3743 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
3744
3745 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3746 BUS_DMASYNC_PREREAD);
3747
3748 rv = ixl_atq_poll(sc, &iaq, 250);
3749
3750 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3751 BUS_DMASYNC_POSTREAD);
3752
3753 if (rv != 0) {
3754 aprint_error_dev(sc->sc_dev, "GET SWITCH CONFIG timeout\n");
3755 rv = -1;
3756 goto done;
3757 }
3758 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3759 aprint_error_dev(sc->sc_dev, "GET SWITCH CONFIG error\n");
3760 rv = -1;
3761 goto done;
3762 }
3763
3764 hdr = IXL_DMA_KVA(&idm);
3765 elms = (struct ixl_aq_switch_config_element *)(hdr + 1);
3766
3767 nelm = le16toh(hdr->num_reported);
3768 if (nelm < 1) {
3769 aprint_error_dev(sc->sc_dev, "no switch config available\n");
3770 rv = -1;
3771 goto done;
3772 }
3773
3774 for (i = 0; i < nelm; i++) {
3775 elm = &elms[i];
3776
3777 aprint_debug_dev(sc->sc_dev,
3778 "type %x revision %u seid %04x\n",
3779 elm->type, elm->revision, le16toh(elm->seid));
3780 aprint_debug_dev(sc->sc_dev,
3781 "uplink %04x downlink %04x\n",
3782 le16toh(elm->uplink_seid),
3783 le16toh(elm->downlink_seid));
3784 aprint_debug_dev(sc->sc_dev,
3785 "conntype %x scheduler %04x extra %04x\n",
3786 elm->connection_type,
3787 le16toh(elm->scheduler_id),
3788 le16toh(elm->element_info));
3789 }
3790
3791 elm = &elms[0];
3792
3793 sc->sc_uplink_seid = elm->uplink_seid;
3794 sc->sc_downlink_seid = elm->downlink_seid;
3795 sc->sc_seid = elm->seid;
3796
3797 if ((sc->sc_uplink_seid == htole16(0)) !=
3798 (sc->sc_downlink_seid == htole16(0))) {
3799 aprint_error_dev(sc->sc_dev, "SEIDs are misconfigured\n");
3800 rv = -1;
3801 goto done;
3802 }
3803
3804 done:
3805 ixl_dmamem_free(sc, &idm);
3806 return rv;
3807 }
3808
3809 static int
3810 ixl_phy_mask_ints(struct ixl_softc *sc)
3811 {
3812 struct ixl_aq_desc iaq;
3813
3814 memset(&iaq, 0, sizeof(iaq));
3815 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_EVENT_MASK);
3816 iaq.iaq_param[2] = htole32(IXL_AQ_PHY_EV_MASK &
3817 ~(IXL_AQ_PHY_EV_LINK_UPDOWN | IXL_AQ_PHY_EV_MODULE_QUAL_FAIL |
3818 IXL_AQ_PHY_EV_MEDIA_NA));
3819
3820 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
3821 aprint_error_dev(sc->sc_dev, "SET PHY EVENT MASK timeout\n");
3822 return -1;
3823 }
3824 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3825 aprint_error_dev(sc->sc_dev, "SET PHY EVENT MASK error\n");
3826 return -1;
3827 }
3828
3829 return 0;
3830 }
3831
3832 static int
3833 ixl_get_phy_abilities(struct ixl_softc *sc,struct ixl_dmamem *idm)
3834 {
3835 struct ixl_aq_desc iaq;
3836 int rv;
3837
3838 memset(&iaq, 0, sizeof(iaq));
3839 iaq.iaq_flags = htole16(IXL_AQ_BUF |
3840 (IXL_DMA_LEN(idm) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
3841 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_GET_ABILITIES);
3842 iaq.iaq_datalen = htole16(IXL_DMA_LEN(idm));
3843 iaq.iaq_param[0] = htole32(IXL_AQ_PHY_REPORT_INIT);
3844 ixl_aq_dva(&iaq, IXL_DMA_DVA(idm));
3845
3846 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
3847 BUS_DMASYNC_PREREAD);
3848
3849 rv = ixl_atq_poll(sc, &iaq, 250);
3850
3851 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
3852 BUS_DMASYNC_POSTREAD);
3853
3854 if (rv != 0)
3855 return -1;
3856
3857 return le16toh(iaq.iaq_retval);
3858 }
3859
3860 static int
3861 ixl_get_phy_types(struct ixl_softc *sc, uint64_t *phy_types_ptr)
3862 {
3863 struct ixl_dmamem idm;
3864 struct ixl_aq_phy_abilities *phy;
3865 uint64_t phy_types;
3866 int rv;
3867
3868 if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
3869 aprint_error_dev(sc->sc_dev,
3870 		    "unable to allocate phy abilities buffer\n");
3871 return -1;
3872 }
3873
3874 rv = ixl_get_phy_abilities(sc, &idm);
3875 switch (rv) {
3876 case -1:
3877 aprint_error_dev(sc->sc_dev, "GET PHY ABILITIES timeout\n");
3878 goto done;
3879 case IXL_AQ_RC_OK:
3880 break;
3881 case IXL_AQ_RC_EIO:
3882 		aprint_error_dev(sc->sc_dev, "unable to query phy types\n");
3883 break;
3884 default:
3885 aprint_error_dev(sc->sc_dev,
3886 		    "GET PHY ABILITIES error %u\n", rv);
3887 goto done;
3888 }
3889
3890 phy = IXL_DMA_KVA(&idm);
3891
3892 phy_types = le32toh(phy->phy_type);
3893 phy_types |= (uint64_t)le32toh(phy->phy_type_ext) << 32;
3894
3895 *phy_types_ptr = phy_types;
3896
3897 rv = 0;
3898
3899 done:
3900 ixl_dmamem_free(sc, &idm);
3901 return rv;
3902 }
3903
3904 static int
3905 ixl_get_link_status_poll(struct ixl_softc *sc)
3906 {
3907 struct ixl_aq_desc iaq;
3908 struct ixl_aq_link_param *param;
3909 int link;
3910
3911 memset(&iaq, 0, sizeof(iaq));
3912 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
3913 param = (struct ixl_aq_link_param *)iaq.iaq_param;
3914 param->notify = IXL_AQ_LINK_NOTIFY;
3915
3916 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
3917 return ETIMEDOUT;
3918 }
3919 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3920 return EIO;
3921 }
3922
3923 link = ixl_set_link_status(sc, &iaq);
3924 sc->sc_ec.ec_if.if_link_state = link;
3925
3926 return 0;
3927 }
3928
3929 static int
3930 ixl_get_vsi(struct ixl_softc *sc)
3931 {
3932 struct ixl_dmamem *vsi = &sc->sc_scratch;
3933 struct ixl_aq_desc iaq;
3934 struct ixl_aq_vsi_param *param;
3935 struct ixl_aq_vsi_reply *reply;
3936 int rv;
3937
3938 /* grumble, vsi info isn't "known" at compile time */
3939
3940 memset(&iaq, 0, sizeof(iaq));
3941 iaq.iaq_flags = htole16(IXL_AQ_BUF |
3942 (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
3943 iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VSI_PARAMS);
3944 iaq.iaq_datalen = htole16(IXL_DMA_LEN(vsi));
3945 ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
3946
3947 param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
3948 param->uplink_seid = sc->sc_seid;
3949
3950 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
3951 BUS_DMASYNC_PREREAD);
3952
3953 rv = ixl_atq_poll(sc, &iaq, 250);
3954
3955 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
3956 BUS_DMASYNC_POSTREAD);
3957
3958 if (rv != 0) {
3959 return ETIMEDOUT;
3960 }
3961
3962 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3963 return EIO;
3964 }
3965
3966 reply = (struct ixl_aq_vsi_reply *)iaq.iaq_param;
3967 sc->sc_vsi_number = reply->vsi_number;
3968
3969 return 0;
3970 }
3971
3972 static int
3973 ixl_set_vsi(struct ixl_softc *sc)
3974 {
3975 struct ixl_dmamem *vsi = &sc->sc_scratch;
3976 struct ixl_aq_desc iaq;
3977 struct ixl_aq_vsi_param *param;
3978 struct ixl_aq_vsi_data *data = IXL_DMA_KVA(vsi);
3979 unsigned int qnum;
3980 uint16_t val;
3981 int rv;
3982
3983 qnum = sc->sc_nqueue_pairs - 1;
3984
3985 data->valid_sections = htole16(IXL_AQ_VSI_VALID_QUEUE_MAP |
3986 IXL_AQ_VSI_VALID_VLAN);
3987
3988 CLR(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_MASK));
3989 SET(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_CONTIG));
3990 data->queue_mapping[0] = htole16(0);
3991 data->tc_mapping[0] = htole16((0 << IXL_AQ_VSI_TC_Q_OFFSET_SHIFT) |
3992 (qnum << IXL_AQ_VSI_TC_Q_NUMBER_SHIFT));
3993
3994 val = le16toh(data->port_vlan_flags);
3995 CLR(val, IXL_AQ_VSI_PVLAN_MODE_MASK | IXL_AQ_VSI_PVLAN_EMOD_MASK);
3996 SET(val, IXL_AQ_VSI_PVLAN_MODE_ALL);
3997
3998 if (ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWTAGGING)) {
3999 SET(val, IXL_AQ_VSI_PVLAN_EMOD_STR_BOTH);
4000 } else {
4001 SET(val, IXL_AQ_VSI_PVLAN_EMOD_NOTHING);
4002 }
4003
4004 data->port_vlan_flags = htole16(val);
4005
4006 /* grumble, vsi info isn't "known" at compile time */
4007
4008 memset(&iaq, 0, sizeof(iaq));
4009 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD |
4010 (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4011 iaq.iaq_opcode = htole16(IXL_AQ_OP_UPD_VSI_PARAMS);
4012 iaq.iaq_datalen = htole16(IXL_DMA_LEN(vsi));
4013 ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
4014
4015 param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
4016 param->uplink_seid = sc->sc_seid;
4017
4018 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4019 BUS_DMASYNC_PREWRITE);
4020
4021 rv = ixl_atq_poll(sc, &iaq, 250);
4022
4023 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4024 BUS_DMASYNC_POSTWRITE);
4025
4026 if (rv != 0) {
4027 return ETIMEDOUT;
4028 }
4029
4030 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4031 return EIO;
4032 }
4033
4034 return 0;
4035 }
4036
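/*
 * Program the per-PF filter control register: select the 128-entry RSS
 * hash LUT and enable the flow director, ethertype and MAC/VLAN filters.
 */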
4037 static void
4038 ixl_set_filter_control(struct ixl_softc *sc)
4039 {
4040 uint32_t reg;
4041
4042 reg = ixl_rd_rx_csr(sc, I40E_PFQF_CTL_0);
4043
4044 CLR(reg, I40E_PFQF_CTL_0_HASHLUTSIZE_MASK);
4045 SET(reg, I40E_HASH_LUT_SIZE_128 << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT);
4046
4047 SET(reg, I40E_PFQF_CTL_0_FD_ENA_MASK);
4048 SET(reg, I40E_PFQF_CTL_0_ETYPE_ENA_MASK);
4049 SET(reg, I40E_PFQF_CTL_0_MACVLAN_ENA_MASK);
4050
4051 ixl_wr_rx_csr(sc, I40E_PFQF_CTL_0, reg);
4052 }
4053
4054 static inline void
4055 ixl_get_default_rss_key(uint32_t *buf, size_t len)
4056 {
4057 size_t cplen;
4058 uint8_t rss_seed[RSS_KEYSIZE];
4059
4060 rss_getkey(rss_seed);
4061 memset(buf, 0, len);
4062
4063 cplen = MIN(len, sizeof(rss_seed));
4064 memcpy(buf, rss_seed, cplen);
4065 }
4066
4067 static void
4068 ixl_set_rss_key(struct ixl_softc *sc)
4069 {
4070 uint32_t rss_seed[IXL_RSS_KEY_SIZE_REG];
4071 size_t i;
4072
4073 ixl_get_default_rss_key(rss_seed, sizeof(rss_seed));
4074
4075 for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
4076 ixl_wr_rx_csr(sc, I40E_PFQF_HKEY(i), rss_seed[i]);
4077 }
4078 }
4079
4080 static void
4081 ixl_set_rss_pctype(struct ixl_softc *sc)
4082 {
4083 uint64_t set_hena = 0;
4084 uint32_t hena0, hena1;
4085
4086 if (sc->sc_mac_type == I40E_MAC_X722)
4087 set_hena = IXL_RSS_HENA_DEFAULT_X722;
4088 else
4089 set_hena = IXL_RSS_HENA_DEFAULT_XL710;
4090
4091 hena0 = ixl_rd_rx_csr(sc, I40E_PFQF_HENA(0));
4092 hena1 = ixl_rd_rx_csr(sc, I40E_PFQF_HENA(1));
4093
4094 SET(hena0, set_hena);
4095 SET(hena1, set_hena >> 32);
4096
4097 ixl_wr_rx_csr(sc, I40E_PFQF_HENA(0), hena0);
4098 ixl_wr_rx_csr(sc, I40E_PFQF_HENA(1), hena1);
4099 }
4100
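/*
 * Fill the RSS hash lookup table, spreading its entries round-robin
 * over the active queue pairs; the table is written four entries at a
 * time through the 32-bit HLUT registers.
 */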
4101 static void
4102 ixl_set_rss_hlut(struct ixl_softc *sc)
4103 {
4104 unsigned int qid;
4105 uint8_t hlut_buf[512], lut_mask;
4106 uint32_t *hluts;
4107 size_t i, hluts_num;
4108
4109 lut_mask = (0x01 << sc->sc_rss_table_entry_width) - 1;
4110
4111 for (i = 0; i < sc->sc_rss_table_size; i++) {
4112 qid = i % sc->sc_nqueue_pairs;
4113 hlut_buf[i] = qid & lut_mask;
4114 }
4115
4116 hluts = (uint32_t *)hlut_buf;
4117 hluts_num = sc->sc_rss_table_size >> 2;
4118 for (i = 0; i < hluts_num; i++) {
4119 ixl_wr(sc, I40E_PFQF_HLUT(i), hluts[i]);
4120 }
4121 ixl_flush(sc);
4122 }
4123
4124 static void
4125 ixl_config_rss(struct ixl_softc *sc)
4126 {
4127
4128 KASSERT(mutex_owned(&sc->sc_cfg_lock));
4129
4130 ixl_set_rss_key(sc);
4131 ixl_set_rss_pctype(sc);
4132 ixl_set_rss_hlut(sc);
4133 }
4134
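/*
 * Map an admin queue PHY type index (0..63) to its ifmedia entry using
 * the ixl_phy_type_map bitmask table; returns NULL for unknown types.
 */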
4135 static const struct ixl_phy_type *
4136 ixl_search_phy_type(uint8_t phy_type)
4137 {
4138 const struct ixl_phy_type *itype;
4139 uint64_t mask;
4140 unsigned int i;
4141
4142 if (phy_type >= 64)
4143 return NULL;
4144
4145 mask = 1ULL << phy_type;
4146
4147 for (i = 0; i < __arraycount(ixl_phy_type_map); i++) {
4148 itype = &ixl_phy_type_map[i];
4149
4150 if (ISSET(itype->phy_type, mask))
4151 return itype;
4152 }
4153
4154 return NULL;
4155 }
4156
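/*
 * Translate an admin queue link speed bit into the corresponding
 * if_baudrate value, or 0 if the speed is not recognized.
 */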
4157 static uint64_t
4158 ixl_search_link_speed(uint8_t link_speed)
4159 {
4160 const struct ixl_speed_type *type;
4161 unsigned int i;
4162
4163 for (i = 0; i < __arraycount(ixl_speed_type_map); i++) {
4164 type = &ixl_speed_type_map[i];
4165
4166 if (ISSET(type->dev_speed, link_speed))
4167 return type->net_speed;
4168 }
4169
4170 return 0;
4171 }
4172
4173 static int
4174 ixl_restart_an(struct ixl_softc *sc)
4175 {
4176 struct ixl_aq_desc iaq;
4177
4178 memset(&iaq, 0, sizeof(iaq));
4179 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_RESTART_AN);
4180 iaq.iaq_param[0] =
4181 htole32(IXL_AQ_PHY_RESTART_AN | IXL_AQ_PHY_LINK_ENABLE);
4182
4183 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4184 aprint_error_dev(sc->sc_dev, "RESTART AN timeout\n");
4185 return -1;
4186 }
4187 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4188 aprint_error_dev(sc->sc_dev, "RESTART AN error\n");
4189 return -1;
4190 }
4191
4192 return 0;
4193 }
4194
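/*
 * Install a MAC/VLAN filter on our VSI with the ADD MACVLAN admin
 * command.  The single filter element is passed in the scratch DMA
 * buffer and the admin queue return code is mapped to an errno.
 */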
4195 static int
4196 ixl_add_macvlan(struct ixl_softc *sc, const uint8_t *macaddr,
4197 uint16_t vlan, uint16_t flags)
4198 {
4199 struct ixl_aq_desc iaq;
4200 struct ixl_aq_add_macvlan *param;
4201 struct ixl_aq_add_macvlan_elem *elem;
4202
4203 memset(&iaq, 0, sizeof(iaq));
4204 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4205 iaq.iaq_opcode = htole16(IXL_AQ_OP_ADD_MACVLAN);
4206 iaq.iaq_datalen = htole16(sizeof(*elem));
4207 ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));
4208
4209 param = (struct ixl_aq_add_macvlan *)&iaq.iaq_param;
4210 param->num_addrs = htole16(1);
4211 param->seid0 = htole16(0x8000) | sc->sc_seid;
4212 param->seid1 = 0;
4213 param->seid2 = 0;
4214
4215 elem = IXL_DMA_KVA(&sc->sc_scratch);
4216 memset(elem, 0, sizeof(*elem));
4217 memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
4218 elem->flags = htole16(IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH | flags);
4219 elem->vlan = htole16(vlan);
4220
4221 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4222 		return ETIMEDOUT;
4223 }
4224
4225 switch (le16toh(iaq.iaq_retval)) {
4226 case IXL_AQ_RC_OK:
4227 break;
4228 case IXL_AQ_RC_ENOSPC:
4229 return ENOSPC;
4230 case IXL_AQ_RC_ENOENT:
4231 return ENOENT;
4232 case IXL_AQ_RC_EACCES:
4233 return EACCES;
4234 case IXL_AQ_RC_EEXIST:
4235 return EEXIST;
4236 case IXL_AQ_RC_EINVAL:
4237 return EINVAL;
4238 default:
4239 return EIO;
4240 }
4241
4242 return 0;
4243 }
4244
4245 static int
4246 ixl_remove_macvlan(struct ixl_softc *sc, uint8_t *macaddr,
4247 uint16_t vlan, uint16_t flags)
4248 {
4249 struct ixl_aq_desc iaq;
4250 struct ixl_aq_remove_macvlan *param;
4251 struct ixl_aq_remove_macvlan_elem *elem;
4252
4253 memset(&iaq, 0, sizeof(iaq));
4254 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4255 iaq.iaq_opcode = htole16(IXL_AQ_OP_REMOVE_MACVLAN);
4256 iaq.iaq_datalen = htole16(sizeof(*elem));
4257 ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));
4258
4259 param = (struct ixl_aq_remove_macvlan *)&iaq.iaq_param;
4260 param->num_addrs = htole16(1);
4261 param->seid0 = htole16(0x8000) | sc->sc_seid;
4262 param->seid1 = 0;
4263 param->seid2 = 0;
4264
4265 elem = IXL_DMA_KVA(&sc->sc_scratch);
4266 memset(elem, 0, sizeof(*elem));
4267 memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
4268 elem->flags = htole16(IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH | flags);
4269 elem->vlan = htole16(vlan);
4270
4271 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4272 return EINVAL;
4273 }
4274
4275 switch (le16toh(iaq.iaq_retval)) {
4276 case IXL_AQ_RC_OK:
4277 break;
4278 case IXL_AQ_RC_ENOENT:
4279 return ENOENT;
4280 case IXL_AQ_RC_EACCES:
4281 return EACCES;
4282 case IXL_AQ_RC_EINVAL:
4283 return EINVAL;
4284 default:
4285 return EIO;
4286 }
4287
4288 return 0;
4289 }
4290
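/*
 * Set up the Host Memory Cache (HMC) backing store that the hardware
 * uses for its per-queue context objects.  Read the object size and
 * count for each object type, allocate the backing pages (sc_hmc_pd)
 * and the page descriptor tables that map them (sc_hmc_sd), program
 * one segment descriptor per table through PFHMC_SDCMD, and finally
 * write each object type's base offset and count registers.
 */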
4291 static int
4292 ixl_hmc(struct ixl_softc *sc)
4293 {
4294 struct {
4295 uint32_t count;
4296 uint32_t minsize;
4297 bus_size_t objsiz;
4298 bus_size_t setoff;
4299 bus_size_t setcnt;
4300 } regs[] = {
4301 {
4302 0,
4303 IXL_HMC_TXQ_MINSIZE,
4304 I40E_GLHMC_LANTXOBJSZ,
4305 I40E_GLHMC_LANTXBASE(sc->sc_pf_id),
4306 I40E_GLHMC_LANTXCNT(sc->sc_pf_id),
4307 },
4308 {
4309 0,
4310 IXL_HMC_RXQ_MINSIZE,
4311 I40E_GLHMC_LANRXOBJSZ,
4312 I40E_GLHMC_LANRXBASE(sc->sc_pf_id),
4313 I40E_GLHMC_LANRXCNT(sc->sc_pf_id),
4314 },
4315 {
4316 0,
4317 0,
4318 I40E_GLHMC_FCOEDDPOBJSZ,
4319 I40E_GLHMC_FCOEDDPBASE(sc->sc_pf_id),
4320 I40E_GLHMC_FCOEDDPCNT(sc->sc_pf_id),
4321 },
4322 {
4323 0,
4324 0,
4325 I40E_GLHMC_FCOEFOBJSZ,
4326 I40E_GLHMC_FCOEFBASE(sc->sc_pf_id),
4327 I40E_GLHMC_FCOEFCNT(sc->sc_pf_id),
4328 },
4329 };
4330 struct ixl_hmc_entry *e;
4331 uint64_t size, dva;
4332 uint8_t *kva;
4333 uint64_t *sdpage;
4334 unsigned int i;
4335 int npages, tables;
4336 uint32_t reg;
4337
4338 CTASSERT(__arraycount(regs) <= __arraycount(sc->sc_hmc_entries));
4339
4340 regs[IXL_HMC_LAN_TX].count = regs[IXL_HMC_LAN_RX].count =
4341 ixl_rd(sc, I40E_GLHMC_LANQMAX);
4342
4343 size = 0;
4344 for (i = 0; i < __arraycount(regs); i++) {
4345 e = &sc->sc_hmc_entries[i];
4346
4347 e->hmc_count = regs[i].count;
4348 reg = ixl_rd(sc, regs[i].objsiz);
4349 e->hmc_size = BIT_ULL(0x3F & reg);
4350 e->hmc_base = size;
4351
4352 if ((e->hmc_size * 8) < regs[i].minsize) {
4353 aprint_error_dev(sc->sc_dev,
4354 "kernel hmc entry is too big\n");
4355 return -1;
4356 }
4357
4358 size += roundup(e->hmc_size * e->hmc_count, IXL_HMC_ROUNDUP);
4359 }
4360 size = roundup(size, IXL_HMC_PGSIZE);
4361 npages = size / IXL_HMC_PGSIZE;
4362
4363 tables = roundup(size, IXL_HMC_L2SZ) / IXL_HMC_L2SZ;
4364
4365 if (ixl_dmamem_alloc(sc, &sc->sc_hmc_pd, size, IXL_HMC_PGSIZE) != 0) {
4366 aprint_error_dev(sc->sc_dev,
4367 "unable to allocate hmc pd memory\n");
4368 return -1;
4369 }
4370
4371 if (ixl_dmamem_alloc(sc, &sc->sc_hmc_sd, tables * IXL_HMC_PGSIZE,
4372 IXL_HMC_PGSIZE) != 0) {
4373 aprint_error_dev(sc->sc_dev,
4374 "unable to allocate hmc sd memory\n");
4375 ixl_dmamem_free(sc, &sc->sc_hmc_pd);
4376 return -1;
4377 }
4378
4379 kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
4380 memset(kva, 0, IXL_DMA_LEN(&sc->sc_hmc_pd));
4381
4382 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
4383 0, IXL_DMA_LEN(&sc->sc_hmc_pd),
4384 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4385
4386 dva = IXL_DMA_DVA(&sc->sc_hmc_pd);
4387 sdpage = IXL_DMA_KVA(&sc->sc_hmc_sd);
4388 memset(sdpage, 0, IXL_DMA_LEN(&sc->sc_hmc_sd));
4389
4390 for (i = 0; (int)i < npages; i++) {
4391 *sdpage = htole64(dva | IXL_HMC_PDVALID);
4392 sdpage++;
4393
4394 dva += IXL_HMC_PGSIZE;
4395 }
4396
4397 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_sd),
4398 0, IXL_DMA_LEN(&sc->sc_hmc_sd),
4399 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4400
4401 dva = IXL_DMA_DVA(&sc->sc_hmc_sd);
4402 for (i = 0; (int)i < tables; i++) {
4403 uint32_t count;
4404
4405 KASSERT(npages >= 0);
4406
4407 count = ((unsigned int)npages > IXL_HMC_PGS) ?
4408 IXL_HMC_PGS : (unsigned int)npages;
4409
4410 ixl_wr(sc, I40E_PFHMC_SDDATAHIGH, dva >> 32);
4411 ixl_wr(sc, I40E_PFHMC_SDDATALOW, dva |
4412 (count << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
4413 (1U << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT));
4414 ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
4415 ixl_wr(sc, I40E_PFHMC_SDCMD,
4416 (1U << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | i);
4417
4418 npages -= IXL_HMC_PGS;
4419 dva += IXL_HMC_PGSIZE;
4420 }
4421
4422 for (i = 0; i < __arraycount(regs); i++) {
4423 e = &sc->sc_hmc_entries[i];
4424
4425 ixl_wr(sc, regs[i].setoff, e->hmc_base / IXL_HMC_ROUNDUP);
4426 ixl_wr(sc, regs[i].setcnt, e->hmc_count);
4427 }
4428
4429 return 0;
4430 }
4431
4432 static void
4433 ixl_hmc_free(struct ixl_softc *sc)
4434 {
4435 ixl_dmamem_free(sc, &sc->sc_hmc_sd);
4436 ixl_dmamem_free(sc, &sc->sc_hmc_pd);
4437 }
4438
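/*
 * Pack a host-format context structure into the bit-packed layout the
 * HMC expects: for every field in the packing table, copy "width" bits
 * from the source at "offset" into the destination starting at bit
 * position "lsb".
 */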
4439 static void
4440 ixl_hmc_pack(void *d, const void *s, const struct ixl_hmc_pack *packing,
4441 unsigned int npacking)
4442 {
4443 uint8_t *dst = d;
4444 const uint8_t *src = s;
4445 unsigned int i;
4446
4447 for (i = 0; i < npacking; i++) {
4448 const struct ixl_hmc_pack *pack = &packing[i];
4449 unsigned int offset = pack->lsb / 8;
4450 unsigned int align = pack->lsb % 8;
4451 const uint8_t *in = src + pack->offset;
4452 uint8_t *out = dst + offset;
4453 int width = pack->width;
4454 unsigned int inbits = 0;
4455
4456 if (align) {
4457 inbits = (*in++) << align;
4458 *out++ |= (inbits & 0xff);
4459 inbits >>= 8;
4460
4461 width -= 8 - align;
4462 }
4463
4464 while (width >= 8) {
4465 inbits |= (*in++) << align;
4466 *out++ = (inbits & 0xff);
4467 inbits >>= 8;
4468
4469 width -= 8;
4470 }
4471
4472 if (width > 0) {
4473 inbits |= (*in) << align;
4474 *out |= (inbits & ((1 << width) - 1));
4475 }
4476 }
4477 }
4478
4479 static struct ixl_aq_buf *
4480 ixl_aqb_alloc(struct ixl_softc *sc)
4481 {
4482 struct ixl_aq_buf *aqb;
4483
4484 aqb = malloc(sizeof(*aqb), M_DEVBUF, M_WAITOK);
4485 if (aqb == NULL)
4486 return NULL;
4487
4488 aqb->aqb_size = IXL_AQ_BUFLEN;
4489
4490 if (bus_dmamap_create(sc->sc_dmat, aqb->aqb_size, 1,
4491 aqb->aqb_size, 0,
4492 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &aqb->aqb_map) != 0)
4493 goto free;
4494 if (bus_dmamem_alloc(sc->sc_dmat, aqb->aqb_size,
4495 IXL_AQ_ALIGN, 0, &aqb->aqb_seg, 1, &aqb->aqb_nsegs,
4496 BUS_DMA_WAITOK) != 0)
4497 goto destroy;
4498 if (bus_dmamem_map(sc->sc_dmat, &aqb->aqb_seg, aqb->aqb_nsegs,
4499 aqb->aqb_size, &aqb->aqb_data, BUS_DMA_WAITOK) != 0)
4500 goto dma_free;
4501 if (bus_dmamap_load(sc->sc_dmat, aqb->aqb_map, aqb->aqb_data,
4502 aqb->aqb_size, NULL, BUS_DMA_WAITOK) != 0)
4503 goto unmap;
4504
4505 return aqb;
4506 unmap:
4507 bus_dmamem_unmap(sc->sc_dmat, aqb->aqb_data, aqb->aqb_size);
4508 dma_free:
4509 bus_dmamem_free(sc->sc_dmat, &aqb->aqb_seg, 1);
4510 destroy:
4511 bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
4512 free:
4513 free(aqb, M_DEVBUF);
4514
4515 return NULL;
4516 }
4517
4518 static void
4519 ixl_aqb_free(struct ixl_softc *sc, struct ixl_aq_buf *aqb)
4520 {
4521 bus_dmamap_unload(sc->sc_dmat, aqb->aqb_map);
4522 bus_dmamem_unmap(sc->sc_dmat, aqb->aqb_data, aqb->aqb_size);
4523 bus_dmamem_free(sc->sc_dmat, &aqb->aqb_seg, 1);
4524 bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
4525 free(aqb, M_DEVBUF);
4526 }
4527
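/*
 * (Re)fill the admin receive queue with DMA buffers, reusing idle
 * buffers when possible and allocating new ones otherwise.  If any
 * descriptor was posted, advance the producer index and update the
 * ARQ tail register.
 */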
4528 static int
4529 ixl_arq_fill(struct ixl_softc *sc)
4530 {
4531 struct ixl_aq_buf *aqb;
4532 struct ixl_aq_desc *arq, *iaq;
4533 unsigned int prod = sc->sc_arq_prod;
4534 unsigned int n;
4535 int post = 0;
4536
4537 n = ixl_rxr_unrefreshed(sc->sc_arq_prod, sc->sc_arq_cons,
4538 IXL_AQ_NUM);
4539 arq = IXL_DMA_KVA(&sc->sc_arq);
4540
4541 	if (__predict_false(n == 0))
4542 return 0;
4543
4544 do {
4545 aqb = sc->sc_arq_live[prod];
4546 iaq = &arq[prod];
4547
4548 if (aqb == NULL) {
4549 aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle);
4550 if (aqb != NULL) {
4551 SIMPLEQ_REMOVE(&sc->sc_arq_idle, aqb,
4552 ixl_aq_buf, aqb_entry);
4553 } else if ((aqb = ixl_aqb_alloc(sc)) == NULL) {
4554 break;
4555 }
4556
4557 sc->sc_arq_live[prod] = aqb;
4558 memset(aqb->aqb_data, 0, aqb->aqb_size);
4559
4560 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0,
4561 aqb->aqb_size, BUS_DMASYNC_PREREAD);
4562
4563 iaq->iaq_flags = htole16(IXL_AQ_BUF |
4564 (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ?
4565 IXL_AQ_LB : 0));
4566 iaq->iaq_opcode = 0;
4567 iaq->iaq_datalen = htole16(aqb->aqb_size);
4568 iaq->iaq_retval = 0;
4569 iaq->iaq_cookie = 0;
4570 iaq->iaq_param[0] = 0;
4571 iaq->iaq_param[1] = 0;
4572 ixl_aq_dva(iaq, aqb->aqb_map->dm_segs[0].ds_addr);
4573 }
4574
4575 prod++;
4576 prod &= IXL_AQ_MASK;
4577
4578 post = 1;
4579
4580 } while (--n);
4581
4582 if (post) {
4583 sc->sc_arq_prod = prod;
4584 ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
4585 }
4586
4587 return post;
4588 }
4589
4590 static void
4591 ixl_arq_unfill(struct ixl_softc *sc)
4592 {
4593 struct ixl_aq_buf *aqb;
4594 unsigned int i;
4595
4596 for (i = 0; i < __arraycount(sc->sc_arq_live); i++) {
4597 aqb = sc->sc_arq_live[i];
4598 if (aqb == NULL)
4599 continue;
4600
4601 sc->sc_arq_live[i] = NULL;
4602 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, aqb->aqb_size,
4603 BUS_DMASYNC_POSTREAD);
4604 ixl_aqb_free(sc, aqb);
4605 }
4606
4607 while ((aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle)) != NULL) {
4608 SIMPLEQ_REMOVE(&sc->sc_arq_idle, aqb,
4609 ixl_aq_buf, aqb_entry);
4610 ixl_aqb_free(sc, aqb);
4611 }
4612 }
4613
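/*
 * Quiesce the function before resetting it: mask all interrupts, point
 * the interrupt linked lists at the end-of-list value, pre-announce the
 * Tx queue disables, then disable every Tx/Rx queue owned by this PF.
 */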
4614 static void
4615 ixl_clear_hw(struct ixl_softc *sc)
4616 {
4617 uint32_t num_queues, base_queue;
4618 uint32_t num_pf_int;
4619 uint32_t num_vf_int;
4620 uint32_t num_vfs;
4621 uint32_t i, j;
4622 uint32_t val;
4623 uint32_t eol = 0x7ff;
4624
4625 /* get number of interrupts, queues, and vfs */
4626 val = ixl_rd(sc, I40E_GLPCI_CNF2);
4627 num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
4628 I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
4629 num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
4630 I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
4631
4632 val = ixl_rd(sc, I40E_PFLAN_QALLOC);
4633 base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
4634 I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
4635 j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
4636 I40E_PFLAN_QALLOC_LASTQ_SHIFT;
4637 if (val & I40E_PFLAN_QALLOC_VALID_MASK)
4638 num_queues = (j - base_queue) + 1;
4639 else
4640 num_queues = 0;
4641
4642 val = ixl_rd(sc, I40E_PF_VT_PFALLOC);
4643 i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
4644 I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
4645 j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
4646 I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
4647 if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
4648 num_vfs = (j - i) + 1;
4649 else
4650 num_vfs = 0;
4651
4652 /* stop all the interrupts */
4653 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0);
4654 ixl_flush(sc);
4655 val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
4656 for (i = 0; i < num_pf_int - 2; i++)
4657 ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), val);
4658 ixl_flush(sc);
4659
4660 /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
4661 val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4662 ixl_wr(sc, I40E_PFINT_LNKLST0, val);
4663 for (i = 0; i < num_pf_int - 2; i++)
4664 ixl_wr(sc, I40E_PFINT_LNKLSTN(i), val);
4665 val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4666 for (i = 0; i < num_vfs; i++)
4667 ixl_wr(sc, I40E_VPINT_LNKLST0(i), val);
4668 for (i = 0; i < num_vf_int - 2; i++)
4669 ixl_wr(sc, I40E_VPINT_LNKLSTN(i), val);
4670
4671 /* warn the HW of the coming Tx disables */
4672 for (i = 0; i < num_queues; i++) {
4673 uint32_t abs_queue_idx = base_queue + i;
4674 uint32_t reg_block = 0;
4675
4676 if (abs_queue_idx >= 128) {
4677 reg_block = abs_queue_idx / 128;
4678 abs_queue_idx %= 128;
4679 }
4680
4681 val = ixl_rd(sc, I40E_GLLAN_TXPRE_QDIS(reg_block));
4682 val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
4683 val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
4684 val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
4685
4686 ixl_wr(sc, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
4687 }
4688 delaymsec(400);
4689
4690 /* stop all the queues */
4691 for (i = 0; i < num_queues; i++) {
4692 ixl_wr(sc, I40E_QINT_TQCTL(i), 0);
4693 ixl_wr(sc, I40E_QTX_ENA(i), 0);
4694 ixl_wr(sc, I40E_QINT_RQCTL(i), 0);
4695 ixl_wr(sc, I40E_QRX_ENA(i), 0);
4696 }
4697
4698 /* short wait for all queue disables to settle */
4699 delaymsec(50);
4700 }
4701
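/*
 * Reset the physical function: wait for any global reset to reach a
 * steady state and for the firmware to report ready, then, if no global
 * reset was in progress, request a PF software reset and poll until the
 * hardware clears the request bit.
 */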
4702 static int
4703 ixl_pf_reset(struct ixl_softc *sc)
4704 {
4705 uint32_t cnt = 0;
4706 uint32_t cnt1 = 0;
4707 uint32_t reg = 0, reg0 = 0;
4708 uint32_t grst_del;
4709
4710 /*
4711 * Poll for Global Reset steady state in case of recent GRST.
4712 * The grst delay value is in 100ms units, and we'll wait a
4713 * couple counts longer to be sure we don't just miss the end.
4714 */
4715 grst_del = ixl_rd(sc, I40E_GLGEN_RSTCTL);
4716 grst_del &= I40E_GLGEN_RSTCTL_GRSTDEL_MASK;
4717 grst_del >>= I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
4718
4719 grst_del = grst_del * 20;
4720
4721 for (cnt = 0; cnt < grst_del; cnt++) {
4722 reg = ixl_rd(sc, I40E_GLGEN_RSTAT);
4723 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
4724 break;
4725 delaymsec(100);
4726 }
4727 if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
4728 aprint_error(", Global reset polling failed to complete\n");
4729 return -1;
4730 }
4731
4732 /* Now Wait for the FW to be ready */
4733 for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
4734 reg = ixl_rd(sc, I40E_GLNVM_ULD);
4735 reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
4736 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
4737 if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
4738 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))
4739 break;
4740
4741 delaymsec(10);
4742 }
4743 if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
4744 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
4745 aprint_error(", wait for FW Reset complete timed out "
4746 "(I40E_GLNVM_ULD = 0x%x)\n", reg);
4747 return -1;
4748 }
4749
4750 /*
4751 * If there was a Global Reset in progress when we got here,
4752 * we don't need to do the PF Reset
4753 */
4754 if (cnt == 0) {
4755 reg = ixl_rd(sc, I40E_PFGEN_CTRL);
4756 ixl_wr(sc, I40E_PFGEN_CTRL, reg | I40E_PFGEN_CTRL_PFSWR_MASK);
4757 for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) {
4758 reg = ixl_rd(sc, I40E_PFGEN_CTRL);
4759 if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
4760 break;
4761 delaymsec(1);
4762
4763 reg0 = ixl_rd(sc, I40E_GLGEN_RSTAT);
4764 if (reg0 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
4765 aprint_error(", Core reset upcoming."
4766 				    " Skipping PF reset request\n");
4767 return -1;
4768 }
4769 }
4770 if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
4771 			aprint_error(", PF reset polling failed to complete "
4772 			    "(I40E_PFGEN_CTRL = 0x%x)\n", reg);
4773 return -1;
4774 }
4775 }
4776
4777 return 0;
4778 }
4779
4780 static int
4781 ixl_dmamem_alloc(struct ixl_softc *sc, struct ixl_dmamem *ixm,
4782 bus_size_t size, bus_size_t align)
4783 {
4784 ixm->ixm_size = size;
4785
4786 if (bus_dmamap_create(sc->sc_dmat, ixm->ixm_size, 1,
4787 ixm->ixm_size, 0,
4788 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
4789 &ixm->ixm_map) != 0)
4790 return 1;
4791 if (bus_dmamem_alloc(sc->sc_dmat, ixm->ixm_size,
4792 align, 0, &ixm->ixm_seg, 1, &ixm->ixm_nsegs,
4793 BUS_DMA_WAITOK) != 0)
4794 goto destroy;
4795 if (bus_dmamem_map(sc->sc_dmat, &ixm->ixm_seg, ixm->ixm_nsegs,
4796 ixm->ixm_size, &ixm->ixm_kva, BUS_DMA_WAITOK) != 0)
4797 goto free;
4798 if (bus_dmamap_load(sc->sc_dmat, ixm->ixm_map, ixm->ixm_kva,
4799 ixm->ixm_size, NULL, BUS_DMA_WAITOK) != 0)
4800 goto unmap;
4801
4802 memset(ixm->ixm_kva, 0, ixm->ixm_size);
4803
4804 return 0;
4805 unmap:
4806 bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
4807 free:
4808 bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
4809 destroy:
4810 bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
4811 return 1;
4812 }
4813
4814 static void
4815 ixl_dmamem_free(struct ixl_softc *sc, struct ixl_dmamem *ixm)
4816 {
4817 bus_dmamap_unload(sc->sc_dmat, ixm->ixm_map);
4818 bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
4819 bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
4820 bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
4821 }
4822
4823 static int
4824 ixl_set_macvlan(struct ixl_softc *sc)
4825 {
4826 int error, rv = 0;
4827
4828 /* remove default mac filter and replace it so we can see vlans */
4829
4830 error = ixl_remove_macvlan(sc, sc->sc_enaddr, 0, 0);
4831 if (error != 0 && error != ENOENT) {
4832 aprint_debug_dev(sc->sc_dev, "unable to remove macvlan\n");
4833 rv = -1;
4834 }
4835
4836 error = ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
4837 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
4838 if (error != 0 && error != ENOENT) {
4839 aprint_debug_dev(sc->sc_dev,
4840 "unable to remove macvlan(IGNORE_VLAN)\n");
4841 rv = -1;
4842 }
4843
4844 error = ixl_add_macvlan(sc, sc->sc_enaddr, 0,
4845 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
4846 if (error != 0) {
4847 aprint_debug_dev(sc->sc_dev, "unable to add mac address\n");
4848 rv = -1;
4849 }
4850
4851 error = ixl_add_macvlan(sc, etherbroadcastaddr, 0,
4852 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
4853 if (error != 0) {
4854 aprint_debug_dev(sc->sc_dev,
4855 "unable to add broadcast mac address\n");
4856 rv = -1;
4857 }
4858
4859 return rv;
4860 }
4861
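/*
 * Ethernet capability/flags change callback.  Toggling VLAN hardware
 * tagging needs a full reinit (ENETRESET); other changes only require
 * reprogramming the receive filters via ixl_iff().
 */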
4862 static int
4863 ixl_ifflags_cb(struct ethercom *ec)
4864 {
4865 struct ifnet *ifp = &ec->ec_if;
4866 struct ixl_softc *sc = ifp->if_softc;
4867 int rv, change;
4868
4869 mutex_enter(&sc->sc_cfg_lock);
4870
4871 change = ec->ec_capenable ^ sc->sc_cur_ec_capenable;
4872
4873 if (ISSET(change, ETHERCAP_VLAN_HWTAGGING)) {
4874 rv = ENETRESET;
4875 goto out;
4876 }
4877
4878 rv = ixl_iff(sc);
4879 out:
4880 mutex_exit(&sc->sc_cfg_lock);
4881
4882 return rv;
4883 }
4884
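/*
 * Decode a link status admin queue descriptor into the ifmedia
 * active/status words and a baudrate, and return the matching
 * LINK_STATE_* value.
 */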
4885 static int
4886 ixl_set_link_status(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
4887 {
4888 const struct ixl_aq_link_status *status;
4889 const struct ixl_phy_type *itype;
4890
4891 uint64_t ifm_active = IFM_ETHER;
4892 uint64_t ifm_status = IFM_AVALID;
4893 int link_state = LINK_STATE_DOWN;
4894 uint64_t baudrate = 0;
4895
4896 status = (const struct ixl_aq_link_status *)iaq->iaq_param;
4897 if (!ISSET(status->link_info, IXL_AQ_LINK_UP_FUNCTION))
4898 goto done;
4899
4900 ifm_active |= IFM_FDX;
4901 ifm_status |= IFM_ACTIVE;
4902 link_state = LINK_STATE_UP;
4903
4904 itype = ixl_search_phy_type(status->phy_type);
4905 if (itype != NULL)
4906 ifm_active |= itype->ifm_type;
4907
4908 if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_TX))
4909 ifm_active |= IFM_ETH_TXPAUSE;
4910 if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_RX))
4911 ifm_active |= IFM_ETH_RXPAUSE;
4912
4913 baudrate = ixl_search_link_speed(status->link_speed);
4914
4915 done:
4916 /* NET_ASSERT_LOCKED() except during attach */
4917 sc->sc_media_active = ifm_active;
4918 sc->sc_media_status = ifm_status;
4919
4920 sc->sc_ec.ec_if.if_baudrate = baudrate;
4921
4922 return link_state;
4923 }
4924
4925 static int
4926 ixl_establish_intx(struct ixl_softc *sc)
4927 {
4928 pci_chipset_tag_t pc = sc->sc_pa.pa_pc;
4929 pci_intr_handle_t *intr;
4930 char xnamebuf[32];
4931 char intrbuf[PCI_INTRSTR_LEN];
4932 char const *intrstr;
4933
4934 KASSERT(sc->sc_nintrs == 1);
4935
4936 intr = &sc->sc_ihp[0];
4937
4938 intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf));
4939 snprintf(xnamebuf, sizeof(xnamebuf), "%s:legacy",
4940 device_xname(sc->sc_dev));
4941
4942 sc->sc_ihs[0] = pci_intr_establish_xname(pc, *intr, IPL_NET, ixl_intr,
4943 sc, xnamebuf);
4944
4945 if (sc->sc_ihs[0] == NULL) {
4946 aprint_error_dev(sc->sc_dev,
4947 "unable to establish interrupt at %s\n", intrstr);
4948 return -1;
4949 }
4950
4951 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
4952 return 0;
4953 }
4954
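/*
 * Establish the MSI-X handlers: vector 0 services the admin queue and
 * other miscellaneous events, and each queue pair gets its own vector
 * starting at sc_msix_vector_queue.
 */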
4955 static int
4956 ixl_establish_msix(struct ixl_softc *sc)
4957 {
4958 pci_chipset_tag_t pc = sc->sc_pa.pa_pc;
4959 unsigned int vector = 0;
4960 unsigned int i;
4961 char xnamebuf[32];
4962 char intrbuf[PCI_INTRSTR_LEN];
4963 char const *intrstr;
4964
4965 /* the "other" intr is mapped to vector 0 */
4966 vector = 0;
4967 intrstr = pci_intr_string(pc, sc->sc_ihp[vector],
4968 intrbuf, sizeof(intrbuf));
4969 snprintf(xnamebuf, sizeof(xnamebuf), "%s others",
4970 device_xname(sc->sc_dev));
4971 sc->sc_ihs[vector] = pci_intr_establish_xname(pc,
4972 sc->sc_ihp[vector], IPL_NET, ixl_other_intr,
4973 sc, xnamebuf);
4974 if (sc->sc_ihs[vector] == NULL) {
4975 aprint_error_dev(sc->sc_dev,
4976 "unable to establish interrupt at %s\n", intrstr);
4977 goto fail;
4978 }
4979 vector++;
4980 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
4981
4982 sc->sc_msix_vector_queue = vector;
4983
4984 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
4985 intrstr = pci_intr_string(pc, sc->sc_ihp[vector],
4986 intrbuf, sizeof(intrbuf));
4987 snprintf(xnamebuf, sizeof(xnamebuf), "%s TXRX%d",
4988 device_xname(sc->sc_dev), i);
4989
4990 sc->sc_ihs[vector] = pci_intr_establish_xname(pc,
4991 sc->sc_ihp[vector], IPL_NET, ixl_queue_intr,
4992 (void *)&sc->sc_qps[i], xnamebuf);
4993
4994 if (sc->sc_ihs[vector] == NULL) {
4995 aprint_error_dev(sc->sc_dev,
4996 "unable to establish interrupt at %s\n", intrstr);
4997 goto fail;
4998 }
4999 vector++;
5000 aprint_normal_dev(sc->sc_dev,
5001 "interrupt at %s\n", intrstr);
5002 }
5003
5004 return 0;
5005 fail:
5006 for (i = 0; i < vector; i++) {
5007 pci_intr_disestablish(pc, sc->sc_ihs[i]);
5008 }
5009
5010 	sc->sc_msix_vector_queue = 0;
5012
5013 return -1;
5014 }
5015
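/*
 * Distribute the queue-pair vectors round-robin over the available
 * CPUs and pin the "other" vector on the following CPU.  A failure to
 * set the affinity is not fatal and is only reported.
 */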
5016 static void
5017 ixl_set_affinity_msix(struct ixl_softc *sc)
5018 {
5019 kcpuset_t *affinity;
5020 pci_chipset_tag_t pc = sc->sc_pa.pa_pc;
5021 int affinity_to, r;
5022 unsigned int i, vector;
5023 char intrbuf[PCI_INTRSTR_LEN];
5024 char const *intrstr;
5025
5026 affinity_to = 0;
5027 kcpuset_create(&affinity, false);
5028
5029 vector = sc->sc_msix_vector_queue;
5030
5031 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
5032 affinity_to = i % ncpu;
5033
5034 kcpuset_zero(affinity);
5035 kcpuset_set(affinity, affinity_to);
5036
5037 intrstr = pci_intr_string(pc, sc->sc_ihp[vector + i],
5038 intrbuf, sizeof(intrbuf));
5039 r = interrupt_distribute(sc->sc_ihs[vector + i],
5040 affinity, NULL);
5041 if (r == 0) {
5042 aprint_normal_dev(sc->sc_dev,
5043 "for TXRX%u interrupting at %s affinity to %u\n",
5044 i, intrstr, affinity_to);
5045 } else {
5046 aprint_normal_dev(sc->sc_dev,
5047 "for TXRX%u interrupting at %s\n",
5048 i, intrstr);
5049 }
5050 }
5051
5052 vector = 0; /* vector 0 means "other" interrupt */
5053 affinity_to = (affinity_to + 1) % ncpu;
5054 kcpuset_zero(affinity);
5055 kcpuset_set(affinity, affinity_to);
5056
5057 intrstr = pci_intr_string(pc, sc->sc_ihp[vector],
5058 intrbuf, sizeof(intrbuf));
5059 r = interrupt_distribute(sc->sc_ihs[vector], affinity, NULL);
5060 if (r == 0) {
5061 aprint_normal_dev(sc->sc_dev,
5062 "for other interrupting at %s affinity to %u\n",
5063 intrstr, affinity_to);
5064 } else {
5065 aprint_normal_dev(sc->sc_dev,
5066 		    "for other interrupting at %s\n", intrstr);
5067 }
5068
5069 kcpuset_destroy(affinity);
5070 }
5071
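/*
 * Program the per-queue interrupt cause registers.  With MSI-X every
 * queue pair is linked to its own vector; with MSI/INTx all queues are
 * chained onto the single NOTX vector.
 */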
5072 static void
5073 ixl_config_queue_intr(struct ixl_softc *sc)
5074 {
5075 unsigned int i, vector;
5076
5077 if (sc->sc_intrtype == PCI_INTR_TYPE_MSIX) {
5078 vector = sc->sc_msix_vector_queue;
5079 } else {
5080 vector = I40E_INTR_NOTX_INTR;
5081
5082 ixl_wr(sc, I40E_PFINT_LNKLST0,
5083 (I40E_INTR_NOTX_QUEUE <<
5084 I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
5085 (I40E_QUEUE_TYPE_RX <<
5086 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
5087 }
5088
5089 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
5090 ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), 0);
5091 ixl_flush(sc);
5092
5093 ixl_wr(sc, I40E_PFINT_LNKLSTN(i),
5094 ((i) << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
5095 (I40E_QUEUE_TYPE_RX <<
5096 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
5097
5098 ixl_wr(sc, I40E_QINT_RQCTL(i),
5099 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
5100 (I40E_ITR_INDEX_RX <<
5101 I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
5102 (I40E_INTR_NOTX_RX_QUEUE <<
5103 I40E_QINT_RQCTL_MSIX0_INDX_SHIFT) |
5104 (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
5105 (I40E_QUEUE_TYPE_TX <<
5106 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
5107 I40E_QINT_RQCTL_CAUSE_ENA_MASK);
5108
5109 ixl_wr(sc, I40E_QINT_TQCTL(i),
5110 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
5111 (I40E_ITR_INDEX_TX <<
5112 I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
5113 (I40E_INTR_NOTX_TX_QUEUE <<
5114 I40E_QINT_TQCTL_MSIX0_INDX_SHIFT) |
5115 (I40E_QUEUE_TYPE_EOL <<
5116 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
5117 (I40E_QUEUE_TYPE_RX <<
5118 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) |
5119 I40E_QINT_TQCTL_CAUSE_ENA_MASK);
5120
5121 if (sc->sc_intrtype == PCI_INTR_TYPE_MSIX)
5122 vector++;
5123 }
5124 ixl_flush(sc);
5125
5126 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_RX), 0x7a);
5127 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_TX), 0x7a);
5128 ixl_flush(sc);
5129 }
5130
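/*
 * Enable the non-queue interrupt causes (admin queue, link state change
 * and the various error events) on ICR0 and leave its queue linked list
 * empty.
 */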
5131 static void
5132 ixl_config_other_intr(struct ixl_softc *sc)
5133 {
5134 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0);
5135 (void)ixl_rd(sc, I40E_PFINT_ICR0);
5136
5137 ixl_wr(sc, I40E_PFINT_ICR0_ENA,
5138 I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
5139 I40E_PFINT_ICR0_ENA_GRST_MASK |
5140 I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
5141 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
5142 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
5143 I40E_PFINT_ICR0_ENA_VFLR_MASK |
5144 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
5145 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
5146 I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK);
5147
5148 ixl_wr(sc, I40E_PFINT_LNKLST0, 0x7FF);
5149 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_OTHER), 0);
5150 ixl_wr(sc, I40E_PFINT_STAT_CTL0,
5151 (I40E_ITR_INDEX_OTHER <<
5152 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT));
5153 ixl_flush(sc);
5154 }
5155
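/*
 * Allocate and establish the interrupts: prefer MSI-X with one vector
 * per queue pair plus one for other events, falling back to INTx when
 * MSI-X cannot be established.
 */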
5156 static int
5157 ixl_setup_interrupts(struct ixl_softc *sc)
5158 {
5159 struct pci_attach_args *pa = &sc->sc_pa;
5160 pci_intr_type_t max_type, intr_type;
5161 int counts[PCI_INTR_TYPE_SIZE];
5162 int error;
5163 unsigned int i;
5164 bool retry, nomsix = IXL_NOMSIX;
5165
5166 memset(counts, 0, sizeof(counts));
5167 max_type = PCI_INTR_TYPE_MSIX;
5168 /* QPs + other interrupt */
5169 counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueue_pairs_max + 1;
5170 counts[PCI_INTR_TYPE_INTX] = 1;
5171
5172 if (nomsix)
5173 counts[PCI_INTR_TYPE_MSIX] = 0;
5174
5175 do {
5176 retry = false;
5177 error = pci_intr_alloc(pa, &sc->sc_ihp, counts, max_type);
5178 if (error != 0) {
5179 aprint_error_dev(sc->sc_dev,
5180 "couldn't map interrupt\n");
5181 break;
5182 }
5183 		intr_type = pci_intr_type(pa->pa_pc, sc->sc_ihp[0]);
5184 		sc->sc_nintrs = counts[intr_type];
5185 		KASSERT(sc->sc_nintrs > 0);
5186 
5187 		for (i = 0; i < sc->sc_nintrs; i++) {
5188 			pci_intr_setattr(pa->pa_pc, &sc->sc_ihp[i],
5189 			    PCI_INTR_MPSAFE, true);
5190 		}
5191
5192 sc->sc_ihs = kmem_alloc(sizeof(sc->sc_ihs[0]) * sc->sc_nintrs,
5193 KM_SLEEP);
5194
5195 if (intr_type == PCI_INTR_TYPE_MSIX) {
5196 error = ixl_establish_msix(sc);
5197 if (error) {
5198 counts[PCI_INTR_TYPE_MSIX] = 0;
5199 retry = true;
5200 } else {
5201 ixl_set_affinity_msix(sc);
5202 }
5203 } else if (intr_type == PCI_INTR_TYPE_INTX) {
5204 error = ixl_establish_intx(sc);
5205 } else {
5206 error = -1;
5207 }
5208
5209 if (error) {
5210 kmem_free(sc->sc_ihs,
5211 sizeof(sc->sc_ihs[0]) * sc->sc_nintrs);
5212 pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs);
5213 } else {
5214 sc->sc_intrtype = intr_type;
5215 }
5216 } while (retry);
5217
5218 return error;
5219 }
5220
5221 static void
5222 ixl_teardown_interrupts(struct ixl_softc *sc)
5223 {
5224 struct pci_attach_args *pa = &sc->sc_pa;
5225 unsigned int i;
5226
5227 for (i = 0; i < sc->sc_nintrs; i++) {
5228 pci_intr_disestablish(pa->pa_pc, sc->sc_ihs[i]);
5229 }
5230
5231 pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs);
5232
5233 kmem_free(sc->sc_ihs, sizeof(sc->sc_ihs[0]) * sc->sc_nintrs);
5234 sc->sc_ihs = NULL;
5235 sc->sc_nintrs = 0;
5236 }
5237
5238 static int
5239 ixl_setup_stats(struct ixl_softc *sc)
5240 {
5241 struct ixl_queue_pair *qp;
5242 struct ixl_tx_ring *txr;
5243 struct ixl_rx_ring *rxr;
5244 unsigned int i;
5245
5246 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
5247 qp = &sc->sc_qps[i];
5248 txr = qp->qp_txr;
5249 rxr = qp->qp_rxr;
5250
5251 		evcnt_attach_dynamic(&txr->txr_defragged, EVCNT_TYPE_MISC,
5252 		    NULL, qp->qp_name, "m_defrag succeeded");
5253 		evcnt_attach_dynamic(&txr->txr_defrag_failed, EVCNT_TYPE_MISC,
5254 		    NULL, qp->qp_name, "m_defrag failed");
5255 evcnt_attach_dynamic(&txr->txr_pcqdrop, EVCNT_TYPE_MISC,
5256 NULL, qp->qp_name, "Dropped in pcq");
5257 evcnt_attach_dynamic(&txr->txr_transmitdef, EVCNT_TYPE_MISC,
5258 NULL, qp->qp_name, "Deferred transmit");
5259 evcnt_attach_dynamic(&txr->txr_intr, EVCNT_TYPE_INTR,
5260 NULL, qp->qp_name, "Interrupt on queue");
5261 evcnt_attach_dynamic(&txr->txr_defer, EVCNT_TYPE_MISC,
5262 NULL, qp->qp_name, "Handled queue in softint/workqueue");
5263
5264 evcnt_attach_dynamic(&rxr->rxr_mgethdr_failed, EVCNT_TYPE_MISC,
5265 NULL, qp->qp_name, "MGETHDR failed");
5266 evcnt_attach_dynamic(&rxr->rxr_mgetcl_failed, EVCNT_TYPE_MISC,
5267 NULL, qp->qp_name, "MCLGET failed");
5268 evcnt_attach_dynamic(&rxr->rxr_mbuf_load_failed,
5269 EVCNT_TYPE_MISC, NULL, qp->qp_name,
5270 "bus_dmamap_load_mbuf failed");
5271 evcnt_attach_dynamic(&rxr->rxr_intr, EVCNT_TYPE_INTR,
5272 NULL, qp->qp_name, "Interrupt on queue");
5273 evcnt_attach_dynamic(&rxr->rxr_defer, EVCNT_TYPE_MISC,
5274 NULL, qp->qp_name, "Handled queue in softint/workqueue");
5275 }
5276
5277 evcnt_attach_dynamic(&sc->sc_event_atq, EVCNT_TYPE_INTR,
5278 NULL, device_xname(sc->sc_dev), "Interrupt for other events");
5279 evcnt_attach_dynamic(&sc->sc_event_link, EVCNT_TYPE_MISC,
5280 NULL, device_xname(sc->sc_dev), "Link status event");
5281 evcnt_attach_dynamic(&sc->sc_event_ecc_err, EVCNT_TYPE_MISC,
5282 NULL, device_xname(sc->sc_dev), "ECC error");
5283 evcnt_attach_dynamic(&sc->sc_event_pci_exception, EVCNT_TYPE_MISC,
5284 NULL, device_xname(sc->sc_dev), "PCI exception");
5285 evcnt_attach_dynamic(&sc->sc_event_crit_err, EVCNT_TYPE_MISC,
5286 NULL, device_xname(sc->sc_dev), "Critical error");
5287
5288 return 0;
5289 }
5290
5291 static void
5292 ixl_teardown_stats(struct ixl_softc *sc)
5293 {
5294 struct ixl_tx_ring *txr;
5295 struct ixl_rx_ring *rxr;
5296 unsigned int i;
5297
5298 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
5299 txr = sc->sc_qps[i].qp_txr;
5300 rxr = sc->sc_qps[i].qp_rxr;
5301
5302 evcnt_detach(&txr->txr_defragged);
5303 evcnt_detach(&txr->txr_defrag_failed);
5304 evcnt_detach(&txr->txr_pcqdrop);
5305 evcnt_detach(&txr->txr_transmitdef);
5306 evcnt_detach(&txr->txr_intr);
5307 evcnt_detach(&txr->txr_defer);
5308
5309 evcnt_detach(&rxr->rxr_mgethdr_failed);
5310 evcnt_detach(&rxr->rxr_mgetcl_failed);
5311 evcnt_detach(&rxr->rxr_mbuf_load_failed);
5312 evcnt_detach(&rxr->rxr_intr);
5313 evcnt_detach(&rxr->rxr_defer);
5314 }
5315
5316 evcnt_detach(&sc->sc_event_atq);
5317 evcnt_detach(&sc->sc_event_link);
5318 evcnt_detach(&sc->sc_event_ecc_err);
5319 evcnt_detach(&sc->sc_event_pci_exception);
5320 evcnt_detach(&sc->sc_event_crit_err);
5321 }
5322
5323 static int
5324 ixl_setup_sysctls(struct ixl_softc *sc)
5325 {
5326 const char *devname;
5327 struct sysctllog **log;
5328 const struct sysctlnode *rnode, *rxnode, *txnode;
5329 int error;
5330
5331 log = &sc->sc_sysctllog;
5332 devname = device_xname(sc->sc_dev);
5333
5334 error = sysctl_createv(log, 0, NULL, &rnode,
5335 0, CTLTYPE_NODE, devname,
5336 SYSCTL_DESCR("ixl information and settings"),
5337 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
5338 if (error)
5339 goto out;
5340
5341 error = sysctl_createv(log, 0, &rnode, NULL,
5342 CTLFLAG_READWRITE, CTLTYPE_BOOL, "txrx_workqueue",
5343 SYSCTL_DESCR("Use workqueue for packet processing"),
5344 NULL, 0, &sc->sc_txrx_workqueue, 0, CTL_CREATE, CTL_EOL);
5345 if (error)
5346 goto out;
5347
5348 error = sysctl_createv(log, 0, &rnode, &rxnode,
5349 0, CTLTYPE_NODE, "rx",
5350 SYSCTL_DESCR("ixl information and settings for Rx"),
5351 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
5352 if (error)
5353 goto out;
5354
5355 error = sysctl_createv(log, 0, &rxnode, NULL,
5356 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
5357 SYSCTL_DESCR("max number of Rx packets"
5358 " to process for interrupt processing"),
5359 NULL, 0, &sc->sc_rx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
5360 if (error)
5361 goto out;
5362
5363 error = sysctl_createv(log, 0, &rxnode, NULL,
5364 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
5365 SYSCTL_DESCR("max number of Rx packets"
5366 " to process for deferred processing"),
5367 NULL, 0, &sc->sc_rx_process_limit, 0, CTL_CREATE, CTL_EOL);
5368 if (error)
5369 goto out;
5370
5371 error = sysctl_createv(log, 0, &rnode, &txnode,
5372 0, CTLTYPE_NODE, "tx",
5373 SYSCTL_DESCR("ixl information and settings for Tx"),
5374 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
5375 if (error)
5376 goto out;
5377
5378 error = sysctl_createv(log, 0, &txnode, NULL,
5379 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
5380 SYSCTL_DESCR("max number of Tx packets"
5381 " to process for interrupt processing"),
5382 NULL, 0, &sc->sc_tx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
5383 if (error)
5384 goto out;
5385
5386 error = sysctl_createv(log, 0, &txnode, NULL,
5387 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
5388 SYSCTL_DESCR("max number of Tx packets"
5389 " to process for deferred processing"),
5390 NULL, 0, &sc->sc_tx_process_limit, 0, CTL_CREATE, CTL_EOL);
5391 if (error)
5392 goto out;
5393
5394 out:
5395 if (error) {
5396 aprint_error_dev(sc->sc_dev,
5397 "unable to create sysctl node\n");
5398 sysctl_teardown(log);
5399 }
5400
5401 return error;
5402 }
5403
5404 static void
5405 ixl_teardown_sysctls(struct ixl_softc *sc)
5406 {
5407
5408 sysctl_teardown(&sc->sc_sysctllog);
5409 }
5410
5411 static struct workqueue *
5412 ixl_workq_create(const char *name, pri_t prio, int ipl, int flags)
5413 {
5414 struct workqueue *wq;
5415 int error;
5416
5417 error = workqueue_create(&wq, name, ixl_workq_work, NULL,
5418 prio, ipl, flags);
5419
5420 if (error)
5421 return NULL;
5422
5423 return wq;
5424 }
5425
5426 static void
5427 ixl_workq_destroy(struct workqueue *wq)
5428 {
5429
5430 workqueue_destroy(wq);
5431 }
5432
5433 static void
5434 ixl_work_set(struct ixl_work *work, void (*func)(void *), void *arg)
5435 {
5436
5437 memset(work, 0, sizeof(*work));
5438 work->ixw_func = func;
5439 work->ixw_arg = arg;
5440 }
5441
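/*
 * Enqueue the work item unless it is already pending; the ixw_added
 * flag is flipped atomically so an item is only ever queued once.
 */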
5442 static void
5443 ixl_work_add(struct workqueue *wq, struct ixl_work *work)
5444 {
5445 if (atomic_cas_uint(&work->ixw_added, 0, 1) != 0)
5446 return;
5447
5448 workqueue_enqueue(wq, &work->ixw_cookie, NULL);
5449 }
5450
5451 static void
5452 ixl_work_wait(struct workqueue *wq, struct ixl_work *work)
5453 {
5454
5455 workqueue_wait(wq, &work->ixw_cookie);
5456 }
5457
5458 static void
5459 ixl_workq_work(struct work *wk, void *context)
5460 {
5461 struct ixl_work *work;
5462
5463 work = container_of(wk, struct ixl_work, ixw_cookie);
5464
5465 atomic_swap_uint(&work->ixw_added, 0);
5466 kpreempt_disable();
5467 work->ixw_func(work->ixw_arg);
5468 kpreempt_enable();
5469 }
5470
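/*
 * Read an Rx control register indirectly through the RX_CTL_REG_READ
 * admin command, translating admin queue return codes into errnos.
 */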
5471 static int
5472 ixl_rx_ctl_read(struct ixl_softc *sc, uint32_t reg, uint32_t *rv)
5473 {
5474 struct ixl_aq_desc iaq;
5475
5476 memset(&iaq, 0, sizeof(iaq));
5477 iaq.iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_REG_READ);
5478 iaq.iaq_param[1] = htole32(reg);
5479
5480 if (ixl_atq_poll(sc, &iaq, 250) != 0)
5481 return ETIMEDOUT;
5482
5483 	switch (le16toh(iaq.iaq_retval)) {
5484 case IXL_AQ_RC_OK:
5485 /* success */
5486 break;
5487 case IXL_AQ_RC_EACCES:
5488 return EPERM;
5489 case IXL_AQ_RC_EAGAIN:
5490 return EAGAIN;
5491 default:
5492 return EIO;
5493 }
5494
5495 	*rv = le32toh(iaq.iaq_param[3]);
5496 return 0;
5497 }
5500
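/*
 * Read an Rx control CSR.  When sc_rxctl_atq is set the access first
 * goes through the admin queue (retrying on EAGAIN); on failure, or
 * when the admin queue path is not used, fall back to a direct read.
 */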
5501 static uint32_t
5502 ixl_rd_rx_csr(struct ixl_softc *sc, uint32_t reg)
5503 {
5504 uint32_t val;
5505 int rv, retry, retry_limit;
5506
5507 retry_limit = sc->sc_rxctl_atq ? 5 : 0;
5508
5509 for (retry = 0; retry < retry_limit; retry++) {
5510 rv = ixl_rx_ctl_read(sc, reg, &val);
5511 if (rv == 0)
5512 return val;
5513 else if (rv == EAGAIN)
5514 delaymsec(1);
5515 else
5516 break;
5517 }
5518
5519 val = ixl_rd(sc, reg);
5520
5521 return val;
5522 }
5523
5524 static int
5525 ixl_rx_ctl_write(struct ixl_softc *sc, uint32_t reg, uint32_t value)
5526 {
5527 struct ixl_aq_desc iaq;
5528
5529 memset(&iaq, 0, sizeof(iaq));
5530 iaq.iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_REG_WRITE);
5531 iaq.iaq_param[1] = htole32(reg);
5532 iaq.iaq_param[3] = htole32(value);
5533
5534 if (ixl_atq_poll(sc, &iaq, 250) != 0)
5535 return ETIMEDOUT;
5536
5537 	switch (le16toh(iaq.iaq_retval)) {
5538 case IXL_AQ_RC_OK:
5539 /* success */
5540 break;
5541 case IXL_AQ_RC_EACCES:
5542 return EPERM;
5543 case IXL_AQ_RC_EAGAIN:
5544 return EAGAIN;
5545 default:
5546 return EIO;
5547 }
5548
5549 return 0;
5550 }
5551
5552 static void
5553 ixl_wr_rx_csr(struct ixl_softc *sc, uint32_t reg, uint32_t value)
5554 {
5555 int rv, retry, retry_limit;
5556
5557 retry_limit = sc->sc_rxctl_atq ? 5 : 0;
5558
5559 for (retry = 0; retry < retry_limit; retry++) {
5560 rv = ixl_rx_ctl_write(sc, reg, value);
5561 if (rv == 0)
5562 return;
5563 else if (rv == EAGAIN)
5564 delaymsec(1);
5565 else
5566 break;
5567 }
5568
5569 ixl_wr(sc, reg, value);
5570 }
5571
5572 MODULE(MODULE_CLASS_DRIVER, if_ixl, "pci");
5573
5574 #ifdef _MODULE
5575 #include "ioconf.c"
5576 #endif
5577
5578 static int
5579 if_ixl_modcmd(modcmd_t cmd, void *opaque)
5580 {
5581 int error = 0;
5582
5583 #ifdef _MODULE
5584 switch (cmd) {
5585 case MODULE_CMD_INIT:
5586 error = config_init_component(cfdriver_ioconf_if_ixl,
5587 cfattach_ioconf_if_ixl, cfdata_ioconf_if_ixl);
5588 break;
5589 case MODULE_CMD_FINI:
5590 error = config_fini_component(cfdriver_ioconf_if_ixl,
5591 cfattach_ioconf_if_ixl, cfdata_ioconf_if_ixl);
5592 break;
5593 default:
5594 error = ENOTTY;
5595 break;
5596 }
5597 #endif
5598
5599 return error;
5600 }
5601