/*	$NetBSD: if_ixl.c,v 1.5 2019/12/20 01:12:51 yamaguchi Exp $	*/
2
3 /*
4 * Copyright (c) 2013-2015, Intel Corporation
5 * All rights reserved.
6
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
12 *
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * 3. Neither the name of the Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 /*
35 * Copyright (c) 2016,2017 David Gwynne <dlg (at) openbsd.org>
36 *
37 * Permission to use, copy, modify, and distribute this software for any
38 * purpose with or without fee is hereby granted, provided that the above
39 * copyright notice and this permission notice appear in all copies.
40 *
41 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
42 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
43 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
44 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
45 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
46 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
47 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
48 */
49
50 /*
51 * Copyright (c) 2019 Internet Initiative Japan, Inc.
52 * All rights reserved.
53 *
54 * Redistribution and use in source and binary forms, with or without
55 * modification, are permitted provided that the following conditions
56 * are met:
57 * 1. Redistributions of source code must retain the above copyright
58 * notice, this list of conditions and the following disclaimer.
59 * 2. Redistributions in binary form must reproduce the above copyright
60 * notice, this list of conditions and the following disclaimer in the
61 * documentation and/or other materials provided with the distribution.
62 *
63 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
64 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
65 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
66 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
67 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
68 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
69 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
70 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
71 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
72 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
73 * POSSIBILITY OF SUCH DAMAGE.
74 */
75
76 #include <sys/cdefs.h>
77
78 #ifdef _KERNEL_OPT
79 #include "opt_net_mpsafe.h"
80 #endif
81
82 #include <sys/param.h>
83 #include <sys/types.h>
84
85 #include <sys/cpu.h>
86 #include <sys/device.h>
87 #include <sys/evcnt.h>
88 #include <sys/interrupt.h>
89 #include <sys/kmem.h>
90 #include <sys/malloc.h>
91 #include <sys/module.h>
92 #include <sys/mutex.h>
93 #include <sys/pcq.h>
94 #include <sys/syslog.h>
95 #include <sys/workqueue.h>
96
97 #include <sys/bus.h>
98
99 #include <net/bpf.h>
100 #include <net/if.h>
101 #include <net/if_dl.h>
102 #include <net/if_media.h>
103 #include <net/if_ether.h>
104 #include <net/rss_config.h>
105
106 #include <dev/pci/pcivar.h>
107 #include <dev/pci/pcidevs.h>
108
109 #include <dev/pci/if_ixlreg.h>
110 #include <dev/pci/if_ixlvar.h>
111
112 struct ixl_softc; /* defined */
113
114 #define I40E_PF_RESET_WAIT_COUNT 200
115 #define I40E_AQ_LARGE_BUF 512
116
117 /* bitfields for Tx queue mapping in QTX_CTL */
118 #define I40E_QTX_CTL_VF_QUEUE 0x0
119 #define I40E_QTX_CTL_VM_QUEUE 0x1
120 #define I40E_QTX_CTL_PF_QUEUE 0x2
121
122 #define I40E_QUEUE_TYPE_EOL 0x7ff
123 #define I40E_INTR_NOTX_QUEUE 0
124
125 #define I40E_QUEUE_TYPE_RX 0x0
126 #define I40E_QUEUE_TYPE_TX 0x1
127 #define I40E_QUEUE_TYPE_PE_CEQ 0x2
128 #define I40E_QUEUE_TYPE_UNKNOWN 0x3
129
130 #define I40E_ITR_INDEX_RX 0x0
131 #define I40E_ITR_INDEX_TX 0x1
132 #define I40E_ITR_INDEX_OTHER 0x2
133 #define I40E_ITR_INDEX_NONE 0x3
134
135 #define I40E_INTR_NOTX_QUEUE 0
136 #define I40E_INTR_NOTX_INTR 0
137 #define I40E_INTR_NOTX_RX_QUEUE 0
138 #define I40E_INTR_NOTX_TX_QUEUE 1
139 #define I40E_INTR_NOTX_RX_MASK I40E_PFINT_ICR0_QUEUE_0_MASK
140 #define I40E_INTR_NOTX_TX_MASK I40E_PFINT_ICR0_QUEUE_1_MASK
141
142 #define BIT_ULL(a) (1ULL << (a))
143 #define IXL_RSS_HENA_DEFAULT_BASE \
144 (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
145 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
146 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
147 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
148 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
149 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
150 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
151 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
152 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
153 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
154 BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
155 #define IXL_RSS_HENA_DEFAULT_XL710 IXL_RSS_HENA_DEFAULT_BASE
156 #define IXL_RSS_HENA_DEFAULT_X722 (IXL_RSS_HENA_DEFAULT_XL710 | \
157 BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
158 BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
159 BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
160 BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
161 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
162 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK))
163 #define I40E_HASH_LUT_SIZE_128 0
164 #define IXL_RSS_KEY_SIZE_REG 13
165
166 #define IXL_ICR0_CRIT_ERR_MASK \
167 (I40E_PFINT_ICR0_PCI_EXCEPTION_MASK | \
168 I40E_PFINT_ICR0_ECC_ERR_MASK | \
169 I40E_PFINT_ICR0_PE_CRITERR_MASK)
170
171 #define IXL_TX_PKT_DESCS 8
172 #define IXL_TX_QUEUE_ALIGN 128
173 #define IXL_RX_QUEUE_ALIGN 128
174
175 #define IXL_HARDMTU 9712 /* 9726 - ETHER_HDR_LEN */
176
177 #define IXL_PCIREG PCI_MAPREG_START
178
179 #define IXL_ITR0 0x0
180 #define IXL_ITR1 0x1
181 #define IXL_ITR2 0x2
182 #define IXL_NOITR 0x3
183
184 #define IXL_AQ_NUM 256
185 #define IXL_AQ_MASK (IXL_AQ_NUM - 1)
186 #define IXL_AQ_ALIGN 64 /* lol */
187 #define IXL_AQ_BUFLEN 4096
188
189 #define IXL_HMC_ROUNDUP 512
190 #define IXL_HMC_PGSIZE 4096
191 #define IXL_HMC_DVASZ sizeof(uint64_t)
192 #define IXL_HMC_PGS (IXL_HMC_PGSIZE / IXL_HMC_DVASZ)
193 #define IXL_HMC_L2SZ (IXL_HMC_PGSIZE * IXL_HMC_PGS)
194 #define IXL_HMC_PDVALID 1ULL
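/*
 * For reference, the arithmetic behind the two derived macros above:
 * each IXL_HMC_PGSIZE (4096) byte page holds IXL_HMC_PGS = 4096 / 8 =
 * 512 64-bit page descriptor entries, so one fully populated
 * descriptor page maps IXL_HMC_L2SZ = 4096 * 512 = 2MB of HMC backing
 * memory.
 */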
195
196 #define IXL_ATQ_EXEC_TIMEOUT (10 * hz)
197
198 struct ixl_aq_regs {
199 bus_size_t atq_tail;
200 bus_size_t atq_head;
201 bus_size_t atq_len;
202 bus_size_t atq_bal;
203 bus_size_t atq_bah;
204
205 bus_size_t arq_tail;
206 bus_size_t arq_head;
207 bus_size_t arq_len;
208 bus_size_t arq_bal;
209 bus_size_t arq_bah;
210
211 uint32_t atq_len_enable;
212 uint32_t atq_tail_mask;
213 uint32_t atq_head_mask;
214
215 uint32_t arq_len_enable;
216 uint32_t arq_tail_mask;
217 uint32_t arq_head_mask;
218 };
219
220 struct ixl_phy_type {
221 uint64_t phy_type;
222 uint64_t ifm_type;
223 };
224
225 struct ixl_speed_type {
226 uint8_t dev_speed;
227 uint64_t net_speed;
228 };
229
230 struct ixl_aq_buf {
231 SIMPLEQ_ENTRY(ixl_aq_buf)
232 aqb_entry;
233 void *aqb_data;
234 bus_dmamap_t aqb_map;
235 bus_dma_segment_t aqb_seg;
236 size_t aqb_size;
237 int aqb_nsegs;
238 };
239 SIMPLEQ_HEAD(ixl_aq_bufs, ixl_aq_buf);
240
241 struct ixl_dmamem {
242 bus_dmamap_t ixm_map;
243 bus_dma_segment_t ixm_seg;
244 int ixm_nsegs;
245 size_t ixm_size;
246 void *ixm_kva;
247 };
248
249 #define IXL_DMA_MAP(_ixm) ((_ixm)->ixm_map)
250 #define IXL_DMA_DVA(_ixm) ((_ixm)->ixm_map->dm_segs[0].ds_addr)
251 #define IXL_DMA_KVA(_ixm) ((void *)(_ixm)->ixm_kva)
252 #define IXL_DMA_LEN(_ixm) ((_ixm)->ixm_size)
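/*
 * Typical use of the accessors above, as done for the admin queues
 * later in this file: sync the whole allocation for the device and
 * then program its DMA address, e.g.
 *
 *	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
 *	    0, IXL_DMA_LEN(&sc->sc_atq),
 *	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
 *	ixl_wr(sc, sc->sc_aq_regs->atq_bal, ixl_dmamem_lo(&sc->sc_atq));
 *	ixl_wr(sc, sc->sc_aq_regs->atq_bah, ixl_dmamem_hi(&sc->sc_atq));
 */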
253
254 struct ixl_hmc_entry {
255 uint64_t hmc_base;
256 uint32_t hmc_count;
257 uint64_t hmc_size;
258 };
259
260 enum ixl_hmc_types {
261 IXL_HMC_LAN_TX = 0,
262 IXL_HMC_LAN_RX,
263 IXL_HMC_FCOE_CTX,
264 IXL_HMC_FCOE_FILTER,
265 IXL_HMC_COUNT
266 };
267
268 struct ixl_hmc_pack {
269 uint16_t offset;
270 uint16_t width;
271 uint16_t lsb;
272 };
273
274 /*
275 * these hmc objects have weird sizes and alignments, so these are abstract
276 * representations of them that are nice for c to populate.
277 *
278 * the packing code relies on little-endian values being stored in the fields,
279 * no high bits in the fields being set, and the fields must be packed in the
280 * same order as they are in the ctx structure.
281 */
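/*
 * For example, the first entries of ixl_hmc_pack_rxq below place the
 * `head' field in bits [12:0] of the context (width 13, lsb 0),
 * `cpuid' in bits [20:13], and the 57-bit `base' at bit 32;
 * ixl_hmc_pack() copies each struct field into that bit range of the
 * little-endian context image.
 */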
282
283 struct ixl_hmc_rxq {
284 uint16_t head;
285 uint8_t cpuid;
286 uint64_t base;
287 #define IXL_HMC_RXQ_BASE_UNIT 128
288 uint16_t qlen;
289 uint16_t dbuff;
290 #define IXL_HMC_RXQ_DBUFF_UNIT 128
291 uint8_t hbuff;
292 #define IXL_HMC_RXQ_HBUFF_UNIT 64
293 uint8_t dtype;
294 #define IXL_HMC_RXQ_DTYPE_NOSPLIT 0x0
295 #define IXL_HMC_RXQ_DTYPE_HSPLIT 0x1
296 #define IXL_HMC_RXQ_DTYPE_SPLIT_ALWAYS 0x2
297 uint8_t dsize;
298 #define IXL_HMC_RXQ_DSIZE_16 0
299 #define IXL_HMC_RXQ_DSIZE_32 1
300 uint8_t crcstrip;
301 uint8_t fc_ena;
302 uint8_t l2sel;
303 uint8_t hsplit_0;
304 uint8_t hsplit_1;
305 uint8_t showiv;
306 uint16_t rxmax;
307 uint8_t tphrdesc_ena;
308 uint8_t tphwdesc_ena;
309 uint8_t tphdata_ena;
310 uint8_t tphhead_ena;
311 uint8_t lrxqthresh;
312 uint8_t prefena;
313 };
314
315 static const struct ixl_hmc_pack ixl_hmc_pack_rxq[] = {
316 { offsetof(struct ixl_hmc_rxq, head), 13, 0 },
317 { offsetof(struct ixl_hmc_rxq, cpuid), 8, 13 },
318 { offsetof(struct ixl_hmc_rxq, base), 57, 32 },
319 { offsetof(struct ixl_hmc_rxq, qlen), 13, 89 },
320 { offsetof(struct ixl_hmc_rxq, dbuff), 7, 102 },
321 { offsetof(struct ixl_hmc_rxq, hbuff), 5, 109 },
322 { offsetof(struct ixl_hmc_rxq, dtype), 2, 114 },
323 { offsetof(struct ixl_hmc_rxq, dsize), 1, 116 },
324 { offsetof(struct ixl_hmc_rxq, crcstrip), 1, 117 },
325 { offsetof(struct ixl_hmc_rxq, fc_ena), 1, 118 },
326 { offsetof(struct ixl_hmc_rxq, l2sel), 1, 119 },
327 { offsetof(struct ixl_hmc_rxq, hsplit_0), 4, 120 },
328 { offsetof(struct ixl_hmc_rxq, hsplit_1), 2, 124 },
329 { offsetof(struct ixl_hmc_rxq, showiv), 1, 127 },
330 { offsetof(struct ixl_hmc_rxq, rxmax), 14, 174 },
331 { offsetof(struct ixl_hmc_rxq, tphrdesc_ena), 1, 193 },
332 { offsetof(struct ixl_hmc_rxq, tphwdesc_ena), 1, 194 },
333 { offsetof(struct ixl_hmc_rxq, tphdata_ena), 1, 195 },
334 { offsetof(struct ixl_hmc_rxq, tphhead_ena), 1, 196 },
335 { offsetof(struct ixl_hmc_rxq, lrxqthresh), 3, 198 },
336 { offsetof(struct ixl_hmc_rxq, prefena), 1, 201 },
337 };
338
339 #define IXL_HMC_RXQ_MINSIZE (201 + 1)
340
341 struct ixl_hmc_txq {
342 uint16_t head;
343 uint8_t new_context;
344 uint64_t base;
345 #define IXL_HMC_TXQ_BASE_UNIT 128
346 uint8_t fc_ena;
347 uint8_t timesync_ena;
348 uint8_t fd_ena;
349 uint8_t alt_vlan_ena;
350 uint16_t thead_wb;
351 uint8_t cpuid;
352 uint8_t head_wb_ena;
353 #define IXL_HMC_TXQ_DESC_WB 0
354 #define IXL_HMC_TXQ_HEAD_WB 1
355 uint16_t qlen;
356 uint8_t tphrdesc_ena;
357 uint8_t tphrpacket_ena;
358 uint8_t tphwdesc_ena;
359 uint64_t head_wb_addr;
360 uint32_t crc;
361 uint16_t rdylist;
362 uint8_t rdylist_act;
363 };
364
365 static const struct ixl_hmc_pack ixl_hmc_pack_txq[] = {
366 { offsetof(struct ixl_hmc_txq, head), 13, 0 },
367 { offsetof(struct ixl_hmc_txq, new_context), 1, 30 },
368 { offsetof(struct ixl_hmc_txq, base), 57, 32 },
369 { offsetof(struct ixl_hmc_txq, fc_ena), 1, 89 },
370 { offsetof(struct ixl_hmc_txq, timesync_ena), 1, 90 },
371 { offsetof(struct ixl_hmc_txq, fd_ena), 1, 91 },
372 { offsetof(struct ixl_hmc_txq, alt_vlan_ena), 1, 92 },
373 { offsetof(struct ixl_hmc_txq, cpuid), 8, 96 },
374 /* line 1 */
375 { offsetof(struct ixl_hmc_txq, thead_wb), 13, 0 + 128 },
376 { offsetof(struct ixl_hmc_txq, head_wb_ena), 1, 32 + 128 },
377 { offsetof(struct ixl_hmc_txq, qlen), 13, 33 + 128 },
378 { offsetof(struct ixl_hmc_txq, tphrdesc_ena), 1, 46 + 128 },
379 { offsetof(struct ixl_hmc_txq, tphrpacket_ena), 1, 47 + 128 },
380 { offsetof(struct ixl_hmc_txq, tphwdesc_ena), 1, 48 + 128 },
381 { offsetof(struct ixl_hmc_txq, head_wb_addr), 64, 64 + 128 },
382 /* line 7 */
383 { offsetof(struct ixl_hmc_txq, crc), 32, 0 + (7*128) },
384 { offsetof(struct ixl_hmc_txq, rdylist), 10, 84 + (7*128) },
385 { offsetof(struct ixl_hmc_txq, rdylist_act), 1, 94 + (7*128) },
386 };
387
388 #define IXL_HMC_TXQ_MINSIZE (94 + (7*128) + 1)
389
390 struct ixl_work {
391 struct work ixw_cookie;
392 void (*ixw_func)(void *);
393 void *ixw_arg;
394 unsigned int ixw_added;
395 };
396 #define IXL_WORKQUEUE_PRI PRI_SOFTNET
397
398 struct ixl_tx_map {
399 struct mbuf *txm_m;
400 bus_dmamap_t txm_map;
401 unsigned int txm_eop;
402 };
403
404 struct ixl_tx_ring {
405 kmutex_t txr_lock;
406 struct ixl_softc *txr_sc;
407
408 unsigned int txr_prod;
409 unsigned int txr_cons;
410
411 struct ixl_tx_map *txr_maps;
412 struct ixl_dmamem txr_mem;
413
414 bus_size_t txr_tail;
415 unsigned int txr_qid;
416 pcq_t *txr_intrq;
417 void *txr_si;
418
419 uint64_t txr_oerrors; /* if_oerrors */
420 uint64_t txr_opackets; /* if_opackets */
421 uint64_t txr_obytes; /* if_obytes */
422 uint64_t txr_omcasts; /* if_omcasts */
423
424 struct evcnt txr_defragged;
425 struct evcnt txr_defrag_failed;
426 struct evcnt txr_pcqdrop;
427 struct evcnt txr_transmitdef;
428 struct evcnt txr_intr;
429 struct evcnt txr_defer;
430 };
431
432 struct ixl_rx_map {
433 struct mbuf *rxm_m;
434 bus_dmamap_t rxm_map;
435 };
436
437 struct ixl_rx_ring {
438 kmutex_t rxr_lock;
439
440 unsigned int rxr_prod;
441 unsigned int rxr_cons;
442
443 struct ixl_rx_map *rxr_maps;
444 struct ixl_dmamem rxr_mem;
445
446 struct mbuf *rxr_m_head;
447 struct mbuf **rxr_m_tail;
448
449 bus_size_t rxr_tail;
450 unsigned int rxr_qid;
451
452 uint64_t rxr_ipackets; /* if_ipackets */
453 uint64_t rxr_ibytes; /* if_ibytes */
454 uint64_t rxr_iqdrops; /* iqdrops */
455 uint64_t rxr_ierrors; /* if_ierrors */
456
457 struct evcnt rxr_mgethdr_failed;
458 struct evcnt rxr_mgetcl_failed;
459 struct evcnt rxr_mbuf_load_failed;
460 struct evcnt rxr_intr;
461 struct evcnt rxr_defer;
462 };
463
464 struct ixl_queue_pair {
465 struct ixl_softc *qp_sc;
466 struct ixl_tx_ring *qp_txr;
467 struct ixl_rx_ring *qp_rxr;
468
469 char qp_name[16];
470
471 void *qp_si;
472 struct ixl_work qp_task;
473 bool qp_workqueue;
474 };
475
476 struct ixl_atq {
477 struct ixl_aq_desc iatq_desc;
478 void (*iatq_fn)(struct ixl_softc *,
479 const struct ixl_aq_desc *);
480 };
481 SIMPLEQ_HEAD(ixl_atq_list, ixl_atq);
482
483 struct ixl_product {
484 unsigned int vendor_id;
485 unsigned int product_id;
486 };
487
/*
 * Locking notes:
 * + fields in ixl_tx_ring are protected by txr_lock (a spin mutex), and
 *    fields in ixl_rx_ring are protected by rxr_lock (a spin mutex).
 *    - only one of these locks may be held at a time.
 * + fields named sc_atq_* in ixl_softc are protected by sc_atq_lock
 *   (a spin mutex).
 *    - this lock cannot be held together with txr_lock or rxr_lock.
 * + fields named sc_arq_* are not protected by any lock.
 *    - all operations on sc_arq_* are done in the single context
 *      related to sc_arq_task.
 * + other fields in ixl_softc are protected by sc_cfg_lock
 *   (an adaptive mutex).
 *    - it must be taken before any of the other locks, and it may be
 *      released after the other lock is released.
 */
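/*
 * A minimal sketch of the resulting lock order, as used by
 * ixl_init()/ixl_reinit() below: sc_cfg_lock is taken first and a ring
 * lock is only taken while it is held, e.g.
 *
 *	mutex_enter(&sc->sc_cfg_lock);
 *	...
 *	mutex_enter(&rxr->rxr_lock);
 *	ixl_rxfill(sc, rxr);
 *	mutex_exit(&rxr->rxr_lock);
 *	...
 *	mutex_exit(&sc->sc_cfg_lock);
 */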
504
505 struct ixl_softc {
506 device_t sc_dev;
507 struct ethercom sc_ec;
508 bool sc_attached;
509 bool sc_dead;
510 bool sc_rxctl_atq;
511 struct sysctllog *sc_sysctllog;
512 struct workqueue *sc_workq;
513 struct workqueue *sc_workq_txrx;
514 uint8_t sc_enaddr[ETHER_ADDR_LEN];
515 struct ifmedia sc_media;
516 uint64_t sc_media_status;
517 uint64_t sc_media_active;
518 kmutex_t sc_cfg_lock;
519 enum i40e_mac_type sc_mac_type;
520 uint32_t sc_rss_table_size;
521 uint32_t sc_rss_table_entry_width;
522 bool sc_txrx_workqueue;
523 u_int sc_tx_process_limit;
524 u_int sc_rx_process_limit;
525 u_int sc_tx_intr_process_limit;
526 u_int sc_rx_intr_process_limit;
527
528 struct pci_attach_args sc_pa;
529 pci_intr_handle_t *sc_ihp;
530 void **sc_ihs;
531 unsigned int sc_nintrs;
532
533 bus_dma_tag_t sc_dmat;
534 bus_space_tag_t sc_memt;
535 bus_space_handle_t sc_memh;
536 bus_size_t sc_mems;
537
538 uint8_t sc_pf_id;
539 uint16_t sc_uplink_seid; /* le */
540 uint16_t sc_downlink_seid; /* le */
541 uint16_t sc_vsi_number; /* le */
542 uint16_t sc_seid;
543 unsigned int sc_base_queue;
544
545 pci_intr_type_t sc_intrtype;
546 unsigned int sc_msix_vector_queue;
547
548 struct ixl_dmamem sc_scratch;
549
550 const struct ixl_aq_regs *
551 sc_aq_regs;
552
553 kmutex_t sc_atq_lock;
554 kcondvar_t sc_atq_cv;
555 struct ixl_dmamem sc_atq;
556 unsigned int sc_atq_prod;
557 unsigned int sc_atq_cons;
558
559 struct ixl_dmamem sc_arq;
560 struct ixl_work sc_arq_task;
561 struct ixl_aq_bufs sc_arq_idle;
562 struct ixl_aq_buf *sc_arq_live[IXL_AQ_NUM];
563 unsigned int sc_arq_prod;
564 unsigned int sc_arq_cons;
565
566 struct ixl_work sc_link_state_task;
567 struct ixl_atq sc_link_state_atq;
568
569 struct ixl_dmamem sc_hmc_sd;
570 struct ixl_dmamem sc_hmc_pd;
571 struct ixl_hmc_entry sc_hmc_entries[IXL_HMC_COUNT];
572
573 unsigned int sc_tx_ring_ndescs;
574 unsigned int sc_rx_ring_ndescs;
575 unsigned int sc_nqueue_pairs;
576 unsigned int sc_nqueue_pairs_max;
577 unsigned int sc_nqueue_pairs_device;
578 struct ixl_queue_pair *sc_qps;
579
580 struct evcnt sc_event_atq;
581 struct evcnt sc_event_link;
582 struct evcnt sc_event_ecc_err;
583 struct evcnt sc_event_pci_exception;
584 struct evcnt sc_event_crit_err;
585 };
586
587 #define IXL_TXRX_PROCESS_UNLIMIT UINT_MAX
588 #define IXL_TX_PROCESS_LIMIT 256
589 #define IXL_RX_PROCESS_LIMIT 256
590 #define IXL_TX_INTR_PROCESS_LIMIT 256
591 #define IXL_RX_INTR_PROCESS_LIMIT 0U
592
593 #define delaymsec(_x) DELAY(1000 * (_x))
594 #ifdef IXL_DEBUG
595 #define DDPRINTF(sc, fmt, args...) \
596 do { \
597 if (sc != NULL) \
598 device_printf(sc->sc_dev, ""); \
599 printf("%s:\t" fmt, __func__, ##args); \
600 } while (0)
601 #else
602 #define DDPRINTF(sc, fmt, args...) __nothing
603 #endif
604 #define IXL_NOMSIX false
605
606 static enum i40e_mac_type
607 ixl_mactype(pci_product_id_t);
608 static void ixl_clear_hw(struct ixl_softc *);
609 static int ixl_pf_reset(struct ixl_softc *);
610
611 static int ixl_dmamem_alloc(struct ixl_softc *, struct ixl_dmamem *,
612 bus_size_t, bus_size_t);
613 static void ixl_dmamem_free(struct ixl_softc *, struct ixl_dmamem *);
614
615 static int ixl_arq_fill(struct ixl_softc *);
616 static void ixl_arq_unfill(struct ixl_softc *);
617
618 static int ixl_atq_poll(struct ixl_softc *, struct ixl_aq_desc *,
619 unsigned int);
620 static void ixl_atq_set(struct ixl_atq *,
621 void (*)(struct ixl_softc *, const struct ixl_aq_desc *));
622 static int ixl_atq_post(struct ixl_softc *, struct ixl_atq *);
623 static int ixl_atq_post_locked(struct ixl_softc *, struct ixl_atq *);
624 static void ixl_atq_done(struct ixl_softc *);
625 static int ixl_atq_exec(struct ixl_softc *, struct ixl_atq *);
626 static int ixl_get_version(struct ixl_softc *);
627 static int ixl_get_hw_capabilities(struct ixl_softc *);
628 static int ixl_pxe_clear(struct ixl_softc *);
629 static int ixl_lldp_shut(struct ixl_softc *);
630 static int ixl_get_mac(struct ixl_softc *);
631 static int ixl_get_switch_config(struct ixl_softc *);
632 static int ixl_phy_mask_ints(struct ixl_softc *);
633 static int ixl_get_phy_types(struct ixl_softc *, uint64_t *);
634 static int ixl_restart_an(struct ixl_softc *);
635 static int ixl_hmc(struct ixl_softc *);
636 static void ixl_hmc_free(struct ixl_softc *);
637 static int ixl_get_vsi(struct ixl_softc *);
638 static int ixl_set_vsi(struct ixl_softc *);
639 static void ixl_set_filter_control(struct ixl_softc *);
640 static void ixl_get_link_status(void *);
641 static int ixl_get_link_status_poll(struct ixl_softc *);
642 static int ixl_set_link_status(struct ixl_softc *,
643 const struct ixl_aq_desc *);
644 static void ixl_config_rss(struct ixl_softc *);
645 static int ixl_add_macvlan(struct ixl_softc *, const uint8_t *,
646 uint16_t, uint16_t);
647 static int ixl_remove_macvlan(struct ixl_softc *, uint8_t *, uint16_t,
648 uint16_t);
649 static void ixl_arq(void *);
650 static void ixl_hmc_pack(void *, const void *,
651 const struct ixl_hmc_pack *, unsigned int);
652 static uint32_t ixl_rd_rx_csr(struct ixl_softc *, uint32_t);
653 static void ixl_wr_rx_csr(struct ixl_softc *, uint32_t, uint32_t);
654
655 static int ixl_match(device_t, cfdata_t, void *);
656 static void ixl_attach(device_t, device_t, void *);
657 static int ixl_detach(device_t, int);
658
659 static void ixl_media_add(struct ixl_softc *, uint64_t);
660 static int ixl_media_change(struct ifnet *);
661 static void ixl_media_status(struct ifnet *, struct ifmediareq *);
662 static void ixl_watchdog(struct ifnet *);
663 static int ixl_ioctl(struct ifnet *, u_long, void *);
664 static void ixl_start(struct ifnet *);
665 static int ixl_transmit(struct ifnet *, struct mbuf *);
666 static void ixl_deferred_transmit(void *);
667 static int ixl_intr(void *);
668 static int ixl_queue_intr(void *);
669 static int ixl_other_intr(void *);
670 static void ixl_handle_queue(void *);
671 static void ixl_sched_handle_queue(struct ixl_softc *,
672 struct ixl_queue_pair *);
673 static int ixl_init(struct ifnet *);
674 static int ixl_init_locked(struct ixl_softc *);
675 static void ixl_stop(struct ifnet *, int);
676 static void ixl_stop_locked(struct ixl_softc *);
677 static int ixl_iff(struct ixl_softc *);
678 static int ixl_ifflags_cb(struct ethercom *);
679 static int ixl_setup_interrupts(struct ixl_softc *);
680 static int ixl_establish_intx(struct ixl_softc *);
681 static int ixl_establish_msix(struct ixl_softc *);
682 static void ixl_set_affinity_msix(struct ixl_softc *);
683 static void ixl_enable_queue_intr(struct ixl_softc *,
684 struct ixl_queue_pair *);
685 static void ixl_disable_queue_intr(struct ixl_softc *,
686 struct ixl_queue_pair *);
687 static void ixl_enable_other_intr(struct ixl_softc *);
688 static void ixl_disable_other_intr(struct ixl_softc *);
689 static void ixl_config_queue_intr(struct ixl_softc *);
690 static void ixl_config_other_intr(struct ixl_softc *);
691
692 static struct ixl_tx_ring *
693 ixl_txr_alloc(struct ixl_softc *, unsigned int);
694 static void ixl_txr_qdis(struct ixl_softc *, struct ixl_tx_ring *, int);
695 static void ixl_txr_config(struct ixl_softc *, struct ixl_tx_ring *);
696 static int ixl_txr_enabled(struct ixl_softc *, struct ixl_tx_ring *);
697 static int ixl_txr_disabled(struct ixl_softc *, struct ixl_tx_ring *);
698 static void ixl_txr_unconfig(struct ixl_softc *, struct ixl_tx_ring *);
699 static void ixl_txr_clean(struct ixl_softc *, struct ixl_tx_ring *);
700 static void ixl_txr_free(struct ixl_softc *, struct ixl_tx_ring *);
701 static int ixl_txeof(struct ixl_softc *, struct ixl_tx_ring *, u_int);
702
703 static struct ixl_rx_ring *
704 ixl_rxr_alloc(struct ixl_softc *, unsigned int);
705 static void ixl_rxr_config(struct ixl_softc *, struct ixl_rx_ring *);
706 static int ixl_rxr_enabled(struct ixl_softc *, struct ixl_rx_ring *);
707 static int ixl_rxr_disabled(struct ixl_softc *, struct ixl_rx_ring *);
708 static void ixl_rxr_unconfig(struct ixl_softc *, struct ixl_rx_ring *);
709 static void ixl_rxr_clean(struct ixl_softc *, struct ixl_rx_ring *);
710 static void ixl_rxr_free(struct ixl_softc *, struct ixl_rx_ring *);
711 static int ixl_rxeof(struct ixl_softc *, struct ixl_rx_ring *, u_int);
712 static int ixl_rxfill(struct ixl_softc *, struct ixl_rx_ring *);
713
714 static struct workqueue *
715 ixl_workq_create(const char *, pri_t, int, int);
716 static void ixl_workq_destroy(struct workqueue *);
717 static int ixl_workqs_teardown(device_t);
718 static void ixl_work_set(struct ixl_work *, void (*)(void *), void *);
719 static void ixl_work_add(struct workqueue *, struct ixl_work *);
720 static void ixl_work_wait(struct workqueue *, struct ixl_work *);
721 static void ixl_workq_work(struct work *, void *);
722 static const struct ixl_product *
723 ixl_lookup(const struct pci_attach_args *pa);
724 static void ixl_link_state_update(struct ixl_softc *,
725 const struct ixl_aq_desc *);
726 static int ixl_set_macvlan(struct ixl_softc *);
static int	ixl_setup_interrupts(struct ixl_softc *);
728 static void ixl_teardown_interrupts(struct ixl_softc *);
729 static int ixl_setup_stats(struct ixl_softc *);
730 static void ixl_teardown_stats(struct ixl_softc *);
731 static int ixl_setup_sysctls(struct ixl_softc *);
732 static void ixl_teardown_sysctls(struct ixl_softc *);
733 static int ixl_queue_pairs_alloc(struct ixl_softc *);
734 static void ixl_queue_pairs_free(struct ixl_softc *);
735
736 static const struct ixl_phy_type ixl_phy_type_map[] = {
737 { 1ULL << IXL_PHY_TYPE_SGMII, IFM_1000_SGMII },
738 { 1ULL << IXL_PHY_TYPE_1000BASE_KX, IFM_1000_KX },
739 { 1ULL << IXL_PHY_TYPE_10GBASE_KX4, IFM_10G_KX4 },
740 { 1ULL << IXL_PHY_TYPE_10GBASE_KR, IFM_10G_KR },
741 { 1ULL << IXL_PHY_TYPE_40GBASE_KR4, IFM_40G_KR4 },
742 { 1ULL << IXL_PHY_TYPE_XAUI |
743 1ULL << IXL_PHY_TYPE_XFI, IFM_10G_CX4 },
744 { 1ULL << IXL_PHY_TYPE_SFI, IFM_10G_SFI },
745 { 1ULL << IXL_PHY_TYPE_XLAUI |
746 1ULL << IXL_PHY_TYPE_XLPPI, IFM_40G_XLPPI },
747 { 1ULL << IXL_PHY_TYPE_40GBASE_CR4_CU |
748 1ULL << IXL_PHY_TYPE_40GBASE_CR4, IFM_40G_CR4 },
749 { 1ULL << IXL_PHY_TYPE_10GBASE_CR1_CU |
750 1ULL << IXL_PHY_TYPE_10GBASE_CR1, IFM_10G_CR1 },
751 { 1ULL << IXL_PHY_TYPE_10GBASE_AOC, IFM_10G_AOC },
752 { 1ULL << IXL_PHY_TYPE_40GBASE_AOC, IFM_40G_AOC },
753 { 1ULL << IXL_PHY_TYPE_100BASE_TX, IFM_100_TX },
754 { 1ULL << IXL_PHY_TYPE_1000BASE_T_OPTICAL |
755 1ULL << IXL_PHY_TYPE_1000BASE_T, IFM_1000_T },
756 { 1ULL << IXL_PHY_TYPE_10GBASE_T, IFM_10G_T },
757 { 1ULL << IXL_PHY_TYPE_10GBASE_SR, IFM_10G_SR },
758 { 1ULL << IXL_PHY_TYPE_10GBASE_LR, IFM_10G_LR },
759 { 1ULL << IXL_PHY_TYPE_10GBASE_SFPP_CU, IFM_10G_TWINAX },
760 { 1ULL << IXL_PHY_TYPE_40GBASE_SR4, IFM_40G_SR4 },
761 { 1ULL << IXL_PHY_TYPE_40GBASE_LR4, IFM_40G_LR4 },
762 { 1ULL << IXL_PHY_TYPE_1000BASE_SX, IFM_1000_SX },
763 { 1ULL << IXL_PHY_TYPE_1000BASE_LX, IFM_1000_LX },
764 { 1ULL << IXL_PHY_TYPE_20GBASE_KR2, IFM_20G_KR2 },
765 { 1ULL << IXL_PHY_TYPE_25GBASE_KR, IFM_25G_KR },
766 { 1ULL << IXL_PHY_TYPE_25GBASE_CR, IFM_25G_CR },
767 { 1ULL << IXL_PHY_TYPE_25GBASE_SR, IFM_25G_SR },
768 { 1ULL << IXL_PHY_TYPE_25GBASE_LR, IFM_25G_LR },
769 { 1ULL << IXL_PHY_TYPE_25GBASE_AOC, IFM_25G_AOC },
770 { 1ULL << IXL_PHY_TYPE_25GBASE_ACC, IFM_25G_CR },
771 };
772
773 static const struct ixl_speed_type ixl_speed_type_map[] = {
774 { IXL_AQ_LINK_SPEED_40GB, IF_Gbps(40) },
775 { IXL_AQ_LINK_SPEED_25GB, IF_Gbps(25) },
776 { IXL_AQ_LINK_SPEED_10GB, IF_Gbps(10) },
777 { IXL_AQ_LINK_SPEED_1000MB, IF_Mbps(1000) },
778 { IXL_AQ_LINK_SPEED_100MB, IF_Mbps(100)},
779 };
780
781 static const struct ixl_aq_regs ixl_pf_aq_regs = {
782 .atq_tail = I40E_PF_ATQT,
783 .atq_tail_mask = I40E_PF_ATQT_ATQT_MASK,
784 .atq_head = I40E_PF_ATQH,
785 .atq_head_mask = I40E_PF_ATQH_ATQH_MASK,
786 .atq_len = I40E_PF_ATQLEN,
787 .atq_bal = I40E_PF_ATQBAL,
788 .atq_bah = I40E_PF_ATQBAH,
789 .atq_len_enable = I40E_PF_ATQLEN_ATQENABLE_MASK,
790
791 .arq_tail = I40E_PF_ARQT,
792 .arq_tail_mask = I40E_PF_ARQT_ARQT_MASK,
793 .arq_head = I40E_PF_ARQH,
794 .arq_head_mask = I40E_PF_ARQH_ARQH_MASK,
795 .arq_len = I40E_PF_ARQLEN,
796 .arq_bal = I40E_PF_ARQBAL,
797 .arq_bah = I40E_PF_ARQBAH,
798 .arq_len_enable = I40E_PF_ARQLEN_ARQENABLE_MASK,
799 };
800
801 #define ixl_rd(_s, _r) \
802 bus_space_read_4((_s)->sc_memt, (_s)->sc_memh, (_r))
803 #define ixl_wr(_s, _r, _v) \
804 bus_space_write_4((_s)->sc_memt, (_s)->sc_memh, (_r), (_v))
805 #define ixl_barrier(_s, _r, _l, _o) \
806 bus_space_barrier((_s)->sc_memt, (_s)->sc_memh, (_r), (_l), (_o))
807 #define ixl_flush(_s) (void)ixl_rd((_s), I40E_GLGEN_STAT)
808 #define ixl_nqueues(_sc) (1 << ((_sc)->sc_nqueue_pairs - 1))
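/*
 * Typical read/mask/shift pattern with the register accessors above,
 * as used in ixl_attach() below:
 *
 *	firstq = ixl_rd(sc, I40E_PFLAN_QALLOC);
 *	firstq &= I40E_PFLAN_QALLOC_FIRSTQ_MASK;
 *	firstq >>= I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
 */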
809
810 static inline uint32_t
811 ixl_dmamem_hi(struct ixl_dmamem *ixm)
812 {
813 uint32_t retval;
814 uint64_t val;
815
816 if (sizeof(IXL_DMA_DVA(ixm)) > 4) {
817 val = (intptr_t)IXL_DMA_DVA(ixm);
818 retval = (uint32_t)(val >> 32);
819 } else {
820 retval = 0;
821 }
822
823 return retval;
824 }
825
826 static inline uint32_t
827 ixl_dmamem_lo(struct ixl_dmamem *ixm)
828 {
829
830 return (uint32_t)IXL_DMA_DVA(ixm);
831 }
832
833 static inline void
834 ixl_aq_dva(struct ixl_aq_desc *iaq, bus_addr_t addr)
835 {
836 uint64_t val;
837
838 if (sizeof(addr) > 4) {
839 val = (intptr_t)addr;
840 iaq->iaq_param[2] = htole32(val >> 32);
841 } else {
842 iaq->iaq_param[2] = htole32(0);
843 }
844
845 iaq->iaq_param[3] = htole32(addr);
846 }
847
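/*
 * Number of RX descriptors that can still be posted back to the
 * hardware given the current producer/consumer indices.  For example,
 * with ndescs = 1024, prod = 10 and cons = 8 this is
 * (1024 - 10) + 8 - 1 = 1021; one slot is kept empty so the ring never
 * becomes completely full.
 */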
848 static inline unsigned int
849 ixl_rxr_unrefreshed(unsigned int prod, unsigned int cons, unsigned int ndescs)
850 {
851 unsigned int num;
852
853 if (prod < cons)
854 num = cons - prod;
855 else
856 num = (ndescs - prod) + cons;
857
858 if (__predict_true(num > 0)) {
		/* device cannot receive packets if all descriptors are filled */
860 num -= 1;
861 }
862
863 return num;
864 }
865
866 CFATTACH_DECL3_NEW(ixl, sizeof(struct ixl_softc),
867 ixl_match, ixl_attach, ixl_detach, NULL, NULL, NULL,
868 DVF_DETACH_SHUTDOWN);
869
870 static const struct ixl_product ixl_products[] = {
871 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_SFP },
872 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_KX_B },
873 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_KX_C },
874 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_A },
875 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_B },
876 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_C },
877 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_T },
878 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_1 },
879 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_2 },
880 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_T4_10G },
881 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_BP },
882 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_SFP28 },
883 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_KX },
884 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_QSFP },
885 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_SFP },
886 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_1G_BASET },
887 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_10G_BASET },
888 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_I_SFP },
889 /* required last entry */
890 {0, 0}
891 };
892
893 static const struct ixl_product *
894 ixl_lookup(const struct pci_attach_args *pa)
895 {
896 const struct ixl_product *ixlp;
897
898 for (ixlp = ixl_products; ixlp->vendor_id != 0; ixlp++) {
899 if (PCI_VENDOR(pa->pa_id) == ixlp->vendor_id &&
900 PCI_PRODUCT(pa->pa_id) == ixlp->product_id)
901 return ixlp;
902 }
903
904 return NULL;
905 }
906
907 static int
908 ixl_match(device_t parent, cfdata_t match, void *aux)
909 {
910 const struct pci_attach_args *pa = aux;
911
912 return (ixl_lookup(pa) != NULL) ? 1 : 0;
913 }
914
915 static void
916 ixl_attach(device_t parent, device_t self, void *aux)
917 {
918 struct ixl_softc *sc;
919 struct pci_attach_args *pa = aux;
920 struct ifnet *ifp;
921 pcireg_t memtype, reg;
922 uint32_t firstq, port, ari, func;
923 uint64_t phy_types = 0;
924 char xnamebuf[32];
925 int tries, rv;
926
927 sc = device_private(self);
928 sc->sc_dev = self;
929 ifp = &sc->sc_ec.ec_if;
930
931 sc->sc_pa = *pa;
932 sc->sc_dmat = (pci_dma64_available(pa)) ?
933 pa->pa_dmat64 : pa->pa_dmat;
934 sc->sc_aq_regs = &ixl_pf_aq_regs;
935
936 reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
937 sc->sc_mac_type = ixl_mactype(PCI_PRODUCT(reg));
938
939 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IXL_PCIREG);
940 if (pci_mapreg_map(pa, IXL_PCIREG, memtype, 0,
941 &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems)) {
942 aprint_error(": unable to map registers\n");
943 return;
944 }
945
946 mutex_init(&sc->sc_cfg_lock, MUTEX_DEFAULT, IPL_SOFTNET);
947
948 firstq = ixl_rd(sc, I40E_PFLAN_QALLOC);
949 firstq &= I40E_PFLAN_QALLOC_FIRSTQ_MASK;
950 firstq >>= I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
951 sc->sc_base_queue = firstq;
952
953 ixl_clear_hw(sc);
954 if (ixl_pf_reset(sc) == -1) {
		/* error printed by ixl_pf_reset */
956 goto unmap;
957 }
958
959 port = ixl_rd(sc, I40E_PFGEN_PORTNUM);
960 port &= I40E_PFGEN_PORTNUM_PORT_NUM_MASK;
961 port >>= I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
962 aprint_normal(": port %u", port);
963
964 ari = ixl_rd(sc, I40E_GLPCI_CAPSUP);
965 ari &= I40E_GLPCI_CAPSUP_ARI_EN_MASK;
966 ari >>= I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
967
968 func = ixl_rd(sc, I40E_PF_FUNC_RID);
969 sc->sc_pf_id = func & (ari ? 0xff : 0x7);
970
971 /* initialise the adminq */
972
973 mutex_init(&sc->sc_atq_lock, MUTEX_DEFAULT, IPL_NET);
974
975 if (ixl_dmamem_alloc(sc, &sc->sc_atq,
976 sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
977 aprint_error("\n" "%s: unable to allocate atq\n",
978 device_xname(self));
979 goto unmap;
980 }
981
982 SIMPLEQ_INIT(&sc->sc_arq_idle);
983 ixl_work_set(&sc->sc_arq_task, ixl_arq, sc);
984 sc->sc_arq_cons = 0;
985 sc->sc_arq_prod = 0;
986
987 if (ixl_dmamem_alloc(sc, &sc->sc_arq,
988 sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
989 aprint_error("\n" "%s: unable to allocate arq\n",
990 device_xname(self));
991 goto free_atq;
992 }
993
994 if (!ixl_arq_fill(sc)) {
995 aprint_error("\n" "%s: unable to fill arq descriptors\n",
996 device_xname(self));
997 goto free_arq;
998 }
999
1000 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1001 0, IXL_DMA_LEN(&sc->sc_atq),
1002 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1003
1004 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1005 0, IXL_DMA_LEN(&sc->sc_arq),
1006 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1007
1008 for (tries = 0; tries < 10; tries++) {
1009 sc->sc_atq_cons = 0;
1010 sc->sc_atq_prod = 0;
1011
1012 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1013 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1014 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1015 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1016
1017 ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
1018
1019 ixl_wr(sc, sc->sc_aq_regs->atq_bal,
1020 ixl_dmamem_lo(&sc->sc_atq));
1021 ixl_wr(sc, sc->sc_aq_regs->atq_bah,
1022 ixl_dmamem_hi(&sc->sc_atq));
1023 ixl_wr(sc, sc->sc_aq_regs->atq_len,
1024 sc->sc_aq_regs->atq_len_enable | IXL_AQ_NUM);
1025
1026 ixl_wr(sc, sc->sc_aq_regs->arq_bal,
1027 ixl_dmamem_lo(&sc->sc_arq));
1028 ixl_wr(sc, sc->sc_aq_regs->arq_bah,
1029 ixl_dmamem_hi(&sc->sc_arq));
1030 ixl_wr(sc, sc->sc_aq_regs->arq_len,
1031 sc->sc_aq_regs->arq_len_enable | IXL_AQ_NUM);
1032
1033 rv = ixl_get_version(sc);
1034 if (rv == 0)
1035 break;
1036 if (rv != ETIMEDOUT) {
1037 aprint_error(", unable to get firmware version\n");
1038 goto shutdown;
1039 }
1040
1041 delaymsec(100);
1042 }
1043
1044 ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
1045
1046 if (sc->sc_mac_type == I40E_MAC_X722)
1047 sc->sc_nqueue_pairs_device = 128;
1048 else
1049 sc->sc_nqueue_pairs_device = 64;
1050
1051 rv = ixl_get_hw_capabilities(sc);
1052 if (rv != 0) {
1053 aprint_error(", GET HW CAPABILITIES %s\n",
1054 rv == ETIMEDOUT ? "timeout" : "error");
1055 goto shutdown;
1056 }
1057
1058 sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max =
1059 MIN((int)sc->sc_nqueue_pairs_device, ncpu);
1060 sc->sc_tx_ring_ndescs = 1024;
1061 sc->sc_rx_ring_ndescs = 1024;
1062
1063 KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_rx_ring_ndescs);
1064 KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_tx_ring_ndescs);
1065
1066 if (ixl_get_mac(sc) != 0) {
1067 /* error printed by ixl_get_mac */
1068 goto shutdown;
1069 }
1070
1071 aprint_normal("\n");
1072 aprint_naive("\n");
1073
1074 aprint_normal_dev(self, "Ethernet address %s\n",
1075 ether_sprintf(sc->sc_enaddr));
1076
1077 rv = ixl_pxe_clear(sc);
1078 if (rv != 0) {
1079 aprint_debug_dev(self, "CLEAR PXE MODE %s\n",
1080 rv == ETIMEDOUT ? "timeout" : "error");
1081 }
1082
1083 ixl_set_filter_control(sc);
1084
1085 if (ixl_hmc(sc) != 0) {
1086 /* error printed by ixl_hmc */
1087 goto shutdown;
1088 }
1089
1090 if (ixl_lldp_shut(sc) != 0) {
1091 /* error printed by ixl_lldp_shut */
1092 goto free_hmc;
1093 }
1094
1095 if (ixl_phy_mask_ints(sc) != 0) {
1096 /* error printed by ixl_phy_mask_ints */
1097 goto free_hmc;
1098 }
1099
1100 if (ixl_restart_an(sc) != 0) {
1101 /* error printed by ixl_restart_an */
1102 goto free_hmc;
1103 }
1104
1105 if (ixl_get_switch_config(sc) != 0) {
1106 /* error printed by ixl_get_switch_config */
1107 goto free_hmc;
1108 }
1109
1110 if (ixl_get_phy_types(sc, &phy_types) != 0) {
1111 /* error printed by ixl_get_phy_abilities */
1112 goto free_hmc;
1113 }
1114
1115 rv = ixl_get_link_status_poll(sc);
1116 if (rv != 0) {
1117 aprint_error_dev(self, "GET LINK STATUS %s\n",
1118 rv == ETIMEDOUT ? "timeout" : "error");
1119 goto free_hmc;
1120 }
1121
1122 if (ixl_dmamem_alloc(sc, &sc->sc_scratch,
1123 sizeof(struct ixl_aq_vsi_data), 8) != 0) {
1124 aprint_error_dev(self, "unable to allocate scratch buffer\n");
1125 goto free_hmc;
1126 }
1127
1128 if (ixl_get_vsi(sc) != 0) {
1129 /* error printed by ixl_get_vsi */
1130 goto free_scratch;
1131 }
1132
1133 if (ixl_set_vsi(sc) != 0) {
1134 /* error printed by ixl_set_vsi */
1135 goto free_scratch;
1136 }
1137
1138 if (ixl_queue_pairs_alloc(sc) != 0) {
1139 /* error printed by ixl_queue_pairs_alloc */
1140 goto free_scratch;
1141 }
1142
1143 if (ixl_setup_interrupts(sc) != 0) {
1144 /* error printed by ixl_setup_interrupts */
1145 goto free_queue_pairs;
1146 }
1147
1148 if (ixl_setup_stats(sc) != 0) {
1149 aprint_error_dev(self, "failed to setup event counters\n");
1150 goto teardown_intrs;
1151 }
1152
1153 if (ixl_setup_sysctls(sc) != 0) {
1154 /* error printed by ixl_setup_sysctls */
1155 goto teardown_stats;
1156 }
1157
1158 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_cfg", device_xname(self));
1159 sc->sc_workq = ixl_workq_create(xnamebuf, IXL_WORKQUEUE_PRI,
1160 IPL_NET, WQ_PERCPU | WQ_MPSAFE);
1161 if (sc->sc_workq == NULL)
1162 goto teardown_sysctls;
1163
1164 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_txrx", device_xname(self));
1165 sc->sc_workq_txrx = ixl_workq_create(xnamebuf, IXL_WORKQUEUE_PRI,
1166 IPL_NET, WQ_PERCPU | WQ_MPSAFE);
1167 if (sc->sc_workq_txrx == NULL)
1168 goto teardown_wqs;
1169
1170 snprintf(xnamebuf, sizeof(xnamebuf), "%s_atq_cv", device_xname(self));
1171 cv_init(&sc->sc_atq_cv, xnamebuf);
1172
1173 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
1174
1175 ifp->if_softc = sc;
1176 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1177 ifp->if_extflags = IFEF_MPSAFE;
1178 ifp->if_ioctl = ixl_ioctl;
1179 ifp->if_start = ixl_start;
1180 ifp->if_transmit = ixl_transmit;
1181 ifp->if_watchdog = ixl_watchdog;
1182 ifp->if_init = ixl_init;
1183 ifp->if_stop = ixl_stop;
1184 IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_ndescs);
1185 IFQ_SET_READY(&ifp->if_snd);
1186 #if 0
1187 ifp->if_capabilities |= IFCAP_CSUM_IPv4_Rx |
1188 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
1189 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
1190 #endif
1191 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
1192 #if 0
1193 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
1194 #endif
1195
1196 sc->sc_ec.ec_ifmedia = &sc->sc_media;
1197 ifmedia_init(&sc->sc_media, IFM_IMASK, ixl_media_change,
1198 ixl_media_status);
1199
1200 ixl_media_add(sc, phy_types);
1201 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1202 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
1203
1204 if_attach(ifp);
1205 if_deferred_start_init(ifp, NULL);
1206 ether_ifattach(ifp, sc->sc_enaddr);
1207 ether_set_ifflags_cb(&sc->sc_ec, ixl_ifflags_cb);
1208 (void)ixl_get_link_status(sc);
1209
1210 ixl_work_set(&sc->sc_link_state_task, ixl_get_link_status, sc);
1211
1212 ixl_config_other_intr(sc);
1213
1214 ixl_set_macvlan(sc);
1215
1216 ixl_enable_other_intr(sc);
1217
1218 sc->sc_txrx_workqueue = true;
1219 sc->sc_tx_process_limit = IXL_TX_PROCESS_LIMIT;
1220 sc->sc_rx_process_limit = IXL_RX_PROCESS_LIMIT;
1221 sc->sc_tx_intr_process_limit = IXL_TX_INTR_PROCESS_LIMIT;
1222 sc->sc_rx_intr_process_limit = IXL_RX_INTR_PROCESS_LIMIT;
1223
1224 if (pmf_device_register(self, NULL, NULL) != true)
1225 aprint_debug_dev(self, "couldn't establish power handler\n");
1226 sc->sc_attached = true;
1227 return;
1228
1229 teardown_wqs:
1230 config_finalize_register(self, ixl_workqs_teardown);
1231 teardown_sysctls:
1232 ixl_teardown_sysctls(sc);
1233 teardown_stats:
1234 ixl_teardown_stats(sc);
1235 teardown_intrs:
1236 ixl_teardown_interrupts(sc);
1237 free_queue_pairs:
1238 ixl_queue_pairs_free(sc);
1239 free_scratch:
1240 ixl_dmamem_free(sc, &sc->sc_scratch);
1241 free_hmc:
1242 ixl_hmc_free(sc);
1243 shutdown:
1244 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1245 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1246 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1247 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1248
1249 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0);
1250 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0);
1251 ixl_wr(sc, sc->sc_aq_regs->atq_len, 0);
1252
1253 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0);
1254 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0);
1255 ixl_wr(sc, sc->sc_aq_regs->arq_len, 0);
1256
1257 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1258 0, IXL_DMA_LEN(&sc->sc_arq),
1259 BUS_DMASYNC_POSTREAD);
1260 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1261 0, IXL_DMA_LEN(&sc->sc_atq),
1262 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1263
1264 ixl_arq_unfill(sc);
1265 free_arq:
1266 ixl_dmamem_free(sc, &sc->sc_arq);
1267 free_atq:
1268 ixl_dmamem_free(sc, &sc->sc_atq);
1269 unmap:
1270 mutex_destroy(&sc->sc_atq_lock);
1271 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
1272 mutex_destroy(&sc->sc_cfg_lock);
1273 sc->sc_mems = 0;
1274
1275 sc->sc_attached = false;
1276 }
1277
1278 static int
1279 ixl_detach(device_t self, int flags)
1280 {
1281 struct ixl_softc *sc = device_private(self);
1282 struct ifnet *ifp = &sc->sc_ec.ec_if;
1283
1284 if (!sc->sc_attached)
1285 return 0;
1286
1287 ixl_stop(ifp, 1);
1288
1289 if (sc->sc_workq != NULL) {
1290 ixl_workq_destroy(sc->sc_workq);
1291 sc->sc_workq = NULL;
1292 }
1293
1294 if (sc->sc_workq_txrx != NULL) {
1295 ixl_workq_destroy(sc->sc_workq_txrx);
1296 sc->sc_workq_txrx = NULL;
1297 }
1298
1299 ifmedia_delete_instance(&sc->sc_media, IFM_INST_ANY);
1300 ether_ifdetach(ifp);
1301 if_detach(ifp);
1302
1303 ixl_teardown_interrupts(sc);
1304 ixl_teardown_stats(sc);
1305
1306 ixl_queue_pairs_free(sc);
1307
1308 ixl_dmamem_free(sc, &sc->sc_scratch);
1309 ixl_hmc_free(sc);
1310
1311 /* shutdown */
1312 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1313 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1314 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1315 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1316
1317 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0);
1318 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0);
1319 ixl_wr(sc, sc->sc_aq_regs->atq_len, 0);
1320
1321 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0);
1322 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0);
1323 ixl_wr(sc, sc->sc_aq_regs->arq_len, 0);
1324
1325 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1326 0, IXL_DMA_LEN(&sc->sc_arq),
1327 BUS_DMASYNC_POSTREAD);
1328 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1329 0, IXL_DMA_LEN(&sc->sc_atq),
1330 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1331
1332 ixl_arq_unfill(sc);
1333
1334 ixl_dmamem_free(sc, &sc->sc_arq);
1335 ixl_dmamem_free(sc, &sc->sc_atq);
1336
1337 cv_destroy(&sc->sc_atq_cv);
1338 mutex_destroy(&sc->sc_atq_lock);
1339
1340 if (sc->sc_mems != 0) {
1341 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
1342 sc->sc_mems = 0;
1343 }
1344
1345 mutex_destroy(&sc->sc_cfg_lock);
1346
1347 return 0;
1348 }
1349
1350 static int
1351 ixl_workqs_teardown(device_t self)
1352 {
1353 struct ixl_softc *sc = device_private(self);
1354
1355 if (sc->sc_workq != NULL) {
1356 ixl_workq_destroy(sc->sc_workq);
1357 sc->sc_workq = NULL;
1358 }
1359
1360 if (sc->sc_workq_txrx != NULL) {
1361 ixl_workq_destroy(sc->sc_workq_txrx);
1362 sc->sc_workq_txrx = NULL;
1363 }
1364
1365 return 0;
1366 }
1367
1368 static void
1369 ixl_media_add(struct ixl_softc *sc, uint64_t phy_types)
1370 {
1371 struct ifmedia *ifm = &sc->sc_media;
1372 const struct ixl_phy_type *itype;
1373 unsigned int i;
1374
1375 for (i = 0; i < __arraycount(ixl_phy_type_map); i++) {
1376 itype = &ixl_phy_type_map[i];
1377
1378 if (ISSET(phy_types, itype->phy_type)) {
1379 ifmedia_add(ifm,
1380 IFM_ETHER | IFM_FDX | itype->ifm_type, 0, NULL);
1381
1382 if (itype->ifm_type == IFM_100_TX) {
1383 ifmedia_add(ifm, IFM_ETHER | itype->ifm_type,
1384 0, NULL);
1385 }
1386 }
1387 }
1388 }
1389
1390 static void
1391 ixl_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1392 {
1393 struct ixl_softc *sc = ifp->if_softc;
1394
1395 ifmr->ifm_status = sc->sc_media_status;
1396 ifmr->ifm_active = sc->sc_media_active;
1397
1398 mutex_enter(&sc->sc_cfg_lock);
1399 if (ifp->if_link_state == LINK_STATE_UP)
1400 SET(ifmr->ifm_status, IFM_ACTIVE);
1401 mutex_exit(&sc->sc_cfg_lock);
1402 }
1403
1404 static int
1405 ixl_media_change(struct ifnet *ifp)
1406 {
1407
1408 return 0;
1409 }
1410
1411 static void
1412 ixl_watchdog(struct ifnet *ifp)
1413 {
1414
1415 }
1416
1417 static void
1418 ixl_del_all_multiaddr(struct ixl_softc *sc)
1419 {
1420 struct ethercom *ec = &sc->sc_ec;
1421 struct ether_multi *enm;
1422 struct ether_multistep step;
1423
1424 ETHER_LOCK(ec);
1425 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1426 ETHER_NEXT_MULTI(step, enm)) {
1427 ixl_remove_macvlan(sc, enm->enm_addrlo, 0,
1428 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1429 }
1430 ETHER_UNLOCK(ec);
1431 }
1432
1433 static int
1434 ixl_add_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
1435 {
1436 struct ifnet *ifp = &sc->sc_ec.ec_if;
1437 int rv;
1438
1439 if (ISSET(ifp->if_flags, IFF_ALLMULTI))
1440 return 0;
1441
1442 if (memcmp(addrlo, addrhi, ETHER_ADDR_LEN) != 0) {
1443 ixl_del_all_multiaddr(sc);
1444 SET(ifp->if_flags, IFF_ALLMULTI);
1445 return 0;
1446 }
1447
1448 rv = ixl_add_macvlan(sc, addrlo, 0,
1449 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
1450
1451 if (rv == IXL_AQ_RC_ENOSPC) {
1452 ixl_del_all_multiaddr(sc);
1453 SET(ifp->if_flags, IFF_ALLMULTI);
1454 return 0;
1455 }
1456
1457 if (rv != IXL_AQ_RC_OK)
1458 return EIO;
1459
1460 return 0;
1461 }
1462
1463 static void
1464 ixl_del_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
1465 {
1466 struct ifnet *ifp = &sc->sc_ec.ec_if;
1467 struct ethercom *ec = &sc->sc_ec;
1468 struct ether_multi *enm, *enm_last;
1469 struct ether_multistep step;
1470 int rv;
1471
1472 if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
1473 ixl_remove_macvlan(sc, addrlo, 0,
1474 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1475 return;
1476 }
1477
1478 ETHER_LOCK(ec);
1479 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1480 ETHER_NEXT_MULTI(step, enm)) {
1481 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
1482 ETHER_ADDR_LEN) != 0) {
1483 ETHER_UNLOCK(ec);
1484 return;
1485 }
1486 }
1487
1488 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1489 ETHER_NEXT_MULTI(step, enm)) {
1490 rv = ixl_add_macvlan(sc, enm->enm_addrlo, 0,
1491 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
1492 if (rv != IXL_AQ_RC_OK)
1493 break;
1494 }
1495
1496 if (enm != NULL) {
1497 enm_last = enm;
1498 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1499 ETHER_NEXT_MULTI(step, enm)) {
1500 if (enm == enm_last)
1501 break;
1502
1503 ixl_remove_macvlan(sc, enm->enm_addrlo, 0,
1504 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1505 }
1506 } else {
1507 CLR(ifp->if_flags, IFF_ALLMULTI);
1508 }
1509
1510 ETHER_UNLOCK(ec);
1511 }
1512
1513 static int
1514 ixl_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1515 {
1516 struct ifreq *ifr = (struct ifreq *)data;
1517 struct ixl_softc *sc = (struct ixl_softc *)ifp->if_softc;
1518 struct ixl_tx_ring *txr;
1519 struct ixl_rx_ring *rxr;
1520 const struct sockaddr *sa;
1521 uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
1522 int s, error = 0;
1523 unsigned int i;
1524
1525 switch (cmd) {
1526 case SIOCADDMULTI:
1527 sa = ifreq_getaddr(SIOCADDMULTI, ifr);
1528 if (ether_addmulti(sa, &sc->sc_ec) == ENETRESET) {
1529 error = ether_multiaddr(sa, addrlo, addrhi);
1530 if (error != 0)
1531 return error;
1532
1533 if (ixl_add_multi(sc, addrlo, addrhi) != 0) {
1534 ether_delmulti(sa, &sc->sc_ec);
1535 error = EIO;
1536 }
1537 }
1538 break;
1539
1540 case SIOCDELMULTI:
1541 sa = ifreq_getaddr(SIOCDELMULTI, ifr);
1542 if (ether_delmulti(sa, &sc->sc_ec) == ENETRESET) {
1543 error = ether_multiaddr(sa, addrlo, addrhi);
1544 if (error != 0)
1545 return error;
1546
1547 ixl_del_multi(sc, addrlo, addrhi);
1548 }
1549 break;
1550
1551 case SIOCGIFDATA:
1552 case SIOCZIFDATA:
1553 ifp->if_ipackets = 0;
1554 ifp->if_ibytes = 0;
1555 ifp->if_iqdrops = 0;
1556 ifp->if_ierrors = 0;
1557 ifp->if_opackets = 0;
1558 ifp->if_obytes = 0;
1559 ifp->if_omcasts = 0;
1560
1561 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
1562 txr = sc->sc_qps[i].qp_txr;
1563 rxr = sc->sc_qps[i].qp_rxr;
1564
1565 mutex_enter(&rxr->rxr_lock);
1566 ifp->if_ipackets += rxr->rxr_ipackets;
1567 ifp->if_ibytes += rxr->rxr_ibytes;
1568 ifp->if_iqdrops += rxr->rxr_iqdrops;
1569 ifp->if_ierrors += rxr->rxr_ierrors;
1570 if (cmd == SIOCZIFDATA) {
1571 rxr->rxr_ipackets = 0;
1572 rxr->rxr_ibytes = 0;
1573 rxr->rxr_iqdrops = 0;
1574 rxr->rxr_ierrors = 0;
1575 }
1576 mutex_exit(&rxr->rxr_lock);
1577
			mutex_enter(&txr->txr_lock);
			ifp->if_opackets += txr->txr_opackets;
			ifp->if_obytes += txr->txr_obytes;
			ifp->if_omcasts += txr->txr_omcasts;
			if (cmd == SIOCZIFDATA) {
				txr->txr_opackets = 0;
				txr->txr_obytes = 0;
				txr->txr_omcasts = 0;
			}
			mutex_exit(&txr->txr_lock);
1588 }
1589 /* FALLTHROUGH */
1590 default:
1591 s = splnet();
1592 error = ether_ioctl(ifp, cmd, data);
1593 splx(s);
1594 }
1595
1596 if (error == ENETRESET)
1597 error = ixl_iff(sc);
1598
1599 return error;
1600 }
1601
1602 static enum i40e_mac_type
1603 ixl_mactype(pci_product_id_t id)
1604 {
1605
1606 switch (id) {
1607 case PCI_PRODUCT_INTEL_XL710_SFP:
1608 case PCI_PRODUCT_INTEL_XL710_KX_B:
1609 case PCI_PRODUCT_INTEL_XL710_KX_C:
1610 case PCI_PRODUCT_INTEL_XL710_QSFP_A:
1611 case PCI_PRODUCT_INTEL_XL710_QSFP_B:
1612 case PCI_PRODUCT_INTEL_XL710_QSFP_C:
1613 case PCI_PRODUCT_INTEL_X710_10G_T:
1614 case PCI_PRODUCT_INTEL_XL710_20G_BP_1:
1615 case PCI_PRODUCT_INTEL_XL710_20G_BP_2:
1616 case PCI_PRODUCT_INTEL_X710_T4_10G:
1617 case PCI_PRODUCT_INTEL_XXV710_25G_BP:
1618 case PCI_PRODUCT_INTEL_XXV710_25G_SFP28:
1619 return I40E_MAC_XL710;
1620
1621 case PCI_PRODUCT_INTEL_X722_KX:
1622 case PCI_PRODUCT_INTEL_X722_QSFP:
1623 case PCI_PRODUCT_INTEL_X722_SFP:
1624 case PCI_PRODUCT_INTEL_X722_1G_BASET:
1625 case PCI_PRODUCT_INTEL_X722_10G_BASET:
1626 case PCI_PRODUCT_INTEL_X722_I_SFP:
1627 return I40E_MAC_X722;
1628 }
1629
1630 return I40E_MAC_GENERIC;
1631 }
1632
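/*
 * Return the kernel virtual address of the i'th object of the given
 * HMC type within the backing pages, or NULL if the index is beyond
 * the number of objects reserved for that type.
 */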
1633 static inline void *
1634 ixl_hmc_kva(struct ixl_softc *sc, enum ixl_hmc_types type, unsigned int i)
1635 {
1636 uint8_t *kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
1637 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
1638
1639 if (i >= e->hmc_count)
1640 return NULL;
1641
1642 kva += e->hmc_base;
1643 kva += i * e->hmc_size;
1644
1645 return kva;
1646 }
1647
1648 static inline size_t
1649 ixl_hmc_len(struct ixl_softc *sc, enum ixl_hmc_types type)
1650 {
1651 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
1652
1653 return e->hmc_size;
1654 }
1655
1656 static void
1657 ixl_enable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp)
1658 {
1659 struct ixl_rx_ring *rxr = qp->qp_rxr;
1660
1661 ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid),
1662 I40E_PFINT_DYN_CTLN_INTENA_MASK |
1663 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1664 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
1665 ixl_flush(sc);
1666 }
1667
1668 static void
1669 ixl_disable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp)
1670 {
1671 struct ixl_rx_ring *rxr = qp->qp_rxr;
1672
1673 ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid),
1674 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
1675 ixl_flush(sc);
1676 }
1677
1678 static void
1679 ixl_enable_other_intr(struct ixl_softc *sc)
1680 {
1681
1682 ixl_wr(sc, I40E_PFINT_DYN_CTL0,
1683 I40E_PFINT_DYN_CTL0_INTENA_MASK |
1684 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
1685 (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
1686 ixl_flush(sc);
1687 }
1688
1689 static void
1690 ixl_disable_other_intr(struct ixl_softc *sc)
1691 {
1692
1693 ixl_wr(sc, I40E_PFINT_DYN_CTL0,
1694 (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
1695 ixl_flush(sc);
1696 }
1697
1698 static int
1699 ixl_reinit(struct ixl_softc *sc)
1700 {
1701 struct ixl_rx_ring *rxr;
1702 struct ixl_tx_ring *txr;
1703 unsigned int i;
1704 uint32_t reg;
1705
1706 KASSERT(mutex_owned(&sc->sc_cfg_lock));
1707
1708 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1709 txr = sc->sc_qps[i].qp_txr;
1710 rxr = sc->sc_qps[i].qp_rxr;
1711
1712 txr->txr_cons = txr->txr_prod = 0;
1713 rxr->rxr_cons = rxr->rxr_prod = 0;
1714
1715 ixl_txr_config(sc, txr);
1716 ixl_rxr_config(sc, rxr);
1717 }
1718
1719 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
1720 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_PREWRITE);
1721
1722 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1723 txr = sc->sc_qps[i].qp_txr;
1724 rxr = sc->sc_qps[i].qp_rxr;
1725
1726 ixl_wr(sc, I40E_QTX_CTL(i), I40E_QTX_CTL_PF_QUEUE |
1727 (sc->sc_pf_id << I40E_QTX_CTL_PF_INDX_SHIFT));
1728 ixl_flush(sc);
1729
1730 ixl_wr(sc, txr->txr_tail, txr->txr_prod);
1731 ixl_wr(sc, rxr->rxr_tail, rxr->rxr_prod);
1732
1733
1734 /* ixl_rxfill() needs lock held */
1735 mutex_enter(&rxr->rxr_lock);
1736 ixl_rxfill(sc, rxr);
1737 mutex_exit(&rxr->rxr_lock);
1738
1739 reg = ixl_rd(sc, I40E_QRX_ENA(i));
1740 SET(reg, I40E_QRX_ENA_QENA_REQ_MASK);
1741 ixl_wr(sc, I40E_QRX_ENA(i), reg);
1742 if (ixl_rxr_enabled(sc, rxr) != 0)
1743 goto stop;
1744
1745 ixl_txr_qdis(sc, txr, 1);
1746
1747 reg = ixl_rd(sc, I40E_QTX_ENA(i));
1748 SET(reg, I40E_QTX_ENA_QENA_REQ_MASK);
1749 ixl_wr(sc, I40E_QTX_ENA(i), reg);
1750
1751 if (ixl_txr_enabled(sc, txr) != 0)
1752 goto stop;
1753 }
1754
1755 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
1756 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE);
1757
1758 return 0;
1759
1760 stop:
1761 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
1762 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE);
1763
1764 return ETIMEDOUT;
1765 }
1766
1767 static int
1768 ixl_init_locked(struct ixl_softc *sc)
1769 {
1770 struct ifnet *ifp = &sc->sc_ec.ec_if;
1771 unsigned int i;
1772 int error;
1773
1774 KASSERT(mutex_owned(&sc->sc_cfg_lock));
1775
1776 if (sc->sc_dead) {
1777 return ENXIO;
1778 }
1779
1780 if (sc->sc_intrtype != PCI_INTR_TYPE_MSIX)
1781 sc->sc_nqueue_pairs = 1;
1782 else
1783 sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max;
1784
1785 error = ixl_reinit(sc);
1786 if (error) {
1787 ixl_stop_locked(sc);
1788 return error;
1789 }
1790
1791 SET(ifp->if_flags, IFF_RUNNING);
1792 CLR(ifp->if_flags, IFF_OACTIVE);
1793
1794 (void)ixl_get_link_status_poll(sc);
1795
1796 ixl_config_rss(sc);
1797 ixl_config_queue_intr(sc);
1798
1799 ixl_enable_other_intr(sc);
1800 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1801 ixl_enable_queue_intr(sc, &sc->sc_qps[i]);
1802 }
1803
1804 error = ixl_iff(sc);
1805 if (error) {
1806 ixl_stop_locked(sc);
1807 return error;
1808 }
1809
1810 return 0;
1811 }
1812
1813 static int
1814 ixl_init(struct ifnet *ifp)
1815 {
1816 struct ixl_softc *sc = ifp->if_softc;
1817 int error;
1818
1819 mutex_enter(&sc->sc_cfg_lock);
1820 error = ixl_init_locked(sc);
1821 mutex_exit(&sc->sc_cfg_lock);
1822
1823 return error;
1824 }
1825
1826 static int
1827 ixl_iff(struct ixl_softc *sc)
1828 {
1829 struct ifnet *ifp = &sc->sc_ec.ec_if;
1830 struct ixl_atq iatq;
1831 struct ixl_aq_desc *iaq;
1832 struct ixl_aq_vsi_promisc_param *param;
1833 int error;
1834
1835 if (!ISSET(ifp->if_flags, IFF_RUNNING))
1836 return 0;
1837
1838 memset(&iatq, 0, sizeof(iatq));
1839
1840 iaq = &iatq.iatq_desc;
1841 iaq->iaq_opcode = htole16(IXL_AQ_OP_SET_VSI_PROMISC);
1842
1843 param = (struct ixl_aq_vsi_promisc_param *)&iaq->iaq_param;
1844 param->flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_BCAST |
1845 IXL_AQ_VSI_PROMISC_FLAG_VLAN);
1846 if (ISSET(ifp->if_flags, IFF_PROMISC)) {
1847 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
1848 IXL_AQ_VSI_PROMISC_FLAG_MCAST);
1849 } else if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
1850 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_MCAST);
1851 }
1852 param->valid_flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
1853 IXL_AQ_VSI_PROMISC_FLAG_MCAST | IXL_AQ_VSI_PROMISC_FLAG_BCAST |
1854 IXL_AQ_VSI_PROMISC_FLAG_VLAN);
1855 param->seid = sc->sc_seid;
1856
1857 error = ixl_atq_exec(sc, &iatq);
1858 if (error)
1859 return error;
1860
1861 if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK))
1862 return EIO;
1863
1864 if (memcmp(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN) != 0) {
1865 ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
1866 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1867
1868 memcpy(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN);
1869 ixl_add_macvlan(sc, sc->sc_enaddr, 0,
1870 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
1871 }
1872 return 0;
1873 }
1874
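/*
 * ixl_stop_rendezvous:
 *	Wait for TX/RX handlers and admin queue users that may still be
 *	running.  Taking and immediately releasing each ring mutex and the
 *	admin queue mutex acts as a barrier against code already inside
 *	those critical sections, while ixl_work_wait() drains the queued
 *	per-queue-pair, admin receive and link state tasks.
 */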
1875 static void
1876 ixl_stop_rendezvous(struct ixl_softc *sc)
1877 {
1878 struct ixl_tx_ring *txr;
1879 struct ixl_rx_ring *rxr;
1880 unsigned int i;
1881
1882 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1883 txr = sc->sc_qps[i].qp_txr;
1884 rxr = sc->sc_qps[i].qp_rxr;
1885
1886 mutex_enter(&txr->txr_lock);
1887 mutex_exit(&txr->txr_lock);
1888
1889 mutex_enter(&rxr->rxr_lock);
1890 mutex_exit(&rxr->rxr_lock);
1891
1892 ixl_work_wait(sc->sc_workq_txrx,
1893 &sc->sc_qps[i].qp_task);
1894 }
1895
1896 mutex_enter(&sc->sc_atq_lock);
1897 mutex_exit(&sc->sc_atq_lock);
1898
1899 ixl_work_wait(sc->sc_workq, &sc->sc_arq_task);
1900 ixl_work_wait(sc->sc_workq, &sc->sc_link_state_task);
1901
1902 }
1903
1904 static void
1905 ixl_stop_locked(struct ixl_softc *sc)
1906 {
1907 struct ifnet *ifp = &sc->sc_ec.ec_if;
1908 struct ixl_rx_ring *rxr;
1909 struct ixl_tx_ring *txr;
1910 unsigned int i;
1911 uint32_t reg;
1912
1913 KASSERT(mutex_owned(&sc->sc_cfg_lock));
1914
1915 CLR(ifp->if_flags, IFF_RUNNING | IFF_OACTIVE);
1916
1917 ixl_disable_other_intr(sc);
1918 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1919 txr = sc->sc_qps[i].qp_txr;
1920 rxr = sc->sc_qps[i].qp_rxr;
1921
1922 ixl_disable_queue_intr(sc, &sc->sc_qps[i]);
1923
1924 mutex_enter(&txr->txr_lock);
1925 ixl_txr_qdis(sc, txr, 0);
1926 /* XXX wait at least 400 usec for all tx queues in one go */
1927 ixl_flush(sc);
1928 DELAY(500);
1929
1930 reg = ixl_rd(sc, I40E_QTX_ENA(i));
1931 CLR(reg, I40E_QTX_ENA_QENA_REQ_MASK);
1932 ixl_wr(sc, I40E_QTX_ENA(i), reg);
1933 		/* XXX wait 50ms from completion of the TX queue disable */
1934 ixl_flush(sc);
1935 DELAY(50);
1936
1937 if (ixl_txr_disabled(sc, txr) != 0) {
1938 mutex_exit(&txr->txr_lock);
1939 goto die;
1940 }
1941 mutex_exit(&txr->txr_lock);
1942
1943 mutex_enter(&rxr->rxr_lock);
1944 reg = ixl_rd(sc, I40E_QRX_ENA(i));
1945 CLR(reg, I40E_QRX_ENA_QENA_REQ_MASK);
1946 ixl_wr(sc, I40E_QRX_ENA(i), reg);
1947 /* XXX wait 50ms from completion of the RX queue disable */
1948 ixl_flush(sc);
1949 DELAY(50);
1950
1951 if (ixl_rxr_disabled(sc, rxr) != 0) {
1952 mutex_exit(&rxr->rxr_lock);
1953 goto die;
1954 }
1955 mutex_exit(&rxr->rxr_lock);
1956 }
1957
1958 ixl_stop_rendezvous(sc);
1959
1960 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1961 txr = sc->sc_qps[i].qp_txr;
1962 rxr = sc->sc_qps[i].qp_rxr;
1963
1964 ixl_txr_unconfig(sc, txr);
1965 ixl_rxr_unconfig(sc, rxr);
1966
1967 ixl_txr_clean(sc, txr);
1968 ixl_rxr_clean(sc, rxr);
1969 }
1970
1971 return;
1972 die:
1973 sc->sc_dead = true;
1974 	log(LOG_CRIT, "%s: failed to shut down rings\n",
1975 device_xname(sc->sc_dev));
1976 return;
1977 }
1978
1979 static void
1980 ixl_stop(struct ifnet *ifp, int disable)
1981 {
1982 struct ixl_softc *sc = ifp->if_softc;
1983
1984 mutex_enter(&sc->sc_cfg_lock);
1985 ixl_stop_locked(sc);
1986 mutex_exit(&sc->sc_cfg_lock);
1987 }
1988
1989 static int
1990 ixl_queue_pairs_alloc(struct ixl_softc *sc)
1991 {
1992 struct ixl_queue_pair *qp;
1993 unsigned int i;
1994 size_t sz;
1995
1996 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
1997 sc->sc_qps = kmem_zalloc(sz, KM_SLEEP);
1998
1999 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2000 qp = &sc->sc_qps[i];
2001
2002 qp->qp_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
2003 ixl_handle_queue, qp);
2004
2005 qp->qp_txr = ixl_txr_alloc(sc, i);
2006 if (qp->qp_txr == NULL)
2007 goto free;
2008
2009 qp->qp_rxr = ixl_rxr_alloc(sc, i);
2010 if (qp->qp_rxr == NULL)
2011 goto free;
2012
2013 qp->qp_sc = sc;
2014 ixl_work_set(&qp->qp_task, ixl_handle_queue, qp);
2015 snprintf(qp->qp_name, sizeof(qp->qp_name),
2016 "%s-TXRX%d", device_xname(sc->sc_dev), i);
2017 }
2018
2019 return 0;
2020 free:
2021 if (sc->sc_qps != NULL) {
2022 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2023 qp = &sc->sc_qps[i];
2024
2025 if (qp->qp_txr != NULL)
2026 ixl_txr_free(sc, qp->qp_txr);
2027 if (qp->qp_rxr != NULL)
2028 ixl_rxr_free(sc, qp->qp_rxr);
2029 }
2030
2031 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2032 kmem_free(sc->sc_qps, sz);
2033 sc->sc_qps = NULL;
2034 }
2035
2036 return -1;
2037 }
2038
2039 static void
2040 ixl_queue_pairs_free(struct ixl_softc *sc)
2041 {
2042 struct ixl_queue_pair *qp;
2043 unsigned int i;
2044 size_t sz;
2045
2046 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2047 qp = &sc->sc_qps[i];
2048 ixl_txr_free(sc, qp->qp_txr);
2049 ixl_rxr_free(sc, qp->qp_rxr);
2050 }
2051
2052 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2053 kmem_free(sc->sc_qps, sz);
2054 sc->sc_qps = NULL;
2055 }
2056
2057 static struct ixl_tx_ring *
2058 ixl_txr_alloc(struct ixl_softc *sc, unsigned int qid)
2059 {
2060 struct ixl_tx_ring *txr = NULL;
2061 struct ixl_tx_map *maps = NULL, *txm;
2062 unsigned int i;
2063
2064 txr = kmem_zalloc(sizeof(*txr), KM_SLEEP);
2065 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_tx_ring_ndescs,
2066 KM_SLEEP);
2067
2068 if (ixl_dmamem_alloc(sc, &txr->txr_mem,
2069 sizeof(struct ixl_tx_desc) * sc->sc_tx_ring_ndescs,
2070 IXL_TX_QUEUE_ALIGN) != 0)
2071 goto free;
2072
2073 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2074 txm = &maps[i];
2075
2076 if (bus_dmamap_create(sc->sc_dmat,
2077 IXL_HARDMTU, IXL_TX_PKT_DESCS, IXL_HARDMTU, 0,
2078 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &txm->txm_map) != 0)
2079 goto uncreate;
2080
2081 txm->txm_eop = -1;
2082 txm->txm_m = NULL;
2083 }
2084
2085 txr->txr_cons = txr->txr_prod = 0;
2086 txr->txr_maps = maps;
2087
2088 txr->txr_intrq = pcq_create(sc->sc_tx_ring_ndescs, KM_NOSLEEP);
2089 if (txr->txr_intrq == NULL)
2090 goto uncreate;
2091
2092 txr->txr_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
2093 ixl_deferred_transmit, txr);
2094 if (txr->txr_si == NULL)
2095 goto destroy_pcq;
2096
2097 txr->txr_tail = I40E_QTX_TAIL(qid);
2098 txr->txr_qid = qid;
2099 txr->txr_sc = sc;
2100 mutex_init(&txr->txr_lock, MUTEX_DEFAULT, IPL_NET);
2101
2102 return txr;
2103
2104 destroy_pcq:
2105 pcq_destroy(txr->txr_intrq);
2106 uncreate:
2107 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2108 txm = &maps[i];
2109
2110 if (txm->txm_map == NULL)
2111 continue;
2112
2113 bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2114 }
2115
2116 ixl_dmamem_free(sc, &txr->txr_mem);
2117 free:
2118 kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs);
2119 kmem_free(txr, sizeof(*txr));
2120
2121 return NULL;
2122 }
2123
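/*
 * ixl_txr_qdis:
 *	Set or clear the pre-queue-disable request for a TX queue through
 *	the I40E_GLLAN_TXPRE_QDIS registers.  Each register covers a bank
 *	of 128 queues, so the absolute queue id is split into a register
 *	index and an in-register queue index.
 */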
2124 static void
2125 ixl_txr_qdis(struct ixl_softc *sc, struct ixl_tx_ring *txr, int enable)
2126 {
2127 unsigned int qid;
2128 bus_size_t reg;
2129 uint32_t r;
2130
2131 qid = txr->txr_qid + sc->sc_base_queue;
2132 reg = I40E_GLLAN_TXPRE_QDIS(qid / 128);
2133 qid %= 128;
2134
2135 r = ixl_rd(sc, reg);
2136 CLR(r, I40E_GLLAN_TXPRE_QDIS_QINDX_MASK);
2137 SET(r, qid << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
2138 SET(r, enable ? I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK :
2139 I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK);
2140 ixl_wr(sc, reg, r);
2141 }
2142
2143 static void
2144 ixl_txr_config(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2145 {
2146 struct ixl_hmc_txq txq;
2147 struct ixl_aq_vsi_data *data = IXL_DMA_KVA(&sc->sc_scratch);
2148 void *hmc;
2149
2150 memset(&txq, 0, sizeof(txq));
2151 txq.head = htole16(txr->txr_cons);
2152 txq.new_context = 1;
2153 txq.base = htole64(IXL_DMA_DVA(&txr->txr_mem) / IXL_HMC_TXQ_BASE_UNIT);
2154 txq.head_wb_ena = IXL_HMC_TXQ_DESC_WB;
2155 txq.qlen = htole16(sc->sc_tx_ring_ndescs);
2156 txq.tphrdesc_ena = 0;
2157 txq.tphrpacket_ena = 0;
2158 txq.tphwdesc_ena = 0;
2159 txq.rdylist = data->qs_handle[0];
2160
2161 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2162 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2163 ixl_hmc_pack(hmc, &txq, ixl_hmc_pack_txq,
2164 __arraycount(ixl_hmc_pack_txq));
2165 }
2166
2167 static void
2168 ixl_txr_unconfig(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2169 {
2170 void *hmc;
2171
2172 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2173 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2174 }
2175
2176 static void
2177 ixl_txr_clean(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2178 {
2179 struct ixl_tx_map *maps, *txm;
2180 bus_dmamap_t map;
2181 unsigned int i;
2182
2183 maps = txr->txr_maps;
2184 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2185 txm = &maps[i];
2186
2187 if (txm->txm_m == NULL)
2188 continue;
2189
2190 map = txm->txm_map;
2191 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2192 BUS_DMASYNC_POSTWRITE);
2193 bus_dmamap_unload(sc->sc_dmat, map);
2194
2195 m_freem(txm->txm_m);
2196 txm->txm_m = NULL;
2197 }
2198 }
2199
2200 static int
2201 ixl_txr_enabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2202 {
2203 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2204 uint32_t reg;
2205 int i;
2206
2207 for (i = 0; i < 10; i++) {
2208 reg = ixl_rd(sc, ena);
2209 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK))
2210 return 0;
2211
2212 delaymsec(10);
2213 }
2214
2215 return ETIMEDOUT;
2216 }
2217
2218 static int
2219 ixl_txr_disabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2220 {
2221 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2222 uint32_t reg;
2223 int i;
2224
2225 KASSERT(mutex_owned(&txr->txr_lock));
2226
2227 for (i = 0; i < 20; i++) {
2228 reg = ixl_rd(sc, ena);
2229 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK) == 0)
2230 return 0;
2231
2232 delaymsec(10);
2233 }
2234
2235 return ETIMEDOUT;
2236 }
2237
2238 static void
2239 ixl_txr_free(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2240 {
2241 struct ixl_tx_map *maps, *txm;
2242 struct mbuf *m;
2243 unsigned int i;
2244
2245 softint_disestablish(txr->txr_si);
2246 while ((m = pcq_get(txr->txr_intrq)) != NULL)
2247 m_freem(m);
2248 pcq_destroy(txr->txr_intrq);
2249
2250 maps = txr->txr_maps;
2251 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2252 txm = &maps[i];
2253
2254 bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2255 }
2256
2257 ixl_dmamem_free(sc, &txr->txr_mem);
2258 mutex_destroy(&txr->txr_lock);
2259 kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs);
2260 kmem_free(txr, sizeof(*txr));
2261 }
2262
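/*
 * ixl_load_mbuf:
 *	Load an mbuf chain into a TX DMA map.  If the chain has too many
 *	segments (EFBIG), retry once after compacting it with m_defrag()
 *	and account the outcome in the ring's event counters.
 */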
2263 static inline int
2264 ixl_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf **m0,
2265 struct ixl_tx_ring *txr)
2266 {
2267 struct mbuf *m;
2268 int error;
2269
2270 KASSERT(mutex_owned(&txr->txr_lock));
2271
2272 m = *m0;
2273
2274 error = bus_dmamap_load_mbuf(dmat, map, m,
2275 BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT);
2276 if (error != EFBIG)
2277 return error;
2278
2279 m = m_defrag(m, M_DONTWAIT);
2280 if (m != NULL) {
2281 *m0 = m;
2282 txr->txr_defragged.ev_count++;
2283
2284 error = bus_dmamap_load_mbuf(dmat, map, m,
2285 BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT);
2286 } else {
2287 txr->txr_defrag_failed.ev_count++;
2288 error = ENOBUFS;
2289 }
2290
2291 return error;
2292 }
2293
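/*
 * ixl_tx_common_locked:
 *	Common transmit path shared by ixl_start() (is_transmit == false,
 *	packets dequeued from if_snd) and ixl_transmit()/
 *	ixl_deferred_transmit() (is_transmit == true, packets dequeued
 *	from the per-ring pcq).  One descriptor is filled per DMA segment
 *	and the tail register is written once at the end if any packet
 *	was queued.  Called with txr_lock held.
 */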
2294 static void
2295 ixl_tx_common_locked(struct ifnet *ifp, struct ixl_tx_ring *txr,
2296 bool is_transmit)
2297 {
2298 struct ixl_softc *sc = ifp->if_softc;
2299 struct ixl_tx_desc *ring, *txd;
2300 struct ixl_tx_map *txm;
2301 bus_dmamap_t map;
2302 struct mbuf *m;
2303 uint64_t cmd;
2304 unsigned int prod, free, last, i;
2305 unsigned int mask;
2306 int post = 0;
2307
2308 KASSERT(mutex_owned(&txr->txr_lock));
2309
2310 if (ifp->if_link_state != LINK_STATE_UP
2311 || !ISSET(ifp->if_flags, IFF_RUNNING)
2312 || (!is_transmit && ISSET(ifp->if_flags, IFF_OACTIVE))) {
2313 if (!is_transmit)
2314 IFQ_PURGE(&ifp->if_snd);
2315 return;
2316 }
2317
2318 prod = txr->txr_prod;
2319 free = txr->txr_cons;
2320 if (free <= prod)
2321 free += sc->sc_tx_ring_ndescs;
2322 free -= prod;
2323
2324 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2325 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTWRITE);
2326
2327 ring = IXL_DMA_KVA(&txr->txr_mem);
2328 mask = sc->sc_tx_ring_ndescs - 1;
2329 last = prod;
2330 cmd = 0;
2331 txd = NULL;
2332
2333 for (;;) {
2334 if (free <= IXL_TX_PKT_DESCS) {
2335 if (!is_transmit)
2336 SET(ifp->if_flags, IFF_OACTIVE);
2337 break;
2338 }
2339
2340 if (is_transmit)
2341 m = pcq_get(txr->txr_intrq);
2342 else
2343 IFQ_DEQUEUE(&ifp->if_snd, m);
2344
2345 if (m == NULL)
2346 break;
2347
2348 txm = &txr->txr_maps[prod];
2349 map = txm->txm_map;
2350
2351 if (ixl_load_mbuf(sc->sc_dmat, map, &m, txr) != 0) {
2352 txr->txr_oerrors++;
2353 m_freem(m);
2354 continue;
2355 }
2356
2357 bus_dmamap_sync(sc->sc_dmat, map, 0,
2358 map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2359
2360 for (i = 0; i < (unsigned int)map->dm_nsegs; i++) {
2361 txd = &ring[prod];
2362
2363 cmd = (uint64_t)map->dm_segs[i].ds_len <<
2364 IXL_TX_DESC_BSIZE_SHIFT;
2365 cmd |= IXL_TX_DESC_DTYPE_DATA | IXL_TX_DESC_CMD_ICRC;
2366
2367 txd->addr = htole64(map->dm_segs[i].ds_addr);
2368 txd->cmd = htole64(cmd);
2369
2370 last = prod;
2371
2372 prod++;
2373 prod &= mask;
2374 }
2375 cmd |= IXL_TX_DESC_CMD_EOP | IXL_TX_DESC_CMD_RS;
2376 txd->cmd = htole64(cmd);
2377
2378 txm->txm_m = m;
2379 txm->txm_eop = last;
2380
2381 bpf_mtap(ifp, m, BPF_D_OUT);
2382
2383 free -= i;
2384 post = 1;
2385 }
2386
2387 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2388 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREWRITE);
2389
2390 if (post) {
2391 txr->txr_prod = prod;
2392 ixl_wr(sc, txr->txr_tail, prod);
2393 }
2394 }
2395
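/*
 * ixl_txeof:
 *	Reclaim completed TX descriptors, processing at most txlimit
 *	packets per call.  A packet is done once the descriptor recorded
 *	in txm_eop has been written back with IXL_TX_DESC_DTYPE_DONE.
 *	Returns nonzero if the limit was hit and more work may remain.
 */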
2396 static int
2397 ixl_txeof(struct ixl_softc *sc, struct ixl_tx_ring *txr, u_int txlimit)
2398 {
2399 struct ifnet *ifp = &sc->sc_ec.ec_if;
2400 struct ixl_tx_desc *ring, *txd;
2401 struct ixl_tx_map *txm;
2402 struct mbuf *m;
2403 bus_dmamap_t map;
2404 unsigned int cons, prod, last;
2405 unsigned int mask;
2406 uint64_t dtype;
2407 int done = 0, more = 0;
2408
2409 KASSERT(mutex_owned(&txr->txr_lock));
2410
2411 prod = txr->txr_prod;
2412 cons = txr->txr_cons;
2413
2414 if (cons == prod)
2415 return 0;
2416
2417 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2418 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTREAD);
2419
2420 ring = IXL_DMA_KVA(&txr->txr_mem);
2421 mask = sc->sc_tx_ring_ndescs - 1;
2422
2423 do {
2424 if (txlimit-- <= 0) {
2425 more = 1;
2426 break;
2427 }
2428
2429 txm = &txr->txr_maps[cons];
2430 last = txm->txm_eop;
2431 txd = &ring[last];
2432
2433 dtype = txd->cmd & htole64(IXL_TX_DESC_DTYPE_MASK);
2434 if (dtype != htole64(IXL_TX_DESC_DTYPE_DONE))
2435 break;
2436
2437 map = txm->txm_map;
2438
2439 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2440 BUS_DMASYNC_POSTWRITE);
2441 bus_dmamap_unload(sc->sc_dmat, map);
2442
2443 m = txm->txm_m;
2444 if (m != NULL) {
2445 txr->txr_opackets++;
2446 txr->txr_obytes += m->m_pkthdr.len;
2447 if (ISSET(m->m_flags, M_MCAST))
2448 txr->txr_omcasts++;
2449 m_freem(m);
2450 }
2451
2452 txm->txm_m = NULL;
2453 txm->txm_eop = -1;
2454
2455 cons = last + 1;
2456 cons &= mask;
2457 done = 1;
2458 } while (cons != prod);
2459
2460 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2461 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREREAD);
2462
2463 txr->txr_cons = cons;
2464
2465 if (done) {
2466 softint_schedule(txr->txr_si);
2467 if (txr->txr_qid == 0) {
2468 CLR(ifp->if_flags, IFF_OACTIVE);
2469 if_schedule_deferred_start(ifp);
2470 }
2471 }
2472
2473 return more;
2474 }
2475
2476 static void
2477 ixl_start(struct ifnet *ifp)
2478 {
2479 struct ixl_softc *sc;
2480 struct ixl_tx_ring *txr;
2481
2482 sc = ifp->if_softc;
2483 txr = sc->sc_qps[0].qp_txr;
2484
2485 mutex_enter(&txr->txr_lock);
2486 ixl_tx_common_locked(ifp, txr, false);
2487 mutex_exit(&txr->txr_lock);
2488 }
2489
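/*
 * ixl_select_txqueue:
 *	Pick a TX queue for if_transmit() from the index of the current
 *	CPU; the mbuf itself is not examined.
 */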
2490 static inline unsigned int
2491 ixl_select_txqueue(struct ixl_softc *sc, struct mbuf *m)
2492 {
2493 u_int cpuid;
2494
2495 cpuid = cpu_index(curcpu());
2496
2497 return (unsigned int)(cpuid % sc->sc_nqueue_pairs);
2498 }
2499
2500 static int
2501 ixl_transmit(struct ifnet *ifp, struct mbuf *m)
2502 {
2503 struct ixl_softc *sc;
2504 struct ixl_tx_ring *txr;
2505 unsigned int qid;
2506
2507 sc = ifp->if_softc;
2508 qid = ixl_select_txqueue(sc, m);
2509
2510 txr = sc->sc_qps[qid].qp_txr;
2511
2512 if (__predict_false(!pcq_put(txr->txr_intrq, m))) {
2513 mutex_enter(&txr->txr_lock);
2514 txr->txr_pcqdrop.ev_count++;
2515 mutex_exit(&txr->txr_lock);
2516
2517 m_freem(m);
2518 return ENOBUFS;
2519 }
2520
2521 if (mutex_tryenter(&txr->txr_lock)) {
2522 ixl_tx_common_locked(ifp, txr, true);
2523 mutex_exit(&txr->txr_lock);
2524 } else {
2525 softint_schedule(txr->txr_si);
2526 }
2527
2528 return 0;
2529 }
2530
2531 static void
2532 ixl_deferred_transmit(void *xtxr)
2533 {
2534 struct ixl_tx_ring *txr = xtxr;
2535 struct ixl_softc *sc = txr->txr_sc;
2536 struct ifnet *ifp = &sc->sc_ec.ec_if;
2537
2538 mutex_enter(&txr->txr_lock);
2539 txr->txr_transmitdef.ev_count++;
2540 if (pcq_peek(txr->txr_intrq) != NULL)
2541 ixl_tx_common_locked(ifp, txr, true);
2542 mutex_exit(&txr->txr_lock);
2543 }
2544
2545 static struct ixl_rx_ring *
2546 ixl_rxr_alloc(struct ixl_softc *sc, unsigned int qid)
2547 {
2548 struct ixl_rx_ring *rxr = NULL;
2549 struct ixl_rx_map *maps = NULL, *rxm;
2550 unsigned int i;
2551
2552 rxr = kmem_zalloc(sizeof(*rxr), KM_SLEEP);
2553 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_rx_ring_ndescs,
2554 KM_SLEEP);
2555
2556 if (ixl_dmamem_alloc(sc, &rxr->rxr_mem,
2557 sizeof(struct ixl_rx_rd_desc_16) * sc->sc_rx_ring_ndescs,
2558 IXL_RX_QUEUE_ALIGN) != 0)
2559 goto free;
2560
2561 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2562 rxm = &maps[i];
2563
2564 if (bus_dmamap_create(sc->sc_dmat,
2565 IXL_HARDMTU, 1, IXL_HARDMTU, 0,
2566 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &rxm->rxm_map) != 0)
2567 goto uncreate;
2568
2569 rxm->rxm_m = NULL;
2570 }
2571
2572 rxr->rxr_cons = rxr->rxr_prod = 0;
2573 rxr->rxr_m_head = NULL;
2574 rxr->rxr_m_tail = &rxr->rxr_m_head;
2575 rxr->rxr_maps = maps;
2576
2577 rxr->rxr_tail = I40E_QRX_TAIL(qid);
2578 rxr->rxr_qid = qid;
2579 mutex_init(&rxr->rxr_lock, MUTEX_DEFAULT, IPL_NET);
2580
2581 return rxr;
2582
2583 uncreate:
2584 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2585 rxm = &maps[i];
2586
2587 if (rxm->rxm_map == NULL)
2588 continue;
2589
2590 bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
2591 }
2592
2593 ixl_dmamem_free(sc, &rxr->rxr_mem);
2594 free:
2595 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs);
2596 kmem_free(rxr, sizeof(*rxr));
2597
2598 return NULL;
2599 }
2600
2601 static void
2602 ixl_rxr_clean(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2603 {
2604 struct ixl_rx_map *maps, *rxm;
2605 bus_dmamap_t map;
2606 unsigned int i;
2607
2608 maps = rxr->rxr_maps;
2609 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2610 rxm = &maps[i];
2611
2612 if (rxm->rxm_m == NULL)
2613 continue;
2614
2615 map = rxm->rxm_map;
2616 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2617 BUS_DMASYNC_POSTWRITE);
2618 bus_dmamap_unload(sc->sc_dmat, map);
2619
2620 m_freem(rxm->rxm_m);
2621 rxm->rxm_m = NULL;
2622 }
2623
2624 m_freem(rxr->rxr_m_head);
2625 rxr->rxr_m_head = NULL;
2626 rxr->rxr_m_tail = &rxr->rxr_m_head;
2627
2628 rxr->rxr_prod = rxr->rxr_cons = 0;
2629 }
2630
2631 static int
2632 ixl_rxr_enabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2633 {
2634 bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
2635 uint32_t reg;
2636 int i;
2637
2638 for (i = 0; i < 10; i++) {
2639 reg = ixl_rd(sc, ena);
2640 if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK))
2641 return 0;
2642
2643 delaymsec(10);
2644 }
2645
2646 return ETIMEDOUT;
2647 }
2648
2649 static int
2650 ixl_rxr_disabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2651 {
2652 bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
2653 uint32_t reg;
2654 int i;
2655
2656 KASSERT(mutex_owned(&rxr->rxr_lock));
2657
2658 for (i = 0; i < 20; i++) {
2659 reg = ixl_rd(sc, ena);
2660 if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK) == 0)
2661 return 0;
2662
2663 delaymsec(10);
2664 }
2665
2666 return ETIMEDOUT;
2667 }
2668
2669 static void
2670 ixl_rxr_config(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2671 {
2672 struct ixl_hmc_rxq rxq;
2673 void *hmc;
2674
2675 memset(&rxq, 0, sizeof(rxq));
2676
2677 rxq.head = htole16(rxr->rxr_cons);
2678 rxq.base = htole64(IXL_DMA_DVA(&rxr->rxr_mem) / IXL_HMC_RXQ_BASE_UNIT);
2679 rxq.qlen = htole16(sc->sc_rx_ring_ndescs);
2680 rxq.dbuff = htole16(MCLBYTES / IXL_HMC_RXQ_DBUFF_UNIT);
2681 rxq.hbuff = 0;
2682 rxq.dtype = IXL_HMC_RXQ_DTYPE_NOSPLIT;
2683 rxq.dsize = IXL_HMC_RXQ_DSIZE_16;
2684 rxq.crcstrip = 1;
2685 rxq.l2sel = 0;
2686 rxq.showiv = 0;
2687 rxq.rxmax = htole16(IXL_HARDMTU);
2688 rxq.tphrdesc_ena = 0;
2689 rxq.tphwdesc_ena = 0;
2690 rxq.tphdata_ena = 0;
2691 rxq.tphhead_ena = 0;
2692 rxq.lrxqthresh = 0;
2693 rxq.prefena = 1;
2694
2695 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
2696 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
2697 ixl_hmc_pack(hmc, &rxq, ixl_hmc_pack_rxq,
2698 __arraycount(ixl_hmc_pack_rxq));
2699 }
2700
2701 static void
2702 ixl_rxr_unconfig(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2703 {
2704 void *hmc;
2705
2706 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
2707 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
2708 }
2709
2710 static void
2711 ixl_rxr_free(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2712 {
2713 struct ixl_rx_map *maps, *rxm;
2714 unsigned int i;
2715
2716 maps = rxr->rxr_maps;
2717 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2718 rxm = &maps[i];
2719
2720 bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
2721 }
2722
2723 ixl_dmamem_free(sc, &rxr->rxr_mem);
2724 mutex_destroy(&rxr->rxr_lock);
2725 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs);
2726 kmem_free(rxr, sizeof(*rxr));
2727 }
2728
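/*
 * ixl_rxeof:
 *	Harvest received packets, processing at most rxlimit descriptors
 *	per call.  Fragments of a packet are chained onto rxr_m_head until
 *	a descriptor with IXL_RX_DESC_EOP is seen, at which point the
 *	completed chain is handed to the stack, or dropped if the RXE or
 *	oversize error bits are set.  Returns nonzero if the limit was hit
 *	before the ring was drained.
 */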
2729 static int
2730 ixl_rxeof(struct ixl_softc *sc, struct ixl_rx_ring *rxr, u_int rxlimit)
2731 {
2732 struct ifnet *ifp = &sc->sc_ec.ec_if;
2733 struct ixl_rx_wb_desc_16 *ring, *rxd;
2734 struct ixl_rx_map *rxm;
2735 bus_dmamap_t map;
2736 unsigned int cons, prod;
2737 struct mbuf *m;
2738 uint64_t word;
2739 unsigned int len;
2740 unsigned int mask;
2741 int done = 0, more = 0;
2742
2743 KASSERT(mutex_owned(&rxr->rxr_lock));
2744
2745 if (!ISSET(ifp->if_flags, IFF_RUNNING))
2746 return 0;
2747
2748 prod = rxr->rxr_prod;
2749 cons = rxr->rxr_cons;
2750
2751 if (cons == prod)
2752 return 0;
2753
2754 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
2755 0, IXL_DMA_LEN(&rxr->rxr_mem),
2756 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2757
2758 ring = IXL_DMA_KVA(&rxr->rxr_mem);
2759 mask = sc->sc_rx_ring_ndescs - 1;
2760
2761 do {
2762 if (rxlimit-- <= 0) {
2763 more = 1;
2764 break;
2765 }
2766
2767 rxd = &ring[cons];
2768
2769 word = le64toh(rxd->qword1);
2770
2771 if (!ISSET(word, IXL_RX_DESC_DD))
2772 break;
2773
2774 rxm = &rxr->rxr_maps[cons];
2775
2776 map = rxm->rxm_map;
2777 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2778 BUS_DMASYNC_POSTREAD);
2779 bus_dmamap_unload(sc->sc_dmat, map);
2780
2781 m = rxm->rxm_m;
2782 rxm->rxm_m = NULL;
2783
2784 KASSERT(m != NULL);
2785
2786 len = (word & IXL_RX_DESC_PLEN_MASK) >> IXL_RX_DESC_PLEN_SHIFT;
2787 m->m_len = len;
2788 m->m_pkthdr.len = 0;
2789
2790 m->m_next = NULL;
2791 *rxr->rxr_m_tail = m;
2792 rxr->rxr_m_tail = &m->m_next;
2793
2794 m = rxr->rxr_m_head;
2795 m->m_pkthdr.len += len;
2796
2797 if (ISSET(word, IXL_RX_DESC_EOP)) {
2798 if (!ISSET(word,
2799 IXL_RX_DESC_RXE | IXL_RX_DESC_OVERSIZE)) {
2800 m_set_rcvif(m, ifp);
2801 rxr->rxr_ipackets++;
2802 rxr->rxr_ibytes += m->m_pkthdr.len;
2803 if_percpuq_enqueue(ifp->if_percpuq, m);
2804 } else {
2805 rxr->rxr_ierrors++;
2806 m_freem(m);
2807 }
2808
2809 rxr->rxr_m_head = NULL;
2810 rxr->rxr_m_tail = &rxr->rxr_m_head;
2811 }
2812
2813 cons++;
2814 cons &= mask;
2815
2816 done = 1;
2817 } while (cons != prod);
2818
2819 if (done) {
2820 rxr->rxr_cons = cons;
2821 if (ixl_rxfill(sc, rxr) == -1)
2822 rxr->rxr_iqdrops++;
2823 }
2824
2825 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
2826 0, IXL_DMA_LEN(&rxr->rxr_mem),
2827 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2828
2829 return more;
2830 }
2831
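/*
 * ixl_rxfill:
 *	Post fresh mbuf clusters to the unrefreshed slots of an RX ring
 *	and advance the tail register.  Returns -1 if any slot could not
 *	be refilled (no free slots, or an mbuf, cluster or DMA load
 *	failure, each counted separately).  Called with rxr_lock held.
 */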
2832 static int
2833 ixl_rxfill(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2834 {
2835 struct ixl_rx_rd_desc_16 *ring, *rxd;
2836 struct ixl_rx_map *rxm;
2837 bus_dmamap_t map;
2838 struct mbuf *m;
2839 unsigned int prod;
2840 unsigned int slots;
2841 unsigned int mask;
2842 int post = 0, error = 0;
2843
2844 KASSERT(mutex_owned(&rxr->rxr_lock));
2845
2846 prod = rxr->rxr_prod;
2847 slots = ixl_rxr_unrefreshed(rxr->rxr_prod, rxr->rxr_cons,
2848 sc->sc_rx_ring_ndescs);
2849
2850 ring = IXL_DMA_KVA(&rxr->rxr_mem);
2851 mask = sc->sc_rx_ring_ndescs - 1;
2852
2853 	if (__predict_false(slots == 0))
2854 return -1;
2855
2856 do {
2857 rxm = &rxr->rxr_maps[prod];
2858
2859 MGETHDR(m, M_DONTWAIT, MT_DATA);
2860 if (m == NULL) {
2861 rxr->rxr_mgethdr_failed.ev_count++;
2862 error = -1;
2863 break;
2864 }
2865
2866 MCLGET(m, M_DONTWAIT);
2867 if (!ISSET(m->m_flags, M_EXT)) {
2868 rxr->rxr_mgetcl_failed.ev_count++;
2869 error = -1;
2870 m_freem(m);
2871 break;
2872 }
2873
2874 m->m_len = m->m_pkthdr.len = MCLBYTES + ETHER_ALIGN;
2875 m_adj(m, ETHER_ALIGN);
2876
2877 map = rxm->rxm_map;
2878
2879 if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
2880 BUS_DMA_READ | BUS_DMA_NOWAIT) != 0) {
2881 rxr->rxr_mbuf_load_failed.ev_count++;
2882 error = -1;
2883 m_freem(m);
2884 break;
2885 }
2886
2887 rxm->rxm_m = m;
2888
2889 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2890 BUS_DMASYNC_PREREAD);
2891
2892 rxd = &ring[prod];
2893
2894 rxd->paddr = htole64(map->dm_segs[0].ds_addr);
2895 rxd->haddr = htole64(0);
2896
2897 prod++;
2898 prod &= mask;
2899
2900 post = 1;
2901
2902 } while (--slots);
2903
2904 if (post) {
2905 rxr->rxr_prod = prod;
2906 ixl_wr(sc, rxr->rxr_tail, prod);
2907 }
2908
2909 return error;
2910 }
2911
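/*
 * ixl_handle_queue_common:
 *	Run TX completion and RX harvesting for one queue pair under the
 *	respective ring locks, bumping the given event counters and
 *	honouring the given work limits.  Bit 0 of the return value is set
 *	if ixl_txeof() left work pending, bit 1 if ixl_rxeof() did.
 */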
2912 static inline int
2913 ixl_handle_queue_common(struct ixl_softc *sc, struct ixl_queue_pair *qp,
2914 u_int txlimit, struct evcnt *txevcnt,
2915 u_int rxlimit, struct evcnt *rxevcnt)
2916 {
2917 struct ixl_tx_ring *txr = qp->qp_txr;
2918 struct ixl_rx_ring *rxr = qp->qp_rxr;
2919 int txmore, rxmore;
2920 int rv;
2921
2922 KASSERT(!mutex_owned(&txr->txr_lock));
2923 KASSERT(!mutex_owned(&rxr->rxr_lock));
2924
2925 mutex_enter(&txr->txr_lock);
2926 txevcnt->ev_count++;
2927 txmore = ixl_txeof(sc, txr, txlimit);
2928 mutex_exit(&txr->txr_lock);
2929
2930 mutex_enter(&rxr->rxr_lock);
2931 rxevcnt->ev_count++;
2932 rxmore = ixl_rxeof(sc, rxr, rxlimit);
2933 mutex_exit(&rxr->rxr_lock);
2934
2935 rv = txmore | (rxmore << 1);
2936
2937 return rv;
2938 }
2939
2940 static void
2941 ixl_sched_handle_queue(struct ixl_softc *sc, struct ixl_queue_pair *qp)
2942 {
2943
2944 if (qp->qp_workqueue)
2945 ixl_work_add(sc->sc_workq_txrx, &qp->qp_task);
2946 else
2947 softint_schedule(qp->qp_si);
2948 }
2949
2950 static int
2951 ixl_intr(void *xsc)
2952 {
2953 struct ixl_softc *sc = xsc;
2954 struct ixl_tx_ring *txr;
2955 struct ixl_rx_ring *rxr;
2956 uint32_t icr, rxintr, txintr;
2957 int rv = 0;
2958 unsigned int i;
2959
2960 KASSERT(sc != NULL);
2961
2962 ixl_enable_other_intr(sc);
2963 icr = ixl_rd(sc, I40E_PFINT_ICR0);
2964
2965 if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) {
2966 atomic_inc_64(&sc->sc_event_atq.ev_count);
2967 ixl_atq_done(sc);
2968 ixl_work_add(sc->sc_workq, &sc->sc_arq_task);
2969 rv = 1;
2970 }
2971
2972 if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) {
2973 atomic_inc_64(&sc->sc_event_link.ev_count);
2974 ixl_work_add(sc->sc_workq, &sc->sc_link_state_task);
2975 rv = 1;
2976 }
2977
2978 rxintr = icr & I40E_INTR_NOTX_RX_MASK;
2979 txintr = icr & I40E_INTR_NOTX_TX_MASK;
2980
2981 if (txintr || rxintr) {
2982 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2983 txr = sc->sc_qps[i].qp_txr;
2984 rxr = sc->sc_qps[i].qp_rxr;
2985
2986 ixl_handle_queue_common(sc, &sc->sc_qps[i],
2987 IXL_TXRX_PROCESS_UNLIMIT, &txr->txr_intr,
2988 IXL_TXRX_PROCESS_UNLIMIT, &rxr->rxr_intr);
2989 }
2990 rv = 1;
2991 }
2992
2993 return rv;
2994 }
2995
2996 static int
2997 ixl_queue_intr(void *xqp)
2998 {
2999 struct ixl_queue_pair *qp = xqp;
3000 struct ixl_tx_ring *txr = qp->qp_txr;
3001 struct ixl_rx_ring *rxr = qp->qp_rxr;
3002 struct ixl_softc *sc = qp->qp_sc;
3003 u_int txlimit, rxlimit;
3004 int more;
3005
3006 txlimit = sc->sc_tx_intr_process_limit;
3007 rxlimit = sc->sc_rx_intr_process_limit;
3008 qp->qp_workqueue = sc->sc_txrx_workqueue;
3009
3010 more = ixl_handle_queue_common(sc, qp,
3011 txlimit, &txr->txr_intr, rxlimit, &rxr->rxr_intr);
3012
3013 if (more != 0) {
3014 ixl_sched_handle_queue(sc, qp);
3015 } else {
3016 /* for ALTQ */
3017 if (txr->txr_qid == 0)
3018 if_schedule_deferred_start(&sc->sc_ec.ec_if);
3019 softint_schedule(txr->txr_si);
3020
3021 ixl_enable_queue_intr(sc, qp);
3022 }
3023
3024 return 1;
3025 }
3026
3027 static void
3028 ixl_handle_queue(void *xqp)
3029 {
3030 struct ixl_queue_pair *qp = xqp;
3031 struct ixl_softc *sc = qp->qp_sc;
3032 struct ixl_tx_ring *txr = qp->qp_txr;
3033 struct ixl_rx_ring *rxr = qp->qp_rxr;
3034 u_int txlimit, rxlimit;
3035 int more;
3036
3037 txlimit = sc->sc_tx_process_limit;
3038 rxlimit = sc->sc_rx_process_limit;
3039
3040 more = ixl_handle_queue_common(sc, qp,
3041 txlimit, &txr->txr_defer, rxlimit, &rxr->rxr_defer);
3042
3043 if (more != 0)
3044 ixl_sched_handle_queue(sc, qp);
3045 else
3046 ixl_enable_queue_intr(sc, qp);
3047 }
3048
3049 static inline void
3050 ixl_print_hmc_error(struct ixl_softc *sc, uint32_t reg)
3051 {
3052 uint32_t hmc_idx, hmc_isvf;
3053 uint32_t hmc_errtype, hmc_objtype, hmc_data;
3054
3055 hmc_idx = reg & I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK;
3056 hmc_idx = hmc_idx >> I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT;
3057 hmc_isvf = reg & I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK;
3058 hmc_isvf = hmc_isvf >> I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT;
3059 hmc_errtype = reg & I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK;
3060 hmc_errtype = hmc_errtype >> I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT;
3061 hmc_objtype = reg & I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK;
3062 hmc_objtype = hmc_objtype >> I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT;
3063 hmc_data = ixl_rd(sc, I40E_PFHMC_ERRORDATA);
3064
3065 device_printf(sc->sc_dev,
3066 "HMC Error (idx=0x%x, isvf=0x%x, err=0x%x, obj=0x%x, data=0x%x)\n",
3067 hmc_idx, hmc_isvf, hmc_errtype, hmc_objtype, hmc_data);
3068 }
3069
3070 static int
3071 ixl_other_intr(void *xsc)
3072 {
3073 struct ixl_softc *sc = xsc;
3074 uint32_t icr, mask, reg;
3075 	int rv = 0;
3076
3077 icr = ixl_rd(sc, I40E_PFINT_ICR0);
3078 mask = ixl_rd(sc, I40E_PFINT_ICR0_ENA);
3079
3080 if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) {
3081 atomic_inc_64(&sc->sc_event_atq.ev_count);
3082 ixl_atq_done(sc);
3083 ixl_work_add(sc->sc_workq, &sc->sc_arq_task);
3084 rv = 1;
3085 }
3086
3087 if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) {
3088 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3089 device_printf(sc->sc_dev, "link stat changed\n");
3090
3091 atomic_inc_64(&sc->sc_event_link.ev_count);
3092 ixl_work_add(sc->sc_workq, &sc->sc_link_state_task);
3093 rv = 1;
3094 }
3095
3096 if (ISSET(icr, I40E_PFINT_ICR0_GRST_MASK)) {
3097 CLR(mask, I40E_PFINT_ICR0_ENA_GRST_MASK);
3098 reg = ixl_rd(sc, I40E_GLGEN_RSTAT);
3099 reg = reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK;
3100 reg = reg >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
3101
3102 device_printf(sc->sc_dev, "GRST: %s\n",
3103 reg == I40E_RESET_CORER ? "CORER" :
3104 reg == I40E_RESET_GLOBR ? "GLOBR" :
3105 reg == I40E_RESET_EMPR ? "EMPR" :
3106 "POR");
3107 }
3108
3109 if (ISSET(icr, I40E_PFINT_ICR0_ECC_ERR_MASK))
3110 atomic_inc_64(&sc->sc_event_ecc_err.ev_count);
3111 if (ISSET(icr, I40E_PFINT_ICR0_PCI_EXCEPTION_MASK))
3112 atomic_inc_64(&sc->sc_event_pci_exception.ev_count);
3113 if (ISSET(icr, I40E_PFINT_ICR0_PE_CRITERR_MASK))
3114 atomic_inc_64(&sc->sc_event_crit_err.ev_count);
3115
3116 if (ISSET(icr, IXL_ICR0_CRIT_ERR_MASK)) {
3117 CLR(mask, IXL_ICR0_CRIT_ERR_MASK);
3118 device_printf(sc->sc_dev, "critical error\n");
3119 }
3120
3121 if (ISSET(icr, I40E_PFINT_ICR0_HMC_ERR_MASK)) {
3122 reg = ixl_rd(sc, I40E_PFHMC_ERRORINFO);
3123 if (ISSET(reg, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK))
3124 ixl_print_hmc_error(sc, reg);
3125 ixl_wr(sc, I40E_PFHMC_ERRORINFO, 0);
3126 }
3127
3128 ixl_wr(sc, I40E_PFINT_ICR0_ENA, mask);
3129 ixl_flush(sc);
3130 ixl_enable_other_intr(sc);
3131 return rv;
3132 }
3133
3134 static void
3135 ixl_get_link_status_done(struct ixl_softc *sc,
3136 const struct ixl_aq_desc *iaq)
3137 {
3138
3139 ixl_link_state_update(sc, iaq);
3140 }
3141
3142 static void
3143 ixl_get_link_status(void *xsc)
3144 {
3145 struct ixl_softc *sc = xsc;
3146 struct ixl_aq_desc *iaq;
3147 struct ixl_aq_link_param *param;
3148
3149 memset(&sc->sc_link_state_atq, 0, sizeof(sc->sc_link_state_atq));
3150 iaq = &sc->sc_link_state_atq.iatq_desc;
3151 iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
3152 param = (struct ixl_aq_link_param *)iaq->iaq_param;
3153 param->notify = IXL_AQ_LINK_NOTIFY;
3154
3155 ixl_atq_set(&sc->sc_link_state_atq, ixl_get_link_status_done);
3156 (void)ixl_atq_post(sc, &sc->sc_link_state_atq);
3157 }
3158
3159 static void
3160 ixl_link_state_update(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
3161 {
3162 struct ifnet *ifp = &sc->sc_ec.ec_if;
3163 int link_state;
3164
3165 link_state = ixl_set_link_status(sc, iaq);
3166
3167 if (ifp->if_link_state != link_state)
3168 if_link_state_change(ifp, link_state);
3169
3170 if (link_state != LINK_STATE_DOWN) {
3171 if_schedule_deferred_start(ifp);
3172 }
3173 }
3174
3175 static void
3176 ixl_aq_dump(const struct ixl_softc *sc, const struct ixl_aq_desc *iaq,
3177 const char *msg)
3178 {
3179 char buf[512];
3180 size_t len;
3181
3182 len = sizeof(buf);
3183 buf[--len] = '\0';
3184
3185 device_printf(sc->sc_dev, "%s\n", msg);
3186 snprintb(buf, len, IXL_AQ_FLAGS_FMT, le16toh(iaq->iaq_flags));
3187 device_printf(sc->sc_dev, "flags %s opcode %04x\n",
3188 buf, le16toh(iaq->iaq_opcode));
3189 device_printf(sc->sc_dev, "datalen %u retval %u\n",
3190 le16toh(iaq->iaq_datalen), le16toh(iaq->iaq_retval));
3191 device_printf(sc->sc_dev, "cookie %016" PRIx64 "\n", iaq->iaq_cookie);
3192 device_printf(sc->sc_dev, "%08x %08x %08x %08x\n",
3193 le32toh(iaq->iaq_param[0]), le32toh(iaq->iaq_param[1]),
3194 le32toh(iaq->iaq_param[2]), le32toh(iaq->iaq_param[3]));
3195 }
3196
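/*
 * ixl_arq:
 *	Process pending admin receive queue (ARQ) events; link status
 *	change is currently the only event acted upon.  Consumed event
 *	buffers are recycled onto the idle list, the ring is refilled and
 *	the "other" interrupt is re-enabled.
 */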
3197 static void
3198 ixl_arq(void *xsc)
3199 {
3200 struct ixl_softc *sc = xsc;
3201 struct ixl_aq_desc *arq, *iaq;
3202 struct ixl_aq_buf *aqb;
3203 unsigned int cons = sc->sc_arq_cons;
3204 unsigned int prod;
3205 int done = 0;
3206
3207 prod = ixl_rd(sc, sc->sc_aq_regs->arq_head) &
3208 sc->sc_aq_regs->arq_head_mask;
3209
3210 if (cons == prod)
3211 goto done;
3212
3213 arq = IXL_DMA_KVA(&sc->sc_arq);
3214
3215 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3216 0, IXL_DMA_LEN(&sc->sc_arq),
3217 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3218
3219 do {
3220 iaq = &arq[cons];
3221 aqb = sc->sc_arq_live[cons];
3222
3223 KASSERT(aqb != NULL);
3224
3225 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,
3226 BUS_DMASYNC_POSTREAD);
3227
3228 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3229 ixl_aq_dump(sc, iaq, "arq event");
3230
3231 switch (iaq->iaq_opcode) {
3232 case htole16(IXL_AQ_OP_PHY_LINK_STATUS):
3233 ixl_link_state_update(sc, iaq);
3234 break;
3235 }
3236
3237 memset(iaq, 0, sizeof(*iaq));
3238 sc->sc_arq_live[cons] = NULL;
3239 SIMPLEQ_INSERT_TAIL(&sc->sc_arq_idle, aqb, aqb_entry);
3240
3241 cons++;
3242 cons &= IXL_AQ_MASK;
3243
3244 done = 1;
3245 } while (cons != prod);
3246
3247 if (done) {
3248 sc->sc_arq_cons = cons;
3249 ixl_arq_fill(sc);
3250 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3251 0, IXL_DMA_LEN(&sc->sc_arq),
3252 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3253 }
3254
3255 done:
3256 ixl_enable_other_intr(sc);
3257 }
3258
3259 static void
3260 ixl_atq_set(struct ixl_atq *iatq,
3261 void (*fn)(struct ixl_softc *, const struct ixl_aq_desc *))
3262 {
3263
3264 iatq->iatq_fn = fn;
3265 }
3266
3267 static int
3268 ixl_atq_post_locked(struct ixl_softc *sc, struct ixl_atq *iatq)
3269 {
3270 struct ixl_aq_desc *atq, *slot;
3271 unsigned int prod, cons, prod_next;
3272
3273 /* assert locked */
3274 KASSERT(mutex_owned(&sc->sc_atq_lock));
3275
3276 atq = IXL_DMA_KVA(&sc->sc_atq);
3277 prod = sc->sc_atq_prod;
3278 cons = sc->sc_atq_cons;
3279 	prod_next = (prod + 1) & IXL_AQ_MASK;
3280
3281 if (cons == prod_next)
3282 return ENOMEM;
3283
3284 slot = &atq[prod];
3285
3286 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3287 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
3288
3289 *slot = iatq->iatq_desc;
3290 slot->iaq_cookie = (uint64_t)((intptr_t)iatq);
3291
3292 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3293 ixl_aq_dump(sc, slot, "atq command");
3294
3295 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3296 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
3297
3298 sc->sc_atq_prod = prod_next;
3299 ixl_wr(sc, sc->sc_aq_regs->atq_tail, sc->sc_atq_prod);
3300
3301 return 0;
3302 }
3303
3304 static int
3305 ixl_atq_post(struct ixl_softc *sc, struct ixl_atq *iatq)
3306 {
3307 int rv;
3308
3309 mutex_enter(&sc->sc_atq_lock);
3310 rv = ixl_atq_post_locked(sc, iatq);
3311 mutex_exit(&sc->sc_atq_lock);
3312
3313 return rv;
3314 }
3315
3316 static void
3317 ixl_atq_done_locked(struct ixl_softc *sc)
3318 {
3319 struct ixl_aq_desc *atq, *slot;
3320 struct ixl_atq *iatq;
3321 unsigned int cons;
3322 unsigned int prod;
3323
3324 KASSERT(mutex_owned(&sc->sc_atq_lock));
3325
3326 prod = sc->sc_atq_prod;
3327 cons = sc->sc_atq_cons;
3328
3329 if (prod == cons)
3330 return;
3331
3332 atq = IXL_DMA_KVA(&sc->sc_atq);
3333
3334 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3335 0, IXL_DMA_LEN(&sc->sc_atq),
3336 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3337
3338 do {
3339 slot = &atq[cons];
3340 if (!ISSET(slot->iaq_flags, htole16(IXL_AQ_DD)))
3341 break;
3342
3343 iatq = (struct ixl_atq *)((intptr_t)slot->iaq_cookie);
3344 iatq->iatq_desc = *slot;
3345
3346 memset(slot, 0, sizeof(*slot));
3347
3348 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3349 ixl_aq_dump(sc, &iatq->iatq_desc, "atq response");
3350
3351 (*iatq->iatq_fn)(sc, &iatq->iatq_desc);
3352
3353 cons++;
3354 cons &= IXL_AQ_MASK;
3355 } while (cons != prod);
3356
3357 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3358 0, IXL_DMA_LEN(&sc->sc_atq),
3359 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3360
3361 sc->sc_atq_cons = cons;
3362 }
3363
3364 static void
3365 ixl_atq_done(struct ixl_softc *sc)
3366 {
3367
3368 mutex_enter(&sc->sc_atq_lock);
3369 ixl_atq_done_locked(sc);
3370 mutex_exit(&sc->sc_atq_lock);
3371 }
3372
3373 static void
3374 ixl_wakeup(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
3375 {
3376
3377 KASSERT(mutex_owned(&sc->sc_atq_lock));
3378
3379 cv_signal(&sc->sc_atq_cv);
3380 }
3381
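/*
 * ixl_atq_exec:
 *	Post an admin queue command and sleep on sc_atq_cv until
 *	ixl_atq_done_locked() runs the ixl_wakeup() completion for it, or
 *	IXL_ATQ_EXEC_TIMEOUT expires.
 */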
3382 static int
3383 ixl_atq_exec(struct ixl_softc *sc, struct ixl_atq *iatq)
3384 {
3385 int error;
3386
3387 KASSERT(iatq->iatq_desc.iaq_cookie == 0);
3388
3389 ixl_atq_set(iatq, ixl_wakeup);
3390
3391 mutex_enter(&sc->sc_atq_lock);
3392 error = ixl_atq_post_locked(sc, iatq);
3393 if (error) {
3394 mutex_exit(&sc->sc_atq_lock);
3395 return error;
3396 }
3397
3398 error = cv_timedwait(&sc->sc_atq_cv, &sc->sc_atq_lock,
3399 IXL_ATQ_EXEC_TIMEOUT);
3400 mutex_exit(&sc->sc_atq_lock);
3401
3402 return error;
3403 }
3404
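/*
 * ixl_atq_poll:
 *	Issue a single admin queue command and busy-wait, in 1ms steps up
 *	to tm milliseconds, for the hardware to consume it, then copy the
 *	completed descriptor back to the caller.  Used on paths that
 *	cannot sleep or rely on interrupts, mainly during attach.
 */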
3405 static int
3406 ixl_atq_poll(struct ixl_softc *sc, struct ixl_aq_desc *iaq, unsigned int tm)
3407 {
3408 struct ixl_aq_desc *atq, *slot;
3409 unsigned int prod;
3410 unsigned int t = 0;
3411
3412 atq = IXL_DMA_KVA(&sc->sc_atq);
3413 prod = sc->sc_atq_prod;
3414 slot = atq + prod;
3415
3416 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3417 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
3418
3419 *slot = *iaq;
3420 slot->iaq_flags |= htole16(IXL_AQ_SI);
3421
3422 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3423 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
3424
3425 prod++;
3426 prod &= IXL_AQ_MASK;
3427 sc->sc_atq_prod = prod;
3428 ixl_wr(sc, sc->sc_aq_regs->atq_tail, prod);
3429
3430 while (ixl_rd(sc, sc->sc_aq_regs->atq_head) != prod) {
3431 delaymsec(1);
3432
3433 if (t++ > tm)
3434 return ETIMEDOUT;
3435 }
3436
3437 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3438 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTREAD);
3439 *iaq = *slot;
3440 memset(slot, 0, sizeof(*slot));
3441 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3442 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREREAD);
3443
3444 sc->sc_atq_cons = prod;
3445
3446 return 0;
3447 }
3448
3449 static int
3450 ixl_get_version(struct ixl_softc *sc)
3451 {
3452 struct ixl_aq_desc iaq;
3453 uint32_t fwbuild, fwver, apiver;
3454 uint16_t api_maj_ver, api_min_ver;
3455
3456 memset(&iaq, 0, sizeof(iaq));
3457 iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VERSION);
3458
3459 iaq.iaq_retval = le16toh(23);
3460
3461 if (ixl_atq_poll(sc, &iaq, 2000) != 0)
3462 return ETIMEDOUT;
3463 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK))
3464 return EIO;
3465
3466 fwbuild = le32toh(iaq.iaq_param[1]);
3467 fwver = le32toh(iaq.iaq_param[2]);
3468 apiver = le32toh(iaq.iaq_param[3]);
3469
3470 api_maj_ver = (uint16_t)apiver;
3471 api_min_ver = (uint16_t)(apiver >> 16);
3472
3473 aprint_normal(", FW %hu.%hu.%05u API %hu.%hu", (uint16_t)fwver,
3474 (uint16_t)(fwver >> 16), fwbuild, api_maj_ver, api_min_ver);
3475
3476 sc->sc_rxctl_atq = true;
3477 if (sc->sc_mac_type == I40E_MAC_X722) {
3478 if (api_maj_ver == 1 && api_min_ver < 5) {
3479 sc->sc_rxctl_atq = false;
3480 }
3481 }
3482
3483 return 0;
3484 }
3485
3486 static int
3487 ixl_pxe_clear(struct ixl_softc *sc)
3488 {
3489 struct ixl_aq_desc iaq;
3490 int rv;
3491
3492 memset(&iaq, 0, sizeof(iaq));
3493 iaq.iaq_opcode = htole16(IXL_AQ_OP_CLEAR_PXE_MODE);
3494 iaq.iaq_param[0] = htole32(0x2);
3495
3496 rv = ixl_atq_poll(sc, &iaq, 250);
3497
3498 ixl_wr(sc, I40E_GLLAN_RCTL_0, 0x1);
3499
3500 if (rv != 0)
3501 return ETIMEDOUT;
3502
3503 switch (iaq.iaq_retval) {
3504 case htole16(IXL_AQ_RC_OK):
3505 case htole16(IXL_AQ_RC_EEXIST):
3506 break;
3507 default:
3508 return EIO;
3509 }
3510
3511 return 0;
3512 }
3513
3514 static int
3515 ixl_lldp_shut(struct ixl_softc *sc)
3516 {
3517 struct ixl_aq_desc iaq;
3518
3519 memset(&iaq, 0, sizeof(iaq));
3520 iaq.iaq_opcode = htole16(IXL_AQ_OP_LLDP_STOP_AGENT);
3521 iaq.iaq_param[0] = htole32(IXL_LLDP_SHUTDOWN);
3522
3523 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
3524 aprint_error_dev(sc->sc_dev, "STOP LLDP AGENT timeout\n");
3525 return -1;
3526 }
3527
3528 switch (iaq.iaq_retval) {
3529 case htole16(IXL_AQ_RC_EMODE):
3530 case htole16(IXL_AQ_RC_EPERM):
3531 /* ignore silently */
3532 default:
3533 break;
3534 }
3535
3536 return 0;
3537 }
3538
3539 static void
3540 ixl_parse_hw_capability(struct ixl_softc *sc, struct ixl_aq_capability *cap)
3541 {
3542 uint16_t id;
3543 uint32_t number, logical_id;
3544
3545 id = le16toh(cap->cap_id);
3546 number = le32toh(cap->number);
3547 logical_id = le32toh(cap->logical_id);
3548
3549 switch (id) {
3550 case IXL_AQ_CAP_RSS:
3551 sc->sc_rss_table_size = number;
3552 sc->sc_rss_table_entry_width = logical_id;
3553 break;
3554 case IXL_AQ_CAP_RXQ:
3555 case IXL_AQ_CAP_TXQ:
3556 sc->sc_nqueue_pairs_device = MIN(number,
3557 sc->sc_nqueue_pairs_device);
3558 break;
3559 }
3560 }
3561
3562 static int
3563 ixl_get_hw_capabilities(struct ixl_softc *sc)
3564 {
3565 struct ixl_dmamem idm;
3566 struct ixl_aq_desc iaq;
3567 struct ixl_aq_capability *caps;
3568 size_t i, ncaps;
3569 bus_size_t caps_size;
3570 uint16_t status;
3571 int rv;
3572
3573 caps_size = sizeof(caps[0]) * 40;
3574 memset(&iaq, 0, sizeof(iaq));
3575 iaq.iaq_opcode = htole16(IXL_AQ_OP_LIST_FUNC_CAP);
3576
3577 do {
3578 if (ixl_dmamem_alloc(sc, &idm, caps_size, 0) != 0) {
3579 return -1;
3580 }
3581
3582 iaq.iaq_flags = htole16(IXL_AQ_BUF |
3583 (caps_size > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
3584 iaq.iaq_datalen = htole16(caps_size);
3585 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
3586
3587 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0,
3588 IXL_DMA_LEN(&idm), BUS_DMASYNC_PREREAD);
3589
3590 rv = ixl_atq_poll(sc, &iaq, 250);
3591
3592 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0,
3593 IXL_DMA_LEN(&idm), BUS_DMASYNC_POSTREAD);
3594
3595 if (rv != 0) {
3596 aprint_error(", HW capabilities timeout\n");
3597 goto done;
3598 }
3599
3600 status = le16toh(iaq.iaq_retval);
3601
3602 if (status == IXL_AQ_RC_ENOMEM) {
3603 caps_size = le16toh(iaq.iaq_datalen);
3604 ixl_dmamem_free(sc, &idm);
3605 }
3606 } while (status == IXL_AQ_RC_ENOMEM);
3607
3608 if (status != IXL_AQ_RC_OK) {
3609 aprint_error(", HW capabilities error\n");
3610 goto done;
3611 }
3612
3613 caps = IXL_DMA_KVA(&idm);
3614 ncaps = le16toh(iaq.iaq_param[1]);
3615
3616 for (i = 0; i < ncaps; i++) {
3617 ixl_parse_hw_capability(sc, &caps[i]);
3618 }
3619
3620 done:
3621 ixl_dmamem_free(sc, &idm);
3622 return rv;
3623 }
3624
3625 static int
3626 ixl_get_mac(struct ixl_softc *sc)
3627 {
3628 struct ixl_dmamem idm;
3629 struct ixl_aq_desc iaq;
3630 struct ixl_aq_mac_addresses *addrs;
3631 int rv;
3632
3633 if (ixl_dmamem_alloc(sc, &idm, sizeof(*addrs), 0) != 0) {
3634 aprint_error(", unable to allocate mac addresses\n");
3635 return -1;
3636 }
3637
3638 memset(&iaq, 0, sizeof(iaq));
3639 iaq.iaq_flags = htole16(IXL_AQ_BUF);
3640 iaq.iaq_opcode = htole16(IXL_AQ_OP_MAC_ADDRESS_READ);
3641 iaq.iaq_datalen = htole16(sizeof(*addrs));
3642 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
3643
3644 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3645 BUS_DMASYNC_PREREAD);
3646
3647 rv = ixl_atq_poll(sc, &iaq, 250);
3648
3649 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3650 BUS_DMASYNC_POSTREAD);
3651
3652 if (rv != 0) {
3653 aprint_error(", MAC ADDRESS READ timeout\n");
3654 rv = -1;
3655 goto done;
3656 }
3657 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3658 aprint_error(", MAC ADDRESS READ error\n");
3659 rv = -1;
3660 goto done;
3661 }
3662
3663 addrs = IXL_DMA_KVA(&idm);
3664 if (!ISSET(iaq.iaq_param[0], htole32(IXL_AQ_MAC_PORT_VALID))) {
3665 		aprint_error(", port address is not valid\n");
3666 goto done;
3667 }
3668
3669 memcpy(sc->sc_enaddr, addrs->port, ETHER_ADDR_LEN);
3670 rv = 0;
3671
3672 done:
3673 ixl_dmamem_free(sc, &idm);
3674 return rv;
3675 }
3676
3677 static int
3678 ixl_get_switch_config(struct ixl_softc *sc)
3679 {
3680 struct ixl_dmamem idm;
3681 struct ixl_aq_desc iaq;
3682 struct ixl_aq_switch_config *hdr;
3683 struct ixl_aq_switch_config_element *elms, *elm;
3684 unsigned int nelm, i;
3685 int rv;
3686
3687 if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
3688 aprint_error_dev(sc->sc_dev,
3689 "unable to allocate switch config buffer\n");
3690 return -1;
3691 }
3692
3693 memset(&iaq, 0, sizeof(iaq));
3694 iaq.iaq_flags = htole16(IXL_AQ_BUF |
3695 (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
3696 iaq.iaq_opcode = htole16(IXL_AQ_OP_SWITCH_GET_CONFIG);
3697 iaq.iaq_datalen = htole16(IXL_AQ_BUFLEN);
3698 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
3699
3700 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3701 BUS_DMASYNC_PREREAD);
3702
3703 rv = ixl_atq_poll(sc, &iaq, 250);
3704
3705 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3706 BUS_DMASYNC_POSTREAD);
3707
3708 if (rv != 0) {
3709 aprint_error_dev(sc->sc_dev, "GET SWITCH CONFIG timeout\n");
3710 rv = -1;
3711 goto done;
3712 }
3713 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3714 aprint_error_dev(sc->sc_dev, "GET SWITCH CONFIG error\n");
3715 rv = -1;
3716 goto done;
3717 }
3718
3719 hdr = IXL_DMA_KVA(&idm);
3720 elms = (struct ixl_aq_switch_config_element *)(hdr + 1);
3721
3722 nelm = le16toh(hdr->num_reported);
3723 if (nelm < 1) {
3724 aprint_error_dev(sc->sc_dev, "no switch config available\n");
3725 rv = -1;
3726 goto done;
3727 }
3728
3729 for (i = 0; i < nelm; i++) {
3730 elm = &elms[i];
3731
3732 aprint_debug_dev(sc->sc_dev,
3733 "type %x revision %u seid %04x\n",
3734 elm->type, elm->revision, le16toh(elm->seid));
3735 aprint_debug_dev(sc->sc_dev,
3736 "uplink %04x downlink %04x\n",
3737 le16toh(elm->uplink_seid),
3738 le16toh(elm->downlink_seid));
3739 aprint_debug_dev(sc->sc_dev,
3740 "conntype %x scheduler %04x extra %04x\n",
3741 elm->connection_type,
3742 le16toh(elm->scheduler_id),
3743 le16toh(elm->element_info));
3744 }
3745
3746 elm = &elms[0];
3747
3748 sc->sc_uplink_seid = elm->uplink_seid;
3749 sc->sc_downlink_seid = elm->downlink_seid;
3750 sc->sc_seid = elm->seid;
3751
3752 if ((sc->sc_uplink_seid == htole16(0)) !=
3753 (sc->sc_downlink_seid == htole16(0))) {
3754 aprint_error_dev(sc->sc_dev, "SEIDs are misconfigured\n");
3755 rv = -1;
3756 goto done;
3757 }
3758
3759 done:
3760 ixl_dmamem_free(sc, &idm);
3761 return rv;
3762 }
3763
3764 static int
3765 ixl_phy_mask_ints(struct ixl_softc *sc)
3766 {
3767 struct ixl_aq_desc iaq;
3768
3769 memset(&iaq, 0, sizeof(iaq));
3770 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_EVENT_MASK);
3771 iaq.iaq_param[2] = htole32(IXL_AQ_PHY_EV_MASK &
3772 ~(IXL_AQ_PHY_EV_LINK_UPDOWN | IXL_AQ_PHY_EV_MODULE_QUAL_FAIL |
3773 IXL_AQ_PHY_EV_MEDIA_NA));
3774
3775 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
3776 aprint_error_dev(sc->sc_dev, "SET PHY EVENT MASK timeout\n");
3777 return -1;
3778 }
3779 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3780 aprint_error_dev(sc->sc_dev, "SET PHY EVENT MASK error\n");
3781 return -1;
3782 }
3783
3784 return 0;
3785 }
3786
3787 static int
3788 ixl_get_phy_abilities(struct ixl_softc *sc, struct ixl_dmamem *idm)
3789 {
3790 struct ixl_aq_desc iaq;
3791 int rv;
3792
3793 memset(&iaq, 0, sizeof(iaq));
3794 iaq.iaq_flags = htole16(IXL_AQ_BUF |
3795 (IXL_DMA_LEN(idm) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
3796 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_GET_ABILITIES);
3797 iaq.iaq_datalen = htole16(IXL_DMA_LEN(idm));
3798 iaq.iaq_param[0] = htole32(IXL_AQ_PHY_REPORT_INIT);
3799 ixl_aq_dva(&iaq, IXL_DMA_DVA(idm));
3800
3801 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
3802 BUS_DMASYNC_PREREAD);
3803
3804 rv = ixl_atq_poll(sc, &iaq, 250);
3805
3806 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
3807 BUS_DMASYNC_POSTREAD);
3808
3809 if (rv != 0)
3810 return -1;
3811
3812 return le16toh(iaq.iaq_retval);
3813 }
3814
3815 static int
3816 ixl_get_phy_types(struct ixl_softc *sc, uint64_t *phy_types_ptr)
3817 {
3818 struct ixl_dmamem idm;
3819 struct ixl_aq_phy_abilities *phy;
3820 uint64_t phy_types;
3821 int rv;
3822
3823 if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
3824 aprint_error_dev(sc->sc_dev,
3825 		    "unable to allocate phy abilities buffer\n");
3826 return -1;
3827 }
3828
3829 rv = ixl_get_phy_abilities(sc, &idm);
3830 switch (rv) {
3831 case -1:
3832 aprint_error_dev(sc->sc_dev, "GET PHY ABILITIES timeout\n");
3833 goto done;
3834 case IXL_AQ_RC_OK:
3835 break;
3836 case IXL_AQ_RC_EIO:
3837 		aprint_error_dev(sc->sc_dev, "unable to query phy types\n");
3838 break;
3839 default:
3840 aprint_error_dev(sc->sc_dev,
3841 		    "GET PHY ABILITIES error %u\n", rv);
3842 goto done;
3843 }
3844
3845 phy = IXL_DMA_KVA(&idm);
3846
3847 phy_types = le32toh(phy->phy_type);
3848 phy_types |= (uint64_t)le32toh(phy->phy_type_ext) << 32;
3849
3850 *phy_types_ptr = phy_types;
3851
3852 rv = 0;
3853
3854 done:
3855 ixl_dmamem_free(sc, &idm);
3856 return rv;
3857 }
3858
3859 static int
3860 ixl_get_link_status_poll(struct ixl_softc *sc)
3861 {
3862 struct ixl_aq_desc iaq;
3863 struct ixl_aq_link_param *param;
3864 int link;
3865
3866 memset(&iaq, 0, sizeof(iaq));
3867 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
3868 param = (struct ixl_aq_link_param *)iaq.iaq_param;
3869 param->notify = IXL_AQ_LINK_NOTIFY;
3870
3871 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
3872 return ETIMEDOUT;
3873 }
3874 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3875 return EIO;
3876 }
3877
3878 link = ixl_set_link_status(sc, &iaq);
3879 sc->sc_ec.ec_if.if_link_state = link;
3880
3881 return 0;
3882 }
3883
3884 static int
3885 ixl_get_vsi(struct ixl_softc *sc)
3886 {
3887 struct ixl_dmamem *vsi = &sc->sc_scratch;
3888 struct ixl_aq_desc iaq;
3889 struct ixl_aq_vsi_param *param;
3890 struct ixl_aq_vsi_reply *reply;
3891 int rv;
3892
3893 /* grumble, vsi info isn't "known" at compile time */
3894
3895 memset(&iaq, 0, sizeof(iaq));
3896 iaq.iaq_flags = htole16(IXL_AQ_BUF |
3897 (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
3898 iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VSI_PARAMS);
3899 iaq.iaq_datalen = htole16(IXL_DMA_LEN(vsi));
3900 ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
3901
3902 param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
3903 param->uplink_seid = sc->sc_seid;
3904
3905 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
3906 BUS_DMASYNC_PREREAD);
3907
3908 rv = ixl_atq_poll(sc, &iaq, 250);
3909
3910 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
3911 BUS_DMASYNC_POSTREAD);
3912
3913 if (rv != 0) {
3914 aprint_error_dev(sc->sc_dev, "GET VSI timeout\n");
3915 return -1;
3916 }
3917
3918 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3919 aprint_error_dev(sc->sc_dev, "GET VSI error %u\n",
3920 le16toh(iaq.iaq_retval));
3921 return -1;
3922 }
3923
3924 reply = (struct ixl_aq_vsi_reply *)iaq.iaq_param;
3925 sc->sc_vsi_number = reply->vsi_number;
3926
3927 return 0;
3928 }
3929
3930 static int
3931 ixl_set_vsi(struct ixl_softc *sc)
3932 {
3933 struct ixl_dmamem *vsi = &sc->sc_scratch;
3934 struct ixl_aq_desc iaq;
3935 struct ixl_aq_vsi_param *param;
3936 struct ixl_aq_vsi_data *data = IXL_DMA_KVA(vsi);
3937 unsigned int qnum;
3938 int rv;
3939
3940 qnum = sc->sc_nqueue_pairs - 1;
3941
3942 data->valid_sections = htole16(IXL_AQ_VSI_VALID_QUEUE_MAP |
3943 IXL_AQ_VSI_VALID_VLAN);
3944
3945 CLR(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_MASK));
3946 SET(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_CONTIG));
3947 data->queue_mapping[0] = htole16(0);
3948 data->tc_mapping[0] = htole16((0 << IXL_AQ_VSI_TC_Q_OFFSET_SHIFT) |
3949 (qnum << IXL_AQ_VSI_TC_Q_NUMBER_SHIFT));
3950
3951 CLR(data->port_vlan_flags,
3952 htole16(IXL_AQ_VSI_PVLAN_MODE_MASK | IXL_AQ_VSI_PVLAN_EMOD_MASK));
3953 SET(data->port_vlan_flags,
3954 htole16(IXL_AQ_VSI_PVLAN_MODE_ALL | IXL_AQ_VSI_PVLAN_EMOD_NOTHING));
3955
3956 /* grumble, vsi info isn't "known" at compile time */
3957
3958 memset(&iaq, 0, sizeof(iaq));
3959 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD |
3960 (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
3961 iaq.iaq_opcode = htole16(IXL_AQ_OP_UPD_VSI_PARAMS);
3962 iaq.iaq_datalen = htole16(IXL_DMA_LEN(vsi));
3963 ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
3964
3965 param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
3966 param->uplink_seid = sc->sc_seid;
3967
3968 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
3969 BUS_DMASYNC_PREWRITE);
3970
3971 rv = ixl_atq_poll(sc, &iaq, 250);
3972
3973 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
3974 BUS_DMASYNC_POSTWRITE);
3975
3976 if (rv != 0) {
3977 aprint_error_dev(sc->sc_dev, "UPDATE VSI timeout\n");
3978 return -1;
3979 }
3980
3981 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3982 aprint_error_dev(sc->sc_dev, "UPDATE VSI error %u\n",
3983 le16toh(iaq.iaq_retval));
3984 return -1;
3985 }
3986
3987 return 0;
3988 }
3989
3990 static void
3991 ixl_set_filter_control(struct ixl_softc *sc)
3992 {
3993 uint32_t reg;
3994
3995 reg = ixl_rd_rx_csr(sc, I40E_PFQF_CTL_0);
3996
3997 CLR(reg, I40E_PFQF_CTL_0_HASHLUTSIZE_MASK);
3998 SET(reg, I40E_HASH_LUT_SIZE_128 << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT);
3999
4000 SET(reg, I40E_PFQF_CTL_0_FD_ENA_MASK);
4001 SET(reg, I40E_PFQF_CTL_0_ETYPE_ENA_MASK);
4002 SET(reg, I40E_PFQF_CTL_0_MACVLAN_ENA_MASK);
4003
4004 ixl_wr_rx_csr(sc, I40E_PFQF_CTL_0, reg);
4005 }
4006
4007 static inline void
4008 ixl_get_default_rss_key(uint32_t *buf, size_t len)
4009 {
4010 size_t cplen;
4011 uint8_t rss_seed[RSS_KEYSIZE];
4012
4013 rss_getkey(rss_seed);
4014 memset(buf, 0, len);
4015
4016 cplen = MIN(len, sizeof(rss_seed));
4017 memcpy(buf, rss_seed, cplen);
4018 }
4019
4020 static void
4021 ixl_set_rss_key(struct ixl_softc *sc)
4022 {
4023 uint32_t rss_seed[IXL_RSS_KEY_SIZE_REG];
4024 size_t i;
4025
4026 ixl_get_default_rss_key(rss_seed, sizeof(rss_seed));
4027
4028 for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
4029 ixl_wr_rx_csr(sc, I40E_PFQF_HKEY(i), rss_seed[i]);
4030 }
4031 }
4032
4033 static void
4034 ixl_set_rss_pctype(struct ixl_softc *sc)
4035 {
4036 uint64_t set_hena = 0;
4037 uint32_t hena0, hena1;
4038
4039 if (sc->sc_mac_type == I40E_MAC_X722)
4040 set_hena = IXL_RSS_HENA_DEFAULT_X722;
4041 else
4042 set_hena = IXL_RSS_HENA_DEFAULT_XL710;
4043
4044 hena0 = ixl_rd_rx_csr(sc, I40E_PFQF_HENA(0));
4045 hena1 = ixl_rd_rx_csr(sc, I40E_PFQF_HENA(1));
4046
4047 SET(hena0, set_hena);
4048 SET(hena1, set_hena >> 32);
4049
4050 ixl_wr_rx_csr(sc, I40E_PFQF_HENA(0), hena0);
4051 ixl_wr_rx_csr(sc, I40E_PFQF_HENA(1), hena1);
4052 }
4053
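/*
 * Build the RSS hash lookup table so that entry i selects queue
 * (i % sc_nqueue_pairs), masked to the table entry width, and write it
 * into the PFQF_HLUT registers 32 bits at a time.
 */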
4054 static void
4055 ixl_set_rss_hlut(struct ixl_softc *sc)
4056 {
4057 unsigned int qid;
4058 uint8_t hlut_buf[512], lut_mask;
4059 uint32_t *hluts;
4060 size_t i, hluts_num;
4061
4062 lut_mask = (0x01 << sc->sc_rss_table_entry_width) - 1;
4063
4064 for (i = 0; i < sc->sc_rss_table_size; i++) {
4065 qid = i % sc->sc_nqueue_pairs;
4066 hlut_buf[i] = qid & lut_mask;
4067 }
4068
4069 hluts = (uint32_t *)hlut_buf;
4070 hluts_num = sc->sc_rss_table_size >> 2;
4071 for (i = 0; i < hluts_num; i++) {
4072 ixl_wr(sc, I40E_PFQF_HLUT(i), hluts[i]);
4073 }
4074 ixl_flush(sc);
4075 }
4076
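/*
 * Program the whole RSS configuration: the hash key derived from the
 * kernel RSS seed, the per-MAC-type set of packet classification types
 * to hash on (HENA), and the hash lookup table spreading buckets across
 * the active queue pairs.
 */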
4077 static void
4078 ixl_config_rss(struct ixl_softc *sc)
4079 {
4080
4081 KASSERT(mutex_owned(&sc->sc_cfg_lock));
4082
4083 ixl_set_rss_key(sc);
4084 ixl_set_rss_pctype(sc);
4085 ixl_set_rss_hlut(sc);
4086 }
4087
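/*
 * Map a firmware PHY type number (0..63) to the ixl_phy_type_map entry
 * whose bit mask contains it, so it can be reported as an ifmedia type.
 */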
4088 static const struct ixl_phy_type *
4089 ixl_search_phy_type(uint8_t phy_type)
4090 {
4091 const struct ixl_phy_type *itype;
4092 uint64_t mask;
4093 unsigned int i;
4094
4095 if (phy_type >= 64)
4096 return NULL;
4097
4098 mask = 1ULL << phy_type;
4099
4100 for (i = 0; i < __arraycount(ixl_phy_type_map); i++) {
4101 itype = &ixl_phy_type_map[i];
4102
4103 if (ISSET(itype->phy_type, mask))
4104 return itype;
4105 }
4106
4107 return NULL;
4108 }
4109
4110 static uint64_t
4111 ixl_search_link_speed(uint8_t link_speed)
4112 {
4113 const struct ixl_speed_type *type;
4114 unsigned int i;
4115
4116 for (i = 0; i < __arraycount(ixl_speed_type_map); i++) {
4117 type = &ixl_speed_type_map[i];
4118
4119 if (ISSET(type->dev_speed, link_speed))
4120 return type->net_speed;
4121 }
4122
4123 return 0;
4124 }
4125
4126 static int
4127 ixl_restart_an(struct ixl_softc *sc)
4128 {
4129 struct ixl_aq_desc iaq;
4130
4131 memset(&iaq, 0, sizeof(iaq));
4132 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_RESTART_AN);
4133 iaq.iaq_param[0] =
4134 htole32(IXL_AQ_PHY_RESTART_AN | IXL_AQ_PHY_LINK_ENABLE);
4135
4136 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4137 aprint_error_dev(sc->sc_dev, "RESTART AN timeout\n");
4138 return -1;
4139 }
4140 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4141 aprint_error_dev(sc->sc_dev, "RESTART AN error\n");
4142 return -1;
4143 }
4144
4145 return 0;
4146 }
4147
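/*
 * Add a perfect-match MAC/VLAN filter on this PF's switch element via the
 * ADD MACVLAN admin command; the single filter element is passed through
 * the sc_scratch DMA buffer.  Returns the admin queue return code.
 */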
4148 static int
4149 ixl_add_macvlan(struct ixl_softc *sc, const uint8_t *macaddr,
4150 uint16_t vlan, uint16_t flags)
4151 {
4152 struct ixl_aq_desc iaq;
4153 struct ixl_aq_add_macvlan *param;
4154 struct ixl_aq_add_macvlan_elem *elem;
4155
4156 memset(&iaq, 0, sizeof(iaq));
4157 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4158 iaq.iaq_opcode = htole16(IXL_AQ_OP_ADD_MACVLAN);
4159 iaq.iaq_datalen = htole16(sizeof(*elem));
4160 ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));
4161
4162 param = (struct ixl_aq_add_macvlan *)&iaq.iaq_param;
4163 param->num_addrs = htole16(1);
4164 param->seid0 = htole16(0x8000) | sc->sc_seid;
4165 param->seid1 = 0;
4166 param->seid2 = 0;
4167
4168 elem = IXL_DMA_KVA(&sc->sc_scratch);
4169 memset(elem, 0, sizeof(*elem));
4170 memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
4171 elem->flags = htole16(IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH | flags);
4172 elem->vlan = htole16(vlan);
4173
4174 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4175 return IXL_AQ_RC_EINVAL;
4176 }
4177
4178 return le16toh(iaq.iaq_retval);
4179 }
4180
4181 static int
4182 ixl_remove_macvlan(struct ixl_softc *sc, uint8_t *macaddr,
4183 uint16_t vlan, uint16_t flags)
4184 {
4185 struct ixl_aq_desc iaq;
4186 struct ixl_aq_remove_macvlan *param;
4187 struct ixl_aq_remove_macvlan_elem *elem;
4188
4189 memset(&iaq, 0, sizeof(iaq));
4190 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4191 iaq.iaq_opcode = htole16(IXL_AQ_OP_REMOVE_MACVLAN);
4192 iaq.iaq_datalen = htole16(sizeof(*elem));
4193 ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));
4194
4195 param = (struct ixl_aq_remove_macvlan *)&iaq.iaq_param;
4196 param->num_addrs = htole16(1);
4197 param->seid0 = htole16(0x8000) | sc->sc_seid;
4198 param->seid1 = 0;
4199 param->seid2 = 0;
4200
4201 elem = IXL_DMA_KVA(&sc->sc_scratch);
4202 memset(elem, 0, sizeof(*elem));
4203 memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
4204 elem->flags = htole16(IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH | flags);
4205 elem->vlan = htole16(vlan);
4206
4207 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4208 return IXL_AQ_RC_EINVAL;
4209 }
4210
4211 return le16toh(iaq.iaq_retval);
4212 }
4213
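/*
 * Set up the Host Memory Cache backing store: read how many LAN Tx/Rx
 * queue contexts the firmware supports, size each object type from the
 * hardware, allocate DMA memory to back the objects plus a table of page
 * descriptors pointing at it, program that table into the hardware
 * segment descriptors, and finally write the base and count of each
 * object type.
 */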
4214 static int
4215 ixl_hmc(struct ixl_softc *sc)
4216 {
4217 struct {
4218 uint32_t count;
4219 uint32_t minsize;
4220 bus_size_t objsiz;
4221 bus_size_t setoff;
4222 bus_size_t setcnt;
4223 } regs[] = {
4224 {
4225 0,
4226 IXL_HMC_TXQ_MINSIZE,
4227 I40E_GLHMC_LANTXOBJSZ,
4228 I40E_GLHMC_LANTXBASE(sc->sc_pf_id),
4229 I40E_GLHMC_LANTXCNT(sc->sc_pf_id),
4230 },
4231 {
4232 0,
4233 IXL_HMC_RXQ_MINSIZE,
4234 I40E_GLHMC_LANRXOBJSZ,
4235 I40E_GLHMC_LANRXBASE(sc->sc_pf_id),
4236 I40E_GLHMC_LANRXCNT(sc->sc_pf_id),
4237 },
4238 {
4239 0,
4240 0,
4241 I40E_GLHMC_FCOEDDPOBJSZ,
4242 I40E_GLHMC_FCOEDDPBASE(sc->sc_pf_id),
4243 I40E_GLHMC_FCOEDDPCNT(sc->sc_pf_id),
4244 },
4245 {
4246 0,
4247 0,
4248 I40E_GLHMC_FCOEFOBJSZ,
4249 I40E_GLHMC_FCOEFBASE(sc->sc_pf_id),
4250 I40E_GLHMC_FCOEFCNT(sc->sc_pf_id),
4251 },
4252 };
4253 struct ixl_hmc_entry *e;
4254 uint64_t size, dva;
4255 uint8_t *kva;
4256 uint64_t *sdpage;
4257 unsigned int i;
4258 int npages, tables;
4259 uint32_t reg;
4260
4261 CTASSERT(__arraycount(regs) <= __arraycount(sc->sc_hmc_entries));
4262
4263 regs[IXL_HMC_LAN_TX].count = regs[IXL_HMC_LAN_RX].count =
4264 ixl_rd(sc, I40E_GLHMC_LANQMAX);
4265
4266 size = 0;
4267 for (i = 0; i < __arraycount(regs); i++) {
4268 e = &sc->sc_hmc_entries[i];
4269
4270 e->hmc_count = regs[i].count;
4271 reg = ixl_rd(sc, regs[i].objsiz);
4272 e->hmc_size = BIT_ULL(0x3F & reg);
4273 e->hmc_base = size;
4274
4275 if ((e->hmc_size * 8) < regs[i].minsize) {
4276 aprint_error_dev(sc->sc_dev,
4277 "kernel hmc entry is too big\n");
4278 return -1;
4279 }
4280
4281 size += roundup(e->hmc_size * e->hmc_count, IXL_HMC_ROUNDUP);
4282 }
4283 size = roundup(size, IXL_HMC_PGSIZE);
4284 npages = size / IXL_HMC_PGSIZE;
4285
4286 tables = roundup(size, IXL_HMC_L2SZ) / IXL_HMC_L2SZ;
4287
4288 if (ixl_dmamem_alloc(sc, &sc->sc_hmc_pd, size, IXL_HMC_PGSIZE) != 0) {
4289 aprint_error_dev(sc->sc_dev,
4290 "unable to allocate hmc pd memory\n");
4291 return -1;
4292 }
4293
4294 if (ixl_dmamem_alloc(sc, &sc->sc_hmc_sd, tables * IXL_HMC_PGSIZE,
4295 IXL_HMC_PGSIZE) != 0) {
4296 aprint_error_dev(sc->sc_dev,
4297 "unable to allocate hmc sd memory\n");
4298 ixl_dmamem_free(sc, &sc->sc_hmc_pd);
4299 return -1;
4300 }
4301
4302 kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
4303 memset(kva, 0, IXL_DMA_LEN(&sc->sc_hmc_pd));
4304
4305 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
4306 0, IXL_DMA_LEN(&sc->sc_hmc_pd),
4307 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4308
4309 dva = IXL_DMA_DVA(&sc->sc_hmc_pd);
4310 sdpage = IXL_DMA_KVA(&sc->sc_hmc_sd);
4311 memset(sdpage, 0, IXL_DMA_LEN(&sc->sc_hmc_sd));
4312
4313 for (i = 0; (int)i < npages; i++) {
4314 *sdpage = htole64(dva | IXL_HMC_PDVALID);
4315 sdpage++;
4316
4317 dva += IXL_HMC_PGSIZE;
4318 }
4319
4320 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_sd),
4321 0, IXL_DMA_LEN(&sc->sc_hmc_sd),
4322 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4323
4324 dva = IXL_DMA_DVA(&sc->sc_hmc_sd);
4325 for (i = 0; (int)i < tables; i++) {
4326 uint32_t count;
4327
4328 KASSERT(npages >= 0);
4329
4330 count = ((unsigned int)npages > IXL_HMC_PGS) ?
4331 IXL_HMC_PGS : (unsigned int)npages;
4332
4333 ixl_wr(sc, I40E_PFHMC_SDDATAHIGH, dva >> 32);
4334 ixl_wr(sc, I40E_PFHMC_SDDATALOW, dva |
4335 (count << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
4336 (1U << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT));
4337 ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
4338 ixl_wr(sc, I40E_PFHMC_SDCMD,
4339 (1U << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | i);
4340
4341 npages -= IXL_HMC_PGS;
4342 dva += IXL_HMC_PGSIZE;
4343 }
4344
4345 for (i = 0; i < __arraycount(regs); i++) {
4346 e = &sc->sc_hmc_entries[i];
4347
4348 ixl_wr(sc, regs[i].setoff, e->hmc_base / IXL_HMC_ROUNDUP);
4349 ixl_wr(sc, regs[i].setcnt, e->hmc_count);
4350 }
4351
4352 return 0;
4353 }
4354
4355 static void
4356 ixl_hmc_free(struct ixl_softc *sc)
4357 {
4358 ixl_dmamem_free(sc, &sc->sc_hmc_sd);
4359 ixl_dmamem_free(sc, &sc->sc_hmc_pd);
4360 }
4361
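/*
 * Pack a host-order context structure into the bit-packed layout the
 * hardware expects: for every field in the packing table, copy "width"
 * bits from the source offset to the destination bit position (lsb),
 * handling the unaligned head, the whole bytes and the unaligned tail
 * separately.
 */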
4362 static void
4363 ixl_hmc_pack(void *d, const void *s, const struct ixl_hmc_pack *packing,
4364 unsigned int npacking)
4365 {
4366 uint8_t *dst = d;
4367 const uint8_t *src = s;
4368 unsigned int i;
4369
4370 for (i = 0; i < npacking; i++) {
4371 const struct ixl_hmc_pack *pack = &packing[i];
4372 unsigned int offset = pack->lsb / 8;
4373 unsigned int align = pack->lsb % 8;
4374 const uint8_t *in = src + pack->offset;
4375 uint8_t *out = dst + offset;
4376 int width = pack->width;
4377 unsigned int inbits = 0;
4378
4379 if (align) {
4380 inbits = (*in++) << align;
4381 *out++ |= (inbits & 0xff);
4382 inbits >>= 8;
4383
4384 width -= 8 - align;
4385 }
4386
4387 while (width >= 8) {
4388 inbits |= (*in++) << align;
4389 *out++ = (inbits & 0xff);
4390 inbits >>= 8;
4391
4392 width -= 8;
4393 }
4394
4395 if (width > 0) {
4396 inbits |= (*in) << align;
4397 *out |= (inbits & ((1 << width) - 1));
4398 }
4399 }
4400 }
4401
4402 static struct ixl_aq_buf *
4403 ixl_aqb_alloc(struct ixl_softc *sc)
4404 {
4405 struct ixl_aq_buf *aqb;
4406
4407 aqb = malloc(sizeof(*aqb), M_DEVBUF, M_WAITOK);
4408 if (aqb == NULL)
4409 return NULL;
4410
4411 aqb->aqb_size = IXL_AQ_BUFLEN;
4412
4413 if (bus_dmamap_create(sc->sc_dmat, aqb->aqb_size, 1,
4414 aqb->aqb_size, 0,
4415 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &aqb->aqb_map) != 0)
4416 goto free;
4417 if (bus_dmamem_alloc(sc->sc_dmat, aqb->aqb_size,
4418 IXL_AQ_ALIGN, 0, &aqb->aqb_seg, 1, &aqb->aqb_nsegs,
4419 BUS_DMA_WAITOK) != 0)
4420 goto destroy;
4421 if (bus_dmamem_map(sc->sc_dmat, &aqb->aqb_seg, aqb->aqb_nsegs,
4422 aqb->aqb_size, &aqb->aqb_data, BUS_DMA_WAITOK) != 0)
4423 goto dma_free;
4424 if (bus_dmamap_load(sc->sc_dmat, aqb->aqb_map, aqb->aqb_data,
4425 aqb->aqb_size, NULL, BUS_DMA_WAITOK) != 0)
4426 goto unmap;
4427
4428 return aqb;
4429 unmap:
4430 bus_dmamem_unmap(sc->sc_dmat, aqb->aqb_data, aqb->aqb_size);
4431 dma_free:
4432 bus_dmamem_free(sc->sc_dmat, &aqb->aqb_seg, 1);
4433 destroy:
4434 bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
4435 free:
4436 free(aqb, M_DEVBUF);
4437
4438 return NULL;
4439 }
4440
4441 static void
4442 ixl_aqb_free(struct ixl_softc *sc, struct ixl_aq_buf *aqb)
4443 {
4444 bus_dmamap_unload(sc->sc_dmat, aqb->aqb_map);
4445 bus_dmamem_unmap(sc->sc_dmat, aqb->aqb_data, aqb->aqb_size);
4446 bus_dmamem_free(sc->sc_dmat, &aqb->aqb_seg, 1);
4447 bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
4448 free(aqb, M_DEVBUF);
4449 }
4450
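/*
 * Refill the admin receive queue: attach an IXL_AQ_BUFLEN DMA buffer to
 * every unused descriptor between the producer and consumer indices,
 * reusing idle buffers before allocating new ones, and bump the hardware
 * tail register if anything was posted.
 */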
4451 static int
4452 ixl_arq_fill(struct ixl_softc *sc)
4453 {
4454 struct ixl_aq_buf *aqb;
4455 struct ixl_aq_desc *arq, *iaq;
4456 unsigned int prod = sc->sc_arq_prod;
4457 unsigned int n;
4458 int post = 0;
4459
4460 n = ixl_rxr_unrefreshed(sc->sc_arq_prod, sc->sc_arq_cons,
4461 IXL_AQ_NUM);
4462 arq = IXL_DMA_KVA(&sc->sc_arq);
4463
4464 	if (__predict_false(n == 0))
4465 return 0;
4466
4467 do {
4468 aqb = sc->sc_arq_live[prod];
4469 iaq = &arq[prod];
4470
4471 if (aqb == NULL) {
4472 aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle);
4473 if (aqb != NULL) {
4474 SIMPLEQ_REMOVE(&sc->sc_arq_idle, aqb,
4475 ixl_aq_buf, aqb_entry);
4476 } else if ((aqb = ixl_aqb_alloc(sc)) == NULL) {
4477 break;
4478 }
4479
4480 sc->sc_arq_live[prod] = aqb;
4481 memset(aqb->aqb_data, 0, aqb->aqb_size);
4482
4483 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0,
4484 aqb->aqb_size, BUS_DMASYNC_PREREAD);
4485
4486 iaq->iaq_flags = htole16(IXL_AQ_BUF |
4487 (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ?
4488 IXL_AQ_LB : 0));
4489 iaq->iaq_opcode = 0;
4490 iaq->iaq_datalen = htole16(aqb->aqb_size);
4491 iaq->iaq_retval = 0;
4492 iaq->iaq_cookie = 0;
4493 iaq->iaq_param[0] = 0;
4494 iaq->iaq_param[1] = 0;
4495 ixl_aq_dva(iaq, aqb->aqb_map->dm_segs[0].ds_addr);
4496 }
4497
4498 prod++;
4499 prod &= IXL_AQ_MASK;
4500
4501 post = 1;
4502
4503 } while (--n);
4504
4505 if (post) {
4506 sc->sc_arq_prod = prod;
4507 ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
4508 }
4509
4510 return post;
4511 }
4512
4513 static void
4514 ixl_arq_unfill(struct ixl_softc *sc)
4515 {
4516 struct ixl_aq_buf *aqb;
4517 unsigned int i;
4518
4519 for (i = 0; i < __arraycount(sc->sc_arq_live); i++) {
4520 aqb = sc->sc_arq_live[i];
4521 if (aqb == NULL)
4522 continue;
4523
4524 sc->sc_arq_live[i] = NULL;
4525 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, aqb->aqb_size,
4526 BUS_DMASYNC_POSTREAD);
4527 ixl_aqb_free(sc, aqb);
4528 }
4529
4530 while ((aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle)) != NULL) {
4531 SIMPLEQ_REMOVE(&sc->sc_arq_idle, aqb,
4532 ixl_aq_buf, aqb_entry);
4533 ixl_aqb_free(sc, aqb);
4534 }
4535 }
4536
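/*
 * Quiesce the function before (re)initialization: read how many
 * interrupts, LAN queues and VFs belong to this PF, mask all interrupt
 * causes and terminate the interrupt linked lists, pre-announce the Tx
 * queue disables, then disable every Tx and Rx queue and give the
 * hardware time to settle.
 */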
4537 static void
4538 ixl_clear_hw(struct ixl_softc *sc)
4539 {
4540 uint32_t num_queues, base_queue;
4541 uint32_t num_pf_int;
4542 uint32_t num_vf_int;
4543 uint32_t num_vfs;
4544 uint32_t i, j;
4545 uint32_t val;
4546 uint32_t eol = 0x7ff;
4547
4548 /* get number of interrupts, queues, and vfs */
4549 val = ixl_rd(sc, I40E_GLPCI_CNF2);
4550 num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
4551 I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
4552 num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
4553 I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
4554
4555 val = ixl_rd(sc, I40E_PFLAN_QALLOC);
4556 base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
4557 I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
4558 j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
4559 I40E_PFLAN_QALLOC_LASTQ_SHIFT;
4560 if (val & I40E_PFLAN_QALLOC_VALID_MASK)
4561 num_queues = (j - base_queue) + 1;
4562 else
4563 num_queues = 0;
4564
4565 val = ixl_rd(sc, I40E_PF_VT_PFALLOC);
4566 i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
4567 I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
4568 j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
4569 I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
4570 if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
4571 num_vfs = (j - i) + 1;
4572 else
4573 num_vfs = 0;
4574
4575 /* stop all the interrupts */
4576 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0);
4577 ixl_flush(sc);
4578 val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
4579 for (i = 0; i < num_pf_int - 2; i++)
4580 ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), val);
4581 ixl_flush(sc);
4582
4583 /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
4584 val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4585 ixl_wr(sc, I40E_PFINT_LNKLST0, val);
4586 for (i = 0; i < num_pf_int - 2; i++)
4587 ixl_wr(sc, I40E_PFINT_LNKLSTN(i), val);
4588 val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4589 for (i = 0; i < num_vfs; i++)
4590 ixl_wr(sc, I40E_VPINT_LNKLST0(i), val);
4591 for (i = 0; i < num_vf_int - 2; i++)
4592 ixl_wr(sc, I40E_VPINT_LNKLSTN(i), val);
4593
4594 /* warn the HW of the coming Tx disables */
4595 for (i = 0; i < num_queues; i++) {
4596 uint32_t abs_queue_idx = base_queue + i;
4597 uint32_t reg_block = 0;
4598
4599 if (abs_queue_idx >= 128) {
4600 reg_block = abs_queue_idx / 128;
4601 abs_queue_idx %= 128;
4602 }
4603
4604 val = ixl_rd(sc, I40E_GLLAN_TXPRE_QDIS(reg_block));
4605 val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
4606 val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
4607 val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
4608
4609 ixl_wr(sc, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
4610 }
4611 delaymsec(400);
4612
4613 /* stop all the queues */
4614 for (i = 0; i < num_queues; i++) {
4615 ixl_wr(sc, I40E_QINT_TQCTL(i), 0);
4616 ixl_wr(sc, I40E_QTX_ENA(i), 0);
4617 ixl_wr(sc, I40E_QINT_RQCTL(i), 0);
4618 ixl_wr(sc, I40E_QRX_ENA(i), 0);
4619 }
4620
4621 /* short wait for all queue disables to settle */
4622 delaymsec(50);
4623 }
4624
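/*
 * Perform a PF reset: first wait out any global reset that may be in
 * progress (the GRST delay is read from the hardware in 100ms units),
 * then wait for the firmware to report its NVM configuration as loaded,
 * and only if no global reset was seen issue a PF software reset and
 * poll for its completion.
 */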
4625 static int
4626 ixl_pf_reset(struct ixl_softc *sc)
4627 {
4628 uint32_t cnt = 0;
4629 uint32_t cnt1 = 0;
4630 uint32_t reg = 0, reg0 = 0;
4631 uint32_t grst_del;
4632
4633 /*
4634 * Poll for Global Reset steady state in case of recent GRST.
4635 * The grst delay value is in 100ms units, and we'll wait a
4636 * couple counts longer to be sure we don't just miss the end.
4637 */
4638 grst_del = ixl_rd(sc, I40E_GLGEN_RSTCTL);
4639 grst_del &= I40E_GLGEN_RSTCTL_GRSTDEL_MASK;
4640 grst_del >>= I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
4641
4642 grst_del = grst_del * 20;
4643
4644 for (cnt = 0; cnt < grst_del; cnt++) {
4645 reg = ixl_rd(sc, I40E_GLGEN_RSTAT);
4646 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
4647 break;
4648 delaymsec(100);
4649 }
4650 if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
4651 aprint_error(", Global reset polling failed to complete\n");
4652 return -1;
4653 }
4654
4655 /* Now Wait for the FW to be ready */
4656 for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
4657 reg = ixl_rd(sc, I40E_GLNVM_ULD);
4658 reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
4659 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
4660 if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
4661 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))
4662 break;
4663
4664 delaymsec(10);
4665 }
4666 if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
4667 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
4668 aprint_error(", wait for FW Reset complete timed out "
4669 "(I40E_GLNVM_ULD = 0x%x)\n", reg);
4670 return -1;
4671 }
4672
4673 /*
4674 * If there was a Global Reset in progress when we got here,
4675 * we don't need to do the PF Reset
4676 */
4677 if (cnt == 0) {
4678 reg = ixl_rd(sc, I40E_PFGEN_CTRL);
4679 ixl_wr(sc, I40E_PFGEN_CTRL, reg | I40E_PFGEN_CTRL_PFSWR_MASK);
4680 for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) {
4681 reg = ixl_rd(sc, I40E_PFGEN_CTRL);
4682 if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
4683 break;
4684 delaymsec(1);
4685
4686 reg0 = ixl_rd(sc, I40E_GLGEN_RSTAT);
4687 if (reg0 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
4688 aprint_error(", Core reset upcoming."
4689 				    " Skipping PF reset request\n");
4690 return -1;
4691 }
4692 }
4693 if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
4694 			aprint_error(", PF reset polling failed to complete "
4695 			    "(I40E_PFGEN_CTRL = 0x%x)\n", reg);
4696 return -1;
4697 }
4698 }
4699
4700 return 0;
4701 }
4702
4703 static int
4704 ixl_dmamem_alloc(struct ixl_softc *sc, struct ixl_dmamem *ixm,
4705 bus_size_t size, bus_size_t align)
4706 {
4707 ixm->ixm_size = size;
4708
4709 if (bus_dmamap_create(sc->sc_dmat, ixm->ixm_size, 1,
4710 ixm->ixm_size, 0,
4711 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
4712 &ixm->ixm_map) != 0)
4713 return 1;
4714 if (bus_dmamem_alloc(sc->sc_dmat, ixm->ixm_size,
4715 align, 0, &ixm->ixm_seg, 1, &ixm->ixm_nsegs,
4716 BUS_DMA_WAITOK) != 0)
4717 goto destroy;
4718 if (bus_dmamem_map(sc->sc_dmat, &ixm->ixm_seg, ixm->ixm_nsegs,
4719 ixm->ixm_size, &ixm->ixm_kva, BUS_DMA_WAITOK) != 0)
4720 goto free;
4721 if (bus_dmamap_load(sc->sc_dmat, ixm->ixm_map, ixm->ixm_kva,
4722 ixm->ixm_size, NULL, BUS_DMA_WAITOK) != 0)
4723 goto unmap;
4724
4725 memset(ixm->ixm_kva, 0, ixm->ixm_size);
4726
4727 return 0;
4728 unmap:
4729 bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
4730 free:
4731 bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
4732 destroy:
4733 bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
4734 return 1;
4735 }
4736
4737 static void
4738 ixl_dmamem_free(struct ixl_softc *sc, struct ixl_dmamem *ixm)
4739 {
4740 bus_dmamap_unload(sc->sc_dmat, ixm->ixm_map);
4741 bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
4742 bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
4743 bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
4744 }
4745
4746 static int
4747 ixl_set_macvlan(struct ixl_softc *sc)
4748 {
4749 int error, rv = 0;
4750
4751 /* remove default mac filter and replace it so we can see vlans */
4752
4753 error = ixl_remove_macvlan(sc, sc->sc_enaddr, 0, 0);
4754 if (error != IXL_AQ_RC_OK) {
4755 aprint_debug_dev(sc->sc_dev, "unable to remove macvlan\n");
4756 rv = -1;
4757 }
4758
4759 error = ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
4760 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
4761 if (error != IXL_AQ_RC_OK && error != IXL_AQ_RC_ENOENT) {
4762 aprint_debug_dev(sc->sc_dev,
4763 		    "unable to remove macvlan (IGNORE_VLAN)\n");
4764 rv = -1;
4765 }
4766
4767 error = ixl_add_macvlan(sc, sc->sc_enaddr, 0,
4768 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
4769 if (error != IXL_AQ_RC_OK) {
4770 aprint_debug_dev(sc->sc_dev, "unable to add mac address\n");
4771 rv = -1;
4772 }
4773
4774 error = ixl_add_macvlan(sc, etherbroadcastaddr, 0,
4775 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
4776 if (error != IXL_AQ_RC_OK) {
4777 aprint_debug_dev(sc->sc_dev,
4778 "unable to add broadcast mac address\n");
4779 rv = -1;
4780 }
4781
4782 return rv;
4783 }
4784
4785 static int
4786 ixl_ifflags_cb(struct ethercom *ec)
4787 {
4788
4789 return 0;
4790 }
4791
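/*
 * Translate a GET LINK STATUS admin queue reply into the ifmedia active
 * and status words and the interface baudrate, and return the resulting
 * link state (up or down) to the caller.
 */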
4792 static int
4793 ixl_set_link_status(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
4794 {
4795 const struct ixl_aq_link_status *status;
4796 const struct ixl_phy_type *itype;
4797
4798 uint64_t ifm_active = IFM_ETHER;
4799 uint64_t ifm_status = IFM_AVALID;
4800 int link_state = LINK_STATE_DOWN;
4801 uint64_t baudrate = 0;
4802
4803 status = (const struct ixl_aq_link_status *)iaq->iaq_param;
4804 if (!ISSET(status->link_info, IXL_AQ_LINK_UP_FUNCTION))
4805 goto done;
4806
4807 ifm_active |= IFM_FDX;
4808 ifm_status |= IFM_ACTIVE;
4809 link_state = LINK_STATE_UP;
4810
4811 itype = ixl_search_phy_type(status->phy_type);
4812 if (itype != NULL)
4813 ifm_active |= itype->ifm_type;
4814
4815 if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_TX))
4816 ifm_active |= IFM_ETH_TXPAUSE;
4817 if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_RX))
4818 ifm_active |= IFM_ETH_RXPAUSE;
4819
4820 baudrate = ixl_search_link_speed(status->link_speed);
4821
4822 done:
4823 /* NET_ASSERT_LOCKED() except during attach */
4824 sc->sc_media_active = ifm_active;
4825 sc->sc_media_status = ifm_status;
4826
4827 sc->sc_ec.ec_if.if_baudrate = baudrate;
4828
4829 return link_state;
4830 }
4831
4832 static int
4833 ixl_establish_intx(struct ixl_softc *sc)
4834 {
4835 pci_chipset_tag_t pc = sc->sc_pa.pa_pc;
4836 pci_intr_handle_t *intr;
4837 char xnamebuf[32];
4838 char intrbuf[PCI_INTRSTR_LEN];
4839 char const *intrstr;
4840
4841 KASSERT(sc->sc_nintrs == 1);
4842
4843 intr = &sc->sc_ihp[0];
4844
4845 intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf));
4846 snprintf(xnamebuf, sizeof(xnamebuf), "%s:legacy",
4847 device_xname(sc->sc_dev));
4848
4849 sc->sc_ihs[0] = pci_intr_establish_xname(pc, *intr, IPL_NET, ixl_intr,
4850 sc, xnamebuf);
4851
4852 if (sc->sc_ihs[0] == NULL) {
4853 aprint_error_dev(sc->sc_dev,
4854 "unable to establish interrupt at %s\n", intrstr);
4855 return -1;
4856 }
4857
4858 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
4859 return 0;
4860 }
4861
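/*
 * Establish the MSI-X handlers: vector 0 services the admin queue and
 * other miscellaneous events, and the following sc_nqueue_pairs_max
 * vectors each service one Tx/Rx queue pair.
 */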
4862 static int
4863 ixl_establish_msix(struct ixl_softc *sc)
4864 {
4865 pci_chipset_tag_t pc = sc->sc_pa.pa_pc;
4866 unsigned int vector = 0;
4867 unsigned int i;
4868 char xnamebuf[32];
4869 char intrbuf[PCI_INTRSTR_LEN];
4870 char const *intrstr;
4871
4872 /* the "other" intr is mapped to vector 0 */
4873 vector = 0;
4874 intrstr = pci_intr_string(pc, sc->sc_ihp[vector],
4875 intrbuf, sizeof(intrbuf));
4876 snprintf(xnamebuf, sizeof(xnamebuf), "%s others",
4877 device_xname(sc->sc_dev));
4878 sc->sc_ihs[vector] = pci_intr_establish_xname(pc,
4879 sc->sc_ihp[vector], IPL_NET, ixl_other_intr,
4880 sc, xnamebuf);
4881 if (sc->sc_ihs[vector] == NULL) {
4882 aprint_error_dev(sc->sc_dev,
4883 "unable to establish interrupt at %s\n", intrstr);
4884 goto fail;
4885 }
4886 vector++;
4887 aprint_normal_dev(sc->sc_dev, "interrupt at %s\n", intrstr);
4888
4889 sc->sc_msix_vector_queue = vector;
4890
4891 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
4892 intrstr = pci_intr_string(pc, sc->sc_ihp[vector],
4893 intrbuf, sizeof(intrbuf));
4894 snprintf(xnamebuf, sizeof(xnamebuf), "%s TXRX%d",
4895 device_xname(sc->sc_dev), i);
4896
4897 sc->sc_ihs[vector] = pci_intr_establish_xname(pc,
4898 sc->sc_ihp[vector], IPL_NET, ixl_queue_intr,
4899 (void *)&sc->sc_qps[i], xnamebuf);
4900
4901 if (sc->sc_ihs[vector] == NULL) {
4902 aprint_error_dev(sc->sc_dev,
4903 "unable to establish interrupt at %s\n", intrstr);
4904 goto fail;
4905 }
4906 vector++;
4907 aprint_normal_dev(sc->sc_dev,
4908 "interrupt at %s\n", intrstr);
4909 }
4910
4911 return 0;
4912 fail:
4913 for (i = 0; i < vector; i++) {
4914 pci_intr_disestablish(pc, sc->sc_ihs[i]);
4915 }
4916
4917 sc->sc_msix_vector_queue = 0;
4919
4920 return -1;
4921 }
4922
4923 static void
4924 ixl_set_affinity_msix(struct ixl_softc *sc)
4925 {
4926 kcpuset_t *affinity;
4927 pci_chipset_tag_t pc = sc->sc_pa.pa_pc;
4928 int affinity_to, r;
4929 unsigned int i, vector;
4930 char intrbuf[PCI_INTRSTR_LEN];
4931 char const *intrstr;
4932
4933 affinity_to = 0;
4934 kcpuset_create(&affinity, false);
4935
4936 vector = sc->sc_msix_vector_queue;
4937
4938 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
4939 affinity_to = i % ncpu;
4940
4941 kcpuset_zero(affinity);
4942 kcpuset_set(affinity, affinity_to);
4943
4944 intrstr = pci_intr_string(pc, sc->sc_ihp[vector + i],
4945 intrbuf, sizeof(intrbuf));
4946 r = interrupt_distribute(sc->sc_ihs[vector + i],
4947 affinity, NULL);
4948 if (r == 0) {
4949 aprint_normal_dev(sc->sc_dev,
4950 "for TXRX%u interrupting at %s affinity to %u\n",
4951 i, intrstr, affinity_to);
4952 } else {
4953 aprint_normal_dev(sc->sc_dev,
4954 "for TXRX%u interrupting at %s\n",
4955 i, intrstr);
4956 }
4957 }
4958
4959 vector = 0; /* vector 0 means "other" interrupt */
4960 affinity_to = (affinity_to + 1) % ncpu;
4961 kcpuset_zero(affinity);
4962 kcpuset_set(affinity, affinity_to);
4963
4964 intrstr = pci_intr_string(pc, sc->sc_ihp[vector],
4965 intrbuf, sizeof(intrbuf));
4966 r = interrupt_distribute(sc->sc_ihs[vector], affinity, NULL);
4967 if (r == 0) {
4968 aprint_normal_dev(sc->sc_dev,
4969 "for other interrupting at %s affinity to %u\n",
4970 intrstr, affinity_to);
4971 } else {
4972 aprint_normal_dev(sc->sc_dev,
4973 		    "for other interrupting at %s\n", intrstr);
4974 }
4975
4976 kcpuset_destroy(affinity);
4977 }
4978
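/*
 * Wire the queues to interrupt vectors in the hardware: for each queue
 * pair, link the Rx and Tx queues into the vector's interrupt linked
 * list and enable their interrupt causes.  With MSI-X each pair gets its
 * own vector; otherwise everything is funnelled through the single
 * shared vector.
 */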
4979 static void
4980 ixl_config_queue_intr(struct ixl_softc *sc)
4981 {
4982 unsigned int i, vector;
4983
4984 if (sc->sc_intrtype == PCI_INTR_TYPE_MSIX) {
4985 vector = sc->sc_msix_vector_queue;
4986 } else {
4987 vector = I40E_INTR_NOTX_INTR;
4988
4989 ixl_wr(sc, I40E_PFINT_LNKLST0,
4990 (I40E_INTR_NOTX_QUEUE <<
4991 I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
4992 (I40E_QUEUE_TYPE_RX <<
4993 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
4994 }
4995
4996 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
4997 ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), 0);
4998 ixl_flush(sc);
4999
5000 ixl_wr(sc, I40E_PFINT_LNKLSTN(i),
5001 ((i) << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
5002 (I40E_QUEUE_TYPE_RX <<
5003 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
5004
5005 ixl_wr(sc, I40E_QINT_RQCTL(i),
5006 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
5007 (I40E_ITR_INDEX_RX <<
5008 I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
5009 (I40E_INTR_NOTX_RX_QUEUE <<
5010 I40E_QINT_RQCTL_MSIX0_INDX_SHIFT) |
5011 (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
5012 (I40E_QUEUE_TYPE_TX <<
5013 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
5014 I40E_QINT_RQCTL_CAUSE_ENA_MASK);
5015
5016 ixl_wr(sc, I40E_QINT_TQCTL(i),
5017 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
5018 (I40E_ITR_INDEX_TX <<
5019 I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
5020 (I40E_INTR_NOTX_TX_QUEUE <<
5021 I40E_QINT_TQCTL_MSIX0_INDX_SHIFT) |
5022 (I40E_QUEUE_TYPE_EOL <<
5023 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
5024 (I40E_QUEUE_TYPE_RX <<
5025 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) |
5026 I40E_QINT_TQCTL_CAUSE_ENA_MASK);
5027
5028 if (sc->sc_intrtype == PCI_INTR_TYPE_MSIX)
5029 vector++;
5030 }
5031 ixl_flush(sc);
5032
5033 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_RX), 0x7a);
5034 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_TX), 0x7a);
5035 ixl_flush(sc);
5036 }
5037
5038 static void
5039 ixl_config_other_intr(struct ixl_softc *sc)
5040 {
5041 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0);
5042 (void)ixl_rd(sc, I40E_PFINT_ICR0);
5043
5044 ixl_wr(sc, I40E_PFINT_ICR0_ENA,
5045 I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
5046 I40E_PFINT_ICR0_ENA_GRST_MASK |
5047 I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
5048 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
5049 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
5050 I40E_PFINT_ICR0_ENA_VFLR_MASK |
5051 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
5052 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
5053 I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK);
5054
5055 ixl_wr(sc, I40E_PFINT_LNKLST0, 0x7FF);
5056 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_OTHER), 0);
5057 ixl_wr(sc, I40E_PFINT_STAT_CTL0,
5058 (I40E_ITR_INDEX_OTHER <<
5059 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT));
5060 ixl_flush(sc);
5061 }
5062
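/*
 * Allocate and establish interrupts, preferring MSI-X with one vector
 * per queue pair plus one for other events, and retrying with INTx if
 * the MSI-X handlers cannot be established.
 */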
5063 static int
5064 ixl_setup_interrupts(struct ixl_softc *sc)
5065 {
5066 struct pci_attach_args *pa = &sc->sc_pa;
5067 pci_intr_type_t max_type, intr_type;
5068 int counts[PCI_INTR_TYPE_SIZE];
5069 int error;
5070 unsigned int i;
5071 bool retry, nomsix = IXL_NOMSIX;
5072
5073 memset(counts, 0, sizeof(counts));
5074 max_type = PCI_INTR_TYPE_MSIX;
5075 /* QPs + other interrupt */
5076 counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueue_pairs_max + 1;
5077 counts[PCI_INTR_TYPE_INTX] = 1;
5078
5079 if (nomsix)
5080 counts[PCI_INTR_TYPE_MSIX] = 0;
5081
5082 do {
5083 retry = false;
5084 error = pci_intr_alloc(pa, &sc->sc_ihp, counts, max_type);
5085 if (error != 0) {
5086 aprint_error_dev(sc->sc_dev,
5087 "couldn't map interrupt\n");
5088 break;
5089 }
5090 		intr_type = pci_intr_type(pa->pa_pc, sc->sc_ihp[0]);
5091 		sc->sc_nintrs = counts[intr_type];
5092 		KASSERT(sc->sc_nintrs > 0);
5093 
5094 		for (i = 0; i < sc->sc_nintrs; i++) {
5095 			pci_intr_setattr(pa->pa_pc, &sc->sc_ihp[i],
5096 			    PCI_INTR_MPSAFE, true);
5097 		}
5098
5099 sc->sc_ihs = kmem_alloc(sizeof(sc->sc_ihs[0]) * sc->sc_nintrs,
5100 KM_SLEEP);
5101
5102 if (intr_type == PCI_INTR_TYPE_MSIX) {
5103 error = ixl_establish_msix(sc);
5104 if (error) {
5105 counts[PCI_INTR_TYPE_MSIX] = 0;
5106 retry = true;
5107 } else {
5108 ixl_set_affinity_msix(sc);
5109 }
5110 } else if (intr_type == PCI_INTR_TYPE_INTX) {
5111 error = ixl_establish_intx(sc);
5112 } else {
5113 error = -1;
5114 }
5115
5116 if (error) {
5117 kmem_free(sc->sc_ihs,
5118 sizeof(sc->sc_ihs[0]) * sc->sc_nintrs);
5119 pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs);
5120 } else {
5121 sc->sc_intrtype = intr_type;
5122 }
5123 } while (retry);
5124
5125 return error;
5126 }
5127
5128 static void
5129 ixl_teardown_interrupts(struct ixl_softc *sc)
5130 {
5131 struct pci_attach_args *pa = &sc->sc_pa;
5132 unsigned int i;
5133
5134 for (i = 0; i < sc->sc_nintrs; i++) {
5135 pci_intr_disestablish(pa->pa_pc, sc->sc_ihs[i]);
5136 }
5137
5138 pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs);
5139
5140 kmem_free(sc->sc_ihs, sizeof(sc->sc_ihs[0]) * sc->sc_nintrs);
5141 sc->sc_ihs = NULL;
5142 sc->sc_nintrs = 0;
5143 }
5144
5145 static int
5146 ixl_setup_stats(struct ixl_softc *sc)
5147 {
5148 struct ixl_queue_pair *qp;
5149 struct ixl_tx_ring *txr;
5150 struct ixl_rx_ring *rxr;
5151 unsigned int i;
5152
5153 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
5154 qp = &sc->sc_qps[i];
5155 txr = qp->qp_txr;
5156 rxr = qp->qp_rxr;
5157
5158 evcnt_attach_dynamic(&txr->txr_defragged, EVCNT_TYPE_MISC,
5159 		    NULL, qp->qp_name, "m_defrag succeeded");
5160 		evcnt_attach_dynamic(&txr->txr_defrag_failed, EVCNT_TYPE_MISC,
5161 		    NULL, qp->qp_name, "m_defrag failed");
5162 evcnt_attach_dynamic(&txr->txr_pcqdrop, EVCNT_TYPE_MISC,
5163 NULL, qp->qp_name, "Dropped in pcq");
5164 evcnt_attach_dynamic(&txr->txr_transmitdef, EVCNT_TYPE_MISC,
5165 NULL, qp->qp_name, "Deferred transmit");
5166 evcnt_attach_dynamic(&txr->txr_intr, EVCNT_TYPE_INTR,
5167 NULL, qp->qp_name, "Interrupt on queue");
5168 evcnt_attach_dynamic(&txr->txr_defer, EVCNT_TYPE_MISC,
5169 NULL, qp->qp_name, "Handled queue in softint/workqueue");
5170
5171 evcnt_attach_dynamic(&rxr->rxr_mgethdr_failed, EVCNT_TYPE_MISC,
5172 NULL, qp->qp_name, "MGETHDR failed");
5173 evcnt_attach_dynamic(&rxr->rxr_mgetcl_failed, EVCNT_TYPE_MISC,
5174 NULL, qp->qp_name, "MCLGET failed");
5175 evcnt_attach_dynamic(&rxr->rxr_mbuf_load_failed,
5176 EVCNT_TYPE_MISC, NULL, qp->qp_name,
5177 "bus_dmamap_load_mbuf failed");
5178 evcnt_attach_dynamic(&rxr->rxr_intr, EVCNT_TYPE_INTR,
5179 NULL, qp->qp_name, "Interrupt on queue");
5180 evcnt_attach_dynamic(&rxr->rxr_defer, EVCNT_TYPE_MISC,
5181 NULL, qp->qp_name, "Handled queue in softint/workqueue");
5182 }
5183
5184 evcnt_attach_dynamic(&sc->sc_event_atq, EVCNT_TYPE_INTR,
5185 NULL, device_xname(sc->sc_dev), "Interrupt for other events");
5186 evcnt_attach_dynamic(&sc->sc_event_link, EVCNT_TYPE_MISC,
5187 NULL, device_xname(sc->sc_dev), "Link status event");
5188 evcnt_attach_dynamic(&sc->sc_event_ecc_err, EVCNT_TYPE_MISC,
5189 NULL, device_xname(sc->sc_dev), "ECC error");
5190 evcnt_attach_dynamic(&sc->sc_event_pci_exception, EVCNT_TYPE_MISC,
5191 NULL, device_xname(sc->sc_dev), "PCI exception");
5192 evcnt_attach_dynamic(&sc->sc_event_crit_err, EVCNT_TYPE_MISC,
5193 NULL, device_xname(sc->sc_dev), "Critical error");
5194
5195 return 0;
5196 }
5197
5198 static void
5199 ixl_teardown_stats(struct ixl_softc *sc)
5200 {
5201 struct ixl_tx_ring *txr;
5202 struct ixl_rx_ring *rxr;
5203 unsigned int i;
5204
5205 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
5206 txr = sc->sc_qps[i].qp_txr;
5207 rxr = sc->sc_qps[i].qp_rxr;
5208
5209 evcnt_detach(&txr->txr_defragged);
5210 evcnt_detach(&txr->txr_defrag_failed);
5211 evcnt_detach(&txr->txr_pcqdrop);
5212 evcnt_detach(&txr->txr_transmitdef);
5213 evcnt_detach(&txr->txr_intr);
5214 evcnt_detach(&txr->txr_defer);
5215
5216 evcnt_detach(&rxr->rxr_mgethdr_failed);
5217 evcnt_detach(&rxr->rxr_mgetcl_failed);
5218 evcnt_detach(&rxr->rxr_mbuf_load_failed);
5219 evcnt_detach(&rxr->rxr_intr);
5220 evcnt_detach(&rxr->rxr_defer);
5221 }
5222
5223 evcnt_detach(&sc->sc_event_atq);
5224 evcnt_detach(&sc->sc_event_link);
5225 evcnt_detach(&sc->sc_event_ecc_err);
5226 evcnt_detach(&sc->sc_event_pci_exception);
5227 evcnt_detach(&sc->sc_event_crit_err);
5228 }
5229
5230 static int
5231 ixl_setup_sysctls(struct ixl_softc *sc)
5232 {
5233 const char *devname;
5234 struct sysctllog **log;
5235 const struct sysctlnode *rnode, *rxnode, *txnode;
5236 int error;
5237
5238 log = &sc->sc_sysctllog;
5239 devname = device_xname(sc->sc_dev);
5240
5241 error = sysctl_createv(log, 0, NULL, &rnode,
5242 0, CTLTYPE_NODE, devname,
5243 SYSCTL_DESCR("ixl information and settings"),
5244 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
5245 if (error)
5246 goto out;
5247
5248 error = sysctl_createv(log, 0, &rnode, NULL,
5249 CTLFLAG_READWRITE, CTLTYPE_BOOL, "txrx_workqueue",
5250 SYSCTL_DESCR("Use workqueue for packet processing"),
5251 NULL, 0, &sc->sc_txrx_workqueue, 0, CTL_CREATE, CTL_EOL);
5252 if (error)
5253 goto out;
5254
5255 error = sysctl_createv(log, 0, &rnode, &rxnode,
5256 0, CTLTYPE_NODE, "rx",
5257 SYSCTL_DESCR("ixl information and settings for Rx"),
5258 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
5259 if (error)
5260 goto out;
5261
5262 error = sysctl_createv(log, 0, &rxnode, NULL,
5263 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
5264 SYSCTL_DESCR("max number of Rx packets"
5265 " to process for interrupt processing"),
5266 NULL, 0, &sc->sc_rx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
5267 if (error)
5268 goto out;
5269
5270 error = sysctl_createv(log, 0, &rxnode, NULL,
5271 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
5272 SYSCTL_DESCR("max number of Rx packets"
5273 " to process for deferred processing"),
5274 NULL, 0, &sc->sc_rx_process_limit, 0, CTL_CREATE, CTL_EOL);
5275 if (error)
5276 goto out;
5277
5278 error = sysctl_createv(log, 0, &rnode, &txnode,
5279 0, CTLTYPE_NODE, "tx",
5280 SYSCTL_DESCR("ixl information and settings for Tx"),
5281 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
5282 if (error)
5283 goto out;
5284
5285 error = sysctl_createv(log, 0, &txnode, NULL,
5286 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
5287 SYSCTL_DESCR("max number of Tx packets"
5288 " to process for interrupt processing"),
5289 NULL, 0, &sc->sc_tx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
5290 if (error)
5291 goto out;
5292
5293 error = sysctl_createv(log, 0, &txnode, NULL,
5294 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
5295 SYSCTL_DESCR("max number of Tx packets"
5296 " to process for deferred processing"),
5297 NULL, 0, &sc->sc_tx_process_limit, 0, CTL_CREATE, CTL_EOL);
5298 if (error)
5299 goto out;
5300
5301 out:
5302 if (error) {
5303 aprint_error_dev(sc->sc_dev,
5304 "unable to create sysctl node\n");
5305 sysctl_teardown(log);
5306 }
5307
5308 return error;
5309 }
5310
5311 static void
5312 ixl_teardown_sysctls(struct ixl_softc *sc)
5313 {
5314
5315 sysctl_teardown(&sc->sc_sysctllog);
5316 }
5317
5318 static struct workqueue *
5319 ixl_workq_create(const char *name, pri_t prio, int ipl, int flags)
5320 {
5321 struct workqueue *wq;
5322 int error;
5323
5324 error = workqueue_create(&wq, name, ixl_workq_work, NULL,
5325 prio, ipl, flags);
5326
5327 if (error)
5328 return NULL;
5329
5330 return wq;
5331 }
5332
5333 static void
5334 ixl_workq_destroy(struct workqueue *wq)
5335 {
5336
5337 workqueue_destroy(wq);
5338 }
5339
5340 static void
5341 ixl_work_set(struct ixl_work *work, void (*func)(void *), void *arg)
5342 {
5343
5344 memset(work, 0, sizeof(*work));
5345 work->ixw_func = func;
5346 work->ixw_arg = arg;
5347 }
5348
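/*
 * Enqueue an ixl_work item, using an atomic flag so the item is only
 * queued once until it has actually run.
 */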
5349 static void
5350 ixl_work_add(struct workqueue *wq, struct ixl_work *work)
5351 {
5352 if (atomic_cas_uint(&work->ixw_added, 0, 1) != 0)
5353 return;
5354
5355 workqueue_enqueue(wq, &work->ixw_cookie, NULL);
5356 }
5357
5358 static void
5359 ixl_work_wait(struct workqueue *wq, struct ixl_work *work)
5360 {
5361
5362 workqueue_wait(wq, &work->ixw_cookie);
5363 }
5364
5365 static void
5366 ixl_workq_work(struct work *wk, void *context)
5367 {
5368 struct ixl_work *work;
5369
5370 work = container_of(wk, struct ixl_work, ixw_cookie);
5371
5372 atomic_swap_uint(&work->ixw_added, 0);
5373 kpreempt_disable();
5374 work->ixw_func(work->ixw_arg);
5375 kpreempt_enable();
5376 }
5377
5378 static int
5379 ixl_rx_ctl_read(struct ixl_softc *sc, uint32_t reg, uint32_t *rv)
5380 {
5381 struct ixl_aq_desc iaq;
5382
5383 memset(&iaq, 0, sizeof(iaq));
5384 iaq.iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_REG_READ);
5385 iaq.iaq_param[1] = htole32(reg);
5386
5387 if (ixl_atq_poll(sc, &iaq, 250) != 0)
5388 return ETIMEDOUT;
5389
5390 	switch (le16toh(iaq.iaq_retval)) {
5391 case IXL_AQ_RC_OK:
5392 /* success */
5393 break;
5394 case IXL_AQ_RC_EACCES:
5395 return EPERM;
5396 case IXL_AQ_RC_EAGAIN:
5397 return EAGAIN;
5398 default:
5399 return EIO;
5400 }
5401
5402 	*rv = le32toh(iaq.iaq_param[3]);
5403 return 0;
5404 }
5405
5406
5407
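/*
 * Read an Rx control register.  When sc_rxctl_atq is set, go through the
 * RX_CTL_REG_READ admin command first, retrying a few times on EAGAIN,
 * and fall back to a direct register read only if that fails.
 */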
5408 static uint32_t
5409 ixl_rd_rx_csr(struct ixl_softc *sc, uint32_t reg)
5410 {
5411 uint32_t val;
5412 int rv, retry, retry_limit;
5413
5414 retry_limit = sc->sc_rxctl_atq ? 5 : 0;
5415
5416 for (retry = 0; retry < retry_limit; retry++) {
5417 rv = ixl_rx_ctl_read(sc, reg, &val);
5418 if (rv == 0)
5419 return val;
5420 else if (rv == EAGAIN)
5421 delaymsec(1);
5422 else
5423 break;
5424 }
5425
5426 val = ixl_rd(sc, reg);
5427
5428 return val;
5429 }
5430
5431 static int
5432 ixl_rx_ctl_write(struct ixl_softc *sc, uint32_t reg, uint32_t value)
5433 {
5434 struct ixl_aq_desc iaq;
5435
5436 memset(&iaq, 0, sizeof(iaq));
5437 iaq.iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_REG_WRITE);
5438 iaq.iaq_param[1] = htole32(reg);
5439 iaq.iaq_param[3] = htole32(value);
5440
5441 if (ixl_atq_poll(sc, &iaq, 250) != 0)
5442 return ETIMEDOUT;
5443
5444 	switch (le16toh(iaq.iaq_retval)) {
5445 case IXL_AQ_RC_OK:
5446 /* success */
5447 break;
5448 case IXL_AQ_RC_EACCES:
5449 return EPERM;
5450 case IXL_AQ_RC_EAGAIN:
5451 return EAGAIN;
5452 default:
5453 return EIO;
5454 }
5455
5456 return 0;
5457 }
5458
5459 static void
5460 ixl_wr_rx_csr(struct ixl_softc *sc, uint32_t reg, uint32_t value)
5461 {
5462 int rv, retry, retry_limit;
5463
5464 retry_limit = sc->sc_rxctl_atq ? 5 : 0;
5465
5466 for (retry = 0; retry < retry_limit; retry++) {
5467 rv = ixl_rx_ctl_write(sc, reg, value);
5468 if (rv == 0)
5469 return;
5470 else if (rv == EAGAIN)
5471 delaymsec(1);
5472 else
5473 break;
5474 }
5475
5476 ixl_wr(sc, reg, value);
5477 }
5478
5479 MODULE(MODULE_CLASS_DRIVER, if_ixl, "pci");
5480
5481 #ifdef _MODULE
5482 #include "ioconf.c"
5483 #endif
5484
5485 static int
5486 if_ixl_modcmd(modcmd_t cmd, void *opaque)
5487 {
5488 int error = 0;
5489
5490 #ifdef _MODULE
5491 switch (cmd) {
5492 case MODULE_CMD_INIT:
5493 error = config_init_component(cfdriver_ioconf_if_ixl,
5494 cfattach_ioconf_if_ixl, cfdata_ioconf_if_ixl);
5495 break;
5496 case MODULE_CMD_FINI:
5497 error = config_fini_component(cfdriver_ioconf_if_ixl,
5498 cfattach_ioconf_if_ixl, cfdata_ioconf_if_ixl);
5499 break;
5500 default:
5501 error = ENOTTY;
5502 break;
5503 }
5504 #endif
5505
5506 return error;
5507 }
5508