1 /* $NetBSD: if_ixl.c,v 1.13 2019/12/26 03:08:19 yamaguchi Exp $ */
2
3 /*
4 * Copyright (c) 2013-2015, Intel Corporation
5 * All rights reserved.
6
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
12 *
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * 3. Neither the name of the Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 /*
35 * Copyright (c) 2016,2017 David Gwynne <dlg (at) openbsd.org>
36 *
37 * Permission to use, copy, modify, and distribute this software for any
38 * purpose with or without fee is hereby granted, provided that the above
39 * copyright notice and this permission notice appear in all copies.
40 *
41 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
42 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
43 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
44 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
45 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
46 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
47 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
48 */
49
50 /*
51 * Copyright (c) 2019 Internet Initiative Japan, Inc.
52 * All rights reserved.
53 *
54 * Redistribution and use in source and binary forms, with or without
55 * modification, are permitted provided that the following conditions
56 * are met:
57 * 1. Redistributions of source code must retain the above copyright
58 * notice, this list of conditions and the following disclaimer.
59 * 2. Redistributions in binary form must reproduce the above copyright
60 * notice, this list of conditions and the following disclaimer in the
61 * documentation and/or other materials provided with the distribution.
62 *
63 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
64 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
65 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
66 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
67 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
68 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
69 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
70 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
71 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
72 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
73 * POSSIBILITY OF SUCH DAMAGE.
74 */
75
76 #include <sys/cdefs.h>
77
78 #ifdef _KERNEL_OPT
79 #include "opt_net_mpsafe.h"
80 #endif
81
82 #include <sys/param.h>
83 #include <sys/types.h>
84
85 #include <sys/cpu.h>
86 #include <sys/device.h>
87 #include <sys/evcnt.h>
88 #include <sys/interrupt.h>
89 #include <sys/kmem.h>
90 #include <sys/malloc.h>
91 #include <sys/module.h>
92 #include <sys/mutex.h>
93 #include <sys/pcq.h>
94 #include <sys/syslog.h>
95 #include <sys/workqueue.h>
96
97 #include <sys/bus.h>
98
99 #include <net/bpf.h>
100 #include <net/if.h>
101 #include <net/if_dl.h>
102 #include <net/if_media.h>
103 #include <net/if_ether.h>
104 #include <net/rss_config.h>
105
106 #include <dev/pci/pcivar.h>
107 #include <dev/pci/pcidevs.h>
108
109 #include <dev/pci/if_ixlreg.h>
110 #include <dev/pci/if_ixlvar.h>
111
112 struct ixl_softc; /* defined below */
113
114 #define I40E_PF_RESET_WAIT_COUNT 200
115 #define I40E_AQ_LARGE_BUF 512
116
117 /* bitfields for Tx queue mapping in QTX_CTL */
118 #define I40E_QTX_CTL_VF_QUEUE 0x0
119 #define I40E_QTX_CTL_VM_QUEUE 0x1
120 #define I40E_QTX_CTL_PF_QUEUE 0x2
121
122 #define I40E_QUEUE_TYPE_EOL 0x7ff
123 #define I40E_INTR_NOTX_QUEUE 0
124
125 #define I40E_QUEUE_TYPE_RX 0x0
126 #define I40E_QUEUE_TYPE_TX 0x1
127 #define I40E_QUEUE_TYPE_PE_CEQ 0x2
128 #define I40E_QUEUE_TYPE_UNKNOWN 0x3
129
130 #define I40E_ITR_INDEX_RX 0x0
131 #define I40E_ITR_INDEX_TX 0x1
132 #define I40E_ITR_INDEX_OTHER 0x2
133 #define I40E_ITR_INDEX_NONE 0x3
134
135 #define I40E_INTR_NOTX_QUEUE 0
136 #define I40E_INTR_NOTX_INTR 0
137 #define I40E_INTR_NOTX_RX_QUEUE 0
138 #define I40E_INTR_NOTX_TX_QUEUE 1
139 #define I40E_INTR_NOTX_RX_MASK I40E_PFINT_ICR0_QUEUE_0_MASK
140 #define I40E_INTR_NOTX_TX_MASK I40E_PFINT_ICR0_QUEUE_1_MASK
141
142 #define BIT_ULL(a) (1ULL << (a))
143 #define IXL_RSS_HENA_DEFAULT_BASE \
144 (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
145 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
146 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
147 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
148 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
149 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
150 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
151 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
152 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
153 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
154 BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
155 #define IXL_RSS_HENA_DEFAULT_XL710 IXL_RSS_HENA_DEFAULT_BASE
156 #define IXL_RSS_HENA_DEFAULT_X722 (IXL_RSS_HENA_DEFAULT_XL710 | \
157 BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
158 BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
159 BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
160 BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
161 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
162 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK))
163 #define I40E_HASH_LUT_SIZE_128 0
164 #define IXL_RSS_KEY_SIZE_REG 13
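/* 13 32-bit key registers: the RSS hash key is 13 * 4 = 52 bytes long */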
165
166 #define IXL_ICR0_CRIT_ERR_MASK \
167 (I40E_PFINT_ICR0_PCI_EXCEPTION_MASK | \
168 I40E_PFINT_ICR0_ECC_ERR_MASK | \
169 I40E_PFINT_ICR0_PE_CRITERR_MASK)
170
171 #define IXL_TX_PKT_DESCS 8
172 #define IXL_TX_QUEUE_ALIGN 128
173 #define IXL_RX_QUEUE_ALIGN 128
174
175 #define IXL_HARDMTU 9712 /* 9726 - ETHER_HDR_LEN */
176
177 #define IXL_PCIREG PCI_MAPREG_START
178
179 #define IXL_ITR0 0x0
180 #define IXL_ITR1 0x1
181 #define IXL_ITR2 0x2
182 #define IXL_NOITR 0x3
183
184 #define IXL_AQ_NUM 256
185 #define IXL_AQ_MASK (IXL_AQ_NUM - 1)
186 #define IXL_AQ_ALIGN 64 /* lol */
187 #define IXL_AQ_BUFLEN 4096
188
189 #define IXL_HMC_ROUNDUP 512
190 #define IXL_HMC_PGSIZE 4096
191 #define IXL_HMC_DVASZ sizeof(uint64_t)
192 #define IXL_HMC_PGS (IXL_HMC_PGSIZE / IXL_HMC_DVASZ)
193 #define IXL_HMC_L2SZ (IXL_HMC_PGSIZE * IXL_HMC_PGS)
194 #define IXL_HMC_PDVALID 1ULL
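/*
 * With 4096-byte pages and 8-byte (uint64_t) entries, each page holds
 * IXL_HMC_PGS = 4096 / 8 = 512 entries, so a fully populated page of
 * page descriptors covers IXL_HMC_L2SZ = 4096 * 512 = 2MB.
 */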
195
196 #define IXL_ATQ_EXEC_TIMEOUT (10 * hz)
197
198 struct ixl_aq_regs {
199 bus_size_t atq_tail;
200 bus_size_t atq_head;
201 bus_size_t atq_len;
202 bus_size_t atq_bal;
203 bus_size_t atq_bah;
204
205 bus_size_t arq_tail;
206 bus_size_t arq_head;
207 bus_size_t arq_len;
208 bus_size_t arq_bal;
209 bus_size_t arq_bah;
210
211 uint32_t atq_len_enable;
212 uint32_t atq_tail_mask;
213 uint32_t atq_head_mask;
214
215 uint32_t arq_len_enable;
216 uint32_t arq_tail_mask;
217 uint32_t arq_head_mask;
218 };
219
220 struct ixl_phy_type {
221 uint64_t phy_type;
222 uint64_t ifm_type;
223 };
224
225 struct ixl_speed_type {
226 uint8_t dev_speed;
227 uint64_t net_speed;
228 };
229
230 struct ixl_aq_buf {
231 SIMPLEQ_ENTRY(ixl_aq_buf)
232 aqb_entry;
233 void *aqb_data;
234 bus_dmamap_t aqb_map;
235 bus_dma_segment_t aqb_seg;
236 size_t aqb_size;
237 int aqb_nsegs;
238 };
239 SIMPLEQ_HEAD(ixl_aq_bufs, ixl_aq_buf);
240
241 struct ixl_dmamem {
242 bus_dmamap_t ixm_map;
243 bus_dma_segment_t ixm_seg;
244 int ixm_nsegs;
245 size_t ixm_size;
246 void *ixm_kva;
247 };
248
249 #define IXL_DMA_MAP(_ixm) ((_ixm)->ixm_map)
250 #define IXL_DMA_DVA(_ixm) ((_ixm)->ixm_map->dm_segs[0].ds_addr)
251 #define IXL_DMA_KVA(_ixm) ((void *)(_ixm)->ixm_kva)
252 #define IXL_DMA_LEN(_ixm) ((_ixm)->ixm_size)
253
254 struct ixl_hmc_entry {
255 uint64_t hmc_base;
256 uint32_t hmc_count;
257 uint64_t hmc_size;
258 };
259
260 enum ixl_hmc_types {
261 IXL_HMC_LAN_TX = 0,
262 IXL_HMC_LAN_RX,
263 IXL_HMC_FCOE_CTX,
264 IXL_HMC_FCOE_FILTER,
265 IXL_HMC_COUNT
266 };
267
268 struct ixl_hmc_pack {
269 uint16_t offset;
270 uint16_t width;
271 uint16_t lsb;
272 };
273
274 /*
275  * these hmc objects have weird sizes and alignments, so these are abstract
276  * representations of them that are nice for C code to populate.
277  *
278  * the packing code relies on little-endian values being stored in the fields,
279  * on no high bits in the fields being set, and on the fields being packed in
280  * the same order as they appear in the ctx structure.
281 */
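/*
 * Illustrative example (not part of the driver): each ixl_hmc_pack entry
 * below gives the offset of a field in the abstract C structure, its width
 * in bits, and the bit position (lsb) it occupies in the packed hardware
 * context.  For instance,
 *
 *	{ offsetof(struct ixl_hmc_rxq, qlen),		13,	89 },
 *
 * copies the low 13 bits of the little-endian qlen value into bits 89..101
 * of the context image, directly after the 57-bit base field that occupies
 * bits 32..88.
 */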
282
283 struct ixl_hmc_rxq {
284 uint16_t head;
285 uint8_t cpuid;
286 uint64_t base;
287 #define IXL_HMC_RXQ_BASE_UNIT 128
288 uint16_t qlen;
289 uint16_t dbuff;
290 #define IXL_HMC_RXQ_DBUFF_UNIT 128
291 uint8_t hbuff;
292 #define IXL_HMC_RXQ_HBUFF_UNIT 64
293 uint8_t dtype;
294 #define IXL_HMC_RXQ_DTYPE_NOSPLIT 0x0
295 #define IXL_HMC_RXQ_DTYPE_HSPLIT 0x1
296 #define IXL_HMC_RXQ_DTYPE_SPLIT_ALWAYS 0x2
297 uint8_t dsize;
298 #define IXL_HMC_RXQ_DSIZE_16 0
299 #define IXL_HMC_RXQ_DSIZE_32 1
300 uint8_t crcstrip;
301 uint8_t fc_ena;
302 uint8_t l2sel;
303 uint8_t hsplit_0;
304 uint8_t hsplit_1;
305 uint8_t showiv;
306 uint16_t rxmax;
307 uint8_t tphrdesc_ena;
308 uint8_t tphwdesc_ena;
309 uint8_t tphdata_ena;
310 uint8_t tphhead_ena;
311 uint8_t lrxqthresh;
312 uint8_t prefena;
313 };
314
315 static const struct ixl_hmc_pack ixl_hmc_pack_rxq[] = {
316 { offsetof(struct ixl_hmc_rxq, head), 13, 0 },
317 { offsetof(struct ixl_hmc_rxq, cpuid), 8, 13 },
318 { offsetof(struct ixl_hmc_rxq, base), 57, 32 },
319 { offsetof(struct ixl_hmc_rxq, qlen), 13, 89 },
320 { offsetof(struct ixl_hmc_rxq, dbuff), 7, 102 },
321 { offsetof(struct ixl_hmc_rxq, hbuff), 5, 109 },
322 { offsetof(struct ixl_hmc_rxq, dtype), 2, 114 },
323 { offsetof(struct ixl_hmc_rxq, dsize), 1, 116 },
324 { offsetof(struct ixl_hmc_rxq, crcstrip), 1, 117 },
325 { offsetof(struct ixl_hmc_rxq, fc_ena), 1, 118 },
326 { offsetof(struct ixl_hmc_rxq, l2sel), 1, 119 },
327 { offsetof(struct ixl_hmc_rxq, hsplit_0), 4, 120 },
328 { offsetof(struct ixl_hmc_rxq, hsplit_1), 2, 124 },
329 { offsetof(struct ixl_hmc_rxq, showiv), 1, 127 },
330 { offsetof(struct ixl_hmc_rxq, rxmax), 14, 174 },
331 { offsetof(struct ixl_hmc_rxq, tphrdesc_ena), 1, 193 },
332 { offsetof(struct ixl_hmc_rxq, tphwdesc_ena), 1, 194 },
333 { offsetof(struct ixl_hmc_rxq, tphdata_ena), 1, 195 },
334 { offsetof(struct ixl_hmc_rxq, tphhead_ena), 1, 196 },
335 { offsetof(struct ixl_hmc_rxq, lrxqthresh), 3, 198 },
336 { offsetof(struct ixl_hmc_rxq, prefena), 1, 201 },
337 };
338
339 #define IXL_HMC_RXQ_MINSIZE (201 + 1)
340
341 struct ixl_hmc_txq {
342 uint16_t head;
343 uint8_t new_context;
344 uint64_t base;
345 #define IXL_HMC_TXQ_BASE_UNIT 128
346 uint8_t fc_ena;
347 uint8_t timesync_ena;
348 uint8_t fd_ena;
349 uint8_t alt_vlan_ena;
350 uint16_t thead_wb;
351 uint8_t cpuid;
352 uint8_t head_wb_ena;
353 #define IXL_HMC_TXQ_DESC_WB 0
354 #define IXL_HMC_TXQ_HEAD_WB 1
355 uint16_t qlen;
356 uint8_t tphrdesc_ena;
357 uint8_t tphrpacket_ena;
358 uint8_t tphwdesc_ena;
359 uint64_t head_wb_addr;
360 uint32_t crc;
361 uint16_t rdylist;
362 uint8_t rdylist_act;
363 };
364
365 static const struct ixl_hmc_pack ixl_hmc_pack_txq[] = {
366 { offsetof(struct ixl_hmc_txq, head), 13, 0 },
367 { offsetof(struct ixl_hmc_txq, new_context), 1, 30 },
368 { offsetof(struct ixl_hmc_txq, base), 57, 32 },
369 { offsetof(struct ixl_hmc_txq, fc_ena), 1, 89 },
370 { offsetof(struct ixl_hmc_txq, timesync_ena), 1, 90 },
371 { offsetof(struct ixl_hmc_txq, fd_ena), 1, 91 },
372 { offsetof(struct ixl_hmc_txq, alt_vlan_ena), 1, 92 },
373 { offsetof(struct ixl_hmc_txq, cpuid), 8, 96 },
374 /* line 1 */
375 { offsetof(struct ixl_hmc_txq, thead_wb), 13, 0 + 128 },
376 { offsetof(struct ixl_hmc_txq, head_wb_ena), 1, 32 + 128 },
377 { offsetof(struct ixl_hmc_txq, qlen), 13, 33 + 128 },
378 { offsetof(struct ixl_hmc_txq, tphrdesc_ena), 1, 46 + 128 },
379 { offsetof(struct ixl_hmc_txq, tphrpacket_ena), 1, 47 + 128 },
380 { offsetof(struct ixl_hmc_txq, tphwdesc_ena), 1, 48 + 128 },
381 { offsetof(struct ixl_hmc_txq, head_wb_addr), 64, 64 + 128 },
382 /* line 7 */
383 { offsetof(struct ixl_hmc_txq, crc), 32, 0 + (7*128) },
384 { offsetof(struct ixl_hmc_txq, rdylist), 10, 84 + (7*128) },
385 { offsetof(struct ixl_hmc_txq, rdylist_act), 1, 94 + (7*128) },
386 };
387
388 #define IXL_HMC_TXQ_MINSIZE (94 + (7*128) + 1)
389
390 struct ixl_work {
391 struct work ixw_cookie;
392 void (*ixw_func)(void *);
393 void *ixw_arg;
394 unsigned int ixw_added;
395 };
396 #define IXL_WORKQUEUE_PRI PRI_SOFTNET
397
398 struct ixl_tx_map {
399 struct mbuf *txm_m;
400 bus_dmamap_t txm_map;
401 unsigned int txm_eop;
402 };
403
404 struct ixl_tx_ring {
405 kmutex_t txr_lock;
406 struct ixl_softc *txr_sc;
407
408 unsigned int txr_prod;
409 unsigned int txr_cons;
410
411 struct ixl_tx_map *txr_maps;
412 struct ixl_dmamem txr_mem;
413
414 bus_size_t txr_tail;
415 unsigned int txr_qid;
416 pcq_t *txr_intrq;
417 void *txr_si;
418
419 uint64_t txr_oerrors; /* if_oerrors */
420 uint64_t txr_opackets; /* if_opackets */
421 uint64_t txr_obytes; /* if_obytes */
422 uint64_t txr_omcasts; /* if_omcasts */
423
424 struct evcnt txr_defragged;
425 struct evcnt txr_defrag_failed;
426 struct evcnt txr_pcqdrop;
427 struct evcnt txr_transmitdef;
428 struct evcnt txr_intr;
429 struct evcnt txr_defer;
430 };
431
432 struct ixl_rx_map {
433 struct mbuf *rxm_m;
434 bus_dmamap_t rxm_map;
435 };
436
437 struct ixl_rx_ring {
438 kmutex_t rxr_lock;
439
440 unsigned int rxr_prod;
441 unsigned int rxr_cons;
442
443 struct ixl_rx_map *rxr_maps;
444 struct ixl_dmamem rxr_mem;
445
446 struct mbuf *rxr_m_head;
447 struct mbuf **rxr_m_tail;
448
449 bus_size_t rxr_tail;
450 unsigned int rxr_qid;
451
452 uint64_t rxr_ipackets; /* if_ipackets */
453 uint64_t rxr_ibytes; /* if_ibytes */
454 uint64_t rxr_iqdrops; /* iqdrops */
455 uint64_t rxr_ierrors; /* if_ierrors */
456
457 struct evcnt rxr_mgethdr_failed;
458 struct evcnt rxr_mgetcl_failed;
459 struct evcnt rxr_mbuf_load_failed;
460 struct evcnt rxr_intr;
461 struct evcnt rxr_defer;
462 };
463
464 struct ixl_queue_pair {
465 struct ixl_softc *qp_sc;
466 struct ixl_tx_ring *qp_txr;
467 struct ixl_rx_ring *qp_rxr;
468
469 char qp_name[16];
470
471 void *qp_si;
472 struct ixl_work qp_task;
473 bool qp_workqueue;
474 };
475
476 struct ixl_atq {
477 struct ixl_aq_desc iatq_desc;
478 void (*iatq_fn)(struct ixl_softc *,
479 const struct ixl_aq_desc *);
480 };
481 SIMPLEQ_HEAD(ixl_atq_list, ixl_atq);
482
483 struct ixl_product {
484 unsigned int vendor_id;
485 unsigned int product_id;
486 };
487
488 /*
489 * Locking notes:
490  * + fields in ixl_tx_ring are protected by txr_lock (a spin mutex), and
491  *   fields in ixl_rx_ring are protected by rxr_lock (a spin mutex).
492  *   - no more than one of these ring locks may be held at once.
493  * + fields named sc_atq_* in ixl_softc are protected by sc_atq_lock
494  *   (a spin mutex).
495  *   - this lock cannot be held together with txr_lock or rxr_lock.
496  * + fields named sc_arq_* are not protected by any lock.
497  *   - operations on sc_arq_* are performed in a single context
498  *     related to sc_arq_task.
499  * + other fields in ixl_softc are protected by sc_cfg_lock
500  *   (an adaptive mutex).
501  *   - it must be acquired before any other lock is taken, and may be
502  *     released only after the other lock has been released.
503  */
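/*
 * Illustrative lock-ordering sketch (not actual driver code):
 *
 *	mutex_enter(&sc->sc_cfg_lock);	 (adaptive mutex, taken first)
 *	mutex_enter(&txr->txr_lock);	 (spin mutex, one ring lock at a time)
 *	 ... reconfigure the ring ...
 *	mutex_exit(&txr->txr_lock);
 *	mutex_exit(&sc->sc_cfg_lock);
 *
 * txr_lock and rxr_lock are never held at the same time, and sc_atq_lock
 * is never taken while a ring lock is held.
 */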
504
505 struct ixl_softc {
506 device_t sc_dev;
507 struct ethercom sc_ec;
508 bool sc_attached;
509 bool sc_dead;
510 bool sc_rxctl_atq;
511 struct sysctllog *sc_sysctllog;
512 struct workqueue *sc_workq;
513 struct workqueue *sc_workq_txrx;
514 uint8_t sc_enaddr[ETHER_ADDR_LEN];
515 struct ifmedia sc_media;
516 uint64_t sc_media_status;
517 uint64_t sc_media_active;
518 kmutex_t sc_cfg_lock;
519 enum i40e_mac_type sc_mac_type;
520 uint32_t sc_rss_table_size;
521 uint32_t sc_rss_table_entry_width;
522 bool sc_txrx_workqueue;
523 u_int sc_tx_process_limit;
524 u_int sc_rx_process_limit;
525 u_int sc_tx_intr_process_limit;
526 u_int sc_rx_intr_process_limit;
527
528 int sc_cur_ec_capenable;
529
530 struct pci_attach_args sc_pa;
531 pci_intr_handle_t *sc_ihp;
532 void **sc_ihs;
533 unsigned int sc_nintrs;
534
535 bus_dma_tag_t sc_dmat;
536 bus_space_tag_t sc_memt;
537 bus_space_handle_t sc_memh;
538 bus_size_t sc_mems;
539
540 uint8_t sc_pf_id;
541 uint16_t sc_uplink_seid; /* le */
542 uint16_t sc_downlink_seid; /* le */
543 uint16_t sc_vsi_number; /* le */
544 uint16_t sc_seid;
545 unsigned int sc_base_queue;
546
547 pci_intr_type_t sc_intrtype;
548 unsigned int sc_msix_vector_queue;
549
550 struct ixl_dmamem sc_scratch;
551
552 const struct ixl_aq_regs *
553 sc_aq_regs;
554
555 kmutex_t sc_atq_lock;
556 kcondvar_t sc_atq_cv;
557 struct ixl_dmamem sc_atq;
558 unsigned int sc_atq_prod;
559 unsigned int sc_atq_cons;
560
561 struct ixl_dmamem sc_arq;
562 struct ixl_work sc_arq_task;
563 struct ixl_aq_bufs sc_arq_idle;
564 struct ixl_aq_buf *sc_arq_live[IXL_AQ_NUM];
565 unsigned int sc_arq_prod;
566 unsigned int sc_arq_cons;
567
568 struct ixl_work sc_link_state_task;
569 struct ixl_atq sc_link_state_atq;
570
571 struct ixl_dmamem sc_hmc_sd;
572 struct ixl_dmamem sc_hmc_pd;
573 struct ixl_hmc_entry sc_hmc_entries[IXL_HMC_COUNT];
574
575 unsigned int sc_tx_ring_ndescs;
576 unsigned int sc_rx_ring_ndescs;
577 unsigned int sc_nqueue_pairs;
578 unsigned int sc_nqueue_pairs_max;
579 unsigned int sc_nqueue_pairs_device;
580 struct ixl_queue_pair *sc_qps;
581
582 struct evcnt sc_event_atq;
583 struct evcnt sc_event_link;
584 struct evcnt sc_event_ecc_err;
585 struct evcnt sc_event_pci_exception;
586 struct evcnt sc_event_crit_err;
587 };
588
589 #define IXL_TXRX_PROCESS_UNLIMIT UINT_MAX
590 #define IXL_TX_PROCESS_LIMIT 256
591 #define IXL_RX_PROCESS_LIMIT 256
592 #define IXL_TX_INTR_PROCESS_LIMIT 256
593 #define IXL_RX_INTR_PROCESS_LIMIT 0U
594
595 #define delaymsec(_x) DELAY(1000 * (_x))
596 #ifdef IXL_DEBUG
597 #define DDPRINTF(sc, fmt, args...) \
598 do { \
599 if ((sc) != NULL) { \
600 device_printf( \
601 ((struct ixl_softc *)(sc))->sc_dev, \
602 ""); \
603 } \
604 printf("%s:\t" fmt, __func__, ##args); \
605 } while (0)
606 #else
607 #define DDPRINTF(sc, fmt, args...) __nothing
608 #endif
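/* typical (hypothetical) usage: DDPRINTF(sc, "queue %u enabled\n", qid); */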
609 #define IXL_NOMSIX false
610
611 static enum i40e_mac_type
612 ixl_mactype(pci_product_id_t);
613 static void ixl_clear_hw(struct ixl_softc *);
614 static int ixl_pf_reset(struct ixl_softc *);
615
616 static int ixl_dmamem_alloc(struct ixl_softc *, struct ixl_dmamem *,
617 bus_size_t, bus_size_t);
618 static void ixl_dmamem_free(struct ixl_softc *, struct ixl_dmamem *);
619
620 static int ixl_arq_fill(struct ixl_softc *);
621 static void ixl_arq_unfill(struct ixl_softc *);
622
623 static int ixl_atq_poll(struct ixl_softc *, struct ixl_aq_desc *,
624 unsigned int);
625 static void ixl_atq_set(struct ixl_atq *,
626 void (*)(struct ixl_softc *, const struct ixl_aq_desc *));
627 static int ixl_atq_post(struct ixl_softc *, struct ixl_atq *);
628 static int ixl_atq_post_locked(struct ixl_softc *, struct ixl_atq *);
629 static void ixl_atq_done(struct ixl_softc *);
630 static int ixl_atq_exec(struct ixl_softc *, struct ixl_atq *);
631 static int ixl_get_version(struct ixl_softc *);
632 static int ixl_get_hw_capabilities(struct ixl_softc *);
633 static int ixl_pxe_clear(struct ixl_softc *);
634 static int ixl_lldp_shut(struct ixl_softc *);
635 static int ixl_get_mac(struct ixl_softc *);
636 static int ixl_get_switch_config(struct ixl_softc *);
637 static int ixl_phy_mask_ints(struct ixl_softc *);
638 static int ixl_get_phy_types(struct ixl_softc *, uint64_t *);
639 static int ixl_restart_an(struct ixl_softc *);
640 static int ixl_hmc(struct ixl_softc *);
641 static void ixl_hmc_free(struct ixl_softc *);
642 static int ixl_get_vsi(struct ixl_softc *);
643 static int ixl_set_vsi(struct ixl_softc *);
644 static void ixl_set_filter_control(struct ixl_softc *);
645 static void ixl_get_link_status(void *);
646 static int ixl_get_link_status_poll(struct ixl_softc *);
647 static int ixl_set_link_status(struct ixl_softc *,
648 const struct ixl_aq_desc *);
649 static void ixl_config_rss(struct ixl_softc *);
650 static int ixl_add_macvlan(struct ixl_softc *, const uint8_t *,
651 uint16_t, uint16_t);
652 static int ixl_remove_macvlan(struct ixl_softc *, const uint8_t *,
653 uint16_t, uint16_t);
654 static void ixl_arq(void *);
655 static void ixl_hmc_pack(void *, const void *,
656 const struct ixl_hmc_pack *, unsigned int);
657 static uint32_t ixl_rd_rx_csr(struct ixl_softc *, uint32_t);
658 static void ixl_wr_rx_csr(struct ixl_softc *, uint32_t, uint32_t);
659
660 static int ixl_match(device_t, cfdata_t, void *);
661 static void ixl_attach(device_t, device_t, void *);
662 static int ixl_detach(device_t, int);
663
664 static void ixl_media_add(struct ixl_softc *, uint64_t);
665 static int ixl_media_change(struct ifnet *);
666 static void ixl_media_status(struct ifnet *, struct ifmediareq *);
667 static void ixl_watchdog(struct ifnet *);
668 static int ixl_ioctl(struct ifnet *, u_long, void *);
669 static void ixl_start(struct ifnet *);
670 static int ixl_transmit(struct ifnet *, struct mbuf *);
671 static void ixl_deferred_transmit(void *);
672 static int ixl_intr(void *);
673 static int ixl_queue_intr(void *);
674 static int ixl_other_intr(void *);
675 static void ixl_handle_queue(void *);
676 static void ixl_sched_handle_queue(struct ixl_softc *,
677 struct ixl_queue_pair *);
678 static int ixl_init(struct ifnet *);
679 static int ixl_init_locked(struct ixl_softc *);
680 static void ixl_stop(struct ifnet *, int);
681 static void ixl_stop_locked(struct ixl_softc *);
682 static int ixl_iff(struct ixl_softc *);
683 static int ixl_ifflags_cb(struct ethercom *);
684 static int ixl_setup_interrupts(struct ixl_softc *);
685 static int ixl_establish_intx(struct ixl_softc *);
686 static int ixl_establish_msix(struct ixl_softc *);
687 static void ixl_set_affinity_msix(struct ixl_softc *);
688 static void ixl_enable_queue_intr(struct ixl_softc *,
689 struct ixl_queue_pair *);
690 static void ixl_disable_queue_intr(struct ixl_softc *,
691 struct ixl_queue_pair *);
692 static void ixl_enable_other_intr(struct ixl_softc *);
693 static void ixl_disable_other_intr(struct ixl_softc *);
694 static void ixl_config_queue_intr(struct ixl_softc *);
695 static void ixl_config_other_intr(struct ixl_softc *);
696
697 static struct ixl_tx_ring *
698 ixl_txr_alloc(struct ixl_softc *, unsigned int);
699 static void ixl_txr_qdis(struct ixl_softc *, struct ixl_tx_ring *, int);
700 static void ixl_txr_config(struct ixl_softc *, struct ixl_tx_ring *);
701 static int ixl_txr_enabled(struct ixl_softc *, struct ixl_tx_ring *);
702 static int ixl_txr_disabled(struct ixl_softc *, struct ixl_tx_ring *);
703 static void ixl_txr_unconfig(struct ixl_softc *, struct ixl_tx_ring *);
704 static void ixl_txr_clean(struct ixl_softc *, struct ixl_tx_ring *);
705 static void ixl_txr_free(struct ixl_softc *, struct ixl_tx_ring *);
706 static int ixl_txeof(struct ixl_softc *, struct ixl_tx_ring *, u_int);
707
708 static struct ixl_rx_ring *
709 ixl_rxr_alloc(struct ixl_softc *, unsigned int);
710 static void ixl_rxr_config(struct ixl_softc *, struct ixl_rx_ring *);
711 static int ixl_rxr_enabled(struct ixl_softc *, struct ixl_rx_ring *);
712 static int ixl_rxr_disabled(struct ixl_softc *, struct ixl_rx_ring *);
713 static void ixl_rxr_unconfig(struct ixl_softc *, struct ixl_rx_ring *);
714 static void ixl_rxr_clean(struct ixl_softc *, struct ixl_rx_ring *);
715 static void ixl_rxr_free(struct ixl_softc *, struct ixl_rx_ring *);
716 static int ixl_rxeof(struct ixl_softc *, struct ixl_rx_ring *, u_int);
717 static int ixl_rxfill(struct ixl_softc *, struct ixl_rx_ring *);
718
719 static struct workqueue *
720 ixl_workq_create(const char *, pri_t, int, int);
721 static void ixl_workq_destroy(struct workqueue *);
722 static int ixl_workqs_teardown(device_t);
723 static void ixl_work_set(struct ixl_work *, void (*)(void *), void *);
724 static void ixl_work_add(struct workqueue *, struct ixl_work *);
725 static void ixl_work_wait(struct workqueue *, struct ixl_work *);
726 static void ixl_workq_work(struct work *, void *);
727 static const struct ixl_product *
728 ixl_lookup(const struct pci_attach_args *pa);
729 static void ixl_link_state_update(struct ixl_softc *,
730 const struct ixl_aq_desc *);
731 static int ixl_vlan_cb(struct ethercom *, uint16_t, bool);
732 static int ixl_setup_vlan_hwfilter(struct ixl_softc *);
733 static void ixl_teardown_vlan_hwfilter(struct ixl_softc *);
734 static int ixl_update_macvlan(struct ixl_softc *);
735 static int	ixl_setup_interrupts(struct ixl_softc *);
736 static void ixl_teardown_interrupts(struct ixl_softc *);
737 static int ixl_setup_stats(struct ixl_softc *);
738 static void ixl_teardown_stats(struct ixl_softc *);
739 static int ixl_setup_sysctls(struct ixl_softc *);
740 static void ixl_teardown_sysctls(struct ixl_softc *);
741 static int ixl_queue_pairs_alloc(struct ixl_softc *);
742 static void ixl_queue_pairs_free(struct ixl_softc *);
743
744 static const struct ixl_phy_type ixl_phy_type_map[] = {
745 { 1ULL << IXL_PHY_TYPE_SGMII, IFM_1000_SGMII },
746 { 1ULL << IXL_PHY_TYPE_1000BASE_KX, IFM_1000_KX },
747 { 1ULL << IXL_PHY_TYPE_10GBASE_KX4, IFM_10G_KX4 },
748 { 1ULL << IXL_PHY_TYPE_10GBASE_KR, IFM_10G_KR },
749 { 1ULL << IXL_PHY_TYPE_40GBASE_KR4, IFM_40G_KR4 },
750 { 1ULL << IXL_PHY_TYPE_XAUI |
751 1ULL << IXL_PHY_TYPE_XFI, IFM_10G_CX4 },
752 { 1ULL << IXL_PHY_TYPE_SFI, IFM_10G_SFI },
753 { 1ULL << IXL_PHY_TYPE_XLAUI |
754 1ULL << IXL_PHY_TYPE_XLPPI, IFM_40G_XLPPI },
755 { 1ULL << IXL_PHY_TYPE_40GBASE_CR4_CU |
756 1ULL << IXL_PHY_TYPE_40GBASE_CR4, IFM_40G_CR4 },
757 { 1ULL << IXL_PHY_TYPE_10GBASE_CR1_CU |
758 1ULL << IXL_PHY_TYPE_10GBASE_CR1, IFM_10G_CR1 },
759 { 1ULL << IXL_PHY_TYPE_10GBASE_AOC, IFM_10G_AOC },
760 { 1ULL << IXL_PHY_TYPE_40GBASE_AOC, IFM_40G_AOC },
761 { 1ULL << IXL_PHY_TYPE_100BASE_TX, IFM_100_TX },
762 { 1ULL << IXL_PHY_TYPE_1000BASE_T_OPTICAL |
763 1ULL << IXL_PHY_TYPE_1000BASE_T, IFM_1000_T },
764 { 1ULL << IXL_PHY_TYPE_10GBASE_T, IFM_10G_T },
765 { 1ULL << IXL_PHY_TYPE_10GBASE_SR, IFM_10G_SR },
766 { 1ULL << IXL_PHY_TYPE_10GBASE_LR, IFM_10G_LR },
767 { 1ULL << IXL_PHY_TYPE_10GBASE_SFPP_CU, IFM_10G_TWINAX },
768 { 1ULL << IXL_PHY_TYPE_40GBASE_SR4, IFM_40G_SR4 },
769 { 1ULL << IXL_PHY_TYPE_40GBASE_LR4, IFM_40G_LR4 },
770 { 1ULL << IXL_PHY_TYPE_1000BASE_SX, IFM_1000_SX },
771 { 1ULL << IXL_PHY_TYPE_1000BASE_LX, IFM_1000_LX },
772 { 1ULL << IXL_PHY_TYPE_20GBASE_KR2, IFM_20G_KR2 },
773 { 1ULL << IXL_PHY_TYPE_25GBASE_KR, IFM_25G_KR },
774 { 1ULL << IXL_PHY_TYPE_25GBASE_CR, IFM_25G_CR },
775 { 1ULL << IXL_PHY_TYPE_25GBASE_SR, IFM_25G_SR },
776 { 1ULL << IXL_PHY_TYPE_25GBASE_LR, IFM_25G_LR },
777 { 1ULL << IXL_PHY_TYPE_25GBASE_AOC, IFM_25G_AOC },
778 { 1ULL << IXL_PHY_TYPE_25GBASE_ACC, IFM_25G_CR },
779 };
780
781 static const struct ixl_speed_type ixl_speed_type_map[] = {
782 { IXL_AQ_LINK_SPEED_40GB, IF_Gbps(40) },
783 { IXL_AQ_LINK_SPEED_25GB, IF_Gbps(25) },
784 { IXL_AQ_LINK_SPEED_10GB, IF_Gbps(10) },
785 { IXL_AQ_LINK_SPEED_1000MB, IF_Mbps(1000) },
786 { IXL_AQ_LINK_SPEED_100MB, IF_Mbps(100)},
787 };
788
789 static const struct ixl_aq_regs ixl_pf_aq_regs = {
790 .atq_tail = I40E_PF_ATQT,
791 .atq_tail_mask = I40E_PF_ATQT_ATQT_MASK,
792 .atq_head = I40E_PF_ATQH,
793 .atq_head_mask = I40E_PF_ATQH_ATQH_MASK,
794 .atq_len = I40E_PF_ATQLEN,
795 .atq_bal = I40E_PF_ATQBAL,
796 .atq_bah = I40E_PF_ATQBAH,
797 .atq_len_enable = I40E_PF_ATQLEN_ATQENABLE_MASK,
798
799 .arq_tail = I40E_PF_ARQT,
800 .arq_tail_mask = I40E_PF_ARQT_ARQT_MASK,
801 .arq_head = I40E_PF_ARQH,
802 .arq_head_mask = I40E_PF_ARQH_ARQH_MASK,
803 .arq_len = I40E_PF_ARQLEN,
804 .arq_bal = I40E_PF_ARQBAL,
805 .arq_bah = I40E_PF_ARQBAH,
806 .arq_len_enable = I40E_PF_ARQLEN_ARQENABLE_MASK,
807 };
808
809 #define ixl_rd(_s, _r) \
810 bus_space_read_4((_s)->sc_memt, (_s)->sc_memh, (_r))
811 #define ixl_wr(_s, _r, _v) \
812 bus_space_write_4((_s)->sc_memt, (_s)->sc_memh, (_r), (_v))
813 #define ixl_barrier(_s, _r, _l, _o) \
814 bus_space_barrier((_s)->sc_memt, (_s)->sc_memh, (_r), (_l), (_o))
815 #define ixl_flush(_s) (void)ixl_rd((_s), I40E_GLGEN_STAT)
816 #define ixl_nqueues(_sc) (1 << ((_sc)->sc_nqueue_pairs - 1))
817
818 static inline uint32_t
819 ixl_dmamem_hi(struct ixl_dmamem *ixm)
820 {
821 uint32_t retval;
822 uint64_t val;
823
824 if (sizeof(IXL_DMA_DVA(ixm)) > 4) {
825 val = (intptr_t)IXL_DMA_DVA(ixm);
826 retval = (uint32_t)(val >> 32);
827 } else {
828 retval = 0;
829 }
830
831 return retval;
832 }
833
834 static inline uint32_t
835 ixl_dmamem_lo(struct ixl_dmamem *ixm)
836 {
837
838 return (uint32_t)IXL_DMA_DVA(ixm);
839 }
840
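/*
 * ixl_aq_dva() below stores the high and low 32 bits of a buffer's DMA
 * address into iaq_param[2] and iaq_param[3] respectively, in little-endian
 * byte order.
 */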
841 static inline void
842 ixl_aq_dva(struct ixl_aq_desc *iaq, bus_addr_t addr)
843 {
844 uint64_t val;
845
846 if (sizeof(addr) > 4) {
847 val = (intptr_t)addr;
848 iaq->iaq_param[2] = htole32(val >> 32);
849 } else {
850 iaq->iaq_param[2] = htole32(0);
851 }
852
853 iaq->iaq_param[3] = htole32(addr);
854 }
855
856 static inline unsigned int
857 ixl_rxr_unrefreshed(unsigned int prod, unsigned int cons, unsigned int ndescs)
858 {
859 unsigned int num;
860
861 if (prod < cons)
862 num = cons - prod;
863 else
864 num = (ndescs - prod) + cons;
865
866 if (__predict_true(num > 0)) {
867 /* device cannot receive packets if all descriptors are filled */
868 num -= 1;
869 }
870
871 return num;
872 }
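/*
 * Worked example for ixl_rxr_unrefreshed() above: with ndescs = 8,
 * prod = 6 and cons = 2 it computes (8 - 6) + 2 = 4 unrefreshed slots and
 * returns 3, keeping one descriptor back so the ring is never completely
 * filled.
 */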
873
874 CFATTACH_DECL3_NEW(ixl, sizeof(struct ixl_softc),
875 ixl_match, ixl_attach, ixl_detach, NULL, NULL, NULL,
876 DVF_DETACH_SHUTDOWN);
877
878 static const struct ixl_product ixl_products[] = {
879 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_SFP },
880 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_KX_B },
881 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_KX_C },
882 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_A },
883 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_B },
884 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_C },
885 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_T },
886 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_1 },
887 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_2 },
888 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_T4_10G },
889 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_BP },
890 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_SFP28 },
891 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_KX },
892 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_QSFP },
893 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_SFP },
894 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_1G_BASET },
895 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_10G_BASET },
896 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_I_SFP },
897 /* required last entry */
898 {0, 0}
899 };
900
901 static const struct ixl_product *
902 ixl_lookup(const struct pci_attach_args *pa)
903 {
904 const struct ixl_product *ixlp;
905
906 for (ixlp = ixl_products; ixlp->vendor_id != 0; ixlp++) {
907 if (PCI_VENDOR(pa->pa_id) == ixlp->vendor_id &&
908 PCI_PRODUCT(pa->pa_id) == ixlp->product_id)
909 return ixlp;
910 }
911
912 return NULL;
913 }
914
915 static int
916 ixl_match(device_t parent, cfdata_t match, void *aux)
917 {
918 const struct pci_attach_args *pa = aux;
919
920 return (ixl_lookup(pa) != NULL) ? 1 : 0;
921 }
922
923 static void
924 ixl_attach(device_t parent, device_t self, void *aux)
925 {
926 struct ixl_softc *sc;
927 struct pci_attach_args *pa = aux;
928 struct ifnet *ifp;
929 pcireg_t memtype, reg;
930 uint32_t firstq, port, ari, func;
931 uint64_t phy_types = 0;
932 char xnamebuf[32];
933 int tries, rv;
934
935 sc = device_private(self);
936 sc->sc_dev = self;
937 ifp = &sc->sc_ec.ec_if;
938
939 sc->sc_pa = *pa;
940 sc->sc_dmat = (pci_dma64_available(pa)) ?
941 pa->pa_dmat64 : pa->pa_dmat;
942 sc->sc_aq_regs = &ixl_pf_aq_regs;
943
944 reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
945 sc->sc_mac_type = ixl_mactype(PCI_PRODUCT(reg));
946
947 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IXL_PCIREG);
948 if (pci_mapreg_map(pa, IXL_PCIREG, memtype, 0,
949 &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems)) {
950 aprint_error(": unable to map registers\n");
951 return;
952 }
953
954 mutex_init(&sc->sc_cfg_lock, MUTEX_DEFAULT, IPL_SOFTNET);
955
956 firstq = ixl_rd(sc, I40E_PFLAN_QALLOC);
957 firstq &= I40E_PFLAN_QALLOC_FIRSTQ_MASK;
958 firstq >>= I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
959 sc->sc_base_queue = firstq;
960
961 ixl_clear_hw(sc);
962 if (ixl_pf_reset(sc) == -1) {
963 /* error printed by ixl_pf_reset */
964 goto unmap;
965 }
966
967 port = ixl_rd(sc, I40E_PFGEN_PORTNUM);
968 port &= I40E_PFGEN_PORTNUM_PORT_NUM_MASK;
969 port >>= I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
970 aprint_normal(": port %u", port);
971
972 ari = ixl_rd(sc, I40E_GLPCI_CAPSUP);
973 ari &= I40E_GLPCI_CAPSUP_ARI_EN_MASK;
974 ari >>= I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
975
976 func = ixl_rd(sc, I40E_PF_FUNC_RID);
977 sc->sc_pf_id = func & (ari ? 0xff : 0x7);
978
979 /* initialise the adminq */
980
981 mutex_init(&sc->sc_atq_lock, MUTEX_DEFAULT, IPL_NET);
982
983 if (ixl_dmamem_alloc(sc, &sc->sc_atq,
984 sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
985 aprint_error("\n" "%s: unable to allocate atq\n",
986 device_xname(self));
987 goto unmap;
988 }
989
990 SIMPLEQ_INIT(&sc->sc_arq_idle);
991 ixl_work_set(&sc->sc_arq_task, ixl_arq, sc);
992 sc->sc_arq_cons = 0;
993 sc->sc_arq_prod = 0;
994
995 if (ixl_dmamem_alloc(sc, &sc->sc_arq,
996 sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
997 aprint_error("\n" "%s: unable to allocate arq\n",
998 device_xname(self));
999 goto free_atq;
1000 }
1001
1002 if (!ixl_arq_fill(sc)) {
1003 aprint_error("\n" "%s: unable to fill arq descriptors\n",
1004 device_xname(self));
1005 goto free_arq;
1006 }
1007
1008 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1009 0, IXL_DMA_LEN(&sc->sc_atq),
1010 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1011
1012 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1013 0, IXL_DMA_LEN(&sc->sc_arq),
1014 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1015
1016 for (tries = 0; tries < 10; tries++) {
1017 sc->sc_atq_cons = 0;
1018 sc->sc_atq_prod = 0;
1019
1020 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1021 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1022 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1023 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1024
1025 ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
1026
1027 ixl_wr(sc, sc->sc_aq_regs->atq_bal,
1028 ixl_dmamem_lo(&sc->sc_atq));
1029 ixl_wr(sc, sc->sc_aq_regs->atq_bah,
1030 ixl_dmamem_hi(&sc->sc_atq));
1031 ixl_wr(sc, sc->sc_aq_regs->atq_len,
1032 sc->sc_aq_regs->atq_len_enable | IXL_AQ_NUM);
1033
1034 ixl_wr(sc, sc->sc_aq_regs->arq_bal,
1035 ixl_dmamem_lo(&sc->sc_arq));
1036 ixl_wr(sc, sc->sc_aq_regs->arq_bah,
1037 ixl_dmamem_hi(&sc->sc_arq));
1038 ixl_wr(sc, sc->sc_aq_regs->arq_len,
1039 sc->sc_aq_regs->arq_len_enable | IXL_AQ_NUM);
1040
1041 rv = ixl_get_version(sc);
1042 if (rv == 0)
1043 break;
1044 if (rv != ETIMEDOUT) {
1045 aprint_error(", unable to get firmware version\n");
1046 goto shutdown;
1047 }
1048
1049 delaymsec(100);
1050 }
1051
1052 ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
1053
1054 if (sc->sc_mac_type == I40E_MAC_X722)
1055 sc->sc_nqueue_pairs_device = 128;
1056 else
1057 sc->sc_nqueue_pairs_device = 64;
1058
1059 rv = ixl_get_hw_capabilities(sc);
1060 if (rv != 0) {
1061 aprint_error(", GET HW CAPABILITIES %s\n",
1062 rv == ETIMEDOUT ? "timeout" : "error");
1063 goto shutdown;
1064 }
1065
1066 sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max =
1067 MIN((int)sc->sc_nqueue_pairs_device, ncpu);
1068 sc->sc_tx_ring_ndescs = 1024;
1069 sc->sc_rx_ring_ndescs = 1024;
1070
1071 KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_rx_ring_ndescs);
1072 KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_tx_ring_ndescs);
1073
1074 if (ixl_get_mac(sc) != 0) {
1075 /* error printed by ixl_get_mac */
1076 goto shutdown;
1077 }
1078
1079 aprint_normal("\n");
1080 aprint_naive("\n");
1081
1082 aprint_normal_dev(self, "Ethernet address %s\n",
1083 ether_sprintf(sc->sc_enaddr));
1084
1085 rv = ixl_pxe_clear(sc);
1086 if (rv != 0) {
1087 aprint_debug_dev(self, "CLEAR PXE MODE %s\n",
1088 rv == ETIMEDOUT ? "timeout" : "error");
1089 }
1090
1091 ixl_set_filter_control(sc);
1092
1093 if (ixl_hmc(sc) != 0) {
1094 /* error printed by ixl_hmc */
1095 goto shutdown;
1096 }
1097
1098 if (ixl_lldp_shut(sc) != 0) {
1099 /* error printed by ixl_lldp_shut */
1100 goto free_hmc;
1101 }
1102
1103 if (ixl_phy_mask_ints(sc) != 0) {
1104 /* error printed by ixl_phy_mask_ints */
1105 goto free_hmc;
1106 }
1107
1108 if (ixl_restart_an(sc) != 0) {
1109 /* error printed by ixl_restart_an */
1110 goto free_hmc;
1111 }
1112
1113 if (ixl_get_switch_config(sc) != 0) {
1114 /* error printed by ixl_get_switch_config */
1115 goto free_hmc;
1116 }
1117
1118 if (ixl_get_phy_types(sc, &phy_types) != 0) {
1119 /* error printed by ixl_get_phy_abilities */
1120 goto free_hmc;
1121 }
1122
1123 rv = ixl_get_link_status_poll(sc);
1124 if (rv != 0) {
1125 aprint_error_dev(self, "GET LINK STATUS %s\n",
1126 rv == ETIMEDOUT ? "timeout" : "error");
1127 goto free_hmc;
1128 }
1129
1130 if (ixl_dmamem_alloc(sc, &sc->sc_scratch,
1131 sizeof(struct ixl_aq_vsi_data), 8) != 0) {
1132 aprint_error_dev(self, "unable to allocate scratch buffer\n");
1133 goto free_hmc;
1134 }
1135
1136 rv = ixl_get_vsi(sc);
1137 if (rv != 0) {
1138 aprint_error_dev(self, "GET VSI %s %d\n",
1139 rv == ETIMEDOUT ? "timeout" : "error", rv);
1140 goto free_scratch;
1141 }
1142
1143 rv = ixl_set_vsi(sc);
1144 if (rv != 0) {
1145 aprint_error_dev(self, "UPDATE VSI error %s %d\n",
1146 rv == ETIMEDOUT ? "timeout" : "error", rv);
1147 goto free_scratch;
1148 }
1149
1150 if (ixl_queue_pairs_alloc(sc) != 0) {
1151 /* error printed by ixl_queue_pairs_alloc */
1152 goto free_scratch;
1153 }
1154
1155 if (ixl_setup_interrupts(sc) != 0) {
1156 /* error printed by ixl_setup_interrupts */
1157 goto free_queue_pairs;
1158 }
1159
1160 if (ixl_setup_stats(sc) != 0) {
1161 aprint_error_dev(self, "failed to setup event counters\n");
1162 goto teardown_intrs;
1163 }
1164
1165 if (ixl_setup_sysctls(sc) != 0) {
1166 /* error printed by ixl_setup_sysctls */
1167 goto teardown_stats;
1168 }
1169
1170 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_cfg", device_xname(self));
1171 sc->sc_workq = ixl_workq_create(xnamebuf, IXL_WORKQUEUE_PRI,
1172 IPL_NET, WQ_PERCPU | WQ_MPSAFE);
1173 if (sc->sc_workq == NULL)
1174 goto teardown_sysctls;
1175
1176 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_txrx", device_xname(self));
1177 sc->sc_workq_txrx = ixl_workq_create(xnamebuf, IXL_WORKQUEUE_PRI,
1178 IPL_NET, WQ_PERCPU | WQ_MPSAFE);
1179 if (sc->sc_workq_txrx == NULL)
1180 goto teardown_wqs;
1181
1182 snprintf(xnamebuf, sizeof(xnamebuf), "%s_atq_cv", device_xname(self));
1183 cv_init(&sc->sc_atq_cv, xnamebuf);
1184
1185 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
1186
1187 ifp->if_softc = sc;
1188 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1189 ifp->if_extflags = IFEF_MPSAFE;
1190 ifp->if_ioctl = ixl_ioctl;
1191 ifp->if_start = ixl_start;
1192 ifp->if_transmit = ixl_transmit;
1193 ifp->if_watchdog = ixl_watchdog;
1194 ifp->if_init = ixl_init;
1195 ifp->if_stop = ixl_stop;
1196 IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_ndescs);
1197 IFQ_SET_READY(&ifp->if_snd);
1198 #if 0
1199 ifp->if_capabilities |= IFCAP_CSUM_IPv4_Rx |
1200 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
1201 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
1202 #endif
1203 ether_set_vlan_cb(&sc->sc_ec, ixl_vlan_cb);
1204 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
1205 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
1206 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
1207
1208 sc->sc_ec.ec_capenable = sc->sc_ec.ec_capabilities;
1209 /* Disable VLAN_HWFILTER by default */
1210 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
1211
1212 sc->sc_cur_ec_capenable = sc->sc_ec.ec_capenable;
1213
1214 sc->sc_ec.ec_ifmedia = &sc->sc_media;
1215 ifmedia_init(&sc->sc_media, IFM_IMASK, ixl_media_change,
1216 ixl_media_status);
1217
1218 ixl_media_add(sc, phy_types);
1219 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1220 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
1221
1222 if_attach(ifp);
1223 if_deferred_start_init(ifp, NULL);
1224 ether_ifattach(ifp, sc->sc_enaddr);
1225 ether_set_ifflags_cb(&sc->sc_ec, ixl_ifflags_cb);
1226
1227 (void)ixl_get_link_status_poll(sc);
1228 ixl_work_set(&sc->sc_link_state_task, ixl_get_link_status, sc);
1229
1230 ixl_config_other_intr(sc);
1231 ixl_enable_other_intr(sc);
1232
1233 /* remove default mac filter and replace it so we can see vlans */
1234 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, 0, 0);
1235 if (rv != ENOENT) {
1236 aprint_debug_dev(self,
1237 "unable to remove macvlan %u\n", rv);
1238 }
1239 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
1240 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1241 if (rv != ENOENT) {
1242 aprint_debug_dev(self,
1243 "unable to remove macvlan, ignore vlan %u\n", rv);
1244 }
1245
1246 if (ixl_update_macvlan(sc) != 0) {
1247 aprint_debug_dev(self,
1248 "couldn't enable vlan hardware filter\n");
1249 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
1250 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER);
1251 }
1252
1253 sc->sc_txrx_workqueue = true;
1254 sc->sc_tx_process_limit = IXL_TX_PROCESS_LIMIT;
1255 sc->sc_rx_process_limit = IXL_RX_PROCESS_LIMIT;
1256 sc->sc_tx_intr_process_limit = IXL_TX_INTR_PROCESS_LIMIT;
1257 sc->sc_rx_intr_process_limit = IXL_RX_INTR_PROCESS_LIMIT;
1258
1259 if (pmf_device_register(self, NULL, NULL) != true)
1260 aprint_debug_dev(self, "couldn't establish power handler\n");
1261 sc->sc_attached = true;
1262 return;
1263
1264 teardown_wqs:
1265 config_finalize_register(self, ixl_workqs_teardown);
1266 teardown_sysctls:
1267 ixl_teardown_sysctls(sc);
1268 teardown_stats:
1269 ixl_teardown_stats(sc);
1270 teardown_intrs:
1271 ixl_teardown_interrupts(sc);
1272 free_queue_pairs:
1273 ixl_queue_pairs_free(sc);
1274 free_scratch:
1275 ixl_dmamem_free(sc, &sc->sc_scratch);
1276 free_hmc:
1277 ixl_hmc_free(sc);
1278 shutdown:
1279 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1280 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1281 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1282 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1283
1284 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0);
1285 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0);
1286 ixl_wr(sc, sc->sc_aq_regs->atq_len, 0);
1287
1288 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0);
1289 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0);
1290 ixl_wr(sc, sc->sc_aq_regs->arq_len, 0);
1291
1292 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1293 0, IXL_DMA_LEN(&sc->sc_arq),
1294 BUS_DMASYNC_POSTREAD);
1295 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1296 0, IXL_DMA_LEN(&sc->sc_atq),
1297 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1298
1299 ixl_arq_unfill(sc);
1300 free_arq:
1301 ixl_dmamem_free(sc, &sc->sc_arq);
1302 free_atq:
1303 ixl_dmamem_free(sc, &sc->sc_atq);
1304 unmap:
1305 mutex_destroy(&sc->sc_atq_lock);
1306 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
1307 mutex_destroy(&sc->sc_cfg_lock);
1308 sc->sc_mems = 0;
1309
1310 sc->sc_attached = false;
1311 }
1312
1313 static int
1314 ixl_detach(device_t self, int flags)
1315 {
1316 struct ixl_softc *sc = device_private(self);
1317 struct ifnet *ifp = &sc->sc_ec.ec_if;
1318
1319 if (!sc->sc_attached)
1320 return 0;
1321
1322 ixl_stop(ifp, 1);
1323
1324 ixl_disable_other_intr(sc);
1325
1326 /* wait for ATQ handler */
1327 mutex_enter(&sc->sc_atq_lock);
1328 mutex_exit(&sc->sc_atq_lock);
1329
1330 ixl_work_wait(sc->sc_workq, &sc->sc_arq_task);
1331 ixl_work_wait(sc->sc_workq, &sc->sc_link_state_task);
1332
1333 if (sc->sc_workq != NULL) {
1334 ixl_workq_destroy(sc->sc_workq);
1335 sc->sc_workq = NULL;
1336 }
1337
1338 if (sc->sc_workq_txrx != NULL) {
1339 ixl_workq_destroy(sc->sc_workq_txrx);
1340 sc->sc_workq_txrx = NULL;
1341 }
1342
1343 ifmedia_delete_instance(&sc->sc_media, IFM_INST_ANY);
1344 ether_ifdetach(ifp);
1345 if_detach(ifp);
1346
1347 ixl_teardown_interrupts(sc);
1348 ixl_teardown_stats(sc);
1349
1350 ixl_queue_pairs_free(sc);
1351
1352 ixl_dmamem_free(sc, &sc->sc_scratch);
1353 ixl_hmc_free(sc);
1354
1355 /* shutdown */
1356 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1357 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1358 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1359 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1360
1361 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0);
1362 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0);
1363 ixl_wr(sc, sc->sc_aq_regs->atq_len, 0);
1364
1365 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0);
1366 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0);
1367 ixl_wr(sc, sc->sc_aq_regs->arq_len, 0);
1368
1369 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1370 0, IXL_DMA_LEN(&sc->sc_arq),
1371 BUS_DMASYNC_POSTREAD);
1372 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1373 0, IXL_DMA_LEN(&sc->sc_atq),
1374 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1375
1376 ixl_arq_unfill(sc);
1377
1378 ixl_dmamem_free(sc, &sc->sc_arq);
1379 ixl_dmamem_free(sc, &sc->sc_atq);
1380
1381 cv_destroy(&sc->sc_atq_cv);
1382 mutex_destroy(&sc->sc_atq_lock);
1383
1384 if (sc->sc_mems != 0) {
1385 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
1386 sc->sc_mems = 0;
1387 }
1388
1389 mutex_destroy(&sc->sc_cfg_lock);
1390
1391 return 0;
1392 }
1393
1394 static int
1395 ixl_workqs_teardown(device_t self)
1396 {
1397 struct ixl_softc *sc = device_private(self);
1398
1399 if (sc->sc_workq != NULL) {
1400 ixl_workq_destroy(sc->sc_workq);
1401 sc->sc_workq = NULL;
1402 }
1403
1404 if (sc->sc_workq_txrx != NULL) {
1405 ixl_workq_destroy(sc->sc_workq_txrx);
1406 sc->sc_workq_txrx = NULL;
1407 }
1408
1409 return 0;
1410 }
1411
1412 static int
1413 ixl_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
1414 {
1415 struct ifnet *ifp = &ec->ec_if;
1416 struct ixl_softc *sc = ifp->if_softc;
1417 int rv;
1418
1419 if (!ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)) {
1420 return 0;
1421 }
1422
1423 if (set) {
1424 rv = ixl_add_macvlan(sc, sc->sc_enaddr, vid,
1425 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
1426 if (rv == 0) {
1427 rv = ixl_add_macvlan(sc, etherbroadcastaddr,
1428 vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
1429 }
1430 } else {
1431 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, vid,
1432 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
1433 (void)ixl_remove_macvlan(sc, etherbroadcastaddr, vid,
1434 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
1435 }
1436
1437 return rv;
1438 }
1439
1440 static void
1441 ixl_media_add(struct ixl_softc *sc, uint64_t phy_types)
1442 {
1443 struct ifmedia *ifm = &sc->sc_media;
1444 const struct ixl_phy_type *itype;
1445 unsigned int i;
1446
1447 for (i = 0; i < __arraycount(ixl_phy_type_map); i++) {
1448 itype = &ixl_phy_type_map[i];
1449
1450 if (ISSET(phy_types, itype->phy_type)) {
1451 ifmedia_add(ifm,
1452 IFM_ETHER | IFM_FDX | itype->ifm_type, 0, NULL);
1453
1454 if (itype->ifm_type == IFM_100_TX) {
1455 ifmedia_add(ifm, IFM_ETHER | itype->ifm_type,
1456 0, NULL);
1457 }
1458 }
1459 }
1460 }
1461
1462 static void
1463 ixl_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1464 {
1465 struct ixl_softc *sc = ifp->if_softc;
1466
1467 ifmr->ifm_status = sc->sc_media_status;
1468 ifmr->ifm_active = sc->sc_media_active;
1469
1470 mutex_enter(&sc->sc_cfg_lock);
1471 if (ifp->if_link_state == LINK_STATE_UP)
1472 SET(ifmr->ifm_status, IFM_ACTIVE);
1473 mutex_exit(&sc->sc_cfg_lock);
1474 }
1475
1476 static int
1477 ixl_media_change(struct ifnet *ifp)
1478 {
1479
1480 return 0;
1481 }
1482
1483 static void
1484 ixl_watchdog(struct ifnet *ifp)
1485 {
1486
1487 }
1488
1489 static void
1490 ixl_del_all_multiaddr(struct ixl_softc *sc)
1491 {
1492 struct ethercom *ec = &sc->sc_ec;
1493 struct ether_multi *enm;
1494 struct ether_multistep step;
1495
1496 ETHER_LOCK(ec);
1497 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1498 ETHER_NEXT_MULTI(step, enm)) {
1499 ixl_remove_macvlan(sc, enm->enm_addrlo, 0,
1500 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1501 }
1502 ETHER_UNLOCK(ec);
1503 }
1504
1505 static int
1506 ixl_add_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
1507 {
1508 struct ifnet *ifp = &sc->sc_ec.ec_if;
1509 int rv;
1510
1511 if (ISSET(ifp->if_flags, IFF_ALLMULTI))
1512 return 0;
1513
1514 if (memcmp(addrlo, addrhi, ETHER_ADDR_LEN) != 0) {
1515 ixl_del_all_multiaddr(sc);
1516 SET(ifp->if_flags, IFF_ALLMULTI);
1517 return ENETRESET;
1518 }
1519
1520 /* multicast addresses cannot use the VLAN HWFILTER */
1521 rv = ixl_add_macvlan(sc, addrlo, 0,
1522 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
1523
1524 if (rv == ENOSPC) {
1525 ixl_del_all_multiaddr(sc);
1526 SET(ifp->if_flags, IFF_ALLMULTI);
1527 return ENETRESET;
1528 }
1529
1530 return rv;
1531 }
1532
1533 static int
1534 ixl_del_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
1535 {
1536 struct ifnet *ifp = &sc->sc_ec.ec_if;
1537 struct ethercom *ec = &sc->sc_ec;
1538 struct ether_multi *enm, *enm_last;
1539 struct ether_multistep step;
1540 int error, rv = 0;
1541
1542 if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
1543 ixl_remove_macvlan(sc, addrlo, 0,
1544 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1545 return 0;
1546 }
1547
1548 ETHER_LOCK(ec);
1549 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1550 ETHER_NEXT_MULTI(step, enm)) {
1551 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
1552 ETHER_ADDR_LEN) != 0) {
1553 goto out;
1554 }
1555 }
1556
1557 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1558 ETHER_NEXT_MULTI(step, enm)) {
1559 error = ixl_add_macvlan(sc, enm->enm_addrlo, 0,
1560 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
1561 if (error != 0)
1562 break;
1563 }
1564
1565 if (enm != NULL) {
1566 enm_last = enm;
1567 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1568 ETHER_NEXT_MULTI(step, enm)) {
1569 if (enm == enm_last)
1570 break;
1571
1572 ixl_remove_macvlan(sc, enm->enm_addrlo, 0,
1573 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1574 }
1575 } else {
1576 CLR(ifp->if_flags, IFF_ALLMULTI);
1577 rv = ENETRESET;
1578 }
1579
1580 out:
1581 ETHER_UNLOCK(ec);
1582 return rv;
1583 }
1584
1585 static int
1586 ixl_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1587 {
1588 struct ifreq *ifr = (struct ifreq *)data;
1589 struct ixl_softc *sc = (struct ixl_softc *)ifp->if_softc;
1590 struct ixl_tx_ring *txr;
1591 struct ixl_rx_ring *rxr;
1592 const struct sockaddr *sa;
1593 uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
1594 int s, error = 0;
1595 unsigned int i;
1596
1597 switch (cmd) {
1598 case SIOCADDMULTI:
1599 sa = ifreq_getaddr(SIOCADDMULTI, ifr);
1600 if (ether_addmulti(sa, &sc->sc_ec) == ENETRESET) {
1601 error = ether_multiaddr(sa, addrlo, addrhi);
1602 if (error != 0)
1603 return error;
1604
1605 error = ixl_add_multi(sc, addrlo, addrhi);
1606 if (error != 0 && error != ENETRESET) {
1607 ether_delmulti(sa, &sc->sc_ec);
1608 error = EIO;
1609 }
1610 }
1611 break;
1612
1613 case SIOCDELMULTI:
1614 sa = ifreq_getaddr(SIOCDELMULTI, ifr);
1615 if (ether_delmulti(sa, &sc->sc_ec) == ENETRESET) {
1616 error = ether_multiaddr(sa, addrlo, addrhi);
1617 if (error != 0)
1618 return error;
1619
1620 error = ixl_del_multi(sc, addrlo, addrhi);
1621 }
1622 break;
1623
1624 case SIOCGIFDATA:
1625 case SIOCZIFDATA:
1626 ifp->if_ipackets = 0;
1627 ifp->if_ibytes = 0;
1628 ifp->if_iqdrops = 0;
1629 ifp->if_ierrors = 0;
1630 ifp->if_opackets = 0;
1631 ifp->if_obytes = 0;
1632 ifp->if_omcasts = 0;
1633
1634 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
1635 txr = sc->sc_qps[i].qp_txr;
1636 rxr = sc->sc_qps[i].qp_rxr;
1637
1638 mutex_enter(&rxr->rxr_lock);
1639 ifp->if_ipackets += rxr->rxr_ipackets;
1640 ifp->if_ibytes += rxr->rxr_ibytes;
1641 ifp->if_iqdrops += rxr->rxr_iqdrops;
1642 ifp->if_ierrors += rxr->rxr_ierrors;
1643 if (cmd == SIOCZIFDATA) {
1644 rxr->rxr_ipackets = 0;
1645 rxr->rxr_ibytes = 0;
1646 rxr->rxr_iqdrops = 0;
1647 rxr->rxr_ierrors = 0;
1648 }
1649 mutex_exit(&rxr->rxr_lock);
1650
1651 mutex_enter(&txr->txr_lock);
1652 ifp->if_opackets += txr->txr_opackets;
1653 ifp->if_obytes += txr->txr_obytes;
1654 ifp->if_omcasts += txr->txr_omcasts;
1655 if (cmd == SIOCZIFDATA) {
1656 txr->txr_opackets = 0;
1657 txr->txr_obytes = 0;
1658 txr->txr_omcasts = 0;
1659 }
1660 mutex_exit(&txr->txr_lock);
1661 }
1662 /* FALLTHROUGH */
1663 default:
1664 s = splnet();
1665 error = ether_ioctl(ifp, cmd, data);
1666 splx(s);
1667 }
1668
1669 if (error == ENETRESET)
1670 error = ixl_iff(sc);
1671
1672 return error;
1673 }
1674
1675 static enum i40e_mac_type
1676 ixl_mactype(pci_product_id_t id)
1677 {
1678
1679 switch (id) {
1680 case PCI_PRODUCT_INTEL_XL710_SFP:
1681 case PCI_PRODUCT_INTEL_XL710_KX_B:
1682 case PCI_PRODUCT_INTEL_XL710_KX_C:
1683 case PCI_PRODUCT_INTEL_XL710_QSFP_A:
1684 case PCI_PRODUCT_INTEL_XL710_QSFP_B:
1685 case PCI_PRODUCT_INTEL_XL710_QSFP_C:
1686 case PCI_PRODUCT_INTEL_X710_10G_T:
1687 case PCI_PRODUCT_INTEL_XL710_20G_BP_1:
1688 case PCI_PRODUCT_INTEL_XL710_20G_BP_2:
1689 case PCI_PRODUCT_INTEL_X710_T4_10G:
1690 case PCI_PRODUCT_INTEL_XXV710_25G_BP:
1691 case PCI_PRODUCT_INTEL_XXV710_25G_SFP28:
1692 return I40E_MAC_XL710;
1693
1694 case PCI_PRODUCT_INTEL_X722_KX:
1695 case PCI_PRODUCT_INTEL_X722_QSFP:
1696 case PCI_PRODUCT_INTEL_X722_SFP:
1697 case PCI_PRODUCT_INTEL_X722_1G_BASET:
1698 case PCI_PRODUCT_INTEL_X722_10G_BASET:
1699 case PCI_PRODUCT_INTEL_X722_I_SFP:
1700 return I40E_MAC_X722;
1701 }
1702
1703 return I40E_MAC_GENERIC;
1704 }
1705
1706 static inline void *
1707 ixl_hmc_kva(struct ixl_softc *sc, enum ixl_hmc_types type, unsigned int i)
1708 {
1709 uint8_t *kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
1710 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
1711
1712 if (i >= e->hmc_count)
1713 return NULL;
1714
1715 kva += e->hmc_base;
1716 kva += i * e->hmc_size;
1717
1718 return kva;
1719 }
1720
1721 static inline size_t
1722 ixl_hmc_len(struct ixl_softc *sc, enum ixl_hmc_types type)
1723 {
1724 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
1725
1726 return e->hmc_size;
1727 }
1728
1729 static void
1730 ixl_enable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp)
1731 {
1732 struct ixl_rx_ring *rxr = qp->qp_rxr;
1733
1734 ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid),
1735 I40E_PFINT_DYN_CTLN_INTENA_MASK |
1736 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1737 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
1738 ixl_flush(sc);
1739 }
1740
1741 static void
1742 ixl_disable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp)
1743 {
1744 struct ixl_rx_ring *rxr = qp->qp_rxr;
1745
1746 ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid),
1747 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
1748 ixl_flush(sc);
1749 }
1750
1751 static void
1752 ixl_enable_other_intr(struct ixl_softc *sc)
1753 {
1754
1755 ixl_wr(sc, I40E_PFINT_DYN_CTL0,
1756 I40E_PFINT_DYN_CTL0_INTENA_MASK |
1757 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
1758 (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
1759 ixl_flush(sc);
1760 }
1761
1762 static void
1763 ixl_disable_other_intr(struct ixl_softc *sc)
1764 {
1765
1766 ixl_wr(sc, I40E_PFINT_DYN_CTL0,
1767 (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
1768 ixl_flush(sc);
1769 }
1770
1771 static int
1772 ixl_reinit(struct ixl_softc *sc)
1773 {
1774 struct ixl_rx_ring *rxr;
1775 struct ixl_tx_ring *txr;
1776 unsigned int i;
1777 uint32_t reg;
1778
1779 KASSERT(mutex_owned(&sc->sc_cfg_lock));
1780
1781 if (ixl_get_vsi(sc) != 0)
1782 return EIO;
1783
1784 if (ixl_set_vsi(sc) != 0)
1785 return EIO;
1786
1787 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1788 txr = sc->sc_qps[i].qp_txr;
1789 rxr = sc->sc_qps[i].qp_rxr;
1790
1791 txr->txr_cons = txr->txr_prod = 0;
1792 rxr->rxr_cons = rxr->rxr_prod = 0;
1793
1794 ixl_txr_config(sc, txr);
1795 ixl_rxr_config(sc, rxr);
1796 }
1797
1798 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
1799 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_PREWRITE);
1800
1801 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1802 txr = sc->sc_qps[i].qp_txr;
1803 rxr = sc->sc_qps[i].qp_rxr;
1804
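		/* associate this LAN queue with our physical function */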
1805 ixl_wr(sc, I40E_QTX_CTL(i), I40E_QTX_CTL_PF_QUEUE |
1806 (sc->sc_pf_id << I40E_QTX_CTL_PF_INDX_SHIFT));
1807 ixl_flush(sc);
1808
1809 ixl_wr(sc, txr->txr_tail, txr->txr_prod);
1810 ixl_wr(sc, rxr->rxr_tail, rxr->rxr_prod);
1811
1812 /* ixl_rxfill() needs lock held */
1813 mutex_enter(&rxr->rxr_lock);
1814 ixl_rxfill(sc, rxr);
1815 mutex_exit(&rxr->rxr_lock);
1816
1817 reg = ixl_rd(sc, I40E_QRX_ENA(i));
1818 SET(reg, I40E_QRX_ENA_QENA_REQ_MASK);
1819 ixl_wr(sc, I40E_QRX_ENA(i), reg);
1820 if (ixl_rxr_enabled(sc, rxr) != 0)
1821 goto stop;
1822
1823 ixl_txr_qdis(sc, txr, 1);
1824
1825 reg = ixl_rd(sc, I40E_QTX_ENA(i));
1826 SET(reg, I40E_QTX_ENA_QENA_REQ_MASK);
1827 ixl_wr(sc, I40E_QTX_ENA(i), reg);
1828
1829 if (ixl_txr_enabled(sc, txr) != 0)
1830 goto stop;
1831 }
1832
1833 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
1834 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE);
1835
1836 return 0;
1837
1838 stop:
1839 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
1840 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE);
1841
1842 return ETIMEDOUT;
1843 }
1844
1845 static int
1846 ixl_init_locked(struct ixl_softc *sc)
1847 {
1848 struct ifnet *ifp = &sc->sc_ec.ec_if;
1849 unsigned int i;
1850 int error, eccap_change;
1851
1852 KASSERT(mutex_owned(&sc->sc_cfg_lock));
1853
1854 if (ISSET(ifp->if_flags, IFF_RUNNING))
1855 ixl_stop_locked(sc);
1856
1857 if (sc->sc_dead) {
1858 return ENXIO;
1859 }
1860
1861 eccap_change = sc->sc_ec.ec_capenable ^ sc->sc_cur_ec_capenable;
1862 if (ISSET(eccap_change, ETHERCAP_VLAN_HWTAGGING))
1863 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWTAGGING;
1864
1865 if (ISSET(eccap_change, ETHERCAP_VLAN_HWFILTER)) {
1866 if (ixl_update_macvlan(sc) == 0) {
1867 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWFILTER;
1868 } else {
1869 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
1870 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER);
1871 }
1872 }
1873
1874 if (sc->sc_intrtype != PCI_INTR_TYPE_MSIX)
1875 sc->sc_nqueue_pairs = 1;
1876 else
1877 sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max;
1878
1879 error = ixl_reinit(sc);
1880 if (error) {
1881 ixl_stop_locked(sc);
1882 return error;
1883 }
1884
1885 SET(ifp->if_flags, IFF_RUNNING);
1886 CLR(ifp->if_flags, IFF_OACTIVE);
1887
1888 (void)ixl_get_link_status(sc);
1889
1890 ixl_config_rss(sc);
1891 ixl_config_queue_intr(sc);
1892
1893 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1894 ixl_enable_queue_intr(sc, &sc->sc_qps[i]);
1895 }
1896
1897 error = ixl_iff(sc);
1898 if (error) {
1899 ixl_stop_locked(sc);
1900 return error;
1901 }
1902
1903 return 0;
1904 }
1905
1906 static int
1907 ixl_init(struct ifnet *ifp)
1908 {
1909 struct ixl_softc *sc = ifp->if_softc;
1910 int error;
1911
1912 mutex_enter(&sc->sc_cfg_lock);
1913 error = ixl_init_locked(sc);
1914 mutex_exit(&sc->sc_cfg_lock);
1915
1916 return error;
1917 }
1918
1919 static int
1920 ixl_iff(struct ixl_softc *sc)
1921 {
1922 struct ifnet *ifp = &sc->sc_ec.ec_if;
1923 struct ixl_atq iatq;
1924 struct ixl_aq_desc *iaq;
1925 struct ixl_aq_vsi_promisc_param *param;
1926 uint16_t flag_add, flag_del;
1927 int error;
1928
1929 if (!ISSET(ifp->if_flags, IFF_RUNNING))
1930 return 0;
1931
1932 memset(&iatq, 0, sizeof(iatq));
1933
1934 iaq = &iatq.iatq_desc;
1935 iaq->iaq_opcode = htole16(IXL_AQ_OP_SET_VSI_PROMISC);
1936
1937 param = (struct ixl_aq_vsi_promisc_param *)&iaq->iaq_param;
1938 param->flags = htole16(0);
1939
1940 if (!ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)
1941 || ISSET(ifp->if_flags, IFF_PROMISC)) {
1942 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_BCAST |
1943 IXL_AQ_VSI_PROMISC_FLAG_VLAN);
1944 }
1945
1946 if (ISSET(ifp->if_flags, IFF_PROMISC)) {
1947 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
1948 IXL_AQ_VSI_PROMISC_FLAG_MCAST);
1949 } else if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
1950 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_MCAST);
1951 }
1952 param->valid_flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
1953 IXL_AQ_VSI_PROMISC_FLAG_MCAST | IXL_AQ_VSI_PROMISC_FLAG_BCAST |
1954 IXL_AQ_VSI_PROMISC_FLAG_VLAN);
1955 param->seid = sc->sc_seid;
1956
1957 error = ixl_atq_exec(sc, &iatq);
1958 if (error)
1959 return error;
1960
1961 if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK))
1962 return EIO;
1963
1964 if (memcmp(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN) != 0) {
1965 if (ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)) {
1966 flag_add = IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH;
1967 flag_del = IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH;
1968 } else {
1969 flag_add = IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN;
1970 flag_del = IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN;
1971 }
1972
1973 ixl_remove_macvlan(sc, sc->sc_enaddr, 0, flag_del);
1974
1975 memcpy(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN);
1976 ixl_add_macvlan(sc, sc->sc_enaddr, 0, flag_add);
1977 }
1978 return 0;
1979 }
1980
1981 static void
1982 ixl_stop_rendezvous(struct ixl_softc *sc)
1983 {
1984 struct ixl_tx_ring *txr;
1985 struct ixl_rx_ring *rxr;
1986 unsigned int i;
1987
1988 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1989 txr = sc->sc_qps[i].qp_txr;
1990 rxr = sc->sc_qps[i].qp_rxr;
1991
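		/*
		 * Taking and immediately dropping each ring lock waits out
		 * any interrupt or softint handler that still holds it, so
		 * the rings can be torn down safely afterwards.
		 */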
1992 mutex_enter(&txr->txr_lock);
1993 mutex_exit(&txr->txr_lock);
1994
1995 mutex_enter(&rxr->rxr_lock);
1996 mutex_exit(&rxr->rxr_lock);
1997
1998 ixl_work_wait(sc->sc_workq_txrx,
1999 &sc->sc_qps[i].qp_task);
2000 }
2001 }
2002
2003 static void
2004 ixl_stop_locked(struct ixl_softc *sc)
2005 {
2006 struct ifnet *ifp = &sc->sc_ec.ec_if;
2007 struct ixl_rx_ring *rxr;
2008 struct ixl_tx_ring *txr;
2009 unsigned int i;
2010 uint32_t reg;
2011
2012 KASSERT(mutex_owned(&sc->sc_cfg_lock));
2013
2014 CLR(ifp->if_flags, IFF_RUNNING | IFF_OACTIVE);
2015
2016 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2017 txr = sc->sc_qps[i].qp_txr;
2018 rxr = sc->sc_qps[i].qp_rxr;
2019
2020 ixl_disable_queue_intr(sc, &sc->sc_qps[i]);
2021
2022 mutex_enter(&txr->txr_lock);
2023 ixl_txr_qdis(sc, txr, 0);
2024 /* XXX wait at least 400 usec for all tx queues in one go */
2025 ixl_flush(sc);
2026 DELAY(500);
2027
2028 reg = ixl_rd(sc, I40E_QTX_ENA(i));
2029 CLR(reg, I40E_QTX_ENA_QENA_REQ_MASK);
2030 ixl_wr(sc, I40E_QTX_ENA(i), reg);
2031 		/* XXX wait 50ms from completion of the TX queue disable */
2032 ixl_flush(sc);
2033 DELAY(50);
2034
2035 if (ixl_txr_disabled(sc, txr) != 0) {
2036 mutex_exit(&txr->txr_lock);
2037 goto die;
2038 }
2039 mutex_exit(&txr->txr_lock);
2040
2041 mutex_enter(&rxr->rxr_lock);
2042 reg = ixl_rd(sc, I40E_QRX_ENA(i));
2043 CLR(reg, I40E_QRX_ENA_QENA_REQ_MASK);
2044 ixl_wr(sc, I40E_QRX_ENA(i), reg);
2045 /* XXX wait 50ms from completion of the RX queue disable */
2046 ixl_flush(sc);
2047 DELAY(50);
2048
2049 if (ixl_rxr_disabled(sc, rxr) != 0) {
2050 mutex_exit(&rxr->rxr_lock);
2051 goto die;
2052 }
2053 mutex_exit(&rxr->rxr_lock);
2054 }
2055
2056 ixl_stop_rendezvous(sc);
2057
2058 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2059 txr = sc->sc_qps[i].qp_txr;
2060 rxr = sc->sc_qps[i].qp_rxr;
2061
2062 ixl_txr_unconfig(sc, txr);
2063 ixl_rxr_unconfig(sc, rxr);
2064
2065 ixl_txr_clean(sc, txr);
2066 ixl_rxr_clean(sc, rxr);
2067 }
2068
2069 return;
2070 die:
2071 sc->sc_dead = true;
2072 	log(LOG_CRIT, "%s: failed to shut down rings\n",
2073 	    device_xname(sc->sc_dev));
2074 return;
2075 }
2076
2077 static void
2078 ixl_stop(struct ifnet *ifp, int disable)
2079 {
2080 struct ixl_softc *sc = ifp->if_softc;
2081
2082 mutex_enter(&sc->sc_cfg_lock);
2083 ixl_stop_locked(sc);
2084 mutex_exit(&sc->sc_cfg_lock);
2085 }
2086
2087 static int
2088 ixl_queue_pairs_alloc(struct ixl_softc *sc)
2089 {
2090 struct ixl_queue_pair *qp;
2091 unsigned int i;
2092 size_t sz;
2093
2094 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2095 sc->sc_qps = kmem_zalloc(sz, KM_SLEEP);
2096
2097 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2098 qp = &sc->sc_qps[i];
2099
2100 qp->qp_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
2101 ixl_handle_queue, qp);
2102 if (qp->qp_si == NULL)
2103 goto free;
2104
2105 qp->qp_txr = ixl_txr_alloc(sc, i);
2106 if (qp->qp_txr == NULL)
2107 goto free;
2108
2109 qp->qp_rxr = ixl_rxr_alloc(sc, i);
2110 if (qp->qp_rxr == NULL)
2111 goto free;
2112
2113 qp->qp_sc = sc;
2114 ixl_work_set(&qp->qp_task, ixl_handle_queue, qp);
2115 snprintf(qp->qp_name, sizeof(qp->qp_name),
2116 "%s-TXRX%d", device_xname(sc->sc_dev), i);
2117 }
2118
2119 return 0;
2120 free:
2121 if (sc->sc_qps != NULL) {
2122 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2123 qp = &sc->sc_qps[i];
2124
2125 if (qp->qp_txr != NULL)
2126 ixl_txr_free(sc, qp->qp_txr);
2127 if (qp->qp_rxr != NULL)
2128 ixl_rxr_free(sc, qp->qp_rxr);
2129 if (qp->qp_si != NULL)
2130 softint_disestablish(qp->qp_si);
2131 }
2132
2133 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2134 kmem_free(sc->sc_qps, sz);
2135 sc->sc_qps = NULL;
2136 }
2137
2138 return -1;
2139 }
2140
2141 static void
2142 ixl_queue_pairs_free(struct ixl_softc *sc)
2143 {
2144 struct ixl_queue_pair *qp;
2145 unsigned int i;
2146 size_t sz;
2147
2148 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2149 qp = &sc->sc_qps[i];
2150 ixl_txr_free(sc, qp->qp_txr);
2151 ixl_rxr_free(sc, qp->qp_rxr);
2152 softint_disestablish(qp->qp_si);
2153 }
2154
2155 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2156 kmem_free(sc->sc_qps, sz);
2157 sc->sc_qps = NULL;
2158 }
2159
2160 static struct ixl_tx_ring *
2161 ixl_txr_alloc(struct ixl_softc *sc, unsigned int qid)
2162 {
2163 struct ixl_tx_ring *txr = NULL;
2164 struct ixl_tx_map *maps = NULL, *txm;
2165 unsigned int i;
2166
2167 txr = kmem_zalloc(sizeof(*txr), KM_SLEEP);
2168 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_tx_ring_ndescs,
2169 KM_SLEEP);
2170
2171 if (ixl_dmamem_alloc(sc, &txr->txr_mem,
2172 sizeof(struct ixl_tx_desc) * sc->sc_tx_ring_ndescs,
2173 IXL_TX_QUEUE_ALIGN) != 0)
2174 goto free;
2175
2176 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2177 txm = &maps[i];
2178
2179 if (bus_dmamap_create(sc->sc_dmat,
2180 IXL_HARDMTU, IXL_TX_PKT_DESCS, IXL_HARDMTU, 0,
2181 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &txm->txm_map) != 0)
2182 goto uncreate;
2183
2184 txm->txm_eop = -1;
2185 txm->txm_m = NULL;
2186 }
2187
2188 txr->txr_cons = txr->txr_prod = 0;
2189 txr->txr_maps = maps;
2190
2191 txr->txr_intrq = pcq_create(sc->sc_tx_ring_ndescs, KM_NOSLEEP);
2192 if (txr->txr_intrq == NULL)
2193 goto uncreate;
2194
2195 txr->txr_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
2196 ixl_deferred_transmit, txr);
2197 if (txr->txr_si == NULL)
2198 goto destroy_pcq;
2199
2200 txr->txr_tail = I40E_QTX_TAIL(qid);
2201 txr->txr_qid = qid;
2202 txr->txr_sc = sc;
2203 mutex_init(&txr->txr_lock, MUTEX_DEFAULT, IPL_NET);
2204
2205 return txr;
2206
2207 destroy_pcq:
2208 pcq_destroy(txr->txr_intrq);
2209 uncreate:
2210 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2211 txm = &maps[i];
2212
2213 if (txm->txm_map == NULL)
2214 continue;
2215
2216 bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2217 }
2218
2219 ixl_dmamem_free(sc, &txr->txr_mem);
2220 free:
2221 kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs);
2222 kmem_free(txr, sizeof(*txr));
2223
2224 return NULL;
2225 }
2226
2227 static void
2228 ixl_txr_qdis(struct ixl_softc *sc, struct ixl_tx_ring *txr, int enable)
2229 {
2230 unsigned int qid;
2231 bus_size_t reg;
2232 uint32_t r;
2233
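	/*
	 * GLLAN_TXPRE_QDIS registers each cover 128 queues: the register
	 * is selected by the absolute queue number and the remainder goes
	 * into the QINDX field.
	 */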
2234 qid = txr->txr_qid + sc->sc_base_queue;
2235 reg = I40E_GLLAN_TXPRE_QDIS(qid / 128);
2236 qid %= 128;
2237
2238 r = ixl_rd(sc, reg);
2239 CLR(r, I40E_GLLAN_TXPRE_QDIS_QINDX_MASK);
2240 SET(r, qid << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
2241 SET(r, enable ? I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK :
2242 I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK);
2243 ixl_wr(sc, reg, r);
2244 }
2245
2246 static void
2247 ixl_txr_config(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2248 {
2249 struct ixl_hmc_txq txq;
2250 struct ixl_aq_vsi_data *data = IXL_DMA_KVA(&sc->sc_scratch);
2251 void *hmc;
2252
2253 memset(&txq, 0, sizeof(txq));
2254 txq.head = htole16(txr->txr_cons);
2255 txq.new_context = 1;
2256 txq.base = htole64(IXL_DMA_DVA(&txr->txr_mem) / IXL_HMC_TXQ_BASE_UNIT);
2257 txq.head_wb_ena = IXL_HMC_TXQ_DESC_WB;
2258 txq.qlen = htole16(sc->sc_tx_ring_ndescs);
2259 txq.tphrdesc_ena = 0;
2260 txq.tphrpacket_ena = 0;
2261 txq.tphwdesc_ena = 0;
2262 txq.rdylist = data->qs_handle[0];
2263
2264 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2265 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2266 ixl_hmc_pack(hmc, &txq, ixl_hmc_pack_txq,
2267 __arraycount(ixl_hmc_pack_txq));
2268 }
2269
2270 static void
2271 ixl_txr_unconfig(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2272 {
2273 void *hmc;
2274
2275 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2276 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2277 }
2278
2279 static void
2280 ixl_txr_clean(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2281 {
2282 struct ixl_tx_map *maps, *txm;
2283 bus_dmamap_t map;
2284 unsigned int i;
2285
2286 maps = txr->txr_maps;
2287 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2288 txm = &maps[i];
2289
2290 if (txm->txm_m == NULL)
2291 continue;
2292
2293 map = txm->txm_map;
2294 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2295 BUS_DMASYNC_POSTWRITE);
2296 bus_dmamap_unload(sc->sc_dmat, map);
2297
2298 m_freem(txm->txm_m);
2299 txm->txm_m = NULL;
2300 }
2301 }
2302
2303 static int
2304 ixl_txr_enabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2305 {
2306 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2307 uint32_t reg;
2308 int i;
2309
2310 for (i = 0; i < 10; i++) {
2311 reg = ixl_rd(sc, ena);
2312 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK))
2313 return 0;
2314
2315 delaymsec(10);
2316 }
2317
2318 return ETIMEDOUT;
2319 }
2320
2321 static int
2322 ixl_txr_disabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2323 {
2324 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2325 uint32_t reg;
2326 int i;
2327
2328 KASSERT(mutex_owned(&txr->txr_lock));
2329
2330 for (i = 0; i < 20; i++) {
2331 reg = ixl_rd(sc, ena);
2332 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK) == 0)
2333 return 0;
2334
2335 delaymsec(10);
2336 }
2337
2338 return ETIMEDOUT;
2339 }
2340
2341 static void
2342 ixl_txr_free(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2343 {
2344 struct ixl_tx_map *maps, *txm;
2345 struct mbuf *m;
2346 unsigned int i;
2347
2348 softint_disestablish(txr->txr_si);
2349 while ((m = pcq_get(txr->txr_intrq)) != NULL)
2350 m_freem(m);
2351 pcq_destroy(txr->txr_intrq);
2352
2353 maps = txr->txr_maps;
2354 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2355 txm = &maps[i];
2356
2357 bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2358 }
2359
2360 ixl_dmamem_free(sc, &txr->txr_mem);
2361 mutex_destroy(&txr->txr_lock);
2362 kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs);
2363 kmem_free(txr, sizeof(*txr));
2364 }
2365
2366 static inline int
2367 ixl_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf **m0,
2368 struct ixl_tx_ring *txr)
2369 {
2370 struct mbuf *m;
2371 int error;
2372
2373 KASSERT(mutex_owned(&txr->txr_lock));
2374
2375 m = *m0;
2376
2377 error = bus_dmamap_load_mbuf(dmat, map, m,
2378 BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT);
2379 if (error != EFBIG)
2380 return error;
2381
2382 m = m_defrag(m, M_DONTWAIT);
2383 if (m != NULL) {
2384 *m0 = m;
2385 txr->txr_defragged.ev_count++;
2386
2387 error = bus_dmamap_load_mbuf(dmat, map, m,
2388 BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT);
2389 } else {
2390 txr->txr_defrag_failed.ev_count++;
2391 error = ENOBUFS;
2392 }
2393
2394 return error;
2395 }
2396
2397 static void
2398 ixl_tx_common_locked(struct ifnet *ifp, struct ixl_tx_ring *txr,
2399 bool is_transmit)
2400 {
2401 struct ixl_softc *sc = ifp->if_softc;
2402 struct ixl_tx_desc *ring, *txd;
2403 struct ixl_tx_map *txm;
2404 bus_dmamap_t map;
2405 struct mbuf *m;
2406 uint64_t cmd, cmd_vlan;
2407 unsigned int prod, free, last, i;
2408 unsigned int mask;
2409 int post = 0;
2410
2411 KASSERT(mutex_owned(&txr->txr_lock));
2412
2413 if (ifp->if_link_state != LINK_STATE_UP
2414 || !ISSET(ifp->if_flags, IFF_RUNNING)
2415 || (!is_transmit && ISSET(ifp->if_flags, IFF_OACTIVE))) {
2416 if (!is_transmit)
2417 IFQ_PURGE(&ifp->if_snd);
2418 return;
2419 }
2420
2421 prod = txr->txr_prod;
2422 free = txr->txr_cons;
2423 if (free <= prod)
2424 free += sc->sc_tx_ring_ndescs;
2425 free -= prod;
2426
2427 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2428 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTWRITE);
2429
2430 ring = IXL_DMA_KVA(&txr->txr_mem);
2431 mask = sc->sc_tx_ring_ndescs - 1;
2432 last = prod;
2433 cmd = 0;
2434 txd = NULL;
2435
2436 for (;;) {
2437 if (free <= IXL_TX_PKT_DESCS) {
2438 if (!is_transmit)
2439 SET(ifp->if_flags, IFF_OACTIVE);
2440 break;
2441 }
2442
2443 if (is_transmit)
2444 m = pcq_get(txr->txr_intrq);
2445 else
2446 IFQ_DEQUEUE(&ifp->if_snd, m);
2447
2448 if (m == NULL)
2449 break;
2450
2451 txm = &txr->txr_maps[prod];
2452 map = txm->txm_map;
2453
2454 if (ixl_load_mbuf(sc->sc_dmat, map, &m, txr) != 0) {
2455 txr->txr_oerrors++;
2456 m_freem(m);
2457 continue;
2458 }
2459
2460 if (vlan_has_tag(m)) {
2461 cmd_vlan = (uint64_t)vlan_get_tag(m) <<
2462 IXL_TX_DESC_L2TAG1_SHIFT;
2463 cmd_vlan |= IXL_TX_DESC_CMD_IL2TAG1;
2464 } else {
2465 cmd_vlan = 0;
2466 }
2467
2468 bus_dmamap_sync(sc->sc_dmat, map, 0,
2469 map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2470
2471 for (i = 0; i < (unsigned int)map->dm_nsegs; i++) {
2472 txd = &ring[prod];
2473
2474 cmd = (uint64_t)map->dm_segs[i].ds_len <<
2475 IXL_TX_DESC_BSIZE_SHIFT;
2476 cmd |= IXL_TX_DESC_DTYPE_DATA | IXL_TX_DESC_CMD_ICRC;
2477 cmd |= cmd_vlan;
2478
2479 txd->addr = htole64(map->dm_segs[i].ds_addr);
2480 txd->cmd = htole64(cmd);
2481
2482 last = prod;
2483
2484 prod++;
2485 prod &= mask;
2486 }
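		/* mark the packet's final descriptor with EOP|RS so the HW reports completion */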
2487 cmd |= IXL_TX_DESC_CMD_EOP | IXL_TX_DESC_CMD_RS;
2488 txd->cmd = htole64(cmd);
2489
2490 txm->txm_m = m;
2491 txm->txm_eop = last;
2492
2493 bpf_mtap(ifp, m, BPF_D_OUT);
2494
2495 free -= i;
2496 post = 1;
2497 }
2498
2499 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2500 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREWRITE);
2501
2502 if (post) {
2503 txr->txr_prod = prod;
2504 ixl_wr(sc, txr->txr_tail, prod);
2505 }
2506 }
2507
2508 static int
2509 ixl_txeof(struct ixl_softc *sc, struct ixl_tx_ring *txr, u_int txlimit)
2510 {
2511 struct ifnet *ifp = &sc->sc_ec.ec_if;
2512 struct ixl_tx_desc *ring, *txd;
2513 struct ixl_tx_map *txm;
2514 struct mbuf *m;
2515 bus_dmamap_t map;
2516 unsigned int cons, prod, last;
2517 unsigned int mask;
2518 uint64_t dtype;
2519 int done = 0, more = 0;
2520
2521 KASSERT(mutex_owned(&txr->txr_lock));
2522
2523 prod = txr->txr_prod;
2524 cons = txr->txr_cons;
2525
2526 if (cons == prod)
2527 return 0;
2528
2529 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2530 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTREAD);
2531
2532 ring = IXL_DMA_KVA(&txr->txr_mem);
2533 mask = sc->sc_tx_ring_ndescs - 1;
2534
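	/*
	 * txm_eop records the last descriptor of each packet; the packet
	 * is complete once the hardware rewrites that descriptor's DTYPE
	 * field to DTYPE_DONE.
	 */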
2535 do {
2536 if (txlimit-- <= 0) {
2537 more = 1;
2538 break;
2539 }
2540
2541 txm = &txr->txr_maps[cons];
2542 last = txm->txm_eop;
2543 txd = &ring[last];
2544
2545 dtype = txd->cmd & htole64(IXL_TX_DESC_DTYPE_MASK);
2546 if (dtype != htole64(IXL_TX_DESC_DTYPE_DONE))
2547 break;
2548
2549 map = txm->txm_map;
2550
2551 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2552 BUS_DMASYNC_POSTWRITE);
2553 bus_dmamap_unload(sc->sc_dmat, map);
2554
2555 m = txm->txm_m;
2556 if (m != NULL) {
2557 txr->txr_opackets++;
2558 txr->txr_obytes += m->m_pkthdr.len;
2559 if (ISSET(m->m_flags, M_MCAST))
2560 txr->txr_omcasts++;
2561 m_freem(m);
2562 }
2563
2564 txm->txm_m = NULL;
2565 txm->txm_eop = -1;
2566
2567 cons = last + 1;
2568 cons &= mask;
2569 done = 1;
2570 } while (cons != prod);
2571
2572 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2573 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREREAD);
2574
2575 txr->txr_cons = cons;
2576
2577 if (done) {
2578 softint_schedule(txr->txr_si);
2579 if (txr->txr_qid == 0) {
2580 CLR(ifp->if_flags, IFF_OACTIVE);
2581 if_schedule_deferred_start(ifp);
2582 }
2583 }
2584
2585 return more;
2586 }
2587
2588 static void
2589 ixl_start(struct ifnet *ifp)
2590 {
2591 struct ixl_softc *sc;
2592 struct ixl_tx_ring *txr;
2593
2594 sc = ifp->if_softc;
2595 txr = sc->sc_qps[0].qp_txr;
2596
2597 mutex_enter(&txr->txr_lock);
2598 ixl_tx_common_locked(ifp, txr, false);
2599 mutex_exit(&txr->txr_lock);
2600 }
2601
2602 static inline unsigned int
2603 ixl_select_txqueue(struct ixl_softc *sc, struct mbuf *m)
2604 {
2605 u_int cpuid;
2606
2607 cpuid = cpu_index(curcpu());
2608
2609 return (unsigned int)(cpuid % sc->sc_nqueue_pairs);
2610 }
2611
2612 static int
2613 ixl_transmit(struct ifnet *ifp, struct mbuf *m)
2614 {
2615 struct ixl_softc *sc;
2616 struct ixl_tx_ring *txr;
2617 unsigned int qid;
2618
2619 sc = ifp->if_softc;
2620 qid = ixl_select_txqueue(sc, m);
2621
2622 txr = sc->sc_qps[qid].qp_txr;
2623
2624 if (__predict_false(!pcq_put(txr->txr_intrq, m))) {
2625 mutex_enter(&txr->txr_lock);
2626 txr->txr_pcqdrop.ev_count++;
2627 mutex_exit(&txr->txr_lock);
2628
2629 m_freem(m);
2630 return ENOBUFS;
2631 }
2632
2633 if (mutex_tryenter(&txr->txr_lock)) {
2634 ixl_tx_common_locked(ifp, txr, true);
2635 mutex_exit(&txr->txr_lock);
2636 } else {
2637 softint_schedule(txr->txr_si);
2638 }
2639
2640 return 0;
2641 }
2642
2643 static void
2644 ixl_deferred_transmit(void *xtxr)
2645 {
2646 struct ixl_tx_ring *txr = xtxr;
2647 struct ixl_softc *sc = txr->txr_sc;
2648 struct ifnet *ifp = &sc->sc_ec.ec_if;
2649
2650 mutex_enter(&txr->txr_lock);
2651 txr->txr_transmitdef.ev_count++;
2652 if (pcq_peek(txr->txr_intrq) != NULL)
2653 ixl_tx_common_locked(ifp, txr, true);
2654 mutex_exit(&txr->txr_lock);
2655 }
2656
2657 static struct ixl_rx_ring *
2658 ixl_rxr_alloc(struct ixl_softc *sc, unsigned int qid)
2659 {
2660 struct ixl_rx_ring *rxr = NULL;
2661 struct ixl_rx_map *maps = NULL, *rxm;
2662 unsigned int i;
2663
2664 rxr = kmem_zalloc(sizeof(*rxr), KM_SLEEP);
2665 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_rx_ring_ndescs,
2666 KM_SLEEP);
2667
2668 if (ixl_dmamem_alloc(sc, &rxr->rxr_mem,
2669 sizeof(struct ixl_rx_rd_desc_32) * sc->sc_rx_ring_ndescs,
2670 IXL_RX_QUEUE_ALIGN) != 0)
2671 goto free;
2672
2673 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2674 rxm = &maps[i];
2675
2676 if (bus_dmamap_create(sc->sc_dmat,
2677 IXL_HARDMTU, 1, IXL_HARDMTU, 0,
2678 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &rxm->rxm_map) != 0)
2679 goto uncreate;
2680
2681 rxm->rxm_m = NULL;
2682 }
2683
2684 rxr->rxr_cons = rxr->rxr_prod = 0;
2685 rxr->rxr_m_head = NULL;
2686 rxr->rxr_m_tail = &rxr->rxr_m_head;
2687 rxr->rxr_maps = maps;
2688
2689 rxr->rxr_tail = I40E_QRX_TAIL(qid);
2690 rxr->rxr_qid = qid;
2691 mutex_init(&rxr->rxr_lock, MUTEX_DEFAULT, IPL_NET);
2692
2693 return rxr;
2694
2695 uncreate:
2696 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2697 rxm = &maps[i];
2698
2699 if (rxm->rxm_map == NULL)
2700 continue;
2701
2702 bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
2703 }
2704
2705 ixl_dmamem_free(sc, &rxr->rxr_mem);
2706 free:
2707 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs);
2708 kmem_free(rxr, sizeof(*rxr));
2709
2710 return NULL;
2711 }
2712
2713 static void
2714 ixl_rxr_clean(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2715 {
2716 struct ixl_rx_map *maps, *rxm;
2717 bus_dmamap_t map;
2718 unsigned int i;
2719
2720 maps = rxr->rxr_maps;
2721 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2722 rxm = &maps[i];
2723
2724 if (rxm->rxm_m == NULL)
2725 continue;
2726
2727 map = rxm->rxm_map;
2728 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2729 BUS_DMASYNC_POSTWRITE);
2730 bus_dmamap_unload(sc->sc_dmat, map);
2731
2732 m_freem(rxm->rxm_m);
2733 rxm->rxm_m = NULL;
2734 }
2735
2736 m_freem(rxr->rxr_m_head);
2737 rxr->rxr_m_head = NULL;
2738 rxr->rxr_m_tail = &rxr->rxr_m_head;
2739
2740 rxr->rxr_prod = rxr->rxr_cons = 0;
2741 }
2742
2743 static int
2744 ixl_rxr_enabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2745 {
2746 bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
2747 uint32_t reg;
2748 int i;
2749
2750 for (i = 0; i < 10; i++) {
2751 reg = ixl_rd(sc, ena);
2752 if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK))
2753 return 0;
2754
2755 delaymsec(10);
2756 }
2757
2758 return ETIMEDOUT;
2759 }
2760
2761 static int
2762 ixl_rxr_disabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2763 {
2764 bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
2765 uint32_t reg;
2766 int i;
2767
2768 KASSERT(mutex_owned(&rxr->rxr_lock));
2769
2770 for (i = 0; i < 20; i++) {
2771 reg = ixl_rd(sc, ena);
2772 if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK) == 0)
2773 return 0;
2774
2775 delaymsec(10);
2776 }
2777
2778 return ETIMEDOUT;
2779 }
2780
2781 static void
2782 ixl_rxr_config(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2783 {
2784 struct ixl_hmc_rxq rxq;
2785 void *hmc;
2786
2787 memset(&rxq, 0, sizeof(rxq));
2788
2789 rxq.head = htole16(rxr->rxr_cons);
2790 rxq.base = htole64(IXL_DMA_DVA(&rxr->rxr_mem) / IXL_HMC_RXQ_BASE_UNIT);
2791 rxq.qlen = htole16(sc->sc_rx_ring_ndescs);
2792 rxq.dbuff = htole16(MCLBYTES / IXL_HMC_RXQ_DBUFF_UNIT);
2793 rxq.hbuff = 0;
2794 rxq.dtype = IXL_HMC_RXQ_DTYPE_NOSPLIT;
2795 rxq.dsize = IXL_HMC_RXQ_DSIZE_32;
2796 rxq.crcstrip = 1;
2797 rxq.l2sel = 1;
2798 rxq.showiv = 1;
2799 rxq.rxmax = htole16(IXL_HARDMTU);
2800 rxq.tphrdesc_ena = 0;
2801 rxq.tphwdesc_ena = 0;
2802 rxq.tphdata_ena = 0;
2803 rxq.tphhead_ena = 0;
2804 rxq.lrxqthresh = 0;
2805 rxq.prefena = 1;
2806
2807 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
2808 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
2809 ixl_hmc_pack(hmc, &rxq, ixl_hmc_pack_rxq,
2810 __arraycount(ixl_hmc_pack_rxq));
2811 }
2812
2813 static void
2814 ixl_rxr_unconfig(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2815 {
2816 void *hmc;
2817
2818 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
2819 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
2820 }
2821
2822 static void
2823 ixl_rxr_free(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2824 {
2825 struct ixl_rx_map *maps, *rxm;
2826 unsigned int i;
2827
2828 maps = rxr->rxr_maps;
2829 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2830 rxm = &maps[i];
2831
2832 bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
2833 }
2834
2835 ixl_dmamem_free(sc, &rxr->rxr_mem);
2836 mutex_destroy(&rxr->rxr_lock);
2837 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs);
2838 kmem_free(rxr, sizeof(*rxr));
2839 }
2840
2841 static int
2842 ixl_rxeof(struct ixl_softc *sc, struct ixl_rx_ring *rxr, u_int rxlimit)
2843 {
2844 struct ifnet *ifp = &sc->sc_ec.ec_if;
2845 struct ixl_rx_wb_desc_32 *ring, *rxd;
2846 struct ixl_rx_map *rxm;
2847 bus_dmamap_t map;
2848 unsigned int cons, prod;
2849 struct mbuf *m;
2850 uint64_t word, word0;
2851 unsigned int len;
2852 unsigned int mask;
2853 int done = 0, more = 0;
2854
2855 KASSERT(mutex_owned(&rxr->rxr_lock));
2856
2857 if (!ISSET(ifp->if_flags, IFF_RUNNING))
2858 return 0;
2859
2860 prod = rxr->rxr_prod;
2861 cons = rxr->rxr_cons;
2862
2863 if (cons == prod)
2864 return 0;
2865
2866 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
2867 0, IXL_DMA_LEN(&rxr->rxr_mem),
2868 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2869
2870 ring = IXL_DMA_KVA(&rxr->rxr_mem);
2871 mask = sc->sc_rx_ring_ndescs - 1;
2872
2873 do {
2874 if (rxlimit-- <= 0) {
2875 more = 1;
2876 break;
2877 }
2878
2879 rxd = &ring[cons];
2880
2881 word = le64toh(rxd->qword1);
2882
2883 if (!ISSET(word, IXL_RX_DESC_DD))
2884 break;
2885
2886 rxm = &rxr->rxr_maps[cons];
2887
2888 map = rxm->rxm_map;
2889 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2890 BUS_DMASYNC_POSTREAD);
2891 bus_dmamap_unload(sc->sc_dmat, map);
2892
2893 m = rxm->rxm_m;
2894 rxm->rxm_m = NULL;
2895
2896 KASSERT(m != NULL);
2897
2898 len = (word & IXL_RX_DESC_PLEN_MASK) >> IXL_RX_DESC_PLEN_SHIFT;
2899 m->m_len = len;
2900 m->m_pkthdr.len = 0;
2901
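			/*
			 * Chain this fragment onto the packet being
			 * assembled; the packet is handed up once a
			 * descriptor with EOP set completes it.
			 */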
2902 m->m_next = NULL;
2903 *rxr->rxr_m_tail = m;
2904 rxr->rxr_m_tail = &m->m_next;
2905
2906 m = rxr->rxr_m_head;
2907 m->m_pkthdr.len += len;
2908
2909 if (ISSET(word, IXL_RX_DESC_EOP)) {
2910 word0 = le64toh(rxd->qword0);
2911
2912 if (ISSET(word, IXL_RX_DESC_L2TAG1P)) {
2913 vlan_set_tag(m,
2914 __SHIFTOUT(word0, IXL_RX_DESC_L2TAG1_MASK));
2915 }
2916
2917 if (!ISSET(word,
2918 IXL_RX_DESC_RXE | IXL_RX_DESC_OVERSIZE)) {
2919 m_set_rcvif(m, ifp);
2920 rxr->rxr_ipackets++;
2921 rxr->rxr_ibytes += m->m_pkthdr.len;
2922 if_percpuq_enqueue(ifp->if_percpuq, m);
2923 } else {
2924 rxr->rxr_ierrors++;
2925 m_freem(m);
2926 }
2927
2928 rxr->rxr_m_head = NULL;
2929 rxr->rxr_m_tail = &rxr->rxr_m_head;
2930 }
2931
2932 cons++;
2933 cons &= mask;
2934
2935 done = 1;
2936 } while (cons != prod);
2937
2938 if (done) {
2939 rxr->rxr_cons = cons;
2940 if (ixl_rxfill(sc, rxr) == -1)
2941 rxr->rxr_iqdrops++;
2942 }
2943
2944 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
2945 0, IXL_DMA_LEN(&rxr->rxr_mem),
2946 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2947
2948 return more;
2949 }
2950
2951 static int
2952 ixl_rxfill(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2953 {
2954 struct ixl_rx_rd_desc_32 *ring, *rxd;
2955 struct ixl_rx_map *rxm;
2956 bus_dmamap_t map;
2957 struct mbuf *m;
2958 unsigned int prod;
2959 unsigned int slots;
2960 unsigned int mask;
2961 int post = 0, error = 0;
2962
2963 KASSERT(mutex_owned(&rxr->rxr_lock));
2964
2965 prod = rxr->rxr_prod;
2966 slots = ixl_rxr_unrefreshed(rxr->rxr_prod, rxr->rxr_cons,
2967 sc->sc_rx_ring_ndescs);
2968
2969 ring = IXL_DMA_KVA(&rxr->rxr_mem);
2970 mask = sc->sc_rx_ring_ndescs - 1;
2971
2972 if (__predict_false(slots <= 0))
2973 return -1;
2974
2975 do {
2976 rxm = &rxr->rxr_maps[prod];
2977
2978 MGETHDR(m, M_DONTWAIT, MT_DATA);
2979 if (m == NULL) {
2980 rxr->rxr_mgethdr_failed.ev_count++;
2981 error = -1;
2982 break;
2983 }
2984
2985 MCLGET(m, M_DONTWAIT);
2986 if (!ISSET(m->m_flags, M_EXT)) {
2987 rxr->rxr_mgetcl_failed.ev_count++;
2988 error = -1;
2989 m_freem(m);
2990 break;
2991 }
2992
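		/* shift the data start by ETHER_ALIGN so the IP header is 4-byte aligned */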
2993 m->m_len = m->m_pkthdr.len = MCLBYTES + ETHER_ALIGN;
2994 m_adj(m, ETHER_ALIGN);
2995
2996 map = rxm->rxm_map;
2997
2998 if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
2999 BUS_DMA_READ | BUS_DMA_NOWAIT) != 0) {
3000 rxr->rxr_mbuf_load_failed.ev_count++;
3001 error = -1;
3002 m_freem(m);
3003 break;
3004 }
3005
3006 rxm->rxm_m = m;
3007
3008 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3009 BUS_DMASYNC_PREREAD);
3010
3011 rxd = &ring[prod];
3012
3013 rxd->paddr = htole64(map->dm_segs[0].ds_addr);
3014 rxd->haddr = htole64(0);
3015
3016 prod++;
3017 prod &= mask;
3018
3019 post = 1;
3020
3021 } while (--slots);
3022
3023 if (post) {
3024 rxr->rxr_prod = prod;
3025 ixl_wr(sc, rxr->rxr_tail, prod);
3026 }
3027
3028 return error;
3029 }
3030
3031 static inline int
3032 ixl_handle_queue_common(struct ixl_softc *sc, struct ixl_queue_pair *qp,
3033 u_int txlimit, struct evcnt *txevcnt,
3034 u_int rxlimit, struct evcnt *rxevcnt)
3035 {
3036 struct ixl_tx_ring *txr = qp->qp_txr;
3037 struct ixl_rx_ring *rxr = qp->qp_rxr;
3038 int txmore, rxmore;
3039 int rv;
3040
3041 KASSERT(!mutex_owned(&txr->txr_lock));
3042 KASSERT(!mutex_owned(&rxr->rxr_lock));
3043
3044 mutex_enter(&txr->txr_lock);
3045 txevcnt->ev_count++;
3046 txmore = ixl_txeof(sc, txr, txlimit);
3047 mutex_exit(&txr->txr_lock);
3048
3049 mutex_enter(&rxr->rxr_lock);
3050 rxevcnt->ev_count++;
3051 rxmore = ixl_rxeof(sc, rxr, rxlimit);
3052 mutex_exit(&rxr->rxr_lock);
3053
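	/* nonzero when a ring hit its processing limit and needs another pass */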
3054 rv = txmore | (rxmore << 1);
3055
3056 return rv;
3057 }
3058
3059 static void
3060 ixl_sched_handle_queue(struct ixl_softc *sc, struct ixl_queue_pair *qp)
3061 {
3062
3063 if (qp->qp_workqueue)
3064 ixl_work_add(sc->sc_workq_txrx, &qp->qp_task);
3065 else
3066 softint_schedule(qp->qp_si);
3067 }
3068
3069 static int
3070 ixl_intr(void *xsc)
3071 {
3072 struct ixl_softc *sc = xsc;
3073 struct ixl_tx_ring *txr;
3074 struct ixl_rx_ring *rxr;
3075 uint32_t icr, rxintr, txintr;
3076 int rv = 0;
3077 unsigned int i;
3078
3079 KASSERT(sc != NULL);
3080
3081 ixl_enable_other_intr(sc);
3082 icr = ixl_rd(sc, I40E_PFINT_ICR0);
3083
3084 if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) {
3085 atomic_inc_64(&sc->sc_event_atq.ev_count);
3086 ixl_atq_done(sc);
3087 ixl_work_add(sc->sc_workq, &sc->sc_arq_task);
3088 rv = 1;
3089 }
3090
3091 if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) {
3092 atomic_inc_64(&sc->sc_event_link.ev_count);
3093 ixl_work_add(sc->sc_workq, &sc->sc_link_state_task);
3094 rv = 1;
3095 }
3096
3097 rxintr = icr & I40E_INTR_NOTX_RX_MASK;
3098 txintr = icr & I40E_INTR_NOTX_TX_MASK;
3099
3100 if (txintr || rxintr) {
3101 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
3102 txr = sc->sc_qps[i].qp_txr;
3103 rxr = sc->sc_qps[i].qp_rxr;
3104
3105 ixl_handle_queue_common(sc, &sc->sc_qps[i],
3106 IXL_TXRX_PROCESS_UNLIMIT, &txr->txr_intr,
3107 IXL_TXRX_PROCESS_UNLIMIT, &rxr->rxr_intr);
3108 }
3109 rv = 1;
3110 }
3111
3112 return rv;
3113 }
3114
3115 static int
3116 ixl_queue_intr(void *xqp)
3117 {
3118 struct ixl_queue_pair *qp = xqp;
3119 struct ixl_tx_ring *txr = qp->qp_txr;
3120 struct ixl_rx_ring *rxr = qp->qp_rxr;
3121 struct ixl_softc *sc = qp->qp_sc;
3122 u_int txlimit, rxlimit;
3123 int more;
3124
3125 txlimit = sc->sc_tx_intr_process_limit;
3126 rxlimit = sc->sc_rx_intr_process_limit;
3127 qp->qp_workqueue = sc->sc_txrx_workqueue;
3128
3129 more = ixl_handle_queue_common(sc, qp,
3130 txlimit, &txr->txr_intr, rxlimit, &rxr->rxr_intr);
3131
3132 if (more != 0) {
3133 ixl_sched_handle_queue(sc, qp);
3134 } else {
3135 /* for ALTQ */
3136 if (txr->txr_qid == 0)
3137 if_schedule_deferred_start(&sc->sc_ec.ec_if);
3138 softint_schedule(txr->txr_si);
3139
3140 ixl_enable_queue_intr(sc, qp);
3141 }
3142
3143 return 1;
3144 }
3145
3146 static void
3147 ixl_handle_queue(void *xqp)
3148 {
3149 struct ixl_queue_pair *qp = xqp;
3150 struct ixl_softc *sc = qp->qp_sc;
3151 struct ixl_tx_ring *txr = qp->qp_txr;
3152 struct ixl_rx_ring *rxr = qp->qp_rxr;
3153 u_int txlimit, rxlimit;
3154 int more;
3155
3156 txlimit = sc->sc_tx_process_limit;
3157 rxlimit = sc->sc_rx_process_limit;
3158
3159 more = ixl_handle_queue_common(sc, qp,
3160 txlimit, &txr->txr_defer, rxlimit, &rxr->rxr_defer);
3161
3162 if (more != 0)
3163 ixl_sched_handle_queue(sc, qp);
3164 else
3165 ixl_enable_queue_intr(sc, qp);
3166 }
3167
3168 static inline void
3169 ixl_print_hmc_error(struct ixl_softc *sc, uint32_t reg)
3170 {
3171 uint32_t hmc_idx, hmc_isvf;
3172 uint32_t hmc_errtype, hmc_objtype, hmc_data;
3173
3174 hmc_idx = reg & I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK;
3175 hmc_idx = hmc_idx >> I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT;
3176 hmc_isvf = reg & I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK;
3177 hmc_isvf = hmc_isvf >> I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT;
3178 hmc_errtype = reg & I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK;
3179 hmc_errtype = hmc_errtype >> I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT;
3180 hmc_objtype = reg & I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK;
3181 hmc_objtype = hmc_objtype >> I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT;
3182 hmc_data = ixl_rd(sc, I40E_PFHMC_ERRORDATA);
3183
3184 device_printf(sc->sc_dev,
3185 "HMC Error (idx=0x%x, isvf=0x%x, err=0x%x, obj=0x%x, data=0x%x)\n",
3186 hmc_idx, hmc_isvf, hmc_errtype, hmc_objtype, hmc_data);
3187 }
3188
3189 static int
3190 ixl_other_intr(void *xsc)
3191 {
3192 struct ixl_softc *sc = xsc;
3193 uint32_t icr, mask, reg;
3194 	int rv = 0;
3195
3196 icr = ixl_rd(sc, I40E_PFINT_ICR0);
3197 mask = ixl_rd(sc, I40E_PFINT_ICR0_ENA);
3198
3199 if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) {
3200 atomic_inc_64(&sc->sc_event_atq.ev_count);
3201 ixl_atq_done(sc);
3202 ixl_work_add(sc->sc_workq, &sc->sc_arq_task);
3203 rv = 1;
3204 }
3205
3206 if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) {
3207 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3208 device_printf(sc->sc_dev, "link stat changed\n");
3209
3210 atomic_inc_64(&sc->sc_event_link.ev_count);
3211 ixl_work_add(sc->sc_workq, &sc->sc_link_state_task);
3212 rv = 1;
3213 }
3214
3215 if (ISSET(icr, I40E_PFINT_ICR0_GRST_MASK)) {
3216 CLR(mask, I40E_PFINT_ICR0_ENA_GRST_MASK);
3217 reg = ixl_rd(sc, I40E_GLGEN_RSTAT);
3218 reg = reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK;
3219 reg = reg >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
3220
3221 device_printf(sc->sc_dev, "GRST: %s\n",
3222 reg == I40E_RESET_CORER ? "CORER" :
3223 reg == I40E_RESET_GLOBR ? "GLOBR" :
3224 reg == I40E_RESET_EMPR ? "EMPR" :
3225 "POR");
3226 }
3227
3228 if (ISSET(icr, I40E_PFINT_ICR0_ECC_ERR_MASK))
3229 atomic_inc_64(&sc->sc_event_ecc_err.ev_count);
3230 if (ISSET(icr, I40E_PFINT_ICR0_PCI_EXCEPTION_MASK))
3231 atomic_inc_64(&sc->sc_event_pci_exception.ev_count);
3232 if (ISSET(icr, I40E_PFINT_ICR0_PE_CRITERR_MASK))
3233 atomic_inc_64(&sc->sc_event_crit_err.ev_count);
3234
3235 if (ISSET(icr, IXL_ICR0_CRIT_ERR_MASK)) {
3236 CLR(mask, IXL_ICR0_CRIT_ERR_MASK);
3237 device_printf(sc->sc_dev, "critical error\n");
3238 }
3239
3240 if (ISSET(icr, I40E_PFINT_ICR0_HMC_ERR_MASK)) {
3241 reg = ixl_rd(sc, I40E_PFHMC_ERRORINFO);
3242 if (ISSET(reg, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK))
3243 ixl_print_hmc_error(sc, reg);
3244 ixl_wr(sc, I40E_PFHMC_ERRORINFO, 0);
3245 }
3246
3247 ixl_wr(sc, I40E_PFINT_ICR0_ENA, mask);
3248 ixl_flush(sc);
3249 ixl_enable_other_intr(sc);
3250 return rv;
3251 }
3252
3253 static void
3254 ixl_get_link_status_done(struct ixl_softc *sc,
3255 const struct ixl_aq_desc *iaq)
3256 {
3257
3258 ixl_link_state_update(sc, iaq);
3259 }
3260
3261 static void
3262 ixl_get_link_status(void *xsc)
3263 {
3264 struct ixl_softc *sc = xsc;
3265 struct ixl_aq_desc *iaq;
3266 struct ixl_aq_link_param *param;
3267
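	/* post an asynchronous link status query; ixl_get_link_status_done() handles the reply */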
3268 memset(&sc->sc_link_state_atq, 0, sizeof(sc->sc_link_state_atq));
3269 iaq = &sc->sc_link_state_atq.iatq_desc;
3270 iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
3271 param = (struct ixl_aq_link_param *)iaq->iaq_param;
3272 param->notify = IXL_AQ_LINK_NOTIFY;
3273
3274 ixl_atq_set(&sc->sc_link_state_atq, ixl_get_link_status_done);
3275 (void)ixl_atq_post(sc, &sc->sc_link_state_atq);
3276 }
3277
3278 static void
3279 ixl_link_state_update(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
3280 {
3281 struct ifnet *ifp = &sc->sc_ec.ec_if;
3282 int link_state;
3283
3284 link_state = ixl_set_link_status(sc, iaq);
3285
3286 if (ifp->if_link_state != link_state)
3287 if_link_state_change(ifp, link_state);
3288
3289 if (link_state != LINK_STATE_DOWN) {
3290 if_schedule_deferred_start(ifp);
3291 }
3292 }
3293
3294 static void
3295 ixl_aq_dump(const struct ixl_softc *sc, const struct ixl_aq_desc *iaq,
3296 const char *msg)
3297 {
3298 char buf[512];
3299 size_t len;
3300
3301 len = sizeof(buf);
3302 buf[--len] = '\0';
3303
3304 device_printf(sc->sc_dev, "%s\n", msg);
3305 snprintb(buf, len, IXL_AQ_FLAGS_FMT, le16toh(iaq->iaq_flags));
3306 device_printf(sc->sc_dev, "flags %s opcode %04x\n",
3307 buf, le16toh(iaq->iaq_opcode));
3308 device_printf(sc->sc_dev, "datalen %u retval %u\n",
3309 le16toh(iaq->iaq_datalen), le16toh(iaq->iaq_retval));
3310 device_printf(sc->sc_dev, "cookie %016" PRIx64 "\n", iaq->iaq_cookie);
3311 device_printf(sc->sc_dev, "%08x %08x %08x %08x\n",
3312 le32toh(iaq->iaq_param[0]), le32toh(iaq->iaq_param[1]),
3313 le32toh(iaq->iaq_param[2]), le32toh(iaq->iaq_param[3]));
3314 }
3315
3316 static void
3317 ixl_arq(void *xsc)
3318 {
3319 struct ixl_softc *sc = xsc;
3320 struct ixl_aq_desc *arq, *iaq;
3321 struct ixl_aq_buf *aqb;
3322 unsigned int cons = sc->sc_arq_cons;
3323 unsigned int prod;
3324 int done = 0;
3325
3326 prod = ixl_rd(sc, sc->sc_aq_regs->arq_head) &
3327 sc->sc_aq_regs->arq_head_mask;
3328
3329 if (cons == prod)
3330 goto done;
3331
3332 arq = IXL_DMA_KVA(&sc->sc_arq);
3333
3334 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3335 0, IXL_DMA_LEN(&sc->sc_arq),
3336 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3337
3338 do {
3339 iaq = &arq[cons];
3340 aqb = sc->sc_arq_live[cons];
3341
3342 KASSERT(aqb != NULL);
3343
3344 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,
3345 BUS_DMASYNC_POSTREAD);
3346
3347 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3348 ixl_aq_dump(sc, iaq, "arq event");
3349
3350 switch (iaq->iaq_opcode) {
3351 case htole16(IXL_AQ_OP_PHY_LINK_STATUS):
3352 ixl_link_state_update(sc, iaq);
3353 break;
3354 }
3355
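		/* recycle the event buffer; ixl_arq_fill() will repost it to the hardware */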
3356 memset(iaq, 0, sizeof(*iaq));
3357 sc->sc_arq_live[cons] = NULL;
3358 SIMPLEQ_INSERT_TAIL(&sc->sc_arq_idle, aqb, aqb_entry);
3359
3360 cons++;
3361 cons &= IXL_AQ_MASK;
3362
3363 done = 1;
3364 } while (cons != prod);
3365
3366 if (done) {
3367 sc->sc_arq_cons = cons;
3368 ixl_arq_fill(sc);
3369 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3370 0, IXL_DMA_LEN(&sc->sc_arq),
3371 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3372 }
3373
3374 done:
3375 ixl_enable_other_intr(sc);
3376 }
3377
3378 static void
3379 ixl_atq_set(struct ixl_atq *iatq,
3380 void (*fn)(struct ixl_softc *, const struct ixl_aq_desc *))
3381 {
3382
3383 iatq->iatq_fn = fn;
3384 }
3385
3386 static int
3387 ixl_atq_post_locked(struct ixl_softc *sc, struct ixl_atq *iatq)
3388 {
3389 struct ixl_aq_desc *atq, *slot;
3390 unsigned int prod, cons, prod_next;
3391
3392 /* assert locked */
3393 KASSERT(mutex_owned(&sc->sc_atq_lock));
3394
3395 atq = IXL_DMA_KVA(&sc->sc_atq);
3396 prod = sc->sc_atq_prod;
3397 cons = sc->sc_atq_cons;
3398 	prod_next = (prod + 1) & IXL_AQ_MASK;
3399
3400 if (cons == prod_next)
3401 return ENOMEM;
3402
3403 slot = &atq[prod];
3404
3405 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3406 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
3407
3408 *slot = iatq->iatq_desc;
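	/* the cookie carries the request pointer so ixl_atq_done_locked() can match the completion */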
3409 slot->iaq_cookie = (uint64_t)((intptr_t)iatq);
3410
3411 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3412 ixl_aq_dump(sc, slot, "atq command");
3413
3414 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3415 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
3416
3417 sc->sc_atq_prod = prod_next;
3418 ixl_wr(sc, sc->sc_aq_regs->atq_tail, sc->sc_atq_prod);
3419
3420 return 0;
3421 }
3422
3423 static int
3424 ixl_atq_post(struct ixl_softc *sc, struct ixl_atq *iatq)
3425 {
3426 int rv;
3427
3428 mutex_enter(&sc->sc_atq_lock);
3429 rv = ixl_atq_post_locked(sc, iatq);
3430 mutex_exit(&sc->sc_atq_lock);
3431
3432 return rv;
3433 }
3434
3435 static void
3436 ixl_atq_done_locked(struct ixl_softc *sc)
3437 {
3438 struct ixl_aq_desc *atq, *slot;
3439 struct ixl_atq *iatq;
3440 unsigned int cons;
3441 unsigned int prod;
3442
3443 KASSERT(mutex_owned(&sc->sc_atq_lock));
3444
3445 prod = sc->sc_atq_prod;
3446 cons = sc->sc_atq_cons;
3447
3448 if (prod == cons)
3449 return;
3450
3451 atq = IXL_DMA_KVA(&sc->sc_atq);
3452
3453 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3454 0, IXL_DMA_LEN(&sc->sc_atq),
3455 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3456
3457 do {
3458 slot = &atq[cons];
3459 if (!ISSET(slot->iaq_flags, htole16(IXL_AQ_DD)))
3460 break;
3461
3462 iatq = (struct ixl_atq *)((intptr_t)slot->iaq_cookie);
3463 iatq->iatq_desc = *slot;
3464
3465 memset(slot, 0, sizeof(*slot));
3466
3467 if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3468 ixl_aq_dump(sc, &iatq->iatq_desc, "atq response");
3469
3470 (*iatq->iatq_fn)(sc, &iatq->iatq_desc);
3471
3472 cons++;
3473 cons &= IXL_AQ_MASK;
3474 } while (cons != prod);
3475
3476 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3477 0, IXL_DMA_LEN(&sc->sc_atq),
3478 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3479
3480 sc->sc_atq_cons = cons;
3481 }
3482
3483 static void
3484 ixl_atq_done(struct ixl_softc *sc)
3485 {
3486
3487 mutex_enter(&sc->sc_atq_lock);
3488 ixl_atq_done_locked(sc);
3489 mutex_exit(&sc->sc_atq_lock);
3490 }
3491
3492 static void
3493 ixl_wakeup(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
3494 {
3495
3496 KASSERT(mutex_owned(&sc->sc_atq_lock));
3497
3498 cv_signal(&sc->sc_atq_cv);
3499 }
3500
3501 static int
3502 ixl_atq_exec(struct ixl_softc *sc, struct ixl_atq *iatq)
3503 {
3504 int error;
3505
3506 KASSERT(iatq->iatq_desc.iaq_cookie == 0);
3507
3508 ixl_atq_set(iatq, ixl_wakeup);
3509
3510 mutex_enter(&sc->sc_atq_lock);
3511 error = ixl_atq_post_locked(sc, iatq);
3512 if (error) {
3513 mutex_exit(&sc->sc_atq_lock);
3514 return error;
3515 }
3516
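	/* sleep until ixl_wakeup() signals completion from the admin queue, or until timeout */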
3517 error = cv_timedwait(&sc->sc_atq_cv, &sc->sc_atq_lock,
3518 IXL_ATQ_EXEC_TIMEOUT);
3519 mutex_exit(&sc->sc_atq_lock);
3520
3521 return error;
3522 }
3523
3524 static int
3525 ixl_atq_poll(struct ixl_softc *sc, struct ixl_aq_desc *iaq, unsigned int tm)
3526 {
3527 struct ixl_aq_desc *atq, *slot;
3528 unsigned int prod;
3529 unsigned int t = 0;
3530
3531 mutex_enter(&sc->sc_atq_lock);
3532
3533 atq = IXL_DMA_KVA(&sc->sc_atq);
3534 prod = sc->sc_atq_prod;
3535 slot = atq + prod;
3536
3537 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3538 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
3539
3540 *slot = *iaq;
3541 slot->iaq_flags |= htole16(IXL_AQ_SI);
3542
3543 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3544 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
3545
3546 prod++;
3547 prod &= IXL_AQ_MASK;
3548 sc->sc_atq_prod = prod;
3549 ixl_wr(sc, sc->sc_aq_regs->atq_tail, prod);
3550
3551 while (ixl_rd(sc, sc->sc_aq_regs->atq_head) != prod) {
3552 delaymsec(1);
3553
3554 if (t++ > tm) {
3555 mutex_exit(&sc->sc_atq_lock);
3556 return ETIMEDOUT;
3557 }
3558 }
3559
3560 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3561 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTREAD);
3562 *iaq = *slot;
3563 memset(slot, 0, sizeof(*slot));
3564 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3565 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREREAD);
3566
3567 sc->sc_atq_cons = prod;
3568
3569 mutex_exit(&sc->sc_atq_lock);
3570
3571 return 0;
3572 }
3573
3574 static int
3575 ixl_get_version(struct ixl_softc *sc)
3576 {
3577 struct ixl_aq_desc iaq;
3578 uint32_t fwbuild, fwver, apiver;
3579 uint16_t api_maj_ver, api_min_ver;
3580
3581 memset(&iaq, 0, sizeof(iaq));
3582 iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VERSION);
3583
3584 iaq.iaq_retval = le16toh(23);
3585
3586 if (ixl_atq_poll(sc, &iaq, 2000) != 0)
3587 return ETIMEDOUT;
3588 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK))
3589 return EIO;
3590
3591 fwbuild = le32toh(iaq.iaq_param[1]);
3592 fwver = le32toh(iaq.iaq_param[2]);
3593 apiver = le32toh(iaq.iaq_param[3]);
3594
3595 api_maj_ver = (uint16_t)apiver;
3596 api_min_ver = (uint16_t)(apiver >> 16);
3597
3598 aprint_normal(", FW %hu.%hu.%05u API %hu.%hu", (uint16_t)fwver,
3599 (uint16_t)(fwver >> 16), fwbuild, api_maj_ver, api_min_ver);
3600
3601 sc->sc_rxctl_atq = true;
3602 if (sc->sc_mac_type == I40E_MAC_X722) {
3603 if (api_maj_ver == 1 && api_min_ver < 5) {
3604 sc->sc_rxctl_atq = false;
3605 }
3606 }
3607
3608 return 0;
3609 }
3610
3611 static int
3612 ixl_pxe_clear(struct ixl_softc *sc)
3613 {
3614 struct ixl_aq_desc iaq;
3615 int rv;
3616
3617 memset(&iaq, 0, sizeof(iaq));
3618 iaq.iaq_opcode = htole16(IXL_AQ_OP_CLEAR_PXE_MODE);
3619 iaq.iaq_param[0] = htole32(0x2);
3620
3621 rv = ixl_atq_poll(sc, &iaq, 250);
3622
3623 ixl_wr(sc, I40E_GLLAN_RCTL_0, 0x1);
3624
3625 if (rv != 0)
3626 return ETIMEDOUT;
3627
3628 switch (iaq.iaq_retval) {
3629 case htole16(IXL_AQ_RC_OK):
3630 case htole16(IXL_AQ_RC_EEXIST):
3631 break;
3632 default:
3633 return EIO;
3634 }
3635
3636 return 0;
3637 }
3638
3639 static int
3640 ixl_lldp_shut(struct ixl_softc *sc)
3641 {
3642 struct ixl_aq_desc iaq;
3643
3644 memset(&iaq, 0, sizeof(iaq));
3645 iaq.iaq_opcode = htole16(IXL_AQ_OP_LLDP_STOP_AGENT);
3646 iaq.iaq_param[0] = htole32(IXL_LLDP_SHUTDOWN);
3647
3648 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
3649 aprint_error_dev(sc->sc_dev, "STOP LLDP AGENT timeout\n");
3650 return -1;
3651 }
3652
3653 switch (iaq.iaq_retval) {
3654 case htole16(IXL_AQ_RC_EMODE):
3655 case htole16(IXL_AQ_RC_EPERM):
3656 /* ignore silently */
3657 default:
3658 break;
3659 }
3660
3661 return 0;
3662 }
3663
3664 static void
3665 ixl_parse_hw_capability(struct ixl_softc *sc, struct ixl_aq_capability *cap)
3666 {
3667 uint16_t id;
3668 uint32_t number, logical_id;
3669
3670 id = le16toh(cap->cap_id);
3671 number = le32toh(cap->number);
3672 logical_id = le32toh(cap->logical_id);
3673
3674 switch (id) {
3675 case IXL_AQ_CAP_RSS:
3676 sc->sc_rss_table_size = number;
3677 sc->sc_rss_table_entry_width = logical_id;
3678 break;
3679 case IXL_AQ_CAP_RXQ:
3680 case IXL_AQ_CAP_TXQ:
3681 sc->sc_nqueue_pairs_device = MIN(number,
3682 sc->sc_nqueue_pairs_device);
3683 break;
3684 }
3685 }
3686
3687 static int
3688 ixl_get_hw_capabilities(struct ixl_softc *sc)
3689 {
3690 struct ixl_dmamem idm;
3691 struct ixl_aq_desc iaq;
3692 struct ixl_aq_capability *caps;
3693 size_t i, ncaps;
3694 bus_size_t caps_size;
3695 uint16_t status;
3696 int rv;
3697
3698 caps_size = sizeof(caps[0]) * 40;
3699 memset(&iaq, 0, sizeof(iaq));
3700 iaq.iaq_opcode = htole16(IXL_AQ_OP_LIST_FUNC_CAP);
3701
3702 do {
3703 if (ixl_dmamem_alloc(sc, &idm, caps_size, 0) != 0) {
3704 return -1;
3705 }
3706
3707 iaq.iaq_flags = htole16(IXL_AQ_BUF |
3708 (caps_size > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
3709 iaq.iaq_datalen = htole16(caps_size);
3710 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
3711
3712 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0,
3713 IXL_DMA_LEN(&idm), BUS_DMASYNC_PREREAD);
3714
3715 rv = ixl_atq_poll(sc, &iaq, 250);
3716
3717 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0,
3718 IXL_DMA_LEN(&idm), BUS_DMASYNC_POSTREAD);
3719
3720 if (rv != 0) {
3721 aprint_error(", HW capabilities timeout\n");
3722 goto done;
3723 }
3724
3725 status = le16toh(iaq.iaq_retval);
3726
3727 if (status == IXL_AQ_RC_ENOMEM) {
3728 caps_size = le16toh(iaq.iaq_datalen);
3729 ixl_dmamem_free(sc, &idm);
3730 }
3731 } while (status == IXL_AQ_RC_ENOMEM);
3732
3733 if (status != IXL_AQ_RC_OK) {
3734 aprint_error(", HW capabilities error\n");
3735 goto done;
3736 }
3737
3738 caps = IXL_DMA_KVA(&idm);
3739 ncaps = le16toh(iaq.iaq_param[1]);
3740
3741 for (i = 0; i < ncaps; i++) {
3742 ixl_parse_hw_capability(sc, &caps[i]);
3743 }
3744
3745 done:
3746 ixl_dmamem_free(sc, &idm);
3747 return rv;
3748 }
3749
3750 static int
3751 ixl_get_mac(struct ixl_softc *sc)
3752 {
3753 struct ixl_dmamem idm;
3754 struct ixl_aq_desc iaq;
3755 struct ixl_aq_mac_addresses *addrs;
3756 int rv;
3757
3758 if (ixl_dmamem_alloc(sc, &idm, sizeof(*addrs), 0) != 0) {
3759 aprint_error(", unable to allocate mac addresses\n");
3760 return -1;
3761 }
3762
3763 memset(&iaq, 0, sizeof(iaq));
3764 iaq.iaq_flags = htole16(IXL_AQ_BUF);
3765 iaq.iaq_opcode = htole16(IXL_AQ_OP_MAC_ADDRESS_READ);
3766 iaq.iaq_datalen = htole16(sizeof(*addrs));
3767 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
3768
3769 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3770 BUS_DMASYNC_PREREAD);
3771
3772 rv = ixl_atq_poll(sc, &iaq, 250);
3773
3774 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3775 BUS_DMASYNC_POSTREAD);
3776
3777 if (rv != 0) {
3778 aprint_error(", MAC ADDRESS READ timeout\n");
3779 rv = -1;
3780 goto done;
3781 }
3782 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3783 aprint_error(", MAC ADDRESS READ error\n");
3784 rv = -1;
3785 goto done;
3786 }
3787
3788 addrs = IXL_DMA_KVA(&idm);
3789 if (!ISSET(iaq.iaq_param[0], htole32(IXL_AQ_MAC_PORT_VALID))) {
3790 		aprint_error(", port address is not valid\n");
3791 goto done;
3792 }
3793
3794 memcpy(sc->sc_enaddr, addrs->port, ETHER_ADDR_LEN);
3795 rv = 0;
3796
3797 done:
3798 ixl_dmamem_free(sc, &idm);
3799 return rv;
3800 }
3801
3802 static int
3803 ixl_get_switch_config(struct ixl_softc *sc)
3804 {
3805 struct ixl_dmamem idm;
3806 struct ixl_aq_desc iaq;
3807 struct ixl_aq_switch_config *hdr;
3808 struct ixl_aq_switch_config_element *elms, *elm;
3809 unsigned int nelm, i;
3810 int rv;
3811
3812 if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
3813 aprint_error_dev(sc->sc_dev,
3814 "unable to allocate switch config buffer\n");
3815 return -1;
3816 }
3817
3818 memset(&iaq, 0, sizeof(iaq));
3819 iaq.iaq_flags = htole16(IXL_AQ_BUF |
3820 (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
3821 iaq.iaq_opcode = htole16(IXL_AQ_OP_SWITCH_GET_CONFIG);
3822 iaq.iaq_datalen = htole16(IXL_AQ_BUFLEN);
3823 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
3824
3825 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3826 BUS_DMASYNC_PREREAD);
3827
3828 rv = ixl_atq_poll(sc, &iaq, 250);
3829
3830 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3831 BUS_DMASYNC_POSTREAD);
3832
3833 if (rv != 0) {
3834 aprint_error_dev(sc->sc_dev, "GET SWITCH CONFIG timeout\n");
3835 rv = -1;
3836 goto done;
3837 }
3838 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3839 aprint_error_dev(sc->sc_dev, "GET SWITCH CONFIG error\n");
3840 rv = -1;
3841 goto done;
3842 }
3843
3844 hdr = IXL_DMA_KVA(&idm);
3845 elms = (struct ixl_aq_switch_config_element *)(hdr + 1);
3846
3847 nelm = le16toh(hdr->num_reported);
3848 if (nelm < 1) {
3849 aprint_error_dev(sc->sc_dev, "no switch config available\n");
3850 rv = -1;
3851 goto done;
3852 }
3853
3854 for (i = 0; i < nelm; i++) {
3855 elm = &elms[i];
3856
3857 aprint_debug_dev(sc->sc_dev,
3858 "type %x revision %u seid %04x\n",
3859 elm->type, elm->revision, le16toh(elm->seid));
3860 aprint_debug_dev(sc->sc_dev,
3861 "uplink %04x downlink %04x\n",
3862 le16toh(elm->uplink_seid),
3863 le16toh(elm->downlink_seid));
3864 aprint_debug_dev(sc->sc_dev,
3865 "conntype %x scheduler %04x extra %04x\n",
3866 elm->connection_type,
3867 le16toh(elm->scheduler_id),
3868 le16toh(elm->element_info));
3869 }
3870
3871 elm = &elms[0];
3872
3873 sc->sc_uplink_seid = elm->uplink_seid;
3874 sc->sc_downlink_seid = elm->downlink_seid;
3875 sc->sc_seid = elm->seid;
3876
3877 if ((sc->sc_uplink_seid == htole16(0)) !=
3878 (sc->sc_downlink_seid == htole16(0))) {
3879 aprint_error_dev(sc->sc_dev, "SEIDs are misconfigured\n");
3880 rv = -1;
3881 goto done;
3882 }
3883
3884 done:
3885 ixl_dmamem_free(sc, &idm);
3886 return rv;
3887 }
3888
3889 static int
3890 ixl_phy_mask_ints(struct ixl_softc *sc)
3891 {
3892 struct ixl_aq_desc iaq;
3893
3894 memset(&iaq, 0, sizeof(iaq));
3895 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_EVENT_MASK);
3896 iaq.iaq_param[2] = htole32(IXL_AQ_PHY_EV_MASK &
3897 ~(IXL_AQ_PHY_EV_LINK_UPDOWN | IXL_AQ_PHY_EV_MODULE_QUAL_FAIL |
3898 IXL_AQ_PHY_EV_MEDIA_NA));
3899
3900 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
3901 aprint_error_dev(sc->sc_dev, "SET PHY EVENT MASK timeout\n");
3902 return -1;
3903 }
3904 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3905 aprint_error_dev(sc->sc_dev, "SET PHY EVENT MASK error\n");
3906 return -1;
3907 }
3908
3909 return 0;
3910 }
3911
3912 static int
3913 ixl_get_phy_abilities(struct ixl_softc *sc, struct ixl_dmamem *idm)
3914 {
3915 struct ixl_aq_desc iaq;
3916 int rv;
3917
3918 memset(&iaq, 0, sizeof(iaq));
3919 iaq.iaq_flags = htole16(IXL_AQ_BUF |
3920 (IXL_DMA_LEN(idm) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
3921 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_GET_ABILITIES);
3922 iaq.iaq_datalen = htole16(IXL_DMA_LEN(idm));
3923 iaq.iaq_param[0] = htole32(IXL_AQ_PHY_REPORT_INIT);
3924 ixl_aq_dva(&iaq, IXL_DMA_DVA(idm));
3925
3926 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
3927 BUS_DMASYNC_PREREAD);
3928
3929 rv = ixl_atq_poll(sc, &iaq, 250);
3930
3931 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
3932 BUS_DMASYNC_POSTREAD);
3933
3934 if (rv != 0)
3935 return -1;
3936
3937 return le16toh(iaq.iaq_retval);
3938 }
3939
3940 static int
3941 ixl_get_phy_types(struct ixl_softc *sc, uint64_t *phy_types_ptr)
3942 {
3943 struct ixl_dmamem idm;
3944 struct ixl_aq_phy_abilities *phy;
3945 uint64_t phy_types;
3946 int rv;
3947
3948 if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
3949 aprint_error_dev(sc->sc_dev,
3950 		    "unable to allocate phy abilities buffer\n");
3951 return -1;
3952 }
3953
3954 rv = ixl_get_phy_abilities(sc, &idm);
3955 switch (rv) {
3956 case -1:
3957 aprint_error_dev(sc->sc_dev, "GET PHY ABILITIES timeout\n");
3958 goto done;
3959 case IXL_AQ_RC_OK:
3960 break;
3961 case IXL_AQ_RC_EIO:
3962 		aprint_error_dev(sc->sc_dev, "unable to query phy types\n");
3963 break;
3964 default:
3965 aprint_error_dev(sc->sc_dev,
3966 		    "GET PHY ABILITIES error %u\n", rv);
3967 goto done;
3968 }
3969
3970 phy = IXL_DMA_KVA(&idm);
3971
3972 phy_types = le32toh(phy->phy_type);
3973 phy_types |= (uint64_t)le32toh(phy->phy_type_ext) << 32;
3974
3975 *phy_types_ptr = phy_types;
3976
3977 rv = 0;
3978
3979 done:
3980 ixl_dmamem_free(sc, &idm);
3981 return rv;
3982 }
3983
3984 static int
3985 ixl_get_link_status_poll(struct ixl_softc *sc)
3986 {
3987 struct ixl_aq_desc iaq;
3988 struct ixl_aq_link_param *param;
3989 int link;
3990
3991 memset(&iaq, 0, sizeof(iaq));
3992 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
3993 param = (struct ixl_aq_link_param *)iaq.iaq_param;
3994 param->notify = IXL_AQ_LINK_NOTIFY;
3995
3996 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
3997 return ETIMEDOUT;
3998 }
3999 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4000 return EIO;
4001 }
4002
4003 link = ixl_set_link_status(sc, &iaq);
4004 sc->sc_ec.ec_if.if_link_state = link;
4005
4006 return 0;
4007 }
4008
4009 static int
4010 ixl_get_vsi(struct ixl_softc *sc)
4011 {
4012 struct ixl_dmamem *vsi = &sc->sc_scratch;
4013 struct ixl_aq_desc iaq;
4014 struct ixl_aq_vsi_param *param;
4015 struct ixl_aq_vsi_reply *reply;
4016 int rv;
4017
4018 /* grumble, vsi info isn't "known" at compile time */
4019
4020 memset(&iaq, 0, sizeof(iaq));
4021 iaq.iaq_flags = htole16(IXL_AQ_BUF |
4022 (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4023 iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VSI_PARAMS);
4024 iaq.iaq_datalen = htole16(IXL_DMA_LEN(vsi));
4025 ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
4026
4027 param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
4028 param->uplink_seid = sc->sc_seid;
4029
4030 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4031 BUS_DMASYNC_PREREAD);
4032
4033 rv = ixl_atq_poll(sc, &iaq, 250);
4034
4035 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4036 BUS_DMASYNC_POSTREAD);
4037
4038 if (rv != 0) {
4039 return ETIMEDOUT;
4040 }
4041
4042 switch (le16toh(iaq.iaq_retval)) {
4043 case IXL_AQ_RC_OK:
4044 break;
4045 case IXL_AQ_RC_ENOENT:
4046 return ENOENT;
4047 case IXL_AQ_RC_EACCES:
4048 return EACCES;
4049 default:
4050 return EIO;
4051 }
4052
4053 reply = (struct ixl_aq_vsi_reply *)iaq.iaq_param;
4054 sc->sc_vsi_number = reply->vsi_number;
4055
4056 return 0;
4057 }
4058
4059 static int
4060 ixl_set_vsi(struct ixl_softc *sc)
4061 {
4062 struct ixl_dmamem *vsi = &sc->sc_scratch;
4063 struct ixl_aq_desc iaq;
4064 struct ixl_aq_vsi_param *param;
4065 struct ixl_aq_vsi_data *data = IXL_DMA_KVA(vsi);
4066 unsigned int qnum;
4067 uint16_t val;
4068 int rv;
4069
4070 qnum = sc->sc_nqueue_pairs - 1;
4071
4072 data->valid_sections = htole16(IXL_AQ_VSI_VALID_QUEUE_MAP |
4073 IXL_AQ_VSI_VALID_VLAN);
4074
4075 CLR(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_MASK));
4076 SET(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_CONTIG));
4077 data->queue_mapping[0] = htole16(0);
4078 data->tc_mapping[0] = htole16((0 << IXL_AQ_VSI_TC_Q_OFFSET_SHIFT) |
4079 (qnum << IXL_AQ_VSI_TC_Q_NUMBER_SHIFT));
4080
4081 val = le16toh(data->port_vlan_flags);
4082 CLR(val, IXL_AQ_VSI_PVLAN_MODE_MASK | IXL_AQ_VSI_PVLAN_EMOD_MASK);
4083 SET(val, IXL_AQ_VSI_PVLAN_MODE_ALL);
4084
4085 if (ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWTAGGING)) {
4086 SET(val, IXL_AQ_VSI_PVLAN_EMOD_STR_BOTH);
4087 } else {
4088 SET(val, IXL_AQ_VSI_PVLAN_EMOD_NOTHING);
4089 }
4090
4091 data->port_vlan_flags = htole16(val);
4092
4093 /* grumble, vsi info isn't "known" at compile time */
4094
4095 memset(&iaq, 0, sizeof(iaq));
4096 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD |
4097 (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4098 iaq.iaq_opcode = htole16(IXL_AQ_OP_UPD_VSI_PARAMS);
4099 iaq.iaq_datalen = htole16(IXL_DMA_LEN(vsi));
4100 ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
4101
4102 param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
4103 param->uplink_seid = sc->sc_seid;
4104
4105 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4106 BUS_DMASYNC_PREWRITE);
4107
4108 rv = ixl_atq_poll(sc, &iaq, 250);
4109
4110 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4111 BUS_DMASYNC_POSTWRITE);
4112
4113 if (rv != 0) {
4114 return ETIMEDOUT;
4115 }
4116
4117 switch (le16toh(iaq.iaq_retval)) {
4118 case IXL_AQ_RC_OK:
4119 break;
4120 case IXL_AQ_RC_ENOENT:
4121 return ENOENT;
4122 case IXL_AQ_RC_EACCES:
4123 return EACCES;
4124 default:
4125 return EIO;
4126 }
4127
4128 return 0;
4129 }
4130
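/*
 * Configure PFQF_CTL_0: select the 128-entry RSS hash LUT and enable
 * the flow director, ethertype and MAC/VLAN filters for this PF.
 */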
4131 static void
4132 ixl_set_filter_control(struct ixl_softc *sc)
4133 {
4134 uint32_t reg;
4135
4136 reg = ixl_rd_rx_csr(sc, I40E_PFQF_CTL_0);
4137
4138 CLR(reg, I40E_PFQF_CTL_0_HASHLUTSIZE_MASK);
4139 SET(reg, I40E_HASH_LUT_SIZE_128 << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT);
4140
4141 SET(reg, I40E_PFQF_CTL_0_FD_ENA_MASK);
4142 SET(reg, I40E_PFQF_CTL_0_ETYPE_ENA_MASK);
4143 SET(reg, I40E_PFQF_CTL_0_MACVLAN_ENA_MASK);
4144
4145 ixl_wr_rx_csr(sc, I40E_PFQF_CTL_0, reg);
4146 }
4147
4148 static inline void
4149 ixl_get_default_rss_key(uint32_t *buf, size_t len)
4150 {
4151 size_t cplen;
4152 uint8_t rss_seed[RSS_KEYSIZE];
4153
4154 rss_getkey(rss_seed);
4155 memset(buf, 0, len);
4156
4157 cplen = MIN(len, sizeof(rss_seed));
4158 memcpy(buf, rss_seed, cplen);
4159 }
4160
4161 static void
4162 ixl_set_rss_key(struct ixl_softc *sc)
4163 {
4164 uint32_t rss_seed[IXL_RSS_KEY_SIZE_REG];
4165 size_t i;
4166
4167 ixl_get_default_rss_key(rss_seed, sizeof(rss_seed));
4168
4169 for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
4170 ixl_wr_rx_csr(sc, I40E_PFQF_HKEY(i), rss_seed[i]);
4171 }
4172 }
4173
4174 static void
4175 ixl_set_rss_pctype(struct ixl_softc *sc)
4176 {
4177 uint64_t set_hena = 0;
4178 uint32_t hena0, hena1;
4179
4180 if (sc->sc_mac_type == I40E_MAC_X722)
4181 set_hena = IXL_RSS_HENA_DEFAULT_X722;
4182 else
4183 set_hena = IXL_RSS_HENA_DEFAULT_XL710;
4184
4185 hena0 = ixl_rd_rx_csr(sc, I40E_PFQF_HENA(0));
4186 hena1 = ixl_rd_rx_csr(sc, I40E_PFQF_HENA(1));
4187
4188 SET(hena0, set_hena);
4189 SET(hena1, set_hena >> 32);
4190
4191 ixl_wr_rx_csr(sc, I40E_PFQF_HENA(0), hena0);
4192 ixl_wr_rx_csr(sc, I40E_PFQF_HENA(1), hena1);
4193 }
4194
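/*
 * Fill the RSS hash lookup table by assigning its entries round-robin
 * to the active queue pairs, then write it out 32 bits at a time.
 */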
4195 static void
4196 ixl_set_rss_hlut(struct ixl_softc *sc)
4197 {
4198 unsigned int qid;
4199 uint8_t hlut_buf[512], lut_mask;
4200 uint32_t *hluts;
4201 size_t i, hluts_num;
4202
4203 lut_mask = (0x01 << sc->sc_rss_table_entry_width) - 1;
4204
4205 for (i = 0; i < sc->sc_rss_table_size; i++) {
4206 qid = i % sc->sc_nqueue_pairs;
4207 hlut_buf[i] = qid & lut_mask;
4208 }
4209
4210 hluts = (uint32_t *)hlut_buf;
4211 hluts_num = sc->sc_rss_table_size >> 2;
4212 for (i = 0; i < hluts_num; i++) {
4213 ixl_wr(sc, I40E_PFQF_HLUT(i), hluts[i]);
4214 }
4215 ixl_flush(sc);
4216 }
4217
4218 static void
4219 ixl_config_rss(struct ixl_softc *sc)
4220 {
4221
4222 KASSERT(mutex_owned(&sc->sc_cfg_lock));
4223
4224 ixl_set_rss_key(sc);
4225 ixl_set_rss_pctype(sc);
4226 ixl_set_rss_hlut(sc);
4227 }
4228
4229 static const struct ixl_phy_type *
4230 ixl_search_phy_type(uint8_t phy_type)
4231 {
4232 const struct ixl_phy_type *itype;
4233 uint64_t mask;
4234 unsigned int i;
4235
4236 if (phy_type >= 64)
4237 return NULL;
4238
4239 mask = 1ULL << phy_type;
4240
4241 for (i = 0; i < __arraycount(ixl_phy_type_map); i++) {
4242 itype = &ixl_phy_type_map[i];
4243
4244 if (ISSET(itype->phy_type, mask))
4245 return itype;
4246 }
4247
4248 return NULL;
4249 }
4250
4251 static uint64_t
4252 ixl_search_link_speed(uint8_t link_speed)
4253 {
4254 const struct ixl_speed_type *type;
4255 unsigned int i;
4256
4257 for (i = 0; i < __arraycount(ixl_speed_type_map); i++) {
4258 type = &ixl_speed_type_map[i];
4259
4260 if (ISSET(type->dev_speed, link_speed))
4261 return type->net_speed;
4262 }
4263
4264 return 0;
4265 }
4266
4267 static int
4268 ixl_restart_an(struct ixl_softc *sc)
4269 {
4270 struct ixl_aq_desc iaq;
4271
4272 memset(&iaq, 0, sizeof(iaq));
4273 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_RESTART_AN);
4274 iaq.iaq_param[0] =
4275 htole32(IXL_AQ_PHY_RESTART_AN | IXL_AQ_PHY_LINK_ENABLE);
4276
4277 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4278 aprint_error_dev(sc->sc_dev, "RESTART AN timeout\n");
4279 return -1;
4280 }
4281 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4282 aprint_error_dev(sc->sc_dev, "RESTART AN error\n");
4283 return -1;
4284 }
4285
4286 return 0;
4287 }
4288
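/*
 * Add a MAC/VLAN filter for this VSI through the admin queue and map
 * the admin queue return code to an errno.
 */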
4289 static int
4290 ixl_add_macvlan(struct ixl_softc *sc, const uint8_t *macaddr,
4291 uint16_t vlan, uint16_t flags)
4292 {
4293 struct ixl_aq_desc iaq;
4294 struct ixl_aq_add_macvlan *param;
4295 struct ixl_aq_add_macvlan_elem *elem;
4296
4297 memset(&iaq, 0, sizeof(iaq));
4298 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4299 iaq.iaq_opcode = htole16(IXL_AQ_OP_ADD_MACVLAN);
4300 iaq.iaq_datalen = htole16(sizeof(*elem));
4301 ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));
4302
4303 param = (struct ixl_aq_add_macvlan *)&iaq.iaq_param;
4304 param->num_addrs = htole16(1);
4305 param->seid0 = htole16(0x8000) | sc->sc_seid;
4306 param->seid1 = 0;
4307 param->seid2 = 0;
4308
4309 elem = IXL_DMA_KVA(&sc->sc_scratch);
4310 memset(elem, 0, sizeof(*elem));
4311 memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
4312 elem->flags = htole16(IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH | flags);
4313 elem->vlan = htole16(vlan);
4314
4315 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4316 return IXL_AQ_RC_EINVAL;
4317 }
4318
4319 switch (le16toh(iaq.iaq_retval)) {
4320 case IXL_AQ_RC_OK:
4321 break;
4322 case IXL_AQ_RC_ENOSPC:
4323 return ENOSPC;
4324 case IXL_AQ_RC_ENOENT:
4325 return ENOENT;
4326 case IXL_AQ_RC_EACCES:
4327 return EACCES;
4328 case IXL_AQ_RC_EEXIST:
4329 return EEXIST;
4330 case IXL_AQ_RC_EINVAL:
4331 return EINVAL;
4332 default:
4333 return EIO;
4334 }
4335
4336 return 0;
4337 }
4338
4339 static int
4340 ixl_remove_macvlan(struct ixl_softc *sc, const uint8_t *macaddr,
4341 uint16_t vlan, uint16_t flags)
4342 {
4343 struct ixl_aq_desc iaq;
4344 struct ixl_aq_remove_macvlan *param;
4345 struct ixl_aq_remove_macvlan_elem *elem;
4346
4347 memset(&iaq, 0, sizeof(iaq));
4348 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4349 iaq.iaq_opcode = htole16(IXL_AQ_OP_REMOVE_MACVLAN);
4350 iaq.iaq_datalen = htole16(sizeof(*elem));
4351 ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));
4352
4353 param = (struct ixl_aq_remove_macvlan *)&iaq.iaq_param;
4354 param->num_addrs = htole16(1);
4355 param->seid0 = htole16(0x8000) | sc->sc_seid;
4356 param->seid1 = 0;
4357 param->seid2 = 0;
4358
4359 elem = IXL_DMA_KVA(&sc->sc_scratch);
4360 memset(elem, 0, sizeof(*elem));
4361 memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
4362 elem->flags = htole16(IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH | flags);
4363 elem->vlan = htole16(vlan);
4364
4365 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4366 return EINVAL;
4367 }
4368
4369 switch (le16toh(iaq.iaq_retval)) {
4370 case IXL_AQ_RC_OK:
4371 break;
4372 case IXL_AQ_RC_ENOENT:
4373 return ENOENT;
4374 case IXL_AQ_RC_EACCES:
4375 return EACCES;
4376 case IXL_AQ_RC_EINVAL:
4377 return EINVAL;
4378 default:
4379 return EIO;
4380 }
4381
4382 return 0;
4383 }
4384
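/*
 * Set up the Host Memory Cache (HMC): size the LAN TX/RX (and FCoE)
 * object areas from the hardware's limits, allocate the backing pages
 * and the segment descriptor table, and program both into the PF's
 * HMC registers so the hardware can keep queue contexts in host memory.
 */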
4385 static int
4386 ixl_hmc(struct ixl_softc *sc)
4387 {
4388 struct {
4389 uint32_t count;
4390 uint32_t minsize;
4391 bus_size_t objsiz;
4392 bus_size_t setoff;
4393 bus_size_t setcnt;
4394 } regs[] = {
4395 {
4396 0,
4397 IXL_HMC_TXQ_MINSIZE,
4398 I40E_GLHMC_LANTXOBJSZ,
4399 I40E_GLHMC_LANTXBASE(sc->sc_pf_id),
4400 I40E_GLHMC_LANTXCNT(sc->sc_pf_id),
4401 },
4402 {
4403 0,
4404 IXL_HMC_RXQ_MINSIZE,
4405 I40E_GLHMC_LANRXOBJSZ,
4406 I40E_GLHMC_LANRXBASE(sc->sc_pf_id),
4407 I40E_GLHMC_LANRXCNT(sc->sc_pf_id),
4408 },
4409 {
4410 0,
4411 0,
4412 I40E_GLHMC_FCOEDDPOBJSZ,
4413 I40E_GLHMC_FCOEDDPBASE(sc->sc_pf_id),
4414 I40E_GLHMC_FCOEDDPCNT(sc->sc_pf_id),
4415 },
4416 {
4417 0,
4418 0,
4419 I40E_GLHMC_FCOEFOBJSZ,
4420 I40E_GLHMC_FCOEFBASE(sc->sc_pf_id),
4421 I40E_GLHMC_FCOEFCNT(sc->sc_pf_id),
4422 },
4423 };
4424 struct ixl_hmc_entry *e;
4425 uint64_t size, dva;
4426 uint8_t *kva;
4427 uint64_t *sdpage;
4428 unsigned int i;
4429 int npages, tables;
4430 uint32_t reg;
4431
4432 CTASSERT(__arraycount(regs) <= __arraycount(sc->sc_hmc_entries));
4433
4434 regs[IXL_HMC_LAN_TX].count = regs[IXL_HMC_LAN_RX].count =
4435 ixl_rd(sc, I40E_GLHMC_LANQMAX);
4436
4437 size = 0;
4438 for (i = 0; i < __arraycount(regs); i++) {
4439 e = &sc->sc_hmc_entries[i];
4440
4441 e->hmc_count = regs[i].count;
4442 reg = ixl_rd(sc, regs[i].objsiz);
4443 e->hmc_size = BIT_ULL(0x3F & reg);
4444 e->hmc_base = size;
4445
4446 if ((e->hmc_size * 8) < regs[i].minsize) {
4447 aprint_error_dev(sc->sc_dev,
4448 "kernel hmc entry is too big\n");
4449 return -1;
4450 }
4451
4452 size += roundup(e->hmc_size * e->hmc_count, IXL_HMC_ROUNDUP);
4453 }
4454 size = roundup(size, IXL_HMC_PGSIZE);
4455 npages = size / IXL_HMC_PGSIZE;
4456
4457 tables = roundup(size, IXL_HMC_L2SZ) / IXL_HMC_L2SZ;
4458
4459 if (ixl_dmamem_alloc(sc, &sc->sc_hmc_pd, size, IXL_HMC_PGSIZE) != 0) {
4460 aprint_error_dev(sc->sc_dev,
4461 "unable to allocate hmc pd memory\n");
4462 return -1;
4463 }
4464
4465 if (ixl_dmamem_alloc(sc, &sc->sc_hmc_sd, tables * IXL_HMC_PGSIZE,
4466 IXL_HMC_PGSIZE) != 0) {
4467 aprint_error_dev(sc->sc_dev,
4468 "unable to allocate hmc sd memory\n");
4469 ixl_dmamem_free(sc, &sc->sc_hmc_pd);
4470 return -1;
4471 }
4472
4473 kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
4474 memset(kva, 0, IXL_DMA_LEN(&sc->sc_hmc_pd));
4475
4476 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
4477 0, IXL_DMA_LEN(&sc->sc_hmc_pd),
4478 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4479
4480 dva = IXL_DMA_DVA(&sc->sc_hmc_pd);
4481 sdpage = IXL_DMA_KVA(&sc->sc_hmc_sd);
4482 memset(sdpage, 0, IXL_DMA_LEN(&sc->sc_hmc_sd));
4483
4484 for (i = 0; (int)i < npages; i++) {
4485 *sdpage = htole64(dva | IXL_HMC_PDVALID);
4486 sdpage++;
4487
4488 dva += IXL_HMC_PGSIZE;
4489 }
4490
4491 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_sd),
4492 0, IXL_DMA_LEN(&sc->sc_hmc_sd),
4493 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4494
4495 dva = IXL_DMA_DVA(&sc->sc_hmc_sd);
4496 for (i = 0; (int)i < tables; i++) {
4497 uint32_t count;
4498
4499 KASSERT(npages >= 0);
4500
4501 count = ((unsigned int)npages > IXL_HMC_PGS) ?
4502 IXL_HMC_PGS : (unsigned int)npages;
4503
4504 ixl_wr(sc, I40E_PFHMC_SDDATAHIGH, dva >> 32);
4505 ixl_wr(sc, I40E_PFHMC_SDDATALOW, dva |
4506 (count << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
4507 (1U << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT));
4508 ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
4509 ixl_wr(sc, I40E_PFHMC_SDCMD,
4510 (1U << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | i);
4511
4512 npages -= IXL_HMC_PGS;
4513 dva += IXL_HMC_PGSIZE;
4514 }
4515
4516 for (i = 0; i < __arraycount(regs); i++) {
4517 e = &sc->sc_hmc_entries[i];
4518
4519 ixl_wr(sc, regs[i].setoff, e->hmc_base / IXL_HMC_ROUNDUP);
4520 ixl_wr(sc, regs[i].setcnt, e->hmc_count);
4521 }
4522
4523 return 0;
4524 }
4525
4526 static void
4527 ixl_hmc_free(struct ixl_softc *sc)
4528 {
4529 ixl_dmamem_free(sc, &sc->sc_hmc_sd);
4530 ixl_dmamem_free(sc, &sc->sc_hmc_pd);
4531 }
4532
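/*
 * Pack a host-format context structure into the bit layout expected by
 * the hardware: each ixl_hmc_pack entry copies "width" bits from byte
 * "offset" of the source into bit position "lsb" of the destination.
 */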
4533 static void
4534 ixl_hmc_pack(void *d, const void *s, const struct ixl_hmc_pack *packing,
4535 unsigned int npacking)
4536 {
4537 uint8_t *dst = d;
4538 const uint8_t *src = s;
4539 unsigned int i;
4540
4541 for (i = 0; i < npacking; i++) {
4542 const struct ixl_hmc_pack *pack = &packing[i];
4543 unsigned int offset = pack->lsb / 8;
4544 unsigned int align = pack->lsb % 8;
4545 const uint8_t *in = src + pack->offset;
4546 uint8_t *out = dst + offset;
4547 int width = pack->width;
4548 unsigned int inbits = 0;
4549
4550 if (align) {
4551 inbits = (*in++) << align;
4552 *out++ |= (inbits & 0xff);
4553 inbits >>= 8;
4554
4555 width -= 8 - align;
4556 }
4557
4558 while (width >= 8) {
4559 inbits |= (*in++) << align;
4560 *out++ = (inbits & 0xff);
4561 inbits >>= 8;
4562
4563 width -= 8;
4564 }
4565
4566 if (width > 0) {
4567 inbits |= (*in) << align;
4568 *out |= (inbits & ((1 << width) - 1));
4569 }
4570 }
4571 }
4572
4573 static struct ixl_aq_buf *
4574 ixl_aqb_alloc(struct ixl_softc *sc)
4575 {
4576 struct ixl_aq_buf *aqb;
4577
4578 aqb = malloc(sizeof(*aqb), M_DEVBUF, M_WAITOK);
4579 if (aqb == NULL)
4580 return NULL;
4581
4582 aqb->aqb_size = IXL_AQ_BUFLEN;
4583
4584 if (bus_dmamap_create(sc->sc_dmat, aqb->aqb_size, 1,
4585 aqb->aqb_size, 0,
4586 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &aqb->aqb_map) != 0)
4587 goto free;
4588 if (bus_dmamem_alloc(sc->sc_dmat, aqb->aqb_size,
4589 IXL_AQ_ALIGN, 0, &aqb->aqb_seg, 1, &aqb->aqb_nsegs,
4590 BUS_DMA_WAITOK) != 0)
4591 goto destroy;
4592 if (bus_dmamem_map(sc->sc_dmat, &aqb->aqb_seg, aqb->aqb_nsegs,
4593 aqb->aqb_size, &aqb->aqb_data, BUS_DMA_WAITOK) != 0)
4594 goto dma_free;
4595 if (bus_dmamap_load(sc->sc_dmat, aqb->aqb_map, aqb->aqb_data,
4596 aqb->aqb_size, NULL, BUS_DMA_WAITOK) != 0)
4597 goto unmap;
4598
4599 return aqb;
4600 unmap:
4601 bus_dmamem_unmap(sc->sc_dmat, aqb->aqb_data, aqb->aqb_size);
4602 dma_free:
4603 bus_dmamem_free(sc->sc_dmat, &aqb->aqb_seg, 1);
4604 destroy:
4605 bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
4606 free:
4607 free(aqb, M_DEVBUF);
4608
4609 return NULL;
4610 }
4611
4612 static void
4613 ixl_aqb_free(struct ixl_softc *sc, struct ixl_aq_buf *aqb)
4614 {
4615 bus_dmamap_unload(sc->sc_dmat, aqb->aqb_map);
4616 bus_dmamem_unmap(sc->sc_dmat, aqb->aqb_data, aqb->aqb_size);
4617 bus_dmamem_free(sc->sc_dmat, &aqb->aqb_seg, 1);
4618 bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
4619 free(aqb, M_DEVBUF);
4620 }
4621
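/*
 * Post buffers on the admin receive queue (ARQ), reusing idle buffers
 * when possible and allocating new ones otherwise.  Returns non-zero
 * if the tail pointer was advanced.
 */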
4622 static int
4623 ixl_arq_fill(struct ixl_softc *sc)
4624 {
4625 struct ixl_aq_buf *aqb;
4626 struct ixl_aq_desc *arq, *iaq;
4627 unsigned int prod = sc->sc_arq_prod;
4628 unsigned int n;
4629 int post = 0;
4630
4631 n = ixl_rxr_unrefreshed(sc->sc_arq_prod, sc->sc_arq_cons,
4632 IXL_AQ_NUM);
4633 arq = IXL_DMA_KVA(&sc->sc_arq);
4634
4635 if (__predict_false(n <= 0))
4636 return 0;
4637
4638 do {
4639 aqb = sc->sc_arq_live[prod];
4640 iaq = &arq[prod];
4641
4642 if (aqb == NULL) {
4643 aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle);
4644 if (aqb != NULL) {
4645 SIMPLEQ_REMOVE(&sc->sc_arq_idle, aqb,
4646 ixl_aq_buf, aqb_entry);
4647 } else if ((aqb = ixl_aqb_alloc(sc)) == NULL) {
4648 break;
4649 }
4650
4651 sc->sc_arq_live[prod] = aqb;
4652 memset(aqb->aqb_data, 0, aqb->aqb_size);
4653
4654 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0,
4655 aqb->aqb_size, BUS_DMASYNC_PREREAD);
4656
4657 iaq->iaq_flags = htole16(IXL_AQ_BUF |
4658 (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ?
4659 IXL_AQ_LB : 0));
4660 iaq->iaq_opcode = 0;
4661 iaq->iaq_datalen = htole16(aqb->aqb_size);
4662 iaq->iaq_retval = 0;
4663 iaq->iaq_cookie = 0;
4664 iaq->iaq_param[0] = 0;
4665 iaq->iaq_param[1] = 0;
4666 ixl_aq_dva(iaq, aqb->aqb_map->dm_segs[0].ds_addr);
4667 }
4668
4669 prod++;
4670 prod &= IXL_AQ_MASK;
4671
4672 post = 1;
4673
4674 } while (--n);
4675
4676 if (post) {
4677 sc->sc_arq_prod = prod;
4678 ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
4679 }
4680
4681 return post;
4682 }
4683
4684 static void
4685 ixl_arq_unfill(struct ixl_softc *sc)
4686 {
4687 struct ixl_aq_buf *aqb;
4688 unsigned int i;
4689
4690 for (i = 0; i < __arraycount(sc->sc_arq_live); i++) {
4691 aqb = sc->sc_arq_live[i];
4692 if (aqb == NULL)
4693 continue;
4694
4695 sc->sc_arq_live[i] = NULL;
4696 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, aqb->aqb_size,
4697 BUS_DMASYNC_POSTREAD);
4698 ixl_aqb_free(sc, aqb);
4699 }
4700
4701 while ((aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle)) != NULL) {
4702 SIMPLEQ_REMOVE(&sc->sc_arq_idle, aqb,
4703 ixl_aq_buf, aqb_entry);
4704 ixl_aqb_free(sc, aqb);
4705 }
4706 }
4707
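/*
 * Quiesce the hardware: disable PF and VF interrupts, park the
 * interrupt linked lists, announce the coming TX disables and then
 * stop every queue owned by this PF.
 */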
4708 static void
4709 ixl_clear_hw(struct ixl_softc *sc)
4710 {
4711 uint32_t num_queues, base_queue;
4712 uint32_t num_pf_int;
4713 uint32_t num_vf_int;
4714 uint32_t num_vfs;
4715 uint32_t i, j;
4716 uint32_t val;
4717 uint32_t eol = 0x7ff;
4718
4719 /* get number of interrupts, queues, and vfs */
4720 val = ixl_rd(sc, I40E_GLPCI_CNF2);
4721 num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
4722 I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
4723 num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
4724 I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
4725
4726 val = ixl_rd(sc, I40E_PFLAN_QALLOC);
4727 base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
4728 I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
4729 j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
4730 I40E_PFLAN_QALLOC_LASTQ_SHIFT;
4731 if (val & I40E_PFLAN_QALLOC_VALID_MASK)
4732 num_queues = (j - base_queue) + 1;
4733 else
4734 num_queues = 0;
4735
4736 val = ixl_rd(sc, I40E_PF_VT_PFALLOC);
4737 i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
4738 I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
4739 j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
4740 I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
4741 if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
4742 num_vfs = (j - i) + 1;
4743 else
4744 num_vfs = 0;
4745
4746 /* stop all the interrupts */
4747 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0);
4748 ixl_flush(sc);
4749 val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
4750 for (i = 0; i < num_pf_int - 2; i++)
4751 ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), val);
4752 ixl_flush(sc);
4753
4754 /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
4755 val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4756 ixl_wr(sc, I40E_PFINT_LNKLST0, val);
4757 for (i = 0; i < num_pf_int - 2; i++)
4758 ixl_wr(sc, I40E_PFINT_LNKLSTN(i), val);
4759 val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4760 for (i = 0; i < num_vfs; i++)
4761 ixl_wr(sc, I40E_VPINT_LNKLST0(i), val);
4762 for (i = 0; i < num_vf_int - 2; i++)
4763 ixl_wr(sc, I40E_VPINT_LNKLSTN(i), val);
4764
4765 /* warn the HW of the coming Tx disables */
4766 for (i = 0; i < num_queues; i++) {
4767 uint32_t abs_queue_idx = base_queue + i;
4768 uint32_t reg_block = 0;
4769
4770 if (abs_queue_idx >= 128) {
4771 reg_block = abs_queue_idx / 128;
4772 abs_queue_idx %= 128;
4773 }
4774
4775 val = ixl_rd(sc, I40E_GLLAN_TXPRE_QDIS(reg_block));
4776 val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
4777 val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
4778 val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
4779
4780 ixl_wr(sc, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
4781 }
4782 delaymsec(400);
4783
4784 /* stop all the queues */
4785 for (i = 0; i < num_queues; i++) {
4786 ixl_wr(sc, I40E_QINT_TQCTL(i), 0);
4787 ixl_wr(sc, I40E_QTX_ENA(i), 0);
4788 ixl_wr(sc, I40E_QINT_RQCTL(i), 0);
4789 ixl_wr(sc, I40E_QRX_ENA(i), 0);
4790 }
4791
4792 /* short wait for all queue disables to settle */
4793 delaymsec(50);
4794 }
4795
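/*
 * Perform a PF reset, first waiting for any global reset to reach
 * steady state and for the firmware to report it is ready.
 */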
4796 static int
4797 ixl_pf_reset(struct ixl_softc *sc)
4798 {
4799 uint32_t cnt = 0;
4800 uint32_t cnt1 = 0;
4801 uint32_t reg = 0, reg0 = 0;
4802 uint32_t grst_del;
4803
4804 /*
4805 * Poll for Global Reset steady state in case of recent GRST.
4806 * The grst delay value is in 100ms units, and we'll wait a
4807 * couple counts longer to be sure we don't just miss the end.
4808 */
4809 grst_del = ixl_rd(sc, I40E_GLGEN_RSTCTL);
4810 grst_del &= I40E_GLGEN_RSTCTL_GRSTDEL_MASK;
4811 grst_del >>= I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
4812
4813 grst_del = grst_del * 20;
4814
4815 for (cnt = 0; cnt < grst_del; cnt++) {
4816 reg = ixl_rd(sc, I40E_GLGEN_RSTAT);
4817 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
4818 break;
4819 delaymsec(100);
4820 }
4821 if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
4822 aprint_error(", Global reset polling failed to complete\n");
4823 return -1;
4824 }
4825
4826 /* Now Wait for the FW to be ready */
4827 for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
4828 reg = ixl_rd(sc, I40E_GLNVM_ULD);
4829 reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
4830 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
4831 if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
4832 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))
4833 break;
4834
4835 delaymsec(10);
4836 }
4837 if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
4838 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
4839 aprint_error(", wait for FW Reset complete timed out "
4840 "(I40E_GLNVM_ULD = 0x%x)\n", reg);
4841 return -1;
4842 }
4843
4844 /*
4845 * If there was a Global Reset in progress when we got here,
4846 * we don't need to do the PF Reset
4847 */
4848 if (cnt == 0) {
4849 reg = ixl_rd(sc, I40E_PFGEN_CTRL);
4850 ixl_wr(sc, I40E_PFGEN_CTRL, reg | I40E_PFGEN_CTRL_PFSWR_MASK);
4851 for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) {
4852 reg = ixl_rd(sc, I40E_PFGEN_CTRL);
4853 if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
4854 break;
4855 delaymsec(1);
4856
4857 reg0 = ixl_rd(sc, I40E_GLGEN_RSTAT);
4858 if (reg0 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
4859 aprint_error(", Core reset upcoming."
4860 " Skipping PF reset reset request\n");
4861 return -1;
4862 }
4863 }
4864 if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
4865 aprint_error(", PF reset polling failed to complete"
4866 "(I40E_PFGEN_CTRL= 0x%x)\n", reg);
4867 return -1;
4868 }
4869 }
4870
4871 return 0;
4872 }
4873
4874 static int
4875 ixl_dmamem_alloc(struct ixl_softc *sc, struct ixl_dmamem *ixm,
4876 bus_size_t size, bus_size_t align)
4877 {
4878 ixm->ixm_size = size;
4879
4880 if (bus_dmamap_create(sc->sc_dmat, ixm->ixm_size, 1,
4881 ixm->ixm_size, 0,
4882 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
4883 &ixm->ixm_map) != 0)
4884 return 1;
4885 if (bus_dmamem_alloc(sc->sc_dmat, ixm->ixm_size,
4886 align, 0, &ixm->ixm_seg, 1, &ixm->ixm_nsegs,
4887 BUS_DMA_WAITOK) != 0)
4888 goto destroy;
4889 if (bus_dmamem_map(sc->sc_dmat, &ixm->ixm_seg, ixm->ixm_nsegs,
4890 ixm->ixm_size, &ixm->ixm_kva, BUS_DMA_WAITOK) != 0)
4891 goto free;
4892 if (bus_dmamap_load(sc->sc_dmat, ixm->ixm_map, ixm->ixm_kva,
4893 ixm->ixm_size, NULL, BUS_DMA_WAITOK) != 0)
4894 goto unmap;
4895
4896 memset(ixm->ixm_kva, 0, ixm->ixm_size);
4897
4898 return 0;
4899 unmap:
4900 bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
4901 free:
4902 bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
4903 destroy:
4904 bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
4905 return 1;
4906 }
4907
4908 static void
4909 ixl_dmamem_free(struct ixl_softc *sc, struct ixl_dmamem *ixm)
4910 {
4911 bus_dmamap_unload(sc->sc_dmat, ixm->ixm_map);
4912 bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
4913 bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
4914 bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
4915 }
4916
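/*
 * Switch from the catch-all "ignore VLAN" filters to perfect-match
 * filters for the station and broadcast addresses on every VLAN ID
 * configured on the interface.
 */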
4917 static int
4918 ixl_setup_vlan_hwfilter(struct ixl_softc *sc)
4919 {
4920 struct ethercom *ec = &sc->sc_ec;
4921 struct vlanid_list *vlanidp;
4922 int rv;
4923
4924 ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
4925 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
4926 ixl_remove_macvlan(sc, etherbroadcastaddr, 0,
4927 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
4928
4929 rv = ixl_add_macvlan(sc, sc->sc_enaddr, 0,
4930 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
4931 if (rv != 0)
4932 return rv;
4933 rv = ixl_add_macvlan(sc, etherbroadcastaddr, 0,
4934 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
4935 if (rv != 0)
4936 return rv;
4937
4938 ETHER_LOCK(ec);
4939 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
4940 rv = ixl_add_macvlan(sc, sc->sc_enaddr,
4941 vlanidp->vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
4942 if (rv != 0)
4943 break;
4944 rv = ixl_add_macvlan(sc, etherbroadcastaddr,
4945 vlanidp->vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
4946 if (rv != 0)
4947 break;
4948 }
4949 ETHER_UNLOCK(ec);
4950
4951 return rv;
4952 }
4953
4954 static void
4955 ixl_teardown_vlan_hwfilter(struct ixl_softc *sc)
4956 {
4957 struct vlanid_list *vlanidp;
4958 struct ethercom *ec = &sc->sc_ec;
4959
4960 ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
4961 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
4962 ixl_remove_macvlan(sc, etherbroadcastaddr, 0,
4963 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
4964
4965 ETHER_LOCK(ec);
4966 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
4967 ixl_remove_macvlan(sc, sc->sc_enaddr,
4968 vlanidp->vid, IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
4969 ixl_remove_macvlan(sc, etherbroadcastaddr,
4970 vlanidp->vid, IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
4971 }
4972 ETHER_UNLOCK(ec);
4973
4974 ixl_add_macvlan(sc, sc->sc_enaddr, 0,
4975 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
4976 ixl_add_macvlan(sc, etherbroadcastaddr, 0,
4977 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
4978 }
4979
4980 static int
4981 ixl_update_macvlan(struct ixl_softc *sc)
4982 {
4983 int rv = 0;
4984 int next_ec_capenable = sc->sc_ec.ec_capenable;
4985
4986 if (ISSET(next_ec_capenable, ETHERCAP_VLAN_HWFILTER)) {
4987 rv = ixl_setup_vlan_hwfilter(sc);
4988 if (rv != 0)
4989 ixl_teardown_vlan_hwfilter(sc);
4990 } else {
4991 ixl_teardown_vlan_hwfilter(sc);
4992 }
4993
4994 return rv;
4995 }
4996
4997 static int
4998 ixl_ifflags_cb(struct ethercom *ec)
4999 {
5000 struct ifnet *ifp = &ec->ec_if;
5001 struct ixl_softc *sc = ifp->if_softc;
5002 int rv, change;
5003
5004 mutex_enter(&sc->sc_cfg_lock);
5005
5006 change = ec->ec_capenable ^ sc->sc_cur_ec_capenable;
5007
5008 if (ISSET(change, ETHERCAP_VLAN_HWTAGGING)) {
5009 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWTAGGING;
5010 rv = ENETRESET;
5011 goto out;
5012 }
5013
5014 if (ISSET(change, ETHERCAP_VLAN_HWFILTER)) {
5015 rv = ixl_update_macvlan(sc);
5016 if (rv == 0) {
5017 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWFILTER;
5018 } else {
5019 CLR(ec->ec_capenable, ETHERCAP_VLAN_HWFILTER);
5020 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER);
5021 }
5022 }
5023
5024 rv = ixl_iff(sc);
5025 out:
5026 mutex_exit(&sc->sc_cfg_lock);
5027
5028 return rv;
5029 }
5030
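/*
 * Translate an admin queue link status report into ifmedia
 * active/status words, an if_link_state value and a baudrate.
 */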
5031 static int
5032 ixl_set_link_status(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
5033 {
5034 const struct ixl_aq_link_status *status;
5035 const struct ixl_phy_type *itype;
5036
5037 uint64_t ifm_active = IFM_ETHER;
5038 uint64_t ifm_status = IFM_AVALID;
5039 int link_state = LINK_STATE_DOWN;
5040 uint64_t baudrate = 0;
5041
5042 status = (const struct ixl_aq_link_status *)iaq->iaq_param;
5043 if (!ISSET(status->link_info, IXL_AQ_LINK_UP_FUNCTION))
5044 goto done;
5045
5046 ifm_active |= IFM_FDX;
5047 ifm_status |= IFM_ACTIVE;
5048 link_state = LINK_STATE_UP;
5049
5050 itype = ixl_search_phy_type(status->phy_type);
5051 if (itype != NULL)
5052 ifm_active |= itype->ifm_type;
5053
5054 if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_TX))
5055 ifm_active |= IFM_ETH_TXPAUSE;
5056 if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_RX))
5057 ifm_active |= IFM_ETH_RXPAUSE;
5058
5059 baudrate = ixl_search_link_speed(status->link_speed);
5060
5061 done:
5062 /* NET_ASSERT_LOCKED() except during attach */
5063 sc->sc_media_active = ifm_active;
5064 sc->sc_media_status = ifm_status;
5065
5066 sc->sc_ec.ec_if.if_baudrate = baudrate;
5067
5068 return link_state;
5069 }
5070
5071 static int
5072 ixl_establish_intx(struct ixl_softc *sc)
5073 {
5074 pci_chipset_tag_t pc = sc->sc_pa.pa_pc;
5075 pci_intr_handle_t *intr;
5076 char xnamebuf[32];
5077 char intrbuf[PCI_INTRSTR_LEN];
5078 char const *intrstr;
5079
5080 KASSERT(sc->sc_nintrs == 1);
5081
5082 intr = &sc->sc_ihp[0];
5083
5084 intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf));
5085 snprintf(xnamebuf, sizeof(xnamebuf), "%s:legacy",
5086 device_xname(sc->sc_dev));
5087
5088 sc->sc_ihs[0] = pci_intr_establish_xname(pc, *intr, IPL_NET, ixl_intr,
5089 sc, xnamebuf);
5090
5091 if (sc->sc_ihs[0] == NULL) {
5092 aprint_error_dev(sc->sc_dev,
5093 "unable to establish interrupt at %s\n", intrstr);
5094 return -1;
5095 }
5096
5097 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
5098 return 0;
5099 }
5100
5101 static int
5102 ixl_establish_msix(struct ixl_softc *sc)
5103 {
5104 pci_chipset_tag_t pc = sc->sc_pa.pa_pc;
5105 unsigned int vector = 0;
5106 unsigned int i;
5107 char xnamebuf[32];
5108 char intrbuf[PCI_INTRSTR_LEN];
5109 char const *intrstr;
5110
5111 /* the "other" intr is mapped to vector 0 */
5112 vector = 0;
5113 intrstr = pci_intr_string(pc, sc->sc_ihp[vector],
5114 intrbuf, sizeof(intrbuf));
5115 snprintf(xnamebuf, sizeof(xnamebuf), "%s others",
5116 device_xname(sc->sc_dev));
5117 sc->sc_ihs[vector] = pci_intr_establish_xname(pc,
5118 sc->sc_ihp[vector], IPL_NET, ixl_other_intr,
5119 sc, xnamebuf);
5120 if (sc->sc_ihs[vector] == NULL) {
5121 aprint_error_dev(sc->sc_dev,
5122 "unable to establish interrupt at %s\n", intrstr);
5123 goto fail;
5124 }
5125 vector++;
5126 aprint_normal_dev(sc->sc_dev, "interrupt at %s\n", intrstr);
5127
5128 sc->sc_msix_vector_queue = vector;
5129
5130 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
5131 intrstr = pci_intr_string(pc, sc->sc_ihp[vector],
5132 intrbuf, sizeof(intrbuf));
5133 snprintf(xnamebuf, sizeof(xnamebuf), "%s TXRX%d",
5134 device_xname(sc->sc_dev), i);
5135
5136 sc->sc_ihs[vector] = pci_intr_establish_xname(pc,
5137 sc->sc_ihp[vector], IPL_NET, ixl_queue_intr,
5138 (void *)&sc->sc_qps[i], xnamebuf);
5139
5140 if (sc->sc_ihs[vector] == NULL) {
5141 aprint_error_dev(sc->sc_dev,
5142 "unable to establish interrupt at %s\n", intrstr);
5143 goto fail;
5144 }
5145 vector++;
5146 aprint_normal_dev(sc->sc_dev,
5147 "interrupt at %s\n", intrstr);
5148 }
5149
5150 return 0;
5151 fail:
5152 for (i = 0; i < vector; i++) {
5153 pci_intr_disestablish(pc, sc->sc_ihs[i]);
5154 }
5155
5156 sc->sc_msix_vector_queue = 0;
5158
5159 return -1;
5160 }
5161
5162 static void
5163 ixl_set_affinity_msix(struct ixl_softc *sc)
5164 {
5165 kcpuset_t *affinity;
5166 pci_chipset_tag_t pc = sc->sc_pa.pa_pc;
5167 int affinity_to, r;
5168 unsigned int i, vector;
5169 char intrbuf[PCI_INTRSTR_LEN];
5170 char const *intrstr;
5171
5172 affinity_to = 0;
5173 kcpuset_create(&affinity, false);
5174
5175 vector = sc->sc_msix_vector_queue;
5176
5177 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
5178 affinity_to = i % ncpu;
5179
5180 kcpuset_zero(affinity);
5181 kcpuset_set(affinity, affinity_to);
5182
5183 intrstr = pci_intr_string(pc, sc->sc_ihp[vector + i],
5184 intrbuf, sizeof(intrbuf));
5185 r = interrupt_distribute(sc->sc_ihs[vector + i],
5186 affinity, NULL);
5187 if (r == 0) {
5188 aprint_normal_dev(sc->sc_dev,
5189 "for TXRX%u interrupting at %s affinity to %u\n",
5190 i, intrstr, affinity_to);
5191 } else {
5192 aprint_normal_dev(sc->sc_dev,
5193 "for TXRX%u interrupting at %s\n",
5194 i, intrstr);
5195 }
5196 }
5197
5198 vector = 0; /* vector 0 means "other" interrupt */
5199 affinity_to = (affinity_to + 1) % ncpu;
5200 kcpuset_zero(affinity);
5201 kcpuset_set(affinity, affinity_to);
5202
5203 intrstr = pci_intr_string(pc, sc->sc_ihp[vector],
5204 intrbuf, sizeof(intrbuf));
5205 r = interrupt_distribute(sc->sc_ihs[vector], affinity, NULL);
5206 if (r == 0) {
5207 aprint_normal_dev(sc->sc_dev,
5208 "for other interrupting at %s affinity to %u\n",
5209 intrstr, affinity_to);
5210 } else {
5211 aprint_normal_dev(sc->sc_dev,
5212 "for other interrupting at %s", intrstr);
5213 }
5214
5215 kcpuset_destroy(affinity);
5216 }
5217
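/*
 * Route each queue pair's RX and TX interrupt causes to a vector: its
 * own MSI-X vector when available, otherwise the shared legacy vector,
 * and program the default interrupt throttling (ITR) values.
 */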
5218 static void
5219 ixl_config_queue_intr(struct ixl_softc *sc)
5220 {
5221 unsigned int i, vector;
5222
5223 if (sc->sc_intrtype == PCI_INTR_TYPE_MSIX) {
5224 vector = sc->sc_msix_vector_queue;
5225 } else {
5226 vector = I40E_INTR_NOTX_INTR;
5227
5228 ixl_wr(sc, I40E_PFINT_LNKLST0,
5229 (I40E_INTR_NOTX_QUEUE <<
5230 I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
5231 (I40E_QUEUE_TYPE_RX <<
5232 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
5233 }
5234
5235 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
5236 ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), 0);
5237 ixl_flush(sc);
5238
5239 ixl_wr(sc, I40E_PFINT_LNKLSTN(i),
5240 ((i) << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
5241 (I40E_QUEUE_TYPE_RX <<
5242 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
5243
5244 ixl_wr(sc, I40E_QINT_RQCTL(i),
5245 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
5246 (I40E_ITR_INDEX_RX <<
5247 I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
5248 (I40E_INTR_NOTX_RX_QUEUE <<
5249 I40E_QINT_RQCTL_MSIX0_INDX_SHIFT) |
5250 (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
5251 (I40E_QUEUE_TYPE_TX <<
5252 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
5253 I40E_QINT_RQCTL_CAUSE_ENA_MASK);
5254
5255 ixl_wr(sc, I40E_QINT_TQCTL(i),
5256 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
5257 (I40E_ITR_INDEX_TX <<
5258 I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
5259 (I40E_INTR_NOTX_TX_QUEUE <<
5260 I40E_QINT_TQCTL_MSIX0_INDX_SHIFT) |
5261 (I40E_QUEUE_TYPE_EOL <<
5262 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
5263 (I40E_QUEUE_TYPE_RX <<
5264 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) |
5265 I40E_QINT_TQCTL_CAUSE_ENA_MASK);
5266
5267 if (sc->sc_intrtype == PCI_INTR_TYPE_MSIX)
5268 vector++;
5269 }
5270 ixl_flush(sc);
5271
5272 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_RX), 0x7a);
5273 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_TX), 0x7a);
5274 ixl_flush(sc);
5275 }
5276
5277 static void
5278 ixl_config_other_intr(struct ixl_softc *sc)
5279 {
5280 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0);
5281 (void)ixl_rd(sc, I40E_PFINT_ICR0);
5282
5283 ixl_wr(sc, I40E_PFINT_ICR0_ENA,
5284 I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
5285 I40E_PFINT_ICR0_ENA_GRST_MASK |
5286 I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
5287 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
5288 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
5289 I40E_PFINT_ICR0_ENA_VFLR_MASK |
5290 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
5291 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
5292 I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK);
5293
5294 ixl_wr(sc, I40E_PFINT_LNKLST0, 0x7FF);
5295 ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_OTHER), 0);
5296 ixl_wr(sc, I40E_PFINT_STAT_CTL0,
5297 (I40E_ITR_INDEX_OTHER <<
5298 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT));
5299 ixl_flush(sc);
5300 }
5301
5302 static int
5303 ixl_setup_interrupts(struct ixl_softc *sc)
5304 {
5305 struct pci_attach_args *pa = &sc->sc_pa;
5306 pci_intr_type_t max_type, intr_type;
5307 int counts[PCI_INTR_TYPE_SIZE];
5308 int error;
5309 unsigned int i;
5310 bool retry, nomsix = IXL_NOMSIX;
5311
5312 memset(counts, 0, sizeof(counts));
5313 max_type = PCI_INTR_TYPE_MSIX;
5314 /* QPs + other interrupt */
5315 counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueue_pairs_max + 1;
5316 counts[PCI_INTR_TYPE_INTX] = 1;
5317
5318 if (nomsix)
5319 counts[PCI_INTR_TYPE_MSIX] = 0;
5320
5321 do {
5322 retry = false;
5323 error = pci_intr_alloc(pa, &sc->sc_ihp, counts, max_type);
5324 if (error != 0) {
5325 aprint_error_dev(sc->sc_dev,
5326 "couldn't map interrupt\n");
5327 break;
5328 }
5329 intr_type = pci_intr_type(pa->pa_pc, sc->sc_ihp[0]);
5330 sc->sc_nintrs = counts[intr_type];
5331 KASSERT(sc->sc_nintrs > 0);
5332 
5333 for (i = 0; i < sc->sc_nintrs; i++) {
5334 pci_intr_setattr(pa->pa_pc, &sc->sc_ihp[i],
5335 PCI_INTR_MPSAFE, true);
5336 }
5337
5338 sc->sc_ihs = kmem_alloc(sizeof(sc->sc_ihs[0]) * sc->sc_nintrs,
5339 KM_SLEEP);
5340
5341 if (intr_type == PCI_INTR_TYPE_MSIX) {
5342 error = ixl_establish_msix(sc);
5343 if (error) {
5344 counts[PCI_INTR_TYPE_MSIX] = 0;
5345 retry = true;
5346 } else {
5347 ixl_set_affinity_msix(sc);
5348 }
5349 } else if (intr_type == PCI_INTR_TYPE_INTX) {
5350 error = ixl_establish_intx(sc);
5351 } else {
5352 error = -1;
5353 }
5354
5355 if (error) {
5356 kmem_free(sc->sc_ihs,
5357 sizeof(sc->sc_ihs[0]) * sc->sc_nintrs);
5358 pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs);
5359 } else {
5360 sc->sc_intrtype = intr_type;
5361 }
5362 } while (retry);
5363
5364 return error;
5365 }
5366
5367 static void
5368 ixl_teardown_interrupts(struct ixl_softc *sc)
5369 {
5370 struct pci_attach_args *pa = &sc->sc_pa;
5371 unsigned int i;
5372
5373 for (i = 0; i < sc->sc_nintrs; i++) {
5374 pci_intr_disestablish(pa->pa_pc, sc->sc_ihs[i]);
5375 }
5376
5377 pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs);
5378
5379 kmem_free(sc->sc_ihs, sizeof(sc->sc_ihs[0]) * sc->sc_nintrs);
5380 sc->sc_ihs = NULL;
5381 sc->sc_nintrs = 0;
5382 }
5383
5384 static int
5385 ixl_setup_stats(struct ixl_softc *sc)
5386 {
5387 struct ixl_queue_pair *qp;
5388 struct ixl_tx_ring *txr;
5389 struct ixl_rx_ring *rxr;
5390 unsigned int i;
5391
5392 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
5393 qp = &sc->sc_qps[i];
5394 txr = qp->qp_txr;
5395 rxr = qp->qp_rxr;
5396
5397 evcnt_attach_dynamic(&txr->txr_defragged, EVCNT_TYPE_MISC,
5398 NULL, qp->qp_name, "m_defrag succeeded");
5399 evcnt_attach_dynamic(&txr->txr_defrag_failed, EVCNT_TYPE_MISC,
5400 NULL, qp->qp_name, "m_defrag failed");
5401 evcnt_attach_dynamic(&txr->txr_pcqdrop, EVCNT_TYPE_MISC,
5402 NULL, qp->qp_name, "Dropped in pcq");
5403 evcnt_attach_dynamic(&txr->txr_transmitdef, EVCNT_TYPE_MISC,
5404 NULL, qp->qp_name, "Deferred transmit");
5405 evcnt_attach_dynamic(&txr->txr_intr, EVCNT_TYPE_INTR,
5406 NULL, qp->qp_name, "Interrupt on queue");
5407 evcnt_attach_dynamic(&txr->txr_defer, EVCNT_TYPE_MISC,
5408 NULL, qp->qp_name, "Handled queue in softint/workqueue");
5409
5410 evcnt_attach_dynamic(&rxr->rxr_mgethdr_failed, EVCNT_TYPE_MISC,
5411 NULL, qp->qp_name, "MGETHDR failed");
5412 evcnt_attach_dynamic(&rxr->rxr_mgetcl_failed, EVCNT_TYPE_MISC,
5413 NULL, qp->qp_name, "MCLGET failed");
5414 evcnt_attach_dynamic(&rxr->rxr_mbuf_load_failed,
5415 EVCNT_TYPE_MISC, NULL, qp->qp_name,
5416 "bus_dmamap_load_mbuf failed");
5417 evcnt_attach_dynamic(&rxr->rxr_intr, EVCNT_TYPE_INTR,
5418 NULL, qp->qp_name, "Interrupt on queue");
5419 evcnt_attach_dynamic(&rxr->rxr_defer, EVCNT_TYPE_MISC,
5420 NULL, qp->qp_name, "Handled queue in softint/workqueue");
5421 }
5422
5423 evcnt_attach_dynamic(&sc->sc_event_atq, EVCNT_TYPE_INTR,
5424 NULL, device_xname(sc->sc_dev), "Interrupt for other events");
5425 evcnt_attach_dynamic(&sc->sc_event_link, EVCNT_TYPE_MISC,
5426 NULL, device_xname(sc->sc_dev), "Link status event");
5427 evcnt_attach_dynamic(&sc->sc_event_ecc_err, EVCNT_TYPE_MISC,
5428 NULL, device_xname(sc->sc_dev), "ECC error");
5429 evcnt_attach_dynamic(&sc->sc_event_pci_exception, EVCNT_TYPE_MISC,
5430 NULL, device_xname(sc->sc_dev), "PCI exception");
5431 evcnt_attach_dynamic(&sc->sc_event_crit_err, EVCNT_TYPE_MISC,
5432 NULL, device_xname(sc->sc_dev), "Critical error");
5433
5434 return 0;
5435 }
5436
5437 static void
5438 ixl_teardown_stats(struct ixl_softc *sc)
5439 {
5440 struct ixl_tx_ring *txr;
5441 struct ixl_rx_ring *rxr;
5442 unsigned int i;
5443
5444 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
5445 txr = sc->sc_qps[i].qp_txr;
5446 rxr = sc->sc_qps[i].qp_rxr;
5447
5448 evcnt_detach(&txr->txr_defragged);
5449 evcnt_detach(&txr->txr_defrag_failed);
5450 evcnt_detach(&txr->txr_pcqdrop);
5451 evcnt_detach(&txr->txr_transmitdef);
5452 evcnt_detach(&txr->txr_intr);
5453 evcnt_detach(&txr->txr_defer);
5454
5455 evcnt_detach(&rxr->rxr_mgethdr_failed);
5456 evcnt_detach(&rxr->rxr_mgetcl_failed);
5457 evcnt_detach(&rxr->rxr_mbuf_load_failed);
5458 evcnt_detach(&rxr->rxr_intr);
5459 evcnt_detach(&rxr->rxr_defer);
5460 }
5461
5462 evcnt_detach(&sc->sc_event_atq);
5463 evcnt_detach(&sc->sc_event_link);
5464 evcnt_detach(&sc->sc_event_ecc_err);
5465 evcnt_detach(&sc->sc_event_pci_exception);
5466 evcnt_detach(&sc->sc_event_crit_err);
5467 }
5468
5469 static int
5470 ixl_setup_sysctls(struct ixl_softc *sc)
5471 {
5472 const char *devname;
5473 struct sysctllog **log;
5474 const struct sysctlnode *rnode, *rxnode, *txnode;
5475 int error;
5476
5477 log = &sc->sc_sysctllog;
5478 devname = device_xname(sc->sc_dev);
5479
5480 error = sysctl_createv(log, 0, NULL, &rnode,
5481 0, CTLTYPE_NODE, devname,
5482 SYSCTL_DESCR("ixl information and settings"),
5483 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
5484 if (error)
5485 goto out;
5486
5487 error = sysctl_createv(log, 0, &rnode, NULL,
5488 CTLFLAG_READWRITE, CTLTYPE_BOOL, "txrx_workqueue",
5489 SYSCTL_DESCR("Use workqueue for packet processing"),
5490 NULL, 0, &sc->sc_txrx_workqueue, 0, CTL_CREATE, CTL_EOL);
5491 if (error)
5492 goto out;
5493
5494 error = sysctl_createv(log, 0, &rnode, &rxnode,
5495 0, CTLTYPE_NODE, "rx",
5496 SYSCTL_DESCR("ixl information and settings for Rx"),
5497 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
5498 if (error)
5499 goto out;
5500
5501 error = sysctl_createv(log, 0, &rxnode, NULL,
5502 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
5503 SYSCTL_DESCR("max number of Rx packets"
5504 " to process for interrupt processing"),
5505 NULL, 0, &sc->sc_rx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
5506 if (error)
5507 goto out;
5508
5509 error = sysctl_createv(log, 0, &rxnode, NULL,
5510 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
5511 SYSCTL_DESCR("max number of Rx packets"
5512 " to process for deferred processing"),
5513 NULL, 0, &sc->sc_rx_process_limit, 0, CTL_CREATE, CTL_EOL);
5514 if (error)
5515 goto out;
5516
5517 error = sysctl_createv(log, 0, &rnode, &txnode,
5518 0, CTLTYPE_NODE, "tx",
5519 SYSCTL_DESCR("ixl information and settings for Tx"),
5520 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
5521 if (error)
5522 goto out;
5523
5524 error = sysctl_createv(log, 0, &txnode, NULL,
5525 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
5526 SYSCTL_DESCR("max number of Tx packets"
5527 " to process for interrupt processing"),
5528 NULL, 0, &sc->sc_tx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
5529 if (error)
5530 goto out;
5531
5532 error = sysctl_createv(log, 0, &txnode, NULL,
5533 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
5534 SYSCTL_DESCR("max number of Tx packets"
5535 " to process for deferred processing"),
5536 NULL, 0, &sc->sc_tx_process_limit, 0, CTL_CREATE, CTL_EOL);
5537 if (error)
5538 goto out;
5539
5540 out:
5541 if (error) {
5542 aprint_error_dev(sc->sc_dev,
5543 "unable to create sysctl node\n");
5544 sysctl_teardown(log);
5545 }
5546
5547 return error;
5548 }
5549
5550 static void
5551 ixl_teardown_sysctls(struct ixl_softc *sc)
5552 {
5553
5554 sysctl_teardown(&sc->sc_sysctllog);
5555 }
5556
5557 static struct workqueue *
5558 ixl_workq_create(const char *name, pri_t prio, int ipl, int flags)
5559 {
5560 struct workqueue *wq;
5561 int error;
5562
5563 error = workqueue_create(&wq, name, ixl_workq_work, NULL,
5564 prio, ipl, flags);
5565
5566 if (error)
5567 return NULL;
5568
5569 return wq;
5570 }
5571
5572 static void
5573 ixl_workq_destroy(struct workqueue *wq)
5574 {
5575
5576 workqueue_destroy(wq);
5577 }
5578
5579 static void
5580 ixl_work_set(struct ixl_work *work, void (*func)(void *), void *arg)
5581 {
5582
5583 memset(work, 0, sizeof(*work));
5584 work->ixw_func = func;
5585 work->ixw_arg = arg;
5586 }
5587
5588 static void
5589 ixl_work_add(struct workqueue *wq, struct ixl_work *work)
5590 {
5591 if (atomic_cas_uint(&work->ixw_added, 0, 1) != 0)
5592 return;
5593
5594 workqueue_enqueue(wq, &work->ixw_cookie, NULL);
5595 }
5596
5597 static void
5598 ixl_work_wait(struct workqueue *wq, struct ixl_work *work)
5599 {
5600
5601 workqueue_wait(wq, &work->ixw_cookie);
5602 }
5603
5604 static void
5605 ixl_workq_work(struct work *wk, void *context)
5606 {
5607 struct ixl_work *work;
5608
5609 work = container_of(wk, struct ixl_work, ixw_cookie);
5610
5611 atomic_swap_uint(&work->ixw_added, 0);
5612 kpreempt_disable();
5613 work->ixw_func(work->ixw_arg);
5614 kpreempt_enable();
5615 }
5616
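/*
 * Some RX control registers are expected to be accessed through the
 * admin queue rather than directly (tracked by sc_rxctl_atq).  The
 * ixl_rd_rx_csr/ixl_wr_rx_csr wrappers below retry the admin queue
 * path briefly and fall back to plain register access.
 */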
5617 static int
5618 ixl_rx_ctl_read(struct ixl_softc *sc, uint32_t reg, uint32_t *rv)
5619 {
5620 struct ixl_aq_desc iaq;
5621
5622 memset(&iaq, 0, sizeof(iaq));
5623 iaq.iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_REG_READ);
5624 iaq.iaq_param[1] = htole32(reg);
5625
5626 if (ixl_atq_poll(sc, &iaq, 250) != 0)
5627 return ETIMEDOUT;
5628
5629 switch (le16toh(iaq.iaq_retval)) {
5630 case IXL_AQ_RC_OK:
5631 /* success */
5632 break;
5633 case IXL_AQ_RC_EACCES:
5634 return EPERM;
5635 case IXL_AQ_RC_EAGAIN:
5636 return EAGAIN;
5637 default:
5638 return EIO;
5639 }
5640
5641 *rv = le32toh(iaq.iaq_param[3]);
5642 return 0;
5643 }
5644
5645 static uint32_t
5646 ixl_rd_rx_csr(struct ixl_softc *sc, uint32_t reg)
5647 {
5648 uint32_t val;
5649 int rv, retry, retry_limit;
5650
5651 retry_limit = sc->sc_rxctl_atq ? 5 : 0;
5652
5653 for (retry = 0; retry < retry_limit; retry++) {
5654 rv = ixl_rx_ctl_read(sc, reg, &val);
5655 if (rv == 0)
5656 return val;
5657 else if (rv == EAGAIN)
5658 delaymsec(1);
5659 else
5660 break;
5661 }
5662
5663 val = ixl_rd(sc, reg);
5664
5665 return val;
5666 }
5667
5668 static int
5669 ixl_rx_ctl_write(struct ixl_softc *sc, uint32_t reg, uint32_t value)
5670 {
5671 struct ixl_aq_desc iaq;
5672
5673 memset(&iaq, 0, sizeof(iaq));
5674 iaq.iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_REG_WRITE);
5675 iaq.iaq_param[1] = htole32(reg);
5676 iaq.iaq_param[3] = htole32(value);
5677
5678 if (ixl_atq_poll(sc, &iaq, 250) != 0)
5679 return ETIMEDOUT;
5680
5681 switch (le16toh(iaq.iaq_retval)) {
5682 case IXL_AQ_RC_OK:
5683 /* success */
5684 break;
5685 case IXL_AQ_RC_EACCES:
5686 return EPERM;
5687 case IXL_AQ_RC_EAGAIN:
5688 return EAGAIN;
5689 default:
5690 return EIO;
5691 }
5692
5693 return 0;
5694 }
5695
5696 static void
5697 ixl_wr_rx_csr(struct ixl_softc *sc, uint32_t reg, uint32_t value)
5698 {
5699 int rv, retry, retry_limit;
5700
5701 retry_limit = sc->sc_rxctl_atq ? 5 : 0;
5702
5703 for (retry = 0; retry < retry_limit; retry++) {
5704 rv = ixl_rx_ctl_write(sc, reg, value);
5705 if (rv == 0)
5706 return;
5707 else if (rv == EAGAIN)
5708 delaymsec(1);
5709 else
5710 break;
5711 }
5712
5713 ixl_wr(sc, reg, value);
5714 }
5715
5716 MODULE(MODULE_CLASS_DRIVER, if_ixl, "pci");
5717
5718 #ifdef _MODULE
5719 #include "ioconf.c"
5720 #endif
5721
5722 static int
5723 if_ixl_modcmd(modcmd_t cmd, void *opaque)
5724 {
5725 int error = 0;
5726
5727 #ifdef _MODULE
5728 switch (cmd) {
5729 case MODULE_CMD_INIT:
5730 error = config_init_component(cfdriver_ioconf_if_ixl,
5731 cfattach_ioconf_if_ixl, cfdata_ioconf_if_ixl);
5732 break;
5733 case MODULE_CMD_FINI:
5734 error = config_fini_component(cfdriver_ioconf_if_ixl,
5735 cfattach_ioconf_if_ixl, cfdata_ioconf_if_ixl);
5736 break;
5737 default:
5738 error = ENOTTY;
5739 break;
5740 }
5741 #endif
5742
5743 return error;
5744 }
5745