ixgbe.h revision 1.23 1 /******************************************************************************
2
3 Copyright (c) 2001-2015, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*
34 * Copyright (c) 2011 The NetBSD Foundation, Inc.
35 * All rights reserved.
36 *
37 * This code is derived from software contributed to The NetBSD Foundation
38 * by Coyote Point Systems, Inc.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
50 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
51 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
52 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
53 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
54 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
55 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
56 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
57 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
58 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
59 * POSSIBILITY OF SUCH DAMAGE.
60 */
61 /*$FreeBSD: head/sys/dev/ixgbe/ixgbe.h 303890 2016-08-09 19:32:06Z dumbbell $*/
62 /*$NetBSD: ixgbe.h,v 1.23 2017/02/10 06:35:22 msaitoh Exp $*/
63
64
65 #ifndef _IXGBE_H_
66 #define _IXGBE_H_
67
68
69 #include <sys/param.h>
70 #include <sys/reboot.h>
71 #include <sys/systm.h>
72 #include <sys/pcq.h>
73 #include <sys/mbuf.h>
74 #include <sys/protosw.h>
75 #include <sys/socket.h>
76 #include <sys/malloc.h>
77 #include <sys/kernel.h>
78 #include <sys/module.h>
79 #include <sys/sockio.h>
80
81 #include <net/if.h>
82 #include <net/if_arp.h>
83 #include <net/bpf.h>
84 #include <net/if_ether.h>
85 #include <net/if_dl.h>
86 #include <net/if_media.h>
87
88 #include <net/bpf.h>
89 #include <net/if_types.h>
90 #include <net/if_vlanvar.h>
91
92 #include <netinet/in_systm.h>
93 #include <netinet/in.h>
94 #include <netinet/ip.h>
95 #include <netinet/ip6.h>
96 #include <netinet/tcp.h>
97 #include <netinet/udp.h>
98
99 #include <sys/bus.h>
100 #include <dev/pci/pcivar.h>
101 #include <dev/pci/pcireg.h>
102 #include <sys/proc.h>
103 #include <sys/sysctl.h>
104 #include <sys/endian.h>
105 #include <sys/workqueue.h>
106 #include <sys/cpu.h>
107 #include <sys/interrupt.h>
108 #include <sys/bitops.h>
109
110 #ifdef PCI_IOV
111 #include <sys/nv.h>
112 #include <sys/iov_schema.h>
113 #include <dev/pci/pci_iov.h>
114 #endif
115
116 #include "ixgbe_netbsd.h"
117 #include "ixgbe_api.h"
118 #include "ixgbe_common.h"
119 #include "ixgbe_phy.h"
120 #include "ixgbe_vf.h"
121
122 /* Tunables */
123
124 /*
125 * TxDescriptors Valid Range: 64-4096 Default Value: 256 This value is the
126 * number of transmit descriptors allocated by the driver. Increasing this
127 * value allows the driver to queue more transmits. Each descriptor is 16
* bytes. Performance tests have shown the 2K value to be optimal for top
129 * performance.
130 */
131 #define DEFAULT_TXD 1024
132 #define PERFORM_TXD 2048
133 #define MAX_TXD 4096
134 #define MIN_TXD 64
135
136 /*
137 * RxDescriptors Valid Range: 64-4096 Default Value: 256 This value is the
138 * number of receive descriptors allocated for each RX queue. Increasing this
139 * value allows the driver to buffer more incoming packets. Each descriptor
140 * is 16 bytes. A receive buffer is also allocated for each descriptor.
141 *
142 * Note: with 8 rings and a dual port card, it is possible to bump up
143 * against the system mbuf pool limit, you can tune nmbclusters
144 * to adjust for this.
145 */
146 #define DEFAULT_RXD 1024
147 #define PERFORM_RXD 2048
148 #define MAX_RXD 4096
149 #define MIN_RXD 64
150
151 /* Alignment for rings */
152 #define DBA_ALIGN 128
153
154 /*
155 * This is the max watchdog interval, ie. the time that can
* pass between any two TX clean operations, which only happen
157 * when the TX hardware is functioning.
158 */
159 #define IXGBE_WATCHDOG (10 * hz)
160
161 /*
* These parameters control when the driver calls the routine to reclaim
163 * transmit descriptors.
164 */
165 #define IXGBE_TX_CLEANUP_THRESHOLD (adapter->num_tx_desc / 8)
166 #define IXGBE_TX_OP_THRESHOLD (adapter->num_tx_desc / 32)
167
168 /* These defines are used in MTU calculations */
169 #define IXGBE_MAX_FRAME_SIZE 9728
170 #define IXGBE_MTU_HDR (ETHER_HDR_LEN + ETHER_CRC_LEN)
171 #define IXGBE_MTU_HDR_VLAN (ETHER_HDR_LEN + ETHER_CRC_LEN + \
172 ETHER_VLAN_ENCAP_LEN)
173 #define IXGBE_MAX_MTU (IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR)
174 #define IXGBE_MAX_MTU_VLAN (IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR_VLAN)
175
176 /* Flow control constants */
177 #define IXGBE_FC_PAUSE 0xFFFF
178 #define IXGBE_FC_HI 0x20000
179 #define IXGBE_FC_LO 0x10000
180
181 /*
182 * Used for optimizing small rx mbufs. Effort is made to keep the copy
183 * small and aligned for the CPU L1 cache.
184 *
185 * MHLEN is typically 168 bytes, giving us 8-byte alignment. Getting
186 * 32 byte alignment needed for the fast bcopy results in 8 bytes being
187 * wasted. Getting 64 byte alignment, which _should_ be ideal for
188 * modern Intel CPUs, results in 40 bytes wasted and a significant drop
189 * in observed efficiency of the optimization, 97.9% -> 81.8%.
190 */
191 #define MPKTHSIZE (offsetof(struct _mbuf_dummy, m_pktdat))
192 #define IXGBE_RX_COPY_HDR_PADDED ((((MPKTHSIZE - 1) / 32) + 1) * 32)
193 #define IXGBE_RX_COPY_LEN (MSIZE - IXGBE_RX_COPY_HDR_PADDED)
194 #define IXGBE_RX_COPY_ALIGN (IXGBE_RX_COPY_HDR_PADDED - MPKTHSIZE)
195
196 /* Keep older OS drivers building... */
197 #if !defined(SYSCTL_ADD_UQUAD)
198 #define SYSCTL_ADD_UQUAD SYSCTL_ADD_QUAD
199 #endif
200
201 /* Defines for printing debug information */
202 #define DEBUG_INIT 0
203 #define DEBUG_IOCTL 0
204 #define DEBUG_HW 0
205
206 #define INIT_DEBUGOUT(S) if (DEBUG_INIT) printf(S "\n")
207 #define INIT_DEBUGOUT1(S, A) if (DEBUG_INIT) printf(S "\n", A)
208 #define INIT_DEBUGOUT2(S, A, B) if (DEBUG_INIT) printf(S "\n", A, B)
209 #define IOCTL_DEBUGOUT(S) if (DEBUG_IOCTL) printf(S "\n")
210 #define IOCTL_DEBUGOUT1(S, A) if (DEBUG_IOCTL) printf(S "\n", A)
211 #define IOCTL_DEBUGOUT2(S, A, B) if (DEBUG_IOCTL) printf(S "\n", A, B)
212 #define HW_DEBUGOUT(S) if (DEBUG_HW) printf(S "\n")
213 #define HW_DEBUGOUT1(S, A) if (DEBUG_HW) printf(S "\n", A)
214 #define HW_DEBUGOUT2(S, A, B) if (DEBUG_HW) printf(S "\n", A, B)
215
216 #define MAX_NUM_MULTICAST_ADDRESSES 128
217 #define IXGBE_82598_SCATTER 100
218 #define IXGBE_82599_SCATTER 32
219 #define MSIX_82598_BAR 3
220 #define MSIX_82599_BAR 4
221 #define IXGBE_TSO_SIZE 262140
222 #define IXGBE_RX_HDR 128
223 #define IXGBE_VFTA_SIZE 128
224 #define IXGBE_BR_SIZE 4096
225 #define IXGBE_QUEUE_MIN_FREE 32
226 #define IXGBE_MAX_TX_BUSY 10
227 #define IXGBE_QUEUE_HUNG 0x80000000
228
229 #define IXV_EITR_DEFAULT 128
230
231 /* IOCTL define to gather SFP+ Diagnostic data */
232 #define SIOCGI2C SIOCGIFGENERIC
233
234 /* Offload bits in mbuf flag */
235 #define M_CSUM_OFFLOAD \
236 (M_CSUM_IPv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_UDPv6|M_CSUM_TCPv6)
237
238 /* Backward compatibility items for very old versions */
239 #ifndef pci_find_cap
240 #define pci_find_cap pci_find_extcap
241 #endif
242
243 #ifndef DEVMETHOD_END
244 #define DEVMETHOD_END { NULL, NULL }
245 #endif
246
247 /*
248 * Interrupt Moderation parameters
249 */
250 #define IXGBE_LOW_LATENCY 128
251 #define IXGBE_AVE_LATENCY 400
252 #define IXGBE_BULK_LATENCY 1200
253
254 /* Using 1FF (the max value), the interval is ~1.05ms */
255 #define IXGBE_LINK_ITR_QUANTA 0x1FF
256 #define IXGBE_LINK_ITR ((IXGBE_LINK_ITR_QUANTA << 3) & \
257 IXGBE_EITR_ITR_INT_MASK)
258
259 /* MAC type macros */
260 #define IXGBE_IS_X550VF(_adapter) \
261 ((_adapter->hw.mac.type == ixgbe_mac_X550_vf) || \
262 (_adapter->hw.mac.type == ixgbe_mac_X550EM_x_vf))
263
264 #define IXGBE_IS_VF(_adapter) \
265 (IXGBE_IS_X550VF(_adapter) || \
266 (_adapter->hw.mac.type == ixgbe_mac_X540_vf) || \
267 (_adapter->hw.mac.type == ixgbe_mac_82599_vf))
268
269 #ifdef PCI_IOV
270 #define IXGBE_VF_INDEX(vmdq) ((vmdq) / 32)
271 #define IXGBE_VF_BIT(vmdq) (1 << ((vmdq) % 32))
272
273 #define IXGBE_VT_MSG_MASK 0xFFFF
274
275 #define IXGBE_VT_MSGINFO(msg) \
276 (((msg) & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT)
277
278 #define IXGBE_VF_GET_QUEUES_RESP_LEN 5
279
280 #define IXGBE_API_VER_1_0 0
281 #define IXGBE_API_VER_2_0 1 /* Solaris API. Not supported. */
282 #define IXGBE_API_VER_1_1 2
283 #define IXGBE_API_VER_UNKNOWN UINT16_MAX
284
/*
 * SR-IOV pool layout: how the hardware queues are divided among
 * virtual machines (see ixgbe_vf_queues() for per-pool queue counts).
 */
enum ixgbe_iov_mode {
	IXGBE_64_VM,	/* 64 pools, 2 queues each */
	IXGBE_32_VM,	/* 32 pools, 4 queues each */
	IXGBE_NO_VM	/* SR-IOV disabled */
};
290 #endif /* PCI_IOV */
291
292
/*
 *****************************************************************************
 * vendor_info_array
 *
 * This array contains the list of Subvendor/Subdevice IDs on which the
 * driver should load.
 *****************************************************************************
 */
typedef struct _ixgbe_vendor_info_t {
	unsigned int vendor_id;		/* PCI vendor ID */
	unsigned int device_id;		/* PCI device ID */
	unsigned int subvendor_id;	/* PCI subsystem vendor ID */
	unsigned int subdevice_id;	/* PCI subsystem device ID */
	unsigned int index;		/* driver-private table index; use not visible here */
} ixgbe_vendor_info_t;
309
/* This is used to get SFP+ module data (see SIOCGI2C above). */
struct ixgbe_i2c_req {
	u8 dev_addr;	/* I2C device address on the module */
	u8 offset;	/* byte offset to start reading at */
	u8 len;		/* number of bytes requested (at most sizeof(data)) */
	u8 data[8];	/* returned diagnostic bytes */
};
317
318
/* Per-slot transmit bookkeeping, one per TX descriptor. */
struct ixgbe_tx_buf {
	union ixgbe_adv_tx_desc	*eop;	/* end-of-packet descriptor for this chain */
	struct mbuf		*m_head;	/* mbuf chain being transmitted */
	bus_dmamap_t		map;	/* DMA map covering m_head */
};
324
/* Per-slot receive bookkeeping, one per RX descriptor. */
struct ixgbe_rx_buf {
	struct mbuf	*buf;	/* cluster currently posted to hardware */
	struct mbuf	*fmp;	/* first mbuf of an in-progress multi-desc frame */
	bus_dmamap_t	pmap;	/* DMA map for buf */
	u_int		flags;
#define IXGBE_RX_COPY	0x01	/* small frame was copied; buf stays posted */
	uint64_t	addr;	/* DMA address for the descriptor — presumably cached for refresh */
};
333
/*
 * Bus dma allocation structure used by ixgbe_dma_malloc and ixgbe_dma_free.
 */
struct ixgbe_dma_alloc {
	bus_addr_t		dma_paddr;	/* device-visible (bus) address */
	void			*dma_vaddr;	/* kernel virtual address */
	ixgbe_dma_tag_t		*dma_tag;	/* tag the map was created from */
	bus_dmamap_t		dma_map;	/* map backing the allocation */
	bus_dma_segment_t	dma_seg;	/* single contiguous segment */
	bus_size_t		dma_size;	/* size of the allocation in bytes */
};
345
/* One entry of the driver-maintained multicast address table (see mta). */
struct ixgbe_mc_addr {
	u8	addr[IXGBE_ETH_LENGTH_OF_ADDRESS];	/* multicast MAC address */
	u32	vmdq;					/* VMDq pool to deliver to */
};
350
/*
 * Driver queue struct: this is the interrupt container
 * for the associated tx and rx ring.
 */
struct ix_queue {
	struct adapter	*adapter;	/* back-pointer to the softc */
	u32		msix;		/* This queue's MSIX vector */
	u32		eims;		/* This queue's EIMS bit */
	u32		eitr_setting;	/* interrupt-moderation (EITR) value */
	u32		me;		/* queue index */
	struct resource	*res;		/* interrupt resource */
	void		*tag;		/* interrupt handler tag */
	int		busy;		/* progress state used by the watchdog */
	struct tx_ring	*txr;		/* paired transmit ring */
	struct rx_ring	*rxr;		/* paired receive ring */
	void		*que_si;	/* softint for deferred queue work */
	struct evcnt	irqs;		/* interrupts on this vector */
	char		namebuf[32];	/* sysctl node name storage */
	char		evnamebuf[32];	/* evcnt name storage */
};
371
/*
 * The transmit ring, one per queue
 */
struct tx_ring {
	struct adapter		*adapter;	/* back-pointer to the softc */
	kmutex_t		tx_mtx;		/* serializes ring access */
	u32			me;		/* ring index */
	u32			tail;		/* tail register offset — presumably TDT(me); confirm */
	int			busy;		/* cleanup progress, for the watchdog */
	union ixgbe_adv_tx_desc	*tx_base;	/* descriptor ring (DMA memory) */
	struct ixgbe_tx_buf	*tx_buffers;	/* per-slot mbuf/DMA-map state */
	struct ixgbe_dma_alloc	txdma;		/* backing DMA allocation for tx_base */
	volatile u16		tx_avail;	/* free descriptor count */
	u16			next_avail_desc; /* next descriptor to hand out */
	u16			next_to_clean;	/* next descriptor to reclaim */
	u16			num_desc;	/* ring size in descriptors */
	u32			txd_cmd;	/* command bits applied to descriptors */
	ixgbe_dma_tag_t		*txtag;		/* tag for per-packet mappings */
	char			mtx_name[16];	/* tx_mtx name storage */
#ifndef IXGBE_LEGACY_TX
	pcq_t			*txr_interq;	/* per-ring input queue (multiqueue) */
	void			*txr_si;	/* softint that drains txr_interq */
#endif
#ifdef IXGBE_FDIR
	u16			atr_sample;	/* Flow Director sampling rate */
	u16			atr_count;	/* packets since last FDIR sample */
#endif
	u32			bytes;		/* used for AIM */
	u32			packets;	/* used for AIM */
	/* Soft Stats */
	struct evcnt		tso_tx;		/* TSO packets sent */
	struct evcnt		no_tx_map_avail; /* DMA map exhaustion */
	struct evcnt		no_desc_avail;	/* descriptor exhaustion */
	struct evcnt		total_packets;
	struct evcnt		pcq_drops;	/* drops on txr_interq */
};
408
409
/*
 * The Receive ring, one per rx queue
 */
struct rx_ring {
	struct adapter		*adapter;	/* back-pointer to the softc */
	kmutex_t		rx_mtx;		/* serializes ring access */
	u32			me;		/* ring index */
	u32			tail;		/* tail register offset — presumably RDT(me); confirm */
	union ixgbe_adv_rx_desc	*rx_base;	/* descriptor ring (DMA memory) */
	struct ixgbe_dma_alloc	rxdma;		/* backing DMA allocation for rx_base */
#ifdef LRO
	struct lro_ctrl		lro;		/* software LRO state */
#endif /* LRO */
	bool			lro_enabled;
	bool			hw_rsc;		/* hardware receive-side coalescing on */
	bool			vtag_strip;	/* hardware strips the VLAN tag */
	u16			next_to_refresh; /* next slot to re-arm with a buffer */
	u16			next_to_check;	/* next slot to look at for completions */
	u16			num_desc;	/* ring size in descriptors */
	u16			mbuf_sz;	/* receive buffer (cluster) size */
	char			mtx_name[16];	/* rx_mtx name storage */
	struct ixgbe_rx_buf	*rx_buffers;	/* per-slot mbuf/DMA-map state */
	ixgbe_dma_tag_t		*ptag;		/* tag for packet mappings */

	u32			bytes;		/* Used for AIM calc */
	u32			packets;	/* Used for AIM calc */

	/* Soft stats */
	struct evcnt		rx_copies;	/* small frames copied (IXGBE_RX_COPY) */
	struct evcnt		rx_packets;
	struct evcnt		rx_bytes;
	struct evcnt		rx_discarded;
	struct evcnt		no_jmbuf;	/* jumbo cluster allocation failures */
	u64			rsc_num;	/* RSC-coalesced frames */
#ifdef IXGBE_FDIR
	u64			flm;		/* Flow Director match count */
#endif
};
448
449 #ifdef PCI_IOV
450 #define IXGBE_VF_CTS (1 << 0) /* VF is clear to send. */
451 #define IXGBE_VF_CAP_MAC (1 << 1) /* VF is permitted to change MAC. */
452 #define IXGBE_VF_CAP_VLAN (1 << 2) /* VF is permitted to join vlans. */
453 #define IXGBE_VF_ACTIVE (1 << 3) /* VF is active. */
454
455 #define IXGBE_MAX_VF_MC 30 /* Max number of multicast entries */
456
/* Per-VF state maintained by the PF (one per configured VF). */
struct ixgbe_vf {
	u_int		pool;		/* VMDq pool / mailbox index for this VF */
	u_int		rar_index;	/* receive-address-register slot in use */
	u_int		max_frame_size;	/* largest frame the VF may send/receive */
	uint32_t	flags;		/* IXGBE_VF_* bits defined above */
	uint8_t		ether_addr[ETHER_ADDR_LEN];	/* programmed MAC */
	uint16_t	mc_hash[IXGBE_MAX_VF_MC];	/* multicast filter hashes */
	uint16_t	num_mc_hashes;	/* valid entries in mc_hash[] */
	uint16_t	default_vlan;
	uint16_t	vlan_tag;
	uint16_t	api_ver;	/* negotiated mailbox API (IXGBE_API_VER_*) */
};
469 #endif /* PCI_IOV */
470
/* Our adapter structure: the per-device softc. */
struct adapter {
	struct ixgbe_hw		hw;	/* shared-code hardware state */
	struct ixgbe_osdep	osdep;	/* OS-dependent glue */

	device_t		dev;	/* autoconf device handle */
	struct ifnet		*ifp;	/* network interface */

	struct resource		*pci_mem;
	struct resource		*msix_mem;

	/*
	 * Interrupt resources: this set is
	 * either used for legacy, or for Link
	 * when doing MSIX
	 */
	void			*tag;
	struct resource		*res;

	struct ifmedia		media;	/* media selection state */
	callout_t		timer;	/* periodic tick (watchdog/stats) */
	int			msix;	/* MSI-X message count */
	int			if_flags; /* saved interface flags */

	kmutex_t		core_mtx; /* protects non-ring driver state */

	unsigned int		num_queues;

	/*
	 * Shadow VFTA table, this is needed because
	 * the real vlan filter table gets cleared during
	 * a soft reset and the driver needs to be able
	 * to repopulate it.
	 */
	u32			shadow_vfta[IXGBE_VFTA_SIZE];

	/* Info about the interface */
	u32			optics;		/* optics/connector type */
	u32			fc;		/* local flow ctrl setting */
	int			advertise;	/* link speeds */
	bool			enable_aim;	/* adaptive interrupt moderation */
	bool			link_active;
	u16			max_frame_size;
	u16			num_segs;	/* DMA segments per packet — presumably scatter limit */
	u32			link_speed;
	bool			link_up;
	u32			vector;		/* link interrupt vector */
	u16			dmac;		/* DMA coalescing setting */
	bool			eee_enabled;	/* Energy Efficient Ethernet */
	u32			phy_layer;

	/* Power management-related */
	bool			wol_support;	/* Wake-on-LAN capable */
	u32			wufc;		/* wake-up filter control */

	/* Mbuf cluster size */
	u32			rx_mbuf_sz;

	/* Support for pluggable optics */
	bool			sfp_probe;	/* probe for SFP module on tick */
	void			*link_si;	/* Link tasklet */
	void			*mod_si;	/* SFP tasklet */
	void			*msf_si;	/* Multispeed Fiber */
#ifdef PCI_IOV
	void			*mbx_si;	/* VF -> PF mailbox interrupt */
#endif /* PCI_IOV */
#ifdef IXGBE_FDIR
	int			fdir_reinit;
	void			*fdir_si;	/* Flow Director tasklet */
#endif
	void			*phy_si;	/* PHY intr tasklet */

	/*
	 * Queues:
	 * This is the irq holder, it has
	 * an RX/TX pair of rings associated
	 * with it.
	 */
	struct ix_queue		*queues;

	/*
	 * Transmit rings:
	 * Allocated at run time, an array of rings.
	 */
	struct tx_ring		*tx_rings;
	u32			num_tx_desc;	/* descriptors per TX ring */
	u32			tx_process_limit;

	/*
	 * Receive rings:
	 * Allocated at run time, an array of rings.
	 */
	struct rx_ring		*rx_rings;
	u64			active_queues;	/* bitmask of live queues */
	u32			num_rx_desc;	/* descriptors per RX ring */
	u32			rx_process_limit;

	/* Multicast array memory */
	struct ixgbe_mc_addr	*mta;
	int			num_vfs;	/* configured SR-IOV VFs */
	int			pool;
#ifdef PCI_IOV
	struct ixgbe_vf		*vfs;		/* per-VF state array */
#endif
#ifdef DEV_NETMAP
	void			(*init_locked)(struct adapter *);
	void			(*stop_locked)(void *);
#endif

	/* Misc stats maintained by the driver */
	struct evcnt		mbuf_defrag_failed;
	struct evcnt		mbuf_header_failed;
	struct evcnt		mbuf_packet_failed;
	struct evcnt		efbig_tx_dma_setup;
	struct evcnt		efbig2_tx_dma_setup;
	struct evcnt		einval_tx_dma_setup;
	struct evcnt		other_tx_dma_setup;
	struct evcnt		eagain_tx_dma_setup;
	struct evcnt		enomem_tx_dma_setup;
	struct evcnt		tso_err;
	struct evcnt		watchdog_events;
	struct evcnt		link_irq;
	struct evcnt		handleq;
	struct evcnt		req;

	/* Hardware statistics, PF or VF layout depending on function type. */
	union {
		struct ixgbe_hw_stats pf;
		struct ixgbevf_hw_stats vf;
	} stats;
#if __FreeBSD_version >= 1100036
	/* counter(9) stats */
	u64			ipackets;
	u64			ierrors;
	u64			opackets;
	u64			oerrors;
	u64			ibytes;
	u64			obytes;
	u64			imcasts;
	u64			omcasts;
	u64			iqdrops;
	u64			noproto;
#endif
	struct sysctllog	*sysctllog;	/* sysctl teardown handle */
	const struct sysctlnode *sysctltop;	/* root of this device's sysctl tree */
	ixgbe_extmem_head_t	jcl_head;	/* jumbo cluster list head */
};
617
618
619 /* Precision Time Sync (IEEE 1588) defines */
620 #define ETHERTYPE_IEEE1588 0x88F7
621 #define PICOSECS_PER_TICK 20833
622 #define TSYNC_UDP_PORT 319 /* UDP port for the protocol */
623 #define IXGBE_ADVTXD_TSTAMP 0x00080000
624
625
626 #define IXGBE_CORE_LOCK_INIT(_sc, _name) \
627 mutex_init(&(_sc)->core_mtx, MUTEX_DEFAULT, IPL_SOFTNET)
628 #define IXGBE_CORE_LOCK_DESTROY(_sc) mutex_destroy(&(_sc)->core_mtx)
629 #define IXGBE_TX_LOCK_DESTROY(_sc) mutex_destroy(&(_sc)->tx_mtx)
630 #define IXGBE_RX_LOCK_DESTROY(_sc) mutex_destroy(&(_sc)->rx_mtx)
631 #define IXGBE_CORE_LOCK(_sc) mutex_enter(&(_sc)->core_mtx)
632 #define IXGBE_TX_LOCK(_sc) mutex_enter(&(_sc)->tx_mtx)
633 #define IXGBE_TX_TRYLOCK(_sc) mutex_tryenter(&(_sc)->tx_mtx)
634 #define IXGBE_RX_LOCK(_sc) mutex_enter(&(_sc)->rx_mtx)
635 #define IXGBE_CORE_UNLOCK(_sc) mutex_exit(&(_sc)->core_mtx)
636 #define IXGBE_TX_UNLOCK(_sc) mutex_exit(&(_sc)->tx_mtx)
637 #define IXGBE_RX_UNLOCK(_sc) mutex_exit(&(_sc)->rx_mtx)
638 #define IXGBE_CORE_LOCK_ASSERT(_sc) KASSERT(mutex_owned(&(_sc)->core_mtx))
639 #define IXGBE_TX_LOCK_ASSERT(_sc) KASSERT(mutex_owned(&(_sc)->tx_mtx))
640
641 /* Stats macros */
642 #if __FreeBSD_version >= 1100036
643 #define IXGBE_SET_IPACKETS(sc, count) (sc)->ipackets = (count)
644 #define IXGBE_SET_IERRORS(sc, count) (sc)->ierrors = (count)
645 #define IXGBE_SET_OPACKETS(sc, count) (sc)->opackets = (count)
646 #define IXGBE_SET_OERRORS(sc, count) (sc)->oerrors = (count)
647 #define IXGBE_SET_COLLISIONS(sc, count)
648 #define IXGBE_SET_IBYTES(sc, count) (sc)->ibytes = (count)
649 #define IXGBE_SET_OBYTES(sc, count) (sc)->obytes = (count)
650 #define IXGBE_SET_IMCASTS(sc, count) (sc)->imcasts = (count)
651 #define IXGBE_SET_OMCASTS(sc, count) (sc)->omcasts = (count)
652 #define IXGBE_SET_IQDROPS(sc, count) (sc)->iqdrops = (count)
653 #else
654 #define IXGBE_SET_IPACKETS(sc, count) (sc)->ifp->if_ipackets = (count)
655 #define IXGBE_SET_IERRORS(sc, count) (sc)->ifp->if_ierrors = (count)
656 #define IXGBE_SET_OPACKETS(sc, count) (sc)->ifp->if_opackets = (count)
657 #define IXGBE_SET_OERRORS(sc, count) (sc)->ifp->if_oerrors = (count)
658 #define IXGBE_SET_COLLISIONS(sc, count) (sc)->ifp->if_collisions = (count)
659 #define IXGBE_SET_IBYTES(sc, count) (sc)->ifp->if_ibytes = (count)
660 #define IXGBE_SET_OBYTES(sc, count) (sc)->ifp->if_obytes = (count)
661 #define IXGBE_SET_IMCASTS(sc, count) (sc)->ifp->if_imcasts = (count)
662 #define IXGBE_SET_OMCASTS(sc, count) (sc)->ifp->if_omcasts = (count)
663 #define IXGBE_SET_IQDROPS(sc, count) (sc)->ifp->if_iqdrops = (count)
664 #endif
665
666 /* External PHY register addresses */
667 #define IXGBE_PHY_CURRENT_TEMP 0xC820
668 #define IXGBE_PHY_OVERTEMP_STATUS 0xC830
669
670 /* Sysctl help messages; displayed with sysctl -d */
671 #define IXGBE_SYSCTL_DESC_ADV_SPEED \
672 "\nControl advertised link speed using these flags:\n" \
673 "\t0x1 - advertise 100M\n" \
674 "\t0x2 - advertise 1G\n" \
675 "\t0x4 - advertise 10G\n\n" \
676 "\t100M is only supported on certain 10GBaseT adapters."
677
678 #define IXGBE_SYSCTL_DESC_SET_FC \
679 "\nSet flow control mode using these values:\n" \
680 "\t0 - off\n" \
681 "\t1 - rx pause\n" \
682 "\t2 - tx pause\n" \
683 "\t3 - tx and rx pause"
684
685 static inline bool
686 ixgbe_is_sfp(struct ixgbe_hw *hw)
687 {
688 switch (hw->phy.type) {
689 case ixgbe_phy_sfp_avago:
690 case ixgbe_phy_sfp_ftl:
691 case ixgbe_phy_sfp_intel:
692 case ixgbe_phy_sfp_unknown:
693 case ixgbe_phy_sfp_passive_tyco:
694 case ixgbe_phy_sfp_passive_unknown:
695 case ixgbe_phy_sfp_unsupported:
696 case ixgbe_phy_qsfp_passive_unknown:
697 case ixgbe_phy_qsfp_active_unknown:
698 case ixgbe_phy_qsfp_intel:
699 case ixgbe_phy_qsfp_unknown:
700 return TRUE;
701 default:
702 break;
703 }
704
705 if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
706 return TRUE;
707
708 return FALSE;
709 }
710
711 /* Workaround to make 8.0 buildable */
712 #if __FreeBSD_version >= 800000 && __FreeBSD_version < 800504
/*
 * Compat shim: decide whether a packet must be enqueued on the buf_ring
 * instead of being transmitted directly.
 */
static __inline int
drbr_needs_enqueue(struct ifnet *ifp, struct buf_ring *br)
{
#ifdef ALTQ
	/* With ALTQ enabled, everything must pass through if_snd. */
	if (ALTQ_IS_ENABLED(&ifp->if_snd))
		return (1);
#endif
	/* Otherwise enqueue only behind packets already queued. */
	return (!buf_ring_empty(br));
}
722 #endif
723
724 /*
725 ** Find the number of unrefreshed RX descriptors
726 */
727 static inline u16
728 ixgbe_rx_unrefreshed(struct rx_ring *rxr)
729 {
730 if (rxr->next_to_check > rxr->next_to_refresh)
731 return (rxr->next_to_check - rxr->next_to_refresh - 1);
732 else
733 return ((rxr->num_desc + rxr->next_to_check) -
734 rxr->next_to_refresh - 1);
735 }
736
737 /*
738 ** This checks for a zero mac addr, something that will be likely
739 ** unless the Admin on the Host has created one.
740 */
741 static inline bool
742 ixv_check_ether_addr(u8 *addr)
743 {
744 bool status = TRUE;
745
746 if ((addr[0] == 0 && addr[1]== 0 && addr[2] == 0 &&
747 addr[3] == 0 && addr[4]== 0 && addr[5] == 0))
748 status = FALSE;
749 return (status);
750 }
751
752 /* Shared Prototypes */
753
754 void ixgbe_start(struct ifnet *);
755 void ixgbe_start_locked(struct tx_ring *, struct ifnet *);
756 #ifndef IXGBE_LEGACY_TX
757 int ixgbe_mq_start(struct ifnet *, struct mbuf *);
758 int ixgbe_mq_start_locked(struct ifnet *, struct tx_ring *);
759 void ixgbe_deferred_mq_start(void *);
760 #endif /* !IXGBE_LEGACY_TX */
761
762 int ixgbe_allocate_queues(struct adapter *);
763 int ixgbe_allocate_transmit_buffers(struct tx_ring *);
764 int ixgbe_setup_transmit_structures(struct adapter *);
765 void ixgbe_free_transmit_structures(struct adapter *);
766 int ixgbe_allocate_receive_buffers(struct rx_ring *);
767 int ixgbe_setup_receive_structures(struct adapter *);
768 void ixgbe_free_receive_structures(struct adapter *);
769 void ixgbe_txeof(struct tx_ring *);
770 bool ixgbe_rxeof(struct ix_queue *);
771
772 int ixgbe_dma_malloc(struct adapter *,
773 bus_size_t, struct ixgbe_dma_alloc *, int);
774 void ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
775
776 #ifdef PCI_IOV
777
/* TRUE if "mac" differs from the VF's currently-programmed address. */
static inline boolean_t
ixgbe_vf_mac_changed(struct ixgbe_vf *vf, const uint8_t *mac)
{
	return (bcmp(mac, vf->ether_addr, ETHER_ADDR_LEN) != 0);
}
783
/*
 * Write a one-word mailbox message to "vf", tagging it with the CTS bit
 * once the VF has completed the clear-to-send handshake.
 */
static inline void
ixgbe_send_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{

	if (vf->flags & IXGBE_VF_CTS)
		msg |= IXGBE_VT_MSGTYPE_CTS;

	ixgbe_write_mbx(&adapter->hw, &msg, 1, vf->pool);
}
793
794 static inline void
795 ixgbe_send_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
796 {
797 msg &= IXGBE_VT_MSG_MASK;
798 ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_ACK);
799 }
800
801 static inline void
802 ixgbe_send_vf_nack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
803 {
804 msg &= IXGBE_VT_MSG_MASK;
805 ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_NACK);
806 }
807
/*
 * Handle an ACK received from a VF: a VF that has not completed the
 * clear-to-send handshake gets a NACK back; otherwise no reply is needed.
 */
static inline void
ixgbe_process_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf)
{
	if (!(vf->flags & IXGBE_VF_CTS))
		ixgbe_send_vf_nack(adapter, vf, 0);
}
814
815 static inline enum ixgbe_iov_mode
816 ixgbe_get_iov_mode(struct adapter *adapter)
817 {
818 if (adapter->num_vfs == 0)
819 return (IXGBE_NO_VM);
820 if (adapter->num_queues <= 2)
821 return (IXGBE_64_VM);
822 else if (adapter->num_queues <= 4)
823 return (IXGBE_32_VM);
824 else
825 return (IXGBE_NO_VM);
826 }
827
828 static inline u16
829 ixgbe_max_vfs(enum ixgbe_iov_mode mode)
830 {
831 /*
832 * We return odd numbers below because we
833 * reserve 1 VM's worth of queues for the PF.
834 */
835 switch (mode) {
836 case IXGBE_64_VM:
837 return (63);
838 case IXGBE_32_VM:
839 return (31);
840 case IXGBE_NO_VM:
841 default:
842 return (0);
843 }
844 }
845
846 static inline int
847 ixgbe_vf_queues(enum ixgbe_iov_mode mode)
848 {
849 switch (mode) {
850 case IXGBE_64_VM:
851 return (2);
852 case IXGBE_32_VM:
853 return (4);
854 case IXGBE_NO_VM:
855 default:
856 return (0);
857 }
858 }
859
/* Absolute hardware queue index of queue "num" in VF "vfnum"'s pool. */
static inline int
ixgbe_vf_que_index(enum ixgbe_iov_mode mode, u32 vfnum, int num)
{
	return ((vfnum * ixgbe_vf_queues(mode)) + num);
}
865
/* PF queues occupy the pool just past the last VF's pool. */
static inline int
ixgbe_pf_que_index(enum ixgbe_iov_mode mode, int num)
{
	return (ixgbe_vf_que_index(mode, ixgbe_max_vfs(mode), num));
}
871
872 static inline void
873 ixgbe_update_max_frame(struct adapter * adapter, int max_frame)
874 {
875 if (adapter->max_frame_size < max_frame)
876 adapter->max_frame_size = max_frame;
877 }
878
879 static inline u32
880 ixgbe_get_mrqc(enum ixgbe_iov_mode mode)
881 {
882 u32 mrqc = 0;
883 switch (mode) {
884 case IXGBE_64_VM:
885 mrqc = IXGBE_MRQC_VMDQRSS64EN;
886 break;
887 case IXGBE_32_VM:
888 mrqc = IXGBE_MRQC_VMDQRSS32EN;
889 break;
890 case IXGBE_NO_VM:
891 mrqc = 0;
892 break;
893 default:
894 panic("Unexpected SR-IOV mode %d", mode);
895 }
896 return(mrqc);
897 }
898
899
900 static inline u32
901 ixgbe_get_mtqc(enum ixgbe_iov_mode mode)
902 {
903 uint32_t mtqc = 0;
904 switch (mode) {
905 case IXGBE_64_VM:
906 mtqc |= IXGBE_MTQC_64VF | IXGBE_MTQC_VT_ENA;
907 break;
908 case IXGBE_32_VM:
909 mtqc |= IXGBE_MTQC_32VF | IXGBE_MTQC_VT_ENA;
910 break;
911 case IXGBE_NO_VM:
912 mtqc = IXGBE_MTQC_64Q_1PB;
913 break;
914 default:
915 panic("Unexpected SR-IOV mode %d", mode);
916 }
917 return(mtqc);
918 }
919 #endif /* PCI_IOV */
920
921 #endif /* _IXGBE_H_ */
922