/******************************************************************************
2
3 Copyright (c) 2001-2015, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*
34 * Copyright (c) 2011 The NetBSD Foundation, Inc.
35 * All rights reserved.
36 *
37 * This code is derived from software contributed to The NetBSD Foundation
38 * by Coyote Point Systems, Inc.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
50 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
51 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
52 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
53 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
54 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
55 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
56 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
57 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
58 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
59 * POSSIBILITY OF SUCH DAMAGE.
60 */
61 /*$FreeBSD: head/sys/dev/ixgbe/ixgbe.h 303890 2016-08-09 19:32:06Z dumbbell $*/
62 /*$NetBSD: ixgbe.h,v 1.20 2017/01/25 07:46:53 msaitoh Exp $*/
63
64
65 #ifndef _IXGBE_H_
66 #define _IXGBE_H_
67
68
69 #include <sys/param.h>
70 #include <sys/reboot.h>
71 #include <sys/systm.h>
72 #if __FreeBSD_version >= 800000
73 #include <sys/buf_ring.h>
74 #endif
75 #include <sys/mbuf.h>
76 #include <sys/protosw.h>
77 #include <sys/socket.h>
78 #include <sys/malloc.h>
79 #include <sys/kernel.h>
80 #include <sys/module.h>
81 #include <sys/sockio.h>
82
#include <net/if.h>
#include <net/if_arp.h>
#include <net/bpf.h>
#include <net/if_ether.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlanvar.h>
93
94 #include <netinet/in_systm.h>
95 #include <netinet/in.h>
96 #include <netinet/ip.h>
97 #include <netinet/ip6.h>
98 #include <netinet/tcp.h>
99 #include <netinet/udp.h>
100
101 #include <sys/bus.h>
102 #include <dev/pci/pcivar.h>
103 #include <dev/pci/pcireg.h>
104 #include <sys/proc.h>
105 #include <sys/sysctl.h>
106 #include <sys/endian.h>
107 #include <sys/workqueue.h>
108 #include <sys/cpu.h>
109 #include <sys/interrupt.h>
110
111 #ifdef PCI_IOV
112 #include <sys/nv.h>
113 #include <sys/iov_schema.h>
114 #include <dev/pci/pci_iov.h>
115 #endif
116
117 #include "ixgbe_netbsd.h"
118 #include "ixgbe_api.h"
119 #include "ixgbe_common.h"
120 #include "ixgbe_phy.h"
121 #include "ixgbe_vf.h"
122
123 /* Tunables */
124
125 /*
126 * TxDescriptors Valid Range: 64-4096 Default Value: 256 This value is the
127 * number of transmit descriptors allocated by the driver. Increasing this
128 * value allows the driver to queue more transmits. Each descriptor is 16
129 * bytes. Performance tests have show the 2K value to be optimal for top
130 * performance.
131 */
132 #define DEFAULT_TXD 1024
133 #define PERFORM_TXD 2048
134 #define MAX_TXD 4096
135 #define MIN_TXD 64
136
137 /*
138 * RxDescriptors Valid Range: 64-4096 Default Value: 256 This value is the
139 * number of receive descriptors allocated for each RX queue. Increasing this
140 * value allows the driver to buffer more incoming packets. Each descriptor
141 * is 16 bytes. A receive buffer is also allocated for each descriptor.
142 *
143 * Note: with 8 rings and a dual port card, it is possible to bump up
144 * against the system mbuf pool limit, you can tune nmbclusters
145 * to adjust for this.
146 */
147 #define DEFAULT_RXD 1024
148 #define PERFORM_RXD 2048
149 #define MAX_RXD 4096
150 #define MIN_RXD 64
151
152 /* Alignment for rings */
153 #define DBA_ALIGN 128
154
155 /*
156 * This is the max watchdog interval, ie. the time that can
157 * pass between any two TX clean operations, such only happening
158 * when the TX hardware is functioning.
159 */
160 #define IXGBE_WATCHDOG (10 * hz)
161
162 /*
163 * This parameters control when the driver calls the routine to reclaim
164 * transmit descriptors.
165 */
166 #define IXGBE_TX_CLEANUP_THRESHOLD (adapter->num_tx_desc / 8)
167 #define IXGBE_TX_OP_THRESHOLD (adapter->num_tx_desc / 32)
168
169 /* These defines are used in MTU calculations */
170 #define IXGBE_MAX_FRAME_SIZE 9728
171 #define IXGBE_MTU_HDR (ETHER_HDR_LEN + ETHER_CRC_LEN)
172 #define IXGBE_MTU_HDR_VLAN (ETHER_HDR_LEN + ETHER_CRC_LEN + \
173 ETHER_VLAN_ENCAP_LEN)
174 #define IXGBE_MAX_MTU (IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR)
175 #define IXGBE_MAX_MTU_VLAN (IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR_VLAN)
176
177 /* Flow control constants */
178 #define IXGBE_FC_PAUSE 0xFFFF
179 #define IXGBE_FC_HI 0x20000
180 #define IXGBE_FC_LO 0x10000
181
182 /*
183 * Used for optimizing small rx mbufs. Effort is made to keep the copy
184 * small and aligned for the CPU L1 cache.
185 *
186 * MHLEN is typically 168 bytes, giving us 8-byte alignment. Getting
187 * 32 byte alignment needed for the fast bcopy results in 8 bytes being
188 * wasted. Getting 64 byte alignment, which _should_ be ideal for
189 * modern Intel CPUs, results in 40 bytes wasted and a significant drop
190 * in observed efficiency of the optimization, 97.9% -> 81.8%.
191 */
192 #define MPKTHSIZE (offsetof(struct _mbuf_dummy, m_pktdat))
193 #define IXGBE_RX_COPY_HDR_PADDED ((((MPKTHSIZE - 1) / 32) + 1) * 32)
194 #define IXGBE_RX_COPY_LEN (MSIZE - IXGBE_RX_COPY_HDR_PADDED)
195 #define IXGBE_RX_COPY_ALIGN (IXGBE_RX_COPY_HDR_PADDED - MPKTHSIZE)
196
197 /* Keep older OS drivers building... */
198 #if !defined(SYSCTL_ADD_UQUAD)
199 #define SYSCTL_ADD_UQUAD SYSCTL_ADD_QUAD
200 #endif
201
202 /* Defines for printing debug information */
203 #define DEBUG_INIT 0
204 #define DEBUG_IOCTL 0
205 #define DEBUG_HW 0
206
207 #define INIT_DEBUGOUT(S) if (DEBUG_INIT) printf(S "\n")
208 #define INIT_DEBUGOUT1(S, A) if (DEBUG_INIT) printf(S "\n", A)
209 #define INIT_DEBUGOUT2(S, A, B) if (DEBUG_INIT) printf(S "\n", A, B)
210 #define IOCTL_DEBUGOUT(S) if (DEBUG_IOCTL) printf(S "\n")
211 #define IOCTL_DEBUGOUT1(S, A) if (DEBUG_IOCTL) printf(S "\n", A)
212 #define IOCTL_DEBUGOUT2(S, A, B) if (DEBUG_IOCTL) printf(S "\n", A, B)
213 #define HW_DEBUGOUT(S) if (DEBUG_HW) printf(S "\n")
214 #define HW_DEBUGOUT1(S, A) if (DEBUG_HW) printf(S "\n", A)
215 #define HW_DEBUGOUT2(S, A, B) if (DEBUG_HW) printf(S "\n", A, B)
216
217 #define MAX_NUM_MULTICAST_ADDRESSES 128
218 #define IXGBE_82598_SCATTER 100
219 #define IXGBE_82599_SCATTER 32
220 #define MSIX_82598_BAR 3
221 #define MSIX_82599_BAR 4
222 #define IXGBE_TSO_SIZE 262140
223 #define IXGBE_RX_HDR 128
224 #define IXGBE_VFTA_SIZE 128
225 #define IXGBE_BR_SIZE 4096
226 #define IXGBE_QUEUE_MIN_FREE 32
227 #define IXGBE_MAX_TX_BUSY 10
228 #define IXGBE_QUEUE_HUNG 0x80000000
229
230 #define IXV_EITR_DEFAULT 128
231
232 /* IOCTL define to gather SFP+ Diagnostic data */
233 #define SIOCGI2C SIOCGIFGENERIC
234
235 /* Offload bits in mbuf flag */
236 #define M_CSUM_OFFLOAD \
237 (M_CSUM_IPv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_UDPv6|M_CSUM_TCPv6)
238
239 /* Backward compatibility items for very old versions */
240 #ifndef pci_find_cap
241 #define pci_find_cap pci_find_extcap
242 #endif
243
244 #ifndef DEVMETHOD_END
245 #define DEVMETHOD_END { NULL, NULL }
246 #endif
247
248 /*
249 * Interrupt Moderation parameters
250 */
251 #define IXGBE_LOW_LATENCY 128
252 #define IXGBE_AVE_LATENCY 400
253 #define IXGBE_BULK_LATENCY 1200
254
255 /* Using 1FF (the max value), the interval is ~1.05ms */
256 #define IXGBE_LINK_ITR_QUANTA 0x1FF
257 #define IXGBE_LINK_ITR ((IXGBE_LINK_ITR_QUANTA << 3) & \
258 IXGBE_EITR_ITR_INT_MASK)
259
260 /* MAC type macros */
261 #define IXGBE_IS_X550VF(_adapter) \
262 ((_adapter->hw.mac.type == ixgbe_mac_X550_vf) || \
263 (_adapter->hw.mac.type == ixgbe_mac_X550EM_x_vf))
264
265 #define IXGBE_IS_VF(_adapter) \
266 (IXGBE_IS_X550VF(_adapter) || \
267 (_adapter->hw.mac.type == ixgbe_mac_X540_vf) || \
268 (_adapter->hw.mac.type == ixgbe_mac_82599_vf))
269
270 #ifdef PCI_IOV
271 #define IXGBE_VF_INDEX(vmdq) ((vmdq) / 32)
272 #define IXGBE_VF_BIT(vmdq) (1 << ((vmdq) % 32))
273
274 #define IXGBE_VT_MSG_MASK 0xFFFF
275
276 #define IXGBE_VT_MSGINFO(msg) \
277 (((msg) & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT)
278
279 #define IXGBE_VF_GET_QUEUES_RESP_LEN 5
280
281 #define IXGBE_API_VER_1_0 0
282 #define IXGBE_API_VER_2_0 1 /* Solaris API. Not supported. */
283 #define IXGBE_API_VER_1_1 2
284 #define IXGBE_API_VER_UNKNOWN UINT16_MAX
285
/*
 * SR-IOV pool arrangement: 64 pools x 2 queues each, 32 pools x 4 queues
 * each, or SR-IOV disabled (see ixgbe_vf_queues()/ixgbe_max_vfs() below).
 */
enum ixgbe_iov_mode {
	IXGBE_64_VM,
	IXGBE_32_VM,
	IXGBE_NO_VM
};
291 #endif /* PCI_IOV */
292
293
294 /*
295 *****************************************************************************
296 * vendor_info_array
297 *
298 * This array contains the list of Subvendor/Subdevice IDs on which the driver
299 * should load.
300 *
301 *****************************************************************************
302 */
/* One entry of the device-attach table (see vendor_info_array comment above). */
typedef struct _ixgbe_vendor_info_t {
	unsigned int	vendor_id;	/* PCI vendor ID */
	unsigned int	device_id;	/* PCI device ID */
	unsigned int	subvendor_id;	/* PCI subsystem vendor ID */
	unsigned int	subdevice_id;	/* PCI subsystem device ID */
	unsigned int	index;		/* table slot; use not visible here -- confirm in ixgbe.c */
} ixgbe_vendor_info_t;
310
/* This is used to get SFP+ module data (diagnostics read over I2C). */
struct ixgbe_i2c_req {
	u8	dev_addr;	/* I2C device address of the module */
	u8	offset;		/* starting byte offset within the device */
	u8	len;		/* number of bytes requested (at most 8) */
	u8	data[8];	/* returned data bytes */
};
318
319
/* Per-descriptor transmit software state. */
struct ixgbe_tx_buf {
	union ixgbe_adv_tx_desc	*eop;	/* end-of-packet descriptor of this chain */
	struct mbuf		*m_head;	/* mbuf chain being transmitted */
	bus_dmamap_t		map;	/* DMA map loaded with m_head */
};
325
/* Per-descriptor receive software state. */
struct ixgbe_rx_buf {
	struct mbuf	*buf;	/* mbuf backing this descriptor */
	struct mbuf	*fmp;	/* first mbuf of a multi-descriptor frame */
	bus_dmamap_t	pmap;	/* DMA map for buf */
	u_int		flags;	/* IXGBE_RX_COPY when payload was copied out */
#define IXGBE_RX_COPY	0x01
	uint64_t	addr;	/* buffer DMA address; presumably cached for ring refresh -- confirm in ixgbe.c */
};
334
/*
 * Bus dma allocation structure used by ixgbe_dma_malloc and ixgbe_dma_free.
 */
struct ixgbe_dma_alloc {
	bus_addr_t		dma_paddr;	/* bus (physical) address */
	void			*dma_vaddr;	/* kernel virtual address */
	ixgbe_dma_tag_t		*dma_tag;	/* tag used for the allocation */
	bus_dmamap_t		dma_map;	/* map backing the memory */
	bus_dma_segment_t	dma_seg;	/* the single segment stored here */
	bus_size_t		dma_size;	/* allocation size in bytes */
};
346
/* One multicast table entry: MAC address plus its VMDq pool index. */
struct ixgbe_mc_addr {
	u8	addr[IXGBE_ETH_LENGTH_OF_ADDRESS];	/* multicast MAC address */
	u32	vmdq;					/* VMDq pool the address belongs to */
};
351
/*
** Driver queue struct: this is the interrupt container
** for the associated tx and rx ring.
*/
struct ix_queue {
	struct adapter	*adapter;	/* back-pointer to the softc */
	u32		msix;		/* This queue's MSIX vector */
	u32		eims;		/* This queue's EIMS bit */
	u32		eitr_setting;	/* saved EITR (interrupt throttle) value */
	u32		me;		/* queue index */
	struct resource	*res;		/* interrupt resource */
	void		*tag;		/* established interrupt handle */
	int		busy;		/* progress/hang state -- see IXGBE_QUEUE_HUNG */
	struct tx_ring	*txr;		/* TX ring served by this vector */
	struct rx_ring	*rxr;		/* RX ring served by this vector */
	void		*que_si;	/* softint handle for deferred queue work */
	struct evcnt	irqs;		/* interrupts taken on this queue */
	char		namebuf[32];	/* name buffer; presumably for sysctl/evcnt registration -- confirm */
	char		evnamebuf[32];	/* evcnt group name buffer -- confirm in ixgbe.c */
};
372
/*
 * The transmit ring, one per queue
 */
struct tx_ring {
	struct adapter		*adapter;	/* back-pointer to the softc */
	kmutex_t		tx_mtx;		/* serializes access to this ring */
	u32			me;		/* ring index */
	u32			tail;		/* tail (TDT) register; offset vs. value not visible here -- confirm */
	int			busy;		/* watchdog progress state (IXGBE_MAX_TX_BUSY et al.) */
	union ixgbe_adv_tx_desc	*tx_base;	/* descriptor ring (DMA memory) */
	struct ixgbe_tx_buf	*tx_buffers;	/* per-descriptor software state */
	struct ixgbe_dma_alloc	txdma;		/* DMA bookkeeping for tx_base */
	volatile u16		tx_avail;	/* free descriptors remaining */
	u16			next_avail_desc; /* producer index */
	u16			next_to_clean;	/* consumer (cleanup) index */
	u16			num_desc;	/* descriptors in the ring */
	u32			txd_cmd;	/* command bits applied to descriptors */
	ixgbe_dma_tag_t		*txtag;		/* DMA tag for packet maps */
	char			mtx_name[16];	/* name given to tx_mtx */
#ifndef IXGBE_LEGACY_TX
	struct buf_ring		*br;		/* multiqueue transmit buffering */
	void			*txq_si;	/* softint for deferred transmit */
#endif
#ifdef IXGBE_FDIR
	u16			atr_sample;	/* Flow Director sampling rate */
	u16			atr_count;	/* packets since the last sample */
#endif
	u32			bytes;	/* used for AIM */
	u32			packets;
	/* Soft Stats */
	struct evcnt		tso_tx;		/* TSO transmissions */
	struct evcnt		no_tx_map_avail; /* DMA map exhaustion events */
	struct evcnt		no_desc_avail;	/* descriptor exhaustion events */
	struct evcnt		total_packets;	/* packets handed to hardware */
};
408
409
/*
 * The Receive ring, one per rx queue
 */
struct rx_ring {
	struct adapter		*adapter;	/* back-pointer to the softc */
	kmutex_t		rx_mtx;		/* serializes access to this ring */
	u32			me;		/* ring index */
	u32			tail;		/* tail (RDT) register; offset vs. value not visible here -- confirm */
	union ixgbe_adv_rx_desc	*rx_base;	/* descriptor ring (DMA memory) */
	struct ixgbe_dma_alloc	rxdma;		/* DMA bookkeeping for rx_base */
#ifdef LRO
	struct lro_ctrl		lro;		/* software LRO state */
#endif /* LRO */
	bool			lro_enabled;	/* software LRO active */
	bool			hw_rsc;		/* hardware receive-side coalescing */
	bool			vtag_strip;	/* hardware strips VLAN tags */
	u16			next_to_refresh; /* next descriptor to hand back to HW */
	u16			next_to_check;	/* next descriptor to process */
	u16			num_desc;	/* descriptors in the ring */
	u16			mbuf_sz;	/* receive buffer (cluster) size */
	char			mtx_name[16];	/* name given to rx_mtx */
	struct ixgbe_rx_buf	*rx_buffers;	/* per-descriptor software state */
	ixgbe_dma_tag_t		*ptag;		/* DMA tag for packet maps */

	u32			bytes;	/* Used for AIM calc */
	u32			packets;

	/* Soft stats */
	struct evcnt		rx_irq;
	struct evcnt		rx_copies;	/* small frames copied (IXGBE_RX_COPY) */
	struct evcnt		rx_packets;
	struct evcnt		rx_bytes;
	struct evcnt		rx_discarded;
	struct evcnt		no_jmbuf;	/* jumbo buffer allocation failures */
	u64			rsc_num;	/* RSC-coalesced events -- confirm in ixgbe.c */
#ifdef IXGBE_FDIR
	u64			flm;		/* Flow Director matches -- confirm */
#endif
};
449
450 #ifdef PCI_IOV
451 #define IXGBE_VF_CTS (1 << 0) /* VF is clear to send. */
452 #define IXGBE_VF_CAP_MAC (1 << 1) /* VF is permitted to change MAC. */
453 #define IXGBE_VF_CAP_VLAN (1 << 2) /* VF is permitted to join vlans. */
454 #define IXGBE_VF_ACTIVE (1 << 3) /* VF is active. */
455
456 #define IXGBE_MAX_VF_MC 30 /* Max number of multicast entries */
457
/* Per-VF state kept by the PF (one entry per SR-IOV virtual function). */
struct ixgbe_vf {
	u_int		pool;		/* VMDq pool / mailbox index of this VF */
	u_int		rar_index;	/* receive-address-register slot for ether_addr */
	u_int		max_frame_size;	/* VF's max frame size */
	uint32_t	flags;		/* IXGBE_VF_* bits defined above */
	uint8_t		ether_addr[ETHER_ADDR_LEN];	/* MAC assigned to the VF */
	uint16_t	mc_hash[IXGBE_MAX_VF_MC];	/* multicast filter hashes */
	uint16_t	num_mc_hashes;	/* valid entries in mc_hash */
	uint16_t	default_vlan;	/* default VLAN; semantics vs. vlan_tag not visible here -- confirm */
	uint16_t	vlan_tag;	/* VLAN tag in use -- confirm in ixgbe.c */
	uint16_t	api_ver;	/* negotiated mailbox API (IXGBE_API_VER_*) */
};
470 #endif /* PCI_IOV */
471
/* Our adapter structure (per-device softc) */
struct adapter {
	struct ixgbe_hw		hw;	/* shared-code hardware state */
	struct ixgbe_osdep	osdep;	/* OS-dependent glue (bus handles) */

	device_t		dev;	/* autoconf device handle */
	struct ifnet		*ifp;	/* network interface */

	struct resource		*pci_mem;	/* PCI memory BAR */
	struct resource		*msix_mem;	/* MSIX table BAR */

	/*
	 * Interrupt resources: this set is
	 * either used for legacy, or for Link
	 * when doing MSIX
	 */
	void			*tag;
	struct resource		*res;

	struct ifmedia		media;	/* media/link selection */
	callout_t		timer;	/* periodic timer callout */
	int			msix;	/* MSIX state; exact meaning not visible here -- confirm */
	int			if_flags;	/* saved interface flags */

	kmutex_t		core_mtx;	/* protects adapter-wide state */

	unsigned int		num_queues;	/* RX/TX queue pairs in use */

	/*
	** Shadow VFTA table, this is needed because
	** the real vlan filter table gets cleared during
	** a soft reset and the driver needs to be able
	** to repopulate it.
	*/
	u32			shadow_vfta[IXGBE_VFTA_SIZE];

	/* Info about the interface */
	u32			optics;		/* media/optics type */
	u32			fc;	/* local flow ctrl setting */
	int			advertise;	/* link speeds (see IXGBE_SYSCTL_DESC_ADV_SPEED) */
	bool			enable_aim;	/* adaptive interrupt moderation */
	bool			link_active;	/* link reported up to the stack */
	u16			max_frame_size;	/* max on-wire frame size */
	u16			num_segs;	/* max DMA segments per packet */
	u32			link_speed;	/* current link speed */
	bool			link_up;	/* raw link state */
	u32			vector;		/* link/admin interrupt vector -- confirm */
	u16			dmac;		/* DMA coalescing setting */
	bool			eee_enabled;	/* Energy Efficient Ethernet */
	u32			phy_layer;	/* supported physical layers */

	/* Power management-related */
	bool			wol_support;	/* wake-on-LAN capable */
	u32			wufc;		/* wake-up filter control bits */

	/* Mbuf cluster size */
	u32			rx_mbuf_sz;

	/* Support for pluggable optics */
	bool			sfp_probe;	/* probe for late-inserted SFP */
	void			*link_si;	/* Link tasklet */
	void			*mod_si;	/* SFP tasklet */
	void			*msf_si;	/* Multispeed Fiber */
#ifdef PCI_IOV
	void			*mbx_si;	/* VF -> PF mailbox interrupt */
#endif /* PCI_IOV */
#ifdef IXGBE_FDIR
	int			fdir_reinit;	/* Flow Director reinit pending */
	void			*fdir_si;	/* Flow Director tasklet */
#endif
	void			*phy_si;	/* PHY intr tasklet */

	/*
	** Queues:
	** This is the irq holder, it has
	** and RX/TX pair or rings associated
	** with it.
	*/
	struct ix_queue		*queues;

	/*
	 * Transmit rings:
	 *	Allocated at run time, an array of rings.
	 */
	struct tx_ring		*tx_rings;
	u32			num_tx_desc;	/* descriptors per TX ring */
	u32			tx_process_limit;	/* per-pass TX cleanup budget */

	/*
	 * Receive rings:
	 *	Allocated at run time, an array of rings.
	 */
	struct rx_ring		*rx_rings;
	u64			active_queues;	/* bitmask of active queues */
	u32			num_rx_desc;	/* descriptors per RX ring */
	u32			rx_process_limit;	/* per-pass RX budget */

	/* Multicast array memory */
	struct ixgbe_mc_addr	*mta;
	int			num_vfs;	/* configured SR-IOV VFs */
	int			pool;		/* pool index; presumably the PF's VMDq pool -- confirm */
#ifdef PCI_IOV
	struct ixgbe_vf		*vfs;		/* per-VF state array (num_vfs entries) */
#endif
#ifdef DEV_NETMAP
	void			(*init_locked)(struct adapter *);	/* netmap hook */
	void			(*stop_locked)(void *);			/* netmap hook */
#endif

	/* Misc stats maintained by the driver */
	struct evcnt		dropped_pkts;
	struct evcnt		mbuf_defrag_failed;
	struct evcnt		mbuf_header_failed;
	struct evcnt		mbuf_packet_failed;
	struct evcnt		efbig_tx_dma_setup;
	struct evcnt		efbig2_tx_dma_setup;
	struct evcnt		m_defrag_failed;
	struct evcnt		einval_tx_dma_setup;
	struct evcnt		other_tx_dma_setup;
	struct evcnt		eagain_tx_dma_setup;
	struct evcnt		enomem_tx_dma_setup;
	struct evcnt		watchdog_events;
	struct evcnt		tso_err;
	struct evcnt		link_irq;
	struct evcnt		morerx;
	struct evcnt		moretx;
	struct evcnt		txloops;
	struct evcnt		handleq;
	struct evcnt		req;

	union {
		struct ixgbe_hw_stats pf;
		struct ixgbevf_hw_stats vf;
	} stats;
#if __FreeBSD_version >= 1100036
	/* counter(9) stats */
	u64			ipackets;
	u64			ierrors;
	u64			opackets;
	u64			oerrors;
	u64			ibytes;
	u64			obytes;
	u64			imcasts;
	u64			omcasts;
	u64			iqdrops;
	u64			noproto;
#endif
	struct sysctllog	*sysctllog;	/* handle for tearing down sysctls */
	const struct sysctlnode	*sysctltop;	/* root of this device's sysctl tree */
	ixgbe_extmem_head_t	jcl_head;	/* jumbo cluster list head -- confirm in ixgbe_netbsd.c */
};
623
624
625 /* Precision Time Sync (IEEE 1588) defines */
626 #define ETHERTYPE_IEEE1588 0x88F7
627 #define PICOSECS_PER_TICK 20833
628 #define TSYNC_UDP_PORT 319 /* UDP port for the protocol */
629 #define IXGBE_ADVTXD_TSTAMP 0x00080000
630
631
632 #define IXGBE_CORE_LOCK_INIT(_sc, _name) \
633 mutex_init(&(_sc)->core_mtx, MUTEX_DEFAULT, IPL_SOFTNET)
634 #define IXGBE_CORE_LOCK_DESTROY(_sc) mutex_destroy(&(_sc)->core_mtx)
635 #define IXGBE_TX_LOCK_DESTROY(_sc) mutex_destroy(&(_sc)->tx_mtx)
636 #define IXGBE_RX_LOCK_DESTROY(_sc) mutex_destroy(&(_sc)->rx_mtx)
637 #define IXGBE_CORE_LOCK(_sc) mutex_enter(&(_sc)->core_mtx)
638 #define IXGBE_TX_LOCK(_sc) mutex_enter(&(_sc)->tx_mtx)
639 #define IXGBE_TX_TRYLOCK(_sc) mutex_tryenter(&(_sc)->tx_mtx)
640 #define IXGBE_RX_LOCK(_sc) mutex_enter(&(_sc)->rx_mtx)
641 #define IXGBE_CORE_UNLOCK(_sc) mutex_exit(&(_sc)->core_mtx)
642 #define IXGBE_TX_UNLOCK(_sc) mutex_exit(&(_sc)->tx_mtx)
643 #define IXGBE_RX_UNLOCK(_sc) mutex_exit(&(_sc)->rx_mtx)
644 #define IXGBE_CORE_LOCK_ASSERT(_sc) KASSERT(mutex_owned(&(_sc)->core_mtx))
645 #define IXGBE_TX_LOCK_ASSERT(_sc) KASSERT(mutex_owned(&(_sc)->tx_mtx))
646
647 /* Stats macros */
648 #if __FreeBSD_version >= 1100036
649 #define IXGBE_SET_IPACKETS(sc, count) (sc)->ipackets = (count)
650 #define IXGBE_SET_IERRORS(sc, count) (sc)->ierrors = (count)
651 #define IXGBE_SET_OPACKETS(sc, count) (sc)->opackets = (count)
652 #define IXGBE_SET_OERRORS(sc, count) (sc)->oerrors = (count)
653 #define IXGBE_SET_COLLISIONS(sc, count)
654 #define IXGBE_SET_IBYTES(sc, count) (sc)->ibytes = (count)
655 #define IXGBE_SET_OBYTES(sc, count) (sc)->obytes = (count)
656 #define IXGBE_SET_IMCASTS(sc, count) (sc)->imcasts = (count)
657 #define IXGBE_SET_OMCASTS(sc, count) (sc)->omcasts = (count)
658 #define IXGBE_SET_IQDROPS(sc, count) (sc)->iqdrops = (count)
659 #else
660 #define IXGBE_SET_IPACKETS(sc, count) (sc)->ifp->if_ipackets = (count)
661 #define IXGBE_SET_IERRORS(sc, count) (sc)->ifp->if_ierrors = (count)
662 #define IXGBE_SET_OPACKETS(sc, count) (sc)->ifp->if_opackets = (count)
663 #define IXGBE_SET_OERRORS(sc, count) (sc)->ifp->if_oerrors = (count)
664 #define IXGBE_SET_COLLISIONS(sc, count) (sc)->ifp->if_collisions = (count)
665 #define IXGBE_SET_IBYTES(sc, count) (sc)->ifp->if_ibytes = (count)
666 #define IXGBE_SET_OBYTES(sc, count) (sc)->ifp->if_obytes = (count)
667 #define IXGBE_SET_IMCASTS(sc, count) (sc)->ifp->if_imcasts = (count)
668 #define IXGBE_SET_OMCASTS(sc, count) (sc)->ifp->if_omcasts = (count)
669 #define IXGBE_SET_IQDROPS(sc, count) (sc)->ifp->if_iqdrops = (count)
670 #endif
671
672 /* External PHY register addresses */
673 #define IXGBE_PHY_CURRENT_TEMP 0xC820
674 #define IXGBE_PHY_OVERTEMP_STATUS 0xC830
675
676 /* Sysctl help messages; displayed with sysctl -d */
677 #define IXGBE_SYSCTL_DESC_ADV_SPEED \
678 "\nControl advertised link speed using these flags:\n" \
679 "\t0x1 - advertise 100M\n" \
680 "\t0x2 - advertise 1G\n" \
681 "\t0x4 - advertise 10G\n\n" \
682 "\t100M is only supported on certain 10GBaseT adapters."
683
684 #define IXGBE_SYSCTL_DESC_SET_FC \
685 "\nSet flow control mode using these values:\n" \
686 "\t0 - off\n" \
687 "\t1 - rx pause\n" \
688 "\t2 - tx pause\n" \
689 "\t3 - tx and rx pause"
690
691 static inline bool
692 ixgbe_is_sfp(struct ixgbe_hw *hw)
693 {
694 switch (hw->phy.type) {
695 case ixgbe_phy_sfp_avago:
696 case ixgbe_phy_sfp_ftl:
697 case ixgbe_phy_sfp_intel:
698 case ixgbe_phy_sfp_unknown:
699 case ixgbe_phy_sfp_passive_tyco:
700 case ixgbe_phy_sfp_passive_unknown:
701 case ixgbe_phy_sfp_unsupported:
702 case ixgbe_phy_qsfp_passive_unknown:
703 case ixgbe_phy_qsfp_active_unknown:
704 case ixgbe_phy_qsfp_intel:
705 case ixgbe_phy_qsfp_unknown:
706 return TRUE;
707 default:
708 break;
709 }
710
711 if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
712 return TRUE;
713
714 return FALSE;
715 }
716
/* Workaround to make 8.0 buildable */
#if __FreeBSD_version >= 800000 && __FreeBSD_version < 800504
/*
 * Returns non-zero when a transmit must go through the enqueue path:
 * either ALTQ is enabled on the interface, or the buf_ring already
 * holds queued packets.
 */
static __inline int
drbr_needs_enqueue(struct ifnet *ifp, struct buf_ring *br)
{
	int enq = 0;

#ifdef ALTQ
	if (ALTQ_IS_ENABLED(&ifp->if_snd))
		enq = 1;
#endif
	if (enq == 0)
		enq = !buf_ring_empty(br);

	return (enq);
}
#endif
729
730 /*
731 ** Find the number of unrefreshed RX descriptors
732 */
733 static inline u16
734 ixgbe_rx_unrefreshed(struct rx_ring *rxr)
735 {
736 if (rxr->next_to_check > rxr->next_to_refresh)
737 return (rxr->next_to_check - rxr->next_to_refresh - 1);
738 else
739 return ((rxr->num_desc + rxr->next_to_check) -
740 rxr->next_to_refresh - 1);
741 }
742
743 /*
744 ** This checks for a zero mac addr, something that will be likely
745 ** unless the Admin on the Host has created one.
746 */
747 static inline bool
748 ixv_check_ether_addr(u8 *addr)
749 {
750 bool status = TRUE;
751
752 if ((addr[0] == 0 && addr[1]== 0 && addr[2] == 0 &&
753 addr[3] == 0 && addr[4]== 0 && addr[5] == 0))
754 status = FALSE;
755 return (status);
756 }
757
758 /* Shared Prototypes */
759
760 #ifdef IXGBE_LEGACY_TX
761 void ixgbe_start(struct ifnet *);
762 void ixgbe_start_locked(struct tx_ring *, struct ifnet *);
763 #else /* ! IXGBE_LEGACY_TX */
764 int ixgbe_mq_start(struct ifnet *, struct mbuf *);
765 int ixgbe_mq_start_locked(struct ifnet *, struct tx_ring *);
766 void ixgbe_qflush(struct ifnet *);
767 void ixgbe_deferred_mq_start(void *, int);
768 #endif /* IXGBE_LEGACY_TX */
769
770 int ixgbe_allocate_queues(struct adapter *);
771 int ixgbe_allocate_transmit_buffers(struct tx_ring *);
772 int ixgbe_setup_transmit_structures(struct adapter *);
773 void ixgbe_free_transmit_structures(struct adapter *);
774 int ixgbe_allocate_receive_buffers(struct rx_ring *);
775 int ixgbe_setup_receive_structures(struct adapter *);
776 void ixgbe_free_receive_structures(struct adapter *);
777 void ixgbe_txeof(struct tx_ring *);
778 bool ixgbe_rxeof(struct ix_queue *);
779
780 int ixgbe_dma_malloc(struct adapter *,
781 bus_size_t, struct ixgbe_dma_alloc *, int);
782 void ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
783
784 #ifdef PCI_IOV
785
786 static inline boolean_t
787 ixgbe_vf_mac_changed(struct ixgbe_vf *vf, const uint8_t *mac)
788 {
789 return (bcmp(mac, vf->ether_addr, ETHER_ADDR_LEN) != 0);
790 }
791
792 static inline void
793 ixgbe_send_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
794 {
795
796 if (vf->flags & IXGBE_VF_CTS)
797 msg |= IXGBE_VT_MSGTYPE_CTS;
798
799 ixgbe_write_mbx(&adapter->hw, &msg, 1, vf->pool);
800 }
801
802 static inline void
803 ixgbe_send_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
804 {
805 msg &= IXGBE_VT_MSG_MASK;
806 ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_ACK);
807 }
808
809 static inline void
810 ixgbe_send_vf_nack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
811 {
812 msg &= IXGBE_VT_MSG_MASK;
813 ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_NACK);
814 }
815
816 static inline void
817 ixgbe_process_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf)
818 {
819 if (!(vf->flags & IXGBE_VF_CTS))
820 ixgbe_send_vf_nack(adapter, vf, 0);
821 }
822
823 static inline enum ixgbe_iov_mode
824 ixgbe_get_iov_mode(struct adapter *adapter)
825 {
826 if (adapter->num_vfs == 0)
827 return (IXGBE_NO_VM);
828 if (adapter->num_queues <= 2)
829 return (IXGBE_64_VM);
830 else if (adapter->num_queues <= 4)
831 return (IXGBE_32_VM);
832 else
833 return (IXGBE_NO_VM);
834 }
835
836 static inline u16
837 ixgbe_max_vfs(enum ixgbe_iov_mode mode)
838 {
839 /*
840 * We return odd numbers below because we
841 * reserve 1 VM's worth of queues for the PF.
842 */
843 switch (mode) {
844 case IXGBE_64_VM:
845 return (63);
846 case IXGBE_32_VM:
847 return (31);
848 case IXGBE_NO_VM:
849 default:
850 return (0);
851 }
852 }
853
854 static inline int
855 ixgbe_vf_queues(enum ixgbe_iov_mode mode)
856 {
857 switch (mode) {
858 case IXGBE_64_VM:
859 return (2);
860 case IXGBE_32_VM:
861 return (4);
862 case IXGBE_NO_VM:
863 default:
864 return (0);
865 }
866 }
867
868 static inline int
869 ixgbe_vf_que_index(enum ixgbe_iov_mode mode, u32 vfnum, int num)
870 {
871 return ((vfnum * ixgbe_vf_queues(mode)) + num);
872 }
873
874 static inline int
875 ixgbe_pf_que_index(enum ixgbe_iov_mode mode, int num)
876 {
877 return (ixgbe_vf_que_index(mode, ixgbe_max_vfs(mode), num));
878 }
879
880 static inline void
881 ixgbe_update_max_frame(struct adapter * adapter, int max_frame)
882 {
883 if (adapter->max_frame_size < max_frame)
884 adapter->max_frame_size = max_frame;
885 }
886
887 static inline u32
888 ixgbe_get_mrqc(enum ixgbe_iov_mode mode)
889 {
890 u32 mrqc = 0;
891 switch (mode) {
892 case IXGBE_64_VM:
893 mrqc = IXGBE_MRQC_VMDQRSS64EN;
894 break;
895 case IXGBE_32_VM:
896 mrqc = IXGBE_MRQC_VMDQRSS32EN;
897 break;
898 case IXGBE_NO_VM:
899 mrqc = 0;
900 break;
901 default:
902 panic("Unexpected SR-IOV mode %d", mode);
903 }
904 return(mrqc);
905 }
906
907
908 static inline u32
909 ixgbe_get_mtqc(enum ixgbe_iov_mode mode)
910 {
911 uint32_t mtqc = 0;
912 switch (mode) {
913 case IXGBE_64_VM:
914 mtqc |= IXGBE_MTQC_64VF | IXGBE_MTQC_VT_ENA;
915 break;
916 case IXGBE_32_VM:
917 mtqc |= IXGBE_MTQC_32VF | IXGBE_MTQC_VT_ENA;
918 break;
919 case IXGBE_NO_VM:
920 mtqc = IXGBE_MTQC_64Q_1PB;
921 break;
922 default:
923 panic("Unexpected SR-IOV mode %d", mode);
924 }
925 return(mtqc);
926 }
927 #endif /* PCI_IOV */
928
929 #endif /* _IXGBE_H_ */
930