ixgbe.h revision 1.15 1 /******************************************************************************
2
3 Copyright (c) 2001-2015, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*
34 * Copyright (c) 2011 The NetBSD Foundation, Inc.
35 * All rights reserved.
36 *
37 * This code is derived from software contributed to The NetBSD Foundation
38 * by Coyote Point Systems, Inc.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
50 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
51 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
52 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
53 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
54 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
55 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
56 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
57 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
58 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
59 * POSSIBILITY OF SUCH DAMAGE.
60 */
61 /*$FreeBSD: head/sys/dev/ixgbe/ixgbe.h 289238 2015-10-13 17:34:18Z sbruno $*/
62 /*$NetBSD: ixgbe.h,v 1.15 2016/12/02 10:34:23 msaitoh Exp $*/
63
64
65 #ifndef _IXGBE_H_
66 #define _IXGBE_H_
67
68
69 #include <sys/param.h>
70 #include <sys/reboot.h>
71 #include <sys/systm.h>
72 #if __FreeBSD_version >= 800000
73 #include <sys/buf_ring.h>
74 #endif
75 #include <sys/mbuf.h>
76 #include <sys/protosw.h>
77 #include <sys/socket.h>
78 #include <sys/malloc.h>
79 #include <sys/kernel.h>
80 #include <sys/module.h>
81 #include <sys/sockio.h>
82
83 #include <net/if.h>
84 #include <net/if_arp.h>
85 #include <net/bpf.h>
86 #include <net/if_ether.h>
87 #include <net/if_dl.h>
88 #include <net/if_media.h>
89
90 #include <net/bpf.h>
91 #include <net/if_types.h>
92 #include <net/if_vlanvar.h>
93
94 #include <netinet/in_systm.h>
95 #include <netinet/in.h>
96 #include <netinet/ip.h>
97 #include <netinet/ip6.h>
98 #include <netinet/tcp.h>
99 #include <netinet/udp.h>
100
101 #include <sys/bus.h>
102 #include <dev/pci/pcivar.h>
103 #include <dev/pci/pcireg.h>
104 #include <sys/proc.h>
105 #include <sys/sysctl.h>
106 #include <sys/endian.h>
107 #include <sys/workqueue.h>
108 #include <sys/cpu.h>
109 #include <sys/interrupt.h>
110
111 #ifdef PCI_IOV
112 #include <sys/nv.h>
113 #include <sys/iov_schema.h>
114 #include <dev/pci/pci_iov.h>
115 #endif
116
117 #include "ixgbe_netbsd.h"
118 #include "ixgbe_api.h"
119 #include "ixgbe_common.h"
120 #include "ixgbe_phy.h"
121 #include "ixgbe_vf.h"
122
123 /* Tunables */
124
125 /*
126 * TxDescriptors Valid Range: 64-4096 Default Value: 256 This value is the
127 * number of transmit descriptors allocated by the driver. Increasing this
128 * value allows the driver to queue more transmits. Each descriptor is 16
129 * bytes. Performance tests have show the 2K value to be optimal for top
130 * performance.
131 */
132 #define DEFAULT_TXD 1024
133 #define PERFORM_TXD 2048
134 #define MAX_TXD 4096
135 #define MIN_TXD 64
136
137 /*
138 * RxDescriptors Valid Range: 64-4096 Default Value: 256 This value is the
139 * number of receive descriptors allocated for each RX queue. Increasing this
140 * value allows the driver to buffer more incoming packets. Each descriptor
141 * is 16 bytes. A receive buffer is also allocated for each descriptor.
142 *
143 * Note: with 8 rings and a dual port card, it is possible to bump up
144 * against the system mbuf pool limit, you can tune nmbclusters
145 * to adjust for this.
146 */
147 #define DEFAULT_RXD 1024
148 #define PERFORM_RXD 2048
149 #define MAX_RXD 4096
150 #define MIN_RXD 64
151
152 /* Alignment for rings */
153 #define DBA_ALIGN 128
154
155 /*
156 * This parameter controls the maximum no of times the driver will loop in
157 * the isr. Minimum Value = 1
158 */
159 #define MAX_LOOP 10
160
161 /*
162 * This is the max watchdog interval, ie. the time that can
163 * pass between any two TX clean operations, such only happening
164 * when the TX hardware is functioning.
165 */
166 #define IXGBE_WATCHDOG (10 * hz)
167
168 /*
169 * This parameters control when the driver calls the routine to reclaim
170 * transmit descriptors.
171 */
172 #define IXGBE_TX_CLEANUP_THRESHOLD (adapter->num_tx_desc / 8)
173 #define IXGBE_TX_OP_THRESHOLD (adapter->num_tx_desc / 32)
174
175 /* These defines are used in MTU calculations */
176 #define IXGBE_MAX_FRAME_SIZE 9728
177 #define IXGBE_MTU_HDR (ETHER_HDR_LEN + ETHER_CRC_LEN + \
178 ETHER_VLAN_ENCAP_LEN)
179 #define IXGBE_MAX_MTU (IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR)
180
181 /* Flow control constants */
182 #define IXGBE_FC_PAUSE 0xFFFF
183 #define IXGBE_FC_HI 0x20000
184 #define IXGBE_FC_LO 0x10000
185
186 /*
187 * Used for optimizing small rx mbufs. Effort is made to keep the copy
188 * small and aligned for the CPU L1 cache.
189 *
190 * MHLEN is typically 168 bytes, giving us 8-byte alignment. Getting
191 * 32 byte alignment needed for the fast bcopy results in 8 bytes being
192 * wasted. Getting 64 byte alignment, which _should_ be ideal for
193 * modern Intel CPUs, results in 40 bytes wasted and a significant drop
194 * in observed efficiency of the optimization, 97.9% -> 81.8%.
195 */
196 #define MPKTHSIZE (offsetof(struct _mbuf_dummy, m_pktdat))
197 #define IXGBE_RX_COPY_HDR_PADDED ((((MPKTHSIZE - 1) / 32) + 1) * 32)
198 #define IXGBE_RX_COPY_LEN (MSIZE - IXGBE_RX_COPY_HDR_PADDED)
199 #define IXGBE_RX_COPY_ALIGN (IXGBE_RX_COPY_HDR_PADDED - MPKTHSIZE)
200
201 /* Keep older OS drivers building... */
202 #if !defined(SYSCTL_ADD_UQUAD)
203 #define SYSCTL_ADD_UQUAD SYSCTL_ADD_QUAD
204 #endif
205
206 /* Defines for printing debug information */
207 #define DEBUG_INIT 0
208 #define DEBUG_IOCTL 0
209 #define DEBUG_HW 0
210
211 #define INIT_DEBUGOUT(S) if (DEBUG_INIT) printf(S "\n")
212 #define INIT_DEBUGOUT1(S, A) if (DEBUG_INIT) printf(S "\n", A)
213 #define INIT_DEBUGOUT2(S, A, B) if (DEBUG_INIT) printf(S "\n", A, B)
214 #define IOCTL_DEBUGOUT(S) if (DEBUG_IOCTL) printf(S "\n")
215 #define IOCTL_DEBUGOUT1(S, A) if (DEBUG_IOCTL) printf(S "\n", A)
216 #define IOCTL_DEBUGOUT2(S, A, B) if (DEBUG_IOCTL) printf(S "\n", A, B)
217 #define HW_DEBUGOUT(S) if (DEBUG_HW) printf(S "\n")
218 #define HW_DEBUGOUT1(S, A) if (DEBUG_HW) printf(S "\n", A)
219 #define HW_DEBUGOUT2(S, A, B) if (DEBUG_HW) printf(S "\n", A, B)
220
221 #define MAX_NUM_MULTICAST_ADDRESSES 128
222 #define IXGBE_82598_SCATTER 100
223 #define IXGBE_82599_SCATTER 32
224 #define MSIX_82598_BAR 3
225 #define MSIX_82599_BAR 4
226 #define IXGBE_TSO_SIZE 262140
227 #define IXGBE_TX_BUFFER_SIZE ((u32) 1514)
228 #define IXGBE_RX_HDR 128
229 #define IXGBE_VFTA_SIZE 128
230 #define IXGBE_BR_SIZE 4096
231 #define IXGBE_QUEUE_MIN_FREE 32
232 #define IXGBE_MAX_TX_BUSY 10
233 #define IXGBE_QUEUE_HUNG 0x80000000
234
/*
 * Default EITR (interrupt throttle) setting for the ixv (VF) driver.
 * This was accidentally defined twice; keep a single definition.
 */
#define IXV_EITR_DEFAULT	128

239 /* IOCTL define to gather SFP+ Diagnostic data */
240 #define SIOCGI2C SIOCGIFGENERIC
241
242 /* Offload bits in mbuf flag */
243 #define M_CSUM_OFFLOAD \
244 (M_CSUM_IPv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_UDPv6|M_CSUM_TCPv6)
245
246 /* Backward compatibility items for very old versions */
247 #ifndef pci_find_cap
248 #define pci_find_cap pci_find_extcap
249 #endif
250
251 #ifndef DEVMETHOD_END
252 #define DEVMETHOD_END { NULL, NULL }
253 #endif
254
255 /*
256 * Interrupt Moderation parameters
257 */
258 #define IXGBE_LOW_LATENCY 128
259 #define IXGBE_AVE_LATENCY 400
260 #define IXGBE_BULK_LATENCY 1200
261 #define IXGBE_LINK_ITR 2000
262
263 /* MAC type macros */
264 #define IXGBE_IS_X550VF(_adapter) \
265 ((_adapter->hw.mac.type == ixgbe_mac_X550_vf) || \
266 (_adapter->hw.mac.type == ixgbe_mac_X550EM_x_vf))
267
268 #define IXGBE_IS_VF(_adapter) \
269 (IXGBE_IS_X550VF(_adapter) || \
270 (_adapter->hw.mac.type == ixgbe_mac_X540_vf) || \
271 (_adapter->hw.mac.type == ixgbe_mac_82599_vf))
272
273 #ifdef PCI_IOV
274 #define IXGBE_VF_INDEX(vmdq) ((vmdq) / 32)
275 #define IXGBE_VF_BIT(vmdq) (1 << ((vmdq) % 32))
276
277 #define IXGBE_VT_MSG_MASK 0xFFFF
278
279 #define IXGBE_VT_MSGINFO(msg) \
280 (((msg) & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT)
281
282 #define IXGBE_VF_GET_QUEUES_RESP_LEN 5
283
284 #define IXGBE_API_VER_1_0 0
285 #define IXGBE_API_VER_2_0 1 /* Solaris API. Not supported. */
286 #define IXGBE_API_VER_1_1 2
287 #define IXGBE_API_VER_UNKNOWN UINT16_MAX
288
/*
 * SR-IOV pool layout: 64 pools of 2 queues each, 32 pools of 4 queues
 * each, or SR-IOV disabled (see ixgbe_vf_queues()/ixgbe_max_vfs() below).
 */
enum ixgbe_iov_mode {
	IXGBE_64_VM,	/* 64 VMDq pools, 2 queues per pool */
	IXGBE_32_VM,	/* 32 VMDq pools, 4 queues per pool */
	IXGBE_NO_VM	/* SR-IOV not in use */
};
294 #endif /* PCI_IOV */
295
296
/*
 *****************************************************************************
 * vendor_info_array
 *
 * This array contains the list of Subvendor/Subdevice IDs on which the driver
 * should load.
 *
 *****************************************************************************
 */
typedef struct _ixgbe_vendor_info_t {
	unsigned int    vendor_id;	/* PCI vendor ID */
	unsigned int    device_id;	/* PCI device ID */
	unsigned int    subvendor_id;	/* PCI subsystem vendor ID */
	unsigned int    subdevice_id;	/* PCI subsystem device ID */
	unsigned int    index;		/* driver-private; not used in this header */
} ixgbe_vendor_info_t;
313
/* This is used to get SFP+ module data (an I2C read request/response) */
struct ixgbe_i2c_req {
	u8 dev_addr;	/* I2C device address of the module */
	u8 offset;	/* starting byte offset within the device */
	u8 len;		/* number of bytes requested (data[] holds up to 8) */
	u8 data[8];	/* bytes returned by the read */
};
321
322
/* Per-descriptor transmit software state */
struct ixgbe_tx_buf {
	union ixgbe_adv_tx_desc	*eop;	/* end-of-packet descriptor of this chain */
	struct mbuf	*m_head;	/* mbuf chain mapped for this slot */
	bus_dmamap_t	map;		/* DMA map backing m_head */
};
328
/* Per-descriptor receive software state */
struct ixgbe_rx_buf {
	struct mbuf	*buf;	/* mbuf whose data backs this descriptor */
	struct mbuf	*fmp;	/* first mbuf of a multi-descriptor frame */
	bus_dmamap_t	pmap;	/* DMA map for buf */
	u_int		flags;
#define IXGBE_RX_COPY	0x01	/* small frame copied out (see IXGBE_RX_COPY_LEN) */
	uint64_t	addr;	/* bus address programmed into the descriptor */
};
337
/*
 * Bus dma allocation structure used by ixgbe_dma_malloc and ixgbe_dma_free.
 */
struct ixgbe_dma_alloc {
	bus_addr_t		dma_paddr;	/* device-visible (bus) address */
	void			*dma_vaddr;	/* kernel virtual address */
	ixgbe_dma_tag_t		*dma_tag;	/* tag the memory was created with */
	bus_dmamap_t		dma_map;	/* map used to load the memory */
	bus_dma_segment_t	dma_seg;	/* the single backing segment */
	bus_size_t		dma_size;	/* size of the allocation, bytes */
};
349
/* One entry of the driver's multicast address table */
struct ixgbe_mc_addr {
	u8	addr[IXGBE_ETH_LENGTH_OF_ADDRESS];	/* multicast MAC address */
	u32	vmdq;					/* VMDq pool for this address */
};
354
/*
 * Driver queue struct: this is the interrupt container
 * for the associated tx and rx ring.
 */
struct ix_queue {
	struct adapter	*adapter;	/* back pointer to the softc */
	u32		msix;		/* This queue's MSIX vector */
	u32		eims;		/* This queue's EIMS bit */
	u32		eitr_setting;	/* interrupt moderation (EITR) value */
	u32		me;		/* queue index */
	struct resource	*res;		/* interrupt resource */
	void		*tag;		/* established-interrupt handle */
	int		busy;		/* progress marker (IXGBE_QUEUE_HUNG etc.) */
	struct tx_ring	*txr;		/* paired transmit ring */
	struct rx_ring	*rxr;		/* paired receive ring */
	void		*que_si;	/* softint for deferred queue processing */
	struct evcnt	irqs;		/* interrupt counter */
	char		namebuf[32];	/* name storage (sysctl/evcnt; set at attach) */
	char		evnamebuf[32];	/* event-counter name storage */
};
375
/*
 * The transmit ring, one per queue
 */
struct tx_ring {
	struct adapter		*adapter;	/* back pointer to the softc */
	kmutex_t		tx_mtx;		/* serializes access to this ring */
	u32			me;		/* ring index */
	u32			tail;		/* tail register (offset); TODO confirm */
	int			busy;		/* progress marker for the watchdog */
	union ixgbe_adv_tx_desc	*tx_base;	/* descriptor ring (DMA memory) */
	struct ixgbe_tx_buf	*tx_buffers;	/* per-descriptor software state */
	struct ixgbe_dma_alloc	txdma;		/* backing DMA allocation */
	volatile u16		tx_avail;	/* free descriptors remaining */
	u16			next_avail_desc; /* producer index */
	u16			next_to_clean;	/* cleanup (consumer) index */
	u16			num_desc;	/* ring size, in descriptors */
	u32			txd_cmd;	/* command bits applied to descriptors */
	ixgbe_dma_tag_t		*txtag;		/* DMA tag for packet maps */
	char			mtx_name[16];	/* name of tx_mtx */
#ifndef IXGBE_LEGACY_TX
	struct buf_ring		*br;		/* multiqueue buffer ring */
	void			*txq_si;	/* softint for deferred transmit */
#endif
#ifdef IXGBE_FDIR
	u16			atr_sample;	/* Flow Director sampling rate */
	u16			atr_count;	/* packets since last sample */
#endif
	u32			bytes;		/* used for AIM */
	u32			packets;	/* used for AIM */
	/* Soft Stats */
	struct evcnt		tso_tx;		/* TSO transmissions */
	struct evcnt		no_tx_map_avail; /* out of DMA maps */
	struct evcnt		no_desc_avail;	/* out of descriptors */
	struct evcnt		total_packets;	/* packets sent */
};
411
412
/*
 * The Receive ring, one per rx queue
 */
struct rx_ring {
	struct adapter		*adapter;	/* back pointer to the softc */
	kmutex_t		rx_mtx;		/* serializes access to this ring */
	u32			me;		/* ring index */
	u32			tail;		/* tail register (offset); TODO confirm */
	union ixgbe_adv_rx_desc	*rx_base;	/* descriptor ring (DMA memory) */
	struct ixgbe_dma_alloc	rxdma;		/* backing DMA allocation */
#ifdef LRO
	struct lro_ctrl		lro;		/* software LRO state */
#endif /* LRO */
	bool			lro_enabled;	/* software LRO active */
	bool			hw_rsc;		/* hardware receive-side coalescing */
	bool			vtag_strip;	/* hardware VLAN tag stripping */
	u16			next_to_refresh; /* next slot to hand back to HW */
	u16			next_to_check;	/* next slot to process */
	u16			num_desc;	/* ring size, in descriptors */
	u16			mbuf_sz;	/* receive buffer (cluster) size */
	char			mtx_name[16];	/* name of rx_mtx */
	struct ixgbe_rx_buf	*rx_buffers;	/* per-descriptor software state */
	ixgbe_dma_tag_t		*ptag;		/* DMA tag for packet buffers */

	u32			bytes;		/* Used for AIM calc */
	u32			packets;

	/* Soft stats */
	struct evcnt		rx_irq;		/* interrupts */
	struct evcnt		rx_copies;	/* small frames copied (IXGBE_RX_COPY) */
	struct evcnt		rx_packets;	/* packets received */
	struct evcnt		rx_bytes;	/* bytes received */
	struct evcnt		rx_discarded;	/* frames dropped */
	struct evcnt		no_jmbuf;	/* jumbo buffer allocation failures */
	u64			rsc_num;	/* RSC coalesce count */
#ifdef IXGBE_FDIR
	u64			flm;		/* Flow Director matches */
#endif
};
452
453 #ifdef PCI_IOV
454 #define IXGBE_VF_CTS (1 << 0) /* VF is clear to send. */
455 #define IXGBE_VF_CAP_MAC (1 << 1) /* VF is permitted to change MAC. */
456 #define IXGBE_VF_CAP_VLAN (1 << 2) /* VF is permitted to join vlans. */
457 #define IXGBE_VF_ACTIVE (1 << 3) /* VF is active. */
458
459 #define IXGBE_MAX_VF_MC 30 /* Max number of multicast entries */
460
/* Per-VF state kept by the PF, one entry per virtual function */
struct ixgbe_vf {
	u_int		pool;		/* VMDq pool / VF number */
	u_int		rar_index;	/* receive-address register slot */
	u_int		max_frame_size;	/* VF's maximum frame size */
	uint32_t	flags;		/* IXGBE_VF_* bits defined above */
	uint8_t		ether_addr[ETHER_ADDR_LEN];	/* station address */
	uint16_t	mc_hash[IXGBE_MAX_VF_MC];	/* multicast hash entries */
	uint16_t	num_mc_hashes;	/* valid entries in mc_hash[] */
	uint16_t	default_vlan;	/* administratively assigned VLAN */
	uint16_t	vlan_tag;	/* VLAN the VF requested */
	uint16_t	api_ver;	/* negotiated mailbox API (IXGBE_API_VER_*) */
};
473 #endif /* PCI_IOV */
474
/* Our adapter structure: the per-device softc for the ixgbe/ixv driver */
struct adapter {
	struct ifnet		*ifp;		/* network interface */
	struct ixgbe_hw		hw;		/* shared-code hardware state */

	struct ixgbe_osdep	osdep;		/* OS-dependent glue */
	device_t		dev;		/* autoconf device handle */

	struct resource		*pci_mem;	/* PCI memory BAR */
	struct resource		*msix_mem;	/* MSIX table BAR */

	/*
	 * Interrupt resources: this set is
	 * either used for legacy, or for Link
	 * when doing MSIX
	 */
	void			*tag;
	struct resource 	*res;

	struct ifmedia		media;		/* media selection state */
	callout_t		timer;		/* periodic timer callout */
	int			msix;		/* MSIX vectors in use */
	int			if_flags;	/* saved interface flags */

	kmutex_t		core_mtx;	/* protects softc-wide state */

	unsigned int		num_queues;	/* number of RX/TX queue pairs */

	/*
	** Shadow VFTA table, this is needed because
	** the real vlan filter table gets cleared during
	** a soft reset and the driver needs to be able
	** to repopulate it.
	*/
	u32			shadow_vfta[IXGBE_VFTA_SIZE];

	/* Info about the interface */
	u32			optics;
	u32			fc;		/* local flow ctrl setting */
	int			advertise;	/* link speeds */
	bool			link_active;
	u16			max_frame_size;
	u16			num_segs;	/* max DMA segments per packet */
	u32			link_speed;
	bool			link_up;
	u32 			vector;		/* link/mailbox MSIX vector */
	u16			dmac;		/* DMA coalescing setting */
	bool			eee_enabled;	/* Energy Efficient Ethernet */
	u32			phy_layer;

	/* Power management-related */
	bool			wol_support;	/* wake-on-LAN capable */
	u32			wufc;		/* wake-up filter control */

	/* Mbuf cluster size */
	u32			rx_mbuf_sz;

	/* Support for pluggable optics */
	bool			sfp_probe;
	void			*link_si;	/* Link tasklet */
	void			*mod_si;	/* SFP tasklet */
	void			*msf_si;	/* Multispeed Fiber */
#ifdef PCI_IOV
	void			*mbx_si;	/* VF -> PF mailbox interrupt */
#endif /* PCI_IOV */
#ifdef IXGBE_FDIR
	int			fdir_reinit;
	void			*fdir_si;	/* Flow Director tasklet */
#endif
	void			*phy_si;	/* PHY intr tasklet */

	/*
	** Queues:
	**   This is the irq holder, it has
	**   and RX/TX pair or rings associated
	**   with it.
	*/
	struct ix_queue		*queues;

	/*
	 * Transmit rings:
	 *	Allocated at run time, an array of rings.
	 */
	struct tx_ring		*tx_rings;
	u32			num_tx_desc;
	u32			tx_process_limit;

	/*
	 * Receive rings:
	 *	Allocated at run time, an array of rings.
	 */
	struct rx_ring		*rx_rings;
	u64			active_queues;	/* bitmask of live queues */
	u32			num_rx_desc;
	u32			rx_process_limit;

	/* Multicast array memory */
	struct ixgbe_mc_addr	*mta;
	int			num_vfs;	/* configured virtual functions */
	int			pool;		/* PF's VMDq pool */
#ifdef PCI_IOV
	struct ixgbe_vf		*vfs;		/* per-VF state array */
#endif
#ifdef DEV_NETMAP
	/* netmap hooks into the driver's init/stop paths */
	void 			(*init_locked)(struct adapter *);
	void 			(*stop_locked)(void *);
#endif

	/* Misc stats maintained by the driver */
	struct evcnt		dropped_pkts;
	struct evcnt		mbuf_defrag_failed;
	struct evcnt		mbuf_header_failed;
	struct evcnt		mbuf_packet_failed;
	struct evcnt		efbig_tx_dma_setup;
	struct evcnt		efbig2_tx_dma_setup;
	struct evcnt		m_defrag_failed;
	struct evcnt		einval_tx_dma_setup;
	struct evcnt		other_tx_dma_setup;
	struct evcnt		eagain_tx_dma_setup;
	struct evcnt		enomem_tx_dma_setup;
	struct evcnt		watchdog_events;
	struct evcnt		tso_err;
	struct evcnt		link_irq;
	struct evcnt		morerx;
	struct evcnt		moretx;
	struct evcnt		txloops;
	struct evcnt		handleq;
	struct evcnt		req;

	/* Hardware statistics: PF and VF layouts overlap, only one is used */
	union {
		struct ixgbe_hw_stats pf;
		struct ixgbevf_hw_stats vf;
	} stats;
#if __FreeBSD_version >= 1100036
	/* counter(9) stats */
	u64			ipackets;
	u64			ierrors;
	u64			opackets;
	u64			oerrors;
	u64			ibytes;
	u64			obytes;
	u64			imcasts;
	u64			omcasts;
	u64			iqdrops;
	u64			noproto;
#endif
	struct sysctllog	*sysctllog;
	ixgbe_extmem_head_t	jcl_head;	/* jumbo cluster list head */
};
624
625
626 /* Precision Time Sync (IEEE 1588) defines */
627 #define ETHERTYPE_IEEE1588 0x88F7
628 #define PICOSECS_PER_TICK 20833
629 #define TSYNC_UDP_PORT 319 /* UDP port for the protocol */
630 #define IXGBE_ADVTXD_TSTAMP 0x00080000
631
632
633 #define IXGBE_CORE_LOCK_INIT(_sc, _name) \
634 mutex_init(&(_sc)->core_mtx, MUTEX_DEFAULT, IPL_SOFTNET)
635 #define IXGBE_CORE_LOCK_DESTROY(_sc) mutex_destroy(&(_sc)->core_mtx)
636 #define IXGBE_TX_LOCK_DESTROY(_sc) mutex_destroy(&(_sc)->tx_mtx)
637 #define IXGBE_RX_LOCK_DESTROY(_sc) mutex_destroy(&(_sc)->rx_mtx)
638 #define IXGBE_CORE_LOCK(_sc) mutex_enter(&(_sc)->core_mtx)
639 #define IXGBE_TX_LOCK(_sc) mutex_enter(&(_sc)->tx_mtx)
640 #define IXGBE_TX_TRYLOCK(_sc) mutex_tryenter(&(_sc)->tx_mtx)
641 #define IXGBE_RX_LOCK(_sc) mutex_enter(&(_sc)->rx_mtx)
642 #define IXGBE_CORE_UNLOCK(_sc) mutex_exit(&(_sc)->core_mtx)
643 #define IXGBE_TX_UNLOCK(_sc) mutex_exit(&(_sc)->tx_mtx)
644 #define IXGBE_RX_UNLOCK(_sc) mutex_exit(&(_sc)->rx_mtx)
645 #define IXGBE_CORE_LOCK_ASSERT(_sc) KASSERT(mutex_owned(&(_sc)->core_mtx))
646 #define IXGBE_TX_LOCK_ASSERT(_sc) KASSERT(mutex_owned(&(_sc)->tx_mtx))
647
648 /* Stats macros */
649 #if __FreeBSD_version >= 1100036
650 #define IXGBE_SET_IPACKETS(sc, count) (sc)->ipackets = (count)
651 #define IXGBE_SET_IERRORS(sc, count) (sc)->ierrors = (count)
652 #define IXGBE_SET_OPACKETS(sc, count) (sc)->opackets = (count)
653 #define IXGBE_SET_OERRORS(sc, count) (sc)->oerrors = (count)
654 #define IXGBE_SET_COLLISIONS(sc, count)
655 #define IXGBE_SET_IBYTES(sc, count) (sc)->ibytes = (count)
656 #define IXGBE_SET_OBYTES(sc, count) (sc)->obytes = (count)
657 #define IXGBE_SET_IMCASTS(sc, count) (sc)->imcasts = (count)
658 #define IXGBE_SET_OMCASTS(sc, count) (sc)->omcasts = (count)
659 #define IXGBE_SET_IQDROPS(sc, count) (sc)->iqdrops = (count)
660 #else
661 #define IXGBE_SET_IPACKETS(sc, count) (sc)->ifp->if_ipackets = (count)
662 #define IXGBE_SET_IERRORS(sc, count) (sc)->ifp->if_ierrors = (count)
663 #define IXGBE_SET_OPACKETS(sc, count) (sc)->ifp->if_opackets = (count)
664 #define IXGBE_SET_OERRORS(sc, count) (sc)->ifp->if_oerrors = (count)
665 #define IXGBE_SET_COLLISIONS(sc, count) (sc)->ifp->if_collisions = (count)
666 #define IXGBE_SET_IBYTES(sc, count) (sc)->ifp->if_ibytes = (count)
667 #define IXGBE_SET_OBYTES(sc, count) (sc)->ifp->if_obytes = (count)
668 #define IXGBE_SET_IMCASTS(sc, count) (sc)->ifp->if_imcasts = (count)
669 #define IXGBE_SET_OMCASTS(sc, count) (sc)->ifp->if_omcasts = (count)
670 #define IXGBE_SET_IQDROPS(sc, count) (sc)->ifp->if_iqdrops = (count)
671 #endif
672
673 /* External PHY register addresses */
674 #define IXGBE_PHY_CURRENT_TEMP 0xC820
675 #define IXGBE_PHY_OVERTEMP_STATUS 0xC830
676
677 /* Sysctl help messages; displayed with sysctl -d */
678 #define IXGBE_SYSCTL_DESC_ADV_SPEED \
679 "\nControl advertised link speed using these flags:\n" \
680 "\t0x1 - advertise 100M\n" \
681 "\t0x2 - advertise 1G\n" \
682 "\t0x4 - advertise 10G\n\n" \
683 "\t100M is only supported on certain 10GBaseT adapters.\n"
684
685 #define IXGBE_SYSCTL_DESC_SET_FC \
686 "\nSet flow control mode using these values:\n" \
687 "\t0 - off\n" \
688 "\t1 - rx pause\n" \
689 "\t2 - tx pause\n" \
690 "\t3 - tx and rx pause"
691
/*
 * Return TRUE when the detected PHY type is a pluggable SFP/QSFP
 * module, i.e. the port may need module identification and setup.
 */
static inline bool
ixgbe_is_sfp(struct ixgbe_hw *hw)
{
	switch (hw->phy.type) {
	case ixgbe_phy_sfp_avago:
	case ixgbe_phy_sfp_ftl:
	case ixgbe_phy_sfp_intel:
	case ixgbe_phy_sfp_unknown:
	case ixgbe_phy_sfp_passive_tyco:
	case ixgbe_phy_sfp_passive_unknown:
	case ixgbe_phy_qsfp_passive_unknown:
	case ixgbe_phy_qsfp_active_unknown:
	case ixgbe_phy_qsfp_intel:
	case ixgbe_phy_qsfp_unknown:
		return TRUE;
	default:
		return FALSE;
	}
}
711
/* Workaround to make 8.0 buildable */
#if __FreeBSD_version >= 800000 && __FreeBSD_version < 800504
/*
 * Compat shim for old FreeBSD releases that lack this helper:
 * a packet must go through the buf_ring enqueue path when ALTQ is
 * active on the interface, or when the ring already holds packets
 * (preserves transmit ordering).
 */
static __inline int
drbr_needs_enqueue(struct ifnet *ifp, struct buf_ring *br)
{
#ifdef ALTQ
	if (ALTQ_IS_ENABLED(&ifp->if_snd))
		return (1);
#endif
	return (!buf_ring_empty(br));
}
#endif
724
725 /*
726 ** Find the number of unrefreshed RX descriptors
727 */
728 static inline u16
729 ixgbe_rx_unrefreshed(struct rx_ring *rxr)
730 {
731 if (rxr->next_to_check > rxr->next_to_refresh)
732 return (rxr->next_to_check - rxr->next_to_refresh - 1);
733 else
734 return ((rxr->num_desc + rxr->next_to_check) -
735 rxr->next_to_refresh - 1);
736 }
737
738 /*
739 ** This checks for a zero mac addr, something that will be likely
740 ** unless the Admin on the Host has created one.
741 */
742 static inline bool
743 ixv_check_ether_addr(u8 *addr)
744 {
745 bool status = TRUE;
746
747 if ((addr[0] == 0 && addr[1]== 0 && addr[2] == 0 &&
748 addr[3] == 0 && addr[4]== 0 && addr[5] == 0))
749 status = FALSE;
750 return (status);
751 }
752
753 /* Shared Prototypes */
754
755 #ifdef IXGBE_LEGACY_TX
756 void ixgbe_start(struct ifnet *);
757 void ixgbe_start_locked(struct tx_ring *, struct ifnet *);
758 #else /* ! IXGBE_LEGACY_TX */
759 int ixgbe_mq_start(struct ifnet *, struct mbuf *);
760 int ixgbe_mq_start_locked(struct ifnet *, struct tx_ring *);
761 void ixgbe_qflush(struct ifnet *);
762 void ixgbe_deferred_mq_start(void *, int);
763 #endif /* IXGBE_LEGACY_TX */
764
765 int ixgbe_allocate_queues(struct adapter *);
766 int ixgbe_allocate_transmit_buffers(struct tx_ring *);
767 int ixgbe_setup_transmit_structures(struct adapter *);
768 void ixgbe_free_transmit_structures(struct adapter *);
769 int ixgbe_allocate_receive_buffers(struct rx_ring *);
770 int ixgbe_setup_receive_structures(struct adapter *);
771 void ixgbe_free_receive_structures(struct adapter *);
772 void ixgbe_txeof(struct tx_ring *);
773 bool ixgbe_rxeof(struct ix_queue *);
774
775 int ixgbe_dma_malloc(struct adapter *,
776 bus_size_t, struct ixgbe_dma_alloc *, int);
777 void ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
778
779 #ifdef PCI_IOV
780
781 static inline boolean_t
782 ixgbe_vf_mac_changed(struct ixgbe_vf *vf, const uint8_t *mac)
783 {
784 return (bcmp(mac, vf->ether_addr, ETHER_ADDR_LEN) != 0);
785 }
786
787 static inline void
788 ixgbe_send_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
789 {
790
791 if (vf->flags & IXGBE_VF_CTS)
792 msg |= IXGBE_VT_MSGTYPE_CTS;
793
794 ixgbe_write_mbx(&adapter->hw, &msg, 1, vf->pool);
795 }
796
797 static inline void
798 ixgbe_send_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
799 {
800 msg &= IXGBE_VT_MSG_MASK;
801 ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_ACK);
802 }
803
804 static inline void
805 ixgbe_send_vf_nack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
806 {
807 msg &= IXGBE_VT_MSG_MASK;
808 ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_NACK);
809 }
810
811 static inline void
812 ixgbe_process_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf)
813 {
814 if (!(vf->flags & IXGBE_VF_CTS))
815 ixgbe_send_vf_nack(adapter, vf, 0);
816 }
817
818 static inline enum ixgbe_iov_mode
819 ixgbe_get_iov_mode(struct adapter *adapter)
820 {
821 if (adapter->num_vfs == 0)
822 return (IXGBE_NO_VM);
823 if (adapter->num_queues <= 2)
824 return (IXGBE_64_VM);
825 else if (adapter->num_queues <= 4)
826 return (IXGBE_32_VM);
827 else
828 return (IXGBE_NO_VM);
829 }
830
831 static inline u16
832 ixgbe_max_vfs(enum ixgbe_iov_mode mode)
833 {
834 /*
835 * We return odd numbers below because we
836 * reserve 1 VM's worth of queues for the PF.
837 */
838 switch (mode) {
839 case IXGBE_64_VM:
840 return (63);
841 case IXGBE_32_VM:
842 return (31);
843 case IXGBE_NO_VM:
844 default:
845 return (0);
846 }
847 }
848
849 static inline int
850 ixgbe_vf_queues(enum ixgbe_iov_mode mode)
851 {
852 switch (mode) {
853 case IXGBE_64_VM:
854 return (2);
855 case IXGBE_32_VM:
856 return (4);
857 case IXGBE_NO_VM:
858 default:
859 return (0);
860 }
861 }
862
863 static inline int
864 ixgbe_vf_que_index(enum ixgbe_iov_mode mode, u32 vfnum, int num)
865 {
866 return ((vfnum * ixgbe_vf_queues(mode)) + num);
867 }
868
/*
 * Absolute hardware queue index of PF queue `num': the PF owns the
 * pool just past the last VF pool.
 */
static inline int
ixgbe_pf_que_index(enum ixgbe_iov_mode mode, int num)
{
	return (ixgbe_vf_que_index(mode, ixgbe_max_vfs(mode), num));
}
874
875 static inline void
876 ixgbe_update_max_frame(struct adapter * adapter, int max_frame)
877 {
878 if (adapter->max_frame_size < max_frame)
879 adapter->max_frame_size = max_frame;
880 }
881
882 static inline u32
883 ixgbe_get_mrqc(enum ixgbe_iov_mode mode)
884 {
885 u32 mrqc = 0;
886 switch (mode) {
887 case IXGBE_64_VM:
888 mrqc = IXGBE_MRQC_VMDQRSS64EN;
889 break;
890 case IXGBE_32_VM:
891 mrqc = IXGBE_MRQC_VMDQRSS32EN;
892 break;
893 case IXGBE_NO_VM:
894 mrqc = 0;
895 break;
896 default:
897 panic("Unexpected SR-IOV mode %d", mode);
898 }
899 return(mrqc);
900 }
901
902
903 static inline u32
904 ixgbe_get_mtqc(enum ixgbe_iov_mode mode)
905 {
906 uint32_t mtqc = 0;
907 switch (mode) {
908 case IXGBE_64_VM:
909 mtqc |= IXGBE_MTQC_64VF | IXGBE_MTQC_VT_ENA;
910 break;
911 case IXGBE_32_VM:
912 mtqc |= IXGBE_MTQC_32VF | IXGBE_MTQC_VT_ENA;
913 break;
914 case IXGBE_NO_VM:
915 mtqc = IXGBE_MTQC_64Q_1PB;
916 break;
917 default:
918 panic("Unexpected SR-IOV mode %d", mode);
919 }
920 return(mtqc);
921 }
922 #endif /* PCI_IOV */
923
924 #endif /* _IXGBE_H_ */
925