if_sriov.c revision 1.3
/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/if_sriov.c 327031 2017-12-20 18:15:06Z erj $*/

#include "ixgbe.h"
#include "ixgbe_sriov.h"

#ifdef PCI_IOV

MALLOC_DEFINE(M_IXGBE_SRIOV, "ix_sriov", "ix SR-IOV allocations");

/************************************************************************
 * ixgbe_pci_iov_detach
 ************************************************************************/
int
ixgbe_pci_iov_detach(device_t dev)
{
	return pci_iov_detach(dev);
}

/************************************************************************
 * ixgbe_define_iov_schemas
 ************************************************************************/
void
ixgbe_define_iov_schemas(device_t dev, int *error)
{
	nvlist_t *pf_schema, *vf_schema;

	pf_schema = pci_iov_schema_alloc_node();
	vf_schema = pci_iov_schema_alloc_node();
	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
	pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
	    IOV_SCHEMA_HASDEFAULT, TRUE);
	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
	    IOV_SCHEMA_HASDEFAULT, FALSE);
	pci_iov_schema_add_bool(vf_schema, "allow-promisc",
	    IOV_SCHEMA_HASDEFAULT, FALSE);
	*error = pci_iov_attach(dev, pf_schema, vf_schema);
	if (*error != 0) {
		device_printf(dev,
		    "Error %d setting up SR-IOV\n", *error);
	}
} /* ixgbe_define_iov_schemas */

/************************************************************************
 * ixgbe_align_all_queue_indices
 ************************************************************************/
inline void
ixgbe_align_all_queue_indices(struct adapter *adapter)
{
	int i;
	int index;

	for (i = 0; i < adapter->num_queues; i++) {
		index = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, i);
		adapter->rx_rings[i].me = index;
		adapter->tx_rings[i].me = index;
	}
}

/* Support functions for SR-IOV/VF management */
static inline void
ixgbe_send_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
	if (vf->flags & IXGBE_VF_CTS)
		msg |= IXGBE_VT_MSGTYPE_CTS;

	adapter->hw.mbx.ops.write(&adapter->hw, &msg, 1, vf->pool);
}

static inline void
ixgbe_send_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
	msg &= IXGBE_VT_MSG_MASK;
	ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_ACK);
}

static inline void
ixgbe_send_vf_nack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
	msg &= IXGBE_VT_MSG_MASK;
	ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_NACK);
}

static inline void
ixgbe_process_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf)
{
	if (!(vf->flags & IXGBE_VF_CTS))
		ixgbe_send_vf_nack(adapter, vf, 0);
}

static inline boolean_t
ixgbe_vf_mac_changed(struct ixgbe_vf *vf, const uint8_t *mac)
{
	return (bcmp(mac, vf->ether_addr, ETHER_ADDR_LEN) != 0);
}

static inline int
ixgbe_vf_queues(int mode)
{
	switch (mode) {
	case IXGBE_64_VM:
		return (2);
	case IXGBE_32_VM:
		return (4);
	case IXGBE_NO_VM:
	default:
		return (0);
	}
}

inline int
ixgbe_vf_que_index(int mode, int vfnum, int num)
{
	return ((vfnum * ixgbe_vf_queues(mode)) + num);
}

static inline void
ixgbe_update_max_frame(struct adapter * adapter, int max_frame)
{
	if (adapter->max_frame_size < max_frame)
		adapter->max_frame_size = max_frame;
}

inline u32
ixgbe_get_mrqc(int iov_mode)
{
	u32 mrqc;

	switch (iov_mode) {
	case IXGBE_64_VM:
		mrqc = IXGBE_MRQC_VMDQRSS64EN;
		break;
	case IXGBE_32_VM:
		mrqc = IXGBE_MRQC_VMDQRSS32EN;
		break;
	case IXGBE_NO_VM:
		mrqc = 0;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", iov_mode);
	}

	return mrqc;
}


inline u32
ixgbe_get_mtqc(int iov_mode)
{
	uint32_t mtqc;

	switch (iov_mode) {
	case IXGBE_64_VM:
		mtqc = IXGBE_MTQC_64VF | IXGBE_MTQC_VT_ENA;
		break;
	case IXGBE_32_VM:
		mtqc = IXGBE_MTQC_32VF | IXGBE_MTQC_VT_ENA;
		break;
	case IXGBE_NO_VM:
		mtqc = IXGBE_MTQC_64Q_1PB;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", iov_mode);
	}

	return mtqc;
}

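/* Send a PF control message to every active VF. */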
void
ixgbe_ping_all_vfs(struct adapter *adapter)
{
	struct ixgbe_vf *vf;

	for (int i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];
		if (vf->flags & IXGBE_VF_ACTIVE)
			ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
	}
} /* ixgbe_ping_all_vfs */


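/*
 * Program the VF pool's receive filtering (VMOLR) and default VLAN
 * insertion (VMVIR) for the given tag.
 */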
static void
ixgbe_vf_set_default_vlan(struct adapter *adapter, struct ixgbe_vf *vf,
                          uint16_t tag)
{
	struct ixgbe_hw *hw;
	uint32_t vmolr, vmvir;

	hw = &adapter->hw;

	vf->vlan_tag = tag;

	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));

	/* Do not receive packets that pass inexact filters. */
	vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);

	/* Disable Multicast Promiscuous Mode. */
	vmolr &= ~IXGBE_VMOLR_MPE;

	/* Accept broadcasts. */
	vmolr |= IXGBE_VMOLR_BAM;

	if (tag == 0) {
		/* Accept non-vlan tagged traffic. */
		vmolr |= IXGBE_VMOLR_AUPE;

		/* Allow VM to tag outgoing traffic; no default tag. */
		vmvir = 0;
	} else {
		/* Require vlan-tagged traffic. */
		vmolr &= ~IXGBE_VMOLR_AUPE;

		/* Tag all traffic with provided vlan tag. */
		vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT);
	}
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr);
	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
} /* ixgbe_vf_set_default_vlan */


static boolean_t
ixgbe_vf_frame_size_compatible(struct adapter *adapter, struct ixgbe_vf *vf)
{

	/*
	 * Frame size compatibility between PF and VF is only a problem on
	 * 82599-based cards.  X540 and later support any combination of jumbo
	 * frames on PFs and VFs.
	 */
	if (adapter->hw.mac.type != ixgbe_mac_82599EB)
		return (TRUE);

	switch (vf->api_ver) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_UNKNOWN:
		/*
		 * On legacy (1.0 and older) VF versions, we don't support jumbo
		 * frames on either the PF or the VF.
		 */
		if (adapter->max_frame_size > ETHER_MAX_LEN ||
		    vf->max_frame_size > ETHER_MAX_LEN)
			return (FALSE);

		return (TRUE);

		break;
	case IXGBE_API_VER_1_1:
	default:
		/*
		 * 1.1 or later VF versions always work if they aren't using
		 * jumbo frames.
		 */
		if (vf->max_frame_size <= ETHER_MAX_LEN)
			return (TRUE);

		/*
		 * Jumbo frames only work with VFs if the PF is also using jumbo
		 * frames.
		 */
		if (adapter->max_frame_size <= ETHER_MAX_LEN)
			return (TRUE);

		return (FALSE);
	}
} /* ixgbe_vf_frame_size_compatible */


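/*
 * Return a VF to its post-reset state: default VLAN restored, RAR entry
 * cleared, mailbox API version forgotten.
 */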
static void
ixgbe_process_vf_reset(struct adapter *adapter, struct ixgbe_vf *vf)
{
	ixgbe_vf_set_default_vlan(adapter, vf, vf->default_vlan);

	// XXX clear multicast addresses

	ixgbe_clear_rar(&adapter->hw, vf->rar_index);

	vf->api_ver = IXGBE_API_VER_UNKNOWN;
} /* ixgbe_process_vf_reset */


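/* Set the VF's bit in VFTE to enable transmit for its pool. */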
static void
ixgbe_vf_enable_transmit(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, vfte;

	hw = &adapter->hw;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index));
	vfte |= IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte);
} /* ixgbe_vf_enable_transmit */


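/*
 * Enable receive for the VF's pool in VFRE, but only if its frame size
 * is compatible with the PF's.
 */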
static void
ixgbe_vf_enable_receive(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, vfre;

	hw = &adapter->hw;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index));
	if (ixgbe_vf_frame_size_compatible(adapter, vf))
		vfre |= IXGBE_VF_BIT(vf->pool);
	else
		vfre &= ~IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre);
} /* ixgbe_vf_enable_receive */


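/*
 * Handle a VF_RESET mailbox message: reset the VF's state, reprogram its
 * MAC filter, re-enable rx/tx, and reply with the permanent MAC address
 * and multicast filter type.
 */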
static void
ixgbe_vf_reset_msg(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t ack;
	uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN];

	hw = &adapter->hw;

	ixgbe_process_vf_reset(adapter, vf);

	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr,
		    vf->pool, TRUE);
		ack = IXGBE_VT_MSGTYPE_ACK;
	} else
		ack = IXGBE_VT_MSGTYPE_NACK;

	ixgbe_vf_enable_transmit(adapter, vf);
	ixgbe_vf_enable_receive(adapter, vf);

	vf->flags |= IXGBE_VF_CTS;

	resp[0] = IXGBE_VF_RESET | ack;
	bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN);
	resp[3] = hw->mac.mc_filter_type;
	hw->mbx.ops.write(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
} /* ixgbe_vf_reset_msg */


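/*
 * Handle a SET_MAC_ADDR mailbox message: validate the request and program
 * the VF's new unicast address into its RAR entry.
 */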
static void
ixgbe_vf_set_mac(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	uint8_t *mac;

	mac = (uint8_t*)&msg[1];

	/* Check that the VF has permission to change the MAC address. */
	if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	if (ixgbe_validate_mac_addr(mac) != 0) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);

	ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr, vf->pool,
	    TRUE);

	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_mac */


/*
 * VF multicast addresses are set by setting the appropriate bit in
 * one of the 128 32-bit MTA registers (4096 possible hash values).
 */
static void
ixgbe_vf_set_mc_addr(struct adapter *adapter, struct ixgbe_vf *vf, u32 *msg)
{
	u16	*list = (u16*)&msg[1];
	int	entries;
	u32	vmolr, vec_bit, vec_reg, mta_reg;

	entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
	entries = min(entries, IXGBE_MAX_VF_MC);

	vmolr = IXGBE_READ_REG(&adapter->hw, IXGBE_VMOLR(vf->pool));

	vf->num_mc_hashes = entries;

	/* Set the appropriate MTA bit */
	for (int i = 0; i < entries; i++) {
		vf->mc_hash[i] = list[i];
		vec_reg = (vf->mc_hash[i] >> 5) & 0x7F;
		vec_bit = vf->mc_hash[i] & 0x1F;
		mta_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_MTA(vec_reg));
		mta_reg |= (1 << vec_bit);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_MTA(vec_reg), mta_reg);
	}

	vmolr |= IXGBE_VMOLR_ROMPE;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(vf->pool), vmolr);
	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_mc_addr */


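/*
 * Handle a SET_VLAN mailbox message: add or remove a VLAN filter for the
 * VF's pool.
 */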
static void
ixgbe_vf_set_vlan(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	int enable;
	uint16_t tag;

	hw = &adapter->hw;
	enable = IXGBE_VT_MSGINFO(msg[0]);
	tag = msg[1] & IXGBE_VLVF_VLANID_MASK;

	if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	/* It is illegal to enable vlan tag 0. */
	if (tag == 0 && enable != 0) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	ixgbe_set_vfta(hw, tag, vf->pool, enable, false);
	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_vlan */


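/*
 * Handle a SET_LPE mailbox message: record the VF's requested maximum
 * frame size and raise the PF's MHADD limit if necessary.
 */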
static void
ixgbe_vf_set_lpe(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t vf_max_size, pf_max_size, mhadd;

	hw = &adapter->hw;
	vf_max_size = msg[1];

	if (vf_max_size < ETHER_CRC_LEN) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		return;
	}

	vf_max_size -= ETHER_CRC_LEN;

	if (vf_max_size > IXGBE_MAX_FRAME_SIZE) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		return;
	}

	vf->max_frame_size = vf_max_size;
	ixgbe_update_max_frame(adapter, vf->max_frame_size);

	/*
	 * We might have to disable reception to this VF if the frame size is
	 * not compatible with the config on the PF.
	 */
	ixgbe_vf_enable_receive(adapter, vf);

	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
	pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;

	if (pf_max_size < adapter->max_frame_size) {
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_lpe */


static void
ixgbe_vf_set_macvlan(struct adapter *adapter, struct ixgbe_vf *vf,
                     uint32_t *msg)
{
	//XXX implement this
	ixgbe_send_vf_nack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_macvlan */


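/* Negotiate the mailbox API version; only 1.0 and 1.1 are accepted. */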
static void
ixgbe_vf_api_negotiate(struct adapter *adapter, struct ixgbe_vf *vf,
    uint32_t *msg)
{

	switch (msg[1]) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_1_1:
		vf->api_ver = msg[1];
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		break;
	default:
		vf->api_ver = IXGBE_API_VER_UNKNOWN;
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		break;
	}
} /* ixgbe_vf_api_negotiate */


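/*
 * Handle a GET_QUEUES mailbox message: reply with the VF's Tx/Rx queue
 * counts, default VLAN indication, and default queue.
 */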
static void
ixgbe_vf_get_queues(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
	int num_queues;

	hw = &adapter->hw;

	/* GET_QUEUES is not supported on pre-1.1 APIs. */
	switch (msg[0]) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_UNKNOWN:
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK |
	    IXGBE_VT_MSGTYPE_CTS;

	num_queues = ixgbe_vf_queues(adapter->iov_mode);
	resp[IXGBE_VF_TX_QUEUES] = num_queues;
	resp[IXGBE_VF_RX_QUEUES] = num_queues;
	resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
	resp[IXGBE_VF_DEF_QUEUE] = 0;

	hw->mbx.ops.write(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
} /* ixgbe_vf_get_queues */


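/* Read one mailbox message from the VF and dispatch it to its handler. */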
static void
ixgbe_process_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t msg[IXGBE_VFMAILBOX_SIZE];
	int error;

	hw = &adapter->hw;

	error = hw->mbx.ops.read(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool);

	if (error != 0)
		return;

	CTR3(KTR_MALLOC, "%s: received msg %x from %d", adapter->ifp->if_xname,
	    msg[0], vf->pool);
	if (msg[0] == IXGBE_VF_RESET) {
		ixgbe_vf_reset_msg(adapter, vf, msg);
		return;
	}

	if (!(vf->flags & IXGBE_VF_CTS)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	switch (msg[0] & IXGBE_VT_MSG_MASK) {
	case IXGBE_VF_SET_MAC_ADDR:
		ixgbe_vf_set_mac(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_MULTICAST:
		ixgbe_vf_set_mc_addr(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_VLAN:
		ixgbe_vf_set_vlan(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_LPE:
		ixgbe_vf_set_lpe(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_MACVLAN:
		ixgbe_vf_set_macvlan(adapter, vf, msg);
		break;
	case IXGBE_VF_API_NEGOTIATE:
		ixgbe_vf_api_negotiate(adapter, vf, msg);
		break;
	case IXGBE_VF_GET_QUEUES:
		ixgbe_vf_get_queues(adapter, vf, msg);
		break;
	default:
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
	}
} /* ixgbe_process_vf_msg */


/* Tasklet for handling VF -> PF mailbox messages */
void
ixgbe_handle_mbx(void *context, int pending)
{
	struct adapter *adapter = context;
	struct ixgbe_hw *hw;
	struct ixgbe_vf *vf;
	int i;

	hw = &adapter->hw;

	IXGBE_CORE_LOCK(adapter);
	for (i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];

		if (vf->flags & IXGBE_VF_ACTIVE) {
			if (hw->mbx.ops.check_for_rst(hw, vf->pool) == 0)
				ixgbe_process_vf_reset(adapter, vf);

			if (hw->mbx.ops.check_for_msg(hw, vf->pool) == 0)
				ixgbe_process_vf_msg(adapter, vf);

			if (hw->mbx.ops.check_for_ack(hw, vf->pool) == 0)
				ixgbe_process_vf_ack(adapter, vf);
		}
	}
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_handle_mbx */

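/*
 * SR-IOV attach entry point: select the VMDq pool mode for the requested
 * number of VFs, allocate per-VF state, and reinitialize the adapter with
 * SR-IOV enabled.
 */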
int
ixgbe_init_iov(device_t dev, u16 num_vfs, const nvlist_t *config)
{
	struct adapter *adapter;
	int retval = 0;

	adapter = device_get_softc(dev);
	adapter->iov_mode = IXGBE_NO_VM;

	if (num_vfs == 0) {
		/* Would we ever get num_vfs = 0? */
		retval = EINVAL;
		goto err_init_iov;
	}

	/*
	 * We've got to reserve a VM's worth of queues for the PF,
	 * thus we go into "64 VF mode" if 32+ VFs are requested.
	 * With 64 VFs, you can only have two queues per VF.
	 * With 32 VFs, you can have up to four queues per VF.
	 */
	if (num_vfs >= IXGBE_32_VM)
		adapter->iov_mode = IXGBE_64_VM;
	else
		adapter->iov_mode = IXGBE_32_VM;

	/* Again, reserving 1 VM's worth of queues for the PF */
	adapter->pool = adapter->iov_mode - 1;

	if ((num_vfs > adapter->pool) || (num_vfs >= IXGBE_64_VM)) {
		retval = ENOSPC;
		goto err_init_iov;
	}

	IXGBE_CORE_LOCK(adapter);

	adapter->vfs = malloc(sizeof(*adapter->vfs) * num_vfs, M_IXGBE_SRIOV,
	    M_NOWAIT | M_ZERO);

	if (adapter->vfs == NULL) {
		retval = ENOMEM;
		IXGBE_CORE_UNLOCK(adapter);
		goto err_init_iov;
	}

	adapter->num_vfs = num_vfs;

	/*
	 * Set the SRIOV flag now as it's needed
	 * by ixgbe_init_locked().
	 */
	adapter->feat_en |= IXGBE_FEATURE_SRIOV;
	adapter->init_locked(adapter);

	IXGBE_CORE_UNLOCK(adapter);

	return (retval);

err_init_iov:
	adapter->num_vfs = 0;
	adapter->pool = 0;
	adapter->iov_mode = IXGBE_NO_VM;

	return (retval);
} /* ixgbe_init_iov */

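/*
 * SR-IOV detach: leave rx/tx enabled only for the PF pool, disable the
 * virtualization control register, and free per-VF state.
 */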
void
ixgbe_uninit_iov(device_t dev)
{
	struct ixgbe_hw *hw;
	struct adapter *adapter;
	uint32_t pf_reg, vf_reg;

	adapter = device_get_softc(dev);
	hw = &adapter->hw;

	IXGBE_CORE_LOCK(adapter);

	/* Enable rx/tx for the PF and disable it for all VFs. */
	pf_reg = IXGBE_VF_INDEX(adapter->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg), IXGBE_VF_BIT(adapter->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg), IXGBE_VF_BIT(adapter->pool));

	if (pf_reg == 0)
		vf_reg = 1;
	else
		vf_reg = 0;
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);

	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);

	free(adapter->vfs, M_IXGBE_SRIOV);
	adapter->vfs = NULL;
	adapter->num_vfs = 0;
	adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;

	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_uninit_iov */

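/*
 * Program per-VF hardware state: mailbox interrupt enable, default VLAN,
 * MAC filter, and rx/tx enables, then notify the VF.
 */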
static void
ixgbe_init_vf(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, pfmbimr;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	hw = &adapter->hw;

	if (!(vf->flags & IXGBE_VF_ACTIVE))
		return;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
	pfmbimr |= IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);

	ixgbe_vf_set_default_vlan(adapter, vf, vf->vlan_tag);

	// XXX multicast addresses

	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&adapter->hw, vf->rar_index,
		    vf->ether_addr, vf->pool, TRUE);
	}

	ixgbe_vf_enable_transmit(adapter, vf);
	ixgbe_vf_enable_receive(adapter, vf);

	ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
} /* ixgbe_init_vf */

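/*
 * Program the global virtualization registers (MRQC, MTQC, GCR_EXT, GPIE,
 * VT_CTL) for the selected SR-IOV mode and initialize each VF.
 */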
void
ixgbe_initialize_iov(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
	int i;

	if (adapter->iov_mode == IXGBE_NO_VM)
		return;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	/* RMW appropriate registers based on IOV mode */
	/* Read... */
	mrqc    = IXGBE_READ_REG(hw, IXGBE_MRQC);
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	gpie    = IXGBE_READ_REG(hw, IXGBE_GPIE);
	/* Modify... */
	mrqc    &= ~IXGBE_MRQC_MRQE_MASK;
	mtqc     =  IXGBE_MTQC_VT_ENA;      /* No initial MTQC read needed */
	gcr_ext |=  IXGBE_GCR_EXT_MSIX_EN;
	gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
	gpie    &= ~IXGBE_GPIE_VTMODE_MASK;
	switch (adapter->iov_mode) {
	case IXGBE_64_VM:
		mrqc    |= IXGBE_MRQC_VMDQRSS64EN;
		mtqc    |= IXGBE_MTQC_64VF;
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
		gpie    |= IXGBE_GPIE_VTMODE_64;
		break;
	case IXGBE_32_VM:
		mrqc    |= IXGBE_MRQC_VMDQRSS32EN;
		mtqc    |= IXGBE_MTQC_32VF;
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
		gpie    |= IXGBE_GPIE_VTMODE_32;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", adapter->iov_mode);
	}
	/* Write... */
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
	IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* Enable rx/tx for the PF. */
	vf_reg = IXGBE_VF_INDEX(adapter->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), IXGBE_VF_BIT(adapter->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), IXGBE_VF_BIT(adapter->pool));

	/* Allow VM-to-VM communication. */
	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);

	vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
	vt_ctl |= (adapter->pool << IXGBE_VT_CTL_POOL_SHIFT);
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);

	for (i = 0; i < adapter->num_vfs; i++)
		ixgbe_init_vf(adapter, &adapter->vfs[i]);
} /* ixgbe_initialize_iov */


/* Check the max frame setting of all active VFs */
void
ixgbe_recalculate_max_frame(struct adapter *adapter)
{
	struct ixgbe_vf *vf;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	for (int i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];
		if (vf->flags & IXGBE_VF_ACTIVE)
			ixgbe_update_max_frame(adapter, vf->max_frame_size);
	}
} /* ixgbe_recalculate_max_frame */

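/*
 * Per-VF attach entry point: initialize the VF's software state from the
 * nvlist configuration and program it into the hardware.
 */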
int
ixgbe_add_vf(device_t dev, u16 vfnum, const nvlist_t *config)
{
	struct adapter *adapter;
	struct ixgbe_vf *vf;
	const void *mac;

	adapter = device_get_softc(dev);

	KASSERT(vfnum < adapter->num_vfs, ("VF index %d is out of range %d",
	    vfnum, adapter->num_vfs));

	IXGBE_CORE_LOCK(adapter);
	vf = &adapter->vfs[vfnum];
	vf->pool = vfnum;

	/* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
	vf->rar_index = vfnum + 1;
	vf->default_vlan = 0;
	vf->max_frame_size = ETHER_MAX_LEN;
	ixgbe_update_max_frame(adapter, vf->max_frame_size);

	if (nvlist_exists_binary(config, "mac-addr")) {
		mac = nvlist_get_binary(config, "mac-addr", NULL);
		bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
		if (nvlist_get_bool(config, "allow-set-mac"))
			vf->flags |= IXGBE_VF_CAP_MAC;
	} else
		/*
		 * If the administrator has not specified a MAC address then
		 * we must allow the VF to choose one.
		 */
		vf->flags |= IXGBE_VF_CAP_MAC;

	vf->flags |= IXGBE_VF_ACTIVE;

	ixgbe_init_vf(adapter, vf);
	IXGBE_CORE_UNLOCK(adapter);

	return (0);
} /* ixgbe_add_vf */

#else

void
ixgbe_handle_mbx(void *context, int pending)
{
	UNREFERENCED_2PARAMETER(context, pending);
} /* ixgbe_handle_mbx */

inline int
ixgbe_vf_que_index(int mode, int vfnum, int num)
{
	UNREFERENCED_2PARAMETER(mode, vfnum);

	return num;
} /* ixgbe_vf_que_index */

#endif