if_sriov.c revision 1.1
/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/if_sriov.c 320688 2017-07-05 17:27:03Z erj $*/

#include "ixgbe.h"

#ifdef PCI_IOV

MALLOC_DEFINE(M_IXGBE_SRIOV, "ix_sriov", "ix SR-IOV allocations");

/************************************************************************
 * ixgbe_pci_iov_detach
 ************************************************************************/
int
ixgbe_pci_iov_detach(device_t dev)
{
	return pci_iov_detach(dev);
}

/************************************************************************
 * ixgbe_define_iov_schemas
 ************************************************************************/
void
ixgbe_define_iov_schemas(device_t dev, int *error)
{
	nvlist_t *pf_schema, *vf_schema;

	pf_schema = pci_iov_schema_alloc_node();
	vf_schema = pci_iov_schema_alloc_node();
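	/*
	 * Per-VF configuration knobs exposed to the SR-IOV framework.
	 * Within this file only "mac-addr" and "allow-set-mac" are read
	 * back (see ixgbe_add_vf()); the remaining defaults do not appear
	 * to be consumed in this revision.
	 */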
	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
	pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
	    IOV_SCHEMA_HASDEFAULT, TRUE);
	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
	    IOV_SCHEMA_HASDEFAULT, FALSE);
	pci_iov_schema_add_bool(vf_schema, "allow-promisc",
	    IOV_SCHEMA_HASDEFAULT, FALSE);
	*error = pci_iov_attach(dev, pf_schema, vf_schema);
	if (*error != 0) {
		device_printf(dev,
		    "Error %d setting up SR-IOV\n", *error);
	}
} /* ixgbe_define_iov_schemas */

/************************************************************************
 * ixgbe_align_all_queue_indices
 ************************************************************************/
inline void
ixgbe_align_all_queue_indices(struct adapter *adapter)
{
	int i;
	int index;

	for (i = 0; i < adapter->num_queues; i++) {
		index = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, i);
		adapter->rx_rings[i].me = index;
		adapter->tx_rings[i].me = index;
	}
}

/* Support functions for SR-IOV/VF management */
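/*
 * Mailbox replies echo the VF's message type with ACK or NACK added;
 * once a VF has completed the reset handshake the PF also sets the
 * CTS (clear-to-send) bit so the VF knows further requests will be
 * serviced (see ixgbe_process_vf_msg()).
 */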
static inline void
ixgbe_send_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
	if (vf->flags & IXGBE_VF_CTS)
		msg |= IXGBE_VT_MSGTYPE_CTS;

	adapter->hw.mbx.ops.write(&adapter->hw, &msg, 1, vf->pool);
}

static inline void
ixgbe_send_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
	msg &= IXGBE_VT_MSG_MASK;
	ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_ACK);
}

static inline void
ixgbe_send_vf_nack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
	msg &= IXGBE_VT_MSG_MASK;
	ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_NACK);
}

static inline void
ixgbe_process_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf)
{
	if (!(vf->flags & IXGBE_VF_CTS))
		ixgbe_send_vf_nack(adapter, vf, 0);
}

static inline boolean_t
ixgbe_vf_mac_changed(struct ixgbe_vf *vf, const uint8_t *mac)
{
	return (bcmp(mac, vf->ether_addr, ETHER_ADDR_LEN) != 0);
}

static inline int
ixgbe_vf_queues(int mode)
{
	switch (mode) {
	case IXGBE_64_VM:
		return (2);
	case IXGBE_32_VM:
		return (4);
	case IXGBE_NO_VM:
	default:
		return (0);
	}
}

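/*
 * Example: in IXGBE_32_VM mode each pool owns four queues, so queue
 * num = 1 of VF 3 maps to hardware queue index 3 * 4 + 1 = 13.
 */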
inline int
ixgbe_vf_que_index(int mode, int vfnum, int num)
{
	return ((vfnum * ixgbe_vf_queues(mode)) + num);
}

static inline void
ixgbe_update_max_frame(struct adapter *adapter, int max_frame)
{
	if (adapter->max_frame_size < max_frame)
		adapter->max_frame_size = max_frame;
}

inline u32
ixgbe_get_mrqc(int iov_mode)
{
	u32 mrqc;

	switch (iov_mode) {
	case IXGBE_64_VM:
		mrqc = IXGBE_MRQC_VMDQRSS64EN;
		break;
	case IXGBE_32_VM:
		mrqc = IXGBE_MRQC_VMDQRSS32EN;
		break;
	case IXGBE_NO_VM:
		mrqc = 0;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", iov_mode);
	}

	return mrqc;
}

inline u32
ixgbe_get_mtqc(int iov_mode)
{
	uint32_t mtqc;

	switch (iov_mode) {
	case IXGBE_64_VM:
		mtqc = IXGBE_MTQC_64VF | IXGBE_MTQC_VT_ENA;
		break;
	case IXGBE_32_VM:
		mtqc = IXGBE_MTQC_32VF | IXGBE_MTQC_VT_ENA;
		break;
	case IXGBE_NO_VM:
		mtqc = IXGBE_MTQC_64Q_1PB;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", iov_mode);
	}

	return mtqc;
}

void
ixgbe_ping_all_vfs(struct adapter *adapter)
{
	struct ixgbe_vf *vf;

	for (int i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];
		if (vf->flags & IXGBE_VF_ACTIVE)
			ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
	}
} /* ixgbe_ping_all_vfs */

static void
ixgbe_vf_set_default_vlan(struct adapter *adapter, struct ixgbe_vf *vf,
                          uint16_t tag)
{
	struct ixgbe_hw *hw;
	uint32_t vmolr, vmvir;

	hw = &adapter->hw;

	vf->vlan_tag = tag;

	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));

	/* Do not receive packets that pass inexact filters. */
	vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);

	/* Disable Multicast Promiscuous Mode. */
	vmolr &= ~IXGBE_VMOLR_MPE;

	/* Accept broadcasts. */
	vmolr |= IXGBE_VMOLR_BAM;

	if (tag == 0) {
		/* Accept non-vlan tagged traffic. */
		//vmolr |= IXGBE_VMOLR_AUPE;

		/* Allow VM to tag outgoing traffic; no default tag. */
		vmvir = 0;
	} else {
		/* Require vlan-tagged traffic. */
		vmolr &= ~IXGBE_VMOLR_AUPE;

		/* Tag all traffic with provided vlan tag. */
		vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT);
	}
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr);
	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
} /* ixgbe_vf_set_default_vlan */

static boolean_t
ixgbe_vf_frame_size_compatible(struct adapter *adapter, struct ixgbe_vf *vf)
{

	/*
	 * Frame size compatibility between PF and VF is only a problem on
	 * 82599-based cards.  X540 and later support any combination of jumbo
	 * frames on PFs and VFs.
	 */
	if (adapter->hw.mac.type != ixgbe_mac_82599EB)
		return (TRUE);

	switch (vf->api_ver) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_UNKNOWN:
		/*
		 * On legacy (1.0 and older) VF versions, we don't support jumbo
		 * frames on either the PF or the VF.
		 */
		if (adapter->max_frame_size > ETHER_MAX_LEN ||
		    vf->max_frame_size > ETHER_MAX_LEN)
			return (FALSE);

		return (TRUE);

		break;
	case IXGBE_API_VER_1_1:
	default:
		/*
		 * 1.1 or later VF versions always work if they aren't using
		 * jumbo frames.
		 */
		if (vf->max_frame_size <= ETHER_MAX_LEN)
			return (TRUE);

		/*
		 * Jumbo frames only work with VFs if the PF is also using jumbo
		 * frames.
		 */
		if (adapter->max_frame_size <= ETHER_MAX_LEN)
			return (TRUE);

		return (FALSE);

	}
} /* ixgbe_vf_frame_size_compatible */


static void
ixgbe_process_vf_reset(struct adapter *adapter, struct ixgbe_vf *vf)
{
	ixgbe_vf_set_default_vlan(adapter, vf, vf->default_vlan);

	// XXX clear multicast addresses

	ixgbe_clear_rar(&adapter->hw, vf->rar_index);

	vf->api_ver = IXGBE_API_VER_UNKNOWN;
} /* ixgbe_process_vf_reset */


static void
ixgbe_vf_enable_transmit(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, vfte;

	hw = &adapter->hw;

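	/*
	 * The pools are spread across two 32-bit VFTE/VFRE registers:
	 * IXGBE_VF_INDEX() picks the register and IXGBE_VF_BIT() the bit
	 * for this pool within it.
	 */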
	vf_index = IXGBE_VF_INDEX(vf->pool);
	vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index));
	vfte |= IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte);
} /* ixgbe_vf_enable_transmit */


static void
ixgbe_vf_enable_receive(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, vfre;

	hw = &adapter->hw;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index));
	if (ixgbe_vf_frame_size_compatible(adapter, vf))
		vfre |= IXGBE_VF_BIT(vf->pool);
	else
		vfre &= ~IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre);
} /* ixgbe_vf_enable_receive */


static void
ixgbe_vf_reset_msg(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t ack;
	uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN];

	hw = &adapter->hw;

	ixgbe_process_vf_reset(adapter, vf);

	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr,
		    vf->pool, TRUE);
		ack = IXGBE_VT_MSGTYPE_ACK;
	} else
		ack = IXGBE_VT_MSGTYPE_NACK;

	ixgbe_vf_enable_transmit(adapter, vf);
	ixgbe_vf_enable_receive(adapter, vf);

	vf->flags |= IXGBE_VF_CTS;

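	/*
	 * Reply layout: word 0 echoes IXGBE_VF_RESET with ACK/NACK and CTS,
	 * words 1-2 carry the VF's permanent MAC address, and word 3 the
	 * multicast filter type.
	 */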
	resp[0] = IXGBE_VF_RESET | ack | IXGBE_VT_MSGTYPE_CTS;
	bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN);
	resp[3] = hw->mac.mc_filter_type;
	hw->mbx.ops.write(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
} /* ixgbe_vf_reset_msg */


static void
ixgbe_vf_set_mac(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	uint8_t *mac;

	mac = (uint8_t *)&msg[1];

	/* Check that the VF has permission to change the MAC address. */
	if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	if (ixgbe_validate_mac_addr(mac) != 0) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);

	ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr, vf->pool,
	    TRUE);

	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_mac */


/*
 * VF multicast addresses are set by using the appropriate bit in
 * 1 of 128 32-bit registers (4096 possible bits).
 */
static void
ixgbe_vf_set_mc_addr(struct adapter *adapter, struct ixgbe_vf *vf, u32 *msg)
{
	u16	*list = (u16 *)&msg[1];
	int	entries;
	u32	vmolr, vec_bit, vec_reg, mta_reg;

	entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
	entries = min(entries, IXGBE_MAX_VF_MC);

	vmolr = IXGBE_READ_REG(&adapter->hw, IXGBE_VMOLR(vf->pool));

	vf->num_mc_hashes = entries;

	/* Set the appropriate MTA bit */
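	/*
	 * Each hash selects one of 128 32-bit MTA registers (bits 11:5)
	 * and a bit within it (bits 4:0); e.g. hash 0x0863 sets bit 3 of
	 * MTA[0x43].
	 */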
	for (int i = 0; i < entries; i++) {
		vf->mc_hash[i] = list[i];
		vec_reg = (vf->mc_hash[i] >> 5) & 0x7F;
		vec_bit = vf->mc_hash[i] & 0x1F;
		mta_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_MTA(vec_reg));
		mta_reg |= (1 << vec_bit);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_MTA(vec_reg), mta_reg);
	}

	vmolr |= IXGBE_VMOLR_ROMPE;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(vf->pool), vmolr);
	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_mc_addr */


static void
ixgbe_vf_set_vlan(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	int enable;
	uint16_t tag;

	hw = &adapter->hw;
	enable = IXGBE_VT_MSGINFO(msg[0]);
	tag = msg[1] & IXGBE_VLVF_VLANID_MASK;

	if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	/* It is illegal to enable vlan tag 0. */
	if (tag == 0 && enable != 0) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	ixgbe_set_vfta(hw, tag, vf->pool, enable, false);
	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_vlan */


static void
ixgbe_vf_set_lpe(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t vf_max_size, pf_max_size, mhadd;

	hw = &adapter->hw;
	vf_max_size = msg[1];
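	/*
	 * The requested size is taken to include the 4-byte CRC, which is
	 * stripped below before range-checking against IXGBE_MAX_FRAME_SIZE.
	 */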

	if (vf_max_size < ETHER_CRC_LEN) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		return;
	}

	vf_max_size -= ETHER_CRC_LEN;

	if (vf_max_size > IXGBE_MAX_FRAME_SIZE) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		return;
	}

	vf->max_frame_size = vf_max_size;
	ixgbe_update_max_frame(adapter, vf->max_frame_size);

	/*
	 * We might have to disable reception to this VF if the frame size is
	 * not compatible with the config on the PF.
	 */
	ixgbe_vf_enable_receive(adapter, vf);

	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
	pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;

	if (pf_max_size < adapter->max_frame_size) {
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_lpe */


static void
ixgbe_vf_set_macvlan(struct adapter *adapter, struct ixgbe_vf *vf,
                     uint32_t *msg)
{
	//XXX implement this
	ixgbe_send_vf_nack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_macvlan */


static void
ixgbe_vf_api_negotiate(struct adapter *adapter, struct ixgbe_vf *vf,
    uint32_t *msg)
{

	switch (msg[1]) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_1_1:
		vf->api_ver = msg[1];
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		break;
	default:
		vf->api_ver = IXGBE_API_VER_UNKNOWN;
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		break;
	}
} /* ixgbe_vf_api_negotiate */


static void
ixgbe_vf_get_queues(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
	int num_queues;

	hw = &adapter->hw;

	/* GET_QUEUES is not supported on pre-1.1 APIs. */
	switch (msg[0]) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_UNKNOWN:
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK |
	    IXGBE_VT_MSGTYPE_CTS;

	num_queues = ixgbe_vf_queues(adapter->iov_mode);
	resp[IXGBE_VF_TX_QUEUES] = num_queues;
	resp[IXGBE_VF_RX_QUEUES] = num_queues;
	resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
	resp[IXGBE_VF_DEF_QUEUE] = 0;

	hw->mbx.ops.write(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
} /* ixgbe_vf_get_queues */


static void
ixgbe_process_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t msg[IXGBE_VFMAILBOX_SIZE];
	int error;

	hw = &adapter->hw;

	error = hw->mbx.ops.read(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool);

	if (error != 0)
		return;

	CTR3(KTR_MALLOC, "%s: received msg %x from %d",
	    adapter->ifp->if_xname, msg[0], vf->pool);
	if (msg[0] == IXGBE_VF_RESET) {
		ixgbe_vf_reset_msg(adapter, vf, msg);
		return;
	}

	if (!(vf->flags & IXGBE_VF_CTS)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	switch (msg[0] & IXGBE_VT_MSG_MASK) {
	case IXGBE_VF_SET_MAC_ADDR:
		ixgbe_vf_set_mac(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_MULTICAST:
		ixgbe_vf_set_mc_addr(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_VLAN:
		ixgbe_vf_set_vlan(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_LPE:
		ixgbe_vf_set_lpe(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_MACVLAN:
		ixgbe_vf_set_macvlan(adapter, vf, msg);
		break;
	case IXGBE_VF_API_NEGOTIATE:
		ixgbe_vf_api_negotiate(adapter, vf, msg);
		break;
	case IXGBE_VF_GET_QUEUES:
		ixgbe_vf_get_queues(adapter, vf, msg);
		break;
	default:
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
	}
} /* ixgbe_process_vf_msg */


/* Tasklet for handling VF -> PF mailbox messages */
void
ixgbe_handle_mbx(void *context, int pending)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	struct ixgbe_vf *vf;
	int i;

	adapter = context;
	hw = &adapter->hw;

	IXGBE_CORE_LOCK(adapter);
	for (i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];

		if (vf->flags & IXGBE_VF_ACTIVE) {
			if (hw->mbx.ops.check_for_rst(hw, vf->pool) == 0)
				ixgbe_process_vf_reset(adapter, vf);

			if (hw->mbx.ops.check_for_msg(hw, vf->pool) == 0)
				ixgbe_process_vf_msg(adapter, vf);

			if (hw->mbx.ops.check_for_ack(hw, vf->pool) == 0)
				ixgbe_process_vf_ack(adapter, vf);
		}
	}
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_handle_mbx */

int
ixgbe_init_iov(device_t dev, u16 num_vfs, const nvlist_t *config)
{
	struct adapter *adapter;
	int retval = 0;

	adapter = device_get_softc(dev);
	adapter->iov_mode = IXGBE_NO_VM;

	if (num_vfs == 0) {
		/* Would we ever get num_vfs = 0? */
		retval = EINVAL;
		goto err_init_iov;
	}

	/*
	 * We've got to reserve a VM's worth of queues for the PF,
	 * thus we go into "64 VF mode" if 32+ VFs are requested.
	 * With 64 VFs, you can only have two queues per VF.
	 * With 32 VFs, you can have up to four queues per VF.
	 */
	if (num_vfs >= IXGBE_32_VM)
		adapter->iov_mode = IXGBE_64_VM;
	else
		adapter->iov_mode = IXGBE_32_VM;

	/* Again, reserving 1 VM's worth of queues for the PF */
	adapter->pool = adapter->iov_mode - 1;
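	/*
	 * Example: a request for 8 VFs selects IXGBE_32_VM, so the PF
	 * claims the last pool (iov_mode - 1) and each VF pool owns four
	 * queues.
	 */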

	if ((num_vfs > adapter->pool) || (num_vfs >= IXGBE_64_VM)) {
		retval = ENOSPC;
		goto err_init_iov;
	}

	IXGBE_CORE_LOCK(adapter);

	adapter->vfs = malloc(sizeof(*adapter->vfs) * num_vfs, M_IXGBE_SRIOV,
	    M_NOWAIT | M_ZERO);

	if (adapter->vfs == NULL) {
		retval = ENOMEM;
		IXGBE_CORE_UNLOCK(adapter);
		goto err_init_iov;
	}

	adapter->num_vfs = num_vfs;
	adapter->init_locked(adapter);
	adapter->feat_en |= IXGBE_FEATURE_SRIOV;

	IXGBE_CORE_UNLOCK(adapter);

	return retval;

err_init_iov:
	adapter->num_vfs = 0;
	adapter->pool = 0;
	adapter->iov_mode = IXGBE_NO_VM;

	return retval;
} /* ixgbe_init_iov */

void
ixgbe_uninit_iov(device_t dev)
{
	struct ixgbe_hw *hw;
	struct adapter *adapter;
	uint32_t pf_reg, vf_reg;

	adapter = device_get_softc(dev);
	hw = &adapter->hw;

	IXGBE_CORE_LOCK(adapter);

	/* Enable rx/tx for the PF and disable it for all VFs. */
	pf_reg = IXGBE_VF_INDEX(adapter->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg), IXGBE_VF_BIT(adapter->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg), IXGBE_VF_BIT(adapter->pool));

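	/*
	 * VFRE/VFTE are split across two 32-bit registers; the PF's
	 * register was just written with only the PF bit set, so clear
	 * the other register entirely.
	 */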
	if (pf_reg == 0)
		vf_reg = 1;
	else
		vf_reg = 0;
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);

	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);

	free(adapter->vfs, M_IXGBE_SRIOV);
	adapter->vfs = NULL;
	adapter->num_vfs = 0;
	adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;

	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_uninit_iov */

static void
ixgbe_init_vf(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, pfmbimr;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	hw = &adapter->hw;

	if (!(vf->flags & IXGBE_VF_ACTIVE))
		return;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
	pfmbimr |= IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);

	ixgbe_vf_set_default_vlan(adapter, vf, vf->vlan_tag);

	// XXX multicast addresses

	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&adapter->hw, vf->rar_index,
		    vf->ether_addr, vf->pool, TRUE);
	}

	ixgbe_vf_enable_transmit(adapter, vf);
	ixgbe_vf_enable_receive(adapter, vf);

	ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
} /* ixgbe_init_vf */

void
ixgbe_initialize_iov(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
	int i;

	if (adapter->iov_mode == IXGBE_NO_VM)
		return;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	/* RMW appropriate registers based on IOV mode */
	/* Read... */
	mrqc    = IXGBE_READ_REG(hw, IXGBE_MRQC);
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	gpie    = IXGBE_READ_REG(hw, IXGBE_GPIE);
	/* Modify... */
	mrqc    &= ~IXGBE_MRQC_MRQE_MASK;
	mtqc     =  IXGBE_MTQC_VT_ENA;      /* No initial MTQC read needed */
	gcr_ext |=  IXGBE_GCR_EXT_MSIX_EN;
	gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
	gpie    &= ~IXGBE_GPIE_VTMODE_MASK;
	switch (adapter->iov_mode) {
	case IXGBE_64_VM:
		mrqc    |= IXGBE_MRQC_VMDQRSS64EN;
		mtqc    |= IXGBE_MTQC_64VF;
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
		gpie    |= IXGBE_GPIE_VTMODE_64;
		break;
	case IXGBE_32_VM:
		mrqc    |= IXGBE_MRQC_VMDQRSS32EN;
		mtqc    |= IXGBE_MTQC_32VF;
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
		gpie    |= IXGBE_GPIE_VTMODE_32;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", adapter->iov_mode);
	}
	/* Write... */
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
	IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* Enable rx/tx for the PF. */
	vf_reg = IXGBE_VF_INDEX(adapter->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), IXGBE_VF_BIT(adapter->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), IXGBE_VF_BIT(adapter->pool));

	/* Allow VM-to-VM communication. */
	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);

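	/*
	 * Turn on VMDq routing with replication and point the default pool
	 * at the PF, so traffic that matches no per-pool filter is still
	 * delivered to the PF's queues.
	 */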
	vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
	vt_ctl |= (adapter->pool << IXGBE_VT_CTL_POOL_SHIFT);
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);

	for (i = 0; i < adapter->num_vfs; i++)
		ixgbe_init_vf(adapter, &adapter->vfs[i]);
} /* ixgbe_initialize_iov */


/* Check the max frame setting of all active VFs */
void
ixgbe_recalculate_max_frame(struct adapter *adapter)
{
	struct ixgbe_vf *vf;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	for (int i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];
		if (vf->flags & IXGBE_VF_ACTIVE)
			ixgbe_update_max_frame(adapter, vf->max_frame_size);
	}
} /* ixgbe_recalculate_max_frame */

int
ixgbe_add_vf(device_t dev, u16 vfnum, const nvlist_t *config)
{
	struct adapter *adapter;
	struct ixgbe_vf *vf;
	const void *mac;

	adapter = device_get_softc(dev);

	KASSERT(vfnum < adapter->num_vfs, ("VF index %d is out of range %d",
	    vfnum, adapter->num_vfs));

	IXGBE_CORE_LOCK(adapter);
	vf = &adapter->vfs[vfnum];
	vf->pool = vfnum;

	/* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
	vf->rar_index = vfnum + 1;
	vf->default_vlan = 0;
	vf->max_frame_size = ETHER_MAX_LEN;
	ixgbe_update_max_frame(adapter, vf->max_frame_size);

	if (nvlist_exists_binary(config, "mac-addr")) {
		mac = nvlist_get_binary(config, "mac-addr", NULL);
		bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
		if (nvlist_get_bool(config, "allow-set-mac"))
			vf->flags |= IXGBE_VF_CAP_MAC;
	} else
		/*
		 * If the administrator has not specified a MAC address then
		 * we must allow the VF to choose one.
		 */
		vf->flags |= IXGBE_VF_CAP_MAC;

	vf->flags |= IXGBE_VF_ACTIVE;

	ixgbe_init_vf(adapter, vf);
	IXGBE_CORE_UNLOCK(adapter);

	return (0);
} /* ixgbe_add_vf */

#else

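/*
 * Stubs used when the kernel is built without PCI_IOV: the mailbox
 * handler becomes a no-op and VF queue indices collapse to the plain
 * queue number, so callers need no #ifdef guards.
 */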
void
ixgbe_handle_mbx(void *context, int pending)
{
	UNREFERENCED_2PARAMETER(context, pending);
} /* ixgbe_handle_mbx */

inline int
ixgbe_vf_que_index(int mode, int vfnum, int num)
{
	UNREFERENCED_2PARAMETER(mode, vfnum);

	return num;
} /* ixgbe_vf_que_index */

#endif