/* $NetBSD: if_sriov.c,v 1.16 2021/12/24 05:01:00 msaitoh Exp $ */
/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/if_sriov.c 327031 2017-12-20 18:15:06Z erj $*/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_sriov.c,v 1.16 2021/12/24 05:01:00 msaitoh Exp $");

#include "ixgbe.h"
#include "ixgbe_sriov.h"

#ifdef PCI_IOV

MALLOC_DEFINE(M_IXGBE_SRIOV, "ix_sriov", "ix SR-IOV allocations");

/************************************************************************
 * ixgbe_pci_iov_detach
 ************************************************************************/
int
ixgbe_pci_iov_detach(device_t dev)
{
	return pci_iov_detach(dev);
}

/************************************************************************
 * ixgbe_define_iov_schemas
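 *
 *   Register the PF and VF configuration schemas (the VF MAC address plus
 *   the mac-anti-spoof, allow-set-mac and allow-promisc booleans) with the
 *   PCI IOV framework.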
 ************************************************************************/
void
ixgbe_define_iov_schemas(device_t dev, int *error)
{
	nvlist_t *pf_schema, *vf_schema;

	pf_schema = pci_iov_schema_alloc_node();
	vf_schema = pci_iov_schema_alloc_node();
	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
	pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
	    IOV_SCHEMA_HASDEFAULT, TRUE);
	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
	    IOV_SCHEMA_HASDEFAULT, FALSE);
	pci_iov_schema_add_bool(vf_schema, "allow-promisc",
	    IOV_SCHEMA_HASDEFAULT, FALSE);
	*error = pci_iov_attach(dev, pf_schema, vf_schema);
	if (*error != 0) {
		device_printf(dev,
		    "Error %d setting up SR-IOV\n", *error);
	}
} /* ixgbe_define_iov_schemas */

/************************************************************************
 * ixgbe_align_all_queue_indices
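 *
 *   Remap each RX/TX ring's hardware index ("me") to the queue slot that
 *   belongs to the PF's own pool in the current SR-IOV mode.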
 ************************************************************************/
inline void
ixgbe_align_all_queue_indices(struct adapter *adapter)
{
	int i;
	int index;

	for (i = 0; i < adapter->num_queues; i++) {
		index = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, i);
		adapter->rx_rings[i].me = index;
		adapter->tx_rings[i].me = index;
	}
}

/* Support functions for SR-IOV/VF management */
static inline void
ixgbe_send_vf_msg(struct ixgbe_hw *hw, struct ixgbe_vf *vf, u32 msg)
{
	if (vf->flags & IXGBE_VF_CTS)
		msg |= IXGBE_VT_MSGTYPE_CTS;

	hw->mbx.ops.write(hw, &msg, 1, vf->pool);
}

static inline void
ixgbe_send_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
	msg &= IXGBE_VT_MSG_MASK;
	ixgbe_send_vf_msg(&adapter->hw, vf, msg | IXGBE_VT_MSGTYPE_SUCCESS);
}

static inline void
ixgbe_send_vf_nack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
	msg &= IXGBE_VT_MSG_MASK;
	ixgbe_send_vf_msg(&adapter->hw, vf, msg | IXGBE_VT_MSGTYPE_FAILURE);
}

static inline void
ixgbe_process_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf)
{
	if (!(vf->flags & IXGBE_VF_CTS))
		ixgbe_send_vf_nack(adapter, vf, 0);
}

static inline bool
ixgbe_vf_mac_changed(struct ixgbe_vf *vf, const uint8_t *mac)
{
	return (bcmp(mac, vf->ether_addr, ETHER_ADDR_LEN) != 0);
}

static inline int
ixgbe_vf_queues(int mode)
{
	switch (mode) {
	case IXGBE_64_VM:
		return (2);
	case IXGBE_32_VM:
		return (4);
	case IXGBE_NO_VM:
	default:
		return (0);
	}
}

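/*
 * Hardware queues are grouped per pool: a pool owns two queues in 64-VF
 * mode and four in 32-VF mode.  For example, pool 3 in 32-VF mode uses
 * queue indices 12..15 (3 * 4 + 0..3).
 */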
inline int
ixgbe_vf_que_index(int mode, int vfnum, int num)
{
	return ((vfnum * ixgbe_vf_queues(mode)) + num);
}

static inline void
ixgbe_update_max_frame(struct adapter * adapter, int max_frame)
{
	if (adapter->max_frame_size < max_frame)
		adapter->max_frame_size = max_frame;
}

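/* Pick the MRQC VMDq/RSS enable bits that match the given SR-IOV mode. */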
inline u32
ixgbe_get_mrqc(int iov_mode)
{
	u32 mrqc;

	switch (iov_mode) {
	case IXGBE_64_VM:
		mrqc = IXGBE_MRQC_VMDQRSS64EN;
		break;
	case IXGBE_32_VM:
		mrqc = IXGBE_MRQC_VMDQRSS32EN;
		break;
	case IXGBE_NO_VM:
		mrqc = 0;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", iov_mode);
	}

	return mrqc;
}


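/* Likewise pick the MTQC transmit layout (64 VF, 32 VF or a single pool). */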
inline u32
ixgbe_get_mtqc(int iov_mode)
{
	uint32_t mtqc;

	switch (iov_mode) {
	case IXGBE_64_VM:
		mtqc = IXGBE_MTQC_64VF | IXGBE_MTQC_VT_ENA;
		break;
	case IXGBE_32_VM:
		mtqc = IXGBE_MTQC_32VF | IXGBE_MTQC_VT_ENA;
		break;
	case IXGBE_NO_VM:
		mtqc = IXGBE_MTQC_64Q_1PB;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", iov_mode);
	}

	return mtqc;
}

void
ixgbe_ping_all_vfs(struct adapter *adapter)
{
	struct ixgbe_vf *vf;

	for (int i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];
		if (vf->flags & IXGBE_VF_ACTIVE)
			ixgbe_send_vf_msg(&adapter->hw, vf, IXGBE_PF_CONTROL_MSG);
	}
} /* ixgbe_ping_all_vfs */


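/*
 * Program VMOLR/VMVIR for a VF pool.  With tag 0, untagged traffic is
 * accepted and the VF may insert its own VLAN tags; otherwise all traffic
 * must carry, and is transmitted with, the given default tag.
 */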
static void
ixgbe_vf_set_default_vlan(struct adapter *adapter, struct ixgbe_vf *vf,
                          uint16_t tag)
{
	struct ixgbe_hw *hw;
	uint32_t vmolr, vmvir;

	hw = &adapter->hw;

	vf->vlan_tag = tag;

	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));

	/* Do not receive packets that pass inexact filters. */
	vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);

	/* Disable Multicast Promiscuous Mode. */
	vmolr &= ~IXGBE_VMOLR_MPE;

	/* Accept broadcasts. */
	vmolr |= IXGBE_VMOLR_BAM;

	if (tag == 0) {
		/* Accept non-vlan tagged traffic. */
		vmolr |= IXGBE_VMOLR_AUPE;

		/* Allow VM to tag outgoing traffic; no default tag. */
		vmvir = 0;
	} else {
		/* Require vlan-tagged traffic. */
		vmolr &= ~IXGBE_VMOLR_AUPE;

		/* Tag all traffic with provided vlan tag. */
		vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT);
	}
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr);
	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
} /* ixgbe_vf_set_default_vlan */


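/* Zero the PF-side mailbox memory (PFMBMEM) of the given VF. */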
static void
ixgbe_clear_vfmbmem(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	uint32_t vf_index = IXGBE_VF_INDEX(vf->pool);
	uint16_t mbx_size = hw->mbx.size;
	uint16_t i;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	for (i = 0; i < mbx_size; ++i)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_index), i, 0x0);
} /* ixgbe_clear_vfmbmem */


static bool
ixgbe_vf_frame_size_compatible(struct adapter *adapter, struct ixgbe_vf *vf)
{

	/*
	 * Frame size compatibility between PF and VF is only a problem on
	 * 82599-based cards.  X540 and later support any combination of jumbo
	 * frames on PFs and VFs.
	 */
	if (adapter->hw.mac.type != ixgbe_mac_82599EB)
		return (TRUE);

	switch (vf->api_ver) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_UNKNOWN:
		/*
		 * On legacy (1.0 and older) VF versions, we don't support jumbo
		 * frames on either the PF or the VF.
		 */
		if (adapter->max_frame_size > ETHER_MAX_LEN ||
		    vf->max_frame_size > ETHER_MAX_LEN)
			return (FALSE);

		return (TRUE);

		break;
	case IXGBE_API_VER_1_1:
	default:
		/*
		 * 1.1 or later VF versions always work if they aren't using
		 * jumbo frames.
		 */
		if (vf->max_frame_size <= ETHER_MAX_LEN)
			return (TRUE);

		/*
		 * Jumbo frames only work with VFs if the PF is also using jumbo
		 * frames.
		 */
		if (adapter->max_frame_size <= ETHER_MAX_LEN)
			return (TRUE);

		return (FALSE);
	}
} /* ixgbe_vf_frame_size_compatible */


static void
ixgbe_process_vf_reset(struct adapter *adapter, struct ixgbe_vf *vf)
{
	ixgbe_vf_set_default_vlan(adapter, vf, vf->default_vlan);

	// XXX clear multicast addresses

	ixgbe_clear_rar(&adapter->hw, vf->rar_index);
	ixgbe_clear_vfmbmem(adapter, vf);
	ixgbe_toggle_txdctl(&adapter->hw, IXGBE_VF_INDEX(vf->pool));

	vf->api_ver = IXGBE_API_VER_UNKNOWN;
} /* ixgbe_process_vf_reset */


static void
ixgbe_vf_enable_transmit(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, vfte;

	hw = &adapter->hw;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index));
	vfte |= IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte);
} /* ixgbe_vf_enable_transmit */


static void
ixgbe_vf_enable_receive(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, vfre;

	hw = &adapter->hw;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index));
	if (ixgbe_vf_frame_size_compatible(adapter, vf))
		vfre |= IXGBE_VF_BIT(vf->pool);
	else
		vfre &= ~IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre);
} /* ixgbe_vf_enable_receive */


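/*
 * Handle a VF_RESET mailbox message: restore the default VLAN, reprogram
 * the VF's RAR entry, re-enable TX/RX and reply with the permanent MAC
 * address and multicast filter type.
 */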
static void
ixgbe_vf_reset_msg(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t ack;
	uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN];

	hw = &adapter->hw;

	ixgbe_process_vf_reset(adapter, vf);

	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr,
		    vf->pool, TRUE);
		ack = IXGBE_VT_MSGTYPE_SUCCESS;
	} else
		ack = IXGBE_VT_MSGTYPE_FAILURE;

	ixgbe_vf_enable_transmit(adapter, vf);
	ixgbe_vf_enable_receive(adapter, vf);

	vf->flags |= IXGBE_VF_CTS;

	resp[0] = IXGBE_VF_RESET | ack;
	bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN);
	resp[3] = hw->mac.mc_filter_type;
	hw->mbx.ops.write(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
} /* ixgbe_vf_reset_msg */


static void
ixgbe_vf_set_mac(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	uint8_t *mac;

	mac = (uint8_t*)&msg[1];

	/* Check that the VF has permission to change the MAC address. */
	if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	if (ixgbe_validate_mac_addr(mac) != 0) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);

	ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr, vf->pool,
	    TRUE);

	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_mac */


/*
 * VF multicast addresses are set by flipping the appropriate bit in one
 * of the 128 32-bit MTA registers (4096 hash bits in total).
 */
static void
ixgbe_vf_set_mc_addr(struct adapter *adapter, struct ixgbe_vf *vf, u32 *msg)
{
	u16	*list = (u16*)&msg[1];
	int	entries;
	u32	vmolr, vec_bit, vec_reg, mta_reg;

	entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
	entries = uimin(entries, IXGBE_MAX_VF_MC);

	vmolr = IXGBE_READ_REG(&adapter->hw, IXGBE_VMOLR(vf->pool));

	vf->num_mc_hashes = entries;

	/* Set the appropriate MTA bit */
	for (int i = 0; i < entries; i++) {
		vf->mc_hash[i] = list[i];
		vec_reg = (vf->mc_hash[i] >> 5) & 0x7F;
		vec_bit = vf->mc_hash[i] & 0x1F;
		mta_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_MTA(vec_reg));
		mta_reg |= (1 << vec_bit);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_MTA(vec_reg), mta_reg);
	}

	vmolr |= IXGBE_VMOLR_ROMPE;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(vf->pool), vmolr);
	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_mc_addr */


static void
ixgbe_vf_set_vlan(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	int enable;
	uint16_t tag;

	hw = &adapter->hw;
	enable = IXGBE_VT_MSGINFO(msg[0]);
	tag = msg[1] & IXGBE_VLVF_VLANID_MASK;

	if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	/* It is illegal to enable vlan tag 0. */
	if (tag == 0 && enable != 0) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	ixgbe_set_vfta(hw, tag, vf->pool, enable, false);
	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_vlan */


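/*
 * Handle a VF request to change its maximum frame size (SET_LPE).  Invalid
 * sizes are intentionally ACKed; a valid size may also raise the PF's
 * MHADD.MFS setting.
 */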
static void
ixgbe_vf_set_lpe(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t vf_max_size, pf_max_size, mhadd;

	hw = &adapter->hw;
	vf_max_size = msg[1];

	if (vf_max_size < ETHER_CRC_LEN) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		return;
	}

	vf_max_size -= ETHER_CRC_LEN;

	if (vf_max_size > IXGBE_MAX_FRAME_SIZE) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		return;
	}

	vf->max_frame_size = vf_max_size;
	ixgbe_update_max_frame(adapter, vf->max_frame_size);

	/*
	 * We might have to disable reception to this VF if the frame size is
	 * not compatible with the config on the PF.
	 */
	ixgbe_vf_enable_receive(adapter, vf);

	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
	pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;

	if (pf_max_size < adapter->max_frame_size) {
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_lpe */


static void
ixgbe_vf_set_macvlan(struct adapter *adapter, struct ixgbe_vf *vf,
                     uint32_t *msg)
{
	//XXX implement this
	ixgbe_send_vf_nack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_macvlan */


static void
ixgbe_vf_api_negotiate(struct adapter *adapter, struct ixgbe_vf *vf,
    uint32_t *msg)
{

	switch (msg[1]) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_1_1:
		vf->api_ver = msg[1];
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		break;
	default:
		vf->api_ver = IXGBE_API_VER_UNKNOWN;
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		break;
	}
} /* ixgbe_vf_api_negotiate */


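/*
 * Answer a GET_QUEUES query with the per-VF TX/RX queue counts for the
 * current SR-IOV mode.
 */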
static void
ixgbe_vf_get_queues(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
	int num_queues;

	hw = &adapter->hw;

	/* GET_QUEUES is not supported on pre-1.1 APIs. */
	switch (msg[0]) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_UNKNOWN:
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_SUCCESS |
	    IXGBE_VT_MSGTYPE_CTS;

	num_queues = ixgbe_vf_queues(adapter->iov_mode);
	resp[IXGBE_VF_TX_QUEUES] = num_queues;
	resp[IXGBE_VF_RX_QUEUES] = num_queues;
	resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
	resp[IXGBE_VF_DEF_QUEUE] = 0;

	hw->mbx.ops.write(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
} /* ixgbe_vf_get_queues */


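/* Read one VF->PF mailbox message and dispatch it to the matching handler. */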
static void
ixgbe_process_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t msg[IXGBE_VFMAILBOX_SIZE];
	int error;

	hw = &adapter->hw;

	error = hw->mbx.ops.read(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool);

	if (error != 0)
		return;

	CTR3(KTR_MALLOC, "%s: received msg %x from %d", adapter->ifp->if_xname,
	    msg[0], vf->pool);
	if (msg[0] == IXGBE_VF_RESET) {
		ixgbe_vf_reset_msg(adapter, vf, msg);
		return;
	}

	if (!(vf->flags & IXGBE_VF_CTS)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	switch (msg[0] & IXGBE_VT_MSG_MASK) {
	case IXGBE_VF_SET_MAC_ADDR:
		ixgbe_vf_set_mac(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_MULTICAST:
		ixgbe_vf_set_mc_addr(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_VLAN:
		ixgbe_vf_set_vlan(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_LPE:
		ixgbe_vf_set_lpe(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_MACVLAN:
		ixgbe_vf_set_macvlan(adapter, vf, msg);
		break;
	case IXGBE_VF_API_NEGOTIATE:
		ixgbe_vf_api_negotiate(adapter, vf, msg);
		break;
	case IXGBE_VF_GET_QUEUES:
		ixgbe_vf_get_queues(adapter, vf, msg);
		break;
	default:
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
	}
} /* ixgbe_process_vf_msg */


/* Tasklet for handling VF -> PF mailbox messages */
void
ixgbe_handle_mbx(void *context)
{
	struct adapter *adapter = context;
	struct ixgbe_hw *hw;
	struct ixgbe_vf *vf;
	int i;

	KASSERT(mutex_owned(&adapter->core_mtx));

	hw = &adapter->hw;

	for (i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];

		if (vf->flags & IXGBE_VF_ACTIVE) {
			if (hw->mbx.ops.check_for_rst(hw, vf->pool) == 0)
				ixgbe_process_vf_reset(adapter, vf);

			if (hw->mbx.ops.check_for_msg(hw, vf->pool) == 0)
				ixgbe_process_vf_msg(adapter, vf);

			if (hw->mbx.ops.check_for_ack(hw, vf->pool) == 0)
				ixgbe_process_vf_ack(adapter, vf);
		}
	}
} /* ixgbe_handle_mbx */

int
ixgbe_init_iov(device_t dev, u16 num_vfs, const nvlist_t *config)
{
	struct adapter *adapter;
	int retval = 0;

	adapter = device_get_softc(dev);
	adapter->iov_mode = IXGBE_NO_VM;

	if (num_vfs == 0) {
		/* Would we ever get num_vfs = 0? */
		retval = EINVAL;
		goto err_init_iov;
	}

	/*
	 * We've got to reserve a VM's worth of queues for the PF,
	 * thus we go into "64 VF mode" if 32+ VFs are requested.
	 * With 64 VFs, you can only have two queues per VF.
	 * With 32 VFs, you can have up to four queues per VF.
	 */
	if (num_vfs >= IXGBE_32_VM)
		adapter->iov_mode = IXGBE_64_VM;
	else
		adapter->iov_mode = IXGBE_32_VM;

	/* Again, reserving 1 VM's worth of queues for the PF */
	adapter->pool = adapter->iov_mode - 1;
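	/*
	 * For example, a request for 40 VFs selects 64-VF mode: pool 63 is
	 * reserved for the PF and each VF gets two queues.
	 */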

	if ((num_vfs > adapter->pool) || (num_vfs >= IXGBE_64_VM)) {
		retval = ENOSPC;
		goto err_init_iov;
	}

	IXGBE_CORE_LOCK(adapter);

	adapter->vfs = malloc(sizeof(*adapter->vfs) * num_vfs, M_IXGBE_SRIOV,
	    M_NOWAIT | M_ZERO);

	if (adapter->vfs == NULL) {
		retval = ENOMEM;
		IXGBE_CORE_UNLOCK(adapter);
		goto err_init_iov;
	}

	adapter->num_vfs = num_vfs;
	ixgbe_init_mbx_params_pf(&adapter->hw);

	/* set the SRIOV flag now as it's needed
	 * by ixgbe_init_locked() */
	adapter->feat_en |= IXGBE_FEATURE_SRIOV;
	adapter->init_locked(adapter);

	IXGBE_CORE_UNLOCK(adapter);

	return (retval);

err_init_iov:
	adapter->num_vfs = 0;
	adapter->pool = 0;
	adapter->iov_mode = IXGBE_NO_VM;

	return (retval);
} /* ixgbe_init_iov */

void
ixgbe_uninit_iov(device_t dev)
{
	struct ixgbe_hw *hw;
	struct adapter *adapter;
	uint32_t pf_reg, vf_reg;

	adapter = device_get_softc(dev);
	hw = &adapter->hw;

	IXGBE_CORE_LOCK(adapter);

	/* Enable rx/tx for the PF and disable it for all VFs. */
	pf_reg = IXGBE_VF_INDEX(adapter->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg), IXGBE_VF_BIT(adapter->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg), IXGBE_VF_BIT(adapter->pool));

	if (pf_reg == 0)
		vf_reg = 1;
	else
		vf_reg = 0;
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);

	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);

	free(adapter->vfs, M_IXGBE_SRIOV);
	adapter->vfs = NULL;
	adapter->num_vfs = 0;
	adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;

	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_uninit_iov */

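/*
 * Enable the mailbox interrupt for one VF, restore its VLAN and MAC state
 * and notify it with a PF control message.
 */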
static void
ixgbe_init_vf(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, pfmbimr;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	hw = &adapter->hw;

	if (!(vf->flags & IXGBE_VF_ACTIVE))
		return;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
	pfmbimr |= IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);

	ixgbe_vf_set_default_vlan(adapter, vf, vf->vlan_tag);

	// XXX multicast addresses

	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&adapter->hw, vf->rar_index,
		    vf->ether_addr, vf->pool, TRUE);
	}

	ixgbe_vf_enable_transmit(adapter, vf);
	ixgbe_vf_enable_receive(adapter, vf);

	ixgbe_send_vf_msg(&adapter->hw, vf, IXGBE_PF_CONTROL_MSG);
} /* ixgbe_init_vf */

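/*
 * Program MRQC/MTQC, GCR_EXT and GPIE for the configured SR-IOV mode,
 * enable RX/TX for the PF pool and initialize every VF.
 */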
void
ixgbe_initialize_iov(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
	int i;

	if (adapter->iov_mode == IXGBE_NO_VM)
		return;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	/* RMW appropriate registers based on IOV mode */
	/* Read... */
	mrqc    = IXGBE_READ_REG(hw, IXGBE_MRQC);
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	gpie    = IXGBE_READ_REG(hw, IXGBE_GPIE);
	/* Modify... */
	mrqc    &= ~IXGBE_MRQC_MRQE_MASK;
	mtqc     =  IXGBE_MTQC_VT_ENA;      /* No initial MTQC read needed */
	gcr_ext |=  IXGBE_GCR_EXT_MSIX_EN;
	gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
	gpie    &= ~IXGBE_GPIE_VTMODE_MASK;
	switch (adapter->iov_mode) {
	case IXGBE_64_VM:
		mrqc    |= IXGBE_MRQC_VMDQRSS64EN;
		mtqc    |= IXGBE_MTQC_64VF;
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
		gpie    |= IXGBE_GPIE_VTMODE_64;
		break;
	case IXGBE_32_VM:
		mrqc    |= IXGBE_MRQC_VMDQRSS32EN;
		mtqc    |= IXGBE_MTQC_32VF;
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
		gpie    |= IXGBE_GPIE_VTMODE_32;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", adapter->iov_mode);
	}
	/* Write... */
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
	IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* Enable rx/tx for the PF. */
	vf_reg = IXGBE_VF_INDEX(adapter->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), IXGBE_VF_BIT(adapter->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), IXGBE_VF_BIT(adapter->pool));

	/* Allow VM-to-VM communication. */
	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);

	vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
	vt_ctl |= (adapter->pool << IXGBE_VT_CTL_POOL_SHIFT);
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);

	for (i = 0; i < adapter->num_vfs; i++)
		ixgbe_init_vf(adapter, &adapter->vfs[i]);
} /* ixgbe_initialize_iov */


/* Check the max frame setting of all active VF's */
void
ixgbe_recalculate_max_frame(struct adapter *adapter)
{
	struct ixgbe_vf *vf;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	for (int i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];
		if (vf->flags & IXGBE_VF_ACTIVE)
			ixgbe_update_max_frame(adapter, vf->max_frame_size);
	}
} /* ixgbe_recalculate_max_frame */

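/*
 * Attach one VF: record its pool and RAR index, apply an administratively
 * assigned MAC address if the config provides one, then bring it up.
 */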
int
ixgbe_add_vf(device_t dev, u16 vfnum, const nvlist_t *config)
{
	struct adapter *adapter;
	struct ixgbe_vf *vf;
	const void *mac;

	adapter = device_get_softc(dev);

	KASSERT(vfnum < adapter->num_vfs, ("VF index %d is out of range %d",
	    vfnum, adapter->num_vfs));

	IXGBE_CORE_LOCK(adapter);
	vf = &adapter->vfs[vfnum];
	vf->pool = vfnum;

	/* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
	vf->rar_index = vfnum + 1;
	vf->default_vlan = 0;
	vf->max_frame_size = ETHER_MAX_LEN;
	ixgbe_update_max_frame(adapter, vf->max_frame_size);

	if (nvlist_exists_binary(config, "mac-addr")) {
		mac = nvlist_get_binary(config, "mac-addr", NULL);
		bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
		if (nvlist_get_bool(config, "allow-set-mac"))
			vf->flags |= IXGBE_VF_CAP_MAC;
	} else
		/*
		 * If the administrator has not specified a MAC address then
		 * we must allow the VF to choose one.
		 */
		vf->flags |= IXGBE_VF_CAP_MAC;

	vf->flags |= IXGBE_VF_ACTIVE;

	ixgbe_init_vf(adapter, vf);
	IXGBE_CORE_UNLOCK(adapter);

	return (0);
} /* ixgbe_add_vf */

#else

void
ixgbe_handle_mbx(void *context)
{
	UNREFERENCED_1PARAMETER(context);
} /* ixgbe_handle_mbx */

inline int
ixgbe_vf_que_index(int mode, int vfnum, int num)
{
	UNREFERENCED_2PARAMETER(mode, vfnum);

	return num;
} /* ixgbe_vf_que_index */

#endif