if_sriov.c revision 1.11
      1  1.11   msaitoh /* $NetBSD: if_sriov.c,v 1.11 2021/04/30 06:55:32 msaitoh Exp $ */
      2   1.1   msaitoh /******************************************************************************
      3   1.1   msaitoh 
      4   1.1   msaitoh   Copyright (c) 2001-2017, Intel Corporation
      5   1.1   msaitoh   All rights reserved.
      6   1.1   msaitoh 
      7   1.1   msaitoh   Redistribution and use in source and binary forms, with or without
      8   1.1   msaitoh   modification, are permitted provided that the following conditions are met:
      9   1.1   msaitoh 
     10   1.1   msaitoh    1. Redistributions of source code must retain the above copyright notice,
     11   1.1   msaitoh       this list of conditions and the following disclaimer.
     12   1.1   msaitoh 
     13   1.1   msaitoh    2. Redistributions in binary form must reproduce the above copyright
     14   1.1   msaitoh       notice, this list of conditions and the following disclaimer in the
     15   1.1   msaitoh       documentation and/or other materials provided with the distribution.
     16   1.1   msaitoh 
     17   1.1   msaitoh    3. Neither the name of the Intel Corporation nor the names of its
     18   1.1   msaitoh       contributors may be used to endorse or promote products derived from
     19   1.1   msaitoh       this software without specific prior written permission.
     20   1.1   msaitoh 
     21   1.1   msaitoh   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     22   1.1   msaitoh   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     23   1.1   msaitoh   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     24   1.1   msaitoh   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     25   1.1   msaitoh   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     26   1.1   msaitoh   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     27   1.1   msaitoh   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     28   1.1   msaitoh   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     29   1.1   msaitoh   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     30   1.1   msaitoh   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     31   1.1   msaitoh   POSSIBILITY OF SUCH DAMAGE.
     32   1.1   msaitoh 
     33   1.1   msaitoh ******************************************************************************/
     34   1.3   msaitoh /*$FreeBSD: head/sys/dev/ixgbe/if_sriov.c 327031 2017-12-20 18:15:06Z erj $*/
     35   1.1   msaitoh 
     36  1.11   msaitoh #include <sys/cdefs.h>
     37  1.11   msaitoh __KERNEL_RCSID(0, "$NetBSD: if_sriov.c,v 1.11 2021/04/30 06:55:32 msaitoh Exp $");
     38  1.11   msaitoh 
     39   1.1   msaitoh #include "ixgbe.h"
     40   1.2   msaitoh #include "ixgbe_sriov.h"
     41   1.1   msaitoh 
     42   1.1   msaitoh #ifdef PCI_IOV
     43   1.1   msaitoh 
     44   1.1   msaitoh MALLOC_DEFINE(M_IXGBE_SRIOV, "ix_sriov", "ix SR-IOV allocations");
     45   1.1   msaitoh 
     46   1.1   msaitoh /************************************************************************
     47   1.1   msaitoh  * ixgbe_pci_iov_detach
     48   1.1   msaitoh  ************************************************************************/
     49   1.1   msaitoh int
     50   1.1   msaitoh ixgbe_pci_iov_detach(device_t dev)
     51   1.1   msaitoh {
     52   1.1   msaitoh 	return pci_iov_detach(dev);
     53   1.1   msaitoh }
     54   1.1   msaitoh 
     55   1.1   msaitoh /************************************************************************
     56   1.1   msaitoh  * ixgbe_define_iov_schemas
     57   1.1   msaitoh  ************************************************************************/
     58   1.1   msaitoh void
     59   1.1   msaitoh ixgbe_define_iov_schemas(device_t dev, int *error)
     60   1.1   msaitoh {
     61   1.1   msaitoh 	nvlist_t *pf_schema, *vf_schema;
     62   1.1   msaitoh 
     63   1.1   msaitoh 	pf_schema = pci_iov_schema_alloc_node();
     64   1.1   msaitoh 	vf_schema = pci_iov_schema_alloc_node();
     65   1.1   msaitoh 	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
     66   1.1   msaitoh 	pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
     67   1.1   msaitoh 	    IOV_SCHEMA_HASDEFAULT, TRUE);
     68   1.1   msaitoh 	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
     69   1.1   msaitoh 	    IOV_SCHEMA_HASDEFAULT, FALSE);
     70   1.1   msaitoh 	pci_iov_schema_add_bool(vf_schema, "allow-promisc",
     71   1.1   msaitoh 	    IOV_SCHEMA_HASDEFAULT, FALSE);
     72   1.1   msaitoh 	*error = pci_iov_attach(dev, pf_schema, vf_schema);
     73   1.1   msaitoh 	if (*error != 0) {
     74   1.1   msaitoh 		device_printf(dev,
     75   1.1   msaitoh 		    "Error %d setting up SR-IOV\n", *error);
     76   1.1   msaitoh 	}
     77   1.1   msaitoh } /* ixgbe_define_iov_schemas */
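/*
 * For reference only: the schema above mirrors FreeBSD's PCI SR-IOV
 * framework, where an administrator feeds these per-VF parameters to the
 * driver through iovctl(8).  A minimal iovctl.conf(5) sketch (device name
 * and values are purely illustrative, not taken from this driver):
 *
 *	PF {
 *		device : "ix0";
 *		num_vfs : 4;
 *	}
 *	VF-0 {
 *		mac-addr : "02:01:02:03:04:05";
 *		allow-set-mac : true;
 *	}
 */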
     78   1.1   msaitoh 
     79   1.1   msaitoh /************************************************************************
     80   1.1   msaitoh  * ixgbe_align_all_queue_indices
     81   1.1   msaitoh  ************************************************************************/
     82   1.1   msaitoh inline void
     83   1.1   msaitoh ixgbe_align_all_queue_indices(struct adapter *adapter)
     84   1.1   msaitoh {
     85   1.1   msaitoh 	int i;
     86   1.1   msaitoh 	int index;
     87   1.1   msaitoh 
     88   1.1   msaitoh 	for (i = 0; i < adapter->num_queues; i++) {
     89   1.1   msaitoh 		index = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, i);
     90   1.1   msaitoh 		adapter->rx_rings[i].me = index;
     91   1.1   msaitoh 		adapter->tx_rings[i].me = index;
     92   1.1   msaitoh 	}
     93   1.1   msaitoh }
     94   1.1   msaitoh 
     95   1.1   msaitoh /* Support functions for SR-IOV/VF management */
     96   1.1   msaitoh static inline void
     97   1.3   msaitoh ixgbe_send_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
     98   1.1   msaitoh {
     99   1.1   msaitoh 	if (vf->flags & IXGBE_VF_CTS)
    100   1.1   msaitoh 		msg |= IXGBE_VT_MSGTYPE_CTS;
    101   1.1   msaitoh 
    102   1.3   msaitoh 	adapter->hw.mbx.ops.write(&adapter->hw, &msg, 1, vf->pool);
    103   1.1   msaitoh }
    104   1.1   msaitoh 
    105   1.1   msaitoh static inline void
    106   1.1   msaitoh ixgbe_send_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
    107   1.1   msaitoh {
    108   1.1   msaitoh 	msg &= IXGBE_VT_MSG_MASK;
    109   1.3   msaitoh 	ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_ACK);
    110   1.1   msaitoh }
    111   1.1   msaitoh 
    112   1.1   msaitoh static inline void
    113   1.1   msaitoh ixgbe_send_vf_nack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
    114   1.1   msaitoh {
    115   1.1   msaitoh 	msg &= IXGBE_VT_MSG_MASK;
    116   1.3   msaitoh 	ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_NACK);
    117   1.1   msaitoh }
    118   1.1   msaitoh 
    119   1.1   msaitoh static inline void
    120   1.1   msaitoh ixgbe_process_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf)
    121   1.1   msaitoh {
    122   1.1   msaitoh 	if (!(vf->flags & IXGBE_VF_CTS))
    123   1.1   msaitoh 		ixgbe_send_vf_nack(adapter, vf, 0);
    124   1.1   msaitoh }
    125   1.1   msaitoh 
    126   1.9       mrg static inline bool
    127   1.1   msaitoh ixgbe_vf_mac_changed(struct ixgbe_vf *vf, const uint8_t *mac)
    128   1.1   msaitoh {
    129   1.1   msaitoh 	return (bcmp(mac, vf->ether_addr, ETHER_ADDR_LEN) != 0);
    130   1.1   msaitoh }
    131   1.1   msaitoh 
    132   1.1   msaitoh static inline int
    133   1.1   msaitoh ixgbe_vf_queues(int mode)
    134   1.1   msaitoh {
    135   1.1   msaitoh 	switch (mode) {
    136   1.1   msaitoh 	case IXGBE_64_VM:
    137   1.1   msaitoh 		return (2);
    138   1.1   msaitoh 	case IXGBE_32_VM:
    139   1.1   msaitoh 		return (4);
    140   1.1   msaitoh 	case IXGBE_NO_VM:
    141   1.1   msaitoh 	default:
    142   1.1   msaitoh 		return (0);
    143   1.1   msaitoh 	}
    144   1.1   msaitoh }
    145   1.1   msaitoh 
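/*
 * Map a VF-relative queue number to its absolute queue index for the
 * given pool; e.g. in 32-pool mode (4 queues per pool), queue 1 of
 * pool 3 maps to absolute index 3 * 4 + 1 = 13.
 */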
    146   1.1   msaitoh inline int
    147   1.1   msaitoh ixgbe_vf_que_index(int mode, int vfnum, int num)
    148   1.1   msaitoh {
    149   1.1   msaitoh 	return ((vfnum * ixgbe_vf_queues(mode)) + num);
    150   1.1   msaitoh }
    151   1.1   msaitoh 
    152   1.1   msaitoh static inline void
    153   1.1   msaitoh ixgbe_update_max_frame(struct adapter * adapter, int max_frame)
    154   1.1   msaitoh {
    155   1.1   msaitoh 	if (adapter->max_frame_size < max_frame)
    156   1.1   msaitoh 		adapter->max_frame_size = max_frame;
    157   1.1   msaitoh }
    158   1.1   msaitoh 
    159   1.1   msaitoh inline u32
    160   1.1   msaitoh ixgbe_get_mrqc(int iov_mode)
    161   1.1   msaitoh {
    162   1.1   msaitoh 	u32 mrqc;
    163   1.1   msaitoh 
    164   1.1   msaitoh 	switch (iov_mode) {
    165   1.1   msaitoh 	case IXGBE_64_VM:
    166   1.1   msaitoh 		mrqc = IXGBE_MRQC_VMDQRSS64EN;
    167   1.1   msaitoh 		break;
    168   1.1   msaitoh 	case IXGBE_32_VM:
    169   1.1   msaitoh 		mrqc = IXGBE_MRQC_VMDQRSS32EN;
    170   1.1   msaitoh 		break;
    171   1.1   msaitoh 	case IXGBE_NO_VM:
    172   1.1   msaitoh 		mrqc = 0;
    173   1.1   msaitoh 		break;
    174   1.1   msaitoh 	default:
    175   1.1   msaitoh 		panic("Unexpected SR-IOV mode %d", iov_mode);
    176   1.1   msaitoh 	}
    177   1.1   msaitoh 
    178   1.1   msaitoh 	return mrqc;
    179   1.1   msaitoh }
    180   1.1   msaitoh 
    181   1.1   msaitoh 
    182   1.1   msaitoh inline u32
    183   1.1   msaitoh ixgbe_get_mtqc(int iov_mode)
    184   1.1   msaitoh {
    185   1.1   msaitoh 	uint32_t mtqc;
    186   1.1   msaitoh 
    187   1.1   msaitoh 	switch (iov_mode) {
    188   1.1   msaitoh 	case IXGBE_64_VM:
    189   1.1   msaitoh 		mtqc = IXGBE_MTQC_64VF | IXGBE_MTQC_VT_ENA;
    190   1.1   msaitoh 		break;
    191   1.1   msaitoh 	case IXGBE_32_VM:
    192   1.1   msaitoh 		mtqc = IXGBE_MTQC_32VF | IXGBE_MTQC_VT_ENA;
    193   1.1   msaitoh 		break;
    194   1.1   msaitoh 	case IXGBE_NO_VM:
    195   1.1   msaitoh 		mtqc = IXGBE_MTQC_64Q_1PB;
    196   1.1   msaitoh 		break;
    197   1.1   msaitoh 	default:
    198   1.1   msaitoh 		panic("Unexpected SR-IOV mode %d", iov_mode);
    199   1.1   msaitoh 	}
    200   1.1   msaitoh 
    201   1.1   msaitoh 	return mtqc;
    202   1.1   msaitoh }
    203   1.1   msaitoh 
    204   1.1   msaitoh void
    205   1.1   msaitoh ixgbe_ping_all_vfs(struct adapter *adapter)
    206   1.1   msaitoh {
    207   1.1   msaitoh 	struct ixgbe_vf *vf;
    208   1.1   msaitoh 
    209   1.1   msaitoh 	for (int i = 0; i < adapter->num_vfs; i++) {
    210   1.1   msaitoh 		vf = &adapter->vfs[i];
    211   1.1   msaitoh 		if (vf->flags & IXGBE_VF_ACTIVE)
    212   1.3   msaitoh 			ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
    213   1.1   msaitoh 	}
    214   1.1   msaitoh } /* ixgbe_ping_all_vfs */
    215   1.1   msaitoh 
    216   1.1   msaitoh 
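/*
 * Program VMOLR/VMVIR for a VF's default VLAN: tag 0 accepts untagged
 * frames and leaves tagging to the VM, while a non-zero tag requires
 * tagged traffic and inserts that tag as the pool's default.
 */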
    217   1.1   msaitoh static void
    218   1.1   msaitoh ixgbe_vf_set_default_vlan(struct adapter *adapter, struct ixgbe_vf *vf,
    219   1.1   msaitoh                           uint16_t tag)
    220   1.1   msaitoh {
    221   1.1   msaitoh 	struct ixgbe_hw *hw;
    222   1.1   msaitoh 	uint32_t vmolr, vmvir;
    223   1.1   msaitoh 
    224   1.1   msaitoh 	hw = &adapter->hw;
    225   1.1   msaitoh 
    226   1.1   msaitoh 	vf->vlan_tag = tag;
    227   1.1   msaitoh 
    228   1.1   msaitoh 	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));
    229   1.1   msaitoh 
    230   1.1   msaitoh 	/* Do not receive packets that pass inexact filters. */
    231   1.1   msaitoh 	vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);
    232   1.1   msaitoh 
	/* Disable Multicast Promiscuous Mode. */
    234   1.1   msaitoh 	vmolr &= ~IXGBE_VMOLR_MPE;
    235   1.1   msaitoh 
    236   1.1   msaitoh 	/* Accept broadcasts. */
    237   1.1   msaitoh 	vmolr |= IXGBE_VMOLR_BAM;
    238   1.1   msaitoh 
    239   1.1   msaitoh 	if (tag == 0) {
    240   1.1   msaitoh 		/* Accept non-vlan tagged traffic. */
    241   1.2   msaitoh 		vmolr |= IXGBE_VMOLR_AUPE;
    242   1.1   msaitoh 
    243   1.1   msaitoh 		/* Allow VM to tag outgoing traffic; no default tag. */
    244   1.1   msaitoh 		vmvir = 0;
    245   1.1   msaitoh 	} else {
    246   1.1   msaitoh 		/* Require vlan-tagged traffic. */
    247   1.1   msaitoh 		vmolr &= ~IXGBE_VMOLR_AUPE;
    248   1.1   msaitoh 
    249   1.1   msaitoh 		/* Tag all traffic with provided vlan tag. */
    250   1.1   msaitoh 		vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT);
    251   1.1   msaitoh 	}
    252   1.1   msaitoh 	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr);
    253   1.1   msaitoh 	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
    254   1.1   msaitoh } /* ixgbe_vf_set_default_vlan */
    255   1.1   msaitoh 
    256   1.1   msaitoh 
static void
ixgbe_clear_vfmbmem(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	uint32_t vf_index = IXGBE_VF_INDEX(vf->pool);
	uint16_t mbx_size = hw->mbx.size;
	uint16_t i;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	for (i = 0; i < mbx_size; ++i)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_index), i, 0x0);
} /* ixgbe_clear_vfmbmem */
    269   1.5   msaitoh 
    270   1.5   msaitoh 
    271   1.9       mrg static bool
    272   1.1   msaitoh ixgbe_vf_frame_size_compatible(struct adapter *adapter, struct ixgbe_vf *vf)
    273   1.1   msaitoh {
    274   1.1   msaitoh 
    275   1.1   msaitoh 	/*
    276   1.1   msaitoh 	 * Frame size compatibility between PF and VF is only a problem on
    277   1.1   msaitoh 	 * 82599-based cards.  X540 and later support any combination of jumbo
    278   1.1   msaitoh 	 * frames on PFs and VFs.
    279   1.1   msaitoh 	 */
    280   1.1   msaitoh 	if (adapter->hw.mac.type != ixgbe_mac_82599EB)
    281   1.1   msaitoh 		return (TRUE);
    282   1.1   msaitoh 
    283   1.1   msaitoh 	switch (vf->api_ver) {
    284   1.1   msaitoh 	case IXGBE_API_VER_1_0:
    285   1.1   msaitoh 	case IXGBE_API_VER_UNKNOWN:
    286   1.1   msaitoh 		/*
    287   1.1   msaitoh 		 * On legacy (1.0 and older) VF versions, we don't support jumbo
    288   1.1   msaitoh 		 * frames on either the PF or the VF.
    289   1.1   msaitoh 		 */
    290   1.1   msaitoh 		if (adapter->max_frame_size > ETHER_MAX_LEN ||
    291   1.1   msaitoh 		    vf->max_frame_size > ETHER_MAX_LEN)
    292   1.1   msaitoh 			return (FALSE);
    293   1.1   msaitoh 
    294   1.1   msaitoh 		return (TRUE);
    295   1.1   msaitoh 
    296   1.1   msaitoh 		break;
    297   1.1   msaitoh 	case IXGBE_API_VER_1_1:
    298   1.1   msaitoh 	default:
    299   1.1   msaitoh 		/*
    300   1.1   msaitoh 		 * 1.1 or later VF versions always work if they aren't using
    301   1.1   msaitoh 		 * jumbo frames.
    302   1.1   msaitoh 		 */
    303   1.1   msaitoh 		if (vf->max_frame_size <= ETHER_MAX_LEN)
    304   1.1   msaitoh 			return (TRUE);
    305   1.1   msaitoh 
    306   1.1   msaitoh 		/*
    307   1.1   msaitoh 		 * Jumbo frames only work with VFs if the PF is also using jumbo
    308   1.1   msaitoh 		 * frames.
    309   1.1   msaitoh 		 */
    310   1.1   msaitoh 		if (adapter->max_frame_size <= ETHER_MAX_LEN)
    311   1.1   msaitoh 			return (TRUE);
    312   1.1   msaitoh 
    313   1.1   msaitoh 		return (FALSE);
    314   1.1   msaitoh 	}
    315   1.1   msaitoh } /* ixgbe_vf_frame_size_compatible */
    316   1.1   msaitoh 
    317   1.1   msaitoh 
    318   1.1   msaitoh static void
    319   1.1   msaitoh ixgbe_process_vf_reset(struct adapter *adapter, struct ixgbe_vf *vf)
    320   1.1   msaitoh {
    321   1.1   msaitoh 	ixgbe_vf_set_default_vlan(adapter, vf, vf->default_vlan);
    322   1.1   msaitoh 
    323   1.1   msaitoh 	// XXX clear multicast addresses
    324   1.1   msaitoh 
    325   1.1   msaitoh 	ixgbe_clear_rar(&adapter->hw, vf->rar_index);
	ixgbe_clear_vfmbmem(adapter, vf);
    327   1.6   msaitoh 	ixgbe_toggle_txdctl(&adapter->hw, IXGBE_VF_INDEX(vf->pool));
    328   1.1   msaitoh 
    329   1.1   msaitoh 	vf->api_ver = IXGBE_API_VER_UNKNOWN;
    330   1.1   msaitoh } /* ixgbe_process_vf_reset */
    331   1.1   msaitoh 
    332   1.1   msaitoh 
    333   1.1   msaitoh static void
    334   1.1   msaitoh ixgbe_vf_enable_transmit(struct adapter *adapter, struct ixgbe_vf *vf)
    335   1.1   msaitoh {
    336   1.1   msaitoh 	struct ixgbe_hw *hw;
    337   1.1   msaitoh 	uint32_t vf_index, vfte;
    338   1.1   msaitoh 
    339   1.1   msaitoh 	hw = &adapter->hw;
    340   1.1   msaitoh 
    341   1.1   msaitoh 	vf_index = IXGBE_VF_INDEX(vf->pool);
    342   1.1   msaitoh 	vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index));
    343   1.1   msaitoh 	vfte |= IXGBE_VF_BIT(vf->pool);
    344   1.1   msaitoh 	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte);
    345   1.1   msaitoh } /* ixgbe_vf_enable_transmit */
    346   1.1   msaitoh 
    347   1.1   msaitoh 
    348   1.1   msaitoh static void
    349   1.1   msaitoh ixgbe_vf_enable_receive(struct adapter *adapter, struct ixgbe_vf *vf)
    350   1.1   msaitoh {
    351   1.1   msaitoh 	struct ixgbe_hw *hw;
    352   1.1   msaitoh 	uint32_t vf_index, vfre;
    353   1.1   msaitoh 
    354   1.1   msaitoh 	hw = &adapter->hw;
    355   1.1   msaitoh 
    356   1.1   msaitoh 	vf_index = IXGBE_VF_INDEX(vf->pool);
    357   1.1   msaitoh 	vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index));
    358   1.1   msaitoh 	if (ixgbe_vf_frame_size_compatible(adapter, vf))
    359   1.1   msaitoh 		vfre |= IXGBE_VF_BIT(vf->pool);
    360   1.1   msaitoh 	else
    361   1.1   msaitoh 		vfre &= ~IXGBE_VF_BIT(vf->pool);
    362   1.1   msaitoh 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre);
    363   1.1   msaitoh } /* ixgbe_vf_enable_receive */
    364   1.1   msaitoh 
    365   1.1   msaitoh 
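/*
 * Handle a VF_RESET mailbox message: reset the VF state, reprogram its
 * RAR entry if it has a valid MAC address, re-enable TX/RX for the pool,
 * and reply with the permanent MAC address and multicast filter type.
 */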
    366   1.1   msaitoh static void
    367   1.1   msaitoh ixgbe_vf_reset_msg(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
    368   1.1   msaitoh {
    369   1.1   msaitoh 	struct ixgbe_hw *hw;
    370   1.1   msaitoh 	uint32_t ack;
    371   1.1   msaitoh 	uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN];
    372   1.1   msaitoh 
    373   1.1   msaitoh 	hw = &adapter->hw;
    374   1.1   msaitoh 
    375   1.1   msaitoh 	ixgbe_process_vf_reset(adapter, vf);
    376   1.1   msaitoh 
    377   1.1   msaitoh 	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
    378   1.1   msaitoh 		ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr,
    379   1.1   msaitoh 		    vf->pool, TRUE);
    380   1.1   msaitoh 		ack = IXGBE_VT_MSGTYPE_ACK;
    381   1.1   msaitoh 	} else
    382   1.1   msaitoh 		ack = IXGBE_VT_MSGTYPE_NACK;
    383   1.1   msaitoh 
    384   1.1   msaitoh 	ixgbe_vf_enable_transmit(adapter, vf);
    385   1.1   msaitoh 	ixgbe_vf_enable_receive(adapter, vf);
    386   1.1   msaitoh 
    387   1.1   msaitoh 	vf->flags |= IXGBE_VF_CTS;
    388   1.1   msaitoh 
    389   1.2   msaitoh 	resp[0] = IXGBE_VF_RESET | ack;
    390   1.1   msaitoh 	bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN);
    391   1.1   msaitoh 	resp[3] = hw->mac.mc_filter_type;
    392   1.1   msaitoh 	hw->mbx.ops.write(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
    393   1.1   msaitoh } /* ixgbe_vf_reset_msg */
    394   1.1   msaitoh 
    395   1.1   msaitoh 
    396   1.1   msaitoh static void
    397   1.1   msaitoh ixgbe_vf_set_mac(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
    398   1.1   msaitoh {
    399   1.1   msaitoh 	uint8_t *mac;
    400   1.1   msaitoh 
    401   1.1   msaitoh 	mac = (uint8_t*)&msg[1];
    402   1.1   msaitoh 
    403   1.1   msaitoh 	/* Check that the VF has permission to change the MAC address. */
    404   1.1   msaitoh 	if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
    405   1.1   msaitoh 		ixgbe_send_vf_nack(adapter, vf, msg[0]);
    406   1.1   msaitoh 		return;
    407   1.1   msaitoh 	}
    408   1.1   msaitoh 
    409   1.1   msaitoh 	if (ixgbe_validate_mac_addr(mac) != 0) {
    410   1.1   msaitoh 		ixgbe_send_vf_nack(adapter, vf, msg[0]);
    411   1.1   msaitoh 		return;
    412   1.1   msaitoh 	}
    413   1.1   msaitoh 
    414   1.1   msaitoh 	bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
    415   1.1   msaitoh 
    416   1.1   msaitoh 	ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr, vf->pool,
    417   1.1   msaitoh 	    TRUE);
    418   1.1   msaitoh 
    419   1.1   msaitoh 	ixgbe_send_vf_ack(adapter, vf, msg[0]);
    420   1.1   msaitoh } /* ixgbe_vf_set_mac */
    421   1.1   msaitoh 
    422   1.1   msaitoh 
/*
 * VF multicast addresses are set by marking the appropriate bit in
 * one of 128 32-bit MTA registers (4096 hash bits in total).
 */
    427   1.1   msaitoh static void
    428   1.1   msaitoh ixgbe_vf_set_mc_addr(struct adapter *adapter, struct ixgbe_vf *vf, u32 *msg)
    429   1.1   msaitoh {
    430   1.1   msaitoh 	u16	*list = (u16*)&msg[1];
    431   1.1   msaitoh 	int	entries;
    432   1.1   msaitoh 	u32	vmolr, vec_bit, vec_reg, mta_reg;
    433   1.1   msaitoh 
    434   1.1   msaitoh 	entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
    435   1.4  riastrad 	entries = uimin(entries, IXGBE_MAX_VF_MC);
    436   1.1   msaitoh 
    437   1.1   msaitoh 	vmolr = IXGBE_READ_REG(&adapter->hw, IXGBE_VMOLR(vf->pool));
    438   1.1   msaitoh 
    439   1.1   msaitoh 	vf->num_mc_hashes = entries;
    440   1.1   msaitoh 
    441   1.1   msaitoh 	/* Set the appropriate MTA bit */
    442   1.1   msaitoh 	for (int i = 0; i < entries; i++) {
    443   1.1   msaitoh 		vf->mc_hash[i] = list[i];
    444   1.1   msaitoh 		vec_reg = (vf->mc_hash[i] >> 5) & 0x7F;
    445   1.1   msaitoh 		vec_bit = vf->mc_hash[i] & 0x1F;
    446   1.1   msaitoh 		mta_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_MTA(vec_reg));
    447   1.1   msaitoh 		mta_reg |= (1 << vec_bit);
    448   1.1   msaitoh 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_MTA(vec_reg), mta_reg);
    449   1.1   msaitoh 	}
    450   1.1   msaitoh 
    451   1.1   msaitoh 	vmolr |= IXGBE_VMOLR_ROMPE;
    452   1.1   msaitoh 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(vf->pool), vmolr);
    453   1.1   msaitoh 	ixgbe_send_vf_ack(adapter, vf, msg[0]);
    454   1.1   msaitoh } /* ixgbe_vf_set_mc_addr */
    455   1.1   msaitoh 
    456   1.1   msaitoh 
    457   1.1   msaitoh static void
    458   1.1   msaitoh ixgbe_vf_set_vlan(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
    459   1.1   msaitoh {
    460   1.1   msaitoh 	struct ixgbe_hw *hw;
    461   1.1   msaitoh 	int enable;
    462   1.1   msaitoh 	uint16_t tag;
    463   1.1   msaitoh 
    464   1.1   msaitoh 	hw = &adapter->hw;
    465   1.1   msaitoh 	enable = IXGBE_VT_MSGINFO(msg[0]);
    466   1.1   msaitoh 	tag = msg[1] & IXGBE_VLVF_VLANID_MASK;
    467   1.1   msaitoh 
    468   1.1   msaitoh 	if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
    469   1.1   msaitoh 		ixgbe_send_vf_nack(adapter, vf, msg[0]);
    470   1.1   msaitoh 		return;
    471   1.1   msaitoh 	}
    472   1.1   msaitoh 
    473   1.1   msaitoh 	/* It is illegal to enable vlan tag 0. */
    474   1.2   msaitoh 	if (tag == 0 && enable != 0) {
    475   1.1   msaitoh 		ixgbe_send_vf_nack(adapter, vf, msg[0]);
    476   1.1   msaitoh 		return;
    477   1.1   msaitoh 	}
    478   1.1   msaitoh 
    479   1.1   msaitoh 	ixgbe_set_vfta(hw, tag, vf->pool, enable, false);
    480   1.1   msaitoh 	ixgbe_send_vf_ack(adapter, vf, msg[0]);
    481   1.1   msaitoh } /* ixgbe_vf_set_vlan */
    482   1.1   msaitoh 
    483   1.1   msaitoh 
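/*
 * Handle a SET_LPE (maximum frame size) request: sanity-check the size
 * the VF asked for, record it, re-evaluate whether the VF may receive,
 * and raise the PF's MHADD max frame size if necessary.
 */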
    484   1.1   msaitoh static void
    485   1.1   msaitoh ixgbe_vf_set_lpe(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
    486   1.1   msaitoh {
    487   1.1   msaitoh 	struct ixgbe_hw *hw;
    488   1.1   msaitoh 	uint32_t vf_max_size, pf_max_size, mhadd;
    489   1.1   msaitoh 
    490   1.1   msaitoh 	hw = &adapter->hw;
    491   1.1   msaitoh 	vf_max_size = msg[1];
    492   1.1   msaitoh 
    493   1.1   msaitoh 	if (vf_max_size < ETHER_CRC_LEN) {
    494   1.1   msaitoh 		/* We intentionally ACK invalid LPE requests. */
    495   1.1   msaitoh 		ixgbe_send_vf_ack(adapter, vf, msg[0]);
    496   1.1   msaitoh 		return;
    497   1.1   msaitoh 	}
    498   1.1   msaitoh 
    499   1.1   msaitoh 	vf_max_size -= ETHER_CRC_LEN;
    500   1.1   msaitoh 
    501   1.1   msaitoh 	if (vf_max_size > IXGBE_MAX_FRAME_SIZE) {
    502   1.1   msaitoh 		/* We intentionally ACK invalid LPE requests. */
    503   1.1   msaitoh 		ixgbe_send_vf_ack(adapter, vf, msg[0]);
    504   1.1   msaitoh 		return;
    505   1.1   msaitoh 	}
    506   1.1   msaitoh 
    507   1.1   msaitoh 	vf->max_frame_size = vf_max_size;
    508   1.1   msaitoh 	ixgbe_update_max_frame(adapter, vf->max_frame_size);
    509   1.1   msaitoh 
    510   1.1   msaitoh 	/*
    511   1.1   msaitoh 	 * We might have to disable reception to this VF if the frame size is
    512   1.1   msaitoh 	 * not compatible with the config on the PF.
    513   1.1   msaitoh 	 */
    514   1.1   msaitoh 	ixgbe_vf_enable_receive(adapter, vf);
    515   1.1   msaitoh 
    516   1.1   msaitoh 	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
    517   1.1   msaitoh 	pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
    518   1.1   msaitoh 
    519   1.1   msaitoh 	if (pf_max_size < adapter->max_frame_size) {
    520   1.1   msaitoh 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
    521   1.1   msaitoh 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
    522   1.1   msaitoh 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
    523   1.1   msaitoh 	}
    524   1.1   msaitoh 
    525   1.1   msaitoh 	ixgbe_send_vf_ack(adapter, vf, msg[0]);
    526   1.1   msaitoh } /* ixgbe_vf_set_lpe */
    527   1.1   msaitoh 
    528   1.1   msaitoh 
    529   1.1   msaitoh static void
    530   1.1   msaitoh ixgbe_vf_set_macvlan(struct adapter *adapter, struct ixgbe_vf *vf,
    531   1.1   msaitoh                      uint32_t *msg)
    532   1.1   msaitoh {
    533   1.1   msaitoh 	//XXX implement this
    534   1.1   msaitoh 	ixgbe_send_vf_nack(adapter, vf, msg[0]);
    535   1.1   msaitoh } /* ixgbe_vf_set_macvlan */
    536   1.1   msaitoh 
    537   1.1   msaitoh 
    538   1.1   msaitoh static void
    539   1.1   msaitoh ixgbe_vf_api_negotiate(struct adapter *adapter, struct ixgbe_vf *vf,
    540   1.1   msaitoh     uint32_t *msg)
    541   1.1   msaitoh {
    542   1.1   msaitoh 
    543   1.1   msaitoh 	switch (msg[1]) {
    544   1.1   msaitoh 	case IXGBE_API_VER_1_0:
    545   1.1   msaitoh 	case IXGBE_API_VER_1_1:
    546   1.1   msaitoh 		vf->api_ver = msg[1];
    547   1.1   msaitoh 		ixgbe_send_vf_ack(adapter, vf, msg[0]);
    548   1.1   msaitoh 		break;
    549   1.1   msaitoh 	default:
    550   1.1   msaitoh 		vf->api_ver = IXGBE_API_VER_UNKNOWN;
    551   1.1   msaitoh 		ixgbe_send_vf_nack(adapter, vf, msg[0]);
    552   1.1   msaitoh 		break;
    553   1.1   msaitoh 	}
    554   1.1   msaitoh } /* ixgbe_vf_api_negotiate */
    555   1.1   msaitoh 
    556   1.1   msaitoh 
    557   1.1   msaitoh static void
    558   1.1   msaitoh ixgbe_vf_get_queues(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
    559   1.1   msaitoh {
    560   1.1   msaitoh 	struct ixgbe_hw *hw;
    561   1.1   msaitoh 	uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
    562   1.1   msaitoh 	int num_queues;
    563   1.1   msaitoh 
    564   1.1   msaitoh 	hw = &adapter->hw;
    565   1.1   msaitoh 
    566   1.1   msaitoh 	/* GET_QUEUES is not supported on pre-1.1 APIs. */
	switch (vf->api_ver) {
    568   1.1   msaitoh 	case IXGBE_API_VER_1_0:
    569   1.1   msaitoh 	case IXGBE_API_VER_UNKNOWN:
    570   1.1   msaitoh 		ixgbe_send_vf_nack(adapter, vf, msg[0]);
    571   1.1   msaitoh 		return;
    572   1.1   msaitoh 	}
    573   1.1   msaitoh 
    574   1.1   msaitoh 	resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK |
    575   1.1   msaitoh 	    IXGBE_VT_MSGTYPE_CTS;
    576   1.1   msaitoh 
    577   1.1   msaitoh 	num_queues = ixgbe_vf_queues(adapter->iov_mode);
    578   1.1   msaitoh 	resp[IXGBE_VF_TX_QUEUES] = num_queues;
    579   1.1   msaitoh 	resp[IXGBE_VF_RX_QUEUES] = num_queues;
    580   1.1   msaitoh 	resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
    581   1.1   msaitoh 	resp[IXGBE_VF_DEF_QUEUE] = 0;
    582   1.1   msaitoh 
    583   1.1   msaitoh 	hw->mbx.ops.write(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
    584   1.1   msaitoh } /* ixgbe_vf_get_queues */
    585   1.1   msaitoh 
    586   1.1   msaitoh 
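/*
 * Read one mailbox message from a VF and dispatch on its type.  A reset
 * request is always handled; anything else requires the VF to have
 * completed the clear-to-send (CTS) handshake first.
 */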
    587   1.1   msaitoh static void
    588   1.1   msaitoh ixgbe_process_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf)
    589   1.1   msaitoh {
    590   1.1   msaitoh 	struct ixgbe_hw *hw;
    591   1.1   msaitoh 	uint32_t msg[IXGBE_VFMAILBOX_SIZE];
    592   1.1   msaitoh 	int error;
    593   1.1   msaitoh 
    594   1.1   msaitoh 	hw = &adapter->hw;
    595   1.1   msaitoh 
    596   1.1   msaitoh 	error = hw->mbx.ops.read(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool);
    597   1.1   msaitoh 
    598   1.1   msaitoh 	if (error != 0)
    599   1.1   msaitoh 		return;
    600   1.1   msaitoh 
    601   1.2   msaitoh 	CTR3(KTR_MALLOC, "%s: received msg %x from %d", adapter->ifp->if_xname,
    602   1.2   msaitoh 	    msg[0], vf->pool);
    603   1.1   msaitoh 	if (msg[0] == IXGBE_VF_RESET) {
    604   1.1   msaitoh 		ixgbe_vf_reset_msg(adapter, vf, msg);
    605   1.1   msaitoh 		return;
    606   1.1   msaitoh 	}
    607   1.1   msaitoh 
    608   1.1   msaitoh 	if (!(vf->flags & IXGBE_VF_CTS)) {
    609   1.1   msaitoh 		ixgbe_send_vf_nack(adapter, vf, msg[0]);
    610   1.1   msaitoh 		return;
    611   1.1   msaitoh 	}
    612   1.1   msaitoh 
    613   1.1   msaitoh 	switch (msg[0] & IXGBE_VT_MSG_MASK) {
    614   1.1   msaitoh 	case IXGBE_VF_SET_MAC_ADDR:
    615   1.1   msaitoh 		ixgbe_vf_set_mac(adapter, vf, msg);
    616   1.1   msaitoh 		break;
    617   1.1   msaitoh 	case IXGBE_VF_SET_MULTICAST:
    618   1.1   msaitoh 		ixgbe_vf_set_mc_addr(adapter, vf, msg);
    619   1.1   msaitoh 		break;
    620   1.1   msaitoh 	case IXGBE_VF_SET_VLAN:
    621   1.1   msaitoh 		ixgbe_vf_set_vlan(adapter, vf, msg);
    622   1.1   msaitoh 		break;
    623   1.1   msaitoh 	case IXGBE_VF_SET_LPE:
    624   1.1   msaitoh 		ixgbe_vf_set_lpe(adapter, vf, msg);
    625   1.1   msaitoh 		break;
    626   1.1   msaitoh 	case IXGBE_VF_SET_MACVLAN:
    627   1.1   msaitoh 		ixgbe_vf_set_macvlan(adapter, vf, msg);
    628   1.1   msaitoh 		break;
    629   1.1   msaitoh 	case IXGBE_VF_API_NEGOTIATE:
    630   1.1   msaitoh 		ixgbe_vf_api_negotiate(adapter, vf, msg);
    631   1.1   msaitoh 		break;
    632   1.1   msaitoh 	case IXGBE_VF_GET_QUEUES:
    633   1.1   msaitoh 		ixgbe_vf_get_queues(adapter, vf, msg);
    634   1.1   msaitoh 		break;
    635   1.1   msaitoh 	default:
    636   1.1   msaitoh 		ixgbe_send_vf_nack(adapter, vf, msg[0]);
    637   1.1   msaitoh 	}
    638   1.1   msaitoh } /* ixgbe_process_vf_msg */
    639   1.1   msaitoh 
    640   1.1   msaitoh 
    641   1.1   msaitoh /* Tasklet for handling VF -> PF mailbox messages */
    642   1.1   msaitoh void
    643   1.1   msaitoh ixgbe_handle_mbx(void *context, int pending)
    644   1.1   msaitoh {
    645   1.2   msaitoh 	struct adapter *adapter = context;
    646   1.1   msaitoh 	struct ixgbe_hw *hw;
    647   1.1   msaitoh 	struct ixgbe_vf *vf;
    648   1.1   msaitoh 	int i;
    649   1.1   msaitoh 
    650   1.8   msaitoh 	KASSERT(mutex_owned(&adapter->core_mtx));
    651   1.8   msaitoh 
    652   1.1   msaitoh 	hw = &adapter->hw;
    653   1.1   msaitoh 
    654   1.1   msaitoh 	for (i = 0; i < adapter->num_vfs; i++) {
    655   1.1   msaitoh 		vf = &adapter->vfs[i];
    656   1.1   msaitoh 
    657   1.1   msaitoh 		if (vf->flags & IXGBE_VF_ACTIVE) {
    658   1.1   msaitoh 			if (hw->mbx.ops.check_for_rst(hw, vf->pool) == 0)
    659   1.1   msaitoh 				ixgbe_process_vf_reset(adapter, vf);
    660   1.1   msaitoh 
    661   1.1   msaitoh 			if (hw->mbx.ops.check_for_msg(hw, vf->pool) == 0)
    662   1.1   msaitoh 				ixgbe_process_vf_msg(adapter, vf);
    663   1.1   msaitoh 
    664   1.1   msaitoh 			if (hw->mbx.ops.check_for_ack(hw, vf->pool) == 0)
    665   1.1   msaitoh 				ixgbe_process_vf_ack(adapter, vf);
    666   1.1   msaitoh 		}
    667   1.1   msaitoh 	}
    668   1.1   msaitoh } /* ixgbe_handle_mbx */
    669   1.1   msaitoh 
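/*
 * Called when SR-IOV is enabled on the PF: pick 32- or 64-pool VT mode
 * from the requested VF count (one pool is reserved for the PF),
 * allocate the VF table, and reinitialize the adapter with SR-IOV on.
 */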
    670   1.1   msaitoh int
    671   1.1   msaitoh ixgbe_init_iov(device_t dev, u16 num_vfs, const nvlist_t *config)
    672   1.1   msaitoh {
    673   1.1   msaitoh 	struct adapter *adapter;
    674   1.1   msaitoh 	int retval = 0;
    675   1.1   msaitoh 
    676   1.1   msaitoh 	adapter = device_get_softc(dev);
    677   1.1   msaitoh 	adapter->iov_mode = IXGBE_NO_VM;
    678   1.1   msaitoh 
    679   1.1   msaitoh 	if (num_vfs == 0) {
    680   1.1   msaitoh 		/* Would we ever get num_vfs = 0? */
    681   1.1   msaitoh 		retval = EINVAL;
    682   1.1   msaitoh 		goto err_init_iov;
    683   1.1   msaitoh 	}
    684   1.1   msaitoh 
    685   1.1   msaitoh 	/*
    686   1.1   msaitoh 	 * We've got to reserve a VM's worth of queues for the PF,
    687   1.1   msaitoh 	 * thus we go into "64 VF mode" if 32+ VFs are requested.
    688   1.1   msaitoh 	 * With 64 VFs, you can only have two queues per VF.
    689   1.1   msaitoh 	 * With 32 VFs, you can have up to four queues per VF.
    690   1.1   msaitoh 	 */
    691   1.1   msaitoh 	if (num_vfs >= IXGBE_32_VM)
    692   1.1   msaitoh 		adapter->iov_mode = IXGBE_64_VM;
    693   1.1   msaitoh 	else
    694   1.1   msaitoh 		adapter->iov_mode = IXGBE_32_VM;
    695   1.1   msaitoh 
    696   1.1   msaitoh 	/* Again, reserving 1 VM's worth of queues for the PF */
    697   1.1   msaitoh 	adapter->pool = adapter->iov_mode - 1;
    698   1.1   msaitoh 
    699   1.1   msaitoh 	if ((num_vfs > adapter->pool) || (num_vfs >= IXGBE_64_VM)) {
    700   1.1   msaitoh 		retval = ENOSPC;
    701   1.1   msaitoh 		goto err_init_iov;
    702   1.1   msaitoh 	}
    703   1.1   msaitoh 
    704   1.1   msaitoh 	IXGBE_CORE_LOCK(adapter);
    705   1.1   msaitoh 
    706   1.1   msaitoh 	adapter->vfs = malloc(sizeof(*adapter->vfs) * num_vfs, M_IXGBE_SRIOV,
    707   1.1   msaitoh 	    M_NOWAIT | M_ZERO);
    708   1.1   msaitoh 
    709   1.1   msaitoh 	if (adapter->vfs == NULL) {
    710   1.1   msaitoh 		retval = ENOMEM;
    711   1.1   msaitoh 		IXGBE_CORE_UNLOCK(adapter);
    712   1.1   msaitoh 		goto err_init_iov;
    713   1.1   msaitoh 	}
    714   1.1   msaitoh 
    715   1.1   msaitoh 	adapter->num_vfs = num_vfs;
    716   1.2   msaitoh 
	/*
	 * Set the SRIOV flag now as it's needed
	 * by ixgbe_init_locked().
	 */
    719   1.2   msaitoh 	adapter->feat_en |= IXGBE_FEATURE_SRIOV;
    720   1.1   msaitoh 	adapter->init_locked(adapter);
    721   1.1   msaitoh 
    722   1.1   msaitoh 	IXGBE_CORE_UNLOCK(adapter);
    723   1.1   msaitoh 
    724   1.2   msaitoh 	return (retval);
    725   1.1   msaitoh 
    726   1.1   msaitoh err_init_iov:
    727   1.1   msaitoh 	adapter->num_vfs = 0;
    728   1.1   msaitoh 	adapter->pool = 0;
    729   1.1   msaitoh 	adapter->iov_mode = IXGBE_NO_VM;
    730   1.1   msaitoh 
    731   1.2   msaitoh 	return (retval);
    732   1.1   msaitoh } /* ixgbe_init_iov */
    733   1.1   msaitoh 
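/*
 * Tear down SR-IOV: keep RX/TX enabled for the PF pool only, disable
 * virtualization (VT) mode, and release the VF table.
 */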
    734   1.1   msaitoh void
    735   1.1   msaitoh ixgbe_uninit_iov(device_t dev)
    736   1.1   msaitoh {
    737   1.1   msaitoh 	struct ixgbe_hw *hw;
    738   1.1   msaitoh 	struct adapter *adapter;
    739   1.1   msaitoh 	uint32_t pf_reg, vf_reg;
    740   1.1   msaitoh 
    741   1.1   msaitoh 	adapter = device_get_softc(dev);
    742   1.1   msaitoh 	hw = &adapter->hw;
    743   1.1   msaitoh 
    744   1.1   msaitoh 	IXGBE_CORE_LOCK(adapter);
    745   1.1   msaitoh 
    746   1.1   msaitoh 	/* Enable rx/tx for the PF and disable it for all VFs. */
    747   1.1   msaitoh 	pf_reg = IXGBE_VF_INDEX(adapter->pool);
    748   1.1   msaitoh 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg), IXGBE_VF_BIT(adapter->pool));
    749   1.1   msaitoh 	IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg), IXGBE_VF_BIT(adapter->pool));
    750   1.1   msaitoh 
    751   1.1   msaitoh 	if (pf_reg == 0)
    752   1.1   msaitoh 		vf_reg = 1;
    753   1.1   msaitoh 	else
    754   1.1   msaitoh 		vf_reg = 0;
    755   1.1   msaitoh 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
    756   1.1   msaitoh 	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);
    757   1.1   msaitoh 
    758   1.1   msaitoh 	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
    759   1.1   msaitoh 
    760   1.1   msaitoh 	free(adapter->vfs, M_IXGBE_SRIOV);
    761   1.1   msaitoh 	adapter->vfs = NULL;
    762   1.1   msaitoh 	adapter->num_vfs = 0;
    763   1.1   msaitoh 	adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
    764   1.1   msaitoh 
    765   1.1   msaitoh 	IXGBE_CORE_UNLOCK(adapter);
    766   1.1   msaitoh } /* ixgbe_uninit_iov */
    767   1.1   msaitoh 
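/*
 * (Re)initialize a single active VF: unmask its mailbox interrupt,
 * restore its default VLAN and MAC filter, re-enable TX/RX for the
 * pool, and send a PF control message to ping the VF.
 */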
    768   1.1   msaitoh static void
    769   1.1   msaitoh ixgbe_init_vf(struct adapter *adapter, struct ixgbe_vf *vf)
    770   1.1   msaitoh {
    771   1.1   msaitoh 	struct ixgbe_hw *hw;
    772   1.1   msaitoh 	uint32_t vf_index, pfmbimr;
    773   1.1   msaitoh 
    774   1.1   msaitoh 	IXGBE_CORE_LOCK_ASSERT(adapter);
    775   1.1   msaitoh 
    776   1.1   msaitoh 	hw = &adapter->hw;
    777   1.1   msaitoh 
    778   1.1   msaitoh 	if (!(vf->flags & IXGBE_VF_ACTIVE))
    779   1.1   msaitoh 		return;
    780   1.1   msaitoh 
    781   1.1   msaitoh 	vf_index = IXGBE_VF_INDEX(vf->pool);
    782   1.1   msaitoh 	pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
    783   1.1   msaitoh 	pfmbimr |= IXGBE_VF_BIT(vf->pool);
    784   1.1   msaitoh 	IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);
    785   1.1   msaitoh 
    786   1.1   msaitoh 	ixgbe_vf_set_default_vlan(adapter, vf, vf->vlan_tag);
    787   1.1   msaitoh 
    788   1.1   msaitoh 	// XXX multicast addresses
    789   1.1   msaitoh 
    790   1.1   msaitoh 	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
    791   1.1   msaitoh 		ixgbe_set_rar(&adapter->hw, vf->rar_index,
    792   1.1   msaitoh 		    vf->ether_addr, vf->pool, TRUE);
    793   1.1   msaitoh 	}
    794   1.1   msaitoh 
    795   1.1   msaitoh 	ixgbe_vf_enable_transmit(adapter, vf);
    796   1.1   msaitoh 	ixgbe_vf_enable_receive(adapter, vf);
    797   1.1   msaitoh 
    798   1.3   msaitoh 	ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
    799   1.1   msaitoh } /* ixgbe_init_vf */
    800   1.1   msaitoh 
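/*
 * Program the global SR-IOV registers (MRQC/MTQC/GCR_EXT/GPIE) for the
 * selected VT mode, enable RX/TX and loopback for the PF pool, set the
 * default pool in VT_CTL, and then initialize every VF.
 */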
    801   1.1   msaitoh void
    802   1.1   msaitoh ixgbe_initialize_iov(struct adapter *adapter)
    803   1.1   msaitoh {
    804   1.1   msaitoh 	struct ixgbe_hw *hw = &adapter->hw;
    805   1.1   msaitoh 	uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
    806   1.1   msaitoh 	int i;
    807   1.1   msaitoh 
    808   1.1   msaitoh 	if (adapter->iov_mode == IXGBE_NO_VM)
    809   1.1   msaitoh 		return;
    810   1.1   msaitoh 
    811   1.1   msaitoh 	IXGBE_CORE_LOCK_ASSERT(adapter);
    812   1.1   msaitoh 
    813   1.1   msaitoh 	/* RMW appropriate registers based on IOV mode */
    814   1.1   msaitoh 	/* Read... */
    815   1.1   msaitoh 	mrqc    = IXGBE_READ_REG(hw, IXGBE_MRQC);
    816   1.1   msaitoh 	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
    817   1.1   msaitoh 	gpie    = IXGBE_READ_REG(hw, IXGBE_GPIE);
    818   1.1   msaitoh 	/* Modify... */
    819   1.1   msaitoh 	mrqc    &= ~IXGBE_MRQC_MRQE_MASK;
    820   1.1   msaitoh 	mtqc     =  IXGBE_MTQC_VT_ENA;      /* No initial MTQC read needed */
    821   1.1   msaitoh 	gcr_ext |=  IXGBE_GCR_EXT_MSIX_EN;
    822   1.1   msaitoh 	gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
    823   1.1   msaitoh 	gpie    &= ~IXGBE_GPIE_VTMODE_MASK;
    824   1.1   msaitoh 	switch (adapter->iov_mode) {
    825   1.1   msaitoh 	case IXGBE_64_VM:
    826   1.1   msaitoh 		mrqc    |= IXGBE_MRQC_VMDQRSS64EN;
    827   1.1   msaitoh 		mtqc    |= IXGBE_MTQC_64VF;
    828   1.1   msaitoh 		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
    829   1.1   msaitoh 		gpie    |= IXGBE_GPIE_VTMODE_64;
    830   1.1   msaitoh 		break;
    831   1.1   msaitoh 	case IXGBE_32_VM:
    832   1.1   msaitoh 		mrqc    |= IXGBE_MRQC_VMDQRSS32EN;
    833   1.1   msaitoh 		mtqc    |= IXGBE_MTQC_32VF;
    834   1.1   msaitoh 		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
    835   1.1   msaitoh 		gpie    |= IXGBE_GPIE_VTMODE_32;
    836   1.1   msaitoh 		break;
    837   1.1   msaitoh 	default:
    838   1.1   msaitoh 		panic("Unexpected SR-IOV mode %d", adapter->iov_mode);
    839   1.1   msaitoh 	}
    840   1.1   msaitoh 	/* Write... */
    841   1.1   msaitoh 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
    842   1.1   msaitoh 	IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
    843   1.1   msaitoh 	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
    844   1.1   msaitoh 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
    845   1.1   msaitoh 
    846   1.1   msaitoh 	/* Enable rx/tx for the PF. */
    847   1.1   msaitoh 	vf_reg = IXGBE_VF_INDEX(adapter->pool);
    848   1.1   msaitoh 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), IXGBE_VF_BIT(adapter->pool));
    849   1.1   msaitoh 	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), IXGBE_VF_BIT(adapter->pool));
    850   1.1   msaitoh 
    851   1.1   msaitoh 	/* Allow VM-to-VM communication. */
    852   1.1   msaitoh 	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
    853   1.1   msaitoh 
    854   1.1   msaitoh 	vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
    855   1.1   msaitoh 	vt_ctl |= (adapter->pool << IXGBE_VT_CTL_POOL_SHIFT);
    856   1.1   msaitoh 	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
    857   1.1   msaitoh 
    858   1.1   msaitoh 	for (i = 0; i < adapter->num_vfs; i++)
    859   1.1   msaitoh 		ixgbe_init_vf(adapter, &adapter->vfs[i]);
    860   1.1   msaitoh } /* ixgbe_initialize_iov */
    861   1.1   msaitoh 
    862   1.1   msaitoh 
/* Check the max frame setting of all active VFs */
    864   1.1   msaitoh void
    865   1.1   msaitoh ixgbe_recalculate_max_frame(struct adapter *adapter)
    866   1.1   msaitoh {
    867   1.1   msaitoh 	struct ixgbe_vf *vf;
    868   1.1   msaitoh 
    869   1.1   msaitoh 	IXGBE_CORE_LOCK_ASSERT(adapter);
    870   1.1   msaitoh 
    871   1.1   msaitoh 	for (int i = 0; i < adapter->num_vfs; i++) {
    872   1.1   msaitoh 		vf = &adapter->vfs[i];
    873   1.1   msaitoh 		if (vf->flags & IXGBE_VF_ACTIVE)
    874   1.1   msaitoh 			ixgbe_update_max_frame(adapter, vf->max_frame_size);
    875   1.1   msaitoh 	}
    876   1.1   msaitoh } /* ixgbe_recalculate_max_frame */
    877   1.1   msaitoh 
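/*
 * Add one VF from its nvlist configuration: assign its pool and RAR
 * index (RAR[0] belongs to the PF), apply any configured MAC address
 * and capability flags, mark it active, and initialize it.
 */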
    878   1.1   msaitoh int
    879   1.1   msaitoh ixgbe_add_vf(device_t dev, u16 vfnum, const nvlist_t *config)
    880   1.1   msaitoh {
    881   1.1   msaitoh 	struct adapter *adapter;
    882   1.1   msaitoh 	struct ixgbe_vf *vf;
    883   1.1   msaitoh 	const void *mac;
    884   1.1   msaitoh 
    885   1.1   msaitoh 	adapter = device_get_softc(dev);
    886   1.1   msaitoh 
	KASSERTMSG(vfnum < adapter->num_vfs, "VF index %d is out of range %d",
	    vfnum, adapter->num_vfs);
    889   1.1   msaitoh 
    890   1.1   msaitoh 	IXGBE_CORE_LOCK(adapter);
    891   1.1   msaitoh 	vf = &adapter->vfs[vfnum];
	vf->pool = vfnum;
    893   1.1   msaitoh 
    894   1.1   msaitoh 	/* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
    895   1.1   msaitoh 	vf->rar_index = vfnum + 1;
    896   1.1   msaitoh 	vf->default_vlan = 0;
    897   1.1   msaitoh 	vf->max_frame_size = ETHER_MAX_LEN;
    898   1.1   msaitoh 	ixgbe_update_max_frame(adapter, vf->max_frame_size);
    899   1.1   msaitoh 
    900   1.1   msaitoh 	if (nvlist_exists_binary(config, "mac-addr")) {
    901   1.1   msaitoh 		mac = nvlist_get_binary(config, "mac-addr", NULL);
    902   1.1   msaitoh 		bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
    903   1.1   msaitoh 		if (nvlist_get_bool(config, "allow-set-mac"))
    904   1.1   msaitoh 			vf->flags |= IXGBE_VF_CAP_MAC;
    905   1.1   msaitoh 	} else
    906   1.1   msaitoh 		/*
    907   1.1   msaitoh 		 * If the administrator has not specified a MAC address then
    908   1.1   msaitoh 		 * we must allow the VF to choose one.
    909   1.1   msaitoh 		 */
    910   1.1   msaitoh 		vf->flags |= IXGBE_VF_CAP_MAC;
    911   1.1   msaitoh 
    912   1.1   msaitoh 	vf->flags |= IXGBE_VF_ACTIVE;
    913   1.1   msaitoh 
    914   1.1   msaitoh 	ixgbe_init_vf(adapter, vf);
    915   1.1   msaitoh 	IXGBE_CORE_UNLOCK(adapter);
    916   1.1   msaitoh 
    917   1.1   msaitoh 	return (0);
    918   1.1   msaitoh } /* ixgbe_add_vf */
    919   1.1   msaitoh 
    920   1.1   msaitoh #else
    921   1.1   msaitoh 
    922   1.1   msaitoh void
    923   1.1   msaitoh ixgbe_handle_mbx(void *context, int pending)
    924   1.1   msaitoh {
    925   1.1   msaitoh 	UNREFERENCED_2PARAMETER(context, pending);
    926   1.1   msaitoh } /* ixgbe_handle_mbx */
    927   1.1   msaitoh 
    928   1.1   msaitoh inline int
    929   1.1   msaitoh ixgbe_vf_que_index(int mode, int vfnum, int num)
    930   1.1   msaitoh {
    931   1.1   msaitoh 	UNREFERENCED_2PARAMETER(mode, vfnum);
    932   1.1   msaitoh 
    933   1.1   msaitoh 	return num;
    934   1.1   msaitoh } /* ixgbe_vf_que_index */
    935   1.1   msaitoh 
    936   1.1   msaitoh #endif
    937