if_sriov.c revision 1.15
/* $NetBSD: if_sriov.c,v 1.15 2021/12/24 04:59:23 msaitoh Exp $ */
/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/if_sriov.c 327031 2017-12-20 18:15:06Z erj $*/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_sriov.c,v 1.15 2021/12/24 04:59:23 msaitoh Exp $");

#include "ixgbe.h"
#include "ixgbe_sriov.h"

#ifdef PCI_IOV

MALLOC_DEFINE(M_IXGBE_SRIOV, "ix_sriov", "ix SR-IOV allocations");

/************************************************************************
 * ixgbe_pci_iov_detach
 ************************************************************************/
int
ixgbe_pci_iov_detach(device_t dev)
{
	return pci_iov_detach(dev);
}

/************************************************************************
 * ixgbe_define_iov_schemas
 ************************************************************************/
void
ixgbe_define_iov_schemas(device_t dev, int *error)
{
	nvlist_t *pf_schema, *vf_schema;

	pf_schema = pci_iov_schema_alloc_node();
	vf_schema = pci_iov_schema_alloc_node();
	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
	pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
	    IOV_SCHEMA_HASDEFAULT, TRUE);
	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
	    IOV_SCHEMA_HASDEFAULT, FALSE);
	pci_iov_schema_add_bool(vf_schema, "allow-promisc",
	    IOV_SCHEMA_HASDEFAULT, FALSE);
	*error = pci_iov_attach(dev, pf_schema, vf_schema);
	if (*error != 0) {
		device_printf(dev,
		    "Error %d setting up SR-IOV\n", *error);
	}
} /* ixgbe_define_iov_schemas */
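
/*
 * Example: on FreeBSD this schema is consumed by iovctl(8).  A minimal
 * iovctl.conf(5) fragment exercising the keys defined above might look
 * like the following (device name and values are illustrative only):
 *
 *	PF {
 *		device = "ix0";
 *		num_vfs = 4;
 *	}
 *
 *	VF-0 {
 *		mac-addr = "02:00:00:00:00:01";
 *		allow-set-mac = true;
 *		allow-promisc = false;
 *	}
 */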

/************************************************************************
 * ixgbe_align_all_queue_indices
 ************************************************************************/
inline void
ixgbe_align_all_queue_indices(struct adapter *adapter)
{
	int i;
	int index;

	for (i = 0; i < adapter->num_queues; i++) {
		index = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, i);
		adapter->rx_rings[i].me = index;
		adapter->tx_rings[i].me = index;
	}
} /* ixgbe_align_all_queue_indices */

/* Support functions for SR-IOV/VF management */
static inline void
ixgbe_send_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
	if (vf->flags & IXGBE_VF_CTS)
		msg |= IXGBE_VT_MSGTYPE_CTS;

	adapter->hw.mbx.ops.write(&adapter->hw, &msg, 1, vf->pool);
}

static inline void
ixgbe_send_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
	msg &= IXGBE_VT_MSG_MASK;
	ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_SUCCESS);
}

static inline void
ixgbe_send_vf_nack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
	msg &= IXGBE_VT_MSG_MASK;
	ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_FAILURE);
}

static inline void
ixgbe_process_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf)
{
	if (!(vf->flags & IXGBE_VF_CTS))
		ixgbe_send_vf_nack(adapter, vf, 0);
}
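
/*
 * Layout of a mailbox word, as used by the helpers above: the low bits of
 * msg[0] select the message type (IXGBE_VT_MSG_MASK), the IXGBE_VT_MSGINFO
 * field carries a per-message argument (e.g. a multicast address count),
 * and the high bits carry IXGBE_VT_MSGTYPE_SUCCESS/FAILURE status plus the
 * IXGBE_VT_MSGTYPE_CTS ("clear to send") flag, which the PF sets only
 * after a VF has completed its reset handshake.
 */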

static inline bool
ixgbe_vf_mac_changed(struct ixgbe_vf *vf, const uint8_t *mac)
{
	return (bcmp(mac, vf->ether_addr, ETHER_ADDR_LEN) != 0);
}

static inline int
ixgbe_vf_queues(int mode)
{
	switch (mode) {
	case IXGBE_64_VM:
		return (2);
	case IXGBE_32_VM:
		return (4);
	case IXGBE_NO_VM:
	default:
		return (0);
	}
}

inline int
ixgbe_vf_que_index(int mode, int vfnum, int num)
{
	return ((vfnum * ixgbe_vf_queues(mode)) + num);
}
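
/*
 * Worked example: in IXGBE_32_VM mode each pool owns four queues, so the
 * PF (pool 31) maps local queue 1 to hardware queue 31 * 4 + 1 = 125.  In
 * IXGBE_64_VM mode (two queues per pool) the PF sits in pool 63 and the
 * same queue maps to 63 * 2 + 1 = 127.
 */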

static inline void
ixgbe_update_max_frame(struct adapter *adapter, int max_frame)
{
	if (adapter->max_frame_size < max_frame)
		adapter->max_frame_size = max_frame;
}

inline u32
ixgbe_get_mrqc(int iov_mode)
{
	u32 mrqc;

	switch (iov_mode) {
	case IXGBE_64_VM:
		mrqc = IXGBE_MRQC_VMDQRSS64EN;
		break;
	case IXGBE_32_VM:
		mrqc = IXGBE_MRQC_VMDQRSS32EN;
		break;
	case IXGBE_NO_VM:
		mrqc = 0;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", iov_mode);
	}

	return mrqc;
}


inline u32
ixgbe_get_mtqc(int iov_mode)
{
	uint32_t mtqc;

	switch (iov_mode) {
	case IXGBE_64_VM:
		mtqc = IXGBE_MTQC_64VF | IXGBE_MTQC_VT_ENA;
		break;
	case IXGBE_32_VM:
		mtqc = IXGBE_MTQC_32VF | IXGBE_MTQC_VT_ENA;
		break;
	case IXGBE_NO_VM:
		mtqc = IXGBE_MTQC_64Q_1PB;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", iov_mode);
	}

	return mtqc;
}

void
ixgbe_ping_all_vfs(struct adapter *adapter)
{
	struct ixgbe_vf *vf;

	for (int i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];
		if (vf->flags & IXGBE_VF_ACTIVE)
			ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
	}
} /* ixgbe_ping_all_vfs */


static void
ixgbe_vf_set_default_vlan(struct adapter *adapter, struct ixgbe_vf *vf,
                          uint16_t tag)
{
	struct ixgbe_hw *hw;
	uint32_t vmolr, vmvir;

	hw = &adapter->hw;

	vf->vlan_tag = tag;

	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));

	/* Do not receive packets that pass inexact filters. */
	vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);

	/* Disable Multicast Promiscuous Mode. */
	vmolr &= ~IXGBE_VMOLR_MPE;

	/* Accept broadcasts. */
	vmolr |= IXGBE_VMOLR_BAM;

	if (tag == 0) {
		/* Accept non-vlan tagged traffic. */
		vmolr |= IXGBE_VMOLR_AUPE;

		/* Allow VM to tag outgoing traffic; no default tag. */
		vmvir = 0;
	} else {
		/* Require vlan-tagged traffic. */
		vmolr &= ~IXGBE_VMOLR_AUPE;

		/* Tag all traffic with provided vlan tag. */
		vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT);
	}
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr);
	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
} /* ixgbe_vf_set_default_vlan */


static void
ixgbe_clear_vfmbmem(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	uint32_t vf_index = IXGBE_VF_INDEX(vf->pool);
	uint16_t mbx_size = hw->mbx.size;
	uint16_t i;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	for (i = 0; i < mbx_size; ++i)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_index), i, 0x0);
} /* ixgbe_clear_vfmbmem */


static bool
ixgbe_vf_frame_size_compatible(struct adapter *adapter, struct ixgbe_vf *vf)
{

	/*
	 * Frame size compatibility between PF and VF is only a problem on
	 * 82599-based cards.  X540 and later support any combination of jumbo
	 * frames on PFs and VFs.
	 */
	if (adapter->hw.mac.type != ixgbe_mac_82599EB)
		return (TRUE);

	switch (vf->api_ver) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_UNKNOWN:
		/*
		 * On legacy (1.0 and older) VF versions, we don't support jumbo
		 * frames on either the PF or the VF.
		 */
		if (adapter->max_frame_size > ETHER_MAX_LEN ||
		    vf->max_frame_size > ETHER_MAX_LEN)
			return (FALSE);

		return (TRUE);
	case IXGBE_API_VER_1_1:
	default:
		/*
		 * 1.1 or later VF versions always work if they aren't using
		 * jumbo frames.
		 */
		if (vf->max_frame_size <= ETHER_MAX_LEN)
			return (TRUE);

		/*
		 * Jumbo frames only work with VFs if the PF is also using jumbo
		 * frames.
		 */
		if (adapter->max_frame_size <= ETHER_MAX_LEN)
			return (TRUE);

		return (FALSE);
	}
} /* ixgbe_vf_frame_size_compatible */
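
/*
 * Summary of the 82599 checks above, where "jumbo" means a frame larger
 * than ETHER_MAX_LEN:
 *
 *	API 1.0/unknown: compatible only if neither the PF nor the VF is
 *	                 configured for jumbo frames.
 *	API 1.1+:        compatible unless the VF wants jumbo frames and
 *	                 the PF does not use them.
 */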


static void
ixgbe_process_vf_reset(struct adapter *adapter, struct ixgbe_vf *vf)
{
	ixgbe_vf_set_default_vlan(adapter, vf, vf->default_vlan);

	// XXX clear multicast addresses

	ixgbe_clear_rar(&adapter->hw, vf->rar_index);
	ixgbe_clear_vfmbmem(adapter, vf);
	ixgbe_toggle_txdctl(&adapter->hw, IXGBE_VF_INDEX(vf->pool));

	vf->api_ver = IXGBE_API_VER_UNKNOWN;
} /* ixgbe_process_vf_reset */


static void
ixgbe_vf_enable_transmit(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, vfte;

	hw = &adapter->hw;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index));
	vfte |= IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte);
} /* ixgbe_vf_enable_transmit */


static void
ixgbe_vf_enable_receive(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, vfre;

	hw = &adapter->hw;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index));
	if (ixgbe_vf_frame_size_compatible(adapter, vf))
		vfre |= IXGBE_VF_BIT(vf->pool);
	else
		vfre &= ~IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre);
} /* ixgbe_vf_enable_receive */


static void
ixgbe_vf_reset_msg(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t ack;
	uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN];

	hw = &adapter->hw;

	ixgbe_process_vf_reset(adapter, vf);

	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr,
		    vf->pool, TRUE);
		ack = IXGBE_VT_MSGTYPE_SUCCESS;
	} else
		ack = IXGBE_VT_MSGTYPE_FAILURE;

	ixgbe_vf_enable_transmit(adapter, vf);
	ixgbe_vf_enable_receive(adapter, vf);

	vf->flags |= IXGBE_VF_CTS;

	resp[0] = IXGBE_VF_RESET | ack;
	bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN);
	resp[3] = hw->mac.mc_filter_type;
	hw->mbx.ops.write(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
} /* ixgbe_vf_reset_msg */


static void
ixgbe_vf_set_mac(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	uint8_t *mac;

	mac = (uint8_t *)&msg[1];

	/* Check that the VF has permission to change the MAC address. */
	if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	if (ixgbe_validate_mac_addr(mac) != 0) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);

	ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr, vf->pool,
	    TRUE);

	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_mac */

/*
 * VF multicast addresses are programmed by setting the appropriate bit in
 * one of the 128 32-bit MTA hash registers (4096 hash values in all).
 */
static void
ixgbe_vf_set_mc_addr(struct adapter *adapter, struct ixgbe_vf *vf, u32 *msg)
{
	u16	*list = (u16 *)&msg[1];
	int	entries;
	u32	vmolr, vec_bit, vec_reg, mta_reg;

	entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
	entries = uimin(entries, IXGBE_MAX_VF_MC);

	vmolr = IXGBE_READ_REG(&adapter->hw, IXGBE_VMOLR(vf->pool));

	vf->num_mc_hashes = entries;

	/* Set the appropriate MTA bit */
	for (int i = 0; i < entries; i++) {
		vf->mc_hash[i] = list[i];
		vec_reg = (vf->mc_hash[i] >> 5) & 0x7F;
		vec_bit = vf->mc_hash[i] & 0x1F;
		mta_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_MTA(vec_reg));
		mta_reg |= (1 << vec_bit);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_MTA(vec_reg), mta_reg);
	}

	vmolr |= IXGBE_VMOLR_ROMPE;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(vf->pool), vmolr);
	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_mc_addr */
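
/*
 * Worked example: a 12-bit hash value of 0x0abc selects MTA register
 * (0x0abc >> 5) & 0x7f = 85 and bit 0x0abc & 0x1f = 28, so bit 28 of
 * MTA[85] is set for that multicast address.
 */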


static void
ixgbe_vf_set_vlan(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	int enable;
	uint16_t tag;

	hw = &adapter->hw;
	enable = IXGBE_VT_MSGINFO(msg[0]);
	tag = msg[1] & IXGBE_VLVF_VLANID_MASK;

	if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	/* It is illegal to enable vlan tag 0. */
	if (tag == 0 && enable != 0) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	ixgbe_set_vfta(hw, tag, vf->pool, enable, false);
	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_vlan */


static void
ixgbe_vf_set_lpe(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t vf_max_size, pf_max_size, mhadd;

	hw = &adapter->hw;
	vf_max_size = msg[1];

	if (vf_max_size < ETHER_CRC_LEN) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		return;
	}

	vf_max_size -= ETHER_CRC_LEN;

	if (vf_max_size > IXGBE_MAX_FRAME_SIZE) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		return;
	}

	vf->max_frame_size = vf_max_size;
	ixgbe_update_max_frame(adapter, vf->max_frame_size);

	/*
	 * We might have to disable reception to this VF if the frame size is
	 * not compatible with the config on the PF.
	 */
	ixgbe_vf_enable_receive(adapter, vf);

	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
	pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;

	if (pf_max_size < adapter->max_frame_size) {
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_lpe */
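
/*
 * Worked example: the size requested in msg[1] includes the CRC, so a VF
 * asking for standard-size frames sends 1522 and is recorded with
 * vf->max_frame_size = 1518 (ETHER_MAX_LEN), which is compatible with any
 * PF configuration.  Note that MHADD.MFS is only ever raised here, never
 * lowered; ixgbe_recalculate_max_frame() below rescans the active VFs.
 */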


static void
ixgbe_vf_set_macvlan(struct adapter *adapter, struct ixgbe_vf *vf,
                     uint32_t *msg)
{
	//XXX implement this
	ixgbe_send_vf_nack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_macvlan */


static void
ixgbe_vf_api_negotiate(struct adapter *adapter, struct ixgbe_vf *vf,
    uint32_t *msg)
{

	switch (msg[1]) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_1_1:
		vf->api_ver = msg[1];
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		break;
	default:
		vf->api_ver = IXGBE_API_VER_UNKNOWN;
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		break;
	}
} /* ixgbe_vf_api_negotiate */


static void
ixgbe_vf_get_queues(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
	int num_queues;

	hw = &adapter->hw;

	/* GET_QUEUES is not supported on pre-1.1 APIs. */
	switch (vf->api_ver) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_UNKNOWN:
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_SUCCESS |
	    IXGBE_VT_MSGTYPE_CTS;

	num_queues = ixgbe_vf_queues(adapter->iov_mode);
	resp[IXGBE_VF_TX_QUEUES] = num_queues;
	resp[IXGBE_VF_RX_QUEUES] = num_queues;
	resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
	resp[IXGBE_VF_DEF_QUEUE] = 0;

	hw->mbx.ops.write(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
} /* ixgbe_vf_get_queues */
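
/*
 * Example response in IXGBE_32_VM mode with no default VLAN:
 * resp[IXGBE_VF_TX_QUEUES] = resp[IXGBE_VF_RX_QUEUES] = 4,
 * resp[IXGBE_VF_TRANS_VLAN] = 0 (no default VLAN in force) and
 * resp[IXGBE_VF_DEF_QUEUE] = 0.
 */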


static void
ixgbe_process_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t msg[IXGBE_VFMAILBOX_SIZE];
	int error;

	hw = &adapter->hw;

	error = hw->mbx.ops.read(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool);

	if (error != 0)
		return;

	CTR3(KTR_MALLOC, "%s: received msg %x from %d", adapter->ifp->if_xname,
	    msg[0], vf->pool);
	if (msg[0] == IXGBE_VF_RESET) {
		ixgbe_vf_reset_msg(adapter, vf, msg);
		return;
	}

	if (!(vf->flags & IXGBE_VF_CTS)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	switch (msg[0] & IXGBE_VT_MSG_MASK) {
	case IXGBE_VF_SET_MAC_ADDR:
		ixgbe_vf_set_mac(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_MULTICAST:
		ixgbe_vf_set_mc_addr(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_VLAN:
		ixgbe_vf_set_vlan(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_LPE:
		ixgbe_vf_set_lpe(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_MACVLAN:
		ixgbe_vf_set_macvlan(adapter, vf, msg);
		break;
	case IXGBE_VF_API_NEGOTIATE:
		ixgbe_vf_api_negotiate(adapter, vf, msg);
		break;
	case IXGBE_VF_GET_QUEUES:
		ixgbe_vf_get_queues(adapter, vf, msg);
		break;
	default:
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
	}
} /* ixgbe_process_vf_msg */


/* Tasklet for handling VF -> PF mailbox messages */
void
ixgbe_handle_mbx(void *context)
{
	struct adapter *adapter = context;
	struct ixgbe_hw *hw;
	struct ixgbe_vf *vf;
	int i;

	KASSERT(mutex_owned(&adapter->core_mtx));

	hw = &adapter->hw;

	for (i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];

		if (vf->flags & IXGBE_VF_ACTIVE) {
			if (hw->mbx.ops.check_for_rst(hw, vf->pool) == 0)
				ixgbe_process_vf_reset(adapter, vf);

			if (hw->mbx.ops.check_for_msg(hw, vf->pool) == 0)
				ixgbe_process_vf_msg(adapter, vf);

			if (hw->mbx.ops.check_for_ack(hw, vf->pool) == 0)
				ixgbe_process_vf_ack(adapter, vf);
		}
	}
} /* ixgbe_handle_mbx */

int
ixgbe_init_iov(device_t dev, u16 num_vfs, const nvlist_t *config)
{
	struct adapter *adapter;
	int retval = 0;

	adapter = device_get_softc(dev);
	adapter->iov_mode = IXGBE_NO_VM;

	if (num_vfs == 0) {
		/* Would we ever get num_vfs = 0? */
		retval = EINVAL;
		goto err_init_iov;
	}

	/*
	 * We've got to reserve a VM's worth of queues for the PF,
	 * thus we go into "64 VF mode" if 32 or more VFs are requested.
	 * With 64 VFs, you can only have two queues per VF.
	 * With 32 VFs, you can have up to four queues per VF.
	 */
	if (num_vfs >= IXGBE_32_VM)
		adapter->iov_mode = IXGBE_64_VM;
	else
		adapter->iov_mode = IXGBE_32_VM;

	/* Again, reserving one VM's worth of queues for the PF */
	adapter->pool = adapter->iov_mode - 1;

	if ((num_vfs > adapter->pool) || (num_vfs >= IXGBE_64_VM)) {
		retval = ENOSPC;
		goto err_init_iov;
	}

	IXGBE_CORE_LOCK(adapter);

	adapter->vfs = malloc(sizeof(*adapter->vfs) * num_vfs, M_IXGBE_SRIOV,
	    M_NOWAIT | M_ZERO);

	if (adapter->vfs == NULL) {
		retval = ENOMEM;
		IXGBE_CORE_UNLOCK(adapter);
		goto err_init_iov;
	}

	adapter->num_vfs = num_vfs;
	ixgbe_init_mbx_params_pf(&adapter->hw);

	/*
	 * Set the SR-IOV flag now as it's needed by
	 * ixgbe_init_locked().
	 */
	adapter->feat_en |= IXGBE_FEATURE_SRIOV;
	adapter->init_locked(adapter);

	IXGBE_CORE_UNLOCK(adapter);

	return (retval);

err_init_iov:
	adapter->num_vfs = 0;
	adapter->pool = 0;
	adapter->iov_mode = IXGBE_NO_VM;

	return (retval);
} /* ixgbe_init_iov */
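
/*
 * Worked example for the mode selection above: a request for 8 VFs
 * selects IXGBE_32_VM and leaves pool 31 for the PF; a request for 40 VFs
 * selects IXGBE_64_VM with the PF in pool 63; a request for 64 VFs fails
 * with ENOSPC because pool 63 is reserved for the PF.
 */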

void
ixgbe_uninit_iov(device_t dev)
{
	struct ixgbe_hw *hw;
	struct adapter *adapter;
	uint32_t pf_reg, vf_reg;

	adapter = device_get_softc(dev);
	hw = &adapter->hw;

	IXGBE_CORE_LOCK(adapter);

	/* Enable rx/tx for the PF and disable it for all VFs. */
	pf_reg = IXGBE_VF_INDEX(adapter->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg), IXGBE_VF_BIT(adapter->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg), IXGBE_VF_BIT(adapter->pool));

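	/*
	 * VFRE/VFTE are each implemented as a pair of 32-bit registers:
	 * index 0 covers pools 0-31 and index 1 covers pools 32-63.  Clear
	 * whichever register does not contain the PF's pool bit.
	 */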
	if (pf_reg == 0)
		vf_reg = 1;
	else
		vf_reg = 0;
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);

	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);

	free(adapter->vfs, M_IXGBE_SRIOV);
	adapter->vfs = NULL;
	adapter->num_vfs = 0;
	adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;

	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_uninit_iov */

static void
ixgbe_init_vf(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, pfmbimr;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	hw = &adapter->hw;

	if (!(vf->flags & IXGBE_VF_ACTIVE))
		return;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
	pfmbimr |= IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);

	ixgbe_vf_set_default_vlan(adapter, vf, vf->vlan_tag);

	// XXX multicast addresses

	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&adapter->hw, vf->rar_index,
		    vf->ether_addr, vf->pool, TRUE);
	}

	ixgbe_vf_enable_transmit(adapter, vf);
	ixgbe_vf_enable_receive(adapter, vf);

	ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
} /* ixgbe_init_vf */

void
ixgbe_initialize_iov(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
	int i;

	if (adapter->iov_mode == IXGBE_NO_VM)
		return;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	/* RMW appropriate registers based on IOV mode */
	/* Read... */
	mrqc    = IXGBE_READ_REG(hw, IXGBE_MRQC);
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	gpie    = IXGBE_READ_REG(hw, IXGBE_GPIE);
	/* Modify... */
	mrqc    &= ~IXGBE_MRQC_MRQE_MASK;
	mtqc     =  IXGBE_MTQC_VT_ENA;      /* No initial MTQC read needed */
	gcr_ext |=  IXGBE_GCR_EXT_MSIX_EN;
	gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
	gpie    &= ~IXGBE_GPIE_VTMODE_MASK;
	switch (adapter->iov_mode) {
	case IXGBE_64_VM:
		mrqc    |= IXGBE_MRQC_VMDQRSS64EN;
		mtqc    |= IXGBE_MTQC_64VF;
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
		gpie    |= IXGBE_GPIE_VTMODE_64;
		break;
	case IXGBE_32_VM:
		mrqc    |= IXGBE_MRQC_VMDQRSS32EN;
		mtqc    |= IXGBE_MTQC_32VF;
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
		gpie    |= IXGBE_GPIE_VTMODE_32;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", adapter->iov_mode);
	}
	/* Write... */
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
	IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* Enable rx/tx for the PF. */
	vf_reg = IXGBE_VF_INDEX(adapter->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), IXGBE_VF_BIT(adapter->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), IXGBE_VF_BIT(adapter->pool));

	/* Allow VM-to-VM communication. */
	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);

	vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
	vt_ctl |= (adapter->pool << IXGBE_VT_CTL_POOL_SHIFT);
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);

	for (i = 0; i < adapter->num_vfs; i++)
		ixgbe_init_vf(adapter, &adapter->vfs[i]);
} /* ixgbe_initialize_iov */


/* Check the max frame setting of all active VFs. */
void
ixgbe_recalculate_max_frame(struct adapter *adapter)
{
	struct ixgbe_vf *vf;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	for (int i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];
		if (vf->flags & IXGBE_VF_ACTIVE)
			ixgbe_update_max_frame(adapter, vf->max_frame_size);
	}
} /* ixgbe_recalculate_max_frame */

int
ixgbe_add_vf(device_t dev, u16 vfnum, const nvlist_t *config)
{
	struct adapter *adapter;
	struct ixgbe_vf *vf;
	const void *mac;

	adapter = device_get_softc(dev);

	KASSERT(vfnum < adapter->num_vfs, ("VF index %d is out of range %d",
	    vfnum, adapter->num_vfs));

	IXGBE_CORE_LOCK(adapter);
	vf = &adapter->vfs[vfnum];
	vf->pool = vfnum;

	/* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
	vf->rar_index = vfnum + 1;
	vf->default_vlan = 0;
	vf->max_frame_size = ETHER_MAX_LEN;
	ixgbe_update_max_frame(adapter, vf->max_frame_size);

	if (nvlist_exists_binary(config, "mac-addr")) {
		mac = nvlist_get_binary(config, "mac-addr", NULL);
		bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
		if (nvlist_get_bool(config, "allow-set-mac"))
			vf->flags |= IXGBE_VF_CAP_MAC;
	} else
		/*
		 * If the administrator has not specified a MAC address then
		 * we must allow the VF to choose one.
		 */
		vf->flags |= IXGBE_VF_CAP_MAC;

	vf->flags |= IXGBE_VF_ACTIVE;

	ixgbe_init_vf(adapter, vf);
	IXGBE_CORE_UNLOCK(adapter);

	return (0);
} /* ixgbe_add_vf */

#else

void
ixgbe_handle_mbx(void *context)
{
	UNREFERENCED_1PARAMETER(context);
} /* ixgbe_handle_mbx */

inline int
ixgbe_vf_que_index(int mode, int vfnum, int num)
{
	UNREFERENCED_2PARAMETER(mode, vfnum);

	return num;
} /* ixgbe_vf_que_index */

#endif