Lines Matching defs:vf
95 /* Support functions for SR-IOV/VF management */
97 ixgbe_send_vf_msg(struct ixgbe_hw *hw, struct ixgbe_vf *vf, u32 msg)
99 if (vf->flags & IXGBE_VF_CTS)
102 hw->mbx.ops[vf->pool].write(hw, &msg, 1, vf->pool);
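
Pieced together from the two matched lines above, the send helper likely looks like the sketch below. This is a reconstruction, not the verbatim source; the IXGBE_VT_MSGTYPE_CTS OR-in is an assumption about what the CTS branch does, based on how the ack/nack wrappers below tag their replies.

static void
ixgbe_send_vf_msg(struct ixgbe_hw *hw, struct ixgbe_vf *vf, u32 msg)
{
	/* Once the reset handshake completes, the VF is marked
	 * clear-to-send and every PF message carries the CTS bit. */
	if (vf->flags & IXGBE_VF_CTS)
		msg |= IXGBE_VT_MSGTYPE_CTS;

	/* A single 32-bit word through this pool's mailbox. */
	hw->mbx.ops[vf->pool].write(hw, &msg, 1, vf->pool);
}
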
106 ixgbe_send_vf_ack(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 msg)
109 ixgbe_send_vf_msg(&sc->hw, vf, msg | IXGBE_VT_MSGTYPE_SUCCESS);
113 ixgbe_send_vf_nack(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 msg)
116 ixgbe_send_vf_msg(&sc->hw, vf, msg | IXGBE_VT_MSGTYPE_FAILURE);
120 ixgbe_process_vf_ack(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
122 if (!(vf->flags & IXGBE_VF_CTS))
123 ixgbe_send_vf_nack(sc, vf, 0);
127 ixgbe_vf_mac_changed(struct ixgbe_vf *vf, const uint8_t *mac)
129 return (bcmp(mac, vf->ether_addr, ETHER_ADDR_LEN) != 0);
207 struct ixgbe_vf *vf;
210 vf = &sc->vfs[i];
211 if (vf->flags & IXGBE_VF_ACTIVE)
212 ixgbe_send_vf_msg(&sc->hw, vf, IXGBE_PF_CONTROL_MSG);
218 ixgbe_vf_set_default_vlan(struct ixgbe_softc *sc, struct ixgbe_vf *vf,
226 vf->vlan_tag = tag;
228 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));
252 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr);
253 IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
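
The lines between the VMOLR read at 228 and the writes at 252-253 are not in the match list. A plausible sketch of the tag handling, assuming the stock VMOLR/VMVIR bit names (IXGBE_VMOLR_AUPE, IXGBE_VMVIR_VLANA_DEFAULT):

vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));
if (tag == 0) {
	/* Accept untagged traffic; let the VF tag its own frames. */
	vmolr |= IXGBE_VMOLR_AUPE;
	vmvir = 0;
} else {
	/* Require tagged traffic; insert the default tag on transmit. */
	vmolr &= ~IXGBE_VMOLR_AUPE;
	vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT);
}
IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr);
IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
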
258 ixgbe_clear_vfmbmem(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
261 uint32_t vf_index = IXGBE_VF_INDEX(vf->pool);
273 ixgbe_vf_frame_size_compatible(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
277 * Frame size compatibility between PF and VF is only a problem on 82599-based cards; X540 and later support any combination of jumbo frames on PFs and VFs.
284 switch (vf->api_ver) {
288 * On legacy (1.0 and older) VF versions, we don't support jumbo
289 * frames on either the PF or the VF.
292 vf->max_frame_size > ETHER_MAX_LEN)
301 * 1.1 or later VF versions always work if they aren't using jumbo frames.
304 if (vf->max_frame_size <= ETHER_MAX_LEN)
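
Reading the fragments at 284-304 together, the compatibility check plausibly reduces to the switch below; the final jumbo-vs-jumbo comparison is an inference from the truncated comments, not a quote of the source.

switch (vf->api_ver) {
case IXGBE_API_VER_1_0:
case IXGBE_API_VER_UNKNOWN:
	/* Legacy VFs: no jumbo frames on either the PF or the VF. */
	if (sc->max_frame_size > ETHER_MAX_LEN ||
	    vf->max_frame_size > ETHER_MAX_LEN)
		return (FALSE);
	return (TRUE);
default:
	/* 1.1+ VFs: standard-sized frames always work... */
	if (vf->max_frame_size <= ETHER_MAX_LEN)
		return (TRUE);
	/* ...and a jumbo VF works only if the PF is jumbo too. */
	return (sc->max_frame_size > ETHER_MAX_LEN);
}
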
320 ixgbe_process_vf_reset(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
322 ixgbe_vf_set_default_vlan(sc, vf, vf->default_vlan);
326 ixgbe_clear_rar(&sc->hw, vf->rar_index);
327 ixgbe_clear_vfmbmem(sc, vf);
328 ixgbe_toggle_txdctl(&sc->hw, IXGBE_VF_INDEX(vf->pool));
330 vf->api_ver = IXGBE_API_VER_UNKNOWN;
335 ixgbe_vf_enable_transmit(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
342 vf_index = IXGBE_VF_INDEX(vf->pool);
344 vfte |= IXGBE_VF_BIT(vf->pool);
350 ixgbe_vf_enable_receive(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
357 vf_index = IXGBE_VF_INDEX(vf->pool);
359 if (ixgbe_vf_frame_size_compatible(sc, vf))
360 vfre |= IXGBE_VF_BIT(vf->pool);
362 vfre &= ~IXGBE_VF_BIT(vf->pool);
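
VFTE and VFRE are banks of 32-bit enable registers with one bit per pool; IXGBE_VF_INDEX selects the register and IXGBE_VF_BIT the bit within it. A standalone demo of that arithmetic, with the two macros re-derived here as assumptions (the driver's own definitions may differ in spelling, not in effect):

#include <stdio.h>
#include <stdint.h>

/* Re-derived for illustration: 32 pools per 32-bit enable register. */
#define VF_INDEX(pool)	((pool) / 32)
#define VF_BIT(pool)	(1u << ((pool) % 32))

int
main(void)
{
	for (uint16_t pool = 0; pool < 64; pool += 31)
		printf("pool %2u -> VFRE[%u] |= 0x%08x\n",
		    pool, VF_INDEX(pool), VF_BIT(pool));
	return (0);
}
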
368 ixgbe_vf_reset_msg(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
376 ixgbe_process_vf_reset(sc, vf);
378 if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
379 ixgbe_set_rar(&sc->hw, vf->rar_index, vf->ether_addr,
380 vf->pool, TRUE);
385 ixgbe_vf_enable_transmit(sc, vf);
386 ixgbe_vf_enable_receive(sc, vf);
388 vf->flags |= IXGBE_VF_CTS;
391 bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN);
393 hw->mbx.ops[vf->pool].write(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
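
The reset reply at 393 is a multi-word message. A sketch of its assembly from the fragments above; the resp[3] multicast-filter-type word is an assumption about how the PF typically answers a VF reset, not something the matched lines show.

uint32_t ack, resp[IXGBE_VF_PERMADDR_MSG_LEN];

if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
	ixgbe_set_rar(&sc->hw, vf->rar_index, vf->ether_addr,
	    vf->pool, TRUE);
	ack = IXGBE_VT_MSGTYPE_SUCCESS;
} else
	ack = IXGBE_VT_MSGTYPE_FAILURE;

resp[0] = IXGBE_VF_RESET | ack;		/* echoed opcode + status */
bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN); /* perm MAC, words 1-2 */
resp[3] = hw->mac.mc_filter_type;	/* multicast hash variant */
hw->mbx.ops[vf->pool].write(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN,
    vf->pool);
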
398 ixgbe_vf_set_mac(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
404 /* Check that the VF has permission to change the MAC address. */
405 if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
406 ixgbe_send_vf_nack(sc, vf, msg[0]);
411 ixgbe_send_vf_nack(sc, vf, msg[0]);
415 bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
417 ixgbe_set_rar(&sc->hw, vf->rar_index, vf->ether_addr, vf->pool,
420 ixgbe_send_vf_ack(sc, vf, msg[0]);
425 * VF multicast addresses are set by using the appropriate bit in one of the 128 32-bit MTA hash registers (4096 filter bits in all).
429 ixgbe_vf_set_mc_addr(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 *msg)
438 vmolr = IXGBE_READ_REG(&sc->hw, IXGBE_VMOLR(vf->pool));
440 vf->num_mc_hashes = entries;
444 vf->mc_hash[i] = list[i];
445 vec_reg = (vf->mc_hash[i] >> 5) & 0x7F;
446 vec_bit = vf->mc_hash[i] & 0x1F;
453 IXGBE_WRITE_REG(&sc->hw, IXGBE_VMOLR(vf->pool), vmolr);
454 ixgbe_send_vf_ack(sc, vf, msg[0]);
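
Lines 445-446 split each stored 12-bit hash into a register index and a bit position: 128 registers of 32 bits give the 4096 filter bits the comment at 425 describes. A self-contained demo of the split (the hash value is made up):

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint16_t hash = 0xB3C;			/* hypothetical 12-bit hash */
	uint16_t vec_reg = (hash >> 5) & 0x7F;	/* MTA register, 0..127 */
	uint16_t vec_bit = hash & 0x1F;		/* bit within it, 0..31 */

	printf("hash 0x%03x -> mta[%u] |= (1u << %u)\n",
	    hash, vec_reg, vec_bit);
	return (0);
}
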
459 ixgbe_vf_set_vlan(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
469 if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
470 ixgbe_send_vf_nack(sc, vf, msg[0]);
476 ixgbe_send_vf_nack(sc, vf, msg[0]);
480 ixgbe_set_vfta(hw, tag, vf->pool, enable, false);
481 ixgbe_send_vf_ack(sc, vf, msg[0]);
486 ixgbe_vf_set_lpe(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
496 ixgbe_send_vf_ack(sc, vf, msg[0]);
504 ixgbe_send_vf_ack(sc, vf, msg[0]);
508 vf->max_frame_size = vf_max_size;
509 ixgbe_update_max_frame(sc, vf->max_frame_size);
512 * We might have to disable reception to this VF if the frame size is not compatible with the PF's configuration.
515 ixgbe_vf_enable_receive(sc, vf);
526 ixgbe_send_vf_ack(sc, vf, msg[0]);
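
Between the two intentional ACKs at 496 and 504 sits the size validation. A sketch of the likely flow; the exact bounds (an ETHER_CRC_LEN underflow check and a device maximum called IXGBE_MAX_FRAME_SIZE here) are assumptions:

vf_max_size = msg[1];

/* Out-of-range requests are deliberately ACKed but otherwise ignored. */
if (vf_max_size < ETHER_CRC_LEN || vf_max_size > IXGBE_MAX_FRAME_SIZE) {
	ixgbe_send_vf_ack(sc, vf, msg[0]);
	return;
}

vf->max_frame_size = vf_max_size;
ixgbe_update_max_frame(sc, vf->max_frame_size);

/* Receive may have to be cut off if the new size is incompatible
 * with the PF (see ixgbe_vf_frame_size_compatible() above). */
ixgbe_vf_enable_receive(sc, vf);

ixgbe_send_vf_ack(sc, vf, msg[0]);
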
531 ixgbe_vf_set_macvlan(struct ixgbe_softc *sc, struct ixgbe_vf *vf,
535 ixgbe_send_vf_nack(sc, vf, msg[0]);
540 ixgbe_vf_api_negotiate(struct ixgbe_softc *sc, struct ixgbe_vf *vf,
547 vf->api_ver = msg[1];
548 ixgbe_send_vf_ack(sc, vf, msg[0]);
551 vf->api_ver = IXGBE_API_VER_UNKNOWN;
552 ixgbe_send_vf_nack(sc, vf, msg[0]);
559 ixgbe_vf_get_queues(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
571 ixgbe_send_vf_nack(sc, vf, msg[0]);
581 resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
584 hw->mbx.ops[vf->pool].write(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
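
The GET_QUEUES reply fills IXGBE_VF_GET_QUEUES_RESP_LEN words. A sketch of the response assembly; num_queues stands in for however many queues the active pool mode grants, and the word-index macros are the standard mailbox names this exchange is assumed to use:

resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_SUCCESS |
    IXGBE_VT_MSGTYPE_CTS;
resp[IXGBE_VF_TX_QUEUES] = num_queues;
resp[IXGBE_VF_RX_QUEUES] = num_queues;
resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
resp[IXGBE_VF_DEF_QUEUE] = 0;
hw->mbx.ops[vf->pool].write(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN,
    vf->pool);
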
589 ixgbe_process_vf_msg(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
597 error = hw->mbx.ops[vf->pool].read(hw, msg, IXGBE_VFMAILBOX_SIZE,
598 vf->pool);
604 sc->ifp->if_xname, msg[0], vf->pool);
606 ixgbe_vf_reset_msg(sc, vf, msg);
610 if (!(vf->flags & IXGBE_VF_CTS)) {
611 ixgbe_send_vf_nack(sc, vf, msg[0]);
617 ixgbe_vf_set_mac(sc, vf, msg);
620 ixgbe_vf_set_mc_addr(sc, vf, msg);
623 ixgbe_vf_set_vlan(sc, vf, msg);
626 ixgbe_vf_set_lpe(sc, vf, msg);
629 ixgbe_vf_set_macvlan(sc, vf, msg);
632 ixgbe_vf_api_negotiate(sc, vf, msg);
635 ixgbe_vf_get_queues(sc, vf, msg);
638 ixgbe_send_vf_nack(sc, vf, msg[0]);
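
The handler calls at 617-638 are the arms of a dispatch on the opcode in the first mailbox word; a condensed sketch of that switch (the 0xFFFF opcode mask is an assumption about how the header bits are stripped):

switch (msg[0] & 0xFFFF) {
case IXGBE_VF_SET_MAC_ADDR:
	ixgbe_vf_set_mac(sc, vf, msg);
	break;
case IXGBE_VF_SET_MULTICAST:
	ixgbe_vf_set_mc_addr(sc, vf, msg);
	break;
case IXGBE_VF_SET_VLAN:
	ixgbe_vf_set_vlan(sc, vf, msg);
	break;
case IXGBE_VF_SET_LPE:
	ixgbe_vf_set_lpe(sc, vf, msg);
	break;
case IXGBE_VF_SET_MACVLAN:
	ixgbe_vf_set_macvlan(sc, vf, msg);
	break;
case IXGBE_VF_API_NEGOTIATE:
	ixgbe_vf_api_negotiate(sc, vf, msg);
	break;
case IXGBE_VF_GET_QUEUES:
	ixgbe_vf_get_queues(sc, vf, msg);
	break;
default:
	ixgbe_send_vf_nack(sc, vf, msg[0]);
}
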
643 /* Tasklet for handling VF -> PF mailbox messages */
649 struct ixgbe_vf *vf;
657 vf = &sc->vfs[i];
659 if ((vf->flags & IXGBE_VF_ACTIVE) == 0)
662 if (hw->mbx.ops[vf->pool].check_for_rst(hw, vf->pool) == 0)
663 ixgbe_process_vf_reset(sc, vf);
665 if (hw->mbx.ops[vf->pool].check_for_msg(hw, vf->pool) == 0)
666 ixgbe_process_vf_msg(sc, vf);
668 if (hw->mbx.ops[vf->pool].check_for_ack(hw, vf->pool) == 0)
669 ixgbe_process_vf_ack(sc, vf);
690 * thus we go into "64 VF mode" if 32+ VFs are requested.
691 * With 64 VFs, you can only have two queues per VF.
692 * With 32 VFs, you can have up to four queues per VF.
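
The arithmetic behind this comment: the 82599/X540 class shares a fixed pool of queue pairs among all pools, so the pool count fixes the per-VF queue budget. A standalone check of that division (the 128-queue total is the assumption here):

#include <stdio.h>

int
main(void)
{
	const int total_queues = 128;	/* queue pairs shared among pools */
	const int modes[] = { 32, 64 };	/* supported pool counts */

	for (int i = 0; i < 2; i++)
		printf("%d-pool mode -> %d queue pairs per VF\n",
		    modes[i], total_queues / modes[i]);
	return (0);
}
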
773 ixgbe_init_vf(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
782 if (!(vf->flags & IXGBE_VF_ACTIVE))
785 vf_index = IXGBE_VF_INDEX(vf->pool);
787 pfmbimr |= IXGBE_VF_BIT(vf->pool);
790 ixgbe_vf_set_default_vlan(sc, vf, vf->vlan_tag);
794 if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
795 ixgbe_set_rar(&sc->hw, vf->rar_index,
796 vf->ether_addr, vf->pool, TRUE);
799 ixgbe_vf_enable_transmit(sc, vf);
800 ixgbe_vf_enable_receive(sc, vf);
802 ixgbe_send_vf_msg(&sc->hw, vf, IXGBE_PF_CONTROL_MSG);
867 /* Check the max frame setting of all active VFs */
871 struct ixgbe_vf *vf;
876 vf = &sc->vfs[i];
877 if (vf->flags & IXGBE_VF_ACTIVE)
878 ixgbe_update_max_frame(sc, vf->max_frame_size);
886 struct ixgbe_vf *vf;
891 KASSERT(vfnum < sc->num_vfs, ("VF index %d is out of range %d", vfnum, sc->num_vfs));
895 vf = &sc->vfs[vfnum];
896 vf->pool = vfnum;
898 /* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
899 vf->rar_index = vfnum + 1;
900 vf->default_vlan = 0;
901 vf->max_frame_size = ETHER_MAX_LEN;
902 ixgbe_update_max_frame(sc, vf->max_frame_size);
906 bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
908 vf->flags |= IXGBE_VF_CAP_MAC;
912 * If the administrator has not specified a MAC address, we must allow the VF to choose one.
914 vf->flags |= IXGBE_VF_CAP_MAC;
916 vf->flags |= IXGBE_VF_ACTIVE;
918 ixgbe_init_vf(sc, vf);