/*	$NetBSD: if_sriov.c,v 1.1.4.7 2022/01/30 16:06:35 martin Exp $ */
/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

  1. Redistributions of source code must retain the above copyright notice,
     this list of conditions and the following disclaimer.

  2. Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.

  3. Neither the name of the Intel Corporation nor the names of its
     contributors may be used to endorse or promote products derived from
     this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/if_sriov.c 327031 2017-12-20 18:15:06Z erj $*/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_sriov.c,v 1.1.4.7 2022/01/30 16:06:35 martin Exp $");

#include "ixgbe.h"
#include "ixgbe_sriov.h"

#ifdef PCI_IOV

MALLOC_DEFINE(M_IXGBE_SRIOV, "ix_sriov", "ix SR-IOV allocations");

/************************************************************************
 * ixgbe_pci_iov_detach
 ************************************************************************/
int
ixgbe_pci_iov_detach(device_t dev)
{
	return pci_iov_detach(dev);
}

/************************************************************************
 * ixgbe_define_iov_schemas
 ************************************************************************/
void
ixgbe_define_iov_schemas(device_t dev, int *error)
{
	nvlist_t *pf_schema, *vf_schema;

	pf_schema = pci_iov_schema_alloc_node();
	vf_schema = pci_iov_schema_alloc_node();
	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
	pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
	    IOV_SCHEMA_HASDEFAULT, TRUE);
	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
	    IOV_SCHEMA_HASDEFAULT, FALSE);
	pci_iov_schema_add_bool(vf_schema, "allow-promisc",
	    IOV_SCHEMA_HASDEFAULT, FALSE);
	*error = pci_iov_attach(dev, pf_schema, vf_schema);
	if (*error != 0) {
		device_printf(dev,
		    "Error %d setting up SR-IOV\n", *error);
	}
} /* ixgbe_define_iov_schemas */

/************************************************************************
 * ixgbe_align_all_queue_indices
 ************************************************************************/
inline void
ixgbe_align_all_queue_indices(struct adapter *adapter)
{
	int i;
	int index;

	for (i = 0; i < adapter->num_queues; i++) {
		index = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, i);
		adapter->rx_rings[i].me = index;
		adapter->tx_rings[i].me = index;
	}
}

/* Support functions for SR-IOV/VF management */
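/*
 * IXGBE_VT_MSGTYPE_CTS ("clear to send") is set on replies once a VF has
 * completed the reset handshake (IXGBE_VF_CTS in vf->flags), telling the
 * VF that the PF is ready to service further mailbox requests.
 */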
static inline void
ixgbe_send_vf_msg(struct ixgbe_hw *hw, struct ixgbe_vf *vf, u32 msg)
{
	if (vf->flags & IXGBE_VF_CTS)
		msg |= IXGBE_VT_MSGTYPE_CTS;

	hw->mbx.ops.write(hw, &msg, 1, vf->pool);
}

static inline void
ixgbe_send_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
	msg &= IXGBE_VT_MSG_MASK;
	ixgbe_send_vf_msg(&adapter->hw, vf, msg | IXGBE_VT_MSGTYPE_SUCCESS);
}

static inline void
ixgbe_send_vf_nack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
	msg &= IXGBE_VT_MSG_MASK;
	ixgbe_send_vf_msg(&adapter->hw, vf, msg | IXGBE_VT_MSGTYPE_FAILURE);
}

static inline void
ixgbe_process_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf)
{
	if (!(vf->flags & IXGBE_VF_CTS))
		ixgbe_send_vf_nack(adapter, vf, 0);
}

static inline boolean_t
ixgbe_vf_mac_changed(struct ixgbe_vf *vf, const uint8_t *mac)
{
	return (bcmp(mac, vf->ether_addr, ETHER_ADDR_LEN) != 0);
}

static inline int
ixgbe_vf_queues(int mode)
{
	switch (mode) {
	case IXGBE_64_VM:
		return (2);
	case IXGBE_32_VM:
		return (4);
	case IXGBE_NO_VM:
	default:
		return (0);
	}
}

inline int
ixgbe_vf_que_index(int mode, int vfnum, int num)
{
	return ((vfnum * ixgbe_vf_queues(mode)) + num);
}
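
/*
 * Queue indices are contiguous per pool: in IXGBE_32_VM mode each pool
 * owns four queues, so queue 1 of pool 3 maps to ring index 3 * 4 + 1 = 13.
 */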

static inline void
ixgbe_update_max_frame(struct adapter *adapter, int max_frame)
{
	if (adapter->max_frame_size < max_frame)
		adapter->max_frame_size = max_frame;
}

inline u32
ixgbe_get_mrqc(int iov_mode)
{
	u32 mrqc;

	switch (iov_mode) {
	case IXGBE_64_VM:
		mrqc = IXGBE_MRQC_VMDQRSS64EN;
		break;
	case IXGBE_32_VM:
		mrqc = IXGBE_MRQC_VMDQRSS32EN;
		break;
	case IXGBE_NO_VM:
		mrqc = 0;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", iov_mode);
	}

	return mrqc;
}


inline u32
ixgbe_get_mtqc(int iov_mode)
{
	uint32_t mtqc;

	switch (iov_mode) {
	case IXGBE_64_VM:
		mtqc = IXGBE_MTQC_64VF | IXGBE_MTQC_VT_ENA;
		break;
	case IXGBE_32_VM:
		mtqc = IXGBE_MTQC_32VF | IXGBE_MTQC_VT_ENA;
		break;
	case IXGBE_NO_VM:
		mtqc = IXGBE_MTQC_64Q_1PB;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", iov_mode);
	}

	return mtqc;
}

void
ixgbe_ping_all_vfs(struct adapter *adapter)
{
	struct ixgbe_vf *vf;

	for (int i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];
		if (vf->flags & IXGBE_VF_ACTIVE)
			ixgbe_send_vf_msg(&adapter->hw, vf, IXGBE_PF_CONTROL_MSG);
	}
} /* ixgbe_ping_all_vfs */
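
/*
 * IXGBE_PF_CONTROL_MSG carries no payload; it simply notifies each active
 * VF of a PF state change, which VF drivers typically answer by resetting
 * and renegotiating their mailbox API.
 */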

static void
ixgbe_vf_set_default_vlan(struct adapter *adapter, struct ixgbe_vf *vf,
    uint16_t tag)
{
	struct ixgbe_hw *hw;
	uint32_t vmolr, vmvir;

	hw = &adapter->hw;

	vf->vlan_tag = tag;

	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));

	/* Do not receive packets that pass inexact filters. */
	vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);

	/* Disable Multicast Promiscuous Mode. */
	vmolr &= ~IXGBE_VMOLR_MPE;

	/* Accept broadcasts. */
	vmolr |= IXGBE_VMOLR_BAM;

	if (tag == 0) {
		/* Accept non-vlan tagged traffic. */
		vmolr |= IXGBE_VMOLR_AUPE;

		/* Allow VM to tag outgoing traffic; no default tag. */
		vmvir = 0;
	} else {
		/* Require vlan-tagged traffic. */
		vmolr &= ~IXGBE_VMOLR_AUPE;

		/* Tag all traffic with provided vlan tag. */
		vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT);
	}
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr);
	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
} /* ixgbe_vf_set_default_vlan */


static void
ixgbe_clear_vfmbmem(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	uint32_t vf_index = IXGBE_VF_INDEX(vf->pool);
	uint16_t mbx_size = hw->mbx.size;
	uint16_t i;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	for (i = 0; i < mbx_size; ++i)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_index), i, 0x0);
} /* ixgbe_clear_vfmbmem */


static boolean_t
ixgbe_vf_frame_size_compatible(struct adapter *adapter, struct ixgbe_vf *vf)
{

	/*
	 * Frame size compatibility between PF and VF is only a problem on
	 * 82599-based cards. X540 and later support any combination of jumbo
	 * frames on PFs and VFs.
	 */
	if (adapter->hw.mac.type != ixgbe_mac_82599EB)
		return (TRUE);

	switch (vf->api_ver) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_UNKNOWN:
		/*
		 * On legacy (1.0 and older) VF versions, we don't support jumbo
		 * frames on either the PF or the VF.
		 */
		if (adapter->max_frame_size > ETHER_MAX_LEN ||
		    vf->max_frame_size > ETHER_MAX_LEN)
			return (FALSE);

		return (TRUE);

	case IXGBE_API_VER_1_1:
	default:
		/*
		 * 1.1 or later VF versions always work if they aren't using
		 * jumbo frames.
		 */
		if (vf->max_frame_size <= ETHER_MAX_LEN)
			return (TRUE);

		/*
		 * Jumbo frames only work with VFs if the PF is also using jumbo
		 * frames.
		 */
		if (adapter->max_frame_size <= ETHER_MAX_LEN)
			return (TRUE);

		return (FALSE);
	}
} /* ixgbe_vf_frame_size_compatible */
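
/*
 * In short: on 82599, pre-1.1 VF APIs get no jumbo frames at all, and with
 * 1.1 or later APIs a VF may use jumbo frames only while the PF does too.
 */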


static void
ixgbe_process_vf_reset(struct adapter *adapter, struct ixgbe_vf *vf)
{
	ixgbe_vf_set_default_vlan(adapter, vf, vf->default_vlan);

	// XXX clear multicast addresses

	ixgbe_clear_rar(&adapter->hw, vf->rar_index);
	ixgbe_clear_vfmbmem(adapter, vf);
	ixgbe_toggle_txdctl(&adapter->hw, IXGBE_VF_INDEX(vf->pool));

	vf->api_ver = IXGBE_API_VER_UNKNOWN;
} /* ixgbe_process_vf_reset */


static void
ixgbe_vf_enable_transmit(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, vfte;

	hw = &adapter->hw;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index));
	vfte |= IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte);
} /* ixgbe_vf_enable_transmit */


static void
ixgbe_vf_enable_receive(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, vfre;

	hw = &adapter->hw;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index));
	if (ixgbe_vf_frame_size_compatible(adapter, vf))
		vfre |= IXGBE_VF_BIT(vf->pool);
	else
		vfre &= ~IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre);
} /* ixgbe_vf_enable_receive */


static void
ixgbe_vf_reset_msg(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t ack;
	uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN];

	hw = &adapter->hw;

	ixgbe_process_vf_reset(adapter, vf);

	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr,
		    vf->pool, TRUE);
		ack = IXGBE_VT_MSGTYPE_SUCCESS;
	} else
		ack = IXGBE_VT_MSGTYPE_FAILURE;

	ixgbe_vf_enable_transmit(adapter, vf);
	ixgbe_vf_enable_receive(adapter, vf);

	vf->flags |= IXGBE_VF_CTS;

	resp[0] = IXGBE_VF_RESET | ack;
	bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN);
	resp[3] = hw->mac.mc_filter_type;
	hw->mbx.ops.write(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
} /* ixgbe_vf_reset_msg */


static void
ixgbe_vf_set_mac(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	uint8_t *mac;

	mac = (uint8_t *)&msg[1];

	/* Check that the VF has permission to change the MAC address. */
	if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	if (ixgbe_validate_mac_addr(mac) != 0) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);

	ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr, vf->pool,
	    TRUE);

	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_mac */

/*
 * VF multicast addresses are programmed by setting the appropriate bit in
 * one of 128 32-bit MTA registers (4096 hash bits in total).
 */
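/*
 * The low 12 bits of each hash select MTA register (hash >> 5) & 0x7F and
 * bit (hash & 0x1F) within it; e.g. hash 0x0a2c sets bit 0x0c of MTA[0x51].
 */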
static void
ixgbe_vf_set_mc_addr(struct adapter *adapter, struct ixgbe_vf *vf, u32 *msg)
{
	u16 *list = (u16 *)&msg[1];
	int entries;
	u32 vmolr, vec_bit, vec_reg, mta_reg;

	entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
	entries = min(entries, IXGBE_MAX_VF_MC);

	vmolr = IXGBE_READ_REG(&adapter->hw, IXGBE_VMOLR(vf->pool));

	vf->num_mc_hashes = entries;

	/* Set the appropriate MTA bit */
	for (int i = 0; i < entries; i++) {
		vf->mc_hash[i] = list[i];
		vec_reg = (vf->mc_hash[i] >> 5) & 0x7F;
		vec_bit = vf->mc_hash[i] & 0x1F;
		mta_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_MTA(vec_reg));
		mta_reg |= (1 << vec_bit);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_MTA(vec_reg), mta_reg);
	}

	vmolr |= IXGBE_VMOLR_ROMPE;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(vf->pool), vmolr);
	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_mc_addr */


static void
ixgbe_vf_set_vlan(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	int enable;
	uint16_t tag;

	hw = &adapter->hw;
	enable = IXGBE_VT_MSGINFO(msg[0]);
	tag = msg[1] & IXGBE_VLVF_VLANID_MASK;

	if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	/* It is illegal to enable vlan tag 0. */
	if (tag == 0 && enable != 0) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	ixgbe_set_vfta(hw, tag, vf->pool, enable, false);
	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_vlan */


static void
ixgbe_vf_set_lpe(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t vf_max_size, pf_max_size, mhadd;

	hw = &adapter->hw;
	vf_max_size = msg[1];

	if (vf_max_size < ETHER_CRC_LEN) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		return;
	}

	vf_max_size -= ETHER_CRC_LEN;

	if (vf_max_size > IXGBE_MAX_FRAME_SIZE) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		return;
	}

	vf->max_frame_size = vf_max_size;
	ixgbe_update_max_frame(adapter, vf->max_frame_size);

	/*
	 * We might have to disable reception to this VF if the frame size is
	 * not compatible with the config on the PF.
	 */
	ixgbe_vf_enable_receive(adapter, vf);

	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
	pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;

	if (pf_max_size < adapter->max_frame_size) {
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_lpe */


static void
ixgbe_vf_set_macvlan(struct adapter *adapter, struct ixgbe_vf *vf,
    uint32_t *msg)
{
	//XXX implement this
	ixgbe_send_vf_nack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_macvlan */


static void
ixgbe_vf_api_negotiate(struct adapter *adapter, struct ixgbe_vf *vf,
    uint32_t *msg)
{

	switch (msg[1]) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_1_1:
		vf->api_ver = msg[1];
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		break;
	default:
		vf->api_ver = IXGBE_API_VER_UNKNOWN;
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		break;
	}
} /* ixgbe_vf_api_negotiate */


static void
ixgbe_vf_get_queues(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
	int num_queues;

	hw = &adapter->hw;

	/* GET_QUEUES is not supported on pre-1.1 APIs. */
	switch (vf->api_ver) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_UNKNOWN:
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_SUCCESS |
	    IXGBE_VT_MSGTYPE_CTS;

	num_queues = ixgbe_vf_queues(adapter->iov_mode);
	resp[IXGBE_VF_TX_QUEUES] = num_queues;
	resp[IXGBE_VF_RX_QUEUES] = num_queues;
	resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
	resp[IXGBE_VF_DEF_QUEUE] = 0;

	hw->mbx.ops.write(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
} /* ixgbe_vf_get_queues */


static void
ixgbe_process_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t msg[IXGBE_VFMAILBOX_SIZE];
	int error;

	hw = &adapter->hw;

	error = hw->mbx.ops.read(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool);

	if (error != 0)
		return;

	CTR3(KTR_MALLOC, "%s: received msg %x from %d", adapter->ifp->if_xname,
	    msg[0], vf->pool);
	if (msg[0] == IXGBE_VF_RESET) {
		ixgbe_vf_reset_msg(adapter, vf, msg);
		return;
	}

	if (!(vf->flags & IXGBE_VF_CTS)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	switch (msg[0] & IXGBE_VT_MSG_MASK) {
	case IXGBE_VF_SET_MAC_ADDR:
		ixgbe_vf_set_mac(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_MULTICAST:
		ixgbe_vf_set_mc_addr(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_VLAN:
		ixgbe_vf_set_vlan(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_LPE:
		ixgbe_vf_set_lpe(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_MACVLAN:
		ixgbe_vf_set_macvlan(adapter, vf, msg);
		break;
	case IXGBE_VF_API_NEGOTIATE:
		ixgbe_vf_api_negotiate(adapter, vf, msg);
		break;
	case IXGBE_VF_GET_QUEUES:
		ixgbe_vf_get_queues(adapter, vf, msg);
		break;
	default:
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
	}
} /* ixgbe_process_vf_msg */


/* Tasklet for handling VF -> PF mailbox messages */
void
ixgbe_handle_mbx(void *context)
{
	struct adapter *adapter = context;
	struct ixgbe_hw *hw;
	struct ixgbe_vf *vf;
	int i;

	hw = &adapter->hw;

	IXGBE_CORE_LOCK(adapter);
	for (i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];

		if (vf->flags & IXGBE_VF_ACTIVE) {
			if (hw->mbx.ops.check_for_rst(hw, vf->pool) == 0)
				ixgbe_process_vf_reset(adapter, vf);

			if (hw->mbx.ops.check_for_msg(hw, vf->pool) == 0)
				ixgbe_process_vf_msg(adapter, vf);

			if (hw->mbx.ops.check_for_ack(hw, vf->pool) == 0)
				ixgbe_process_vf_ack(adapter, vf);
		}
	}
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_handle_mbx */

int
ixgbe_init_iov(device_t dev, u16 num_vfs, const nvlist_t *config)
{
	struct adapter *adapter;
	int retval = 0;

	adapter = device_get_softc(dev);
	adapter->iov_mode = IXGBE_NO_VM;

	if (num_vfs == 0) {
		/* Would we ever get num_vfs = 0? */
		retval = EINVAL;
		goto err_init_iov;
	}

	/*
	 * We've got to reserve a VM's worth of queues for the PF,
	 * thus we go into "64 VF mode" if 32+ VFs are requested.
	 * With 64 VFs, you can only have two queues per VF.
	 * With 32 VFs, you can have up to four queues per VF.
	 */
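	/*
	 * For example, a request for 40 VFs selects IXGBE_64_VM: the PF
	 * keeps pool 63 and each VF gets 2 queues, while a request for
	 * 20 VFs selects IXGBE_32_VM with 4 queues per VF.
	 */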
	if (num_vfs >= IXGBE_32_VM)
		adapter->iov_mode = IXGBE_64_VM;
	else
		adapter->iov_mode = IXGBE_32_VM;

	/* Again, reserving 1 VM's worth of queues for the PF */
	adapter->pool = adapter->iov_mode - 1;

	if ((num_vfs > adapter->pool) || (num_vfs >= IXGBE_64_VM)) {
		retval = ENOSPC;
		goto err_init_iov;
	}

	IXGBE_CORE_LOCK(adapter);

	adapter->vfs = malloc(sizeof(*adapter->vfs) * num_vfs, M_IXGBE_SRIOV,
	    M_NOWAIT | M_ZERO);

	if (adapter->vfs == NULL) {
		retval = ENOMEM;
		IXGBE_CORE_UNLOCK(adapter);
		goto err_init_iov;
	}

	adapter->num_vfs = num_vfs;
	ixgbe_init_mbx_params_pf(&adapter->hw);

	/*
	 * Set the SR-IOV flag now, as it is needed by
	 * ixgbe_init_locked().
	 */
	adapter->feat_en |= IXGBE_FEATURE_SRIOV;
	adapter->init_locked(adapter);

	IXGBE_CORE_UNLOCK(adapter);

	return (retval);

err_init_iov:
	adapter->num_vfs = 0;
	adapter->pool = 0;
	adapter->iov_mode = IXGBE_NO_VM;

	return (retval);
} /* ixgbe_init_iov */

void
ixgbe_uninit_iov(device_t dev)
{
	struct ixgbe_hw *hw;
	struct adapter *adapter;
	uint32_t pf_reg, vf_reg;

	adapter = device_get_softc(dev);
	hw = &adapter->hw;

	IXGBE_CORE_LOCK(adapter);

	/* Enable rx/tx for the PF and disable it for all VFs. */
	pf_reg = IXGBE_VF_INDEX(adapter->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg), IXGBE_VF_BIT(adapter->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg), IXGBE_VF_BIT(adapter->pool));
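
	/*
	 * VFRE/VFTE are each split across two 32-bit registers covering
	 * pools 0-31 and 32-63; the writes above set only the PF's bit in
	 * its half, so clear the other half below to disable the rest.
	 */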

	if (pf_reg == 0)
		vf_reg = 1;
	else
		vf_reg = 0;
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);

	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);

	free(adapter->vfs, M_IXGBE_SRIOV);
	adapter->vfs = NULL;
	adapter->num_vfs = 0;
	adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;

	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_uninit_iov */

static void
ixgbe_init_vf(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, pfmbimr;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	hw = &adapter->hw;

	if (!(vf->flags & IXGBE_VF_ACTIVE))
		return;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
	pfmbimr |= IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);

	ixgbe_vf_set_default_vlan(adapter, vf, vf->vlan_tag);

	// XXX multicast addresses

	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&adapter->hw, vf->rar_index,
		    vf->ether_addr, vf->pool, TRUE);
	}

	ixgbe_vf_enable_transmit(adapter, vf);
	ixgbe_vf_enable_receive(adapter, vf);

	ixgbe_send_vf_msg(&adapter->hw, vf, IXGBE_PF_CONTROL_MSG);
} /* ixgbe_init_vf */

void
ixgbe_initialize_iov(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
	int i;

	if (adapter->iov_mode == IXGBE_NO_VM)
		return;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	/* RMW appropriate registers based on IOV mode */
	/* Read... */
	mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
	/* Modify... */
	mrqc &= ~IXGBE_MRQC_MRQE_MASK;
	mtqc = IXGBE_MTQC_VT_ENA;	/* No initial MTQC read needed */
	gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
	gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
	gpie &= ~IXGBE_GPIE_VTMODE_MASK;
	switch (adapter->iov_mode) {
	case IXGBE_64_VM:
		mrqc |= IXGBE_MRQC_VMDQRSS64EN;
		mtqc |= IXGBE_MTQC_64VF;
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
		gpie |= IXGBE_GPIE_VTMODE_64;
		break;
	case IXGBE_32_VM:
		mrqc |= IXGBE_MRQC_VMDQRSS32EN;
		mtqc |= IXGBE_MTQC_32VF;
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
		gpie |= IXGBE_GPIE_VTMODE_32;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", adapter->iov_mode);
	}
	/* Write... */
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
	IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* Enable rx/tx for the PF. */
	vf_reg = IXGBE_VF_INDEX(adapter->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), IXGBE_VF_BIT(adapter->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), IXGBE_VF_BIT(adapter->pool));

	/* Allow VM-to-VM communication. */
	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);

	vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
	vt_ctl |= (adapter->pool << IXGBE_VT_CTL_POOL_SHIFT);
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);

	for (i = 0; i < adapter->num_vfs; i++)
		ixgbe_init_vf(adapter, &adapter->vfs[i]);
} /* ixgbe_initialize_iov */


/* Check the max frame setting of all active VFs. */
void
ixgbe_recalculate_max_frame(struct adapter *adapter)
{
	struct ixgbe_vf *vf;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	for (int i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];
		if (vf->flags & IXGBE_VF_ACTIVE)
			ixgbe_update_max_frame(adapter, vf->max_frame_size);
	}
} /* ixgbe_recalculate_max_frame */

int
ixgbe_add_vf(device_t dev, u16 vfnum, const nvlist_t *config)
{
	struct adapter *adapter;
	struct ixgbe_vf *vf;
	const void *mac;

	adapter = device_get_softc(dev);

	KASSERT(vfnum < adapter->num_vfs, ("VF index %d is out of range %d",
	    vfnum, adapter->num_vfs));

	IXGBE_CORE_LOCK(adapter);
	vf = &adapter->vfs[vfnum];
	vf->pool = vfnum;

	/* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
	vf->rar_index = vfnum + 1;
	vf->default_vlan = 0;
	vf->max_frame_size = ETHER_MAX_LEN;
	ixgbe_update_max_frame(adapter, vf->max_frame_size);

	if (nvlist_exists_binary(config, "mac-addr")) {
		mac = nvlist_get_binary(config, "mac-addr", NULL);
		bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
		if (nvlist_get_bool(config, "allow-set-mac"))
			vf->flags |= IXGBE_VF_CAP_MAC;
	} else
		/*
		 * If the administrator has not specified a MAC address then
		 * we must allow the VF to choose one.
		 */
		vf->flags |= IXGBE_VF_CAP_MAC;

	vf->flags |= IXGBE_VF_ACTIVE;

	ixgbe_init_vf(adapter, vf);
	IXGBE_CORE_UNLOCK(adapter);

	return (0);
} /* ixgbe_add_vf */

#else

void
ixgbe_handle_mbx(void *context)
{
	UNREFERENCED_1PARAMETER(context);
} /* ixgbe_handle_mbx */

inline int
ixgbe_vf_que_index(int mode, int vfnum, int num)
{
	UNREFERENCED_2PARAMETER(mode, vfnum);

	return num;
} /* ixgbe_vf_que_index */

#endif /* PCI_IOV */