if_sriov.c revision 1.9 1 1.1 msaitoh /******************************************************************************
2 1.1 msaitoh
3 1.1 msaitoh Copyright (c) 2001-2017, Intel Corporation
4 1.1 msaitoh All rights reserved.
5 1.1 msaitoh
6 1.1 msaitoh Redistribution and use in source and binary forms, with or without
7 1.1 msaitoh modification, are permitted provided that the following conditions are met:
8 1.1 msaitoh
9 1.1 msaitoh 1. Redistributions of source code must retain the above copyright notice,
10 1.1 msaitoh this list of conditions and the following disclaimer.
11 1.1 msaitoh
12 1.1 msaitoh 2. Redistributions in binary form must reproduce the above copyright
13 1.1 msaitoh notice, this list of conditions and the following disclaimer in the
14 1.1 msaitoh documentation and/or other materials provided with the distribution.
15 1.1 msaitoh
16 1.1 msaitoh 3. Neither the name of the Intel Corporation nor the names of its
17 1.1 msaitoh contributors may be used to endorse or promote products derived from
18 1.1 msaitoh this software without specific prior written permission.
19 1.1 msaitoh
20 1.1 msaitoh THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 1.1 msaitoh AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 1.1 msaitoh IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 1.1 msaitoh ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 1.1 msaitoh LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 1.1 msaitoh CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 1.1 msaitoh SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 1.1 msaitoh INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 1.1 msaitoh CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 1.1 msaitoh ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 1.1 msaitoh POSSIBILITY OF SUCH DAMAGE.
31 1.1 msaitoh
32 1.1 msaitoh ******************************************************************************/
33 1.3 msaitoh /*$FreeBSD: head/sys/dev/ixgbe/if_sriov.c 327031 2017-12-20 18:15:06Z erj $*/
34 1.1 msaitoh
35 1.1 msaitoh #include "ixgbe.h"
36 1.2 msaitoh #include "ixgbe_sriov.h"
37 1.1 msaitoh
38 1.1 msaitoh #ifdef PCI_IOV
39 1.1 msaitoh
40 1.1 msaitoh MALLOC_DEFINE(M_IXGBE_SRIOV, "ix_sriov", "ix SR-IOV allocations");
41 1.1 msaitoh
42 1.1 msaitoh /************************************************************************
43 1.1 msaitoh * ixgbe_pci_iov_detach
44 1.1 msaitoh ************************************************************************/
45 1.1 msaitoh int
46 1.1 msaitoh ixgbe_pci_iov_detach(device_t dev)
47 1.1 msaitoh {
48 1.1 msaitoh return pci_iov_detach(dev);
49 1.1 msaitoh }
50 1.1 msaitoh
51 1.1 msaitoh /************************************************************************
52 1.1 msaitoh * ixgbe_define_iov_schemas
53 1.1 msaitoh ************************************************************************/
54 1.1 msaitoh void
55 1.1 msaitoh ixgbe_define_iov_schemas(device_t dev, int *error)
56 1.1 msaitoh {
57 1.1 msaitoh nvlist_t *pf_schema, *vf_schema;
58 1.1 msaitoh
59 1.1 msaitoh pf_schema = pci_iov_schema_alloc_node();
60 1.1 msaitoh vf_schema = pci_iov_schema_alloc_node();
61 1.1 msaitoh pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
62 1.1 msaitoh pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
63 1.1 msaitoh IOV_SCHEMA_HASDEFAULT, TRUE);
64 1.1 msaitoh pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
65 1.1 msaitoh IOV_SCHEMA_HASDEFAULT, FALSE);
66 1.1 msaitoh pci_iov_schema_add_bool(vf_schema, "allow-promisc",
67 1.1 msaitoh IOV_SCHEMA_HASDEFAULT, FALSE);
68 1.1 msaitoh *error = pci_iov_attach(dev, pf_schema, vf_schema);
69 1.1 msaitoh if (*error != 0) {
70 1.1 msaitoh device_printf(dev,
71 1.1 msaitoh "Error %d setting up SR-IOV\n", *error);
72 1.1 msaitoh }
73 1.1 msaitoh } /* ixgbe_define_iov_schemas */
74 1.1 msaitoh
75 1.1 msaitoh /************************************************************************
76 1.1 msaitoh * ixgbe_align_all_queue_indices
77 1.1 msaitoh ************************************************************************/
78 1.1 msaitoh inline void
79 1.1 msaitoh ixgbe_align_all_queue_indices(struct adapter *adapter)
80 1.1 msaitoh {
81 1.1 msaitoh int i;
82 1.1 msaitoh int index;
83 1.1 msaitoh
84 1.1 msaitoh for (i = 0; i < adapter->num_queues; i++) {
85 1.1 msaitoh index = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, i);
86 1.1 msaitoh adapter->rx_rings[i].me = index;
87 1.1 msaitoh adapter->tx_rings[i].me = index;
88 1.1 msaitoh }
89 1.1 msaitoh }
90 1.1 msaitoh
91 1.1 msaitoh /* Support functions for SR-IOV/VF management */
92 1.1 msaitoh static inline void
93 1.3 msaitoh ixgbe_send_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
94 1.1 msaitoh {
95 1.1 msaitoh if (vf->flags & IXGBE_VF_CTS)
96 1.1 msaitoh msg |= IXGBE_VT_MSGTYPE_CTS;
97 1.1 msaitoh
98 1.3 msaitoh adapter->hw.mbx.ops.write(&adapter->hw, &msg, 1, vf->pool);
99 1.1 msaitoh }
100 1.1 msaitoh
101 1.1 msaitoh static inline void
102 1.1 msaitoh ixgbe_send_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
103 1.1 msaitoh {
104 1.1 msaitoh msg &= IXGBE_VT_MSG_MASK;
105 1.3 msaitoh ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_ACK);
106 1.1 msaitoh }
107 1.1 msaitoh
108 1.1 msaitoh static inline void
109 1.1 msaitoh ixgbe_send_vf_nack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
110 1.1 msaitoh {
111 1.1 msaitoh msg &= IXGBE_VT_MSG_MASK;
112 1.3 msaitoh ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_NACK);
113 1.1 msaitoh }
114 1.1 msaitoh
115 1.1 msaitoh static inline void
116 1.1 msaitoh ixgbe_process_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf)
117 1.1 msaitoh {
118 1.1 msaitoh if (!(vf->flags & IXGBE_VF_CTS))
119 1.1 msaitoh ixgbe_send_vf_nack(adapter, vf, 0);
120 1.1 msaitoh }
121 1.1 msaitoh
122 1.9 mrg static inline bool
123 1.1 msaitoh ixgbe_vf_mac_changed(struct ixgbe_vf *vf, const uint8_t *mac)
124 1.1 msaitoh {
125 1.1 msaitoh return (bcmp(mac, vf->ether_addr, ETHER_ADDR_LEN) != 0);
126 1.1 msaitoh }
127 1.1 msaitoh
128 1.1 msaitoh static inline int
129 1.1 msaitoh ixgbe_vf_queues(int mode)
130 1.1 msaitoh {
131 1.1 msaitoh switch (mode) {
132 1.1 msaitoh case IXGBE_64_VM:
133 1.1 msaitoh return (2);
134 1.1 msaitoh case IXGBE_32_VM:
135 1.1 msaitoh return (4);
136 1.1 msaitoh case IXGBE_NO_VM:
137 1.1 msaitoh default:
138 1.1 msaitoh return (0);
139 1.1 msaitoh }
140 1.1 msaitoh }
141 1.1 msaitoh
/* Absolute hardware queue index of queue 'num' in pool 'vfnum'. */
inline int
ixgbe_vf_que_index(int mode, int vfnum, int num)
{
	int per_pool = ixgbe_vf_queues(mode);

	return (vfnum * per_pool + num);
}
147 1.1 msaitoh
148 1.1 msaitoh static inline void
149 1.1 msaitoh ixgbe_update_max_frame(struct adapter * adapter, int max_frame)
150 1.1 msaitoh {
151 1.1 msaitoh if (adapter->max_frame_size < max_frame)
152 1.1 msaitoh adapter->max_frame_size = max_frame;
153 1.1 msaitoh }
154 1.1 msaitoh
155 1.1 msaitoh inline u32
156 1.1 msaitoh ixgbe_get_mrqc(int iov_mode)
157 1.1 msaitoh {
158 1.1 msaitoh u32 mrqc;
159 1.1 msaitoh
160 1.1 msaitoh switch (iov_mode) {
161 1.1 msaitoh case IXGBE_64_VM:
162 1.1 msaitoh mrqc = IXGBE_MRQC_VMDQRSS64EN;
163 1.1 msaitoh break;
164 1.1 msaitoh case IXGBE_32_VM:
165 1.1 msaitoh mrqc = IXGBE_MRQC_VMDQRSS32EN;
166 1.1 msaitoh break;
167 1.1 msaitoh case IXGBE_NO_VM:
168 1.1 msaitoh mrqc = 0;
169 1.1 msaitoh break;
170 1.1 msaitoh default:
171 1.1 msaitoh panic("Unexpected SR-IOV mode %d", iov_mode);
172 1.1 msaitoh }
173 1.1 msaitoh
174 1.1 msaitoh return mrqc;
175 1.1 msaitoh }
176 1.1 msaitoh
177 1.1 msaitoh
178 1.1 msaitoh inline u32
179 1.1 msaitoh ixgbe_get_mtqc(int iov_mode)
180 1.1 msaitoh {
181 1.1 msaitoh uint32_t mtqc;
182 1.1 msaitoh
183 1.1 msaitoh switch (iov_mode) {
184 1.1 msaitoh case IXGBE_64_VM:
185 1.1 msaitoh mtqc = IXGBE_MTQC_64VF | IXGBE_MTQC_VT_ENA;
186 1.1 msaitoh break;
187 1.1 msaitoh case IXGBE_32_VM:
188 1.1 msaitoh mtqc = IXGBE_MTQC_32VF | IXGBE_MTQC_VT_ENA;
189 1.1 msaitoh break;
190 1.1 msaitoh case IXGBE_NO_VM:
191 1.1 msaitoh mtqc = IXGBE_MTQC_64Q_1PB;
192 1.1 msaitoh break;
193 1.1 msaitoh default:
194 1.1 msaitoh panic("Unexpected SR-IOV mode %d", iov_mode);
195 1.1 msaitoh }
196 1.1 msaitoh
197 1.1 msaitoh return mtqc;
198 1.1 msaitoh }
199 1.1 msaitoh
200 1.1 msaitoh void
201 1.1 msaitoh ixgbe_ping_all_vfs(struct adapter *adapter)
202 1.1 msaitoh {
203 1.1 msaitoh struct ixgbe_vf *vf;
204 1.1 msaitoh
205 1.1 msaitoh for (int i = 0; i < adapter->num_vfs; i++) {
206 1.1 msaitoh vf = &adapter->vfs[i];
207 1.1 msaitoh if (vf->flags & IXGBE_VF_ACTIVE)
208 1.3 msaitoh ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
209 1.1 msaitoh }
210 1.1 msaitoh } /* ixgbe_ping_all_vfs */
211 1.1 msaitoh
212 1.1 msaitoh
213 1.1 msaitoh static void
214 1.1 msaitoh ixgbe_vf_set_default_vlan(struct adapter *adapter, struct ixgbe_vf *vf,
215 1.1 msaitoh uint16_t tag)
216 1.1 msaitoh {
217 1.1 msaitoh struct ixgbe_hw *hw;
218 1.1 msaitoh uint32_t vmolr, vmvir;
219 1.1 msaitoh
220 1.1 msaitoh hw = &adapter->hw;
221 1.1 msaitoh
222 1.1 msaitoh vf->vlan_tag = tag;
223 1.1 msaitoh
224 1.1 msaitoh vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));
225 1.1 msaitoh
226 1.1 msaitoh /* Do not receive packets that pass inexact filters. */
227 1.1 msaitoh vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);
228 1.1 msaitoh
229 1.1 msaitoh /* Disable Multicast Promicuous Mode. */
230 1.1 msaitoh vmolr &= ~IXGBE_VMOLR_MPE;
231 1.1 msaitoh
232 1.1 msaitoh /* Accept broadcasts. */
233 1.1 msaitoh vmolr |= IXGBE_VMOLR_BAM;
234 1.1 msaitoh
235 1.1 msaitoh if (tag == 0) {
236 1.1 msaitoh /* Accept non-vlan tagged traffic. */
237 1.2 msaitoh vmolr |= IXGBE_VMOLR_AUPE;
238 1.1 msaitoh
239 1.1 msaitoh /* Allow VM to tag outgoing traffic; no default tag. */
240 1.1 msaitoh vmvir = 0;
241 1.1 msaitoh } else {
242 1.1 msaitoh /* Require vlan-tagged traffic. */
243 1.1 msaitoh vmolr &= ~IXGBE_VMOLR_AUPE;
244 1.1 msaitoh
245 1.1 msaitoh /* Tag all traffic with provided vlan tag. */
246 1.1 msaitoh vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT);
247 1.1 msaitoh }
248 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr);
249 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
250 1.1 msaitoh } /* ixgbe_vf_set_default_vlan */
251 1.1 msaitoh
252 1.1 msaitoh
253 1.5 msaitoh static void
254 1.5 msaitoh ixgbe_clear_vfmbmem(struct ixgbe_hw *hw, struct ixgbe_vf *vf)
255 1.5 msaitoh {
256 1.5 msaitoh uint32_t vf_index = IXGBE_VF_INDEX(vf->pool);
257 1.5 msaitoh uint16_t mbx_size = hw->mbx.size;
258 1.5 msaitoh uint16_t i;
259 1.5 msaitoh
260 1.5 msaitoh IXGBE_CORE_LOCK_ASSERT(adapter);
261 1.5 msaitoh
262 1.5 msaitoh for (i = 0; i < mbx_size; ++i)
263 1.5 msaitoh IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_index), i, 0x0);
264 1.5 msaitoh } /* ixgbe_clear_vfmbmem */
265 1.5 msaitoh
266 1.5 msaitoh
267 1.9 mrg static bool
268 1.1 msaitoh ixgbe_vf_frame_size_compatible(struct adapter *adapter, struct ixgbe_vf *vf)
269 1.1 msaitoh {
270 1.1 msaitoh
271 1.1 msaitoh /*
272 1.1 msaitoh * Frame size compatibility between PF and VF is only a problem on
273 1.1 msaitoh * 82599-based cards. X540 and later support any combination of jumbo
274 1.1 msaitoh * frames on PFs and VFs.
275 1.1 msaitoh */
276 1.1 msaitoh if (adapter->hw.mac.type != ixgbe_mac_82599EB)
277 1.1 msaitoh return (TRUE);
278 1.1 msaitoh
279 1.1 msaitoh switch (vf->api_ver) {
280 1.1 msaitoh case IXGBE_API_VER_1_0:
281 1.1 msaitoh case IXGBE_API_VER_UNKNOWN:
282 1.1 msaitoh /*
283 1.1 msaitoh * On legacy (1.0 and older) VF versions, we don't support jumbo
284 1.1 msaitoh * frames on either the PF or the VF.
285 1.1 msaitoh */
286 1.1 msaitoh if (adapter->max_frame_size > ETHER_MAX_LEN ||
287 1.1 msaitoh vf->max_frame_size > ETHER_MAX_LEN)
288 1.1 msaitoh return (FALSE);
289 1.1 msaitoh
290 1.1 msaitoh return (TRUE);
291 1.1 msaitoh
292 1.1 msaitoh break;
293 1.1 msaitoh case IXGBE_API_VER_1_1:
294 1.1 msaitoh default:
295 1.1 msaitoh /*
296 1.1 msaitoh * 1.1 or later VF versions always work if they aren't using
297 1.1 msaitoh * jumbo frames.
298 1.1 msaitoh */
299 1.1 msaitoh if (vf->max_frame_size <= ETHER_MAX_LEN)
300 1.1 msaitoh return (TRUE);
301 1.1 msaitoh
302 1.1 msaitoh /*
303 1.1 msaitoh * Jumbo frames only work with VFs if the PF is also using jumbo
304 1.1 msaitoh * frames.
305 1.1 msaitoh */
306 1.1 msaitoh if (adapter->max_frame_size <= ETHER_MAX_LEN)
307 1.1 msaitoh return (TRUE);
308 1.1 msaitoh
309 1.1 msaitoh return (FALSE);
310 1.1 msaitoh }
311 1.1 msaitoh } /* ixgbe_vf_frame_size_compatible */
312 1.1 msaitoh
313 1.1 msaitoh
/*
 * Tear down a VF's PF-side state after the VF signalled a reset:
 * restore its default VLAN, drop its receive address, wipe its
 * mailbox memory, and bounce its TX queues.
 */
static void
ixgbe_process_vf_reset(struct adapter *adapter, struct ixgbe_vf *vf)
{
	ixgbe_vf_set_default_vlan(adapter, vf, vf->default_vlan);

	// XXX clear multicast addresses

	/* Remove the VF's station address from the RAR table. */
	ixgbe_clear_rar(&adapter->hw, vf->rar_index);
	/* Zero any stale PF-side mailbox contents for this pool. */
	ixgbe_clear_vfmbmem(&adapter->hw, vf);
	ixgbe_toggle_txdctl(&adapter->hw, IXGBE_VF_INDEX(vf->pool));

	/* Force the VF to renegotiate its mailbox API version. */
	vf->api_ver = IXGBE_API_VER_UNKNOWN;
} /* ixgbe_process_vf_reset */
327 1.1 msaitoh
328 1.1 msaitoh
329 1.1 msaitoh static void
330 1.1 msaitoh ixgbe_vf_enable_transmit(struct adapter *adapter, struct ixgbe_vf *vf)
331 1.1 msaitoh {
332 1.1 msaitoh struct ixgbe_hw *hw;
333 1.1 msaitoh uint32_t vf_index, vfte;
334 1.1 msaitoh
335 1.1 msaitoh hw = &adapter->hw;
336 1.1 msaitoh
337 1.1 msaitoh vf_index = IXGBE_VF_INDEX(vf->pool);
338 1.1 msaitoh vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index));
339 1.1 msaitoh vfte |= IXGBE_VF_BIT(vf->pool);
340 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte);
341 1.1 msaitoh } /* ixgbe_vf_enable_transmit */
342 1.1 msaitoh
343 1.1 msaitoh
344 1.1 msaitoh static void
345 1.1 msaitoh ixgbe_vf_enable_receive(struct adapter *adapter, struct ixgbe_vf *vf)
346 1.1 msaitoh {
347 1.1 msaitoh struct ixgbe_hw *hw;
348 1.1 msaitoh uint32_t vf_index, vfre;
349 1.1 msaitoh
350 1.1 msaitoh hw = &adapter->hw;
351 1.1 msaitoh
352 1.1 msaitoh vf_index = IXGBE_VF_INDEX(vf->pool);
353 1.1 msaitoh vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index));
354 1.1 msaitoh if (ixgbe_vf_frame_size_compatible(adapter, vf))
355 1.1 msaitoh vfre |= IXGBE_VF_BIT(vf->pool);
356 1.1 msaitoh else
357 1.1 msaitoh vfre &= ~IXGBE_VF_BIT(vf->pool);
358 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre);
359 1.1 msaitoh } /* ixgbe_vf_enable_receive */
360 1.1 msaitoh
361 1.1 msaitoh
/*
 * Handle a VF_RESET mailbox message: reset the VF's PF-side state,
 * re-program its MAC (if valid), re-enable its queues, and reply with
 * the permanent MAC address message.
 */
static void
ixgbe_vf_reset_msg(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t ack;
	uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN];

	hw = &adapter->hw;

	ixgbe_process_vf_reset(adapter, vf);

	/* ACK only if the VF has a valid MAC we can program. */
	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr,
		    vf->pool, TRUE);
		ack = IXGBE_VT_MSGTYPE_ACK;
	} else
		ack = IXGBE_VT_MSGTYPE_NACK;

	ixgbe_vf_enable_transmit(adapter, vf);
	ixgbe_vf_enable_receive(adapter, vf);

	/* From here on the VF is clear-to-send. */
	vf->flags |= IXGBE_VF_CTS;

	/* Reply: RESET|ACK/NACK, the MAC in words 1-2, filter type in 3. */
	resp[0] = IXGBE_VF_RESET | ack;
	bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN);
	resp[3] = hw->mac.mc_filter_type;
	hw->mbx.ops.write(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
} /* ixgbe_vf_reset_msg */
390 1.1 msaitoh
391 1.1 msaitoh
392 1.1 msaitoh static void
393 1.1 msaitoh ixgbe_vf_set_mac(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
394 1.1 msaitoh {
395 1.1 msaitoh uint8_t *mac;
396 1.1 msaitoh
397 1.1 msaitoh mac = (uint8_t*)&msg[1];
398 1.1 msaitoh
399 1.1 msaitoh /* Check that the VF has permission to change the MAC address. */
400 1.1 msaitoh if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
401 1.1 msaitoh ixgbe_send_vf_nack(adapter, vf, msg[0]);
402 1.1 msaitoh return;
403 1.1 msaitoh }
404 1.1 msaitoh
405 1.1 msaitoh if (ixgbe_validate_mac_addr(mac) != 0) {
406 1.1 msaitoh ixgbe_send_vf_nack(adapter, vf, msg[0]);
407 1.1 msaitoh return;
408 1.1 msaitoh }
409 1.1 msaitoh
410 1.1 msaitoh bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
411 1.1 msaitoh
412 1.1 msaitoh ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr, vf->pool,
413 1.1 msaitoh TRUE);
414 1.1 msaitoh
415 1.1 msaitoh ixgbe_send_vf_ack(adapter, vf, msg[0]);
416 1.1 msaitoh } /* ixgbe_vf_set_mac */
417 1.1 msaitoh
418 1.1 msaitoh
419 1.1 msaitoh /*
420 1.1 msaitoh * VF multicast addresses are set by using the appropriate bit in
421 1.1 msaitoh * 1 of 128 32 bit addresses (4096 possible).
422 1.1 msaitoh */
423 1.1 msaitoh static void
424 1.1 msaitoh ixgbe_vf_set_mc_addr(struct adapter *adapter, struct ixgbe_vf *vf, u32 *msg)
425 1.1 msaitoh {
426 1.1 msaitoh u16 *list = (u16*)&msg[1];
427 1.1 msaitoh int entries;
428 1.1 msaitoh u32 vmolr, vec_bit, vec_reg, mta_reg;
429 1.1 msaitoh
430 1.1 msaitoh entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
431 1.4 riastrad entries = uimin(entries, IXGBE_MAX_VF_MC);
432 1.1 msaitoh
433 1.1 msaitoh vmolr = IXGBE_READ_REG(&adapter->hw, IXGBE_VMOLR(vf->pool));
434 1.1 msaitoh
435 1.1 msaitoh vf->num_mc_hashes = entries;
436 1.1 msaitoh
437 1.1 msaitoh /* Set the appropriate MTA bit */
438 1.1 msaitoh for (int i = 0; i < entries; i++) {
439 1.1 msaitoh vf->mc_hash[i] = list[i];
440 1.1 msaitoh vec_reg = (vf->mc_hash[i] >> 5) & 0x7F;
441 1.1 msaitoh vec_bit = vf->mc_hash[i] & 0x1F;
442 1.1 msaitoh mta_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_MTA(vec_reg));
443 1.1 msaitoh mta_reg |= (1 << vec_bit);
444 1.1 msaitoh IXGBE_WRITE_REG(&adapter->hw, IXGBE_MTA(vec_reg), mta_reg);
445 1.1 msaitoh }
446 1.1 msaitoh
447 1.1 msaitoh vmolr |= IXGBE_VMOLR_ROMPE;
448 1.1 msaitoh IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(vf->pool), vmolr);
449 1.1 msaitoh ixgbe_send_vf_ack(adapter, vf, msg[0]);
450 1.1 msaitoh } /* ixgbe_vf_set_mc_addr */
451 1.1 msaitoh
452 1.1 msaitoh
453 1.1 msaitoh static void
454 1.1 msaitoh ixgbe_vf_set_vlan(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
455 1.1 msaitoh {
456 1.1 msaitoh struct ixgbe_hw *hw;
457 1.1 msaitoh int enable;
458 1.1 msaitoh uint16_t tag;
459 1.1 msaitoh
460 1.1 msaitoh hw = &adapter->hw;
461 1.1 msaitoh enable = IXGBE_VT_MSGINFO(msg[0]);
462 1.1 msaitoh tag = msg[1] & IXGBE_VLVF_VLANID_MASK;
463 1.1 msaitoh
464 1.1 msaitoh if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
465 1.1 msaitoh ixgbe_send_vf_nack(adapter, vf, msg[0]);
466 1.1 msaitoh return;
467 1.1 msaitoh }
468 1.1 msaitoh
469 1.1 msaitoh /* It is illegal to enable vlan tag 0. */
470 1.2 msaitoh if (tag == 0 && enable != 0) {
471 1.1 msaitoh ixgbe_send_vf_nack(adapter, vf, msg[0]);
472 1.1 msaitoh return;
473 1.1 msaitoh }
474 1.1 msaitoh
475 1.1 msaitoh ixgbe_set_vfta(hw, tag, vf->pool, enable, false);
476 1.1 msaitoh ixgbe_send_vf_ack(adapter, vf, msg[0]);
477 1.1 msaitoh } /* ixgbe_vf_set_vlan */
478 1.1 msaitoh
479 1.1 msaitoh
/*
 * Handle a SET_LPE (large packet enable) request: record the VF's
 * requested maximum frame size, grow the adapter-wide maximum if
 * needed, and update the hardware MHADD register.  Out-of-range
 * requests are deliberately ACKed without effect.
 */
static void
ixgbe_vf_set_lpe(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t vf_max_size, pf_max_size, mhadd;

	hw = &adapter->hw;
	/* msg[1] is the requested max frame size including the CRC. */
	vf_max_size = msg[1];

	if (vf_max_size < ETHER_CRC_LEN) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		return;
	}

	vf_max_size -= ETHER_CRC_LEN;

	if (vf_max_size > IXGBE_MAX_FRAME_SIZE) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		return;
	}

	vf->max_frame_size = vf_max_size;
	ixgbe_update_max_frame(adapter, vf->max_frame_size);

	/*
	 * We might have to disable reception to this VF if the frame size is
	 * not compatible with the config on the PF.
	 */
	ixgbe_vf_enable_receive(adapter, vf);

	/* Grow the hardware max-frame-size field if it is now too small. */
	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
	pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;

	if (pf_max_size < adapter->max_frame_size) {
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_lpe */
523 1.1 msaitoh
524 1.1 msaitoh
/*
 * Handle a SET_MACVLAN request.  Secondary MAC/VLAN filters are not
 * implemented, so the request is always NACKed.
 */
static void
ixgbe_vf_set_macvlan(struct adapter *adapter, struct ixgbe_vf *vf,
    uint32_t *msg)
{
	//XXX implement this
	ixgbe_send_vf_nack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_macvlan */
532 1.1 msaitoh
533 1.1 msaitoh
534 1.1 msaitoh static void
535 1.1 msaitoh ixgbe_vf_api_negotiate(struct adapter *adapter, struct ixgbe_vf *vf,
536 1.1 msaitoh uint32_t *msg)
537 1.1 msaitoh {
538 1.1 msaitoh
539 1.1 msaitoh switch (msg[1]) {
540 1.1 msaitoh case IXGBE_API_VER_1_0:
541 1.1 msaitoh case IXGBE_API_VER_1_1:
542 1.1 msaitoh vf->api_ver = msg[1];
543 1.1 msaitoh ixgbe_send_vf_ack(adapter, vf, msg[0]);
544 1.1 msaitoh break;
545 1.1 msaitoh default:
546 1.1 msaitoh vf->api_ver = IXGBE_API_VER_UNKNOWN;
547 1.1 msaitoh ixgbe_send_vf_nack(adapter, vf, msg[0]);
548 1.1 msaitoh break;
549 1.1 msaitoh }
550 1.1 msaitoh } /* ixgbe_vf_api_negotiate */
551 1.1 msaitoh
552 1.1 msaitoh
553 1.1 msaitoh static void
554 1.1 msaitoh ixgbe_vf_get_queues(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
555 1.1 msaitoh {
556 1.1 msaitoh struct ixgbe_hw *hw;
557 1.1 msaitoh uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
558 1.1 msaitoh int num_queues;
559 1.1 msaitoh
560 1.1 msaitoh hw = &adapter->hw;
561 1.1 msaitoh
562 1.1 msaitoh /* GET_QUEUES is not supported on pre-1.1 APIs. */
563 1.1 msaitoh switch (msg[0]) {
564 1.1 msaitoh case IXGBE_API_VER_1_0:
565 1.1 msaitoh case IXGBE_API_VER_UNKNOWN:
566 1.1 msaitoh ixgbe_send_vf_nack(adapter, vf, msg[0]);
567 1.1 msaitoh return;
568 1.1 msaitoh }
569 1.1 msaitoh
570 1.1 msaitoh resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK |
571 1.1 msaitoh IXGBE_VT_MSGTYPE_CTS;
572 1.1 msaitoh
573 1.1 msaitoh num_queues = ixgbe_vf_queues(adapter->iov_mode);
574 1.1 msaitoh resp[IXGBE_VF_TX_QUEUES] = num_queues;
575 1.1 msaitoh resp[IXGBE_VF_RX_QUEUES] = num_queues;
576 1.1 msaitoh resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
577 1.1 msaitoh resp[IXGBE_VF_DEF_QUEUE] = 0;
578 1.1 msaitoh
579 1.1 msaitoh hw->mbx.ops.write(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
580 1.1 msaitoh } /* ixgbe_vf_get_queues */
581 1.1 msaitoh
582 1.1 msaitoh
/*
 * Read one message from a VF's mailbox and dispatch it to the
 * appropriate handler.  RESET is honored unconditionally; all other
 * requests require the VF to have completed the reset handshake
 * (IXGBE_VF_CTS set) and are otherwise NACKed.
 */
static void
ixgbe_process_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t msg[IXGBE_VFMAILBOX_SIZE];
	int error;

	hw = &adapter->hw;

	error = hw->mbx.ops.read(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool);

	if (error != 0)
		return;

	CTR3(KTR_MALLOC, "%s: received msg %x from %d", adapter->ifp->if_xname,
	    msg[0], vf->pool);
	/* Reset requests are handled even before clear-to-send. */
	if (msg[0] == IXGBE_VF_RESET) {
		ixgbe_vf_reset_msg(adapter, vf, msg);
		return;
	}

	if (!(vf->flags & IXGBE_VF_CTS)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	/* Dispatch on the message type (low bits of the message word). */
	switch (msg[0] & IXGBE_VT_MSG_MASK) {
	case IXGBE_VF_SET_MAC_ADDR:
		ixgbe_vf_set_mac(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_MULTICAST:
		ixgbe_vf_set_mc_addr(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_VLAN:
		ixgbe_vf_set_vlan(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_LPE:
		ixgbe_vf_set_lpe(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_MACVLAN:
		ixgbe_vf_set_macvlan(adapter, vf, msg);
		break;
	case IXGBE_VF_API_NEGOTIATE:
		ixgbe_vf_api_negotiate(adapter, vf, msg);
		break;
	case IXGBE_VF_GET_QUEUES:
		ixgbe_vf_get_queues(adapter, vf, msg);
		break;
	default:
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
	}
} /* ixgbe_process_vf_msg */
635 1.1 msaitoh
636 1.1 msaitoh
/* Tasklet for handling VF -> PF mailbox messages */
void
ixgbe_handle_mbx(void *context, int pending)
{
	struct adapter *adapter = context;
	struct ixgbe_hw *hw;
	struct ixgbe_vf *vf;
	int i;

	/* Caller must hold the core lock; we touch shared VF state. */
	KASSERT(mutex_owned(&adapter->core_mtx));

	hw = &adapter->hw;

	/* Poll every active VF for pending resets, messages, and ACKs. */
	for (i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];

		if (vf->flags & IXGBE_VF_ACTIVE) {
			if (hw->mbx.ops.check_for_rst(hw, vf->pool) == 0)
				ixgbe_process_vf_reset(adapter, vf);

			if (hw->mbx.ops.check_for_msg(hw, vf->pool) == 0)
				ixgbe_process_vf_msg(adapter, vf);

			if (hw->mbx.ops.check_for_ack(hw, vf->pool) == 0)
				ixgbe_process_vf_ack(adapter, vf);
		}
	}
} /* ixgbe_handle_mbx */
665 1.1 msaitoh
/*
 * Enter SR-IOV mode: pick a VM mode sized for num_vfs, allocate the
 * per-VF state array, and reinitialize the adapter with SR-IOV
 * enabled.  Returns 0 on success or an errno; on failure all SR-IOV
 * state is reset to "no VMs".
 *
 * NOTE(review): the 'config' nvlist is not consulted here — presumably
 * per-VF configuration is applied elsewhere; confirm against callers.
 */
int
ixgbe_init_iov(device_t dev, u16 num_vfs, const nvlist_t *config)
{
	struct adapter *adapter;
	int retval = 0;

	adapter = device_get_softc(dev);
	adapter->iov_mode = IXGBE_NO_VM;

	if (num_vfs == 0) {
		/* Would we ever get num_vfs = 0? */
		retval = EINVAL;
		goto err_init_iov;
	}

	/*
	 * We've got to reserve a VM's worth of queues for the PF,
	 * thus we go into "64 VF mode" if 32+ VFs are requested.
	 * With 64 VFs, you can only have two queues per VF.
	 * With 32 VFs, you can have up to four queues per VF.
	 */
	if (num_vfs >= IXGBE_32_VM)
		adapter->iov_mode = IXGBE_64_VM;
	else
		adapter->iov_mode = IXGBE_32_VM;

	/* Again, reserving 1 VM's worth of queues for the PF */
	adapter->pool = adapter->iov_mode - 1;

	if ((num_vfs > adapter->pool) || (num_vfs >= IXGBE_64_VM)) {
		retval = ENOSPC;
		goto err_init_iov;
	}

	IXGBE_CORE_LOCK(adapter);

	/* M_NOWAIT: allocating under the core lock, so we must not sleep. */
	adapter->vfs = malloc(sizeof(*adapter->vfs) * num_vfs, M_IXGBE_SRIOV,
	    M_NOWAIT | M_ZERO);

	if (adapter->vfs == NULL) {
		retval = ENOMEM;
		IXGBE_CORE_UNLOCK(adapter);
		goto err_init_iov;
	}

	adapter->num_vfs = num_vfs;

	/* set the SRIOV flag now as it's needed
	 * by ixgbe_init_locked() */
	adapter->feat_en |= IXGBE_FEATURE_SRIOV;
	adapter->init_locked(adapter);

	IXGBE_CORE_UNLOCK(adapter);

	return (retval);

err_init_iov:
	/* Roll all SR-IOV state back to "disabled". */
	adapter->num_vfs = 0;
	adapter->pool = 0;
	adapter->iov_mode = IXGBE_NO_VM;

	return (retval);
} /* ixgbe_init_iov */
729 1.1 msaitoh
730 1.1 msaitoh void
731 1.1 msaitoh ixgbe_uninit_iov(device_t dev)
732 1.1 msaitoh {
733 1.1 msaitoh struct ixgbe_hw *hw;
734 1.1 msaitoh struct adapter *adapter;
735 1.1 msaitoh uint32_t pf_reg, vf_reg;
736 1.1 msaitoh
737 1.1 msaitoh adapter = device_get_softc(dev);
738 1.1 msaitoh hw = &adapter->hw;
739 1.1 msaitoh
740 1.1 msaitoh IXGBE_CORE_LOCK(adapter);
741 1.1 msaitoh
742 1.1 msaitoh /* Enable rx/tx for the PF and disable it for all VFs. */
743 1.1 msaitoh pf_reg = IXGBE_VF_INDEX(adapter->pool);
744 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg), IXGBE_VF_BIT(adapter->pool));
745 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg), IXGBE_VF_BIT(adapter->pool));
746 1.1 msaitoh
747 1.1 msaitoh if (pf_reg == 0)
748 1.1 msaitoh vf_reg = 1;
749 1.1 msaitoh else
750 1.1 msaitoh vf_reg = 0;
751 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
752 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);
753 1.1 msaitoh
754 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
755 1.1 msaitoh
756 1.1 msaitoh free(adapter->vfs, M_IXGBE_SRIOV);
757 1.1 msaitoh adapter->vfs = NULL;
758 1.1 msaitoh adapter->num_vfs = 0;
759 1.1 msaitoh adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
760 1.1 msaitoh
761 1.1 msaitoh IXGBE_CORE_UNLOCK(adapter);
762 1.1 msaitoh } /* ixgbe_uninit_iov */
763 1.1 msaitoh
/*
 * Bring one VF back up after an adapter (re)initialization: unmask its
 * mailbox interrupt, restore its VLAN and MAC settings, re-enable its
 * queues, and notify it via a PF control message.  Core lock held.
 */
static void
ixgbe_init_vf(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, pfmbimr;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	hw = &adapter->hw;

	if (!(vf->flags & IXGBE_VF_ACTIVE))
		return;

	/* Unmask this VF's bit in the PF mailbox interrupt register. */
	vf_index = IXGBE_VF_INDEX(vf->pool);
	pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
	pfmbimr |= IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);

	ixgbe_vf_set_default_vlan(adapter, vf, vf->vlan_tag);

	// XXX multicast addresses

	/* Re-program the VF's MAC into the RAR table if it has one. */
	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&adapter->hw, vf->rar_index,
		    vf->ether_addr, vf->pool, TRUE);
	}

	ixgbe_vf_enable_transmit(adapter, vf);
	ixgbe_vf_enable_receive(adapter, vf);

	/* Tell the VF the PF is (back) up. */
	ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
} /* ixgbe_init_vf */
796 1.1 msaitoh
797 1.1 msaitoh void
798 1.1 msaitoh ixgbe_initialize_iov(struct adapter *adapter)
799 1.1 msaitoh {
800 1.1 msaitoh struct ixgbe_hw *hw = &adapter->hw;
801 1.1 msaitoh uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
802 1.1 msaitoh int i;
803 1.1 msaitoh
804 1.1 msaitoh if (adapter->iov_mode == IXGBE_NO_VM)
805 1.1 msaitoh return;
806 1.1 msaitoh
807 1.1 msaitoh IXGBE_CORE_LOCK_ASSERT(adapter);
808 1.1 msaitoh
809 1.1 msaitoh /* RMW appropriate registers based on IOV mode */
810 1.1 msaitoh /* Read... */
811 1.1 msaitoh mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
812 1.1 msaitoh gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
813 1.1 msaitoh gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
814 1.1 msaitoh /* Modify... */
815 1.1 msaitoh mrqc &= ~IXGBE_MRQC_MRQE_MASK;
816 1.1 msaitoh mtqc = IXGBE_MTQC_VT_ENA; /* No initial MTQC read needed */
817 1.1 msaitoh gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
818 1.1 msaitoh gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
819 1.1 msaitoh gpie &= ~IXGBE_GPIE_VTMODE_MASK;
820 1.1 msaitoh switch (adapter->iov_mode) {
821 1.1 msaitoh case IXGBE_64_VM:
822 1.1 msaitoh mrqc |= IXGBE_MRQC_VMDQRSS64EN;
823 1.1 msaitoh mtqc |= IXGBE_MTQC_64VF;
824 1.1 msaitoh gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
825 1.1 msaitoh gpie |= IXGBE_GPIE_VTMODE_64;
826 1.1 msaitoh break;
827 1.1 msaitoh case IXGBE_32_VM:
828 1.1 msaitoh mrqc |= IXGBE_MRQC_VMDQRSS32EN;
829 1.1 msaitoh mtqc |= IXGBE_MTQC_32VF;
830 1.1 msaitoh gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
831 1.1 msaitoh gpie |= IXGBE_GPIE_VTMODE_32;
832 1.1 msaitoh break;
833 1.1 msaitoh default:
834 1.1 msaitoh panic("Unexpected SR-IOV mode %d", adapter->iov_mode);
835 1.1 msaitoh }
836 1.1 msaitoh /* Write... */
837 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
838 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
839 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
840 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
841 1.1 msaitoh
842 1.1 msaitoh /* Enable rx/tx for the PF. */
843 1.1 msaitoh vf_reg = IXGBE_VF_INDEX(adapter->pool);
844 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), IXGBE_VF_BIT(adapter->pool));
845 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), IXGBE_VF_BIT(adapter->pool));
846 1.1 msaitoh
847 1.1 msaitoh /* Allow VM-to-VM communication. */
848 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
849 1.1 msaitoh
850 1.1 msaitoh vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
851 1.1 msaitoh vt_ctl |= (adapter->pool << IXGBE_VT_CTL_POOL_SHIFT);
852 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
853 1.1 msaitoh
854 1.1 msaitoh for (i = 0; i < adapter->num_vfs; i++)
855 1.1 msaitoh ixgbe_init_vf(adapter, &adapter->vfs[i]);
856 1.1 msaitoh } /* ixgbe_initialize_iov */
857 1.1 msaitoh
858 1.1 msaitoh
859 1.1 msaitoh /* Check the max frame setting of all active VF's */
860 1.1 msaitoh void
861 1.1 msaitoh ixgbe_recalculate_max_frame(struct adapter *adapter)
862 1.1 msaitoh {
863 1.1 msaitoh struct ixgbe_vf *vf;
864 1.1 msaitoh
865 1.1 msaitoh IXGBE_CORE_LOCK_ASSERT(adapter);
866 1.1 msaitoh
867 1.1 msaitoh for (int i = 0; i < adapter->num_vfs; i++) {
868 1.1 msaitoh vf = &adapter->vfs[i];
869 1.1 msaitoh if (vf->flags & IXGBE_VF_ACTIVE)
870 1.1 msaitoh ixgbe_update_max_frame(adapter, vf->max_frame_size);
871 1.1 msaitoh }
872 1.1 msaitoh } /* ixgbe_recalculate_max_frame */
873 1.1 msaitoh
874 1.1 msaitoh int
875 1.1 msaitoh ixgbe_add_vf(device_t dev, u16 vfnum, const nvlist_t *config)
876 1.1 msaitoh {
877 1.1 msaitoh struct adapter *adapter;
878 1.1 msaitoh struct ixgbe_vf *vf;
879 1.1 msaitoh const void *mac;
880 1.1 msaitoh
881 1.1 msaitoh adapter = device_get_softc(dev);
882 1.1 msaitoh
883 1.1 msaitoh KASSERT(vfnum < adapter->num_vfs, ("VF index %d is out of range %d",
884 1.1 msaitoh vfnum, adapter->num_vfs));
885 1.1 msaitoh
886 1.1 msaitoh IXGBE_CORE_LOCK(adapter);
887 1.1 msaitoh vf = &adapter->vfs[vfnum];
888 1.1 msaitoh vf->pool= vfnum;
889 1.1 msaitoh
890 1.1 msaitoh /* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
891 1.1 msaitoh vf->rar_index = vfnum + 1;
892 1.1 msaitoh vf->default_vlan = 0;
893 1.1 msaitoh vf->max_frame_size = ETHER_MAX_LEN;
894 1.1 msaitoh ixgbe_update_max_frame(adapter, vf->max_frame_size);
895 1.1 msaitoh
896 1.1 msaitoh if (nvlist_exists_binary(config, "mac-addr")) {
897 1.1 msaitoh mac = nvlist_get_binary(config, "mac-addr", NULL);
898 1.1 msaitoh bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
899 1.1 msaitoh if (nvlist_get_bool(config, "allow-set-mac"))
900 1.1 msaitoh vf->flags |= IXGBE_VF_CAP_MAC;
901 1.1 msaitoh } else
902 1.1 msaitoh /*
903 1.1 msaitoh * If the administrator has not specified a MAC address then
904 1.1 msaitoh * we must allow the VF to choose one.
905 1.1 msaitoh */
906 1.1 msaitoh vf->flags |= IXGBE_VF_CAP_MAC;
907 1.1 msaitoh
908 1.1 msaitoh vf->flags |= IXGBE_VF_ACTIVE;
909 1.1 msaitoh
910 1.1 msaitoh ixgbe_init_vf(adapter, vf);
911 1.1 msaitoh IXGBE_CORE_UNLOCK(adapter);
912 1.1 msaitoh
913 1.1 msaitoh return (0);
914 1.1 msaitoh } /* ixgbe_add_vf */
915 1.1 msaitoh
916 1.1 msaitoh #else
917 1.1 msaitoh
918 1.1 msaitoh void
919 1.1 msaitoh ixgbe_handle_mbx(void *context, int pending)
920 1.1 msaitoh {
921 1.1 msaitoh UNREFERENCED_2PARAMETER(context, pending);
922 1.1 msaitoh } /* ixgbe_handle_mbx */
923 1.1 msaitoh
924 1.1 msaitoh inline int
925 1.1 msaitoh ixgbe_vf_que_index(int mode, int vfnum, int num)
926 1.1 msaitoh {
927 1.1 msaitoh UNREFERENCED_2PARAMETER(mode, vfnum);
928 1.1 msaitoh
929 1.1 msaitoh return num;
930 1.1 msaitoh } /* ixgbe_vf_que_index */
931 1.1 msaitoh
932 1.1 msaitoh #endif
933