if_sriov.c revision 1.5 1 1.1 msaitoh /******************************************************************************
2 1.1 msaitoh
3 1.1 msaitoh Copyright (c) 2001-2017, Intel Corporation
4 1.1 msaitoh All rights reserved.
5 1.1 msaitoh
6 1.1 msaitoh Redistribution and use in source and binary forms, with or without
7 1.1 msaitoh modification, are permitted provided that the following conditions are met:
8 1.1 msaitoh
9 1.1 msaitoh 1. Redistributions of source code must retain the above copyright notice,
10 1.1 msaitoh this list of conditions and the following disclaimer.
11 1.1 msaitoh
12 1.1 msaitoh 2. Redistributions in binary form must reproduce the above copyright
13 1.1 msaitoh notice, this list of conditions and the following disclaimer in the
14 1.1 msaitoh documentation and/or other materials provided with the distribution.
15 1.1 msaitoh
16 1.1 msaitoh 3. Neither the name of the Intel Corporation nor the names of its
17 1.1 msaitoh contributors may be used to endorse or promote products derived from
18 1.1 msaitoh this software without specific prior written permission.
19 1.1 msaitoh
20 1.1 msaitoh THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 1.1 msaitoh AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 1.1 msaitoh IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 1.1 msaitoh ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 1.1 msaitoh LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 1.1 msaitoh CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 1.1 msaitoh SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 1.1 msaitoh INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 1.1 msaitoh CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 1.1 msaitoh ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 1.1 msaitoh POSSIBILITY OF SUCH DAMAGE.
31 1.1 msaitoh
32 1.1 msaitoh ******************************************************************************/
33 1.3 msaitoh /*$FreeBSD: head/sys/dev/ixgbe/if_sriov.c 327031 2017-12-20 18:15:06Z erj $*/
34 1.1 msaitoh
35 1.1 msaitoh #include "ixgbe.h"
36 1.2 msaitoh #include "ixgbe_sriov.h"
37 1.1 msaitoh
38 1.1 msaitoh #ifdef PCI_IOV
39 1.1 msaitoh
40 1.1 msaitoh MALLOC_DEFINE(M_IXGBE_SRIOV, "ix_sriov", "ix SR-IOV allocations");
41 1.1 msaitoh
42 1.1 msaitoh /************************************************************************
43 1.1 msaitoh * ixgbe_pci_iov_detach
44 1.1 msaitoh ************************************************************************/
45 1.1 msaitoh int
46 1.1 msaitoh ixgbe_pci_iov_detach(device_t dev)
47 1.1 msaitoh {
48 1.1 msaitoh return pci_iov_detach(dev);
49 1.1 msaitoh }
50 1.1 msaitoh
51 1.1 msaitoh /************************************************************************
52 1.1 msaitoh * ixgbe_define_iov_schemas
53 1.1 msaitoh ************************************************************************/
54 1.1 msaitoh void
55 1.1 msaitoh ixgbe_define_iov_schemas(device_t dev, int *error)
56 1.1 msaitoh {
57 1.1 msaitoh nvlist_t *pf_schema, *vf_schema;
58 1.1 msaitoh
59 1.1 msaitoh pf_schema = pci_iov_schema_alloc_node();
60 1.1 msaitoh vf_schema = pci_iov_schema_alloc_node();
61 1.1 msaitoh pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
62 1.1 msaitoh pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
63 1.1 msaitoh IOV_SCHEMA_HASDEFAULT, TRUE);
64 1.1 msaitoh pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
65 1.1 msaitoh IOV_SCHEMA_HASDEFAULT, FALSE);
66 1.1 msaitoh pci_iov_schema_add_bool(vf_schema, "allow-promisc",
67 1.1 msaitoh IOV_SCHEMA_HASDEFAULT, FALSE);
68 1.1 msaitoh *error = pci_iov_attach(dev, pf_schema, vf_schema);
69 1.1 msaitoh if (*error != 0) {
70 1.1 msaitoh device_printf(dev,
71 1.1 msaitoh "Error %d setting up SR-IOV\n", *error);
72 1.1 msaitoh }
73 1.1 msaitoh } /* ixgbe_define_iov_schemas */
74 1.1 msaitoh
75 1.1 msaitoh /************************************************************************
76 1.1 msaitoh * ixgbe_align_all_queue_indices
77 1.1 msaitoh ************************************************************************/
78 1.1 msaitoh inline void
79 1.1 msaitoh ixgbe_align_all_queue_indices(struct adapter *adapter)
80 1.1 msaitoh {
81 1.1 msaitoh int i;
82 1.1 msaitoh int index;
83 1.1 msaitoh
84 1.1 msaitoh for (i = 0; i < adapter->num_queues; i++) {
85 1.1 msaitoh index = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, i);
86 1.1 msaitoh adapter->rx_rings[i].me = index;
87 1.1 msaitoh adapter->tx_rings[i].me = index;
88 1.1 msaitoh }
89 1.1 msaitoh }
90 1.1 msaitoh
91 1.1 msaitoh /* Support functions for SR-IOV/VF management */
92 1.1 msaitoh static inline void
93 1.3 msaitoh ixgbe_send_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
94 1.1 msaitoh {
95 1.1 msaitoh if (vf->flags & IXGBE_VF_CTS)
96 1.1 msaitoh msg |= IXGBE_VT_MSGTYPE_CTS;
97 1.1 msaitoh
98 1.3 msaitoh adapter->hw.mbx.ops.write(&adapter->hw, &msg, 1, vf->pool);
99 1.1 msaitoh }
100 1.1 msaitoh
101 1.1 msaitoh static inline void
102 1.1 msaitoh ixgbe_send_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
103 1.1 msaitoh {
104 1.1 msaitoh msg &= IXGBE_VT_MSG_MASK;
105 1.3 msaitoh ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_ACK);
106 1.1 msaitoh }
107 1.1 msaitoh
108 1.1 msaitoh static inline void
109 1.1 msaitoh ixgbe_send_vf_nack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
110 1.1 msaitoh {
111 1.1 msaitoh msg &= IXGBE_VT_MSG_MASK;
112 1.3 msaitoh ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_NACK);
113 1.1 msaitoh }
114 1.1 msaitoh
115 1.1 msaitoh static inline void
116 1.1 msaitoh ixgbe_process_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf)
117 1.1 msaitoh {
118 1.1 msaitoh if (!(vf->flags & IXGBE_VF_CTS))
119 1.1 msaitoh ixgbe_send_vf_nack(adapter, vf, 0);
120 1.1 msaitoh }
121 1.1 msaitoh
122 1.1 msaitoh static inline boolean_t
123 1.1 msaitoh ixgbe_vf_mac_changed(struct ixgbe_vf *vf, const uint8_t *mac)
124 1.1 msaitoh {
125 1.1 msaitoh return (bcmp(mac, vf->ether_addr, ETHER_ADDR_LEN) != 0);
126 1.1 msaitoh }
127 1.1 msaitoh
128 1.1 msaitoh static inline int
129 1.1 msaitoh ixgbe_vf_queues(int mode)
130 1.1 msaitoh {
131 1.1 msaitoh switch (mode) {
132 1.1 msaitoh case IXGBE_64_VM:
133 1.1 msaitoh return (2);
134 1.1 msaitoh case IXGBE_32_VM:
135 1.1 msaitoh return (4);
136 1.1 msaitoh case IXGBE_NO_VM:
137 1.1 msaitoh default:
138 1.1 msaitoh return (0);
139 1.1 msaitoh }
140 1.1 msaitoh }
141 1.1 msaitoh
/* Absolute hardware queue index of VF 'vfnum's 'num'th queue. */
inline int
ixgbe_vf_que_index(int mode, int vfnum, int num)
{
	int per_vf = ixgbe_vf_queues(mode);

	return (vfnum * per_vf + num);
}
147 1.1 msaitoh
148 1.1 msaitoh static inline void
149 1.1 msaitoh ixgbe_update_max_frame(struct adapter * adapter, int max_frame)
150 1.1 msaitoh {
151 1.1 msaitoh if (adapter->max_frame_size < max_frame)
152 1.1 msaitoh adapter->max_frame_size = max_frame;
153 1.1 msaitoh }
154 1.1 msaitoh
155 1.1 msaitoh inline u32
156 1.1 msaitoh ixgbe_get_mrqc(int iov_mode)
157 1.1 msaitoh {
158 1.1 msaitoh u32 mrqc;
159 1.1 msaitoh
160 1.1 msaitoh switch (iov_mode) {
161 1.1 msaitoh case IXGBE_64_VM:
162 1.1 msaitoh mrqc = IXGBE_MRQC_VMDQRSS64EN;
163 1.1 msaitoh break;
164 1.1 msaitoh case IXGBE_32_VM:
165 1.1 msaitoh mrqc = IXGBE_MRQC_VMDQRSS32EN;
166 1.1 msaitoh break;
167 1.1 msaitoh case IXGBE_NO_VM:
168 1.1 msaitoh mrqc = 0;
169 1.1 msaitoh break;
170 1.1 msaitoh default:
171 1.1 msaitoh panic("Unexpected SR-IOV mode %d", iov_mode);
172 1.1 msaitoh }
173 1.1 msaitoh
174 1.1 msaitoh return mrqc;
175 1.1 msaitoh }
176 1.1 msaitoh
177 1.1 msaitoh
178 1.1 msaitoh inline u32
179 1.1 msaitoh ixgbe_get_mtqc(int iov_mode)
180 1.1 msaitoh {
181 1.1 msaitoh uint32_t mtqc;
182 1.1 msaitoh
183 1.1 msaitoh switch (iov_mode) {
184 1.1 msaitoh case IXGBE_64_VM:
185 1.1 msaitoh mtqc = IXGBE_MTQC_64VF | IXGBE_MTQC_VT_ENA;
186 1.1 msaitoh break;
187 1.1 msaitoh case IXGBE_32_VM:
188 1.1 msaitoh mtqc = IXGBE_MTQC_32VF | IXGBE_MTQC_VT_ENA;
189 1.1 msaitoh break;
190 1.1 msaitoh case IXGBE_NO_VM:
191 1.1 msaitoh mtqc = IXGBE_MTQC_64Q_1PB;
192 1.1 msaitoh break;
193 1.1 msaitoh default:
194 1.1 msaitoh panic("Unexpected SR-IOV mode %d", iov_mode);
195 1.1 msaitoh }
196 1.1 msaitoh
197 1.1 msaitoh return mtqc;
198 1.1 msaitoh }
199 1.1 msaitoh
200 1.1 msaitoh void
201 1.1 msaitoh ixgbe_ping_all_vfs(struct adapter *adapter)
202 1.1 msaitoh {
203 1.1 msaitoh struct ixgbe_vf *vf;
204 1.1 msaitoh
205 1.1 msaitoh for (int i = 0; i < adapter->num_vfs; i++) {
206 1.1 msaitoh vf = &adapter->vfs[i];
207 1.1 msaitoh if (vf->flags & IXGBE_VF_ACTIVE)
208 1.3 msaitoh ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
209 1.1 msaitoh }
210 1.1 msaitoh } /* ixgbe_ping_all_vfs */
211 1.1 msaitoh
212 1.1 msaitoh
213 1.1 msaitoh static void
214 1.1 msaitoh ixgbe_vf_set_default_vlan(struct adapter *adapter, struct ixgbe_vf *vf,
215 1.1 msaitoh uint16_t tag)
216 1.1 msaitoh {
217 1.1 msaitoh struct ixgbe_hw *hw;
218 1.1 msaitoh uint32_t vmolr, vmvir;
219 1.1 msaitoh
220 1.1 msaitoh hw = &adapter->hw;
221 1.1 msaitoh
222 1.1 msaitoh vf->vlan_tag = tag;
223 1.1 msaitoh
224 1.1 msaitoh vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));
225 1.1 msaitoh
226 1.1 msaitoh /* Do not receive packets that pass inexact filters. */
227 1.1 msaitoh vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);
228 1.1 msaitoh
229 1.1 msaitoh /* Disable Multicast Promicuous Mode. */
230 1.1 msaitoh vmolr &= ~IXGBE_VMOLR_MPE;
231 1.1 msaitoh
232 1.1 msaitoh /* Accept broadcasts. */
233 1.1 msaitoh vmolr |= IXGBE_VMOLR_BAM;
234 1.1 msaitoh
235 1.1 msaitoh if (tag == 0) {
236 1.1 msaitoh /* Accept non-vlan tagged traffic. */
237 1.2 msaitoh vmolr |= IXGBE_VMOLR_AUPE;
238 1.1 msaitoh
239 1.1 msaitoh /* Allow VM to tag outgoing traffic; no default tag. */
240 1.1 msaitoh vmvir = 0;
241 1.1 msaitoh } else {
242 1.1 msaitoh /* Require vlan-tagged traffic. */
243 1.1 msaitoh vmolr &= ~IXGBE_VMOLR_AUPE;
244 1.1 msaitoh
245 1.1 msaitoh /* Tag all traffic with provided vlan tag. */
246 1.1 msaitoh vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT);
247 1.1 msaitoh }
248 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr);
249 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
250 1.1 msaitoh } /* ixgbe_vf_set_default_vlan */
251 1.1 msaitoh
252 1.1 msaitoh
253 1.5 msaitoh static void
254 1.5 msaitoh ixgbe_clear_vfmbmem(struct ixgbe_hw *hw, struct ixgbe_vf *vf)
255 1.5 msaitoh {
256 1.5 msaitoh uint32_t vf_index = IXGBE_VF_INDEX(vf->pool);
257 1.5 msaitoh uint16_t mbx_size = hw->mbx.size;
258 1.5 msaitoh uint16_t i;
259 1.5 msaitoh
260 1.5 msaitoh IXGBE_CORE_LOCK_ASSERT(adapter);
261 1.5 msaitoh
262 1.5 msaitoh for (i = 0; i < mbx_size; ++i)
263 1.5 msaitoh IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_index), i, 0x0);
264 1.5 msaitoh } /* ixgbe_clear_vfmbmem */
265 1.5 msaitoh
266 1.5 msaitoh
267 1.5 msaitoh static void
268 1.5 msaitoh ixgbe_toggle_txdctl(struct ixgbe_hw *hw, struct ixgbe_vf *vf)
269 1.5 msaitoh {
270 1.5 msaitoh uint32_t vf_index, offset, reg;
271 1.5 msaitoh uint8_t queue_count, i;
272 1.5 msaitoh
273 1.5 msaitoh IXGBE_CORE_LOCK_ASSERT(adapter);
274 1.5 msaitoh
275 1.5 msaitoh vf_index = IXGBE_VF_INDEX(vf->pool);
276 1.5 msaitoh
277 1.5 msaitoh /* Determine number of queues by checking
278 1.5 msaitoh * number of virtual functions */
279 1.5 msaitoh reg = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
280 1.5 msaitoh switch (reg & IXGBE_GCR_EXT_VT_MODE_MASK) {
281 1.5 msaitoh case IXGBE_GCR_EXT_VT_MODE_64:
282 1.5 msaitoh queue_count = 2;
283 1.5 msaitoh break;
284 1.5 msaitoh case IXGBE_GCR_EXT_VT_MODE_32:
285 1.5 msaitoh queue_count = 4;
286 1.5 msaitoh break;
287 1.5 msaitoh default:
288 1.5 msaitoh return;
289 1.5 msaitoh }
290 1.5 msaitoh
291 1.5 msaitoh /* Toggle queues */
292 1.5 msaitoh for (i = 0; i < queue_count; ++i) {
293 1.5 msaitoh /* Calculate offset of current queue */
294 1.5 msaitoh offset = queue_count * vf_index + i;
295 1.5 msaitoh
296 1.5 msaitoh /* Enable queue */
297 1.5 msaitoh reg = IXGBE_READ_REG(hw, IXGBE_PVFTXDCTL(offset));
298 1.5 msaitoh reg |= IXGBE_TXDCTL_ENABLE;
299 1.5 msaitoh IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(offset), reg);
300 1.5 msaitoh IXGBE_WRITE_FLUSH(hw);
301 1.5 msaitoh
302 1.5 msaitoh /* Disable queue */
303 1.5 msaitoh reg = IXGBE_READ_REG(hw, IXGBE_PVFTXDCTL(offset));
304 1.5 msaitoh reg &= ~IXGBE_TXDCTL_ENABLE;
305 1.5 msaitoh IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(offset), reg);
306 1.5 msaitoh IXGBE_WRITE_FLUSH(hw);
307 1.5 msaitoh }
308 1.5 msaitoh } /* ixgbe_toggle_txdctl */
309 1.5 msaitoh
310 1.5 msaitoh
311 1.1 msaitoh static boolean_t
312 1.1 msaitoh ixgbe_vf_frame_size_compatible(struct adapter *adapter, struct ixgbe_vf *vf)
313 1.1 msaitoh {
314 1.1 msaitoh
315 1.1 msaitoh /*
316 1.1 msaitoh * Frame size compatibility between PF and VF is only a problem on
317 1.1 msaitoh * 82599-based cards. X540 and later support any combination of jumbo
318 1.1 msaitoh * frames on PFs and VFs.
319 1.1 msaitoh */
320 1.1 msaitoh if (adapter->hw.mac.type != ixgbe_mac_82599EB)
321 1.1 msaitoh return (TRUE);
322 1.1 msaitoh
323 1.1 msaitoh switch (vf->api_ver) {
324 1.1 msaitoh case IXGBE_API_VER_1_0:
325 1.1 msaitoh case IXGBE_API_VER_UNKNOWN:
326 1.1 msaitoh /*
327 1.1 msaitoh * On legacy (1.0 and older) VF versions, we don't support jumbo
328 1.1 msaitoh * frames on either the PF or the VF.
329 1.1 msaitoh */
330 1.1 msaitoh if (adapter->max_frame_size > ETHER_MAX_LEN ||
331 1.1 msaitoh vf->max_frame_size > ETHER_MAX_LEN)
332 1.1 msaitoh return (FALSE);
333 1.1 msaitoh
334 1.1 msaitoh return (TRUE);
335 1.1 msaitoh
336 1.1 msaitoh break;
337 1.1 msaitoh case IXGBE_API_VER_1_1:
338 1.1 msaitoh default:
339 1.1 msaitoh /*
340 1.1 msaitoh * 1.1 or later VF versions always work if they aren't using
341 1.1 msaitoh * jumbo frames.
342 1.1 msaitoh */
343 1.1 msaitoh if (vf->max_frame_size <= ETHER_MAX_LEN)
344 1.1 msaitoh return (TRUE);
345 1.1 msaitoh
346 1.1 msaitoh /*
347 1.1 msaitoh * Jumbo frames only work with VFs if the PF is also using jumbo
348 1.1 msaitoh * frames.
349 1.1 msaitoh */
350 1.1 msaitoh if (adapter->max_frame_size <= ETHER_MAX_LEN)
351 1.1 msaitoh return (TRUE);
352 1.1 msaitoh
353 1.1 msaitoh return (FALSE);
354 1.1 msaitoh }
355 1.1 msaitoh } /* ixgbe_vf_frame_size_compatible */
356 1.1 msaitoh
357 1.1 msaitoh
358 1.1 msaitoh static void
359 1.1 msaitoh ixgbe_process_vf_reset(struct adapter *adapter, struct ixgbe_vf *vf)
360 1.1 msaitoh {
361 1.1 msaitoh ixgbe_vf_set_default_vlan(adapter, vf, vf->default_vlan);
362 1.1 msaitoh
363 1.1 msaitoh // XXX clear multicast addresses
364 1.1 msaitoh
365 1.1 msaitoh ixgbe_clear_rar(&adapter->hw, vf->rar_index);
366 1.5 msaitoh ixgbe_clear_vfmbmem(&adapter->hw, vf);
367 1.5 msaitoh ixgbe_toggle_txdctl(&adapter->hw, vf);
368 1.1 msaitoh
369 1.1 msaitoh vf->api_ver = IXGBE_API_VER_UNKNOWN;
370 1.1 msaitoh } /* ixgbe_process_vf_reset */
371 1.1 msaitoh
372 1.1 msaitoh
373 1.1 msaitoh static void
374 1.1 msaitoh ixgbe_vf_enable_transmit(struct adapter *adapter, struct ixgbe_vf *vf)
375 1.1 msaitoh {
376 1.1 msaitoh struct ixgbe_hw *hw;
377 1.1 msaitoh uint32_t vf_index, vfte;
378 1.1 msaitoh
379 1.1 msaitoh hw = &adapter->hw;
380 1.1 msaitoh
381 1.1 msaitoh vf_index = IXGBE_VF_INDEX(vf->pool);
382 1.1 msaitoh vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index));
383 1.1 msaitoh vfte |= IXGBE_VF_BIT(vf->pool);
384 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte);
385 1.1 msaitoh } /* ixgbe_vf_enable_transmit */
386 1.1 msaitoh
387 1.1 msaitoh
388 1.1 msaitoh static void
389 1.1 msaitoh ixgbe_vf_enable_receive(struct adapter *adapter, struct ixgbe_vf *vf)
390 1.1 msaitoh {
391 1.1 msaitoh struct ixgbe_hw *hw;
392 1.1 msaitoh uint32_t vf_index, vfre;
393 1.1 msaitoh
394 1.1 msaitoh hw = &adapter->hw;
395 1.1 msaitoh
396 1.1 msaitoh vf_index = IXGBE_VF_INDEX(vf->pool);
397 1.1 msaitoh vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index));
398 1.1 msaitoh if (ixgbe_vf_frame_size_compatible(adapter, vf))
399 1.1 msaitoh vfre |= IXGBE_VF_BIT(vf->pool);
400 1.1 msaitoh else
401 1.1 msaitoh vfre &= ~IXGBE_VF_BIT(vf->pool);
402 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre);
403 1.1 msaitoh } /* ixgbe_vf_enable_receive */
404 1.1 msaitoh
405 1.1 msaitoh
406 1.1 msaitoh static void
407 1.1 msaitoh ixgbe_vf_reset_msg(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
408 1.1 msaitoh {
409 1.1 msaitoh struct ixgbe_hw *hw;
410 1.1 msaitoh uint32_t ack;
411 1.1 msaitoh uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN];
412 1.1 msaitoh
413 1.1 msaitoh hw = &adapter->hw;
414 1.1 msaitoh
415 1.1 msaitoh ixgbe_process_vf_reset(adapter, vf);
416 1.1 msaitoh
417 1.1 msaitoh if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
418 1.1 msaitoh ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr,
419 1.1 msaitoh vf->pool, TRUE);
420 1.1 msaitoh ack = IXGBE_VT_MSGTYPE_ACK;
421 1.1 msaitoh } else
422 1.1 msaitoh ack = IXGBE_VT_MSGTYPE_NACK;
423 1.1 msaitoh
424 1.1 msaitoh ixgbe_vf_enable_transmit(adapter, vf);
425 1.1 msaitoh ixgbe_vf_enable_receive(adapter, vf);
426 1.1 msaitoh
427 1.1 msaitoh vf->flags |= IXGBE_VF_CTS;
428 1.1 msaitoh
429 1.2 msaitoh resp[0] = IXGBE_VF_RESET | ack;
430 1.1 msaitoh bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN);
431 1.1 msaitoh resp[3] = hw->mac.mc_filter_type;
432 1.1 msaitoh hw->mbx.ops.write(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
433 1.1 msaitoh } /* ixgbe_vf_reset_msg */
434 1.1 msaitoh
435 1.1 msaitoh
436 1.1 msaitoh static void
437 1.1 msaitoh ixgbe_vf_set_mac(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
438 1.1 msaitoh {
439 1.1 msaitoh uint8_t *mac;
440 1.1 msaitoh
441 1.1 msaitoh mac = (uint8_t*)&msg[1];
442 1.1 msaitoh
443 1.1 msaitoh /* Check that the VF has permission to change the MAC address. */
444 1.1 msaitoh if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
445 1.1 msaitoh ixgbe_send_vf_nack(adapter, vf, msg[0]);
446 1.1 msaitoh return;
447 1.1 msaitoh }
448 1.1 msaitoh
449 1.1 msaitoh if (ixgbe_validate_mac_addr(mac) != 0) {
450 1.1 msaitoh ixgbe_send_vf_nack(adapter, vf, msg[0]);
451 1.1 msaitoh return;
452 1.1 msaitoh }
453 1.1 msaitoh
454 1.1 msaitoh bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
455 1.1 msaitoh
456 1.1 msaitoh ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr, vf->pool,
457 1.1 msaitoh TRUE);
458 1.1 msaitoh
459 1.1 msaitoh ixgbe_send_vf_ack(adapter, vf, msg[0]);
460 1.1 msaitoh } /* ixgbe_vf_set_mac */
461 1.1 msaitoh
462 1.1 msaitoh
463 1.1 msaitoh /*
464 1.1 msaitoh * VF multicast addresses are set by using the appropriate bit in
465 1.1 msaitoh * 1 of 128 32 bit addresses (4096 possible).
466 1.1 msaitoh */
467 1.1 msaitoh static void
468 1.1 msaitoh ixgbe_vf_set_mc_addr(struct adapter *adapter, struct ixgbe_vf *vf, u32 *msg)
469 1.1 msaitoh {
470 1.1 msaitoh u16 *list = (u16*)&msg[1];
471 1.1 msaitoh int entries;
472 1.1 msaitoh u32 vmolr, vec_bit, vec_reg, mta_reg;
473 1.1 msaitoh
474 1.1 msaitoh entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
475 1.4 riastrad entries = uimin(entries, IXGBE_MAX_VF_MC);
476 1.1 msaitoh
477 1.1 msaitoh vmolr = IXGBE_READ_REG(&adapter->hw, IXGBE_VMOLR(vf->pool));
478 1.1 msaitoh
479 1.1 msaitoh vf->num_mc_hashes = entries;
480 1.1 msaitoh
481 1.1 msaitoh /* Set the appropriate MTA bit */
482 1.1 msaitoh for (int i = 0; i < entries; i++) {
483 1.1 msaitoh vf->mc_hash[i] = list[i];
484 1.1 msaitoh vec_reg = (vf->mc_hash[i] >> 5) & 0x7F;
485 1.1 msaitoh vec_bit = vf->mc_hash[i] & 0x1F;
486 1.1 msaitoh mta_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_MTA(vec_reg));
487 1.1 msaitoh mta_reg |= (1 << vec_bit);
488 1.1 msaitoh IXGBE_WRITE_REG(&adapter->hw, IXGBE_MTA(vec_reg), mta_reg);
489 1.1 msaitoh }
490 1.1 msaitoh
491 1.1 msaitoh vmolr |= IXGBE_VMOLR_ROMPE;
492 1.1 msaitoh IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(vf->pool), vmolr);
493 1.1 msaitoh ixgbe_send_vf_ack(adapter, vf, msg[0]);
494 1.1 msaitoh } /* ixgbe_vf_set_mc_addr */
495 1.1 msaitoh
496 1.1 msaitoh
497 1.1 msaitoh static void
498 1.1 msaitoh ixgbe_vf_set_vlan(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
499 1.1 msaitoh {
500 1.1 msaitoh struct ixgbe_hw *hw;
501 1.1 msaitoh int enable;
502 1.1 msaitoh uint16_t tag;
503 1.1 msaitoh
504 1.1 msaitoh hw = &adapter->hw;
505 1.1 msaitoh enable = IXGBE_VT_MSGINFO(msg[0]);
506 1.1 msaitoh tag = msg[1] & IXGBE_VLVF_VLANID_MASK;
507 1.1 msaitoh
508 1.1 msaitoh if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
509 1.1 msaitoh ixgbe_send_vf_nack(adapter, vf, msg[0]);
510 1.1 msaitoh return;
511 1.1 msaitoh }
512 1.1 msaitoh
513 1.1 msaitoh /* It is illegal to enable vlan tag 0. */
514 1.2 msaitoh if (tag == 0 && enable != 0) {
515 1.1 msaitoh ixgbe_send_vf_nack(adapter, vf, msg[0]);
516 1.1 msaitoh return;
517 1.1 msaitoh }
518 1.1 msaitoh
519 1.1 msaitoh ixgbe_set_vfta(hw, tag, vf->pool, enable, false);
520 1.1 msaitoh ixgbe_send_vf_ack(adapter, vf, msg[0]);
521 1.1 msaitoh } /* ixgbe_vf_set_vlan */
522 1.1 msaitoh
523 1.1 msaitoh
524 1.1 msaitoh static void
525 1.1 msaitoh ixgbe_vf_set_lpe(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
526 1.1 msaitoh {
527 1.1 msaitoh struct ixgbe_hw *hw;
528 1.1 msaitoh uint32_t vf_max_size, pf_max_size, mhadd;
529 1.1 msaitoh
530 1.1 msaitoh hw = &adapter->hw;
531 1.1 msaitoh vf_max_size = msg[1];
532 1.1 msaitoh
533 1.1 msaitoh if (vf_max_size < ETHER_CRC_LEN) {
534 1.1 msaitoh /* We intentionally ACK invalid LPE requests. */
535 1.1 msaitoh ixgbe_send_vf_ack(adapter, vf, msg[0]);
536 1.1 msaitoh return;
537 1.1 msaitoh }
538 1.1 msaitoh
539 1.1 msaitoh vf_max_size -= ETHER_CRC_LEN;
540 1.1 msaitoh
541 1.1 msaitoh if (vf_max_size > IXGBE_MAX_FRAME_SIZE) {
542 1.1 msaitoh /* We intentionally ACK invalid LPE requests. */
543 1.1 msaitoh ixgbe_send_vf_ack(adapter, vf, msg[0]);
544 1.1 msaitoh return;
545 1.1 msaitoh }
546 1.1 msaitoh
547 1.1 msaitoh vf->max_frame_size = vf_max_size;
548 1.1 msaitoh ixgbe_update_max_frame(adapter, vf->max_frame_size);
549 1.1 msaitoh
550 1.1 msaitoh /*
551 1.1 msaitoh * We might have to disable reception to this VF if the frame size is
552 1.1 msaitoh * not compatible with the config on the PF.
553 1.1 msaitoh */
554 1.1 msaitoh ixgbe_vf_enable_receive(adapter, vf);
555 1.1 msaitoh
556 1.1 msaitoh mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
557 1.1 msaitoh pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
558 1.1 msaitoh
559 1.1 msaitoh if (pf_max_size < adapter->max_frame_size) {
560 1.1 msaitoh mhadd &= ~IXGBE_MHADD_MFS_MASK;
561 1.1 msaitoh mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
562 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
563 1.1 msaitoh }
564 1.1 msaitoh
565 1.1 msaitoh ixgbe_send_vf_ack(adapter, vf, msg[0]);
566 1.1 msaitoh } /* ixgbe_vf_set_lpe */
567 1.1 msaitoh
568 1.1 msaitoh
569 1.1 msaitoh static void
570 1.1 msaitoh ixgbe_vf_set_macvlan(struct adapter *adapter, struct ixgbe_vf *vf,
571 1.1 msaitoh uint32_t *msg)
572 1.1 msaitoh {
573 1.1 msaitoh //XXX implement this
574 1.1 msaitoh ixgbe_send_vf_nack(adapter, vf, msg[0]);
575 1.1 msaitoh } /* ixgbe_vf_set_macvlan */
576 1.1 msaitoh
577 1.1 msaitoh
578 1.1 msaitoh static void
579 1.1 msaitoh ixgbe_vf_api_negotiate(struct adapter *adapter, struct ixgbe_vf *vf,
580 1.1 msaitoh uint32_t *msg)
581 1.1 msaitoh {
582 1.1 msaitoh
583 1.1 msaitoh switch (msg[1]) {
584 1.1 msaitoh case IXGBE_API_VER_1_0:
585 1.1 msaitoh case IXGBE_API_VER_1_1:
586 1.1 msaitoh vf->api_ver = msg[1];
587 1.1 msaitoh ixgbe_send_vf_ack(adapter, vf, msg[0]);
588 1.1 msaitoh break;
589 1.1 msaitoh default:
590 1.1 msaitoh vf->api_ver = IXGBE_API_VER_UNKNOWN;
591 1.1 msaitoh ixgbe_send_vf_nack(adapter, vf, msg[0]);
592 1.1 msaitoh break;
593 1.1 msaitoh }
594 1.1 msaitoh } /* ixgbe_vf_api_negotiate */
595 1.1 msaitoh
596 1.1 msaitoh
597 1.1 msaitoh static void
598 1.1 msaitoh ixgbe_vf_get_queues(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
599 1.1 msaitoh {
600 1.1 msaitoh struct ixgbe_hw *hw;
601 1.1 msaitoh uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
602 1.1 msaitoh int num_queues;
603 1.1 msaitoh
604 1.1 msaitoh hw = &adapter->hw;
605 1.1 msaitoh
606 1.1 msaitoh /* GET_QUEUES is not supported on pre-1.1 APIs. */
607 1.1 msaitoh switch (msg[0]) {
608 1.1 msaitoh case IXGBE_API_VER_1_0:
609 1.1 msaitoh case IXGBE_API_VER_UNKNOWN:
610 1.1 msaitoh ixgbe_send_vf_nack(adapter, vf, msg[0]);
611 1.1 msaitoh return;
612 1.1 msaitoh }
613 1.1 msaitoh
614 1.1 msaitoh resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK |
615 1.1 msaitoh IXGBE_VT_MSGTYPE_CTS;
616 1.1 msaitoh
617 1.1 msaitoh num_queues = ixgbe_vf_queues(adapter->iov_mode);
618 1.1 msaitoh resp[IXGBE_VF_TX_QUEUES] = num_queues;
619 1.1 msaitoh resp[IXGBE_VF_RX_QUEUES] = num_queues;
620 1.1 msaitoh resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
621 1.1 msaitoh resp[IXGBE_VF_DEF_QUEUE] = 0;
622 1.1 msaitoh
623 1.1 msaitoh hw->mbx.ops.write(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
624 1.1 msaitoh } /* ixgbe_vf_get_queues */
625 1.1 msaitoh
626 1.1 msaitoh
627 1.1 msaitoh static void
628 1.1 msaitoh ixgbe_process_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf)
629 1.1 msaitoh {
630 1.1 msaitoh struct ixgbe_hw *hw;
631 1.1 msaitoh uint32_t msg[IXGBE_VFMAILBOX_SIZE];
632 1.1 msaitoh int error;
633 1.1 msaitoh
634 1.1 msaitoh hw = &adapter->hw;
635 1.1 msaitoh
636 1.1 msaitoh error = hw->mbx.ops.read(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool);
637 1.1 msaitoh
638 1.1 msaitoh if (error != 0)
639 1.1 msaitoh return;
640 1.1 msaitoh
641 1.2 msaitoh CTR3(KTR_MALLOC, "%s: received msg %x from %d", adapter->ifp->if_xname,
642 1.2 msaitoh msg[0], vf->pool);
643 1.1 msaitoh if (msg[0] == IXGBE_VF_RESET) {
644 1.1 msaitoh ixgbe_vf_reset_msg(adapter, vf, msg);
645 1.1 msaitoh return;
646 1.1 msaitoh }
647 1.1 msaitoh
648 1.1 msaitoh if (!(vf->flags & IXGBE_VF_CTS)) {
649 1.1 msaitoh ixgbe_send_vf_nack(adapter, vf, msg[0]);
650 1.1 msaitoh return;
651 1.1 msaitoh }
652 1.1 msaitoh
653 1.1 msaitoh switch (msg[0] & IXGBE_VT_MSG_MASK) {
654 1.1 msaitoh case IXGBE_VF_SET_MAC_ADDR:
655 1.1 msaitoh ixgbe_vf_set_mac(adapter, vf, msg);
656 1.1 msaitoh break;
657 1.1 msaitoh case IXGBE_VF_SET_MULTICAST:
658 1.1 msaitoh ixgbe_vf_set_mc_addr(adapter, vf, msg);
659 1.1 msaitoh break;
660 1.1 msaitoh case IXGBE_VF_SET_VLAN:
661 1.1 msaitoh ixgbe_vf_set_vlan(adapter, vf, msg);
662 1.1 msaitoh break;
663 1.1 msaitoh case IXGBE_VF_SET_LPE:
664 1.1 msaitoh ixgbe_vf_set_lpe(adapter, vf, msg);
665 1.1 msaitoh break;
666 1.1 msaitoh case IXGBE_VF_SET_MACVLAN:
667 1.1 msaitoh ixgbe_vf_set_macvlan(adapter, vf, msg);
668 1.1 msaitoh break;
669 1.1 msaitoh case IXGBE_VF_API_NEGOTIATE:
670 1.1 msaitoh ixgbe_vf_api_negotiate(adapter, vf, msg);
671 1.1 msaitoh break;
672 1.1 msaitoh case IXGBE_VF_GET_QUEUES:
673 1.1 msaitoh ixgbe_vf_get_queues(adapter, vf, msg);
674 1.1 msaitoh break;
675 1.1 msaitoh default:
676 1.1 msaitoh ixgbe_send_vf_nack(adapter, vf, msg[0]);
677 1.1 msaitoh }
678 1.1 msaitoh } /* ixgbe_process_vf_msg */
679 1.1 msaitoh
680 1.1 msaitoh
681 1.1 msaitoh /* Tasklet for handling VF -> PF mailbox messages */
682 1.1 msaitoh void
683 1.1 msaitoh ixgbe_handle_mbx(void *context, int pending)
684 1.1 msaitoh {
685 1.2 msaitoh struct adapter *adapter = context;
686 1.1 msaitoh struct ixgbe_hw *hw;
687 1.1 msaitoh struct ixgbe_vf *vf;
688 1.1 msaitoh int i;
689 1.1 msaitoh
690 1.1 msaitoh hw = &adapter->hw;
691 1.1 msaitoh
692 1.1 msaitoh IXGBE_CORE_LOCK(adapter);
693 1.1 msaitoh for (i = 0; i < adapter->num_vfs; i++) {
694 1.1 msaitoh vf = &adapter->vfs[i];
695 1.1 msaitoh
696 1.1 msaitoh if (vf->flags & IXGBE_VF_ACTIVE) {
697 1.1 msaitoh if (hw->mbx.ops.check_for_rst(hw, vf->pool) == 0)
698 1.1 msaitoh ixgbe_process_vf_reset(adapter, vf);
699 1.1 msaitoh
700 1.1 msaitoh if (hw->mbx.ops.check_for_msg(hw, vf->pool) == 0)
701 1.1 msaitoh ixgbe_process_vf_msg(adapter, vf);
702 1.1 msaitoh
703 1.1 msaitoh if (hw->mbx.ops.check_for_ack(hw, vf->pool) == 0)
704 1.1 msaitoh ixgbe_process_vf_ack(adapter, vf);
705 1.1 msaitoh }
706 1.1 msaitoh }
707 1.1 msaitoh IXGBE_CORE_UNLOCK(adapter);
708 1.1 msaitoh } /* ixgbe_handle_mbx */
709 1.1 msaitoh
710 1.1 msaitoh int
711 1.1 msaitoh ixgbe_init_iov(device_t dev, u16 num_vfs, const nvlist_t *config)
712 1.1 msaitoh {
713 1.1 msaitoh struct adapter *adapter;
714 1.1 msaitoh int retval = 0;
715 1.1 msaitoh
716 1.1 msaitoh adapter = device_get_softc(dev);
717 1.1 msaitoh adapter->iov_mode = IXGBE_NO_VM;
718 1.1 msaitoh
719 1.1 msaitoh if (num_vfs == 0) {
720 1.1 msaitoh /* Would we ever get num_vfs = 0? */
721 1.1 msaitoh retval = EINVAL;
722 1.1 msaitoh goto err_init_iov;
723 1.1 msaitoh }
724 1.1 msaitoh
725 1.1 msaitoh /*
726 1.1 msaitoh * We've got to reserve a VM's worth of queues for the PF,
727 1.1 msaitoh * thus we go into "64 VF mode" if 32+ VFs are requested.
728 1.1 msaitoh * With 64 VFs, you can only have two queues per VF.
729 1.1 msaitoh * With 32 VFs, you can have up to four queues per VF.
730 1.1 msaitoh */
731 1.1 msaitoh if (num_vfs >= IXGBE_32_VM)
732 1.1 msaitoh adapter->iov_mode = IXGBE_64_VM;
733 1.1 msaitoh else
734 1.1 msaitoh adapter->iov_mode = IXGBE_32_VM;
735 1.1 msaitoh
736 1.1 msaitoh /* Again, reserving 1 VM's worth of queues for the PF */
737 1.1 msaitoh adapter->pool = adapter->iov_mode - 1;
738 1.1 msaitoh
739 1.1 msaitoh if ((num_vfs > adapter->pool) || (num_vfs >= IXGBE_64_VM)) {
740 1.1 msaitoh retval = ENOSPC;
741 1.1 msaitoh goto err_init_iov;
742 1.1 msaitoh }
743 1.1 msaitoh
744 1.1 msaitoh IXGBE_CORE_LOCK(adapter);
745 1.1 msaitoh
746 1.1 msaitoh adapter->vfs = malloc(sizeof(*adapter->vfs) * num_vfs, M_IXGBE_SRIOV,
747 1.1 msaitoh M_NOWAIT | M_ZERO);
748 1.1 msaitoh
749 1.1 msaitoh if (adapter->vfs == NULL) {
750 1.1 msaitoh retval = ENOMEM;
751 1.1 msaitoh IXGBE_CORE_UNLOCK(adapter);
752 1.1 msaitoh goto err_init_iov;
753 1.1 msaitoh }
754 1.1 msaitoh
755 1.1 msaitoh adapter->num_vfs = num_vfs;
756 1.2 msaitoh
757 1.2 msaitoh /* set the SRIOV flag now as it's needed
758 1.2 msaitoh * by ixgbe_init_locked() */
759 1.2 msaitoh adapter->feat_en |= IXGBE_FEATURE_SRIOV;
760 1.1 msaitoh adapter->init_locked(adapter);
761 1.1 msaitoh
762 1.1 msaitoh IXGBE_CORE_UNLOCK(adapter);
763 1.1 msaitoh
764 1.2 msaitoh return (retval);
765 1.1 msaitoh
766 1.1 msaitoh err_init_iov:
767 1.1 msaitoh adapter->num_vfs = 0;
768 1.1 msaitoh adapter->pool = 0;
769 1.1 msaitoh adapter->iov_mode = IXGBE_NO_VM;
770 1.1 msaitoh
771 1.2 msaitoh return (retval);
772 1.1 msaitoh } /* ixgbe_init_iov */
773 1.1 msaitoh
774 1.1 msaitoh void
775 1.1 msaitoh ixgbe_uninit_iov(device_t dev)
776 1.1 msaitoh {
777 1.1 msaitoh struct ixgbe_hw *hw;
778 1.1 msaitoh struct adapter *adapter;
779 1.1 msaitoh uint32_t pf_reg, vf_reg;
780 1.1 msaitoh
781 1.1 msaitoh adapter = device_get_softc(dev);
782 1.1 msaitoh hw = &adapter->hw;
783 1.1 msaitoh
784 1.1 msaitoh IXGBE_CORE_LOCK(adapter);
785 1.1 msaitoh
786 1.1 msaitoh /* Enable rx/tx for the PF and disable it for all VFs. */
787 1.1 msaitoh pf_reg = IXGBE_VF_INDEX(adapter->pool);
788 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg), IXGBE_VF_BIT(adapter->pool));
789 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg), IXGBE_VF_BIT(adapter->pool));
790 1.1 msaitoh
791 1.1 msaitoh if (pf_reg == 0)
792 1.1 msaitoh vf_reg = 1;
793 1.1 msaitoh else
794 1.1 msaitoh vf_reg = 0;
795 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
796 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);
797 1.1 msaitoh
798 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
799 1.1 msaitoh
800 1.1 msaitoh free(adapter->vfs, M_IXGBE_SRIOV);
801 1.1 msaitoh adapter->vfs = NULL;
802 1.1 msaitoh adapter->num_vfs = 0;
803 1.1 msaitoh adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
804 1.1 msaitoh
805 1.1 msaitoh IXGBE_CORE_UNLOCK(adapter);
806 1.1 msaitoh } /* ixgbe_uninit_iov */
807 1.1 msaitoh
808 1.1 msaitoh static void
809 1.1 msaitoh ixgbe_init_vf(struct adapter *adapter, struct ixgbe_vf *vf)
810 1.1 msaitoh {
811 1.1 msaitoh struct ixgbe_hw *hw;
812 1.1 msaitoh uint32_t vf_index, pfmbimr;
813 1.1 msaitoh
814 1.1 msaitoh IXGBE_CORE_LOCK_ASSERT(adapter);
815 1.1 msaitoh
816 1.1 msaitoh hw = &adapter->hw;
817 1.1 msaitoh
818 1.1 msaitoh if (!(vf->flags & IXGBE_VF_ACTIVE))
819 1.1 msaitoh return;
820 1.1 msaitoh
821 1.1 msaitoh vf_index = IXGBE_VF_INDEX(vf->pool);
822 1.1 msaitoh pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
823 1.1 msaitoh pfmbimr |= IXGBE_VF_BIT(vf->pool);
824 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);
825 1.1 msaitoh
826 1.1 msaitoh ixgbe_vf_set_default_vlan(adapter, vf, vf->vlan_tag);
827 1.1 msaitoh
828 1.1 msaitoh // XXX multicast addresses
829 1.1 msaitoh
830 1.1 msaitoh if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
831 1.1 msaitoh ixgbe_set_rar(&adapter->hw, vf->rar_index,
832 1.1 msaitoh vf->ether_addr, vf->pool, TRUE);
833 1.1 msaitoh }
834 1.1 msaitoh
835 1.1 msaitoh ixgbe_vf_enable_transmit(adapter, vf);
836 1.1 msaitoh ixgbe_vf_enable_receive(adapter, vf);
837 1.1 msaitoh
838 1.3 msaitoh ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
839 1.1 msaitoh } /* ixgbe_init_vf */
840 1.1 msaitoh
841 1.1 msaitoh void
842 1.1 msaitoh ixgbe_initialize_iov(struct adapter *adapter)
843 1.1 msaitoh {
844 1.1 msaitoh struct ixgbe_hw *hw = &adapter->hw;
845 1.1 msaitoh uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
846 1.1 msaitoh int i;
847 1.1 msaitoh
848 1.1 msaitoh if (adapter->iov_mode == IXGBE_NO_VM)
849 1.1 msaitoh return;
850 1.1 msaitoh
851 1.1 msaitoh IXGBE_CORE_LOCK_ASSERT(adapter);
852 1.1 msaitoh
853 1.1 msaitoh /* RMW appropriate registers based on IOV mode */
854 1.1 msaitoh /* Read... */
855 1.1 msaitoh mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
856 1.1 msaitoh gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
857 1.1 msaitoh gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
858 1.1 msaitoh /* Modify... */
859 1.1 msaitoh mrqc &= ~IXGBE_MRQC_MRQE_MASK;
860 1.1 msaitoh mtqc = IXGBE_MTQC_VT_ENA; /* No initial MTQC read needed */
861 1.1 msaitoh gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
862 1.1 msaitoh gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
863 1.1 msaitoh gpie &= ~IXGBE_GPIE_VTMODE_MASK;
864 1.1 msaitoh switch (adapter->iov_mode) {
865 1.1 msaitoh case IXGBE_64_VM:
866 1.1 msaitoh mrqc |= IXGBE_MRQC_VMDQRSS64EN;
867 1.1 msaitoh mtqc |= IXGBE_MTQC_64VF;
868 1.1 msaitoh gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
869 1.1 msaitoh gpie |= IXGBE_GPIE_VTMODE_64;
870 1.1 msaitoh break;
871 1.1 msaitoh case IXGBE_32_VM:
872 1.1 msaitoh mrqc |= IXGBE_MRQC_VMDQRSS32EN;
873 1.1 msaitoh mtqc |= IXGBE_MTQC_32VF;
874 1.1 msaitoh gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
875 1.1 msaitoh gpie |= IXGBE_GPIE_VTMODE_32;
876 1.1 msaitoh break;
877 1.1 msaitoh default:
878 1.1 msaitoh panic("Unexpected SR-IOV mode %d", adapter->iov_mode);
879 1.1 msaitoh }
880 1.1 msaitoh /* Write... */
881 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
882 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
883 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
884 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
885 1.1 msaitoh
886 1.1 msaitoh /* Enable rx/tx for the PF. */
887 1.1 msaitoh vf_reg = IXGBE_VF_INDEX(adapter->pool);
888 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), IXGBE_VF_BIT(adapter->pool));
889 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), IXGBE_VF_BIT(adapter->pool));
890 1.1 msaitoh
891 1.1 msaitoh /* Allow VM-to-VM communication. */
892 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
893 1.1 msaitoh
894 1.1 msaitoh vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
895 1.1 msaitoh vt_ctl |= (adapter->pool << IXGBE_VT_CTL_POOL_SHIFT);
896 1.1 msaitoh IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
897 1.1 msaitoh
898 1.1 msaitoh for (i = 0; i < adapter->num_vfs; i++)
899 1.1 msaitoh ixgbe_init_vf(adapter, &adapter->vfs[i]);
900 1.1 msaitoh } /* ixgbe_initialize_iov */
901 1.1 msaitoh
902 1.1 msaitoh
903 1.1 msaitoh /* Check the max frame setting of all active VF's */
904 1.1 msaitoh void
905 1.1 msaitoh ixgbe_recalculate_max_frame(struct adapter *adapter)
906 1.1 msaitoh {
907 1.1 msaitoh struct ixgbe_vf *vf;
908 1.1 msaitoh
909 1.1 msaitoh IXGBE_CORE_LOCK_ASSERT(adapter);
910 1.1 msaitoh
911 1.1 msaitoh for (int i = 0; i < adapter->num_vfs; i++) {
912 1.1 msaitoh vf = &adapter->vfs[i];
913 1.1 msaitoh if (vf->flags & IXGBE_VF_ACTIVE)
914 1.1 msaitoh ixgbe_update_max_frame(adapter, vf->max_frame_size);
915 1.1 msaitoh }
916 1.1 msaitoh } /* ixgbe_recalculate_max_frame */
917 1.1 msaitoh
918 1.1 msaitoh int
919 1.1 msaitoh ixgbe_add_vf(device_t dev, u16 vfnum, const nvlist_t *config)
920 1.1 msaitoh {
921 1.1 msaitoh struct adapter *adapter;
922 1.1 msaitoh struct ixgbe_vf *vf;
923 1.1 msaitoh const void *mac;
924 1.1 msaitoh
925 1.1 msaitoh adapter = device_get_softc(dev);
926 1.1 msaitoh
927 1.1 msaitoh KASSERT(vfnum < adapter->num_vfs, ("VF index %d is out of range %d",
928 1.1 msaitoh vfnum, adapter->num_vfs));
929 1.1 msaitoh
930 1.1 msaitoh IXGBE_CORE_LOCK(adapter);
931 1.1 msaitoh vf = &adapter->vfs[vfnum];
932 1.1 msaitoh vf->pool= vfnum;
933 1.1 msaitoh
934 1.1 msaitoh /* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
935 1.1 msaitoh vf->rar_index = vfnum + 1;
936 1.1 msaitoh vf->default_vlan = 0;
937 1.1 msaitoh vf->max_frame_size = ETHER_MAX_LEN;
938 1.1 msaitoh ixgbe_update_max_frame(adapter, vf->max_frame_size);
939 1.1 msaitoh
940 1.1 msaitoh if (nvlist_exists_binary(config, "mac-addr")) {
941 1.1 msaitoh mac = nvlist_get_binary(config, "mac-addr", NULL);
942 1.1 msaitoh bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
943 1.1 msaitoh if (nvlist_get_bool(config, "allow-set-mac"))
944 1.1 msaitoh vf->flags |= IXGBE_VF_CAP_MAC;
945 1.1 msaitoh } else
946 1.1 msaitoh /*
947 1.1 msaitoh * If the administrator has not specified a MAC address then
948 1.1 msaitoh * we must allow the VF to choose one.
949 1.1 msaitoh */
950 1.1 msaitoh vf->flags |= IXGBE_VF_CAP_MAC;
951 1.1 msaitoh
952 1.1 msaitoh vf->flags |= IXGBE_VF_ACTIVE;
953 1.1 msaitoh
954 1.1 msaitoh ixgbe_init_vf(adapter, vf);
955 1.1 msaitoh IXGBE_CORE_UNLOCK(adapter);
956 1.1 msaitoh
957 1.1 msaitoh return (0);
958 1.1 msaitoh } /* ixgbe_add_vf */
959 1.1 msaitoh
960 1.1 msaitoh #else
961 1.1 msaitoh
962 1.1 msaitoh void
963 1.1 msaitoh ixgbe_handle_mbx(void *context, int pending)
964 1.1 msaitoh {
965 1.1 msaitoh UNREFERENCED_2PARAMETER(context, pending);
966 1.1 msaitoh } /* ixgbe_handle_mbx */
967 1.1 msaitoh
968 1.1 msaitoh inline int
969 1.1 msaitoh ixgbe_vf_que_index(int mode, int vfnum, int num)
970 1.1 msaitoh {
971 1.1 msaitoh UNREFERENCED_2PARAMETER(mode, vfnum);
972 1.1 msaitoh
973 1.1 msaitoh return num;
974 1.1 msaitoh } /* ixgbe_vf_que_index */
975 1.1 msaitoh
976 1.1 msaitoh #endif
977