/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/if_sriov.c 327031 2017-12-20 18:15:06Z erj $*/

#include "ixgbe.h"
#include "ixgbe_sriov.h"

#ifdef PCI_IOV

MALLOC_DEFINE(M_IXGBE_SRIOV, "ix_sriov", "ix SR-IOV allocations");

/************************************************************************
 * ixgbe_pci_iov_detach
 ************************************************************************/
int
ixgbe_pci_iov_detach(device_t dev)
{
	return pci_iov_detach(dev);
}

/************************************************************************
 * ixgbe_define_iov_schemas
 ************************************************************************/
void
ixgbe_define_iov_schemas(device_t dev, int *error)
{
	nvlist_t *pf_schema, *vf_schema;

	pf_schema = pci_iov_schema_alloc_node();
	vf_schema = pci_iov_schema_alloc_node();
	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
	pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
	    IOV_SCHEMA_HASDEFAULT, TRUE);
	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
	    IOV_SCHEMA_HASDEFAULT, FALSE);
	pci_iov_schema_add_bool(vf_schema, "allow-promisc",
	    IOV_SCHEMA_HASDEFAULT, FALSE);
	*error = pci_iov_attach(dev, pf_schema, vf_schema);
	if (*error != 0) {
		device_printf(dev,
		    "Error %d setting up SR-IOV\n", *error);
	}
} /* ixgbe_define_iov_schemas */
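
/*
 * Illustrative only, not part of the driver: on FreeBSD the schema
 * defined above is exercised through iovctl(8).  A minimal iovctl.conf
 * that would validate against these keys might look as follows; the
 * device name and VF count here are hypothetical:
 *
 *	PF {
 *		device : "ix0";
 *		num_vfs : 4;
 *	}
 *	VF-0 {
 *		mac-addr : "02:01:02:03:04:05";
 *		allow-set-mac : true;
 *		allow-promisc : false;
 *	}
 */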

/************************************************************************
 * ixgbe_align_all_queue_indices
 ************************************************************************/
inline void
ixgbe_align_all_queue_indices(struct adapter *adapter)
{
	int i;
	int index;

	for (i = 0; i < adapter->num_queues; i++) {
		index = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, i);
		adapter->rx_rings[i].me = index;
		adapter->tx_rings[i].me = index;
	}
}

/* Support functions for SR-IOV/VF management */
static inline void
ixgbe_send_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
	if (vf->flags & IXGBE_VF_CTS)
		msg |= IXGBE_VT_MSGTYPE_CTS;

	adapter->hw.mbx.ops.write(&adapter->hw, &msg, 1, vf->pool);
}

static inline void
ixgbe_send_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
	msg &= IXGBE_VT_MSG_MASK;
	ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_ACK);
}

static inline void
ixgbe_send_vf_nack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
	msg &= IXGBE_VT_MSG_MASK;
	ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_NACK);
}

static inline void
ixgbe_process_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf)
{
	if (!(vf->flags & IXGBE_VF_CTS))
		ixgbe_send_vf_nack(adapter, vf, 0);
}

static inline boolean_t
ixgbe_vf_mac_changed(struct ixgbe_vf *vf, const uint8_t *mac)
{
	return (bcmp(mac, vf->ether_addr, ETHER_ADDR_LEN) != 0);
}

static inline int
ixgbe_vf_queues(int mode)
{
	switch (mode) {
	case IXGBE_64_VM:
		return (2);
	case IXGBE_32_VM:
		return (4);
	case IXGBE_NO_VM:
	default:
		return (0);
	}
}

inline int
ixgbe_vf_que_index(int mode, int vfnum, int num)
{
	return ((vfnum * ixgbe_vf_queues(mode)) + num);
}
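
/*
 * Worked example: in IXGBE_32_VM mode each pool owns four queues
 * (see ixgbe_vf_queues() above), so queue 1 of pool 3 maps to
 * hardware ring index 3 * 4 + 1 = 13.
 */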

static inline void
ixgbe_update_max_frame(struct adapter *adapter, int max_frame)
{
	if (adapter->max_frame_size < max_frame)
		adapter->max_frame_size = max_frame;
}

inline u32
ixgbe_get_mrqc(int iov_mode)
{
	u32 mrqc;

	switch (iov_mode) {
	case IXGBE_64_VM:
		mrqc = IXGBE_MRQC_VMDQRSS64EN;
		break;
	case IXGBE_32_VM:
		mrqc = IXGBE_MRQC_VMDQRSS32EN;
		break;
	case IXGBE_NO_VM:
		mrqc = 0;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", iov_mode);
	}

	return mrqc;
}


inline u32
ixgbe_get_mtqc(int iov_mode)
{
	uint32_t mtqc;

	switch (iov_mode) {
	case IXGBE_64_VM:
		mtqc = IXGBE_MTQC_64VF | IXGBE_MTQC_VT_ENA;
		break;
	case IXGBE_32_VM:
		mtqc = IXGBE_MTQC_32VF | IXGBE_MTQC_VT_ENA;
		break;
	case IXGBE_NO_VM:
		mtqc = IXGBE_MTQC_64Q_1PB;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", iov_mode);
	}

	return mtqc;
}

void
ixgbe_ping_all_vfs(struct adapter *adapter)
{
	struct ixgbe_vf *vf;

	for (int i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];
		if (vf->flags & IXGBE_VF_ACTIVE)
			ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
	}
} /* ixgbe_ping_all_vfs */


static void
ixgbe_vf_set_default_vlan(struct adapter *adapter, struct ixgbe_vf *vf,
    uint16_t tag)
{
	struct ixgbe_hw *hw;
	uint32_t vmolr, vmvir;

	hw = &adapter->hw;

	vf->vlan_tag = tag;

	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));

	/* Do not receive packets that pass inexact filters. */
	vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);
	/* Disable Multicast Promiscuous Mode. */
	vmolr &= ~IXGBE_VMOLR_MPE;

	/* Accept broadcasts. */
	vmolr |= IXGBE_VMOLR_BAM;

	if (tag == 0) {
		/* Accept non-vlan tagged traffic. */
		vmolr |= IXGBE_VMOLR_AUPE;

		/* Allow VM to tag outgoing traffic; no default tag. */
		vmvir = 0;
	} else {
		/* Require vlan-tagged traffic. */
		vmolr &= ~IXGBE_VMOLR_AUPE;

		/* Tag all traffic with provided vlan tag. */
		vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT);
	}
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr);
	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
} /* ixgbe_vf_set_default_vlan */
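
/*
 * Example: a default VLAN of 100 yields VMVIR = 100 | VLANA_DEFAULT,
 * so the hardware inserts tag 100 into every frame the VF transmits,
 * and with AUPE cleared the pool drops untagged receive traffic.
 */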


static void
ixgbe_clear_vfmbmem(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	uint32_t vf_index = IXGBE_VF_INDEX(vf->pool);
	uint16_t mbx_size = hw->mbx.size;
	uint16_t i;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	for (i = 0; i < mbx_size; ++i)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_index), i, 0x0);
} /* ixgbe_clear_vfmbmem */


static boolean_t
ixgbe_vf_frame_size_compatible(struct adapter *adapter, struct ixgbe_vf *vf)
{

	/*
	 * Frame size compatibility between PF and VF is only a problem on
	 * 82599-based cards.  X540 and later support any combination of jumbo
	 * frames on PFs and VFs.
	 */
	if (adapter->hw.mac.type != ixgbe_mac_82599EB)
		return (TRUE);

	switch (vf->api_ver) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_UNKNOWN:
		/*
		 * On legacy (1.0 and older) VF versions, we don't support jumbo
		 * frames on either the PF or the VF.
		 */
		if (adapter->max_frame_size > ETHER_MAX_LEN ||
		    vf->max_frame_size > ETHER_MAX_LEN)
			return (FALSE);

		return (TRUE);

	case IXGBE_API_VER_1_1:
	default:
		/*
		 * 1.1 or later VF versions always work if they aren't using
		 * jumbo frames.
		 */
		if (vf->max_frame_size <= ETHER_MAX_LEN)
			return (TRUE);

		/*
		 * Jumbo frames only work with VFs if the PF is also using jumbo
		 * frames.
		 */
		if (adapter->max_frame_size <= ETHER_MAX_LEN)
			return (TRUE);

		return (FALSE);
	}
} /* ixgbe_vf_frame_size_compatible */
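
/*
 * Summary of the checks above on 82599 (TRUE = frame sizes compatible);
 * 1518 stands for ETHER_MAX_LEN:
 *
 *	VF API		VF frame	PF frame	result
 *	1.0/unknown	<= 1518		<= 1518		TRUE
 *	1.0/unknown	jumbo on either side		FALSE
 *	1.1+		<= 1518		any		TRUE
 *	1.1+		jumbo		jumbo		TRUE
 *	1.1+		jumbo		<= 1518		FALSE
 */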


static void
ixgbe_process_vf_reset(struct adapter *adapter, struct ixgbe_vf *vf)
{
	ixgbe_vf_set_default_vlan(adapter, vf, vf->default_vlan);

	// XXX clear multicast addresses

	ixgbe_clear_rar(&adapter->hw, vf->rar_index);
	ixgbe_clear_vfmbmem(adapter, vf);
	ixgbe_toggle_txdctl(&adapter->hw, IXGBE_VF_INDEX(vf->pool));

	vf->api_ver = IXGBE_API_VER_UNKNOWN;
} /* ixgbe_process_vf_reset */


static void
ixgbe_vf_enable_transmit(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, vfte;

	hw = &adapter->hw;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index));
	vfte |= IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte);
} /* ixgbe_vf_enable_transmit */


static void
ixgbe_vf_enable_receive(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, vfre;

	hw = &adapter->hw;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index));
	if (ixgbe_vf_frame_size_compatible(adapter, vf))
		vfre |= IXGBE_VF_BIT(vf->pool);
	else
		vfre &= ~IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre);
} /* ixgbe_vf_enable_receive */


static void
ixgbe_vf_reset_msg(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t ack;
	uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN];

	hw = &adapter->hw;

	ixgbe_process_vf_reset(adapter, vf);

	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr,
		    vf->pool, TRUE);
		ack = IXGBE_VT_MSGTYPE_ACK;
	} else
		ack = IXGBE_VT_MSGTYPE_NACK;

	ixgbe_vf_enable_transmit(adapter, vf);
	ixgbe_vf_enable_receive(adapter, vf);

	vf->flags |= IXGBE_VF_CTS;

	resp[0] = IXGBE_VF_RESET | ack;
	bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN);
	resp[3] = hw->mac.mc_filter_type;
	hw->mbx.ops.write(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
} /* ixgbe_vf_reset_msg */
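
/*
 * Layout of the reset reply written above (IXGBE_VF_PERMADDR_MSG_LEN
 * 32-bit words):
 *
 *	resp[0]		IXGBE_VF_RESET | ACK (or NACK if the permanent
 *			MAC address is invalid)
 *	resp[1..2]	the VF's six-byte station MAC address
 *	resp[3]		multicast filter type (hw->mac.mc_filter_type)
 */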


static void
ixgbe_vf_set_mac(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	uint8_t *mac;

	mac = (uint8_t *)&msg[1];

	/* Check that the VF has permission to change the MAC address. */
	if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	if (ixgbe_validate_mac_addr(mac) != 0) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);

	ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr, vf->pool,
	    TRUE);

	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_mac */


/*
 * VF multicast addresses are set by setting the appropriate bit in
 * one of 128 32-bit MTA registers (4096 possible hash values in all).
 */
static void
ixgbe_vf_set_mc_addr(struct adapter *adapter, struct ixgbe_vf *vf, u32 *msg)
{
	u16 *list = (u16 *)&msg[1];
	int entries;
	u32 vmolr, vec_bit, vec_reg, mta_reg;

	entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
	entries = min(entries, IXGBE_MAX_VF_MC);

	vmolr = IXGBE_READ_REG(&adapter->hw, IXGBE_VMOLR(vf->pool));

	vf->num_mc_hashes = entries;

	/* Set the appropriate MTA bit */
	for (int i = 0; i < entries; i++) {
		vf->mc_hash[i] = list[i];
		vec_reg = (vf->mc_hash[i] >> 5) & 0x7F;
		vec_bit = vf->mc_hash[i] & 0x1F;
		mta_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_MTA(vec_reg));
		mta_reg |= (1 << vec_bit);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_MTA(vec_reg), mta_reg);
	}

	vmolr |= IXGBE_VMOLR_ROMPE;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(vf->pool), vmolr);
	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_mc_addr */
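
/*
 * Worked example: a hash value of 0x0b41 selects register
 * vec_reg = (0x0b41 >> 5) & 0x7f = 0x5a and bit
 * vec_bit = 0x0b41 & 0x1f = 1, so bit 1 of MTA[0x5a] is set.
 */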


static void
ixgbe_vf_set_vlan(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	int enable;
	uint16_t tag;

	hw = &adapter->hw;
	enable = IXGBE_VT_MSGINFO(msg[0]);
	tag = msg[1] & IXGBE_VLVF_VLANID_MASK;

	if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	/* It is illegal to enable vlan tag 0. */
	if (tag == 0 && enable != 0) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	ixgbe_set_vfta(hw, tag, vf->pool, enable, false);
	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_vlan */


static void
ixgbe_vf_set_lpe(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t vf_max_size, pf_max_size, mhadd;

	hw = &adapter->hw;
	vf_max_size = msg[1];

	if (vf_max_size < ETHER_CRC_LEN) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		return;
	}

	vf_max_size -= ETHER_CRC_LEN;

	if (vf_max_size > IXGBE_MAX_FRAME_SIZE) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		return;
	}

	vf->max_frame_size = vf_max_size;
	ixgbe_update_max_frame(adapter, vf->max_frame_size);

	/*
	 * We might have to disable reception to this VF if the frame size is
	 * not compatible with the config on the PF.
	 */
	ixgbe_vf_enable_receive(adapter, vf);

	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
	pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;

	if (pf_max_size < adapter->max_frame_size) {
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_lpe */
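
/*
 * Example: a VF requesting a 1522-byte maximum frame (CRC included,
 * as the subtraction above implies) sends msg[1] = 1522; the stored
 * vf->max_frame_size becomes 1518 (ETHER_MAX_LEN), which the
 * compatibility checks treat as non-jumbo.
 */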


static void
ixgbe_vf_set_macvlan(struct adapter *adapter, struct ixgbe_vf *vf,
    uint32_t *msg)
{
	//XXX implement this
	ixgbe_send_vf_nack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_macvlan */


static void
ixgbe_vf_api_negotiate(struct adapter *adapter, struct ixgbe_vf *vf,
    uint32_t *msg)
{

	switch (msg[1]) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_1_1:
		vf->api_ver = msg[1];
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		break;
	default:
		vf->api_ver = IXGBE_API_VER_UNKNOWN;
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		break;
	}
} /* ixgbe_vf_api_negotiate */


static void
ixgbe_vf_get_queues(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
	int num_queues;

	hw = &adapter->hw;

	/* GET_QUEUES is not supported on pre-1.1 APIs. */
	switch (vf->api_ver) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_UNKNOWN:
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK |
	    IXGBE_VT_MSGTYPE_CTS;

	num_queues = ixgbe_vf_queues(adapter->iov_mode);
	resp[IXGBE_VF_TX_QUEUES] = num_queues;
	resp[IXGBE_VF_RX_QUEUES] = num_queues;
	resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
	resp[IXGBE_VF_DEF_QUEUE] = 0;

	hw->mbx.ops.write(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
} /* ixgbe_vf_get_queues */


static void
ixgbe_process_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t msg[IXGBE_VFMAILBOX_SIZE];
	int error;

	hw = &adapter->hw;

	error = hw->mbx.ops.read(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool);

	if (error != 0)
		return;

	CTR3(KTR_MALLOC, "%s: received msg %x from %d", adapter->ifp->if_xname,
	    msg[0], vf->pool);
	if (msg[0] == IXGBE_VF_RESET) {
		ixgbe_vf_reset_msg(adapter, vf, msg);
		return;
	}

	if (!(vf->flags & IXGBE_VF_CTS)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	switch (msg[0] & IXGBE_VT_MSG_MASK) {
	case IXGBE_VF_SET_MAC_ADDR:
		ixgbe_vf_set_mac(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_MULTICAST:
		ixgbe_vf_set_mc_addr(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_VLAN:
		ixgbe_vf_set_vlan(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_LPE:
		ixgbe_vf_set_lpe(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_MACVLAN:
		ixgbe_vf_set_macvlan(adapter, vf, msg);
		break;
	case IXGBE_VF_API_NEGOTIATE:
		ixgbe_vf_api_negotiate(adapter, vf, msg);
		break;
	case IXGBE_VF_GET_QUEUES:
		ixgbe_vf_get_queues(adapter, vf, msg);
		break;
	default:
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
	}
} /* ixgbe_process_vf_msg */


/* Tasklet for handling VF -> PF mailbox messages */
void
ixgbe_handle_mbx(void *context, int pending)
{
	struct adapter *adapter = context;
	struct ixgbe_hw *hw;
	struct ixgbe_vf *vf;
	int i;

	hw = &adapter->hw;

	IXGBE_CORE_LOCK(adapter);
	for (i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];

		if (vf->flags & IXGBE_VF_ACTIVE) {
			if (hw->mbx.ops.check_for_rst(hw, vf->pool) == 0)
				ixgbe_process_vf_reset(adapter, vf);

			if (hw->mbx.ops.check_for_msg(hw, vf->pool) == 0)
				ixgbe_process_vf_msg(adapter, vf);

			if (hw->mbx.ops.check_for_ack(hw, vf->pool) == 0)
				ixgbe_process_vf_ack(adapter, vf);
		}
	}
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_handle_mbx */

int
ixgbe_init_iov(device_t dev, u16 num_vfs, const nvlist_t *config)
{
	struct adapter *adapter;
	int retval = 0;

	adapter = device_get_softc(dev);
	adapter->iov_mode = IXGBE_NO_VM;

	if (num_vfs == 0) {
		/* Would we ever get num_vfs = 0? */
		retval = EINVAL;
		goto err_init_iov;
	}

	/*
	 * We've got to reserve a VM's worth of queues for the PF,
	 * thus we go into "64 VF mode" if 32+ VFs are requested.
	 * With 64 VFs, you can only have two queues per VF.
	 * With 32 VFs, you can have up to four queues per VF.
	 */
	if (num_vfs >= IXGBE_32_VM)
		adapter->iov_mode = IXGBE_64_VM;
	else
		adapter->iov_mode = IXGBE_32_VM;

	/* Again, reserving 1 VM's worth of queues for the PF */
	adapter->pool = adapter->iov_mode - 1;

	if ((num_vfs > adapter->pool) || (num_vfs >= IXGBE_64_VM)) {
		retval = ENOSPC;
		goto err_init_iov;
	}
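
	/*
	 * Example: a request for 40 VFs selects IXGBE_64_VM, reserving
	 * pool 63 (adapter->pool) and its two queues for the PF; a
	 * request for 20 VFs selects IXGBE_32_VM, with pool 31 for the
	 * PF and up to four queues per VF.
	 */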

	IXGBE_CORE_LOCK(adapter);

	adapter->vfs = malloc(sizeof(*adapter->vfs) * num_vfs, M_IXGBE_SRIOV,
	    M_NOWAIT | M_ZERO);

	if (adapter->vfs == NULL) {
		retval = ENOMEM;
		IXGBE_CORE_UNLOCK(adapter);
		goto err_init_iov;
	}

	adapter->num_vfs = num_vfs;

	/*
	 * Set the SR-IOV flag now, as it's needed by
	 * ixgbe_init_locked().
	 */
	adapter->feat_en |= IXGBE_FEATURE_SRIOV;
	adapter->init_locked(adapter);

	IXGBE_CORE_UNLOCK(adapter);

	return (retval);

err_init_iov:
	adapter->num_vfs = 0;
	adapter->pool = 0;
	adapter->iov_mode = IXGBE_NO_VM;

	return (retval);
} /* ixgbe_init_iov */

void
ixgbe_uninit_iov(device_t dev)
{
	struct ixgbe_hw *hw;
	struct adapter *adapter;
	uint32_t pf_reg, vf_reg;

	adapter = device_get_softc(dev);
	hw = &adapter->hw;

	IXGBE_CORE_LOCK(adapter);

	/* Enable rx/tx for the PF and disable it for all VFs. */
	pf_reg = IXGBE_VF_INDEX(adapter->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg), IXGBE_VF_BIT(adapter->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg), IXGBE_VF_BIT(adapter->pool));

	if (pf_reg == 0)
		vf_reg = 1;
	else
		vf_reg = 0;
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);

	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);

	free(adapter->vfs, M_IXGBE_SRIOV);
	adapter->vfs = NULL;
	adapter->num_vfs = 0;
	adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;

	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_uninit_iov */

static void
ixgbe_init_vf(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, pfmbimr;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	hw = &adapter->hw;

	if (!(vf->flags & IXGBE_VF_ACTIVE))
		return;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
	pfmbimr |= IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);

	ixgbe_vf_set_default_vlan(adapter, vf, vf->vlan_tag);

	// XXX multicast addresses

	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&adapter->hw, vf->rar_index,
		    vf->ether_addr, vf->pool, TRUE);
	}

	ixgbe_vf_enable_transmit(adapter, vf);
	ixgbe_vf_enable_receive(adapter, vf);

	ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
} /* ixgbe_init_vf */

void
ixgbe_initialize_iov(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
	int i;

	if (adapter->iov_mode == IXGBE_NO_VM)
		return;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	/* RMW appropriate registers based on IOV mode */
	/* Read... */
	mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
	/* Modify... */
	mrqc &= ~IXGBE_MRQC_MRQE_MASK;
	mtqc = IXGBE_MTQC_VT_ENA;      /* No initial MTQC read needed */
	gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
	gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
	gpie &= ~IXGBE_GPIE_VTMODE_MASK;
	switch (adapter->iov_mode) {
	case IXGBE_64_VM:
		mrqc |= IXGBE_MRQC_VMDQRSS64EN;
		mtqc |= IXGBE_MTQC_64VF;
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
		gpie |= IXGBE_GPIE_VTMODE_64;
		break;
	case IXGBE_32_VM:
		mrqc |= IXGBE_MRQC_VMDQRSS32EN;
		mtqc |= IXGBE_MTQC_32VF;
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
		gpie |= IXGBE_GPIE_VTMODE_32;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", adapter->iov_mode);
	}
	/* Write... */
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
	IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* Enable rx/tx for the PF. */
	vf_reg = IXGBE_VF_INDEX(adapter->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), IXGBE_VF_BIT(adapter->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), IXGBE_VF_BIT(adapter->pool));

	/* Allow VM-to-VM communication. */
	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);

	vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
	vt_ctl |= (adapter->pool << IXGBE_VT_CTL_POOL_SHIFT);
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);

	for (i = 0; i < adapter->num_vfs; i++)
		ixgbe_init_vf(adapter, &adapter->vfs[i]);
} /* ixgbe_initialize_iov */


/* Check the max frame setting of all active VFs */
void
ixgbe_recalculate_max_frame(struct adapter *adapter)
{
	struct ixgbe_vf *vf;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	for (int i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];
		if (vf->flags & IXGBE_VF_ACTIVE)
			ixgbe_update_max_frame(adapter, vf->max_frame_size);
	}
} /* ixgbe_recalculate_max_frame */

int
ixgbe_add_vf(device_t dev, u16 vfnum, const nvlist_t *config)
{
	struct adapter *adapter;
	struct ixgbe_vf *vf;
	const void *mac;

	adapter = device_get_softc(dev);

	KASSERT(vfnum < adapter->num_vfs, ("VF index %d is out of range %d",
	    vfnum, adapter->num_vfs));

	IXGBE_CORE_LOCK(adapter);
	vf = &adapter->vfs[vfnum];
	vf->pool = vfnum;

	/* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
	vf->rar_index = vfnum + 1;
	vf->default_vlan = 0;
	vf->max_frame_size = ETHER_MAX_LEN;
	ixgbe_update_max_frame(adapter, vf->max_frame_size);

	if (nvlist_exists_binary(config, "mac-addr")) {
		mac = nvlist_get_binary(config, "mac-addr", NULL);
		bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
		if (nvlist_get_bool(config, "allow-set-mac"))
			vf->flags |= IXGBE_VF_CAP_MAC;
	} else
		/*
		 * If the administrator has not specified a MAC address then
		 * we must allow the VF to choose one.
		 */
		vf->flags |= IXGBE_VF_CAP_MAC;

	vf->flags |= IXGBE_VF_ACTIVE;

	ixgbe_init_vf(adapter, vf);
	IXGBE_CORE_UNLOCK(adapter);

	return (0);
} /* ixgbe_add_vf */

#else

void
ixgbe_handle_mbx(void *context, int pending)
{
	UNREFERENCED_2PARAMETER(context, pending);
} /* ixgbe_handle_mbx */

inline int
ixgbe_vf_que_index(int mode, int vfnum, int num)
{
	UNREFERENCED_2PARAMETER(mode, vfnum);

	return num;
} /* ixgbe_vf_que_index */

#endif