/* $NetBSD: if_sriov.c,v 1.1.4.6 2021/09/15 16:38:00 martin Exp $ */
/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/if_sriov.c 327031 2017-12-20 18:15:06Z erj $*/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_sriov.c,v 1.1.4.6 2021/09/15 16:38:00 martin Exp $");

#include "ixgbe.h"
#include "ixgbe_sriov.h"

#ifdef PCI_IOV

MALLOC_DEFINE(M_IXGBE_SRIOV, "ix_sriov", "ix SR-IOV allocations");
/************************************************************************
 * ixgbe_pci_iov_detach
 ************************************************************************/
int
ixgbe_pci_iov_detach(device_t dev)
{
	return pci_iov_detach(dev);
}

/************************************************************************
 * ixgbe_define_iov_schemas
 ************************************************************************/
void
ixgbe_define_iov_schemas(device_t dev, int *error)
{
	nvlist_t *pf_schema, *vf_schema;

	pf_schema = pci_iov_schema_alloc_node();
	vf_schema = pci_iov_schema_alloc_node();
	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
	pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
	    IOV_SCHEMA_HASDEFAULT, TRUE);
	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
	    IOV_SCHEMA_HASDEFAULT, FALSE);
	pci_iov_schema_add_bool(vf_schema, "allow-promisc",
	    IOV_SCHEMA_HASDEFAULT, FALSE);
	*error = pci_iov_attach(dev, pf_schema, vf_schema);
	if (*error != 0) {
		device_printf(dev,
		    "Error %d setting up SR-IOV\n", *error);
	}
} /* ixgbe_define_iov_schemas */

/************************************************************************
 * ixgbe_align_all_queue_indices
 ************************************************************************/
inline void
ixgbe_align_all_queue_indices(struct adapter *adapter)
{
	int i;
	int index;

	for (i = 0; i < adapter->num_queues; i++) {
		index = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, i);
		adapter->rx_rings[i].me = index;
		adapter->tx_rings[i].me = index;
	}
}

/* Support functions for SR-IOV/VF management */
static inline void
ixgbe_send_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
	if (vf->flags & IXGBE_VF_CTS)
		msg |= IXGBE_VT_MSGTYPE_CTS;

	adapter->hw.mbx.ops.write(&adapter->hw, &msg, 1, vf->pool);
}

static inline void
ixgbe_send_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
	msg &= IXGBE_VT_MSG_MASK;
	ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_ACK);
}

static inline void
ixgbe_send_vf_nack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
	msg &= IXGBE_VT_MSG_MASK;
	ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_NACK);
}

static inline void
ixgbe_process_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf)
{
	if (!(vf->flags & IXGBE_VF_CTS))
		ixgbe_send_vf_nack(adapter, vf, 0);
}

static inline boolean_t
ixgbe_vf_mac_changed(struct ixgbe_vf *vf, const uint8_t *mac)
{
	return (bcmp(mac, vf->ether_addr, ETHER_ADDR_LEN) != 0);
}

static inline int
ixgbe_vf_queues(int mode)
{
	switch (mode) {
	case IXGBE_64_VM:
		return (2);
	case IXGBE_32_VM:
		return (4);
	case IXGBE_NO_VM:
	default:
		return (0);
	}
}

inline int
ixgbe_vf_que_index(int mode, int vfnum, int num)
{
	return ((vfnum * ixgbe_vf_queues(mode)) + num);
}

static inline void
ixgbe_update_max_frame(struct adapter *adapter, int max_frame)
{
	if (adapter->max_frame_size < max_frame)
		adapter->max_frame_size = max_frame;
}

inline u32
ixgbe_get_mrqc(int iov_mode)
{
	u32 mrqc;

	switch (iov_mode) {
	case IXGBE_64_VM:
		mrqc = IXGBE_MRQC_VMDQRSS64EN;
		break;
	case IXGBE_32_VM:
		mrqc = IXGBE_MRQC_VMDQRSS32EN;
		break;
	case IXGBE_NO_VM:
		mrqc = 0;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", iov_mode);
	}

	return mrqc;
}


inline u32
ixgbe_get_mtqc(int iov_mode)
{
	uint32_t mtqc;

	switch (iov_mode) {
	case IXGBE_64_VM:
		mtqc = IXGBE_MTQC_64VF | IXGBE_MTQC_VT_ENA;
		break;
	case IXGBE_32_VM:
		mtqc = IXGBE_MTQC_32VF | IXGBE_MTQC_VT_ENA;
		break;
	case IXGBE_NO_VM:
		mtqc = IXGBE_MTQC_64Q_1PB;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", iov_mode);
	}

	return mtqc;
}

void
ixgbe_ping_all_vfs(struct adapter *adapter)
{
	struct ixgbe_vf *vf;

	for (int i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];
		if (vf->flags & IXGBE_VF_ACTIVE)
			ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
	}
} /* ixgbe_ping_all_vfs */

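/************************************************************************
 * ixgbe_vf_set_default_vlan
 *
 *   Program the VF pool's receive filtering (VMOLR) and default VLAN
 *   insertion (VMVIR) for the given tag. Tag 0 accepts untagged
 *   traffic and inserts no default tag; a non-zero tag requires
 *   tagged traffic and inserts that tag on transmit.
 ************************************************************************/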
static void
ixgbe_vf_set_default_vlan(struct adapter *adapter, struct ixgbe_vf *vf,
    uint16_t tag)
{
	struct ixgbe_hw *hw;
	uint32_t vmolr, vmvir;

	hw = &adapter->hw;

	vf->vlan_tag = tag;

	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));

	/* Do not receive packets that pass inexact filters. */
	vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);

	/* Disable Multicast Promiscuous Mode. */
	vmolr &= ~IXGBE_VMOLR_MPE;

	/* Accept broadcasts. */
	vmolr |= IXGBE_VMOLR_BAM;

	if (tag == 0) {
		/* Accept non-vlan tagged traffic. */
		vmolr |= IXGBE_VMOLR_AUPE;

		/* Allow VM to tag outgoing traffic; no default tag. */
		vmvir = 0;
	} else {
		/* Require vlan-tagged traffic. */
		vmolr &= ~IXGBE_VMOLR_AUPE;

		/* Tag all traffic with provided vlan tag. */
		vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT);
	}
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr);
	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
} /* ixgbe_vf_set_default_vlan */

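/************************************************************************
 * ixgbe_clear_vfmbmem
 *
 *   Zero the VF's PF-side mailbox memory (PFMBMEM) so no stale
 *   message contents survive a VF reset.
 ************************************************************************/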
static void
ixgbe_clear_vfmbmem(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	uint32_t vf_index = IXGBE_VF_INDEX(vf->pool);
	uint16_t mbx_size = hw->mbx.size;
	uint16_t i;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	for (i = 0; i < mbx_size; ++i)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_index), i, 0x0);
} /* ixgbe_clear_vfmbmem */


static boolean_t
ixgbe_vf_frame_size_compatible(struct adapter *adapter, struct ixgbe_vf *vf)
{

	/*
	 * Frame size compatibility between PF and VF is only a problem on
	 * 82599-based cards. X540 and later support any combination of jumbo
	 * frames on PFs and VFs.
	 */
	if (adapter->hw.mac.type != ixgbe_mac_82599EB)
		return (TRUE);

	switch (vf->api_ver) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_UNKNOWN:
		/*
		 * On legacy (1.0 and older) VF versions, we don't support
		 * jumbo frames on either the PF or the VF.
		 */
		if (adapter->max_frame_size > ETHER_MAX_LEN ||
		    vf->max_frame_size > ETHER_MAX_LEN)
			return (FALSE);

		return (TRUE);
	case IXGBE_API_VER_1_1:
	default:
		/*
		 * 1.1 or later VF versions always work if they aren't using
		 * jumbo frames.
		 */
		if (vf->max_frame_size <= ETHER_MAX_LEN)
			return (TRUE);

		/*
		 * Jumbo frames only work with VFs if the PF is also using
		 * jumbo frames.
		 */
		if (adapter->max_frame_size <= ETHER_MAX_LEN)
			return (TRUE);

		return (FALSE);
	}
} /* ixgbe_vf_frame_size_compatible */

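/************************************************************************
 * ixgbe_process_vf_reset
 *
 *   Return a VF to its post-reset state: restore the default VLAN,
 *   clear its receive address (RAR) entry and mailbox memory, toggle
 *   its transmit-queue control (TXDCTL) state, and forget any
 *   negotiated mailbox API version. Called with the core lock held.
 ************************************************************************/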
static void
ixgbe_process_vf_reset(struct adapter *adapter, struct ixgbe_vf *vf)
{
	ixgbe_vf_set_default_vlan(adapter, vf, vf->default_vlan);

	// XXX clear multicast addresses

	ixgbe_clear_rar(&adapter->hw, vf->rar_index);
	ixgbe_clear_vfmbmem(adapter, vf);
	ixgbe_toggle_txdctl(&adapter->hw, IXGBE_VF_INDEX(vf->pool));

	vf->api_ver = IXGBE_API_VER_UNKNOWN;
} /* ixgbe_process_vf_reset */


static void
ixgbe_vf_enable_transmit(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, vfte;

	hw = &adapter->hw;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index));
	vfte |= IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte);
} /* ixgbe_vf_enable_transmit */

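/************************************************************************
 * ixgbe_vf_enable_receive
 *
 *   Set or clear the VF's bit in the receive enable bitmap (VFRE):
 *   reception is enabled only while the VF's frame size is compatible
 *   with the PF's configuration.
 ************************************************************************/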
static void
ixgbe_vf_enable_receive(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, vfre;

	hw = &adapter->hw;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index));
	if (ixgbe_vf_frame_size_compatible(adapter, vf))
		vfre |= IXGBE_VF_BIT(vf->pool);
	else
		vfre &= ~IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre);
} /* ixgbe_vf_enable_receive */

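/************************************************************************
 * ixgbe_vf_reset_msg
 *
 *   Handle a VF_RESET mailbox message: reset the VF's state, re-enable
 *   its TX/RX pools, and reply with its stored MAC address (ACK if the
 *   address is valid, NACK otherwise) and the multicast filter type.
 ************************************************************************/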
static void
ixgbe_vf_reset_msg(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t ack;
	uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN];

	hw = &adapter->hw;

	ixgbe_process_vf_reset(adapter, vf);

	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr,
		    vf->pool, TRUE);
		ack = IXGBE_VT_MSGTYPE_ACK;
	} else
		ack = IXGBE_VT_MSGTYPE_NACK;

	ixgbe_vf_enable_transmit(adapter, vf);
	ixgbe_vf_enable_receive(adapter, vf);

	vf->flags |= IXGBE_VF_CTS;

	resp[0] = IXGBE_VF_RESET | ack;
	bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN);
	resp[3] = hw->mac.mc_filter_type;
	hw->mbx.ops.write(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
} /* ixgbe_vf_reset_msg */

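/************************************************************************
 * ixgbe_vf_set_mac
 *
 *   Handle a SET_MAC_ADDR request. A request that would change the
 *   address is NACKed unless the VF has the set-MAC capability, as is
 *   any invalid address; otherwise the address is stored and written
 *   to the VF's RAR entry.
 ************************************************************************/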
static void
ixgbe_vf_set_mac(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	uint8_t *mac;

	mac = (uint8_t *)&msg[1];

	/* Check that the VF has permission to change the MAC address. */
	if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	if (ixgbe_validate_mac_addr(mac) != 0) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);

	ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr, vf->pool,
	    TRUE);

	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_mac */


/*
 * VF multicast addresses are set by setting the appropriate bit in
 * one of 128 32-bit registers (4096 possible bits).
 */
static void
ixgbe_vf_set_mc_addr(struct adapter *adapter, struct ixgbe_vf *vf, u32 *msg)
{
	u16 *list = (u16 *)&msg[1];
	int entries;
	u32 vmolr, vec_bit, vec_reg, mta_reg;

	entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
	entries = min(entries, IXGBE_MAX_VF_MC);

	vmolr = IXGBE_READ_REG(&adapter->hw, IXGBE_VMOLR(vf->pool));

	vf->num_mc_hashes = entries;

	/* Set the appropriate MTA bit */
	for (int i = 0; i < entries; i++) {
		vf->mc_hash[i] = list[i];
		vec_reg = (vf->mc_hash[i] >> 5) & 0x7F;
		vec_bit = vf->mc_hash[i] & 0x1F;
		mta_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_MTA(vec_reg));
		mta_reg |= (1 << vec_bit);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_MTA(vec_reg), mta_reg);
	}

	vmolr |= IXGBE_VMOLR_ROMPE;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(vf->pool), vmolr);
	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_mc_addr */

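/************************************************************************
 * ixgbe_vf_set_vlan
 *
 *   Handle a SET_VLAN request: update the VLAN filter table (VFTA)
 *   for the VF's pool. Requires the VLAN capability, and enabling
 *   VLAN tag 0 is rejected.
 ************************************************************************/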
static void
ixgbe_vf_set_vlan(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	int enable;
	uint16_t tag;

	hw = &adapter->hw;
	enable = IXGBE_VT_MSGINFO(msg[0]);
	tag = msg[1] & IXGBE_VLVF_VLANID_MASK;

	if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	/* It is illegal to enable vlan tag 0. */
	if (tag == 0 && enable != 0) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	ixgbe_set_vfta(hw, tag, vf->pool, enable, false);
	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_vlan */

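/************************************************************************
 * ixgbe_vf_set_lpe
 *
 *   Handle a SET_LPE (large packet enable) request carrying the VF's
 *   maximum frame size in msg[1]. Out-of-range sizes are intentionally
 *   ACKed but otherwise ignored; a valid size may grow the PF's MHADD
 *   maximum frame setting and can disable reception for the VF if it
 *   becomes incompatible with the PF.
 ************************************************************************/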
static void
ixgbe_vf_set_lpe(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t vf_max_size, pf_max_size, mhadd;

	hw = &adapter->hw;
	vf_max_size = msg[1];

	if (vf_max_size < ETHER_CRC_LEN) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		return;
	}

	vf_max_size -= ETHER_CRC_LEN;

	if (vf_max_size > IXGBE_MAX_FRAME_SIZE) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		return;
	}

	vf->max_frame_size = vf_max_size;
	ixgbe_update_max_frame(adapter, vf->max_frame_size);

	/*
	 * We might have to disable reception to this VF if the frame size is
	 * not compatible with the config on the PF.
	 */
	ixgbe_vf_enable_receive(adapter, vf);

	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
	pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;

	if (pf_max_size < adapter->max_frame_size) {
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_lpe */


static void
ixgbe_vf_set_macvlan(struct adapter *adapter, struct ixgbe_vf *vf,
    uint32_t *msg)
{
	//XXX implement this
	ixgbe_send_vf_nack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_macvlan */

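/************************************************************************
 * ixgbe_vf_api_negotiate
 *
 *   Accept mailbox API versions 1.0 and 1.1; any other request is
 *   NACKed and the VF's API version is marked unknown.
 ************************************************************************/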
static void
ixgbe_vf_api_negotiate(struct adapter *adapter, struct ixgbe_vf *vf,
    uint32_t *msg)
{

	switch (msg[1]) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_1_1:
		vf->api_ver = msg[1];
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		break;
	default:
		vf->api_ver = IXGBE_API_VER_UNKNOWN;
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		break;
	}
} /* ixgbe_vf_api_negotiate */

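/************************************************************************
 * ixgbe_vf_get_queues
 *
 *   Answer a GET_QUEUES query with the per-VF TX/RX queue counts for
 *   the current pool layout; not supported on pre-1.1 mailbox APIs.
 ************************************************************************/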
static void
ixgbe_vf_get_queues(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
	int num_queues;

	hw = &adapter->hw;

	/* GET_QUEUES is not supported on pre-1.1 APIs. */
	switch (vf->api_ver) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_UNKNOWN:
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK |
	    IXGBE_VT_MSGTYPE_CTS;

	num_queues = ixgbe_vf_queues(adapter->iov_mode);
	resp[IXGBE_VF_TX_QUEUES] = num_queues;
	resp[IXGBE_VF_RX_QUEUES] = num_queues;
	resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
	resp[IXGBE_VF_DEF_QUEUE] = 0;

	hw->mbx.ops.write(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
} /* ixgbe_vf_get_queues */

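/************************************************************************
 * ixgbe_process_vf_msg
 *
 *   Read one message from the VF's mailbox and dispatch it. VF_RESET
 *   is always honored; all other messages require the VF to be in the
 *   clear-to-send (CTS) state and are NACKed otherwise.
 ************************************************************************/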
static void
ixgbe_process_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t msg[IXGBE_VFMAILBOX_SIZE];
	int error;

	hw = &adapter->hw;

	error = hw->mbx.ops.read(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool);

	if (error != 0)
		return;

	CTR3(KTR_MALLOC, "%s: received msg %x from %d", adapter->ifp->if_xname,
	    msg[0], vf->pool);
	if (msg[0] == IXGBE_VF_RESET) {
		ixgbe_vf_reset_msg(adapter, vf, msg);
		return;
	}

	if (!(vf->flags & IXGBE_VF_CTS)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	switch (msg[0] & IXGBE_VT_MSG_MASK) {
	case IXGBE_VF_SET_MAC_ADDR:
		ixgbe_vf_set_mac(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_MULTICAST:
		ixgbe_vf_set_mc_addr(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_VLAN:
		ixgbe_vf_set_vlan(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_LPE:
		ixgbe_vf_set_lpe(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_MACVLAN:
		ixgbe_vf_set_macvlan(adapter, vf, msg);
		break;
	case IXGBE_VF_API_NEGOTIATE:
		ixgbe_vf_api_negotiate(adapter, vf, msg);
		break;
	case IXGBE_VF_GET_QUEUES:
		ixgbe_vf_get_queues(adapter, vf, msg);
		break;
	default:
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
	}
} /* ixgbe_process_vf_msg */


/* Tasklet for handling VF -> PF mailbox messages */
void
ixgbe_handle_mbx(void *context, int pending)
{
	struct adapter *adapter = context;
	struct ixgbe_hw *hw;
	struct ixgbe_vf *vf;
	int i;

	hw = &adapter->hw;

	IXGBE_CORE_LOCK(adapter);
	for (i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];

		if (vf->flags & IXGBE_VF_ACTIVE) {
			if (hw->mbx.ops.check_for_rst(hw, vf->pool) == 0)
				ixgbe_process_vf_reset(adapter, vf);

			if (hw->mbx.ops.check_for_msg(hw, vf->pool) == 0)
				ixgbe_process_vf_msg(adapter, vf);

			if (hw->mbx.ops.check_for_ack(hw, vf->pool) == 0)
				ixgbe_process_vf_ack(adapter, vf);
		}
	}
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_handle_mbx */

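/************************************************************************
 * ixgbe_init_iov
 *
 *   Enable SR-IOV: choose the VMDq pool layout (32 or 64 pools, with
 *   one pool reserved for the PF), allocate the VF array, and
 *   reinitialize the adapter with the SRIOV feature flag set.
 ************************************************************************/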
int
ixgbe_init_iov(device_t dev, u16 num_vfs, const nvlist_t *config)
{
	struct adapter *adapter;
	int retval = 0;

	adapter = device_get_softc(dev);
	adapter->iov_mode = IXGBE_NO_VM;

	if (num_vfs == 0) {
		/* Would we ever get num_vfs = 0? */
		retval = EINVAL;
		goto err_init_iov;
	}

	/*
	 * We've got to reserve a VM's worth of queues for the PF,
	 * thus we go into "64 VF mode" if 32+ VFs are requested.
	 * With 64 VFs, you can only have two queues per VF.
	 * With 32 VFs, you can have up to four queues per VF.
	 */
	if (num_vfs >= IXGBE_32_VM)
		adapter->iov_mode = IXGBE_64_VM;
	else
		adapter->iov_mode = IXGBE_32_VM;

	/* Again, reserving 1 VM's worth of queues for the PF */
	adapter->pool = adapter->iov_mode - 1;

	if ((num_vfs > adapter->pool) || (num_vfs >= IXGBE_64_VM)) {
		retval = ENOSPC;
		goto err_init_iov;
	}

	IXGBE_CORE_LOCK(adapter);

	adapter->vfs = malloc(sizeof(*adapter->vfs) * num_vfs, M_IXGBE_SRIOV,
	    M_NOWAIT | M_ZERO);

	if (adapter->vfs == NULL) {
		retval = ENOMEM;
		IXGBE_CORE_UNLOCK(adapter);
		goto err_init_iov;
	}

	adapter->num_vfs = num_vfs;

	/*
	 * Set the SRIOV flag now as it's needed by ixgbe_init_locked().
	 */
	adapter->feat_en |= IXGBE_FEATURE_SRIOV;
	adapter->init_locked(adapter);

	IXGBE_CORE_UNLOCK(adapter);

	return (retval);

err_init_iov:
	adapter->num_vfs = 0;
	adapter->pool = 0;
	adapter->iov_mode = IXGBE_NO_VM;

	return (retval);
} /* ixgbe_init_iov */

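/************************************************************************
 * ixgbe_uninit_iov
 *
 *   Tear down SR-IOV: leave rx/tx enabled only for the PF pool,
 *   disable the virtualization control register (VT_CTL), and free
 *   the VF array.
 ************************************************************************/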
void
ixgbe_uninit_iov(device_t dev)
{
	struct ixgbe_hw *hw;
	struct adapter *adapter;
	uint32_t pf_reg, vf_reg;

	adapter = device_get_softc(dev);
	hw = &adapter->hw;

	IXGBE_CORE_LOCK(adapter);

	/* Enable rx/tx for the PF and disable it for all VFs. */
	pf_reg = IXGBE_VF_INDEX(adapter->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg), IXGBE_VF_BIT(adapter->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg), IXGBE_VF_BIT(adapter->pool));

	if (pf_reg == 0)
		vf_reg = 1;
	else
		vf_reg = 0;
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);

	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);

	free(adapter->vfs, M_IXGBE_SRIOV);
	adapter->vfs = NULL;
	adapter->num_vfs = 0;
	adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;

	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_uninit_iov */

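/************************************************************************
 * ixgbe_init_vf
 *
 *   Bring one active VF online: unmask its mailbox interrupt, restore
 *   its default VLAN and MAC filter, enable its TX/RX pools, and ping
 *   it so it knows the PF has (re)started.
 ************************************************************************/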
static void
ixgbe_init_vf(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, pfmbimr;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	hw = &adapter->hw;

	if (!(vf->flags & IXGBE_VF_ACTIVE))
		return;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
	pfmbimr |= IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);

	ixgbe_vf_set_default_vlan(adapter, vf, vf->vlan_tag);

	// XXX multicast addresses

	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&adapter->hw, vf->rar_index,
		    vf->ether_addr, vf->pool, TRUE);
	}

	ixgbe_vf_enable_transmit(adapter, vf);
	ixgbe_vf_enable_receive(adapter, vf);

	ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
} /* ixgbe_init_vf */

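/************************************************************************
 * ixgbe_initialize_iov
 *
 *   Program the global virtualization registers (MRQC, MTQC, GCR_EXT,
 *   GPIE) for the configured pool layout, enable PF rx/tx and
 *   VM-to-VM loopback, point the default pool at the PF via VT_CTL,
 *   and then initialize each VF. Called with the core lock held.
 ************************************************************************/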
void
ixgbe_initialize_iov(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
	int i;

	if (adapter->iov_mode == IXGBE_NO_VM)
		return;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	/* RMW appropriate registers based on IOV mode */
	/* Read... */
	mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
	/* Modify... */
	mrqc &= ~IXGBE_MRQC_MRQE_MASK;
	mtqc = IXGBE_MTQC_VT_ENA;	/* No initial MTQC read needed */
	gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
	gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
	gpie &= ~IXGBE_GPIE_VTMODE_MASK;
	switch (adapter->iov_mode) {
	case IXGBE_64_VM:
		mrqc |= IXGBE_MRQC_VMDQRSS64EN;
		mtqc |= IXGBE_MTQC_64VF;
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
		gpie |= IXGBE_GPIE_VTMODE_64;
		break;
	case IXGBE_32_VM:
		mrqc |= IXGBE_MRQC_VMDQRSS32EN;
		mtqc |= IXGBE_MTQC_32VF;
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
		gpie |= IXGBE_GPIE_VTMODE_32;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", adapter->iov_mode);
	}
	/* Write... */
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
	IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* Enable rx/tx for the PF. */
	vf_reg = IXGBE_VF_INDEX(adapter->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), IXGBE_VF_BIT(adapter->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), IXGBE_VF_BIT(adapter->pool));

	/* Allow VM-to-VM communication. */
	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);

	vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
	vt_ctl |= (adapter->pool << IXGBE_VT_CTL_POOL_SHIFT);
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);

	for (i = 0; i < adapter->num_vfs; i++)
		ixgbe_init_vf(adapter, &adapter->vfs[i]);
} /* ixgbe_initialize_iov */


/* Check the max frame setting of all active VFs */
void
ixgbe_recalculate_max_frame(struct adapter *adapter)
{
	struct ixgbe_vf *vf;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	for (int i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];
		if (vf->flags & IXGBE_VF_ACTIVE)
			ixgbe_update_max_frame(adapter, vf->max_frame_size);
	}
} /* ixgbe_recalculate_max_frame */

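/************************************************************************
 * ixgbe_add_vf
 *
 *   Attach one VF: record its pool and RAR index, apply the
 *   "mac-addr" and "allow-set-mac" settings from the nvlist config,
 *   mark it active, and initialize it.
 ************************************************************************/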
int
ixgbe_add_vf(device_t dev, u16 vfnum, const nvlist_t *config)
{
	struct adapter *adapter;
	struct ixgbe_vf *vf;
	const void *mac;

	adapter = device_get_softc(dev);

	KASSERT(vfnum < adapter->num_vfs, ("VF index %d is out of range %d",
	    vfnum, adapter->num_vfs));

	IXGBE_CORE_LOCK(adapter);
	vf = &adapter->vfs[vfnum];
	vf->pool = vfnum;

	/* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
	vf->rar_index = vfnum + 1;
	vf->default_vlan = 0;
	vf->max_frame_size = ETHER_MAX_LEN;
	ixgbe_update_max_frame(adapter, vf->max_frame_size);

	if (nvlist_exists_binary(config, "mac-addr")) {
		mac = nvlist_get_binary(config, "mac-addr", NULL);
		bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
		if (nvlist_get_bool(config, "allow-set-mac"))
			vf->flags |= IXGBE_VF_CAP_MAC;
	} else
		/*
		 * If the administrator has not specified a MAC address then
		 * we must allow the VF to choose one.
		 */
		vf->flags |= IXGBE_VF_CAP_MAC;

	vf->flags |= IXGBE_VF_ACTIVE;

	ixgbe_init_vf(adapter, vf);
	IXGBE_CORE_UNLOCK(adapter);

	return (0);
} /* ixgbe_add_vf */

#else

void
ixgbe_handle_mbx(void *context, int pending)
{
	UNREFERENCED_2PARAMETER(context, pending);
} /* ixgbe_handle_mbx */

inline int
ixgbe_vf_que_index(int mode, int vfnum, int num)
{
	UNREFERENCED_2PARAMETER(mode, vfnum);

	return num;
} /* ixgbe_vf_que_index */

#endif