/* $NetBSD: if_sriov.c,v 1.17.4.1 2023/10/13 18:16:51 martin Exp $ */
/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/if_sriov.c 327031 2017-12-20 18:15:06Z erj $*/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_sriov.c,v 1.17.4.1 2023/10/13 18:16:51 martin Exp $");

#include "ixgbe.h"
#include "ixgbe_sriov.h"

#ifdef PCI_IOV

MALLOC_DEFINE(M_IXGBE_SRIOV, "ix_sriov", "ix SR-IOV allocations");

/************************************************************************
 * ixgbe_pci_iov_detach
 ************************************************************************/
int
ixgbe_pci_iov_detach(device_t dev)
{
	return pci_iov_detach(dev);
}

/************************************************************************
 * ixgbe_define_iov_schemas
 ************************************************************************/
void
ixgbe_define_iov_schemas(device_t dev, int *error)
{
	nvlist_t *pf_schema, *vf_schema;

	pf_schema = pci_iov_schema_alloc_node();
	vf_schema = pci_iov_schema_alloc_node();
	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
	pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
	    IOV_SCHEMA_HASDEFAULT, TRUE);
	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
	    IOV_SCHEMA_HASDEFAULT, FALSE);
	pci_iov_schema_add_bool(vf_schema, "allow-promisc",
	    IOV_SCHEMA_HASDEFAULT, FALSE);
	*error = pci_iov_attach(dev, pf_schema, vf_schema);
	if (*error != 0) {
		device_printf(dev,
		    "Error %d setting up SR-IOV\n", *error);
	}
} /* ixgbe_define_iov_schemas */

/************************************************************************
 * ixgbe_align_all_queue_indices
 ************************************************************************/
inline void
ixgbe_align_all_queue_indices(struct ixgbe_softc *sc)
{
	int i;
	int index;

	for (i = 0; i < sc->num_queues; i++) {
		index = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);
		sc->rx_rings[i].me = index;
		sc->tx_rings[i].me = index;
	}
}

/* Support functions for SR-IOV/VF management */
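/*
 * Mailbox replies carry the original message type with either
 * IXGBE_VT_MSGTYPE_SUCCESS or IXGBE_VT_MSGTYPE_FAILURE or'd in; the CTS
 * ("clear to send") bit tells the VF that the PF is ready to talk to it.
 */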
static inline void
ixgbe_send_vf_msg(struct ixgbe_hw *hw, struct ixgbe_vf *vf, u32 msg)
{
	if (vf->flags & IXGBE_VF_CTS)
		msg |= IXGBE_VT_MSGTYPE_CTS;

	hw->mbx.ops[vf->pool].write(hw, &msg, 1, vf->pool);
}

static inline void
ixgbe_send_vf_ack(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 msg)
{
	msg &= IXGBE_VT_MSG_MASK;
	ixgbe_send_vf_msg(&sc->hw, vf, msg | IXGBE_VT_MSGTYPE_SUCCESS);
}

static inline void
ixgbe_send_vf_nack(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 msg)
{
	msg &= IXGBE_VT_MSG_MASK;
	ixgbe_send_vf_msg(&sc->hw, vf, msg | IXGBE_VT_MSGTYPE_FAILURE);
}

static inline void
ixgbe_process_vf_ack(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
{
	if (!(vf->flags & IXGBE_VF_CTS))
		ixgbe_send_vf_nack(sc, vf, 0);
}

static inline bool
ixgbe_vf_mac_changed(struct ixgbe_vf *vf, const uint8_t *mac)
{
	return (bcmp(mac, vf->ether_addr, ETHER_ADDR_LEN) != 0);
}

static inline int
ixgbe_vf_queues(int mode)
{
	switch (mode) {
	case IXGBE_64_VM:
		return (2);
	case IXGBE_32_VM:
		return (4);
	case IXGBE_NO_VM:
	default:
		return (0);
	}
}

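/*
 * Map (pool, queue-within-pool) to an absolute hardware queue index.
 * Example: in IXGBE_32_VM mode each pool owns four queues, so queue 1
 * of pool 2 maps to hardware queue 2 * 4 + 1 = 9.
 */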
inline int
ixgbe_vf_que_index(int mode, int vfnum, int num)
{
	return ((vfnum * ixgbe_vf_queues(mode)) + num);
}

static inline void
ixgbe_update_max_frame(struct ixgbe_softc *sc, int max_frame)
{
	if (sc->max_frame_size < max_frame)
		sc->max_frame_size = max_frame;
}

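/*
 * MRQC/MTQC select how the hardware receive and transmit queues are
 * carved up: VMDq with RSS across 32 or 64 pools while SR-IOV is
 * active, a single pool otherwise.
 */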
inline u32
ixgbe_get_mrqc(int iov_mode)
{
	u32 mrqc;

	switch (iov_mode) {
	case IXGBE_64_VM:
		mrqc = IXGBE_MRQC_VMDQRSS64EN;
		break;
	case IXGBE_32_VM:
		mrqc = IXGBE_MRQC_VMDQRSS32EN;
		break;
	case IXGBE_NO_VM:
		mrqc = 0;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", iov_mode);
	}

	return mrqc;
}


inline u32
ixgbe_get_mtqc(int iov_mode)
{
	uint32_t mtqc;

	switch (iov_mode) {
	case IXGBE_64_VM:
		mtqc = IXGBE_MTQC_64VF | IXGBE_MTQC_VT_ENA;
		break;
	case IXGBE_32_VM:
		mtqc = IXGBE_MTQC_32VF | IXGBE_MTQC_VT_ENA;
		break;
	case IXGBE_NO_VM:
		mtqc = IXGBE_MTQC_64Q_1PB;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", iov_mode);
	}

	return mtqc;
}

void
ixgbe_ping_all_vfs(struct ixgbe_softc *sc)
{
	struct ixgbe_vf *vf;

	for (int i = 0; i < sc->num_vfs; i++) {
		vf = &sc->vfs[i];
		if (vf->flags & IXGBE_VF_ACTIVE)
			ixgbe_send_vf_msg(&sc->hw, vf, IXGBE_PF_CONTROL_MSG);
	}
} /* ixgbe_ping_all_vfs */


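/*
 * Program the pool's receive filter (VMOLR) and default VLAN insertion
 * (VMVIR) registers.  A tag of 0 means "no default VLAN": untagged
 * frames are accepted and the VF may tag its own traffic.
 */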
static void
ixgbe_vf_set_default_vlan(struct ixgbe_softc *sc, struct ixgbe_vf *vf,
    uint16_t tag)
{
	struct ixgbe_hw *hw;
	uint32_t vmolr, vmvir;

	hw = &sc->hw;

	vf->vlan_tag = tag;

	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));

	/* Do not receive packets that pass inexact filters. */
	vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);

	/* Disable Multicast Promiscuous Mode. */
	vmolr &= ~IXGBE_VMOLR_MPE;

	/* Accept broadcasts. */
	vmolr |= IXGBE_VMOLR_BAM;

	if (tag == 0) {
		/* Accept non-vlan tagged traffic. */
		vmolr |= IXGBE_VMOLR_AUPE;

		/* Allow VM to tag outgoing traffic; no default tag. */
		vmvir = 0;
	} else {
		/* Require vlan-tagged traffic. */
		vmolr &= ~IXGBE_VMOLR_AUPE;

		/* Tag all traffic with provided vlan tag. */
		vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT);
	}
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr);
	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
} /* ixgbe_vf_set_default_vlan */


static void
ixgbe_clear_vfmbmem(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw = &sc->hw;
	uint32_t vf_index = IXGBE_VF_INDEX(vf->pool);
	uint16_t mbx_size = hw->mbx.size;
	uint16_t i;

	IXGBE_CORE_LOCK_ASSERT(sc);

	for (i = 0; i < mbx_size; ++i)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_index), i, 0x0);
} /* ixgbe_clear_vfmbmem */


static bool
ixgbe_vf_frame_size_compatible(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
{

	/*
	 * Frame size compatibility between PF and VF is only a problem on
	 * 82599-based cards. X540 and later support any combination of jumbo
	 * frames on PFs and VFs.
	 */
	if (sc->hw.mac.type != ixgbe_mac_82599EB)
		return (TRUE);

	switch (vf->api_ver) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_UNKNOWN:
		/*
		 * On legacy (1.0 and older) VF versions, we don't support jumbo
		 * frames on either the PF or the VF.
		 */
		if (sc->max_frame_size > ETHER_MAX_LEN ||
		    vf->max_frame_size > ETHER_MAX_LEN)
			return (FALSE);

		return (TRUE);

	case IXGBE_API_VER_1_1:
	default:
		/*
		 * 1.1 or later VF versions always work if they aren't using
		 * jumbo frames.
		 */
		if (vf->max_frame_size <= ETHER_MAX_LEN)
			return (TRUE);

		/*
		 * Jumbo frames only work with VFs if the PF is also using jumbo
		 * frames.
		 */
		if (sc->max_frame_size <= ETHER_MAX_LEN)
			return (TRUE);

		return (FALSE);
	}
} /* ixgbe_vf_frame_size_compatible */


static void
ixgbe_process_vf_reset(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
{
	ixgbe_vf_set_default_vlan(sc, vf, vf->default_vlan);

	// XXX clear multicast addresses

	ixgbe_clear_rar(&sc->hw, vf->rar_index);
	ixgbe_clear_vfmbmem(sc, vf);
	ixgbe_toggle_txdctl(&sc->hw, IXGBE_VF_INDEX(vf->pool));

	vf->api_ver = IXGBE_API_VER_UNKNOWN;
} /* ixgbe_process_vf_reset */


static void
ixgbe_vf_enable_transmit(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, vfte;

	hw = &sc->hw;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index));
	vfte |= IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte);
} /* ixgbe_vf_enable_transmit */


static void
ixgbe_vf_enable_receive(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, vfre;

	hw = &sc->hw;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index));
	if (ixgbe_vf_frame_size_compatible(sc, vf))
		vfre |= IXGBE_VF_BIT(vf->pool);
	else
		vfre &= ~IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre);
} /* ixgbe_vf_enable_receive */


static void
ixgbe_vf_reset_msg(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t ack;
	uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN];

	hw = &sc->hw;

	ixgbe_process_vf_reset(sc, vf);

	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&sc->hw, vf->rar_index, vf->ether_addr,
		    vf->pool, TRUE);
		ack = IXGBE_VT_MSGTYPE_SUCCESS;
	} else
		ack = IXGBE_VT_MSGTYPE_FAILURE;

	ixgbe_vf_enable_transmit(sc, vf);
	ixgbe_vf_enable_receive(sc, vf);

	vf->flags |= IXGBE_VF_CTS;

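	/*
	 * Reply layout: word 0 is VF_RESET | status, words 1-2 carry the
	 * permanent MAC address, word 3 the multicast filter type.
	 */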
	resp[0] = IXGBE_VF_RESET | ack;
	bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN);
	resp[3] = hw->mac.mc_filter_type;
	hw->mbx.ops[vf->pool].write(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN,
	    vf->pool);
} /* ixgbe_vf_reset_msg */


static void
ixgbe_vf_set_mac(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
{
	uint8_t *mac;

	mac = (uint8_t*)&msg[1];

	/* Check that the VF has permission to change the MAC address. */
	if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
		ixgbe_send_vf_nack(sc, vf, msg[0]);
		return;
	}

	if (ixgbe_validate_mac_addr(mac) != 0) {
		ixgbe_send_vf_nack(sc, vf, msg[0]);
		return;
	}

	bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);

	ixgbe_set_rar(&sc->hw, vf->rar_index, vf->ether_addr, vf->pool,
	    TRUE);

	ixgbe_send_vf_ack(sc, vf, msg[0]);
} /* ixgbe_vf_set_mac */


/*
 * VF multicast addresses are set by using the appropriate bit in
 * 1 of 128 32 bit addresses (4096 possible).
 */
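/*
 * Example: hash 0x0863 selects MTA register (0x0863 >> 5) & 0x7F = 0x43
 * and bit 0x0863 & 0x1F = 3 within that register.
 */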
static void
ixgbe_vf_set_mc_addr(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 *msg)
{
	u16 *list = (u16*)&msg[1];
	int entries;
	u32 vmolr, vec_bit, vec_reg, mta_reg;

	entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
	entries = uimin(entries, IXGBE_MAX_VF_MC);

	vmolr = IXGBE_READ_REG(&sc->hw, IXGBE_VMOLR(vf->pool));

	vf->num_mc_hashes = entries;

	/* Set the appropriate MTA bit */
	for (int i = 0; i < entries; i++) {
		vf->mc_hash[i] = list[i];
		vec_reg = (vf->mc_hash[i] >> 5) & 0x7F;
		vec_bit = vf->mc_hash[i] & 0x1F;
		mta_reg = IXGBE_READ_REG(&sc->hw, IXGBE_MTA(vec_reg));
		mta_reg |= (1 << vec_bit);
		IXGBE_WRITE_REG(&sc->hw, IXGBE_MTA(vec_reg), mta_reg);
	}

	vmolr |= IXGBE_VMOLR_ROMPE;
	IXGBE_WRITE_REG(&sc->hw, IXGBE_VMOLR(vf->pool), vmolr);
	ixgbe_send_vf_ack(sc, vf, msg[0]);
} /* ixgbe_vf_set_mc_addr */


static void
ixgbe_vf_set_vlan(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	int enable;
	uint16_t tag;

	hw = &sc->hw;
	enable = IXGBE_VT_MSGINFO(msg[0]);
	tag = msg[1] & IXGBE_VLVF_VLANID_MASK;

	if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
		ixgbe_send_vf_nack(sc, vf, msg[0]);
		return;
	}

	/* It is illegal to enable vlan tag 0. */
	if (tag == 0 && enable != 0) {
		ixgbe_send_vf_nack(sc, vf, msg[0]);
		return;
	}

	ixgbe_set_vfta(hw, tag, vf->pool, enable, false);
	ixgbe_send_vf_ack(sc, vf, msg[0]);
} /* ixgbe_vf_set_vlan */


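/*
 * Handle a VF "set LPE" (large packet enable) request: msg[1] carries
 * the VF's requested maximum frame size including the CRC.
 */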
static void
ixgbe_vf_set_lpe(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t vf_max_size, pf_max_size, mhadd;

	hw = &sc->hw;
	vf_max_size = msg[1];

	if (vf_max_size < ETHER_CRC_LEN) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_ack(sc, vf, msg[0]);
		return;
	}

	vf_max_size -= ETHER_CRC_LEN;

	if (vf_max_size > IXGBE_MAX_FRAME_SIZE) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_ack(sc, vf, msg[0]);
		return;
	}

	vf->max_frame_size = vf_max_size;
	ixgbe_update_max_frame(sc, vf->max_frame_size);

	/*
	 * We might have to disable reception to this VF if the frame size is
	 * not compatible with the config on the PF.
	 */
	ixgbe_vf_enable_receive(sc, vf);

	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
	pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;

	if (pf_max_size < sc->max_frame_size) {
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	ixgbe_send_vf_ack(sc, vf, msg[0]);
} /* ixgbe_vf_set_lpe */


static void
ixgbe_vf_set_macvlan(struct ixgbe_softc *sc, struct ixgbe_vf *vf,
    uint32_t *msg)
{
	//XXX implement this
	ixgbe_send_vf_nack(sc, vf, msg[0]);
} /* ixgbe_vf_set_macvlan */


static void
ixgbe_vf_api_negotiate(struct ixgbe_softc *sc, struct ixgbe_vf *vf,
    uint32_t *msg)
{

	switch (msg[1]) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_1_1:
		vf->api_ver = msg[1];
		ixgbe_send_vf_ack(sc, vf, msg[0]);
		break;
	default:
		vf->api_ver = IXGBE_API_VER_UNKNOWN;
		ixgbe_send_vf_nack(sc, vf, msg[0]);
		break;
	}
} /* ixgbe_vf_api_negotiate */


static void
ixgbe_vf_get_queues(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
	int num_queues;

	hw = &sc->hw;

	/* GET_QUEUES is not supported on pre-1.1 APIs. */
	switch (msg[0]) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_UNKNOWN:
		ixgbe_send_vf_nack(sc, vf, msg[0]);
		return;
	}

	resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_SUCCESS |
	    IXGBE_VT_MSGTYPE_CTS;

	num_queues = ixgbe_vf_queues(sc->iov_mode);
	resp[IXGBE_VF_TX_QUEUES] = num_queues;
	resp[IXGBE_VF_RX_QUEUES] = num_queues;
	resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
	resp[IXGBE_VF_DEF_QUEUE] = 0;

	hw->mbx.ops[vf->pool].write(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN,
	    vf->pool);
} /* ixgbe_vf_get_queues */


static void
ixgbe_process_vf_msg(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t msg[IXGBE_VFMAILBOX_SIZE];
	int error;

	hw = &sc->hw;

	error = hw->mbx.ops[vf->pool].read(hw, msg, IXGBE_VFMAILBOX_SIZE,
	    vf->pool);

	if (error != 0)
		return;

	CTR3(KTR_MALLOC, "%s: received msg %x from %d",
	    sc->ifp->if_xname, msg[0], vf->pool);
	if (msg[0] == IXGBE_VF_RESET) {
		ixgbe_vf_reset_msg(sc, vf, msg);
		return;
	}

	if (!(vf->flags & IXGBE_VF_CTS)) {
		ixgbe_send_vf_nack(sc, vf, msg[0]);
		return;
	}

	switch (msg[0] & IXGBE_VT_MSG_MASK) {
	case IXGBE_VF_SET_MAC_ADDR:
		ixgbe_vf_set_mac(sc, vf, msg);
		break;
	case IXGBE_VF_SET_MULTICAST:
		ixgbe_vf_set_mc_addr(sc, vf, msg);
		break;
	case IXGBE_VF_SET_VLAN:
		ixgbe_vf_set_vlan(sc, vf, msg);
		break;
	case IXGBE_VF_SET_LPE:
		ixgbe_vf_set_lpe(sc, vf, msg);
		break;
	case IXGBE_VF_SET_MACVLAN:
		ixgbe_vf_set_macvlan(sc, vf, msg);
		break;
	case IXGBE_VF_API_NEGOTIATE:
		ixgbe_vf_api_negotiate(sc, vf, msg);
		break;
	case IXGBE_VF_GET_QUEUES:
		ixgbe_vf_get_queues(sc, vf, msg);
		break;
	default:
		ixgbe_send_vf_nack(sc, vf, msg[0]);
	}
} /* ixgbe_process_vf_msg */


/* Tasklet for handling VF -> PF mailbox messages */
void
ixgbe_handle_mbx(void *context)
{
	struct ixgbe_softc *sc = context;
	struct ixgbe_hw *hw;
	struct ixgbe_vf *vf;
	int i;

	KASSERT(mutex_owned(&sc->core_mtx));

	hw = &sc->hw;

	for (i = 0; i < sc->num_vfs; i++) {
		vf = &sc->vfs[i];

		if ((vf->flags & IXGBE_VF_ACTIVE) == 0)
			continue;

		if (hw->mbx.ops[vf->pool].check_for_rst(hw, vf->pool) == 0)
			ixgbe_process_vf_reset(sc, vf);

		if (hw->mbx.ops[vf->pool].check_for_msg(hw, vf->pool) == 0)
			ixgbe_process_vf_msg(sc, vf);

		if (hw->mbx.ops[vf->pool].check_for_ack(hw, vf->pool) == 0)
			ixgbe_process_vf_ack(sc, vf);
	}
} /* ixgbe_handle_mbx */

int
ixgbe_init_iov(device_t dev, u16 num_vfs, const nvlist_t *config)
{
	struct ixgbe_softc *sc;
	int retval = 0;

	sc = device_get_softc(dev);
	sc->iov_mode = IXGBE_NO_VM;

	if (num_vfs == 0) {
		/* Would we ever get num_vfs = 0? */
		retval = EINVAL;
		goto err_init_iov;
	}

	/*
	 * We've got to reserve a VM's worth of queues for the PF,
	 * thus we go into "64 VF mode" if 32+ VFs are requested.
	 * With 64 VFs, you can only have two queues per VF.
	 * With 32 VFs, you can have up to four queues per VF.
	 */
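	/*
	 * Example: a request for 40 VFs selects IXGBE_64_VM, so the PF
	 * claims pool 63 and each VF is limited to two queues.
	 */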
	if (num_vfs >= IXGBE_32_VM)
		sc->iov_mode = IXGBE_64_VM;
	else
		sc->iov_mode = IXGBE_32_VM;

	/* Again, reserving 1 VM's worth of queues for the PF */
	sc->pool = sc->iov_mode - 1;

	if ((num_vfs > sc->pool) || (num_vfs >= IXGBE_64_VM)) {
		retval = ENOSPC;
		goto err_init_iov;
	}

	IXGBE_CORE_LOCK(sc);

	sc->vfs = malloc(sizeof(*sc->vfs) * num_vfs, M_IXGBE_SRIOV,
	    M_NOWAIT | M_ZERO);

	if (sc->vfs == NULL) {
		retval = ENOMEM;
		IXGBE_CORE_UNLOCK(sc);
		goto err_init_iov;
	}

	sc->num_vfs = num_vfs;
	ixgbe_init_mbx_params_pf(&sc->hw);

	/* Set the SRIOV flag now as it's needed by ixgbe_init_locked(). */
	sc->feat_en |= IXGBE_FEATURE_SRIOV;
	sc->init_locked(sc);

	IXGBE_CORE_UNLOCK(sc);

	return retval;

err_init_iov:
	sc->num_vfs = 0;
	sc->pool = 0;
	sc->iov_mode = IXGBE_NO_VM;

	return retval;
} /* ixgbe_init_iov */

void
ixgbe_uninit_iov(device_t dev)
{
	struct ixgbe_hw *hw;
	struct ixgbe_softc *sc;
	uint32_t pf_reg, vf_reg;

	sc = device_get_softc(dev);
	hw = &sc->hw;

	IXGBE_CORE_LOCK(sc);

	/* Enable rx/tx for the PF and disable it for all VFs. */
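	/*
	 * VFRE/VFTE come in pairs of 32-bit registers: index 0 covers
	 * pools 0-31 and index 1 covers pools 32-63.  Leaving only the
	 * PF's bit set and zeroing the other register disables every VF.
	 */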
	pf_reg = IXGBE_VF_INDEX(sc->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg), IXGBE_VF_BIT(sc->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg), IXGBE_VF_BIT(sc->pool));

	if (pf_reg == 0)
		vf_reg = 1;
	else
		vf_reg = 0;
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);

	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);

	free(sc->vfs, M_IXGBE_SRIOV);
	sc->vfs = NULL;
	sc->num_vfs = 0;
	sc->feat_en &= ~IXGBE_FEATURE_SRIOV;

	IXGBE_CORE_UNLOCK(sc);
} /* ixgbe_uninit_iov */

static void
ixgbe_init_vf(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, pfmbimr;

	IXGBE_CORE_LOCK_ASSERT(sc);

	hw = &sc->hw;

	if (!(vf->flags & IXGBE_VF_ACTIVE))
		return;

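	/* Unmask mailbox interrupts from this VF's pool (PFMBIMR). */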
	vf_index = IXGBE_VF_INDEX(vf->pool);
	pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
	pfmbimr |= IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);

	ixgbe_vf_set_default_vlan(sc, vf, vf->vlan_tag);

	// XXX multicast addresses

	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&sc->hw, vf->rar_index,
		    vf->ether_addr, vf->pool, TRUE);
	}

	ixgbe_vf_enable_transmit(sc, vf);
	ixgbe_vf_enable_receive(sc, vf);

	ixgbe_send_vf_msg(&sc->hw, vf, IXGBE_PF_CONTROL_MSG);
} /* ixgbe_init_vf */

void
ixgbe_initialize_iov(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
	int i;

	if (sc->iov_mode == IXGBE_NO_VM)
		return;

	IXGBE_CORE_LOCK_ASSERT(sc);

	/* RMW appropriate registers based on IOV mode */
	/* Read... */
	mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
	/* Modify... */
	mrqc &= ~IXGBE_MRQC_MRQE_MASK;
	mtqc = IXGBE_MTQC_VT_ENA;	/* No initial MTQC read needed */
	gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
	gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
	gpie &= ~IXGBE_GPIE_VTMODE_MASK;
	switch (sc->iov_mode) {
	case IXGBE_64_VM:
		mrqc |= IXGBE_MRQC_VMDQRSS64EN;
		mtqc |= IXGBE_MTQC_64VF;
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
		gpie |= IXGBE_GPIE_VTMODE_64;
		break;
	case IXGBE_32_VM:
		mrqc |= IXGBE_MRQC_VMDQRSS32EN;
		mtqc |= IXGBE_MTQC_32VF;
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
		gpie |= IXGBE_GPIE_VTMODE_32;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", sc->iov_mode);
	}
	/* Write... */
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
	IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* Enable rx/tx for the PF. */
	vf_reg = IXGBE_VF_INDEX(sc->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), IXGBE_VF_BIT(sc->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), IXGBE_VF_BIT(sc->pool));

	/* Allow VM-to-VM communication. */
	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);

	vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
	vt_ctl |= (sc->pool << IXGBE_VT_CTL_POOL_SHIFT);
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);

	for (i = 0; i < sc->num_vfs; i++)
		ixgbe_init_vf(sc, &sc->vfs[i]);
} /* ixgbe_initialize_iov */


/* Check the max frame setting of all active VFs */
void
ixgbe_recalculate_max_frame(struct ixgbe_softc *sc)
{
	struct ixgbe_vf *vf;

	IXGBE_CORE_LOCK_ASSERT(sc);

	for (int i = 0; i < sc->num_vfs; i++) {
		vf = &sc->vfs[i];
		if (vf->flags & IXGBE_VF_ACTIVE)
			ixgbe_update_max_frame(sc, vf->max_frame_size);
	}
} /* ixgbe_recalculate_max_frame */

int
ixgbe_add_vf(device_t dev, u16 vfnum, const nvlist_t *config)
{
	struct ixgbe_softc *sc;
	struct ixgbe_vf *vf;
	const void *mac;

	sc = device_get_softc(dev);

	KASSERT(vfnum < sc->num_vfs, ("VF index %d is out of range %d",
	    vfnum, sc->num_vfs));

	IXGBE_CORE_LOCK(sc);
	vf = &sc->vfs[vfnum];
	vf->pool = vfnum;

	/* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
	vf->rar_index = vfnum + 1;
	vf->default_vlan = 0;
	vf->max_frame_size = ETHER_MAX_LEN;
	ixgbe_update_max_frame(sc, vf->max_frame_size);

	if (nvlist_exists_binary(config, "mac-addr")) {
		mac = nvlist_get_binary(config, "mac-addr", NULL);
		bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
		if (nvlist_get_bool(config, "allow-set-mac"))
			vf->flags |= IXGBE_VF_CAP_MAC;
	} else
		/*
		 * If the administrator has not specified a MAC address then
		 * we must allow the VF to choose one.
		 */
		vf->flags |= IXGBE_VF_CAP_MAC;

	vf->flags |= IXGBE_VF_ACTIVE;

	ixgbe_init_vf(sc, vf);
	IXGBE_CORE_UNLOCK(sc);

	return (0);
} /* ixgbe_add_vf */

#else

void
ixgbe_handle_mbx(void *context)
{
	UNREFERENCED_1PARAMETER(context);
} /* ixgbe_handle_mbx */

inline int
ixgbe_vf_que_index(int mode, int vfnum, int num)
{
	UNREFERENCED_2PARAMETER(mode, vfnum);

	return num;
} /* ixgbe_vf_que_index */

#endif